diff --git "a/nlp/train.jsonl" "b/nlp/train.jsonl" --- "a/nlp/train.jsonl" +++ "b/nlp/train.jsonl" @@ -1,6199 +1,6199 @@ -{"id": "mintz-etal-2009-distant", "title": "Distant supervision for relation extraction without labeled data", "abstract": "Modern models of relation extraction for tasks like ACE are based on supervised learning of relations from small hand-labeled corpora. We investigate an alternative paradigm that does not require labeled corpora, avoiding the domain dependence of ACE-style algorithms, and allowing the use of corpora of any size. Our experiments use Freebase, a large semantic database of several thousand relations, to provide distant supervision. For each pair of entities that appears in some Freebase relation, we find all sentences containing those entities in a large unlabeled corpus and extract textual features to train a relation classifier. Our algorithm combines the advantages of supervised IE (combining 400,000 noisy pattern features in a probabilistic classifier) and unsupervised IE (extracting large numbers of relations from large corpora of any domain). Our model is able to extract 10,000 instances of 102 relations at a precision of 67.6%. We also analyze feature performance, showing that syntactic parse features are particularly helpful for relations that are ambiguous or lexically distant in their expression.", "phrases": ["relation extraction", "distant supervision", "knowledge base", "wikipedia", "large amount"], "overall_score": 25.976746836012236, "scores": [9.102820832918779, 7.2919053679124755, 2.8958727097966626, 2.028690411561421, 1.7751861194620853], "rank_score": 4.618895088330285} -{"id": "thorne-etal-2018-fever", "title": "FEVER: a Large-scale Dataset for Fact Extraction and VERification", "abstract": "In this paper we introduce a new publicly available dataset for verification against textual sources, FEVER: Fact Extraction and VERification. It consists of 185,445 claims generated by altering sentences extracted from Wikipedia and subsequently verified without knowledge of the sentence they were derived from. The claims are classified as Supported, Refuted or NotEnoughInfo by annotators achieving 0.6841 in Fleiss kappa. For the first two classes, the annotators also recorded the sentence(s) forming the necessary evidence for their judgment. To characterize the challenge of the dataset presented, we develop a pipeline approach and compare it to suitably designed oracles. The best accuracy we achieve on labeling a claim accompanied by the correct evidence is 31.87%, while if we ignore the evidence we achieve 50.91%. Thus we believe that FEVER is a challenging testbed that will help stimulate progress on claim verification against textual sources.", "phrases": ["fact extraction", "verification", "wikipedia", "fever", "veracity"], "overall_score": 19.984630363412727, "scores": [6.296565730480033, 5.932507773227529, 4.966701935602053, 2.3615171867201616, 1.46327242057708], "rank_score": 4.204113009321371} -{"id": "riloff-etal-2013-sarcasm", "title": "Sarcasm as Contrast between a Positive Sentiment and Negative Situation", "abstract": "A common form of sarcasm on Twitter consists of a positive sentiment contrasted with a negative situation. For example, many sarcastic tweets include a positive sentiment, such as \u201clove\u201d or \u201cenjoy\u201d, followed by an expression that describes an undesirable activity or state (e.g., \u201ctaking exams\u201d or \u201cbeing ignored\u201d). 
We have developed a sarcasm recognizer to identify this type of sarcasm in tweets. We present a novel bootstrapping algorithm that automatically learns lists of positive sentiment phrases and negative situation phrases from sarcastic tweets. We show that identifying contrasting contexts using the phrases learned through bootstrapping yields improved recall for sarcasm recognition.", "phrases": ["negative situation", "sentiment phrase", "sarcasm"], "overall_score": 17.218850538187166, "scores": [5.968757843471252, 4.97745648428764, 1.5217730841667236], "rank_score": 4.1559958039752045} -{"id": "habash-etal-2012-conventional", "title": "Conventional Orthography for Dialectal Arabic", "abstract": "Dialectal Arabic (DA) refers to the day-to-day vernaculars spoken in the Arab world. DA lives side-by-side with the official language, Modern Standard Arabic (MSA). DA differs from MSA on all levels of linguistic representation, from phonology and morphology to lexicon and syntax. Unlike MSA, DA has no standard orthography since there are no Arabic dialect academies, nor is there a large edited body of dialectal literature that follows the same spelling standard. In this paper, we present CODA, a conventional orthography for dialectal Arabic; it is designed primarily for the purpose of developing computational models of Arabic dialects. We explain the design principles of CODA and provide a detailed description of its guidelines as applied to Egyptian Arabic.", "phrases": ["dialectal arabic", "egyptian arabic", "conventional orthography"], "overall_score": 14.440217126970637, "scores": [5.910316112354879, 5.336849260618749, 0.8416925694308235], "rank_score": 4.029619314134817} -{"id": "see-etal-2017-get", "title": "Get To The Point: Summarization with Pointer-Generator Networks", "abstract": "Neural sequence-to-sequence models have provided a viable new approach for abstractive text summarization (meaning they are not restricted to simply selecting and rearranging passages from the original text). However, these models have two shortcomings: they are liable to reproduce factual details inaccurately, and they tend to repeat themselves. In this work we propose a novel architecture that augments the standard sequence-to-sequence attentional model in two orthogonal ways. First, we use a hybrid pointer-generator network that can copy words from the source text via pointing, which aids accurate reproduction of information, while retaining the ability to produce novel words through the generator. Second, we use coverage to keep track of what has been summarized, which discourages repetition. We apply our model to the CNN / Daily Mail summarization task, outperforming the current abstractive state-of-the-art by at least 2 ROUGE points.", "phrases": ["summarization", "pointer-generator network", "sequence-to-sequence model", "copy mechanism", "coverage mechanism"], "overall_score": 21.316808740647822, "scores": [6.897664349104688, 4.9575018414893846, 2.5332007806464514, 2.493676885162117, 2.1688126528073868], "rank_score": 3.8101713018420056} -{"id": "chen-etal-2017-reading", "title": "Reading Wikipedia to Answer Open-Domain Questions", "abstract": "This paper proposes to tackle open-domain question answering using Wikipedia as the unique knowledge source: the answer to any factoid question is a text span in a Wikipedia article. 
This task of machine reading at scale combines the challenges of document retrieval (finding the relevant articles) with that of machine comprehension of text (identifying the answer spans from those articles). Our approach combines a search component based on bigram hashing and TF-IDF matching with a multi-layer recurrent neural network model trained to detect answers in Wikipedia paragraphs. Our experiments on multiple existing QA datasets indicate that (1) both modules are highly competitive with respect to existing counterparts and (2) multitask learning using distant supervision on their combination is an effective complete system on this challenging task.", "phrases": ["wikipedia", "open-domain question", "knowledge source", "passage", "drqa"], "overall_score": 18.65066455430979, "scores": [6.002582055921312, 5.000043644300589, 2.4777415584394453, 2.2609005891400082, 2.140701769079409], "rank_score": 3.5763939233761532} -{"id": "hatori-etal-2011-incremental", "title": "Incremental Joint POS Tagging and Dependency Parsing in Chinese", "abstract": "We address the problem of joint part-of-speech (POS) tagging and dependency parsing in Chinese. In Chinese, some POS tags are often hard to disambiguate without considering longrange syntactic information. Also, the traditional pipeline approach to POS tagging and dependency parsing may suffer from the problem of error propagation. In this paper, we propose the first incremental approach to the task of joint POS tagging and dependency parsing, which is built upon a shift-reduce parsing framework with dynamic programming. Although the incremental approach encounters difficulties with underspecified POS tags of look-ahead words, we overcome this issue by introducing so-called delayed features. Our joint approach achieved substantial improvements over the pipeline and baseline systems in both POS tagging and dependency parsing task, achieving the new state-of-the-art performance on this joint task.", "phrases": ["pos tagging", "dependency parsing", "transition-based joint model"], "overall_score": 12.556629614065866, "scores": [4.9317342946753655, 4.92626435431025, 0.8243716246261271], "rank_score": 3.5607900912039145} -{"id": "sennrich-etal-2016-neural", "title": "Neural Machine Translation of Rare Words with Subword Units", "abstract": "Neural machine translation (NMT) models typically operate with a fixed vocabulary, but translation is an open-vocabulary problem. Previous work addresses the translation of out-of-vocabulary words by backing off to a dictionary. In this paper, we introduce a simpler and more effective approach, making the NMT model capable of open-vocabulary translation by encoding rare and unknown words as sequences of subword units. This is based on the intuition that various word classes are translatable via smaller units than words, for instance names (via character copying or transliteration), compounds (via compositional translation), and cognates and loanwords (via phonological and morphological transformations). 
We discuss the suitability of different word segmentation techniques, including simple character n-gram models and a segmentation based on the byte pair encoding compression algorithm, and empirically show that subword models improve over a back-off dictionary baseline for the WMT 15 translation tasks English-German and English-Russian by 1.1 and 1.3 BLEU, respectively.", "phrases": ["subword unit", "segmentation", "neural machine translation", "bpe", "back-translation"], "overall_score": 18.598998404413823, "scores": [5.956208346566254, 4.975971731997002, 3.5091804872600623, 2.146621292369188, 0.8823953130938478], "rank_score": 3.494075434257271} -{"id": "szarvas-etal-2008-bioscope", "title": "The BioScope corpus: annotation for negation, uncertainty and their scope in biomedical texts", "abstract": "This article reports on a corpus annotation project that has produced a freely available resource for research on handling negation and uncertainty in biomedical texts (we call this corpus the BioScope corpus). The corpus consists of three parts, namely medical free texts, biological full papers and biological scientific abstracts. The dataset contains annotations at the token level for negative and speculative keywords and at the sentence level for their linguistic scope. The annotation process was carried out by two independent linguist annotators and a chief annotator -- also responsible for setting up the annotation guidelines -- who resolved cases where the annotators disagreed. We will report our statistics on corpus size, ambiguity levels and the consistency of annotations.", "phrases": ["bioscope corpus", "negation", "scope", "token level"], "overall_score": 11.450694301632634, "scores": [4.784148408935619, 4.467945487127599, 4.118908118131126, 0.5261594864168856], "rank_score": 3.4742903751528074} -{"id": "koehn-etal-2007-moses", "title": "Moses: Open Source Toolkit for Statistical Machine Translation", "abstract": "We describe an open-source toolkit for statistical machine translation whose novel contributions are (a) support for linguistically motivated factors, (b) confusion network decoding, and (c) efficient data formats for translation models and language models. In addition to the SMT decoder, the toolkit also includes a wide variety of tools for training, tuning and applying the system to many translation tasks.", "phrases": ["statistical machine translation", "factor", "moses", "smt system", "baseline system"], "overall_score": 18.634073573773147, "scores": [6.362756733942228, 5.749385079010732, 2.0973725004662334, 1.3998466876229043, 1.3776613078348716], "rank_score": 3.397404461775394} -{"id": "sennrich-etal-2016-improving", "title": "Improving Neural Machine Translation Models with Monolingual Data", "abstract": "Neural Machine Translation (NMT) has obtained state-of-the art performance for several language pairs, while only using parallel data for training. Target-side monolingual data plays an important role in boosting fluency for phrase-based statistical machine translation, and we investigate the use of monolingual data for NMT. In contrast to previous work, which combines NMT models with separately trained language models, we note that encoder-decoder NMT architectures already have the capacity to learn the same information as a language model, and we explore strategies to train with monolingual data without changing the neural network architecture. 
By pairing monolingual training data with an automatic back-translation, we can treat it as additional parallel training data, and we obtain substantial improvements on the WMT 15 task English German (+2.8-3.7 BLEU), and for the low-resourced IWSLT 14 task Turkish->English (+2.1-3.4 BLEU), obtaining new state-of-the-art results. We also show that fine-tuning on in-domain monolingual and parallel data gives substantial improvements for the IWSLT 15 task English->German.", "phrases": ["neural machine translation", "monolingual data", "back-translation", "data augmentation", "synthetic parallel corpus"], "overall_score": 18.862422546142273, "scores": [7.444062011901696, 3.5292215395867768, 2.607544904455198, 2.0318108160802537, 1.3478916464648363], "rank_score": 3.392106183697752} -{"id": "banarescu-etal-2013-abstract", "title": "Abstract Meaning Representation for Sembanking", "abstract": "We describe Abstract Meaning Representation (AMR), a semantic representation language in which we are writing down the meanings of thousands of English sentences. We hope that a sembank of simple, whole-sentence semantic structures will spur new work in statistical natural language understanding and generation, like the Penn Treebank encouraged work on statistical parsing. This paper gives an overview of AMR and tools associated with it.", "phrases": ["amr", "abstract meaning representation", "acyclic graph", "node", "propbank"], "overall_score": 17.083688213185386, "scores": [8.289922180380453, 3.695417092150451, 1.6569120706624094, 1.5972423980945571, 1.411603251610134], "rank_score": 3.330219398579601} -{"id": "nadeem-etal-2021-stereoset", "title": "StereoSet: Measuring stereotypical bias in pretrained language models", "abstract": "A stereotype is an over-generalized belief about a particular group of people, e.g., Asians are good at math or African Americans are athletic. Such beliefs (biases) are known to hurt target groups. Since pretrained language models are trained on large real-world data, they are known to capture stereotypical biases. It is important to quantify to what extent these biases are present in them. Although this is a rapidly growing area of research, existing literature lacks in two important aspects: 1) they mainly evaluate bias of pretrained language models on a small set of artificial sentences, even though these models are trained on natural data 2) current evaluations focus on measuring bias without considering the language modeling ability of a model, which could lead to misleading trust on a model even if it is a poor language model. We address both these problems. We present StereoSet, a large-scale natural English dataset to measure stereotypical biases in four domains: gender, profession, race, and religion. We contrast both stereotypical bias and language modeling ability of popular models like BERT, GPT-2, RoBERTa, and XLnet. We show that these models exhibit strong stereotypical biases. Our data and code are available at .", "phrases": ["stereotypical bias", "language model", "stereoset"], "overall_score": 10.844047430129605, "scores": [3.5186962447841004, 3.2126188306360954, 3.1393622708857194], "rank_score": 3.2902257821019716} -{"id": "li-etal-2011-joint", "title": "Joint Models for Chinese POS Tagging and Dependency Parsing", "abstract": "Part-of-speech (POS) is an indispensable feature in dependency parsing. Current research usually models POS tagging and dependency parsing independently. This may suffer from error propagation problem. 
Our experiments show that parsing accuracy drops by about 6% when using automatic POS tags instead of gold ones. To solve this issue, this paper proposes a solution by jointly optimizing POS tagging and dependency parsing in a unique model. We design several joint models and their corresponding decoding algorithms to incorporate different feature sets. We further present an effective pruning strategy to reduce the search space of candidate POS tags, leading to significant improvement of parsing speed. Experimental results on Chinese Penn Treebank 5 show that our joint models significantly improve the state-of-the-art parsing accuracy by about 1.5%. Detailed analysis shows that the joint method is able to choose such POS tags that are more helpful and discriminative from parsing viewpoint. This is the fundamental reason of parsing accuracy improvement.", "phrases": ["pos tagging", "dependency parsing", "first joint model"], "overall_score": 10.855903506097468, "scores": [4.968165505101209, 4.261278773138704, 0.5441786848296697], "rank_score": 3.2578743210231944} -{"id": "rajpurkar-etal-2016-squad", "title": "SQuAD: 100,000+ Questions for Machine Comprehension of Text", "abstract": "We present the Stanford Question Answering Dataset (SQuAD), a new reading comprehension dataset consisting of 100,000+ questions posed by crowdworkers on a set of Wikipedia articles, where the answer to each question is a segment of text from the corresponding reading passage. We analyze the dataset to understand the types of reasoning required to answer the questions, leaning heavily on dependency and constituency trees. We build a strong logistic regression model, which achieves an F1 score of 51.0%, a significant improvement over a simple baseline (20%). However, human performance (86.8%) is much higher, indicating that the dataset presents a good challenge problem for future research. \nThe dataset is freely available at this https URL", "phrases": ["machine comprehension", "wikipedia article", "squad", "large-scale dataset", "answer span"], "overall_score": 18.43672649544106, "scores": [7.657400459202454, 2.6119792988248904, 2.07735685246439, 1.978547866048555, 1.9037351012921058], "rank_score": 3.245803915566479} -{"id": "blatz-etal-2004-confidence", "title": "Confidence Estimation for Machine Translation", "abstract": "We present a detailed study of confidence estimation for machine translation. Various methods for determining whether MT output is correct are investigated, for both whole sentences and words. Since the notion of correctness is not intuitively clear in this context, different ways of defining it are proposed. We present results on data from the NIST 2003 Chinese-to-English MT evaluation.", "phrases": ["machine translation", "correctness", "confidence estimation", "access", "segment"], "overall_score": 15.048360360157487, "scores": [6.483332295202637, 5.04948988640547, 2.054894716401148, 1.3250349868874547, 1.1256668573594073], "rank_score": 3.207683748451223} -{"id": "maynard-greenwood-2014-cares", "title": "Who cares about Sarcastic Tweets? Investigating the Impact of Sarcasm on Sentiment Analysis.", "abstract": "Sarcasm is a common phenomenon in social media, and is inherently difficult to analyse, not just automatically but often for humans too. It has an important effect on sentiment, but is usually ignored in social media analysis, because it is considered too tricky to handle. 
While there exist a few systems which can detect sarcasm, almost no work has been carried out on studying the effect that sarcasm has on sentiment in tweets, and on incorporating this into automatic tools for sentiment analysis. We perform an analysis of the effect of sarcasm scope on the polarity of tweets, and have compiled a number of rules which enable us to improve the accuracy of sentiment analysis when sarcasm is known to be present. We consider in particular the effect of sentiment and sarcasm contained in hashtags, and have developed a hashtag tokeniser for GATE, so that sentiment and sarcasm found within hashtags can be detected more easily. According to our experiments, the hashtag tokenisation achieves 98% Precision, while the sarcasm detection achieved 91% Precision and polarity detection 80%.", "phrases": ["sarcasm", "sentiment analysis", "hashtag"], "overall_score": 11.289016642588901, "scores": [4.663919794970207, 3.4529746690688086, 1.4870724847042869], "rank_score": 3.2013223162477673} -{"id": "blitzer-etal-2007-biographies", "title": "Biographies, Bollywood, Boom-boxes and Blenders: Domain Adaptation for Sentiment Classification", "abstract": "Automatic sentiment classification has been extensively studied and applied in recent years. However, sentiment is expressed differently in different domains, and annotating corpora for every possible domain of interest is impractical. We investigate domain adaptation for sentiment classifiers, focusing on online reviews for different types of products. First, we extend to sentiment classification the recently-proposed structural correspondence learning (SCL) algorithm, reducing the relative error due to adaptation between domains by an average of 30% over the original SCL algorithm and 46% over a supervised baseline. Second, we identify a measure of domain similarity that correlates well with the potential for adaptation of a classifier from one domain to another. This measure could for instance be used to select a small set of domains to annotate whose trained classifiers would transfer well to many other domains.", "phrases": ["domain adaptation", "sentiment classification", "product", "scl", "pivot feature"], "overall_score": 14.974088109145994, "scores": [5.651208737753669, 5.098672702001909, 2.2836496470001078, 1.472826224753414, 1.30177263944555], "rank_score": 3.16162599019093} -{"id": "pennington-etal-2014-glove", "title": "GloVe: Global Vectors for Word Representation", "abstract": "Recent methods for learning vector space representations of words have succeeded in capturing fine-grained semantic and syntactic regularities using vector arithmetic, but the origin of these regularities has remained opaque. We analyze and make explicit the model properties needed for such regularities to emerge in word vectors. The result is a new global logbilinear regression model that combines the advantages of the two major model families in the literature: global matrix factorization and local context window methods. Our model efficiently leverages statistical information by training only on the nonzero elements in a word-word cooccurrence matrix, rather than on the entire sparse matrix or on individual context windows in a large corpus. The model produces a vector space with meaningful substructure, as evidenced by its performance of 75% on a recent word analogy task. 
It also outperforms related models on similarity tasks and named entity recognition.", "phrases": ["word representation", "regularity", "glove", "co-occurrence information", "lsa"], "overall_score": 19.178231517434114, "scores": [5.738868272378353, 5.376256631760994, 1.825624295198751, 1.4938583287134768, 1.3312106708918074], "rank_score": 3.1531636397886764} -{"id": "ratinov-etal-2011-local", "title": "Local and Global Algorithms for Disambiguation to Wikipedia", "abstract": "Disambiguating concepts and entities in a context sensitive way is a fundamental problem in natural language processing. The comprehensiveness of Wikipedia has made the online encyclopedia an increasingly popular target for disambiguation. Disambiguation to Wikipedia is similar to a traditional Word Sense Disambiguation task, but distinct in that the Wikipedia link structure provides additional information about which disambiguations are compatible. In this work we analyze approaches that utilize this information to arrive at coherent sets of disambiguations for a given document (which we call \"global\" approaches), and compare them to more traditional (local) approaches. We show that previous approaches for global disambiguation can be improved, but even then the local disambiguation provides a baseline which is very hard to beat.", "phrases": ["disambiguation", "wikipedia", "mention"], "overall_score": 12.633774558266378, "scores": [3.877258196246129, 3.666659814548557, 1.9140736067903696], "rank_score": 3.152663872528352} -{"id": "ganitkevitch-etal-2013-ppdb", "title": "PPDB: The Paraphrase Database", "abstract": "We present the 1.0 release of our paraphrase database, PPDB. Its English portion, PPDB:Eng, contains over 220 million paraphrase pairs, consisting of 73 million phrasal and 8 million lexical paraphrases, as well as 140 million paraphrase patterns, which capture many meaning-preserving syntactic transformations. The paraphrases are extracted from bilingual parallel corpora totaling over 100 million sentence pairs and over 2 billion English words. We also release PPDB:Spa, a collection of 196 million Spanish paraphrases. Each paraphrase pair in PPDB contains a set of associated scores, including paraphrase probabilities derived from the bitext data and a variety of monolingual distributional similarity scores computed from the Google n-grams and the Annotated Gigaword corpus. Our release includes pruning tools that allow users to determine their own precision/recall tradeoff.", "phrases": ["paraphrase database", "parallel corpora", "ppdb", "pivoting", "coverage"], "overall_score": 12.90385276824579, "scores": [6.397264887322019, 5.822644774615403, 1.4687228632465539, 1.0722780659654414, 0.8720341646642527], "rank_score": 3.1265889511627343} -{"id": "wiegand-etal-2010-survey", "title": "A survey on the role of negation in sentiment analysis", "abstract": "This paper presents a survey on the role of negation in sentiment analysis. Negation is a very common linguistic construction that affects polarity and, therefore, needs to be taken into consideration in sentiment analysis. \n \nWe will present various computational approaches modeling negation in sentiment analysis. We will, in particular, focus on aspects, such as level of representation used for sentiment analysis, negation word detection and scope of negation. 
We will also discuss limits and challenges of negation modeling on that task.", "phrases": ["survey", "negation", "sentiment analysis", "biomedical domain"], "overall_score": 9.431678760557046, "scores": [4.739077748539143, 4.289882158361083, 2.82158893446023, 0.5411203987055733], "rank_score": 3.0979173100165074} -{"id": "riedel-clarke-2006-incremental", "title": "Incremental Integer Linear Programming for Non-projective Dependency Parsing", "abstract": "Integer Linear Programming has recently been used for decoding in a number of probabilistic models in order to enforce global constraints. However, in certain applications, such as non-projective dependency parsing and machine translation, the complete formulation of the decoding problem as an integer linear program renders solving intractable. We present an approach which solves the problem incrementally, thus we avoid creating intractable integer linear programs. This approach is applied to Dutch dependency parsing and we show how the addition of linguistically motivated constraints can yield a significant improvement over state-of-the-art.", "phrases": ["integer linear programming", "dependency parsing", "ilp"], "overall_score": 9.538332756726117, "scores": [3.7545845247744007, 3.686783423179878, 1.8160261842709517], "rank_score": 3.0857980440750765} -{"id": "pak-paroubek-2010-twitter", "title": "Twitter as a Corpus for Sentiment Analysis and Opinion Mining", "abstract": "Microblogging today has become a very popular communication tool among Internet users. Millions of users share opinions on different aspects of life everyday. Therefore microblogging web-sites are rich sources of data for opinion mining and sentiment analysis. Because microblogging has appeared relatively recently, there are a few research works that were devoted to this topic. In our paper, we focus on using Twitter, the most popular microblogging platform, for the task of sentiment analysis. We show how to automatically collect a corpus for sentiment analysis and opinion mining purposes. We perform linguistic analysis of the collected corpus and explain discovered phenomena. Using the corpus, we build a sentiment classifier, that is able to determine positive, negative and neutral sentiments for a document. Experimental evaluations show that our proposed techniques are efficient and performs better than previously proposed methods. In our research, we worked with English, however, the proposed technique can be used with any other language.", "phrases": ["sentiment analysis", "twitter", "n-gram", "social medium"], "overall_score": 13.625789196284089, "scores": [5.503272653565809, 4.908124513118488, 1.0792849719699062, 0.8435841593813108], "rank_score": 3.083566574508879} -{"id": "wang-etal-2018-glue", "title": "GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding", "abstract": "Human ability to understand language is general, flexible, and robust. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. 
To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.", "phrases": ["multi-task benchmark", "natural language understanding", "glue", "downstream task", "mrpc"], "overall_score": 16.430217853611484, "scores": [6.74935624059816, 3.770926780088176, 1.7864696379142964, 1.815401169217291, 1.0708329571892519], "rank_score": 3.038597357001435} -{"id": "luong-etal-2015-effective", "title": "Effective Approaches to Attention-based Neural Machine Translation", "abstract": "An attentional mechanism has lately been used to improve neural machine translation (NMT) by selectively focusing on parts of the source sentence during translation. However, there has been little work exploring useful architectures for attention-based NMT. This paper examines two simple and effective classes of attentional mechanism: a global approach which always attends to all source words and a local one that only looks at a subset of source words at a time. We demonstrate the effectiveness of both approaches on the WMT translation tasks between English and German in both directions. With local attention, we achieve a significant gain of 5.0 BLEU points over non-attentional systems that already incorporate known techniques such as dropout. Our ensemble model using different attention architectures yields a new state-of-the-art result in the WMT\u201915 English to German translation task with 25.9 BLEU points, an improvement of 1.0 BLEU points over the existing best system backed by NMT and an n-gram reranker. 1", "phrases": ["neural machine translation", "source sentence", "attention model", "context vector", "encoder-decoder architecture"], "overall_score": 17.0183840695781, "scores": [7.1948326517793415, 2.5045684148993566, 1.864055895541572, 1.8111506987977635, 1.660675975166936], "rank_score": 3.007056727236994} -{"id": "devlin-etal-2019-bert", "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. 
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "phrases": ["deep bidirectional transformers", "language understanding", "encoder representation", "pre-trained model", "many nlp task"], "overall_score": 23.579441130185828, "scores": [4.765419831465302, 0.8592532996825256, 3.825201150959441, 3.2212715902710585, 2.3459179005120543], "rank_score": 3.0034127545780764} -{"id": "kim-2014-convolutional", "title": "Convolutional Neural Networks for Sentence Classification", "abstract": "We report on a series of experiments with convolutional neural networks (CNN) trained on top of pre-trained word vectors for sentence-level classification tasks. We show that a simple CNN with little hyperparameter tuning and static vectors achieves excellent results on multiple benchmarks. Learning task-specific vectors through fine-tuning offers further gains in performance. We additionally propose a simple modification to the architecture to allow for the use of both task-specific and static vectors. The CNN models discussed herein improve upon the state of the art on 4 out of 7 tasks, which include sentiment analysis and question classification.", "phrases": ["sentence classification", "cnn", "convolutional neural networks", "learning model", "filter"], "overall_score": 16.582416521367872, "scores": [4.0929984673342945, 3.6728962863205608, 3.5605472888630816, 1.9488089760205762, 1.6663503108303477], "rank_score": 2.9883202658737718} -{"id": "snover-etal-2006-study", "title": "A Study of Translation Edit Rate with Targeted Human Annotation", "abstract": "We examine a new, intuitive measure for evaluating machine-translation output that avoids the knowledge intensiveness of more meaning-based approaches, and the labor-intensiveness of human judgments. Translation Edit Rate (TER) measures the amount of editing that a human would have to perform to change a system output so it exactly matches a reference translation. We show that the single-reference variant of TER correlates as well with human judgments of MT quality as the four-reference variant of BLEU. We also define a human-targeted TER (or HTER) and show that it yields higher correlations with human judgments than BLEU\u2014even when BLEU is given human-targeted references. 
Our results indicate that HTER correlates with human judgments better than HMETEOR and that the four-reference variants of TER and HTER correlate with human judgments as well as\u2014or better than\u2014a second human judgment does.", "phrases": ["translation edit rate", "human judgment", "hter", "post-editing effort", "deletion"], "overall_score": 14.369932919766216, "scores": [6.610310760165786, 3.143367621082619, 1.9105034995064634, 1.6788092290842365, 1.513393604947113], "rank_score": 2.9712769429572434} -{"id": "agirre-etal-2012-semeval", "title": "SemEval-2012 Task 6: A Pilot on Semantic Textual Similarity", "abstract": "Semantic Textual Similarity (STS) measures the degree of semantic equivalence between two texts. This paper presents the results of the STS pilot task in Semeval. The training data contained 2000 sentence pairs from previously existing paraphrase datasets and machine translation evaluation resources. The test data also comprised 2000 sentences pairs for those datasets, plus two surprise datasets with 400 pairs from a different machine translation evaluation corpus and 750 pairs from a lexical resource mapping exercise. The similarity of pairs of sentences was rated on a 0-5 scale (low to high similarity) by human judges using Amazon Mechanical Turk, with high Pearson correlation scores, around 90%. 35 teams participated in the task, submitting 88 runs. The best results scored a Pearson correlation >80%, well above a simple lexical baseline that only scored a 31% correlation. This pilot task opens an exciting way ahead, although there are still open issues, specially the evaluation metric.", "phrases": ["semantic textual similarity", "semeval", "long text segment"], "overall_score": 13.055840772643379, "scores": [6.714097647480197, 1.592995807576244, 0.5327038320160038], "rank_score": 2.946599095690815} -{"id": "feng-etal-2020-codebert", "title": "CodeBERT: A Pre-Trained Model for Programming and Natural Languages", "abstract": "We present CodeBERT, a bimodal pre-trained model for programming language (PL) and natural language (NL). CodeBERT learns general-purpose representations that support downstream NL-PL applications such as natural language code search, code documentation generation, etc. We develop CodeBERT with Transformer-based neural architecture, and train it with a hybrid objective function that incorporates the pre-training task of replaced token detection, which is to detect plausible alternatives sampled from generators. This enables us to utilize both \u201cbimodal\u201d data of NL-PL pairs and \u201cunimodal data, where the former provides input tokens for model training while the latter helps to learn better generators. We evaluate CodeBERT on two NL-PL applications by fine-tuning model parameters. Results show that CodeBERT achieves state-of-the-art performance on both natural language code search and code documentation generation. Furthermore, to investigate what type of knowledge is learned in CodeBERT, we construct a dataset for NL-PL probing, and evaluate in a zero-shot setting where parameters of pre-trained models are fixed. 
Results show that CodeBERT performs better than previous pre-trained models on NLPL probing.", "phrases": ["pre-trained model", "programming language", "codebert"], "overall_score": 8.431834654823472, "scores": [3.808196963458705, 3.2426954383636306, 1.7007513600453183], "rank_score": 2.917214587289218} -{"id": "zeng-etal-2015-distant", "title": "Distant Supervision for Relation Extraction via Piecewise Convolutional Neural Networks", "abstract": "Two problems arise when using distant supervision for relation extraction. First, in this method, an already existing knowledge base is heuristically aligned to texts, and the alignment results are treated as labeled data. However, the heuristic alignment can fail, resulting in wrong label problem. In addition, in previous approaches, statistical models have typically been applied to ad hoc features. The noise that originates from the feature extraction process can cause poor performance. In this paper, we propose a novel model dubbed the Piecewise Convolutional Neural Networks (PCNNs) with multi-instance learning to address these two problems. To solve the first problem, distant supervised relation extraction is treated as a multi-instance problem in which the uncertainty of instance labels is taken into account. To address the latter problem, we avoid feature engineering and instead adopt convolutional architecture with piecewise max pooling to automatically learn relevant features. Experiments show that our method is effective and outperforms several competitive baseline methods.", "phrases": ["relation extraction", "convolutional neural network", "multi-instance", "distant supervision", "cnn"], "overall_score": 12.413498774460534, "scores": [4.7970174692190986, 4.16145543504812, 2.2524495817247945, 1.7358509459051226, 1.5662823328074316], "rank_score": 2.9026111529409135} -{"id": "qian-liu-2012-joint", "title": "Joint Chinese Word Segmentation, POS Tagging and Parsing", "abstract": "In this paper, we propose a novel decoding algorithm for discriminative joint Chinese word segmentation, part-of-speech (POS) tagging, and parsing. Previous work often used a pipeline method -- Chinese word segmentation followed by POS tagging and parsing, which suffers from error propagation and is unable to leverage information in later modules for earlier components. In our approach, we train the three individual models separately during training, and incorporate them together in a unified framework during decoding. We extend the CYK parsing algorithm so that it can deal with word segmentation and POS tagging features. As far as we know, this is the first work on joint Chinese word segmentation, POS tagging and parsing. Our experimental results on Chinese Tree Bank 5 corpus show that our approach outperforms the state-of-the-art pipeline system.", "phrases": ["word segmentation", "pos tagging", "joint model"], "overall_score": 7.951703310315294, "scores": [4.194371222823962, 3.8689256924202, 0.5406152840650079], "rank_score": 2.867970733103057} -{"id": "koehn-knowles-2017-six", "title": "Six Challenges for Neural Machine Translation", "abstract": "We explore six challenges for neural machine translation: domain mismatch, amount of training data, rare words, long sentences, word alignment, and beam search. 
We show both deficiencies and improvements over the quality of phrase-based statistical machine translation.", "phrases": ["neural machine translation", "domain mismatch", "nmt model", "translation quality", "low-resource language pair"], "overall_score": 15.76746526031562, "scores": [7.0219497605306875, 2.2283129723681756, 1.9711661672643426, 1.7583932581050696, 1.3509587802566456], "rank_score": 2.866156187704984} -{"id": "ma-etal-2019-stacl", "title": "STACL: Simultaneous Translation with Implicit Anticipation and Controllable Latency using Prefix-to-Prefix Framework", "abstract": "Simultaneous translation, which translates sentences before they are finished, is use- ful in many scenarios but is notoriously dif- ficult due to word-order differences. While the conventional seq-to-seq framework is only suitable for full-sentence translation, we pro- pose a novel prefix-to-prefix framework for si- multaneous translation that implicitly learns to anticipate in a single translation model. Within this framework, we present a very sim- ple yet surprisingly effective \u201cwait-k\u201d policy trained to generate the target sentence concur- rently with the source sentence, but always k words behind. Experiments show our strat- egy achieves low latency and reasonable qual- ity (compared to full-sentence translation) on 4 directions: zh\u2194en and de\u2194en.", "phrases": ["simultaneous translation", "prefix-to-prefix framework", "wait-k"], "overall_score": 10.740066290758495, "scores": [3.5419474807816145, 3.1707444318720968, 1.8017331001630343], "rank_score": 2.8381416709389153} -{"id": "peters-etal-2018-deep", "title": "Deep Contextualized Word Representations", "abstract": "We introduce a new type of deep contextualized word representation that models both (1) complex characteristics of word use (e.g., syntax and semantics), and (2) how these uses vary across linguistic contexts (i.e., to model polysemy). Our word vectors are learned functions of the internal states of a deep bidirectional language model (biLM), which is pre-trained on a large text corpus. We show that these representations can be easily added to existing models and significantly improve the state of the art across six challenging NLP problems, including question answering, textual entailment and sentiment analysis. We also present an analysis showing that exposing the deep internals of the pre-trained network is crucial, allowing downstream models to mix different types of semi-supervision signals.", "phrases": ["language model", "pre-training", "deep", "elmo", "downstream task"], "overall_score": 18.139556168170326, "scores": [0.9022027521963384, 4.582070687003685, 3.9018956885163623, 2.759713475255896, 2.0177357197610015], "rank_score": 2.8327236645466565} -{"id": "pasha-etal-2014-madamira", "title": "MADAMIRA: A Fast, Comprehensive Tool for Morphological Analysis and Disambiguation of Arabic", "abstract": "In this paper, we present MADAMIRA, a system for morphological analysis and disambiguation of Arabic that combines some of the best aspects of two previously commonly used systems for Arabic processing, MADA (Habash and Rambow, 2005; Habash et al., 2009; Habash et al., 2013) and AMIRA (Diab et al., 2007). MADAMIRA improves upon the two systems with a more streamlined Java implementation that is more robust, portable, extensible, and is faster than its ancestors by more than an order of magnitude. 
We also discuss an online demo (see ) that highlights these aspects.", "phrases": ["disambiguation", "arabic", "morphological tagger"], "overall_score": 10.957753940060462, "scores": [4.0322566087942135, 3.920042500543476, 0.5394484717241896], "rank_score": 2.8305825270206264} -{"id": "davidov-etal-2010-semi", "title": "Semi-Supervised Recognition of Sarcasm in Twitter and Amazon", "abstract": "Sarcasm is a form of speech act in which the speakers convey their message in an implicit way. The inherently ambiguous nature of sarcasm sometimes makes it hard even for humans to decide whether an utterance is sarcastic or not. Recognition of sarcasm can benefit many sentiment analysis NLP applications, such as review summarization, dialogue systems and review ranking systems. \n \nIn this paper we experiment with semi-supervised sarcasm identification on two very different data sets: a collection of 5.9 million tweets collected from Twitter, and a collection of 66000 product reviews from Amazon. Using the Mechanical Turk we created a gold standard sample in which each sentence was tagged by 3 annotators, obtaining F-scores of 0.78 on the product reviews dataset and 0.83 on the Twitter dataset. We discuss the differences between the datasets and how the algorithm uses them (e.g., for the Amazon dataset the algorithm makes use of structured information). We also discuss the utility of Twitter #sarcasm hashtags for the task.", "phrases": ["sarcasm", "twitter", "emoticon", "semi-supervised approach"], "overall_score": 11.525932223539268, "scores": [4.860803039053243, 3.856581507941789, 1.4020691370778458, 1.0956140648930581], "rank_score": 2.8037669372414835} -{"id": "callison-burch-etal-2009-findings", "title": "Findings of the 2009 Workshop on Statistical Machine Translation", "abstract": "This paper presents the results of the WMT09 shared tasks, which included a translation task, a system combination task, and an evaluation task. We conducted a large-scale manual evaluation of 87 machine translation systems and 22 system combination entries. We used the ranking of these systems to measure how strongly automatic metrics correlate with human judgments of translation quality, for more than 20 metrics. We present a new evaluation technique whereby system output is edited and judged for correctness.", "phrases": ["workshop", "statistical machine translation", "wmt", "quality estimation", "smt system"], "overall_score": 14.419243095256464, "scores": [4.07495587186001, 3.925089086196244, 2.7268437235620984, 1.9247643395963374, 1.2768883079853532], "rank_score": 2.7857082658400083} -{"id": "poliak-etal-2018-hypothesis", "title": "Hypothesis Only Baselines in Natural Language Inference", "abstract": "We propose a hypothesis only baseline for diagnosing Natural Language Inference (NLI). Especially when an NLI dataset assumes inference is occurring based purely on the relationship between a context and a hypothesis, it follows that assessing entailment relations while ignoring the provided context is a degenerate solution. Yet, through experiments on 10 distinct NLI datasets, we find that this approach, which we refer to as a hypothesis-only model, is able to significantly outperform a majority-class baseline across a number of NLI datasets. 
Our analysis suggests that statistical irregularities may allow a model to perform NLI in some datasets beyond what should be achievable without access to the context.", "phrases": ["natural language inference", "nli dataset", "irregularity", "hypothesis", "annotation artifact"], "overall_score": 11.540217884718954, "scores": [5.042925885106778, 4.056760371508169, 1.7856959771291478, 1.553713568945675, 1.4350834581953624], "rank_score": 2.7748358521770267} -{"id": "rosen-etal-2016-mwes", "title": "MWEs in Treebanks: From Survey to Guidelines", "abstract": "By means of an online survey, we have investigated ways in which various types of multiword expressions are annotated in existing treebanks. The results indicate that there is considerable variation in treatments across treebanks and thereby also, to some extent, across languages and across theoretical frameworks. The comparison is focused on the annotation of light verb constructions and verbal idioms. The survey shows that the light verb constructions either get special annotations as such, or are treated as ordinary verbs, while VP idioms are handled through different strategies. Based on insights from our investigation, we propose some general guidelines for annotating multiword expressions in treebanks. The recommendations address the following application-based needs: distinguishing MWEs from similar but compositional constructions; searching distinct types of MWEs in treebanks; awareness of literal and nonliteral meanings; and normalization of the MWE representation. The cross-lingually and cross-theoretically focused survey is intended as an aid to accessing treebanks and an aid for further work on treebank annotation.", "phrases": ["treebank", "guideline", "mwes"], "overall_score": 5.653835394709148, "scores": [3.0579727594997768, 2.8926561598237006, 2.2061313666259057], "rank_score": 2.718920095316461} -{"id": "yang-etal-2016-hierarchical", "title": "Hierarchical Attention Networks for Document Classification", "abstract": "We propose a hierarchical attention network for document classification. Our model has two distinctive characteristics: (i) it has a hierarchical structure that mirrors the hierarchical structure of documents; (ii) it has two levels of attention mechanisms applied at the wordand sentence-level, enabling it to attend differentially to more and less important content when constructing the document representation. Experiments conducted on six large scale text classification tasks demonstrate that the proposed architecture outperform previous methods by a substantial margin. Visualization of the attention layers illustrates that the model selects qualitatively informative words and sentences.", "phrases": ["document classification", "hierarchical attention networks", "han", "sentence level", "deep learning model"], "overall_score": 13.554675776342666, "scores": [5.758738977045871, 3.2627003460326343, 2.115610658505629, 1.4619993395615323, 0.963188016165503], "rank_score": 2.7124474674622343} -{"id": "zhang-zong-2016-exploiting", "title": "Exploiting Source-side Monolingual Data in Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) based on the encoder-decoder architecture has recently become a new paradigm. Researchers have proven that the target-side monolingual data can greatly enhance the decoder model of NMT. 
However, the source-side monolingual data is not fully explored although it should be useful to strengthen the encoder model of NMT, especially when the parallel corpus is far from suf\ufb01cient. In this paper, we propose two approaches to make full use of the source-side monolingual data in NMT. The \ufb01rst approach employs the self-learning algorithm to generate the synthetic large-scale parallel data for NMT training. The second approach applies the multi-task learning framework using two NMTs to predict the translation and the reordered source-side monolingual sentences simultaneously. The extensive experiments demonstrate that the proposed methods obtain signi\ufb01cant improvements over the strong attention-based NMT.", "phrases": ["source-side monolingual data", "neural machine translation", "multi-task learning framework"], "overall_score": 10.377957008161786, "scores": [4.502095501335105, 3.069916743264397, 0.5598203822522715], "rank_score": 2.7106108756172578} -{"id": "chiang-2005-hierarchical", "title": "A Hierarchical Phrase-Based Model for Statistical Machine Translation", "abstract": "We present a statistical phrase-based translation model that uses hierarchical phrases---phrases that contain subphrases. The model is formally a synchronous context-free grammar but is learned from a bitext without any syntactic information. Thus it can be seen as a shift to the formal machinery of syntax-based translation systems without any linguistic commitment. In our experiments using BLEU as a metric, the hierarchical phrase-based model achieves a relative improvement of 7.5% over Pharaoh, a state-of-the-art phrase-based system.", "phrases": ["phrase-based model", "context-free grammar", "hpb", "smt system", "syntax-based model"], "overall_score": 14.482045465993675, "scores": [5.70150538667778, 2.42051144182881, 1.895317002100065, 1.77200478577385, 1.7526047453331244], "rank_score": 2.7083886723427257} -{"id": "levy-andrew-2006-tregex", "title": "Tregex and Tsurgeon: tools for querying and manipulating tree data structures", "abstract": "With syntactically annotated corpora becoming increasingly available for a variety of languages and grammatical frameworks, tree query tools have proven invaluable to linguists and computer scientists for both data exploration and corpus-based research. We provide a combined engine for tree query (Tregex) and manipulation (Tsurgeon) that can operate on arbitrary tree data structures with no need for preprocessing. Tregex remedies several expressive and implementational limitations of existing query tools, while Tsurgeon is to our knowledge the most expressive tree manipulation utility available.", "phrases": ["tsurgeon", "tree data structure", "tregex"], "overall_score": 7.124981771193478, "scores": [3.4841056159033785, 2.69755254589905, 1.9178041248681819], "rank_score": 2.6998207622235366} -{"id": "cho-etal-2014-learning", "title": "Learning Phrase Representations using RNN Encoder\u2013Decoder for Statistical Machine Translation", "abstract": "In this paper, we propose a novel neural network model called RNN Encoder\u2010 Decoder that consists of two recurrent neural networks (RNN). One RNN encodes a sequence of symbols into a fixedlength vector representation, and the other decodes the representation into another sequence of symbols. The encoder and decoder of the proposed model are jointly trained to maximize the conditional probability of a target sequence given a source sequence. 
The performance of a statistical machine translation system is empirically found to improve by using the conditional probabilities of phrase pairs computed by the RNN Encoder\u2010Decoder as an additional feature in the existing log-linear model. Qualitatively, we show that the proposed model learns a semantically and syntactically meaningful representation of linguistic phrases.", "phrases": ["rnn", "statistical machine translation", "recurrent neural network", "encoder-decoder framework", "sequence-to-sequence"], "overall_score": 15.181421832436792, "scores": [2.649460407031867, 2.9349949365266688, 2.8543272974825684, 2.834199338168764, 2.043018466178905], "rank_score": 2.6632000890777547} -{"id": "banea-etal-2008-multilingual", "title": "Multilingual Subjectivity Analysis Using Machine Translation", "abstract": "Although research in other languages is increasing, much of the work in subjectivity analysis has been applied to English data, mainly due to the large body of electronic resources and tools that are available for this language. In this paper, we propose and evaluate methods that can be employed to transfer a repository of subjectivity resources across languages. Specifically, we attempt to leverage on the resources available for English and, by employing machine translation, generate resources for subjectivity analysis in other languages. Through comparative evaluations on two different languages (Romanian and Spanish), we show that automatic translation is a viable alternative for the construction of resources and tools for subjectivity analysis in a new target language.", "phrases": ["subjectivity analysis", "machine translation", "romanian", "sentiment analysis"], "overall_score": 9.452098824573058, "scores": [4.173362724866285, 3.9767615769960636, 1.4236427027744079, 1.0604647623800176], "rank_score": 2.658557941754194} -{"id": "riedel-etal-2013-relation", "title": "Relation Extraction with Matrix Factorization and Universal Schemas", "abstract": "\u00a9 2013 Association for Computational Linguistics. Traditional relation extraction predicts relations within some fixed and finite target schema. Machine learning approaches to this task require either manual annotation or, in the case of distant supervision, existing structured sources of the same schema. The need for existing datasets can be avoided by using a universal schema: the union of all involved schemas (surface form predicates as in OpenIE, and relations in the schemas of preexisting databases). This schema has an almost unlimited set of relations (due to surface forms), and supports integration with existing structured data (through the relation types of existing databases). To populate a database of such schema we present matrix factorization models that learn latent feature vectors for entity tuples and relations. We show that such latent models achieve substantially higher accuracy than a traditional classification approach. More importantly, by operating simultaneously on relations observed in text and in pre-existing structured DBs such as Freebase, we are able to reason about unstructured and structured data in mutually-supporting ways. 
By doing so, our approach outperforms state-of-the-art distant supervision.", "phrases": ["matrix factorization", "universal schemas", "openie", "relation extraction", "knowledge base"], "overall_score": 12.312089891160248, "scores": [5.475237721298065, 3.145706018671933, 1.4536620263805506, 2.1547972662619714, 1.0530239684354892], "rank_score": 2.656485400209602} -{"id": "zheng-etal-2013-deep", "title": "Deep Learning for Chinese Word Segmentation and POS Tagging", "abstract": "This study explores the feasibility of performing Chinese word segmentation (CWS) and POS tagging by deep learning. We try to avoid task-specific feature engineering, and use deep layers of neural networks to discover features relevant to the tasks. We leverage large-scale unlabeled data to improve the internal representation of Chinese characters, and use these improved representations to enhance supervised word segmentation and POS tagging models. Our networks achieved close to state-of-the-art performance with minimal computational cost. We also describe a perceptron-style algorithm for training the neural networks, as an alternative to the maximum-likelihood method, to speed up the training process and make the learning algorithm easier to implement.", "phrases": ["chinese word segmentation", "pos tagging", "deep learning"], "overall_score": 9.574589819646024, "scores": [3.4437185795283742, 2.7994627610158953, 1.711518864175715], "rank_score": 2.6515667349066616} -{"id": "ide-etal-2008-masc", "title": "MASC: the Manually Annotated Sub-Corpus of American English", "abstract": "To answer the critical need for sharable, reusable annotated resources with rich linguistic annotations, we are developing a Manually Annotated Sub-Corpus (MASC) including texts from diverse genres and manual annotations or manually-validated annotations for multiple levels, including WordNet senses and FrameNet frames and frame elements, both of which have become significant resources in the international computational linguistics community. To derive maximal benefit from the semantic information provided by these resources, the MASC will also include manually-validated shallow parses and named entities, which will enable linking WordNet senses and FrameNet frames within the same sentences into more complex semantic structures and, because named entities will often be the role fillers of FrameNet frames, enrich the semantic and pragmatic information derivable from the sub-corpus. All MASC annotations will be published with detailed inter-annotator agreement measures. The MASC and its annotations will be freely downloadable from the ANC website, thus providing maximum accessibility for researchers from around the globe.", "phrases": ["manually annotated sub-corpus", "masc", "project"], "overall_score": 6.329937808547174, "scores": [4.113577327866712, 3.2828622169306523, 0.5229277607981798], "rank_score": 2.639789101865181} -{"id": "abu-farha-etal-2021-overview", "title": "Overview of the WANLP 2021 Shared Task on Sarcasm and Sentiment Detection in Arabic", "abstract": "This paper provides an overview of the WANLP 2021 shared task on sarcasm and sentiment detection in Arabic. The shared task has two subtasks: sarcasm detection (subtask 1) and sentiment analysis (subtask 2). This shared task aims to promote and bring attention to Arabic sarcasm detection, which is crucial for improving performance in other tasks such as sentiment analysis.
The dataset used in this shared task, namely ArSarcasm-v2, consists of 15,548 tweets labelled for sarcasm, sentiment and dialect. We received 27 and 22 submissions for subtasks 1 and 2 respectively. Most of the approaches relied on using and fine-tuning pre-trained language models such as AraBERT and MARBERT. The best results achieved for the sarcasm detection and sentiment analysis tasks were 0.6225 F1-score and 0.748 F1-PN respectively.", "phrases": ["sarcasm", "sentiment detection", "arabic"], "overall_score": 6.7672796695916135, "scores": [2.7749535933555585, 2.6564489333964114, 2.4837007284900907], "rank_score": 2.6383677517473534} -{"id": "nenkova-passonneau-2004-evaluating", "title": "Evaluating Content Selection in Summarization: The Pyramid Method", "abstract": "We present an empirically grounded method for evaluating content selection in summarization. It incorporates the idea that no single best model summary for a collection of documents exists. Our method quantifies the relative importance of facts to be conveyed. We argue that it is reliable, predictive and diagnostic, and thus improves considerably over the shortcomings of the human evaluation method currently used in the Document Understanding Conference.", "phrases": ["content selection", "summarization", "pyramid method", "evaluation method"], "overall_score": 10.25501654076186, "scores": [3.4519644319784417, 3.110305730514161, 2.7228503191123794, 1.2549515209388094], "rank_score": 2.6350180006359474} -{"id": "chiang-2007-hierarchical", "title": "Hierarchical Phrase-Based Translation", "abstract": "We present a statistical machine translation model that uses hierarchical phrases---phrases that contain subphrases. The model is formally a synchronous context-free grammar but is learned from a parallel text without any syntactic annotations. Thus it can be seen as combining fundamental ideas from both syntax-based translation and phrase-based translation. We describe our system's training and decoding methods in detail, and evaluate it for translation speed and translation accuracy. Using BLEU as a metric of translation accuracy, we find that our system performs significantly better than the Alignment Template System, a state-of-the-art phrase-based system.", "phrases": ["phrase-based translation", "context-free grammar", "cube pruning", "scfg", "smt system"], "overall_score": 13.450510808303255, "scores": [4.8417402796126, 2.2903532415735937, 2.2485026561551003, 2.0423223506327854, 1.7174947875627569], "rank_score": 2.6280826631073673} -{"id": "clark-etal-2019-bert", "title": "What Does BERT Look at? An Analysis of BERT's Attention", "abstract": "Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference.
For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.", "phrases": ["bert", "attention head", "behavior", "coreference", "direct object"], "overall_score": 12.528085231007788, "scores": [6.1447901041805215, 2.9478128505815824, 1.621577264735649, 1.3608599474343308, 1.0552400938501727], "rank_score": 2.6260560521564513} -{"id": "rajpurkar-etal-2018-know", "title": "Know What You Don't Know: Unanswerable Questions for SQuAD", "abstract": "Extractive reading comprehension systems can often locate the correct answer to a question in a context document, but they also tend to make unreliable guesses on questions for which the correct answer is not stated in the context. Existing datasets either focus exclusively on answerable questions, or use automatically generated unanswerable questions that are easy to identify. To address these weaknesses, we present SQuADRUn, a new dataset that combines the existing Stanford Question Answering Dataset (SQuAD) with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuADRUn, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. SQuADRUn is a challenging natural language understanding task for existing models: a strong neural system that gets 86% F1 on SQuAD achieves only 66% F1 on SQuADRUn. We release SQuADRUn to the community as the successor to SQuAD.", "phrases": ["unanswerable question", "squad", "paragraph", "crowdsourcing", "human performance"], "overall_score": 11.116701755790517, "scores": [5.231560009874633, 4.922353240340845, 1.4976368131784374, 0.880996412684935, 0.5505583909847627], "rank_score": 2.616620973412723} -{"id": "nakov-etal-2013-semeval", "title": "SemEval-2013 Task 2: Sentiment Analysis in Twitter", "abstract": "In recent years, sentiment analysis in social media has attracted a lot of research interest and has been used for a number of applications. Unfortunately, research has been hindered by the lack of suitable datasets, complicating the comparison between approaches. To address this issue, we have proposed SemEval-2013 Task 2: Sentiment Analysis in Twitter, which included two subtasks: A, an expression-level subtask, and B, a message-level subtask. We used crowdsourcing on Amazon Mechanical Turk to label a large Twitter training dataset along with additional test sets of Twitter and SMS messages for both subtasks. All datasets used in the evaluation are released to the research community. The task attracted significant interest and a total of 149 submissions from 44 teams.
The best-performing team achieved an F1 of 88.9% and 69% for subtasks A and B, respectively.", "phrases": ["sentiment analysis", "twitter", "semeval-2013 task", "semantic evaluation", "arabic tweet"], "overall_score": 12.815413224838236, "scores": [5.752698485531945, 4.978245476912792, 0.9254994607572631, 0.8428867718723037, 0.583389685807344], "rank_score": 2.6165439761763296} -{"id": "artetxe-etal-2016-learning", "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", "abstract": "Mapping word embeddings of different languages into a single space has multiple applications. In order to map from a source space into a target space, a common approach is to learn a linear mapping that minimizes the distances between equivalences listed in a bilingual dictionary. In this paper, we propose a framework that generalizes previous work, provides an efficient exact method to learn the optimal linear transformation and yields the best bilingual results in translation induction while preserving monolingual performance in an analogy task.", "phrases": ["principled bilingual mapping", "mapping", "word embedding"], "overall_score": 9.755106676549346, "scores": [3.8567511158698875, 2.9672904559592, 0.9095349295540808], "rank_score": 2.5778588337943895} -{"id": "chklovski-pantel-2004-verbocean", "title": "VerbOcean: Mining the Web for Fine-Grained Semantic Verb Relations", "abstract": "Broad-coverage repositories of semantic relations between verbs could benefit many NLP tasks. We present a semi-automatic method for extracting fine-grained semantic relations between verbs. We detect similarity, strength, antonymy, enablement, and temporal happens-before relations between pairs of strongly associated verbs using lexicosyntactic patterns over the Web. On a set of 29,165 strongly associated verb pairs, our extraction algorithm yielded 65.5% accuracy. Analysis of error types shows that on the relation strength we achieved 75% accuracy. We provide the resource, called VERBOCEAN, for download at http://semantics.isi.edu/ocean/.", "phrases": ["web", "fine-grained semantic relation", "antonymy", "verbocean", "previous approach"], "overall_score": 10.079539545878765, "scores": [5.221454665611411, 3.5628976473409035, 1.6793678187144, 1.5833380660392995, 0.8357131099944403], "rank_score": 2.576554261540091} -{"id": "nivre-etal-2020-universal", "title": "Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection", "abstract": "Universal Dependencies is an open community effort to create cross-linguistically consistent treebank annotation for many languages within a dependency-based lexicalist framework. The annotation consists in a linguistically motivated word segmentation; a morphological layer comprising lemmas, universal part-of-speech tags, and standardized morphological features; and a syntactic layer focusing on syntactic relations between predicates, arguments and modifiers.
In this paper, we describe version 2 of the universal guidelines (UD v2), discuss the major changes from UD v1 to UD v2, and give an overview of the currently available treebanks for 90 languages.", "phrases": ["treebank", "guideline", "universal dependencies", "project", "pos tag"], "overall_score": 12.72744662645901, "scores": [4.005993217844579, 3.207681696579725, 2.5040194646473197, 1.641244634124719, 1.5188104656283685], "rank_score": 2.5755498957649423} -{"id": "zhao-etal-2018-gender", "title": "Gender Bias in Coreference Resolution: Evaluation and Debiasing Methods", "abstract": "In this paper, we introduce a new benchmark for co-reference resolution focused on gender bias, WinoBias. Our corpus contains Winograd-schema style sentences with entities corresponding to people referred by their occupation (e.g. the nurse, the doctor, the carpenter). We demonstrate that a rule-based, a feature-rich, and a neural coreference system all link gendered pronouns to pro-stereotypical entities with higher accuracy than anti-stereotypical entities, by an average difference of 21.1 in F1 score. Finally, we demonstrate a data-augmentation approach that, in combination with existing word-embedding debiasing techniques, removes the bias demonstrated by these systems in WinoBias without significantly affecting their performance on existing datasets.", "phrases": ["coreference resolution", "winobias", "pronoun", "gender bias", "stereotype"], "overall_score": 10.774026174013034, "scores": [5.127973959165532, 2.5825879662366638, 1.8862411479915346, 1.686340097427981, 1.5747494688017656], "rank_score": 2.5715785279246957} -{"id": "jiang-zhai-2007-instance", "title": "Instance Weighting for Domain Adaptation in NLP", "abstract": "Domain adaptation is an important problem in natural language processing (NLP) due to the lack of labeled data in novel domains. In this paper, we study the domain adaptation problem from the instance weighting perspective. We formally analyze and characterize the domain adaptation problem from a distributional view, and show that there are two distinct needs for adaptation, corresponding to the different distributions of instances and classification functions in the source and the target domains. We then propose a general instance weighting framework for domain adaptation. Our empirical results on three NLP tasks show that incorporating and exploiting more information from the target domain through instance weighting is effective.", "phrases": ["weight", "domain adaptation", "training instance"], "overall_score": 10.147102551121925, "scores": [5.535209922734226, 1.5832681656558596, 0.585756279692577], "rank_score": 2.568078122694221} -{"id": "koo-etal-2010-dual", "title": "Dual Decomposition for Parsing with Non-Projective Head Automata", "abstract": "This paper introduces algorithms for non-projective parsing based on dual decomposition. We focus on parsing algorithms for non-projective head automata, a generalization of head-automata models to non-projective structures. The dual decomposition algorithms are simple and efficient, relying on standard dynamic programming and minimum spanning tree algorithms. They provably solve an LP relaxation of the non-projective parsing problem. Empirically the LP relaxation is very often tight: for many languages, exact solutions are achieved on over 98% of test sentences. 
The accuracy of our models is higher than that of previous work on a broad range of datasets.", "phrases": ["non-projective head automata", "dual decomposition", "dependency parser"], "overall_score": 8.80752389030406, "scores": [4.283563419190219, 2.540929745558194, 0.869936108721662], "rank_score": 2.5648097578233586} -{"id": "strubell-etal-2018-linguistically", "title": "Linguistically-Informed Self-Attention for Semantic Role Labeling", "abstract": "Current state-of-the-art semantic role labeling (SRL) uses a deep neural network with no explicit linguistic features. However, prior work has shown that gold syntax trees can dramatically improve SRL decoding, suggesting the possibility of increased accuracy from explicit modeling of syntax. In this work, we present linguistically-informed self-attention (LISA): a neural network model that combines multi-head self-attention with multi-task learning across dependency parsing, part-of-speech tagging, predicate detection and SRL. Unlike previous models which require significant pre-processing to prepare linguistic features, LISA can incorporate syntax using merely raw tokens as input, encoding the sequence only once to simultaneously perform parsing, predicate detection and role labeling for all predicates. Syntax is incorporated by training one attention head to attend to syntactic parents for each token. Moreover, if a high-quality syntactic parse is already available, it can be beneficially injected at test time without re-training our SRL model. In experiments on CoNLL-2005 SRL, LISA achieves new state-of-the-art performance for a model using predicted predicates and standard word embeddings, attaining 2.5 F1 absolute higher than the previous state-of-the-art on newswire and more than 3.5 F1 on out-of-domain data, nearly 10% reduction in error. On CoNLL-2012 English SRL we also show an improvement of more than 2.5 F1. LISA also outperforms the state-of-the-art with contextually-encoded (ELMo) word representations, by nearly 1.0 F1 on news and more than 2.0 F1 on out-of-domain text.", "phrases": ["self-attention", "semantic role labeling", "head", "auxiliary task"], "overall_score": 10.848934879593362, "scores": [4.602234934176391, 4.24271726873389, 0.870630353075699, 0.5335076525989285], "rank_score": 2.562272552146227} -{"id": "wilson-etal-2005-recognizing", "title": "Recognizing Contextual Polarity in Phrase-Level Sentiment Analysis", "abstract": "This paper presents a new approach to phrase-level sentiment analysis that first determines whether an expression is neutral or polar and then disambiguates the polarity of the polar expressions. With this approach, the system is able to automatically identify the contextual polarity for a large subset of sentiment expressions, achieving results that are significantly better than baseline.", "phrases": ["contextual polarity", "sentiment analysis", "phrase level", "news article", "negator"], "overall_score": 12.383397052309713, "scores": [5.586435042413023, 4.5225655923557015, 1.3015589074611194, 0.8497890092220135, 0.5422519695292639], "rank_score": 2.5605201041962244} -{"id": "wei-zou-2019-eda", "title": "EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks", "abstract": "We present EDA: easy data augmentation techniques for boosting performance on text classification tasks. EDA consists of four simple but powerful operations: synonym replacement, random insertion, random swap, and random deletion.
On five text classification tasks, we show that EDA improves performance for both convolutional and recurrent neural networks. EDA demonstrates particularly strong results for smaller datasets; on average, across five datasets, training with EDA while using only 50% of the available training set achieved the same accuracy as normal training with all available data. We also performed extensive ablation studies and suggest parameters for practical use.", "phrases": ["data augmentation", "text classification task", "synonyms", "random insertion", "eda"], "overall_score": 11.425598928976854, "scores": [5.081111686338279, 2.5610265297512633, 2.439939838865386, 1.6247108341205354, 1.0525805780933866], "rank_score": 2.5518738934337697} -{"id": "eskander-rambow-2015-slsa", "title": "SLSA: A Sentiment Lexicon for Standard Arabic", "abstract": "Sentiment analysis has been a major area of interest, for which the existence of high-quality resources is crucial. In Arabic, there is a reasonable number of sentiment lexicons but with major deficiencies. The paper presents a large-scale Standard Arabic Sentiment Lexicon (SLSA) that is publicly available for free and avoids the deficiencies in the current resources. SLSA has the highest coverage reported to date. The construction of SLSA is based on linking the lexicon of AraMorph with SentiWordNet along with a few heuristics and powerful back-off. SLSA shows a relative improvement of 37.8% over a state-of-the-art lexicon when tested for accuracy. It also outperforms it by an absolute 3.5% in F1-score when tested for sentiment analysis.", "phrases": ["sentiment lexicon", "standard arabic", "slsa"], "overall_score": 5.301957210373152, "scores": [3.4513729123078267, 2.7110066418834142, 1.486727820219587], "rank_score": 2.549702458136943} -{"id": "mihalcea-tarau-2004-textrank", "title": "TextRank: Bringing Order into Text", "abstract": "In this paper, the authors introduce TextRank, a graph-based ranking model for text processing, and show how this model can be successfully used in natural language applications.", "phrases": ["textrank", "node", "graph-based method", "unsupervised approach", "keyphrase extraction method"], "overall_score": 12.220799673347104, "scores": [7.2465001280792, 1.8317597846746614, 1.6304732482654867, 1.1064248237485657, 0.926015297463564], "rank_score": 2.5482346564462954} -{"id": "edunov-etal-2018-understanding", "title": "Understanding Back-Translation at Scale", "abstract": "An effective method to improve neural machine translation with monolingual data is to augment the parallel training corpus with back-translations of target language sentences. This work broadens the understanding of back-translation and investigates a number of methods to generate synthetic source sentences. We find that in all but resource poor settings back-translations obtained via sampling or noised beam outputs are most effective. Our analysis shows that sampling or noisy synthetic data gives a much stronger training signal than data generated by beam or greedy search. We also examine how synthetic data compares to genuine bitext and study various domain effects.
Finally, we scale to hundreds of millions of monolingual sentences and achieve a new state of the art of 35 BLEU on the WMT'14 English-German test set.", "phrases": ["back-translation", "scale", "neural machine translation", "monolingual data", "beam search"], "overall_score": 11.110940169620923, "scores": [6.535634200270586, 1.3723100054301927, 1.7879992895930832, 1.6230468731831829, 1.3953622737539473], "rank_score": 2.5428705284461985} -{"id": "garrette-baldridge-2013-learning", "title": "Learning a Part-of-Speech Tagger from Two Hours of Annotation", "abstract": "Most work on weakly-supervised learning for part-of-speech taggers has been based on unrealistic assumptions about the amount and quality of training data. For this paper, we attempt to create true low-resource scenarios by allowing a linguist just two hours to annotate data and evaluating on the languages Kinyarwanda and Malagasy. Given these severely limited amounts of either type supervision (tag dictionaries) or token supervision (labeled sentences), we are able to dramatically improve the learning of a hidden Markov model through our method of automatically generalizing the annotations, reducing noise, and inducing word-tag frequency information.", "phrases": ["part-of-speech tagger", "hour", "annotated corpora"], "overall_score": 8.640469369303505, "scores": [3.951106463905102, 3.0905616169692602, 0.5795914930827335], "rank_score": 2.540419857985699} -{"id": "blitzer-etal-2006-domain", "title": "Domain Adaptation with Structural Correspondence Learning", "abstract": "Discriminative learning methods are widely used in natural language processing. These methods work best when their training and test data are drawn from the same distribution. For many NLP tasks, however, we are confronted with new domains in which labeled data is scarce or non-existent. In such cases, we seek to adapt existing models from a resource-rich source domain to a resource-poor target domain. We introduce structural correspondence learning to automatically induce correspondences among features from different domains. We test our technique on part of speech tagging and show performance gains for varying amounts of source and target training data, as well as improvements in target domain parsing accuracy using our improved tagger.", "phrases": ["tagger", "domain adaptation", "scl", "unlabeled data", "pivot feature"], "overall_score": 12.202656612859961, "scores": [5.085186964986051, 2.474492985764058, 2.045726197041956, 2.0316824932458886, 1.0418277425755262], "rank_score": 2.5357832767226958} -{"id": "clarke-etal-2010-driving", "title": "Driving Semantic Parsing from the World's Response", "abstract": "Current approaches to semantic parsing, the task of converting text to a formal meaning representation, rely on annotated training data mapping sentences to logical forms. Providing this supervision is a major bottleneck in scaling semantic parsers. This paper presents a new learning paradigm aimed at alleviating the supervision burden. We develop two novel learning algorithms capable of predicting complex structures which only rely on a binary feedback signal based on the context of an external world. In addition we reformulate the semantic parsing problem to reduce the dependency of the model on syntactic patterns, thus allowing our parser to scale better using less supervision. 
Our results surprisingly show that, without using any annotated meaning representations, learning with a weak feedback signal is capable of producing a parser that is competitive with fully supervised parsers.", "phrases": ["semantic parsing", "world", "full logical form"], "overall_score": 10.053164324769238, "scores": [4.245971972681922, 2.4914741262290456, 0.8588449625502442], "rank_score": 2.5320970204870705} -{"id": "resnik-smith-2003-web", "title": "The Web as a Parallel Corpus", "abstract": "Parallel corpora have become an essential resource for work in multilingual natural language processing. In this article, we report on our work using the STRAND system for mining parallel text on the World Wide Web, first reviewing the original algorithm and results and then presenting a set of significant enhancements. These enhancements include the use of supervised learning based on structural features of documents to improve classification performance, a new content-based measure of translational equivalence, and adaptation of the system to take advantage of the Internet Archive for mining parallel text from the Web on a large scale. Finally, the value of these techniques is demonstrated in the construction of a significant parallel corpus for a low-density language pair.", "phrases": ["web", "parallel corpus", "low-density language pair", "document structure", "large number"], "overall_score": 11.151546240899872, "scores": [6.268582762649276, 1.9656805304090696, 1.5087659344086413, 1.4748655170331886, 1.4002863924524613], "rank_score": 2.5236362273905275} -{"id": "berant-etal-2013-semantic", "title": "Semantic Parsing on Freebase from Question-Answer Pairs", "abstract": "In this paper, we train a semantic parser that scales up to Freebase. Instead of relying on annotated logical forms, which are especially expensive to obtain at large scale, we learn from question-answer pairs. The main challenge in this setting is narrowing down the huge number of possible logical predicates for a given question. We tackle this problem in two ways: First, we build a coarse mapping from phrases to predicates using a knowledge base and a large text corpus. Second, we use a bridging operation to generate additional predicates based on neighboring predicates. On the dataset of Cai and Yates (2013), despite not having annotated logical forms, our system outperforms their state-of-the-art parser. Additionally, we collected a more realistic and challenging dataset of question-answer pairs, on which our system improves over a natural baseline.", "phrases": ["freebase", "question-answer pair", "knowledge base", "semantic parsing", "webquestions dataset"], "overall_score": 12.132927447876396, "scores": [4.292014671286805, 3.8698024033109015, 2.168714807754383, 1.736086621544463, 0.5398472420915], "rank_score": 2.5212931491976107} -{"id": "dos-santos-gatti-2014-deep", "title": "Deep Convolutional Neural Networks for Sentiment Analysis of Short Texts", "abstract": "Sentiment analysis of short texts such as single sentences and Twitter messages is challenging because of the limited contextual information that they normally contain. Effectively solving this task requires strategies that combine the small text content with prior knowledge and use more than just bag-of-words. In this work we propose a new deep convolutional neural network that exploits character- to sentence-level information to perform sentiment analysis of short texts.
We apply our approach to two corpora from two different domains: the Stanford Sentiment Treebank (SSTb), which contains sentences from movie reviews; and the Stanford Twitter Sentiment corpus (STS), which contains Twitter messages. For the SSTb corpus, our approach achieves state-of-the-art results for single sentence sentiment prediction in both binary positive/negative classification, with 85.7% accuracy, and fine-grained classification, with 48.3% accuracy. For the STS corpus, our approach achieves a sentiment prediction accuracy of 86.4%.", "phrases": ["convolutional neural network", "sentiment analysis", "short text"], "overall_score": 8.71330954992917, "scores": [4.406635868505536, 1.8729730435990208, 1.2627801743433982], "rank_score": 2.5141296954826515} -{"id": "hong-etal-2011-using", "title": "Using Cross-Entity Inference to Improve Event Extraction", "abstract": "Event extraction is the task of detecting certain specified types of events that are mentioned in the source language data. The state of the art for the task is transductive inference (e.g., cross-event inference). In this paper, we propose a new method of event extraction that makes full use of cross-entity inference. In contrast to previous inference methods, we regard entity-type consistency as a key feature to predict event mentions. We adopt this inference method to improve the traditional sentence-level event extraction system. Experiments show that we obtain an 8.6% gain in trigger (event) identification, and a gain of more than 11.8% for argument (role) classification in ACE event extraction.", "phrases": ["cross-entity inference", "event extraction", "syntactic feature"], "overall_score": 8.989502212417012, "scores": [3.439441042283985, 3.251282288827519, 0.834981737122318], "rank_score": 2.508568356077941} -{"id": "chan-ng-2007-domain", "title": "Domain Adaptation with Active Learning for Word Sense Disambiguation", "abstract": "When a word sense disambiguation (WSD) system is trained on one domain but applied to a different domain, a drop in accuracy is frequently observed. This highlights the importance of domain adaptation for word sense disambiguation. In this paper, we first show that an active learning approach can be successfully used to perform domain adaptation of WSD systems. Then, by using the predominant sense predicted by expectation-maximization (EM) and adopting a count-merging technique, we improve the effectiveness of the original adaptation process achieved by the basic active learning approach.", "phrases": ["active learning", "word sense disambiguation", "domain adaptation"], "overall_score": 7.091487074950209, "scores": [3.5178809537111486, 2.0300739738085163, 1.960996443714769], "rank_score": 2.5029837904114784} -{"id": "turian-etal-2010-word", "title": "Word Representations: A Simple and General Method for Semi-Supervised Learning", "abstract": "If we take an existing supervised NLP system, a simple and general way to improve accuracy is to use unsupervised word representations as extra word features. We evaluate Brown clusters, Collobert and Weston (2008) embeddings, and HLBL (Mnih & Hinton, 2009) embeddings of words on both NER and chunking. We use near state-of-the-art supervised baselines, and find that each of the three word representations improves the accuracy of these baselines. We find further improvements by combining different word representations.
You can download our word features, for off-the-shelf use in existing NLP systems, as well as our code, here: http://metaoptimize.com/projects/wordreprs/", "phrases": ["semi-supervised learning", "brown cluster", "word representation", "inter alia", "pos tagging"], "overall_score": 12.73008017695123, "scores": [6.541820056105674, 1.9677651814372088, 1.681926896141586, 1.1552075943002231, 1.149064002178406], "rank_score": 2.4991567460326194} -{"id": "baroni-zamparelli-2010-nouns", "title": "Nouns are Vectors, Adjectives are Matrices: Representing Adjective-Noun Constructions in Semantic Space", "abstract": "We propose an approach to adjective-noun composition (AN) for corpus-based distributional semantics that, building on insights from theoretical linguistics, represents nouns as vectors and adjectives as data-induced (linear) functions (encoded as matrices) over nominal vectors. Our model significantly outperforms the rivals on the task of reconstructing AN vectors not seen in training. A small post-hoc analysis further suggests that, when the model-generated AN vector is not similar to the corpus-observed AN vector, this is due to anomalies in the latter. We show moreover that our approach provides two novel ways to represent adjective meanings, alternative to its representation via corpus-based co-occurrence vectors, both outperforming the latter in an adjective clustering task.", "phrases": ["semantic space", "noun", "matrix", "compositionality"], "overall_score": 10.822624317063942, "scores": [5.297125889830169, 0.8377284998983523, 2.3602212510429488, 1.5010367648456293], "rank_score": 2.499028101404275} -{"id": "barzilay-lee-2003-learning", "title": "Learning to Paraphrase: An Unsupervised Approach Using Multiple-Sequence Alignment", "abstract": "We address the text-to-text generation problem of sentence-level paraphrasing --- a phenomenon distinct from and more difficult than word- or phrase-level paraphrasing. Our approach applies multiple-sequence alignment to sentences gathered from unannotated comparable corpora: it learns a set of paraphrasing patterns represented by word lattice pairs and automatically determines how to apply these patterns to rewrite new sentences. The results of our evaluation experiments show that the system derives accurate paraphrases, outperforming baseline systems.", "phrases": ["paraphrase", "multiple-sequence alignment", "same event", "news article", "barzilay"], "overall_score": 10.229744441224705, "scores": [5.150425101912548, 2.7470661145763926, 1.5850636238637514, 1.5081862136873154, 1.5017889294071765], "rank_score": 2.498505996689437} -{"id": "koehn-2005-europarl", "title": "Europarl: A Parallel Corpus for Statistical Machine Translation", "abstract": "We collected a corpus of parallel text in 11 languages from the proceedings of the European Parliament, which are published on the web. This corpus has found widespread use in the NLP community. Here, we focus on its acquisition and its application as training data for statistical machine translation (SMT). 
We trained SMT systems for 110 language pairs, which reveal interesting clues about the challenges ahead.", "phrases": ["statistical machine translation", "europarl", "sentence pair", "bleu score", "test set"], "overall_score": 12.548792906547439, "scores": [5.420524851102527, 3.333154720896209, 1.33657680183664, 1.2525048269249377, 1.1139699788563582], "rank_score": 2.4913462359233343} -{"id": "callison-burch-etal-2006-improved", "title": "Improved Statistical Machine Translation Using Paraphrases", "abstract": "Parallel corpora are crucial for training SMT systems. However, for many language pairs they are available only in very limited quantities. For these language pairs a huge portion of phrases encountered at run-time will be unknown. We show how techniques from paraphrasing can be used to deal with these otherwise unknown source language phrases. Our results show that augmenting a state-of-the-art SMT system with paraphrases leads to significantly improved coverage and translation quality. For a training corpus with 10,000 sentence pairs we increase the coverage of unique test set unigrams from 48% to 90%, with more than half of the newly covered items accurately translated, as opposed to none in current approaches.", "phrases": ["machine translation", "paraphrase", "pivot language"], "overall_score": 8.297811839279058, "scores": [4.235545798053078, 2.677070007785754, 0.557944732034128], "rank_score": 2.4901868459576533} -{"id": "howard-ruder-2018-universal", "title": "Universal Language Model Fine-tuning for Text Classification", "abstract": "Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100 times more data. We open-source our pretrained models and code.", "phrases": ["text classification", "transfer learning", "ulmfit", "downstream task", "pre-training"], "overall_score": 12.862087709719791, "scores": [3.668137069340758, 2.6919716108345386, 2.384990018298548, 1.9507811911074269, 1.728497663465429], "rank_score": 2.48487551060934} -{"id": "och-2003-minimum", "title": "Minimum Error Rate Training in Statistical Machine Translation", "abstract": "Often, the training procedure for statistical machine translation models is based on maximum likelihood or related criteria. A general problem of this approach is that there is only a loose relation to the final translation quality on unseen text. In this paper, we analyze various training criteria which directly optimize translation quality. These training criteria make use of recently proposed automatic evaluation metrics. We describe a new algorithm for efficient training with an unsmoothed error count.
We show that significantly better results can often be obtained if the final evaluation criterion is taken directly into account as part of the training procedure.", "phrases": ["error rate training", "statistical machine translation", "evaluation metric", "mert", "weight"], "overall_score": 12.622102186179468, "scores": [3.0979752606319266, 3.1766054731077578, 2.381422119911731, 2.139867797843051, 1.6240247942973804], "rank_score": 2.4839790891583693} -{"id": "surdeanu-etal-2012-multi", "title": "Multi-instance Multi-label Learning for Relation Extraction", "abstract": "Distant supervision for relation extraction (RE) -- gathering training data by aligning a database of facts with text -- is an efficient approach to scale RE to thousands of different relations. However, this introduces a challenging learning scenario where the relation expressed by a pair of entities found in a sentence is unknown. For example, a sentence containing Balzac and France may express BornIn or Died, an unknown relation, or no relation at all. Because of this, traditional supervised learning, which assumes that each example is explicitly mapped to a label, is not appropriate. We propose a novel approach to multi-instance multi-label learning for RE, which jointly models all the instances of a pair of entities in text and all their labels using a graphical model with latent variables. Our model performs competitively on two difficult domains.", "phrases": ["relation extraction", "distant supervision", "multi-instance multi-label learning", "knowledge base", "learning problem"], "overall_score": 11.00165297946134, "scores": [4.692765729908604, 3.5780047166225235, 2.3135321212681355, 1.2611426496691465, 0.5364100580147924], "rank_score": 2.4763710550966405} -{"id": "jiao-etal-2020-tinybert", "title": "TinyBERT: Distilling BERT for Natural Language Understanding", "abstract": "Language model pre-training, such as BERT, has significantly improved the performances of many natural language processing tasks. However, pre-trained language models are usually computationally expensive, so it is difficult to efficiently execute them on resource-restricted devices. To accelerate inference and reduce model size while maintaining accuracy, we first propose a novel Transformer distillation method that is specially designed for knowledge distillation (KD) of the Transformer-based models. By leveraging this new KD method, the plenty of knowledge encoded in a large \u201cteacher\u201d BERT can be effectively transferred to a small \u201cstudent\u201d TinyBERT. Then, we introduce a new two-stage learning framework for TinyBERT, which performs Transformer distillation at both the pre-training and task-specific learning stages. This framework ensures that TinyBERT can capture the general-domain as well as the task-specific knowledge in BERT. TinyBERT4 with 4 layers is empirically effective and achieves more than 96.8% the performance of its teacher BERT-Base on GLUE benchmark, while being 7.5x smaller and 9.4x faster on inference. TinyBERT4 is also significantly better than 4-layer state-of-the-art baselines on BERT distillation, with only ~28% parameters and ~31% inference time of them. 
Moreover, TinyBERT6 with 6 layers performs on par with its teacher BERT-Base.", "phrases": ["natural language understanding", "knowledge distillation", "teacher", "tinybert", "data augmentation"], "overall_score": 10.819861510500054, "scores": [5.8761139645935785, 0.9892796245584803, 2.1243938848324477, 1.8670217867322845, 1.4540187061920267], "rank_score": 2.4621655933817634} -{"id": "francopoulo-etal-2006-lexical", "title": "Lexical Markup Framework (LMF)", "abstract": "Optimizing the production, maintenance and extension of lexical resources is one of the crucial aspects impacting Natural Language Processing (NLP). A second aspect involves optimizing the process leading to their integration in applications. In this respect, we believe that the production of a consensual specification on lexicons can be a useful aid for the various NLP actors. Within ISO, the purpose of LMF is to define a standard for lexicons. LMF is a model that provides a common standardized framework for the construction of NLP lexicons. The goals of LMF are to provide a common model for the creation and use of lexical resources, to manage the exchange of data between and among these resources, and to enable the merging of a large number of individual electronic resources to form extensive global electronic resources. In this paper, we describe the work in progress within the sub-group ISO-TC37/SC4/WG4. Various experts from many countries have been consulted in order to take into account best practices in many languages for (we hope) all kinds of NLP lexicons.", "phrases": ["lmf", "standardized framework", "lexical markup framework"], "overall_score": 6.971664363591329, "scores": [3.8645383590258238, 2.961764421857156, 0.5557721110948669], "rank_score": 2.4606916306592823} -{"id": "denis-baldridge-2007-joint", "title": "Joint Determination of Anaphoricity and Coreference Resolution using Integer Programming", "abstract": "Standard pairwise coreference resolution systems are subject to errors resulting from their performing anaphora identification as an implicit part of coreference resolution. In this paper, we propose an integer linear programming (ILP) formulation for coreference resolution which models anaphoricity and coreference as a joint task, such that each local model informs the other for the final assignments. This joint ILP formulation provides F-score improvements of 3.7-5.3% over a base coreference classifier on the ACE datasets.", "phrases": ["anaphoricity", "coreference resolution", "integer programming"], "overall_score": 8.166537255949136, "scores": [3.3189021768663123, 3.212817165470366, 0.8206540468222119], "rank_score": 2.4507911297196303} -{"id": "hendrickx-etal-2009-semeval", "title": "SemEval-2010 Task 8: Multi-Way Classification of Semantic Relations Between Pairs of Nominals", "abstract": "We present a brief overview of the main challenges in the extraction of semantic relations from English text, and discuss the shortcomings of previous data sets and shared tasks. This leads us to introduce a new task, which will be part of SemEval-2010: multi-way classification of mutually exclusive semantic relations between pairs of common nominals.
The task is designed to compare different approaches to the problem and to provide a standard testbed for future research, which can benefit many applications in Natural Language Processing.", "phrases": ["multi-way classification", "semantic relations", "nominal"], "overall_score": 8.409688564461527, "scores": [3.1433240288697766, 2.4705489207694957, 1.7329994150663346], "rank_score": 2.448957454901869} -{"id": "tan-bansal-2019-lxmert", "title": "LXMERT: Learning Cross-Modality Encoder Representations from Transformers", "abstract": "Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly, the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative pre-training tasks: masked language modeling, masked object prediction (feature regression and label classification), cross-modality matching, and image question answering. These tasks help in learning both intra-modality and cross-modality relationships. After fine-tuning from our pre-trained parameters, our model achieves the state-of-the-art results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our pre-trained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR2, and improve the previous best result by 22% absolute (54% to 76%). Lastly, we demonstrate detailed ablation studies to prove that both our novel model components and pre-training strategies significantly contribute to our strong results. Code and pre-trained models publicly available at: ", "phrases": ["cross-modality encoder representations", "transformer", "vision", "visual question answering", "lxmert"], "overall_score": 10.540317484931292, "scores": [4.675980577710786, 3.4079133904941186, 0.9970645087438028, 1.8504534825898244, 1.275127129573251], "rank_score": 2.4413078178223566} -{"id": "kwiatkowksi-etal-2010-inducing", "title": "Inducing Probabilistic CCG Grammars from Logical Form with Higher-Order Unification", "abstract": "This paper addresses the problem of learning to map sentences to logical form, given training data consisting of natural language sentences paired with logical representations of their meaning. Previous approaches have been designed for particular natural languages or specific meaning representations; here we present a more general method. The approach induces a probabilistic CCG grammar that represents the meaning of individual words and defines how these meanings can be combined to analyze complete sentences. We use higher-order unification to define a hypothesis space containing all grammars consistent with the training data, and develop an online learning algorithm that efficiently searches this space while simultaneously estimating the parameters of a log-linear parsing model. 
Experiments demonstrate high accuracy on benchmark data sets in four languages with two different meaning representations.", "phrases": ["ccg", "logical form", "natural language sentence"], "overall_score": 9.218229088833674, "scores": [4.572071298048821, 1.5230971657101302, 1.2127864597015168], "rank_score": 2.4359849744868227} -{"id": "mayhew-etal-2020-simultaneous", "title": "Simultaneous Translation and Paraphrase for Language Education", "abstract": "We present the task of Simultaneous Translation and Paraphrasing for Language Education (STAPLE). Given a prompt in one language, the goal is to generate a diverse set of correct translations that language learners are likely to produce. This is motivated by the need to create and maintain large, high-quality sets of acceptable translations for exercises in a language-learning application, and synthesizes work spanning machine translation, MT evaluation, automatic paraphrasing, and language education technology. We developed a novel corpus with unique properties for five languages (Hungarian, Japanese, Korean, Portuguese, and Vietnamese), and report on the results of a shared task challenge which attracted 20 teams to solve the task. In our meta-analysis, we focus on three aspects of the resulting systems: external training corpus selection, model architecture and training decisions, and decoding and filtering strategies. We find that strong systems start with a large amount of generic training data, and then fine-tune with in-domain data, sampled according to our provided learner response frequencies.", "phrases": ["paraphrase", "language education", "simultaneous translation"], "overall_score": 5.040956698373338, "scores": [3.134857122167097, 2.088831236967792, 2.048874870944329], "rank_score": 2.4241877433597394} -{"id": "ghosal-etal-2019-dialoguegcn", "title": "DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation", "abstract": "Emotion recognition in conversation (ERC) has received much attention, lately, from researchers due to its potential widespread applications in diverse areas, such as health-care, education, and human resources. In this paper, we present Dialogue Graph Convolutional Network (DialogueGCN), a graph neural network based approach to ERC. We leverage self and inter-speaker dependency of the interlocutors to model conversational context for emotion recognition. Through the graph network, DialogueGCN addresses context propagation issues present in the current RNN-based methods. We empirically show that this method alleviates such issues, while outperforming the current state of the art on a number of benchmark emotion classification datasets.", "phrases": ["emotion recognition", "conversation", "graph convolutional network"], "overall_score": 7.684280434082854, "scores": [3.2518603634714704, 3.056943847467169, 0.9449562767258037], "rank_score": 2.417920162554814} -{"id": "poria-etal-2019-meld", "title": "MELD: A Multimodal Multi-Party Dataset for Emotion Recognition in Conversations", "abstract": "Emotion recognition in conversations is a challenging task that has recently gained popularity due to its potential applications. Until now, however, a large-scale multimodal multi-party emotional conversational database containing more than two speakers per dialogue was missing. Thus, we propose the Multimodal EmotionLines Dataset (MELD), an extension and enhancement of EmotionLines. MELD contains about 13,000 utterances from 1,433 dialogues from the TV-series Friends. 
Each utterance is annotated with emotion and sentiment labels, and encompasses audio, visual and textual modalities. We propose several strong multimodal baselines and show the importance of contextual and multimodal information for emotion recognition in conversations. The full dataset is available for use at .", "phrases": ["emotion recognition", "conversation", "modality", "meld"], "overall_score": 7.868293016648682, "scores": [4.027276219993358, 3.3843698379975735, 1.7195394098708545, 0.5288021044239587], "rank_score": 2.4149968930714363} -{"id": "bojar-etal-2017-findings", "title": "Findings of the 2017 Conference on Machine Translation (WMT17)", "abstract": "This paper presents the results of the WMT17 shared tasks, which included three machine translation (MT) tasks (news, biomedical, and multimodal), two evaluation tasks (metrics and run-time estimation of MT quality), an automatic post-editing task, a neural MT training task, and a bandit learning task.", "phrases": ["conference", "machine translation", "human evaluation", "state-of-the-art result", "bleu score"], "overall_score": 11.507435416402041, "scores": [6.604145961032283, 2.789560060032282, 1.4685873301944032, 0.5763813337851774, 0.558758592111381], "rank_score": 2.3994866554311054} -{"id": "bowman-etal-2015-large", "title": "A large annotated corpus for learning natural language inference", "abstract": "Understanding entailment and contradiction is fundamental to understanding natural language, and inference about entailment and contradiction is a valuable testing ground for the development of semantic representations. However, machine learning research in this area has been dramatically limited by the lack of large-scale resources. To address this, we introduce the Stanford Natural Language Inference corpus, a new, freely available collection of labeled sentence pairs, written by humans doing a novel grounded task based on image captioning. At 570K pairs, it is two orders of magnitude larger than all other resources of its type. This increase in scale allows lexicalized classifiers to outperform some sophisticated existing entailment models, and it allows a neural network-based model to perform competitively on natural language inference benchmarks for the first time.", "phrases": ["natural language inference", "entailment", "nli", "annotated dataset", "text pair"], "overall_score": 12.414298236247623, "scores": [4.9605202061666525, 2.9154692380688543, 2.248392747670364, 0.9795938444971124, 0.887850782403745], "rank_score": 2.3983653637613456} -{"id": "lin-2004-rouge", "title": "ROUGE: A Package for Automatic Evaluation of Summaries", "abstract": "ROUGE stands for Recall-Oriented Understudy for Gisting Evaluation. It includes measures to automatically determine the quality of a summary by comparing it to other (ideal) summaries created by humans. The measures count the number of overlapping units such as n-gram, word sequences, and word pairs between the computer-generated summary to be evaluated and the ideal summaries created by humans. This paper introduces four different ROUGE measures: ROUGE-N, ROUGE-L, ROUGE-W, and ROUGE-S included in the ROUGE summarization evaluation package and their evaluations.
Three of them have been used in the Document Understanding Conference (DUC) 2004, a large-scale summarization evaluation sponsored by NIST.", "phrases": ["automatic evaluation", "summarization", "rouge"], "overall_score": 8.433659002519907, "scores": [4.812111521428049, 0.9205536464124743, 1.442147739388718], "rank_score": 2.3916043024097475} -{"id": "jean-etal-2015-using", "title": "On Using Very Large Target Vocabulary for Neural Machine Translation", "abstract": "Neural machine translation, a recently proposed approach to machine translation based purely on neural networks, has shown promising results compared to the existing approaches such as phrase-based statistical machine translation. Despite its recent success, neural machine translation has its limitation in handling a larger vocabulary, as training complexity as well as decoding complexity increase proportionally to the number of target words. In this paper, we propose a method based on importance sampling that allows us to use a very large target vocabulary without increasing training complexity. We show that decoding can be efficiently done even with the model having a very large target vocabulary by selecting only a small subset of the whole target vocabulary. The models trained by the proposed approach are empirically found to outperform the baseline models with a small vocabulary as well as the LSTM-based neural machine translation models. Furthermore, when we use the ensemble of a few models with very large target vocabularies, we achieve the state-of-the-art translation performance (measured by BLEU) on English-German translation and almost as high performance as the state-of-the-art English-French translation system.", "phrases": ["target vocabulary", "neural machine translation", "state-of-the-art translation performance"], "overall_score": 8.9104236539092, "scores": [3.8270178067333123, 2.745837443455841, 0.5789997000096551], "rank_score": 2.3839516500662694} -{"id": "popescu-etzioni-2005-extracting", "title": "Extracting Product Features and Opinions from Reviews", "abstract": "Consumers are often forced to wade through many on-line reviews in order to make an informed product choice. This paper introduces Opine, an unsupervised information-extraction system which mines reviews in order to build a model of important product features, their evaluation by reviewers, and their relative quality across products. Compared to previous work, Opine achieves 22% higher precision (with only 3% lower recall) on the feature extraction task. Opine's novel use of relaxation labeling for finding the semantic orientation of words in context leads to strong performance on the tasks of finding opinion phrases and their polarity.", "phrases": ["product feature", "review", "sentiment analysis", "syntactic pattern", "information extraction system"], "overall_score": 10.892259437733623, "scores": [4.294110552886099, 3.618178409237986, 1.8937737883676748, 1.223033699637285, 0.8757643038251057], "rank_score": 2.3809721507908304} -{"id": "agirre-etal-2013-sem", "title": "*SEM 2013 shared task: Semantic Textual Similarity", "abstract": "In Semantic Textual Similarity (STS), systems rate the degree of semantic equivalence, on a graded scale from 0 to 5, with 5 being the most similar. This year we set up two tasks: (i) a core task (CORE), and (ii) a typed-similarity task (TYPED).
CORE is similar in set up to SemEval STS 2012 task with pairs of sentences from sources related to those of 2012, yet different in genre from the 2012 set, namely, this year we included newswire headlines, machine translation evaluation datasets and multiple lexical resource glossed sets. TYPED, on the other hand, is novel and tries to characterize why two items are deemed similar, using cultural heritage items which are described with metadata such as title, author or description. Several types of similarity have been defined, including similar author, similar time period or similar location. The annotation for both tasks leverages crowdsourcing, with relatively high interannotator correlation, ranging from 62% to 87%. The CORE task attracted 34 participants with 89 runs, and the TYPED task attracted 6 teams with 14 runs.", "phrases": ["semantic textual similarity", "sts", "semeval", "long text segment"], "overall_score": 9.360934527024384, "scores": [5.56390607767788, 2.4170033693240534, 0.9632539569374814, 0.5790814206671914], "rank_score": 2.3808112061516518} -{"id": "koehn-etal-2003-statistical", "title": "Statistical Phrase-Based Translation", "abstract": "We propose a new phrase-based translation model and decoding algorithm that enables us to evaluate and compare several, previously proposed phrase-based translation models. Within our framework, we carry out a large number of experiments to understand better and explain why phrase-based models out-perform word-based models. Our empirical results, which hold for all examined language pairs, suggest that the highest levels of performance can be obtained through relatively simple means: heuristic learning of phrase translations from word-based alignments and lexical weighting of phrase translations. Surprisingly, learning phrases longer than three words and learning phrases from high-accuracy word-level alignment models does not have a strong impact on performance. Learning only syntactically motivated phrases degrades the performance of our systems.", "phrases": ["phrase-based translation", "heuristic", "smt system", "parallel corpora", "distortion model"], "overall_score": 14.104741731301468, "scores": [3.5166011362037364, 2.5564544492294097, 2.140834165666755, 1.8460692086318233, 1.812376980786736], "rank_score": 2.374467188103692} -{"id": "zeng-etal-2014-relation", "title": "Relation Classification via Convolutional Deep Neural Network", "abstract": "The state-of-the-art methods used for relation classification are primarily based on statistical machine learning, and their performance strongly depends on the quality of the extracted features. The extracted features are often derived from the output of pre-existing natural language processing (NLP) systems, which leads to the propagation of the errors in the existing tools and hinders the performance of these systems. In this paper, we exploit a convolutional deep neural network (DNN) to extract lexical and sentence level features. Our method takes all of the word tokens as input without complicated pre-processing. First, the word tokens are transformed to vectors by looking up word embeddings. Then, lexical level features are extracted according to the given nouns. Meanwhile, sentence level features are learned using a convolutional approach. These two level features are concatenated to form the final extracted feature vector. Finally, the features are fed into a softmax classifier to predict the relationship between two marked nouns. 
The experimental results demonstrate that our approach significantly outperforms the state-of-the-art methods.", "phrases": ["sentence level feature", "relation classification", "convolutional neural network", "cnns", "learning method"], "overall_score": 11.136898142615214, "scores": [5.1817130162272145, 2.6385192352117612, 1.6517421029164765, 1.5492205310451017, 0.8253583133571807], "rank_score": 2.3693106397515464} -{"id": "zhou-etal-2011-phrase", "title": "Phrase-Based Translation Model for Question Retrieval in Community Question Answer Archives", "abstract": "Community-based question answer (Q&A) has become an important issue due to the popularity of Q&A archives on the web. This paper is concerned with the problem of question retrieval. Question retrieval in Q&A archives aims to find historical questions that are semantically equivalent or relevant to the queried questions. In this paper, we propose a novel phrase-based translation model for question retrieval. Compared to the traditional word-based translation models, the phrase-based translation model is more effective because it captures contextual information in modeling the translation of phrases as a whole, rather than translating single words in isolation. Experiments conducted on real Q&A data demonstrate that our proposed phrase-based translation model significantly outperforms the state-of-the-art word-based translation model.", "phrases": ["translation model", "question retrieval", "web"], "overall_score": 6.970899648637356, "scores": [4.132307602139737, 2.4490363948059737, 0.5210952432223355], "rank_score": 2.367479746722682} -{"id": "nivre-etal-2007-conll", "title": "The CoNLL 2007 Shared Task on Dependency Parsing", "abstract": "The Conference on Computational Natural Language Learning features a shared task, in which participants train and test their learning systems on the same data sets. In 2007, as in 2006, the shared task has been devoted to dependency parsing, this year with both a multilingual track and a domain adaptation track. In this paper, we define the tasks of the different tracks and describe how the data sets were created from existing treebanks for ten languages. In addition, we characterize the different approaches of the participating systems, report the test results, and provide a first analysis of these results.", "phrases": ["conll", "dependency parsing", "availability", "such language", "arabic"], "overall_score": 10.327241465028825, "scores": [5.93479539514254, 2.5839857187320443, 1.5066117134678854, 0.9150147134447243, 0.877151351920485], "rank_score": 2.363511778541536} -{"id": "shen-etal-2016-minimum", "title": "Minimum Risk Training for Neural Machine Translation", "abstract": "We propose minimum risk training for end-to-end neural machine translation. Unlike conventional maximum likelihood estimation, minimum risk training is capable of optimizing model parameters directly with respect to arbitrary evaluation metrics, which are not necessarily differentiable. Experiments show that our approach achieves significant improvements over maximum likelihood estimation on a state-of-the-art neural machine translation system across various language pairs. 
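The objective described here can be sketched as an expected-risk loss over a sampled candidate set (a minimal sketch assuming k sampled translations per source sentence; the smoothing constant and tensor names are illustrative, not the paper's code):

import torch

def mrt_loss(log_probs, bleu_scores, alpha=0.005):
    # log_probs: model log-probabilities of k candidate translations, shape [k]
    # bleu_scores: sentence-level BLEU for each candidate, shape [k]
    q = torch.softmax(alpha * log_probs, dim=0)  # smoothed distribution over candidates
    risk = 1.0 - bleu_scores                     # risk = 1 - metric; any metric works
    return torch.sum(q * risk)                   # expected risk, differentiable in log_probs

Because the evaluation metric enters only through the per-candidate risk values, it never needs to be differentiable, which is the point of the method.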
Transparent to architectures, our approach can be applied to more neural networks and potentially benefit more NLP tasks.", "phrases": ["neural machine translation", "evaluation metric", "minimum risk training", "mrt", "sentence-level bleu"], "overall_score": 8.71519652881784, "scores": [4.140016519271025, 3.607615295482164, 1.6712820277546236, 1.2643923735042344, 1.1294903760511914], "rank_score": 2.3625593184126474} -{"id": "snow-etal-2008-cheap", "title": "Cheap and Fast \u2013 But is it Good? Evaluating Non-Expert Annotations for Natural Language Tasks", "abstract": "Human linguistic annotation is crucial for many natural language processing tasks but can be expensive and time-consuming. We explore the use of Amazon's Mechanical Turk system, a significantly cheaper and faster method for collecting annotations from a broad base of paid non-expert contributors over the Web. We investigate five tasks: affect recognition, word similarity, recognizing textual entailment, event temporal ordering, and word sense disambiguation. For all five, we show high agreement between Mechanical Turk non-expert annotations and existing gold standard labels provided by expert labelers. For the task of affect recognition, we also show that using non-expert labels for training machine learning algorithms can be as effective as using gold standard annotations from experts. We propose a technique for bias correction that significantly improves annotation quality on two tasks. We conclude that many large labeling tasks can be effectively designed and carried out in this method at a fraction of the usual expense.", "phrases": ["annotator", "natural language task", "crowdsourcing", "mturk", "cost"], "overall_score": 11.796606067805687, "scores": [2.329479245900384, 2.581653737011341, 2.573426969340339, 2.2737310793098664, 2.0290117756139794], "rank_score": 2.357460561435182} -{"id": "baccianella-etal-2010-sentiwordnet", "title": "SentiWordNet 3.0: An Enhanced Lexical Resource for Sentiment Analysis and Opinion Mining", "abstract": "In this work we present SENTIWORDNET 3.0, a lexical resource explicitly devised for supporting sentiment classification and opinion mining applications. SENTIWORDNET 3.0 is an improved version of SENTIWORDNET 1.0, a lexical resource publicly available for research purposes, now currently licensed to more than 300 research groups and used in a variety of research projects worldwide. Both SENTIWORDNET 1.0 and 3.0 are the result of automatically annotating all WORDNET synsets according to their degrees of positivity, negativity, and neutrality. SENTIWORDNET 1.0 and 3.0 differ (a) in the versions of WORDNET which they annotate (WORDNET 2.0 and 3.0, respectively), (b) in the algorithm used for automatically annotating WORDNET, which now includes (additionally to the previous semi-supervised learning step) a random-walk step for refining the scores. We here discuss SENTIWORDNET 3.0, especially focussing on the improvements concerning aspect (b) that it embodies with respect to version 1.0. 
We also report the results of evaluating SENTIWORDNET 3.0 against a fragment of WORDNET 3.0 manually annotated for positivity, negativity, and neutrality; these results indicate accuracy improvements of about 20% with respect to SENTIWORDNET 1.0.", "phrases": ["sentiwordnet", "sentiment polarity", "entry"], "overall_score": 8.054706852518677, "scores": [5.538305175402627, 0.947277627636067, 0.5511704283056635], "rank_score": 2.345584410448119} -{"id": "joshi-etal-2020-spanbert", "title": "SpanBERT: Improving Pre-training by Representing and Predicting Spans", "abstract": "We present SpanBERT, a pre-training method that is designed to better represent and predict spans of text. Our approach extends BERT by (1) masking contiguous random spans, rather than random tokens, and (2) training the span boundary representations to predict the entire content of the masked span, without relying on the individual token representations within it. SpanBERT consistently outperforms BERT and our better-tuned baselines, with substantial gains on span selection tasks such as question answering and coreference resolution. In particular, with the same training data and model size as BERT-large, our single model obtains 94.6% and 88.7% F1 on SQuAD 1.1 and 2.0 respectively. We also achieve a new state of the art on the OntoNotes coreference resolution task (79.6% F1), strong performance on the TACRED relation extraction benchmark, and even gains on GLUE.", "phrases": ["coreference resolution", "spanbert", "language model", "pre-training objective", "downstream task"], "overall_score": 10.821384989051236, "scores": [5.9808996076668155, 2.082249042086814, 1.5879745814414505, 1.220935543945498, 0.8517791501772949], "rank_score": 2.3447675850635745} -{"id": "somasundaran-wiebe-2009-recognizing", "title": "Recognizing Stances in Online Debates", "abstract": "This paper presents an unsupervised opinion analysis method for debate-side classification, i.e., recognizing which stance a person is taking in an online debate. In order to handle the complexities of this genre, we mine the web to learn associations that are indicative of opinion stances in debates. We combine this knowledge with discourse information, and formulate the debate side classification task as an Integer Linear Programming problem. Our results show that our method is substantially better than challenging baseline methods.", "phrases": ["stance", "online debate", "argument trigger expression"], "overall_score": 8.728374371206916, "scores": [4.552066168557661, 1.9185931016054885, 0.5350757968241285], "rank_score": 2.335245022329093} -{"id": "pennacchiotti-etal-2008-automatic", "title": "Automatic induction of FrameNet lexical units", "abstract": "Most attempts to integrate FrameNet in NLP systems have so far failed because of its limited coverage. In this paper, we investigate the applicability of distributional and WordNet-based models on the task of lexical unit induction, i.e. the expansion of FrameNet with new lexical units. 
Experimental results show that our distributional and WordNet-based models achieve a good level of accuracy and coverage, especially when combined.", "phrases": ["framenet", "lexical unit", "automatic induction"], "overall_score": 5.980122353303893, "scores": [3.367321161220113, 2.22088537443889, 1.4062267102538106], "rank_score": 2.3314777486376044} -{"id": "reimers-gurevych-2019-sentence", "title": "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", "abstract": "BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embedding methods.", "phrases": ["sentence embedding", "siamese architecture", "mnli dataset"], "overall_score": 10.532347095125125, "scores": [5.876758527671839, 0.5558413960001694, 0.5384641055025221], "rank_score": 2.323688009724844} -{"id": "zoph-etal-2016-transfer", "title": "Transfer Learning for Low-Resource Neural Machine Translation", "abstract": "The encoder-decoder framework for neural machine translation (NMT) has been shown effective in large data scenarios, but is much less effective for low-resource languages. We present a transfer learning method that significantly improves Bleu scores across a range of low-resource languages. Our key idea is to first train a high-resource language pair (the parent model), then transfer some of the learned parameters to the low-resource pair (the child model) to initialize and constrain training. Using our transfer learning method we improve baseline NMT models by an average of 5.6 Bleu on four low-resource language pairs. Ensembling and unknown word replacement add another 2 Bleu which brings the NMT performance on low-resource machine translation close to a strong syntax based machine translation (SBMT) system, exceeding its performance on one language pair. 
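The parent-to-child transfer can be sketched as copying every shape-compatible parameter and leaving the new source-language embeddings at their fresh initialization (a sketch under that assumption, not the authors' released code):

import torch.nn as nn

def init_child_from_parent(parent: nn.Module, child: nn.Module) -> None:
    # Copy each parent parameter whose name and shape match the child;
    # parameters tied to the new source vocabulary (shape mismatch) keep
    # their random initialization and are learned from the low-resource data.
    parent_state = parent.state_dict()
    child_state = child.state_dict()
    for name, tensor in parent_state.items():
        if name in child_state and child_state[name].shape == tensor.shape:
            child_state[name].copy_(tensor)
    child.load_state_dict(child_state)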
Additionally, using the transfer learning model for re-scoring, we can improve the SBMT system by an average of 1.3 Bleu, improving the state-of-the-art on low-resource machine translation.", "phrases": ["neural machine translation", "nmt model", "low-resource language pair", "transfer learning", "translation quality"], "overall_score": 11.298457594812106, "scores": [3.825715752295498, 2.5331641717559163, 1.9758126214202134, 1.69798155764876, 1.5732608006938142], "rank_score": 2.3211869807628407} -{"id": "cho-etal-2014-properties", "title": "On the Properties of Neural Machine Translation: Encoder\u2013Decoder Approaches", "abstract": "Neural machine translation is a relatively new approach to statistical machine translation based purely on neural networks. The neural machine translation models often consist of an encoder and a decoder. The encoder extracts a fixed-length representation from a variable-length input sentence, and the decoder generates a correct translation from this representation. In this paper, we focus on analyzing the properties of the neural machine translation using two models; RNN Encoder--Decoder and a newly proposed gated recursive convolutional neural network. We show that the neural machine translation performs relatively well on short sentences without unknown words, but its performance degrades rapidly as the length of the sentence and the number of unknown words increase. Furthermore, we find that the proposed gated recursive convolutional network learns a grammatical structure of a sentence automatically.", "phrases": ["neural machine translation", "rnn", "source sentence", "encoder-decoder architecture", "recurrent unit"], "overall_score": 10.841614840719126, "scores": [4.614125335132284, 1.9921923951717133, 1.9291338485717069, 1.5356252132658677, 1.461377337034887], "rank_score": 2.306490825835292} -{"id": "socher-etal-2013-recursive", "title": "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank", "abstract": "Semantic word spaces have been very useful but cannot express the meaning of longer phrases in a principled way. Further progress towards understanding compositionality in tasks such as sentiment detection requires richer supervised training and evaluation resources and more powerful models of composition. To remedy this, we introduce a Sentiment Treebank. It includes fine grained sentiment labels for 215,154 phrases in the parse trees of 11,855 sentences and presents new challenges for sentiment compositionality. To address them, we introduce the Recursive Neural Tensor Network. When trained on the new treebank, this model outperforms all previous methods on several metrics. It pushes the state of the art in single sentence positive/negative classification from 80% up to 85.4%. The accuracy of predicting fine-grained sentiment labels for all phrases reaches 80.7%, an improvement of 9.7% over bag of features baselines. 
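The Recursive Neural Tensor Network's composition at each tree node follows directly from the model definition and can be sketched as follows (dimensions and names are illustrative):

import torch

def rntn_compose(a, b, V, W):
    # a, b: child vectors of size d; V: [d, 2d, 2d] tensor; W: [d, 2d] matrix.
    x = torch.cat([a, b])                           # concatenated children, [2d]
    bilinear = torch.einsum("i,kij,j->k", x, V, x)  # slice-wise x^T V_k x, [d]
    return torch.tanh(bilinear + W @ x)             # parent representation, [d]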
Lastly, it is the only model that can accurately capture the effects of negation and its scope at various tree levels for both positive and negative phrases.", "phrases": ["sentiment treebank", "composition", "recursive neural network", "movie review", "node"], "overall_score": 12.56456635233206, "scores": [3.441626668960592, 2.7708264811242347, 1.9806028413645131, 1.7377618047895618, 1.594111315307883], "rank_score": 2.304985822309357} -{"id": "dyer-etal-2013-simple", "title": "A Simple, Fast, and Effective Reparameterization of IBM Model 2", "abstract": "We present a simple log-linear reparameterization of IBM Model 2 that overcomes problems arising from Model 1\u2019s strong assumptions and Model 2\u2019s overparameterization. Efficient inference, likelihood evaluation, and parameter estimation algorithms are provided. Training the model is consistently ten times faster than Model 4. On three large-scale translation tasks, systems built using our alignment model outperform IBM Model 4. An open-source implementation of the alignment model described in this paper is available from http://github.com/clab/fast_align.", "phrases": ["reparameterization", "ibm model", "fast_align", "variation"], "overall_score": 8.322612850619612, "scores": [4.340946132623607, 3.2557129306382175, 1.1019551417181228, 0.520773277052376], "rank_score": 2.3048468705080807} -{"id": "howcroft-etal-2020-twenty", "title": "Twenty Years of Confusion in Human Evaluation: NLG Needs Evaluation Sheets and Standardised Definitions", "abstract": "Human assessment remains the most trusted form of evaluation in NLG, but highly diverse approaches and a proliferation of different quality criteria used by researchers make it difficult to compare results and draw conclusions across papers, with adverse implications for meta-evaluation and reproducibility. In this paper, we present (i) our dataset of 165 NLG papers with human evaluations, (ii) the annotation scheme we developed to label the papers for different aspects of evaluations, (iii) quantitative analyses of the annotations, and (iv) a set of recommendations for improving standards in evaluation reporting. We use the annotations as a basis for examining information included in evaluation reports, and levels of consistency in approaches, experimental design and terminology, focusing in particular on the 200+ different terms that have been used for evaluated aspects of quality. We conclude that due to a pervasive lack of clarity in reports and extreme diversity in approaches, human evaluation in NLG presents as extremely confused in 2020, and that the field is in urgent need of standard methods and terminology.", "phrases": ["human evaluation", "nlg", "open-ended text generation"], "overall_score": 6.527805253691421, "scores": [3.4906620254216074, 2.858807967614772, 0.5626165260244633], "rank_score": 2.3040288396869477} -{"id": "ghosal-etal-2020-cosmic", "title": "COSMIC: COmmonSense knowledge for eMotion Identification in Conversations", "abstract": "In this paper, we address the task of utterance level emotion recognition in conversations using commonsense knowledge. We propose COSMIC, a new framework that incorporates different elements of commonsense such as mental states, events, and causal relations, and build upon them to learn interactions between interlocutors participating in a conversation. Current state-of-the-art methods often encounter difficulties in context propagation, emotion shift detection, and differentiating between related emotion classes. 
By learning distinct commonsense representations, COSMIC addresses these challenges and achieves new state-of-the-art results for emotion recognition on four different benchmark conversational datasets. Our code is available at .", "phrases": ["commonsense knowledge", "conversation", "emotion recognition"], "overall_score": 6.235995494009728, "scores": [3.1388069562609124, 2.8286902443145223, 0.9407892390240966], "rank_score": 2.3027621465331776} -{"id": "bowman-etal-2016-generating", "title": "Generating Sentences from a Continuous Space", "abstract": "The standard recurrent neural network language model (RNNLM) generates sentences one word at a time and does not work from an explicit global sentence representation. In this work, we introduce and study an RNN-based variational autoencoder generative model that incorporates distributed latent representations of entire sentences. This factorization allows it to explicitly model holistic properties of sentences such as style, topic, and high-level syntactic features. Samples from the prior over these sentence representations remarkably produce diverse and well-formed sentences through simple deterministic decoding. By examining paths through this latent space, we are able to generate coherent novel sentences that interpolate between known sentences. We present techniques for solving the difficult learning problem presented by this model, demonstrate its effectiveness in imputing missing words, explore many interesting properties of the model's latent sentence space, and present negative results on the use of the model in language modeling.", "phrases": ["continuous space", "variational autoencoder", "vae", "latent variable", "text generation"], "overall_score": 10.689118043002471, "scores": [1.4095896896247102, 2.9546267203320387, 2.4508648372464803, 2.3658835706340082, 2.3265915820543093], "rank_score": 2.3015112799783095} -{"id": "artetxe-etal-2018-robust", "title": "A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings", "abstract": "Recent work has managed to learn cross-lingual word embeddings without parallel data by mapping monolingual embeddings to a shared space through adversarial training. However, their evaluation has focused on favorable conditions, using comparable corpora or closely-related languages, and we show that they often fail in more realistic scenarios. This work proposes an alternative approach based on a fully unsupervised initialization that explicitly exploits the structural similarity of the embeddings, and a robust self-learning algorithm that iteratively improves this solution. Our method succeeds in all tested scenarios and obtains the best published results in standard datasets, even surpassing previous supervised systems. Our implementation is released as an open source project at .", "phrases": ["self-learning method", "mapping", "word embedding", "different language", "unsupervised learning"], "overall_score": 10.084471033082282, "scores": [4.762502102349719, 3.5704728654771416, 1.5737514686989116, 1.0569434200730767, 0.5429592374445131], "rank_score": 2.3013258188086727} -{"id": "waseem-etal-2017-understanding", "title": "Understanding Abuse: A Typology of Abusive Language Detection Subtasks", "abstract": "As the body of research on abusive language detection and analysis grows, there is a need for critical consideration of the relationships between different subtasks that have been grouped under this label. 
Based on work on hate speech, cyberbullying, and online abuse we propose a typology that captures central similarities and differences between subtasks and discuss the implications of this for data annotation and feature construction. We emphasize the practical actions that can be taken by researchers to best approach their abusive language detection subtask of interest.", "phrases": ["abuse", "abusive language detection", "hate speech", "cyberbullying", "trolling"], "overall_score": 10.265946120892767, "scores": [4.580887587826631, 2.02964286351357, 1.9918768935783973, 1.6753957627866283, 1.215879529887334], "rank_score": 2.298736527518512} -{"id": "li-etal-2016-diversity", "title": "A Diversity-Promoting Objective Function for Neural Conversation Models", "abstract": "Sequence-to-sequence neural network models for generation of conversational responses tend to generate safe, commonplace responses (e.g., \"I don't know\") regardless of the input. We suggest that the traditional objective function, i.e., the likelihood of output (response) given input (message) is unsuited to response generation tasks. Instead we propose using Maximum Mutual Information (MMI) as the objective function in neural models. Experimental results demonstrate that the proposed MMI models produce more diverse, interesting, and appropriate responses, yielding substantive gains in BLEU scores on two conversational datasets and in human evaluations.", "phrases": ["objective function", "neural conversation model", "diversity", "dialogue generation", "language model"], "overall_score": 11.43804722274672, "scores": [4.992306435147645, 2.5392629252878414, 1.868584202472669, 1.0905194553267745, 1.0008471380990762], "rank_score": 2.298304031266801} -{"id": "schmidt-wiegand-2017-survey", "title": "A Survey on Hate Speech Detection using Natural Language Processing", "abstract": "This paper presents a survey on hate speech detection. Given the steadily growing body of social media content, the amount of online hate speech is also increasing. Due to the massive scale of the web, methods that automatically detect hate speech are required. Our survey describes key areas that have been explored to automatically recognize these types of utterances using natural language processing. We also discuss limits of those approaches.", "phrases": ["hate speech detection", "language detection", "abusive language", "cyberbullying", "offensive content"], "overall_score": 11.218033405531392, "scores": [6.158231975536342, 1.978206258269443, 1.9462347805667013, 0.8510230904421778, 0.5535949868433352], "rank_score": 2.2974582183315997} -{"id": "sun-etal-2019-utilizing", "title": "Utilizing BERT for Aspect-Based Sentiment Analysis via Constructing Auxiliary Sentence", "abstract": "Aspect-based sentiment analysis (ABSA), which aims to identify fine-grained opinion polarity towards a specific aspect, is a challenging subtask of sentiment analysis (SA). In this paper, we construct an auxiliary sentence from the aspect and convert ABSA to a sentence-pair classification task, such as question answering (QA) and natural language inference (NLI). We fine-tune the pre-trained model from BERT and achieve new state-of-the-art results on SentiHood and SemEval-2014 Task 4 datasets. 
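The auxiliary-sentence construction can be sketched as pairing each review with a generated question about the target aspect (the template below is a hypothetical illustration, not necessarily the paper's exact wording):

def make_sentence_pair(review: str, aspect: str):
    # The pair is fed to a BERT-style sentence-pair classifier that
    # predicts the polarity toward this specific aspect.
    auxiliary = f"what do you think of the {aspect} of it ?"
    return review, auxiliary

pair = make_sentence_pair("The battery lasts all day but the screen is dim.", "screen")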
The source codes are available at .", "phrases": ["sentiment analysis", "auxiliary sentence", "aspect category"], "overall_score": 7.390078617917716, "scores": [4.164671586795247, 1.8806204706062426, 0.8422789642799464], "rank_score": 2.2958570072271454} -{"id": "langlais-patry-2007-translating", "title": "Translating Unknown Words by Analogical Learning", "abstract": "Unknown words are a well-known hindrance to natural language applications. In particular, they drastically impact machine translation quality. An easy way out commercial translation systems usually offer their users is the possibility to add unknown words and their translations into a dedicated lexicon. Recently, Stroppa and Yvon (2005) have shown how analogical learning alone deals nicely with morphology in different languages. In this study we show that analogical learning offers as well an elegant and effective solution to the problem of identifying potential translations of unknown words.", "phrases": ["unknown word", "analogical learning", "european language"], "overall_score": 5.491682333242988, "scores": [3.411603878404461, 2.904443075859161, 0.554581294860202], "rank_score": 2.290209416374608} -{"id": "och-ney-2004-alignment", "title": "The Alignment Template Approach to Statistical Machine Translation", "abstract": "A phrase-based statistical machine translation approach the alignment template approach is described. This translation approach allows for general many-to-many relations between words. Thereby, the context of words is taken into account in the translation model, and local changes in word order from source to target language can be learned explicitly. The model is described using a log-linear modeling approach, which is a generalization of the often used source-channel approach. Thereby, the model is easier to extend than classical statistical machine translation systems. We describe in detail the process for learning phrasal translations, the feature functions used, and the search algorithm. The evaluation of this approach is performed on three different tasks. For the German-English speech Verbmobil task, we analyze the effect of various system components. On the French-English Canadian Hansards task, the alignment template system obtains significantly better results than a single-word-based translation model. In the Chinese-English 2002 National Institute of Standards and Technology (NIST) machine translation evaluation it yields statistically significantly better NIST scores than all competing research and commercial translation systems.", "phrases": ["alignment template approach", "machine translation", "log-linear model", "smt system", "unit"], "overall_score": 10.764311759390624, "scores": [5.600522438561984, 1.963848202026249, 1.6007144012417982, 1.309298729708804, 0.9758414413218873], "rank_score": 2.2900450425721446} -{"id": "kobayashi-2018-contextual", "title": "Contextual Augmentation: Data Augmentation by Words with Paradigmatic Relations", "abstract": "We propose a novel data augmentation for labeled sentences called contextual augmentation. We assume an invariance that sentences are natural even if the words in the sentences are replaced with other words with paradigmatic relations. We stochastically replace words with other words that are predicted by a bi-directional language model at the word positions. Words predicted according to a context are numerous but appropriate for the augmentation of the original words. 
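The replacement step can be sketched with an off-the-shelf masked language model (an assumption for illustration: the paper uses a label-conditional bi-directional LSTM LM rather than BERT, and stochastically replaces multiple positions):

import random
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-base-uncased")

def augment(sentence: str) -> str:
    # Mask one random position and substitute a word predicted from the
    # surrounding context (a paradigmatic, context-compatible replacement).
    tokens = sentence.split()
    i = random.randrange(len(tokens))
    masked = " ".join(fill.tokenizer.mask_token if j == i else t
                      for j, t in enumerate(tokens))
    return fill(masked)[0]["sequence"]  # top-ranked contextual substitute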
Furthermore, we retrofit a language model with a label-conditional architecture, which allows the model to augment sentences without breaking the label-compatibility. Through experiments on six different text classification tasks, we demonstrate that the proposed method improves classifiers based on the convolutional or recurrent neural networks.", "phrases": ["paradigmatic relation", "language model", "contextual augmentation"], "overall_score": 7.077637187649632, "scores": [3.3319844373245004, 1.7675403629274353, 1.769650205701164], "rank_score": 2.2897250019843667} -{"id": "ribeiro-etal-2020-beyond", "title": "Beyond Accuracy: Behavioral Testing of NLP Models with CheckList", "abstract": "Although measuring held-out accuracy has been the primary approach to evaluate generalization, it often overestimates the performance of NLP models, while alternative approaches for evaluating models either focus on individual tasks or on specific behaviors. Inspired by principles of behavioral testing in software engineering, we introduce CheckList, a task-agnostic methodology for testing NLP models. CheckList includes a matrix of general linguistic capabilities and test types that facilitate comprehensive test ideation, as well as a software tool to generate a large and diverse number of test cases quickly. We illustrate the utility of CheckList with tests for three tasks, identifying critical failures in both commercial and state-of-the-art models. In a user study, a team responsible for a commercial sentiment analysis model found new and actionable bugs in an extensively tested model. In another user study, NLP practitioners with CheckList created twice as many tests, and found almost three times as many bugs as users without it.", "phrases": ["behavioral testing", "checklist", "negation", "testing framework", "change"], "overall_score": 10.095161093147171, "scores": [4.791719390542953, 2.9966210615280606, 1.5064854054944814, 1.0826929464890416, 1.045342970293329], "rank_score": 2.284572354869573} -{"id": "marton-etal-2009-improved", "title": "Improved Statistical Machine Translation Using Monolingually-Derived Paraphrases", "abstract": "Untranslated words still constitute a major problem for Statistical Machine Translation (SMT), and current SMT systems are limited by the quantity of parallel training texts. Augmenting the training data with paraphrases generated by pivoting through other languages alleviates this problem, especially for the so-called \"low density\" languages. But pivoting requires additional parallel texts. We address this problem by deriving paraphrases monolingually, using distributional semantic similarity measures, thus providing access to larger training resources, such as comparable and unrelated monolingual corpora. 
We present what is to our knowledge the first successful integration of a collocational approach to untranslated words with an end-to-end, state-of-the-art SMT system, demonstrating significant translation improvements in a low-resource setting.", "phrases": ["statistical machine translation", "paraphrase", "source side"], "overall_score": 7.506536286702864, "scores": [4.302718471424104, 1.9920310391367262, 0.5379942736938759], "rank_score": 2.2775812614182356} -{"id": "chu-wang-2018-survey", "title": "A Survey of Domain Adaptation for Neural Machine Translation", "abstract": "Neural machine translation (NMT) is a deep learning based approach for machine translation, which yields the state-of-the-art translation performance in scenarios where large-scale parallel corpora are available. Although the high-quality and domain-specific translation is crucial in the real world, domain-specific corpora are usually scarce or nonexistent, and thus vanilla NMT performs poorly in such scenarios. Domain adaptation that leverages both out-of-domain parallel corpora as well as monolingual corpora for in-domain translation, is very important for domain-specific translation. In this paper, we give a comprehensive survey of the state-of-the-art domain adaptation techniques for NMT.", "phrases": ["domain adaptation", "neural machine translation", "out-of-domain parallel corpora", "distinction"], "overall_score": 9.078937603908875, "scores": [5.368153841683517, 2.619941640106213, 0.5755170696623257, 0.5403973898547723], "rank_score": 2.2760024853267073} -{"id": "yimam-etal-2013-webanno", "title": "WebAnno: A Flexible, Web-based and Visually Supported System for Distributed Annotations", "abstract": "We present WebAnno, a general purpose web-based annotation tool for a wide range of linguistic annotations. WebAnno offers annotation project management, freely configurable tagsets and the management of users in different roles. WebAnno uses modern web technology for visualizing and editing annotations in a web browser. It supports arbitrarily large documents, pluggable import/export filters, the curation of annotations across various users, and an interface to farming out annotations to a crowdsourcing platform. Currently WebAnno allows part-of-speech, named entity, dependency parsing and co-reference chain annotations. The architecture design allows adding additional modes of visualization and editing, when new kinds of annotations are to be supported.", "phrases": ["annotation tool", "management", "webanno"], "overall_score": 6.928976210001726, "scores": [4.586555098971573, 1.3903248319824177, 0.8507684294071985], "rank_score": 2.275882786787063} -{"id": "chambers-jurafsky-2008-unsupervised", "title": "Unsupervised Learning of Narrative Event Chains", "abstract": "Hand-coded scripts were used in the 1970-80s as knowledge backbones that enabled inference and other NLP tasks requiring deep semantic knowledge. We propose unsupervised induction of similar schemata called narrative event chains from raw newswire text. A narrative event chain is a partially ordered set of events related by a common protagonist. We describe a three step process to learning narrative event chains. The first uses unsupervised distributional methods to learn narrative relations between events sharing coreferring arguments. The second applies a temporal classifier to partially order the connected events. Finally, the third prunes and clusters self-contained chains from the space of events. 
We introduce two evaluations: the narrative cloze to evaluate event relatedness, and an order coherence task to evaluate narrative order. We show a 36% improvement over baseline for narrative prediction and 25% for temporal coherence.", "phrases": ["narrative event chain", "script", "newswire text", "protagonist", "co-occurrence"], "overall_score": 10.18546686972776, "scores": [4.66522449067884, 2.32304613616706, 2.0097862458872213, 1.1948412869094716, 1.1529383380637037], "rank_score": 2.2691672995412593} -{"id": "mccoy-etal-2019-right", "title": "Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural Language Inference", "abstract": "A machine learning system can score well on a given test set by relying on heuristics that are effective for frequent example types but break down in more challenging cases. We study this issue within natural language inference (NLI), the task of determining whether one sentence entails another. We hypothesize that statistical NLI models may adopt three fallible syntactic heuristics: the lexical overlap heuristic, the subsequence heuristic, and the constituent heuristic. To determine whether models have adopted these heuristics, we introduce a controlled evaluation set called HANS (Heuristic Analysis for NLI Systems), which contains many examples where the heuristics fail. We find that models trained on MNLI, including BERT, a state-of-the-art model, perform very poorly on HANS, suggesting that they have indeed adopted these heuristics. We conclude that there is substantial room for improvement in NLI systems, and that the HANS dataset can motivate and measure progress in this area.", "phrases": ["reason", "heuristic", "natural language inference", "nli dataset", "annotation artifact"], "overall_score": 10.832566082015894, "scores": [4.854299602785719, 2.700715693392986, 1.6398502714841787, 1.2405165538718295, 0.8780223332591991], "rank_score": 2.2626808909587828} -{"id": "joshi-etal-2017-triviaqa", "title": "TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension", "abstract": "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions. We show that, in comparison to other recently introduced large-scale datasets, TriviaQA (1) has relatively complex, compositional questions, (2) has considerable syntactic and lexical variability between questions and corresponding answer-evidence sentences, and (3) requires more cross sentence reasoning to find answers. We also present two baseline algorithms: a feature-based classifier and a state-of-the-art neural network, that performs well on SQuAD reading comprehension. Neither approach comes close to human performance (23% and 40% vs. 
80%), suggesting that TriviaQA is a challenging testbed that is worth significant future study.", "phrases": ["reading comprehension", "question-answer pair", "distant supervision", "triviaqa", "paragraph"], "overall_score": 9.903871983595613, "scores": [5.343988130815086, 2.357178574774421, 1.2455748455266986, 1.2372998299481617, 1.1165197625269885], "rank_score": 2.2601122287182713} -{"id": "domhan-hieber-2017-using", "title": "Using Target-side Monolingual Data for Neural Machine Translation through Multi-task Learning", "abstract": "The performance of Neural Machine Translation (NMT) models relies heavily on the availability of sufficient amounts of parallel data, and an efficient and effective way of leveraging the vastly available amounts of monolingual data has yet to be found. We propose to modify the decoder in a neural sequence-to-sequence model to enable multi-task learning for two strongly related tasks: target-side language modeling and translation. The decoder predicts the next target word through two channels, a target-side language model on the lowest layer, and an attentional recurrent model which is conditioned on the source representation. This architecture allows joint training on both large amounts of monolingual and moderate amounts of bilingual data to improve NMT performance. Initial results in the news domain for three language pairs show moderate but consistent improvements over a baseline trained on bilingual data only.", "phrases": ["monolingual data", "neural machine translation", "multi-task learning"], "overall_score": 6.262839697145943, "scores": [3.1964052478976694, 2.681269471423194, 0.8988511103953783], "rank_score": 2.2588419432387474} -{"id": "pang-lee-2004-sentimental", "title": "A Sentimental Education: Sentiment Analysis Using Subjectivity Summarization Based on Minimum Cuts", "abstract": "Sentiment analysis seeks to identify the viewpoint(s) underlying a text span; an example application is classifying a movie review as \"thumbs up\" or \"thumbs down\". To determine this sentiment polarity, we propose a novel machine-learning method that applies text-categorization techniques to just the subjective portions of the document. Extracting these portions can be implemented using efficient techniques for finding minimum cuts in graphs; this greatly facilitates incorporation of cross-sentence contextual constraints.", "phrases": ["sentiment analysis", "movie review", "objective sentence", "subjectivity analysis", "text categorization"], "overall_score": 10.546349598980429, "scores": [5.434658349063358, 2.045100731213141, 1.5709433849519836, 1.1201180329756546, 1.1139386622444207], "rank_score": 2.2569518320897113} -{"id": "gururangan-etal-2020-dont", "title": "Don't Stop Pretraining: Adapt Language Models to Domains and Tasks", "abstract": "Language models pretrained on text from a wide variety of sources form the foundation of today's NLP. In light of the success of these broad-coverage models, we investigate whether it is still helpful to tailor a pretrained model to the domain of a target task. We present a study across four domains (biomedical and computer science publications, news, and reviews) and eight classification tasks, showing that a second phase of pretraining in-domain (domain-adaptive pretraining) leads to performance gains, under both high- and low-resource settings. Moreover, adapting to the task's unlabeled data (task-adaptive pretraining) improves performance even after domain-adaptive pretraining. 
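Task-adaptive pretraining amounts to continuing masked-LM training on the task's own unlabeled text before fine-tuning; a minimal sketch with Hugging Face transformers follows (the model choice, toy corpus, and hyperparameters are illustrative stand-ins, not the paper's setup):

from transformers import (AutoModelForMaskedLM, AutoTokenizer,
                          DataCollatorForLanguageModeling, Trainer,
                          TrainingArguments)

tok = AutoTokenizer.from_pretrained("roberta-base")
model = AutoModelForMaskedLM.from_pretrained("roberta-base")
task_corpus = ["an unlabeled sentence from the task domain.",
               "another unlabeled sentence from the task domain."]
trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="tapt", num_train_epochs=1),
    train_dataset=[tok(t, truncation=True) for t in task_corpus],
    data_collator=DataCollatorForLanguageModeling(tok, mlm_probability=0.15),
)
trainer.train()  # continue masked-LM pretraining, then fine-tune on labeled data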
Finally, we show that adapting to a task corpus augmented using simple data selection strategies is an effective alternative, especially when resources for domain-adaptive pretraining might be unavailable. Overall, we consistently find that multi-phase adaptive pretraining offers large gains in task performance.", "phrases": ["language model", "unlabeled data", "pre-training", "downstream task", "tapt"], "overall_score": 10.756635599590492, "scores": [2.9470434116059225, 2.7764573981155256, 2.0944568939307255, 1.7389404628445475, 1.7167831248915935], "rank_score": 2.254736258277663} -{"id": "wu-etal-2020-mind", "title": "MIND: A Large-scale Dataset for News Recommendation", "abstract": "News recommendation is an important technique for personalized news service. Compared with product and movie recommendations which have been comprehensively studied, the research on news recommendation is much more limited, mainly due to the lack of a high-quality benchmark dataset. In this paper, we present a large-scale dataset named MIND for news recommendation. Constructed from the user click logs of Microsoft News, MIND contains 1 million users and more than 160k English news articles, each of which has rich textual content such as title, abstract and body. We demonstrate that MIND is a good testbed for news recommendation through a comparative study of several state-of-the-art news recommendation methods which are originally developed on different proprietary datasets. Our results show the performance of news recommendation highly relies on the quality of news content understanding and user interest modeling. Many natural language processing techniques such as effective text representation methods and pre-trained language models can effectively improve the performance of news recommendation. The MIND dataset will be available at .", "phrases": ["large-scale dataset", "news recommendation", "mind"], "overall_score": 5.182958551413714, "scores": [2.9914181796060637, 2.8550117548168217, 0.9063609620138562], "rank_score": 2.2509302988122473} -{"id": "chan-etal-2007-word", "title": "Word Sense Disambiguation Improves Statistical Machine Translation", "abstract": "Recent research presents conflicting evidence on whether word sense disambiguation (WSD) systems can help to improve the performance of statistical machine translation (MT) systems. In this paper, we successfully integrate a state-of-the-art WSD system into a state-of-the-art hierarchical phrase-based MT system, Hiero. We show for the first time that integrating a WSD system improves the performance of a state-of-the-art statistical MT system on an actual translation task. Furthermore, the improvement is statistically significant.", "phrases": ["wsd", "word sense disambiguation", "smt system", "translation quality", "soft constraint modeling"], "overall_score": 9.32945589725386, "scores": [5.06192112199896, 2.7958169776211013, 1.618913824073673, 1.1639528614707624, 0.5756950124692056], "rank_score": 2.24325995952674} -{"id": "konstas-etal-2017-neural", "title": "Neural AMR: Sequence-to-Sequence Models for Parsing and Generation", "abstract": "Sequence-to-sequence models have shown strong performance across a broad range of applications. However, their application to parsing and generating text using Abstract Meaning Representation (AMR) has been limited, due to the relatively limited amount of labeled data and the non-sequential nature of the AMR graphs. 
We present a novel training procedure that can lift this limitation using millions of unlabeled sentences and careful preprocessing of the AMR graphs. For AMR parsing, our model achieves competitive results of 62.1 SMATCH, the current best score reported without significant use of external semantic resources. For AMR generation, our model establishes a new state-of-the-art performance of BLEU 33.8. We present extensive ablative and qualitative analysis including strong evidence that sequence-based AMR models are robust against ordering variations of graph-to-sequence conversions.", "phrases": ["amr", "sequence-to-sequence model", "neural amr", "input graph", "semantic parsing"], "overall_score": 9.529178062805691, "scores": [4.778016395850175, 3.7577633021675694, 0.872025023800571, 1.2278012275377514, 0.579162480288064], "rank_score": 2.242953685928826} -{"id": "socher-etal-2012-semantic", "title": "Semantic Compositionality through Recursive Matrix-Vector Spaces", "abstract": "Single-word vector space models have been very successful at learning lexical information. However, they cannot capture the compositional meaning of longer phrases, preventing them from a deeper understanding of language. We introduce a recursive neural network (RNN) model that learns compositional vector representations for phrases and sentences of arbitrary syntactic type and length. Our model assigns a vector and a matrix to every node in a parse tree: the vector captures the inherent meaning of the constituent, while the matrix captures how it changes the meaning of neighboring words or phrases. This matrix-vector RNN can learn the meaning of operators in propositional logic and natural language. The model obtains state of the art performance on three different experiments: predicting fine-grained sentiment distributions of adverb-adjective pairs; classifying sentiment labels of movie reviews and classifying semantic relationships such as cause-effect or topic-message between nouns using the syntactic path between them.", "phrases": ["recursive neural network", "matrix", "parse tree", "semantic compositionality", "network model"], "overall_score": 10.963268480717243, "scores": [2.2732097382667074, 2.553625932935057, 2.326029595759047, 2.0860162215751723, 1.9702041956018121], "rank_score": 2.2418171368275592} -{"id": "zhang-clark-2008-joint", "title": "Joint Word Segmentation and POS Tagging Using a Single Perceptron", "abstract": "For Chinese POS tagging, word segmentation is a preliminary step. To avoid error propagation and improve segmentation by utilizing POS information, segmentation and tagging can be performed simultaneously. A challenge for this joint approach is the large combined search space, which makes efficient decoding very hard. Recent research has explored the integration of segmentation and POS tagging, by decoding under restricted versions of the full combined search space. In this paper, we propose a joint segmentation and POS tagging model that does not impose any hard constraints on the interaction between word and POS information. Fast decoding is achieved by using a novel multiple-beam search algorithm. The system uses a discriminative statistical model, trained using the generalized perceptron algorithm. 
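The generalized perceptron update at the heart of such joint models is compact enough to sketch (feature extraction and the multiple-beam decoder are omitted; names are illustrative):

def perceptron_update(weights, gold_features, predicted_features):
    # Promote features fired by the gold segmentation+tagging and demote
    # those fired by the incorrect beam-search prediction.
    for f, v in gold_features.items():
        weights[f] = weights.get(f, 0.0) + v
    for f, v in predicted_features.items():
        weights[f] = weights.get(f, 0.0) - v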
The joint model gives an error reduction in segmentation accuracy of 14.6% and an error reduction in tagging accuracy of 12.2%, compared to the traditional pipeline approach.", "phrases": ["word segmentation", "pos tagging", "generalized perceptron algorithm", "joint model", "part-of-speech"], "overall_score": 7.469495965563526, "scores": [4.2967687728976385, 4.29343743828485, 1.2180713379574746, 0.8479280650324854, 0.55183390254195], "rank_score": 2.2416079033428797} -{"id": "wang-sennrich-2020-exposure", "title": "On Exposure Bias, Hallucination and Domain Shift in Neural Machine Translation", "abstract": "The standard training algorithm in neural machine translation (NMT) suffers from exposure bias, and alternative algorithms have been proposed to mitigate this. However, the practical impact of exposure bias is under debate. In this paper, we link exposure bias to another well-known problem in NMT, namely the tendency to generate hallucinations under domain shift. In experiments on three datasets with multiple test domains, we show that exposure bias is partially to blame for hallucinations, and that training with Minimum Risk Training, which avoids exposure bias, can mitigate this. Our analysis explains why exposure bias is more problematic under domain shift, and also links exposure bias to the beam search problem, i.e. performance deterioration with increasing beam size. Our results provide a new justification for methods that reduce exposure bias: even if they do not increase performance on in-domain test sets, they can increase model robustness to domain shift.", "phrases": ["hallucination", "domain shift", "neural machine translation"], "overall_score": 6.712625077891789, "scores": [3.135622869109377, 2.715766806088395, 0.8707982223825319], "rank_score": 2.240729299193435} -{"id": "pontiki-etal-2014-semeval", "title": "SemEval-2014 Task 4: Aspect Based Sentiment Analysis", "abstract": "Sentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint. The majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, irrespective of the entities mentioned (e.g., laptops) and their aspects (e.g., battery, screen). SemEval2014 Task 4 aimed to foster research in the field of aspect-based sentiment analysis, where the goal is to identify the aspects of given target entities and the sentiment expressed for each aspect. The task provided datasets containing manually annotated reviews of restaurants and laptops, as well as a common evaluation procedure. It attracted 163 submissions from 32 teams.", "phrases": ["sentiment analysis", "semeval", "aspect term"], "overall_score": 10.296210608484081, "scores": [3.2142245702341063, 1.9063486096766613, 1.5868079977567722], "rank_score": 2.2357937258891796} -{"id": "liu-etal-2019-linguistic", "title": "Linguistic Knowledge and Transferability of Contextual Representations", "abstract": "Contextual word representations derived from large-scale neural language models are successful across a diverse set of NLP tasks, suggesting that they encode useful and transferable features of language. To shed light on the linguistic knowledge they capture, we study the representations produced by several recent pretrained contextualizers (variants of ELMo, the OpenAI transformer language model, and BERT) with a suite of sixteen diverse probing tasks. 
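A probing task of this kind reduces to fitting a linear classifier on frozen features; a minimal sketch with scikit-learn (the contextualizer's feature extraction is stubbed out with random vectors for illustration):

import numpy as np
from sklearn.linear_model import LogisticRegression

# X: per-token representations from a frozen pretrained contextualizer,
# y: linguistic labels (e.g., POS tags); toy stand-ins below.
X = np.random.randn(200, 768)
y = np.random.randint(0, 5, size=200)
probe = LogisticRegression(max_iter=1000).fit(X, y)  # only the probe is trained
print("probe accuracy:", probe.score(X, y))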
We find that linear models trained on top of frozen contextual representations are competitive with state-of-the-art task-specific models in many cases, but fail on tasks requiring fine-grained linguistic knowledge (e.g., conjunct identification). To investigate the transferability of contextual word representations, we quantify differences in the transferability of individual layers within contextualizers, especially between recurrent neural networks (RNNs) and transformers. For instance, higher layers of RNNs are more task-specific, while transformer layers do not exhibit the same monotonic trend. In addition, to better understand what makes contextual word representations transferable, we compare language model pretraining with eleven supervised pretraining tasks. For any given task, pretraining on a closely related task yields better performance than language model pretraining (which is better on average) when the pretraining dataset is fixed. However, language model pretraining on more data gives the best results.", "phrases": ["transferability", "linguistic knowledge", "methodology", "different layer", "capability"], "overall_score": 9.733678311452984, "scores": [4.48831221886221, 3.3994744139373516, 1.2301075759981122, 1.2219471967397706, 0.8310661518182247], "rank_score": 2.2341815114711343} -{"id": "esuli-sebastiani-2006-sentiwordnet", "title": "SENTIWORDNET: A Publicly Available Lexical Resource for Opinion Mining", "abstract": "Opinion mining (OM) is a recent subdiscipline at the crossroads of information retrieval and computational linguistics which is concerned not with the topic a document is about, but with the opinion it expresses. OM has a rich set of applications, ranging from tracking users\u2019 opinions about products or about political candidates as expressed in online forums, to customer relationship management. In order to aid the extraction of opinions from text, recent research has tried to automatically determine the \u201cPN-polarity\u201d of subjective terms, i.e. identify whether a term that is a marker of opinionated content has a positive or a negative connotation. Research on determining whether a term is indeed a marker of opinionated content (a subjective term) or not (an objective term) has been instead much scarcer. In this work we describe SENTIWORDNET, a lexical resource in which each WORDNET synset s is associated to three numerical scores Obj(s), Pos(s) and Neg(s), describing how objective, positive, and negative the terms contained in the synset are. The method used to develop SENTIWORDNET is based on the quantitative analysis of the glosses associated to synsets, and on the use of the resulting vectorial term representations for semi-supervised synset classification. The three scores are derived by combining the results produced by a committee of eight ternary classifiers, all characterized by similar accuracy levels but different classification behaviour. 
SENTIWORDNET is freely available for research purposes, and is endowed with a Web-based graphical user interface.", "phrases": ["information retrieval", "gloss", "sentiwordnet", "polarity", "opinion mining application"], "overall_score": 9.969824485901173, "scores": [6.856261952385035, 1.648722322178034, 1.1084087600351633, 0.9209537047608666, 0.6278001532920638], "rank_score": 2.2324293785302327} -{"id": "zadeh-etal-2017-tensor", "title": "Tensor Fusion Network for Multimodal Sentiment Analysis", "abstract": "Multimodal sentiment analysis is an increasingly popular research area, which extends the conventional language-based definition of sentiment analysis to a multimodal setup where other relevant modalities accompany language. In this paper, we pose the problem of multimodal sentiment analysis as modeling intra-modality and inter-modality dynamics. We introduce a novel model, termed Tensor Fusion Networks, which learns both such dynamics end-to-end. The proposed approach is tailored for the volatile nature of spoken language in online videos as well as accompanying gestures and voice. In the experiments, our model outperforms state-of-the-art approaches for both multimodal and unimodal sentiment analysis.", "phrases": ["multimodal sentiment analysis", "modality", "tensor fusion network", "concatenation"], "overall_score": 8.396120261800117, "scores": [3.8209805235111847, 2.5733281439957074, 2.014037123105201, 0.5208470239551577], "rank_score": 2.232298203641813} -{"id": "stanovsky-etal-2019-evaluating", "title": "Evaluating Gender Bias in Machine Translation", "abstract": "We present the first challenge set and evaluation protocol for the analysis of gender bias in machine translation (MT). Our approach uses two recent coreference resolution datasets composed of English sentences which cast participants into non-stereotypical gender roles (e.g., \u201cThe doctor asked the nurse to help her in the operation\u201d). We devise an automatic gender bias evaluation method for eight target languages with grammatical gender, based on morphological analysis (e.g., the use of female inflection for the word \u201cdoctor\u201d). Our analyses show that four popular industrial MT systems and two recent state-of-the-art academic MT models are significantly prone to gender-biased translation errors for all tested target languages. Our data and code are publicly available at .", "phrases": ["gender bias", "machine translation", "evaluation protocol", "winomt", "adjective"], "overall_score": 7.932086086012677, "scores": [4.809878152333714, 3.787332270619893, 1.4716615201255998, 0.5499166070140004, 0.5363584971550324], "rank_score": 2.231029409449648} -{"id": "rush-etal-2015-neural", "title": "A Neural Attention Model for Abstractive Sentence Summarization", "abstract": "Summarization based on text extraction is inherently limited, but generation-style abstractive methods have proven challenging to build. In this work, we propose a fully data-driven approach to abstractive sentence summarization. Our method utilizes a local attention-based model that generates each word of the summary conditioned on the input sentence. While the model is structurally simple, it can easily be trained end-to-end and scales to a large amount of training data. 
The model shows significant performance gains on the DUC-2004 shared task compared with several strong baselines.", "phrases": ["neural attention model", "abstractive sentence summarization", "gigaword", "input text", "language generation task"], "overall_score": 11.755008054506241, "scores": [4.126875468494914, 3.0060054204161997, 1.5072194763518496, 1.3999912910136776, 1.0847945473017582], "rank_score": 2.22497724071568} -{"id": "pfeiffer-etal-2020-mad", "title": "MAD-X: An Adapter-Based Framework for Multi-Task Cross-Lingual Transfer", "abstract": "The main goal behind state-of-the-art pre-trained multilingual models such as multilingual BERT and XLM-R is enabling and bootstrapping NLP applications in low-resource languages through zero-shot or few-shot cross-lingual transfer. However, due to limited model capacity, their transfer performance is the weakest exactly on such low-resource languages and languages unseen during pre-training. We propose MAD-X, an adapter-based framework that enables high portability and parameter-efficient transfer to arbitrary tasks and languages by learning modular language and task representations. In addition, we introduce a novel invertible adapter architecture and a strong baseline method for adapting a pre-trained multilingual model to a new language. MAD-X outperforms the state of the art in cross-lingual transfer across a representative set of typologically diverse languages on named entity recognition and causal commonsense reasoning, and achieves competitive results on question answering. Our code and adapters are available at AdapterHub.ml.", "phrases": ["cross-lingual transfer", "task representation", "adapter", "new language", "mad-x"], "overall_score": 7.958821286751873, "scores": [4.447594965138613, 2.742416335270969, 1.9049990821124247, 1.1226878130053586, 0.8870586418688662], "rank_score": 2.220951367479246} -{"id": "clark-etal-2018-semi", "title": "Semi-Supervised Sequence Modeling with Cross-View Training", "abstract": "Unsupervised representation learning algorithms such as word2vec and ELMo improve the accuracy of many supervised NLP models, mainly because they can take advantage of large amounts of unlabeled text. However, the supervised models only learn from task-specific labeled data during the main training phase. We therefore propose Cross-View Training (CVT), a semi-supervised learning algorithm that improves the representations of a Bi-LSTM sentence encoder using a mix of labeled and unlabeled data. On labeled examples, standard supervised learning is used. On unlabeled examples, CVT teaches auxiliary prediction modules that see restricted views of the input (e.g., only part of a sentence) to match the predictions of the full model seeing the whole input. Since the auxiliary modules and the full model share intermediate representations, this in turn improves the full model. Moreover, we show that CVT is particularly effective when combined with multi-task learning. 
We evaluate CVT on five sequence tagging tasks, machine translation, and dependency parsing, achieving state-of-the-art results.", "phrases": ["cross-view training", "unlabeled data", "view", "dependency parsing", "self-training"], "overall_score": 8.451476908268354, "scores": [5.248761669313103, 1.7150324385499738, 1.6677920317936363, 1.4202693000385485, 1.049045849794008], "rank_score": 2.220180257897854} -{"id": "li-etal-2020-flat", "title": "FLAT: Chinese NER Using Flat-Lattice Transformer", "abstract": "Recently, the character-word lattice structure has been proved to be effective for Chinese named entity recognition (NER) by incorporating the word information. However, since the lattice structure is complex and dynamic, it is hard for lattice-based models to fully utilize the parallel computation of GPUs, and they usually have a low inference speed. In this paper, we propose FLAT: Flat-LAttice Transformer for Chinese NER, which converts the lattice structure into a flat structure consisting of spans. Each span corresponds to a character or latent word and its position in the original lattice. With the power of Transformer and well-designed position encoding, FLAT can fully leverage the lattice information and has an excellent parallel ability. Experiments on four datasets show FLAT outperforms other lexicon-based models in performance and efficiency.", "phrases": ["chinese ner", "flat-lattice transformer", "flat"], "overall_score": 5.513374049929326, "scores": [2.370930946782902, 2.184474813971721, 2.1008290524119397], "rank_score": 2.2187449377221875} -{"id": "yih-etal-2015-semantic", "title": "Semantic Parsing via Staged Query Graph Generation: Question Answering with Knowledge Base", "abstract": "We propose a novel semantic parsing framework for question answering using a knowledge base. We define a query graph that resembles subgraphs of the knowledge base and can be directly mapped to a logical form. Semantic parsing is reduced to query graph generation, formulated as a staged search problem. Unlike traditional approaches, our method leverages the knowledge base in an early stage to prune the search space and thus simplifies the semantic matching problem. By applying an advanced entity linking system and a deep convolutional neural network model that matches questions and predicate sequences, our system outperforms previous methods substantially, and achieves an F1 measure of 52.5% on the WEBQUESTIONS dataset.", "phrases": ["query graph", "knowledge base", "semantic parsing", "natural language question", "relation path"], "overall_score": 8.63148727634041, "scores": [4.314353811840693, 3.38427238852562, 1.6460134974628216, 0.916129354776022, 0.8284984561371741], "rank_score": 2.2178535017484657} -{"id": "dua-etal-2019-drop", "title": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs", "abstract": "Reading comprehension has recently seen rapid progress, with systems matching humans on the most popular datasets for the task. However, a large body of work has highlighted the brittleness of these systems, showing that there is much work left to be done. We introduce a new reading comprehension benchmark, DROP, which requires Discrete Reasoning Over the content of Paragraphs. In this crowdsourced, adversarially-created, 55k-question benchmark, a system must resolve references in a question, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or sorting). 
These operations require a much more comprehensive understanding of the content of paragraphs, as they remove the paraphrase-and-entity-typing shortcuts available in prior datasets. We apply state-of-the-art methods from both the reading comprehension and semantic parsing literatures on this dataset and show that the best systems only achieve 38.4% F1 on our generalized accuracy metric, while expert human performance is 96%. We additionally present a new model that combines reading comprehension methods with simple numerical reasoning to achieve 51% F1.", "phrases": ["reading comprehension benchmark", "discrete reasoning", "paragraphs", "drop", "answer type"], "overall_score": 8.94950020693848, "scores": [4.662951437527486, 3.6556861870546844, 0.951559199285013, 0.9258129568623591, 0.8717452563365775], "rank_score": 2.2135510074132236} -{"id": "malmasi-etal-2016-discriminating", "title": "Discriminating between Similar Languages and Arabic Dialect Identification: A Report on the Third DSL Shared Task", "abstract": "We present the results of the third edition of the Discriminating between Similar Languages (DSL) shared task, which was organized as part of the VarDial'2016 workshop at COLING'2016. The challenge offered two subtasks: subtask 1 focused on the identification of very similar languages and language varieties in newswire texts, whereas subtask 2 dealt with Arabic dialect identification in speech transcripts. A total of 37 teams registered to participate in the task, 24 teams submitted test results, and 20 teams also wrote system description papers. High-order character n-grams were the most successful feature, and the best classification approaches included traditional supervised learning methods such as SVM, logistic regression, and language models, while deep learning approaches did not perform very well.", "phrases": ["similar languages", "arabic dialect identification", "discriminating", "dsl task"], "overall_score": 8.928988299875469, "scores": [3.601931441197161, 3.500872879501044, 1.210532201217613, 0.5205740158130953], "rank_score": 2.2084776344322283} -{"id": "morante-sporleder-2012-modality", "title": "Modality and Negation: An Introduction to the Special Issue", "abstract": "Traditionally, most research in NLP has focused on propositional aspects of meaning. To truly understand language, however, extra-propositional aspects are equally important. Modality and negation typically contribute significantly to these extra-propositional meaning aspects. Although modality and negation have often been neglected by mainstream computational linguistics, interest has grown in recent years, as evidenced by several annotation projects dedicated to these phenomena. Researchers have started to work on modeling factuality, belief and certainty, detecting speculative sentences and hedging, identifying contradictions, and determining the scope of expressions of modality and negation. 
In this article, we will provide an overview of how modality and negation have been modeled in computational linguistics.", "phrases": ["negation", "extra-propositional aspect", "factuality", "modality", "attitude"], "overall_score": 7.193829799133854, "scores": [4.684864663402437, 3.798987290645273, 1.3696642013940796, 0.6270744176650256, 0.5593367513032729], "rank_score": 2.2079854648820176} -{"id": "pavlick-etal-2015-ppdb", "title": "PPDB 2.0: Better paraphrase ranking, fine-grained entailment relations, word embeddings, and style classification", "abstract": "We present a new release of the Paraphrase Database. PPDB 2.0 includes a discriminatively re-ranked set of paraphrases that achieve a higher correlation with human judgments than PPDB 1.0\u2019s heuristic rankings. Each paraphrase pair in the database now also includes fine-grained entailment relations, word embedding similarities, and style annotations.", "phrases": ["ranking", "entailment relation", "paraphrase database", "style annotation", "ppdb"], "overall_score": 7.185183071084543, "scores": [4.442205846798385, 2.272039696650758, 1.7301256867751882, 2.0005123812397327, 0.5817741122015314], "rank_score": 2.205331544733119} -{"id": "och-ney-2003-systematic", "title": "A Systematic Comparison of Various Statistical Alignment Models", "abstract": "We present and compare various methods for computing word alignments using statistical or heuristic models. We consider the five alignment models presented in Brown, Della Pietra, Della Pietra, and Mercer (1993), the hidden Markov alignment model, smoothing techniques, and refinements. These statistical models are compared with two heuristic models based on the Dice coefficient. We present different methods for combining word alignments to perform a symmetrization of directed statistical alignment models. As an evaluation criterion, we use the quality of the resulting Viterbi alignment compared to a manually produced reference alignment. We evaluate the models on the German-English Verbmobil task and the French-English Hansards task. We perform a detailed analysis of various design decisions of our statistical alignment system and evaluate these on training corpora of various sizes. An important result is that refined alignment models with a first-order dependence and a fertility model yield significantly better results than simple heuristic models. In the Appendix, we present an efficient training algorithm for the alignment models presented.", "phrases": ["systematic comparison", "giza++", "ibm model", "parallel corpora", "smt system"], "overall_score": 11.876072717601813, "scores": [1.431584505870054, 3.3988758552352434, 2.3298668284034716, 2.273486375389023, 1.5848491223088197], "rank_score": 2.203732537441322} -{"id": "zhang-etal-2019-ernie", "title": "ERNIE: Enhanced Language Representation with Informative Entities", "abstract": "Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. 
In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks. The code and datasets will be available in the future.", "phrases": ["enhanced language representation", "knowledge graph", "ernie", "entity embedding", "downstream task"], "overall_score": 9.746434330620236, "scores": [4.9405372699758825, 0.9756246108561093, 2.309463613930204, 1.701679474281479, 1.0711573609319085], "rank_score": 2.1996924659951165} -{"id": "zhang-yang-2018-chinese", "title": "Chinese NER Using Lattice LSTM", "abstract": "We investigate a lattice-structured LSTM model for Chinese NER, which encodes a sequence of input characters as well as all potential words that match a lexicon. Compared with character-based methods, our model explicitly leverages word and word sequence information. Compared with word-based methods, lattice LSTM does not suffer from segmentation errors. Gated recurrent cells allow our model to choose the most relevant characters and words from a sentence for better NER results. Experiments on various datasets show that lattice LSTM outperforms both word-based and character-based LSTM baselines, achieving the best results.", "phrases": ["lattice lstm", "potential word", "segmentation", "chinese ner"], "overall_score": 6.794301208437397, "scores": [3.7625424692997176, 3.2848377631318195, 1.2093037219570115, 0.5355617827162412], "rank_score": 2.1980614342761977} -{"id": "morante-daelemans-2009-metalearning", "title": "A Metalearning Approach to Processing the Scope of Negation", "abstract": "Finding negation signals and their scope in text is an important subtask in information extraction. In this paper we present a machine learning system that finds the scope of negation in biomedical texts. The system combines several classifiers and works in two phases. To investigate the robustness of the approach, the system is tested on the three subcorpora of the BioScope corpus representing different text types. It achieves the best results to date for this task, with an error reduction of 32.07% compared to current state of the art results.", "phrases": ["scope", "negation", "machine learning system", "bioscope corpus"], "overall_score": 6.456021023253711, "scores": [3.563434097844651, 3.3341115590227988, 1.3518599349179046, 0.5210543415787758], "rank_score": 2.1926149833410324} -{"id": "gamon-2004-sentiment", "title": "Sentiment classification on customer feedback data: noisy data, large feature vectors, and the role of linguistic analysis", "abstract": "We demonstrate that it is possible to perform automatic sentiment classification in the very noisy domain of customer feedback data. We show that by using large feature vectors in combination with feature reduction, we can train linear support vector machines that achieve high classification accuracy on data that present classification challenges even for a human annotator. 
We also show that, surprisingly, the addition of deep linguistic analysis features to a set of surface-level word n-gram features contributes consistently to classification accuracy in this domain.", "phrases": ["customer feedback data", "n-gram", "sentiment classification"], "overall_score": 6.56243610824872, "scores": [3.1117867938991264, 2.595227382613712, 0.8647707919049489], "rank_score": 2.190594989472596} -{"id": "habash-rambow-2006-magead", "title": "MAGEAD: A Morphological Analyzer and Generator for the Arabic Dialects", "abstract": "We present MAGEAD, a morphological analyzer and generator for the Arabic language family. Our work is novel in that it explicitly addresses the need for processing the morphology of the dialects. MAGEAD performs an on-line analysis to, or generation from, a root+pattern+features representation; it has separate phonological and orthographic representations, and it allows for combining morphemes from different dialects. We present a detailed evaluation of MAGEAD.", "phrases": ["morphological analyzer", "generator", "arabic", "dialect", "disambiguation"], "overall_score": 7.5217451387275736, "scores": [3.927995126313983, 3.065794452457539, 1.569062365412567, 1.2991100070578214, 1.089950054870675], "rank_score": 2.190382401222517} -{"id": "stenetorp-etal-2012-brat", "title": "brat: a Web-based Tool for NLP-Assisted Text Annotation", "abstract": "We introduce the brat rapid annotation tool (BRAT), an intuitive web-based tool for text annotation supported by Natural Language Processing (NLP) technology. BRAT has been developed for rich structured annotation for a variety of NLP tasks and aims to support manual curation efforts and increase annotator productivity using NLP techniques. We discuss several case studies of real-world annotation projects using pre-release versions of BRAT and present an evaluation of annotation assisted by semantic class disambiguation on a multicategory entity mention annotation task, showing a 15% decrease in total annotation time. BRAT is available under an open-source license from: http://brat.nlplab.org", "phrases": ["web-based tool", "text annotation", "support", "brat", "visualization"], "overall_score": 9.034817450086836, "scores": [5.5635977604732325, 2.5265260871769124, 1.498746756339668, 0.8262727095239202, 0.5304870042498652], "rank_score": 2.1891260635527194} -{"id": "sun-etal-2019-mitigating", "title": "Mitigating Gender Bias in Natural Language Processing: Literature Review", "abstract": "As Natural Language Processing (NLP) and Machine Learning (ML) tools rise in popularity, it becomes increasingly vital to recognize the role they play in shaping societal biases and stereotypes. Although NLP models have shown success in modeling various applications, they propagate and may even amplify gender bias found in text corpora. While the study of bias in artificial intelligence is not new, methods to mitigate gender bias in NLP are relatively nascent. In this paper, we review contemporary studies on recognizing and mitigating gender bias in NLP. We discuss gender bias based on four forms of representation bias and analyze methods recognizing gender bias. Furthermore, we discuss the advantages and drawbacks of existing gender debiasing methods. 
Finally, we discuss future studies for recognizing and mitigating gender bias in NLP.", "phrases": ["gender bias", "literature review", "language model"], "overall_score": 8.398152244705646, "scores": [4.669045756145499, 1.3157028648768943, 0.5590152395960196], "rank_score": 2.1812546202061376} -{"id": "tiedemann-2012-parallel", "title": "Parallel Data, Tools and Interfaces in OPUS", "abstract": "This paper presents the current status of OPUS, a growing language resource of parallel corpora and related tools. The focus in OPUS is to provide freely available data sets in various formats together with basic annotation to be useful for applications in computational linguistics, translation studies and cross-linguistic corpus studies. In this paper, we report about new data sets and their features, additional annotation tools and models provided from the website and essential interfaces and on-line services included in the project.", "phrases": ["opus", "parallel data", "parallel sentence", "large number"], "overall_score": 8.475805057671252, "scores": [4.5841511369632215, 2.231426094610248, 1.047405239424562, 0.8484218677747787], "rank_score": 2.1778510846932027} -{"id": "taboada-etal-2011-lexicon", "title": "Lexicon-Based Methods for Sentiment Analysis", "abstract": "We present a lexicon-based approach to extracting sentiment from text. The Semantic Orientation CALculator (SO-CAL) uses dictionaries of words annotated with their semantic orientation (polarity and strength), and incorporates intensification and negation. SO-CAL is applied to the polarity classification task, the process of assigning a positive or negative label to a text that captures the text's opinion towards its main subject matter. We show that SO-CAL's performance is consistent across domains and in completely unseen data. Additionally, we describe the process of dictionary creation, and our use of Mechanical Turk to check dictionaries for consistency and reliability.", "phrases": ["sentiment analysis", "lexicon-based approach", "semantic orientation", "negation", "statistical model"], "overall_score": 10.196622904700364, "scores": [4.1899122957088615, 2.277759646900797, 2.0092042860410757, 1.863066145808351, 0.5489268285546496], "rank_score": 2.1777738406027467} -{"id": "lample-etal-2016-neural", "title": "Neural Architectures for Named Entity Recognition", "abstract": "Paper presented at the 2016 Conference of the North American Chapter of the Association for Computational Linguistics, held in San Diego (CA, USA), June 12-17, 2016.", "phrases": ["named entity recognition", "neural architecture", "character", "ner task", "conditional random field"], "overall_score": 11.93143691239665, "scores": [2.526466710141822, 2.2782165699945782, 2.09064163027095, 2.022477504166018, 1.9672760217935767], "rank_score": 2.177015687273389} -{"id": "gimpel-etal-2013-systematic", "title": "A Systematic Exploration of Diversity in Machine Translation", "abstract": "This paper addresses the problem of producing a diverse set of plausible translations. We present a simple procedure that can be used with any statistical machine translation (MT) system. We explore three ways of using diverse translations: (1) system combination, (2) discriminative reranking with rich features, and (3) a novel post-editing scenario in which multiple translations are presented to users. 
We find that diversity can improve performance on these tasks, especially for sentences that are difficult for MT.", "phrases": ["diversity", "machine translation", "high model score"], "overall_score": 5.74279077565373, "scores": [3.0698291695713142, 2.9296009036782498, 0.5287995842216787], "rank_score": 2.176076552490414} -{"id": "lewis-etal-2020-bart", "title": "BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension", "abstract": "We present BART, a denoising autoencoder for pretraining sequence-to-sequence models. BART is trained by (1) corrupting text with an arbitrary noising function, and (2) learning a model to reconstruct the original text. It uses a standard Transformer-based neural machine translation architecture which, despite its simplicity, can be seen as generalizing BERT (due to the bidirectional encoder), GPT (with the left-to-right decoder), and other recent pretraining schemes. We evaluate a number of noising approaches, finding the best performance by both randomly shuffling the order of sentences and using a novel in-filling scheme, where spans of text are replaced with a single mask token. BART is particularly effective when fine-tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa on GLUE and SQuAD, and achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 3.5 ROUGE. BART also provides a 1.1 BLEU increase over a back-translation system for machine translation, with only target language pretraining. We also replicate other pretraining schemes within the BART framework, to understand their effect on end-task performance.", "phrases": ["sequence-to-sequence", "natural language generation", "comprehension", "summarization", "objective"], "overall_score": 12.52635727889627, "scores": [3.389715465268247, 2.0878430963011176, 0.8572326345463072, 2.596431376579611, 1.94442505140257], "rank_score": 2.1751295248195706} -{"id": "read-2005-using", "title": "Using Emoticons to Reduce Dependency in Machine Learning Techniques for Sentiment Classification", "abstract": "Sentiment Classification seeks to identify a piece of text according to its author's general feeling toward their subject, be it positive or negative. Traditional machine learning techniques have been applied to this problem with reasonable success, but they have been shown to work well only when there is a good match between the training and test data with respect to topic. This paper demonstrates that match with respect to domain and time is also important, and presents preliminary experiments with training data labeled with emoticons, which has the potential of being independent of domain, topic and time.", "phrases": ["emoticon", "positive tweet", "negative polarity"], "overall_score": 7.381660558390531, "scores": [5.11353125422384, 0.8638327565935378, 0.533572929969458], "rank_score": 2.170312313595612} -{"id": "vadas-curran-2007-adding", "title": "Adding Noun Phrase Structure to the Penn Treebank", "abstract": "The Penn Treebank does not annotate within base noun phrases (NPs), committing only to flat structures that ignore the complexity of English NPs. This means that tools trained on Treebank data cannot learn the correct internal structure of NPs. This paper details the process of adding gold-standard bracketing within each noun phrase in the Penn Treebank. 
We then examine the consistency and reliability of our annotations. Finally, we use this resource to determine NP structure using several statistical approaches, thus demonstrating the utility of the corpus. This adds detail to the Penn Treebank that is necessary for many NLP applications.", "phrases": ["noun phrase", "penn treebank", "internal structure"], "overall_score": 6.3903172133112855, "scores": [3.9074764881293738, 1.7334167592938614, 0.8700080738739999], "rank_score": 2.1703004404324115} -{"id": "mccarthy-etal-2019-sigmorphon", "title": "The SIGMORPHON 2019 Shared Task: Morphological Analysis in Context and Cross-Lingual Transfer for Inflection", "abstract": "The SIGMORPHON 2019 shared task on cross-lingual transfer and contextual analysis in morphology examined transfer learning of inflection between 100 language pairs, as well as contextual lemmatization and morphosyntactic description in 66 languages. The first task evolves past years' inflection tasks by examining transfer of morphological inflection knowledge from a high-resource language to a low-resource language. This year also presents a new second challenge on lemmatization and morphological feature analysis in context. All submissions featured a neural component and built on either this year's strong baselines or highly ranked systems from previous years' shared tasks. Every participating team improved in accuracy over the baselines for the inflection task (though not Levenshtein distance), and every team in the contextual analysis task improved on both state-of-the-art neural and non-neural baselines.", "phrases": ["cross-lingual transfer", "inflection", "high resource language"], "overall_score": 6.145456671744205, "scores": [3.1908511766263894, 2.788845153594049, 0.5275333684925733], "rank_score": 2.1690765662376705} -{"id": "kumar-etal-2007-improving", "title": "Improving Word Alignment with Bridge Languages", "abstract": "We describe an approach to improve Statistical Machine Translation (SMT) performance using multi-lingual, parallel, sentence-aligned corpora in several bridge languages. Our approach consists of a simple method for utilizing a bridge language to create a word alignment system and a procedure for combining word alignment systems from multiple bridge languages. The final translation is obtained by consensus decoding that combines hypotheses obtained using all bridge language word alignments. We present experiments showing that multilingual, parallel text in Spanish, French, Russian, and Chinese can be utilized in this framework to improve translation performance on an Arabic-to-English task.", "phrases": ["word alignment", "bridge language", "high quality"], "overall_score": 6.262018837924317, "scores": [4.059291753621526, 1.9040694237549738, 0.536168325169876], "rank_score": 2.166509834182125} -{"id": "hamilton-etal-2016-diachronic", "title": "Diachronic Word Embeddings Reveal Statistical Laws of Semantic Change", "abstract": "Understanding how words change their meanings over time is key to models of language and cultural evolution, but historical data on meaning is scarce, making theories hard to develop and test. Word embeddings show promise as a diachronic tool, but have not been carefully evaluated. We develop a robust methodology for quantifying semantic change by evaluating word embeddings (PPMI, SVD, word2vec) against known historical changes. We then use this methodology to reveal statistical laws of semantic evolution. 
Using six historical corpora spanning four languages and two centuries, we propose two quantitative laws of semantic change: (i) the law of conformity---the rate of semantic change scales with an inverse power-law of word frequency; (ii) the law of innovation---independent of frequency, words that are more polysemous have higher rates of semantic change.", "phrases": ["word embedding", "semantic change", "methodology", "historical corpora"], "overall_score": 9.557013855839553, "scores": [4.976633084103986, 2.026140096494444, 1.069329098779586, 0.5790482862044289], "rank_score": 2.162787641395611} -{"id": "takamatsu-etal-2012-reducing", "title": "Reducing Wrong Labels in Distant Supervision for Relation Extraction", "abstract": "In relation extraction, distant supervision seeks to extract relations between entities from text by using a knowledge base, such as Freebase, as a source of supervision. When a sentence and a knowledge base refer to the same entity pair, this approach heuristically labels the sentence with the corresponding relation in the knowledge base. However, this heuristic can fail with the result that some sentences are labeled wrongly. This noisy labeled data causes poor extraction performance. In this paper, we propose a method to reduce the number of wrong labels. We present a novel generative model that directly models the heuristic labeling process of distant supervision. The model predicts whether assigned labels are correct or wrong via its hidden variables. Our experimental results show that this model detected wrong labels with higher performance than baseline methods. In the experiment, we also found that our wrong label reduction boosted the performance of relation extraction.", "phrases": ["distant supervision", "relation extraction", "labeling process"], "overall_score": 7.205787445496867, "scores": [2.8213128455126095, 2.7931848338057534, 0.8729079617459607], "rank_score": 2.1624685470214415} -{"id": "koen-2004-pharaoh", "title": "Pharaoh: a beam search decoder for phrase-based statistical machine translation models", "abstract": "We describe Pharaoh, a freely available decoder for phrase-based statistical machine translation models. The decoder is the implementation of an efficient dynamic programming search algorithm with lattice generation and XML markup for external components.", "phrases": ["beam search decoder", "machine translation model", "pharaoh", "smt system", "hypothesis"], "overall_score": 9.246155408818653, "scores": [5.04973846949052, 2.005222598616962, 0.8845778621533853, 1.4358633980497324, 1.4346014824918472], "rank_score": 2.1620007621604893} -{"id": "jawahar-etal-2019-bert", "title": "What Does BERT Learn about the Structure of Language?", "abstract": "BERT is a recent language representation model that has surprisingly performed well in diverse language understanding benchmarks. This result indicates the possibility that BERT networks capture structural information about language. In this work, we provide novel support for this claim by performing a series of experiments to unpack the elements of English language structure learned by BERT. Our findings are fourfold. BERT's phrasal representation captures the phrase-level information in the lower layers. The intermediate layers of BERT compose a rich hierarchy of linguistic information, starting with surface features at the bottom, syntactic features in the middle followed by semantic features at the top. 
BERT requires deeper layers when tracking subject-verb agreement, in order to handle the long-distance dependency problem. Finally, the compositional scheme underlying BERT mimics classical, tree-like structures.", "phrases": ["bert", "linguistic information", "tree-like structure", "low layer", "attention head"], "overall_score": 9.56742454756242, "scores": [5.172252292254047, 1.9772358117649877, 1.7162786485188806, 1.04942561535724, 0.8812645630933279], "rank_score": 2.1592913861976966} -{"id": "smith-eisner-2005-contrastive", "title": "Contrastive Estimation: Training Log-Linear Models on Unlabeled Data", "abstract": "Conditional random fields (Lafferty et al., 2001) are quite effective at sequence labeling tasks like shallow parsing (Sha and Pereira, 2003) and named-entity extraction (McCallum and Li, 2003). CRFs are log-linear, allowing the incorporation of arbitrary features into the model. To train on unlabeled data, we require unsupervised estimation methods for log-linear models; few exist. We describe a novel approach, contrastive estimation. We show that the new technique can be intuitively understood as exploiting implicit negative evidence and is computationally efficient. Applied to a sequence labeling problem---POS tagging given a tagging dictionary and unlabeled text---contrastive estimation outperforms EM (with the same feature set), is more robust to degradations of the dictionary, and can largely recover by modeling additional features.", "phrases": ["log-linear model", "unlabeled data", "tagging", "contrastive estimation", "neighborhood"], "overall_score": 8.017041956313642, "scores": [5.263786137147332, 1.9206850883279265, 1.7359559091136598, 1.298146774178257, 0.5756719175007715], "rank_score": 2.1588491652535895} -{"id": "kalchbrenner-etal-2014-convolutional", "title": "A Convolutional Neural Network for Modelling Sentences", "abstract": "The ability to accurately represent sentences is central to language understanding. We describe a convolutional architecture dubbed the Dynamic Convolutional Neural Network (DCNN) that we adopt for the semantic modelling of sentences. The network uses Dynamic k-Max Pooling, a global pooling operation over linear sequences. The network handles input sentences of varying length and induces a feature graph over the sentence that is capable of explicitly capturing short and long-range relations. The network does not rely on a parse tree and is easily applicable to any language. We test the DCNN in four experiments: small scale binary and multi-class sentiment prediction, six-way question classification and Twitter sentiment prediction by distant supervision. The network achieves excellent performance in the first three tasks and a greater than 25% error reduction in the last task with respect to the strongest baseline.", "phrases": ["convolutional neural network", "modeling", "k-max pooling", "cnn", "sentiment analysis"], "overall_score": 10.019721665360217, "scores": [4.015027913957637, 2.4749591200571306, 1.6220763887314507, 1.6200528728746204, 1.0547897626231024], "rank_score": 2.157381211648788} -{"id": "zhou-choi-2018-exist", "title": "They Exist! Introducing Plural Mentions to Coreference Resolution and Entity Linking", "abstract": "This paper analyzes arguably the most challenging yet under-explored aspect of resolution tasks such as coreference resolution and entity linking, that is, the resolution of plural mentions. Unlike singular mentions, each of which represents one entity, plural mentions stand for multiple entities. 
To tackle this aspect, we take the character identification corpus from the SemEval 2018 shared task that consists of entity annotation for singular mentions, and expand it by adding annotation for plural mentions. We then introduce a novel coreference resolution algorithm that selectively creates clusters to handle both singular and plural mentions, and also a deep learning-based entity linking model that jointly handles both types of mentions through multi-task learning. Adjusted evaluation metrics are proposed for these tasks as well to handle the uniqueness of plural mentions. Our experiments show that the new coreference resolution and entity linking models significantly outperform traditional models designed only for singular mentions. To the best of our knowledge, this is the first time that plural mentions are thoroughly analyzed for these two resolution tasks.", "phrases": ["plural mention", "coreference resolution", "entity linking"], "overall_score": 5.172102014479468, "scores": [3.349104744611402, 2.2069793114061254, 0.914718159348077], "rank_score": 2.1569340717885344} -{"id": "zellers-etal-2018-swag", "title": "SWAG: A Large-Scale Adversarial Dataset for Grounded Commonsense Inference", "abstract": "Given a partial description like \u201cshe opened the hood of the car,\u201d humans can reason about the situation and anticipate what might come next (\u201cthen, she examined the engine\u201d). In this paper, we introduce the task of grounded commonsense inference, unifying natural language inference and commonsense reasoning. We present SWAG, a new dataset with 113k multiple choice questions about a rich spectrum of grounded situations. To address the recurring challenges of the annotation artifacts and human biases found in many existing datasets, we propose Adversarial Filtering (AF), a novel procedure that constructs a de-biased dataset by iteratively training an ensemble of stylistic classifiers, and using them to filter the data. To account for the aggressive adversarial filtering, we use state-of-the-art language models to massively oversample a diverse set of potential counterfactuals. Empirical results demonstrate that while humans can solve the resulting inference problems with high accuracy (88%), various competitive models struggle on our task. We provide comprehensive analysis that indicates significant opportunities for future research.", "phrases": ["situation", "natural language inference", "multiple choice question", "adversarial filtering", "swag"], "overall_score": 8.507865458737482, "scores": [5.146585912879381, 1.9734160038482254, 1.6653767765518068, 1.0639045492885881, 0.9167768446998776], "rank_score": 2.1532120174535754} -{"id": "gurevych-etal-2012-uby", "title": "UBY - A Large-Scale Unified Lexical-Semantic Resource Based on LMF", "abstract": "We present Uby, a large-scale lexical-semantic resource combining a wide range of information from expert-constructed and collaboratively constructed resources for English and German. It currently contains nine resources in two languages: English WordNet, Wiktionary, Wikipedia, FrameNet and VerbNet, German Wikipedia, Wiktionary and GermaNet, and multilingual OmegaWiki modeled according to the LMF standard. For FrameNet, VerbNet and all collaboratively constructed resources, this is done for the first time. Our LMF model captures lexical information at a fine-grained level by employing a large number of Data Categories from ISOCat and is designed to be directly extensible by new languages and resources. 
All resources in Uby can be accessed with an easy-to-use, publicly available API.", "phrases": ["lexical-semantic resource", "wiktionary", "uby", "uniform representation"], "overall_score": 5.966013193317364, "scores": [3.629604908924563, 3.552344828479216, 0.8783856950702301, 0.5468022154030817], "rank_score": 2.1517844119692726} -{"id": "ide-suderman-2009-bridging", "title": "Bridging the Gaps: Interoperability for GrAF, GATE, and UIMA", "abstract": "This paper explores interoperability for data represented using the Graph Annotation Framework (GrAF) (Ide and Suderman, 2007) and the data formats utilized by two general-purpose annotation systems: the General Architecture for Text Engineering (GATE) (Cunningham, 2002) and the Unstructured Information Management Architecture (UIMA). GrAF is intended to serve as a \"pivot\" to enable interoperability among different formats, and both GATE and UIMA are at least implicitly designed with an eye toward interoperability with other formats and tools. We describe the steps required to perform a round-trip rendering from GrAF to GATE and GrAF to UIMA CAS and back again, and outline the commonalities as well as the differences and gaps that came to light in the process.", "phrases": ["interoperability", "gate", "uima"], "overall_score": 3.8535701510248987, "scores": [2.4143759727141845, 2.285412468355172, 1.7523669132737147], "rank_score": 2.1507184514476907} -{"id": "jiang-zhai-2007-systematic", "title": "A Systematic Exploration of the Feature Space for Relation Extraction", "abstract": "Relation extraction is the task of finding semantic relations between entities from text. The state-of-the-art methods for relation extraction are mostly based on statistical learning, and thus all have to deal with feature selection, which can significantly affect the classification performance. In this paper, we systematically explore a large space of features for relation extraction and evaluate the effectiveness of different feature subspaces. We present a general definition of feature spaces based on a graphic representation of relation instances, and explore three different representations of relation instances and features of different complexities within this framework. Our experiments show that using only basic unit features is generally sufficient to achieve state-of-the-art performance, while overinclusion of complex features may hurt the performance. A combination of features of different levels of complexity and from different sentence representations, coupled with task-oriented feature pruning, gives the best performance.", "phrases": ["systematic exploration", "feature space", "relation extraction", "jiang"], "overall_score": 6.547405956714465, "scores": [4.087151013055612, 2.668611127494301, 1.3212997434192517, 0.5251489266174232], "rank_score": 2.150552702646647} -{"id": "felbo-etal-2017-using", "title": "Using millions of emoji occurrences to learn any-domain representations for detecting sentiment, emotion and sarcasm", "abstract": "NLP tasks are often limited by scarcity of manually annotated data. In social media sentiment analysis and related tasks, researchers have therefore used binarized emoticons and specific hashtags as forms of distant supervision. Our paper shows that by extending the distant supervision to a more diverse set of noisy labels, the models can learn richer representations. 
Through emoji prediction on a dataset of 1246 million tweets containing one of 64 common emojis, we obtain state-of-the-art performance on 8 benchmark datasets within emotion, sentiment and sarcasm detection using a single pretrained model. Our analyses confirm that the diversity of our emotional labels yields a performance improvement over previous distant supervision approaches.", "phrases": ["emojis", "emotion", "sarcasm", "deepmoji", "rich representation"], "overall_score": 8.406275446079029, "scores": [3.723818332015138, 2.2416980799831125, 2.324103115485848, 1.611934020572415, 0.8426003518756466], "rank_score": 2.148830779986432} -{"id": "cai-knight-2013-smatch", "title": "Smatch: an Evaluation Metric for Semantic Feature Structures", "abstract": "The evaluation of whole-sentence semantic structures plays an important role in semantic parsing and large-scale semantic structure annotation. However, there is no widely-used metric to evaluate whole-sentence semantic structures. In this paper, we present smatch, a metric that calculates the degree of overlap between two semantic feature structures. We give an efficient algorithm to compute the metric and show the results of an inter-annotator agreement study.", "phrases": ["smatch", "amr parser", "triple"], "overall_score": 6.540542232548491, "scores": [4.0521158483942035, 1.487358545134563, 0.905420363863061], "rank_score": 2.1482982524639422} -{"id": "shin-etal-2020-autoprompt", "title": "AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts", "abstract": "The remarkable success of pretrained language models has motivated the study of what kinds of knowledge these models learn during pretraining. Reformulating tasks as fill-in-the-blanks problems (e.g., cloze tests) is a natural approach for gauging such knowledge; however, its usage is limited by the manual effort and guesswork required to write suitable prompts. To address this, we develop AutoPrompt, an automated method to create prompts for a diverse set of tasks, based on a gradient-guided search. Using AutoPrompt, we show that masked language models (MLMs) have an inherent capability to perform sentiment analysis and natural language inference without additional parameters or finetuning, sometimes achieving performance on par with recent state-of-the-art supervised models. We also show that our prompts elicit more accurate factual knowledge from MLMs than the manually created prompts on the LAMA benchmark, and that MLMs can be used as relation extractors more effectively than supervised relation extraction models. These results demonstrate that automatically generated prompts are a viable parameter-free alternative to existing probing methods, and as pretrained LMs become more sophisticated and capable, potentially a replacement for finetuning.", "phrases": ["language model", "prompt", "factual knowledge", "fine-tuning", "bert"], "overall_score": 8.957224330712199, "scores": [5.878998444457321, 1.689732294200255, 1.4736103296980647, 0.8497015247420104, 0.8367467496028607], "rank_score": 2.1457578685401026} -{"id": "palmer-etal-2005-proposition", "title": "The Proposition Bank: An Annotated Corpus of Semantic Roles", "abstract": "The Proposition Bank project takes a practical approach to semantic representation, adding a layer of predicate-argument information, or semantic role labels, to the syntactic structures of the Penn Treebank. 
The resulting resource can be thought of as shallow, in that it does not represent coreference, quantification, and many other higher-order phenomena, but also broad, in that it covers every instance of every verb in the corpus and allows representative statistics to be calculated. We discuss the criteria used to define the sets of semantic roles used in the annotation process and to analyze the frequency of syntactic/semantic alternations in the corpus. We describe an automatic system for semantic role tagging trained on the corpus and discuss the effect on its performance of various types of information, including a comparison of full syntactic parsing with a flat representation and the contribution of the empty trace categories of the treebank.", "phrases": ["proposition bank", "annotated corpus", "semantic role", "propbank frameset", "broad-coverage"], "overall_score": 11.172093882899391, "scores": [3.301142152720658, 1.9473735532975045, 3.244885703724453, 1.377112984087369, 0.8523223266455779], "rank_score": 2.1445673440951123} -{"id": "dai-etal-2019-transformer", "title": "Transformer-XL: Attentive Language Models beyond a Fixed-Length Context", "abstract": "Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.", "phrases": ["language modeling", "fixed-length context", "transformer-xl", "memory", "previous segment"], "overall_score": 9.5003819571792, "scores": [4.818878027990068, 1.9409843689140476, 1.7669243045532814, 1.3499031968634385, 0.8441121439339047], "rank_score": 2.144160408450948} -{"id": "mohammad-2018-obtaining", "title": "Obtaining Reliable Human Ratings of Valence, Arousal, and Dominance for 20,000 English Words", "abstract": "Words play a central role in language and thought. Factor analysis studies have shown that the primary dimensions of meaning are valence, arousal, and dominance (VAD). We present the NRC VAD Lexicon, which has human ratings of valence, arousal, and dominance for more than 20,000 English words. We use Best\u2013Worst Scaling to obtain fine-grained scores and address issues of annotation consistency that plague traditional rating scale methods of annotation. We show that the ratings obtained are vastly more reliable than those in existing lexicons. 
We also show that there exist statistically significant differences in the shared understanding of valence, arousal, and dominance across demographic variables such as age, gender, and personality.", "phrases": ["valence", "dominance", "english word"], "overall_score": 5.140790369396084, "scores": [2.3487867143301537, 2.2489077101634414, 1.8339338801863558], "rank_score": 2.1438761015599836} -{"id": "rashkin-etal-2017-truth", "title": "Truth of Varying Shades: Analyzing Language in Fake News and Political Fact-Checking", "abstract": "We present an analytic study on the language of news media in the context of political fact-checking and fake news detection. We compare the language of real news with that of satire, hoaxes, and propaganda to find linguistic characteristics of untrustworthy text. To probe the feasibility of automatic political fact-checking, we also present a case study based on PolitiFact.com using their factuality judgments on a 6-point scale. Experiments show that while media fact-checking remains an open research question, stylistic cues can help determine the truthfulness of text.", "phrases": ["political fact-checking", "fake news", "propaganda", "truth", "textual content"], "overall_score": 9.250755572298708, "scores": [3.6744871496821196, 2.424435419592996, 0.8274932354306868, 2.691145237452781, 1.095561181959971], "rank_score": 2.142624444823711} -{"id": "rahman-ng-2009-supervised", "title": "Supervised Models for Coreference Resolution", "abstract": "Traditional learning-based coreference resolvers operate by training a mention-pair classifier for determining whether two mentions are coreferent or not. Two independent lines of recent research have attempted to improve these mention-pair classifiers, one by learning a mention-ranking model to rank preceding mentions for a given anaphor, and the other by training an entity-mention classifier to determine whether a preceding cluster is coreferent with a given mention. We propose a cluster-ranking approach to coreference resolution that combines the strengths of mention rankers and entity-mention models. We additionally show how our cluster-ranking framework naturally allows discourse-new entity detection to be learned jointly with coreference resolution. Experimental results on the ACE data sets demonstrate its superior performance to competing approaches.", "phrases": ["coreference resolution", "mention", "cluster"], "overall_score": 6.806097839354019, "scores": [4.139835790876913, 1.3810023570062153, 0.9039408546256424], "rank_score": 2.141593000836257} -{"id": "hsu-etal-2018-unified", "title": "A Unified Model for Extractive and Abstractive Summarization using Inconsistency Loss", "abstract": "We propose a unified model combining the strength of extractive and abstractive summarization. On the one hand, a simple extractive model can obtain sentence-level attention with high ROUGE scores but less readable. On the other hand, a more complicated abstractive model can obtain word-level dynamic attention to generate a more readable paragraph. In our model, sentence-level attention is used to modulate the word-level attention such that words in less attended sentences are less likely to be generated. Moreover, a novel inconsistency loss function is introduced to penalize the inconsistency between two levels of attentions. 
By end-to-end training our model with the inconsistency loss and original losses of extractive and abstractive models, we achieve state-of-the-art ROUGE scores while producing the most informative and readable summaries on the CNN/Daily Mail dataset in a solid human evaluation.", "phrases": ["abstractive summarization", "inconsistency loss", "extractor"], "overall_score": 7.344211050277375, "scores": [3.742718775690739, 2.135763518009314, 0.5375675745275289], "rank_score": 2.138683289409194} -{"id": "moro-etal-2014-entity", "title": "Entity Linking meets Word Sense Disambiguation: a Unified Approach", "abstract": "Entity Linking (EL) and Word Sense Disambiguation (WSD) both address the lexical ambiguity of language. But while the two tasks are pretty similar, they differ in a fundamental respect: in EL the textual mention can be linked to a named entity which may or may not contain the exact mention, while in WSD there is a perfect match between the word form (better, its lemma) and a suitable word sense. In this paper we present Babelfy, a unified graph-based approach to EL and WSD based on a loose identification of candidate meanings coupled with a densest subgraph heuristic which selects high-coherence semantic interpretations. Our experiments show state-of-the-art performances on both tasks on 6 different datasets, including a multilingual setting. Babelfy is online at ", "phrases": ["word sense disambiguation", "unified approach", "graph-based approach", "entity linking", "sense repository"], "overall_score": 8.042148006999959, "scores": [3.7963866898159058, 3.47482119307109, 1.2630356690787081, 1.6150760104640385, 0.5416138625749427], "rank_score": 2.138186685000937} -{"id": "anand-etal-2011-cats", "title": "Cats Rule and Dogs Drool!: Classifying Stance in Online Debate", "abstract": "A growing body of work has highlighted the challenges of identifying the stance a speaker holds towards a particular topic, a task that involves identifying a holistic subjective disposition. We examine stance classification on a corpus of 4873 posts across 14 topics on ConvinceMe.net, ranging from the playful to the ideological. We show that ideological debates feature a greater share of rebuttal posts, and that rebuttal posts are significantly harder to classify for stance, for both humans and trained classifiers. We also demonstrate that the number of subjective expressions varies across debates, a fact correlated with the performance of systems sensitive to sentiment-bearing terms. We present results for identifying rebuttals with 63% accuracy, and for identifying stance on a per-topic basis that range from 54% to 69%, as compared to unigram baselines that vary between 49% and 60%. Our results suggest that methods that take into account the dialogic context of such posts might be fruitful.", "phrases": ["stance", "online debate", "dialogic structure"], "overall_score": 7.342484418029512, "scores": [3.118179901526188, 2.2267421696830456, 1.0696193765044848], "rank_score": 2.1381804825712396} -{"id": "field-etal-2018-framing", "title": "Framing and Agenda-setting in Russian News: a Computational Analysis of Intricate Political Strategies", "abstract": "Amidst growing concern over media manipulation, NLP attention has focused on overt strategies like censorship and \u201cfake news\u201d. 
Here, we draw on two concepts from political science literature to explore subtler strategies for government media manipulation: agenda-setting (selecting what topics to cover) and framing (deciding how topics are covered). We analyze 13 years (100K articles) of the Russian newspaper Izvestia and identify a strategy of distraction: articles mention the U.S. more frequently in the month directly following an economic downturn in Russia. We introduce embedding-based methods for cross-lingually projecting English frames to Russian, and discover that these articles emphasize U.S. moral failings and threats to the U.S. Our work offers new ways to identify subtle media manipulation strategies at the intersection of agenda-setting and framing.", "phrases": ["news", "russian newspaper izvestia", "media manipulation strategy", "framing"], "overall_score": 6.880446059965921, "scores": [4.12270579881954, 3.0255786432432896, 0.8345369806940386, 0.5673015820628619], "rank_score": 2.1375307512049324} -{"id": "hoffmann-etal-2011-knowledge", "title": "Knowledge-Based Weak Supervision for Information Extraction of Overlapping Relations", "abstract": "Information extraction (IE) holds the promise of generating a large-scale knowledge base from the Web's natural language text. Knowledge-based weak supervision, using structured data to heuristically label a training corpus, works towards this goal by enabling the automated learning of a potentially unbounded number of relation extractors. Recently, researchers have developed multi-instance learning algorithms to combat the noisy training data that can come from heuristic labeling, but their models assume relations are disjoint --- for example they cannot extract the pair Founded(Jobs, Apple) and CEO-of(Jobs, Apple). \n \nThis paper presents a novel approach for multi-instance learning with overlapping relations that combines a sentence-level extraction model with a simple, corpus-level component for aggregating the individual facts. We apply our model to learn extractors for NY Times text using weak supervision from Free-base. Experiments show that the approach runs quickly and yields surprising gains in accuracy, at both the aggregate and sentence level.", "phrases": ["weak supervision", "information extraction", "knowledge base", "multi-instance", "entity pair"], "overall_score": 9.660811843867828, "scores": [2.941852178415784, 2.2133449292173255, 1.8659918641673139, 1.8562135081964488, 1.8051091846380114], "rank_score": 2.1365023329269763} -{"id": "strotgen-gertz-2010-heideltime", "title": "HeidelTime: High Quality Rule-Based Extraction and Normalization of Temporal Expressions", "abstract": "In this paper, we describe HeidelTime, a system for the extraction and normalization of temporal expressions. HeidelTime is a rule-based system mainly using regular expression patterns for the extraction of temporal expressions and knowledge resources as well as linguistic clues for their normalization. 
In the TempEval-2 challenge, HeidelTime achieved the highest F-Score (86%) for the extraction and the best results in assigning the correct value attribute, i.e., in understanding the semantics of the temporal expressions.", "phrases": ["normalization", "temporal expression", "heideltime"], "overall_score": 5.908206037250789, "scores": [3.378759342751284, 1.6763446026932494, 1.3377007174244275], "rank_score": 2.130934887622987} -{"id": "tenney-etal-2019-bert", "title": "BERT Rediscovers the Classical NLP Pipeline", "abstract": "Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference. Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.", "phrases": ["classical nlp pipeline", "pos tagging", "bert", "language model", "high layer"], "overall_score": 9.67639743824641, "scores": [2.997401570132253, 2.9867037842086326, 2.0343745988117696, 1.3152124289512712, 1.2906587681151016], "rank_score": 2.1248702300438054} -{"id": "chauhan-etal-2020-sentiment", "title": "Sentiment and Emotion help Sarcasm? A Multi-task Learning Framework for Multi-Modal Sarcasm, Sentiment and Emotion Analysis", "abstract": "In this paper, we hypothesize that sarcasm is closely related to sentiment and emotion, and thereby propose a multi-task deep learning framework to solve all these three problems simultaneously in a multi-modal conversational scenario. We, at first, manually annotate the recently released multi-modal MUStARD sarcasm dataset with sentiment and emotion classes, both implicit and explicit. For multi-tasking, we propose two attention mechanisms, viz. Inter-segment Inter-modal Attention (Ie-Attention) and Intra-segment Inter-modal Attention (Ia-Attention). The main motivation of Ie-Attention is to learn the relationship between the different segments of the sentence across the modalities. In contrast, Ia-Attention focuses within the same segment of the sentence across the modalities. Finally, representations from both the attentions are concatenated and shared across the five classes (i.e., sarcasm, implicit sentiment, explicit sentiment, implicit emotion, explicit emotion) for multi-tasking. Experimental results on the extended version of the MUStARD dataset show the efficacy of our proposed approach for sarcasm detection over the existing state-of-the-art systems. The evaluation also shows that the proposed multi-task framework yields better performance for the primary task, i.e., sarcasm detection, with the help of two secondary tasks, emotion and sentiment analysis.", "phrases": ["emotion", "sarcasm", "multi-task learning framework"], "overall_score": 4.119434244117881, "scores": [2.79445360365588, 2.670224669674788, 0.8862330153294303], "rank_score": 2.116970429553366} -{"id": "zhang-etal-2015-randomized", "title": "Randomized Greedy Inference for Joint Segmentation, POS Tagging and Dependency Parsing", "abstract": "In this paper, we introduce a new approach for joint segmentation, POS tagging and dependency parsing. 
While joint modeling of these tasks addresses the issue of error propagation inherent in traditional pipeline architectures, it also complicates the inference task. Past research has addressed this challenge by placing constraints on the scoring function. In contrast, we propose an approach that can handle arbitrarily complex scoring functions. Specifically, we employ a randomized greedy algorithm that jointly predicts segmentations, POS tags and dependency trees. Moreover, this architecture readily handles different segmentation tasks, such as morphological segmentation for Arabic and word segmentation for Chinese. The joint model outperforms the state-of-the-art systems on three datasets, obtaining 2.1% TedEval absolute gain against the best published results in the 2013 SPMRL shared task.", "phrases": ["joint segmentation", "pos tagging", "dependency parsing", "complex scoring function", "greedy algorithm"], "overall_score": 5.252469461160554, "scores": [3.6174918248811925, 3.1592414830200553, 2.3780384129898153, 0.8703941580761696, 0.543580157445581], "rank_score": 2.1137492072825625} -{"id": "li-etal-2018-multi-head", "title": "Multi-Head Attention with Disagreement Regularization", "abstract": "Multi-head attention is appealing for the ability to jointly attend to information from different representation subspaces at different positions. In this work, we introduce a disagreement regularization to explicitly encourage the diversity among multiple attention heads. Specifically, we propose three types of disagreement regularization, which respectively encourage the subspace, the attended positions, and the output representation associated with each attention head to be different from other heads. Experimental results on widely-used WMT14 English-German and WMT17 Chinese-English translation tasks demonstrate the effectiveness and universality of the proposed approach.", "phrases": ["disagreement regularization", "diversity", "attention head"], "overall_score": 5.560942788880263, "scores": [3.15935639679293, 1.7269078971712848, 1.4352461103106857], "rank_score": 2.1071701347583} -{"id": "zhang-etal-2020-discriminative", "title": "Discriminative Nearest Neighbor Few-Shot Intent Detection by Transferring Natural Language Inference", "abstract": "Intent detection is one of the core components of goal-oriented dialog systems, and detecting out-of-scope (OOS) intents is also a practically important skill. Few-shot learning is attracting much attention to mitigate data scarcity, but OOS detection becomes even more challenging. In this paper, we present a simple yet effective approach, discriminative nearest neighbor classification with deep self-attention. Unlike softmax classifiers, we leverage BERT-style pairwise encoding to train a binary classifier that estimates the best matched training example for a user input. We propose to boost the discriminative ability by transferring a natural language inference (NLI) model. Our extensive experiments on a large-scale multi-domain intent detection task show that our method achieves more stable and accurate in-domain and OOS detection than RoBERTa-based classifiers and embedding-based nearest neighbor approaches. 
More notably, the NLI transfer enables our 10-shot model to perform competitively with 50-shot or even full-shot classifiers, while we can keep the inference time constant by leveraging a faster embedding retrieval model.", "phrases": ["intent detection", "natural language inference", "few-shot text classification"], "overall_score": 5.230052585154202, "scores": [3.151343403875858, 2.5673616434586664, 0.5954789308850248], "rank_score": 2.1047279927398495} -{"id": "bykh-meurers-2012-native", "title": "Native Language Identification using Recurring n-grams \u2013 Investigating Abstraction and Domain Dependence", "abstract": "Native Language Identification tackles the problem of determining the native language of an author based on a text the author has written in a second language. In this paper, we discuss the systematic use of recurring n-grams of any length as features for training a native language classifier. Starting with surface n-grams, we investigate two degrees of abstraction incorporating parts-of-speech. The approach outperforms previous work employing a comparable data setup, reaching 89.71% accuracy for a task with seven native languages using data from the International Corpus of Learner English (ICLE). We then investigate the claim by Brooke and Hirst (2011) that a content bias in ICLE seems to result in an easy classification by topic instead of by native language characteristics. We show that training our model on ICLE and testing it on three other, independently compiled learner corpora dealing with other topics still results in high accuracy classification.", "phrases": ["n-gram", "abstraction", "native language identification"], "overall_score": 4.376333658859244, "scores": [2.7174128162661035, 1.9121108234988748, 1.6841912271467059], "rank_score": 2.104571622303895} -{"id": "zhang-etal-2019-paws", "title": "PAWS: Paraphrase Adversaries from Word Scrambling", "abstract": "Existing paraphrase identification datasets lack sentence pairs that have high lexical overlap without being paraphrases. Models trained on such data fail to distinguish pairs like flights from New York to Florida and flights from Florida to New York. This paper introduces PAWS (Paraphrase Adversaries from Word Scrambling), a new dataset with 108,463 well-formed paraphrase and non-paraphrase pairs with high lexical overlap. Challenging pairs are generated by controlled word swapping and back translation, followed by fluency and paraphrase judgments by human raters. State-of-the-art models trained on existing datasets have dismal performance on PAWS (40% accuracy); however, including PAWS training data for these models improves their accuracy to 85% while maintaining performance on existing tasks. In contrast, models that do not capture non-local contextual information fail even with PAWS training examples. As such, PAWS provides an effective instrument for driving further progress on models that better exploit structure, context, and pairwise comparisons.", "phrases": ["paraphrase adversaries", "word scrambling", "sentence pair"], "overall_score": 5.394489504008138, "scores": [3.011027801387659, 2.748371913396891, 0.5500693064832894], "rank_score": 2.103156340422613} -{"id": "gerber-chai-2010-beyond", "title": "Beyond NomBank: A Study of Implicit Arguments for Nominal Predicates", "abstract": "Despite its substantial coverage, NomBank does not account for all within-sentence arguments and ignores extra-sentential arguments altogether. 
These arguments, which we call implicit, are important to semantic processing, and their recovery could potentially benefit many NLP applications. We present a study of implicit arguments for a select group of frequent nominal predicates. We show that implicit arguments are pervasive for these predicates, adding 65% to the coverage of NomBank. We demonstrate the feasibility of recovering implicit arguments with a supervised classification model. Our results and analyses provide a baseline for future work on this emerging task.", "phrases": ["nombank", "implicit argument", "semantic role"], "overall_score": 6.767454839770441, "scores": [3.5428390800395815, 2.1890799297921877, 0.5753651551740746], "rank_score": 2.1024280550019476} -{"id": "yatskar-etal-2010-sake", "title": "For the sake of simplicity: Unsupervised extraction of lexical simplifications from Wikipedia", "abstract": "We report on work in progress on extracting lexical simplifications (e.g., \"collaborate\" \u2192 \"work together\"), focusing on utilizing edit histories in Simple English Wikipedia for this task. We consider two main approaches: (1) deriving simplification probabilities via an edit model that accounts for a mixture of different operations, and (2) using metadata to focus on edits that are more likely to be simplification operations. We find our methods to outperform a reasonable baseline and yield many high-quality lexical simplifications not included in an independently-created manually prepared list.", "phrases": ["lexical simplification", "wikipedia", "relevant edit"], "overall_score": 7.004304380282841, "scores": [4.093778618555944, 1.6837384671181528, 0.5284923674322756], "rank_score": 2.1020031510354573} -{"id": "tang-etal-2016-aspect", "title": "Aspect Level Sentiment Classification with Deep Memory Network", "abstract": "We introduce a deep memory network for aspect level sentiment classification. Unlike feature-based SVM and sequential neural models such as LSTM, this approach explicitly captures the importance of each context word when inferring the sentiment polarity of an aspect. Such importance degree and text representation are calculated with multiple computational layers, each of which is a neural attention model over an external memory. Experiments on laptop and restaurant datasets demonstrate that our approach performs comparably to a state-of-the-art feature-based SVM system, and substantially better than LSTM and attention-based LSTM architectures. On both datasets we show that multiple computational layers could improve the performance. Moreover, our approach is also fast. The deep memory network with 9 layers is 15 times faster than LSTM with a CPU implementation.", "phrases": ["sentiment classification", "deep memory network", "question-answering perspective"], "overall_score": 8.342012295358282, "scores": [3.9163159201276203, 1.860356721783464, 0.5266515042685534], "rank_score": 2.101108048726546} -{"id": "zhao-etal-2011-topical", "title": "Topical Keyphrase Extraction from Twitter", "abstract": "Summarizing and analyzing Twitter content is an important and challenging task. In this paper, we propose to extract topical keyphrases as one way to summarize Twitter. We propose a context-sensitive topical PageRank method for keyword ranking and a probabilistic scoring function that considers both relevance and interestingness of keyphrases for keyphrase ranking. We evaluate our proposed methods on a large Twitter data set. 
Experiments show that these methods are very effective for topical keyphrase extraction.", "phrases": ["twitter", "pagerank", "topical keyphrase extraction"], "overall_score": 5.822189457693086, "scores": [2.8794544685160752, 2.0945476577847333, 1.3257307669965555], "rank_score": 2.0999109644324547} -{"id": "johnson-etal-2017-googles", "title": "Google's Multilingual Neural Machine Translation System: Enabling Zero-Shot Translation", "abstract": "We propose a simple solution to use a single Neural Machine Translation (NMT) model to translate between multiple languages. Our solution requires no changes to the model architecture from a standard NMT system but instead introduces an artificial token at the beginning of the input sentence to specify the required target language. Using a shared wordpiece vocabulary, our approach enables Multilingual NMT systems using a single model. On the WMT'14 benchmarks, a single multilingual model achieves comparable performance for English\u2192French and surpasses state-of-the-art results for English\u2192German. Similarly, a single multilingual model surpasses state-of-the-art results for French\u2192English and German\u2192English on WMT'14 and WMT'15 benchmarks, respectively. On production corpora, multilingual models of up to twelve language pairs allow for better translation of many individual pairs. Our models can also learn to perform implicit bridging between language pairs never seen explicitly during training, showing that transfer learning and zero-shot translation are possible for neural translation. Finally, we show analyses that hint at a universal interlingua representation in our models and also show some interesting examples when mixing languages.", "phrases": ["machine translation", "transfer learning", "google", "nmt model", "resource language"], "overall_score": 11.565366634520641, "scores": [2.133215385997339, 3.1702470232256172, 2.4784702206059994, 1.3968869103910808, 1.3018921681944733], "rank_score": 2.096142341682902} -{"id": "xia-ding-2019-emotion", "title": "Emotion-Cause Pair Extraction: A New Task to Emotion Analysis in Texts", "abstract": "Emotion cause extraction (ECE), the task aimed at extracting the potential causes behind certain emotions in text, has gained much attention in recent years due to its wide applications. However, it suffers from two shortcomings: 1) the emotion must be annotated before cause extraction in ECE, which greatly limits its applications in real-world scenarios; 2) the way to first annotate emotion and then extract the cause ignores the fact that they are mutually indicative. In this work, we propose a new task: emotion-cause pair extraction (ECPE), which aims to extract the potential pairs of emotions and corresponding causes in a document. We propose a 2-step approach to address this new ECPE task, which first performs individual emotion extraction and cause extraction via multi-task learning, and then conducts emotion-cause pairing and filtering. 
The experimental results on a benchmark emotion cause corpus prove the feasibility of the ECPE task as well as the effectiveness of our approach.", "phrases": ["cause", "ecpe", "emotion-cause pair extraction", "two-step method", "sentiment analysis"], "overall_score": 6.907449945352016, "scores": [4.953029201505706, 2.1752743404287176, 1.9264045916484995, 0.8388584913660506, 0.585486535419041], "rank_score": 2.095810632073603} -{"id": "keller-lapata-2003-using", "title": "Using the Web to Obtain Frequencies for Unseen Bigrams", "abstract": "This article shows that the Web can be employed to obtain frequencies for bigrams that are unseen in a given corpus. We describe a method for retrieving counts for adjective-noun, noun-noun, and verb-object bigrams from the Web by querying a search engine. We evaluate this method by demonstrating: (a) a high correlation between Web frequencies and corpus frequencies; (b) a reliable correlation between Web frequencies and plausibility judgments; (c) a reliable correlation between Web frequencies and frequencies recreated using class-based smoothing; (d) a good performance of Web frequencies in a pseudo disambiguation task.", "phrases": ["web", "frequency", "unseen bigram", "plausibility judgment", "english language"], "overall_score": 7.32223840963989, "scores": [3.738796457359221, 3.2862542025669117, 2.371421224440347, 0.5514426927814795, 0.5228741755136376], "rank_score": 2.0941577505323195} -{"id": "wu-etal-2020-corefqa", "title": "CorefQA: Coreference Resolution as Query-based Span Prediction", "abstract": "In this paper, we present CorefQA, an accurate and extensible approach for the coreference resolution task. We formulate the problem as a span prediction task, like in question answering: A query is generated for each candidate mention using its surrounding context, and a span prediction module is employed to extract the text spans of the coreferences within the document using the generated query. This formulation comes with the following key advantages: (1) The span prediction strategy provides the flexibility of retrieving mentions left out at the mention proposal stage; (2) In the question answering framework, encoding the mention and its context explicitly in a query makes it possible to have a deep and thorough examination of cues embedded in the context of coreferent mentions; and (3) A plethora of existing question answering datasets can be used for data augmentation to improve the model's generalization capability. Experiments demonstrate significant performance boost over previous models, with 83.1 (+3.5) F1 score on the CoNLL-2012 benchmark and 87.5 (+2.5) F1 score on the GAP benchmark.", "phrases": ["coreference resolution", "query-based span prediction", "mention"], "overall_score": 7.119973422047192, "scores": [3.520474613690325, 0.9701960951451299, 1.789447105351219], "rank_score": 2.0933726047288914} -{"id": "gururangan-etal-2018-annotation", "title": "Annotation Artifacts in Natural Language Inference Data", "abstract": "Large-scale datasets for natural language inference are created by presenting crowd workers with a sentence (premise), and asking them to generate three new sentences (hypotheses) that it entails, contradicts, or is logically neutral with respect to. We show that, in a significant portion of such data, this protocol leaves clues that make it possible to identify the label by looking only at the hypothesis, without observing the premise. 
Specifically, we show that a simple text categorization model can correctly classify the hypothesis alone in about 67% of SNLI (Bowman et al., 2015) and 53% of MultiNLI (Williams et al., 2017). Our analysis reveals that specific linguistic phenomena such as negation and vagueness are highly correlated with certain inference classes. Our findings suggest that the success of natural language inference models to date has been overestimated, and that the task remains a hard open problem.", "phrases": ["natural language inference", "hypothesis", "annotation artifact", "dataset bias", "surface-level reasoning"], "overall_score": 9.880768993148756, "scores": [4.476738628690091, 2.171955739937167, 1.6071811438978494, 1.5954694977628103, 0.5416091526887147], "rank_score": 2.0785908325953266} -{"id": "zampieri-etal-2014-report", "title": "A Report on the DSL Shared Task 2014", "abstract": "This paper summarizes the methods, results and findings of the Discriminating between Similar Languages (DSL) shared task 2014. The shared task provided data from 13 different languages and varieties divided into 6 groups. Participants were required to train their systems to discriminate between languages on a training and development set containing 20,000 sentences from each language (closed submission) and/or any other dataset (open submission). One month later, a test set containing 1,000 unidentified instances per language was released for evaluation. The DSL shared task received 22 registrations and 8 final submissions. The best system obtained 95.7% average accuracy.", "phrases": ["dsl", "similar language", "group", "task report"], "overall_score": 7.5034769079620665, "scores": [4.7085360606892745, 1.9165799325321649, 1.1223462223094487, 0.5645261370011987], "rank_score": 2.0779970881330216} -{"id": "liu-etal-2020-event", "title": "Event Extraction as Machine Reading Comprehension", "abstract": "Event extraction (EE) is a crucial information extraction task that aims to extract event information in texts. Previous methods for EE typically model it as a classification task and are usually prone to the data scarcity problem. In this paper, we propose a new learning paradigm of EE, by explicitly casting it as a machine reading comprehension problem (MRC). Our approach includes an unsupervised question generation process, which can transfer event schema into a set of natural questions, followed by a BERT-based question-answering process to retrieve answers as EE results. This learning paradigm enables us to strengthen the reasoning process of EE, by introducing sophisticated models in MRC, and relieve the data scarcity problem, by introducing the large-scale datasets in MRC. The empirical results show that: i) our approach attains state-of-the-art performance by considerable margins over previous methods. ii) Our model excels in the data-scarce scenario, for example, obtaining 49.8% in F1 for event argument extraction with only 1% data, compared with 2.2% for the previous method. 
iii) Our model also fits with zero-shot scenarios, achieving 37.0% and 16% in F1 on two datasets without using any EE training data.", "phrases": ["machine reading comprehension", "event extraction", "eae"], "overall_score": 6.21330380897333, "scores": [3.50671065077503, 1.868829435274647, 0.8466152084908811], "rank_score": 2.074051764846853} -{"id": "yu-etal-2018-spider", "title": "Spider: A Large-Scale Human-Labeled Dataset for Complex and Cross-Domain Semantic Parsing and Text-to-SQL Task", "abstract": "We present Spider, a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students. It consists of 10,181 questions and 5,693 unique complex SQL queries on 200 databases with multiple tables covering 138 different domains. We define a new complex and cross-domain semantic parsing and text-to-SQL task so that different complicated SQL queries and databases appear in train and test sets. In this way, the task requires the model to generalize well to both new SQL queries and new database schemas. Therefore, Spider is distinct from most of the previous semantic parsing tasks because they all use a single database and have the exact same program in the train set and the test set. We experiment with various state-of-the-art models and the best model achieves only 9.7% exact matching accuracy on a database split setting. This shows that Spider presents a strong challenge for future research. Our dataset and task with the most recent updates are publicly available at .", "phrases": ["semantic parsing", "text-to-sql task", "complex sql query", "database", "natural language question"], "overall_score": 8.491319890803451, "scores": [3.952479916109071, 2.012250431200689, 1.9830490083945003, 1.3056046582292442, 1.1161876742331178], "rank_score": 2.0739143376333247} -{"id": "shardlow-2013-comparison", "title": "A Comparison of Techniques to Automatically Identify Complex Words.", "abstract": "Identifying complex words (CWs) is an important, yet often overlooked, task within lexical simplification (The process of automatically replacing CWs with simpler alternatives). If too many words are identified then substitutions may be made erroneously, leading to a loss of meaning. If too few words are identified then those which impede a user\u2019s understanding may be missed, resulting in a complex final text. This paper addresses the task of evaluating different methods for CW identification. A corpus of sentences with annotated CWs is mined from Simple Wikipedia edit histories, which is then used as the basis for several experiments. Firstly, the corpus design is explained and the results of the validation experiments using human judges are reported. Experiments are carried out into the CW identification techniques of: simplifying everything, frequency thresholding and training a support vector machine. These are based upon previous approaches to the task and show that thresholding does not perform significantly differently to the more naive technique of simplifying everything. 
The support vector machine achieves a slight increase in precision over the other two methods, but at the cost of a dramatic trade-off in recall.", "phrases": ["complex word", "simplification", "frequency", "support vector machine", "cwi"], "overall_score": 7.749378606976482, "scores": [4.20047415004076, 2.1519658616849644, 1.7111441739103124, 1.4075072498239112, 0.895499780988452], "rank_score": 2.07331824328968} -{"id": "laubli-etal-2018-machine", "title": "Has Machine Translation Achieved Human Parity? A Case for Document-level Evaluation", "abstract": "Recent research suggests that neural machine translation achieves parity with professional human translation on the WMT Chinese\u2013English news translation task. We empirically test this claim with alternative evaluation protocols, contrasting the evaluation of single sentences and entire documents. In a pairwise ranking experiment, human raters assessing adequacy and fluency show a stronger preference for human over machine translation when evaluating documents as compared to isolated sentences. Our findings emphasise the need to shift towards document-level evaluation as machine translation improves to the degree that errors which are hard or impossible to spot at the sentence-level become decisive in discriminating quality of different translation outputs.", "phrases": ["machine translation", "parity", "document-level evaluation", "claim", "fluency"], "overall_score": 7.638472063775473, "scores": [4.177379644387387, 2.9178249236599463, 1.3674294695761506, 1.044774561392225, 0.8459685698381633], "rank_score": 2.0706754337707745} -{"id": "sellam-etal-2020-bleurt", "title": "BLEURT: Learning Robust Metrics for Text Generation", "abstract": "Text generation has made significant advances in the last few years. Yet, evaluation metrics have lagged behind, as the most popular choices (e.g., BLEU and ROUGE) may correlate poorly with human judgment. We propose BLEURT, a learned evaluation metric for English based on BERT. BLEURT can model human judgment with a few thousand possibly biased training examples. A key aspect of our approach is a novel pre-training scheme that uses millions of synthetic examples to help the model generalize. BLEURT provides state-of-the-art results on the last three years of the WMT Metrics shared task and the WebNLG data set. In contrast to a vanilla BERT-based approach, it yields superior results even when the training data is scarce and out-of-distribution.", "phrases": ["text generation", "evaluation metric", "human judgment", "bleurt", "synthetic data"], "overall_score": 7.417115116372271, "scores": [4.654791012983325, 2.5580790197216947, 1.1454283831582472, 1.096757327802736, 0.8938711683371358], "rank_score": 2.0697853824006276} -{"id": "wang-etal-2016-attention", "title": "Attention-based LSTM for Aspect-level Sentiment Classification", "abstract": "Aspect-level sentiment classification is a fine-grained task in sentiment analysis. Since it provides more complete and in-depth results, aspect-level sentiment analysis has received much attention in recent years. In this paper, we reveal that the sentiment polarity of a sentence is not only determined by the content but is also highly related to the concerned aspect. For instance, \u201cThe appetizers are ok, but the service is slow.\u201d, for aspect taste, the polarity is positive while for service, the polarity is negative. Therefore, it is worthwhile to explore the connection between an aspect and the content of a sentence. 
To this end, we propose an Attention-based Long Short-Term Memory Network for aspect-level sentiment classification. The attention mechanism can concentrate on different parts of a sentence when different aspects are taken as input. We experiment on the SemEval 2014 dataset and results show that our model achieves state-of-the-art performance on aspect-level sentiment classification.", "phrases": ["sentiment classification", "short-term memory network", "different aspect", "attention-based lstm", "hidden state"], "overall_score": 9.039207937877329, "scores": [4.40128162049503, 3.475857613185913, 1.3657181514566896, 0.5693852935708702, 0.5314074151743305], "rank_score": 2.0687300187765665} -{"id": "zhou-etal-2005-exploring", "title": "Exploring Various Knowledge in Relation Extraction", "abstract": "Extracting semantic relationships between entities is challenging. This paper investigates the incorporation of diverse lexical, syntactic and semantic knowledge in feature-based relation extraction using SVM. Our study illustrates that the base phrase chunking information is very effective for relation extraction and contributes to most of the performance improvement from the syntactic aspect, while additional information from full parsing gives limited further enhancement. This suggests that most of the useful information in full parse trees for relation extraction is shallow and can be captured by chunking. We also demonstrate how semantic information such as WordNet and Name List can be used in feature-based relation extraction to further improve the performance. Evaluation on the ACE corpus shows that effective incorporation of diverse features enables our system to outperform previously best-reported systems on the 24 ACE relation subtypes and significantly outperforms tree kernel-based systems by over 20 in F-measure on the 5 ACE relation types.", "phrases": ["relation extraction", "svm", "feature-based method", "syntactic feature", "large amount"], "overall_score": 8.81830396164227, "scores": [5.814986580544526, 1.3565175738236666, 1.210855868953461, 1.1225944415766265, 0.8386606035677201], "rank_score": 2.0687230136932} -{"id": "kim-etal-2009-overview", "title": "Overview of BioNLP'09 Shared Task on Event Extraction", "abstract": "The paper presents the design and implementation of the BioNLP'09 Shared Task, and reports the final results with analysis. The shared task consists of three sub-tasks, each of which addresses bio-molecular event extraction at a different level of specificity. The data was developed based on the GENIA event corpus. The shared task was run over 12 weeks, drawing initial interest from 42 teams. Of these teams, 24 submitted final results. The evaluation results are encouraging, indicating that state-of-the-art performance is approaching a practically applicable level and revealing some remaining challenges.", "phrases": ["shared task", "event extraction", "bionlp", "negation", "trigger"], "overall_score": 8.46523827826377, "scores": [5.318219698441879, 1.654659190765282, 1.6908191613401908, 0.8383607596800446, 0.8356620992085702], "rank_score": 2.067544181887193} -{"id": "sun-etal-2019-aspect", "title": "Aspect-Level Sentiment Analysis Via Convolution over Dependency Tree", "abstract": "We propose a method based on neural networks to identify the sentiment polarity of opinion words expressed on a specific aspect of a sentence. 
Although a large majority of works typically focus on leveraging the expressive power of neural networks in handling this task, we explore the possibility of integrating dependency trees with neural networks for representation learning. To this end, we present a convolution over a dependency tree (CDT) model which exploits a Bi-directional Long Short Term Memory (Bi-LSTM) to learn representations for features of a sentence, and further enhances the embeddings with a graph convolutional network (GCN) which operates directly on the dependency tree of the sentence. Our approach propagates both contextual and dependency information from opinion words to aspect words, offering discriminative properties for supervision. Experimental results rank our approach as the new state-of-the-art in aspect-based sentiment classification.", "phrases": ["sentiment analysis", "dependency tree", "convolutional network"], "overall_score": 6.8864852785716195, "scores": [3.9463356002737973, 1.1769622282693684, 1.0766385116872261], "rank_score": 2.066645446743464} -{"id": "antoun-etal-2020-arabert", "title": "AraBERT: Transformer-based Model for Arabic Language Understanding", "abstract": "The Arabic language is a morphologically rich language with relatively few resources and a less explored syntax compared to English. Given these limitations, Arabic Natural Language Processing (NLP) tasks like Sentiment Analysis (SA), Named Entity Recognition (NER), and Question Answering (QA), have proven to be very challenging to tackle. Recently, with the surge of transformer-based models, language-specific BERT-based models have proven to be very efficient at language understanding, provided they are pre-trained on a very large corpus. Such models were able to set new standards and achieve state-of-the-art results for most NLP tasks. In this paper, we pre-trained BERT specifically for the Arabic language in the pursuit of achieving the same success that BERT did for the English language. The performance of AraBERT is compared to multilingual BERT from Google and other state-of-the-art approaches. The results showed that the newly developed AraBERT achieved state-of-the-art performance on most tested Arabic NLP tasks. The pretrained AraBERT models are publicly available on hoping to encourage research and applications for Arabic NLP.", "phrases": ["arabic language understanding", "question answering", "arabert"], "overall_score": 7.282833477374662, "scores": [4.394192981093967, 0.9632646299498971, 0.8383071950807612], "rank_score": 2.0652549353748753} -{"id": "liu-etal-2016-evaluate", "title": "How NOT To Evaluate Your Dialogue System: An Empirical Study of Unsupervised Evaluation Metrics for Dialogue Response Generation", "abstract": "We investigate evaluation metrics for dialogue response generation systems where supervised labels, such as task completion, are not available. Recent works in response generation have adopted metrics from machine translation to compare a model's generated response to a single target response. We show that these metrics correlate very weakly with human judgements in the non-technical Twitter domain, and not at all in the technical Ubuntu domain. 
We provide quantitative and qualitative results highlighting specific weaknesses in existing metrics, and provide recommendations for future development of better automatic evaluation metrics for dialogue systems.", "phrases": ["dialogue system", "evaluation metric", "machine translation", "human judgment", "response quality"], "overall_score": 9.221911228045977, "scores": [3.9942613403847185, 2.3836377381205516, 1.6270517638095394, 1.4753328022947063, 0.8445047108932002], "rank_score": 2.0649576711005433} -{"id": "li-etal-2013-joint", "title": "Joint Event Extraction via Structured Prediction with Global Features", "abstract": "Traditional approaches to the task of ACE event extraction usually rely on sequential pipelines with multiple stages, which suffer from error propagation since event triggers and arguments are predicted in isolation by independent local classifiers. By contrast, we propose a joint framework based on structured prediction which extracts triggers and arguments together so that the local predictions can be mutually improved. In addition, we propose to incorporate global features which explicitly capture the dependencies of multiple triggers and arguments. Experimental results show that our joint approach with local features outperforms the pipelined baseline, and adding global features further improves the performance significantly. Our approach advances state-of-the-art sentence-level event extraction, and even outperforms previous argument labeling methods which use external knowledge from other sentences and documents.", "phrases": ["event extraction", "structured prediction", "global feature", "error propagation", "trigger"], "overall_score": 8.708070472001772, "scores": [3.2822178300694516, 2.4858803082924683, 1.8620322450070095, 1.6401665659559876, 1.048525861828512], "rank_score": 2.0637645622306855} -{"id": "meyer-gurevych-2011-psycholinguists", "title": "What Psycholinguists Know About Chemistry: Aligning Wiktionary and WordNet for Increased Domain Coverage", "abstract": "To date, no lexical resource can claim to be fully comprehensive or perform best for every NLP task. This has caused a steep increase in resource alignment research. An important challenge is the alignment of differently represented word senses, which we address in this paper. In particular, we propose a new automatically aligned resource of Wiktionary and WordNet that has (i) a very high domain coverage of word senses and (ii) an enriched sense representation, including pronunciations, etymologies, translations, etc. We evaluate our alignment both quantitatively and qualitatively, and explore how it can contribute to practical tasks.", "phrases": ["wiktionary", "wordnet", "domain coverage"], "overall_score": 5.438385499976067, "scores": [3.020895125820407, 2.6208583370697904, 0.5404375478320853], "rank_score": 2.0607303369074272} -{"id": "dahlmeier-ng-2012-beam", "title": "A Beam-Search Decoder for Grammatical Error Correction", "abstract": "We present a novel beam-search decoder for grammatical error correction. The decoder iteratively generates new hypothesis corrections from current hypotheses and scores them based on features of grammatical correctness and fluency. These features include scores from discriminative classifiers for specific error categories, such as articles and prepositions. 
Unlike all previous approaches, our method is able to perform correction of whole sentences with multiple and interacting errors while still taking advantage of powerful existing classifier approaches. Our decoder achieves an F1 correction score significantly higher than all previous published scores on the Helping Our Own (HOO) shared task data set.", "phrases": ["beam-search decoder", "grammatical error correction", "noun number"], "overall_score": 5.8381354651111375, "scores": [2.8516606768423234, 2.8051906620442977, 0.524965654202443], "rank_score": 2.0606056643630217} -{"id": "qian-etal-2019-reducing", "title": "Reducing Gender Bias in Word-Level Language Models with a Gender-Equalizing Loss Function", "abstract": "Gender bias exists in natural language datasets, which neural language models tend to learn, resulting in biased text generation. In this research, we propose a debiasing approach based on the loss function modification. We introduce a new term to the loss function which attempts to equalize the probabilities of male and female words in the output. Using an array of bias evaluation metrics, we provide empirical evidence that our approach successfully mitigates gender bias in language models without increasing perplexity. In comparison to existing debiasing strategies, data augmentation, and word embedding debiasing, our method performs better in several aspects, especially in reducing gender bias in occupation words. Finally, we introduce a combination of data augmentation and our approach and show that it outperforms existing strategies in all bias evaluation metrics.", "phrases": ["gender bias", "language model", "loss function"], "overall_score": 4.527407325497441, "scores": [2.8519398405389773, 2.4609518942046753, 0.8686438791345495], "rank_score": 2.0605118712927344} -{"id": "yu-ettinger-2020-assessing", "title": "Assessing Phrasal Representation and Composition in Transformers", "abstract": "Deep transformer models have pushed performance on NLP tasks to new limits, suggesting sophisticated treatment of complex linguistic inputs, such as phrases. However, we have limited understanding of how these models handle representation of phrases, and whether this reflects sophisticated composition of phrase meaning like that done by humans. In this paper, we present systematic analysis of phrasal representations in state-of-the-art pre-trained transformers. We use tests leveraging human judgments of phrase similarity and meaning shift, and compare results before and after control of word overlap, to tease apart lexical effects versus composition effects. We find that phrase representation in these models relies heavily on word content, with little evidence of nuanced composition. We also identify variations in phrase representation quality across models, layers, and representation types, and make corresponding recommendations for usage of representations from these models.", "phrases": ["phrasal representation", "composition", "pre-trained transformer"], "overall_score": 5.434744939324831, "scores": [3.153195858524047, 2.491664996649197, 0.5331916770737328], "rank_score": 2.0593508440823256} -{"id": "yeh-etal-2009-wikiwalk", "title": "WikiWalk: Random walks on Wikipedia for Semantic Relatedness", "abstract": "Computing semantic relatedness of natural language texts is a key component of tasks such as information retrieval and summarization, and often depends on knowledge of a broad range of real-world concepts and relationships. 
We address this knowledge integration issue by computing semantic relatedness using personalized PageRank (random walks) on a graph derived from Wikipedia. This paper evaluates methods for building the graph, including link selection strategies, and two methods for representing input texts as distributions over the graph nodes: one based on a dictionary lookup, the other based on Explicit Semantic Analysis. We evaluate our techniques on standard word relatedness and text similarity datasets, finding that they capture similarity information complementary to existing Wikipedia-based relatedness measures, resulting in small improvements on a state-of-the-art measure.", "phrases": ["random walk", "wikipedia", "semantic relatedness"], "overall_score": 4.522722315419621, "scores": [2.736719107466563, 2.5688258232799996, 0.8695939632066796], "rank_score": 2.0583796313177474} -{"id": "fader-etal-2011-identifying", "title": "Identifying Relations for Open Information Extraction", "abstract": "Open Information Extraction (IE) is the task of extracting assertions from massive corpora without requiring a pre-specified vocabulary. This paper shows that the output of state-of-the-art Open IE systems is rife with uninformative and incoherent extractions. To overcome these problems, we introduce two simple syntactic and lexical constraints on binary relations expressed by verbs. We implemented the constraints in the ReVerb Open IE system, which more than doubles the area under the precision-recall curve relative to previous extractors such as TextRunner and WOEpos. More than 30% of ReVerb's extractions are at precision 0.8 or higher---compared to virtually none for earlier systems. The paper concludes with a detailed analysis of ReVerb's errors, suggesting directions for future work.", "phrases": ["open information extraction", "lexical constraint", "textrunner", "triple", "oie"], "overall_score": 9.306004319311763, "scores": [5.514304528407633, 1.6037586669699293, 1.2708240946120186, 1.050846183147294, 0.850447238258684], "rank_score": 2.058036142279112} -{"id": "wu-etal-2017-adversarial", "title": "Adversarial Training for Relation Extraction", "abstract": "Adversarial training is a means of regularizing classification algorithms by generating adversarial noise to the training data. We apply adversarial training in relation extraction within the multi-instance multi-label learning framework. We evaluate various neural network architectures on two different datasets. Experimental results demonstrate that adversarial training is generally effective for both CNN and RNN models and significantly improves the precision of predicted relations.", "phrases": ["relation extraction", "rnn", "adversarial training"], "overall_score": 6.162099037958386, "scores": [3.187782390378844, 2.448424109596324, 0.5346710771268459], "rank_score": 2.056959192367338} -{"id": "hokamp-liu-2017-lexically", "title": "Lexically Constrained Decoding for Sequence Generation Using Grid Beam Search", "abstract": "We present Grid Beam Search (GBS), an algorithm which extends beam search to allow the inclusion of pre-specified lexical constraints. The algorithm can be used with any model which generates sequences token by token. Lexical constraints take the form of phrases or words that must be present in the output sequence. This is a very general way to incorporate auxiliary knowledge into a model's output without requiring any modification of the parameters or training data. 
We demonstrate the feasibility and flexibility of Lexically Constrained Decoding by conducting experiments on Neural Interactive-Predictive Translation, as well as Domain Adaptation for Neural Machine Translation. Experiments show that GBS can provide large improvements in translation quality in interactive scenarios, and that, even without any user input, GBS can be used to achieve significant gains in performance in domain adaptation scenarios.", "phrases": ["decoding", "grid beam search", "lexical constraint", "translation quality", "generation model"], "overall_score": 8.386733991022066, "scores": [4.171161516939492, 2.272880039509346, 2.4420728933434686, 0.851698541348789, 0.5462544225829234], "rank_score": 2.0568134827448032} -{"id": "heilman-etal-2014-predicting", "title": "Predicting Grammaticality on an Ordinal Scale", "abstract": "Automated methods for identifying whether sentences are grammatical have various potential applications (e.g., machine translation, automated essay scoring, computer-assisted language learning). In this work, we construct a statistical model of grammaticality using various linguistic features (e.g., misspelling counts, parser outputs, n-gram language model scores). We also present a new publicly available dataset of learner sentences judged for grammaticality on an ordinal scale. In evaluations, we compare our system to the one from Post (2011) and find that our approach yields state-of-the-art performance.", "phrases": ["grammaticality", "ordinal scale", "gug"], "overall_score": 5.423480195344826, "scores": [3.758833775364471, 1.856473182547887, 0.5499401564451407], "rank_score": 2.0550823714524995} -{"id": "chen-etal-2017-adversarial", "title": "Adversarial Multi-Criteria Learning for Chinese Word Segmentation", "abstract": "Different linguistic perspectives cause many diverse segmentation criteria for Chinese word segmentation (CWS). Most existing methods focus on improving the performance for each single criterion. However, it is interesting to exploit these different criteria and mine their common underlying knowledge. In this paper, we propose adversarial multi-criteria learning for CWS by integrating shared knowledge from multiple heterogeneous segmentation criteria. Experiments on eight corpora with heterogeneous segmentation criteria show that the performance of each corpus obtains a significant improvement, compared to single-criterion learning. The source code for this paper is available on GitHub.", "phrases": ["chinese word segmentation", "cws", "adversarial multi-criteria learning"], "overall_score": 6.151446650287076, "scores": [2.8228807250249686, 1.9626696498772245, 1.3746596391148118], "rank_score": 2.0534033380056687} -{"id": "liu-etal-2018-modeling", "title": "Modeling Sentiment Association in Discourse for Humor Recognition", "abstract": "Humor is one of the most attractive parts of human communication. However, automatically recognizing humor in text is challenging due to the complex characteristics of humor. This paper proposes to model sentiment association between discourse units to indicate how the punchline breaks the expectation of the setup. We found that discourse relation, sentiment conflict and sentiment transition are effective indicators for humor recognition. 
From the perspective of using sentiment-related features, sentiment association in discourse is more useful than counting the number of emotional words.", "phrases": ["sentiment association", "humor recognition", "discourse relation"], "overall_score": 4.26491461298143, "scores": [2.823463403693475, 2.416922572652136, 0.9125851856175708], "rank_score": 2.0509903873210606} -{"id": "barzilay-mckeown-2005-sentence", "title": "Sentence Fusion for Multidocument News Summarization", "abstract": "A system that can produce informative summaries, highlighting common information found in many online documents, will help Web users to pinpoint information that they need without extensive reading. In this article, we introduce sentence fusion, a novel text-to-text generation technique for synthesizing common information across documents. Sentence fusion involves bottom-up local multisequence alignment to identify phrases conveying similar information and statistical generation to combine common phrases into a sentence. Sentence fusion moves the summarization field from the use of purely extractive methods to the generation of abstracts that contain sentences not found in any of the input documents and can synthesize information across sources.", "phrases": ["summarization", "common information", "sentence fusion", "linearization", "text-to-text generation process"], "overall_score": 7.803355516233986, "scores": [5.657058854494757, 2.0756890255767937, 1.4721252292878306, 0.524627699208183, 0.520101746431752], "rank_score": 2.0499205109998635} -{"id": "zhou-etal-2019-gear", "title": "GEAR: Graph-based Evidence Aggregating and Reasoning for Fact Verification", "abstract": "Fact verification (FV) is a challenging task which requires retrieving relevant evidence from plain text and using the evidence to verify given claims. Many claims require simultaneously integrating and reasoning over several pieces of evidence for verification. However, previous work employs simple models to extract information from evidence without letting evidence communicate with each other, e.g., merely concatenating the evidence for processing. Therefore, these methods are unable to grasp sufficient relational and logical information among the evidence. To alleviate this issue, we propose a graph-based evidence aggregating and reasoning (GEAR) framework which enables information to transfer on a fully-connected evidence graph and then utilizes different aggregators to collect multi-evidence information. We further employ BERT, an effective pre-trained language representation model, to improve the performance. Experimental results on a large-scale benchmark dataset FEVER have demonstrated that GEAR could leverage multi-evidence information for FV and thus achieves a promising result with a test FEVER score of 67.10%. Our code is available at .", "phrases": ["reasoning", "fact verification", "piece", "veracity"], "overall_score": 6.743189693927057, "scores": [3.3950789911659283, 3.0593876086289966, 0.8801120074552026, 0.8493090887475052], "rank_score": 2.0459719239994083} -{"id": "owoputi-etal-2013-improved", "title": "Improved Part-of-Speech Tagging for Online Conversational Text with Word Clusters", "abstract": "We consider the problem of part-of-speech tagging for informal, online conversational text. We systematically evaluate the use of large-scale unsupervised word clustering and new lexical features to improve tagging accuracy. 
With these features, our system achieves state-of-the-art tagging results on both Twitter and IRC POS tagging tasks; Twitter tagging is improved from 90% to 93% accuracy (more than 3% absolute). Qualitative analysis of these word clusters yields insights about NLP and linguistic phenomena in this genre. Additionally, we contribute the first POS annotation guidelines for such text and release a new dataset of English language tweets annotated using these guidelines. Tagging software, annotation guidelines, and large-scale word clusters are available at: http://www.ark.cs.cmu.edu/TweetNLP. This paper describes release 0.3 of the \u201cCMU Twitter Part-of-Speech Tagger\u201d and annotated data. [This paper is forthcoming in Proceedings of NAACL 2013; Atlanta, GA, USA.]", "phrases": ["part-of-speech tagging", "online conversational text", "word cluster"], "overall_score": 7.07844174891021, "scores": [2.6068314561167103, 2.572203229299878, 0.9481849996079079], "rank_score": 2.042406561674832} -{"id": "chung-etal-2016-character", "title": "A Character-level Decoder without Explicit Segmentation for Neural Machine Translation", "abstract": "The existing machine translation systems, whether phrase-based or neural, have relied almost exclusively on word-level modelling with explicit segmentation. In this paper, we ask a fundamental question: can neural machine translation generate a character sequence without any explicit segmentation? To answer this question, we evaluate an attention-based encoder-decoder with a subword-level encoder and a character-level decoder on four language pairs (En-Cs, En-De, En-Ru and En-Fi) using the parallel corpora from WMT'15. Our experiments show that the models with a character-level decoder outperform the ones with a subword-level decoder on all of the four language pairs. Furthermore, the ensembles of neural models with a character-level decoder outperform the state-of-the-art non-neural machine translation systems on En-Cs, En-De and En-Fi and perform comparably on En-Ru.", "phrases": ["character-level decoder", "explicit segmentation", "neural machine translation", "word-level modelling", "character"], "overall_score": 6.940426243468159, "scores": [3.501099050803283, 3.4104470411898316, 1.6864980409652321, 1.0675530585236872, 0.5373188181680619], "rank_score": 2.040583201930019} -{"id": "huang-etal-2016-visual", "title": "Visual Storytelling", "abstract": "We introduce the first dataset for sequential vision-to-language, and explore how this data may be used for the task of visual storytelling. The first release of this dataset, SIND v.1, includes 81,743 unique photos in 20,211 sequences, aligned to both descriptive (caption) and story language. We establish several strong baselines for the storytelling task, and motivate an automatic metric to benchmark progress. 
Modelling concrete description as well as figurative and social language, as provided in this dataset and the storytelling task, has the potential to move artificial intelligence from basic understandings of typical visual scenes towards more and more human-like understanding of grounded event structure and subjective expression.", "phrases": ["human-like understanding", "visual storytelling", "image", "temporal sequence"], "overall_score": 6.294108360043956, "scores": [5.278114030030218, 1.769290838316241, 0.5499425527030463, 0.5476178520163681], "rank_score": 2.0362413182664683} -{"id": "dorr-etal-2003-hedge", "title": "Hedge Trimmer: A Parse-and-Trim Approach to Headline Generation", "abstract": "This paper presents Hedge Trimmer, a HEaDline GEneration system that creates a headline for a newspaper story using linguistically-motivated heuristics to guide the choice of a potential headline. We present feasibility tests used to establish the validity of an approach that constructs a headline by selecting words in order from a story. In addition, we describe experimental results that demonstrate the effectiveness of our linguistically-motivated approach over a HMM-based model, using both human evaluation and automatic metrics for comparing the two approaches.", "phrases": ["headline generation", "linguistically-motivated heuristic", "hedge trimmer"], "overall_score": 7.119653217094435, "scores": [2.6845539219092536, 2.338092812796786, 1.0860086237640232], "rank_score": 2.0362184528233542} -{"id": "li-etal-2016-deep", "title": "Deep Reinforcement Learning for Dialogue Generation", "abstract": "Recent neural models of dialogue generation offer great promise for generating responses for conversational agents, but tend to be shortsighted, predicting utterances one at a time while ignoring their influence on future outcomes. Modeling the future direction of a dialogue is crucial to generating coherent, interesting dialogues, a need which led traditional NLP models of dialogue to draw on reinforcement learning. In this paper, we show how to integrate these goals, applying deep reinforcement learning to model future reward in chatbot dialogue. The model simulates dialogues between two virtual agents, using policy gradient methods to reward sequences that display three useful conversational properties: informativity (non-repetitive turns), coherence, and ease of answering (related to forward-looking function). We evaluate our model on diversity, length as well as with human judges, showing that the proposed algorithm generates more interactive responses and manages to foster a more sustained conversation in dialogue simulation. This work marks a first step towards learning a neural conversational model based on the long-term success of dialogues.", "phrases": ["dialogue generation", "reward", "deep reinforcement learning", "encoder-decoder model"], "overall_score": 8.496659616335966, "scores": [3.5468670755242813, 2.3909309415581315, 1.6825542982311978, 0.5213547827423772], "rank_score": 2.035426774513997} -{"id": "velldal-etal-2012-speculation", "title": "Speculation and Negation: Rules, Rankers, and the Role of Syntax", "abstract": "This article explores a combination of deep and shallow approaches to the problem of resolving the scope of speculation and negation within a sentence, specifically in the domain of biomedical research literature. The first part of the article focuses on speculation. 
After first showing how speculation cues can be accurately identified using a very simple classifier informed only by local lexical context, we go on to explore two different syntactic approaches to resolving the in-sentence scopes of these cues. Whereas one uses manually crafted rules operating over dependency structures, the other automatically learns a discriminative ranking function over nodes in constituent trees. We provide an in-depth error analysis and discussion of various linguistic properties characterizing the problem, and show that although both approaches perform well in isolation, even better results can be obtained by combining them, yielding the best published results to date on the CoNLL-2010 Shared Task data. The last part of the article describes how our speculation system is ported to also resolve the scope of negation. With only modest modifications to the initial design, the system obtains state-of-the-art results on this task also.", "phrases": ["negation", "scope", "syntactic approach", "speculation"], "overall_score": 6.0969229035056935, "scores": [3.4060243783481106, 2.7945942369993158, 1.4004385576315197, 0.5397542838106827], "rank_score": 2.0352028641974074} -{"id": "zhang-etal-2019-joint", "title": "Joint Slot Filling and Intent Detection via Capsule Neural Networks", "abstract": "Being able to recognize words as slots and detect the intent of an utterance has been a keen issue in natural language understanding. The existing works either treat slot filling and intent detection separately in a pipeline manner, or adopt joint models which sequentially label slots while summarizing the utterance-level intent without explicitly preserving the hierarchical relationship among words, slots, and intents. To exploit the semantic hierarchy for effective modeling, we propose a capsule-based neural network model which accomplishes slot filling and intent detection via a dynamic routing-by-agreement schema. A re-routing schema is proposed to further synergize the slot filling performance using the inferred intent representation. Experiments on two real-world datasets show the effectiveness of our model when compared with other alternative model architectures, as well as existing natural language understanding services.", "phrases": ["slot filling", "intent detection", "hierarchical capsule"], "overall_score": 5.50657746840727, "scores": [3.0699736341817747, 2.47394004538782, 0.5563175489719111], "rank_score": 2.0334104095138357} -{"id": "erk-2009-representing", "title": "Representing words as regions in vector space", "abstract": "Vector space models of word meaning typically represent the meaning of a word as a vector computed by summing over all its corpus occurrences. Words close to this point in space can be assumed to be similar to it in meaning. But how far around this point does the region of similar meaning extend? In this paper we discuss two models that represent word meaning as regions in vector space. Both representations can be computed from traditional point representations in vector space. 
We find that both models perform at over 95% F-score on a token classification task.", "phrases": ["region", "vector space", "diversity"], "overall_score": 4.875072303971533, "scores": [3.033645001155762, 2.522913521810396, 0.5426306609079177], "rank_score": 2.0330630612913585} -{"id": "wachsmuth-etal-2017-pagerank", "title": "\u201cPageRank\u201d for Argument Relevance", "abstract": "Future search engines are expected to deliver pro and con arguments in response to queries on controversial topics. While argument mining is now in the focus of research, the question of how to retrieve the relevant arguments remains open. This paper proposes a radical model to assess relevance objectively at web scale: the relevance of an argument's conclusion is decided by what other arguments reuse it as a premise. We build an argument graph for this model that we analyze with a recursive weighting scheme, adapting key ideas of PageRank. In experiments on a large ground-truth argument graph, the resulting relevance scores correlate with human average judgments. We outline what natural language challenges must be faced at web scale in order to stepwise bring argument relevance to web search engines.", "phrases": ["pagerank", "argument relevance", "premise"], "overall_score": 4.226565514696806, "scores": [2.993419662605688, 2.5380152349084284, 0.5662102105312757], "rank_score": 2.032548369348464} -{"id": "tan-lee-2014-corpus", "title": "A Corpus of Sentence-level Revisions in Academic Writing: A Step towards Understanding Statement Strength in Communication", "abstract": "The strength with which a statement is made can have a significant impact on the audience. For example, international relations can be strained by how the media in one country describes an event in another; and papers can be rejected because they overstate or understate their findings. It is thus important to understand the effects of statement strength. A first step is to be able to distinguish between strong and weak statements. However, even this problem is understudied, partly due to a lack of data. Since strength is inherently relative, revisions of texts that make claims are a natural source of data on strength differences. In this paper, we introduce a corpus of sentence-level revisions from academic writing. We also describe insights gained from our annotation efforts for this task.", "phrases": ["revision", "academic writing", "statement strength"], "overall_score": 4.21810042731032, "scores": [3.1509170695066064, 1.6962732529224709, 1.2382422460231381], "rank_score": 2.0284775228174055} -{"id": "michel-neubig-2018-mtnt", "title": "MTNT: A Testbed for Machine Translation of Noisy Text", "abstract": "Noisy or non-standard input text can cause disastrous mistranslations in most modern Machine Translation (MT) systems, and there has been growing research interest in creating noise-robust MT systems. However, as of yet there are no publicly available parallel corpora with naturally occurring noisy inputs and translations, and thus previous work has resorted to evaluating on synthetically created datasets. In this paper, we propose a benchmark dataset for Machine Translation of Noisy Text (MTNT), consisting of noisy comments on Reddit () and professionally sourced translations. We commissioned translations of English comments into French and Japanese, as well as French and Japanese comments into English, on the order of 7k-37k sentences per language pair. 
We qualitatively and quantitatively examine the types of noise included in this dataset, then demonstrate that existing MT models fail badly on a number of noise-related phenomena, even after performing adaptation on a small training set of in-domain data. This indicates that this dataset can provide an attractive testbed for methods tailored to handling noisy text in MT.", "phrases": ["noisy text", "french", "mtnt", "robustness", "new dataset"], "overall_score": 7.092402780738351, "scores": [4.14687779525897, 3.735871167412068, 0.8573608040627656, 0.8490330804516869, 0.5529813382455668], "rank_score": 2.0284248370862112} -{"id": "wu-weld-2010-open", "title": "Open Information Extraction Using Wikipedia", "abstract": "Information-extraction (IE) systems seek to distill semantic relations from natural-language text, but most systems use supervised learning of relation-specific examples and are thus limited by the availability of training data. Open IE systems such as TextRunner, on the other hand, aim to handle the unbounded number of relations found on the Web. But how well can these open systems perform? \n \nThis paper presents WOE, an open IE system which improves dramatically on TextRunner's precision and recall. The key to WOE's performance is a novel form of self-supervised learning for open extractors -- using heuristic matches between Wikipedia infobox attribute values and corresponding sentences to construct training data. Like TextRunner, WOE's extractor eschews lexicalized features and handles an unbounded set of semantic relations. WOE can operate in two modes: when restricted to POS tag features, it runs as quickly as TextRunner, but when set to use dependency-parse features its precision and recall rise even higher.", "phrases": ["information extraction", "wikipedia", "web"], "overall_score": 7.3748906011485, "scores": [2.667692249683243, 2.570957565870331, 0.8435901182792751], "rank_score": 2.0274133112776163} -{"id": "xue-2003-chinese", "title": "Chinese Word Segmentation as Character Tagging", "abstract": "In this paper we report results of a supervised machine-learning approach to Chinese word segmentation. A maximum entropy tagger is trained on manually annotated data to automatically assign to Chinese characters, or hanzi, tags that indicate the position of a hanzi within a word. The tagged output is then converted into segmented text for evaluation. Preliminary results show that this approach is competitive against other supervised machine-learning segmenters reported in previous studies, achieving precision and recall rates of 95.01% and 94.94% respectively, trained on a 237K-word training set.", "phrases": ["word segmentation", "chinese character", "cws task", "character-based tagging approach", "end"], "overall_score": 8.548534645101505, "scores": [5.883749279398765, 1.2429815705109373, 1.1969927133607061, 0.9790530908774522, 0.8270006118290961], "rank_score": 2.025955453195391} -{"id": "moldovan-etal-2003-cogex", "title": "COGEX: A Logic Prover for Question Answering", "abstract": "Recent TREC results have demonstrated the need for deeper text understanding methods. This paper introduces the idea of automated reasoning applied to question answering and shows the feasibility of integrating a logic prover into a Question Answering system. The approach is to transform questions and answer passages into logic representations. 
World knowledge axioms as well as linguistic axioms are supplied to the prover, which renders a deep understanding of the relationship between question text and answer text. Moreover, the trace of the proofs provides answer justifications. The results show that the prover boosts the performance of the QA system on TREC questions by 30%.", "phrases": ["logic prover", "question answering", "cogex"], "overall_score": 5.733412406127691, "scores": [3.2549815297799585, 1.5174269165896759, 1.298520611785257], "rank_score": 2.0236430193849637} -{"id": "wong-kit-2012-extending", "title": "Extending Machine Translation Evaluation Metrics with Lexical Cohesion to Document Level", "abstract": "This paper proposes the utilization of lexical cohesion to facilitate evaluation of machine translation at the document level. As a linguistic means to achieve text coherence, lexical cohesion ties sentences together into a meaningfully interwoven structure through words with the same or related meaning. A comparison between machine and human translation is conducted to illustrate one of their critical distinctions: human translators tend to use more cohesion devices than machines. Various ways to apply this feature to evaluate machine-translated documents are presented, including one without reliance on reference translation. Experimental results show that incorporating this feature into sentence-level evaluation metrics can enhance their correlation with human judgements.", "phrases": ["machine translation", "lexical cohesion", "document level"], "overall_score": 5.339988440960533, "scores": [3.625278754148693, 1.8482699409701062, 0.5967875355901829], "rank_score": 2.0234454102363273} -{"id": "bunescu-mooney-2005-shortest", "title": "A Shortest Path Dependency Kernel for Relation Extraction", "abstract": "We present a novel approach to relation extraction, based on the observation that the information required to assert a relationship between two named entities in the same sentence is typically captured by the shortest path between the two entities in the dependency graph. Experiments on extracting top-level relations from the ACE (Automated Content Extraction) newspaper corpus show that the new shortest path dependency kernel outperforms a recent approach based on dependency tree kernels.", "phrases": ["path", "relation extraction", "kernel-based method", "predicate-argument sequence", "svm"], "overall_score": 8.963524029909152, "scores": [5.65610364351766, 2.0819621267506094, 1.3125331047478823, 0.5399586267693148, 0.5244217862874748], "rank_score": 2.022995857614588} -{"id": "jain-wallace-2019-attention", "title": "Attention is not Explanation", "abstract": "Attention mechanisms have seen wide adoption in neural NLP models. In addition to improving predictive performance, these are often touted as affording transparency: models equipped with attention provide a distribution over attended-to input units, and this is often presented (at least implicitly) as communicating the relative importance of inputs. However, it is unclear what relationship exists between attention weights and model outputs. In this work we perform extensive experiments across a variety of NLP tasks that aim to assess the degree to which attention weights provide meaningful \u201cexplanations\u201d for predictions. We find that they largely do not. 
For example, learned attention weights are frequently uncorrelated with gradient-based measures of feature importance, and one can identify very different attention distributions that nonetheless yield equivalent predictions. Our findings show that standard attention modules do not provide meaningful explanations and should not be treated as though they do.", "phrases": ["explanation", "transparency", "attention weight", "feature importance"], "overall_score": 7.453218375422271, "scores": [4.84735649058685, 1.6567581421278355, 1.047078687312749, 0.5306304078910726], "rank_score": 2.020455931979627} -{"id": "liu-etal-2019-multi", "title": "Multi-Task Deep Neural Networks for Natural Language Understanding", "abstract": "In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations to help adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement) as of February 25, 2019 on the latest GLUE test set. We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. Our code and pre-trained models will be made publicly available.", "phrases": ["deep neural network", "natural language understanding", "mt-dnn", "bert", "multi-task learning"], "overall_score": 9.363151409107335, "scores": [1.982783091425734, 2.8096593265441876, 2.115799806002129, 1.6106115340973215, 1.582223816138319], "rank_score": 2.020215514841538} -{"id": "mostafazadeh-etal-2016-generating", "title": "Generating Natural Questions About an Image", "abstract": "There has been an explosion of work in the vision & language community during the past few years from image captioning to video transcription, and answering questions about images. These tasks have focused on literal descriptions of the image. To move beyond the literal, we choose to explore how questions about an image are often directed at commonsense inference and the abstract events evoked by objects in the image. In this paper, we introduce the novel task of Visual Question Generation (VQG), where the system is tasked with asking a natural and engaging question when shown an image. We provide three datasets which cover a variety of images from object-centric to event-centric, with considerably more abstract training data than provided to state-of-the-art captioning systems thus far. We train and test several generative and retrieval models to tackle the task of VQG. Evaluation results show that while such models ask reasonable questions for a variety of images, there is still a wide gap with human performance which motivates further work on connecting images with commonsense knowledge and pragmatics. 
Our proposed task offers a new challenge to the community which we hope furthers interest in exploring deeper connections between vision & language.", "phrases": ["image", "question generation", "engaging question", "conversation"], "overall_score": 6.7222391943559705, "scores": [4.445448095837328, 1.7911231492145603, 1.3012518356109566, 0.5315966369192829], "rank_score": 2.0173549293955317} -{"id": "conneau-etal-2020-unsupervised", "title": "Unsupervised Cross-lingual Representation Learning at Scale", "abstract": "This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +14.6% average accuracy on XNLI, +13% average F1 score on MLQA, and +2.4% F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 15.7% in XNLI accuracy for Swahili and 11.4% for Urdu over previous XLM models. We also present a detailed empirical analysis of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-R is very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make our code and models publicly available.", "phrases": ["cross-lingual representation learning", "scale", "multilinguality", "language model", "pre-trained model"], "overall_score": 11.060532126313747, "scores": [1.6586790240425526, 1.0242413579182272, 3.3203066473364466, 2.2083543097516003, 1.871318958295967], "rank_score": 2.016580059468959} -{"id": "wu-etal-2019-hierarchical", "title": "Hierarchical User and Item Representation with Three-Tier Attention for Recommendation", "abstract": "Utilizing reviews to learn user and item representations is useful for recommender systems. Existing methods usually merge all reviews from the same user or for the same item into a long document. However, different reviews, sentences and even words usually have different informativeness for modeling users and items. In this paper, we propose a hierarchical user and item representation model with three-tier attention to learn user and item representations from reviews for recommendation. Our model contains three major components, i.e., a sentence encoder to learn sentence representations from words, a review encoder to learn review representations from sentences, and a user/item encoder to learn user/item representations from reviews. In addition, we incorporate a three-tier attention network in our model to select important words, sentences and reviews. Besides, we combine the user and item representations learned from the reviews with user and item embeddings based on IDs as the final representations to capture the latent factors of individual users and items. 
Extensive experiments on four benchmark datasets validate the effectiveness of our approach.", "phrases": ["item representation", "three-tier attention", "hierarchical user"], "overall_score": 2.7946262224072678, "scores": [2.356479032725178, 1.8856719135376823, 1.805539142044973], "rank_score": 2.0158966961026112} -{"id": "gatt-reiter-2009-simplenlg", "title": "SimpleNLG: A Realisation Engine for Practical Applications", "abstract": "This paper describes SimpleNLG, a realisation engine for English which aims to provide simple and robust interfaces to generate syntactic structures and linearise them. The library is also flexible in allowing the use of mixed (canned and non-canned) representations.", "phrases": ["realisation engine", "simplenlg", "project", "other domain-dependent decision", "lexical choice"], "overall_score": 6.563279251565497, "scores": [5.454526763993192, 2.9289980797149813, 0.5648523172798886, 0.5623825603603442, 0.5615005308771585], "rank_score": 2.0144520504451124} -{"id": "reddy-etal-2019-coqa", "title": "CoQA: A Conversational Question Answering Challenge", "abstract": "Humans gather information through conversations involving a series of interconnected questions and answers. For machines to assist in information gathering, it is therefore essential to enable them to answer conversational questions. We introduce CoQA, a novel dataset for building Conversational Question Answering systems. Our dataset contains 127k questions with answers, obtained from 8k conversations about text passages from seven diverse domains. The questions are conversational, and the answers are free-form text with their corresponding evidence highlighted in the passage. We analyze CoQA in depth and show that conversational questions have challenging phenomena not present in existing reading comprehension datasets (e.g., coreference and pragmatic reasoning). We evaluate strong dialogue and reading comprehension models on CoQA. The best system obtains an F1 score of 65.4%, which is 23.4 points behind human performance (88.8%), indicating that there is ample room for improvement. We present CoQA as a challenge to the community at .", "phrases": ["conversation", "coqa", "annotator", "sequential question", "text snippet"], "overall_score": 8.792463790892457, "scores": [5.362259546050278, 2.440152567378402, 1.1129692519772667, 0.5799609299480971, 0.5659561658785001], "rank_score": 2.012259692246509} -{"id": "may-etal-2019-measuring", "title": "On Measuring Social Biases in Sentence Encoders", "abstract": "The Word Embedding Association Test shows that GloVe and word2vec word embeddings exhibit human-like implicit biases based on gender, race, and other social constructs (Caliskan et al., 2017). Meanwhile, research on learning reusable text representations has begun to explore sentence-level texts, with some sentence encoders seeing enthusiastic adoption. Accordingly, we extend the Word Embedding Association Test to measure bias in sentence encoders. We then test several sentence encoders, including state-of-the-art methods such as ELMo and BERT, for the social biases studied in prior work and two important biases that are difficult or impossible to test at the word level. We observe mixed results including suspicious patterns of sensitivity that suggest the test's assumptions may not hold in general. 
We conclude by proposing directions for future work on measuring bias in sentence encoders.", "phrases": ["sentence encoder", "association", "social bias"], "overall_score": 8.025923698204494, "scores": [3.0576602709315326, 1.5943098401612321, 1.3840959181388515], "rank_score": 2.012022009743872} -{"id": "conneau-kiela-2018-senteval", "title": "SentEval: An Evaluation Toolkit for Universal Sentence Representations", "abstract": "We introduce SentEval, a toolkit for evaluating the quality of universal sentence representations. SentEval encompasses a variety of tasks, including binary and multi-class classification, natural language inference and sentence similarity. The set of tasks was selected based on what appears to be the community consensus regarding the appropriate evaluations for universal sentence representations. The toolkit comes with scripts to download and preprocess datasets, and an easy interface to evaluate sentence encoders. The aim is to provide a fairer, less cumbersome and more centralized way for evaluating sentence representations.", "phrases": ["evaluation toolkit", "sentence representation", "senteval", "caption", "similarity task"], "overall_score": 7.317517612107694, "scores": [5.42511886188243, 1.2909424651521693, 1.6045803120104782, 0.8800518291340028, 0.8575117409837291], "rank_score": 2.011641041832562} -{"id": "yuan-briscoe-2016-grammatical", "title": "Grammatical error correction using neural machine translation", "abstract": "This paper presents the first study using neural machine translation (NMT) for grammatical error correction (GEC). We propose a two-step approach to handle the rare word problem in NMT, which has been proved to be useful and effective for the GEC task. Our best NMT-based system trained on the CLC outperforms our SMT-based system when tested on the publicly available FCE test set. The same system achieves an F0.5 score of 39.90% on the CoNLL-2014 shared task test set, outperforming the state-of-the-art and demonstrating that the NMT-based GEC system generalises effectively.", "phrases": ["error correction", "neural machine translation", "rare word problem"], "overall_score": 5.155094531759167, "scores": [3.2737586111431405, 2.2126752990771092, 0.5430354632347832], "rank_score": 2.009823124485011} -{"id": "li-etal-2018-paraphrase", "title": "Paraphrase Generation with Deep Reinforcement Learning", "abstract": "Automatic generation of paraphrases from a given sentence is an important yet challenging task in natural language processing (NLP). In this paper, we present a deep reinforcement learning approach to paraphrase generation. Specifically, we propose a new framework for the task, which consists of a generator and an evaluator, both of which are learned from data. The generator, built as a sequence-to-sequence learning model, can produce paraphrases given a sentence. The evaluator, constructed as a deep matching model, can judge whether two sentences are paraphrases of each other. The generator is first trained by deep learning and then further fine-tuned by reinforcement learning in which the reward is given by the evaluator. For the learning of the evaluator, we propose two methods based on supervised learning and inverse reinforcement learning respectively, depending on the type of available training data. 
Experimental results on two datasets demonstrate that the proposed models (the generators) can produce more accurate paraphrases and outperform the state-of-the-art methods in paraphrase generation in both automatic evaluation and human evaluation.", "phrases": ["reinforcement learning", "deep learning", "paraphrase generation"], "overall_score": 6.6143274915649855, "scores": [3.905425055652174, 1.5606616673357272, 0.5545336175908406], "rank_score": 2.0068734468595806} -{"id": "abdul-mageed-etal-2011-subjectivity", "title": "Subjectivity and Sentiment Analysis of Modern Standard Arabic", "abstract": "Although Subjectivity and Sentiment Analysis (SSA) has been witnessing a flurry of novel research, there are few attempts to build SSA systems for Morphologically-Rich Languages (MRL). In the current study, we report efforts to partially fill this gap. We present a newly developed manually annotated corpus of Modern Standard Arabic (MSA) together with a new polarity lexicon. The corpus is a collection of newswire documents annotated on the sentence level. We also describe an automatic SSA tagging system that exploits the annotated data. We investigate the impact of different levels of preprocessing settings on the SSA classification task. We show that by explicitly accounting for the rich morphology the system is able to achieve significantly higher levels of performance.", "phrases": ["sentiment analysis", "modern standard arabic", "subjectivity", "specific language"], "overall_score": 6.0117272619600675, "scores": [3.3581856963742056, 2.3264831720414807, 1.8090569872805489, 0.5333295858476741], "rank_score": 2.0067638603859774} -{"id": "luo-etal-2017-learning", "title": "Learning to Predict Charges for Criminal Cases with Legal Basis", "abstract": "The charge prediction task is to determine appropriate charges for a given case, which is helpful for legal assistant systems where the user input is a fact description. We argue that relevant law articles play an important role in this task, and therefore propose an attention-based neural network method to jointly model the charge prediction task and the relevant article extraction task in a unified framework. The experimental results show that, besides providing legal basis, the relevant articles can also clearly improve the charge prediction results, and our full model can effectively predict appropriate charges for cases with different expression styles.", "phrases": ["charge", "criminal case", "legal judgment prediction"], "overall_score": 6.1984022525964715, "scores": [3.1915118664242037, 1.8718446702796407, 0.9524802039356558], "rank_score": 2.0052789135465} -{"id": "cohen-etal-2004-learning", "title": "Learning to Classify Email into \u201cSpeech Acts\u201d", "abstract": "It is often useful to classify email according to the intent of the sender (e.g., \"propose a meeting\", \"deliver information\"). We present experimental results in learning to classify email in this fashion, where each class corresponds to a verb-noun pair taken from a predefined ontology describing typical \u201cemail speech acts\u201d. We demonstrate that, although this categorization problem is quite different from \u201ctopical\u201d text classification, certain categories of messages can nonetheless be detected with high precision (above 80%) and reasonable recall (above 50%) using existing text-classification learning methods. 
This result suggests that useful task-tracking tools could be constructed based on automatic classification into this taxonomy.", "phrases": ["email", "speech act", "textual feature"], "overall_score": 5.902359529215758, "scores": [3.7214615648842333, 1.7587368362483973, 0.533537564507956], "rank_score": 2.0045786552135287} -{"id": "richardson-etal-2013-mctest", "title": "MCTest: A Challenge Dataset for the Open-Domain Machine Comprehension of Text", "abstract": "We present MCTest, a freely available set of stories and associated questions intended for research on the machine comprehension of text. Previous work on machine comprehension (e.g., semantic modeling) has made great strides, but primarily focuses either on limited-domain datasets, or on solving a more restricted goal (e.g., open-domain relation extraction). In contrast, MCTest requires machines to answer multiple-choice reading comprehension questions about fictional stories, directly tackling the high-level goal of open-domain machine comprehension. Reading comprehension can test advanced abilities such as causal reasoning and understanding the world, yet, by being multiple-choice, still provide a clear metric. By being fictional, the answer typically can be found only in the story itself. The stories and questions are also carefully limited to those a young child would understand, reducing the world knowledge that is required for the task. We present the scalable crowd-sourcing methods that allow us to cheaply construct a dataset of 500 stories and 2000 questions. By screening workers (with grammar tests) and stories (with grading), we have ensured that the data is the same quality as another set that we manually edited, but at one tenth the editing cost. By being open-domain, yet carefully restricted, we hope MCTest will serve to encourage research and provide a clear metric for advancement on the machine comprehension of text. A major goal for NLP is for machines to be able to understand text as well as people. Several research disciplines are focused on this problem: for example, information extraction, relation extraction, semantic role labeling, and recognizing textual entailment. Yet these techniques are necessarily evaluated individually, rather than by how much they advance us towards the end goal. On the other hand, the goal of semantic parsing is the machine comprehension of text (MCT), yet its evaluation requires adherence to a specific knowledge representation, and it is currently unclear what the best representation is for open-domain text. We believe that it is useful to directly tackle the top-level task of MCT. For this, we need a way to measure progress. One common method for evaluating someone\u2019s understanding of text is by giving them a multiple-choice reading comprehension test. This has the advantage that it is objectively gradable (vs. essays) yet may test a range of abilities such as causal or counterfactual reasoning, inference among relations, or just basic understanding of the world in which the passage is set. Therefore, we propose a multiple-choice reading comprehension task as a way to evaluate progress on MCT. We have built a reading comprehension dataset containing 500 fictional stories, with 4 multiple-choice questions per story. It was built using methods which can easily scale to at least 5000 stories, since the stories were created, and the curation was done, using crowd sourcing almost entirely, at a total of $4.00 per story. 
We plan to periodically update the dataset to ensure that methods are not overfitting to the existing data. The dataset is open-domain, yet restricted to concepts and words that a 7-year-old is expected to understand. This task is still beyond the capability of today\u2019s computers and algorithms.", "phrases": ["open-domain machine comprehension", "story", "reading comprehension dataset", "mctest", "multiple-choice question"], "overall_score": 8.478646361587609, "scores": [4.870065283538469, 0.9689717106423922, 2.0362845861717123, 1.2249001132320267, 0.912098913350727], "rank_score": 2.002464121387065} -{"id": "rush-etal-2010-dual", "title": "On Dual Decomposition and Linear Programming Relaxations for Natural Language Processing", "abstract": "This paper introduces dual decomposition as a framework for deriving inference algorithms for NLP problems. The approach relies on standard dynamic-programming algorithms as oracle solvers for sub-problems, together with a simple method for forcing agreement between the different oracles. The approach provably solves a linear programming (LP) relaxation of the global inference problem. It leads to algorithms that are simple, in that they use existing decoding algorithms; efficient, in that they avoid exact algorithms for the full model; and often exact, in that empirically they often recover the correct solution in spite of using an LP relaxation. We give experimental results on two problems: 1) the combination of two lexicalized parsing models; and 2) the combination of a lexicalized parsing model and a trigram part-of-speech tagger.", "phrases": ["dual decomposition", "nlp problem", "parsing model"], "overall_score": 6.876032778310129, "scores": [4.5834968981655475, 0.8857508929540885, 0.5377921617339834], "rank_score": 2.0023466509512065} -{"id": "habash-etal-2013-morphological", "title": "Morphological Analysis and Disambiguation for Dialectal Arabic", "abstract": "The many differences between Dialectal Arabic and Modern Standard Arabic (MSA) pose a challenge to the majority of Arabic natural language processing tools, which are designed for MSA. In this paper, we retarget an existing state-of-the-art MSA morphological tagger to Egyptian Arabic (ARZ). Our evaluation demonstrates that our ARZ morphology tagger outperforms its MSA variant on ARZ input in terms of accuracy in part-of-speech tagging, diacritization, lemmatization and tokenization; and in terms of utility for ARZ-to-English statistical machine translation.", "phrases": ["disambiguation", "dialectal arabic", "arabic pos tagging"], "overall_score": 6.517496506911672, "scores": [3.28996422984529, 2.152301271425824, 0.5589346757526447], "rank_score": 2.00040005900792} -{"id": "chen-etal-2018-sequence", "title": "Sequence-to-Action: End-to-End Semantic Graph Generation for Semantic Parsing", "abstract": "This paper proposes a neural semantic parsing approach \u2013 Sequence-to-Action, which models semantic parsing as an end-to-end semantic graph generation process. Our method simultaneously leverages the advantages of two recent promising directions of semantic parsing. Firstly, our model uses a semantic graph to represent the meaning of a sentence, which is tightly coupled with knowledge bases. Secondly, by leveraging the powerful representation learning and prediction ability of neural network models, we propose an RNN model which can effectively map sentences to action sequences for semantic graph generation. 
Experiments show that our method achieves state-of-the-art performance on the Overnight dataset and competitive performance on the Geo and Atis datasets.", "phrases": ["semantic parsing", "graph generation process", "sequence-to-action"], "overall_score": 4.96344311072739, "scores": [3.555080378806729, 1.8831107660961528, 0.5541181973625662], "rank_score": 1.9974364474218158} -{"id": "tamura-etal-2012-bilingual", "title": "Bilingual Lexicon Extraction from Comparable Corpora Using Label Propagation", "abstract": "This paper proposes a novel method for lexicon extraction that extracts translation pairs from comparable corpora by using graph-based label propagation. In previous work, it was established that performance drastically decreases when the coverage of a seed lexicon is small. We resolve this problem by utilizing indirect relations with the bilingual seeds together with direct relations, in which each word is represented by a distribution of translated seeds. The seed distributions are propagated over a graph representing relations among words, and translation pairs are extracted by identifying word pairs with a high similarity in the seed distributions. We propose two types of graphs: a co-occurrence graph, representing co-occurrence relations between words, and a similarity graph, representing context similarities between words. Evaluations using English and Japanese patent comparable corpora show that our proposed graph propagation method outperforms conventional methods. Further, the similarity graph achieved improved performance by clustering synonyms into the same translation.", "phrases": ["lexicon extraction", "comparable corpora", "label propagation"], "overall_score": 4.786097115955783, "scores": [2.629846975732789, 2.3501889383527925, 1.0078366634816036], "rank_score": 1.9959575258557283} -{"id": "kordjamshidi-etal-2010-spatial", "title": "Spatial Role Labeling: Task Definition and Annotation Scheme", "abstract": "One of the essential functions of natural language is to talk about spatial relationships between objects. Linguistic constructs can express highly complex, relational structures of objects, spatial relations between them, and patterns of motion through spaces relative to some reference point. Learning how to map this information onto a formal representation from a text is a challenging problem. At present no well-defined framework for automatic spatial information extraction exists that can handle all of these issues. In this paper we introduce the task of spatial role labeling and propose an annotation scheme that is language-independent and facilitates the application of machine learning techniques. Our framework consists of a set of spatial roles based on the theory of holistic spatial semantics with the intent of covering all aspects of spatial concepts, including both static and dynamic spatial relations. 
We illustrate our annotation scheme with many examples throughout the paper, and in addition we highlight how to connect to spatial calculi such as region connection calculus and also how our approach fits into related work.", "phrases": ["annotation scheme", "object", "spatial role labeling", "sprl"], "overall_score": 5.253006981664172, "scores": [4.169171380072701, 1.855438822633119, 1.1075566837757966, 0.8297775892452441], "rank_score": 1.9904861189317153} -{"id": "majumder-etal-2020-mime", "title": "MIME: MIMicking Emotions for Empathetic Response Generation", "abstract": "Current approaches to empathetic response generation view the set of emotions expressed in the input text as a flat structure, where all the emotions are treated uniformly. We argue that empathetic responses often mimic the emotion of the user to a varying degree, depending on its positivity or negativity and content. We show that the consideration of these polarity-based emotion clusters and emotional mimicry results in improved empathy and contextual relevance of the response as compared to the state-of-the-art. Also, we introduce stochasticity into the emotion mixture that yields emotionally more varied empathetic responses than the previous work. We demonstrate the importance of these factors to empathetic response generation using both automatic- and human-based evaluations. The implementation of MIME is publicly available at .", "phrases": ["emotion", "empathetic response generation", "mime"], "overall_score": 4.944378645680634, "scores": [3.1595157381954655, 1.688547639935366, 1.1212296487550633], "rank_score": 1.9897643422952982} -{"id": "waseem-hovy-2016-hateful", "title": "Hateful Symbols or Hateful People? Predictive Features for Hate Speech Detection on Twitter", "abstract": "Hate speech in the form of racist and sexist remarks is a common occurrence on social media. For that reason, many social media services address the problem of identifying hate speech, but the definition of hate speech varies markedly and is largely a manual effort (BBC, 2015; Lomas, 2015). We provide a list of criteria founded in critical race theory, and use them to annotate a publicly available corpus of more than 16k tweets. We analyze the impact of various extra-linguistic features in conjunction with character n-grams for hate speech detection. We also present a dictionary based on the most indicative words in our data.", "phrases": ["hate speech detection", "twitter", "abusive language", "sexist tweet", "language detection dataset"], "overall_score": 9.719537368088995, "scores": [3.822097362946936, 3.0164675796083755, 1.4101730144007874, 0.8621651819170705, 0.826564630041775], "rank_score": 1.9874935537829888} -{"id": "martins-smith-2009-summarization", "title": "Summarization with a Joint Model for Sentence Extraction and Compression", "abstract": "Text summarization is one of the oldest problems in natural language processing. Popular approaches rely on extracting relevant sentences from the original documents. As a side effect, sentences that are too long but partly relevant are doomed to either not appear in the final summary, or prevent inclusion of other relevant sentences. Sentence compression is a recent framework that aims to select the shortest subsequence of words that yields an informative and grammatical sentence. This work proposes a one-step approach for document summarization that jointly performs sentence extraction and compression by solving an integer linear program. 
We report favorable experimental results on newswire data.", "phrases": ["joint model", "sentence extraction", "compression"], "overall_score": 5.627664363677826, "scores": [2.693724443804963, 2.4649805991317506, 0.8002507577100052], "rank_score": 1.9863186002155728} -{"id": "kantor-globerson-2019-coreference", "title": "Coreference Resolution with Entity Equalization", "abstract": "A key challenge in coreference resolution is to capture properties of entity clusters, and use those in the resolution process. Here we provide a simple and effective approach for achieving this, via an \u201cEntity Equalization\u201d mechanism. The Equalization approach represents each mention in a cluster via an approximation of the sum of all mentions in the cluster. We show how this can be done in a fully differentiable end-to-end manner, thus enabling high-order inferences in the resolution process. Our approach, which also employs BERT embeddings, results in new state-of-the-art results on the CoNLL-2012 coreference resolution task, improving average F1 by 3.6%.", "phrases": ["entity equalization", "mention", "coreference resolution"], "overall_score": 6.044951341482228, "scores": [2.4993359966118205, 1.8947899441765739, 1.5624253395520553], "rank_score": 1.9855170934468165} -{"id": "savary-etal-2017-parseme", "title": "The PARSEME Shared Task on Automatic Identification of Verbal Multiword Expressions", "abstract": "Multiword expressions (MWEs) are known as a \u201cpain in the neck\u201d for NLP due to their idiosyncratic behaviour. While some categories of MWEs have been addressed by many studies, verbal MWEs (VMWEs), such as to take a decision, to break one's heart or to turn off, have been rarely modelled. This is notably due to their syntactic variability, which hinders treating them as \u201cwords with spaces\u201d. We describe an initiative meant to bring about substantial progress in understanding, modelling and processing VMWEs. It is a joint effort, carried out within a European research network, to elaborate universal terminologies and annotation guidelines for 18 languages. Its main outcome is a multilingual 5-million-word annotated corpus which underlies a shared task on automatic identification of VMWEs. This paper presents the corpus annotation methodology and outcome, the shared task organisation and the results of the participating systems.", "phrases": ["parseme", "automatic identification", "verbal multiword expressions", "edition"], "overall_score": 6.869832599017946, "scores": [3.3125144462760683, 2.887354037505016, 0.8984807026324152, 0.8305095514589232], "rank_score": 1.982214684468106} -{"id": "bhatia-etal-2015-better", "title": "Better Document-level Sentiment Analysis from RST Discourse Parsing", "abstract": "Discourse structure is the hidden link between surface features and document-level properties, such as sentiment polarity. We show that the discourse analyses produced by Rhetorical Structure Theory (RST) parsers can improve document-level sentiment analysis, via composition of local information up the discourse tree. First, we show that reweighting discourse units according to their position in a dependency representation of the rhetorical structure can yield substantial improvements on lexicon-based sentiment analysis. 
Next, we present a recursive neural network over the RST structure, which offers significant improvements over classification-based methods.", "phrases": ["sentiment analysis", "discourse structure", "text categorization"], "overall_score": 6.019635431608339, "scores": [4.120646203031513, 0.9263880887395056, 0.8845712930867405], "rank_score": 1.9772018616192533} -{"id": "paetzold-specia-2016-semeval", "title": "SemEval 2016 Task 11: Complex Word Identification", "abstract": "We report the findings of the Complex Word Identification task of SemEval 2016. To create a dataset, we conduct a user study with 400 non-native English speakers, and find that complex words tend to be rarer, less ambiguous and shorter. A total of 42 systems were submitted from 21 distinct teams, and nine baselines were provided. The results highlight the effectiveness of Decision Trees and Ensemble methods for the task, but ultimately reveal that word frequencies remain the most reliable predictor of word complexity.", "phrases": ["complex word identification", "semeval", "cwi", "reader", "non-native speaker"], "overall_score": 7.121935684037188, "scores": [3.3639622814265127, 2.4682821467480482, 2.1094259994810862, 1.0781170854445097, 0.841881813552986], "rank_score": 1.9723338653306286} -{"id": "smit-etal-2014-morfessor", "title": "Morfessor 2.0: Toolkit for statistical morphological segmentation", "abstract": "Morfessor is a family of probabilistic machine learning methods for finding the morphological segmentation from raw text data. Recent developments include semi-supervised methods for utilizing annotated data. Morfessor 2.0 is a rewrite of the original, widely-used Morfessor 1.0 software, with well-documented command-line tools and a library interface. It includes new features such as semi-supervised learning, online training, and integrated evaluation code.", "phrases": ["segmentation", "morfessor", "bpe"], "overall_score": 4.540670441116426, "scores": [2.994040546039395, 2.3747309160435455, 0.5471928880712631], "rank_score": 1.9719881167180677} -{"id": "rudinger-etal-2018-gender", "title": "Gender Bias in Coreference Resolution", "abstract": "We present an empirical study of gender bias in coreference resolution systems. We first introduce a novel, Winograd schema-style set of minimal pair sentences that differ only by pronoun gender. With these \u201cWinogender schemas,\u201d we evaluate and confirm systematic gender bias in three publicly-available coreference resolution systems, and correlate this bias with real-world and textual gender statistics.", "phrases": ["coreference resolution", "pronoun", "gender bias", "occupation", "template"], "overall_score": 7.322035567422272, "scores": [4.6049942513265, 1.8175470785240004, 1.3166987679848305, 1.0652376123502993, 1.0540027984750993], "rank_score": 1.971696101732146} -{"id": "chiu-nichols-2016-named", "title": "Named Entity Recognition with Bidirectional LSTM-CNNs", "abstract": "Named entity recognition is a challenging task that has traditionally required large amounts of knowledge in the form of feature engineering and lexicons to achieve high performance. In this paper, we present a novel neural network architecture that automatically detects word- and character-level features using a hybrid bidirectional LSTM and CNN architecture, eliminating the need for most feature engineering. We also propose a novel method of encoding partial lexicon matches in neural networks and compare it to existing approaches. 
Extensive evaluation shows that, given only tokenized text and publicly available word embeddings, our system is competitive on the CoNLL-2003 dataset and surpasses the previously reported state of the art performance on the OntoNotes 5.0 dataset by 2.13 F1 points. By using two lexicons constructed from publicly-available sources, we establish new state of the art performance with an F1 score of 91.62 on CoNLL-2003 and 86.28 on OntoNotes, surpassing systems that employ heavy feature engineering, proprietary lexicons, and rich entity linking information.", "phrases": ["entity recognition", "cnn", "word embedding", "convolutional neural network", "tagging"], "overall_score": 9.017947104202046, "scores": [4.098781464785605, 2.00127599724723, 1.4681671974842252, 1.4516920226828793, 0.8363858761228476], "rank_score": 1.9712605116645574} -{"id": "liao-grishman-2010-using", "title": "Using Document Level Cross-Event Inference to Improve Event Extraction", "abstract": "Event extraction is a particularly challenging type of information extraction (IE). Most current event extraction systems rely on local information at the phrase or sentence level. However, this local context may be insufficient to resolve ambiguities in identifying particular types of events; information from a wider scope can serve to resolve some of these ambiguities. In this paper, we use document level information to improve the performance of ACE event extraction. In contrast to previous work, we do not limit ourselves to information about events of the same type, but rather use information about other types of events to make predictions or resolve ambiguities regarding a given event. We learn such relationships from the training corpus and use them to help predict the occurrence of events and event arguments in a text. Experiments show that we can get 9.0% (absolute) gain in trigger (event) classification, and more than 8% gain for argument (role) classification in ACE event extraction.", "phrases": ["document level", "event extraction", "cross-entity inference"], "overall_score": 7.413303364600281, "scores": [4.121133147463173, 0.8413096866807437, 0.9505395676711548], "rank_score": 1.9709941339383568} -{"id": "lee-etal-2018-higher", "title": "Higher-Order Coreference Resolution with Coarse-to-Fine Inference", "abstract": "We introduce a fully-differentiable approximation to higher-order inference for coreference resolution. Our approach uses the antecedent distribution from a span-ranking architecture as an attention mechanism to iteratively refine span representations. This enables the model to softly consider multiple hops in the predicted clusters. To alleviate the computational cost of this iterative process, we introduce a coarse-to-fine approach that incorporates a less accurate but more efficient bilinear factor, enabling more aggressive pruning without hurting accuracy. 
Compared to the existing state-of-the-art span-ranking approach, our model significantly improves accuracy on the English OntoNotes benchmark, while being far more computationally efficient.", "phrases": ["coreference resolution", "coarse-to-fine inference", "span representation", "mention", "probability distribution"], "overall_score": 7.616658879054923, "scores": [4.973679748509082, 0.9307851925489071, 1.864957235027501, 1.2247569857092393, 0.8434123990586699], "rank_score": 1.9675183121706799} -{"id": "chen-yang-2020-multi", "title": "Multi-View Sequence-to-Sequence Models with Conversational Structure for Abstractive Dialogue Summarization", "abstract": "Text summarization is one of the most challenging and interesting problems in NLP. Although much attention has been paid to summarizing structured text like news reports or encyclopedia articles, summarizing conversations\u2014an essential part of human-human/machine interaction where most important pieces of information are scattered across various utterances of different speakers\u2014remains relatively under-investigated. This work proposes a multi-view sequence-to-sequence model by first extracting conversational structures of unstructured daily chats from different views to represent conversations and then utilizing a multi-view decoder to incorporate different views to generate dialogue summaries. Experiments on a large-scale dialogue summarization corpus demonstrated that our methods significantly outperformed previous state-of-the-art models via both automatic evaluations and human judgment. We also discussed specific challenges that current approaches faced with this task. We have publicly released our code at .", "phrases": ["sequence-to-sequence model", "conversational structure", "summarization model"], "overall_score": 5.573906798346107, "scores": [2.8038529120728657, 2.5453286386471388, 0.5528520642630436], "rank_score": 1.9673445383276826} -{"id": "mikolov-etal-2013-linguistic", "title": "Linguistic Regularities in Continuous Space Word Representations", "abstract": "Continuous space language models have recently demonstrated outstanding results across a variety of tasks. In this paper, we examine the vector-space word representations that are implicitly learned by the input-layer weights. We find that these representations are surprisingly good at capturing syntactic and semantic regularities in language, and that each relationship is characterized by a relation-specific vector offset. This allows vector-oriented reasoning based on the offsets between words. For example, the male/female relationship is automatically learned, and with the induced vector representations, \u201cKing Man + Woman\u201d results in a vector very close to \u201cQueen.\u201d We demonstrate that the word vectors capture syntactic regularities by means of syntactic analogy questions (provided with this paper), and are able to correctly answer almost 40% of the questions. We demonstrate that the word vectors capture semantic regularities by using the vector offset method to answer SemEval-2012 Task 2 questions. 
Remarkably, this method outperforms the best previous systems.", "phrases": ["regularity", "word embedding", "analogy task", "semantic property", "cbow"], "overall_score": 9.735072811046766, "scores": [3.2382721937344923, 2.9670859866066204, 1.2992184079296376, 1.2282023199883367, 1.1030921247794965], "rank_score": 1.967174206607717} -{"id": "choi-etal-2018-quac", "title": "QuAC: Question Answering in Context", "abstract": "We present QuAC, a dataset for Question Answering in Context that contains 14K information-seeking QA dialogs (100K questions in total). The dialogs involve two crowd workers: (1) a student who poses a sequence of freeform questions to learn as much as possible about a hidden Wikipedia text, and (2) a teacher who answers the questions by providing short excerpts from the text. QuAC introduces challenges not found in existing machine comprehension datasets: its questions are often more open-ended, unanswerable, or only meaningful within the dialog context, as we show in a detailed qualitative evaluation. We also report results for a number of reference models, including a recently state-of-the-art reading comprehension architecture extended to model dialog context. Our best model underperforms humans by 20 F1, suggesting that there is significant room for future work on this data. Dataset, baseline, and leaderboard available at .", "phrases": ["quac", "conversation", "machine reading comprehension", "factoid question"], "overall_score": 7.915862633545584, "scores": [4.427139502054957, 1.8425114191351681, 0.9577470184649475, 0.6386105403144479], "rank_score": 1.9665021199923802} -{"id": "hassan-etal-2010-whats", "title": "What's with the Attitude? Identifying Sentences with Attitude in Online Discussions", "abstract": "Mining sentiment from user generated content is a very important task in Natural Language Processing. An example of such content is threaded discussions which act as a very important tool for communication and collaboration in the Web. Threaded discussions include e-mails, e-mail lists, bulletin boards, newsgroups, and Internet forums. Most of the work on sentiment analysis has been centered around finding the sentiment toward products or topics. In this work, we present a method to identify the attitude of participants in an online discussion toward one another. This would enable us to build a signed network representation of participant interaction where every edge has a sign that indicates whether the interaction is positive or negative. This is different from most of the research on social networks that has focused almost exclusively on positive links. The method is experimentally tested using a manually labeled set of discussion posts. The results show that the proposed method is capable of identifying attitudinal sentences, and their signs, with high accuracy and that it outperforms several other baselines.", "phrases": ["attitude", "online discussion", "sentiment analysis"], "overall_score": 5.183224070333645, "scores": [3.5465846730457047, 1.782701763820437, 0.5628448315621505], "rank_score": 1.964043756142764} -{"id": "jones-etal-2012-semantics", "title": "Semantics-Based Machine Translation with Hyperedge Replacement Grammars", "abstract": "We present an approach to semantics-based statistical machine translation that uses synchronous hyperedge replacement grammars to translate into and from graph-shaped intermediate meaning representations, to our knowledge the first work in NLP to make use of synchronous context free graph grammars. 
We present algorithms for each step of the semantics-based translation pipeline, including a novel graph-to-word alignment algorithm and two algorithms for synchronous grammar rule extraction. We investigate the influence of syntactic annotations on semantics-based translation by presenting two alternative rule extraction algorithms, one that requires only semantic annotations and another that additionally relies on syntactic annotations, and explore the effect of syntax and language bias in meaning representation structures by running experiments with two different meaning representations, one biased toward an English syntax-like structure and another that is language neutral. While preliminary work, these experiments show promise for semantically-informed machine translation.", "phrases": ["machine translation", "hyperedge replacement", "meaning representation"], "overall_score": 6.467546190290247, "scores": [4.013467921992415, 1.3025338437976408, 0.5710124766330871], "rank_score": 1.9623380808077144} -{"id": "pires-etal-2019-multilingual", "title": "How Multilingual is Multilingual BERT?", "abstract": "In this paper, we show that Multilingual BERT (M-BERT), released by Devlin et al. (2018) as a single language model pre-trained from monolingual corpora in 104 languages, is surprisingly good at zero-shot cross-lingual model transfer, in which task-specific annotations in one language are used to fine-tune the model for evaluation in another language. To understand why, we present a large number of probing experiments, showing that transfer is possible even to languages in different scripts, that transfer works best between typologically similar languages, that monolingual corpora can train models for code-switching, and that the model can find translation pairs. From these results, we can conclude that M-BERT does create multilingual representations, but that these representations exhibit systematic deficiencies affecting certain language pairs.", "phrases": ["multilingual bert", "m-bert", "similar language", "zero-shot cross-lingual transfer", "crosslingual transfer"], "overall_score": 9.624712769539583, "scores": [5.124873747267352, 1.6816426523250572, 1.4054714818512972, 1.0642017678984546, 0.5343848895663031], "rank_score": 1.9621149077816926} -{"id": "ebrahimi-etal-2018-hotflip", "title": "HotFlip: White-Box Adversarial Examples for Text Classification", "abstract": "We propose an efficient method to generate white-box adversarial examples to trick a character-level neural classifier. We find that only a few manipulations are needed to greatly decrease the accuracy. Our method relies on an atomic flip operation, which swaps one token for another, based on the gradients of the one-hot input vectors. Due to efficiency of our method, we can perform adversarial training which makes the model more robust to attacks at test time. 
With the use of a few semantics-preserving constraints, we demonstrate that HotFlip can be adapted to attack a word-level classifier as well.", "phrases": ["adversarial example", "text classification", "gradient", "hotflip", "nlp model"], "overall_score": 8.128502399710506, "scores": [3.186283644822292, 0.9012356142427156, 2.666145088806599, 1.7611225643375032, 1.2948173325338954], "rank_score": 1.9619208489486013} -{"id": "tai-etal-2020-exbert", "title": "exBERT: Extending Pre-trained Models with Domain-specific Vocabulary Under Constrained Training Resources", "abstract": "We introduce exBERT, a training method to extend BERT pre-trained models from a general domain to a new pre-trained model for a specific domain with a new additive vocabulary under constrained training resources (i.e., constrained computation and data). exBERT uses a small extension module to learn to adapt an augmenting embedding for the new domain in the context of the original BERT's embedding of a general vocabulary. The exBERT training method is novel in learning the new vocabulary and the extension module while keeping the weights of the original BERT model fixed, resulting in a substantial reduction in required training resources. We pre-train exBERT with biomedical articles from ClinicalKey and PubMed Central, and study its performance on biomedical downstream benchmark tasks using the MTL-Bioinformatics-2016 datasets. We demonstrate that exBERT consistently outperforms prior approaches when using limited corpus and pre-training computation resources.", "phrases": ["pre-trained model", "vocabulary", "exbert"], "overall_score": 3.813733280946378, "scores": [2.496426223401525, 2.0209273724254326, 1.3622600381291254], "rank_score": 1.9598712113186945} -{"id": "da-san-martino-etal-2019-fine", "title": "Fine-Grained Analysis of Propaganda in News Article", "abstract": "Propaganda aims at influencing people's mindset with the purpose of advancing a specific agenda. Previous work has addressed propaganda detection at document level, typically labelling all articles from a propagandistic news outlet as propaganda. Such noisy gold labels inevitably affect the quality of any learning system trained on them. A further issue with most existing systems is the lack of explainability. To overcome these limitations, we propose a novel task: performing fine-grained analysis of texts by detecting all fragments that contain propaganda techniques as well as their type. In particular, we create a corpus of news articles manually annotated at fragment level with eighteen propaganda techniques and propose a suitable evaluation measure. We further design a novel multi-granularity neural network, and we show that it outperforms several strong BERT-based baselines.", "phrases": ["document level", "propaganda technique", "news article", "fine-grained analysis", "fallacy"], "overall_score": 7.533668317608977, "scores": [4.368898834806904, 1.7048674243996669, 2.624478213953486, 0.5471183975022348, 0.53824687232895], "rank_score": 1.956721948598248} -{"id": "tang-etal-2015-document", "title": "Document Modeling with Gated Recurrent Neural Network for Sentiment Classification", "abstract": "Document level sentiment classification remains a challenge: encoding the intrinsic relations between sentences in the semantic meaning of a document. To address this, we introduce a neural network model to learn vector-based document representation in a unified, bottom-up fashion. 
-{"id": "schmidt-2014-research", "title": "The Research and Teaching Corpus of Spoken German \u2014 FOLK", "abstract": "FOLK is the \u201cForschungs- und Lehrkorpus Gesprochenes Deutsch (FOLK)\u201d (eng.: research and teaching corpus of spoken German). The project has set itself the aim of building a corpus of German conversations which a) covers a broad range of interaction types in private, institutional and public settings, b) is sufficiently large and diverse and of sufficient quality to support different qualitative and quantitative research approaches, c) is transcribed, annotated and made accessible according to current technological standards, and d) is available to the scientific community on a sound legal basis and without unnecessary restrictions of usage. This paper gives an overview of the corpus design, the strategies for acquisition of a diverse range of interaction data, and the corpus construction workflow from recording via transcription and annotation to dissemination.", "phrases": ["teaching corpus", "spoken german", "folk"], "overall_score": 2.7120965449264727, "scores": [2.0942234782918567, 1.905496577723307, 1.8693722976511065], "rank_score": 1.956364117888757}
-{"id": "resnik-elkiss-2005-linguists", "title": "The Linguist's Search Engine: An Overview", "abstract": "The Linguist's Search Engine (LSE) was designed to provide an intuitive, easy-to-use interface that enables language researchers to seek linguistically interesting examples on the Web, based on syntactic and lexical criteria. We briefly describe its user interface and architecture, as well as recent developments that include LSE search capabilities for Chinese.", "phrases": ["linguist", "search engine", "lse"], "overall_score": 3.5042548532140727, "scores": [2.8858656606091966, 2.423409325431939, 0.5580106291265295], "rank_score": 1.9557618717225551}
-{"id": "trischler-etal-2017-newsqa", "title": "NewsQA: A Machine Comprehension Dataset", "abstract": "We present NewsQA, a challenging machine comprehension dataset of over 100,000 human-generated question-answer pairs. Crowdworkers supply questions and answers based on a set of over 10,000 news articles from CNN, with answers consisting of spans of text in the articles. We collect this dataset through a four-stage process designed to solicit exploratory questions that require reasoning. Analysis confirms that NewsQA demands abilities beyond simple word matching and recognizing textual entailment. We measure human performance on the dataset and compare it to several strong neural models.\nThe performance gap between humans and machines (13.3% F1) indicates that significant progress can be made on NewsQA through future research. The dataset is freely available online.", "phrases": ["machine comprehension dataset", "crowdworker", "cnn", "newsqa", "unanswerable question"], "overall_score": 7.6883767387535755, "scores": [5.574159372783581, 0.9746913716888912, 1.2805182023115986, 1.0543919011812046, 0.8933471127282545], "rank_score": 1.955421592138706}
-{"id": "zhang-etal-2020-optimizing", "title": "Optimizing the Factual Correctness of a Summary: A Study of Summarizing Radiology Reports", "abstract": "Neural abstractive summarization models are able to generate summaries which have high overlap with human references. However, existing models are not optimized for factual correctness, a critical metric in real-world applications. In this work, we develop a general framework where we evaluate the factual correctness of a generated summary by fact-checking it automatically against its reference using an information extraction module. We further propose a training strategy which optimizes a neural summarization model with a factual correctness reward via reinforcement learning. We apply the proposed method to the summarization of radiology reports, where factual correctness is a key requirement. On two separate datasets collected from hospitals, we show via both automatic and human evaluation that the proposed approach substantially improves the factual correctness and overall quality of outputs over a competitive neural summarization system, producing radiology summaries that approach the quality of human-authored ones.", "phrases": ["factual correctness", "radiology report", "summarization model"], "overall_score": 5.944982975438836, "scores": [3.3578762029934985, 1.3442723151954044, 1.1558963118741004], "rank_score": 1.952681610021001}
-{"id": "hieber-etal-2020-sockeye", "title": "Sockeye 2: A Toolkit for Neural Machine Translation", "abstract": "We present Sockeye 2, a modernized and streamlined version of the Sockeye neural machine translation (NMT) toolkit. New features include a simplified code base through the use of MXNet's Gluon API, a focus on state of the art model architectures, and distributed mixed precision training. These improvements result in faster training and inference, higher automatic metric scores, and a shorter path from research to production.", "phrases": ["neural machine translation", "sockeye", "versatile toolkit"], "overall_score": 4.679851831854588, "scores": [3.1932214518738498, 2.095342483097816, 0.5663854678767098], "rank_score": 1.9516498009494587}
-{"id": "tsvetkov-etal-2016-learning", "title": "Learning the Curriculum with Bayesian Optimization for Task-Specific Word Representation Learning", "abstract": "We use Bayesian optimization to learn curricula for word representation learning, optimizing performance on downstream tasks that depend on the learned representations as features. The curricula are modeled by a linear ranking function which is the scalar product of a learned weight vector and an engineered feature vector that characterizes the different aspects of the complexity of each instance in the training corpus.\nWe show that learning the curriculum improves performance on a variety of downstream tasks over random orders and in comparison to the natural corpus order.", "phrases": ["curriculum", "bayesian optimization", "word representation learning"], "overall_score": 3.7940314185760444, "scores": [2.6628044861039992, 2.5627500976976454, 0.6236847869133049], "rank_score": 1.9497464569049832}
-{"id": "kiritchenko-mohammad-2018-examining", "title": "Examining Gender and Race Bias in Two Hundred Sentiment Analysis Systems", "abstract": "Automatic machine learning systems can inadvertently accentuate and perpetuate inappropriate human biases. Past work on examining inappropriate biases has largely focused on just individual systems. Further, there is no benchmark dataset for examining inappropriate biases in systems. Here for the first time, we present the Equity Evaluation Corpus (EEC), which consists of 8,640 English sentences carefully chosen to tease out biases towards certain races and genders. We use the dataset to examine 219 automatic sentiment analysis systems that took part in a recent shared task, SemEval-2018 Task 1 `Affect in Tweets'. We find that several of the systems show statistically significant bias; that is, they consistently provide slightly higher sentiment intensity predictions for one race or one gender. We make the EEC freely available.", "phrases": ["gender", "sentiment analysis system", "equity evaluation corpus", "english sentence"], "overall_score": 6.424851919015813, "scores": [3.838219805116869, 1.7399388437387227, 1.6806348999826004, 0.5387427737710164], "rank_score": 1.949384080652302}
-{"id": "smith-eisner-2009-parser", "title": "Parser Adaptation and Projection with Quasi-Synchronous Grammar Features", "abstract": "We connect two scenarios in structured learning: adapting a parser trained on one corpus to another annotation style, and projecting syntactic annotations from one language to another. We propose quasi-synchronous grammar (QG) features for these structured learning tasks. That is, we score an aligned pair of source and target trees based on local features of the trees and the alignment. Our quasi-synchronous model assigns positive probability to any alignment of any trees, in contrast to a synchronous grammar, which would insist on some form of structural parallelism. \n \nIn monolingual dependency parser adaptation, we achieve high accuracy in translating among multiple annotation styles for the same sentence. On the more difficult problem of cross-lingual parser projection, we learn a dependency parser for a target language by using bilingual text, an English parser, and automatic word alignments. Our experiments show that unsupervised QG projection improves on parses trained using only high-precision projected annotations and far outperforms, by more than 35% absolute dependency accuracy, learning an unsupervised parser from raw target-language text alone.\nWhen a few target-language parse trees are available, projection gives a boost equivalent to doubling the number of target-language trees.", "phrases": ["projection", "quasi-synchronous grammar feature", "parser adaptation"], "overall_score": 5.934668806259498, "scores": [2.7590683662134676, 2.1431681193068854, 0.9456450075428571], "rank_score": 1.94929383102107}
-{"id": "jamshid-lou-johnson-2017-disfluency", "title": "Disfluency Detection using a Noisy Channel Model and a Deep Neural Language Model", "abstract": "This paper presents a model for disfluency detection in spontaneous speech transcripts called LSTM Noisy Channel Model. The model uses a Noisy Channel Model (NCM) to generate n-best candidate disfluency analyses and a Long Short-Term Memory (LSTM) language model to score the underlying fluent sentences of each analysis. The LSTM language model scores, along with other features, are used in a MaxEnt reranker to identify the most plausible analysis. We show that using an LSTM language model in the reranking process of noisy channel disfluency model improves the state-of-the-art in disfluency detection.", "phrases": ["noisy channel model", "language model", "disfluency detection"], "overall_score": 3.7928308654655347, "scores": [2.7853039944947326, 2.528664258959795, 0.5334202305001688], "rank_score": 1.9491294946515654}
-{"id": "lo-wu-2011-meant", "title": "MEANT: An inexpensive, high-accuracy, semi-automatic metric for evaluating translation utility based on semantic roles", "abstract": "We introduce a novel semi-automated metric, MEANT, that assesses translation utility by matching semantic role fillers, producing scores that correlate with human judgment as well as HTER but at much lower labor cost. As machine translation systems improve in lexical choice and fluency, the shortcomings of widespread n-gram based, fluency-oriented MT evaluation metrics such as BLEU, which fail to properly evaluate adequacy, become more apparent. But more accurate, non-automatic adequacy-oriented MT evaluation metrics like HTER are highly labor-intensive, which bottlenecks the evaluation cycle. We first show that when using untrained monolingual readers to annotate semantic roles in MT output, the non-automatic version of the metric HMEANT achieves a 0.43 correlation coefficient with human adequacy judgments at the sentence level, far superior to BLEU at only 0.20, and equal to the far more expensive HTER. We then replace the human semantic role annotators with automatic shallow semantic parsing to further automate the evaluation metric, and show that even the semi-automated evaluation metric achieves a 0.34 correlation coefficient with human adequacy judgment, which is still about 80% as closely correlated as HTER despite an even lower labor cost for the evaluation procedure.\nThe results show that our proposed metric is significantly better correlated with human judgment on adequacy than current widespread automatic evaluation metrics, while being much more cost effective than HTER.", "phrases": ["translation utility", "semantic role", "evaluation metric", "meant", "n-gram matching"], "overall_score": 5.512744290978582, "scores": [4.30374771326782, 2.332027617057337, 1.9542139081835066, 0.5830150432782546, 0.5557800022200385], "rank_score": 1.9457568568013914}
-{"id": "mi-etal-2016-coverage", "title": "Coverage Embedding Models for Neural Machine Translation", "abstract": "In this paper, we enhance the attention-based neural machine translation (NMT) by adding explicit coverage embedding models to alleviate issues of repeating and dropping translations in NMT. For each source word, our model starts with a full coverage embedding vector to track the coverage status, and then keeps updating it with neural networks as the translation goes. Experiments on the large-scale Chinese-to-English task show that our enhanced model improves the translation quality significantly on various test sets over the strong large vocabulary NMT system.", "phrases": ["neural machine translation", "source word", "coverage"], "overall_score": 5.828622447448201, "scores": [2.4809861146080756, 2.4395455039902307, 0.9163942965470495], "rank_score": 1.9456419717151185}
-{"id": "bunt-etal-2016-dialogbank", "title": "The DialogBank", "abstract": "This paper presents the DialogBank, a new language resource consisting of dialogues with gold standard annotations according to the ISO 24617-2 standard. Some of these dialogues have been taken from existing corpora and have been re-annotated according to the ISO standard; others have been annotated directly according to the standard. The ISO 24617-2 annotations have been designed according to the ISO principles for semantic annotation, as formulated in ISO 24617-6. The DialogBank makes use of three alternative representation formats, which are shown to be interoperable.", "phrases": ["dialogbank", "language resource", "standard"], "overall_score": 4.273952967643554, "scores": [3.862602980296407, 1.0781897994775045, 0.8946866860871052], "rank_score": 1.9451598219536723}
-{"id": "ganapathibhotla-liu-2008-mining", "title": "Mining Opinions in Comparative Sentences", "abstract": "This paper studies sentiment analysis from the user-generated content on the Web. In particular, it focuses on mining opinions from comparative sentences, i.e., to determine which entities in a comparison are preferred by its author. A typical comparative sentence compares two or more entities. For example, the sentence, \"the picture quality of Camera X is better than that of Camera Y\", compares two entities \"Camera X\" and \"Camera Y\" with regard to their picture quality. Clearly, \"Camera X\" is the preferred entity. Existing research has studied the problem of extracting some key elements in a comparative sentence. However, there is still no study of mining opinions from comparative sentences, i.e., identifying preferred entities of the author. This paper studies this problem, and proposes a technique to solve the problem.\nOur experiments using comparative sentences from product reviews and forum posts show that the approach is effective.", "phrases": ["comparative sentence", "web", "opinion mining"], "overall_score": 5.3895273365117715, "scores": [4.271069724889641, 1.0068785250748624, 0.5536350208762738], "rank_score": 1.943861090280259}
-{"id": "alm-etal-2005-emotions", "title": "Emotions from Text: Machine Learning for Text-based Emotion Prediction", "abstract": "In addition to information, text contains attitudinal, and more specifically, emotional content. This paper explores the text-based emotion prediction problem empirically, using supervised machine learning with the SNoW learning architecture. The goal is to classify the emotional affinity of sentences in the narrative domain of children's fairy tales, for subsequent usage in appropriate expressive rendering of text-to-speech synthesis. Initial experiments on a preliminary data set of 22 fairy tales show encouraging results over a naive baseline and BOW approach for classification of emotional versus non-emotional contents, with some dependency on parameter tuning. We also discuss results for a tripartite model which covers emotional valence, as well as feature set alternations. In addition, we present plans for a more cognitively sound sequential model, taking into consideration a larger set of basic emotions.", "phrases": ["machine learning", "tale", "emotion", "text instance", "decade"], "overall_score": 7.352692822859088, "scores": [5.174970948889325, 1.9703633636802675, 0.8882833591065151, 0.850047468235915, 0.8313511966845823], "rank_score": 1.9430032673193214}
-{"id": "tu-etal-2018-learning", "title": "Learning to Remember Translation History with a Continuous Cache", "abstract": "Existing neural machine translation (NMT) models generally translate sentences in isolation, missing the opportunity to take advantage of document-level information. In this work, we propose to augment NMT models with a very light-weight cache-like memory network, which stores recent hidden representations as translation history. The probability distribution over generated words is updated online depending on the translation history retrieved from the memory, endowing NMT models with the capability to dynamically adapt over time. Experiments on multiple domains with different topics and styles show the effectiveness of the proposed approach with negligible impact on the computational cost.", "phrases": ["translation history", "cache", "neural machine translation", "memory network", "hidden representation"], "overall_score": 7.117741594001863, "scores": [3.9644309158892335, 1.798844221958504, 1.6051994361578308, 1.2425388536760416, 1.1032240842234655], "rank_score": 1.942847502381015}
-{"id": "goldberg-nivre-2012-dynamic", "title": "A Dynamic Oracle for Arc-Eager Dependency Parsing", "abstract": "The standard training regime for transition-based dependency parsers makes use of an oracle, which predicts an optimal transition sequence for a sentence and its gold tree. We present an improved oracle for the arc-eager transition system, which provides a set of optimal transitions for every valid parser configuration, including configurations from which the gold tree is not reachable. In such cases, the oracle provides transitions that will lead to the best reachable tree from the given configuration. The oracle is efficient to implement and provably correct.\nWe use the oracle to train a deterministic left-to-right dependency parser that is less sensitive to error propagation, using an online training procedure that also explores parser configurations resulting from non-optimal sequences of transitions. This new parser outperforms greedy parsers trained using conventional oracles on a range of data sets, with an average improvement of over 1.2 LAS points and up to almost 3 LAS points on some data sets.", "phrases": ["oracle", "dependency parsing", "transition sequence"], "overall_score": 6.253194848072265, "scores": [4.39956708182104, 0.8383747074302248, 0.5900508283219602], "rank_score": 1.9426642058577415}
-{"id": "kudo-2018-subword", "title": "Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates", "abstract": "Subword units are an effective way to alleviate the open vocabulary problems in neural machine translation (NMT). While sentences are usually converted into unique subword sequences, subword segmentation is potentially ambiguous and multiple segmentations are possible even with the same vocabulary. The question addressed in this paper is whether it is possible to harness the segmentation ambiguity as a noise to improve the robustness of NMT. We present a simple regularization method, subword regularization, which trains the model with multiple subword segmentations probabilistically sampled during training. In addition, for better subword sampling, we propose a new subword segmentation algorithm based on a unigram language model. We experiment with multiple corpora and report consistent improvements especially on low resource and out-of-domain settings.", "phrases": ["consistent improvement", "subword regularization", "tokenization", "translation performance"], "overall_score": 7.882184995938527, "scores": [5.403637432643162, 1.2378438488367196, 0.6002700833079294, 0.5231008697744572], "rank_score": 1.9412130586405671}
-{"id": "bott-etal-2012-spanish", "title": "Can Spanish Be Simpler? LexSiS: Lexical Simplification for Spanish", "abstract": "Lexical simplification is the task of replacing a word in a given context by an easier-to-understand synonym. Although a number of lexical simplification approaches have been developed in recent years, most of them have been applied to English, with recent work taking advantage of parallel monolingual datasets for training. Here we present LexSiS, a lexical simplification system for Spanish that does not require a parallel corpus, but instead relies on freely available resources, such as an on-line dictionary and the Web as a corpus. LexSiS uses three techniques for finding a suitable word substitute: a word vector model, word frequency, and word length. In experiments with human informants, we have verified that LexSiS performs better than a hard-to-beat baseline based on synonym frequency.", "phrases": ["spanish", "lexical simplification", "frequency"], "overall_score": 5.708570608014387, "scores": [2.8653404831456415, 2.4282768689042795, 0.5226729311643661], "rank_score": 1.9387634277380956}
-{"id": "daume-iii-marcu-2006-bayesian", "title": "Bayesian Query-Focused Summarization", "abstract": "We present BAYESUM (for \"Bayesian summarization\"), a model for sentence extraction in query-focused summarization. BAYESUM leverages the common case in which multiple documents are relevant to a single query. Using these documents as reinforcement for query terms, BAYESUM is not afflicted by the paucity of information in short queries.\nWe show that approximate inference in BAYESUM is possible on large data sets and results in a state-of-the-art summarization system. Furthermore, we show how BAYESUM can be understood as a justified query expansion technique in the language modeling for IR framework.", "phrases": ["summarization", "bayesum", "query"], "overall_score": 5.701590177572185, "scores": [3.757280008686704, 1.182803726635925, 0.8690943980136078], "rank_score": 1.936392711112079}
-{"id": "sennrich-haddow-2016-linguistic", "title": "Linguistic Input Features Improve Neural Machine Translation", "abstract": "Neural machine translation has recently achieved impressive results, while using little in the way of external linguistic information. In this paper we show that the strong learning capability of neural MT models does not make linguistic features redundant; they can be easily incorporated to provide further improvements in performance. We generalize the embedding layer of the encoder in the attentional encoder-decoder architecture to support the inclusion of arbitrary features, in addition to the baseline word feature. We add morphological features, part-of-speech tags, and syntactic dependency labels as input features to English<->German, and English->Romanian neural machine translation systems. In experiments on WMT16 training and test sets, we find that linguistic input features improve model quality according to three metrics: perplexity, BLEU and CHRF3. An open-source implementation of our neural MT system is available, as are sample files and configurations.", "phrases": ["input feature", "neural machine translation", "part-of-speech tag", "dependency label", "translation quality"], "overall_score": 7.682850361541469, "scores": [3.724960622112228, 2.3654347574434875, 1.4476352247494142, 1.2653396820079388, 0.8720522378053311], "rank_score": 1.93508450482368}
-{"id": "sirts-goldwater-2013-minimally", "title": "Minimally-Supervised Morphological Segmentation using Adaptor Grammars", "abstract": "This paper explores the use of Adaptor Grammars, a nonparametric Bayesian modelling framework, for minimally supervised morphological segmentation. We compare three training methods: unsupervised training, semi-supervised training, and a novel model selection method. In the model selection method, we train unsupervised Adaptor Grammars using an over-articulated metagrammar, then use a small labelled data set to select which potential morph boundaries identified by the metagrammar should be returned in the final output. We evaluate on five languages and show that semi-supervised training provides a boost over unsupervised training, while the model selection method yields the best average results over all languages and is competitive with state-of-the-art semi-supervised systems. Moreover, this method provides the potential to tune performance according to different evaluation metrics or downstream tasks.", "phrases": ["morphological segmentation", "adaptor grammars", "nonparametric bayesian model"], "overall_score": 5.363784482517873, "scores": [3.7887472543539684, 1.478027568136596, 0.5369541325037179], "rank_score": 1.9345763183314275}
-{"id": "haghighi-klein-2010-coreference", "title": "Coreference Resolution in a Modular, Entity-Centered Model", "abstract": "Coreference resolution is governed by syntactic, semantic, and discourse constraints. We present a generative, model-based approach in which each of these factors is modularly encapsulated and learned in a primarily unsupervised manner.\nOur semantic representation first hypothesizes an underlying set of latent entity types, which generate specific entities that in turn render individual mentions. By sharing lexical statistics at the level of abstract entity types, our model is able to substantially reduce semantic compatibility errors, resulting in the best results to date on the complete end-to-end coreference task.", "phrases": ["entity-centered model", "mention", "coreference resolution"], "overall_score": 6.929851723788916, "scores": [3.206095971951807, 0.8913587969058814, 1.70398106234892], "rank_score": 1.9338119437355363}
-{"id": "nedoluzhko-etal-2016-coreference", "title": "Coreference in Prague Czech-English Dependency Treebank", "abstract": "We present coreference annotation on parallel Czech-English texts of the Prague Czech-English Dependency Treebank (PCEDT). The paper describes innovations made to PCEDT 2.0 concerning coreference, as well as coreference information already present there. We characterize the coreference annotation scheme, give the statistics and compare our annotation with the coreference annotation in Ontonotes and Prague Dependency Treebank for Czech. We also present the experiments made using this corpus to improve the alignment of coreferential expressions, which helps us to collect better statistics of correspondences between types of coreferential relations in Czech and English. The corpus released as PCEDT 2.0 Coref is publicly available.", "phrases": ["prague", "czech-english dependency treebank", "coreference"], "overall_score": 2.6774489110671027, "scores": [2.577082666985729, 2.295911217905465, 0.9211195144539025], "rank_score": 1.9313711331150323}
-{"id": "yang-etal-2008-entity", "title": "An Entity-Mention Model for Coreference Resolution with Inductive Logic Programming", "abstract": "The traditional mention-pair model for coreference resolution cannot capture information beyond mention pairs for both learning and testing. To deal with this problem, we present an expressive entity-mention model that performs coreference resolution at an entity level. The model adopts the Inductive Logic Programming (ILP) algorithm, which provides a relational way to organize different knowledge of entities and mentions. The solution can explicitly express relations between an entity and the contained mentions, and automatically learn first-order rules important for coreference decision. The evaluation on the ACE data set shows that the ILP based entity-mention model is effective for the coreference resolution task.", "phrases": ["entity-mention model", "coreference resolution", "inductive logic programming"], "overall_score": 3.1074330604463776, "scores": [2.0525118890023797, 1.8708404129517582, 1.8689178498283356], "rank_score": 1.9307567172608244}
-{"id": "bohnet-nivre-2012-transition", "title": "A Transition-Based System for Joint Part-of-Speech Tagging and Labeled Non-Projective Dependency Parsing", "abstract": "Most current dependency parsers presuppose that input words have been morphologically disambiguated using a part-of-speech tagger before parsing begins. We present a transition-based system for joint part-of-speech tagging and labeled dependency parsing with non-projective trees.\nExperimental evaluation on Chinese, Czech, English and German shows consistent improvements in both tagging and parsing accuracy when compared to a pipeline system, which lead to improved state-of-the-art results for all languages.", "phrases": ["transition-based system", "joint part-of-speech tagging", "dependency parsing"], "overall_score": 7.16543383658755, "scores": [1.9571429357213366, 1.730106434950996, 2.101328613984909], "rank_score": 1.929525994885747}
-{"id": "bosselut-etal-2019-comet", "title": "COMET: Commonsense Transformers for Automatic Knowledge Graph Construction", "abstract": "We present the first comprehensive study on automatic knowledge base construction for two prevalent commonsense knowledge graphs: ATOMIC (Sap et al., 2019) and ConceptNet (Speer et al., 2017). Contrary to many conventional KBs that store knowledge with canonical templates, commonsense KBs only store loosely structured open-text descriptions of knowledge. We posit that an important step toward automatic commonsense completion is the development of generative models of commonsense knowledge, and propose COMmonsEnse Transformers (COMET) that learn to generate rich and diverse commonsense descriptions in natural language. Despite the challenges of commonsense modeling, our investigation reveals promising results when implicit knowledge from deep pre-trained language models is transferred to generate explicit knowledge in commonsense knowledge graphs. Empirical results demonstrate that COMET is able to generate novel knowledge that humans rate as high quality, with up to 77.5% (ATOMIC) and 91.7% (ConceptNet) precision at top 1, which approaches human performance for these resources. Our findings suggest that using generative commonsense models for automatic commonsense KB completion could soon be a plausible alternative to extractive methods.", "phrases": ["commonsense transformer", "language model", "comet", "knowledge model", "reasoning"], "overall_score": 7.982822683547105, "scores": [4.767324416266641, 0.8506580052050382, 2.2184019738361163, 1.2404652111345842, 0.5569460708946102], "rank_score": 1.9267591354673983}
-{"id": "yates-etal-2017-depression", "title": "Depression and Self-Harm Risk Assessment in Online Forums", "abstract": "Users suffering from mental health conditions often turn to online resources for support, including specialized online support communities or general communities such as Twitter and Reddit. In this work, we present a framework for supporting and studying users in both types of communities. We propose methods for identifying posts in support communities that may indicate a risk of self-harm, and demonstrate that our approach outperforms strong previously proposed methods for identifying such posts. Self-harm is closely related to depression, which makes identifying depressed users on general forums a crucial related task. We introduce a large-scale general forum dataset consisting of users with self-reported depression diagnoses matched with control users. We show how our method can be applied to effectively identify depressed users from their use of language alone.\nWe demonstrate that our method outperforms strong baselines on this general forum dataset.", "phrases": ["self-harm", "mental health condition", "depression"], "overall_score": 6.201716896425421, "scores": [3.7790171446530088, 1.1589710412852037, 0.8420268570583804], "rank_score": 1.9266716809988642}
We demonstrate that our method outperforms strong baselines on this general forum dataset.", "phrases": ["self-harm", "mental health condition", "depression"], "overall_score": 6.201716896425421, "scores": [3.7790171446530088, 1.1589710412852037, 0.8420268570583804], "rank_score": 1.9266716809988642} -{"id": "ding-etal-2014-using", "title": "Using Structured Events to Predict Stock Price Movement: An Empirical Investigation", "abstract": "It has been shown that news events influence the trends of stock price movements. However, previous work on news-driven stock market prediction rely on shallow features (such as bags-of-words, named entities and noun phrases), which do not capture structured entity-relation information, and hence cannot represent complete and exact events. Recent advances in Open Information Extraction (Open IE) techniques enable the extraction of structured events from web-scale data. We propose to adapt Open IE technology for event-based stock price movement prediction, extracting structured events from large-scale public news without manual efforts. Both linear and nonlinear models are employed to empirically investigate the hidden and complex relationships between events and the stock market. Largescale experiments show that the accuracy of S&P 500 index prediction is 60%, and that of individual stock prediction can be over 70%. Our event-based system outperforms bags-of-words-based baselines, and previously reported systems trained on S&P 500 stock historical data.", "phrases": ["structured event", "stock price movement", "news"], "overall_score": 4.435680991599307, "scores": [2.62043240798053, 1.9358429531715868, 1.2228999732520534], "rank_score": 1.9263917781347233} -{"id": "lin-etal-2016-neural", "title": "Neural Relation Extraction with Selective Attention over Instances", "abstract": "Distant supervised relation extraction has been widely used to \ufb01nd novel relational facts from text. However, distant supervision inevitably accompanies with the wrong labelling problem, and these noisy data will substantially hurt the performance of relation extraction. To alleviate this issue, we propose a sentence-level attention-based model for relation extraction. In this model, we employ convolutional neural networks to embed the semantics of sentences. Afterwards, we build sentence-level attention over multiple instances, which is expected to dynamically reduce the weights of those noisy instances. Experimental results on real-world datasets show that, our model can make full use of all informative sentences and effectively reduce the in\ufb02uence of wrong labelled instances. Our model achieves signi\ufb01cant and consistent improvements on relation extraction as compared with baselines. The source code of this paper can be obtained from https: //github.com/thunlp/NRE .", "phrases": ["selective attention", "nre", "neural relation extraction", "bag", "entity pair"], "overall_score": 8.417055225643933, "scores": [2.6807226170245486, 2.2593451708314265, 1.8498641535623317, 1.7951047269185556, 1.0466782643537795], "rank_score": 1.9263429865381283} -{"id": "liu-etal-2017-exploiting", "title": "Exploiting Argument Information to Improve Event Detection via Supervised Attention Mechanisms", "abstract": "This paper tackles the task of event detection (ED), which involves identifying and categorizing events. We argue that arguments provide significant clues to this task, but they are either completely ignored or exploited in an indirect manner in existing detection approaches. 
In this work, we propose to exploit argument information explicitly for ED via supervised attention mechanisms. In specific, we systematically investigate the proposed model under the supervision of different attention strategies. Experimental results show that our approach advances state-of-the-arts and achieves the best F1 score on ACE 2005 dataset.", "phrases": ["argument information", "event detection", "supervised attention mechanism"], "overall_score": 5.337812940978899, "scores": [2.718975131839909, 2.458933911249341, 0.5977181512681424], "rank_score": 1.9252090647857976} -{"id": "schnabel-etal-2015-evaluation", "title": "Evaluation methods for unsupervised word embeddings", "abstract": "We present a comprehensive study of evaluation methods for unsupervised embedding techniques that obtain meaningful representations of words from text. Different evaluations result in different orderings of embedding methods, calling into question the common assumption that there is one single optimal vector representation. We present new evaluation techniques that directly compare embeddings with respect to specific queries. These methods reduce bias, provide greater insight, and allow us to solicit data-driven relevance judgments rapidly and accurately through crowdsourcing.", "phrases": ["word embedding", "evaluation method", "extrinsic task", "analogy"], "overall_score": 7.567043608137936, "scores": [3.5193709892783542, 3.1065892602055185, 0.5498243510846381, 0.5224648285597098], "rank_score": 1.9245623572820552} -{"id": "stab-gurevych-2017-parsing", "title": "Parsing Argumentation Structures in Persuasive Essays", "abstract": "In this article, we present a novel approach for parsing argumentation structures. We identify argument components using sequence labeling at the token level and apply a new joint model for detecting argumentation structures. The proposed model globally optimizes argument component types and argumentative relations using Integer Linear Programming. We show that our model significantly outperforms challenging heuristic baselines on two different types of discourse. Moreover, we introduce a novel corpus of persuasive essays annotated with argumentation structures. We show that our annotation scheme and annotation guidelines successfully guide human annotators to substantial agreement.", "phrases": ["persuasive essay", "discourse", "writing support system"], "overall_score": 7.998869166058651, "scores": [4.3482430261172444, 0.9017747624744267, 0.5199466507045574], "rank_score": 1.9233214797654092} -{"id": "huang-2008-forest", "title": "Forest Reranking: Discriminative Parsing with Non-Local Features", "abstract": "Conventional n-best reranking techniques often suffer from the limited scope of the nbest list, which rules out many potentially good alternatives. We instead propose forest reranking, a method that reranks a packed forest of exponentially many parses. Since exact inference is intractable with non-local features, we present an approximate algorithm inspired by forest rescoring that makes discriminative training practical over the whole Treebank. 
Our final result, an F-score of 91.7, outperforms both 50-best and 100-best reranking baselines, and is better than any previously reported systems trained on the Treebank.", "phrases": ["non-local feature", "list", "packed forest", "forest reranking"], "overall_score": 6.9929140161088155, "scores": [2.8063040253348284, 2.5537070884878803, 1.7925449465039056, 0.5370641537934141], "rank_score": 1.9224050535300072} -{"id": "talmor-etal-2020-olmpics", "title": "oLMpics-On What Language Model Pre-training Captures", "abstract": "Recent success of pre-trained language models (LMs) has spurred widespread interest in the language capabilities that they possess. However, efforts to understand whether LM representations are useful for symbolic reasoning tasks have been limited and scattered. In this work, we propose eight reasoning tasks, which conceptually require operations such as comparison, conjunction, and composition. A fundamental challenge is to understand whether the performance of a LM on a task should be attributed to the pre-trained representations or to the process of fine-tuning on the task data. To address this, we propose an evaluation protocol that includes both zero-shot evaluation (no fine-tuning), as well as comparing the learning curve of a fine-tuned LM to the learning curve of multiple controls, which paints a rich picture of the LM capabilities. Our main findings are that: (a) different LMs exhibit qualitatively different reasoning abilities, e.g., RoBERTa succeeds in reasoning tasks where BERT fails completely; (b) LMs do not reason in an abstract manner and are context-dependent, e.g., while RoBERTa can compare ages, it can do so only when the ages are in the typical range of human ages; (c) On half of our reasoning tasks all models fail completely. Our findings and infrastructure can help future work on designing new datasets, models, and objective functions for pre-training.", "phrases": ["language model", "pre-trained representation", "high performance"], "overall_score": 6.77426147766939, "scores": [4.050809478070719, 1.1815888238778307, 0.5307062618482196], "rank_score": 1.921034854598923} -{"id": "song-etal-2018-graph", "title": "A Graph-to-Sequence Model for AMR-to-Text Generation", "abstract": "The problem of AMR-to-text generation is to recover a text representing the same meaning as an input AMR graph. The current state-of-the-art method uses a sequence-to-sequence model, leveraging LSTM for encoding a linearized AMR structure. Although being able to model non-local semantic information, a sequence LSTM can lose information from the AMR graph structure, and thus facing challenges with large-graphs, which result in long sequences. We introduce a neural graph-to-sequence model, using a novel LSTM structure for directly encoding graph-level semantics. On a standard benchmark, our model shows superior results to existing methods in the literature.", "phrases": ["graph-to-sequence model", "amr-to-text generation", "amr structure", "sequential encoder"], "overall_score": 6.929755276927491, "scores": [3.393330236517714, 3.2107250229320177, 0.5515121842607393, 0.5208798692143798], "rank_score": 1.9191118282312125} -{"id": "mccoy-etal-2020-berts", "title": "BERTs of a feather do not generalize together: Large variability in generalization across models with similar test set performance", "abstract": "If the same neural network architecture is trained multiple times on the same dataset, will it make similar linguistic generalizations across runs? 
To study this question, we fine-tuned 100 instances of BERT on the Multi-genre Natural Language Inference (MNLI) dataset and evaluated them on the HANS dataset, which evaluates syntactic generalization in natural language inference. On the MNLI development set, the behavior of all instances was remarkably consistent, with accuracy ranging between 83.6% and 84.8%. In stark contrast, the same models varied widely in their generalization performance. For example, on the simple case of subject-object swap (e.g., determining that \u201cthe doctor visited the lawyer\u201d does not entail \u201cthe lawyer visited the doctor\u201d), accuracy ranged from 0.0% to 66.2%. Such variation is likely due to the presence of many local minima in the loss surface that are equally attractive to a low-bias learner such as a neural network; decreasing the variability may therefore require models with stronger inductive biases.", "phrases": ["generalization", "variability", "bert"], "overall_score": 4.4189028997331965, "scores": [2.7624298756618906, 2.194480430170976, 0.8004051304283492], "rank_score": 1.9191051454204056} -{"id": "khayrallah-koehn-2018-impact", "title": "On the Impact of Various Types of Noise on Neural Machine Translation", "abstract": "We examine how various types of noise in the parallel training data impact the quality of neural machine translation systems. We create five types of artificial noise and analyze how they degrade performance in neural and statistical machine translation. We find that neural models are generally more harmed by noise than statistical models. For one especially egregious type of noise they learn to just copy the input sentence.", "phrases": ["various type", "noise", "neural machine translation", "wrong language", "translation model"], "overall_score": 7.216197116063683, "scores": [4.93302955427682, 2.4114668053363215, 0.8421667274545531, 0.8351180714691493, 0.5711637713994661], "rank_score": 1.9185889859872618} -{"id": "sennrich-etal-2017-nematus", "title": "Nematus: a Toolkit for Neural Machine Translation", "abstract": "We present Nematus, a toolkit for Neural Machine Translation. The toolkit prioritizes high translation accuracy, usability, and extensibility. Nematus has been used to build top-performing submissions to shared translation tasks at WMT and IWSLT, and has been used to train systems for production environments.", "phrases": ["toolkit", "neural machine translation", "nematus"], "overall_score": 4.215009559949087, "scores": [2.7593101550441252, 2.112371572867053, 0.8833188351979963], "rank_score": 1.9183335210363914} -{"id": "zeng-etal-2018-extracting", "title": "Extracting Relational Facts by an End-to-End Neural Model with Copy Mechanism", "abstract": "The relational facts in sentences are often complicated. Different relational triplets may have overlaps in a sentence. We divided the sentences into three types according to triplet overlap degree, including Normal, EntityPairOverlap and SingleEntiyOverlap. Existing methods mainly focus on Normal class and fail to extract relational triplets precisely. In this paper, we propose an end-to-end model based on sequence-to-sequence learning with copy mechanism, which can jointly extract relational facts from sentences of any of these classes. We adopt two different strategies in decoding process: employing only one united decoder or applying multiple separated decoders. 
We test our models on two public datasets, and they significantly outperform the baseline method.", "phrases": ["relational fact", "copy mechanism", "sequence-to-sequence model", "extraction"], "overall_score": 6.092385411765077, "scores": [3.916613330829164, 1.7720104531541834, 1.4504228449162997, 0.5290241301133418], "rank_score": 1.9170176897532474} -{"id": "krahmer-van-deemter-2012-computational", "title": "Computational Generation of Referring Expressions: A Survey", "abstract": "This article offers a survey of computational research on referring expression generation (REG). It introduces the REG problem and describes early work in this area, discussing what basic assumptions lie behind it, and showing how its remit has widened in recent years. We discuss computational frameworks underlying REG, and demonstrate a recent trend that seeks to link REG algorithms with well-established Knowledge Representation techniques. Considerable attention is given to recent efforts at evaluating REG algorithms and the lessons that they allow us to learn. The article concludes with a discussion of the way forward in REG, focusing on references in larger and more realistic settings.", "phrases": ["referring expression", "survey", "expression generation", "reg", "object"], "overall_score": 7.4970007922911455, "scores": [2.713731465304528, 0.8957102979753119, 2.382125598397631, 2.115321942011937, 1.4751105744920965], "rank_score": 1.9163999756363004} -{"id": "lison-tiedemann-2016-opensubtitles2016", "title": "OpenSubtitles2016: Extracting Large Parallel Corpora from Movie and TV Subtitles", "abstract": "We present a new major release of the OpenSubtitles collection of parallel corpora. The release is compiled from a large database of movie and TV subtitles and includes a total of 1689 bitexts spanning 2.6 billion sentences across 60 languages. The release also incorporates a number of enhancements in the preprocessing and alignment of the subtitles, such as the automatic correction of OCR errors and the use of meta-data to estimate the quality of each subtitle and score subtitle pairs.", "phrases": ["parallel corpora", "movie", "opensubtitles"], "overall_score": 5.637512773973147, "scores": [2.275935350968456, 1.9404702011468433, 1.5274860488263906], "rank_score": 1.91463053364723} -{"id": "du-etal-2021-self", "title": "Self-training Improves Pre-training for Natural Language Understanding", "abstract": "Unsupervised pre-training has led to much recent progress in natural language understanding. In this paper, we study self-training as another way to leverage unlabeled data through semi-supervised learning. To obtain additional data for a specific task, we introduce SentAugment, a data augmentation method which computes task-specific query embeddings from labeled data to retrieve sentences from a bank of billions of unlabeled sentences crawled from the web. Unlike previous semi-supervised methods, our approach does not require in-domain unlabeled data and is therefore more generally applicable. Experiments show that self-training is complementary to strong RoBERTa baselines on a variety of tasks. Our augmentation approach leads to scalable and effective self-training with improvements of up to 2.6% on standard text classification benchmarks. 
Finally, we also show strong gains on knowledge distillation and few-shot learning.", "phrases": ["natural language understanding", "semi-supervised learning", "self-training"], "overall_score": 5.420612749652804, "scores": [3.2329844545424886, 1.9446513830459162, 0.5620795576796713], "rank_score": 1.9132384650893586} -{"id": "yu-hatzivassiloglou-2003-towards", "title": "Towards Answering Opinion Questions: Separating Facts from Opinions and Identifying the Polarity of Opinion Sentences", "abstract": "Opinion question answering is a challenging task for natural language processing. In this paper, we discuss a necessary component for an opinion question answering system: separating opinions from fact, at both the document and sentence level. We present a Bayesian classifier for discriminating between documents with a preponderance of opinions such as editorials from regular news stories, and describe three unsupervised, statistical techniques for the significantly harder task of detecting opinions at the sentence level. We also present a first model for classifying opinion sentences as positive or negative in terms of the main perspective being expressed in the opinion. Results from a large collection of news stories and a human evaluation of 400 sentences are reported, indicating that we achieve very high performance in document classification (upwards of 97% precision and recall), and respectable performance in detecting opinions and classifying them at the sentence level as positive, negative, or neutral (up to 91% accuracy).", "phrases": ["opinion question", "polarity", "sentence level", "subjectivity classification"], "overall_score": 7.884335859396707, "scores": [3.1496461989978357, 1.9981742437925416, 1.4077150178610556, 1.0859272336248187], "rank_score": 1.9103656735690628} -{"id": "wang-etal-2021-k", "title": "K-Adapter: Infusing Knowledge into Pre-Trained Models with Adapters", "abstract": "We study the problem of injecting knowledge into large pre-trained models like BERT and RoBERTa. Existing methods typically update the original parameters of pre-trained models when injecting knowledge. However, when multiple kinds of knowledge are injected, they may suffer from catastrophic forgetting. To address this, we propose K-Adapter, which keeps the original parameters of the pre-trained model fixed and supports continual knowledge infusion. Taking RoBERTa as the pre-trained model, K-Adapter has a neural adapter for each kind of infused knowledge, like a plug-in connected to RoBERTa. There is no information flow between different adapters; thus, different adapters can be trained efficiently in a distributed way. We inject two kinds of knowledge, including factual knowledge obtained from automatically aligned text-triplets on Wikipedia and Wikidata, and linguistic knowledge obtained from dependency parsing. Results on three knowledge-driven tasks (six datasets in total) including relation classification, entity typing and question answering demonstrate that each adapter improves the performance, and the combination of both adapters brings further improvements. 
Probing experiments further indicate that K-Adapter captures richer factual and commonsense knowledge than RoBERTa.", "phrases": ["neural adapter", "linguistic knowledge", "k-adapter", "plm", "limitation"], "overall_score": 6.844680068602785, "scores": [4.788651861940865, 1.52739720409946, 1.4935254419203614, 1.1813364731315055, 0.5593107229846441], "rank_score": 1.9100443408153673} -{"id": "mallinson-etal-2017-paraphrasing", "title": "Paraphrasing Revisited with Neural Machine Translation", "abstract": "Recognizing and generating paraphrases is an important component in many natural language processing applications. A well-established technique for automatically extracting paraphrases leverages bilingual corpora to find meaning-equivalent phrases in a single language by \u201cpivoting\u201d over a shared translation in another language. In this paper we revisit bilingual pivoting in the context of neural machine translation and present a paraphrasing model based purely on neural networks. Our model represents paraphrases in a continuous space, estimates the degree of semantic relatedness between text segments of arbitrary length, and generates candidate paraphrases for any source input. Experimental results across tasks and datasets show that neural paraphrases outperform those obtained with conventional phrase-based pivoting approaches.", "phrases": ["neural machine translation", "bilingual pivoting", "paraphrasing", "back-translation", "sentence similarity score"], "overall_score": 7.224575199736949, "scores": [3.879129945030867, 2.6081622231548023, 1.2490518274727005, 1.2412297932363907, 0.5681624170580212], "rank_score": 1.9091472411905563} -{"id": "resnik-etal-2013-using", "title": "Using Topic Modeling to Improve Prediction of Neuroticism and Depression in College Students", "abstract": "We investigate the value-add of topic modeling in text analysis for depression, and for neuroticism as a strongly associated personality measure. Using Pennebaker\u2019s Linguistic Inquiry and Word Count (LIWC) lexicon to provide baseline features, we show that straightforward topic modeling using Latent Dirichlet Allocation (LDA) yields interpretable, psychologically relevant \u201cthemes\u201d that add value in prediction of clinical assessments.", "phrases": ["topic modeling", "neuroticism", "depression"], "overall_score": 4.391563247902637, "scores": [2.7787908162731965, 2.1038000816964426, 0.8391041585100734], "rank_score": 1.9072316854932374} -{"id": "zhu-etal-2021-mediasum", "title": "MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization", "abstract": "This paper introduces MediaSum, a large-scale media interview dataset consisting of 463.6K transcripts with abstractive summaries. To create this dataset, we collect interview transcripts from NPR and CNN and employ the overview and topic descriptions as summaries. Compared with existing public corpora for dialogue summarization, our dataset is an order of magnitude larger and contains complex multi-party conversations from multiple domains. We conduct statistical analysis to demonstrate the unique positional bias exhibited in the transcripts of televised and radio interviews. 
We also show that MediaSum can be used in transfer learning to improve a model's performance on other dialogue summarization tasks.", "phrases": ["dialogue summarization", "abstractive summary", "mediasum", "medium interview dataset"], "overall_score": 4.1906052470244655, "scores": [2.610064104416238, 2.171011645503561, 1.96685161756504, 0.8809791908149943], "rank_score": 1.9072266395749582} -{"id": "ettinger-2020-bert", "title": "What BERT Is Not: Lessons from a New Suite of Psycholinguistic Diagnostics for Language Models", "abstract": "Pre-training by language modeling has become a popular and successful approach to NLP tasks, but we have yet to understand exactly what linguistic capacities these pre-training processes confer upon models. In this paper we introduce a suite of diagnostics drawn from human language experiments, which allow us to ask targeted questions about information used by language models for generating predictions in context. As a case study, we apply these diagnostics to the popular BERT model, finding that it can generally distinguish good from bad completions involving shared category or role reversal, albeit with less sensitivity than humans, and it robustly retrieves noun hypernyms, but it struggles with challenging inference and role-based event prediction\u2014 and, in particular, it shows clear insensitivity to the contextual impacts of negation.", "phrases": ["bert", "psycholinguistic diagnostic", "language model", "contextual impact", "negation"], "overall_score": 7.6029311064051806, "scores": [3.3969275665459193, 2.367777055466197, 1.5350125178058485, 1.3909963418078286, 0.8391956602189969], "rank_score": 1.905981828368958} -{"id": "pitenis-etal-2020-offensive", "title": "Offensive Language Identification in Greek", "abstract": "As offensive language has become a rising issue for online communities and social media platforms, researchers have been investigating ways of coping with abusive content and developing systems to detect its different types: cyberbullying, hate speech, aggression, etc. With a few notable exceptions, most research on this topic so far has dealt with English. This is mostly due to the availability of language resources for English. To address this shortcoming, this paper presents the first Greek annotated dataset for offensive language identification: the Offensive Greek Tweet Dataset (OGTD). OGTD is a manually annotated dataset containing 4,779 posts from Twitter annotated as offensive and not offensive. Along with a detailed description of the dataset, we evaluate several computational models trained and tested on this data.", "phrases": ["greek", "twitter", "offensive language identification"], "overall_score": 4.734395174292601, "scores": [3.0963475277284567, 2.0801211003308, 0.5393137028744004], "rank_score": 1.9052607769778858} -{"id": "trnka-etal-2007-effects", "title": "The Effects of Word Prediction on Communication Rate for AAC", "abstract": "Individuals using an Augmentative and Alternative Communication (AAC) device communicate at less than 10% of the speed of \"traditional\" speech, creating a large communication gap. In this user study, we compare the communication rate of pseudo-impaired individuals using two different word prediction algorithms and a system without word prediction. 
Our results show that word prediction can increase AAC communication rate and that more accurate predictions significantly improve communication rate.", "phrases": ["word prediction", "communication rate", "aac"], "overall_score": 3.410166503705752, "scores": [2.2205788186861506, 1.7893325111595981, 1.6998391622361324], "rank_score": 1.9032501640272936} -{"id": "xu-etal-2014-extracting", "title": "Extracting Lexically Divergent Paraphrases from Twitter", "abstract": "We present MultiP (Multi-instance Learning Paraphrase Model), a new model suited to identify paraphrases within the short messages on Twitter. We jointly model paraphrase relations between word and sentence pairs and assume only sentence-level annotations during learning. Using this principled latent variable model alone, we achieve performance competitive with a state-of-the-art method which combines a latent space model with a feature-based supervised classifier. Our model also captures lexically divergent paraphrases that differ from yet complement previous methods; combining our model with previous work significantly outperforms the state-of-the-art. In addition, we present a novel annotation methodology that has allowed us to crowdsource a paraphrase corpus from Twitter. We make this new dataset available to the research community.", "phrases": ["paraphrase", "twitter", "sentence pair"], "overall_score": 5.4992514162850865, "scores": [3.361505427149935, 1.49491049459671, 0.8514153299894973], "rank_score": 1.9026104172453808} -{"id": "ott-etal-2019-fairseq", "title": "fairseq: A Fast, Extensible Toolkit for Sequence Modeling", "abstract": "fairseq is an open-source sequence modeling toolkit that allows researchers and developers to train custom models for translation, summarization, language modeling, and other text generation tasks. The toolkit is based on PyTorch and supports distributed training across multiple GPUs and machines. We also support fast mixed-precision training and inference on modern GPUs. A demo video can be found at ", "phrases": ["extension", "fairseq", "neural machine translation"], "overall_score": 5.880756206671661, "scores": [4.605267312232305, 0.5710227640161641, 0.5312564509262833], "rank_score": 1.9025155090582508} -{"id": "klein-manning-2004-corpus", "title": "Corpus-Based Induction of Syntactic Structure: Models of Dependency and Constituency", "abstract": "We present a generative model for the unsupervised learning of dependency structures. We also describe the multiplicative combination of this dependency model with a model of linear constituency. The product model outperforms both components on their respective evaluation metrics, giving the best published figures for unsupervised dependency parsing and unsupervised constituency parsing. We also demonstrate that the combined model works and is robust cross-linguistically, being able to exploit either attachment or distributional regularities that are salient in the data.", "phrases": ["induction", "generative model", "dependency model", "valence", "pcfg"], "overall_score": 8.18774702595554, "scores": [1.320526625520182, 2.9700544146185948, 2.5203309549459516, 1.655110043402673, 1.0456220103949467], "rank_score": 1.90232880977647} -{"id": "miculicich-etal-2018-document", "title": "Document-Level Neural Machine Translation with Hierarchical Attention Networks", "abstract": "Neural Machine Translation (NMT) can be improved by including document-level contextual information. 
For this purpose, we propose a hierarchical attention model to capture the context in a structured and dynamic manner. The model is integrated into the original NMT architecture as another level of abstraction, conditioning on the NMT model's own previous hidden states. Experiments show that hierarchical attention significantly improves the BLEU score over a strong NMT baseline with the state-of-the-art in context-aware methods, and that both the encoder and decoder benefit from context in complementary ways.", "phrases": ["neural machine translation", "hierarchical attention networks", "contextual information", "nmt model", "document-level translation"], "overall_score": 7.390133656222089, "scores": [3.495744938160238, 0.927241313023217, 1.8334680702836978, 1.799448112045026, 1.438541156046412], "rank_score": 1.8988887179117182} -{"id": "heilman-smith-2010-good", "title": "Good Question! Statistical Ranking for Question Generation", "abstract": "We address the challenge of automatically generating questions from reading materials for educational practice and assessment. Our approach is to overgenerate questions, then rank them. We use manually written rules to perform a sequence of general purpose syntactic transformations (e.g., subject-auxiliary inversion) to turn declarative sentences into questions. These questions are then ranked by a logistic regression model trained on a small, tailored dataset consisting of labeled output from our system. Experimental results show that ranking nearly doubles the percentage of questions rated as acceptable by annotators, from 27% of all questions to 52% of the top ranked 20% of questions.", "phrases": ["question generation", "declarative sentence", "regression model", "template", "rule-based approach"], "overall_score": 7.709198235052474, "scores": [3.792962948505174, 1.682428499024876, 1.5733171695414492, 1.3856977691419592, 1.0586442674084924], "rank_score": 1.8986101307243903} -{"id": "genzel-etal-2010-poetic", "title": "\u201cPoetic\u201d Statistical Machine Translation: Rhyme and Meter", "abstract": "As a prerequisite to translation of poetry, we implement the ability to produce translations with meter and rhyme for phrase-based MT, examine whether the hypothesis space of such a system is flexible enough to accommodate such constraints, and investigate the impact of such constraints on translation quality.", "phrases": ["statistical machine translation", "rhyme", "poem"], "overall_score": 4.8694488769085895, "scores": [3.4121935856406416, 0.9079834558472407, 1.3751972504955143], "rank_score": 1.898458097327799} -{"id": "ghosh-etal-2015-semeval", "title": "SemEval-2015 Task 11: Sentiment Analysis of Figurative Language in Twitter", "abstract": "This report summarizes the objectives and evaluation of the SemEval 2015 task on the sentiment analysis of figurative language on Twitter (Task 11). This is the first sentiment analysis task wholly dedicated to analyzing figurative language on Twitter. Specifically, three broad classes of figurative language are considered: irony, sarcasm and metaphor. Gold standard sets of 8000 training tweets and 4000 test tweets were annotated using workers on the crowdsourcing platform CrowdFlower. 
Participating systems were required to provide a fine-grained sentiment score on an 11-point scale (-5 to +5, including 0 for neutral intent) for each tweet, and systems were evaluated against the gold standard using both a Cosine-similarity and a Mean-Squared-Error measure.", "phrases": ["sentiment analysis", "figurative language", "twitter", "semeval"], "overall_score": 5.37805694998681, "scores": [3.4977345393149335, 2.724727730834494, 0.8356999023221707, 0.5347103674935381], "rank_score": 1.8982181349912841} -{"id": "mcclosky-etal-2006-effective", "title": "Effective Self-Training for Parsing", "abstract": "We present a simple, but surprisingly effective, method of self-training a two-phase parser-reranker system using readily available unlabeled data. We show that this type of bootstrapping is possible for parsing when the bootstrapped parses are processed by a discriminative reranker. Our improved model achieves an f-score of 92.1%, an absolute 1.1% improvement (12% error reduction) over the previous best result for Wall Street Journal parsing. Finally, we provide some analysis to better understand the phenomenon.", "phrases": ["self-training", "unlabeled data", "reranker", "domain adaptation", "good result"], "overall_score": 7.859494575534271, "scores": [4.399047839937691, 1.3589711323905973, 1.3300711187180854, 1.2795222573105394, 1.1173490330214615], "rank_score": 1.8969922762756752} -{"id": "wong-dras-2011-exploiting", "title": "Exploiting Parse Structures for Native Language Identification", "abstract": "Attempts to profile authors according to their characteristics extracted from textual data, including native language, have drawn attention in recent years, via various machine learning approaches utilising mostly lexical features. Drawing on the idea of contrastive analysis, which postulates that syntactic errors in a text are to some extent influenced by the native language of an author, this paper explores the usefulness of syntactic features for native language identification. We take two types of parse substructure as features---horizontal slices of trees, and the more general feature schemas from discriminative parse reranking---and show that using this kind of syntactic feature results in an accuracy score in classification of seven native languages of around 80%, an error reduction of more than 30%.", "phrases": ["native language identification", "syntactic feature", "context-free grammar"], "overall_score": 6.251937464800467, "scores": [3.258293737727847, 1.8825601115382595, 0.5499048736132216], "rank_score": 1.8969195742931093} -{"id": "kazama-torisawa-2007-exploiting", "title": "Exploiting Wikipedia as External Knowledge for Named Entity Recognition", "abstract": "We explore the use of Wikipedia as external knowledge to improve named entity recognition (NER). Our method retrieves the corresponding Wikipedia entry for each candidate word sequence and extracts a category label from the first sentence of the entry, which can be thought of as a definition part. These category labels are used as features in a CRF-based NE tagger. 
We demonstrate using the CoNLL 2003 dataset that the Wikipedia category labels extracted by such a simple method actually improve the accuracy of NER.", "phrases": ["wikipedia", "entity recognition", "candidate word sequence"], "overall_score": 6.2478124979907825, "scores": [4.258087062028024, 0.9027179708950595, 0.526198983357535], "rank_score": 1.8956680054268729} -{"id": "koehn-senellart-2010-convergence", "title": "Convergence of Translation Memory and Statistical Machine Translation", "abstract": "We present two methods that merge ideas from statistical machine translation (SMT) and translation memories (TM). We use a TM to retrieve matches for source segments, and replace the mismatched parts with instructions to an SMT system to fill in the gap. We show that for fuzzy matches of over 70%, one method outperforms both SMT and TM baselines.", "phrases": ["translation memory", "statistical machine translation", "segment", "mismatched part", "smt system"], "overall_score": 6.382182911557905, "scores": [3.4573533454801475, 3.2819715283936888, 1.0740812913541304, 0.8376272258888805, 0.8256850705092883], "rank_score": 1.8953436923252274} -{"id": "calixto-liu-2017-incorporating", "title": "Incorporating Global Visual Features into Attention-based Neural Machine Translation.", "abstract": "We introduce multi-modal, attention-based neural machine translation (NMT) models which incorporate visual features into different parts of both the encoder and the decoder. Global image features are extracted using a pre-trained convolutional neural network and are incorporated (i) as words in the source sentence, (ii) to initialise the encoder hidden state, and (iii) as additional data to initialise the decoder hidden state. In our experiments, we evaluate translations into English and German, how different strategies to incorporate global image features compare and which ones perform best. We also study the impact that adding synthetic multi-modal, multilingual data brings and find that the additional data have a positive impact on multi-modal NMT models. We report new state-of-the-art results and our best models also significantly improve on a comparable phrase-based Statistical MT (PBSMT) model trained on the Multi30k data set according to all metrics evaluated. To the best of our knowledge, it is the first time a purely neural model significantly improves over a PBSMT model on all metrics evaluated on this data set.", "phrases": ["global visual feature", "neural machine translation", "source sentence"], "overall_score": 6.012192768359619, "scores": [2.4415460850838633, 2.400728661726688, 0.8330785567878781], "rank_score": 1.8917844345328099} -{"id": "hill-etal-2015-simlex", "title": "SimLex-999: Evaluating Semantic Models With (Genuine) Similarity Estimation", "abstract": "We present SimLex-999, a gold standard resource for evaluating distributional semantic models that improves on existing resources in several important ways. First, in contrast to gold standards such as WordSim-353 and MEN, it explicitly quantifies similarity rather than association or relatedness so that pairs of entities that are associated but not actually similar (Freud, psychology) have a low rating. We show that, via this focus on similarity, SimLex-999 incentivizes the development of models with a different, and arguably wider, range of applications than those which reflect conceptual association. 
Second, SimLex-999 contains a range of concrete and abstract adjective, noun, and verb pairs, together with an independent rating of concreteness and (free) association strength for each pair. This diversity enables fine-grained analyses of the performance of models on concepts of different types, and consequently greater insight into how architectures can be improved. Further, unlike existing gold standard evaluations, for which automatic approaches have reached or surpassed the inter-annotator agreement ceiling, state-of-the-art models perform well below this ceiling on SimLex-999. There is therefore plenty of scope for SimLex-999 to quantify future improvements to distributional semantic models, guiding the development of the next generation of representation-learning architectures.", "phrases": ["relatedness", "conceptual association", "inter-annotator agreement", "simlex-999", "other type"], "overall_score": 8.330692964722086, "scores": [4.316337623676176, 2.6474215750955294, 1.0777166624855403, 0.8430684200366215, 0.5677177046425914], "rank_score": 1.890452397187292} -{"id": "lu-etal-2008-generative", "title": "A Generative Model for Parsing Natural Language to Meaning Representations", "abstract": "In this paper, we present an algorithm for learning a generative model of natural language sentences together with their formal meaning representations with hierarchical structures. The model is applied to the task of mapping sentences to hierarchical representations of their underlying meaning. We introduce dynamic programming techniques for efficient training and decoding. In experiments, we demonstrate that the model, when coupled with a discriminative reranking technique, achieves state-of-the-art performance when tested on two publicly available corpora. The generative model degrades robustly when presented with instances that are different from those seen in training. This allows a notable improvement in recall compared to previous models.", "phrases": ["generative model", "meaning representation", "hybrid tree", "parsing model", "derivation"], "overall_score": 6.361451896141079, "scores": [3.9770947649867683, 2.847826739459128, 1.0875752806567303, 0.9983205653622187, 0.5351182271149727], "rank_score": 1.8891871155159634} -{"id": "clark-curran-2007-wide", "title": "Wide-Coverage Efficient Statistical Parsing with CCG and Log-Linear Models", "abstract": "This article describes a number of log-linear parsing models for an automatically extracted lexicalized grammar. The models are full parsing models in the sense that probabilities are defined for complete parses, rather than for independent events derived by decomposing the parse tree. Discriminative training is used to estimate the models, which requires incorrect parses for each sentence in the training data as well as the correct parse. The lexicalized grammar formalism used is Combinatory Categorial Grammar (CCG), and the grammar is automatically extracted from CCGbank, a CCG version of the Penn Treebank. The combination of discriminative training and an automatically extracted grammar leads to a significant memory requirement (up to 25 GB), which is satisfied using a parallel implementation of the BFGS optimization algorithm running on a Beowulf cluster. Dynamic programming over a packed chart, in combination with the parallel implementation, allows us to solve one of the largest-scale estimation problems in the statistical parsing literature in under three hours. 
A key component of the parsing system, for both training and testing, is a Maximum Entropy supertagger which assigns CCG lexical categories to words in a sentence. The supertagger makes the discriminative training feasible, and also leads to a highly efficient parser. Surprisingly, given CCG's spurious ambiguity, the parsing speeds are significantly higher than those reported for comparable parsers in the literature. We also extend the existing parsing techniques for CCG by developing a new model and efficient parsing algorithm which exploits all derivations, including CCG's nonstandard derivations. This model and parsing algorithm, when combined with normal-form constraints, give state-of-the-art accuracy for the recovery of predicate-argument dependencies from CCGbank. The parser is also evaluated on DepBank and compared against the RASP parser, outperforming RASP overall and on the majority of relation types. The evaluation on DepBank raises a number of issues regarding parser evaluation. This article provides a comprehensive blueprint for building a wide-coverage CCG parser. We demonstrate that both accurate and highly efficient parsing is possible with CCG.", "phrases": ["ccg", "derivation", "grammar formalism", "supertagger", "c&c parser"], "overall_score": 7.72831804195541, "scores": [4.564306870200183, 1.401719394419888, 1.324407508920596, 1.232289791070747, 0.9150727963292773], "rank_score": 1.8875592721881382} -{"id": "specia-etal-2009-estimating", "title": "Estimating the Sentence-Level Quality of Machine Translation Systems", "abstract": "We investigate the problem of predicting the quality of sentences produced by machine translation systems when reference translations are not available. The problem is addressed as a regression task and a method that takes into account the contribution of different features is proposed. We experiment with this method for translations produced by various MT systems and different language pairs, annotated with quality scores both automatically and manually. Results show that our method allows obtaining good estimates and that identifying a reduced set of relevant features plays an important role. The experiments also highlight a number of outstanding features that were consistently selected as the most relevant and could be used in different ways to improve MT performance or to enhance MT evaluation.", "phrases": ["machine translation", "quality estimation", "run-time", "nist"], "overall_score": 7.717052777699058, "scores": [2.84361996295593, 2.7170066278265126, 1.1343584979306127, 0.844246317361694], "rank_score": 1.8848078515186875} -{"id": "yang-eisenstein-2013-log", "title": "A Log-Linear Model for Unsupervised Text Normalization", "abstract": "We present a unified unsupervised statistical model for text normalization. The relationship between standard and non-standard tokens is characterized by a log-linear model, permitting arbitrary features. The weights of these features are trained in a maximum-likelihood framework, employing a novel sequential Monte Carlo training algorithm to overcome the large label space, which would be impractical for traditional dynamic programming solutions. This model is implemented in a normalization system called UNLOL, which achieves the best known results on two normalization datasets, outperforming more complex systems. 
We use the output of UNLOL to automatically normalize a large corpus of social media text, revealing a set of coherent orthographic styles that underlie online language variation.", "phrases": ["log-linear model", "text normalization", "social medium text"], "overall_score": 5.1034177941398395, "scores": [3.5928376105497453, 1.5031114303984598, 0.5576586271031682], "rank_score": 1.8845358893504578} -{"id": "rashkin-etal-2018-event2mind", "title": "Event2Mind: Commonsense Inference on Events, Intents, and Reactions", "abstract": "We investigate a new commonsense inference task: given an event described in a short free-form text (\u201cX drinks coffee in the morning\u201d), a system reasons about the likely intents (\u201cX wants to stay awake\u201d) and reactions (\u201cX feels alert\u201d) of the event's participants. To support this study, we construct a new crowdsourced corpus of 25,000 event phrases covering a diverse range of everyday events and situations. We report baseline performance on this task, demonstrating that neural encoder-decoder models can successfully compose embedding representations of previously unseen events and reason about the likely intents and reactions of the event participants. In addition, we demonstrate how commonsense inference on people's intents and reactions can help unveil the implicit gender inequality prevalent in modern movie scripts.", "phrases": ["commonsense inference", "intent", "free-form text", "diverse range", "event2mind"], "overall_score": 6.133712602679575, "scores": [3.211871194003758, 2.8638311209180682, 1.5786142174251598, 0.9263265352880063, 0.8323877175183921], "rank_score": 1.8826061570306767} -{"id": "dusmanu-etal-2017-argument", "title": "Argument Mining on Twitter: Arguments, Facts and Sources", "abstract": "Social media collect and spread on the Web personal opinions, facts, fake news and all kinds of information users may be interested in. Applying argument mining methods to such heterogeneous data sources is a challenging open research issue, in particular considering the peculiarities of the language used to write textual messages on social media. In addition, new issues emerge when dealing with arguments posted on such platforms, such as the need to make a distinction between personal opinions and actual facts, and to detect the source disseminating information about such facts to allow for provenance verification. In this paper, we apply supervised classification to identify arguments on Twitter, and we present two new tasks for argument mining, namely facts recognition and source identification. We study the feasibility of the approaches proposed to address these tasks on a set of tweets related to the Grexit and Brexit news topics.", "phrases": ["twitter", "opinion", "argument mining"], "overall_score": 4.1362492431694395, "scores": [2.6890081425111783, 2.4377571262151476, 0.5206991996312624], "rank_score": 1.882488156119196} -{"id": "belinkov-etal-2017-neural", "title": "What do Neural Machine Translation Models Learn about Morphology?", "abstract": "Neural machine translation (MT) models obtain state-of-the-art performance while maintaining a simple, end-to-end architecture. However, little is known about what these models learn about source and target languages during the training process. 
In this work, we analyze the representations learned by neural MT models at various levels of granularity and empirically evaluate the quality of the representations for learning morphology through extrinsic part-of-speech and morphological tagging tasks. We conduct a thorough investigation along several parameters: word-based vs. character-based representations, depth of the encoding layer, the identity of the target language, and encoder vs. decoder representations. Our data-driven, quantitative evaluation sheds light on important aspects in the neural MT system and its ability to capture word structure.", "phrases": ["morphology", "linguistic knowledge", "internal representation"], "overall_score": 7.2054399992316895, "scores": [3.1346315620027414, 1.4491976675906346, 1.0621213202589728], "rank_score": 1.8819835166174494} -{"id": "baldridge-osborne-2004-active", "title": "Active Learning and the Total Cost of Annotation", "abstract": "Active learning (AL) promises to reduce the cost of annotating labeled datasets for trainable human language technologies. Contrary to expectations, when creating labeled training material for HPSG parse selection and later reusing it with other models, gains from AL may be negligible or even negative. This has serious implications for using AL, showing that additional cost-saving strategies may need to be adopted. We explore one such strategy: using a model during annotation to automate some of the decisions. Our best results show an 80% reduction in annotation cost compared with labeling randomly selected data with a single model.", "phrases": ["cost", "hpsg parse selection", "active learning"], "overall_score": 4.133330407552562, "scores": [2.937261295906586, 2.1588217989086402, 0.5473961155305254], "rank_score": 1.8811597367819173} -{"id": "nguyen-chiang-2017-transfer", "title": "Transfer Learning across Low-Resource, Related Languages for Neural Machine Translation", "abstract": "We present a simple method to improve neural translation of a low-resource language pair using parallel data from a related, also low-resource, language pair. The method is based on the transfer method of Zoph et al., but whereas their method ignores any source vocabulary overlap, ours exploits it. First, we split words using Byte Pair Encoding (BPE) to increase vocabulary overlap. Then, we train a model on the first language pair and transfer its parameters, including its source word embeddings, to another model and continue training on the second language pair. Our experiments show that transfer learning helps word-based translation only slightly, but when used on top of a much stronger BPE baseline, it yields larger improvements of up to 4.3 BLEU.", "phrases": ["neural machine translation", "low-resource language", "vocabulary", "transfer learning", "parent"], "overall_score": 6.687736721084976, "scores": [3.7850396009987324, 1.468831678055349, 1.6879665815628453, 1.4057419301340486, 1.0575988379250256], "rank_score": 1.8810357257352002} -{"id": "coppersmith-etal-2015-clpsych", "title": "CLPsych 2015 Shared Task: Depression and PTSD on Twitter", "abstract": "This paper presents a summary of the Computational Linguistics and Clinical Psychology (CLPsych) 2015 shared and unshared tasks. These tasks aimed to provide apples-to-apples comparisons of various approaches to modeling language relevant to mental health from social media. 
The data used for these tasks is from Twitter users who state a diagnosis of depression or post-traumatic stress disorder (PTSD) and demographically-matched community controls. The unshared task was a hackathon held at Johns Hopkins University in November 2014 to explore the data, and the shared task was conducted remotely, with each participating team submitting scores for a held-back test set of users. The shared task consisted of three binary classification experiments: (1) depression versus control, (2) PTSD versus control, and (3) depression versus PTSD. Classifiers were compared primarily via their average precision, though a number of other metrics are used along with this to allow a more nuanced interpretation of the performance measures.", "phrases": ["depression", "ptsd", "twitter", "social medium platform"], "overall_score": 5.08814602152154, "scores": [3.2858273381413987, 2.771304760675235, 0.9104135712243656, 0.5480402957591932], "rank_score": 1.8788964914500483} -{"id": "smith-etal-2010-extracting", "title": "Extracting Parallel Sentences from Comparable Corpora using Document Level Alignment", "abstract": "The quality of a statistical machine translation (SMT) system is heavily dependent upon the number of parallel sentences used in training. In recent years, there have been several approaches developed for obtaining parallel sentences from non-parallel, or comparable data, such as news articles published within the same time period (Munteanu and Marcu, 2005), or web pages with a similar structure (Resnik and Smith, 2003). One resource not yet thoroughly explored is Wikipedia, an online encyclopedia containing linked articles in many languages. We advance the state of the art in parallel sentence extraction by modeling the document level alignment, motivated by the observation that parallel sentence pairs are often found in close proximity. We also include features which make use of the additional annotation given by Wikipedia, and features using an automatically induced lexicon model. Results for both accuracy in sentence extraction and downstream improvement in an SMT system are presented.", "phrases": ["comparable corpora", "document level alignment", "wikipedia", "sentence extraction", "parallel data"], "overall_score": 7.271875772522583, "scores": [3.5484059742440492, 0.9081192369094211, 2.2335833046178393, 1.47312170566564, 1.2290433288548113], "rank_score": 1.8784547100583524} -{"id": "nimishakavi-etal-2016-relation", "title": "Relation Schema Induction using Tensor Factorization with Side Information", "abstract": "Given a set of documents from a specific domain (e.g., medical research journals), how do we automatically build a Knowledge Graph (KG) for that domain? Automatic identification of relations and their schemas, i.e., type signature of arguments of relations (e.g., undergo(Patient, Surgery)), is an important first step towards this goal. We refer to this problem as Relation Schema Induction (RSI). In this paper, we propose Schema Induction using Coupled Tensor Factorization (SICTF), a novel tensor factorization method for relation schema induction. SICTF factorizes Open Information Extraction (OpenIE) triples extracted from a domain corpus along with additional side information in a principled way to induce relation schemas. To the best of our knowledge, this is the first application of tensor factorization for the RSI problem. 
Through extensive experiments on multiple real-world datasets, we find that SICTF is not only more accurate than state-of-the-art baselines, but also significantly faster (about 14x faster).", "phrases": ["tensor factorization", "side information", "relation schema induction"], "overall_score": 2.062992730269875, "scores": [2.046253605527184, 1.8061152561580476, 1.781081860327684], "rank_score": 1.8778169073376387} -{"id": "zhao-ng-2014-domain", "title": "Domain Adaptation with Active Learning for Coreference Resolution", "abstract": "In the literature, most prior work on coreference resolution centered on the newswire domain. Although a coreference resolution system trained on the newswire domain performs well on newswire texts, there is a huge performance drop when it is applied to the biomedical domain. In this paper, we present an approach integrating domain adaptation with active learning to adapt coreference resolution from the newswire domain to the biomedical domain. We explore the effect of domain adaptation, active learning, and target domain instance weighting for coreference resolution. Experimental results show that domain adaptation with active learning and target domain instance weighting achieves performance on MEDLINE abstracts similar to a system trained on coreference annotation of only target domain training instances, but with a greatly reduced number of target domain training instances that we need to annotate.", "phrases": ["active learning", "coreference resolution", "domain adaptation"], "overall_score": 3.3609399499775963, "scores": [1.9401581120512632, 1.8789429426057032, 1.8082278491923753], "rank_score": 1.875776301283114} -{"id": "zhang-etal-2018-graph", "title": "Graph Convolution over Pruned Dependency Trees Improves Relation Extraction", "abstract": "Dependency trees help relation extraction models capture long-range relations between words. However, existing dependency-based models either neglect crucial information (e.g., negation) by pruning the dependency trees too aggressively, or are computationally inefficient because it is difficult to parallelize over different tree structures. We propose an extension of graph convolutional networks that is tailored for relation extraction, which pools information over arbitrary dependency structures efficiently in parallel. To incorporate relevant information while maximally removing irrelevant content, we further apply a novel pruning strategy to the input trees by keeping words immediately around the shortest path between the two entities among which a relation might hold. The resulting model achieves state-of-the-art performance on the large-scale TACRED dataset, outperforming existing sequence and dependency-based neural models. 
We also show through detailed analysis that this model has complementary strengths to sequence models, and combining them further improves the state of the art.", "phrases": ["relation extraction", "tacred dataset", "graph convolution", "input sentence", "many study"], "overall_score": 7.770702134870315, "scores": [4.585062908263844, 2.016566453322693, 1.0611651307250907, 0.8671091027555721, 0.847901675627215], "rank_score": 1.875561054138883} -{"id": "schulte-im-walde-etal-2013-exploring", "title": "Exploring Vector Space Models to Predict the Compositionality of German Noun-Noun Compounds", "abstract": "This paper explores two hypotheses regarding vector space models that predict the compositionality of German noun-noun compounds: (1) Against our intuition, we demonstrate that window-based rather than syntax-based distributional features yield better predictions, and that not adjectives or verbs but nouns represent the most salient part-of-speech. Our overall best result is state-of-the-art, reaching Spearman\u2019s \u03c1 = 0.65 with a word space model of nominal features from a 20-word window of a 1.5 billion word web corpus. (2) While there are no significant differences in predicting compound\u2010modifier vs. compound\u2010head ratings on compositionality, we show that the modifier (rather than the head) properties predominantly influence the degree of compositionality of the compound.", "phrases": ["compositionality", "german noun-noun compound", "component word"], "overall_score": 5.076994524957743, "scores": [3.227535803072756, 1.8361898796661977, 0.560610073176512], "rank_score": 1.8747785853051553} -{"id": "ponti-etal-2020-xcopa", "title": "XCOPA: A Multilingual Dataset for Causal Commonsense Reasoning", "abstract": "In order to simulate human language capacity, natural language processing systems must be able to reason about the dynamics of everyday situations, including their possible causes and effects. Moreover, they should be able to generalise the acquired world knowledge to new languages, modulo cultural differences. Advances in machine reasoning and cross-lingual transfer depend on the availability of challenging evaluation benchmarks. Motivated by both demands, we introduce Cross-lingual Choice of Plausible Alternatives (XCOPA), a typologically diverse multilingual dataset for causal commonsense reasoning in 11 languages, which includes resource-poor languages like Eastern Apur\u00edmac Quechua and Haitian Creole. We evaluate a range of state-of-the-art models on this novel dataset, revealing that the performance of current methods based on multilingual pretraining and zero-shot fine-tuning falls short compared to translation-based transfer. Finally, we propose strategies to adapt multilingual models to out-of-sample resource-lean languages where only a small corpus or a bilingual dictionary is available, and report substantial improvements over the random baseline. 
The XCOPA dataset is freely available at github.com/cambridgeltl/xcopa.", "phrases": ["multilingual dataset", "causal commonsense reasoning", "cultural difference", "xcopa"], "overall_score": 4.117322322642714, "scores": [2.566883697562374, 2.462411756565118, 1.9423898830448767, 0.5238112362990671], "rank_score": 1.873874143367859} -{"id": "das-etal-2016-human", "title": "Human Attention in Visual Question Answering: Do Humans and Deep Networks look at the same regions?", "abstract": "We conduct large-scale studies on `human attention' in Visual Question Answering (VQA) to understand where humans choose to look to answer questions about images. We design and test multiple game-inspired novel attention-annotation interfaces that require the subject to sharpen regions of a blurred image to answer a question. Thus, we introduce the VQA-HAT (Human ATtention) dataset. We evaluate attention maps generated by state-of-the-art VQA models against human attention both qualitatively (via visualizations) and quantitatively (via rank-order correlation). Overall, our experiments show that current attention models in VQA do not seem to be looking at the same regions as humans.", "phrases": ["visual question answering", "same region", "human attention"], "overall_score": 4.801338505259829, "scores": [3.0011429861427796, 1.6872078701293296, 0.9273606094835991], "rank_score": 1.8719038219185693} -{"id": "talmor-etal-2019-commonsenseqa", "title": "CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge", "abstract": "When answering a question, people often draw upon their rich world knowledge in addition to the particular context. Recent work has focused primarily on answering questions given some relevant document or context, and required very little general background. To investigate question answering with prior knowledge, we present CommonsenseQA: a challenging new dataset for commonsense question answering. To capture common sense beyond associations, we extract from ConceptNet (Speer et al., 2017) multiple target concepts that have the same semantic relation to a single source concept. Crowd-workers are asked to author multiple-choice questions that mention the source concept and discriminate in turn between each of the target concepts. This encourages workers to create questions with complex semantics that often require prior knowledge. We create 12,247 questions through this procedure and demonstrate the difficulty of our task with a large number of strong baselines. Our best baseline is based on BERT-large (Devlin et al., 2018) and obtains 56% accuracy, well below human performance, which is 89%.", "phrases": ["multiple-choice question", "commonsenseqa", "reasoning", "language model", "challenging task"], "overall_score": 7.245781040990998, "scores": [5.420549530442249, 1.3893775730064215, 1.1222434311082605, 0.8711395441808671, 0.5552598076805851], "rank_score": 1.8717139772836766} -{"id": "vadapalli-etal-2017-ssas", "title": "SSAS: Semantic Similarity for Abstractive Summarization", "abstract": "Ideally a metric evaluating an abstract system summary should represent the extent to which the system-generated summary approximates the semantic inference conceived by the reader using a human-written reference summary. Most of the previous approaches relied upon word or syntactic sub-sequence overlap to evaluate system-generated summaries. Such metrics cannot evaluate the summary at semantic inference level. 
Through this work we introduce the metric of Semantic Similarity for Abstractive Summarization (SSAS), which leverages natural language inference and paraphrasing techniques to frame a novel approach to evaluate system summaries at semantic inference level. SSAS is based upon a weighted composition of quantities representing the level of agreement, contradiction, independence, paraphrasing, and optionally ROUGE score between a system-generated and a human-written summary.", "phrases": ["semantic similarity", "abstractive summarization", "ssas"], "overall_score": 2.5909431409878545, "scores": [2.049465255082749, 1.8998946897777158, 1.6575512862322126], "rank_score": 1.868970410364226} -{"id": "wang-2017-liar", "title": "\u201cLiar, Liar Pants on Fire\u201d: A New Benchmark Dataset for Fake News Detection", "abstract": "Automatic fake news detection is a challenging problem in deception detection, and it has tremendous real-world political and social impacts. However, statistical approaches to combating fake news have been dramatically limited by the lack of labeled benchmark datasets. In this paper, we present LIAR: a new, publicly available dataset for fake news detection. We collected 12.8K manually labeled short statements in various contexts spanning a decade from PolitiFact.com, which provides a detailed analysis report and links to source documents for each case. This dataset can be used for fact-checking research as well. Notably, this new dataset is an order of magnitude larger than the previously largest public fake news datasets of similar type. Empirically, we investigate automatic fake news detection based on surface-level linguistic patterns. We have designed a novel, hybrid convolutional neural network to integrate meta-data with text. We show that this hybrid approach can improve a text-only deep learning model.", "phrases": ["fake news detection", "liar dataset", "politifact", "news article", "fact verification"], "overall_score": 7.620740704493884, "scores": [4.680078710406835, 1.500876589169531, 1.4643181438904045, 0.8501802010510097, 0.8493295847372065], "rank_score": 1.8689566458509976} -{"id": "bollegala-etal-2006-bottom", "title": "A Bottom-Up Approach to Sentence Ordering for Multi-Document Summarization", "abstract": "Ordering information is a difficult but important task for applications generating natural language texts such as multi-document summarization, question answering, and concept-to-text generation. In multi-document summarization, information is selected from a set of source documents. However, improper ordering of information in a summary can confuse the reader and deteriorate the readability of the summary. Therefore, it is vital to properly order the information in multi-document summarization. We present a bottom-up approach to arrange sentences extracted for multi-document summarization. To capture the association and order of two textual segments (e.g. sentences), we define four criteria: chronology, topical-closeness, precedence, and succession. These criteria are integrated into a criterion by a supervised learning approach. We repeatedly concatenate two textual segments into one segment based on the criterion, until we obtain the overall segment with all sentences arranged. We evaluate the sentence orderings produced by the proposed method and numerous baselines using subjective gradings as well as automatic evaluation measures. 
We introduce the average continuity, an automatic evaluation measure of sentence ordering in a summary, and investigate its appropriateness for this task.", "phrases": ["bottom-up approach", "sentence ordering", "multi-document summarization"], "overall_score": 3.6362950498677273, "scores": [2.043587031429363, 1.8364874316700408, 1.7259835323836616], "rank_score": 1.8686859984943551} -{"id": "wu-etal-2008-domain", "title": "Domain Adaptation for Statistical Machine Translation with Domain Dictionary and Monolingual Corpora", "abstract": "Statistical machine translation systems are usually trained on large amounts of bilingual text and monolingual text. In this paper, we propose a method to perform domain adaptation for statistical machine translation, where in-domain bilingual corpora do not exist. This method first uses out-of-domain corpora to train a baseline system and then uses in-domain translation dictionaries and in-domain monolingual corpora to improve the in-domain performance. We propose an algorithm to combine these different resources in a unified framework. Experimental results indicate that our method achieves absolute improvements of 8.16 and 3.36 BLEU scores on Chinese to English translation and English to French translation respectively, as compared with the baselines using only out-of-domain corpora.", "phrases": ["baseline system", "domain adaptation", "bilingual data"], "overall_score": 6.014573630280057, "scores": [4.491145897228379, 0.5806988947534117, 0.5337522774792481], "rank_score": 1.868532356487013} -{"id": "roberts-etal-2020-much", "title": "How Much Knowledge Can You Pack Into the Parameters of a Language Model?", "abstract": "It has recently been observed that neural language models trained on unstructured text can implicitly store and retrieve knowledge using natural language queries. In this short paper, we measure the practical utility of this approach by fine-tuning pre-trained models to answer questions without access to any external context or knowledge. We show that this approach scales with model size and performs competitively with open-domain systems that explicitly retrieve answers from an external knowledge source when answering questions. To facilitate reproducibility and future work, we release our code and trained models.", "phrases": ["much knowledge", "language model", "pre-trained model", "access", "plm"], "overall_score": 6.9329821105464795, "scores": [4.477914461771378, 1.393519131156586, 1.4044276461486256, 1.1095820174834516, 0.949210798907514], "rank_score": 1.866930811093511} -{"id": "roberts-etal-2012-empatweet", "title": "EmpaTweet: Annotating and Detecting Emotions on Twitter", "abstract": "The rise of micro-blogging in recent years has resulted in significant access to emotion-laden text. Unlike emotion expressed in other textual sources (e.g., blogs, quotes in newswire, email, product reviews, or even clinical text), micro-blogs differ by (1) placing a strict limit on length, resulting in radically new forms of emotional expression, and (2) encouraging users to express their daily thoughts in real-time, often resulting in far more emotion statements than might normally occur. In this paper, we introduce a corpus collected from Twitter with micro-blog posts (or \u201ctweets\u201d) annotated at the tweet level with seven emotions: ANGER, DISGUST, FEAR, JOY, LOVE, SADNESS, and SURPRISE. We analyze how emotions are distributed in the data we annotated and compare them to the distributions in other emotion-annotated corpora. 
We also used the annotated corpus to train a classifier that automatically discovers the emotions in tweets. In addition, we present an analysis of the linguistic style used for expressing emotions in our corpus. We hope that these observations will lead to the design of novel emotion detection techniques that account for linguistic style and psycholinguistic theories.", "phrases": ["emotion", "twitter", "social medium"], "overall_score": 5.167824015822359, "scores": [3.122217461528402, 1.941218374490468, 0.5282597238414836], "rank_score": 1.8638985199534515} -{"id": "amir-etal-2016-modelling", "title": "Modelling Context with User Embeddings for Sarcasm Detection in Social Media", "abstract": "We introduce a deep neural network for automated sarcasm detection. Recent work has emphasized the need for models to capitalize on contextual features, beyond lexical and syntactic cues present in utterances. For example, different speakers will tend to employ sarcasm regarding different subjects and, thus, sarcasm detection models ought to encode such speaker information. Current methods have achieved this by way of laborious feature engineering. By contrast, we propose to automatically learn and then exploit user embeddings, to be used in concert with lexical signals to recognize sarcasm. Our approach does not require elaborate feature engineering (and concomitant data scraping); fitting user embeddings requires only the text from their previous posts. The experimental results show that our model outperforms a state-of-the-art approach leveraging an extensive set of carefully crafted features.", "phrases": ["user embedding", "sarcasm detection", "historical tweet"], "overall_score": 6.512967741624366, "scores": [3.5031764410183506, 1.5510010178274647, 0.5339423668348215], "rank_score": 1.8627066085602124} -{"id": "tu-etal-2016-modeling", "title": "Modeling Coverage for Neural Machine Translation", "abstract": "The attention mechanism has enhanced state-of-the-art Neural Machine Translation (NMT) by jointly learning to align and translate. It tends to ignore past alignment information, however, which often leads to over-translation and under-translation. To address this problem, we propose coverage-based NMT in this paper. We maintain a coverage vector to keep track of the attention history. The coverage vector is fed to the attention model to help adjust future attention, which lets the NMT system pay more attention to untranslated source words. Experiments show that the proposed approach significantly improves both translation quality and alignment quality over standard attention-based NMT.", "phrases": ["coverage", "neural machine translation", "under-translation", "attention model", "untranslated source word"], "overall_score": 7.745779270317099, "scores": [2.686755184881384, 2.5219209752600165, 2.0808847785391698, 1.12836578742726, 0.8944043914813465], "rank_score": 1.8624662235178355} -{"id": "callison-burch-etal-2008-parametric", "title": "ParaMetric: An Automatic Evaluation Metric for Paraphrasing", "abstract": "We present ParaMetric, an automatic evaluation metric for data-driven approaches to paraphrasing. ParaMetric provides an objective measure of quality using a collection of multiple translations whose paraphrases have been manually annotated. ParaMetric calculates precision and recall scores by comparing the paraphrases discovered by automatic paraphrasing techniques against gold standard alignments of words and phrases within equivalent sentences.
We report scores for several established paraphrasing techniques.", "phrases": ["automatic evaluation metric", "paraphrasing", "parametric"], "overall_score": 4.09177148372663, "scores": [2.8089490602271616, 1.8272280900222817, 0.9505592160721681], "rank_score": 1.8622454554405372} -{"id": "banon-etal-2020-paracrawl", "title": "ParaCrawl: Web-Scale Acquisition of Parallel Corpora", "abstract": "We report on methods to create the largest publicly available parallel corpora by crawling the web, using open source software. We empirically compare alternative methods and publish benchmark data sets for sentence alignment and sentence pair filtering. We also describe the parallel corpora released and evaluate their quality and their usefulness to create machine translation systems.", "phrases": ["parallel corpora", "web", "paracrawl"], "overall_score": 4.459160972415252, "scores": [2.460305913106012, 1.973832239543377, 1.1447055395666121], "rank_score": 1.8596145640720003} -{"id": "surdeanu-etal-2008-conll", "title": "The CoNLL 2008 Shared Task on Joint Parsing of Syntactic and Semantic Dependencies", "abstract": "The Conference on Computational Natural Language Learning is accompanied every year by a shared task whose purpose is to promote natural language processing applications and evaluate them in a standard setting. In 2008 the shared task was dedicated to the joint parsing of syntactic and semantic dependencies. This shared task not only unifies the shared tasks of the previous four years under a unique dependency-based formalism, but also extends them significantly: this year's syntactic dependencies include more information such as named-entity boundaries; the semantic dependencies model roles of both verbal and nominal predicates. In this paper, we define the shared task and describe how the data sets were created. Furthermore, we report and analyze the results and describe the approaches of the participating systems.", "phrases": ["conll", "joint parsing", "srl", "semantic role"], "overall_score": 7.07542907609957, "scores": [2.86175769043715, 1.8905624594891308, 1.4903236681776455, 1.1921409521387087], "rank_score": 1.8586961925606587} -{"id": "xu-etal-2020-matinf", "title": "MATINF: A Jointly Labeled Large-Scale Dataset for Classification, Question Answering and Summarization", "abstract": "Recently, large-scale datasets have vastly facilitated the development in nearly all domains of Natural Language Processing. However, there is currently no cross-task dataset in NLP, which hinders the development of multi-task learning. We propose MATINF, the first jointly labeled large-scale dataset for classification, question answering and summarization. MATINF contains 1.07 million question-answer pairs with human-labeled categories and user-generated question descriptions. Based on such rich information, MATINF is applicable for three major NLP tasks, including classification, question answering, and summarization. We benchmark existing methods and a novel multi-task baseline over MATINF to inspire further research. 
Our comprehensive comparison and experiments over MATINF and other datasets demonstrate the merits of MATINF.", "phrases": ["question answering", "summarization", "matinf"], "overall_score": 2.0413085027039872, "scores": [1.9330134588744787, 1.8387216909946722, 1.802502068555043], "rank_score": 1.8580790728080647} -{"id": "park-levy-2011-automated", "title": "Automated Whole Sentence Grammar Correction Using a Noisy Channel Model", "abstract": "Automated grammar correction techniques have seen improvement over the years, but there is still much room for increased performance. Current correction techniques mainly focus on identifying and correcting a specific type of error, such as verb form misuse or preposition misuse, which restricts the corrections to a limited scope. We introduce a novel technique, based on a noisy channel model, which can utilize the whole sentence context to determine proper corrections. We show how to use the EM algorithm to learn the parameters of the noise model, using only a data set of erroneous sentences, given the proper language model. This frees us from the burden of acquiring a large corpus of corrected sentences. We also present a cheap and efficient way to provide automated evaluation results for grammar corrections by using BLEU and METEOR, in contrast to the commonly used manual evaluations.", "phrases": ["correction", "noisy channel model", "language model"], "overall_score": 5.263196611268214, "scores": [3.624234901866444, 1.054334721509804, 0.8944628017778756], "rank_score": 1.8576774750513747} -{"id": "wang-cho-2019-bert", "title": "BERT has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model", "abstract": "We show that BERT (Devlin et al., 2018) is a Markov random field language model. This formulation gives way to a natural procedure to sample sentences from BERT. We generate from BERT and find that it can produce high quality, fluent generations. Compared to the generations of a traditional left-to-right language model, BERT generates sentences that are more diverse but of slightly worse quality.", "phrases": ["markov", "language model", "procedure", "bert"], "overall_score": 6.052437635901189, "scores": [4.173797618133959, 1.5035496394193233, 1.2009086945558098, 0.5523865291927107], "rank_score": 1.8576606203254504} -{"id": "mihalcea-etal-2004-pagerank", "title": "PageRank on Semantic Networks, with Application to Word Sense Disambiguation", "abstract": "This paper presents a new open text word sense disambiguation method that combines the use of logical inferences with PageRank-style algorithms applied on graphs extracted from natural language documents. We evaluate the accuracy of the proposed algorithm on several sense-annotated texts, and show that it consistently outperforms the accuracy of other previously proposed knowledge-based word sense disambiguation methods. We also explore and evaluate methods that combine several open-text word sense disambiguation algorithms.", "phrases": ["word sense disambiguation", "pagerank", "wsd"], "overall_score": 4.453379184140567, "scores": [3.057565435029755, 1.439486201017884, 1.074558477195659], "rank_score": 1.8572033710810993} -{"id": "hatori-etal-2012-incremental", "title": "Incremental Joint Approach to Word Segmentation, POS Tagging, and Dependency Parsing in Chinese", "abstract": "We propose the first joint model for word segmentation, POS tagging, and dependency parsing for Chinese.
Based on an extension of the incremental joint model for POS tagging and dependency parsing (Hatori et al., 2011), we propose an efficient character-based decoding method that can combine features from state-of-the-art segmentation, POS tagging, and dependency parsing models. We also describe our method to align comparable states in the beam, and how we can combine features of different characteristics in our incremental framework. In experiments using the Chinese Treebank (CTB), we show that the accuracies of the three tasks can be improved significantly over the baseline models, particularly by 0.6% for POS tagging and 2.4% for dependency parsing. We also perform comparison experiments with the partially joint models.", "phrases": ["word segmentation", "pos tagging", "joint modeling"], "overall_score": 3.3272885106383234, "scores": [2.7102543296672748, 2.334266548236914, 0.5264643482631747], "rank_score": 1.8569950753891211} -{"id": "wan-etal-2010-cross", "title": "Cross-Language Document Summarization Based on Machine Translation Quality Prediction", "abstract": "Cross-language document summarization is a task of producing a summary in one language for a document set in a different language. Existing methods simply use machine translation for document translation or summary translation. However, current machine translation services are far from satisfactory, with the result that the quality of the cross-language summary is usually very poor, both in readability and content. In this paper, we propose to consider the translation quality of each sentence in the English-to-Chinese cross-language summarization process. First, the translation quality of each English sentence in the document set is predicted with the SVM regression method, and then the quality score of each sentence is incorporated into the summarization process. Finally, the English sentences with high translation quality and high informativeness are selected and translated to form the Chinese summary. Experimental results demonstrate the effectiveness and usefulness of the proposed approach.", "phrases": ["machine translation", "quality score", "cross-language document summarization"], "overall_score": 5.900572485567942, "scores": [3.7131994824252557, 1.323210782481739, 0.5335764004119419], "rank_score": 1.8566622217729787} -{"id": "shinyama-sekine-2006-preemptive", "title": "Preemptive Information Extraction using Unrestricted Relation Discovery", "abstract": "We are trying to extend the boundary of Information Extraction (IE) systems. Existing IE systems require a lot of time and human effort to tune for a new scenario. Preemptive Information Extraction is an attempt to automatically create all feasible IE systems in advance without human intervention. We propose a technique called Unrestricted Relation Discovery that discovers all possible relations from texts and presents them as tables.
We present a preliminary system that obtains reasonably good results.", "phrases": ["unrestricted relation discovery", "preemptive information extraction", "same relation"], "overall_score": 5.889119367802627, "scores": [2.993476453150923, 2.0302201100947497, 0.5354786573557948], "rank_score": 1.8530584068671558} -{"id": "chersoni-etal-2017-logical", "title": "Logical Metonymy in a Distributional Model of Sentence Comprehension", "abstract": "In theoretical linguistics, logical metonymy is defined as the combination of an event-subcategorizing verb with an entity-denoting direct object (e.g., The author began the book), so that the interpretation of the VP requires the retrieval of a covert event (e.g., writing). Psycholinguistic studies have revealed extra processing costs for logical metonymy, a phenomenon generally explained with the introduction of new semantic structure. In this paper, we present a general distributional model for sentence comprehension inspired by the Memory, Unification and Control model by Hagoort (2013, 2016). We show that our distributional framework can account for the extra processing costs of logical metonymy and can identify the covert event in a classification task.", "phrases": ["distributional model", "sentence comprehension", "logical metonymy"], "overall_score": 2.0356950429135066, "scores": [1.907673212540152, 1.8844843248181185, 1.76675090717076], "rank_score": 1.8529694815096767} -{"id": "warner-hirschberg-2012-detecting", "title": "Detecting Hate Speech on the World Wide Web", "abstract": "We present an approach to detecting hate speech in online text, where hate speech is defined as abusive speech targeting specific group characteristics, such as ethnic origin, religion, gender, or sexual orientation. While hate speech against any group may exhibit some common characteristics, we have observed that hatred against each different group is typically characterized by the use of a small set of high frequency stereotypical words; however, such words may be used in either a positive or a negative sense, making our task similar to that of word sense disambiguation. In this paper we describe our definition of hate speech, the collection and annotation of our hate speech corpus, and a mechanism for detecting some commonly used methods of evading common \"dirty word\" filters. We describe pilot classification experiments in which we classify anti-semitic speech reaching an accuracy of 94%, precision of 68% and recall at 60%, for an F1 measure of 0.6375.", "phrases": ["hate speech", "group", "anti-semitic speech", "social medium", "n-gram"], "overall_score": 7.165049723958896, "scores": [5.493305796643278, 1.2074152444680062, 1.0696308620165145, 0.8999622752573553, 0.5839840403974939], "rank_score": 1.8508596437565294} -{"id": "fader-etal-2007-mavenrank", "title": "MavenRank: Identifying Influential Members of the US Senate Using Lexical Centrality", "abstract": "We introduce a technique for identifying the most salient participants in a discussion. Our method, MavenRank, is based on lexical centrality: a random walk is performed on a graph in which each node is a participant in the discussion and an edge links two participants who use similar rhetoric. As a test, we used MavenRank to identify the most influential members of the US Senate using data from the US Congressional Record and used committee ranking to evaluate the output.
Our results show that MavenRank scores are largely driven by committee status in most topics, but can capture speaker centrality in topics where speeches are used to indicate ideological position rather than to influence legislation.", "phrases": ["influential member", "lexical centrality", "mavenrank"], "overall_score": 2.032274259172076, "scores": [1.9062568283317176, 1.8532805228869336, 1.7900298986686054], "rank_score": 1.849855749962419} -{"id": "carpuat-2009-one", "title": "One Translation Per Discourse", "abstract": "We revisit the one sense per discourse hypothesis of Gale et al. in the context of machine translation. Since a given sense can be lexicalized differently in translation, do we observe one translation per discourse? Analysis of manual translations reveals that the hypothesis still holds when using translations in parallel text as sense annotation, thus confirming that translational differences represent useful sense distinctions. Analysis of Statistical Machine Translation (SMT) output showed that despite ignoring document structure, the one translation per discourse hypothesis is strongly supported in part because of the low variability in SMT lexical choice. More interestingly, cases where the hypothesis does not hold can reveal lexical choice errors. A preliminary study showed that enforcing the one translation per discourse constraint in SMT can potentially improve translation quality, and that SMT systems might benefit from translating sentences within their entire document context.", "phrases": ["discourse", "translation quality", "smt system"], "overall_score": 5.445918534007149, "scores": [3.7908857905902633, 1.2279852520739305, 0.5298109703169706], "rank_score": 1.8495606709937213} -{"id": "jiang-etal-2009-automatic", "title": "Automatic Adaptation of Annotation Standards: Chinese Word Segmentation and POS Tagging \u2013 A Case Study", "abstract": "Manually annotated corpora are valuable but scarce resources, yet for many annotation tasks such as treebanking and sequence labeling there exist multiple corpora with different and incompatible annotation guidelines or standards. This seems to be a great waste of human efforts, and it would be nice to automatically adapt one annotation standard to another. We present a simple yet effective strategy that transfers knowledge from a differently annotated corpus to the corpus with desired annotation. We test the efficacy of this method in the context of Chinese word segmentation and part-of-speech tagging, where no segmentation and POS tagging standards are widely accepted due to the lack of morphology in Chinese. Experiments show that adaptation from the much larger People's Daily corpus to the smaller but more popular Penn Chinese Treebank results in significant improvements in both segmentation and tagging accuracies (with error reductions of 30.2% and 14%, respectively), which in turn helps improve Chinese parsing accuracy.", "phrases": ["chinese word segmentation", "pos tagging", "annotation guideline"], "overall_score": 6.025175842726267, "scores": [3.212433744198518, 1.7673374603339849, 0.5681084599403557], "rank_score": 1.8492932214909528} -{"id": "poon-domingos-2008-joint", "title": "Joint Unsupervised Coreference Resolution with Markov Logic", "abstract": "Machine learning approaches to coreference resolution are typically supervised, and require expensive labeled data. Some unsupervised approaches have been proposed (e.g., Haghighi and Klein (2007)), but they are less accurate.
In this paper, we present the first unsupervised approach that is competitive with supervised ones. This is made possible by performing joint inference across mentions, in contrast to the pairwise classification typically used in supervised methods, and by using Markov logic as a representation language, which enables us to easily express relations like apposition and predicate nominals. On MUC and ACE datasets, our model outperforms Haghighi and Klein's model using only a fraction of the training data, and often matches or exceeds the accuracy of state-of-the-art supervised models.", "phrases": ["coreference resolution", "markov logic", "joint inference", "mention", "entity-level information"], "overall_score": 6.282762527520985, "scores": [3.5944542840071865, 2.1445212560913327, 1.5972810048885333, 1.343274632695237, 0.5565727917536396], "rank_score": 1.8472207938871859} -{"id": "xu-etal-2013-filling", "title": "Filling Knowledge Base Gaps for Distant Supervision of Relation Extraction", "abstract": "Distant supervision has attracted recent interest for training information extraction systems because it does not require any human annotation but rather employs existing knowledge bases to heuristically label a training corpus. However, previous work has failed to address the problem of false negative training examples mislabeled due to the incompleteness of knowledge bases. To tackle this problem, we propose a simple yet novel framework that combines a passage retrieval model using coarse features into a state-of-the-art relation extractor using multi-instance learning with fine features. We adapt the information retrieval technique of pseudo-relevance feedback to expand knowledge bases, assuming entity pairs in top-ranked passages are more likely to express a relation. Our proposed technique significantly improves the quality of distantly supervised relation extraction, boosting recall from 47.7% to 61.2% with a consistently high level of precision of around 93% in the experiments.", "phrases": ["knowledge base", "distant supervision", "relation extraction", "pseudo-relevance feedback"], "overall_score": 5.869193400486794, "scores": [3.12771180407007, 1.9290901466477506, 1.2778062619166897, 1.0525459562981312], "rank_score": 1.8467885422331602} -{"id": "cai-yates-2013-large", "title": "Large-scale Semantic Parsing via Schema Matching and Lexicon Extension", "abstract": "Supervised training procedures for semantic parsers produce high-quality semantic parsers, but they have difficulty scaling to large databases because of the sheer number of logical constants for which they must see labeled training data. We present a technique for developing semantic parsers for large databases based on a reduction to standard supervised training algorithms, schema matching, and pattern learning. Leveraging techniques from each of these areas, we develop a semantic parser for Freebase that is capable of parsing questions with an F1 that improves by 0.42 over a purely-supervised learning algorithm.", "phrases": ["semantic parsing", "freebase", "knowledge basis"], "overall_score": 5.944209958613326, "scores": [2.6427765231169427, 1.7967189253395053, 1.1005225100089586], "rank_score": 1.846672652821802} -{"id": "diab-etal-2007-arabic", "title": "Arabic diacritization in the context of statistical machine translation", "abstract": "Diacritics in Arabic are optional orthographic symbols typically representing short vowels. Most Arabic text is underspecified for diacritics.
However, we do observe partial diacritization depending on genre and domain. In this paper, we investigate the impact of Arabic diacritization on statistical machine translation (SMT). We define several diacritization schemes ranging from full to partial diacritization. We explore the impact of the defined schemes on SMT in two different modes which tease apart the effect of diacritization on the alignment and its consequences on decoding. Our results show that none of the partial diacritization schemes significantly varies in performance from the no-diacritization baseline despite the increase in the number of types in the data. However, a full diacritization scheme performs significantly worse than no diacritization. Crucially, our research suggests that the SMT performance is positively correlated with the increase in the number of tokens correctly affected by a diacritization scheme and the high F-score of the automatic assignment of the particular diacritic.", "phrases": ["diacritization", "machine translation", "arabic"], "overall_score": 4.251241945029887, "scores": [2.8272261171622786, 2.1326820956019916, 0.5789645411221105], "rank_score": 1.8462909179621272} -{"id": "geva-etal-2019-discofuse", "title": "DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion", "abstract": "Sentence fusion is the task of joining several independent sentences into a single coherent text. Current datasets for sentence fusion are small and insufficient for training modern neural models. In this paper, we propose a method for automatically generating fusion examples from raw text and present DiscoFuse, a large scale dataset for discourse-based sentence fusion. We author a set of rules for identifying a diverse set of discourse phenomena in raw text, and decomposing the text into two independent sentences. We apply our approach on two document collections: Wikipedia and Sports articles, yielding 60 million fusion examples annotated with discourse information required to reconstruct the fused text. We develop a sequence-to-sequence model on DiscoFuse and thoroughly analyze its strengths and weaknesses with respect to the various discourse phenomena, using both automatic as well as human evaluation. Finally, we conduct transfer learning experiments with WebSplit, a recent dataset for text simplification. We show that pretraining on DiscoFuse substantially improves performance on WebSplit when viewed as a sentence fusion task.", "phrases": ["large-scale dataset", "sentence fusion", "discofuse"], "overall_score": 4.054546078465457, "scores": [2.473860635152079, 2.2101327868097784, 0.8519169082160528], "rank_score": 1.8453034433926367} -{"id": "maruf-haffari-2018-document", "title": "Document Context Neural Machine Translation with Memory Networks", "abstract": "We present a document-level neural machine translation model which takes both source and target document context into account using memory networks. We model the problem as a structured prediction problem with interdependencies among the observed and hidden variables, i.e., the source sentences and their unobserved target translations in the document. The resulting structured prediction problem is tackled with a neural translation model equipped with two memory components, one each for the source and target side, to capture the documental interdependencies. We train the model end-to-end, and propose an iterative decoding algorithm based on block coordinate descent.
Experimental results of English translations from French, German, and Estonian documents show that our model is effective in exploiting both source and target document context, and statistically significantly outperforms the previous work in terms of BLEU and METEOR.", "phrases": ["machine translation", "memory network", "document context"], "overall_score": 6.3933470155730445, "scores": [3.37003018834942, 1.5754220704609494, 0.5887377616193227], "rank_score": 1.844730006809897} -{"id": "qiu-etal-2011-opinion", "title": "Opinion Word Expansion and Target Extraction through Double Propagation", "abstract": "Analysis of opinions, known as opinion mining or sentiment analysis, has attracted a great deal of attention recently due to many practical applications and challenging research problems. In this article, we study two important problems, namely, opinion lexicon expansion and opinion target extraction. Opinion targets (targets, for short) are entities and their attributes on which opinions have been expressed. To perform the tasks, we found that there are several syntactic relations that link opinion words and targets. These relations can be identified using a dependency parser and then utilized to expand the initial opinion lexicon and to extract targets. This proposed method is based on bootstrapping. We call it double propagation as it propagates information between opinion words and targets. A key advantage of the proposed method is that it only needs an initial opinion lexicon to start the bootstrapping process. Thus, the method is semi-supervised due to the use of opinion word seeds. In evaluation, we compare the proposed method with several state-of-the-art methods using a standard product review test collection. The results show that our approach outperforms these existing methods significantly.", "phrases": ["double propagation", "syntactic relation", "opinion word", "absa", "unsupervised method"], "overall_score": 7.629897137846835, "scores": [3.0657371557276383, 2.394708760139105, 1.813122906092843, 1.0584191017949574, 0.875891667083793], "rank_score": 1.8415759181676676} -{"id": "kiela-etal-2015-specializing", "title": "Specializing Word Embeddings for Similarity or Relatedness", "abstract": "We demonstrate the advantage of specializing semantic word embeddings for either similarity or relatedness. We compare two variants of retrofitting and a joint-learning approach, and find that all three yield specialized semantic spaces that capture human intuitions regarding similarity and relatedness better than unspecialized spaces. We also show that using specialized spaces in NLP tasks and applications leads to clear improvements, for document classification and synonym selection, which rely on either similarity or relatedness but not both.", "phrases": ["relatedness", "human intuition", "document classification"], "overall_score": 6.258986432625899, "scores": [4.166900904063577, 0.829968143015311, 0.5238218128856845], "rank_score": 1.8402302866548574} -{"id": "mccarthy-etal-2003-detecting", "title": "Detecting a Continuum of Compositionality in Phrasal Verbs", "abstract": "We investigate the use of an automatically acquired thesaurus for measures designed to indicate the compositionality of candidate multiword verbs, specifically English phrasal verbs identified automatically using a robust parser. 
We examine various measures using the nearest neighbours of the phrasal verb, and in some cases the neighbours of the simplex counterpart and show that some of these correlate significantly with human rankings of compositionality on the test set. We also show that whilst the compositionality judgements correlate with some statistics commonly used for extracting multiwords, the relationship is not as strong as that using the automatically constructed thesaurus.", "phrases": ["compositionality", "phrasal verb", "thesaurus", "various measure", "vpc"], "overall_score": 6.258912087932934, "scores": [4.580559927686815, 1.8485668849995978, 1.0691095519776914, 0.8603859597567668, 0.8424198169120473], "rank_score": 1.8402084282665836} -{"id": "collins-etal-2005-clause", "title": "Clause Restructuring for Statistical Machine Translation", "abstract": "We describe a method for incorporating syntactic information in statistical machine translation systems. The first step of the method is to parse the source language string that is being translated. The second step is to apply a series of transformations to the parse tree, effectively reordering the surface string on the source language side of the translation system. The goal of this step is to recover an underlying word order that is closer to the target language word-order than the original string. The reordering approach is applied as a pre-processing step in both the training and decoding phases of a phrase-based statistical MT system. We describe experiments on translation from German to English, showing an improvement from 25.2% Bleu score for a baseline system to 26.8% Bleu score for the system with reordering, a statistically significant improvement.", "phrases": ["statistical machine translation", "transformation", "pre-processing step", "clause restructuring", "negation"], "overall_score": 8.730922625962902, "scores": [2.907206835012381, 2.497999599639034, 1.5857756842948116, 1.3033441642969614, 0.9059345316302653], "rank_score": 1.8400521629746909} -{"id": "grissom-ii-etal-2014-dont", "title": "Don't Until the Final Verb Wait: Reinforcement Learning for Simultaneous Machine Translation", "abstract": "We introduce a reinforcement learning-based approach to simultaneous machine translation\u2014producing a translation while receiving input words\u2014between languages with drastically different word orders: from verb-final languages (e.g., German) to verb-medial languages (English). In traditional machine translation, a translator must \u201cwait\u201d for source material to appear before translation begins. We remove this bottleneck by predicting the final verb in advance. We use reinforcement learning to learn when to trust predictions about unseen, future portions of the sentence. We also introduce an evaluation metric to measure expeditiousness and quality. We show that our new translation model outperforms batch and monotone translation strategies.", "phrases": ["final verb", "reinforcement learning", "simultaneous machine translation"], "overall_score": 5.845104727644211, "scores": [2.9948490007426103, 1.3995845362366848, 1.1231930102755068], "rank_score": 1.8392088490849339} -{"id": "socher-etal-2011-semi", "title": "Semi-Supervised Recursive Autoencoders for Predicting Sentiment Distributions", "abstract": "We introduce a novel machine learning framework based on recursive autoencoders for sentence-level prediction of sentiment label distributions. Our method learns vector space representations for multi-word phrases.
In sentiment prediction tasks these representations outperform other state-of-the-art approaches on commonly used datasets, such as movie reviews, without using any pre-defined sentiment lexica or polarity shifting rules. We also evaluate the model's ability to predict sentiment distributions on a new dataset based on confessions from the Experience Project. The dataset consists of personal user stories annotated with multiple labels which, when aggregated, form a multinomial distribution that captures emotional reactions. Our algorithm can more accurately predict distributions over such labels compared to several competitive baselines.", "phrases": ["sentiment distribution", "multi-word phrase", "recursive neural network", "network model", "deep learning"], "overall_score": 8.230804264528416, "scores": [3.0414666135142916, 2.385742121611461, 1.3482728310114858, 1.3136906917569153, 1.102457281693702], "rank_score": 1.8383259079175711} -{"id": "sanches-duran-etal-2015-normalizer", "title": "A Normalizer for UGC in Brazilian Portuguese", "abstract": "User-generated contents (UGC) represent an important source of information for governments, companies, political candidates and consumers. However, most of the Natural Language Processing tools and techniques are developed from and for texts of standard language, and UGC is a type of text especially full of creativity and idiosyncrasies, which represents noise for NLP purposes. This paper presents UGCNormal, a lexicon-based tool for UGC normalization. It encompasses a tokenizer, a sentence segmentation tool, a phonetic-based speller and some lexicons, which were originated from a deep analysis of a corpus of product reviews in Brazilian Portuguese. The normalizer was evaluated in two different data sets and made between 31% and 89% of the appropriate corrections, depending on the type of text noise. The use of UGCNormal was also validated in a POS tagging task, where accuracy improved from 91.35% to 93.15%, and in an opinion classification task, where the average of the F1-score measures (positive and negative F1-score) improved from 0.736 to 0.758.", "phrases": ["normalizer", "ugc", "brazilian portuguese"], "overall_score": 2.0189153327430986, "scores": [1.8817366835150253, 1.8219323977567017, 1.8094187120316996], "rank_score": 1.837695931101142} -{"id": "burger-etal-2011-discriminating", "title": "Discriminating Gender on Twitter", "abstract": "Accurate prediction of demographic attributes from social media and other informal online content is valuable for marketing, personalization, and legal investigation. This paper describes the construction of a large, multilingual dataset labeled with gender, and investigates statistical models for determining the gender of uncharacterized Twitter users. We explore several different classifier types on this dataset. We show the degree to which classifier accuracy varies based on tweet volumes as well as when various kinds of profile metadata are included in the models. We also perform a large-scale human assessment using Amazon Mechanical Turk.
Our methods significantly outperform both baseline models and almost all humans on the same task.", "phrases": ["gender", "twitter", "multilingual dataset", "age", "social medium"], "overall_score": 6.186871125151395, "scores": [4.279793955304607, 2.2270237721248303, 1.2103866754454489, 0.884689423327703, 0.5848118235594737], "rank_score": 1.8373411299524125} -{"id": "mekala-shang-2020-contextualized", "title": "Contextualized Weak Supervision for Text Classification", "abstract": "Weakly supervised text classification based on a few user-provided seed words has recently attracted much attention from researchers. Existing methods mainly generate pseudo-labels in a context-free manner (e.g., string matching), therefore, the ambiguous, context-dependent nature of human language has been long overlooked. In this paper, we propose a novel framework ConWea, providing contextualized weak supervision for text classification. Specifically, we leverage contextualized representations of word occurrences and seed word information to automatically differentiate multiple interpretations of the same word, and thus create a contextualized corpus. This contextualized corpus is further utilized to train the classifier and expand seed words in an iterative manner. This process not only adds new contextualized, highly label-indicative keywords but also disambiguates initial seed words, making our weak supervision fully contextualized. Extensive experiments and case studies on real-world datasets demonstrate the necessity and significant advantages of using contextualized weak supervision, especially when the class labels are fine-grained.", "phrases": ["weak supervision", "text classification", "conwea"], "overall_score": 4.397941450152923, "scores": [2.972453974696663, 1.9856630071671553, 0.5441351390394558], "rank_score": 1.8340840403010912} -{"id": "bick-2011-framenet", "title": "A FrameNet for Danish", "abstract": "This paper presents work on a comprehensive FrameNet for Danish (cf. www.framenet.dk), with over 12,000 frames, and an almost complete coverage of Danish verb lemmas. We discuss design principles and frame roles as well as the distinctional use of valency, syntactic function and semantic noun classes. By converting frame distinctors into Constraint Grammar rules, we were able to build a robust frame tagger for running Danish text, using DanGram parses as input. The combined context-informed coverage of the parser-frametagger was 94.3%, with an overall F-score for frame senses of 85.12.", "phrases": ["framenet", "danish", "coverage"], "overall_score": 3.280676624784846, "scores": [2.5133152694550778, 2.4521346736482172, 0.5274915166088097], "rank_score": 1.8309804865707013} -{"id": "lai-etal-2017-race", "title": "RACE: Large-scale ReAding Comprehension Dataset From Examinations", "abstract": "We present RACE, a new dataset for benchmark evaluation of methods in the reading comprehension task. Collected from the English exams for middle and high school Chinese students in the age range between 12 and 18, RACE consists of nearly 28,000 passages and nearly 100,000 questions generated by human experts (English instructors), and covers a variety of topics which are carefully designed for evaluating the students' ability in understanding and reasoning.
In particular, the proportion of questions that require reasoning is much larger in RACE than that in other benchmark datasets for reading comprehension, and there is a significant gap between the performance of the state-of-the-art models (43%) and the ceiling human performance (95%). We hope this new dataset can serve as a valuable resource for research and evaluation in machine comprehension. The dataset is freely available at and the code is available at .", "phrases": ["comprehension", "english exam", "high school", "race", "multiple-choice question"], "overall_score": 7.081449265362867, "scores": [4.484989500591126, 1.6381830990234725, 1.30223857133441, 0.8725108765013956, 0.8483987524892688], "rank_score": 1.8292641599879347} -{"id": "wang-etal-2017-exploiting-cross", "title": "Exploiting Cross-Sentence Context for Neural Machine Translation", "abstract": "In translation, considering the document as a whole can help to resolve ambiguities and inconsistencies. In this paper, we propose a cross-sentence context-aware approach and investigate the influence of historical contextual information on the performance of neural machine translation (NMT). First, this history is summarized in a hierarchical way. We then integrate the historical representation into NMT in two strategies: 1) a warm-start of encoder and decoder states, and 2) an auxiliary context source for updating decoder states. Experimental results on a large Chinese-English translation task show that our approach significantly improves upon a strong attention-based NMT system by up to +2.1 BLEU points.", "phrases": ["cross-sentence context", "neural machine translation", "contextual information", "rnn", "translation quality"], "overall_score": 6.699906985734472, "scores": [3.685456970484803, 1.8869745198570536, 1.6416395209016308, 1.0488741455206407, 0.8810349818157148], "rank_score": 1.8287960277159687} -{"id": "yates-etal-2007-textrunner", "title": "TextRunner: Open Information Extraction on the Web", "abstract": "Traditional information extraction systems have focused on satisfying precise, narrow, pre-specified requests from small, homogeneous corpora. In contrast, the TextRunner system demonstrates a new kind of information extraction, called Open Information Extraction (OIE), in which the system makes a single, data-driven pass over the entire corpus and extracts a large set of relational tuples, without requiring any human input. (Banko et al., 2007) TextRunner is a fully-implemented, highly scalable example of OIE. TextRunner's extractions are indexed, allowing a fast query mechanism.", "phrases": ["open information extraction", "oie", "textrunner", "relation phrase", "entity pair"], "overall_score": 5.563544886770802, "scores": [4.037968495659827, 2.757205912940095, 1.2157350593448955, 0.5669185939458947, 0.559146620632908], "rank_score": 1.827394936504724} -{"id": "danescu-niculescu-mizil-etal-2013-computational", "title": "A computational approach to politeness with application to social factors", "abstract": "We propose a computational framework for identifying linguistic aspects of politeness. Our starting point is a new corpus of requests annotated for politeness, which we use to evaluate aspects of politeness theory and to uncover new interactions between politeness markers and context. These findings guide our construction of a classifier with domain-independent lexical and syntactic features operationalizing key components of politeness theory, such as indirection, deference, impersonalization and modality.
Our classifier achieves close to human performance and is effective across domains. We use our framework to study the relationship between politeness and social power, showing that polite Wikipedia editors are more likely to achieve high status through elections, but, once elevated, they become less polite. We see a similar negative correlation between politeness and power on Stack Exchange, where users at the top of the reputation scale are less polite than those at the bottom. Finally, we apply our classifier to a preliminary analysis of politeness variation by gender and community.", "phrases": ["politeness", "computational framework", "linguistic cue"], "overall_score": 4.687108620362769, "scores": [4.420638035478057, 0.5397514744048625, 0.5217171134636073], "rank_score": 1.8273688744488423} -{"id": "chen-choi-2016-character", "title": "Character Identification on Multiparty Conversation: Identifying Mentions of Characters in TV Shows", "abstract": "This paper introduces a subtask of entity linking, called character identification, that maps mentions in multiparty conversation to their referent characters. Transcripts of TV shows are collected as the sources of our corpus and automatically annotated with mentions by linguistically-motivated rules. These mentions are manually linked to their referents through crowdsourcing. Our corpus comprises 543 scenes from two TV shows, and shows an inter-annotator agreement of \u03ba = 79.96. For statistical modeling, this task is reformulated as coreference resolution, and experimented with a state-of-the-art system on our corpus. Our best model gives a purity score of 69.21 on average, which is promising given the challenging nature of this task and our corpus.", "phrases": ["multiparty conversation", "mention", "character identification"], "overall_score": 4.821299402761806, "scores": [2.527339657864185, 2.0807692099650668, 0.8725974608938437], "rank_score": 1.8269021095743652} -{"id": "lazaridou-etal-2013-compositional", "title": "Compositional-ly Derived Representations of Morphologically Complex Words in Distributional Semantics", "abstract": "Speakers of a language can construct an unlimited number of new words through morphological derivation. This is a major cause of data sparseness for corpus-based approaches to lexical semantics, such as distributional semantic models of word meaning. We adapt compositional methods originally developed for phrases to the task of deriving the distributional meaning of morphologically complex words from their parts. Semantic representations constructed in this way beat a strong baseline and can be of higher quality than representations directly constructed from corpus data. Our results constitute a novel evaluation of the proposed composition methods, in which the full additive model achieves the best performance, and demonstrate the usefulness of a compositional morphology component in distributional semantics.", "phrases": ["complex word", "distributional semantic model", "compositional method"], "overall_score": 5.805985090349389, "scores": [3.664570852738581, 0.9551745425690464, 0.8609532337464879], "rank_score": 1.8268995430180386} -{"id": "conneau-etal-2017-deep", "title": "Very Deep Convolutional Networks for Text Classification", "abstract": "The dominant approaches for many NLP tasks are recurrent neural networks, in particular LSTMs, and convolutional neural networks.
However, these architectures are rather shallow in comparison to the deep convolutional networks which have pushed the state-of-the-art in computer vision. We present a new architecture (VDCNN) for text processing which operates directly at the character level and uses only small convolutions and pooling operations. We are able to show that the performance of this model increases with the depth: using up to 29 convolutional layers, we report improvements over the state-of-the-art on several public text classification tasks. To the best of our knowledge, this is the first time that very deep convolutional nets have been applied to text processing.", "phrases": ["text classification", "convolutional neural network", "cnn", "learning model"], "overall_score": 6.442124203257959, "scores": [3.143675371235939, 1.7865785835465648, 1.512058627312233, 0.865078886574421], "rank_score": 1.8268478671672894} -{"id": "bannard-callison-burch-2005-paraphrasing", "title": "Paraphrasing with Bilingual Parallel Corpora", "abstract": "Previous work has used monolingual parallel corpora to extract and generate paraphrases. We show that this task can be done using bilingual parallel corpora, a much more commonly available resource. Using alignment techniques from phrase-based statistical machine translation, we show how paraphrases in one language can be identified using a phrase in another language as a pivot. We define a paraphrase probability that allows paraphrases extracted from a bilingual parallel corpus to be ranked using translation probabilities, and show how it can be refined to take contextual information into account. We evaluate our paraphrase extraction and ranking methods using a set of manual word alignments, and contrast the quality with paraphrases extracted from automatic alignments.", "phrases": ["bilingual parallel corpora", "paraphrase", "pivot language", "statistical machine paclic"], "overall_score": 6.783322796975432, "scores": [3.599993438582007, 2.266562459767355, 0.8744850397522452, 0.5654797650180095], "rank_score": 1.8266301757799042} -{"id": "peters-etal-2019-knowledge", "title": "Knowledge Enhanced Contextual Word Representations", "abstract": "Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. 
KnowBert's runtime is comparable to BERT's and it scales to large KBs.", "phrases": ["entity linker", "entity embedding", "language model", "knowbert", "knowledge basis"], "overall_score": 7.590997707585826, "scores": [2.2616662135482146, 2.019850804147961, 1.9167038645563443, 1.7090615422565194, 1.2189631989355951], "rank_score": 1.825249124688927} -{"id": "culotta-sorensen-2004-dependency", "title": "Dependency Tree Kernels for Relation Extraction", "abstract": "We extend previous work on tree kernels to estimate the similarity between the dependency trees of sentences. Using this kernel within a Support Vector Machine, we detect and classify relations between entities in the Automatic Content Extraction (ACE) corpus of news articles. We examine the utility of different features such as Wordnet hypernyms, parts of speech, and entity types, and find that the dependency tree kernel achieves a 20% F1 improvement over a \"bag-of-words\" kernel.", "phrases": ["kernel", "relation extraction", "low recall", "svm", "feature engineering"], "overall_score": 7.778804687887724, "scores": [4.0726316885538285, 2.2029353506279685, 1.1313683812124489, 0.8625591240066656, 0.8548179916918954], "rank_score": 1.8248625072185614} -{"id": "zaidan-eisner-2008-modeling", "title": "Modeling Annotators: A Generative Approach to Learning from Annotator Rationales", "abstract": "A human annotator can provide hints to a machine learner by highlighting contextual \"rationales\" for each of his or her annotations (Zaidan et al., 2007). How can one exploit this side information to better learn the desired parameters \u03b8? We present a generative model of how a given annotator, knowing the true \u03b8, stochastically chooses rationales. Thus, observing the rationales helps us infer the true \u03b8. We collect substring rationales for a sentiment classification task (Pang and Lee, 2004) and use them to obtain significant accuracy improvements for each annotator. Our new generative approach exploits the rationales more effectively than our previous \"masking SVM\" approach. It is also more principled, and could be adapted to help learn other kinds of probabilistic classifiers for quite different tasks.", "phrases": ["annotator", "generative approach", "rationale"], "overall_score": 4.814874677171227, "scores": [3.19217303631789, 0.8667474723376389, 1.4144823876807655], "rank_score": 1.824467632112098} -{"id": "baldwin-bond-2003-learning", "title": "Learning the Countability of English Nouns from Corpus Data", "abstract": "This paper describes a method for learning the countability preferences of English nouns from raw text corpora. The method maps the corpus-attested lexico-syntactic properties of each noun onto a feature vector, and uses a suite of memory-based classifiers to predict membership in 4 countability classes. We were able to assign countability to English nouns with a precision of 94.6%.", "phrases": ["countability", "english noun", "acquisition"], "overall_score": 5.057779696581885, "scores": [3.3185287042544105, 1.3242461547025473, 0.8298504056687199], "rank_score": 1.8242084215418928} -{"id": "buchholz-marsi-2006-conll", "title": "CoNLL-X Shared Task on Multilingual Dependency Parsing", "abstract": "Each year the Conference on Computational Natural Language Learning (CoNLL) features a shared task, in which participants train and test their systems on exactly the same data sets, in order to better compare systems. The tenth CoNLL (CoNLL-X) saw a shared task on Multilingual Dependency Parsing. 
In this paper, we describe how treebanks for 13 languages were converted into the same dependency format and how parsing performance was measured. We also give an overview of the parsing approaches that participants took and the results that they achieved. Finally, we try to draw general conclusions about multi-lingual parsing: What makes a particular language, treebank or annotation scheme easier or harder to parse and which phenomena are challenging for any dependency parser?", "phrases": ["multilingual dependency parsing", "data set", "conll-x shared task", "unlabeled attachment score", "advance"], "overall_score": 7.992859991018693, "scores": [4.715268646357072, 2.0954584494061637, 1.248194405355626, 0.5363239166745684, 0.5248041133677783], "rank_score": 1.824009906232242} -{"id": "rahman-ng-2011-coreference", "title": "Coreference Resolution with World Knowledge", "abstract": "While world knowledge has been shown to improve learning-based coreference resolvers, the improvements were typically obtained by incorporating world knowledge into a fairly weak baseline resolver. Hence, it is not clear whether these benefits can carry over to a stronger baseline. Moreover, since there has been no attempt to apply different sources of world knowledge in combination to coreference resolution, it is not clear whether they offer complementary benefits to a resolver. We systematically compare commonly-used and under-investigated sources of world knowledge for coreference resolution by applying them to two learning-based coreference models and evaluating them on documents annotated with two different annotation schemes.", "phrases": ["world knowledge", "different source", "coreference resolution"], "overall_score": 4.8126093290432825, "scores": [2.5450342874615868, 2.3792613308174557, 0.5465320992964224], "rank_score": 1.823609239191822} -{"id": "butnaru-ionescu-2019-moroco", "title": "MOROCO: The Moldavian and Romanian Dialectal Corpus", "abstract": "In this work, we introduce the MOldavian and ROmanian Dialectal COrpus (MOROCO), which is freely available for download at . The corpus contains 33564 samples of text (with over 10 million tokens) collected from the news domain. The samples belong to one of the following six topics: culture, finance, politics, science, sports and tech. The data set is divided into 21719 samples for training, 5921 samples for validation and another 5924 samples for testing. For each sample, we provide corresponding dialectal and category labels. This allows us to perform empirical studies on several classification tasks such as (i) binary discrimination of Moldavian versus Romanian text samples, (ii) intra-dialect multi-class categorization by topic and (iii) cross-dialect multi-class categorization by topic. We perform experiments using a shallow approach based on string kernels, as well as a novel deep approach based on character-level convolutional neural networks containing Squeeze-and-Excitation blocks. 
We also present and analyze the most discriminative features of our best performing model, before and after named entity removal.", "phrases": ["romanian dialectal corpus", "sample", "news domain", "empirical study", "moroco"], "overall_score": 4.676859897190395, "scores": [3.5707774416558102, 3.3785959840657944, 1.0893204174486062, 0.5505719009215748, 0.5276002158251787], "rank_score": 1.8233731919833929} -{"id": "clinchant-etal-2019-use", "title": "On the use of BERT for Neural Machine Translation", "abstract": "Exploiting large pretrained models for various NMT tasks has gained a lot of visibility recently. In this work we study how BERT pretrained models could be exploited for supervised Neural Machine Translation. We compare various ways to integrate a pretrained BERT model with an NMT model and study the impact of the monolingual data used for BERT training on the final translation quality. We use WMT-14 English-German, IWSLT15 English-German and IWSLT14 English-Russian datasets for these experiments. In addition to standard task test set evaluation, we perform evaluation on out-of-domain test sets and noise injected test sets, in order to assess how BERT pretrained representations affect model robustness.", "phrases": ["bert", "neural machine translation", "translation quality"], "overall_score": 4.81147155342646, "scores": [3.697502332485418, 0.9327741279252733, 0.8392578684944483], "rank_score": 1.8231781096350463} -{"id": "coster-kauchak-2011-simple", "title": "Simple English Wikipedia: A New Text Simplification Task", "abstract": "In this paper we examine the task of sentence simplification which aims to reduce the reading complexity of a sentence by incorporating more accessible vocabulary and sentence structure. We introduce a new data set that pairs English Wikipedia with Simple English Wikipedia and is orders of magnitude larger than any previously examined for sentence simplification. The data contains the full range of simplification operations including rewording, reordering, insertion and deletion. We provide an analysis of this corpus as well as preliminary results using a phrase-based translation approach for simplification.", "phrases": ["english wikipedia", "text simplification", "deletion", "sentence pair"], "overall_score": 5.995766015555846, "scores": [3.7361142757552344, 1.5960223330497465, 1.0535575902821719, 0.8910810289595501], "rank_score": 1.8191938070116758} -{"id": "bhattacharyya-etal-2016-statistical", "title": "Statistical Machine Translation between Related Languages", "abstract": "Language-independent Statistical Machine Translation (SMT) has proven to be very challenging. The diversity of languages makes high accuracy difficult and requires substantial parallel corpora as well as linguistic resources (parsers, morph analyzers, etc.). An interesting observation is that a large chunk of machine translation (MT) requirements involve related languages. They are either: (i) between related languages, or (ii) between a lingua franca (like English) and a set of related languages. For instance, India, the European Union and South-East Asia have such translation requirements due to government, business and socio-cultural communication needs. Related languages share a lot of linguistic features and the divergences among them are at a lower level of the NLP pipeline. 
The objective of the tutorial is to discuss how the relatedness among languages can be leveraged to bridge this language divergence, thereby achieving some/all of these goals: (i) improving translation quality, (ii) achieving better generalization, (iii) sharing linguistic resources, and (iv) reducing resource requirements. We will look at the existing research in SMT from the perspective of related languages, with the goal of building a toolbox of methods that are useful for translation between related languages. This tutorial would be relevant to Machine Translation researchers and developers, especially those interested in translation between low-resource languages which have resource-rich related languages. It will also be relevant for researchers interested in multilingual computation. We start with a motivation for looking at the SMT problem from the perspective of related languages. We introduce notions of language relatedness useful for MT. We explore how lexical, morphological and syntactic similarity among related languages can help MT. Lexical similarity will receive special attention since related languages share a significant vocabulary in terms of cognates, loanwords, etc. Then, we look beyond bilingual MT and present how pivot-based and multi-source methods incorporate knowledge from multiple languages, and handle language pairs lacking parallel corpora. We present some studies concerning the implications of language relatedness to pivot-based SMT, and ways of handling language divergence in the pivot-based SMT scenario. Recent advances in deep learning have made it possible to train multi-language neural MT systems, which we think would be relevant to training between related languages.", "phrases": ["related language", "structural similarity", "long period"], "overall_score": 3.538760272207455, "scores": [3.1593980476957375, 1.1604060692522735, 1.1358849968460067], "rank_score": 1.818563037931339} -{"id": "koller-stone-2007-sentence", "title": "Sentence generation as a planning problem", "abstract": "We translate sentence generation from TAG grammars with semantic and pragmatic information into a planning problem by encoding the contribution of each word declaratively and explicitly. This allows us to exploit the performance of off-the-shelf planners. It also opens up new perspectives on referring expression generation and the relationship between language and action.", "phrases": ["planning problem", "action", "sentence generation"], "overall_score": 3.9942339238637823, "scores": [2.598350034304226, 2.315901627649073, 0.5393109347833716], "rank_score": 1.8178541989122234} -{"id": "lin-hovy-2003-automatic", "title": "Automatic Evaluation of Summaries Using N-gram Co-occurrence Statistics", "abstract": "Following the recent adoption by the machine translation community of automatic evaluation using the BLEU/NIST scoring process, we conduct an in-depth study of a similar idea for evaluating summaries. 
The results show that automatic evaluation using unigram co-occurrences between summary pairs correlates surprisingly well with human evaluations, based on various statistical metrics; while direct application of the BLEU evaluation procedure does not always give good results.", "phrases": ["n-gram co-occurrence statistic", "automatic evaluation", "rouge", "summarization", "document understanding conference"], "overall_score": 6.998351163904223, "scores": [4.136721339088924, 2.3710160735299755, 1.422437007413411, 0.6034558255637242, 0.5547890669465265], "rank_score": 1.817683862508512} -{"id": "farkas-etal-2010-conll", "title": "The CoNLL-2010 Shared Task: Learning to Detect Hedges and their Scope in Natural Language Text", "abstract": "The CoNLL-2010 Shared Task was dedicated to the detection of uncertainty cues and their linguistic scope in natural language texts. The motivation behind this task was that distinguishing factual and uncertain information in texts is of essential importance in information extraction. This paper provides a general overview of the shared task, including the annotation protocols of the training and evaluation datasets, the exact task definitions, the evaluation metrics employed and the overall results. The paper concludes with an analysis of the prominent approaches and an overview of the systems submitted to the shared task.", "phrases": ["scope", "natural language text", "conll\u20192010"], "overall_score": 5.8490800996272245, "scores": [2.546895750719697, 2.3482476389309963, 0.5562133117530214], "rank_score": 1.8171189004679047} -{"id": "kummerfeld-2019-slate", "title": "SLATE: A Super-Lightweight Annotation Tool for Experts", "abstract": "Many annotation tools have been developed, covering a wide variety of tasks and providing features like user management, pre-processing, and automatic labeling. However, all of these tools use Graphical User Interfaces, and often require substantial effort to install and configure. This paper presents a new annotation tool that is designed to fill the niche of a lightweight interface for users with a terminal-based workflow. SLATE supports annotation at different scales (spans of characters, tokens, and lines, or a document) and of different types (free text, labels, and links), with easily customisable keybindings, and unicode support. In a user study comparing it with other tools, it was consistently the easiest to install and use. SLATE fills a need not met by existing systems, and has already been used to annotate two corpora, one of which involved over 250 hours of annotation effort.", "phrases": ["annotation tool", "workflow", "support", "slate"], "overall_score": 3.535397312296442, "scores": [3.3385057774620863, 2.522242706429367, 0.8551463204214881, 0.5514444693175129], "rank_score": 1.8168348184076135} -{"id": "lukin-walker-2013-really", "title": "Really? Well. Apparently Bootstrapping Improves the Performance of Sarcasm and Nastiness Classifiers for Online Dialogue", "abstract": "More and more of the information on the web is dialogic, from Facebook newsfeeds, to forum conversations, to comment threads on news articles. In contrast to traditional, monologic Natural Language Processing resources such as news, highly social dialogue is frequent in social media, making it a challenging context for NLP. This paper tests a bootstrapping method, originally proposed in a monologic domain, to train classifiers to identify two different types of subjective language in dialogue: sarcasm and nastiness. 
We explore two methods of developing linguistic indicators to be used in a first level classifier aimed at maximizing precision at the expense of recall. The best performing classifier for the first phase achieves 54% precision and 38% recall for sarcastic utterances. We then use general syntactic patterns from previous work to create more general sarcasm indicators, improving precision to 62% and recall to 52%. To further test the generality of the method, we then apply it to bootstrapping a classifier for nastiness dialogic acts. Our first phase, using crowdsourced nasty indicators, achieves 58% precision and 49% recall, which increases to 75% precision and 62% recall when we bootstrap over the first level with generalized syntactic patterns.", "phrases": ["sarcasm", "nastiness classifier", "online dialogue", "syntactic pattern"], "overall_score": 5.146462005932238, "scores": [3.84361379893115, 2.0265740601807645, 0.8375511353456442, 0.558162130466866], "rank_score": 1.8164752812311062} -{"id": "kocmi-bojar-2018-trivial", "title": "Trivial Transfer Learning for Low-Resource Neural Machine Translation", "abstract": "Transfer learning has been proven as an effective technique for neural machine translation under low-resource conditions. Existing methods require a common target language, language relatedness, or specific training tricks and regimes. We present a simple transfer learning method, where we first train a \u201cparent\u201d model for a high-resource language pair and then continue the training on a low-resource pair only by replacing the training corpus. This \u201cchild\u201d model performs significantly better than the baseline trained for the low-resource pair only. We are the first to show this for targeting different languages, and we observe the improvements even for unrelated languages with different alphabets.", "phrases": ["transfer learning", "neural machine translation", "parent", "high-resource language pair", "vocabulary"], "overall_score": 6.114759924965247, "scores": [3.7424006083916863, 1.8631133515891298, 1.4831214460321036, 1.3698614334228003, 0.6211329788178923], "rank_score": 1.8159259636507223} -{"id": "carpuat-wu-2007-improving", "title": "Improving Statistical Machine Translation Using Word Sense Disambiguation", "abstract": "We show for the first time that incorporating the predictions of a word sense disambiguation system within a typical phrase-based statistical machine translation (SMT) model consistently improves translation quality across all three different IWSLT Chinese-English test sets, as well as producing statistically significant improvements on the larger NIST Chinese-English MT task\u2014and moreover never hurts performance on any test set, according not only to BLEU but to all eight most commonly used automatic evaluation metrics. Recent work has challenged the assumption that word sense disambiguation (WSD) systems are useful for SMT. Yet SMT translation quality still obviously suffers from inaccurate lexical choice. In this paper, we address this problem by investigating a new strategy for integrating WSD into an SMT system, that performs fully phrasal multi-word disambiguation. Instead of directly incorporating a Senseval-style WSD system, we redefine the WSD task to match the exact same phrasal translation disambiguation task faced by phrase-based SMT systems. 
Our results provide the first known empirical evidence that lexical semantics are indeed useful for SMT, despite claims to the contrary.", "phrases": ["word sense disambiguation", "translation quality", "phrase-based smt system", "phrase pair", "context-dependent probability distribution"], "overall_score": 8.062725971092071, "scores": [5.244583270428613, 1.6518750952302323, 1.114954097189277, 0.5363994401133835, 0.5264159686423583], "rank_score": 1.8148455743207728} -{"id": "voita-etal-2019-good", "title": "When a Good Translation is Wrong in Context: Context-Aware Machine Translation Improves on Deixis, Ellipsis, and Lexical Cohesion", "abstract": "Though machine translation errors caused by the lack of context beyond one sentence have long been acknowledged, the development of context-aware NMT systems is hampered by several problems. Firstly, standard metrics are not sensitive to improvements in consistency in document-level translations. Secondly, previous work on context-aware NMT assumed that the sentence-aligned parallel data consisted of complete documents while in most practical scenarios such document-level data constitutes only a fraction of the available parallel data. To address the first issue, we perform a human study on an English-Russian subtitles dataset and identify deixis, ellipsis and lexical cohesion as three main sources of inconsistency. We then create test sets targeting these phenomena. To address the second shortcoming, we consider a set-up in which a much larger amount of sentence-level data is available compared to that aligned at the document level. We introduce a model that is suitable for this scenario and demonstrate major gains over a context-agnostic baseline on our new benchmarks without sacrificing performance as measured with BLEU.", "phrases": ["machine translation", "deixis", "lexical cohesion", "context-aware nmt", "document level"], "overall_score": 6.342173857997439, "scores": [3.0543999664163737, 2.2111992042073143, 2.0909333845176583, 0.8669971278597597, 0.8457683901560004], "rank_score": 1.813859614631421} -{"id": "lowe-etal-2015-ubuntu", "title": "The Ubuntu Dialogue Corpus: A Large Dataset for Research in Unstructured Multi-Turn Dialogue Systems", "abstract": "This paper introduces the Ubuntu Dialogue Corpus, a dataset containing almost 1 million multi-turn dialogues, with a total of over 7 million utterances and 100 million words. This provides a unique resource for research into building dialogue managers based on neural language models that can make use of large amounts of unlabeled data. The dataset has both the multi-turn property of conversations in the Dialog State Tracking Challenge datasets, and the unstructured nature of interactions from microblog services such as Twitter. We also describe two neural learning architectures suitable for analyzing this dataset, and provide benchmark performance on the task of selecting the best next response.", "phrases": ["ubuntu dialogue corpus", "large dataset", "dialog", "support", "multi-turn response selection"], "overall_score": 7.946504770307226, "scores": [4.572601856961549, 1.3221188299597604, 1.2324636161855533, 1.0889602350965328, 0.8510125477765487], "rank_score": 1.8134314171959889} -{"id": "gehrmann-etal-2018-bottom", "title": "Bottom-Up Abstractive Summarization", "abstract": "Neural summarization produces outputs that are fluent and readable, but which can be poor at content selection, for instance often copying full sentences from the source document. 
This work explores the use of data-efficient content selectors to over-determine phrases in a source document that should be part of the summary. We use this selector as a bottom-up attention step to constrain the model to likely phrases. We show that this approach improves the ability to compress text, while still generating fluent summaries. This two-step process is both simpler and higher performing than other end-to-end content selection models, leading to significant improvements on ROUGE for both the CNN-DM and NYT corpus. Furthermore, the content selector can be trained with as little as 1,000 sentences, making it easy to transfer a trained summarizer to a new domain.", "phrases": ["summarization", "source document", "content selector", "over-determine phrase", "copy mechanism"], "overall_score": 8.178890833634865, "scores": [2.6915957108755975, 1.8973357687376236, 1.8679276497931758, 1.5655145419037568, 1.0434050586844337], "rank_score": 1.8131557459989172} -{"id": "wu-dredze-2019-beto", "title": "Beto, Bentz, Becas: The Surprising Cross-Lingual Effectiveness of BERT", "abstract": "Pretrained contextual representation models (Peters et al., 2018; Devlin et al., 2018) have pushed forward the state-of-the-art on many NLP tasks. A new release of BERT (Devlin, 2018) includes a model simultaneously pretrained on 104 languages with impressive performance for zero-shot cross-lingual transfer on a natural language inference task. This paper explores the broader cross-lingual potential of mBERT (multilingual) as a zero-shot language transfer model on 5 NLP tasks covering a total of 39 languages from various language families: NLI, document classification, NER, POS tagging, and dependency parsing. We compare mBERT with the best-published methods for zero-shot cross-lingual transfer and find mBERT competitive on each task. Additionally, we investigate the most effective strategy for utilizing mBERT in this manner, determine to what extent mBERT generalizes away from language-specific features, and measure factors that influence cross-lingual transfer.", "phrases": ["bert", "cross-lingual transfer", "language model", "various nlp task", "wikipedia data"], "overall_score": 8.271546988412362, "scores": [4.241211073004393, 2.5075247535412, 1.2218322726296502, 0.5583204900625871, 0.5321499659214988], "rank_score": 1.8122077110318657} -{"id": "bouamor-etal-2014-multidialectal", "title": "A Multidialectal Parallel Corpus of Arabic", "abstract": "The daily spoken variety of Arabic is often termed the colloquial or dialect form of Arabic. There are many Arabic dialects across the Arab World and within other Arabic-speaking communities. These dialects vary widely from region to region and to a lesser extent from city to city in each region. The dialects are not standardized, they are not taught, and they do not have official status. However, they are the primary vehicles of communication (face-to-face and recently, online) and have a large presence in the arts as well. In this paper, we present the first multidialectal Arabic parallel corpus, a collection of 2,000 sentences in Standard Arabic, Egyptian, Tunisian, Jordanian, Palestinian and Syrian Arabic, in addition to English. 
Such parallel data does not exist naturally, which makes this corpus a very valuable resource that has many potential applications such as Arabic dialect identification and machine translation.", "phrases": ["multidialectal parallel corpus", "arabic", "multi-dialectal data set", "topic bias"], "overall_score": 5.971558604349291, "scores": [3.620609521675429, 2.5633047988922018, 0.542098154560605, 0.5213833725780775], "rank_score": 1.8118489619265785} -{"id": "dinu-etal-2019-training", "title": "Training Neural Machine Translation to Apply Terminology Constraints", "abstract": "This paper proposes a novel method to inject custom terminology into neural machine translation at run time. Previous works have mainly proposed modifications to the decoding algorithm in order to constrain the output to include run-time-provided target terms. While being effective, these constrained decoding methods add, however, significant computational overhead to the inference step, and, as we show in this paper, can be brittle when tested in realistic conditions. In this paper we approach the problem by training a neural MT system to learn how to use custom terminology when provided with the input. Comparative experiments show that our method is not only more effective than a state-of-the-art implementation of constrained decoding, but is also as fast as constraint-free decoding.", "phrases": ["neural machine translation", "terminology constraint", "decoding"], "overall_score": 6.219900772244042, "scores": [2.2270420543605707, 2.08877512271722, 1.1180127138317053], "rank_score": 1.8112766303031653} -{"id": "poria-etal-2017-context", "title": "Context-Dependent Sentiment Analysis in User-Generated Videos", "abstract": "Multimodal sentiment analysis is a developing area of research, which involves the identification of sentiments in videos. Current research considers utterances as independent entities, i.e., ignores the interdependencies and relations among the utterances of a video. In this paper, we propose a LSTM-based model that enables utterances to capture contextual information from their surroundings in the same video, thus aiding the classification process. Our method shows 5-10% performance improvement over the state of the art and high robustness to generalizability.", "phrases": ["sentiment analysis", "video", "emotion recognition"], "overall_score": 6.58794105620685, "scores": [3.1697217045439534, 1.212276687814055, 1.0512263618584408], "rank_score": 1.8110749180721497} -{"id": "wilson-etal-2005-opinionfinder", "title": "OpinionFinder: A System for Subjectivity Analysis", "abstract": "OpinionFinder is a system that performs subjectivity analysis, automatically identifying when opinions, sentiments, speculations, and other private states are present in text. 
Specifically, OpinionFinder aims to identify subjective sentences and to mark various aspects of the subjectivity in these sentences, including the source (holder) of the subjectivity and words that are included in phrases expressing positive or negative sentiments.", "phrases": ["subjectivity analysis", "speculation", "opinionfinder"], "overall_score": 4.895444469143491, "scores": [2.891685924055957, 1.9955720651402071, 0.5359551408458684], "rank_score": 1.8077377100140108} -{"id": "banerjee-lavie-2005-meteor", "title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments", "abstract": "We describe METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. We evaluate METEOR by measuring the correlation between the metric scores and human judgments of translation quality. We compute the Pearson R correlation value between its scores and human quality assessments of the LDC TIDES 2003 Arabic-to-English and Chinese-to-English datasets. We perform segment-by-segment correlation, and show that METEOR gets an R correlation value of 0.347 on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. We also perform experiments to show the relative contributions of the various mapping modules.", "phrases": ["automatic metric", "matching", "meteor", "synonyms", "human judgement"], "overall_score": 7.3984275743889425, "scores": [5.655894971041653, 0.9057903840861998, 1.0861570264949991, 0.8306944737430018, 0.5563983607040349], "rank_score": 1.806987043213978} -{"id": "higgins-etal-2004-evaluating", "title": "Evaluating Multiple Aspects of Coherence in Student Essays", "abstract": "Criterion Online Essay Evaluation Service includes a capability that labels sentences in student writing with essay-based discourse elements (e.g., thesis statements). We describe a new system that enhances Criterion\u2019s capability by evaluating multiple aspects of coherence in essays. This system identifies features of sentences based on semantic similarity measures and discourse structure. A support vector machine uses these features to capture breakdowns in coherence due to relatedness to the essay question and relatedness between discourse elements. Intra-sentential quality is evaluated with rule-based heuristics. 
Results indicate that the system yields higher performance than a baseline on all three aspects.", "phrases": ["coherence", "student essay", "discourse element"], "overall_score": 5.406952288085052, "scores": [2.760474244379006, 1.766989424530605, 0.8871913746839608], "rank_score": 1.8048850145311908} -{"id": "gao-etal-2013-modeling", "title": "Modeling User Leniency and Product Popularity for Sentiment Classification", "abstract": "Classical approaches to sentiment classification exploit only textual features in a given review and are not aware of the personality of the user or the public sentiment toward the target product. In this paper, we propose a model that can accurately estimate the sentiment polarity by referring to the user leniency and product popularity computed during testing. For decoding with this model, we adopt an approximate strategy called \u201ctwo-stage decoding.\u201d Preliminary experimental results on two real-world datasets show that our method significantly improves classification accuracy over existing state-of-the-art methods.", "phrases": ["user leniency", "product popularity", "sentiment classification", "user-specific feature"], "overall_score": 3.9641143276653033, "scores": [3.0212296146091084, 1.9331784745104128, 1.4357038069056518, 0.8264728237236879], "rank_score": 1.8041461799372154} -{"id": "stab-gurevych-2014-identifying", "title": "Identifying Argumentative Discourse Structures in Persuasive Essays", "abstract": "In this paper, we present a novel approach for identifying argumentative discourse structures in persuasive essays. The structure of argumentation consists of several components (i.e. claims and premises) that are connected with argumentative relations. We consider this task in two consecutive steps. First, we identify the components of arguments using multiclass classification. Second, we classify a pair of argument components as either support or non-support for identifying the structure of argumentative discourse. For both tasks, we evaluate several classifiers and propose novel feature sets including structural, lexical, syntactic and contextual features. In our experiments, we obtain a macro F1-score of 0.726 for identifying argument components and 0.722 for argumentative relations.", "phrases": ["discourse structure", "persuasive essay", "gold argument component"], "overall_score": 7.227431847183645, "scores": [4.245047529280557, 0.6094617568003489, 0.5561452662914632], "rank_score": 1.8035515174574563} -{"id": "zhou-jurgens-2020-condolence", "title": "Condolence and Empathy in Online Communities", "abstract": "Offering condolence is a natural reaction to hearing someone's distress. Individuals frequently express distress in social media, where some communities can provide support. However, not all condolence is equal\u2014trite responses offer little actual support despite their good intentions. Here, we develop computational tools to create a massive dataset of 11.4M expressions of distress and 2.8M corresponding offerings of condolence in order to examine the dynamics of condolence online. Our study reveals widespread disparity in what types of distress receive supportive condolence rather than just engagement. Building on studies from social psychology, we analyze the language of condolence and develop a new dataset for quantifying the empathy in a condolence using appraisal theory. 
Finally, we demonstrate that the features of condolence that individuals find most helpful online differ substantially from those seen in interpersonal settings.", "phrases": ["empathy", "condolence", "online support group"], "overall_score": 3.5088844340647793, "scores": [2.9976596891018197, 1.8478191601614673, 0.5641508334354458], "rank_score": 1.803209894232911} -{"id": "huang-etal-2019-glossbert", "title": "GlossBERT: BERT for Word Sense Disambiguation with Gloss Knowledge", "abstract": "Word Sense Disambiguation (WSD) aims to find the exact sense of an ambiguous word in a particular context. Traditional supervised methods rarely take into consideration lexical resources like WordNet, which are widely utilized in knowledge-based methods. Recent studies have shown the effectiveness of incorporating gloss (sense definition) into neural networks for WSD. However, compared with traditional word expert supervised methods, they have not achieved much improvement. In this paper, we focus on how to better leverage gloss knowledge in a supervised neural WSD system. We construct context-gloss pairs and propose three BERT-based models for WSD. We fine-tune the pre-trained BERT model and achieve new state-of-the-art results on the WSD task.", "phrases": ["bert", "word sense disambiguation", "gloss knowledge", "wsd system", "classification task"], "overall_score": 6.355278529767301, "scores": [3.520997717959516, 2.9497813327903013, 0.9468747836258823, 1.0545926910677699, 0.5388549847446608], "rank_score": 1.8022203020376264} -{"id": "axelrod-etal-2011-domain", "title": "Domain Adaptation via Pseudo In-Domain Data Selection", "abstract": "We explore efficient domain adaptation for the task of statistical machine translation based on extracting sentences from a large general-domain parallel corpus that are most relevant to the target domain. These sentences may be selected with simple cross-entropy-based methods, of which we present three. As these sentences are not themselves identical to the in-domain data, we call them pseudo in-domain subcorpora. These subcorpora -- 1% the size of the original -- can then be used to train small domain-adapted Statistical Machine Translation (SMT) systems which outperform systems trained on the entire corpus. Performance is further improved when we use these domain-adapted models in combination with a true in-domain model. The results show that more training data is not always better, and that best results are attained via proper domain-relevant data selection, as well as combining in- and general-domain systems during decoding.", "phrases": ["domain adaptation", "cross-entropy difference", "rich literature", "side"], "overall_score": 7.754543248191576, "scores": [4.271306371003603, 1.2604258608955081, 0.8443618283068478, 0.8306215263547357], "rank_score": 1.8016788966401738} -{"id": "bunescu-pasca-2006-using", "title": "Using Encyclopedic Knowledge for Named entity Disambiguation", "abstract": "We present a new method for detecting and disambiguating named entities in open domain text. A disambiguation SVM kernel is trained to exploit the high coverage and rich structure of the knowledge encoded in an online encyclopedia. 
The resulting model significantly outperforms a less informed baseline.", "phrases": ["entity disambiguation", "wikipedia", "knowledge base", "textual context"], "overall_score": 7.492917931539491, "scores": [2.6277695352796386, 2.5414396212593724, 1.2087283339666095, 0.8287262039077212], "rank_score": 1.8016659236033357} -{"id": "he-etal-2011-automatically", "title": "Automatically Extracting Polarity-Bearing Topics for Cross-Domain Sentiment Classification", "abstract": "The joint sentiment-topic (JST) model was previously proposed to detect sentiment and topic simultaneously from text. The only supervision required by JST model learning is domain-independent polarity word priors. In this paper, we modify the JST model by incorporating word polarity priors through modifying the topic-word Dirichlet priors. We study the polarity-bearing topics extracted by JST and show that by augmenting the original feature space with polarity-bearing topics, the in-domain supervised classifiers learned from augmented feature representation achieve the state-of-the-art performance of 95% on the movie review data and an average of 90% on the multi-domain sentiment dataset. Furthermore, using feature augmentation and selection according to the information gain criteria for cross-domain sentiment classification, our proposed approach performs better than or comparably to previous approaches. Nevertheless, our approach is much simpler and does not require difficult parameter tuning.", "phrases": ["polarity-bearing topic", "cross-domain sentiment classification", "topic-word dirichlet prior"], "overall_score": 4.995276704298678, "scores": [2.6200750042530925, 1.8878320467909337, 0.8970886458258744], "rank_score": 1.801665232289967} -{"id": "pradhan-etal-2005-semantic-role", "title": "Semantic Role Labeling Using Different Syntactic Views", "abstract": "Semantic role labeling is the process of annotating the predicate-argument structure in text with semantic labels. In this paper we present a state-of-the-art baseline semantic role labeling system based on Support Vector Machine classifiers. We show improvements on this system by: i) adding new features including features extracted from dependency parses, ii) performing feature selection and calibration and iii) combining parses obtained from semantic parsers trained using different syntactic views. Error analysis of the baseline system showed that approximately half of the argument identification errors resulted from parse errors in which there was no syntactic constituent that aligned with the correct argument. In order to address this problem, we combined semantic parses from a Minipar syntactic parse and from a chunked syntactic representation with our original baseline system which was based on Charniak parses. All of the reported techniques resulted in performance improvements.", "phrases": ["view", "semantic role labeling", "rule-based dependency parser"], "overall_score": 5.300088246174278, "scores": [2.6618550002794326, 2.14234843473366, 0.5958964994824559], "rank_score": 1.800033311498516} -{"id": "hovy-etal-2013-learning", "title": "Learning Whom to Trust with MACE", "abstract": "Non-expert annotation services like Amazon\u2019s Mechanical Turk (AMT) are cheap and fast ways to evaluate systems and provide categorical annotations for training data. Unfortunately, some annotators choose bad labels in order to maximize their pay. Manual identification is tedious, so we experiment with an item-response model. 
It learns in an unsupervised fashion to a) identify which annotators are trustworthy and b) predict the correct underlying labels. We match performance of more complex state-of-the-art systems and perform well even under adversarial conditions. We show considerable improvements over standard baselines, both for predicted label accuracy and trustworthiness estimates. The latter can be further improved by introducing a prior on model parameters and using Variational Bayes inference. Additionally, we can achieve even higher accuracy by focusing on the instances our model is most confident in (trading in some recall), and by incorporating annotated control instances. Our system, MACE (Multi-Annotator Competence Estimation), is available for download.", "phrases": ["mace", "annotator", "competence estimation", "spammer", "majority voting"], "overall_score": 6.342713802784936, "scores": [3.945426358577873, 2.2680153861600565, 1.0526129688899024, 0.8815693181606443, 0.8456620467461762], "rank_score": 1.7986572157069305} -{"id": "chung-gildea-2010-effects", "title": "Effects of Empty Categories on Machine Translation", "abstract": "We examine effects that empty categories have on machine translation. Empty categories are elements in parse trees that lack corresponding overt surface forms (words) such as dropped pronouns and markers for control constructions. We start by training machine translation systems with manually inserted empty elements. We find that inclusion of some empty categories in training data improves the translation result. We expand the experiment by automatically inserting these elements into a larger data set using various methods and training on the modified corpus. We show that even when automatic prediction of null elements is not highly accurate, it nevertheless improves the end translation result.", "phrases": ["empty category", "machine translation", "pronoun", "pro"], "overall_score": 5.193789242276637, "scores": [3.6578597639645225, 1.67036286789126, 1.3063773741783418, 0.5531110253760048], "rank_score": 1.7969277578525322} -{"id": "hamilton-etal-2016-cultural", "title": "Cultural Shift or Linguistic Drift? Comparing Two Computational Measures of Semantic Change", "abstract": "Words shift in meaning for many reasons, including cultural factors like new technologies and regular linguistic processes like subjectification. Understanding the evolution of language and culture requires disentangling these underlying causes. Here we show how two different distributional measures can be used to detect two different types of semantic change. The first measure, which has been used in many previous works, analyzes global shifts in a word's distributional semantics; it is sensitive to changes due to regular processes of linguistic drift, such as the semantic generalization of promise (\"I promise.\" \u2192 \"It promised to be exciting.\"). The second measure, which we develop here, focuses on local changes to a word's nearest semantic neighbors; it is more sensitive to cultural shifts, such as the change in the meaning of cell (\"prison cell\" \u2192 \"cell phone\"). 
Comparing measurements made by these two methods allows researchers to determine whether changes are more cultural or linguistic in nature, a distinction that is essential for work in the digital humanities and historical linguistics.", "phrases": ["drift", "semantic change", "cultural shift", "target word"], "overall_score": 4.865229182779376, "scores": [2.841604721276822, 1.8525835282722978, 1.6483827031842173, 0.8437495679116161], "rank_score": 1.7965801301612383} -{"id": "huang-etal-2012-improving", "title": "Improving Word Representations via Global Context and Multiple Word Prototypes", "abstract": "Unsupervised word representations are very useful in NLP tasks both as inputs to learning algorithms and as extra word features in NLP systems. However, most of these models are built with only local context and one representation per word. This is problematic because words are often polysemous and global context can also provide useful information for learning word meanings. We present a new neural network architecture which 1) learns word embeddings that better capture the semantics of words by incorporating both local and global document context, and 2) accounts for homonymy and polysemy by learning multiple embeddings per word. We introduce a new dataset with human judgments on pairs of words in sentential context, and evaluate our model on it, showing that our model outperforms competitive baselines and other neural language models.", "phrases": ["global context", "polysemy", "neural language model", "multi-prototype embedding", "vector representation"], "overall_score": 8.437953605093936, "scores": [3.4875703569459997, 1.62956260883497, 1.4762404240145879, 1.2846877986775243, 1.0975678413818797], "rank_score": 1.7951258059709922} -{"id": "gao-etal-2020-machine", "title": "From Machine Reading Comprehension to Dialogue State Tracking: Bridging the Gap", "abstract": "Dialogue state tracking (DST) is at the heart of task-oriented dialogue systems. However, the scarcity of labeled data is an obstacle to building accurate and robust state tracking systems that work across a variety of domains. Existing approaches generally require some dialogue data with state information and their ability to generalize to unknown domains is limited. In this paper, we propose using machine reading comprehension (RC) in state tracking from two perspectives: model architectures and datasets. We divide the slot types in dialogue state into categorical or extractive to borrow the advantages from both multiple-choice and span-based reading comprehension models. Our method achieves near the current state-of-the-art in joint goal accuracy on MultiWOZ 2.1 given full training data. More importantly, by leveraging machine reading comprehension datasets, our method outperforms the existing approaches by a large margin in few-shot scenarios when the availability of in-domain data is limited. 
Lastly, even without any state tracking data, i.e., the zero-shot scenario, our proposed approach achieves greater than 90% average slot accuracy in 12 out of 30 slots in MultiWOZ 2.1.", "phrases": ["machine reading comprehension", "dialogue state tracking", "cross-task transfer"], "overall_score": 4.1300096435057165, "scores": [2.500928433934407, 2.330799080418056, 0.5491936807927823], "rank_score": 1.7936403983817486} -{"id": "kazemzadeh-etal-2014-referitgame", "title": "ReferItGame: Referring to Objects in Photographs of Natural Scenes", "abstract": "In this paper we introduce a new game to crowd-source natural language referring expressions. By designing a two-player game, we can both collect and verify referring expressions directly within the game. To date, the game has produced a dataset containing 130,525 expressions, referring to 96,654 distinct objects, in 19,894 photographs of natural scenes. This dataset is larger and more varied than previous REG datasets and allows us to study referring expressions in real-world scenes. We provide an in-depth analysis of the resulting dataset. Based on our findings, we design a new optimization-based model for generating referring expressions and perform experimental evaluations on 3 test sets.", "phrases": ["object", "game", "reg", "real-world image", "reasoning"], "overall_score": 6.570999836239442, "scores": [4.9183058902426495, 1.436168197220535, 1.2099010779117019, 0.861502892971897, 0.5421706207048509], "rank_score": 1.7936097358103271} -{"id": "popovic-2015-chrf", "title": "chrF: character n-gram F-score for automatic MT evaluation", "abstract": "We propose the use of character n-gram F-score for automatic evaluation of machine translation output. Character n-grams have already been used as a part of more complex metrics, but their individual potential has not been investigated yet. We report system-level correlations with human rankings for 6-gram F1-score (CHRF) on the WMT12, WMT13 and WMT14 data as well as segment-level correlation for 6-gram F1 (CHRF) and F3-scores (CHRF3) on WMT14 data for all available target languages. The results are very promising, especially for the CHRF3 score \u2013 for translation from English, this variant showed the highest segment-level correlations, outperforming even the best metrics on the WMT14 shared evaluation task.", "phrases": ["character n-gram f-score", "variant", "chrf", "machine translation evaluation"], "overall_score": 5.181870958712746, "scores": [3.4063973420801714, 2.615876760402636, 0.5918722322188978, 0.5570709241944457], "rank_score": 1.7928043147240378} -{"id": "thompson-koehn-2019-vecalign", "title": "Vecalign: Improved Sentence Alignment in Linear Time and Space", "abstract": "We introduce Vecalign, a novel bilingual sentence alignment method which is linear in time and space with respect to the number of sentences being aligned and which requires only bilingual sentence embeddings. On a standard German\u2013French test set, Vecalign outperforms the previous state-of-the-art method (which has quadratic time complexity and requires a machine translation system) by 5 F1 points. 
It substantially outperforms the popular Hunalign toolkit at recovering Bible verse alignments in medium- to low-resource language pairs, and it improves downstream MT quality by 1.7 and 1.6 BLEU in Sinhala-English and Nepali-English, respectively, compared to the Hunalign-based Paracrawl pipeline.", "phrases": ["sentence alignment", "linear time", "vecalign"], "overall_score": 4.454330018663479, "scores": [2.9689378673087847, 1.5304446857930194, 0.8782802484889518], "rank_score": 1.7925542671969186} -{"id": "nema-etal-2017-diversity", "title": "Diversity driven attention model for query-based abstractive summarization", "abstract": "Abstractive summarization aims to generate a shorter version of the document covering all the salient points in a compact and coherent fashion. On the other hand, query-based summarization highlights those points that are relevant in the context of a given query. The encode-attend-decode paradigm has achieved notable success in machine translation, extractive summarization, dialog systems, etc. But it suffers from the drawback of generation of repeated phrases. In this work we propose a model for the query-based summarization task based on the encode-attend-decode paradigm with two key additions (i) a query attention model (in addition to document attention model) which learns to focus on different portions of the query at different time steps (instead of using a static representation for the query) and (ii) a new diversity based attention model which aims to alleviate the problem of repeating phrases in the summary. In order to enable the testing of this model we introduce a new query-based summarization dataset building on debatepedia. Our experiments show that with these two additions the proposed model clearly outperforms vanilla encode-attend-decode models with a gain of 28% (absolute) in ROUGE-L scores.", "phrases": ["attention model", "summarization", "query"], "overall_score": 4.730274987660788, "scores": [3.3428306367020206, 0.8218075971149669, 1.2125943119614744], "rank_score": 1.7924108485928205} -{"id": "ding-palmer-2005-machine", "title": "Machine Translation Using Probabilistic Synchronous Dependency Insertion Grammars", "abstract": "Syntax-based statistical machine translation (MT) aims at applying statistical models to structured data. In this paper, we present a syntax-based statistical machine translation system based on a probabilistic synchronous dependency insertion grammar. Synchronous dependency insertion grammars are a version of synchronous grammars defined on dependency trees. We first introduce our approach to inducing such a grammar from parallel corpora. Second, we describe the graphical model for the machine translation task, which can also be viewed as a stochastic tree-to-tree transducer. We introduce a polynomial time decoding algorithm for the model. We evaluate the outputs of our MT system using the NIST and Bleu automatic MT evaluation software. 
The result shows that our system outperforms the baseline system based on the IBM models in both translation speed and quality.", "phrases": ["synchronous grammar", "machine translation", "sdig"], "overall_score": 6.265008265046258, "scores": [3.652348701300178, 1.1953992844831238, 0.5276228563309067], "rank_score": 1.7917902807047363} -{"id": "sha-pereira-2003-shallow", "title": "Shallow Parsing with Conditional Random Fields", "abstract": "Conditional random fields for sequence labeling offer advantages over both generative models like HMMs and classifiers applied at each sequence position. Among sequence labeling tasks in language processing, shallow parsing has received much attention, with the development of standard evaluation datasets and extensive comparison among methods. We show here how to train a conditional random field to achieve performance as good as any reported base noun-phrase chunking method on the CoNLL task, and better than any reported single model. Improved training methods based on modern optimization algorithms were critical in achieving these results. We present extensive comparisons between models and training methods that confirm and strengthen previous results on shallow parsing and training methods for maximum-entropy models.", "phrases": ["conditional random fields", "training method", "shallow parsing", "crf"], "overall_score": 5.2751818151812815, "scores": [3.022827188319877, 2.2270201747507654, 1.3745756965260765, 0.5418749720570606], "rank_score": 1.7915745079134449} -{"id": "indurthi-etal-2019-fermi", "title": "FERMI at SemEval-2019 Task 5: Using Sentence embeddings to Identify Hate Speech Against Immigrants and Women in Twitter", "abstract": "This paper describes our system (Fermi) for Task 5 of SemEval-2019: HatEval: Multilingual Detection of Hate Speech Against Immigrants and Women on Twitter. We participated in subtask A for English and ranked first in the evaluation on the test set. We evaluate the quality of multiple sentence embeddings and explore multiple training models to evaluate the performance of simple yet effective embedding-ML combination algorithms. Our team Fermi's model achieved an accuracy of 65.00% for the English language in task A. Our models, which use pretrained Universal Encoder sentence embeddings for transforming the input and SVM (with RBF kernel) for classification, scored first position (among 68) on the leaderboard on the test set for Subtask A in the English language. In this paper we provide a detailed description of the approach, as well as the results obtained in the task.", "phrases": ["sentence embedding", "hate speech", "twitter"], "overall_score": 4.4490356359805014, "scores": [2.887644916775055, 1.6950209675964376, 0.788605068233593], "rank_score": 1.7904236508683617} -{"id": "lester-etal-2021-power", "title": "The Power of Scale for Parameter-Efficient Prompt Tuning", "abstract": "In this work, we explore \u201cprompt tuning,\u201d a simple yet effective mechanism for learning \u201csoft prompts\u201d to condition frozen language models to perform specific downstream tasks. Unlike the discrete text prompts used by GPT-3, soft prompts are learned through backpropagation and can be tuned to incorporate signals from any number of labeled examples. Our end-to-end learned approach outperforms GPT-3's few-shot learning by a large margin. 
More remarkably, through ablations on model size using T5, we show that prompt tuning becomes more competitive with scale: as models exceed billions of parameters, our method \u201ccloses the gap\u201d and matches the strong performance of model tuning (where all model weights are tuned). This finding is especially relevant because large models are costly to share and serve and the ability to reuse one frozen model for multiple downstream tasks can ease this burden. Our method can be seen as a simplification of the recently proposed \u201cprefix tuning\u201d of Li and Liang (2021) and we provide a comparison to this and other similar approaches. Finally, we show that conditioning a frozen model with soft prompts confers benefits in robustness to domain transfer and enables efficient \u201cprompt ensembling.\u201d We release code and model checkpoints to reproduce our experiments.", "phrases": ["prompt tuning", "language model", "downstream task", "model size", "design"], "overall_score": 6.773137528530367, "scores": [4.379148421053943, 1.5067435887348828, 1.1187309301666952, 1.0758182478172946, 0.8688163597635379], "rank_score": 1.7898515095072711} -{"id": "refaee-rieser-2014-arabic", "title": "An Arabic Twitter Corpus for Subjectivity and Sentiment Analysis", "abstract": "We present a newly collected data set of 8,868 gold-standard annotated Arabic feeds. The corpus is manually labelled for subjectivity and sentiment analysis (SSA) (\u03ba = 0.816). In addition, the corpus is annotated with a variety of motivated feature-sets that have previously shown positive impact on performance. The paper highlights issues posed by Twitter as a genre, such as mixture of language varieties and topic-shifts. Our next step is to extend the current corpus, using online semi-supervised learning. A first sub-corpus will be released via the ELRA repository as part of this submission.", "phrases": ["arabic twitter corpus", "subjectivity", "sentiment analysis"], "overall_score": 4.119675026134882, "scores": [2.7091273443202852, 1.684639519624327, 0.97368952931043], "rank_score": 1.7891521310850138} -{"id": "bond-foster-2013-linking", "title": "Linking and Extending an Open Multilingual Wordnet", "abstract": "We create an open multilingual wordnet with large wordnets for over 26 languages and smaller ones for 57 languages. It is made by combining wordnets with open licences, data from Wiktionary and the Unicode Common Locale Data Repository. Overall there are over 2 million senses for over 100 thousand concepts, linking over 1.4 million words in hundreds of languages.", "phrases": ["open multilingual wordnet", "wiktionary", "format"], "overall_score": 5.357445384492364, "scores": [3.9781736727834054, 0.8279845502995508, 0.5589193892797991], "rank_score": 1.7883592041209184} -{"id": "molina-etal-2016-overview", "title": "Overview for the Second Shared Task on Language Identification in Code-Switched Data", "abstract": "We present an overview of the second shared task on language identification in code-switched data. The shared task included code-switched data from four language pairs: Modern Standard Arabic-Dialectal Arabic (MSA-DA), Mandarin-English (MAN-EN), Nepali-English (NEP-EN), and Spanish-English (SPA-EN). A total of seven teams participated in the task and submitted 42 system runs. The evaluation showed that language identification at the token level is more difficult when the languages present are closely related, as in the case of MSA-DA, where the prediction performance was the lowest among all language pairs. 
In contrast, the language pairs with the highest F-measure were SPA-EN and NEP-EN. The task made evident that language identification in code-switched data is still far from solved and warrants further research.", "phrases": ["language identification", "code-switched data", "code-switched text"], "overall_score": 5.440055908813185, "scores": [3.075343752793901, 1.7485494205976488, 0.5366085342730351], "rank_score": 1.7868339025548616} -{"id": "dabre-etal-2017-empirical", "title": "An Empirical Study of Language Relatedness for Transfer Learning in Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) is known to outperform Phrase Based Statistical Machine Translation (PBSMT) for resource-rich language pairs but not for resource-poor ones. Transfer Learning (Zoph et al., 2016) is a simple approach in which we can simply initialize an NMT model (child model) for a resource-poor language pair using a previously trained model (parent model) for a resource-rich language pair where the target languages are the same. This paper explores how different choices of parent models affect the performance of child models. We empirically show that using a parent model with the source language falling in the same or linguistically similar language family as the source language of the child model is the best.", "phrases": ["transfer learning", "neural machine translation", "source language"], "overall_score": 4.283980110327288, "scores": [2.3097232484728343, 1.9598647769607163, 1.090087385237536], "rank_score": 1.7865584702236956} -{"id": "kovaleva-etal-2019-revealing", "title": "Revealing the Dark Secrets of BERT", "abstract": "BERT-based architectures currently give state-of-the-art performance on many NLP tasks, but little is known about the exact mechanisms that contribute to their success. In the current work, we focus on the interpretation of self-attention, which is one of the fundamental underlying components of BERT. Using a subset of GLUE tasks and a set of handcrafted features-of-interest, we propose the methodology and carry out a qualitative and quantitative analysis of the information encoded by BERT's individual heads. Our findings suggest that there is a limited set of attention patterns that are repeated across different heads, indicating the overall model overparametrization. While different heads consistently use the same attention patterns, they have varying impact on performance across different tasks. We show that manually disabling attention in certain heads leads to a performance improvement over the regular fine-tuned BERT models.", "phrases": ["bert", "explicit mechanism", "over-parametrization"], "overall_score": 7.251838512929346, "scores": [4.302907049079235, 0.5322316452642436, 0.5227780359938768], "rank_score": 1.7859722434457852} -{"id": "barzilay-lapata-2005-modeling", "title": "Modeling Local Coherence: An Entity-Based Approach", "abstract": "This paper considers the problem of automatic assessment of local coherence. We present a novel entity-based representation of discourse which is inspired by Centering Theory and can be computed automatically from raw text. We view coherence assessment as a ranking learning problem and show that the proposed discourse representation supports the effective learning of a ranking function. 
Our experiments demonstrate that the induced model achieves significantly higher accuracy than a state-of-the-art coherence model.", "phrases": ["coherence", "entity-based approach", "discourse entity", "local model", "entity-grid model"], "overall_score": 8.009377742442263, "scores": [5.473790688163595, 0.9015576836996537, 1.047400529538334, 0.9214590978871123, 0.577630486505404], "rank_score": 1.7843676971588198} -{"id": "costa-jussa-fonollosa-2016-character", "title": "Character-based Neural Machine Translation", "abstract": "Neural Machine Translation (MT) has reached state-of-the-art results. However, one of the main challenges that neural MT still faces is dealing with very large vocabularies and morphologically rich languages. In this paper, we propose a neural MT system using character-based embeddings in combination with convolutional and highway layers to replace the standard lookup-based word representations. The resulting unlimited-vocabulary and affix-aware source word embeddings are tested in a state-of-the-art neural MT based on an attention-based bidirectional recurrent neural network. The proposed MT scheme provides improved results even when the source language is not morphologically rich. Improvements up to 3 BLEU points are obtained in the German-English WMT task.", "phrases": ["neural machine translation", "lookup-based word representation", "character"], "overall_score": 6.183910039439162, "scores": [3.0547018411326246, 1.7158217163262997, 0.5823742508624876], "rank_score": 1.7842992694404707} -{"id": "ng-etal-2003-exploiting", "title": "Exploiting Parallel Texts for Word Sense Disambiguation: An Empirical Study", "abstract": "A central problem of word sense disambiguation (WSD) is the lack of manually sense-tagged data required for supervised learning. In this paper, we evaluate an approach to automatically acquire sense-tagged training data from English-Chinese parallel corpora, which are then used for disambiguating the nouns in the SENSEVAL-2 English lexical sample task. Our investigation reveals that this method of acquiring sense-tagged data is promising. On a subset of the most difficult SENSEVAL-2 nouns, the accuracy difference between the two approaches is only 14.0%, and the difference could narrow further to 6.5% if we disregard the advantage that manually sense-tagged data have in their sense coverage. Our analysis also highlights the importance of the issue of domain dependence in evaluating WSD programs.", "phrases": ["word sense disambiguation", "parallel corpora", "cross-lingual evidence idea"], "overall_score": 6.236484760772748, "scores": [2.869431512861994, 1.9195017363668865, 0.5619644546706238], "rank_score": 1.7836325679665013} -{"id": "zbib-etal-2012-machine", "title": "Machine Translation of Arabic Dialects", "abstract": "Arabic Dialects present many challenges for machine translation, not least of which is the lack of data resources. We use crowdsourcing to cheaply and quickly build Levantine-English and Egyptian-English parallel corpora, consisting of 1.1M words and 380k words, respectively. The dialectal sentences are selected from a large corpus of Arabic web text, and translated using Amazon's Mechanical Turk. We use this data to build Dialectal Arabic MT systems, and find that small amounts of dialectal data have a dramatic impact on translation quality. 
When translating Egyptian and Levantine test sets, our Dialectal Arabic MT system performs 6.3 and 7.0 BLEU points higher than a Modern Standard Arabic MT system trained on a 150M-word Arabic-English parallel corpus.", "phrases": ["dialect", "egyptian-english parallel corpora", "da-english data"], "overall_score": 6.341398677341846, "scores": [3.958436284961379, 0.8643047536982255, 0.5281263596946044], "rank_score": 1.7836224661180695} -{"id": "tsur-etal-2015-frame", "title": "A Frame of Mind: Using Statistical Models for Detection of Framing and Agenda Setting Campaigns", "abstract": "Framing is a sophisticated form of discourse in which the speaker tries to induce a cognitive bias through consistent linkage between a topic and a specific context (frame). We build on political science and communication theory and use probabilistic topic models combined with time series regression analysis (autoregressive distributed-lag models) to gain insights about the language dynamics in the political processes. Processing four years of public statements issued by members of the U.S. Congress, our results provide a glimpse into the complex dynamic processes of framing, attention shifts and agenda setting, commonly known as \u2018spin\u2019. We further provide new evidence for the divergence in party discipline in U.S. politics.", "phrases": ["framing", "news article", "computational analysis"], "overall_score": 5.153098291364847, "scores": [3.76140307562303, 1.066836660039522, 0.5203092293145362], "rank_score": 1.782849654992363} -{"id": "saha-mausam-2018-open", "title": "Open Information Extraction from Conjunctive Sentences", "abstract": "We develop CALM, a coordination analyzer that improves upon the conjuncts identified from dependency parses. It uses language-model-based scoring and several linguistic constraints to search over hierarchical conjunct boundaries (for nested coordination). By splitting a conjunctive sentence around these conjuncts, CALM outputs several simple sentences. We demonstrate the value of our coordination analyzer in the end task of Open Information Extraction (Open IE). State-of-the-art Open IE systems lose substantial yield due to ineffective processing of conjunctive sentences. Our Open IE system, CALMIE, performs extraction over the simple sentences identified by CALM to obtain up to 1.8x yield with a moderate increase in precision compared to extractions from original sentences.", "phrases": ["conjunctive sentence", "calmie", "open information extraction"], "overall_score": 3.9167483877863583, "scores": [2.56055327995351, 1.9294757069907584, 0.8577380481415816], "rank_score": 1.7825890116952834} -{"id": "reschke-etal-2014-event", "title": "Event Extraction Using Distant Supervision", "abstract": "Distant supervision is a successful paradigm that gathers training data for information extraction systems by automatically aligning vast databases of facts with text. Previous work has demonstrated its usefulness for the extraction of binary relations such as a person's employer or a film's director. Here, we extend the distant supervision approach to template-based event extraction, focusing on the extraction of passenger counts, aircraft types, and other facts concerning airplane crash events. We present a new publicly available dataset and event extraction task in the plane crash domain based on Wikipedia infoboxes and newswire text.
Using this dataset, we conduct a preliminary evaluation of four distantly supervised extraction models which assign named entity mentions in text to entries in the event template. Our results indicate that joint inference over sequences of candidate entity mentions is beneficial. Furthermore, we demonstrate that the Searn algorithm outperforms a linear-chain CRF and strong baselines with local inference.", "phrases": ["distant supervision", "newswire text", "event extraction"], "overall_score": 3.703974460122901, "scores": [2.7477000198515804, 2.0226054242851337, 0.573400141061972], "rank_score": 1.7812351950662286} -{"id": "hewitt-liang-2019-designing", "title": "Designing and Interpreting Probes with Control Tasks", "abstract": "Probes, supervised models trained to predict properties (like parts-of-speech) from representations (like ELMo), have achieved high accuracy on a range of linguistic tasks. But does this mean that the representations encode linguistic structure or just that the probe has learned the linguistic task? In this paper, we propose control tasks, which associate word types with random outputs, to complement linguistic tasks. By construction, these tasks can only be learned by the probe itself. So a good probe (one that reflects the representation) should be selective, achieving high linguistic task accuracy and low control task accuracy. The selectivity of a probe puts linguistic task accuracy in context with the probe's capacity to memorize from word types. We construct control tasks for English part-of-speech tagging and dependency edge prediction, and show that popular probes on ELMo representations are not selective. We also find that dropout, commonly used to control probe complexity, is ineffective for improving selectivity of MLPs, but that other forms of regularization are effective. Finally, we find that while probes on the first layer of ELMo yield slightly better part-of-speech tagging accuracy than the second, probes on the second layer are substantially more selective, which raises the question of which layer better represents parts-of-speech.", "phrases": ["probe", "control task", "selectivity", "capacity", "linguistic knowledge"], "overall_score": 7.169883585183053, "scores": [2.9324905018550798, 1.928328770771224, 1.5035873972084643, 1.4654631506061477, 1.076039695099597], "rank_score": 1.7811819031081026} -{"id": "turney-2008-uniform", "title": "A Uniform Approach to Analogies, Synonyms, Antonyms, and Associations", "abstract": "Recognizing analogies, synonyms, antonyms, and associations appear to be four distinct tasks, requiring distinct NLP algorithms. In the past, the four tasks have been treated independently, using a wide variety of algorithms. These four semantic classes, however, are a tiny sample of the full range of semantic phenomena, and we cannot afford to create ad hoc algorithms for each semantic phenomenon; we need to seek a unified approach. We propose to subsume a broad range of phenomena under analogies. To limit the scope of this paper, we restrict our attention to the subsumption of synonyms, antonyms, and associations.
We introduce a supervised corpus-based machine learning algorithm for classifying analogous word pairs, and we show that it can solve multiple-choice SAT analogy questions, TOEFL synonym questions, ESL synonym-antonym questions, and similar-associated-both questions from cognitive psychology.", "phrases": ["uniform approach", "antonyms", "association", "nlp problem", "analogy task"], "overall_score": 6.17278170443428, "scores": [3.3601152275455086, 2.7145437425720482, 1.387760674804043, 0.8777728453334416, 0.5652490632224167], "rank_score": 1.7810883106954918} -{"id": "navigli-ponzetto-2010-babelnet", "title": "BabelNet: Building a Very Large Multilingual Semantic Network", "abstract": "In this paper we present BabelNet -- a very large, wide-coverage multilingual semantic network. The resource is automatically constructed by means of a methodology that integrates lexicographic and encyclopedic knowledge from WordNet and Wikipedia. In addition, Machine Translation is also applied to enrich the resource with lexical information for all languages. We conduct experiments on new and existing gold-standard datasets to show the high quality and coverage of the resource.", "phrases": ["multilingual semantic network", "encyclopedic knowledge", "different language", "knowledge base", "link"], "overall_score": 7.7310710119309745, "scores": [4.5229482505540695, 1.556738465131512, 1.0796873780388663, 0.8778276540982339, 0.8617592565491516], "rank_score": 1.7797922008743665} -{"id": "jeong-etal-2009-semi", "title": "Semi-supervised Speech Act Recognition in Emails and Forums", "abstract": "In this paper, we present a semi-supervised method for automatic speech act recognition in email and forums. The major challenge of this task is the lack of labeled data in these two genres. Our method leverages labeled data in the Switchboard-DAMSL and the Meeting Recorder Dialog Act database and applies simple domain adaptation techniques over a large amount of unlabeled email and forum data to address this problem. Our method uses automatically extracted features such as phrases and dependency trees, called subtree features, for semi-supervised learning. Empirical results demonstrate that our model is effective in email and forum speech act recognition.", "phrases": ["speech act recognition", "forum", "semi-supervised learning"], "overall_score": 4.930412593203693, "scores": [3.2207471217968586, 1.1547580067393994, 0.9593062197773391], "rank_score": 1.7782704494378656} -{"id": "ebrahimi-dou-2015-chain", "title": "Chain Based RNN for Relation Classification", "abstract": "We present a novel approach for relation classification, using a recursive neural network (RNN), based on the shortest path between two entities in a dependency graph. Previous works on RNN are based on constituency-based parsing because phrasal nodes in a parse tree can capture compositionality in a sentence. Compared with constituency-based parse trees, dependency graphs can represent relations more compactly. This is particularly important in sentences with distant entities, where the parse tree spans words that are not relevant to the relation. In such cases RNN cannot be trained effectively in a timely manner. However, due to the lack of phrasal nodes in dependency graphs, application of RNN is not straightforward. In order to tackle this problem, we utilize dependency constituent units called chains.
Our experiments on two relation classification datasets show that Chain-based RNN provides a shallower network, which performs considerably faster and achieves better classification results.", "phrases": ["rnn", "relation classification", "recursive neural network"], "overall_score": 3.4587418529075804, "scores": [2.9819999665147527, 1.8160840340755326, 0.5342411140919725], "rank_score": 1.7774417048940858} -{"id": "pavlick-kwiatkowski-2019-inherent", "title": "Inherent Disagreements in Human Textual Inferences", "abstract": "We analyze humans' disagreements about the validity of natural language inferences. We show that, very often, disagreements are not dismissible as annotation \u201cnoise\u201d, but rather persist as we collect more ratings and as we vary the amount of context provided to raters. We further show that the type of uncertainty captured by current state-of-the-art models for natural language inference is not reflective of the type of uncertainty present in human disagreements. We discuss implications of our results in relation to the recognizing textual entailment (RTE)/natural language inference (NLI) task. We argue for a refined evaluation objective that requires models to explicitly capture the full distribution of plausible human judgments.", "phrases": ["disagreement", "natural language inference", "rating", "nli", "annotation artifact"], "overall_score": 5.411409218961202, "scores": [5.0699103080052135, 1.5533124918363719, 1.165871272498082, 0.5526997779416691, 0.545329384401813], "rank_score": 1.7774246469366297} -{"id": "kogan-etal-2009-predicting", "title": "Predicting Risk from Financial Reports with Regression", "abstract": "We address a text regression problem: given a piece of text, predict a real-world continuous quantity associated with the text's meaning. In this work, the text is an SEC-mandated financial report published annually by a publicly-traded company, and the quantity to be predicted is volatility of stock returns, an empirical measure of financial risk. We apply well-known regression techniques to a large corpus of freely available financial reports, constructing regression models of volatility for the period following a report. Our models rival past volatility (a strong baseline) in predicting the target variable, and a single model that uses both can significantly outperform past volatility. Interestingly, our approach is more accurate for reports after the passage of the Sarbanes-Oxley Act of 2002, giving some evidence for the success of that legislation in making financial reports more informative.", "phrases": ["risk", "report", "text regression problem", "company", "volatility"], "overall_score": 5.790959276451175, "scores": [3.567816902722438, 1.9703393421133082, 1.4494147862711677, 1.321610574567785, 0.5778467694387722], "rank_score": 1.777405675022694} -{"id": "luong-etal-2015-addressing", "title": "Addressing the Rare Word Problem in Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) is a new approach to machine translation that has shown promising results that are comparable to traditional approaches. A significant weakness in conventional NMT systems is their inability to correctly translate very rare words: end-to-end NMTs tend to have relatively small vocabularies with a single unk symbol that represents every possible out-of-vocabulary (OOV) word. In this paper, we propose and implement an effective technique to address this problem.
We train an NMT system on data that is augmented by the output of a word alignment algorithm, allowing the NMT system to emit, for each OOV word in the target sentence, the position of its corresponding word in the source sentence. This information is later utilized in a post-processing step that translates every OOV word using a dictionary. Our experiments on the WMT\u201914 English to French translation task show that this method provides a substantial improvement of up to 2.8 BLEU points over an equivalent NMT system that does not use this technique. With 37.5 BLEU points, our NMT system is the first to surpass the best result achieved on a WMT\u201914 contest task.", "phrases": ["rare word problem", "neural machine translation", "nmt system", "post-processing step", "translation quality"], "overall_score": 7.513725214672197, "scores": [3.708394860081773, 1.8497889765314086, 1.5274828012384611, 0.8961456993348225, 0.8910457339485308], "rank_score": 1.7745716142269994} -{"id": "parikh-etal-2016-decomposable", "title": "A Decomposable Attention Model for Natural Language Inference", "abstract": "We propose a simple neural architecture for natural language inference. Our approach uses attention to decompose the problem into subproblems that can be solved separately, thus making it trivially parallelizable. On the Stanford Natural Language Inference (SNLI) dataset, we obtain state-of-the-art results with almost an order of magnitude fewer parameters than previous work and without relying on any word-order information. Adding intra-sentence attention that takes a minimum amount of order into account yields further improvements.", "phrases": ["decomposable attention model", "natural language inference", "nli", "text sequence", "entailment"], "overall_score": 7.406617452370695, "scores": [2.955708933528158, 2.8739718785815214, 1.2808153933743305, 0.891199944989255, 0.8698063880967472], "rank_score": 1.7743005077140022} -{"id": "narayan-gardent-2014-hybrid", "title": "Hybrid Simplification using Deep Semantics and Machine Translation", "abstract": "We present a hybrid approach to sentence simplification which combines deep semantics and monolingual machine translation to derive simple sentences from complex ones. The approach differs from previous work in two main ways. First, it is semantics-based in that it takes as input a deep semantic representation rather than e.g., a sentence or a parse tree. Second, it combines a simplification model for splitting and deletion with a monolingual translation model for phrase substitution and reordering. When compared against current state-of-the-art methods, our model yields significantly simpler output that is both grammatical and meaning preserving.", "phrases": ["simplification", "machine translation", "hybrid approach"], "overall_score": 6.091634090195016, "scores": [3.089982225887762, 1.189573981514825, 1.0422173410591293], "rank_score": 1.7739245161539055} -{"id": "huber-carenini-2019-predicting", "title": "Predicting Discourse Structure using Distant Supervision from Sentiment", "abstract": "Discourse parsing has not yet been able to take full advantage of the neural NLP revolution, mostly due to the lack of annotated datasets. We propose a novel approach that uses distant supervision on an auxiliary task (sentiment classification) to generate abundant data for RST-style discourse structure prediction. Our approach combines a neural variant of multiple-instance learning, using document-level supervision, with an optimal CKY-style tree generation algorithm.
In a series of experiments, we train a discourse parser (for only structure prediction) on our automatically generated dataset and compare it with parsers trained on human-annotated corpora (news domain RST-DT and Instructional domain). Results indicate that while our parser does not yet match the performance of a parser trained and tested on the same dataset (intra-domain), it does perform remarkably well on the much more difficult and arguably more useful task of inter-domain discourse structure prediction, where the parser is trained on one domain and tested/applied on another one.", "phrases": ["discourse structure", "distant supervision", "sentiment classification"], "overall_score": 3.895978165716603, "scores": [3.287035916219012, 1.428843772959656, 0.603528539596719], "rank_score": 1.7731360762584625} -{"id": "chen-etal-2018-adversarial", "title": "Adversarial Deep Averaging Networks for Cross-Lingual Sentiment Classification", "abstract": "In recent years great success has been achieved in sentiment classification for English, thanks in part to the availability of copious annotated resources. Unfortunately, most languages do not enjoy such an abundance of labeled data. To tackle the sentiment classification problem in low-resource languages without adequate annotated data, we propose an Adversarial Deep Averaging Network (ADAN) to transfer the knowledge learned from labeled data on a resource-rich source language to low-resource languages where only unlabeled data exist. ADAN has two discriminative branches: a sentiment classifier and an adversarial language discriminator. Both branches take input from a shared feature extractor to learn hidden representations that are simultaneously indicative for the classification task and invariant across languages. Experiments on Chinese and Arabic sentiment classification demonstrate that ADAN significantly outperforms state-of-the-art systems.", "phrases": ["deep averaging network", "sentiment classification", "adversarial training", "different language"], "overall_score": 6.490103129375861, "scores": [3.5836344358975545, 1.5797094664544105, 1.0580088459156172, 0.8647604456865053], "rank_score": 1.771528298488522} -{"id": "zhao-etal-2020-gender", "title": "Gender Bias in Multilingual Embeddings and Cross-Lingual Transfer", "abstract": "Multilingual representations embed words from many languages into a single semantic space such that words with similar meanings are close to each other regardless of the language. These embeddings have been widely used in various settings, such as cross-lingual transfer, where a natural language processing (NLP) model trained on one language is deployed to another language. While the cross-lingual transfer techniques are powerful, they carry gender bias from the source to target languages. In this paper, we study gender bias in multilingual embeddings and how it affects transfer learning for NLP applications. We create a multilingual dataset for bias analysis and propose several ways for quantifying bias in multilingual representations from both the intrinsic and extrinsic perspectives. Experimental results show that the magnitude of bias in the multilingual representations changes differently when we align the embeddings to different target spaces and that the alignment direction can also have an influence on the bias in transfer learning.
We further provide recommendations for using the multilingual word representations for downstream tasks.", "phrases": ["cross-lingual transfer", "gender bias", "word embedding"], "overall_score": 4.672823025930941, "scores": [3.3195517083247053, 0.907068808336186, 1.0853023887185609], "rank_score": 1.7706409684598174} -{"id": "baker-etal-2012-modality", "title": "Modality and Negation in SIMT Use of Modality and Negation in Semantically-Informed Syntactic MT", "abstract": "This article describes the resource- and system-building efforts of an 8-week Johns Hopkins University Human Language Technology Center of Excellence Summer Camp for Applied Language Exploration (SCALE-2009) on Semantically Informed Machine Translation (SIMT). We describe a new modality/negation (MN) annotation scheme, the creation of a (publicly available) MN lexicon, and two automated MN taggers that we built using the annotation scheme and lexicon. Our annotation scheme isolates three components of modality and negation: a trigger (a word that conveys modality or negation), a target (an action associated with modality or negation), and a holder (an experiencer of modality). We describe how our MN lexicon was semi-automatically produced and we demonstrate that a structure-based MN tagger results in precision around 86% (depending on genre) for tagging of a standard LDC data set. We apply our MN annotation scheme to statistical machine translation using a syntactic framework that supports the inclusion of semantic annotations. Syntactic tags enriched with semantic annotations are assigned to parse trees in the target-language training texts through a process of tree grafting. Although the focus of our work is modality and negation, the tree grafting procedure is general and supports other types of semantic information. We exploit this capability by including named entities, produced by a pre-existing tagger, in addition to the MN elements produced by the taggers described here. The resulting system significantly outperformed a linguistically naive baseline model (Hiero), and reached the highest scores yet reported on the NIST 2009 Urdu\u2013English test set. This finding supports the hypothesis that both syntactic and semantic information can improve translation quality.", "phrases": ["negation", "semantic information", "machine translation", "modality"], "overall_score": 4.399736313803238, "scores": [2.739820736445746, 2.7022372697916706, 1.0746510223285615, 0.5656275480271131], "rank_score": 1.7705841441482728} -{"id": "yang-etal-2003-coreference", "title": "Coreference Resolution Using Competition Learning Approach", "abstract": "In this paper we propose a competition learning approach to coreference resolution. Traditionally, supervised machine learning approaches adopt the single-candidate model. Nevertheless, the preference relationship between the antecedent candidates cannot be determined accurately in this model. By contrast, our approach adopts a twin-candidate learning model. Such a model can present the competition criterion for antecedent candidates reliably, and ensure that the most preferred candidate is selected. Furthermore, our approach applies a candidate filter to reduce the computational cost and data noise during training and resolution.
The experimental results on the MUC-6 and MUC-7 data sets show that our approach can outperform those based on the single-candidate model.", "phrases": ["candidate", "coreference resolution", "training instance"], "overall_score": 5.835417892528154, "scores": [3.3275723087560243, 1.4630327949819903, 0.521021165801222], "rank_score": 1.7705420898464121} -{"id": "bansal-etal-2014-tailoring", "title": "Tailoring Continuous Word Representations for Dependency Parsing", "abstract": "Word representations have proven useful for many NLP tasks, e.g., Brown clusters as features in dependency parsing (Koo et al., 2008). In this paper, we investigate the use of continuous word representations as features for dependency parsing. We compare several popular embeddings to Brown clusters, via multiple types of features, in both news and web domains. We find that all embeddings yield significant parsing gains, including some recent ones that can be trained in a fraction of the time of others. Explicitly tailoring the representations for the task leads to further improvements. Moreover, an ensemble of all representations achieves the best results, suggesting their complementarity.", "phrases": ["dependency parsing", "complementarity", "word embedding", "slight improvement", "entity recognition"], "overall_score": 6.816731573416292, "scores": [5.153275545115431, 1.9906717942345489, 0.5717470536255008, 0.5705249620936899, 0.566339401884216], "rank_score": 1.7705117513906774} -{"id": "engelbrecht-etal-2009-modeling", "title": "Modeling User Satisfaction with Hidden Markov Models", "abstract": "Models for predicting judgments about the quality of Spoken Dialog Systems have been used as an overall evaluation metric or as optimization functions in adaptive systems. We describe a new approach to such models, using Hidden Markov Models (HMMs). The user's opinion is regarded as a continuous process evolving over time. We present the data collection method and results achieved with the HMM model.", "phrases": ["user satisfaction", "hidden markov models", "hmm", "sds"], "overall_score": 3.88943643860515, "scores": [3.278068166092611, 1.4840734258472759, 1.4777232589814433, 0.8407703808590536], "rank_score": 1.770158807945096} -{"id": "jia-etal-2019-certified", "title": "Certified Robustness to Adversarial Word Substitutions", "abstract": "State-of-the-art NLP models can often be fooled by adversaries that apply seemingly innocuous label-preserving transformations (e.g., paraphrasing) to input text. The number of possible transformations scales exponentially with text length, so data augmentation cannot cover all transformations of an input. This paper considers one exponentially large family of label-preserving transformations, in which every word in the input can be replaced with a similar word. We train the first models that are provably robust to all word substitutions in this family. Our training procedure uses Interval Bound Propagation (IBP) to minimize an upper bound on the worst-case loss that any combination of word substitutions can induce. To evaluate models' robustness to these transformations, we measure accuracy on adversarially chosen word substitutions applied to test examples.
Our IBP-trained models attain 75% adversarial accuracy on both sentiment analysis on IMDB and natural language inference on SNLI; in comparison, on IMDB, models trained normally and ones trained with data augmentation achieve adversarial accuracy of only 12% and 41%, respectively.", "phrases": ["robustness", "propagation", "ibp", "loss", "attack"], "overall_score": 5.542956280361845, "scores": [3.8339952873775034, 1.3435893789434046, 1.313557636881509, 1.2924941003979245, 1.0554108065685448], "rank_score": 1.767809442033777} -{"id": "hazarika-etal-2018-conversational", "title": "Conversational Memory Network for Emotion Recognition in Dyadic Dialogue Videos", "abstract": "Emotion recognition in conversations is crucial for the development of empathetic machines. Present methods mostly ignore the role of inter-speaker dependency relations while classifying emotions in conversations. In this paper, we address recognizing utterance-level emotions in dyadic conversational videos. We propose a deep neural framework, termed Conversational Memory Network (CMN), which leverages contextual information from the conversation history. In particular, CMN uses a multimodal approach comprising audio, visual and textual features with gated recurrent units to model past utterances of each speaker into memories. These memories are then merged using attention-based hops to capture inter-speaker dependencies. Experiments show a significant improvement of 3\u20134% in accuracy over the state of the art.", "phrases": ["emotion recognition", "past utterance", "conversational memory network"], "overall_score": 5.008234505752773, "scores": [2.459206609134463, 2.297442178818338, 0.5464123277159384], "rank_score": 1.7676870385562464} -{"id": "nakov-etal-2016-semeval-2016", "title": "SemEval-2016 Task 3: Community Question Answering", "abstract": "This paper describes the SemEval\u20132016 Task 3 on Community Question Answering, which we offered in English and Arabic. For English, we had three subtasks: Question\u2013Comment Similarity (subtask A), Question\u2013Question Similarity (B), and Question\u2013External Comment Similarity (C). For Arabic, we had another subtask: Rerank the correct answers for a new question (D). Eighteen teams participated in the task, submitting a total of 95 runs (38 primary and 57 contrastive) for the four subtasks. A variety of approaches and features were used by the participating systems to address the different subtasks, which are summarized in this paper. The best systems achieved an official score (MAP) of 79.19, 76.70, 55.41, and 45.83 in subtasks A, B, C, and D, respectively. These scores are significantly better than those for the baselines that we provided. For subtask A, the best system improved over the 2015 winner by 3 points absolute in terms of Accuracy.", "phrases": ["community question answering", "comment similarity", "semeval task"], "overall_score": 6.121253555685603, "scores": [3.5129580820692756, 1.2375043452887842, 0.5481988619288717], "rank_score": 1.7662204297623105} -{"id": "yimam-etal-2014-automatic", "title": "Automatic Annotation Suggestions and Custom Annotation Layers in WebAnno", "abstract": "In this paper, we present a flexible approach to the efficient and exhaustive manual annotation of text documents. For this purpose, we extend WebAnno (Yimam et al., 2013), an open-source web-based annotation tool.
While it was previously limited to specific annotation layers, our extension allows adding and configuring an arbitrary number of layers through a web-based UI. These layers can be annotated separately or simultaneously, and support most types of linguistic annotations such as spans, semantic classes, dependency relations, lexical chains, and morphology. Further, we tightly integrate a generic machine learning component for automatic annotation suggestions of span annotations. In two case studies, we show that automatic annotation suggestions, combined with our split-pane UI concept, significantly reduce annotation time.", "phrases": ["suggestion", "webanno", "annotation tool"], "overall_score": 4.233991994214906, "scores": [3.1842617143225436, 1.2374326228107846, 0.8754410827223396], "rank_score": 1.7657118066185558} -{"id": "iyyer-etal-2014-neural", "title": "A Neural Network for Factoid Question Answering over Paragraphs", "abstract": "Text classification methods for tasks like factoid question answering typically use manually defined string matching rules or bag of words representations. These methods are ineffective when question text contains very few individual words (e.g., named entities) that are indicative of the answer. We introduce a recursive neural network (rnn) model that can reason over such input by modeling textual compositionality. We apply our model, qanta, to a dataset of questions from a trivia competition called quiz bowl. Unlike previous rnn models, qanta learns word and phrase-level representations that combine across sentences to reason about entities. The model outperforms multiple baselines and, when combined with information retrieval methods, rivals the best human players.", "phrases": ["factoid question", "paragraph", "recursive neural network"], "overall_score": 4.893353270913166, "scores": [2.308042562755658, 1.6421639295498147, 1.3445058806426873], "rank_score": 1.7649041243160533} -{"id": "xu-choi-2020-revealing", "title": "Revealing the Myth of Higher-Order Inference in Coreference Resolution", "abstract": "This paper analyzes the impact of higher-order inference (HOI) on the task of coreference resolution. HOI has been adopted by almost all recent coreference resolution models without much investigation into its true effectiveness over representation learning. To make a comprehensive analysis, we implement an end-to-end coreference system as well as four HOI approaches, attended antecedent, entity equalization, span clustering, and cluster merging, where the latter two are our original methods. We find that given a high-performing encoder such as SpanBERT, the impact of HOI is negative to marginal, providing a new perspective of HOI to this task. Our best model using cluster merging shows the Avg-F1 of 80.2 on the CoNLL 2012 shared task dataset in English.", "phrases": ["higher-order inference", "coreference resolution", "mention"], "overall_score": 4.776323306171321, "scores": [2.4922837269663165, 1.9169935837802485, 0.8819724277855691], "rank_score": 1.7637499128440446} -{"id": "morante-etal-2008-learning", "title": "Learning the Scope of Negation in Biomedical Texts", "abstract": "In this paper we present a machine learning system that finds the scope of negation in biomedical texts. The system consists of two memory-based engines, one that decides if the tokens in a sentence are negation signals, and another that finds the full scope of these negation signals.
Our approach to negation detection differs in two main aspects from existing research on negation. First, we focus on finding the scope of negation signals, instead of determining whether a term is negated or not. Second, we apply supervised machine learning techniques, whereas most existing systems apply rule-based algorithms. As far as we know, this way of approaching the negation scope finding task is novel.", "phrases": ["scope", "negation", "biomedical text", "bioscope corpus"], "overall_score": 5.091881641907551, "scores": [3.1797512805127015, 2.7556468589854415, 0.5825175305593207, 0.5287649211116318], "rank_score": 1.761670147792274} -{"id": "sogaard-goldberg-2016-deep", "title": "Deep multi-task learning with low level tasks supervised at lower layers", "abstract": "In all previous work on deep multi-task learning that we are aware of, all task supervisions are on the same (outermost) layer. We present a multi-task learning architecture with deep bi-directional RNNs, where supervision for different tasks can happen at different layers. We present experiments in syntactic chunking and CCG supertagging, coupled with the additional task of POS-tagging. We show that it is consistently better to have POS supervision at the innermost rather than the outermost layer. We argue that this is because \u201clow-level\u201d tasks are better kept at the lower layers, enabling the higher-level tasks to make use of the shared representation of the lower-level tasks. Finally, we also show how this architecture can be used for domain adaptation.", "phrases": ["pos-tagging", "low layer", "mtl", "auxiliary task", "parameter sharing"], "overall_score": 6.855721107682934, "scores": [3.1851056256917385, 1.699050183301249, 1.6326473534269377, 1.3672930895687523, 0.9237630304802493], "rank_score": 1.7615718564937854} -{"id": "artzi-etal-2015-broad", "title": "Broad-coverage CCG Semantic Parsing with AMR", "abstract": "We propose a grammar induction technique for AMR semantic parsing. While previous grammar induction techniques were designed to re-learn a new parser for each target application, the recently annotated AMR Bank provides a unique opportunity to induce a single model for understanding broad-coverage newswire text and support a wide range of applications. We present a new model that combines CCG parsing to recover compositional aspects of meaning and a factor graph to model non-compositional phenomena, such as anaphoric dependencies. Our approach achieves 66.2 Smatch F1 score on the AMR bank, significantly outperforming the previous state of the art.", "phrases": ["ccg", "semantic parsing", "amr", "non-compositional phenomenon"], "overall_score": 6.2625257481684145, "scores": [3.6148972831091686, 1.6584131655179921, 1.2179709597684654, 0.5544712322557144], "rank_score": 1.761438160162835} -{"id": "blanco-moldovan-2011-semantic", "title": "Semantic Representation of Negation Using Focus Detection", "abstract": "Negation is present in all human languages and it is used to reverse the polarity of part of statements that are otherwise affirmative by default. A negated statement often carries positive implicit meaning, but to pinpoint the positive part from the negative part is rather difficult. This paper aims at thoroughly representing the semantics of negation by revealing implicit positive meaning. The proposed representation relies on focus of negation detection.
For this, new annotation over PropBank and a learning algorithm are proposed.", "phrases": ["negation", "focus detection", "propbank"], "overall_score": 3.8682066002734796, "scores": [3.3754125265466604, 0.8461479792807206, 1.059929570571259], "rank_score": 1.76049669213288} -{"id": "xu-etal-2014-shift", "title": "Shift-Reduce CCG Parsing with a Dependency Model", "abstract": "This paper presents the first dependency model for a shift-reduce CCG parser. Modelling dependencies is desirable for a number of reasons, including handling the \u201cspurious\u201d ambiguity of CCG; fitting well with the theory of CCG; and optimizing for structures which are evaluated at test time. We develop a novel training technique using a dependency oracle, in which all derivations are hidden. A challenge arises from the fact that the oracle needs to keep track of exponentially many gold-standard derivations, which is solved by integrating a packed parse forest with the beam-search decoder. Standard CCGBank tests show the model achieves up to 1.05 labeled F-score improvements over three existing, competitive CCG parsing models.", "phrases": ["ccg", "dependency model", "shift-reduce ccg"], "overall_score": 3.4247781623018585, "scores": [2.263920348571055, 2.1207702149241103, 0.8952728982779731], "rank_score": 1.7599878205910462} -{"id": "marcheggiani-titov-2017-encoding", "title": "Encoding Sentences with Graph Convolutional Networks for Semantic Role Labeling", "abstract": "Semantic role labeling (SRL) is the task of identifying the predicate-argument structure of a sentence. It is typically regarded as an important step in the standard NLP pipeline. As the semantic representations are closely related to syntactic ones, we exploit syntactic information in our model. We propose a version of graph convolutional networks (GCNs), a recent class of neural networks operating on graphs, suited to model syntactic dependency graphs. GCNs over syntactic dependency trees are used as sentence encoders, producing latent feature representations of words in a sentence. We observe that GCN layers are complementary to LSTM ones: when we stack both GCN and LSTM layers, we obtain a substantial improvement over an already state-of-the-art LSTM SRL model, resulting in the best reported scores on the standard benchmark (CoNLL-2009) both for Chinese and English.", "phrases": ["graph convolutional networks", "semantic role labeling", "gcn", "sentence encoder", "many nlp task"], "overall_score": 7.346073762774739, "scores": [3.648213421191928, 1.8272820415243074, 2.134520383100628, 0.6116172734159396, 0.5773513637742034], "rank_score": 1.7597968966014015} -{"id": "artetxe-schwenk-2019-massively", "title": "Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond", "abstract": "We introduce an architecture to learn joint multilingual sentence representations for 93 languages, belonging to more than 30 different families and written in 28 different scripts. Our system uses a single BiLSTM encoder with a shared byte-pair encoding vocabulary for all languages, which is coupled with an auxiliary decoder and trained on publicly available parallel corpora. This enables us to learn a classifier on top of the resulting embeddings using English annotated data only, and transfer it to any of the 93 languages without any modification.
Our experiments in cross-lingual natural language inference (XNLI data set), cross-lingual document classification (MLDoc data set), and parallel corpus mining (BUCC data set) show the effectiveness of our approach. We also introduce a new test set of aligned sentences in 112 languages, and show that our sentence embeddings obtain strong results in multilingual similarity search even for low-resource languages. Our implementation, the pre-trained encoder, and the multilingual test set are available at .", "phrases": ["sentence embedding", "cross-lingual transfer", "low resource language"], "overall_score": 7.611669917215575, "scores": [2.652866657533254, 2.09536006575168, 0.5245545720272734], "rank_score": 1.7575937651040692} -{"id": "specia-etal-2016-shared", "title": "A Shared Task on Multimodal Machine Translation and Crosslingual Image Description", "abstract": "This paper introduces and summarises the findings of a new shared task at the intersection of Natural Language Processing and Computer Vision: the generation of image descriptions in a target language, given an image and/or one or more descriptions in a different (source) language. This challenge was organised along with the Conference on Machine Translation (WMT16), and called for system submissions for two task variants: (i) a translation task, in which a source language image description needs to be translated to a target language, (optionally) with additional cues from the corresponding image, and (ii) a description generation task, in which a target language description needs to be generated for an image, (optionally) with additional cues from source language descriptions of the same image. In this first edition of the shared task, 16 systems were submitted for the translation task and seven for the image description task, from a total of 10 teams.", "phrases": ["multimodal machine translation", "image", "wmt16", "source language", "ambiguity"], "overall_score": 6.6100896234358855, "scores": [3.94212757949157, 2.0604679083189614, 1.3719299030231105, 0.8892678869185267, 0.5234148208748708], "rank_score": 1.7574416197254077} -{"id": "liu-etal-2019-tigs", "title": "TIGS: An Inference Algorithm for Text Infilling with Gradient Search", "abstract": "Text infilling aims at filling in the missing part of a sentence or paragraph, which has been applied to a variety of real-world natural language generation scenarios. Given a well-trained sequential generative model, it is challenging for its unidirectional decoder to generate missing symbols conditioned on the past and future information around the missing part. In this paper, we propose an iterative inference algorithm based on gradient search, which could be the first inference algorithm that can be broadly applied to any neural sequence generative model for text infilling tasks. Extensive experimental comparisons show the effectiveness and efficiency of the proposed method on three different text infilling tasks with various mask ratios and different mask strategies, comparing with five state-of-the-art methods.", "phrases": ["text infilling", "gradient search", "span"], "overall_score": 3.41698379976207, "scores": [2.7021965954822456, 2.0426812789387916, 0.5230690573850221], "rank_score": 1.75598231060202} -{"id": "wang-etal-2020-cord", "title": "CORD-19: The COVID-19 Open Research Dataset", "abstract": "The COVID-19 Open Research Dataset (CORD-19) is a growing resource of scientific papers on COVID-19 and related historical coronavirus research.
CORD-19 is designed to facilitate the development of text mining and information retrieval systems over its rich collection of metadata and structured full-text papers. Since its release, CORD-19 has been downloaded over 200K times and has served as the basis of many COVID-19 text mining and discovery systems. In this article, we describe the mechanics of dataset construction, highlighting challenges and key design decisions, provide an overview of how CORD-19 has been used, and describe several shared tasks built around the dataset. We hope this resource will continue to bring together the computing community, biomedical experts, and policy makers in the search for effective treatments and management policies for COVID-19.", "phrases": ["scientific paper", "treatment", "cord-19", "allen institute", "white house"], "overall_score": 5.966544197016694, "scores": [4.695119697157064, 1.4523849228301, 0.9217328797814934, 0.8616486567836387, 0.8403545676494578], "rank_score": 1.7542481448403506} -{"id": "linzen-etal-2016-assessing", "title": "Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies", "abstract": "The success of long short-term memory (LSTM) neural networks in language processing is typically attributed to their ability to capture long-distance statistical regularities. Linguistic regularities are often sensitive to syntactic structure; can such dependencies be captured by LSTMs, which do not have explicit structural representations? We begin addressing this question using number agreement in English subject-verb dependencies. We probe the architecture's grammatical competence both using training objectives with an explicit grammatical target (number prediction, grammaticality judgments) and using language models. In the strongly supervised settings, the LSTM achieved very high overall accuracy (less than 1% errors), but errors increased when sequential and structural information conflicted. The frequency of such errors rose sharply in the language-modeling setting. We conclude that LSTMs can capture a non-trivial amount of grammatical structure given targeted supervision, but stronger architectures may be required to further reduce errors; furthermore, the language modeling signal is insufficient for capturing syntax-sensitive dependencies, and should be supplemented with more direct supervision if such dependencies need to be captured.", "phrases": ["syntax-sensitive dependency", "grammaticality judgment", "language model", "rnn", "long-distance dependency"], "overall_score": 8.628468527541054, "scores": [2.9963712878416366, 2.694197127479398, 1.66932515185611, 0.8702675806062249, 0.5386416764762788], "rank_score": 1.7537605648519299} -{"id": "kilgarriff-grefenstette-2003-introduction", "title": "Introduction to the Special Issue on the Web as Corpus", "abstract": "The Web, teeming as it is with language data, of all manner of varieties and languages, in vast quantity and freely available, is a fabulous linguists' playground.
This special issue of Computational Linguistics explores ways in which this dream is being realized.", "phrases": ["special issue", "web", "linguistic data", "such resource", "wac"], "overall_score": 6.12426881092092, "scores": [5.343446539839496, 0.8364821152691544, 1.155819631621124, 0.8801135135502924, 0.541832417821842], "rank_score": 1.7515388436203818} -{"id": "kedzie-etal-2018-content", "title": "Content Selection in Deep Learning Models of Summarization", "abstract": "We carry out experiments with deep learning models of summarization across the domains of news, personal stories, meetings, and medical articles in order to understand how content selection is performed. We find that many sophisticated features of state-of-the-art extractive summarizers do not improve performance over simpler models. These results suggest that it is easier to create a summarizer for a new domain than previous work suggests and bring into question the benefit of deep learning models for summarization for those domains that do have massive datasets (i.e., news). At the same time, they suggest important questions for new research in summarization; namely, new forms of sentence representations or external knowledge sources are needed that are better suited to the summarization task.", "phrases": ["deep learning model", "summarization", "sentence representation", "content selection"], "overall_score": 5.897815585809311, "scores": [2.8699757494265565, 2.7010600171377503, 0.8992368612831975, 0.5357237366988207], "rank_score": 1.7514990911365815} -{"id": "wen-etal-2015-semantically", "title": "Semantically Conditioned LSTM-based Natural Language Generation for Spoken Dialogue Systems", "abstract": "Natural language generation (NLG) is a critical component of spoken dialogue and it has a significant impact both on usability and perceived quality. Most NLG systems in common use employ rules and heuristics and tend to generate rigid and stylised responses without the natural variation of human language. They are also not easily scaled to systems covering multiple domains and languages. This paper presents a statistical language generator based on a semantically controlled Long Short-term Memory (LSTM) structure. The LSTM generator can learn from unaligned data by jointly optimising sentence planning and surface realisation using a simple cross entropy training criterion, and language variation can be easily achieved by sampling from output candidates. With fewer heuristics, an objective evaluation in two differing test domains showed the proposed method improved performance compared to previous methods. Human judges scored the LSTM system higher on informativeness and naturalness and overall preferred it to the other systems.", "phrases": ["natural language generation", "dialogue act", "sc-lstm", "response generation"], "overall_score": 7.738037406349157, "scores": [3.2148409527864263, 1.4875848483731513, 1.2428441987866532, 1.0593161285474482], "rank_score": 1.7511465321234196} -{"id": "liu-etal-2017-adversarial", "title": "Adversarial Multi-task Learning for Text Classification", "abstract": "Neural network models have shown promising opportunities for multi-task learning, which focuses on learning the shared layers to extract common and task-invariant features. However, in most existing approaches, the extracted shared features are prone to be contaminated by task-specific features or the noise brought by other tasks.
In this paper, we propose an adversarial multi-task learning framework, preventing the shared and private latent feature spaces from interfering with each other. We conduct extensive experiments on 16 different text classification tasks, which demonstrate the benefits of our approach. In addition, we show that the shared knowledge learned by our proposed model can be regarded as off-the-shelf knowledge and easily transferred to new tasks. The datasets of all 16 tasks are publicly available at .", "phrases": ["multi-task learning", "text classification", "adversarial training", "loss", "subspace"], "overall_score": 6.369784839991091, "scores": [3.980051936390673, 2.7203363355001082, 0.9799380199663463, 0.5383071092949195, 0.5368772043619927], "rank_score": 1.751102121102808} -{"id": "hulden-etal-2014-semi", "title": "Semi-supervised learning of morphological paradigms and lexicons", "abstract": "We present a semi-supervised approach to the problem of paradigm induction from inflection tables. Our system extracts generalizations from inflection tables, representing the resulting paradigms in an abstract form. The process is intended to be language-independent, and to provide human-readable generalizations of paradigms. The tools we provide can be used by linguists for the rapid creation of lexical resources. We evaluate the system through an inflection table reconstruction task using Wiktionary data for German, Spanish, and Finnish. With no additional corpus information available, the evaluation yields per word form accuracy scores on inflecting unseen base forms in different languages ranging from 87.81% (German nouns) to 99.52% (Spanish verbs); with additional unlabeled text corpora available for training the scores range from 91.81% (German nouns) to 99.58% (Spanish verbs). We separately evaluate the system in a simulated task of Swedish lexicon creation, and show that on the basis of a small number of inflection tables, the system can accurately collect from a list of noun forms a lexicon with inflection information ranging from 100.0% correct (collect 100 words), to 96.4% correct (collect 1000 words).", "phrases": ["paradigm", "inflection table", "semi-supervised learning"], "overall_score": 5.155779820656732, "scores": [3.065642511736548, 1.5812212186356298, 0.6062047052144693], "rank_score": 1.7510228118622155} -{"id": "kim-rush-2016-sequence", "title": "Sequence-Level Knowledge Distillation", "abstract": "Neural machine translation (NMT) offers a novel alternative formulation of translation that is potentially simpler than statistical approaches. However, to reach competitive performance, NMT models need to be exceedingly large. In this paper we consider applying knowledge distillation approaches (Bucila et al., 2006; Hinton et al., 2015) that have proven successful for reducing the size of neural models in other domains to the problem of NMT. We demonstrate that standard knowledge distillation applied to word-level prediction can be effective for NMT, and also introduce two novel sequence-level versions of knowledge distillation that further improve performance, and somewhat surprisingly, seem to eliminate the need for beam search (even when applied on the original teacher model). Our best student model runs 10 times faster than its state-of-the-art teacher with little loss in performance. It is also significantly better than a baseline model trained without knowledge distillation: by 4.2/1.7 BLEU with greedy decoding/beam search.
Applying weight pruning on top of knowledge distillation results in a student model that has 13 times fewer parameters than the original teacher model, with a decrease of 0.4 BLEU.", "phrases": ["knowledge distillation", "student", "sequence-level distillation", "specific task", "seqkd"], "overall_score": 7.04312005665132, "scores": [6.535596256813236, 0.6296512212092801, 0.534098660690969, 0.5273128301356832, 0.521794082920124], "rank_score": 1.7496906103538585} -{"id": "kuchaiev-etal-2018-openseq2seq", "title": "OpenSeq2Seq: Extensible Toolkit for Distributed and Mixed Precision Training of Sequence-to-Sequence Models", "abstract": "We present OpenSeq2Seq \u2013 an open-source toolkit for training sequence-to-sequence models. The main goal of our toolkit is to allow researchers to most effectively explore different sequence-to-sequence architectures. The efficiency is achieved by fully supporting distributed and mixed-precision training. OpenSeq2Seq provides building blocks for training encoder-decoder models for neural machine translation and automatic speech recognition. We plan to extend it with other modalities in the future.", "phrases": ["mixed precision training", "sequence-to-sequence model", "openseq2seq"], "overall_score": 2.8149456663813863, "scores": [2.403850407370489, 1.9880556029234093, 0.8551662339359266], "rank_score": 1.7490240814099416} -{"id": "jeretic-etal-2020-natural", "title": "Are Natural Language Inference Models IMPPRESsive? Learning IMPlicature and PRESupposition", "abstract": "Natural language inference (NLI) is an increasingly important task for natural language understanding, which requires one to infer whether a sentence entails another. However, the ability of NLI models to make pragmatic inferences remains understudied. We create an IMPlicature and PRESupposition diagnostic dataset (IMPPRES), consisting of 32K semi-automatically generated sentence pairs illustrating well-studied pragmatic inference types. We use IMPPRES to evaluate whether BERT, InferSent, and BOW NLI models trained on MultiNLI (Williams et al., 2018) learn to make pragmatic inferences. Although MultiNLI appears to contain very few pairs illustrating these inference types, we find that BERT learns to draw pragmatic inferences. It reliably treats scalar implicatures triggered by \u201csome\u201d as entailments. For some presupposition triggers like \u201conly\u201d, BERT reliably recognizes the presupposition as an entailment, even when the trigger is embedded under an entailment canceling operator like negation. BOW and InferSent show weaker evidence of pragmatic reasoning. We conclude that NLI training encourages models to learn some, but not all, pragmatic inferences.", "phrases": ["implicature", "presupposition", "entailment", "nli model"], "overall_score": 4.192269715946655, "scores": [3.15054327509728, 2.70666693231998, 0.5757887144162694, 0.5602501387131876], "rank_score": 1.7483122651366794} -{"id": "hosseini-etal-2014-learning", "title": "Learning to Solve Arithmetic Word Problems with Verb Categorization", "abstract": "This paper presents a novel approach to learning to solve simple arithmetic word problems. Our system, ARIS, analyzes each of the sentences in the problem statement to identify the relevant variables and their values. ARIS then maps this information into an equation that represents the problem, and enables its (trivial) solution as shown in Figure 1. 
The paper analyzes the arithmetic-word problems \u201cgenre\u201d, identifying seven categories of verbs used in such problems. ARIS learns to categorize verbs with 81.2% accuracy, and is able to solve 77.7% of the problems in a corpus of standard primary school test questions. We report the first learning results on this task without reliance on predefined templates and make our data publicly available.", "phrases": ["arithmetic word problem", "verb categorization", "subtraction problem", "solver", "hand-crafted rule"], "overall_score": 6.7266639158569355, "scores": [3.6132038453036537, 2.096705299256989, 1.387257221661952, 1.0948105748284653, 0.5436153163331257], "rank_score": 1.7471184514768372} -{"id": "gonzalez-ibanez-etal-2011-identifying", "title": "Identifying Sarcasm in Twitter: A Closer Look", "abstract": "Sarcasm transforms the polarity of an apparently positive or negative utterance into its opposite. We report on a method for constructing a corpus of sarcastic Twitter messages in which determination of the sarcasm of each message has been made by its author. We use this reliable corpus to compare sarcastic utterances in Twitter to utterances that express positive or negative attitudes without sarcasm. We investigate the impact of lexical and pragmatic factors on machine learning effectiveness for identifying sarcastic utterances and we compare the performance of machine learning techniques and human judges on this task. Perhaps unsurprisingly, neither the human judges nor the machine learning techniques perform very well.", "phrases": ["sarcasm", "twitter", "emoticon", "social medium", "computational linguistic"], "overall_score": 5.821063333390492, "scores": [3.93668562613334, 2.4343022180928973, 1.312391895824596, 0.5300653868028695, 0.5211078029189437], "rank_score": 1.7469105859545295} -{"id": "cheng-etal-2020-ape", "title": "APE: Argument Pair Extraction from Peer Review and Rebuttal via Multi-task Learning", "abstract": "Peer review and rebuttal, with rich interactions and argumentative discussions in between, are naturally a good resource to mine arguments. However, few works study both of them simultaneously. In this paper, we introduce a new argument pair extraction (APE) task on peer review and rebuttal in order to study the contents, the structure and the connections between them. We prepare a challenging dataset that contains 4,764 fully annotated review-rebuttal passage pairs from an open review platform to facilitate the study of this task. To automatically detect argumentative propositions and extract argument pairs from this corpus, we cast it as the combination of a sequence labeling task and a text relation classification task. Thus, we propose a multi-task learning framework based on hierarchical LSTM networks. Extensive experiments and analysis demonstrate the effectiveness of our multi-task framework, and also show the challenges of the new task as well as motivate future research directions.", "phrases": ["argument pair extraction", "peer review", "rebuttal", "discussion", "new task"], "overall_score": 3.3993039765758946, "scores": [2.533454234474425, 2.4326809468349815, 2.401597759680142, 0.8390573614003093, 0.527693091476413], "rank_score": 1.746896678773254} -{"id": "louis-nenkova-2012-coherence", "title": "A Coherence Model Based on Syntactic Patterns", "abstract": "We introduce a model of coherence which captures the intentional discourse structure in text. 
Our work is based on the hypothesis that syntax provides a proxy for the communicative goal of a sentence and therefore the sequence of sentences in a coherent discourse should exhibit detectable structural patterns. Results show that our method has high discriminating power for separating out coherent and incoherent news articles, reaching accuracies of up to 90%. We also show that our syntactic patterns are correlated with manual annotations of intentional structure for academic conference articles and can successfully predict the coherence of abstract, introduction and related work sections of these articles.", "phrases": ["coherence", "syntactic pattern", "adjacent sentence"], "overall_score": 4.339238968815002, "scores": [3.288615808566189, 1.39584588983697, 0.5542529662123555], "rank_score": 1.7462382215385048} -{"id": "mohiuddin-joty-2019-revisiting", "title": "Revisiting Adversarial Autoencoder for Unsupervised Word Translation with Cycle Consistency and Improved Training", "abstract": "Adversarial training has shown impressive success in learning bilingual dictionary without any parallel data by mapping monolingual embeddings to a shared space. However, recent work has shown superior performance for non-adversarial methods in more challenging language pairs. In this work, we revisit adversarial autoencoder for unsupervised word translation and propose two novel extensions to it that yield more stable training and improved results. Our method includes regularization terms to enforce cycle consistency and input reconstruction, and puts the target encoders as an adversary against the corresponding discriminator. Extensive experiments with European, non-European and low-resource languages show that our method is more robust and achieves better performance than recently proposed adversarial and non-adversarial approaches.", "phrases": ["adversarial autoencoder", "unsupervised word translation", "novel extension"], "overall_score": 3.3966772871081665, "scores": [2.449790968157461, 2.2617749540793755, 0.5250745599927694], "rank_score": 1.7455468274098687} -{"id": "shi-etal-2020-learn", "title": "Learn to Combine Linguistic and Symbolic Information for Table-based Fact Verification", "abstract": "Table-based fact verification is expected to perform both linguistic reasoning and symbolic reasoning. Existing methods lack attention to take advantage of the combination of linguistic information and symbolic information. In this work, we propose HeterTFV, a graph-based reasoning approach, that learns to combine linguistic information and symbolic information effectively. We first construct a program graph to encode programs, a kind of LISP-like logical form, to learn the semantic compositionality of the programs. Then we construct a heterogeneous graph to incorporate both linguistic information and symbolic information by introducing program nodes into the heterogeneous graph. Finally, we propose a graph-based reasoning approach to reason over the multiple types of nodes to make an effective combination of both types of information. 
Experimental results on a large-scale benchmark dataset TABFACT illustrate the effectiveness of our approach.", "phrases": ["symbolic information", "table-based fact verification", "reasoning"], "overall_score": 3.6262159793251247, "scores": [2.560780180788287, 2.148983920762771, 0.5217597090136172], "rank_score": 1.7438412701882247} -{"id": "liao-etal-2018-abstract", "title": "Abstract Meaning Representation for Multi-Document Summarization", "abstract": "Generating an abstract from a collection of documents is a desirable capability for many real-world applications. However, abstractive approaches to multi-document summarization have not been thoroughly investigated. This paper studies the feasibility of using Abstract Meaning Representation (AMR), a semantic representation of natural language grounded in linguistic theory, as a form of content representation. Our approach condenses source documents to a set of summary graphs following the AMR formalism. The summary graphs are then transformed to a set of summary sentences in a surface realization step. The framework is fully data-driven and flexible. Each component can be optimized independently using small-scale, in-domain training data. We perform experiments on benchmark summarization datasets and report promising results. We also describe opportunities and challenges for advancing this line of research.", "phrases": ["summarization", "amr", "abstract meaning representation"], "overall_score": 3.6239512492848407, "scores": [2.507201176094373, 1.8903324265697066, 0.8307228931025238], "rank_score": 1.7427521652555347} -{"id": "bhattacharyya-2010-indowordnet", "title": "IndoWordNet", "abstract": "India is a multilingual country where machine translation and cross lingual search are highly relevant problems. These problems require large resources - like wordnets and lexicons - of high quality and coverage. Wordnets are lexical structures composed of synsets and semantic relations. Synsets are sets of synonyms. They are linked by semantic relations like hypernymy (is-a), meronymy (part-of), troponymy (manner-of) etc. IndoWordnet is a linked structure of wordnets of major Indian languages from Indo-Aryan, Dravidian and Sino-Tibetan families. These wordnets have been created by following the expansion approach from Hindi wordnet which was made available free for research in 2006. Since then a number of Indian languages have been creating their wordnets. In this paper we discuss the methodology, coverage, important considerations and multifarious benefits of IndoWordnet. Case studies are provided for Marathi, Sanskrit, Bodo and Telugu, to bring out the basic methodology of and challenges involved in the expansion approach. The guidelines the lexicographers follow for wordnet construction are enumerated. The difference between IndoWordnet and EuroWordnet is also discussed.", "phrases": ["indian language", "dravidian", "expansion approach", "lexicographer", "indowordnet"], "overall_score": 4.598100144078282, "scores": [4.158883579137169, 1.5534345083542975, 1.1387790979887842, 0.9310135032076963, 0.9295229929278821], "rank_score": 1.7423267363231658} -{"id": "sapena-etal-2010-relaxcor", "title": "RelaxCor: A Global Relaxation Labeling Approach to Coreference Resolution", "abstract": "This paper describes the participation of RelaxCor in the Semeval-2010 task number 1: \"Coreference Resolution in Multiple Languages\". RelaxCor is a constraint-based graph partitioning approach to coreference resolution solved by relaxation labeling. 
The approach combines the strengths of groupwise classifiers and chain formation methods in one global method.", "phrases": ["coreference resolution", "constraint-based graph", "relaxcor"], "overall_score": 2.4152791065896992, "scores": [2.502756200309232, 2.1867732384510212, 0.5372373453992745], "rank_score": 1.7422555947198426} -{"id": "kwiatkowski-etal-2011-lexical", "title": "Lexical Generalization in CCG Grammar Induction for Semantic Parsing", "abstract": "We consider the problem of learning factored probabilistic CCG grammars for semantic parsing from data containing sentences paired with logical-form meaning representations. Traditional CCG lexicons list lexical items that pair words and phrases with syntactic and semantic content. Such lexicons can be inefficient when words appear repeatedly with closely related lexical content. In this paper, we introduce factored lexicons, which include both lexemes to model word meaning and templates to model systematic variation in word usage. We also present an algorithm for learning factored CCG lexicons, along with a probabilistic parse-selection model. Evaluations on benchmark datasets demonstrate that the approach learns highly accurate parsers, whose generalization performance benefits greatly from the lexical factoring.", "phrases": ["semantic parsing", "lexeme", "template"], "overall_score": 5.382721545167987, "scores": [3.4661761140025495, 0.9004759191653978, 0.8575287664064842], "rank_score": 1.741393599858144} -{"id": "kumar-etal-2020-data", "title": "Data Augmentation using Pre-trained Transformer Models", "abstract": "Language model based pre-trained models such as BERT have provided significant gains across different NLP tasks. In this paper, we study different types of transformer based pre-trained models such as auto-regressive models (GPT-2), auto-encoder models (BERT), and seq2seq models (BART) for conditional data augmentation. We show that prepending the class labels to text sequences provides a simple yet effective way to condition the pre-trained models for data augmentation. Additionally, on three classification benchmarks, pre-trained Seq2Seq model outperforms other data augmentation methods in a low-resource setting. Further, we explore how different pre-trained model based data augmentation differs in terms of data diversity, and how well such methods preserve the class-label information.", "phrases": ["transformer", "different type", "data augmentation", "pre-trained language model"], "overall_score": 5.673010532786293, "scores": [3.7991073149269043, 1.5255189954897448, 1.118565505741632, 0.5216239486088327], "rank_score": 1.7412039411917783} -{"id": "visweswariah-etal-2010-urdu", "title": "Urdu and Hindi: Translation and sharing of linguistic resources", "abstract": "Hindi and Urdu share a common phonology, morphology and grammar but are written in different scripts. In addition, the vocabularies have also diverged significantly especially in the written form. In this paper we show that we can get reasonable quality translations (we estimated the Translation Error rate at 18%) between the two languages even in the absence of a parallel corpus. Linguistic resources such as treebanks, part of speech tagged data and parallel corpora with English are limited for both these languages. We use the translation system to share linguistic resources between the two languages. 
We demonstrate improvements on three tasks and show: statistical machine translation from Urdu to English is improved (0.8 in BLEU score) by using a Hindi-English parallel corpus, Hindi part of speech tagging is improved (up to 6% absolute) by using an Urdu part of speech corpus, and a Hindi-English word aligner is improved by using a manually word aligned Urdu-English corpus (up to 9% absolute in F-Measure).", "phrases": ["hindi", "linguistic resource", "urdu"], "overall_score": 2.4109110668945033, "scores": [1.8732971814553199, 1.6729389942830668, 1.6710779846111434], "rank_score": 1.73910472011651} -{"id": "mani-etal-2008-spatialml", "title": "SpatialML: Annotation Scheme, Corpora, and Tools", "abstract": "SpatialML is an annotation scheme for marking up references to places in natural language. It covers both named and nominal references to places, grounding them where possible with geo-coordinates, including both relative and absolute locations, and characterizes relationships among places in terms of a region calculus. A freely available annotation editor has been developed for SpatialML, along with a corpus of annotated documents released by the Linguistic Data Consortium. Inter-annotator agreement on SpatialML is 77.0 F-measure for extents on that corpus. An automatic tagger for SpatialML extents scores 78.5 F-measure. A disambiguator scores 93.0 F-measure and 93.4 Predictive Accuracy. In adapting the extent tagger to new domains, merging the training data from the above corpus with annotated data in the new domain provides the best performance.", "phrases": ["annotation scheme", "nominal reference", "geo-coordinate", "region", "spatialml"], "overall_score": 4.321450942015185, "scores": [4.118625411089082, 2.8490726779751903, 0.6090106819070321, 0.5877773166619188, 0.5309128771203827], "rank_score": 1.739079792950721} -{"id": "morishita-etal-2020-jparacrawl", "title": "JParaCrawl: A Large Scale Web-Based English-Japanese Parallel Corpus", "abstract": "Recent machine translation algorithms mainly rely on parallel corpora. However, since the availability of parallel corpora remains limited, only some resource-rich language pairs can benefit from them. We constructed a parallel corpus for English-Japanese, for which the amount of publicly available parallel corpora is still limited. We constructed the parallel corpus by broadly crawling the web and automatically aligning parallel sentences. Our collected corpus, called JParaCrawl, amassed over 8.7 million sentence pairs. We show how it includes a broader range of domains and how a neural machine translation model trained with it works as a good pre-trained model for fine-tuning specific domains. The pre-training and fine-tuning approaches achieved or surpassed performance comparable to model training from the initial state and reduced the training time. Additionally, we trained the model with an in-domain dataset and JParaCrawl to show how we achieved the best performance with them. 
JParaCrawl and the pre-trained models are freely available online for research purposes.", "phrases": ["english-japanese", "parallel corpus", "web", "jparacrawl"], "overall_score": 3.3836344985489477, "scores": [2.7224086575072093, 2.521261921706418, 1.1369682706784638, 0.5747377900655367], "rank_score": 1.738844159989407} -{"id": "mourad-darwish-2013-subjectivity", "title": "Subjectivity and Sentiment Analysis of Modern Standard Arabic and Arabic Microblogs", "abstract": "Though much research has been conducted on Subjectivity and Sentiment Analysis (SSA) during the last decade, little work has focused on Arabic. In this work, we focus on SSA for both Modern Standard Arabic (MSA) news articles and dialectal Arabic microblogs from Twitter. We showcase some of the challenges associated with SSA on microblogs. We adopted a random graph walk approach to extend the Arabic SSA lexicon using Arabic-English phrase tables, leading to improvements for SSA on Arabic microblogs. We used different features for both subjectivity and sentiment classification including stemming, part-of-speech tagging, as well as tweet specific features. Our classification features yield results that surpass Arabic SSA results in the literature.", "phrases": ["sentiment analysis", "modern standard arabic", "subjectivity", "arabic tweet"], "overall_score": 4.458074809086165, "scores": [3.202404702656435, 1.6730703519678618, 0.8490477198925344, 1.2277779344503124], "rank_score": 1.738075177241786} -{"id": "nekoto-etal-2020-participatory", "title": "Participatory Research for Low-resourced Machine Translation: A Case Study in African Languages", "abstract": "Research in NLP lacks geographic diversity, and the question of how NLP can be scaled to low-resourced languages has not yet been adequately solved. `Low-resourced'-ness is a complex problem going beyond data availability and reflects systemic problems in society. In this paper, we focus on the task of Machine Translation (MT), that plays a crucial role for information accessibility and communication worldwide. Despite immense improvements in MT over the past decade, MT is centered around a few high-resourced languages. As MT researchers cannot solve the problem of low-resourcedness alone, we propose participatory research as a means to involve all necessary agents required in the MT development process. We demonstrate the feasibility and scalability of participatory research with a case study on MT for African languages. Its implementation leads to a collection of novel translation datasets, MT benchmarks for over 30 languages, with human evaluations for a third of them, and enables participants without formal training to make a unique scientific contribution. Benchmarks, models, data, code, and evaluation results are released at .", "phrases": ["machine translation", "african languages", "participatory research"], "overall_score": 5.022390122669649, "scores": [2.227159934449681, 2.1893478109965363, 0.7963754509271079], "rank_score": 1.737627732124442} -{"id": "durrett-klein-2014-joint", "title": "A Joint Model for Entity Analysis: Coreference, Typing, and Linking", "abstract": "We present a joint model of three core tasks in the entity analysis stack: coreference resolution (within-document clustering), named entity recognition (coarse semantic typing), and entity linking (matching to Wikipedia entities). Our model is formally a structured conditional random field. Unary factors encode local features from strong baselines for each task. 
We then add binary and ternary factors to capture cross-task interactions, such as the constraint that coreferent mentions have the same semantic type. On the ACE 2005 and OntoNotes datasets, we achieve state-of-the-art results for all three tasks. Moreover, joint modeling improves performance on each task over strong independent baselines.", "phrases": ["joint model", "coreference", "entity linking", "crf model", "exception"], "overall_score": 6.226722675726738, "scores": [3.1839987660949767, 2.988104036538232, 1.4509945935485797, 0.5363183804925108, 0.5285844581024719], "rank_score": 1.7376000469553543} -{"id": "wang-etal-2011-detection", "title": "Detection of Agreement and Disagreement in Broadcast Conversations", "abstract": "We present Conditional Random Fields based approaches for detecting agreement/disagreement between speakers in English broadcast conversation shows. We develop annotation approaches for a variety of linguistic phenomena. Various lexical, structural, durational, and prosodic features are explored. We compare the performance when using features extracted from automatically generated annotations against that when using human annotations. We investigate the efficacy of adding prosodic features on top of lexical, structural, and durational features. Since the training data is highly imbalanced, we explore two sampling approaches, random downsampling and ensemble downsampling. Overall, our approach achieves 79.2% (precision), 50.5% (recall), 61.7% (F1) for agreement detection and 69.2% (precision), 46.9% (recall), and 55.9% (F1) for disagreement detection, on the English broadcast conversation data.", "phrases": ["agreement", "broadcast conversation", "detection"], "overall_score": 3.8177730104009333, "scores": [2.957106267365468, 1.6682097247360699, 0.5873141365347], "rank_score": 1.737543376212079} -{"id": "pitler-nenkova-2008-revisiting", "title": "Revisiting Readability: A Unified Framework for Predicting Text Quality", "abstract": "We combine lexical, syntactic, and discourse features to produce a highly predictive model of human readers' judgments of text readability. This is the first study to take into account such a variety of linguistic factors and the first to empirically demonstrate that discourse relations are strongly associated with the perceived quality of text. We show that various surface metrics generally expected to be related to readability are not very good predictors of readability judgments in our Wall Street Journal corpus. We also establish that readability predictors behave differently depending on the task: predicting text readability or ranking the readability. Our experiments indicate that discourse relations are the one class of features that exhibits robustness across these two tasks.", "phrases": ["readability", "factor", "discourse relation", "essay scoring", "assessment"], "overall_score": 6.60592115086298, "scores": [5.118933224726128, 1.5791507653452397, 0.8422639033290477, 0.5781658022585371, 0.5582749198475909], "rank_score": 1.7353577231013086} -{"id": "nguyen-etal-2016-joint-event", "title": "Joint Event Extraction via Recurrent Neural Networks", "abstract": "Event extraction is a particularly challenging problem in information extraction. The state-of-the-art models for this problem have either applied convolutional neural networks in a pipelined framework (Chen et al., 2015) or followed the joint architecture via structured prediction with rich local and global features (Li et al., 2013). 
The former is able to learn hidden feature representations automatically from data based on the continuous and generalized representations of words. The latter, on the other hand, is capable of mitigating the error propagation problem of the pipelined approach and exploiting the inter-dependencies between event triggers and argument roles via discrete structures. In this work, we propose to do event extraction in a joint framework with bidirectional recurrent neural networks, thereby benefiting from the advantages of the two models as well as addressing issues inherent in the existing approaches. We systematically investigate different memory features for the joint model and demonstrate that the proposed model achieves the state-of-the-art performance on the ACE 2005 dataset.", "phrases": ["event extraction", "recurrent neural networks", "trigger", "argument role", "joint model"], "overall_score": 6.889123127566957, "scores": [4.087870780335763, 0.8749268094945627, 1.4623549769869737, 1.400215458308532, 0.8504715322273076], "rank_score": 1.7351679114706275} -{"id": "greene-resnik-2009-words", "title": "More than Words: Syntactic Packaging and Implicit Sentiment", "abstract": "Work on sentiment analysis often focuses on the words and phrases that people use in overtly opinionated text. In this paper, we introduce a new approach to the problem that focuses not on lexical indicators, but on the syntactic \"packaging\" of ideas, which is well suited to investigating the identification of implicit sentiment, or perspective. We establish a strong predictive connection between linguistically well motivated features and implicit sentiment, and then show how computational approximations of these features can be used to improve on existing state-of-the-art sentiment classification results.", "phrases": ["packaging", "implicit sentiment", "perspective", "syntactic representation", "text classification task"], "overall_score": 5.36225682164812, "scores": [4.015219387704037, 1.7894562728865488, 1.6749503151940632, 0.6293719332189136, 0.5648668187716962], "rank_score": 1.7347729455550518} -{"id": "popovic-ney-2011-towards", "title": "Towards Automatic Error Analysis of Machine Translation Output", "abstract": "Evaluation and error analysis of machine translation output are important but difficult tasks. In this article, we propose a framework for automatic error analysis and classification based on the identification of actual erroneous words using the algorithms for computation of Word Error Rate (WER) and Position-independent word Error Rate (PER), which is just a very first step towards development of automatic evaluation measures that provide more specific information of certain translation problems. The proposed approach enables the use of various types of linguistic knowledge in order to classify translation errors in many different ways. This work focuses on one possible set-up, namely, on five error categories: inflectional errors, errors due to wrong word order, missing words, extra words, and incorrect lexical choices. For each of the categories, we analyze the contribution of various POS classes. 
We compared the results of automatic error analysis with the results of human error analysis in order to investigate two possible applications: estimating the contribution of each error type in a given translation output in order to identify the main sources of errors for a given translation system, and comparing different translation outputs using the introduced error categories in order to obtain more information about advantages and disadvantages of different systems and possibilities for improvements, as well as about advantages and disadvantages of applied methods for improvements. We used Arabic\u2013English Newswire and Broadcast News and Chinese\u2013English Newswire outputs created in the framework of the GALE project, several Spanish and English European Parliament outputs generated during the TC-Star project, and three German\u2013English outputs generated in the framework of the fourth Machine Translation Workshop. We show that our results correlate very well with the results of a human error analysis, and that all our metrics except the extra words reflect well the differences between different versions of the same translation system as well as the differences between different translation systems.", "phrases": ["automatic error analysis", "machine translation output", "inflectional error"], "overall_score": 5.352011161521591, "scores": [3.4185743619541706, 0.8709235301450736, 0.9048770564299876], "rank_score": 1.7314583161764106} -{"id": "li-etal-2004-joint", "title": "A Joint Source-Channel Model for Machine Transliteration", "abstract": "Most foreign names are transliterated into Chinese, Japanese or Korean with approximate phonetic equivalents. The transliteration is usually achieved through intermediate phonemic mapping. This paper presents a new framework that allows direct orthographical mapping (DOM) between two different languages, through a joint source-channel model, also called n-gram transliteration model (TM). With the n-gram TM model, we automate the orthographic alignment process to derive the aligned transliteration units from a bilingual dictionary. The n-gram TM under the DOM framework greatly reduces system development effort and provides a quantum leap in improvement in transliteration accuracy over that of other state-of-the-art machine learning algorithms. The modeling framework is validated through several experiments for English-Chinese language pair.", "phrases": ["joint source-channel model", "machine transliteration", "orthographic mapping", "source language", "phoneme-based method"], "overall_score": 7.085513315901644, "scores": [3.9458631921272636, 2.4795029142894967, 0.8541353377218109, 0.8387511657202983, 0.5345527515640635], "rank_score": 1.7305610722845866} -{"id": "lee-etal-2011-modeling", "title": "Modeling Syntactic Context Improves Morphological Segmentation", "abstract": "The connection between part-of-speech (POS) categories and morphological properties is well-documented in linguistics but underutilized in text processing systems. This paper proposes a novel model for morphological segmentation that is driven by this connection. Our model learns that words with common affixes are likely to be in the same syntactic category and uses learned syntactic categories to refine the segmentation boundaries of words. 
Our results demonstrate that incorporating POS categorization yields substantial performance gains on morphological segmentation of Arabic.", "phrases": ["syntactic context", "morphological segmentation", "parametric bayesian model"], "overall_score": 4.300234523553216, "scores": [3.1163196936908393, 1.5382443266279113, 0.5370610138692621], "rank_score": 1.7305416780626708} -{"id": "klein-etal-2017-opennmt", "title": "OpenNMT: Open-Source Toolkit for Neural Machine Translation", "abstract": "We describe an open-source toolkit for neural machine translation (NMT). The toolkit prioritizes efficiency, modularity, and extensibility with the goal of supporting NMT research into model architectures, feature representations, and source modalities, while maintaining competitive performance and reasonable training requirements. The toolkit consists of modeling and translation support, as well as detailed pedagogical documentation about the underlying techniques.", "phrases": ["open-source toolkit", "neural machine translation", "extensibility", "end"], "overall_score": 5.995009705250001, "scores": [3.582945457716246, 1.9475246195830258, 0.8284083523613625, 0.5602981878156722], "rank_score": 1.7297941543690767} -{"id": "chen-etal-2015-lifelong", "title": "Lifelong Learning for Sentiment Classification", "abstract": "This paper proposes a novel lifelong learning (LL) approach to sentiment classification. LL mimics the human continuous learning process, i.e., retaining the knowledge learned from past tasks and using it to help future learning. In this paper, we first discuss LL in general and then LL for sentiment classification in particular. The proposed LL approach adopts a Bayesian optimization framework based on stochastic gradient descent. Our experimental results show that the proposed method outperforms baseline methods significantly, which demonstrates that lifelong learning is a promising research direction.", "phrases": ["sentiment classification", "lifelong learning", "task learning"], "overall_score": 3.8000546327508817, "scores": [2.4607805374304696, 1.849177657932534, 0.8784799897193353], "rank_score": 1.7294793950274465} -{"id": "elgohary-etal-2020-speak", "title": "Speak to your Parser: Interactive Text-to-SQL with Natural Language Feedback", "abstract": "We study the task of semantic parse correction with natural language feedback. Given a natural language utterance, most semantic parsing systems pose the problem as one-shot translation where the utterance is mapped to a corresponding logical form. In this paper, we investigate a more interactive scenario where humans can further interact with the system by providing free-form natural language feedback to correct the system when it generates an inaccurate interpretation of an initial utterance. We focus on natural language to SQL systems and construct SPLASH, a dataset of utterances, incorrect SQL interpretations and the corresponding natural language feedback. We compare various reference models for the correction task and show that incorporating such a rich form of feedback can significantly improve the overall semantic parsing accuracy while retaining the flexibility of natural language interaction. While we estimated human correction accuracy is 81.5%, our best model achieves only 25.1%, which leaves a large gap for improvement in future research. 
SPLASH is publicly available at .", "phrases": ["text-to-sql", "natural language feedback", "semantic parsing"], "overall_score": 3.094964173295676, "scores": [3.080611413539161, 1.5225996656582454, 0.5787861025377328], "rank_score": 1.727332393911713} -{"id": "shang-etal-2018-learning", "title": "Learning Named Entity Tagger using Domain-Specific Dictionary", "abstract": "Recent advances in deep neural models allow us to build reliable named entity recognition (NER) systems without handcrafting features. However, such methods require large amounts of manually-labeled training data. There have been efforts on replacing human annotations with distant supervision (in conjunction with external dictionaries), but the generated noisy labels pose significant challenges on learning effective neural models. Here we propose two neural models to suit noisy distant supervision from the dictionary. First, under the traditional sequence labeling framework, we propose a revised fuzzy CRF layer to handle tokens with multiple possible labels. After identifying the nature of noisy labels in distant supervision, we go beyond the traditional framework and propose a novel, more effective neural model AutoNER with a new Tie or Break scheme. In addition, we discuss how to refine distant supervision for better NER performance. Extensive experiments on three benchmark datasets demonstrate that AutoNER achieves the best performance when only using dictionaries with no additional human effort, and delivers competitive results with state-of-the-art supervised benchmarks.", "phrases": ["dictionary", "entity recognition", "knowledge basis"], "overall_score": 6.036965202731362, "scores": [3.0294339940568333, 1.2258933721937844, 0.9243827558685351], "rank_score": 1.7265700407063844} -{"id": "currey-etal-2017-copied", "title": "Copied Monolingual Data Improves Low-Resource Neural Machine Translation", "abstract": "We train a neural machine translation (NMT) system to both translate source-language text and copy target-language text, thereby exploiting monolingual corpora in the target language. Specifically, we create a bitext from the monolingual text in the target language so that each source sentence is identical to the target sentence. This copied data is then mixed with the parallel corpus and the NMT system is trained like normal, with no metadata to distinguish the two input languages. Our proposed method proves to be an effective way of incorporating monolingual data into low-resource NMT. We see gains of up to 1.2 BLEU over a strong baseline with back-translation. Further analysis shows that the linguistic phenomena behind these gains are different from and largely orthogonal to back-translation, with our copied corpus method improving accuracy on named entities and other words that should remain identical between the source and target languages.", "phrases": ["monolingual data", "neural machine translation", "target sentence", "back-translation"], "overall_score": 5.623577728302646, "scores": [4.436566834743748, 0.9500129582488603, 0.9090124661348074, 0.608534322361333], "rank_score": 1.7260316453721871} -{"id": "park-etal-2018-reducing", "title": "Reducing Gender Bias in Abusive Language Detection", "abstract": "Abusive language detection models tend to have a problem of being biased toward identity words of a certain group of people because of imbalanced training datasets. For example, \u201cYou are a good woman\u201d was considered \u201csexist\u201d when trained on an existing dataset. 
Such model bias is an obstacle for models to be robust enough for practical use. In this work, we measure these biases on models trained with different datasets, while analyzing the effect of different pre-trained word embeddings and model architectures. We also experiment with three mitigation methods: (1) debiased word embeddings, (2) gender swap data augmentation, and (3) fine-tuning with a larger corpus. These methods can effectively reduce model bias by 90-98% and can be extended to correct model bias in other scenarios.", "phrases": ["gender bias", "abusive language detection", "hate speech"], "overall_score": 5.808665406235098, "scores": [2.734946320289653, 1.8838337694091742, 0.5562912727645335], "rank_score": 1.725023787487787} -{"id": "lei-etal-2015-high", "title": "High-Order Low-Rank Tensors for Semantic Role Labeling", "abstract": "This paper introduces a tensor-based approach to semantic role labeling (SRL). The motivation behind the approach is to automatically induce a compact feature representation for words and their relations, tailoring them to the task. In this sense, our dimensionality reduction method provides a clear alternative to the traditional feature engineering approach used in SRL. To capture meaningful interactions between the argument, predicate, their syntactic path and the corresponding role label, we compress each feature representation first to a lower dimensional space prior to assessing their interactions. This corresponds to using an overall cross-product feature representation and maintaining associated parameters as a four-way low-rank tensor. The tensor parameters are optimized for the SRL performance using standard online algorithms. Our tensor-based approach rivals the best performing system on the CoNLL-2009 shared task. In addition, we demonstrate that adding the representation tensor to a competitive tensor-free model yields a 2% absolute increase in F-score.", "phrases": ["tensor", "semantic role labeling", "feature representation"], "overall_score": 4.285087685989402, "scores": [2.3541369798260807, 1.9503086690978568, 0.8688927777187502], "rank_score": 1.7244461422142294} -{"id": "peng-etal-2017-cross", "title": "Cross-Sentence N-ary Relation Extraction with Graph LSTMs", "abstract": "Past work in relation extraction has focused on binary relations in single sentences. Recent NLP inroads in high-value domains have sparked interest in the more general setting of extracting n-ary relations that span multiple sentences. In this paper, we explore a general relation extraction framework based on graph long short-term memory networks (graph LSTMs) that can be easily extended to cross-sentence n-ary relation extraction. The graph formulation provides a unified way of exploring different LSTM approaches and incorporating various intra-sentential and inter-sentential dependencies, such as sequential, syntactic, and discourse relations. A robust contextual representation is learned for the entities, which serves as input to the relation classifier. This simplifies handling of relations with arbitrary arity, and enables multi-task learning with related relations. We evaluate this framework in two important precision medicine settings, demonstrating its effectiveness with both conventional supervised learning and distant supervision. Cross-sentence extraction produced larger knowledge bases, and multi-task learning significantly improved extraction accuracy. 
A thorough analysis of various LSTM approaches yielded useful insight into the impact of linguistic analysis on extraction accuracy.", "phrases": ["n-ary relation extraction", "graph lstm", "inter-sentential dependency", "sentence boundary"], "overall_score": 6.444308297660525, "scores": [3.7062768986116326, 2.0845315239894746, 0.5555293866949552, 0.5502677827415322], "rank_score": 1.7241513980093985} -{"id": "eljundi-etal-2019-hulmona", "title": "hULMonA: The Universal Language Model in Arabic", "abstract": "Arabic is a complex language with limited resources which makes it challenging to produce accurate text classification tasks such as sentiment analysis. The utilization of transfer learning (TL) has recently shown promising results for advancing accuracy of text classification in English. TL models are pre-trained on large corpora, and then fine-tuned on task-specific datasets. In particular, universal language models (ULMs), such as recently developed BERT, have achieved state-of-the-art results in various NLP tasks in English. In this paper, we hypothesize that similar success can be achieved for Arabic. The work aims at supporting the hypothesis by developing the first Universal Language Model in Arabic (hULMonA - \u062d\u0644\u0645\u0646\u0627 meaning our dream), demonstrating its use for Arabic classification tasks, and demonstrating how a pre-trained multi-lingual BERT can also be used for Arabic. We then conduct a benchmark study to evaluate both ULM successes with Arabic sentiment analysis. Experiment results show that the developed hULMonA and multi-lingual ULM are able to generalize well to multiple Arabic data sets and achieve new state of the art results in Arabic Sentiment Analysis for some of the tested sets.", "phrases": ["universal language model", "arabic", "hulmona"], "overall_score": 3.088132555708338, "scores": [2.2955017056923714, 1.9746047331581338, 0.9004523477685493], "rank_score": 1.723519595539685} -{"id": "warstadt-etal-2020-blimp-benchmark", "title": "BLiMP: The Benchmark of Linguistic Minimal Pairs for English", "abstract": "We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP), a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs\u2014that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate a specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. 
We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands.", "phrases": ["linguistic minimal pairs", "language model", "phenomena", "negative polarity item", "blimp"], "overall_score": 5.47546681029487, "scores": [2.9441518710849417, 2.559667160305968, 1.4644269464120983, 1.0893685078658824, 0.5568821569121994], "rank_score": 1.722899328516218} -{"id": "rastogi-etal-2018-multi", "title": "Multi-task Learning for Joint Language Understanding and Dialogue State Tracking", "abstract": "This paper presents a novel approach for multi-task learning of language understanding (LU) and dialogue state tracking (DST) in task-oriented dialogue systems. Multi-task training enables the sharing of the neural network layers responsible for encoding the user utterance for both LU and DST and improves performance while reducing the number of network parameters. In our proposed framework, DST operates on a set of candidate values for each slot that has been mentioned so far. These candidate sets are generated using LU slot annotations for the current user utterance, dialogue acts corresponding to the preceding system utterance and the dialogue state estimated for the previous turn, enabling DST to handle slots with a large or unbounded set of possible values and deal with slot values not seen during training. Furthermore, to bridge the gap between training and inference, we investigate the use of scheduled sampling on LU output for the current user utterance as well as the DST output for the preceding turn.", "phrases": ["language understanding", "dialogue state tracking", "multi-task learning"], "overall_score": 3.58231144776083, "scores": [2.352648986296341, 1.8969800683668323, 0.9185539059411394], "rank_score": 1.7227276535347709} -{"id": "sinha-2009-mining", "title": "Mining Complex Predicates In Hindi Using A Parallel Hindi-English Corpus", "abstract": "A complex predicate is a noun, a verb, an adjective or an adverb followed by a light verb, behaving together as a single verb unit. Complex predicates (CPs) are abundantly used in Hindi and other languages of the Indo-Aryan family. Detecting and interpreting CPs constitute an important and somewhat difficult task. Linguistic and statistical methods have yielded limited success in mining this data. In this paper, we present a simple method for detecting CPs of all kinds using a Hindi-English parallel corpus. A CP is hypothesized by detecting the absence of the conventional meaning of the light verb in the aligned English sentence. This simple strategy exploits the fact that a CP is a multiword expression with a meaning that is distinct from the meaning of the light verb. 
Although there are several shortcomings in the methodology, this empirical method surprisingly mines CPs with an average precision of 89% and a recall of 90%.", "phrases": ["complex predicate", "hindi", "noun", "hindi-english parallel corpus"], "overall_score": 3.7849258548493667, "scores": [2.4628227205782696, 1.6596436240032668, 1.5214235369746993, 1.2464860843597811], "rank_score": 1.7225939914790043} -{"id": "simard-etal-2007-statistical", "title": "Statistical Phrase-Based Post-Editing", "abstract": "We propose to use a statistical phrase-based machine translation system in a post-editing task: the system takes as input raw machine translation output (from a commercial rule-based MT system), and produces post-edited target-language text. We report on experiments that were performed on data collected in precisely such a setting: pairs of raw MT output and their manually post-edited versions. In our evaluation, the output of our automatic post-editing (APE) system is not only of better quality than the rule-based MT (both in terms of the BLEU and TER metrics), it is also better than the output of a state-of-the-art phrase-based MT system used in standalone translation mode. These results indicate that automatic post-editing constitutes a simple and efficient way of combining rule-based and statistical MT technologies.", "phrases": ["post-editing", "machine translation", "ape system", "phrase-based smt", "repetitive nature"], "overall_score": 6.123004023397162, "scores": [4.737494389009556, 1.2704866129597623, 1.153818838899605, 0.9241878802603843, 0.5249892036335834], "rank_score": 1.722195384952578} -{"id": "xiao-etal-2019-label", "title": "Label-Specific Document Representation for Multi-Label Text Classification", "abstract": "Multi-label text classification (MLTC) aims to tag the most relevant labels for the given document. In this paper, we propose a Label-Specific Attention Network (LSAN) to learn a label-specific document representation. LSAN takes advantage of label semantic information to determine the semantic connection between labels and document for constructing label-specific document representation. Meanwhile, the self-attention mechanism is adopted to identify the label-specific document representation from document content information. In order to seamlessly integrate the above two parts, an adaptive fusion strategy is proposed, which can effectively output the comprehensive label-specific document representation to build a multi-label text classifier. Extensive experimental results demonstrate that LSAN consistently outperforms the state-of-the-art methods on four different datasets, especially on the prediction of low-frequency labels. The code and hyper-parameter settings are released to facilitate other researchers.", "phrases": ["document representation", "multi-label text classification", "label-specific attention network"], "overall_score": 3.5812025989065974, "scores": [2.1071027283835093, 1.9307629599158347, 1.1287175415618724], "rank_score": 1.7221944099537387} -{"id": "roth-etal-2008-arabic", "title": "Arabic Morphological Tagging, Diacritization, and Lemmatization Using Lexeme Models and Feature Ranking", "abstract": "We investigate the tasks of general morphological tagging, diacritization, and lemmatization for Arabic. 
We show that for all tasks we consider, both modeling the lexeme explicitly, and retuning the weights of individual classifiers for the specific task, improve the performance.", "phrases": ["diacritization", "lemmatization", "arabic word"], "overall_score": 4.278327374734323, "scores": [2.908515863780408, 1.6586517833661445, 0.5980091313444962], "rank_score": 1.7217255928303496} -{"id": "dahlmeier-ng-2011-correcting", "title": "Correcting Semantic Collocation Errors with L1-induced Paraphrases", "abstract": "We present a novel approach for automatic collocation error correction in learner English which is based on paraphrases extracted from parallel corpora. Our key assumption is that collocation errors are often caused by semantic similarity in the first language (L1-language) of the writer. An analysis of a large corpus of annotated learner English confirms this assumption. We evaluate our approach on real-world learner data and show that L1-induced paraphrases outperform traditional approaches based on edit distance, homophones, and WordNet synonyms.", "phrases": ["collocation error", "paraphrase", "semantic error correction"], "overall_score": 4.9736171719076525, "scores": [3.483999023741375, 1.1281891253789156, 0.5500721985186927], "rank_score": 1.7207534492129948} -{"id": "li-etal-2009-non", "title": "A Non-negative Matrix Tri-factorization Approach to Sentiment Classification with Lexical Prior Knowledge", "abstract": "Sentiment classification refers to the task of automatically identifying whether a given piece of text expresses positive or negative opinion towards a subject at hand. The proliferation of user-generated web content such as blogs, discussion forums and online review sites has made it possible to perform large-scale mining of public opinion. Sentiment modeling is thus becoming a critical component of market intelligence and social media technologies that aim to tap into the collective wisdom of crowds. In this paper, we consider the problem of learning high-quality sentiment models with minimal manual supervision. We propose a novel approach to learn from lexical prior knowledge in the form of domain-independent sentiment-laden terms, in conjunction with domain-dependent unlabeled data and a few labeled documents. Our model is based on a constrained non-negative tri-factorization of the term-document matrix which can be implemented using simple update rules. Extensive experimental studies demonstrate the effectiveness of our approach on a variety of real-world sentiment prediction tasks.", "phrases": ["non-negative matrix tri-factorization", "sentiment classification", "lexical prior knowledge"], "overall_score": 3.7779983799753176, "scores": [2.555313606031097, 1.9678050412393258, 0.6352048381088437], "rank_score": 1.7194411617930887} -{"id": "schroeder-etal-2009-word", "title": "Word Lattices for Multi-Source Translation", "abstract": "Multi-source statistical machine translation is the process of generating a single translation from multiple inputs. Previous work has focused primarily on selecting from potential outputs of separate translation systems, and solely on multi-parallel corpora and test sets. We demonstrate how multi-source translation can be adapted for multiple monolingual inputs. 
We also examine different approaches to dealing with multiple sources, including consensus decoding, and we present a novel method of input combination to generate lattices for multi-source translation within a single translation model.", "phrases": ["lattice", "multi-source translation", "paraphrase"], "overall_score": 3.9589885614999103, "scores": [3.0847342547043457, 1.186307303582532, 0.8870591002456326], "rank_score": 1.7193668861775036} -{"id": "niessen-ney-2004-statistical", "title": "Statistical Machine Translation with Scarce Resources Using Morpho-syntactic Information", "abstract": "In statistical machine translation, correspondences between the words in the source and the target language are learned from parallel corpora, and often little or no linguistic knowledge is used to structure the underlying models. In particular, existing statistical systems for machine translation often treat different inflected forms of the same lemma as if they were independent of one another. The bilingual training data can be better exploited by explicitly taking into account the interdependencies of related inflected forms. We propose the construction of hierarchical lexicon models on the basis of equivalence classes of words. In addition, we introduce sentence-level restructuring transformations which aim at the assimilation of word order in related sentences. We have systematically investigated the amount of bilingual training data required to maintain an acceptable quality of machine translation. The combination of the suggested methods for improving translation quality in frameworks with scarce resources has been successfully tested: We were able to reduce the amount of bilingual training data to less than 10% of the original corpus, while losing only 1.6% in translation quality. The improvement of the translation results is demonstrated on two German-English corpora taken from the Verbmobil task and the Nespole! task.", "phrases": ["machine translation", "scarce resource", "morpho-syntactic information", "idiomatic multi-word expression", "morphological analysis"], "overall_score": 6.295434024287271, "scores": [3.2651193595233403, 2.8797904081167554, 1.3026851253683083, 0.585869936683923, 0.5584938056128219], "rank_score": 1.7183917270610294} -{"id": "toutanova-cherry-2009-global", "title": "A global model for joint lemmatization and part-of-speech prediction", "abstract": "We present a global joint model for lemmatization and part-of-speech prediction. Using only morphological lexicons and unlabeled data, we learn a partially-supervised part-of-speech tagger and a lemmatizer which are combined using features on a dynamically linked dependency structure of words. We evaluate our model on English, Bulgarian, Czech, and Slovene, and demonstrate substantial improvements over both a direct transduction approach to lemmatization and a pipelined approach, which predicts part-of-speech tags before lemmatization.", "phrases": ["lemmatization", "part-of-speech prediction", "joint model"], "overall_score": 3.7749775279053934, "scores": [2.3999788808910116, 1.912629313104163, 0.8415907443062689], "rank_score": 1.7180663127671478} -{"id": "chen-etal-2020-mixtext", "title": "MixText: Linguistically-Informed Interpolation of Hidden Space for Semi-Supervised Text Classification", "abstract": "This paper presents MixText, a semi-supervised learning method for text classification, which uses our newly designed data augmentation method called TMix. 
TMix creates a large amount of augmented training samples by interpolating text in hidden space. Moreover, we leverage recent advances in data augmentation to guess low-entropy labels for unlabeled data, hence making them as easy to use as labeled data. By mixing labeled, unlabeled and augmented data, MixText significantly outperformed current pre-trained and fined-tuned models and other state-of-the-art semi-supervised learning methods on several text classification benchmarks. The improvement is especially prominent when supervision is extremely limited. We have publicly released our code at .", "phrases": ["hidden space", "semi-supervised text classification", "tmix", "unlabeled data", "mixtext"], "overall_score": 6.057786504976323, "scores": [2.8299263772304544, 1.7351381021990815, 1.5553393571691265, 1.2539706369438344, 1.214915337708019], "rank_score": 1.717857962250103} -{"id": "naik-etal-2018-stress", "title": "Stress Test Evaluation for Natural Language Inference", "abstract": "Natural language inference (NLI) is the task of determining if a natural language hypothesis can be inferred from a given premise in a justifiable manner. NLI was proposed as a benchmark task for natural language understanding. Existing models perform well at standard datasets for NLI, achieving impressive results across different genres of text. However, the extent to which these models understand the semantic content of sentences is unclear. In this work, we propose an evaluation methodology consisting of automatically constructed \u201cstress tests\u201d that allow us to examine whether systems have the ability to make real inferential decisions. Our evaluation of six sentence-encoder models on these stress tests reveals strengths and weaknesses of these models with respect to challenging linguistic phenomena, and suggests important directions for future work in this area.", "phrases": ["natural language inference", "nli", "weakness", "stress test evaluation", "reasoning"], "overall_score": 6.910545159783394, "scores": [3.931490715960659, 0.9145996685261285, 1.4389274093357618, 1.241410045659526, 1.057350288139516], "rank_score": 1.7167556255243182} -{"id": "guo-etal-2018-soft", "title": "Soft Layer-Specific Multi-Task Summarization with Entailment and Question Generation", "abstract": "An accurate abstractive summary of a document should contain all its salient information and should be logically entailed by the input document. We improve these important aspects of abstractive summarization via multi-task learning with the auxiliary tasks of question generation and entailment generation, where the former teaches the summarization model how to look for salient questioning-worthy details, and the latter teaches the model how to rewrite a summary which is a directed-logical subset of the input document. We also propose novel multi-task architectures with high-level (semantic) layer-specific sharing across multiple encoder and decoder layers of the three tasks, as well as soft-sharing mechanisms (and show performance ablations and analysis examples of each contribution). Overall, we achieve statistically significant improvements over the state-of-the-art on both the CNN/DailyMail and Gigaword datasets, as well as on the DUC-2002 transfer setup. 
We also present several quantitative and qualitative analysis studies of our model's learned saliency and entailment skills.", "phrases": ["summarization", "question generation", "auxiliary task", "entailment generation"], "overall_score": 5.38080002038821, "scores": [2.6429487080064034, 1.666349567164101, 1.2781801969594684, 1.2768939678538818], "rank_score": 1.7160931099959635} -{"id": "dale-kilgarriff-2011-helping", "title": "Helping Our Own: The HOO 2011 Pilot Shared Task", "abstract": "The aim of the Helping Our Own (HOO) Shared Task is to promote the development of automated tools and techniques that can assist authors in the writing task, with a specific focus on writing within the natural language processing community. This paper reports on the results of a pilot run of the shared task, in which six teams participated. We describe the nature of the task and the data used, report on the results achieved, and discuss some of the things we learned that will guide future versions of the task.", "phrases": ["hoo", "shared task", "series", "learner", "non-native speaker"], "overall_score": 5.997489197848796, "scores": [4.605014962503799, 1.6596597934989932, 1.1378234868619825, 0.6031242330473512, 0.5707771062660577], "rank_score": 1.7152799164356367} -{"id": "lu-etal-2018-neural", "title": "A neural interlingua for multilingual machine translation", "abstract": "We incorporate an explicit neural interlingua into a multilingual encoder-decoder neural machine translation (NMT) architecture. We demonstrate that our model learns a language-independent representation by performing direct zero-shot translation (without using pivot translation), and by using the source sentence embeddings to create an English Yelp review classifier that, through the mediation of the neural interlingua, can also classify French and German reviews. Furthermore, we show that, despite using a smaller number of parameters than a pairwise collection of bilingual NMT models, our approach produces comparable BLEU scores for each language pair in WMT15.", "phrases": ["interlingua", "machine translation", "recurrent layer"], "overall_score": 5.450813394154697, "scores": [2.7035252256368083, 1.8807451490867262, 0.5611554284239734], "rank_score": 1.7151419343825023} -{"id": "van-der-wees-etal-2017-dynamic", "title": "Dynamic Data Selection for Neural Machine Translation", "abstract": "Intelligent selection of training data has proven a successful technique to simultaneously increase training efficiency and translation performance for phrase-based machine translation (PBMT). With the recent increase in popularity of neural machine translation (NMT), we explore in this paper to what extent and how NMT can also benefit from data selection. While state-of-the-art data selection (Axelrod et al., 2011) consistently performs well for PBMT, we show that gains are substantially lower for NMT. Next, we introduce `dynamic data selection' for NMT, a method in which we vary the selected subset of training data between different training epochs. 
Our experiments show that the best results are achieved when applying a technique we call `gradual fine-tuning', with improvements up to +2.6 BLEU over the original data selection approach and up to +3.1 BLEU over a general baseline.", "phrases": ["data selection", "neural machine translation", "gradual fine-tuning", "language model", "in-domain sentence"], "overall_score": 5.648842804673662, "scores": [3.6277058022968087, 2.9357307772692995, 0.8931739772753142, 0.5831862517741243, 0.5298670344890013], "rank_score": 1.7139327686209096} -{"id": "han-etal-2012-geolocation", "title": "Geolocation Prediction in Social Media Data by Finding Location Indicative Words", "abstract": "Geolocation prediction is vital to geospatial applications like localised search and local event detection. Predominately, social media geolocation models are based on full text data, including common words with no geospatial dimension (e.g. today) and noisy strings (tmrw), potentially hampering prediction and leading to slower/more memory-intensive models. In this paper, we focus on finding location indicative words (LIWs) via feature selection, and establishing whether the reduced feature set boosts geolocation accuracy. Our results show that an information gain ratio-based approach surpasses other methods at LIW selection, outperforming state-of-the-art geolocation prediction methods by 10.6% in accuracy and reducing the mean and median of prediction error distance by 45km and 209km, respectively, on a public dataset. We further formulate notions of prediction confidence, and demonstrate that performance is even higher in cases where our model is more confident, striking a trade-off between accuracy and coverage. Finally, the identified LIWs reveal regional language differences, which could be potentially useful for lexicographers.", "phrases": ["location", "indicative word", "geolocation prediction"], "overall_score": 4.637524344521424, "scores": [2.4916645008716993, 1.4174384270110552, 1.228384193996183], "rank_score": 1.7124957072929792} -{"id": "socher-etal-2013-parsing", "title": "Parsing with Compositional Vector Grammars", "abstract": "Natural language parsing has typically been done with small sets of discrete categories such as NP and VP, but this representation does not capture the full syntactic nor semantic richness of linguistic phrases, and attempts to improve on this by lexicalizing phrases or splitting categories only partly address the problem at the cost of huge feature spaces and sparseness. Instead, we introduce a Compositional Vector Grammar (CVG), which combines PCFGs with a syntactically untied recursive neural network that learns syntactico-semantic, compositional vector representations. The CVG improves the PCFG of the Stanford Parser by 3.8% to obtain an F1 score of 90.4%. It is fast to train and, implemented approximately as an efficient reranker, it is about 20% faster than the current Stanford factored parser. 
The CVG learns a soft notion of head words and improves performance on the types of ambiguities that require semantic information such as PP attachments.", "phrases": ["compositional vector grammar", "recursive neural network", "rnn", "node", "sentiment analysis"], "overall_score": 6.69595552883199, "scores": [3.0451531632752338, 1.843833521822373, 1.5820178148066801, 1.2396176900055296, 0.847552850907921], "rank_score": 1.7116350081635474} -{"id": "geiger-etal-2020-neural", "title": "Neural Natural Language Inference Models Partially Embed Theories of Lexical Entailment and Negation", "abstract": "We address whether neural models for Natural Language Inference (NLI) can learn the compositional interactions between lexical entailment and negation, using four methods: the behavioral evaluation methods of (1) challenge test sets and (2) systematic generalization tasks, and the structural evaluation methods of (3) probes and (4) interventions. To facilitate this holistic evaluation, we present Monotonicity NLI (MoNLI), a new naturalistic dataset focused on lexical entailment and negation. In our behavioral evaluations, we find that models trained on general-purpose NLI datasets fail systematically on MoNLI examples containing negation, but that MoNLI fine-tuning addresses this failure. In our structural evaluations, we look for evidence that our top-performing BERT-based model has learned to implement the monotonicity algorithm behind MoNLI. Probes yield evidence consistent with this conclusion, and our intervention experiments bolster this, showing that the causal dynamics of the model mirror the causal dynamics of this algorithm on subsets of MoNLI. This suggests that the BERT model at least partially embeds a theory of lexical entailment and negation at an algorithmic level.", "phrases": ["lexical entailment", "negation", "nli"], "overall_score": 4.103830352675563, "scores": [2.7096720347186682, 1.8757062787751673, 0.5489122444332591], "rank_score": 1.7114301859756982} -{"id": "sennrich-etal-2016-controlling", "title": "Controlling Politeness in Neural Machine Translation via Side Constraints", "abstract": "Many languages use honorifics to express politeness, social distance, or the relative social status between the speaker and their addressee(s). In machine translation from a language without honorifics such as English, it is difficult to predict the appropriate honorific, but users may want to control the level of politeness in the output. In this paper, we perform a pilot study to control honorifics in neural machine translation (NMT) via side constraints, focusing on English \u2192 German. We show that by marking up the (English) source side of the training data with a feature that encodes the use of honorifics on the (German) target side, we can control the honorifics produced at test time. 
Experiments show that the choice of honorifics has a big impact on translation quality as measured by BLEU, and oracle experiments show that substantial improvements are possible by constraining the translation to the desired level of politeness.", "phrases": ["politeness", "neural machine translation", "pronoun", "voice", "source text"], "overall_score": 6.2250493934239, "scores": [4.585777885763889, 2.3425582616266483, 0.5695376625218276, 0.5346357942949268, 0.5240566461604247], "rank_score": 1.7113132500735433} -{"id": "pavlick-callison-burch-2016-simple", "title": "Simple PPDB: A Paraphrase Database for Simplification", "abstract": "We release the Simple Paraphrase Database, a subset of the Paraphrase Database (PPDB) adapted for the task of text simplification. We train a supervised model to associate simplification scores with each phrase pair, producing rankings competitive with state-of-the-art lexical simplification models. Our new simplification database contains 4.5 million paraphrase rules, making it the largest available resource for lexical simplification.", "phrases": ["ppdb", "paraphrase database", "simplification"], "overall_score": 3.93947035564475, "scores": [2.2784953807073722, 2.010998669733817, 0.8431766607926776], "rank_score": 1.7108902370779557} -{"id": "galley-etal-2004-whats", "title": "What's in a translation rule?", "abstract": "We propose a theory that gives formal semantics to word-level alignments defined over parallel corpora. We use our theory to introduce a linear algorithm that can be used to derive from word-aligned, parallel corpora the minimal set of syntactically motivated transformation rules that explain human translation data.", "phrases": ["translation rule", "parallel corpora", "syntax-based model"], "overall_score": 7.0309077213630795, "scores": [3.070770075681964, 1.2282546885298538, 0.8319337280723877], "rank_score": 1.7103194974280684} -{"id": "dahlmeier-etal-2013-building", "title": "Building a Large Annotated Corpus of Learner English: The NUS Corpus of Learner English", "abstract": "We describe the NUS Corpus of Learner English (NUCLE), a large, fully annotated corpus of learner English that is freely available for research purposes. The goal of the corpus is to provide a large data resource for the development and evaluation of grammatical error correction systems. Although NUCLE has been available for almost two years, there has been no reference paper that describes the corpus in detail. In this paper, we address this need. We describe the annotation schema and the data collection and annotation process of NUCLE. Most importantly, we report on an unpublished study of annotator agreement for grammatical error correction. Finally, we present statistics on the distribution of grammatical errors in the NUCLE corpus.", "phrases": ["learner english", "nus corpus", "grammatical error", "national university"], "overall_score": 5.816171716465058, "scores": [2.787745220500026, 2.6588819094238496, 0.8574221610670791, 0.5360967679510424], "rank_score": 1.7100365147354992} -{"id": "cucerzan-brill-2004-spelling", "title": "Spelling Correction as an Iterative Process that Exploits the Collective Knowledge of Web Users", "abstract": "Logs of user queries to an internet search engine provide a large amount of implicit and explicit information about language. 
In this paper, we investigate their use in spelling correction of search queries, a task which poses many additional challenges beyond the traditional spelling correction problem. We present an approach that uses an iterative transformation of the input query strings into other strings that correspond to more and more likely queries according to statistics extracted from internet search query logs.", "phrases": ["iterative process", "search query", "spelling correction", "chinese", "trust dictionary"], "overall_score": 5.433939790034193, "scores": [3.974319083654658, 1.6456570538541786, 1.362339729253056, 1.0436155162322038, 0.5232312179415588], "rank_score": 1.7098325201871312} -{"id": "ponzetto-strube-2006-exploiting", "title": "Exploiting Semantic Role Labeling, WordNet and Wikipedia for Coreference Resolution", "abstract": "In this paper we present an extension of a machine learning based coreference resolution system which uses features induced from different semantic knowledge sources. These features represent knowledge mined from WordNet and Wikipedia, as well as information about semantic role labels. We show that semantic features indeed improve the performance on different referring expression types such as pronouns and common nouns.", "phrases": ["wikipedia", "coreference resolution", "knowledge source", "mention", "semantic similarity"], "overall_score": 5.755958286909426, "scores": [4.056576559364608, 2.052175851924425, 1.0456869572471443, 0.8637651132792838, 0.5286511814907029], "rank_score": 1.7093711326612329} -{"id": "bali-etal-2014-borrowing", "title": "\u201cI am borrowing ya mixing ?\u201d An Analysis of English-Hindi Code Mixing in Facebook", "abstract": "Code-Mixing is a frequently observed phenomenon in social media content generated by multi-lingual users. The processing of such data for linguistic analysis as well as computational modelling is challenging due to the linguistic complexity resulting from the nature of the mixing as well as the presence of non-standard variations in spellings and grammar, and transliteration. Our analysis shows the extent of Code-Mixing in English-Hindi data. The classification of Code-Mixed words based on frequency and linguistic typology underline the fact that while there are easily identifiable cases of borrowing and mixing at the two ends, a large majority of the words form a continuum in the middle, emphasizing the need to handle these at different levels for automatic processing of the data.", "phrases": ["mixing", "facebook", "spelling", "bilingual user", "code-mixed data"], "overall_score": 6.2154317991514345, "scores": [2.6337408630882253, 2.2470837447494323, 1.403957738527206, 1.3588055967512336, 0.8997585595258509], "rank_score": 1.7086693005283897} -{"id": "riloff-etal-2003-learning", "title": "Learning subjective nouns using extraction pattern bootstrapping", "abstract": "We explore the idea of creating a subjectivity classifier that uses lists of subjective nouns learned by bootstrapping algorithms. The goal of our research is to develop a system that can distinguish subjective sentences from objective sentences. First, we use two bootstrapping algorithms that exploit extraction patterns to learn sets of subjective nouns. Then we train a Naive Bayes classifier using the subjective nouns, discourse features, and subjectivity clues identified in prior research. 
The bootstrapping algorithms learned over 1000 subjective nouns, and the subjectivity classifier performed well, achieving 77% recall with 81% precision.", "phrases": ["noun", "extraction pattern", "self-training", "newswire text", "previous research"], "overall_score": 6.168904725941115, "scores": [4.31814557862489, 2.2789863656892533, 0.8269034359546017, 0.5752619095101802, 0.5427201074314596], "rank_score": 1.708403479442077} -{"id": "tsai-etal-2019-multimodal", "title": "Multimodal Transformer for Unaligned Multimodal Language Sequences", "abstract": "Human language is often multimodal, which comprehends a mixture of natural language, facial gestures, and acoustic behaviors. However, two major challenges in modeling such multimodal human language time-series data exist: 1) inherent data non-alignment due to variable sampling rates for the sequences from each modality; and 2) long-range dependencies between elements across modalities. In this paper, we introduce the Multimodal Transformer (MulT) to generically address the above issues in an end-to-end manner without explicitly aligning the data. At the heart of our model is the directional pairwise crossmodal attention, which attends to interactions between multimodal sequences across distinct time steps and latently adapts streams from one modality to another. Comprehensive experiments on both aligned and non-aligned multimodal time-series show that our model outperforms state-of-the-art methods by a large margin. In addition, empirical analysis suggests that correlated crossmodal signals are able to be captured by the proposed crossmodal attention mechanism in MulT.", "phrases": ["mult", "multimodal transformer", "cross-modal attention", "emotion"], "overall_score": 5.916191088110993, "scores": [3.6681372314916794, 1.355377407058604, 1.2762703270860252, 0.5284226693790584], "rank_score": 1.7070519087538418} -{"id": "liu-etal-2020-multilingual-denoising", "title": "Multilingual Denoising Pre-training for Neural Machine Translation", "abstract": "This paper demonstrates that multilingual denoising pre-training produces significant performance gains across a wide variety of machine translation (MT) tasks. We present mBART\u2014a sequence-to-sequence denoising auto-encoder pre-trained on large-scale monolingual corpora in many languages using the BART objective (Lewis et al., 2019). mBART is the first method for pre-training a complete sequence-to-sequence model by denoising full texts in multiple languages, whereas previous approaches have focused only on the encoder, decoder, or reconstructing parts of the text. Pre-training a complete model allows it to be directly fine-tuned for supervised (both sentence-level and document-level) and unsupervised machine translation, with no task-specific modifications. We demonstrate that adding mBART initialization produces performance gains in all but the highest-resource settings, including up to 12 BLEU points for low resource MT and over 5 BLEU points for many document-level and unsupervised models. 
We also show that it enables transfer to language pairs with no bi-text or that were not in the pre-training corpus, and present extensive analysis of which factors contribute the most to effective pre-training.", "phrases": ["neural machine translation", "multilingual denoising", "language model", "pre-trained model", "encoder-decoder model"], "overall_score": 8.02137724822479, "scores": [2.1091802730385574, 1.4708245365300316, 2.1392546178888954, 1.7732868360016545, 1.0399617187037962], "rank_score": 1.706501596432587} -{"id": "liu-etal-2019-inoculation", "title": "Inoculation by Fine-Tuning: A Method for Analyzing Challenge Datasets", "abstract": "Several datasets have recently been constructed to expose brittleness in models trained on existing benchmarks. While model performance on these challenge datasets is significantly lower compared to the original benchmark, it is unclear what particular weaknesses they reveal. For example, a challenge dataset may be difficult because it targets phenomena that current models cannot capture, or because it simply exploits blind spots in a model's specific training set. We introduce inoculation by fine-tuning, a new analysis method for studying challenge datasets by exposing models (the metaphorical patient) to a small amount of data from the challenge dataset (a metaphorical pathogen) and assessing how well they can adapt. We apply our method to analyze the NLI \u201cstress tests\u201d (Naik et al., 2018) and the Adversarial SQuAD dataset (Jia and Liang, 2017). We show that after slight exposure, some of these datasets are no longer challenging, while others remain difficult. Our results indicate that failures on challenge datasets may lead to very different conclusions about models, training datasets, and the challenge datasets themselves.", "phrases": ["fine-tuning", "failure", "inoculation"], "overall_score": 4.730595740669259, "scores": [2.573849579531835, 2.0010884330776517, 0.5436672490260088], "rank_score": 1.7062017538784984} -{"id": "solorio-etal-2014-overview", "title": "Overview for the First Shared Task on Language Identification in Code-Switched Data", "abstract": "We present an overview of the first shared task on language identification on code-switched data. The shared task included code-switched data from four language pairs: Modern Standard Arabic-Dialectal Arabic (MSA-DA), Mandarin-English (MAN-EN), Nepali-English (NEP-EN), and Spanish-English (SPA-EN). A total of seven teams participated in the task and submitted 42 system runs. The evaluation showed that language identification at the token level is more difficult when the languages present are closely related, as in the case of MSA-DA, where the prediction performance was the lowest among all language pairs. In contrast, the language pairs with the highest F-measure were SPA-EN and NEP-EN. The task made evident that language identification in code-switched data is still far from solved and warrants further research.", "phrases": ["language identification", "code-switched data", "codeswitched data", "code-switched text"], "overall_score": 6.293964889818898, "scores": [2.9072571384556536, 2.557678088056343, 0.8294702148821194, 0.5303932196732184], "rank_score": 1.7061996652668334} -{"id": "clark-curran-2004-parsing", "title": "Parsing the WSJ Using CCG and Log-Linear Models", "abstract": "This paper describes and evaluates log-linear parsing models for Combinatory Categorial Grammar (CCG). 
A parallel implementation of the L-BFGS optimisation algorithm is described, which runs on a Beowulf cluster allowing the complete Penn Treebank to be used for estimation. We also develop a new efficient parsing algorithm for CCG which maximises expected recall of dependencies. We compare models which use all CCG derivations, including non-standard derivations, with normal-form models. The performances of the two models are comparable and the results are competitive with existing wide-coverage CCG parsers.", "phrases": ["ccg", "combinatory categorial grammar", "derivation"], "overall_score": 5.7447609045420736, "scores": [2.9379355238465936, 1.1327582934272287, 1.0474435795510502], "rank_score": 1.7060457989416242} -{"id": "vyas-etal-2014-pos", "title": "POS Tagging of English-Hindi Code-Mixed Social Media Content", "abstract": "Code-mixing is frequently observed in user generated content on social media, especially from multilingual users. The linguistic complexity of such content is compounded by presence of spelling variations, transliteration and non-adherence to formal grammar. We describe our initial efforts to create a multi-level annotated corpus of Hindi-English codemixed text collated from Facebook forums, and explore language identification, back-transliteration, normalization and POS tagging of this data. Our results show that language identification and transliteration for Hindi are two major challenges that impact POS tagging accuracy.", "phrases": ["facebook forum", "language identification", "pos tagging", "social medium text", "code-mixed data"], "overall_score": 6.603626471212739, "scores": [4.012000893313675, 1.2601881464173126, 1.245797786788735, 1.1440397103872118, 0.867143677460244], "rank_score": 1.7058340428734358} -{"id": "suhr-etal-2019-corpus", "title": "A Corpus for Reasoning about Natural Language Grounded in Photographs", "abstract": "We introduce a new dataset for joint reasoning about natural language and images, with a focus on semantic diversity, compositionality, and visual reasoning challenges. The data contains 107,292 examples of English sentences paired with web photographs. The task is to determine whether a natural language caption is true about a pair of photographs. We crowdsource the data using sets of visually rich images and a compare-and-contrast task to elicit linguistically diverse language. Qualitative analysis shows the data requires compositional joint reasoning, including about quantities, comparisons, and relations. Evaluation using state-of-the-art visual reasoning methods shows the data presents a strong challenge.", "phrases": ["reasoning", "image", "nlvr2"], "overall_score": 4.500977127900131, "scores": [2.6277975277456354, 1.3924219336101007, 1.0963542606971124], "rank_score": 1.7055245740176161} -{"id": "surdeanu-etal-2003-using", "title": "Using Predicate-Argument Structures for Information Extraction", "abstract": "In this paper we present a novel, customizable IE paradigm that takes advantage of predicate-argument structures. We also introduce a new way of automatically identifying predicate argument structures, which is central to our IE paradigm. It is based on: (1) an extended set of features; and (2) inductive decision tree learning. 
The experimental results prove our claim that accurate predicate-argument structures enable high quality IE results.", "phrases": ["predicate-argument structure", "information extraction", "machine translation"], "overall_score": 5.267973881041898, "scores": [2.8521178753121648, 1.386053453912955, 0.8746415426768017], "rank_score": 1.7042709573006405} -{"id": "jiang-zhou-2008-generating", "title": "Generating Chinese Couplets using a Statistical MT Approach", "abstract": "Part of the unique cultural heritage of China is the game of Chinese couplets (duilian). One person challenges the other person with a sentence (first sentence). The other person then replies with a sentence (second sentence) equal in length and word segmentation, in a way that corresponding words in the two sentences match each other by obeying certain constraints on semantic, syntactic, and lexical relatedness. This task is viewed as a difficult problem in AI and has not been explored in the research community. \n \nIn this paper, we regard this task as a kind of machine translation process. We present a phrase-based SMT approach to generate the second sentence. First, the system takes as input the first sentence, and generates as output an N-best list of proposed second sentences, using a phrase-based SMT decoder. Then, a set of filters is used to remove candidates violating linguistic constraints. Finally, a Ranking SVM is applied to rerank the candidates. A comprehensive evaluation, using both human judgments and BLEU scores, has been conducted, and the results demonstrate that this approach is very successful.", "phrases": ["chinese couplet", "first line", "smt system", "poetry generation"], "overall_score": 5.01417991612817, "scores": [4.200770292082756, 1.2470977338807552, 0.8244625796730766, 0.539398150308174], "rank_score": 1.7029321889861904} -{"id": "punyakanok-etal-2008-importance", "title": "The Importance of Syntactic Parsing and Inference in Semantic Role Labeling", "abstract": "We present a general framework for semantic role labeling. The framework combines a machine-learning technique with an integer linear programming-based inference procedure, which incorporates linguistic and structural constraints into a global decision process. Within this framework, we study the role of syntactic parsing information in semantic role labeling. We show that full syntactic parsing information is, by far, most relevant in identifying the argument, especially in the very first stage, the pruning stage. Surprisingly, the quality of the pruning stage cannot be solely determined based on its recall and precision. Instead, it depends on the characteristics of the output candidates that determine the difficulty of the downstream problems. Motivated by this observation, we propose an effective and simple approach of combining different semantic role labeling systems through joint inference, which significantly improves its performance. Our system has been evaluated in the CoNLL-2005 shared task on semantic role labeling, and achieves the highest F1 score among 19 participants.", "phrases": ["semantic role labeling", "pruning stage", "srl", "essential role"], "overall_score": 6.3626792959582055, "scores": [4.076930782740513, 1.657690108910062, 0.5402552669721006, 0.5343712556851167], "rank_score": 1.7023118535769481} -{"id": "jiang-etal-2011-target", "title": "Target-dependent Twitter Sentiment Classification", "abstract": "Sentiment analysis on Twitter data has attracted much attention recently. 
In this paper, we focus on target-dependent Twitter sentiment classification; namely, given a query, we classify the sentiments of the tweets as positive, negative or neutral according to whether they contain positive, negative or neutral sentiments about that query. Here the query serves as the target of the sentiments. The state-of-the-art approaches for solving this problem always adopt the target-independent strategy, which may assign irrelevant sentiments to the given target. Moreover, the state-of-the-art approaches only take the tweet to be classified into consideration when classifying the sentiment; they ignore its context (i.e., related tweets). However, because tweets are usually short and more ambiguous, sometimes it is not enough to consider only the current tweet for sentiment classification. In this paper, we propose to improve target-dependent Twitter sentiment classification by 1) incorporating target-dependent features; and 2) taking related tweets into consideration. According to the experimental results, our approach greatly improves the performance of target-dependent sentiment classification.", "phrases": ["twitter sentiment classification", "query", "target-dependent feature", "syntactic structure", "polarity"], "overall_score": 7.1554461428859595, "scores": [4.047861149413998, 1.4215485167406767, 1.0923574677698848, 1.0795086502509483, 0.867605590276069], "rank_score": 1.7017762748903151} -{"id": "augenstein-etal-2016-stance", "title": "Stance Detection with Bidirectional Conditional Encoding", "abstract": "Stance detection is the task of classifying the attitude expressed in a text towards a target such as Hillary Clinton to be \"positive\", \"negative\" or \"neutral\". Previous work has assumed that either the target is mentioned in the text or that training data for every target is given. This paper considers the more challenging version of this task, where targets are not always mentioned and no training data is available for the test targets. We experiment with conditional LSTM encoding, which builds a representation of the tweet that is dependent on the target, and demonstrate that it outperforms encoding the tweet and the target independently. Performance is improved further when the conditional model is augmented with bidirectional encoding. We evaluate our approach on the SemEval 2016 Task 6 Twitter Stance Detection corpus achieving performance second best only to a system trained on semi-automatically labelled tweets for the test target. When such weak supervision is added, our approach achieves state-of-the-art results.", "phrases": ["attitude", "stance detection", "tweet representation"], "overall_score": 4.714958345794532, "scores": [3.999514875825147, 0.5606691946431118, 0.5415011971385922], "rank_score": 1.7005617558689503} -{"id": "liu-etal-2005-log", "title": "Log-Linear Models for Word Alignment", "abstract": "We present a framework for word alignment based on log-linear models. All knowledge sources are treated as feature functions, which depend on the source language sentence, the target language sentence and possible additional variables. Log-linear models allow statistical alignment models to be easily extended by incorporating syntactic information. In this paper, we use IBM Model 3 alignment probabilities, POS correspondence, and bilingual dictionary coverage as features. 
Our experiments show that log-linear models significantly outperform IBM translation models.", "phrases": ["word alignment", "knowledge source", "coverage", "log-linear model"], "overall_score": 5.256331903420126, "scores": [3.8295199957641644, 1.8523675542139697, 0.5846103313214533, 0.5355205092395583], "rank_score": 1.7005045976347863} -{"id": "hockenmaier-steedman-2007-ccgbank", "title": "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", "abstract": "This article presents an algorithm for translating the Penn Treebank into a corpus of Combinatory Categorial Grammar (CCG) derivations augmented with local and long-range word-word dependencies. The resulting corpus, CCGbank, includes 99.4% of the sentences in the Penn Treebank. It is available from the Linguistic Data Consortium, and has been used to train wide-coverage statistical parsers that obtain state-of-the-art rates of dependency recovery. In order to obtain linguistically adequate CCG analyses, and to eliminate noise and inconsistencies in the original annotation, an extensive analysis of the constructions and annotations in the Penn Treebank was called for, and a substantial number of changes to the Treebank were necessary. We discuss the implications of our findings for the extraction of other linguistically expressive grammars from the Treebank, and for the design of future treebanks.", "phrases": ["derivation", "wide-coverage statistical parser", "ccgbank", "ptb", "english verb join"], "overall_score": 6.045317978597092, "scores": [5.249506100920822, 1.2677861841618605, 0.8297031357618883, 0.5962266872959188, 0.5585024404042401], "rank_score": 1.7003449097089458} -{"id": "lee-etal-2018-deterministic", "title": "Deterministic Non-Autoregressive Neural Sequence Modeling by Iterative Refinement", "abstract": "We propose a conditional non-autoregressive neural sequence model based on iterative refinement. The proposed model is designed based on the principles of latent variable models and denoising autoencoders, and is generally applicable to any sequence generation task. We extensively evaluate the proposed model on machine translation (En-De and En-Ro) and image caption generation, and observe that it significantly speeds up decoding while maintaining the generation quality comparable to the autoregressive counterpart.", "phrases": ["iterative refinement", "latent variable model", "machine translation", "nat", "non-autoregressive prediction"], "overall_score": 6.576888913800596, "scores": [3.9214005885983596, 1.7852508271710177, 1.269863259981831, 0.870359386924312, 0.6477622210887202], "rank_score": 1.6989272567528482} -{"id": "nakazawa-etal-2016-aspec", "title": "ASPEC: Asian Scientific Paper Excerpt Corpus", "abstract": "In this paper, we describe the details of the ASPEC (Asian Scientific Paper Excerpt Corpus), which is the first large-size parallel corpus of scientific paper domain. ASPEC was constructed in the Japanese-Chinese machine translation project conducted between 2006 and 2010 using the Special Coordination Funds for Promoting Science and Technology. It consists of a Japanese-English scientific paper abstract corpus of approximately 3 million parallel sentences (ASPEC-JE) and a Chinese-Japanese scientific paper excerpt corpus of approximately 0.68 million parallel sentences (ASPEC-JC). 
ASPEC is used as the official dataset for the machine translation evaluation workshop WAT (Workshop on Asian Translation).", "phrases": ["scientific paper", "aspec", "translation task"], "overall_score": 4.708471416326772, "scores": [3.59929890023506, 0.9103877273990485, 0.5849796442424424], "rank_score": 1.698222090625517} -{"id": "lee-etal-2014-sentence", "title": "A Sentence Judgment System for Grammatical Error Detection", "abstract": "This study develops a sentence judgment system using both rule-based and n-gram statistical methods to detect grammatical errors in Chinese sentences. The rule-based method provides 142 rules developed by linguistic experts to identify potential rule violations in input sentences. The n-gram statistical method relies on the n-gram scores of both correct and incorrect training sentences to determine the correctness of the input sentences, providing learners with improved understanding of linguistic rules and n-gram frequencies.", "phrases": ["sentence judgment system", "grammatical error detection", "n-gram", "rule-based linguistic analysis"], "overall_score": 4.706061484098938, "scores": [3.5494085450497432, 0.9563832382819312, 1.6904936457562745, 0.5931261361401441], "rank_score": 1.6973528913070233} -{"id": "maynez-etal-2020-faithfulness", "title": "On Faithfulness and Factuality in Abstractive Summarization", "abstract": "It is well known that the standard likelihood training and approximate decoding objectives in neural text generation models lead to less human-like responses for open-ended tasks such as language modeling and story generation. In this paper we have analyzed limitations of these models for abstractive document summarization and found that these models are highly prone to hallucinate content that is unfaithful to the input document. We conducted a large scale human evaluation of several neural abstractive summarization systems to better understand the types of hallucinations they produce. Our human annotators found substantial amounts of hallucinated content in all model generated summaries. However, our analysis does show that pretrained models are better summarizers not only in terms of raw metrics, i.e., ROUGE, but also in generating faithful and factual summaries as evaluated by humans. Furthermore, we show that textual entailment measures better correlate with faithfulness than standard metrics, potentially leading the way to automatic evaluation metrics as well as training and decoding criteria.", "phrases": ["faithfulness", "abstractive summarization", "text generation model", "consistency", "original document"], "overall_score": 6.920691830566225, "scores": [3.1398856193946654, 1.9461564713115511, 1.5062758200877402, 1.0452264625813712, 0.8488177088538348], "rank_score": 1.6972724164458328} -{"id": "huang-etal-2021-dagn", "title": "DAGN: Discourse-Aware Graph Network for Logical Reasoning", "abstract": "Recent QA with logical reasoning questions requires passage-level relations among the sentences. However, current approaches still focus on sentence-level relations interacting among tokens. In this work, we explore aggregating passage-level clues for solving logical reasoning QA by using discourse-based information. We propose a discourse-aware graph network (DAGN) that reasons relying on the discourse structure of the texts. The model encodes discourse information as a graph with elementary discourse units (EDUs) and discourse relations, and learns the discourse-aware features via a graph network for downstream QA tasks. 
Experiments are conducted on two logical reasoning QA datasets, ReClor and LogiQA, and our proposed DAGN achieves competitive results. The source code is available at .", "phrases": ["discourse-aware graph network", "logical reasoning", "dagn"], "overall_score": 2.7313416365262215, "scores": [2.0358512031886056, 1.6956054634560995, 1.3597772643281838], "rank_score": 1.697077976990963} -{"id": "lavie-agarwal-2007-meteor", "title": "METEOR: An Automatic Metric for MT Evaluation with High Levels of Correlation with Human Judgments", "abstract": "Meteor is an automatic metric for Machine Translation evaluation which has been demonstrated to have high levels of correlation with human judgments of translation quality, significantly outperforming the more commonly used Bleu metric. It is one of several automatic metrics used in this year's shared task within the ACL WMT-07 workshop. This paper recaps the technical details underlying the metric and describes recent improvements in the metric. The latest release includes improved metric parameters and extends the metric to support evaluation of MT output in Spanish, French and German, in addition to English.", "phrases": ["automatic metric", "judgment", "translation quality", "meteor"], "overall_score": 4.594982248145044, "scores": [3.7873006242854537, 1.8875813801820625, 0.5583731251069254, 0.5538897265657131], "rank_score": 1.6967862140350387} -{"id": "davidson-etal-2019-racial", "title": "Racial Bias in Hate Speech and Abusive Language Detection Datasets", "abstract": "Technologies for abusive language detection are being developed and applied with little consideration of their potential biases. We examine racial bias in five different sets of Twitter data annotated for hate speech and abusive language. We train classifiers on these datasets and compare the predictions of these classifiers on tweets written in African-American English with those written in Standard American English. The results show evidence of systematic racial bias in all datasets, as classifiers trained on them tend to predict that tweets written in African-American English are abusive at substantially higher rates. If these abusive language detection systems are used in the field they will therefore have a disproportionate negative impact on African-American social media users. Consequently, these systems may discriminate against the groups who are often the targets of the abuse we are trying to detect.", "phrases": ["hate speech", "language detection", "racial bias"], "overall_score": 5.763926066332987, "scores": [2.7330117750113754, 1.8306174587984518, 0.5203974363943339], "rank_score": 1.6946755567347205} -{"id": "thorne-etal-2018-fact", "title": "The Fact Extraction and VERification (FEVER) Shared Task", "abstract": "We present the results of the first Fact Extraction and VERification (FEVER) Shared Task. The task challenged participants to classify whether human-written factoid claims could be SUPPORTED or REFUTED using evidence retrieved from Wikipedia. We received entries from 23 competing teams, 19 of which scored higher than the previously published baseline. The best performing system achieved a FEVER score of 64.21%. 
In this paper, we present the results of the shared task and a summary of the systems, highlighting commonalities and innovations among participating systems.", "phrases": ["fact extraction", "verification", "complex entity", "human-generated claim", "entailment"], "overall_score": 6.117704889003259, "scores": [3.347892750902014, 3.2172872491446625, 0.8303110088366407, 0.5390675493467962, 0.5365629640580388], "rank_score": 1.6942243044576304} -{"id": "hoffart-etal-2011-robust", "title": "Robust Disambiguation of Named Entities in Text", "abstract": "Disambiguating named entities in natural-language text maps mentions of ambiguous names onto canonical entities like people or places, registered in a knowledge base such as DBpedia or YAGO. This paper presents a robust method for collective disambiguation, by harnessing context from knowledge bases and using a new form of coherence graph. It unifies prior approaches into a comprehensive framework that combines three measures: the prior probability of an entity being mentioned, the similarity between the contexts of a mention and a candidate entity, as well as the coherence among candidate entities for all mentions together. The method builds a weighted graph of mentions and candidate entities, and computes a dense subgraph that approximates the best joint mention-entity mapping. Experiments show that the new method significantly outperforms prior methods in terms of accuracy, with robust behavior across a variety of inputs.", "phrases": ["disambiguation", "mention", "knowledge base", "global coherence", "wikipedia"], "overall_score": 6.75649041772453, "scores": [3.2224299608723537, 1.9428575017564096, 1.2757265000574176, 1.1244549292662989, 0.9034674823906553], "rank_score": 1.6937872748686267} -{"id": "wan-2009-co", "title": "Co-Training for Cross-Lingual Sentiment Classification", "abstract": "The lack of Chinese sentiment corpora limits the research progress on Chinese sentiment classification. However, there are many freely available English sentiment corpora on the Web. This paper focuses on the problem of cross-lingual sentiment classification, which leverages an available English corpus for Chinese sentiment classification by using the English corpus as training data. Machine translation services are used for eliminating the language gap between the training set and test set, and English features and Chinese features are considered as two independent views of the classification problem. We propose a co-training approach to making use of unlabeled Chinese data. Experimental results show the effectiveness of the proposed approach, which can outperform the standard inductive classifiers and the transductive classifiers.", "phrases": ["sentiment classification", "chinese feature", "co-training"], "overall_score": 4.5852926652725206, "scores": [3.7237376863028495, 0.8156457025405266, 0.5402410546838334], "rank_score": 1.6932081478424033} -{"id": "vu-etal-2016-combining", "title": "Combining Recurrent and Convolutional Neural Networks for Relation Classification", "abstract": "This paper investigates two different neural architectures for the task of relation classification: convolutional neural networks and recurrent neural networks. For both models, we demonstrate the effect of different architectural choices. We present a new context representation for convolutional neural networks for relation classification (extended middle context). 
Furthermore, we propose connectionist bi-directional recurrent neural networks and introduce ranking loss for their optimization. Finally, we show that combining convolutional and recurrent neural networks using a simple voting scheme is accurate enough to improve results. Our neural models achieve state-of-the-art results on the SemEval 2010 relation classification task.", "phrases": ["convolutional neural networks", "relation classification", "cnn"], "overall_score": 4.059501609852949, "scores": [3.0987669352758287, 0.9285487402568182, 1.0515153175100123], "rank_score": 1.6929436643475528} -{"id": "tai-etal-2015-improved", "title": "Improved Semantic Representations From Tree-Structured Long Short-Term Memory Networks", "abstract": "Because of their superior ability to preserve sequence information over time, Long Short-Term Memory (LSTM) networks, a type of recurrent neural network with a more complex computational unit, have obtained strong results on a variety of sequence modeling tasks. The only underlying LSTM structure that has been explored so far is a linear chain. However, natural language exhibits syntactic properties that would naturally combine words to phrases. We introduce the Tree-LSTM, a generalization of LSTMs to tree-structured network topologies. Tree-LSTMs outperform all existing systems and strong LSTM baselines on two tasks: predicting the semantic relatedness of two sentences (SemEval 2014, Task 1) and sentiment classification (Stanford Sentiment Treebank).", "phrases": ["short-term memory network", "tree-lstm", "tree-structured network topology", "sentiment classification", "constituent"], "overall_score": 8.250170996277863, "scores": [1.9417928856656144, 2.4810236340242984, 1.5257279439036562, 1.268802274457682, 1.2440246171297504], "rank_score": 1.6922742710362002} -{"id": "young-etal-2014-image", "title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", "abstract": "We propose to use the visual denotations of linguistic expressions (i.e. the set of images they describe) to define novel denotational similarity metrics, which we show to be at least as beneficial as distributional similarities for two tasks that require semantic inference. To compute these denotational similarities, we construct a denotation graph, i.e. a subsumption hierarchy over constituents and their denotations, based on a large corpus of 30K images and 150K descriptive captions.", "phrases": ["denotation", "similarity metric", "image caption", "flickr30k", "entailment task"], "overall_score": 6.478824902656412, "scores": [3.092246967028631, 1.8204551149207202, 1.618259645665468, 1.0413874095279927, 0.8886481615302408], "rank_score": 1.6921994597346106} -{"id": "gasco-sanchez-etal-2022-socialdisner", "title": "The SocialDisNER shared task on detection of disease mentions in health-relevant content from social media: methods, evaluation, guidelines and corpora", "abstract": "There is a pressing need to exploit health-related content from social media, a global source of data where key health information is posted directly by citizens, patients and other healthcare stakeholders. Use cases of disease related social media mining include disease outbreak/surveillance, mental health and pharmacovigilance. Current efforts address the exploitation of social media beyond English. 
The SocialDisNER task, organized as part of the SMM4H 2022 initiative, has applied the LINKAGE methodology to select and annotate a Gold Standard corpus of 9,500 tweets in Spanish enriched with disease mentions generated by patients and medical professionals. As a complementary resource for teams participating in the SocialDisNER track, we have also created a large-scale corpus of 85,000 tweets, where in addition to disease mentions, other medical entities of relevance (e.g., medications, symptoms and procedures, among others) have been automatically labelled. Using these large-scale datasets, co-mention networks or knowledge graphs were released for each entity pair type. Out of the 47 teams registered for the task, 17 teams uploaded a total of 32 runs. The top-performing team achieved a very competitive 0.891 f-score, with a system trained following a continue pre-training strategy. We anticipate that the corpus and systems resulting from the SocialDisNER track might further foster health related text mining of social media content in Spanish and inspire disease detection strategies in other languages.", "phrases": ["disease mention", "social media mining", "spanish"], "overall_score": 3.8945385542128763, "scores": [3.4317669920063887, 1.0550812797917455, 0.5872815391642267], "rank_score": 1.6913766036541205} -{"id": "liu-etal-2006-tree", "title": "Tree-to-String Alignment Template for Statistical Machine Translation", "abstract": "We present a novel translation model based on tree-to-string alignment template (TAT) which describes the alignment between a source parse tree and a target string. A TAT is capable of generating both terminals and non-terminals and performing reordering at both low and high levels. The model is linguistically syntax-based because TATs are extracted automatically from word-aligned, source side parsed parallel texts. To translate a source sentence, we first employ a parser to produce a source parse tree and then apply TATs to transform the tree into a target string. Our experiments show that the TAT-based model significantly outperforms Pharaoh, a state-of-the-art decoder for phrase-based models.", "phrases": ["alignment template", "statistical machine translation", "source side", "smt system", "syntax-based approach"], "overall_score": 7.00630240586457, "scores": [3.0778586141092803, 2.469584554234485, 1.1065628151520899, 0.9040371792289992, 0.8972725203916296], "rank_score": 1.6910631366232969} -{"id": "han-etal-2018-fewrel", "title": "FewRel: A Large-Scale Supervised Few-Shot Relation Classification Dataset with State-of-the-Art Evaluation", "abstract": "We present a Few-Shot Relation Classification Dataset (dataset), consisting of 70, 000 sentences on 100 relations derived from Wikipedia and annotated by crowdworkers. The relation of each sentence is first recognized by distant supervision methods, and then filtered by crowdworkers. We adapt the most recent state-of-the-art few-shot learning methods for relation classification and conduct thorough evaluation of these methods. Empirical results show that even the most competitive few-shot learning models struggle on this task, especially as compared with humans. We also show that a range of different reasoning skills are needed to solve our task. These results indicate that few-shot relation classification remains an open problem and still requires further research. 
Our detailed analysis points multiple directions for future research.", "phrases": ["relation classification dataset", "few-shot learning model", "fewrel", "baseline model"], "overall_score": 6.3202144125855115, "scores": [2.935765291227269, 2.6659011056988935, 0.6305533296811223, 0.5315823833162243], "rank_score": 1.6909505274808774} -{"id": "du-etal-2019-extracting", "title": "Extracting Symptoms and their Status from Clinical Conversations", "abstract": "This paper describes novel models tailored for a new application, that of extracting the symptoms mentioned in clinical conversations along with their status. Lack of any publicly available corpus in this privacy-sensitive domain led us to develop our own corpus, consisting of about 3K conversations annotated by professional medical scribes. We propose two novel deep learning approaches to infer the symptom names and their status: (1) a new hierarchical span-attribute tagging (SA-T) model, trained using curriculum learning, and (2) a variant of sequence-to-sequence model which decodes the symptoms and their status from a few speaker turns within a sliding window over the conversation. This task stems from a realistic application of assisting medical providers in capturing symptoms mentioned by patients from their clinical conversations. To reflect this application, we define multiple metrics. From inter-rater agreement, we find that the task is inherently difficult. We conduct comprehensive evaluations on several contrasting conditions and observe that the performance of the models range from an F-score of 0.5 to 0.8 depending on the condition. Our analysis not only reveals the inherent challenges of the task, but also provides useful directions to improve the models.", "phrases": ["symptom", "status", "clinical conversation"], "overall_score": 3.028916166113812, "scores": [2.3817408793425794, 2.0803932692162386, 0.6092767491641268], "rank_score": 1.6904702992409815} -{"id": "mairesse-walker-2007-personage", "title": "PERSONAGE: Personality Generation for Dialogue", "abstract": "Over the last fifty years, the \u201cBig Five\u201d model of personality traits has become a standard in psychology, and research has systematically documented correlations between a wide range of linguistic variables and the Big Five traits. A distinct line of research has explored methods for automatically generating language that varies along personality dimensions. We present PERSONAGE (PERSONAlity GEnerator), the first highly parametrizable language generator for extraversion, an important aspect of personality. We evaluate two personality generation methods: (1) direct generation with particular parameter settings suggested by the psychology literature; and (2) overgeneration and selection using statistical models trained from judge\u2019s ratings. Results show that both methods reliably generate utterances that vary along the extraversion dimension, according to human judges.", "phrases": ["personality trait", "language generator", "personage"], "overall_score": 4.977196016929885, "scores": [3.074477410324648, 1.0796546567240186, 0.916982721350424], "rank_score": 1.6903715961330301} -{"id": "zeldes-etal-2019-introduction", "title": "Introduction to Discourse Relation Parsing and Treebanking (DISRPT): 7th Workshop on Rhetorical Structure Theory and Related Formalisms", "abstract": "This overview summarizes the main contributions of the accepted papers at the 2019 workshop on Discourse Relation Parsing and Treebanking (DISRPT 2019). 
Co-located with NAACL 2019 in Minneapolis, the workshop aimed to bring together researchers working on corpus-based and computational approaches to discourse relations. In addition to an invited talk, the eighteen papers outlined below were presented, four of which were submitted as part of a shared task on elementary discourse unit segmentation and connective detection.", "phrases": ["treebanking", "disrpt", "discourse unit segmentation"], "overall_score": 3.710191861283726, "scores": [2.2051885630844614, 1.9082071559907101, 0.9523475366029565], "rank_score": 1.6885810852260426} -{"id": "kiritchenko-mohammad-2017-best", "title": "Best-Worst Scaling More Reliable than Rating Scales: A Case Study on Sentiment Intensity Annotation", "abstract": "Rating scales are a widely used method for data annotation; however, they present several challenges, such as difficulty in maintaining inter- and intra-annotator consistency. Best\u2013worst scaling (BWS) is an alternative method of annotation that is claimed to produce high-quality annotations while keeping the required number of annotations similar to that of rating scales. However, the veracity of this claim has never been systematically established. Here for the first time, we set up an experiment that directly compares the rating scale method with BWS. We show that with the same total number of annotations, BWS produces significantly more reliable results than the rating scale.", "phrases": ["rating scale", "best-worst scaling", "empirical experiment", "further detail"], "overall_score": 4.455912899690382, "scores": [3.625807252961233, 1.3826827777646187, 1.2161602209943556, 0.529144521415698], "rank_score": 1.6884486932839762} -{"id": "versley-etal-2008-bart-modular", "title": "BART: A modular toolkit for coreference resolution", "abstract": "Developing a full coreference system able to run all the way from raw text to semantic interpretation is a considerable engineering effort. Accordingly, there is very limited availability of off-the-shelf tools for researchers whose interests are not primarily in coreference, or for others who want to concentrate on a specific aspect of the problem. We present BART, a highly modular toolkit for developing coreference applications. In the Johns Hopkins workshop on using lexical and encyclopedic knowledge for entity disambiguation, the toolkit was used to extend a reimplementation of Soon et al.\u2019s proposal with a variety of additional syntactic and knowledge-based features, and experiment with alternative resolution processes, preprocessing tools, and classifiers. BART has been released as open source software and is available from ", "phrases": ["modular toolkit", "coreference resolution", "bart"], "overall_score": 4.048072669259983, "scores": [3.247548030642122, 0.9800330023934, 0.8369512447263455], "rank_score": 1.6881774259206226} -{"id": "munteanu-marcu-2005-improving", "title": "Improving Machine Translation Performance by Exploiting Non-Parallel Corpora", "abstract": "We present a novel method for discovering parallel sentences in comparable, non-parallel corpora. We train a maximum entropy classifier that, given a pair of sentences, can reliably determine whether or not they are translations of each other. Using this approach, we extract parallel data from large Chinese, Arabic, and English non-parallel newspaper corpora. We evaluate the quality of the extracted data by showing that it improves the performance of a state-of-the-art statistical machine translation system. 
We also show that a good-quality MT system can be built from scratch by starting with a very small parallel corpus (100,000 words) and exploiting a large non-parallel corpus. Thus, our method can be applied with great benefit to language pairs for which only scarce resources are available.", "phrases": ["non-parallel corpora", "maximum entropy classifier", "parallel data", "sentence extraction", "proper smt translation"], "overall_score": 7.479734737618084, "scores": [2.9456916221147424, 2.098595507104464, 1.6888330122136996, 1.132577417270154, 0.5748847054640195], "rank_score": 1.6881164528334156} -{"id": "wu-etal-2020-generating", "title": "Generating Diverse Translation from Model Distribution with Dropout", "abstract": "Despite the improvement of translation quality, neural machine translation (NMT) often suffers from the lack of diversity in its generation. In this paper, we propose to generate diverse translations by deriving a large number of possible models with Bayesian modelling and sampling models from them for inference. The possible models are obtained by applying concrete dropout to the NMT model and each of them has specific confidence for its prediction, which corresponds to a posterior model distribution under specific training data in the principle of Bayesian modelling. With variational inference, the posterior model distribution can be approximated with a variational distribution, from which the final models for inference are sampled. We conducted experiments on Chinese-English and English-German translation tasks and the results show that our method makes a better trade-off between diversity and accuracy.", "phrases": ["diverse translation", "dropout", "posterior model distribution"], "overall_score": 3.024074123149936, "scores": [2.8376844469160742, 1.6610073160867085, 0.564611947823091], "rank_score": 1.6877679036086246} -{"id": "brown-etal-2005-automatic", "title": "Automatic Question Generation for Vocabulary Assessment", "abstract": "In the REAP system, users are automatically provided with texts to read targeted to their individual reading levels. To find appropriate texts, the user's vocabulary knowledge must be assessed. We describe an approach to automatically generating questions for vocabulary assessment. Traditionally, these assessments have been hand-written. Using data from WordNet, we generate 6 types of vocabulary questions. They can have several forms, including wordbank and multiple-choice. We present experimental results that suggest that these automatically-generated questions give a measure of vocabulary skill that correlates well with subject performance on independently developed human-written questions. In addition, strong correlations with standardized vocabulary tests point to the validity of our approach to automatic assessment of word knowledge.", "phrases": ["question generation", "vocabulary assessment", "distractor"], "overall_score": 4.5664378951447295, "scores": [2.276092386052233, 1.954065132181585, 0.8285794578600513], "rank_score": 1.6862456586979564} -{"id": "dubossarsky-etal-2017-outta", "title": "Outta Control: Laws of Semantic Change and Inherent Biases in Word Representation Models", "abstract": "This article evaluates three proposed laws of semantic change. Our claim is that in order to validate a putative law of semantic change, the effect should be observed in the genuine condition but absent or reduced in a suitably matched control condition, in which no change can possibly have taken place. 
Our analysis shows that the effects reported in recent literature must be substantially revised: (i) the proposed negative correlation between meaning change and word frequency is shown to be largely an artefact of the models of word representation used; (ii) the proposed negative correlation between meaning change and prototypicality is shown to be much weaker than what has been claimed in prior art; and (iii) the proposed positive correlation between meaning change and polysemy is largely an artefact of word frequency. These empirical observations are corroborated by analytical proofs that show that count representations introduce an inherent dependence on word frequency, and thus word frequency cannot be evaluated as an independent factor with these representations.", "phrases": ["law", "semantic change", "control condition", "polysemy", "previous literature"], "overall_score": 4.873591327996707, "scores": [3.065077549141268, 2.9821481859198897, 1.2520866681636569, 0.5683486641379876, 0.5630733436737951], "rank_score": 1.6861468822073196} -{"id": "zhu-etal-2010-monolingual", "title": "A Monolingual Tree-based Translation Model for Sentence Simplification", "abstract": "In this paper, we consider sentence simplification as a special form of translation with the complex sentence as the source and the simple sentence as the target. We propose a Tree-based Simplification Model (TSM), which, to our knowledge, is the first statistical simplification model covering splitting, dropping, reordering and substitution integrally. We also describe an efficient method to train our model with a large-scale parallel dataset obtained from Wikipedia and Simple Wikipedia. The evaluation shows that our model achieves better readability scores than a set of baseline systems.", "phrases": ["sentence simplification", "simple english wikipedia", "pwkp", "paraphrasing"], "overall_score": 7.084677049317931, "scores": [3.282224424131762, 1.876813340309998, 1.04580817484533, 0.5349352439035329], "rank_score": 1.6849452957976558} -{"id": "wu-etal-2017-sequential", "title": "Sequential Matching Network: A New Architecture for Multi-turn Response Selection in Retrieval-Based Chatbots", "abstract": "We study response selection for multi-turn conversation in retrieval-based chatbots. Existing work either concatenates utterances in context or ultimately matches a response with a highly abstract context vector, which may lose relationships among the utterances or important information in the context. We propose a sequential matching network (SMN) to address both problems. SMN first matches a response with each utterance in the context on multiple levels of granularity, and distills important matching information from each pair as a vector with convolution and pooling operations. The vectors are then accumulated in chronological order through a recurrent neural network (RNN) which models relationships among the utterances. The final matching score is calculated with the hidden states of the RNN. 
An empirical study on two public data sets shows that SMN can significantly outperform state-of-the-art methods for response selection in multi-turn conversation.", "phrases": ["multi-turn response selection", "chatbot", "conversation", "sequential matching network"], "overall_score": 5.724663048790556, "scores": [3.146466266650149, 1.2677208362325298, 1.461107207310316, 0.8572323930857549], "rank_score": 1.6831316758196873} -{"id": "caglayan-etal-2020-simultaneous", "title": "Simultaneous Machine Translation with Visual Context", "abstract": "Simultaneous machine translation (SiMT) aims to translate a continuous input text stream into another language with the lowest latency and highest quality possible. The translation thus has to start with an incomplete source text, which is read progressively, creating the need for anticipation. In this paper, we seek to understand whether the addition of visual information can compensate for the missing source context. To this end, we analyse the impact of different multimodal approaches and visual features on state-of-the-art SiMT frameworks. Our results show that visual context is helpful and that visually-grounded models based on explicit object region information are much better than commonly used global features, reaching up to 3 BLEU points improvement under low latency scenarios. Our qualitative analysis illustrates cases where only the multimodal systems are able to translate correctly from English into gender-marked languages, as well as deal with differences in word order, such as adjective-noun placement between English and French.", "phrases": ["visual context", "simultaneous machine translation", "mmt"], "overall_score": 3.01472705745751, "scores": [2.636793943548596, 1.8643368324240988, 0.5465228447831322], "rank_score": 1.6825512069186088} -{"id": "xiong-etal-2010-error", "title": "Error Detection for Statistical Machine Translation Using Linguistic Features", "abstract": "Automatic error detection is desired in post-processing to improve machine translation quality. Previous work is largely based on confidence estimation using system-based features, such as word posterior probabilities calculated from N-best lists or word lattices. We propose to incorporate two groups of linguistic features, which convey information from outside machine translation systems, into error detection: lexical and syntactic features. We use a maximum entropy classifier to predict translation errors by integrating the word posterior probability feature and linguistic features. The experimental results show that 1) linguistic features alone outperform word posterior probability based confidence estimation in error detection; and 2) linguistic features can further provide complementary information when combined with word confidence scores, which collectively reduce the classification error rate by 18.52% and improve the F measure by 16.37%.", "phrases": ["linguistic feature", "error detection", "pos", "dependency link"], "overall_score": 4.952295270107959, "scores": [3.152862700653657, 1.8745640038730902, 0.8529055128573355, 0.8473266747148589], "rank_score": 1.6819147230247355} -{"id": "zhu-etal-2019-ncls", "title": "NCLS: Neural Cross-Lingual Summarization", "abstract": "Cross-lingual summarization (CLS) is the task of producing a summary in one particular language for a source document in a different language. Existing methods simply divide this task into two steps: summarization and translation, leading to the problem of error propagation. 
To address this, we present the first end-to-end CLS framework, which we refer to as Neural Cross-Lingual Summarization (NCLS). Moreover, we propose to further improve NCLS by incorporating two related tasks, monolingual summarization and machine translation, into the training process of CLS under multi-task learning. Due to the lack of supervised CLS data, we propose a round-trip translation strategy to acquire two high-quality large-scale CLS datasets based on existing monolingual summarization datasets. Experimental results have shown that our NCLS achieves remarkable improvement over traditional pipeline methods on both English-to-Chinese and Chinese-to-English CLS human-corrected test sets. In addition, NCLS with multi-task learning can further significantly improve the quality of generated summaries. We make our dataset and code publicly available.", "phrases": ["summarization", "machine translation", "round-trip translation strategy", "large-scale cls dataset"], "overall_score": 4.437041250174698, "scores": [3.9708698913558966, 1.3395253055500436, 0.8513702781015915, 0.5634256762154882], "rank_score": 1.6812977878057551} -{"id": "qazvinian-etal-2011-rumor", "title": "Rumor has it: Identifying Misinformation in Microblogs", "abstract": "A rumor is commonly defined as a statement whose truth value is unverifiable. Rumors may spread misinformation (false information) or disinformation (deliberately false information) on a network of people. Identifying rumors is crucial in online social media where large amounts of information are easily spread across a large network by sources with unverified authority. In this paper, we address the problem of rumor detection in microblogs and explore the effectiveness of 3 categories of features: content-based, network-based, and microblog-specific memes for correctly identifying rumors. Moreover, we show how these features are also effective in identifying disinformers, users who endorse a rumor and further help it to spread. We perform our experiments on more than 10,000 manually annotated tweets collected from Twitter and show how our retrieval model achieves more than 0.95 in Mean Average Precision (MAP). Finally, we believe that our dataset is the first large-scale dataset on rumor detection. It can open new dimensions in analyzing online misinformation and other aspects of microblog conversations.", "phrases": ["twitter", "rumor", "social medium platform", "textual characteristic"], "overall_score": 6.20160025838749, "scores": [4.107803364024932, 1.5464965006838838, 0.54177267663337, 0.5285714439431575], "rank_score": 1.6811609963213356} -{"id": "blevins-zettlemoyer-2020-moving", "title": "Moving Down the Long Tail of Word Sense Disambiguation with Gloss Informed Bi-encoders", "abstract": "A major obstacle in Word Sense Disambiguation (WSD) is that word senses are not uniformly distributed, causing existing models to generally perform poorly on senses that are either rare or unseen during training. We propose a bi-encoder model that independently embeds (1) the target word with its surrounding context and (2) the dictionary definition, or gloss, of each sense. The encoders are jointly optimized in the same representation space, so that sense disambiguation can be performed by finding the nearest sense embedding for each target word embedding. 
Our system outperforms previous state-of-the-art models on English all-words WSD; these gains predominantly come from improved performance on rare senses, leading to a 31.1% error reduction on less frequent senses over prior work. This demonstrates that rare senses can be more effectively disambiguated by modeling their definitions.", "phrases": ["word sense disambiguation", "gloss", "bi-encoder"], "overall_score": 5.410226134916748, "scores": [3.4472760408210767, 1.0445439422262153, 0.5505237691895072], "rank_score": 1.6807812507455997} -{"id": "he-etal-2015-question", "title": "Question-Answer Driven Semantic Role Labeling: Using Natural Language to Annotate Natural Language", "abstract": "This paper introduces the task of question-answer driven semantic role labeling (QA-SRL), where question-answer pairs are used to represent predicate-argument structure. For example, the verb \u201cintroduce\u201d in the previous sentence would be labeled with the questions \u201cWhat is introduced?\u201d and \u201cWhat introduces something?\u201d, each paired with the phrase from the sentence that gives the correct answer. Posing the problem this way allows the questions themselves to define the set of possible roles, without the need for predefined frame or thematic role ontologies. It also allows for scalable data collection by annotators with very little training and no linguistic expertise. We gather data in two domains, newswire text and Wikipedia articles, and introduce simple classifier-based models for predicting which questions to ask and what their answers should be. Our results show that non-expert annotators can produce high-quality QA-SRL data, and also establish baseline performance levels for future work on this task.", "phrases": ["annotator", "question-answer pair", "predicate-argument structure"], "overall_score": 5.875701292563823, "scores": [1.9925573262185452, 1.5861563021464813, 1.4626322196898354], "rank_score": 1.6804486160182872} -{"id": "rasooli-tetreault-2013-joint", "title": "Joint Parsing and Disfluency Detection in Linear Time", "abstract": "We introduce a novel method to jointly parse and detect disfluencies in spoken utterances. Our model can use arbitrary features for parsing sentences and adapt itself with out-of-domain data. We show that our method, based on transition-based parsing, performs at a high level of accuracy for both the parsing and disfluency detection tasks. Additionally, our method is the fastest for the joint task, running in linear time.", "phrases": ["disfluency detection", "linear time", "dependency parsing"], "overall_score": 4.655139925328511, "scores": [3.0248607926663387, 0.8262957468571988, 1.1858039241632086], "rank_score": 1.6789868212289154} -{"id": "niehues-etal-2016-pre", "title": "Pre-Translation for Neural Machine Translation", "abstract": "Recently, the development of neural machine translation (NMT) has significantly improved the translation quality of automatic machine translation. While most sentences are more accurate and fluent than translations by statistical machine translation (SMT)-based systems, in some cases, the NMT system produces translations that have a completely different meaning. This is especially the case when rare words occur. When using statistical machine translation, it has already been shown that significant gains can be achieved by simplifying the input in a preprocessing step. A commonly used example is the pre-reordering approach. 
In this work, we used phrase-based machine translation to pre-translate the input into the target language. Then a neural machine translation system generates the final hypothesis using the pre-translation. Thereby, we use either only the output of the phrase-based machine translation (PBMT) system or a combination of the PBMT output and the source sentence. We evaluate the technique on the English to German translation task. Using this approach we are able to outperform the PBMT system as well as the baseline neural MT system by up to 2 BLEU points. We analyzed the influence of the quality of the initial system on the final result.", "phrases": ["neural machine translation", "pbmt", "pre-translation"], "overall_score": 4.756075342652178, "scores": [2.4828510642952275, 2.0130435539714955, 0.5401631349871128], "rank_score": 1.6786859177512785} -{"id": "miwa-sasaki-2014-modeling", "title": "Modeling Joint Entity and Relation Extraction with Table Representation", "abstract": "This paper proposes a history-based structured learning approach that jointly extracts entities and relations in a sentence. We introduce a novel simple and flexible table representation of entities and relations. We investigate several feature settings, search orders, and learning methods with inexact search on the table. The experimental results demonstrate that a joint learning approach significantly outperforms a pipeline approach by incorporating global features and by selecting appropriate learning methods and search orders.", "phrases": ["joint entity", "relation extraction", "table representation", "feature-based model"], "overall_score": 5.868796659255563, "scores": [2.3466387770740154, 2.2417688447150073, 1.5958172856774413, 0.5296706652851237], "rank_score": 1.6784738931878969} -{"id": "cotterell-etal-2015-modeling", "title": "Modeling Word Forms Using Latent Underlying Morphs and Phonology", "abstract": "The observed pronunciations or spellings of words are often explained as arising from the \u201cunderlying forms\u201d of their morphemes. These forms are latent strings that linguists try to reconstruct by hand. We propose to reconstruct them automatically at scale, enabling generalization to new words. Given some surface word types of a concatenative language along with the abstract morpheme sequences that they express, we show how to recover consistent underlying forms for these morphemes, together with the (stochastic) phonology that maps each concatenation of underlying forms to a surface form. Our technique involves loopy belief propagation in a natural directed graphical model whose variables are unknown strings and whose conditional distributions are encoded as finite-state machines with trainable weights. We define training and evaluation paradigms for the task of surface word prediction, and report results on subsets of 7 languages.", "phrases": ["word form", "phonology", "graphical model"], "overall_score": 4.304946753851041, "scores": [2.7094059249678555, 0.8896841224908648, 1.4360348075343639], "rank_score": 1.6783749516643613} -{"id": "smith-eisner-2008-dependency", "title": "Dependency Parsing by Belief Propagation", "abstract": "We formulate dependency parsing as a graphical model with the novel ingredient of global constraints. We show how to apply loopy belief propagation (BP), a simple and effective tool for approximate learning and inference. As a parsing algorithm, BP is both asymptotically and empirically efficient. 
Even with second-order features or latent variables, which would make exact parsing considerably slower or NP-hard, BP needs only O(n^3) time with a small constant factor. Furthermore, such features significantly improve parse accuracy over exact first-order methods. Incorporating additional features would increase the runtime additively rather than multiplicatively.", "phrases": ["belief propagation", "global constraint", "approximate learning", "dependency parsing", "decomposition"], "overall_score": 4.652429697357339, "scores": [4.105103666526918, 2.59226700966671, 0.5772546871621534, 0.5581011085198584, 0.5573200937018274], "rank_score": 1.6780093131154934} -{"id": "kumar-etal-2020-evaluating", "title": "Evaluating Aggression Identification in Social Media", "abstract": "In this paper, we present the report and findings of the Shared Task on Aggression and Gendered Aggression Identification organised as part of the Second Workshop on Trolling, Aggression and Cyberbullying (TRAC-2) at LREC 2020. The task consisted of two sub-tasks - aggression identification (sub-task A) and gendered aggression identification (sub-task B) - in three languages - Bangla, Hindi and English. For this task, the participants were provided with a dataset of approximately 5,000 instances from YouTube comments in each language. For testing, approximately 1,000 instances were provided in each language for each sub-task. A total of 70 teams registered to participate in the task and 19 teams submitted their test runs. The best system obtained a weighted F-score of approximately 0.80 in sub-task A and approximately 0.87 in sub-task B for all three languages.", "phrases": ["aggression identification", "cyberbullying", "hate speech", "social medium", "abusive language workshop"], "overall_score": 5.814160178611927, "scores": [4.274824382922083, 1.931496105493273, 1.0830728773114395, 0.5504881971540478, 0.5481784937366749], "rank_score": 1.6776120113235033} -{"id": "sporleder-li-2009-unsupervised", "title": "Unsupervised Recognition of Literal and Non-Literal Use of Idiomatic Expressions", "abstract": "We propose an unsupervised method for distinguishing literal and non-literal usages of idiomatic expressions. Our method determines how well a literal interpretation is linked to the overall cohesive structure of the discourse. If strong links can be found, the expression is classified as literal, otherwise as idiomatic. We show that this method can help to tell apart literal and non-literal usages, even for idioms which occur in canonical form.", "phrases": ["idiomatic expression", "unsupervised method", "component word"], "overall_score": 5.813002275177091, "scores": [2.6219569271441627, 1.363587974771045, 1.0462888311293426], "rank_score": 1.6772779110148501} -{"id": "wong-mooney-2007-generation", "title": "Generation by Inverting a Semantic Parser that Uses Statistical Machine Translation", "abstract": "This paper explores the use of statistical machine translation (SMT) methods for tactical natural language generation. We present results on using phrase-based SMT for learning to map meaning representations to natural language. Improved results are obtained by inverting a semantic parser that uses SMT methods to map sentences into meaning representations. Finally, we show that hybridizing these two approaches results in still more accurate generation systems. 
Automatic and human evaluations of generated sentences are presented across two domains and four languages.", "phrases": ["semantic parser", "smt method", "synchronous grammar"], "overall_score": 5.182693446520644, "scores": [3.218197588363976, 0.9307636223875675, 0.8810830458938004], "rank_score": 1.6766814188817813} -{"id": "marvin-linzen-2018-targeted", "title": "Targeted Syntactic Evaluation of Language Models", "abstract": "We present a data set for evaluating the grammaticality of the predictions of a language model. We automatically construct a large number of minimally different pairs of English sentences, each consisting of a grammatical and an ungrammatical sentence. The sentence pairs represent different variations of structure-sensitive phenomena: subject-verb agreement, reflexive anaphora and negative polarity items. We expect a language model to assign a higher probability to the grammatical sentence than the ungrammatical one. In an experiment using this data set, an LSTM language model performed poorly on many of the constructions. Multi-task training with a syntactic objective (CCG supertagging) improved the LSTM's accuracy, but a large gap remained between its performance and the accuracy of human participants recruited online. This suggests that there is considerable room for improvement over LSTMs in capturing syntax in a language model.", "phrases": ["agreement", "negative polarity item", "targeted syntactic evaluation", "neural language model", "high probability"], "overall_score": 6.994872979665127, "scores": [2.5526431811859696, 1.6834758974614106, 1.4860366201400341, 1.3982165353653249, 1.257950771609019], "rank_score": 1.6756646011523517} -{"id": "chen-etal-2020-distilling", "title": "Distilling Knowledge Learned in BERT for Text Generation", "abstract": "Large-scale pre-trained language models such as BERT have achieved great success in language understanding tasks. However, it remains an open question how to utilize BERT for language generation. In this paper, we present a novel approach, Conditional Masked Language Modeling (C-MLM), to enable the finetuning of BERT on target generation tasks. The finetuned BERT (teacher) is exploited as extra supervision to improve conventional Seq2Seq models (student) for better text generation performance. By leveraging BERT's idiosyncratic bidirectional nature, distilling knowledge learned in BERT can encourage auto-regressive Seq2Seq models to plan ahead, imposing global sequence-level supervision for coherent text generation. Experiments show that the proposed approach significantly outperforms strong Transformer baselines on multiple language generation tasks such as machine translation and text summarization. Our proposed model also achieves a new state of the art on IWSLT German-English and English-Vietnamese MT datasets.", "phrases": ["bert", "text generation", "pre-trained language model", "teacher"], "overall_score": 4.64545312771013, "scores": [3.1434482233534142, 2.5016330990177935, 0.5343048628152174, 0.522586004843104], "rank_score": 1.6754930475073824} -{"id": "strapparava-etal-2012-parallel", "title": "A Parallel Corpus of Music and Lyrics Annotated with Emotions", "abstract": "In this paper, we introduce a novel parallel corpus of music and lyrics, annotated with emotions at line level. We first describe the corpus, consisting of 100 popular songs, each of them including a music component, provided in the MIDI format, as well as a lyrics component, made available as raw text. 
We then describe our work on enhancing this corpus with emotion annotations using crowdsourcing. We also present some initial experiments on emotion classification using the music and the lyrics representations of the songs, which lead to encouraging results, thus demonstrating the promise of using joint music-lyric models for song processing.", "phrases": ["parallel corpus", "music", "lyric", "emotion"], "overall_score": 2.695420315983945, "scores": [2.3947993281134567, 2.30892785610737, 0.8343133985776877, 1.1609946397716169], "rank_score": 1.6747588056425329} -{"id": "prasad-etal-2006-annotating", "title": "Annotating Attribution in the Penn Discourse TreeBank", "abstract": "An emerging task in text understanding and generation is to categorize information as fact or opinion and to further attribute it to the appropriate source. Corpus annotation schemes aim to encode such distinctions for NLP applications concerned with tasks such as information extraction, question answering, summarization, and generation. We describe an annotation scheme for marking the attribution of abstract objects such as propositions, facts and eventualities associated with discourse relations and their arguments annotated in the Penn Discourse TreeBank. The scheme aims to capture the source and degrees of factuality of the abstract objects. Key aspects of the scheme are annotation of the text spans signalling the attribution, and annotation of features recording the source, type, scopal polarity, and determinacy of attribution.", "phrases": ["attribution", "penn discourse treebank", "annotation scheme"], "overall_score": 3.000422013202638, "scores": [2.371214035734084, 2.129368318807418, 0.5231198745785355], "rank_score": 1.674567409706679} -{"id": "hovy-etal-2006-ontonotes", "title": "OntoNotes: The 90% Solution", "abstract": "We describe the OntoNotes methodology and its result, a large multilingual richly-annotated corpus constructed at 90% interannotator agreement. An initial portion (300K words of English newswire and 250K words of Chinese newswire) will be made available to the community during 2007.", "phrases": ["ontonotes", "sense inventory", "wsd", "project", "large-scale semantic annotation"], "overall_score": 7.111354206796946, "scores": [4.43921235543444, 1.5339410937775628, 0.9090977242133733, 0.8950294864260397, 0.591981633786722], "rank_score": 1.6738524587276273} -{"id": "post-etal-2012-constructing", "title": "Constructing Parallel Corpora for Six Indian Languages via Crowdsourcing", "abstract": "Recent work has established the efficacy of Amazon's Mechanical Turk for constructing parallel corpora for machine translation research. We apply this to building a collection of parallel corpora between English and six languages from the Indian subcontinent: Bengali, Hindi, Malayalam, Tamil, Telugu, and Urdu. These languages are low-resource, under-studied, and exhibit linguistic phenomena that are difficult for machine translation. 
We conduct a variety of baseline experiments and analysis, and release the data to the community.", "phrases": ["parallel corpora", "indian language", "crowdsourcing", "bengali"], "overall_score": 4.927385314617704, "scores": [3.1812946748820257, 1.356727814665559, 1.3154417015916615, 0.8403546986142483], "rank_score": 1.6734547224383736} -{"id": "merchant-etal-2020-happens", "title": "What Happens To BERT Embeddings During Fine-tuning?", "abstract": "While much recent work has examined how linguistic information is encoded in pre-trained sentence representations, comparatively little is understood about how these models change when adapted to solve downstream tasks. Using a suite of analysis techniques\u2014supervised probing, unsupervised similarity analysis, and layer-based ablations\u2014we investigate how fine-tuning affects the representations of the BERT model. We find that while fine-tuning necessarily makes some significant changes, there is no catastrophic forgetting of linguistic phenomena. We instead find that fine-tuning is a conservative process that primarily affects the top layers of BERT, albeit with noteworthy variation across tasks. In particular, dependency parsing reconfigures most of the model, whereas SQuAD and MNLI involve much shallower processing. Finally, we also find that fine-tuning has a weaker effect on representations of out-of-domain sentences, suggesting room for improvement in model generalization.", "phrases": ["bert", "fine-tuning", "downstream task", "weight"], "overall_score": 4.740884088975264, "scores": [3.93551289460166, 1.3658219583221487, 0.8691423311269029, 0.5228191028966023], "rank_score": 1.6733240717368285} -{"id": "bingel-sogaard-2017-identifying", "title": "Identifying beneficial task relations for multi-task learning in deep neural networks", "abstract": "Multi-task learning (MTL) in deep neural networks for NLP has recently received increasing interest due to some compelling benefits, including its potential to efficiently regularize models and to reduce the need for labeled data. While it has brought significant improvements in a number of NLP tasks, mixed results have been reported, and little is known about the conditions under which MTL leads to gains in NLP. This paper sheds light on the specific task relations that can lead to gains from MTL models over single-task setups.", "phrases": ["task relation", "multi-task learning", "deep neural network", "predictor"], "overall_score": 6.291470714304004, "scores": [2.488336590187659, 1.8769771611014405, 1.8010655157688835, 0.5245390789804705], "rank_score": 1.6727295865096137} -{"id": "matuschek-gurevych-2013-dijkstra", "title": "Dijkstra-WSA: A Graph-Based Approach to Word Sense Alignment", "abstract": "In this paper, we present Dijkstra-WSA, a novel graph-based algorithm for word sense alignment. We evaluate it on four different pairs of lexical-semantic resources with different characteristics (WordNet-OmegaWiki, WordNet-Wiktionary, GermaNet-Wiktionary and WordNet-Wikipedia) and show that it achieves competitive performance on 3 out of 4 datasets. Dijkstra-WSA outperforms the state of the art on every dataset if it is combined with a back-off based on gloss similarity. 
We also demonstrate that Dijkstra-WSA is not only flexibly applicable to different resources but also highly parameterizable to optimize for precision or recall.", "phrases": ["graph-based approach", "word sense alignment", "dijkstra-wsa", "wiktionary", "research area"], "overall_score": 3.6719610953406696, "scores": [2.9417989755868112, 2.6187233195448845, 1.7117843522738443, 0.5490666378089988, 0.5345342838522744], "rank_score": 1.6711815138133628} -{"id": "prasad-etal-2010-realization", "title": "Realization of Discourse Relations by Other Means: Alternative Lexicalizations", "abstract": "Studies of discourse relations have not, in the past, attempted to characterize what serves as evidence for them, beyond lists of frozen expressions, or markers, drawn from a few well-defined syntactic classes. In this paper, we describe how the lexicalized discourse relation annotations of the Penn Discourse Treebank (PDTB) led to the discovery of a wide range of additional expressions, annotated as AltLex (alternative lexicalizations) in the PDTB 2.0. Further analysis of AltLex annotation suggests that the set of markers is open-ended, and drawn from a wider variety of syntactic types than currently assumed. As a first attempt towards automatically identifying discourse relation markers, we propose the use of syntactic paraphrase methods.", "phrases": ["other mean", "lexicalization", "altlex relation"], "overall_score": 4.524989361831697, "scores": [3.658540962246352, 0.7874603799636092, 0.5668186121505269], "rank_score": 1.6709399847868294} -{"id": "marton-etal-2013-dependency", "title": "Dependency Parsing of Modern Standard Arabic with Lexical and Inflectional Features", "abstract": "We explore the contribution of lexical and inflectional morphology features to dependency parsing of Arabic, a morphologically rich language with complex agreement patterns. Using controlled experiments, we contrast the contribution of different part-of-speech (POS) tag sets and morphological features in two input conditions: machine-predicted condition (in which POS tags and morphological feature values are automatically assigned), and gold condition (in which their true values are known). We find that more informative (fine-grained) tag sets are useful in the gold condition, but may be detrimental in the predicted condition, where they are outperformed by simpler but more accurately predicted tag sets. We identify a set of features (definiteness, person, number, gender, and undiacritized lemma) that improve parsing quality in the predicted condition, whereas other features are more useful in gold. We are the first to show that functional features for gender and number (e.g., \u201cbroken plurals\u201d), and optionally the related rationality (\u201chumanness\u201d) feature, are more helpful for parsing than form-based gender and number. We finally show that parsing quality in the predicted condition can dramatically improve by training in a combined gold+predicted condition. We experimented with two transition-based parsers, MaltParser and Easy-First Parser. Our findings are robust across parsers, models, and input conditions. 
This suggests that the contribution of the linguistic knowledge in the tag sets and features we identified goes beyond particular experimental settings, and may be informative for other parsers and morphologically rich languages.", "phrases": ["arabic", "inflectional morphology", "dependency parsing"], "overall_score": 3.845652819841998, "scores": [3.0927057677426357, 1.3560154764026806, 0.561716152773863], "rank_score": 1.67014579897306} -{"id": "hillard-etal-2003-detection", "title": "Detection Of Agreement vs. Disagreement In Meetings: Training With Unlabeled Data", "abstract": "To support summarization of automatically transcribed meetings, we introduce a classifier to recognize agreement or disagreement utterances, utilizing both word-based and prosodic cues. We show that hand-labeling efforts can be minimized by using unsupervised training on a large unlabeled data set combined with supervised training on a small amount of data. For ASR transcripts with over 45% WER, the system recovers nearly 80% of agree/disagree utterances with a confusion rate of only 3%.", "phrases": ["agreement", "unlabeled data", "detection"], "overall_score": 4.004736024924462, "scores": [3.408407127754016, 0.8205582378207829, 0.7813485589164357], "rank_score": 1.6701046414970786} -{"id": "bos-2008-wide", "title": "Wide-Coverage Semantic Analysis with Boxer", "abstract": "Boxer is an open-domain software component for semantic analysis of text, based on Combinatory Categorial Grammar (CCG) and Discourse Representation Theory (DRT). Used together with the C&C tools, [...] (b) discourse structure triggered by conditionals, negation or discourse adverbs was overall correctly computed; (c) some measure and time expressions are correctly analysed, others aren't; (d) several shallow analyses are given for lexical phrases that require deep analysis; (e) bridging references and pronouns are not resolved in most cases. Boxer is distributed with the C&C tools and freely available for research purposes.", "phrases": ["boxer", "wide-coverage semantic analysis", "meaning representation", "parsing system"], "overall_score": 5.732196746824844, "scores": [3.9921533843417314, 1.0178748311816146, 1.1029361201268895, 0.5640515126767416], "rank_score": 1.6692539620817444} -{"id": "ng-2010-supervised", "title": "Supervised Noun Phrase Coreference Research: The First Fifteen Years", "abstract": "The research focus of computational coreference resolution has exhibited a shift from heuristic approaches to machine learning approaches in the past decade. This paper surveys the major milestones in supervised coreference research since its inception fifteen years ago.", "phrases": ["noun phrase", "coreference resolution", "survey", "mention-pair model", "same discourse entity"], "overall_score": 6.424269945221233, "scores": [2.3513776475330923, 1.9546599742596902, 1.4216196436670097, 1.3513908841650517, 1.2638397209179213], "rank_score": 1.668577574108553} -{"id": "narayan-etal-2017-split", "title": "Split and Rephrase", "abstract": "We propose a new sentence simplification task (Split-and-Rephrase) where the aim is to split a complex sentence into a meaning-preserving sequence of shorter sentences. Like sentence simplification, splitting-and-rephrasing has the potential of benefiting both natural language processing and societal applications. 
Because shorter sentences are generally better processed by NLP systems, it could be used as a preprocessing step which facilitates and improves the performance of parsers, semantic role labellers and machine translation systems. It should also be of use for people with reading disabilities because it allows the conversion of longer sentences into shorter ones. This paper makes two contributions towards this new task. First, we create and make available a benchmark consisting of 1,066,115 tuples mapping a single complex sentence to a sequence of sentences expressing the same meaning. Second, we propose five models (from vanilla sequence-to-sequence to semantically-motivated models) to understand the difficulty of the proposed task.", "phrases": ["rephrase", "complex sentence", "split"], "overall_score": 4.822005235841128, "scores": [2.429912388232029, 1.5300564079306103, 1.044929161341922], "rank_score": 1.6682993191681872} -{"id": "chen-etal-2013-vector", "title": "Vector Space Model for Adaptation in Statistical Machine Translation", "abstract": "This paper proposes a new approach to domain adaptation in statistical machine translation (SMT) based on a vector space model (VSM). The general idea is first to create a vector profile for the in-domain development (\u201cdev\u201d) set. This profile might, for instance, be a vector with a dimensionality equal to the number of training subcorpora; each entry in the vector reflects the contribution of a particular subcorpus to all the phrase pairs that can be extracted from the dev set. Then, for each phrase pair extracted from the training data, we create a vector with features defined in the same way, and calculate its similarity score with the vector representing the dev set. Thus, we obtain a decoding feature whose value represents the phrase pair\u2019s closeness to the dev set. This is a simple, computationally cheap form of instance weighting for phrase pairs. Experiments on large-scale NIST evaluation data show improvements over strong baselines: +1.8 BLEU on Arabic to English and +1.4 BLEU on Chinese to English over a non-adapted baseline, and significant improvements in most circumstances over baselines with linear mixture model adaptation. An informal analysis suggests that VSM adaptation may help in making a good choice among words with the same meaning, on the basis of style and genre.", "phrases": ["vsm", "phrase pair", "vector space model"], "overall_score": 4.14535133205093, "scores": [3.3683406718594346, 1.1144931067460062, 0.5218025111375847], "rank_score": 1.6682120965810086} -{"id": "zhou-etal-2010-predicting", "title": "Predicting Discourse Connectives for Implicit Discourse Relation Recognition", "abstract": "Existing work indicates that the absence of explicit discourse connectives makes it difficult to recognize implicit discourse relations. In this paper we attempt to overcome this difficulty for implicit relation recognition by automatically inserting discourse connectives between arguments with the use of a language model. Then we propose two algorithms to leverage the information of these predicted connectives. One is to use these predicted implicit connectives as additional features in a supervised model. The other is to perform implicit relation recognition based only on these predicted connectives. 
Results on the Penn Discourse Treebank 2.0 show that predicted discourse connectives help implicit relation recognition and the first algorithm can achieve an absolute average f-score improvement of 3% over a state-of-the-art baseline system.", "phrases": ["discourse relation", "language model", "implicit connective"], "overall_score": 5.6146734978781, "scores": [1.8660839083155016, 1.7502550387675986, 1.3859006374626865], "rank_score": 1.6674131948485957} -{"id": "shi-etal-2015-automatically", "title": "Automatically Solving Number Word Problems by Semantic Parsing and Reasoning", "abstract": "This paper presents a semantic parsing and reasoning approach to automatically solving math word problems. A new meaning representation language is designed to bridge natural language text and math expressions. A CFG parser is implemented based on 9,600 semi-automatically created grammar rules. We conduct experiments on a test set of over 1,500 number word problems (i.e., verbally expressed number problems) and yield 95.4% precision and 60.2% recall.", "phrases": ["number word problem", "rule-based approach", "dolphin language"], "overall_score": 4.9941596681321085, "scores": [3.908860000175412, 0.5523067503303738, 0.5401076079073712], "rank_score": 1.6670914528043859} -{"id": "dong-etal-2014-adaptive", "title": "Adaptive Recursive Neural Network for Target-dependent Twitter Sentiment Classification", "abstract": "We propose Adaptive Recursive Neural Network (AdaRNN) for target-dependent Twitter sentiment classification. AdaRNN adaptively propagates the sentiments of words to the target depending on the context and syntactic relationships between them. It consists of more than one composition function, and we model the adaptive sentiment propagations as distributions over these composition functions. The experimental studies illustrate that AdaRNN improves the baseline methods. Furthermore, we introduce a manually annotated dataset for target-dependent Twitter sentiment analysis.", "phrases": ["recursive neural network", "twitter sentiment classification", "dependency tree", "input sentence", "deep learning"], "overall_score": 6.648677957593597, "scores": [3.187684564155839, 2.254768754660843, 1.833088082149931, 0.5351333896434438, 0.523123840798517], "rank_score": 1.6667597262817146} -{"id": "huang-etal-2019-unicoder", "title": "Unicoder: A Universal Language Encoder by Pre-training with Multiple Cross-lingual Tasks", "abstract": "We present Unicoder, a universal language encoder that is insensitive to different languages. Given an arbitrary NLP task, a model can be trained with Unicoder using training data in one language and directly applied to inputs of the same task in other languages. Compared to similar efforts such as Multilingual BERT and XLM, three new cross-lingual pre-training tasks are proposed, including cross-lingual word recovery, cross-lingual paraphrase classification and cross-lingual masked language model. These tasks help Unicoder learn the mappings among different languages from more perspectives. We also find that doing fine-tuning on multiple languages together can bring further improvement. Experiments are performed on two tasks: cross-lingual natural language inference (XNLI) and cross-lingual question answering (XQA), where XLM is our baseline. On XNLI, a 1.8% averaged accuracy improvement (on 15 languages) is obtained. 
On XQA, a new cross-lingual dataset built by us, a 5.5% averaged accuracy improvement (on French and German) is obtained.", "phrases": ["universal language encoder", "cross-lingual task", "xlm", "pre-training task", "unicoder"], "overall_score": 5.819136118943313, "scores": [2.2215992198251873, 2.0033697087620452, 1.4161246080204406, 1.398086718124366, 1.282174721120912], "rank_score": 1.6642709951705903} -{"id": "shen-etal-2007-guided", "title": "Guided Learning for Bidirectional Sequence Classification", "abstract": "In this paper, we propose guided learning, a new learning framework for bidirectional sequence classification. The tasks of learning the order of inference and training the local classifier are dynamically incorporated into a single Perceptron-like learning algorithm. We apply this novel learning algorithm to POS tagging. It obtains an error rate of 2.67% on the standard PTB test set, which represents a 3.3% relative error reduction over the previous best result on the same data set, while using fewer features.", "phrases": ["bidirectional sequence classification", "guided learning", "tagging accuracy"], "overall_score": 5.065900975349016, "scores": [2.5924727573282524, 1.868532682379699, 0.5308128953250147], "rank_score": 1.663939445010989} -{"id": "hajic-etal-2012-announcing", "title": "Announcing Prague Czech-English Dependency Treebank 2.0", "abstract": "We introduce a substantial update of the Prague Czech-English Dependency Treebank, a parallel corpus manually annotated at the deep syntactic layer of linguistic representation. The English part consists of the Wall Street Journal (WSJ) section of the Penn Treebank. The Czech part was translated from the English source sentence by sentence. This paper gives a high-level overview of the underlying linguistic theory (the so-called tectogrammatical annotation) with some details of the most important features like valency annotation, ellipsis reconstruction or coreference.", "phrases": ["prague", "czech-english dependency treebank", "detail"], "overall_score": 3.830296021301369, "scores": [2.623599817002517, 1.8208309838692367, 0.54599847744974], "rank_score": 1.6634764261071646} -{"id": "wang-etal-2020-tplinker", "title": "TPLinker: Single-stage Joint Extraction of Entities and Relations Through Token Pair Linking", "abstract": "Extracting entities and relations from unstructured text has attracted increasing attention in recent years but remains challenging, due to the intrinsic difficulty in identifying overlapping relations with shared entities. Prior works show that joint learning can result in a noticeable performance gain. However, they usually involve sequential interrelated steps and suffer from the problem of exposure bias. At training time, they predict with the ground-truth conditions, while at inference they have to extract from scratch. This discrepancy leads to error accumulation. To mitigate the issue, we propose in this paper a one-stage joint extraction model, namely, TPLinker, which is capable of discovering overlapping relations sharing one or both entities while being immune to exposure bias. TPLinker formulates joint extraction as a token pair linking problem and introduces a novel handshaking tagging scheme that aligns the boundary tokens of entity pairs under each relation type. 
Experimental results show that TPLinker performs significantly better on overlapping and multiple relation extraction, and achieves state-of-the-art performance on two public datasets.", "phrases": ["extraction", "token pair", "tagging scheme", "entity pair", "tplinker"], "overall_score": 4.13352454089975, "scores": [3.0752031786306633, 2.991669221306213, 1.124758923501967, 0.5797207257504653, 0.5459111792953546], "rank_score": 1.6634526456969325} -{"id": "agirre-etal-2009-study", "title": "A Study on Similarity and Relatedness Using Distributional and WordNet-based Approaches", "abstract": "This paper presents and compares WordNet-based and distributional similarity approaches. The strengths and weaknesses of each approach regarding similarity and relatedness tasks are discussed, and a combination is presented. Each of our methods independently provides the best results in its class on the RG and WordSim353 datasets, and a supervised combination of them yields the best published results on all datasets. Finally, we pioneer cross-lingual similarity, showing that our methods are easily adapted for a cross-lingual task with minor losses.", "phrases": ["relatedness", "wordsim353 dataset", "terawords"], "overall_score": 6.47335831323371, "scores": [2.832196009472507, 1.2596380839403263, 0.8981380666915411], "rank_score": 1.6633240533681246} -{"id": "chi-etal-2020-finding", "title": "Finding Universal Grammatical Relations in Multilingual BERT", "abstract": "Recent work has found evidence that Multilingual BERT (mBERT), a transformer-based multilingual masked language model, is capable of zero-shot cross-lingual transfer, suggesting that some aspects of its representations are shared cross-lingually. To better understand this overlap, we extend recent work on finding syntactic trees in neural networks' internal representations to the multilingual setting. We show that subspaces of mBERT representations recover syntactic tree distances in languages other than English, and that these subspaces are approximately shared across languages. Motivated by these results, we present an unsupervised analysis method that provides evidence that mBERT learns representations of syntactic dependency labels, in the form of clusters which largely agree with the Universal Dependencies taxonomy. This evidence suggests that even without explicit supervision, multilingual masked language models learn certain linguistic universals.", "phrases": ["multilingual bert", "tree distance", "syntactic dependency label"], "overall_score": 4.387533683009965, "scores": [3.2491258400339498, 1.2112847391754242, 0.5272040896045227], "rank_score": 1.6625382229379657} -{"id": "alsarsour-etal-2018-dart", "title": "DART: A Large Dataset of Dialectal Arabic Tweets", "abstract": "In this paper, we present a new large manually-annotated multi-dialect dataset of Arabic tweets that is publicly available. The Dialectal ARabic Tweets (DART) dataset has about 25K tweets that are annotated via crowdsourcing and it is well-balanced over five main groups of Arabic dialects: Egyptian, Maghrebi, Levantine, Gulf, and Iraqi. The paper outlines the pipeline of constructing the dataset from crawling tweets that match a list of dialect phrases to annotating the tweets by the crowd. We also touch on some of the challenges we faced during the process. We evaluate the quality of the dataset from two perspectives: the inter-annotator agreement and the accuracy of the final labels. 
Results show that both measures were substantially high for the Egyptian, Gulf, and Levantine dialect groups, but lower for the Iraqi and Maghrebi dialects, which indicates the difficulty of identifying those two dialects manually and hence automatically.", "phrases": ["dialectal arabic tweets", "dialect", "dart"], "overall_score": 2.675313059616446, "scores": [2.090790384379505, 2.0174198734083, 0.8785861366819738], "rank_score": 1.6622654648232595} -{"id": "lin-etal-2020-joint", "title": "A Joint Neural Model for Information Extraction with Global Features", "abstract": "Most existing joint neural models for Information Extraction (IE) use local task-specific classifiers to predict labels for individual instances (e.g., trigger, relation) regardless of their interactions. For example, a victim of a die event is likely to be a victim of an attack event in the same sentence. In order to capture such cross-subtask and cross-instance inter-dependencies, we propose a joint neural framework, OneIE, that aims to extract the globally optimal IE result as a graph from an input sentence. OneIE performs end-to-end IE in four stages: (1) Encoding a given sentence as contextualized word representations; (2) Identifying entity mentions and event triggers as nodes; (3) Computing label scores for all nodes and their pairwise links using local classifiers; (4) Searching for the globally optimal graph with a beam decoder. At the decoding stage, we incorporate global features to capture the cross-subtask and cross-instance interactions. Experiments show that adding global features improves the performance of our model and achieves new state-of-the-art on all subtasks. In addition, as OneIE does not use any language-specific feature, we prove it can be easily applied to new languages or trained in a multilingual manner.", "phrases": ["joint neural model", "information extraction", "global feature", "input sentence", "error propagation"], "overall_score": 5.653258720296146, "scores": [3.218047405830954, 2.769321485070106, 0.9250528272040298, 0.8431573451293194, 0.5551099176171163], "rank_score": 1.662137796170305} -{"id": "wu-etal-2018-study", "title": "A Study of Reinforcement Learning for Neural Machine Translation", "abstract": "Recent studies have shown that reinforcement learning (RL) is an effective approach for improving the performance of neural machine translation (NMT) systems. However, due to its instability, successful RL training is challenging, especially in real-world systems where deep models and large datasets are leveraged. In this paper, taking several large-scale translation tasks as testbeds, we conduct a systematic study on how to train better NMT models using reinforcement learning. We provide a comprehensive comparison of several important factors (e.g., baseline reward, reward shaping) in RL training. Furthermore, since it remains unclear whether RL is still beneficial when monolingual data is used, we propose a new method to leverage RL to further boost the performance of NMT systems trained with source/target monolingual data. 
By integrating all our findings, we obtain competitive results on WMT14 English-German, WMT17 English-Chinese, and WMT17 Chinese-English translation tasks, especially setting state-of-the-art performance on the WMT17 Chinese-English translation task.", "phrases": ["reinforcement learning", "neural machine translation", "gradient estimation"], "overall_score": 4.386455256064232, "scores": [2.5030984520418054, 1.9583263335676069, 0.5249639602959926], "rank_score": 1.6621295819684683} -{"id": "quirk-poon-2017-distant", "title": "Distant Supervision for Relation Extraction beyond the Sentence Boundary", "abstract": "The growing demand for structured knowledge has led to great interest in relation extraction, especially in cases with limited supervision. However, existing distant supervision approaches only extract relations expressed in single sentences. In general, cross-sentence relation extraction is under-explored, even in the supervised-learning setting. In this paper, we propose the first approach for applying distant supervision to cross-sentence relation extraction. At the core of our approach is a graph representation that can incorporate both standard dependencies and discourse relations, thus providing a unifying way to model relations within and across sentences. We extract features from multiple paths in this graph, increasing accuracy and robustness when confronted with linguistic variation and analysis error. Experiments on an important extraction task for precision medicine show that our approach can learn an accurate cross-sentence extractor, using only a small existing knowledge base and unlabeled text from biomedical research articles. Compared to the existing distant supervision paradigm, our approach extracted twice as many relations at similar precision, thus demonstrating the prevalence of cross-sentence relations and the promise of our approach.", "phrases": ["relation extraction", "sentence boundary", "distant supervision", "biomedical domain", "document graph"], "overall_score": 5.533006727320508, "scores": [3.770804252138818, 1.9095292455500392, 0.9010899796027685, 0.8771019781944952, 0.8437961913770071], "rank_score": 1.6604643293726256} -{"id": "zhou-etal-2018-neural-document", "title": "Neural Document Summarization by Jointly Learning to Score and Select Sentences", "abstract": "Sentence scoring and sentence selection are two main steps in extractive document summarization systems. However, previous works treat them as two separate subtasks. In this paper, we present a novel end-to-end neural network framework for extractive document summarization by jointly learning to score and select sentences. It first reads the document sentences with a hierarchical encoder to obtain the representation of sentences. Then it builds the output summary by extracting sentences one by one. Different from previous methods, our approach integrates the selection strategy into the scoring model, which directly predicts the relative importance given previously selected sentences. 
Experiments on the CNN/Daily Mail dataset show that the proposed framework significantly outperforms the state-of-the-art extractive summarization models.", "phrases": ["document summarization", "select sentence", "neural sequence model"], "overall_score": 5.591117121676809, "scores": [3.4408624152221834, 0.9842796992160837, 0.5561105618666247], "rank_score": 1.6604175587682972} -{"id": "ramesh-etal-2022-samanantar", "title": "Samanantar: The Largest Publicly Available Parallel Corpora Collection for 11 Indic Languages", "abstract": "We present Samanantar, the largest publicly available parallel corpora collection for Indic languages. The collection contains a total of 49.7 million sentence pairs between English and 11 Indic languages (from two language families). Specifically, we compile 12.4 million sentence pairs from existing, publicly available parallel corpora, and additionally mine 37.4 million sentence pairs from the Web, resulting in a 4x increase. We mine the parallel sentences from the Web by combining many corpora, tools, and methods: (a) Web-crawled monolingual corpora, (b) document OCR for extracting sentences from scanned documents, (c) multilingual representation models for aligning sentences, and (d) approximate nearest neighbor search for searching in a large collection of sentences. Human evaluation of samples from the newly mined corpora validates the high quality of the parallel sentences across 11 languages. Further, we extract 83.4 million sentence pairs between all 55 Indic language pairs from the English-centric parallel corpus using English as the pivot language. We trained multilingual NMT models spanning all these languages on Samanantar, which outperform existing models and baselines on publicly available benchmarks, such as FLORES, establishing the utility of Samanantar. Our data and models are available publicly at Samanantar and we hope they will help advance research in NMT and multilingual NLP for Indic languages.", "phrases": ["parallel corpora collection", "indic language", "samanantar"], "overall_score": 2.6723377229416303, "scores": [2.2473894742063423, 1.7844175335397803, 0.9494433448692376], "rank_score": 1.66041678420512} -{"id": "strube-muller-2003-machine", "title": "A Machine Learning Approach to Pronoun Resolution in Spoken Dialogue", "abstract": "We apply a decision tree based approach to pronoun resolution in spoken dialogue. Our system deals with pronouns with NP- and non-NP-antecedents. We present a set of features designed for pronoun resolution in spoken dialogue and determine the most promising features. We evaluate the system on twenty Switchboard dialogues and show that it compares well to Byron's (2002) manually tuned system.", "phrases": ["machine learning approach", "pronoun resolution", "spoken dialogue"], "overall_score": 4.599012588701406, "scores": [3.264072685910088, 0.9022778624872387, 0.8098789426317469], "rank_score": 1.658743163676358} -{"id": "castro-ferreira-etal-2016-individual", "title": "Individual Variation in the Choice of Referential Form", "abstract": "This study aims to measure the variation between writers in their choices of referential form by collecting and analysing a new and publicly available corpus of referring expressions. The corpus is composed of referring expressions produced by different participants in identical situations. Results, measured in terms of normalized entropy, reveal substantial individual variation. 
We discuss the problems and prospects of this finding for automatic text generation applications.", "phrases": ["choice", "referential form", "writer", "individual variation", "position"], "overall_score": 3.81816763459231, "scores": [3.5615484926716445, 1.7000481820416489, 0.8463994624164249, 1.1092362953083943, 1.073813240987045], "rank_score": 1.6582091346850316} -{"id": "johnson-goldwater-2009-improving", "title": "Improving nonparameteric Bayesian inference: experiments on unsupervised word segmentation with adaptor grammars", "abstract": "One of the reasons nonparametric Bayesian inference is attracting attention in computational linguistics is because it provides a principled way of learning the units of generalization together with their probabilities. Adaptor grammars are a framework for defining a variety of hierarchical nonparametric Bayesian models. This paper investigates some of the choices that arise in formulating adaptor grammars and associated inference procedures, and shows that they can have a dramatic impact on performance in an unsupervised word segmentation task. With appropriate adaptor grammars and inference procedures we achieve an 87% word token f-score on the standard Brent version of the Bernstein-Ratner corpus, which is an error reduction of over 35% over the best previously reported results for this corpus.", "phrases": ["bayesian inference", "word segmentation", "adaptor grammar"], "overall_score": 3.97545518416768, "scores": [2.6767425359001753, 1.4603915211716914, 0.8365466902882288], "rank_score": 1.6578935824533652} -{"id": "henrich-hinrichs-2010-standardizing", "title": "Standardizing Wordnets in the ISO Standard LMF: Wordnet-LMF for GermaNet", "abstract": "It has been recognized for quite some time that sustainable data formats play an important role in the development and curation of linguistic resources. The purpose of this paper is to show how GermaNet, the German version of the Princeton WordNet, can be converted to the Lexical Markup Framework (LMF), a published ISO standard (ISO-24613) for encoding lexical resources. The conversion builds on Wordnet-LMF, which has been proposed in the context of the EU KYOTO project as an LMF format for wordnets. The present paper proposes a number of crucial modifications and a set of extensions to Wordnet-LMF that are needed for conversion of wordnets in general and for conversion of GermaNet in particular.", "phrases": ["wordnet-lmf", "germanet", "conversion"], "overall_score": 2.6671138588378676, "scores": [2.5572596647771757, 1.8863521446591438, 0.5279012353958608], "rank_score": 1.6571710149440602} -{"id": "cohn-lapata-2007-machine", "title": "Machine Translation by Triangulation: Making Effective Use of Multi-Parallel Corpora", "abstract": "Current phrase-based SMT systems perform poorly when using small training sets. This is a consequence of unreliable translation estimates and low coverage over source and target phrases. This paper presents a method which alleviates this problem by exploiting multiple translations of the same source phrase. Central to our approach is triangulation, the process of translating from a source to a target language via an intermediate third language. This allows the use of a much wider range of parallel corpora for training, and can be combined with a standard phrase-table using conventional smoothing methods. 
Experimental results demonstrate BLEU improvements for triangulated models over a standard phrase-based system.", "phrases": ["triangulation", "machine translation", "pivot language", "second strategy", "source-target model"], "overall_score": 5.794030605539204, "scores": [2.8414550260750295, 2.6396011355967555, 1.7049463411369263, 0.5571740872287572, 0.5422775433851865], "rank_score": 1.6570908266845312} -{"id": "das-bandyopadhyay-2010-sentiwordnet", "title": "SentiWordNet for Indian Languages", "abstract": "The discipline where sentiment/opinion/emotion has been identified and classified in human-written text is well known as sentiment analysis. A typical computational approach to sentiment analysis starts with prior polarity lexicons where entries are tagged with their prior out-of-context polarity as human beings perceive using their cognitive knowledge. To date, all research efforts found in sentiment lexicon literature deal mostly with English texts. In this article, we propose multiple computational techniques like WordNet-based, dictionary-based, corpus-based or generative approaches for generating SentiWordNet(s) for Indian languages. Currently, SentiWordNet(s) are being developed for three Indian languages: Bengali, Hindi and Telugu. An online intuitive game has been developed to create and validate the developed SentiWordNet(s) by involving the Internet population. A number of automatic, semi-automatic and manual validations and evaluation methodologies have been adopted to measure the coverage and credibility of the developed SentiWordNet(s).", "phrases": ["indian language", "wordnet", "bengali"], "overall_score": 3.215585903180958, "scores": [2.9379170732160866, 1.053586510636204, 0.9659492123244049], "rank_score": 1.652484265392232} -{"id": "prabhumoye-etal-2018-style", "title": "Style Transfer Through Back-Translation", "abstract": "Style transfer is the task of rephrasing the text to contain specific stylistic properties without changing the intent or affect within the context. This paper introduces a new method for automatic style transfer. We first learn a latent representation of the input sentence which is grounded in a language translation model in order to better preserve the meaning of the sentence while reducing stylistic properties. Then adversarial generation techniques are used to make the output match the desired style. We evaluate this technique on three different style transformations: sentiment, gender and political slant. Compared to two state-of-the-art style transfer modeling techniques we show improvements both in automatic evaluation of style transfer and in manual evaluation of meaning preservation and fluency.", "phrases": ["stylistic property", "latent representation", "fluency", "style transfer", "sentiment transfer"], "overall_score": 6.430295193574602, "scores": [4.058809708548722, 1.446609330974106, 1.1174302579015, 1.0648092605922932, 0.5736365442987882], "rank_score": 1.6522590204630816} -{"id": "hassan-mihalcea-2009-cross", "title": "Cross-lingual Semantic Relatedness Using Encyclopedic Knowledge", "abstract": "In this paper, we address the task of cross-lingual semantic relatedness. We introduce a method that relies on the information extracted from Wikipedia, by exploiting the interlanguage links available between Wikipedia versions in multiple languages. 
Through experiments performed on several language pairs, we show that the method performs well, with a performance comparable to monolingual measures of relatedness.", "phrases": ["semantic relatedness", "wikipedia", "different language"], "overall_score": 3.6303665940768215, "scores": [3.1921418515393776, 0.9244054782596737, 0.8402057916475363], "rank_score": 1.6522510404821957} -{"id": "pitler-nenkova-2009-using", "title": "Using Syntax to Disambiguate Explicit Discourse Connectives in Text", "abstract": "Discourse connectives are words or phrases such as once, since, and on the contrary that explicitly signal the presence of a discourse relation. There are two types of ambiguity that need to be resolved during discourse processing. First, a word can be ambiguous between discourse and non-discourse usage. For example, once can be either a temporal discourse connective or simply a word meaning \"formerly\". Secondly, some connectives are ambiguous in terms of the relation they mark. For example, since can serve as either a temporal or causal connective. We demonstrate that syntactic features improve performance in both disambiguation tasks. We report state-of-the-art results for identifying discourse vs. non-discourse usage and human-level performance on sense disambiguation.", "phrases": ["disambiguation", "discourse connective", "syntactic feature"], "overall_score": 6.048560690609683, "scores": [2.0575390879564863, 2.0031397950502408, 0.8923378325708529], "rank_score": 1.6510055718591932} -{"id": "gao-etal-2019-jointly", "title": "Jointly Optimizing Diversity and Relevance in Neural Response Generation", "abstract": "Although recent neural conversation models have shown great potential, they often generate bland and generic responses. While various approaches have been explored to diversify the output of the conversation model, the improvement often comes at the cost of decreased relevance. In this paper, we propose a SpaceFusion model to jointly optimize diversity and relevance that essentially fuses the latent space of a sequence-to-sequence model and that of an autoencoder model by leveraging novel regularization terms. As a result, our approach induces a latent space in which the distance and direction from the predicted response vector roughly match the relevance and diversity, respectively. This property also lends itself well to an intuitive visualization of the latent space. Both automatic and human evaluation results demonstrate that the proposed approach brings significant improvement compared to strong baselines in both diversity and relevance.", "phrases": ["relevance", "latent space", "direction"], "overall_score": 4.355608815626715, "scores": [2.8741630535221523, 1.2451300182323515, 0.8320303800877206], "rank_score": 1.6504411506140748} -{"id": "stoyanov-etal-2010-coreference", "title": "Coreference Resolution with Reconcile", "abstract": "Despite the existence of several noun phrase coreference resolution data sets as well as several formal evaluations on the task, it remains frustratingly difficult to compare results across different coreference resolution systems. This is due to the high cost of implementing a complete end-to-end coreference resolution system, which often forces researchers to substitute available gold-standard information in lieu of implementing a module that would compute that information. Unfortunately, this leads to inconsistent and often unrealistic evaluation scenarios. 
\n \nWith the aim to facilitate consistent and realistic experimental evaluations in coreference resolution, we present Reconcile, an infrastructure for the development of learning-based noun phrase (NP) coreference resolution systems. Reconcile is designed to facilitate the rapid creation of coreference resolution systems, easy implementation of new feature sets and approaches to coreference resolution, and empirical evaluation of coreference resolvers across a variety of benchmark data sets and standard scoring metrics. We describe Reconcile and present experimental results showing that Reconcile can be used to create a coreference resolver that achieves performance comparable to state-of-the-art systems on six benchmark data sets.", "phrases": ["reconcile", "coreference resolution", "mention-pair model"], "overall_score": 3.953071849293286, "scores": [2.5666867086366243, 1.8168249018949607, 0.562165409816355], "rank_score": 1.6485590067826468} -{"id": "pilehvar-etal-2013-align", "title": "Align, Disambiguate and Walk: A Unified Approach for Measuring Semantic Similarity", "abstract": "Semantic similarity is an essential component of many Natural Language Processing applications. However, prior methods for computing semantic similarity often operate at different levels, e.g., single words or entire documents, which requires adapting the method for each data type. We present a unified approach to semantic similarity that operates at multiple levels, all the way from comparing word senses to comparing text documents. Our method leverages a common probabilistic representation over word senses in order to compare different types of linguistic data. This unified representation shows state-of-the-art performance on three tasks: semantic textual similarity, word similarity, and word sense coarsening.", "phrases": ["disambiguate", "walk", "semantic similarity", "text document"], "overall_score": 4.347960254685548, "scores": [3.5182627849816774, 1.400990214073502, 1.1403596366514264, 0.5305590985609879], "rank_score": 1.6475429335668983} -{"id": "minkov-etal-2005-extracting", "title": "Extracting Personal Names from Email: Applying Named Entity Recognition to Informal Text", "abstract": "There has been little prior work on Named Entity Recognition for \"informal\" documents like email. We present two methods for improving performance of person name recognizers for email: email-specific structural features and a recall-enhancing method which exploits name repetition across multiple documents.", "phrases": ["personal name", "email", "entity recognition", "informal text"], "overall_score": 4.091377791885545, "scores": [2.8170047720189078, 1.8312515903136843, 1.3896095374916546, 0.5481002848364139], "rank_score": 1.6464915461651652} -{"id": "rothe-etal-2020-leveraging", "title": "Leveraging Pre-trained Checkpoints for Sequence Generation Tasks", "abstract": "Unsupervised pre-training of large neural models has recently revolutionized Natural Language Processing. By warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. 
We developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT, GPT-2, and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both encoder and decoder, with these checkpoints. Our models result in new state-of-the-art results on Machine Translation, Text Summarization, Sentence Splitting, and Sentence Fusion.", "phrases": ["pre-trained checkpoint", "checkpoint", "state-of-the-art result"], "overall_score": 5.542947844579174, "scores": [2.0968195345289384, 2.004695465335506, 0.8368225782165162], "rank_score": 1.6461125260269869} -{"id": "hough-schlangen-2017-joint", "title": "Joint, Incremental Disfluency Detection and Utterance Segmentation from Speech", "abstract": "We present the joint task of incremental disfluency detection and utterance segmentation and a simple deep learning system which performs it on transcripts and ASR results. We show how the constraints of the two tasks interact. Our joint-task system outperforms the equivalent individual task systems, provides competitive results and is suitable for future use in conversation agents in the psychiatric domain.", "phrases": ["incremental disfluency detection", "utterance segmentation", "joint task"], "overall_score": 2.2792974646946753, "scores": [2.5095992518507098, 1.877627663215476, 0.545269808472507], "rank_score": 1.6441655745128976} -{"id": "castilho-etal-2020-context", "title": "On Context Span Needed for Machine Translation Evaluation", "abstract": "Despite increasing efforts to improve evaluation of machine translation (MT) by going beyond the sentence level to the document level, the definition of what exactly constitutes a \u201cdocument level\u201d is still not clear. This work deals with the context span necessary for a more reliable MT evaluation. We report results from a series of surveys involving three domains and 18 target languages designed to identify the necessary context span as well as issues related to it. Our findings indicate that, despite the fact that some issues and spans are strongly dependent on domain and on the target language, a number of common patterns can be observed so that general guidelines for context-aware MT evaluation can be drawn.", "phrases": ["context span", "native speaker", "document-level evaluation"], "overall_score": 3.783699429710932, "scores": [3.3005238448022554, 1.0767755941653379, 0.552419911544222], "rank_score": 1.6432397835039385} -{"id": "kryscinski-etal-2020-evaluating", "title": "Evaluating the Factual Consistency of Abstractive Text Summarization", "abstract": "The most common metrics for assessing summarization algorithms do not account for whether summaries are factually consistent with source documents. We propose a weakly-supervised, model-based approach for verifying factual consistency and identifying conflicts between source documents and generated summaries. Training data is generated by applying a series of rule-based transformations to the sentences of source documents. The factual consistency model is then trained jointly for three tasks: 1) predict whether each summary sentence is factually consistent or not, 2) in either case, extract a span in the source document to support this consistency prediction, 3) for each summary sentence that is deemed inconsistent, extract the inconsistent span from it. 
Transferring this model to summaries generated by several neural models reveals that this highly scalable approach outperforms previous models, including those trained with strong supervision using datasets from related domains, such as natural language inference and fact checking. Additionally, human evaluation shows that the auxiliary span extraction tasks provide useful assistance in the process of verifying factual consistency. We also release a manually annotated dataset for factual consistency verification, code for training data generation, and trained model weights at .", "phrases": ["factual consistency", "series", "natural language inference", "input document"], "overall_score": 6.780197231261249, "scores": [4.152868803171951, 1.045444356791607, 0.8491030156497732, 0.5239203486633508], "rank_score": 1.6428341310691705} -{"id": "chatterjee-etal-2018-findings", "title": "Findings of the WMT 2018 Shared Task on Automatic Post-Editing", "abstract": "We present the results from the fourth round of the WMT shared task on MT Automatic Post-Editing. The task consists in automatically correcting the output of a \u201cblack-box\u201d machine translation system by learning from human corrections. Keeping the same general evaluation setting of the three previous rounds, this year we focused on one language pair (English-German) and on domain-specific data (Information Technology), with MT outputs produced by two different paradigms: phrase-based (PBSMT) and neural (NMT). Five teams submitted respectively 11 runs for the PBSMT subtask and 10 runs for the NMT subtask. In the former subtask, characterized by original translations of lower quality, top results achieved impressive improvements, up to -6.24 TER and +9.53 BLEU points over the baseline \u201cdo-nothing\u201d system. The NMT subtask proved to be more challenging due to the higher quality of the original translations and the availability of less training data. In this case, top results show smaller improvements up to -0.38 TER and +0.8 BLEU points.", "phrases": ["automatic post-editing", "machine translation", "ape system"], "overall_score": 5.0763474218199205, "scores": [3.0097174335413195, 1.3904730985474494, 0.5266399774417321], "rank_score": 1.642276836510167} -{"id": "wang-etal-2019-tree", "title": "Tree Transformer: Integrating Tree Structures into Self-Attention", "abstract": "Pre-training Transformer from large-scale raw texts and fine-tuning on the desired task have achieved state-of-the-art results on diverse NLP tasks. However, it is unclear what the learned attention captures. The attention computed by attention heads seems not to match human intuitions about hierarchical structures. This paper proposes Tree Transformer, which adds an extra constraint to attention heads of the bidirectional Transformer encoder in order to encourage the attention heads to follow tree structures. The tree structures can be automatically induced from raw texts by our proposed \u201cConstituent Attention\u201d module, which is simply implemented by self-attention between two adjacent words. 
With a training procedure identical to BERT's, the experiments demonstrate the effectiveness of Tree Transformer in terms of inducing tree structures, better language modeling, and further learning more explainable attention scores.", "phrases": ["self-attention", "human intuition", "tree transformer", "locality constraint", "low layer"], "overall_score": 4.834603984379787, "scores": [3.2759165550973233, 2.9585262095787392, 0.8574732373353444, 0.5709434396165334, 0.5468606758330179], "rank_score": 1.6419440234921918} -{"id": "chen-manning-2014-fast", "title": "A Fast and Accurate Dependency Parser using Neural Networks", "abstract": "Almost all current dependency parsers classify based on millions of sparse indicator features. Not only do these features generalize poorly, but the cost of feature computation restricts parsing speed significantly. In this work, we propose a novel way of learning a neural network classifier for use in a greedy, transition-based dependency parser. Because this classifier learns and uses just a small number of dense features, it can work very fast, while achieving an improvement of about 2% in unlabeled and labeled attachment scores on both English and Chinese datasets. Concretely, our parser is able to parse more than 1000 sentences per second at 92.2% unlabeled attachment score on the English Penn Treebank.", "phrases": ["dependency parser", "word embedding", "feed-forward neural network", "deep learning"], "overall_score": 7.731874779697924, "scores": [2.6952908182037367, 1.9471126301911876, 1.3881357353196335, 0.536463849873292], "rank_score": 1.6417507583969626} -{"id": "zhou-etal-2014-simple", "title": "A Simple Bayesian Modelling Approach to Event Extraction from Twitter", "abstract": "With the proliferation of social media sites, social streams have proven to contain the most up-to-date information on current events. Therefore, it is crucial to extract events from the social streams such as tweets. However, it is not straightforward to adapt the existing event extraction systems since texts in social media are fragmented and noisy. In this paper we propose a simple and yet effective Bayesian model, called Latent Event Model (LEM), to extract structured representation of events from social media. LEM is fully unsupervised and does not require annotated data for training. We evaluate LEM on a Twitter corpus. Experimental results show that the proposed model achieves 83% in F-measure, and outperforms the state-of-the-art baseline by over 7%.", "phrases": ["bayesian model", "event extraction", "twitter"], "overall_score": 2.941153162111936, "scores": [1.9861074871168731, 1.7280168512145293, 1.2103421639370215], "rank_score": 1.6414888340894747} -{"id": "ismail-manandhar-2010-bilingual", "title": "Bilingual lexicon extraction from comparable corpora using in-domain terms", "abstract": "Many existing methods for bilingual lexicon learning from comparable corpora are based on similarity of context vectors. These methods suffer from noisy vectors that greatly affect their accuracy. We introduce a method for filtering this noise allowing highly accurate learning of bilingual lexicons. Our method is based on the notion of in-domain terms which can be thought of as the most important contextually relevant words. We provide a method for identifying such terms. Our evaluation shows that the proposed method can learn highly accurate bilingual lexicons without using orthographic features or a large initial seed dictionary. 
In addition, we also introduce a method for measuring the similarity between two words in different languages without requiring any initial dictionary.", "phrases": ["comparable corpora", "in-domain term", "context vector"], "overall_score": 3.4124729267634875, "scores": [2.310149121343352, 1.7880762968898523, 0.824932350376327], "rank_score": 1.6410525895365102} -{"id": "zhang-etal-2017-dependency", "title": "Dependency Parsing as Head Selection", "abstract": "Conventional graph-based dependency parsers guarantee a tree structure both during training and inference. Instead, we formalize dependency parsing as the problem of independently selecting the head of each word in a sentence. Our model which we call DENSE (as shorthand for Dependency Neural Selection) produces a distribution over possible heads for each word using features obtained from a bidirectional recurrent neural network. Without enforcing structural constraints during training, DeNSe generates (at inference time) trees for the overwhelming majority of sentences, while non-tree outputs can be adjusted with a maximum spanning tree algorithm. We evaluate DeNSe on four languages (English, Chinese, Czech, and German) with varying degrees of non-projectivity. Despite the simplicity of the approach, our parsers are on par with the state of the art.", "phrases": ["head selection", "czech", "simplicity", "dependency parsing"], "overall_score": 4.826473243413853, "scores": [3.3799673453307797, 2.065617080379772, 0.5606042891057056, 0.5505418237533815], "rank_score": 1.6391826346424097} -{"id": "icard-iii-moss-2014-recent", "title": "Recent Progress on Monotonicity", "abstract": "This paper serves two purposes. It is a summary of much work concerning formal treatments of monotonicity and polarity in natural language, and it also discusses connections to related work on exclusion relations, and connections to psycholinguistics and computational linguistics. The second part of the paper presents a summary of some new work on a formal Monotonicity Calculus.", "phrases": ["monotonicity", "precise semantic", "pervasive feature"], "overall_score": 3.6010936104066658, "scores": [3.7874525648177286, 0.5861927704645035, 0.5431396591388815], "rank_score": 1.6389283314737044} -{"id": "potthast-etal-2010-evaluation", "title": "An Evaluation Framework for Plagiarism Detection", "abstract": "We present an evaluation framework for plagiarism detection. The framework provides performance measures that address the specifics of plagiarism detection, and the PAN-PC-10 corpus, which contains 64 558 artificial and 4 000 simulated plagiarism cases, the latter generated via Amazon's Mechanical Turk. We discuss the construction principles behind the measures and the corpus, and we compare the quality of our corpus to existing corpora. Our analysis gives empirical evidence that the construction of tailored training corpora for plagiarism detection can be automated, and hence be done on a large scale.", "phrases": ["evaluation framework", "plagiarism detection", "source text"], "overall_score": 3.188818487883988, "scores": [2.690036315321577, 1.6847000253575026, 0.541449264445709], "rank_score": 1.6387285350415963} -{"id": "melamud-etal-2015-simple", "title": "A Simple Word Embedding Model for Lexical Substitution", "abstract": "The lexical substitution task requires identifying meaning-preserving substitutes for a target word instance in a given sentential context. 
Since its introduction in SemEval-2007, various models addressed this challenge, mostly in an unsupervised setting. In this work we propose a simple model for lexical substitution, which is based on the popular skip-gram word embedding model. The novelty of our approach is in leveraging explicitly the context embeddings generated within the skip-gram model, which were so far considered only as an internal component of the learning process. Our model is efficient, very simple to implement, and at the same time achieves state-of-the-art results on lexical substitution tasks in an unsupervised setting.", "phrases": ["lexical substitution", "substitute", "simple model"], "overall_score": 4.437633841641062, "scores": [3.3118965272919314, 1.0632735071636092, 0.5408767653802491], "rank_score": 1.6386822666119298} -{"id": "hardmeier-federico-2010-modelling", "title": "Modelling pronominal anaphora in statistical machine translation", "abstract": "Current Statistical Machine Translation (SMT) systems translate texts sentence by sentence without considering any cross-sentential context. Assuming independence between sentences makes it difficult to take certain translation decisions when the necessary information cannot be determined locally. We argue for the necessity to include cross-sentence dependencies in SMT. As a case in point, we study the problem of pronominal anaphora translation by manually evaluating German-English SMT output. We then present a word dependency model for SMT, which can represent links between word pairs in the same or in different sentences. We use this model to integrate the output of a coreference resolution system into English-German SMT with a view to improving the translation of anaphoric pronouns.", "phrases": ["machine translation", "pronoun", "anaphora resolution"], "overall_score": 4.987484597926505, "scores": [2.6464087103468197, 1.6926674177768957, 0.5754725736319078], "rank_score": 1.6381829005852078} -{"id": "carreras-marquez-2004-introduction", "title": "Introduction to the CoNLL-2004 Shared Task: Semantic Role Labeling", "abstract": "In this paper we describe the CoNLL-2004 shared task: semantic role labeling. We introduce the specification and goal of the task, describe the data sets and evaluation methods, and present a general overview of the systems that have contributed to the task, providing comparative description.", "phrases": ["conll-2004", "semantic role labeling", "srl", "propbank", "much attention"], "overall_score": 6.271895642971186, "scores": [4.267560961324917, 0.8766801266151506, 1.926630836522783, 0.5603343382582123, 0.5595525384591434], "rank_score": 1.6381517602360414} -{"id": "card-etal-2015-media", "title": "The Media Frames Corpus: Annotations of Frames Across Issues", "abstract": "We describe the first version of the Media Frames Corpus: several thousand news articles on three policy issues, annotated in terms of media framing. 
We motivate framing as a phenomenon of study for computational linguistics and describe our annotation process.", "phrases": ["media frames corpus", "framing", "frame", "congressional speech", "several previous work"], "overall_score": 5.335842830363811, "scores": [3.1898982562414724, 1.8471759920756956, 1.2509989181764665, 1.0593771918092472, 0.8411388502969113], "rank_score": 1.6377178417199587} -{"id": "liu-avci-2019-incorporating", "title": "Incorporating Priors with Feature Attribution on Text Classification", "abstract": "Feature attribution methods, proposed recently, help users interpret the predictions of complex models. Our approach integrates feature attributions into the objective function to allow machine learning practitioners to incorporate priors in model building. To demonstrate the effectiveness of our technique, we apply it to two tasks: (1) mitigating unintended bias in text classifiers by neutralizing identity terms; (2) improving classifier performance in scarce data settings by forcing the model to focus on toxic terms. Our approach adds an L2 distance loss between feature attributions and task-specific prior values to the objective. Our experiments show that i) a classifier trained with our technique reduces undesired model biases without a tradeoff on the original task; ii) incorporating priors helps model performance in scarce data settings.", "phrases": ["prior", "feature attribution", "objective function"], "overall_score": 3.926980512204557, "scores": [2.371714605747587, 1.977999995349873, 0.5633196211457736], "rank_score": 1.6376780740810781} -{"id": "xu-etal-2020-bert", "title": "BERT-of-Theseus: Compressing BERT by Progressive Module Replacing", "abstract": "In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models. Compared to the previous knowledge distillation approaches for BERT compression, our approach does not introduce any additional loss function. Our approach outperforms existing knowledge distillation approaches on the GLUE benchmark, showing a new perspective of model compression.", "phrases": ["bert", "progressive module", "model compression"], "overall_score": 4.068711606520094, "scores": [2.259970323987776, 1.8091274463377969, 0.8430122361415298], "rank_score": 1.637370002155701} -{"id": "riezler-etal-2007-statistical", "title": "Statistical Machine Translation for Query Expansion in Answer Retrieval", "abstract": "We present an approach to query expansion in answer retrieval that uses Statistical Machine Translation (SMT) techniques to bridge the lexical gap between questions and answers. SMT-based query expansion is done by i) using a full-sentence paraphraser to introduce synonyms in context of the entire query, and ii) by translating query terms into answer terms using a full-sentence SMT model trained on question-answer pairs. We evaluate these global, context-aware query expansion techniques on tfidf retrieval from 10 million question-answer pairs extracted from FAQ pages. 
Experimental results show that SMT-based expansion improves retrieval performance over local expansion and over retrieval without expansion.", "phrases": ["query expansion", "lexical gap", "question-answer pair", "statistical machine translation", "paraphrase generation"], "overall_score": 5.395587209788867, "scores": [3.81461494107355, 2.3965999919850747, 0.8898593239120809, 0.5434850094808156, 0.5408992819416025], "rank_score": 1.6370917096786246} -{"id": "quirk-etal-2004-monolingual", "title": "Monolingual Machine Translation for Paraphrase Generation", "abstract": "We apply statistical machine translation (SMT) tools to generate novel paraphrases of input sentences in the same language. The system is trained on large volumes of sentence pairs automatically extracted from clustered news articles available on the World Wide Web. Alignment Error Rate (AER) is measured to gauge the quality of the resulting corpus. A monotone phrasal decoder generates contextual replacements. Human evaluation shows that this system outperforms baseline paraphrase generation techniques and, in a departure from previous work, offers better coverage and scalability than the current best-of-breed paraphrasing approaches.", "phrases": ["paraphrase generation", "news article", "monolingual machine translation", "smt technique"], "overall_score": 6.331893916536476, "scores": [3.3318668156825395, 1.458607387831747, 1.228643972368642, 0.5234446088395238], "rank_score": 1.6356406961806131} -{"id": "nikolaus-etal-2019-compositional", "title": "Compositional Generalization in Image Captioning", "abstract": "Image captioning models are usually evaluated on their ability to describe a held-out set of images, not on their ability to generalize to unseen concepts. We study the problem of compositional generalization, which measures how well a model composes unseen combinations of concepts when describing images. State-of-the-art image captioning models show poor generalization performance on this task. We propose a multi-task model to address the poor performance, that combines caption generation and image\u2013sentence ranking, and uses a decoding mechanism that re-ranks the captions according to their similarity to the image. This model is substantially better at generalizing to unseen combinations of concepts compared to state-of-the-art captioning models.", "phrases": ["image captioning", "unseen combination", "compositional generalization"], "overall_score": 3.180617510454375, "scores": [2.877885054124115, 1.504502656246695, 0.5211544886333097], "rank_score": 1.6345140663347066} -{"id": "hasan-etal-2019-ur", "title": "UR-FUNNY: A Multimodal Language Dataset for Understanding Humor", "abstract": "Humor is a unique and creative communicative behavior often displayed during social interactions. It is produced in a multimodal manner, through the usage of words (text), gestures (visual) and prosodic cues (acoustic). Understanding humor from these three modalities falls within the boundaries of multimodal language; a recent research trend in natural language processing that models natural language as it happens in face-to-face communication. Although humor detection is an established research area in NLP, in a multimodal context it has been understudied. This paper presents a diverse multimodal dataset, called UR-FUNNY, to open the door to understanding multimodal language used in expressing humor. The dataset and accompanying studies present a framework in multimodal humor detection for the natural language processing community. 
UR-FUNNY is publicly available for research.", "phrases": ["multimodal language", "humor", "ted talk", "ur-funny dataset"], "overall_score": 4.3134729411083494, "scores": [3.6840497698774866, 1.201124605656055, 1.0850708606271386, 0.5676543277524698], "rank_score": 1.6344748909782874} -{"id": "tseng-etal-2005-morphological", "title": "Morphological features help POS tagging of unknown words across language varieties", "abstract": "Part-of-speech tagging, like any supervised statistical NLP task, is more difficult when test sets are very different from training sets, for example when tagging across genres or language varieties. We examined the problem of POS tagging of different varieties of Mandarin Chinese (PRC-Mainland, PRC-Hong Kong, and Taiwan). An analytic study first showed that unknown words were a major source of difficulty in cross-variety tagging. Unknown words in English tend to be proper nouns. By contrast, we found that Mandarin unknown words were mostly common nouns and verbs. We showed these results are caused by the high frequency of morphological compounding in Mandarin; in this sense Mandarin is more like German than English. Based on this analysis, we propose a variety of new morphological unknown-word features for POS tagging, extending earlier work by others on unknown-word tagging in English and German. Our features were implemented in a maximum entropy Markov model. Our system achieves state-of-the-art performance in Mandarin tagging, including improving unknown-word tagging performance on unseen varieties in Chinese Treebank 5.0 from 61% to 80% correct.", "phrases": ["pos tagging", "unknown word", "morphological feature"], "overall_score": 4.191206423151193, "scores": [1.8646755073721348, 1.646540281466317, 1.3908768130589055], "rank_score": 1.634030867299119} -{"id": "nguyen-etal-2009-convolution", "title": "Convolution Kernels on Constituent, Dependency and Sequential Structures for Relation Extraction", "abstract": "This paper explores the use of innovative kernels based on syntactic and semantic structures for a target relation extraction task. Syntax is derived from constituent and dependency parse trees whereas semantics concerns entity types and lexical sequences. We investigate the effectiveness of such representations in automated relation extraction from texts. We process the above data by means of Support Vector Machines along with the syntactic tree, the partial tree and the word sequence kernels. Our study on the ACE 2004 corpus illustrates that the combination of the above kernels achieves high effectiveness and significantly improves the current state-of-the-art.", "phrases": ["constituent", "relation extraction", "tree kernel"], "overall_score": 3.759910354733896, "scores": [2.3331917026219178, 1.7298340281869142, 0.8356992277266553], "rank_score": 1.632908319511829} -{"id": "wang-etal-2019-persuasion", "title": "Persuasion for Good: Towards a Personalized Persuasive Dialogue System for Social Good", "abstract": "Developing intelligent persuasive conversational agents to change people's opinions and actions for social good is the frontier in advancing the ethical development of automated dialogue systems. To do so, the first step is to understand the intricate organization of strategic disclosures and appeals employed in human persuasion conversations. We designed an online persuasion task where one participant was asked to persuade the other to donate to a specific charity. 
We collected a large dataset with 1,017 dialogues and annotated emerging persuasion strategies from a subset. Based on the annotation, we built a baseline classifier with context information and sentence-level features to predict the 10 persuasion strategies used in the corpus. Furthermore, to develop an understanding of personalized persuasion processes, we analyzed the relationships between individuals' demographic and psychological backgrounds including personality, morality, value systems, and their willingness for donation. Then, we analyzed which types of persuasion strategies led to a greater amount of donation depending on the individuals' personal backgrounds. This work lays the ground for developing a personalized persuasive dialogue system.", "phrases": ["social good", "persuasion", "negotiation"], "overall_score": 4.526867136237441, "scores": [2.6689910150072547, 1.7080097935810976, 0.5211657675713822], "rank_score": 1.6327221920532449} -{"id": "maharjan-etal-2018-letting", "title": "Letting Emotions Flow: Success Prediction by Modeling the Flow of Emotions in Books", "abstract": "Books have the power to make us feel happiness, sadness, pain, surprise, or sorrow. An author's dexterity in the use of these emotions captivates readers and makes it difficult for them to put the book down. In this paper, we model the flow of emotions over a book using recurrent neural networks and quantify its usefulness in predicting success in books. We obtained the best weighted F1-score of 69% for predicting books' success in a multitask setting (simultaneously predicting success and genre of books).", "phrases": ["emotion", "success prediction", "book"], "overall_score": 2.6272569031606823, "scores": [2.4376535730221236, 1.336705819281963, 1.1228600956858066], "rank_score": 1.6324064959966311} -{"id": "lebanoff-etal-2018-adapting", "title": "Adapting the Neural Encoder-Decoder Framework from Single to Multi-Document Summarization", "abstract": "Generating a text abstract from a set of documents remains a challenging task. The neural encoder-decoder framework has recently been exploited to summarize single documents, but its success can in part be attributed to the availability of large parallel data automatically acquired from the Web. In contrast, parallel data for multi-document summarization are scarce and costly to obtain. There is a pressing need to adapt an encoder-decoder model trained on single-document summarization data to work with multiple-document input. In this paper, we present an initial investigation into a novel adaptation method. It exploits the maximal marginal relevance method to select representative sentences from multi-document input, and leverages an abstractive encoder-decoder model to fuse disparate sentences to an abstractive summary. The adaptation method is robust and itself requires no training data. 
Our system compares favorably to state-of-the-art extractive and abstractive approaches judged by automatic metrics and human assessors.", "phrases": ["neural encoder-decoder framework", "summarization", "multi-document input"], "overall_score": 4.806405330902129, "scores": [3.481295301157711, 0.8430740968228426, 0.5727319156246661], "rank_score": 1.6323671045350732} -{"id": "beisswenger-etal-2016-empirist", "title": "EmpiriST 2015: A Shared Task on the Automatic Linguistic Annotation of Computer-Mediated Communication and Web Corpora", "abstract": "This paper describes the goals, design and results of a shared task on the automatic linguistic annotation of German language data from genres of computer-mediated communication (CMC), social media interactions and Web corpora. The two subtasks of tokenization and part-of-speech tagging were performed on two data sets: (i) a genuine CMC data set with samples from several CMC genres, and (ii) a Web corpora data set of CC-licensed Web pages which represents the type of data found in large corpora crawled from the Web. The teams participating in the shared task achieved a substantial improvement over current off-the-shelf tools for German. The best tokenizer reached an F1-score of 99.57% (vs. 98.95% off-the-shelf baseline), while the best tagger reached an accuracy of 90.44% (vs. 84.86% baseline). The gold standard (more than 20,000 tokens of training and test data) is freely available online together with detailed annotation guidelines. 1 Motivation, premises and goals Over the past decade, there has been a growing interest in collecting, processing and analyzing data from genres of computer-mediated communication and social media interactions (henceforth referred to as CMC) such as chats, blogs, forums, tweets, newsgroups, messaging applications (SMS, WhatsApp), interactions on \u201csocial network\u201d sites and on wiki talk pages. The development of resources, tools and best practices for automatic linguistic processing and annotation of CMC discourse has turned out to be a desideratum for several fields of research in the humanities: 1. Large corpora crawled from the Web often contain substantial amounts of CMC (blogs, forums, etc.) and similar forms of noncanonical language. Such data are often regarded as \u201cbycatch\u201d that proves difficult for linguistic annotation by means of standard natural language processing (NLP) tools that are optimized for edited text (Giesbrecht and Evert, 2009). 2. For corpus-based variational linguistics, corpora of CMC discourse are an important resource that closes the \u201cCMC gap\u201d in corpora of contemporary written language and language-in-interaction. With a considerable part of contemporary everyday communication being mediated through CMC technologies, up-to-date investigations of language change and linguistic variation need to be able to include CMC discourse in their empirical analyses. In order to harness the full potential of corpus-based research, the preparation of any type of linguistic corpus which includes CMC discourse\u2014whether a genuine CMC corpus or a broad-coverage Web corpus\u2014faces the challenge of handling and annotating the linguistic peculiarities characteristic for the types of written discourse found in CMC genres. Two fundamental (but nontrivial) tasks are (i) accurate tokenization and (ii) sufficiently reliable part-of-speech (PoS) annotation. 
Together, they provide a layer of basic linguistic information on the token level that is a pre-", "phrases": ["automatic linguistic annotation", "computer-mediated communication", "german", "tokenization", "empirist"], "overall_score": 3.586006875274348, "scores": [2.4704876955761144, 2.2510838428599778, 1.8377206993767043, 1.0633505179349174, 0.5376675563228968], "rank_score": 1.632062062414122} -{"id": "uzzaman-etal-2013-semeval", "title": "SemEval-2013 Task 1: TempEval-3: Evaluating Time Expressions, Events, and Temporal Relations", "abstract": "Within the SemEval-2013 evaluation exercise, the TempEval-3 shared task aims to advance research on temporal information processing. It follows on from TempEval-1 and -2, with: a three-part structure covering temporal expression, event, and temporal relation extraction; a larger dataset; and new single measures to rank systems \u2010 in each task and in general. In this paper, we describe the participants\u2019 approaches, results, and the observations from the results, which may guide future research in this area.", "phrases": ["tempeval-3", "temporal information extraction", "news article", "relation identification"], "overall_score": 6.597006296858001, "scores": [3.525758128213337, 1.6119263506128219, 0.8530304532673999, 0.5360450418321165], "rank_score": 1.631689993481419} -{"id": "moghe-etal-2018-towards", "title": "Towards Exploiting Background Knowledge for Building Conversation Systems", "abstract": "Existing dialog datasets contain a sequence of utterances and responses without any explicit background knowledge associated with them. This has resulted in the development of models which treat conversation as a sequence-to-sequence generation task (i.e., given a sequence of utterances generate the response sequence). This is not only an overly simplistic view of conversation but it is also emphatically different from the way humans converse by heavily relying on their background knowledge about the topic (as opposed to simply relying on the previous sequence of utterances). For example, it is common for humans to (involuntarily) produce utterances which are copied or suitably modified from background articles they have read about the topic. To facilitate the development of such natural conversation models which mimic the human process of conversing, we create a new dataset containing movie chats wherein each response is explicitly generated by copying and/or modifying sentences from unstructured background knowledge such as plots, comments and reviews about the movie. We establish baseline results on this dataset (90K utterances from 9K conversations) using three different models: (i) pure generation based models which ignore the background knowledge (ii) generation based models which learn to copy information from the background knowledge when required and (iii) span prediction based models which predict the appropriate response span in the background knowledge.", "phrases": ["background knowledge", "conversation", "dialog dataset"], "overall_score": 4.967578499523801, "scores": [2.792872452526529, 1.5440861135839015, 0.5579751397206532], "rank_score": 1.6316445686103613} -{"id": "liu-etal-2021-plome", "title": "PLOME: Pre-training with Misspelled Knowledge for Chinese Spelling Correction", "abstract": "Chinese spelling correction (CSC) is a task to detect and correct spelling errors in texts. CSC is essentially a linguistic problem, thus the ability of language understanding is crucial to this task. 
In this paper, we propose a Pre-trained masked Language model with Misspelled knowledgE (PLOME) for CSC, which jointly learns how to understand language and correct spelling errors. To this end, PLOME masks the chosen tokens with similar characters according to a confusion set rather than the fixed token \u201c[MASK]\u201d as in BERT. Besides character prediction, PLOME also introduces pronunciation prediction to learn the misspelled knowledge on the phonic level. Moreover, phonological and visual similarity knowledge is important to this task. PLOME utilizes GRU networks to model such knowledge based on characters' phonics and strokes. Experiments are conducted on widely used benchmarks. Our method achieves superior performance against state-of-the-art approaches by a remarkable margin. We release the source code and pre-trained model for further use by the community.", "phrases": ["misspelled knowledge", "chinese spelling correction", "plome"], "overall_score": 3.3924148587804894, "scores": [2.09417621417041, 1.9533416826792835, 0.8467021965509531], "rank_score": 1.6314066978002153} -{"id": "barman-etal-2014-code", "title": "Code Mixing: A Challenge for Language Identification in the Language of Social Media", "abstract": "In social media communication, multilingual speakers often switch between languages, and, in such an environment, automatic language identification becomes both a necessary and challenging task. In this paper, we describe our work in progress on the problem of automatic language identification for the language of social media. We describe a new dataset that we are in the process of creating, which contains Facebook posts and comments that exhibit code mixing between Bengali, English and Hindi. We also present some preliminary word-level language identification experiments using this dataset. Different techniques are employed, including a simple unsupervised dictionary-based approach, supervised word-level classification with and without contextual clues, and sequence labelling using Conditional Random Fields. We find that the dictionary-based approach is surpassed by supervised classification and sequence labelling, and that it is important to take contextual clues into consideration.", "phrases": ["language identification", "code mixing", "social medium", "bengali-hindi-english facebook comment", "code-mixed text"], "overall_score": 6.171497367867661, "scores": [4.484401446562538, 0.8695763787394698, 1.347761058892885, 0.8520381985327533, 0.6005412322843755], "rank_score": 1.630863663002404} -{"id": "shirani-etal-2019-learning", "title": "Learning Emphasis Selection for Written Text in Visual Media from Crowd-Sourced Label Distributions", "abstract": "In visual communication, text emphasis is used to increase the comprehension of written text to convey the author's intent. We study the problem of emphasis selection, i.e. choosing candidates for emphasis in short written text, to enable automated design assistance in authoring. Without knowing the author's intent and only considering the input text, multiple emphasis selections are valid. We propose a model that employs end-to-end label distribution learning (LDL) on crowd-sourced data and predicts a selection distribution, capturing the inter-subjectivity (common-sense) in the audience as well as the ambiguity of the input. 
We compare the model with several baselines in which the problem is transformed to single-label learning by mapping label distributions to absolute labels via majority voting.", "phrases": ["emphasis selection", "candidate", "label distribution learning", "visual medium"], "overall_score": 4.4152307673302555, "scores": [3.7092467806627942, 1.0986164107894985, 0.8806081020813282, 0.8331666960918335], "rank_score": 1.6304094974063634} -{"id": "gonzalez-rubio-etal-2012-active", "title": "Active learning for interactive machine translation", "abstract": "Translation needs have greatly increased in recent years. In many situations, text to be translated constitutes an unbounded stream of data that grows continually with time. An effective approach to translate text documents is to follow an interactive-predictive paradigm in which both the system is guided by the user and the user is assisted by the system to generate error-free translations. Unfortunately, when processing such unbounded data streams even this approach requires an overwhelming amount of manpower. It is in this scenario that the use of active learning techniques is compelling. In this work, we propose different active learning techniques for interactive machine translation. Results show that for a given translation quality the use of active learning allows us to greatly reduce the human effort required to translate the sentences in the stream.", "phrases": ["machine translation", "human effort", "active learning"], "overall_score": 2.623767179288281, "scores": [2.3519590292787194, 1.9982880129640177, 0.5404675836854872], "rank_score": 1.6302382086427414} -{"id": "kim-hovy-2004-determining", "title": "Determining the Sentiment of Opinions", "abstract": "Identifying sentiments (the affective parts of opinions) is a challenging problem. We present a system that, given a topic, automatically finds the people who hold opinions about that topic and the sentiment of each opinion. The system contains a module for determining word sentiment and another for combining sentiments within a sentence. We experiment with various models of classifying and combining sentiment at word and sentence levels, with promising results.", "phrases": ["opinion", "sentence level", "synonyms", "orientation", "subjectivity"], "overall_score": 7.099291219668837, "scores": [4.035304936795484, 1.1204224403592375, 1.0559279024986377, 1.0536973995365209, 0.8821865552179121], "rank_score": 1.6295078468815583} -{"id": "florescu-caragea-2017-positionrank", "title": "PositionRank: An Unsupervised Approach to Keyphrase Extraction from Scholarly Documents", "abstract": "The large and growing amounts of online scholarly data present both challenges and opportunities to enhance knowledge discovery. One such challenge is to automatically extract a small set of keyphrases from a document that can accurately describe the document's content and can facilitate fast information processing. In this paper, we propose PositionRank, an unsupervised model for keyphrase extraction from scholarly documents that incorporates information from all positions of a word's occurrences into a biased PageRank. Our model obtains remarkable improvements in performance over PageRank models that do not take into account word positions as well as over strong baselines for this task. 
Specifically, on several datasets of research papers, PositionRank achieves improvements as high as 29.09%.", "phrases": ["keyphrase extraction", "scholarly document", "positionrank"], "overall_score": 2.9194062331964132, "scores": [2.222342299245441, 1.7788918671022158, 0.8868207595529443], "rank_score": 1.629351641966867} -{"id": "oda-etal-2015-syntax", "title": "Syntax-based Simultaneous Translation through Prediction of Unseen Syntactic Constituents", "abstract": "Simultaneous translation is a method to reduce the latency of communication through machine translation (MT) by dividing the input into short segments before performing translation. However, short segments pose problems for syntax-based translation methods, as it is difficult to generate accurate parse trees for sub-sentential segments. In this paper, we perform the first experiments applying syntax-based SMT to simultaneous translation, and propose two methods to prevent degradations in accuracy: a method to predict unseen syntactic constituents that help generate complete parse trees, and a method that waits for more input when the current utterance is not enough to generate a fluent translation. Experiments on English-Japanese translation show that the proposed methods allow for improvements in accuracy, particularly with regards to word order of the target sentences.", "phrases": ["simultaneous translation", "syntactic constituent", "complete parse tree"], "overall_score": 3.5786961276295504, "scores": [2.6242964673758937, 1.35173105185661, 0.9101768740864661], "rank_score": 1.6287347977729898} -{"id": "goo-etal-2018-slot", "title": "Slot-Gated Modeling for Joint Slot Filling and Intent Prediction", "abstract": "Attention-based recurrent neural network models for joint intent detection and slot filling have achieved state-of-the-art performance, although they use independent attention weights. Considering that slot and intent have a strong relationship, this paper proposes a slot gate that focuses on learning the relationship between intent and slot attention vectors in order to obtain better semantic frame results through global optimization. The experiments show that our proposed model significantly improves sentence-level semantic frame accuracy, with 4.2% and 1.9% relative improvement over the attentional model on the benchmark ATIS and Snips datasets, respectively.", "phrases": ["slot filling", "intent detection", "joint modeling"], "overall_score": 6.432131571728051, "scores": [2.218184963763747, 2.137613949369684, 0.5278266208824581], "rank_score": 1.6278751780052965} -{"id": "cucerzan-2007-large", "title": "Large-Scale Named Entity Disambiguation Based on Wikipedia Data", "abstract": "This paper presents a large-scale system for the recognition and semantic disambiguation of named entities based on information extracted from a large encyclopedic collection and Web search results. It describes in detail the disambiguation paradigm employed and the information extraction process from Wikipedia. 
Through a process of maximizing the agreement between the contextual information extracted from Wikipedia and the context of a document, as well as the agreement among the category tags associated with the candidate entities, the implemented system shows high disambiguation accuracy on both news stories and Wikipedia articles.", "phrases": ["entity disambiguation", "wikipedia", "knowledge base", "overlap", "noun phrase"], "overall_score": 7.19248913476066, "scores": [2.7989762523230883, 2.5465246822631102, 1.418985704651766, 0.838701661029518, 0.5352463855981261], "rank_score": 1.6276869371731215} -{"id": "nakanishi-etal-2005-probabilistic", "title": "Probabilistic Models for Disambiguation of an HPSG-Based Chart Generator", "abstract": "We describe probabilistic models for a chart generator based on HPSG. Within the research field of parsing with lexicalized grammars such as HPSG, recent developments have achieved efficient estimation of probabilistic models and high-speed parsing guided by probabilistic models. The focus of this paper is to show that two essential techniques -- model estimation on packed parse forests and beam search during parsing -- are successfully exported to the task of natural language generation. Additionally, we report empirical evaluation of the performance of several disambiguation models and how the performance changes according to the feature set used in the models and the size of training data.", "phrases": ["chart generator", "hpsg grammar", "probabilistic model"], "overall_score": 4.406920242684929, "scores": [2.799635277576251, 1.1956763414747509, 0.8867104064911291], "rank_score": 1.6273406751807105} -{"id": "lin-2004-path", "title": "A Path-based Transfer Model for Machine Translation", "abstract": "We propose a path-based transfer model for machine translation. The model is trained with a word-aligned parallel corpus where the source language sentences are parsed. The training algorithm extracts a set of transfer rules and their probabilities from the training corpus. A rule translates a path in the source language dependency tree into a fragment in the target dependency tree. The problem of finding the most probable translation becomes a graph-theoretic problem of finding the minimum path covering of the source language dependency tree.", "phrases": ["path-based transfer model", "machine translation", "syntax-based model"], "overall_score": 4.173746566416605, "scores": [2.512733061413621, 1.8397568366981825, 0.5291814155244846], "rank_score": 1.627223771212096} -{"id": "kim-etal-2011-overview", "title": "Overview of BioNLP Shared Task 2011", "abstract": "The BioNLP Shared Task 2011, an information extraction task held over 6 months up to March 2011, met with community-wide participation, receiving 46 final submissions from 24 teams. 
Five main tasks and three supporting tasks were arranged, and their results show advances in the state of the art in fine-grained biomedical domain information extraction and demonstrate that extraction methods successfully generalize in various aspects.", "phrases": ["bionlp shared task", "information extraction", "series", "protein", "biomedical event"], "overall_score": 5.99996596192835, "scores": [4.314719099910283, 1.3176077044985115, 1.0919456419284688, 0.8397401463351842, 0.5684921917235694], "rank_score": 1.626500956879203} -{"id": "bonial-etal-2014-propbank", "title": "PropBank: Semantics of New Predicate Types", "abstract": "This research focuses on expanding PropBank, a corpus annotated with predicate argument structures, with new predicate types; namely, noun, adjective and complex predicates, such as Light Verb Constructions. This effort is in part inspired by a sister project to PropBank, the Abstract Meaning Representation project, which also attempts to capture \u201cwho is doing what to whom\u201d in a sentence, but does so in a way that abstracts away from syntactic structures. For example, alternate realizations of a \u2018destroying\u2019 event in the form of either the verb \u2018destroy\u2019 or the noun \u2018destruction\u2019 would receive the same Abstract Meaning Representation. In order for PropBank to reach the same level of coverage and continue to serve as the bedrock for Abstract Meaning Representation, predicate types other than verbs, which have previously gone without annotation, must be annotated. This research describes the challenges therein, including the development of new annotation practices that walk the line between abstracting away from language-particular syntactic facts to explore deeper semantics, and maintaining the connection between semantics and syntactic structures that has proven to be very valuable for PropBank as a corpus of training data for Natural Language Processing applications.", "phrases": ["predicate", "connection", "propbank"], "overall_score": 3.899313943647351, "scores": [3.143984436930053, 1.2081912528489918, 0.526244966720446], "rank_score": 1.6261402188331637} -{"id": "hua-etal-2019-argument", "title": "Argument Mining for Understanding Peer Reviews", "abstract": "Peer-review plays a critical role in the scientific writing and publication ecosystem. To assess the efficiency and efficacy of the reviewing process, one essential element is to understand and evaluate the reviews themselves. In this work, we study the content and structure of peer reviews under the argument mining framework, through automatically detecting (1) the argumentative propositions put forward by reviewers, and (2) their types (e.g., evaluating the work or making suggestions for improvement). We first collect 14.2K reviews from major machine learning and natural language processing venues. 400 reviews are annotated with 10,386 propositions and corresponding types of Evaluation, Request, Fact, Reference, or Quote. We then train state-of-the-art proposition segmentation and classification models on the data to evaluate their utilities and identify new challenges for this new domain, motivating future directions for argument mining. 
Further experiments show that proposition usage varies across venues in amount, type, and topic.", "phrases": ["peer review", "quote", "argument mining"], "overall_score": 3.1637015789042695, "scores": [2.2924397838666253, 2.052779581034006, 0.5322436265537711], "rank_score": 1.6258209971514672} -{"id": "graca-etal-2019-generalizing", "title": "Generalizing Back-Translation in Neural Machine Translation", "abstract": "Back-translation \u2014 data augmentation by translating target monolingual data \u2014 is a crucial component in modern neural machine translation (NMT). In this work, we reformulate back-translation in the scope of cross-entropy optimization of an NMT model, clarifying its underlying mathematical assumptions and approximations beyond its heuristic usage. Our formulation covers broader synthetic data generation schemes, including sampling from a target-to-source NMT model. With this formulation, we point out fundamental problems of the sampling-based approaches and propose to remedy them by (i) disabling label smoothing for the target-to-source model and (ii) sampling from a restricted search space. Our statements are investigated on the WMT 2018 German-English news translation task.", "phrases": ["back-translation", "neural machine translation", "formulation"], "overall_score": 3.379629142291756, "scores": [3.3530563766351973, 0.9417412555260297, 0.5809765714669107], "rank_score": 1.625258067876046} -{"id": "levine-etal-2020-sensebert", "title": "SenseBERT: Driving Some Sense into BERT", "abstract": "The ability to learn from large unlabeled corpora has allowed neural language models to advance the frontier in natural language understanding. However, existing self-supervision techniques operate at the word form level, which serves as a surrogate for the underlying semantic content. This paper proposes a method to employ weak-supervision directly at the word sense level. Our model, named SenseBERT, is pre-trained to predict not only the masked words but also their WordNet supersenses. Accordingly, we attain a lexical-semantic level language model, without the use of human annotation. SenseBERT achieves significantly improved lexical understanding, as we demonstrate by experimenting on SemEval Word Sense Disambiguation, and by attaining a state-of-the-art result on the \u2018Word in Context\u2019 task.", "phrases": ["language model", "pre-training", "supersense", "sensebert", "wordin-context task"], "overall_score": 5.5256293717960485, "scores": [4.5982815117350375, 1.2222806493330745, 0.8911405524567978, 0.8522020354855736, 0.5591600892549287], "rank_score": 1.6246129676530825} -{"id": "falke-etal-2019-ranking", "title": "Ranking Generated Summaries by Correctness: An Interesting but Challenging Application for Natural Language Inference", "abstract": "While recent progress on abstractive summarization has led to remarkably fluent summaries, factual errors in generated summaries still severely limit their use in practice. In this paper, we evaluate summaries produced by state-of-the-art models via crowdsourcing and show that such errors occur frequently, in particular with more abstractive models. We study whether textual entailment predictions can be used to detect such errors and if they can be reduced by reranking alternative predicted summaries. That leads to an interesting downstream application for entailment models. 
In our experiments, we find that out-of-the-box entailment models trained on NLI datasets do not yet offer the desired performance for the downstream task and we therefore release our annotations as additional test data for future extrinsic evaluations of NLI.", "phrases": ["summaries", "correctness", "natural language inference", "entailment model", "beam search"], "overall_score": 6.251895323115576, "scores": [2.4452094534283297, 2.089671930244422, 0.8946447481883972, 1.4763344055388719, 1.2131727697110781], "rank_score": 1.6238066614222197} -{"id": "levy-manning-2003-harder", "title": "Is it Harder to Parse Chinese, or the Chinese Treebank?", "abstract": "We present a detailed investigation of the challenges posed when applying parsing models developed against English corpora to Chinese. We develop a factored-model statistical parser for the Penn Chinese Treebank, showing the implications of gross statistical differences between WSJ and Chinese Tree-banks for the most general methods of parser adaptation. We then provide a detailed analysis of the major sources of statistical parse errors for this corpus, showing their causes and relative frequencies, and show that while some types of errors are due to difficult ambiguities inherent in Chinese grammar, others arise due to treebank annotation practices. We show how each type of error can be addressed with simple, targeted changes to the independence assumptions of the maximum likelihood-estimated PCFG factor of the parsing model, which raises our F1 from 80.7% to 82.6% on our development set, and achieves parse accuracy close to the best published figures for Chinese parsing.", "phrases": ["chinese", "investigation", "function word"], "overall_score": 4.941885834505124, "scores": [3.768588065955265, 0.5706032123087422, 0.5304254865253599], "rank_score": 1.6232055882631222} -{"id": "fernandes-etal-2014-latent", "title": "Latent Trees for Coreference Resolution", "abstract": "We describe a structure learning system for unrestricted coreference resolution that explores two key modeling techniques: latent coreference trees and automatic entropy-guided feature induction. The latent tree modeling makes the learning problem computationally feasible because it incorporates a meaningful hidden structure. Additionally, using an automatic feature induction method, we can efficiently build enhanced nonlinear models using linear model learning algorithms. We present empirical results that highlight the contribution of each modeling technique used in the proposed system. Empirical evaluation is performed on the multilingual unrestricted coreference CoNLL-2012 Shared Task datasets, which comprise three languages: Arabic, Chinese and English. We apply the same system to all languages, except for minor adaptations to some language-dependent features such as nested mentions and specific static pronoun lists. A previous version of this system was submitted to the CoNLL-2012 Shared Task closed track, achieving an official score of 58.69, the best among the competitors. The unique enhancement added to the current system version is the inclusion of candidate arcs linking nested mentions for the Chinese language. By including such arcs, the score increases by almost 4.5 points for that language. 
The current system shows a score of 60.15, which corresponds to a 3.5% error reduction, and is the best-performing system for each of the three languages.", "phrases": ["coreference resolution", "mention", "perceptron"], "overall_score": 4.393568791218399, "scores": [2.942585793242243, 1.3709783461224085, 0.5536670398396664], "rank_score": 1.6224103930681057} -{"id": "macherey-etal-2011-language", "title": "Language-independent compound splitting with morphological operations", "abstract": "Translating compounds is an important problem in machine translation. Since many compounds have not been observed during training, they pose a challenge for translation systems. Previous decompounding methods have often been restricted to a small set of languages as they cannot deal with more complex compound forming processes. We present a novel and unsupervised method to learn the compound parts and morphological operations needed to split compounds into their compound parts. The method uses a bilingual corpus to learn the morphological operations required to split a compound into its parts. Furthermore, monolingual corpora are used to learn and filter the set of compound part candidates. We evaluate our method within a machine translation task and show significant improvements for various languages to show the versatility of the approach.", "phrases": ["compound splitting", "operation", "machine translation", "transitional element"], "overall_score": 4.496970784641391, "scores": [3.0231501433836634, 1.728561462795744, 1.1944333446584539, 0.5416124991868241], "rank_score": 1.6219393625061713} -{"id": "jiang-etal-2020-cross", "title": "Cross-lingual Information Retrieval with BERT", "abstract": "Multiple neural language models have been developed recently, e.g., BERT and XLNet, and achieved impressive results in various NLP tasks including sentence classification, question answering and document ranking. In this paper, we explore the use of the popular bidirectional language model, BERT, to model and learn the relevance between English queries and foreign-language documents in the task of cross-lingual information retrieval. A deep relevance matching model based on BERT is introduced and trained by finetuning a pretrained multilingual BERT model with weak supervision, using home-made CLIR training data derived from parallel corpora. Experimental results of the retrieval of Lithuanian documents against short English queries show that our model is effective and outperforms the competitive baseline approaches.", "phrases": ["bert", "language model", "query", "cross-lingual information retrieval"], "overall_score": 3.1552498362472883, "scores": [3.1405083259233626, 1.6936111316275246, 1.0865696786322447, 0.5652215062565034], "rank_score": 1.6214776606099088} -{"id": "turner-charniak-2005-supervised", "title": "Supervised and Unsupervised Learning for Sentence Compression", "abstract": "In Statistics-Based Summarization - Step One: Sentence Compression, Knight and Marcu (Knight and Marcu, 2000) (K&M) use a corpus of 1035 training sentences. More data is not easily available, so in addition to improving the original K&M noisy-channel model, we create unsupervised and semi-supervised models of the task. Finally, we point out problems with modeling the task in this way. 
They suggest areas for future research.", "phrases": ["sentence compression", "marcu", "noisy-channel model", "unsupervised variant"], "overall_score": 5.0100221583875735, "scores": [3.640534470530347, 1.069398507629263, 0.9456253758400793, 0.8277200014228946], "rank_score": 1.620819588855646} -{"id": "lawrence-etal-2017-counterfactual", "title": "Counterfactual Learning from Bandit Feedback under Deterministic Logging: A Case Study in Statistical Machine Translation", "abstract": "The goal of counterfactual learning for statistical machine translation (SMT) is to optimize a target SMT system from logged data that consist of user feedback to translations that were predicted by another, historic SMT system. A challenge arises from the fact that risk-averse commercial SMT systems deterministically log the most probable translation. The lack of sufficient exploration of the SMT output space seemingly contradicts the theoretical requirements for counterfactual learning. We show that counterfactual learning from deterministic bandit logs is possible nevertheless by smoothing out deterministic components in learning. This can be achieved by additive and multiplicative control variates that avoid degenerate behavior in empirical risk minimization. Our simulation experiments show improvements of up to 2 BLEU points by counterfactual learning from deterministic bandit feedback.", "phrases": ["bandit feedback", "deterministic logging", "statistical machine translation"], "overall_score": 3.370295059331344, "scores": [1.748112914951166, 1.7453874066976547, 1.3688076467810844], "rank_score": 1.6207693228099682} -{"id": "kalchbrenner-blunsom-2013-recurrent", "title": "Recurrent Continuous Translation Models", "abstract": "We introduce a class of probabilistic continuous translation models called Recurrent Continuous Translation Models that are purely based on continuous representations for words, phrases and sentences and do not rely on alignments or phrasal translation units. The models have a generation and a conditioning aspect. The generation of the translation is modelled with a target Recurrent Language Model, whereas the conditioning on the source sentence is modelled with a Convolutional Sentence Model. Through various experiments, we show first that our models obtain a perplexity with respect to gold translations that is > 43% lower than that of state-of-the-art alignment-based translation models. Secondly, we show that they are remarkably sensitive to the word order, syntax, and meaning of the source sentence despite lacking alignments. Finally, we show that they match a state-of-the-art system when rescoring n-best lists of translations.", "phrases": ["machine translation", "recurrent neural network", "nmt system", "target sequence", "encoder-decoder architecture"], "overall_score": 7.524986955007689, "scores": [3.3925433420842355, 1.685343298878182, 1.086398552765958, 1.0836995400685223, 0.8531711749347103], "rank_score": 1.620231181746322} -{"id": "brants-etal-2007-large", "title": "Large Language Models in Machine Translation", "abstract": "Systems, methods, and computer program products for machine translation are provided. In some implementations a system is provided. 
The system includes a language model including a collection of n-grams from a corpus, each n-gram having a corresponding relative frequency in the corpus and an order n corresponding to a number of tokens in the n-gram, each n-gram corresponding to a backoff n-gram having an order of n-1 and a collection of backoff scores, each backoff score associated with an n-gram, the backoff score determined as a function of a backoff factor and a relative frequency of a corresponding backoff n-gram in the corpus.", "phrases": ["machine translation", "relative frequency", "billion", "more data", "cluster"], "overall_score": 6.270034065137308, "scores": [4.339664695339303, 1.4261600192617903, 0.9030222675856093, 0.8621869220722809, 0.5672720833017492], "rank_score": 1.6196611975121467} -{"id": "ruppenhofer-etal-2009-semeval", "title": "SemEval-2010 Task 10: Linking Events and Their Participants in Discourse", "abstract": "In this paper, we describe the SemEval-2010 shared task on \"Linking Events and Their Participants in Discourse\". This task is a variant of the classical semantic role labelling task. The novel aspect is that we focus on linking local semantic argument structures across sentence boundaries. Specifically, the task aims at linking locally uninstantiated roles to their co-referents in the wider discourse context (if such co-referents exist). This task is potentially beneficial for a number of NLP applications and we hope that it will not only attract researchers from the semantic role labelling community but also from co-reference resolution and information extraction.", "phrases": ["discourse", "semeval task", "f-score annotator agreement"], "overall_score": 4.928728974760389, "scores": [3.316460918625534, 0.9996008116710073, 0.5405905778197063], "rank_score": 1.618884102705416} -{"id": "shwartz-etal-2016-improving", "title": "Improving Hypernymy Detection with an Integrated Path-based and Distributional Method", "abstract": "Detecting hypernymy relations is a key task in NLP, which is addressed in the literature using two complementary approaches: distributional methods, whose supervised variants are the current best performers, and path-based methods, which have received less research attention. We suggest an improved path-based algorithm, in which the dependency paths are encoded using a recurrent neural network, that achieves results comparable to distributional methods. We then extend the approach to integrate both path-based and distributional signals, significantly improving upon the state-of-the-art on this task.", "phrases": ["hypernymy detection", "semantic relation", "dbpedia"], "overall_score": 6.196765967483086, "scores": [3.255062263040772, 1.0770176988433775, 0.5235061265640291], "rank_score": 1.6185286961493928} -{"id": "chen-etal-2010-emotion", "title": "Emotion Cause Detection with Linguistic Constructions", "abstract": "This paper proposes a multi-label approach to detect emotion causes. The multi-label model not only detects multi-clause causes, but also captures the long-distance information to facilitate emotion cause detection. In addition, based on the linguistic analysis, we create two sets of linguistic patterns during feature extraction. Both manually generalized patterns and automatically generalized patterns are designed to extract general cause expressions or specific constructions for emotion causes. 
Experiments show that our system achieves a performance much higher than a baseline model.", "phrases": ["cause", "emotion", "linguistic cue"], "overall_score": 5.706276331086571, "scores": [1.9474713169133233, 1.7198649883398482, 1.1871954063400643], "rank_score": 1.6181772371977452} -{"id": "wang-poon-2018-deep", "title": "Deep Probabilistic Logic: A Unifying Framework for Indirect Supervision", "abstract": "Deep learning has emerged as a versatile tool for a wide range of NLP tasks, due to its superior capacity in representation learning. But its applicability is limited by the reliance on annotated examples, which are difficult to produce at scale. Indirect supervision has emerged as a promising direction to address this bottleneck, either by introducing labeling functions to automatically generate noisy examples from unlabeled text, or by imposing constraints over interdependent label decisions. A plethora of methods have been proposed, each with respective strengths and limitations. Probabilistic logic offers a unifying language to represent indirect supervision, but end-to-end modeling with probabilistic logic is often infeasible due to intractable inference and learning. In this paper, we propose deep probabilistic logic (DPL) as a general framework for indirect supervision, by composing probabilistic logic with deep learning. DPL models label decisions as latent variables, represents prior knowledge on their relations using weighted first-order logical formulas, and alternates between learning a deep neural network for the end task and refining uncertain formula weights for indirect supervision, using variational EM. This framework subsumes prior indirect supervision methods as special cases, and enables novel combination via infusion of rich domain and linguistic knowledge. Experiments on biomedical machine reading demonstrate the promise of this approach.", "phrases": ["probabilistic logic", "indirect supervision", "deep learning", "dpl"], "overall_score": 3.5523749924183003, "scores": [2.993359820771457, 2.3809281347646962, 0.5679683201676747, 0.5247658558708731], "rank_score": 1.6167555328936754} -{"id": "galley-etal-2004-identifying", "title": "Identifying Agreement and Disagreement in Conversational Speech: Use of Bayesian Networks to Model Pragmatic Dependencies", "abstract": "We describe a statistical approach for modeling agreements and disagreements in conversational interaction. Our approach first identifies adjacency pairs using maximum entropy ranking based on a set of lexical, durational, and structural features that look both forward and backward in the discourse. We then classify utterances as agreement or disagreement using these adjacency pairs and features that represent various pragmatic influences of previous agreement or disagreement on the current utterance. Our approach achieves 86.9% accuracy, a 4.9% increase over previous work.", "phrases": ["disagreement", "conversational speech", "bayesian networks", "pragmatic influence"], "overall_score": 4.99685077319769, "scores": [4.191996703679269, 0.8829246920873312, 0.8167279848179884, 0.5745843882447925], "rank_score": 1.6165584422073453} -{"id": "woodsend-lapata-2011-learning", "title": "Learning to Simplify Sentences with Quasi-Synchronous Grammar and Integer Programming", "abstract": "Text simplification aims to rewrite text into simpler versions, and thus make information accessible to a broader audience. 
Most previous work simplifies sentences using handcrafted rules aimed at splitting long sentences, or substitutes difficult words using a predefined dictionary. This paper presents a data-driven model based on quasi-synchronous grammar, a formalism that can naturally capture structural mismatches and complex rewrite operations. We describe how such a grammar can be induced from Wikipedia and propose an integer linear programming model for selecting the most appropriate simplification from the space of possible rewrites generated by the grammar. We show experimentally that our method creates simplifications that significantly reduce the reading difficulty of the input, while maintaining grammaticality and preserving its meaning.", "phrases": ["quasi-synchronous grammar", "integer programming", "simplification", "complex rewrite operation", "sentence splitting"], "overall_score": 5.699633337262689, "scores": [3.663390161951278, 1.583705069575701, 1.407890651724115, 0.8649031318256722, 0.5615781200555473], "rank_score": 1.6162934270264624} -{"id": "sridhar-etal-2015-joint", "title": "Joint Models of Disagreement and Stance in Online Debate", "abstract": "Online debate forums present a valuable opportunity for the understanding and modeling of dialogue. To understand these debates, a key challenge is inferring the stances of the participants, all of which are interrelated and dependent. While collectively modeling users\u2019 stances has been shown to be effective (Walker et al., 2012c; Hasan and Ng, 2013), there are many modeling decisions whose ramifications are not well understood. To investigate these choices and their effects, we introduce a scalable unified probabilistic modeling framework for stance classification models that 1) are collective, 2) reason about disagreement, and 3) can model stance at either the author level or at the post level. We comprehensively evaluate the possible modeling choices on eight topics across two online debate corpora, finding accuracy improvements of up to 11.5 percentage points over a local classifier. Our results highlight the importance of making the correct modeling choices for online dialogues, and having a unified probabilistic modeling framework that makes this possible.", "phrases": ["stance", "online debate", "social interaction"], "overall_score": 4.375573412823271, "scores": [2.8236411491995934, 1.497330271275538, 0.5263243324348682], "rank_score": 1.6157652509699998} -{"id": "shenoy-sardana-2020-multilogue", "title": "Multilogue-Net: A Context-Aware RNN for Multi-modal Emotion Detection and Sentiment Analysis in Conversation", "abstract": "Sentiment Analysis and Emotion Detection in conversation is key in several real-world applications, with an increase in modalities available aiding a better understanding of the underlying emotions. Multi-modal Emotion Detection and Sentiment Analysis can be particularly useful, as applications will be able to use specific subsets of available modalities, as per the available data. Current systems dealing with Multi-modal functionality fail to leverage and capture - the context of the conversation through all modalities, the dependency between the listener(s) and speaker emotional states, and the relevance and relationship between the available modalities. In this paper, we propose an end to end RNN architecture that attempts to take into account all the mentioned drawbacks. 
Our proposed model, at the time of writing, outperforms the state of the art on a benchmark dataset on a variety of accuracy and regression metrics.", "phrases": ["context-aware rnn", "conversation", "multilogue-net"], "overall_score": 2.599937895912333, "scores": [2.2720097847417304, 1.3340973935510905, 1.2401895489544115], "rank_score": 1.6154322424157443} -{"id": "zhao-etal-2018-paragraph", "title": "Paragraph-level Neural Question Generation with Maxout Pointer and Gated Self-attention Networks", "abstract": "Question generation, the task of automatically creating questions that can be answered by a certain span of text within a given passage, is important for question-answering and conversational systems in digital assistants such as Alexa, Cortana, Google Assistant and Siri. Recent sequence to sequence neural models have outperformed previous rule-based systems. Existing models mainly focused on using one or two sentences as the input. Long text has posed challenges for sequence to sequence neural models in question generation \u2013 worse performances were reported if using the whole paragraph (with multiple sentences) as the input. In reality, however, it often requires the whole paragraph as context in order to generate high quality questions. In this paper, we propose a maxout pointer mechanism with gated self-attention encoder to address the challenges of processing long text inputs for question generation. With sentence-level inputs, our model outperforms previous approaches with either sentence-level or paragraph-level inputs. Furthermore, our model can effectively utilize paragraphs as inputs, pushing the state-of-the-art result from 13.9 to 16.3 (BLEU_4).", "phrases": ["question generation", "paragraph", "self-attention encoder"], "overall_score": 6.11066704430209, "scores": [1.9519156563475888, 1.486108762986184, 1.4063419857199773], "rank_score": 1.6147888016845833} -{"id": "tran-etal-2018-importance", "title": "The Importance of Being Recurrent for Modeling Hierarchical Structure", "abstract": "Recent work has shown that recurrent neural networks (RNNs) can implicitly capture and exploit hierarchical information when trained to solve common natural language processing tasks (Blevins et al., 2018) such as language modeling (Linzen et al., 2016; Gulordava et al., 2018) and neural machine translation (Shi et al., 2016). In contrast, the ability to model structured data with non-recurrent neural networks has received little attention despite their success in many NLP tasks (Gehring et al., 2017; Vaswani et al., 2017). In this work, we compare the two architectures\u2014recurrent versus non-recurrent\u2014with respect to their ability to model hierarchical structure and find that recurrency is indeed important for this purpose. The code and data used in our experiments are available at ", "phrases": ["recurrent", "hierarchical structure", "language understanding"], "overall_score": 4.751921148338703, "scores": [3.4269501549706973, 0.8687704924169714, 0.5458683771713873], "rank_score": 1.613863008186352} -{"id": "zhang-lapata-2017-sentence", "title": "Sentence Simplification with Deep Reinforcement Learning", "abstract": "Sentence simplification aims to make sentences easier to read and understand. Most recent approaches draw on insights from machine translation to learn simplification rewrites from monolingual corpora of complex and simple sentences. We address the simplification problem with an encoder-decoder model coupled with a deep reinforcement learning framework. 
Our model, which we call DRESS (as shorthand for Deep REinforcement Sentence Simplification), explores the space of possible simplifications while learning to optimize a reward function that encourages outputs which are simple, fluent, and preserve the meaning of the input. Experiments on three datasets demonstrate that our model outperforms competitive simplification systems.", "phrases": ["deep reinforcement learning", "reward", "sentence simplification", "fluency", "seq2seq model"], "overall_score": 6.1049629371117256, "scores": [3.548808669165037, 0.8760696385989301, 1.2774727133934107, 1.240115086926209, 1.1239411385195242], "rank_score": 1.6132814493206218} -{"id": "phandi-etal-2015-flexible", "title": "Flexible Domain Adaptation for Automated Essay Scoring Using Correlated Linear Regression", "abstract": "Most of the current automated essay scoring (AES) systems are trained using manually graded essays from a specific prompt. These systems experience a drop in accuracy when used to grade an essay from a different prompt. Obtaining a large number of manually graded essays each time a new prompt is introduced is costly and not viable. We propose domain adaptation as a solution to adapt an AES system from an initial prompt to a new prompt. We also propose a novel domain adaptation technique that uses Bayesian linear ridge regression. We evaluate our domain adaptation technique on the publicly available Automated Student Assessment Prize (ASAP) dataset and show that our proposed technique is a competitive default domain adaptation algorithm for the AES task.", "phrases": ["domain adaptation", "essay", "cblrr"], "overall_score": 4.7482802242444215, "scores": [2.681538267213309, 1.6312367404126682, 0.5251043892722139], "rank_score": 1.6126264656327305} -{"id": "raganato-etal-2017-word", "title": "Word Sense Disambiguation: A Unified Evaluation Framework and Empirical Comparison", "abstract": "Word Sense Disambiguation is a long-standing task in Natural Language Processing, lying at the core of human language understanding. However, the evaluation of automatic systems has been problematic, mainly due to the lack of a reliable evaluation framework. In this paper we develop a unified evaluation framework and analyze the performance of various Word Sense Disambiguation systems in a fair setup. The results show that supervised systems clearly outperform knowledge-based models. Among the supervised systems, a linear classifier trained on conventional local features still proves to be a hard baseline to beat. Nonetheless, recent approaches exploiting neural networks on unlabeled corpora achieve promising results, surpassing this hard baseline in most test sets.", "phrases": ["unified evaluation framework", "word sense disambiguation", "wsd", "supervised model"], "overall_score": 5.189347778048767, "scores": [2.220833648319964, 2.0993037751137673, 1.2653468767560683, 0.8631618239721962], "rank_score": 1.6121615310404989} -{"id": "ma-hovy-2016-end", "title": "End-to-end Sequence Labeling via Bi-directional LSTM-CNNs-CRF", "abstract": "State-of-the-art sequence labeling systems traditionally require large amounts of task-specific knowledge in the form of hand-crafted features and data pre-processing. In this paper, we introduce a novel neural network architecture that benefits from both word- and character-level representations automatically, by using a combination of bidirectional LSTM, CNN and CRF. 
Our system is truly end-to-end, requiring no feature engineering or data pre-processing, thus making it applicable to a wide range of sequence labeling tasks. We evaluate our system on two data sets for two sequence labeling tasks --- the Penn Treebank WSJ corpus for part-of-speech (POS) tagging and the CoNLL 2003 corpus for named entity recognition (NER). We obtain state-of-the-art performance on both data sets --- 97.55\\% accuracy for POS tagging and 91.21\\% F1 for NER.", "phrases": ["sequence labeling", "bi-directional lstm-cnns-crf", "hand-crafted feature", "convolutional neural network", "crf layer"], "overall_score": 8.43311600700961, "scores": [2.706767958915459, 0.9388433740798323, 1.474131194760762, 1.473611464706746, 1.4671897896316723], "rank_score": 1.6121087564188943} -{"id": "quirk-etal-2007-generative", "title": "Generative models of noisy translations with applications to parallel fragment extraction", "abstract": "The development of broad domain statistical machine translation systems is gated by the availability of parallel data. A promising strategy for mitigating data scarcity is to mine parallel data from comparable corpora. Although comparable corpora seldom contain parallel sentences, they often contain parallel words or phrases. Recent fragment extraction approaches have shown that including parallel fragments in SMT training data can significantly improve translation quality. We describe efficient and effective generative models for extracting fragments, and demonstrate that these algorithms produce competitive improvements on cross-domain test data without suffering in-domain degradation even at very large scale.", "phrases": ["noisy translation", "fragment", "parallel data", "cross-domain test data", "generative model"], "overall_score": 4.744962450279972, "scores": [3.3860001271382223, 1.4299321885916711, 1.8240443359670773, 0.8469812550801159, 0.5705404551404927], "rank_score": 1.6114996723835158} -{"id": "recasens-etal-2010-typology", "title": "A Typology of Near-Identity Relations for Coreference (NIDENT)", "abstract": "The task of coreference resolution requires people or systems to decide when two referring expressions refer to the 'same' entity or event. In real text, this is often a difficult decision because identity is never adequately defined, leading to contradictory treatment of cases in previous work. This paper introduces the concept of 'near-identity', a middle ground category between identity and non-identity, to handle such cases systematically. We present a typology of Near-Identity Relations (NIDENT) that includes fifteen types\u2015grouped under four main families\u2015that capture a wide range of ways in which (near-)coreference relations hold between discourse entities. We validate the theoretical model by annotating a small sample of real data and showing that inter-annotator agreement is high enough for stability (K=0.58, and up to K=0.65 and K=0.84 when leaving out one and two outliers, respectively). 
This work enables subsequent creation of the first internally consistent language resource of this type through larger annotation efforts.", "phrases": ["typology", "near-identity relations", "coreference"], "overall_score": 3.5373953388280373, "scores": [2.081609741936409, 1.8308707447031316, 0.917333509592777], "rank_score": 1.6099379987441058} -{"id": "xin-etal-2020-deebert", "title": "DeeBERT: Dynamic Early Exiting for Accelerating BERT Inference", "abstract": "Large-scale pre-trained language models such as BERT have brought significant improvements to NLP applications. However, they are also notorious for being slow in inference, which makes them difficult to deploy in real-time applications. We propose a simple but effective method, DeeBERT, to accelerate BERT inference. Our approach allows samples to exit earlier without passing through the entire model. Experiments show that DeeBERT is able to save up to ~40% inference time with minimal degradation in model quality. Further analyses show different behaviors in the BERT transformer layers and also reveal their redundancy. Our work provides new ideas to efficiently apply deep transformer-based models to downstream tasks. Code is available at .", "phrases": ["exiting", "deebert", "current layer"], "overall_score": 4.653064257510797, "scores": [2.62190320635813, 1.6822730727793496, 0.5253729767315886], "rank_score": 1.6098497519563562} -{"id": "chakravarthi-etal-2018-improving", "title": "Improving Wordnets for Under-Resourced Languages Using Machine Translation", "abstract": "Wordnets are extensively used in natural language processing, but the current approaches for manually building a wordnet from scratch involve large research groups for a long period of time, which are typically not available for under-resourced languages. Even if wordnet-like resources are available for under-resourced languages, they are often not easily accessible, which can alter the results of applications using these resources. Our proposed method presents an expand approach for improving and generating wordnets with the help of machine translation. We apply our methods to improve and extend wordnets for the Dravidian languages, i.e., Tamil, Telugu, Kannada, which are severely under-resourced languages. We report evaluation results of the generated wordnet senses in terms of precision for these languages. In addition to that, we carried out a manual evaluation of the translations for the Tamil language, where we demonstrate that our approach can aid in improving wordnet resources for under-resourced Dravidian languages.", "phrases": ["machine translation", "dravidian language", "india"], "overall_score": 4.3594613329998575, "scores": [2.7563402408451654, 1.2428854486759284, 0.8302209705432243], "rank_score": 1.6098155533547727} -{"id": "yang-mitchell-2017-leveraging", "title": "Leveraging Knowledge Bases in LSTMs for Improving Machine Reading", "abstract": "This paper focuses on how to take advantage of external knowledge bases (KBs) to improve recurrent neural networks for machine reading. Traditional methods that exploit knowledge from KBs encode knowledge as discrete indicator features. Not only do these features generalize poorly, but they require task-specific feature engineering to achieve good performance. We propose KBLSTM, a novel neural model that leverages continuous representations of KBs to enhance the learning of recurrent neural networks for machine reading. 
To effectively integrate background knowledge with information from the currently processed text, our model employs an attention mechanism with a sentinel to adaptively decide whether to attend to background knowledge and which information from KBs is useful. Experimental results show that our model achieves accuracies that surpass the previous state-of-the-art results for both entity extraction and event extraction on the widely used ACE2005 dataset.", "phrases": ["machine reading", "entity extraction", "knowledge basis"], "overall_score": 4.739972131687478, "scores": [2.5960195723477466, 1.397258244209189, 0.8361367156090662], "rank_score": 1.6098048440553339} -{"id": "bertoldi-etal-2008-phrase", "title": "Phrase-based statistical machine translation with pivot languages.", "abstract": "Translation with pivot languages has recently gained attention as a means to circumvent the data bottleneck of statistical machine translation (SMT). This paper tries to give a mathematically sound formulation of the various approaches presented in the literature and introduces new methods for training alignment models through pivot languages. We present experimental results on Chinese-Spanish translation via English, on a popular traveling domain task. In contrast to previous literature, we report experimental results by using parallel corpora that are either disjoint or overlapped on the pivot language side. Finally, our original method for generating training data through random sampling is shown to perform as well as the best methods based on the coupling of translation systems.", "phrases": ["statistical machine translation", "pivot language", "source-target corpus", "bridging", "many researcher"], "overall_score": 5.044284394738328, "scores": [3.4807273880341048, 0.9569579063739435, 1.4617760266441713, 1.0910990192215766, 1.0532822685117869], "rank_score": 1.6087685217571166} -{"id": "ultes-etal-2017-pydial", "title": "PyDial: A Multi-domain Statistical Dialogue System Toolkit", "abstract": "Statistical Spoken Dialogue Systems have been around for many years. However, access to these systems has always been difficult as there is still no publicly available end-to-end system implementation. To alleviate this, we present PyDial, an open-source end-to-end statistical spoken dialogue system toolkit which provides implementations of statistical approaches for all dialogue system modules. Moreover, it has been extended to provide multi-domain conversational functionality. It offers easy configuration, easy extensibility, and domain-independent implementations of the respective dialogue system modules. The toolkit is available for download under the Apache 2.0 license.", "phrases": ["dialogue system toolkit", "statistical approach", "pydial"], "overall_score": 3.3453143343433056, "scores": [2.2806953107237975, 1.9998235751317377, 0.5457495145163156], "rank_score": 1.6087561334572837} -{"id": "zhao-etal-2017-learning", "title": "Learning Discourse-level Diversity for Neural Dialog Models using Conditional Variational Autoencoders", "abstract": "While recent neural encoder-decoder models have shown great promise in modeling open-domain conversations, they often generate dull and generic responses. Unlike past work that has focused on diversifying the output of the decoder from word-level to alleviate this problem, we present a novel framework based on conditional variational autoencoders that capture the discourse-level diversity in the encoder. 
Our model uses latent variables to learn a distribution over potential conversational intents and generates diverse responses using only greedy decoders. We have further developed a novel variant that is integrated with linguistic prior knowledge for better performance. Finally, the training procedure is improved through introducing a bag-of-word loss. Our proposed models have been validated to generate significantly more diverse responses than baseline approaches and exhibit competence of discourse-level decision-making.", "phrases": ["discourse-level diversity", "conditional variational autoencoders", "dialogue generation", "neural dialogue model"], "overall_score": 7.307728633569773, "scores": [2.729887743706428, 1.3516251972422613, 1.761193547253913, 0.5911517435703769], "rank_score": 1.6084645579432448} -{"id": "saunders-byrne-2020-reducing", "title": "Reducing Gender Bias in Neural Machine Translation as a Domain Adaptation Problem", "abstract": "Training data for NLP tasks often exhibits gender bias in that fewer sentences refer to women than to men. In Neural Machine Translation (NMT) gender bias has been shown to reduce translation quality, particularly when the target language has grammatical gender. The recent WinoMT challenge set allows us to measure this effect directly (Stanovsky et al., 2019). Ideally, we would reduce system bias by simply debiasing all data prior to training, but achieving this effectively is itself a challenge. Rather than attempt to create a \u2018balanced\u2019 dataset, we use transfer learning on a small set of trusted, gender-balanced examples. This approach gives strong and consistent improvements in gender debiasing with much less computational cost than training from scratch. A known pitfall of transfer learning on new domains is \u2018catastrophic forgetting\u2019, which we address at adaptation and inference time. During adaptation we show that Elastic Weight Consolidation allows a performance trade-off between general translation quality and bias reduction. At inference time we propose a lattice-rescoring scheme which outperforms all systems evaluated in Stanovsky et al. (2019) on WinoMT with no degradation of general test set BLEU. We demonstrate our approach translating from English into three languages with varied linguistic properties and data availability.", "phrases": ["gender bias", "neural machine translation", "domain adaptation problem"], "overall_score": 4.648099811435341, "scores": [3.1522326547258053, 0.8492915804492116, 0.8228722792912606], "rank_score": 1.6081321714887593} -{"id": "specia-etal-2013-quest", "title": "QuEst - A translation quality estimation framework", "abstract": "We describe QUEST, an open source framework for machine translation quality estimation. The framework allows the extraction of several quality indicators from source segments, their translations, external resources (corpora, language models, topic models, etc.), as well as language tools (parsers, part-of-speech tags, etc.). It also provides machine learning algorithms to build quality estimation models. 
We benchmark the framework on a number of datasets and discuss the efficacy of features and algorithms.", "phrases": ["quality estimation", "machine translation", "quest", "access"], "overall_score": 5.414655531730235, "scores": [2.6996199163521695, 1.7194196975714253, 1.1870564233815446, 0.8259560366606753], "rank_score": 1.6080130184914536} -{"id": "nguyen-etal-2016-j", "title": "J-NERD: Joint Named Entity Recognition and Disambiguation with Rich Linguistic Features", "abstract": "Methods for Named Entity Recognition and Disambiguation (NERD) perform NER and NED in two separate stages. Therefore, NED may be penalized with respect to precision by NER false positives, and suffers in recall from NER false negatives. Conversely, NED does not fully exploit information computed by NER such as types of mentions. This paper presents J-NERD, a new approach to perform NER and NED jointly, by means of a probabilistic graphical model that captures mention spans, mention types, and the mapping of mentions to entities in a knowledge base. We present experiments with different kinds of texts from the CoNLL'03, ACE'05, and ClueWeb'09-FACC1 corpora. J-NERD consistently outperforms state-of-the-art competitors in end-to-end NERD precision, recall, and F1.", "phrases": ["disambiguation", "graphical model", "j-nerd"], "overall_score": 3.1285579307651497, "scores": [1.8822822828319312, 1.8671652790028832, 1.0738346420490286], "rank_score": 1.6077607346279477} -{"id": "yu-li-2014-chinese", "title": "Chinese Spelling Error Detection and Correction Based on Language Model, Pronunciation, and Shape", "abstract": "Spelling check is an important preprocessing task when dealing with user generated texts such as tweets and product comments. Compared with some western languages such as English, Chinese spelling check is more complex because there is no word delimiter in Chinese written texts and misspelled characters can only be determined at word level. Our system works as follows. First, we use character-level n-gram language models to detect potential misspelled characters with low probabilities below some predefined threshold. Second, for each potential incorrect character, we generate a candidate set based on pronunciation and shape similarities. Third, we filter some candidate corrections if the candidate cannot form a legal word with its neighbors according to a word dictionary. Finally, we find the best candidate with the highest language model probability. If the probability is higher than a predefined threshold, then we replace the original character; or we consider the original character as correct and take no action. Our preliminary experiments show that our simple method can achieve relatively high precision but low recall.", "phrases": ["spelling error", "detection", "correction", "language model", "pronunciation"], "overall_score": 4.643767698019204, "scores": [2.6367262796516977, 1.7929001230155353, 1.2632846658865016, 1.8117923412195396, 0.5284634057634521], "rank_score": 1.606633363107345} -{"id": "zhang-etal-2016-rationale", "title": "Rationale-Augmented Convolutional Neural Networks for Text Classification", "abstract": "We present a new Convolutional Neural Network (CNN) model for text classification that jointly exploits labels on documents and their constituent sentences. Specifically, we consider scenarios in which annotators explicitly mark sentences (or snippets) that support their overall document categorization, i.e., they provide rationales. 
Our model exploits such supervision via a hierarchical approach in which each document is represented by a linear combination of the vector representations of its component sentences. We propose a sentence-level convolutional model that estimates the probability that a given sentence is a rationale, and we then scale the contribution of each sentence to the aggregate document representation in proportion to these estimates. Experiments on five classification datasets that have document labels and associated rationales demonstrate that our approach consistently outperforms strong baselines. Moreover, our model naturally provides explanations for its predictions.", "phrases": ["convolutional neural networks", "text classification", "rationale", "cnn model"], "overall_score": 4.8107248053446785, "scores": [3.141309380997935, 0.8943003687440628, 1.8404886317602016, 0.5473391837478737], "rank_score": 1.6058593913125183} -{"id": "faruqui-etal-2016-morphological", "title": "Morphological Inflection Generation Using Character Sequence to Sequence Learning", "abstract": "Morphological inflection generation is the task of generating the inflected form of a given lemma corresponding to a particular linguistic transformation. We model the problem of inflection generation as a character sequence to sequence learning problem and present a variant of the neural encoder-decoder model for solving it. Our model is language independent and can be trained in both supervised and semi-supervised settings. We evaluate our system on seven datasets of morphologically rich languages and achieve either better or comparable results to existing state-of-the-art models of inflection generation.", "phrases": ["character sequence", "rich language", "morphological inflection generation", "sequence-to-sequence model", "reinflection"], "overall_score": 5.840729419168726, "scores": [2.586385931729921, 2.253712124634409, 1.229298118868567, 1.0855414361020315, 0.8733662730300492], "rank_score": 1.6056607768729958} -{"id": "wang-etal-2007-jeopardy", "title": "What is the Jeopardy Model? A Quasi-Synchronous Grammar for QA", "abstract": "This paper presents a syntax-driven approach to question answering, specifically the answer-sentence selection problem for short-answer questions. Rather than using syntactic features to augment existing statistical classifiers (as in previous work), we build on the idea that questions and their (correct) answers relate to each other via loose but predictable syntactic transformations. We propose a probabilistic quasi-synchronous grammar, inspired by one proposed for machine translation (D. Smith and Eisner, 2006), and parameterized by mixtures of a robust nonlexical syntax/alignment model with a(n optional) lexical-semantics-driven log-linear model. Our model learns soft alignments as a hidden variable in discriminative training. Experimental results using the TREC dataset are shown to significantly outperform strong state-of-the-art baselines.", "phrases": ["quasi-synchronous grammar", "syntax-driven approach", "dependency tree"], "overall_score": 5.405027773329064, "scores": [3.73874883737797, 0.5561834411587855, 0.5205291892643455], "rank_score": 1.605153822600367} -{"id": "wang-etal-2021-kepler", "title": "KEPLER: A Unified Model for Knowledge Embedding and Pre-trained Language Representation", "abstract": "Pre-trained language representation models (PLMs) cannot well capture factual knowledge from text. 
In contrast, knowledge embedding (KE) methods can effectively represent the relational facts in knowledge graphs (KGs) with informative entity embeddings, but conventional KE models cannot take full advantage of the abundant textual information. In this paper, we propose a unified model for Knowledge Embedding and Pre-trained LanguagE Representation (KEPLER), which can not only better integrate factual knowledge into PLMs but also produce effective text-enhanced KE with the strong PLMs. In KEPLER, we encode textual entity descriptions with a PLM as their embeddings, and then jointly optimize the KE and language modeling objectives. Experimental results show that KEPLER achieves state-of-the-art performances on various NLP tasks, and also works remarkably well as an inductive KE model on KG link prediction. Furthermore, for pre-training and evaluating KEPLER, we construct Wikidata5M, a large-scale KG dataset with aligned entity descriptions, and benchmark state-of-the-art KE methods on it. It shall serve as a new KE benchmark and facilitate the research on large KG, inductive KE, and KG with text. The source code can be obtained from .", "phrases": ["unified model", "knowledge embedding", "language modeling objective"], "overall_score": 5.094727564025294, "scores": [2.53606154104919, 1.7256343802523917, 0.5475941373260619], "rank_score": 1.6030966862092146} -{"id": "irsoy-cardie-2014-opinion", "title": "Opinion Mining with Deep Recurrent Neural Networks", "abstract": "Recurrent neural networks (RNNs) are connectionist models of sequential data that are naturally applicable to the analysis of natural language. Recently, \u201cdepth in space\u201d \u2014 as an orthogonal notion to \u201cdepth in time\u201d \u2014 in RNNs has been investigated by stacking multiple layers of RNNs and shown empirically to bring a temporal hierarchy to the architecture. In this work we apply these deep RNNs to the task of opinion expression extraction formulated as a token-level sequence-labeling task. Experimental results show that deep, narrow RNNs outperform traditional shallow, wide RNNs with the same number of parameters. Furthermore, our approach outperforms previous CRF-based baselines, including the state-of-the-art semi-Markov CRF model, and does so without access to the powerful opinion lexicons and syntactic features relied upon by the semi-CRF, as well as without the standard layer-by-layer pre-training typically required of RNN architectures.", "phrases": ["recurrent neural network", "rnn", "opinion mining"], "overall_score": 4.953953359177275, "scores": [2.2100610022500664, 1.3742814702767558, 1.2236989250880508], "rank_score": 1.6026804658716245} -{"id": "barzilay-lee-2004-catching", "title": "Catching the Drift: Probabilistic Content Models, with Applications to Generation and Summarization", "abstract": "We consider the problem of modeling the content structure of texts within a specific domain, in terms of the topics the texts address and the order in which these topics appear. We first present an effective knowledge-lean method for learning content models from unannotated documents, utilizing a novel adaptation of algorithms for Hidden Markov Models. We then apply our method to two complementary tasks: information ordering and extractive summarization. 
Our experiments show that incorporating content models in these applications yields substantial improvement over previously-proposed methods.", "phrases": ["content model", "summarization", "hidden markov models", "hmm", "re-occurrence"], "overall_score": 6.16721872437404, "scores": [3.053649822977089, 2.1155833596533906, 1.6688484400189671, 0.5972771158691539, 0.5737091757022001], "rank_score": 1.6018135828441604} -{"id": "straka-strakova-2017-tokenizing", "title": "Tokenizing, POS Tagging, Lemmatizing and Parsing UD 2.0 with UDPipe", "abstract": "Many natural language processing tasks, including the most advanced ones, routinely start by several basic processing steps \u2013 tokenization and segmentation, most likely also POS tagging and lemmatization, and commonly parsing as well. A multilingual pipeline performing these steps can be trained using the Universal Dependencies project, which contains annotations of the described tasks for 50 languages in the latest release UD 2.0. We present an update to UDPipe, a simple-to-use pipeline processing CoNLL-U version 2.0 files, which performs these tasks for multiple languages without requiring additional external data. We provide models for all 50 languages of UD 2.0, and furthermore, the pipeline can be trained easily using data in CoNLL-U format. UDPipe is a standalone application in C++, with bindings available for Python, Java, C# and Perl. In the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, UDPipe was the eighth best system, while achieving low running times and moderately sized models.", "phrases": ["pos tagging", "udpipe", "tokenization"], "overall_score": 4.6298219926487025, "scores": [2.9817686820732616, 0.8945838915005555, 0.9290728669431952], "rank_score": 1.6018084801723376} -{"id": "gong-etal-2018-information", "title": "Information Aggregation via Dynamic Routing for Sequence Encoding", "abstract": "While much progress has been made in how to encode a text sequence into a sequence of vectors, less attention has been paid to how to aggregate these preceding vectors (outputs of RNN/CNN) into a fixed-size encoding vector. Usually, a simple max or average pooling is used, which is a bottom-up and passive way of aggregation and lacks guidance from task information. In this paper, we propose an aggregation mechanism to obtain a fixed-size encoding with a dynamic routing policy. The dynamic routing policy dynamically decides what and how much information needs to be transferred from each word to the final encoding of the text sequence. Following the work of Capsule Network, we design two dynamic routing policies to aggregate the outputs of the RNN/CNN encoding layer into a final encoding vector. Compared to the other aggregation methods, dynamic routing can refine the messages according to the state of the final encoding vector. Experimental results on five text classification tasks show that our method outperforms other aggregating models by a significant margin. Related source code is released on our github page. 
", "phrases": ["dynamic routing", "sequence encoding", "aggregation mechanism"], "overall_score": 3.6836280669194967, "scores": [2.9965677946651392, 1.2808550986169553, 0.5219151352591438], "rank_score": 1.5997793428470797} -{"id": "mostafazadeh-etal-2016-corpus", "title": "A Corpus and Cloze Evaluation for Deeper Understanding of Commonsense Stories", "abstract": "Representation and learning of commonsense knowledge is one of the foundational problems in the quest to enable deep language understanding. This issue is particularly challenging for understanding causal and correlational relationships between events. While this topic has received a lot of interest in the NLP community, research has been hindered by the lack of a proper evaluation framework. This paper attempts to address this problem with a new framework for evaluating story understanding and script learning: the `Story Cloze Test\u2019. This test requires a system to choose the correct ending to a four-sentence story. We created a new corpus of 50k five-sentence commonsense stories, ROCStories, to enable this evaluation. This corpus is unique in two ways: (1) it captures a rich set of causal and temporal commonsense relations between daily events, and (2) it is a high quality collection of everyday life stories that can also be used for story generation. Experimental evaluation shows that a host of baselines and state-of-the-art models based on shallow language understanding struggle to achieve a high score on the Story Cloze Test. We discuss these implications for script and story learning, and offer suggestions for deeper language understanding.", "phrases": ["story", "commonsense knowledge", "language understanding", "rocstories corpus", "causal relation"], "overall_score": 7.142820388088937, "scores": [2.9445283116361125, 1.6605921030834032, 1.3171866678379789, 1.24623916060398, 0.8285063140245995], "rank_score": 1.5994105114372148} -{"id": "gao-etal-2005-chinese", "title": "Chinese Word Segmentation and Named Entity Recognition: A Pragmatic Approach", "abstract": "This article presents a pragmatic approach to Chinese word segmentation. It differs from most previous approaches mainly in three respects. First, while theoretical linguists have defined Chinese words using various linguistic criteria, Chinese words in this study are defined pragmatically as segmentation units whose definition depends on how they are used and processed in realistic computer applications. Second, we propose a pragmatic mathematical framework in which segmenting known words and detecting unknown words of different types (i.e., morphologically derived words, factoids, named entities, and other unlisted words) can be performed simultaneously in a unified way. These tasks are usually conducted separately in other systems. Finally, we do not assume the existence of a universal word segmentation standard that is application-independent. Instead, we argue for the necessity of multiple segmentation standards due to the pragmatic fact that different natural language processing applications might require different granularities of Chinese words. These pragmatic approaches have been implemented in an adaptive Chinese word segmenter, called MSRSeg, which will be described in detail. 
It consists of two components: (1) a generic segmenter that is based on the framework of linear mixture models and provides a unified approach to the five fundamental features of word-level Chinese language processing: lexicon word processing, morphological analysis, factoid detection, named entity recognition, and new word identification; and (2) a set of output adaptors for adapting the output of (1) to different application-specific standards. Evaluation on five test sets with different standards shows that the adaptive system achieves state-of-the-art performance on all the test sets.", "phrases": ["word segmentation", "entity recognition", "pragmatic approach", "different type"], "overall_score": 4.620923311524168, "scores": [3.1924233689234827, 1.7753775579159814, 0.8967475264726709, 0.530370537852699], "rank_score": 1.5987297477912086} -{"id": "yadav-bethard-2018-survey", "title": "A Survey on Recent Advances in Named Entity Recognition from Deep Learning models", "abstract": "Named Entity Recognition (NER) is a key component in NLP systems for question answering, information retrieval, relation extraction, etc. NER systems have been studied and developed widely for decades, but accurate systems using deep neural networks (NN) have only been introduced in the last few years. We present a comprehensive survey of deep neural network architectures for NER, and contrast them with previous approaches to NER based on feature engineering and other supervised or semi-supervised learning algorithms. Our results highlight the improvements achieved by neural networks, and show how incorporating some of the lessons learned from past work on feature-based NER systems can yield further improvements.", "phrases": ["named entity recognition", "deep learning model", "question answering"], "overall_score": 5.382929566017356, "scores": [2.222265309607007, 2.04841199138801, 0.5250963742026679], "rank_score": 1.5985912250658947} -{"id": "lu-nguyen-2018-similar", "title": "Similar but not the Same: Word Sense Disambiguation Improves Event Detection via Neural Representation Matching", "abstract": "Event detection (ED) and word sense disambiguation (WSD) are two similar tasks in that they both involve identifying the classes (i.e. event types or word senses) of some word in a given sentence. It is thus possible to extract the knowledge hidden in the data for WSD, and utilize it to improve the performance on ED. In this work, we propose a method to transfer the knowledge learned on WSD to ED by matching the neural representations learned for the two tasks. Our experiments on two widely used datasets for ED demonstrate the effectiveness of the proposed method.", "phrases": ["word sense disambiguation", "event detection", "neural representation matching"], "overall_score": 2.2157259688560527, "scores": [2.2914801238902593, 1.663772113777347, 0.839673063188675], "rank_score": 1.5983084336187605} -{"id": "mayer-cysouw-2014-creating", "title": "Creating a massively parallel Bible corpus", "abstract": "We present our ongoing effort to create a massively parallel Bible corpus. While an ever-increasing number of Bible translations is available in electronic form on the internet, there is no large-scale parallel Bible corpus that allows language researchers to easily get access to the texts and their parallel structure for a large variety of different languages. We report on the current status of the corpus, with over 900 translations in more than 830 language varieties. 
All translations are tokenized (e.g., separating punctuation marks) and Unicode normalized. Mainly due to copyright restrictions only portions of the texts are made publicly available. However, we provide co-occurrence information for each translation in a (sparse) matrix format. All word forms in the translation are given together with their frequency and the verses in which they occur.", "phrases": ["parallel bible corpus", "word form", "pbc"], "overall_score": 4.097576719842877, "scores": [3.699154824482656, 0.5523174095465742, 0.5411097808041643], "rank_score": 1.5975273382777981} -{"id": "nn-2012-aida", "title": "AIDA: Automatic Identification and Glossing of Dialectal Arabic", "abstract": "AIDA is a system for dialect identification, classification and glossing on the token and sentence level for written Arabic. Automatic dialect identification in Arabic is quite challenging because of the diglossic nature of the language and informality associated with the typical genres where dialectal Arabic (DA) is used. Moreover, DA lacks a standard orthography. Additionally the abundance of faux amis between the different varieties of Arabic, namely between Modern Standard Arabic (MSA) and DA, exacerbates the challenge of identifying dialectal variants. Hence identifying whether a (sequence of) token(s) is MSA or DA and providing an MSA-Gloss for the dialectal tokens in an utterance can aid Arabic MT in handling such informal genres more accurately.", "phrases": ["glossing", "dialectal arabic", "sentence level"], "overall_score": 2.213782022190435, "scores": [2.241437500087383, 2.024845184130963, 0.5244358333165762], "rank_score": 1.596906172511641} -{"id": "ling-etal-2015-finding", "title": "Finding Function in Form: Compositional Character Models for Open Vocabulary Word Representation", "abstract": "We introduce a model for constructing vector representations of words by composing characters using bidirectional LSTMs. Relative to traditional word representation models that have independent vectors for each word type, our model requires only a single vector per character type and a fixed set of parameters for the compositional model. Despite the compactness of this model and, more importantly, the arbitrary nature of the form\u2010function relationship in language, our \u201ccomposed\u201d word representations yield state-of-the-art results in language modeling and part-of-speech tagging. Benefits over traditional baselines are particularly pronounced in morphologically rich languages (e.g., Turkish).", "phrases": ["character", "bidirectional lstm", "language modeling", "pos tagging", "bilstm"], "overall_score": 6.483933947825744, "scores": [2.3493870509214405, 1.599106729231373, 1.5090703695011434, 1.4780378110694796, 1.0486671584153542], "rank_score": 1.596853823827758} -{"id": "hulden-2009-foma", "title": "Foma: a Finite-State Compiler and Library", "abstract": "Foma is a compiler, programming language, and C library for constructing finite-state automata and transducers for various uses. It has specific support for many natural language processing applications such as producing morphological and phonological analyzers. Foma is largely compatible with the Xerox/PARC finite-state toolkit. 
It also embraces Unicode fully and supports various different formats for specifying regular expressions: the Xerox/PARC format, a Perl-like format, and a mathematical format that takes advantage of the 'Mathematical Operators' Unicode block.", "phrases": ["automata", "transducer", "foma"], "overall_score": 3.6763881105268315, "scores": [3.0247675918096517, 0.9100685662047855, 0.855069051195138], "rank_score": 1.596635069736525} -{"id": "maccartney-manning-2009-extended", "title": "An extended model of natural logic", "abstract": "We propose a model of natural language inference which identifies valid inferences by their lexical and syntactic features, without full semantic interpretation. We extend past work in natural logic, which has focused on semantic containment and monotonicity, by incorporating both semantic exclusion and implicativity. Our model decomposes an inference problem into a sequence of atomic edits linking premise to hypothesis; predicts a lexical semantic relation for each edit; propagates these relations upward through a semantic composition tree according to properties of intermediate nodes; and joins the resulting semantic relations across the edit sequence. A computational implementation of the model achieves 70% accuracy and 89% precision on the FraCaS test suite. Moreover, including this model as a component in an existing system yields significant performance gains on the Recognizing Textual Entailment challenge.", "phrases": ["natural logic", "containment", "exclusion", "table", "deduction"], "overall_score": 4.781147528808894, "scores": [4.476534845904693, 1.1451073672284915, 0.8998271850760331, 0.883569150510417, 0.5748927205335654], "rank_score": 1.59598625385064} -{"id": "fomicheva-etal-2021-eval4nlp", "title": "The Eval4NLP Shared Task on Explainable Quality Estimation: Overview and Results", "abstract": "In this paper, we introduce the Eval4NLP-2021 shared task on explainable quality estimation. Given a source-translation pair, this shared task requires not only to provide a sentence-level score indicating the overall quality of the translation, but also to explain this score by identifying the words that negatively impact translation quality. We present the data, annotation guidelines and evaluation setup of the shared task, describe the six participating systems, and analyze the results. To the best of our knowledge, this is the first shared task on explainable NLP evaluation metrics. Datasets and results are available at .", "phrases": ["eval4nlp", "explainable quality estimation", "sentence-level score"], "overall_score": 3.1056395145674514, "scores": [2.8711745605088104, 1.330620376588929, 0.586154058504892], "rank_score": 1.5959829985342104} -{"id": "mirkin-etal-2015-motivating", "title": "Motivating Personality-aware Machine Translation", "abstract": "Language use is known to be influenced by personality traits as well as by sociodemographic characteristics such as age or mother tongue. As a result, it is possible to automatically identify these traits of the author from her texts. It has recently been shown that knowledge of such dimensions can improve performance in NLP tasks such as topic and sentiment modeling. We posit that machine translation is another application that should be personalized. In order to motivate this, we explore whether translation preserves demographic and psychometric traits. 
We show that, largely, both translation of the source training data into the target language, and the target test data into the source language have a detrimental effect on the accuracy of predicting author traits. We argue that this supports the need for personal and personality-aware machine translation models.", "phrases": ["machine translation", "personalization", "trait"], "overall_score": 3.9656699263607904, "scores": [2.998830613456648, 0.8991585443385239, 0.8897197809278843], "rank_score": 1.595902979574352} -{"id": "kudo-richardson-2018-sentencepiece", "title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing", "abstract": "This paper describes SentencePiece, a language-independent subword tokenizer and detokenizer designed for Neural-based text processing, including Neural Machine Translation. It provides open-source C++ and Python implementations for subword units. While existing subword segmentation tools assume that the input is pre-tokenized into word sequences, SentencePiece can train subword models directly from raw sentences, which allows us to make a purely end-to-end and language independent system. We perform a validation experiment of NMT on English-Japanese machine translation, and find that it is possible to achieve comparable accuracy to direct subword training from raw sentences. We also compare the performance of subword training and segmentation with various configurations. SentencePiece is available under the Apache 2 license at .", "phrases": ["tokenization", "neural text processing", "sentencepiece", "vocabulary size"], "overall_score": 5.673759180165419, "scores": [4.3095480561237665, 0.9323159675452186, 0.5975618987267864, 0.5439256730466808], "rank_score": 1.595837898860613} -{"id": "kumar-byrne-2004-minimum", "title": "Minimum Bayes-Risk Decoding for Statistical Machine Translation", "abstract": "We present Minimum Bayes-Risk (MBR) decoding for statistical machine translation. This statistical approach aims to minimize expected loss of translation errors under loss functions that measure translation performance. We describe a hierarchy of loss functions that incorporate different levels of linguistic information from word strings, word-to-word alignments from an MT system, and syntactic structure from parse-trees of source and target language sentences. We report the performance of the MBR decoders on a Chinese-to-English translation task. Our results show that MBR decoding can be used to tune statistical MT performance for specific loss functions.", "phrases": ["statistical machine translation", "mbr", "minimum bayes-risk", "hypothesis", "system combination"], "overall_score": 5.427702685675257, "scores": [2.7215782876431014, 0.9610555073923684, 2.15738619253449, 1.2589009580119752, 0.880184758396283], "rank_score": 1.5958211407956437} -{"id": "habernal-gurevych-2016-argument", "title": "Which argument is more convincing? Analyzing and predicting convincingness of Web arguments using bidirectional LSTM", "abstract": "We propose a new task in the field of computational argumentation in which we investigate qualitative properties of Web arguments, namely their convincingness. We cast the problem as relation classification, where a pair of arguments having the same stance to the same prompt is judged. 
We annotate a large dataset of 16k pairs of arguments over 32 topics and investigate whether the relation \u201cA is more convincing than B\u201d exhibits properties of total ordering; these findings are used as global constraints for cleaning the crowdsourced data. We propose two tasks: (1) predicting which argument from an argument pair is more convincing and (2) ranking all arguments to the topic based on their convincingness. We experiment with feature-rich SVM and bidirectional LSTM and obtain 0.76-0.78 accuracy and 0.35-0.40 Spearman\u2019s correlation in a cross-topic evaluation. We release the newly created corpus UKPConvArg1 and the experimental software under open licenses.", "phrases": ["convincingness", "bidirectional lstm", "annotator", "argument pair", "social medium argument"], "overall_score": 5.530155286646112, "scores": [3.5197232706706045, 2.2495370170671785, 1.0592664681680959, 0.5903330909773129, 0.5594677605070381], "rank_score": 1.595665521478046} -{"id": "koehn-knight-2003-empirical", "title": "Empirical Methods for Compound Splitting", "abstract": "Compounded words are a challenge for NLP applications such as machine translation (MT). We introduce methods to learn splitting rules from monolingual and parallel corpora. We evaluate them against a gold standard and measure their impact on performance of statistical MT systems. Results show accuracy of 99.1% and performance gains for MT of 0.039 BLEU on a German-English noun phrase translation task.", "phrases": ["compound splitting", "parallel corpora", "german", "segmentation", "empirical method"], "overall_score": 6.209554279390643, "scores": [3.495494185133936, 1.7742603733673825, 1.1916391426814614, 0.9010802561908088, 0.6152251702110192], "rank_score": 1.5955398255169215} -{"id": "agirre-soroa-2009-personalizing", "title": "Personalizing PageRank for Word Sense Disambiguation", "abstract": "In this paper we propose a new graph-based method that uses the knowledge in an LKB (based on WordNet) in order to perform unsupervised Word Sense Disambiguation. Our algorithm uses the full graph of the LKB efficiently, performing better than previous approaches in English all-words datasets. We also show that the algorithm can be easily ported to other languages with good results, with the only requirement of having a wordnet. In addition, we make an analysis of the performance of the algorithm, showing that it is efficient and that it could be tuned to be faster.", "phrases": ["pagerank", "word sense disambiguation", "wsd", "knowledge base", "node"], "overall_score": 5.528404292985817, "scores": [2.8087364257824166, 1.463483206979228, 1.5776583827063146, 1.2666127687069426, 0.8593106733449916], "rank_score": 1.5951602915039789} -{"id": "johnson-2010-pcfgs", "title": "PCFGs, Topic Models, Adaptor Grammars and Learning Topical Collocations and the Structure of Proper Names", "abstract": "This paper establishes a connection between two apparently very different kinds of probabilistic models. Latent Dirichlet Allocation (LDA) models are used as \"topic models\" to produce a low-dimensional representation of documents, while Probabilistic Context-Free Grammars (PCFGs) define distributions over trees. The paper begins by showing that LDA topic models can be viewed as a special kind of PCFG, so Bayesian inference for PCFGs can be used to infer Topic Models as well. Adaptor Grammars (AGs) are a hierarchical, non-parametric Bayesian extension of PCFGs. 
Exploiting the close relationship between LDA and PCFGs just described, we propose two novel probabilistic models that combine insights from LDA and AG models. The first replaces the unigram component of LDA topic models with multi-word sequences or collocations generated by an AG. The second extension builds on the first one to learn aspects of the internal structure of proper names.", "phrases": ["topic models", "adaptor grammars", "collocation", "proper name"], "overall_score": 3.315119803279558, "scores": [2.3571385820562742, 2.292352031249534, 0.8742010532101286, 0.8532508670096832], "rank_score": 1.594235633381405} -{"id": "xu-etal-2012-paraphrasing", "title": "Paraphrasing for Style", "abstract": "We present an initial investigation into the task of paraphrasing language while targeting a particular writing style. The plays of William Shakespeare and their modern translations are used as a testbed for evaluating paraphrase systems targeting a specific style of writing. We show that even with a relatively small amount of parallel training data, it is possible to learn paraphrase models which capture stylistic phenomena, and these models outperform baselines based on dictionaries and out-of-domain parallel text. In addition we present an initial investigation into automatic evaluation metrics for paraphrasing writing style. To the best of our knowledge this is the first work to investigate the task of paraphrasing text with the goal of targeting a specific style of writing.", "phrases": ["style", "paraphrasing", "parallel corpus", "shakespearean english", "fluency"], "overall_score": 5.193833828724078, "scores": [4.129929742591027, 1.5644032338671778, 0.8879054602037469, 0.828937450114675, 0.5594808572959354], "rank_score": 1.5941313488145124} -{"id": "yasunaga-etal-2021-qa", "title": "QA-GNN: Reasoning with Language Models and Knowledge Graphs for Question Answering", "abstract": "The problem of answering questions using knowledge from pre-trained language models (LMs) and knowledge graphs (KGs) presents two challenges: given a QA context (question and answer choice), methods need to (i) identify relevant knowledge from large KGs, and (ii) perform joint reasoning over the QA context and KG. Here we propose a new model, QA-GNN, which addresses the above challenges through two key innovations: (i) relevance scoring, where we use LMs to estimate the importance of KG nodes relative to the given QA context, and (ii) joint reasoning, where we connect the QA context and KG to form a joint graph, and mutually update their representations through graph-based message passing. We evaluate QA-GNN on the CommonsenseQA and OpenBookQA datasets, and show its improvement over existing LM and LM+KG models, as well as its capability to perform interpretable and structured reasoning, e.g., correctly handling negation in questions.", "phrases": ["reasoning", "knowledge graph", "relevance scoring", "qa-gnn"], "overall_score": 4.202764109459438, "scores": [3.388707366131224, 1.357865006321812, 1.0942305151561655, 0.5292961053856173], "rank_score": 1.5925247482487046} -{"id": "di-fabio-etal-2019-verbatlas", "title": "VerbAtlas: a Novel Large-Scale Verbal Semantic Resource and Its Application to Semantic Role Labeling", "abstract": "We present VerbAtlas, a new, hand-crafted lexical-semantic resource whose goal is to bring together all verbal synsets from WordNet into semantically-coherent frames. 
The frames define a common, prototypical argument structure while at the same time providing new concept-specific information. In contrast to PropBank, which defines enumerative semantic roles, VerbAtlas comes with an explicit, cross-frame set of semantic roles linked to selectional preferences expressed in terms of WordNet synsets, and is the first resource enriched with semantic information about implicit, shadow, and default arguments. We demonstrate the effectiveness of VerbAtlas in the task of dependency-based Semantic Role Labeling and show how its integration into a high-performance system leads to improvements on both the in-domain and out-of-domain test sets of CoNLL-2009. VerbAtlas is available at .", "phrases": ["prototypical argument structure", "concept-specific information", "verbatlas"], "overall_score": 3.0978935823095712, "scores": [3.305646450820034, 0.9158820932517017, 0.5544785862885968], "rank_score": 1.5920023767867775} -{"id": "ferguson-etal-2015-disfluency", "title": "Disfluency Detection with a Semi-Markov Model and Prosodic Features", "abstract": "We present a discriminative model for detecting disfluencies in spoken language transcripts. Structurally, our model is a semi-Markov conditional random field with features targeting characteristics unique to speech repairs. This gives a significant performance improvement over standard chain-structured CRFs that have been employed in past work. We then incorporate prosodic features over silences and relative word duration into our semi-CRF model, resulting in further performance gains; moreover, these features are not easily replaced by discrete prosodic indicators such as ToBI breaks. Our final system, the semi-CRF with prosodic information, achieves an F-score of 85.4, which is 1.3 F1 better than the best prior reported F-score on this dataset.", "phrases": ["prosodic feature", "semi-crf", "disfluency detection"], "overall_score": 3.310090815223798, "scores": [2.3914111846942294, 1.8603549865622222, 0.5236854327590277], "rank_score": 1.5918172013384932} -{"id": "maas-etal-2011-learning", "title": "Learning Word Vectors for Sentiment Analysis", "abstract": "Unsupervised vector-based approaches to semantics can model rich lexical meanings, but they largely fail to capture sentiment information that is central to many word meanings and important for a wide range of NLP tasks. We present a model that uses a mix of unsupervised and supervised techniques to learn word vectors capturing semantic term--document information as well as rich sentiment content. The proposed model can leverage both continuous and multi-dimensional sentiment information as well as non-sentiment annotations. We instantiate the model to utilize the document-level sentiment polarity annotations present in many online documents (e.g. star ratings). We evaluate the model using small, widely used sentiment and subjectivity corpora and find it out-performs several previously introduced methods for sentiment classification. 
We also introduce a large dataset of movie reviews to serve as a more robust benchmark for work in this area.", "phrases": ["word vector", "movie review", "probabilistic model", "document model", "product"], "overall_score": 6.490659401782288, "scores": [4.175560030037639, 1.61693815074537, 1.0966132218100737, 0.5423356319819994, 0.5275962496051971], "rank_score": 1.5918086568360559} -{"id": "li-etal-2017-dailydialog", "title": "DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset", "abstract": "We develop a high-quality multi-turn dialog dataset, DailyDialog, which is intriguing in several aspects. The language is human-written and less noisy. The dialogues in the dataset reflect our day-to-day communication and cover various topics about our daily life. We also manually label the developed dataset with communication intention and emotion information. Then, we evaluate existing approaches on the DailyDialog dataset and hope it benefits the research field of dialog systems. The dataset is available on ", "phrases": ["multi-turn dialog dataset", "dialog system", "dailydialog", "conversation", "learner"], "overall_score": 6.128142465122067, "scores": [4.295388956436932, 1.6311288740992929, 0.8615289549651913, 0.6388395482035901, 0.5314351373994098], "rank_score": 1.5916642942208834} -{"id": "pagnoni-etal-2021-understanding", "title": "Understanding Factuality in Abstractive Summarization with FRANK: A Benchmark for Factuality Metrics", "abstract": "Modern summarization models generate highly fluent but often factually unreliable outputs. This motivated a surge of metrics attempting to measure the factuality of automatically generated summaries. Due to the lack of common benchmarks, these metrics cannot be compared. Moreover, all these methods treat factuality as a binary concept and fail to provide deeper insights on the kinds of inconsistencies made by different systems. To address these limitations, we devise a typology of factual errors and use it to collect human annotations of generated summaries from state-of-the-art summarization systems for the CNN/DM and XSum datasets. Through these annotations we identify the proportion of different categories of factual errors and benchmark factuality metrics, showing their correlation with human judgement as well as their specific strengths and weaknesses.", "phrases": ["summarization", "factuality metric", "consistency"], "overall_score": 4.686262711464878, "scores": [2.8500016638536585, 1.4035550144926359, 0.5211349467369423], "rank_score": 1.5915638750277454} -{"id": "wu-etal-2021-applying", "title": "Applying the Transformer to Character-level Transduction", "abstract": "The transformer has been shown to outperform recurrent neural network-based sequence-to-sequence models in various word-level NLP tasks. Yet for character-level transduction tasks, e.g. morphological inflection generation and historical text normalization, there are few works that outperform recurrent models using the transformer. In an empirical study, we uncover that, in contrast to recurrent sequence-to-sequence models, the batch size plays a crucial role in the performance of the transformer on character-level tasks, and we show that with a large enough batch size, the transformer does indeed outperform recurrent models. We also introduce a simple technique to handle feature-guided character-level transduction that further improves performance. With these insights, we achieve state-of-the-art performance on morphological inflection and historical text normalization. 
We also show that the transformer outperforms a strong baseline on two other character-level transduction tasks: grapheme-to-phoneme conversion and transliteration.", "phrases": ["transformer", "character-level transduction", "character-level task"], "overall_score": 3.496935742479824, "scores": [2.18313671050545, 2.0281910725967123, 0.5632443455957074], "rank_score": 1.59152404289929} -{"id": "malmasi-zampieri-2017-detecting", "title": "Detecting Hate Speech in Social Media", "abstract": "In this paper we examine methods to detect hate speech in social media, while distinguishing this from general profanity. We aim to establish lexical baselines for this task by applying supervised classification methods using a recently released dataset annotated for this purpose. As features, our system uses character n-grams, word n-grams and word skip-grams. We obtain results of 78% accuracy in identifying posts across three classes. Results demonstrate that the main challenge lies in discriminating profanity and hate speech from each other. A number of directions for future work are discussed.", "phrases": ["hate speech", "social media", "abusive language workshop", "cyberbullying", "english tweet"], "overall_score": 5.870801583786462, "scores": [5.344968598433076, 0.8934367863152705, 0.5904937642013566, 0.570207416606418, 0.5583255717819384], "rank_score": 1.5914864274676117} -{"id": "gliwa-etal-2019-samsum", "title": "SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization", "abstract": "This paper introduces the SAMSum Corpus, a new dataset with abstractive dialogue summaries. We investigate the challenges it poses for automated summarization by testing several models and comparing their results with those obtained on a corpus of news articles. We show that model-generated summaries of dialogues achieve higher ROUGE scores than the model-generated summaries of news \u2013 in contrast with human evaluators' judgement. This suggests that a challenging task of abstractive dialogue summarization requires dedicated models and non-standard quality measures. To our knowledge, our study is the first attempt to introduce a high-quality chat-dialogues corpus, manually annotated with abstractive summarizations, which can be used by the research community for further studies.", "phrases": ["abstractive summarization", "samsum corpus", "conversation", "dialogue summarization dataset"], "overall_score": 5.405724929500937, "scores": [2.2501740911517127, 0.9087582941039462, 1.6668340657237208, 1.531671031063105], "rank_score": 1.5893593705106213} -{"id": "xu-etal-2020-improving", "title": "Improving AMR Parsing with Sequence-to-Sequence Pre-training", "abstract": "In the literature, the research on abstract meaning representation (AMR) parsing is much restricted by the size of human-curated dataset which is critical to build an AMR parser with good performance. To alleviate such data size restriction, pre-trained models have been drawing more and more attention in AMR parsing. However, previous pre-trained models, like BERT, are implemented for general purpose which may not work as expected for the specific task of AMR parsing. In this paper, we focus on sequence-to-sequence (seq2seq) AMR parsing and propose a seq2seq pre-training approach to build pre-trained models in both single and joint way on three relevant tasks, i.e., machine translation, syntactic parsing, and AMR parsing itself. 
Moreover, we extend the vanilla fine-tuning method to a multi-task learning fine-tuning method that optimizes for the performance of AMR parsing while endeavoring to preserve the response of pre-trained models. Extensive experimental results on two English benchmark datasets show that both the single and joint pre-trained models significantly improve the performance (e.g., from 71.5 to 80.2 on AMR 2.0), which reaches the state of the art. The result is very encouraging since we achieve this with seq2seq models rather than complex models. We make our code and model available at .", "phrases": ["amr", "sequence-to-sequence", "machine translation"], "overall_score": 3.0923536716022983, "scores": [2.44077033791178, 1.7891288609104892, 0.5375670787500312], "rank_score": 1.5891554258574336} -{"id": "potthast-etal-2018-stylometric", "title": "A Stylometric Inquiry into Hyperpartisan and Fake News", "abstract": "We report on a comparative style analysis of hyperpartisan (extremely one-sided) news and fake news. A corpus of 1,627 articles from 9 political publishers, three each from the mainstream, the hyperpartisan left, and the hyperpartisan right, have been fact-checked by professional journalists at BuzzFeed: 97% of the 299 fake news articles identified are also hyperpartisan. We show how a style analysis can distinguish hyperpartisan news from the mainstream (F1 = 0.78), and satire from both (F1 = 0.81). But stylometry is no silver bullet as style-based fake news detection does not work (F1 = 0.46). We further reveal that left-wing and right-wing news share significantly more stylistic similarities than either does with the mainstream. This result is robust: it has been confirmed by three different modeling approaches, one of which employs Unmasking in a novel way. Applications of our results include partisanship detection and pre-screening for semi-automatic fake news detection.", "phrases": ["fake news", "professional journalist", "hyperpartisan news", "propaganda detection", "story"], "overall_score": 6.013618514856718, "scores": [4.307580093918468, 1.7379269476158958, 0.8268011524532806, 0.5429016859399893, 0.5305051827581139], "rank_score": 1.5891430125371495} -{"id": "yang-etal-2007-building", "title": "Building Emotion Lexicon from Weblog Corpora", "abstract": "An emotion lexicon is an indispensable resource for emotion analysis. This paper aims to mine the relationships between words and emotions using weblog corpora. A collocation model is proposed to learn emotion lexicons from weblog articles. Emotion classification at sentence level is experimented by using the mined lexicons to demonstrate their usefulness.", "phrases": ["emotion lexicon", "weblog corpora", "collocation model"], "overall_score": 3.8100339883829033, "scores": [3.009839228996682, 0.9097051051801636, 0.8471784225720991], "rank_score": 1.5889075855829813} -{"id": "bapna-firat-2019-simple", "title": "Simple, Scalable Adaptation for Neural Machine Translation", "abstract": "Fine-tuning pre-trained Neural Machine Translation (NMT) models is the dominant approach for adapting to new languages and domains. However, fine-tuning requires adapting and maintaining a separate model for each target task. We propose a simple yet efficient approach for adaptation in NMT. Our proposed approach consists of injecting tiny task specific adapter layers into a pre-trained model. These lightweight adapters, with just a small fraction of the original model size, adapt the model to multiple individual tasks simultaneously. 
We evaluate our approach on two tasks: (i) Domain Adaptation and (ii) Massively Multilingual NMT. Experiments on domain adaptation demonstrate that our proposed approach is on par with full fine-tuning on various domains, dataset sizes and model capacities. On a massively multilingual dataset of 103 languages, our adaptation approach bridges the gap between individual bilingual models and one massively multilingual model for most language pairs, paving the way towards universal machine translation.", "phrases": ["adapter", "neural machine translation", "pre-trained model", "multilingual nmt"], "overall_score": 5.816854554778784, "scores": [2.060510694542309, 1.9760519359570796, 1.4206593233643268, 0.8938157702307864], "rank_score": 1.5877594310236256} -{"id": "yang-etal-2018-hotpotqa", "title": "HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering", "abstract": "Existing question answering (QA) datasets fail to train QA systems to perform complex reasoning and provide explanations for answers. We introduce HotpotQA, a new dataset with 113k Wikipedia-based question-answer pairs with four key features: (1) the questions require finding and reasoning over multiple supporting documents to answer; (2) the questions are diverse and not constrained to any pre-existing knowledge bases or knowledge schemas; (3) we provide sentence-level supporting facts required for reasoning, allowing QA systems to reason with strong supervision and explain the predictions; (4) we offer a new type of factoid comparison questions to test QA systems' ability to extract relevant facts and perform necessary comparison. We show that HotpotQA is challenging for the latest QA systems, and the supporting facts enable models to improve performance and make explainable predictions.", "phrases": ["multi-hop question", "wikipedia-based question-answer pair", "hotpotqa", "complex question", "wikipedia page"], "overall_score": 7.477407715504676, "scores": [2.7812218027126523, 2.0561864573875717, 1.698661369798684, 0.8362295696454768, 0.5662927574846415], "rank_score": 1.587718391405805} -{"id": "ku-etal-2020-room", "title": "Room-Across-Room: Multilingual Vision-and-Language Navigation with Dense Spatiotemporal Grounding", "abstract": "We introduce Room-Across-Room (RxR), a new Vision-and-Language Navigation (VLN) dataset. RxR is multilingual (English, Hindi, and Telugu) and larger (more paths and instructions) than other VLN datasets. It emphasizes the role of language in VLN by addressing known biases in paths and eliciting more references to visible entities. Furthermore, each word in an instruction is time-aligned to the virtual poses of instruction creators and validators. We establish baseline scores for monolingual and multilingual settings and multitask learning when including Room-to-Room annotations (Anderson et al., 2018). We also provide results for a model that learns from synchronized pose traces by focusing only on portions of the panorama attended to in human demonstrations. 
The size, scope and detail of RxR dramatically expands the frontier for research on embodied language agents in photorealistic simulated environments.", "phrases": ["vision-and-language navigation", "environment", "room-across-room"], "overall_score": 3.08887435610654, "scores": [2.2825921554299677, 1.949268153837939, 0.5302419249068394], "rank_score": 1.5873674113915819} -{"id": "strzalkowski-etal-2013-robust", "title": "Robust Extraction of Metaphor from Novel Data", "abstract": "This article describes our novel approach to the automated detection and analysis of metaphors in text. We employ robust, quantitative language processing to implement a system prototype combined with sound social science methods for validation. We show results in 4 different languages and discuss how our methods are a significant step forward from previously established techniques of metaphor identification. We use Topical Structure and Tracking, an Imageability score, and innovative methods to build an effective metaphor identification system that is fully automated and performs well over baseline.", "phrases": ["metaphor", "novel data", "robust extraction"], "overall_score": 4.298120888702796, "scores": [3.1808536025010112, 0.7916091700086708, 0.7890304453266127], "rank_score": 1.5871644059454315} -{"id": "yang-etal-2018-sgm", "title": "SGM: Sequence Generation Model for Multi-label Classification", "abstract": "Multi-label classification is an important yet challenging task in natural language processing. It is more complex than single-label classification in that the labels tend to be correlated. Existing methods tend to ignore the correlations between labels. Besides, different parts of the text can contribute differently for predicting different labels, which is not considered by existing models. In this paper, we propose to view the multi-label classification task as a sequence generation problem, and apply a sequence generation model with a novel decoder structure to solve it. Extensive experimental results show that our proposed methods outperform previous work by a substantial margin. Further analysis of experimental results demonstrates that the proposed methods not only capture the correlations between labels, but also select the most informative words automatically when predicting different labels.", "phrases": ["sequence generation model", "multi-label classification", "sgm"], "overall_score": 3.942618750215437, "scores": [2.4265464401781687, 1.3333281063721405, 1.0000049650826144], "rank_score": 1.586626503877641} -{"id": "arthur-etal-2016-incorporating", "title": "Incorporating Discrete Translation Lexicons into Neural Machine Translation", "abstract": "Neural machine translation (NMT) often makes mistakes in translating low-frequency content words that are essential to understanding the meaning of the sentence. We propose a method to alleviate this problem by augmenting NMT systems with discrete translation lexicons that efficiently encode translations of these low-frequency words. We describe a method to calculate the lexicon probability of the next word in the translation candidate by using the attention vector of the NMT model to select which source word lexical probabilities the model should focus on. We test two methods to combine this probability with the standard NMT probability: (1) using it as a bias, and (2) linear interpolation. 
Experiments on two corpora show an improvement of 2.0-2.3 BLEU and 0.13-0.44 NIST score, and faster convergence time.", "phrases": ["neural machine translation", "probability", "source sentence"], "overall_score": 6.038111877128198, "scores": [2.0529144747681425, 1.803748724063689, 0.9019240623355098], "rank_score": 1.586195753722447} -{"id": "swanson-etal-2020-rationalizing", "title": "Rationalizing Text Matching: Learning Sparse Alignments via Optimal Transport", "abstract": "Selecting input features of top relevance has become a popular method for building self-explaining models. In this work, we extend this selective rationalization approach to text matching, where the goal is to jointly select and align text pieces, such as tokens or sentences, as a justification for the downstream prediction. Our approach employs optimal transport (OT) to find a minimal cost alignment between the inputs. However, directly applying OT often produces dense and therefore uninterpretable alignments. To overcome this limitation, we introduce novel constrained variants of the OT problem that result in highly sparse alignments with controllable sparsity. Our model is end-to-end differentiable using the Sinkhorn algorithm for OT and can be trained without any alignment annotations. We evaluate our model on the StackExchange, MultiNews, e-SNLI, and MultiRC datasets. Our model achieves very sparse rationale selections with high fidelity while preserving prediction accuracy compared to strong attention baseline models.", "phrases": ["sparse alignment", "optimal transport", "rationale"], "overall_score": 3.0862463193739904, "scores": [2.2840365205399102, 1.9395013236272876, 0.5345127588459163], "rank_score": 1.586016867671038} -{"id": "yang-etal-2015-wikiqa", "title": "WikiQA: A Challenge Dataset for Open-Domain Question Answering", "abstract": "We describe the WIKIQA dataset, a new publicly available set of question and sentence pairs, collected and annotated for research on open-domain question answering. Most previous work on answer sentence selection focuses on a dataset created using the TREC-QA data, which includes editor-generated questions and candidate answer sentences selected by matching content words in the question. WIKIQA is constructed using a more natural process and is more than an order of magnitude larger than the previous dataset. In addition, the WIKIQA dataset also includes questions for which there are no correct sentences, enabling researchers to work on answer triggering, a critical component in any QA system. We compare several systems on the task of answer sentence selection on both datasets and also describe the performance of a system on the problem of answer triggering using the WIKIQA dataset.", "phrases": ["open-domain question answering", "wikiqa", "semantic feature", "paragraph", "state-of-the-art result"], "overall_score": 6.294720073699544, "scores": [5.0008819133945455, 0.982036253345012, 0.8765662012368711, 0.5340288800081688, 0.533762895380657], "rank_score": 1.5854552286730508} -{"id": "gulcehre-etal-2016-pointing", "title": "Pointing the Unknown Words", "abstract": "The problem of rare and unknown words is an important issue that can potentially influence the performance of many NLP systems, including both the traditional count-based and the deep learning models. We propose a novel way to deal with the rare and unseen words for the neural network models using attention. 
Our model uses two softmax layers in order to predict the next word in conditional language models: one predicts the location of a word in the source sentence, and the other predicts a word in the shortlist vocabulary. At each time-step, the decision of which softmax layer to use is made adaptively by an MLP which is conditioned on the context. We motivate our work with psychological evidence that humans naturally have a tendency to point towards objects in the context or the environment when the name of an object is not known. We observe improvements on two tasks, neural machine translation on the Europarl English to French parallel corpora and text summarization on the Gigaword dataset using our proposed model.", "phrases": ["unknown word", "location", "neural machine translation", "copy mechanism", "pointer network"], "overall_score": 6.353099961677991, "scores": [2.825647153645711, 1.8231774714674964, 1.640802046718112, 1.0539735476027354, 0.5832424812053209], "rank_score": 1.5853685401278752} -{"id": "tang-etal-2010-cascade", "title": "A Cascade Method for Detecting Hedges and their Scope in Natural Language Text", "abstract": "Detecting hedges and their scope in natural language text is very important for information inference. In this paper, we present a system based on a cascade method for the CoNLL-2010 shared task. The system consists of two components: one for detecting hedges and another for detecting their scope. For detecting hedges, we build a cascade subsystem. Firstly, a conditional random field (CRF) model and a large margin-based model are trained respectively. Then, we train another CRF model using the result of the first phase. For detecting the scope of hedges, a CRF model is trained according to the result of the first subtask. The experiments show that our system achieves 86.36% F-measure on the biological corpus and 55.05% F-measure on the Wikipedia corpus for hedge detection, and 49.95% F-measure on the biological corpus for hedge scope detection. Among them, 86.36% is the best result on the biological corpus for hedge detection.", "phrases": ["cascade method", "scope", "natural language text"], "overall_score": 3.6502152830372117, "scores": [2.1353049238239485, 1.7129999449906188, 0.9075001967313653], "rank_score": 1.5852683551819775} -{"id": "burlot-yvon-2018-using", "title": "Using Monolingual Data in Neural Machine Translation: a Systematic Study", "abstract": "Neural Machine Translation (MT) has radically changed the way systems are developed. A major difference with the previous generation (Phrase-Based MT) is the way monolingual target data, which often abounds, is used in these two paradigms. While Phrase-Based MT can seamlessly integrate very large language models trained on billions of sentences, the best option for Neural MT developers seems to be the generation of artificial parallel data through back-translation - a technique that fails to fully take advantage of existing datasets. In this paper, we conduct a systematic study of back-translation, comparing alternative uses of monolingual data, as well as multiple data generation procedures. Our findings confirm that back-translation is very effective and give new explanations as to why this is the case. 
We also introduce new data simulation techniques that are almost as effective, yet much cheaper to implement.", "phrases": ["monolingual data", "neural machine translation", "backtranslation"], "overall_score": 4.748929306435022, "scores": [2.7367793514909002, 0.941327033426708, 1.077588256113638], "rank_score": 1.5852315470104152} -{"id": "iyyer-etal-2018-adversarial", "title": "Adversarial Example Generation with Syntactically Controlled Paraphrase Networks", "abstract": "We propose syntactically controlled paraphrase networks (SCPNs) and use them to generate adversarial examples. Given a sentence and a target syntactic form (e.g., a constituency parse), SCPNs are trained to produce a paraphrase of the sentence with the desired syntax. We show it is possible to create training data for this task by first doing backtranslation at a very large scale, and then using a parser to label the syntactic transformations that naturally occur during this process. Such data allows us to train a neural encoder-decoder model with extra inputs to specify the target syntax. A combination of automated and human evaluations show that SCPNs generate paraphrases that follow their target specifications without decreasing paraphrase quality when compared to baseline (uncontrolled) paraphrase systems. Furthermore, they are more capable of generating syntactically adversarial examples that both (1) \u201cfool\u201d pretrained models and (2) improve the robustness of these models to syntactic variation when used to augment their training data.", "phrases": ["paraphrase", "syntax", "adversarial example generation", "parallel corpus"], "overall_score": 7.022061794633065, "scores": [1.8657602007308098, 2.451185621827975, 1.4985725415187776, 0.5237749205973611], "rank_score": 1.584823321168731} -{"id": "nguyen-grishman-2015-relation", "title": "Relation Extraction: Perspective from Convolutional Neural Networks", "abstract": "Up to now, relation extraction systems have made extensive use of features generated by linguistic analysis modules. Errors in these features lead to errors of relation detection and classification. In this work, we depart from these traditional approaches with complicated feature engineering by introducing a convolutional neural network for relation extraction that automatically learns features from sentences and minimizes the dependence on external toolkits and resources. Our model takes advantage of multiple window sizes for filters and pre-trained word embeddings as an initializer on a non-static architecture to improve the performance. We emphasize the relation extraction problem with an unbalanced corpus. The experimental results show that our system significantly outperforms not only the best baseline systems for relation extraction but also the state-of-the-art systems for relation classification.", "phrases": ["convolutional neural networks", "multiple window size", "relation extraction", "cnn", "research effort"], "overall_score": 5.221089767135389, "scores": [3.8963845561692136, 1.4127008237036247, 1.2353432421768649, 0.8414033336911723, 0.5349025639034767], "rank_score": 1.5841469039288705} -{"id": "xu-etal-2015-semeval", "title": "SemEval-2015 Task 1: Paraphrase and Semantic Similarity in Twitter (PIT)", "abstract": "In this shared task, we present evaluations of systems on two related tasks, Paraphrase Identification (PI) and Semantic Textual Similarity (SS), for Twitter data. 
Given a pair of sentences, participants are asked to produce a binary yes/no judgement or a graded score to measure their semantic equivalence. The task features a newly constructed Twitter Paraphrase Corpus that contains 18,762 sentence pairs. A total of 19 teams participated, submitting 36 runs to the PI task and 26 runs to the SS task. The evaluation shows encouraging results and open challenges for future research. The best systems scored an F1-measure of 0.674 for the PI task and a Pearson correlation of 0.619 for the SS task, respectively, compared to a strong logistic regression baseline of 0.589 F1 and 0.511 Pearson, while the best SS systems can often reach >0.80 Pearson on well-formed text. This shared task also provides insights into the relation between the PI and SS tasks and suggests the importance of bringing these two research areas together. We make all the data, baseline systems and evaluation scripts publicly available.", "phrases": ["paraphrase", "semantic similarity", "twitter"], "overall_score": 4.488186792394692, "scores": [1.8449183254053572, 1.6664987961909066, 1.2409819186776736], "rank_score": 1.5841330134246459} -{"id": "zheng-etal-2021-comae", "title": "CoMAE: A Multi-factor Hierarchical Framework for Empathetic Response Generation", "abstract": "The capacity of empathy is crucial to the success of open-domain dialog systems. Due to its multi-dimensional nature, there are various factors that relate to empathy expression, such as communication mechanism, dialog act and emotion. However, existing methods for empathetic response generation usually either consider only one empathy factor or ignore the hierarchical relationships between different factors, leading to a weak ability of empathy modeling. In this paper, we propose a multi-factor hierarchical framework, CoMAE, for empathetic response generation, which models the above three key factors of empathy expression in a hierarchical way. We show experimentally that our CoMAE-based model can generate more empathetic responses than previous methods. We also highlight the importance of hierarchical modeling of different factors through both the empirical analysis on a real-life corpus and the extensive experiments. Our code and the data used are available at https://github.com/chujiezheng/CoMAE.", "phrases": ["multi-factor hierarchical framework", "empathetic response generation", "emotion", "comae"], "overall_score": 2.8380578944283235, "scores": [2.7549529015458667, 1.9703094809266017, 1.0833696827733912, 0.5271690133465611], "rank_score": 1.5839502696481051} -{"id": "aharoni-goldberg-2017-towards", "title": "Towards String-To-Tree Neural Machine Translation", "abstract": "We present a simple method to incorporate syntactic information about the target language in a neural machine translation system by translating into linearized, lexicalized constituency trees. An experiment on the WMT16 German-English news translation task resulted in an improved BLEU score when compared to a syntax-agnostic NMT baseline trained on the same dataset. An analysis of the translations from the syntax-aware system shows that it performs more reordering during translation in comparison to the baseline. 
A small-scale human evaluation also showed an advantage for the syntax-aware system.", "phrases": ["neural machine translation", "parse tree", "target sentence"], "overall_score": 5.031116561944185, "scores": [2.4035435220560664, 1.4691816639605582, 0.8765177442644143], "rank_score": 1.5830809767603462} -{"id": "yu-etal-2015-domain", "title": "Domain Adaptation for Dependency Parsing via Self-Training", "abstract": "This paper presents a successful approach for domain adaptation of a dependency parser via self-training. We improve parsing accuracy for out-of-domain texts with a self-training approach that uses confidence-based methods to select additional training samples. We compare two confidence-based methods: The first method uses the parse score of the employed parser to measure the confidence in a parse tree. The second method calculates the score differences between the best tree and alternative trees. With these methods, we were able to improve the labeled accuracy score by 1.6 percentage points on texts from a chemical domain and by 0.6 on average on texts of three web domains. Our improvement of 1.5% UAS on the chemical texts is substantially higher than the improvement of 0.5% UAS reported in previous work. For the three web domains, no positive results for self-training have been reported before.", "phrases": ["self-training", "out-of-domain text", "domain adaptation"], "overall_score": 3.2917114830467322, "scores": [2.201050308311226, 1.964517694034896, 0.583367830282654], "rank_score": 1.5829786108762587} -{"id": "chan-roth-2010-exploiting", "title": "Exploiting Background Knowledge for Relation Extraction", "abstract": "Relation extraction is the task of recognizing semantic relations among entities. Given a particular sentence, supervised approaches to Relation Extraction employ feature or kernel functions which usually have a single sentence in their scope. The overall aim of this paper is to propose methods for using knowledge and resources that are external to the target sentence, as a way to improve relation extraction. We demonstrate this by exploiting background knowledge such as relationships among the target relations, as well as by considering how target relations relate to some existing knowledge resources. Our methods are general and we suggest that some of them could be applied to other NLP tasks.", "phrases": ["background knowledge", "relation extraction", "target sentence", "cluster"], "overall_score": 4.059763844075009, "scores": [3.444326106754786, 1.8241770032997766, 0.5339617847868144, 0.5286758464212131], "rank_score": 1.5827851853156476} -{"id": "carpuat-wu-2007-phrase", "title": "How phrase sense disambiguation outperforms word sense disambiguation for statistical machine translation", "abstract": "We present comparative empirical evidence arguing that a generalized phrase sense disambiguation approach better improves statistical machine translation than ordinary word sense disambiguation, along with a data analysis suggesting the reasons for this. Standalone word sense disambiguation, as exemplified by the Senseval series of evaluations, typically defines the target of disambiguation as a single word. But in order to be useful in statistical machine translation, our studies indicate that word sense disambiguation should be redefined to move beyond the particular case of single word targets, and instead to generalize to multi-word phrase targets. 
We investigate how and why the phrase sense disambiguation approach\u2014in contrast to recent efforts to apply traditional word sense disambiguation to SMT\u2014is able to yield statistically significant improvements in translation quality even under large data conditions, and consistently improve SMT across both IWSLT and NIST Chinese-English text translation tasks. We discuss architectural issues raised by this change of perspective, and consider the new model architecture necessitated by the phrase sense disambiguation approach.", "phrases": ["sense disambiguation", "machine translation", "wsd", "smt system"], "overall_score": 4.574770151750654, "scores": [3.09461807816744, 1.388020821546759, 1.0165762058761703, 0.8318322958422045], "rank_score": 1.5827618503581435} -{"id": "dhingra-etal-2017-gated", "title": "Gated-Attention Readers for Text Comprehension", "abstract": "In this paper we study the problem of answering cloze-style questions over documents. Our model, the Gated-Attention (GA) Reader, integrates a multi-hop architecture with a novel attention mechanism, which is based on multiplicative interactions between the query embedding and the intermediate states of a recurrent neural network document reader. This enables the reader to build query-specific representations of tokens in the document for accurate answer selection. The GA Reader obtains state-of-the-art results on three benchmarks for this task\u2013the CNN & Daily Mail news stories and the Who Did What dataset. The effectiveness of multiplicative interaction is demonstrated by an ablation study, and by comparing to alternative compositional operators for implementing the gated-attention.", "phrases": ["reader", "multiplicative interaction", "cnn", "gated-attention", "reading comprehension"], "overall_score": 5.989355705458385, "scores": [4.054224002409374, 1.2800241239712637, 1.0816906496478742, 0.8735132155249048, 0.6242049401379668], "rank_score": 1.5827313863382766} -{"id": "alberti-etal-2019-synthetic", "title": "Synthetic QA Corpora Generation with Roundtrip Consistency", "abstract": "We introduce a novel method of generating synthetic question answering corpora by combining models of question generation and answer extraction, and by filtering the results to ensure roundtrip consistency. By pretraining on the resulting corpora we obtain significant improvements on SQuAD2 and NQ, establishing a new state-of-the-art on the latter. Our synthetic data generation models, for both question generation and answer extraction, can be fully reproduced by finetuning a publicly available BERT model on the extractive subsets of SQuAD2 and NQ. We also describe a more powerful variant that does full sequence-to-sequence pretraining for question generation, obtaining exact match and F1 at less than 0.1% and 0.4% from human performance on SQuAD2.", "phrases": ["roundtrip consistency", "question generation", "bert", "data augmentation", "language model"], "overall_score": 5.713392835380915, "scores": [3.221349274873071, 1.9012995119606781, 1.1303058474053118, 0.8295687658869133, 0.8287514801122731], "rank_score": 1.5822549760476494} -{"id": "yan-etal-2009-unsupervised", "title": "Unsupervised Relation Extraction by Mining Wikipedia Texts Using Information from the Web", "abstract": "This paper presents an unsupervised relation extraction method for discovering and enhancing relations in which a specified concept in Wikipedia participates. 
Using respective characteristics of Wikipedia articles and Web corpus, we develop a clustering approach based on combinations of patterns: dependency patterns from dependency analysis of texts in Wikipedia, and surface patterns generated from highly redundant information related to the Web. Evaluations of the proposed approach on two different domains demonstrate the superiority of the pattern combination over existing approaches. Fundamentally, our method demonstrates how deep linguistic patterns contribute complementarily with Web surface patterns to the generation of various relations.", "phrases": ["wikipedia", "web", "unsupervised relation extraction"], "overall_score": 3.6400023320674832, "scores": [2.102379348647223, 1.467489059765121, 1.1726303723832838], "rank_score": 1.580832926931876} -{"id": "nivre-etal-2017-universal", "title": "Universal Dependencies", "abstract": "Universal Dependencies (UD) is a project that seeks to develop cross-linguistically consistent treebank annotation for many languages. This tutorial gives an introduction to the UD framework and resources, from basic design principles to annotation guidelines and existing treebanks. We also discuss tools for developing and exploiting UD treebanks and survey applications of UD in NLP and linguistics.", "phrases": ["project", "treebank", "universal dependencies"], "overall_score": 3.6379374162653586, "scores": [3.5835187248617366, 0.5822826146549975, 0.5740070966635216], "rank_score": 1.5799361453934184} -{"id": "zou-etal-2013-bilingual", "title": "Bilingual Word Embeddings for Phrase-Based Machine Translation", "abstract": "We introduce bilingual word embeddings: semantic embeddings associated across two languages in the context of neural language models. We propose a method to learn bilingual embeddings from a large unlabeled corpus, while utilizing MT word alignments to constrain translational equivalence. The new embeddings significantly out-perform baselines in word semantic similarity. A single semantic similarity feature induced with bilingual embeddings adds near half a BLEU point to the results of NIST08 Chinese-English machine translation task.", "phrases": ["machine translation", "semantic similarity", "sentiment analysis"], "overall_score": 6.493139741357412, "scores": [3.25186815929752, 0.9213025294801606, 0.5653398731340773], "rank_score": 1.5795035206372525} -{"id": "gedigian-etal-2006-catching", "title": "Catching Metaphors", "abstract": "Metaphors are ubiquitous in language and developing methods to identify and deal with metaphors is an open problem in Natural Language Processing (NLP). In this paper we describe results from using a maximum entropy (ME) classifier to identify metaphors. Using the Wall Street Journal (WSJ) corpus, we annotated all the verbal targets associated with a set of frames which includes frames of spatial motion, manipulation, and health. One surprising finding was that over 90% of annotated targets from these frames are used metaphorically, underscoring the importance of processing figurative language. We then used this labeled data and each verbal target's PropBank annotation to train a maximum entropy classifier to make this literal vs. metaphoric distinction. 
Using the classifier, we reduce the final error in the test set by 5% over the verb-specific majority class baseline and 31% over the corpus-wide majority class baseline.", "phrases": ["metaphor", "wall street journal", "propbank annotation", "entropy classifier", "framenet"], "overall_score": 4.952291272547911, "scores": [4.119667998940377, 1.4815294320349106, 0.8824111598334891, 0.8488049397867685, 0.5647327109585705], "rank_score": 1.5794292483108232} -{"id": "he-etal-2018-dureader", "title": "DuReader: a Chinese Machine Reading Comprehension Dataset from Real-world Applications", "abstract": "This paper introduces DuReader, a new large-scale, open-domain Chinese machine reading comprehension (MRC) dataset, designed to address real-world MRC. DuReader has three advantages over previous MRC datasets: (1) data sources: questions and documents are based on Baidu Search and Baidu Zhidao; answers are manually generated. (2) question types: it provides rich annotations for more question types, especially yes-no and opinion questions, leaving more opportunity for the research community. (3) scale: it contains 200K questions, 420K answers and 1M documents; it is the largest Chinese MRC dataset so far. Experiments show that human performance is well above current state-of-the-art baseline systems, leaving plenty of room for the community to make improvements. To help the community make these improvements, both DuReader and baseline systems have been posted online. We also organize a shared competition to encourage the exploration of more models. Since the release of the task, there have been significant improvements over the baselines.", "phrases": ["chinese", "reading comprehension", "dureader", "new language"], "overall_score": 5.017414900634382, "scores": [3.973773945832923, 1.2170515063926257, 0.5963672402165145, 0.5278858662934323], "rank_score": 1.578769639683874} -{"id": "ghosal-etal-2018-contextual", "title": "Contextual Inter-modal Attention for Multi-modal Sentiment Analysis", "abstract": "Multi-modal sentiment analysis offers various challenges, one being the effective combination of different input modalities, namely text, visual and acoustic. In this paper, we propose a recurrent neural network based multi-modal attention framework that leverages the contextual information for utterance-level sentiment prediction. The proposed approach applies attention on multi-modal multi-utterance representations and tries to learn the contributing features amongst them. We evaluate our proposed approach on two multi-modal sentiment analysis benchmark datasets, viz. CMU Multi-modal Opinion-level Sentiment Intensity (CMU-MOSI) corpus and the recently released CMU Multi-modal Opinion Sentiment and Emotion Intensity (CMU-MOSEI) corpus. Evaluation results show the effectiveness of our proposed approach with the accuracies of 82.31% and 79.80% for the MOSI and MOSEI datasets, respectively. 
These represent approximately 2- and 1-point performance improvements over the state-of-the-art models for the respective datasets.", "phrases": ["multi-modal sentiment analysis", "contextual information", "contextual inter-modal attention"], "overall_score": 3.633254665034935, "scores": [2.0740671219286657, 1.835386020501582, 0.8242542146915122], "rank_score": 1.5779024523739198} -{"id": "ohta-etal-2011-overview", "title": "Overview of the Epigenetics and Post-translational Modifications (EPI) task of BioNLP Shared Task 2011", "abstract": "This paper presents the preparation, resources, results and analysis of the Epigenetics and Post-translational Modifications (EPI) task, a main task of the BioNLP Shared Task 2011. The task concerns the extraction of detailed representations of 14 protein and DNA modification events, the catalysis of these reactions, and the identification of negated or speculatively stated event instances. Seven teams submitted final results to the EPI task in the shared task, with the highest-performing system achieving 53% F-score in the full task and 69% F-score in the extraction of a simplified set of core event arguments.", "phrases": ["epigenetics", "post-translational modifications", "protein"], "overall_score": 2.1870808933038934, "scores": [2.2173251962279634, 1.985307044866783, 0.5303038970940509], "rank_score": 1.5776453793962657} -{"id": "rust-etal-2021-good", "title": "How Good is Your Tokenizer? On the Monolingual Performance of Multilingual Language Models", "abstract": "In this work, we provide a systematic and comprehensive empirical comparison of pretrained multilingual language models versus their monolingual counterparts with regard to their monolingual task performance. We study a set of nine typologically diverse languages with readily available pretrained monolingual models on a set of five diverse monolingual downstream tasks. We first aim to establish, via fair and controlled comparisons, if a gap between the multilingual and the corresponding monolingual representation of that language exists, and subsequently investigate the reason for any performance difference. To disentangle conflating factors, we train new monolingual models on the same data, with monolingually and multilingually trained tokenizers. We find that while the pretraining data size is an important factor, a designated monolingual tokenizer plays an equally important role in the downstream performance. Our results show that languages that are adequately represented in the multilingual model's vocabulary exhibit negligible performance decreases over their monolingual counterparts. We further find that replacing the original multilingual tokenizer with the specialized monolingual tokenizer improves the downstream performance of the multilingual model for almost every task and language.", "phrases": ["tokenizer", "counterpart", "multilingual model"], "overall_score": 4.162696270065599, "scores": [2.664641545126373, 1.222710096868904, 0.8446747031911704], "rank_score": 1.5773421150621492} -{"id": "mager-etal-2021-findings", "title": "Findings of the AmericasNLP 2021 Shared Task on Open Machine Translation for Indigenous Languages of the Americas", "abstract": "This paper presents the results of the 2021 Shared Task on Open Machine Translation for Indigenous Languages of the Americas. The shared task featured two independent tracks, and participants submitted machine translation systems for up to 10 indigenous languages. Overall, 8 teams participated with a total of 214 submissions. 
We provided training sets consisting of data collected from various sources, as well as manually translated sentences for the development and test sets. An official baseline trained on this data was also provided. Team submissions featured a variety of architectures, including both statistical and neural models, and for the majority of languages, many teams were able to considerably improve over the baseline. The best-performing systems achieved ChrF scores 12.97 points higher than the baseline when averaged across languages.", "phrases": ["shared task", "machine translation", "indigenous languages"], "overall_score": 2.5386149192286, "scores": [2.099391297668633, 1.7525543238888113, 0.8800447825754251], "rank_score": 1.5773301347109563} -{"id": "xu-etal-2016-optimizing", "title": "Optimizing Statistical Machine Translation for Text Simplification", "abstract": "Most recent sentence simplification systems use basic machine translation models to learn lexical and syntactic paraphrases from a manually simplified parallel corpus. These methods are limited by the quality and quantity of manually simplified corpora, which are expensive to build. In this paper, we conduct an in-depth adaptation of statistical machine translation to perform text simplification, taking advantage of large-scale paraphrases learned from bilingual texts and a small amount of manual simplifications with multiple references. Our work is the first to design automatic metrics that are effective for tuning and evaluating simplification systems, which will facilitate iterative development for this task.", "phrases": ["text simplification", "automatic metric", "sari", "complex sentence", "fkgl"], "overall_score": 5.65171417556385, "scores": [4.266974377192755, 1.949617519309355, 0.5734336060430663, 0.558594489759645, 0.5370843567264451], "rank_score": 1.577140869806253} -{"id": "meng-etal-2012-cross", "title": "Cross-Lingual Mixture Model for Sentiment Classification", "abstract": "The amount of labeled sentiment data in English is much larger than that in other languages. Such a disproportion has aroused interest in cross-lingual sentiment classification, which aims to conduct sentiment classification in the target language (e.g. Chinese) using labeled data in the source language (e.g. English). Most existing work relies on machine translation engines to directly adapt labeled data from the source language to the target language. This approach suffers from the limited coverage of vocabulary in the machine translation results. In this paper, we propose a generative cross-lingual mixture model (CLMM) to leverage unlabeled bilingual parallel data. By fitting parameters to maximize the likelihood of the bilingual parallel data, the proposed model learns previously unseen sentiment words from the large bilingual parallel data and improves vocabulary coverage significantly. 
Experiments on multiple data sets show that CLMM is consistently effective in two settings: (1) labeled data in the target language are unavailable; and (2) labeled data in the target language are also available.", "phrases": ["sentiment classification", "source language", "vocabulary", "bilingual parallel data", "cross-lingual mixture model"], "overall_score": 4.162096895463625, "scores": [3.399889078940865, 2.5250774253286563, 0.8483712498832798, 0.5631303580860296, 0.5491068784158948], "rank_score": 1.5771149981309451} -{"id": "specia-etal-2018-findings", "title": "Findings of the WMT 2018 Shared Task on Quality Estimation", "abstract": "We report the results of the WMT18 shared task on Quality Estimation, i.e. the task of predicting the quality of the output of machine translation systems at various granularity levels: word, phrase, sentence and document. This year we include four language pairs, three text domains, and translations produced by both statistical and neural machine translation systems. Participating teams from ten institutions submitted a variety of systems to different task variants and language pairs.", "phrases": ["wmt", "quality estimation", "team", "translation quality", "sentence-level"], "overall_score": 5.81647064712926, "scores": [3.835457902156957, 1.6645184129763757, 1.29039438510384, 0.5610771782089209, 0.5323427407385181], "rank_score": 1.5767581238369224} -{"id": "lawrence-reed-2019-argument", "title": "Argument Mining: A Survey", "abstract": "Argument mining is the automatic identification and extraction of the structure of inference and reasoning expressed as arguments presented in natural language. Understanding argumentative structure makes it possible to determine not only what positions people are adopting, but also why they hold the opinions they do, providing valuable insights in domains as diverse as financial market prediction and public relations. This survey explores the techniques that establish the foundations for argument mining, provides a review of recent advances in argument mining techniques, and discusses the challenges faced in automatically extracting a deeper understanding of reasoning expressed in language in general.", "phrases": ["survey", "argument mining", "claim", "unstructured text", "student feedback"], "overall_score": 4.461484090980241, "scores": [4.111318686008592, 1.7445568279967865, 0.9133145285354415, 0.5696926756194393, 0.5346579390231571], "rank_score": 1.5747081314366833} -{"id": "alzantot-etal-2018-generating", "title": "Generating Natural Language Adversarial Examples", "abstract": "Deep neural networks (DNNs) are vulnerable to adversarial examples, perturbations to correctly classified examples which can cause the model to misclassify. In the image domain, these perturbations can often be made virtually indistinguishable to human perception, causing humans and state-of-the-art models to disagree. However, in the natural language domain, small perturbations are clearly perceptible, and the replacement of a single word can drastically alter the semantics of the document. Given these challenges, we use a black-box population-based optimization algorithm to generate semantically and syntactically similar adversarial examples that fool well-trained sentiment analysis and textual entailment models with success rates of 97% and 70%, respectively. 
We additionally demonstrate that 92.3% of the successful sentiment analysis adversarial examples are classified to their original label by 20 human annotators, and that the examples are perceptibly quite similar. Finally, we discuss an attempt to use adversarial training as a defense, which fails to yield improvement, demonstrating the strength and diversity of our adversarial examples. We hope our findings encourage researchers to pursue improving the robustness of DNNs in the natural language domain.", "phrases": ["adversarial example", "perturbation", "sentiment analysis", "sample", "input sentence"], "overall_score": 6.523286665803933, "scores": [2.905617997518027, 1.9393945343527528, 1.0702287696787312, 1.0590394847037359, 0.8981239224941753], "rank_score": 1.5744809417494845} -{"id": "daume-iii-2007-frustratingly", "title": "Frustratingly Easy Domain Adaptation", "abstract": "We describe an approach to domain adaptation that is appropriate exactly in the case when one has enough \u201ctarget\u201d data to do slightly better than just using only \u201csource\u201d data. Our approach is incredibly simple, easy to implement as a preprocessing step (10 lines of Perl!) and outperforms state-of-the-art approaches on a range of datasets. Moreover, it is trivially extended to a multi-domain adaptation problem, where one has data from a variety of different domains.", "phrases": ["domain adaptation", "feature augmentation", "access", "model parameter", "small amount"], "overall_score": 7.413355058030191, "scores": [3.3786498187811924, 1.3441809790995913, 1.123994186711777, 1.1040438523158993, 0.9197198854702832], "rank_score": 1.5741177444757488} -{"id": "zhu-hovy-2007-active", "title": "Active Learning for Word Sense Disambiguation with Methods for Addressing the Class Imbalance Problem", "abstract": "In this paper, we analyze the effect of resampling techniques, including under-sampling and over-sampling used in active learning for word sense disambiguation (WSD). Experimental results show that under-sampling causes negative effects on active learning, but over-sampling is a relatively good choice. To alleviate the within-class imbalance problem of over-sampling, we propose a bootstrap-based over-sampling (BootOS) method that works better than ordinary over-sampling in active learning for WSD. Finally, we investigate when to stop active learning, and adopt two strategies, max-confidence and min-error, as stopping conditions for active learning. According to experimental results, we suggest a prediction solution by considering max-confidence as the upper bound and min-error as the lower bound for stopping conditions.", "phrases": ["word sense disambiguation", "class imbalance problem", "wsd", "active learning"], "overall_score": 4.036160367335582, "scores": [2.474868839406338, 1.9257458297594863, 0.8504513899328751, 1.0432654146892504], "rank_score": 1.5735828684469872} -{"id": "arivazhagan-etal-2019-monotonic", "title": "Monotonic Infinite Lookback Attention for Simultaneous Machine Translation", "abstract": "Simultaneous machine translation begins to translate each source sentence before the source speaker is finished speaking, with applications to live and streaming scenarios. Simultaneous systems must carefully schedule their reading of the source sentence to balance quality against latency. We present the first simultaneous translation system to learn an adaptive schedule jointly with a neural machine translation (NMT) model that attends over all source tokens read thus far. 
We do so by introducing Monotonic Infinite Lookback (MILk) attention, which maintains both a hard, monotonic attention head to schedule the reading of the source sentence, and a soft attention head that extends from the monotonic head back to the beginning of the source. We show that MILk's adaptive schedule allows it to arrive at latency-quality trade-offs that compare favorably to those of a recently proposed wait-k strategy for many latency values.", "phrases": ["simultaneous machine translation", "latency", "policy", "bernoulli variable", "live broadcast"], "overall_score": 5.063212209080875, "scores": [3.2470390230060997, 1.4726319606708318, 1.330702817603847, 0.9484329335347914, 0.8660698316611634], "rank_score": 1.5729753132953468} -{"id": "zhang-etal-2018-speeding", "title": "Speeding Up Neural Machine Translation Decoding by Cube Pruning", "abstract": "Although neural machine translation has achieved promising results, it suffers from slow translation speed. The direct consequence is that a trade-off has to be made between translation quality and speed, thus its performance cannot come into full play. We apply cube pruning, a popular technique to speed up dynamic programming, to neural machine translation to speed up the translation. To construct the equivalence class, similar target hidden states are combined, leading to fewer RNN expansion operations on the target side and fewer softmax operations over the large target vocabulary. The experiments show that, at the same or even better translation quality, our method can translate faster compared with naive beam search by 3.3x on GPUs and 3.5x on CPUs.", "phrases": ["neural machine translation", "cube pruning", "translation quality"], "overall_score": 2.180429264890889, "scores": [2.50136174346739, 1.6887543023746547, 0.528425685358836], "rank_score": 1.5728472437336267} -{"id": "gu-etal-2022-ppt", "title": "PPT: Pre-trained Prompt Tuning for Few-shot Learning", "abstract": "Prompts for pre-trained language models (PLMs) have shown remarkable performance by bridging the gap between pre-training tasks and various downstream tasks. Among these methods, prompt tuning, which freezes PLMs and only tunes soft prompts, provides an efficient and effective solution for adapting large-scale PLMs to downstream tasks. However, prompt tuning is yet to be fully explored. In our pilot experiments, we find that prompt tuning performs comparably with conventional full-model tuning when downstream data are sufficient, whereas it is much worse under few-shot learning settings, which may hinder the application of prompt tuning. We attribute this low performance to the manner of initializing soft prompts. Therefore, in this work, we propose to pre-train prompts by adding soft prompts into the pre-training stage to obtain a better initialization. We name this Pre-trained Prompt Tuning framework \u201cPPT\u201d. To ensure the generalization of PPT, we formulate similar classification tasks into a unified task form and pre-train soft prompts for this unified task. Extensive experiments show that tuning pre-trained prompts for downstream tasks can reach or even outperform full-model fine-tuning under both full-data and few-shot settings. 
Our approach is effective and efficient for using large-scale PLMs in practice.", "phrases": ["prompt", "few-shot learning", "low performance", "ppt"], "overall_score": 3.2703415388942187, "scores": [3.0444109417057885, 1.911042038482515, 0.7959528006103255, 0.5394015794358663], "rank_score": 1.5727018400586237} -{"id": "takamura-etal-2005-extracting", "title": "Extracting Semantic Orientations of Words using Spin Model", "abstract": "We propose a method for extracting semantic orientations of words: desirable or undesirable. Regarding semantic orientations as spins of electrons, we use the mean field approximation to compute the approximate probability function of the system instead of the intractable actual probability function. We also propose a criterion for parameter selection on the basis of magnetization. Given only a small number of seed words, the proposed method extracts semantic orientations with high accuracy in the experiments on English lexicon. The result is comparable to the best value ever reported.", "phrases": ["semantic orientation", "spin model", "electron", "seed word", "sentiment analysis"], "overall_score": 5.758339080176875, "scores": [3.173525002857973, 2.058670443373303, 1.2243372920683575, 0.8722875815337229, 0.5301153777005535], "rank_score": 1.5717871395067822} -{"id": "gao-etal-2021-simcse", "title": "SimCSE: Simple Contrastive Learning of Sentence Embeddings", "abstract": "This paper presents SimCSE, a simple contrastive learning framework that greatly advances the state-of-the-art sentence embeddings. We first describe an unsupervised approach, which takes an input sentence and predicts itself in a contrastive objective, with only standard dropout used as noise. This simple method works surprisingly well, performing on par with previous supervised counterparts. We find that dropout acts as minimal data augmentation and removing it leads to a representation collapse. Then, we propose a supervised approach, which incorporates annotated pairs from natural language inference datasets into our contrastive learning framework, by using \u201centailment\u201d pairs as positives and \u201ccontradiction\u201d pairs as hard negatives. We evaluate SimCSE on standard semantic textual similarity (STS) tasks, and our unsupervised and supervised models using BERT base achieve an average of 76.3% and 81.6% Spearman's correlation respectively, a 4.2% and 2.2% improvement compared to previous best results. We also show\u2014both theoretically and empirically\u2014that contrastive learning objective regularizes pre-trained embeddings' anisotropic space to be more uniform, and it better aligns positive pairs when supervised signals are available.", "phrases": ["simple contrastive learning", "sentence embeddings", "language inference dataset", "textual similarity", "simcse"], "overall_score": 6.785196586300851, "scores": [3.998849150736643, 0.9518417924586975, 0.8987108673357187, 1.1241185442341148, 0.8842853969492452], "rank_score": 1.571561150342884} -{"id": "sun-etal-2019-dream", "title": "DREAM: A Challenge Data Set and Models for Dialogue-Based Reading Comprehension", "abstract": "We present DREAM, the first dialogue-based multiple-choice reading comprehension data set. Collected from English as a Foreign Language examinations designed by human experts to evaluate the comprehension level of Chinese learners of English, our data set contains 10,197 multiple-choice questions for 6,444 dialogues. 
In contrast to existing reading comprehension data sets, DREAM is the first to focus on in-depth multi-turn multi-party dialogue understanding. DREAM is likely to present significant challenges for existing reading comprehension systems: 84% of answers are non-extractive, 85% of questions require reasoning beyond a single sentence, and 34% of questions also involve commonsense knowledge. We apply several popular neural reading comprehension models that primarily exploit surface information within the text and find them to, at best, just barely outperform a rule-based approach. We next investigate the effects of incorporating dialogue structure and different kinds of general world knowledge into both rule-based and (neural and non-neural) machine learning-based reading comprehension models. Experimental results on the DREAM data set show the effectiveness of dialogue structure and general world knowledge. DREAM is available at .", "phrases": ["reading comprehension", "dialogue understanding", "dream", "passage"], "overall_score": 4.705781540153334, "scores": [3.408179275241829, 1.4225589363732019, 0.8269364390817885, 0.6256392244387899], "rank_score": 1.5708284687839023} -{"id": "mishra-etal-2018-author", "title": "Author Profiling for Abuse Detection", "abstract": "The rapid growth of social media in recent years has fed into some highly undesirable phenomena such as proliferation of hateful and offensive language on the Internet. Previous research suggests that such abusive content tends to come from users who share a set of common stereotypes and form communities around them. The current state-of-the-art approaches to abuse detection are oblivious to user and community information and rely entirely on textual (i.e., lexical and semantic) cues. In this paper, we propose a novel approach to this problem that incorporates community-based profiling features of Twitter users. Experimenting with a dataset of 16k tweets, we show that our methods significantly outperform the current state of the art in abuse detection. Further, we conduct a qualitative analysis of model characteristics. We release our code, pre-trained models and all the resources used in the public domain.", "phrases": ["abuse detection", "community-based profiling feature", "author profiling"], "overall_score": 3.9020680219922586, "scores": [3.2129497882045857, 0.9432615572227024, 0.5547117256568865], "rank_score": 1.5703076903613917} -{"id": "kamocki-witt-2020-privacy", "title": "Privacy by Design and Language Resources", "abstract": "Privacy by Design (also referred to as Data Protection by Design) is an approach in which solutions and mechanisms addressing privacy and data protection are embedded through the entire project lifecycle, from the early design stage, rather than just added as an additional layer to the final product. Formulated in the 1990s by the Privacy Commissioner of Ontario, the principle of Privacy by Design has been discussed by institutions and policymakers on both sides of the Atlantic, and was mentioned already in the 1995 EU Data Protection Directive (95/46/EC). More recently, Privacy by Design was introduced as one of the requirements of the General Data Protection Regulation (GDPR), obliging data controllers to define and adopt, already at the conception phase, appropriate measures and safeguards to implement data protection principles and protect the rights of the data subject. 
Failing to meet this obligation may result in a hefty fine, as was the case in the Uniontrad decision by the French Data Protection Authority (CNIL). The ambition of the proposed paper is to analyse the practical meaning of Privacy by Design in the context of Language Resources, and to propose measures and safeguards that can be implemented by the community to ensure respect for this principle.", "phrases": ["design", "language resources", "privacy"], "overall_score": 1.7239648215971193, "scores": [1.9965286319694633, 1.8534773629669408, 0.8576552228909025], "rank_score": 1.5692204059424355} -{"id": "rogers-etal-2020-primer", "title": "A Primer in BERTology: What We Know About How BERT Works", "abstract": "Transformer-based models have pushed the state of the art in many areas of NLP, but our understanding of what is behind their success is still limited. This paper is the first survey of over 150 studies of the popular BERT model. We review the current state of knowledge about how BERT works, what kind of information it learns and how it is represented, common modifications to its training objectives and architecture, the overparameterization issue, and approaches to compression. We then outline directions for future research.", "phrases": ["bertology", "many study", "interpretation work"], "overall_score": 7.4592371552272985, "scores": [3.35328266032838, 0.8337391431907762, 0.5205172492896095], "rank_score": 1.5691796842695886} -{"id": "nallapati-etal-2016-abstractive", "title": "Abstractive Text Summarization using Sequence-to-sequence RNNs and Beyond", "abstract": "In this work, we model abstractive text summarization using Attentional Encoder-Decoder Recurrent Neural Networks, and show that they achieve state-of-the-art performance on two different corpora. We propose several novel models that address critical problems in summarization that are not adequately modeled by the basic architecture, such as modeling key-words, capturing the hierarchy of sentence-to-word structure, and emitting words that are rare or unseen at training time. Our work shows that many of our proposed models contribute to further improvement in performance. We also propose a new dataset consisting of multi-sentence summaries, and establish performance benchmarks for further research.", "phrases": ["sequence-to-sequence", "rnn", "abstractive text summarization", "language model", "hierarchical attention"], "overall_score": 7.73094793134832, "scores": [2.5540815657578033, 1.6116695591958188, 1.3947563065166895, 1.235245201742355, 1.0493355491118261], "rank_score": 1.5690176364648987} -{"id": "wittenburg-etal-2006-elan", "title": "ELAN: a Professional Framework for Multimodality Research", "abstract": "Utilization of computer tools in linguistic research has gained importance with the maturation of media frameworks for the handling of digital audio and video. The increased use of these tools in gesture, sign language and multimodal interaction studies has led to stronger requirements on the flexibility, the efficiency and in particular the time accuracy of annotation tools. This paper describes the efforts made to make ELAN a tool that meets these requirements, with special attention to the developments in the area of time accuracy. 
In subsequent sections, an overview will be given of other enhancements in the latest versions of ELAN that make it a useful tool in multimodality research.", "phrases": ["multimodality research", "audio", "elan"], "overall_score": 2.810797668945367, "scores": [2.891908687656318, 0.9621949899986265, 0.8521044667167078], "rank_score": 1.5687360481238841} -{"id": "liu-etal-2021-visually", "title": "Visually Grounded Reasoning across Languages and Cultures", "abstract": "The design of widespread vision-and-language datasets and pre-trained encoders directly adopts, or draws inspiration from, the concepts and images of ImageNet. While one can hardly overestimate how much this benchmark contributed to progress in computer vision, it is mostly derived from lexical databases and image queries in English, resulting in source material with a North American or Western European bias. Therefore, we devise a new protocol to construct an ImageNet-style hierarchy representative of more languages and cultures. In particular, we let the selection of both concepts and images be entirely driven by native speakers, rather than scraping them automatically. Specifically, we focus on a typologically diverse set of languages, namely, Indonesian, Mandarin Chinese, Swahili, Tamil, and Turkish. On top of the concepts and images obtained through this new protocol, we create a multilingual dataset for Multicultural Reasoning over Vision and Language (MaRVL) by eliciting statements from native speaker annotators about pairs of images. The task consists of discriminating whether each grounded statement is true or false. We establish a series of baselines using state-of-the-art models and find that their cross-lingual transfer performance lags dramatically behind supervised performance in English. These results invite us to reassess the robustness and accuracy of current state-of-the-art models beyond a narrow domain, but also open up new exciting challenges for the development of truly multilingual and multicultural systems.", "phrases": ["reasoning", "culture", "visual question"], "overall_score": 2.809930542667684, "scores": [2.2438159100029775, 1.880626494986602, 0.5803138822116624], "rank_score": 1.5682520957337474} -{"id": "eisenschlos-etal-2020-understanding", "title": "Understanding tables with intermediate pre-training", "abstract": "Table entailment, the binary classification task of finding if a sentence is supported or refuted by the content of a table, requires parsing language and table structure as well as numerical and discrete reasoning. While there is extensive work on textual entailment, table entailment is less well studied. We adapt TAPAS (Herzig et al., 2020), a table-based BERT model, to recognize entailment. Motivated by the benefits of data augmentation, we create a balanced dataset of millions of automatically created training examples which are learned in an intermediate step prior to fine-tuning. This new data is not only useful for table entailment, but also for SQA (Iyyer et al., 2017), a sequential table QA task. To be able to use long examples as input to BERT models, we evaluate table pruning techniques as a pre-processing step to drastically improve the training and prediction efficiency at a moderate drop in accuracy. 
The different methods set the new state-of-the-art on the TabFact (Chen et al., 2020) and SQA datasets.", "phrases": ["table", "pre-training", "table-based fact verification"], "overall_score": 4.440166642566729, "scores": [3.2825368628892164, 0.8693853184862967, 0.5496298410463769], "rank_score": 1.5671840074739631} -{"id": "kong-etal-2010-dependency", "title": "Dependency-driven Anaphoricity Determination for Coreference Resolution", "abstract": "This paper proposes a dependency-driven scheme to dynamically determine the syntactic parse tree structure for tree kernel-based anaphoricity determination in coreference resolution. Given a full syntactic parse tree, it keeps the nodes and the paths related with current mention based on constituent dependencies from both syntactic and semantic perspectives, while removing the noisy information, eventually leading to a dependency-driven dynamic syntactic parse tree (D-DSPT). Evaluation on the ACE 2003 corpus shows that the D-DSPT outperforms all previous parse tree structures on anaphoricity determination, and that applying our anaphoricity determination module in coreference resolution achieves the best performance so far.", "phrases": ["anaphoricity determination", "coreference resolution", "dependency-driven scheme", "mention"], "overall_score": 3.0474700526071192, "scores": [2.6557966095660124, 2.5143299607336873, 0.55777496955596, 0.5364576939693623], "rank_score": 1.5660898084562556} -{"id": "habernal-etal-2017-argotario", "title": "Argotario: Computational Argumentation Meets Serious Games", "abstract": "An important skill in critical thinking and argumentation is the ability to spot and recognize fallacies. Fallacious arguments, omnipresent in argumentative discourse, can be deceptive, manipulative, or simply lead to `wrong moves' in a discussion. Despite their importance, argumentation scholars and NLP researchers with focus on argumentation quality have not yet investigated fallacies empirically. The nonexistence of resources dealing with fallacious argumentation calls for scalable approaches to data acquisition and annotation, for which the serious games methodology offers an appealing, yet unexplored, alternative. We present Argotario, a serious game that deals with fallacies in everyday argumentation. Argotario is a multilingual, open-source, platform-independent application with strong educational aspects, accessible at .", "phrases": ["game", "fallacy", "argotario", "irrelevant authority", "propaganda technique"], "overall_score": 4.132535816113016, "scores": [2.3445526092406976, 1.876760147704574, 1.387571418985183, 1.1663144149514357, 1.0543695085642255], "rank_score": 1.5659136198892232} -{"id": "eskander-etal-2014-foreign", "title": "Foreign Words and the Automatic Processing of Arabic Social Media Text Written in Roman Script", "abstract": "Arabic on social media has all the properties of any language on social media that make it tough for natural language processing, plus some specific problems. These include diglossia, the use of an alternative alphabet (Roman), and code switching with foreign languages. In this paper, we present a system which can process Arabic written in the Roman alphabet (\u201cArabizi\u201d). It identifies whether each word is a foreign word or one of another four categories (Arabic, name, punctuation, sound), and transliterates Arabic words and names into the Arabic alphabet. 
We obtain an overall system performance of 83.8% on an unseen test set.", "phrases": ["roman script", "arabizi", "foreign word", "social medium text"], "overall_score": 3.7524064986489516, "scores": [2.140182713364575, 2.0502338117463186, 1.548528562845535, 0.5205551349533915], "rank_score": 1.564875055727455} -{"id": "liu-lapata-2019-text", "title": "Text Summarization with Pretrained Encoders", "abstract": "Bidirectional Encoder Representations from Transformers (BERT) represents the latest incarnation of pretrained language models which have recently advanced a wide range of natural language processing tasks. In this paper, we showcase how BERT can be usefully applied in text summarization and propose a general framework for both extractive and abstractive models. We introduce a novel document-level encoder based on BERT which is able to express the semantics of a document and obtain representations for its sentences. Our extractive model is built on top of this encoder by stacking several inter-sentence Transformer layers. For abstractive summarization, we propose a new fine-tuning schedule which adopts different optimizers for the encoder and the decoder as a means of alleviating the mismatch between the two (the former is pretrained while the latter is not). We also demonstrate that a two-staged fine-tuning approach can further boost the quality of the generated summaries. Experiments on three datasets show that our model achieves state-of-the-art results across the board in both extractive and abstractive settings.", "phrases": ["language model", "advance", "inter-sentence transformer layer", "text summarization"], "overall_score": 5.691664752338766, "scores": [3.279623485447204, 1.619954337096954, 0.8286584951110721, 0.5304907638958893], "rank_score": 1.5646817703877798} -{"id": "daxenberger-etal-2017-essence", "title": "What is the Essence of a Claim? Cross-Domain Claim Identification", "abstract": "Argument mining has become a popular research area in NLP. It typically includes the identification of argumentative components, e.g. claims, as the central component of an argument. We perform a qualitative analysis across six different datasets and show that these appear to conceptualize claims quite differently. To learn about the consequences of such different conceptualizations of claim for practical applications, we carried out extensive experiments using state-of-the-art feature-rich and deep learning systems, to identify claims in a cross-domain fashion. While the divergent conceptualization of claims in different datasets is indeed harmful to cross-domain classification, we show that there are shared properties on the lexical level as well as system configurations that can help to overcome these gaps.", "phrases": ["claim", "cross-domain claim identification", "argument mining", "cross-domain classification"], "overall_score": 4.90389623213442, "scores": [4.09808277489825, 1.0122920047244786, 0.5783193280236557, 0.5672845603687745], "rank_score": 1.5639946670037899} -{"id": "talbot-osborne-2007-smoothed", "title": "Smoothed Bloom Filter Language Models: Tera-Scale LMs on the Cheap", "abstract": "A Bloom filter (BF) is a randomised data structure for set membership queries. Its space requirements fall significantly below lossless information-theoretic lower bounds but it produces false positives with some quantifiable probability. Here we present a general framework for deriving smoothed language model probabilities from BFs. 
We investigate how a BF containing n-gram statistics can be used as a direct replacement for a conventional n-gram model. Recent work has demonstrated that corpus statistics can be stored efficiently within a BF; here we consider how smoothed language model probabilities can be derived efficiently from this randomised representation. Our proposal takes advantage of the one-sided error guarantees of the BF and simple inequalities that hold between related n-gram statistics in order to further reduce the BF storage requirements and the error rate of the derived probabilities. We use these models as replacements for a conventional language model in machine translation experiments.", "phrases": ["bloom filter", "language model", "data structure"], "overall_score": 4.431124968728316, "scores": [2.072192623739388, 1.3663599893331584, 1.253425466895576], "rank_score": 1.5639926933227073} -{"id": "shekhar-etal-2017-foil", "title": "FOIL it! Find One mismatch between Image and Language caption", "abstract": "In this paper, we aim to understand whether current language and vision (LaVi) models truly grasp the interaction between the two modalities. To this end, we propose an extension of the MS-COCO dataset, FOIL-COCO, which associates images with both correct and `foil' captions, that is, descriptions of the image that are highly similar to the original ones, but contain one single mistake (`foil word'). We show that current LaVi models fall into the traps of this data and perform badly on three tasks: a) caption classification (correct vs. foil); b) foil word detection; c) foil word correction. Humans, in contrast, have near-perfect performance on those tasks. We demonstrate that merely utilising language cues is not enough to model FOIL-COCO and that it challenges the state-of-the-art by requiring a fine-grained understanding of the relation between text and image.", "phrases": ["image", "language caption", "foil"], "overall_score": 3.8854623411176337, "scores": [2.5035304136344707, 0.895322104194619, 1.292022700500486], "rank_score": 1.5636250727765253} -{"id": "herzig-etal-2020-tapas", "title": "TaPas: Weakly Supervised Table Parsing via Pre-training", "abstract": "Answering natural language questions over tables is usually seen as a semantic parsing task. To alleviate the collection cost of full logical forms, one popular approach focuses on weak supervision consisting of denotations instead of logical forms. However, training semantic parsers from weak supervision poses difficulties, and in addition, the generated logical forms are only used as an intermediate step prior to retrieving the denotation. In this paper, we present TaPas, an approach to question answering over tables without generating logical forms. TaPas trains from weak supervision, and predicts the denotation by selecting table cells and optionally applying a corresponding aggregation operator to such selection. TaPas extends BERT's architecture to encode tables as input, initializes from an effective joint pre-training of text segments and tables crawled from Wikipedia, and is trained end-to-end. We experiment with three different semantic parsing datasets, and find that TaPas outperforms or rivals semantic parsing models by improving state-of-the-art accuracy on SQA from 55.1 to 67.2 and performing on par with the state-of-the-art on WikiSQL and WikiTQ, but with a simpler model architecture. 
We additionally find that transfer learning from WikiSQL to WikiTQ, which is trivial in our setting, yields 48.7 accuracy, 4.2 points above the state-of-the-art.", "phrases": ["table", "language model", "pre-training model"], "overall_score": 5.687785292027942, "scores": [2.5314843793374706, 1.5823970845924147, 0.5769643681224633], "rank_score": 1.563615277350783} -{"id": "ono-etal-2015-word", "title": "Word Embedding-based Antonym Detection using Thesauri and Distributional Information", "abstract": "This paper proposes a novel approach to train word embeddings to capture antonyms. Word embeddings have been shown to capture synonyms and analogies. Such word embeddings, however, cannot capture antonyms since they depend on the distributional hypothesis. Our approach utilizes supervised synonym and antonym information from thesauri, as well as distributional information from large-scale unlabelled text data. The evaluation results on the GRE antonym question task show that our model outperforms the state-of-the-art systems and it can answer the antonym questions with an F-score of 89%.", "phrases": ["antonym detection", "thesauri", "distributional information"], "overall_score": 3.8850804284281923, "scores": [2.1218757987439636, 1.6593661962979067, 0.9091721443701446], "rank_score": 1.563471379804005} -{"id": "chen-etal-2018-joint", "title": "Joint Learning for Emotion Classification and Emotion Cause Detection", "abstract": "We present a neural network-based joint approach for emotion classification and emotion cause detection, which attempts to capture mutual benefits across the two sub-tasks of emotion analysis. Considering that emotion classification and emotion cause detection need different kinds of features (affective and event-based separately), we propose a joint encoder which uses a unified framework to extract features for both sub-tasks and a joint model trainer which simultaneously learns two models for the two sub-tasks separately. Our experiments on Chinese microblogs show that the joint approach is very promising.", "phrases": ["emotion classification", "detection", "joint approach"], "overall_score": 3.884665659609585, "scores": [2.9650048202734065, 0.8828045125815271, 0.8421040608023352], "rank_score": 1.563304464552423} -{"id": "shao-etal-2017-character", "title": "Character-based Joint Segmentation and POS Tagging for Chinese using Bidirectional RNN-CRF", "abstract": "We present a character-based model for joint segmentation and POS tagging for Chinese. The bidirectional RNN-CRF architecture for general sequence tagging is adapted and applied with novel vector representations of Chinese characters that capture rich contextual information and lower-than-character level features. The proposed model is extensively evaluated and compared with a state-of-the-art tagger respectively on CTB5, CTB9 and UD Chinese. The experimental results indicate that our model is accurate and robust across datasets in different sizes, genres and annotation schemes. 
We obtain state-of-the-art performance on CTB5, achieving 94.38 F1-score for joint segmentation and POS tagging.", "phrases": ["joint segmentation", "pos tagging", "word boundary"], "overall_score": 4.232811233752942, "scores": [3.00728126124476, 0.8272152901710431, 0.8546461004044641], "rank_score": 1.5630475506067558} -{"id": "kulick-2010-simultaneous", "title": "Simultaneous Tokenization and Part-Of-Speech Tagging for Arabic without a Morphological Analyzer", "abstract": "We describe an approach to simultaneous tokenization and part-of-speech tagging that is based on separating the closed and open-class items, and focusing on the likelihood of the possible stems of the open-class words. By encoding some basic linguistic information, the machine learning task is simplified, while achieving state-of-the-art tokenization results and competitive POS results, although with a reduced tag set and some evaluation difficulties.", "phrases": ["part-of-speech tagging", "arabic", "simultaneous tokenization"], "overall_score": 2.1664558684930793, "scores": [2.233820374612842, 1.666588642560203, 0.787893689496592], "rank_score": 1.562767568889879} -{"id": "mcdonald-etal-2005-online", "title": "Online Large-Margin Training of Dependency Parsers", "abstract": "We present an effective training algorithm for linearly-scored dependency parsers that implements online large-margin multi-class training (Crammer and Singer, 2003; Crammer et al., 2003) on top of efficient parsing techniques for dependency trees (Eisner, 1996). The trained parsers achieve a competitive dependency accuracy for both English and Czech with no language specific enhancements.", "phrases": ["dependency parser", "edge", "mst", "feature representation", "learning method"], "overall_score": 6.614972507886923, "scores": [3.844336791088535, 1.1095760681534792, 1.069418999765834, 0.9037815359580919, 0.8844195703770344], "rank_score": 1.562306593068595} -{"id": "martinez-alonso-plank-2017-multitask", "title": "When is multitask learning effective? Semantic sequence prediction under varying data conditions", "abstract": "Multitask learning has been applied successfully to a range of tasks, mostly morphosyntactic. However, little is known about when MTL works and whether there are data characteristics that help to determine the success of MTL. In this paper we evaluate a range of semantic sequence labeling tasks in an MTL setup. We examine different auxiliary task configurations, amongst which a novel setup, and correlate their impact to data-dependent conditions. Our results show that MTL is not always effective, because significant improvements are obtained only for 1 out of 5 tasks. When successful, auxiliary tasks with compact and more uniform label distributions are preferable.", "phrases": ["mtl", "auxiliary task", "uniform label distribution"], "overall_score": 5.413767335699387, "scores": [2.100940506019756, 2.0367263208010042, 0.5485823458233365], "rank_score": 1.5620830575480324} -{"id": "kipper-etal-2006-extending", "title": "Extending VerbNet with Novel Verb Classes", "abstract": "Lexical classifications have proved useful in supporting various natural language processing (NLP) tasks. The largest verb classification for English is Levin's (1993) work which defined groupings of verbs based on syntactic properties. VerbNet - the largest computational verb lexicon currently available for English - provides detailed syntactic-semantic descriptions of Levin classes. 
While the classes included are extensive enough for some NLP use, they are not comprehensive. Korhonen and Briscoe (2004) have proposed a significant extension of Levin's classification which incorporates 57 novel classes for verbs not covered (comprehensively) by Levin. This paper describes the integration of these classes into VerbNet. The result is the most extensive Levin-style classification for English verbs which can be highly useful for practical applications.", "phrases": ["verbnet", "levin class", "extension"], "overall_score": 4.0066612992647554, "scores": [3.6058873467330614, 0.5448880597992841, 0.5354706836010403], "rank_score": 1.562082030044462} -{"id": "yang-etal-2019-reducing", "title": "Reducing Word Omission Errors in Neural Machine Translation: A Contrastive Learning Approach", "abstract": "While neural machine translation (NMT) has achieved remarkable success, NMT systems are prone to make word omission errors. In this work, we propose a contrastive learning approach to reducing word omission errors in NMT. The basic idea is to enable the NMT model to assign a higher probability to a ground-truth translation and a lower probability to an erroneous translation, which is automatically constructed from the ground-truth translation by omitting words. We design different types of negative examples depending on the number of omitted words, word frequency, and part of speech. Experiments on Chinese-to-English, German-to-English, and Russian-to-English translation tasks show that our approach is effective in reducing word omission errors and achieves better translation performance than three baseline methods.", "phrases": ["word omission error", "neural machine translation", "contrastive learning approach", "ground-truth translation"], "overall_score": 3.0394392742394754, "scores": [2.9002245573948318, 1.9959468863704002, 0.8243036508992848, 0.5273761243962218], "rank_score": 1.5619628047651846} -{"id": "clouatre-etal-2021-mlmlm", "title": "MLMLM: Link Prediction with Mean Likelihood Masked Language Model", "abstract": "Knowledge Bases (KBs) are easy to query, verifiable, and interpretable. They however scale with man-hours and high-quality data. Masked Language Models (MLMs), such as BERT, scale with computing power as well as unstructured raw text data. The knowledge contained within those models is however not directly interpretable. We propose to perform link prediction with MLMs to address both the KBs scalability issues and the MLMs interpretability issues. To do that we introduce MLMLM, Mean Likelihood Masked Language Model, an approach comparing the mean likelihood of generating the different entities to perform link prediction in a tractable manner. We obtain State of the Art (SotA) results on the WN18RR dataset and the best non-entity-embedding based results on the FB15k-237 dataset. We also obtain convincing results on link prediction on previously unseen entities, making MLMLM a suitable approach to introducing new entities to a KB.", "phrases": ["link prediction", "query", "mlmlm"], "overall_score": 2.1645419139910995, "scores": [2.21035225098209, 1.9209661387681762, 0.5528424379166301], "rank_score": 1.5613869425556322} -{"id": "liang-etal-2006-alignment", "title": "Alignment by Agreement", "abstract": "We present an unsupervised approach to symmetric word alignment in which two simple asymmetric models are trained jointly to maximize a combination of data likelihood and agreement between the models. 
Compared to the standard practice of intersecting predictions of independently-trained models, joint training provides a 32% reduction in AER. Moreover, a simple and efficient pair of HMM aligners provides a 29% reduction in AER over symmetrized IBM model 4 predictions.", "phrases": ["agreement", "word alignment", "asymmetric model", "hmm aligner", "direction"], "overall_score": 5.549136639426271, "scores": [4.056806733043979, 0.9925705562386581, 0.9342061628686498, 0.9306005712234898, 0.8897447952028552], "rank_score": 1.5607857637155265} -{"id": "jiang-etal-2020-know", "title": "How Can We Know What Language Models Know?", "abstract": "Recent work has presented intriguing results examining the knowledge contained in language models (LMs) by having the LM fill in the blanks of prompts such as \u201cObama is a __ by profession\u201d. These prompts are usually manually created, and quite possibly sub-optimal; another prompt such as \u201cObama worked as a __ \u201d may result in more accurately predicting the correct profession. Because of this, given an inappropriate prompt, we might fail to retrieve facts that the LM does know, and thus any given prompt only provides a lower bound estimate of the knowledge contained in an LM. In this paper, we attempt to more accurately estimate the knowledge contained in LMs by automatically discovering better prompts to use in this querying process. Specifically, we propose mining-based and paraphrasing-based methods to automatically generate high-quality and diverse prompts, as well as ensemble methods to combine answers from different prompts. Extensive experiments on the LAMA benchmark for extracting relational knowledge from LMs demonstrate that our methods can improve accuracy from 31.1% to 39.6%, providing a tighter lower bound on what LMs know. We have released the code and the resulting LM Prompt And Query Archive (LPAQA) at .", "phrases": ["language model", "prompt", "paraphrasing-based method", "relational knowledge", "mlm"], "overall_score": 6.465640666223043, "scores": [2.339938644587796, 2.277491450472262, 1.232173619870951, 1.1178481157024716, 0.8353847812648475], "rank_score": 1.5605673223796657} -{"id": "modi-etal-2016-inscript", "title": "InScript: Narrative texts annotated with script information", "abstract": "This paper presents the InScript corpus (Narrative Texts Instantiating Script structure). InScript is a corpus of 1,000 stories centered around 10 different scenarios. Verbs and noun phrases are annotated with event and participant types, respectively. Additionally, the text is annotated with coreference information. The corpus shows rich lexical variation and will serve as a unique resource for the study of the role of script knowledge in natural language processing.", "phrases": ["story", "different scenario", "participant", "script knowledge", "inscript"], "overall_score": 4.002712659866558, "scores": [3.0456050044983853, 1.6975885573577576, 1.3609813874915684, 0.8708606546596594, 0.8276772414188212], "rank_score": 1.5605425690852384} -{"id": "zhu-etal-2021-ontogum", "title": "OntoGUM: Evaluating Contextualized SOTA Coreference Resolution on 12 More Genres", "abstract": "SOTA coreference resolution produces increasingly impressive scores on the OntoNotes benchmark. However, the lack of comparable data following the same scheme for more genres makes it difficult to evaluate generalizability to open-domain data. 
This paper provides a dataset and comprehensive evaluation showing that the latest neural LM-based end-to-end systems degrade very substantially out of domain. We make an OntoNotes-like coreference dataset called OntoGUM publicly available, converted from GUM, an English corpus covering 12 genres, using deterministic rules, which we evaluate. Thanks to the rich syntactic and discourse annotations in GUM, we are able to create the largest human-annotated coreference corpus following the OntoNotes guidelines, and the first to be evaluated for consistency with the OntoNotes scheme. Out-of-domain evaluation across 12 genres shows nearly 15-20% degradation for both deterministic and deep learning systems, indicating a lack of generalizability or covert overfitting in existing coreference resolution models.", "phrases": ["sota coreference resolution", "genre", "ontogum", "ontonotes schema"], "overall_score": 2.5102797448194294, "scores": [2.5277675055529465, 2.208241560915249, 0.9437334754283181, 0.5591554619982835], "rank_score": 1.5597245009736993} -{"id": "nguyen-etal-2007-subtree", "title": "Subtree Mining for Relation Extraction from Wikipedia", "abstract": "In this study, we address the problem of extracting relations between entities from Wikipedia's English articles. Our proposed method first anchors the appearance of entities in Wikipedia's articles using neither a Named Entity Recognizer (NER) nor a coreference resolution tool. It then classifies the relationships between entity pairs using SVM with features extracted from the web structure and subtrees mined from the syntactic structure of text. We evaluate our method on manually annotated data from actual Wikipedia articles.", "phrases": ["relation extraction", "wikipedia", "subtree"], "overall_score": 2.509520039551079, "scores": [2.3438271215024082, 1.8023034078024625, 0.5316268793466422], "rank_score": 1.5592524695505041} -{"id": "bekoulis-etal-2018-adversarial", "title": "Adversarial training for multi-context joint entity and relation extraction", "abstract": "Adversarial training (AT) is a regularization method that can be used to improve the robustness of neural network methods by adding small perturbations in the training data. We show how to use AT for the tasks of entity recognition and relation extraction. In particular, we demonstrate that applying AT to a general purpose baseline model for jointly extracting entities and relations allows improving the state-of-the-art effectiveness on several datasets in different contexts (i.e., news, biomedical, and real estate data) and for different languages (English and Dutch).", "phrases": ["joint entity", "relation extraction", "adversarial training"], "overall_score": 3.589638080496949, "scores": [2.1429448550633836, 1.7397716364842786, 0.7941635396211548], "rank_score": 1.5589600103896057} -{"id": "ikeda-etal-2016-japanese", "title": "Japanese Text Normalization with Encoder-Decoder Model", "abstract": "Text normalization is the task of transforming lexical variants to their canonical forms. We model the problem of text normalization as a character-level sequence to sequence learning problem and present a neural encoder-decoder model for solving it. To train the encoder-decoder model, many sentence pairs are generally required. However, Japanese non-standard canonical pairs are scarce in the form of parallel corpora. 
To address this issue, we propose a method of data augmentation to increase data size by converting existing resources into synthesized non-standard forms using handcrafted rules. We conducted an experiment to demonstrate that the synthesized corpus contributes to stably training an encoder-decoder model and improving the performance of Japanese text normalization.", "phrases": ["text normalization", "encoder-decoder model", "sentence pair", "data augmentation", "character level"], "overall_score": 3.4241125042611937, "scores": [3.7917628209836804, 1.844350688651881, 1.0610813443279807, 0.5531928699779154, 0.541516070463523], "rank_score": 1.5583807588809961} -{"id": "utiyama-isahara-2007-comparison", "title": "A Comparison of Pivot Methods for Phrase-Based Statistical Machine Translation", "abstract": "We compare two pivot strategies for phrase-based statistical machine translation (SMT), namely phrase translation and sentence translation. The phrase translation strategy means that we directly construct a phrase translation table (phrase-table) of the source and target language pair from two phrase-tables: one constructed from the source language and English and one constructed from English and the target language. We then use that phrase-table in a phrase-based SMT system. The sentence translation strategy means that we first translate a source language sentence into n English sentences and then translate these n sentences into target language sentences separately. Then, we select the highest scoring sentence from these target sentences. We conducted controlled experiments using the Europarl corpus to evaluate the performance of these pivot strategies as compared to directly trained SMT systems. The phrase translation strategy significantly outperformed the sentence translation strategy. Its relative performance was 0.92 to 0.97 compared to directly trained SMT systems.", "phrases": ["statistical machine translation", "pivot language", "many researcher", "bridging", "transfer method"], "overall_score": 6.124006820021907, "scores": [2.0022177424654766, 2.4818208939198625, 1.107808869262956, 1.1026349352970415, 1.0932571386690302], "rank_score": 1.5575479159228736} -{"id": "kim-cho-2021-length", "title": "Length-Adaptive Transformer: Train Once with Length Drop, Use Anytime with Search", "abstract": "Despite transformers' impressive accuracy, their computational cost is often prohibitive to use with limited computational resources. Most previous approaches to improve inference efficiency require a separate model for each possible computational budget. In this paper, we extend PoWER-BERT (Goyal et al., 2020) and propose Length-Adaptive Transformer that can be used for various inference scenarios after one-shot training. We train a transformer with LengthDrop, a structural variant of dropout, which stochastically determines a sequence length at each layer. We then conduct a multi-objective evolutionary search to find a length configuration that maximizes the accuracy and minimizes the efficiency metric under any given computational budget. Additionally, we significantly extend the applicability of PoWER-BERT beyond sequence-level classification into token-level classification with Drop-and-Restore process that drops word-vectors temporarily in intermediate layers and restores at the last layer if necessary. 
We empirically verify the utility of the proposed approach by demonstrating the superior accuracy-efficiency trade-off under various setups, including span-based question answering and text classification. Code is available at .", "phrases": ["search", "one-shot training", "length-adaptive transformer"], "overall_score": 3.0307893790330467, "scores": [2.460813589263649, 1.679448599191086, 0.5322907254160519], "rank_score": 1.5575176379569289} -{"id": "duan-etal-2019-contrastive", "title": "Contrastive Attention Mechanism for Abstractive Sentence Summarization", "abstract": "We propose a contrastive attention mechanism to extend the sequence-to-sequence framework for the abstractive sentence summarization task, which aims to generate a brief summary of a given source sentence. The proposed contrastive attention mechanism accommodates two categories of attention: one is the conventional attention that attends to relevant parts of the source sentence, the other is the opponent attention that attends to irrelevant or less relevant parts of the source sentence. Both attentions are trained in an opposite way so that the contribution from the conventional attention is encouraged and the contribution from the opponent attention is discouraged through a novel softmax and softmin functionality. Experiments on benchmark datasets show that the proposed contrastive attention mechanism is more focused on the relevant parts for the summary than the conventional attention mechanism, and greatly advances the state-of-the-art performance on the abstractive sentence summarization task. We release the code at Abstractive-Text-Summarization.", "phrases": ["abstractive sentence summarization", "relevant part", "contrastive attention mechanism"], "overall_score": 2.788766202896734, "scores": [2.8382084164398447, 0.9721857122155554, 0.8589260297555166], "rank_score": 1.556440052803639} -{"id": "pasunuru-etal-2017-towards", "title": "Towards Improving Abstractive Summarization via Entailment Generation", "abstract": "Abstractive summarization, the task of rewriting and compressing a document into a short summary, has achieved considerable success with neural sequence-to-sequence models. However, these models can still benefit from stronger natural language inference skills, since a correct summary is logically entailed by the input document, i.e., it should not contain any contradictory or unrelated information. We incorporate such knowledge into an abstractive summarization model via multi-task learning, where we share its decoder parameters with those of an entailment generation model. We achieve promising initial improvements based on multiple metrics and datasets (including a test-only setting). The domain mismatch between the entailment (captions) and summarization (news) datasets suggests that the model is learning some domain-agnostic inference skills.", "phrases": ["abstractive summarization", "entailment generation", "auxiliary task", "question generation"], "overall_score": 3.235979629662691, "scores": [2.868302417055553, 1.9950007713846634, 0.8314507954076997, 0.5299550349948416], "rank_score": 1.5561772547106896} -{"id": "hildebrand-etal-2005-adaptation", "title": "Adaptation of the translation model for statistical machine translation based on information retrieval", "abstract": "In this paper we present experiments concerning translation model adaptation for statistical machine translation. We develop a method to adapt translation models using information retrieval. 
The approach selects sentences similar to the test set to form an adapted training corpus. The method allows a better use of additionally available out-of-domain training data or finds in-domain data in a mixed corpus. The adapted translation models significantly improve the translation performance compared to competitive baseline systems.", "phrases": ["translation model", "information retrieval", "out-of-domain training data", "sentence pair", "similar approach"], "overall_score": 5.486824581281094, "scores": [3.901530162784968, 1.7417465414024902, 1.063890419629904, 0.5430292247012707, 0.5295308560307745], "rank_score": 1.5559454409098814} -{"id": "zhou-xu-2015-end", "title": "End-to-end learning of semantic role labeling using recurrent neural networks", "abstract": "Semantic role labeling (SRL) is one of the basic natural language processing (NLP) problems. To date, most of the successful SRL systems were built on top of some form of parsing results (Koomen et al., 2005; Palmer et al., 2010; Pradhan et al., 2013), where pre-defined feature templates over the syntactic structure are used. The attempts of building an end-to-end SRL learning system without using parsing were less successful (Collobert et al., 2011). In this work, we propose to use a deep bi-directional recurrent network as an end-to-end system for SRL. We take only original text information as input feature, without using any syntactic knowledge. The proposed algorithm for semantic role labeling was mainly evaluated on the CoNLL-2005 shared task and achieved an F1 score of 81.07. This result outperforms the previous state-of-the-art system from the combination of different parsing trees or models. We also obtained the same conclusion with F1 = 81.27 on the CoNLL-2012 shared task. As a result of its simplicity, our model is also computationally efficient, with a parsing speed of 6.7k tokens per second. Our analysis shows that our model is better at handling longer sentences than traditional models. And the latent variables of our model implicitly capture the syntactic structure of a sentence.", "phrases": ["semantic role labeling", "recurrent neural network", "state-of-the-art result"], "overall_score": 5.43922852144072, "scores": [2.289807535872212, 1.3073605386916833, 1.0696845712454313], "rank_score": 1.5556175486031087} -{"id": "sap-etal-2020-social", "title": "Social Bias Frames: Reasoning about Social and Power Implications of Language", "abstract": "Warning: this paper contains content that may be offensive or upsetting. Language has the power to reinforce stereotypes and project social biases onto others. At the core of the challenge is that it is rarely what is stated explicitly, but rather the implied meanings, that frame people's judgments about others. For example, given a statement that \u201cwe shouldn't lower our standards to hire more women,\u201d most listeners will infer the implicature intended by the speaker - that \u201cwomen (candidates) are less qualified.\u201d Most semantic formalisms, to date, do not capture such pragmatic implications in which people express social biases and power differentials in language. We introduce Social Bias Frames, a new conceptual formalism that aims to model the pragmatic frames in which people project social biases and stereotypes onto others. In addition, we introduce the Social Bias Inference Corpus to support large-scale modelling and evaluation with 150k structured annotations of social media posts, covering over 34k implications about a thousand demographic groups. 
We then establish baseline approaches that learn to recover Social Bias Frames from unstructured text. We find that while state-of-the-art neural models are effective at high-level categorization of whether a given statement projects unwanted social bias (80% F1), they are not effective at spelling out more detailed explanations in terms of Social Bias Frames. Our study motivates future work that combines structured pragmatic inference with commonsense reasoning on social implications.", "phrases": ["implication", "stereotype", "social bias frames", "hate speech"], "overall_score": 5.529228598376958, "scores": [2.5439924936797698, 1.5886396669546043, 1.2195391625058476, 0.868573878454068], "rank_score": 1.5551863003985726} -{"id": "diab-etal-2004-automatic", "title": "Automatic Tagging of Arabic Text: From Raw Text to Base Phrase Chunks", "abstract": "To date, there are no fully automated systems addressing the community's need for fundamental language processing tools for Arabic text. In this paper, we present a Support Vector Machine (SVM) based approach to automatically tokenize (segmenting off clitics), part-of-speech (POS) tag and annotate base phrases (BPs) in Arabic text. We adapt highly accurate tools that have been developed for English text and apply them to Arabic text. Using standard evaluation metrics, we report that the SVM-TOK tokenizer achieves an F\u03b2=1 score of 99.12, the SVM-POS tagger achieves an accuracy of 95.49%, and the SVM-BP chunker yields an F\u03b2=1 score of 92.08.", "phrases": ["arabic text", "pos tagging", "disambiguation"], "overall_score": 5.288908397384939, "scores": [1.8824763726513394, 1.7132287404794893, 1.0693358744053878], "rank_score": 1.5550136625120723} -{"id": "lin-etal-2006-side", "title": "Which Side are You on? Identifying Perspectives at the Document and Sentence Levels", "abstract": "In this paper we investigate a new problem of identifying the perspective from which a document is written. By perspective we mean a point of view, for example, from the perspective of Democrats or Republicans. Can computers learn to identify the perspective of a document? Not every sentence is written strongly from a perspective. Can computers learn to identify which sentences strongly convey a particular perspective? We develop statistical models to capture how perspectives are expressed at the document and sentence levels, and evaluate the proposed models on articles about the Israeli-Palestinian conflict. The results show that the proposed models successfully learn how perspectives are reflected in word usage and can identify the perspective of a document with high accuracy.", "phrases": ["perspective", "sentence level", "conflict", "document collection"], "overall_score": 4.937800915524169, "scores": [4.189863324405041, 0.9556824240263102, 0.5446782632881774, 0.5246498439364132], "rank_score": 1.5537184639139856} -{"id": "duan-etal-2017-question", "title": "Question Generation for Question Answering", "abstract": "This paper presents how to generate questions from given passages using neural networks, where large-scale QA pairs are automatically crawled and processed from a Community-QA website, and used as training data. 
The contribution of the paper is two-fold: first, two types of question generation approaches are proposed: one is a retrieval-based method using a convolutional neural network (CNN), and the other is a generation-based method using a recurrent neural network (RNN); second, we show how to leverage the generated questions to improve existing question answering systems. We evaluate our question generation method for the answer sentence selection task on three benchmark datasets, including SQuAD, MS MARCO, and WikiQA. Experimental results show that, by using generated questions as an extra signal, significant QA improvement can be achieved.", "phrases": ["question generation", "single sentence", "synthetic data"], "overall_score": 5.651717565760035, "scores": [3.2097128482440986, 0.9182239401516664, 0.5331631698676155], "rank_score": 1.5536999860877934} -{"id": "misra-etal-2020-exploring", "title": "Exploring BERT's Sensitivity to Lexical Cues using Tests from Semantic Priming", "abstract": "Models trained to estimate word probabilities in context have become ubiquitous in natural language processing. How do these models use lexical cues in context to inform their word probabilities? To answer this question, we present a case study analyzing the pre-trained BERT model with tests informed by semantic priming. Using English lexical stimuli that show priming in humans, we find that BERT too shows \u201cpriming\u201d, predicting a word with greater probability when the context includes a related word versus an unrelated one. This effect decreases as the amount of information provided by the context increases. Follow-up analysis shows BERT to be increasingly distracted by related prime words as context becomes more informative, assigning lower probabilities to related words. Our findings highlight the importance of considering contextual constraint effects when studying word prediction in these models, and highlight possible parallels with human processing.", "phrases": ["bert", "semantic priming", "related word"], "overall_score": 2.1538786549305615, "scores": [2.0913725924209614, 2.0277713304434037, 0.5419411583530024], "rank_score": 1.5536950270724559} -{"id": "voita-titov-2020-information", "title": "Information-Theoretic Probing with Minimum Description Length", "abstract": "To measure how well pretrained representations encode some linguistic property, it is common to use accuracy of a probe, i.e. a classifier trained to predict the property from the representations. Despite widespread adoption of probes, differences in their accuracy fail to adequately reflect differences in representations. For example, they do not substantially favour pretrained representations over randomly initialized ones. Analogously, their accuracy can be similar when probing for genuine linguistic labels and probing for random synthetic tasks. To see reasonable differences in accuracy with respect to these random baselines, previous work had to constrain either the amount of probe training data or its model size. Instead, we propose an alternative to the standard probes, information-theoretic probing with minimum description length (MDL). With MDL probing, training a probe to predict labels is recast as teaching it to effectively transmit the data. Therefore, the measure of interest changes from probe accuracy to the description length of labels given representations. In addition to probe quality, the description length evaluates \u201cthe amount of effort\u201d needed to achieve the quality. 
This amount of effort characterizes either (i) the size of a probing model, or (ii) the amount of data needed to achieve the high quality. We consider two methods for estimating MDL which can be easily implemented on top of the standard probing pipelines: variational coding and online coding. We show that these methods agree in results and are more informative and stable than the standard probes.", "phrases": ["probing", "minimum description length", "intermediate representation", "significant uncertainty", "diagnostic classifier"], "overall_score": 5.649549036764949, "scores": [4.268621868791951, 1.833685131851375, 0.5618008480963854, 0.5515194556640388, 0.5498919007686986], "rank_score": 1.5531038410344897} -{"id": "poon-etal-2009-unsupervised", "title": "Unsupervised Morphological Segmentation with Log-Linear Models", "abstract": "Morphological segmentation breaks words into morphemes (the basic semantic units). It is a key component for natural language processing systems. Unsupervised morphological segmentation is attractive, because in every language there are virtually unlimited supplies of text, but very few labeled resources. However, most existing model-based systems for unsupervised morphological segmentation use directed generative models, making it difficult to leverage arbitrary overlapping features that are potentially helpful to learning. In this paper, we present the first log-linear model for unsupervised morphological segmentation. Our model uses overlapping features such as morphemes and their contexts, and incorporates exponential priors inspired by the minimum description length (MDL) principle. We present efficient algorithms for learning and inference by combining contrastive estimation with sampling. Our system, based on monolingual features only, outperforms a state-of-the-art system by a large margin, even when the latter uses bilingual information such as phrasal alignment and phonetic correspondence. On the Arabic Penn Treebank, our system reduces F1 error by 11% compared to Morfessor.", "phrases": ["log-linear model", "unit", "generative model", "contrastive estimation", "unsupervised morphological segmentation"], "overall_score": 5.05867333369516, "scores": [3.0422315199548464, 2.0488030503025083, 0.9166329798775971, 0.8827547459611661, 0.8728119645545801], "rank_score": 1.5526468521301395} -{"id": "bender-friedman-2018-data", "title": "Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science", "abstract": "In this paper, we propose data statements as a design solution and professional practice for natural language processing technologists, in both research and development. Through the adoption and widespread use of data statements, the field can begin to address critical scientific and ethical issues that result from the use of data from certain populations in the development of technology for other populations. We present a form that data statements can take and explore the implications of adopting them as part of regular practice. 
We argue that data statements will help alleviate issues related to exclusion and bias in language technology, lead to better precision in claims about how natural language processing research can generalize and thus better engineering results, protect companies from public embarrassment, and ultimately lead to language technology that meets its users in their own preferred linguistic style and furthermore does not misrepresent them to others.", "phrases": ["practice", "language processing technologist", "data statement", "template"], "overall_score": 4.798702173232007, "scores": [4.06559860099536, 1.044558898180729, 0.5675041484854606, 0.5321555847331393], "rank_score": 1.5524543080986724} -{"id": "zeman-resnik-2008-cross", "title": "Cross-Language Parser Adaptation between Related Languages", "abstract": "The present paper describes an approach to adapting a parser to a new language. Presumably the target language is much poorer in linguistic resources than the source language. The technique has been tested on two European languages due to test data availability; however, it is easily applicable to any pair of sufficiently related languages, including some of the Indic language group. Our adaptation technique using existing annotations in the source language achieves performance equivalent to that obtained by training on 1546 trees in the target language.", "phrases": ["related language", "treebank", "part-of-speech tag"], "overall_score": 6.040937276812921, "scores": [1.7441500707113013, 1.5640402026500388, 1.3484512060093536], "rank_score": 1.5522138264568979} -{"id": "baroni-lenci-2010-distributional", "title": "Distributional Memory: A General Framework for Corpus-Based Semantics", "abstract": "Research into corpus-based semantics has focused on the development of ad hoc models that treat single tasks, or sets of closely related tasks, as unrelated challenges to be tackled by extracting different kinds of distributional information from the corpus. As an alternative to this \u201cone task, one model\u201d approach, the Distributional Memory framework extracts distributional information once and for all from the corpus, in the form of a set of weighted word-link-word tuples arranged into a third-order tensor. Different matrices are then generated from the tensor, and their rows and columns constitute natural spaces to deal with different semantic problems. In this way, the same distributional information can be shared across tasks such as modeling word similarity judgments, discovering synonyms, concept categorization, predicting selectional preferences of verbs, solving analogy problems, classifying relations between word pairs, harvesting qualia structures with patterns or example pairs, predicting the typical properties of concepts, and classifying verbs into alternation classes. Extensive empirical testing in all these domains shows that a Distributional Memory implementation performs competitively against task-specific algorithms recently reported in the literature for the same tasks, and against our implementations of several state-of-the-art methods. 
The Distributional Memory approach is thus shown to be tenable despite the constraints imposed by its multi-purpose nature.", "phrases": ["general framework", "corpus-based semantic", "distributional memory", "syntactic relation", "co-occurrence"], "overall_score": 6.4293105080613975, "scores": [3.079844614503396, 1.3004784044373034, 0.8909539945790158, 1.3360338381778079, 1.151681955254969], "rank_score": 1.5517985613904985} -{"id": "jia-etal-2013-graph", "title": "Graph Model for Chinese Spell Checking", "abstract": "This paper describes our system in the Bake-Off 2013 task of SIGHAN 7. We illustrate that Chinese spell checking and correction can be efficiently tackled by utilizing a word segmenter. A graph model is used to represent the sentence and a single source shortest path (SSSP) algorithm is performed on the graph to correct spelling errors. Our system achieves 4 first ranks out of 10 metrics on the standard test set.", "phrases": ["chinese spell checking", "single source", "graph model"], "overall_score": 3.019503747686781, "scores": [2.644514285199361, 0.9677055568854739, 1.042934070061626], "rank_score": 1.551717970715487} -{"id": "huang-sagae-2010-dynamic", "title": "Dynamic Programming for Linear-Time Incremental Parsing", "abstract": "Incremental parsing techniques such as shift-reduce have gained popularity thanks to their efficiency, but there remains a major problem: the search is greedy and only explores a tiny fraction of the whole space (even with beam search) as opposed to dynamic programming. We show that, surprisingly, dynamic programming is in fact possible for many shift-reduce parsers, by merging \"equivalent\" stacks based on feature values. Empirically, our algorithm yields up to a five-fold speedup over a state-of-the-art shift-reduce dependency parser with no loss in accuracy. Better search also leads to better learning, and our final parser outperforms all previously reported dependency parsers for English and Chinese, yet is much faster.", "phrases": ["dynamic programming", "shift-reduce parsing", "decoding", "beam-search", "transition-based dependency parsing"], "overall_score": 5.6441987049201146, "scores": [4.717912640511269, 0.9343460332648225, 0.9196619990329158, 0.6086913185689354, 0.5775529799565982], "rank_score": 1.551632994266908} -{"id": "aguilar-etal-2018-named", "title": "Named Entity Recognition on Code-Switched Data: Overview of the CALCS 2018 Shared Task", "abstract": "In the third shared task of the Computational Approaches to Linguistic Code-Switching (CALCS) workshop, we focus on Named Entity Recognition (NER) on code-switched social-media data. We divide the shared task into two competitions based on the English-Spanish (ENG-SPA) and Modern Standard Arabic-Egyptian (MSA-EGY) language pairs. We use Twitter data and 9 entity types to establish a new dataset for code-switched NER benchmarks. In addition to the CS phenomenon, the diversity of the entities and the social media challenges make the task considerably hard to process. As a result, the best scores of the competitions are 63.76% and 71.61% for ENG-SPA and MSA-EGY, respectively. 
We present the scores of 9 participants and discuss the most common challenges among submissions.", "phrases": ["entity recognition", "linguistic code-switching", "eng-spa"], "overall_score": 3.720591179314043, "scores": [3.0826429096243073, 1.0426382561546699, 0.5295399452848989], "rank_score": 1.5516070370212922} -{"id": "wang-etal-2020-response", "title": "Response Selection for Multi-Party Conversations with Dynamic Topic Tracking", "abstract": "While participants in a multi-party multi-turn conversation simultaneously engage in multiple conversation topics, existing response selection methods are developed mainly focusing on a two-party single-conversation scenario. Hence, the prolongation and transition of conversation topics are ignored by current methods. In this work, we frame response selection as a dynamic topic tracking task to match the topic between the response and relevant conversation context. With this new formulation, we propose a novel multi-task learning framework that supports efficient encoding through large pretrained models with only two utterances at once to perform dynamic topic disentanglement and response selection. We also propose Topic-BERT, an essential pretraining step to embed topic information into BERT with self-supervised learning. Experimental results on the DSTC-8 Ubuntu IRC dataset show state-of-the-art results in response selection and topic disentanglement tasks, outperforming existing methods by a good margin.", "phrases": ["conversation", "dynamic topic", "response selection"], "overall_score": 3.5726852892621013, "scores": [2.2097014603867864, 1.314105749257675, 1.130985310465899], "rank_score": 1.5515975067034535} -{"id": "filatova-2012-irony", "title": "Irony and Sarcasm: Corpus Generation and Analysis Using Crowdsourcing", "abstract": "The ability to reliably identify sarcasm and irony in text can improve the performance of many Natural Language Processing (NLP) systems including summarization, sentiment analysis, etc. The existing sarcasm detection systems have focused on identifying sarcasm on a sentence level or for a specific phrase. However, often it is impossible to identify a sentence containing sarcasm without knowing the context. In this paper we describe a corpus generation experiment where we collect regular and sarcastic Amazon product reviews. We perform qualitative and quantitative analysis of the corpus. The resulting corpus can be used for identifying sarcasm on two levels: a document and a text utterance (where a text utterance can be as short as a sentence and as long as a whole document).", "phrases": ["sarcasm", "crowdsourcing", "corpus generation experiment", "irony", "annotator"], "overall_score": 4.723274708324653, "scores": [3.0871717511229257, 1.8225749110185212, 0.8996398923942064, 1.1198944373241955, 0.8277232755426551], "rank_score": 1.5514008534805006} -{"id": "wang-etal-2020-relational", "title": "Relational Graph Attention Network for Aspect-based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis aims to determine the sentiment polarity towards a specific aspect in online reviews. Most recent efforts adopt attention-based neural network models to implicitly connect aspects with opinion words. However, due to the complexity of language and the existence of multiple aspects in a single sentence, these models often confuse the connections. In this paper, we address this problem by means of effective encoding of syntax information. 
Firstly, we define a unified aspect-oriented dependency tree structure rooted at a target aspect by reshaping and pruning an ordinary dependency parse tree. Then, we propose a relational graph attention network (R-GAT) to encode the new tree structure for sentiment prediction. Extensive experiments are conducted on the SemEval 2014 and Twitter datasets, and the experimental results confirm that the connections between aspects and opinion words can be better established with our approach, and the performance of the graph attention network (GAT) is significantly improved as a consequence.", "phrases": ["graph attention network", "sentiment analysis", "aspect term", "dependency graph"], "overall_score": 5.376628956381888, "scores": [3.9077902797672297, 0.8792705640405917, 0.8788110687095269, 0.539596833140374], "rank_score": 1.5513671864144305} -{"id": "khandelwal-sawant-2020-negbert", "title": "NegBERT: A Transfer Learning Approach for Negation Detection and Scope Resolution", "abstract": "Negation is an important characteristic of language, and a major component of information extraction from text. This subtask is of considerable importance to the biomedical domain. Over the years, multiple approaches have been explored to address this problem: Rule-based systems, Machine Learning classifiers, Conditional Random Field models, CNNs and more recently BiLSTMs. In this paper, we look at applying Transfer Learning to this problem. First, we extensively review previous literature addressing Negation Detection and Scope Resolution across the 3 datasets that have gained popularity over the years: the BioScope Corpus, the Sherlock dataset, and the SFU Review Corpus. We then explore the decision choices involved with using BERT, a popular transfer learning model, for this task, and report state-of-the-art results for scope resolution across all 3 datasets. Our model, referred to as NegBERT, achieves a token level F1 score on scope resolution of 92.36 on the Sherlock dataset, 95.68 on the BioScope Abstracts subcorpus, 91.24 on the BioScope Full Papers subcorpus, 90.95 on the SFU Review Corpus, outperforming the previous state-of-the-art systems by a significant margin. We also analyze the model's generalizability to datasets on which it is not trained.", "phrases": ["negation detection", "scope resolution", "bioscope corpus", "transfer learning model", "negbert"], "overall_score": 3.8542904358144967, "scores": [2.4458360161747286, 2.4367736689188924, 1.7878400364079277, 0.5567526350409274, 0.5282005197453008], "rank_score": 1.5510805752575556} -{"id": "wang-potts-2019-talkdown", "title": "TalkDown: A Corpus for Condescension Detection in Context", "abstract": "Condescending language use is caustic; it can bring dialogues to an end and bifurcate communities. Thus, systems for condescension detection could have a large positive impact. A challenge here is that condescension is often impossible to detect from isolated utterances, as it depends on the discourse and social context. To address this, we present TalkDown, a new labeled dataset of condescending linguistic acts in context. We show that extending a language-only model with representations of the discourse improves performance, and we motivate techniques for dealing with the low rates of condescension overall. 
We also use our model to estimate condescension rates in various online communities and relate these differences to differing community norms.", "phrases": ["condescension detection", "community", "linguistic act", "talkdown", "social medium message"], "overall_score": 4.721802446704476, "scores": [2.533137281836831, 2.42745553084977, 1.3564138737553941, 0.8821620648021028, 0.5554176301840171], "rank_score": 1.550917276285623} -{"id": "ando-2006-applying", "title": "Applying Alternating Structure Optimization to Word Sense Disambiguation", "abstract": "This paper presents a new application of the recently proposed machine learning method Alternating Structure Optimization (ASO), to word sense disambiguation (WSD). Given a set of WSD problems and their respective labeled examples, we seek to improve overall performance on that set by using all the labeled examples (irrespective of target words) for the entire set in learning a disambiguator for each individual problem. Thus, in effect, on each individual problem (e.g., disambiguation of \"art\") we benefit from training examples for other problems (e.g., disambiguation of \"bar\", \"canal\", and so forth). We empirically study the effective use of ASO for this purpose in the multitask and semi-supervised learning configurations. Our performance results rival or exceed those of the previous best systems on several Senseval lexical sample task data sets.", "phrases": ["alternating structure optimization", "word sense disambiguation", "wsd"], "overall_score": 3.7184375893294095, "scores": [2.9697832841910294, 0.8502050504887093, 0.8321384260398204], "rank_score": 1.5507089202398532} -{"id": "gardner-etal-2020-evaluating", "title": "Evaluating Models' Local Decision Boundaries via Contrast Sets", "abstract": "Standard test sets for supervised learning evaluate in-distribution generalization. Unfortunately, when a dataset has systematic gaps (e.g., annotation artifacts), these evaluations are misleading: a model can learn simple decision rules that perform well on the test set but do not capture the abilities a dataset is intended to test. We propose a more rigorous annotation paradigm for NLP that helps to close systematic gaps in the test data. In particular, after a dataset is constructed, we recommend that the dataset authors manually perturb the test instances in small but meaningful ways that (typically) change the gold label, creating contrast sets. Contrast sets provide a local view of a model's decision boundary, which can be used to more accurately evaluate a model's true linguistic capabilities. We demonstrate the efficacy of contrast sets by creating them for 10 diverse NLP datasets (e.g., DROP reading comprehension, UD parsing, and IMDb sentiment analysis). Although our contrast sets are not explicitly adversarial, model performance is significantly lower on them than on the original test sets\u2014up to 25% in some cases. 
We release our contrast sets as new evaluation benchmarks and encourage future dataset construction efforts to follow similar annotation processes.", "phrases": ["decision boundary", "contrast set", "gold label", "model evaluation"], "overall_score": 5.21938267191242, "scores": [4.157977481522667, 0.8705132050706649, 0.6167784824260799, 0.554818896225971], "rank_score": 1.5500220163113456} -{"id": "ran-etal-2019-numnet", "title": "NumNet: Machine Reading Comprehension with Numerical Reasoning", "abstract": "Numerical reasoning, such as addition, subtraction, sorting and counting is a critical skill in human's reading comprehension, which has not been well considered in existing machine reading comprehension (MRC) systems. To address this issue, we propose a numerical MRC model named as NumNet, which utilizes a numerically-aware graph neural network to consider the comparing information and performs numerical reasoning over numbers in the question and passage. Our system achieves an EM-score of 64.56% on the DROP dataset, outperforming all existing machine reading comprehension models by considering the numerical relations among numbers.", "phrases": ["machine reading comprehension", "numerical reasoning", "drop", "mathematical problem"], "overall_score": 4.389764551452505, "scores": [2.7616944764730906, 2.059336071336997, 0.8449320490043534, 0.5316145262239914], "rank_score": 1.5493942807596082} -{"id": "zhao-etal-2019-moverscore", "title": "MoverScore: Text Generation Evaluating with Contextualized Embeddings and Earth Mover Distance", "abstract": "A robust evaluation metric has a profound impact on the development of text generation systems. A desirable metric compares system output against references based on their semantics rather than surface forms. In this paper we investigate strategies to encode system and reference texts to devise a metric that shows a high correlation with human judgment of text quality. We validate our new metric, namely MoverScore, on a number of text generation tasks including summarization, machine translation, image captioning, and data-to-text generation, where the outputs are produced by a variety of neural and non-neural systems. Our findings suggest that metrics combining contextualized representations with a distance measure perform the best. Such metrics also demonstrate strong generalization capability across tasks. For ease-of-use we make our metrics available as web service.", "phrases": ["contextual embedding", "text generation task", "summarization", "moverscore"], "overall_score": 5.320172091408812, "scores": [3.787394499013801, 0.9522233819816346, 0.9227487081784164, 0.5347119374556141], "rank_score": 1.5492696316573666} -{"id": "rei-yannakoudakis-2016-compositional", "title": "Compositional Sequence Labeling Models for Error Detection in Learner Writing", "abstract": "In this paper, we present the first experiments using neural network models for the task of error detection in learner writing. We perform a systematic comparison of alternative compositional architectures and propose a framework for error detection based on bidirectional LSTMs. Experiments on the CoNLL-14 shared task dataset show the model is able to outperform other participants on detecting errors in learner writing. 
Finally, the model is integrated with a publicly deployed self-assessment system, leading to performance comparable to human annotators.", "phrases": ["error detection", "learner writing", "compositional architecture", "bi-lstm"], "overall_score": 3.971238784690682, "scores": [2.776242848993235, 2.3628491125338575, 0.5275553892764291, 0.5264398899066219], "rank_score": 1.5482718101775361} -{"id": "mrksic-etal-2017-neural", "title": "Neural Belief Tracker: Data-Driven Dialogue State Tracking", "abstract": "One of the core components of modern spoken dialogue systems is the belief tracker, which estimates the user's goal at every step of the dialogue. However, most current approaches have difficulty scaling to larger, more complex dialogue domains. This is due to their dependency on either: a) Spoken Language Understanding models that require large amounts of annotated training data; or b) hand-crafted lexicons for capturing some of the linguistic variation in users' language. We propose a novel Neural Belief Tracking (NBT) framework which overcomes these problems by building on recent advances in representation learning. NBT models reason over pre-trained word vectors, learning to compose them into distributed representations of user utterances and dialogue context. Our evaluation on two datasets shows that this approach surpasses past limitations, matching the performance of state-of-the-art models which rely on hand-crafted semantic lexicons and outperforming them when such lexicons are not provided.", "phrases": ["dialogue state tracking", "pre-trained word vector", "user utterance", "neural belief tracker", "ontology"], "overall_score": 6.146851170372667, "scores": [2.433118546573538, 1.648752678717165, 1.5986198542243302, 1.135385418387499, 0.925180462407007], "rank_score": 1.5482113920619078} -{"id": "zampieri-etal-2017-findings", "title": "Findings of the VarDial Evaluation Campaign 2017", "abstract": "We present the results of the VarDial Evaluation Campaign on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects, which we organized as part of the fourth edition of the VarDial workshop at EACL'2017. This year, we included four shared tasks: Discriminating between Similar Languages (DSL), Arabic Dialect Identification (ADI), German Dialect Identification (GDI), and Cross-lingual Dependency Parsing (CLP). A total of 19 teams submitted runs across the four tasks, and 15 of them wrote system description papers.", "phrases": ["vardial evaluation campaign", "identification", "team", "acoustic feature", "ili"], "overall_score": 5.546884739806803, "scores": [3.742756454780564, 1.5002833244459668, 0.8443413977995416, 0.8280134935182345, 0.8240436233085106], "rank_score": 1.5478876587705634} -{"id": "vajjala-meurers-2012-improving", "title": "On Improving the Accuracy of Readability Classification using Insights from Second Language Acquisition", "abstract": "We investigate the problem of readability assessment using a range of lexical and syntactic features and study their impact on predicting the grade level of texts. As empirical basis, we combined two web-based text sources, Weekly Reader and BBC Bitesize, targeting different age groups, to cover a broad range of school grades. On the conceptual side, we explore the use of lexical and syntactic measures originally designed to measure language development in the production of second language learners. 
We show that the developmental measures from Second Language Acquisition (SLA) research when combined with traditional readability features such as word length and sentence length provide a good indication of text readability across different grades. The resulting classifiers significantly outperform the previous approaches on readability classification, reaching a classification accuracy of 93.3%.", "phrases": ["readability classification", "second language acquisition", "syntactic feature"], "overall_score": 4.85333051526045, "scores": [1.9468028726229998, 1.8427222724477506, 0.8540782370731861], "rank_score": 1.5478677940479788} -{"id": "spiegler-monson-2010-emma", "title": "EMMA: A novel Evaluation Metric for Morphological Analysis", "abstract": "We present a novel Evaluation Metric for Morphological Analysis (EMMA) that is both linguistically appealing and empirically sound. EMMA uses a graphbased assignment algorithm, optimized via integer linear programming, to match morphemes of predicted word analyses to the analyses of a morphologically rich answer key. This is necessary especially for unsupervised morphology analysis systems which do not have access to linguistically motivated morpheme labels. Across 3 languages, EMMA scores of 14 systems have a substantially greater positive correlation with mean average precision in an information retrieval (IR) task than do scores from the metric currently used by the Morpho Challenge (MC) competition series. We compute EMMA and MC metric scores for 93 separate system-language pairs from the 2007, 2008, and 2009 MC competitions, demonstrating that EMMA is not susceptible to two types of gaming that have plagued recent MC competitions: Ambiguity Hijacking and Shared Morpheme Padding. The EMMA evaluation script is publicly available from http://www.cs.bris.ac.uk/ Research/MachineLearning/ Morphology/Resources/.", "phrases": ["evaluation metric", "morphological analysis", "emma"], "overall_score": 2.144437986532259, "scores": [1.89073828645523, 1.8282578661693847, 0.9216589203713915], "rank_score": 1.546885024332002} -{"id": "yokoi-etal-2020-word", "title": "Word Rotator's Distance", "abstract": "One key principle for assessing textual similarity is measuring the degree of semantic overlap between texts by considering the word alignment. Such alignment-based approaches are both intuitive and interpretable; however, they are empirically inferior to the simple cosine similarity between general-purpose sentence vectors. We focus on the fact that the norm of word vectors is a good proxy for word importance, and the angle of them is a good proxy for word similarity. However, alignment-based approaches do not distinguish the norm and direction, whereas sentence-vector approaches automatically use the norm as the word importance. Accordingly, we propose decoupling word vectors into their norm and direction then computing the alignment-based similarity with the help of earth mover's distance (optimal transport), which we refer to as word rotator's distance. Furthermore, we demonstrate how to grow the norm and direction of word vectors (vector converter); this is a new systematic approach derived from the sentence-vector estimation methods, which can significantly improve the performance of the proposed method. On several STS benchmarks, the proposed methods outperform not only alignment-based approaches but also strong baselines. 
The source code is avaliable at ", "phrases": ["distance", "word vector", "word rotator", "wmd"], "overall_score": 3.2148614163383322, "scores": [2.6900725815144293, 2.074862733030491, 0.8257789722640222, 0.5933718765198335], "rank_score": 1.5460215408321938} -{"id": "newman-etal-2010-automatic", "title": "Automatic Evaluation of Topic Coherence", "abstract": "This paper introduces the novel task of topic coherence evaluation, whereby a set of words, as generated by a topic model, is rated for coherence or interpretability. We apply a range of topic scoring models to the evaluation task, drawing on WordNet, Wikipedia and the Google search engine, and existing research on lexical similarity/relatedness. In comparison with human scores for a set of learned topics over two distinct datasets, we show a simple co-occurrence measure based on pointwise mutual information over Wikipedia data is able to achieve results for the task at or nearing the level of inter-annotator correlation, and that other Wikipedia-based lexical relatedness methods also achieve strong results. Google produces strong, if less consistent, results, while our results over WordNet are patchy at best.", "phrases": ["topic coherence", "wikipedia", "mutual information", "automatic evaluation", "pmi"], "overall_score": 4.847506594656386, "scores": [3.517334819858591, 0.894031161562816, 1.34811381000051, 1.0920512425354771, 0.8785208507339476], "rank_score": 1.5460103769382685} -{"id": "joshi-etal-2019-bert", "title": "BERT for Coreference Resolution: Baselines and Analysis", "abstract": "We apply BERT to coreference resolution, achieving a new state of the art on the GAP (+11.5 F1) and OntoNotes (+3.9 F1) benchmarks. A qualitative analysis of model predictions indicates that, compared to ELMo and BERT-base, BERT-large is particularly better at distinguishing between related but distinct entities (e.g., President and CEO), but that there is still room for improvement in modeling document-level context, conversations, and mention paraphrasing. We will release all code and trained models upon publication.", "phrases": ["coreference resolution", "bert", "mention detection", "co-reference resolution", "span representation"], "overall_score": 5.205296549068008, "scores": [3.136643830729246, 2.908635417871981, 0.5816268249699253, 0.556426496077029, 0.5458614362864195], "rank_score": 1.5458388011869204} -{"id": "prasad-etal-2006-attribution", "title": "Attribution and its annotation in the Penn Discourse TreeBank", "abstract": "In this paper, we describe an annotation scheme for the attribution of abstract objects (propositions, facts, and eventualities) associated with discourse relations and their arguments annotated in the Penn Discourse TreeBank. The scheme aims to capture both the source and degrees of factuality of the abstract objects through the annotation of text spans signalling the attribution, and of features recording the source, type, scopal polarity, and determinacy of attribution. RESUME. Dans cet article, nous decrivons un schema d\u2019annotation pour l\u2019encodage des objets abstraits (propositions, faits et possibilites) associes aux relations de discours et a leurs arguments tels qu\u2019annotes dans le Penn Discourse TreeBank. Ce schema a pour objet la capture de la source et du degre de factualite des objets abstraits. 
Les aspects cles de ce schema comprennent l\u2019annotation des intervalles textuels signalant l\u2019attribution, ainsi que l\u2019annotation des proprietes caracterisant la source, le type, la polarite de la portee, et le degre de determination de l\u2019attribution.", "phrases": ["penn discourse treebank", "annotation scheme", "attribution"], "overall_score": 2.769329993987293, "scores": [2.2242340209174682, 1.8771633930334233, 0.535380080263337], "rank_score": 1.5455924980714097} -{"id": "li-etal-2009-report", "title": "Report of NEWS 2009 Machine Transliteration Shared Task", "abstract": "This report documents the details of the Machine Transliteration Shared Task conducted as a part of the Named Entities Workshop (NEWS), an ACL-IJCNLP 2009 workshop. The shared task features machine transliteration of proper names from English to a set of languages. This shared task has witnessed enthusiastic participation of 31 teams from all over the world, with diversity of participation for a given system and wide coverage for a given language pair (more than a dozen participants per language pair). Diverse transliteration methodologies are represented adequately in the shared task for a given language pair, thus underscoring the fact that the workshop may truly indicate the state of the art in machine transliteration in these language pairs. We measure and report 6 performance metrics on the submitted results. We believe that the shared task has successfully achieved the following objectives: (i) bringing together the community of researchers in the area of Machine Transliteration to focus on various research avenues, (ii) Calibrating systems on common corpora, using common metrics, thus creating a reasonable baseline for the state-of-the-art of transliteration systems, and (iii) providing a quantitative basis for meaningful comparison and analysis between various algorithmic approaches used in machine transliteration. We believe that the results of this shared task would uncover a host of interesting research problems, giving impetus to research in this significant research area.", "phrases": ["transliteration", "named entities workshop", "news"], "overall_score": 4.705423177703526, "scores": [2.560584766300621, 1.5098038891784538, 0.5662234312645475], "rank_score": 1.545537362247874} -{"id": "dong-etal-2018-banditsum", "title": "BanditSum: Extractive Summarization as a Contextual Bandit", "abstract": "In this work, we propose a novel method for training neural networks to perform single-document extractive summarization without heuristically-generated extractive labels. We call our approach BanditSum as it treats extractive summarization as a contextual bandit (CB) problem, where the model receives a document to summarize (the context), and chooses a sequence of sentences to include in the summary (the action). A policy gradient reinforcement learning algorithm is used to train the model to select sequences of sentences that maximize ROUGE score. We perform a series of experiments demonstrating that BanditSum is able to achieve ROUGE scores that are better than or comparable to the state-of-the-art for extractive summarization, and converges using significantly fewer update steps than competing approaches. 
In addition, we show empirically that BanditSum performs significantly better than competing approaches when good summary sentences appear late in the source document.", "phrases": ["extractive summarization", "contextual bandit", "action", "rouge score", "source document"], "overall_score": 4.548853081399311, "scores": [3.715911450317021, 1.8727092805111072, 1.0509382325026986, 0.5641612447628974, 0.5207616262811802], "rank_score": 1.5448963668749809} -{"id": "spitkovsky-etal-2011-punctuation", "title": "Punctuation: Making a Point in Unsupervised Dependency Parsing", "abstract": "We show how punctuation can be used to improve unsupervised dependency parsing. Our linguistic analysis confirms the strong connection between English punctuation and phrase boundaries in the Penn Treebank. However, approaches that naively include punctuation marks in the grammar (as if they were words) do not perform well with Klein and Manning's Dependency Model with Valence (DMV). Instead, we split a sentence at punctuation and impose parsing restrictions over its fragments. Our grammar inducer is trained on the Wall Street Journal (WSJ) and achieves 59.5% accuracy out-of-domain (Brown sentences with 100 or fewer words), more than 6% higher than the previous best results. Further evaluation, using the 2006/7 CoNLL sets, reveals that punctuation aids grammar induction in 17 of 18 languages, for an overall average net gain of 1.3%. Some of this improvement is from training, but more than half is from parsing with induced constraints, in inference. Punctuation-aware decoding works with existing (even already-trained) parsing models and always increased accuracy in our experiments.", "phrases": ["unsupervised dependency parsing", "fragment", "punctuation"], "overall_score": 3.005557651087997, "scores": [2.8190387295238684, 0.9624459153846459, 0.8521686394640156], "rank_score": 1.5445510947908432} -{"id": "berant-liang-2014-semantic", "title": "Semantic Parsing via Paraphrasing", "abstract": "A central challenge in semantic parsing is handling the myriad ways in which knowledge base predicates can be expressed. Traditionally, semantic parsers are trained primarily from text paired with knowledge base information. Our goal is to exploit the much larger amounts of raw text not tied to any knowledge base. In this paper, we turn semantic parsing on its head. Given an input utterance, we first use a simple method to deterministically generate a set of candidate logical forms with a canonical realization in natural language for each. Then, we use a paraphrase model to choose the realization that best paraphrases the input, and output the corresponding logical form. We present two simple paraphrase models, an association model and a vector space model, and train them jointly from question-answer pairs. Our system PARASEMPRE improves stateof-the-art accuracies on two recently released question-answering datasets.", "phrases": ["knowledge base", "semantic parsing", "freebase", "natural language question", "language utterance"], "overall_score": 5.49082168851413, "scores": [4.8687979679194004, 0.9037963349794097, 0.8514103533274612, 0.5744927520373024, 0.5234212246675493], "rank_score": 1.5443837265862246} -{"id": "das-etal-2014-frame", "title": "Frame-Semantic Parsing", "abstract": "Frame semantics is a linguistic theory that has been instantiated for English in the FrameNet lexicon. 
We solve the problem of frame-semantic parsing using a two-stage statistical model that takes lexical targets (i.e., content words and phrases) in their sentential contexts and predicts frame-semantic structures. Given a target in context, the first stage disambiguates it to a semantic frame. This model uses latent variables and semi-supervised learning to improve frame disambiguation for targets unseen at training time. The second stage finds the target's locally expressed semantic arguments. At inference time, a fast exact dual decomposition algorithm collectively predicts all the arguments of a frame at once in order to respect declaratively stated linguistic constraints, resulting in qualitatively better structures than na\u00efve local predictors. Both components are feature-based and discriminatively trained on a small set of annotated frame-semantic parses. On the SemEval 2007 benchmark data set, the approach, along with a heuristic identifier of frame-evoking targets, outperforms the prior state of the art by significant margins. Additionally, we present experiments on the much larger FrameNet 1.5 data set. We have released our frame-semantic parser as open-source software.", "phrases": ["frame", "frame-semantic parsing", "semafor", "role labeling", "argument identification"], "overall_score": 5.301859130116257, "scores": [2.772588722239781, 1.7415950813769452, 1.4132598802847678, 0.9257538262594828, 0.8664863651771065], "rank_score": 1.5439367750676165} -{"id": "nangia-etal-2020-crows", "title": "CrowS-Pairs: A Challenge Dataset for Measuring Social Biases in Masked Language Models", "abstract": "Pretrained language models, especially masked language models (MLMs) have seen success across many NLP tasks. However, there is ample evidence that they use the cultural biases that are undoubtedly present in the corpora they are trained on, implicitly creating harm with biased representations. To measure some forms of social bias in language models against protected demographic groups in the US, we introduce the Crowdsourced Stereotype Pairs benchmark (CrowS-Pairs). CrowS-Pairs has 1508 examples that cover stereotypes dealing with nine types of bias, like race, religion, and age. In CrowS-Pairs a model is presented with two sentences: one that is more stereotyping and another that is less stereotyping. The data focuses on stereotypes about historically disadvantaged groups and contrasts them with advantaged groups. We find that all three of the widely-used MLMs we evaluate substantially favor sentences that express stereotypes in every category in CrowS-Pairs. As work on building less biased models advances, this dataset can be used as a benchmark to evaluate progress.", "phrases": ["language model", "social bias", "crows-pair", "stereotypical sentence"], "overall_score": 5.088254205239364, "scores": [2.877866139173312, 1.5036105138157345, 1.2642634436177556, 0.5296313336036401], "rank_score": 1.5438428575526106} -{"id": "gusev-etal-2011-using", "title": "Using Query Patterns to Learn the Duration of Events", "abstract": "We present the first approach to learning the durations of events without annotated training data, employing web query patterns to infer duration distributions. For example, we learn that \"war\" lasts years or decades, while \"look\" lasts seconds or minutes. Learning aspectual information is an important goal for computational semantics and duration information may help enable rich document understanding. 
We first describe and improve a supervised baseline that relies on event duration annotations. We then show how web queries for linguistic patterns can help learn the duration of events without labeled data, producing fine-grained duration judgments that surpass the supervised system. We evaluate on the TimeBank duration corpus, and also investigate how an event's participants (arguments) effect its duration using a corpus collected through Amazon's Mechanical Turk. We make available a new database of events and their duration distributions for use in research involving the temporal and aspectual properties of events.", "phrases": ["query pattern", "duration", "full-length weblog"], "overall_score": 4.070319411381706, "scores": [3.159135364826172, 0.8701878230488568, 0.5976919576903477], "rank_score": 1.5423383818551255} -{"id": "schmidt-2014-database", "title": "The Database for Spoken German \u2014 DGD2", "abstract": "The Database for Spoken German (Datenbank f\u00fcr Gesprochenes Deutsch, DGD2, ) is the central platform for publishing and disseminating spoken language corpora from the Archive of Spoken German (Archiv f\u00fcr Gesprochenes Deutsch, AGD, ) at the Institute for the German Language in Mannheim. The corpora contained in the DGD2 come from a variety of sources, some of them in-house projects, some of them external projects. Most of the corpora were originally intended either for research into the (dialectal) variation of German or for studies in conversation analysis and related fields. The AGD has taken over the task of permanently archiving these resources and making them available for reuse to the research community. To date, the DGD2 offers access to 19 different corpora, totalling around 9000 speech events, 2500 hours of audio recordings or 8 million transcribed words. This paper gives an overview of the data made available via the DGD2, of the technical basis for its implementation, and of the most important functionalities it offers. The paper concludes with information about the users of the database and future plans for its development.", "phrases": ["database", "spoken german", "dgd2"], "overall_score": 1.694273372645426, "scores": [2.0081093245274855, 1.7980370859551216, 0.8204358427510401], "rank_score": 1.5421940844112159} -{"id": "ott-etal-2011-finding", "title": "Finding Deceptive Opinion Spam by Any Stretch of the Imagination", "abstract": "Consumers increasingly rate, review and research products online (Jansen, 2010; Litvin et al., 2008). Consequently, websites containing consumer reviews are becoming targets of opinion spam. While recent work has focused primarily on manually identifiable instances of opinion spam, in this work we study deceptive opinion spam---fictitious opinions that have been deliberately written to sound authentic. Integrating work from psychology and computational linguistics, we develop and compare three approaches to detecting deceptive opinion spam, and ultimately develop a classifier that is nearly 90% accurate on our gold-standard opinion spam dataset. 
Based on feature analysis of our learned models, we additionally make several theoretical contributions, including revealing a relationship between deceptive opinions and imaginative writing.", "phrases": ["deceptive opinion spam", "fake hotel review", "news", "reputation"], "overall_score": 6.0276594545226905, "scores": [4.23493184155029, 0.8622435643441392, 0.5345351927776868, 0.531504133101172], "rank_score": 1.540803682943322} -{"id": "mubarak-darwish-2014-using", "title": "Using Twitter to Collect a Multi-Dialectal Corpus of Arabic", "abstract": "This paper describes the collection and classification of a multi-dialectal corpus of Arabic based on the geographical information of tweets. We mapped information of user locations to one of the Arab countries, and extracted tweets that have dialectal word(s). Manual evaluation of the extracted corpus shows that the accuracy of assignment of tweets to some countries (like Saudi Arabia and Egypt) is above 93% while the accuracy for other countries, such Algeria and Syria is below 70%.", "phrases": ["twitter", "multi-dialectal corpus", "arab country"], "overall_score": 3.544165337864533, "scores": [2.3475556988033843, 1.7450034277292037, 0.525075221029433], "rank_score": 1.5392114491873405} -{"id": "goldwater-griffiths-2007-fully", "title": "A fully Bayesian approach to unsupervised part-of-speech tagging", "abstract": "Unsupervised learning of linguistic structure is a difficult problem. A common approach is to define a generative model and maximize the probability of the hidden structure given the observed data. Typically, this is done using maximum-likelihood estimation (MLE) of the model parameters. We show using part-of-speech tagging that a fully Bayesian approach can greatly improve performance. Rather than estimating a single set of parameters, the Bayesian approach integrates over all possible parameter values. This difference ensures that the learned structure will have high probability over a range of possible parameters, and permits the use of priors favoring the sparse distributions that are typical of natural language. Our model has the structure of a standard trigram HMM, yet its accuracy is closer to that of a state-of-the-art discriminative model (Smith and Eisner, 2005), up to 14 percentage points better than MLE. We find improvements both when training from data alone, and using a tagging dictionary.", "phrases": ["bayesian approach", "tagging", "unsupervised learning", "bhmm", "markov model"], "overall_score": 5.334492054639512, "scores": [3.3679494101916063, 2.3874102602878544, 0.8288519301065281, 0.5669038445653385, 0.5449297877386732], "rank_score": 1.5392090465780002} -{"id": "filippova-etal-2015-sentence", "title": "Sentence Compression by Deletion with LSTMs", "abstract": "We present an LSTM approach to deletion-based sentence compression where the task is to translate a sentence into a sequence of zeros and ones, corresponding to token deletion decisions. We demonstrate that even the most basic version of the system, which is given no syntactic information (no PoS or NE tags, or dependencies) or desired compression length, performs surprisingly well: around 30% of the compressions from a large test set could be regenerated. We compare the LSTM system with a competitive baseline which is trained on the same amount of data but is additionally provided with all kinds of linguistic features. 
In an experiment with human raters the LSTMbased model outperforms the baseline achieving 4.5 in readability and 3.8 in informativeness.", "phrases": ["deletion", "sentence compression", "language generation task", "seq2seq", "sequence-to-sequence"], "overall_score": 5.181205406534204, "scores": [4.311836946477069, 1.7104854152298912, 0.5925602061265286, 0.5432020031592163, 0.5353371955097868], "rank_score": 1.5386843533004986} -{"id": "denero-klein-2010-discriminative", "title": "Discriminative Modeling of Extraction Sets for Machine Translation", "abstract": "We present a discriminative model that directly predicts which set of phrasal translation rules should be extracted from a sentence pair. Our model scores extraction sets: nested collections of all the overlapping phrase pairs consistent with an underlying word alignment. Extraction set models provide two principle advantages over word-factored alignment models. First, we can incorporate features on phrase pairs, in addition to word links. Second, we can optimize for an extraction-based loss function that relates directly to the end task of generating translations. Our model gives improvements in alignment quality relative to state-of-the-art unsupervised and supervised baselines, as well as providing up to a 1.4 improvement in BLEU score in Chinese-to-English translation experiments.", "phrases": ["extraction set", "word alignment", "loss function"], "overall_score": 3.542715864484732, "scores": [2.359157553286042, 1.7188476159006347, 0.5377406835038063], "rank_score": 1.5385819508968275} -{"id": "hu-etal-2016-harnessing", "title": "Harnessing Deep Neural Networks with Logic Rules", "abstract": "Combining deep neural networks with structured logic rules is desirable to harness flexibility and reduce uninterpretability of the neural models. We propose a general framework capable of enhancing various types of neural networks (e.g., CNNs and RNNs) with declarative first-order logic rules. Specifically, we develop an iterative distillation method that transfers the structured information of logic rules into the weights of neural networks. We deploy the framework on a CNN for sentiment analysis, and an RNN for named entity recognition. With a few highly intuitive rules, we obtain substantial improvements and achieve state-of-the-art or comparable results to previous best-performing systems.", "phrases": ["deep neural network", "logic rule", "various type", "weight", "entity recognition"], "overall_score": 5.281745051474917, "scores": [3.992409359505122, 1.2407128266007996, 0.9956914471944592, 0.9135336326298206, 0.5480498394760238], "rank_score": 1.538079421081245} -{"id": "cer-etal-2018-universal", "title": "Universal Sentence Encoder for English", "abstract": "We present easy-to-use TensorFlow Hub sentence embedding models having good task transfer performance. Model variants allow for trade-offs between accuracy and compute resources. We report the relationship between model complexity, resources, and transfer performance. Comparisons are made with baselines without transfer learning and to baselines that incorporate word-level transfer. Transfer learning using sentence-level embeddings is shown to outperform models without transfer learning and often those that use only word-level transfer. 
We show good transfer task performance with minimal training data and obtain encouraging results on word embedding association tests (WEAT) of model bias.", "phrases": ["transfer learning", "universal sentence encoder", "other nlp task", "dozen", "semantic similarity"], "overall_score": 6.293355502926754, "scores": [4.835796471233619, 0.8938512616889913, 0.8541724662398962, 0.5537623117488062, 0.5478423565932397], "rank_score": 1.5370849735009104} -{"id": "liu-etal-2016-neural", "title": "Neural Machine Translation with Supervised Attention", "abstract": "The attention mechanism is appealing for neural machine translation, since it is able to dynamically encode a source sentence by generating a alignment between a target word and source words. Unfortunately, it has been proved to be worse than conventional alignment models in alignment accuracy. In this paper, we analyze and explain this issue from the point view of reordering, and propose a supervised attention which is learned with guidance from conventional alignment models. Experiments on two Chinese-to-English translation tasks show that the supervised attention mechanism yields better alignments leading to substantial gains over the standard attention based NMT.", "phrases": ["alignment model", "neural machine translation", "attention model"], "overall_score": 4.679238917935728, "scores": [2.680175135633316, 1.4092428945738151, 0.5213927097209508], "rank_score": 1.5369369133093607} -{"id": "jamshid-lou-etal-2018-disfluency", "title": "Disfluency Detection using Auto-Correlational Neural Networks", "abstract": "In recent years, the natural language processing community has moved away from task-specific feature engineering, i.e., researchers discovering ad-hoc feature representations for various tasks, in favor of general-purpose methods that learn the input representation by themselves. However, state-of-the-art approaches to disfluency detection in spontaneous speech transcripts currently still depend on an array of hand-crafted features, and other representations derived from the output of pre-existing systems such as language models or dependency parsers. As an alternative, this paper proposes a simple yet effective model for automatic disfluency detection, called an auto-correlational neural network (ACNN). The model uses a convolutional neural network (CNN) and augments it with a new auto-correlation operator at the lowest layer that can capture the kinds of \u201crough copy\u201d dependencies that are characteristic of repair disfluencies in speech. In experiments, the ACNN model outperforms the baseline CNN on a disfluency detection task with a 5% increase in f-score, which is close to the previous best result on this task.", "phrases": ["auto-correlation operator", "disfluency detection", "human-annotated corpora"], "overall_score": 3.538305713061548, "scores": [3.5314350016240517, 0.5470705549737075, 0.531494382810384], "rank_score": 1.5366666464693808} -{"id": "rubin-etal-2016-fake", "title": "Fake News or Truth? Using Satirical Cues to Detect Potentially Misleading News", "abstract": "Satire is an attractive subject in deception detection research: it is a type of deception that intentionally incorporates cues revealing its own deceptiveness. Whereas other types of fabrications aim to instill a false sense of truth in the reader, a successful satirical hoax must eventually be exposed as a jest. 
This paper provides a conceptual overview of satire and humor, elaborating and illustrating the unique features of satirical news, which mimics the format and style of journalistic reporting. Satirical news stories were carefully matched and examined in contrast with their legitimate news counterparts in 12 contemporary news topics in 4 domains (civics, science, business, and \u201csoft\u201d news). Building on previous work in satire detection, we proposed an SVMbased algorithm, enriched with 5 predictive features (Absurdity, Humor, Grammar, Negative Affect, and Punctuation) and tested their combinations on 360 news articles. Our best predicting feature combination (Absurdity, Grammar and Punctuation) detects satirical news with a 90% precision and 84% recall (F-score=87%). Our work in algorithmically identifying satirical news pieces can aid in minimizing the potential deceptive impact of satire.", "phrases": ["satire", "news", "journalistic reporting", "deceptive content", "social medium"], "overall_score": 4.946309137872359, "scores": [4.204138968991243, 1.4902113944235087, 0.8864424525299218, 0.5787770959131914, 0.5237167493709652], "rank_score": 1.5366573322457662} -{"id": "carlos-yalamanchi-2012-intention", "title": "Intention Analysis for Sales, Marketing and Customer Service", "abstract": "In recent years, social media has become a customer touch-point for the business functions of marketing, sales and customer service. We aim to show that intention analysis might be useful to these business functions and that it can be performed effectively on short texts (at the granularity level of a single sentence). We demonstrate a scheme of categorization of intentions that is amenable to automation using simple machine learning techniques that are language-independent. We discuss the grounding that this scheme of categorization has in speech act theory. In the demonstration we go over a number of usage scenarios in an attempt to show that the use of automatic intention detection tools would benefit the business functions of sales, marketing and service. We also show that social media can be used not just to convey pleasure or displeasure (that is, to express sentiment) but also to discuss personal needs and to report problems (to express intentions). We evaluate methods for automatically discovering intentions in text, and establish that it is possible to perform intention analysis on social media with an accuracy of 66.97%\u00b10.10%.", "phrases": ["marketing", "customer service", "intention analysis"], "overall_score": 1.687081776024997, "scores": [1.8480200601499774, 1.8389937048643825, 0.9199302681813136], "rank_score": 1.5356480110652244} -{"id": "lapata-keller-2004-web", "title": "The Web as a Baseline: Evaluating the Performance of Unsupervised Web-based Models for a Range of NLP Tasks", "abstract": "Previous work demonstrated that web counts can be used to approximate bigram frequencies, and thus should be useful for a wide variety of NLP tasks. So far, only two generation tasks (candidate selection for machine translation and confusion-set disambiguation) have been tested using web-scale data sets. The present paper investigates if these results generalize to tasks covering both syntax and semantics, both generation and analysis, and a larger range of n-grams. For the majority of tasks, we find that simple, unsupervised models perform better when n-gram frequencies are obtained from the web rather than from a large corpus. 
However, in most cases, web-based models fail to outperform more sophisticated state-of-theart models trained on small corpora. We argue that web-based models should therefore be used as a baseline for, rather than an alternative to, standard models.", "phrases": ["web", "unsupervised model", "n-gram frequency", "noun compound"], "overall_score": 4.939877267677391, "scores": [3.3634524603987903, 1.0623651601582544, 0.8762358425530269, 0.8365831745796213], "rank_score": 1.534659159422423} -{"id": "wang-etal-2018-switchout", "title": "SwitchOut: an Efficient Data Augmentation Algorithm for Neural Machine Translation", "abstract": "In this work, we examine methods for data augmentation for text-based tasks such as neural machine translation (NMT). We formulate the design of a data augmentation policy with desirable properties as an optimization problem, and derive a generic analytic solution. This solution not only subsumes some existing augmentation schemes, but also leads to an extremely simple data augmentation strategy for NMT: randomly replacing words in both the source sentence and the target sentence with other random words from their corresponding vocabularies. We name this method SwitchOut. Experiments on three translation datasets of different scales show that SwitchOut yields consistent improvements of about 0.5 BLEU, achieving better or comparable performances to strong alternatives such as word dropout (Sennrich et al., 2016a). Code to implement this method is included in the appendix.", "phrases": ["data augmentation", "neural machine translation", "other random word"], "overall_score": 4.997787599235589, "scores": [2.5845118927886452, 1.1337323309510277, 0.8836337816344911], "rank_score": 1.5339593351247214} -{"id": "battisti-etal-2020-corpus", "title": "A Corpus for Automatic Readability Assessment and Text Simplification of German", "abstract": "In this paper, we present a corpus for use in automatic readability assessment and automatic text simplification for German, the first of its kind for this language. The corpus is compiled from web sources and consists of parallel as well as monolingual-only (simplified German) data amounting to approximately 6,200 documents (nearly 211,000 sentences). As a unique feature, the corpus contains information on text structure (e.g., paragraphs, lines), typography (e.g., font type, font style), and images (content, position, and dimensions). While the importance of considering such information in machine learning tasks involving simplified language, such as readability assessment, has repeatedly been stressed in the literature, we provide empirical evidence for its benefit. We also demonstrate the added value of leveraging monolingual-only data for automatic text simplification via machine translation through applying back-translation, a data augmentation technique.", "phrases": ["automatic readability assessment", "text simplification", "german", "monolingual-only data"], "overall_score": 2.1262550585979803, "scores": [1.9880173612046055, 1.8316877030657959, 1.7677737421839708, 0.5475964509543844], "rank_score": 1.5337688143521893} -{"id": "ruder-plank-2017-learning", "title": "Learning to select data for transfer learning with Bayesian Optimization", "abstract": "Domain similarity measures can be used to gauge adaptability and select suitable data for transfer learning, but existing approaches define ad hoc measures that are deemed suitable for respective tasks. 
Inspired by work on curriculum learning, we propose to learn data selection measures using Bayesian Optimization and evaluate them across models, domains and tasks. Our learned measures outperform existing domain similarity measures significantly on three tasks: sentiment analysis, part-of-speech tagging, and parsing. We show the importance of complementing similarity with diversity, and that learned measures are\u2013to some degree\u2013transferable across models, domains, and even tasks.", "phrases": ["transfer learning", "bayesian optimization", "target data"], "overall_score": 5.052132384231408, "scores": [3.423129104025406, 0.6176823674339555, 0.5578376027798351], "rank_score": 1.532883024746399} -{"id": "hirao-etal-2013-single", "title": "Single-Document Summarization as a Tree Knapsack Problem", "abstract": "Recent studies on extractive text summarization formulate it as a combinatorial optimization problem such as a Knapsack Problem, a Maximum Coverage Problem or a Budgeted Median Problem. These methods successfully improved summarization quality, but they did not consider the rhetorical relations between the textual units of a source document. Thus, summaries generated by these methods may lack logical coherence. This paper proposes a single document summarization method based on the trimming of a discourse tree. This is a two-fold process. First, we propose rules for transforming a rhetorical structure theorybased discourse tree into a dependency-based discourse tree, which allows us to take a treetrimming approach to summarization. Second, we formulate the problem of trimming a dependency-based discourse tree as a Tree Knapsack Problem, then solve it with integer linear programming (ILP). Evaluation results showed that our method improved ROUGE scores.", "phrases": ["summarization", "tree knapsack problem", "discourse tree", "rst tree"], "overall_score": 4.804959073598057, "scores": [3.4183558456806233, 0.7966109452385117, 1.0746793642755128, 0.8401168010724432], "rank_score": 1.5324407390667727} -{"id": "kocmi-bojar-2017-curriculum", "title": "Curriculum Learning and Minibatch Bucketing in Neural Machine Translation", "abstract": "We examine the effects of particular orderings of sentence pairs on the on-line training of neural machine translation (NMT). We focus on two types of such orderings: (1) ensuring that each minibatch contains sentences similar in some aspect and (2) gradual inclusion of some sentence types as the training progresses (so called \u201ccurriculum learning\u201d). In our English-to-Czech experiments, the internal homogeneity of minibatches has no effect on the training but some of our \u201ccurricula\u201d achieve a small improvement over the baseline.", "phrases": ["neural machine translation", "curriculum", "training example", "sentence length"], "overall_score": 4.9326553238944815, "scores": [1.915140562960541, 1.579630398266544, 1.3777503496975128, 1.2571408348295987], "rank_score": 1.5324155364385492} -{"id": "fisch-etal-2019-mrqa", "title": "MRQA 2019 Shared Task: Evaluating Generalization in Reading Comprehension", "abstract": "We present the results of the Machine Reading for Question Answering (MRQA) 2019 shared task on evaluating the generalization capabilities of reading comprehension systems. In this task, we adapted and unified 18 distinct question answering datasets into the same format. Among them, six datasets were made available for training, six datasets were made available for development, and the rest were hidden for final evaluation. 
Ten teams submitted systems, which explored various ideas including data sampling, multi-task learning, adversarial training and ensembling. The best system achieved an average F1 score of 72.5 on the 12 held-out datasets, 10.7 absolute points higher than our initial baseline based on BERT.", "phrases": ["generalization", "question answering", "mrqa"], "overall_score": 4.588465861431177, "scores": [2.4494619773719966, 1.5974595486385463, 0.5480810734583784], "rank_score": 1.5316675331563072} -{"id": "ng-low-2004-chinese", "title": "Chinese Part-of-Speech Tagging: One-at-a-Time or All-at-Once? Word-Based or Character-Based?", "abstract": "Chinese part-of-speech (POS) tagging assigns one POS tag to each word in a Chinese sentence. However, since words are not demarcated in a Chinese sentence, Chinese POS tagging requires word segmentation as a prerequisite. We could perform Chinese POS tagging strictly after word segmentation (one-at-a-time approach), or perform both word segmentation and POS tagging in a combined, single step simultaneously (all-atonce approach). Also, we could choose to assign POS tags on a word-by-word basis, making use of word features in the surrounding context (word-based), or on a character-by-character basis with character features (character-based). This paper presents an in-depth study on such issues of processing architecture and feature representation for Chinese POS tagging, within a maximum entropy framework. We found that while the all-at-once, characterbased approach is the best, the one-at-a-time, character-based approach is a worthwhile compromise, performing only slightly worse in terms of accuracy, but taking shorter time to train and run. As part of our investigation, we also built a state-of-the-art Chinese word segmenter, which outperforms the best SIGHAN 2003 word segmenters in the closed track on 3 out of 4 test corpora.", "phrases": ["all-at-once", "pos tagging", "word segmentation"], "overall_score": 5.445599839528174, "scores": [0.8469194710393168, 2.1881297123966523, 1.559943834418007], "rank_score": 1.5316643392846585} -{"id": "karpukhin-etal-2020-dense", "title": "Dense Passage Retrieval for Open-Domain Question Answering", "abstract": "Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system greatly by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks.", "phrases": ["open-domain question", "dense passage retrieval", "knowledge source", "cross-encoder", "in-batch negative"], "overall_score": 7.441593759111317, "scores": [3.1131845935462805, 1.5627341839455664, 1.0877760705432955, 1.0486859153306836, 0.8438754250752135], "rank_score": 1.5312512376882081} -{"id": "ye-ling-2019-multi", "title": "Multi-Level Matching and Aggregation Network for Few-Shot Relation Classification", "abstract": "This paper presents a multi-level matching and aggregation network (MLMAN) for few-shot relation classification. 
Previous studies on this topic adopt prototypical networks, which calculate the embedding vector of a query instance and the prototype vector of the support set for each relation candidate independently. On the contrary, our proposed MLMAN model encodes the query instance and each support set in an interactive way by considering their matching information at both local and instance levels. The final class prototype for each support set is obtained by attentive aggregation over the representations of support instances, where the weights are calculated using the query instance. Experimental results demonstrate the effectiveness of our proposed methods, which achieve a new state-of-the-art performance on the FewRel dataset.", "phrases": ["aggregation network", "relation classification", "prototypical network", "multi-level matching"], "overall_score": 3.671390840740152, "scores": [2.5931877194840145, 1.6791843122440104, 0.7956456664505053, 1.056337910489229], "rank_score": 1.5310889021669398} -{"id": "casacuberta-vidal-2004-machine", "title": "Machine Translation with Inferred Stochastic Finite-State Transducers", "abstract": "Finite-state transducers are models that are being used in different areas of pattern recognition and computational linguistics. One of these areas is machine translation, in which the approaches that are based on building models automatically from training examples are becoming more and more attractive. Finite-state transducers are very adequate for use in constrained tasks in which training samples of pairs of sentences are available. A technique for inferring finite-state transducers is proposed in this article. This technique is based on formal relations between finite-state transducers and rational grammars. Given a training corpus of source-target pairs of sentences, the proposed approach uses statistical alignment methods to produce a set of conventional strings from which a stochastic rational grammar (e.g., an n-gram) is inferred. This grammar is finally converted into a finite-state transducer. The proposed methods are assessed through a series of machine translation experiments within the framework of the E u Trans project.", "phrases": ["finite-state transducer", "transducer", "machine translation"], "overall_score": 3.3609882072624977, "scores": [1.9102462777776101, 1.7257142687745501, 0.9529944131686443], "rank_score": 1.5296516532402682} -{"id": "guo-etal-2018-multi", "title": "Multi-Source Domain Adaptation with Mixture of Experts", "abstract": "We propose a mixture-of-experts approach for unsupervised domain adaptation from multiple sources. The key idea is to explicitly capture the relationship between a target example and different source domains. This relationship, expressed by a point-to-set metric, determines how to combine predictors trained on various domains. The metric is learned in an unsupervised fashion using meta-training. 
Experimental results on sentiment analysis and part-of-speech tagging demonstrate that our approach consistently outperforms multiple baselines and can robustly handle negative transfer.", "phrases": ["domain adaptation", "mixture", "meta-training", "adversarial learning"], "overall_score": 4.861082184941367, "scores": [2.8880029804535043, 2.0792237880420954, 0.5951315561183085, 0.5559548877323494], "rank_score": 1.5295783030865644} -{"id": "lin-etal-2012-entity", "title": "Entity Linking at Web Scale", "abstract": "This paper investigates entity linking over millions of high-precision extractions from a corpus of 500 million Web documents, toward the goal of creating a useful knowledge base of general facts. This paper is the first to report on entity linking over this many extractions, and describes new opportunities (such as corpus-level features) and challenges we found when entity linking at Web scale. We present several techniques that we developed and also lessons that we learned. We envision a future where information extraction and entity linking are paired to automatically generate knowledge bases with billions of assertions over millions of linked entities.", "phrases": ["web scale", "entity linking", "wikipedia"], "overall_score": 3.6673011581336974, "scores": [2.120281191384703, 1.882738564161768, 0.5851303606017408], "rank_score": 1.5293833720494039} -{"id": "zhang-etal-2018-personalizing", "title": "Personalizing Dialogue Agents: I have a dog, do you have pets too?", "abstract": "Chit-chat models are known to have several problems: they lack specificity, do not display a consistent personality and are often not very captivating. In this work we present the task of making chit-chat more engaging by conditioning on profile information. We collect data and train models to (i)condition on their given profile information; and (ii) information about the person they are talking to, resulting in improved dialogues, as measured by next utterance prediction. Since (ii) is initially unknown our model is trained to engage its partner with personal topics, and we show the resulting dialogue can be used to predict profile information about the interlocutors.", "phrases": ["persona", "conversation", "dialog system", "persona-based dialogue generation"], "overall_score": 7.042721981296587, "scores": [2.5298665413963883, 1.9505709161127749, 1.063455622764428, 0.5733375078380969], "rank_score": 1.529307647027922} -{"id": "gao-etal-2019-soft", "title": "Soft Contextual Data Augmentation for Neural Machine Translation", "abstract": "While data augmentation is an important trick to boost the accuracy of deep learning methods in computer vision tasks, its study in natural language tasks is still very limited. In this paper, we present a novel data augmentation method for neural machine translation. Different from previous augmentation methods that randomly drop, swap or replace words with other words in a sentence, we softly augment a randomly chosen word in a sentence by its contextual mixture of multiple related words. More accurately, we replace the one-hot representation of a word by a distribution (provided by a language model) over the vocabulary, i.e., replacing the embedding of this word by a weighted combination of multiple semantically similar words. Since the weights of those words depend on the contextual information of the word to be replaced,the newly generated sentences capture much richer information than previous augmentation methods. 
Experimental results on both small scale and large scale machine translation data sets demonstrate the superiority of our method over strong baselines.", "phrases": ["data augmentation", "neural machine translation", "language model", "contextual information"], "overall_score": 4.982404730637442, "scores": [2.4984594619959006, 1.5742748600248255, 1.5119892551381362, 0.5322280508773853], "rank_score": 1.5292379070090618} -{"id": "riaz-girju-2014-recognizing", "title": "Recognizing Causality in Verb-Noun Pairs via Noun and Verb Semantics", "abstract": "Several supervised approaches have been proposed for causality identification by relying on shallow linguistic features. However, such features do not lead to improved performance. Therefore, novel sources of knowledge are required to achieve progress on this problem. In this paper, we propose a model for the recognition of causality in verb-noun pairs by employing additional types of knowledge along with linguistic features. In particular, we focus on identifying and employing semantic classes of nouns and verbs with a high tendency to encode cause or non-cause relations. Our model incorporates the information about these classes to minimize errors in predictions made by a basic supervised classifier relying merely on shallow linguistic features. As compared with this basic classifier, our model achieves a 14.74% (29.57%) improvement in F-score (accuracy), respectively.", "phrases": ["causality", "noun", "semantic class"], "overall_score": 3.359867955114174, "scores": [2.281475664505165, 1.7379597386087642, 0.5679900104331989], "rank_score": 1.5291418045157095} -{"id": "birch-etal-2016-hume", "title": "HUME: Human UCCA-Based Evaluation of Machine Translation", "abstract": "Human evaluation of machine translation normally uses sentence-level measures such as relative ranking or adequacy scales. However, these provide no insight into possible errors, and do not scale well with sentence length. We argue for a semantics-based evaluation, which captures what meaning components are retained in the MT output, thus providing a more fine-grained analysis of translation quality, and enabling the construction and tuning of semantics-based MT. We present a novel human semantic evaluation measure, Human UCCA-based MT Evaluation (HUME), building on the UCCA semantic representation scheme. HUME covers a wider range of semantic phenomena than previous methods and does not rely on semantic annotation of the potentially garbled MT output. We experiment with four language pairs, demonstrating HUME's broad applicability, and report good inter-annotator agreement rates and correlation with human adequacy scores.", "phrases": ["machine translation", "evaluation measure", "hume"], "overall_score": 3.519100411170607, "scores": [2.1723633002214355, 1.839575319498655, 0.5730390497844863], "rank_score": 1.528325889834859} -{"id": "blodgett-etal-2020-language", "title": "Language (Technology) is Power: A Critical Survey of \u201cBias\u201d in NLP", "abstract": "We survey 146 papers analyzing \u201cbias\u201d in NLP systems, finding that their motivations are often vague, inconsistent, and lacking in normative reasoning, despite the fact that analyzing \u201cbias\u201d is an inherently normative process. We further find that these papers' proposed quantitative techniques for measuring or mitigating \u201cbias\u201d are poorly matched to their motivations and do not engage with the relevant literature outside of NLP. 
Based on these findings, we describe the beginnings of a path forward by proposing three recommendations that should guide work analyzing \u201cbias\u201d in NLP systems. These recommendations rest on a greater recognition of the relationships between language and social hierarchies, encouraging researchers and practitioners to articulate their conceptualizations of \u201cbias\u201d\u2014i.e., what kinds of system behaviors are harmful, in what ways, to whom, and why, as well as the normative reasoning underlying these statements\u2014and to center work around the lived experiences of members of communities affected by NLP systems, while interrogating and reimagining the power relations between technologists and such communities.", "phrases": ["technology", "critical survey", "harm", "gender", "language model"], "overall_score": 6.714943639497057, "scores": [1.653667826661887, 0.8419333747728404, 2.3372973007251563, 1.5882621052522414, 1.2190957741611264], "rank_score": 1.5280512763146503} -{"id": "oepen-etal-2014-semeval", "title": "SemEval 2014 Task 8: Broad-Coverage Semantic Dependency Parsing", "abstract": "Task 18 at SemEval 2015 defines Broad-Coverage Semantic Dependency Parsing (SDP) as the problem of recovering sentence-internal predicate\u2013argument relationships for all content words, i.e. the sema ...", "phrases": ["semantic dependency parsing", "acyclic graph", "reentrancie", "empty node"], "overall_score": 4.978185797405094, "scores": [4.523873312370662, 0.5365040491653965, 0.52758203731693, 0.5238125996871857], "rank_score": 1.5279429996350435} -{"id": "niu-etal-2020-self", "title": "A Self-Training Method for Machine Reading Comprehension with Soft Evidence Extraction", "abstract": "Neural models have achieved great success on machine reading comprehension (MRC), many of which typically consist of two components: an evidence extractor and an answer predictor. The former seeks the most relevant information from a reference text, while the latter is to locate or generate answers from the extracted evidence. Despite the importance of evidence labels for training the evidence extractor, they are not cheaply accessible, particularly in many non-extractive MRC tasks such as YES/NO question answering and multi-choice MRC. To address this problem, we present a Self-Training method (STM), which supervises the evidence extractor with auto-generated evidence labels in an iterative process. At each iteration, a base MRC model is trained with golden answers and noisy evidence labels. The trained model will predict pseudo evidence labels as extra supervision in the next iteration. We evaluate STM on seven datasets over three MRC tasks. Experimental results demonstrate the improvement on existing MRC models, and we also analyze how and why such a self-training method works in MRC.", "phrases": ["self-training method", "machine reading comprehension", "soft evidence extraction"], "overall_score": 3.5181117413180725, "scores": [1.96763020324411, 1.8000252624385467, 0.8160340822377811], "rank_score": 1.5278965159734792} -{"id": "proisl-2018-someweta", "title": "SoMeWeTa: A Part-of-Speech Tagger for German Social Media and Web Texts", "abstract": "Off-the-shelf part-of-speech taggers typically perform relatively poorly on web and social media texts since those domains are quite different from the newspaper articles on which most tagger models are trained. 
In this paper, we describe SoMeWeTa, a part-of-speech tagger based on the averaged structured perceptron that is capable of domain adaptation and that can use various external resources. We train the tagger on the German web and social media data of the EmpiriST 2015 shared task. Using the TIGER corpus as background data and adding external information about word classes and Brown clusters, we substantially improve on the state of the art for both the web and the social media data sets. The tagger is available as free software.", "phrases": ["part-of-speech tagger", "social media", "someweta"], "overall_score": 2.458482557527557, "scores": [2.056626802717498, 1.7188006374880733, 0.8071958567864246], "rank_score": 1.5275410989973317} -{"id": "neale-etal-2016-word", "title": "Word Sense-Aware Machine Translation: Including Senses as Contextual Features for Improved Translation Models", "abstract": "Although it is commonly assumed that word sense disambiguation (WSD) should help to improve lexical choice and improve the quality of machine translation systems, how to successfully integrate word senses into such systems remains an unanswered question. Some successful approaches have involved reformulating either WSD or the word senses it produces, but work on using traditional word senses to improve machine translation has met with limited success. In this paper, we build upon previous work that experimented on including word senses as contextual features in maxent-based translation models. Training on a large, open-domain corpus (Europarl), we demonstrate that this approach yields significant improvements in machine translation from English to Portuguese.", "phrases": ["machine translation", "contextual feature", "wsd"], "overall_score": 2.11630136760791, "scores": [1.9334115918373673, 1.8022144698752722, 0.8441401703990554], "rank_score": 1.5265887440372314} -{"id": "cucchiarini-etal-2006-jasmin", "title": "JASMIN-CGN: Extension of the Spoken Dutch Corpus with Speech of Elderly People, Children and Non-natives in the Human-Machine Interaction Modality", "abstract": "Large speech corpora (LSC) constitute an indispensable resource for conducting research in speech processing and for developing real-life speech applications. In 2004 the Spoken Dutch Corpus (CGN) became available, a corpus of standard Dutch as spoken by adult natives in the Netherlands and Flanders. Owing to budget constraints, CGN does not include speech of children, non-natives, elderly people and recordings of speech produced in human-machine interactions. Since such recordings would be extremely useful for conducting research and for developing HLT applications for these specific groups of speakers of Dutch, a new project, JASMIN-CGN, was started which aims at extending CGN in different ways: by collecting a corpus of contemporary Dutch as spoken by children of different age groups, non-natives with different mother tongues and elderly people in the Netherlands and Flanders and, in addition, by collecting speech material in a communication setting that was not envisaged in CGN: human-machine interaction. We expect that the knowledge gathered from these data can be generalized to developing appropriate systems also for other speaker groups (i.e. adult natives). 
One third of the data will be collected in Flanders and two thirds in the Netherlands.", "phrases": ["spoken dutch corpus", "non-native", "jasmin-cgn"], "overall_score": 2.455462710784195, "scores": [2.157941628591071, 1.8333517416929217, 0.5857009178720014], "rank_score": 1.5256647627186648} -{"id": "de-marneffe-etal-2008-finding", "title": "Finding Contradictions in Text", "abstract": "Detecting conflicting statements is a foundational text understanding task with applications in information analysis. We propose an appropriate definition of contradiction for NLP tasks and develop available corpora, from which we construct a typology of contradictions. We demonstrate that a system for contradiction needs to make more fine-grained distinctions than the common systems for entailment. In particular, we argue for the centrality of event coreference and therefore incorporate such a component based on topicality. We present the first detailed breakdown of performance on this task. Detecting some types of contradiction requires deeper inferential paths than our system is capable of, but we achieve good performance on types arising from negation and antonymy.", "phrases": ["contradiction", "typology", "negation", "google news"], "overall_score": 4.490826261803517, "scores": [4.090795961796704, 0.9088570764109694, 0.5570290723106822, 0.5440743236664055], "rank_score": 1.5251891085461902} -{"id": "tian-etal-2021-hypogen-hyperbole", "title": "HypoGen: Hyperbole Generation with Commonsense and Counterfactual Knowledge", "abstract": "A hyperbole is an intentional and creative exaggeration not to be taken literally. Despite its ubiquity in daily life, the computational explorations of hyperboles are scarce. In this paper, we tackle the under-explored and challenging task: sentence-level hyperbole generation. We start with a representative syntactic pattern for intensification and systematically study the semantic (commonsense and counterfactual) relationships between each component in such hyperboles. We then leverage commonsense and counterfactual inference to generate hyperbole candidates based on our findings from the pattern, and train neural classifiers to rank and select high-quality hyperboles. Automatic and human evaluations show that our generation method is able to generate hyperboles with high success rate, intensity, funniness, and creativity.", "phrases": ["hyperbole generation", "commonsense", "counterfactual knowledge"], "overall_score": 2.4537003216097943, "scores": [1.963260563011617, 1.7804168211219116, 0.8300318021354317], "rank_score": 1.52456972875632} -{"id": "bojanowski-etal-2017-enriching", "title": "Enriching Word Vectors with Subword Information", "abstract": "Continuous word representations, trained on large unlabeled corpora, are useful for many natural language processing tasks. Popular models that learn such representations ignore the morphology of words, by assigning a distinct vector to each word. This is a limitation, especially for languages with large vocabularies and many rare words. In this paper, we propose a new approach based on the skipgram model, where each word is represented as a bag of character n-grams. A vector representation is associated with each character n-gram; words being represented as the sum of these representations. Our method is fast, allowing models to be trained on large corpora quickly, and allows us to compute word representations for words that did not appear in the training data. 
We evaluate our word representations on nine different languages, both on word similarity and analogy tasks. By comparing to recently proposed morphological word representations, we show that our vectors achieve state-of-the-art performance on these tasks.", "phrases": ["subword information", "fasttext model", "unit", "facebook", "cbow"], "overall_score": 7.873075072338837, "scores": [3.4266427452793025, 1.466406806870257, 1.0428805260918754, 0.8572276783532997, 0.8287215546576612], "rank_score": 1.524375862250479} -{"id": "gordon-etal-2020-compressing", "title": "Compressing BERT: Studying the Effects of Weight Pruning on Transfer Learning", "abstract": "Pre-trained universal feature extractors, such as BERT for natural language processing and VGG for computer vision, have become effective methods for improving deep learning models without requiring more labeled data. While effective, feature extractors like BERT may be prohibitively large for some deployment scenarios. We explore weight pruning for BERT and ask: how does compression during pre-training affect transfer learning? We find that pruning affects transfer learning in three broad regimes. Low levels of pruning (30-40%) do not affect pre-training loss or transfer to downstream tasks at all. Medium levels of pruning increase the pre-training loss and prevent useful pre-training information from being transferred to downstream tasks. High levels of pruning additionally prevent models from fitting downstream datasets, leading to further degradation. Finally, we observe that fine-tuning BERT on a specific task does not improve its prunability. We conclude that BERT can be pruned once during pre-training rather than separately for each task without affecting performance.", "phrases": ["bert", "weight pruning", "transfer learning", "pre-training loss"], "overall_score": 4.315070961822233, "scores": [2.9278062970942353, 1.7609454999408503, 0.8387868609115504, 0.5645842255980117], "rank_score": 1.523030720886162} -{"id": "abbes-etal-2020-daict", "title": "DAICT: A Dialectal Arabic Irony Corpus Extracted from Twitter", "abstract": "Identifying irony in user-generated social media content has a wide range of applications; however, to date, Arabic content has received limited attention. To bridge this gap, this study builds a new open domain Arabic corpus annotated for irony detection. We query Twitter using irony-related hashtags to collect ironic messages, which are then manually annotated by two linguists according to our working definition of irony. Challenges which we have encountered during the annotation process reflect the inherent limitations of Twitter messages interpretation, as well as the complexity of Arabic and its dialects. Once published, our corpus will be a valuable free resource for developing open domain systems for automatic irony recognition in the Arabic language and its dialects in social media text.", "phrases": ["arabic", "twitter", "daict"], "overall_score": 3.9058341334436846, "scores": [2.487477875328051, 1.2381995389639207, 0.8426398377599594], "rank_score": 1.5227724173506436} -{"id": "wang-2008-examination", "title": "A Re-examination of Dependency Path Kernels for Relation Extraction", "abstract": "Extracting semantic relations between entities from natural language text is an important step towards automatic knowledge extraction from large text collections and the Web. The state-of-the-art approach to relation extraction employs Support Vector Machines (SVM) and kernel methods for classification. 
Despite the diversity of kernels and the near exhaustive trial-and-error on kernel combination, there is no clear understanding of how these kernels relate to each other and why some are superior to others. In this paper, we provide an analysis of the relative strengths and weaknesses of several kernels through systematic experimentation. We show that relation extraction can benefit from increasing the feature space through convolution kernels and introducing bias towards a more syntactically meaningful feature space. Based on our analysis, we propose a new convolution dependency path kernel that combines the above two benefits. Our experimental results on the standard ACE 2003 datasets demonstrate that our new kernel gives consistent and significantly better performance than baseline methods, obtaining results very competitive with state-of-the-art performance.", "phrases": ["relation extraction", "several kernel", "syntactic feature"], "overall_score": 3.345153025587208, "scores": [2.8673024429955416, 0.8678661447266174, 0.8321656667162285], "rank_score": 1.5224447514794626} -{"id": "jones-etal-2020-robust", "title": "Robust Encodings: A Framework for Combating Adversarial Typos", "abstract": "Despite excellent performance on many tasks, NLP systems are easily fooled by small adversarial perturbations of inputs. Existing procedures to defend against such perturbations are either (i) heuristic in nature and susceptible to stronger attacks or (ii) provide guaranteed robustness to worst-case attacks, but are incompatible with state-of-the-art models like BERT. In this work, we introduce robust encodings (RobEn): a simple framework that confers guaranteed robustness, without making compromises on model architecture. The core component of RobEn is an encoding function, which maps sentences to a smaller, discrete space of encodings. Systems using these encodings as a bottleneck confer guaranteed robustness with standard training, and the same encodings can be used across multiple tasks. We identify two desiderata to construct robust encoding functions: perturbations of a sentence should map to a small set of encodings (stability), and models using encodings should still perform well (fidelity). We instantiate RobEn to defend against a large family of adversarial typos. Across six tasks from GLUE, our instantiation of RobEn paired with BERT achieves an average robust accuracy of 71.3% against all adversarial typos in the family considered, while previous work using a typo-corrector achieves only 35.3% accuracy against a simple greedy attack.", "phrases": ["encoding", "adversarial typo", "robustness"], "overall_score": 4.017326540228833, "scores": [2.784932745491256, 0.9086864947714423, 0.8731552232702811], "rank_score": 1.5222581545109932} -{"id": "aguilar-etal-2020-lince", "title": "LinCE: A Centralized Benchmark for Linguistic Code-switching Evaluation", "abstract": "Recent trends in NLP research have raised an interest in linguistic code-switching (CS); modern approaches have been proposed to solve a wide range of NLP tasks on multiple language pairs. Unfortunately, these proposed methods are hardly generalizable to different code-switched languages. In addition, it is unclear whether a model architecture is applicable for a different task while still being compatible with the code-switching setting. This is mainly because of the lack of a centralized benchmark and the sparse corpora that researchers employ based on their specific needs and interests. 
To facilitate research in this direction, we propose a centralized benchmark for Linguistic Code-switching Evaluation (LinCE) that combines eleven corpora covering four different code-switched language pairs (i.e., Spanish-English, Nepali-English, Hindi-English, and Modern Standard Arabic-Egyptian Arabic) and four tasks (i.e., language identification, named entity recognition, part-of-speech tagging, and sentiment analysis). As part of the benchmark centralization effort, we provide an online platform where researchers can submit their results while comparing with others in real time. In addition, we provide the scores of different popular models, including LSTM, ELMo, and multilingual BERT so that the NLP community can compare against state-of-the-art systems. LinCE is a continuous effort, and we will expand it with more low-resource languages and tasks.", "phrases": ["centralized benchmark", "linguistic code-switching evaluation", "code-switched language pair", "lince"], "overall_score": 3.343744820782292, "scores": [1.9906834114432643, 1.7853617896787366, 1.740815163934376, 0.5703550343563559], "rank_score": 1.5218038498531832} -{"id": "eikema-aziz-2020-map", "title": "Is MAP Decoding All You Need? The Inadequacy of the Mode in Neural Machine Translation", "abstract": "Recent studies have revealed a number of pathologies of neural machine translation (NMT) systems. Hypotheses explaining these mostly suggest there is something fundamentally wrong with NMT as a model or its training algorithm, maximum likelihood estimation (MLE). Most of this evidence was gathered using maximum a posteriori (MAP) decoding, a decision rule aimed at identifying the highest-scoring translation, i.e. the mode. We argue that the evidence corroborates the inadequacy of MAP decoding more than casts doubt on the model and its training algorithm. In this work, we show that translation distributions do reproduce various statistics of the data well, but that beam search strays from such statistics. We show that some of the known pathologies and biases of NMT are due to MAP decoding and not to NMT's statistical assumptions nor MLE. In particular, we show that the most likely translations under the model accumulate so little probability mass that the mode can be considered essentially arbitrary. We therefore advocate for the use of decision rules that take into account the translation distribution holistically. We show that an approximation to minimum Bayes risk decoding gives competitive results confirming that NMT models do capture important aspects of translation well in expectation.", "phrases": ["map", "mode", "neural machine translation", "highest-scoring translation"], "overall_score": 3.6487655512879003, "scores": [2.4862279077530367, 2.117306217547736, 0.9157941824346304, 0.5672853866646039], "rank_score": 1.5216534236000014} -{"id": "zhang-etal-2008-tree", "title": "A Tree Sequence Alignment-based Tree-to-Tree Translation Model", "abstract": "This paper presents a translation model that is based on tree sequence alignment, where a tree sequence refers to a single sequence of subtrees that covers a phrase. The model leverages the strengths of both phrase-based and linguistically syntax-based methods. It automatically learns aligned tree sequence pairs with mapping probabilities from word-aligned biparsed parallel texts. 
Compared with previous models, it not only captures non-syntactic phrases and discontinuous phrases with linguistically structured features, but also supports multi-level structure reordering of tree typology with larger span. This gives our model stronger expressive power than other reported models. Experimental results on the NIST MT-2005 Chinese-English translation task show that our method statistically significantly outperforms the baseline systems.", "phrases": ["tree sequence", "translation model", "previous model", "non-syntactic phrase", "synchronous grammar"], "overall_score": 4.3978060852299645, "scores": [3.127835847925594, 2.4538107321087486, 0.9260479076963796, 0.5514195978130453, 0.5485683401090268], "rank_score": 1.5215364851305588} -{"id": "riloff-wiebe-2003-learning", "title": "Learning Extraction Patterns for Subjective Expressions", "abstract": "This paper presents a bootstrapping process that learns linguistically rich extraction patterns for subjective (opinionated) expressions. High-precision classifiers label unannotated data to automatically create a large training set, which is then given to an extraction pattern learning algorithm. The learned patterns are then used to identify more subjective sentences. The bootstrapping process learns many subjective patterns and increases recall while maintaining high precision.", "phrases": ["extraction pattern", "subjective expression", "subjectivity analysis", "sentiment analysis", "clause"], "overall_score": 5.952255791731816, "scores": [2.98754119575068, 1.70533243077584, 1.4955786177966195, 0.8454708381521573, 0.5737210743621447], "rank_score": 1.5215288313674882} -{"id": "thomas-etal-2006-get", "title": "Get out the vote: Determining support or opposition from Congressional floor-debate transcripts", "abstract": "We investigate whether one can determine from the transcripts of U.S. Congressional floor debates whether the speeches represent support of or opposition to proposed legislation. To address this problem, we exploit the fact that these speeches occur as part of a discussion; this allows us to use sources of information regarding relationships between discourse segments, such as whether a given utterance indicates agreement with the opinion expressed by another. We find that the incorporation of such information yields substantial improvements over classifying speeches in isolation.", "phrases": ["support", "floor-debate transcript", "legislation", "participant", "political debate"], "overall_score": 5.980565654171147, "scores": [3.499801385310216, 1.445651303044693, 1.2344939741076704, 0.8247748652158423, 0.6006076664690663], "rank_score": 1.5210658388294978} -{"id": "faruqui-dyer-2014-improving", "title": "Improving Vector Space Word Representations Using Multilingual Correlation", "abstract": "The distributional hypothesis of Harris (1954), according to which the meaning of words is evidenced by the contexts they occur in, has motivated several effective techniques for obtaining vector space semantic representations of words using unannotated text corpora. This paper argues that lexico-semantic content should additionally be invariant across languages and proposes a simple technique based on canonical correlation analysis (CCA) for incorporating multilingual evidence into vectors generated monolingually. 
We evaluate the resulting word representations on standard lexical semantic evaluation tasks and show that our method produces substantially better semantic representations than monolingual techniques.", "phrases": ["canonical correlation analysis", "monolingual embedding", "mapping", "different language", "semantic task"], "overall_score": 6.695241873951034, "scores": [2.631599463267179, 1.5698476241591794, 1.482329498518419, 1.3388685689407918, 0.5739834232880068], "rank_score": 1.519325715634715} -{"id": "wong-dras-2009-contrastive", "title": "Contrastive Analysis and Native Language Identification", "abstract": "Attempts to profile authors based on their characteristics, including native language, have drawn attention in recent years, via several approaches using machine learning with simple features. In this paper we investigate the potential usefulness to this task of contrastive analysis from second language acquisition research, which postulates that the (syntactic) errors in a text are influenced by an author\u2019s native language. We explore this, first, by conducting an analysis of three syntactic error types, through hypothesis testing and machine learning; and second, through adding in these errors as features to the replication of a previous machine learning approach. This preliminary study provides some support for the use of this kind of syntactic error as a clue to identifying the native language of an author.", "phrases": ["native language identification", "syntactic error", "contrastive analysis", "disagreement", "learner"], "overall_score": 4.550830323840092, "scores": [2.911393010692212, 2.2018215766922764, 1.1248451887865656, 0.8375512663104346, 0.5199113678726383], "rank_score": 1.5191044820708255} -{"id": "yu-etal-2017-joint", "title": "Joint Embeddings of Chinese Words, Characters, and Fine-grained Subcharacter Components", "abstract": "Word embeddings have attracted much attention recently. Different from alphabetic writing systems, Chinese characters are often composed of subcharacter components which are also semantically informative. In this work, we propose an approach to jointly embed Chinese words as well as their characters and fine-grained subcharacter components. We use three likelihoods to evaluate whether the context words, characters, and components can predict the current target word, and collected 13,253 subcharacter components to demonstrate that the existing approaches to decomposing Chinese characters are not enough. Evaluation on both word similarity and word analogy tasks demonstrates the superior performance of our model.", "phrases": ["chinese word", "character", "semantic information"], "overall_score": 4.303101791050095, "scores": [2.3793915550401827, 1.6298957914250498, 0.5471310398284259], "rank_score": 1.5188061287645527} -{"id": "see-etal-2019-makes", "title": "What makes a good conversation? How controllable attributes affect human judgments", "abstract": "A good conversation requires balance \u2013 between simplicity and detail; staying on topic and changing it; asking questions and answering them. Although dialogue agents are commonly evaluated via human judgments of overall quality, the relationship between quality and these individual factors is less well-studied. In this work, we examine two controllable neural text generation methods, conditional training and weighted decoding, in order to control four important attributes for chit-chat dialogue: repetition, specificity, response-relatedness and question-asking. 
We conduct a large-scale human evaluation to measure the effect of these control parameters on multi-turn interactive conversations on the PersonaChat task. We provide a detailed analysis of their relationship to high-level aspects of conversation, and show that by controlling combinations of these variables our models obtain clear improvements in human quality judgments.", "phrases": ["conversation", "attribute", "human judgment", "engagingness", "response generation"], "overall_score": 4.694305434483512, "scores": [3.5836155963526424, 1.7395228033824732, 0.8783639729138746, 0.8270100412940067, 0.5648893353330496], "rank_score": 1.5186803498552093} -{"id": "narasimhan-etal-2016-improving", "title": "Improving Information Extraction by Acquiring External Evidence with Reinforcement Learning", "abstract": "Most successful information extraction systems operate with access to a large collection of documents. In this work, we explore the task of acquiring and incorporating external evidence to improve extraction accuracy in domains where the amount of training data is scarce. This process entails issuing search queries, extraction from new sources and reconciliation of extracted values, which are repeated until sufficient evidence is collected. We approach the problem using a reinforcement learning framework where our model learns to select optimal actions based on contextual information. We employ a deep Q-network, trained to optimize a reward function that reflects extraction accuracy while penalizing extra effort. Our experiments on two databases -- of shooting incidents, and food adulteration cases -- demonstrate that our system significantly outperforms traditional extractors and a competitive meta-classifier baseline.", "phrases": ["information extraction", "external evidence", "reinforcement learning", "action"], "overall_score": 4.207317782256847, "scores": [2.3532905223783542, 1.8127866229911611, 1.3607155943103735, 0.5430837602260168], "rank_score": 1.5174691249764762} -{"id": "vanmassenhove-etal-2019-lost", "title": "Lost in Translation: Loss and Decay of Linguistic Richness in Machine Translation", "abstract": "This work presents an empirical approach to quantifying the loss of lexical richness in Machine Translation (MT) systems compared to Human Translation (HT). Our experiments show how current MT systems indeed fail to render the lexical diversity of human-generated or translated text. The inability of MT systems to generate diverse outputs and their tendency to exacerbate already frequent patterns while ignoring less frequent ones, might be the underlying cause for, among others, the currently heavily debated issues related to gender-biased output. Can we indeed, aside from biased data, talk about an algorithm that exacerbates seen biases?", "phrases": ["loss", "linguistic richness", "machine translation"], "overall_score": 3.8890156140295167, "scores": [2.7651090286868167, 0.8960348463197385, 0.887502205723522], "rank_score": 1.5162153602433592} -{"id": "vulic-mrksic-2018-specialising", "title": "Specialising Word Vectors for Lexical Entailment", "abstract": "We present LEAR (Lexical Entailment Attract-Repel), a novel post-processing method that transforms any input word vector space to emphasise the asymmetric relation of lexical entailment (LE), also known as the IS-A or hyponymy-hypernymy relation. 
By injecting external linguistic constraints (e.g., WordNet links) into the initial vector space, the LE specialisation procedure brings true hyponymy-hypernymy pairs closer together in the transformed Euclidean space. The proposed asymmetric distance measure adjusts the norms of word vectors to reflect the actual WordNet-style hierarchy of concepts. Simultaneously, a joint objective enforces semantic similarity using the symmetric cosine distance, yielding a vector space specialised for both lexical relations at once. LEAR specialisation achieves state-of-the-art performance in the tasks of hypernymy directionality, hypernymy detection, and graded lexical entailment, demonstrating the effectiveness and robustness of the proposed asymmetric specialisation model.", "phrases": ["word vector", "lexical entailment", "hypernymy detection"], "overall_score": 3.635642986867163, "scores": [2.090143311882778, 1.8874158406869666, 0.5709835149642635], "rank_score": 1.5161808891780026} -{"id": "yu-jiang-2016-learning", "title": "Learning Sentence Embeddings with Auxiliary Tasks for Cross-Domain Sentiment Classification", "abstract": "In this paper, we study cross-domain sentiment classification with neural network architectures. We borrow the idea from Structural Correspondence Learning and use two auxiliary tasks to help induce a sentence embedding that supposedly works well across domains for sentiment classification. We also propose to jointly learn this sentence embedding together with the sentiment classifier itself. Experimental results demonstrate that our proposed joint model outperforms several state-of-the-art methods on five benchmark datasets.", "phrases": ["auxiliary task", "sentiment classification", "pivot word"], "overall_score": 4.614709762792751, "scores": [2.555421337185789, 1.4679162393458123, 0.5238876686632946], "rank_score": 1.5157417483982984} -{"id": "lee-etal-2017-end", "title": "End-to-end Neural Coreference Resolution", "abstract": "We introduce the first end-to-end coreference resolution model and show that it significantly outperforms all previous work without using a syntactic parser or hand-engineered mention detector. The key idea is to directly consider all spans in a document as potential mentions and learn distributions over possible antecedents for each. The model computes span embeddings that combine context-dependent boundary representations with a head-finding attention mechanism. It is trained to maximize the marginal likelihood of gold antecedent spans from coreference clusters and is factored to enable aggressive pruning of potential mentions. Experiments demonstrate state-of-the-art performance, with a gain of 1.5 F1 on the OntoNotes benchmark and by 3.1 F1 using a 5-model ensemble, despite the fact that this is the first approach to be successfully trained with no external resources.", "phrases": ["neural coreference resolution", "end-to-end", "wsc", "input sentence", "approximation"], "overall_score": 7.150295347569777, "scores": [2.488689821882624, 2.3207151726785162, 1.0450559151221652, 0.8911110198965572, 0.831303198088892], "rank_score": 1.5153750255337508} -{"id": "hammerton-2003-named", "title": "Named Entity Recognition with Long Short-Term Memory", "abstract": "In this approach to named entity recognition, a recurrent neural network, known as Long Short-Term Memory, is applied. The network is trained to perform 2 passes on each sentence, outputting its decisions on the second pass. 
The first pass is used to acquire information for disambiguation during the second pass. SARDNET, a self-organising map for sequences, is used to generate representations for the lexical items presented to the LSTM network, whilst orthogonal representations are used to represent the part of speech and chunk tags.", "phrases": ["entity recognition", "long short-term memory", "lstm network"], "overall_score": 3.3265519804268044, "scores": [1.996962387355347, 1.97023784318624, 0.5747369224549157], "rank_score": 1.5139790509988342} -{"id": "zopf-etal-2016-next", "title": "The Next Step for Multi-Document Summarization: A Heterogeneous Multi-Genre Corpus Built with a Novel Construction Approach", "abstract": "Research in multi-document summarization has focused on newswire corpora since the early beginnings. However, the newswire genre provides genre-specific features such as sentence position which are easy to exploit in summarization systems. Such easy-to-exploit genre-specific features are available in other genres as well. We therefore present the new hMDS corpus for multi-document summarization, which contains heterogeneous source documents from multiple text genres, as well as summaries with different lengths. For the construction of the corpus, we developed a novel construction approach which is suited to build large and heterogeneous summarization corpora with little effort. The method reverses the usual process of writing summaries for given source documents: it combines already available summaries with appropriate source documents. In a detailed analysis, we show that our new corpus is significantly different from the homogeneous corpora commonly used, and that it is heterogeneous along several dimensions. Our experimental evaluation using well-known state-of-the-art summarization systems shows that our corpus poses new challenges in the field of multi-document summarization. Last but not least, we make our corpus publicly available to the research community at the corpus web page.", "phrases": ["summarization", "novel construction approach", "wikipedia article"], "overall_score": 3.1479634410265374, "scores": [3.1943004362766807, 0.7979071555062276, 0.5493436534858342], "rank_score": 1.5138504150895808} -{"id": "weir-etal-2016-aligning", "title": "Aligning Packed Dependency Trees: A Theory of Composition for Distributional Semantics", "abstract": "We present a new framework for compositional distributional semantics in which the distributional contexts of lexemes are expressed in terms of anchored packed dependency trees. We show that these structures have the potential to capture the full sentential contexts of a lexeme and provide a uniform basis for the composition of distributional knowledge in a way that captures both mutual disambiguation and generalization.", "phrases": ["composition", "distributional semantic", "anchored packed tree"], "overall_score": 3.629736639994178, "scores": [3.0335194596722443, 0.951772732137786, 0.5558610618409112], "rank_score": 1.5137177512169806} -{"id": "lin-etal-2019-open", "title": "Open Sesame: Getting inside BERT's Linguistic Knowledge", "abstract": "How and to what extent does BERT encode syntactically-sensitive hierarchical information or positionally-sensitive linear information? Recent work has shown that contextual representations like BERT perform well on tasks that require sensitivity to linguistic structure. We present here two studies which aim to provide a better understanding of the nature of BERT's representations. 
The first of these focuses on the identification of structurally-defined elements using diagnostic classifiers, while the second explores BERT's representation of subject-verb agreement and anaphor-antecedent dependencies through a quantitative assessment of self-attention vectors. In both cases, we find that BERT encodes positional information about word tokens well on its lower layers, but switches to a hierarchically-oriented encoding on higher layers. We conclude then that BERT's representations do indeed model linguistically relevant aspects of hierarchical structure, though they do not appear to show the sharp sensitivity to hierarchical structure that is found in human processing of reflexive anaphora.", "phrases": ["bert", "linguistic knowledge", "subject-verb agreement", "agreement", "relevant aspect"], "overall_score": 4.9294549940951535, "scores": [3.777682218652894, 1.8210763945856958, 0.8669324967356855, 0.5572032141567466, 0.5420365128917252], "rank_score": 1.5129861674045495} -{"id": "dannells-etal-2013-multilingual", "title": "Multilingual access to cultural heritage content on the Semantic Web", "abstract": "As the amount of cultural data available on the Semantic Web is expanding, the demand for accessing this data in multiple languages is increasing. Previous work on multilingual access to cultural heritage information has shown that mapping from ontologies to natural language requires at least two different steps: (1) mapping multilingual metadata to interoperable knowledge sources; (2) assigning multilingual knowledge to cultural data. This paper presents our work on making cultural heritage content available on the Semantic Web and accessible in 15 languages. The objective of our work is both to form queries and to retrieve semantic content in multiple languages. We describe our experiences with processing museum data extracted from two different sources, harmonizing this data and making its content accessible in natural language.", "phrases": ["cultural heritage content", "semantic web", "multilingual access"], "overall_score": 1.6617314462079056, "scores": [1.8979265509072258, 1.7728410318550516, 0.8669518566110624], "rank_score": 1.5125731464577798} -{"id": "martins-etal-2009-concise", "title": "Concise Integer Linear Programming Formulations for Dependency Parsing", "abstract": "We formulate the problem of non-projective dependency parsing as a polynomial-sized integer linear program. Our formulation is able to handle non-local output features in an efficient manner; not only is it compatible with prior knowledge encoded as hard constraints, it can also learn soft constraints from data. In particular, our model is able to learn correlations among neighboring arcs (siblings and grandparents), word valency, and tendencies toward nearly-projective parses. The model parameters are learned in a max-margin framework by employing a linear programming relaxation. 
We evaluate the performance of our parser on data in several natural languages, achieving improvements over existing state-of-the-art methods.", "phrases": ["integer linear programming", "dependency parsing", "similar line"], "overall_score": 4.192530255834374, "scores": [2.583482823962602, 1.4038295660812536, 0.5490945666080354], "rank_score": 1.512135652217297} -{"id": "fung-cheung-2004-multi", "title": "Multi-level Bootstrapping For Extracting Parallel Sentences From a Quasi-Comparable Corpus", "abstract": "We propose a completely unsupervised method for mining parallel sentences from quasi-comparable bilingual texts which have very different sizes, and which include both in-topic and off-topic documents. We discuss and analyze different bilingual corpora with various levels of comparability. We propose that while better document matching leads to better parallel sentence extraction, better sentence matching also leads to better document matching. Based on this, we use multi-level bootstrapping to improve the alignments between documents, sentences, and bilingual word pairs, iteratively. Our method is the first method that does not rely on any supervised training data, such as a sentence-aligned corpus, or temporal information, such as the publishing date of a news article. It is validated by experimental results that show a 23% improvement over a method without multilevel bootstrapping.", "phrases": ["parallel sentence", "in-topic", "document matching", "multi-level bootstrapping", "comparable corpora"], "overall_score": 3.9903349360949214, "scores": [3.1908414889942853, 1.9071564254771394, 1.3496885108276604, 0.5743667419233057, 0.5380988827458889], "rank_score": 1.5120304099936557} -{"id": "rabinovich-etal-2017-abstract", "title": "Abstract Syntax Networks for Code Generation and Semantic Parsing", "abstract": "Tasks like code generation and semantic parsing require mapping unstructured (or partially structured) inputs to well-formed, executable outputs. We introduce abstract syntax networks, a modeling framework for these problems. The outputs are represented as abstract syntax trees (ASTs) and constructed by a decoder with a dynamically-determined modular structure paralleling the structure of the output tree. On the benchmark Hearthstone dataset for code generation, our model obtains 79.2 BLEU and 22.7% exact match accuracy, compared to previous state-of-the-art values of 67.1 and 6.1%. Furthermore, we perform competitively on the Atis, Jobs, and Geo semantic parsing datasets with no task-specific engineering.", "phrases": ["code generation", "ast", "modular structure", "valid program", "programming language"], "overall_score": 5.237901889467379, "scores": [3.1794201706613077, 1.3491314803924435, 1.2256894250668096, 0.9220893659410273, 0.8803646385359302], "rank_score": 1.5113390161195037} -{"id": "chrysostomou-aletras-2022-empirical", "title": "An Empirical Study on Explanations in Out-of-Domain Settings", "abstract": "Recent work in Natural Language Processing has focused on developing approaches that extract faithful explanations, either via identifying the most important tokens in the input (i.e. post-hoc explanations) or by designing inherently faithful models that first select the most important tokens and then use them to predict the correct label (i.e. select-then-predict models). Currently, these approaches are largely evaluated on in-domain settings. Yet, little is known about how post-hoc explanations and inherently faithful models perform in out-of-domain settings. 
In this paper, we conduct an extensive empirical study that examines: (1) the out-of-domain faithfulness of post-hoc explanations, generated by five feature attribution methods; and (2) the out-of-domain performance of two inherently faithful models over six datasets. Contrary to our expectations, results show that in many cases out-of-domain post-hoc explanation faithfulness measured by sufficiency and comprehensiveness is higher than in-domain. We find this misleading and suggest using a random baseline as a yardstick for evaluating post-hoc explanation faithfulness. Our findings also show that select-then-predict models demonstrate comparable predictive performance in out-of-domain settings to full-text trained models.", "phrases": ["empirical study", "explanation", "out-of-domain setting"], "overall_score": 1.6601946798709537, "scores": [1.8801919538119756, 1.804294321711994, 0.8490366888432107], "rank_score": 1.5111743214557267} -{"id": "clark-etal-2019-boolq", "title": "BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions", "abstract": "In this paper we study yes/no questions that are naturally occurring \u2014 meaning that they are generated in unprompted and unconstrained settings. We build a reading comprehension dataset, BoolQ, of such questions, and show that they are unexpectedly challenging. They often query for complex, non-factoid information, and require difficult entailment-like inference to solve. We also explore the effectiveness of a range of transfer learning baselines. We find that transferring from entailment data is more effective than transferring from paraphrase or extractive QA data, and that it, surprisingly, continues to be very beneficial even when starting from massive pre-trained language models such as BERT. Our best method trains BERT on MultiNLI and then re-trains it on our train set. It achieves 80.4% accuracy compared to 90% accuracy of human annotators (and 62% majority-baseline), leaving a significant gap for future work.", "phrases": ["difficulty", "boolq", "question answering"], "overall_score": 4.52685367003141, "scores": [3.385919401381116, 0.593290486380629, 0.554092747451018], "rank_score": 1.5111008784042543} -{"id": "gimpel-etal-2011-part", "title": "Part-of-Speech Tagging for Twitter: Annotation, Features, and Experiments", "abstract": "We address the problem of part-of-speech tagging for English data from the popular micro-blogging service Twitter. We develop a tagset, annotate data, develop features, and report tagging results nearing 90% accuracy. The data and tools have been made available to the research community with the goal of enabling richer text analysis of Twitter and related social media data sets.", "phrases": ["twitter", "tagset", "part-of-speech tagging", "annotated tweet", "pos"], "overall_score": 5.716456098422156, "scores": [3.640659301302988, 1.9887945062709809, 0.8366406026402221, 0.5633820477956913, 0.5236018116210837], "rank_score": 1.5106156539261932} -{"id": "clark-manning-2016-improving", "title": "Improving Coreference Resolution by Learning Entity-Level Distributed Representations", "abstract": "A long-standing challenge in coreference resolution has been the incorporation of entity-level information - features defined over clusters of mentions instead of mention pairs. We present a neural network based coreference system that produces high-dimensional vector representations for pairs of coreference clusters. 
Using these representations, our system learns when combining clusters is desirable. We train the system with a learning-to-search algorithm that teaches it which local decisions (cluster merges) will lead to a high-scoring final coreference partition. The system substantially outperforms the current state-of-the-art on the English and Chinese portions of the CoNLL 2012 Shared Task dataset despite using few hand-engineered features.", "phrases": ["coreference resolution", "entity-level information", "cluster", "mention"], "overall_score": 4.9759404489369645, "scores": [3.618814311356286, 1.3020382695464876, 0.5912267299169028, 0.526982270489097], "rank_score": 1.5097653953271934} -{"id": "de-jong-etal-2018-clarin", "title": "CLARIN: Towards FAIR and Responsible Data Science Using Language Resources", "abstract": "CLARIN is a European Research Infrastructure providing access to language resources and technologies for researchers in the humanities and social sciences. It supports the study of language data in general and aims to increase the potential for comparative research of cultural and societal phenomena across the boundaries of languages. This paper outlines the CLARIN vision and strategy, and it explains how the design and implementation of CLARIN are compliant with the FAIR principles: findability, accessibility, interoperability and reusability of data. The paper also explains the approach of CLARIN towards the enabling of responsible data science. Attention is paid to (i) the development of measures for increasing the transparency and explainability of the results from applying CLARIN technologies, in particular in the context of multidisciplinary research, and (ii) stimulating the uptake of its resources, tools and services by the various communities of use, all in accordance with the principles for Open Science.", "phrases": ["responsible data science", "language resources", "clarin"], "overall_score": 2.704558650828225, "scores": [2.774584288683997, 0.8833738665002401, 0.8703706142907726], "rank_score": 1.5094429231583366} -{"id": "mausam-etal-2012-open", "title": "Open Language Learning for Information Extraction", "abstract": "Open Information Extraction (IE) systems extract relational tuples from text, without requiring a pre-specified vocabulary, by identifying relation phrases and associated arguments in arbitrary sentences. However, state-of-the-art Open IE systems such as ReVerb and woe share two important weaknesses -- (1) they extract only relations that are mediated by verbs, and (2) they ignore context, thus extracting tuples that are not asserted as factual. This paper presents ollie, a substantially improved Open IE system that addresses both these limitations. First, ollie achieves high yield by extracting relations mediated by nouns, adjectives, and more. Second, a context-analysis step increases precision by including contextual information from the sentence in the extractions. 
ollie obtains 2.7 times the area under the precision-yield curve (AUC) compared to ReVerb and 1.9 times the AUC of woeparse.", "phrases": ["information extraction", "relation phrase", "open language learning", "openie", "recall"], "overall_score": 6.047705329099078, "scores": [2.7392905289587484, 1.451307115742741, 1.3849686647484518, 1.1018510284436074, 0.8683806399057976], "rank_score": 1.5091595955598691} -{"id": "grusky-etal-2018-newsroom", "title": "Newsroom: A Dataset of 1.3 Million Summaries with Diverse Extractive Strategies", "abstract": "We present NEWSROOM, a summarization dataset of 1.3 million articles and summaries written by authors and editors in newsrooms of 38 major news publications. Extracted from search and social media metadata between 1998 and 2017, these high-quality summaries demonstrate high diversity of summarization styles. In particular, the summaries combine abstractive and extractive strategies, borrowing words and phrases from articles at varying rates. We analyze the extraction strategies used in NEWSROOM summaries against other datasets to quantify the diversity and difficulty of our new data, and train existing methods on the data to evaluate its utility and challenges. The dataset is available online at summari.es.", "phrases": ["summarization dataset", "abstractiveness", "newsroom", "large-scale dataset", "extractive method"], "overall_score": 5.22974592869475, "scores": [3.118822165369272, 1.8681624905958767, 1.4010455822750758, 0.5996165246215969, 0.5572817535753393], "rank_score": 1.5089857032874323} -{"id": "fujiki-etal-2003-automatic", "title": "Automatic Acquisition of Script Knowledge from a Text Collection", "abstract": "In this paper, we describe a method for automatic acquisition of script knowledge from a Japanese text collection. Script knowledge represents a typical sequence of actions that occur in a particular situation. We extracted sequences (pairs) of actions occurring in time order from a Japanese text collection and then chose those that were typical of certain situations by ranking these sequences (pairs) in terms of the frequency of their occurrence. To extract sequences of actions occurring in time order, we constructed a text collection in which texts describing facts relating to a similar situation were clustered together and arranged in time order. We also describe a preliminary experiment with our acquisition system and discuss the results.", "phrases": ["script knowledge", "text collection", "automatic acquisition"], "overall_score": 2.934864554863776, "scores": [1.9219991890350399, 1.735313464053459, 0.867353436384193], "rank_score": 1.5082220298242308} -{"id": "xu-etal-2015-classifying", "title": "Classifying Relations via Long Short Term Memory Networks along Shortest Dependency Paths", "abstract": "Relation classification is an important research arena in the field of natural language processing (NLP). In this paper, we present SDP-LSTM, a novel neural network to classify the relation of two entities in a sentence. Our neural architecture leverages the shortest dependency path (SDP) between two entities; multichannel recurrent neural networks, with long short term memory (LSTM) units, pick up heterogeneous information along the SDP. Our proposed model has several distinct features: (1) The shortest dependency paths retain most relevant information (to relation classification), while eliminating irrelevant words in the sentence. 
(2) The multichannel LSTM networks allow effective information integration from heterogeneous sources over the dependency paths. (3) A customized dropout strategy regularizes the neural network to alleviate overfitting. We test our model on the SemEval 2010 relation classification task, and achieve an $F_1$-score of 83.7\\%, higher than competing methods in the literature.", "phrases": ["short term memory", "recurrent neural network", "short dependency path", "cnn", "input sentence"], "overall_score": 6.011889460454371, "scores": [2.4576611174440623, 1.5652675589168534, 1.4136031038001822, 1.238877878037877, 0.8602051628635866], "rank_score": 1.5071229642125121} -{"id": "somasundaran-etal-2014-lexical", "title": "Lexical Chaining for Measuring Discourse Coherence Quality in Test-taker Essays", "abstract": "This paper presents an investigation of lexical chaining (Morris and Hirst, 1991) for measuring discourse coherence quality in test-taker essays. We hypothesize that attributes of lexical chains, as well as interactions between lexical chains and explicit discourse elements, can be harnessed for representing coherence. Our experiments reveal that performance achieved by our new lexical chain features is better than that of previous discourse features used for this task, and that the best system performance is achieved when combining lexical chaining features with complementary discourse features, such as those provided by a discourse parser based on rhetorical structure theory, and features that reflect errors in grammar, word usage, and mechanics.", "phrases": ["discourse coherence quality", "test-taker essay", "lexical chaining"], "overall_score": 3.469741194592854, "scores": [1.9127845063808153, 1.7243205414748592, 0.8835633154765457], "rank_score": 1.5068894544440734} -{"id": "liu-li-2016-recognizing", "title": "Recognizing Implicit Discourse Relations via Repeated Reading: Neural Networks with Multi-Level Attention", "abstract": "Recognizing implicit discourse relations is a challenging but important task in the field of Natural Language Processing. For such a complex text processing task, different from previous studies, we argue that it is necessary to repeatedly read the arguments and dynamically exploit the efficient features useful for recognizing discourse relations. To mimic the repeated reading strategy, we propose the neural networks with multi-level attention (NNMA), combining the attention mechanism and external memories to gradually fix the attention on some specific words helpful to judging the discourse relations. Experiments on the PDTB dataset show that our proposed method achieves state-of-the-art results. The visualization of the attention weights also illustrates the progress that our model observes the arguments on each level and progressively locates the important words.", "phrases": ["discourse relation", "multi-level attention", "memory"], "overall_score": 4.266891747823611, "scores": [2.687062490757038, 1.3077831320328517, 0.5232310939971844], "rank_score": 1.506025572262358} -{"id": "fonseca-etal-2019-findings", "title": "Findings of the WMT 2019 Shared Tasks on Quality Estimation", "abstract": "We report the results of the WMT19 shared task on Quality Estimation, i.e. the task of predicting the quality of the output of machine translation systems given just the source text and the hypothesis translations. The task includes estimation at three granularity levels: word, sentence and document. 
A novel addition is evaluating sentence-level QE against human judgments: in other words, designing MT metrics that do not need a reference translation. This year we include three language pairs, produced solely by neural machine translation systems. Participating teams from eleven institutions submitted a variety of systems to different task variants and language pairs.", "phrases": ["wmt", "shared tasks", "quality estimation", "granularity level", "post-editing"], "overall_score": 4.582816484356283, "scores": [3.633029243770113, 1.6732155897296388, 0.7965794633674083, 0.8896459167860852, 0.5338603982885365], "rank_score": 1.5052661223883563} -{"id": "rios-etal-2011-tine", "title": "TINE: A Metric to Assess MT Adequacy", "abstract": "We describe TINE, a new automatic evaluation metric for Machine Translation that aims at assessing segment-level adequacy. Lexical similarity and shallow-semantics are used as indicators of adequacy between machine and reference translations. The metric is based on the combination of a lexical matching component and an adequacy component. Lexical matching is performed comparing bags-of-words without any linguistic annotation. The adequacy component consists in: i) using ontologies to align predicates (verbs), ii) using semantic roles to align predicate arguments (core arguments and modifiers), and iii) matching predicate arguments using distributional semantics. TINE's performance is comparable to that of previous metrics at segment level for several language pairs, with average Kendall's tau correlation from 0.26 to 0.29. We show that the addition of the shallow-semantic component improves the performance of simple lexical matching strategies and metrics such as BLEU.", "phrases": ["evaluation metric", "tine", "adequacy judgment", "meteor"], "overall_score": 3.1293793440651583, "scores": [3.3532282889526357, 0.9095674949166237, 0.9093010470505064, 0.8475565834044481], "rank_score": 1.5049133535810537} -{"id": "roller-etal-2021-recipes", "title": "Recipes for Building an Open-Domain Chatbot", "abstract": "Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we highlight other ingredients. Good conversation requires blended skills: providing engaging talking points, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models outperform existing approaches in multi-turn dialogue on engagingness and humanness measurements. 
We then discuss the limitations of this work by analyzing failure cases of our models.", "phrases": ["open-domain chatbot", "dialogue system", "interlocutor", "text generation task"], "overall_score": 6.411351178184057, "scores": [3.118674663600007, 1.532047750014618, 0.8440218437109074, 0.5215190916680708], "rank_score": 1.5040658372484008} -{"id": "turney-2013-distributional", "title": "Distributional Semantics Beyond Words: Supervised Learning of Analogy and Paraphrase", "abstract": "There have been several efforts to extend distributional semantics beyond individual words, to measure the similarity of word pairs, phrases, and sentences (briefly, tuples; ordered sets of words, contiguous or noncontiguous). One way to extend beyond words is to compare two tuples using a function that combines pairwise similarities between the component words in the tuples. A strength of this approach is that it works with both relational similarity (analogy) and compositional similarity (paraphrase). However, past work required hand-coding the combination function for different tasks. The main contribution of this paper is that combination functions are generated by supervised learning. We achieve state-of-the-art results in measuring relational similarity between word pairs (SAT analogies and SemEval 2012 Task 2) and measuring compositional similarity between noun-modifier phrases and unigrams (multiple-choice paraphrase questions).", "phrases": ["supervised learning", "analogy", "paraphrase"], "overall_score": 2.9253645017922367, "scores": [1.862954368096384, 1.8237518915900606, 0.823313645208581], "rank_score": 1.5033399682983422} -{"id": "martins-etal-2011-dual", "title": "Dual Decomposition with Many Overlapping Components", "abstract": "Dual decomposition has been recently proposed as a way of combining complementary models, with a boost in predictive power. However, in cases where lightweight decompositions are not readily available (e.g., due to the presence of rich features or logical constraints), the original subgradient algorithm is inefficient. We sidestep that difficulty by adopting an augmented Lagrangian method that accelerates model consensus by regularizing towards the averaged votes. We show how first-order logical constraints can be handled efficiently, even though the corresponding subproblems are no longer combinatorial, and report experiments in dependency parsing, with state-of-the-art results.", "phrases": ["consensus", "dependency parsing", "state-of-the-art result", "dual decomposition"], "overall_score": 4.258767122620931, "scores": [4.034771631919932, 0.8960656798478736, 0.5548488081349984, 0.5269456242690592], "rank_score": 1.5031579360429657} -{"id": "li-etal-2007-probabilistic", "title": "A Probabilistic Approach to Syntax-based Reordering for Statistical Machine Translation", "abstract": "Inspired by previous preprocessing approaches to SMT, this paper proposes a novel, probabilistic approach to reordering which combines the merits of syntax and phrase-based SMT. Given a source sentence and its parse tree, our method generates, by tree operations, an n-best list of reordered inputs, which are then fed to a standard phrase-based decoder to produce the optimal translation. 
Experiments show that, for the NIST MT-05 task of Chinese-to-English translation, the proposal leads to a BLEU improvement of 1.56%.", "phrases": ["probabilistic approach", "reordering", "phrase-based machine translation"], "overall_score": 5.060752148404733, "scores": [3.0366630094181426, 0.8607093981932472, 0.6113661207985671], "rank_score": 1.502912842803319} -{"id": "balazevic-etal-2019-tucker", "title": "TuckER: Tensor Factorization for Knowledge Graph Completion", "abstract": "Knowledge graphs are structured representations of real world facts. However, they typically contain only a small subset of all possible facts. Link prediction is a task of inferring missing facts based on existing ones. We propose TuckER, a relatively straightforward but powerful linear model based on Tucker decomposition of the binary tensor representation of knowledge graph triples. TuckER outperforms previous state-of-the-art models across standard link prediction datasets, acting as a strong baseline for more elaborate models. We show that TuckER is a fully expressive model, derive sufficient bounds on its embedding dimensionalities and demonstrate that several previously introduced linear models can be viewed as special cases of TuckER.", "phrases": ["tensor factorization", "knowledge graph", "tucker"], "overall_score": 3.852973796207034, "scores": [2.6189030923181447, 0.9294077987159441, 0.9581801845092746], "rank_score": 1.502163691847788} -{"id": "kim-etal-2019-compound", "title": "Compound Probabilistic Context-Free Grammars for Grammar Induction", "abstract": "We study a formalization of the grammar induction problem that models sentences as being generated by a compound probabilistic context free grammar. In contrast to traditional formulations which learn a single stochastic grammar, our context-free rule probabilities are modulated by a per-sentence continuous latent variable, which induces marginal dependencies beyond the traditional context-free assumptions. Inference in this context-dependent grammar is performed by collapsed variational inference, in which an amortized variational posterior is placed on the continuous variable, and the latent trees are marginalized with dynamic programming. Experiments on English and Chinese show the effectiveness of our approach compared to recent state-of-the-art methods for grammar induction from words with neural language models.", "phrases": ["grammar induction", "chinese", "pcfg", "syntax-dependent model", "mixture"], "overall_score": 4.950394737829075, "scores": [3.9765237449366535, 1.8202729428972406, 0.5834383546317007, 0.5693263373634363, 0.5605110829361395], "rank_score": 1.5020144925530343} -{"id": "wei-gulla-2010-sentiment", "title": "Sentiment Learning on Product Reviews via Sentiment Ontology Tree", "abstract": "Existing works on sentiment analysis on product reviews suffer from the following limitations: (1) The knowledge of hierarchical relationships of product attributes is not fully utilized. (2) Reviews or sentences mentioning several attributes associated with complicated sentiments are not dealt with very well. In this paper, we propose a novel HL-SOT approach to labeling a product's attributes and their associated sentiments in product reviews by a Hierarchical Learning (HL) process with a defined Sentiment Ontology Tree (SOT). The empirical analysis against a human-labeled data set demonstrates promising and reasonable performance of the proposed HL-SOT approach. 
While this paper is mainly on sentiment analysis on reviews of one product, our proposed HL-SOT approach is easily generalized to labeling a mix of reviews of more than one product.", "phrases": ["review", "sentiment ontology tree", "hl-sot approach", "hierarchical learning"], "overall_score": 3.9637420934316823, "scores": [4.113017868812233, 0.8267823590058547, 0.5395557662376486, 0.5284590677103472], "rank_score": 1.501953765441521} -{"id": "meyers-etal-2004-nombank", "title": "The NomBank Project: An Interim Report", "abstract": "This paper describes NomBank, a project that will provide argument structure for instances of common nouns in the Penn Treebank II corpus. NomBank is part of a larger effort to add additional layers of annotation to the Penn Treebank II corpus. The University of Pennsylvania\u2019s PropBank, NomBank and other annotation projects taken together should lead to the creation of better tools for the automatic analysis of text. This paper describes the NomBank project in detail including its specifications and the process involved in creating the resource.", "phrases": ["nombank project", "argument structure", "recent release"], "overall_score": 4.893411463626353, "scores": [2.5271325883157556, 1.4296266559416269, 0.5490109867848828], "rank_score": 1.5019234103474217} -{"id": "cotterell-etal-2015-labeled", "title": "Labeled Morphological Segmentation with Semi-Markov Models", "abstract": "We present labeled morphological segmentation\u2014an alternative view of morphological processing that unifies several tasks. We introduce a new hierarchy of morphotactic tagsets and CHIPMUNK, a discriminative morphological segmentation system that, contrary to previous work, explicitly models morphotactics. We show improved performance on three tasks for all six languages: (i) morphological segmentation, (ii) stemming and (iii) morphological tag classification. For morphological segmentation our method shows absolute improvements of 2-6 points F1 over a strong baseline.", "phrases": ["morphological segmentation", "semi-markov crf", "manner"], "overall_score": 4.164100151314234, "scores": [2.998129632521119, 0.9570329329451613, 0.5504824130832414], "rank_score": 1.5018816595165072} -{"id": "dubey-keller-2003-probabilistic", "title": "Probabilistic Parsing for German Using Sister-Head Dependencies", "abstract": "We present a probabilistic parsing model for German trained on the Negra treebank. We observe that existing lexicalized parsing models using head-head dependencies, while successful for English, fail to outperform an unlexicalized baseline model for German. Learning curves show that this effect is not due to lack of training data. We propose an alternative model that uses sister-head dependencies instead of head-head dependencies. This model outperforms the baseline, achieving a labeled precision and recall of up to 74%. This indicates that sister-head dependencies are more appropriate for treebanks with very flat structures such as Negra.", "phrases": ["german", "sister-head dependency", "lexicalization"], "overall_score": 3.7317908266401267, "scores": [1.992776490723819, 1.3701483599782007, 1.142424467299731], "rank_score": 1.5017831060005833} -{"id": "weber-etal-2019-nlprolog", "title": "NLProlog: Reasoning with Weak Unification for Question Answering in Natural Language", "abstract": "Rule-based models are attractive for various tasks because they inherently lead to interpretable and explainable decisions and can easily incorporate prior knowledge. 
However, such systems are difficult to apply to problems involving natural language, due to its large linguistic variability. In contrast, neural models can cope very well with ambiguity by learning distributed representations of words and their composition from data, but lead to models that are difficult to interpret. In this paper, we describe a model combining neural networks with logic programming in a novel manner for solving multi-hop reasoning tasks over natural language. Specifically, we propose to use a Prolog prover which we extend to utilize a similarity function over pretrained sentence encoders. We fine-tune the representations for the similarity function via backpropagation. This leads to a system that can apply rule-based reasoning to natural language, and induce domain-specific natural language rules from training data. We evaluate the proposed system on two different question answering tasks, showing that it outperforms two baselines \u2013 BiDAF (Seo et al., 2016a) and FastQA (Weissenborn et al., 2017) on a subset of the WikiHop corpus and achieves competitive results on the MedHop data set (Welbl et al., 2017).", "phrases": ["reasoning", "unification", "prolog prover", "nlprolog"], "overall_score": 4.063051877320444, "scores": [2.4914142659246092, 1.6327421739836745, 1.2952113607196287, 0.5820746773095072], "rank_score": 1.5003606194843548} -{"id": "sabou-etal-2014-corpus", "title": "Corpus Annotation through Crowdsourcing: Towards Best Practice Guidelines", "abstract": "Crowdsourcing is an emerging collaborative approach that can be used for the acquisition of annotated corpora and a wide range of other linguistic resources. Although the use of this approach is intensifying in all its key genres (paid-for crowdsourcing, games with a purpose, volunteering-based approaches), the community still lacks a set of best-practice guidelines similar to the annotation best practices for traditional, expert-based corpus acquisition. In this paper we focus on the use of crowdsourcing methods for corpus acquisition and propose a set of best practice guidelines based on our own experiences in this area and an overview of related literature. We also introduce GATE Crowd, a plugin of the GATE platform that relies on these guidelines and offers tool support for using crowdsourcing in a more principled and efficient manner.", "phrases": ["crowdsourcing", "corpus annotation", "good practice"], "overall_score": 3.848069185292983, "scores": [2.4563160610637524, 0.9464966913117048, 1.0979418228743054], "rank_score": 1.500251525083254} -{"id": "jia-liang-2017-adversarial", "title": "Adversarial Examples for Evaluating Reading Comprehension Systems", "abstract": "Standard accuracy metrics indicate that reading comprehension systems are making rapid progress, but the extent to which these systems truly understand language remains unclear. To reward systems with real language understanding abilities, we propose an adversarial evaluation scheme for the Stanford Question Answering Dataset (SQuAD). Our method tests whether systems can answer questions about paragraphs that contain adversarially inserted sentences, which are automatically generated to distract computer systems without changing the correct answer or misleading humans. In this adversarial setting, the accuracy of sixteen published models drops from an average of 75% F1 score to 36%; when the adversary is allowed to add ungrammatical sequences of words, average accuracy on four models decreases further to 7%. 
We hope our insights will motivate the development of new models that understand language more precisely.", "phrases": ["reading comprehension system", "paragraph", "adversarial examples", "distractor sentence", "incorrect answer"], "overall_score": 7.7555901046574744, "scores": [1.4132583408164592, 2.1006207470082434, 2.0168067408581822, 1.4098755527593272, 0.5593072112273687], "rank_score": 1.4999737185339161} -{"id": "zheng-etal-2017-joint", "title": "Joint Extraction of Entities and Relations Based on a Novel Tagging Scheme", "abstract": "Joint extraction of entities and relations is an important task in information extraction. To tackle this problem, we firstly propose a novel tagging scheme that can convert the joint extraction task to a tagging problem. Then, based on our tagging scheme, we study different end-to-end models to extract entities and their relations directly, without identifying entities and relations separately. We conduct experiments on a public dataset produced by a distant supervision method and the experimental results show that the tagging-based methods are better than most of the existing pipelined and joint learning methods. What's more, the end-to-end model proposed in this paper achieves the best results on the public dataset.", "phrases": ["novel tagging scheme", "joint extraction", "relational triple", "fact extraction", "schema"], "overall_score": 5.2444007296981745, "scores": [3.1345982457865693, 2.3626927773629185, 0.8856381322695335, 0.5712176458875486, 0.5453375234157334], "rank_score": 1.4998968649444606} -{"id": "shan-etal-2020-contextual", "title": "A Contextual Hierarchical Attention Network with Adaptive Objective for Dialogue State Tracking", "abstract": "Recent studies in dialogue state tracking (DST) leverage historical information to determine states which are generally represented as slot-value pairs. However, most of them have limitations to efficiently exploit relevant context due to the lack of a powerful mechanism for modeling interactions between the slot and the dialogue history. Besides, existing methods usually ignore the slot imbalance problem and treat all slots indiscriminately, which limits the learning of hard slots and eventually hurts overall performance. In this paper, we propose to enhance the DST through employing a contextual hierarchical attention network to not only discern relevant information at both word level and turn level but also learn contextual representations. We further propose an adaptive objective to alleviate the slot imbalance problem by dynamically adjusting weights of different slots during training. Experimental results show that our approach reaches 52.68% and 58.55% joint accuracy on MultiWOZ 2.0 and MultiWOZ 2.1 datasets respectively and achieves new state-of-the-art performance with considerable improvements (+1.24% and +5.98%).", "phrases": ["hierarchical attention network", "adaptive objective", "dialogue state tracking"], "overall_score": 3.295184516399179, "scores": [1.8680691979863888, 1.8427062947433188, 0.7883338159701682], "rank_score": 1.4997031028999588} -{"id": "chang-etal-2008-optimizing", "title": "Optimizing Chinese Word Segmentation for Machine Translation Performance", "abstract": "Previous work has shown that Chinese word segmentation is useful for machine translation to English, yet the way different segmentation strategies affect MT is still poorly understood. 
In this paper, we demonstrate that optimizing segmentation for an existing segmentation standard does not always yield better MT performance. We find that other factors such as segmentation consistency and granularity of Chinese \"words\" can be more important for machine translation. Based on these findings, we implement methods inside a conditional random field segmenter that directly optimize segmentation granularity with respect to the MT task, providing an improvement of 0.73 BLEU. We also show that improving segmentation consistency using external lexicon and proper noun features yields a 0.32 BLEU increase.", "phrases": ["chinese", "word segmentation", "translation performance", "speech recognition", "tokenization"], "overall_score": 4.996200758541611, "scores": [4.297624175790419, 1.1468427537295798, 0.9746368271795685, 0.542775799770367, 0.5349603632967492], "rank_score": 1.4993679839533367} -{"id": "xie-etal-2020-worldtree", "title": "WorldTree V2: A Corpus of Science-Domain Structured Explanations and Inference Patterns supporting Multi-Hop Inference", "abstract": "Explainable question answering for complex questions often requires combining large numbers of facts to answer a question while providing a human-readable explanation for the answer, a process known as multi-hop inference. Standardized science questions require combining an average of 6 facts, and as many as 16 facts, in order to answer and explain, but most existing datasets for multi-hop reasoning focus on combining only two facts, significantly limiting the ability of multi-hop inference algorithms to learn to generate large inferences. In this work we present the second iteration of the WorldTree project, a corpus of 5,114 standardized science exam questions paired with large detailed multi-fact explanations that combine core scientific knowledge and world knowledge. Each explanation is represented as a lexically-connected \u201cexplanation graph\u201d that combines an average of 6 facts drawn from a semi-structured knowledge base of 9,216 facts across 66 tables. We use this explanation corpus to author a set of 344 high-level science domain inference patterns similar to semantic frames supporting multi-hop inference. Together, these resources provide training data and instrumentation for developing many-fact multi-hop inference models for question answering.", "phrases": ["explanation", "multi-hop inference", "worldtree corpus"], "overall_score": 3.7255603142599583, "scores": [2.363580880120451, 1.5578920547582118, 0.5763543552263447], "rank_score": 1.4992757633683358} -{"id": "venugopal-etal-2009-preference", "title": "Preference Grammars: Softening Syntactic Constraints to Improve Statistical Machine Translation", "abstract": "We propose a novel probabilistic synchronous context-free grammar formalism for statistical machine translation, in which syntactic nonterminal labels are represented as \"soft\" preferences rather than as \"hard\" matching constraints. This formalism allows us to efficiently score unlabeled synchronous derivations without forgoing traditional syntactic constraints. Using this score as a feature in a log-linear model, we are able to approximate the selection of the most likely unlabeled derivation. This helps reduce fragmentation of probability across differently labeled derivations of the same translation. It also allows the importance of syntactic preferences to be learned alongside other features (e.g., the language model) and for particular labeling procedures. 
We show improvements in translation quality on small and medium-sized Chinese-to-English translation tasks.", "phrases": ["syntactic constraint", "derivation", "log-linear model", "preference grammar"], "overall_score": 3.9565670120373992, "scores": [2.4553093022251056, 2.1787661502065734, 0.8262122692731396, 0.5366521213780405], "rank_score": 1.4992349607707147} -{"id": "acosta-etal-2011-identification", "title": "Identification and Treatment of Multiword Expressions Applied to Information Retrieval", "abstract": "The extensive use of Multiword Expressions (MWE) in natural language texts prompts more detailed studies that aim for a more adequate treatment of these expressions. A MWE typically expresses concepts and ideas that usually cannot be expressed by a single word. Intuitively, with the appropriate treatment of MWEs, the results of an Information Retrieval (IR) system could be improved. The aim of this paper is to apply techniques for the automatic extraction of MWEs from corpora to index them as a single unit. Experimental results show improvements on the retrieval of relevant documents when identifying MWEs and treating them as a single indexing unit.", "phrases": ["treatment", "multiword expressions", "information retrieval", "compositionality"], "overall_score": 2.4124475265996947, "scores": [2.8466959169131365, 1.6704593069411808, 0.9585466253652978, 0.5200498550536603], "rank_score": 1.4989379260683187} -{"id": "chakravarthi-muralidaran-2021-findings", "title": "Findings of the Shared Task on Hope Speech Detection for Equality, Diversity, and Inclusion", "abstract": "Hope is considered significant for the well-being, recuperation and restoration of human life by health professionals. Hope speech reflects the belief that one can discover pathways to their desired objectives and become roused to utilise those pathways. To encourage research in natural language processing towards a positive reinforcement approach, we created a hope speech detection dataset. This paper reports on the shared task of hope speech detection for Tamil, English, and Malayalam languages. The shared task was conducted as a part of the EACL 2021 workshop on Language Technology for Equality, Diversity, and Inclusion (LT-EDI-2021). We summarize here the datasets for this challenge which are openly available at , and present an overview of the methods and the results of the competing systems. To the best of our knowledge, this is the first shared task to conduct hope speech detection.", "phrases": ["hope speech detection", "inclusion", "dravidian language", "emotion", "religion"], "overall_score": 5.670157278939425, "scores": [3.787091463261257, 2.107803650249472, 0.5386399412550369, 0.5356699861551125, 0.5226992073717438], "rank_score": 1.4983808496585245} -{"id": "chen-etal-2017-cost", "title": "Cost Weighting for Neural Machine Translation Domain Adaptation", "abstract": "In this paper, we propose a new domain adaptation technique for neural machine translation called cost weighting, which is appropriate for adaptation scenarios in which a small in-domain data set and a large general-domain data set are available. Cost weighting incorporates a domain classifier into the neural machine translation training algorithm, using features derived from the encoder representation in order to distinguish in-domain from out-of-domain data. Classifier probabilities are used to weight sentences according to their domain similarity when updating the parameters of the neural translation model. 
We compare cost weighting to two traditional domain adaptation techniques developed for statistical machine translation: data selection and sub-corpus weighting. Experiments on two large-data tasks show that both the traditional techniques and our novel proposal lead to significant gains, with cost weighting outperforming the traditional methods.", "phrases": ["domain classifier", "cost weighting", "nmt system"], "overall_score": 3.9532934364599326, "scores": [2.3367513850553467, 1.330536666887045, 0.8266955293498037], "rank_score": 1.4979945270973987} -{"id": "lin-bilmes-2011-class", "title": "A Class of Submodular Functions for Document Summarization", "abstract": "We design a class of submodular functions meant for document summarization tasks. These functions each combine two terms, one which encourages the summary to be representative of the corpus, and the other which positively rewards diversity. Critically, our functions are monotone nondecreasing and submodular, which means that an efficient scalable greedy optimization scheme has a constant factor guarantee of optimality. When evaluated on DUC 2004-2007 corpora, we obtain better than existing state-of-the-art results in both generic and query-focused document summarization. Lastly, we show that several well-established methods for document summarization correspond, in fact, to submodular function optimization, adding further evidence that submodular functions are a natural fit for document summarization.", "phrases": ["submodularity", "function", "document summarization"], "overall_score": 4.328948046678813, "scores": [1.9478255766714199, 1.6203419111557749, 0.924972228390233], "rank_score": 1.4977132387391425} -{"id": "straka-etal-2016-udpipe", "title": "UDPipe: Trainable Pipeline for Processing CoNLL-U Files Performing Tokenization, Morphological Analysis, POS Tagging and Parsing", "abstract": "Automatic natural language processing of large texts often presents recurring challenges in multiple languages: even for most advanced tasks, the texts are first processed by basic processing steps \u2013 from tokenization to parsing. We present an extremely simple-to-use tool consisting of one binary and one model (per language), which performs these tasks for multiple languages without the need for any other external data. UDPipe, a pipeline processing CoNLL-U-formatted files, performs tokenization, morphological analysis, part-of-speech tagging, lemmatization and dependency parsing for nearly all treebanks of Universal Dependencies 1.2 (namely, the whole pipeline is currently available for 32 out of 37 treebanks). In addition, the pipeline is easily trainable with training data in CoNLL-U format (and in some cases also with additional raw corpora) and requires minimal linguistic knowledge on the users' part. The training code is also released.", "phrases": ["tokenization", "morphological analysis", "pos tagging", "sentence segmentation", "baseline udpipe system"], "overall_score": 5.092564048510727, "scores": [2.110394087674913, 1.6539933045891217, 1.5894630168744865, 1.2888556683038987, 0.8437221962704176], "rank_score": 1.4972856547425677} -{"id": "airola-etal-2008-graph", "title": "A Graph Kernel for Protein-Protein Interaction Extraction", "abstract": "In this paper, we propose a graph kernel based approach for the automated extraction of protein-protein interactions (PPI) from scientific literature. 
In contrast to earlier approaches to PPI extraction, the introduced all-dependency-paths kernel has the capability to consider full, general dependency graphs. We evaluate the proposed method across five publicly available PPI corpora providing the most comprehensive evaluation done for a machine learning based PPI-extraction system. Our method is shown to achieve state-of-the-art performance with respect to comparable evaluations, achieving 56.4 F-score and 84.8 AUC on the AImed corpus. Further, we identify several pitfalls that can make evaluations of PPI-extraction systems incomparable, or even invalid. These include incorrect cross-validation strategies and problems related to comparing F-score results achieved on different evaluation resources.", "phrases": ["graph kernel", "protein-protein interaction extraction", "relation extraction"], "overall_score": 4.150058095894678, "scores": [3.005637438947285, 0.9539965034358544, 0.5308172333781196], "rank_score": 1.4968170585870864} -{"id": "jansen-ustalov-2019-textgraphs", "title": "TextGraphs 2019 Shared Task on Multi-Hop Inference for Explanation Regeneration", "abstract": "While automated question answering systems are increasingly able to retrieve answers to natural language questions, their ability to generate detailed human-readable explanations for their answers is still quite limited. The Shared Task on Multi-Hop Inference for Explanation Regeneration tasks participants with regenerating detailed gold explanations for standardized elementary science exam questions by selecting facts from a knowledge base of semi-structured tables. Each explanation contains between 1 and 16 interconnected facts that form an \u201cexplanation graph\u201d spanning core scientific knowledge and detailed world knowledge. It is expected that successfully combining these facts to generate detailed explanations will require advancing methods in multi-hop inference and information combination, and will make use of the supervised training data provided by the WorldTree explanation corpus. The top-performing system achieved a mean average precision (MAP) of 0.56, substantially advancing the state-of-the-art over a baseline information retrieval model. Detailed extended analyses of all submitted systems showed large relative improvements in accessing the most challenging multi-hop inference problems, while absolute performance remains low, highlighting the difficulty of generating detailed explanations through multi-hop reasoning.", "phrases": ["shared task", "multi-hop inference", "explanation regeneration", "knowledge base", "worldtree corpus"], "overall_score": 3.5862499793124902, "scores": [2.8614174500742644, 1.7591193269031797, 1.485087879113899, 0.8322825527916818, 0.5400048167061832], "rank_score": 1.4955824051178417} -{"id": "zhang-etal-2019-aspect", "title": "Aspect-based Sentiment Classification with Aspect-specific Graph Convolutional Networks", "abstract": "Due to their inherent capability in semantic alignment of aspects and their context words, attention mechanism and Convolutional Neural Networks (CNNs) are widely applied for aspect-based sentiment classification. However, these models lack a mechanism to account for relevant syntactical constraints and long-range word dependencies, and hence may mistakenly recognize syntactically irrelevant contextual words as clues for judging aspect sentiment. 
To tackle this problem, we propose to build a Graph Convolutional Network (GCN) over the dependency tree of a sentence to exploit syntactical information and word dependencies. Based on it, a novel aspect-specific sentiment classification framework is proposed. Experiments on three benchmarking collections illustrate that our proposed model has comparable effectiveness to a range of state-of-the-art models, and further demonstrate that both syntactical information and long-range word dependencies are properly captured by the graph convolution structure.", "phrases": ["sentiment classification", "convolutional network", "gcn", "tree-structured syntactic information"], "overall_score": 5.228239568324753, "scores": [1.9406821021777492, 1.9831229394295968, 1.4983308880947115, 0.5589631829587619], "rank_score": 1.495274778165205} -{"id": "kobus-etal-2017-domain", "title": "Domain Control for Neural Machine Translation", "abstract": "Machine translation systems are very sensitive to the domains they were trained on. Several domain adaptation techniques have already been deeply studied. We propose a new technique for neural machine translation (NMT) that we call domain control which is performed at runtime using a unique neural network covering multiple domains. The presented approach shows quality improvements when compared to dedicated domains translating on any of the covered domains and even on out-of-domain data. In addition, model parameters do not need to be re-estimated for each domain, making this effective for real use cases. Evaluation is carried out on English-to-French translation for two different testing scenarios. We first consider the case where an end-user performs translations on a known domain. Secondly, we consider the scenario where the domain is not known and predicted at the sentence level before translating. Results show consistent accuracy improvements for both conditions.", "phrases": ["neural machine translation", "domain control", "special token", "source sentence"], "overall_score": 5.314908716584124, "scores": [2.07475743733899, 1.8349615608968122, 1.1499683829637792, 0.9199338819378358], "rank_score": 1.4949053157843544} -{"id": "popescu-2011-studying", "title": "Studying Translationese at the Character Level", "abstract": "This paper presents a set of preliminary experiments which show that identifying translationese is possible with machine learning methods that work at character level, more precisely methods that use string kernels. But caution is necessary because string kernels very easily can introduce confounding factors.", "phrases": ["character level", "text analysis task", "recent result"], "overall_score": 3.58137151887776, "scores": [2.8456537581306227, 1.104258771861149, 0.5307312572970615], "rank_score": 1.4935479290962776} -{"id": "velikovich-etal-2010-viability", "title": "The viability of web-derived polarity lexicons", "abstract": "We examine the viability of building large polarity lexicons semi-automatically from the web. We begin by describing a graph propagation framework inspired by previous work on constructing polarity lexicons from lexical graphs (Kim and Hovy, 2004; Hu and Liu, 2004; Esuli and Sebastiani, 2009; Blair-Goldensohn et al., 2008; Rao and Ravichandran, 2009). We then apply this technique to build an English lexicon that is significantly larger than those previously studied. 
Crucially, this web-derived lexicon does not require WordNet, part-of-speech taggers, or other language-dependent resources typical of sentiment analysis systems. As a result, the lexicon is not limited to specific word classes -- e.g., adjectives that occur in WordNet -- and in fact contains slang, misspellings, multiword expressions, etc. We evaluate a lexicon derived from English documents, both qualitatively and quantitatively, and show that it provides superior performance to previously studied lexicons, including one derived from WordNet.", "phrases": ["polarity lexicon", "web", "word co-occurrence"], "overall_score": 4.807225235859236, "scores": [2.8936732094192728, 1.0419076453822376, 0.5447646112023588], "rank_score": 1.4934484886679564} -{"id": "bean-riloff-2004-unsupervised", "title": "Unsupervised Learning of Contextual Role Knowledge for Coreference Resolution", "abstract": "We present a coreference resolver called BABAR that uses contextual role knowledge to evaluate possible antecedents for an anaphor. BABAR uses information extraction patterns to identify contextual roles and creates four contextual role knowledge sources using unsupervised learning. These knowledge sources determine whether the contexts surrounding an anaphor and antecedent are compatible. BABAR applies a Dempster-Shafer probabilistic model to make resolutions based on evidence from the contextual role knowledge sources as well as general knowledge sources. Experiments in two domains showed that the contextual role knowledge improved coreference performance, especially on pronouns.", "phrases": ["contextual role knowledge", "coreference resolution", "information extraction pattern", "unsupervised learning"], "overall_score": 3.829915954255242, "scores": [2.595462986632456, 2.05765278199045, 0.7886005442639266, 0.5309800962861114], "rank_score": 1.4931741022932359} -{"id": "qi-etal-2018-cross", "title": "Cross-lingual Lexical Sememe Prediction", "abstract": "Sememes are defined as the minimum semantic units of human languages. As important knowledge sources, sememe-based linguistic knowledge bases have been widely used in many NLP tasks. However, most languages still do not have sememe-based linguistic knowledge bases. Thus we present a task of cross-lingual lexical sememe prediction, aiming to automatically predict sememes for words in other languages. We propose a novel framework to model correlations between sememes and multi-lingual words in low-dimensional semantic space for sememe prediction. Experimental results on real-world datasets show that our proposed model achieves consistent and significant improvements as compared to baseline methods in cross-lingual sememe prediction. The codes and data of this paper are available at .", "phrases": ["lexical sememe prediction", "new language", "hownet"], "overall_score": 2.6739846362550566, "scores": [3.0411202671237665, 0.8979914516086615, 0.5380260034537282], "rank_score": 1.4923792407287186} -{"id": "liu-etal-2015-dependency", "title": "A Dependency-Based Neural Network for Relation Classification", "abstract": "Previous research on relation classification has verified the effectiveness of using dependency shortest paths or subtrees. In this paper, we further explore how to make full use of the combination of this dependency information. We first propose a new structure, termed augmented dependency path (ADP), which is composed of the shortest dependency path between two entities and the subtrees attached to the shortest path. 
To exploit the semantic representation behind the ADP structure, we develop dependency-based neural networks (DepNN): a recursive neural network designed to model the subtrees, and a convolutional neural network to capture the most important features on the shortest path. Experiments on the SemEval-2010 dataset show that our proposed method achieves state-of-the-art results.", "phrases": ["dependency-based neural network", "relation classification", "augmented dependency path", "convolutional neural network"], "overall_score": 4.678417796335433, "scores": [2.642931668970024, 1.9019776847694077, 0.8706841796045632, 0.552738696475238], "rank_score": 1.4920830574548083} -{"id": "xu-etal-2009-using", "title": "Using a Dependency Parser to Improve SMT for Subject-Object-Verb Languages", "abstract": "We introduce a novel precedence reordering approach based on a dependency parser to statistical machine translation systems. Similar to other preprocessing reordering approaches, our method can efficiently incorporate linguistic knowledge into SMT systems without increasing the complexity of decoding. For a set of five subject-object-verb (SOV) order languages, we show significant improvements in BLEU scores when translating from English, compared to other reordering approaches, in state-of-the-art phrase-based SMT systems.", "phrases": ["dependency parser", "smt system", "sov language"], "overall_score": 5.022051048426474, "scores": [2.6914768044893385, 1.2364473002598153, 0.5463347385375495], "rank_score": 1.4914196144289011} -{"id": "cortis-etal-2017-semeval", "title": "SemEval-2017 Task 5: Fine-Grained Sentiment Analysis on Financial Microblogs and News", "abstract": "This paper discusses the \u201cFine-Grained Sentiment Analysis on Financial Microblogs and News\u201d task as part of SemEval-2017, specifically under the \u201cDetecting sentiment, humour, and truth\u201d theme. This task contains two tracks, where the first one concerns Microblog messages and the second one covers News Statements and Headlines. The main goal behind both tracks was to predict the sentiment score for each of the mentioned companies/stocks. The sentiment scores for each text instance adopted floating point values in the range of -1 (very negative/bearish) to 1 (very positive/bullish), with 0 designating neutral sentiment. This task attracted a total of 32 participants, with 25 participating in Track 1 and 29 in Track 2.", "phrases": ["fine-grained sentiment analysis", "financial microblogs", "news"], "overall_score": 2.400221341465674, "scores": [1.9581816174741884, 1.659786462310377, 0.8560560305999094], "rank_score": 1.4913413701281584} -{"id": "heilman-etal-2007-combining", "title": "Combining Lexical and Grammatical Features to Improve Readability Measures for First and Second Language Texts", "abstract": "This work evaluates a system that uses interpolated predictions of reading difficulty that are based on both vocabulary and grammatical features. The combined approach is compared to individual grammar- and language modeling-based approaches. While the vocabulary-based language modeling approach outperformed the grammar-based approach, grammar-based predictions can be combined using confidence scores with the vocabulary-based predictions to produce more accurate predictions of reading difficulty for both first and second language texts. 
The results also indicate that grammatical features may play a more important role in second language readability than in first language readability.", "phrases": ["grammatical feature", "readability", "second language learner"], "overall_score": 5.258606353666796, "scores": [2.085999028799654, 1.4768557894042742, 0.9108281619892441], "rank_score": 1.4912276600643908} -{"id": "haber-etal-2019-photobook", "title": "The PhotoBook Dataset: Building Common Ground through Visually-Grounded Dialogue", "abstract": "This paper introduces the PhotoBook dataset, a large-scale collection of visually-grounded, task-oriented dialogues in English designed to investigate shared dialogue history accumulating during conversation. Taking inspiration from seminal work on dialogue analysis, we propose a data-collection task formulated as a collaborative game prompting two online participants to refer to images utilising both their visual context as well as previously established referring expressions. We provide a detailed description of the task setup and a thorough analysis of the 2,500 dialogues collected. To further illustrate the novel features of the dataset, we propose a baseline model for reference resolution which uses a simple method to take into account shared information accumulated in a reference chain. Our results show that this information is particularly important to resolve later descriptions and underline the need to develop more sophisticated models of common ground in dialogue interaction.", "phrases": ["photobook dataset", "common ground", "visually-grounded dialogue", "dialogue history", "image"], "overall_score": 4.133875342882847, "scores": [2.3317992288900666, 2.203331380578107, 0.9573364944656186, 1.0452451368671176, 0.9171895802368986], "rank_score": 1.4909803642075619} -{"id": "miller-etal-2016-key", "title": "Key-Value Memory Networks for Directly Reading Documents", "abstract": "Directly reading documents and being able to answer questions from them is an unsolved challenge. To avoid its inherent difficulty, question answering (QA) has been directed towards using Knowledge Bases (KBs) instead, which has proven effective. Unfortunately KBs often suffer from being too restrictive, as the schema cannot support certain types of answers, and too sparse, e.g. Wikipedia contains much more information than Freebase. In this work we introduce a new method, Key-Value Memory Networks, that makes reading documents more viable by utilizing different encodings in the addressing and output stages of the memory read operation. To compare using KBs, information extraction or Wikipedia documents directly in a single framework we construct an analysis tool, WikiMovies, a QA dataset that contains raw text alongside a preprocessed KB, in the domain of movies. Our method reduces the gap between all three settings. It also achieves state-of-the-art results on the existing WikiQA benchmark.", "phrases": ["memory network", "key-value memory networks", "key"], "overall_score": 5.7402975710591635, "scores": [0.9492786847678166, 2.3049790041338114, 1.2185299791682302], "rank_score": 1.4909292226899529} -{"id": "collins-roark-2004-incremental", "title": "Incremental Parsing with the Perceptron Algorithm", "abstract": "This paper describes an incremental parsing approach where parameters are estimated using a variant of the perceptron algorithm. A beam-search algorithm is used during both training and decoding phases of the method. 
The perceptron approach was implemented with the same feature set as that of an existing generative model (Roark, 2001a), and experimental results show that it gives competitive performance to the generative model on parsing the Penn treebank. We demonstrate that training a perceptron model to combine with the generative model during search provides a 2.1 percent F-measure improvement over the generative model alone, to 88.8 percent.", "phrases": ["perceptron algorithm", "generative model", "update", "dependency parsing", "competitive accuracy"], "overall_score": 5.674553625667322, "scores": [3.206881999245818, 1.919437231326616, 0.9479553704265107, 0.8522889961064151, 0.5268862962285021], "rank_score": 1.4906899786667727} -{"id": "pandia-etal-2021-pragmatic", "title": "Pragmatic competence of pre-trained language models through the lens of discourse connectives", "abstract": "As pre-trained language models (LMs) continue to dominate NLP, it is increasingly important that we understand the depth of language capabilities in these models. In this paper, we target pre-trained LMs' competence in pragmatics, with a focus on pragmatics relating to discourse connectives. We formulate cloze-style tests using a combination of naturally-occurring data and controlled inputs drawn from psycholinguistics. We focus on testing models' ability to use pragmatic cues to predict discourse connectives, models' ability to understand implicatures relating to connectives, and the extent to which models show humanlike preferences regarding temporal dynamics of connectives. We find that although models predict connectives reasonably well in the context of naturally-occurring data, when we control contexts to isolate high-level pragmatic cues, model sensitivity is much lower. Models also do not show substantial humanlike temporal preferences. Overall, the findings suggest that at present, dominant pre-training paradigms do not result in substantial pragmatic competence in our models.", "phrases": ["pre-trained language model", "discourse connective", "pragmatic competence"], "overall_score": 2.066506191347117, "scores": [1.858950119629174, 1.817334223964919, 0.7957230077401451], "rank_score": 1.4906691171114126} -{"id": "balahur-etal-2010-sentiment", "title": "Sentiment Analysis in the News", "abstract": "Recent years have brought a significant growth in the volume of research in sentiment analysis, mostly on highly subjective text types (movie or product reviews). The main difference these texts have with news articles is that their target is clearly defined and unique across the text. Following different annotation efforts and the analysis of the issues encountered, we realised that news opinion mining is different from that of other text types. We identified three subtasks that need to be addressed: definition of the target; separation of the good and bad news content from the good and bad sentiment expressed on the target; and analysis of clearly marked opinion that is expressed explicitly, not needing interpretation or the use of world knowledge. Furthermore, we distinguish three different possible views on newspaper articles \u2015 author, reader and text, which have to be addressed differently at the time of analysing sentiment. Given these definitions, we present work on mining opinions about entities in English language news, in which we apply these concepts. 
Results showed that this idea is more appropriate in the context of news opinion mining and that the approaches taking this into consideration produce a better performance.", "phrases": ["product review", "news article", "sentiment analysis", "idiom"], "overall_score": 4.607507489131836, "scores": [3.351880966798528, 1.5535430044649716, 0.5311541968173843, 0.5258213248486681], "rank_score": 1.4905998732323882} -{"id": "li-etal-2017-adversarial", "title": "Adversarial Learning for Neural Dialogue Generation", "abstract": "We apply adversarial training to open-domain dialogue generation, training a system to produce sequences that are indistinguishable from human-generated dialogue utterances. We cast the task as a reinforcement learning problem where we jointly train two systems: a generative model to produce response sequences, and a discriminator\u2014analogous to the human evaluator in the Turing test\u2014to distinguish between the human-generated dialogues and the machine-generated ones. In this generative adversarial network approach, the outputs from the discriminator are used to encourage the system towards more human-like dialogue. Further, we investigate models for adversarial evaluation that use success in fooling an adversary as a dialogue evaluation metric, while avoiding a number of potential pitfalls. Experimental results on several metrics, including adversarial evaluation, demonstrate that the adversarially-trained system generates higher-quality responses than previous baselines.", "phrases": ["neural dialogue generation", "adversarial learning", "text generation", "dialogue model"], "overall_score": 5.567166620528982, "scores": [2.347592742944091, 1.9209096219634547, 1.110050031441273, 0.5793488513124048], "rank_score": 1.489475311915306} -{"id": "shaar-etal-2020-known", "title": "That is a Known Lie: Detecting Previously Fact-Checked Claims", "abstract": "The recent proliferation of \u201cfake news\u201d has triggered a number of responses, most notably the emergence of several manual fact-checking initiatives. As a result and over time, a large number of fact-checked claims have been accumulated, which increases the likelihood that a new claim in social media or a new statement by a politician might have already been fact-checked by some trusted fact-checking organization, as viral claims often come back after a while in social media, and politicians like to repeat their favorite statements, true or false, over and over again. As manual fact-checking is very time-consuming (and fully automatic fact-checking has credibility issues), it is important to try to save this effort and to avoid wasting time on claims that have already been fact-checked. Interestingly, despite the importance of the task, it has been largely ignored by the research community so far. Here, we aim to bridge this gap. In particular, we formulate the task and we discuss how it relates to, but also differs from, previous work. We further create a specialized dataset, which we release to the research community. 
Finally, we present learning-to-rank experiments that demonstrate sizable improvements over state-of-the-art retrieval and textual similarity approaches.", "phrases": ["fact-checking", "claim", "misinformation"], "overall_score": 3.571278892301745, "scores": [3.276131675804316, 0.6182076436927603, 0.5736776112015137], "rank_score": 1.48933897689953} -{"id": "nothman-etal-2009-analysing", "title": "Analysing Wikipedia and Gold-Standard Corpora for NER Training", "abstract": "Named entity recognition (NER) for English typically involves one of three gold standards: MUC, CoNLL, or BBN, all created by costly manual annotation. Recent work has used Wikipedia to automatically create a massive corpus of named entity annotated text. \n \nWe present the first comprehensive cross-corpus evaluation of NER. We identify the causes of poor cross-corpus performance and demonstrate ways of making them more compatible. Using our process, we develop a Wikipedia corpus which outperforms gold standard corpora on cross-corpus evaluation by up to 11%.", "phrases": ["wikipedia", "gold-standard corpora", "ner training"], "overall_score": 3.095178515383361, "scores": [2.7040564858386835, 0.9039385931601327, 0.8574036158108237], "rank_score": 1.4884662316032131} -{"id": "khalifa-etal-2016-large", "title": "A Large Scale Corpus of Gulf Arabic", "abstract": "Most Arabic natural language processing tools and resources are developed to serve Modern Standard Arabic (MSA), which is the official written language in the Arab World. Some Dialectal Arabic varieties, notably Egyptian Arabic, have received some attention lately and have a growing collection of resources that include annotated corpora and morphological analyzers and taggers. Gulf Arabic, however, lags behind in that respect. In this paper, we present the Gumar Corpus, a large-scale corpus of Gulf Arabic consisting of 110 million words from 1,200 forum novels. We annotate the corpus for sub-dialect information at the document level. We also present results of a preliminary study in the morphological annotation of Gulf Arabic which includes developing guidelines for a conventional orthography. The text of the corpus is publicly browsable through a web interface we developed for it.", "phrases": ["gulf arabic", "large-scale corpus", "gulf dialect"], "overall_score": 4.216833384105829, "scores": [3.400070206969485, 0.5407781469729998, 0.5242231447700663], "rank_score": 1.488357166237517} -{"id": "speriosu-etal-2011-twitter", "title": "Twitter Polarity Classification with Label Propagation over Lexical Links and the Follower Graph", "abstract": "There is high demand for automated tools that assign polarity to microblog content such as tweets (Twitter posts), but this is challenging due to the terseness and informality of tweets in addition to the wide variety and rapid evolution of language in Twitter. It is thus impractical to use standard supervised machine learning techniques dependent on annotated training examples. We do without such annotations by using label propagation to incorporate labels from a maximum entropy classifier trained on noisy labels and knowledge about word types encoded in a lexicon, in combination with the Twitter follower graph. 
Results on polarity classification for several datasets show that our label propagation approach rivals a model supervised with in-domain annotated tweets, and it outperforms the noisily supervised classifier it exploits as well as a lexicon-based polarity ratio classifier.", "phrases": ["polarity classification", "label propagation", "follower graph", "twitter", "n-gram"], "overall_score": 4.458474515476907, "scores": [3.26089344833872, 0.8130138676940282, 1.3425647508798029, 1.1703411197876907, 0.8545635925864971], "rank_score": 1.4882753558573478} -{"id": "ji-grishman-2008-refining", "title": "Refining Event Extraction through Cross-Document Inference", "abstract": "We apply the hypothesis of \u201cOne Sense Per Discourse\u201d (Yarowsky, 1995) to information extraction (IE), and extend the scope of \u201cdiscourse\u201d from one single document to a cluster of topically-related documents. We employ a similar approach to propagate consistent event arguments across sentences and documents. Combining global evidence from related documents with local decisions, we design a simple scheme to conduct cross-document inference for improving the ACE event extraction task. Without using any additional labeled data, this new approach obtained 7.6% higher F-Measure in trigger labeling and 6% higher F-Measure in argument labeling over a state-of-the-art IE system which extracts events independently for each sentence.", "phrases": ["event extraction", "cross-document inference", "trigger", "topic-related document", "rule-based approach"], "overall_score": 5.963795604713967, "scores": [0.9592423871111214, 1.856618063673368, 1.7356057173717017, 1.5001935518367706, 1.3894430396818602], "rank_score": 1.4882205519349643} -{"id": "chen-ji-2009-one", "title": "Can One Language Bootstrap the Other: A Case Study on Event Extraction", "abstract": "This paper proposes a new bootstrapping framework using cross-lingual information projection. We demonstrate that this framework is particularly effective for a challenging NLP task which is situated at the end of a pipeline and thus suffers from the errors propagated from upstream processing and has a low-performance baseline. Using Chinese event extraction as a case study and bitexts as a new source of information, we present three bootstrapping techniques. We first conclude that the standard mono-lingual bootstrapping approach is not so effective. Then we exploit a second approach that potentially benefits from the extra information captured by an English event extraction system and projected into Chinese. Such a cross-lingual scheme produces significant performance gain. Finally we show that the combination of mono-lingual and cross-lingual information in bootstrapping can further enhance the performance. Ultimately this new framework obtained 10.1% relative improvement in trigger labeling (F-measure) and 9.5% relative improvement in argument labeling.", "phrases": ["bootstrapping", "case study", "event extraction", "extra information", "cross-lingual bootstrapping"], "overall_score": 3.697009470013407, "scores": [2.9137505857592543, 1.737571951864319, 1.3026804247995303, 0.8684581710617285, 0.6164691585823115], "rank_score": 1.4877860584134286} -{"id": "budzianowski-etal-2018-multiwoz", "title": "MultiWOZ - A Large-Scale Multi-Domain Wizard-of-Oz Dataset for Task-Oriented Dialogue Modelling", "abstract": "Even though machine learning has become the major scene in the dialogue research community, the real breakthrough has been blocked by the scale of data available. 
To address this fundamental obstacle, we introduce the Multi-Domain Wizard-of-Oz dataset (MultiWOZ), a fully-labeled collection of human-human written conversations spanning over multiple domains and topics. At a size of 10k dialogues, it is at least one order of magnitude larger than all previous annotated task-oriented corpora. The contribution of this work, apart from the open-sourced dataset, is two-fold: firstly, a detailed description of the data collection procedure along with a summary of data structure and analysis is provided. The proposed data-collection pipeline is entirely based on crowd-sourcing without the need of hiring professional annotators; secondly, a set of benchmark results of belief tracking, dialogue act and response generation is reported, which shows the usability of the data and sets a baseline for future studies.", "phrases": ["wizard-of-oz", "task-oriented dialogue", "annotated task-oriented corpora", "multiwoz dataset", "dialogue dataset"], "overall_score": 7.103463957040357, "scores": [2.214911066437839, 2.1336099204033414, 1.3627265486632336, 1.1665242940921254, 0.5539925590816928], "rank_score": 1.4863528777356465} -{"id": "moore-lewis-2010-intelligent", "title": "Intelligent Selection of Language Model Training Data", "abstract": "We address the problem of selecting non-domain-specific language model training data to build auxiliary language models for use in tasks such as machine translation. Our approach is based on comparing the cross-entropy, according to domain-specific and non-domain-specific language models, for each sentence of the text source used to produce the latter language model. We show that this produces better language models, trained on less data, than both random data selection and two other previously proposed methods.", "phrases": ["language model", "intelligent selection", "cross-entropy difference", "in-domain data", "smt system"], "overall_score": 6.432589412601303, "scores": [0.8825532307971853, 2.5454261709975223, 1.6687703850038744, 1.2691293956726322, 1.0607953633413953], "rank_score": 1.4853349091625219} -{"id": "yih-etal-2012-polarity", "title": "Polarity Inducing Latent Semantic Analysis", "abstract": "Existing vector space models typically map synonyms and antonyms to similar word vectors, and thus fail to represent antonymy. We introduce a new vector space representation where antonyms lie on opposite sides of a sphere: in the word vector space, synonyms have cosine similarities close to one, while antonyms are close to minus one. \n \nWe derive this representation with the aid of a thesaurus and latent semantic analysis (LSA). Each entry in the thesaurus -- a word sense along with its synonyms and antonyms -- is treated as a \"document,\" and the resulting document collection is subjected to LSA. The key contribution of this work is to show how to assign signs to the entries in the co-occurrence matrix on which LSA operates, so as to induce a subspace with the desired property. \n \nWe evaluate this procedure with the Graduate Record Examination questions of (Mohammed et al., 2008) and find that the method improves on the results of that study. Further improvements result from refining the subspace representation with discriminative training, and augmenting the training data with general newspaper text. 
Altogether, we improve on the best previous results by 11 points absolute in F measure.", "phrases": ["latent semantic analysis", "thesaurus", "polarity", "pilsa", "negative similarity"], "overall_score": 4.521715353993787, "scores": [2.9344605243547726, 1.7678483590453566, 1.0896502747437373, 1.0722084092270157, 0.5618170434946433], "rank_score": 1.485196922173105} -{"id": "jiang-etal-2013-discriminative", "title": "Discriminative Learning with Natural Annotations: Word Segmentation as a Case Study", "abstract": "Structural information in web text provides natural annotations for NLP problems such as word segmentation and parsing. In this paper we propose a discriminative learning algorithm to take advantage of the linguistic knowledge in large amounts of natural annotations on the Internet. It utilizes the Internet as an external corpus with massive (although slight and sparse) natural annotations, and enables a classifier to evolve on the large-scaled and real-time updated web text. With Chinese word segmentation as a case study, experiments show that the segmenter enhanced with the Chinese wikipedia achieves significant improvement on a series of testing sets from different domains, even with a single classifier and local features.", "phrases": ["natural annotation", "case study", "web text", "segmentation accuracy"], "overall_score": 3.9194435131784107, "scores": [3.660696107028819, 0.8114041401312093, 0.8764741329892032, 0.5920976457211818], "rank_score": 1.4851680064676036} -{"id": "goldwater-mcclosky-2005-improving", "title": "Improving Statistical MT through Morphological Analysis", "abstract": "In statistical machine translation, estimating word-to-word alignment probabilities for the translation model can be difficult due to the problem of sparse data: most words in a given corpus occur at most a handful of times. With a highly inflected language such as Czech, this problem can be particularly severe. In addition, much of the morphological variation seen in Czech words is not reflected in either the morphology or syntax of a language like English. In this work, we show that using morphological analysis to modify the Czech input can improve a Czech-English machine translation system. We investigate several different methods of incorporating morphological information, and show that a system that combines these methods yields the best results. Our final system achieves a BLEU score of .333, as compared to .270 for the baseline word-to-word system.", "phrases": ["morphological analysis", "machine translation", "czech", "analyzer", "arabic"], "overall_score": 5.192205747961526, "scores": [3.7976315871126105, 1.6195119796246384, 0.9276518989670961, 0.5446856173210598, 0.5353645045869513], "rank_score": 1.484969117522471} -{"id": "upadhyay-etal-2018-robust", "title": "Robust Cross-Lingual Hypernymy Detection Using Dependency Context", "abstract": "Cross-lingual Hypernymy Detection involves determining if a word in one language (\u201cfruit\u201d) is a hypernym of a word in another language (\u201cpomme\u201d i.e. apple in French). The ability to detect hypernymy cross-lingually can aid in solving cross-lingual versions of tasks such as textual entailment and event coreference. We propose BiSparse-Dep, a family of unsupervised approaches for cross-lingual hypernymy detection, which learns sparse, bilingual word embeddings based on dependency contexts. We show that BiSparse-Dep can significantly improve performance on this task, compared to approaches based only on lexical context. 
Our approach is also robust, showing promise for low-resource settings: our dependency-based embeddings can be learned using a parser trained on related languages, with negligible loss in performance. We also crowd-source a challenging dataset for this task on four languages \u2013 Russian, French, Arabic, and Chinese. Our embeddings and datasets are publicly available.", "phrases": ["cross-lingual hypernymy detection", "dependency context", "bilingual word embedding"], "overall_score": 3.086666031102109, "scores": [2.1298809105227736, 1.781424857113804, 0.5418120083148537], "rank_score": 1.4843725919838107} -{"id": "bak-etal-2012-self", "title": "Self-Disclosure and Relationship Strength in Twitter Conversations", "abstract": "In social psychology, it is generally accepted that one discloses more of his/her personal information to someone in a strong relationship. We present a computational framework for automatically analyzing such self-disclosure behavior in Twitter conversations. Our framework uses text mining techniques to discover topics, emotions, sentiments, lexical patterns, as well as personally identifiable information (PII) and personally embarrassing information (PEI). Our preliminary results illustrate that in relationships with high relationship strength, Twitter users show significantly more frequent behaviors of self-disclosure.", "phrases": ["relationship strength", "twitter conversation", "self-disclosure"], "overall_score": 2.6595653374506525, "scores": [2.7099976210540175, 0.8067361471016896, 0.9362612623599813], "rank_score": 1.484331676838563} -{"id": "mostafazadeh-etal-2017-image", "title": "Image-Grounded Conversations: Multimodal Context for Natural Question and Response Generation", "abstract": "The popularity of image sharing on social media and the engagement it creates between users reflect the important role that visual context plays in everyday conversations. We present a novel task, Image Grounded Conversations (IGC), in which natural-sounding conversations are generated about a shared image. To benchmark progress, we introduce a new multiple reference dataset of crowd-sourced, event-centric conversations on images. IGC falls on the continuum between chit-chat and goal-directed conversation models, where visual grounding constrains the topic of conversation to event-driven utterances. Experiments with models trained on social media data show that the combination of visual and textual context enhances the quality of generated conversational turns. In human evaluation, the gap between human performance and that of both neural and retrieval architectures suggests that multi-modal IGC presents an interesting challenge for dialog research.", "phrases": ["conversation", "response generation", "agent", "visual dialog"], "overall_score": 4.518903863602194, "scores": [3.105807433010935, 0.8541763521895972, 1.0535792392329044, 0.9235308299068278], "rank_score": 1.4842734635850663} -{"id": "novikova-etal-2017-need", "title": "Why We Need New Evaluation Metrics for NLG", "abstract": "The majority of NLG evaluation relies on automatic metrics, such as BLEU . In this paper, we motivate the need for novel, system- and data-independent automatic evaluation methods: We investigate a wide range of metrics, including state-of-the-art word-based and novel grammar-based ones, and demonstrate that they only weakly reflect human judgements of system outputs as generated by data-driven, end-to-end NLG. We also show that metric performance is data- and system-specific. 
Nevertheless, our results also suggest that automatic metrics perform reliably at system-level and can support system development by finding cases where a system performs poorly.", "phrases": ["nlg", "judgement", "human evaluation", "natural language generation", "open research problem"], "overall_score": 5.774138157303623, "scores": [3.294124472787019, 1.650669610671573, 1.0678946492195969, 0.855417548502456, 0.5501937879500016], "rank_score": 1.4836600138261293} -{"id": "li-2011-parsing", "title": "Parsing the Internal Structure of Words: A New Paradigm for Chinese Word Segmentation", "abstract": "Lots of Chinese characters are very productive in that they can form many structured words either as prefixes or as suffixes. Previous research in Chinese word segmentation mainly focused on identifying only the word boundaries without considering the rich internal structures of many words. In this paper we argue that this is unsatisfying in many ways, both practically and theoretically. Instead, we propose that word structures should be recovered in morphological analysis. An elegant approach for doing this is given and the result is shown to be promising enough for encouraging further effort in this direction. Our probability model is trained with the Penn Chinese Treebank and actually is able to parse both word and phrase structures in a unified way.", "phrases": ["internal structure", "chinese word segmentation", "cws"], "overall_score": 3.414167933580093, "scores": [2.9957422524120543, 0.9299021508533303, 0.5226184782692029], "rank_score": 1.4827542938448623} -{"id": "sun-etal-2021-chinesebert", "title": "ChineseBERT: Chinese Pretraining Enhanced by Glyph and Pinyin Information", "abstract": "Recent pretraining models in Chinese neglect two important aspects specific to the Chinese language: glyph and pinyin, which carry significant syntax and semantic information for language understanding. In this work, we propose ChineseBERT, which incorporates both the glyph and pinyin information of Chinese characters into language model pretraining. The glyph embedding is obtained based on different fonts of a Chinese character, being able to capture character semantics from the visual features, and the pinyin embedding characterizes the pronunciation of Chinese characters, which handles the highly prevalent heteronym phenomenon in Chinese (the same character has different pronunciations with different meanings). Pretrained on large-scale unlabeled Chinese corpus, the proposed ChineseBERT model yields significant performance boost over baseline models with fewer training steps. The proposed model achieves new SOTA performances on a wide range of Chinese NLP tasks, including machine reading comprehension, natural language inference, text classification, sentence pair matching, and competitive performances in named entity recognition and word segmentation.", "phrases": ["glyph", "pinyin information", "chinese character"], "overall_score": 2.65615711608663, "scores": [2.209886054875094, 1.709137924285809, 0.528264557672086], "rank_score": 1.482429512277663} -{"id": "wang-etal-2014-knowledge", "title": "Knowledge Graph and Text Jointly Embedding", "abstract": "We examine the embedding approach to reason new relational facts from a largescale knowledge graph and a text corpus. We propose a novel method of jointly embedding entities and words into the same continuous vector space. 
The embedding process attempts to preserve the relations between entities in the knowledge graph and the co-occurrences of words in the text corpus. Entity names and Wikipedia anchors are utilized to align the embeddings of entities and words in the same space. Large scale experiments on Freebase and a Wikipedia/NY Times corpus show that jointly embedding brings promising improvement in the accuracy of predicting facts, compared to separately embedding knowledge graphs and text. Particularly, jointly embedding enables the prediction of facts containing entities out of the knowledge graph, which cannot be handled by previous embedding methods. At the same time, concerning the quality of the word embeddings, experiments on the analogical reasoning task show that jointly embedding is comparable to or slightly better than word2vec (Skip-Gram).", "phrases": ["vector space", "freebase", "knowledge graph", "entity embedding", "link prediction task"], "overall_score": 4.828717967550571, "scores": [2.8258622286497923, 1.7435629362022596, 1.2900083628842038, 0.9645797789933376, 0.5863226228541073], "rank_score": 1.48206718591674} -{"id": "chiang-2010-learning", "title": "Learning to Translate with Source and Target Syntax", "abstract": "Statistical translation models that try to capture the recursive structure of language have been widely adopted over the last few years. These models make use of varying amounts of information from linguistic theory: some use none at all, some use information about the grammar of the target language, some use information about the grammar of the source language. But progress has been slower on translation models that are able to learn the relationship between the grammars of both the source and target language. We discuss the reasons why this has been a challenge, review existing attempts to meet this challenge, and show how some old and new ideas can be combined into a simple approach that uses both source and target syntax for significant improvements in translation accuracy.", "phrases": ["target syntax", "translation quality", "syntactic label"], "overall_score": 5.0892738006082325, "scores": [3.0084017773274727, 0.9008210113881647, 0.5368687348297404], "rank_score": 1.4820305078484592} -{"id": "tsur-rappoport-2007-using", "title": "Using Classifier Features for Studying the Effect of Native Language on the Choice of Written Second Language Words", "abstract": "We apply machine learning techniques to study language transfer, a major topic in the theory of Second Language Acquisition (SLA). Using an SVM for the problem of native language classification, we show that a careful analysis of the effects of various features can lead to scientific insights. In particular, we demonstrate that character bigrams alone allow classification levels of about 66% for a 5-class task, even when content and function word differences are accounted for. This may show that native language has a strong effect on the word choice of people writing in a second language.", "phrases": ["native language", "character n-gram", "language writing"], "overall_score": 4.106348970839561, "scores": [2.616270198222684, 1.273958318757136, 0.5529284553124797], "rank_score": 1.4810523240974334} -{"id": "liu-etal-2020-coach", "title": "Coach: A Coarse-to-Fine Approach for Cross-domain Slot Filling", "abstract": "As an essential task in task-oriented dialog systems, slot filling requires extensive training data in a certain domain. However, such data are not always available. 
Hence, cross-domain slot filling has naturally arisen to cope with this data scarcity problem. In this paper, we propose a Coarse-to-fine approach (Coach) for cross-domain slot filling. Our model first learns the general pattern of slot entities by detecting whether the tokens are slot entities or not. It then predicts the specific types for the slot entities. In addition, we propose a template regularization approach to improve the adaptation robustness by regularizing the representation of utterances based on utterance templates. Experimental results show that our model significantly outperforms state-of-the-art approaches in slot filling. Furthermore, our model can also be applied to the cross-domain named entity recognition task, and it achieves better adaptation performance than other existing baselines. The code is available at .", "phrases": ["coarse-to-fine approach", "cross-domain slot", "coach"], "overall_score": 2.6536599207825557, "scores": [1.7906514575639572, 1.7782382218332102, 0.8742177237284886], "rank_score": 1.4810358010418854} -{"id": "tetreault-etal-2013-report", "title": "A Report on the First Native Language Identification Shared Task", "abstract": "Native Language Identification (NLI) is the task of automatically identifying the native language (L1) of an individual based on their language production in a learned language. It is typically framed as a classification task where the set of L1s is known a priori. Two previous shared tasks on NLI have been organized where the aim was to identify the L1 of learners of English based on essays (2013) and spoken responses (2016) they provided during a standardized assessment of academic English proficiency. The 2017 shared task combines the inputs from the two prior tasks for the first time. There are three tracks: NLI on the essay only, NLI on the spoken response only (based on a transcription of the response and i-vector acoustic features), and NLI using both responses. We believe this makes for a more interesting shared task while building on the methods and results from the previous two shared tasks. In this paper, we report the results of the shared task. A total of 19 teams competed across the three different sub-tasks. The fusion track showed that combining the written and spoken responses provides a large boost in prediction accuracy. Multiple classifier systems (e.g. ensembles and meta-classifiers) were the most effective in all tasks, with most based on traditional classifiers (e.g. SVMs) with lexical/syntactic features.", "phrases": ["report", "native language identification", "learner", "nli shared task", "text classification"], "overall_score": 5.603772789312355, "scores": [1.8723566485565288, 2.4771903199350227, 1.61144918715774, 0.8910850888680533, 0.5521100506081644], "rank_score": 1.480838259025102} -{"id": "hoang-etal-2018-iterative", "title": "Iterative Back-Translation for Neural Machine Translation", "abstract": "We present iterative back-translation, a method for generating increasingly better synthetic parallel data from monolingual data to train neural machine translation systems. Our proposed method is very simple yet effective and highly applicable in practice. 
We demonstrate improvements in neural machine translation quality in both high- and low-resource scenarios, including the best reported BLEU scores for the WMT 2017 German\u2194English tasks.", "phrases": ["neural machine translation", "monolingual data", "bleu score", "iterative back-translation", "iteration"], "overall_score": 5.305631625891535, "scores": [3.883839609410047, 0.952637453370306, 1.4310632916969308, 0.5797057284811601, 0.5555773944826484], "rank_score": 1.4805646954882186} -{"id": "zhang-etal-2021-ambert", "title": "AMBERT: A Pre-trained Language Model with Multi-Grained Tokenization", "abstract": "Pre-trained language models such as BERT have exhibited remarkable performances in many tasks in natural language understanding (NLU). The tokens in the models are usually fine-grained in the sense that for languages like English they are words or sub-words and for languages like Chinese they are characters. In English, for example, there are multi-word expressions which form natural lexical units and thus the use of coarse-grained tokenization also appears to be reasonable. In fact, both fine-grained and coarse-grained tokenizations have advantages and disadvantages for learning of pre-trained language models. In this paper, we propose a novel pre-trained language model, referred to as AMBERT (A Multi-grained BERT), on the basis of both fine-grained and coarse-grained tokenizations. For English, AMBERT takes both the sequence of words (fine-grained tokens) and the sequence of phrases (coarse-grained tokens) as input after tokenization, employs one encoder for processing the sequence of words and the other encoder for processing the sequence of the phrases, utilizes shared parameters between the two encoders, and finally creates a sequence of contextualized representations of the words and a sequence of contextualized representations of the phrases. Experiments have been conducted on benchmark datasets for Chinese and English, including CLUE, GLUE, SQuAD and RACE. The results show that AMBERT outperforms the existing best performing models in almost all cases; the improvements are particularly significant for Chinese.", "phrases": ["language model", "tokenization", "chinese", "ambert"], "overall_score": 2.877380944063563, "scores": [2.4294445901705104, 1.8898227116053767, 1.0458046217732633, 0.5496532665331428], "rank_score": 1.4786812975205734} -{"id": "le-roux-etal-2014-syntactic", "title": "Syntactic Parsing and Compound Recognition via Dual Decomposition: Application to French", "abstract": "In this paper we show how the task of syntactic parsing of non-segmented texts, including compound recognition, can be represented as constraints between phrase-structure parsers and CRF sequence labellers. In order to build a joint system we use dual decomposition, a way to combine several elementary systems which has proven successful in various NLP tasks. We evaluate this proposition on the French SPMRL corpus. 
This method compares favorably with pipeline architectures and improves state-of-the-art results.", "phrases": ["compound recognition", "dual decomposition", "syntactic parsing"], "overall_score": 2.3795653484365134, "scores": [1.7974262661725846, 1.7337989872169974, 0.9042959867637815], "rank_score": 1.478507080051121} -{"id": "dolan-etal-2004-unsupervised", "title": "Unsupervised Construction of Large Paraphrase Corpora: Exploiting Massively Parallel News Sources", "abstract": "We investigate unsupervised techniques for acquiring monolingual sentence-level paraphrases from a corpus of temporally and topically clustered news articles collected from thousands of web-based news sources. Two techniques are employed: (1) simple string edit distance, and (2) a heuristic strategy that pairs initial (presumably summary) sentences from different news stories in the same cluster. We evaluate both datasets using a word alignment algorithm and a metric borrowed from machine translation. Results show that edit distance data is cleaner and more easily-aligned than the heuristic data, with an overall alignment error rate (AER) of 11.58% on a similarly-extracted test set. On test data extracted by the heuristic strategy, however, performance of the two training sets is similar, with AERs of 13.2% and 14.7% respectively. Analysis of 100 pairs of sentences from each set reveals that the edit distance data lacks many of the complex lexical and syntactic alternations that characterize monolingual paraphrase. The summary sentences, while less readily alignable, retain more of the non-trivial alternations that are of greatest interest for learning paraphrase relationships.", "phrases": ["paraphrase", "news article", "sentence pair", "mrpc corpus"], "overall_score": 6.0257530167285696, "scores": [2.363873884621587, 1.5278322648967404, 1.1201642229125228, 0.8992984147346966], "rank_score": 1.4777921967913867} -{"id": "denkowski-lavie-2011-meteor", "title": "Meteor 1.3: Automatic Metric for Reliable Optimization and Evaluation of Machine Translation Systems", "abstract": "This paper describes Meteor 1.3, our submission to the 2011 EMNLP Workshop on Statistical Machine Translation automatic evaluation metric tasks. New metric features include improved text normalization, higher-precision paraphrase matching, and discrimination between content and function words. We include Ranking and Adequacy versions of the metric shown to have high correlation with human judgments of translation quality as well as a more balanced Tuning version shown to outperform BLEU in minimum error rate training for a phrase-based Urdu-English system.", "phrases": ["human judgment", "meteor", "monolingual alignment"], "overall_score": 3.5411293054371, "scores": [2.9959659766485514, 0.8496103732478754, 0.5847205178703153], "rank_score": 1.476765622588914} -{"id": "martelli-etal-2021-semeval", "title": "SemEval-2021 Task 2: Multilingual and Cross-lingual Word-in-Context Disambiguation (MCL-WiC)", "abstract": "In this paper, we introduce the first SemEval task on Multilingual and Cross-Lingual Word-in-Context disambiguation (MCL-WiC). This task allows the largely under-investigated inherent ability of systems to discriminate between word senses within and across languages to be evaluated, dropping the requirement of a fixed sense inventory. Framed as a binary classification, our task is divided into two parts. 
In the multilingual sub-task, participating systems are required to determine whether two target words, each occurring in a different context within the same language, express the same meaning or not. Instead, in the cross-lingual part, systems are asked to perform the task in a cross-lingual scenario, in which the two target words and their corresponding contexts are provided in two different languages. We illustrate our task, as well as the construction of our manually-created dataset including five languages, namely Arabic, Chinese, English, French and Russian, and the results of the participating systems. Datasets and results are available at: .", "phrases": ["word-in-context disambiguation", "semeval task", "different language"], "overall_score": 3.069801479317091, "scores": [2.4546352100874103, 1.0431487417181269, 0.9310034189188336], "rank_score": 1.4762624569081237} -{"id": "ji-smith-2017-neural", "title": "Neural Discourse Structure for Text Categorization", "abstract": "We show that discourse structure, as defined by Rhetorical Structure Theory and provided by an existing discourse parser, benefits text categorization. Our approach uses a recursive neural network and a newly proposed attention mechanism to compute a representation of the text that focuses on salient content, from the perspective of both RST and the task. Experiments consider variants of the approach and illustrate its strengths and weaknesses.", "phrases": ["discourse structure", "text categorization", "recursive neural network", "sentiment analysis", "downstream task"], "overall_score": 4.421434363993225, "scores": [3.219075247644164, 1.4142348986159188, 1.3156388376904071, 0.8918247780043673, 0.5387814857306279], "rank_score": 1.4759110495370973} -{"id": "carpuat-wu-2005-word", "title": "Word Sense Disambiguation vs. Statistical Machine Translation", "abstract": "We directly investigate a subject of much recent debate: do word sense disambiguation models help statistical machine translation quality? We present empirical results casting doubt on this common, but unproved, assumption. Using a state-of-the-art Chinese word sense disambiguation model to choose translation candidates for a typical IBM statistical MT system, we find that word sense disambiguation does not yield significantly better translation quality than the statistical machine translation system alone. Error analysis suggests several key factors behind this surprising finding, including inherent limitations of current statistical MT architectures.", "phrases": ["translation quality", "word sense disambiguation", "wsd", "smt system", "parallel sentence"], "overall_score": 4.969063556316865, "scores": [2.031981062666789, 1.8605685591280745, 1.6287262523079944, 1.2830523844627237, 0.5740902220239679], "rank_score": 1.4756836961179098} -{"id": "ma-etal-2018-stack", "title": "Stack-Pointer Networks for Dependency Parsing", "abstract": "We introduce a novel architecture for dependency parsing: stack-pointer networks (StackPtr). Combining pointer networks (Vinyals et al., 2015) with an internal stack, the proposed model first reads and encodes the whole sentence, then builds the dependency tree top-down (from root-to-leaf) in a depth-first fashion. The stack tracks the status of the depth-first search and the pointer networks select one child for the word at the top of the stack at each step. 
The StackPtr parser benefits from the information of the whole sentence and all previously derived subtree structures, and removes the left-to-right restriction in classical transition-based parsers. Yet the number of steps for building any (non-projective) parse tree is linear in the length of the sentence, just as with other transition-based parsers, yielding an efficient decoding algorithm with O(n^2) time complexity. We evaluate our model on 29 treebanks spanning 20 languages and different dependency annotation schemas, and achieve state-of-the-art performances on 21 of them.", "phrases": ["dependency parsing", "pointer network", "manner", "error propagation", "various neural architecture"], "overall_score": 4.492735304767237, "scores": [3.2232153473832446, 1.7161648763002357, 1.3362506399214404, 0.5564157129164542, 0.5463442822543801], "rank_score": 1.475678171755151} -{"id": "zhu-etal-2008-active", "title": "Active Learning with Sampling by Uncertainty and Density for Word Sense Disambiguation and Text Classification", "abstract": "This paper addresses two issues of active learning. Firstly, to solve the problem that uncertainty sampling often fails by selecting outliers, this paper presents a new selective sampling technique, sampling by uncertainty and density (SUD), in which a k-Nearest-Neighbor-based density measure is adopted to determine whether an unlabeled example is an outlier. Secondly, a technique of sampling by clustering (SBC) is applied to build a representative initial training data set for active learning. Finally, we implement a new algorithm of active learning with SUD and SBC techniques. The experimental results from three real-world data sets show that our method outperforms competing methods, particularly at the early stages of active learning.", "phrases": ["density", "text classification", "active learning"], "overall_score": 3.2394146343165877, "scores": [2.222854272141391, 1.40411784971601, 0.7959912853385839], "rank_score": 1.474321135731995} -{"id": "ruder-etal-2018-discriminative", "title": "A Discriminative Latent-Variable Model for Bilingual Lexicon Induction", "abstract": "We introduce a novel discriminative latent-variable model for the task of bilingual lexicon induction. Our model combines the bipartite matching dictionary prior of Haghighi et al. (2008) with a state-of-the-art embedding-based approach. To train the model, we derive an efficient Viterbi EM algorithm. We provide empirical improvements on six language pairs under two metrics and show that the prior theoretically and empirically helps to mitigate the hubness problem. We also demonstrate how previous work may be viewed as a similarly fashioned latent-variable model, albeit with a different prior.", "phrases": ["latent-variable model", "bilingual lexicon induction", "hubness problem"], "overall_score": 2.8667815424408527, "scores": [2.0510099847857988, 1.8275513111250479, 0.5411415518788081], "rank_score": 1.4732342825965514} -{"id": "schmidt-2019-generalization", "title": "Generalization in Generation: A closer look at Exposure Bias", "abstract": "Exposure bias refers to the train-test discrepancy that seemingly arises when an autoregressive generative model uses only ground-truth contexts at training time but generated ones at test time. We separate the contribution of the learning framework and the model to clarify the debate on consequences and review proposed counter-measures. 
In this light, we argue that generalization is the underlying property to address and propose unconditional generation as its fundamental benchmark. Finally, we combine latent variable modeling with a recent formulation of exploration in reinforcement learning to obtain a rigorous handling of true and generated contexts. Results on language modeling and variational sentence auto-encoding confirm the model's generalization capability.", "phrases": ["exposure bias", "train-test discrepancy", "generalization", "mle"], "overall_score": 3.0631587677819665, "scores": [2.9978568367084426, 1.842396170119595, 0.5313842375763134, 0.5206347072417711], "rank_score": 1.4730679879115305} -{"id": "croce-etal-2011-structured", "title": "Structured Lexical Similarity via Convolution Kernels on Dependency Trees", "abstract": "A central topic in natural language processing is the design of lexical and syntactic features suitable for the target application. In this paper, we study convolution dependency tree kernels for automatic engineering of syntactic and semantic patterns exploiting lexical similarities. We define efficient and powerful kernels for measuring the similarity between dependency structures, whose surface forms of the lexical nodes are in part or completely different. The experiments with such kernels for question classification show unprecedented results, e.g. a 41% error reduction over the former state-of-the-art. Additionally, semantic role classification confirms the benefit of semantic smoothing for dependency kernels.", "phrases": ["lexical similarity", "convolution kernel", "node", "sptk"], "overall_score": 4.61799951344521, "scores": [2.959602811517902, 1.2671845113582925, 1.080695954728338, 0.5837723847207708], "rank_score": 1.4728139155813258} -{"id": "koehn-hoang-2007-factored", "title": "Factored Translation Models", "abstract": "We present an extension of phrase-based statistical machine translation models that enables the straight-forward integration of additional annotation at the word-level \u2014 be it linguistic markup or automatically generated word classes. In a number of experiments we show that factored translation models lead to better translation performance, both in terms of automatic scores, as well as more grammatical coherence.", "phrases": ["integration", "factored translation models", "linguistic information", "phrase-based model", "feature function"], "overall_score": 5.952699039157558, "scores": [2.0794415416798357, 1.5630259113484612, 1.5097419242908672, 1.1427958872750852, 1.0666365311896204], "rank_score": 1.472328359156774} -{"id": "peinelt-etal-2020-tbert", "title": "tBERT: Topic Models and BERT Joining Forces for Semantic Similarity Detection", "abstract": "Semantic similarity detection is a fundamental task in natural language understanding. Adding topic information has been useful for previous feature-engineered semantic similarity models as well as neural models for other tasks. There is currently no standard way of combining topics with pretrained contextual representations such as BERT. We propose a novel topic-informed BERT-based architecture for pairwise semantic similarity detection and show that our model improves performance over strong neural baselines across a variety of English language datasets. 
We find that the addition of topics to BERT helps particularly with resolving domain-specific cases.", "phrases": ["topic model", "bert", "semantic similarity detection"], "overall_score": 3.388786364568448, "scores": [2.5904202384878072, 0.9677846183798005, 0.8569887985755665], "rank_score": 1.4717312184810583} -{"id": "elsherief-etal-2021-latent", "title": "Latent Hatred: A Benchmark for Understanding Implicit Hate Speech", "abstract": "Hate speech has grown significantly on social media, causing serious consequences for victims of all demographics. Despite much attention being paid to characterize and detect discriminatory speech, most work has focused on explicit or overt hate speech, failing to address a more pervasive form based on coded or indirect language. To fill this gap, this work introduces a theoretically-justified taxonomy of implicit hate speech and a benchmark corpus with fine-grained labels for each message and its implication. We present systematic analyses of our dataset using contemporary baselines to detect and explain implicit hate speech, and we discuss key features that challenge existing models. This dataset will continue to serve as a useful benchmark for understanding this multifaceted issue.", "phrases": ["implicit hate speech", "taxonomy", "stereotype"], "overall_score": 3.0595908146004254, "scores": [2.9691729776897064, 0.876308200599736, 0.5685753170840158], "rank_score": 1.471352165124486} -{"id": "snyder-barzilay-2008-unsupervised", "title": "Unsupervised Multilingual Learning for Morphological Segmentation", "abstract": "For centuries, the deep connection between languages has brought about major discoveries about human communication. In this paper we investigate how this powerful source of information can be exploited for unsupervised language learning. In particular, we study the task of morphological segmentation of multiple languages. We present a nonparametric Bayesian model that jointly induces morpheme segmentations of each language under consideration and at the same time identifies cross-lingual morpheme patterns, or abstract morphemes. We apply our model to three Semitic languages: Arabic, Hebrew, Aramaic, as well as to English. Our results demonstrate that learning morphological models in tandem reduces error by up to 24% relative to monolingual models. Furthermore, we provide evidence that our joint model achieves better performance when applied to languages from the same family.", "phrases": ["morphological segmentation", "abstract morpheme", "arabic", "hebrew", "unsupervised multilingual learning"], "overall_score": 4.902315652407309, "scores": [3.8852902668428255, 0.9696104579759812, 1.0886153392173052, 0.8840280511360623, 0.5284204383803187], "rank_score": 1.4711929107104986} -{"id": "d-zamora-reina-etal-2022-black", "title": "LSCDiscovery: A shared task on semantic change discovery and detection in Spanish", "abstract": "We present the first shared task on semantic change discovery and detection in Spanish. We create the first dataset of Spanish words manually annotated for semantic change using the DURel framework (Schlechtweg et al., 2018). The task is divided into two phases: 1) graded change discovery, and 2) binary change detection. In addition to introducing a new language for this task, the main novelty with respect to the previous tasks consists in predicting and evaluating changes for all vocabulary words in the corpus. 
Six teams participated in phase 1 and seven teams in phase 2 of the shared task, and the best system obtained a Spearman rank correlation of 0.735 for phase 1 and an F1 score of 0.735 for phase 2. We describe the systems developed by the competing teams, highlighting the techniques that were particularly useful.", "phrases": ["semantic change discovery", "spanish", "change detection"], "overall_score": 1.615951795574994, "scores": [2.0041866671247095, 1.8573272039462412, 0.5511942669403442], "rank_score": 1.4709027126704317} -{"id": "kong-etal-2014-constituent", "title": "A Constituent-Based Approach to Argument Labeling with Joint Inference in Discourse Parsing", "abstract": "Discourse parsing is a challenging task and plays a critical role in discourse analysis. In this paper, we focus on labeling full argument spans of discourse connectives in the Penn Discourse Treebank (PDTB). Previous studies cast this task as a linear tagging or subtree extraction problem. In this paper, we propose a novel constituent-based approach to argument labeling, which integrates the advantages of both linear tagging and subtree extraction. In particular, the proposed approach unifies intra- and intersentence cases by treating the immediately preceding sentence as a special constituent. Besides, a joint inference mechanism is introduced to incorporate global information across arguments into our constituent-based approach via integer linear programming. Evaluation on PDTB shows significant performance improvements of our constituent-based approach over the best state-of-the-art system. It also shows the effectiveness of our joint inference mechanism in modeling global information across arguments.", "phrases": ["constituent-based approach", "discourse parsing", "linear tagging"], "overall_score": 3.0580796870101676, "scores": [1.9503898672679196, 1.928196442917898, 0.5332900889070247], "rank_score": 1.4706254663642808} -{"id": "he-etal-2019-towards", "title": "Towards Understanding Neural Machine Translation with Word Importance", "abstract": "Although neural machine translation (NMT) has advanced the state-of-the-art on various language pairs, the interpretability of NMT remains unsatisfactory. In this work, we propose to address this gap by focusing on understanding the input-output behavior of NMT models. Specifically, we measure the word importance by attributing the NMT output to every input word through a gradient-based method. We validate the approach on a couple of perturbation operations, language pairs, and model architectures, demonstrating its superiority on identifying input words with higher influence on translation performance. Encouragingly, the calculated importance can serve as indicators of input words that are under-translated by NMT models. Furthermore, our analysis reveals that words of certain syntactic categories have higher importance while the categories vary across language pairs, which can inspire better design principles of NMT architectures for multi-lingual translation.", "phrases": ["neural machine translation", "word importance", "certain syntactic category"], "overall_score": 2.634512402025789, "scores": [2.072904154589702, 1.788722870060175, 0.5494210774050571], "rank_score": 1.4703493673516446} -{"id": "yin-neubig-2017-syntactic", "title": "A Syntactic Neural Model for General-Purpose Code Generation", "abstract": "We consider the problem of parsing natural language descriptions into source code written in a general-purpose programming language like Python. 
Existing data-driven methods treat this problem as a language generation task without considering the underlying syntax of the target programming language. Informed by previous work in semantic parsing, in this paper we propose a novel neural architecture powered by a grammar model to explicitly capture the target syntax as prior knowledge. Experiments find this an effective way to scale up to generation of complex programs from natural language descriptions, achieving state-of-the-art results that well outperform previous code generation and semantic parsing approaches.", "phrases": ["code generation", "natural language description", "programming language", "grammar-aware", "top-down decoding"], "overall_score": 5.660621694699245, "scores": [3.6236385932803388, 1.4059163989310324, 1.2414284642147373, 0.5425740183288064, 0.5376174414808385], "rank_score": 1.4702349832471506} -{"id": "talmor-berant-2018-web", "title": "The Web as a Knowledge-Base for Answering Complex Questions", "abstract": "Answering complex questions is a time-consuming activity for humans that requires reasoning and integration of information. Recent work on reading comprehension made headway in answering simple questions, but tackling complex questions is still an ongoing research challenge. Conversely, semantic parsers have been successful at handling compositionality, but only when the information resides in a target knowledge-base. In this paper, we present a novel framework for answering broad and complex questions, assuming answering simple questions is possible using a search engine and a reading comprehension model. We propose to decompose complex questions into a sequence of simple questions, and compute the final answer from the sequence of answers. To illustrate the viability of our approach, we create a new dataset of complex questions, ComplexWebQuestions, and present a model that decomposes questions and interacts with the web to compute an answer. We empirically demonstrate that question decomposition improves performance from 20.8 precision@1 to 27.5 precision@1 on this new dataset.", "phrases": ["web", "complex question", "reasoning"], "overall_score": 4.543980062682018, "scores": [1.8659310668179285, 1.6497403365825805, 0.8944719038308101], "rank_score": 1.4700477690771063} -{"id": "koponen-etal-2012-post", "title": "Post-editing time as a measure of cognitive effort", "abstract": "Post-editing machine translations has been attracting increasing attention both as a common practice within the translation industry and as a way to evaluate Machine Translation (MT) quality via edit distance metrics between the MT and its post-edited version. Commonly used metrics such as HTER are limited in that they cannot fully capture the effort required for post-editing. Particularly, the cognitive effort required may vary for different types of errors and may also depend on the context. We suggest post-editing time as a way to assess some of the cognitive effort involved in post-editing. This paper presents two experiments investigating the connection between post-editing time and cognitive effort. First, we examine whether sentences with long and short post-editing times involve edits of different levels of difficulty. 
Second, we study the variability in post-editing time and other statistics among editors.", "phrases": ["cognitive effort", "variability", "post-editing time"], "overall_score": 3.2299757010338226, "scores": [2.6077232211648558, 1.2755018895260655, 0.5268507655078342], "rank_score": 1.4700252920662518} -{"id": "zhang-etal-2019-bridging", "title": "Bridging the Gap between Training and Inference for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) generates target words sequentially in the way of predicting the next word conditioned on the context words. At training time, it predicts with the ground truth words as context while at inference it has to generate the entire sequence from scratch. This discrepancy of the fed context leads to error accumulation along the way. Furthermore, word-level training requires strict matching between the generated sequence and the ground truth sequence which leads to overcorrection over different but reasonable translations. In this paper, we address these issues by sampling context words not only from the ground truth sequence but also from the predicted sequence by the model during training, where the predicted sequence is selected with a sentence-level optimum. Experiment results on Chinese-English and WMT'14 English-German translation tasks demonstrate that our approach can achieve significant improvements on multiple datasets.", "phrases": ["neural machine translation", "ground truth word", "training step"], "overall_score": 4.843701268366961, "scores": [3.2879568699243347, 0.5609042758066008, 0.5600657507988376], "rank_score": 1.4696422988432578} -{"id": "tang-etal-2014-building", "title": "Building Large-Scale Twitter-Specific Sentiment Lexicon : A Representation Learning Approach", "abstract": "In this paper, we propose to build large-scale sentiment lexicon from Twitter with a representation learning approach. We cast sentiment lexicon learning as a phrase-level sentiment classification task. The challenges are developing effective feature representation of phrases and obtaining training data with minor manual annotations for building the sentiment classifier. Specifically, we develop a dedicated neural architecture and integrate the sentiment information of text (e.g. sentences or tweets) into its hybrid loss function for learning sentiment-specific phrase embedding (SSPE). The neural network is trained from massive tweets collected with positive and negative emoticons, without any manual annotation. Furthermore, we introduce the Urban Dictionary to expand a small number of sentiment seeds to obtain more training data for building the phrase-level sentiment classifier. We evaluate our sentiment lexicon (TS-Lex) by applying it in a supervised learning framework for Twitter sentiment classification. Experiment results on the benchmark dataset of SemEval 2013 show that TS-Lex yields better performance than previously introduced sentiment lexicons.", "phrases": ["sentiment lexicon", "representation learning approach", "twitter", "negative score"], "overall_score": 4.073200563708436, "scores": [3.9616086145102485, 0.8190820603214047, 0.5717509785306908, 0.5239446004459463], "rank_score": 1.4690965634520727} -{"id": "wu-etal-2018-beyond", "title": "Beyond Error Propagation in Neural Machine Translation: Characteristics of Language Also Matter", "abstract": "Neural machine translation usually adopts autoregressive models and suffers from exposure bias as well as the consequent error propagation problem. 
Many previous works have discussed the relationship between error propagation and the accuracy drop problem (i.e., the left part of the translated sentence is often better than its right part in left-to-right decoding models). In this paper, we conduct a series of analyses to deeply understand this problem and get several interesting findings. (1) The role of error propagation on accuracy drop is overstated in the literature, although it indeed contributes to the accuracy drop problem. (2) Characteristics of a language play a more important role in causing the accuracy drop: the left part of the translation result in a right-branching language (e.g., English) is more likely to be more accurate than its right part, while the right part is more accurate for a left-branching language (e.g., Japanese). Our discoveries are confirmed on different model structures including Transformer and RNN, and in other sequence generation tasks such as text summarization.", "phrases": ["error propagation", "neural machine translation", "accuracy drop"], "overall_score": 2.3635426346884856, "scores": [2.0918102317352933, 1.7532539377992555, 0.5605906552245191], "rank_score": 1.4685516082530228} -{"id": "clark-2003-combining", "title": "Combining Distributional and Morphological Information for Part of Speech Induction", "abstract": "In this paper we discuss algorithms for clustering words into classes from unlabelled text using unsupervised algorithms, based on distributional and morphological information. We show how the use of morphological information can improve the performance on rare words, and that this is robust across a wide range of languages.", "phrases": ["morphological information", "pos induction", "suffix"], "overall_score": 4.944580437228525, "scores": [3.029090213677431, 0.8475908306971439, 0.5285574795436392], "rank_score": 1.4684128413060715} -{"id": "reichart-etal-2008-multi", "title": "Multi-Task Active Learning for Linguistic Annotations", "abstract": "We extend the classical single-task active learning (AL) approach. In the multi-task active learning (MTAL) paradigm, we select examples for several annotation tasks rather than for a single one as usually done in the context of AL. We introduce two MTAL metaprotocols, alternating selection and rank combination, and propose a method to implement them in practice. We experiment with a two-task annotation scenario that includes named entity and syntactic parse tree annotations on three different corpora. MTAL outperforms random selection and a stronger baseline, one-sided example selection, in which one task is pursued using AL and the selected examples are provided also to the other task.", "phrases": ["active learning", "mtal", "annotation task"], "overall_score": 3.5208722405517876, "scores": [2.948878792516621, 0.8449918999135771, 0.6110826186994702], "rank_score": 1.4683177703765562} -{"id": "dong-lapata-2016-language", "title": "Language to Logical Form with Neural Attention", "abstract": "Semantic parsing aims at mapping natural language to machine interpretable meaning representations. Traditional approaches rely on high-quality lexicons, manually-built templates, and linguistic features which are either domain- or representation-specific. In this paper we present a general method based on an attention-enhanced encoder-decoder model. We encode input utterances into vector representations, and generate their logical forms by conditioning the output sequences or trees on the encoding vectors. 
Experimental results on four datasets show that our approach performs competitively without using hand-engineered features and is easy to adapt across domains and meaning representations.", "phrases": ["logical form", "encoder-decoder model", "sequence-to-sequence model", "program", "neural semantic parser"], "overall_score": 5.177197404750125, "scores": [3.163417177591266, 1.255967707164606, 1.0747928973224843, 0.9837019480431287, 0.8628294353341004], "rank_score": 1.4681418330911171} -{"id": "pedersen-etal-2004-wordnet", "title": "WordNet::Similarity - Measuring the Relatedness of Concepts", "abstract": "WordNet::Similarity is a freely available software package that makes it possible to measure the semantic similarity and relatedness between a pair of concepts (or synsets). It provides six measures of similarity, and three measures of relatedness, all of which are based on the lexical database WordNet. These measures are implemented as Perl modules which take as input two concepts, and return a numeric value that represents the degree to which they are similar or related.", "phrases": ["value", "wordnet::similarity", "lcs"], "overall_score": 4.838338489999033, "scores": [3.326065523643974, 0.5494610701232042, 0.5285188915284021], "rank_score": 1.4680151617651933} -{"id": "monsalve-etal-2019-assessing", "title": "Assessing Back-Translation as a Corpus Generation Strategy for non-English Tasks: A Study in Reading Comprehension and Word Sense Disambiguation", "abstract": "Corpora curated by experts have sustained Natural Language Processing mainly in English, but the expensiveness of corpora creation is a barrier for the development in further languages. Thus, we propose a corpus generation strategy that only requires a machine translation system between English and the target language in both directions, where we filter the best translations by computing automatic translation metrics and the task performance score. By studying Reading Comprehension in Spanish and Word Sense Disambiguation in Portuguese, we identified that a more quality-oriented metric has high potential in the corpora selection without degrading the task performance. We conclude that it is possible to systematise the building of quality corpora using machine translation and automatic metrics, besides some prior effort to clean and process the data.", "phrases": ["corpus generation strategy", "reading comprehension", "word sense disambiguation"], "overall_score": 1.6124648382481674, "scores": [1.7644176384645682, 1.7640904884180895, 0.8746781151072829], "rank_score": 1.4677287473299803} -{"id": "ribeiro-etal-2021-investigating", "title": "Investigating Pretrained Language Models for Graph-to-Text Generation", "abstract": "Graph-to-text generation aims to generate fluent texts from graph-based data. In this paper, we investigate two recent pretrained language models (PLMs) and analyze the impact of different task-adaptive pretraining strategies for PLMs in graph-to-text generation. We present a study across three graph domains: meaning representations, Wikipedia knowledge graphs (KGs) and scientific KGs. We show that approaches based on PLMs BART and T5 achieve new state-of-the-art results and that task-adaptive pretraining strategies improve their performance even further. 
We report new state-of-the-art BLEU scores of 49.72 on AMR-LDC2017T10, 59.70 on WebNLG, and 25.66 on AGENDA datasets - a relative improvement of 31.8%, 4.5%, and 42.4%, respectively, with our models generating significantly more fluent texts than human references. In an extensive analysis, we identify possible reasons for the PLMs' success on graph-to-text tasks. Our findings suggest that the PLMs benefit from similar facts seen during pretraining or fine-tuning, such that they perform well even when the input graph is reduced to a simple bag of node and edge labels.", "phrases": ["pretrained language models", "graph-to-text generation", "pre-trained model"], "overall_score": 4.5367049146848455, "scores": [3.0242507891497477, 0.8550867236197343, 0.5237449260587508], "rank_score": 1.4676941462760775} -{"id": "haruechaiyasak-etal-2006-collaborative", "title": "A Collaborative Framework for Collecting Thai Unknown Words from the Web", "abstract": "We propose a collaborative framework for collecting Thai unknown words found on Web pages over the Internet. Our main goal is to design and construct a Web-based system which allows a group of interested users to participate in constructing a Thai unknown-word open dictionary. The proposed framework provides supporting algorithms and tools for automatically identifying and extracting unknown words from Web pages of given URLs. The system yields the result of unknown-word candidates which are presented to the users for verification. The approved unknown words could be combined with the set of existing words in the lexicon to improve the performance of many NLP tasks such as word segmentation, information retrieval and machine translation. Our framework includes word segmentation and morphological analysis modules for handling the non-segmenting characteristic of Thai written language. To take advantage of large available text resource on the Web, our unknown-word boundary identification approach is based on the statistical string pattern-matching algorithm.", "phrases": ["collaborative framework", "unknown word", "web"], "overall_score": 1.6119953718293907, "scores": [1.92377271422696, 1.6623101538218452, 0.8158213936912712], "rank_score": 1.4673014205800257} -{"id": "ljubesic-klubicka-2014-bs", "title": "bs,hr,srWaC - Web Corpora of Bosnian, Croatian and Serbian", "abstract": "In this paper we present the construction process of top-level-domain web corpora of Bosnian, Croatian and Serbian. For constructing the corpora we use the SpiderLing crawler with its associated tools adapted for simultaneous crawling and processing of text written in two scripts, Latin and Cyrillic. In addition to the modified collection process we focus on two sources of noise in the resulting corpora: 1. they contain documents written in the other, closely related languages that can not be identified with standard language identification methods and 2. as most web corpora, they partially contain low-quality data not suitable for the specific research and application objectives. We approach both problems by using language modeling on the crawled data only, omitting the need for manually validated language samples for training. 
On the task of discriminating between closely related languages we outperform the state-of-the-art Blacklist classifier reducing its error to a fourth.", "phrases": ["web corpora", "bosnian", "serbian"], "overall_score": 2.359496191185671, "scores": [1.9180160258990877, 1.6964865945660714, 0.7836096141668477], "rank_score": 1.4660374115440022} -{"id": "lin-etal-2015-hierarchical", "title": "Hierarchical Recurrent Neural Network for Document Modeling", "abstract": "This paper proposes a novel hierarchical recurrent neural network language model (HRNNLM) for document modeling. After establishing an RNN to capture the coherence between sentences in a document, HRNNLM integrates it as the sentence history information into the word level RNN to predict the word sequence with cross-sentence contextual information. A two-step training approach is designed, in which sentence-level and word-level language models are approximated for the convergence in a pipeline style. Examined on the standard sentence reordering scenario, HRNNLM is shown to achieve better accuracy in modeling sentence coherence. At the word level, experimental results also indicate a significantly lower model perplexity, followed by a practically better translation result when applied to a Chinese-English document translation reranking task.", "phrases": ["document modeling", "network language model", "hierarchical recurrent", "rnnlm"], "overall_score": 4.237253628278793, "scores": [3.0211315747917307, 1.3963112461459797, 0.9192444832810445, 0.5272692843454693], "rank_score": 1.465989147141056} -{"id": "lin-etal-2015-unsupervised", "title": "Unsupervised POS Induction with Word Embeddings", "abstract": "Unsupervised word embeddings have been shown to be valuable as features in supervised learning problems; however, their role in unsupervised problems has been less thoroughly explored. In this paper, we show that embeddings can likewise add value to the problem of unsupervised POS induction. In two representative models of POS induction, we replace multinomial distributions over the vocabulary with multivariate Gaussian distributions over word embeddings and observe consistent improvements in eight languages. We also analyze the effect of various choices while inducing word embeddings on \"downstream\" POS induction results.", "phrases": ["pos induction", "word embedding", "autoencoder"], "overall_score": 3.5151943025194523, "scores": [2.421705668690703, 1.4344553065487127, 0.5416886836623027], "rank_score": 1.4659498863005727} -{"id": "garcia-salido-etal-2018-lexical", "title": "A Lexical Tool for Academic Writing in Spanish based on Expert and Novice Corpora", "abstract": "The object of this article is to describe the extraction of data from a corpus of academic texts in Spanish and the use of those data for developing a lexical tool oriented to the production of academic texts. The corpus provides the lexical combinations that will be included in the afore-mentioned tool, namely collocations, idioms and formulas. They have been retrieved from the corpus controlling for their keyness (i.e., their specificity with regard to academic texts) and their even distribution across the corpus. For the extraction of collocations containing academic vocabulary other methods have been used, taking advantage of the morphological and syntactic information with which the corpus has been enriched. 
In the case of collocations and other multiword units, several association measures are being tested in order to restrict the list of candidates the lexicographers will have to deal with manually.", "phrases": ["lexical tool", "academic writing", "spanish"], "overall_score": 1.609439685513933, "scores": [1.771600009500995, 1.7051513560653058, 0.9181740383679268], "rank_score": 1.4649751346447426} -{"id": "wang-etal-2021-automated", "title": "Automated Concatenation of Embeddings for Structured Prediction", "abstract": "Pretrained contextualized embeddings are powerful word representations for structured prediction tasks. Recent work found that better word representations can be obtained by concatenating different types of embeddings. However, the selection of embeddings to form the best concatenated representation usually varies depending on the task and the collection of candidate embeddings, and the ever-increasing number of embedding types makes it a more difficult problem. In this paper, we propose Automated Concatenation of Embeddings (ACE) to automate the process of finding better concatenations of embeddings for structured prediction tasks, based on a formulation inspired by recent progress on neural architecture search. Specifically, a controller alternately samples a concatenation of embeddings, according to its current belief of the effectiveness of individual embedding types in consideration for a task, and updates the belief based on a reward. We follow strategies in reinforcement learning to optimize the parameters of the controller and compute the reward based on the accuracy of a task model, which is fed with the sampled concatenation as input and trained on a task dataset. Empirical results on 6 tasks and 21 datasets show that our approach outperforms strong baselines and achieves state-of-the-art performance with fine-tuned embeddings in all the evaluations.", "phrases": ["embeddings", "automated concatenation", "ner task"], "overall_score": 3.756610701972688, "scores": [1.9807955697662611, 1.8450723046471167, 0.5679156024937535], "rank_score": 1.4645944923023773} -{"id": "nivre-etal-2016-universal", "title": "Universal Dependencies v1: A Multilingual Treebank Collection", "abstract": "Cross-linguistically consistent annotation is necessary for sound comparative evaluation and cross-lingual learning experiments. It is also useful for multilingual system development and comparative linguistic studies. Universal Dependencies is an open community effort to create cross-linguistically consistent treebank annotation for many languages within a dependency-based lexicalist framework. In this paper, we describe v1 of the universal guidelines, the underlying design principles, and the currently available treebanks for 33 languages.", "phrases": ["treebank", "universal dependencies", "project", "annotation guideline", "morphological analysis"], "overall_score": 5.699675330500964, "scores": [2.772501578122141, 1.7244630433867274, 1.7196207390057077, 0.5652947160669959, 0.5407541843939446], "rank_score": 1.4645268521951034} -{"id": "cybulska-vossen-2015-translating", "title": "Translating Granularity of Event Slots into Features for Event Coreference Resolution.", "abstract": "Using clues from event semantics to solve coreference, we present an \u201cevent template\u201d approach to cross-document event coreference resolution on news articles. The approach uses a pairwise model, in which event information is compared along five semantically motivated slots of an event template. 
The templates, filled in on the sentence level for every event mention from the data set, are used for supervised classification. In this study, we determine granularity of events and we use the grain size as a clue for solving event coreference. We experiment with a newly-created granularity ontology employing granularity levels of locations, times and human participants as well as event durations as features in event coreference resolution. The granularity ontology is available for research. Results show that determining granularity along semantic event slots, even on the sentence level exclusively, improves precision and solves event coreference with scores comparable to those achieved in related work.", "phrases": ["granularity", "event slot", "event coreference resolution"], "overall_score": 2.622395381769944, "scores": [1.7751410355044759, 1.6705983915486111, 0.9450207617010755], "rank_score": 1.463586729584721} -{"id": "lin-etal-2020-birds", "title": "Birds have four legs?! NumerSense: Probing Numerical Commonsense Knowledge of Pre-Trained Language Models", "abstract": "Recent works show that pre-trained language models (PTLMs), such as BERT, possess certain commonsense and factual knowledge. They suggest that it is promising to use PTLMs as \u201cneural knowledge bases\u201d via predicting masked words. Surprisingly, we find that this may not work for numerical commonsense knowledge (e.g., a bird usually has two legs). In this paper, we investigate whether and to what extent we can induce numerical commonsense knowledge from PTLMs as well as the robustness of this process. To study this, we introduce a novel probing task with a diagnostic dataset, NumerSense, containing 13.6k masked-word-prediction probes (10.5k for fine-tuning and 3.1k for testing). Our analysis reveals that: (1) BERT and its stronger variant RoBERTa perform poorly on the diagnostic dataset prior to any fine-tuning; (2) fine-tuning with distant supervision brings some improvement; (3) the best supervised model still performs poorly as compared to human performance (54.06% vs. 96.3% in accuracy).", "phrases": ["numerical commonsense knowledge", "pre-trained language model", "bird"], "overall_score": 2.6220514975690197, "scores": [1.9369003628894692, 1.8588938047692916, 0.5943902448148844], "rank_score": 1.4633948041578817} -{"id": "kenter-etal-2016-siamese", "title": "Siamese CBOW: Optimizing Word Embeddings for Sentence Representations", "abstract": "We present the Siamese Continuous Bag of Words (Siamese CBOW) model, a neural network for efficient estimation of high-quality sentence embeddings. Averaging the embeddings of words in a sentence has proven to be a surprisingly successful and efficient way of obtaining sentence embeddings. However, word embeddings trained with the methods currently available are not optimized for the task of sentence representation, and, thus, likely to be suboptimal. Siamese CBOW handles this problem by training word embeddings directly for the purpose of being averaged. The underlying neural network learns word embeddings by predicting, from a sentence representation, its surrounding sentences. 
We show the robustness of the Siamese CBOW model by evaluating it on 20 datasets stemming from a wide variety of sources.", "phrases": ["sentence embedding", "siamese cbow", "sum"], "overall_score": 3.509066752849351, "scores": [2.8532807422611146, 1.0130426448657435, 0.5238601116973812], "rank_score": 1.4633944996080797} -{"id": "kajiwara-etal-2013-selecting", "title": "Selecting Proper Lexical Paraphrase for Children", "abstract": "We propose a method for acquiring plain lexical paraphrase using a Japanese dictionary in order to achieve lexical simplification for children. The proposed method extracts plain words that are the most similar to the headword from the dictionary definition. The definition statements describe the headword using plain words; therefore, paraphrasing by replacing the headword with the most similar word in the dictionary definition is expected to be an accurate means of lexical simplification. However, it is difficult to determine which word is the most appropriate for the paraphrase. The method proposed in this paper measures the similarity of each word in the definition statements against the headword and selects the one with the closest semantic match for the paraphrase. This method compares favorably with the method that acquires the target word from the end of the definition statements.", "phrases": ["lexical paraphrase", "japanese dictionary", "child"], "overall_score": 3.041885883220088, "scores": [1.983490301294023, 1.8396844131690782, 0.5653389642086649], "rank_score": 1.4628378928905885} -{"id": "liebrecht-etal-2013-perfect", "title": "The perfect solution for detecting sarcasm in tweets #not", "abstract": "To avoid a sarcastic message being understood in its unintended literal meaning, in microtexts such as messages on Twitter.com, sarcasm is often explicitly marked with the hashtag \u2018#sarcasm\u2019. We collected a training corpus of about 78 thousand Dutch tweets with this hashtag. Assuming that the human labeling is correct (annotation of a sample indicates that about 85% of these tweets are indeed sarcastic), we train a machine learning classifier on the harvested examples, and apply it to a test set of a day\u2019s stream of 3.3 million Dutch tweets. Of the 135 explicitly marked tweets on this day, we detect 101 (75%) when we remove the hashtag. We annotate the top of the ranked list of tweets most likely to be sarcastic that do not have the explicit hashtag. 30% of the top-250 ranked tweets are indeed sarcastic. Analysis shows that sarcasm is often signalled by hyperbole, using intensifiers and exclamations; in contrast, non-hyperbolic sarcastic messages often receive an explicit marker. We hypothesize that explicit markers such as hashtags are the digital extralinguistic equivalent of nonverbal expressions that people employ in live interaction when conveying sarcasm.", "phrases": ["sarcasm", "hashtag", "n-gram", "emoticon"], "overall_score": 4.226132170585737, "scores": [3.371208651461434, 1.0583347369907667, 0.8642600292222957, 0.5547621297024853], "rank_score": 1.4621413868442454} -{"id": "bernhard-gurevych-2008-answering", "title": "Answering Learners' Questions by Retrieving Question Paraphrases from Social Q&A Sites", "abstract": "Information overload is a well-known problem which can be particularly detrimental to learners. 
In this paper, we propose a method to support learners in the information seeking process which consists in answering their questions by retrieving question paraphrases and their corresponding answers from social Q&A sites. Given the novelty of this kind of data, it is crucial to get a better understanding of how questions in social Q&A sites can be automatically analysed and retrieved. We discuss and evaluate several pre-processing strategies and question similarity metrics, using a new question paraphrase corpus collected from the WikiAnswers Q&A site. The results show that viable performance levels of more than 80% accuracy can be obtained for the task of question paraphrase retrieval.", "phrases": ["question paraphrase", "social q&a site", "wikianswers repository"], "overall_score": 2.8449279135288545, "scores": [2.660044197001661, 0.8815541151949614, 0.8444129045751131], "rank_score": 1.4620037389239118} -{"id": "xu-etal-2004-need", "title": "Do We Need Chinese Word Segmentation for Statistical Machine Translation?", "abstract": "In Chinese texts, words are not separated by white spaces. This is problematic for many natural language processing tasks. The standard approach is to segment the Chinese character sequence into words. Here, we investigate Chinese word segmentation for statistical machine translation. We pursue two goals: the first one is the maximization of the final translation quality; the second is the minimization of the manual effort for building a translation system. The commonly used method for getting the word boundaries is based on a word segmentation tool and a predefined monolingual dictionary. To avoid the dependence of the translation system on an external dictionary, we have developed a system that learns a domain-specific dictionary from the parallel training corpus. This method produces results that are comparable with the predefined dictionary. Furthermore, our translation system is able to work without word segmentation with only a minor loss in translation quality.", "phrases": ["word segmentation", "chinese character", "parallel training corpus"], "overall_score": 4.053381059548331, "scores": [3.276192519027721, 0.570183288768197, 0.5394687572868035], "rank_score": 1.4619481883609071} -{"id": "zhao-etal-2017-men", "title": "Men Also Like Shopping: Reducing Gender Bias Amplification using Corpus-level Constraints", "abstract": "Language is increasingly being used to define rich visual recognition problems with supporting image collections sourced from the web. Structured prediction models are used in these tasks to take advantage of correlations between co-occurring labels and visual input but risk inadvertently encoding social biases found in web corpora. In this work, we study data and models associated with multilabel object classification and visual semantic role labeling. We find that (a) datasets for these tasks contain significant gender bias and (b) models trained on these datasets further amplify existing bias. For example, the activity cooking is over 33% more likely to involve females than males in a training set, and a trained model further amplifies the disparity to 68% at test time. We propose to inject corpus-level constraints for calibrating existing structured prediction models and design an algorithm based on Lagrangian relaxation for collective inference. 
Our method results in almost no performance loss for the underlying recognition task but decreases the magnitude of bias amplification by 47.5% and 40.5% for multilabel classification and visual semantic role labeling, respectively.", "phrases": ["gender bias", "corpus-level constraint", "semantic role labeling", "man", "social group"], "overall_score": 5.49774682370866, "scores": [2.968081567465934, 0.8031878055785202, 1.8952557859361114, 1.109357430276997, 0.532618227768069], "rank_score": 1.4617001634051263} -{"id": "han-baldwin-2011-lexical", "title": "Lexical Normalisation of Short Text Messages: Makn Sens a #twitter", "abstract": "Twitter provides access to large volumes of data in real time, but is notoriously noisy, hampering its utility for NLP. In this paper, we target out-of-vocabulary words in short text messages and propose a method for identifying and normalising ill-formed words. Our method uses a classifier to detect ill-formed words, and generates correction candidates based on morphophonemic similarity. Both word similarity and context are then exploited to select the most probable correction candidate for the word. The proposed method doesn't require any annotations, and achieves state-of-the-art performance over an SMS corpus and a novel dataset based on Twitter.", "phrases": ["short text message", "twitter", "ill-formed word", "lexical normalisation", "normalization"], "overall_score": 6.030041815330467, "scores": [1.8007570936874353, 0.9143355430642331, 1.9608001854372246, 1.424013395809396, 1.2054553873575609], "rank_score": 1.4610723210711698} -{"id": "ling-etal-2016-latent", "title": "Latent Predictor Networks for Code Generation", "abstract": "Many language generation tasks require the production of text conditioned on both structured and unstructured inputs. We present a novel neural network architecture which generates an output sequence conditioned on an arbitrary number of input functions. Crucially, our approach allows both the choice of conditioning context and the granularity of generation, for example characters or tokens, to be marginalised, thus permitting scalable and effective training. Using this framework, we address the problem of generating programming code from a mixed natural language and structured specification. We create two new data sets for this paradigm derived from the collectible trading card games Magic the Gathering and Hearthstone. On these, and a third preexisting corpus, we demonstrate that marginalising multiple predictors allows our model to outperform strong benchmarks.", "phrases": ["code generation", "sequence-to-sequence model", "java", "python", "programming language"], "overall_score": 5.27449246483915, "scores": [2.1732741111694764, 1.309588342303506, 1.3009171359279768, 1.2772825801828445, 1.2424719904834256], "rank_score": 1.460706832013446} -{"id": "fadaee-etal-2017-data", "title": "Data Augmentation for Low-Resource Neural Machine Translation", "abstract": "The quality of a Neural Machine Translation system depends substantially on the availability of sizable parallel corpora. For low-resource language pairs this is not the case, resulting in poor translation quality. Inspired by work in computer vision, we propose a novel data augmentation approach that targets low-frequency words by generating new sentence pairs containing rare words in new, synthetically created contexts. 
Experimental results on simulated low-resource settings show that our method improves translation quality by up to 2.9 BLEU points over the baseline and up to 3.2 BLEU over back-translation.", "phrases": ["rare word", "data augmentation", "parallel training data", "synthetic sentence pair"], "overall_score": 5.771515294421277, "scores": [2.5500363932097594, 2.1940189493363103, 0.5598263315822438, 0.5388511011542622], "rank_score": 1.4606831938206437} -{"id": "vilar-etal-2006-error", "title": "Error Analysis of Statistical Machine Translation Output", "abstract": "Evaluation of automatic translation output is a difficult task. Several performance measures like Word Error Rate, Position Independent Word Error Rate and the BLEU and NIST scores are widely used and provide a useful tool for comparing different systems and evaluating improvements within a system. However, the interpretation of all of these measures is not at all clear, and the identification of the most prominent source of errors in a given system using these measures alone is not possible. Therefore some analysis of the generated translations is needed in order to identify the main problems and to focus the research efforts. This area is however mostly unexplored and few works have dealt with it until now. In this paper we will present a framework for classification of the errors of a machine translation system and we will carry out an error analysis of the system used by the RWTH in the first TC-STAR evaluation.", "phrases": ["rwth", "error analysis", "typology", "missing word", "human evaluation"], "overall_score": 5.233408191774773, "scores": [4.149402796960577, 1.2363722830193193, 0.8621996911393472, 0.5273367513999468, 0.5267402897554319], "rank_score": 1.4604103624549243} -{"id": "mishra-etal-2016-leveraging", "title": "Leveraging Cognitive Features for Sentiment Analysis", "abstract": "Sentiments expressed in user-generated short text and sentences are nuanced by subtleties at lexical, syntactic, semantic and pragmatic levels. To address this, we propose to augment traditional features used for sentiment analysis and sarcasm detection, with cognitive features derived from the eye-movement patterns of readers. Statistical classification using our enhanced feature set improves the performance (F-score) of polarity detection by a maximum of 3.7% and 9.3% on two datasets, over the systems that use only traditional features. We perform feature significance analysis, and experiment on a held-out dataset, showing that cognitive features indeed empower sentiment analyzers to handle complex constructs.", "phrases": ["cognitive feature", "sentiment analysis", "sarcasm detection"], "overall_score": 2.6149288114328075, "scores": [2.607048946278731, 0.8738915641071944, 0.8973181616210923], "rank_score": 1.4594195573356725} -{"id": "romanov-shivade-2018-lessons", "title": "Lessons from Natural Language Inference in the Clinical Domain", "abstract": "State of the art models using deep neural networks have become very good in learning an accurate mapping from inputs to outputs. However, they still lack generalization capabilities in conditions that differ from the ones encountered during training. This is even more challenging in specialized, and knowledge intensive domains, where training data is limited. To address this gap, we introduce MedNLI - a dataset annotated by doctors, performing a natural language inference task (NLI), grounded in the medical history of patients. 
We present strategies to: 1) leverage transfer learning using datasets from the open domain, (e.g. SNLI) and 2) incorporate domain knowledge from external data and lexical sources (e.g. medical terminologies). Our results demonstrate performance gains using both strategies.", "phrases": ["natural language inference", "clinical domain", "mednli", "history", "open domain model"], "overall_score": 4.296359785720276, "scores": [2.4113227291476984, 1.8157697161947743, 1.7173673588216745, 0.8302880245159213, 0.5209710096443722], "rank_score": 1.459143767664888} -{"id": "schoenmackers-etal-2010-learning", "title": "Learning First-Order Horn Clauses from Web Text", "abstract": "Even the entire Web corpus does not explicitly answer all questions, yet inference can uncover many implicit answers. But where do inference rules come from? \n \nThis paper investigates the problem of learning inference rules from Web text in an unsupervised, domain-independent manner. The Sherlock system, described herein, is a first-order learner that acquires over 30,000 Horn clauses from Web text. Sherlock embodies several innovations, including a novel rule scoring function based on Statistical Relevance (Salmon et al., 1971) which is effective on ambiguous, noisy and incomplete Web extractions. Our experiments show that inference over the learned rules discovers three times as many facts (at precision 0.8) as the TextRunner system which merely extracts facts explicitly stated in Web text.", "phrases": ["horn clause", "web text", "inference rule", "entailment rule"], "overall_score": 4.574994766564602, "scores": [1.842400491957679, 1.7519280502610042, 1.7068152983588512, 0.5352499799849844], "rank_score": 1.4590984551406296} -{"id": "tandon-etal-2019-wiqa", "title": "WIQA: A dataset for \u201cWhat if...\u201d reasoning over procedural text", "abstract": "We introduce WIQA, the first large-scale dataset of \u201cWhat if...\u201d questions over procedural text. WIQA contains a collection of paragraphs, each annotated with multiple influence graphs describing how one change affects another, and a large (40k) collection of \u201cWhat if...?\u201d multiple-choice questions derived from these. For example, given a paragraph about beach erosion, would stormy weather hasten or decelerate erosion? WIQA contains three kinds of questions: perturbations to steps mentioned in the paragraph; external (out-of-paragraph) perturbations requiring commonsense knowledge; and irrelevant (no effect) perturbations. We find that state-of-the-art models achieve 73.8% accuracy, well below the human performance of 96.3%. We analyze the challenges, in particular tracking chains of influences, and present the dataset as an open challenge to the community.", "phrases": ["reasoning", "procedural text", "large-scale dataset", "wiqa"], "overall_score": 3.4981907678202764, "scores": [2.9855723835126664, 1.7761511669330152, 0.5520844354374503, 0.521627460366108], "rank_score": 1.4588588615623101} -{"id": "ebrahimi-etal-2018-adversarial", "title": "On Adversarial Examples for Character-Level Neural Machine Translation", "abstract": "Evaluating on adversarial examples has become a standard procedure to measure robustness of deep learning models. Due to the difficulty of creating white-box adversarial examples for discrete text input, most analyses of the robustness of NLP models have been done through black-box adversarial examples. 
We investigate adversarial examples for character-level neural machine translation (NMT), and contrast black-box adversaries with a novel white-box adversary, which employs differentiable string-edit operations to rank adversarial changes. We propose two novel types of attacks which aim to remove or change a word in a translation, rather than simply break the NMT. We demonstrate that white-box adversarial examples are significantly stronger than their black-box counterparts in different attack scenarios, which show more serious vulnerabilities than previously known. In addition, after performing adversarial training, which takes only 3 times longer than regular training, we can improve the model's robustness significantly.", "phrases": ["adversarial example", "neural machine translation", "perturbation", "character-level nmt", "token level"], "overall_score": 4.635266638130049, "scores": [2.7373750766360145, 1.8020503133898904, 1.3473366468729937, 0.8483213522981283, 0.5575348066731196], "rank_score": 1.4585236391740293} -{"id": "miura-etal-2016-selecting", "title": "Selecting Syntactic, Non-redundant Segments in Active Learning for Machine Translation", "abstract": "Active learning is a framework that makes it possible to efficiently train statistical models by selecting informative examples from a pool of unlabeled data. Previous work has found this framework effective for machine translation (MT), making it possible to train better translation models with less effort, particularly when annotators translate short phrases instead of full sentences. However, previous methods for phrase-based active learning in MT fail to consider whether the selected units are coherent and easy for human translators to translate, and also have problems with selecting redundant phrases with similar content. In this paper, we tackle these problems by proposing two new methods for selecting more syntactically coherent and less redundant segments in active learning for MT. Experiments using both simulation and extensive manual translation by professional translators find the proposed method effective, achieving both greater gain of BLEU score for the same number of translated words, and allowing translators to be more confident in their translations.", "phrases": ["segment", "active learning", "machine translation"], "overall_score": 1.6022145940056294, "scores": [1.8727978127094336, 1.7030788382568478, 0.7993190678474674], "rank_score": 1.4583985729379163} -{"id": "alansary-nagi-2014-international", "title": "The International Corpus of Arabic: Compilation, Analysis and Evaluation", "abstract": "This paper focuses on a project for building the first International Corpus of Arabic (ICA). It is planned to contain 100 million analyzed tokens with an interface which allows users to interact with the corpus data in a number of ways [ICA website]. ICA is a representative corpus of Arabic that has been initiated in 2006; it is intended to cover the Modern Standard Arabic (MSA) language as being used all over the Arab world. ICA has been analyzed by Bibliotheca Alexandrina Morphological Analysis Enhancer (BAMAE). BAMAE is based on Buckwalter Arabic Morphological Analyzer (BAMA). Precision and Recall are the evaluation measures used to evaluate the BAMAE system. At this point, Precision measurement ranges from 95%-92% while recall measurement was 92%-89%. This depends on the number of qualifiers retrieved for every word. 
The percentages are expected to rise by implementing the improvements while working on larger amounts of data.", "phrases": ["international corpus", "arabic", "ica"], "overall_score": 1.6020185688073572, "scores": [1.997693694745074, 1.8261371533180508, 0.5508295812760002], "rank_score": 1.4582201431130415} -{"id": "liu-etal-2013-additive", "title": "Additive Neural Networks for Statistical Machine Translation", "abstract": "Most statistical machine translation (SMT) systems are modeled using a loglinear framework. Although the log-linear model achieves success in SMT, it still suffers from some limitations: (1) the features are required to be linear with respect to the model itself; (2) features cannot be further interpreted to reach their potential. A neural network is a reasonable method to address these pitfalls. However, modeling SMT with a neural network is not trivial, especially when taking the decoding efficiency into consideration. In this paper, we propose a variant of a neural network, i.e. additive neural networks, for SMT to go beyond the log-linear translation model. In addition, word embedding is employed as the input to the neural network, which encodes each word as a feature vector. Our model outperforms the log-linear translation models with/without embedding features on Chinese-to-English and Japanese-to-English translation tasks.", "phrases": ["statistical machine translation", "log-linear model", "additive neural network"], "overall_score": 3.0319837233671407, "scores": [2.515475372012522, 1.304183939840879, 0.5545685699044279], "rank_score": 1.458075960585943} -{"id": "vlachos-riedel-2014-fact", "title": "Fact Checking: Task definition and dataset construction", "abstract": "In this paper we introduce the task of fact checking, i.e. the assessment of the truthfulness of a claim. The task is commonly performed manually by journalists verifying the claims made by public figures. Furthermore, ordinary citizens need to assess the truthfulness of the increasing volume of statements they consume. Thus, developing fact checking systems is likely to be of use to various members of society. We first define the task and detail the construction of a publicly available dataset using statements fact-checked by journalists available online. Then, we discuss baseline approaches for the task and the challenges that need to be addressed. Finally, we discuss how fact checking relates to mainstream natural language processing tasks and can stimulate further research.", "phrases": ["fact checking", "website", "verdict", "news detection", "assignment"], "overall_score": 5.414565254186477, "scores": [2.4965504686128877, 1.418569302420876, 1.2272483830914882, 1.0810058982939785, 1.0668645062089757], "rank_score": 1.4580477117256414} -{"id": "elming-habash-2009-syntactic", "title": "Syntactic Reordering for English-Arabic Phrase-Based Machine Translation", "abstract": "We investigate syntactic reordering within an English to Arabic translation task. We extend a pre-translation syntactic reordering approach developed on a close language pair (English-Danish) to the distant language pair, English-Arabic. We achieve significant improvements in translation quality over related approaches, measured by manual as well as automatic evaluations. 
These results prove the viability of this approach for distant languages.", "phrases": ["arabic", "syntactic reordering", "rich language"], "overall_score": 3.356831693883794, "scores": [1.9512259464899857, 1.302558881521131, 1.1197756159839154], "rank_score": 1.4578534813316775} -{"id": "ning-etal-2018-multi", "title": "A Multi-Axis Annotation Scheme for Event Temporal Relations", "abstract": "Existing temporal relation (TempRel) annotation schemes often have low inter-annotator agreements (IAA) even between experts, suggesting that the current annotation task needs a better definition. This paper proposes a new multi-axis modeling to better capture the temporal structure of events. In addition, we identify that event end-points are a major source of confusion in annotation, so we also propose to annotate TempRels based on start-points only. A pilot expert annotation effort using the proposed scheme shows significant improvement in IAA from the conventional 60's to 80's (Cohen's Kappa). This better-defined annotation scheme further enables the use of crowdsourcing to alleviate the labor intensity for each annotator. We hope that this work can foster more interesting studies towards event understanding.", "phrases": ["annotation scheme", "temporal relation", "start-point"], "overall_score": 4.438454420885551, "scores": [2.6200245137265807, 1.2199393616934688, 0.5335835478708669], "rank_score": 1.457849141096972} -{"id": "elliott-keller-2013-image", "title": "Image Description using Visual Dependency Representations", "abstract": "Describing the main event of an image involves identifying the objects depicted and predicting the relationships between them. Previous approaches have represented images as unstructured bags of regions, which makes it difficult to accurately predict meaningful relationships between regions. In this paper, we introduce visual dependency representations to capture the relationships between the objects in an image, and hypothesize that this representation can improve image description. We test this hypothesis using a new data set of region-annotated images, associated with visual dependency representations and gold-standard descriptions. We describe two template-based description generation models that operate over visual dependency representations. In an image description task, we find that these models outperform approaches that rely on object proximity or corpus information to generate descriptions on both automatic measures and on human judgements.", "phrases": ["visual dependency representation", "image description", "spatial relation", "caption", "preposition"], "overall_score": 4.506173669362357, "scores": [2.9482335745938477, 1.2749680197906341, 1.1219795122203224, 1.105929872546703, 0.8379731039003899], "rank_score": 1.4578168166103793} -{"id": "wachsmuth-etal-2017-computational", "title": "Computational Argumentation Quality Assessment in Natural Language", "abstract": "Research on computational argumentation faces the problem of how to automatically assess the quality of an argument or argumentation. While different quality dimensions have been approached in natural language processing, a common understanding of argumentation quality is still missing. This paper presents the first holistic work on computational argumentation quality in natural language. We comprehensively survey the diverse existing theories and approaches to assess logical, rhetorical, and dialectical quality dimensions, and we derive a systematic taxonomy from these. 
In addition, we provide a corpus with 320 arguments, annotated for all 15 dimensions in the taxonomy. Our results establish a common ground for research on computational argumentation quality assessment.", "phrases": ["argumentation", "dialectical quality dimension", "taxonomy", "reasonableness"], "overall_score": 5.223826837314775, "scores": [2.146093784084056, 1.7479158129415782, 1.0525230679036544, 0.8844138734086511], "rank_score": 1.4577366345844847} -{"id": "ghosh-etal-2018-sarcasm", "title": "Sarcasm Analysis Using Conversation Context", "abstract": "Computational models for sarcasm detection have often relied on the content of utterances in isolation. However, the speaker's sarcastic intent is not always apparent without additional context. Focusing on social media discussions, we investigate three issues: (1) does modeling conversation context help in sarcasm detection? (2) can we identify what part of conversation context triggered the sarcastic reply? and (3) given a sarcastic post that contains multiple sentences, can we identify the specific sentence that is sarcastic? To address the first issue, we investigate several types of Long Short-Term Memory (LSTM) networks that can model both the conversation context and the current turn. We show that LSTM networks with sentence-level attention on context and current turn, as well as the conditional LSTM network, outperform the LSTM model that reads only the current turn. As conversation context, we consider the prior turn, the succeeding turn, or both. Our computational models are tested on two types of social media platforms: Twitter and discussion forums. We discuss several differences between these data sets, ranging from their size to the nature of the gold-label annotations. To address the latter two issues, we present a qualitative analysis of the attention weights produced by the LSTM models (with attention) and discuss the results compared with human performance on the two tasks.", "phrases": ["conversation context", "sentence-level attention", "sarcasm analysis"], "overall_score": 3.622013393831716, "scores": [2.894507032762483, 0.9583419932031254, 0.5199672254707116], "rank_score": 1.45760541714544} -{"id": "dalvi-etal-2018-tracking", "title": "Tracking State Changes in Procedural Text: a Challenge Dataset and Models for Process Paragraph Comprehension", "abstract": "We present a new dataset and models for comprehending paragraphs about processes (e.g., photosynthesis), an important genre of text describing a dynamic world. The new dataset, ProPara, is the first to contain natural (rather than machine-generated) text about a changing world along with a full annotation of entity states (location and existence) during those changes (81k datapoints). The end-task, tracking the location and existence of entities through the text, is challenging because the causal effects of actions are often implicit and need to be inferred. We find that previous models that have worked well on synthetic data achieve only mediocre performance on ProPara, and introduce two new neural models that exploit alternative mechanisms for state prediction, in particular using LSTM input encoding and span prediction. The new models improve accuracy by up to 19%. 
We are releasing the ProPara dataset and our models to the community.", "phrases": ["state change", "procedural text", "paragraph", "scientific process"], "overall_score": 4.691820117685558, "scores": [2.0988267986815434, 1.2969464024003479, 1.219922498686182, 1.21468779180718], "rank_score": 1.4575958728938132} -{"id": "muller-etal-2019-enhancing", "title": "Enhancing BERT for Lexical Normalization", "abstract": "Language model-based pre-trained representations have become ubiquitous in natural language processing. They have been shown to significantly improve the performance of neural models on a great variety of tasks. However, it remains unclear how useful those general models can be in handling non-canonical text. In this article, focusing on User Generated Content (UGC), we study the ability of BERT to perform lexical normalisation. Our contribution is simple: by framing lexical normalisation as a token prediction task, by enhancing its architecture and by carefully fine-tuning it, we show that BERT can be a competitive lexical normalisation model without the need of any UGC resources aside from 3,000 training sentences. To the best of our knowledge, it is the first work done in adapting and analysing the ability of this model to handle noisy UGC data.", "phrases": ["bert", "lexical normalization", "ugc"], "overall_score": 3.029899363178531, "scores": [1.9843588542849624, 1.8465099051515996, 0.5403520262137335], "rank_score": 1.4570735952167653} -{"id": "jia-etal-2018-modeling", "title": "Modeling discourse cohesion for discourse parsing via memory network", "abstract": "Identifying long-span dependencies between discourse units is crucial to improve discourse parsing performance. Most existing approaches design sophisticated features or exploit various off-the-shelf tools, but achieve little success. In this paper, we propose a new transition-based discourse parser that makes use of memory networks to take discourse cohesion into account. The automatically captured discourse cohesion benefits discourse parsing, especially for long span scenarios. Experiments on the RST discourse treebank show that our method outperforms traditional featured based methods, and the memory based discourse cohesion can improve the overall parsing performance significantly.", "phrases": ["discourse cohesion", "memory network", "transition-based discourse parser"], "overall_score": 1.600566140080847, "scores": [1.990680137323504, 1.7670175514840631, 0.6129965677293104], "rank_score": 1.4568980855122924} -{"id": "kim-etal-2014-composite", "title": "A Composite Kernel Approach for Dialog Topic Tracking with Structured Domain Knowledge from Wikipedia", "abstract": "Dialog topic tracking aims at analyzing and maintaining topic transitions in ongoing dialogs. This paper proposes a composite kernel approach for dialog topic tracking to utilize various types of domain knowledge obtained from Wikipedia. Two kernels are defined based on history sequences and context trees constructed based on the extracted features. 
The experimental results show that our composite kernel approach can significantly improve the performances of topic tracking in mixed-initiative human-human dialogs.", "phrases": ["dialog topic tracking", "domain knowledge", "wikipedia"], "overall_score": 2.019351680418205, "scores": [1.7514369322969146, 1.6774602917428705, 0.9410657586854243], "rank_score": 1.4566543275750696} -{"id": "geng-etal-2019-induction", "title": "Induction Networks for Few-Shot Text Classification", "abstract": "Text classification tends to struggle when data is deficient or when it needs to adapt to unseen classes. In such challenging scenarios, recent studies have used meta-learning to simulate the few-shot task, in which new queries are compared to a small support set at the sample-wise level. However, this sample-wise comparison may be severely disturbed by the various expressions in the same class. Therefore, we should be able to learn a general representation of each class in the support set and then compare it to new queries. In this paper, we propose a novel Induction Network to learn such a generalized class-wise representation, by innovatively leveraging the dynamic routing algorithm in meta-learning. In this way, we find the model is able to induce and generalize better. We evaluate the proposed model on a well-studied sentiment classification dataset (English) and a real-world dialogue intent classification dataset (Chinese). Experiment results show that on both datasets, the proposed model significantly outperforms the existing state-of-the-art approaches, proving the effectiveness of class-wise generalization in few-shot text classification.", "phrases": ["text classification", "induction networks", "few-shot learning", "new class", "nlp community"], "overall_score": 4.434628987942446, "scores": [3.9669132866857684, 0.8854630728755145, 1.2898807590324628, 0.5778719714615715, 0.5628341310311586], "rank_score": 1.4565926442172952} -{"id": "galley-etal-2006-scalable", "title": "Scalable Inference and Training of Context-Rich Syntactic Translation Models", "abstract": "Statistical MT has made great progress in the last few years, but current translation models are weak on re-ordering and target language fluency. Syntactic approaches seek to remedy these problems. In this paper, we take the framework for acquiring multi-level syntactic translation rules of (Galley et al., 2004) from aligned tree-string pairs, and present two main extensions of their approach: first, instead of merely computing a single derivation that minimally explains a sentence pair, we construct a large number of derivations that include contextually richer rules, and account for multiple interpretations of unaligned words. Second, we propose probability estimates and a training procedure for weighting these rules. 
We contrast different approaches on real examples, show that our estimates based on multiple derivations favor phrasal re-orderings that are linguistically better motivated, and establish that our larger rules provide a 3.63 BLEU point increase over minimal rules.", "phrases": ["translation model", "smt system", "syntax-based model", "string-to-tree model", "artificial constituent node"], "overall_score": 6.011433207036094, "scores": [3.237760056238049, 1.4688186048491025, 1.1214321738628703, 0.8887892760919226, 0.5660172704550905], "rank_score": 1.456563476299407} -{"id": "hu-etal-2018-shot", "title": "Few-Shot Charge Prediction with Discriminative Legal Attributes", "abstract": "Automatic charge prediction aims to predict the final charges according to the fact descriptions in criminal cases and plays a crucial role in legal assistant systems. Existing works on charge prediction perform adequately on those high-frequency charges but are not yet capable of predicting few-shot charges with limited cases. Moreover, there exist many confusing charge pairs, whose fact descriptions are fairly similar to each other. To address these issues, we introduce several discriminative attributes of charges as the internal mapping between fact descriptions and charges. These attributes provide additional information for few-shot charges, as well as effective signals for distinguishing confusing charges. More specifically, we propose an attribute-attentive charge prediction model to infer the attributes and charges simultaneously. Experimental results on real-world datasets demonstrate that our proposed model achieves significant and consistent improvements over other state-of-the-art baselines. Specifically, our model outperforms other baselines by more than 50% in the few-shot scenario. Our codes and datasets can be obtained from .", "phrases": ["charge prediction", "legal attribute", "discriminative attribute"], "overall_score": 4.037632222500994, "scores": [2.9909837969620603, 0.8493314182442725, 0.5284887730454173], "rank_score": 1.4562679960839169} -{"id": "zhang-etal-2018-learning-summarize", "title": "Learning to Summarize Radiology Findings", "abstract": "The Impression section of a radiology report summarizes crucial radiology findings in natural language and plays a central role in communicating these findings to physicians. However, the process of generating impressions by summarizing findings is time-consuming for radiologists and prone to errors. We propose to automate the generation of radiology impressions with neural sequence-to-sequence learning. We further propose a customized neural model for this task which learns to encode the study background information and use this information to guide the decoding process. On a large dataset of radiology reports collected from actual hospital studies, our model outperforms existing non-neural and neural baselines under the ROUGE metrics. In a blind experiment, a board-certified radiologist indicated that 67% of sampled system summaries are at least as good as the corresponding human-written summaries, suggesting significant clinical validity. 
To our knowledge our work represents the first attempt in this direction.", "phrases": ["summarization", "radiology finding", "reference", "medical knowledge"], "overall_score": 4.208489574432117, "scores": [3.1304912531753617, 1.281395194143585, 0.8420315063084405, 0.5702319162777624], "rank_score": 1.4560374674762877} -{"id": "rei-etal-2020-comet", "title": "COMET: A Neural Framework for MT Evaluation", "abstract": "We present COMET, a neural framework for training multilingual machine translation evaluation models which obtains new state-of-the-art levels of correlation with human judgements. Our framework leverages recent breakthroughs in cross-lingual pretrained language modeling resulting in highly multilingual and adaptable MT evaluation models that exploit information from both the source input and a target-language reference translation in order to more accurately predict MT quality. To showcase our framework, we train three models with different types of human judgements: Direct Assessments, Human-mediated Translation Edit Rate and Multidimensional Quality Metric. Our models achieve new state-of-the-art performance on the WMT 2019 Metrics shared task and demonstrate robustness to high-performing systems.", "phrases": ["neural framework", "reference", "comet", "evaluation metric", "bertscore"], "overall_score": 4.0364974516766035, "scores": [3.4241160924357805, 1.8702513333244317, 0.8603077737768838, 0.5799791084563458, 0.5446392621250257], "rank_score": 1.4558587140236936} -{"id": "takase-kiyono-2021-rethinking", "title": "Rethinking Perturbations in Encoder-Decoders for Fast Training", "abstract": "We often use perturbations to regularize neural models. For neural encoder-decoders, previous studies applied the scheduled sampling (Bengio et al., 2015) and adversarial perturbations (Sato et al., 2019) as perturbations but these methods require considerable computational time. Thus, this study addresses the question of whether these approaches are efficient enough for training time. We compare several perturbations in sequence-to-sequence problems with respect to computational time. Experimental results show that the simple techniques such as word dropout (Gal and Ghahramani, 2016) and random replacement of input tokens achieve comparable (or better) scores to the recently proposed perturbations, even though these simple methods are faster.", "phrases": ["perturbation", "encoder-decoder", "word dropout"], "overall_score": 2.82908827702458, "scores": [2.582070970254314, 0.9201787766520318, 0.8593415810355316], "rank_score": 1.4538637759806259} -{"id": "dyer-etal-2015-transition", "title": "Transition-Based Dependency Parsing with Stack Long Short-Term Memory", "abstract": "This work was sponsored in part by the U. S. Army Research Laboratory and the U. S. 
Army Research Office under contract/grant number W911NF-10-1-0533, and in part by NSF CAREER grant IIS-1054319. Miguel Ballesteros is supported by the European Commission under the contract numbers FP7-ICT-610411 (project MULTISENSOR) and H2020-RIA-645012 (project KRISTINA).", "phrases": ["dependency parsing", "stack lstm", "pos tag", "short-term memory network", "transition-based method"], "overall_score": 6.294169103556009, "scores": [3.455622312799307, 1.8103976738401182, 0.8632637800615413, 0.5713879867727973, 0.5661911644124061], "rank_score": 1.453372583577234} -{"id": "nothman-etal-2012-event", "title": "Event Linking: Grounding Event Reference in a News Archive", "abstract": "Interpreting news requires identifying its constituent events. Events are complex linguistically and ontologically, so disambiguating their reference is challenging. We introduce event linking, which canonically labels an event reference with the article where it was first reported. This implicitly relaxes coreference to co-reporting, and will practically enable augmenting news archives with semantic hyperlinks. We annotate and analyse a corpus of 150 documents, extracting 501 links to a news archive with reasonable inter-annotator agreement.", "phrases": ["event reference", "coreference", "news archive", "inter-annotator agreement"], "overall_score": 3.0221386513522353, "scores": [2.3908858258058414, 2.3006829110615854, 0.5636651367468735, 0.5581320532986727], "rank_score": 1.4533414817282433} -{"id": "johnson-etal-2007-improving", "title": "Improving Translation Quality by Discarding Most of the Phrasetable", "abstract": "It is possible to reduce the bulk of phrasetables for Statistical Machine Translation using a technique based on the significance testing of phrase pair co-occurrence in the parallel corpus. The savings can be quite substantial (up to 90%) and cause no reduction in BLEU score. In some cases, an improvement in BLEU is obtained at the same time although the effect is less pronounced if state-of-the-art phrasetable smoothing is employed.", "phrases": ["translation quality", "significance testing", "bleu score", "phrase table"], "overall_score": 4.941689526516076, "scores": [2.143762837187262, 1.9023016916609163, 1.2440166549418206, 0.5216244857011217], "rank_score": 1.4529264173727803} -{"id": "boyd-graber-etal-2007-topic", "title": "A Topic Model for Word Sense Disambiguation", "abstract": "We develop latent Dirichlet allocation with WORDNET (LDAWN), an unsupervised probabilistic topic model that includes word sense as a hidden variable. We develop a probabilistic posterior inference algorithm for simultaneously disambiguating a corpus and learning the domains in which to consider each word. Using the WORDNET hierarchy, we embed the construction of Abney and Light (1999) in the topic model and show that automatically learned domains improve WSD accuracy compared to alternative contexts.", "phrases": ["topic model", "word sense disambiguation", "ldawn"], "overall_score": 2.8268949710423126, "scores": [2.8448575148711117, 0.9872908750986757, 0.5260615290462999], "rank_score": 1.452736639672029} -{"id": "tsvetkov-etal-2014-metaphor", "title": "Metaphor Detection with Cross-Lingual Model Transfer", "abstract": "We show that it is possible to reliably discriminate whether a syntactic construction is meant literally or metaphorically using lexical semantic features of the words that participate in the construction.
Our model is constructed using English resources, and we obtain state-of-the-art performance relative to previous work in this language. Using a model transfer approach by pivoting through a bilingual dictionary, we show our model can identify metaphoric expressions in other languages. We provide results on three new test sets in Spanish, Farsi, and Russian. The results support the hypothesis that metaphors are conceptual, rather than lexical, in nature.", "phrases": ["cross-lingual model transfer", "metaphor detection", "abstractness", "sentiment analysis", "language processing application"], "overall_score": 5.711641588513218, "scores": [2.794047046524472, 0.946516646355987, 1.74798859856777, 1.2274644173953204, 0.5473287724204221], "rank_score": 1.4526690962527944} -{"id": "bojar-etal-2016-findings", "title": "Findings of the 2016 Conference on Machine Translation", "abstract": "This paper presents the results of the WMT16 shared tasks, which included five machine translation (MT) tasks (standard news, IT-domain, biomedical, multimodal, pronoun), three evaluation tasks (metrics, tuning, run-time estimation of MT quality), and an automatic post-editing task and bilingual document alignment task. This year, 102 MT systems from 24 institutions (plus 36 anonymized online systems) were submitted to the 12 translation directions in the news translation task. The IT-domain task received 31 submissions from 12 institutions in 7 directions and the Biomedical task received 15 submissions systems from 5 institutions. Evaluation was both automatic and manual (relative ranking and 100-point scale assessments). The quality estimation task had three subtasks, with a total of 14 teams, submitting 39 entries. The automatic post-editing task had a total of 6 teams, submitting 11 entries.", "phrases": ["conference", "machine translation", "automatic post-editing task", "online system", "state-of-the-art result"], "overall_score": 4.02711827009499, "scores": [3.83059062727043, 1.6974294866612938, 0.5955706910368892, 0.5933891047878782, 0.5453995369177365], "rank_score": 1.4524758893348457} -{"id": "kikuchi-etal-2016-controlling", "title": "Controlling Output Length in Neural Encoder-Decoders", "abstract": "Neural encoder-decoder models have shown great success in many sequence generation tasks. However, previous work has not investigated situations in which we would like to control the length of encoder-decoder outputs. This capability is crucial for applications such as text summarization, in which we have to generate concise summaries with a desired length. In this paper, we propose methods for controlling the output sequence length for neural encoder-decoder models: two decoding-based methods and two learning-based methods. Results show that our learning-based methods have the capability to control length without degrading summary quality in a summarization task.", "phrases": ["output length", "encoder-decoder model", "sentence compression"], "overall_score": 5.163962894361962, "scores": [2.5085019265584716, 1.3059602706467868, 0.5428857797452717], "rank_score": 1.4524493256501767} -{"id": "eger-mehler-2016-linearity", "title": "On the Linearity of Semantic Change: Investigating Meaning Variation via Dynamic Graph Models", "abstract": "We consider two graph models of semantic change. The first is a time-series model that relates embedding vectors from one time period to embedding vectors of previous time periods. 
In the second, we construct one graph for each word: nodes in this graph correspond to time points and edge weights to the similarity of the word\u2019s meaning across two time points. We apply our two models to corpora across three different languages. We find that semantic change is linear in two senses. Firstly, today\u2019s embedding vectors (= meaning) of words can be derived as linear combinations of embedding vectors of their neighbors in previous time periods. Secondly, self-similarity of words decays linearly in time. We consider both findings as new laws/hypotheses of semantic change.", "phrases": ["semantic change", "time period", "self-similarity"], "overall_score": 3.3439643257315432, "scores": [3.2537727992210432, 0.5660097098482508, 0.5370132539703177], "rank_score": 1.4522652543465373} -{"id": "begum-etal-2008-dependency", "title": "Dependency Annotation Scheme for Indian Languages", "abstract": "The paper introduces a dependency annotation effort which aims to fully annotate a million word Hindi corpus. It is the first attempt of its kind to develop a large scale tree-bank for an Indian language. In this paper we provide the motivation for following the Paninian framework as the annotation scheme and argue that the Paninian framework is better suited to model the various linguistic phenomena manifest in Indian languages. We present the basic annotation scheme. We also show how the scheme handles some phenomenon such as complex verbs, ellipses, etc. Empirical results of some experiments done on the currently annotated sentences are also reported.", "phrases": ["indian language", "paninian framework", "dependency annotation scheme"], "overall_score": 4.350185459281738, "scores": [2.326220079449372, 0.9853367061193352, 1.044825956992819], "rank_score": 1.4521275808538423} -{"id": "petrovic-etal-2012-using", "title": "Using paraphrases for improving first story detection in news and Twitter", "abstract": "First story detection (FSD) involves identifying first stories about events from a continuous stream of documents. A major problem in this task is the high degree of lexical variation in documents which makes it very difficult to detect stories that talk about the same event but expressed using different words. We suggest using paraphrases to alleviate this problem, making this the first work to use paraphrases for FSD. We show a novel way of integrating paraphrases with locality sensitive hashing (LSH) in order to obtain an efficient FSD system that can scale to very large datasets. Our system achieves state-of-the-art results on the first story detection task, beating both the best supervised and unsupervised systems. To test our approach on large data, we construct a corpus of events for Twitter, consisting of 50 million documents, and show that paraphrasing is also beneficial in this domain.", "phrases": ["paraphrase", "story detection", "twitter"], "overall_score": 2.336910152385157, "scores": [1.846221127788715, 1.6988705466461573, 0.8109200753769001], "rank_score": 1.452003916603924} -{"id": "beltagy-etal-2019-scibert", "title": "SciBERT: A Pretrained Language Model for Scientific Text", "abstract": "Obtaining large-scale annotated data for NLP tasks in the scientific domain is challenging and expensive. We release SciBERT, a pretrained language model based on BERT (Devlin et. al., 2018) to address the lack of high-quality, large-scale labeled scientific data. 
SciBERT leverages unsupervised pretraining on a large multi-domain corpus of scientific publications to improve performance on downstream scientific NLP tasks. We evaluate on a suite of tasks including sequence tagging, sentence classification and dependency parsing, with datasets from a variety of scientific domains. We demonstrate statistically significant improvements over BERT and achieve new state-of-the-art results on several of these tasks. The code and pretrained models are available at .", "phrases": ["language model", "scientific text", "downstream task", "biomedical domain", "pre-trained model"], "overall_score": 6.265573504208681, "scores": [1.8034555947070987, 2.329728531541373, 1.348628892831898, 0.9036179609348524, 0.8706092677444408], "rank_score": 1.4512080495519324} -{"id": "lindemann-etal-2019-compositional", "title": "Compositional Semantic Parsing across Graphbanks", "abstract": "Most semantic parsers that map sentences to graph-based meaning representations are hand-designed for specific graphbanks. We present a compositional neural semantic parser which achieves, for the first time, competitive accuracies across a diverse range of graphbanks. Incorporating BERT embeddings and multi-task learning improves the accuracy further, setting new states of the art on DM, PAS, PSD, AMR 2015 and EDS.", "phrases": ["graphbank", "pas", "compositional structure"], "overall_score": 3.4795989431675967, "scores": [2.9074316783021388, 0.8407908113663598, 0.6050939157308903], "rank_score": 1.451105468466463} -{"id": "yang-etal-2006-kernel", "title": "Kernel-Based Pronoun Resolution with Structured Syntactic Knowledge", "abstract": "Syntactic knowledge is important for pronoun resolution. Traditionally, the syntactic information for pronoun resolution is represented in terms of features that have to be selected and defined heuristically. In the paper, we propose a kernel-based method that can automatically mine the syntactic information from the parse trees for pronoun resolution. Specifically, we utilize the parse trees directly as a structured feature and apply kernel functions to this feature, as well as other normal features, to learn the resolution classifier. In this way, our approach avoids the efforts of decoding the parse trees into the set of flat syntactic features. The experimental results show that our approach can bring significant performance improvement and is reliably effective for the pronoun resolution task.", "phrases": ["pronoun resolution", "syntactic knowledge", "convolution tree kernel"], "overall_score": 3.017435224517428, "scores": [2.9268454110746642, 0.8863873260755869, 0.5400060974647188], "rank_score": 1.4510796115383233} -{"id": "karpukhin-etal-2019-training", "title": "Training on Synthetic Noise Improves Robustness to Natural Noise in Machine Translation", "abstract": "Contemporary machine translation systems achieve greater coverage by applying subword models such as BPE and character-level CNNs, but these methods are highly sensitive to orthographical variations such as spelling mistakes. We show how training on a mild amount of random synthetic noise can dramatically improve robustness to these variations, without diminishing performance on clean text. 
We focus on translation performance on natural typos, and show that robustness to such noise can be achieved using a balanced diet of simple synthetic noises at training time, without access to the natural noise data or distribution.", "phrases": ["synthetic noise", "robustness", "machine translation", "training sample", "adversarial example"], "overall_score": 4.416479302308694, "scores": [2.793826364998011, 2.4487828872454873, 0.9174620399090537, 0.5574183402759535, 0.5356664743978372], "rank_score": 1.4506312213652686} -{"id": "ruder-plank-2018-strong", "title": "Strong Baselines for Neural Semi-Supervised Learning under Domain Shift", "abstract": "Novel neural models have been proposed in recent years for learning under domain shift. Most models, however, only evaluate on a single task, on proprietary datasets, or compare to weak baselines, which makes comparison of models difficult. In this paper, we re-evaluate classic general-purpose bootstrapping approaches in the context of neural networks under domain shifts vs. recent neural approaches and propose a novel multi-task tri-training method that reduces the time and space complexity of classic tri-training. Extensive experiments on two benchmarks for part-of-speech tagging and sentiment analysis are negative: while our novel method establishes a new state-of-the-art for sentiment analysis, it does not fare consistently the best. More importantly, we arrive at the somewhat surprising conclusion that classic tri-training, with some additions, outperforms the state-of-the-art for NLP. Hence classic approaches constitute an important and strong baseline.", "phrases": ["domain shift", "tri-training", "strong baseline"], "overall_score": 4.1089828500314045, "scores": [2.2113932184680776, 1.56526199162326, 0.5742167692302538], "rank_score": 1.4502906597738638} -{"id": "krishnamurthy-mitchell-2012-weakly", "title": "Weakly Supervised Training of Semantic Parsers", "abstract": "We present a method for training a semantic parser using only a knowledge base and an unlabeled text corpus, without any individually annotated sentences. Our key observation is that multiple forms of weak supervision can be combined to train an accurate semantic parser: semantic supervision from a knowledge base, and syntactic supervision from dependency-parsed sentences. We apply our approach to train a semantic parser that uses 77 relations from Freebase in its knowledge representation. This semantic parser extracts instances of binary relations with state-of-the-art accuracy, while simultaneously recovering much richer semantic structures, such as conjunctions of multiple relations with partially shared arguments. We demonstrate recovery of this richer structure by extracting logical forms from natural language queries against Freebase. On this task, the trained semantic parser achieves 80% precision and 56% recall, despite never having seen an annotated logical form.", "phrases": ["semantic parser", "knowledge base", "freebase", "distant supervision"], "overall_score": 4.27014543977818, "scores": [3.2914935655586377, 1.0613279110034997, 0.8718246209995745, 0.5763169653400604], "rank_score": 1.4502407657254432} -{"id": "kuang-etal-2018-modeling", "title": "Modeling Coherence for Neural Machine Translation with Dynamic and Topic Caches", "abstract": "Sentences in a well-formed text are connected to each other via various links to form the cohesive structure of the text. 
Current neural machine translation (NMT) systems translate a text in a conventional sentence-by-sentence fashion, ignoring such cross-sentence links and dependencies. This may lead to generate an incoherent target text for a coherent source text. In order to handle this issue, we propose a cache-based approach to modeling coherence for neural machine translation by capturing contextual information either from recently translated sentences or the entire document. Particularly, we explore two types of caches: a dynamic cache, which stores words from the best translation hypotheses of preceding sentences, and a topic cache, which maintains a set of target-side topical words that are semantically related to the document to be translated. On this basis, we build a new layer to score target words in these two caches with a cache-based neural model. Here the estimated probabilities from the cache-based neural model are combined with NMT probabilities into the final word prediction probabilities via a gating mechanism. Finally, the proposed cache-based neural model is trained jointly with NMT system in an end-to-end manner. Experiments and analysis presented in this paper demonstrate that the proposed cache-based model achieves substantial improvements over several state-of-the-art SMT and NMT baselines.", "phrases": ["coherence", "neural machine translation", "topic cache", "translation quality"], "overall_score": 4.545682296581712, "scores": [2.4132524970958875, 1.6930929039518172, 1.149914012698199, 0.5427400211609502], "rank_score": 1.4497498587267135} -{"id": "sokolov-etal-2017-shared", "title": "A Shared Task on Bandit Learning for Machine Translation", "abstract": "We introduce and describe the results of a novel shared task on bandit learning for machine translation. The task was organized jointly by Amazon and Heidelberg University for the first time at the Second Conference on Machine Translation (WMT 2017). The goal of the task is to encourage research on learning machine translation from weak user feedback instead of human references or post-edits. On each of a sequence of rounds, a machine translation system is required to propose a translation for an input, and receives a real-valued estimate of the quality of the proposed translation for learning. This paper describes the shared task's learning and evaluation setup, using services hosted on Amazon Web Services (AWS), the data and evaluation metrics, and the results of various machine translation architectures and learning protocols.", "phrases": ["bandit learning", "machine translation", "feedback", "post-edit"], "overall_score": 2.59676915500471, "scores": [2.723503218522629, 2.0270638586455343, 0.5249154154160103, 0.5216553478503531], "rank_score": 1.4492844601086317} -{"id": "liu-etal-2022-makes", "title": "What Makes Good In-Context Examples for GPT-3?", "abstract": "GPT-3 has attracted lots of attention due to its superior performance across a wide range of NLP tasks, especially with its in-context learning abilities. Despite its success, we found that the empirical results of GPT-3 depend heavily on the choice of in-context examples. In this work, we investigate whether there are more effective strategies for judiciously selecting in-context examples (relative to random sampling) that better leverage GPT-3's in-context learning capabilities. 
Inspired by the recent success of leveraging a retrieval module to augment neural networks, we propose to retrieve examples that are semantically-similar to a test query sample to formulate its corresponding prompt. Intuitively, the examples selected with such a strategy may serve as more informative inputs to unleash GPT-3's power of text generation. We evaluate the proposed approach on several natural language understanding and generation benchmarks, where the retrieval-based prompt selection approach consistently outperforms the random selection baseline. Moreover, it is observed that the sentence encoders fine-tuned on task-related datasets yield even more helpful retrieval results. Notably, significant gains are observed on tasks such as table-to-text generation (44.3% on the ToTTo dataset) and open-domain question answering (45.5% on the NQ dataset).", "phrases": ["in-context example", "gpt-3", "few-shot learning"], "overall_score": 3.7173261687904016, "scores": [2.76442349240356, 1.0025907378363845, 0.5808215170545076], "rank_score": 1.449278582431484} -{"id": "prasad-etal-2014-reflections", "title": "Reflections on the Penn Discourse TreeBank, Comparable Corpora, and Complementary Annotation", "abstract": "The Penn Discourse Treebank (PDTB) was released to the public in 2008. It remains the largest manually annotated corpus of discourse relations to date. Its focus on discourse relations that are either lexically-grounded in explicit discourse connectives or associated with sentential adjacency has not only facilitated its use in language technology and psycholinguistics but also has spawned the annotation of comparable corpora in other languages and genres.Given this situation, this paper has four aims: (1) to provide a comprehensive introduction to the PDTB for those who are unfamiliar with it; (2) to correct some wrong (or perhaps inadvertent) assumptions about the PDTB and its annotation that may have weakened previous results or the performance of decision procedures induced from the data; (3) to explain variations seen in the annotation of comparable resources in other languages and genres, which should allow developers of future comparable resources to recognize whether the variations are relevant to them; and (4) to enumerate and explain relationships between PDTB annotation and complementary annotation of other linguistic phenomena. The paper draws on work done by ourselves and others since the corpus was released.", "phrases": ["penn discourse treebank", "comparable corpora", "annotator", "pdtb annotation"], "overall_score": 4.017513175556428, "scores": [2.9782692497847205, 1.7029233830506147, 0.5615466381844438, 0.5533070640615505], "rank_score": 1.4490115837703326} -{"id": "el-kholy-habash-2010-orthographic", "title": "Orthographic and Morphological Processing for English-Arabic Statistical Machine Translation", "abstract": "Much of the work on Statistical Machine Translation (SMT) from morphologically rich languages has shown that morphological tokenization and orthographic normalization help improve SMT quality because of the sparsity reduction they contribute. In this paper, we study the effect of these processes on SMT when translating into a morphologically rich language, namely Arabic. We explore a space of tokenization schemes and normalization options. We only evaluate on detokenized and orthographically correct (enriched) output. Our results show that the best performing tokenization scheme is that of the Penn Arabic Treebank. 
Additionally, training on orthographically normalized (reduced) text then jointly enriching and detokenizing the output outperforms training on enriched text.", "phrases": ["statistical machine translation", "rich language", "tokenization", "arabic"], "overall_score": 4.186033175596868, "scores": [2.4408241710750365, 1.441247685013708, 1.38808041437693, 0.5229200762469656], "rank_score": 1.4482680866781599} -{"id": "collins-koo-2005-discriminative", "title": "Discriminative Reranking for Natural Language Parsing", "abstract": "This article considers approaches which rerank the output of an existing probabilistic parser. The base parser produces a set of candidate parses for each input sentence, with associated probabilities that define an initial ranking of these parses. A second model then attempts to improve upon this initial ranking, using additional features of the tree as evidence. The strength of our approach is that it allows a tree to be represented as an arbitrary set of features, without concerns about how these features interact or overlap and without the need to define a derivation or a generative model which takes these features into account. We introduce a new method for the reranking task, based on the boosting approach to ranking problems described in Freund et al. (1998). We apply the boosting method to parsing the Wall Street Journal treebank. The method combined the log-likelihood under a baseline model (that of Collins [1999]) with evidence from an additional 500,000 features over parse trees that were not included in the original model. The new model achieved 89.75 F-measure, a 13% relative decrease in F-measure error over the baseline model's score of 88.2. The article also introduces a new algorithm for the boosting approach which takes advantage of the sparsity of the feature space in the parsing data. Experiments show significant efficiency gains for the new algorithm over the obvious implementation of the boosting approach. We argue that the method is an appealing alternative, in terms of both simplicity and efficiency, to work on feature selection methods within log-linear (maximum-entropy) models. Although the experiments in this article are on natural language parsing (NLP), the approach should be applicable to many other NLP problems which are naturally framed as ranking tasks, for example, speech recognition, machine translation, or natural language generation.", "phrases": ["generative model", "collins", "feature space", "discriminative reranking", "syntactic parsing"], "overall_score": 6.416864302102624, "scores": [2.715072875789202, 1.2904692104843858, 1.2707330057757655, 1.0514262428195935, 0.9134740436501777], "rank_score": 1.4482350757038247} -{"id": "valenzuela-escarcega-etal-2016-odins", "title": "Odin's Runes: A Rule Language for Information Extraction", "abstract": "Odin is an information extraction framework that applies cascades of finite state automata over both surface text and syntactic dependency graphs. Support for syntactic patterns allow us to concisely define relations that are otherwise difficult to express in languages such as Common Pattern Specification Language (CPSL), which are currently limited to shallow linguistic features. The interaction of lexical and syntactic automata provides robustness and flexibility when writing extraction rules.
This paper describes Odin's declarative language for writing these cascaded automata.", "phrases": ["rule language", "information extraction framework", "odin"], "overall_score": 3.1789619440333547, "scores": [2.254709133182268, 1.4676388507441682, 0.6180758081931657], "rank_score": 1.446807930706534} -{"id": "mccrae-doyle-2019-adapting", "title": "Adapting Term Recognition to an Under-Resourced Language: the Case of Irish", "abstract": "Automatic Term Recognition (ATR) is an important method for the summarization and analysis of large corpora, and normally requires a significant amount of linguistic input, in particular the use of part-of-speech taggers. For an under-resourced language such as Irish, the resources necessary for this may be scarce or entirely absent. We evaluate two methods for the automatic extraction of terms, based on the small part-of-speech-tagged corpora that are available for Irish and on a large terminology list, and show that both methods can produce viable term extractors. We evaluate this with a newly constructed corpus that is the first available corpus for term extraction in Irish. Our results shine some light on the challenge of adapting natural language processing systems to under-resourced scenarios.", "phrases": ["term recognition", "under-resourced language", "irish"], "overall_score": 1.5894419559238617, "scores": [1.7854813605323936, 1.6664593803120547, 0.8883765093407019], "rank_score": 1.4467724167283835} -{"id": "zhao-xing-2006-bitam", "title": "BiTAM: Bilingual Topic AdMixture Models for Word Alignment", "abstract": "We propose a novel bilingual topical admixture (BiTAM) formalism for word alignment in statistical machine translation. Under this formalism, the parallel sentence-pairs within a document-pair are assumed to constitute a mixture of hidden topics; each word-pair follows a topic-specific bilingual translation model. Three BiTAM models are proposed to capture topic sharing at different levels of linguistic granularity (i.e., at the sentence or word levels). These models enable word-alignment process to leverage topical contents of document-pairs. Efficient variational approximation algorithms are designed for inference and parameter estimation. With the inferred latent topics, BiTAM models facilitate coherent pairing of bilingual linguistic entities that share common topical aspects. Our preliminary experiments show that the proposed models improve word alignment accuracy, and lead to better translation quality.", "phrases": ["word alignment", "mixture", "hidden topic", "different level", "topical content"], "overall_score": 4.180514326511289, "scores": [4.00026347056721, 0.9598911740137673, 0.8697033842890788, 0.8606040161328226, 0.5413314346604241], "rank_score": 1.4463586959326609} -{"id": "sagae-lavie-2006-parser", "title": "Parser Combination by Reparsing", "abstract": "We present a novel parser combination scheme that works by reparsing input sentences once they have already been parsed by several different parsers.
We apply this idea to dependency and constituent parsing, generating results that surpass state-of-the-art accuracy levels for individual parsers.", "phrases": ["reparsing", "parser combination", "dependency graph"], "overall_score": 5.099832632440478, "scores": [3.1974472753145546, 0.5776908061009566, 0.5634704614494465], "rank_score": 1.4462028476216526} -{"id": "tomanek-olsson-2009-web", "title": "A Web Survey on the Use of Active Learning to Support Annotation of Text Data", "abstract": "As supervised machine learning methods for addressing tasks in natural language processing (NLP) prove increasingly viable, the focus of attention is naturally shifted towards the creation of training data. The manual annotation of corpora is a tedious and time consuming process. To obtain high-quality annotated data constitutes a bottleneck in machine learning for NLP today. Active learning is one way of easing the burden of annotation. This paper presents a first probe into the NLP research community concerning the nature of the annotation projects undertaken in general, and the use of active learning as annotation support in particular.", "phrases": ["web survey", "active learning", "annotation project"], "overall_score": 2.8124169054439108, "scores": [2.98395696926685, 0.7928778646252649, 0.5590543233887544], "rank_score": 1.4452963857602896} -{"id": "ng-etal-2013-conll", "title": "The CoNLL-2013 Shared Task on Grammatical Error Correction", "abstract": "The CoNLL-2013 shared task was devoted to grammatical error correction. In this paper, we give the task definition, present the data sets, and describe the evaluation metric and scorer used in the shared task. We also give an overview of the various approaches adopted by the participating teams, and present the evaluation results.", "phrases": ["conll-2013", "grammatical error correction", "learner", "content word", "punctuation"], "overall_score": 5.401058190231598, "scores": [4.423676447816872, 0.8266052979323202, 0.8688235628270111, 0.5703387976433064, 0.5357239019579865], "rank_score": 1.4450336016354994} -{"id": "yang-etal-2018-modeling", "title": "Modeling Localness for Self-Attention Networks", "abstract": "Self-attention networks have proven to be of profound value for its strength of capturing global dependencies. In this work, we propose to model localness for self-attention networks, which enhances the ability of capturing useful local context. We cast localness modeling as a learnable Gaussian bias, which indicates the central and scope of the local region to be paid more attention. The bias is then incorporated into the original attention distribution to form a revised distribution. To maintain the strength of capturing long distance dependencies while enhance the ability of capturing short-range dependencies, we only apply localness modeling to lower layers of self-attention networks. Quantitative and qualitative analyses on Chinese-English and English-German translation tasks demonstrate the effectiveness and universality of the proposed approach.", "phrases": ["localness", "self-attention network", "gaussian bias", "scope", "translation task"], "overall_score": 4.176182349555162, "scores": [2.0695122974825133, 2.0248670342395827, 1.7217382010393825, 0.8860304372992482, 0.5221517037551258], "rank_score": 1.4448599347631705} -{"id": "kushman-etal-2014-learning", "title": "Learning to Automatically Solve Algebra Word Problems", "abstract": "We present an approach for automatically learning to solve algebra word problems. 
Our algorithm reasons across sentence boundaries to construct and solve a system of linear equations, while simultaneously recovering an alignment of the variables and numbers in these equations to the problem text. The learning algorithm uses varied supervision, including either full equations or just the final answers. We evaluate performance on a newly gathered corpus of algebra word problems, demonstrating that the system can correctly answer almost 70% of the questions in the dataset. This is, to our knowledge, the first learning result for this task.", "phrases": ["algebra word problem", "operation", "hand-crafted feature", "learning method", "mwp solver"], "overall_score": 5.7360469426796605, "scores": [4.4252938306183855, 0.8349973874147737, 0.8270416692908941, 0.5939415661794731, 0.5424348701111208], "rank_score": 1.4447418647229295} -{"id": "grave-etal-2018-learning", "title": "Learning Word Vectors for 157 Languages", "abstract": "Distributed word representations, or word vectors, have recently been applied to many tasks in natural language processing, leading to state-of-the-art performance. A key ingredient to the successful application of these representations is to train them on very large corpora, and use these pre-trained models in downstream tasks. In this paper, we describe how we trained such high quality word representations for 157 languages. We used two sources of data to train these models: the free online encyclopedia Wikipedia and data from the common crawl project. We also introduce three new word analogy datasets to evaluate these word vectors, for French, Hindi and Polish. Finally, we evaluate our pre-trained word vectors on 10 languages for which evaluation datasets exists, showing very strong performance compared to previous models.", "phrases": ["word vector", "pre-trained model", "wikipedia", "hindi", "fasttext"], "overall_score": 5.291528494908154, "scores": [3.2621215624930566, 1.363501896238576, 1.1139791507400656, 0.9321756192755226, 0.5500580688600085], "rank_score": 1.4443672595214458} -{"id": "liu-etal-2010-tesla", "title": "TESLA: Translation Evaluation of Sentences with Linear-Programming-Based Analysis", "abstract": "We present TESLA-M and TESLA, two novel automatic machine translation evaluation metrics with state-of-the-art performances. TESLA-M builds on the success of METEOR and MaxSim, but employs a more expressive linear programming framework. TESLA further exploits parallel texts to build a shallow semantic representation. 
We evaluate both on the WMT 2009 shared evaluation task and show that they outperform all participating systems in most tasks.", "phrases": ["translation evaluation", "sentences", "tesla"], "overall_score": 3.3233066717521624, "scores": [2.5164034488475453, 0.9395786121089097, 0.8738991866862215], "rank_score": 1.4432937492142255} -{"id": "agirre-etal-2016-semeval", "title": "SemEval-2016 Task 1: Semantic Textual Similarity, Monolingual and Cross-Lingual Evaluation", "abstract": "Paper presented at the 10th International Workshop on Semantic Evaluation (SemEval-2016), held on June 16-17, 2016, in San Diego, California.", "phrases": ["semantic textual similarity", "semeval", "cross-lingual sub-task"], "overall_score": 4.586356021254398, "scores": [2.5519638738862604, 1.171106930762459, 0.6063297650882622], "rank_score": 1.4431335232456606} -{"id": "hamoui-etal-2020-flodusta", "title": "FloDusTA: Saudi Tweets Dataset for Flood, Dust Storm, and Traffic Accident Events", "abstract": "The rise of social media platforms makes it a valuable information source of recent events and users' perspective towards them. Twitter has been one of the most important communication platforms in recent years. Event detection, one of the information extraction aspects, involves identifying specified types of events in the text. Detecting events from tweets can help to predict real-world events precisely. A serious challenge that faces Arabic event detection is the lack of Arabic datasets that can be exploited in detecting events. This paper will describe FloDusTA, which is a dataset of tweets that we have built for the purpose of developing an event detection system. The dataset contains tweets written in both Modern Standard Arabic and Saudi dialect. The process of building the dataset, starting from tweet collection to annotation by human annotators, will be presented. The tweets are labeled with four labels: flood, dust storm, traffic accident, and non-event. The dataset was tested for classification and the result was strongly encouraging.", "phrases": ["flood", "dust storm", "traffic accident event"], "overall_score": 1.5851849334718442, "scores": [1.7733868931015384, 1.716539530381405, 0.8387661002288346], "rank_score": 1.4428975079039261} -{"id": "zeman-etal-2017-conll", "title": "CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", "abstract": "The Conference on Computational Natural Language Learning (CoNLL) features a shared task, in which participants train and test their learning systems on the same data sets. In 2017, the task was devoted to learning dependency parsers for a large number of languages, in a real-world setting without any gold-standard annotation on input. All test sets followed a unified annotation scheme, namely that of Universal Dependencies. In this paper, we define the task and evaluation methodology, describe how the data sets were prepared, report and analyze the main results, and provide a brief categorization of the different approaches of the participating systems.", "phrases": ["shared task", "raw text", "universal dependency"], "overall_score": 4.954750050676697, "scores": [1.762875958632254, 1.7238632273388652, 0.8418296953036355], "rank_score": 1.4428562937582516} -{"id": "brooke-etal-2009-cross", "title": "Cross-Linguistic Sentiment Analysis: From English to Spanish", "abstract": "We explore the adaptation of English resources and techniques for text sentiment analysis to a new language, Spanish.
Our main focus is the modification of an existing English semantic orientation calculator and the building of dictionaries; however we also compare alternate approaches, including machine translation and Support Vector Machine classification. The results indicate that, although language-independent methods provide a decent baseline performance, there is also a significant cost to automation, and thus the best path to long-term improvement is through the inclusion of language-specific knowledge and resources.", "phrases": ["sentiment analysis", "spanish", "machine translation"], "overall_score": 3.459272398242482, "scores": [2.2400473399838545, 1.515858678706745, 0.5719799037902502], "rank_score": 1.44262864082695} -{"id": "srivastava-singh-2020-phinc", "title": "PHINC: A Parallel Hinglish Social Media Code-Mixed Corpus for Machine Translation", "abstract": "Code-mixing is the phenomenon of using more than one language in a sentence. In the multilingual communities, it is a very frequently observed pattern of communication on social media platforms. Flexibility to use multiple languages in one text message might help to communicate efficiently with the target audience. But, the noisy user-generated code-mixed text adds to the challenge of processing and understanding natural language to a much larger extent. Machine translation from monolingual source to the target language is a well-studied research problem. Here, we demonstrate that widely popular and sophisticated translation systems such as Google Translate fail at times to translate code-mixed text effectively. To address this challenge, we present a parallel corpus of the 13,738 code-mixed Hindi-English sentences and their corresponding human translation in English. In addition, we also propose a translation pipeline built on top of Google Translate. The evaluation of the proposed pipeline on PHINC demonstrates an increase in the performance of the underlying system. With minimal effort, we can extend the dataset and the proposed approach to other code-mixing language pairs.", "phrases": ["machine translation", "code-mixed language", "phinc"], "overall_score": 2.5843706784856697, "scores": [1.9742490327873425, 1.7833106191311177, 0.5695345639124669], "rank_score": 1.4423647386103091} -{"id": "petrov-etal-2006-learning", "title": "Learning Accurate, Compact, and Interpretable Tree Annotation", "abstract": "We present an automatic approach to tree annotation in which basic nonterminal symbols are alternately split and merged to maximize the likelihood of a training treebank. Starting with a simple X-bar grammar, we learn a new grammar whose nonterminals are subsymbols of the original nonterminals. In contrast with previous work, we are able to split various terminals to different degrees, as appropriate to the actual complexity in the data. Our grammars automatically learn the kinds of linguistic distinctions exhibited in previous work on manual tree annotation. On the other hand, our grammars are much more compact and substantially more accurate than previous work on automatic annotation.
Despite its simplicity, our best grammar achieves an F1 of 90.2% on the Penn Treebank, higher than fully lexicalized systems.", "phrases": ["berkeley parser", "latent annotation", "pcfg", "variable approach", "subcategorie"], "overall_score": 6.373578654300102, "scores": [2.2144141378283266, 1.6016804426655649, 1.2678892129550734, 1.0757251242773114, 1.0521133904313948], "rank_score": 1.4423644616315343} -{"id": "zribi-etal-2014-conventional", "title": "A Conventional Orthography for Tunisian Arabic", "abstract": "Tunisian Arabic is a dialect of the Arabic language spoken in Tunisia. Tunisian Arabic is an under-resourced language. It has neither a standard orthography nor large collections of written text and dictionaries. Actually, there is no strict separation between Modern Standard Arabic, the official language of the government, media and education, and Tunisian Arabic; the two exist on a continuum dominated by mixed forms. In this paper, we present a conventional orthography for Tunisian Arabic, following a previous effort on developing a conventional orthography for Dialectal Arabic (or CODA) demonstrated for Egyptian Arabic. We explain the design principles of CODA and provide a detailed description of its guidelines as applied to Tunisian Arabic.", "phrases": ["conventional orthography", "tunisian arabic", "dialect"], "overall_score": 3.3208999577427245, "scores": [2.2149502567548525, 0.8998259948724026, 1.211969328173967], "rank_score": 1.4422485266004073} -{"id": "guo-etal-2018-implicit", "title": "Implicit Discourse Relation Recognition using Neural Tensor Network with Interactive Attention and Sparse Learning", "abstract": "Implicit discourse relation recognition aims to understand and annotate the latent relations between two discourse arguments, such as temporal, comparison, etc. Most previous methods encode two discourse arguments separately, the ones considering pair specific clues ignore the bidirectional interactions between two arguments and the sparsity of pair patterns. In this paper, we propose a novel neural Tensor network framework with Interactive Attention and Sparse Learning (TIASL) for implicit discourse relation recognition. (1) We mine the most correlated word pairs from two discourse arguments to model pair specific clues, and integrate them as interactive attention into argument representations produced by the bidirectional long short-term memory network. Meanwhile, (2) the neural tensor network with sparse constraint is proposed to explore the deeper and the more important pair patterns so as to fully recognize discourse relations. The experimental results on PDTB show that our proposed TIASL framework is effective.", "phrases": ["neural tensor network", "discourse argument", "word pair"], "overall_score": 3.457577756841236, "scores": [2.912819790403865, 0.8906564411089959, 0.5222895298994842], "rank_score": 1.4419219204707818} -{"id": "nguyen-etal-2020-bertweet", "title": "BERTweet: A pre-trained language model for English Tweets", "abstract": "We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et al., 2019). 
Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al., 2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks: Part-of-speech tagging, Named-entity recognition and text classification. We release BERTweet under the MIT License to facilitate future research and applications on Tweet data. Our BERTweet is available at ", "phrases": ["language model", "english tweets", "pre-training procedure", "strong baseline", "part-of-speech tagging"], "overall_score": 4.637564745767356, "scores": [2.687335315606105, 2.5200677589736435, 0.8689874652622266, 0.5810587465871532, 0.5462531831391793], "rank_score": 1.4407404939136614} -{"id": "murray-etal-2010-generating", "title": "Generating and Validating Abstracts of Meeting Conversations: a User Study", "abstract": "In this paper we present a complete system for automatically generating natural language abstracts of meeting conversations. This system is comprised of components relating to interpretation of the meeting documents according to a meeting ontology, transformation or content selection from that source representation to a summary representation, and generation of new summary text. In a formative user study, we compare this approach to gold-standard human abstracts and extracts to gauge the usefulness of the different summary types for browsing meeting conversations. We find that our automatically generated summaries are ranked significantly higher than human-selected extracts on coherence and usability criteria. More generally, users demonstrate a strong preference for abstract-style summaries over extracts.", "phrases": ["abstract", "meeting conversation", "user study"], "overall_score": 3.1640191482973115, "scores": [1.7424028500886848, 1.723347865976381, 0.8542707978029075], "rank_score": 1.4400071712893245} -{"id": "jin-etal-2019-pubmedqa", "title": "PubMedQA: A Dataset for Biomedical Research Question Answering", "abstract": "We introduce PubMedQA, a novel biomedical question answering (QA) dataset collected from PubMed abstracts. The task of PubMedQA is to answer research questions with yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA instances. Each PubMedQA instance is composed of (1) a question which is either an existing research article title or derived from one, (2) a context which is the corresponding abstract without its conclusion, (3) a long answer, which is the conclusion of the abstract and, presumably, answers the research question, and (4) a yes/no/maybe answer which summarizes the conclusion. PubMedQA is the first QA dataset where reasoning over biomedical research texts, especially their quantitative contents, is required to answer the questions. Our best performing model, multi-phase fine-tuning of BioBERT with long answer bag-of-word statistics as additional supervision, achieves 68.1% accuracy, compared to single human performance of 78.0% accuracy and majority-baseline of 55.2% accuracy, leaving much room for improvement. 
PubMedQA is publicly available at .", "phrases": ["biomedical question", "abstract", "pubmedqa"], "overall_score": 3.314649814837631, "scores": [2.8637221333629546, 0.8263787255217684, 0.6285015131921322], "rank_score": 1.4395341240256183} -{"id": "schubert-tong-2003-extracting", "title": "Extracting and evaluating general world knowledge from the Brown Corpus", "abstract": "We have been developing techniques for extracting general world knowledge from miscellaneous texts by a process of approximate interpretation and abstraction, focusing initially on the Brown corpus. We apply interpretive rules to clausal patterns and patterns of modification, and concurrently abstract general \"possibilistic\" propositions from the resulting formulas. Two examples are \"A person may believe a proposition\", and \"Children may live with relatives\". Our methods currently yield over 117,000 such propositions (of variable quality) for the Brown corpus (more than 2 per sentence). We report here on our efforts to evaluate these results with a judging scheme aimed at determining how many of these propositions pass muster as \"reasonable general claims\" about the world in the opinion of human judges. We find that nearly 60% of the extracted propositions are favorably judged according to our scheme by any given judge. The percentage unanimously judged to be reasonable claims by multiple judges is lower, but still sufficiently high to suggest that our techniques may be of some use in tackling the long-standing \"knowledge acquisition bottleneck\" in AI.", "phrases": ["world knowledge", "brown corpus", "proposition"], "overall_score": 2.7996841999934268, "scores": [1.9197984566967567, 1.8553760904985062, 0.5410846614109479], "rank_score": 1.4387530695354036} -{"id": "ding-etal-2019-event-representation", "title": "Event Representation Learning Enhanced with External Commonsense Knowledge", "abstract": "Prior work has proposed effective methods to learn event representations that can capture syntactic and semantic information over text corpus, demonstrating their effectiveness for downstream tasks such as script event prediction. On the other hand, events extracted from raw texts lacks of commonsense knowledge, such as the intents and emotions of the event participants, which are useful for distinguishing event pairs when there are only subtle differences in their surface realizations. To address this issue, this paper proposes to leverage external commonsense knowledge about the intent and sentiment of the event. Experiments on three event-related tasks, i.e., event similarity, script event prediction and stock market prediction, show that our model obtains much better event embeddings for the tasks, achieving 78% improvements on hard similarity task, yielding more precise inferences on subsequent events under given contexts, and better accuracies in predicting the volatilities of the stock market.", "phrases": ["external commonsense knowledge", "event pair", "event representation learning"], "overall_score": 3.5740811869551505, "scores": [2.2596247670718848, 1.5156351218094914, 0.5396883454034896], "rank_score": 1.438316078094955} -{"id": "navigli-velardi-2010-learning", "title": "Learning Word-Class Lattices for Definition and Hypernym Extraction", "abstract": "Definition extraction is the task of automatically identifying definitional sentences within texts. The task has proven useful in many research areas including ontology learning, relation extraction and question answering. 
However, current approaches -- mostly focused on lexico-syntactic patterns -- suffer from both low recall and precision, as definitional sentences occur in highly variable syntactic structures. In this paper, we propose Word-Class Lattices (WCLs), a generalization of word lattices that we use to model textual definitions. Lattices are learned from a dataset of definitions from Wikipedia. Our method is applied to the task of definition and hypernym extraction and compares favorably to other pattern generalization methods proposed in the literature.", "phrases": ["definition", "hypernym extraction", "wikipedia", "supervised approach"], "overall_score": 4.683810377895679, "scores": [3.7546434540209397, 0.9222940733181223, 0.5481397817770635, 0.5252868353913646], "rank_score": 1.4375910361268724} -{"id": "asher-etal-2016-discourse", "title": "Discourse Structure and Dialogue Acts in Multiparty Dialogue: the STAC Corpus", "abstract": "This paper describes the STAC resource, a corpus of multi-party chats annotated for discourse structure in the style of SDRT (Asher and Lascarides, 2003; Lascarides and Asher, 2009). The main goal of the STAC project is to study the discourse structure of multi-party dialogues in order to understand the linguistic strategies adopted by interlocutors to achieve their conversational goals, especially when these goals are opposed. The STAC corpus is not only a rich source of data on strategic conversation, but also the first corpus that we are aware of that provides full discourse structures for multi-party dialogues. It has other remarkable features that make it an interesting resource for other topics: interleaved threads, creative language, and interactions between linguistic and extra-linguistic contexts.", "phrases": ["multiparty dialogue", "stac corpus", "discourse structure"], "overall_score": 3.57207899225188, "scores": [2.4739916557144017, 0.9246505657368399, 0.9138887855666253], "rank_score": 1.4375103356726224} -{"id": "diab-etal-2009-committed", "title": "Committed Belief Annotation and Tagging", "abstract": "We present a preliminary pilot study of belief annotation and automatic tagging. Our objective is to explore semantic meaning beyond surface propositions. We aim to model people's cognitive states, namely their beliefs as expressed through linguistic means. We model the strength of their beliefs and their (the human) degree of commitment to their utterance. We explore only the perspective of the author of a text. We classify predicates into one of three possibilities: committed belief, non committed belief, or not applicable. We proceed to manually annotate data to that end, then we build a supervised framework to test the feasibility of automatically predicting these belief states. Even though the data is relatively small, we show that automatic prediction of a belief class is a feasible task. Using syntactic features, we are able to obtain significant improvements over a simple baseline of 23% F-measure absolute points. The best performing automatic tagging condition is where we use POS tag, word type feature AlphaNumeric, and shallow syntactic chunk information CHUNK.
Our best overall performance is 53.97% F-measure.", "phrases": ["belief", "tagging", "writer", "surface word"], "overall_score": 3.793373976501553, "scores": [2.650732849902565, 1.7209895161254758, 0.8445313622280516, 0.5333356178072294], "rank_score": 1.4373973365158306} -{"id": "gonen-goldberg-2019-lipstick", "title": "Lipstick on a Pig: Debiasing Methods Cover up Systematic Gender Biases in Word Embeddings But do not Remove Them", "abstract": "Word embeddings are widely used in NLP for a vast range of tasks. It was shown that word embeddings derived from text corpora reflect gender biases in society. This phenomenon is pervasive and consistent across different word embedding models, causing serious concern. Several recent works tackle this problem, and propose methods for significantly reducing this gender bias in word embeddings, demonstrating convincing results. However, we argue that this removal is superficial. While the bias is indeed substantially reduced according to the provided bias definition, the actual effect is mostly hiding the bias, not removing it. The gender bias information is still reflected in the distances between \u201cgender-neutralized\u201d words in the debiased embeddings, and can be recovered from them. We present a series of experiments to support this claim, for two debiasing methods. We conclude that existing bias removal techniques are insufficient, and should not be trusted for providing gender-neutral modeling.", "phrases": ["debiasing method", "word embedding", "gender bias", "bias removal technique", "implicit bias"], "overall_score": 5.810118624628998, "scores": [1.852025153712617, 2.111127031628758, 1.8370682605155706, 0.825241296316937, 0.5598523185860812], "rank_score": 1.4370628121519928} -{"id": "dubossarsky-etal-2019-time", "title": "Time-Out: Temporal Referencing for Robust Modeling of Lexical Semantic Change", "abstract": "State-of-the-art models of lexical semantic change detection suffer from noise stemming from vector space alignment. We have empirically tested the Temporal Referencing method for lexical semantic change and show that, by avoiding alignment, it is less affected by this noise. We show that, trained on a diachronic corpus, the skip-gram with negative sampling architecture with temporal referencing outperforms alignment models on a synthetic task as well as a manual testset. We introduce a principled way to simulate lexical semantic change and systematically control for possible biases.", "phrases": ["temporal referencing", "lexical semantic change", "change detection"], "overall_score": 3.9836993804627303, "scores": [1.9613032942187323, 1.8208597398192043, 0.5282844714015767], "rank_score": 1.4368158351465043} -{"id": "lin-etal-2019-moel", "title": "MoEL: Mixture of Empathetic Listeners", "abstract": "Previous research on empathetic dialogue systems has mostly focused on generating responses given certain emotions. However, being empathetic not only requires the ability of generating emotional responses, but more importantly, requires the understanding of user emotions and replying appropriately. In this paper, we propose a novel end-to-end approach for modeling empathy in dialogue systems: Mixture of Empathetic Listeners (MoEL). Our model first captures the user emotions and outputs an emotion distribution. Based on this, MoEL will softly combine the output states of the appropriate Listener(s), which are each optimized to react to certain emotions, and generate an empathetic response. 
Human evaluations on EMPATHETIC-DIALOGUES dataset confirm that MoEL outperforms multitask training baseline in terms of empathy, relevance, and fluency. Furthermore, the case study on generated responses of different Listeners shows high interpretability of our model.", "phrases": ["mixture", "empathetic listeners", "emotion"], "overall_score": 3.155042032036652, "scores": [2.253550005392664, 0.9454160822832978, 1.1087984411483494], "rank_score": 1.4359215096081037} -{"id": "gu-etal-2017-learning", "title": "Learning to Translate in Real-time with Neural Machine Translation", "abstract": "Translating in real-time, a.k.a. simultaneous translation, outputs translation words before the input sentence ends, which is a challenging problem for conventional machine translation methods. We propose a neural machine translation (NMT) framework for simultaneous translation in which an agent learns to make decisions on when to translate from the interaction with a pre-trained NMT environment. To trade off quality and delay, we extensively explore various targets for delay and design a method for beam-search applicable in the simultaneous MT setting. Experiments against state-of-the-art baselines on two language pairs demonstrate the efficacy of the proposed framework both quantitatively and qualitatively.", "phrases": ["real-time", "neural machine translation", "read", "latency", "source sentence"], "overall_score": 5.14348716487063, "scores": [1.9808609211966828, 0.8069948190111105, 1.6624906233030492, 1.4646412788893977, 1.261599468210376], "rank_score": 1.435317422122123} -{"id": "patwa-etal-2020-semeval", "title": "SemEval-2020 Task 9: Overview of Sentiment Analysis of Code-Mixed Tweets", "abstract": "In this paper, we present the results of the SemEval-2020 Task 9 on Sentiment Analysis of Code-Mixed Tweets (SentiMix 2020). We also release and describe our Hinglish (Hindi-English) and Spanglish (Spanish-English) corpora annotated with word-level language identification and sentence-level sentiment labels. These corpora are comprised of 20K and 19K examples, respectively. The sentiment labels are - Positive, Negative, and Neutral. SentiMix attracted 89 submissions in total including 61 teams that participated in the Hinglish contest and 28 submitted systems to the Spanglish competition. The best performance achieved was 75.0% F1 score for Hinglish and 80.6% F1 for Spanglish. We observe that BERT-like models and ensemble methods are the most common and successful approaches among the participants.", "phrases": ["sentiment analysis", "code-mixed tweets", "semeval-2020 task", "code-mixed text", "twitter"], "overall_score": 4.728669123043824, "scores": [3.3575211007014736, 1.88511819440377, 0.8767306959199153, 0.5323109696638744, 0.5220192485336591], "rank_score": 1.4347400418445386} -{"id": "futrell-etal-2015-quantifying", "title": "Quantifying Word Order Freedom in Dependency Corpora", "abstract": "Using recently available dependency corpora, we present novel measures of a key quantitative property of language, word order freedom: the extent to which word order in a sentence is free to vary while conveying the same meaning. We discuss two topics. First, we discuss linguistic and statistical issues associated with our measures and with the annotation styles of available corpora. We find that we can measure reliable upper bounds on word order freedom in head direction and the ordering of certain sisters, but that more general measures of word order freedom are not currently feasible. 
Second, we present results of our measures in 34 languages and demonstrate a correlation between quantitative word order freedom of subjects and objects and the presence of nominative-accusative case marking. To our knowledge this is the first large-scale quantitative test of the hypothesis that languages with more word order freedom have more case marking (Sapir, 1921; Kiparsky, 1997).", "phrases": ["word order freedom", "dependency corpora", "presence"], "overall_score": 3.5650929284762403, "scores": [2.492614741075609, 0.9746923632438865, 0.8367897060541197], "rank_score": 1.4346989367912053} -{"id": "ye-etal-2018-interpretable", "title": "Interpretable Charge Predictions for Criminal Cases: Learning to Generate Court Views from Fact Descriptions", "abstract": "In this paper, we propose to study the problem of court view generation from the fact description in a criminal case. The task aims to improve the interpretability of charge prediction systems and help automatic legal document generation. We formulate this task as a text-to-text natural language generation (NLG) problem. Sequence-to-sequence model has achieved cutting-edge performances in many NLG tasks. However, due to the non-distinctions of fact descriptions, it is hard for Seq2Seq model to generate charge-discriminative court views. In this work, we explore charge labels to tackle this issue. We propose a label-conditioned Seq2Seq model with attention for this problem, to decode court views conditioned on encoded charge labels. Experimental results show the effectiveness of our method.", "phrases": ["charge prediction", "criminal case", "fact description"], "overall_score": 3.440062528403589, "scores": [2.162899899345489, 1.3294393361088865, 0.8115132731528888], "rank_score": 1.434617502869088} -{"id": "kobayashi-etal-2015-effects", "title": "Effects of Game on User Engagement with Spoken Dialogue System", "abstract": "In this study, we examine the effects of using a game for encouraging the use of a spoken dialogue system. As a case study, we developed a word-chain game, called Shiritori in Japanese, and released the game as a module in a Japanese Android/iOS app, Onsei-Assist, which is a Siri-like personal assistant based on a spoken dialogue technology. We analyzed the log after the release and confirmed that the game can increase the number of user utterances. Furthermore, we discovered a positive side effect, in which users who have played the game tend to begin using non-game modules. This suggests that just adding a game module to the system can improve user engagement with an assistant agent.", "phrases": ["game", "user engagement", "spoken dialogue system"], "overall_score": 1.9887558813318713, "scores": [1.7901476360151964, 1.6676003455662276, 0.8460043897229516], "rank_score": 1.4345841237681254} -{"id": "malmasi-etal-2017-report", "title": "A Report on the 2017 Native Language Identification Shared Task", "abstract": "Native Language Identification (NLI) is the task of automatically identifying the native language (L1) of an individual based on their language production in a learned language. It is typically framed as a classification task where the set of L1s is known a priori. Two previous shared tasks on NLI have been organized where the aim was to identify the L1 of learners of English based on essays (2013) and spoken responses (2016) they provided during a standardized assessment of academic English proficiency. The 2017 shared task combines the inputs from the two prior tasks for the first time. 
There are three tracks: NLI on the essay only, NLI on the spoken response only (based on a transcription of the response and i-vector acoustic features), and NLI using both responses. We believe this makes for a more interesting shared task while building on the methods and results from the previous two shared tasks. In this paper, we report the results of the shared task. A total of 19 teams competed across the three different sub-tasks. The fusion track showed that combining the written and spoken responses provides a large boost in prediction accuracy. Multiple classifier systems (e.g. ensembles and meta-classifiers) were the most effective in all tasks, with most based on traditional classifiers (e.g. SVMs) with lexical/syntactic features.", "phrases": ["native language identification", "nli", "learned language"], "overall_score": 3.302517286599917, "scores": [2.481810923105069, 1.2461750793806408, 0.5748090993956214], "rank_score": 1.434265033960444} -{"id": "barzilay-elhadad-2003-sentence", "title": "Sentence Alignment for Monolingual Comparable Corpora", "abstract": "We address the problem of sentence alignment for monolingual corpora, a phenomenon distinct from alignment in parallel corpora. Aligning large comparable corpora automatically would provide a valuable resource for learning of text-to-text rewriting rules. We incorporate context into the search for an optimal alignment in two complementary ways: learning rules for matching paragraphs using topic structure and further refining the matching through local alignment to find good sentence pairs. Evaluation shows that our alignment method outperforms state-of-the-art systems developed for the same task.", "phrases": ["sentence alignment", "programming", "britannica elementary", "alignment technique"], "overall_score": 4.557481323034342, "scores": [3.4459000836921687, 0.8772055713437206, 0.8701436879144839, 0.5429421331208427], "rank_score": 1.434047869017804} -{"id": "savoldi-etal-2021-gender", "title": "Gender Bias in Machine Translation", "abstract": "Machine translation (MT) technology has facilitated our daily tasks by providing accessible shortcuts for gathering, processing, and communicating information. However, it can suffer from biases that harm users and society at large. As a relatively new field of inquiry, studies of gender bias in MT still lack cohesion. This advocates for a unified framework to ease future research. To this end, we: i) critically review current conceptualizations of bias in light of theoretical insights from related disciplines, ii) summarize previous analyses aimed at assessing gender bias in MT, iii) discuss the mitigating strategies proposed so far, and iv) point toward potential directions for future work.", "phrases": ["society", "gender bias", "million"], "overall_score": 2.9816919584475476, "scores": [3.225667624383935, 0.540176479664759, 0.5358280978620849], "rank_score": 1.4338907339702596} -{"id": "bergsma-etal-2012-language", "title": "Language Identification for Creating Language-Specific Twitter Collections", "abstract": "Social media services such as Twitter offer an immense volume of real-world linguistic data. We explore the use of Twitter to obtain authentic user-generated text in low-resource languages such as Nepali, Urdu, and Ukrainian. Automatic language identification (LID) can be used to extract language-specific data from Twitter, but it is unclear how well LID performs on short, informal texts in low-resource languages. 
We address this question by annotating and releasing a large collection of tweets in nine languages, focusing on confusable languages using the Cyrillic, Arabic, and Devanagari scripts. This is the first publicly-available collection of LID-annotated tweets in non-Latin scripts, and should become a standard evaluation set for LID systems. We also advance the state-of-the-art by evaluating new, highly-accurate LID systems, trained both on our new corpus and on standard materials only. Both types of systems achieve a huge performance improvement over the existing state-of-the-art, correctly classifying around 98% of our gold standard tweets. We provide a detailed analysis showing how the accuracy of our systems varies along certain dimensions, such as the tweet-length and the amount of in- and out-of-domain training data.", "phrases": ["language-specific twitter collection", "devanagari script", "language identification", "social medium platform"], "overall_score": 4.219636865817627, "scores": [3.0991097542450916, 1.583416472952534, 0.5258672255819961, 0.5239540615331939], "rank_score": 1.4330868785782038} -{"id": "chen-etal-2009-improving", "title": "Improving Dependency Parsing with Subtrees from Auto-Parsed Data", "abstract": "This paper presents a simple and effective approach to improve dependency parsing by using subtrees from auto-parsed data. First, we use a baseline parser to parse large-scale unannotated data. Then we extract subtrees from dependency parse trees in the auto-parsed data. Finally, we construct new subtree-based features for parsing algorithms. To demonstrate the effectiveness of our proposed approach, we present the experimental results on the English Penn Treebank and the Chinese Penn Treebank. These results show that our approach significantly outperforms baseline systems. And, it achieves the best accuracy for the Chinese data and an accuracy which is competitive with the best known systems for the English data.", "phrases": ["subtree", "auto-parsed data", "large amount"], "overall_score": 2.9788785445361183, "scores": [2.008612229322713, 1.7599279035192568, 0.5290731707708218], "rank_score": 1.4325377678709306} -{"id": "white-etal-2016-universal", "title": "Universal Decompositional Semantics on Universal Dependencies", "abstract": "We present a framework for augmenting data sets from the Universal Dependencies project with Universal Decompositional Semantics. Where the Universal Dependencies project aims to provide a syntactic annotation standard that can be used consistently across many languages as well as a collection of corpora that use that standard, our extension has similar aims for semantic annotation. We describe results from annotating the English Universal Dependencies treebank, dealing with word senses, semantic roles, and event properties.", "phrases": ["universal decompositional semantics", "predpatt", "lightweight tool"], "overall_score": 3.7801500344880425, "scores": [2.689524011907295, 1.077274676846348, 0.5303607462471196], "rank_score": 1.4323864783335878} -{"id": "barr-etal-2008-linguistic", "title": "The Linguistic Structure of English Web-Search Queries", "abstract": "Web-search queries are known to be short, but little else is known about their structure. In this paper we investigate the applicability of part-of-speech tagging to typical English-language web search-engine queries and the potential value of these tags for improving search results. 
We begin by identifying a set of part-of-speech tags suitable for search queries and quantifying their occurrence. We find that proper-nouns constitute 40% of query terms, and proper nouns and nouns together constitute over 70% of query terms. We also show that the majority of queries are noun-phrases, not unstructured collections of terms. We then use a set of queries manually labeled with these tags to train a Brill tagger and evaluate its performance. In addition, we investigate classification of search queries into grammatical classes based on the syntax of part-of-speech tag sequences. We also conduct preliminary investigative experiments into the practical applicability of leveraging query-trained part-of-speech taggers for information-retrieval tasks. In particular, we show that part-of-speech information can be a significant feature in machine-learned search-result relevance. These experiments also include the potential use of the tagger in selecting words for omission or substitution in query reformulation, actions which can improve recall. We conclude that training a part-of-speech tagger on labeled corpora of queries significantly outperforms taggers based on traditional corpora, and leveraging the unique linguistic structure of web-search queries can improve search experience.", "phrases": ["linguistic structure", "query", "occurrence", "pos tagger"], "overall_score": 3.5587288379295487, "scores": [2.3798765906920916, 1.6318775591023442, 0.8688178658586277, 0.8479793377479363], "rank_score": 1.43213783835025} -{"id": "lin-2003-improving", "title": "Improving Summarization Performance by Sentence Compression \u2014 A Pilot Study", "abstract": "In this paper we study the effectiveness of applying sentence compression on an extraction based multi-document summarization system. Our results show that pure syntactic-based compression does not improve system performance. Topic signature-based reranking of compressed sentences does not help much either. However, reranking using an oracle showed that a significant improvement remains possible.", "phrases": ["summarization performance", "sentence compression", "conciseness", "pipeline approach"], "overall_score": 3.778148059696016, "scores": [3.727911141278421, 0.9507112338473979, 0.5267557001726517, 0.5211334594044492], "rank_score": 1.4316278836757301} -{"id": "petroni-etal-2019-language", "title": "Language Models as Knowledge Bases?", "abstract": "Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as \u201cfill-in-the-blank\u201d cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. 
We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at .", "phrases": ["knowledge bases", "query", "open-domain question", "language models", "dante"], "overall_score": 7.269008537924232, "scores": [2.1210933369912195, 0.9691321566350828, 1.9441119008571686, 1.2494198148570257, 0.8688014297774295], "rank_score": 1.430511727823585} -{"id": "jain-etal-2020-scirex", "title": "SciREX: A Challenge Dataset for Document-Level Information Extraction", "abstract": "Extracting information from full documents is an important problem in many domains, but most previous work focuses on identifying relationships within a sentence or a paragraph. It is challenging to create a large-scale information extraction (IE) dataset at the document level since it requires an understanding of the whole document to annotate entities and their document-level relationships that usually span beyond sentences or even sections. In this paper, we introduce SciREX, a document level IE dataset that encompasses multiple IE tasks, including salient entity identification and document level N-ary relation identification from scientific articles. We annotate our dataset by integrating automatic and human annotations, leveraging existing scientific knowledge resources. We develop a neural model as a strong baseline that extends previous state-of-the-art IE models to document-level IE. Analyzing the model performance shows a significant gap between human performance and current baselines, inviting the community to use our dataset as a challenge to develop document-level IE models. Our data and code are publicly available at .", "phrases": ["information extraction", "document level", "n-ary relation identification", "scirex"], "overall_score": 3.6691141195066814, "scores": [2.6137111702080187, 1.9154161128795928, 0.5986433960232103, 0.5941576838536754], "rank_score": 1.430482090741124} -{"id": "shao-ng-2004-mining", "title": "Mining New Word Translations from Comparable Corpora", "abstract": "New words such as names, technical terms, etc. appear frequently. As such, the bilingual lexicon of a machine translation system has to be constantly updated with these new word translations. Comparable corpora such as news documents of the same period from different news agencies are readily available. In this paper, we present a new approach to mining new word translations from comparable corpora, by using context information to complement transliteration information. 
We evaluated our approach on six months of Chinese and English Gigaword corpora, with encouraging results.", "phrases": ["new word translation", "comparable corpora", "news document", "context information"], "overall_score": 3.872318810008139, "scores": [3.6082901945240278, 1.0226085096054895, 0.5444166580285619, 0.5443995950196829], "rank_score": 1.4299287392944404} -{"id": "hasan-ney-2005-clustered", "title": "Clustered language models based on regular expressions for SMT", "abstract": "In this paper, we present a language model based on clusters obtained by applying regular expressions to the training data and, thus, discriminating several different sentence types as, e.g. interrogatives, imperatives or enumerations. The main motivation lies in the observation that different sentence types also underlie a different syntactic structure, and thus yield a varying distribution of n-grams reflecting their word order. We show that this assumption is valid by applying the models to English-Spanish bilingual corpora and obtaining good perplexity reductions of approximately 25%. In addition, we perform an n-best rescoring experiment and show a relative improvement of 4-5% in word error rate. The models can be easily adapted to other translation tasks and do not need complicated training methods, thus being a valuable alternative for on-demand rescoring of sentence hypotheses such as they occur in the CAT framework.", "phrases": ["language model", "perplexity reduction", "specific class"], "overall_score": 2.5617700795402873, "scores": [3.2021004756035, 0.5504766703272265, 0.5366761665866786], "rank_score": 1.4297511041724682} -{"id": "choi-cardie-2008-learning", "title": "Learning with Compositional Semantics as Structural Inference for Subsentential Sentiment Analysis", "abstract": "Determining the polarity of a sentiment-bearing expression requires more than a simple bag-of-words approach. In particular, words or constituents within the expression can interact with each other to yield a particular overall polarity. In this paper, we view such subsentential interactions in light of compositional semantics, and present a novel learning-based approach that incorporates structural inference motivated by compositional semantics into the learning procedure. Our experiments show that (1) simple heuristics based on compositional semantics can perform better than learning-based methods that do not incorporate compositional semantics (accuracy of 89.7% vs. 89.1%), but (2) a method that integrates compositional semantics into learning performs better than all other alternatives (90.7%). We also find that \"content-word negators\", not widely employed in previous work, play an important role in determining expression-level polarity. Finally, in contrast to conventional wisdom, we find that expression-level classification accuracy uniformly decreases as additional, potentially disambiguating, context is considered.", "phrases": ["compositional semantic", "structural inference", "subsentential sentiment analysis", "polarity"], "overall_score": 5.162475588112479, "scores": [1.6642130722267041, 0.9388060668231311, 1.7390841581151075, 1.376637159421145], "rank_score": 1.429685114146522} -{"id": "dale-etal-2012-hoo", "title": "HOO 2012: A Report on the Preposition and Determiner Error Correction Shared Task", "abstract": "Incorrect usage of prepositions and determiners constitutes the most common types of errors made by non-native speakers of English. 
It is not surprising, then, that there has been a significant amount of work directed towards the automated detection and correction of such errors. However, to date, the use of different data sets and different task definitions has made it difficult to compare work on the topic. This paper reports on the HOO 2012 shared task on error detection and correction in the use of prepositions and determiners, where systems developed by 14 teams from around the world were evaluated on the same previously unseen errorful text.", "phrases": ["preposition", "determiner error correction", "non-native speaker", "textual error", "helping"], "overall_score": 4.998486108835557, "scores": [3.0569124770303553, 0.7997473576332862, 1.3268640837218986, 1.1058222888297038, 0.8584806185032849], "rank_score": 1.429565365143706} -{"id": "zhang-etal-2003-chinese-lexical", "title": "Chinese Lexical Analysis Using Hierarchical Hidden Markov Model", "abstract": "This paper presents a unified approach for Chinese lexical analysis using hierarchical hidden Markov model (HHMM), which aims to incorporate Chinese word segmentation, Part-Of-Speech tagging, disambiguation and unknown words recognition into a whole theoretical frame. A class-based HMM is applied in word segmentation, and in this level unknown words are treated in the same way as common words listed in the lexicon. Unknown words are recognized with reliability in role-based HMM. As for disambiguation, the authors bring forth an n-shortest-path strategy that, in the early stage, reserves top N segmentation results as candidates and covers more ambiguity. Various experiments show that each level in HHMM contributes to lexical analysis. An HHMM-based system ICTCLAS was accomplished. The recent official evaluation indicates that ICTCLAS is one of the best Chinese lexical analyzers. In a word, HHMM is effective for Chinese lexical analysis.", "phrases": ["hidden markov model", "unified approach", "chinese lexical analysis"], "overall_score": 2.781317652035929, "scores": [2.74667011563935, 0.9927531036794981, 0.548520373636125], "rank_score": 1.4293145309849908} -{"id": "pruthi-etal-2020-learning", "title": "Learning to Deceive with Attention-Based Explanations", "abstract": "Attention mechanisms are ubiquitous components in neural architectures applied to natural language processing. In addition to yielding gains in predictive accuracy, attention weights are often claimed to confer interpretability, purportedly useful both for providing insights to practitioners and for explaining why a model makes its decisions to stakeholders. We call the latter use of attention mechanisms into question by demonstrating a simple method for training models to produce deceptive attention masks. Our method diminishes the total weight assigned to designated impermissible tokens, even when the models can be shown to nevertheless rely on these features to drive predictions. Across multiple models and tasks, our approach manipulates attention weights while paying surprisingly little cost in accuracy. Through a human study, we show that our manipulated attention-based explanations deceive people into thinking that predictions from a model biased against gender minorities do not rely on the gender. 
Consequently, our results cast doubt on attention's reliability as a tool for auditing algorithms in the context of fairness and accountability.", "phrases": ["explanation", "attention weight", "practitioner", "deceptive attention mask"], "overall_score": 3.770636363674705, "scores": [3.338291436658734, 1.2669873752595469, 0.5779883965439463, 0.5318589032155621], "rank_score": 1.4287815279194471} -{"id": "medlock-briscoe-2007-weakly", "title": "Weakly Supervised Learning for Hedge Classification in Scientific Literature", "abstract": "We investigate automatic classification of speculative language (\u2018hedging\u2019), in biomedical text using weakly supervised machine learning. Our contributions include a precise description of the task with annotation guidelines, analysis and discussion, a probabilistic weakly supervised learning model, and experimental evaluation of the methods presented. We show that hedge classification is feasible using weakly supervised ML, and point toward avenues for future research.", "phrases": ["hedge classification", "annotation guideline", "weakly supervised learning", "single word"], "overall_score": 4.807758646534066, "scores": [3.28443645405195, 0.8406864943661448, 1.0494857696936268, 0.5365124773828572], "rank_score": 1.4277802988736448} -{"id": "walker-etal-2012-stance", "title": "Stance Classification using Dialogic Properties of Persuasion", "abstract": "Public debate functions as a forum for both expressing and forming opinions, an important aspect of public life. We present results for automatically classifying posts in online debate as to the position, or stance that the speaker takes on an issue, such as Pro or Con. We show that representing the dialogic structure of the debates in terms of agreement relations between speakers, greatly improves performance for stance classification, over models that operate on post content and parent-post context alone.", "phrases": ["online debate", "dialogic structure", "stance classification", "collective classification"], "overall_score": 4.947819665631827, "scores": [3.135269572902079, 1.1415696642641269, 0.906401617555312, 0.5273150611344228], "rank_score": 1.4276389789639852} -{"id": "guan-etal-2020-knowledge", "title": "A Knowledge-Enhanced Pretraining Model for Commonsense Story Generation", "abstract": "Story generation, namely, generating a reasonable story from a leading context, is an important but challenging task. In spite of the success in modeling fluency and local coherence, existing neural language generation models (e.g., GPT-2) still suffer from repetition, logic conflicts, and lack of long-range coherence in generated stories. We conjecture that this is because of the difficulty of associating relevant commonsense knowledge, understanding the causal relationships, and planning entities and events with proper temporal order. In this paper, we devise a knowledge-enhanced pretraining model for commonsense story generation. We propose to utilize commonsense knowledge from external knowledge bases to generate reasonable stories. To further capture the causal and temporal dependencies between the sentences in a reasonable story, we use multi-task learning, which combines a discriminative objective to distinguish true and fake stories during fine-tuning. 
Automatic and manual evaluation shows that our model can generate more reasonable stories than state-of-the-art baselines, particularly in terms of logic and global coherence.", "phrases": ["knowledge-enhanced pretraining model", "commonsense story generation", "knowledge graph"], "overall_score": 4.412465514044618, "scores": [2.907603776677394, 0.8427509118665346, 0.5321476522931763], "rank_score": 1.4275007802790347} -{"id": "napoles-etal-2017-jfleg", "title": "JFLEG: A Fluency Corpus and Benchmark for Grammatical Error Correction", "abstract": "We present a new parallel corpus, JHU FLuency-Extended GUG corpus (JFLEG) for developing and evaluating grammatical error correction (GEC). Unlike other corpora, it represents a broad range of language proficiency levels and uses holistic fluency edits to not only correct grammatical errors but also make the original text more native sounding. We describe the types of corrections made and benchmark four leading GEC systems on this corpus, identifying specific areas in which they do well and how they can improve. JFLEG fulfills the need for a new gold standard to properly assess the current state of GEC.", "phrases": ["grammatical error correction", "parallel corpus", "native sounding", "jfleg", "language learner"], "overall_score": 4.043966923268792, "scores": [3.2024018670497587, 1.9674154209878147, 0.854863567438963, 0.5637048815762719, 0.5483287143184756], "rank_score": 1.4273428902742569} -{"id": "stab-gurevych-2014-annotating", "title": "Annotating Argument Components and Relations in Persuasive Essays", "abstract": "In this paper, we present a novel approach to model arguments, their components and relations in persuasive essays in English. We propose an annotation scheme that includes the annotation of claims and premises as well as support and attack relations for capturing the structure of argumentative discourse. We further conduct a manual annotation study with three annotators on 90 persuasive essays. The obtained inter-rater agreement of \u03b1U = 0.72 for argument components and \u03b1 = 0.81 for argumentative relations indicates that the proposed annotation scheme successfully guides annotators to substantial agreement. The final corpus and the annotation guidelines are freely available to encourage future research in argument recognition.", "phrases": ["persuasive essay", "annotator", "discourse structure", "student-written text", "writing support system"], "overall_score": 5.522469890017418, "scores": [4.627067177986774, 0.8929243583847662, 0.5620878619527577, 0.5294711561570942, 0.5212098917686768], "rank_score": 1.4265520892500139} -{"id": "moon-etal-2019-opendialkg", "title": "OpenDialKG: Explainable Conversational Reasoning with Attention-based Walks over Knowledge Graphs", "abstract": "We study a conversational reasoning model that strategically traverses through a large-scale common fact knowledge graph (KG) to introduce engaging and contextually diverse entities and attributes. For this study, we collect a new Open-ended Dialog - KG parallel corpus called OpenDialKG, where each utterance from 15K human-to-human role-playing dialogs is manually annotated with ground-truth reference to corresponding entities and paths from a large-scale KG with 1M+ facts. We then propose the DialKG Walker model that learns the symbolic transitions of dialog contexts as structured traversals over KG, and predicts natural entities to introduce given previous dialog contexts via a novel domain-agnostic, attention-based graph path decoder. 
Automatic and human evaluations show that our model can retrieve more natural and human-like responses than the state-of-the-art baselines or rule-based models, in both in-domain and cross-domain tasks. The proposed model also generates a KG walk path for each entity retrieved, providing a natural way to explain conversational reasoning.", "phrases": ["conversational reasoning", "knowledge graph", "opendialkg", "recommendation", "parallel dialog\u2194kg corpus"], "overall_score": 4.751868012709533, "scores": [3.161305878711068, 0.9056067604954922, 1.6845793000382778, 0.8320443278379007, 0.5466803780830238], "rank_score": 1.4260433290331525} -{"id": "nguyen-etal-2018-novel", "title": "A Novel Embedding Model for Knowledge Base Completion Based on Convolutional Neural Network", "abstract": "In this paper, we propose a novel embedding model, named ConvKB, for knowledge base completion. Our model ConvKB advances state-of-the-art models by employing a convolutional neural network, so that it can capture global relationships and transitional characteristics between entities and relations in knowledge bases. In ConvKB, each triple (head entity, relation, tail entity) is represented as a 3-column matrix where each column vector represents a triple element. This 3-column matrix is then fed to a convolution layer where multiple filters are operated on the matrix to generate different feature maps. These feature maps are then concatenated into a single feature vector representing the input triple. The feature vector is multiplied with a weight vector via a dot product to return a score. This score is then used to predict whether the triple is valid or not. Experiments show that ConvKB achieves better link prediction performance than previous state-of-the-art embedding models on two benchmark datasets WN18RR and FB15k-237.", "phrases": ["knowledge base completion", "convolutional neural network", "relation embedding"], "overall_score": 3.54341123828796, "scores": [2.8218597721256664, 0.9210051757684973, 0.535055800465055], "rank_score": 1.425973582786406} -{"id": "cheng-etal-2020-spellgcn", "title": "SpellGCN: Incorporating Phonological and Visual Similarities into Language Models for Chinese Spelling Check", "abstract": "Chinese Spelling Check (CSC) is a task to detect and correct spelling errors in Chinese natural language. Existing methods have made attempts to incorporate the similarity knowledge between Chinese characters. However, they take the similarity knowledge as either an external input resource or just heuristic rules. This paper proposes to incorporate phonological and visual similarity knowledge into language models for CSC via a specialized graph convolutional network (SpellGCN). The model builds a graph over the characters, and SpellGCN is learned to map this graph into a set of inter-dependent character classifiers. These classifiers are applied to the representations extracted by another network, such as BERT, enabling the whole network to be end-to-end trainable. Experiments are conducted on three human-annotated datasets. 
Our method achieves superior performance against previous models by a large margin.", "phrases": ["language model", "spellgcn", "confusion set"], "overall_score": 3.6566896401903373, "scores": [2.3201570511709297, 1.1244974835015176, 0.8322598958829385], "rank_score": 1.425638143518462} -{"id": "lacruz-etal-2012-average", "title": "Average Pause Ratio as an Indicator of Cognitive Effort in Post-Editing: A Case Study", "abstract": "Pauses are known to be good indicators of cognitive demand in monolingual language production and in translation. However, a previous effort by O'Brien (2006) to establish an analogous relationship in post-editing did not produce the expected result. In this case study, we introduce a metric for pause activity, the average pause ratio, which is sensitive to both the number and duration of pauses. We measured cognitive effort in a segment by counting the number of complete editing events. We found that the average pause ratio was higher for less cognitively demanding segments than for more cognitively demanding segments. Moreover, this effect became more pronounced as the minimum threshold for pause length was shortened.", "phrases": ["pause ratio", "cognitive effort", "post-editing"], "overall_score": 1.9756805162607365, "scores": [1.7629985039955351, 1.6521510840638742, 0.8603071367260573], "rank_score": 1.4251522415951559} -{"id": "hasan-ng-2014-taking", "title": "Why are You Taking this Stance? Identifying and Classifying Reasons in Ideological Debates", "abstract": "Recent years have seen a surge of interest in stance classification in online debates. Oftentimes, however, it is important to determine not only the stance expressed by an author in her debate posts, but also the reasons behind her supporting or opposing the issue under debate. We therefore examine the new task of reason classification in this paper. Given the close interplay between stance classification and reason classification, we design computational models for examining how automatically computed stance information can be profitably exploited for reason classification. Experiments on our reason-annotated corpus of ideological debate posts from four domains demonstrate that sophisticated models of stances and reasons can indeed yield more accurate reason and stance classification results than their simpler counterparts.", "phrases": ["stance", "online debate", "reason classification", "counterargument"], "overall_score": 4.695928664962188, "scores": [3.169598748914963, 1.1610885069778465, 0.831513920436684, 0.5370234587238119], "rank_score": 1.4248061587633263} -{"id": "matsuzaki-etal-2005-probabilistic", "title": "Probabilistic CFG with Latent Annotations", "abstract": "This paper defines a generative probabilistic model of parse trees, which we call PCFG-LA. This model is an extension of PCFG in which non-terminal symbols are augmented with latent variables. Fine-grained CFG rules are automatically induced from a parsed corpus by training a PCFG-LA model using an EM-algorithm. Because exact parsing with a PCFG-LA is NP-hard, several approximations are described and empirically compared. 
In experiments using the Penn WSJ corpus, our automatically trained model gave a performance of 86.6% (F1, sentences \u2264 40 words), which is comparable to that of an unlexicalized PCFG parser created using extensive manual feature selection.", "phrases": ["latent annotation", "variable", "probabilistic cfg", "treebank", "tsg"], "overall_score": 5.572663844261796, "scores": [2.8974004800524065, 1.517151202964636, 1.2697943822597388, 0.9155252141977966, 0.5226121984208988], "rank_score": 1.4244966955790954} -{"id": "cotterell-etal-2016-sigmorphon", "title": "The SIGMORPHON 2016 Shared Task\u2014Morphological Reinflection", "abstract": "The 2016 SIGMORPHON Shared Task was devoted to the problem of morphological reinflection. It introduced morphological datasets for 10 languages with diverse typological characteristics. The shared task drew submissions from 9 teams representing 11 institutions reflecting a variety of approaches to addressing supervised learning of reinflection. For the simplest task, inflection generation from lemmas, the best system averaged 95.56% exact-match accuracy across all languages, ranging from Maltese (88.99%) to Hungarian (99.30%). With the relatively large training datasets provided, recurrent neural network architectures consistently performed best\u2014in fact, there was a significant margin between neural and non-neural approaches. The best neural approach, averaged over all tasks and languages, outperformed the best non-neural one by 13.76% absolute; on individual tasks and languages the gap in accuracy sometimes exceeded 60%. Overall, the results show a strong state of the art, and serve as encouragement for future shared tasks that explore morphological analysis and generation with varying degrees of supervision.", "phrases": ["sigmorphon", "shared task", "art", "morphological inflection", "reinflection task"], "overall_score": 5.389946254682457, "scores": [2.9318040328201693, 1.5769063842132092, 0.876177170326917, 0.8707347974960621, 0.8660427219495457], "rank_score": 1.4243330213611807} -{"id": "tatman-2017-gender", "title": "Gender and Dialect Bias in YouTube's Automatic Captions", "abstract": "This project evaluates the accuracy of YouTube's automatically-generated captions across two genders and five dialect groups. Speakers' dialect and gender was controlled for by using videos uploaded as part of the \u201caccent tag challenge\u201d, where speakers explicitly identify their language background. The results show robust differences in accuracy across both gender and dialect, with lower accuracy for 1) women and 2) speakers from Scotland. This finding builds on earlier research finding that speaker's sociolinguistic identity may negatively impact their ability to use automatic speech recognition, and demonstrates the need for sociolinguistically-stratified validation of systems.", "phrases": ["dialect", "youtube", "caption", "gender"], "overall_score": 3.278906256021084, "scores": [2.5001674924288135, 1.7691313232015047, 0.8813973613908177, 0.5453473976508958], "rank_score": 1.4240108936680078} -{"id": "nakashole-etal-2012-patty", "title": "PATTY: A Taxonomy of Relational Patterns with Semantic Types", "abstract": "This paper presents PATTY: a large resource for textual patterns that denote binary relations between entities. The patterns are semantically typed and organized into a subsumption taxonomy. The PATTY system is based on efficient algorithms for frequent itemset mining and can process Web-scale corpora. 
It harnesses the rich type system and entity population of large knowledge bases. The PATTY taxonomy comprises 350,569 pattern synsets. Random-sampling-based evaluation shows a pattern accuracy of 84.7%. PATTY has 8,162 subsumptions, with a random-sampling-based precision of 75%. The PATTY resource is freely available for interactive access and download.", "phrases": ["taxonomy", "patty", "entity pair", "semantic type signature", "paraphrase"], "overall_score": 4.744696224285446, "scores": [3.364904491859373, 2.0375818809237853, 0.5807744595070183, 0.5735076008345968, 0.5626868851143442], "rank_score": 1.4238910636478235} -{"id": "sheng-etal-2020-towards", "title": "Towards Controllable Biases in Language Generation", "abstract": "We present a general approach towards controllable societal biases in natural language generation (NLG). Building upon the idea of adversarial triggers, we develop a method to induce societal biases in generated text when input prompts contain mentions of specific demographic groups. We then analyze two scenarios: 1) inducing negative biases for one demographic and positive biases for another demographic, and 2) equalizing biases between demographics. The former scenario enables us to detect the types of biases present in the model. Specifically, we show the effectiveness of our approach at facilitating bias analysis by finding topics that correspond to demographic inequalities in generated text and comparing the relative effectiveness of inducing biases for different demographics. The second scenario is useful for mitigating biases in downstream applications such as dialogue generation. In our experiments, the mitigation technique proves to be effective at equalizing the amount of biases across demographics while simultaneously generating less negatively biased text overall.", "phrases": ["language generation", "trigger", "group"], "overall_score": 3.5379132598923855, "scores": [2.589316637777945, 0.8457059854333634, 0.8362604773360168], "rank_score": 1.423761033515775} -{"id": "barba-etal-2021-esc", "title": "ESC: Redesigning WSD with Extractive Sense Comprehension", "abstract": "Word Sense Disambiguation (WSD) is a historical NLP task aimed at linking words in contexts to discrete sense inventories and it is usually cast as a multi-label classification task. Recently, several neural approaches have employed sense definitions to better represent word meanings. Yet, these approaches do not observe the input sentence and the sense definition candidates all at once, thus potentially reducing the model performance and generalization power. We cope with this issue by reframing WSD as a span extraction problem \u2014 which we called Extractive Sense Comprehension (ESC) \u2014 and propose ESCHER, a transformer-based neural architecture for this new formulation. By means of an extensive array of experiments, we show that ESC unleashes the full potential of our model, leading it to outdo all of its competitors and to set a new state of the art on the English WSD task. In the few-shot scenario, ESCHER proves to exploit training data efficiently, attaining the same performance as its closest competitor while relying on almost three times fewer annotations. Furthermore, ESCHER can nimbly combine data annotated with senses from different lexical resources, achieving performances that were previously out of everyone's reach. 
The model along with data is available at .", "phrases": ["wsd", "extractive sense comprehension", "span extraction problem", "gloss"], "overall_score": 2.769744252217631, "scores": [2.550429811000859, 2.072095708938415, 0.5409306398683316, 0.5300117602035359], "rank_score": 1.4233669800027853} -{"id": "del-tredici-fernandez-2017-semantic", "title": "Semantic Variation in Online Communities of Practice", "abstract": "We introduce a framework for quantifying semantic variation of common words in Communities of Practice and in sets of topic-related communities. We show that while some meaning shifts are shared across related communities, others are community-specific, and therefore independent from the discussed topic. We propose such findings as evidence in favour of sociolinguistic theories of socially-driven semantic variation. Results are evaluated using an independent language modelling task. Furthermore, we investigate extralinguistic features and show that factors such as prominence and dissemination of words are related to semantic variation.", "phrases": ["online community", "practice", "semantic variation"], "overall_score": 2.5495435833247955, "scores": [1.840257777021554, 1.8377995679052976, 0.5907247552004896], "rank_score": 1.4229273667091136} -{"id": "freitag-etal-2014-jane", "title": "Jane: Open Source Machine Translation System Combination", "abstract": "Different machine translation engines can be remarkably dissimilar not only with respect to their technical paradigm, but also with respect to the translation output they yield. System combination is a method for combining the output of multiple machine translation engines in order to take benefit of the strengths of each of the individual engines. In this work we introduce a novel system combination implementation which is integrated into Jane, RWTH\u2019s open source statistical machine translation toolkit. On the most recent Workshop on Statistical Machine Translation system combination shared task, we achieve improvements of up to 0.7 points in BLEU over the best system combination hypotheses which were submitted for the official evaluation. Moreover, we enhance our system combination pipeline with additional n-gram language models and lexical translation models.", "phrases": ["individual engine", "machine translation toolkit", "jane"], "overall_score": 2.957695136415557, "scores": [2.321681071198835, 1.0563440573827536, 0.8890269771865419], "rank_score": 1.4223507019227102} -{"id": "ahn-2006-stages", "title": "The stages of event extraction", "abstract": "Event detection and recognition is a complex task consisting of multiple sub-tasks of varying difficulty. In this paper, we present a simple, modular approach to event extraction that allows us to experiment with a variety of machine learning methods for these sub-tasks, as well as to evaluate the impact on performance these sub-tasks have on the overall task.", "phrases": ["stage", "event extraction", "modular approach", "trigger", "classification problem"], "overall_score": 4.738164672676207, "scores": [4.660304883709579, 0.8121460711565055, 0.5654725762442929, 0.5393515472233908, 0.5323795935325132], "rank_score": 1.4219309343732565} -{"id": "yin-etal-2016-multi", "title": "Multi-Granularity Chinese Word Embedding", "abstract": "This paper considers the problem of learning Chinese word embeddings. In contrast to English, a Chinese word is usually composed of characters, and most of the characters themselves can be further divided into components such as radicals. 
While characters and radicals contain rich information and are capable of indicating semantic meanings of words, they have not been fully exploited by existing word embedding methods. In this work, we propose multi-granularity embedding (MGE) for Chinese words. The key idea is to make full use of such word-character-radical composition, and enrich word embeddings by further incorporating finer-grained semantics from characters and radicals. Quantitative evaluation demonstrates the superiority of MGE in word similarity computation and analogical reasoning. Qualitative analysis further shows its capability to identify finer-grained semantic meanings of words.", "phrases": ["chinese word", "character", "multi-granularity", "cwe model"], "overall_score": 3.8483087061848678, "scores": [1.9055544641606745, 1.490939202075584, 1.4458149734995334, 0.8419415334974193], "rank_score": 1.4210625433083028} -{"id": "zhao-etal-2008-pivot", "title": "Pivot Approach for Extracting Paraphrase Patterns from Bilingual Corpora", "abstract": "Paraphrase patterns are useful in paraphrase recognition and generation. In this paper, we present a pivot approach for extracting paraphrase patterns from bilingual parallel corpora, whereby the English paraphrase patterns are extracted using the sentences in a foreign language as pivots. We propose a loglinear model to compute the paraphrase likelihood of two patterns and exploit feature functions based on maximum likelihood estimation (MLE) and lexical weighting (LW). Using the presented method, we extract over 1,000,000 pairs of paraphrase patterns from 2M bilingual sentence pairs, the precision of which exceeds 67%. The evaluation results show that: (1) The pivot approach is effective in extracting paraphrase patterns, which significantly outperforms the conventional method DIRT. Especially, the log-linear model with the proposed feature functions achieves high performance. (2) The coverage of the extracted paraphrase patterns is high, which is above 84%. (3) The extracted paraphrase patterns can be classified into 5 types, which are useful in various applications.", "phrases": ["paraphrase pattern", "bilingual corpora", "pivot approach"], "overall_score": 3.8460331722672683, "scores": [1.7334925897321638, 0.8114454136078921, 1.7157287716354064], "rank_score": 1.420222258325154} -{"id": "li-etal-2018-delete", "title": "Delete, Retrieve, Generate: a Simple Approach to Sentiment and Style Transfer", "abstract": "We consider the task of text attribute transfer: transforming a sentence to alter a specific attribute (e.g., sentiment) while preserving its attribute-independent content (e.g., \u201cscreen is just the right size\u201d to \u201cscreen is too small\u201d). Our training data includes only sentences labeled with their attribute (e.g., positive and negative), but not pairs of sentences that only differ in the attributes, so we must learn to disentangle attributes from attribute-independent content in an unsupervised way. Previous work using adversarial methods has struggled to produce high-quality outputs. In this paper, we propose simpler methods motivated by the observation that text attributes are often marked by distinctive phrases (e.g., \u201ctoo small\u201d). Our strongest method extracts content words by deleting phrases associated with the sentence's original attribute value, retrieves new phrases associated with the target attribute, and uses a neural model to fluently combine these into a final output. 
Based on human evaluation, our best method generates grammatical and appropriate responses on 22% more inputs than the best previous system, averaged over three attribute transfer datasets: altering sentiment of reviews on Yelp, altering sentiment of reviews on Amazon, and altering image captions to be more romantic or humorous.", "phrases": ["retrieve", "style transfer", "text attribute", "input sentence", "sentiment polarity"], "overall_score": 6.4017803797602735, "scores": [3.1932957302627436, 1.2616557258232093, 1.5065141157273612, 0.5775527733826408, 0.5569468145608568], "rank_score": 1.4191930319513621} -{"id": "lao-etal-2011-random", "title": "Random Walk Inference and Learning in A Large Scale Knowledge Base", "abstract": "We consider the problem of performing learning and inference in a large scale knowledge base containing imperfect knowledge with incomplete coverage. We show that a soft inference procedure based on a combination of constrained, weighted, random walks through the knowledge base graph can be used to reliably infer new beliefs for the knowledge base. More specifically, we show that the system can learn to infer different target relations by tuning the weights associated with random walks that follow different paths through the graph, using a version of the Path Ranking Algorithm (Lao and Cohen, 2010b). We apply this approach to a knowledge base of approximately 500,000 beliefs extracted imperfectly from the web by NELL, a never-ending language learner (Carlson et al., 2010). This new system improves significantly over NELL's earlier Horn-clause learning and inference method: it obtains nearly double the precision at rank 100, and the new learning method is also applicable to many more inference tasks.", "phrases": ["scale knowledge base", "path ranking algorithm", "random walk", "reasoning", "entity pair"], "overall_score": 4.961816771655171, "scores": [0.9403668363580528, 2.0361656294142425, 1.7208697502119803, 1.3231842098786477, 1.074803308649936], "rank_score": 1.4190779469025718} -{"id": "hamon-etal-2009-end", "title": "End-to-End Evaluation in Simultaneous Translation", "abstract": "This paper presents the end-to-end evaluation of an automatic simultaneous translation system, built with state-of-the-art components. It shows whether, and for which situations, such a system might be advantageous when compared to a human interpreter. Using speeches in English translated into Spanish, we present the evaluation procedure and we discuss the results both for the recognition and translation components as well as for the overall system. Even if the translation process remains the Achilles' heel of the system, the results show that the system can keep at least half of the information, becoming potentially useful for final users.", "phrases": ["simultaneous translation", "human interpreter", "end-to-end evaluation"], "overall_score": 2.76134082473876, "scores": [1.9573753982243391, 1.770454413461975, 0.5293156059671933], "rank_score": 1.4190484725511692} -{"id": "dyer-etal-2016-recurrent", "title": "Recurrent Neural Network Grammars", "abstract": "We introduce recurrent neural network grammars, probabilistic models of sentences with explicit phrase structure. We explain efficient inference procedures that allow application to both parsing and language modeling. 
Experiments show that they provide better parsing in English than any single previously published supervised generative model and better language modeling than state-of-the-art sequential RNNs in English and Chinese.", "phrases": ["neural network grammar", "generative model", "rnng", "transition-based parser", "discriminative parser"], "overall_score": 6.126217799234898, "scores": [2.29476387363743, 1.747987253901256, 1.0833873655041424, 1.0594969220749397, 0.9090196691982806], "rank_score": 1.4189310168632099} -{"id": "tseng-2003-semantic", "title": "Semantic Classification of Chinese Unknown Words", "abstract": "This paper describes a classifier that assigns semantic thesaurus categories to unknown Chinese words (words not already in the CiLin thesaurus and the Chinese Electronic Dictionary, but in the Sinica Corpus). The focus of the paper differs in two ways from previous research in this particular area. Prior research in Chinese unknown words mostly focused on proper nouns (Lee 1993, Lee, Lee and Chen 1994, Huang, Hong and Chen 1994, Chen and Chen 2000). This paper does not address proper nouns, focusing rather on common nouns, adjectives, and verbs. My analysis of the Sinica Corpus shows that contrary to expectation, most unknown words in Chinese are common nouns, adjectives, and verbs rather than proper nouns. Other previous research has focused on features related to unknown word contexts (Caraballo 1999; Roark and Charniak 1998). While context is clearly an important feature, this paper focuses on non-contextual features, which may play a key role for unknown words that occur only once and hence have limited context. The feature I focus on, following Ciaramita (2002), is morphological similarity to words whose semantic category is known. My nearest neighbor approach to lexical acquisition computes the distance between an unknown word and examples from the CiLin thesaurus based upon its morphological structure. The classifier improves on baseline semantic categorization performance for adjectives and verbs, but not for nouns.", "phrases": ["chinese", "unknown word", "morphological similarity"], "overall_score": 2.283262089578445, "scores": [2.482260593295476, 0.8959289526066709, 0.8778219571298506], "rank_score": 1.4186705010106657} -{"id": "holtzman-etal-2018-learning", "title": "Learning to Write with Cooperative Discriminators", "abstract": "Despite their local fluency, long-form text generated from RNNs is often generic, repetitive, and even self-contradictory. We propose a unified learning framework that collectively addresses all the above issues by composing a committee of discriminators that can guide a base RNN generator towards more globally coherent generations. More concretely, discriminators each specialize in a different principle of communication, such as Grice's maxims, and are collectively combined with the base RNN generator through a composite decoding objective. 
Human evaluation demonstrates that text generated by our model is preferred over that of baselines by a large margin, significantly enhancing the overall coherence, style, and information of the generations.", "phrases": ["discriminator", "fluency", "text generation", "beam search"], "overall_score": 4.447297012284686, "scores": [3.70947239306499, 0.8860618033665548, 0.5444434300134373, 0.533510131486417], "rank_score": 1.41837193948285} -{"id": "settles-craven-2008-analysis", "title": "An Analysis of Active Learning Strategies for Sequence Labeling Tasks", "abstract": "Active learning is well-suited to many problems in natural language processing, where unlabeled data may be abundant but annotation is slow and expensive. This paper aims to shed light on the best active learning approaches for sequence labeling tasks such as information extraction and document segmentation. We survey previously used query selection strategies for sequence models, and propose several novel algorithms to address their shortcomings. We also conduct a large-scale empirical comparison using multiple corpora, which demonstrates that our proposed methods advance the state of the art.", "phrases": ["active learning", "sequence labeling task", "annotation effort"], "overall_score": 4.775829491305541, "scores": [2.3096061656179567, 1.416383346337333, 0.5289049782547298], "rank_score": 1.4182981634033398} -{"id": "van-aken-etal-2018-challenges", "title": "Challenges for Toxic Comment Classification: An In-Depth Error Analysis", "abstract": "Toxic comment classification has become an active research field with many recently proposed approaches. However, while these approaches address some of the task's challenges, others remain unsolved, and directions for further research are needed. To this end, we compare different deep learning and shallow approaches on a new, large comment dataset and propose an ensemble that outperforms all individual models. Further, we validate our findings on a second dataset. The results of the ensemble enable us to perform an extensive error analysis, which reveals open challenges for state-of-the-art methods and directions towards pending future research. These challenges include missing paradigmatic context and inconsistent dataset labels.", "phrases": ["comment classification", "in-depth error analysis", "explicit abuse"], "overall_score": 3.8403077448561356, "scores": [2.4231991157729174, 1.300279698109033, 0.5308452861215307], "rank_score": 1.4181080333344938} -{"id": "li-etal-2019-word-segmentation", "title": "Is Word Segmentation Necessary for Deep Learning of Chinese Representations?", "abstract": "Segmenting a chunk of text into words is usually the first step of processing Chinese text, but its necessity has rarely been explored. In this paper, we ask the fundamental question of whether Chinese word segmentation (CWS) is necessary for deep learning-based Chinese Natural Language Processing. We benchmark neural word-based models which rely on word segmentation against neural char-based models which do not involve word segmentation in four end-to-end NLP benchmark tasks: language modeling, machine translation, sentence matching/paraphrase and text classification. Through direct comparisons between these two types of models, we find that char-based models consistently outperform word-based models. Based on these observations, we conduct comprehensive experiments to study why word-based models underperform char-based models in these deep learning-based NLP tasks. 
We show that it is because word-based models are more vulnerable to data sparsity and the presence of out-of-vocabulary (OOV) words, and thus more prone to overfitting. We hope this paper could encourage researchers in the community to rethink the necessity of word segmentation in deep learning-based Chinese Natural Language Processing.", "phrases": ["word segmentation", "deep learning", "chinese"], "overall_score": 3.8398641531224627, "scores": [2.276450959566056, 1.4230909153519036, 0.554290810561346], "rank_score": 1.4179442284931019} -{"id": "zhang-etal-2017-adversarial", "title": "Adversarial Training for Unsupervised Bilingual Lexicon Induction", "abstract": "Word embeddings are well known to capture linguistic regularities of the language on which they are trained. Researchers also observe that these regularities can transfer across languages. However, previous endeavors to connect separate monolingual word embeddings typically require cross-lingual signals as supervision, either in the form of parallel corpus or seed lexicon. In this work, we show that such cross-lingual connection can actually be established without any form of supervision. We achieve this end by formulating the problem as a natural adversarial game, and investigating techniques that are crucial to successful training. We carry out evaluation on the unsupervised bilingual lexicon induction task. Even though this task appears intrinsically cross-lingual, we are able to demonstrate encouraging performance without any cross-lingual clues.", "phrases": ["induction", "word embedding", "cross-lingual signal", "adversarial training", "unsupervised bwe"], "overall_score": 5.4287951337234315, "scores": [3.5144504919825517, 1.5743891055287769, 0.8969513947255102, 0.5544657787032398, 0.5494580541434266], "rank_score": 1.4179429650167013} -{"id": "kumar-etal-2018-benchmarking", "title": "Benchmarking Aggression Identification in Social Media", "abstract": "In this paper, we present the report and findings of the Shared Task on Aggression Identification organised as part of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC - 1) at COLING 2018. The task was to develop a classifier that could discriminate between Overtly Aggressive, Covertly Aggressive, and Non-aggressive texts. For this task, the participants were provided with a dataset of 15,000 aggression-annotated Facebook Posts and Comments each in Hindi (in both Roman and Devanagari script) and English for training and validation. For testing, two different sets - one from Facebook and another from a different social media - were provided. A total of 130 teams registered to participate in the task, 30 teams submitted their test runs, and finally 20 teams also sent their system description paper which are included in the TRAC workshop proceedings. The best system obtained a weighted F-score of 0.64 for both Hindi and English on the Facebook test sets, while the best scores on the surprise set were 0.60 and 0.50 for English and Hindi respectively. The results presented in this report depict how challenging the task is. 
The positive response from the community and the great levels of participation in the first edition of this shared task also highlight the interest in this topic.", "phrases": ["aggression identification", "aggressive", "cyberbullying", "social medium", "hate speech"], "overall_score": 4.672306462565986, "scores": [3.655537046387835, 1.2595518428927484, 1.0589584250828632, 0.5702726939769475, 0.5438743600756697], "rank_score": 1.4176388736832126} -{"id": "taira-etal-2008-japanese", "title": "A Japanese Predicate Argument Structure Analysis using Decision Lists", "abstract": "This paper describes a new automatic method for Japanese predicate argument structure analysis. The method learns relevant features to assign case roles to the argument of the target predicate using the features of the words located closest to the target predicate under various constraints such as dependency types, words, semantic categories, parts of speech, functional words and predicate voices. We constructed decision lists in which these features were sorted by their learned weights. Using our method, we integrated the tasks of semantic role labeling and zero-pronoun identification, and achieved a 17% improvement compared with a baseline method in a sentence level performance analysis.", "phrases": ["predicate", "decision list", "zero-pronoun identification"], "overall_score": 3.5224976691534176, "scores": [2.5217565333157697, 1.160680729985995, 0.570234766998374], "rank_score": 1.4175573434333797} -{"id": "lei-etal-2014-low", "title": "Low-Rank Tensors for Scoring Dependency Structures", "abstract": "Accurate scoring of syntactic structures such as head-modifier arcs in dependency parsing typically requires rich, high-dimensional feature representations. A small subset of such features is often selected manually. This is problematic when features lack clear linguistic meaning as in embeddings or when the information is blended across features. In this paper, we use tensors to map high-dimensional feature vectors into low dimensional representations. We explicitly maintain the parameters as a low-rank tensor to obtain low dimensional representations of words in their syntactic roles, and to leverage modularity in the tensor for easy training with online algorithms. Our parser consistently outperforms the Turbo and MST parsers across 14 different languages. We also obtain the best published UAS results on 5 languages.", "phrases": ["tensor", "scoring", "dependency parsing", "rbgparser"], "overall_score": 4.31493346841804, "scores": [3.1223473269074127, 1.4104086356307282, 0.5886309215685701, 0.5477235352529595], "rank_score": 1.4172776048399176} -{"id": "he-etal-2017-unsupervised", "title": "An Unsupervised Neural Attention Model for Aspect Extraction", "abstract": "Aspect extraction is an important and challenging task in aspect-based sentiment analysis. Existing works tend to apply variants of topic models on this task. While fairly successful, these methods usually do not produce highly coherent aspects. In this paper, we present a novel neural approach with the aim of discovering coherent aspects. The model improves coherence by exploiting the distribution of word co-occurrences through the use of neural word embeddings. Unlike topic models which typically assume independently generated words, word embedding models encourage words that appear in similar contexts to be located close to each other in the embedding space. 
In addition, we use an attention mechanism to de-emphasize irrelevant words during training, further improving the coherence of aspects. Experimental results on real-life datasets demonstrate that our approach discovers more meaningful and coherent aspects, and substantially outperforms baseline methods on several evaluation tasks.", "phrases": ["aspect extraction", "topic model", "neural approach"], "overall_score": 4.379772057422897, "scores": [2.787748482119178, 0.8834980366292169, 0.5795253067867917], "rank_score": 1.416923941845062} -{"id": "peters-etal-2019-tune", "title": "To Tune or Not to Tune? Adapting Pretrained Representations to Diverse Tasks", "abstract": "While most previous work has focused on different pretraining objectives and architectures for transfer learning, we ask how to best adapt the pretrained model to a given target task. We focus on the two most common forms of adaptation, feature extraction (where the pretrained weights are frozen), and directly fine-tuning the pretrained model. Our empirical results across diverse NLP tasks with two state-of-the-art models show that the relative performance of fine-tuning vs. feature extraction depends on the similarity of the pretraining and target tasks. We explore possible explanations for this finding and provide a set of adaptation guidelines for the NLP practitioner.", "phrases": ["weight", "fine-tuning", "downstream task"], "overall_score": 5.0360409781193285, "scores": [1.7946274176364359, 1.232724737577663, 1.2220554057516366], "rank_score": 1.4164691869885786} -{"id": "marsi-krahmer-2005-explorations", "title": "Explorations in Sentence Fusion", "abstract": "The invention provides methods and compositions for expressing a recombinant gene in eukaryotic cells, especially fungi, preferably yeast. The invention provides transcriptional regulating elements having a novel nucleotide sequence which are capable of trancriptionally regulating the expression of a cis joined gene, typically in response to the availability of certain nutrients to the host cell. Preferred regulatory elements are responsive to nutrient depletion, particulary glucose, ethanol, phosphate or a nitrogen source. Nucleic acid constructs comprising such regulatory elements operably linked to recombinant genes, cells comprising such regulatory elements, and methods of producing recombinant protein in such cells are also provided. The invention discloses regulatory elements which are induced through the ras gene product. Accordingly, the disclosed expression systems also provide a convenient marker for ras gene function. Finally, the invention also provides methods and compositions for the diagnosis and treatment of fungal infection. In particular, the invention provides gp37-derived peptides encoded by YGP1 and gp37- selective binding agents, such as antibodies.", "phrases": ["sentence fusion", "union fusion", "variant"], "overall_score": 3.927005009218963, "scores": [2.8289323807179954, 0.8547759519941693, 0.5653946565475723], "rank_score": 1.416367663086579} -{"id": "bing-etal-2015-abstractive", "title": "Abstractive Multi-Document Summarization via Phrase Selection and Merging", "abstract": "We propose an abstraction-based multi-document summarization framework that can construct new sentences by exploring more fine-grained syntactic units than sentences, namely, noun/verb phrases. Different from existing abstraction-based approaches, our method first constructs a pool of concepts and facts represented by phrases from the input documents. 
Then new sentences are generated by selecting and merging informative phrases to maximize the salience of phrases and meanwhile satisfy the sentence construction constraints. We employ integer linear optimization for conducting phrase selection and merging simultaneously in order to achieve the global optimal solution for a summary. Experimental results on the benchmark data set TAC 2011 show that our framework outperforms the state-of-the-art models under automated pyramid evaluation metric, and achieves reasonably good results on manual linguistic quality evaluation.", "phrases": ["summarization", "phrase selection", "fine-grained syntactic unit", "input document"], "overall_score": 3.9266534689076904, "scores": [2.154216662324319, 1.8497836320178256, 1.1126503844166677, 0.5483128081237579], "rank_score": 1.4162408717206427} -{"id": "ji-eisenstein-2014-representation", "title": "Representation Learning for Text-level Discourse Parsing", "abstract": "Text-level discourse parsing is notoriously difficult, as distinctions between discourse relations require subtle semantic judgments that are not easily captured using standard features. In this paper, we present a representation learning approach, in which we transform surface features into a latent space that facilitates RST discourse parsing. By combining the machinery of large-margin transition-based structured prediction with representation learning, our method jointly learns to parse discourse while at the same time learning a discourse-driven projection of surface features. The resulting shift-reduce discourse parser obtains substantial improvements over the previous state-of-the-art in predicting relations and nuclearity on the RST Treebank.", "phrases": ["rst", "discourse parser", "representation learning", "shift-reduce parser", "dplp"], "overall_score": 4.993165246446622, "scores": [2.412050897700648, 1.8803936932970633, 1.0554577401716596, 0.8784723937614908, 0.853396630821425], "rank_score": 1.4159542711504574} -{"id": "wellington-etal-2006-empirical", "title": "Empirical Lower Bounds on the Complexity of Translational Equivalence", "abstract": "This paper describes a study of the patterns of translational equivalence exhibited by a variety of bitexts. The study found that the complexity of these patterns in every bitext was higher than suggested in the literature. These findings shed new light on why \"syntactic\" constraints have not helped to improve statistical translation models, including finite-state phrase-based models, tree-to-string models, and tree-to-tree models. The paper also presents evidence that inversion transduction grammars cannot generate some translational equivalence relations, even in relatively simple real bitexts in syntactically similar languages with rigid word order. Instructions for replicating our experiments are at http://nip.cs.nyu.edu/GenPar/ACL06", "phrases": ["complexity", "translational equivalence", "bitext", "itg", "inside-out alignment"], "overall_score": 4.309839720549359, "scores": [2.4567858928723987, 1.9445432061290262, 1.2454843835602183, 0.8520359721313161, 0.5791731395042644], "rank_score": 1.4156045188394446} -{"id": "chen-etal-2018-temporally", "title": "Temporally Grounding Natural Sentence in Video", "abstract": "We introduce an effective and efficient method that grounds (i.e., localizes) natural sentences in long, untrimmed video sequences. 
Specifically, a novel Temporal GroundNet (TGN) is proposed to temporally capture the evolving fine-grained frame-by-word interactions between video and sentence. TGN sequentially scores a set of temporal candidates ending at each frame based on the exploited frame-by-word interactions, and finally grounds the segment corresponding to the sentence. Unlike traditional methods treating the overlapping segments separately in a sliding window fashion, TGN aggregates the historical information and generates the final grounding result in one single pass. We extensively evaluate our proposed TGN on three public datasets with significant improvements over the state of the art. We further show the consistent effectiveness and efficiency of TGN through an ablation study and a runtime test.", "phrases": ["natural sentence", "video", "frame-by-word interaction"], "overall_score": 3.2595253364290717, "scores": [2.734792833567385, 0.9281634591689242, 0.5838253089686494], "rank_score": 1.4155938672349861} -{"id": "hulth-2003-improved", "title": "Improved Automatic Keyword Extraction Given More Linguistic Knowledge", "abstract": "In this paper, experiments on automatic extraction of keywords from abstracts using a supervised machine learning algorithm are discussed. The main point of this paper is that by adding linguistic knowledge to the representation (such as syntactic features), rather than relying only on statistics (such as term frequency and n-grams), a better result is obtained as measured by keywords previously assigned by professional indexers. In more detail, extracting NP-chunks gives a better precision than n-grams, and by adding the PoS tag(s) assigned to the term as a feature, a dramatic improvement of the results is obtained, independent of the term selection approach applied.", "phrases": ["keyword", "linguistic knowledge", "binary classification problem", "adjective"], "overall_score": 4.948889270774646, "scores": [2.721528595916021, 1.5541360492426406, 0.8552241789893951, 0.5306339196483479], "rank_score": 1.4153806859491012} -{"id": "coppersmith-etal-2015-adhd", "title": "From ADHD to SAD: Analyzing the Language of Mental Health on Twitter through Self-Reported Diagnoses", "abstract": "Many significant challenges exist for the mental health field, but one in particular is a lack of data available to guide research. Language provides a natural lens for studying mental health \u2010 much existing work and therapy have strong linguistic components, so the creation of a large, varied, language-centric dataset could provide significant grist for the field of mental health research. We examine a broad range of mental health conditions in Twitter data by identifying self-reported statements of diagnosis. We systematically explore language differences between ten conditions with respect to the general population, and to each other. Our aim is to provide guidance and a roadmap for where deeper exploration is likely to be fruitful.", "phrases": ["mental health", "twitter", "self-reported diagnosis"], "overall_score": 3.6301355853857316, "scores": [1.7463546692991636, 1.6906593162161607, 0.8088424578006344], "rank_score": 1.4152854811053197} -{"id": "kudo-etal-2004-applying", "title": "Applying Conditional Random Fields to Japanese Morphological Analysis", "abstract": "This paper presents Japanese morphological analysis based on conditional random fields (CRFs). Previous work in CRFs assumed that observation sequence (word) boundaries were fixed. 
However, word boundaries are not clear in Japanese, and hence a straightforward application of CRFs is not possible. We show how CRFs can be applied to situations where word boundary ambiguity exists. CRFs offer a solution to the long-standing problems in corpus-based or statistical Japanese morphological analysis. First, flexible feature designs for hierarchical tagsets become possible. Second, influences of label and length bias are minimized. We apply CRFs to the standard testbed corpus used for Japanese morphological analysis, and evaluate our results using the same experimental dataset as the HMMs and MEMMs previously reported in this task. Our results confirm that CRFs not only solve the long-standing problems but also improve the performance over HMMs and MEMMs.", "phrases": ["conditional random fields", "japanese morphological analysis", "word boundary", "mecab", "dictionary-based approach"], "overall_score": 4.71514943140143, "scores": [3.893606792964323, 0.9284670609140734, 0.8426858718837934, 0.8398994650027348, 0.5704609241669046], "rank_score": 1.415024022986366} -{"id": "voorhees-2008-contradictions", "title": "Contradictions and Justifications: Extensions to the Textual Entailment Task", "abstract": "The third PASCAL Recognizing Textual Entailment Challenge (RTE-3) contained an optional task that extended the main entailment task by requiring a system to make three-way entailment decisions (entails, contradicts, neither) and to justify its response. Contradiction was rare in the RTE-3 test set, occurring in only about 10% of the cases, and systems found accurately detecting it difficult. Subsequent analysis of the results shows a test set must contain many more entailment pairs for the three-way decision task than the traditional two-way task to have equal confidence in system comparisons. Each of six human judges representing eventual end users rated the quality of a justification by assigning \u201cunderstandability\u201d and \u201ccorrectness\u201d scores. Ratings of the same justification across judges differed significantly, signaling the need for a better characterization of the justification task.", "phrases": ["justification", "optional task", "contradiction"], "overall_score": 2.7526272343010625, "scores": [2.8578754145806275, 0.8623567428737035, 0.5234795611531111], "rank_score": 1.4145705728691473} -{"id": "xu-koehn-2017-zipporah", "title": "Zipporah: a Fast and Scalable Data Cleaning System for Noisy Web-Crawled Parallel Corpora", "abstract": "We introduce Zipporah, a fast and scalable data cleaning system. We propose a novel type of bag-of-words translation feature, and train logistic regression models to classify good data and synthetic noisy data in the proposed feature space. The trained model is used to score parallel sentences in the data pool for selection. As shown in experiments, Zipporah selects a high-quality parallel corpus from a large, mixed-quality data pool. 
In particular, for one noisy dataset, Zipporah achieves a 2.1 BLEU score improvement using only 1/5 of the data, compared to using the entire corpus.", "phrases": ["regression model", "noisy data", "zipporah", "sentence pair", "adequacy"], "overall_score": 4.088294839758239, "scores": [3.3993504695629286, 1.4282337002356778, 1.1525056895673844, 0.5494320258247978, 0.5427428305667705], "rank_score": 1.4144529431515118} -{"id": "wang-etal-2020-semeval", "title": "SemEval-2020 Task 4: Commonsense Validation and Explanation", "abstract": "In this paper, we present SemEval-2020 Task 4, Commonsense Validation and Explanation (ComVE), which includes three subtasks, aiming to evaluate whether a system can distinguish a natural language statement that makes sense to humans from one that does not, and provide the reasons. Specifically, in our first subtask, the participating systems are required to choose from two natural language statements of similar wording the one that makes sense and the one that does not. The second subtask additionally asks a system to select the key reason from three options why a given statement does not make sense. In the third subtask, a participating system needs to generate the reason automatically. 39 teams submitted their valid systems to at least one subtask. For Subtask A and Subtask B, top-performing teams have achieved results close to human performance. However, for Subtask C, there is still a considerable gap between system and human performance. The dataset used in our task can be found at .", "phrases": ["validation", "explanation", "natural language statement", "semeval-2020 task"], "overall_score": 3.731943836886575, "scores": [3.6023848413016086, 0.9643273909883873, 0.5604182899144882, 0.5293496080405766], "rank_score": 1.4141200325612653} -{"id": "narayan-etal-2018-ranking", "title": "Ranking Sentences for Extractive Summarization with Reinforcement Learning", "abstract": "Single document summarization is the task of producing a shorter version of a document while preserving its principal information content. In this paper we conceptualize extractive summarization as a sentence ranking task and propose a novel training algorithm which globally optimizes the ROUGE evaluation metric through a reinforcement learning objective. We use our algorithm to train a neural summarization model on the CNN and DailyMail datasets and demonstrate experimentally that it outperforms state-of-the-art extractive and abstractive systems when evaluated automatically and by humans.", "phrases": ["extractive summarization", "reinforcement learning", "human evaluation", "document encoder"], "overall_score": 5.0657976827849485, "scores": [3.2731005164131863, 1.3307075009157587, 0.5272234662417242, 0.5235195538712583], "rank_score": 1.413637759360482} -{"id": "li-etal-2011-automatic", "title": "Automatic Evaluation of Chinese Translation Output: Word-Level or Character-Level?", "abstract": "Word is usually adopted as the smallest unit in most tasks of Chinese language processing. However, for automatic evaluation of the quality of Chinese translation output when translating from other languages, either a word-level approach or a character-level approach is possible. So far, there has been no detailed study to compare the correlations of these two approaches with human assessment. In this paper, we compare word-level metrics with character-level metrics on the submitted output of English-to-Chinese translation systems in the IWSLT'08 CT-EC and NIST'08 EC tasks. 
Our experimental results reveal that character-level metrics correlate with human assessment better than word-level metrics. Our analysis suggests several key reasons behind this finding.", "phrases": ["chinese translation output", "word-level metric", "automatic evaluation"], "overall_score": 2.2743238892963493, "scores": [1.8627089400791297, 1.8191937930241477, 0.557447921666649], "rank_score": 1.4131168849233091} -{"id": "schick-schutze-2021-exploiting", "title": "Exploiting Cloze-Questions for Few-Shot Text Classification and Natural Language Inference", "abstract": "Some NLP tasks can be solved in a fully unsupervised fashion by providing a pretrained language model with \u201ctask descriptions\u201d in natural language (e.g., Radford et al., 2019). While this approach underperforms its supervised counterpart, we show in this work that the two ideas can be combined: We introduce Pattern-Exploiting Training (PET), a semi-supervised training procedure that reformulates input examples as cloze-style phrases to help language models understand a given task. These phrases are then used to assign soft labels to a large set of unlabeled examples. Finally, standard supervised training is performed on the resulting training set. For several tasks and languages, PET outperforms supervised training and strong semi-supervised approaches in low-resource settings by a large margin.", "phrases": ["text classification", "cloze-style phrase", "few-shot learning", "prompt-based finetuning"], "overall_score": 5.8306590419925906, "scores": [2.6500695875194915, 1.6307674639568441, 0.8394808360501449, 0.5307305136308149], "rank_score": 1.4127621002893238} -{"id": "green-etal-2014-human", "title": "Human Effort and Machine Learnability in Computer Aided Translation", "abstract": "Analyses of computer aided translation typically focus on either frontend interfaces and human effort, or backend translation and machine learnability of corrections. However, this distinction is artificial in practice since the frontend and backend must work in concert. We present the first holistic, quantitative evaluation of these issues by contrasting two assistive modes: post-editing and interactive machine translation (MT). We describe a new translator interface, extensive modifications to a phrase-based MT system, and a novel objective function for re-tuning to human corrections. Evaluation with professional bilingual translators shows that post-edit is faster than interactive at the cost of translation quality for French-English and English-German. However, re-tuning the MT system to interactive output leads to larger, statistically significant reductions in HTER versus re-tuning to post-edit. Analysis shows that tuning directly to HTER results in fine-grained corrections to subsequent machine output.", "phrases": ["machine learnability", "translator", "human effort"], "overall_score": 3.8249292619579656, "scores": [2.2516541522414895, 0.8635059551133537, 1.1221275844329666], "rank_score": 1.4124292305959365} -{"id": "chiang-etal-2006-parsing", "title": "Parsing Arabic Dialects", "abstract": "The Arabic language is a collection of spoken dialects with important phonological, morphological, lexical, and syntactic differences, along with a standard written language, Modern Standard Arabic (MSA). Since the spoken dialects are not officially written, it is very costly to obtain adequate corpora to use for training dialect NLP tools such as parsers. 
In this paper, we address the problem of parsing transcribed spoken Levantine Arabic (LA). We do not assume the existence of any annotated LA corpus (except for development and testing), nor of a parallel corpus LA-MSA. Instead, we use explicit knowledge about the relation between LA and MSA.", "phrases": ["arabic dialect", "levantine arabic", "msa treebank", "danlp"], "overall_score": 4.485398420174637, "scores": [2.8829750281186217, 1.3597224024988983, 0.8707839092924711, 0.5319842936076867], "rank_score": 1.4113664083794195} -{"id": "colmenares-etal-2015-heads", "title": "HEADS: Headline Generation as Sequence Prediction Using an Abstract Feature-Rich Space", "abstract": "Automatic headline generation is a sub-task of document summarization with many reported applications. In this study we present a sequence-prediction technique for learning how editors title their news stories. The introduced technique models the problem as a discrete optimization task in a feature-rich space. In this space the global optimum can be found in polynomial time by means of dynamic programming. We train and test our model on an extensive corpus of financial news, and compare it against a number of baselines by using standard metrics from the document summarization domain, as well as some new ones proposed in this work. We also assess the readability and informativeness of the generated titles through human evaluation. The obtained results are very appealing and substantiate the soundness of the approach.", "phrases": ["headline generation", "feature-rich space", "optimization task"], "overall_score": 2.7462636122780886, "scores": [3.1300811519784864, 0.5693075391333154, 0.5345122630684186], "rank_score": 1.4113003180600734} -{"id": "lee-2004-morphological", "title": "Morphological Analysis for Statistical Machine Translation", "abstract": "We present a novel morphological analysis technique which induces a morphological and syntactic symmetry between two languages with highly asymmetrical morphological structures to improve statistical machine translation qualities. The technique pre-supposes fine-grained segmentation of a word in the morphologically rich language into the sequence of prefix(es)-stem-suffix(es) and part-of-speech tagging of the parallel corpus. The algorithm identifies morphemes to be merged or deleted in the morphologically rich language to induce the desired morphological and syntactic symmetry. The technique improves Arabic-to-English translation qualities significantly when applied to IBM Model 1 and Phrase Translation Models trained on the training corpus size ranging from 3,500 to 3.3 million sentence pairs.", "phrases": ["machine translation", "symmetry", "morphological analysis", "arabic", "pos"], "overall_score": 4.8903922612242, "scores": [2.5229579263079374, 1.592868401121169, 1.060032609661196, 1.0068337351165375, 0.8726519910630773], "rank_score": 1.4110689326539834} -{"id": "khayrallah-etal-2018-regularized", "title": "Regularized Training Objective for Continued Training for Domain Adaptation in Neural Machine Translation", "abstract": "Supervised domain adaptation\u2014where a large generic corpus and a smaller in-domain corpus are both available for training\u2014is a challenge for neural machine translation (NMT). Standard practice is to train a generic model and use it to initialize a second model, then continue training the second model on in-domain data to produce an in-domain model. 
We add an auxiliary term to the training objective during continued training that minimizes the cross entropy between the in-domain model's output word distribution and that of the out-of-domain model to prevent the model's output from differing too much from the original out-of-domain model. We perform experiments on EMEA (descriptions of medicines) and TED (rehearsed presentations), initialized from a general domain (WMT) model. Our method shows improvements over standard continued training by up to 1.5 BLEU.", "phrases": ["continued training", "domain adaptation", "neural machine translation", "model parameter"], "overall_score": 3.6192734252957353, "scores": [1.9109104002298496, 1.770753453095695, 1.43830476764347, 0.524233927930641], "rank_score": 1.411050637224914} -{"id": "vaswani-etal-2018-tensor2tensor", "title": "Tensor2Tensor for Neural Machine Translation", "abstract": "Tensor2Tensor is a library for deep learning models that is well-suited for neural machine translation and includes the reference implementation of the state-of-the-art Transformer model.", "phrases": ["neural machine translation", "implementation", "tensor2tensor"], "overall_score": 3.2489437006829562, "scores": [2.4055063042127807, 0.9793604802177807, 0.848128179232253], "rank_score": 1.410998321220938} -{"id": "nairn-etal-2006-computing", "title": "Computing relative polarity for textual inference", "abstract": "Semantic relations between main and complement sentences are of great significance in any system of automatic data processing that depends on natural language. In this paper we present a strategy for detecting author commitment to the truth/falsity of complement clauses based on their syntactic type and on the meaning of their embedding predicate. We show that the implications of a predicate at an arbitrary depth of embedding about its complement clause depend on a globally determined notion of relative polarity. We, moreover, observe that different classes of complement-taking verbs have a different effect on the polarity of their complement clauses and that this effect depends recursively on their own embedding. A polarity propagation algorithm is presented as part of a general strategy of canonicalization of linguistically-based representations, with a view to minimizing the demands on the entailment and contradiction detection process.", "phrases": ["polarity", "author commitment", "notion"], "overall_score": 3.50525121154092, "scores": [3.143898999180704, 0.5651859342210439, 0.5227656415564345], "rank_score": 1.410616858319394} -{"id": "xiao-guo-2014-distributed", "title": "Distributed Word Representation Learning for Cross-Lingual Dependency Parsing", "abstract": "This paper proposes to learn language-independent word representations to address cross-lingual dependency parsing, which aims to predict the dependency parsing trees for sentences in the target language by training a dependency parser with labeled sentences from a source language. We first combine all sentences from both languages to induce real-valued distributed representation of words under a deep neural network architecture, which is expected to capture semantic similarities of words not only within the same language but also across different languages. We then use the induced interlingual word representation as augmenting features to train a delexicalized dependency parser on labeled sentences in the source language and apply it to the target sentences. 
To investigate the effectiveness of the proposed technique, extensive experiments are conducted on cross-lingual dependency parsing tasks with nine different languages. The experimental results demonstrate the superior cross-lingual generalizability of the word representation induced by the proposed approach, compared to alternative comparison methods.", "phrases": ["dependency parsing", "pseudo-cross-lingual method", "word pair"], "overall_score": 3.9960213528616646, "scores": [3.0770930870209683, 0.590355318335126, 0.5638122174045221], "rank_score": 1.4104202075868721} -{"id": "lee-etal-2011-discriminative", "title": "A Discriminative Model for Joint Morphological Disambiguation and Dependency Parsing", "abstract": "Most previous studies of morphological disambiguation and dependency parsing have been pursued independently. Morphological taggers operate on n-grams and do not take into account syntactic relations; parsers use the \"pipeline\" approach, assuming that morphological information has been separately obtained. \n \nHowever, in morphologically-rich languages, there is often considerable interaction between morphology and syntax, such that neither can be disambiguated without the other. In this paper, we propose a discriminative model that jointly infers morphological properties and syntactic structures. In evaluations on various highly-inflected languages, this joint model outperforms both a baseline tagger in morphological disambiguation, and a pipeline parser in head selection.", "phrases": ["dependency parsing", "morphological property", "highly-inflected language", "joint tagging"], "overall_score": 3.909095779225552, "scores": [4.023994046606432, 0.5566997934226318, 0.5364232374332727, 0.5225160175863466], "rank_score": 1.4099082737621704} -{"id": "liu-seneff-2009-review", "title": "Review Sentiment Scoring via a Parse-and-Paraphrase Paradigm", "abstract": "This paper presents a parse-and-paraphrase paradigm to assess the degrees of sentiment for product reviews. Sentiment identification has been well studied; however, most previous work provides binary polarities only (positive and negative), and the polarity of sentiment is simply reversed when a negation is detected. The extraction of lexical features such as unigram/bigram also complicates the sentiment classification task, as linguistic structure such as implicit long-distance dependency is often disregarded. In this paper, we propose an approach to extracting adverb-adjective-noun phrases based on clause structure obtained by parsing sentences into a hierarchical representation. We also propose a robust general solution for modeling the contribution of adverbials and negation to the score for degree of sentiment. In an application involving extracting aspect-based pros and cons from restaurant reviews, we obtained a 45% relative improvement in recall through the use of parsing methods, while also improving precision.", "phrases": ["parse-and-paraphrase paradigm", "product review", "polarity"], "overall_score": 3.615524674116572, "scores": [2.455770705816291, 1.2288720938493314, 0.5441245211380469], "rank_score": 1.4095891069345565} -{"id": "qin-etal-2019-stack", "title": "A Stack-Propagation Framework with Token-Level Intent Detection for Spoken Language Understanding", "abstract": "Intent detection and slot filling are two main tasks for building a spoken language understanding (SLU) system. The two tasks are closely tied and the slots often highly depend on the intent. 
In this paper, we propose a novel framework for SLU to better incorporate the intent information, which further guides the slot filling. In our framework, we adopt a joint model with Stack-Propagation which can directly use the intent information as input for slot filling, thus capturing the intent semantic knowledge. In addition, to further alleviate the error propagation, we perform the token-level intent detection for the Stack-Propagation framework. Experiments on two publicly available datasets show that our model achieves state-of-the-art performance and outperforms other previous methods by a large margin. Finally, we use the Bidirectional Encoder Representation from Transformer (BERT) model in our framework, which further boosts our performance on the SLU task.", "phrases": ["stack-propagation framework", "intent detection", "spoken language understanding", "slu task"], "overall_score": 4.222705106069386, "scores": [3.397744646247904, 0.8999649365161307, 0.7967471601060025, 0.543837631226049], "rank_score": 1.4095735935240215} -{"id": "petroni-etal-2021-kilt", "title": "KILT: a Benchmark for Knowledge Intensive Language Tasks", "abstract": "Challenging problems such as open-domain question answering, fact checking, slot filling and entity linking require access to large, external knowledge sources. While some models do well on individual tasks, developing general models is difficult as each task might require computationally expensive indexing of custom knowledge sources, in addition to dedicated infrastructure. To catalyze research on models that condition on specific information in large textual resources, we present a benchmark for knowledge-intensive language tasks (KILT). All tasks in KILT are grounded in the same snapshot of Wikipedia, reducing engineering turnaround through the re-use of components, as well as accelerating research into task-agnostic memory architectures. We test both task-specific and general baselines, evaluating downstream performance in addition to the ability of the models to provide provenance. We find that a shared dense vector index coupled with a seq2seq model is a strong baseline, outperforming more tailor-made approaches for fact checking, open-domain question answering and dialogue, and yielding competitive results on entity linking and slot filling, by generating disambiguated text. KILT data and code are available at .", "phrases": ["question answering", "knowledge source", "kilt", "knowledge-intensive task", "genre"], "overall_score": 4.356355069233448, "scores": [3.164509345012405, 1.2675438650448623, 1.1584127958228025, 0.8805529659045598, 0.575721949713247], "rank_score": 1.4093481842995754} -{"id": "yu-chen-2012-detecting", "title": "Detecting Word Ordering Errors in Chinese Sentences for Learning Chinese as a Foreign Language", "abstract": "Automatic detection of sentence errors is an important NLP task and is valuable to assist foreign language learners. In this paper, we investigate the problem of word ordering errors in Chinese sentences and propose classifiers to detect this type of error. Word n-gram features in Google Chinese Web 5-gram corpus and ClueWeb09 corpus, and POS features in the Chinese POS-tagged ClueWeb09 corpus are adopted in the classifiers. 
The experimental results show that integrating syntactic features, web corpus features and perturbation features is useful for word ordering error detection, and the proposed classifier achieves 71.64% accuracy on the experimental datasets.", "phrases": ["chinese", "syntactic feature", "word-ordering error"], "overall_score": 4.290176321465963, "scores": [1.84234418841962, 1.3446816376394226, 1.0404118846717008], "rank_score": 1.4091459035769145} -{"id": "beinborn-etal-2014-predicting", "title": "Predicting the Difficulty of Language Proficiency Tests", "abstract": "Language proficiency tests are used to evaluate and compare the progress of language learners. We present an approach for automatic difficulty prediction of C-tests that performs on par with human experts. On the basis of detailed analysis of newly collected data, we develop a model for C-test difficulty introducing four dimensions: solution difficulty, candidate ambiguity, inter-gap dependency, and paragraph difficulty. We show that cues from all four dimensions contribute to C-test difficulty.", "phrases": ["difficulty", "language proficiency test", "c-tests"], "overall_score": 2.9300221220090976, "scores": [2.746148640294931, 0.8965556605810641, 0.584424084241487], "rank_score": 1.4090427950391609} -{"id": "muis-lu-2017-labeling", "title": "Labeling Gaps Between Words: Recognizing Overlapping Mentions with Mention Separators", "abstract": "In this paper, we propose a new model that is capable of recognizing overlapping mentions. We introduce a novel notion of mention separators that can be effectively used to capture how mentions overlap with one another. On top of a novel multigraph representation that we introduce, we show that efficient and exact inference can still be performed. We present some theoretical analysis on the differences between our model and a recently proposed model for recognizing overlapping mentions, and discuss the possible implications of the differences. Through extensive empirical analysis on standard datasets, we demonstrate the effectiveness of our approach.", "phrases": ["gap", "mention separator", "multigraph representation", "ambiguity issue", "spurious structure issue"], "overall_score": 4.350299770790739, "scores": [4.334476708935336, 1.0812814679531122, 0.5572886531455155, 0.5424523462679144, 0.5214468320977821], "rank_score": 1.407389201679932} -{"id": "sun-etal-2018-stance", "title": "Stance Detection with Hierarchical Attention Network", "abstract": "Stance detection aims to assign a stance label (for or against) to a post toward a specific target. Recently, there is a growing interest in using neural models to detect stance of documents. Most of these works model the sequence of words to learn document representation. However, much linguistic information, such as polarity and arguments of the document, is correlated with the stance of the document, and can inspire us to explore the stance. Hence, we present a neural model to fully employ various linguistic information to construct the document representation. In addition, since the influences of different linguistic information are different, we propose a hierarchical attention network to weigh the importance of various linguistic information, and learn the mutual attention between the document and the linguistic information. 
The experimental results on two datasets demonstrate the effectiveness of the proposed hierarchical attention neural model.", "phrases": ["hierarchical attention network", "document representation", "linguistic information", "stance detection"], "overall_score": 3.374742296597772, "scores": [2.543137773273749, 2.0050283778327636, 0.5462911927473356, 0.53505005770904], "rank_score": 1.407376850390722} -{"id": "baly-etal-2018-predicting", "title": "Predicting Factuality of Reporting and Bias of News Media Sources", "abstract": "We present a study on predicting the factuality of reporting and bias of news media. While previous work has focused on studying the veracity of claims or documents, here we are interested in characterizing entire news media. This is an under-studied, but arguably important research problem, both in its own right and as a prior for fact-checking systems. We experiment with a large list of news websites and with a rich set of features derived from (i) a sample of articles from the target news media, (ii) its Wikipedia page, (iii) its Twitter account, (iv) the structure of its URL, and (v) information about the Web traffic it attracts. The experimental results show sizable performance gains over the baseline, and reveal the importance of each feature type.", "phrases": ["factuality", "wikipedia page", "news medium", "political bias", "article level"], "overall_score": 4.067129052836868, "scores": [3.469843584627271, 1.5525213825771307, 0.9159589695836805, 0.5581870019713335, 0.5391394783987531], "rank_score": 1.4071300834316338} -{"id": "lin-etal-2008-mining", "title": "Mining Parenthetical Translations from the Web by Word Alignment", "abstract": "Documents in languages such as Chinese, Japanese and Korean sometimes annotate terms with their translations in English inside a pair of parentheses. We present a method to extract such translations from a large collection of web documents by building a partially parallel corpus and use a word alignment algorithm to identify the terms being translated. The method is able to generalize across the translations for different terms and can reliably extract translations that occurred only once in the entire web. Our experiment on Chinese web pages produced more than 26 million pairs of translations, which is over two orders of magnitude more than previous results. We show that the addition of the extracted translation pairs as training data provides significant increase in the BLEU score for a statistical machine translation system.", "phrases": ["web", "word alignment", "different method"], "overall_score": 3.373908341334241, "scores": [1.8348469667051912, 1.832889037134456, 0.5533511882588451], "rank_score": 1.407029064032831} -{"id": "quirk-corston-oliver-2006-impact", "title": "The impact of parse quality on syntactically-informed statistical machine translation", "abstract": "We investigate the impact of parse quality on a syntactically-informed statistical machine translation system applied to technical text. We vary parse quality by varying the amount of data used to train the parser. 
As the amount of data increases, parse quality improves, leading to improvements in machine translation output and results that significantly outperform a state-of-the-art phrasal baseline.", "phrases": ["parse quality", "syntax-based model", "translation mistake"], "overall_score": 3.7132075417853727, "scores": [2.446510077936901, 0.9031316231856132, 0.8714195469027933], "rank_score": 1.4070204160084359} -{"id": "kaji-kitsuregawa-2007-building", "title": "Building Lexicon for Sentiment Analysis from Massive Collection of HTML Documents", "abstract": "Recognizing polarity requires a list of polar words and phrases. For the purpose of building such lexicon automatically, a lot of studies have investigated (semi-) unsupervised method of learning polarity of words and phrases. In this paper, we explore to use structural clues that can extract polar sentences from Japanese HTML documents, and build lexicon from the extracted polar sentences. The key idea is to develop the structural clues so that it achieves extremely high precision at the cost of recall. In order to compensate for the low recall, we used massive collection of HTML documents. Thus, we could prepare enough polar sentence corpus.", "phrases": ["sentiment analysis", "massive collection", "html document"], "overall_score": 4.066177527310898, "scores": [2.5365997783565994, 0.8942706840663885, 0.7895321721542771], "rank_score": 1.4068008781924217} -{"id": "zhang-etal-2020-spelling", "title": "Spelling Error Correction with Soft-Masked BERT", "abstract": "Spelling error correction is an important yet challenging task because a satisfactory solution of it essentially needs human-level language understanding ability. Without loss of generality we consider Chinese spelling error correction (CSC) in this paper. A state-of-the-art method for the task selects a character from a list of candidates for correction (including non-correction) at each position of the sentence on the basis of BERT, the language representation model. The accuracy of the method can be sub-optimal, however, because BERT does not have sufficient capability to detect whether there is an error at each position, apparently due to the way of pre-training it using mask language modeling. In this work, we propose a novel neural architecture to address the aforementioned issue, which consists of a network for error detection and a network for error correction based on BERT, with the former being connected to the latter with what we call soft-masking technique. Our method of using `Soft-Masked BERT' is general, and it may be employed in other language detection-correction problems. Experimental results on two datasets, including one large dataset which we create and plan to release, demonstrate that the performance of our proposed method is significantly better than the baselines including the one solely based on BERT.", "phrases": ["soft-masked bert", "detection", "spelling error correction", "correction network"], "overall_score": 4.281543525014734, "scores": [2.761835877256327, 0.9406362914280485, 1.0823078100029178, 0.8404615658832342], "rank_score": 1.4063103861426318} -{"id": "silberer-lapata-2014-learning", "title": "Learning Grounded Meaning Representations with Autoencoders", "abstract": "In this paper we address the problem of grounding distributional representations of lexical meaning. We introduce a new model which uses stacked autoencoders to learn higher-level embeddings from textual and visual input. 
The two modalities are encoded as vectors of attributes and are obtained automatically from text and images, respectively. We evaluate our model on its ability to simulate similarity judgments and concept categorization. On both tasks, our approach outperforms baselines and related models.", "phrases": ["autoencoder", "attribute", "visual representation"], "overall_score": 4.210588115794516, "scores": [2.815088525010902, 0.8702074677674203, 0.5312905356292497], "rank_score": 1.405528842802524} -{"id": "haffari-etal-2009-active", "title": "Active Learning for Statistical Phrase-based Machine Translation", "abstract": "Statistical machine translation (SMT) models need large bilingual corpora for training, which are unavailable for some language pairs. This paper provides the first serious experimental study of active learning for SMT. We use active learning to improve the quality of a phrase-based SMT system, and show significant improvements in translation compared to a random sentence selection baseline, when test and training data are taken from the same or different domains. Experimental results are shown in a simulated setting using three language pairs, and in a realistic situation for Bangla-English, a language pair with limited translation resources.", "phrases": ["machine translation", "active learning", "seed corpus", "query selection strategy"], "overall_score": 3.6039193346489027, "scores": [2.9633203352890862, 0.9529841877577544, 0.8719576812266451, 0.8319958708654439], "rank_score": 1.4050645187847324} -{"id": "huang-etal-2018-zero", "title": "Zero-Shot Transfer Learning for Event Extraction", "abstract": "Most previous supervised event extraction methods have relied on features derived from manual annotations, and thus cannot be applied to new event types without extra annotation effort. We take a fresh look at event extraction and model it as a generic grounding problem: mapping each event mention to a specific type in a target event ontology. We design a transferable architecture of structural and compositional neural networks to jointly represent and map event mentions and types into a shared semantic space. Based on this new framework, we can select, for each event mention, the event type which is semantically closest in this space as its type. By leveraging manual annotations available for a small set of existing event types, our framework can be applied to new unseen event types without additional manual annotations. When tested on 23 unseen event types, our zero-shot framework, without manual annotations, achieved performance comparable to a supervised model trained from 3,000 sentences annotated with 500 event mentions.", "phrases": ["event extraction", "supervised model", "zero-shot learning"], "overall_score": 4.136097412607671, "scores": [3.045379021651357, 0.6234126876966549, 0.5453530990921193], "rank_score": 1.4047149361467104} -{"id": "peng-etal-2019-palm", "title": "PaLM: A Hybrid Parser and Language Model", "abstract": "We present PaLM, a hybrid parser and neural language model. Building on an RNN language model, PaLM adds an attention layer over text spans in the left context. An unsupervised constituency parser can be derived from its attention weights, using a greedy decoding algorithm. We evaluate PaLM on language modeling, and empirically show that it outperforms strong baselines. 
If syntactic annotations are available, the attention component can be trained in a supervised manner, providing syntactically-informed representations of the context, and further improving language modeling performance.", "phrases": ["language model", "attention component", "palm"], "overall_score": 2.5165227689895486, "scores": [1.8544410018948796, 1.83679611568107, 0.5222571804177598], "rank_score": 1.4044980993312366} -{"id": "ramanathan-etal-2008-simple", "title": "Simple Syntactic and Morphological Processing Can Help English-Hindi Statistical Machine Translation", "abstract": "In this paper, we report our work on incorporating syntactic and morphological information for English to Hindi statistical machine translation. Two simple and computationally inexpensive ideas have proven to be surprisingly effective: (i) reordering the English source sentence as per Hindi syntax, and (ii) using the suffixes of Hindi words. The former is done by applying simple transformation rules on the English parse tree. The latter, by using a simple suffix separation program. With only a small amount of bilingual training data and limited tools for Hindi, we achieve reasonable performance and substantial improvements over the baseline phrase-based system. Our approach eschews the use of parsing or other sophisticated linguistic tools for the target language (Hindi) making it a useful framework for statistical machine translation from English to Indian languages in general, since such tools are not widely available for Indian languages currently.", "phrases": ["statistical machine translation", "hindi", "indian language"], "overall_score": 3.2320444822570122, "scores": [2.005552106029669, 1.2626777211300355, 0.9427474245705145], "rank_score": 1.4036590839100729} -{"id": "boratko-etal-2018-systematic", "title": "A Systematic Classification of Knowledge, Reasoning, and Context within the ARC Dataset", "abstract": "The recent work of Clark et al. (2018) introduces the AI2 Reasoning Challenge (ARC) and the associated ARC dataset that partitions open domain, complex science questions into easy and challenge sets. That paper includes an analysis of 100 questions with respect to the types of knowledge and reasoning required to answer them; however, it does not include clear definitions of these types, nor does it offer information about the quality of the labels. We propose a comprehensive set of definitions of knowledge and reasoning types necessary for answering the questions in the ARC dataset. Using ten annotators and a sophisticated annotation interface, we analyze the distribution of labels across the challenge set and statistics related to them. Additionally, we demonstrate that although naive information retrieval methods return sentences that are irrelevant to answering the query, sufficient supporting text is often present in the (ARC) corpus. Evaluating with human-selected relevant sentences improves the performance of a neural machine comprehension model by 42 points.", "phrases": ["reasoning", "arc dataset", "definition"], "overall_score": 2.2588318666937157, "scores": [1.8482171621595653, 1.8015067361478216, 0.5607495519125294], "rank_score": 1.4034911500733056} -{"id": "voita-etal-2019-analyzing", "title": "Analyzing Multi-Head Self-Attention: Specialized Heads Do the Heavy Lifting, the Rest Can Be Pruned", "abstract": "Multi-head self-attention is a key component of the Transformer, a state-of-the-art architecture for neural machine translation. 
In this work we evaluate the contribution made by individual attention heads to the overall performance of the model and analyze the roles played by them in the encoder. We find that the most important and confident heads play consistent and often linguistically-interpretable roles. When pruning heads using a method based on stochastic gates and a differentiable relaxation of the L0 penalty, we observe that specialized heads are last to be pruned. Our novel pruning method removes the vast majority of heads without seriously affecting performance. For example, on the English-Russian WMT dataset, pruning 38 out of 48 encoder heads results in a drop of only 0.15 BLEU.", "phrases": ["self-attention", "head", "heavy lifting", "encoder-decoder attention", "downstream task"], "overall_score": 6.094809549926257, "scores": [2.443629775934476, 0.8014882803164318, 2.691789544078565, 0.5399736653534115, 0.538637338423174], "rank_score": 1.4031037208212116} -{"id": "li-etal-2010-topic", "title": "Topic Models for Word Sense Disambiguation and Token-Based Idiom Detection", "abstract": "This paper presents a probabilistic model for sense disambiguation which chooses the best sense based on the conditional probability of sense paraphrases given a context. We use a topic model to decompose this conditional probability into two conditional probabilities with latent variables. We propose three different instantiations of the model for solving sense disambiguation problems with different degrees of resource availability. The proposed models are tested on three different tasks: coarse-grained word sense disambiguation, fine-grained word sense disambiguation, and detection of literal vs. non-literal usages of potentially idiomatic expressions. In all three cases, we outperform state-of-the-art systems either quantitatively or statistically significantly.", "phrases": ["word sense disambiguation", "probability", "topic model"], "overall_score": 3.230237006457832, "scores": [2.484243042249581, 0.8937395487227597, 0.8306397304606046], "rank_score": 1.4028741071443152} -{"id": "galley-etal-2003-discourse", "title": "Discourse Segmentation of Multi-Party Conversation", "abstract": "We present a domain-independent topic segmentation algorithm for multi-party speech. Our feature-based algorithm combines knowledge about content using a text-based algorithm as a feature and about form using linguistic and acoustic cues about topic shifts extracted from speech. This segmentation algorithm uses automatically induced decision rules to combine the different features. The embedded text-based algorithm builds on lexical cohesion and has performance comparable to state-of-the-art algorithms based on lexical information. A significant error reduction is obtained by combining the two knowledge sources.", "phrases": ["conversation", "topic segmentation", "lexical cohesion", "annotator"], "overall_score": 4.8171836758313535, "scores": [1.771455636118564, 1.687872755671407, 1.3100026638657845, 0.8418531322638831], "rank_score": 1.4027960469799097} -{"id": "niu-etal-2017-study", "title": "A Study of Style in Machine Translation: Controlling the Formality of Machine Translation Output", "abstract": "Stylistic variations of language, such as formality, carry speakers' intention beyond literal meaning and should be conveyed adequately in translation. We propose to use lexical formality models to control the formality level of machine translation output. 
We demonstrate the effectiveness of our approach in empirical evaluations, as measured by automatic metrics and human assessments.", "phrases": ["style", "formality", "linguistic nuance", "fluency"], "overall_score": 3.4856417690142285, "scores": [2.4787578405952173, 2.01934132378124, 0.5616080732793661, 0.5511945148290931], "rank_score": 1.402725438121229} -{"id": "riesa-yarowsky-2006-minimally", "title": "Minimally Supervised Morphological Segmentation with Applications to Machine Translation", "abstract": "Inflected languages in a low-resource setting present a data sparsity problem for statistical machine translation. In this paper, we present a minimally supervised algorithm for morpheme segmentation on Arabic dialects which reduces unknown words at translation time by over 50%, total vocabulary size by over 40%, and yields a significant increase in BLEU score over a previous state-of-the-art phrase-based statistical MT system.", "phrases": ["machine translation", "da-to-english smt", "oov"], "overall_score": 3.082067968760464, "scores": [2.4656330423187134, 0.915942468020087, 0.8265532361050082], "rank_score": 1.402709582147936} -{"id": "gan-etal-2019-multi", "title": "Multi-step Reasoning via Recurrent Dual Attention for Visual Dialog", "abstract": "This paper presents a new model for visual dialog, Recurrent Dual Attention Network (ReDAN), using multi-step reasoning to answer a series of questions about an image. In each question-answering turn of a dialog, ReDAN infers the answer progressively through multiple reasoning steps. In each step of the reasoning process, the semantic representation of the question is updated based on the image and the previous dialog history, and the recurrently-refined representation is used for further reasoning in the subsequent step. On the VisDial v1.0 dataset, the proposed ReDAN model achieves a new state-of-the-art of 64.47% NDCG score. Visualization on the reasoning process further demonstrates that ReDAN can locate context-relevant visual and textual clues via iterative refinement, which can lead to the correct answer step-by-step.", "phrases": ["visual dialog", "attention network", "multi-step reasoning"], "overall_score": 3.484879227464675, "scores": [2.7955912953051887, 0.8825115854873792, 0.5291528256887844], "rank_score": 1.4024185688271176} -{"id": "jiao-etal-2019-higru", "title": "HiGRU: Hierarchical Gated Recurrent Units for Utterance-Level Emotion Recognition", "abstract": "In this paper, we address three challenges in utterance-level emotion recognition in dialogue systems: (1) the same word can deliver different emotions in different contexts; (2) some emotions are rarely seen in general dialogues; (3) long-range contextual information is hard to be effectively captured. We therefore propose a hierarchical Gated Recurrent Unit (HiGRU) framework with a lower-level GRU to model the word-level inputs and an upper-level GRU to capture the contexts of utterance-level embeddings. Moreover, we promote the framework to two variants, Hi-GRU with individual features fusion (HiGRU-f) and HiGRU with self-attention and features fusion (HiGRU-sf), so that the word/utterance-level individual inputs and the long-range contextual information can be sufficiently utilized. Experiments on three dialogue emotion datasets, IEMOCAP, Friends, and EmotionPush demonstrate that our proposed Hi-GRU models attain at least 8.7%, 7.5%, 6.0% improvement over the state-of-the-art methods on each dataset, respectively. 
Particularly, by utilizing only the textual feature in IEMOCAP, our HiGRU models gain at least 3.8% improvement over the state-of-the-art conversational memory network (CMN) with the trimodal features of text, video, and audio.", "phrases": ["gated recurrent unit", "emotion recognition", "higru"], "overall_score": 3.081385392958832, "scores": [1.9271819896512745, 1.691342559527802, 0.5886722363600444], "rank_score": 1.4023989285130403} -{"id": "sanderson-guenter-2006-short", "title": "Short Text Authorship Attribution via Sequence Kernels, Markov Chains and Author Unmasking: An Investigation", "abstract": "We present an investigation of recently proposed character and word sequence kernels for the task of authorship attribution based on relatively short texts. Performance is compared with two corresponding probabilistic approaches based on Markov chains. Several configurations of the sequence kernels are studied on a relatively large dataset (50 authors), where each author covered several topics. Utilising Moffat smoothing, the two probabilistic approaches obtain similar performance, which in turn is comparable to that of character sequence kernels and is better than that of word sequence kernels. The results further suggest that when using a realistic setup that takes into account the case of texts which are not written by any hypothesised authors, the amount of training material has more influence on discrimination performance than the amount of test material. Moreover, we show that the recently proposed author unmasking approach is less useful when dealing with short texts.", "phrases": ["authorship identification", "string kernel", "text analysis task"], "overall_score": 3.484829336694958, "scores": [1.5236733265094324, 1.3862514057577409, 1.2972707416460212], "rank_score": 1.402398491304398} -{"id": "aly-etal-2021-fact", "title": "The Fact Extraction and VERification Over Unstructured and Structured information (FEVEROUS) Shared Task", "abstract": "The Fact Extraction and VERification Over Unstructured and Structured information (FEVEROUS) shared task, asks participating systems to determine whether human-authored claims are Supported or Refuted based on evidence retrieved from Wikipedia (or NotEnoughInfo if the claim cannot be verified). Compared to the FEVER 2018 shared task, the main challenge is the addition of structured data (tables and lists) as a source of evidence. The claims in the FEVEROUS dataset can be verified using only structured evidence, only unstructured evidence, or a mixture of both. Submissions are evaluated using the FEVEROUS score that combines label accuracy and evidence retrieval. Unlike FEVER 2018, FEVEROUS requires partial evidence to be returned for NotEnoughInfo claims, and the claims are longer and thus more complex. The shared task received 13 entries, six of which were able to beat the baseline system. The winning team was \u201cBust a move!\u201d, achieving a FEVEROUS score of 27% (+9% compared to the baseline). 
In this paper we describe the shared task, present the full results and highlight commonalities and innovations among the participating systems.", "phrases": ["fact extraction", "structured information", "wikipedia"], "overall_score": 2.7284509592739803, "scores": [1.9065287112366378, 1.7777899293555892, 0.5221206350319371], "rank_score": 1.4021464252080547}
-{"id": "li-etal-2020-sentence", "title": "On the Sentence Embeddings from Pre-trained Language Models", "abstract": "Pre-trained contextual representations like BERT have achieved great success in natural language processing. However, the sentence embeddings from the pre-trained language models without fine-tuning have been found to poorly capture semantic meaning of sentences. In this paper, we argue that the semantic information in the BERT embeddings is not fully exploited. We first reveal the theoretical connection between the masked language model pre-training objective and the semantic similarity task theoretically, and then analyze the BERT sentence embeddings empirically. We find that BERT always induces a non-smooth anisotropic semantic space of sentences, which harms its performance of semantic similarity. To address this issue, we propose to transform the anisotropic sentence embedding distribution to a smooth and isotropic Gaussian distribution through normalizing flows that are learned with an unsupervised objective. Experimental results show that our proposed BERT-flow method obtains significant performance gains over the state-of-the-art sentence embeddings on a variety of semantic textual similarity tasks. The code is available at .", "phrases": ["sentence embedding", "pre-trained language model", "anisotropic", "semantic space"], "overall_score": 4.9017776358336125, "scores": [1.7717809794471173, 1.5767620775536466, 1.178682080408862, 1.0804019586722065], "rank_score": 1.4019067740204583}
-{"id": "bouamor-etal-2018-madar", "title": "The MADAR Arabic Dialect Corpus and Lexicon", "abstract": "In this paper, we present two resources that were created as part of the Multi Arabic Dialect Applications and Resources (MADAR) project. The first is a large parallel corpus of 25 Arabic city dialects in the travel domain. The second is a lexicon of 1,045 concepts with an average of 45 words from 25 cities per concept. These resources are the first of their kind in terms of the breadth of their coverage and the fine location granularity. The focus on cities, as opposed to regions in studying Arabic dialects, opens new avenues to many areas of research from dialectology to dialect identification and machine translation.", "phrases": ["dialect", "arabic city dialect", "madar corpus"], "overall_score": 4.814043247844997, "scores": [1.8899861513658283, 1.1886969511214078, 1.126961497665047], "rank_score": 1.4018815333840944}
-{"id": "jain-etal-2019-entity", "title": "Entity Projection via Machine Translation for Cross-Lingual NER", "abstract": "Although over 100 languages are supported by strong off-the-shelf machine translation systems, only a subset of them possess large annotated corpora for named entity recognition. Motivated by this fact, we leverage machine translation to improve annotation-projection approaches to cross-lingual named entity recognition. 
We propose a system that improves over prior entity-projection methods by: (a) leveraging machine translation systems twice: first for translating sentences and subsequently for translating entities; (b) matching entities based on orthographic and phonetic similarity; and (c) identifying matches based on distributional statistics derived from the dataset. Our approach improves upon current state-of-the-art methods for cross-lingual named entity recognition on 5 diverse languages by an average of 4.1 points. Further, our method achieves state-of-the-art F_1 scores for Armenian, outperforming even a monolingual model trained on Armenian source data.", "phrases": ["machine translation", "cross-lingual ner", "entity recognition"], "overall_score": 3.483331526652798, "scores": [2.1579151871245275, 1.5116644330698275, 0.5358075644107221], "rank_score": 1.4017957282016924}
-{"id": "suzuki-gao-2012-unified", "title": "A Unified Approach to Transliteration-based Text Input with Online Spelling Correction", "abstract": "This paper presents an integrated, end-to-end approach to online spelling correction for text input. Online spelling correction refers to the spelling correction as you type, as opposed to post-editing. The online scenario is particularly important for languages that routinely use transliteration-based text input methods, such as Chinese and Japanese, because the desired target characters cannot be input at all unless they are in the list of candidates provided by an input method, and spelling errors prevent them from appearing in the list. For example, a user might type suesheng by mistake to mean xuesheng 'student' in Chinese; existing input methods fail to convert this misspelled input to the desired target Chinese characters. In this paper, we propose a unified approach to the problem of spelling correction and transliteration-based character conversion using an approach inspired by the phrase-based statistical machine translation framework. At the phrase (substring) level, k most probable pinyin (Romanized Chinese) corrections are generated using a monotone decoder; at the sentence level, input pinyin strings are directly transliterated into target Chinese characters by a decoder using a log-linear model that refers to the features of both levels. A new method of automatically deriving parallel training data from user keystroke logs is also presented. Experiments on Chinese pinyin conversion show that our integrated method reduces the character error rate by 20% (from 8.9% to 7.12%) over the previous state-of-the-art based on a noisy channel model.", "phrases": ["text input", "online spelling correction", "machine translation framework"], "overall_score": 2.2548554607543707, "scores": [2.409751481666352, 0.8717095853676644, 0.9216003434135845], "rank_score": 1.4010204701492004}
-{"id": "jakob-gurevych-2010-extracting", "title": "Extracting Opinion Targets in a Single and Cross-Domain Setting with Conditional Random Fields", "abstract": "In this paper, we focus on the opinion target extraction as part of the opinion mining task. We model the problem as an information extraction task, which we address based on Conditional Random Fields (CRF). As a baseline we employ the supervised algorithm by Zhuang et al. (2006), which represents the state-of-the-art on the employed data. We evaluate the algorithms comprehensively on datasets from four different domains annotated with individual opinion target instances on a sentence level. 
Furthermore, we investigate the performance of our CRF-based approach and the baseline in a single- and cross-domain opinion target extraction setting. Our CRF-based approach improves the performance by 0.077, 0.126, 0.071 and 0.178 regarding F-Measure in the single-domain extraction in the four domains. In the cross-domain setting our approach improves the performance by 0.409, 0.242, 0.294 and 0.343 regarding F-Measure over the baseline.", "phrases": ["opinion target", "conditional random fields", "target extraction", "crf", "sequence labeling problem"], "overall_score": 4.810834009792991, "scores": [3.389569458243397, 0.9180955815789169, 1.5921951352793031, 0.5708828308174405, 0.5339919032697993], "rank_score": 1.4009469818377713}
-{"id": "ganjigunte-ashok-etal-2013-success", "title": "Success with Style: Using Writing Style to Predict the Success of Novels", "abstract": "Predicting the success of literary works is a curious question among publishers and aspiring writers alike. We examine the quantitative connection, if any, between writing style and successful literature. Based on novels over several different genres, we probe the predictive power of statistical stylometry in discriminating successful literary works, and identify characteristic stylistic elements that are more prominent in successful writings. Our study reports for the first time that statistical stylometry can be surprisingly effective in discriminating highly successful literature from less successful counterpart, achieving accuracy up to 84%. Closer analyses lead to several new insights into characteristics of the writing style in successful literature, including findings that are contrary to the conventional wisdom with respect to good writing style and readability.", "phrases": ["writing style", "novel", "successful literature"], "overall_score": 3.8833768973846894, "scores": [2.715909059941061, 0.9599284989790381, 0.5260588848996456], "rank_score": 1.400632147939915}
-{"id": "durrett-klein-2013-easy", "title": "Easy Victories and Uphill Battles in Coreference Resolution", "abstract": "Classical coreference systems encode various syntactic, discourse, and semantic phenomena explicitly, using heterogenous features computed from hand-crafted heuristics. In contrast, we present a state-of-the-art coreference system that captures such phenomena implicitly, with a small number of homogeneous feature templates examining shallow properties of mentions. Surprisingly, our features are actually more effective than the corresponding hand-engineered ones at modeling these key linguistic phenomena, allowing us to win \u201ceasy victories\u201d without crafted heuristics. These features are successful on syntax and discourse; however, they do not model semantic compatibility well, nor do we see gains from experiments with shallow semantic features from the literature, suggesting that this approach to semantics is an \u201cuphill battle.\u201d Nonetheless, our final system outperforms the Stanford system (Lee et al. 
(2011), the winner of the CoNLL 2011 shared task) by 3.5% absolute on the CoNLL metric and outperforms the IMS system (Bj\u00f6rkelund and Farkas (2012), the best publicly available English coreference system) by 1.9% absolute.", "phrases": ["uphill battle", "coreference resolution", "mention", "semantic compatibility"], "overall_score": 5.0573807312464165, "scores": [2.4197846508227894, 2.107119442293309, 0.5497800616281776, 0.5256373087674414], "rank_score": 1.4005803658779292}
-{"id": "rijhwani-etal-2017-estimating", "title": "Estimating Code-Switching on Twitter with a Novel Generalized Word-Level Language Detection Technique", "abstract": "Word-level language detection is necessary for analyzing code-switched text, where multiple languages could be mixed within a sentence. Existing models are restricted to code-switching between two specific languages and fail in real-world scenarios as text input rarely has a priori information on the languages used. We present a novel unsupervised word-level language detection technique for code-switched text for an arbitrarily large number of languages, which does not require any manually annotated training data. Our experiments with tweets in seven languages show a 74% relative error reduction in word-level labeling with respect to competitive baselines. We then use this system to conduct a large-scale quantitative analysis of code-switching patterns on Twitter, both global as well as region-specific, with 58M tweets.", "phrases": ["twitter", "language identification", "code-mixed text"], "overall_score": 3.9675492050977734, "scores": [2.770058419138507, 0.9060830457026059, 0.5249709011809602], "rank_score": 1.4003707886740244}
-{"id": "scholak-etal-2021-picard", "title": "PICARD: Parsing Incrementally for Constrained Auto-Regressive Decoding from Language Models", "abstract": "Large pre-trained language models for textual data have an unconstrained output space; at each decoding step, they can produce any of 10,000s of sub-word tokens. When fine-tuned to target constrained formal languages like SQL, these models often generate invalid code, rendering it unusable. We propose PICARD (code available at ), a method for constraining auto-regressive decoders of language models through incremental parsing. PICARD helps to find valid output sequences by rejecting inadmissible tokens at each decoding step. On the challenging Spider and CoSQL text-to-SQL translation tasks, we show that PICARD transforms fine-tuned T5 models with passable performance into state-of-the-art solutions.", "phrases": ["auto-regressive decoder", "language model", "picard"], "overall_score": 2.724637878240208, "scores": [2.266291321824184, 0.9871995592185842, 0.9470697865136644], "rank_score": 1.4001868891854776}
-{"id": "bos-etal-2004-wide", "title": "Wide-Coverage Semantic Representations from a CCG Parser", "abstract": "This paper shows how to construct semantic representations from the derivations produced by a wide-coverage CCG parser. Unlike the dependency structures returned by the parser itself, these can be used directly for semantic interpretation. We demonstrate that well-formed semantic representations can be produced for over 97% of the sentences in unseen WSJ text. 
We believe this is a major step towards wide-coverage semantic interpretation, one of the key objectives of the field of NLP.", "phrases": ["ccg parser", "boxer", "logical form"], "overall_score": 4.389634512247345, "scores": [1.9698742849280233, 1.358225688027527, 0.8718451169892758], "rank_score": 1.3999816966482754}
-{"id": "lee-etal-2010-emotion", "title": "Emotion Cause Events: Corpus Construction and Analysis", "abstract": "Emotion processing has always been a great challenge. Given the fact that an emotion is triggered by cause events and that cause events are an integral part of emotion, this paper constructs a Chinese emotion cause corpus as a first step towards automatic inference of cause-emotion correlation. The corpus focuses on five primary emotions, namely happiness, sadness, fear, anger, and surprise. It is annotated with emotion cause events based on our proposed annotation scheme. Corpus data shows that most emotions are expressed with causes, and that causes mostly occur before the corresponding emotion verbs. We also examine the correlations between emotions and cause events in terms of linguistic cues: causative verbs, perception verbs, epistemic markers, conjunctions, prepositions, and others. Results show that each group of linguistic cues serves as an indicator marking the cause events in different structures of emotional constructions. We believe that the emotion cause corpus will be a useful resource for automatic emotion cause detection as well as emotion detection and classification.", "phrases": ["cause", "linguistic cue", "emotion"], "overall_score": 3.791171335274159, "scores": [2.331518274149953, 1.2975766623364833, 0.5707954500334723], "rank_score": 1.3999634621733028}
-{"id": "wu-ng-2013-grammatical", "title": "Grammatical Error Correction Using Integer Linear Programming", "abstract": "We propose a joint inference algorithm for grammatical error correction. Different from most previous work where different error types are corrected independently, our proposed inference process considers all possible errors in a unified framework. We use integer linear programming (ILP) to model the inference process, which can easily incorporate both the power of existing error classifiers and prior knowledge on grammatical error correction. Experimental results on the Helping Our Own shared task show that our method is competitive with state-of-the-art systems.", "phrases": ["integer linear programming", "inference process", "grammatical error correction"], "overall_score": 2.252941737225583, "scores": [2.706496560684276, 0.9625460624391797, 0.5304515974735717], "rank_score": 1.3998314068656759}
-{"id": "rastogi-etal-2019-scaling", "title": "Scaling Multi-Domain Dialogue State Tracking via Query Reformulation", "abstract": "We present a novel approach to dialogue state tracking and referring expression resolution tasks. Successful contextual understanding of multi-turn spoken dialogues requires resolving referring expressions across turns and tracking the entities relevant to the conversation across turns. Tracking conversational state is particularly challenging in a multi-domain scenario when there exist multiple spoken language understanding (SLU) sub-systems, and each SLU sub-system operates on its domain-specific meaning representation. 
While previous approaches have addressed the disparate schema issue by learning candidate transformations of the meaning representation, in this paper, we instead model the reference resolution as a dialogue context-aware user query reformulation task \u2013 the dialog state is serialized to a sequence of natural language tokens representing the conversation. We develop our model for query reformulation using a pointer-generator network and a novel multi-task learning setup. In our experiments, we show a significant improvement in absolute F1 on an internal as well as a, soon to be released, public benchmark respectively.", "phrases": ["dialogue state tracking", "query reformulation", "conversation"], "overall_score": 3.356584529109317, "scores": [1.9320897642076191, 1.7339537875992783, 0.5333698677693617], "rank_score": 1.3998044731920862}
-{"id": "gao-etal-2020-supert", "title": "SUPERT: Towards New Frontiers in Unsupervised Evaluation Metrics for Multi-Document Summarization", "abstract": "We study unsupervised multi-document summarization evaluation metrics, which require neither human-written reference summaries nor human annotations (e.g. preferences, ratings, etc.). We propose SUPERT, which rates the quality of a summary by measuring its semantic similarity with a pseudo reference summary, i.e. selected salient sentences from the source documents, using contextualized embeddings and soft token alignment techniques. Compared to the state-of-the-art unsupervised evaluation metrics, SUPERT correlates better with human ratings by 18-39%. Furthermore, we use SUPERT as rewards to guide a neural-based reinforcement learning summarizer, yielding favorable performance compared to the state-of-the-art unsupervised summarizers. All source code is available at .", "phrases": ["evaluation metric", "summarization", "reference"], "overall_score": 3.5899560482368416, "scores": [1.8190212729133877, 1.3299995549566916, 1.0498410769003061], "rank_score": 1.399620634923462}
-{"id": "tsarfaty-2006-integrated", "title": "Integrated Morphological and Syntactic Disambiguation for Modern Hebrew", "abstract": "Current parsing models are not immediately applicable for languages that exhibit strong interaction between morphology and syntax, e.g., Modern Hebrew (MH), Arabic and other Semitic languages. This work represents a first attempt at modeling morphological-syntactic interaction in a generative probabilistic framework to allow for MH parsing. We show that morphological information selected in tandem with syntactic categories is instrumental for parsing Semitic languages. We further show that redundant morphological information helps syntactic disambiguation.", "phrases": ["syntactic disambiguation", "modern hebrew", "morphological analysis"], "overall_score": 3.692636373199811, "scores": [1.9887495853478654, 1.66334359698308, 0.5455833877397972], "rank_score": 1.3992255233569144}
-{"id": "kirov-etal-2018-unimorph", "title": "UniMorph 2.0: Universal Morphology", "abstract": "The Universal Morphology UniMorph project is a collaborative effort to improve how NLP handles complex morphology across the world's languages. The project releases annotated morphological data using a universal tagset, the UniMorph schema. Each inflected form is associated with a lemma, which typically carries its underlying lexical meaning, and a bundle of morphological features from our schema. Additional supporting data and tools are also released on a per-language basis when available. 
UniMorph is based at the Center for Language and Speech Processing (CLSP) at Johns Hopkins University in Baltimore, Maryland and is sponsored by the DARPA LORELEI program. This paper details advances made to the collection, annotation, and dissemination of project resources since the initial UniMorph release described at LREC 2016.", "phrases": ["morphological data", "unimorph", "database", "wiktionary", "inflection table"], "overall_score": 4.3853170508500146, "scores": [4.151397265420902, 1.1007237130434386, 0.6082495808184918, 0.573374443261675, 0.55927866270646], "rank_score": 1.3986047330501936}
-{"id": "seddah-etal-2012-french", "title": "The French Social Media Bank: a Treebank of Noisy User Generated Content", "abstract": "In recent years, statistical parsers have reached high performance levels on well-edited texts. Domain adaptation techniques have improved parsing results on text genres differing from the journalistic data most parsers are trained on. However, such corpora usually comply with standard linguistic, spelling and typographic conventions. In the meantime, the emergence of Web 2.0 communication media has caused the apparition of new types of online textual data. Although valuable, e.g., in terms of data mining and sentiment analysis, such user-generated content rarely complies with standard conventions: they are noisy. This prevents most NLP tools, especially treebank based parsers, from performing well on such data. For this reason, we have developed the French Social Media Bank, the first user-generated content treebank for French, a morphologically rich language (MRL). The first release of this resource contains 1,700 sentences from various Web 2.0 sources, including data specifically chosen for their high noisiness. We describe here how we created this treebank and expose the methodology we used for fully annotating it. We also provide baseline POS tagging and statistical constituency parsing results, which are lower by far than usual results on edited texts. This highlights the high difficulty of automatically processing such noisy data in a MRL.", "phrases": ["social media bank", "treebank", "user-generated content", "ugc", "online forum"], "overall_score": 3.6905275203823216, "scores": [2.7037310913632844, 2.2898035696522303, 0.8290991916308484, 0.5878118145127998, 0.5816864835288144], "rank_score": 1.3984264301375955}
-{"id": "kapustin-kapustin-2019-modeling", "title": "Modeling language constructs with fuzzy sets: some approaches, examples and interpretations", "abstract": "We present and discuss a couple of approaches, including different types of projections, and some examples, discussing the use of fuzzy sets for modeling meaning of certain types of language constructs. We are mostly focusing on words other than adjectives and linguistic hedges as these categories are the most studied from before. We discuss logical and linguistic interpretations of membership functions. 
We argue that using fuzzy sets for modeling meaning of words and other natural language constructs, along with situations described with natural language is interesting both from purely linguistic perspective, and also as a knowledge representation for problems of computational linguistics and natural language processing.", "phrases": ["fuzzy set", "interpretation", "natural language construct"], "overall_score": 1.9384071481023362, "scores": [1.9371303370614534, 1.6950975819988356, 0.5625676506261492], "rank_score": 1.3982651898954794} -{"id": "park-etal-2019-thisiscompetition", "title": "ThisIsCompetition at SemEval-2019 Task 9: BERT is unstable for out-of-domain samples", "abstract": "This paper describes our system, Joint Encoders for Stable Suggestion Inference (JESSI), for the SemEval 2019 Task 9: Suggestion Mining from Online Reviews and Forums. JESSI is a combination of two sentence encoders: (a) one using multiple pre-trained word embeddings learned from log-bilinear regression (GloVe) and translation (CoVe) models, and (b) one on top of word encodings from a pre-trained deep bidirectional transformer (BERT). We include a domain adversarial training module when training for out-of-domain samples. Our experiments show that while BERT performs exceptionally well for in-domain samples, several runs of the model show that it is unstable for out-of-domain samples. The problem is mitigated tremendously by (1) combining BERT with a non-BERT encoder, and (2) using an RNN-based classifier on top of BERT. Our final models obtained second place with 77.78% F-Score on Subtask A (i.e. in-domain) and achieved an F-Score of 79.59% on Subtask B (i.e. out-of-domain), even without using any additional external data.", "phrases": ["bert", "out-of-domain sample", "stable suggestion inference"], "overall_score": 1.535138667720644, "scores": [1.951928048731448, 1.712983312462235, 0.5271189398192941], "rank_score": 1.3973434336709925} -{"id": "miller-etal-2017-parlai", "title": "ParlAI: A Dialog Research Software Platform", "abstract": "We introduce ParlAI (pronounced \u201cpar-lay\u201d), an open-source software platform for dialog research implemented in Python, available at . Its goal is to provide a unified framework for sharing, training and testing dialog models; integration of Amazon Mechanical Turk for data collection, human evaluation, and online/reinforcement learning; and a repository of machine learning models for comparing with others' models, and improving upon existing architectures. Over 20 tasks are supported in the first release, including popular datasets such as SQuAD, bAbI tasks, MCTest, WikiQA, QACNN, QADailyMail, CBT, bAbI Dialog, Ubuntu, OpenSubtitles and VQA. Several models are integrated, including neural models such as memory networks, seq2seq and attentive LSTMs.", "phrases": ["dialog research", "software platform", "parlai", "chatbot", "evaluation system"], "overall_score": 4.318700755961555, "scores": [4.001945530382689, 1.03320919263677, 0.8787652965152738, 0.5401749510174745, 0.5317371898398787], "rank_score": 1.3971664320784172} -{"id": "duong-etal-2016-attentional", "title": "An Attentional Model for Speech Translation Without Transcription", "abstract": "For many low-resource languages, spoken language resources are more likely to be annotated with translations than transcriptions. This bilingual speech data can be used for word-spotting, spoken document retrieval, and even for documentation of endangered languages. 
We experiment with the neural, attentional model applied to this data. On phone-to-word alignment and translation reranking tasks, we achieve large improvements relative to several baselines. On the more challenging speech-to-word alignment task, our model nearly matches GIZA++\u2019s performance on gold transcriptions, but without recourse to transcriptions or to a lexicon.", "phrases": ["attentional model", "transcription", "sequence-to-sequence model"], "overall_score": 3.583477813987668, "scores": [1.865489977403781, 1.800333422590414, 0.5254614730149266], "rank_score": 1.3970949576697071}
-{"id": "wang-etal-2020-structure", "title": "Structure-Level Knowledge Distillation For Multilingual Sequence Labeling", "abstract": "Multilingual sequence labeling is a task of predicting label sequences using a single unified model for multiple languages. Compared with relying on multiple monolingual models, using a multilingual model has the benefit of a smaller model size, easier in online serving, and generalizability to low-resource languages. However, current multilingual models still underperform individual monolingual models significantly due to model capacity limitations. In this paper, we propose to reduce the gap between monolingual models and the unified multilingual model by distilling the structural knowledge of several monolingual models (teachers) to the unified multilingual model (student). We propose two novel KD methods based on structure-level information: (1) approximately minimizes the distance between the student's and the teachers' structure-level probability distributions, (2) aggregates the structure-level knowledge to local distributions and minimizes the distance between two local probability distributions. Our experiments on 4 multilingual tasks with 25 datasets show that our approaches outperform several strong baselines and have stronger zero-shot generalizability than both the baseline model and teacher models.", "phrases": ["knowledge distillation", "multilingual sequence labeling", "structural knowledge"], "overall_score": 2.9046830597670854, "scores": [1.957404079513442, 1.6629419644780892, 0.5702258016886241], "rank_score": 1.396857281893385}
-{"id": "ma-2006-champollion", "title": "Champollion: A Robust Parallel Text Sentence Aligner", "abstract": "This paper describes Champollion, a lexicon-based sentence aligner designed for robust alignment of potentially noisy parallel text. Champollion increases the robustness of the alignment by assigning greater weights to less frequent translated words. Experiments on a manually aligned Chinese \u2013 English parallel corpus show that Champollion achieves high precision and recall on noisy data. Champollion can be easily ported to new language pairs. It\u2019s freely available to the public.", "phrases": ["lexicon-based sentence aligner", "robust alignment", "champollion"], "overall_score": 3.471036632312935, "scores": [2.5837641343141, 1.039631051135205, 0.5671485107604495], "rank_score": 1.3968478987365849}
-{"id": "jiang-de-marneffe-2019-evaluating", "title": "Evaluating BERT for natural language inference: A case study on the CommitmentBank", "abstract": "Natural language inference (NLI) datasets (e.g., MultiNLI) were collected by soliciting hypotheses for a given premise from annotators. Such data collection led to annotation artifacts: systems can identify the premise-hypothesis relationship without observing the premise (e.g., negation in hypothesis being indicative of contradiction). 
We address this problem by recasting the CommitmentBank for NLI, which contains items involving reasoning over the extent to which a speaker is committed to complements of clause-embedding verbs under entailment-canceling environments (conditional, negation, modal and question). Instead of being constructed to stand in certain relationships with the premise, hypotheses in the recast CommitmentBank are the complements of the clause-embedding verb in each premise, leading to no annotation artifacts in the hypothesis. A state-of-the-art BERT-based model performs well on the CommitmentBank with 85% F1. However analysis of model behavior shows that the BERT models still do not capture the full complexity of pragmatic reasoning, nor encode some of the linguistic generalizations, highlighting room for improvement.", "phrases": ["bert", "natural language inference", "reasoning"], "overall_score": 2.7180148977802108, "scores": [2.390675450887654, 1.2759830119660014, 0.5236915886629574], "rank_score": 1.3967833505055378} -{"id": "xing-etal-2018-adaptive", "title": "Adaptive Multi-Task Transfer Learning for Chinese Word Segmentation in Medical Text", "abstract": "Chinese word segmentation (CWS) trained from open source corpus faces dramatic performance drop when dealing with domain text, especially for a domain with lots of special terms and diverse writing styles, such as the biomedical domain. However, building domain-specific CWS requires extremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant knowledge from high resource to low resource domains. Extensive experiments show that our model achieves consistently higher accuracy than the single-task CWS and other transfer learning baselines, especially when there is a large disparity between source and target domains.", "phrases": ["chinese word segmentation", "medical text", "domain-invariant knowledge"], "overall_score": 2.2476041156281887, "scores": [2.328476032323037, 1.334345113452177, 0.5267237225240506], "rank_score": 1.396514956099755} -{"id": "luan-etal-2019-general", "title": "A general framework for information extraction using dynamic span graphs", "abstract": "We introduce a general framework for several information extraction tasks that share span representations using dynamically constructed span graphs. The graphs are dynamically constructed by selecting the most confident entity spans and linking these nodes with confidence-weighted relation types and coreferences. The dynamic span graph allow coreference and relation type confidences to propagate through the graph to iteratively refine the span representations. This is unlike previous multi-task frameworks for information extraction in which the only interaction between tasks is in the shared first-layer LSTM. Our framework significantly outperforms state-of-the-art on multiple information extraction tasks across multiple datasets reflecting different domains. 
We further observe that the span enumeration approach is good at detecting nested span entities, with significant F1 score improvement on the ACE dataset.", "phrases": ["information extraction", "span graph", "confidence-weighted relation type", "coreference", "entity recognition"], "overall_score": 4.839336001837313, "scores": [2.455054472591292, 2.324332654515658, 1.1188394228091068, 0.5470806357828273, 0.5363788653472292], "rank_score": 1.3963372102092229} -{"id": "kothur-etal-2018-document", "title": "Document-Level Adaptation for Neural Machine Translation", "abstract": "It is common practice to adapt machine translation systems to novel domains, but even a well-adapted system may be able to perform better on a particular document if it were to learn from a translator's corrections within the document itself. We focus on adaptation within a single document \u2013 appropriate for an interactive translation scenario where a model adapts to a human translator's input over the course of a document. We propose two methods: single-sentence adaptation (which performs online adaptation one sentence at a time) and dictionary adaptation (which specifically addresses the issue of translating novel words). Combining the two models results in improvements over both approaches individually, and over baseline systems, even on short documents. On WMT news test data, we observe an improvement of +1.8 BLEU points and +23.3% novel word translation accuracy and on EMEA data (descriptions of medications) we observe an improvement of +2.7 BLEU points and +49.2% novel word translation accuracy.", "phrases": ["adaptation", "neural machine translation", "novel word"], "overall_score": 2.9025932555776666, "scores": [2.328105521273095, 0.9480600636784896, 0.9113913105880668], "rank_score": 1.3958522985132171} -{"id": "mohammad-etal-2018-semeval", "title": "SemEval-2018 Task 1: Affect in Tweets", "abstract": "We present the SemEval-2018 Task 1: Affect in Tweets, which includes an array of subtasks on inferring the affectual state of a person from their tweet. For each task, we created labeled data from English, Arabic, and Spanish tweets. The individual tasks are: 1. emotion intensity regression, 2. emotion intensity ordinal classification, 3. valence (sentiment) regression, 4. valence ordinal classification, and 5. emotion classification. Seventy-five teams (about 200 team members) participated in the shared task. We summarize the methods, resources, and tools used by the participating teams, with a focus on the techniques and resources that are particularly useful. We also analyze systems for consistent bias towards a particular race or gender. The data is made freely available to further improve our understanding of how people convey emotions through language.", "phrases": ["tweets", "semeval-2018 task", "emotion dataset"], "overall_score": 5.039496881877907, "scores": [2.7389362410501192, 0.9098179565330757, 0.5381287533401249], "rank_score": 1.3956276503077731} -{"id": "felice-yuan-2014-generating", "title": "Generating artificial errors for grammatical error correction", "abstract": "This paper explores the generation of artificial errors for correcting grammatical mistakes made by learners of English as a second language. Artificial errors are injected into a set of error-free sentences in a probabilistic manner using statistics from a corpus. 
Unlike previous approaches, we use linguistic information to derive error generation probabilities and build corpora to correct several error types, including open-class errors. In addition, we also analyse the variables involved in the selection of candidate sentences. Experiments using the NUCLE corpus from the CoNLL 2013 shared task reveal that: 1) training on artificially created errors improves precision at the expense of recall and 2) different types of linguistic information are better suited for correcting different error types.", "phrases": ["artificial error", "learner", "linguistic information", "different type"], "overall_score": 3.68161180120424, "scores": [2.9053390618262087, 0.9245196140745281, 0.8764618222989032, 0.8738717316386901], "rank_score": 1.3950480574595827} -{"id": "stahlberg-byrne-2019-nmt", "title": "On NMT Search Errors and Model Errors: Cat Got Your Tongue?", "abstract": "We report on search errors and model errors in neural machine translation (NMT). We present an exact inference procedure for neural sequence models based on a combination of beam search and depth-first search. We use our exact search to find the global best model scores under a Transformer base model for the entire WMT15 English-German test set. Surprisingly, beam search fails to find these global best model scores in most cases, even with a very large beam size of 100. For more than 50% of the sentences, the model in fact assigns its global best score to the empty translation, revealing a massive failure of neural models in properly accounting for adequacy. We show by constraining search with a minimum translation length that at the root of the problem of empty translations lies an inherent bias towards shorter translations. We conclude that vanilla NMT in its current form requires just the right amount of beam search errors, which, from a modelling perspective, is a highly unsatisfactory conclusion indeed, as the model often prefers an empty translation.", "phrases": ["model error", "neural machine translation", "beam search", "likely output"], "overall_score": 4.310647701502422, "scores": [2.4125197379542986, 1.5147059939321323, 1.0811013354622843, 0.5699175107146426], "rank_score": 1.3945611445158395} -{"id": "hajic-etal-2014-comparing", "title": "Comparing Czech and English AMRs", "abstract": "This paper describes in detail the differences between Czech and English annotation using the Abstract Meaning Representation scheme, which stresses the use of ontologies (and semantically-oriented verbal lexicons) and relations based on meaning or ontological content rather than semantics or syntax. The basic \u201cslogan\u201d of the AMR specification clearly states that AMR is not an interlingua, yet it is expected that many relations as well as structures constructed from these relations will be similar or even identical across languages. 
In our study, we have investigated 100 sentences in English and their translations into Czech, annotated manually with AMRs, with the goal of describing the differences and, if possible, classifying them into two main categories: those which are merely convention differences and thus can be unified by changing such conventions in the AMR annotation guidelines, and those which are so deeply rooted in the language structure that the level of abstraction which is inherent in the current AMR scheme does not allow for such unification.", "phrases": ["amr", "ontology", "interlingua"], "overall_score": 2.7127709495556602, "scores": [3.068378009398621, 0.560997316717001, 0.5528901565007829], "rank_score": 1.3940884942054683} -{"id": "baly-etal-2018-integrating", "title": "Integrating Stance Detection and Fact Checking in a Unified Corpus", "abstract": "A reasonable approach for fact checking a claim involves retrieving potentially relevant documents from different sources (e.g., news websites, social media, etc.), determining the stance of each document with respect to the claim, and finally making a prediction about the claim's factuality by aggregating the strength of the stances, while taking the reliability of the source into account. Moreover, a fact checking system should be able to explain its decision by providing relevant extracts (rationales) from the documents. Yet, this setup is not directly supported by existing datasets, which treat fact checking, document retrieval, source credibility, stance detection and rationale extraction as independent tasks. In this paper, we support the interdependencies between these tasks as annotations in the same corpus. We implement this setup on an Arabic fact checking corpus, the first of its kind.", "phrases": ["stance detection", "fact checking", "other language"], "overall_score": 3.949602363161501, "scores": [2.755844666594012, 0.8926104491709922, 0.5336539069607477], "rank_score": 1.394036340908584} -{"id": "geffet-dagan-2005-distributional", "title": "The Distributional Inclusion Hypotheses and Lexical Entailment", "abstract": "This paper suggests refinements for the Distributional Similarity Hypothesis. Our proposed hypotheses relate the distributional behavior of pairs of words to lexical entailment -- a tighter notion of semantic similarity that is required by many NLP applications. To automatically explore the validity of the defined hypotheses we developed an inclusion testing algorithm for characteristic features of two words, which incorporates corpus and web-based feature sampling to overcome data sparseness. The degree of hypotheses validity was then empirically tested and manually analyzed with respect to the word sense level. In addition, the above testing algorithm was exploited to improve lexical entailment acquisition.", "phrases": ["distributional inclusion hypotheses", "lexical entailment", "hypothesis", "hypernym", "directional similarity measure"], "overall_score": 4.874227687358249, "scores": [3.5960633932460833, 0.8921087223433278, 1.0936256666089812, 0.8533531505110044, 0.534986556874544], "rank_score": 1.3940274979167884} -{"id": "vatanen-etal-2010-language", "title": "Language Identification of Short Text Segments with N-gram Models", "abstract": "There are many accurate methods for language identification of long text samples, but identification of very short strings still presents a challenge. This paper studies a language identification task, in which the test samples have only 5-21 characters. 
We compare two distinct methods that are well suited for this task: a naive Bayes classifier based on character n-gram models, and the ranking method by Cavnar and Trenkle (1994). For the n-gram models, we test several standard smoothing techniques, including the current state-of-the-art, the modified Kneser-Ney interpolation. Experiments are conducted with 281 languages using the Universal Declaration of Human Rights. Advanced language model smoothing techniques improve the identification accuracy and the respective classifiers outperform the ranking method. The higher accuracy is obtained at the cost of larger models and slower classification speed. However, there are several methods to reduce the size of an n-gram model, and our experiments with model pruning show that it provides an easy way to balance the size and the identification accuracy. We also compare the results to the language identifier in Google AJAX Language API, using a subset of 50 languages.", "phrases": ["character", "naive bayes classifier", "universal declaration", "language identification"], "overall_score": 3.3425066379047554, "scores": [2.863271849163476, 1.29268308244879, 0.8960265213755367, 0.5237526932395479], "rank_score": 1.3939335365568375} -{"id": "titov-mcdonald-2008-joint", "title": "A Joint Model of Text and Aspect Ratings for Sentiment Summarization", "abstract": "Online reviews are often accompanied with numerical ratings provided by users for a set of service or product aspects. We propose a statistical model which is able to discover corresponding topics in text and extract textual evidence from reviews supporting each of these aspect ratings \u2010 a fundamental problem in aspect-based sentiment summarization (Hu and Liu, 2004a). Our model achieves high accuracy, without any explicitly labeled data except the user provided opinion ratings. The proposed approach is general and can be used for segmentation in other applications where sequential data is accompanied with correlated signals.", "phrases": ["joint model", "aspect rating", "sentiment summarization", "much attention"], "overall_score": 5.3350827494377535, "scores": [2.009707094970657, 1.5648041721980628, 1.474878012304031, 0.5244759086643063], "rank_score": 1.3934662970342642} -{"id": "habash-rambow-2005-arabic", "title": "Arabic Tokenization, Part-of-Speech Tagging and Morphological Disambiguation in One Fell Swoop", "abstract": "We present an approach to using a morphological analyzer for tokenizing and morphologically tagging (including part-of-speech tagging) Arabic words in one process. We learn classifiers for individual morphological features, as well as ways of using these classifiers to choose among entries from the output of the analyzer. We obtain accuracy rates on all tasks in the high nineties.", "phrases": ["part-of-speech tagging", "morphological disambiguation", "arabic pos tagging"], "overall_score": 4.952128404117478, "scores": [1.91158500546735, 1.7101054921574605, 0.5569116969881036], "rank_score": 1.3928673982043047} -{"id": "dodge-etal-2012-detecting", "title": "Detecting Visual Text", "abstract": "When people describe a scene, they often include information that is not visually apparent; sometimes based on background knowledge, sometimes to tell a story. We aim to separate visual text---descriptions of what is being seen---from non-visual text in natural images and their descriptions. 
To do so, we first concretely define what it means to be visual, annotate visual text and then develop algorithms to automatically classify noun phrases as visual or non-visual. We find that using text alone, we are able to achieve high accuracies at this task, and that incorporating features derived from computer vision algorithms improves performance. Finally, we show that we can reliably mine visual nouns and adjectives from large corpora and that we can use these effectively in the classification task.", "phrases": ["visual text", "noun phrase", "object"], "overall_score": 3.0601928089740773, "scores": [2.6717484055050438, 0.9267808521459868, 0.5797320460033293], "rank_score": 1.3927537678847866} -{"id": "rajani-etal-2019-explain", "title": "Explain Yourself! Leveraging Language Models for Commonsense Reasoning", "abstract": "Deep learning models perform poorly on tasks that require commonsense reasoning, which often necessitates some form of world-knowledge or reasoning over information not immediately present in the input. We collect human explanations for commonsense reasoning in the form of natural language sequences and highlighted annotations in a new dataset called Common Sense Explanations (CoS-E). We use CoS-E to train language models to automatically generate explanations that can be used during training and inference in a novel Commonsense Auto-Generated Explanation (CAGE) framework. CAGE improves the state-of-the-art by 10% on the challenging CommonsenseQA task. We further study commonsense reasoning in DNNs using both human and auto-generated explanations including transfer to out-of-domain tasks. Empirical results indicate that we can effectively leverage language models for commonsense reasoning.", "phrases": ["language model", "commonsense reasoning", "highlighted annotation", "cage", "correct answer"], "overall_score": 5.580962775097866, "scores": [3.6115887193140046, 1.1536922503785274, 1.0748477633655622, 0.5660441250695489, 0.557264566622086], "rank_score": 1.392687484949946} -{"id": "du-etal-2013-topic", "title": "Topic Segmentation with a Structured Topic Model", "abstract": "We present a new hierarchical Bayesian model for unsupervised topic segmentation. This new model integrates a point-wise boundary sampling algorithm used in Bayesian segmentation into a structured topic model that can capture a simple hierarchical topic structure latent in documents. We develop an MCMC inference algorithm to split/merge segment(s). Experimental results show that our model outperforms previous unsupervised segmentation methods using only lexical information on Choi\u2019s datasets and two meeting transcripts and has performance comparable to those previous methods on two written datasets.", "phrases": ["hierarchical bayesian model", "point-wise boundary", "topic segmentation"], "overall_score": 2.241177126147663, "scores": [2.69986559999082, 0.9285694382888058, 0.5491298907547459], "rank_score": 1.392521643011457} -{"id": "liu-etal-2015-toward", "title": "Toward Abstractive Summarization Using Semantic Representations", "abstract": "We present a novel abstractive summarization framework that draws on the recent development of a treebank for the Abstract Meaning Representation (AMR). In this framework, the source text is parsed to a set of AMR graphs, the graphs are transformed into a summary graph, and then text is generated from the summary graph. 
We focus on the graph-to-graph transformation that reduces the source semantic graph into a summary graph, making use of an existing AMR parser and assuming the eventual availability of an AMR-to-text generator. The framework is data-driven, trainable, and not specifically designed for a particular domain. Experiments on gold-standard AMR annotations and system parses show promising results. Code is available at: https://github.com/summarization", "phrases": ["abstractive summarization", "amr", "graph-based representation", "predicate"], "overall_score": 4.587739751742102, "scores": [2.5448962734595266, 1.9307797943467204, 0.5593208864233468, 0.5329239572249791], "rank_score": 1.3919802278636433} -{"id": "liu-etal-2015-representation", "title": "Representation Learning Using Multi-Task Deep Neural Networks for Semantic Classification and Information Retrieval", "abstract": "Methods of deep neural networks (DNNs) have recently demonstrated superior performance on a number of natural language processing tasks. However, in most previous work, the models are learned based on either unsupervised objectives, which does not directly optimize the desired task, or single-task supervised objectives, which often suffer from insufficient training data. We develop a multi-task DNN for learning representations across multiple tasks, not only leveraging large amounts of cross-task data, but also benefiting from a regularization effect that leads to more general representations to help tasks in new domains. Our multi-task DNN approach combines tasks of multiple-domain classification (for query classification) and information retrieval (ranking for web search), and demonstrates significant gains over strong baselines in a comprehensive set of domain adaptation experiments.", "phrases": ["deep neural network", "information retrieval", "more general representation", "multi-task learning"], "overall_score": 4.422132890342258, "scores": [1.8463066477968617, 1.9924209787840121, 1.1798274916876967, 0.5472824998539709], "rank_score": 1.3914594045306354} -{"id": "yao-etal-2019-docred", "title": "DocRED: A Large-Scale Document-Level Relation Extraction Dataset", "abstract": "Multiple entities in a document generally exhibit complex inter-sentence relations, and cannot be well handled by existing relation extraction (RE) methods that typically focus on extracting intra-sentence relations for single entity pairs. In order to accelerate the research on document-level RE, we introduce DocRED, a new dataset constructed from Wikipedia and Wikidata with three features: (1) DocRED annotates both named entities and relations, and is the largest human-annotated dataset for document-level RE from plain text; (2) DocRED requires reading multiple sentences in a document to extract entities and infer their relations by synthesizing all information of the document; (3) along with the human-annotated data, we also offer large-scale distantly supervised data, which enables DocRED to be adopted for both supervised and weakly supervised scenarios. In order to verify the challenges of document-level RE, we implement recent state-of-the-art methods for RE and conduct a thorough evaluation of these methods on DocRED. Empirical results show that DocRED is challenging for existing RE methods, which indicates that document-level RE remains an open problem and requires further efforts. Based on the detailed analysis on the experiments, we discuss multiple promising directions for future research. 
We make DocRED and the code for our baselines publicly available at .", "phrases": ["relation extraction dataset", "wikipedia", "multiple sentence", "state-of-the-art method", "document level"], "overall_score": 5.06153815577817, "scores": [1.9118474290275158, 2.2097782846953122, 1.1496285274891114, 1.1187136605838588, 0.5673080684851234], "rank_score": 1.3914551940561846} -{"id": "zhang-etal-2014-type", "title": "Type-Supervised Domain Adaptation for Joint Segmentation and POS-Tagging", "abstract": "We report an empirical investigation on type-supervised domain adaptation for joint Chinese word segmentation and POS-tagging, making use of domain-specific tag dictionaries and only unlabeled target domain data to improve target-domain accuracies, given a set of annotated source domain sentences. Previous work on POS-tagging of other languages showed that type-supervision can be a competitive alternative to token-supervision, while semi-supervised techniques such as label propagation are important to the effectiveness of type-supervision. We report similar findings using a novel approach for joint Chinese segmentation and POS-tagging, under a cross-domain setting. With the help of unlabeled sentences and a lexicon of 3,000 words, we obtain 33% error reduction in target-domain tagging. In addition, combined type- and token-supervision can lead to improved cost-effectiveness.", "phrases": ["domain adaptation", "pos-tagging", "cws"], "overall_score": 3.203724932338113, "scores": [2.7477986358659408, 0.9011556701212127, 0.5251258729637805], "rank_score": 1.3913600596503113} -{"id": "cao-etal-2020-unsupervised", "title": "Unsupervised Parsing via Constituency Tests", "abstract": "We propose a method for unsupervised parsing based on the linguistic notion of a constituency test. One type of constituency test involves modifying the sentence via some transformation (e.g. replacing the span with a pronoun) and then judging the result (e.g. checking if it is grammatical). Motivated by this idea, we design an unsupervised parser by specifying a set of transformations and using an unsupervised neural acceptability model to make grammaticality decisions. To produce a tree given a sentence, we score each span by aggregating its constituency test judgments, and we choose the binary tree with the highest total score. While this approach already achieves performance in the range of current methods, we further improve accuracy by fine-tuning the grammaticality model through a refinement procedure, where we alternate between improving the estimated trees and improving the grammaticality model. The refined model achieves 62.8 F1 on the Penn Treebank test set, an absolute improvement of 7.6 points over the previously best published result.", "phrases": ["constituency test", "neural acceptability model", "grammaticality decision", "unsupervised parsing"], "overall_score": 2.491951256636699, "scores": [2.1130545949866946, 1.9541997639861408, 0.9587857657001946, 0.5370977840336741], "rank_score": 1.390784477176676} -{"id": "chan-roth-2011-exploiting", "title": "Exploiting Syntactico-Semantic Structures for Relation Extraction", "abstract": "In this paper, we observe that there exists a second dimension to the relation extraction (RE) problem that is orthogonal to the relation type dimension. We show that most of these second dimensional structures are relatively constrained and not difficult to identify. 
We propose a novel algorithmic approach to RE that starts by first identifying these structures and then, within these, identifying the semantic type of the relation. In the real RE problem where relation arguments need to be identified, exploiting these structures also allows reducing pipelined propagated errors. We show that this RE framework provides significant improvement in RE performance.", "phrases": ["relation extraction", "pipeline", "entity recognition", "traditional approach", "error propagation problem"], "overall_score": 4.473877931713129, "scores": [3.285365756580952, 1.3856161760913308, 0.8830913909549507, 0.8615317052257901, 0.5338366009686473], "rank_score": 1.3898883259643342} -{"id": "choi-etal-2005-identifying", "title": "Identifying Sources of Opinions with Conditional Random Fields and Extraction Patterns", "abstract": "Recent systems have been developed for sentiment classification, opinion recognition, and opinion analysis (e.g., detecting polarity and strength). We pursue another aspect of opinion analysis: identifying the sources of opinions, emotions, and sentiments. We view this problem as an information extraction task and adopt a hybrid approach that combines Conditional Random Fields (Lafferty et al., 2001) and a variation of AutoSlog (Riloff, 1996a). While CRFs model source identification as a sequence tagging task, AutoSlog learns extraction patterns. Our results show that the combination of these two methods performs better than either one alone. The resulting system identifies opinion sources with 79.3% precision and 59.5% recall using a head noun matching measure, and 81.2% precision and 60.6% recall using an overlap measure.", "phrases": ["opinion", "conditional random fields", "information extraction task", "hybrid approach", "crf"], "overall_score": 4.579278640112976, "scores": [2.8798347349372078, 1.7208522466510066, 1.2120831011243867, 0.5946831666864375, 0.5396118304096792], "rank_score": 1.3894130159617437} -{"id": "carl-2012-translog", "title": "Translog-II: a Program for Recording User Activity Data for Empirical Reading and Writing Research", "abstract": "This paper presents a novel implementation of Translog-II. Translog-II is a Windows-oriented program to record and study reading and writing processes on a computer. In our research, it is an instrument to acquire objective, digital data of human translation processes. Like its predecessors, Translog 2000 and Translog 2006, Translog-II consists of two main components: Translog-II Supervisor and Translog-II User, which are used to create a project file, to run text production experiments (a user reads, writes or translates a text) and to replay the session. Translog produces a log file which contains all user activity data of the reading, writing, or translation session, and which can be evaluated by external tools. 
While there is a large body of translation process research based on Translog, this paper gives an overview of the Translog-II functions and its data visualization options.", "phrases": ["user activity data", "reading", "translation process research", "translog-ii"], "overall_score": 3.0508146602684256, "scores": [2.6947432201400865, 1.734981599274525, 0.5703365666445669, 0.5538809678299206], "rank_score": 1.3884855884722747} -{"id": "bouchacourt-baroni-2018-agents", "title": "How agents see things: On visual representations in an emergent language game", "abstract": "There is growing interest in the language developed by agents interacting in emergent-communication settings. Earlier studies have focused on the agents' symbol usage, rather than on their representation of visual input. In this paper, we consider the referential games of Lazaridou et al. (2017), and investigate the representations the agents develop during their evolving interaction. We find that the agents establish successful communication by inducing visual representations that almost perfectly align with each other, but, surprisingly, do not capture the conceptual properties of the objects depicted in the input images. We conclude that, if we care about developing language-like communication systems, we must pay more attention to the visual semantics agents associate to the symbols they use.", "phrases": ["agent", "visual representation", "emergent language"], "overall_score": 2.887234887795292, "scores": [2.7473181125454964, 0.842525147188523, 0.5755561947698519], "rank_score": 1.3884664848346235} -{"id": "rama-2016-siamese", "title": "Siamese Convolutional Networks for Cognate Identification", "abstract": "In this paper, we present phoneme level Siamese convolutional networks for the task of pair-wise cognate identification. We represent a word as a two-dimensional matrix and employ a siamese convolutional network for learning deep representations. We present siamese architectures that jointly learn phoneme level feature representations and language relatedness from raw words for cognate identification. Compared to previous works, we train and test on larger and realistic datasets; and, show that siamese architectures consistently perform better than a traditional linear classifier approach.", "phrases": ["cognate identification", "siamese architecture", "language relatedness", "convolutional neural network", "phonetic feature"], "overall_score": 3.1969342125231113, "scores": [3.2709009715025954, 1.2363836986863515, 0.978087225548076, 0.8789994615605516, 0.5776830802349509], "rank_score": 1.3884108875065049} -{"id": "li-etal-2018-transformation", "title": "Transformation Networks for Target-Oriented Sentiment Classification", "abstract": "Target-oriented sentiment classification aims at classifying sentiment polarities over individual opinion targets in a sentence. RNN with attention seems a good fit for the characteristics of this task, and indeed it achieves the state-of-the-art performance. After re-examining the drawbacks of the attention mechanism and the obstacles that prevent CNN from performing well in this classification task, we propose a new model that achieves new state-of-the-art results on a few benchmarks. Instead of attention, our model employs a CNN layer to extract salient features from the transformed word representations originated from a bi-directional RNN layer. 
Between the two layers, we propose a component which first generates target-specific representations of words in the sentence, and then incorporates a mechanism for preserving the original contextual information from the RNN layer.", "phrases": ["sentiment classification", "cnn", "contextual information", "absa", "deep learning"], "overall_score": 4.465084748898463, "scores": [3.8511004465724534, 0.8622733588339606, 0.8527123397914602, 0.8492650190955275, 0.5204316863564661], "rank_score": 1.3871565701299737} -{"id": "murawaki-2015-continuous", "title": "Continuous Space Representations of Linguistic Typology and their Application to Phylogenetic Inference", "abstract": "For phylogenetic inference, linguistic typology is a promising alternative to lexical evidence because it allows us to compare an arbitrary pair of languages. A challenging problem with typology-based phylogenetic inference is that the changes of typological features over time are less intuitive than those of lexical features. In this paper, we work on reconstructing typologically natural ancestors. To do this, we leverage dependencies among typological features. We first represent each language by continuous latent components that capture feature dependencies. We then combine them with a typology evaluator that distinguishes typologically natural languages from other possible combinations of features. We perform phylogenetic inference in the continuous space and use the evaluator to ensure the typological naturalness of inferred ancestors. We show that the proposed method reconstructs known language families more accurately than baseline methods. Lastly, assuming the monogenesis hypothesis, we attempt to reconstruct a common ancestor of the world\u2019s languages.", "phrases": ["linguistic typology", "phylogenetic inference", "continuous latent component"], "overall_score": 2.485290936621092, "scores": [2.72914505221578, 0.9050218050204026, 0.5270349881630183], "rank_score": 1.3870672817997338} -{"id": "nguyen-moschitti-2011-end", "title": "End-to-End Relation Extraction Using Distant Supervision from External Semantic Repositories", "abstract": "In this paper, we extend distant supervision (DS) based on Wikipedia for Relation Extraction (RE) by considering (i) relations defined in external repositories, e.g. YAGO, and (ii) any subset of Wikipedia documents. We show that training data constituted by sentences containing pairs of named entities in target relations is enough to produce reliable supervision. Our experiments with state-of-the-art relation extraction models, trained on the above data, show a meaningful F1 of 74.29% on a manually annotated test set: this highly improves the state-of-the-art in RE using DS. Additionally, our end-to-end experiments demonstrated that our extractors can be applied to any general text document.", "phrases": ["relation extraction", "distant supervision", "wikipedia"], "overall_score": 3.192073745870193, "scores": [2.418879309463339, 0.9015041237350156, 0.8385166077806491], "rank_score": 1.3863000136596677} -{"id": "le-nagard-koehn-2010-aiding", "title": "Aiding Pronoun Translation with Co-Reference Resolution", "abstract": "We propose a method to improve the translation of pronouns by resolving their co-reference to prior mentions. 
We report results using two different co-reference resolution methods and point to remaining challenges.", "phrases": ["pronoun translation", "co-reference resolution", "antecedent", "annotated corpus", "coreference link"], "overall_score": 4.56881155040252, "scores": [2.658371904295916, 0.8578454775056418, 1.3449458850518348, 1.2301927618159643, 0.8398297917342293], "rank_score": 1.3862371640807174} -{"id": "melamud-etal-2016-context2vec", "title": "context2vec: Learning Generic Context Embedding with Bidirectional LSTM", "abstract": "Context representations are central to various NLP tasks, such as word sense disambiguation, named entity recognition, co-reference resolution, and many more. In this work we present a neural model for efficiently learning a generic context embedding function from large corpora, using bidirectional LSTM. With a very simple application of our context representations, we manage to surpass or nearly reach state-of-the-art results on sentence completion, lexical substitution and word sense disambiguation tasks, while substantially outperforming the popular context representation of averaged word embeddings. We release our code and pre-trained models, suggesting they could be useful in a wide variety of NLP tasks.", "phrases": ["generic context", "bidirectional lstm", "state-of-the-art result", "word sense disambiguation", "supervised approach"], "overall_score": 5.27512313427389, "scores": [3.1450075539785076, 2.211088645825329, 0.5295465143367434, 0.5220269330848734, 0.5211336659784066], "rank_score": 1.3857606626407721} -{"id": "miller-etal-2004-name", "title": "Name Tagging with Word Clusters and Discriminative Training", "abstract": "We present a technique for augmenting annotated training data with hierarchical word clusters that are automatically derived from a large unannotated corpus. Cluster membership is encoded in features that are incorporated in a discriminatively trained tagging model. Active learning is used to select training examples. We evaluate the technique for named-entity tagging. Compared with a state-of-the-art HMM-based name finder, the presented technique requires only 13% as much annotated data to achieve the same level of performance. Given a large annotated training set of 1,000,000 words, the technique achieves a 25% reduction in error over the state-of-the-art HMM trained on the same material.", "phrases": ["word cluster", "active learning", "name tagging", "unlabeled data"], "overall_score": 4.758277867832795, "scores": [1.8980702681441508, 1.915253978469048, 0.904471196544464, 0.8247736865327284], "rank_score": 1.385642282422598} -{"id": "baroni-etal-2014-dont", "title": "Don't count, predict! A systematic comparison of context-counting vs. context-predicting semantic vectors", "abstract": "Context-predicting models (more commonly known as embeddings or neural language models) are the new kids on the distributional semantics block. Despite the buzz surrounding these models, the literature is still lacking a systematic comparison of the predictive models with classic, count-vector-based distributional semantic approaches. In this paper, we perform such an extensive evaluation, on a wide range of lexical semantics tasks and across many parameter settings. 
The results, to our own surprise, show that the buzz is fully justified, as the context-predicting models obtain a thorough and resounding victory against their count-based counterparts.", "phrases": ["systematic comparison", "word embedding", "count-based method", "semantic model", "preference"], "overall_score": 6.088840011038552, "scores": [1.67262795290581, 2.417964950050541, 1.04451997964716, 0.9652894771926446, 0.8274739185816881], "rank_score": 1.3855752556755687} -{"id": "koo-etal-2008-simple", "title": "Simple Semi-supervised Dependency Parsing", "abstract": "We present a simple and effective semi-supervised method for training dependency parsers. We focus on the problem of lexical representation, introducing features that incorporate word clusters derived from a large unannotated corpus. We demonstrate the effectiveness of the approach in a series of dependency parsing experiments on the Penn Treebank and Prague Dependency Treebank, and we show that the cluster-based features yield substantial gains in performance across a wide range of conditions. For example, in the case of English unlabeled second-order parsing, we improve from a baseline accuracy of 92.02% to 93.16%, and in the case of Czech unlabeled second-order parsing, we improve from a baseline accuracy of 86.13% to 87.13%. In addition, we demonstrate that our method also improves performance when small amounts of training data are available, and can roughly halve the amount of supervised data required to reach a desired level of performance.", "phrases": ["word cluster", "czech", "unlabeled data", "brown cluster", "error reduction"], "overall_score": 5.694963282103281, "scores": [2.4524873653017614, 1.4699292442673622, 1.3326676553529988, 1.1043429714061737, 0.5672791894458828], "rank_score": 1.3853412851548357} -{"id": "soni-etal-2013-exploring", "title": "Exploring Verb Frames for Sentence Simplification in Hindi", "abstract": "Systems processing natural language text encounter fatal problems due to long and complex sentences. Their performance degrades as the complexity of the sentence increases. This paper addresses the task of simplifying complex sentences in Hindi into multiple simple sentences, using a rule-based approach. Our approach utilizes two linguistic resources, viz. verb demand frames and conjuncts\u2019 list. We performed automatic as well as human evaluation of our system.", "phrases": ["sentence simplification", "hindi", "conjunct"], "overall_score": 2.2289889801147313, "scores": [1.8907972122530081, 1.7285262137897415, 0.5355227402382979], "rank_score": 1.3849487220936825} -{"id": "islamaj-dogan-etal-2017-biocreative", "title": "BioCreative VI Precision Medicine Track: creating a training corpus for mining protein-protein interactions affected by mutations", "abstract": "The Precision Medicine Track in BioCreative VI aims to bring together the BioNLP community for a novel challenge focused on mining the biomedical literature in search of mutations and protein-protein interactions (PPI). In order to support this track with an effective training dataset with limited curator time, the track organizers carefully reviewed PubMed articles from two different sources: curated public PPI databases, and the results of state-of-the-art public text mining tools. We detail here the data collection, manual review and annotation process and describe the characteristics of this training corpus. We also describe a corpus performance baseline. 
This analysis will provide useful information to developers and researchers for comparing and developing innovative text mining approaches for the BioCreative VI challenge and other Precision Medicine related applications.", "phrases": ["precision medicine track", "protein-protein interaction", "mutation"], "overall_score": 1.9195398828678432, "scores": [1.8143306774613377, 1.7697900760973369, 0.569845251144354], "rank_score": 1.3846553349010096} -{"id": "yu-etal-2018-diverse", "title": "Diverse Few-Shot Text Classification with Multiple Metrics", "abstract": "We study few-shot learning in natural language domains. Compared to many existing works that apply either metric-based or optimization-based meta-learning to image domain with low inter-task variance, we consider a more realistic setting, where tasks are diverse. However, it imposes tremendous difficulties on existing state-of-the-art metric-based algorithms since a single metric is insufficient to capture complex task variations in natural language domain. To alleviate the problem, we propose an adaptive metric learning approach that automatically determines the best weighted combination from a set of metrics obtained from meta-training tasks for a newly seen few-shot task. Extensive quantitative evaluations on real-world sentiment analysis and dialog intent classification datasets demonstrate that the proposed method performs favorably against state-of-the-art few-shot learning algorithms in terms of predictive accuracy. We make our code and data available for further study.", "phrases": ["text classification", "multiple metric", "few-shot task", "new class", "training task"], "overall_score": 4.5635655788598415, "scores": [2.974706419670117, 1.4104118210965961, 1.0483085817015059, 0.9107153358222939, 0.5790851803132155], "rank_score": 1.3846454677207458} -{"id": "venugopal-etal-2014-relieving", "title": "Relieving the Computational Bottleneck: Joint Inference for Event Extraction with High-Dimensional Features", "abstract": "Several state-of-the-art event extraction systems employ models based on Support Vector Machines (SVMs) in a pipeline architecture, which fails to exploit the joint dependencies that typically exist among events and arguments. While there have been attempts to overcome this limitation using Markov Logic Networks (MLNs), it remains challenging to perform joint inference in MLNs when the model encodes many high-dimensional sophisticated features such as those essential for event extraction. In this paper, we propose a new model for event extraction that combines the power of MLNs and SVMs, dwarfing their limitations. The key idea is to reliably learn and process high-dimensional features using SVMs; encode the output of SVMs as low-dimensional, soft formulas in MLNs; and use the superior joint inferencing power of MLNs to enforce joint consistency constraints over the soft formulas. We evaluate our approach for the task of extracting biomedical events on the BioNLP 2013, 2011 and 2009 Genia shared task datasets. 
Our approach yields the best F1 score to date on the BioNLP\u201913 (53.61) and BioNLP\u201911 (58.07) datasets and the second-best F1 score to date on the BioNLP\u201909 dataset (58.16).", "phrases": ["joint inference", "event extraction", "svms", "hand-crafted feature", "argument extraction"], "overall_score": 3.186274914954279, "scores": [2.928646016706516, 2.320465755292826, 0.5695737303347846, 0.5650409606177604, 0.5351816040050943], "rank_score": 1.3837816133913963} -{"id": "luo-etal-2018-incorporating", "title": "Incorporating Glosses into Neural Word Sense Disambiguation", "abstract": "Word Sense Disambiguation (WSD) aims to identify the correct meaning of polysemous words in a particular context. Lexical resources like WordNet have proved to be of great help for WSD in knowledge-based methods. However, previous neural networks for WSD always rely on massive labeled data (context), ignoring lexical resources like glosses (sense definitions). In this paper, we integrate the context and glosses of the target word into a unified framework in order to make full use of both labeled data and lexical knowledge. Therefore, we propose GAS: a gloss-augmented WSD neural network which jointly encodes the context and glosses of the target word. GAS models the semantic relationship between the context and the gloss in an improved memory network framework, which breaks the barriers of the previous supervised methods and knowledge-based methods. We further extend the original gloss of word sense via its semantic relations in WordNet to enrich the gloss information. The experimental results show that our model outperforms the state-of-the-art systems on several English all-words WSD datasets.", "phrases": ["gloss", "target word", "semantic relationship"], "overall_score": 3.5491818161575184, "scores": [3.033605288781812, 0.5628082679716957, 0.5547582461120867], "rank_score": 1.3837239342885315} -{"id": "pitler-etal-2008-easily", "title": "Easily Identifiable Discourse Relations", "abstract": "We present a corpus study of local discourse relations based on the Penn Discourse Tree Bank, a large manually annotated corpus of explicitly or implicitly realized relations. We show that while there is a large degree of ambiguity in temporal explicit discourse connectives, overall connectives are mostly unambiguous and allow high-accuracy prediction of discourse relation type. We achieve 93.09% accuracy in classifying the explicit relations and 74.74% accuracy overall. In addition, we show that some pairs of relations occur together in text more often than expected by chance. This finding suggests that global sequence classification of the relations in text can lead to better results, especially for implicit relations.", "phrases": ["discourse relation", "explicit relation", "cue", "high accuracy", "previous study"], "overall_score": 5.102853658609034, "scores": [2.379455068473028, 1.568367949137641, 1.3990499806102246, 1.04797711876815, 0.52168608605521], "rank_score": 1.383307240608851} -{"id": "flanigan-etal-2014-discriminative", "title": "A Discriminative Graph-Based Parser for the Abstract Meaning Representation", "abstract": "Abstract Meaning Representation (AMR) is a semantic formalism for which a growing set of annotated examples is available. We introduce the first approach to parse sentences into this representation, providing a strong baseline for future improvement. 
The method is based on a novel algorithm for finding a maximum spanning, connected subgraph, embedded within a Lagrangian relaxation of an optimization problem that imposes linguistically inspired constraints. Our approach is described in the general framework of structured prediction, allowing future incorporation of additional features and constraints, and may extend to other formalisms as well. Our open-source system, JAMR, is available at: http://github.com/jflanigan/jamr", "phrases": ["abstract meaning representation", "amr graph", "aligner", "jamr parser"], "overall_score": 5.685087588848895, "scores": [1.9387854700828306, 1.5222516329956075, 1.5033492794220562, 0.5673694209504627], "rank_score": 1.3829389508627394} -{"id": "kim-etal-2006-automatically", "title": "Automatically Assessing Review Helpfulness", "abstract": "User-supplied reviews are widely and increasingly used to enhance e-commerce and other websites. Because reviews can be numerous and varying in quality, it is important to assess how helpful each review is. While review helpfulness is currently assessed manually, in this paper we consider the task of automatically assessing it. Experiments using SVM regression on a variety of features over Amazon.com product reviews show promising results, with rank correlations of up to 0.66. We found that the most useful features include the length of the review, its unigrams, and its product rating.", "phrases": ["review helpfulness", "ranking", "percentage", "semantic feature"], "overall_score": 4.070406826610947, "scores": [2.966975605681012, 1.1604320975709022, 0.8525632363775627, 0.5496485979617063], "rank_score": 1.3824048843977959} -{"id": "van-gael-etal-2009-infinite", "title": "The infinite HMM for unsupervised PoS tagging", "abstract": "We extend previous work on fully unsupervised part-of-speech tagging. Using a non-parametric version of the HMM, called the infinite HMM (iHMM), we address the problem of choosing the number of hidden states in unsupervised Markov models for PoS tagging. We experiment with two non-parametric priors, the Dirichlet and Pitman-Yor processes, on the Wall Street Journal dataset using a parallelized implementation of an iHMM inference algorithm. We evaluate the results with a variety of clustering evaluation metrics and achieve equivalent or better performances than previously reported. Building on this promising result we evaluate the output of the unsupervised PoS tagger as a direct replacement for the output of a fully supervised PoS tagger for the task of shallow parsing and compare the two evaluations.", "phrases": ["hmm", "pos tagging", "non-parametric prior"], "overall_score": 2.6889858451226836, "scores": [2.594489872965461, 0.9645765811531648, 0.5865296512741852], "rank_score": 1.3818653684642703} -{"id": "laubli-etal-2013-assessing", "title": "Assessing post-editing efficiency in a realistic translation environment", "abstract": "In many experimental studies on assessing post-editing efficiency, idiosyncratic user interfaces isolate translators from translation aids that are available to them in their daily work. In contrast, our experimental design allows translators to use a well-known translator workbench for both conventional translation and post-editing. 
We find that post-editing reduces translation time significantly, although considerably less than reported in isolated experiments, and argue that overall assessments of post-editing efficiency should be based on a realistic translation environment.", "phrases": ["post-editing efficiency", "realistic translation environment", "translator"], "overall_score": 3.181327359829884, "scores": [1.871476770868428, 0.902126774294817, 1.3712952073426339], "rank_score": 1.3816329175019595} -{"id": "lei-etal-2016-rationalizing", "title": "Rationalizing Neural Predictions", "abstract": "Prediction without justification has limited applicability. As a remedy, we learn to extract pieces of input text as justifications -- rationales -- that are tailored to be short and coherent, yet sufficient for making the same prediction. Our approach combines two modular components, generator and encoder, which are trained to operate well together. The generator specifies a distribution over text fragments as candidate rationales and these are passed through the encoder for prediction. Rationales are never given during training. Instead, the model is regularized by desiderata for rationales. We evaluate the approach on multi-aspect sentiment analysis against manually annotated test cases. Our approach outperforms an attention-based baseline by a significant margin. We also successfully illustrate the method on the question retrieval task.", "phrases": ["neural prediction", "rationale", "subset", "hard attention", "reinforcement learning"], "overall_score": 6.162892848609547, "scores": [2.911575348208118, 1.5748268100914056, 0.9861114382573474, 0.8638592114812035, 0.563559618769448], "rank_score": 1.3799864853615045} -{"id": "bunt-2006-dimensions", "title": "Dimensions in Dialogue Act Annotation", "abstract": "This paper is concerned with the fundamentals of multidimensional dialogue act annotation, i.e. with what it means to annotate dialogues with information about the communicative acts that are performed with the utterances, taking various 'dimensions' into account. Two ideas seem to be prevalent in the literature concerning the notion of dimension: (1) dimensions correspond to different types of information; and (2) a dimension is formed by a set of mutually exclusive tags. In DAMSL, for instance, the terms \u201cdimension\u201d and \u201clayer\u201d are used sometimes in the sense of (1) and sometimes in that of (2). We argue that being mutually exclusive is not a good criterion for a set of dialogue act types to constitute a dimension, even though the description of an object in a multidimensional space should never assign more than one value per dimension. We define a dimension of dialogue act annotation as an aspect of participating in a dialogue that can be addressed independently by means of dialogue acts. We show that DAMSL dimensions such as Info-request, Statement, and Answer do not qualify as proper dimensions, and that the communicative functions in these categories do not fall in any specific dimension, but should be considered as \u201cgeneral-purpose\u201d in the sense that they can be used in any dimension. 
We argue that using the notion of dimension that we propose, a multidimensional taxonomy of dialogue acts emerges that optimally supports multidimensional dialogue act annotation.", "phrases": ["dialogue act annotation", "communicative function", "dimension"], "overall_score": 2.2190304639403884, "scores": [2.57735601198018, 1.0005424498623, 0.5583849824520786], "rank_score": 1.3787611480981863} -{"id": "mizumoto-nagata-2017-analyzing", "title": "Analyzing the Impact of Spelling Errors on POS-Tagging and Chunking in Learner English", "abstract": "Part-of-speech (POS) tagging and chunking have been used in tasks targeting learner English; however, to the best of our knowledge, few studies have evaluated their performance and no studies have revealed the causes of POS-tagging/chunking errors in detail. Therefore, we investigate performance and analyze the causes of failure. We focus on spelling errors that occur frequently in learner English. We demonstrate that POS-tagging performance is reduced by 0.23% owing to spelling errors, and that a spell checker is not necessary for POS-tagging/chunking of learner English.", "phrases": ["spelling error", "pos-tagging", "learner english"], "overall_score": 1.9110791796882174, "scores": [1.668001490719296, 1.652584839449758, 0.8150703527544549], "rank_score": 1.3785522276411697} -{"id": "gangi-reddy-etal-2019-multi", "title": "Multi-Level Memory for Task Oriented Dialogs", "abstract": "Recent end-to-end task oriented dialog systems use memory architectures to incorporate external knowledge in their dialogs. Current work makes simplifying assumptions about the structure of the knowledge base, such as the use of triples to represent knowledge, and combines dialog utterances (context) as well as knowledge base (KB) results as part of the same memory. This causes an explosion in the memory size, and makes the reasoning over memory harder. In addition, such a memory design forces hierarchical properties of the data to be fit into a triple structure of memory. This requires the memory reader to infer relationships across otherwise connected attributes. In this paper we relax the strong assumptions made by existing architectures and separate memories used for modeling dialog context and KB results. Instead of using triples to store KB results, we introduce a novel multi-level memory architecture consisting of cells for each query and their corresponding results. The multi-level memory first addresses queries, followed by results and finally each key-value pair within a result. We conduct detailed experiments on three publicly available task oriented dialog data sets and we find that our method conclusively outperforms current state-of-the-art models. We report a 15-25% increase in both entity F1 and BLEU scores.", "phrases": ["memory", "dialog", "multi-level memory architecture", "task-oriented dialogue generation"], "overall_score": 3.7330758139462046, "scores": [3.212171162641781, 0.832113943681364, 0.8917547118414906, 0.5780024435730475], "rank_score": 1.3785105654344207} -{"id": "barnes-etal-2021-structured", "title": "Structured Sentiment Analysis as Dependency Graph Parsing", "abstract": "Structured sentiment analysis attempts to extract full opinion tuples from a text, but over time this task has been subdivided into smaller and smaller sub-tasks, e.g., target extraction or targeted polarity classification. We argue that this division has become counterproductive and propose a new unified framework to remedy the situation. 
We cast the structured sentiment problem as dependency graph parsing, where the nodes are spans of sentiment holders, targets and expressions, and the arcs are the relations between them. We perform experiments on five datasets in four languages (English, Norwegian, Basque, and Catalan) and show that this approach leads to strong improvements over state-of-the-art baselines. Our analysis shows that refining the sentiment graphs with syntactic dependency information further improves results.", "phrases": ["sentiment analysis", "dependency graph parsing", "target extraction", "polarity", "strong improvement"], "overall_score": 3.984260924872015, "scores": [4.308386887535962, 0.9636320630478742, 0.5527374570314938, 0.5369855317452384, 0.5305564544143335], "rank_score": 1.3784596787549803} -{"id": "feng-etal-2020-genaug", "title": "GenAug: Data Augmentation for Finetuning Text Generators", "abstract": "In this paper, we investigate data augmentation for text generation, which we call GenAug. Text generation and language modeling are important tasks within natural language processing, and are especially challenging for low-data regimes. We propose and evaluate various augmentation methods, including some that incorporate external knowledge, for finetuning GPT-2 on a subset of Yelp Reviews. We also examine the relationship between the amount of augmentation and the quality of the generated text. We utilize several metrics that evaluate important aspects of the generated text including its diversity and fluency. Our experiments demonstrate that insertion of character-level synthetic noise and keyword replacement with hypernyms are effective augmentation methods, and that the quality of generations improves to a peak at approximately three times the amount of original data.", "phrases": ["data augmentation", "text generator", "character-level synthetic noise", "genaug"], "overall_score": 2.2180356751271995, "scores": [2.314401900718554, 1.7202877426833365, 0.9287859697894638, 0.5490965910328176], "rank_score": 1.378143051056043} -{"id": "koehn-etal-2020-findings", "title": "Findings of the WMT 2020 Shared Task on Parallel Corpus Filtering and Alignment", "abstract": "Following the two preceding WMT Shared Tasks on Parallel Corpus Filtering (Koehn et al., 2018, 2019), we posed again the challenge of assigning sentence-level quality scores for very noisy corpora of sentence pairs crawled from the web, with the goal of sub-selecting the highest-quality data to be used to train machine translation systems. This year, the task tackled the low-resource condition of Pashto\u2013English and Khmer\u2013English and also included the challenge of sentence alignment from document pairs.", "phrases": ["wmt", "shared task", "parallel corpus filtering"], "overall_score": 2.4686343883531676, "scores": [2.478230913794235, 0.8349479937747335, 0.8201343480602561], "rank_score": 1.3777710852097413} -{"id": "dubey-2005-lexicalization", "title": "What to Do When Lexicalization Fails: Parsing German with Suffix Analysis and Smoothing", "abstract": "In this paper, we present an unlexicalized parser for German which employs smoothing and suffix analysis to achieve a labelled bracket F-score of 76.2, higher than previously reported results on the NEGRA corpus. 
In addition to the high accuracy of the model, the use of smoothing in an unlexicalized parser allows us to better examine the interplay between smoothing and parsing results.", "phrases": ["german", "suffix analysis", "smoothing"], "overall_score": 2.864915674830474, "scores": [2.474178263268934, 0.7926811649030555, 0.8663402084709935], "rank_score": 1.3777332122143278} -{"id": "rosenberg-hirschberg-2007-v", "title": "V-Measure: A Conditional Entropy-Based External Cluster Evaluation Measure", "abstract": "We present V-measure, an external entropy-based cluster evaluation measure. V-measure provides an elegant solution to many problems that affect previously defined cluster evaluation measures including 1) dependence on clustering algorithm or data set, 2) the \u201cproblem of matching\u201d, where the clustering of only a portion of data points is evaluated and 3) accurate evaluation and combination of two desirable aspects of clustering, homogeneity and completeness. We compare V-measure to a number of popular cluster evaluation measures and demonstrate that it satisfies several desirable properties of clustering solutions, using simulated clustering results. Finally, we use V-measure to evaluate two clustering tasks: document clustering and pitch accent type clustering.", "phrases": ["evaluation measure", "clustering solution", "v-measure"], "overall_score": 2.8649117823222934, "scores": [2.9558697924684685, 0.6028067701896623, 0.574517458282604], "rank_score": 1.3777313403135782} -{"id": "mcdonald-etal-2005-non", "title": "Non-Projective Dependency Parsing using Spanning Tree Algorithms", "abstract": "We formalize weighted dependency parsing as searching for maximum spanning trees (MSTs) in directed graphs. Using this representation, the parsing algorithm of Eisner (1996) is sufficient for searching over all projective trees in O(n^3) time. More surprisingly, the representation is extended naturally to non-projective parsing using the Chu-Liu-Edmonds (Chu and Liu, 1965; Edmonds, 1967) MST algorithm, yielding an O(n^2) parsing algorithm. We evaluate these methods on the Prague Dependency Treebank using online large-margin learning techniques (Crammer et al., 2003; McDonald et al., 2005) and show that MST parsing increases efficiency and accuracy for languages with non-projective dependencies.", "phrases": ["maximum spanning tree", "non-projective dependency parsing", "mstparser", "graph-based parser", "graph-based approach"], "overall_score": 6.019775512239497, "scores": [0.9913760197074712, 2.1618473080966822, 1.6594458655087392, 1.1935952327986055, 0.8822209334753983], "rank_score": 1.3776970719173793} -{"id": "heafield-2011-kenlm", "title": "KenLM: Faster and Smaller Language Model Queries", "abstract": "We present KenLM, a library that implements two data structures for efficient language model queries, reducing both time and memory costs. The Probing data structure uses linear probing hash tables and is designed for speed. Compared with the widely-used SRILM, our Probing model is 2.4 times as fast while using 57% of the memory. The Trie data structure is a trie with bit-level packing, sorted records, interpolation search, and optional quantization aimed at lower memory consumption. Trie simultaneously uses less memory than the smallest lossless baseline and less CPU than the fastest baseline. Our code is open-source, thread-safe, and integrated into the Moses, cdec, and Joshua translation systems. 
This paper describes the several performance techniques used and presents benchmarks against alternative implementations.", "phrases": ["language model", "kenlm", "storage", "sentence pair", "qe-clean system"], "overall_score": 4.318219144606734, "scores": [3.2246533527011745, 1.707163722412273, 0.8479803854662598, 0.5562393400716502, 0.5499895276209525], "rank_score": 1.377205265654462} -{"id": "mikolov-etal-2018-advances", "title": "Advances in Pre-Training Distributed Word Representations", "abstract": "Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.", "phrases": ["word representation", "pre-trained model", "fasttext"], "overall_score": 4.054481350483547, "scores": [1.9545273069269908, 1.568675206537248, 0.6077861528025241], "rank_score": 1.376996222088921} -{"id": "chen-etal-2018-neural-natural", "title": "Neural Natural Language Inference Models Enhanced with External Knowledge", "abstract": "Modeling natural language inference is a very challenging task. With the availability of large annotated data, it has recently become feasible to train complex models such as neural-network-based inference models, which have shown to achieve the state-of-the-art performance. Although there exist relatively large annotated data, can machines learn all knowledge needed to perform natural language inference (NLI) from these data? If not, how can neural-network-based NLI models benefit from external knowledge and how to build NLI models to leverage it? In this paper, we enrich the state-of-the-art neural natural language inference models with external knowledge. We demonstrate that the proposed models improve neural NLI models to achieve the state-of-the-art performance on the SNLI and MultiNLI datasets.", "phrases": ["external knowledge", "nli model", "kim", "co-attention", "local inference collection"], "overall_score": 3.9779705036810333, "scores": [3.5829514016037787, 1.3348517890251348, 0.8305587942201227, 0.5791431862804455, 0.5539115407756116], "rank_score": 1.3762833423810186} -{"id": "chen-cardie-2018-unsupervised", "title": "Unsupervised Multilingual Word Embeddings", "abstract": "Multilingual Word Embeddings (MWEs) represent words from multiple languages in a single distributional vector space. Unsupervised MWE (UMWE) methods acquire multilingual embeddings without cross-lingual supervision, which is a significant advantage over traditional supervised approaches and opens many new possibilities for low-resource languages. Prior art for learning UMWEs, however, merely relies on a number of independently trained Unsupervised Bilingual Word Embeddings (UBWEs) to obtain multilingual embeddings. These methods fail to leverage the interdependencies that exist among many languages. To address this shortcoming, we propose a fully unsupervised framework for learning MWEs that directly exploits the relations between all language pairs. Our model substantially outperforms previous approaches in the experiments on multilingual word translation and cross-lingual word similarity. 
In addition, our model even beats supervised approaches trained with cross-lingual resources.", "phrases": ["multilingual word embedding", "multiple language", "unsupervised approach"], "overall_score": 4.18964040573654, "scores": [2.368739301609493, 0.8833108224613059, 0.8763218864203354], "rank_score": 1.376124003497045} -{"id": "seroussi-etal-2014-authorship", "title": "Authorship Attribution with Topic Models", "abstract": "Authorship attribution deals with identifying the authors of anonymous texts. Traditionally, research in this field has focused on formal texts, such as essays and novels, but recently more attention has been given to texts generated by on-line users, such as e-mails and blogs. Authorship attribution of such on-line texts is a more challenging task than traditional authorship attribution, because such texts tend to be short, and the number of candidate authors is often larger than in traditional settings. We address this challenge by using topic models to obtain author representations. In addition to exploring novel ways of applying two popular topic models to this task, we test our new model that projects authors and documents to two disjoint topic spaces. Utilizing our model in authorship attribution yields state-of-the-art performance on several data sets, containing either formal texts written by a few authors or informal texts generated by tens to thousands of on-line users. We also present experimental results that demonstrate the applicability of topical author representations to two other problems: inferring the sentiment polarity of texts, and predicting the ratings that users would give to items such as movies.", "phrases": ["blog", "authorship attribution", "author-topic model"], "overall_score": 2.6770838408271467, "scores": [2.9369915408167206, 0.6409647810770301, 0.5492905226639982], "rank_score": 1.3757489481859162} -{"id": "vickrey-koller-2008-sentence", "title": "Sentence Simplification for Semantic Role Labeling", "abstract": "Parse-tree paths are commonly used to incorporate information from syntactic parses into NLP systems. These systems typically treat the paths as atomic (or nearly atomic) features; these features are quite sparse due to the immense variety of syntactic expression. In this paper, we propose a general method for learning how to iteratively simplify a sentence, thus decomposing complicated syntax into small, easy-to-process pieces. Our method applies a series of hand-written transformation rules corresponding to basic syntactic patterns \u2014 for example, one rule \u201cdepassivizes\u201d a sentence. The model is parameterized by learned weights specifying preferences for some rules over others. After applying all possible transformations to a sentence, we are left with a set of candidate simplified sentences. We apply our simplification system to semantic role labeling (SRL). As we do not have labeled examples of correct simplifications, we use labeled training data for the SRL task to jointly learn both the weights of the simplification model and of an SRL model, treating the simplification as a hidden variable. By extracting and labeling simplified sentences, this combined simplification/SRL system better generalizes across syntactic variation. 
It achieves a statistically significant 1.2% F1 measure increase over a strong baseline on the CoNLL-2005 SRL task, attaining near-state-of-the-art performance.", "phrases": ["semantic role labeling", "path", "text simplification"], "overall_score": 3.418278387933398, "scores": [2.977953448306827, 0.6115066324043713, 0.5373891772579427], "rank_score": 1.3756164193230471} -{"id": "katiyar-cardie-2016-investigating", "title": "Investigating LSTMs for Joint Extraction of Opinion Entities and Relations", "abstract": "We investigate the use of deep bi-directional LSTMs for joint extraction of opinion entities and the IS-FROM and IS-ABOUT relations that connect them \u2014 the first such attempt using a deep learning approach. Perhaps surprisingly, we find that standard LSTMs are not competitive with a state-of-the-art CRF+ILP joint inference approach (Yang and Cardie, 2013) to opinion entities extraction, performing below even the standalone sequence-tagging CRF. Incorporating sentence-level and a novel relation-level optimization, however, allows the LSTM to identify opinion relations and to perform within 1\u20133% of the state-of-the-art joint model for opinion entities and the IS-FROM relation; and to perform as well as the state-of-the-art for the IS-ABOUT relation \u2014 all without access to opinion lexicons, parsers and other preprocessing components required for the feature-rich CRF+ILP approach.", "phrases": ["joint extraction", "opinion entity", "bidirectional lstm"], "overall_score": 3.8955316320732134, "scores": [2.225458260818436, 1.338801609818201, 0.5605953651107471], "rank_score": 1.3749517452491282} -{"id": "lin-etal-2020-commongen", "title": "CommonGen: A Constrained Text Generation Challenge for Generative Commonsense Reasoning", "abstract": "Recently, large-scale pre-trained language models have demonstrated impressive performance on several commonsense-reasoning benchmark datasets. However, building machines with commonsense to compose realistically plausible sentences remains challenging. In this paper, we present a constrained text generation task, CommonGen associated with a benchmark dataset, to explicitly test machines for the ability of generative commonsense reasoning. Given a set of common concepts (e.g., dog, frisbee, catch, throw), the task is to generate a coherent sentence describing an everyday scenario using these concepts (e.g., \u201ca man throws a frisbee and his dog catches it\u201d). The CommonGen task is challenging because it inherently requires 1) relational reasoning with background commonsense knowledge and 2) compositional generalization ability to work on unseen concept combinations. Our dataset, constructed through a combination of crowdsourced and existing caption corpora, consists of 77k commonsense descriptions over 35k unique concept-sets. Experiments show that there is a large gap between state-of-the-art text generation models (e.g., T5) and human performance (31.6% vs. 63.5% in SPICE metric). 
Furthermore, we demonstrate that the learned generative commonsense reasoning capability can be transferred to improve downstream tasks such as CommonsenseQA (76.9% to 78.4% in dev accuracy) by generating additional context.", "phrases": ["language model", "commongen", "bart"], "overall_score": 3.1654982025799225, "scores": [2.693271270602786, 0.9077574305481753, 0.5232465044144043], "rank_score": 1.3747584018551222} -{"id": "wiegand-klakow-2010-convolution", "title": "Convolution Kernels for Opinion Holder Extraction", "abstract": "Opinion holder extraction is one of the important subtasks in sentiment analysis. The effective detection of an opinion holder depends on the consideration of various cues on various levels of representation, though they are hard to formulate explicitly as features. In this work, we propose to use convolution kernels for that task which identify meaningful fragments of sequences or trees by themselves. We not only investigate how different levels of information can be effectively combined in different kernels but also examine how the scope of these kernels should be chosen. In general relation extraction, the two candidate entities thought to be involved in a relation are commonly chosen to be the boundaries of sequences and trees. The definition of boundaries in opinion holder extraction, however, is less straightforward since there might be several expressions beside the candidate opinion holder to be eligible for being a boundary.", "phrases": ["opinion holder extraction", "sentiment analysis", "convolution kernel"], "overall_score": 2.463139410485251, "scores": [2.583465141231851, 0.954161922809489, 0.5864857749656395], "rank_score": 1.3747042796689932} -{"id": "lewis-etal-2021-paq", "title": "PAQ: 65 Million Probably-Asked Questions and What You Can Do With Them", "abstract": "Open-domain Question Answering models that directly leverage question-answer (QA) pairs, such as closed-book QA (CBQA) models and QA-pair retrievers, show promise in terms of speed and memory compared with conventional models which retrieve and read from text corpora. QA-pair retrievers also offer interpretable answers, a high degree of control, and are trivial to update at test time with new knowledge. However, these models fall short of the accuracy of retrieve-and-read systems, as substantially less knowledge is covered by the available QA-pairs relative to text corpora like Wikipedia. To facilitate improved QA-pair models, we introduce Probably Asked Questions (PAQ), a very large resource of 65M automatically generated QA-pairs. We introduce a new QA-pair retriever, RePAQ, to complement PAQ. We find that PAQ preempts and caches test questions, enabling RePAQ to match the accuracy of recent retrieve-and-read models, whilst being significantly faster. Using PAQ, we train CBQA models which outperform comparable baselines by 5%, but trail RePAQ by over 15%, indicating the effectiveness of explicit retrieval. RePAQ can be configured for size (under 500MB) or speed (over 1K questions per second) while retaining high accuracy. Lastly, we demonstrate RePAQ's strength at selective QA, abstaining from answering when it is likely to be incorrect. 
This enables RePAQ to \u201cback-off\u201d to a more expensive state-of-the-art model, leading to a combined system which is both more accurate and 2x faster than the state-of-the-art model alone.", "phrases": ["memory", "asked questions", "paq"], "overall_score": 3.627140155547447, "scores": [2.654139323316899, 0.9233592005489772, 0.5457239406603929], "rank_score": 1.3744074881754231} -{"id": "angeli-etal-2010-simple", "title": "A Simple Domain-Independent Probabilistic Approach to Generation", "abstract": "We present a simple, robust generation system which performs content selection and surface realization in a unified, domain-independent framework. In our approach, we break up the end-to-end generation process into a sequence of local decisions, arranged hierarchically and each trained discriminatively. We deployed our system in three different domains---Robocup sportscasting, technical weather forecasts, and common weather forecasts, obtaining results comparable to state-of-the-art domain-specific systems both in terms of BLEU scores and human evaluation.", "phrases": ["probabilistic approach", "content selection", "surface realization", "decision", "database record"], "overall_score": 4.805510473347333, "scores": [1.3846960987081243, 1.5761676403339138, 1.4828946948034492, 1.3698259984453909, 1.058287555498903], "rank_score": 1.3743743975579563} -{"id": "weeds-weir-2005-co", "title": "Co-occurrence Retrieval: A Flexible Framework for Lexical Distributional Similarity", "abstract": "Techniques that exploit knowledge of distributional similarity between words have been proposed in many areas of Natural Language Processing. For example, in language modeling, the sparse data problem can be alleviated by estimating the probabilities of unseen co-occurrences of events from the probabilities of seen co-occurrences of similar events. In other applications, distributional similarity is taken to be an approximation to semantic similarity. However, due to the wide range of potential applications and the lack of a strict definition of the concept of distributional similarity, many methods of calculating distributional similarity have been proposed or adopted. In this work, a flexible, parameterized framework for calculating distributional similarity is proposed. Within this framework, the problem of finding distributionally similar words is cast as one of co-occurrence retrieval (CR) for which precision and recall can be measured by analogy with the way they are measured in document retrieval. As will be shown, a number of popular existing measures of distributional similarity are simulated with parameter settings within the CR framework. In this article, the CR framework is then used to systematically investigate three fundamental questions concerning distributional similarity. First, is the relationship of lexical similarity necessarily symmetric, or are there advantages to be gained from considering it as an asymmetric relationship? Second, are some co-occurrences inherently more salient than others in the calculation of distributional similarity? Third, is it necessary to consider the difference in the extent to which each word occurs in each co-occurrence type? Two application-based tasks are used for evaluation: automatic thesaurus generation and pseudo-disambiguation. 
It is possible to achieve significantly better results on both these tasks by varying the parameters within the CR framework rather than using other existing distributional similarity measures; it will also be shown that any single unparameterized measure is unlikely to be able to do better on both tasks. This is due to an inherent asymmetry in lexical substitutability and therefore also in lexical distributional similarity.", "phrases": ["flexible framework", "lexical distributional similarity", "co-occurrence retrieval", "well result"], "overall_score": 3.721257449656462, "scores": [2.1012905109844975, 2.015481594510027, 0.8211593680367345, 0.5586541483185339], "rank_score": 1.3741464054624482} -{"id": "cui-etal-2017-attention", "title": "Attention-over-Attention Neural Networks for Reading Comprehension", "abstract": "Cloze-style reading comprehension is a representative problem in mining relationship between document and query. In this paper, we present a simple but novel model called attention-over-attention reader for better solving cloze-style reading comprehension task. The proposed model aims to place another attention mechanism over the document-level attention and induces \u201cattended attention\u201d for final answer predictions. One advantage of our model is that it is simpler than related works while giving excellent performance. In addition to the primary model, we also propose an N-best re-ranking strategy to double check the validity of the candidates and further improve the performance. Experimental results show that the proposed methods significantly outperform various state-of-the-art systems by a large margin in public datasets, such as CNN and Children's Book Test.", "phrases": ["reading comprehension", "reader", "two-way attention mechanism", "neural network model"], "overall_score": 4.804628188080864, "scores": [1.92382523110792, 1.569690525497564, 1.4735767851070694, 0.529395715347862], "rank_score": 1.3741220642651037} -{"id": "dutta-weikum-2015-cross", "title": "Cross-Document Co-Reference Resolution using Sample-Based Clustering with Knowledge Enrichment", "abstract": "Identifying and linking named entities across information sources is the basis of knowledge acquisition and at the heart of Web search, recommendations, and analytics. An important problem in this context is cross-document co-reference resolution (CCR): computing equivalence classes of textual mentions denoting the same entity, within and across documents. Prior methods employ ranking, clustering, or probabilistic graphical models using syntactic features and distant features from knowledge bases. However, these methods exhibit limitations regarding run-time and robustness. This paper presents the CROCS framework for unsupervised CCR, improving the state of the art in two ways. First, we extend the way knowledge bases are harnessed, by constructing a notion of semantic summaries for intra-document co-reference chains using co-occurring entity mentions belonging to different chains. Second, we reduce the computational cost by a new algorithm that embeds sample-based bisection, using spectral clustering or graph partitioning, in a hierarchical clustering process. This allows scaling up CCR to large corpora. 
Experiments with three datasets show significant gains in output quality, compared to the best prior methods, and the run-time efficiency of CROCS.", "phrases": ["co-reference resolution", "clustering", "mention"], "overall_score": 2.2095973601825416, "scores": [1.9214479582321455, 1.6521644424724975, 0.5450876928716881], "rank_score": 1.3729000311921105} -{"id": "du-etal-2017-learning", "title": "Learning to Ask: Neural Question Generation for Reading Comprehension", "abstract": "We study automatic question generation for sentences from text passages in reading comprehension. We introduce an attention-based sequence learning model for the task and investigate the effect of encoding sentence- vs. paragraph-level information. In contrast to all previous work, our model does not rely on hand-crafted rules or a sophisticated NLP pipeline; it is instead trainable end-to-end via sequence-to-sequence learning. Automatic evaluation results show that our system significantly outperforms the state-of-the-art rule-based system. In human evaluations, questions generated by our system are also rated as being more natural (i.e., grammaticality, fluency) and as more difficult to answer (in terms of syntactic and lexical divergence from the original text and reasoning needed to answer).", "phrases": ["question generation", "comprehension", "input text"], "overall_score": 6.221142062378182, "scores": [2.6663640117367464, 0.9199044148599904, 0.5313303630882309], "rank_score": 1.3725329298949893} -{"id": "saha-etal-2021-explagraphs", "title": "ExplaGraphs: An Explanation Graph Generation Task for Structured Commonsense Reasoning", "abstract": "Recent commonsense-reasoning tasks are typically discriminative in nature, where a model answers a multiple-choice question for a certain context. Discriminative tasks are limiting because they fail to adequately evaluate the model's ability to reason and explain predictions with underlying commonsense knowledge. They also allow such models to use reasoning shortcuts and not be \u201cright for the right reasons\u201d. In this work, we present ExplaGraphs, a new generative and structured commonsense-reasoning task (and an associated dataset) of explanation graph generation for stance prediction. Specifically, given a belief and an argument, a model has to predict if the argument supports or counters the belief and also generate a commonsense-augmented graph that serves as non-trivial, complete, and unambiguous explanation for the predicted stance. We collect explanation graphs through a novel Create-Verify-And-Refine graph collection framework that improves the graph quality (up to 90%) via multiple rounds of verification and refinement. A significant 79% of our graphs contain external commonsense nodes with diverse structures and reasoning depths. Next, we propose a multi-level evaluation framework, consisting of automatic metrics and human evaluation, that check for the structural and semantic correctness of the generated graphs and their degree of match with ground-truth graphs. 
Finally, we present several structured, commonsense-augmented, and text generation models as strong starting points for this explanation graph generation task, and observe that there is a large gap with human performance, thereby encouraging future work for this new challenging task.", "phrases": ["explanation graph", "semantic correctness", "explagraphs"], "overall_score": 2.6705801738054142, "scores": [2.3467824511661504, 1.2322488559244993, 0.53818886636172], "rank_score": 1.3724067244841232} -{"id": "iter-etal-2018-automatic", "title": "Automatic Detection of Incoherent Speech for Diagnosing Schizophrenia", "abstract": "Schizophrenia is a mental disorder which afflicts an estimated 0.7% of adults worldwide. It affects many areas of mental function, often evident from incoherent speech. Diagnosing schizophrenia relies on subjective judgments resulting in disagreements even among trained clinicians. Recent studies have proposed the use of natural language processing for diagnosis by drawing on automatically-extracted linguistic features like discourse coherence and lexicon. Here, we present the first benchmark comparison of previously proposed coherence models for detecting symptoms of schizophrenia and evaluate their performance on a new dataset of recorded interviews between subjects and clinicians. We also present two alternative coherence metrics based on modern sentence embedding techniques that outperform the previous methods on our dataset. Lastly, we propose a novel computational model for reference incoherence based on ambiguous pronoun usage and show that it is a highly predictive feature on our data. While the number of subjects is limited in this pilot study, our results suggest new directions for diagnosing common symptoms of schizophrenia.", "phrases": ["incoherent speech", "schizophrenia", "linguistic feature"], "overall_score": 2.456524017860312, "scores": [2.691445339457519, 0.8983745442757218, 0.5232165925053769], "rank_score": 1.3710121587462059} -{"id": "finkel-etal-2005-incorporating", "title": "Incorporating Non-local Information into Information Extraction Systems by Gibbs Sampling", "abstract": "Most current statistical natural language processing models use only local features so as to permit dynamic programming in inference, but this makes them unable to fully account for the long distance structure that is prevalent in language use. We show how to solve this dilemma with Gibbs sampling, a simple Monte Carlo method used to perform approximate inference in factored probabilistic models. By using simulated annealing in place of Viterbi decoding in sequence models such as HMMs, CMMs, and CRFs, it is possible to incorporate non-local structure while preserving tractable inference. We use this technique to augment an existing CRF-based information extraction system with long-distance dependency models, enforcing label consistency and extraction template consistency constraints. 
This technique results in an error reduction of up to 9% over state-of-the-art systems on two established information extraction tasks.", "phrases": ["non-local information", "information extraction", "gibbs sampling", "stanford ner", "coarse-grained type"], "overall_score": 6.057843073182952, "scores": [2.7619028449110683, 0.887589524535303, 1.7218031362733313, 0.9401498689172473, 0.5431162749669071], "rank_score": 1.3709123299207715} -{"id": "espinosa-etal-2008-hypertagging", "title": "Hypertagging: Supertagging for Surface Realization with CCG", "abstract": "In lexicalized grammatical formalisms, it is possible to separate lexical category assignment from the combinatory processes that make use of such categories, such as parsing and realization. We adapt techniques from supertagging \u2014 a relatively recent technique that performs complex lexical tagging before full parsing (Bangalore and Joshi, 1999; Clark, 2002) \u2014 for chart realization in OpenCCG, an open-source NLP toolkit for CCG. We call this approach hypertagging, as it operates at a level \u201cabove\u201d the syntax, tagging semantic representations with syntactic lexical categories. Our results demonstrate that a hypertagger-informed chart realizer can achieve substantial improvements in realization speed (being approximately twice as fast) with superior realization quality.", "phrases": ["supertag", "surface realization", "ccg", "lexical category"], "overall_score": 3.011313928636288, "scores": [2.479553291948404, 1.3498670931544134, 1.0628441638506079, 0.5897675741116121], "rank_score": 1.3705080307662592} -{"id": "pantel-pennacchiotti-2006-espresso", "title": "Espresso: Leveraging Generic Patterns for Automatically Harvesting Semantic Relations", "abstract": "In this paper, we present Espresso, a weakly-supervised, general-purpose, and accurate algorithm for harvesting semantic relations. The main contributions are: i) a method for exploiting generic patterns by filtering incorrect instances using the Web; and ii) a principled measure of pattern and instance reliability enabling the filtering algorithm. We present an empirical comparison of Espresso with various state of the art systems, on different size and genre corpora, on extracting various general and specific relations. Experimental results show that our exploitation of generic patterns substantially increases system recall with small effect on overall precision.", "phrases": ["semantic relation", "recall", "espresso", "pattern-based approach", "broad coverage"], "overall_score": 5.087241921235854, "scores": [3.132220596031465, 1.5643053841560948, 1.0508055293924832, 0.5617147480709529, 0.5404800607525124], "rank_score": 1.3699052636807019} -{"id": "feng-etal-2022-language", "title": "Language-agnostic BERT Sentence Embedding", "abstract": "While BERT is an effective method for learning monolingual sentence embeddings for semantic similarity and embedding based transfer learning, BERT based cross-lingual sentence embeddings have yet to be explored. We systematically investigate methods for learning multilingual sentence embeddings by combining the best methods for learning monolingual and cross-lingual representations including: masked language modeling (MLM), translation language modeling (TLM), dual encoder translation ranking, and additive margin softmax. We show that introducing a pre-trained multilingual language model dramatically reduces the amount of parallel training data required to achieve good performance by 80%. 
Composing the best of these methods produces a model that achieves 83.7% bi-text retrieval accuracy over 112 languages on Tatoeba, well above the 65.5% achieved by LASER, while still performing competitively on monolingual transfer learning benchmarks. Parallel data mined from CommonCrawl using our best model is shown to train competitive NMT models for en-zh and en-de. We publicly release our best multilingual sentence embedding model for 109+ languages at .", "phrases": ["sentence embedding", "language model", "language-agnostic bert sentence"], "overall_score": 4.46068444316381, "scores": [1.5189659820655397, 1.4796539613872859, 1.1087025908321289], "rank_score": 1.3691075114283182} -{"id": "zhou-kong-2009-global", "title": "Global Learning of Noun Phrase Anaphoricity in Coreference Resolution via Label Propagation", "abstract": "Knowledge of noun phrase anaphoricity might be profitably exploited in coreference resolution to bypass the resolution of non-anaphoric noun phrases. However, it is surprising to notice that recent attempts to incorporate automatically acquired anaphoricity information into coreference resolution have been somewhat disappointing. This paper employs a global learning method in determining the anaphoricity of noun phrases via a label propagation algorithm to improve learning-based coreference resolution. In particular, two kinds of kernels, i.e. the feature-based RBF kernel and the convolution tree kernel, are employed to compute the anaphoricity similarity between two noun phrases. Experiments on the ACE 2003 corpus demonstrate the effectiveness of our method in anaphoricity determination of noun phrases and its application in learning-based coreference resolution.", "phrases": ["noun phrase anaphoricity", "coreference resolution", "label propagation algorithm"], "overall_score": 2.453085202740186, "scores": [2.3134519910329763, 0.8725537705018596, 0.92127299691992], "rank_score": 1.3690929194849186} -{"id": "zhao-bethard-2020-berts", "title": "How does BERT's attention change when you fine-tune? An analysis methodology and a case study in negation scope", "abstract": "Large pretrained language models like BERT, after fine-tuning to a downstream task, have achieved high performance on a variety of NLP problems. Yet explaining their decisions is difficult despite recent work probing their internal representations. We propose a procedure and analysis methods that take a hypothesis of how a transformer-based model might encode a linguistic phenomenon, and test the validity of that hypothesis based on a comparison between knowledge-related downstream tasks with downstream control tasks, and measurement of cross-dataset consistency. We apply this methodology to test BERT and RoBERTa on a hypothesis that some attention heads will consistently attend from a word in negation scope to the negation cue. We find that after fine-tuning BERT and RoBERTa on a negation scope task, the average attention head improves its sensitivity to negation and its attention consistency across negation datasets compared to the pre-trained models. 
However, only the base models (not the large models) improve compared to a control task, indicating there is evidence for a shallow encoding of negation only in the base models.", "phrases": ["bert", "methodology", "negation scope", "attention head"], "overall_score": 2.6637470378390513, "scores": [2.1548526042621177, 1.8636837110142548, 0.9023237013934884, 0.5547207322814279], "rank_score": 1.368895187237822} -{"id": "sun-etal-2009-chinese", "title": "Chinese Semantic Role Labeling with Shallow Parsing", "abstract": "Most existing systems for Chinese Semantic Role Labeling (SRL) make use of full syntactic parses. In this paper, we evaluate SRL methods that take partial parses as inputs. We first extend the study on Chinese shallow parsing presented in (Chen et al., 2006) by raising a set of additional features. On the basis of our shallow parser, we implement SRL systems which cast SRL as the classification of syntactic chunks with IOB2 representation for semantic roles (i.e. semantic chunks). Two labeling strategies are presented: 1) directly tagging semantic chunks in one-stage, and 2) identifying argument boundaries as a chunking task and labeling their semantic types as a classification task. For both methods, we present encouraging results, achieving significant improvements over the best reported SRL performance in the literature. Additionally, we put forward a rule-based algorithm to automatically acquire Chinese verb formation, which is empirically shown to enhance SRL.", "phrases": ["semantic role", "shallow parsing", "srl", "basis"], "overall_score": 3.007186793915351, "scores": [2.9661245149422917, 1.3511408694599458, 0.6295727644202703, 0.5276806144093877], "rank_score": 1.3686296908079738} -{"id": "tonelli-pianta-2009-novel", "title": "A novel approach to mapping FrameNet lexical units to WordNet synsets (short paper)", "abstract": "In this paper we present a novel approach to mapping FrameNet lexical units to WordNet synsets in order to automatically enrich the lexical unit set of a given frame. While the mapping approaches proposed in the past mainly rely on the semantic similarity between lexical units in a frame and lemmas in a synset, we exploit the definition of the lexical entries in FrameNet and the WordNet glosses to find the best candidate synset(s) for the mapping. Evaluation results are also reported and discussed.", "phrases": ["novel approach", "mapping framenet", "wordnet synset"], "overall_score": 2.2023400924773693, "scores": [2.455771036334623, 0.8492412590331959, 0.8001602163444892], "rank_score": 1.3683908372374358} -{"id": "fazly-stevenson-2007-distinguishing", "title": "Distinguishing Subtypes of Multiword Expressions Using Linguistically-Motivated Statistical Measures", "abstract": "We identify several classes of multiword expressions that each require a different encoding in a (computational) lexicon, as well as a different treatment within a computational system. We examine linguistic properties pertaining to the degree of semantic idiosyncrasy of these classes of expressions. 
Accordingly, we propose statistical measures to quantify each property, and use the measures to automatically distinguish the classes.", "phrases": ["statistical measure", "idiom", "syntactic behavior"], "overall_score": 4.404408607405511, "scores": [2.2876109110391045, 1.2265566320413401, 0.5907518577036968], "rank_score": 1.3683064669280471} -{"id": "passarotti-etal-2017-lemlat", "title": "The Lemlat 3.0 Package for Morphological Analysis of Latin", "abstract": "This paper introduces the main components of the downloadable package of the 3.0 version of the morphological analyser for Latin Lemlat. The processes of word form analysis and treatment of spelling variation performed by the tool are detailed, as well as the different output formats and the connection of the results with a recently built resource for derivational morphology of Latin. A light evaluation of the tool\u2019s lexical coverage against a diachronic vocabulary of the entire Latin world is also provided.", "phrases": ["latin", "morphological analyzer", "lemma"], "overall_score": 2.4515738653081085, "scores": [2.6577629821947655, 0.9128359831912326, 0.5341493126253165], "rank_score": 1.3682494260037714} -{"id": "conneau-etal-2018-xnli", "title": "XNLI: Evaluating Cross-lingual Sentence Representations", "abstract": "State-of-the-art natural language processing systems rely on supervision in the form of annotated data to learn competent models. These models are generally trained on data in a single language (usually English), and cannot be directly used beyond that language. Since collecting data in every language is not realistic, there has been a growing interest in cross-lingual language understanding (XLU) and low-resource cross-language transfer. In this work, we construct an evaluation set for XLU by extending the development and test sets of the Multi-Genre Natural Language Inference Corpus (MultiNLI) to 14 languages, including low-resource languages such as Swahili and Urdu. We hope that our dataset, dubbed XNLI, will catalyze research in cross-lingual sentence understanding by providing an informative standard evaluation task. In addition, we provide several baselines for multilingual sentence understanding, including two based on machine translation systems, and two that use parallel data to train aligned multilingual bag-of-words and LSTM encoders. We find that XNLI represents a practical and challenging evaluation suite, and that directly translating the test data yields the best performance among available baselines.", "phrases": ["natural language inference", "cross-lingual transfer", "nli", "representation learning", "zero-shot"], "overall_score": 6.061063379940709, "scores": [2.1341284604388817, 1.8624313027225563, 1.76248093114378, 0.5451245456656832, 0.5355029917679732], "rank_score": 1.367933646347775} -{"id": "nangia-bowman-2019-human", "title": "Human vs. Muppet: A Conservative Estimate of Human Performance on the GLUE Benchmark", "abstract": "The GLUE benchmark (Wang et al., 2019b) is a suite of language understanding tasks which has seen dramatic progress in the past year, with average performance moving from 70.0 at launch to 83.9, state of the art at the time of writing (May 24, 2019). Here, we measure human performance on the benchmark, in order to learn whether significant headroom remains for further progress. 
We provide a conservative estimate of human performance on the benchmark through crowdsourcing: Our annotators are non-experts who must learn each task from a brief set of instructions and 20 examples. In spite of limited training, these annotators robustly outperform the state of the art on six of the nine GLUE tasks and achieve an average score of 87.1. Given the fast pace of progress however, the headroom we observe is quite limited. To reproduce the data-poor setting that our annotators must learn in, we also train the BERT model (Devlin et al., 2019) in limited-data regimes, and conclude that low-resource sentence classification remains a challenge for modern neural network approaches to text understanding.", "phrases": ["human performance", "glue benchmark", "annotator"], "overall_score": 2.2013511562384394, "scores": [1.8048936165929736, 1.7646370044885282, 0.533798508730908], "rank_score": 1.3677763766041366} -{"id": "mizukami-etal-2016-analyzing", "title": "Analyzing the Effect of Entrainment on Dialogue Acts", "abstract": "Entrainment is a factor in dialogue that affects not only human-human but also human-machine interaction. While entrainment on the lexical level is well documented, less is known about how entrainment affects dialogue on a more abstract, structural level. In this paper, we investigate the effect of entrainment on dialogue acts and on lexical choice given dialogue acts, as well as how entrainment changes during a dialogue. We also define a novel measure of entrainment to measure these various types of entrainment. These results may serve as guidelines for dialogue systems that would like to entrain with users in a similar manner.", "phrases": ["entrainment", "dialogue act", "lexical choice"], "overall_score": 1.8961067974033559, "scores": [1.909772578130647, 1.6712875282758215, 0.5221957040080459], "rank_score": 1.367751936804838} -{"id": "li-etal-2019-semi-supervised-domain", "title": "Semi-supervised Domain Adaptation for Dependency Parsing", "abstract": "During the past decades, due to the lack of sufficient labeled data, most studies on cross-domain parsing focus on unsupervised domain adaptation, assuming there is no target-domain training data. However, unsupervised approaches make limited progress so far due to the intrinsic difficulty of both domain adaptation and parsing. This paper tackles the semi-supervised domain adaptation problem for Chinese dependency parsing, based on two newly-annotated large-scale domain-aware datasets. We propose a simple domain embedding approach to merge the source- and target-domain training data, which is shown to be more effective than both direct corpus concatenation and multi-task learning. In order to utilize unlabeled target-domain data, we employ the recent contextualized word representations and show that a simple fine-tuning procedure can further boost cross-domain parsing accuracy by large margin.", "phrases": ["dependency parsing", "semi-supervised domain adaptation", "extra domain"], "overall_score": 2.4506653995050853, "scores": [2.540358609880853, 0.9855975470553086, 0.5772710478195773], "rank_score": 1.367742401585246} -{"id": "eisenstein-barzilay-2008-bayesian", "title": "Bayesian Unsupervised Topic Segmentation", "abstract": "This paper describes a novel Bayesian approach to unsupervised topic segmentation. Unsupervised systems for this task are driven by lexical cohesion: the tendency of well-formed segments to induce a compact and consistent lexical distribution. 
We show that lexical cohesion can be placed in a Bayesian context by modeling the words in each topic segment as draws from a multinomial language model associated with the segment; maximizing the observation likelihood in such a model yields a lexically-cohesive segmentation. This contrasts with previous approaches, which relied on hand-crafted cohesion metrics. The Bayesian framework provides a principled way to incorporate additional features such as cue phrases, a powerful indicator of discourse structure that has not been previously used in unsupervised segmentation systems. Our model yields consistent improvements over an array of state-of-the-art systems on both text and speech datasets. We also show that both an entropy-based analysis and a well-known previous technique can be derived as special cases of the Bayesian framework.", "phrases": ["topic segmentation", "bayesian approach", "lexical cohesion"], "overall_score": 4.557424246801128, "scores": [1.8701404430997266, 1.2864979013769064, 0.9464329702202282], "rank_score": 1.3676904382322872} -{"id": "wang-etal-2017-gated", "title": "Gated Self-Matching Networks for Reading Comprehension and Question Answering", "abstract": "In this paper, we present the gated self-matching networks for reading comprehension style question answering, which aims to answer questions from a given passage. We first match the question and passage with gated attention-based recurrent networks to obtain the question-aware passage representation. Then we propose a self-matching attention mechanism to refine the representation by matching the passage against itself, which effectively encodes information from the whole passage. We finally employ the pointer networks to locate the positions of answers from the passages. We conduct extensive experiments on the SQuAD dataset. The single model achieves 71.3% on the evaluation metrics of exact match on the hidden test set, while the ensemble model further boosts the results to 75.9%. At the time of submission of the paper, our model holds the first place on the SQuAD leaderboard for both single and ensemble model.", "phrases": ["reading comprehension", "passage", "self-match attention mechanism"], "overall_score": 5.235887842861398, "scores": [1.7563961467374187, 1.7651399397966347, 0.5811368728578312], "rank_score": 1.3675576531306284} -{"id": "ghazvininejad-etal-2019-mask", "title": "Mask-Predict: Parallel Decoding of Conditional Masked Language Models", "abstract": "Most machine translation systems generate text autoregressively from left to right. We, instead, use a masked language modeling objective to train a model to predict any subset of the target words, conditioned on both the input text and a partially masked target translation. This approach allows for efficient iterative decoding, where we first predict all of the target words non-autoregressively, and then repeatedly mask out and regenerate the subset of words that the model is least confident about. By applying this strategy for a constant number of iterations, our model improves state-of-the-art performance levels for non-autoregressive and parallel decoding translation models by over 4 BLEU on average. 
It is also able to reach within about 1 BLEU point of a typical left-to-right transformer model, while decoding significantly faster.", "phrases": ["parallel decoding", "language model", "mask-predict", "nat", "neural machine translation"], "overall_score": 5.044617762847203, "scores": [2.2899624712742863, 0.8255616862996784, 1.6099629517388248, 1.2094166315376556, 0.902698064246915], "rank_score": 1.367520361019472} -{"id": "ma-etal-2017-detect", "title": "Detect Rumors in Microblog Posts Using Propagation Structure via Kernel Learning", "abstract": "How does fake news go viral via social media? How does its propagation pattern differ from real stories? In this paper, we attempt to address the problem of identifying rumors, i.e., fake information, out of microblog posts based on their propagation structure. We firstly model microblog posts diffusion with propagation trees, which provide valuable clues on how an original message is transmitted and developed over time. We then propose a kernel-based method called Propagation Tree Kernel, which captures high-order patterns differentiating different types of rumors by evaluating the similarities between their propagation tree structures. Experimental results on two real-world datasets demonstrate that the proposed kernel-based approach can detect rumors more quickly and accurately than state-of-the-art rumor detection models.", "phrases": ["rumor", "propagation tree", "twitter", "social medium"], "overall_score": 4.226250289762838, "scores": [1.9373946833468851, 1.3816594471217907, 1.08723732566247, 1.062737654318187], "rank_score": 1.367257277612333} -{"id": "li-etal-2017-modeling", "title": "Modeling Source Syntax for Neural Machine Translation", "abstract": "Even though a linguistics-free sequence to sequence model in neural machine translation (NMT) has certain capability of implicitly learning syntactic information of source sentences, this paper shows that source syntax can be explicitly incorporated into NMT effectively to provide further improvements. Specifically, we linearize parse trees of source sentences to obtain structural label sequences. On the basis, we propose three different sorts of encoders to incorporate source syntax into NMT: 1) Parallel RNN encoder that learns word and label annotation vectors parallelly; 2) Hierarchical RNN encoder that learns word and label annotation vectors in a two-level hierarchy; and 3) Mixed RNN encoder that stitchingly learns word and label annotation vectors over sequences where words and labels are mixed. Experimentation on Chinese-to-English translation demonstrates that all the three proposed syntactic encoders are able to improve translation accuracy. It is interesting to note that the simplest RNN encoder, i.e., Mixed RNN encoder yields the best performance with a significant improvement of 1.4 BLEU points. Moreover, an in-depth analysis from several perspectives is provided to reveal how source syntax benefits NMT.", "phrases": ["source syntax", "neural machine translation", "label sequence", "linguistic feature", "simple rnn encoder"], "overall_score": 4.162177482810464, "scores": [2.996262835206477, 1.8449689734274723, 0.8518767189461607, 0.5715705568363224, 0.5708387479349374], "rank_score": 1.367103566470274} -{"id": "branavan-etal-2009-reinforcement", "title": "Reinforcement Learning for Mapping Instructions to Actions", "abstract": "In this paper, we present a reinforcement learning approach for mapping natural language instructions to sequences of executable actions. 
We assume access to a reward function that defines the quality of the executed actions. During training, the learner repeatedly constructs action sequences for a set of documents, executes those actions, and observes the resulting reward. We use a policy gradient algorithm to estimate the parameters of a log-linear model for action selection. We apply our method to interpret instructions in two domains --- Windows troubleshooting guides and game tutorials. Our results demonstrate that this method can rival supervised learning techniques while requiring few or no annotated training examples.", "phrases": ["mapping instruction", "action", "reinforcement learning", "environment", "supervision signal"], "overall_score": 4.1601160394301, "scores": [3.067068462295474, 1.3978238783354238, 0.9779752159170418, 0.8488886262878493, 0.5403761540519545], "rank_score": 1.3664264673775488} -{"id": "williams-etal-2014-finding", "title": "Finding Good Enough: A Task-Based Evaluation of Query Biased Summarization for Cross-Language Information Retrieval", "abstract": "In this paper we present our task-based evaluation of query biased summarization for cross-language information retrieval (CLIR) using relevance prediction. We describe our 13 summarization methods each from one of four summarization strategies. We show how well our methods perform using Farsi text from the CLEF 2008 shared-task, which we translated to English automatically. We report precision/recall/F1, accuracy and time-on-task. We found that different summarization methods perform optimally for different evaluation metrics, but overall query biased word clouds are the best summarization strategy. In our analysis, we demonstrate that using the ROUGE metric on our sentence-based summaries cannot make the same kinds of distinctions as our evaluation framework does. Finally, we present our recommendations for creating much-needed evaluation standards and datasets.", "phrases": ["task-based evaluation", "cross-language information retrieval", "summarization method"], "overall_score": 1.5006494907948393, "scores": [1.847749224963381, 1.666906232176981, 0.5831946386767936], "rank_score": 1.3659500319390518} -{"id": "ma-etal-2019-domain", "title": "Domain Adaptation with BERT-based Domain Classification and Data Selection", "abstract": "The performance of deep neural models can deteriorate substantially when there is a domain shift between training and test data. For example, the pre-trained BERT model can be easily fine-tuned with just one additional output layer to create a state-of-the-art model for a wide range of tasks. However, the fine-tuned BERT model suffers considerably at zero-shot when applied to a different domain. In this paper, we present a novel two-step domain adaptation framework based on curriculum learning and domain-discriminative data selection. The domain adaptation is conducted in a mostly unsupervised manner using a small target domain validation set for hyper-parameter tuning. We tested the framework on four large public datasets with different domain similarities and task types. 
Our framework outperforms a popular discrepancy-based domain adaptation method on most transfer tasks while consuming only a fraction of the training budget.", "phrases": ["data selection", "bert", "target domain", "adversarial learning"], "overall_score": 3.503289706804421, "scores": [3.128583428232398, 1.2270937439716936, 0.5712915993642876, 0.5363589103029471], "rank_score": 1.3658319204678313} -{"id": "maekawa-etal-2010-design", "title": "Design, Compilation, and Preliminary Analyses of Balanced Corpus of Contemporary Written Japanese", "abstract": "Compilation of a 100 million words balanced corpus called the Balanced Corpus of Contemporary Written Japanese (or BCCWJ) is underway at the National Institute for Japanese Language and Linguistics. The corpus covers a wide range of text genres including books, magazines, newspapers, governmental white papers, textbooks, minutes of the National Diet, internet text (bulletin board and blogs) and so forth, and when possible, samples are drawn from the rigidly defined statistical populations by means of random sampling. All texts are dually POS-analyzed based upon two different, but mutually related, definitions of \u2018word.\u2019 Currently, more than 90 million words have been sampled and XML annotated with respect to text-structure and lexical and character information. A preliminary linear discriminant analysis of text genres using the data of POS frequencies and sentence length revealed it was possible to classify the text genres with a correct identification rate of 88% as far as the samples of books, newspapers, whitepapers, and internet bulletin boards are concerned. When the samples of blogs were included in this data set, however, the identification rate went down to 68%, suggesting the considerable variance of the blog texts in terms of the textual register and style.", "phrases": ["balanced corpus", "contemporary written japanese", "national institute", "linguistics"], "overall_score": 1.8930993529738016, "scores": [1.9735686707010904, 1.753314967391593, 0.8694575633774532, 0.8659888954206814], "rank_score": 1.3655825242227044} -{"id": "singh-etal-2015-detection", "title": "Detection of Multiword Expressions for Hindi Language using Word Embeddings and WordNet-based Features", "abstract": "Detection of Multiword Expressions (MWEs) is a challenging problem faced by several natural language processing applications. The difficulty emanates from the task of detecting MWEs with respect to a given context. In this paper, we propose approaches that use Word Embeddings and WordNet-based features for the detection of MWEs for Hindi language. These approaches are restricted to two types of MWEs viz., noun compounds and noun+verb compounds. The results obtained indicate that using linguistic information from a rich lexical resource such as WordNet, help in improving the accuracy of MWEs detection. It also demonstrates that the linguistic information which word embeddings capture from a corpus can be comparable to that provided by WordNet. 
Thus, we can say that, for the detection of the above-mentioned MWEs, word embeddings can be a reasonable alternative to WordNet, especially for those languages whose WordNets do not have good coverage.", "phrases": ["hindi language", "word embeddings", "noun+verb compound"], "overall_score": 1.8928268700162532, "scores": [1.7991009129477349, 1.76152004247644, 0.5355369525265652], "rank_score": 1.3653859693169135} -{"id": "yimam-etal-2018-report", "title": "A Report on the Complex Word Identification Shared Task 2018", "abstract": "We report the findings of the second Complex Word Identification (CWI) shared task organized as part of the BEA workshop co-located with NAACL-HLT'2018. The second CWI shared task featured multilingual and multi-genre datasets divided into four tracks: English monolingual, German monolingual, Spanish monolingual, and a multilingual track with a French test set, and two tasks: binary classification and probabilistic classification. A total of 12 teams submitted their results in different task/track combinations and 11 of them wrote system description papers that are referred to in this report and appear in the BEA workshop proceedings.", "phrases": ["report", "complex word identification", "complexity", "cwi", "non-native speaker"], "overall_score": 4.642871716366045, "scores": [0.8137177477963765, 2.097472543313726, 1.941349784527052, 1.0744532897365653, 0.8983554682436447], "rank_score": 1.3650697667234728} -{"id": "devlin-etal-2014-fast", "title": "Fast and Robust Neural Network Joint Models for Statistical Machine Translation", "abstract": "Recent work has shown success in using neural network language models (NNLMs) as features in MT systems. Here, we present a novel formulation for a neural network joint model (NNJM), which augments the NNLM with a source context window. Our model is purely lexicalized and can be integrated into any MT decoder. We also present several variations of the NNJM which provide significant additive improvements.", "phrases": ["joint model", "statistical machine translation", "network language model", "neural network model", "objective function"], "overall_score": 5.196142941432307, "scores": [2.437038697506638, 1.8068362723170315, 1.1808532553304216, 0.8406983502243205, 0.5596373164112487], "rank_score": 1.365012778357932} -{"id": "cai-etal-2007-improving", "title": "Improving Word Sense Disambiguation Using Topic Features", "abstract": "This paper presents a novel approach for exploiting the global context for the task of word sense disambiguation (WSD). This is done by using topic features constructed using the latent dirichlet allocation (LDA) algorithm on unlabeled data. The features are", "phrases": ["word sense disambiguation", "topic feature", "global context", "latent dirichlet allocation"], "overall_score": 2.999183457472992, "scores": [2.5679616831015224, 1.8235684099186769, 0.5403891268964774, 0.528029641767763], "rank_score": 1.36498721542111} -{"id": "kong-etal-2014-dependency", "title": "A Dependency Parser for Tweets", "abstract": "We describe a new dependency parser for English tweets, TWEEBOPARSER. The parser builds on several contributions: new syntactic annotations for a corpus of tweets (TWEEBANK), with conventions informed by the domain; adaptations to a statistical parsing algorithm; and a new approach to exploiting out-of-domain Penn Treebank data. 
Our experiments show that the parser achieves over 80% unlabeled attachment accuracy on our new, high-quality test set and measure the benefit of our contributions. Our dataset and parser can be found at http://www.ark.cs.cmu.edu/TweetNLP.", "phrases": ["dependency parser", "english tweet", "tweeboparser", "social medium text"], "overall_score": 4.641934891755986, "scores": [3.2091926631769625, 0.8392044348599553, 0.8295436861295471, 0.5812365241348673], "rank_score": 1.364794327075333} -{"id": "welleck-etal-2019-dialogue", "title": "Dialogue Natural Language Inference", "abstract": "Consistency is a long standing issue faced by dialogue models. In this paper, we frame the consistency of dialogue agents as natural language inference (NLI) and create a new natural language inference dataset called Dialogue NLI. We propose a method which demonstrates that a model trained on Dialogue NLI can be used to improve the consistency of a dialogue model, and evaluate the method with human evaluation and with automatic metrics on a suite of evaluation sets designed to measure a dialogue model's consistency.", "phrases": ["natural language inference", "nli", "dialoguenli"], "overall_score": 4.391298159783304, "scores": [1.6821655846140207, 1.4128254293293279, 0.9977094181677056], "rank_score": 1.3642334773703515} -{"id": "liu-etal-2019-tree", "title": "Tree-structured Decoding for Solving Math Word Problems", "abstract": "Automatically solving math word problems is an interesting research topic that needs to bridge natural language descriptions and formal math equations. Previous studies introduced end-to-end neural network methods, but these approaches did not efficiently consider an important characteristic of the equation, i.e., an abstract syntax tree. To address this problem, we propose a tree-structured decoding method that generates the abstract syntax tree of the equation in a top-down manner. In addition, our approach can automatically stop during decoding without a redundant stop token. The experimental results show that our method achieves single model state-of-the-art performance on Math23K, which is the largest dataset on this task.", "phrases": ["math word problem", "syntax tree", "top-down manner", "mwp"], "overall_score": 3.600277753682402, "scores": [2.8850040175732197, 1.1635810283474934, 0.8794026366678716, 0.5289271229829601], "rank_score": 1.3642287013928862} -{"id": "daudert-ahmadi-2019-cofif", "title": "CoFiF: A Corpus of Financial Reports in French Language", "abstract": "In an era when machine learning and artificial intelligence have huge momentum, the data demand to train and test models is steadily growing. We introduce CoFiF, the first corpus comprising company reports in the French language. It contains over 188 million tokens in 2655 reports, covering reference documents, annual, semestrial and trimestrial reports. Our main focus is on the 60 largest French companies listed in France\u2019s main stock indices CAC40 and CAC Next 20. The corpus spans over 20 years, ranging from 1995 to 2018. To evaluate this novel collection of organizational writing, we use CoFiF to generate two character-level language models, a forward and a backward one, which we use to demonstrate the corpus potential on business, economics, and management research in the French language. 
The corpus is accessible on Github 1.", "phrases": ["french language", "trimestrial report", "cofif"], "overall_score": 1.498541029126196, "scores": [1.8226314878079843, 1.7139181391362805, 0.5555428553169758], "rank_score": 1.3640308274204136} -{"id": "yin-etal-2019-benchmarking", "title": "Benchmarking Zero-shot Text Classification: Datasets, Evaluation and Entailment Approach", "abstract": "Zero-shot text classification (0Shot-TC) is a challenging NLU problem to which little attention has been paid by the research community. 0Shot-TC aims to associate an appropriate label with a piece of text, irrespective of the text domain and the aspect (e.g., topic, emotion, event, etc.) described by the label. And there are only a few articles studying 0Shot-TC, all focusing only on topical categorization which, we argue, is just the tip of the iceberg in 0Shot-TC. In addition, the chaotic experiments in literature make no uniform comparison, which blurs the progress. This work benchmarks the 0Shot-TC problem by providing unified datasets, standardized evaluations, and state-of-the-art baselines. Our contributions include: i) The datasets we provide facilitate studying 0Shot-TC relative to conceptually different and diverse aspects: the \u201ctopic\u201d aspect includes \u201csports\u201d and \u201cpolitics\u201d as labels; the \u201cemotion\u201d aspect includes \u201cjoy\u201d and \u201canger\u201d; the \u201csituation\u201d aspect includes \u201cmedical assistance\u201d and \u201cwater shortage\u201d. ii) We extend the existing evaluation setup (label-partially-unseen) \u2013 given a dataset, train on some labels, test on all labels \u2013 to include a more challenging yet realistic evaluation label-fully-unseen 0Shot-TC (Chang et al., 2008), aiming at classifying text snippets without seeing task specific training data at all. iii) We unify the 0Shot-TC of diverse aspects within a textual entailment formulation and study it this way.", "phrases": ["zero-shot text classification", "entailment approach", "natural language inference", "pre-trained nli model"], "overall_score": 4.638628358780072, "scores": [2.550000490655968, 1.289826546914165, 1.0691658227236793, 0.5462957786891892], "rank_score": 1.3638221597457505} -{"id": "miwa-bansal-2016-end", "title": "End-to-End Relation Extraction using LSTMs on Sequences and Tree Structures", "abstract": "We present a novel end-to-end neural model to extract entities and relations between them. Our recurrent neural network based model captures both word sequence and dependency tree substructure information by stacking bidirectional tree-structured LSTM-RNNs on bidirectional sequential LSTM-RNNs. This allows our model to jointly represent both entities and relations with shared parameters in a single model. We further encourage detection of entities during training and use of entity information in relation extraction via entity pretraining and scheduled sampling. Our model improves over the state-of-the-art feature-based model on end-to-end relation extraction, achieving 12.1% and 5.7% relative error reductions in F1-score on ACE2005 and ACE2004, respectively. We also show that our LSTM-RNN based model compares favorably to the state-of-the-art CNN based model (in F1-score) on nominal relation classification (SemEval-2010 Task 8). 
Finally, we present an extensive ablation analysis of several model components.", "phrases": ["dependency tree", "end-to-end relation extraction", "lstm model", "bottom-up", "entity pair"], "overall_score": 6.1956307391951375, "scores": [2.60423293009184, 1.8955437409418163, 1.2470645421093849, 0.5368042011254576, 0.5347881632458842], "rank_score": 1.3636867155028765} -{"id": "kamath-etal-2020-selective", "title": "Selective Question Answering under Domain Shift", "abstract": "To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities only answers 48% at 80% accuracy.", "phrases": ["domain shift", "selective question", "out-of-domain setting", "identification", "confidence"], "overall_score": 4.0139410529697885, "scores": [2.94869538149378, 2.2251688920189494, 0.5863668296809849, 0.5355117918185572, 0.5203960730062153], "rank_score": 1.3632277936036972} -{"id": "mrksic-etal-2016-counter", "title": "Counter-fitting Word Vectors to Linguistic Constraints", "abstract": "In this work, we present a novel counter-fitting method which injects antonymy and synonymy constraints into vector space representations in order to improve the vectors' capability for judging semantic similarity. Applying this method to publicly available pre-trained word vectors leads to a new state of the art performance on the SimLex-999 dataset. We also show how the method can be used to tailor the word vector space for the downstream task of dialogue state tracking, resulting in robust improvements across different dialogue domains.", "phrases": ["word vector", "downstream task", "ppdb", "post-processor", "distance"], "overall_score": 4.6804150828934405, "scores": [3.9210737765303434, 0.8838094054184497, 0.844350434370081, 0.596034738774729, 0.5695722430022916], "rank_score": 1.362968119619179} -{"id": "sutton-mccallum-2005-joint", "title": "Joint Parsing and Semantic Role Labeling", "abstract": "A striking feature of human syntactic processing is that it is context-dependent, that is, it seems to take into account semantic information from the discourse context and world knowledge. In this paper, we attempt to use this insight to bridge the gap between SRL results from gold parses and from automatically-generated parses. To do this, we jointly perform parsing and semantic role labeling, using a probabilistic SRL system to rerank the results of a probabilistic parser. 
Our current results are negative, because a locally-trained SRL model can return inaccurate probability estimates.", "phrases": ["semantic role labeling", "probabilistic srl system", "joint parsing"], "overall_score": 3.4955620376179364, "scores": [2.5666305205202193, 0.9576359440742247, 0.5641909087831759], "rank_score": 1.3628191244592067} -{"id": "yang-etal-2019-assessing", "title": "Assessing the Ability of Self-Attention Networks to Learn Word Order", "abstract": "Self-attention networks (SAN) have attracted a lot of interests due to their high parallelization and strong performance on a variety of NLP tasks, e.g. machine translation. Due to the lack of recurrence structure such as recurrent neural networks (RNN), SAN is ascribed to be weak at learning positional information of words for sequence modeling. However, neither this speculation has been empirically confirmed, nor explanations for their strong performances on machine translation tasks when \u201clacking positional information\u201d have been explored. To this end, we propose a novel word reordering detection task to quantify how well the word order information learned by SAN and RNN. Specifically, we randomly move one word to another position, and examine whether a trained model can detect both the original and inserted positions. Experimental results reveal that: 1) SAN trained on word reordering detection indeed has difficulty learning the positional information even with the position embedding; and 2) SAN trained on machine translation learns better positional information than its RNN counterpart, in which position embedding plays a critical role. Although recurrence structure make the model more universally-effective on learning word order, learning objectives matter more in the downstream tasks such as machine translation.", "phrases": ["self-attention networks", "word order", "downstream task"], "overall_score": 2.8338984505439884, "scores": [2.6133616013789047, 0.9432489148965113, 0.5318407247073134], "rank_score": 1.3628170803275765} -{"id": "bengtson-roth-2008-understanding", "title": "Understanding the Value of Features for Coreference Resolution", "abstract": "In recent years there has been substantial work on the important problem of coreference resolution, most of which has concentrated on the development of new models and algorithmic techniques. These works often show that complex models improve over a weak pairwise baseline. However, less attention has been given to the importance of selecting strong features to support learning a coreference model. \n \nThis paper describes a rather simple pairwise classification model for coreference resolution, developed with a well-designed set of features. We show that this produces a state-of-the-art system that outperforms systems built with complex models. We suggest that our system can be used as a baseline for the development of more complex models -- which may have less impact when a more robust set of features is used. 
The paper also presents an ablation study and discusses the relative contributions of various features.", "phrases": ["coreference resolution", "complex model", "state-of-the-art system", "mention"], "overall_score": 4.540942461780369, "scores": [2.553021428208572, 1.1521483992507142, 0.9032396036553177, 0.8425674797132502], "rank_score": 1.3627442277069635} -{"id": "riaz-girju-2013-toward", "title": "Toward a Better Understanding of Causality between Verbal Events: Extraction and Analysis of the Causal Power of Verb-Verb Associations", "abstract": "The identification of causal relations between verbal events is important for achieving natural language understanding. However, the problem has proven notoriously difficult since it is not clear which types of knowledge are necessary to solve this challenging problem close to human level performance. Instead of employing a large set of features proved useful in other NLP tasks, we split the problem in smaller sub problems. Since verbs play a very important role in causal relations, in this paper we harness, explore, and evaluate the predictive power of causal associations of verb-verb pairs. More specifically, we propose a set of knowledge-rich metrics to learn the likelihood of causal relations between verbs. Employing these metrics, we automatically generate a knowledge base (KBc) which identifies three categories of verb pairs: Strongly Causal, Ambiguous, and Strongly Non-causal. The knowledge base is evaluated empirically. The results show that our metrics perform significantly better than the state-of-the-art on the task of detecting causal verbal events.", "phrases": ["causality", "verbal event", "verb-verb pair"], "overall_score": 3.689568704103006, "scores": [2.333703014481204, 0.83665222892305, 0.9169789233715017], "rank_score": 1.3624447222585852} -{"id": "artetxe-etal-2019-bilingual", "title": "Bilingual Lexicon Induction through Unsupervised Machine Translation", "abstract": "A recent research line has obtained strong results on bilingual lexicon induction by aligning independently trained word embeddings in two languages and using the resulting cross-lingual embeddings to induce word translation pairs through nearest neighbor or related retrieval methods. In this paper, we propose an alternative approach to this problem that builds on the recent work on unsupervised machine translation. This way, instead of directly inducing a bilingual lexicon from cross-lingual embeddings, we use them to build a phrase-table, combine it with a language model, and use the resulting machine translation system to generate a synthetic parallel corpus, from which we extract the bilingual lexicon using statistical word alignment techniques. As such, our method can work with any word embedding and cross-lingual mapping technique, and it does not require any additional resource besides the monolingual corpus used to train the embeddings. 
When evaluated on the exact same cross-lingual embeddings, our proposed method obtains an average improvement of 6 accuracy points over nearest neighbor and 4 points over CSLS retrieval, establishing a new state-of-the-art in the standard MUSE dataset.", "phrases": ["induction", "unsupervised machine translation", "cross-lingual embedding"], "overall_score": 3.2669454381950938, "scores": [2.5914345992480854, 0.9183142403746636, 0.5775173666063473], "rank_score": 1.3624220687430322} -{"id": "yen-etal-2015-writeahead", "title": "WriteAhead: Mining Grammar Patterns in Corpora for Assisted Writing", "abstract": "This paper describes WriteAhead, a resource-rich, Interactive Writing Environment that provides L2 learners with writing prompts, as well as \u201dget it right\u201d advice, to helps them write fluently and accurately. The method involves automatically analyzing reference and learner corpora, extracting grammar patterns with example phrases, and computing dubious, overused patterns. At run-time, as the user types (or mouses over) a word, the system automatically retrieves and displays grammar patterns and examples, most relevant to the word. The user can opt for patterns from a general corpus, academic corpus, learner corpus, or commonly overused dubious patterns found in a learner corpus. WriteAhead proactively engages the user with steady, timely, and spot-on information for effective assisted writing. Preliminary experiments show that WriteAhead fulfills the design goal of fostering learner independence and encouraging self-editing, and is likely to induce better writing, and improve writing skills in the long run.", "phrases": ["grammar pattern", "interactive writing environment", "writeahead", "english sentence", "esl learner"], "overall_score": 3.2661756163846656, "scores": [3.031915151235032, 1.201807043381628, 1.1682522439307481, 0.8682431268758525, 0.5402875751390335], "rank_score": 1.3621010281124588} -{"id": "chi-etal-2016-geolocation", "title": "Geolocation Prediction in Twitter Using Location Indicative Words and Textual Features", "abstract": "Knowing the location of a social media user and their posts is important for various purposes, such as the recommendation of location-based items/services, and locality detection of crisis/disasters. This paper describes our submission to the shared task \u201cGeolocation Prediction in Twitter\u201d of the 2nd Workshop on Noisy User-generated Text. In this shared task, we propose an algorithm to predict the location of Twitter users and tweets using a multinomial Naive Bayes classifier trained on Location Indicative Words and various textual features (such as city/country names, #hashtags and @mentions). 
We compared our approach against various baselines based on Location Indicative Words, city/country names, #hashtags and @mentions as individual feature sets, and experimental results show that our approach outperforms these baselines in terms of classification accuracy, mean and median error distance.", "phrases": ["location indicative words", "textual feature", "hashtag", "geolocation prediction"], "overall_score": 2.6498386967103693, "scores": [2.4656507362429996, 0.8900552739200746, 0.8766300530878838, 1.2146547918957598], "rank_score": 1.3617477137866794} -{"id": "hartung-frank-2011-exploring", "title": "Exploring Supervised LDA Models for Assigning Attributes to Adjective-Noun Phrases", "abstract": "This paper introduces an attribute selection task as a way to characterize the inherent meaning of property-denoting adjectives in adjective-noun phrases, such as e.g. hot in hot summer denoting the attribute temperature, rather than taste. We formulate this task in a vector space model that represents adjectives and nouns as vectors in a semantic space defined over possible attributes. The vectors incorporate latent semantic information obtained from two variants of LDA topic models. Our LDA models outperform previous approaches on a small set of 10 attributes with considerable gains on sparse representations, which highlights the strong smoothing power of LDA models. For the first time, we extend the attribute selection task to a new data set with more than 200 classes. We observe that large-scale attribute selection is a hard problem, but a subset of attributes performs robustly on the large scale as well. Again, the LDA models outperform the VSM baseline.", "phrases": ["lda", "attribute", "adjective-noun phrase"], "overall_score": 2.8316007403199728, "scores": [2.4492014887127054, 1.10701818678372, 0.528916670340717], "rank_score": 1.3617121152790475} -{"id": "sahin-steedman-2018-data", "title": "Data Augmentation via Dependency Tree Morphing for Low-Resource Languages", "abstract": "Neural NLP systems achieve high scores in the presence of sizable training dataset. Lack of such datasets leads to poor system performances in the case low-resource languages. We present two simple text augmentation techniques using dependency trees, inspired from image processing. We \u201ccrop\u201d sentences by removing dependency links, and we \u201crotate\u201d sentences by moving the tree fragments around the root. We apply these techniques to augment the training sets of low-resource languages in Universal Dependencies project. We implement a character-level sequence tagging model and evaluate the augmented datasets on part-of-speech tagging task. We show that crop and rotate provides improvements over the models trained with non-augmented data for majority of the languages, especially for languages with rich case marking systems.", "phrases": ["dependency tree morphing", "low-resource language", "data augmentation"], "overall_score": 3.261923917715126, "scores": [2.363490318097539, 0.8990085817230827, 0.8184848963254346], "rank_score": 1.3603279320486854} -{"id": "chen-etal-2010-semafor", "title": "SEMAFOR: Frame Argument Resolution with Log-Linear Models", "abstract": "This paper describes the SEMAFOR system's performance in the SemEval 2010 task on linking events and their participants in discourse. Our entry is based upon SEMAFOR 1.0 (Das et al., 2010a), a frame-semantic probabilistic parser built from log-linear models. 
The extended system models null instantiations, including non-local argument reference. Performance is evaluated on the task data with and without gold-standard overt arguments. In both settings, it fares the best of the submitted systems with respect to recall and F1.", "phrases": ["semafor", "framenet-style parser", "extension"], "overall_score": 2.988768856874848, "scores": [2.951400914877404, 0.582310088991328, 0.5470309754034751], "rank_score": 1.360247326424069} -{"id": "davidov-etal-2010-enhanced", "title": "Enhanced Sentiment Learning Using Twitter Hashtags and Smileys", "abstract": "Automated identification of diverse sentiment types can be beneficial for many NLP systems such as review summarization and public media analysis. In some of these systems there is an option of assigning a sentiment value to a single sentence or a very short text. \n \nIn this paper we propose a supervised sentiment classification framework which is based on data from Twitter, a popular microblogging service. By utilizing 50 Twitter tags and 15 smileys as sentiment labels, this framework avoids the need for labor intensive manual annotation, allowing identification and classification of diverse sentiment types of short texts. We evaluate the contribution of different feature types for sentiment classification and show that our framework successfully identifies sentiment types of untagged sentences. The quality of the sentiment identification was also confirmed by human judges. We also explore dependencies and overlap between different sentiment types represented by smileys and Twitter hashtags.", "phrases": ["hashtag", "smileys", "emoticon", "tweet label"], "overall_score": 5.017370113772293, "scores": [0.7853221535882505, 2.093484108783632, 2.0135842685493417, 0.5481451940147466], "rank_score": 1.3601339312339926} -{"id": "dasgupta-ng-2009-mine", "title": "Mine the Easy, Classify the Hard: A Semi-Supervised Approach to Automatic Sentiment Classification", "abstract": "Supervised polarity classification systems are typically domain-specific. Building these systems involves the expensive process of annotating a large amount of data for each domain. A potential solution to this corpus annotation bottleneck is to build unsupervised polarity classification systems. However, unsupervised learning of polarity is difficult, owing in part to the prevalence of sentimentally ambiguous reviews, where reviewers discuss both the positive and negative aspects of a product. To address this problem, we propose a semi-supervised approach to sentiment classification where we first mine the unambiguous reviews using spectral techniques and then exploit them to classify the ambiguous reviews via a novel combination of active learning, transductive learning, and ensemble learning.", "phrases": ["semi-supervised approach", "sentiment classification", "review", "active learning"], "overall_score": 2.9884551575677367, "scores": [2.541094013169096, 0.9381378827006166, 1.0731389009606007, 0.8880474260365665], "rank_score": 1.36010455571672} -{"id": "kim-linzen-2020-cogs", "title": "COGS: A Compositional Generalization Challenge Based on Semantic Interpretation", "abstract": "Natural language is characterized by compositionality: the meaning of a complex expression is constructed from the meanings of its constituent parts. To facilitate the evaluation of the compositional abilities of language processing architectures, we introduce COGS, a semantic parsing dataset based on a fragment of English. 
The evaluation portion of COGS contains multiple systematic gaps that can only be addressed by compositional generalization; these include new combinations of familiar syntactic structures, or new combinations of familiar words and familiar structures. In experiments with Transformers and LSTMs, we found that in-distribution accuracy on the COGS test set was near-perfect (96\u201399%), but generalization accuracy was substantially lower (16\u201335%) and showed high sensitivity to random seed (+-6\u20138%). These findings indicate that contemporary standard NLP models are limited in their compositional generalization capacity, and position COGS as a good way to measure progress.", "phrases": ["generalization", "fragment", "systematic gap", "cogs", "semantic parser"], "overall_score": 3.7701865576361806, "scores": [3.1607035365057903, 1.5536880521833711, 0.9700146213487431, 0.5723831774698311, 0.5422474249022017], "rank_score": 1.3598073624819875} -{"id": "guu-etal-2017-language", "title": "From Language to Programs: Bridging Reinforcement Learning and Maximum Marginal Likelihood", "abstract": "Our goal is to learn a semantic parser that maps natural language utterances into executable programs when only indirect supervision is available: examples are labeled with the correct execution result, but not the program itself. Consequently, we must search the space of programs for those that output the correct result, while not being misled by spurious programs: incorrect programs that coincidentally output the correct result. We connect two common learning paradigms, reinforcement learning (RL) and maximum marginal likelihood (MML), and then present a new learning algorithm that combines the strengths of both. The new algorithm guards against spurious programs by combining the systematic search traditionally employed in MML with the randomized exploration of RL, and by updating parameters such that probability is spread more evenly across consistent programs. We apply our learning algorithm to a new neural semantic parser and show significant gains over existing state-of-the-art results on a recent context-dependent semantic parsing task.", "phrases": ["program", "reinforcement learning", "maximum marginal likelihood", "strength", "exploration"], "overall_score": 4.139423133944263, "scores": [2.6354872745463394, 1.80157418301489, 0.9808326404497318, 0.8333176330127969, 0.5469367776789136], "rank_score": 1.3596297017405345} -{"id": "miceli-barone-etal-2017-regularization", "title": "Regularization techniques for fine-tuning in neural machine translation", "abstract": "We investigate techniques for supervised domain adaptation for neural machine translation where an existing model trained on a large out-of-domain dataset is adapted to a small in-domain dataset. In this scenario, overfitting is a major challenge. We investigate a number of techniques to reduce overfitting and improve transfer learning, including regularization techniques such as dropout and L2-regularization towards an out-of-domain prior. In addition, we introduce tuneout, a novel regularization technique inspired by dropout. We apply these techniques, alone and in combination, to neural machine translation, obtaining improvements on IWSLT datasets for English\u2192German and English\u2192Russian. 
We also investigate the amounts of in-domain training data needed for domain adaptation in NMT, and find a logarithmic relationship between the amount of training data and gain in BLEU score.", "phrases": ["machine translation", "dropout", "regularization technique", "continued training", "miceli"], "overall_score": 4.201543184947463, "scores": [1.9714681264274818, 1.870867522663376, 1.2311705760517375, 0.8656228488314467, 0.8571916630359331], "rank_score": 1.359264147401995} -{"id": "gao-etal-2018-neural-approaches", "title": "Neural Approaches to Conversational AI", "abstract": "This tutorial surveys neural approaches to conversational AI that were developed in the last few years. We group conversational systems into three categories: (1) question answering agents, (2) task-oriented dialogue agents, and (3) social bots. For each category, we present a review of state-of-the-art neural approaches, draw the connection between neural approaches and traditional symbolic approaches, and discuss the progress we have made and challenges we are facing, using specific systems and models as case studies.", "phrases": ["agent", "dialogue system", "neural approach", "language understanding"], "overall_score": 5.343851593995069, "scores": [2.096480238725678, 1.3692101648221269, 1.1205046567942716, 0.8503141780316134], "rank_score": 1.3591273095934224} -{"id": "zhou-rush-2019-simple", "title": "Simple Unsupervised Summarization by Contextual Matching", "abstract": "We propose an unsupervised method for sentence summarization using only language modeling. The approach employs two language models, one that is generic (i.e. pretrained), and the other that is specific to the target domain. We show that by using a product-of-experts criteria these are enough for maintaining continuous contextual matching while maintaining output fluency. Experiments on both abstractive and extractive sentence summarization data sets show promising results of our method without being exposed to any paired data.", "phrases": ["summarization", "contextual matching", "language model", "fluency", "beam search"], "overall_score": 3.3769495250149366, "scores": [2.932512954001131, 1.2934282949171985, 1.1657059307026016, 0.8775988586093635, 0.5256762686158016], "rank_score": 1.3589844613692192} -{"id": "zoph-etal-2016-simple", "title": "Simple, Fast Noise-Contrastive Estimation for Large RNN Vocabularies", "abstract": "We present a simple algorithm to efficiently train language models with noise-contrastive estimation (NCE) on graphics processing units (GPUs). Our NCE-trained language models achieve significantly lower perplexity on the One Billion Word Benchmark language modeling challenge, and contain one sixth of the parameters in the best single model in Chelba et al. (2013). When incorporated into a strong Arabic-English machine translation system they give a strong boost in translation quality. 
We release a toolkit so that others may also train large-scale, large vocabulary LSTM language models with NCE, parallelizing computation across multiple GPUs.", "phrases": ["noise-contrastive estimation", "estimation", "nce"], "overall_score": 2.4345588215090204, "scores": [1.8019758520271199, 1.7174096604489812, 0.5568739352686961], "rank_score": 1.3587531492482656} -{"id": "belinkov-glass-2019-analysis", "title": "Analysis Methods in Neural Language Processing: A Survey", "abstract": "The field of natural language processing has seen impressive progress in recent years, with neural network models replacing many of the traditional systems. A plethora of new models have been proposed, many of which are thought to be opaque compared to their feature-rich counterparts. This has led researchers to analyze, interpret, and evaluate neural networks in novel and more fine-grained ways. In this survey paper, we review analysis methods in neural language processing, categorize them according to prominent research trends, highlight existing limitations, and point to potential directions for future work.", "phrases": ["neural language processing", "survey", "interpretability", "area", "input token"], "overall_score": 5.791044306576188, "scores": [2.939007162654055, 0.9892170946215839, 1.4311980697465287, 0.8437126358407167, 0.589592234136595], "rank_score": 1.358545439399896} -{"id": "agirre-etal-2009-use", "title": "Use of Rich Linguistic Information to Translate Prepositions and Grammar Cases to Basque", "abstract": "This paper presents three successful techniques to translate prepositions heading verbal complements by means of rich linguistic information, in the context of a rule-based Machine Translation system for an agglutinative language with scarce resources. This information comes in the form of lexicalized syntactic dependency triples, verb subcategorization and manually coded selection rules based on lexical, syntactic and semantic information. The first two resources have been automatically extracted from monolingual corpora. The results obtained using a new evaluation methodology show that all proposed techniques improve precision over the baselines, including a translation dictionary compiled from an aligned corpus, and a state-of-the-art statistical Machine Translation system. The results also show that linguistic information in all three techniques are complementary, and that a combination of them obtains the best F-score results overall.", "phrases": ["rich linguistic information", "preposition", "grammatical case"], "overall_score": 2.6432980189509143, "scores": [1.683636984153632, 1.30407745302631, 1.08744497380442], "rank_score": 1.3583864703281208} -{"id": "da-san-martino-etal-2020-prta", "title": "Prta: A System to Support the Analysis of Propaganda Techniques in the News", "abstract": "Recent events, such as the 2016 US Presidential Campaign, Brexit and the COVID-19 \u201cinfodemic\u201d, have brought into the spotlight the dangers of online disinformation. There has been a lot of research focusing on fact-checking and disinformation detection. However, little attention has been paid to the specific rhetorical and psychological techniques used to convey propaganda messages. Revealing the use of such techniques can help promote media literacy and critical thinking, and eventually contribute to limiting the impact of \u201cfake news\u201d and disinformation campaigns. 
Prta (Propaganda Persuasion Techniques Analyzer) allows users to explore the articles crawled on a regular basis by highlighting the spans in which propaganda techniques occur and to compare them on the basis of their use of propaganda techniques. The system further reports statistics about the use of such techniques, overall and over time, or according to filtering criteria specified by the user based on time interval, keywords, and/or political orientation of the media. Moreover, it allows users to analyze any text or URL through a dedicated interface or via an API. The system is available online: .", "phrases": ["propaganda technique", "news", "prta"], "overall_score": 2.1860799026366986, "scores": [1.8011624297137971, 1.6782954542114035, 0.5954055558153664], "rank_score": 1.3582878132468557} -{"id": "bohra-etal-2018-dataset", "title": "A Dataset of Hindi-English Code-Mixed Social Media Text for Hate Speech Detection", "abstract": "Hate speech detection in social media texts is an important Natural language Processing task, which has several crucial applications like sentiment analysis, investigating cyberbullying and examining socio-political controversies. While relevant research has been done independently on code-mixed social media texts and hate speech detection, our work is the first attempt in detecting hate speech in Hindi-English code-mixed social media text. In this paper, we analyze the problem of hate speech detection in code-mixed texts and present a Hindi-English code-mixed dataset consisting of tweets posted online on Twitter. The tweets are annotated with the language at word level and the class they belong to (Hate Speech or Normal Speech). We also propose a supervised classification system for detecting hate speech in the text using various character level, word level, and lexicon based features.", "phrases": ["hate speech detection", "code-mixed text", "twitter", "social medium text"], "overall_score": 3.581446422048052, "scores": [3.5530546584123956, 0.8309233347142675, 0.5231486709881931, 0.5212456290633021], "rank_score": 1.3570930732945397} -{"id": "mihalcea-strapparava-2009-lie", "title": "The Lie Detector: Explorations in the Automatic Recognition of Deceptive Language", "abstract": "In this paper, we present initial experiments in the recognition of deceptive language. We introduce three data sets of true and lying texts collected for this purpose, and we show that automatic classification is a viable technique to distinguish between truth and falsehood as expressed in language. We also introduce a method for class-based feature analysis, which sheds some light on the features that are characteristic for deceptive text.", "phrases": ["lie detector", "deceptive language", "death penalty", "fake news"], "overall_score": 4.5218254682124766, "scores": [2.800185227805686, 0.8609469375868289, 1.2096810145489172, 0.5572155672793974], "rank_score": 1.3570071868052072} -{"id": "gauthier-etal-2020-syntaxgym", "title": "SyntaxGym: An Online Platform for Targeted Evaluation of Language Models", "abstract": "Targeted syntactic evaluations have yielded insights into the generalizations learned by neural network language models. However, this line of research requires an uncommon confluence of skills: both the theoretical knowledge needed to design controlled psycholinguistic experiments, and the technical proficiency needed to train and deploy large-scale language models. 
We present SyntaxGym, an online platform designed to make targeted evaluations accessible to both experts in NLP and linguistics, reproducible across computing environments, and standardized following the norms of psycholinguistic experimental design. This paper releases two tools of independent value for the computational linguistics community: 1. A website, syntaxgym.org, which centralizes the process of targeted syntactic evaluation and provides easy tools for analysis and visualization; 2. Two command-line tools, `syntaxgym` and `lm-zoo`, which allow any user to reproduce targeted syntactic evaluations and general language model inference on their own machine.", "phrases": ["online platform", "targeted evaluation", "syntaxgym"], "overall_score": 2.821130109466861, "scores": [2.4062807086641755, 0.8746240133878472, 0.7891256965783568], "rank_score": 1.3566768062101264} -{"id": "snow-etal-2006-semantic", "title": "Semantic Taxonomy Induction from Heterogenous Evidence", "abstract": "We propose a novel algorithm for inducing semantic taxonomies. Previous algorithms for taxonomy induction have typically focused on independent classifiers for discovering new single relationships based on hand-constructed or automatically discovered textual patterns. By contrast, our algorithm flexibly incorporates evidence from multiple classifiers over heterogenous relationships to optimize the entire structure of the taxonomy, using knowledge of a word's coordinate terms to help in determining its hypernyms, and vice versa. We apply our algorithm on the problem of sense-disambiguated noun hyponym acquisition, where we combine the predictions of hypernym and coordinate term classifiers with the knowledge in a preexisting semantic taxonomy (WordNet 2.1). We add 10,000 novel synsets to WordNet 2.1 at 84% precision, a relative error reduction of 70% over a non-joint algorithm using the same component classifiers. Finally, we show that a taxonomy built using our algorithm shows a 23% relative F-score improvement over WordNet 2.1 on an independent testset of hypernym pairs.", "phrases": ["taxonomy induction", "hypernym", "dependency path", "high precision"], "overall_score": 5.133631946030542, "scores": [3.373351473209938, 0.9343229834617078, 0.5780680927767002, 0.5406577556706436], "rank_score": 1.3566000762797474} -{"id": "liu-gildea-2010-semantic", "title": "Semantic Role Features for Machine Translation", "abstract": "We propose semantic role features for a Tree-to-String transducer to model the reordering/deletion of source-side semantic roles. These semantic features, as well as the Tree-to-String templates, are trained based on a conditional log-linear model and are shown to significantly outperform systems trained based on Max-Likelihood and EM. We also show significant improvement in sentence fluency by using the semantic role features in the log-linear model, based on manual evaluation.", "phrases": ["machine translation", "deletion", "semantic role", "srl", "smt system"], "overall_score": 4.253403548215208, "scores": [2.4724446882214006, 1.6618500897766229, 1.2693922438042928, 0.8287924066092806, 0.5501890367489821], "rank_score": 1.356533693032116} -{"id": "naderi-hirst-2017-classifying", "title": "Classifying Frames at the Sentence Level in News Articles", "abstract": "Previous approaches to generic frame classification analyze frames at the document level. 
Here, we propose a supervised based approach based on deep neural networks and distributional representations for classifying frames at the sentence level in news articles. We conduct our experiments on the publicly available Media Frames Corpus compiled from the U.S. Newspapers. Using (B)LSTMs and GRU networks to represent the meaning of frames, we demonstrate that our approach yields at least 14-point improvement over several baseline methods.", "phrases": ["frame", "sentence level", "recurrent neural network"], "overall_score": 2.9784179106760793, "scores": [2.40604851953609, 1.1298736120569073, 0.5306870917849754], "rank_score": 1.3555364077926575} -{"id": "allaway-mckeown-2020-zero", "title": "Zero-Shot Stance Detection: A Dataset and Model using Generalized Topic Representations", "abstract": "Stance detection is an important component of understanding hidden influences in everyday life. Since there are thousands of potential topics to take a stance on, most with little to no training data, we focus on zero-shot stance detection: classifying stance from no training examples. In this paper, we present a new dataset for zero-shot stance detection that captures a wider range of topics and lexical variation than in previous datasets. Additionally, we propose a new model for stance detection that implicitly captures relationships between topics using generalized topic representations and show that this model improves performance on a number of challenging linguistic phenomena.", "phrases": ["generalized topic representation", "zero-shot stance detection", "linguistic phenomenon"], "overall_score": 2.8173936667226673, "scores": [2.64091253248109, 0.8951598610084992, 0.5285674777231759], "rank_score": 1.3548799570709218} -{"id": "kauchak-barzilay-2006-paraphrasing", "title": "Paraphrasing for Automatic Evaluation", "abstract": "This paper studies the impact of paraphrases on the accuracy of automatic evaluation. Given a reference sentence and a machine-generated sentence, we seek to find a paraphrase of the reference sentence that is closer in wording to the machine output than the original reference. We apply our paraphrasing method in the context of machine translation evaluation. Our experiments show that the use of a paraphrased synthetic reference refines the accuracy of automatic evaluation. We also found a strong connection between the quality of automatic paraphrases as judged by humans and their contribution to automatic evaluation.", "phrases": ["automatic evaluation", "reference", "paraphrasing"], "overall_score": 3.915876361659681, "scores": [1.982849490574479, 0.9605257691360846, 1.121025471055597], "rank_score": 1.35480024358872} -{"id": "coster-kauchak-2011-learning", "title": "Learning to Simplify Sentences Using Wikipedia", "abstract": "In this paper we examine the sentence simplification problem as an English-to-English translation problem, utilizing a corpus of 137K aligned sentence pairs extracted by aligning English Wikipedia and Simple English Wikipedia. This data set contains the full range of transformation operations including rewording, reordering, insertion and deletion. We introduce a new translation model for text simplification that extends a phrase-based machine translation approach to include phrasal deletion. 
Evaluated based on three metrics that compare against a human reference (BLEU, word-F1 and SSA) our new approach performs significantly better than two text compression techniques (including T3) and the phrase-based translation system without deletion.", "phrases": ["wikipedia", "simplification", "machine translation", "change", "giza++"], "overall_score": 4.694041858035418, "scores": [2.021718468563194, 1.7297060755866702, 1.6432928306748162, 0.840795198686839, 0.5365583368013938], "rank_score": 1.3544141820625826} -{"id": "hajic-etal-2009-conll", "title": "The CoNLL-2009 Shared Task: Syntactic and Semantic Dependencies in Multiple Languages", "abstract": "For the 11th straight year, the Conference on Computational Natural Language Learning has been accompanied by a shared task whose purpose is to promote natural language processing applications and evaluate them in a standard setting. In 2009, the shared task was dedicated to the joint parsing of syntactic and semantic dependencies in multiple languages. This shared task combines the shared tasks of the previous five years under a unique dependency-based formalism similar to the 2008 task. In this paper, we define the shared task, describe how the data sets were created and show their quantitative properties, report the results and summarize the approaches of the participating systems.", "phrases": ["semantic dependency", "multiple language", "conll", "joint learning"], "overall_score": 4.24372607120832, "scores": [1.9342442659748824, 1.71786960879295, 1.2226035047069002, 0.5390716808259437], "rank_score": 1.3534472650751692} -{"id": "johannsen-etal-2015-cross", "title": "Cross-lingual syntactic variation over age and gender", "abstract": "Most computational sociolinguistics studies have focused on phonological and lexical variation. We present the first large-scale study of syntactic variation among demographic groups (age and gender) across several languages. We harvest data from online user-review sites and parse it with universal dependencies. We show that several age and gender-specific variations hold across languages, for example that women are more likely to use VP conjunctions.", "phrases": ["syntactic variation", "gender", "large-scale study"], "overall_score": 3.470439400056002, "scores": [2.6020044516317076, 0.9220401112949295, 0.5350290284801796], "rank_score": 1.3530245304689388} -{"id": "de-saeger-etal-2008-looking", "title": "Looking for Trouble", "abstract": "This paper presents a method for mining potential troubles or obstacles related to the use of a given object. Some example instances of this relation are (medicine, side effect) and (amusement park, height restriction). Our acquisition method consists of three steps. First, we use an un-supervised method to collect training samples from Web documents. Second, a set of expressions generally referring to troubles is acquired by a supervised learning method. Finally, the acquired troubles are associated with objects so that each of the resulting pairs consists of an object and a trouble or obstacle in using that object. To show the effectiveness of our method we conducted experiments using a large collection of Japanese Web documents for acquisition. 
Experimental results show an 85.5% precision for the top 10,000 acquired troubles, and a 74% precision for the top 10% of over 60,000 acquired object-trouble pairs.", "phrases": ["trouble", "obstacle", "web"], "overall_score": 2.4242505104778704, "scores": [2.6262259504077923, 0.8553946296641318, 0.5773793338880315], "rank_score": 1.3529999713199852} -{"id": "ferraro-etal-2015-survey", "title": "A Survey of Current Datasets for Vision and Language Research", "abstract": "Integrating vision and language has long been a dream in work on artificial intelligence (AI). In the past two years, we have witnessed an explosion of work that brings together vision and language from images to videos and beyond. The available corpora have played a crucial role in advancing this area of research. In this paper, we propose a set of quality metrics for evaluating and analyzing the vision & language datasets and categorize them accordingly. Our analyses show that the most recent datasets have been using more complex language and more abstract concepts, however, there are different strengths and weaknesses in each.", "phrases": ["survey", "vision", "language research"], "overall_score": 2.811061888986936, "scores": [2.2737208781454314, 0.9158976140150864, 0.8658865547128967], "rank_score": 1.3518350156244716} -{"id": "abdul-mageed-diab-2012-awatif", "title": "AWATIF: A Multi-Genre Corpus for Modern Standard Arabic Subjectivity and Sentiment Analysis", "abstract": "We present AWATIF, a multi-genre corpus of Modern Standard Arabic (MSA) labeled for subjectivity and sentiment analysis (SSA) at the sentence level. The corpus is labeled using both regular as well as crowd sourcing methods under three different conditions with two types of annotation guidelines. We describe the sub-corpora constituting the corpus and provide examples from the various SSA categories. In the process, we present our linguistically-motivated and genre-nuanced annotation guidelines and provide evidence showing their impact on the labeling task.", "phrases": ["multi-genre corpus", "modern standard arabic", "subjectivity", "sentiment analysis"], "overall_score": 2.1754918407302863, "scores": [2.3350549997174106, 1.9191428469558949, 0.5934469868107338, 0.5591914884964492], "rank_score": 1.351709080495122} -{"id": "koller-kuhlmann-2009-dependency", "title": "Dependency Trees and the Strong Generative Capacity of CCG", "abstract": "We propose a novel algorithm for extracting dependencies from the derivations of a large fragment of CCG. Unlike earlier proposals, our dependency structures are always tree-shaped. We then use these dependency trees to compare the strong generative capacities of CCG and TAG and obtain surprising results: Both formalisms generate the same languages of derivation trees --- but the mechanisms they use to bring the words in these trees into a linear order are incomparable.", "phrases": ["strong generative capacity", "ccg", "derivation"], "overall_score": 2.175294624371383, "scores": [2.6190345163839748, 0.8873006721707102, 0.5484244406903216], "rank_score": 1.3515865430816687} -{"id": "scarlini-etal-2019-just", "title": "Just \u201cOneSeC\u201d for Producing Multilingual Sense-Annotated Data", "abstract": "The well-known problem of knowledge acquisition is one of the biggest issues in Word Sense Disambiguation (WSD), where annotated data are still scarce in English and almost absent in other languages. 
In this paper we formulate the assumption of One Sense per Wikipedia Category and present OneSeC, a language-independent method for the automatic extraction of hundreds of thousands of sentences in which a target word is tagged with its meaning. Our automatically-generated data consistently lead a supervised WSD model to state-of-the-art performance when compared with other automatic and semi-automatic methods. Moreover, our approach outperforms its competitors on multilingual and domain-specific settings, where it beats the existing state of the art on all languages and most domains. All the training data are available for research purposes at .", "phrases": ["onesec", "multilingual sense-annotated data", "wikipedia category"], "overall_score": 2.420443370523097, "scores": [2.13409886243123, 0.9791819383464243, 0.9393446973857212], "rank_score": 1.3508751660544585} -{"id": "cheng-lapata-2016-neural", "title": "Neural Summarization by Extracting Sentences and Words", "abstract": "Traditional approaches to extractive summarization rely heavily on humanengineered features. In this work we propose a data-driven approach based on neural networks and continuous sentence features. We develop a general framework for single-document summarization composed of a hierarchical document encoder and an attention-based extractor. This architecture allows us to develop different classes of summarization models which can extract sentences or words. We train our models on large scale corpora containing hundreds of thousands of document-summary pairs 1 . Experimental results on two summarization datasets demonstrate that our models obtain results comparable to the state of the art without any access to linguistic annotation.", "phrases": ["summarization", "hierarchical document encoder", "extractor", "cnn", "recurrent neural network"], "overall_score": 5.551789744586004, "scores": [2.7642480329343826, 1.162028087965556, 1.0504113862818178, 0.9274515228377476, 0.8484273682959764], "rank_score": 1.350513279663096} -{"id": "narayan-etal-2020-stepwise", "title": "Stepwise Extractive Summarization and Planning with Structured Transformers", "abstract": "We propose encoder-centric stepwise models for extractive summarization using structured transformers \u2013 HiBERT and Extended Transformers. We enable stepwise summarization by injecting the previously generated summary into the structured transformer as an auxiliary sub-structure. Our models are not only efficient in modeling the structure of long inputs, but they also do not rely on task-specific redundancy-aware modeling, making them a general purpose extractive content planner for different tasks. When evaluated on CNN/DailyMail extractive summarization, stepwise models achieve state-of-the-art performance in terms of Rouge without any redundancy aware modeling or sentence filtering. This also holds true for Rotowire table-to-text generation, where our models surpass previously reported metrics for content selection, planning and ordering, highlighting the strength of stepwise modeling. 
Amongst the two structured transformers we test, stepwise Extended Transformers provides the best performance across both datasets and sets a new standard for these challenges.", "phrases": ["extractive summarization", "planning", "structured transformer"], "overall_score": 2.806998587116502, "scores": [2.620495721764069, 0.8120179746455393, 0.6171292450056971], "rank_score": 1.3498809804717684} -{"id": "wallace-etal-2019-universal", "title": "Universal Adversarial Triggers for Attacking and Analyzing NLP", "abstract": "Adversarial examples highlight model vulnerabilities and are useful for evaluation and interpretation. We define universal adversarial triggers: input-agnostic sequences of tokens that trigger a model to produce a specific prediction when concatenated to any input from a dataset. We propose a gradient-guided search over tokens which finds short trigger sequences (e.g., one word for classification and four words for language modeling) that successfully trigger the target prediction. For example, triggers cause SNLI entailment accuracy to drop from 89.94% to 0.55%, 72% of \u201cwhy\u201d questions in SQuAD to be answered \u201cto kill american people\u201d, and the GPT-2 language model to spew racist output even when conditioned on non-racial contexts. Furthermore, although the triggers are optimized using white-box access to a specific model, they transfer to other models for all tasks we consider. Finally, since triggers are input-agnostic, they provide an analysis of global model behavior. For instance, they confirm that SNLI models exploit dataset biases and help to diagnose heuristics learned by reading comprehension models.", "phrases": ["trigger", "input-agnostic sequence", "universal adversarial trigger", "attack method"], "overall_score": 5.195306249061032, "scores": [2.2963946366509096, 1.6810530783203097, 0.8630582963053661, 0.5570078365078645], "rank_score": 1.3493784619461124} -{"id": "feng-etal-2021-survey", "title": "A Survey of Data Augmentation Approaches for NLP", "abstract": "Data augmentation has recently seen increased interest in NLP due to more work in low-resource domains, new tasks, and the popularity of large-scale neural networks that require large amounts of training data. Despite this recent upsurge, this area is still relatively underexplored, perhaps due to the challenges posed by the discrete nature of language data. In this paper, we present a comprehensive and unifying survey of data augmentation for NLP by summarizing the literature in a structured manner. We first introduce and motivate data augmentation for NLP, and then discuss major methodologically representative approaches. Next, we highlight techniques that are used for popular NLP applications and tasks. We conclude by outlining current challenges and directions for future research. Overall, our paper aims to clarify the landscape of existing literature in data augmentation for NLP and motivate additional work in this area. 
We also present a GitHub repository with a paper list that will be continuously updated at https://github.com/styfeng/DataAug4NLP", "phrases": ["survey", "data augmentation approaches", "popular nlp application"], "overall_score": 4.7173254148854955, "scores": [2.5018595053288983, 0.9230669904370242, 0.6225340047115786], "rank_score": 1.349153500159167} -{"id": "warstadt-etal-2020-blimp", "title": "BLiMP: A Benchmark of Linguistic Minimal Pairs for English", "abstract": "We introduce The Benchmark of Linguistic Minimal Pairs (shortened to BLiMP, or ), a challenge set for evaluating what language models (LMs) know about major grammatical phenomena in English. consists of 67 sub-datasets, each containing 1000 minimal pairs isolating specific contrasts in syntax, morphology, or semantics. The data is automatically generated according to expert-crafted grammars, and aggregate human agreement with the labels is 96.4%. We use it to evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs. We find that state-of-the-art models identify morphological contrasts reliably, but they struggle with semantic restrictions on the distribution of quantifiers and negative polarity items and subtle syntactic phenomena such as extraction islands.", "phrases": ["linguistic minimal pairs", "language model", "blimp"], "overall_score": 2.625232752271691, "scores": [2.1592174293517985, 0.9686204522852774, 0.9194703975445256], "rank_score": 1.3491027597272005} -{"id": "yang-choi-2019-friendsqa", "title": "FriendsQA: Open-Domain Question Answering on TV Show Transcripts", "abstract": "This paper presents FriendsQA, a challenging question answering dataset that contains 1,222 dialogues and 10,610 open-domain questions, to tackle machine comprehension on everyday conversations. Each dialogue, involving multiple speakers, is annotated with several types of questions regarding the dialogue contexts, and the answers are annotated with certain spans in the dialogue. A series of crowdsourcing tasks are conducted to ensure good annotation quality, resulting a high inter-annotator agreement of 81.82%. A comprehensive annotation analytics is provided for a deeper understanding in this dataset. Three state-of-the-art QA systems are experimented, R-Net, QANet, and BERT, and evaluated on this dataset. BERT in particular depicts promising results, an accuracy of 74.2% for answer utterance selection and an F1-score of 64.2% for answer span selection, suggesting that the FriendsQA task is hard yet has a great potential of elevating QA research on multiparty dialogue to another level.", "phrases": ["open-domain question", "conversation", "friendsqa"], "overall_score": 3.234023041409766, "scores": [1.8739358967382171, 1.09583997417284, 1.0763012177296296], "rank_score": 1.348692362880229} -{"id": "raghunathan-etal-2010-multi", "title": "A Multi-Pass Sieve for Coreference Resolution", "abstract": "Most coreference resolution models determine if two mentions are coreferent using a single function over a set of constraints or features. This approach can lead to incorrect decisions as lower precision features often overwhelm the smaller number of high precision ones. To overcome this problem, we propose a simple coreference architecture based on a sieve that applies tiers of deterministic coreference models one at a time from highest to lowest precision. Each tier builds on the previous tier's entity cluster output. 
Further, our model propagates global information by sharing attributes (e.g., gender and number) across mentions in the same cluster. This cautious sieve guarantees that stronger features are given precedence over weaker ones and that each decision is made using all of the information available at the time. The framework is highly modular: new coreference modules can be plugged in without any change to the other modules. In spite of its simplicity, our approach outperforms many state-of-the-art supervised and unsupervised models on several standard corpora. This suggests that sieve-based approaches could be applied to other NLP tasks.", "phrases": ["coreference resolution", "mention", "decision", "cluster", "multi-pass sieve system"], "overall_score": 4.7557718223350465, "scores": [2.193950090553824, 1.73958482702342, 1.2361143504444214, 1.0507419046136126, 0.5227818369546924], "rank_score": 1.3486346019179942} -{"id": "sheng-etal-2019-woman", "title": "The Woman Worked as a Babysitter: On Biases in Language Generation", "abstract": "We present a systematic study of biases in natural language generation (NLG) by analyzing text generated from prompts that contain mentions of different demographic groups. In this work, we introduce the notion of the regard towards a demographic, use the varying levels of regard towards different demographics as a defining metric for bias in NLG, and analyze the extent to which sentiment scores are a relevant proxy metric for regard. To this end, we collect strategically-generated text from language models and manually annotate the text with both sentiment and regard scores. Additionally, we build an automatic regard classifier through transfer learning, so that we can analyze biases in unseen text. Together, these methods reveal the extent of the biased nature of language model generations. Our analysis provides a study of biases in NLG, bias metrics and correlated human judgments, and empirical evidence on the usefulness of our annotated dataset.", "phrases": ["woman", "language generation", "demographic", "gender bias"], "overall_score": 4.672884678454318, "scores": [2.703770230793108, 1.277522666783422, 0.8885227627434099, 0.5234223814817106], "rank_score": 1.3483095104504126} -{"id": "marcu-etal-2006-spmt", "title": "SPMT: Statistical Machine Translation with Syntactified Target Language Phrases", "abstract": "We introduce SPMT, a new class of statistical Translation Models that use Syntactified target language Phrases. The SPMT models outperform a state-of-the-art phrase-based baseline model by 2.64 BLEU points on the NIST 2003 Chinese-English test corpus and 0.28 points on a human-based quality metric that ranks translations on a scale from 1 to 5.", "phrases": ["machine translation", "spmt model", "syntax-based model", "synchronous grammar", "span"], "overall_score": 5.039154223069636, "scores": [2.7061177100660285, 1.425012163099286, 1.1542787551582971, 0.8446733935432662, 0.6109554930861036], "rank_score": 1.3482075029905964} -{"id": "demszky-etal-2020-goemotions", "title": "GoEmotions: A Dataset of Fine-Grained Emotions", "abstract": "Understanding emotion expressed in language has a wide range of applications, from building empathetic chatbots to detecting harmful online behavior. Advances in this area can be driven by large-scale datasets with a fine-grained typology, adaptable to multiple downstream tasks. 
We introduce GoEmotions, the largest manually annotated dataset of 58k English Reddit comments, labeled for 27 emotion categories or Neutral. We demonstrate the high quality of the annotations via Principal Preserved Component Analysis. We conduct transfer learning experiments with existing emotion benchmarks to show that our dataset generalizes well to other domains and different emotion taxonomies. Our BERT-based model achieves an average F1-score of .46 across our proposed taxonomy, leaving much room for improvement.", "phrases": ["emotion", "reddit comment", "taxonomy"], "overall_score": 3.9687696781032415, "scores": [2.0717819415631316, 1.0626253607149598, 0.9092523281484687], "rank_score": 1.3478865434755198} -{"id": "cheng-roth-2013-relational", "title": "Relational Inference for Wikification", "abstract": "Wikification, commonly referred to as Disambiguation to Wikipedia (D2W), is the task of identifying concepts and entities in text and disambiguating them into the most specific corresponding Wikipedia pages. Previous approaches to D2W focused on the use of local and global statistics over the given text, Wikipedia articles and their link structures, to evaluate context compatibility among a list of probable candidates. However, these methods fail (often embarrassingly) when some level of text understanding is needed to support Wikification. In this paper we introduce a novel approach to Wikification by incorporating, along with statistical methods, richer relational analysis of the text. We provide an extensible, efficient and modular Integer Linear Programming (ILP) formulation of Wikification that incorporates the entity-relation inference problem, and show that the ability to identify relations in text helps both candidate generation and ranking Wikipedia titles considerably. Our results show significant improvements in both Wikification and the TAC Entity Linking task.", "phrases": ["wikification", "ilp", "mention", "global coherence"], "overall_score": 4.035178128851524, "scores": [2.06531539980859, 1.6200610944352858, 0.8698425998612994, 0.8326831086031933], "rank_score": 1.3469755506770922} -{"id": "zwarts-johnson-2011-impact", "title": "The impact of language models and loss functions on repair disfluency detection", "abstract": "Unrehearsed spoken language often contains disfluencies. In order to correctly interpret a spoken utterance, any such disfluencies must be identified and removed or otherwise dealt with. Operating on transcripts of speech which contain disfluencies, we study the effect of language model and loss function on the performance of a linear reranker that rescores the 25-best output of a noisy-channel model. We show that language models trained on large amounts of non-speech data improve performance more than a language model trained on a more modest amount of speech data, and that optimising f-score rather than log loss improves disfluency detection performance. \n \nOur approach uses a log-linear reranker, operating on the top n analyses of a noisy channel model. We use large language models, introduce new features into this reranker and examine different optimisation strategies. 
We obtain a disfluency detection f-score of 0.838, which improves upon the current state-of-the-art.", "phrases": ["language model", "disfluency", "n-b reranking"], "overall_score": 3.3469356814239335, "scores": [2.4208462576719425, 1.047780460360732, 0.572091288468065], "rank_score": 1.3469060021669133} -{"id": "matusov-etal-2006-computing", "title": "Computing Consensus Translation for Multiple Machine Translation Systems Using Enhanced Hypothesis Alignment", "abstract": "This paper describes a novel method for computing a consensus translation from the outputs of multiple machine translation (MT) systems. The outputs are combined and a possibly new translation hypothesis can be generated. Similarly to the well-established ROVER approach of Fiscus (1997) for combining speech recognition hypotheses, the consensus translation is computed by voting on a confusion network. To create the confusion network, we produce pairwise word alignments of the original machine translation hypotheses with an enhanced statistical alignment algorithm that explicitly models word reordering. The context of a whole document of translations rather than a single sentence is taken into account to produce the alignment. The proposed alignment and voting approach was evaluated on several machine translation tasks, including a large vocabulary task. The method was also tested in the framework of multi-source and speech translation. On all tasks and conditions, we achieved significant improvements in translation quality, increasing, e.g., the BLEU score by as much as 15% relative.", "phrases": ["consensus translation", "hypothesis", "system combination", "translation output"], "overall_score": 4.826034108563145, "scores": [1.6525110538153969, 1.6495208957824417, 1.4966545994524838, 0.5882352911254118], "rank_score": 1.3467304600439336} -{"id": "habernal-etal-2018-argument", "title": "The Argument Reasoning Comprehension Task: Identification and Reconstruction of Implicit Warrants", "abstract": "Reasoning is a crucial part of natural language argumentation. To comprehend an argument, one must analyze its warrant, which explains why its claim follows from its premises. As arguments are highly contextualized, warrants are usually presupposed and left implicit. Thus, comprehension not only requires language understanding and logic skills, but also depends on common sense. In this paper we develop a methodology for reconstructing warrants systematically. We operationalize it in a scalable crowdsourcing process, resulting in a freely licensed dataset with warrants for 2k authentic arguments from news comments. On this basis, we present a new challenging task, the argument reasoning comprehension task. Given an argument with a claim and a premise, the goal is to choose the correct implicit warrant from two options. Both warrants are plausible and lexically close, but lead to contradicting claims. A solution to this task will define a substantial step towards automatic warrant reconstruction. 
However, experiments with several neural attention and language models reveal that current approaches do not suffice.", "phrases": ["reasoning", "implicit warrant", "natural language argumentation"], "overall_score": 3.9650902793793885, "scores": [2.611355513740478, 0.8605684591922225, 0.5679868291942552], "rank_score": 1.3466369340423183} -{"id": "verhagen-etal-2010-semeval", "title": "SemEval-2010 Task 13: TempEval-2", "abstract": "Tempeval-2 comprises evaluation tasks for time expressions, events and temporal relations, the latter of which was split into four subtasks, motivated by the notion that smaller subtasks would make both data preparation and temporal relation extraction easier. Manually annotated data were provided for six languages: Chinese, English, French, Italian, Korean and Spanish.", "phrases": ["tempeval-2", "information extraction", "nlp community", "same sentence", "international evaluation"], "overall_score": 4.999575644261049, "scores": [3.442229308740146, 1.345365170806765, 0.8486669028976617, 0.5520365515941316, 0.5431934096825897], "rank_score": 1.3462982687442588} -{"id": "katz-giesbrecht-2006-automatic", "title": "Automatic Identification of Non-Compositional Multi-Word Expressions using Latent Semantic Analysis", "abstract": "Making use of latent semantic analysis, we explore the hypothesis that local linguistic context can serve to identify multi-word expressions that have non-compositional meanings. We propose that vector-similarity between distribution vectors associated with an MWE as a whole and those associated with its constituent parts can serve as a good measure of the degree to which the MWE is compositional. We present experiments that show that low (cosine) similarity does, in fact, correlate with non-compositionality.", "phrases": ["multi-word expression", "latent semantic analysis", "local linguistic context", "automatic identification"], "overall_score": 3.731954957526703, "scores": [3.429090979552415, 0.7865356309860391, 0.6273377994606745, 0.5411085000456286], "rank_score": 1.3460182275111892} -{"id": "birch-etal-2008-predicting", "title": "Predicting Success in Machine Translation", "abstract": "The performance of machine translation systems varies greatly depending on the source and target languages involved. Determining the contribution of different characteristics of language pairs to system performance is key to knowing what aspects of machine translation to improve and which are irrelevant. This paper investigates the effect of different explanatory variables on the performance of a phrase-based system for 110 European language pairs. We show that three factors are strong predictors of performance in isolation: the amount of reordering, the morphological complexity of the target language and the historical relatedness of the two languages. 
Together, these factors contribute 75% to the variability of the performance of the system.", "phrases": ["machine translation", "explanatory variable", "european language pair", "complexity", "smt performance"], "overall_score": 4.159346536962901, "scores": [3.287631352099315, 1.4200480162677345, 0.8740418549014508, 0.5762377235700126, 0.5701054517010594], "rank_score": 1.3456128797079145} -{"id": "mairesse-etal-2010-phrase", "title": "Phrase-Based Statistical Language Generation Using Graphical Models and Active Learning", "abstract": "Most previous work on trainable language generation has focused on two paradigms: (a) using a statistical model to rank a set of generated utterances, or (b) using statistics to inform the generation decision process. Both approaches rely on the existence of a handcrafted generator, which limits their scalability to new domains. This paper presents Bagel, a statistical language generator which uses dynamic Bayesian networks to learn from semantically-aligned data produced by 42 untrained annotators. A human evaluation shows that Bagel can generate natural and informative utterances from unseen inputs in the information presentation domain. Additionally, generation performance on sparse datasets is improved significantly by using certainty-based active learning, yielding ratings close to the human gold standard with a fraction of the data.", "phrases": ["language generation", "active learning", "nlg system"], "overall_score": 3.9619732323533827, "scores": [2.2104130663551405, 1.2187579087889695, 0.6075639618539751], "rank_score": 1.3455783123326952} -{"id": "li-liang-2021-prefix", "title": "Prefix-Tuning: Optimizing Continuous Prompts for Generation", "abstract": "Fine-tuning is the de facto way of leveraging large pretrained language models for downstream tasks. However, fine-tuning modifies all the language model parameters and therefore necessitates storing a full copy for each task. In this paper, we propose prefix-tuning, a lightweight alternative to fine-tuning for natural language generation tasks, which keeps language model parameters frozen and instead optimizes a sequence of continuous task-specific vectors, which we call the prefix. Prefix-tuning draws inspiration from prompting for language models, allowing subsequent tokens to attend to this prefix as if it were \u201cvirtual tokens\u201d. We apply prefix-tuning to GPT-2 for table-to-text generation and to BART for summarization. We show that by learning only 0.1% of the parameters, prefix-tuning obtains comparable performance in the full data setting, outperforms fine-tuning in low-data settings, and extrapolates better to examples with topics that are unseen during training.", "phrases": ["prompt", "fine-tuning", "language model", "prefix-tuning", "text generation task"], "overall_score": 4.660146427235297, "scores": [2.2079865421770135, 1.468309237742518, 1.4164337095742388, 1.0598454536558173, 0.5705951972391962], "rank_score": 1.3446340280777567} -{"id": "wiebe-etal-2004-learning", "title": "Learning Subjective Language", "abstract": "Subjectivity in natural language refers to aspects of language used to express opinions, evaluations, and speculations. There are numerous natural language processing applications for which subjectivity analysis is relevant, including information extraction and text categorization. The goal of this work is learning subjective language from corpora. 
Clues of subjectivity are generated and tested, including low-frequency words, collocations, and adjectives and verbs identified using distributional similarity. The features are also examined working in concert. The features, generated from different data sets using different procedures, exhibit consistency in performance in that they all do better and worse on the same data sets. In addition, this article shows that the density of subjectivity clues in the surrounding context strongly affects how likely it is that a word is subjective, and it provides the results of an annotation study assessing the subjectivity of sentences with high-density features. Finally, the clues are used to perform opinion piece recognition (a type of text categorization and genre detection) to demonstrate the utility of the knowledge acquired in this article.", "phrases": ["subjective language", "collocation", "history", "opinion mining"], "overall_score": 4.991302812834053, "scores": [3.4447814686636122, 0.8392008988106139, 0.5544577223189023, 0.5378420700020844], "rank_score": 1.3440705399488033} -{"id": "kumano-etal-2007-extracting", "title": "Extracting phrasal alignments from comparable corpora by using joint probability SMT model", "abstract": "We propose a method of extracting phrasal alignments from comparable corpora by using an extended phrase-based joint probability model for statistical machine translation (SMT). Our method does not require preexisting dictionaries or splitting documents into sentences in advance. By checking the reliability of each alignment using log-likelihood ratio statistics while searching for optimal alignments, our method aims to produce phrasal alignments for only parallel parts of the comparable corpora. Experimental results show that our method achieves a precision of about 0.8 in phrasal alignment extraction when using 2,000 Japanese-English document pairs as training data.", "phrases": ["phrasal alignment", "comparable corpora", "probability smt model"], "overall_score": 2.4081935637044403, "scores": [2.2030631649518555, 0.947100610276081, 0.8819514808593616], "rank_score": 1.3440384186957661} -{"id": "kurita-etal-2019-measuring", "title": "Measuring Bias in Contextualized Word Representations", "abstract": "Contextual word embeddings such as BERT have achieved state of the art performance in numerous NLP tasks. Since they are optimized to capture the statistical properties of training data, they tend to pick up on and amplify social stereotypes present in the data as well. In this study, we (1) propose a template-based method to quantify bias in BERT; (2) show that this method obtains more consistent results in capturing social biases than the traditional cosine-based method; and (3) conduct a case study, evaluating gender bias in a downstream task of Gender Pronoun Resolution. 
Although our case study focuses on gender bias, the proposed technique is generalizable to unveiling other biases, including in multiclass settings, such as racial and religious biases.", "phrases": ["word embedding", "stereotype", "gender bias", "association"], "overall_score": 4.737355593400207, "scores": [1.5459193579146842, 1.499318655274373, 1.4804780254427092, 0.847932583317755], "rank_score": 1.3434121554873804} -{"id": "nguyen-grishman-2015-event", "title": "Event Detection and Domain Adaptation with Convolutional Neural Networks", "abstract": "We study the event detection problem using convolutional neural networks (CNNs) that overcome the two fundamental limitations of the traditional feature-based approaches to this task: complicated feature engineering for rich feature sets and error propagation from the preceding stages which generate these features. The experimental results show that the CNNs outperform the best reported feature-based systems in the general setting as well as the domain adaptation setting without resorting to extensive external resources.", "phrases": ["domain adaptation", "convolutional neural networks", "cnn", "event detection", "learning method"], "overall_score": 4.427380526962918, "scores": [2.1927418951153514, 1.8685899747432617, 0.865881101160422, 1.2594134159387387, 0.529999324451302], "rank_score": 1.3433251422818153} -{"id": "mitchell-lapata-2008-vector", "title": "Vector-based Models of Semantic Composition", "abstract": "This paper proposes a framework for representing the meaning of phrases and sentences in vector space. Central to our approach is vector composition which we operationalize in terms of additive and multiplicative functions. Under this framework, we introduce a wide range of composition models which we evaluate empirically on a sentence similarity task. Experimental results demonstrate that the multiplicative models are superior to the additive alternatives when compared against human judgments.", "phrases": ["semantic composition", "operation", "general framework", "word vector", "co-occurrence"], "overall_score": 6.073884570791677, "scores": [1.9333676428698354, 1.8119459415031498, 1.2514952598674203, 1.1633387584102879, 0.5560936228021203], "rank_score": 1.3432482450905627} -{"id": "mai-etal-2019-divide", "title": "Divide, Conquer and Combine: Hierarchical Feature Fusion Network with Local and Global Perspectives for Multimodal Affective Computing", "abstract": "We propose a general strategy named `divide, conquer and combine' for multimodal fusion. Instead of directly fusing features at the holistic level, we conduct fusion hierarchically so that both local and global interactions are considered for a comprehensive interpretation of multimodal embeddings. In the `divide' and `conquer' stages, we conduct local fusion by exploring the interaction of a portion of the aligned feature vectors across various modalities lying within a sliding window, which ensures that each part of the multimodal embeddings is explored sufficiently. On this basis, global fusion is conducted in the `combine' stage to explore the interconnection across local interactions, via an Attentive Bi-directional Skip-connected LSTM that directly connects distant local interactions and integrates two levels of attention mechanism. In this way, local interactions can exchange information sufficiently and thus obtain an overall view of multimodal information. 
Our method achieves state-of-the-art performance on multimodal affective computing with higher efficiency.", "phrases": ["combine", "multimodal affective computing", "holistic level"], "overall_score": 2.406280655607814, "scores": [2.2310294778191677, 0.9266122953230201, 0.8712706399360814], "rank_score": 1.342970804359423} -{"id": "li-etal-2017-end", "title": "End-to-End Task-Completion Neural Dialogue Systems", "abstract": "One of the major drawbacks of modularized task-completion dialogue systems is that each module is trained individually, which presents several challenges. For example, downstream modules are affected by earlier modules, and the performance of the entire system is not robust to the accumulated errors. This paper presents a novel end-to-end learning framework for task-completion dialogue systems to tackle such issues. Our neural dialogue system can directly interact with a structured database to assist users in accessing information and accomplishing certain tasks. The reinforcement learning based dialogue manager offers robust capabilities to handle noise caused by other components of the dialogue system. Our experiments in a movie-ticket booking domain show that our end-to-end system not only outperforms modularized dialogue system baselines for both objective and subjective evaluation, but also is robust to noise, as demonstrated by several systematic experiments with different error granularity and rates specific to the language understanding module.", "phrases": ["dialogue system", "end-to-end", "deep learning"], "overall_score": 4.735462530741247, "scores": [1.8332218246208212, 1.1069376229403451, 1.0884665233384145], "rank_score": 1.3428753236331936} -{"id": "reddy-etal-2011-empirical", "title": "An Empirical Study on Compositionality in Compound Nouns", "abstract": "A multiword is compositional if its meaning can be expressed in terms of the meaning of its constituents. In this paper, we collect and analyse the compositionality judgments for a range of compound nouns using Mechanical Turk. Unlike existing compositionality datasets, our dataset has judgments on the contribution of constituent words as well as judgments for the phrase as a whole. We use this dataset to study the relation between the judgments at the constituent level and those for the whole phrase. We then evaluate two different types of distributional models for compositionality detection \u2013 constituent based models and composition function based models. Both types of models show competitive performance, though the composition function based models perform slightly better. In both types, additive models perform better than their multiplicative counterparts.", "phrases": ["compositionality", "judgment", "noun compound", "co-occurrence", "standard distributional model"], "overall_score": 4.65188615301788, "scores": [3.1977453487950362, 1.3726643154340648, 1.0424185440936093, 0.5563720844966573, 0.5420527909195662], "rank_score": 1.3422506167477868} -{"id": "chang-etal-2013-multi", "title": "Multi-Relational Latent Semantic Analysis", "abstract": "We present Multi-Relational Latent Semantic Analysis (MRLSA) which generalizes Latent Semantic Analysis (LSA). MRLSA provides an elegant approach to combining multiple relations between words by constructing a 3-way tensor. Similar to LSA, a low-rank approximation of the tensor is derived using a tensor decomposition. Each word in the vocabulary is thus represented by a vector in the latent semantic space and each relation is captured by a latent square matrix. 
The degree to which two words have a specific relation can then be measured through simple linear algebraic operations. We demonstrate that by integrating multiple relations from both homogeneous and heterogeneous information sources, MRLSA achieves state-of-the-art performance on existing benchmark datasets for two relations, antonymy and is-a.", "phrases": ["latent semantic analysis", "multiple relation", "knowledge graph"], "overall_score": 3.5414628169815026, "scores": [2.046673478645283, 1.4145766011870777, 0.5645769955095037], "rank_score": 1.3419423584472883} -{"id": "zampieri-etal-2017-complex", "title": "Complex Word Identification: Challenges in Data Annotation and System Performance", "abstract": "This paper revisits the problem of complex word identification (CWI) following up on the SemEval CWI shared task. We use ensemble classifiers to investigate how well computational methods can discriminate between complex and non-complex words. Furthermore, we analyze the classification performance to understand what makes lexical complexity challenging. Our findings show that most systems performed poorly on the SemEval CWI dataset, and one of the reasons for that is the way in which human annotation was performed.", "phrases": ["most system", "complex word identification", "complexity"], "overall_score": 3.440681250146963, "scores": [2.6387595360677794, 0.8556303662868948, 0.5298781481679079], "rank_score": 1.3414226835075274} -{"id": "post-vilar-2018-fast", "title": "Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation", "abstract": "The end-to-end nature of neural machine translation (NMT) removes many ways of manually guiding the translation process that were available in older paradigms. Recent work, however, has introduced a new capability: lexically constrained or guided decoding, a modification to beam search that forces the inclusion of pre-specified words and phrases in the output. However, while theoretically sound, existing approaches have computational complexities that are either linear (Hokamp and Liu, 2017) or exponential (Anderson et al., 2017) in the number of constraints. We present an algorithm for lexically constrained decoding with a complexity of O(1) in the number of constraints. We demonstrate the algorithm's remarkable ability to properly place these constraints, and use it to explore the shaky relationship between model and BLEU scores. Our implementation is available as part of Sockeye.", "phrases": ["decoding", "neural machine translation", "translation process", "lexical constraint", "unconstrained generation"], "overall_score": 4.842908896563499, "scores": [1.9665785559770061, 1.7754557438958645, 1.575854927537849, 0.838483211759091, 0.5495524997567369], "rank_score": 1.3411849877853095} -{"id": "schuster-etal-2020-limitations", "title": "The Limitations of Stylometry for Detecting Machine-Generated Fake News", "abstract": "Recent developments in neural language models (LMs) have raised concerns about their potential misuse for automatically spreading misinformation. In light of these concerns, several studies have proposed to detect machine-generated fake news by capturing their stylistic differences from human-written text. These approaches, broadly termed stylometry, have found success in source attribution and misinformation detection in human-written texts. However, in this work, we show that stylometry is limited against machine-generated misinformation. 
Whereas humans speak differently when trying to deceive, LMs generate stylistically consistent text, regardless of underlying motive. Thus, though stylometry can successfully prevent impersonation by identifying text provenance, it fails to distinguish legitimate LM applications from those that introduce false information. We create two benchmarks demonstrating the stylistic similarity between malicious and legitimate uses of LMs, utilized in auto-completion and editing-assistance settings. Our findings highlight the need for non-stylometry approaches in detecting machine-generated misinformation, and open up the discussion on the desired evaluation benchmarks.", "phrases": ["stylometry", "fake news", "human-written text"], "overall_score": 3.087659485410625, "scores": [1.8216406082036372, 1.679870698710622, 0.5213491226159453], "rank_score": 1.3409534765100684} -{"id": "zollmann-venugopal-2006-syntax", "title": "Syntax Augmented Machine Translation via Chart Parsing", "abstract": "We present translation results on the shared task \"Exploiting Parallel Texts for Statistical Machine Translation\" generated by a chart parsing decoder operating on phrase tables augmented and generalized with target language syntactic categories. We use a target language parser to generate parse trees for each sentence on the target side of the bilingual training corpus, matching them with phrase table lattices built for the corresponding source sentence. Considering phrases that correspond to syntactic categories in the parse trees, we develop techniques to augment (declare a syntactically motivated category for a phrase pair) and generalize (form mixed terminal and nonterminal phrases) the phrase table into a synchronous bilingual grammar. We present results on the French-to-English task for this workshop, representing significant improvements over the workshop's baseline system. Our translation system is available open-source under the GNU General Public License.", "phrases": ["chart", "syntactic category", "target side", "non-terminal", "synchronous grammar"], "overall_score": 5.187386111764096, "scores": [1.7950657567899846, 1.770772082893621, 1.495703356910669, 1.043838863994914, 0.5945899192020799], "rank_score": 1.3399939959582536} -{"id": "van-der-lee-etal-2019-best", "title": "Best practices for the human evaluation of automatically generated text", "abstract": "Currently, there is little agreement as to how Natural Language Generation (NLG) systems should be evaluated. While there is some agreement regarding automatic metrics, there is a high degree of variation in the way that human evaluation is carried out. This paper provides an overview of how human evaluation is currently conducted, and presents a set of best practices, grounded in the literature. With this paper, we hope to contribute to the quality and consistency of human evaluations in NLG.", "phrases": ["practice", "human evaluation", "van der", "nlg system", "fluency"], "overall_score": 3.4366989816846787, "scores": [2.413530297753761, 2.115575127784961, 1.0688119202199102, 0.5645729879747307, 0.5368602239826967], "rank_score": 1.339870111543212} -{"id": "de-lhoneux-etal-2018-parameter", "title": "Parameter sharing between dependency parsers for related languages", "abstract": "Previous work has suggested that parameter sharing between transition-based neural dependency parsers for related languages can lead to better performance, but there is no consensus on what parameters to share. 
We present an evaluation of 27 different parameter sharing strategies across 10 languages, representing five pairs of related languages, each pair from a different language family. We find that sharing transition classifier parameters always helps, whereas the usefulness of sharing word and/or character LSTM parameters varies. Based on this result, we propose an architecture where the transition classifier is shared, and the sharing of word and character parameters is controlled by a parameter that can be tuned on validation data. This model is linguistically motivated and obtains significant improvements over a monolingually trained baseline. We also find that sharing transition classifier parameters helps when training a parser on unrelated language pairs, but that, in the case of unrelated languages, sharing too many parameters does not help.", "phrases": ["dependency parser", "character lstm parameter", "unrelated language", "parameter sharing"], "overall_score": 3.714720688311354, "scores": [2.2535572767959633, 2.007156686641942, 0.5520460539961707, 0.5464490978803506], "rank_score": 1.3398022788286068} -{"id": "li-etal-2017-multi", "title": "Multi-modal Summarization for Asynchronous Collection of Text, Image, Audio and Video", "abstract": "The rapid increase of multimedia data over the Internet necessitates multi-modal summarization from collections of text, image, audio and video. In this work, we propose an extractive Multi-modal Summarization (MMS) method which can automatically generate a textual summary given a set of documents, images, audios and videos related to a specific topic. The key idea is to bridge the semantic gaps between multi-modal contents. For audio information, we design an approach to selectively use its transcription. For vision information, we learn joint representations of texts and images using a neural network. Finally, all the multi-modal aspects are considered to generate the textual summary by maximizing the salience, non-redundancy, readability and coverage through budgeted optimization of submodular functions. We further introduce an MMS corpus in English and Chinese. The experimental results on this dataset demonstrate that our method outperforms other competitive baseline methods.", "phrases": ["asynchronous collection", "video", "multimedia data", "multi-modal summarization", "multimodal corpus"], "overall_score": 3.0848902672536282, "scores": [2.808416569454344, 2.0190353240981787, 0.800056722791846, 0.5424233845990909, 0.5288221007830323], "rank_score": 1.3397508203452984} -{"id": "pfeiffer-etal-2021-adapterfusion", "title": "AdapterFusion: Non-Destructive Task Composition for Transfer Learning", "abstract": "Sequential fine-tuning and multi-task learning are methods aiming to incorporate knowledge from multiple tasks; however, they suffer from catastrophic forgetting and difficulties in dataset balancing. To address these shortcomings, we propose AdapterFusion, a new two-stage learning algorithm that leverages knowledge from multiple tasks. First, in the knowledge extraction stage we learn task-specific parameters called adapters that encapsulate the task-specific information. We then combine the adapters in a separate knowledge composition step. We show that by separating the two stages, i.e., knowledge extraction and knowledge composition, the classifier can effectively exploit the representations learned from multiple tasks in a non-destructive manner. 
We empirically evaluate AdapterFusion on 16 diverse NLU tasks, and find that it effectively combines various types of knowledge at different layers of the model. We show that our approach outperforms traditional strategies such as full fine-tuning as well as multi-task learning. Our code and adapters are available at AdapterHub.ml.", "phrases": ["transfer learning", "adapter", "target task"], "overall_score": 3.627867397553529, "scores": [1.4325783321821857, 1.7222422612156225, 0.8641603650167832], "rank_score": 1.3396603194715304} -{"id": "jang-etal-2016-metaphor", "title": "Metaphor Detection with Topic Transition, Emotion and Cognition in Context", "abstract": "Metaphor is a common linguistic tool in communication, making its detection in discourse a crucial task for natural language understanding. One popular approach to this challenge is to capture semantic incohesion between a metaphor and the dominant topic of the surrounding text. While these methods are effective, they tend to overclassify target words as metaphorical when they deviate in meaning from their context. We present a new approach that (1) distinguishes literal and non-literal use of target words by examining sentence-level topic transitions and (2) captures the motivation of speakers to express emotions and abstract concepts metaphorically. Experiments on an online breast cancer discussion forum dataset demonstrate a significant improvement in metaphor detection over the state-of-the-art. These experimental results also reveal a tendency toward metaphor usage in personal topics and certain emotional contexts.", "phrases": ["topic transition", "emotion", "metaphor detection"], "overall_score": 2.9425695159307597, "scores": [2.2401600467349962, 0.9575664732523606, 0.8199367807274258], "rank_score": 1.3392211002382608} -{"id": "peng-etal-2016-event", "title": "Event Detection and Co-reference with Minimal Supervision", "abstract": "An important aspect of natural language understanding involves recognizing and categorizing events and the relations among them. However, these tasks are quite subtle and annotating training data for machine learning based approaches is an expensive task, resulting in supervised systems that attempt to learn complex models from small amounts of data, which they overfit. This paper addresses this challenge by developing an event detection and co-reference system with minimal supervision, in the form of a few event examples. We view these tasks as semantic similarity problems between event mentions, or between event mentions and an ontology of types, thus facilitating the use of large amounts of out-of-domain text data. Notably, our semantic relatedness function exploits the structure of the text by making use of a semantic-role-labeling based representation of an event. We show that our approach to event detection is competitive with the top supervised methods. 
More significantly, we outperform state-of-the-art supervised methods for event co-reference on benchmark data sets, and support significantly better transfer across domains.", "phrases": ["minimal supervision", "co-reference system", "event detection", "trigger example", "ace annotation guideline"], "overall_score": 3.942818410134465, "scores": [2.432414462640852, 1.8649299719599284, 0.8930668480767474, 0.8862459398618907, 0.6187072221512679], "rank_score": 1.3390728889381374} -{"id": "yasunaga-etal-2018-robust", "title": "Robust Multilingual Part-of-Speech Tagging via Adversarial Training", "abstract": "Adversarial training (AT) is a powerful regularization method for neural networks, aiming to achieve robustness to input perturbations. Yet, the specific effects of the robustness obtained from AT are still unclear in the context of natural language processing. In this paper, we propose and analyze a neural POS tagging model that exploits AT. In our experiments on the Penn Treebank WSJ corpus and the Universal Dependencies (UD) dataset (27 languages), we find that AT not only improves the overall tagging accuracy, but also 1) prevents over-fitting well in low-resource languages and 2) boosts tagging accuracy for rare/unseen words. We also demonstrate that 3) the improved tagging performance by AT contributes to the downstream task of dependency parsing, and that 4) AT helps the model to learn cleaner word representations. 5) The proposed AT model is generally effective in different sequence labeling tasks. These positive results motivate further use of AT for natural language tasks.", "phrases": ["adversarial training", "overall tagging accuracy", "word representation"], "overall_score": 2.9420070399987783, "scores": [2.8973521402617175, 0.5887404057659771, 0.5308027732011036], "rank_score": 1.3389651064095993} -{"id": "solorio-liu-2008-part", "title": "Part-of-Speech Tagging for English-Spanish Code-Switched Text", "abstract": "Code-switching is an interesting linguistic phenomenon commonly observed in highly bilingual communities. It consists of mixing languages in the same conversational event. This paper presents results on Part-of-Speech tagging Spanish-English code-switched discourse. We explore different approaches to exploit existing resources for both languages that range from simple heuristics, to language identification, to machine learning. The best results are achieved by training a machine learning algorithm with features that combine the output of an English and a Spanish Part-of-Speech tagger.", "phrases": ["code-switched text", "language identification", "part-of-speech tagging"], "overall_score": 4.759967120662538, "scores": [2.179193662140422, 1.2909533679709722, 0.5463096191443332], "rank_score": 1.3388188830852423} -{"id": "mihalcea-nastase-2012-word", "title": "Word Epoch Disambiguation: Finding How Words Change Over Time", "abstract": "In this paper we introduce the novel task of \"word epoch disambiguation,\" defined as the problem of identifying changes in word usage over time. 
Through experiments run using word usage examples collected from three major periods of time (1800, 1900, 2000), we show that the task is feasible, and significant differences can be observed between occurrences of words in different periods of time.", "phrases": ["change", "word epoch disambiguation", "probability distribution", "supervised learning approach"], "overall_score": 3.869307027714521, "scores": [2.5903267017999094, 1.6882946623219428, 0.5534758349847232, 0.5226562399886104], "rank_score": 1.3386883597737964} -{"id": "baldwin-etal-2003-empirical", "title": "An Empirical Model of Multiword Expression Decomposability", "abstract": "This paper presents a construction-inspecific model of multiword expression decomposability based on latent semantic analysis. We use latent semantic analysis to determine the similarity between a multiword expression and its constituent words, and claim that higher similarities indicate greater decomposability. We test the model over English noun-noun compounds and verb-particles, and evaluate its correlation with similarities and hyponymy values in WordNet. Based on mean hyponymy over partitions of data ranked on similarity, we furnish evidence for the calculated similarities being correlated with the semantic relational content of WordNet.", "phrases": ["multiword expression decomposability", "compositionality", "mwes", "idiom", "wordnet-based gold standard"], "overall_score": 5.003049296289621, "scores": [2.107215358103274, 1.764149684503361, 1.213998247146458, 1.044235072845153, 0.5631404388951494], "rank_score": 1.338547760298679} -{"id": "shaw-etal-2021-compositional", "title": "Compositional Generalization and Natural Language Variation: Can a Semantic Parsing Approach Handle Both?", "abstract": "Sequence-to-sequence models excel at handling natural language variation, but have been shown to struggle with out-of-distribution compositional generalization. This has motivated new specialized architectures with stronger compositional biases, but most of these approaches have only been evaluated on synthetically-generated datasets, which are not representative of natural language variation. In this work we ask: can we develop a semantic parsing approach that handles both natural language variation and compositional generalization? To better assess this capability, we propose new train and test splits of non-synthetic datasets. We demonstrate that strong existing approaches do not perform well across a broad set of evaluations. We also propose NQG-T5, a hybrid model that combines a high-precision grammar-based approach with a pre-trained sequence-to-sequence model. It outperforms existing approaches across several compositional generalization challenges on non-synthetic data, while also being competitive with the state-of-the-art on standard evaluations. 
While still far from solving this problem, our study highlights the importance of diverse evaluations and the open challenge of handling both compositional generalization and natural language variation in semantic parsing.", "phrases": ["language variation", "specialized architecture", "compositional generalization"], "overall_score": 3.3257232079307903, "scores": [2.9052584806471593, 0.5859935505400142, 0.5238563933661485], "rank_score": 1.3383694748511072} -{"id": "koper-schulte-im-walde-2016-automatically", "title": "Automatically Generated Affective Norms of Abstractness, Arousal, Imageability and Valence for 350 000 German Lemmas", "abstract": "This paper presents a collection of 350,000 German lemmatised words, rated on four psycholinguistic affective attributes. All ratings were obtained via a supervised learning algorithm that can automatically calculate a numerical rating of a word. We applied this algorithm to abstractness, arousal, imageability and valence. Comparison with human ratings reveals high correlation across all rating types. The full resource is publicly available at: ", "phrases": ["abstractness", "valence", "rating"], "overall_score": 2.397894614546732, "scores": [1.7815241284249455, 1.7043671389002477, 0.5289801298604216], "rank_score": 1.3382904657285382} -{"id": "luong-manning-2015-stanford", "title": "Stanford neural machine translation systems for spoken language domains", "abstract": "Neural Machine Translation (NMT), though recently developed, has shown promising results for various language pairs. Despite that, NMT has mostly been applied to formal texts such as those in the WMT shared tasks. This work further explores the effectiveness of NMT in spoken language domains by participating in the MT track of the IWSLT 2015. We consider two scenarios: (a) how to adapt existing NMT systems to a new domain and (b) the generalization of NMT to low-resource language pairs. Our results demonstrate that using an existing NMT framework, we can achieve competitive results in the aforementioned scenarios when translating from English to German and Vietnamese. Notably, we have advanced state-of-the-art results in the IWSLT English-German MT track by up to 5.2 BLEU points.", "phrases": ["neural machine translation", "spoken language domain", "fine-tuning", "target domain", "continued training"], "overall_score": 5.386842839922894, "scores": [0.9159909441290269, 1.9834480724649786, 1.7204578226975733, 1.2339351820029536, 0.8373135652158152], "rank_score": 1.3382291173020697} -{"id": "auli-etal-2013-joint", "title": "Joint Language and Translation Modeling with Recurrent Neural Networks", "abstract": "We present a joint language and translation model based on a recurrent neural network which predicts target words based on an unbounded history of both source and target words. The weaker independence assumptions of this model result in a vastly larger search space compared to related feedforward-based language or translation models. We tackle this issue with a new lattice rescoring algorithm and demonstrate its effectiveness empirically. Our joint model builds on a well-known recurrent neural network language model (Mikolov, 2012) augmented by a layer of additional inputs from the source language. We show competitive accuracy compared to the traditional channel model features. 
Our best results improve the output of a system trained on WMT 2012 French-English data by up to 1.5 BLEU, and by 1.1 BLEU on average across several test sets.", "phrases": ["recurrent neural networks", "joint language", "extended context"], "overall_score": 3.9384804253669303, "scores": [2.601682702495361, 0.8853846160865048, 0.5257315064920028], "rank_score": 1.337599608357956} -{"id": "bod-2006-subtrees", "title": "An All-Subtrees Approach to Unsupervised Parsing", "abstract": "We investigate generalizations of the all-subtrees \"DOP\" approach to unsupervised parsing. Unsupervised DOP models assign all possible binary trees to a set of sentences and next use (a large random subset of) all subtrees from these binary trees to compute the most probable parse trees. We will test both a relative frequency estimator for unsupervised DOP and a maximum likelihood estimator which is known to be statistically consistent. We report state-of-the-art results on English (WSJ), German (NEGRA) and Chinese (CTB) data. To the best of our knowledge this is the first paper which tests a maximum likelihood estimator for DOP on the Wall Street Journal, leading to the surprising result that an unsupervised parsing model beats a widely used supervised model (a treebank PCFG).", "phrases": ["unsupervised parsing", "dop", "chinese"], "overall_score": 3.3234277893836794, "scores": [2.527155889858147, 0.9175979939358354, 0.5675833076259255], "rank_score": 1.3374457304733027} -{"id": "caines-etal-2016-crowdsourcing", "title": "Crowdsourcing a Multi-lingual Speech Corpus: Recording, Transcription and Annotation of the CrowdIS Corpora", "abstract": "We announce the release of the CROWDED CORPUS: a pair of speech corpora collected via crowdsourcing, containing a native speaker corpus of English (CROWDED_ENGLISH), and a corpus of German/English bilinguals (CROWDED_BILINGUAL). Release 1 of the CROWDED CORPUS contains 1000 recordings amounting to 33,400 tokens collected from 80 speakers and is freely available to other researchers. We recruited participants via the Crowdee application for Android. Recruits were prompted to respond to business-topic questions of the type found in language learning oral tests. We then used the CrowdFlower web application to pass these recordings to crowdworkers for transcription and annotation of errors and sentence boundaries. Finally, the sentences were tagged and parsed using standard natural language processing tools. We propose that crowdsourcing is a valid and economical method for corpus collection, and discuss the advantages and disadvantages of this approach.", "phrases": ["recording", "transcription", "crowded corpus"], "overall_score": 1.8539577830931513, "scores": [1.7377741615007334, 1.6685652941420708, 0.6057040938861749], "rank_score": 1.337347849842993} -{"id": "pan-etal-2017-cross", "title": "Cross-lingual Name Tagging and Linking for 282 Languages", "abstract": "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. 
We achieve this goal through a series of new KB mining methods: generating \u201csilver-standard\u201d annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and non-Wikipedia data.", "phrases": ["name tagging", "link", "wikipedia", "cross-lingual link", "language-independent framework"], "overall_score": 4.249804353935907, "scores": [3.1065336538804016, 1.5609000170927017, 0.8314569507528496, 0.6251637738185033, 0.5621198809161503], "rank_score": 1.3372348552921214} -{"id": "chen-etal-2017-enhanced", "title": "Enhanced LSTM for Natural Language Inference", "abstract": "Reasoning and inference are central to human and artificial intelligence. Modeling inference in human language is very challenging. With the availability of large annotated data (Bowman et al., 2015), it has recently become feasible to train neural network based inference models, which have been shown to be very effective. In this paper, we present a new state-of-the-art result, achieving an accuracy of 88.6% on the Stanford Natural Language Inference Dataset. Unlike the previous top models that use very complicated network architectures, we first demonstrate that carefully designing sequential inference models based on chain LSTMs can outperform all previous models. Based on this, we further show that by explicitly considering recursive architectures in both local inference modeling and inference composition, we achieve additional improvement. Particularly, incorporating syntactic parsing information contributes to our best result\u2014it further improves the performance even when added to the already very strong model.", "phrases": ["natural language inference", "sequential inference model", "esim", "snli", "bidirectional lstm"], "overall_score": 5.283393041301636, "scores": [1.5520451343131993, 2.1054709375107294, 1.5350522327906118, 0.8567083374768738, 0.6364575439011372], "rank_score": 1.3371468371985102} -{"id": "zhang-etal-2018-improving", "title": "Improving the Transformer Translation Model with Document-Level Context", "abstract": "Although the Transformer translation model (Vaswani et al., 2017) has achieved state-of-the-art performance in a variety of translation tasks, how to use document-level context to deal with discourse phenomena problematic for Transformer still remains a challenge. In this work, we extend the Transformer model with a new context encoder to represent document-level context, which is then incorporated into the original encoder and decoder. As large-scale document-level parallel corpora are usually not available, we introduce a two-step training method to take full advantage of abundant sentence-level parallel corpora and limited document-level parallel corpora. 
Experiments on the NIST Chinese-English datasets and the IWSLT French-English datasets show that our approach improves over Transformer significantly.", "phrases": ["transformer translation model", "document-level context", "new context encoder"], "overall_score": 4.753551106317075, "scores": [2.568371018656234, 0.9081691245201263, 0.5345026780367965], "rank_score": 1.3370142737377189} -{"id": "hinrichs-etal-2010-weblicht-web", "title": "WebLicht: Web-Based LRT Services for German", "abstract": "This software demonstration presents WebLicht (short for: Web-Based Linguistic Chaining Tool), a web-based service environment for the integration and use of language resources and tools (LRT). WebLicht is being developed as part of the D-SPIN project. WebLicht is implemented as a web application so that there is no need for users to install any software on their own computers or to concern themselves with the technical details involved in building tool chains. The integrated web services are part of a prototypical infrastructure that was developed to facilitate chaining of LRT services. WebLicht allows the integration and use of distributed web services with standardized APIs. The nature of these open and standardized APIs makes it possible to access the web services from nearly any programming language, shell script or workflow engine (UIMA, GATE, etc.). Additionally, an application for integration of additional services is available, allowing anyone to contribute his own web service.", "phrases": ["api", "workflow engine", "weblicht"], "overall_score": 2.3952905796206245, "scores": [2.9157002665723466, 0.5590801451334257, 0.5357309667873287], "rank_score": 1.336837126164367} -{"id": "nguyen-etal-2014-gender", "title": "Why Gender and Age Prediction from Tweets is Hard: Lessons from a Crowdsourcing Experiment", "abstract": "There is a growing interest in automatically predicting the gender and age of authors from texts. However, most research so far ignores that language use is related to the social identity of speakers, which may be different from their biological identity. In this paper, we combine insights from sociolinguistics with data collected through an online game, to underline the importance of approaching age and gender as social variables rather than static biological variables. In our game, thousands of players guessed the gender and age of Twitter users based on tweets alone. We show that more than 10% of the Twitter users do not employ language that the crowd associates with their biological sex. It is also shown that older Twitter users are often perceived to be younger. Our findings highlight the limitations of current approaches to gender and age prediction from texts.", "phrases": ["gender", "age prediction", "language use", "twitter user", "crowd"], "overall_score": 3.204323797878204, "scores": [2.615150503553799, 2.150548429286321, 0.8557412279819886, 0.5371849995584765, 0.5229089212532675], "rank_score": 1.3363068163267706} -{"id": "wei-etal-2020-iterative", "title": "Iterative Domain-Repaired Back-Translation", "abstract": "In this paper, we focus on domain-specific translation with low resources, where in-domain parallel corpora are scarce or nonexistent. One common and effective strategy for this case is exploiting in-domain monolingual data with the back-translation method. However, the synthetic parallel data is very noisy because it is generated by imperfect out-of-domain systems, resulting in poor domain adaptation performance. 
To address this issue, we propose a novel iterative domain-repaired back-translation framework, which introduces the Domain-Repair (DR) model to refine translations in synthetic bilingual data. To this end, we construct corresponding data for the DR model training by round-trip translating the monolingual sentences, and then design the unified training framework to optimize paired DR and NMT models jointly. Experiments on adapting NMT models between specific domains and from the general domain to specific domains demonstrate the effectiveness of our proposed approach, achieving 15.79 and 4.47 BLEU improvements on average over unadapted models and back-translation.", "phrases": ["back-translation", "domain adaptation", "iterative"], "overall_score": 2.1492341431795725, "scores": [2.5686959295756044, 0.8450476011245931, 0.5924392364170917], "rank_score": 1.3353942557057632} -{"id": "prettenhofer-stein-2010-cross", "title": "Cross-Language Text Classification Using Structural Correspondence Learning", "abstract": "We present a new approach to cross-language text classification that builds on structural correspondence learning, a recently proposed theory for domain adaptation. The approach uses unlabeled documents, along with a simple word translation oracle, in order to induce task-specific, cross-lingual word correspondences. We report on analyses that reveal quantitative insights about the use of unlabeled data and the complexity of inter-language correspondence modeling. \n \nWe conduct experiments in the field of cross-language sentiment classification, employing English as source language, and German, French, and Japanese as target languages. The results are convincing; they demonstrate both the robustness and the competitiveness of the presented ideas.", "phrases": ["structural correspondence learning", "theory", "cross-language text classification", "sentiment analysis"], "overall_score": 4.065538234099225, "scores": [3.274027470796515, 0.9925512602657502, 0.5464387278676904, 0.5284287839681966], "rank_score": 1.3353615607245382} -{"id": "ma-etal-2020-simple", "title": "A Simple and Effective Unified Encoder for Document-Level Machine Translation", "abstract": "Most of the existing models for document-level machine translation adopt dual-encoder structures. The representation of the source sentences and the document-level contexts are modeled with two separate encoders. Although these models can make use of the document-level contexts, they do not fully model the interaction between the contexts and the source sentences, and can not directly adapt to the recent pre-training models (e.g., BERT) which encodes multiple sentences with a single encoder. In this work, we propose a simple and effective unified encoder that can outperform the baseline models of dual-encoder models in terms of BLEU and METEOR scores. Moreover, the pre-training models can further boost the performance of our proposed model.", "phrases": ["machine translation", "bert", "contextual information"], "overall_score": 3.317957551034105, "scores": [2.906675174518826, 0.5568907090740347, 0.5421671502623671], "rank_score": 1.335244344618409} -{"id": "tsakalidis-etal-2022-identifying", "title": "Identifying Moments of Change from Longitudinal User Text", "abstract": "Identifying changes in individuals' behaviour and mood, as observed via content shared on online platforms, is increasingly gaining importance. 
Most research to-date on this topic focuses on either: (a) identifying individuals at risk or with a certain mental health condition given a batch of posts or (b) providing equivalent labels at the post level. A disadvantage of such work is the lack of a strong temporal component and the inability to make longitudinal assessments following an individual's trajectory and allowing timely interventions. Here we define a new task, that of identifying moments of change in individuals on the basis of their shared content online. The changes we consider are sudden shifts in mood (switches) or gradual mood progression (escalations). We have created detailed guidelines for capturing moments of change and a corpus of 500 manually annotated user timelines (18.7K posts). We have developed a variety of baseline models drawing inspiration from related tasks and show that the best performance is obtained through context aware sequential modelling. We also introduce new metrics for capturing rare events in temporal windows.", "phrases": ["change", "baseline mood", "sequential classification task"], "overall_score": 2.776445859017685, "scores": [2.9273042656453785, 0.5470784047840876, 0.5311820016720465], "rank_score": 1.3351882240338375} -{"id": "zeng-etal-2021-sire", "title": "SIRE: Separate Intra- and Inter-sentential Reasoning for Document-level Relation Extraction", "abstract": "Document-level relation extraction has attracted much attention in recent years. It is usually formulated as a classification problem that predicts relations for all entity pairs in the document. However, previous works indiscriminately represent intra- and inter-sentential relations in the same way, confounding the different patterns for predicting them. Besides, they create a document graph and use paths between entities on the graph as clues for logical reasoning. However, not all entity pairs can be connected with a path and have the correct logical reasoning paths in their graph. Thus many cases of logical reasoning cannot be covered. This paper proposes an effective architecture, SIRE, to represent intra- and inter-sentential relations in different ways. We design a new and straightforward form of logical reasoning module that can cover more logical reasoning chains. Experiments on the public datasets show SIRE outperforms the previous state-of-the-art methods. Further analysis shows that our predictions are reliable and explainable. Our code is available at https://github.com/DreamInvoker/SIRE.", "phrases": ["reasoning", "document-level relation extraction", "entity pair", "sire"], "overall_score": 2.5975237501389574, "scores": [3.0490767976783286, 0.9477293800875289, 0.7862984634255806, 0.5563479566584363], "rank_score": 1.3348631494624688} -{"id": "kurata-etal-2016-improved", "title": "Improved Neural Network-based Multi-label Classification with Better Initialization Leveraging Label Co-occurrence", "abstract": "In a multi-label text classification task, in which multiple labels can be assigned to one text, label co-occurrence itself is informative. We propose a novel neural network initialization method to treat some of the neurons in the final hidden layer as dedicated neurons for each pattern of label co-occurrence. These dedicated neurons are initialized to connect to the corresponding co-occurring labels with stronger weights than to others. 
In experiments with a natural language query classification task, which requires multi-label classification, our initialization method improved classification accuracy without any computational overhead in training and evaluation.", "phrases": ["multi-label classification", "initialization method", "neuron"], "overall_score": 3.3165309742106266, "scores": [2.013481369265883, 1.1189329594970048, 0.8715964148522608], "rank_score": 1.3346702478717163} -{"id": "lan-etal-2017-continuously", "title": "A Continuously Growing Dataset of Sentential Paraphrases", "abstract": "A major challenge in paraphrase research is the lack of parallel corpora. In this paper, we present a new method to collect large-scale sentential paraphrases from Twitter by linking tweets through shared URLs. The main advantage of our method is its simplicity, as it gets rid of the classifier or human in the loop needed to select data before annotation and subsequent application of paraphrase identification algorithms in the previous work. We present the largest human-labeled paraphrase corpus to date of 51,524 sentence pairs and the first cross-domain benchmarking for automatic paraphrase identification. In addition, we show that more than 30,000 new sentential paraphrases can be easily and continuously captured every month at ~70% precision, and demonstrate their utility for downstream NLP tasks through phrasal paraphrase extraction. We make our code and data freely available.", "phrases": ["sentential paraphrase", "twitter", "url", "sentence pair"], "overall_score": 3.3155717485733893, "scores": [2.0079980044556254, 1.3744357922115173, 1.0602231534794757, 0.8944799581654211], "rank_score": 1.33428422707801} -{"id": "klementiev-roth-2006-named", "title": "Named Entity Transliteration and Discovery from Multilingual Comparable Corpora", "abstract": "Named Entity recognition (NER) is an important part of many natural language processing tasks. Most current approaches employ machine learning techniques and require supervised data. However, many languages lack such resources. This paper presents an algorithm to automatically discover Named Entities (NEs) in a resource free language, given a bilingual corpus in which it is weakly temporally aligned with a resource rich language. We observe that NEs have similar time distributions across such corpora, and that they are often transliterated, and develop an algorithm that exploits both iteratively. The algorithm makes use of a new, frequency based, metric for time distributions and a resource free discriminative approach to transliteration. We evaluate the algorithm on an English-Russian corpus, and show high level of NEs discovery in Russian.", "phrases": ["transliteration", "discovery", "comparable corpora", "lexicon induction"], "overall_score": 4.4459046997728, "scores": [1.652282965607831, 1.6372116062913955, 1.175610243033162, 0.8717881473054414], "rank_score": 1.3342232405594574} -{"id": "simard-foster-2013-pepr", "title": "PEPr: Post-Edit Propagation Using Phrase-based Statistical Machine Translation", "abstract": "Translators who work by post-editing machine translation output often find themselves repeatedly correcting the same errors. We propose a method for Post-edit Propagation (PEPr), which learns post-editor corrections and applies them on-the-fly to further MT output. Our proposal is based on a phrase-based SMT system, used in an automatic post-editing (APE) setting with online learning. 
Simulated experiments on a variety of data sets show that for documents with high levels of internal repetition, the proposed mechanism could substantially reduce the post-editing effort.", "phrases": ["post-edit propagation", "translator", "correction", "pepr"], "overall_score": 2.1467747184552106, "scores": [2.011116276115618, 1.8923355330392397, 0.8703803412907799, 0.5616323663767531], "rank_score": 1.3338661292055976} -{"id": "kirchhoff-etal-2007-semi", "title": "Semi-automatic error analysis for large-scale statistical machine translation", "abstract": "This paper presents a general framework for semi-automatic error analysis in large-scale statistical machine translation (SMT) systems. The main objective is to relate characteristics of input documents (which can be either in text or audio form) to the system's overall translation performance and thus identify particularly problematic input characteristics (e.g. source, genre, dialect, etc.). Various measurements of these factors are extracted from the input, either automatically or by human annotation, and are related to translation performance scores by means of mutual information. We apply this analysis to a state-of-the-art large-scale SMT system operating on Chinese and Arabic text and audio documents, and demonstrate how the proposed error analysis can help identify system weaknesses.", "phrases": ["error analysis", "statistical machine translation", "characteristic", "input document"], "overall_score": 2.3898194107776436, "scores": [2.3859963028644366, 0.9510159510919168, 1.1026423719595069, 0.8954798088579122], "rank_score": 1.3337836086934431} -{"id": "matsoukas-etal-2009-discriminative", "title": "Discriminative Corpus Weight Estimation for Machine Translation", "abstract": "Current statistical machine translation (SMT) systems are trained on sentence-aligned and word-aligned parallel text collected from various sources. Translation model parameters are estimated from the word alignments, and the quality of the translations on a given test set depends on the parameter estimates. There are at least two factors affecting the parameter estimation: domain match and training data quality. This paper describes a novel approach for automatically detecting and down-weighing certain parts of the training corpus by assigning a weight to each sentence in the training bitext so as to optimize a discriminative objective function on a designated tuning set. This way, the proposed method can limit the negative effects of low quality training data, and can adapt the translation model to the domain of interest. It is shown that such discriminative corpus weights can provide significant improvements in Arabic-English translation on various conditions, using a state-of-the-art SMT system.", "phrases": ["weight", "machine translation", "meta-information", "development set"], "overall_score": 4.180554555238743, "scores": [2.01645898474096, 1.9383702841146124, 0.8456077618405454, 0.5327631187417695], "rank_score": 1.333300037359472} -{"id": "levy-etal-2015-supervised", "title": "Do Supervised Distributional Methods Really Learn Lexical Inference Relations?", "abstract": "Distributional representations of words have been recently used in supervised settings for recognizing lexical inference relations between word pairs, such as hypernymy and entailment. We investigate a collection of these state-of-the-art methods, and show that they do not actually learn a relation between two words. 
Instead, they learn an independent property of a single word in the pair: whether that word is a \u201cprototypical hypernym\u201d.", "phrases": ["distributional method", "entailment", "hypernym", "lexical memorization", "word embedding"], "overall_score": 4.95129446988598, "scores": [1.9057294331206807, 1.7460998551575606, 1.4933695179900137, 0.9502173288043168, 0.571068830008658], "rank_score": 1.333296993016246} -{"id": "lapata-2003-probabilistic", "title": "Probabilistic Text Structuring: Experiments with Sentence Ordering", "abstract": "Ordering information is a critical task for natural language generation applications. In this paper we propose an approach to information ordering that is particularly suited for text-to-text generation. We describe a model that learns constraints on sentence order from a corpus of domain-specific texts and an algorithm that yields the most likely order among several alternatives. We evaluate the automatically generated orderings against authored texts from our corpus and against human subjects that are asked to mimic the model's task. We also assess the appropriateness of such a model for multidocument summarization.", "phrases": ["sentence ordering", "text-to-text generation", "probabilistic model", "source document"], "overall_score": 4.6208088770014255, "scores": [2.2352489520813243, 1.6446643566486476, 0.8896567213812951, 0.563564411285259], "rank_score": 1.3332836103491315} -{"id": "kepler-etal-2019-openkiwi", "title": "OpenKiwi: An Open Source Framework for Quality Estimation", "abstract": "We introduce OpenKiwi, a Pytorch-based open source framework for translation quality estimation. OpenKiwi supports training and testing of word-level and sentence-level quality estimation systems, implementing the winning systems of the WMT 2015\u201318 quality estimation campaigns. We benchmark OpenKiwi on two datasets from WMT 2018 (English-German SMT and NMT), yielding state-of-the-art performance on the word-level tasks and near state-of-the-art in the sentence-level tasks.", "phrases": ["translator", "openkiwi", "machine-translated sentence"], "overall_score": 3.31187896588275, "scores": [2.929774536933106, 0.5447922921126466, 0.523827596956491], "rank_score": 1.3327981420007478} -{"id": "devlin-etal-2015-language", "title": "Language Models for Image Captioning: The Quirks and What Works", "abstract": "Two recent approaches have achieved state-of-the-art results in image captioning. The first uses a pipelined process where a set of candidate words is generated by a convolutional neural network (CNN) trained on images, and then a maximum entropy (ME) language model is used to arrange these words into a coherent sentence. The second uses the penultimate activation layer of the CNN as input to a recurrent neural network (RNN) that then generates the caption sequence. In this paper, we compare the merits of these different language modeling approaches for the first time by using the same state-of-the-art CNN as input. We examine issues in the different approaches, including linguistic irregularities, caption repetition, and data set overlap. By combining key aspects of the ME and RNN methods, we achieve a new record performance over previously published results on the benchmark COCO dataset. 
However, the gains we see in BLEU do not translate to human judgments.", "phrases": ["image captioning", "cnn", "language model", "dense vector", "output string"], "overall_score": 3.5171277038155027, "scores": [3.249694001067121, 1.1795873701196478, 0.8530294710314716, 0.8283870051005234, 0.5529082523794487], "rank_score": 1.3327212199396425} -{"id": "niu-etal-2020-evaluating", "title": "Evaluating Robustness to Input Perturbations for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) models are sensitive to small perturbations in the input. Robustness to such perturbations is typically measured using translation quality metrics such as BLEU on the noisy input. This paper proposes additional metrics which measure the relative degradation and changes in translation when small perturbations are added to the input. We focus on a class of models employing subword regularization to address robustness and perform extensive evaluations of these models using the robustness measures proposed. Results show that our proposed metrics reveal a clear trend of improved robustness to perturbations when subword regularization methods are used.", "phrases": ["robustness", "neural machine translation", "small perturbation", "nlp model"], "overall_score": 2.9270166527767256, "scores": [2.5441594054373264, 1.7038621386683732, 0.5435845781282687, 0.5369646264607524], "rank_score": 1.3321426871736803} -{"id": "xu-etal-2016-improved", "title": "Improved relation classification by deep recurrent neural networks with data augmentation", "abstract": "Nowadays, neural networks play an important role in the task of relation classification. By designing different neural architectures, researchers have improved the performance to a large extent in comparison with traditional methods. However, existing neural networks for relation classification are usually of shallow architectures (e.g., one-layer convolutional neural networks or recurrent networks). They may fail to explore the potential representation space in different abstraction levels. In this paper, we propose deep recurrent neural networks (DRNNs) for relation classification to tackle this challenge. Further, we propose a data augmentation method by leveraging the directionality of relations. We evaluated our DRNNs on the SemEval-2010 Task 8, and achieve an F1-score of 86.1%, outperforming previous state-of-the-art recorded results.", "phrases": ["relation classification", "recurrent neural network", "data augmentation"], "overall_score": 3.773675911336234, "scores": [1.8243710931191848, 1.3056147125529378, 0.8658402614890496], "rank_score": 1.3319420223870573} -{"id": "hou-etal-2014-rule", "title": "A Rule-Based System for Unrestricted Bridging Resolution: Recognizing Bridging Anaphora and Finding Links to Antecedents", "abstract": "Bridging resolution plays an important role in establishing (local) entity coherence. This paper proposes a rule-based approach for the challenging task of unrestricted bridging resolution, where bridging anaphors are not limited to definite NPs and semantic relations between anaphors and their antecedents are not restricted to meronymic relations. The system consists of eight rules which target different relations based on linguistic insights. Our rule-based system significantly outperforms a reimplementation of a previous rule-based system (Vieira and Poesio, 2000). Furthermore, it performs better than a learning-based approach which has access to the same knowledge resources as the rule-based system. 
Additionally, incorporating the rules and more features into the learning-based system yields a minor improvement over the rule-based system.", "phrases": ["rule-based system", "unrestricted bridging resolution", "anaphor"], "overall_score": 2.769069884510258, "scores": [2.206411480912102, 0.8860992173772406, 0.9024126919685814], "rank_score": 1.3316411300859747} -{"id": "rambow-etal-2004-summarizing", "title": "Summarizing Email Threads", "abstract": "Summarizing threads of email is different from summarizing other types of written communication as it has an inherent dialog structure. We present initial research which shows that sentence extraction techniques can work for email threads as well, but profit from email-specific features. In addition, the presentation of the summary should take into account the dialogic structure of email communication.", "phrases": ["email thread", "sentence extraction technique", "summarization", "discussion issue", "recipient"], "overall_score": 3.6059978523374725, "scores": [3.609953558791461, 1.3403527975148997, 0.6158006852562568, 0.5625621144440917, 0.5292536750947733], "rank_score": 1.3315845662202963} -{"id": "gehrmann-etal-2019-gltr", "title": "GLTR: Statistical Detection and Visualization of Generated Text", "abstract": "The rapid improvement of language models has raised the specter of abuse of text generation systems. This progress motivates the development of simple methods for detecting generated text that can be used by non-experts. In this work, we introduce GLTR, a tool to support humans in detecting whether a text was generated by a model. GLTR applies a suite of baseline statistical methods that can detect generation artifacts across multiple sampling schemes. In a human-subjects study, we show that the annotation scheme provided by GLTR improves the human detection-rate of fake text from 54% to 72% without any prior training. GLTR is open-source and publicly deployed, and has already been widely used to detect generated outputs.", "phrases": ["statistical method", "generation artifact", "gltr", "synthetic text"], "overall_score": 3.8479996031141726, "scores": [3.2865025870932554, 0.8838705004931824, 0.5791861536635788, 0.5757067458699844], "rank_score": 1.3313164967800002} -{"id": "wang-etal-2018-tree", "title": "A Tree-based Decoder for Neural Machine Translation", "abstract": "Recent advances in Neural Machine Translation (NMT) show that adding syntactic information to NMT systems can improve the quality of their translations. Most existing work utilizes some specific types of linguistically-inspired tree structures, like constituency and dependency parse trees. This is often done via a standard RNN decoder that operates on a linearized target tree structure. However, it is an open question of what specific linguistic formalism, if any, is the best structural representation for NMT. In this paper, we (1) propose an NMT model that can naturally generate the topology of an arbitrary tree structure on the target side, and (2) experiment with various target tree structures. 
Our experiments show the surprising result that our model delivers the best improvements with balanced binary trees constructed without any linguistic knowledge; this model outperforms standard seq2seq models by up to 2.1 BLEU points, and other methods for incorporating target-side syntax by up to 0.7 BLEU.", "phrases": ["neural machine translation", "tree structure", "target sentence"], "overall_score": 2.9250651215041676, "scores": [2.5677898135689894, 0.869794273853633, 0.5561794336240125], "rank_score": 1.3312545070155448} -{"id": "stahlberg-etal-2016-syntactically", "title": "Syntactically Guided Neural Machine Translation", "abstract": "We investigate the use of hierarchical phrase-based SMT lattices in end-to-end neural machine translation (NMT). Weight pushing transforms the Hiero scores for complete translation hypotheses, with the full translation grammar score and full n-gram language model score, into posteriors compatible with NMT predictive probabilities. With a slightly modified NMT beam-search decoder we find gains over both Hiero and NMT decoding alone, with practical advantages in extending NMT to very large input and output vocabularies.", "phrases": ["neural machine translation", "lattice", "hiero", "research track nagoya"], "overall_score": 3.6897545946798322, "scores": [3.05989299580295, 0.8958494569788836, 0.8310003420110368, 0.5364478610489913], "rank_score": 1.3307976639604653} -{"id": "pruksachatkun-etal-2020-intermediate", "title": "Intermediate-Task Transfer Learning with Pretrained Language Models: When and Why Does It Work?", "abstract": "While pretrained models such as BERT have shown large gains across natural language understanding tasks, their performance can be improved by further training the model on a data-rich intermediate task, before fine-tuning it on a target task. However, it is still poorly understood when and why intermediate-task training is beneficial for a given target task. To investigate this, we perform a large-scale study on the pretrained RoBERTa model with 110 intermediate-target task combinations. We further evaluate all trained models with 25 probing tasks meant to reveal the specific skills that drive transfer. We observe that intermediate tasks requiring high-level inference and reasoning abilities tend to work best. We also observe that target task performance is strongly correlated with higher-level abilities such as coreference resolution. However, we fail to observe more granular correlations between probing and target task performance, highlighting the need for further work on broad-coverage probing benchmarks. We also observe evidence that the forgetting of knowledge learned during pretraining may limit our analysis, highlighting the need for further work on transfer learning methods in these settings.", "phrases": ["language model", "intermediate task", "pre-training"], "overall_score": 4.7677802793719675, "scores": [1.9535771414912377, 1.159115395166615, 0.8787307218106019], "rank_score": 1.3304744194894849} -{"id": "kochkina-etal-2018-one", "title": "All-in-one: Multi-task Learning for Rumour Verification", "abstract": "Automatic resolution of rumours is a challenging task that can be broken down into smaller components that make up a pipeline, including rumour detection, rumour tracking and stance classification, leading to the final outcome of determining the veracity of a rumour. 
In previous work, these steps in the process of rumour verification have been developed as separate components where the output of one feeds into the next. We propose a multi-task learning approach that allows joint training of the main and auxiliary tasks, improving the performance of rumour verification. We examine the connection between the dataset properties and the outcomes of the multi-task learning models used.", "phrases": ["multi-task learning", "rumour verification", "stance classification", "target claim", "story"], "overall_score": 4.171500142815682, "scores": [2.496672018857555, 1.8423480999572972, 1.2442275090269994, 0.546107879017564, 0.52270610694192], "rank_score": 1.3304123227602669} -{"id": "yin-etal-2017-document", "title": "Document-Level Multi-Aspect Sentiment Classification as Machine Comprehension", "abstract": "Document-level multi-aspect sentiment classification is an important task for customer relation management. In this paper, we model the task as a machine comprehension problem where pseudo question-answer pairs are constructed by a small number of aspect-related keywords and aspect ratings. A hierarchical iterative attention model is introduced to build aspect-specific representations by frequent and repeated interactions between documents and aspect questions. We adopt a hierarchical architecture to represent both word level and sentence level information, and use the attention operations for aspect questions and documents alternatively with the multiple hop mechanism. Experimental results on the TripAdvisor and BeerAdvocate datasets show that our model outperforms classical baselines. We will release our code and data for the method replicability.", "phrases": ["multi-aspect sentiment classification", "machine comprehension problem", "pseudo question-answer pair", "rating"], "overall_score": 2.7664664536104135, "scores": [3.1124482326388483, 1.14904813729848, 0.5318301068059045, 0.5282301011359964], "rank_score": 1.3303891444698073} -{"id": "graehl-knight-2004-training", "title": "Training Tree Transducers", "abstract": "Many probabilistic models for natural language are now written in terms of hierarchical tree structure. Tree-based modeling still lacks many of the standard tools taken for granted in (finite-state) string-based modeling. The theory of tree transducer automata provides a possible framework to draw on, as it has been worked out in an extensive literature. We motivate the use of tree transducers for natural language and address the training problem for probabilistic tree-to-tree and tree-to-string transducers", "phrases": ["tree transducer", "training problem", "machine translation", "mapping"], "overall_score": 4.567783203031404, "scores": [2.251832786605758, 1.3802080568127293, 1.0912508923950361, 0.5973841211790724], "rank_score": 1.330168964248149} -{"id": "song-etal-2017-amr", "title": "AMR-to-text Generation with Synchronous Node Replacement Grammar", "abstract": "This paper addresses the task of AMR-to-text generation by leveraging synchronous node replacement grammar. During training, graph-to-string rules are learned using a heuristic extraction algorithm. At test time, a graph transducer is applied to collapse input AMRs and generate output sentences. 
Evaluated on a standard benchmark, our method gives the state-of-the-art result.", "phrases": ["node replacement grammar", "input amr", "output sentence", "amr-to-text generation"], "overall_score": 3.4110459738659644, "scores": [3.230191992864183, 0.9931675116953815, 0.5500681909839197, 0.5460472702184711], "rank_score": 1.3298687414404888} -{"id": "lin-etal-2015-modeling", "title": "Modeling Relation Paths for Representation Learning of Knowledge Bases", "abstract": "Representation learning of knowledge bases aims to embed both entities and relations into a low-dimensional space. Most existing methods only consider direct relations in representation learning. We argue that multiple-step relation paths also contain rich inference patterns between entities, and propose a path-based representation learning model. This model considers relation paths as translations between entities for representation learning, and addresses two key challenges: (1) Since not all relation paths are reliable, we design a path-constraint resource allocation algorithm to measure the reliability of relation paths. (2) We represent relation paths via semantic composition of relation embeddings. Experimental results on real-world datasets show that, as compared with baselines, our model achieves significant and consistent improvements on knowledge base completion and relation extraction from text. The source code of this paper can be obtained from https://github.com/mrlyk423/relation_extraction.", "phrases": ["relation path", "representation learning", "knowledge graph"], "overall_score": 4.11027067383322, "scores": [1.8022176130302423, 1.5726057158143047, 0.6143849099820141], "rank_score": 1.3297360796088538} -{"id": "dolan-brockett-2005-automatically", "title": "Automatically Constructing a Corpus of Sentential Paraphrases", "abstract": "An obstacle to research in automatic paraphrase identification and generation is the lack of large-scale, publicly available labeled corpora of sentential paraphrases. This paper describes the creation of the recently-released Microsoft Research Paraphrase Corpus, which contains 5801 sentence pairs, each hand-labeled with a binary judgment as to whether the pair constitutes a paraphrase. The corpus was created using heuristic extraction techniques in conjunction with an SVM-based classifier to select likely sentence-level paraphrases from a large corpus of topic-clustered news data. These pairs were then submitted to human judges, who confirmed that 67% were in fact semantically equivalent. In addition to describing the corpus itself, we explore a number of issues that arose in defining guidelines for the human raters.", "phrases": ["sentential paraphrase", "sentence pair", "heuristic extraction technique"], "overall_score": 4.332202757106561, "scores": [2.0096865334985603, 1.442732593097394, 0.536599651592868], "rank_score": 1.3296729260629407} -{"id": "keller-2010-cognitively", "title": "Cognitively Plausible Models of Human Language Processing", "abstract": "We pose the development of cognitively plausible models of human language processing as a challenge for computational linguistics. Existing models can only deal with isolated phenomena (e.g., garden paths) on small, specifically selected data sets. The challenge is to build models that integrate multiple aspects of human language processing at the syntactic, semantic, and discourse level. Like human language processing, these models should be incremental, predictive, broad coverage, and robust to noise. 
This challenge can only be met if standardized data sets and evaluation measures are developed.", "phrases": ["plausible model", "human language processing", "incrementality", "coverage"], "overall_score": 3.767053701346304, "scores": [3.695548769339074, 0.5744867613925386, 0.5265028123340373, 0.5218803482047224], "rank_score": 1.329604672817593} -{"id": "mcdonald-etal-2011-multi", "title": "Multi-Source Transfer of Delexicalized Dependency Parsers", "abstract": "We present a simple method for transferring dependency parsers from source languages with labeled training data to target languages without labeled training data. We first demonstrate that delexicalized parsers can be directly transferred between languages, producing significantly higher accuracies than unsupervised parsers. We then use a constraint driven learning algorithm where constraints are drawn from parallel corpora to project the final parser. Unlike previous work on projecting syntactic resources, we show that simple methods for introducing multiple source languages can significantly improve the overall quality of the resulting parsers. The projected parsers from our system result in state-of-the-art performance when compared to previously studied unsupervised and projected parsing systems across eight different languages.", "phrases": ["source language", "unsupervised parser", "multi-source transfer", "part-of-speech tag", "cross-lingual dependency"], "overall_score": 5.873211945118717, "scores": [1.7333645551933847, 2.3258119188810427, 1.1252918016824032, 0.8511384704225412, 0.6100413620099424], "rank_score": 1.329129621637863} -{"id": "lau-etal-2014-learning", "title": "Learning Word Sense Distributions, Detecting Unattested Senses and Identifying Novel Senses Using Topic Models", "abstract": "Unsupervised word sense disambiguation (WSD) methods are an attractive approach to all-words WSD due to their non-reliance on expensive annotated data. Unsupervised estimates of sense frequency have been shown to be very useful for WSD due to the skewed nature of word sense distributions. This paper presents a fully unsupervised topic modelling-based approach to sense frequency estimation, which is highly portable to different corpora and sense inventories, in being applicable to any part of speech, and not requiring a hierarchical sense inventory, parsing or parallel text. We demonstrate the effectiveness of the method over the tasks of predominant sense learning and sense distribution acquisition, and also the novel tasks of detecting senses which aren\u2019t attested in the corpus, and identifying novel senses in the corpus which aren\u2019t captured in the sense inventory.", "phrases": ["novel sense", "estimation", "unsupervised topic"], "overall_score": 2.7637165910971557, "scores": [2.846139471735672, 0.6053324673368632, 0.5357282813258828], "rank_score": 1.329066740132806} -{"id": "boltuzic-snajder-2014-back", "title": "Back up your Stance: Recognizing Arguments in Online Discussions", "abstract": "In online discussions, users often back up their stance with arguments. Their arguments are often vague, implicit, and poorly worded, yet they provide valuable insights into reasons underpinning users\u2019 opinions. In this paper, we make a first step towards argument-based opinion mining from online discussions and introduce a new task of argument recognition. We match user-created comments to a set of predefined topic-based arguments, which can be either attacked or supported in the comment. 
We present a manually-annotated corpus for argument recognition in online discussions. We describe a supervised model based on comment-argument similarity and entailment features. Depending on problem formulation, model performance ranges from 70.5% to 81.8% F1-score, and decreases only marginally when applied to an unseen topic.", "phrases": ["stance", "online discussion", "argument mining", "textual entailment"], "overall_score": 3.9129249984001673, "scores": [2.198371953009527, 1.97978177132359, 0.6026874530718844, 0.5348403851423078], "rank_score": 1.3289203906368274} -{"id": "kaji-kitsuregawa-2014-accurate", "title": "Accurate Word Segmentation and POS Tagging for Japanese Microblogs: Corpus Annotation and Joint Modeling with Lexical Normalization", "abstract": "Microblogs have recently received widespread interest from NLP researchers. However, current tools for Japanese word segmentation and POS tagging still perform poorly on microblog texts. We developed an annotated corpus and proposed a joint model for overcoming this situation. Our annotated corpus of microblog texts enables not only training of accurate statistical models but also quantitative evaluation of their performance. Our joint model with lexical normalization handles the orthographic diversity of microblog texts. We conducted an experiment to demonstrate that the corpus and model substantially contribute to boosting accuracy.", "phrases": ["pos tagging", "microblog", "lexical normalization"], "overall_score": 2.7627672747237866, "scores": [2.2892337560482163, 0.8533693503677289, 0.8432275399583783], "rank_score": 1.3286102154581079} -{"id": "li-etal-2014-constructing", "title": "Constructing Information Networks Using One Single Model", "abstract": "In this paper, we propose a new framework that unifies the output of three information extraction (IE) tasks - entity mentions, relations and events as an information network representation, and extracts all of them using one single joint model based on structured prediction. This novel formulation allows different parts of the information network to fully interact with each other. For example, many relations can now be considered as the resultant states of events. Our approach achieves substantial improvements over traditional pipelined approaches, and significantly advances state-of-the-art end-to-end event argument extraction.", "phrases": ["information network", "entity mention", "joint model"], "overall_score": 3.506195454604234, "scores": [2.5862211683415213, 0.8474832431218107, 0.5520318003931121], "rank_score": 1.3285787372854814} -{"id": "xue-etal-2015-conll", "title": "The CoNLL-2015 Shared Task on Shallow Discourse Parsing", "abstract": "The CoNLL-2015 Shared Task is on Shallow Discourse Parsing, a task focusing on identifying individual discourse relations that are present in a natural language text. A discourse relation can be expressed explicitly or implicitly, and takes two arguments realized as sentences, clauses, or in some rare cases, phrases. Sixteen teams from three continents participated in this task. For the first time in the history of the CoNLL shared tasks, participating teams, instead of running their systems on the test set and submitting the output, were asked to deploy their systems on a remote virtual machine and use a web-based evaluation platform to run their systems on the test set. This meant they were unable to actually see the data set, thus preserving its integrity and ensuring its replicability. 
In this paper, we present the task definition, the training and test sets, and the evaluation protocol and metric used during this shared task. We also summarize the different approaches adopted by the participating teams, and present the evaluation results. The evaluation data sets and the scorer will serve as a benchmark for future research on shallow discourse parsing.", "phrases": ["conll-2015 shared task", "shallow discourse parsing", "individual discourse relation", "clause"], "overall_score": 4.165050067333107, "scores": [3.3632407699519744, 0.8505047479860641, 0.5517361517453216, 0.547939157149664], "rank_score": 1.3283552067082562} -{"id": "shirani-etal-2020-semeval", "title": "SemEval-2020 Task 10: Emphasis Selection for Written Text in Visual Media", "abstract": "In this paper, we present the main findings and compare the results of SemEval-2020 Task 10, Emphasis Selection for Written Text in Visual Media. The goal of this shared task is to design automatic methods for emphasis selection, i.e. choosing candidates for emphasis in textual content to enable automated design assistance in authoring. The main focus is on short text instances for social media, with a variety of examples, from social media posts to inspirational quotes. Participants were asked to model emphasis using plain text with no additional context from the user or other design considerations. SemEval-2020 Emphasis Selection shared task attracted 197 participants in the early phase and a total of 31 teams made submissions to this task. The highest-ranked submission achieved 0.823 Match_m score. The analysis of systems submitted to the task indicates that BERT and RoBERTa were the most common choice of pre-trained models used, and part of speech tag (POS) was the most useful feature. Full results can be found on the task's website.", "phrases": ["emphasis selection", "visual media", "semeval-2020 task"], "overall_score": 2.918527434434517, "scores": [2.2603329025977548, 0.877917277388454, 0.8465870522271142], "rank_score": 1.328279077404441} -{"id": "fitzgerald-etal-2018-large", "title": "Large-Scale QA-SRL Parsing", "abstract": "We present a new large-scale corpus of Question-Answer driven Semantic Role Labeling (QA-SRL) annotations, and the first high-quality QA-SRL parser. Our corpus, QA-SRL Bank 2.0, consists of over 250,000 question-answer pairs for over 64,000 sentences across 3 domains and was gathered with a new crowd-sourcing scheme that we show has high precision and good recall at modest cost. We also present neural models for two QA-SRL subtasks: detecting argument spans for a predicate and generating questions to label the semantic relationship. The best models achieve question accuracy of 82.6% and span-level accuracy of 77.6% (under human evaluation) on the full pipelined QA-SRL prediction task. 
They can also, as we show, be used to gather additional annotations at low cost.", "phrases": ["qa-srl", "semantic role labeling", "scheme"], "overall_score": 3.7628397046122446, "scores": [2.9219840636585905, 0.5317948239739854, 0.5305730629605062], "rank_score": 1.3281173168643607} -{"id": "alabau-etal-2014-casmacat", "title": "CASMACAT: A Computer-assisted Translation Workbench", "abstract": "CASMACAT is a modular, web-based translation workbench that offers advanced functionalities for computer-aided translation and the scientific study of human translation: automatic interaction with machine translation (MT) engines and translation memories (TM) to obtain raw translations or close TM matches for conventional post-editing; interactive translation prediction based on an MT engine\u2019s search graph, detailed recording and replay of edit actions and translator\u2019s gaze (the latter via eye-tracking), and the support of e-pen as an alternative input device. The system is open source software and interfaces with multiple MT systems.", "phrases": ["translation workbench", "e-pen", "interface", "casmacat"], "overall_score": 3.2993582576102343, "scores": [2.0437822999318853, 1.8136647215020323, 0.8770330252323371, 0.5765577066299814], "rank_score": 1.327759438324059} -{"id": "lamproudis-etal-2021-developing", "title": "Developing a Clinical Language Model for Swedish: Continued Pretraining of Generic BERT with In-Domain Data", "abstract": "The use of pretrained language models, fine-tuned to perform a specific downstream task, has become widespread in NLP. Using a generic language model in specialized domains may, however, be sub-optimal due to differences in language use and vocabulary. In this paper, it is investigated whether an existing, generic language model for Swedish can be improved for the clinical domain through continued pretraining with clinical text. The generic and domain-specific language models are fine-tuned and evaluated on three representative clinical NLP tasks: (i) identifying protected health information, (ii) assigning ICD-10 diagnosis codes to discharge summaries, and (iii) sentence-level uncertainty prediction. The results show that continued pretraining on in-domain data leads to improved performance on all three downstream tasks, indicating that there is a potential added value of domain-specific language models for clinical NLP.", "phrases": ["swedish", "in-domain data", "clinical domain"], "overall_score": 1.8401585943164716, "scores": [1.7392578616114456, 1.6587379581982433, 0.58418569789468], "rank_score": 1.3273938392347897} -{"id": "lin-xu-2019-deep", "title": "Deep Unknown Intent Detection with Margin Loss", "abstract": "Identifying the unknown (novel) user intents that have never appeared in the training set is a challenging task in the dialogue system. In this paper, we present a two-stage method for detecting unknown intents. We use bidirectional long short-term memory (BiLSTM) network with the margin loss as the feature extractor. With margin loss, we can learn discriminative deep features by forcing the network to maximize inter-class variance and to minimize intra-class variance. Then, we feed the feature vectors to the density-based novelty detection algorithm, local outlier factor (LOF), to detect unknown intents. 
Experiments on two benchmark datasets show that our method can yield consistent improvements compared with the baseline methods.", "phrases": ["intent detection", "margin loss", "discriminative deep feature", "ood detection", "unknown class"], "overall_score": 4.101405841063494, "scores": [2.360918789101618, 2.2687586411710314, 0.8331309427040477, 0.6030731679650889, 0.5684593051495558], "rank_score": 1.3268681692182684} -{"id": "kumar-talukdar-2020-nile", "title": "NILE : Natural Language Inference with Faithful Natural Language Explanations", "abstract": "The recent growth in the popularity and success of deep learning models on NLP classification tasks has accompanied the need for generating some form of natural language explanation of the predicted labels. Such generated natural language (NL) explanations are expected to be faithful, i.e., they should correlate well with the model's internal decision making. In this work, we focus on the task of natural language inference (NLI) and address the following question: can we build NLI systems which produce labels with high accuracy, while also generating faithful explanations of its decisions? We propose Natural-language Inference over Label-specific Explanations (NILE), a novel NLI method which utilizes auto-generated label-specific NL explanations to produce labels along with its faithful explanation. We demonstrate NILE's effectiveness over previously reported methods through automated and human evaluation of the produced labels and explanations. Our evaluation of NILE also supports the claim that accurate systems capable of providing testable explanations of their decisions can be designed. We discuss the faithfulness of NILE's explanations in terms of sensitivity of the decisions to the corresponding explanations. We argue that explicit evaluation of faithfulness, in addition to label and explanation accuracy, is an important step in evaluating model's explanations. Further, we demonstrate that task-specific probes are necessary to establish such sensitivity.", "phrases": ["natural language inference", "decision making", "faithful explanation", "nile"], "overall_score": 2.758951824605693, "scores": [2.830563181147568, 0.9563029842994923, 0.9795834327962737, 0.5406518889702542], "rank_score": 1.326775371803397} -{"id": "zampieri-etal-2019-predicting", "title": "Predicting the Type and Target of Offensive Posts in Social Media", "abstract": "As offensive content has become pervasive in social media, there has been much research in identifying potentially offensive messages. However, previous work on this topic did not consider the problem as a whole, but rather focused on detecting very specific types of offensive content, e.g., hate speech, cyberbullying, or cyber-aggression. In contrast, here we target several different kinds of offensive content. In particular, we model the task hierarchically, identifying the type and the target of offensive messages in social media. For this purpose, we compiled the Offensive Language Identification Dataset (OLID), a new dataset with tweets annotated for offensive content using a fine-grained three-layer annotation scheme, which we make publicly available. We discuss the main similarities and differences between OLID and pre-existing datasets for hate speech identification, aggression detection, and similar tasks. 
We further experiment with and compare the performance of different machine learning models on OLID.", "phrases": ["offensive language", "social medium", "twitter post"], "overall_score": 5.744451627855785, "scores": [2.1957142634612627, 1.2302925289900253, 0.5533088405975839], "rank_score": 1.3264385443496238} -{"id": "mimno-etal-2011-optimizing", "title": "Optimizing Semantic Coherence in Topic Models", "abstract": "Latent variable models have the potential to add value to large document collections by discovering interpretable, low-dimensional subspaces. In order for people to use such models, however, they must trust them. Unfortunately, typical dimensionality reduction methods for text, such as latent Dirichlet allocation, often produce low-dimensional subspaces (topics) that are obviously flawed to human domain experts. The contributions of this paper are threefold: (1) An analysis of the ways in which topics can be flawed; (2) an automated evaluation metric for identifying such topics that does not rely on human annotators or reference collections outside the training data; (3) a novel statistical topic model based on this metric that significantly improves topic quality in a large-scale document collection from the National Institutes of Health (NIH).", "phrases": ["semantic coherence", "topic model", "co-document frequency", "pmi"], "overall_score": 4.321131525264486, "scores": [1.8649471256345949, 1.6548970589374126, 0.9243925127454217, 0.860862737076305], "rank_score": 1.3262748585984334} -{"id": "dziri-etal-2019-evaluating", "title": "Evaluating Coherence in Dialogue Systems using Entailment", "abstract": "Evaluating open-domain dialogue systems is difficult due to the diversity of possible correct answers. Automatic metrics such as BLEU correlate weakly with human annotations, resulting in a significant bias across different models and datasets. Some researchers resort to human judgment experimentation for assessing response quality, which is expensive, time consuming, and not scalable. Moreover, judges tend to evaluate a small number of dialogues, meaning that minor differences in evaluation configuration may lead to dissimilar results. In this paper, we present interpretable metrics for evaluating topic coherence by making use of distributed sentence representations. Furthermore, we introduce calculable approximations of human judgment based on conversational coherence by adopting state-of-the-art entailment techniques. Results show that our metrics can be used as a surrogate for human judgment, making it easy to evaluate dialogue systems on large-scale datasets and allowing an unbiased estimate for the quality of the responses.", "phrases": ["coherence", "dialogue system", "entailment"], "overall_score": 3.1801875716442645, "scores": [2.207617211786489, 0.8609501601405639, 0.9101563126143696], "rank_score": 1.3262412281804743} -{"id": "hu-etal-2019-texar", "title": "Texar: A Modularized, Versatile, and Extensible Toolkit for Text Generation", "abstract": "We introduce Texar, an open-source toolkit aiming to support the broad set of text generation tasks that transform any inputs into natural language, such as machine translation, summarization, dialog, content manipulation, and so forth. 
With the design goals of modularity, versatility, and extensibility in mind, Texar extracts common patterns underlying the diverse tasks and methodologies, creates a library of highly reusable modules and functionalities, and allows arbitrary model architectures and algorithmic paradigms. In Texar, model architecture, inference, and learning processes are properly decomposed. Modules at a high concept level can be freely assembled or plugged in/swapped out. Texar is thus particularly suitable for researchers and practitioners to do fast prototyping and experimentation. The versatile toolkit also fosters technique sharing across different text generation tasks. Texar supports both TensorFlow and PyTorch, and is released under Apache License 2.0 at .", "phrases": ["versatility", "text generation task", "texar"], "overall_score": 2.3754121386613654, "scores": [2.7969631318034147, 0.6147630642683787, 0.5655020750054056], "rank_score": 1.325742757025733} -{"id": "kurita-sogaard-2019-multi", "title": "Multi-Task Semantic Dependency Parsing with Policy Gradient for Learning Easy-First Strategies", "abstract": "In Semantic Dependency Parsing (SDP), semantic relations form directed acyclic graphs, rather than trees. We propose a new iterative predicate selection (IPS) algorithm for SDP. Our IPS algorithm combines the graph-based and transition-based parsing approaches in order to handle multiple semantic head words. We train the IPS model using a combination of multi-task learning and task-specific policy gradient training. Trained this way, IPS achieves a new state of the art on the SemEval 2015 Task 18 datasets. Furthermore, we observe that policy gradient training learns an easy-first strategy.", "phrases": ["semantic dependency parsing", "policy gradient", "easy-first strategy"], "overall_score": 2.9124279752617297, "scores": [2.342418452372299, 0.8033429219631106, 0.8307479073774947], "rank_score": 1.3255030939043013} -{"id": "cao-etal-2019-multi", "title": "Multi-Channel Graph Neural Network for Entity Alignment", "abstract": "Entity alignment typically suffers from the issues of structural heterogeneity and limited seed alignments. In this paper, we propose a novel Multi-channel Graph Neural Network model (MuGNN) to learn alignment-oriented knowledge graph (KG) embeddings by robustly encoding two KGs via multiple channels. Each channel encodes KGs via different relation weighting schemes with respect to self-attention towards KG completion and cross-KG attention for pruning exclusive entities respectively, which are further combined via pooling techniques. Moreover, we also infer and transfer rule knowledge for completing two KGs consistently. MuGNN is expected to reconcile the structural differences of two KGs, and thus make better use of seed alignments. Extensive experiments on five publicly available datasets demonstrate our superior performance (5% Hits@1 up on average). Source code and data used in the experiments can be accessed at .", "phrases": ["graph neural network", "entity alignment", "mugnn"], "overall_score": 2.9114706773405854, "scores": [2.2460973127881907, 0.8998506598029128, 0.8292542539427104], "rank_score": 1.3250674088446044} -{"id": "yang-eisenstein-2017-overcoming", "title": "Overcoming Language Variation in Sentiment Analysis with Social Attention", "abstract": "Variation in language is ubiquitous, particularly in newer forms of writing such as social media. Fortunately, variation is not random; it is often linked to social properties of the author. 
In this paper, we show how to exploit social networks to make sentiment analysis more robust to social language variation. The key idea is linguistic homophily: the tendency of socially linked individuals to use language in similar ways. We formalize this idea in a novel attention-based neural network architecture, in which attention is divided among several basis models, depending on the author's position in the social network. This has the effect of smoothing the classification function across the social network, and makes it possible to induce personalized classifiers even for authors for whom there is no labeled data or demographic metadata. This model significantly improves the accuracies of sentiment analysis on Twitter and on review data.", "phrases": ["sentiment analysis", "text classification", "social medium"], "overall_score": 3.395656713485953, "scores": [2.8737865418697077, 0.550206265017027, 0.5476139271111781], "rank_score": 1.3238689113326376}
-{"id": "schwenk-etal-2021-wikimatrix", "title": "WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia", "abstract": "We present an approach based on multilingual sentence embeddings to automatically extract parallel sentences from the content of Wikipedia articles in 96 languages, including several dialects or low-resource languages. We do not limit the extraction process to alignments with English, but we systematically consider all possible language pairs. In total, we are able to extract 135M parallel sentences for 1620 different language pairs, out of which only 34M are aligned with English. This corpus is freely available. To get an indication on the quality of the extracted bitexts, we train neural MT baseline systems on the mined data only for 1886 language pairs, and evaluate them on the TED corpus, achieving strong BLEU scores for many language pairs. The WikiMatrix bitexts seem to be particularly interesting to train MT systems between distant languages without the need to pivot through English.", "phrases": ["mining", "parallel sentence", "wikipedia", "low-resource language", "parallel data"], "overall_score": 3.9657477068765674, "scores": [2.877495315646972, 0.830063222034348, 1.8203935850269992, 0.5698483084389231, 0.521195101073329], "rank_score": 1.3237991064441141}
-{"id": "li-etal-2019-deep", "title": "Deep Reinforcement Learning with Distributional Semantic Rewards for Abstractive Summarization", "abstract": "Deep reinforcement learning (RL) has been a commonly-used strategy for the abstractive summarization task to address both the exposure bias and non-differentiable task issues. However, the conventional reward Rouge-L simply looks for exact n-gram matches between candidates and annotated references, which inevitably makes the generated sentences repetitive and incoherent. In this paper, instead of Rouge-L, we explore the practicability of utilizing the distributional semantics to measure the matching degrees. With distributional semantics, sentence-level evaluation can be obtained, and semantically-correct phrases can also be generated without being limited to the surface form of the reference sentences. 
Human judgments on Gigaword and CNN/Daily Mail datasets show that our proposed distributional semantics reward (DSR) has distinct superiority in capturing the lexical and compositional diversity of natural language.", "phrases": ["reward", "abstractive summarization", "deep reinforcement learning"], "overall_score": 3.174098398300489, "scores": [2.0097101461452986, 0.8531149545392257, 1.108280436292844], "rank_score": 1.3237018456591227}
-{"id": "rai-etal-2016-supervised", "title": "Supervised Metaphor Detection using Conditional Random Fields", "abstract": "In this paper, we propose a novel approach for supervised classification of linguistic metaphors in an open domain text using Conditional Random Fields (CRF). We analyze a CRF-based classification model for metaphor detection using syntactic, conceptual, affective, and word embeddings based features which are extracted from MRC Psycholinguistic Database (MRCPD) and WordNet-Affect. We use word embeddings given by Huang et al. to capture information such as coherence and analogy between words. To tackle the bottleneck of limited coverage of psychological features in MRCPD, we employ synonymy relations from WordNet \u00ae . A comparison of our approach with previous approaches shows the efficacy of the CRF classifier in detecting metaphors. The experiments conducted on the VU Amsterdam metaphor corpus provide an accuracy of more than 92% and an F-measure of approximately 78%. Results show that the inclusion of conceptual features improves recall by 5%, whereas affective features do not have any major impact on metaphor detection in open text.", "phrases": ["metaphor detection", "conditional random fields", "open domain text"], "overall_score": 2.907076910843877, "scores": [2.3739034633998974, 1.0323717382844044, 0.5629279569225969], "rank_score": 1.323067719535633}
-{"id": "hao-etal-2019-multi", "title": "Multi-Granularity Self-Attention for Neural Machine Translation", "abstract": "Current state-of-the-art neural machine translation (NMT) uses a deep multi-head self-attention network with no explicit phrase information. However, prior work on statistical machine translation has shown that extending the basic translation unit from words to phrases has produced substantial improvements, suggesting the possibility of improving NMT performance from explicit modeling of phrases. In this work, we present multi-granularity self-attention (Mg-Sa): a neural network that combines multi-head self-attention and phrase modeling. Specifically, we train several attention heads to attend to phrases in either n-gram or syntactic formalisms. Moreover, we exploit interactions among phrases to enhance the strength of structure modeling \u2013 a commonly-cited weakness of self-attention. Experimental results on WMT14 English-to-German and NIST Chinese-to-English translation tasks show the proposed approach consistently improves performance. Targeted linguistic analysis reveals that Mg-Sa indeed captures useful phrase information at various levels of granularities.", "phrases": ["neural machine translation", "head", "multi-granularity self-attention"], "overall_score": 2.369812438669466, "scores": [2.516744562406614, 0.9085653746851567, 0.5425425777724944], "rank_score": 1.322617504954755}
-{"id": "xu-etal-2020-clue", "title": "CLUE: A Chinese Language Understanding Evaluation Benchmark", "abstract": "The advent of natural language understanding (NLU) benchmarks for English, such as GLUE and SuperGLUE, allows new NLU models to be evaluated across a diverse set of tasks. 
These comprehensive benchmarks have facilitated a broad range of research and applications in natural language processing (NLP). The problem, however, is that most such benchmarks are limited to English, which has made it difficult to replicate many of the successes in English NLU for other languages. To help remedy this issue, we introduce the first large-scale Chinese Language Understanding Evaluation (CLUE) benchmark. CLUE is an open-ended, community-driven project that brings together 9 tasks spanning several well-established single-sentence/sentence-pair classification tasks, as well as machine reading comprehension, all on original Chinese text. To establish results on these tasks, we report scores using an exhaustive set of current state-of-the-art pre-trained Chinese models (9 in total). We also introduce a number of supplementary datasets and additional tools to help facilitate further progress on Chinese NLU. Our benchmark is released at ", "phrases": ["other language", "clue", "pretraining"], "overall_score": 3.0447248332588415, "scores": [2.454626847229156, 0.9648613532927489, 0.5474333814724351], "rank_score": 1.3223071939981133} -{"id": "ji-lin-2009-gender", "title": "Gender and Animacy Knowledge Discovery from Web-Scale N-Grams for Unsupervised Person Mention Detection", "abstract": "In this paper we present a simple approach to discover gender and animacy knowledge for person mention detection. We learn noun-gender and noun-animacy pair counts from web-scale n-grams using specific lexical patterns, and then apply confidence estimation metrics to filter noise. The selected informative pairs are then used to detect person mentions from raw texts in an unsupervised learning framework. Experiments showed that this approach can achieve high performance comparable to state-of-the-art supervised learning methods which require manually annotated corpora and gazetteers.", "phrases": ["web-scale n-gram", "person mention detection", "gender"], "overall_score": 2.3689111718743696, "scores": [2.5042408710297552, 0.860126859633459, 0.6019757644739475], "rank_score": 1.3221144983790538} -{"id": "inui-etal-2003-text", "title": "Text Simplification for Reading Assistance: A Project Note", "abstract": "This paper describes our ongoing research project on text simplification for congenitally deaf people. Text simplification we are aiming at is the task of offering a deaf reader a syntactic and lexical paraphrase of a given text for assisting her/him to understand what it means. In this paper, we discuss the issues we should address to realize text simplification and report on the present results in three different aspects of this task: readability assessment, paraphrase representation and post-transfer error detection.", "phrases": ["aim", "paraphrase", "text simplification"], "overall_score": 3.2844070821543507, "scores": [2.5761844834190772, 0.8444151309765503, 0.544628313705285], "rank_score": 1.321742642700304} -{"id": "peters-etal-2017-semi", "title": "Semi-supervised sequence tagging with bidirectional language models", "abstract": "Pre-trained word embeddings learned from unlabeled text have become a standard component of neural network architectures for NLP tasks. However, in most cases, the recurrent network that operates on word-level representations to produce context sensitive representations is trained on relatively little labeled data. 
In this paper, we demonstrate a general semi-supervised approach for adding pretrained context embeddings from bidirectional language models to NLP systems and apply it to sequence labeling tasks. We evaluate our model on two standard datasets for named entity recognition (NER) and chunking, and in both cases achieve state of the art results, surpassing previous systems that use other forms of transfer or joint learning with additional labeled data and task specific gazetteers.", "phrases": ["language model", "entity recognition", "semi-supervised sequence", "advance", "many nlp task"], "overall_score": 4.938799858912833, "scores": [3.324141000137857, 0.9496097401919006, 0.903641665561919, 0.8695675738014093, 0.5598302978022254], "rank_score": 1.321358055499062} -{"id": "louis-nenkova-2013-makes", "title": "What Makes Writing Great? First Experiments on Article Quality Prediction in the Science Journalism Domain", "abstract": "Great writing is rare and highly admired. Readers seek out articles that are beautifully written, informative and entertaining. Yet information-access technologies lack capabilities for predicting article quality at this level. In this paper we present first experiments on article quality prediction in the science journalism domain. We introduce a corpus of great pieces of science journalism, along with typical articles from the genre. We implement features to capture aspects of great writing, including surprising, visual and emotional content, as well as general features related to discourse organization and sentence structure. We show that the distinction between great and typical articles can be detected fairly accurately, and that the entire spectrum of our features contribute to the distinction.", "phrases": ["article quality", "science journalism domain", "great writing"], "overall_score": 3.5776740559616114, "scores": [2.3068093988597322, 1.0662128066882595, 0.5903541615209648], "rank_score": 1.3211254556896521} -{"id": "van-hee-etal-2015-detection", "title": "Detection and Fine-Grained Classification of Cyberbullying Events", "abstract": "In the current era of online interactions, both positive and negative experiences are abundant on the Web. As in real life, negative experiences can have a serious impact on youngsters. Recent studies have reported cybervictimization rates among teenagers that vary between 20% and 40%. In this paper, we focus on cyberbullying as a particular form of cybervictimization and explore its automatic detection and fine-grained classification. Data containing cyberbullying was collected from the social networking site Ask.fm. We developed and applied a new scheme for cyberbullying annotation, which describes the presence and severity of cyberbullying, a post author's role (harasser, victim or bystander) and a number of fine-grained categories related to cyberbullying, such as insults and threats. We present experimental results on the automatic detection of cyberbullying and explore the feasibility of detecting the more fine-grained cyberbullying categories in online posts. For the first task, an F-score of 55.39% is obtained. We observe that the detection of the fine-grained categories (e.g. 
threats) is more challenging, presumably due to data sparsity, and because they are often expressed in a subtle and implicit way.", "phrases": ["fine-grained classification", "cyberbullying", "victim"], "overall_score": 3.8184680746844077, "scores": [2.533110805163715, 0.8293158073942096, 0.6008712548386725], "rank_score": 1.321099289132199}
-{"id": "shu-etal-2019-generating", "title": "Generating Diverse Translations with Sentence Codes", "abstract": "Users of machine translation systems may desire to obtain multiple candidates translated in different ways. In this work, we attempt to obtain diverse translations by using sentence codes to condition the sentence generation. We describe two methods to extract the codes, either with or without the help of syntax information. For diverse generation, we sample multiple candidates, each of which is conditioned on a unique code. Experiments show that the sampled translations have much higher diversity scores when using reasonable sentence codes, where the translation quality is still on par with the baselines even under the strong constraint imposed by the codes. In qualitative analysis, we show that our method is able to generate paraphrase translations with drastically different structures. The proposed approach can be easily adopted to existing translation systems as no modification to the model is required.", "phrases": ["diversity", "sentence code", "inference time", "bleu score"], "overall_score": 3.5772926166384433, "scores": [3.2084587529100546, 0.9607400223028998, 0.574239864198688, 0.5404997679080457], "rank_score": 1.320984601829922}
-{"id": "yan-etal-2021-consert", "title": "ConSERT: A Contrastive Framework for Self-Supervised Sentence Representation Transfer", "abstract": "Learning high-quality sentence representations benefits a wide range of natural language processing tasks. Though BERT-based pre-trained language models achieve high performance on many downstream tasks, the native derived sentence representations are proved to be collapsed and thus produce a poor performance on the semantic textual similarity (STS) tasks. In this paper, we present ConSERT, a Contrastive Framework for Self-Supervised SEntence Representation Transfer, that adopts contrastive learning to fine-tune BERT in an unsupervised and effective way. By making use of unlabeled texts, ConSERT solves the collapse issue of BERT-derived sentence representations and makes them more applicable for downstream tasks. Experiments on STS datasets demonstrate that ConSERT achieves an 8% relative improvement over the previous state-of-the-art, even comparable to the supervised SBERT-NLI. And when further incorporating NLI supervision, we achieve new state-of-the-art performance on STS tasks. Moreover, ConSERT obtains comparable results with only 1000 samples available, showing its robustness in data scarcity scenarios.", "phrases": ["contrastive framework", "sentence representation", "pre-trained language model", "consert", "data augmentation strategy"], "overall_score": 4.140309749400262, "scores": [2.639458008073968, 0.838891655880125, 1.6209770976767963, 0.8784900085258027, 0.6245072404671845], "rank_score": 1.3204648021247753}
-{"id": "li-etal-2012-active", "title": "Active Learning for Chinese Word Segmentation", "abstract": "Currently, the best performing models for Chinese word segmentation (CWS) are extremely resource-intensive in terms of annotation data quantity. 
One promising solution to minimize the cost of data acquisition is active learning, which aims to actively select the most useful instances to annotate for learning. Active learning on CWS, however, remains challenging due to its inherent nature. In this paper, we propose a Word Boundary Annotation (WBA) model to make effective active learning on CWS possible. This is achieved by annotating only those uncertain boundaries. In this way, the manual annotation cost is largely reduced, compared to annotating the whole character sequence. To further minimize the annotation effort, a diversity measurement among the instances is considered to avoid duplicate annotation. Experimental results show that incorporating the WBA model and the diversity measurement into active learning on CWS can save much annotation cost with little loss in performance.", "phrases": ["chinese word segmentation", "annotation cost", "active learning"], "overall_score": 2.1251087165780316, "scores": [2.5909459278945266, 0.8334415738479962, 0.5368253542986924], "rank_score": 1.3204042853470719}
-{"id": "poesio-artstein-2008-anaphoric", "title": "Anaphoric Annotation in the ARRAU Corpus", "abstract": "Arrau is a new corpus annotated for anaphoric relations, with information about agreement and explicit representation of multiple antecedents for ambiguous anaphoric expressions and discourse antecedents for expressions which refer to abstract entities such as events, actions and plans. The corpus contains texts from different genres: task-oriented dialogues from the Trains-91 and Trains-93 corpus, narratives from the English Pear Stories corpus, newspaper articles from the Wall Street Journal portion of the Penn Treebank, and mixed text from the Gnome corpus.", "phrases": ["arrau corpus", "antecedent", "task-oriented dialogue", "anaphora", "discourse deixis"], "overall_score": 4.301547601202513, "scores": [3.0300176077223906, 0.9674756724391905, 0.9135972815179666, 0.8653137064436511, 0.8249157833303383], "rank_score": 1.3202640102907075}
-{"id": "yin-roth-2018-twowingos", "title": "TwoWingOS: A Two-Wing Optimization Strategy for Evidential Claim Verification", "abstract": "Determining whether a given claim is supported by evidence is a fundamental NLP problem that is best modeled as Textual Entailment. However, given a large collection of text, finding evidence that could support or refute a given claim is a challenge in itself, amplified by the fact that different evidence might be needed to support or refute a claim. Nevertheless, most prior work decouples evidence finding from determining the truth value of the claim given the evidence. We propose to consider these two aspects jointly. We develop TwoWingOS (two-wing optimization strategy), a system that, while identifying appropriate evidence for a claim, also determines whether or not the claim is supported by the evidence. Given the claim, TwoWingOS attempts to identify a subset of the evidence candidates; given the predicted evidence, it then attempts to determine the truth value of the corresponding claim entailment problem. We treat this problem as coupled optimization problems, training a joint model for it. TwoWingOS offers two advantages: (i) Unlike pipeline systems it facilitates flexible-size evidence set, and (ii) Joint training improves both the claim entailment and the evidence identification. 
Experiments on a benchmark dataset show state-of-the-art performance.", "phrases": ["optimization strategy", "claim", "evidence identification", "twowingos"], "overall_score": 3.5751399073480497, "scores": [2.1574881574398486, 0.8283687165194237, 1.1681321831467237, 1.1267696317734404], "rank_score": 1.320189672219859}
-{"id": "li-hovy-2014-model", "title": "A Model of Coherence Based on Distributed Sentence Representation", "abstract": "Coherence is what makes a multi-sentence text meaningful, both logically and syntactically. To solve the challenge of ordering a set of sentences into coherent order, existing approaches focus mostly on defining and using sophisticated features to capture the cross-sentence argumentation logic and syntactic relationships. But both argumentation semantics and cross-sentence syntax (such as coreference and tense rules) are very hard to formalize. In this paper, we introduce a neural network model for the coherence task based on distributed sentence representation. The proposed approach learns a syntactico-semantic representation for sentences automatically, using either recurrent or recursive neural networks. The architecture obviates the need for feature engineering and learns sentence representations, which are to some extent able to capture the \u2018rules\u2019 governing coherent sentence structure. The proposed approach outperforms existing baselines and achieves state-of-the-art performance in standard coherence evaluation tasks.", "phrases": ["coherence", "sentence representation", "network model", "recursive neural network", "readability assessment"], "overall_score": 4.016738658726724, "scores": [3.071690430940883, 1.6030564229536517, 0.8749498337934596, 0.5238646150096519, 0.5231032660323628], "rank_score": 1.319332913746002}
-{"id": "xue-palmer-2004-calibrating", "title": "Calibrating Features for Semantic Role Labeling", "abstract": "This paper takes a critical look at the features used in the semantic role tagging literature and shows that the information in the input, generally a syntactic parse tree, has yet to be fully exploited. We propose an additional set of features and our experiments show that these features lead to fairly significant improvements in the tasks we performed. We further show that different features are needed for different subtasks. Finally, we show that by using a Maximum Entropy classifier and fewer features, we achieved results comparable with the best previously reported results obtained with SVM models. We believe this is a clear indication that developing features that capture the right kind of information is crucial to advancing the state-of-the-art in semantic analysis.", "phrases": ["semantic role labeling", "maximum entropy classifier", "srl", "candidate", "argument identification"], "overall_score": 4.571238146431936, "scores": [2.55531524679686, 1.6399642887369292, 0.9249828365382573, 0.9084712541383806, 0.5661689783693844], "rank_score": 1.3189805209159622}
-{"id": "mairesse-walker-2011-controlling", "title": "Controlling User Perceptions of Linguistic Style: Trainable Generation of Personality Traits", "abstract": "Recent work in natural language generation has begun to take linguistic variation into account, developing algorithms that are capable of modifying the system's linguistic style based either on the user's linguistic style or other factors, such as personality or politeness. 
While stylistic control has traditionally relied on handcrafted rules, statistical methods are likely to be needed for generation systems to scale to the production of the large range of variation observed in human dialogues. Previous work on statistical natural language generation (SNLG) has shown that the grammaticality and naturalness of generated utterances can be optimized from data; however these data-driven methods have not been shown to produce stylistic variation that is perceived by humans in the way that the system intended. This paper describes Personage, a highly parameterizable language generator whose parameters are based on psychological findings about the linguistic reflexes of personality. We present a novel SNLG method which uses parameter estimation models trained on personality-annotated data to predict the generation decisions required to convey any combination of scalar values along the five main dimensions of personality. A human evaluation shows that parameter estimation models produce recognizable stylistic variation along multiple dimensions, on a continuous scale, and without the computational cost incurred by overgeneration techniques.", "phrases": ["linguistic style", "personality trait", "parameterizable language generator"], "overall_score": 3.4806493824905993, "scores": [1.7861199448505003, 1.6474713516069828, 0.5231049186240218], "rank_score": 1.3188987383605018} -{"id": "romanov-etal-2019-adversarial", "title": "Adversarial Decomposition of Text Representation", "abstract": "In this paper, we present a method for adversarial decomposition of text representation. This method can be used to decompose a representation of an input sentence into several independent vectors, each of them responsible for a specific aspect of the input sentence. We evaluate the proposed method on two case studies: the conversion between different social registers and diachronic language change. We show that the proposed method is capable of fine-grained controlled change of these aspects of the input sentence. It is also learning a continuous (rather than categorical) representation of the style of the sentence, which is more linguistically realistic. The model uses adversarial-motivational training and includes a special motivational loss, which acts opposite to the discriminator and encourages a better decomposition. Furthermore, we evaluate the obtained meaning embeddings on a downstream task of paraphrase detection and show that they significantly outperform the embeddings of a regular autoencoder.", "phrases": ["decomposition", "text representation", "adversarial-motivational training", "special motivational loss", "discriminator"], "overall_score": 2.741512759341806, "scores": [2.3295540178621854, 1.930767936577871, 0.9029154003166234, 0.8439044337762924, 0.5848029821940981], "rank_score": 1.3183889541454141} -{"id": "zhang-etal-2021-bstc", "title": "BSTC: A Large-Scale Chinese-English Speech Translation Dataset", "abstract": "This paper presents BSTC (Baidu Speech Translation Corpus), a large-scale Chinese-English speech translation dataset. This dataset is constructed based on a collection of licensed videos of talks or lectures, including about 68 hours of Mandarin data, their manual transcripts and translations into English, as well as automated transcripts by an automatic speech recognition (ASR) model. We have further asked three experienced interpreters to simultaneously interpret the testing talks in a mock conference setting. 
This corpus is expected to promote the research of automatic simultaneous translation as well as the development of practical systems. We have organized simultaneous translation tasks and used this corpus to evaluate automatic simultaneous translation systems.", "phrases": ["speech translation dataset", "hour", "bstc"], "overall_score": 2.361863419530126, "scores": [2.124602244462938, 0.9612574127783036, 0.868683561466048], "rank_score": 1.31818107290243}
-{"id": "lee-etal-2015-overview", "title": "Overview of the NLP-TEA 2015 Shared Task for Chinese Grammatical Error Diagnosis", "abstract": "This paper introduces the NLP-TEA 2015 shared task for Chinese grammatical error diagnosis. We describe the task, data preparation, performance metrics, and evaluation results. The hope is that such an evaluation campaign may produce more advanced Chinese grammatical error diagnosis techniques. All data sets with gold standards and evaluation tools are publicly available for research purposes.", "phrases": ["nlp-tea", "shared task", "grammatical error diagnosis"], "overall_score": 3.4786264571482204, "scores": [1.7185112053012366, 0.7869577235531366, 1.4489276863059655], "rank_score": 1.3181322050534463}
-{"id": "ren-etal-2019-generating", "title": "Generating Natural Language Adversarial Examples through Probability Weighted Word Saliency", "abstract": "We address the problem of adversarial attacks on text classification, which is rarely studied compared to attacks on image classification. The challenge of this task is to generate adversarial examples that maintain lexical correctness, grammatical correctness and semantic similarity. Based on the synonym substitution strategy, we introduce a new word replacement order determined by both the word saliency and the classification probability, and propose a greedy algorithm called probability weighted word saliency (PWWS) for text adversarial attack. Experiments on three popular datasets using convolutional as well as LSTM models show that PWWS reduces the classification accuracy to the greatest extent, and keeps a very low word substitution rate. A human evaluation study shows that our generated adversarial examples maintain the semantic similarity well and are hard for humans to perceive. Performing adversarial training using our perturbed datasets improves the robustness of the models. Finally, our method also exhibits good transferability on the generated adversarial examples.", "phrases": ["adversarial example", "word saliency", "grammatical correctness", "greedy algorithm", "perturbation"], "overall_score": 4.9575154815187705, "scores": [2.237785923913161, 1.5780046199072373, 1.1821959860533375, 1.0438628265739693, 0.5484879415248177], "rank_score": 1.3180674595945046}
-{"id": "schulte-im-walde-etal-2016-role", "title": "The Role of Modifier and Head Properties in Predicting the Compositionality of English and German Noun-Noun Compounds: A Vector-Space Perspective", "abstract": "In this paper, we explore the role of constituent properties in English and German noun-noun compounds (corpus frequencies of the compounds and their constituents; productivity and ambiguity of the constituents; and semantic relations between the constituents), when predicting the degrees of compositionality of the compounds within a vector space model. 
The results demonstrate that the empirical and semantic properties of the compounds and the head nouns play a significant role.", "phrases": ["modifier", "compositionality", "noun-noun compound"], "overall_score": 2.120481726749791, "scores": [2.2800962462474197, 0.881673845460654, 0.7908180330667287], "rank_score": 1.3175293749249342}
-{"id": "pampari-etal-2018-emrqa", "title": "emrQA: A Large Corpus for Question Answering on Electronic Medical Records", "abstract": "We propose a novel methodology to generate domain-specific large-scale question answering (QA) datasets by re-purposing existing annotations for other NLP tasks. We demonstrate an instance of this methodology in generating a large-scale QA dataset for electronic medical records by leveraging existing expert annotations on clinical notes for various NLP tasks from the community shared i2b2 datasets. The resulting corpus (emrQA) has 1 million question-logical form pairs and 400,000+ question-answer evidence pairs. We characterize the dataset and explore its learning potential by training baseline models for question to logical form and question to answer mapping.", "phrases": ["record", "emrqa", "domain knowledge", "question-logical form pair", "clinical question"], "overall_score": 3.807470551344333, "scores": [3.766390516970555, 0.8756430304291731, 0.8322957802355146, 0.5571807389101845, 0.5549619693488467], "rank_score": 1.317294407178855}
-{"id": "li-etal-2020-unified", "title": "A Unified MRC Framework for Named Entity Recognition", "abstract": "The task of named entity recognition (NER) is normally divided into nested NER and flat NER depending on whether named entities are nested or not. Models are usually separately developed for the two tasks, since sequence labeling models, the most widely used backbone for flat NER, are only able to assign a single label to a particular token, which is unsuitable for nested NER where a token may be assigned several labels. In this paper, we propose a unified framework that is capable of handling both flat and nested NER tasks. Instead of treating the task of NER as a sequence labeling problem, we propose to formulate it as a machine reading comprehension (MRC) task. For example, extracting entities with the per label is formalized as extracting answer spans to the question \u201cwhich person is mentioned in the text\u201d. This formulation naturally tackles the entity overlapping issue in nested NER: the extraction of two overlapping entities with different categories requires answering two independent questions. Additionally, since the query encodes informative prior knowledge, this strategy facilitates the process of entity extraction, leading to better performances for not only nested NER, but also flat NER. We conduct experiments on both nested and flat NER datasets. Experiment results demonstrate the effectiveness of the proposed formulation. 
We are able to achieve a substantial performance boost over current SOTA models on nested NER datasets, i.e., +1.28, +2.55, +5.44, +6.37, respectively, on ACE04, ACE05, GENIA and KBP17, along with SOTA results on flat NER datasets, i.e., +0.24, +1.95, +0.21, +1.49, respectively, on English CoNLL 2003, English OntoNotes 5.0, Chinese MSRA and Chinese OntoNotes 4.0.", "phrases": ["mrc", "entity recognition", "nested ner task", "machine reading comprehension", "information extraction"], "overall_score": 4.791020537101745, "scores": [1.93400248595563, 1.6852554223167162, 1.5217269155744142, 0.8997821986705224, 0.5446722313286222], "rank_score": 1.3170878507691812}
-{"id": "song-etal-2019-semantic", "title": "Semantic Neural Machine Translation Using AMR", "abstract": "It is intuitive that semantic representations can be useful for machine translation, mainly because they can help in enforcing meaning preservation and handling data sparsity (many sentences correspond to one meaning) of machine translation models. On the other hand, little work has been done on leveraging semantics for neural machine translation (NMT). In this work, we study the usefulness of AMR (abstract meaning representation) on NMT. Experiments on a standard English-to-German dataset show that incorporating AMR as additional knowledge can significantly improve a strong attention-based sequence-to-sequence neural translation model.", "phrases": ["usefulness", "amr", "neural translation model"], "overall_score": 3.0321442092908675, "scores": [2.785125171009933, 0.6419509651495227, 0.5234543591303117], "rank_score": 1.3168434984299224}
-{"id": "martindale-carpuat-2018-fluency", "title": "Fluency Over Adequacy: A Pilot Study in Measuring User Trust in Imperfect MT", "abstract": "Although measuring intrinsic quality has been a key factor in the advancement of Machine Translation (MT), successfully deploying MT requires considering not just intrinsic quality but also the user experience, including aspects such as trust. This work introduces a method of studying how users modulate their trust in an MT system after seeing errorful (disfluent or inadequate) output amidst good (fluent and adequate) output. We conduct a survey to determine how users respond to good translations compared to translations that are either adequate but not fluent, or fluent but not adequate. In this pilot study, users responded strongly to disfluent translations, but were, surprisingly, much less concerned with adequacy.", "phrases": ["adequacy", "trust", "fluency"], "overall_score": 2.118697000018593, "scores": [1.803973981834617, 1.5838107941270836, 0.5614766096128948], "rank_score": 1.3164204618581985}
-{"id": "liu-etal-2010-semantic", "title": "Semantic Role Labeling for News Tweets", "abstract": "News tweets that report what is happening have become an important real-time information source. We raise the problem of Semantic Role Labeling (SRL) for news tweets, which is meaningful for fine-grained information extraction and retrieval. We present a self-supervised learning approach to train a domain specific SRL system to resolve the problem. A large volume of training data is automatically labeled, by leveraging the existing SRL system on news domain and content similarity between news and news tweets. 
On a human annotated test set, our system achieves state-of-the-art performance, outperforming the SRL system trained on news.", "phrases": ["news tweet", "information extraction", "semantic role labeling"], "overall_score": 1.8247736521747293, "scores": [2.481426365026026, 0.9164528848125015, 0.5510085982674586], "rank_score": 1.3162959493686621} -{"id": "tratz-hovy-2010-isi", "title": "ISI: Automatic Classification of Relations Between Nominals Using a Maximum Entropy Classifier", "abstract": "The automatic interpretation of semantic relations between nominals is an important subproblem within natural language understanding applications and is an area of increasing interest. In this paper, we present the system we used to participate in the SemEval 2010 Task 8 Multi-Way Classification of Semantic Relations between Pairs of Nominals. Our system, based upon a Maximum Entropy classifier trained using a large number of boolean features, received the third highest score.", "phrases": ["nominals", "maximum entropy classifier", "semantic relation"], "overall_score": 2.737152402562888, "scores": [2.4464261262806253, 0.9006788561128074, 0.6017712149413581], "rank_score": 1.3162920657782635} -{"id": "tiedemann-scherrer-2017-neural", "title": "Neural Machine Translation with Extended Context", "abstract": "We investigate the use of extended context in attention-based neural machine translation. We base our experiments on translated movie subtitles and discuss the effect of increasing the segments beyond single translation units. We study the use of extended source language context as well as bilingual context extensions. The models learn to distinguish between information from different segments and are surprisingly robust with respect to translation quality. In this pilot study, we observe interesting cross-sentential attention patterns that improve textual coherence in translation at least in some selected cases.", "phrases": ["extended context", "translation quality", "coherence", "neural machine translation", "consecutive sentence"], "overall_score": 4.854510644479356, "scores": [3.1204539727773954, 0.85296417620774, 0.9158651333113418, 0.8626069916375655, 0.8280355610854209], "rank_score": 1.3159851670038927} -{"id": "smith-etal-2005-context", "title": "Context-Based Morphological Disambiguation with Random Fields", "abstract": "Finite-state approaches have been highly successful at describing the morphological processes of many languages. Such approaches have largely focused on modeling the phone- or character-level processes that generate candidate lexical types, rather than tokens in context. For the full analysis of words in context, disambiguation is also required (Hakkani-Tur et al., 2000; Hajic et al., 2001). In this paper, we apply a novel source-channel model to the problem of morphological disambiguation (segmentation into morphemes, lemmatization, and POS tagging) for concatenative, templatic, and inflectional languages. The channel model exploits an existing morphological dictionary, constraining each word's analysis to be linguistically valid. The source model is a factored, conditionally-estimated random field (Lafferty et al., 2001) that learns to disambiguate the full sentence by modeling local contexts. 
Compared with baseline state-of-the-art methods, our method achieves statistically significant error rate reductions on Korean, Arabic, and Czech, for various training set sizes and accuracy measures.", "phrases": ["morphological disambiguation", "random field", "pos tagging"], "overall_score": 3.472838000871468, "scores": [2.5946308767757063, 0.831617382621119, 0.5215682149551337], "rank_score": 1.3159388247839863}
-{"id": "klein-manning-2003-accurate", "title": "Accurate Unlexicalized Parsing", "abstract": "We demonstrate that an unlexicalized PCFG can parse much more accurately than previously shown, by making use of simple, linguistically motivated state splits, which break down false independence assumptions latent in a vanilla treebank grammar. Indeed, its performance of 86.36% (LP/LR F1) is better than that of early lexicalized PCFG models, and surprisingly close to the current state-of-the-art. This result has potential uses beyond establishing a strong lower bound on the maximum possible accuracy of unlexicalized models: an unlexicalized PCFG is much more compact, easier to replicate, and easier to interpret than more complex lexical models, and the parsing algorithms are simpler, more widely understood, of lower asymptotic complexity, and easier to optimize.", "phrases": ["unlexicalized pcfg", "pcfg", "stanford parser", "symbol", "refinement"], "overall_score": 5.607323401501507, "scores": [1.9090299255081316, 1.7403404967079508, 1.2507414392179965, 1.118251430696844, 0.5588646058663042], "rank_score": 1.3154455795994455}
-{"id": "wang-manning-2012-baselines", "title": "Baselines and Bigrams: Simple, Good Sentiment and Topic Classification", "abstract": "Variants of Naive Bayes (NB) and Support Vector Machines (SVM) are often used as baseline methods for text classification, but their performance varies greatly depending on the model variant, features used and task/dataset. We show that: (i) the inclusion of word bigram features gives consistent gains on sentiment analysis tasks; (ii) for short snippet sentiment tasks, NB actually does better than SVMs (while for longer documents the opposite result holds); (iii) a simple but novel SVM variant using NB log-count ratios as feature values consistently performs well across tasks and datasets. Based on these observations, we identify simple NB and SVM variants which outperform most published results on sentiment analysis datasets, sometimes providing a new state-of-the-art performance level.", "phrases": ["naive bayes", "svm", "bag-of-word", "bow", "document representation"], "overall_score": 4.97778109768442, "scores": [1.6840268621343997, 1.5719624614336816, 1.4063712481491293, 1.3581940760602678, 0.5565220571897092], "rank_score": 1.3154153409934373}
-{"id": "gandrabur-foster-2003-confidence", "title": "Confidence estimation for translation prediction", "abstract": "The purpose of this work is to investigate the use of machine learning approaches for confidence estimation within a statistical machine translation application. Specifically, we attempt to learn probabilities of correctness for various model predictions, based on the native probabilities (i.e. the probabilities given by the original model) and on features of the current context. 
Our experiments were conducted using three original translation models and two types of neural nets (single-layer and multilayer perceptrons) for the confidence estimation task.", "phrases": ["machine translation", "confidence estimation", "quality indicator"], "overall_score": 3.562201902996741, "scores": [2.3891793596587854, 1.0153296830009158, 0.5417271477331653], "rank_score": 1.315412063464289} -{"id": "luo-etal-2004-mention", "title": "A Mention-Synchronous Coreference Resolution Algorithm Based On the Bell Tree", "abstract": "This paper proposes a new approach for coreference resolution which uses the Bell tree to represent the search space and casts the coreference resolution problem as finding the best path from the root of the Bell tree to the leaf nodes. A Maximum Entropy model is used to rank these paths. The coreference performance on the 2002 and 2003 Automatic Content Extraction (ACE) data will be reported. We also train a coreference system using the MUC6 data and competitive results are obtained.", "phrases": ["coreference resolution", "mention", "entity-level feature"], "overall_score": 4.473643399147285, "scores": [1.7149208055717657, 1.7079346400042654, 0.5230873185228537], "rank_score": 1.3153142546996284} -{"id": "choubey-etal-2018-identifying", "title": "Identifying the Most Dominant Event in a News Article by Mining Event Coreference Relations", "abstract": "Identifying the most dominant and central event of a document, which governs and connects other foreground and background events in the document, is useful for many applications, such as text summarization, storyline generation and text segmentation. We observed that the central event of a document usually has many coreferential event mentions that are scattered throughout the document for enabling a smooth transition of subtopics. Our empirical experiments, using gold event coreference relations, have shown that the central event of a document can be well identified by mining properties of event coreference chains. But the performance drops when switching to system predicted event coreference relations. In addition, we found that the central event can be more accurately identified by further considering the number of sub-events as well as the realis status of an event.", "phrases": ["dominant event", "event coreference relation", "news article"], "overall_score": 2.888557256422677, "scores": [1.6770415046793283, 1.3016689726058495, 0.9652067074450967], "rank_score": 1.3146390615767582} -{"id": "bostan-etal-2020-goodnewseveryone", "title": "GoodNewsEveryone: A Corpus of News Headlines Annotated with Emotions, Semantic Roles, and Reader Perception", "abstract": "Most research on emotion analysis from text focuses on the task of emotion classification or emotion intensity regression. Fewer works address emotions as a phenomenon to be tackled with structured learning, which can be explained by the lack of relevant datasets. We fill this gap by releasing a dataset of 5000 English news headlines annotated via crowdsourcing with their associated emotions, the corresponding emotion experiencers and textual cues, related emotion causes and targets, as well as the reader's perception of the emotion of the headline. This annotation task is comparably challenging, given the large number of classes and roles to be identified. We therefore propose a multiphase annotation procedure in which we first find relevant instances with emotional content and then annotate the more fine-grained aspects. 
Finally, we develop a baseline for the task of automatic prediction of semantic role structures and discuss the results. The corpus we release enables further research on emotion classification, emotion intensity prediction, emotion cause detection, and supports further qualitative studies.", "phrases": ["emotion", "reader", "experiencer", "cue", "goodnewseveryone"], "overall_score": 3.151577475813985, "scores": [3.438947474637348, 0.8350226702603234, 0.9010541941975144, 0.8525866790750485, 0.5439384393172464], "rank_score": 1.314309891497496}
-{"id": "huang-etal-2012-structured", "title": "Structured Perceptron with Inexact Search", "abstract": "Most existing theory of structured prediction assumes exact inference, which is often intractable in many practical problems. This leads to the routine use of approximate inference such as beam search but there is not much theory behind it. Based on the structured perceptron, we propose a general framework of \"violation-fixing\" perceptrons for inexact search with a theoretical guarantee for convergence under new separability conditions. This framework subsumes and justifies the popular heuristic \"early-update\" for perceptron with beam search (Collins and Roark, 2004). We also propose several new update methods within this framework, among which the \"max-violation\" method dramatically reduces training time (by 3 fold as compared to early-update) on state-of-the-art part-of-speech tagging and incremental parsing systems.", "phrases": ["inexact search", "collins", "tagging"], "overall_score": 3.558868159071172, "scores": [2.8549689179621973, 0.5589554984075477, 0.5286186254350211], "rank_score": 1.3141810139349221}
-{"id": "jia-etal-2019-cross", "title": "Cross-Domain NER using Cross-Domain Language Modeling", "abstract": "Due to limitation of labeled resources, cross-domain named entity recognition (NER) has been a challenging task. Most existing work considers a supervised setting, making use of labeled data for both the source and target domains. A disadvantage of such methods is that they cannot train for domains without NER data. To address this issue, we consider using cross-domain LM as a bridge across domains for NER domain adaptation, performing cross-domain and cross-task knowledge transfer by designing a novel parameter generation network. Results show that our method can effectively extract domain differences from cross-domain LM contrast, allowing unsupervised domain adaptation while also giving state-of-the-art results among supervised domain adaptation methods.", "phrases": ["domain adaptation", "cross-task knowledge transfer", "cross-domain ner", "language modeling task"], "overall_score": 3.999953749639056, "scores": [2.406007333152919, 1.4183313582889392, 0.8489184862600659, 0.582021877006003], "rank_score": 1.3138197636769817}
-{"id": "feng-etal-2021-multidoc2dial", "title": "MultiDoc2Dial: Modeling Dialogues Grounded in Multiple Documents", "abstract": "We propose MultiDoc2Dial, a new task and dataset on modeling goal-oriented dialogues grounded in multiple documents. Most previous works treat document-grounded dialogue modeling as machine reading comprehension task based on a single given document or passage. In this work, we aim to address more realistic scenarios where a goal-oriented information-seeking conversation involves multiple topics, and hence is grounded on different documents. To facilitate such task, we introduce a new dataset that contains dialogues grounded in multiple documents from four different domains. 
We also explore modeling the dialogue-based and document-based contexts in the dataset. We present strong baseline approaches and various experimental results, aiming to support further research efforts on such a task.", "phrases": ["multiple document", "conversation", "dialdoc"], "overall_score": 2.88673734550906, "scores": [2.8294855184438132, 0.556271813497749, 0.5556750213349022], "rank_score": 1.313810784425488}
-{"id": "borin-etal-2012-korp", "title": "Korp \u2014 the corpus infrastructure of Spr\u00e5kbanken", "abstract": "We present Korp, the corpus infrastructure of Spr\u00e5kbanken (the Swedish Language Bank). The infrastructure consists of three main components: the Korp corpus pipeline, the Korp backend, and the Korp frontend. The Korp corpus pipeline is used for importing corpora, annotating them, and then exporting the annotated corpora into different formats. An essential feature of the pipeline is the ability to leave existing annotations untouched, both structural and word level annotations, and to use the existing annotations as the foundation of other annotations. The Korp backend consists of a set of REST-based web services for searching in and retrieving information about the corpora. Finally, the Korp frontend is a graphical search interface that interacts with the Korp backend. The interface has been inspired by corpus search interfaces such as SketchEngine, Glossa, and DeepDict, and it uses State Chart XML (SCXML) in order to enable users to bookmark interaction states. We give a functional and technical overview of the three components, followed by a discussion of planned future work.", "phrases": ["corpus infrastructure", "spr\u00e5kbanken", "korp"], "overall_score": 2.352894806484382, "scores": [2.1473961106968287, 0.9303449421309565, 0.8617857311407366], "rank_score": 1.313175594656174}
-{"id": "cherry-2008-cohesive", "title": "Cohesive Phrase-Based Decoding for Statistical Machine Translation", "abstract": "Phrase-based decoding produces state-of-the-art translations with no regard for syntax. We add syntax to this process with a cohesion constraint based on a dependency tree for the source sentence. The constraint allows the decoder to employ arbitrary, non-syntactic phrases, but ensures that those phrases are translated in an order that respects the source tree\u2019s structure. In this way, we target the phrasal decoder\u2019s weakness in order modeling, without affecting its strengths. To further increase flexibility, we incorporate cohesion as a decoder feature, creating a soft constraint. The resulting cohesive, phrase-based decoder is shown to produce translations that are preferred over non-cohesive output in both automatic and human evaluations.", "phrases": ["decoding", "cohesion", "syntactic constraint"], "overall_score": 3.865755562092352, "scores": [1.836368908534707, 1.5084408013839379, 0.5938919471149124], "rank_score": 1.3129005523445192}
-{"id": "xu-etal-2018-unsupervised-cross", "title": "Unsupervised Cross-lingual Transfer of Word Embedding Spaces", "abstract": "Cross-lingual transfer of word embeddings aims to establish the semantic mappings among words in different languages by learning the transformation functions over the corresponding word embedding spaces. Successfully solving this problem would benefit many downstream tasks such as to translate text classification models from resource-rich languages (e.g. English) to low-resource languages. 
Supervised methods for this problem rely on the availability of cross-lingual supervision, either using parallel corpora or bilingual lexicons as the labeled data for training, which may not be available for many low resource languages. This paper proposes an unsupervised learning approach that does not require any cross-lingual labeled data. Given two monolingual word embedding spaces for any language pair, our algorithm optimizes the transformation functions in both directions simultaneously based on distributional matching as well as minimizing the back-translation losses. We use a neural network implementation to calculate the Sinkhorn distance, a well-defined distributional similarity measure, and optimize our objective through back-propagation. Our evaluation on benchmark datasets for bilingual lexicon induction and cross-lingual word similarity prediction shows stronger or competitive performance of the proposed method compared to other state-of-the-art supervised and unsupervised baseline methods over many language pairs.", "phrases": ["cross-lingual transfer", "lexicon induction", "many language pair"], "overall_score": 3.8651114408017913, "scores": [2.533981754446372, 0.8535528718164007, 0.5505107550301928], "rank_score": 1.3126817937643218} -{"id": "chen-cardie-2018-multinomial", "title": "Multinomial Adversarial Networks for Multi-Domain Text Classification", "abstract": "Many text classification tasks are known to be highly domain-dependent. Unfortunately, the availability of training data can vary drastically across domains. Worse still, for some domains there may not be any annotated data at all. In this work, we propose a multinomial adversarial network (MAN) to tackle this real-world problem of multi-domain text classification (MDTC) in which labeled data may exist for multiple domains, but in insufficient amounts to train effective classifiers for one or more of the domains. We provide theoretical justifications for the MAN framework, proving that different instances of MANs are essentially minimizers of various f-divergence metrics (Ali and Silvey, 1966) among multiple probability distributions. MANs are thus a theoretically sound generalization of traditional adversarial networks that discriminate over two distributions. More specifically, for the MDTC task, MAN learns features that are invariant across multiple domains by resorting to its ability to reduce the divergence among the feature distributions of each domain. We present experimental results showing that MANs significantly outperform the prior art on the MDTC task. We also show that MANs achieve state-of-the-art performance for domains with no labeled data.", "phrases": ["text classification", "man", "multinomial adversarial network"], "overall_score": 3.0223702884305292, "scores": [2.391801196325747, 1.0037071471923529, 0.5422878720830551], "rank_score": 1.3125987385337183} -{"id": "etchegoyhen-etal-2014-machine", "title": "Machine Translation for Subtitling: A Large-Scale Evaluation", "abstract": "This article describes a large-scale evaluation of the use of Statistical Machine Translation for professional subtitling. The work was carried out within the FP7 EU-funded project SUMAT and involved two rounds of evaluation: a quality evaluation and a measure of productivity gain/loss. We present the SMT systems built for the project and the corpora they were trained on, which combine professionally created and crowd-sourced data. 
Evaluation goals, methodology and results are presented for the eleven translation pairs that were evaluated by professional subtitlers. Overall, a majority of the machine translated subtitles received good quality ratings. The results were also positive in terms of productivity, with a global gain approaching 40%. We also evaluated the impact of applying quality estimation and filtering of poor MT output, which resulted in higher productivity gains for filtered files as opposed to fully machine-translated files. Finally, we present and discuss feedback from the subtitlers who participated in the evaluation, a key aspect for any eventual adoption of machine translation technology in professional subtitling.", "phrases": ["subtitler", "machine translation", "sumat project"], "overall_score": 2.7293013507090182, "scores": [2.4997550938565363, 0.857450842356182, 0.5803435875467324], "rank_score": 1.3125165079198169} -{"id": "turney-2006-similarity", "title": "Similarity of Semantic Relations", "abstract": "There are at least two kinds of similarity. Relational similarity is correspondence between relations, in contrast with attributional similarity, which is correspondence between attributes. When two words have a high degree of attributional similarity, we call them synonyms. When two pairs of words have a high degree of relational similarity, we say that their relations are analogous. For example, the word pair mason:stone is analogous to the pair carpenter:wood. This article introduces Latent Relational Analysis (LRA), a method for measuring relational similarity. LRA has potential applications in many areas, including information extraction, word sense disambiguation, and information retrieval. Recently the Vector Space Model (VSM) of information retrieval has been adapted to measuring relational similarity, achieving a score of 47% on a collection of 374 college-level multiple-choice word analogy questions. In the VSM approach, the relation between a pair of words is characterized by a vector of frequencies of predefined patterns in a large corpus. LRA extends the VSM approach in three ways: (1) The patterns are derived automatically from the corpus, (2) the Singular Value Decomposition (SVD) is used to smooth the frequency data, and (3) automatically generated synonyms are used to explore variations of the word pairs. LRA achieves 56% on the 374 analogy questions, statistically equivalent to the average human score of 57%. On the related problem of classifying semantic relations, LRA achieves similar gains over the VSM.", "phrases": ["relational similarity", "attributional similarity", "wood", "lra", "word sense disambiguation"], "overall_score": 4.588621272179289, "scores": [2.3538663900218, 1.498405966516952, 0.9275788206140395, 0.9100428971058624, 0.8718267164362212], "rank_score": 1.312344158138975} -{"id": "williams-etal-2018-broad", "title": "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference", "abstract": "This paper introduces the Multi-Genre Natural Language Inference (MultiNLI) corpus, a dataset designed for use in the development and evaluation of machine learning models for sentence understanding. At 433k examples, this resource is one of the largest corpora available for natural language inference (a.k.a. recognizing textual entailment), improving upon available resources in both its coverage and difficulty. 
MultiNLI accomplishes this by offering data from ten distinct genres of written and spoken English, making it possible to evaluate systems on nearly the full complexity of the language, while supplying an explicit setting for evaluating cross-genre domain adaptation. In addition, an evaluation using existing machine learning models designed for the Stanford NLI corpus shows that it represents a substantially more difficult task than does that corpus, despite the two showing similar levels of inter-annotator agreement.", "phrases": ["sentence understanding", "entailment", "nli", "contradiction", "large-scale annotated dataset"], "overall_score": 5.947535434259469, "scores": [0.9415583756015685, 2.40629535018852, 1.7555908735195793, 0.8441900025018116, 0.6132091736462374], "rank_score": 1.3121687550915433} -{"id": "ye-etal-2020-safer", "title": "SAFER: A Structure-free Approach for Certified Robustness to Adversarial Word Substitutions", "abstract": "State-of-the-art NLP models can often be fooled by human-unaware transformations such as synonymous word substitution. For security reasons, it is of critical importance to develop models with certified robustness that can provably guarantee that the prediction cannot be altered by any possible synonymous word substitution. In this work, we propose a certified robust method based on a new randomized smoothing technique, which constructs a stochastic ensemble by applying random word substitutions on the input sentences, and leverages the statistical properties of the ensemble to provably certify the robustness. Our method is simple and structure-free in that it only requires the black-box queries of the model outputs, and hence can be applied to any pre-trained models (such as BERT) and any types of models (word-level or subword-level). Our method significantly outperforms recent state-of-the-art methods for certified robustness on both IMDB and Amazon text classification tasks. To the best of our knowledge, we are the first work to achieve certified robustness on large systems such as BERT with practically meaningful certified accuracy.", "phrases": ["robustness", "safer", "word substitution attack"], "overall_score": 3.146408634591588, "scores": [1.750973084578914, 1.5819828939903993, 0.6035069732755695], "rank_score": 1.3121543172816275} -{"id": "mason-2004-cormet", "title": "CorMet: A Computational, Corpus-Based Conventional Metaphor Extraction System", "abstract": "CorMet is a corpus-based system for discovering metaphorical mappings between concepts. It does this by finding systematic variations in domain-specific selectional preferences, which are inferred from large, dynamically mined Internet corpora. Metaphors transfer structure from a source domain to a target domain, making some concepts in the target domain metaphorically equivalent to concepts in the source domain. The verbs that select for a concept in the source domain tend to select for its metaphorical equivalent in the target domain. This regularity, detectable with a shallow linguistic analysis, is used to find the metaphorical interconcept mappings, which can then be used to infer the existence of higher-level conventional metaphors. Most other computational metaphor systems use small, hand-coded semantic knowledge bases and work on a few examples. Although CorMet's only knowledge base is WordNet (Fellbaum 1998), it can find the mappings constituting many conventional metaphors and in some cases recognize sentences instantiating those mappings. &#13;
CorMet is tested on its ability to find a subset of the Master Metaphor List (Lakoff, Espenson, and Schwartz 1991).", "phrases": ["metaphor", "corpus-based system", "domain-specific selectional preference"], "overall_score": 4.323680916820118, "scores": [2.002943672278865, 1.0403898225730535, 0.8922504790556401], "rank_score": 1.311861324635853} -{"id": "birch-etal-2007-ccg", "title": "CCG Supertags in Factored Statistical Machine Translation", "abstract": "Combinatorial Categorial Grammar (CCG) supertags present phrase-based machine translation with an opportunity to access rich syntactic information at a word level. The challenge is incorporating this information into the translation process. Factored translation models allow the inclusion of supertags as a factor in the source or target language. We show that this results in an improvement in the quality of translation and that the value of syntactic supertags in flat structured phrase-based models is largely due to better local reorderings.", "phrases": ["supertag", "factor", "combinatorial categorial grammar", "translation model", "ccg"], "overall_score": 3.8622540136702033, "scores": [1.6303673997697294, 1.4051075170390706, 1.3903203232735901, 1.3066848297484048, 0.8260766552326557], "rank_score": 1.31171134501269} -{"id": "wan-2008-exploration", "title": "An Exploration of Document Impact on Graph-Based Multi-Document Summarization", "abstract": "The graph-based ranking algorithm has been recently exploited for multi-document summarization by making only use of the sentence-to-sentence relationships in the documents, under the assumption that all the sentences are indistinguishable. However, given a document set to be summarized, different documents are usually not equally important, and moreover, different sentences in a specific document are usually differently important. This paper aims to explore document impact on summarization performance. We propose a document-based graph model to incorporate the document-level information and the sentence-to-document relationship into the graph-based ranking process. Various methods are employed to evaluate the two factors. Experimental results on the DUC2001 and DUC2002 datasets demonstrate the good effectiveness of the proposed model. Moreover, the results show the robustness of the proposed model.", "phrases": ["document impact", "summarization", "document-level information", "graph-based ranking process"], "overall_score": 3.1440578182461247, "scores": [2.6963776750551363, 0.8257337210913775, 0.865368056831677, 0.857216349898928], "rank_score": 1.3111739507192797} -{"id": "baroni-etal-2012-entailment", "title": "Entailment above the word level in distributional semantics", "abstract": "We introduce two ways to detect entailment using distributional semantic representations of phrases. Our first experiment shows that the entailment relation between adjective-noun constructions and their head nouns (big cat |= cat), once represented as semantic vector pairs, generalizes to lexical entailment among nouns (dog |= animal). Our second experiment shows that a classifier fed semantic vector pairs can similarly generalize the entailment relation among quantifier phrases (many dogs |= some dogs) to entailment involving unseen quantifiers (all cats |= several cats). &#13;
Moreover, nominal and quantifier phrase entailment appears to be cued by different distributional correlates, as predicted by the type-based view of entailment in formal semantics.", "phrases": ["quantifier phrase", "entailment", "hypernym"], "overall_score": 4.622031359308354, "scores": [1.977786784671063, 1.1106187708607087, 0.8437204937281421], "rank_score": 1.310708683086638} -{"id": "naseem-etal-2012-selective", "title": "Selective Sharing for Multilingual Dependency Parsing", "abstract": "We present a novel algorithm for multilingual dependency parsing that uses annotations from a diverse set of source languages to parse a new unannotated language. Our motivation is to broaden the advantages of multilingual learning to languages that exhibit significant differences from existing resource-rich languages. The algorithm learns which aspects of the source languages are relevant for the target language and ties model parameters accordingly. The model factorizes the process of generating a dependency tree into two steps: selection of syntactic dependents and their ordering. Being largely language-universal, the selection component is learned in a supervised fashion from all the training languages. In contrast, the ordering decisions are only influenced by languages with similar properties. We systematically model this cross-lingual sharing using typological features. In our experiments, the model consistently outperforms a state-of-the-art multi-lingual parser. The largest improvement is achieved on the non Indo-European languages yielding a gain of 14.4%.", "phrases": ["multilingual dependency parsing", "significant difference", "typological feature", "selective sharing", "cross-lingual transfer"], "overall_score": 4.801620497733734, "scores": [2.1005010552278223, 1.785706264512225, 0.9246884276893844, 0.9054478664690501, 0.83687011843544], "rank_score": 1.3106427464667845} -{"id": "chodorow-etal-2007-detection", "title": "Detection of Grammatical Errors Involving Prepositions", "abstract": "This paper presents ongoing work on the detection of preposition errors of non-native speakers of English. Since prepositions account for a substantial proportion of all grammatical errors by ESL (English as a Second Language) learners, developing an NLP application that can reliably detect these types of errors will provide an invaluable learning resource to ESL students. To address this problem, we use a maximum entropy classifier combined with rule-based filters to detect preposition errors in a corpus of student essays. Although our work is preliminary, we achieve a precision of 0.8 with a recall of 0.3.", "phrases": ["grammatical error", "preposition", "detection"], "overall_score": 3.6324469433011735, "scores": [1.7291964915871307, 0.8500336973910695, 1.3511547045664574], "rank_score": 1.3101282978482192} -{"id": "futrell-etal-2019-neural", "title": "Neural language models as psycholinguistic subjects: Representations of syntactic state", "abstract": "We investigate the extent to which the behavior of neural network language models reflects incremental representations of syntactic state. To do so, we employ experimental methodologies which were originally developed in the field of psycholinguistics to study syntactic representation in the human mind. We examine neural network model behavior on sets of artificial sentences containing a variety of syntactically complex structures. 
These sentences not only test whether the networks have a representation of syntactic state, they also reveal the specific lexical cues that networks use to update these states. We test four models: two publicly available LSTM sequence models of English (Jozefowicz et al., 2016; Gulordava et al., 2018) trained on large datasets; an RNN Grammar (Dyer et al., 2016) trained on a small, parsed dataset; and an LSTM trained on the same small corpus as the RNNG. We find evidence for basic syntactic state representations in all models, but only the models trained on large datasets are sensitive to subtle lexical cues signaling changes in syntactic state.", "phrases": ["syntactic state", "neural language model", "language modeling performance", "human-like representation"], "overall_score": 3.924221765633249, "scores": [2.354194490015813, 1.8225390621482618, 0.5372108626179395, 0.5258052120799931], "rank_score": 1.3099374067155019} -{"id": "gerani-etal-2014-abstractive", "title": "Abstractive Summarization of Product Reviews Using Discourse Structure", "abstract": "We propose a novel abstractive summarization system for product reviews by taking advantage of their discourse structure. First, we apply a discourse parser to each review and obtain a discourse tree representation for every review. We then modify the discourse trees such that every leaf node only contains the aspect words. Second, we aggregate the aspect discourse trees and generate a graph. We then select a subgraph representing the most important aspects and the rhetorical relations between them using a PageRank algorithm, and transform the selected subgraph into an aspect tree. Finally, we generate a natural language summary by applying a template-based NLG framework. Quantitative and qualitative analysis of the results, based on two user studies, show that our approach significantly outperforms extractive and abstractive baselines.", "phrases": ["review", "discourse structure", "summarization system", "sentiment analysis"], "overall_score": 3.9880767852605468, "scores": [2.712784003109572, 1.3750949444477452, 0.5759258795239642, 0.5758698566667251], "rank_score": 1.3099186709370017} -{"id": "angelidis-lapata-2018-summarizing", "title": "Summarizing Opinions: Aspect Extraction Meets Sentiment Prediction and They Are Both Weakly Supervised", "abstract": "We present a neural framework for opinion summarization from online product reviews which is knowledge-lean and only requires light supervision (e.g., in the form of product domain labels and user-provided ratings). Our method combines two weakly supervised components to identify salient opinions and form extractive summaries from multiple reviews: an aspect extractor trained under a multi-task objective, and a sentiment predictor based on multiple instance learning. We introduce an opinion summarization dataset that includes a training set of product reviews from six diverse domains and human-annotated development and test sets with gold standard aspect annotations, salience labels, and opinion summaries. 
Automatic evaluation shows significant improvements over baselines, and a large-scale study indicates that our opinion summaries are preferred by human judges according to multiple criteria.", "phrases": ["opinion", "aspect annotation", "extractive summarization", "weakly-supervised method"], "overall_score": 4.216123160815621, "scores": [3.16063376276505, 0.9535986432458758, 0.5785930385171731, 0.5464237719131767], "rank_score": 1.309812304110319} -{"id": "clark-etal-2019-sentence", "title": "Sentence Mover's Similarity: Automatic Evaluation for Multi-Sentence Texts", "abstract": "For evaluating machine-generated texts, automatic methods hold the promise of avoiding collection of human judgments, which can be expensive and time-consuming. The most common automatic metrics, like BLEU and ROUGE, depend on exact word matching, an inflexible approach for measuring semantic similarity. We introduce methods based on sentence mover's similarity; our automatic metrics evaluate text in a continuous space using word and sentence embeddings. We find that sentence-based metrics correlate with human judgments significantly better than ROUGE, both on machine-generated summaries (average length of 3.4 sentences) and human-authored essays (average length of 7.5). We also show that sentence mover's similarity can be used as a reward when learning a generation model via reinforcement learning; we present both automatic and human evaluations of summaries learned in this way, finding that our approach outperforms ROUGE.", "phrases": ["human judgment", "word matching", "sentence mover"], "overall_score": 3.856451140710789, "scores": [2.8006902031943945, 0.5893122851095649, 0.539219174631507], "rank_score": 1.3097405543118221} -{"id": "cohn-specia-2013-modelling", "title": "Modelling Annotator Bias with Multi-task Gaussian Processes: An Application to Machine Translation Quality Estimation", "abstract": "Annotating linguistic data is often a complex, time consuming and expensive endeavour. Even with strict annotation guidelines, human subjects often deviate in their analyses, each bringing different biases, interpretations of the task and levels of consistency. We present novel techniques for learning from the outputs of multiple annotators while accounting for annotator specific behaviour. These techniques use multi-task Gaussian Processes to learn jointly a series of annotator and metadata specific models, while explicitly representing correlations between models which can be learned directly from data. Our experiments on two machine translation quality estimation datasets show uniform significant accuracy gains from multi-task learning, and consistently outperform strong baselines.", "phrases": ["annotator bias", "multi-task gaussian processes", "regression"], "overall_score": 3.7855960288747537, "scores": [1.9266697863559268, 1.446967499188453, 0.555541739817606], "rank_score": 1.3097263417873286} -{"id": "piskorski-etal-2019-second", "title": "The Second Cross-Lingual Challenge on Recognition, Normalization, Classification, and Linking of Named Entities across Slavic Languages", "abstract": "We describe the Second Multilingual Named Entity Challenge in Slavic languages. The task is recognizing mentions of named entities in Web documents, their normalization, and cross-lingual linking. The Challenge was organized as part of the 7th Balto-Slavic Natural Language Processing Workshop, co-located with the ACL-2019 conference. 
Eight teams participated in the competition, which covered four languages and five entity types. Performance for the named entity recognition task reached 90% F-measure, much higher than reported in the first edition of the Challenge. Seven teams covered all four languages, and five teams participated in the cross-lingual entity linking task. Detailed evaluation information is available on the shared task web page.", "phrases": ["linking", "slavic language", "cross-lingual linking", "edition"], "overall_score": 2.5483581374651747, "scores": [2.129997956229487, 1.273853946944272, 0.9933820795625197, 0.8411541076949957], "rank_score": 1.3095970226078186} -{"id": "deyoung-etal-2020-eraser", "title": "ERASER: A Benchmark to Evaluate Rationalized NLP Models", "abstract": "State-of-the-art models in NLP are now predominantly based on deep neural networks that are opaque in terms of how they come to make predictions. This limitation has increased interest in designing more interpretable deep models for NLP that reveal the `reasoning' behind model outputs. But work in this direction has been conducted on different datasets and tasks with correspondingly unique aims and metrics; this makes it difficult to track progress. We propose the Evaluating Rationales And Simple English Reasoning (ERASER) benchmark to advance research on interpretable models in NLP. This benchmark comprises multiple datasets and tasks for which human annotations of \u201crationales\u201d (supporting evidence) have been collected. We propose several metrics that aim to capture how well the rationales provided by models align with human rationales, and also how faithful these rationales are (i.e., the degree to which provided rationales influenced the corresponding predictions). Our hope is that releasing this benchmark facilitates progress on designing more interpretable NLP systems. The benchmark, code, and documentation are available at ", "phrases": ["rationale", "eraser", "agreement", "e-snli"], "overall_score": 5.069168331996955, "scores": [1.2836915013067582, 2.585760065831991, 0.8388335425734749, 0.5295398626553159], "rank_score": 1.309456243091885} -{"id": "jacovi-goldberg-2020-towards", "title": "Towards Faithfully Interpretable NLP Systems: How Should We Define and Evaluate Faithfulness?", "abstract": "With the growing popularity of deep-learning based NLP models, comes a need for interpretable systems. But what is interpretability, and what constitutes a high-quality interpretation? In this opinion piece we reflect on the current state of interpretability evaluation research. We call for more clearly differentiating between different desired criteria an interpretation should satisfy, and focus on the faithfulness criteria. We survey the literature with respect to faithfulness evaluation, and arrange the current approaches around three assumptions, providing an explicit form to how faithfulness is \u201cdefined\u201d by the community. We provide concrete guidelines on how evaluation of interpretation methods should and should not be conducted. Finally, we claim that the current binary definition for faithfulness sets a potentially unrealistic bar for being considered faithful. &#13;
We call for discarding the binary notion of faithfulness in favor of a more graded one, which we believe will be of greater practical utility.", "phrases": ["faithfulness", "nlp model", "interpretation method", "plausibility", "trustworthiness"], "overall_score": 4.894302174811965, "scores": [2.508367499427617, 2.0856692176104423, 0.8423378329532419, 0.5848545017390666, 0.5260352528389222], "rank_score": 1.309452860913858} -{"id": "akhtar-etal-2019-multi", "title": "Multi-task Learning for Multi-modal Emotion Recognition and Sentiment Analysis", "abstract": "Related tasks often have inter-dependence on each other and perform better when solved in a joint framework. In this paper, we present a deep multi-task learning framework that jointly performs sentiment and emotion analysis both. The multi-modal inputs (i.e. text, acoustic and visual frames) of a video convey diverse and distinctive information, and usually do not have equal contribution in the decision making. We propose a context-level inter-modal attention framework for simultaneously predicting the sentiment and expressed emotions of an utterance. We evaluate our proposed approach on CMU-MOSEI dataset for multi-modal sentiment and emotion analysis. Evaluation results suggest that multi-task learning framework offers improvement over the single-task framework. The proposed approach reports new state-of-the-art performance for both sentiment analysis and emotion analysis.", "phrases": ["emotion recognition", "sentiment analysis", "multi-task learning framework"], "overall_score": 3.35839092184285, "scores": [2.345878814047024, 0.9747116080749004, 0.607429730096475], "rank_score": 1.3093400507394664} -{"id": "sinha-etal-2009-semeval", "title": "SemEval-2010 Task 2: Cross-Lingual Lexical Substitution", "abstract": "In this paper we describe the SemEval-2010 Cross-Lingual Lexical Substitution task, where given an English target word in context, participating systems had to find an alternative substitute word or phrase in Spanish. The task is based on the English Lexical Substitution task run at SemEval-2007. In this paper we provide background and motivation for the task, we describe the data annotation process and the scoring system, and present the results of the participating systems.", "phrases": ["cross-lingual lexical substitution", "semeval task", "identification"], "overall_score": 3.6294240686304273, "scores": [2.779078484433095, 0.6256367455513016, 0.5223988488377254], "rank_score": 1.3090380262740406} -{"id": "ustalov-etal-2017-negative", "title": "Negative Sampling Improves Hypernymy Extraction Based on Projection Learning", "abstract": "We present a new approach to extraction of hypernyms based on projection learning and word embeddings. In contrast to classification-based approaches, projection-based methods require no candidate hyponym-hypernym pairs. While it is natural to use both positive and negative training examples in supervised relation extraction, the impact of negative examples on hypernym prediction was not studied so far. In this paper, we show that explicit negative examples used for regularization of the model significantly improve performance compared to the state-of-the-art approach of Fu et al. &#13;
(2014) on three datasets from different languages.", "phrases": ["hypernymy extraction", "projection learning", "negative example"], "overall_score": 2.876018370452926, "scores": [2.4629485807055467, 0.9366997561241863, 0.5271487690987386], "rank_score": 1.308932368642824} -{"id": "wang-etal-2019-dynamically", "title": "Dynamically Composing Domain-Data Selection with Clean-Data Selection by \u201cCo-Curricular Learning\u201d for Neural Machine Translation", "abstract": "Noise and domain are important aspects of data quality for neural machine translation. Existing research focuses separately on domain-data selection, clean-data selection, or their static combination, leaving the dynamic interaction across them not explicitly examined. This paper introduces a \u201cco-curricular learning\u201d method to compose dynamic domain-data selection with dynamic clean-data selection, for transfer learning across both capabilities. We apply an EM-style optimization procedure to further refine the \u201cco-curriculum\u201d. Experiment results and analysis with two domains demonstrate the effectiveness of the method and the properties of data scheduled by the co-curriculum.", "phrases": ["domain-data selection", "co-curricular learning", "machine translation", "curriculum"], "overall_score": 3.13730437795471, "scores": [2.1463174641210165, 1.6995858763314524, 0.8598308654925552, 0.5276959835118162], "rank_score": 1.3083575473642102} -{"id": "sugawara-etal-2017-evaluation", "title": "Evaluation Metrics for Machine Reading Comprehension: Prerequisite Skills and Readability", "abstract": "Knowing the quality of reading comprehension (RC) datasets is important for the development of natural-language understanding systems. In this study, two classes of metrics were adopted for evaluating RC datasets: prerequisite skills and readability. We applied these classes to six existing datasets, including MCTest and SQuAD, and highlighted the characteristics of the datasets according to each metric and the correlation between the two classes. Our dataset analysis suggests that the readability of RC datasets does not directly affect the question difficulty and that it is possible to create an RC dataset that is easy to read but difficult to answer.", "phrases": ["prerequisite skill", "readability", "comprehension dataset"], "overall_score": 3.1366040268579463, "scores": [2.2964694633678664, 1.052401271898388, 0.5753256995482166], "rank_score": 1.3080654782714902} -{"id": "xu-etal-2019-bert", "title": "BERT Post-Training for Review Reading Comprehension and Aspect-based Sentiment Analysis", "abstract": "Question-answering plays an important role in e-commerce as it allows potential customers to actively seek crucial information about products or services to help their purchase decision making. Inspired by the recent success of machine reading comprehension (MRC) on formal documents, this paper explores the potential of turning customer reviews into a large source of knowledge that can be exploited to answer user questions. We call this problem Review Reading Comprehension (RRC). To the best of our knowledge, no existing work has been done on RRC. In this work, we first build an RRC dataset called ReviewRC based on a popular benchmark for aspect-based sentiment analysis. Since ReviewRC has limited training examples for RRC (and also for aspect-based sentiment analysis), we then explore a novel post-training approach on the popular language model BERT to enhance the performance of fine-tuning of BERT for RRC. &#13;
To show the generality of the approach, the proposed post-training is also applied to some other review-based tasks such as aspect extraction and aspect sentiment classification in aspect-based sentiment analysis. Experimental results demonstrate that the proposed post-training is highly effective.", "phrases": ["review reading comprehension", "sentiment analysis", "post-training approach", "aspect term"], "overall_score": 5.090620719728339, "scores": [2.2599967654543196, 0.8694577020009598, 1.2707256704576084, 0.8319429610901127], "rank_score": 1.3080307747507502} -{"id": "gaman-etal-2020-report", "title": "A Report on the VarDial Evaluation Campaign 2020", "abstract": "This paper presents the results of the VarDial Evaluation Campaign 2020 organized as part of the seventh workshop on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects (VarDial), co-located with COLING 2020. The campaign included three shared tasks each focusing on a different challenge of language and dialect identification: Romanian Dialect Identification (RDI), Social Media Variety Geolocation (SMG), and Uralic Language Identification (ULI). The campaign attracted 30 teams who enrolled to participate in one or multiple shared tasks and 14 of them submitted runs across the three shared tasks. Finally, 11 papers describing participating systems are published in the VarDial proceedings and referred to in this report.", "phrases": ["vardial evaluation campaign", "similar languages", "smg", "uli", "participant"], "overall_score": 3.4519563729860083, "scores": [3.2869867242804767, 1.077354992800974, 1.070237115266609, 0.5589974329208942, 0.5465551942648567], "rank_score": 1.3080262919067622} -{"id": "malmasi-etal-2022-multiconer", "title": "MultiCoNER: A Large-scale Multilingual Dataset for Complex Named Entity Recognition", "abstract": "We present AnonData, a large multilingual dataset for Named Entity Recognition that covers 3 domains (Wiki sentences, questions, and search queries) across 11 languages, as well as multilingual and code-mixing subsets. This dataset is designed to represent contemporary challenges in NER, including low-context scenarios (short and uncased text), syntactically complex entities like movie titles, and long-tail entity distributions. The 26M token dataset is compiled from public resources using techniques such as heuristic-based sentence sampling, template extraction and slotting, and machine translation. We tested the performance of two NER models on our dataset: a baseline XLM-RoBERTa model, and a state-of-the-art NER GEMNET model that leverages gazetteers. The baseline achieves moderate performance (macro-F1=54%). GEMNET, which uses gazetteers, improves significantly (average improvement of macro-F1=+30%) and demonstrates the difficulty of our dataset. AnonData poses challenges even for large pre-trained language models, and we believe that it can help further research in building robust NER systems.", "phrases": ["multilingual dataset", "complex entity", "multiconer"], "overall_score": 2.8736227092646858, "scores": [1.9463205292998687, 1.3955805496990554, 0.5816250897486834], "rank_score": 1.3078420562492024} -{"id": "shi-etal-2022-selective", "title": "Selective Differential Privacy for Language Modeling", "abstract": "With the increasing applications of language models, it has become crucial to protect these models from leaking private information. &#13;
Previous work has attempted to tackle this challenge by training RNN-based language models with differential privacy guarantees. However, applying classical differential privacy to language models leads to poor model performance as the underlying privacy notion is over-pessimistic and provides undifferentiated protection for all tokens in the data. Given that the private information in natural language is sparse (for example, the bulk of an email might not carry personally identifiable information), we propose a new privacy notion, selective differential privacy, to provide rigorous privacy guarantees on the sensitive portion of the data to improve model utility. To realize such a new notion, we develop a corresponding privacy mechanism, Selective-DPSGD, for RNN-based language models. Besides language modeling, we also apply the method to a more concrete application \u2013 dialog systems. Experiments on both language modeling and dialog system building show that the proposed privacy-preserving mechanism achieves better utilities while remaining safe under various privacy attacks compared to the baselines. The data and code are released at to facilitate future research.", "phrases": ["language modeling", "notion", "selective differential privacy"], "overall_score": 1.812430871875971, "scores": [2.457503282911559, 0.9236559121320952, 0.5410183511706316], "rank_score": 1.3073925154047619} -{"id": "pang-etal-2003-syntax", "title": "Syntax-based Alignment of Multiple Translations: Extracting Paraphrases and Generating New Sentences", "abstract": "We describe a syntax-based algorithm that automatically builds Finite State Automata (word lattices) from semantically equivalent translation sets. These FSAs are good representations of paraphrases. They can be used to extract lexical and syntactic paraphrase pairs and to generate new, unseen sentences that express the same meaning as the sentences in the input sets. Our FSAs can also predict the correctness of alternative semantic renderings, which may be used to evaluate the quality of translations.", "phrases": ["multiple translation", "paraphrase", "syntax-based algorithm", "lattice", "entailment"], "overall_score": 4.789352733349637, "scores": [1.830801442036049, 2.38816497508652, 0.9373850712266086, 0.8281747111752463, 0.5519445848683098], "rank_score": 1.3072941568785468} -{"id": "kiperwasser-goldberg-2016-simple", "title": "Simple and Accurate Dependency Parsing Using Bidirectional LSTM Feature Representations", "abstract": "We present a simple and effective scheme for dependency parsing which is based on bidirectional-LSTMs (BiLSTMs). Each sentence token is associated with a BiLSTM vector representing the token in its sentential context, and feature vectors are constructed by concatenating a few BiLSTM vectors. The BiLSTM is trained jointly with the parser objective, resulting in very effective feature extractors for parsing. We demonstrate the effectiveness of the approach by applying it to a greedy transition-based parser as well as to a globally optimized graph-based parser. 
The resulting parsers have very simple architectures, and match or surpass the state-of-the-art accuracies on English and Chinese.", "phrases": ["dependency parsing", "bidirectional lstm", "bilstms", "improved performance", "hand-crafted feature"], "overall_score": 5.41577679343328, "scores": [2.2576254127250315, 1.6252580408228894, 0.9575698885858992, 0.85560882257887, 0.8397822515153055], "rank_score": 1.307168883245599} -{"id": "hedderich-etal-2021-survey", "title": "A Survey on Recent Approaches for Natural Language Processing in Low-Resource Scenarios", "abstract": "Deep neural networks and huge language models are becoming omnipresent in natural language applications. As they are known for requiring large amounts of training data, there is a growing body of work to improve the performance in low-resource settings. Motivated by the recent fundamental changes towards neural models and the popular pre-train and fine-tune paradigm, we survey promising approaches for low-resource natural language processing. After a discussion about the different dimensions of data availability, we give a structured overview of methods that enable learning when training data is sparse. This includes mechanisms to create additional labeled data like data augmentation and distant supervision as well as transfer learning settings that reduce the need for target supervision. A goal of our survey is to explain how these methods differ in their requirements as understanding them is essential for choosing a technique suited for a specific low-resource setting. Further key aspects of this work are to highlight open issues and to outline promising directions for future research.", "phrases": ["survey", "low-resource scenario", "data augmentation", "other language"], "overall_score": 3.2469305081695548, "scores": [2.1696110740725807, 1.6316090745126994, 0.8466061064379466, 0.5788175844088362], "rank_score": 1.3066609598580157} -{"id": "zou-etal-2015-negation", "title": "Negation and Speculation Identification in Chinese Language", "abstract": "Identifying negative or speculative narrative fragments from fact is crucial for natural language processing (NLP) applications. Previous studies on negation and speculation identification in Chinese language suffer much from two problems: corpus scarcity and the bottleneck in fundamental Chinese information processing. To resolve these problems, this paper constructs a Chinese corpus which consists of three sub-corpora from different resources. In order to detect the negative and speculative cues, a sequence labeling model is proposed. Moreover, a bilingual cue expansion method is proposed to increase the coverage in cue detection. In addition, this paper presents a new syntactic structure-based framework to identify the linguistic scope of a cue, instead of the traditional chunking-based framework. Experimental results justify the usefulness of our Chinese corpus and the appropriateness of our syntactic structure-based framework, which obtained significant improvement over the state-of-the-art on negation and speculation identification in Chinese language. &#13;
", "phrases": ["speculation identification", "chinese language", "negation"], "overall_score": 2.3410930734772806, "scores": [2.2960654047072473, 0.8208938172145331, 0.8028075442377904], "rank_score": 1.3065889220531903} -{"id": "callison-burch-2009-fast", "title": "Fast, Cheap, and Creative: Evaluating Translation Quality Using Amazon's Mechanical Turk", "abstract": "Manual evaluation of translation quality is generally thought to be excessively time consuming and expensive. We explore a fast and inexpensive way of doing it using Amazon's Mechanical Turk to pay small sums to a large number of non-expert annotators. For $10 we redundantly recreate judgments from a WMT08 translation task. We find that when combined non-expert judgments have a high level of agreement with the existing gold-standard judgments of machine translation quality, and correlate more strongly with expert judgments than Bleu does. We go on to show that Mechanical Turk can be used to calculate human-mediated translation edit rate (HTER), to conduct reading comprehension experiments with machine translation, and to create high quality reference translations.", "phrases": ["translation quality", "mechanical turk", "expert", "mturk"], "overall_score": 3.913240194653965, "scores": [2.8838749760850293, 0.887888762192968, 0.8395575814173333, 0.6137653533690649], "rank_score": 1.3062716682660989} -{"id": "fadaee-monz-2018-back", "title": "Back-Translation Sampling by Targeting Difficult Words in Neural Machine Translation", "abstract": "Neural Machine Translation has achieved state-of-the-art performance for several language pairs using a combination of parallel and synthetic data. Synthetic data is often generated by back-translating sentences randomly sampled from monolingual data using a reverse translation model. While back-translation has been shown to be very effective in many cases, it is not entirely clear why. In this work, we explore different aspects of back-translation, and show that words with high prediction loss during training benefit most from the addition of synthetic data. We introduce several variations of sampling strategies targeting difficult-to-predict words using prediction losses and frequencies of words. In addition, we also target the contexts of difficult words and sample sentences that are similar in context. Experimental results for the WMT news translation task show that our method improves translation quality by up to 1.7 and 1.2 Bleu points over back-translation using random sampling for German-English and English-German, respectively.", "phrases": ["difficult word", "neural machine translation", "synthetic data", "prediction loss", "back-translation"], "overall_score": 3.1314239920543683, "scores": [1.8661615648490746, 1.8359720852197228, 1.0420300197945846, 0.957149622573429, 0.8282128874116549], "rank_score": 1.3059052359696932} -{"id": "berzak-etal-2016-universal", "title": "Universal Dependencies for Learner English", "abstract": "We introduce the Treebank of Learner English (TLE), the first publicly available syntactic treebank for English as a Second Language (ESL). The TLE provides manually annotated POS tags and Universal Dependency (UD) trees for 5,124 sentences from the Cambridge First Certificate in English (FCE) corpus. The UD annotations are tied to a pre-existing error annotation of the FCE, whereby full syntactic analyses are provided for both the original and error corrected versions of each sentence. &#13;
Further on, we delineate ESL annotation guidelines that allow for consistent syntactic treatment of ungrammatical English. Finally, we benchmark POS tagging and dependency parsing performance on the TLE dataset and measure the effect of grammatical errors on parsing accuracy. We envision the treebank to support a wide range of linguistic and computational research on second language acquisition as well as automatic processing of ungrammatical language. The treebank is available at universaldependencies.org. The annotation manual used in this project and a graphical query engine are available at esltreebank.org.", "phrases": ["learner english", "treebank", "esl", "pos tagging", "universal dependency"], "overall_score": 3.4456727971345864, "scores": [3.2528355471106845, 1.614365289147936, 0.6161476468750583, 0.5242147578673969, 0.5206632557626798], "rank_score": 1.3056452993527512} -{"id": "bikel-2004-intricacies", "title": "Intricacies of Collins' Parsing Model", "abstract": "This article documents a large set of heretofore unpublished details Collins used in his parser, such that, along with Collins' (1999) thesis, this article contains all information necessary to duplicate Collins' benchmark results. Indeed, these as-yet-unpublished details account for an 11% relative increase in error from an implementation including all details to a clean-room implementation of Collins' model. We also show a cleaner and equally well-performing method for the handling of punctuation and conjunction and reveal certain other probabilistic oddities about Collins' parser. We not only analyze the effect of the unpublished details, but also reanalyze the effect of certain well-known details, revealing that bilexical dependencies are barely used by the model and that head choice is not nearly as important to overall parsing performance as once thought. Finally, we perform experiments that show that the true discriminative power of lexicalization appears to lie in the fact that unlexicalized syntactic structures are generated conditioning on the headword and its part of speech.", "phrases": ["collins", "simple likelihood", "part-of-speech category"], "overall_score": 3.9110483274508536, "scores": [2.8664411897359874, 0.5268253982258689, 0.5233534270947399], "rank_score": 1.3055400050188652} -{"id": "teufel-etal-2009-towards", "title": "Towards Domain-Independent Argumentative Zoning: Evidence from Chemistry and Computational Linguistics", "abstract": "Argumentative Zoning (AZ) is an analysis of the argumentative and rhetorical structure of a scientific paper. It has been shown to be reliably used by independent human coders, and has proven useful for various information access tasks. Annotation experiments have however so far been restricted to one discipline, computational linguistics (CL). Here, we present a more informative AZ scheme with 15 categories in place of the original 7, and show that it can be applied to the life sciences as well as to CL. We use a domain expert to encode basic knowledge about the subject (such as terminology and domain specific rules for individual categories) as part of the annotation guidelines. &#13;
Our results show that non-expert human coders can then use these guidelines to reliably annotate this scheme in two domains, chemistry and computational linguistics.", "phrases": ["argumentative zoning", "computational linguistics", "scientific article", "annotation scheme"], "overall_score": 3.7724464161987297, "scores": [2.9404306304225742, 0.883114822757696, 0.8483690889642378, 0.5487930099450642], "rank_score": 1.305176888022393} -{"id": "eisenstein-2013-bad", "title": "What to do about bad language on the internet", "abstract": "The rise of social media has brought computational linguistics in ever-closer contact with bad language: text that defies our expectations about vocabulary, spelling, and syntax. This paper surveys the landscape of bad language, and offers a critical review of the NLP community\u2019s response, which has largely followed two paths: normalization and domain adaptation. Each approach is evaluated in the context of theoretical and empirical work on computer-mediated communication. In addition, the paper presents a quantitative analysis of the lexical diversity of social media text, and its relationship to other corpora.", "phrases": ["bad language", "internet", "social medium", "hashtag", "unique language"], "overall_score": 4.638215028881775, "scores": [2.868750218179127, 0.8063933789342228, 1.4706834162323805, 0.8250701908182482, 0.5519733399631759], "rank_score": 1.304574108825431} -{"id": "eryigit-etal-2008-dependency", "title": "Dependency Parsing of Turkish", "abstract": "The suitability of different parsing methods for different languages is an important topic in syntactic parsing. Especially lesser-studied languages, typologically different from the languages for which methods have originally been developed, pose interesting challenges in this respect. This article presents an investigation of data-driven dependency parsing of Turkish, an agglutinative, free constituent order language that can be seen as the representative of a wider class of languages of similar type. Our investigations show that morphological structure plays an essential role in finding syntactic relations in such a language. In particular, we show that employing sublexical units called inflectional groups, rather than word forms, as the basic parsing units improves parsing accuracy. We test our claim on two different parsing methods, one based on a probabilistic model with beam search and the other based on discriminative classifiers and a deterministic parsing strategy, and show that the usefulness of sublexical units holds regardless of the parsing method. We examine the impact of morphological and lexical information in detail and show that, properly used, this kind of information can improve parsing accuracy substantially. Applying the techniques presented in this article, we achieve the highest reported accuracy for parsing the Turkish Treebank.", "phrases": ["turkish", "inflectional group", "dependency parsing", "previous study", "hindi"], "overall_score": 3.53126221406064, "scores": [3.3522405227279775, 0.9609611237512453, 0.8276371661929515, 0.8240608451784512, 0.555035261788922], "rank_score": 1.3039869839279095} -{"id": "moryossef-etal-2019-step", "title": "Step-by-Step: Separating Planning from Realization in Neural Data-to-Text Generation", "abstract": "Data-to-text generation can be conceptually divided into two parts: ordering and structuring the information (planning), and generating fluent language describing the information (realization). 
Modern neural generation systems conflate these two steps into a single end-to-end differentiable system. We propose to split the generation process into a symbolic text-planning stage that is faithful to the input, followed by a neural generation stage that focuses only on realization. For training a plan-to-text generator, we present a method for matching reference texts to their corresponding text plans. For inference time, we describe a method for selecting high-quality text plans for new inputs. We implement and evaluate our approach on the WebNLG benchmark. Our results demonstrate that decoupling text planning from neural realization indeed improves the system's reliability and adequacy while maintaining fluent output. We observe improvements both in BLEU scores and in manual evaluations. Another benefit of our approach is the ability to output diverse realizations of the same input, paving the way to explicit control over the generated text structure.", "phrases": ["realization", "data-to-text generation", "text planning"], "overall_score": 4.434942655255447, "scores": [2.724684584771073, 0.6166280965851133, 0.5704943891479988], "rank_score": 1.3039356901680617} -{"id": "xiong-litman-2011-automatically", "title": "Automatically Predicting Peer-Review Helpfulness", "abstract": "Identifying peer-review helpfulness is an important task for improving the quality of feedback that students receive from their peers. As a first step towards enhancing existing peer-review systems with new functionality based on helpfulness detection, we examine whether standard product review analysis techniques also apply to our new context of peer reviews. In addition, we investigate the utility of incorporating additional specialized features tailored to peer review. Our preliminary results show that the structural features, review uni-grams and meta-data combined are useful in modeling the helpfulness of both peer reviews and product reviews, while peer-review specific auxiliary features can further improve helpfulness prediction.", "phrases": ["peer-review helpfulness", "feedback", "utility", "specialized feature"], "overall_score": 2.86479535598246, "scores": [2.5750286608127912, 0.9113591587320177, 0.8668338802484963, 0.8620765187539535], "rank_score": 1.3038245546368148} -{"id": "nan-etal-2020-reasoning", "title": "Reasoning with Latent Structure Refinement for Document-Level Relation Extraction", "abstract": "Document-level relation extraction requires integrating information within and across multiple sentences of a document and capturing complex interactions between inter-sentence entities. However, effective aggregation of relevant information in the document remains a challenging research question. Existing approaches construct static document-level graphs based on syntactic trees, co-references or heuristics from the unstructured text to model the dependencies. Unlike previous methods that may not be able to capture rich non-local interactions for inference, we propose a novel model that empowers the relational reasoning across sentences by automatically inducing the latent document-level graph. We further develop a refinement strategy, which enables the model to incrementally aggregate relevant information for multi-hop reasoning. Specifically, our model achieves an F1 score of 59.05 on a large-scale document-level dataset (DocRED), significantly improving over the previous results, and also yields new state-of-the-art results on the CDR and GDA dataset. 
Furthermore, extensive analyses show that the model is able to discover more accurate inter-sentence relations.", "phrases": ["latent structure", "relation extraction", "document-level graph", "reasoning", "mention"], "overall_score": 4.19503235534951, "scores": [1.9503327666192947, 1.676572481428587, 1.4138729548200224, 0.922046016595397, 0.553476165503055], "rank_score": 1.3032600769932712} -{"id": "madnani-dorr-2010-generating", "title": "Generating Phrasal and Sentential Paraphrases: A Survey of Data-Driven Methods", "abstract": "The task of paraphrasing is inherently familiar to speakers of all languages. Moreover, the task of automatically generating or extracting semantic equivalences for the various units of language\u2014words, phrases, and sentences\u2014is an important part of natural language processing (NLP) and is being increasingly employed to improve the performance of several NLP applications. In this article, we attempt to conduct a comprehensive and application-independent survey of data-driven phrasal and sentential paraphrase generation methods, while also conveying an appreciation for the importance and potential use of paraphrases in the field of NLP research. Recent work done in manual and automatic construction of paraphrase corpora is also examined. We also discuss the strategies used for evaluating paraphrase generation techniques and briefly explore some future trends in paraphrase generation.", "phrases": ["sentential paraphrase", "survey", "several nlp application", "same semantic content", "data-driven approach"], "overall_score": 5.123418242524539, "scores": [3.002093015218836, 0.9258968264216633, 0.8806174660638435, 0.8621617768325195, 0.8445481257212258], "rank_score": 1.3030634420516176} -{"id": "hu-etal-2015-improved", "title": "Improved beam search with constrained softmax for NMT", "abstract": "We propose an improved beam search decoding algorithm with constrained softmax operations for neural machine translation (NMT). NMT is a newly emerging approach to predict the best translation by building a neural network instead of a log-linear model. It has achieved comparable translation quality to the existing phrase-based statistical machine translation systems. However, how to perform efficient decoding for NMT is still challenging, especially for commercial systems which provide real-time translation service. Unlike the standard beam search algorithm, we use a priority queue to choose the best hypothesis for the next search, which drastically reduces search space. Another time consuming factor is the softmax operation in the output layer because of the large target vocabulary size. To solve this problem, we introduce a limited word set of translation candidates to greatly reduce the computation complexity. Our experiments show that, under the GPU environment, our method achieves a speed about 3.5 times faster than the well optimized baseline system without sacrificing the translation quality. Our method translates about 117 words per second, beating the real-time translation requirements for practical MT systems.", "phrases": ["beam search", "priority queue", "hypothesis"], "overall_score": 2.0957843198445483, "scores": [2.5219756760439287, 0.8423634365697698, 0.5422129270513206], "rank_score": 1.302184013221673} -{"id": "zhang-etal-2019-amr", "title": "AMR Parsing as Sequence-to-Graph Transduction", "abstract": "We propose an attention-based model that treats AMR parsing as sequence-to-graph transduction. &#13;
Unlike most AMR parsers that rely on pre-trained aligners, external semantic resources, or data augmentation, our proposed parser is aligner-free, and it can be effectively trained with limited amounts of labeled AMR data. Our experimental results outperform all previously reported SMATCH scores, on both AMR 2.0 (76.3% on LDC2017T10) and AMR 1.0 (70.2% on LDC2014T12).", "phrases": ["sequence-to-graph transduction", "smatch score", "amr", "semantic parser", "two-stage"], "overall_score": 4.242117334929201, "scores": [3.9627266879342886, 0.9095025800723564, 0.5790517566469128, 0.5374729223402612, 0.5213621367752597], "rank_score": 1.3020232167538157} -{"id": "ritter-etal-2010-unsupervised", "title": "Unsupervised Modeling of Twitter Conversations", "abstract": "We propose the first unsupervised approach to the problem of modeling dialogue acts in an open domain. Trained on a corpus of noisy Twitter conversations, our method discovers dialogue acts by clustering raw utterances. Because it accounts for the sequential behaviour of these acts, the learned model can provide insight into the shape of communication in a new medium. We address the challenge of evaluating the emergent model with a qualitative visualization and an intrinsic conversation ordering task. This work is inspired by a corpus of 1.3 million Twitter conversations, which will be made publicly available. This huge amount of data, available only because Twitter blurs the line between chatting and publishing, highlights the need to be able to adapt quickly to a new medium.", "phrases": ["twitter", "conversation", "hidden markov model", "additional word source"], "overall_score": 4.956172860654985, "scores": [1.852457372464226, 1.4727280781388494, 1.0440511393935092, 0.8386565436592169], "rank_score": 1.3019732834139501} -{"id": "rubin-2007-stating", "title": "Stating with Certainty or Stating with Doubt: Intercoder Reliability Results for Manual Annotation of Epistemically Modalized Statements", "abstract": "Texts exhibit subtle yet identifiable modality about writers' estimation of how true each statement is (e.g., definitely true or somewhat true). This study is an analysis of such explicit certainty and doubt markers in epistemically modalized statements for a written news discourse. The study systematically accounts for five levels of writer's certainty (ABSOLUTE, HIGH, MODERATE, LOW CERTAINTY and UNCERTAINTY) in three news pragmatic contexts: perspective, focus, and time. The study concludes that independent coders' perceptions of the boundaries between shades of certainty in epistemically modalized statements are highly subjective and present difficulties for manual annotation and consequent automation for opinion extraction and sentiment analysis. While stricter annotation instructions and longer coder training can improve inter-coder agreement results, it is not entirely clear that a five-level distinction of certainty is preferable to a simplistic distinction between statements with certainty and statements with doubt.", "phrases": ["certainty", "manual annotation", "modalized statement"], "overall_score": 2.7073280729919564, "scores": [2.4898289271567893, 0.8249241784098337, 0.5910956793983462], "rank_score": 1.301949594988323} -{"id": "lau-etal-2018-deep", "title": "Deep-speare: A joint neural model of poetic language, meter and rhyme", "abstract": "In this paper, we propose a joint architecture that captures language, rhyme and meter for sonnet modelling. 
We assess the quality of generated poems using crowd and expert judgements. The stress and rhyme models perform very well, as generated poems are largely indistinguishable from human-written poems. Expert evaluation, however, reveals that a vanilla language model captures meter implicitly, and that machine-generated poems still underperform in terms of readability and emotion. Our research shows the importance of expert evaluation for poetry generation, and that future research should look beyond rhyme/meter and focus on poetic language.", "phrases": ["poetic language", "rhyme", "poem", "deep-speare"], "overall_score": 3.235088370294431, "scores": [2.5535266224557036, 0.8920963279058856, 0.8812444599979978, 0.8807139216319907], "rank_score": 1.3018953329978944} -{"id": "liang-etal-2006-end", "title": "An End-to-End Discriminative Approach to Machine Translation", "abstract": "We present a perceptron-style discriminative approach to machine translation in which large feature sets can be exploited. Unlike discriminative reranking approaches, our system can take advantage of learned features in all stages of decoding. We first discuss several challenges to error-driven discriminative approaches. In particular, we explore different ways of updating parameters given a training example. We find that making frequent but smaller updates is preferable to making fewer but larger updates. Then, we discuss an array of features and show both how they quantitatively increase BLEU score and how they qualitatively interact on specific examples. One particular feature we investigate is a novel way to introduce learning into the initial phrase extraction process, which has previously been entirely heuristic.", "phrases": ["machine translation", "training example", "bleu score", "model parameter"], "overall_score": 4.470492308967932, "scores": [3.265149049141239, 0.8414887882169239, 0.5690601874767586, 0.5316508006109059], "rank_score": 1.301837206361457} -{"id": "mohammad-yang-2011-tracking", "title": "Tracking Sentiment in Mail: How Genders Differ on Emotional Axes", "abstract": "With the widespread use of email, we now have access to unprecedented amounts of text that we ourselves have written. In this paper, we show how sentiment analysis can be used in tandem with effective visualizations to quantify and track emotions in many types of mail. We create a large word-emotion association lexicon by crowdsourcing, and use it to compare emotions in love letters, hate mail, and suicide notes. We show that there are marked differences across genders in how they use emotion words in workplace email. For example, women use many words from the joy-sadness axis, whereas men prefer terms from the fear-trust axis. Finally, we show visualizations that can help people track emotions in their emails.", "phrases": ["mail", "gender", "emotion word", "workplace email"], "overall_score": 2.8587034608149446, "scores": [2.912898393133266, 0.7877572267403523, 0.9592838902805718, 0.5442685445011264], "rank_score": 1.3010520136638293} -{"id": "sproat-emerson-2003-first", "title": "The First International Chinese Word Segmentation Bakeoff", "abstract": "This paper presents the results from the ACL-SIGHAN-sponsored First International Chinese Word Segmentation Bakeoff held in 2003 and reported in conjunction with the Second SIGHAN Workshop on Chinese Language Processing, Sapporo, Japan.
We give the motivation for having an international segmentation contest (given that there have been two within-China contests to date) and we report on the results of this first international contest, analyze these results, and make some recommendations for the future.", "phrases": ["chinese", "word segmentation", "sighan"], "overall_score": 4.020284579253698, "scores": [1.7903047570563062, 1.223901290331716, 0.8876665149436186], "rank_score": 1.3006241874438802} -{"id": "soboroff-harman-2005-novelty", "title": "Novelty Detection: The TREC Experience", "abstract": "A challenge for search systems is to detect not only when an item is relevant to the user's information need, but also when it contains something new which the user has not seen before. In the TREC novelty track, the task was to highlight sentences containing relevant and new information in a short, topical document stream. This is analogous to highlighting key parts of a document for another person to read, and this kind of output can be useful as input to a summarization system. Search topics involved both news events and reported opinions on hot-button subjects. When people performed this task, they tended to select small blocks of consecutive sentences, whereas current systems identified many relevant and novel passages. We also found that opinions are much harder to track than events.", "phrases": ["trec", "new information", "novelty detection"], "overall_score": 2.9940354888504657, "scores": [2.232535524771802, 1.0986200464911482, 0.5697237030278365], "rank_score": 1.3002930914302622} -{"id": "perez-rosas-etal-2018-automatic", "title": "Automatic Detection of Fake News", "abstract": "The proliferation of misleading information in everyday access media outlets such as social media feeds, news blogs, and online newspapers has made it challenging to identify trustworthy news sources, thus increasing the need for computational tools able to provide insights into the reliability of online content. In this paper, we focus on the automatic identification of fake content in online news. Our contribution is twofold. First, we introduce two novel datasets for the task of fake news detection, covering seven different news domains. We describe the collection, annotation, and validation process in detail and present several exploratory analyses on the identification of linguistic differences in fake and legitimate news content. Second, we conduct a set of learning experiments to build accurate fake news detectors, and show that we can achieve accuracies of up to 76%. In addition, we provide comparative analyses of the automatic and manual identification of fake news.", "phrases": ["fake news", "linguistic feature", "recent research"], "overall_score": 3.757642774804253, "scores": [2.5267321653567865, 0.8512209782405082, 0.5222123951838016], "rank_score": 1.3000551795936988} -{"id": "castilho-etal-2017-comparative", "title": "A Comparative Quality Evaluation of PBSMT and NMT using Professional Translators", "abstract": "This paper reports on a comparative evaluation of phrase-based statistical machine translation (PBSMT) and neural machine translation (NMT) for four language pairs, using the PET interface to compare educational domain output from both systems using a variety of metrics, including automatic evaluation as well as human rankings of adequacy and fluency, error-type markup, and post-editing (technical and temporal) effort, performed by professional translators.
Our results show a preference for NMT in side-by-side ranking for all language pairs, texts, and segment lengths. In addition, perceived fluency is improved and annotated errors are fewer in the NMT output. Results are mixed for perceived adequacy and for errors of omission, addition, and mistranslation. Despite far fewer segments requiring post-editing, document-level post-editing performance was not found to have significantly improved in NMT compared to PBSMT. This evaluation was conducted as part of the TraMOOC project, which aims to create a replicable semi-automated methodology for high-quality machine translation of educational data.", "phrases": ["pbsmt", "professional translator", "comparative evaluation", "semantic faithfulness"], "overall_score": 3.430739643317133, "scores": [2.435083033410451, 1.3892146343810636, 0.854770909849738, 0.5208785471410525], "rank_score": 1.2999867811955763} -{"id": "lee-etal-2017-fully", "title": "Fully Character-Level Neural Machine Translation without Explicit Segmentation", "abstract": "Most existing machine translation systems operate at the level of words, relying on explicit segmentation to extract tokens. We introduce a neural machine translation (NMT) model that maps a source character sequence to a target character sequence without any segmentation. We employ a character-level convolutional network with max-pooling at the encoder to reduce the length of source representation, allowing the model to be trained at a speed comparable to subword-level models while capturing local regularities. Our character-to-character model outperforms a recently proposed baseline with a subword-level encoder on WMT'15 DE-EN and CS-EN, and gives comparable performance on FI-EN and RU-EN. We then demonstrate that it is possible to share a single character-level encoder across multiple languages by training a model on a many-to-one translation task. In this multilingual setting, the character-level encoder significantly outperforms the subword-level encoder on all the language pairs. We observe that on CS-EN, FI-EN and RU-EN, the quality of the multilingual character-level translation even surpasses the models specifically trained on that language pair alone, both in terms of the BLEU score and human judgment.", "phrases": ["neural machine translation", "explicit segmentation", "character-level encoder", "many-to-one translation task", "multilingual setting"], "overall_score": 4.693993729549262, "scores": [3.510510328652638, 0.8439869471404681, 0.9703054941482745, 0.6194256863750067, 0.5554952193624059], "rank_score": 1.2999447351357585} -{"id": "madnani-etal-2012-examining", "title": "Re-examining Machine Translation Metrics for Paraphrase Identification", "abstract": "We propose to re-examine the hypothesis that automated metrics developed for MT evaluation can prove useful for paraphrase identification in light of the significant work on the development of new MT metrics over the last 4 years. We show that a meta-classifier trained using nothing but recent MT metrics outperforms all previous paraphrase identification approaches on the Microsoft Research Paraphrase corpus. In addition, we apply our system to a second corpus developed for the task of plagiarism detection and obtain extremely positive results. Finally, we conduct extensive error analysis and uncover the top systematic sources of error for a paraphrase identification approach relying solely on MT metrics.
We release both the new dataset and the error analysis annotations for use by the community.", "phrases": ["machine translation metric", "paraphrase identification", "error analysis", "nist"], "overall_score": 3.8914400821738404, "scores": [2.9522129418192318, 1.1878395865687323, 0.5295915474594504, 0.5263343719291964], "rank_score": 1.2989946119441527} -{"id": "sagae-tsujii-2007-dependency", "title": "Dependency Parsing and Domain Adaptation with LR Models and Parser Ensembles", "abstract": "We present a data-driven variant of the LR algorithm for dependency parsing, and extend it with a best-first search for probabilistic generalized LR dependency parsing. Parser actions are determined by a classifier, based on features that represent the current state of the parser. We apply this parsing framework to both tracks of the CoNLL 2007 shared task, in each case taking advantage of multiple models trained with different learners. In the multilingual track, we train three LR models for each of the ten languages, and combine the analyses obtained with each individual model with a maximum spanning tree voting scheme. In the domain adaptation track, we use two models to parse unlabeled data in the target domain to supplement the labeled out-of-domain training set, in a scheme similar to one iteration of co-training.", "phrases": ["variant", "training set", "dependency parsing"], "overall_score": 4.231680797214301, "scores": [2.5087560995863014, 0.8590116807284538, 0.5286920831342625], "rank_score": 1.298819954483006} -{"id": "min-etal-2013-distant", "title": "Distant Supervision for Relation Extraction with an Incomplete Knowledge Base", "abstract": "Distant supervision, heuristically labeling a corpus using a knowledge base, has emerged as a popular choice for training relation extractors. In this paper, we show that a significant number of \u201cnegative\u201d examples generated by the labeling process are false negatives because the knowledge base is incomplete. Therefore the heuristic for generating negative examples has a serious flaw. Building on a state-of-the-art distantly-supervised extraction algorithm, we propose an algorithm that learns from only positive and unlabeled labels at the pair-of-entity level. Experimental results demonstrate its advantage over existing algorithms.", "phrases": ["relation extraction", "knowledge base", "distant supervision", "downstream task", "schema"], "overall_score": 4.459603981900406, "scores": [3.103291718481353, 1.4350936419470668, 0.9069269828120974, 0.5249823866929901, 0.5230375341991271], "rank_score": 1.2986664528265268} -{"id": "kermes-etal-2016-royal", "title": "The Royal Society Corpus: From Uncharted Data to Corpus", "abstract": "We present the Royal Society Corpus (RSC) built from the Philosophical Transactions and Proceedings of the Royal Society of London. At present, the corpus contains articles from the first two centuries of the journal (1665\u20151869) and amounts to around 35 million tokens. The motivation for building the RSC is to investigate the diachronic linguistic development of scientific English. Specifically, we assume that due to specialization, linguistic encodings become more compact over time (Halliday, 1988; Halliday and Martin, 1993), thus creating a specific discourse type characterized by high information density that is functional for expert communication. When building corpora from uncharted material, typically not all relevant meta-data (e.g. author, time, genre) or linguistic data (e.g.
sentence/word boundaries, words, parts of speech) is readily available. We present an approach to obtain good quality meta-data and base text data adopting the concept of Agile Software Development.", "phrases": ["royal society corpus", "rsc", "proceedings"], "overall_score": 1.8002165110112816, "scores": [2.5186293431936733, 0.8291217830571966, 0.5479940231927419], "rank_score": 1.298581716481204} -{"id": "wang-etal-2019-confusionset", "title": "Confusionset-guided Pointer Networks for Chinese Spelling Check", "abstract": "This paper proposes Confusionset-guided Pointer Networks for Chinese Spell Check (CSC) task. More concretely, our approach utilizes the off-the-shelf confusionset for guiding the character generation. To this end, our novel Seq2Seq model jointly learns to copy a correct character from an input sentence through a pointer network, or generate a character from the confusionset rather than the entire vocabulary. We conduct experiments on three human-annotated datasets, and results demonstrate that our proposed generative model outperforms all competitor models by a large margin of up to 20% F1 score, achieving state-of-the-art performance on three datasets.", "phrases": ["pointer networks", "confusion", "spelling error"], "overall_score": 3.678988625490214, "scores": [1.727155274363584, 1.233935565722854, 0.9344738549002761], "rank_score": 1.2985215649955715} -{"id": "sogaard-etal-2018-limitations", "title": "On the Limitations of Unsupervised Bilingual Dictionary Induction", "abstract": "Unsupervised machine translation - i.e., not assuming any cross-lingual supervision signal, whether a dictionary, translations, or comparable corpora - seems impossible, but nevertheless, Lample et al. (2017) recently proposed a fully unsupervised machine translation (MT) model. The model relies heavily on an adversarial, unsupervised cross-lingual word embedding technique for bilingual dictionary induction (Conneau et al., 2017), which we examine here. Our results identify the limitations of current unsupervised MT: unsupervised bilingual dictionary induction performs much worse on morphologically rich languages that are not dependent marking, when monolingual corpora from different domains or different embedding algorithms are used. We show that a simple trick, exploiting a weak supervision signal from identical words, enables more robust induction and establish a near-perfect correlation between unsupervised bilingual dictionary induction performance and a previously unexplored graph similarity metric.", "phrases": ["assumption", "isomorphism", "unsupervised method", "distant language pair", "spelling"], "overall_score": 5.053389952611774, "scores": [2.1395548645348565, 1.4796683321806952, 1.1556280962478491, 1.147164513325582, 0.5703059936988758], "rank_score": 1.2984643599975718} -{"id": "shutova-etal-2010-metaphor", "title": "Metaphor Identification Using Verb and Noun Clustering", "abstract": "We present a novel approach to automatic metaphor identification in unrestricted text. Starting from a small seed set of manually annotated metaphorical expressions, the system is capable of harvesting a large number of metaphors of similar syntactic structure from a corpus. Our method is distinguished from previous work in that it does not employ any hand-crafted knowledge, other than the initial seed set, but, in contrast, captures metaphoricity by means of verb and noun clustering. 
Being the first to employ unsupervised methods for metaphor identification, our system operates with a precision of 0.79.", "phrases": ["noun", "clustering", "metaphor identification", "dependency feature", "target concept"], "overall_score": 4.653010777206883, "scores": [2.5457465544666045, 1.3204020487419958, 0.9419753871309657, 0.8591431693780394, 0.824969740823993], "rank_score": 1.2984473801083196} -{"id": "clark-gardner-2018-simple", "title": "Simple and Effective Multi-Paragraph Reading Comprehension", "abstract": "We introduce a method of adapting neural paragraph-level question answering models to the case where entire documents are given as input. Most current question answering models cannot scale to document or multi-document input, and naively applying these models to each paragraph independently often results in them being distracted by irrelevant text. We show that it is possible to significantly improve performance by using a modified training scheme that teaches the model to ignore non-answer containing paragraphs. Our method involves sampling multiple paragraphs from each document, and using an objective function that requires the model to produce globally correct output. We additionally identify and improve upon a number of other design decisions that arise when working with document-level data. Experiments on TriviaQA and SQuAD show our method advances the state of the art, including a 10 point gain on TriviaQA.", "phrases": ["reading comprehension", "paragraph", "open-domain question"], "overall_score": 4.7894835827879465, "scores": [1.504451874649208, 1.5243116532942564, 0.8663083840269205], "rank_score": 1.2983573039901284} -{"id": "xu-etal-2016-hybrid", "title": "Hybrid Question Answering over Knowledge Base and Free Text", "abstract": "A recent trend in question answering (QA) systems focuses on using structured knowledge bases (KBs) to find answers. While these systems are able to provide more precise answers than information retrieval (IR) based QA systems, the natural incompleteness of KB inevitably limits the question scope that the system can answer. In this paper, we present a hybrid question answering (hybrid-QA) system which exploits both structured knowledge base and free text to answer a question. The main challenge is to recognize the meaning of a question using these two resources, i.e., structured KB and free text. To address this, we map relational phrases to KB predicates and textual relations simultaneously, and further develop an integer linear program (ILP) model to infer on these candidates and provide a globally optimal solution. Experiments on benchmark datasets show that our system can benefit from both structured KB and free text, outperforming the state-of-the-art systems.", "phrases": ["knowledge base", "free text", "hybrid question"], "overall_score": 2.9886082445655457, "scores": [2.1070955147236017, 0.9003070229895384, 0.8864056698430015], "rank_score": 1.2979360691853805} -{"id": "zhang-etal-2016-variational-neural", "title": "Variational Neural Machine Translation", "abstract": "Models of neural machine translation are often from a discriminative family of encoder-decoders that learn a conditional distribution of a target sentence given a source sentence. In this paper, we propose a variational model to learn this conditional distribution for neural machine translation: a variational encoder-decoder model that can be trained end-to-end.
Different from the vanilla encoder-decoder model that generates target translations from hidden representations of source sentences alone, the variational model introduces a continuous latent variable to explicitly model underlying semantics of source sentences and to guide the generation of target translations. In order to perform efficient posterior inference and large-scale training, we build a neural posterior approximator conditioned on both the source and the target sides, and equip it with a reparameterization technique to estimate the variational lower bound. Experiments on both Chinese-English and English-German translation tasks show that the proposed variational neural machine translation achieves significant improvements over the vanilla neural machine translation baselines.", "phrases": ["machine translation", "end-to-end", "latent variable", "vae"], "overall_score": 4.276592963549258, "scores": [1.928536291154826, 1.6674575059270684, 1.0695897951137892, 0.5247133034561178], "rank_score": 1.2975742239129504} -{"id": "niu-etal-2017-improved", "title": "Improved Word Representation Learning with Sememes", "abstract": "Sememes are minimum semantic units of word meanings, and the meaning of each word sense is typically composed of several sememes. Since sememes are not explicit for each word, people manually annotate word sememes and form linguistic common-sense knowledge bases. In this paper, we show that word sememe information can improve word representation learning (WRL), which maps words into a low-dimensional semantic space and serves as a fundamental step for many NLP tasks. The key idea is to utilize word sememes to capture exact meanings of a word within specific contexts accurately. More specifically, we follow the framework of Skip-gram and present three sememe-encoded models to learn representations of sememes, senses and words, where we apply the attention scheme to detect word senses in various contexts. We conduct experiments on two tasks including word similarity and word analogy, and our models significantly outperform baselines. The results indicate that WRL can benefit from sememes via the attention scheme, and also confirm that our models are capable of correctly modeling sememe information.", "phrases": ["word representation learning", "sememe", "hownet"], "overall_score": 2.524535680011791, "scores": [2.3596839037294255, 0.983589462273094, 0.5487907376315331], "rank_score": 1.2973547012113509} -{"id": "qi-etal-2020-stanza", "title": "Stanza: A Python Natural Language Processing Toolkit for Many Human Languages", "abstract": "We introduce Stanza, an open-source Python natural language processing toolkit supporting 66 human languages. Compared to existing widely used toolkits, Stanza features a language-agnostic fully neural pipeline for text analysis, including tokenization, multi-word token expansion, lemmatization, part-of-speech and morphological feature tagging, dependency parsing, and named entity recognition. We have trained Stanza on a total of 112 datasets, including the Universal Dependencies treebanks and other multilingual corpora, and show that the same neural architecture generalizes well and achieves competitive performance on all languages tested. Additionally, Stanza includes a native Python interface to the widely used Java Stanford CoreNLP software, which further extends its functionality to cover other tasks such as coreference resolution and relation extraction.
Source code, documentation, and pretrained models for 66 languages are available at .", "phrases": ["pipeline", "tokenization", "morphological feature tagging", "stanza", "opinion extraction"], "overall_score": 3.819844669087463, "scores": [3.284852609248069, 1.1422852364524625, 0.8718043214570588, 0.6058868705236573, 0.5817116855516137], "rank_score": 1.2973081446465726} -{"id": "ziai-meurers-2014-focus", "title": "Focus Annotation in Reading Comprehension Data", "abstract": "When characterizing the information structure of sentences, the so-called focus identifies the part of a sentence addressing the current question under discussion in the discourse. While this notion is precisely defined in formal semantics and potentially very useful in theoretical and practical terms, it has turned out to be difficult to reliably annotate focus in corpus data. We present a new focus annotation effort designed to overcome this problem. On the one hand, it is based on a task-based corpus providing more explicit context. The annotation study is based on the CREG corpus (Ott et al., 2012), which consists of answers to explicitly given reading comprehension questions. On the other hand, we operationalize focus annotation as an incremental process including several substeps which provide guidance, such as explicit answer typing. We evaluate the focus annotation both intrinsically by calculating agreement between annotators and extrinsically by showing that the focus information substantially improves the automatic meaning assessment of answers in the CoMiC system (Meurers et al., 2011).", "phrases": ["information structure", "focus annotation", "explicit question", "ill-formed learner language"], "overall_score": 2.324248197892218, "scores": [3.247155431939588, 0.8557696473415106, 0.560667376792287, 0.5251580158715475], "rank_score": 1.2971876179862332} -{"id": "kirov-etal-2016-large", "title": "Very-large Scale Parsing and Normalization of Wiktionary Morphological Paradigms", "abstract": "Wiktionary is a large-scale resource for cross-lingual lexical information with great potential utility for machine translation (MT) and many other NLP tasks, especially automatic morphological analysis and generation. However, it is designed primarily for human viewing rather than machine readability, and presents numerous challenges for generalized parsing and extraction due to a lack of standardized formatting and grammatical descriptor definitions. This paper describes a large-scale effort to automatically extract and standardize the data in Wiktionary and make it available for use by the NLP research community. The methodological innovations include a multidimensional table parsing algorithm, a cross-lexeme, token-frequency-based method of separating inflectional form data from grammatical descriptors, the normalization of grammatical descriptors to a unified annotation scheme that accounts for cross-linguistic diversity, and a verification and correction process that exploits within-language, cross-lexeme table format consistency to minimize human effort. The effort described here resulted in the extraction of a uniquely large normalized resource of nearly 1,000,000 inflectional paradigms across 350 languages. 
Evaluation shows that even though the data is extracted using a language-independent approach, it is comparable in quantity and quality to data extracted using hand-tuned, language-specific approaches.", "phrases": ["normalization", "wiktionary", "inflectional paradigm"], "overall_score": 3.326828360815, "scores": [0.781577484175995, 1.6581020606012447, 1.451424602527417], "rank_score": 1.297034715768219} -{"id": "morales-zhai-2017-identifying", "title": "Identifying Humor in Reviews using Background Text Sources", "abstract": "We study the problem of automatically identifying humorous text from a new kind of text data, i.e., online reviews. We propose a generative language model, based on the theory of incongruity, to model humorous text, which allows us to leverage background text sources, such as Wikipedia entry descriptions, and enables construction of multiple features for identifying humorous reviews. Evaluation of these features using supervised learning for classifying reviews into humorous and non-humorous reviews shows that the features constructed based on the proposed generative model are much more effective than the major features proposed in the existing literature, allowing us to achieve almost 86% accuracy. These humorous review predictions can also supply good indicators for identifying helpful reviews.", "phrases": ["humor", "background text source", "generative language model", "wikipedia entry description", "yelp review"], "overall_score": 2.5236558184920748, "scores": [2.941026849757072, 1.4329149444347726, 1.052216346891749, 0.5294664875856575, 0.5288880805050168], "rank_score": 1.2969025418348537} -{"id": "cohen-etal-2012-spectral", "title": "Spectral Learning of Latent-Variable PCFGs", "abstract": "We introduce a spectral learning algorithm for latent-variable PCFGs (Petrov et al., 2006). Under a separability (singular value) condition, we prove that the method provides consistent parameter estimates.", "phrases": ["pcfg", "spectral learning algorithm", "definition"], "overall_score": 3.3263317173364007, "scores": [2.3232635929714682, 1.0244937477636413, 0.5427659255352046], "rank_score": 1.2968410887567714} -{"id": "villavicencio-etal-2004-lexical", "title": "Lexical Encoding of MWEs", "abstract": "Multiword Expressions present a challenge for language technology, given their flexible nature. Each type of multiword expression has its own characteristics, and providing a uniform lexical encoding for them is a difficult task to undertake. Nonetheless, in this paper we present an architecture for the lexical encoding of these expressions in a database, that takes into account their flexibility. This encoding extends in a straightforward manner the one required for simplex (single) words, and maximises the information contained for them in the description of multiwords.", "phrases": ["mwes", "lexical encoding", "idiom"], "overall_score": 2.3233488660091073, "scores": [1.993733057553075, 1.3664433066783643, 0.5298807096849792], "rank_score": 1.2966856913054727} -{"id": "dong-lapata-2018-coarse", "title": "Coarse-to-Fine Decoding for Neural Semantic Parsing", "abstract": "Semantic parsing aims at mapping natural language utterances into structured meaning representations. In this work, we propose a structure-aware neural architecture which decomposes the semantic parsing process into two stages. Given an input utterance, we first generate a rough sketch of its meaning, where low-level information (such as variable names and arguments) is glossed over. 
Then, we fill in missing details by taking into account the natural language input and the sketch itself. Experimental results on four datasets characteristic of different domains and meaning representations show that our approach consistently improves performance, achieving competitive results despite the use of relatively simple decoders.", "phrases": ["neural semantic parsing", "natural language utterance", "sketch", "coarse-to-fine"], "overall_score": 5.097729839498372, "scores": [1.7619778576951848, 0.9312024512854022, 1.9613913847892772, 0.5315482572984666], "rank_score": 1.2965299877670826} -{"id": "gao-etal-2021-making", "title": "Making Pre-trained Language Models Better Few-shot Learners", "abstract": "The recent GPT-3 model (Brown et al., 2020) achieves remarkable few-shot performance solely by leveraging a natural-language prompt and a few task demonstrations as input context. Inspired by their findings, we study few-shot learning in a more practical scenario, where we use smaller language models for which fine-tuning is computationally efficient. We present LM-BFF\u2014better few-shot fine-tuning of language models\u2014a suite of simple and complementary techniques for fine-tuning language models on a small number of annotated examples. Our approach includes (1) prompt-based fine-tuning together with a novel pipeline for automating prompt generation; and (2) a refined strategy for dynamically and selectively incorporating demonstrations into each context. Finally, we present a systematic evaluation for analyzing few-shot performance on a range of NLP tasks, including classification and regression. Our experiments demonstrate that our methods combine to dramatically outperform standard fine-tuning procedures in this low resource setting, achieving up to 30% absolute improvement, and 11% on average across all tasks. Our approach makes minimal assumptions on task resources and domain expertise, and hence constitutes a strong task-agnostic method for few-shot learning.", "phrases": ["language model", "learner", "fine-tuning", "in-context learning", "text classification task"], "overall_score": 5.631816779219945, "scores": [2.0652787555028955, 2.0576187002163846, 0.922992695582976, 0.8752222405575413, 0.5614716931527094], "rank_score": 1.2965168170025012} -{"id": "ling-etal-2017-program", "title": "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems", "abstract": "Solving algebraic word problems requires executing a series of arithmetic operations\u2014a program\u2014to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. 
Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.", "phrases": ["algebraic word problem", "explanation", "program", "solver", "correct answer"], "overall_score": 4.680814203075492, "scores": [2.2127541276992426, 1.5476642465266386, 1.1022303808589247, 1.068290444921921, 0.5505349241832053], "rank_score": 1.2962948248379864} -{"id": "power-etal-2003-document", "title": "Document Structure", "abstract": "We argue the case for abstract document structure as a separate descriptive level in the analysis and generation of written texts. The purpose of this representation is to mediate between the message of a text (i.e., its discourse structure) and its physical presentation (i.e., its organization into graphical constituents like sections, paragraphs, sentences, bulleted lists, figures, and footnotes). Abstract document structure can be seen as an extension of Nunberg's text-grammar; it is also closely related to logical markup in languages like HTML and LaTeX. We show that by using this intermediate representation, several subtasks in language generation and language understanding can be defined more cleanly.", "phrases": ["language generation", "document structure", "constraint satisfaction"], "overall_score": 2.695164093218295, "scores": [2.772588722239781, 0.5603939141875183, 0.5553172352407345], "rank_score": 1.2960999572226781} -{"id": "mitchell-etal-2015-quantifying", "title": "Quantifying the Language of Schizophrenia in Social Media", "abstract": "Analyzing symptoms of schizophrenia has traditionally been challenging given the low prevalence of the condition, affecting around 1% of the U.S. population. We explore potential linguistic markers of schizophrenia using the tweets of self-identified schizophrenia sufferers, and describe several natural language processing (NLP) methods to analyze the language of schizophrenia. We examine how these signals compare with the widely used LIWC categories for understanding mental health (Pennebaker et al., 2007), and provide preliminary evidence of additional linguistic signals that may aid in identifying and getting help to people suffering from schizophrenia.", "phrases": ["schizophrenia", "linguistic marker", "social medium", "twitter"], "overall_score": 3.5098513163877723, "scores": [2.9830723566564212, 1.1242105109599367, 0.5468767886016929, 0.5301627244515831], "rank_score": 1.2960805951674084} -{"id": "choubey-etal-2020-discourse", "title": "Discourse as a Function of Event: Profiling Discourse Structure in News Articles around the Main Event", "abstract": "Understanding discourse structures of news articles is vital to effectively contextualize the occurrence of a news event. To enable computational modeling of news structures, we apply an existing theory of functional discourse structure for news articles that revolves around the main event and create a human-annotated corpus of 802 documents spanning over four domains and three media sources. Next, we propose several document-level neural-network models to automatically construct news content structures. Finally, we demonstrate that incorporating system predicted news structures yields new state-of-the-art performance for event coreference resolution.
The news documents we annotated are openly available and the annotations are publicly released for future research.", "phrases": ["main event", "news article", "discourse role"], "overall_score": 2.6947211611629247, "scores": [2.1585734143822966, 1.1191376329739686, 0.6099498084320353], "rank_score": 1.2958869519294336} -{"id": "su-etal-2019-dual", "title": "Dual Supervised Learning for Natural Language Understanding and Generation", "abstract": "Natural language understanding (NLU) and natural language generation (NLG) are both critical research topics in the NLP and dialogue fields. Natural language understanding aims to extract the core semantic meaning from the given utterances, while natural language generation is the opposite, whose goal is to construct corresponding sentences based on the given semantics. However, such a dual relationship has not been investigated in the literature. This paper proposes a novel learning framework for natural language understanding and generation on top of dual supervised learning, providing a way to exploit the duality. The preliminary experiments show that the proposed approach boosts the performance for both tasks, demonstrating the effectiveness of the dual relationship.", "phrases": ["natural language understanding", "nlu", "dual supervised learning"], "overall_score": 2.0843978407401504, "scores": [1.8426290255169688, 0.9483288990266131, 1.094369663373851], "rank_score": 1.2951091959724776} -{"id": "he-etal-2015-multi", "title": "Multi-Perspective Sentence Similarity Modeling with Convolutional Neural Networks", "abstract": "Modeling sentence similarity is complicated by the ambiguity and variability of linguistic expression. To cope with these challenges, we propose a model for comparing sentences that uses a multiplicity of perspectives. We first model each sentence using a convolutional neural network that extracts features at multiple levels of granularity and uses multiple types of pooling. We then compare our sentence representations at several granularities using multiple similarity metrics. We apply our model to three tasks, including the Microsoft Research paraphrase identification task and two SemEval semantic textual similarity tasks. We obtain strong performance on all tasks, rivaling or exceeding the state of the art without using external resources such as WordNet or parsers.", "phrases": ["sentence similarity", "convolutional neural network", "well-studied task"], "overall_score": 3.941910140395237, "scores": [2.4540956389107556, 0.8742160380727145, 0.5559528219927757], "rank_score": 1.294754832992082} -{"id": "wang-etal-2016-recursive", "title": "Recursive Neural Conditional Random Fields for Aspect-based Sentiment Analysis", "abstract": "In aspect-based sentiment analysis, extracting aspect terms along with the opinions being expressed from user-generated content is one of the most important subtasks. Previous studies have shown that exploiting connections between aspect and opinion terms is promising for this task. In this paper, we propose a novel joint model that integrates recursive neural networks and conditional random fields into a unified framework for explicit aspect and opinion terms co-extraction. The proposed model learns high-level discriminative features and doubly propagates information between aspect and opinion terms simultaneously. Moreover, it is flexible to incorporate hand-crafted features into the proposed model to further boost its information extraction performance.
Experimental results on the SemEval Challenge 2014 dataset show the superiority of our proposed model over several baseline methods as well as the winning systems of the challenge.", "phrases": ["conditional random field", "sentiment analysis", "aspect term", "recursive neural network"], "overall_score": 3.9991802216293, "scores": [2.430614129287566, 1.2914609345665424, 0.8869542629308983, 0.5661570797094397], "rank_score": 1.2937966016236115} -{"id": "baldwin-lui-2010-language", "title": "Language Identification: The Long and the Short of the Matter", "abstract": "Language identification is the task of identifying the language a given document is written in. This paper describes a detailed examination of what models perform best under different conditions, based on experiments across three separate datasets and a range of tokenisation strategies. We demonstrate that the task becomes increasingly difficult as we increase the number of languages, reduce the amount of training data and reduce the length of documents. We also show that it is possible to perform language identification without having to perform explicit character encoding detection.", "phrases": ["condition", "length", "language identification", "baldwin"], "overall_score": 3.501767773176773, "scores": [2.978005250493359, 0.844859625475249, 0.8268816957993913, 0.5226357891668306], "rank_score": 1.2930955902337076} -{"id": "haouari-etal-2021-arcov19", "title": "ArCOV19-Rumors: Arabic COVID-19 Twitter Dataset for Misinformation Detection", "abstract": "In this paper we introduce ArCOV19-Rumors, an Arabic COVID-19 Twitter dataset for misinformation detection composed of tweets containing claims from 27th January till the end of April 2020. We collected 138 verified claims, mostly from popular fact-checking websites, and identified 9.4K relevant tweets to those claims. Tweets were manually annotated by veracity to support research on misinformation detection, which is one of the major problems faced during a pandemic. ArCOV19-Rumors supports two levels of misinformation detection over Twitter: verifying free-text claims (called claim-level verification) and verifying claims expressed in tweets (called tweet-level verification). Our dataset covers, in addition to health, claims related to other topical categories that were influenced by COVID-19, namely, social, politics, sports, entertainment, and religious. Moreover, we present benchmarking results for tweet-level verification on the dataset. We experimented with SOTA models of versatile approaches that exploit content, user profile features, temporal features and propagation structure of the conversational threads for tweet verification.", "phrases": ["covid-19 twitter dataset", "misinformation detection", "arcov19-rumor"], "overall_score": 2.0810768485242535, "scores": [2.1300657124875046, 0.9043891309611604, 0.8446823990253557], "rank_score": 1.2930457474913402} -{"id": "yu-etal-2018-syntaxsqlnet", "title": "SyntaxSQLNet: Syntax Tree Networks for Complex and Cross-Domain Text-to-SQL Task", "abstract": "Most existing studies in text-to-SQL tasks do not require generating complex SQL queries with multiple clauses or sub-queries, and generalizing to new, unseen databases. In this paper we propose SyntaxSQLNet, a syntax tree network to address the complex and cross-domain text-to-SQL generation task. SyntaxSQLNet employs a SQL-specific syntax tree-based decoder with SQL generation path history and table-aware column attention encoders.
We evaluate SyntaxSQLNet on a new large-scale text-to-SQL corpus containing databases with multiple tables and complex SQL queries containing multiple SQL clauses and nested queries. We use a database split setting where databases in the test set are unseen during training. Experimental results show that SyntaxSQLNet can handle a significantly greater number of complex SQL examples than prior work, outperforming the previous state-of-the-art model by 9.5% in exact matching accuracy. To our knowledge, we are the first to study this complex text-to-SQL task. Our task and models with the latest updates are available at .", "phrases": ["text-to-sql generation", "syntaxsqlnet", "natural language question"], "overall_score": 3.935614383789546, "scores": [2.697851735051888, 0.6140800068209334, 0.5661290682808201], "rank_score": 1.2926869367178806} -{"id": "fahmi-bouma-2006-learning", "title": "Learning to Identify Definitions using Syntactic Features", "abstract": "This paper describes an approach to learning concept definitions which operates on fully parsed text. A subcorpus of the Dutch version of Wikipedia was searched for sentences which have the syntactic properties of definitions. Next, we experimented with various text classification techniques to distinguish actual definitions from other sentences. A maximum entropy classifier which incorporates features referring to the position of the sentence in the document as well as various syntactic features, gives the best results.", "phrases": ["definition", "syntactic feature", "position", "machine learning", "symbolic method"], "overall_score": 3.5005628116881593, "scores": [2.4501009943526846, 2.41280365320131, 0.5399259880840501, 0.5359342355613824, 0.5244883031017485], "rank_score": 1.2926506348602351} -{"id": "hardmeier-etal-2013-docent", "title": "Docent: A Document-Level Decoder for Phrase-Based Statistical Machine Translation", "abstract": "We describe Docent, an open-source decoder for statistical machine translation that breaks with the usual sentence-by-sentence paradigm and translates complete documents as units. By taking translation to the document level, our decoder can handle feature models with arbitrary discourse-wide dependencies and constitutes an essential infrastructure component in the quest for discourse-aware SMT models.", "phrases": ["document-level decoder", "statistical machine translation", "unit", "docent", "document-level smt paradigm"], "overall_score": 3.4110295707246334, "scores": [2.9151526681498403, 1.4661425125311802, 0.9424176199772561, 0.5894775442754623, 0.5494005439536943], "rank_score": 1.2925181777774866} -{"id": "vaswani-etal-2013-decoding", "title": "Decoding with Large-Scale Neural Language Models Improves Translation", "abstract": "We explore the application of neural language models to machine translation. We develop a new model that combines the neural probabilistic language model of Bengio et al., rectified linear units, and noise-contrastive estimation, and we incorporate it into a machine translation system both by reranking k-best lists and by direct integration into the decoder.
Our large-scale, large-vocabulary experiments across four language pairs show that our neural language model improves translation quality by up to 1.1 Bleu.", "phrases": ["language model", "unit", "noise-contrastive estimation", "estimation", "feedforward"], "overall_score": 4.160283635012795, "scores": [1.780462396868979, 1.6335715241634696, 1.0924906666575982, 1.0594211507473759, 0.8963781618378249], "rank_score": 1.2924647800550495} -{"id": "verhagen-etal-2005-automating", "title": "Automating Temporal Annotation with TARSQI", "abstract": "We present an overview of TARSQI, a modular system for automatic temporal annotation that adds time expressions, events and temporal relations to news texts.", "phrases": ["tarsqi", "time expression", "toolkit"], "overall_score": 2.839687439849161, "scores": [2.221203828851574, 1.127243347172485, 0.528745172641307], "rank_score": 1.292397449555122} -{"id": "wang-etal-2019-learning-deep", "title": "Learning Deep Transformer Models for Machine Translation", "abstract": "Transformer is the state-of-the-art model in recent machine translation evaluations. Two strands of research are promising to improve models of this kind: the first uses wide networks (a.k.a. Transformer-Big) and has been the de facto standard for development of the Transformer system, and the other uses deeper language representation but faces the difficulty arising from learning deep networks. Here, we continue the line of research on the latter. We claim that a truly deep Transformer model can surpass the Transformer-Big counterpart by 1) proper use of layer normalization and 2) a novel way of passing the combination of previous layers to the next. On WMT'16 English-German and NIST OpenMT'12 Chinese-English tasks, our deep system (30/25-layer encoder) outperforms the shallow Transformer-Big/Base baseline (6-layer encoder) by 0.4-2.4 BLEU points. As another bonus, the deep model is 1.6X smaller in size and 3X faster in training than Transformer-Big.", "phrases": ["deep transformer", "machine translation", "previous layer"], "overall_score": 4.106689902015645, "scores": [2.0935561580007502, 0.8928905039864417, 0.8901615906483792], "rank_score": 1.2922027508785237} -{"id": "li-etal-2004-applying", "title": "Applying Machine Learning to Chinese Temporal Relation Resolution", "abstract": "Temporal relation resolution involves extraction of temporal information explicitly or implicitly embedded in a language. This information is often inferred from a variety of interactive grammatical and lexical cues, especially in Chinese. For this purpose, inter-clause relations (temporal or otherwise) in a multiple-clause sentence play an important role. In this paper, a computational model based on machine learning and heterogeneous collaborative bootstrapping is proposed for analyzing temporal relations in a Chinese multiple-clause sentence. The model makes use of the fact that events are represented in different temporal structures. It takes into account the effects of linguistic features such as tense/aspect, temporal connectives, and discourse structures. 
A set of experiments has been conducted to investigate how linguistic features could affect temporal relation resolution.", "phrases": ["machine learning", "chinese", "temporal relation"], "overall_score": 2.975402176828911, "scores": [1.6484721521360768, 1.1690438353353965, 1.0590862530476848], "rank_score": 1.2922007468397194} -{"id": "liu-lapata-2019-hierarchical", "title": "Hierarchical Transformers for Multi-Document Summarization", "abstract": "In this paper, we develop a neural summarization model which can effectively process multiple input documents and distill Transformer architecture with the ability to encode documents in a hierarchical manner. We represent cross-document relationships via an attention mechanism which allows to share information as opposed to simply concatenating text spans and processing them as a flat sequence. Our model learns latent dependencies among textual units, but can also take advantage of explicit graph representations focusing on similarity or discourse relations. Empirical results on the WikiSum dataset demonstrate that the proposed architecture brings substantial improvements over several strong baselines.", "phrases": ["summarization", "cross-document relationship", "wikisum", "hierarchical transformer", "large-scale dataset"], "overall_score": 4.766737299697964, "scores": [2.8024463849459966, 1.2968177603037943, 0.9075658945421803, 0.8972163364965378, 0.5569092594154066], "rank_score": 1.292191127140783} -{"id": "wible-tsao-2010-stringnet", "title": "StringNet as a Computational Resource for Discovering and Investigating Linguistic Constructions", "abstract": "We describe and motivate the design of a lexico-grammatical knowledgebase called StringNet and illustrate its significance for research into constructional phenomena in English. StringNet consists of a massive archive of what we call hybrid n-grams. Unlike traditional n-grams, hybrid n-grams can consist of any co-occurring combination of POS tags, lexemes, and specific word forms. Further, we detect and represent superordinate and subordinate relations among hybrid n-grams by cross-indexing, allowing the navigation of StringNet through these hierarchies, from specific fixed expressions (\"It's the thought that counts\") up to their hosting proto-constructions (e.g. the It Cleft construction: \"it's the [noun] that [verb]\"). StringNet supports discovery of grammatical dependencies (e.g., subject-verb agreement) in non-canonical configurations as well as lexical dependencies (e.g., adjective/noun collocations specific to families of constructions).", "phrases": ["pos tag", "stringnet", "frequency"], "overall_score": 2.5139204660683405, "scores": [2.7889343684708536, 0.5622319266306287, 0.5245323859842517], "rank_score": 1.2918995603619114} -{"id": "nalisnick-baird-2013-character", "title": "Character-to-Character Sentiment Analysis in Shakespeare's Plays", "abstract": "We present an automatic method for analyzing sentiment dynamics between characters in plays. This literary format\u2019s structured dialogue allows us to make assumptions about who is participating in a conversation. Once we have an idea of who a character is speaking to, the sentiment in his or her speech can be attributed accordingly, allowing us to generate lists of a character\u2019s enemies and allies as well as pinpoint scenes critical to a character\u2019s emotional development. 
Results of experiments on Shakespeare\u2019s plays are presented along with discussion of how this work can be extended to unstructured texts (i.e. novels).", "phrases": ["sentiment analysis", "play", "character"], "overall_score": 2.079070203890912, "scores": [2.1638772420526062, 0.8804078776202973, 0.8311117275652923], "rank_score": 1.2917989490793986} -{"id": "bawden-etal-2018-evaluating", "title": "Evaluating Discourse Phenomena in Neural Machine Translation", "abstract": "For machine translation to tackle discourse phenomena, models must have access to extra-sentential linguistic context. There has been recent interest in modelling context in neural machine translation (NMT), but models have been principally evaluated with standard automatic metrics, poorly adapted to evaluating discourse phenomena. In this article, we present hand-crafted, discourse test sets, designed to test the models' ability to exploit previous source and target sentences. We investigate the performance of recently proposed multi-encoder NMT models trained on subtitles for English to French. We also explore a novel way of exploiting context from the previous sentence. Despite gains using BLEU, multi-encoder models give limited improvement in the handling of discourse phenomena: 50% accuracy on our coreference test set and 53.5% for coherence/cohesion (compared to a non-contextual baseline of 50%). A simple strategy of decoding the concatenation of the previous and current sentence leads to good performance, and our novel strategy of multi-encoding and decoding of two sentences leads to the best performance (72.5% for coreference and 57% for coherence/cohesion), highlighting the importance of target-side context.", "phrases": ["discourse phenomena", "neural machine translation", "previous sentence", "lexical consistency"], "overall_score": 4.105040763527991, "scores": [2.3949236030062115, 0.9341457583448256, 0.9438601014299559, 0.8938058823891094], "rank_score": 1.2916838362925258} -{"id": "yao-van-durme-2011-nonparametric", "title": "Nonparametric Bayesian Word Sense Induction", "abstract": "We propose the use of a nonparametric Bayesian model, the Hierarchical Dirichlet Process (HDP), for the task of Word Sense Induction. Results are shown through comparison against Latent Dirichlet Allocation (LDA), a parametric Bayesian model employed by Brody and Lapata (2009) for this task. We find that the two models achieve similar levels of induction quality, while the HDP confers the advantage of automatically inducing a variable number of senses per word, as compared to manually fixing the number of senses a priori, as in LDA. This flexibility allows for the model to adapt to terms with greater or lesser polysemy, when evidenced by corpus distributional statistics. When trained on out-of-domain data, experimental results confirm the model's ability to make use of a restricted set of topically coherent induced senses, when then applied in a restricted domain.", "phrases": ["word sense induction", "bayesian model", "hdp", "van", "yao"], "overall_score": 2.837134949455536, "scores": [2.1064109723604907, 1.250759572888695, 1.093720855888538, 1.0752650427594532, 0.9300223616737684], "rank_score": 1.2912357611141891} -{"id": "wieting-etal-2016-charagram", "title": "Charagram: Embedding Words and Sentences via Character n-grams", "abstract": "We present Charagram embeddings, a simple approach for learning character-based compositional models to embed textual sequences. 
A word or sentence is represented using a character n-gram count vector, followed by a single nonlinear transformation to yield a low-dimensional embedding. We use three tasks for evaluation: word similarity, sentence similarity, and part-of-speech tagging. We demonstrate that Charagram embeddings outperform more complex architectures based on character-level recurrent and convolutional neural networks, achieving new state-of-the-art performance on several similarity tasks.", "phrases": ["charagram", "subword information", "well embedding"], "overall_score": 3.7310710552960145, "scores": [2.308324990670177, 0.9698521595262223, 0.5944088364710479], "rank_score": 1.2908619955558158} -{"id": "gomez-rodriguez-vilares-2018-constituent", "title": "Constituent Parsing as Sequence Labeling", "abstract": "We introduce a method to reduce constituent parsing to sequence labeling. For each word w_t, it generates a label that encodes: (1) the number of ancestors in the tree that the words w_t and w_t+1 have in common, and (2) the nonterminal symbol at the lowest common ancestor. We first prove that the proposed encoding function is injective for any tree without unary branches. In practice, the approach is made extensible to all constituency trees by collapsing unary branches. We then use the PTB and CTB treebanks as testbeds and propose a set of fast baselines. We achieve 90% F-score on the PTB test set, outperforming the Vinyals et al. (2015) sequence-to-sequence parser. In addition, sacrificing some accuracy, our approach achieves the fastest constituent parsing speeds reported to date on PTB by a wide margin.", "phrases": ["sequence labeling", "common ancestor", "constituent"], "overall_score": 3.2064436436620243, "scores": [1.8555091333554055, 1.459503966056185, 0.5560904415631767], "rank_score": 1.290367846991589} -{"id": "davison-etal-2019-commonsense", "title": "Commonsense Knowledge Mining from Pretrained Models", "abstract": "Inferring commonsense knowledge is a key challenge in machine learning. Due to the sparsity of training data, previous work has shown that supervised methods for commonsense knowledge mining underperform when evaluated on novel data. In this work, we develop a method for generating commonsense knowledge using a large, pre-trained bidirectional language model. By transforming relational triples into masked sentences, we can use this model to rank a triple's validity by the estimated pointwise mutual information between the two entities. Since we do not update the weights of the bidirectional model, our approach is not biased by the coverage of any one commonsense knowledge base. Though we do worse on a held-out test set than models explicitly trained on a corresponding training set, our approach outperforms these methods when mining commonsense knowledge from new sources, suggesting that our unsupervised technique generalizes better than current supervised approaches.", "phrases": ["language model", "commonsense knowledge", "fine-tuning"], "overall_score": 4.093033079340905, "scores": [2.005652425059133, 1.312570325578205, 0.5454938172718808], "rank_score": 1.2879055226364065} -{"id": "qian-etal-2014-bilingual", "title": "Bilingual Active Learning for Relation Classification via Pseudo Parallel Corpora", "abstract": "Active learning (AL) has been proven effective to reduce human annotation efforts in NLP. However, previous studies on AL are limited to applications in a single language. 
This paper proposes a bilingual active learning paradigm for relation classification, where the unlabeled instances are first jointly chosen in terms of their prediction uncertainty scores in two languages and then manually labeled by an oracle. Instead of using a parallel corpus, labeled and unlabeled instances in one language are translated into ones in the other language and all instances in both languages are then fed into a bilingual active learning engine as pseudo parallel corpora. Experimental results on the ACE RDC 2005 Chinese and English corpora show that bilingual active learning for relation classification significantly outperforms monolingual active learning.", "phrases": ["pseudo parallel corpora", "learning paradigm", "bilingual active learning"], "overall_score": 2.0727518619202816, "scores": [2.4162537688077492, 0.9116331218965008, 0.5357325367494047], "rank_score": 1.2878731424845518} -{"id": "miura-etal-2017-unifying", "title": "Unifying Text, Metadata, and User Network Representations with a Neural Network for Geolocation Prediction", "abstract": "We propose a novel geolocation prediction model using a complex neural network. Geolocation prediction in social media has attracted many researchers to use information of various types. Our model unifies text, metadata, and user network representations with an attention mechanism to overcome previous ensemble approaches. In an evaluation using two open datasets, the proposed model exhibited a maximum of 3.8% increase in accuracy and a maximum of 6.6% increase in accuracy@161 against previous models. We further analyzed several intermediate layers of our model, which revealed that their states capture some statistical characteristics of the datasets.", "phrases": ["metadata", "user network representation", "geolocation prediction"], "overall_score": 2.964409571265505, "scores": [2.472641920965326, 0.8446786187219357, 0.5449596170181177], "rank_score": 1.2874267189017932} -{"id": "ferreira-freitas-2020-natural", "title": "Natural Language Premise Selection: Finding Supporting Statements for Mathematical Text", "abstract": "Mathematical text is written using a combination of words and mathematical expressions. This combination, along with a specific way of structuring sentences, makes it challenging for state-of-the-art NLP tools to understand and reason on top of mathematical discourse. In this work, we propose a new NLP task, the natural premise selection, which is used to retrieve supporting definitions and supporting propositions that are useful for generating an informal mathematical proof for a particular statement. We also make available a dataset, NL-PS, which can be used to evaluate different approaches for the natural premise selection task. Using different baselines, we demonstrate the underlying interpretation challenges associated with the task.", "phrases": ["premise selection", "mathematical text", "adaptation"], "overall_score": 2.6764175833341013, "scores": [2.7381388710974317, 0.600051775949779, 0.5230637277769219], "rank_score": 1.2870847916080441} -{"id": "xu-lapata-2020-coarse", "title": "Coarse-to-Fine Query Focused Multi-Document Summarization", "abstract": "We consider the problem of better modeling query-cluster interactions to facilitate query focused multi-document summarization. Due to the lack of training data, existing work relies heavily on retrieval-style methods for assembling query relevant summaries.
We propose a coarse-to-fine modeling framework which employs progressively more accurate modules for estimating whether text segments are relevant, likely to contain an answer, and central. The modules can be independently developed and leverage training data if available. We present an instantiation of this framework with a trained evidence estimator which relies on distant supervision from question answering (where various resources exist) to identify segments which are likely to answer the query and should be included in the summary. Our framework is robust across domains and query types (i.e., long vs. short) and outperforms strong comparison systems on benchmark datasets.", "phrases": ["query", "summarization", "coarse-to-fine framework"], "overall_score": 2.8263709004787394, "scores": [2.2500901394954367, 1.076665779449599, 0.5322545749735118], "rank_score": 1.2863368313061827} -{"id": "zhou-wang-2018-mojitalk", "title": "MojiTalk: Generating Emotional Responses at Scale", "abstract": "Generating emotional language is a key step towards building empathetic natural language processing agents. However, a major challenge for this line of research is the lack of large-scale labeled training data, and previous studies are limited to only small sets of human annotated sentiment labels. Additionally, explicitly controlling the emotion and sentiment of generated text is also difficult. In this paper, we take a more radical approach: we exploit the idea of leveraging Twitter data that are naturally labeled with emojis. We collect a large corpus of Twitter conversations that include emojis in the response and assume the emojis convey the underlying emotions of the sentence. We investigate several conditional variational autoencoders trained on these conversations, which allow us to use emojis to control the emotion of the generated text. Experimentally, we show in our quantitative and qualitative analyses that the proposed models can successfully generate high-quality abstractive conversation responses in accordance with designated emotions.", "phrases": ["emotion", "emojis", "response generation", "signal", "vae"], "overall_score": 4.140515642265033, "scores": [2.251648427563258, 1.5287028332192363, 0.9588756075464253, 0.8624090383568398, 0.8299816323887246], "rank_score": 1.2863235078148967} -{"id": "wang-etal-2017-two", "title": "A Two-Stage Parsing Method for Text-Level Discourse Analysis", "abstract": "Previous work introduced transition-based algorithms to form a unified architecture of parsing rhetorical structures (including span, nuclearity and relation), but did not achieve satisfactory performance. In this paper, we propose that a transition-based model is more appropriate for parsing the naked discourse tree (i.e., identifying span and nuclearity) due to data sparsity. At the same time, we argue that relation labeling can benefit from naked tree structure and should be treated elaborately with consideration of three kinds of relations including within-sentence, across-sentence and across-paragraph relations. Thus, we design a pipelined two-stage parsing method for generating an RST tree from text.
Experimental results show that our method achieves state-of-the-art performance, especially on span and nuclearity identification.", "phrases": ["two-stage", "discourse tree", "same parser"], "overall_score": 4.03313347682016, "scores": [1.71579028081539, 1.5929033212125738, 0.5501559436010112], "rank_score": 1.286283181876325} -{"id": "chakravarthi-2020-hopeedi", "title": "HopeEDI: A Multilingual Hope Speech Detection Dataset for Equality, Diversity, and Inclusion", "abstract": "Over the past few years, systems have been developed to control online content and eliminate abusive, offensive or hate speech content. However, people in power sometimes misuse this form of censorship to obstruct the democratic right of freedom of speech. Therefore, it is imperative that research should take a positive reinforcement approach towards online content that is encouraging, positive and supportive. Until now, most studies have focused on solving this problem of negativity in the English language, though the problem is much more than just harmful content. Furthermore, it is multilingual as well. Thus, we have constructed a Hope Speech dataset for Equality, Diversity and Inclusion (HopeEDI) containing user-generated comments from the social media platform YouTube with 28,451, 20,198 and 10,705 comments in English, Tamil and Malayalam, respectively, manually labelled as containing hope speech or not. To our knowledge, this is the first research of its kind to annotate hope speech for equality, diversity and inclusion in a multilingual setting. We determined the inter-annotator agreement of our dataset using Krippendorff's alpha. Further, we created several baselines to benchmark the resulting dataset and the results have been expressed using precision, recall and F1-score. The dataset is publicly available for the research community. We hope that this resource will spur further research on encouraging inclusive and responsive speech that reinforces positiveness.", "phrases": ["hope speech detection", "user-generated comment", "hopeedi", "language identification", "emotion"], "overall_score": 5.244587794418588, "scores": [3.523994618265123, 0.9669407467527041, 0.8499642401116007, 0.5651672599352975, 0.5250056469205902], "rank_score": 1.286214502397063} -{"id": "luo-2007-coreference", "title": "Coreference or Not: A Twin Model for Coreference Resolution", "abstract": "A twin-model is proposed for coreference resolution: a link component, modeling the coreferential relationship between an anaphor and a candidate antecedent, and a creation component modeling the possibility that a phrase is not coreferential with any candidate antecedent. The creation model depends on all candidate antecedents and is often expensive to compute; therefore, constraints are imposed on feature forms so that features in the creation model can be efficiently computed from feature values in the link model.
The proposed twin-model is tested on data from the 2005 Automatic Content Extraction (ACE) task and performs better than a thresholding baseline without tuning free parameters.", "phrases": ["coreference resolution", "twin-model", "coreferential relationship", "anaphor", "candidate"], "overall_score": 2.304522010001811, "scores": [2.8509496438671884, 0.910974908036914, 0.9051338129016137, 0.9007603092078033, 0.8630724405027319], "rank_score": 1.2861782229032503} -{"id": "liu-etal-2017-soft", "title": "A Soft-label Method for Noise-tolerant Distantly Supervised Relation Extraction", "abstract": "Distantly supervised relation extraction inevitably suffers from wrong labeling problems because it heuristically labels relational facts with knowledge bases. Previous sentence-level denoising models do not achieve satisfactory performance because they use hard labels, which are determined by distant supervision and immutable during training. To this end, we introduce an entity-pair-level denoising method which exploits semantic information from correctly labeled entity pairs to correct wrong labels dynamically during training. We propose a joint score function which combines the relational scores based on the entity-pair representation and the confidence of the hard label to obtain a new label, namely a soft label, for a certain entity pair. During training, soft labels instead of hard labels serve as gold labels. Experiments on the benchmark dataset show that our method dramatically reduces noisy instances and outperforms other state-of-the-art systems.", "phrases": ["soft-label method", "relation extraction", "noisy instance"], "overall_score": 3.85194814882669, "scores": [1.3950629113649051, 1.3652519981212308, 1.0971207327085444], "rank_score": 1.2858118807315602} -{"id": "mei-etal-2016-talk", "title": "What to talk about and how? Selective Generation using LSTMs with Coarse-to-Fine Alignment", "abstract": "We propose an end-to-end, domain-independent neural encoder-aligner-decoder model for selective generation, i.e., the joint task of content selection and surface realization. Our model first encodes a full set of over-determined database event records via an LSTM-based recurrent neural network, then utilizes a novel coarse-to-fine aligner to identify the small subset of salient records to talk about, and finally employs a decoder to generate free-form descriptions of the aligned, selected records. Our model achieves the best selection and generation results reported to-date (with 59% relative improvement in generation) on the benchmark WeatherGov dataset, despite using no specialized features or linguistic resources. Using an improved k-nearest neighbor beam filter helps further. We also perform a series of ablations and visualizations to elucidate the contributions of our key model components.
Lastly, we evaluate the generalizability of our model on the RoboCup dataset, and get results that are competitive with or better than the state-of-the-art, despite being severely data-starved.", "phrases": ["selective generation", "end-to-end", "encoder-aligner-decoder model", "recurrent neural network", "sentence planning"], "overall_score": 5.080104773695739, "scores": [1.7637172387653812, 1.3884033690694693, 1.3585080249446304, 1.0901924074374636, 0.8276672226123537], "rank_score": 1.2856976525658594} -{"id": "mirkin-etal-2009-source", "title": "Source-Language Entailment Modeling for Translating Unknown Terms", "abstract": "This paper addresses the task of handling unknown terms in SMT. We propose using source-language monolingual models and resources to paraphrase the source text prior to translation. We further present a conceptual extension to prior work by allowing translations of entailed texts rather than paraphrases only. A method for performing this process efficiently is presented and applied to some 2500 sentences with unknown terms. Our experiments show that the proposed approach substantially increases the number of properly translated texts.", "phrases": ["entailment", "unknown term", "paraphrase", "synonym"], "overall_score": 3.0802808395681174, "scores": [1.8201675162409494, 1.442830632914772, 0.9557285236325395, 0.9195808663452483], "rank_score": 1.2845768847833772} -{"id": "habernal-gurevych-2017-argumentation", "title": "Argumentation Mining in User-Generated Web Discourse", "abstract": "The goal of argumentation mining, an evolving research field in computational linguistics, is to design methods capable of analyzing people's argumentation. In this article, we go beyond the state of the art in several ways. (i) We deal with actual Web data and take up the challenges given by the variety of registers, multiple domains, and unrestricted noisy user-generated Web discourse. (ii) We bridge the gap between normative argumentation theories and argumentation phenomena encountered in actual data by adapting an argumentation model tested in an extensive annotation study. (iii) We create a new gold standard corpus (90k tokens in 340 documents) and experiment with several machine learning methods to identify argument components. We offer the data, source codes, and annotation guidelines to the community under free licenses. Our findings show that argumentation mining in user-generated Web discourse is a feasible but challenging task.", "phrases": ["user-generated web discourse", "annotation study", "argumentation mining", "premise", "forum post"], "overall_score": 4.769731428760247, "scores": [2.971801650866264, 0.9661392718258902, 1.0761776865031212, 0.8451463074014869, 0.5627612930537894], "rank_score": 1.2844052419301104} -{"id": "smith-fellbaum-2004-medical", "title": "Medical WordNet: A New Methodology for the Construction and Validation of Information Resources for Consumer Health", "abstract": "A consumer health information system must be able to comprehend both expert and nonexpert medical vocabulary and to map between the two. We describe an ongoing project to create a new lexical database called Medical WordNet (MWN), consisting of medically relevant terms used by and intelligible to non-expert subjects and supplemented by a corpus of natural-language sentences that is designed to provide medically validated contexts for MWN terms. 
The corpus derives primarily from online health information sources targeted to consumers, and involves two sub-corpora, called Medical FactNet (MFN) and Medical BeliefNet (MBN), respectively. The former consists of statements accredited as true on the basis of a rigorous process of validation, the latter of statements which non-experts believe to be true. We summarize the MWN / MFN / MBN project, and describe some of its applications.", "phrases": ["validation", "mwn", "medical wordnet"], "overall_score": 2.300351957166497, "scores": [2.5190768650149233, 0.8085272672564766, 0.5239484840363449], "rank_score": 1.2838508721025816} -{"id": "gao-etal-2018-action", "title": "What Action Causes This? Towards Naive Physical Action-Effect Prediction", "abstract": "Despite recent advances in knowledge representation, automated reasoning, and machine learning, artificial agents still lack the ability to understand basic action-effect relations regarding the physical world, for example, the action of cutting a cucumber most likely leads to the state where the cucumber is broken apart into smaller pieces. If artificial agents (e.g., robots) ever become our partners in joint tasks, it is critical to empower them with such action-effect understanding so that they can reason about the state of the world and plan for actions. Towards this goal, this paper introduces a new task on naive physical action-effect prediction, which addresses the relations between concrete actions (expressed in the form of verb-noun pairs) and their effects on the state of the physical world as depicted by images. We collected a dataset for this task and developed an approach that harnesses web image data through distant supervision to facilitate learning for action-effect prediction. Our empirical results have shown that web data can be used to complement a small number of seed examples (e.g., three examples for each action) for model learning. This opens up possibilities for agents to learn physical action-effect relations for tasks at hand through communication with humans with a few examples.", "phrases": ["action", "world", "causal relation"], "overall_score": 2.4980566605394365, "scores": [2.7776562650231047, 0.549913177886308, 0.5236720880813814], "rank_score": 1.2837471769969315} -{"id": "zhang-weiss-2016-stack", "title": "Stack-propagation: Improved Representation Learning for Syntax", "abstract": "Traditional syntax models typically leverage part-of-speech (POS) information by constructing features from hand-tuned templates. We demonstrate that a better approach is to utilize POS tags as a regularizer of learned representations. We propose a simple method for learning a stacked pipeline of models which we call \u201cstack-propagation\u201d. We apply this to dependency parsing and tagging, where we use the hidden layer of the tagger network as a representation of the input tokens for the parser. At test time, our parser does not require predicted POS tags. 
On 19 languages from the Universal Dependencies, our method is 1.3% (absolute) more accurate than a state-of-the-art graph-based approach and 2.7% more accurate than the most comparable greedy model.", "phrases": ["pos tagging", "dependency parsing", "stack-propagation"], "overall_score": 2.9550784028997588, "scores": [1.7480025116328388, 1.2241798709792862, 0.8779403493003937], "rank_score": 1.2833742439708395} -{"id": "aroyehun-gelbukh-2018-aggression", "title": "Aggression Detection in Social Media: Using Deep Neural Networks, Data Augmentation, and Pseudo Labeling", "abstract": "With the advent of the read-write web which facilitates social interactions in online spaces, the rise of anti-social behaviour in online spaces has attracted the attention of researchers. In this paper, we address the challenge of automatically identifying aggression in social media posts. Our team, saroyehun, participated in the English track of the Aggression Detection in Social Media Shared Task. On this task, we investigate the efficacy of deep neural network models of varying complexity. Our results reveal that deep neural network models require more data points to do better than an NBSVM linear baseline based on character n-grams. Our improved deep neural network models were trained on augmented data and pseudo labeled examples. Our LSTM classifier receives a weighted macro-F1 score of 0.6425 to rank first overall on the Facebook subtask of the shared task. On the social media sub-task, our CNN-LSTM model records a weighted macro-F1 score of 0.5920 to place third overall.", "phrases": ["data augmentation", "aggression detection", "recurrent neural network"], "overall_score": 3.3866530761414557, "scores": [1.752809705230138, 1.5669125899513727, 0.5301217814932321], "rank_score": 1.283281358891581} -{"id": "huang-huang-2013-optimized", "title": "Optimized Event Storyline Generation based on Mixture-Event-Aspect Model", "abstract": "Recently, much research focuses on event storyline generation, which aims to produce a concise, global and temporal event summary from a collection of articles. Generally, each event contains multiple sub-events and the storyline should be composed of the component summaries of all the sub-events. However, different sub-events have different part-whole relationships with the major event, which is important to correspond to users\u2019 interests but seldom considered in previous work. To distinguish different types of sub-events, we propose a mixture-event-aspect model which models different sub-events into local and global aspects. Combining these local/global aspects with summarization requirements together, we utilize an optimization method to generate the component summaries along the timeline. We develop experimental systems on 6 distinctively different datasets. Evaluation and comparison results indicate the effectiveness of our proposed method.", "phrases": ["event storyline generation", "mixture-event-aspect model", "sub-event"], "overall_score": 2.496969631704319, "scores": [2.394080450741803, 0.9345583691672799, 0.5209268441322861], "rank_score": 1.2831885546804565} -{"id": "komachi-etal-2006-phrase", "title": "Phrase reordering for statistical machine translation based on predicate-argument structure", "abstract": "In this paper, we describe a novel phrase reordering model based on predicate-argument structure. Our phrase reordering method utilizes a general predicate-argument structure analyzer to reorder source language chunks based on predicate-argument structure.
We explicitly model long-distance phrase alignments by reordering arguments and predicates. The reordering approach is applied as a preprocessing step in the training phase of a phrase-based statistical MT system. We report experimental results in the evaluation campaign of IWSLT 2006.", "phrases": ["predicate-argument structure", "input sentence", "heuristic rule"], "overall_score": 2.954571288489035, "scores": [2.7327075077695593, 0.5698459121810175, 0.5469086009911281], "rank_score": 1.2831540069805685} -{"id": "henderson-etal-2008-hybrid", "title": "Hybrid Reinforcement/Supervised Learning of Dialogue Policies from Fixed Data Sets", "abstract": "We propose a method for learning dialogue management policies from a fixed data set. The method addresses the challenges posed by Information State Update (ISU)-based dialogue systems, which represent the state of a dialogue as a large set of features, resulting in a very large state space and a huge policy space. To address the problem that any fixed data set will only provide information about small portions of these state and policy spaces, we propose a hybrid model that combines reinforcement learning with supervised learning. The reinforcement learning is used to optimize a measure of dialogue reward, while the supervised learning is used to restrict the learned policy to the portions of these spaces for which we have data. We also use linear function approximation to address the need to generalize from a fixed amount of data to large state spaces. To demonstrate the effectiveness of this method on this challenging task, we trained this model on the COMMUNICATOR corpus, to which we have added annotations for user actions and Information States. When tested with a user simulation trained on a different part of the same data set, our hybrid model outperforms a pure supervised learning model and a pure reinforcement learning model. It also outperforms the hand-crafted systems on the COMMUNICATOR data, according to automatic evaluation measures, improving over the average COMMUNICATOR system policy by 10%. The proposed method will improve techniques for bootstrapping and automatic optimization of dialogue management policies from limited initial data sets.", "phrases": ["supervised learning", "dialogue policy", "reinforcement learning approach"], "overall_score": 3.554144315852229, "scores": [2.181837442943165, 1.0765959574520074, 0.5872263839176085], "rank_score": 1.281886594770927} -{"id": "rohrbach-etal-2018-object", "title": "Object Hallucination in Image Captioning", "abstract": "Despite continuously improving performance, contemporary image captioning models are prone to \u201challucinating\u201d objects that are not actually in a scene. One problem is that standard metrics only measure similarity to ground truth captions and may not fully capture image relevance. In this work, we propose a new image relevance metric to evaluate current models with veridical visual labels and assess their rate of object hallucination. We analyze how captioning model architectures and learning objectives contribute to object hallucination, explore when hallucination is likely due to image misclassification or language priors, and assess how well current sentence metrics capture object hallucination. We investigate these questions on the standard image captioning benchmark, MSCOCO, using a diverse set of models.
Our analysis yields several interesting findings, including that models which score best on standard sentence metrics do not always have lower hallucination and that models which hallucinate more tend to make errors driven by language priors.", "phrases": ["image captioning", "object hallucination", "text generation model"], "overall_score": 2.950629990578776, "scores": [2.3936939095527694, 0.9293095108270266, 0.5213235487600227], "rank_score": 1.2814423230466063} -{"id": "li-eisner-2019-specializing", "title": "Specializing Word Embeddings (for Parsing) by Information Bottleneck", "abstract": "Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.", "phrases": ["information bottleneck", "discriminative parser", "dimensionality reduction"], "overall_score": 2.9497704064977817, "scores": [2.749986414877129, 0.5556273440655408, 0.5375932723278259], "rank_score": 1.2810690104234985} -{"id": "hu-etal-2019-open", "title": "Open-Domain Targeted Sentiment Analysis via Span-Based Extraction and Classification", "abstract": "Open-domain targeted sentiment analysis aims to detect opinion targets along with their sentiment polarities from a sentence. Prior work typically formulates this task as a sequence tagging problem. However, such formulation suffers from problems such as huge search space and sentiment inconsistency. To address these problems, we propose a span-based extract-then-classify framework, where multiple opinion targets are directly extracted from the sentence under the supervision of target span boundaries, and corresponding polarities are then classified using their span representations. We further investigate three approaches under this framework, namely the pipeline, joint, and collapsed models. Experiments on three benchmark datasets show that our approach consistently outperforms the sequence tagging baseline. Moreover, we find that the pipeline model achieves the best performance compared with the other two models.", "phrases": ["sentiment analysis", "span representation", "aspect term"], "overall_score": 3.7024699733790105, "scores": [1.9330050771278917, 1.0763152647587308, 0.8335797590408304], "rank_score": 1.2809667003091512} -{"id": "heilman-madnani-2013-ets", "title": "ETS: Domain Adaptation and Stacking for Short Answer Scoring", "abstract": "Automatic scoring of short text responses to educational assessment items is a challenging task, particularly because large amounts of labeled data (i.e., human-scored responses) may or may not be available due to the variety of possible questions and topics. 
As such, it seems desirable to integrate various approaches, making use of model answers from experts (e.g., to give higher scores to responses that are similar), prescored student responses (e.g., to learn direct associations between particular phrases and scores), etc. Here, we describe a system that uses stacking (Wolpert, 1992) and domain adaptation (Daume III, 2007) to achieve this aim, allowing us to integrate item-specific n-gram features and more general text similarity measures (Heilman and Madnani, 2012). We report encouraging results from the Joint Student Response Analysis and 8th Recognizing Textual Entailment Challenge.", "phrases": ["domain adaptation", "short answer scoring", "n-gram feature"], "overall_score": 2.2948652485710825, "scores": [2.339812976362761, 0.9685017135745801, 0.5340513552547308], "rank_score": 1.2807886817306906} -{"id": "roth-yih-2004-linear", "title": "A Linear Programming Formulation for Global Inference in Natural Language Tasks", "abstract": "Given a collection of discrete random variables representing outcomes of learned local predictors in natural language, e.g., named entities and relations, we seek an optimal global assignment to the variables in the presence of general (non-sequential) constraints. Examples of these constraints include the type of arguments a relation can take, and the mutual activity of different relations, etc. We develop a linear programming formulation for this problem and evaluate it in the context of simultaneously learning named entities and relations. Our approach allows us to efficiently incorporate domain- and task-specific constraints at decision time, resulting in significant improvements in the accuracy and the \"human-like\" quality of the inferences.", "phrases": ["global inference", "integer linear programming", "relation extraction", "mention", "nlp community"], "overall_score": 5.345695834218479, "scores": [1.7512809624049808, 2.3741191798615766, 1.1459473795687478, 0.5847641049753208, 0.5468590232413589], "rank_score": 1.280594130010397} -{"id": "zhang-etal-2015-fixed", "title": "The Fixed-Size Ordinally-Forgetting Encoding Method for Neural Network Language Models", "abstract": "In this paper, we propose the new fixed-size ordinally-forgetting encoding (FOFE) method, which can almost uniquely encode any variable-length sequence of words into a fixed-size representation. FOFE can model the word order in a sequence using a simple ordinally-forgetting mechanism according to the positions of words. In this work, we have applied FOFE to feedforward neural network language models (FNN-LMs). Experimental results have shown that without using any recurrent feedback, FOFE-based FNN-LMs can significantly outperform not only the standard fixed-input FNN-LMs but also the popular recurrent neural network (RNN) LMs.", "phrases": ["fixed-size", "ordinally-forgetting encoding", "encoding method"], "overall_score": 2.662607300964253, "scores": [1.6387998313392251, 1.2872283364369457, 0.9153021811597047], "rank_score": 1.2804434496452919} -{"id": "hasegawa-etal-2004-discovering", "title": "Discovering Relations among Named Entities from Large Corpora", "abstract": "Discovering the significant relations embedded in documents would be very useful not only for information retrieval but also for question answering and summarization. Prior methods for relation discovery, however, needed large annotated corpora which cost a great deal of time and effort.
We propose an unsupervised method for relation discovery from large corpora. The key idea is clustering pairs of named entities according to the similarity of context words intervening between the named entities. Our experiments using one year of newspapers reveal not only that the relations among named entities could be detected with high recall and precision, but also that appropriate labels could be automatically provided for the relations.", "phrases": ["large corpora", "relation extraction", "clustering method", "relationship type", "difficulty"], "overall_score": 4.845089945302305, "scores": [1.929021128203197, 2.0105474440790942, 1.3966609275440525, 0.535938119151781, 0.5295860112773928], "rank_score": 1.2803507260511033} -{"id": "zhang-etal-2006-synchronous", "title": "Synchronous Binarization for Machine Translation", "abstract": "Systems based on synchronous grammars and tree transducers promise to improve the quality of statistical machine translation output, but are often very computationally intensive. The complexity is exponential in the size of individual grammar rules due to arbitrary re-orderings between the two languages, and rules extracted from parallel corpora can be quite large. We devise a linear-time algorithm for factoring syntactic re-orderings by binarizing synchronous rules when possible and show that the resulting rule set significantly improves the speed and accuracy of a state-of-the-art syntax-based machine translation system.", "phrases": ["binarization", "machine translation", "synchronous grammar", "scfg", "span"], "overall_score": 3.7692682615378734, "scores": [2.2986200920101965, 1.501818401902167, 1.2140182910385324, 0.8376285355367847, 0.5485707776817237], "rank_score": 1.2801312196338805} -{"id": "kim-hovy-2006-extracting", "title": "Extracting Opinions, Opinion Holders, and Topics Expressed in Online News Media Text", "abstract": "This paper presents a method for identifying an opinion with its holder and topic, given a sentence from online news media texts. We introduce an approach of exploiting the semantic structure of a sentence, anchored to an opinion bearing verb or adjective. This method uses semantic role labeling as an intermediate step to label an opinion holder and topic using data from FrameNet. We decompose our task into three phases: identifying an opinion-bearing word, labeling semantic roles related to the word in the sentence, and then finding the holder and the topic of the opinion word among the labeled semantic roles. For a broader coverage, we also employ a clustering technique to predict the most probable frame for a word which is not defined in FrameNet. Our experimental results show that our system performs significantly better than the baseline.", "phrases": ["opinion holder", "role labeling", "opinion word", "subjectivity analysis system", "well-trained srl model"], "overall_score": 5.057710634771544, "scores": [2.1415286149985255, 1.775715447075275, 1.3812500482018168, 0.5602359677397118, 0.5414200962029281], "rank_score": 1.2800300348436515} -{"id": "yang-etal-2011-corpus", "title": "Corpus-Guided Sentence Generation of Natural Images", "abstract": "We propose a sentence generation strategy that describes images by predicting the most likely nouns, verbs, scenes and prepositions that make up the core sentence structure. The inputs are initial noisy estimates of the objects and scenes detected in the image using state-of-the-art trained detectors.
As predicting actions from still images directly is unreliable, we use a language model trained on the English Gigaword corpus to obtain their estimates, together with probabilities of co-located nouns, scenes and prepositions. We use these estimates as parameters of an HMM that models the sentence generation process, with hidden nodes as sentence components and image detections as the emissions. Experimental results show that our strategy of combining vision and language produces readable and descriptive sentences compared to naive strategies that use vision alone.", "phrases": ["sentence generation", "image", "preposition", "caption"], "overall_score": 4.265143255233065, "scores": [0.9481625656761374, 2.063564467316859, 1.2567646933973935, 0.8514140203415931], "rank_score": 1.2799764366829958} -{"id": "pinter-etal-2017-mimicking", "title": "Mimicking Word Embeddings using Subword RNNs", "abstract": "Word embeddings improve generalization over lexical features by placing each word in a lower-dimensional space, using distributional information obtained from unlabeled data. However, the effectiveness of word embeddings for downstream NLP tasks is limited by out-of-vocabulary (OOV) words, for which embeddings do not exist. In this paper, we present MIMICK, an approach to generating OOV word embeddings compositionally, by learning a function from spellings to distributional embeddings. Unlike prior work, MIMICK does not require re-training on the original word embedding corpus; instead, learning is performed at the type level. Intrinsic and extrinsic evaluations demonstrate the power of this simple approach. On 23 languages, MIMICK improves performance over a word-based baseline for tagging part-of-speech and morphosyntactic attributes. It is competitive with (and complementary to) a supervised character-based model in low resource settings.", "phrases": ["word embedding", "spelling", "mimick"], "overall_score": 4.013163465144488, "scores": [1.6280316926405454, 1.3732655086398091, 0.8384452974522633], "rank_score": 1.279914166244206} -{"id": "mcclosky-etal-2006-reranking", "title": "Reranking and Self-Training for Parser Adaptation", "abstract": "Statistical parsers trained and tested on the Penn Wall Street Journal (WSJ) treebank have shown vast improvements over the last 10 years. Much of this improvement, however, is based upon an ever-increasing number of features to be trained on (typically) the WSJ treebank data. This has led to concern that such parsers may be too finely tuned to this corpus at the expense of portability to other genres. Such worries have merit. The standard \"Charniak parser\" checks in at a labeled precision-recall f-measure of 89.7% on the Penn WSJ test set, but only 82.9% on the test set from the Brown treebank corpus. This paper should allay these fears. In particular, we show that the reranking parser described in Charniak and Johnson (2005) improves performance of the parser on Brown to 85.2%. Furthermore, use of the self-training techniques described in (McClosky et al., 2006) raises this to 87.8% (an error reduction of 28%), again without any use of labeled Brown data.
This is remarkable since training the parser and reranker on labeled Brown data achieves only 88.4%.", "phrases": ["self-training", "wsj", "reranking", "target domain", "parser degrade"], "overall_score": 4.50994842179837, "scores": [2.9842306978721425, 0.8999889817247688, 1.0833371267177094, 0.8306443142282695, 0.5964207428714737], "rank_score": 1.2789243726828727} -{"id": "kawahara-kurohashi-2008-coordination", "title": "Coordination Disambiguation without Any Similarities", "abstract": "The use of similarities has been one of the main approaches to resolve the ambiguities of coordinate structures. In this paper, we present an alternative method for coordination disambiguation, which does not use similarities. Our hypothesis is that coordinate structures are supported by surrounding dependency relations, and that it is such dependency relations that give rise to the similarity between conjuncts that humans perceive. Based on this hypothesis, we built a Japanese fully-lexicalized generative parser that includes coordination disambiguation. Experimental results on web sentences indicated the effectiveness of our approach, and endorsed our hypothesis.", "phrases": ["coordinate structure", "dependency relation", "conjunct", "generative parser", "coordination disambiguation"], "overall_score": 2.2910947946456974, "scores": [3.150424991165079, 0.9168585667291022, 0.9054837508216262, 0.8942145580176272, 0.5264398899066219], "rank_score": 1.2786843513280115} -{"id": "xu-etal-2015-problems", "title": "Problems in Current Text Simplification Research: New Data Can Help", "abstract": "Simple Wikipedia has dominated simplification research in the past 5 years. In this opinion paper, we argue that focusing on Wikipedia limits simplification research. We back up our arguments with corpus analysis and by highlighting statements that other researchers have made in the simplification literature. We introduce a new simplification dataset that is a significant improvement over Simple Wikipedia, and present a novel quantitative-comparative approach to study the quality of simplification data resources.", "phrases": ["simplification", "newsela", "parallel corpus", "complexity level", "simple english wikipedia"], "overall_score": 5.026337477791276, "scores": [2.2669842814305534, 2.142655816782229, 0.8891086337333712, 0.5553878835341556, 0.5377255209753353], "rank_score": 1.278372427291129} -{"id": "yang-etal-2018-distantly", "title": "Distantly Supervised NER with Partial Annotation Learning and Reinforcement Learning", "abstract": "A bottleneck problem with Chinese named entity recognition (NER) in new domains is the lack of annotated data. One solution is to utilize the method of distant supervision, which has been widely used in relation extraction, to automatically populate annotated training data without human cost. The distant supervision assumption here is that if a string in text is included in a predefined dictionary of entities, the string might be an entity. However, this kind of auto-generated data suffers from two main problems: incomplete and noisy annotations, which affect the performance of NER models. In this paper, we propose a novel approach which can partially solve the above problems of distant supervision for NER. In our approach, to handle the incomplete problem, we apply partial annotation learning to reduce the effect of unknown labels of characters. As for noisy annotation, we design an instance selector based on reinforcement learning to distinguish positive sentences from auto-generated annotations.
In experiments, we create two datasets for Chinese named entity recognition in two domains with the help of distant supervision. The experimental results show that the proposed approach obtains better performance than the comparison systems on both datasets.", "phrases": ["reinforcement learning", "annotated data", "distant supervision", "ner model", "instance selector"], "overall_score": 4.164534313475981, "scores": [2.6042883524779046, 1.487075318995471, 1.198045992654553, 0.5620174202332939, 0.5396271168825247], "rank_score": 1.2782108402487495} -{"id": "gkatzia-etal-2013-generating", "title": "Generating Student Feedback from Time-Series Data Using Reinforcement Learning", "abstract": "We describe a statistical Natural Language Generation (NLG) method for summarisation of time-series data in the context of feedback generation for students. In this paper, we initially present a method for collecting time-series data from students (e.g. marks, lectures attended) and use example feedback from lecturers in a data-driven approach to content selection. We show a novel way of constructing a reward function for our Reinforcement Learning agent that is informed by the lecturers\u2019 method of providing feedback. We evaluate our system with undergraduate students by comparing it to three baseline systems: a rule-based system, lecturer-constructed summaries and a Brute Force system. Our evaluation shows that the feedback generated by our learning agent is viewed by students to be as good as the feedback from the lecturers. Our findings suggest that the learning agent needs to take into account both the student and lecturers\u2019 preferences.", "phrases": ["student", "feedback", "time-series data", "reinforcement learning"], "overall_score": 2.2896223958671085, "scores": [1.7051636667556056, 1.4387912690986973, 1.0873382990128333, 0.8801571248255036], "rank_score": 1.27786258992316} -{"id": "wang-etal-2016-relation", "title": "Relation Classification via Multi-Level Attention CNNs", "abstract": "Relation classification is a crucial ingredient in numerous information extraction systems seeking to mine structured facts from text. We propose a novel convolutional neural network architecture for this task, relying on two levels of attention in order to better discern patterns in heterogeneous contexts. This architecture enables end-to-end learning from task-specific labeled data, forgoing the need for external knowledge such as explicit dependency structures. Experiments show that our model outperforms previous state-of-the-art methods, including those relying on much richer forms of prior knowledge.", "phrases": ["multi-level attention cnn", "relation classification", "rnn", "deep learning model", "lexicon-level feature"], "overall_score": 4.467902541302483, "scores": [2.293713601517362, 1.9338944064611332, 1.074672588649711, 0.5473780196518596, 0.5394345899542543], "rank_score": 1.277818641246864} -{"id": "zhong-etal-2020-logicalfactchecker", "title": "LogicalFactChecker: Leveraging Logical Operations for Fact Checking with Graph Module Network", "abstract": "Verifying the correctness of a textual statement requires not only semantic reasoning about the meaning of words, but also symbolic reasoning about logical operations like count, superlative, aggregation, etc. In this work, we propose LogicalFactChecker, a neural network approach capable of leveraging logical operations for fact checking.
It achieves state-of-the-art performance on TABFACT, a large-scale benchmark dataset built for verifying a textual statement with semi-structured tables. This is achieved by a graph module network built upon the Transformer-based architecture. With a textual statement and a table as the input, LogicalFactChecker automatically derives a program (a.k.a. logical form) of the statement in a semantic parsing manner. A heterogeneous graph is then constructed to capture not only the structures of the table and the program, but also the connections between inputs with different modalities. Such a graph reveals the related contexts of each word in the statement, the table and the program. The graph is used to obtain graph-enhanced contextual representations of words in the Transformer-based architecture. After that, a program-driven module network is further introduced to exploit the hierarchical structure of the program, where semantic compositionality is dynamically modeled along the program structure with a set of function-specific modules. Ablation experiments suggest that both the heterogeneous graph and the module network are important to obtain strong results.", "phrases": ["graph module network", "symbolic reasoning", "graph-enhanced contextual representation", "logicalfactchecker"], "overall_score": 3.2769648512079264, "scores": [1.9166021300216716, 1.8091943693457035, 0.83598184974439, 0.5485991196286751], "rank_score": 1.27759436718511} -{"id": "lalor-etal-2016-building", "title": "Building an Evaluation Scale using Item Response Theory", "abstract": "Evaluation of NLP methods requires testing against a previously vetted gold-standard test set and reporting standard metrics (accuracy/precision/recall/F1). The current assumption is that all items in a given test set are equal with regards to difficulty and discriminating power. We propose Item Response Theory (IRT) from psychometrics as an alternative means for gold-standard test-set generation and NLP system evaluation. IRT is able to describe characteristics of individual items - their difficulty and discriminating power - and can account for these characteristics in its estimation of human intelligence or ability for an NLP task. In this paper, we demonstrate IRT by generating a gold-standard test set for Recognizing Textual Entailment. By collecting a large number of human responses and fitting our IRT model, we show that our IRT model compares NLP systems with the performance in a human population and is able to provide more insight into system performance than standard evaluation metrics. We show that a high accuracy score does not always imply a high IRT score, which depends on the item characteristics and the response pattern.", "phrases": ["item response theory", "difficulty", "human response", "model performance"], "overall_score": 3.174479463620654, "scores": [2.607747331513813, 1.0771913035971528, 0.9022161138181551, 0.5228633097234798], "rank_score": 1.2775045146631503} -{"id": "gamback-sikdar-2017-using", "title": "Using Convolutional Neural Networks to Classify Hate-Speech", "abstract": "The paper introduces a deep learning-based Twitter hate-speech text classification system. The classifier assigns each tweet to one of four predefined categories: racism, sexism, both (racism and sexism) and non-hate-speech. Four Convolutional Neural Network models were trained, respectively, on
character 4-grams, word vectors based on semantic information built using word2vec, randomly generated word vectors, and word vectors combined with character n-grams. The feature set was down-sized in the networks by max-pooling, and a softmax function was used to classify tweets. Tested by 10-fold cross-validation, the model based on word2vec embeddings performed best, with higher precision than recall, and a 78.3% F-score.", "phrases": ["convolutional neural networks", "hate-speech", "offensive language detection"], "overall_score": 4.917442414895393, "scores": [1.6368218030678081, 1.3567173047411274, 0.8380873051976396], "rank_score": 1.277208804335525} -{"id": "simard-etal-2005-translating", "title": "Translating with Non-contiguous Phrases", "abstract": "This paper presents a phrase-based statistical machine translation method, based on non-contiguous phrases, i.e. phrases with gaps. A method for producing such phrases from a word-aligned corpus is proposed. A statistical translation model is also presented that deals with such phrases, as well as a training method based on the maximization of translation accuracy, as measured with the NIST evaluation metric. Translations are produced by means of a beam-search decoder. Experimental results are presented that demonstrate how the proposed method allows better generalization from the training data.", "phrases": ["non-contiguous phrase", "gap", "translation model", "additional linguistic phenomenon", "matrax"], "overall_score": 3.7591488792141865, "scores": [2.613988839691425, 1.6930965152758395, 0.9543220272657821, 0.5652940137155408, 0.5567708135491761], "rank_score": 1.2766944418995525} -{"id": "velldal-etal-2018-norec", "title": "NoReC: The Norwegian Review Corpus", "abstract": "This paper presents the Norwegian Review Corpus (NoReC), created for training and evaluating models for document-level sentiment analysis. The full-text reviews have been collected from major Norwegian news sources and cover a range of different domains, including literature, movies, video games, restaurants, music and theater, in addition to product reviews across a range of categories. Each review is labeled with a manually assigned score of 1-6, as provided by the rating of the original author. This first release of the corpus comprises more than 35,000 reviews. It is distributed using the CoNLL-U format, pre-processed using UDPipe, along with a rich set of metadata. The work reported in this paper forms part of the SANT initiative (Sentiment Analysis for Norwegian Text), a project seeking to provide resources and tools for sentiment analysis and opinion mining for Norwegian. As resources for sentiment analysis have so far been unavailable for Norwegian, NoReC represents a highly valuable and sought-after addition to Norwegian language technology.", "phrases": ["norwegian review corpus", "full-text review", "news source", "norec"], "overall_score": 2.0544568722074654, "scores": [3.0362063500813607, 0.8797751660142323, 0.6155977470005348, 0.5744440418981541], "rank_score": 1.2765058262485705} -{"id": "lai-etal-2019-gated", "title": "A Gated Self-attention Memory Network for Answer Selection", "abstract": "Answer selection is an important research problem, with applications in many areas. Previous deep learning based approaches for the task mainly adopt the Compare-Aggregate architecture that performs word-level comparison followed by aggregation.
In this work, we take a departure from the popular Compare-Aggregate architecture, and instead, propose a new gated self-attention memory network for the task. Combined with a simple transfer learning technique from a large-scale online corpus, our model outperforms previous methods by a large margin, achieving new state-of-the-art results on two standard answer selection datasets: TrecQA and WikiQA.", "phrases": ["self-attention memory network", "answer selection", "many nlp task"], "overall_score": 2.6520694201393855, "scores": [2.336835336711623, 0.9543475653999148, 0.5349444984168231], "rank_score": 1.2753758001761202} -{"id": "su-etal-2020-deepmet", "title": "DeepMet: A Reading Comprehension Paradigm for Token-level Metaphor Detection", "abstract": "Machine metaphor understanding is one of the major topics in NLP. Most of the recent attempts consider it as classification or sequence tagging task. However, few types of research introduce the rich linguistic information into the field of computational metaphor by leveraging powerful pre-training language models. We focus on a novel reading comprehension paradigm for solving the token-level metaphor detection task which provides an innovative type of solution for this task. We propose an end-to-end deep metaphor detection model named DeepMet based on this paradigm. The proposed approach encodes the global text context (whole sentence), local text context (sentence fragments), and question (query word) information as well as incorporating two types of part-of-speech (POS) features by making use of the advanced pre-training language model. The experimental results by using several metaphor datasets show that our model achieves competitive results in the second shared task on metaphor detection.", "phrases": ["reading comprehension paradigm", "metaphor detection", "language model", "part-of-speech"], "overall_score": 2.9343416723363074, "scores": [1.8335235674979578, 1.604286299355531, 1.1106404198114412, 0.5490232985927421], "rank_score": 1.2743683963144181} -{"id": "rinott-etal-2015-show", "title": "Show Me Your Evidence - an Automatic Method for Context Dependent Evidence Detection", "abstract": "Engaging in a debate with oneself or others to take decisions is an integral part of our day-to-day life. A debate on a topic (say, use of performance enhancing drugs) typically proceeds by one party making an assertion/claim (say, PEDs are bad for health) and then providing an evidence to support the claim (say, a 2006 study shows that PEDs have psychiatric side effects). In this work, we propose the task of automatically detecting such evidences from unstructured text that support a given claim. This task has many practical applications in decision support and persuasion enhancement in a wide range of domains. We first introduce an extensive benchmark data set tailored for this task, which allows training statistical models and assessing their performance. Then, we suggest a system architecture based on supervised learning to address the evidence detection task.
Finally, promising experimental results are reported.", "phrases": ["evidence detection", "claim", "wikipedia article", "lexical feature", "technology"], "overall_score": 4.566333566252222, "scores": [2.0891946043492813, 1.7877179772232525, 1.0366271772196476, 0.9079211365362051, 0.5498373239291611], "rank_score": 1.2742596438515095} -{"id": "jovanoski-etal-2015-sentiment", "title": "Sentiment Analysis in Twitter for Macedonian", "abstract": "We present work on sentiment analysis in Twitter for Macedonian. As this is pioneering work for this combination of language and genre, we created suitable resources for training and evaluating a system for sentiment analysis of Macedonian tweets. In particular, we developed a corpus of tweets annotated with tweet-level sentiment polarity (positive, negative, and neutral), as well as with phrase-level sentiment, which we made freely available for research purposes. We further bootstrapped several large-scale sentiment lexicons for Macedonian, motivated by previous work for English. The impact of several different pre-processing steps as well as of various features is shown in experiments that represent the first attempt to build a system for sentiment analysis in Twitter for the morphologically rich Macedonian language. Overall, our experimental results show an F1-score of 92.16, which is very strong and is on par with the best results for English, which were achieved in recent SemEval competitions.", "phrases": ["twitter", "macedonian tweet", "sentiment analysis"], "overall_score": 2.2822134607294586, "scores": [2.357273929313144, 0.8812318598214616, 0.5826769643396201], "rank_score": 1.2737275844914084} -{"id": "zhang-etal-2020-improving", "title": "Improving Massively Multilingual Neural Machine Translation and Zero-Shot Translation", "abstract": "Massively multilingual models for neural machine translation (NMT) are theoretically attractive, but often underperform bilingual models and deliver poor zero-shot translations. In this paper, we explore ways to improve them. We argue that multilingual NMT requires stronger modeling capacity to support language pairs with varying typological characteristics, and overcome this bottleneck via language-specific components and deepening NMT architectures. We identify the off-target translation issue (i.e. translating into a wrong target language) as the major source of the inferior zero-shot performance, and propose random online backtranslation to enforce the translation of unseen training language pairs. Experiments on OPUS-100 (a novel multilingual dataset with 100 languages) show that our approach substantially narrows the performance gap with bilingual models in both one-to-many and many-to-many settings, and improves zero-shot performance by ~10 BLEU, approaching conventional pivot-based methods.", "phrases": ["neural machine translation", "zero-shot translation", "multilingual nmt", "training language pair", "language-aware layer normalization"], "overall_score": 4.563863937644404, "scores": [3.2994354389047134, 0.871255329235404, 1.1256441342240964, 0.5422424671272248, 0.5292750348419655], "rank_score": 1.2735704808666806} -{"id": "calixto-etal-2019-latent", "title": "Latent Variable Model for Multi-modal Translation", "abstract": "In this work, we propose to model the interaction between visual and textual features for multi-modal neural machine translation (MMT) through a latent variable model. 
This latent variable can be seen as a multi-modal stochastic embedding of an image and its description in a foreign language. It is used in a target-language decoder and also to predict image features. Importantly, our model formulation utilises visual and textual inputs during training but does not require that images be available at test time. We show that our latent variable MMT formulation improves considerably over strong baselines, including a multi-task learning approach (Elliott and Kadar, 2017) and a conditional variational auto-encoder approach (Toyama et al., 2016). Finally, we show improvements due to (i) predicting image features in addition to only conditioning on them, (ii) imposing a constraint on the KL term to promote models with non-negligible mutual information between inputs and latent variable, and (iii) by training on additional target-language image descriptions (i.e. synthetic data).", "phrases": ["textual feature", "image", "latent variable model"], "overall_score": 2.4775929712486464, "scores": [2.3493196751401726, 0.9044872397312909, 0.5658858481034107], "rank_score": 1.2732309209916248} -{"id": "rudinger-etal-2015-script", "title": "Script Induction as Language Modeling", "abstract": "The narrative cloze is an evaluation metric commonly used for work on automatic script induction. While prior work in this area has focused on count-based methods from distributional semantics, such as pointwise mutual information, we argue that the narrative cloze can be productively reframed as a language modeling task. By training a discriminative language model for this task, we attain improvements of up to 27 percent over prior methods on standard narrative cloze metrics.", "phrases": ["language modeling", "script induction", "event prediction", "cloze task", "other system"], "overall_score": 3.934814936940277, "scores": [2.5713622209581923, 2.1859626250755255, 0.545646682000336, 0.5370302343496136, 0.5248654245183263], "rank_score": 1.272973437380399} -{"id": "ghosh-veale-2017-magnets", "title": "Magnets for Sarcasm: Making Sarcasm Detection Timely, Contextual and Very Personal", "abstract": "Sarcasm is a pervasive phenomenon in social media, permitting the concise communication of meaning, affect and attitude. Concision requires wit to produce and wit to understand, which demands from each party knowledge of norms, context and a speaker's mindset. Insight into a speaker's psychological profile at the time of production is a valuable source of context for sarcasm detection. Using a neural architecture, we show significant gains in detection accuracy when knowledge of the speaker's mood at the time of production can be inferred. Our focus is on sarcasm detection on Twitter, and show that the mood exhibited by a speaker over tweets leading up to a new post is as useful a cue for sarcasm as the topical context of the post itself. The work opens the door to an empirical exploration not just of sarcasm in text but of the sarcastic state of mind.", "phrases": ["sarcasm", "mood", "twitter"], "overall_score": 2.646851267449043, "scores": [2.4395490313509725, 0.827290960769466, 0.5517592053989644], "rank_score": 1.2728663991731342} -{"id": "jurgens-etal-2018-measuring", "title": "Measuring the Evolution of a Scientific Field through Citation Frames", "abstract": "Citations have long been used to characterize the state of a scientific field and to identify influential works. 
However, writers use citations for different purposes, and this varied purpose influences uptake by future scholars. Unfortunately, our understanding of how scholars use and frame citations has been limited to small-scale manual citation analysis of individual papers. We perform the largest behavioral study of citations to date, analyzing how scientific works frame their contributions through different types of citations and how this framing affects the field as a whole. We introduce a new dataset of nearly 2,000 citations annotated for their function, and use it to develop a state-of-the-art classifier and label the papers of an entire field: Natural Language Processing. We then show how differences in framing affect scientific uptake and reveal the evolution of the publication venues and the field as a whole. We demonstrate that authors are sensitive to discourse structure and publication venue when citing, and that how a paper frames its work through citations is predictive of the citation count it will receive. Finally, we use changes in citation framing to show that the field of NLP is undergoing a significant increase in consensus.", "phrases": ["evolution", "scientific field", "framing", "citation context"], "overall_score": 3.1620961801198764, "scores": [2.1364210842304194, 1.8290233553694124, 0.6040545182069789, 0.520585501325125], "rank_score": 1.272521114782984} -{"id": "koehn-knight-2003-feature", "title": "Feature-Rich Statistical Translation of Noun Phrases", "abstract": "We define noun phrase translation as a subtask of machine translation. This enables us to build a dedicated noun phrase translation subsystem that improves over the currently best general statistical machine translation methods by incorporating special modeling and special features. We achieved 65.5% translation accuracy in a German-English translation task vs. 53.2% with IBM Model 4.", "phrases": ["noun phrase", "overall translation performance", "isolated translation"], "overall_score": 3.4447990429395046, "scores": [2.627886100489254, 0.6034193032880609, 0.5848709450260734], "rank_score": 1.2720587829344627} -{"id": "shi-etal-2016-string", "title": "Does String-Based Neural MT Learn Source Syntax?", "abstract": "We investigate whether a neural, encoder-decoder translation system learns syntactic information on the source side as a by-product of training. We propose two methods to detect whether the encoder has learned local and global source syntax. A fine-grained analysis of the syntactic structure learned by the encoder reveals which kinds of syntax are learned and which are missing.", "phrases": ["syntactic information", "source side", "nmt encoder", "part-of-speech", "sentence vector"], "overall_score": 5.328418375743597, "scores": [2.1612890263087343, 1.5170726581301939, 1.071150089528609, 1.054133518475324, 0.5553731754683907], "rank_score": 1.2718036935822503} -{"id": "wu-etal-2020-attentive", "title": "Attentive Pooling with Learnable Norms for Text Representation", "abstract": "Pooling is an important technique for learning text representations in many neural NLP models. In conventional pooling methods such as average, max and attentive pooling, text representations are weighted summations of the L1 or L\u221e norm of input features. However, their pooling norms are always fixed and may not be optimal for learning accurate text representations in different tasks.
In addition, in many popular pooling methods such as max and attentive pooling some features may be over-emphasized, while other useful ones are not fully exploited. In this paper, we propose an Attentive Pooling with Learnable Norms (APLN) approach for text representation. Different from existing pooling methods that use a fixed pooling norm, we propose to learn the norm in an end-to-end manner to automatically find the optimal ones for text representation in different tasks. In addition, we propose two methods to ensure the numerical stability of the model training. The first one is scale limiting, which re-scales the input to ensure non-negativity and alleviate the risk of exponential explosion. The second one is re-formulation, which decomposes the exponent operation to avoid computing the real-valued powers of the input and further accelerate the pooling operation. Experimental results on four benchmark datasets show that our approach can effectively improve the performance of attentive pooling.", "phrases": ["learnable norms", "text representation", "attentive pooling"], "overall_score": 1.3967137871853568, "scores": [2.0573790644736927, 0.8826989893815068, 0.873952978544721], "rank_score": 1.2713436774666402} -{"id": "ardila-etal-2020-common", "title": "Common Voice: A Massively-Multilingual Speech Corpus", "abstract": "The Common Voice corpus is a massively-multilingual collection of transcribed speech intended for speech technology research and development. Common Voice is designed for Automatic Speech Recognition purposes but can be useful in other domains (e.g. language identification). To achieve scale and sustainability, the Common Voice project employs crowdsourcing for both data collection and data validation. The most recent release includes 29 languages, and as of November 2019 there are a total of 38 languages collecting data. Over 50,000 individuals have participated so far, resulting in 2,500 hours of collected audio. To our knowledge this is the largest audio corpus in the public domain for speech recognition, both in terms of number of hours and number of languages. As an example use case for Common Voice, we present speech recognition experiments using Mozilla's DeepSpeech Speech-to-Text toolkit. By applying transfer learning from a source English model, we find an average Character Error Rate improvement of 5.99 \u00b1 5.48 for twelve target languages (German, French, Italian, Turkish, Catalan, Slovenian, Welsh, Irish, Breton, Tatar, Chuvash, and Kabyle). For most of these languages, these are the first ever published results on end-to-end Automatic Speech Recognition.", "phrases": ["speech recognition", "project", "data collection", "mozilla", "common voice"], "overall_score": 3.6012962083571343, "scores": [3.5011616828562104, 0.8941497304463674, 0.8656521194621064, 0.5511253538681651, 0.5434088663201283], "rank_score": 1.2710995505905953} -{"id": "yu-etal-2019-sparc", "title": "SParC: Cross-Domain Semantic Parsing in Context", "abstract": "We present SParC, a dataset for cross-domain Semantic Parsing in Context that consists of 4,298 coherent question sequences (12k+ individual questions annotated with SQL queries). It is obtained from controlled user interactions with 200 complex databases over 138 domains. We provide an in-depth analysis of SParC and show that it introduces new challenges compared to existing datasets.
SParC (1) demonstrates complex contextual dependencies, (2) has greater semantic diversity, and (3) requires generalization to unseen domains due to its cross-domain nature and the unseen databases at test time. We experiment with two state-of-the-art text-to-SQL models adapted to the context-dependent, cross-domain setup. The best model obtains an exact match accuracy of 20.2% over all questions and less than 10% over all interaction sequences, indicating that the cross-domain setting and the contextual phenomena of the dataset present significant challenges for future research. The dataset, baselines, and leaderboard are released at .", "phrases": ["semantic parsing", "coherent question sequence", "database", "sparc"], "overall_score": 2.641922769544884, "scores": [2.1209288637233716, 1.8991366855416865, 0.5385325227972035, 0.5233870986497915], "rank_score": 1.2704962926780135} -{"id": "kozareva-hovy-2010-semi", "title": "A Semi-Supervised Method to Learn and Construct Taxonomies Using the Web", "abstract": "Although many algorithms have been developed to harvest lexical resources, few organize the mined terms into taxonomies. We propose (1) a semi-supervised algorithm that uses a root concept, a basic level concept, and recursive surface patterns to learn automatically from the Web hyponym-hypernym pairs subordinated to the root; (2) a Web based concept positioning procedure to validate the learned pairs' is-a relations; and (3) a graph algorithm that derives from scratch the integrated taxonomy structure of all the terms. Comparing results with WordNet, we find that the algorithm misses some concepts and links, but also that it discovers many additional ones lacking in WordNet. We evaluate the taxonomization power of our method on reconstructing parts of the WordNet taxonomy. Experiments show that starting from scratch, the algorithm can reconstruct 62% of the WordNet taxonomy for the regions tested.", "phrases": ["web", "root", "long path", "lexico-syntactic pattern", "dap"], "overall_score": 4.037535343835942, "scores": [2.6968521447117766, 1.048047601802405, 0.9326039396226039, 0.847831216569967, 0.8268786836092116], "rank_score": 1.2704427172631927} -{"id": "ataman-federico-2018-compositional", "title": "Compositional Representation of Morphologically-Rich Input for Neural Machine Translation", "abstract": "Neural machine translation (NMT) models are typically trained with fixed-size input and output vocabularies, which creates an important bottleneck on their accuracy and generalization capability. As a solution, various studies proposed segmenting words into sub-word units and performing translation at the sub-lexical level. However, statistical word segmentation methods have recently shown to be prone to morphological errors, which can lead to inaccurate translations. In this paper, we propose to overcome this problem by replacing the source-language embedding layer of NMT with a bi-directional recurrent neural network that generates compositional representations of the input at any desired level of granularity. We test our approach in a low-resource setting with five languages from different morphological typologies, and under different composition assumptions.
By training NMT to compose word representations from character n-grams, our approach consistently outperforms (from 1.71 to 2.48 BLEU points) NMT learning embeddings of statistically generated sub-word units.", "phrases": ["neural machine translation", "compositional representation", "morphologically-rich language"], "overall_score": 3.0463386153627847, "scores": [2.280976251305823, 0.9385162229035428, 0.5917731593489424], "rank_score": 1.2704218778527694} -{"id": "cook-stevenson-2010-automatically", "title": "Automatically Identifying Changes in the Semantic Orientation of Words", "abstract": "The meanings of words are not fixed but in fact undergo change, with new word senses arising and established senses taking on new aspects of meaning or falling out of usage. Two types of semantic change are amelioration and pejoration; in these processes a word sense changes to become more positive or negative, respectively. In this first computational study of amelioration and pejoration we adapt a web-based method for determining semantic orientation to the task of identifying ameliorations and pejorations in corpora from differing time periods. We evaluate our proposed method on a small dataset of known historical ameliorations and pejorations, and find it to perform better than a random baseline. Since this test dataset is small, we conduct a further evaluation on artificial examples of amelioration and pejoration, and again find evidence that our proposed method is able to identify changes in semantic orientation. Finally, we conduct a preliminary evaluation in which we apply our methods to the task of finding words which have recently undergone amelioration or pejoration.", "phrases": ["semantic orientation", "pejoration", "time period"], "overall_score": 3.438885067546366, "scores": [1.8974074064779856, 1.0761535586649003, 0.8360638337031954], "rank_score": 1.2698749329486938} -{"id": "he-wang-2008-chinese", "title": "Chinese Named Entity Recognition and Word Segmentation Based on Character", "abstract": "Chinese word segmentation and named entity recognition (NER) are both important tasks in Chinese information processing. This paper presents a character-based Conditional Random Fields (CRFs) model for such two tasks. In The SIGHAN Bakeoff 2007, this model participated in all closed tracks for both Chinese NER and word segmentation tasks, and turns out to perform well. Our system ranks 2nd in the closed track on NER of MSRA, and 4th in the closed track on word segmentation of SXU.", "phrases": ["word segmentation", "chinese ner", "character-based approach"], "overall_score": 2.6401594507929773, "scores": [1.89630887381591, 1.3622338932410498, 0.5504021797581983], "rank_score": 1.2696483156050526} -{"id": "billami-etal-2018-resyf", "title": "ReSyf: a French lexicon with ranked synonyms", "abstract": "In this article, we present ReSyf, a lexical resource of monolingual synonyms ranked according to their difficulty to be read and understood by native learners of French. The synonyms come from an existing lexical network and they have been semantically disambiguated and refined. A ranking algorithm, based on a wide range of linguistic features and validated through an evaluation campaign with human annotators, automatically sorts the synonyms corresponding to a given word sense by reading difficulty. ReSyf is freely available and will be integrated into a web platform for reading assistance. 
It can also be applied to perform lexical simplification of French texts.", "phrases": ["synonym", "difficulty", "resyf"], "overall_score": 1.7600852492423353, "scores": [2.424429966040521, 0.8524800082532483, 0.5319894166418295], "rank_score": 1.2696331303118662} -{"id": "gao-etal-2019-dialog", "title": "Dialog State Tracking: A Neural Reading Comprehension Approach", "abstract": "Dialog state tracking is used to estimate the current belief state of a dialog given all the preceding conversation. Machine reading comprehension, on the other hand, focuses on building systems that read passages of text and answer questions that require some understanding of passages. We formulate dialog state tracking as a reading comprehension task to answer the question what is the state of the current dialog? after reading conversational context. In contrast to traditional state tracking methods where the dialog state is often predicted as a distribution over a closed set of all the possible slot values within an ontology, our method uses a simple attention-based neural network to point to the slot values within the conversation. Experiments on MultiWOZ-2.0 cross-domain dialog dataset show that our simple system can obtain similar accuracies compared to the previous more complex methods. By exploiting recent advances in contextual word embeddings, adding a model that explicitly tracks whether a slot value should be carried over to the next turn, and combining our method with a traditional joint state tracking method that relies on closed set vocabulary, we can obtain a joint-goal accuracy of 47.33% on the standard test split, exceeding current state-of-the-art by 11.75%.", "phrases": ["reading comprehension task", "dialog state tracking", "dialogue context"], "overall_score": 4.034850021928462, "scores": [2.337497034411876, 0.8958797753278668, 0.5754164681450857], "rank_score": 1.2695977592949428} -{"id": "dong-etal-2017-learning-generate", "title": "Learning to Generate Product Reviews from Attributes", "abstract": "Automatically generating product reviews is a meaningful, yet not well-studied task in sentiment analysis. Traditional natural language generation methods rely extensively on hand-crafted rules and predefined templates. This paper presents an attention-enhanced attribute-to-sequence model to generate product reviews for given attribute information, such as user, product, and rating. The attribute encoder learns to represent input attributes as vectors. Then, the sequence decoder generates reviews by conditioning its output on these vectors. We also introduce an attention mechanism to jointly generate reviews and align words with input attributes. The proposed model is trained end-to-end to maximize the likelihood of target product reviews given the attributes. We build a publicly available dataset for the review generation task by leveraging the Amazon book reviews and their metadata.
Experiments on the dataset show that our approach outperforms baseline methods and the attention mechanism significantly improves the performance of our model.", "phrases": ["review", "attribute", "review generation task"], "overall_score": 3.6693772540061653, "scores": [1.6590041277582954, 1.5233472299807078, 0.6262008577140922], "rank_score": 1.2695174051510318} -{"id": "guu-etal-2018-generating", "title": "Generating Sentences by Editing Prototypes", "abstract": "We propose a new generative language model for sentences that first samples a prototype sentence from the training corpus and then edits it into a new sentence. Compared to traditional language models that generate from scratch either left-to-right or by first sampling a latent sentence vector, our prototype-then-edit model improves perplexity on language modeling and generates higher quality outputs according to human evaluation. Furthermore, the model gives rise to a latent edit vector that captures interpretable semantics such as sentence similarity and sentence-level analogies.", "phrases": ["prototype", "unconditional text generation", "neural editor model", "generative model", "human-written sentence"], "overall_score": 4.9126776379887955, "scores": [3.0058230221214775, 0.964570022116451, 0.9266188487002325, 0.8543560133936752, 0.5937916761160043], "rank_score": 1.2690319164895683} -{"id": "bel-etal-2007-automatic", "title": "Automatic Acquisition of Grammatical Types for Nouns", "abstract": "The work we present here is concerned with the acquisition of deep grammatical information for nouns in Spanish. The aim is to build a learner that can handle noise, but, more interestingly, that is able to overcome the problem of sparse data, especially important in the case of nouns. We have based our work on two main points. Firstly, we have used distributional evidences as features. Secondly, we made the learner deal with all occurrences of a word as a single complex unit. The obtained results show that grammatical features of nouns are a level of generalization that can be successfully approached with a Decision Tree learner.", "phrases": ["acquisition", "noun", "decision tree learner"], "overall_score": 2.2737907096862764, "scores": [2.6904620572131677, 0.5700943380221528, 0.5465238776529191], "rank_score": 1.269026757629413} -{"id": "del-corro-etal-2015-finet", "title": "FINET: Context-Aware Fine-Grained Named Entity Typing", "abstract": "We propose FINET, a system for detecting the types of named entities in short inputs\u2014such as sentences or tweets\u2014with respect to WordNet\u2019s super fine-grained type system. FINET generates candidate types using a sequence of multiple extractors, ranging from explicitly mentioned types to implicit types, and subsequently selects the most appropriate using ideas from word-sense disambiguation. FINET combats data scarcity and noise from existing systems: It does not rely on supervision in its extractors and generates training data for type selection from WordNet and other resources. FINET supports the most fine-grained type system so far, including types with no annotated training data.
Our experiments indicate that FINET outperforms state-of-the-art methods in terms of recall, precision, and granularity of extracted types.", "phrases": ["entity typing", "type system", "finet"], "overall_score": 3.801009852875938, "scores": [2.082767547652686, 1.1931396132782264, 0.5305176185103477], "rank_score": 1.2688082598137533} -{"id": "caselli-vossen-2017-event", "title": "The Event StoryLine Corpus: A New Benchmark for Causal and Temporal Relation Extraction", "abstract": "This paper reports on the Event StoryLine Corpus (ESC) v1.0, a new benchmark dataset for temporal and causal relation detection. By developing this dataset, we also introduce a new task, the StoryLine Extraction from news data, which aims at extracting and classifying events relevant for stories, from across news documents spread in time and clustered around a single seminal event or topic. In addition to describing the dataset, we also report on three baseline systems whose results show the complexity of the task and suggest directions for the development of more robust systems.", "phrases": ["event storyline corpus", "causal relation", "dataset eventstoryline", "eci"], "overall_score": 3.5178663691231074, "scores": [2.528814926624591, 1.1357480796270607, 0.8787611711243754, 0.5318841878679444], "rank_score": 1.2688020913109928} -{"id": "ortiz-martinez-etal-2010-online", "title": "Online Learning for Interactive Statistical Machine Translation", "abstract": "State-of-the-art Machine Translation (MT) systems are still far from being perfect. An alternative is the so-called Interactive Machine Translation (IMT) framework. In this framework, the knowledge of a human translator is combined with a MT system. The vast majority of the existing work on IMT makes use of the well-known batch learning paradigm. In the batch learning paradigm, the training of the IMT system and the interactive translation process are carried out in separate stages. This paradigm is not able to take advantage of the new knowledge produced by the user of the IMT system. In this paper, we present an application of the online learning paradigm to the IMT framework. In the online learning paradigm, the training and prediction stages are no longer separated. This feature is particularly useful in IMT since it allows the user feedback to be taken into account. The online learning techniques proposed here incrementally update the statistical models involved in the translation process. Empirical results show the great potential of online learning in the IMT framework.", "phrases": ["interactive machine translation", "imt system", "online learning"], "overall_score": 3.042233296142825, "scores": [2.2884947170583234, 0.8539955982904287, 0.6636391649339769], "rank_score": 1.2687098267609096} -{"id": "bethard-etal-2015-semeval", "title": "SemEval-2015 Task 6: Clinical TempEval", "abstract": "Clinical TempEval 2015 brought the temporal information extraction tasks of past TempEval campaigns to the clinical domain. Nine sub-tasks were included, covering problems in time expression identification, event expression identification and temporal relation identification. Participant systems were trained and evaluated on a corpus of clinical notes and pathology reports from the Mayo Clinic, annotated with an extension of TimeML for the clinical domain.
Three teams submitted a total of 13 system runs, with the best systems achieving near-human performance on identifying events and times, but with a large performance gap still remaining for temporal relations.", "phrases": ["clinical tempeval", "clinical note", "semeval"], "overall_score": 3.594151124405473, "scores": [2.264879347510757, 1.0192059788678818, 0.5216476219843474], "rank_score": 1.2685776494543288} -{"id": "zhong-ng-2012-word", "title": "Word Sense Disambiguation Improves Information Retrieval", "abstract": "Previous research has conflicting conclusions on whether word sense disambiguation (WSD) systems can improve information retrieval (IR) performance. In this paper, we propose a method to estimate sense distributions for short queries. Together with the senses predicted for words in documents, we propose a novel approach to incorporate word senses into the language modeling approach to IR and also exploit the integration of synonym relations. Our experimental results on standard TREC collections show that using the word senses tagged by a supervised WSD system, we obtain significant improvements over a state-of-the-art IR system.", "phrases": ["wsd", "language modeling approach", "word sense disambiguation"], "overall_score": 2.6374191256746684, "scores": [2.1296136113855186, 1.1103046958159208, 0.5650731861551105], "rank_score": 1.2683304977855168} -{"id": "vulic-etal-2011-identifying", "title": "Identifying Word Translations from Comparable Corpora Using Latent Topic Models", "abstract": "A topic model outputs a set of multinomial distributions over words for each topic. In this paper, we investigate the value of bilingual topic models, i.e., a bilingual Latent Dirichlet Allocation model for finding translations of terms in comparable corpora without using any linguistic resources. Experiments on a document-aligned English-Italian Wikipedia corpus confirm that the developed methods which only use knowledge from word-topic distributions outperform methods based on similarity measures in the original word-document space. The best results, obtained by combining knowledge from word-topic distributions with similarity measures in the original space, are also reported.", "phrases": ["comparable corpora", "topic model", "cross-lingual signal", "statistical method"], "overall_score": 3.4344085908924566, "scores": [2.9712658323841725, 0.9945307717393276, 0.5765770006375999, 0.5305140241234894], "rank_score": 1.2682219072211474} -{"id": "zhang-etal-2018-guiding", "title": "Guiding Neural Machine Translation with Retrieved Translation Pieces", "abstract": "One of the difficulties of neural machine translation (NMT) is the recall and appropriate translation of low-frequency words or phrases. In this paper, we propose a simple, fast, and effective method for recalling previously seen translation examples and incorporating them into the NMT decoding process. Specifically, for an input sentence, we use a search engine to retrieve sentence pairs whose source sides are similar with the input sentence, and then collect n-grams that are both in the retrieved target sentences and aligned with words that match in the source sentences, which we call \u201ctranslation pieces\u201d. We compute pseudo-probabilities for each retrieved sentence based on similarities between the input sentence and the retrieved source sentences, and use these to weight the retrieved translation pieces. 
Finally, an existing NMT model is used to translate the input sentence, with an additional bonus given to outputs that contain the collected translation pieces. We show our method improves NMT translation results up to 6 BLEU points on three narrow domain translation tasks where repetitiveness of the target sentences is particularly salient. It also causes little increase in the translation time, and compares favorably to another alternative retrieval-based method with respect to accuracy, speed, and simplicity of implementation.", "phrases": ["neural machine translation", "translation piece", "search engine", "sentence pair", "n-gram"], "overall_score": 3.5930061422614914, "scores": [2.007984384117421, 1.366364210252061, 1.212497422686665, 0.9012345981963233, 0.8527869897220018], "rank_score": 1.2681735209948946} -{"id": "shing-etal-2018-expert", "title": "Expert, Crowdsourced, and Machine Assessment of Suicide Risk via Online Postings", "abstract": "We report on the creation of a dataset for studying assessment of suicide risk via online postings in Reddit. Evaluation of risk-level annotations by experts yields what is, to our knowledge, the first demonstration of reliability in risk assessment by clinicians based on social media postings. We also introduce and demonstrate the value of a new, detailed rubric for assessing suicide risk, compare crowdsourced with expert performance, and present baseline predictive modeling experiments using the new dataset, which will be made available to researchers through the American Association of Suicidology.", "phrases": ["suicide risk", "online posting", "post", "suicidology", "expert"], "overall_score": 3.2522839426047807, "scores": [2.3424898443319666, 1.8220376934482025, 0.7972596700942418, 0.847301660439888, 0.5307710847560427], "rank_score": 1.2679719906140685} -{"id": "malmasi-dras-2014-language", "title": "Language Transfer Hypotheses with Linear SVM Weights", "abstract": "Language transfer, the characteristic second language usage patterns caused by native language interference, is investigated by Second Language Acquisition (SLA) researchers seeking to find overused and underused linguistic features. In this paper we develop and present a methodology for deriving ranked lists of such features. Using very large learner data, we show our method\u2019s ability to find relevant candidates using sophisticated linguistic features. To illustrate its applicability to SLA research, we formulate plausible language transfer hypotheses supported by current evidence. This is the first work to extend Native Language Identification to a broader linguistic interpretation of learner data and address the automatic extraction of underused features on a per-native language basis.", "phrases": ["linear svm weight", "sla", "linguistic feature", "language transfer hypothesis", "learner feedback"], "overall_score": 3.1503703987337053, "scores": [2.0509520983484313, 1.740488629946471, 1.1652554342163655, 0.8434207153228618, 0.5388946882592677], "rank_score": 1.2678023132186793} -{"id": "chambers-2013-event", "title": "Event Schema Induction with a Probabilistic Entity-Driven Model", "abstract": "Event schema induction is the task of learning high-level representations of complex events (e.g., a bombing) and their entity roles (e.g., perpetrator and victim) from unlabeled text. Event schemas have important connections to early NLP research on frames and scripts, as well as modern applications like template extraction. 
Recent research suggests event schemas can be learned from raw text. Inspired by a pipelined learner based on named entity coreference, this paper presents the first generative model for schema induction that integrates coreference chains into learning. Our generative model is conceptually simpler than the pipelined approach and requires far less training data. It also provides an interesting contrast with a recent HMM-based model. We evaluate on a common dataset for template schema extraction. Our generative model matches the pipeline\u2019s performance, and outperforms the HMM by 7 F1 points (20%).", "phrases": ["high-level representation", "entity role", "generative model", "event schema induction"], "overall_score": 3.7328971422291217, "scores": [3.3666572119540685, 0.5946768455233419, 0.5645581559645914, 0.5452227509250177], "rank_score": 1.267778741091755} -{"id": "bulte-tezcan-2019-neural", "title": "Neural Fuzzy Repair: Integrating Fuzzy Matches into Neural Machine Translation", "abstract": "We present a simple yet powerful data augmentation method for boosting Neural Machine Translation (NMT) performance by leveraging information retrieved from a Translation Memory (TM). We propose and test two methods for augmenting NMT training data with fuzzy TM matches. Tests on the DGT-TM data set for two language pairs show consistent and substantial improvements over a range of baseline systems. The results suggest that this method is promising for any translation environment in which a sizeable TM is available and a certain amount of repetition across translations is to be expected, especially considering its ease of implementation.", "phrases": ["fuzzy match", "neural machine translation", "translation memory", "source sentence"], "overall_score": 3.1499731895439274, "scores": [1.704026425989959, 0.9054819485104482, 1.390736780801378, 1.0703247026245346], "rank_score": 1.26764246448158} -{"id": "dugast-etal-2007-statistical", "title": "Statistical Post-Editing on SYSTRAN`s Rule-Based Translation System", "abstract": "This article describes the combination of a SYSTRAN system with a \"statistical post-editing\" (SPE) system. We document qualitative analysis on two experiments performed in the shared task of the ACL 2007 Workshop on Statistical Machine Translation. Comparative results and more integrated \"hybrid\" techniques are discussed.", "phrases": ["post-editing", "systran", "rule-based machine translation", "smt system"], "overall_score": 3.8577258878897123, "scores": [2.5328959220195837, 1.0430061230579575, 0.9622806920974463, 0.5302323811900088], "rank_score": 1.267103779591249} -{"id": "coppersmith-etal-2014-quantifying", "title": "Quantifying Mental Health Signals in Twitter", "abstract": "The ubiquity of social media provides a rich opportunity to enhance the data available to mental health clinicians and researchers, enabling a better-informed and better-equipped mental health field. We present analysis of mental health phenomena in publicly available Twitter data, demonstrating how rigorous application of simple natural language processing methods can yield insight into specific disorders as well as mental health writ large, along with evidence that as-of-yet undiscovered linguistic signals relevant to mental health exist in social media. 
We present a novel method for gathering data for a range of mental illnesses quickly and cheaply, then focus on analysis of four in particular: post-traumatic stress disorder (PTSD), depression, bipolar disorder, and seasonal affective disorder (SAD). We intend for these proof-of-concept results to inform the necessary ethical discussion regarding the balance between the utility of such data and the privacy of mental health related information.", "phrases": ["mental health", "twitter", "social medium data", "facebook", "self-report"], "overall_score": 3.971615749671918, "scores": [2.2133913072642035, 1.5288658057261195, 1.1494935107505733, 0.8778723785741637, 0.5636939744713226], "rank_score": 1.2666633953572766} -{"id": "nooralahzadeh-etal-2020-zero", "title": "Zero-Shot Cross-Lingual Transfer with Meta Learning", "abstract": "Learning what to share between tasks has become a topic of great importance, as strategic sharing of knowledge has been shown to improve downstream task performance. This is particularly important for multilingual applications, as most languages in the world are under-resourced. Here, we consider the setting of training models on multiple different languages at the same time, when little or no data is available for languages other than English. We show that this challenging setup can be approached using meta-learning: in addition to training a source language model, another model learns to select which training instances are the most beneficial to the first. We experiment using standard supervised, zero-shot cross-lingual, as well as few-shot cross-lingual settings for different natural language understanding tasks (natural language inference, question answering). Our extensive experimental setup demonstrates the consistent effectiveness of meta-learning for a total of 15 languages. We improve upon the state-of-the-art for zero-shot and few-shot NLI (on MultiNLI and XNLI) and QA (on the MLQA dataset). A comprehensive error analysis indicates that the correlation of typological features between languages can partly explain when parameter sharing learned via meta-learning is beneficial.", "phrases": ["cross-lingual transfer", "natural language inference", "few-shot nli"], "overall_score": 3.7278131965536017, "scores": [2.171686086362355, 1.055411880753123, 0.5710583773664151], "rank_score": 1.2660521148272978} -{"id": "tsvetkov-etal-2015-evaluation", "title": "Evaluation of Word Vector Representations by Subspace Alignment", "abstract": "Unsupervisedly learned word vectors have proven to provide exceptionally effective features in many NLP tasks. Most common intrinsic evaluations of vector quality measure correlation with similarity judgments. However, these often correlate poorly with how well the learned representations perform as features in downstream evaluation tasks. 
We present QVEC\u2014a computationally inexpensive intrinsic evaluation measure of the quality of word embeddings based on alignment to a matrix of features extracted from manually crafted lexical resources\u2014that obtains strong correlation with performance of the vectors in a battery of downstream semantic evaluation tasks.", "phrases": ["intrinsic evaluation", "word embedding", "qvec"], "overall_score": 3.8534571922364123, "scores": [1.423803817698047, 1.2371740657508599, 1.1361271841536291], "rank_score": 1.2657016892008455} -{"id": "gaussier-etal-2004-geometric", "title": "A Geometric View on Bilingual Lexicon Extraction from Comparable Corpora", "abstract": "We present a geometric view on bilingual lexicon extraction from comparable corpora, which allows to re-interpret the methods proposed so far and identify unresolved problems. This motivates three new methods that aim at solving these problems. Empirical evaluation shows the strengths and weaknesses of these methods, as well as a significant gain in the accuracy of extracted lexicons.", "phrases": ["lexicon extraction", "comparable corpora", "cross-lingual signal", "statistical method"], "overall_score": 3.9123078297557763, "scores": [3.3228614504936025, 0.6415519468934635, 0.5767306503470929, 0.5216241138679985], "rank_score": 1.2656920404005394} -{"id": "peng-etal-2020-shot", "title": "Few-shot Natural Language Generation for Task-Oriented Dialog", "abstract": "As a crucial component in task-oriented dialog systems, the Natural Language Generation (NLG) module converts a dialog act represented in a semantic form into a response in natural language. The success of traditional template-based or statistical models typically relies on heavily annotated data, which is infeasible for new domains. Therefore, it is pivotal for an NLG system to generalize well with limited labelled data in real applications. To this end, we present FewshotWOZ, the first NLG benchmark to simulate the few-shot learning setting in task-oriented dialog systems. Further, we develop the SC-GPT model. It is pre-trained on a large set of annotated NLG corpus to acquire the controllable generation ability, and fine-tuned with only a few domain-specific labels to adapt to new domains. Experiments on FewshotWOZ and the large Multi-Domain-WOZ datasets show that the proposed SC-GPT significantly outperforms existing methods, measured by various automatic metrics and human evaluations.", "phrases": ["natural language generation", "few-shot learning", "task-oriented dialogue system"], "overall_score": 3.7914652484867704, "scores": [2.3536236848568035, 0.8612349390106897, 0.5820079539212761], "rank_score": 1.2656221925962565} -{"id": "sogaard-2016-evaluating", "title": "Evaluating word embeddings with fMRI and eye-tracking", "abstract": "The workshop CfP assumes that down-stream evaluation of word embeddings is impractical, and that a valid evaluation metric for pairs of word embeddings can be found. I argue below that if so, the only meaningful evaluation procedure is comparison with measures of human word processing in the wild.
Such evaluation is non-trivial, but I present a practical procedure here, evaluating word embeddings as features in a multi-dimensional regression model predicting brain imaging or eye-tracking word-level aggregate statistics.", "phrases": ["word embedding", "fmri", "text stimulus"], "overall_score": 2.780723609028351, "scores": [2.436316892584352, 0.8174190986497711, 0.5429495697833081], "rank_score": 1.265561853672477} -{"id": "li-etal-2019-incremental", "title": "Incremental Transformer with Deliberation Decoder for Document Grounded Conversations", "abstract": "Document Grounded Conversations is a task to generate dialogue responses when chatting about the content of a given document. Obviously, document knowledge plays a critical role in Document Grounded Conversations, while existing dialogue models do not exploit this kind of knowledge effectively enough. In this paper, we propose a novel Transformer-based architecture for multi-turn document grounded conversations. In particular, we devise an Incremental Transformer to encode multi-turn utterances along with knowledge in related documents. Motivated by the human cognitive process, we design a two-pass decoder (Deliberation Decoder) to improve context coherence and knowledge correctness. Our empirical study on a real-world Document Grounded Dataset proves that responses generated by our model significantly outperform competitive baselines on both context coherence and knowledge relevance.", "phrases": ["deliberation decoder", "multi-turn utterance", "incremental transformer", "dialogue generation"], "overall_score": 2.913819837826554, "scores": [2.116642371478326, 1.815509098645572, 0.6008300639915726, 0.5288419731977314], "rank_score": 1.2654558768283004} -{"id": "forascu-tufis-2012-romanian", "title": "Romanian TimeBank: An Annotated Parallel Corpus for Temporal Information", "abstract": "The paper describes the main steps for the construction, annotation and validation of the Romanian version of the TimeBank corpus. Starting from the English TimeBank corpus \u2015 the reference annotated corpus in the temporal domain, we have translated all the 183 English news texts into Romanian and mapped the English annotations onto Romanian, with a success rate of 96.53%. Based on ISO-Time - the emerging standard for representing temporal information, which includes many of the previous annotation schemes -, we have evaluated the automatic transfer onto Romanian and, when necessary, corrected the Romanian annotations so that in the end we obtained a 99.18% transfer rate for the TimeML annotations. In very few cases, due to language peculiarities, some original annotations could not be transferred. For the portability of the temporal annotation standard to Romanian, we suggested some additions for the ISO-Time standard, concerning especially the EVENT tag, based on linguistic evidence, the Romanian grammar, and also on the localisations of TimeML to other Romance languages. Future improvements to the Ro-TimeBank will take into consideration all temporal expressions, signals and events in texts, even those with a not very clear temporal anchoring.", "phrases": ["annotated parallel corpus", "temporal information", "romanian timebank"], "overall_score": 1.3900347625099831, "scores": [1.9318414549649754, 0.972115559699632, 0.8918354869699123], "rank_score": 1.2652641672115066} -{"id": "chen-etal-2018-keyphrase", "title": "Keyphrase Generation with Correlation Constraints", "abstract": "In this paper, we study automatic keyphrase generation.
Although conventional approaches to this task show promising results, they neglect correlation among keyphrases, resulting in duplication and coverage issues. To solve these problems, we propose a new sequence-to-sequence architecture for keyphrase generation named CorrRNN, which captures correlation among multiple keyphrases in two ways. First, we employ a coverage vector to indicate whether the word in the source document has been summarized by previous phrases to improve the coverage for keyphrases. Second, preceding phrases are taken into account to eliminate duplicate phrases and improve result coherence. Experiment results show that our model significantly outperforms the state-of-the-art method on benchmark datasets in terms of both accuracy and diversity.", "phrases": ["correlation constraint", "sequence-to-sequence architecture", "keyphrase generation", "review mechanism", "generative model"], "overall_score": 3.5080001611442655, "scores": [3.4865176169214416, 0.8570535606644312, 0.8288977677831766, 0.5848677224723384, 0.5688813770592577], "rank_score": 1.265243608980129} -{"id": "lu-etal-2009-natural", "title": "Natural Language Generation with Tree Conditional Random Fields", "abstract": "This paper presents an effective method for generating natural language sentences from their underlying meaning representations. The method is built on top of a hybrid tree representation that jointly encodes both the meaning representation as well as the natural language in a tree structure. By using a tree conditional random field on top of the hybrid tree representation, we are able to explicitly model phrase-level dependencies amongst neighboring natural language phrases and meaning representation components in a simple and natural way. We show that the additional dependencies captured by the tree conditional random field allows it to perform better than directly inverting a previously developed hybrid tree semantic parser. Furthermore, we demonstrate that the model performs better than a previous state-of-the-art natural language generation model. Experiments are performed on two benchmark corpora with standard automatic evaluation metrics.", "phrases": ["conditional random field", "meaning representation", "language generation model"], "overall_score": 3.0336130039212525, "scores": [1.618067621919902, 1.1583096741032826, 1.0189673610197294], "rank_score": 1.2651148856809715} -{"id": "du-cardie-2020-event", "title": "Event Extraction by Answering (Almost) Natural Questions", "abstract": "The problem of event extraction requires detecting the event trigger and extracting its corresponding arguments. Existing work in event argument extraction typically relies heavily on entity recognition as a preprocessing/concurrent step, causing the well-known problem of error propagation. To avoid this issue, we introduce a new paradigm for event extraction by formulating it as a question answering (QA) task that extracts the event arguments in an end-to-end manner. 
Empirical results demonstrate that our framework outperforms prior methods substantially; in addition, it is capable of extracting event arguments for roles not seen at training time (i.e., in a zero-shot learning setting).", "phrases": ["trigger", "entity recognition", "event extraction", "machine reading comprehension", "eae"], "overall_score": 4.168799968558033, "scores": [3.5314039175681544, 0.877543133091038, 0.8248373354208745, 0.5467439202323114, 0.5438137925913683], "rank_score": 1.2648684197807494} -{"id": "snover-etal-2009-fluency", "title": "Fluency, Adequacy, or HTER? Exploring Different Human Judgments with a Tunable MT Metric", "abstract": "Automatic Machine Translation (MT) evaluation metrics have traditionally been evaluated by the correlation of the scores they assign to MT output with human judgments of translation performance. Different types of human judgments, such as Fluency, Adequacy, and HTER, measure varying aspects of MT performance that can be captured by automatic MT metrics. We explore these differences through the use of a new tunable MT metric: TER-Plus, which extends the Translation Edit Rate evaluation metric with tunable parameters and the incorporation of morphology, synonymy and paraphrases. TER-Plus was shown to be one of the top metrics in NIST's Metrics MATR 2008 Challenge, having the highest average rank in terms of Pearson and Spearman correlation. Optimizing TER-Plus to different types of human judgments yields significantly improved correlations and meaningful changes in the weight of different types of edits, demonstrating significant differences between the types of human judgments.", "phrases": ["hter", "human judgment", "paraphrase", "fluency", "translation error rate"], "overall_score": 3.583263391773297, "scores": [2.860242894615089, 1.8393588347000844, 0.5543980637600133, 0.5462419455158982, 0.5234320491429155], "rank_score": 1.2647347575468} -{"id": "vaibhav-etal-2019-improving", "title": "Improving Robustness of Machine Translation with Synthetic Noise", "abstract": "Modern Machine Translation (MT) systems perform remarkably well on clean, in-domain text. However most of the human generated text, particularly in the realm of social media, is full of typos, slang, dialect, idiolect and other noise which can have a disastrous impact on the accuracy of MT. In this paper we propose methods to enhance the robustness of MT systems by emulating naturally occurring noise in otherwise clean data. Synthesizing noise in this manner we are ultimately able to make a vanilla MT system more resilient to naturally occurring noise, partially mitigating loss in accuracy resulting therefrom.", "phrases": ["robustness", "machine translation", "synthetic noise", "clean data", "medium"], "overall_score": 3.4242580787116346, "scores": [2.0713629501308746, 1.7521721686303544, 0.9311227550526482, 1.043357794562987, 0.5243525013821724], "rank_score": 1.2644736339518075} -{"id": "gehman-etal-2020-realtoxicityprompts", "title": "RealToxicityPrompts: Evaluating Neural Toxic Degeneration in Language Models", "abstract": "Pretrained neural language models (LMs) are prone to generating racist, sexist, or otherwise toxic language which hinders their safe deployment. We investigate the extent to which pretrained LMs can be prompted to generate toxic language, and the effectiveness of controllable text generation algorithms at preventing such toxic degeneration. 
We create and release RealToxicityPrompts, a dataset of 100K naturally occurring, sentence-level prompts derived from a large corpus of English web text, paired with toxicity scores from a widely-used toxicity classifier. Using RealToxicityPrompts, we find that pretrained LMs can degenerate into toxic text even from seemingly innocuous prompts. We empirically assess several controllable generation methods, and find that while data- or compute-intensive methods (e.g., adaptive pretraining on non-toxic data) are more effective at steering away from toxicity than simpler solutions (e.g., banning \u201cbad\u201d words), no current method is failsafe against neural toxic degeneration. To pinpoint the potential cause of such persistent toxic degeneration, we analyze two web text corpora used to pretrain several LMs (including GPT-2; Radford et al., 2019), and find a significant amount of offensive, factually unreliable, and otherwise toxic content. Our work provides a test bed for evaluating toxic generations by LMs and stresses the need for better data selection processes for pretraining.", "phrases": ["neural toxic degeneration", "language model", "toxic language", "realtoxicityprompt"], "overall_score": 3.8493686822388336, "scores": [1.694461878906118, 0.8046797240134501, 1.4357803098384905, 1.1225132166965879], "rank_score": 1.2643587823636615} -{"id": "edunov-etal-2020-evaluation", "title": "On The Evaluation of Machine Translation Systems Trained With Back-Translation", "abstract": "Back-translation is a widely used data augmentation technique which leverages target monolingual data. However, its effectiveness has been challenged since automatic metrics such as BLEU only show significant improvements for test examples where the source itself is a translation, or translationese. This is believed to be due to translationese inputs better matching the back-translated training data. In this work, we show that this conjecture is not empirically supported and that back-translation improves translation quality of both naturally occurring text as well as translationese according to professional human translators. We provide empirical evidence to support the view that back-translation is preferred by humans because it produces more fluent outputs. BLEU cannot capture human preferences because references are translationese when source sentences are natural text. We recommend complementing BLEU with a language model score to measure fluency.", "phrases": ["back-translation", "monolingual data", "professional human translator", "fluent output", "bleu score"], "overall_score": 3.65407908233593, "scores": [4.101112429900402, 0.5691435194111624, 0.5596388037437418, 0.5471669836970087, 0.5440612681922996], "rank_score": 1.2642246009889229} -{"id": "verga-etal-2017-generalizing", "title": "Generalizing to Unseen Entities and Entity Pairs with Row-less Universal Schema", "abstract": "Universal schema predicts the types of entities and relations in a knowledge base (KB) by jointly embedding the union of all available schema types\u2014not only types from multiple structured databases (such as Freebase or Wikipedia infoboxes), but also types expressed as textual patterns from raw text. This prediction is typically modeled as a matrix completion problem, with one type per column, and either one or two entities per row (in the case of entity types or binary relation types, respectively). Factorizing this sparsely observed matrix yields a learned vector embedding for each row and each column.
In this paper we explore the problem of making predictions for entities or entity-pairs unseen at training time (and hence without a pre-learned row embedding). We propose an approach having no per-row parameters at all; rather we produce a row vector on the fly using a learned aggregation function of the vectors of the observed columns for that row. We experiment with various aggregation functions, including neural network attention models. Our approach can be understood as a natural language database, in that questions about KB entities are answered by attending to textual or database evidence. In experiments predicting both relations and entity types, we demonstrate that despite having an order of magnitude fewer parameters than traditional universal schema, we can match the accuracy of the traditional model, and more importantly, we can now make predictions about unseen rows with nearly the same accuracy as rows available at training time.", "phrases": ["entity pair", "universal schema", "matrix"], "overall_score": 2.4587013179887283, "scores": [2.6467601977754347, 0.6178791497857478, 0.5259282475290037], "rank_score": 1.2635225316967287} -{"id": "fan-etal-2014-distant", "title": "Distant Supervision for Relation Extraction with Matrix Completion", "abstract": "The essence of distantly supervised relation extraction is that it is an incomplete multi-label classification problem with sparse and noisy features. To tackle the sparsity and noise challenges, we propose solving the classification problem using matrix completion on factorized matrix of minimized rank. We formulate relation classification as completing the unknown labels of testing items (entity pairs) in a sparse matrix that concatenates training and testing textual features with training labels. Our algorithmic framework is based on the assumption that the rank of item-by-feature and item-by-label joint matrix is low. We apply two optimization models to recover the underlying low-rank matrix leveraging the sparsity of feature-label matrix. The matrix completion problem is then solved by the fixed point continuation (FPC) algorithm, which can find the global optimum. Experiments on two widely used datasets with different dimensions of textual features demonstrate that our low-rank matrix completion approach significantly outperforms the baseline and the state-of-the-art methods.", "phrases": ["relation extraction", "matrix completion", "distant supervision"], "overall_score": 2.9084937230441485, "scores": [1.9482010527255653, 0.9407958078379309, 0.900431463141459], "rank_score": 1.2631427745683184} -{"id": "li-caragea-2019-multi", "title": "Multi-Task Stance Detection with Sentiment and Stance Lexicons", "abstract": "Stance detection aims to detect whether the opinion holder is in support of or against a given target. Recent works show improvements in stance detection by using either the attention mechanism or sentiment information. In this paper, we propose a multi-task framework that incorporates target-specific attention mechanism and at the same time takes sentiment classification as an auxiliary task. Moreover, we used a sentiment lexicon and constructed a stance lexicon to provide guidance for the attention layer.
Experimental results show that the proposed model significantly outperforms state-of-the-art deep learning methods on the SemEval-2016 dataset.", "phrases": ["stance detection", "sentiment information", "auxiliary task"], "overall_score": 2.775391941277628, "scores": [2.3598625488877603, 0.8396437562494322, 0.5898996161851641], "rank_score": 1.2631353071074523} -{"id": "sordoni-etal-2015-neural", "title": "A Neural Network Approach to Context-Sensitive Generation of Conversational Responses", "abstract": "We present a novel response generation system that can be trained end to end on large quantities of unstructured Twitter conversations. A neural network architecture is used to address sparsity issues that arise when integrating contextual information into classic statistical models, allowing the system to take into account previous dialog utterances. Our dynamic-context generative models show consistent gains over both context-sensitive and non-context-sensitive Machine Translation and Information Retrieval baselines.", "phrases": ["conversation", "recurrent neural network", "non-task-oriented dialogue system"], "overall_score": 5.777601268338509, "scores": [2.004283873104702, 1.2401099067075556, 0.5444365717580525], "rank_score": 1.2629434505234367} -{"id": "medelyan-etal-2009-human", "title": "Human-competitive tagging using automatic keyphrase extraction", "abstract": "This paper connects two research areas: automatic tagging on the web and statistical keyphrase extraction. First, we analyze the quality of tags in a collaboratively created folksonomy using traditional evaluation techniques. Next, we demonstrate how documents can be tagged automatically with a state-of-the-art keyphrase extraction algorithm, and further improve performance in this new domain using a new algorithm, \"Maui\", that utilizes semantic information extracted from Wikipedia. Maui outperforms existing approaches and extracts tags that are competitive with those assigned by the best performing human taggers.", "phrases": ["tagging", "keyphrase extraction", "wikipedia", "hand-crafted rule", "large set"], "overall_score": 3.8433502619914397, "scores": [3.186117635832778, 0.9038968239059522, 1.1384342847391393, 0.5514806610748444, 0.5319804926468711], "rank_score": 1.2623819796399167} -{"id": "bunescu-mooney-2007-learning", "title": "Learning to Extract Relations from the Web using Minimal Supervision", "abstract": "We present a new approach to relation extraction that requires only a handful of training examples. Given a few pairs of named entities known to exhibit or not exhibit a particular relation, bags of sentences containing the pairs are extracted from the web. We extend an existing relation extraction method to handle this weaker form of supervision, and present experimental results demonstrating that our approach can reliably extract relations from web documents.", "phrases": ["web", "relation extraction", "distant supervision", "knowledge base", "multi-instance learning"], "overall_score": 4.334341528095707, "scores": [0.8835511069556651, 1.9258462874341749, 1.7669624046569807, 1.1370491650401706, 0.5975369859075275], "rank_score": 1.2621891899989037} -{"id": "herbelot-baroni-2017-high", "title": "High-risk learning: acquiring new word vectors from tiny data", "abstract": "Distributional semantics models are known to struggle with small data. It is generally accepted that in order to learn `a good vector' for a word, a model must have sufficient examples of its usage. 
This contradicts the fact that humans can guess the meaning of a word from a few occurrences only. In this paper, we show that a neural language model such as Word2Vec only necessitates minor modifications to its standard architecture to learn new terms from tiny data, using background knowledge from a previously learnt semantic space. We test our model on word definitions and on a nonce task involving 2-6 sentences' worth of context, showing a large increase in performance over state-of-the-art models on the definitional task.", "phrases": ["word vector", "tiny data", "usage", "learning rate"], "overall_score": 3.418021881149599, "scores": [2.2593193681333066, 1.3733043228602884, 0.8534765193435836, 0.5625829784137861], "rank_score": 1.2621707971877412} -{"id": "wang-etal-2021-voxpopuli", "title": "VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation Learning, Semi-Supervised Learning and Interpretation", "abstract": "We introduce VoxPopuli, a large-scale multilingual corpus providing 400K hours of unlabeled speech data in 23 languages. It is the largest open data to date for unsupervised representation learning as well as semi-supervised learning. VoxPopuli also contains 1.8K hours of transcribed speeches in 15 languages and their aligned oral interpretations into 15 target languages totaling 17.3K hours. We provide speech recognition (ASR) baselines and validate the versatility of VoxPopuli unlabeled data in semi-supervised ASR and speech-to-text translation under challenging out-of-domain settings. The corpus is available at .", "phrases": ["semi-supervised learning", "interpretation", "unlabeled speech data", "voxpopuli"], "overall_score": 2.2614043493878717, "scores": [1.7756667281732372, 1.7455684000380205, 0.9245292241942797, 0.6026908408847854], "rank_score": 1.2621137983225807} -{"id": "luong-etal-2013-better", "title": "Better Word Representations with Recursive Neural Networks for Morphology", "abstract": "Vector-space word representations have been very successful in recent years at improving performance across a variety of NLP tasks. However, common to most existing work, words are regarded as independent entities without any explicit relationship among morphologically related words being modeled. As a result, rare and complex words are often poorly estimated, and all unknown words are represented in a rather crude way using only one or a few vectors. This paper addresses this shortcoming by proposing a novel model that is capable of building representations for morphologically complex words from their morphemes. We combine recursive neural networks (RNNs), where each morpheme is a basic unit, with neural language models (NLMs) to consider contextual information in learning morphologically-aware word representations. Our learned models outperform existing word representations by a good margin on word similarity tasks across many datasets, including a new dataset we introduce focused on rare words to complement existing ones in an interesting way.", "phrases": ["recursive neural networks", "morphology", "word embedding", "rich language", "trend"], "overall_score": 5.163656577229301, "scores": [1.9256380753036213, 0.9312311444080811, 2.041516338693288, 0.8791162166712145, 0.5283385111488251], "rank_score": 1.2611680572450061} -{"id": "wang-etal-2014-beam", "title": "A Beam-Search Decoder for Disfluency Detection", "abstract": "In this paper, we present a novel beam-search decoder for disfluency detection.
We first propose node-weighted max-margin Markov networks (M3N) to boost the performance on words belonging to specific part-of-speech (POS) classes. Next, we show the importance of measuring the quality of cleaned-up sentences and performing multiple passes of disfluency detection. Finally, we propose using the beam-search decoder to combine multiple discriminative models such as M3N and multiple generative models such as language models (LM) and perform multiple passes of disfluency detection. The decoder iteratively generates new hypotheses from current hypotheses by making incremental corrections to the current sentence based on certain patterns as well as information provided by existing models. It then rescores each hypothesis based on features of lexical correctness and fluency. Our decoder achieves an edit-word F1 score higher than all previous published scores on the same data set, both with and without using external sources of information.", "phrases": ["beam-search decoder", "disfluency detection", "language model"], "overall_score": 2.0284596936193595, "scores": [2.3128702787690174, 0.9340578198111724, 0.5341305143951957], "rank_score": 1.260352870991795} -{"id": "filippova-2010-multi", "title": "Multi-Sentence Compression: Finding Shortest Paths in Word Graphs", "abstract": "We consider the task of summarizing a cluster of related sentences with a short sentence which we call multi-sentence compression and present a simple approach based on shortest paths in word graphs. The advantage and the novelty of the proposed method is that it is syntax-lean and requires little more than a tokenizer and a tagger. Despite its simplicity, it is capable of generating grammatical and informative summaries as our experiments with English and Spanish data demonstrate.", "phrases": ["cluster", "tagger", "multi-sentence compression", "short path", "graph-based approach"], "overall_score": 3.835309157574813, "scores": [3.4634381040954163, 0.8788616866010257, 0.8771438214450357, 0.548411757049339, 0.5308486739344316], "rank_score": 1.25974080862505} -{"id": "feng-etal-2012-syntactic", "title": "Syntactic Stylometry for Deception Detection", "abstract": "Most previous studies in computerized deception detection have relied only on shallow lexico-syntactic patterns. This paper investigates syntactic stylometry for deception detection, adding a somewhat unconventional angle to prior literature. Over four different datasets spanning from the product review to the essay domain, we demonstrate that features driven from Context Free Grammar (CFG) parse trees consistently improve the detection performance over several baselines that are based only on shallow lexico-syntactic features. Our results improve the best published result on the hotel review data (Ott et al., 2011) reaching 91.2% accuracy with 14% error reduction.", "phrases": ["deception detection", "parse tree", "syntactic stylometry", "production rule"], "overall_score": 4.054783646318726, "scores": [2.500858364048067, 0.921144551217536, 1.0566793359259727, 0.5600752118860852], "rank_score": 1.259689365769415} -{"id": "grabar-etal-2018-cas", "title": "CAS: French Corpus with Clinical Cases", "abstract": "Textual corpora are extremely important for various NLP applications as they provide information necessary for creating, setting and testing these applications and the corresponding tools. They are also crucial for designing reliable methods and reproducible results.
Yet, in some areas, such as the medical area, due to confidentiality or to ethical reasons, it is complicated and even impossible to access textual data representative of those produced in these areas. We propose the CAS corpus built with clinical cases, such as they are reported in the published scientific literature in French. We describe this corpus, currently containing over 397,000 word occurrences, and the existing linguistic and semantic annotations.", "phrases": ["french corpus", "clinical case", "scientific literature"], "overall_score": 1.7462505741597831, "scores": [2.3204522040412225, 0.9174103551049193, 0.5410980060885942], "rank_score": 1.259653521744912} -{"id": "schneider-smith-2015-corpus", "title": "A Corpus and Model Integrating Multiword Expressions and Supersenses", "abstract": "This paper introduces a task of identifying and semantically classifying lexical expressions in running text. We investigate the online reviews genre, adding semantic supersense annotations to a 55,000 word English corpus that was previously annotated for multiword expressions. The noun and verb supersenses apply to full lexical expressions, whether single- or multiword. We then present a sequence tagging model that jointly infers lexical expressions and their supersenses. Results show that even with our relatively small training corpus in a noisy domain, the joint task can be performed to attain 70% class labeling F1.", "phrases": ["multiword expression", "supersense", "semantic class", "mwe"], "overall_score": 3.020167194498885, "scores": [2.6520060325315438, 0.914569367674888, 0.8474542344207318, 0.6240005558645433], "rank_score": 1.2595075476229267} -{"id": "banea-etal-2010-multilingual", "title": "Multilingual Subjectivity: Are More Languages Better?", "abstract": "While subjectivity related research in other languages has increased, most of the work focuses on single languages. This paper explores the integration of features originating from multiple languages into a machine learning approach to subjectivity analysis, and aims to show that this enriched feature set provides for more effective modeling for the source as well as the target languages. We show not only that we are able to achieve over 75% macro accuracy in all of the six languages we experiment with, but also that by using features drawn from multiple languages we can construct high-precision meta-classifiers with a precision of over 83%.", "phrases": ["subjectivity", "other language", "romanian", "disambiguation task", "sentiment classification"], "overall_score": 3.948615546272324, "scores": [2.7681779997674067, 1.5871341483731285, 0.8549985266554948, 0.5439065856130197, 0.5424225583032615], "rank_score": 1.2593279637424621} -{"id": "mcdonald-nivre-2007-characterizing", "title": "Characterizing the Errors of Data-Driven Dependency Parsing Models", "abstract": "We present a comparative error analysis of the two dominant approaches in data-driven dependency parsing: global, exhaustive, graph-based models, and local, greedy, transition-based models. We show that, in spite of similar performance overall, the two models produce different types of errors, in a way that can be explained by theoretical properties of the two models.
This analysis leads to new directions for parser development.", "phrases": ["graph-based model", "different type", "transition-based parser", "error propagation", "long dependency"], "overall_score": 4.900666006099429, "scores": [1.9034296879820372, 1.5153396060286304, 1.0912619647591513, 0.9032468067187911, 0.882832277117097], "rank_score": 1.2592220685211415} -{"id": "negri-etal-2012-semeval", "title": "Semeval-2012 Task 8: Cross-lingual Textual Entailment for Content Synchronization", "abstract": "This paper presents the first round of the task on Cross-lingual Textual Entailment for Content Synchronization, organized within SemEval-2012. The task was designed to promote research on semantic inference over texts written in different languages, targeting at the same time a real application scenario. Participants were presented with datasets for different language pairs, where multi-directional entailment relations (\"forward\", \"backward\", \"bidirectional\", \"no_entailment\") had to be identified. We report on the training and test data used for evaluation, the process of their creation, the participating systems (10 teams, 92 runs), the approaches adopted and the results achieved.", "phrases": ["textual entailment", "content synchronization", "different language"], "overall_score": 2.447984915580464, "scores": [2.4629684035277846, 0.7815844250609628, 0.5294933422001159], "rank_score": 1.2580153902629545} -{"id": "karan-etal-2012-evaluation", "title": "Evaluation of Classification Algorithms and Features for Collocation Extraction in Croatian", "abstract": "Collocations can be defined as words that occur together significantly more often than it would be expected by chance. Many natural language processing applications such as natural language generation, word sense disambiguation and machine translation can benefit from having access to information about collocated words. We approach collocation extraction as a classification problem where the task is to classify a given n-gram as either a collocation (positive) or a non-collocation (negative). Among the features used are word frequencies, classical association measures (Dice, PMI, chi2), and POS tags. In addition, semantic word relatedness modeled by latent semantic analysis is also included. We apply wrapper feature subset selection to determine the best set of features. Performance of various classification algorithms is tested. Experiments are conducted on a manually annotated set of bigrams and trigrams sampled from a Croatian newspaper corpus. Best results obtained are 79.8 F1 measure for bigrams and 67.5 F1 measure for trigrams. The best classifier for bigrams was SVM, while for trigrams the decision tree gave the best performance. Features which contributed the most to overall performance were PMI, semantic relatedness, and POS information.", "phrases": ["collocation extraction", "croatian", "classification problem"], "overall_score": 2.0246725790034366, "scores": [1.9781891085072452, 1.2690600166970092, 0.5267502879349686], "rank_score": 1.257999804379741} -{"id": "watanabe-etal-2007-graph", "title": "A Graph-Based Approach to Named Entity Categorization in Wikipedia Using Conditional Random Fields", "abstract": "This paper presents a method for categorizing named entities in Wikipedia. In Wikipedia, an anchor text is glossed in a linked HTML text. We formalize named entity categorization as a task of categorizing anchor texts with linked HTML texts which glosses a named entity. 
Using this representation, we introduce a graph structure in which anchor texts are regarded as nodes. In order to incorporate HTML structure on the graph, three types of cliques are defined based on the HTML tree structure. We propose a method with Conditional Random Fields (CRFs) to categorize the nodes on the graph. Since the defined graph may include cycles, the exact inference of CRFs is computationally expensive. We introduce an approximate inference method using Tree-based Reparameterization (TRP) to reduce computational cost. In experiments, our proposed model obtained significant improvements compared to baseline models that use Support Vector Machines.", "phrases": ["graph-based approach", "entity categorization", "wikipedia", "html structure"], "overall_score": 2.447699179880121, "scores": [2.7903399344654134, 0.8937785009555548, 0.8223023210854763, 0.525053448134326], "rank_score": 1.2578685511601926} -{"id": "rashkin-etal-2019-towards", "title": "Towards Empathetic Open-domain Conversation Models: A New Benchmark and Dataset", "abstract": "One challenge for dialogue agents is recognizing feelings in the conversation partner and replying accordingly, a key communicative skill. While it is straightforward for humans to recognize and acknowledge others' feelings in a conversation, this is a significant challenge for AI systems due to the paucity of suitable publicly-available datasets for training and evaluation. This work proposes a new benchmark for empathetic dialogue generation and EmpatheticDialogues, a novel dataset of 25k conversations grounded in emotional situations. Our experiments indicate that dialogue models that use our dataset are perceived to be more empathetic by human evaluators, compared to models merely trained on large-scale Internet conversation data. We also present empirical comparisons of dialogue model adaptations for empathetic responding, leveraging existing models or datasets without requiring lengthy re-training of the full model.", "phrases": ["conversation", "empathetic dialogue generation", "emotion class"], "overall_score": 4.814769385173025, "scores": [2.1867082072359825, 1.0587842419220075, 0.5272056595665988], "rank_score": 1.2575660362415295} -{"id": "blloshmi-etal-2020-xl", "title": "XL-AMR: Enabling Cross-Lingual AMR Parsing with Transfer Learning Techniques", "abstract": "Abstract Meaning Representation (AMR) is a popular formalism of natural language that represents the meaning of a sentence as a semantic graph. It is agnostic about how to derive meanings from strings and for this reason it lends itself well to the encoding of semantics across languages. However, cross-lingual AMR parsing is a hard task, because training data are scarce in languages other than English and the existing English AMR parsers are not directly suited to being used in a cross-lingual setting. In this work we tackle these two problems so as to enable cross-lingual AMR parsing: we explore different transfer learning techniques for producing automatic AMR annotations across languages and develop a cross-lingual AMR parser, XL-AMR. This can be trained on the produced data and does not rely on AMR aligners or source-copy mechanisms as is commonly the case in English AMR parsing. The results of XL-AMR significantly surpass those previously reported in Chinese, German, Italian and Spanish. Finally we provide a qualitative analysis which sheds light on the suitability of AMR across languages.
We release XL-AMR at github.com/SapienzaNLP/xl-amr.", "phrases": ["cross-lingual amr", "xl-amr", "english sentence", "alignment-based parser", "seq2seq problem"], "overall_score": 3.124523955763197, "scores": [2.937672068059787, 1.712751897677556, 0.5549867582237312, 0.5532348871208448, 0.5283590859149794], "rank_score": 1.2574009393993797} -{"id": "chang-lee-2003-topic", "title": "Topic Segmentation for Short Texts", "abstract": "Topic segmentation, which aims to find the boundaries between topic blocks in a text, is an important task for semantic analysis of texts. Although different solutions have been proposed for the task, many limitations and difficulties exist in the approaches. In particular, most of the methods do not work well for such cases as short texts, internet news and students' writings. In this paper, we focus on the short texts and present a method for topic segmentation. It can overcome the limitations in previous works. In preliminary experiments, the method shows that the accuracy of topic segmentation is effectively increased.", "phrases": ["boundary", "writing", "topic segmentation"], "overall_score": 1.741599257789616, "scores": [2.668535767319999, 0.5731161431853774, 0.5272430081380916], "rank_score": 1.2562983062144892} -{"id": "kwiatkowski-etal-2019-natural", "title": "Natural Questions: A Benchmark for Question Answering Research", "abstract": "We present the Natural Questions corpus, a question answering data set. Questions consist of real anonymized, aggregated queries issued to the Google search engine. An annotator is presented with a question along with a Wikipedia page from the top 5 search results, and annotates a long answer (typically a paragraph) and a short answer (one or more entities) if present on the page, or marks null if no long/short answer is present. The public release consists of 307,373 training examples with single annotations; 7,830 examples with 5-way annotations for development data; and a further 7,842 examples with 5-way annotated sequestered as test data. We present experiments validating quality of the data. We also describe analysis of 25-way annotations on 302 examples, giving insights into human variability on the annotation task. We introduce robust metrics for the purposes of evaluating question answering systems; demonstrate high human upper bounds on these metrics; and establish baseline results using competitive methods drawn from related literature.", "phrases": ["annotator", "natural question", "passage", "wikipedia article", "source domain"], "overall_score": 5.404027341762888, "scores": [2.307023759420229, 1.5527372048317036, 1.2783112373053986, 0.5746809822272595, 0.5650646753080667], "rank_score": 1.2555635718185314} -{"id": "schick-schutze-2021-just", "title": "It's Not Just Size That Matters: Small Language Models Are Also Few-Shot Learners", "abstract": "When scaled to hundreds of billions of parameters, pretrained language models such as GPT-3 (Brown et al., 2020) achieve remarkable few-shot performance. However, enormous amounts of compute are required for training and applying such big models, resulting in a large carbon footprint and making it difficult for researchers and practitioners to use them. We show that performance similar to GPT-3 can be obtained with language models that are much \u201cgreener\u201d in that their parameter count is several orders of magnitude smaller.
This is achieved by converting textual inputs into cloze questions that contain a task description, combined with gradient-based optimization; exploiting unlabeled data gives further improvements. We identify key factors required for successful natural language understanding with small language models.", "phrases": ["language model", "learner", "few-shot learning", "prompt", "small model"], "overall_score": 4.63065847985099, "scores": [1.3838810393349212, 1.9671578161216006, 1.2861587920981226, 1.0554466678075445, 0.5838666650749152], "rank_score": 1.2553021960874209} -{"id": "yessenalina-cardie-2011-compositional", "title": "Compositional Matrix-Space Models for Sentiment Analysis", "abstract": "We present a general learning-based approach for phrase-level sentiment analysis that adopts an ordinal sentiment scale and is explicitly compositional in nature. Thus, we can model the compositional effects required for accurate assignment of phrase-level sentiment. For example, combining an adverb (e.g., \"very\") with a positive polar adjective (e.g., \"good\") produces a phrase (\"very good\") with increased polarity over the adjective alone. Inspired by recent work on distributional approaches to compositionality, we model each word as a matrix and combine words using iterated matrix multiplication, which allows for the modeling of both additive and multiplicative semantic effects. Although the multiplication-based matrix-space framework has been shown to be a theoretically elegant way to model composition (Rudolph and Giesbrecht, 2010), training such models has to be done carefully: the optimization is non-convex and requires a good initial starting point. This paper presents the first such algorithm for learning a matrix-space model for semantic composition. In the context of the phrase-level sentiment analysis task, our experimental results show statistically significant improvements in performance over a bag-of-words model.", "phrases": ["matrix-space model", "sentiment analysis", "semantic effect", "composition"], "overall_score": 3.4796233243111994, "scores": [2.521381238824196, 0.8566210310307167, 1.093358773056057, 0.548674271234367], "rank_score": 1.255008828536334} -{"id": "shi-etal-2017-fast", "title": "Fast(er) Exact Decoding and Global Training for Transition-Based Dependency Parsing via a Minimal Feature Set", "abstract": "We first present a minimal feature set for transition-based dependency parsing, continuing a recent trend started by Kiperwasser and Goldberg (2016a) and Cross and Huang (2016a) of using bi-directional LSTM features. We plug our minimal feature set into the dynamic-programming framework of Huang and Sagae (2010) and Kuhlmann et al. (2011) to produce the first implementation of worst-case O(n^3) exact decoders for arc-hybrid and arc-eager transition systems. With our minimal features, we also present O(n^3) global training methods. 
Finally, using ensembles including our new parsers, we achieve the best unlabeled attachment score reported (to our knowledge) on the Chinese Treebank and the \u201csecond-best-in-class\u201d result on the English Penn Treebank.", "phrases": ["exact decoding", "dependency parsing", "implementation"], "overall_score": 3.118276862288028, "scores": [1.8996455947463298, 1.3331735024370452, 0.5318416749475173], "rank_score": 1.254886924043631} -{"id": "goldberg-nivre-2013-training", "title": "Training Deterministic Parsers with Non-Deterministic Oracles", "abstract": "Greedy transition-based parsers are very fast but tend to suffer from error propagation. This problem is aggravated by the fact that they are normally trained using oracles that are deterministic and incomplete in the sense that they assume a unique canonical path through the transition system and are only valid as long as the parser does not stray from this path. In this paper, we give a general characterization of oracles that are nondeterministic and complete, present a method for deriving such oracles for transition systems that satisfy a property we call arc decomposition, and instantiate this method for three well-known transition systems from the literature. We say that these oracles are dynamic, because they allow us to dynamically explore alternative and nonoptimal paths during training \u2014 in contrast to oracles that statically assume a unique optimal path. Experimental evaluation on a wide range of data sets clearly shows that using dynamic oracles to train greedy parsers gives substantial improvements in accuracy. Moreover, this improvement comes at no cost in terms of efficiency, unlike other techniques like beam search.", "phrases": ["oracle", "dependency parsing", "imitation"], "overall_score": 3.7580122834609067, "scores": [2.083020052582909, 1.1594498797183914, 0.5208960232978462], "rank_score": 1.254455318533049} -{"id": "chen-qian-2020-relation", "title": "Relation-Aware Collaborative Learning for Unified Aspect-Based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis (ABSA) involves three subtasks, i.e., aspect term extraction, opinion term extraction, and aspect-level sentiment classification. Most existing studies focused on one of these subtasks only. Several recent researches made successful attempts to solve the complete ABSA problem with a unified framework. However, the interactive relations among three subtasks are still under-exploited. We argue that such relations encode collaborative signals between different subtasks. For example, when the opinion term is \u201cdelicious\u201d, the aspect term must be \u201cfood\u201d rather than \u201cplace\u201d. In order to fully exploit these relations, we propose a Relation-Aware Collaborative Learning (RACL) framework which allows the subtasks to work coordinately via the multi-task learning and relation propagation mechanisms in a stacked multi-layer network. 
Extensive experiments on three real-world datasets demonstrate that RACL significantly outperforms the state-of-the-art methods for the complete ABSA task.", "phrases": ["sentiment analysis", "aspect term", "relation-aware collaborative learning"], "overall_score": 3.9866200132180505, "scores": [2.355393280005232, 0.8747380422123164, 0.5331340842544176], "rank_score": 1.2544218021573221} -{"id": "tan-bansal-2020-vokenization", "title": "Vokenization: Improving Language Understanding with Contextualized, Visual-Grounded Supervision", "abstract": "Humans learn language by listening, speaking, writing, reading, and also, via interaction with the multimodal real world. Existing language pre-training frameworks show the effectiveness of text-only self-supervision while we explore the idea of a visually-supervised language model in this paper. We find that the main reason hindering this exploration is the large divergence in magnitude and distributions between the visually-grounded language datasets and pure-language corpora. Therefore, we develop a technique named \u201cvokenization\u201d that extrapolates multimodal alignments to language-only data by contextually mapping language tokens to their related images (which we call \u201cvokens\u201d). The \u201cvokenizer\u201d is trained on relatively small image captioning datasets and we then apply it to generate vokens for large language corpora. Trained with these contextually generated vokens, our visually-supervised language models show consistent improvements over self-supervised alternatives on multiple pure-language tasks such as GLUE, SQuAD, and SWAG.", "phrases": ["language understanding", "visual-grounded supervision", "vokenization", "visual information"], "overall_score": 3.0072367393325132, "scores": [2.169213295260266, 1.4384200821414383, 0.8818575929957362, 0.5269695455333229], "rank_score": 1.2541151289826908} -{"id": "hassan-menezes-2013-social", "title": "Social Text Normalization using Contextual Graph Random Walks", "abstract": "We introduce a social media text normalization system that can be deployed as a preprocessing step for Machine Translation and various NLP applications to handle social media text. The proposed system is based on unsupervised learning of the normalization equivalences from unlabeled text. The proposed approach uses Random Walks on a contextual similarity bipartite graph constructed from n-gram sequences on large unlabeled text corpus. We show that the proposed approach has a very high precision of (92.43) and a reasonable recall of (56.4). When used as a preprocessing step for a state-of-the-art machine translation system, the translation quality on social media text improved by 6%. The proposed approach is domain and language independent and can be deployed as a preprocessing step for any NLP application to handle social media text.", "phrases": ["contextual similarity", "bipartite graph", "n-gram sequence", "unlabeled text corpus", "social medium text"], "overall_score": 3.553039749053586, "scores": [1.734326611427656, 1.4893786200901378, 1.2746587994350145, 1.1600486136764374, 0.6119230441876412], "rank_score": 1.2540671377633774} -{"id": "mehdad-etal-2013-towards", "title": "Towards Topic Labeling with Phrase Entailment and Aggregation", "abstract": "We propose a novel framework for topic labeling that assigns the most representative phrases for a given set of sentences covering the same topic. 
We build an entailment graph over phrases that are extracted from the sentences, and use the entailment relations to identify and select the most relevant phrases. We then aggregate those selected phrases by means of phrase generalization and merging. We motivate our approach by applying it over conversational data, and show that our framework improves performance significantly over baseline algorithms.", "phrases": ["topic labeling", "entailment graph", "noun phrase"], "overall_score": 2.246299472957942, "scores": [2.000218826869237, 1.1987180190526745, 0.5621139729009694], "rank_score": 1.2536836062742935} -{"id": "etxeberria-etal-2016-evaluating", "title": "Evaluating the Noisy Channel Model for the Normalization of Historical Texts: Basque, Spanish and Slovene", "abstract": "This paper presents a method for the normalization of historical texts using a combination of weighted finite-state transducers and language models. We have extended our previous work on the normalization of dialectal texts and tested the method against a 17th century literary work in Basque. This preprocessed corpus is made available in the LREC repository. The performance of this method for learning relations between historical and contemporary word forms is evaluated against resources in three languages. The method we present learns to map phonological changes using a noisy channel model. The model is based on techniques commonly used for phonological inference and producing Grapheme-to-Grapheme conversion systems encoded as weighted transducers and produces F-scores above 80% in the task for Basque. A wider evaluation shows that the approach performs equally well with all the languages in our evaluation suite: Basque, Spanish and Slovene. A comparison against other methods that address the same task is also provided.", "phrases": ["noisy channel model", "normalization", "historical text", "language model"], "overall_score": 2.605780205521002, "scores": [2.18569012987773, 1.7387134409776535, 0.5616741769457251, 0.5263838257345912], "rank_score": 1.2531153933839247} -{"id": "ritter-etal-2011-data", "title": "Data-Driven Response Generation in Social Media", "abstract": "We present a data-driven approach to generating responses to Twitter status posts, based on phrase-based Statistical Machine Translation. We find that mapping conversational stimuli onto responses is more difficult than translating between languages, due to the wider range of possible responses, the larger fraction of unaligned words/phrases, and the presence of large phrase pairs whose alignment cannot be further decomposed. After addressing these challenges, we compare approaches based on SMT and Information Retrieval in a human evaluation. We show that SMT outperforms IR on this task, and its output is preferred over actual human responses in 15% of cases. As far as we are aware, this is the first work to investigate the use of phrase-based SMT to directly translate a linguistic stimulus into an appropriate response.", "phrases": ["conversation", "data-driven response generation", "social medium"], "overall_score": 5.338678119462075, "scores": [0.8693114476391406, 1.774410000307341, 1.1135474111691586], "rank_score": 1.252422953038547} -{"id": "yamada-matsumoto-2003-statistical", "title": "Statistical Dependency Analysis with Support Vector Machines", "abstract": "In this paper, we propose a method for analyzing word-word dependencies in a deterministic bottom-up manner using Support Vector Machines.
We experimented with dependency trees converted from Penn treebank data, and achieved over 90% accuracy of word-word dependency. Though the result is a little worse than the most up-to-date phrase-structure-based parsers, it looks satisfactorily accurate considering that our parser uses no information from phrase structures.", "phrases": ["support vector machines", "dependency parsing", "decision", "series", "yamada"], "overall_score": 5.245000335329172, "scores": [0.9477295040319034, 2.363692479469702, 1.2696631500538587, 1.132044456460135, 0.5463364737587915], "rank_score": 1.251893212754878} -{"id": "webster-etal-2018-mind", "title": "Mind the GAP: A Balanced Corpus of Gendered Ambiguous Pronouns", "abstract": "Coreference resolution is an important task for natural language understanding, and the resolution of ambiguous pronouns a longstanding challenge. Nonetheless, existing corpora do not capture ambiguous pronouns in sufficient volume or diversity to accurately indicate the practical utility of models. Furthermore, we find gender bias in existing corpora and systems favoring masculine entities. To address this, we present and release GAP, a gender-balanced labeled corpus of 8,908 ambiguous pronoun\u2013name pairs sampled to provide diverse coverage of challenges posed by real-world text. We explore a range of baselines that demonstrate the complexity of the challenge, the best achieving just 66.9% F1. We show that syntactic structure and continuous neural models provide promising, complementary cues for approaching the challenge.", "phrases": ["balanced corpus", "gendered ambiguous pronouns", "masculine", "coreference resolution dataset"], "overall_score": 4.297743962972302, "scores": [2.67258346506104, 1.2731092843109277, 0.5380602947306519, 0.5223738947036748], "rank_score": 1.2515317347015735} -{"id": "liang-etal-2011-semi", "title": "Semi-Automatic Identification of Bilingual Synonymous Technical Terms from Phrase Tables and Parallel Patent Sentences", "abstract": "In the research field of machine translation of patent documents, the issue of acquiring technical term translation equivalent pairs automatically from parallel patent documents is one of the most important. We take an approach of utilizing the phrase table of a state-of-the-art phrase-based statistical machine translation model. In this task, we consider situations where a technical term is observed in many parallel patent sentences and is translated into many translation equivalents. We apply SVM to the task of identifying synonymous translation equivalent pairs and achieve almost 98% precision and over 40% F-measure. Then, in order to improve recall, we introduce a semi-automatic framework, where we employ the strategy of selecting more than one seed for each set of candidate bilingual synonymous term pairs. By manually judging whether each pair of two seeds is synonymous or not, we achieve over 95% precision and 50% recall.", "phrases": ["parallel patent sentence", "technical term translation", "equivalent pair"], "overall_score": 2.012971889190962, "scores": [1.7757763457028222, 1.0681269209772657, 0.9082860044423261], "rank_score": 1.2507297570408047} -{"id": "wu-etal-2021-bass", "title": "BASS: Boosting Abstractive Summarization with Unified Semantic Graph", "abstract": "Abstractive summarization for long-document or multi-document remains challenging for the Seq2Seq architecture, as Seq2Seq is not good at analyzing long-distance relations in text.
In this paper, we present BASS, a novel framework for Boosting Abstractive Summarization based on a unified Semantic graph, which aggregates co-referent phrases distributing across a long range of context and conveys rich relations between phrases. Further, a graph-based encoder-decoder model is proposed to improve both the document representation and summary generation process by leveraging the graph structure. Specifically, several graph augmentation methods are designed to encode both the explicit and implicit relations in the text while the graph-propagation attention mechanism is developed in the decoder to select salient content into the summary. Empirical results show that the proposed architecture brings substantial improvements for both long-document and multi-document summarization tasks.", "phrases": ["abstractive summarization", "unified semantic graph", "co-referent phrase", "bass"], "overall_score": 1.7338541017517917, "scores": [1.8246559415383568, 1.7167317866937473, 0.9272821526945894, 0.5341755475179027], "rank_score": 1.250711357111149} -{"id": "akbik-etal-2018-contextual", "title": "Contextual String Embeddings for Sequence Labeling", "abstract": "Recent advances in language modeling using recurrent neural networks have made it viable to model language as distributions over characters. By learning to predict the next character on the basis of previous characters, such models have been shown to automatically internalize linguistic concepts such as words, sentences, subclauses and even sentiment. In this paper, we propose to leverage the internal states of a trained character language model to produce a novel type of word embedding which we refer to as contextual string embeddings. Our proposed embeddings have the distinct properties that they (a) are trained without any explicit notion of words and thus fundamentally model words as sequences of characters, and (b) are contextualized by their surrounding text, meaning that the same word will have different embeddings depending on its contextual use. We conduct a comparative evaluation against previous embeddings and find that our embeddings are highly useful for downstream tasks: across four classic sequence labeling tasks we consistently outperform the previous state-of-the-art. In particular, we significantly outperform previous work on English and German named entity recognition (NER), allowing us to report new state-of-the-art F1-scores on the CoNLL03 shared task. We release all code and pre-trained language models in a simple-to-use framework to the research community, to enable reproduction of these experiments and application of our proposed embeddings to other tasks: ", "phrases": ["language model", "downstream task", "sequence labeling task", "contextual string embeddings", "well result"], "overall_score": 5.293566002125026, "scores": [0.9966809009049635, 1.9439952608709683, 1.3816371256511786, 1.0711206733970793, 0.8576676545667287], "rank_score": 1.2502203230781839} -{"id": "cocarascu-toni-2017-identifying", "title": "Identifying attack and support argumentative relations using deep learning", "abstract": "We propose a deep learning architecture to capture argumentative relations of attack and support from one piece of text to another, of the kind that naturally occur in a debate. 
The architecture uses two (unidirectional or bidirectional) Long Short-Term Memory networks and (trained or non-trained) word embeddings, and allows us to considerably improve upon existing techniques that use syntactic features and supervised classifiers for the same form of (relation-based) argument mining.", "phrases": ["attack", "argumentative relation", "deep learning"], "overall_score": 2.4327860506357286, "scores": [2.088554427864914, 0.8319254242878666, 0.8301343041330796], "rank_score": 1.2502047187619534} -{"id": "johnson-zhang-2015-effective", "title": "Effective Use of Word Order for Text Categorization with Convolutional Neural Networks", "abstract": "Convolutional neural network (CNN) is a neural network that can make use of the internal structure of data such as the 2D structure of image data. This paper studies CNN on text categorization to exploit the 1D structure (namely, word order) of text data for accurate prediction. Instead of using low-dimensional word vectors as input as is often done, we directly apply CNN to high-dimensional text data, which leads to directly learning embedding of small text regions for use in classification. In addition to a straightforward adaptation of CNN from image to text, a simple but new variation which employs bag-of-word conversion in the convolution layer is proposed. An extension to combine multiple convolution layers is also explored for higher accuracy. The experiments demonstrate the effectiveness of our approach in comparison with state-of-the-art methods.", "phrases": ["text categorization", "convolutional neural networks", "cnn", "one-hot vector"], "overall_score": 3.8056116940932005, "scores": [1.9103526211874342, 0.920840701583638, 1.6252235842868, 0.5435287618449869], "rank_score": 1.2499864172257147} -{"id": "tan-etal-2019-hierarchical", "title": "Hierarchical Modeling of Global Context for Document-Level Neural Machine Translation", "abstract": "Document-level machine translation (MT) remains challenging due to the difficulty in efficiently using document context for translation. In this paper, we propose a hierarchical model to learn the global context for document-level neural machine translation (NMT). This is done through a sentence encoder to capture intra-sentence dependencies and a document encoder to model document-level inter-sentence consistency and coherence. With this hierarchical architecture, we feed back the extracted global document context to each word in a top-down fashion to distinguish different translations of a word according to its specific surrounding context. In addition, since large-scale in-domain document-level parallel corpora are usually unavailable, we use a two-step training strategy to take advantage of a large-scale corpus with out-of-domain parallel sentence pairs and a small-scale corpus with in-domain parallel document pairs to achieve the domain adaptability.
Experimental results on several benchmark corpora show that our proposed model can significantly improve document-level translation performance over several strong NMT baselines.", "phrases": ["global context", "neural machine translation", "document-level nmt"], "overall_score": 2.8771121453969086, "scores": [2.2795249453109125, 0.8984191642505142, 0.5705976761266847], "rank_score": 1.2495139285627037} -{"id": "min-etal-2020-ambigqa", "title": "AmbigQA: Answering Ambiguous Open-domain Questions", "abstract": "Ambiguity is inherent to open-domain question answering; especially when exploring new topics, it can be difficult to ask questions that have a single, unambiguous answer. In this paper, we introduce AmbigQA, a new open-domain question answering task which involves finding every plausible answer, and then rewriting the question for each one to resolve the ambiguity. To study this task, we construct AmbigNQ, a dataset covering 14,042 questions from NQ-open, an existing open-domain QA benchmark. We find that over half of the questions in NQ-open are ambiguous, with diverse sources of ambiguity such as event and entity references. We also present strong baseline models for AmbigQA which we show benefit from weakly supervised learning that incorporates NQ-open, strongly suggesting our new task and data will support significant future research effort. Our data and baselines are available at .", "phrases": ["ambiguity", "open-domain question", "ambigqa"], "overall_score": 3.5399458850733065, "scores": [2.23951057821302, 0.8616052419556132, 0.6472209146908234], "rank_score": 1.2494455782864855} -{"id": "fiszman-etal-2007-interpreting", "title": "Interpreting comparative constructions in biomedical text", "abstract": "We propose a methodology using underspecified semantic interpretation to process comparative constructions in MEDLINE citations, concentrating on two structures that are prevalent in the research literature reporting on clinical trials for drug therapies. The method exploits an existing semantic processor, SemRep, which constructs predications based on the Unified Medical Language System. Results of a preliminary evaluation were recall of 70%, precision of 96%, and F-score of 81%. We discuss the generalization of the methodology to other entities such as therapeutic and diagnostic procedures. The available structures in computable format are potentially useful for interpreting outcome statements in MEDLINE citations.", "phrases": ["comparative construction", "biomedical text", "interpretation"], "overall_score": 2.430137575567955, "scores": [2.3249016418238426, 0.8740935314653167, 0.5475358421552915], "rank_score": 1.2488436718148168} -{"id": "dyer-etal-2008-generalizing", "title": "Generalizing Word Lattice Translation", "abstract": "Word lattice decoding has proven useful in spoken language translation; we argue that it provides a compelling model for translation of text genres, as well. We show that prior work in translating lattices using finite state techniques can be naturally extended to more expressive synchronous context-free grammar-based models. Additionally, we resolve a significant complication that non-linear word lattice inputs introduce in reordering models.
Our experiments evaluating the approach demonstrate substantial gains for Chinese-English and Arabic-English translation.", "phrases": ["lattice", "segmentation", "source sentence", "value"], "overall_score": 4.018225946616756, "scores": [2.1416628016584083, 1.3235062468049197, 0.9862168649136386, 0.5419423977967467], "rank_score": 1.2483320777934281} -{"id": "tang-etal-2015-learning", "title": "Learning Semantic Representations of Users and Products for Document Level Sentiment Classification", "abstract": "Neural network methods have achieved promising results for sentiment classification of text. However, these models only use semantics of texts, while ignoring users who express the sentiment and products which are evaluated, both of which have great influences on interpreting the sentiment of text. In this paper, we address this issue by incorporating user- and product-level information into a neural network approach for document level sentiment classification. Users and products are modeled using vector space models, the representations of which capture important global clues such as individual preferences of users or overall qualities of products. Such global evidence in turn facilitates embedding learning procedure at document level, yielding better text representations. By combining evidence at user-, product- and document-level in a unified neural framework, the proposed model achieves state-of-the-art performances on IMDB and Yelp datasets.", "phrases": ["product", "sentiment classification", "attention model"], "overall_score": 3.379929985687811, "scores": [1.8579177443835677, 1.3609931235261286, 0.5254030125849904], "rank_score": 1.2481046268315623} -{"id": "zmigrod-etal-2019-counterfactual", "title": "Counterfactual Data Augmentation for Mitigating Gender Stereotypes in Languages with Rich Morphology", "abstract": "Gender stereotypes are manifest in most of the world's languages and are consequently propagated or amplified by NLP systems. Although research has focused on mitigating gender stereotypes in English, the approaches that are commonly employed produce ungrammatical sentences in morphologically rich languages. We present a novel approach for converting between masculine-inflected and feminine-inflected sentences in such languages. For Spanish and Hebrew, our approach achieves F1 scores of 82% and 73% at the level of tags and accuracies of 90% and 87% at the level of forms. By evaluating our approach using four different languages, we show that, on average, it reduces gender stereotyping by a factor of 2.5 without any sacrifice to grammaticality.", "phrases": ["gender", "rich language", "feminine-inflected sentence", "counterfactual data augmentation"], "overall_score": 3.912617839136957, "scores": [2.0462597544325813, 1.563859961519947, 0.8364645204794973, 0.5448047691796718], "rank_score": 1.2478472514029244} -{"id": "samardzic-etal-2016-archimob", "title": "ArchiMob - A Corpus of Spoken Swiss German", "abstract": "Swiss dialects of German are, unlike most dialects of well standardised languages, widely used in everyday communication. Despite this fact, automatic processing of Swiss German is still a considerable challenge due to the fact that it is mostly a spoken variety rarely recorded and that it is subject to considerable regional variation. This paper presents a freely available general-purpose corpus of spoken Swiss German suitable for linguistic research, but also for training automatic tools.
The corpus is a result of a long design process, intensive manual work and specially adapted computational processing. We first describe how the documents were transcribed, segmented and aligned with the sound source, and how inconsistent transcriptions were unified through an additional normalisation layer. We then present a bootstrapping approach to automatic normalisation using different machine-translation-inspired methods. Furthermore, we evaluate the performance of part-of-speech taggers on our data and show how the same bootstrapping approach improves part-of-speech tagging by 10% over four rounds. Finally, we present the modalities of access to the corpus as well as the data format.", "phrases": ["dialect", "archimob", "normalization"], "overall_score": 2.9911978928689407, "scores": [1.7450754394107435, 1.1615412344628224, 0.8356625575853366], "rank_score": 1.2474264104863009} -{"id": "wang-etal-2020-pyramid", "title": "Pyramid: A Layered Model for Nested Named Entity Recognition", "abstract": "This paper presents Pyramid, a novel layered model for Nested Named Entity Recognition (nested NER). In our approach, token or text region embeddings are recursively inputted into L flat NER layers, from bottom to top, stacked in a pyramid shape. Each time an embedding passes through a layer of the pyramid, its length is reduced by one. Its hidden state at layer l represents an l-gram in the input text, which is labeled only if its corresponding text region represents a complete entity mention. We also design an inverse pyramid to allow bidirectional interaction between layers. The proposed method achieves state-of-the-art F1 scores in nested NER on ACE-2004, ACE-2005, GENIA, and NNE, which are 80.27, 79.42, 77.78, and 93.70 with conventional embeddings, and 87.74, 86.34, 79.31, and 94.68 with pre-trained contextualized embeddings. In addition, our model can be used for the more general task of Overlapping Named Entity Recognition. A preliminary experiment confirms the effectiveness of our method in overlapping NER.", "phrases": ["flat ner layer", "entity mention", "pyramid"], "overall_score": 2.990382613598501, "scores": [2.603520528957535, 0.6033386981298945, 0.5344000107799828], "rank_score": 1.2470864126224708} -{"id": "xu-etal-2019-scaling", "title": "Scaling up Open Tagging from Tens to Thousands: Comprehension Empowered Attribute Value Extraction from Product Title", "abstract": "Supplementing product information by extracting attribute values from title is a crucial task in e-Commerce domain. Previous studies treat each attribute only as an entity type and build one set of NER tags (e.g., BIO) for each of them, leading to a scalability issue that makes them unfit for the large-sized attribute systems of real-world e-Commerce. In this work, we propose a novel approach to support value extraction scaling up to thousands of attributes without losing performance: (1) We propose to regard attribute as a query and adopt only one global set of BIO tags for any attributes to reduce the burden of attribute tag or model explosion; (2) We explicitly model the semantic representations for attribute and title, and develop an attention mechanism to capture the interactive semantic relations in-between to enforce our framework to be attribute comprehensive. We conduct extensive experiments in real-life datasets. 
The results show that our model not only outperforms existing state-of-the-art NER tagging models, but also is robust and generates promising results for up to 8,906 attributes.", "phrases": ["attribute", "product title", "e-commerce domain", "bio tag"], "overall_score": 3.0981651354958486, "scores": [1.2838988513111969, 1.6191000403786968, 1.2202696692659194, 0.8639049181930614], "rank_score": 1.2467933697872184} -{"id": "kumar-etal-2019-reinforcement", "title": "Reinforcement Learning based Curriculum Optimization for Neural Machine Translation", "abstract": "We consider the problem of making efficient use of heterogeneous training data in neural machine translation (NMT). Specifically, given a training dataset with a sentence-level feature such as noise, we seek an optimal curriculum, or order for presenting examples to the system during training. Our curriculum framework allows examples to appear an arbitrary number of times, and thus generalizes data weighting, filtering, and fine-tuning schemes. Rather than relying on prior knowledge to design a curriculum, we use reinforcement learning to learn one automatically, jointly with the NMT system, in the course of a single training run. We show that this approach can beat uniform baselines on Paracrawl and WMT English-to-French datasets by +3.4 and +1.3 BLEU respectively. Additionally, we match the performance of strong filtering baselines and hand-designed, state-of-the-art curricula.", "phrases": ["curriculum", "neural machine translation", "reinforcement learning", "model training"], "overall_score": 2.9894677218063475, "scores": [2.7073430467841937, 0.8944505273536764, 0.852554068842233, 0.5324718494618754], "rank_score": 1.2467048731104946} -{"id": "mohammad-kiritchenko-2018-understanding", "title": "Understanding Emotions: A Dataset of Tweets to Study Interactions between Affect Categories", "abstract": "Human emotions are complex and nuanced. Yet, an overwhelming majority of the work in automatically detecting emotions from text has focused only on classifying text into positive, negative, and neutral classes, and a much smaller amount on classifying text into basic emotion categories such as joy, sadness, and fear. Our goal is to create a single textual dataset that is annotated for many emotion (or affect) dimensions (from both the basic emotion model and the VAD model). For each emotion dimension, we annotate the data for not just coarse classes (such as anger or no anger) but also for fine-grained real-valued scores indicating the intensity of emotion (anger, sadness, valence, etc.). We use Best\u2013Worst Scaling (BWS) to address the limitations of traditional rating scale methods such as inter- and intra-annotator inconsistency by employing comparative annotations. We show that the fine-grained intensity scores thus obtained are reliable (repeat annotations lead to similar scores). We choose Twitter as the source of the textual data we annotate because tweets are self-contained, widely used, public posts, and tend to be rich in emotions. The new dataset is useful for training and testing supervised machine learning algorithms for multi-label emotion classification, emotion intensity regression, detecting valence, detecting ordinal class of intensity of emotion (slightly sad, very angry, etc.), and detecting ordinal class of valence (or sentiment). We make the data available for the recent SemEval-2018 Task 1: Affect in Tweets, which explores these five tasks. 
The dataset also sheds light on crucial research questions such as: which emotions are often present together in tweets?; how do the intensities of the three negative emotions relate to each other?; and how do the intensities of the basic emotions relate to valence?", "phrases": ["emotion", "tweets", "dimension"], "overall_score": 2.0064679798832636, "scores": [2.306441036178947, 0.9097760013623336, 0.5238489153888917], "rank_score": 1.246688650976724} -{"id": "bjerva-augenstein-2018-phonology", "title": "From Phonology to Syntax: Unsupervised Linguistic Typology at Different Levels with Language Embeddings", "abstract": "A core part of linguistic typology is the classification of languages according to linguistic properties, such as those detailed in the World Atlas of Language Structure (WALS). Doing this manually is prohibitively time-consuming, which is in part evidenced by the fact that only 100 out of over 7,000 languages spoken in the world are fully covered in WALS. We learn distributed language representations, which can be used to predict typological properties on a massively multilingual scale. Additionally, quantitative and qualitative analyses of these language embeddings can tell us how language similarities are encoded in NLP models for tasks at different typological levels. The representations are learned in an unsupervised manner alongside tasks at three typological levels: phonology (grapheme-to-phoneme prediction, and phoneme reconstruction), morphology (morphological inflection), and syntax (part-of-speech tagging). We consider more than 800 languages and find significant differences in the language representations encoded, depending on the target task. For instance, although Norwegian Bokm\u00e5l and Danish are typologically close to one another, they are phonologically distant, which is reflected in their language embeddings growing relatively distant in a phonological task. We are also able to predict typological features in WALS with high accuracies, even for unseen language families.", "phrases": ["phonology", "syntax", "language embedding"], "overall_score": 2.5921902575477485, "scores": [1.9295516665692043, 0.8519622098849043, 0.9582261531507134], "rank_score": 1.246580009868274} -{"id": "xu-etal-2018-unpaired", "title": "Unpaired Sentiment-to-Sentiment Translation: A Cycled Reinforcement Learning Approach", "abstract": "The goal of sentiment-to-sentiment \u201ctranslation\u201d is to change the underlying sentiment of a sentence while keeping its content. The main challenge is the lack of parallel data. To solve this problem, we propose a cycled reinforcement learning method that enables training on unpaired data by collaboration between a neutralization module and an emotionalization module. We evaluate our approach on two review datasets, Yelp and Amazon. Experimental results show that our approach significantly outperforms the state-of-the-art systems. In particular, the proposed method substantially improves the content preservation performance. 
The BLEU score is improved from 1.64 to 22.46 and from 0.56 to 14.06 on the two datasets, respectively.", "phrases": ["reinforcement learning", "style", "sentiment transfer"], "overall_score": 4.358595465732933, "scores": [1.4749006411295058, 1.3142882395715558, 0.9504816812337874], "rank_score": 1.246556853978283} -{"id": "xu-etal-2016-question", "title": "Question Answering on Freebase via Relation Extraction and Textual Evidence", "abstract": "Existing knowledge-based question answering systems often rely on small annotated training data. While shallow methods like relation extraction are robust to data scarcity, they are less expressive than the deep meaning representation methods like semantic parsing, thereby failing at answering questions involving multiple constraints. Here we alleviate this problem by empowering a relation extraction method with additional evidence from Wikipedia. We first present a neural network based relation extractor to retrieve the candidate answers from Freebase, and then infer over Wikipedia to validate these answers. Experiments on the WebQuestions question answering dataset show that our method achieves an F_1 of 53.3%, a substantial improvement over the state-of-the-art.", "phrases": ["relation extraction", "wikipedia", "candidate answer"], "overall_score": 4.196479996438744, "scores": [2.3102362129237797, 0.8654046614906005, 0.5630980499190967], "rank_score": 1.246246308111159} -{"id": "ghanimifard-dobnik-2019-goes", "title": "What goes into a word: generating image descriptions with top-down spatial knowledge", "abstract": "Generating grounded image descriptions requires associating linguistic units with their corresponding visual clues. A common method is to train a decoder language model with attention mechanism over convolutional visual features. Attention weights align the stratified visual features arranged by their location with tokens, most commonly words, in the target description. However, words such as spatial relations (e.g. next to and under) are not directly referring to geometric arrangements of pixels but to complex geometric and conceptual representations. The aim of this paper is to evaluate what representations facilitate generating image descriptions with spatial relations and lead to better grounded language generation. In particular, we investigate the contribution of three different representational modalities in generating relational referring expressions: (i) pre-trained convolutional visual features, (ii) different top-down geometric relational knowledge between objects, and (iii) world knowledge captured by contextual embeddings in language models.", "phrases": ["image description", "spatial knowledge", "language model"], "overall_score": 1.7275490303077348, "scores": [2.350946486369266, 0.8374843294807389, 0.550058812526255], "rank_score": 1.2461632094587534} -{"id": "jiang-etal-2008-cascaded", "title": "A Cascaded Linear Model for Joint Chinese Word Segmentation and Part-of-Speech Tagging", "abstract": "We propose a cascaded linear model for joint Chinese word segmentation and part-of-speech tagging. With a character-based perceptron as the core, combined with real-valued features such as language models, the cascaded model is able to efficiently utilize knowledge sources that are inconvenient to incorporate into the perceptron directly. Experiments show that the cascaded model achieves improved accuracies on both segmentation only and joint segmentation and part-of-speech tagging. 
On the Penn Chinese Treebank 5.0, we obtain an error reduction of 18.5% on segmentation and 12% on joint segmentation and part-of-speech tagging over the perceptron-only baseline.", "phrases": ["linear model", "part-of-speech tagging", "joint segmentation"], "overall_score": 3.6013949374258862, "scores": [2.210574689819388, 0.9594151035968799, 0.5680016198896031], "rank_score": 1.2459971377686236} -{"id": "cui-etal-2021-template", "title": "Template-Based Named Entity Recognition Using BART", "abstract": "There is a recent interest in investigating few-shot NER, where the low-resource target domain has different label sets compared with a resource-rich source domain. Existing methods use a similarity-based metric. However, they cannot make full use of knowledge transfer in NER model parameters. To address the issue, we propose a template-based method for NER, treating NER as a language model ranking problem in a sequence-to-sequence framework, where original sentences and statement templates filled by candidate named entity span are regarded as the source sequence and the target sequence, respectively. For inference, the model is required to classify each candidate span based on the corresponding template scores. Our experiments demonstrate that the proposed method achieves 92.55% F1 score on the CoNLL03 (rich-resource task), and significantly outperforms fine-tuning BERT by 10.88%, 15.34%, and 11.73% F1 score on the MIT Movie, the MIT Restaurant, and the ATIS (low-resource task), respectively.", "phrases": ["entity recognition", "bart", "template-based method", "language model"], "overall_score": 3.7326153890200726, "scores": [1.8322073638851444, 0.9229468883382083, 1.1320263605814693, 1.0967298947811972], "rank_score": 1.2459776268965048} -{"id": "dredze-etal-2010-entity", "title": "Entity Disambiguation for Knowledge Base Population", "abstract": "The integration of facts derived from information extraction systems into existing knowledge bases requires a system to disambiguate entity mentions in the text. This is challenging due to issues such as non-uniform variations in entity names, mention ambiguity, and entities absent from a knowledge base. We present a state-of-the-art system for entity disambiguation that not only addresses these challenges but also scales to knowledge bases with several million entries using very few resources. Further, our approach achieves performance of up to 95% on entities mentioned from newswire and 80% on a public test set that was designed to include challenging queries.", "phrases": ["knowledge base", "entity disambiguation", "wikipedia entry", "ranking problem", "large number"], "overall_score": 4.0105829738653656, "scores": [3.04840058479092, 1.2002151844661217, 0.9187849933138423, 0.5328001781097219, 0.52958733335072], "rank_score": 1.2459576548062652} -{"id": "beilharz-etal-2020-librivoxdeen", "title": "LibriVoxDeEn: A Corpus for German-to-English Speech Translation and German Speech Recognition", "abstract": "We present a corpus of sentence-aligned triples of German audio, German text, and English translation, based on German audio books. The speech translation data consist of 110 hours of audio material aligned to over 50k parallel sentences. An even larger dataset comprising 547 hours of German speech aligned to German text is available for speech recognition. The audio data is read speech and thus low in disfluencies. 
The quality of audio and sentence alignments has been checked by a manual evaluation, showing that speech alignment quality is in general very high. The sentence alignment quality is comparable to well-used parallel translation data and can be adjusted by cutoffs on the automatic alignment score. To our knowledge, this corpus is to date the largest resource for German speech recognition and for end-to-end German-to-English speech translation.", "phrases": ["german-to-english speech translation", "speech recognition", "librivoxdeen"], "overall_score": 1.7270103960577532, "scores": [2.0951280937358923, 0.8440792856994133, 0.7981166214990025], "rank_score": 1.2457746669781027} -{"id": "gur-etal-2018-dialsql", "title": "DialSQL: Dialogue Based Structured Query Generation", "abstract": "The recent advance in deep learning and semantic parsing has significantly improved the translation accuracy of natural language questions to structured queries. However, further improvement of the existing approaches turns out to be quite challenging. Rather than solely relying on algorithmic innovations, in this work, we introduce DialSQL, a dialogue-based structured query generation framework that leverages human intelligence to boost the performance of existing algorithms via user interaction. DialSQL is capable of identifying potential errors in a generated SQL query and asking users for validation via simple multi-choice questions. User feedback is then leveraged to revise the query. We design a generic simulator to bootstrap synthetic training dialogues and evaluate the performance of DialSQL on the WikiSQL dataset. Using SQLNet as a black box query generation tool, DialSQL improves its performance from 61.3% to 69.0% using only 2.4 validation questions per dialogue.", "phrases": ["query", "semantic parsing", "dialsql"], "overall_score": 2.73680197316852, "scores": [1.76150157644099, 1.1012327939039854, 0.8739823968865983], "rank_score": 1.245572255743858} -{"id": "hu-etal-2019-domain-adaptation", "title": "Domain Adaptation of Neural Machine Translation by Lexicon Induction", "abstract": "It has been previously noted that neural machine translation (NMT) is very sensitive to domain shift. In this paper, we argue that this is a dual effect of the highly lexicalized nature of NMT, resulting in failure for sentences with large numbers of unknown words, and lack of supervision for domain-specific words. To remedy this problem, we propose an unsupervised adaptation method which fine-tunes a pre-trained out-of-domain NMT model using a pseudo-in-domain corpus. Specifically, we perform lexicon induction to extract an in-domain lexicon, and construct a pseudo-parallel in-domain corpus by performing word-for-word back-translation of monolingual in-domain target sentences. 
In five domains over twenty pairwise adaptation settings and two model architectures, our method achieves consistent improvements without using any in-domain parallel sentences, improving up to 14 BLEU over unadapted models, and up to 2 BLEU over strong back-translation baselines.", "phrases": ["neural machine translation", "lexicon induction", "adaptation method", "pseudo-parallel in-domain corpus", "back-translation"], "overall_score": 3.372928814059852, "scores": [2.877319642851119, 0.957845534011374, 1.2517411283934956, 0.5818982631499118, 0.5587919744628923], "rank_score": 1.2455193085737584} -{"id": "culotta-etal-2007-first", "title": "First-Order Probabilistic Models for Coreference Resolution", "abstract": "Traditional noun phrase coreference resolution systems represent features only of pairs of noun phrases. In this paper, we propose a machine learning method that enables features over sets of noun phrases, resulting in a first-order probabilistic model for coreference. We outline a set of approximations that make this approach practical, and apply our method to the ACE coreference dataset, achieving a 45% error reduction over a comparable method that only considers features of pairs of noun phrases. This result demonstrates an example of how a first-order logic representation can be incorporated into a probabilistic model and scaled efficiently.", "phrases": ["coreference resolution", "mention", "first-order probabilistic model", "cluster"], "overall_score": 4.0070207487247185, "scores": [1.5165892657901245, 1.6299743565903275, 1.2764037914239503, 0.5564365355713573], "rank_score": 1.24485098734394} -{"id": "ritter-etal-2013-modeling", "title": "Modeling Missing Data in Distant Supervision for Information Extraction", "abstract": "Distant supervision algorithms learn information extraction models given only large readily available databases and text collections. Most previous work has used heuristics for generating labeled data, for example assuming that facts not contained in the database are not mentioned in the text, and facts in the database must be mentioned at least once. In this paper, we propose a new latent-variable approach that models missing data. This provides a natural way to incorporate side information, for instance modeling the intuition that text will often mention rare entities which are likely to be missing in the database. Despite the added complexity introduced by reasoning about missing data, we demonstrate that a carefully designed local search approach to inference is very accurate and scales to large datasets. Experiments demonstrate improved performance for binary and unary relation extraction when compared to learning with heuristic labels, including on average a 27% increase in area under the precision recall curve in the binary case.", "phrases": ["distant supervision", "relation extraction", "knowledge base", "graphical model"], "overall_score": 3.4509138867722458, "scores": [2.105147328922103, 1.3884704377027084, 0.9317537816855673, 0.5532448026707987], "rank_score": 1.2446540877452943} -{"id": "dong-zhang-2016-automatic", "title": "Automatic Features for Essay Scoring \u2013 An Empirical Study", "abstract": "Essay scoring is a complicated process requiring analyzing, summarizing and judging expertise. Traditional work on essay scoring focused on automatic handcrafted features, which are expensive yet sparse. Neural models offer a way to learn syntactic and semantic features automatically, which can potentially improve upon discrete features. 
In this paper, we employ a convolutional neural network (CNN) to learn features automatically, and compare the results with the state-of-the-art discrete baselines. For in-domain and domain-adaptation essay scoring tasks, our neural model empirically outperforms discrete models.", "phrases": ["essay scoring", "automatic feature", "sentence representation"], "overall_score": 3.7892484892916247, "scores": [2.079051379922618, 0.8110903129751702, 0.8436936459461052], "rank_score": 1.2446117796146312} -{"id": "guu-etal-2015-traversing", "title": "Traversing Knowledge Graphs in Vector Space", "abstract": "Path queries on a knowledge graph can be used to answer compositional questions such as \"What languages are spoken by people living in Lisbon?\". However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \"compositional\" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.", "phrases": ["knowledge graph", "vector space", "path", "reasoning", "entity pair"], "overall_score": 4.187623456164822, "scores": [1.74338390733375, 1.5122092792203652, 1.2719234501773071, 1.1265433093457438, 0.5640207744718846], "rank_score": 1.2436161441098101} -{"id": "yamangil-nelken-2008-mining", "title": "Mining Wikipedia Revision Histories for Improving Sentence Compression", "abstract": "A well-recognized limitation of research on supervised sentence compression is the dearth of available training data. We propose a new and bountiful resource for such training data, which we obtain by mining the revision history of Wikipedia for sentence compressions and expansions. Using only a fraction of the available Wikipedia data, we have collected a training corpus of over 380,000 sentence pairs, two orders of magnitude larger than the standardly used Ziff-Davis corpus. Using this new-found data, we propose a novel lexicalized noisy channel model for sentence compression, achieving improved results in grammaticality and compression rate criteria with a slight decrease in importance.", "phrases": ["wikipedia", "revision history", "sentence compression"], "overall_score": 2.4195210753911414, "scores": [2.2806315206857612, 0.8941642020557091, 0.555367887175082], "rank_score": 1.243387869972184} -{"id": "belinkov-etal-2019-adversarial", "title": "On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference", "abstract": "Popular Natural Language Inference (NLI) datasets have been shown to be tainted by hypothesis-only biases. Adversarial learning may help models ignore sensitive biases and spurious correlations in data. We evaluate whether adversarial learning can be used in NLI to encourage models to learn representations free of hypothesis-only biases. 
Our analyses indicate that the representations learned via adversarial learning may be less biased, with only small drops in NLI accuracy.", "phrases": ["hypothesis-only bias", "natural language inference", "nli", "adversarial training"], "overall_score": 3.0892053735570055, "scores": [2.651988189556183, 0.8962259065729117, 0.9002206687888616, 0.5243160204213005], "rank_score": 1.2431876963348143} -{"id": "zhou-etal-2016-deep", "title": "Deep Recurrent Models with Fast-Forward Connections for Neural Machine Translation", "abstract": "Neural machine translation (NMT) aims at solving machine translation (MT) problems using neural networks and has exhibited promising results in recent years. However, most of the existing NMT models are shallow and there is still a performance gap between a single NMT model and the best conventional MT system. In this work, we introduce a new type of linear connections, named fast-forward connections, based on deep Long Short-Term Memory (LSTM) networks, and an interleaved bi-directional architecture for stacking the LSTM layers. Fast-forward connections play an essential role in propagating the gradients and building a deep topology of depth 16. On the WMT'14 English-to-French task, we achieve BLEU=37.7 with a single attention model, which outperforms the corresponding single shallow model by 6.2 BLEU points. This is the first time that a single NMT model achieves state-of-the-art performance and outperforms the best conventional model by 0.7 BLEU points. We can still achieve BLEU=36.3 even without using an attention mechanism. After special handling of unknown words and model ensembling, we obtain the best score reported to date on this task with BLEU=40.4. Our models are also validated on the more difficult WMT'14 English-to-German task.", "phrases": ["fast-forward connection", "connection", "neural machine translation", "attention model"], "overall_score": 3.0884059793320118, "scores": [2.9165661305272264, 0.9103277455250344, 0.593262433637218, 0.5513076760429413], "rank_score": 1.242865996433105} -{"id": "lakew-etal-2018-transfer", "title": "Transfer Learning in Multilingual Neural Machine Translation with Dynamic Vocabulary", "abstract": "We propose a method to transfer knowledge across neural machine translation (NMT) models by means of a shared dynamic vocabulary. Our approach makes it possible to extend an initial model for a given language pair to cover new languages by adapting its vocabulary as long as new data become available (i.e., introducing new vocabulary items if they are not included in the initial model). The parameter transfer mechanism is evaluated in two scenarios: i) to adapt a trained single language NMT system to work with a new language pair and ii) to continuously add new language pairs to grow into a multilingual NMT system. In both scenarios, our goal is to improve the translation performance, while minimizing the training convergence time. Preliminary experiments spanning five languages with different training data sizes (i.e., 5k and 50k parallel sentences) show a significant performance gain ranging from +3.85 up to +13.63 BLEU in different language directions. 
Moreover, when compared with training an NMT model from scratch, our transfer-learning approach allows us to reach higher performance after training for up to 4% of the total training steps.", "phrases": ["neural machine translation", "dynamic vocabulary", "transfer learning", "low-resource language pair"], "overall_score": 2.727157052427864, "scores": [2.1092374387727975, 1.6965217180630197, 0.6254124888631788, 0.5335590068847311], "rank_score": 1.241182663145932} -{"id": "graham-etal-2013-continuous", "title": "Continuous Measurement Scales in Human Evaluation of Machine Translation", "abstract": "We explore the use of continuous rating scales for human evaluation in the context of machine translation evaluation, comparing two assessor-intrinsic quality-control techniques that do not rely on agreement with expert judgments. Experiments employing Amazon\u2019s Mechanical Turk service show that quality-control techniques made possible by the use of the continuous scale show dramatic improvements to intra-annotator agreement of up to +0.101 in the kappa coefficient, with inter-annotator agreement increasing by up to +0.144 when additional standardization of scores is applied.", "phrases": ["human evaluation", "continuous scale", "inter-annotator agreement", "direct assessment"], "overall_score": 2.975087270077927, "scores": [2.7845221651980046, 1.0491415348510627, 0.5730084768387953, 0.5561588588578582], "rank_score": 1.2407077589364304} -{"id": "piad-morffis-etal-2019-general", "title": "A General-Purpose Annotation Model for Knowledge Discovery: Case Study in Spanish Clinical Text", "abstract": "Knowledge discovery from text in natural language is a task usually aided by the manual construction of annotated corpora. Specifically in the clinical domain, several annotation models are used depending on the characteristics of the task to solve (e.g., named entity recognition, relation extraction, etc.). However, few general-purpose annotation models exist that can support a broad range of knowledge extraction tasks. This paper presents an annotation model designed to capture a large portion of the semantics of natural language text. The structure of the annotation model is presented, with examples of annotated sentences and a brief description of each semantic role and relation defined. This research focuses on an application to clinical texts in the Spanish language. Nevertheless, the presented annotation model is extensible to other domains and languages. An example of annotated sentences, guidelines, and suitable configuration files for an annotation tool are also provided for the research community.", "phrases": ["annotation model", "knowledge discovery", "domain independence"], "overall_score": 1.7198180279085642, "scores": [2.329994929316799, 0.8635064508908514, 0.5282580299350331], "rank_score": 1.2405864700475613} -{"id": "jauhar-etal-2016-tables", "title": "Tables as Semi-structured Knowledge for Question Answering", "abstract": "Question answering requires access to a knowledge base to check facts and reason about information. Knowledge in the form of natural language text is easy to acquire, but difficult for automated reasoning. Highly-structured knowledge bases can facilitate reasoning, but are difficult to acquire. In this paper we explore tables as a semi-structured formalism that provides a balanced compromise to this trade-off. 
We first use the structure of tables to guide the construction of a dataset of over 9000 multiple-choice questions with rich alignment annotations, easily and efficiently via crowd-sourcing. We then use this annotated data to train a semi-structured feature-driven model for question answering that uses tables as a knowledge base. In benchmark evaluations, we significantly outperform both a strong unstructured retrieval baseline and a highly structured Markov Logic Network model.", "phrases": ["question answering", "table", "highly-structured knowledge basis"], "overall_score": 2.2225781765123895, "scores": [2.207737850977594, 0.8996387149226494, 0.6139569300571314], "rank_score": 1.2404444986524583} -{"id": "dorow-widdows-2003-discovering", "title": "Discovering Corpus-Specific Word Senses", "abstract": "This paper presents an unsupervised algorithm which automatically discovers word senses from text. The algorithm is based on a graph model representing words and relationships between them. Sense clusters are iteratively computed by clustering the local graph of similar words around an ambiguous word. Discrimination against previously extracted sense clusters enables us to discover new senses. We use the same data for both recognising and resolving ambiguity.", "phrases": ["word sense", "co-occurrence graph", "subgraph"], "overall_score": 3.081827762134191, "scores": [2.5455308912551082, 0.607196218895062, 0.5679290711157742], "rank_score": 1.2402187270886482} -{"id": "gupta-etal-2015-distributional", "title": "Distributional vectors encode referential attributes", "abstract": "Distributional methods have proven to excel at capturing fuzzy, graded aspects of meaning (Italy is more similar to Spain than to Germany). In contrast, it is difficult to extract the values of more specific attributes of word referents from distributional representations, attributes of the kind typically found in structured knowledge bases (Italy has 60 million inhabitants). In this paper, we pursue the hypothesis that distributional vectors also implicitly encode referential attributes. We show that a standard supervised regression model is in fact sufficient to retrieve such attributes to a reasonable degree of accuracy: When evaluated on the prediction of both categorical and numeric attributes of countries and cities, the model consistently reduces baseline error by 30%, and is not far from the upper bound. Further analysis suggests that our model is able to \u201cobjectify\u201d distributional representations for entities, anchoring them more firmly in the external world in measurable ways.", "phrases": ["attribute", "regression model", "country", "word embedding", "knowledge basis"], "overall_score": 3.0806797692759917, "scores": [2.950341913974223, 1.1610755341333237, 1.041886161690671, 0.5227675420368423, 0.5227125520493899], "rank_score": 1.23975674077689} -{"id": "gong-etal-2011-cache", "title": "Cache-based Document-level Statistical Machine Translation", "abstract": "Statistical machine translation systems are usually trained on a large amount of bilingual sentence pairs and translate one sentence at a time, ignoring document-level information. In this paper, we propose a cache-based approach to document-level translation. Since caches mainly depend on relevant data to supervise subsequent decisions, it is critical to fill the caches with highly-relevant data of a reasonable size. 
In this paper, we present three kinds of caches to store relevant document-level information: 1) a dynamic cache, which stores bilingual phrase pairs from the best translation hypotheses of previous sentences in the test document; 2) a static cache, which stores relevant bilingual phrase pairs extracted from similar bilingual document pairs (i.e. source documents similar to the test document and their corresponding target documents) in the training parallel corpus; 3) a topic cache, which stores the target-side topic words related to the test document on the source side. In particular, three new features are designed to explore various kinds of document-level information in the above three kinds of caches. Evaluation shows the effectiveness of our cache-based approach to document-level translation with a performance improvement of 0.81 in BLEU score over Moses. Moreover, detailed analysis and discussion are presented to give new insights into document-level translation.", "phrases": ["statistical machine translation", "document-level information", "cache", "topic cache", "consistency"], "overall_score": 4.081278053288573, "scores": [0.9091047186304585, 2.0062302727030317, 1.2544177665575829, 1.1856060263120465, 0.8362068472543382], "rank_score": 1.2383131262914917} -{"id": "brody-lapata-2009-bayesian", "title": "Bayesian Word Sense Induction", "abstract": "Sense induction seeks to automatically identify word senses directly from a corpus. A key assumption underlying previous work is that the context surrounding an ambiguous word is indicative of its meaning. Sense induction is thus typically viewed as an unsupervised clustering problem where the aim is to partition a word's contexts into different classes, each representing a word sense. Our work places sense induction in a Bayesian context by modeling the contexts of the ambiguous word as samples from a multinomial distribution over senses which are in turn characterized as distributions over words. The Bayesian framework provides a principled way to incorporate a wide range of features beyond lexical co-occurrences and to systematically assess their utility on the sense induction task. The proposed approach yields improvements over state-of-the-art systems on a benchmark dataset.", "phrases": ["ambiguous word", "wsi", "latent dirichlet allocation", "lda model", "bayesian approach"], "overall_score": 3.8814450153614852, "scores": [1.5768980991749433, 1.3495951709619283, 1.2389948177060124, 1.082775906590322, 0.941262676741479], "rank_score": 1.2379053342349369} -{"id": "sennrich-etal-2016-edinburgh", "title": "Edinburgh Neural Machine Translation Systems for WMT 16", "abstract": "We participated in the WMT 2016 shared news translation task by building neural translation systems for four language pairs, each trained in both directions: English-Czech, English-German, English-Romanian and English-Russian. Our systems are based on an attentional encoder-decoder, using BPE subword segmentation for open-vocabulary translation with a fixed vocabulary. We experimented with using automatic back-translations of the monolingual News corpus as additional training data, pervasive dropout, and target-bidirectional models. All reported methods give substantial improvements, and we see improvements of 4.3--11.2 BLEU over our baseline systems. 
In the human evaluation, our systems were the (tied) best constrained system for 7 out of 8 translation directions in which we participated.", "phrases": ["machine translation", "wmt", "direction"], "overall_score": 4.566345572669478, "scores": [0.8419964624594217, 2.3484417991301143, 0.5231655274231146], "rank_score": 1.2378679296708837} -{"id": "deng-etal-2021-htcinfomax", "title": "HTCInfoMax: A Global Model for Hierarchical Text Classification via Information Maximization", "abstract": "The current state-of-the-art model HiAGM for hierarchical text classification has two limitations. First, it correlates each text sample with all labels in the dataset, which introduces irrelevant information. Second, it does not consider any statistical constraint on the label representations learned by the structure encoder, while constraints for representation learning have proved to be helpful in previous work. In this paper, we propose HTCInfoMax to address these issues by introducing information maximization which includes two modules: text-label mutual information maximization and label prior matching. The first module can model the interaction between each text sample and its ground truth labels explicitly, which filters out irrelevant information. The second one encourages the structure encoder to learn better representations with desired characteristics for all labels, which can better handle label imbalance in hierarchical text classification. Experimental results on two benchmark datasets demonstrate the effectiveness of the proposed HTCInfoMax.", "phrases": ["hierarchical text classification", "information maximization", "htcinfomax"], "overall_score": 1.3596247171125608, "scores": [1.8182019966662677, 0.9915484263322937, 0.9030008300232482], "rank_score": 1.2375837510072698} -{"id": "card-etal-2018-neural", "title": "Neural Models for Documents with Metadata", "abstract": "Most real-world document collections involve various types of metadata, such as author, source, and date, and yet the most commonly-used approaches to modeling text corpora ignore this information. While specialized models have been developed for particular applications, few are widely used in practice, as customization typically requires derivation of a custom inference algorithm. In this paper, we build on recent advances in variational inference methods and propose a general neural framework, based on topic models, to enable flexible incorporation of metadata and allow for rapid exploration of alternative models. Our approach achieves strong performance, with a manageable tradeoff between perplexity, coherence, and sparsity. Finally, we demonstrate the potential of our framework through an exploration of a corpus of articles about US immigration.", "phrases": ["metadata", "neural framework", "topic model", "scholar"], "overall_score": 3.075242863587539, "scores": [2.2285233225683343, 1.5845846758631255, 0.6031327852091863, 0.534034292245852], "rank_score": 1.2375687689716244} -{"id": "maier-etal-2012-annotating", "title": "Annotating Coordination in the Penn Treebank", "abstract": "Finding coordinations provides useful information for many NLP endeavors. However, the task has not received much attention in the literature. A major reason for that is that major treebanks do not reliably annotate coordination. This makes it virtually impossible to detect coordinations in which two conjuncts are separated by punctuation rather than by a coordinating conjunction. 
In this paper, we present an annotation scheme for the Penn Treebank which introduces a distinction between coordinating and non-coordinating punctuation. We discuss the general annotation guidelines as well as problematic cases. Finally, we show that this additional annotation allows the retrieval of a considerable number of coordinate structures beyond the ones having a coordinating conjunction.", "phrases": ["coordination", "penn treebank", "punctuation"], "overall_score": 1.991721391982148, "scores": [1.8250701831704674, 0.8195164853537574, 1.0679915737203958], "rank_score": 1.237526080748207} -{"id": "sun-etal-2019-hierarchical", "title": "Hierarchical Attention Prototypical Networks for Few-Shot Text Classification", "abstract": "Most of the current effective methods for text classification tasks are based on large-scale labeled data and a great number of parameters, but when the supervised training data are few and difficult to collect, these models are not available. In this work, we propose hierarchical attention prototypical networks (HAPN) for few-shot text classification. We design the feature level, word level, and instance level multi cross attention for our model to enhance the expressive ability of semantic space, so it can highlight or weaken the importance of the features, words, and instances separately. We verify the effectiveness of our model on two standard benchmark few-shot text classification datasets\u2014FewRel and CSID, and achieve the state-of-the-art performance. The visualization of hierarchical attention layers illustrates that our model can capture more important features, words, and instances. In addition, our attention mechanism increases support set augmentability and accelerates convergence speed in the training stage.", "phrases": ["prototypical network", "text classification", "hapn", "semantic space", "hierarchical attention"], "overall_score": 3.1740760070427045, "scores": [2.7942149298779335, 1.1178821590906463, 1.1123406061101933, 0.6103344904554528, 0.5526326414055234], "rank_score": 1.23748096538795} -{"id": "yang-etal-2021-document", "title": "Document-level Event Extraction via Parallel Prediction Networks", "abstract": "Document-level event extraction (DEE) is indispensable when events are described throughout a document. We argue that sentence-level extractors are ill-suited to the DEE task where event arguments always scatter across sentences and multiple events may co-exist in a document. It is a challenging task because it requires a holistic understanding of the document and an aggregated ability to assemble arguments across multiple sentences. In this paper, we propose an end-to-end model, which can extract structured events from a document in a parallel manner. Specifically, we first introduce a document-level encoder to obtain the document-aware representations. Then, a multi-granularity non-autoregressive decoder is used to generate events in parallel. Finally, to train the entire model, a matching loss function is proposed, which can bootstrap a global optimization. The empirical results on the widely used DEE dataset show that our approach significantly outperforms current state-of-the-art methods in the challenging DEE task. 
Code will be available at .", "phrases": ["parallel prediction network", "dee", "document-aware representation", "document-level event extraction"], "overall_score": 2.407863282187439, "scores": [2.5603676939102074, 1.3226439332869648, 0.5441977722633309, 0.5223783980159455], "rank_score": 1.2373969493691122} -{"id": "firat-etal-2016-zero", "title": "Zero-Resource Translation with Multi-Lingual Neural Machine Translation", "abstract": "In this paper, we propose a novel finetuning algorithm for the recently introduced multi-way, multilingual neural machine translation that enables zero-resource machine translation. When used together with novel many-to-one translation strategies, we empirically show that this finetuning algorithm allows the multi-way, multilingual model to translate a zero-resource language pair (1) as well as a single-pair neural translation model trained with up to 1M direct parallel sentences of the same language pair and (2) better than pivot-based translation strategy, while keeping only one additional copy of attention-related parameters.", "phrases": ["neural machine translation", "multilingual model", "zero-resource translation", "zero-shot language pair", "fine-tuning"], "overall_score": 4.652992755155694, "scores": [2.377455639278275, 0.887932044814021, 1.8452136146483602, 0.551237358267852, 0.5236774176894817], "rank_score": 1.237103214939598} -{"id": "wang-etal-2020-rat", "title": "RAT-SQL: Relation-Aware Schema Encoding and Linking for Text-to-SQL Parsers", "abstract": "When translating natural language questions into SQL queries to answer questions from a database, contemporary semantic parsing models struggle to generalize to unseen database schemas. The generalization challenge lies in (a) encoding the database relations in an accessible way for the semantic parser, and (b) modeling alignment between database columns and their mentions in a given query. We present a unified framework, based on the relation-aware self-attention mechanism, to address schema encoding, schema linking, and feature representation within a text-to-SQL encoder. On the challenging Spider dataset this framework boosts the exact match accuracy to 57.2%, surpassing its best counterparts by 8.7% absolute improvement. Further augmented with BERT, it achieves the new state-of-the-art performance of 65.6% on the Spider leaderboard. In addition, we observe qualitative improvements in the model's understanding of schema linking and alignment. Our implementation will be open-sourced at .", "phrases": ["schema encoding", "text-to-sql parser", "relation-aware transformer"], "overall_score": 4.46578203431032, "scores": [1.7030680991440332, 1.444806481922866, 0.5623582259481658], "rank_score": 1.2367442690050217} -{"id": "litkowski-2004-senseval", "title": "Senseval-3 task: Automatic labeling of semantic roles", "abstract": "The SENSEVAL-3 task to perform automatic labeling of semantic roles was designed to encourage research into and use of the FrameNet dataset. The task was based on the considerable expansion of the FrameNet data since the baseline study of automatic labeling of semantic roles by Gildea and Jurafsky. The FrameNet data provide an extensive body of \u201cgold standard\u201d data that can be used in lexical semantics research, as the basis for its further exploitation in NLP applications. Eight teams participated in the task, with a total of 20 runs. Discussions among participants during development of the task and the scoring of their runs contributed to a successful task. 
Participants used a wide variety of techniques, investigating many aspects of the FrameNet data. They achieved results showing considerable improvements over Gildea and Jurafsky\u2019s baseline study. Importantly, their efforts have contributed considerably to making the complex FrameNet dataset more accessible. They have amply demonstrated that FrameNet is a substantial lexical resource that will permit extensive further research and exploitation in NLP applications in the future.", "phrases": ["labeling", "semantic role", "senseval-3 task"], "overall_score": 2.404959705755771, "scores": [1.9942273186721347, 0.8705291391456595, 0.8429579609440082], "rank_score": 1.2359048062539342} -{"id": "hosseini-etal-2019-duality", "title": "Duality of Link Prediction and Entailment Graph Induction", "abstract": "Link prediction and entailment graph induction are often treated as different problems. In this paper, we show that these two problems are actually complementary. We train a link prediction model on a knowledge graph of assertions extracted from raw text. We propose an entailment score that exploits the new facts discovered by the link prediction model, and then form entailment graphs between relations. We further use the learned entailments to predict improved link prediction scores. Our results show that the two tasks can benefit from each other. The new entailment score outperforms prior state-of-the-art results on a standard entailment dataset and the new link prediction scores show improvements over the raw link prediction scores.", "phrases": ["link prediction", "entailment graph induction", "bipartite graph", "triple"], "overall_score": 2.404513942141821, "scores": [2.8196173791334225, 0.9734054488042524, 0.5762614382603188, 0.5734186500885525], "rank_score": 1.2356757290716365} -{"id": "bertoldi-federico-2009-domain", "title": "Domain Adaptation for Statistical Machine Translation with Monolingual Resources", "abstract": "Domain adaptation has recently gained interest in statistical machine translation to cope with the performance drop observed when testing conditions deviate from training conditions. The basic idea is that in-domain training data can be exploited to adapt all components of an already developed system. Previous work showed small performance gains by adapting from limited in-domain bilingual data. Here, we aim instead at significant performance gains by exploiting large but cheap monolingual in-domain data, either in the source or in the target language. We propose to synthesize a bilingual corpus by translating the monolingual adaptation data into the counterpart language. Investigations were conducted on a state-of-the-art phrase-based system trained on the Spanish--English part of the UN corpus, and adapted on the corresponding Europarl data. Translation, re-ordering, and language models were estimated after translating in-domain texts with the baseline. By optimizing the interpolation of these models on a development set the BLEU score was improved from 22.60% to 28.10% on a test set.", "phrases": ["bilingual corpus", "in-domain text", "domain adaptation", "pseudo"], "overall_score": 3.8743535611789692, "scores": [2.8771145445644435, 0.9738660993879239, 0.5670661703810411, 0.5245278413571894], "rank_score": 1.2356436639226493} -{"id": "ding-riloff-2018-human", "title": "Human Needs Categorization of Affective Events Using Labeled and Unlabeled Data", "abstract": "We often talk about events that impact us positively or negatively. 
For example, \u201cI got a job\u201d is good news, but \u201cI lost my job\u201d is bad news. When we discuss an event, we not only understand its affective polarity but also the reason why the event is beneficial or detrimental. For example, getting or losing a job has affective polarity primarily because it impacts us financially. Our work aims to categorize affective events based upon human need categories that often explain people's motivations and desires: PHYSIOLOGICAL, HEALTH, LEISURE, SOCIAL, FINANCIAL, COGNITION, and FREEDOM. We create classification models based on event expressions as well as models that use contexts surrounding event mentions. We also design a co-training model that learns from unlabeled data by simultaneously training event expression and event context classifiers in an iterative learning process. Our results show that co-training performs well, producing substantially better results than the individual classifiers.", "phrases": ["affective event", "unlabeled data", "human need category"], "overall_score": 2.2132991196248257, "scores": [2.309345135501261, 0.8270982866815878, 0.5693538530145582], "rank_score": 1.2352657583991355} -{"id": "springorum-etal-2012-automatic", "title": "Automatic classification of German an particle verbs", "abstract": "The current study works at the interface of theoretical and computational linguistics to explore the semantic properties of an particle verbs, i.e., German particle verbs with the particle an. Based on a thorough analysis of the particle verbs from a theoretical point of view, we identified empirical features and performed an automatic semantic classification. A focus of the study was on the mutual profit of theoretical and empirical perspectives with respect to salient semantic properties of the an particle verbs: (a) how can we transform the theoretical insights into empirical, corpus-based features, (b) to what extent can we replicate the theoretical classification by a machine learning approach, and (c) can the computational analysis in turn deepen our insights into the semantic properties of the particle verbs? The best classification result of 70% correct class assignments was reached through a GermaNet-based generalization of direct object nouns plus a prepositional phrase feature. These particle verb features in combination with a detailed analysis of the results at the same time confirmed and enlarged our knowledge about salient properties.", "phrases": ["german", "particle verb", "automatic classification"], "overall_score": 2.2132024344265506, "scores": [1.950827158703145, 0.8786522055565953, 0.8761560278279029], "rank_score": 1.2352117973625478} -{"id": "shimizu-nakagawa-2007-structural", "title": "Structural Correspondence Learning for Dependency Parsing", "abstract": "Following (Blitzer et al., 2006), we present an application of structural correspondence learning to non-projective dependency parsing (McDonald et al., 2005). To induce the correspondences among dependency edges from different domains, we looked at every two tokens in a sentence and examined whether or not there is a preposition, a determiner or a helping verb between them. Three binary linear classifiers were trained to predict the existence of a preposition, etc., on unlabeled data and we used singular value decomposition to induce new features. During the training, the parser was trained with these additional features in addition to those described in (McDonald et al., 2005). 
We discriminatively trained our parser in an on-line fashion using a variant of the voted perceptron (Collins, 2002; Collins and Roark, 2004; Crammer and Singer, 2003).", "phrases": ["dependency parsing", "preposition", "determiner"], "overall_score": 1.712093783895963, "scores": [2.603319046904428, 0.5662772231230472, 0.5354475473178146], "rank_score": 1.2350146057817633} -{"id": "klein-nabi-2020-contrastive", "title": "Contrastive Self-Supervised Learning for Commonsense Reasoning", "abstract": "We propose a self-supervised method to solve Pronoun Disambiguation and Winograd Schema Challenge problems. Our approach exploits the characteristic structure of training corpora related to so-called \u201ctrigger\u201d words, which are responsible for flipping the answer in pronoun disambiguation. We achieve such commonsense reasoning by constructing pair-wise contrastive auxiliary predictions. To this end, we leverage a mutual exclusive loss regularized by a contrastive margin. Our architecture is based on the recently introduced transformer networks, BERT, that exhibits strong performance on many NLP benchmarks. Empirical results show that our method alleviates the limitation of current supervised approaches for commonsense reasoning. This study opens up avenues for exploiting inexpensive self-supervision to achieve performance gain in commonsense reasoning tasks.", "phrases": ["commonsense reasoning", "loss", "contrastive margin", "contrastive self-supervised learning"], "overall_score": 2.842022938942396, "scores": [2.882915238296292, 0.9891959621057448, 0.5402064742033693, 0.5247818446951737], "rank_score": 1.2342748798251448} -{"id": "choi-etal-2006-joint", "title": "Joint Extraction of Entities and Relations for Opinion Recognition", "abstract": "We present an approach for the joint extraction of entities and relations in the context of opinion recognition and analysis. We identify two types of opinion-related entities --- expressions of opinions and sources of opinions --- along with the linking relation that exists between them. Inspired by Roth and Yih (2004), we employ an integer linear programming approach to solve the joint opinion recognition task, and show that global, constraint-based inference can significantly boost the performance of both relation extraction and the extraction of opinion-related entities. Performance further improves when a semantic role labeling system is incorporated. The resulting system achieves F-measures of 79 and 69 for entity and relation extraction, respectively, improving substantially over prior results in the area.", "phrases": ["opinion recognition", "integer linear programming", "joint extraction", "opinion holder extraction", "named-entity tagger"], "overall_score": 4.1979501421668735, "scores": [1.9565903952705384, 1.8802322909674263, 0.9025855654919409, 0.8295333399111037, 0.6023411524897465], "rank_score": 1.234256548826151} -{"id": "lee-etal-2011-stanfords", "title": "Stanford's Multi-Pass Sieve Coreference Resolution System at the CoNLL-2011 Shared Task", "abstract": "This paper details the coreference resolution system submitted by Stanford at the CoNLL-2011 shared task. Our system is a collection of deterministic coreference resolution models that incorporate lexical, syntactic, semantic, and discourse information. All these models use global document-level information by sharing mention attributes, such as gender and number, across mentions in the same cluster. 
We participated in both the open and closed tracks and submitted results using both predicted and gold mentions. Our system was ranked first in both tracks, with a score of 57.8 in the closed track and 58.3 in the open track.", "phrases": ["coreference resolution system", "stanford", "pronoun", "candidate mention", "rule-based approach"], "overall_score": 4.5512371541050705, "scores": [2.8373656556606472, 1.405692238406968, 0.8502719418867016, 0.5509574505556134, 0.5245740312940578], "rank_score": 1.2337722635607975} -{"id": "goldberg-tsarfaty-2008-single", "title": "A Single Generative Model for Joint Morphological Segmentation and Syntactic Parsing", "abstract": "Morphological processes in Semitic languages deliver space-delimited words which introduce multiple, distinct, syntactic units into the structure of the input sentence. These words are in turn highly ambiguous, breaking the assumption underlying most parsers that the yield of a tree for a given sentence is known in advance. Here we propose a single joint model for performing both morphological segmentation and syntactic disambiguation which bypasses the associated circularity. Using a treebank grammar, a data-driven lexicon, and a linguistically motivated unknown-tokens handling technique our model outperforms previous pipelined, integrated or factorized systems for Hebrew morphological and syntactic processing, yielding an error reduction of 12% over the best published results so far.", "phrases": ["single generative model", "joint morphological segmentation", "syntactic parsing", "semitic language"], "overall_score": 3.812849773649436, "scores": [1.8628699418636396, 1.394075273139121, 0.8492138053542612, 0.8279042688830209], "rank_score": 1.2335158223100107} -{"id": "zlatkova-etal-2019-fact", "title": "Fact-Checking Meets Fauxtography: Verifying Claims About Images", "abstract": "The recent explosion of false claims in social media and on the Web in general has given rise to a lot of manual fact-checking initiatives. Unfortunately, the number of claims that need to be fact-checked is several orders of magnitude larger than what humans can handle manually. Thus, there has been a lot of research aiming at automating the process. Interestingly, previous work has largely ignored the growing number of claims about images. This is despite the fact that visual imagery is more influential than text and naturally appears alongside fake news. Here we aim at bridging this gap. In particular, we create a new dataset for this problem, and we explore a variety of features modeling the claim, the image, and the relationship between the claim and the image. The evaluation results show sizable improvements over the baseline. We release our dataset, hoping to enable further research on fact-checking claims about images.", "phrases": ["claim", "image", "fake news"], "overall_score": 2.209181021656391, "scores": [2.212938226410052, 0.9335079240885048, 0.552456061986762], "rank_score": 1.2329674041617729} -{"id": "moneglia-etal-2012-imagact", "title": "The IMAGACT Cross-linguistic Ontology of Action. A new infrastructure for natural language disambiguation", "abstract": "Action verbs, which are highly frequent in speech, cause disambiguation problems that are relevant to Language Technologies. This is a consequence of the peculiar way each natural language categorizes Action i.e. it is a consequence of semantic factors. &#x0D;
Action verbs are frequently \u201cgeneral\u201d, since they extend productively to actions belonging to different ontological types. Moreover, each language categorizes action in its own way and therefore the cross-linguistic reference to everyday activities is puzzling. This paper briefly sketches the IMAGACT project, which aims at setting up a cross-linguistic Ontology of Action for grounding disambiguation tasks in this crucial area of the lexicon. The project derives information on the actual variation of action verbs in English and Italian from spontaneous speech corpora, where references to action are high in frequency. Crucially it makes use of the universal language of images to identify action types, avoiding the underdeterminacy of semantic definitions. Action concept entries are implemented as prototypic scenes; this will make it easier to extend the Ontology to other languages.", "phrases": ["cross-linguistic ontology", "action verb", "disambiguation task"], "overall_score": 1.983289414027101, "scores": [1.9900063234767729, 1.154359401631255, 0.5524952697238712], "rank_score": 1.2322869982772997} -{"id": "shao-etal-2017-generating", "title": "Generating High-Quality and Informative Conversation Responses with Sequence-to-Sequence Models", "abstract": "Sequence-to-sequence models have been applied to the conversation response generation problem where the source sequence is the conversation history and the target sequence is the response. Unlike translation, conversation responding is inherently creative. The generation of long, informative, coherent, and diverse responses remains a hard task. In this work, we focus on the single turn setting. We add self-attention to the decoder to maintain coherence in longer responses, and we propose a practical approach, called the glimpse-model, for scaling to large datasets. We introduce a stochastic beam-search algorithm with segment-by-segment reranking which lets us inject diversity earlier in the generation process. We trained on a combined data set of over 2.3B conversation messages mined from the web. In human evaluation studies, our method produces longer responses overall, with a higher proportion rated as acceptable and excellent as length increases, compared to baseline sequence-to-sequence models with explicit length-promotion. A back-off strategy produces better responses overall, in the full spectrum of lengths.", "phrases": ["conversation", "sequence-to-sequence model", "diverse response"], "overall_score": 3.7514675330260294, "scores": [2.274806630865377, 0.833291374572317, 0.5885088776745548], "rank_score": 1.2322022943707496} -{"id": "cohn-etal-2016-incorporating", "title": "Incorporating Structural Alignment Biases into an Attentional Neural Translation Model", "abstract": "Neural encoder-decoder models of machine translation have achieved impressive results, rivalling traditional translation models. However their modelling formulation is overly simplistic, and omits several key inductive biases built into traditional models. In this paper we extend the attentional neural translation model to include structural biases from word based alignment models, including positional bias, Markov conditioning, fertility and agreement over translation directions. &#x0D;
We show improvements over a baseline attentional model and standard phrase-based model over several language pairs, evaluating on difficult languages in a low resource setting.", "phrases": ["translation direction", "attention model", "structural bias", "machine translation model"], "overall_score": 4.147601109999789, "scores": [1.5523762586303875, 1.2594322182138502, 1.1791823199040334, 0.9359313620529035], "rank_score": 1.2317305397002938} -{"id": "fazly-etal-2009-unsupervised", "title": "Unsupervised Type and Token Identification of Idiomatic Expressions", "abstract": "Idiomatic expressions are plentiful in everyday language, yet they remain mysterious, as it is not clear exactly how people learn and understand them. They are of special interest to linguists, psycholinguists, and lexicographers, mainly because of their syntactic and semantic idiosyncrasies as well as their unclear lexical status. Despite a great deal of research on the properties of idioms in the linguistics literature, there is not much agreement on which properties are characteristic of these expressions. Because of their peculiarities, idiomatic expressions have mostly been overlooked by researchers in computational linguistics. In this article, we look into the usefulness of some of the identified linguistic properties of idioms for their automatic recognition. Specifically, we develop statistical measures that each model a specific property of idiomatic expressions by looking at their actual usage patterns in text. We use these statistical measures in a type-based classification task where we automatically separate idiomatic expressions (expressions with a possible idiomatic interpretation) from similar-on-the-surface literal phrases (for which no idiomatic interpretation is possible). In addition, we use some of the measures in a token identification task where we distinguish idiomatic and literal usages of potentially idiomatic expressions in context.", "phrases": ["idiomatic expression", "token identification task", "noun", "lexico-syntactic fixedness", "unsupervised method"], "overall_score": 4.603764134486194, "scores": [3.325380186903558, 1.0875487565606037, 0.6029070411885706, 0.5885760555254922, 0.5541902503588975], "rank_score": 1.2317204581074246} -{"id": "wang-etal-2017-statistical", "title": "A Statistical Framework for Product Description Generation", "abstract": "We present in this paper a statistical framework that generates accurate and fluent product description from product attributes. Specifically, after extracting templates and learning writing knowledge from attribute-description parallel data, we use the learned knowledge to decide what to say and how to say for product description generation. To evaluate accuracy and fluency for the generated descriptions, in addition to BLEU and Recall, we propose to measure what to say (in terms of attribute coverage) and to measure how to say (by attribute-specified generation) separately. 
Experimental results show that our framework is effective.", "phrases": ["statistical framework", "product description generation", "template"], "overall_score": 2.2066681382999267, "scores": [1.807840324377511, 0.969953226143448, 0.9169012612507803], "rank_score": 1.2315649372572464} -{"id": "nelken-shieber-2006-towards", "title": "Towards Robust Context-Sensitive Sentence Alignment for Monolingual Corpora", "abstract": "Aligning sentences belonging to comparable monolingual corpora has been suggested as a first step towards training text rewriting algorithms, for tasks such as summarization or paraphrasing. We present here a new monolingual sentence alignment algorithm, combining a sentence-based TF*IDF score, turned into a probability distribution using logistic regression, with a global alignment dynamic programming algorithm. Our approach provides a simpler and more robust solution achieving a substantial improvement in accuracy over existing systems.", "phrases": ["monolingual corpora", "tf*idf score", "logistic regression"], "overall_score": 2.951799006871147, "scores": [1.956389757211609, 0.8771028949480282, 0.8594947443579323], "rank_score": 1.2309957988391897} -{"id": "belinkov-etal-2017-evaluating", "title": "Evaluating Layers of Representation in Neural Machine Translation on Part-of-Speech and Semantic Tagging Tasks", "abstract": "While neural machine translation (NMT) models provide improved translation quality in an elegant framework, it is less clear what they learn about language. Recent work has started evaluating the quality of vector representations learned by NMT models on morphological and syntactic tasks. In this paper, we investigate the representations learned at different layers of NMT encoders. We train NMT systems on parallel data and use the models to extract features for training a classifier on two tasks: part-of-speech and semantic tagging. We then measure the performance of the classifier as a proxy to the quality of the original NMT model for the given task. Our quantitative analysis yields interesting insights regarding representation learning in NMT models. For instance, we find that higher layers are better at learning semantics while lower layers tend to be better for part-of-speech tagging. We also observe little effect of the target language on source-side representations, especially in higher quality models.", "phrases": ["neural machine translation", "part-of-speech", "semantic tagging"], "overall_score": 3.333096646422973, "scores": [2.238241387818928, 0.8732529407179798, 0.5809371984706357], "rank_score": 1.2308105090025145} -{"id": "rosti-etal-2007-combining", "title": "Combining Outputs from Multiple Machine Translation Systems", "abstract": "Currently there are several approaches to machine translation (MT) based on different paradigms; e.g., phrasal, hierarchical and syntax-based. These three approaches yield similar translation accuracy despite using fairly different levels of linguistic knowledge. The availability of such a variety of systems has led to a growing interest toward finding better translations by combining outputs from multiple systems. This paper describes three different approaches to MT system combination. These combination methods operate on sentence, phrase and word level exploiting information from N-best lists, system scores and target-to-source phrase alignments. &#x0D;
The word-level combination provides the most robust gains but the best results on the development test sets (NIST MT05 and the newsgroup portion of GALE 2006 dry-run) were achieved by combining all three methods.", "phrases": ["system combination", "source sentence", "confusion network", "hypothesis", "confidence score"], "overall_score": 4.26544977770989, "scores": [1.6930443699472932, 1.5434482139878243, 1.2520743891268495, 1.1104868114167397, 0.5546894569842818], "rank_score": 1.2307486482925978} -{"id": "cettolo-etal-2012-wit3", "title": "WIT3: Web Inventory of Transcribed and Translated Talks", "abstract": "We describe here a Web inventory named WIT3 that offers access to a collection of transcribed and translated talks. The core of WIT3 is the TED Talks corpus, that basically redistributes the original content published by the TED Conference website (http://www.ted.com). Since 2007, the TED Conference, based in California, has been posting all video recordings of its talks together with subtitles in English and their translations in more than 80 languages. Aside from its cultural and social relevance, this content, which is published under the Creative Commons BY-NC-ND license, also represents a precious language resource for the machine translation research community, thanks to its size, variety of topics, and covered languages. This effort repurposes the original content in a way which is more convenient for machine translation researchers.", "phrases": ["web inventory", "transcribed", "parallel corpus"], "overall_score": 3.686650634741801, "scores": [1.739527387150138, 1.412898154901596, 0.5394771028746814], "rank_score": 1.2306342149754719} -{"id": "ghaddar-langlais-2016-wikicoref", "title": "WikiCoref: An English Coreference-annotated Corpus of Wikipedia Articles", "abstract": "This paper presents WikiCoref, an English corpus annotated for anaphoric relations, where all documents are from the English version of Wikipedia. Our annotation scheme follows the one of OntoNotes with a few disparities. We annotated each markable with coreference type, mention type and the equivalent Freebase topic. Since most similar annotation efforts concentrate on very specific types of written text, mainly newswire, there is a lack of resources for otherwise over-used Wikipedia texts. The corpus described in this paper addresses this issue. We present a freely available resource we initially devised for improving coreference resolution algorithms dedicated to Wikipedia texts. Our corpus has no restriction on the topics of the documents being annotated, and documents of various sizes have been considered for annotation.", "phrases": ["anaphoric relation", "wikicoref", "out-of-domain evaluation", "ontonotes guideline", "small dataset"], "overall_score": 2.950629757245996, "scores": [3.5778996339559446, 0.8842420476036149, 0.5887995272325769, 0.5573154664451824, 0.5442842441218866], "rank_score": 1.230508183871841} -{"id": "aharoni-goldberg-2017-morphological", "title": "Morphological Inflection Generation with Hard Monotonic Attention", "abstract": "We present a neural model for morphological inflection generation which employs a hard attention mechanism, inspired by the nearly-monotonic alignment commonly found between the characters in a word and the characters in its inflection. We evaluate the model on three previously studied morphological inflection generation datasets and show that it provides state of the art results in various setups compared to previous neural and non-neural approaches. &#x0D;
Finally we present an analysis of the continuous representations learned by both the hard and soft (Bahdanau, 2014) attention models for the task, shedding some light on the features such models extract.", "phrases": ["hard monotonic attention", "morphological inflection generation", "input character", "monotonic alignment"], "overall_score": 3.6227318885270825, "scores": [2.075209527795533, 1.4018897457365285, 0.8969356789506594, 0.5474212762385332], "rank_score": 1.2303640571803136} -{"id": "spitkovsky-etal-2013-breaking", "title": "Breaking Out of Local Optima with Count Transforms and Model Recombination: A Study in Grammar Induction", "abstract": "Many statistical learning problems in NLP call for local model search methods. But accuracy tends to suffer with current techniques, which often explore either too narrowly or too broadly: hill-climbers can get stuck in local optima, whereas samplers may be inefficient. We propose to arrange individual local optimizers into organized networks. Our building blocks are operators of two types: (i) transform, which suggests new places to search, via non-random restarts from already-found local optima; and (ii) join, which merges candidate solutions to find better optima. Experiments on grammar induction show that pursuing different transforms (e.g., discarding parts of a learned model or ignoring portions of training data) results in improvements. Groups of locally-optimal solutions can be further perturbed jointly, by constructing mixtures. Using these tools, we designed several modular dependency grammar induction networks of increasing complexity. Our complete system achieves 48.6% accuracy (directed dependency macro-average over all 19 languages in the 2006/7 CoNLL data) \u2014 more than 5% higher than the previous state-of-the-art.", "phrases": ["local optima", "model recombination", "grammar induction", "punctuation"], "overall_score": 2.703045423639802, "scores": [2.605057710877875, 0.9175507840811405, 0.8702410304473133, 0.5279864264958808], "rank_score": 1.2302089879755524} -{"id": "foster-etal-2006-phrasetable", "title": "Phrasetable Smoothing for Statistical Machine Translation", "abstract": "We discuss different strategies for smoothing the phrasetable in Statistical MT, and give results over a range of translation settings. We show that any type of smoothing is a better idea than the relative-frequency estimates that are often used. The best smoothing techniques yield consistent gains of approximately 1% (absolute) according to the BLEU metric.", "phrases": ["smoothing", "translation model probability", "phrase pair"], "overall_score": 3.246501236673132, "scores": [2.474650573965069, 0.6261245906090305, 0.5897485693075339], "rank_score": 1.2301745779605444} -{"id": "platanios-etal-2019-competence", "title": "Competence-based Curriculum Learning for Neural Machine Translation", "abstract": "Current state-of-the-art NMT systems use large neural networks that are not only slow to train, but also often require many heuristics and optimization tricks, such as specialized learning rate schedules and large batch sizes. This is undesirable as it requires extensive hyperparameter tuning. In this paper, we propose a curriculum learning framework for NMT that reduces training time, reduces the need for specialized heuristics or large batch sizes, and results in overall better performance. 
Our framework consists of a principled way of deciding which training samples are shown to the model at different times during training, based on the estimated difficulty of a sample and the current competence of the model. Filtering training samples in this manner prevents the model from getting stuck in bad local optima, making it converge faster and reach a better solution than the common approach of uniformly sampling training examples. Furthermore, the proposed method can be easily applied to existing NMT models by simply modifying their input data pipelines. We show that our framework can help improve the training time and the performance of both recurrent neural network models and Transformers, achieving up to a 70% decrease in training time, while at the same time obtaining accuracy improvements of up to 2.2 BLEU.", "phrases": ["curriculum learning", "neural machine translation", "competence", "training example"], "overall_score": 4.007947495101482, "scores": [1.3723053889213301, 1.2959332059032476, 1.2074577414469572, 1.0449037114303739], "rank_score": 1.2301500119254771} -{"id": "konstantinova-etal-2012-review", "title": "A review corpus annotated for negation, speculation and their scope", "abstract": "This paper presents a freely available resource for research on handling negation and speculation in review texts. The SFU Review Corpus, consisting of 400 documents of movie, book, and consumer product reviews, was annotated at the token level with negative and speculative keywords and at the sentence level with their linguistic scope. We report statistics on corpus size and the consistency of annotations. The annotated corpus will be useful in many applications, such as document mining and sentiment analysis.", "phrases": ["negation", "sfu review corpus", "token level", "speculative keyword"], "overall_score": 2.8322071123822865, "scores": [2.3724025796552177, 1.5008833408565136, 0.5253221182232837, 0.5214396433240656], "rank_score": 1.23001192051477} -{"id": "bjerva-etal-2016-semantic", "title": "Semantic Tagging with Deep Residual Networks", "abstract": "We propose a novel semantic tagging task, semtagging, tailored for the purpose of multilingual semantic parsing, and present the first tagger using deep residual networks (ResNets). Our tagger uses both word and character representations, and includes a novel residual bypass architecture. We evaluate the tagset both intrinsically on the new task of semantic tagging, as well as on Part-of-Speech (POS) tagging. Our system, consisting of a ResNet and an auxiliary loss function predicting our semantic tags, significantly outperforms prior results on English Universal Dependencies POS tagging (95.71% accuracy on UD v1.2 and 95.67% accuracy on UD v1.3).", "phrases": ["deep residual network", "resnets", "semantic tagging"], "overall_score": 2.701651298730624, "scores": [2.588052074640498, 0.5694131810551154, 0.5312582274623167], "rank_score": 1.2295744943859768} -{"id": "belinkov-etal-2019-dont", "title": "Don't Take the Premise for Granted: Mitigating Artifacts in Natural Language Inference", "abstract": "Natural Language Inference (NLI) datasets often contain hypothesis-only biases\u2014artifacts that allow models to achieve non-trivial performance without learning whether a premise entails a hypothesis. We propose two probabilistic methods to build models that are more robust to such biases and better transfer across datasets. 
In contrast to standard approaches to NLI, our methods predict the probability of a premise given a hypothesis and NLI label, discouraging models from ignoring the premise. We evaluate our methods on synthetic and existing NLI datasets by training on datasets containing biases and testing on datasets containing no (or different) hypothesis-only biases. Our results indicate that these methods can make NLI models more robust to dataset-specific artifacts, transferring better than a baseline architecture in 9 out of 12 NLI datasets. Additionally, we provide an extensive analysis of the interplay of our methods with known biases in NLI datasets, as well as the effects of encouraging models to ignore biases and fine-tuning on target datasets.", "phrases": ["premise", "artifact", "natural language inference", "nli dataset"], "overall_score": 2.9480811090774455, "scores": [1.7419270550050747, 1.7223009334417325, 0.9192142415302719, 0.5343390301477666], "rank_score": 1.2294453150312115} -{"id": "wang-wang-2019-riemannian", "title": "Riemannian Normalizing Flow on Variational Wasserstein Autoencoder for Text Modeling", "abstract": "Recurrent Variational Autoencoder has been widely used for language modeling and text generation tasks. These models often face a difficult optimization problem, also known as KL vanishing, where the posterior easily collapses to the prior and model will ignore latent codes in generative tasks. To address this problem, we introduce an improved Variational Wasserstein Autoencoder (WAE) with Riemannian Normalizing Flow (RNF) for text modeling. The RNF transforms a latent variable into a space that respects the geometric characteristics of input space, which makes posterior impossible to collapse to the non-informative prior. The Wasserstein objective minimizes the distance between marginal distribution and the prior directly and therefore does not force the posterior to match the prior. Empirical experiments show that our model avoids KL vanishing over a range of datasets and has better performance in tasks such as language modeling, likelihood approximation, and text generation. Through a series of experiments and analysis over latent space, we show that our model learns latent distributions that respect latent space geometry and is able to generate sentences that are more diverse.", "phrases": ["variational wasserstein autoencoder", "text modeling", "riemannian normalizing flow"], "overall_score": 1.3505611473648087, "scores": [2.0024126180736275, 0.8542981275374678, 0.8312904572576975], "rank_score": 1.2293337342895976} -{"id": "ghosh-etal-2017-affect", "title": "Affect-LM: A Neural Language Model for Customizable Affective Text Generation", "abstract": "Human verbal communication includes affective messages which are conveyed through use of emotionally colored words. There has been a lot of research effort in this direction but the problem of integrating state-of-the-art neural language models with affective information remains an area ripe for exploration. In this paper, we propose an extension to an LSTM (Long Short-Term Memory) language model for generation of conversational text, conditioned on affect categories. Our proposed model, Affect-LM enables us to customize the degree of emotional content in generated sentences through an additional design parameter. Perception studies conducted using Amazon Mechanical Turk show that Affect-LM can generate naturally looking emotional sentences without sacrificing grammatical correctness. 
Affect-LM also learns affect-discriminative word representations, and perplexity experiments show that additional affective information in conversational text can improve language model prediction.", "phrases": ["neural language model", "text generation", "affect-lm"], "overall_score": 3.4069970388082793, "scores": [2.240819265547761, 0.8523887024772939, 0.5932353311340107], "rank_score": 1.228814433053022} -{"id": "chen-etal-2017-recurrent", "title": "Recurrent Attention Network on Memory for Aspect Sentiment Analysis", "abstract": "We propose a novel framework based on neural networks to identify the sentiment of opinion targets in a comment/review. Our framework adopts multiple-attention mechanism to capture sentiment features separated by a long distance, so that it is more robust against irrelevant information. The results of multiple attentions are non-linearly combined with a recurrent neural network, which strengthens the expressive power of our model for handling more complications. The weighted-memory mechanism not only helps us avoid the labor-intensive feature engineering work, but also provides a tailor-made memory for different opinion targets of a sentence. We examine the merit of our model on four datasets: two are from SemEval2014, i.e. reviews of restaurants and laptops; a twitter dataset, for testing its performance on social media data; and a Chinese news comment dataset, for testing its language sensitivity. The experimental results show that our model consistently outperforms the state-of-the-art methods on different types of data.", "phrases": ["attention network", "memory", "aspect sentiment analysis", "sentence representation", "deep learning"], "overall_score": 4.649672322889181, "scores": [3.210827109845538, 0.9046492282308871, 0.9478487650871057, 0.5449303661457539, 0.5352954262556061], "rank_score": 1.228710179112978} -{"id": "ford-etal-2018-importance", "title": "The Importance of Generation Order in Language Modeling", "abstract": "Neural language models are a critical component of state-of-the-art systems for machine translation, summarization, audio transcription, and other tasks. These language models are almost universally autoregressive in nature, generating sentences one token at a time from left to right. This paper studies the influence of token generation order on model quality via a novel two-pass language model that produces partially-filled sentence \u201ctemplates\u201d and then fills in missing tokens. We compare various strategies for structuring these two passes and observe a surprisingly large variation in model quality. We find the most effective strategy generates function words in the first pass followed by content words in the second. We believe these experimental results justify a more extensive investigation of the generation order for neural language models.", "phrases": ["generation order", "language modeling", "partially-filled sentence", "template"], "overall_score": 2.5548778576718494, "scores": [2.380426242677866, 0.8937739150137011, 1.0675153794338625, 0.5728306166614983], "rank_score": 1.2286365384467322} -{"id": "kate-wong-2010-semantic", "title": "Semantic Parsing: The Task, the State of the Art and the Future", "abstract": "Semantic parsing is the task of mapping natural language sentences into complete formal meaning representations which a computer can execute for some domain-specific application. 
This is a challenging task and is critical for developing computing systems that can understand and process natural language input, for example, a computing system that answers natural language queries about a database, or a robot that takes commands in natural language. While the importance of semantic parsing was realized a long time ago, it is only in the past few years that the state-of-the-art in semantic parsing has been significantly advanced with more accurate and robust semantic parser learners that use a variety of statistical learning methods. Semantic parsers have also been extended to work beyond a single sentence, for example, to use discourse contexts and to learn domain-specific language from perceptual contexts. Some of the future research directions of semantic parsing with potentially large impacts include mapping entire natural language documents into machine processable form to enable automated reasoning about them and to convert natural language web pages into machine processable representations for the Semantic Web to support automated high-end web applications. This tutorial will introduce the semantic parsing task and will bring the audience up-to-date with the current research and state-of-the-art in semantic parsing. It will also provide insights about semantic parsing and how it relates to and differs from other natural language processing tasks. It will point out research challenges and some promising future directions for semantic parsing.", "phrases": ["mapping", "domain-specific application", "semantic parsing"], "overall_score": 1.7027759352401048, "scores": [2.5747414403824616, 0.5814798269418597, 0.528658328949628], "rank_score": 1.228293198757983} -{"id": "zhang-etal-2018-learning-control", "title": "Learning to Control the Specificity in Neural Response Generation", "abstract": "In conversation, a general response (e.g., \u201cI don't know\u201d) could correspond to a large variety of input utterances. Previous generative conversational models usually employ a single model to learn the relationship between different utterance-response pairs, thus tend to favor general and trivial responses which appear frequently. To address this problem, we propose a novel controlled response generation mechanism to handle different utterance-response relationships in terms of specificity. Specifically, we introduce an explicit specificity control variable into a sequence-to-sequence model, which interacts with the usage representation of words through a Gaussian Kernel layer, to guide the model to generate responses at different specificity levels. We describe two ways to acquire distant labels for the specificity control variable in learning. Empirical studies show that our model can significantly outperform the state-of-the-art response generation models under both automatic and human evaluations.", "phrases": ["control", "specificity", "neural response generation", "conversation", "sequence-to-sequence model"], "overall_score": 3.5492499413693137, "scores": [3.2462474373906502, 0.9563880101403465, 0.8252661796271175, 0.5652442293918142, 0.5466351797011509], "rank_score": 1.227956207250216} -{"id": "doan-etal-2021-phomt", "title": "PhoMT: A High-Quality and Large-Scale Benchmark Dataset for Vietnamese-English Machine Translation", "abstract": "We introduce a high-quality and large-scale Vietnamese-English parallel dataset of 3.02M sentence pairs, which is 2.9M pairs larger than the benchmark Vietnamese-English machine translation corpus IWSLT15. 
We conduct experiments comparing strong neural baselines and well-known automatic translation engines on our dataset and find that in both automatic and human evaluations: the best performance is obtained by fine-tuning the pre-trained sequence-to-sequence denoising auto-encoder mBART. To our best knowledge, this is the first large-scale Vietnamese-English machine translation study. We hope our publicly available dataset and study can serve as a starting point for future research and applications on Vietnamese-English machine translation. We release our dataset at: ", "phrases": ["high-quality", "vietnamese-english machine translation", "phomt"], "overall_score": 1.7015047660149867, "scores": [1.8571118978307843, 0.9721619148956662, 0.8528549192416861], "rank_score": 1.2273762439893787} -{"id": "kozareva-etal-2008-semantic", "title": "Semantic Class Learning from the Web with Hyponym Pattern Linkage Graphs", "abstract": "We present a novel approach to weakly supervised semantic class learning from the web, using a single powerful hyponym pattern combined with graph structures, which capture two properties associated with pattern-based extractions: popularity and productivity. Intuitively, a candidate is popular if it was discovered many times by other instances in the hyponym pattern. A candidate is productive if it frequently leads to the discovery of other instances. Together, these two measures capture not only frequency of occurrence, but also cross-checking that the candidate occurs both near the class name and near other class members. We developed two algorithms that begin with just a class name and one seed instance and then automatically generate a ranked list of new class instances. We conducted experiments on four semantic classes and consistently achieved high accuracies.", "phrases": ["web", "hyponym pattern", "class member", "semantic class learning", "concept pair"], "overall_score": 4.253540093754827, "scores": [2.7080349033666815, 0.9371331276293563, 1.414019955862414, 0.5510949461816399, 0.5262782664423743], "rank_score": 1.227312239896493} -{"id": "clark-etal-2018-neural", "title": "Neural Text Generation in Stories Using Entity Representations as Context", "abstract": "We introduce an approach to neural text generation that explicitly represents entities mentioned in the text. Entity representations are vectors that are updated as the text proceeds; they are designed specifically for narrative text like fiction or news stories. Our experiments demonstrate that modeling entities offers a benefit in two automatic evaluations: mention generation (in which a model chooses which entity to mention next and which words to use in the mention) and selection between a correct next sentence and a distractor from later in the same story. 
We also conduct a human evaluation on automatically generated text in story contexts; this study supports our emphasis on entities and suggests directions for further research.", "phrases": ["story", "neural text generation", "entity context", "language model"], "overall_score": 3.735238273460954, "scores": [2.598631169988744, 0.9146900239750829, 0.8661365582218843, 0.528028856786725], "rank_score": 1.2268716522431091} -{"id": "nivre-2008-algorithms", "title": "Algorithms for Deterministic Incremental Dependency Parsing", "abstract": "Abstract Parsing algorithms that process the input from left to right and construct a single derivation have often been considered inadequate for natural language parsing because of the massive ambiguity typically found in natural language grammars. Nevertheless, it has been shown that such algorithms, combined with treebank-induced classifiers, can be used to build highly accurate disambiguating parsers, in particular for dependency-based syntactic representations. In this article, we first present a general framework for describing and analyzing algorithms for deterministic incremental dependency parsing, formalized as transition systems. We then describe and analyze two families of such algorithms: stack-based and list-based algorithms. In the former family, which is restricted to projective dependency structures, we describe an arc-eager and an arc-standard variant; in the latter family, we present a projective and a non-projective variant. For each of the four algorithms, we give proofs of correctness and complexity. In addition, we perform an experimental evaluation of all algorithms in combination with SVM classifiers for predicting the next parsing action, using data from thirteen languages. We show that all four algorithms give competitive accuracy, although the non-projective list-based algorithm generally outperforms the projective algorithms for languages with a non-negligible proportion of non-projective constructions. However, the projective algorithms often produce comparable results when combined with the technique known as pseudo-projective parsing. The linear time complexity of the stack-based algorithms gives them an advantage with respect to efficiency both in learning and in parsing, but the projective list-based algorithm turns out to be equally efficient in practice. Moreover, when the projective algorithms are used to implement pseudo-projective parsing, they sometimes become less efficient in parsing (but not in learning) than the non-projective list-based algorithm. Although most of the algorithms have been partially described in the literature before, this is the first comprehensive analysis and evaluation of the algorithms within a unified framework.", "phrases": ["dependency parsing", "derivation", "action", "generative model", "local inference"], "overall_score": 5.101954828359297, "scores": [2.5996225298816356, 1.5692671612322868, 0.859112785546661, 0.5572066432844389, 0.5485949881495277], "rank_score": 1.22676082161891} -{"id": "ahmad-kondrak-2005-learning", "title": "Learning a Spelling Error Model from Search Query Logs", "abstract": "Applying the noisy channel model to search query spelling correction requires an error model and a language model. Typically, the error model relies on a weighted string edit distance measure. The weights can be learned from pairs of misspelled words and their corrections. 
This paper investigates using the Expectation Maximization algorithm to learn edit distance weights directly from search query logs, without relying on a corpus of paired words.", "phrases": ["spelling error model", "query log", "character-based error probability"], "overall_score": 3.5428529324892963, "scores": [1.508649263441096, 1.6404743882930741, 0.5281053304657439], "rank_score": 1.225742994066638} -{"id": "zhao-etal-2019-improving", "title": "Improving Grammatical Error Correction via Pre-Training a Copy-Augmented Architecture with Unlabeled Data", "abstract": "Neural machine translation systems have become state-of-the-art approaches for Grammatical Error Correction (GEC) task. In this paper, we propose a copy-augmented architecture for the GEC task by copying the unchanged words from the source sentence to the target sentence. Since the GEC suffers from not having enough labeled training data to achieve high accuracy. We pre-train the copy-augmented architecture with a denoising auto-encoder using the unlabeled One Billion Benchmark and make comparisons between the fully pre-trained model and a partially pre-trained model. It is the first time copying words from the source context and fully pre-training a sequence to sequence model are experimented on the GEC task. Moreover, We add token-level and sentence-level multi-task learning for the GEC task. The evaluation results on the CoNLL-2014 test set show that our approach outperforms all recently published state-of-the-art results by a large margin.", "phrases": ["grammatical error correction", "neural machine translation", "pre-trained model"], "overall_score": 4.247630161657196, "scores": [2.605259355685449, 0.535952579328797, 0.5356090468376878], "rank_score": 1.2256069939506447} -{"id": "gu-feng-2020-investigating", "title": "Investigating Catastrophic Forgetting During Continual Training for Neural Machine Translation", "abstract": "Neural machine translation (NMT) models usually suffer from catastrophic forgetting during continual training where the models tend to gradually forget previously learned knowledge and swing to fit the newly added data which may have a different distribution, e.g. a different domain. Although many methods have been proposed to solve this problem, we cannot get to know what causes this phenomenon yet. Under the background of domain adaptation, we investigate the cause of catastrophic forgetting from the perspectives of modules and parameters (neurons). The investigation on the modules of the NMT model shows that some modules have tight relation with the general-domain knowledge while some other modules are more essential in the domain adaptation. And the investigation on the parameters shows that some parameters are important for both the general-domain and in-domain translation and the great change of them during continual training brings about the performance decline in general-domain. 
We conducted experiments across different language pairs and domains to ensure the validity and reliability of our findings.", "phrases": ["catastrophic forgetting", "neural machine translation", "domain adaptation"], "overall_score": 1.972410028686034, "scores": [2.228872680445041, 0.9034483931592897, 0.5442606946907462], "rank_score": 1.225527256098359} -{"id": "baziotis-etal-2017-datastories-semeval", "title": "DataStories at SemEval-2017 Task 4: Deep LSTM with Attention for Message-level and Topic-based Sentiment Analysis", "abstract": "In this paper we present two deep-learning systems that competed at SemEval-2017 Task 4 \u201cSentiment Analysis in Twitter\u201d. We participated in all subtasks for English tweets, involving message-level and topic-based sentiment polarity classification and quantification. We use Long Short-Term Memory (LSTM) networks augmented with two kinds of attention mechanisms, on top of word embeddings pre-trained on a big collection of Twitter messages. Also, we present a text processing tool suitable for social network messages, which performs tokenization, word normalization, segmentation and spell correction. Moreover, our approach uses no hand-crafted features or sentiment lexicons. We ranked 1st (tie) in Subtask A, and achieved very competitive results in the rest of the Subtasks. Both the word embeddings and our text processing tool are available to the research community.", "phrases": ["semeval-2017 task", "deep lstm", "sentiment analysis", "tweet preprocessor"], "overall_score": 3.234084114988866, "scores": [2.3915787574884493, 0.8765766950346946, 0.7879723941743506, 0.8457499241205506], "rank_score": 1.2254694427045112} -{"id": "wallace-etal-2019-allennlp", "title": "AllenNLP Interpret: A Framework for Explaining Predictions of NLP Models", "abstract": "Neural NLP models are increasingly accurate but are imperfect and opaque\u2014they break in counterintuitive ways and leave end users puzzled at their behavior. Model interpretation methods ameliorate this opacity by providing explanations for specific model predictions. Unfortunately, existing interpretation codebases make it difficult to apply these methods to new models and tasks, which hinders adoption for practitioners and burdens interpretability researchers. We introduce AllenNLP Interpret, a flexible framework for interpreting NLP models. The toolkit provides interpretation primitives (e.g., input gradients) for any AllenNLP model and task, a suite of built-in interpretation methods, and a library of front-end visualization components. We demonstrate the toolkit's flexibility and utility by implementing live demos for five interpretation methods (e.g., saliency maps and adversarial attacks) on a variety of models and tasks (e.g., masked language modeling using BERT and reading comprehension using BiDAF). These demos, alongside our code and tutorials, are available at .", "phrases": ["interpretation method", "explanation", "allennlp interpret"], "overall_score": 2.547553491609763, "scores": [2.265356450722703, 0.8740399559119896, 0.5359463821100758], "rank_score": 1.225114262914923} -{"id": "merlo-van-der-plas-2009-abstraction", "title": "Abstraction and Generalisation in Semantic Role Labels: PropBank, VerbNet or both?", "abstract": "Semantic role labels are the representation of the grammatically relevant aspects of a sentence meaning. Capturing the nature and the number of semantic roles in a sentence is therefore fundamental to correctly describing the interface between grammar and meaning. 
In this paper, we compare two annotation schemes, PropBank and VerbNet, in a task-independent, general way, analysing how well they fare in capturing the linguistic generalisations that are known to hold for semantic role labels, and consequently how well they grammaticalise aspects of meaning. We show that VerbNet is more verb-specific and better able to generalise to new semantic role instances, while PropBank better captures some of the structural constraints among roles. We conclude that these two resources should be used together, as they are complementary.", "phrases": ["generalisation", "verbnet", "propbank role"], "overall_score": 2.3838409423854965, "scores": [2.3293651266355644, 0.783315246277593, 0.5624753533819955], "rank_score": 1.225051908765051} -{"id": "patry-langlais-2011-identifying", "title": "Identifying Parallel Documents from a Large Bilingual Collection of Texts: Application to Parallel Article Extraction in Wikipedia.", "abstract": "While several recent works on dealing with large bilingual collections of texts, e.g. (Smith et al., 2010), seek for extracting parallel sentences from comparable corpora, we present Paradocs, a system designed to recognize pairs of parallel documents in a (large) bilingual collection of texts. We show that this system outperforms a fair baseline (Enright and Kondrak, 2007) in a number of controlled tasks. We applied it on the French-English cross-language linked article pairs of Wikipedia in order to see whether parallel articles in this resource are available, and if our system is able to locate them. According to some manual evaluation we conducted, a fourth of the article pairs in Wikipedia are indeed in translation relation, and Paradocs identifies parallel or noisy parallel article pairs with a precision of 80%.", "phrases": ["parallel document", "large bilingual collection", "wikipedia"], "overall_score": 2.383622854269617, "scores": [1.691670364398233, 0.8446916328812502, 1.1384575036519478], "rank_score": 1.2249398336438102} -{"id": "tanev-magnini-2006-weakly", "title": "Weakly Supervised Approaches for Ontology Population", "abstract": "We present a weakly supervised approach to automatic ontology population from text and compare it with two other unsupervised approaches. In our experiments we populate a part of our ontology of Named Entities. We considered two high level categories-geographical locations and person names and ten sub-classes for each category. For each sub-class we automatically learn a syntactic model from a list of training examples and a parsed corpus. A novel syntactic indexing method allowed us to use large quantities of syntactically annotated data. The syntactic model for each named entity sub-class is a set of weighted syntactic features, i.e. words which typically co-occur with the members of the class in the corpus. The method is weakly supervised, since no manually annotated corpus is used in the learning process. The syntactic models are used to classify the unknown Named Entities in the test set. The method achieved promising results, i.e. &#x0D;
65% accuracy, and outperforms significantly the other two approaches.", "phrases": ["ontology population", "person name", "entity classification"], "overall_score": 2.9363096239158755, "scores": [2.576777598565483, 0.5676443708877245, 0.5291867038177933], "rank_score": 1.2245362244236668} -{"id": "gkatzia-etal-2015-virtual", "title": "From the Virtual to the RealWorld: Referring to Objects in Real-World Spatial Scenes", "abstract": "Predicting the success of referring expressions (RE) is vital for real-world applications such as navigation systems. Traditionally, research has focused on studying Referring Expression Generation (REG) in virtual, controlled environments. In this paper, we describe a novel study of spatial references from real scenes rather than virtual. First, we investigate how humans describe objects in open, uncontrolled scenarios and compare our findings to those reported in virtual environments. We show that REs in real-world scenarios differ significantly to those in virtual worlds. Second, we propose a novel approach to quantifying image complexity when complete annotations are not present (e.g. due to poor object recognition capabilities), and third, we present a model for success prediction of REs for objects in real scenes. Finally, we discuss implications for Natural Language Generation (NLG) systems and future directions.", "phrases": ["object", "real-world image", "setup"], "overall_score": 2.193842856094893, "scores": [2.2026447287438033, 0.9490848107791262, 0.5214914933873659], "rank_score": 1.2244070109700984} -{"id": "guan-etal-2020-neuinfer", "title": "NeuInfer: Knowledge Inference on N-ary Facts", "abstract": "Knowledge inference on knowledge graph has attracted extensive attention, which aims to find out connotative valid facts in knowledge graph and is very helpful for improving the performance of many downstream applications. However, researchers have mainly poured attention to knowledge inference on binary facts. The studies on n-ary facts are relatively scarcer, although they are also ubiquitous in the real world. Therefore, this paper addresses knowledge inference on n-ary facts. We represent each n-ary fact as a primary triple coupled with a set of its auxiliary descriptive attribute-value pair(s). We further propose a neural network model, NeuInfer, for knowledge inference on n-ary facts. Besides handling the common task to infer an unknown element in a whole fact, NeuInfer can cope with a new type of task, flexible knowledge inference. It aims to infer an unknown element in a partial fact consisting of the primary triple coupled with any number of its auxiliary description(s). Experimental results demonstrate the remarkable superiority of NeuInfer.", "phrases": ["knowledge inference", "n-ary fact", "neuinfer"], "overall_score": 1.696921396021015, "scores": [1.9247642486552592, 0.9247373267989359, 0.8227085487726478], "rank_score": 1.2240700414089476} -{"id": "hu-etal-2020-monalog", "title": "MonaLog: a Lightweight System for Natural Language Inference Based on Monotonicity", "abstract": "We present a new logic-based inference engine for natural language inference (NLI) called MonaLog, which is based on natural logic and the monotonicity calculus. In contrast to existing logic-based approaches, our system is intentionally designed to be as lightweight as possible, and operates using a small set of well-known (surface-level) monotonicity facts about quantifiers, lexical items and token-level polarity information. &#x0D;
Despite its simplicity, we find our approach to be competitive with other logic-based NLI models on the SICK benchmark. We also use MonaLog in combination with the current state-of-the-art model BERT in a variety of settings, including for compositional data augmentation. We show that MonaLog is capable of generating large amounts of high-quality training data for BERT, improving its accuracy on SICK.", "phrases": ["natural language inference", "monotonicity", "monalog"], "overall_score": 1.9687506822646308, "scores": [1.8339735625178544, 0.9319923487836, 0.903794817685802], "rank_score": 1.2232535763290855} -{"id": "mihalcea-strapparava-2012-lyrics", "title": "Lyrics, Music, and Emotions", "abstract": "In this paper, we explore the classification of emotions in songs, using the music and the lyrics representation of the songs. We introduce a novel corpus of music and lyrics, consisting of 100 songs annotated for emotions. We show that textual and musical features can both be successfully used for emotion recognition in songs. Moreover, through comparative experiments, we show that the joint use of lyrics and music brings significant improvements over each of the individual textual and musical classifiers, with error rate reductions of up to 31%.", "phrases": ["music", "emotion", "lyric"], "overall_score": 1.3431364063551383, "scores": [1.8851514594605376, 0.9015809232698581, 0.8809939485947565], "rank_score": 1.2225754437750507} -{"id": "mallinson-etal-2020-felix", "title": "FELIX: Flexible Text Editing Through Tagging and Insertion", "abstract": "We present FELIX \u2013 a flexible text-editing approach for generation, designed to derive maximum benefit from the ideas of decoding with bi-directional contexts and self-supervised pretraining. In contrast to conventional sequence-to-sequence (seq2seq) models, FELIX is efficient in low-resource settings and fast at inference time, while being capable of modeling flexible input-output transformations. We achieve this by decomposing the text-editing task into two sub-tasks: tagging to decide on the subset of input tokens and their order in the output text and insertion to in-fill the missing tokens in the output not present in the input. The tagging model employs a novel Pointer mechanism, while the insertion model is based on a Masked Language Model (MLM). Both of these models are chosen to be non-autoregressive to guarantee faster inference. FELIX performs favourably when compared to recent text-editing methods and strong seq2seq baselines when evaluated on four NLG tasks: Sentence Fusion, Machine Translation Automatic Post-Editing, Summarization, and Text Simplification", "phrases": ["insertion", "output text", "text-editing method", "felix", "edit-based model"], "overall_score": 2.931591368136979, "scores": [2.2477675871779152, 1.7175801766061132, 0.9839760573494859, 0.5902479825068757, 0.5732709910238233], "rank_score": 1.2225685589328426} -{"id": "christensen-etal-2013-towards", "title": "Towards Coherent Multi-Document Summarization", "abstract": "This paper presents G-FLOW, a novel system for coherent extractive multi-document summarization (MDS). Where previous work on MDS considered sentence selection and ordering separately, G-FLOW introduces a joint model for selection and ordering that balances coherence and salience. G-FLOW\u2019s core representation is a graph that approximates the discourse relations across sentences based on indicators including discourse cues, deverbal nouns, co-reference, and more. &#x0D;
This graph enables G-FLOW to estimate the coherence of a candidate summary. We evaluate G-FLOW on Mechanical Turk, and find that it generates dramatically better summaries than an extractive summarizer based on a pipeline of state-of-the-art sentence selection and reordering components, underscoring the value of our joint model.", "phrases": ["summarization", "discourse relation", "multi-document graph", "passage"], "overall_score": 3.3099978767123344, "scores": [2.8981255114216964, 0.8420024321249664, 0.6191230142126657, 0.529872405411893], "rank_score": 1.2222808407928052} -{"id": "michaelov-bergen-2020-well", "title": "How well does surprisal explain N400 amplitude under different experimental conditions?", "abstract": "We investigate the extent to which word surprisal can be used to predict a neural measure of human language processing difficulty\u2014the N400. To do this, we use recurrent neural networks to calculate the surprisal of stimuli from previously published neurolinguistic studies of the N400. We find that surprisal can predict N400 amplitude in a wide range of cases, and the cases where it cannot do so provide valuable insight into the neurocognitive processes underlying the response.", "phrases": ["surprisal", "n400 amplitude", "stimulus"], "overall_score": 1.966679195583146, "scores": [2.003873923205177, 0.7958990707240131, 0.8661264739330217], "rank_score": 1.221966489287404} -{"id": "sennrich-etal-2017-university", "title": "The University of Edinburgh's Neural MT Systems for WMT17", "abstract": "This paper describes the University of Edinburgh's submissions to the WMT17 shared news translation and biomedical translation tasks. We participated in 12 translation directions for news, translating between English and Czech, German, Latvian, Russian, Turkish and Chinese. For the biomedical task we submitted systems for English to Czech, German, Polish and Romanian. Our systems are neural machine translation systems trained with Nematus, an attentional encoder-decoder. We follow our setup from last year and build BPE-based models with parallel and back-translated monolingual training data. Novelties this year include the use of deep architectures, layer normalization, and more compact models due to weight tying and improvements in BPE segmentations. We perform extensive ablative experiments, reporting on the effectiveness of layer normalization, deep architectures, and different ensembling techniques.", "phrases": ["edinburgh", "wmt17", "neural machine translation"], "overall_score": 2.929084307390913, "scores": [2.177023939218073, 0.8934384595643252, 0.5941067014009961], "rank_score": 1.2215230333944647} -{"id": "wieting-etal-2017-learning", "title": "Learning Paraphrastic Sentence Embeddings from Back-Translated Bitext", "abstract": "We consider the problem of learning general-purpose, paraphrastic sentence embeddings in the setting of Wieting et al. (2016b). We use neural machine translation to generate sentential paraphrases via back-translation of bilingual sentence pairs. We evaluate the paraphrase pairs by their ability to serve as training data for learning paraphrastic sentence embeddings. We find that the data quality is stronger than prior work based on bitext and on par with manually-written English paraphrase pairs, with the advantage that our approach can scale up to generate large training sets for many languages and domains. We experiment with several language pairs and data sources, and develop a variety of data filtering techniques. 
In the process, we explore how neural machine translation output differs from human-written sentences, finding clear differences in length, the amount of repetition, and the use of rare words.", "phrases": ["back-translation", "neural machine translation", "paraphrase", "sentence pair"], "overall_score": 3.8296894708795985, "scores": [1.8483136706874252, 1.650656251945976, 0.8347585076370405, 0.5518675327822101], "rank_score": 1.221398990763163} -{"id": "chen-etal-2019-meta", "title": "Meta Relational Learning for Few-Shot Link Prediction in Knowledge Graphs", "abstract": "Link prediction is an important way to complete knowledge graphs (KGs), while embedding-based methods, effective for link prediction in KGs, perform poorly on relations that only have a few associative triples. In this work, we propose a Meta Relational Learning (MetaR) framework to do the common but challenging few-shot link prediction in KGs, namely predicting new triples about a relation by only observing a few associative triples. We solve few-shot link prediction by focusing on transferring relation-specific meta information to make the model learn the most important knowledge and learn faster, corresponding to relation meta and gradient meta respectively in MetaR. Empirically, our model achieves state-of-the-art results on few-shot link prediction KG benchmarks.", "phrases": ["link prediction", "knowledge graph", "meta relational learning"], "overall_score": 2.928527162415532, "scores": [2.2684479539391447, 0.7919252281634496, 0.6034988755764404], "rank_score": 1.2212906858930115} -{"id": "li-fung-2014-language", "title": "Language Modeling with Functional Head Constraint for Code Switching Speech Recognition", "abstract": "In this paper, we propose novel structured language modeling methods for code mixing speech recognition by incorporating a well-known syntactic constraint for switching code, namely the Functional Head Constraint (FHC). Code mixing data is not abundantly available for training language models. Our proposed methods successfully alleviate this core problem for code mixing speech recognition by using bilingual data to train a structured language model with syntactic constraint. Linguists and bilingual speakers have found that code switching does not happen between the functional head and its complements. We propose to learn the code mixing language model from bilingual data with this constraint in a weighted finite state transducer (WFST) framework. The constrained code switch language model is obtained by first expanding the search network with a translation model, and then using parsing to restrict paths to those permissible under the constraint. We im", "phrases": ["functional head constraint", "speech recognition", "language model"], "overall_score": 2.1879866121886793, "scores": [2.213013749848867, 0.8888665453182727, 0.5615354418759543], "rank_score": 1.2211385790143647} -{"id": "mimno-etal-2009-polylingual", "title": "Polylingual Topic Models", "abstract": "Topic models are a useful tool for analyzing large text collections, but have previously been applied in only monolingual, or at most bilingual, contexts. Meanwhile, massive collections of interlinked documents in dozens of languages, such as Wikipedia, are now widely available, calling for tools that can characterize content in many languages. We introduce a polylingual topic model that discovers topics aligned across multiple languages. 
We explore the model's characteristics using two large corpora, each with over ten different languages, and demonstrate its usefulness in supporting machine translation and tracking topic trends across languages.", "phrases": ["polylingual topic models", "lda", "tuple", "string similarity", "induction"], "overall_score": 4.111741206239915, "scores": [1.6479184330021646, 1.5628649385158402, 1.1211148762643475, 0.8917578549964608, 0.8817492637826867], "rank_score": 1.2210810733122999} -{"id": "hahn-etal-2006-agreement", "title": "Agreement/Disagreement Classification: Exploiting Unlabeled Data using Contrast Classifiers", "abstract": "Several semi-supervised learning methods have been proposed to leverage unlabeled data, but imbalanced class distributions in the data set can hurt the performance of most algorithms. In this paper, we adapt the new approach of contrast classifiers for semi-supervised learning. This enables us to exploit large amounts of unlabeled data with a skewed distribution. In experiments on a speech act (agreement/disagreement) classification problem, we achieve better results than other semi-supervised methods. We also obtain performance comparable to the best results reported so far on this task and outperform systems with equivalent feature sets.", "phrases": ["unlabeled data", "contrast classifier", "agreement"], "overall_score": 1.9651491451569383, "scores": [1.852093588991322, 0.9174462989735019, 0.893507558553066], "rank_score": 1.2210158155059634} -{"id": "angeli-etal-2014-combining", "title": "Combining Distant and Partial Supervision for Relation Extraction", "abstract": "Broad-coverage relation extraction either requires expensive supervised training data, or suffers from drawbacks inherent to distant supervision. We present an approach for providing partial supervision to a distantly supervised relation extractor using a small number of carefully selected examples. We compare against established active learning criteria and propose a novel criterion to sample examples which are both uncertain and representative. In this way, we combine the benefits of fine-grained supervision for difficult examples with the coverage of a large distantly supervised corpus. Our approach gives a substantial increase of 3.9% end-to-end F1 on the 2013 KBP Slot Filling evaluation, yielding a net F1 of 37.7%.", "phrases": ["partial supervision", "relation extraction", "active learning method", "crowdsourcing"], "overall_score": 2.927461301443239, "scores": [1.9899950605047965, 1.8439839872386943, 0.525622724646051, 0.5237829769816986], "rank_score": 1.2208461873428103} -{"id": "petrovic-etal-2010-streaming", "title": "Streaming First Story Detection with application to Twitter", "abstract": "With the recent rise in popularity and size of social media, there is a growing need for systems that can extract useful information from this amount of data. We address the problem of detecting new events from a stream of Twitter posts. To make event detection feasible on web-scale corpora, we present an algorithm based on locality-sensitive hashing which is able to overcome the limitations of traditional approaches, while maintaining competitive results. In particular, a comparison with a state-of-the-art system on the first story detection task shows that we achieve over an order of magnitude speedup in processing time, while retaining comparable performance. 
Event detection experiments on a collection of 160 million Twitter posts show that celebrity deaths are the fastest spreading news on Twitter.", "phrases": ["story detection", "twitter", "event detection", "stream", "incoming tweet"], "overall_score": 3.458666363332452, "scores": [2.702636722207815, 1.0796229269641664, 0.9203959257184513, 0.8328969086235603, 0.5682348832022672], "rank_score": 1.220757473343252} -{"id": "baldwin-2006-compositionality", "title": "Compositionality and Multiword Expressions: Six of One, Half a Dozen of the Other?", "abstract": "In this talk, I will investigate the relationship between compositionality and multiword expressions, as part of which I will outline different approaches for formalising the notion of compositionality. I will then briefly review computational methods that have been proposed for modelling compositionality, and applications thereof. Finally, I will discuss possible future directions for modelling compositionality, and present some preliminary results.", "phrases": ["compositionality", "chance", "multi-word expression", "mwes"], "overall_score": 2.9269375405588374, "scores": [2.8629176756955594, 0.8479621158779956, 0.6410410481820917, 0.5305902085989681], "rank_score": 1.2206277620886539} -{"id": "guo-etal-2019-attention", "title": "Attention Guided Graph Convolutional Networks for Relation Extraction", "abstract": "Dependency trees convey rich structural information that is proven useful for extracting relations among entities in text. However, how to effectively make use of relevant information while ignoring irrelevant information from the dependency trees remains a challenging research question. Existing approaches employing rule based hard-pruning strategies for selecting relevant partial dependency structures may not always yield optimal results. In this work, we propose Attention Guided Graph Convolutional Networks (AGGCNs), a novel model which directly takes full dependency trees as inputs. Our model can be understood as a soft-pruning approach that automatically learns how to selectively attend to the relevant sub-structures useful for the relation extraction task. Extensive results on various tasks including cross-sentence n-ary relation extraction and large-scale sentence-level relation extraction show that our model is able to better leverage the structural information of the full dependency trees, giving significantly better results than previous approaches.", "phrases": ["convolutional network", "relation extraction", "relevant sub-structure", "input sentence", "previous study"], "overall_score": 3.8268612195514713, "scores": [3.1625203702456974, 0.9170292138510245, 0.8615516118739346, 0.6076609689843568, 0.5537227321785738], "rank_score": 1.2204969794267175} -{"id": "arnold-etal-2008-exploiting", "title": "Exploiting Feature Hierarchy for Transfer Learning in Named Entity Recognition", "abstract": "We present a novel hierarchical prior structure for supervised transfer learning in named entity recognition, motivated by the common structure of feature spaces for this task across natural language data sets. The problem of transfer learning, where information gained in one learning task is used to improve performance in another related task, is an important new area of research. In the subproblem of domain adaptation, a model trained over a source domain is generalized to perform well on a related target domain, where the two domains\u2019 data are distributed similarly, but not identically. 
We introduce the concept of groups of closely-related domains, called genres, and show how inter-genre adaptation is related to domain adaptation. We also examine multitask learning, where two domains may be related, but where the concept to be learned in each case is distinct. We show that our prior conveys useful information across domains, genres and tasks, while remaining robust to spurious signals not related to the target domain and concept. We further show that our model generalizes a class of similar hierarchical priors, smoothed to varying degrees, and lay the groundwork for future exploration in this area.", "phrases": ["transfer learning", "entity recognition", "domain adaptation"], "overall_score": 2.374464683955839, "scores": [2.211187801324867, 0.8502308844249037, 0.5992817095514889], "rank_score": 1.2202334651004199} -{"id": "ogren-2006-knowtator", "title": "Knowtator: A Prot\u00e9g\u00e9 plug-in for annotated corpus construction", "abstract": "A general-purpose text annotation tool called Knowtator is introduced. Knowtator facilitates the manual creation of annotated corpora that can be used for evaluating or training a variety of natural language processing systems. Building on the strengths of the widely used Prot\u00e9g\u00e9 knowledge representation system, Knowtator has been developed as a Prot\u00e9g\u00e9 plug-in that leverages Prot\u00e9g\u00e9's knowledge representation capabilities to specify annotation schemas. Knowtator's unique advantage over other annotation tools is the ease with which complex annotation schemas (e.g. schemas which have constrained relationships between annotation types) can be defined and incorporated into use. Knowtator is available under the Mozilla Public License 1.1 at http://bionlp.sourceforge.net/Knowtator.", "phrases": ["annotation tool", "protege", "knowtator"], "overall_score": 2.185065155508047, "scores": [2.527251594147081, 0.6009911916783225, 0.5302814631622803], "rank_score": 1.2195080829958946} -{"id": "florian-etal-2004-statistical", "title": "A Statistical Model for Multilingual Entity Detection and Tracking", "abstract": "Entity detection and tracking is a relatively new addition to the repertoire of natural language tasks. In this paper, we present a statistical language-independent framework for identifying and tracking named, nominal and pronominal references to entities within unrestricted text documents, and chaining them into clusters corresponding to each logical entity present in the text. Both the mention detection model and the novel entity tracking model can use arbitrary feature types, being able to integrate a wide array of lexical, syntactic and semantic features. In addition, the mention detection model crucially uses feature streams derived from different named entity classifiers. 
The proposed framework is evaluated with several experiments run in Arabic, Chinese and English texts; a system based on the approach described here and submitted to the latest Automatic Content Extraction (ACE) evaluation achieved top-tier results in all three evaluation languages.", "phrases": ["tracking", "language-independent framework", "mention", "automatic content extraction"], "overall_score": 4.018673338336957, "scores": [2.1304453127915712, 1.3209781082422232, 0.8846568130948873, 0.5411919146096152], "rank_score": 1.2193180371845742} -{"id": "bejan-harabagiu-2010-unsupervised", "title": "Unsupervised Event Coreference Resolution with Rich Linguistic Features", "abstract": "This paper examines how a new class of nonparametric Bayesian models can be effectively applied to an open-domain event coreference task. Designed with the purpose of clustering complex linguistic objects, these models consider a potentially infinite number of features and categorical outcomes. The evaluation performed for solving both within- and cross-document event coreference shows significant improvements of the models when compared against two baselines for this task.", "phrases": ["event coreference", "nonparametric bayesian model", "ecb", "information extraction", "multiple document"], "overall_score": 4.146957044131048, "scores": [1.835966075316761, 1.274236419754831, 1.2126759483702492, 0.9222600785453448, 0.8511807720498481], "rank_score": 1.2192638588074067} -{"id": "yan-etal-2020-unknown", "title": "Unknown Intent Detection Using Gaussian Mixture Model with an Application to Zero-shot Intent Classification", "abstract": "User intent classification plays a vital role in dialogue systems. Since user intent may frequently change over time in many realistic scenarios, unknown (new) intent detection has become an essential problem, where the study has just begun. This paper proposes a semantic-enhanced Gaussian mixture model (SEG) for unknown intent detection. In particular, we model utterance embeddings with a Gaussian mixture distribution and inject dynamic class semantic information into Gaussian means, which enables learning more class-concentrated embeddings that help to facilitate downstream outlier detection. Coupled with a density-based outlier detection algorithm, SEG achieves competitive results on three real task-oriented dialogue datasets in two languages for unknown intent detection. On top of that, we propose to integrate SEG as an unknown intent identifier into existing generalized zero-shot intent classification models to improve their performance. A case study on a state-of-the-art method, ReCapsNet, shows that SEG can push the classification performance to a significantly higher level.", "phrases": ["intent detection", "gaussian mixture model", "downstream outlier detection"], "overall_score": 3.029140083911482, "scores": [2.2224090639484637, 0.9007623946211685, 0.5338754781874245], "rank_score": 1.2190156455856855} -{"id": "smith-etal-2013-dirt", "title": "Dirt Cheap Web-Scale Parallel Text from the Common Crawl", "abstract": "Parallel text is the fuel that drives modern machine translation systems. The Web is a comprehensive source of preexisting parallel text, but crawling the entire web is impossible for all but the largest companies. We bring web-scale parallel text to the masses by mining the Common Crawl, a public Web crawl hosted on Amazon\u2019s Elastic Cloud. 
Starting from nothing more than a set of common two-letter language codes, our open-source extension of the STRAND algorithm mined 32 terabytes of the crawl in just under a day, at a cost of about $500. Our large-scale experiment uncovers large amounts of parallel text in dozens of language pairs across a variety of domains and genres, some previously unavailable in curated datasets. Even with minimal cleaning and filtering, the resulting data boosts translation performance across the board for five different language pairs in the news domain, and on open domain test sets we see improvements of up to 5 BLEU. We make our code and data available for other researchers seeking to mine this rich new data resource.", "phrases": ["common crawl", "strand algorithm", "different language", "parallel document"], "overall_score": 3.3797149164061735, "scores": [2.766404830108422, 0.9863793922185546, 0.5640061490357027, 0.5591075781549647], "rank_score": 1.218974487379411} -{"id": "hassan-etal-2008-language", "title": "Language Independent Text Correction using Finite State Automata", "abstract": "Many natural language applications, like machine translation and information extraction, are required to operate on text with spelling errors. Those spelling mistakes have to be corrected automatically to avoid deteriorating the performance of such applications. In this work, we introduce a novel approach for automatic correction of spelling mistakes by deploying finite state automata to propose candidate corrections within a specified edit distance from the misspelled word. After choosing candidate corrections, a language model is used to assign scores to the candidate corrections and choose the best correction in the given context. The proposed approach is language independent and requires only a dictionary and text data for building a language model. The approach has been tested on both Arabic and English text and achieved an accuracy of 89%.", "phrases": ["finite state automata", "candidate correction", "edit-distance measure", "morphological analyzer"], "overall_score": 2.9224638427168936, "scores": [2.3343673563281118, 0.8758916016013977, 0.8328692750527809, 0.8319201077341837], "rank_score": 1.2187620851791185} -{"id": "mrini-etal-2020-rethinking", "title": "Rethinking Self-Attention: Towards Interpretability in Neural Parsing", "abstract": "Attention mechanisms have improved the performance of NLP tasks while allowing models to remain explainable. Self-attention is currently widely used; however, interpretability is difficult due to the numerous attention distributions. Recent work has shown that model representations can benefit from label-specific information, while facilitating interpretation of predictions. We introduce the Label Attention Layer: a new form of self-attention where attention heads represent labels. We test our novel layer by running constituency and dependency parsing experiments and show our new model obtains new state-of-the-art results for both tasks on both the Penn Treebank (PTB) and Chinese Treebank. Additionally, our model requires fewer self-attention layers compared to existing work. 
Finally, we find that the Label Attention heads learn relations between syntactic categories and show pathways to analyze errors.", "phrases": ["self-attention", "interpretability", "label attention layer"], "overall_score": 2.370430860730286, "scores": [1.9743259091193215, 0.8309092043619724, 0.8492463566128922], "rank_score": 1.2181604900313954} -{"id": "narayan-etal-2018-dont", "title": "Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization", "abstract": "We introduce \u201cextreme summarization\u201d, a new single-document summarization task which does not favor extractive strategies and calls for an abstractive modeling approach. The idea is to create a short, one-sentence news summary answering the question \u201cWhat is the article about?\u201d. We collect a real-world, large-scale dataset for this task by harvesting online articles from the British Broadcasting Corporation (BBC). We propose a novel abstractive model which is conditioned on the article's topics and based entirely on convolutional neural networks. We demonstrate experimentally that this architecture captures long-range dependencies in a document and recognizes pertinent content, outperforming an oracle extractive system and state-of-the-art abstractive approaches when evaluated automatically and by humans.", "phrases": ["convolutional neural network", "extreme summarization", "large-scale dataset", "topical information"], "overall_score": 4.881290885654299, "scores": [3.2486392201864605, 0.5447371368660283, 0.5413206514998493, 0.5376614004189672], "rank_score": 1.2180896022428263} -{"id": "fischer-etal-2020-royal", "title": "The Royal Society Corpus 6.0: Providing 300+ Years of Scientific Writing for Humanistic Study", "abstract": "We present a new, extended version of the Royal Society Corpus (RSC), a diachronic corpus of scientific English now covering 300+ years of scientific writing (1665\u20131996). The corpus comprises 47 837 texts, primarily scientific articles, and is based on publications of the Royal Society of London, mainly its Philosophical Transactions and Proceedings. The corpus has been built on the basis of the FAIR principles and is freely available under a Creative Commons license, excluding copyrighted parts. We provide information on how the corpus can be found, the file formats available for download as well as accessibility via a web-based corpus query platform. We show a number of analytic tools that we have implemented for better usability and provide an example of use of the corpus for linguistic analysis as well as examples of subsequent, external uses of earlier releases. We place the RSC against the background of existing English diachronic/scientific corpora, elaborating on its value for linguistic and humanistic study.", "phrases": ["royal society corpus", "scientific writing", "humanistic study"], "overall_score": 1.338138487047852, "scores": [1.9196520380610629, 0.910156138758689, 0.8242702478903778], "rank_score": 1.218026141570043} -{"id": "vickrey-etal-2005-word", "title": "Word-Sense Disambiguation for Machine Translation", "abstract": "In word sense disambiguation, a system attempts to determine the sense of a word from contextual features. Major barriers to building a high-performing word sense disambiguation system include the difficulty of labeling data for this task and of predicting fine-grained sense distinctions. 
These issues stem partly from the fact that the task is being treated in isolation from possible uses of automatically disambiguated data. In this paper, we consider the related task of word translation, where we wish to determine the correct translation of a word from context. We can use parallel language corpora as a large supply of partially labeled data for this task. We present algorithms for solving the word translation problem and demonstrate a significant improvement over a baseline system. We then show that the word-translation system can be used to improve performance on a simplified machine-translation task and can effectively and accurately prune the set of candidate translations for a word.", "phrases": ["machine translation", "word-sense disambiguation", "wsd", "smt system", "blank filling task"], "overall_score": 4.397645430727253, "scores": [1.8356340331948025, 1.0210567931339896, 2.137592742520215, 0.5540032596126846, 0.5410864792617728], "rank_score": 1.2178746615446927} -{"id": "judge-etal-2006-questionbank", "title": "QuestionBank: Creating a Corpus of Parse-Annotated Questions", "abstract": "This paper describes the development of QuestionBank, a corpus of 4000 parse-annotated questions for (i) use in training parsers employed in QA, and (ii) evaluation of question parsing. We present a series of experiments to investigate the effectiveness of QuestionBank as both an exclusive and supplementary training resource for a state-of-the-art parser in parsing both question and non-question test sets. We introduce a new method for recovering empty nodes and their antecedents (capturing long distance dependencies) from parser output in CFG trees using LFG f-structure reentrancies. Our main findings are (i) using QuestionBank training data improves parser performance to 89.75% labelled bracketing f-score, an increase of almost 11% over the baseline; (ii) back-testing experiments on non-question data (Penn-II WSJ Section 23) show that the retrained parser does not suffer a performance drop on non-question material; (iii) ablation experiments show that the size of training material provided by QuestionBank is sufficient to achieve optimal results; (iv) our method for recovering empty nodes captures long distance dependencies in questions from the ATIS corpus with high precision (96.82%) and low recall (39.38%). In summary, QuestionBank provides a useful new resource in parser-based QA research.", "phrases": ["test set", "questionbank", "sentence construction"], "overall_score": 2.675233575369128, "scores": [2.5625633271883195, 0.5696910230277803, 0.5203994608191161], "rank_score": 1.217551270345072} -{"id": "pichotta-mooney-2014-statistical", "title": "Statistical Script Learning with Multi-Argument Events", "abstract": "Scripts represent knowledge of stereotypical event sequences that can aid text understanding. Initial statistical methods have been developed to learn probabilistic scripts from raw text corpora; however, they utilize a very impoverished representation of events, consisting of a verb and one dependent argument. We present a script learning approach that employs events with multiple arguments. Unlike previous work, we model the interactions between multiple entities in a script. 
Experiments on a large corpus using the task of inferring held-out events (the \u201cnarrative cloze evaluation\u201d) demonstrate that modeling multi-argument events improves predictive accuracy.", "phrases": ["script learning", "multi-argument event", "event sequence", "narrative chain", "co-occurrence"], "overall_score": 3.918922021472281, "scores": [2.499315834993581, 1.430376257454801, 1.05090716377951, 0.5660184685840434, 0.5407901695773188], "rank_score": 1.2174815788778508} -{"id": "abney-bird-2010-human", "title": "The Human Language Project: Building a Universal Corpus of the World's Languages", "abstract": "We present a grand challenge to build a corpus that will include all of the world's languages, in a consistent structure that permits large-scale cross-linguistic processing, enabling the study of universal linguistics. The focal data types, bilingual texts and lexicons, relate each language to one of a set of reference languages. We propose that the ability to train systems to translate into and out of a given language be the yardstick for determining when we have successfully captured a language. We call on the computational linguistics community to begin work on this Universal Corpus, pursuing the many strands of activity described here, as their contribution to the global effort to document the world's linguistic heritage before more languages fall silent.", "phrases": ["universal corpus", "processing", "low-resource language"], "overall_score": 3.024157589558191, "scores": [2.484575874209698, 0.6339320120131022, 0.5325237408399671], "rank_score": 1.2170105423542557} -{"id": "wang-etal-2021-link", "title": "Link Prediction on N-ary Relational Facts: A Graph-based Approach", "abstract": "Link prediction on knowledge graphs (KGs) is a key research topic. Previous work mainly focused on binary relations, paying less attention to higher-arity relations although they are ubiquitous in real-world KGs. This paper considers link prediction upon n-ary relational facts and proposes a graph-based approach to this task. The key to our approach is to represent the n-ary structure of a fact as a small heterogeneous graph, and model this graph with edge-biased fully-connected attention. The fully-connected attention captures universal inter-vertex interactions, while with edge-aware attentive biases to particularly encode the graph structure and its heterogeneity. In this fashion, our approach fully models global and local dependencies in each n-ary fact, and hence can more effectively capture associations therein. Extensive evaluation verifies the effectiveness and superiority of our approach. It performs substantially and consistently better than current state-of-the-art across a variety of n-ary relational benchmarks. Our code is publicly available.", "phrases": ["n-ary relational fact", "graph-based approach", "link prediction"], "overall_score": 1.6870526102142405, "scores": [1.870680635907441, 0.9345659917463068, 0.8456070240585514], "rank_score": 1.216951217237433} -{"id": "ostling-2016-bayesian", "title": "A Bayesian model for joint word alignment and part-of-speech transfer", "abstract": "Current methods for word alignment require considerable amounts of parallel text to deliver accurate results, a requirement which is met only for a small minority of the world's approximately 7,000 languages. 
We show that by jointly performing word alignment and annotation transfer in a novel Bayesian model, alignment accuracy can be improved for language pairs where annotations are available for only one of the languages\u2014a finding which could facilitate the study and processing of a vast number of low-resource languages. We also present an evaluation where our method is used to perform single-source and multi-source part-of-speech transfer with 22 translations of the same text in four different languages. This allows us to quantify the considerable variation in accuracy depending on the specific source text(s) used, even with different translations into the same language.", "phrases": ["bayesian model", "word alignment", "part-of-speech transfer"], "overall_score": 1.336438824476955, "scores": [1.8574302732363048, 0.9406473844495594, 0.8513594683920852], "rank_score": 1.216479042025983} -{"id": "calixto-etal-2017-doubly", "title": "Doubly-Attentive Decoder for Multi-modal Neural Machine Translation", "abstract": "We introduce a Multi-modal Neural Machine Translation model in which a doubly-attentive decoder naturally incorporates spatial visual features obtained using pre-trained convolutional neural networks, bridging the gap between image description and translation. Our decoder learns to attend to source-language words and parts of an image independently by means of two separate attention mechanisms as it generates words in the target language. We find that our model can efficiently exploit not just back-translated in-domain multi-modal data but also large general-domain text-only MT corpora. We also report state-of-the-art results on the Multi30k data set.", "phrases": ["machine translation", "visual feature", "doubly-attentive decoder"], "overall_score": 3.44646837883827, "scores": [1.9943412580398034, 1.0506829897209702, 0.604332112290895], "rank_score": 1.216452120017223} -{"id": "hu-etal-2015-lcsts", "title": "LCSTS: A Large Scale Chinese Short Text Summarization Dataset", "abstract": "Automatic text summarization is widely regarded as a highly difficult problem, partially because of the lack of large text summarization datasets. Due to the great challenge of constructing large-scale summaries for full text, in this paper we introduce a large Chinese short text summarization dataset constructed from the Chinese microblogging website Sina Weibo, which is released to the public {this http URL}. This corpus consists of over 2 million real Chinese short texts with short summaries given by the author of each text. We also manually tagged the relevance of 10,666 short summaries with their corresponding short texts. Based on the corpus, we introduce a recurrent neural network for summary generation and achieve promising results, which not only shows the usefulness of the proposed corpus for short text summarization research, but also provides a baseline for further research on this topic.", "phrases": ["text summarization", "large corpus", "lcsts"], "overall_score": 3.445821262453683, "scores": [1.6424409826626316, 1.1744886290742167, 0.8317415372424407], "rank_score": 1.2162237163264298} -{"id": "blevins-etal-2018-deep", "title": "Deep RNNs Encode Soft Hierarchical Syntax", "abstract": "We present a set of experiments to demonstrate that deep recurrent neural networks (RNNs) learn internal representations that capture soft hierarchical notions of syntax from highly varied supervision. 
We consider four syntax tasks at different depths of the parse tree; for each word, we predict its part of speech as well as the first (parent), second (grandparent) and third level (great-grandparent) constituent labels that appear above it. These predictions are made from representations produced at different depths in networks that are pretrained with one of four objectives: dependency parsing, semantic role labeling, machine translation, or language modeling. In every case, we find a correspondence between network depth and syntactic depth, suggesting that a soft syntactic hierarchy emerges. This effect is robust across all conditions, indicating that the models encode significant amounts of syntax even in the absence of explicit syntactic training supervision.", "phrases": ["rnns", "syntax", "internal representation", "deep layer"], "overall_score": 3.445556534973083, "scores": [3.0953475029525075, 0.5982376847709323, 0.590154115300646, 0.5807818135399008], "rank_score": 1.2161302791409967} -{"id": "shindo-etal-2012-bayesian", "title": "Bayesian Symbol-Refined Tree Substitution Grammars for Syntactic Parsing", "abstract": "We propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. An SR-TSG is an extension of the conventional TSG model where each nonterminal symbol can be refined (subcategorized) to fit the training data. We aim to provide a unified model where TSG rules and symbol refinement are learned from training data in a fully automatic and consistent fashion. We present a novel probabilistic SR-TSG model based on the hierarchical Pitman-Yor Process to encode backoff smoothing from a fine-grained SR-TSG to simpler CFG rules, and develop an efficient training method based on Markov Chain Monte Carlo (MCMC) sampling. Our SR-TSG parser achieves an F1 score of 92.4% in the Wall Street Journal (WSJ) English Penn Treebank parsing task, which is a 7.7 point improvement over a conventional Bayesian TSG parser, and better than state-of-the-art discriminative reranking parsers.", "phrases": ["syntactic parsing", "refined latent variable", "pcfg"], "overall_score": 2.366393982234873, "scores": [2.5614822017250187, 0.559871116816202, 0.5269045160515423], "rank_score": 1.2160859448642545} -{"id": "raganato-tiedemann-2018-analysis", "title": "An Analysis of Encoder Representations in Transformer-Based Machine Translation", "abstract": "The attention mechanism is a successful technique in modern NLP, especially in tasks like machine translation. The recently proposed network architecture of the Transformer is based entirely on attention mechanisms and achieves new state-of-the-art results in neural machine translation, outperforming other sequence-to-sequence models. However, so far not much is known about the internal properties of the model and the representations it learns to achieve that performance. To study this question, we investigate the information that is learned by the attention mechanism in Transformer models with different translation quality. We assess the representations of the encoder by extracting dependency relations based on self-attention weights, we perform four probing tasks to study the amount of syntactic and semantic information captured, and we also test attention in a transfer learning scenario. Our analysis sheds light on the relative strengths and weaknesses of the various encoder representations. 
We observe that specific attention heads mark syntactic dependency relations and we can also confirm that lower layers tend to learn more about syntax while higher layers tend to encode more semantics.", "phrases": ["machine translation", "transformer", "syntactic dependency relation"], "overall_score": 3.961914622873458, "scores": [1.8605782739037235, 1.2531359502308488, 0.5343495241048012], "rank_score": 1.2160212494131244} -{"id": "razmara-etal-2013-graph", "title": "Graph Propagation for Paraphrasing Out-of-Vocabulary Words in Statistical Machine Translation", "abstract": "Out-of-vocabulary (oov) words or phrases still remain a challenge in statistical machine translation especially when a limited amount of parallel text is available for training or when there is a domain shift from training data to test data. In this paper, we propose a novel approach to finding translations for oov words. We induce a lexicon by constructing a graph on source language monolingual text and employ a graph propagation technique in order to find translations for all the source language phrases. Our method differs from previous approaches by adopting a graph propagation approach that takes into account not only one-step (from oov directly to a source language phrase that has a translation) but multi-step paraphrases from oov source language words to other source language phrases and eventually to target language translations. Experimental results show that our graph propagation method significantly improves performance over two strong baselines under intrinsic and extrinsic evaluation metrics.", "phrases": ["paraphrasing", "machine translation", "graph propagation", "oov problem"], "overall_score": 2.915885861454793, "scores": [2.59810106094657, 0.8814438666739121, 0.8551369210913755, 0.5293935669787053], "rank_score": 1.2160188539226409} -{"id": "schlangen-etal-2016-resolving", "title": "Resolving References to Objects in Photographs using the Words-As-Classifiers Model", "abstract": "A common use of language is to refer to visually present objects. Modelling it in computers requires modelling the link between language and perception. The \"words as classifiers\" model of grounded semantics views words as classifiers of perceptual contexts, and composes the meaning of a phrase through composition of the denotations of its component words. It was recently shown to perform well in a game-playing scenario with a small number of object types. We apply it to two large sets of real-world photographs that contain a much larger variety of types and for which referring expressions are available. Using a pre-trained convolutional neural network to extract image features, and augmenting these with in-picture positional information, we show that the model achieves performance competitive with the state of the art in a reference resolution task (given expression, find bounding box of its referent), while, as we argue, being conceptually simpler and more flexible.", "phrases": ["object", "word use", "train image classifier"], "overall_score": 2.6717326625125195, "scores": [2.548634577447611, 0.5552139895768401, 0.544025241694134], "rank_score": 1.2159579362395283} -{"id": "misra-etal-2015-using", "title": "Using Summarization to Discover Argument Facets in Online Idealogical Dialog", "abstract": "More and more of the information available on the web is dialogic, and a significant portion of it takes place in online forum conversations about current social and political topics. 
We aim to develop tools to summarize what these conversations are about. What are the CENTRAL PROPOSITIONS associated with different stances on an issue, what are the abstract objects under discussion that are central to a speaker's argument? How can we recognize that two CENTRAL PROPOSITIONS realize the same FACET of the argument? We hypothesize that the CENTRAL PROPOSITIONS are exactly those arguments that people find most salient, and use human summarization as a probe for discovering them. We describe our corpus of human summaries of opinionated dialogs, then show how we can identify similar repeated arguments, and group them into FACETS across many discussions of a topic. We define a new task, ARGUMENT FACET SIMILARITY (AFS), and show that we can predict AFS with a .54 correlation score, versus an ngram system baseline of .39 and a semantic textual similarity system baseline of .45.", "phrases": ["summarization", "argument facet", "dialog"], "overall_score": 3.2087543043518516, "scores": [2.2462364610058763, 0.8083511423004215, 0.5930265674926909], "rank_score": 1.2158713902663296} -{"id": "mitchell-etal-2013-open", "title": "Open Domain Targeted Sentiment", "abstract": "We propose a novel approach to sentiment analysis for a low resource setting. The intuition behind this work is that sentiment expressed towards an entity, targeted sentiment, may be viewed as a span of sentiment expressed across the entity. This representation allows us to model sentiment detection as a sequence tagging problem, jointly discovering people and organizations along with whether there is sentiment directed towards them. We compare performance in both Spanish and English on microblog data, using only a sentiment lexicon as an external resource. By leveraging linguistically-informed features within conditional random fields (CRFs) trained to minimize empirical risk, our best models in Spanish significantly outperform a strong baseline, and reach around 90% accuracy on the combined task of named entity recognition and sentiment prediction. Our models in English, trained on a much smaller dataset, are not yet statistically significant against their baselines.", "phrases": ["sentiment analysis", "conditional random field", "polarity", "hand-crafted linguistic feature", "open domain"], "overall_score": 4.422353996354902, "scores": [1.6670800548058369, 1.6313585829319903, 1.0486542682004143, 0.8799195546956746, 0.8516806646548961], "rank_score": 1.2157386250577624} -{"id": "gao-callan-2021-condenser", "title": "Condenser: a Pre-training Architecture for Dense Retrieval", "abstract": "Pre-trained Transformer language models (LM) have become go-to text representation encoders. Prior research fine-tunes deep LMs to encode text sequences such as sentences and passages into single dense vector representations for efficient text comparison and retrieval. However, dense encoders require a lot of data and sophisticated techniques to effectively train and suffer in low data situations. This paper finds that a key reason is that standard LMs' internal attention structure is not ready-to-use for dense encoders, which need to aggregate text information into the dense representation. We propose to pre-train towards a dense encoder with a novel Transformer architecture, Condenser, where LM prediction CONditions on DENSE Representation. 
Our experiments show Condenser improves over standard LM by large margins on various text retrieval and similarity tasks.", "phrases": ["pre-training architecture", "retrieval", "condenser"], "overall_score": 1.956624317019036, "scores": [2.0921544335348217, 0.9548775000492532, 0.6001251923342289], "rank_score": 1.215719041972768} -{"id": "kambhatla-2004-combining", "title": "Combining Lexical, Syntactic, and Semantic Features with Maximum Entropy Models for Information Extraction", "abstract": "Extracting semantic relationships between entities is challenging because of a paucity of annotated data and the errors induced by entity detection modules. We employ Maximum Entropy models to combine diverse lexical, syntactic and semantic features derived from the text. Our system obtained competitive results in the Automatic Content Extraction (ACE) evaluation. Here we present our general approach and describe our ACE results.", "phrases": ["semantic feature", "maximum entropy model", "ace", "hand-crafted feature", "traditional supervised approach"], "overall_score": 4.755680415263717, "scores": [2.100163421607088, 2.035712507225822, 0.8350208301122596, 0.5573293482151177, 0.5500617871912411], "rank_score": 1.2156575788703057} -{"id": "menezes-quirk-2008-syntactic", "title": "Syntactic Models for Structural Word Insertion and Deletion during Translation", "abstract": "An important problem in translation neglected by most recent statistical machine translation systems is insertion and deletion of words, such as function words, motivated by linguistic structure rather than adjacent lexical context. Phrasal and hierarchical systems can only insert or delete words in the context of a larger phrase or rule. While this may suffice when translating in-domain, it performs poorly when trying to translate broad domains such as web text. Various syntactic approaches have been proposed that begin to address this problem by learning lexicalized and unlexicalized rules. Among these, the treelet approach uses unlexicalized order templates to model ordering separately from lexical choice. We introduce an extension to the latter that allows for structural word insertion and deletion, without requiring a lexical anchor, and show that it produces gains of more than 1.0% BLEU over both phrasal and baseline treelet systems on broad domain text.", "phrases": ["deletion", "lexical anchor", "treelet system"], "overall_score": 2.177387728297814, "scores": [2.5261639400898654, 0.5905334264011719, 0.528972321364833], "rank_score": 1.21522322928529} -{"id": "isabelle-etal-2017-challenge", "title": "A Challenge Set Approach to Evaluating Machine Translation", "abstract": "Neural machine translation represents an exciting leap forward in translation quality. But what longstanding weaknesses does it resolve, and which remain? We address these questions with a challenge set approach to translation evaluation and error analysis. A challenge set consists of a small set of sentences, each hand-designed to probe a system's capacity to bridge a particular structural divergence between languages. To exemplify this approach, we present an English-French challenge set, and use it to analyze phrase-based and neural systems. 
The resulting analysis provides not only a more fine-grained picture of the strengths of neural systems, but also insight into which linguistic phenomena remain out of reach.", "phrases": ["challenge set", "machine translation", "strength", "nmt system", "agreement"], "overall_score": 3.911361019753151, "scores": [2.486903549561362, 1.2470704897678386, 0.9257805430767292, 0.8664749712403397, 0.5494335544720823], "rank_score": 1.2151326216236704} -{"id": "shwartz-etal-2020-unsupervised", "title": "Unsupervised Commonsense Question Answering with Self-Talk", "abstract": "Natural language understanding involves reading between the lines with implicit background knowledge. Current systems either rely on pre-trained language models as the sole implicit source of world knowledge, or resort to external knowledge bases (KBs) to incorporate additional relevant knowledge. We propose an unsupervised framework based on self-talk as a novel alternative to multiple-choice commonsense tasks. Inspired by inquiry-based discovery learning (Bruner, 1961), our approach inquires language models with a number of information seeking questions such as \u201cwhat is the definition of...\u201d to discover additional background knowledge. Empirical results demonstrate that the self-talk procedure substantially improves the performance of zero-shot language model baselines on four out of six commonsense benchmarks, and competes with models that obtain knowledge from external KBs. While our approach improves performance on several benchmarks, the self-talk induced knowledge even when leading to correct answers is not always seen as helpful by human judges, raising interesting questions about the inner-workings of pre-trained language models for commonsense reasoning.", "phrases": ["self-talk", "commonsense task", "zero-shot setting", "prompt", "knowledge generation"], "overall_score": 3.640125291196964, "scores": [2.8093653426989014, 0.9603538071359397, 0.8883956614143036, 0.8551714656612495, 0.5622320918897946], "rank_score": 1.2151036737600376} -{"id": "xu-etal-2007-domain", "title": "Domain dependent statistical machine translation", "abstract": "While statistical machine translation (SMT) has advanced significantly with better modeling techniques and much more training data, domain specific SMT has received much less attention and leaves much room for further improvements. In this work, we address domain issues and propose to use the combination of feature weights and language model adaptation, to distinguish multiple domains, which share a general translation engine with phrase-based log-linear models. The proposed method requires much less parallel data than what is typically used to build a domain independent system, which makes it easy, cheap and efficient to capture as many domains as required. Domain adaptation during decoding is approached with source text classification methods. 
Our results on the GALE tasks show significant improvements with the proposed domain-dependent translation over domain-independent translation.", "phrases": ["statistical machine translation", "feature weight", "smt model"], "overall_score": 3.116670118702717, "scores": [2.1460227138368886, 0.9061753758798547, 0.593102090931506], "rank_score": 1.215100060216083} -{"id": "ma-collins-2018-noise", "title": "Noise Contrastive Estimation and Negative Sampling for Conditional Models: Consistency and Statistical Efficiency", "abstract": "Noise Contrastive Estimation (NCE) is a powerful parameter estimation method for log-linear models, which avoids calculation of the partition function or its derivatives at each training step, a computationally demanding step in many cases. It is closely related to negative sampling methods, now widely used in NLP. This paper considers NCE-based estimation of conditional models. Conditional models are frequently encountered in practice; however, there has not been a rigorous theoretical analysis of NCE in this setting, and we will argue there are subtle but important questions when generalizing NCE to the conditional case. In particular, we analyze two variants of NCE for conditional models: one based on a classification objective, the other based on a ranking objective. We show that the ranking-based variant of NCE gives consistent parameter estimates under weaker assumptions than the classification-based method; we analyze the statistical efficiency of the ranking-based and classification-based variants of NCE; finally we describe experiments on synthetic data and language modeling showing the effectiveness and tradeoffs of both methods.", "phrases": ["negative sampling", "statistical efficiency", "noise contrastive estimation"], "overall_score": 1.3348476636831184, "scores": [1.9391880558485937, 0.8753126483321415, 0.8305914109859517], "rank_score": 1.2150307050555622} -{"id": "meng-etal-2017-deep", "title": "Deep Keyphrase Generation", "abstract": "Keyphrase provides highly-summative information that can be effectively used for understanding, organizing and retrieving text content. Though previous studies have provided many workable solutions for automated keyphrase extraction, they commonly divided the to-be-summarized content into multiple text chunks, then ranked and selected the most meaningful ones. These approaches could neither identify keyphrases that do not appear in the text, nor capture the real semantic meaning behind the text. We propose a generative model for keyphrase prediction with an encoder-decoder framework, which can effectively overcome the above drawbacks. We name it deep keyphrase generation since it attempts to capture the deep semantic meaning of the content with a deep learning method. Empirical analysis on six datasets demonstrates that our proposed model not only achieves a significant performance boost on extracting keyphrases that appear in the source text, but also can generate absent keyphrases based on the semantic meaning of the text. 
Code and dataset are available at .", "phrases": ["generative model", "encoder-decoder framework", "source text", "deep keyphrase generation", "sequence-to-sequence"], "overall_score": 4.1721713239586995, "scores": [1.0397208947842924, 1.568364365978274, 1.3121030506696942, 1.2961583376552408, 0.8584740702637638], "rank_score": 1.2149641438702532} -{"id": "yuan-felice-2013-constrained", "title": "Constrained Grammatical Error Correction using Statistical Machine Translation", "abstract": "This paper describes our use of phrase-based statistical machine translation (PBSMT) for the automatic correction of errors in learner text in our submission to the CoNLL 2013 Shared Task on Grammatical Error Correction. Since the limited training data provided for the task was insufficient for training an effective SMT system, we also explored alternative ways of generating pairs of incorrect and correct sentences automatically from other existing learner corpora. Our approach does not yield particularly high performance but reveals many problems that require careful attention when building SMT systems for error correction.", "phrases": ["statistical machine translation", "learner corpora", "error type"], "overall_score": 3.116071889873343, "scores": [2.5137472569147334, 0.5935647339664356, 0.5372884931111197], "rank_score": 1.2148668279974297} -{"id": "liu-etal-2019-xqa", "title": "XQA: A Cross-lingual Open-domain Question Answering Dataset", "abstract": "Open-domain question answering (OpenQA) aims to answer questions through text retrieval and reading comprehension. Recently, lots of neural network-based models have been proposed and achieved promising results in OpenQA. However, the success of these models relies on a massive volume of training data (usually in English), which is not available in many other languages, especially for those low-resource languages. Therefore, it is essential to investigate cross-lingual OpenQA. In this paper, we construct a novel dataset XQA for cross-lingual OpenQA research. It consists of a training set in English as well as development and test sets in eight other languages. Besides, we provide several baseline systems for cross-lingual OpenQA, including two machine translation-based methods and one zero-shot cross-lingual method (multilingual BERT). Experimental results show that the multilingual BERT model achieves the best results in almost all target languages, while the performance of cross-lingual OpenQA is still much lower than that of English. Our analysis indicates that the performance of cross-lingual OpenQA is related to not only how similar the target language and English are, but also how difficult the question set of the target language is. The XQA dataset is publicly available at .", "phrases": ["open-domain question", "other language", "cross-lingual openqa research", "xqa", "wikipedia"], "overall_score": 2.796252454692724, "scores": [2.423068763334102, 1.9481083296539452, 0.6133368363518931, 0.5501091752570623, 0.5373619508103611], "rank_score": 1.2143970110814726} -{"id": "marton-resnik-2008-soft", "title": "Soft Syntactic Constraints for Hierarchical Phrased-Based Translation", "abstract": "In adding syntax to statistical MT, there is a tradeoff between taking advantage of linguistic analysis and allowing the model to exploit linguistically unmotivated mappings learned from parallel training data. 
A number of previous efforts have tackled this tradeoff by starting with a commitment to linguistically motivated analyses and then finding appropriate ways to soften that commitment. We present an approach that explores the tradeoff from the other direction, starting with a context-free translation model learned directly from aligned parallel text, and then adding soft constituent-level constraints based on parses of the source language. We obtain substantial improvements in performance for translation from Chinese and Arabic to English.", "phrases": ["syntactic constraint", "parallel text", "soft constraint"], "overall_score": 4.282071215088785, "scores": [2.149772754194713, 0.9151030491958653, 0.5780340907033169], "rank_score": 1.2143032980312984} -{"id": "rosenberg-binkowski-2004-augmenting", "title": "Augmenting the kappa statistic to determine interannotator reliability for multiply labeled data points", "abstract": "This paper describes a method for evaluating interannotator reliability in an email corpus annotated for type (e.g., question, answer, social chat) when annotators are allowed to assign multiple labels to a message. An augmentation is proposed to Cohen's kappa statistic which permits all data to be included in the reliability measure and which further permits the identification of more or less reliably annotated data points.", "phrases": ["interannotator reliability", "multiply", "data point"], "overall_score": 1.953746512284231, "scores": [1.8799385369425055, 0.9185336629703151, 0.8433206841557572], "rank_score": 1.2139309613561926} -{"id": "shen-etal-2017-deep", "title": "Deep Active Learning for Named Entity Recognition", "abstract": "Deep neural networks have advanced the state of the art in named entity recognition. However, under typical training procedures, advantages over classical methods emerge only with large datasets. As a result, deep learning is employed only when large public datasets or a large budget for manually labeling data is available. In this work, we show otherwise: by combining deep learning with active learning, we can outperform classical methods even with a significantly smaller amount of training data.", "phrases": ["active learning", "entity recognition", "deep learning"], "overall_score": 3.8575851828357686, "scores": [1.7056183765079573, 0.9852191096577942, 0.9506224029010979], "rank_score": 1.213819963022283} -{"id": "zhao-grishman-2005-extracting", "title": "Extracting Relations with Integrated Information Using Kernel Methods", "abstract": "Entity relation detection is a form of information extraction that finds predefined relations between pairs of entities in text. This paper describes a relation detection approach that combines clues from different levels of syntactic processing using kernel methods. Information from three different levels of processing is considered: tokenization, sentence parsing and deep dependency analysis. Each source of information is represented by kernel functions. Then composite kernels are developed to integrate and extend individual kernels so that processing errors occurring at one level can be overcome by information from other levels. We present an evaluation of these methods on the 2004 ACE relation detection task, using Support Vector Machines, and show that each level of syntactic processing contributes useful information for this task. When evaluated on the official test data, our approach produced very competitive ACE value scores. 

We also compare the SVM with KNN on different kernels.", "phrases": ["kernel", "different level", "svm", "relation extraction", "feature-based method"], "overall_score": 4.0005260743725675, "scores": [2.1415175891513294, 1.6595760045831618, 0.8907227092929504, 0.8528028364616431, 0.5244404605732212], "rank_score": 1.213811920012461} -{"id": "schwenk-2008-investigations", "title": "Investigations on large-scale lightly-supervised training for statistical machine translation.", "abstract": "Sentence-aligned bilingual texts are a crucial resource to build statistical machine translation (SMT) systems. In this paper we propose to apply lightly-supervised training to produce additional parallel data. The idea is to translate large amounts of monolingual data (up to 275M words) with an SMT system, and to use those as additional training data. Results are reported for the translation from French into English. We consider two setups: first the initial SMT system is only trained with a very limited amount of human-produced translations, and then the case where we have more than 100 million words. In both conditions, lightly-supervised training achieves significant improvements of the BLEU score.", "phrases": ["lightly-supervised training", "statistical machine translation", "phrase-based smt", "similar work", "monolingual source data"], "overall_score": 3.5083207972765518, "scores": [3.48312794267793, 0.929190648171955, 0.5562898267468319, 0.5519171105319793, 0.548452947896439], "rank_score": 1.2137956952050273} -{"id": "denkowski-etal-2014-learning", "title": "Learning from Post-Editing: Online Model Adaptation for Statistical Machine Translation", "abstract": "Using machine translation output as a starting point for human translation has become an increasingly common application of MT. We propose and evaluate three computationally efficient online methods for updating statistical MT systems in a scenario where post-edited MT output is constantly being returned to the system: (1) adding new rules to the translation model from the post-edited content, (2) updating a Bayesian language model of the target language that is used by the MT system, and (3) updating the MT system\u2019s discriminative parameters with a MIRA step. Individually, these techniques can substantially improve MT quality, even over strong baselines. Moreover, we see super-additive improvements when all three techniques are used in tandem.", "phrases": ["online model adaptation", "statistical machine translation", "online learning"], "overall_score": 2.9104573476696918, "scores": [1.8552628558834268, 0.8989639617482905, 0.8870381458791649], "rank_score": 1.2137549878369607} -{"id": "li-etal-2017-deep", "title": "Deep Recurrent Generative Decoder for Abstractive Text Summarization", "abstract": "We propose a new framework for abstractive text summarization based on a sequence-to-sequence oriented encoder-decoder model equipped with a deep recurrent generative decoder (DRGN). Latent structure information implied in the target summaries is learned based on a recurrent latent random model for improving the summarization quality. Neural variational inference is employed to address the intractable posterior inference for the recurrent latent variables. Abstractive summaries are generated based on both the generative latent variables and the discriminative deterministic states. 

Extensive experiments on some benchmark datasets in different languages show that DRGN achieves improvements over the state-of-the-art methods.", "phrases": ["abstractive text summarization", "encoder-decoder model", "latent structure information", "deep recurrent"], "overall_score": 3.113095229962443, "scores": [2.4296091882997444, 1.0406942299566364, 0.840577076828388, 0.543944760480342], "rank_score": 1.2137063138912776} -{"id": "zhou-etal-2008-diagnostic", "title": "Diagnostic Evaluation of Machine Translation Systems Using Automatically Constructed Linguistic Check-Points", "abstract": "We present a diagnostic evaluation platform which provides multi-factored evaluation based on automatically constructed check-points. A check-point is a linguistically motivated unit (e.g. an ambiguous word, a noun phrase, a verb-obj collocation, a prepositional phrase etc.), which are pre-defined in a linguistic taxonomy. We present a method that automatically extracts check-points from parallel sentences. By means of checkpoints, our method can monitor a MT system in translating important linguistic phenomena to provide diagnostic evaluation. The effectiveness of our approach for diagnostic evaluation is verified through experiments on various types of MT systems.", "phrases": ["check-point", "diagnostic evaluation", "linguistic checkpoint"], "overall_score": 2.7936780066950493, "scores": [1.7328821027192398, 0.8016782870424224, 1.1052764378047448], "rank_score": 1.2132789425221355} -{"id": "madaan-etal-2020-politeness", "title": "Politeness Transfer: A Tag and Generate Approach", "abstract": "This paper introduces a new task of politeness transfer which involves converting non-polite sentences to polite sentences while preserving the meaning. We also provide a dataset of more than 1.39 million instances automatically labeled for politeness to encourage benchmark evaluations on this new task. We design a tag and generate pipeline that identifies stylistic attributes and subsequently generates a sentence in the target style while preserving most of the source content. For politeness as well as five other transfer tasks, our model outperforms the state-of-the-art methods on automatic metrics for content preservation, with a comparable or better performance on style transfer accuracy. Additionally, our model surpasses existing methods on human evaluations for grammaticality, meaning preservation and transfer accuracy across all the six style transfer tasks. The data and code is located at .", "phrases": ["style transfer task", "politeness transfer", "source text"], "overall_score": 3.5038464485745853, "scores": [2.5665480561964364, 0.5433428039685608, 0.5268521702107443], "rank_score": 1.2122476767919137} -{"id": "huang-etal-2019-achieving", "title": "Achieving Verified Robustness to Symbol Substitutions via Interval Bound Propagation", "abstract": "Neural networks are part of many contemporary NLP systems, yet their empirical successes come at the price of vulnerability to adversarial attacks. Previous work has used adversarial training and data augmentation to partially mitigate such brittleness, but these are unlikely to find worst-case adversaries due to the complexity of the search space arising from discrete text perturbations. In this work, we approach the problem from the opposite direction: to formally verify a system's robustness against a predefined class of adversarial attacks. We study text classification under synonym replacements or character flip perturbations. 

We propose modeling these input perturbations as a simplex and then using Interval Bound Propagation \u2013 a formal model verification method. We modify the conventional log-likelihood training objective to train models that can be efficiently verified, which would otherwise come with exponential search complexity. The resulting models show only little difference in terms of nominal accuracy, but have much improved verified accuracy under perturbations and come with an efficiently computable formal guarantee on worst case adversaries.", "phrases": ["robustness", "interval bound propagation", "ibp", "edit distance", "loss"], "overall_score": 2.663520646342946, "scores": [2.3589364365220717, 1.7631378505325461, 0.8543637403163102, 0.5482888455447028, 0.5363755601639113], "rank_score": 1.2122204866159085} -{"id": "mccarthy-etal-2004-finding", "title": "Finding Predominant Word Senses in Untagged Text", "abstract": "In word sense disambiguation (WSD), the heuristic of choosing the most common sense is extremely powerful because the distribution of the senses of a word is often skewed. The problem with using the predominant, or first sense heuristic, aside from the fact that it does not take surrounding context into account, is that it assumes some quantity of hand-tagged data. Whilst there are a few hand-tagged corpora available for some languages, one would expect the frequency distribution of the senses of words, particularly topical words, to depend on the genre and domain of the text under consideration. We present work on the use of a thesaurus acquired from raw textual corpora and the WordNet similarity package to find predominant noun senses automatically. The acquired predominant senses give a precision of 64% on the nouns of the SENSEVAL-2 English all-words task. This is a very promising result given that our method does not require any hand-tagged text, such as SemCor. Furthermore, we demonstrate that our method discovers appropriate predominant senses for words from two domain-specific corpora.", "phrases": ["disambiguation", "predominant sense", "ranking"], "overall_score": 4.665882272150052, "scores": [2.1759358804023017, 0.9135981982714996, 0.546078958663532], "rank_score": 1.2118710124457779} -{"id": "edunov-etal-2019-pre", "title": "Pre-trained language model representations for language generation", "abstract": "Pre-trained language model representations have been successful in a wide range of language understanding tasks. In this paper, we examine different strategies to integrate pre-trained representations into sequence to sequence models and apply it to neural machine translation and abstractive summarization. We find that pre-trained representations are most effective when added to the encoder network which slows inference by only 14%. Our experiments in machine translation show gains of up to 5.3 BLEU in a simulated resource-poor setup. While returns diminish with more labeled data, we still observe improvements when millions of sentence-pairs are available. 
Finally, on abstractive summarization we achieve a new state of the art on the full text version of CNN/DailyMail.", "phrases": ["language model representation", "neural machine translation", "elmo"], "overall_score": 3.4322523884481284, "scores": [1.9738450064088848, 1.0884043858920371, 0.5720541051557381], "rank_score": 1.21143449915222} -{"id": "bhat-etal-2018-universal", "title": "Universal Dependency Parsing for Hindi-English Code-Switching", "abstract": "Code-switching is a phenomenon of mixing grammatical structures of two or more languages under varied social constraints. The code-switching data differ so radically from the benchmark corpora used in NLP community that the application of standard technologies to these data degrades their performance sharply. Unlike standard corpora, these data often need to go through additional processes such as language identification, normalization and/or back-transliteration for their efficient processing. In this paper, we investigate these indispensable processes and other problems associated with syntactic parsing of code-switching data and propose methods to mitigate their effects. In particular, we study dependency parsing of code-switching data of Hindi and English multilingual speakers from Twitter. We present a treebank of Hindi-English code-switching tweets under Universal Dependencies scheme and propose a neural stacking model for parsing that efficiently leverages the part-of-speech tag and syntactic tree annotations in the code-switching treebank and the preexisting Hindi and English treebanks. We also present normalization and back-transliteration models with a decoding process tailored for code-switching data. Results show that our neural stacking parser is 1.5% LAS points better than the augmented parsing model and 3.8% LAS points better than the one which uses first-best normalization and/or back-transliteration.", "phrases": ["dependency parsing", "code-switching", "language identification"], "overall_score": 3.2804298599096504, "scores": [1.769068983961263, 0.9895770285415442, 0.8754408207927586], "rank_score": 1.2113622777651887} -{"id": "jeong-etal-2009-efficient", "title": "Efficient Inference of CRFs for Large-Scale Natural Language Data", "abstract": "This paper presents an efficient inference algorithm of conditional random fields (CRFs) for large-scale data. Our key idea is to decompose the output label state into an active set and an inactive set in which most unsupported transitions become a constant. Our method unifies two previous methods for efficient inference of CRFs, and also derives a simple but robust special case that performs faster than exact inference when the active sets are sufficiently small. We demonstrate that our method achieves dramatic speedup on six standard natural language processing problems.", "phrases": ["crfs", "efficient inference", "similar idea"], "overall_score": 1.6792478724372504, "scores": [2.2482146132216676, 0.8415202062007012, 0.544229047560477], "rank_score": 1.211321288994282} -{"id": "ren-etal-2020-simulspeech", "title": "SimulSpeech: End-to-End Simultaneous Speech to Text Translation", "abstract": "In this work, we develop SimulSpeech, an end-to-end simultaneous speech to text translation system which translates speech in source language to text in target language concurrently. 
SimulSpeech consists of a speech encoder, a speech segmenter and a text decoder, where 1) the segmenter builds upon the encoder and leverages a connectionist temporal classification (CTC) loss to split the input streaming speech in real time, 2) the encoder-decoder attention adopts a wait-k strategy for simultaneous translation. SimulSpeech is more challenging than previous cascaded systems (with simultaneous automatic speech recognition (ASR) and simultaneous neural machine translation (NMT)). We introduce two novel knowledge distillation methods to ensure the performance: 1) Attention-level knowledge distillation transfers the knowledge from the multiplication of the attention matrices of simultaneous NMT and ASR models to help the training of the attention mechanism in SimulSpeech; 2) Data-level knowledge distillation transfers the knowledge from the full-sentence NMT model and also reduces the complexity of data distribution to help on the optimization of SimulSpeech. Experiments on MuST-C English-Spanish and English-German spoken language translation datasets show that SimulSpeech achieves reasonable BLEU scores and lower delay compared to full-sentence end-to-end speech to text translation (without simultaneous translation), and better performance than the two-stage cascaded simultaneous translation model in terms of BLEU scores and translation delay.", "phrases": ["end-to-end", "connectionist temporal classification", "simultaneous translation", "simulspeech", "low latency"], "overall_score": 2.786388468844766, "scores": [2.3867178243827443, 1.7310116635824058, 0.8279692274190712, 0.5824711340484949, 0.5223958328579477], "rank_score": 1.2101131364581328} -{"id": "oh-etal-2010-co", "title": "Co-STAR: A Co-training Style Algorithm for Hyponymy Relation Acquisition from Structured and Unstructured Text", "abstract": "This paper proposes a co-training style algorithm called Co-STAR that acquires hyponymy relations simultaneously from structured and unstructured text. In Co-STAR, two independent processes for hyponymy relation acquisition -- one handling structured text and the other handling unstructured text -- collaborate by repeatedly exchanging the knowledge they acquired about hyponymy relations. Unlike conventional co-training, the two processes in Co-STAR are applied to different source texts and training data. We show the effectiveness of this algorithm through experiments on large-scale hyponymy-relation acquisition from Japanese Wikipedia and Web texts. We also show that Co-STAR is robust against noisy training data.", "phrases": ["co-training style algorithm", "hyponymy relation acquisition", "unstructured text"], "overall_score": 1.3293051311446018, "scores": [1.9246885510063942, 0.8529731621748857, 0.8522953103911662], "rank_score": 1.2099856745241488} -{"id": "habernal-etal-2018-adapting", "title": "Adapting Serious Game for Fallacious Argumentation to German: Pitfalls, Insights, and Best Practices", "abstract": "As argumentation about controversies is culture- and language-dependent, porting a serious game that deals with daily argumentation to another language requires substantial adaptation. This article presents a study of deploying Argotario (serious game for learning argumentation fallacies) into the German context. We examine all steps that are necessary to end up with a successful serious game platform, such as topic selection, initial data creation, or effective campaigns. 
Moreover, we analyze users\u2019 behavior and in-game created data in order to assess the dissemination strategies and qualitative aspects of the resulting corpus. We also report on classification experiments based on neural networks and feature-based models.", "phrases": ["serious game", "fallacy", "propaganda technique"], "overall_score": 3.005453515805252, "scores": [0.8727823239282956, 1.7117853438288397, 1.0438827403034598], "rank_score": 1.2094834693535317} -{"id": "van-de-cruys-2009-non", "title": "A Non-negative Tensor Factorization Model for Selectional Preference Induction", "abstract": "Distributional similarity methods have proven to be a valuable tool for the induction of semantic similarity. Up till now, most algorithms use two-way cooccurrence data to compute the meaning of words. Co-occurrence frequencies, however, need not be pairwise. One can easily imagine situations where it is desirable to investigate co-occurrence frequencies of three modes and beyond. This paper will investigate a tensor factorization method called non-negative tensor factorization to build a model of three-way cooccurrences. The approach is applied to the problem of selectional preference induction, and automatically evaluated in a pseudo-disambiguation task. The results show that non-negative tensor factorization is a promising tool for NLP.", "phrases": ["tensor factorization", "selectional preference induction", "co-occurrence"], "overall_score": 2.6558287487432057, "scores": [1.8997097674936374, 1.1755252171923078, 0.5509242747780595], "rank_score": 1.2087197531546683} -{"id": "stefanescu-etal-2012-hybrid", "title": "Hybrid Parallel Sentence Mining from Comparable Corpora", "abstract": "Mining for parallel sentences in comparable corpora is much more difficult than aligning sentences in parallel corpora. Sentence alignment in parallel corpora usually exploits simple empirical evidence (turned into assumptions) such as (i) the length of a sentence is proportional with the length of its translation and (ii) the discourse flow is necessarily the same in both parts of the bi-text (Gale and Church, 1993). Thus, the extraction tools search for parallel sentences around the same (relative) text positions, making sentence alignment a much easier task when compared to the kind of work undertaken here. For comparable corpora, the second assumption does not hold. Parallel sentences, should they exist at all, are scattered all around the source and target documents, and so, any two sentences have to be processed in order to determine if they are parallel or not. Also, we aim at finding pairs of quasi-parallel sentences that are not entirely parallel but contain spans of contiguous text that is parallel. Thus, finding parallel sentences in comparable corpora is confronted", "phrases": ["parallel sentence", "mining", "comparable corpora"], "overall_score": 2.512784707423738, "scores": [2.409124653650103, 0.6336334300151172, 0.5824239525566313], "rank_score": 1.2083940120739507} -{"id": "prabhakaran-etal-2015-new", "title": "A New Dataset and Evaluation for Belief/Factuality", "abstract": "The terms \u201cbelief\u201d and \u201cfactuality\u201d both refer to the intention of the writer to present the propositional content of an utterance as firmly believed by the writer, not firmly believed, or having some other status. 

This paper presents an ongoing annotation effort and an associated evaluation.", "phrases": ["belief", "factuality", "annotation effort", "discussion"], "overall_score": 2.350765627663484, "scores": [1.8890790935253499, 1.871986616797548, 0.5419729707424377, 0.5291795563588683], "rank_score": 1.208054559356051} -{"id": "van-den-beukel-aroyo-2018-homonym", "title": "Homonym Detection For Humor Recognition In Short Text", "abstract": "In this paper, automatic homophone- and homograph detection are suggested as new useful features for humor recognition systems. The system combines style-features from previous studies on humor recognition in short text with ambiguity-based features. The performance of two potentially useful homograph detection methods is evaluated using crowdsourced annotations as ground truth. Adding homophones and homographs as features to the classifier results in a small but significant improvement over the style-features alone. For the task of humor recognition, recall appears to be a more important quality measure than precision. Although the system was designed for humor recognition in oneliners, it also performs well at the classification of longer humorous texts.", "phrases": ["humor recognition", "short text", "homonym detection"], "overall_score": 1.9438885797408654, "scores": [1.9363255584242987, 0.8720997002861608, 0.8149923917429428], "rank_score": 1.2078058834844674} -{"id": "garg-ramakrishnan-2020-bae", "title": "BAE: BERT-based Adversarial Examples for Text Classification", "abstract": "Modern text classification models are susceptible to adversarial examples, perturbed versions of the original text indiscernible by humans which get misclassified by the model. Recent works in NLP use rule-based synonym replacement strategies to generate adversarial examples. These strategies can lead to out-of-context and unnaturally complex token replacements, which are easily identifiable by humans. We present BAE, a black box attack for generating adversarial examples using contextual perturbations from a BERT masked language model. BAE replaces and inserts tokens in the original text by masking a portion of the text and leveraging the BERT-MLM to generate alternatives for the masked tokens. Through automatic and human evaluations, we show that BAE performs a stronger attack, in addition to generating adversarial examples with improved grammaticality and semantic coherence as compared to prior work.", "phrases": ["attack", "bert", "language model"], "overall_score": 3.6769567614707026, "scores": [1.6846040946291232, 1.084017333444543, 0.8545643128928444], "rank_score": 1.2077285803221702} -{"id": "lauscher-etal-2020-specializing", "title": "Specializing Unsupervised Pretraining Models for Word-Level Semantic Similarity", "abstract": "Unsupervised pretraining models have been shown to facilitate a wide range of downstream NLP applications. These models, however, retain some of the limitations of traditional static word embeddings. In particular, they encode only the distributional knowledge available in raw text corpora, incorporated through language modeling objectives. In this work, we complement such distributional knowledge with external lexical knowledge, that is, we integrate the discrete knowledge on word-level semantic similarity into pretraining. 
To this end, we generalize the standard BERT model to a multi-task learning setting where we couple BERT's masked language modeling and next sentence prediction objectives with an auxiliary task of binary word relation classification. Our experiments suggest that our \u201cLexically Informed\u201d BERT (LIBERT), specialized for the word-level semantic similarity, yields better performance than the lexically blind \u201cvanilla\u201d BERT on several language understanding tasks. Concretely, LIBERT outperforms BERT in 9 out of 10 tasks of the GLUE benchmark and is on a par with BERT in the remaining one. Moreover, we show consistent gains on 3 benchmarks for lexical simplification, a task where knowledge about word-level semantic similarity is paramount, as well as large gains on lexical reasoning probes.", "phrases": ["word-level semantic similarity", "distributional knowledge", "bert"], "overall_score": 2.1634009288404616, "scores": [2.0289545973248835, 1.042414247355296, 0.5508822989499216], "rank_score": 1.2074170478767003} -{"id": "hulpus-etal-2019-spreading", "title": "A Spreading Activation Framework for Tracking Conceptual Complexity of Texts", "abstract": "We propose an unsupervised approach for assessing conceptual complexity of texts, based on spreading activation. Using DBpedia knowledge graph as a proxy to long-term memory, mentioned concepts become activated and trigger further activation as the text is sequentially traversed. Drawing inspiration from psycholinguistic theories of reading comprehension, we model memory processes such as semantic priming, sentence wrap-up, and forgetting. We show that our models capture various aspects of conceptual text complexity and significantly outperform current state of the art.", "phrases": ["activation framework", "conceptual complexity", "priming"], "overall_score": 1.943183657016573, "scores": [2.2666481163633567, 0.8272584228133423, 0.5281971319323999], "rank_score": 1.2073678903696996} -{"id": "skantze-hjalmarsson-2010-towards", "title": "Towards Incremental Speech Generation in Dialogue Systems", "abstract": "We present a first step towards a model of speech generation for incremental dialogue systems. The model allows a dialogue system to incrementally interpret spoken input, while simultaneously planning, realising and self-monitoring the system response. The model has been implemented in a general dialogue system framework. Using this framework, we have implemented a specific application and tested it in a Wizard-of-Oz setting, comparing it with a non-incremental version of the same system. The results show that the incremental version, while producing longer utterances, has a shorter response time and is perceived as more efficient by the users.", "phrases": ["incremental speech generation", "dialogue system", "pause"], "overall_score": 3.1860432601957402, "scores": [1.542301550352848, 1.5520954753966258, 0.527399921716111], "rank_score": 1.207265649155195} -{"id": "kiela-bottou-2014-learning", "title": "Learning Image Embeddings using Convolutional Neural Networks for Improved Multi-Modal Semantics", "abstract": "We construct multi-modal concept representations by concatenating a skip-gram linguistic representation vector with a visual concept representation vector computed using the feature extraction layers of a deep convolutional neural network (CNN) trained on a large labeled object recognition dataset. 
This transfer learning approach brings a clear performance gain over features based on the traditional bag-of-visual-word approach. Experimental results are reported on the WordSim353 and MEN semantic relatedness evaluation tasks. We use visual features computed using either ImageNet or ESP Game images.", "phrases": ["convolutional neural networks", "multi-modal semantic", "cnn", "visual feature"], "overall_score": 3.836260301126895, "scores": [1.3731481473477078, 0.8802108060649647, 1.5344235925556717, 1.0406571292738924], "rank_score": 1.2071099188105592} -{"id": "bentivogli-etal-2004-revising", "title": "Revising the Wordnet Domains Hierarchy: semantics, coverage and balancing", "abstract": "The continuous expansion of the multilingual information society has led in recent years to a pressing demand for multilingual linguistic resources suitable to be used for different applications. \n \nIn this paper we present the WordNet Domains Hierarchy (WDH), a language-independent resource composed of 164, hierarchically organized, domain labels (e.g. Architecture, Sport, Medicine). Although WDH has been successfully applied to various Natural Language Processing tasks, the first available version presented some problems, mostly related to the lack of a clear semantics of the domain labels. Other correlated issues were the coverage and the balancing of the domains. We illustrate a new version of WDH addressing these problems by an explicit and systematic reference to the Dewey Decimal Classification. The new version of WDH has a better defined semantics and is applicable to a wider range of tasks.", "phrases": ["wordnet domains hierarchy", "coverage", "balancing"], "overall_score": 2.6518458644850487, "scores": [1.9813367162802928, 0.8453606846143856, 0.7940237923389929], "rank_score": 1.2069070644112239} -{"id": "liu-etal-2018-knowledge", "title": "Knowledge Diffusion for Neural Dialogue Generation", "abstract": "End-to-end neural dialogue generation has shown promising results recently, but it does not employ knowledge to guide the generation and hence tends to generate short, general, and meaningless responses. In this paper, we propose a neural knowledge diffusion (NKD) model to introduce knowledge into dialogue generation. This method can not only match the relevant facts for the input utterance but diffuse them to similar entities. With the help of facts matching and entity diffusion, the neural dialogue generation is augmented with the ability of convergent and divergent thinking over the knowledge base. Our empirical study on a real-world dataset proves that our model is capable of generating meaningful, diverse and natural responses for both factoid-questions and knowledge grounded chit-chats. The experiment results also show that our model outperforms competitive baseline models significantly.", "phrases": ["neural dialogue generation", "knowledge diffusion", "external knowledge graph"], "overall_score": 3.5535676177511846, "scores": [2.1358345794506497, 0.949801482139133, 0.5349867221337099], "rank_score": 1.206874261241164} -{"id": "gao-etal-2014-modeling", "title": "Modeling Interestingness with Deep Neural Networks", "abstract": "This paper presents a deep semantic similarity model (DSSM), a special type of deep neural networks designed for text analysis, for recommending target documents to be of interest to a user based on a source document that she is reading. 

We observe, identify, and detect naturally occurring signals of interestingness in click transitions on the Web between source and target documents, which we collect from commercial Web browser logs. The DSSM is trained on millions of Web transitions, and maps source-target document pairs to feature vectors in a latent space in such a way that the distance between source documents and their corresponding interesting targets in that space is minimized. The effectiveness of the DSSM is demonstrated using two interestingness tasks: automatic highlighting and contextual entity search. The results on large-scale, real-world datasets show that the semantics of documents are important for modeling interestingness and that the DSSM leads to significant quality improvement on both tasks, outperforming not only the classic document models that do not use semantics but also state-of-the-art topic models.", "phrases": ["interestingness", "source document", "convolutional-pooling structure"], "overall_score": 2.6511941718202046, "scores": [2.5640524775322207, 0.5318592750486854, 0.5239196463118957], "rank_score": 1.2066104662976007} -{"id": "fang-cohn-2016-learning", "title": "Learning when to trust distant supervision: An application to low-resource POS tagging using cross-lingual projection", "abstract": "Cross lingual projection of linguistic annotation suffers from many sources of bias and noise, leading to unreliable annotations that cannot be used directly. In this paper, we introduce a novel approach to sequence tagging that learns to correct the errors from cross-lingual projection using an explicit debiasing layer. This is framed as joint learning over two corpora, one tagged with gold standard and the other with projected tags. We evaluated with only 1,000 tokens tagged with gold standard tags, along with more plentiful parallel data. Our system equals or exceeds the state-of-the-art on eight simulated low-resource settings, as well as two real low-resource languages, Malagasy and Kinyarwanda.", "phrases": ["distant supervision", "low-resource pos tagging", "cross-lingual projection"], "overall_score": 2.778160322347668, "scores": [1.880316239398088, 0.9316143804137971, 0.8076884737025689], "rank_score": 1.2065396978381513} -{"id": "conneau-etal-2017-supervised", "title": "Supervised Learning of Universal Sentence Representations from Natural Language Inference Data", "abstract": "Many modern NLP systems rely on word embeddings, previously trained in an unsupervised manner on large corpora, as base features. Efforts to obtain embeddings for larger chunks of text, such as sentences, have however not been so successful. Several attempts at learning unsupervised representations of sentences have not reached satisfactory enough performance to be widely adopted. In this paper, we show how universal sentence representations trained using the supervised data of the Stanford Natural Language Inference datasets can consistently outperform unsupervised methods like SkipThought vectors on a wide range of transfer tasks. Much like how computer vision uses ImageNet to obtain features, which can then be transferred to other tasks, our work tends to indicate the suitability of natural language inference for transfer learning to other NLP tasks. 
Our encoder is publicly available.", "phrases": ["universal sentence representations", "supervised learning", "infersent", "textual similarity", "entailment"], "overall_score": 5.689714206165221, "scores": [1.6772823285988323, 0.9495341341235025, 2.3184417659746575, 0.5464663674631868, 0.5374325164741993], "rank_score": 1.2058314225268758} -{"id": "ferreira-vlachos-2016-emergent", "title": "Emergent: a novel data-set for stance classification", "abstract": "We present Emergent, a novel data-set derived from a digital journalism project for rumour debunking. The data-set contains 300 rumoured claims and 2,595 associated news articles, collected and labelled by journalists with an estimation of their veracity (true, false or unverified). Each associated article is summarized into a headline and labelled to indicate whether its stance is for, against, or observing the claim, where observing indicates that the article merely repeats the claim. Thus, Emergent provides a real-world data source for a variety of natural language processing tasks in the context of fact-checking. Further to presenting the dataset, we address the task of determining the article headline stance with respect to the claim. For this purpose we use a logistic regression classifier and develop features that examine the headline and its agreement with the claim. The accuracy achieved was 73% which is 26% higher than the one achieved by the Excitement Open Platform (Magnini et al., 2014).", "phrases": ["novel data-set", "news article", "emergent"], "overall_score": 2.6491045861694626, "scores": [2.249713018078859, 0.8265312411686024, 0.5407341054052881], "rank_score": 1.2056594548842499} -{"id": "malmasi-etal-2022-semeval", "title": "SemEval-2022 Task 11: Multilingual Complex Named Entity Recognition (MultiCoNER)", "abstract": "We present the findings of SemEval-2022 Task 11 on Multilingual Complex Named Entity Recognition MULTICONER. Divided into 13 tracks, the task focused on methods to identify complex named entities (like names of movies, products and groups) in 11 languages in both monolingual and multi-lingual scenarios. Eleven tracks required building monolingual NER models for individual languages, one track focused on multilingual models able to work on all languages, and the last track featured code-mixed texts within any of these languages. The task is based on the MULTICONER dataset comprising 2.3 million instances in Bangla, Chinese, Dutch, English, Farsi, German, Hindi, Korean, Russian, Spanish, and Turkish. Results showed that methods fusing external knowledge into transformer models achieved the best results. However, identifying entities like creative works is still challenging even with external knowledge. MULTICONER was one of the most popular tasks in SemEval-2022 and it attracted 377 participants during the practice phase. 236 participants signed up for the final test phase and 55 teams submitted their systems.", "phrases": ["entity recognition", "multiconer", "semeval-2022 task"], "overall_score": 2.9951791851636944, "scores": [2.1448659928669405, 0.8839256048829842, 0.5872547258645598], "rank_score": 1.2053487745381615} -{"id": "hoang-etal-2017-towards", "title": "Towards Decoding as Continuous Optimisation in Neural Machine Translation", "abstract": "We propose a novel decoding approach for neural machine translation (NMT) based on continuous optimisation. 

We reformulate decoding, a discrete optimization problem, into a continuous problem, such that optimization can make use of efficient gradient-based techniques. Our powerful decoding framework allows for more accurate decoding for standard neural machine translation models, as well as enabling decoding in intractable models such as intersection of several different NMT models. Our empirical results show that our decoding framework is effective, and can lead to substantial improvements in translations, especially in situations where greedy search and beam search are not feasible. Finally, we show how the technique is highly competitive with, and complementary to, reranking.", "phrases": ["decoding", "continuous optimisation", "neural machine translation"], "overall_score": 2.1589663550634213, "scores": [1.8212479757619464, 0.9527925077827092, 0.8407857118378704], "rank_score": 1.2049420651275087} -{"id": "zaidan-callison-burch-2011-arabic", "title": "The Arabic Online Commentary Dataset: an Annotated Dataset of Informal Arabic with High Dialectal Content", "abstract": "The written form of Arabic, Modern Standard Arabic (MSA), differs quite a bit from the spoken dialects of Arabic, which are the true \"native\" languages of Arabic speakers used in daily life. However, due to MSA's prevalence in written form, almost all Arabic datasets have predominantly MSA content. We present the Arabic Online Commentary Dataset, a 52M-word monolingual dataset rich in dialectal content, and we describe our long-term annotation effort to identify the dialect level (and dialect itself) in each sentence of the dataset. So far, we have labeled 108K sentences, 41% of which as having dialectal content. We also present experimental results on the task of automatic dialect identification, using the collected labels for training and evaluation.", "phrases": ["arabic", "dialect", "n-gram"], "overall_score": 3.6678938588793666, "scores": [1.616453280352627, 1.4654372477981503, 0.5323648441519568], "rank_score": 1.2047517907675782} -{"id": "lee-etal-2021-dialogue", "title": "Dialogue State Tracking with a Language Model using Schema-Driven Prompting", "abstract": "Task-oriented conversational systems often use dialogue state tracking to represent the user's intentions, which involves filling in values of pre-defined slots. Many approaches have been proposed, often using task-specific architectures with special-purpose classifiers. Recently, good results have been obtained using more general architectures based on pretrained language models. Here, we introduce a new variation of the language modeling approach that uses schema-driven prompting to provide task-aware history encoding that is used for both categorical and non-categorical slots. We further improve performance by augmenting the prompting with schema descriptions, a naturally occurring source of in-domain knowledge. Our purely generative system achieves state-of-the-art performance on MultiWOZ 2.2 and achieves competitive performance on two other benchmarks: MultiWOZ 2.1 and M2M. 

The data and code will be available at .", "phrases": ["language model", "prompting", "dialogue state tracking"], "overall_score": 1.6694494926436103, "scores": [1.9950618263167157, 0.833270096805982, 0.7844278329546012], "rank_score": 1.2042532520257663} -{"id": "rudzewitz-2016-exploring", "title": "Exploring the Intersection of Short Answer Assessment, Authorship Attribution, and Plagiarism Detection", "abstract": "In spite of methodological and conceptual parallels, the computational linguistic applications short answer scoring (Burrows et al., 2015), authorship attribution (Stamatatos, 2009), and plagiarism detection (Zesch and Gurevych, 2012) have not been linked in practice. This work explores the practical usefulness of the combination of features from each of these fields for two tasks: short answer assessment, and plagiarism detection. The experiments show that incorporating features from the other domain yields significant improvements. A feature analysis reveals that robust lexical and semantic features are most informative for these tasks.", "phrases": ["short answer assessment", "authorship attribution", "plagiarism detection"], "overall_score": 1.3229531338050093, "scores": [1.8976860995520075, 0.8651826746105483, 0.8497427379721114], "rank_score": 1.2042038373782225} -{"id": "honnibal-johnson-2014-joint", "title": "Joint Incremental Disfluency Detection and Dependency Parsing", "abstract": "We present an incremental dependency parsing model that jointly performs disfluency detection. The model handles speech repairs using a novel non-monotonic transition system, and includes several novel classes of features. For comparison, we evaluated two pipeline systems, using state-of-the-art disfluency detectors. The joint model performed better on both tasks, with a parse accuracy of 90.5% and 84.0% accuracy at disfluency detection. The model runs in expected linear time, and processes over 550 tokens a second.", "phrases": ["disfluency detection", "dependency parsing", "new joint model"], "overall_score": 3.41089225245761, "scores": [1.4677228319160276, 1.6191111372310136, 0.5248519558963056], "rank_score": 1.2038953083477824} -{"id": "schlechtweg-etal-2019-wind", "title": "A Wind of Change: Detecting and Evaluating Lexical Semantic Change across Times and Domains", "abstract": "We perform an interdisciplinary large-scale evaluation for detecting lexical semantic divergences in a diachronic and in a synchronic task: semantic sense changes across time, and semantic sense changes across domains. Our work addresses the superficialness and lack of comparison in assessing models of diachronic lexical change, by bringing together and extending benchmark models on a common state-of-the-art evaluation task. In addition, we demonstrate that the same evaluation task and modelling approaches can successfully be utilised for the synchronic detection of domain-specific sense divergences in the field of term extraction.", "phrases": ["change", "evaluation task", "semantic change detection", "word sense"], "overall_score": 3.543237696917909, "scores": [2.7550411571229207, 0.9523746463145742, 0.584861938401532, 0.5211861770783706], "rank_score": 1.2033659797293494} -{"id": "caglayan-etal-2019-probing", "title": "Probing the Need for Visual Context in Multimodal Machine Translation", "abstract": "Current work on multimodal machine translation (MMT) has suggested that the visual modality is either unnecessary or only marginally beneficial. 
We posit that this is a consequence of the very simple, short and repetitive sentences used in the only available dataset for the task (Multi30K), rendering the source text sufficient as context. In the general case, however, we believe that it is possible to combine visual and textual information in order to ground translations. In this paper we probe the contribution of the visual modality to state-of-the-art MMT models by conducting a systematic analysis where we partially deprive the models from source-side textual context. Our results show that under limited textual context, models are capable of leveraging the visual input to generate better translations. This contradicts the current belief that MMT models disregard the visual modality because of either the quality of the image features or the way they are integrated into the model.", "phrases": ["multimodal machine translation", "visual modality", "textual context"], "overall_score": 2.643756602308609, "scores": [1.94946931575603, 1.1204774716614814, 0.539729660194964], "rank_score": 1.2032254825374917} -{"id": "gillick-favre-2009-scalable", "title": "A Scalable Global Model for Summarization", "abstract": "We present an Integer Linear Program for exact inference under a maximum coverage model for automatic summarization. We compare our model, which operates at the sub-sentence or \"concept\"-level, to a sentence-level model, previously solved with an ILP. Our model scales more efficiently to larger problems because it does not require a quadratic number of variables to address redundancy in pairs of selected sentences. We also show how to include sentence compression in the ILP formulation, which has the desirable property of performing compression and sentence selection simultaneously. The resulting system performs at least as well as the best systems participating in the recent Text Analysis Conference, as judged by a variety of automatic and manual content-based metrics.", "phrases": ["summarization", "maximum coverage model", "thread", "approximation", "relevant concept"], "overall_score": 4.13181907553201, "scores": [3.886726521779108, 0.5387546311161696, 0.5366619956132028, 0.5295647341597836, 0.5243586159713106], "rank_score": 1.2032132997279148} -{"id": "tan-etal-2014-effect", "title": "The effect of wording on message propagation: Topic- and author-controlled natural experiments on Twitter", "abstract": "Consider a person trying to spread an important message on a social network. He/she can spend hours trying to craft the message. Does it actually matter? While there has been extensive prior work looking into predicting popularity of social-media content, the effect of wording per se has rarely been studied since it is often confounded with the popularity of the author and the topic. To control for these confounding factors, we take advantage of the surprising fact that there are many pairs of tweets containing the same url and written by the same user but employing different wording. Given such pairs, we ask: which version attracts more retweets? This turns out to be a more difficult task than predicting popular topics. 

Still, humans can answer this question better than chance (but far from perfectly), and the computational methods we develop can do better than both an average human and a strong competing method trained on non-controlled data.", "phrases": ["wording", "message propagation", "twitter", "popularity", "retweet"], "overall_score": 3.3347995452807653, "scores": [1.3762641926062633, 1.307783784153136, 1.2656412858495234, 1.233033440237402, 0.8311507550728386], "rank_score": 1.2027746915838327} -{"id": "de-cao-etal-2021-editing", "title": "Editing Factual Knowledge in Language Models", "abstract": "The factual knowledge acquired during pre-training and stored in the parameters of Language Models (LMs) can be useful in downstream tasks (e.g., question answering or textual inference). However, some facts can be incorrectly induced or become obsolete over time. We present KnowledgeEditor, a method which can be used to edit this knowledge and, thus, fix `bugs' or unexpected predictions without the need for expensive re-training or fine-tuning. Besides being computationally efficient, KnowledgeEditor does not require any modifications in LM pre-training (e.g., the use of meta-learning). In our approach, we train a hyper-network with constrained optimization to modify a fact without affecting the rest of the knowledge; the trained hyper-network is then used to predict the weight update at test time. We show KnowledgeEditor's efficacy with two popular architectures and knowledge-intensive tasks: i) a BERT model fine-tuned for fact-checking, and ii) a sequence-to-sequence BART model for question answering. With our method, changing a prediction on the specific wording of a query tends to result in a consistent change in predictions also for its paraphrases. We show that this can be further encouraged by exploiting (e.g., automatically-generated) paraphrases during training. Interestingly, our hyper-network can be regarded as a `probe' revealing which components need to be changed to manipulate factual knowledge; our analysis shows that the updates tend to be concentrated on a small subset of components. Source code available at ", "phrases": ["factual knowledge", "language models", "update"], "overall_score": 2.50098721327884, "scores": [1.8542361729626566, 0.8712771434453025, 0.8826485335161323], "rank_score": 1.2027206166413638} -{"id": "weiss-2014-muck", "title": "MUCK: A toolkit for extracting and visualizing semantic dimensions of large text collections", "abstract": "Users with large text collections are often faced with one of two problems; either they wish to retrieve a semantically-relevant subset of data from the collection for further scrutiny (needle-in-a-haystack) or they wish to glean a high-level understanding of how a subset compares to the parent corpus in the context of aforementioned semantic dimensions (forest-for-the-trees). In this paper, I describe MUCK, an open-source toolkit that addresses both of these problems through a distributed text processing engine with an interactive visualization interface.", "phrases": ["semantic dimension", "text collection", "muck"], "overall_score": 1.3206777288959979, "scores": [1.9290453566894255, 0.8610595410509922, 0.8162931259803252], "rank_score": 1.202132674573581} -{"id": "ajjour-etal-2019-modeling", "title": "Modeling Frames in Argumentation", "abstract": "In argumentation, framing is used to emphasize a specific aspect of a controversial topic while concealing others. 

When talking about legalizing drugs, for instance, its economical aspect may be emphasized. In general, we call a set of arguments that focus on the same aspect a frame. An argumentative text has to serve the \u201cright\u201d frame(s) to convince the audience to adopt the author's stance (e.g., being pro or con legalizing drugs). More specifically, an author has to choose frames that fit the audience's cultural background and interests. This paper introduces frame identification, which is the task of splitting a set of arguments into non-overlapping frames. We present a fully unsupervised approach to this task, which first removes topical information and then identifies frames using clustering. For evaluation purposes, we provide a corpus with 12,326 debate-portal arguments, organized along the frames of the debates' topics. On this corpus, our approach outperforms different strong baselines, achieving an F1-score of 0.28.", "phrases": ["frame", "argumentation", "same aspect", "clustering"], "overall_score": 2.9868590699462114, "scores": [2.0725425608033414, 1.366157515082717, 0.8489258202883297, 0.5203761592767246], "rank_score": 1.2020005138627783} -{"id": "cao-etal-2018-retrieve", "title": "Retrieve, Rerank and Rewrite: Soft Template Based Neural Summarization", "abstract": "Most previous seq2seq summarization systems purely depend on the source text to generate summaries, which tends to work unstably. Inspired by the traditional template-based summarization approaches, this paper proposes to use existing summaries as soft templates to guide the seq2seq model. To this end, we use a popular IR platform to Retrieve proper summaries as candidate templates. Then, we extend the seq2seq framework to jointly conduct template Reranking and template-aware summary generation (Rewriting). Experiments show that, in terms of informativeness, our model significantly outperforms the state-of-the-art methods, and even soft templates themselves demonstrate high competitiveness. In addition, the import of high-quality external summaries improves the stability and readability of generated summaries.", "phrases": ["template", "summarization", "language generation task"], "overall_score": 4.005258892393295, "scores": [1.7305446514966565, 1.3374193548075803, 0.5379901422147284], "rank_score": 1.2019847161729886} -{"id": "mcdonald-pereira-2006-online", "title": "Online Learning of Approximate Dependency Parsing Algorithms", "abstract": "In this paper we extend the maximum spanning tree (MST) dependency parsing framework of McDonald et al. (2005c) to incorporate higher-order feature representations and allow dependency structures with multiple parents per word. We show that those extensions can make the MST framework computationally intractable, but that the intractability can be circumvented with new approximate parsing algorithms. 

We conclude with experiments showing that discriminative online learning using those approximate algorithms achieves the best reported parsing accuracy for Czech and Danish.", "phrases": ["dependency parsing", "maximum spanning tree", "approximate algorithm", "online learning", "head"], "overall_score": 5.053061528177555, "scores": [0.7818109953774081, 2.021985422921956, 1.2154081299898956, 1.0439754506955283, 0.9456558251538529], "rank_score": 1.2017671648277282} -{"id": "omelianchuk-etal-2020-gector", "title": "GECToR \u2013 Grammatical Error Correction: Tag, Not Rewrite", "abstract": "In this paper, we present a simple and efficient GEC sequence tagger using a Transformer encoder. Our system is pre-trained on synthetic data and then fine-tuned in two stages: first on errorful corpora, and second on a combination of errorful and error-free parallel corpora. We design custom token-level transformations to map input tokens to target corrections. Our best single-model/ensemble GEC tagger achieves an F_0.5 of 65.3/66.5 on CONLL-2014 (test) and F_0.5 of 72.4/73.6 on BEA-2019 (test). Its inference speed is up to 10 times as fast as a Transformer-based seq2seq GEC system.", "phrases": ["grammatical error correction", "tagging", "token-level transformation"], "overall_score": 3.8675873894178423, "scores": [2.2166093742075224, 0.840722578710549, 0.5472687833432014], "rank_score": 1.2015335787537575} -{"id": "ziser-reichart-2017-neural", "title": "Neural Structural Correspondence Learning for Domain Adaptation", "abstract": "We introduce a neural network model that marries together ideas from two prominent strands of research on domain adaptation through representation learning: structural correspondence learning (SCL, (Blitzer et al., 2006)) and autoencoder neural networks (NNs). Our model is a three-layer NN that learns to encode the non-pivot features of an input example into a low dimensional representation, so that the existence of pivot features (features that are prominent in both domains and convey useful information for the NLP task) in the example can be decoded from that representation. The low-dimensional representation is then employed in a learning algorithm for the task. Moreover, we show how to inject pre-trained word embeddings into our model in order to improve generalization across examples with similar pivot features. We experiment with the task of cross-domain sentiment classification on 16 domain pairs and show substantial improvements over strong baselines.", "phrases": ["domain adaptation", "neural network model", "scl", "pivot"], "overall_score": 2.880452882286202, "scores": [2.488751808072758, 0.8633406563935201, 0.8473799119021662, 0.6054962391702674], "rank_score": 1.2012421538846778} -{"id": "macherey-och-2007-empirical", "title": "An Empirical Study on Computing Consensus Translations from Multiple Machine Translation Systems", "abstract": "This paper presents an empirical study on how different selections of input translation systems affect translation quality in system combination. We give empirical evidence that the systems to be combined should be of similar quality and need to be almost uncorrelated in order to be beneficial for system combination. 
Experimental results are presented for composite translations computed from large numbers of different research systems as well as a set of translation systems derived from one of the best-ranked machine translation engines in the 2006 NIST machine translation evaluation.", "phrases": ["empirical study", "system combination", "similar quality", "translation output"], "overall_score": 3.2520506115837082, "scores": [1.6557873214699932, 1.2737954296594862, 0.98127851007873, 0.8926695009026013], "rank_score": 1.2008826905277026} -{"id": "gui-etal-2016-event", "title": "Event-Driven Emotion Cause Extraction with Corpus Construction", "abstract": "In this paper, we present our work in emotion cause extraction. Since there is no open dataset available, the lack of annotated resources has limited the research in this area. Thus, we first present a dataset we built using SINA city news. The annotation is based on the scheme of the W3C Emotion Markup Language. Second, we propose a 7-tuple definition to describe emotion cause events. Based on this general definition, we propose a new event-driven emotion cause extraction method using multi-kernel SVMs where a syntactical tree based approach is used to represent events in text. A convolution kernel based multi-kernel SVM is used to extract emotion causes. Because traditional convolution kernels do not use lexical information at the terminal nodes of syntactic trees, we modify the kernel function with a synonym based improvement. Even with very limited training data, we can still extract sufficient features for the task. Evaluations show that our approach achieves 11.6% higher F-measure compared to referenced methods. The contributions of our work include resource construction, concept definition and algorithm development.", "phrases": ["emotion cause extraction", "sina city news", "syntactical tree", "ece", "public corpus"], "overall_score": 4.16164202149863, "scores": [1.9877417920404643, 1.6866097539314637, 1.2379760403423503, 0.5616431908521193, 0.5300095292047963], "rank_score": 1.2007960612742388} -{"id": "cotterell-etal-2016-joint", "title": "A Joint Model of Orthography and Morphological Segmentation", "abstract": "We present a model of morphological segmentation that jointly learns to segment and restore orthographic changes, e.g., funniest \u2192 fun-y-est. We term this form of analysis canonical segmentation and contrast it with the traditional surface segmentation, which segments a surface form into a sequence of substrings, e.g., funniest \u2192 funn-i-est. We derive an importance sampling algorithm for approximate inference in the model and report experimental results on English, German and Indonesian.", "phrases": ["morphological segmentation", "change", "surface form"], "overall_score": 2.336309470766855, "scores": [2.481404385556962, 0.5692665961749644, 0.5512057111375827], "rank_score": 1.2006255642898362} -{"id": "turney-etal-2011-literal", "title": "Literal and Metaphorical Sense Identification through Concrete and Abstract Context", "abstract": "Metaphor is ubiquitous in text, even in highly technical text. Correct inference about textual entailment requires computers to distinguish the literal and metaphorical senses of a word. Past work has treated this problem as a classical word sense disambiguation task. 
In this paper, we take a new approach, based on research in cognitive linguistics that views metaphor as a method for transferring knowledge from a familiar, well-understood, or concrete domain to an unfamiliar, less understood, or more abstract domain. This view leads to the hypothesis that metaphorical word usage is correlated with the degree of abstractness of the word's context. We introduce an algorithm that uses this hypothesis to classify a word sense in a given context as either literal (denotative) or metaphorical (connotative). We evaluate this algorithm with a set of adjective-noun phrases (e.g., in dark comedy, the adjective dark is used metaphorically; in dark hair, it is used literally) and with the TroFi (Trope Finder) Example Base of literal and nonliteral usage for fifty verbs. We achieve state-of-the-art performance on both datasets.", "phrases": ["metaphor", "concreteness level", "physical experience"], "overall_score": 4.766670428277323, "scores": [2.2485628829394444, 0.8287482059925124, 0.5244420305352973], "rank_score": 1.2005843731557515} -{"id": "cheng-etal-2019-robust", "title": "Robust Neural Machine Translation with Doubly Adversarial Inputs", "abstract": "Neural machine translation (NMT) often suffers from the vulnerability to noisy perturbations in the input. We propose an approach to improving the robustness of NMT models, which consists of two parts: (1) attack the translation model with adversarial source examples; (2) defend the translation model with adversarial target inputs to improve its robustness against the adversarial source inputs. For the generation of adversarial inputs, we propose a gradient-based method to craft adversarial examples informed by the translation loss over the clean inputs. Experimental results on Chinese-English and English-German translation tasks demonstrate that our approach achieves significant improvements (2.8 and 1.6 BLEU points) over Transformer on standard clean benchmarks as well as exhibiting higher robustness on noisy data.", "phrases": ["neural machine translation", "translation model", "gradient-based method", "adversarial example", "noise"], "overall_score": 4.1215572126779545, "scores": [2.0393948484878948, 1.4424909110639228, 1.0623568145703766, 0.910645859000974, 0.5462364506486321], "rank_score": 1.20022497675436} -{"id": "may-knight-2007-syntactic", "title": "Syntactic Re-Alignment Models for Machine Translation", "abstract": "We present a method for improving word alignment for statistical syntax-based machine translation that employs a syntactically informed alignment model closer to the translation model than commonly-used word alignment models. This leads to extraction of more useful linguistic patterns and improved BLEU scores on translation experiments in Chinese and Arabic.", "phrases": ["machine translation", "link", "ssmt"], "overall_score": 2.981667193095288, "scores": [2.5046451125754388, 0.5605003824051477, 0.5345879517663996], "rank_score": 1.199911148915662} -{"id": "lowe-etal-2017-towards", "title": "Towards an Automatic Turing Test: Learning to Evaluate Dialogue Responses", "abstract": "Automatically evaluating the quality of dialogue responses for unstructured domains is a challenging problem. Unfortunately, existing automatic evaluation metrics are biased and correlate very poorly with human judgements of response quality (Liu et al., 2016). 
Yet having an accurate automatic evaluation procedure is crucial for dialogue research, as it allows rapid prototyping and testing of new models with fewer expensive human evaluations. In response to this challenge, we formulate automatic dialogue evaluation as a learning problem. We present an evaluation model (ADEM) that learns to predict human-like scores to input responses, using a new dataset of human response scores. We show that the ADEM model's predictions correlate significantly, and at a level much higher than word-overlap metrics such as BLEU, with human judgements at both the utterance and system-level. We also show that ADEM can generalize to evaluating dialogue models unseen during training, an important step for automatic dialogue evaluation.", "phrases": ["dialogue response", "human response score", "dialog evaluation metric", "annotator"], "overall_score": 4.512993077414957, "scores": [2.8337210040665495, 0.8643124151384652, 0.5784012139403578, 0.5230899213547165], "rank_score": 1.1998811386250223} -{"id": "yoshikawa-etal-2009-jointly", "title": "Jointly Identifying Temporal Relations with Markov Logic", "abstract": "Recent work on temporal relation identification has focused on three types of relations between events: temporal relations between an event and a time expression, between a pair of events and between an event and the document creation time. These types of relations have mostly been identified in isolation by event pairwise comparison. However, this approach neglects logical constraints between temporal relations of different types that we believe to be helpful. We therefore propose a Markov Logic model that jointly identifies relations of all three relation types simultaneously. By evaluating our model on the TempEval data we show that this approach leads to about 2% higher accuracy for all three types of relations, and to the best results for the task when compared to those of other machine learning based systems.", "phrases": ["markov logic", "global information", "transitivity constraint"], "overall_score": 3.5329648835260814, "scores": [2.1881424898637714, 0.8423821645348004, 0.5691066253023758], "rank_score": 1.199877093233649} -{"id": "ghosal-etal-2021-cider", "title": "CIDER: Commonsense Inference for Dialogue Explanation and Reasoning", "abstract": "Commonsense inference to understand and explain human language is a fundamental research problem in natural language processing. Explaining human conversations poses a great challenge as it requires contextual understanding, planning, inference, and several aspects of reasoning including causal, temporal, and commonsense reasoning. In this work, we introduce CIDER \u2013 a manually curated dataset that contains dyadic dialogue explanations in the form of implicit and explicit knowledge triplets inferred using contextual commonsense inference. Extracting such rich explanations from conversations can be conducive to improving several downstream applications. The annotated triplets are categorized by the type of commonsense knowledge present (e.g., causal, conditional, temporal). We set up three different tasks conditioned on the annotated dataset: Dialogue-level Natural Language Inference, Span Extraction, and Multi-choice Span Selection. Baseline results obtained with transformer-based models reveal that the tasks are difficult, paving the way for promising future research. 
The dataset and the baseline implementations are publicly available at .", "phrases": ["commonsense inference", "dialogue explanation", "cider"], "overall_score": 1.3167495439905101, "scores": [1.7208836324797652, 0.9705154997980181, 0.904272127471705], "rank_score": 1.1985570865831627} -{"id": "mohammad-turney-2010-emotions", "title": "Emotions Evoked by Common Words and Phrases: Using Mechanical Turk to Create an Emotion Lexicon", "abstract": "Even though considerable attention has been given to semantic orientation of words and the creation of large polarity lexicons, research in emotion analysis has had to rely on limited and small emotion lexicons. In this paper, we show how we create a high-quality, moderate-sized emotion lexicon using Mechanical Turk. In addition to questions about emotions evoked by terms, we show how the inclusion of a word choice question can discourage malicious data entry, help identify instances where the annotator may not be familiar with the target term (allowing us to reject such annotations), and help obtain annotations at sense level (rather than at word level). We perform an extensive analysis of the annotations to better understand the distribution of emotions evoked by terms of different parts of speech. We identify which emotions tend to be evoked simultaneously by the same term and show that certain emotions indeed go hand in hand.", "phrases": ["mechanical turk", "annotator", "different part", "emotion"], "overall_score": 2.491920247851481, "scores": [2.951368182458865, 0.795330909711658, 0.5262318699315486, 0.5205103497194332], "rank_score": 1.1983603279553763} -{"id": "agic-etal-2016-multilingual", "title": "Multilingual Projection for Parsing Truly Low-Resource Languages", "abstract": "We propose a novel approach to cross-lingual part-of-speech tagging and dependency parsing for truly low-resource languages. Our annotation projection-based approach yields tagging and parsing models for over 100 languages. All that is needed are freely available parallel texts, and taggers and parsers for resource-rich languages. The empirical evaluation across 30 test languages shows that our method consistently provides top-level accuracies, close to established upper bounds, and outperforms several competitive baselines.", "phrases": ["low-resource language", "annotation projection", "parallel corpora"], "overall_score": 3.463686716277437, "scores": [1.453378165638863, 1.2661946099456618, 0.8754873132933592], "rank_score": 1.1983533629592946} -{"id": "nayeem-etal-2018-abstractive", "title": "Abstractive Unsupervised Multi-Document Summarization using Paraphrastic Sentence Fusion", "abstract": "In this work, we aim at developing an unsupervised abstractive summarization system in the multi-document setting. We design a paraphrastic sentence fusion model which jointly performs sentence fusion and paraphrasing using a skip-gram word embedding model at the sentence level. Our model improves the information coverage and at the same time abstractiveness of the generated sentences. We conduct our experiments on the human-generated multi-sentence compression datasets and evaluate our system on several newly proposed Machine Translation (MT) evaluation metrics. Furthermore, we apply our sentence level model to implement an abstractive multi-document summarization system where documents usually contain a related set of sentences. We also propose an optimal solution for the classical summary length limit problem which was not addressed in past research. 
For the document level summary, we conduct experiments on the datasets of two different domains (e.g., news articles and user reviews) which are well suited for multi-document abstractive summarization. Our experiments demonstrate that the methods bring significant improvements over the state-of-the-art methods.", "phrases": ["summarization", "sentence fusion", "paraphrasing"], "overall_score": 2.759249967189778, "scores": [1.8955943299193572, 1.1303210512485744, 0.5690657236588161], "rank_score": 1.1983270349422492} -{"id": "girju-2003-automatic", "title": "Automatic Detection of Causal Relations for Question Answering", "abstract": "Causation relations are a pervasive feature of human language. Despite this, the automatic acquisition of causal information in text has proved to be a difficult task in NLP. This paper provides a method for the automatic detection and extraction of causal relations. We also present an inductive learning approach to the automatic discovery of lexical and semantic constraints necessary in the disambiguation of causal relations that are then used in question answering. We devised a classification of causal questions and tested the procedure on a QA system.", "phrases": ["causal relation", "question answering", "automatic detection", "lexico-syntactic pattern", "reasoning"], "overall_score": 3.8569452477850943, "scores": [1.7497582256132613, 0.9232082870238664, 2.149559317040398, 0.5875438054605058, 0.5810674226933629], "rank_score": 1.1982274115662788} -{"id": "min-etal-2019-compositional", "title": "Compositional Questions Do Not Necessitate Multi-hop Reasoning", "abstract": "Multi-hop reading comprehension (RC) questions are challenging because they require reading and reasoning over multiple paragraphs. We argue that it can be difficult to construct large multi-hop RC datasets. For example, even highly compositional questions can be answered with a single hop if they target specific entity types, or the facts needed to answer them are redundant. Our analysis is centered on HotpotQA, where we show that single-hop reasoning can solve much more of the dataset than previously thought. We introduce a single-hop BERT-based RC model that achieves 67 F1\u2014comparable to state-of-the-art multi-hop models. We also design an evaluation setting where humans are not shown all of the necessary paragraphs for the intended multi-hop reasoning but can still answer over 80% of questions. Together with detailed error analysis, these results suggest there should be an increasing focus on the role of evidence in multi-hop reasoning and possibly even a shift towards information retrieval style evaluations with large and diverse evidence collections.", "phrases": ["multi-hop", "reasoning", "correct answer", "passage", "annotation artifact"], "overall_score": 4.152650812008431, "scores": [2.1258982068419052, 1.566627602347648, 0.8833894013355602, 0.8802245062101767, 0.5348690162927995], "rank_score": 1.198201746605618} -{"id": "duong-etal-2017-multilingual-semantic", "title": "Multilingual Semantic Parsing And Code-Switching", "abstract": "Extending semantic parsing systems to new domains and languages is a highly expensive, time-consuming process, so making effective use of existing resources is critical. In this paper, we describe a transfer learning method using crosslingual word embeddings in a sequence-to-sequence model. On the NLmaps corpus, our approach achieves state-of-the-art accuracy of 85.7% for English. 
Most importantly, we observed a consistent improvement for German compared with several baseline domain adaptation techniques. As a by-product of this approach, our models that are trained on a combination of English and German utterances perform reasonably well on code-switching utterances which contain a mixture of English and German, even though the training data does not contain any such utterances. As far as we know, this is the first study of code-switching in semantic parsing. We manually constructed the set of code-switching test utterances for the NLmaps corpus and achieve 78.3% accuracy on this dataset.", "phrases": ["semantic parsing", "code-switching", "word embedding", "sequence-to-sequence model", "multiple language"], "overall_score": 2.7586051144884314, "scores": [3.2196173501637837, 0.8336301552136808, 0.8399756865107616, 0.5583906012637191, 0.5386211017101246], "rank_score": 1.198046978972414} -{"id": "bostrom-durrett-2020-byte", "title": "Byte Pair Encoding is Suboptimal for Language Model Pretraining", "abstract": "The success of pretrained transformer language models (LMs) in natural language processing has led to a wide range of pretraining setups. In particular, these models employ a variety of subword tokenization methods, most notably byte-pair encoding (BPE) (Sennrich et al., 2016; Gage, 1994), the WordPiece method (Schuster and Nakajima, 2012), and unigram language modeling (Kudo, 2018), to segment text. However, to the best of our knowledge, the literature does not contain a direct evaluation of the impact of tokenization on language model pretraining. We analyze differences between BPE and unigram LM tokenization, finding that the latter method recovers subword units that align more closely with morphology and avoids problems stemming from BPE's greedy construction procedure. We then compare the fine-tuned task performance of identical transformer masked language models pretrained with these tokenizations. Across downstream tasks and two languages (English and Japanese), we find that the unigram LM tokenization method matches or outperforms BPE. We hope that developers of future pretrained LMs will consider adopting the unigram LM method over the more prevalent BPE.", "phrases": ["language model pretraining", "unigram", "byte pair encoding"], "overall_score": 3.1606937542456897, "scores": [1.891466188760406, 0.8710426406888941, 0.8304715716697003], "rank_score": 1.1976601337063335} -{"id": "calzolari-etal-2012-lre", "title": "The LRE Map. Harmonising Community Descriptions of Resources", "abstract": "Accurate and reliable documentation of Language Resources is an indisputable need: documentation is the gateway to discovery of Language Resources, a necessary step towards promoting the data economy. Language resources that are not documented virtually do not exist: for this reason every initiative able to collect and harmonise metadata about resources represents a valuable opportunity for the NLP community. In this paper we describe the LRE Map, reporting statistics on resources associated with LREC2012 papers and providing comparisons with LREC2010 data. The LRE Map, jointly launched by FLaReNet and ELRA in conjunction with the LREC 2010 Conference, is an instrument for enhancing availability of information about resources, either new or already existing ones. It aims to reinforce and facilitate the use of standards in the community. 
The LRE Map web interface provides the possibility of searching according to a fixed set of metadata and viewing the details of extracted resources. The LRE Map is continuing to collect bottom-up input about resources from authors of other conferences through a standard submission process. This will help broaden the notion of \u201clanguage resources\u201d and attract to the field neighboring disciplines that so far have been only marginally involved with the standard notion of language resources.", "phrases": ["lre map", "language resource", "conference", "availability"], "overall_score": 2.3303775595775584, "scores": [2.5672733878816123, 0.8623698798845031, 0.8278184869452934, 0.5328469051388794], "rank_score": 1.1975771649625722} -{"id": "qiu-etal-2013-mining", "title": "Mining User Relations from Online Discussions using Sentiment Analysis and Probabilistic Matrix Factorization", "abstract": "Advances in sentiment analysis have enabled extraction of user relations implied in online textual exchanges such as forum posts. However, recent studies in this direction only consider direct relation extraction from text. As user interactions can be sparse in online discussions, we propose to apply collaborative filtering through probabilistic matrix factorization to generalize and improve the opinion matrices extracted from forum posts. Experiments with two tasks show that the learned latent factor representation can give good performance on a relation polarity prediction task and improve the performance of a subgroup detection task.", "phrases": ["online discussion", "sentiment analysis", "probabilistic matrix factorization"], "overall_score": 1.3154908593480477, "scores": [1.8192405474543292, 0.9251053176465979, 0.8478882822419942], "rank_score": 1.1974113824476404} -{"id": "marcus-etal-2016-cocogen", "title": "CoCoGen - Complexity Contour Generator: Automatic Assessment of Linguistic Complexity Using a Sliding-Window Technique", "abstract": "We present a novel approach to the automatic assessment of text complexity based on a sliding-window technique that tracks the distribution of complexity within a text. Such distribution is captured by what we term \u201ccomplexity contours\u201d derived from a series of measurements for a given linguistic complexity measure. This approach is implemented in an automatic computational tool, CoCoGen \u2013 Complexity Contour Generator, which in its current version supports 32 indices of linguistic complexity. The goal of the paper is twofold: (1) to introduce the design of our computational tool based on a sliding-window technique and (2) to showcase this approach in the area of second language (L2) learning, i.e. more specifically, in the area of L2 writing.", "phrases": ["complexity contour generator", "automatic assessment", "sliding-window technique"], "overall_score": 1.3153557467287518, "scores": [1.9595373649846566, 0.8366530965336709, 0.7956747314063074], "rank_score": 1.1972883976415452} -{"id": "yang-etal-2019-enhancing-topic", "title": "Enhancing Topic-to-Essay Generation with External Commonsense Knowledge", "abstract": "Automatic topic-to-essay generation is a challenging task since it requires generating novel, diverse, and topic-consistent paragraph-level text with a set of topics as input. Previous work tends to perform essay generation based solely on the given topics while ignoring massive commonsense knowledge. 
However, this commonsense knowledge provides additional background information, which can help to generate essays that are more novel and diverse. Towards filling this gap, we propose to integrate commonsense from the external knowledge base into the generator through a dynamic memory mechanism. Besides, the adversarial training based on a multi-label discriminator is employed to further improve topic-consistency. We also develop a series of automatic evaluation metrics to comprehensively assess the quality of the generated essay. Experiments show that with external commonsense knowledge and adversarial training, the generated essays are more novel, diverse, and topic-consistent than existing methods in terms of both automatic and human evaluation.", "phrases": ["external commonsense knowledge", "topic-consistency", "essay generation"], "overall_score": 2.9750702179523643, "scores": [2.083831981940231, 0.979037898961763, 0.5288991115543404], "rank_score": 1.1972563308187782} -{"id": "santus-etal-2015-evalution", "title": "EVALution 1.0: an Evolving Semantic Dataset for Training and Evaluation of Distributional Semantic Models", "abstract": "In this paper, we introduce EVALution 1.0, a dataset designed for the training and the evaluation of Distributional Semantic Models (DSMs). This version consists of almost 7.5K tuples, instantiating several semantic relations between word pairs (including hypernymy, synonymy, antonymy, meronymy). The dataset is enriched with a large amount of additional information (i.e. relation domain, word frequency, word POS, word semantic field, etc.) that can be used for either filtering the pairs or performing an in-depth analysis of the results. The tuples were extracted from a combination of ConceptNet 5.0 and WordNet 4.0, and subsequently filtered through automatic methods and crowdsourcing in order to ensure their quality. The dataset is freely downloadable. An extension in RDF format, also including scripts for data processing, is under development.", "phrases": ["distributional semantic models", "hypernymy", "evalution"], "overall_score": 2.6302268579953347, "scores": [2.141044209396398, 0.9209396091944275, 0.5292196730213898], "rank_score": 1.197067830537405} -{"id": "zhang-etal-2019-curriculum", "title": "Curriculum Learning for Domain Adaptation in Neural Machine Translation", "abstract": "We introduce a curriculum learning approach to adapt generic neural machine translation models to a specific domain. Samples are grouped by their similarities to the domain of interest and each group is fed to the training algorithm with a particular schedule. This approach is simple to implement on top of any neural framework or architecture, and consistently outperforms both unadapted and adapted baselines in experiments with two distinct domains and two language pairs.", "phrases": ["domain adaptation", "neural machine translation", "curriculum"], "overall_score": 3.159005005687449, "scores": [1.411999998368869, 0.9003691810933114, 1.2786915037265312], "rank_score": 1.1970202277295705} -{"id": "castilho-2020-page", "title": "On the Same Page? Comparing Inter-Annotator Agreement in Sentence and Document Level Human Machine Translation Evaluation", "abstract": "Document-level evaluation of machine translation has raised interest in the community, especially since responses to the claims of \u201chuman parity\u201d (Toral et al., 2018; L\u00e4ubli et al., 2018) with document-level human evaluations have been published. 
Yet, little is known about best practices regarding human evaluation of machine translation at the document-level. This paper presents a comparison of the differences in inter-annotator agreement between quality assessments using sentence and document-level set-ups. We report results of the agreement between professional translators for fluency and adequacy scales, error annotation, and pair-wise ranking, along with the effort needed to perform the different tasks. To the best of our knowledge, this is the first study of its kind.", "phrases": ["inter-annotator agreement", "machine translation", "error annotation"], "overall_score": 2.144751134029799, "scores": [2.4493417937445523, 0.583425051268846, 0.5582583526162097], "rank_score": 1.1970083992098692} -{"id": "zhang-etal-2008-improved", "title": "Improved Statistical Machine Translation by Multiple Chinese Word Segmentation", "abstract": "Chinese word segmentation (CWS) is a necessary step in Chinese-English statistical machine translation (SMT) and its performance has an impact on the results of SMT. However, there are many settings involved in creating a CWS system such as various specifications and CWS methods. This paper investigates the effect of these settings on SMT. We tested dictionary-based and CRF-based approaches and found there was no significant difference between the two in the quality of the resulting translations. We also found the correlation between the CWS F-score and SMT BLEU score was very weak. This paper also proposes two methods of combining advantages of different specifications: a simple concatenation of training data and a feature interpolation approach in which the same types of features of translation models from various CWS schemes are linearly interpolated. We found these approaches were very effective in improving the quality of translations.", "phrases": ["chinese", "word segmentation", "different specification", "translation performance", "tokenization"], "overall_score": 2.8679265201133055, "scores": [3.3298465712414145, 0.9321746370395945, 0.6063001010679836, 0.5611717890813972, 0.5505981771289525], "rank_score": 1.1960182551118685} -{"id": "tetreault-etal-2010-using", "title": "Using Parse Features for Preposition Selection and Error Detection", "abstract": "We evaluate the effect of adding parse features to a leading model of preposition usage. Results show a significant improvement in the preposition selection task on native speaker text and a modest increment in precision and recall in an ESL error detection task. Analysis of the parser output indicates that it is robust enough in the face of noisy non-native writing to extract useful information.", "phrases": ["parse feature", "preposition", "error detection"], "overall_score": 3.4566238604504735, "scores": [1.2833425128814762, 0.878648301308801, 1.4257385334350305], "rank_score": 1.1959097825417693} -{"id": "higashinaka-isozaki-2008-corpus", "title": "Corpus-based Question Answering for why-Questions", "abstract": "This paper proposes a corpus-based approach for answering why-questions. Conventional systems use hand-crafted patterns to extract and evaluate answer candidates. However, such hand-crafted patterns are likely to have low coverage of causal expressions, and it is also difficult to assign suitable weights to the patterns by hand. In our approach, causal expressions are automatically collected from corpora tagged with semantic relations. 
From the collected expressions, features are created to train an answer candidate ranker that maximizes the QA performance with regard to the corpus of why-questions and answers. NAZEQA, a Japanese why-QA system based on our approach, clearly outperforms a baseline that uses hand-crafted patterns with a Mean Reciprocal Rank (top-5) of 0.305, making it presumably the best-performing fully implemented why-QA system.", "phrases": ["why-question", "nazeqa", "candidate answer paragraph", "negative example"], "overall_score": 2.627379231918779, "scores": [3.1362529613970866, 0.5958475001397673, 0.52919922219961, 0.5217875964978624], "rank_score": 1.1957718200585816} -{"id": "zaghouani-charfi-2018-arap", "title": "Arap-Tweet: A Large Multi-Dialect Twitter Corpus for Gender, Age and Language Variety Identification", "abstract": "In this paper, we present Arap-Tweet, which is a large-scale and multi-dialectal corpus of Tweets from 11 regions and 16 countries in the Arab world representing the major Arabic dialectal varieties. To build this corpus, we collected data from Twitter and we provided a team of experienced annotators with annotation guidelines that they used to annotate the corpus for age categories, gender, and dialectal variety. During the data collection effort, we based our search on distinctive keywords that are specific to the different Arabic dialects and we also validated the location using the Twitter API. In this paper, we report on the corpus data collection and annotation efforts. We also present some issues that we encountered during these phases. Then, we present the results of the evaluation performed to ensure the consistency of the annotation. The provided corpus will enrich the limited set of available language resources for Arabic and will be an invaluable enabler for developing author profiling tools and NLP tools for Arabic.", "phrases": ["twitter", "gender", "country", "arab world", "arap-tweet"], "overall_score": 2.7517988049403117, "scores": [1.8509822217797849, 1.7397546110615234, 1.2469524958985163, 0.6150334695785783, 0.5227323831492976], "rank_score": 1.19509103629354} -{"id": "uzzaman-allen-2010-trips", "title": "TRIPS and TRIOS System for TempEval-2: Extracting Temporal Information from Text", "abstract": "Extracting temporal information from raw text is fundamental for deep language understanding, and key to many applications like question answering, information extraction, and document summarization. In this paper, we describe two systems we submitted to the TempEval 2 challenge, for extracting temporal information from raw text. The systems use a combination of deep semantic parsing, Markov Logic Networks and Conditional Random Field classifiers. Our two submitted systems, TRIPS and TRIOS, approached all tasks and outperformed all teams in two tasks. Furthermore, TRIOS mostly had second-best performances in other tasks. TRIOS also outperformed the other teams that attempted all the tasks. 
Our systems are notable in that for tasks C -- F, they operated on raw text while all other systems used tagged events and temporal expressions in the corpus as input.", "phrases": ["trios", "temporal information", "trips"], "overall_score": 3.0651724636684445, "scores": [1.8479091329724884, 0.8953022111225241, 0.841856471866039], "rank_score": 1.1950226053203505} -{"id": "artetxe-etal-2018-unsupervised", "title": "Unsupervised Statistical Machine Translation", "abstract": "While modern machine translation has relied on large parallel corpora, a recent line of work has managed to train Neural Machine Translation (NMT) systems from monolingual corpora only (Artetxe et al., 2018c; Lample et al., 2018). Despite the potential of this approach for low-resource settings, existing systems are far behind their supervised counterparts, limiting their practical interest. In this paper, we propose an alternative approach based on phrase-based Statistical Machine Translation (SMT) that significantly closes the gap with supervised systems. Our method profits from the modular architecture of SMT: we first induce a phrase table from monolingual corpora through cross-lingual embedding mappings, combine it with an n-gram language model, and fine-tune hyperparameters through an unsupervised MERT variant. In addition, iterative backtranslation improves results further, yielding, for instance, 14.08 and 26.22 BLEU points in WMT 2014 English-German and English-French, respectively, an improvement of more than 7-10 BLEU points over previous unsupervised systems, and closing the gap with supervised SMT (Moses trained on Europarl) down to 2-5 BLEU points. Our implementation is available at .", "phrases": ["machine translation", "alternative approach", "mapping", "smt model", "downstream task"], "overall_score": 4.761789349396624, "scores": [2.6740496393901645, 1.2940178521217565, 0.9263075453933949, 0.5379727073727263, 0.5363266021360141], "rank_score": 1.1937348692828114} -{"id": "saers-etal-2010-word", "title": "Word Alignment with Stochastic Bracketing Linear Inversion Transduction Grammar", "abstract": "The class of Linear Inversion Transduction Grammars (LITGs) is introduced, and used to induce a word alignment over a parallel corpus. We show that alignment via Stochastic Bracketing LITGs is considerably faster than Stochastic Bracketing ITGs, while still yielding alignments superior to the widely-used heuristic of intersecting bidirectional IBM alignments. Performance is measured as the translation quality of a phrase-based machine translation system built upon the word alignments, and an improvement of 2.85 BLEU points over baseline is noted for French--English.", "phrases": ["transduction grammar", "word alignment", "restriction"], "overall_score": 2.4818934397280907, "scores": [2.4189486500260378, 0.6379087259516732, 0.5237579815328567], "rank_score": 1.1935384525035226} -{"id": "das-etal-2017-question", "title": "Question Answering on Knowledge Bases and Text using Universal Schema and Memory Networks", "abstract": "Existing question answering methods infer answers either from a knowledge base or from raw text. While knowledge base (KB) methods are good at answering compositional questions, their performance is often affected by the incompleteness of the KB. On the contrary, web text contains millions of facts that are absent in the KB, albeit in an unstructured form. Universal schema can support reasoning on the union of both structured KBs and unstructured text by aligning them in a common embedded space. 
In this paper we extend universal schema to natural language question answering, employing memory networks to attend to the large body of facts in the combination of text and KB. Our models can be trained in an end-to-end fashion on question-answer pairs. Evaluation results on the Spades fill-in-the-blank question answering dataset show that exploiting universal schema for question answering is better than using either a KB or text alone. This model also outperforms the current state-of-the-art by 8.5 F1 points.", "phrases": ["universal schema", "memory network", "unstructured text", "knowledge basis"], "overall_score": 3.231516375160227, "scores": [2.581836709255221, 1.0952154597849137, 0.5644521835244597, 0.5316957511040299], "rank_score": 1.193300025917156} -{"id": "sanchez-cartagena-etal-2018-prompsits", "title": "Prompsit's submission to WMT 2018 Parallel Corpus Filtering shared task", "abstract": "This paper describes Prompsit Language Engineering's submissions to the WMT 2018 parallel corpus filtering shared task. Our four submissions were based on an automatic classifier for identifying pairs of sentences that are mutual translations. A set of hand-crafted hard rules for discarding sentences with evident flaws was applied before the classifier. We explored different strategies for achieving a training corpus with diverse vocabulary and fluent sentences: language model scoring, an active-learning-inspired data selection algorithm and n-gram saturation. Our submissions were very competitive in comparison with other participants on the 100 million word training corpus.", "phrases": ["submission", "wmt", "sentence pair"], "overall_score": 2.48138007440252, "scores": [2.2568736977371913, 0.7909469971883158, 0.5320540329756954], "rank_score": 1.1932915759670675} -{"id": "kolachina-ranta-2016-abstract", "title": "From Abstract Syntax to Universal Dependencies", "abstract": "Abstract syntax is a semantic tree representation that lies between parse trees and logical forms. It abstracts away from word order and lexical items, but contains enough information to generate both surface strings and logical forms. Abstract syntax is commonly used in compilers as an intermediate between source and target languages. Grammatical Framework (GF) is a grammar formalism that generalizes the idea to natural languages, to capture cross-lingual generalizations and perform interlingual translation. As one of the main results, the GF Resource Grammar Library (GF-RGL) has implemented a shared abstract syntax for over 30 languages. Each language has its own set of concrete syntax rules (morphology and syntax), by which it can be generated from the abstract syntax and parsed into it. This paper presents a conversion method from abstract syntax trees to dependency trees. The method is applied for converting GF-RGL trees to Universal Dependencies (UD), which uses a common set of labels for different languages. The correspondence between GF-RGL and UD turns out to be good, and the relatively few discrepancies give rise to interesting questions about universality. 
The conversion also has potential for practical applications: (1) it makes the GF parser usable as a rule-based dependency parser; (2) it enables bootstrapping UD treebanks from GF treebanks; (3) it defines formal criteria to assess the informal annotation schemes of UD; (4) it gives a method to check the consistency of manually annotated UD trees with respect to the annotation schemes; (5) it makes information from UD treebanks available.", "phrases": ["universal dependencies", "conversion", "syntax tree"], "overall_score": 1.9201415280606116, "scores": [2.081391196868331, 0.9758828916780564, 0.5218789435018122], "rank_score": 1.1930510106827332} -{"id": "eisele-etal-2008-using", "title": "Using Moses to Integrate Multiple Rule-Based Machine Translation Engines into a Hybrid System", "abstract": "Based on an architecture that allows combining statistical machine translation (SMT) with rule-based machine translation (RBMT) in a multi-engine setup, we present new results that show that this type of system combination can actually increase the lexical coverage of the resulting hybrid system, at least as far as this can be measured via BLEU score.", "phrases": ["hybrid system", "rbmt system", "smt system", "phrase table", "high quality"], "overall_score": 3.0592955968976234, "scores": [1.6897412530352893, 1.3723978882951697, 1.2393176210435461, 1.1341108570705156, 0.5280893003266519], "rank_score": 1.1927313839542346} -{"id": "agirre-etal-2006-methodology", "title": "A methodology for the joint development of the Basque WordNet and Semcor", "abstract": "This paper describes the methodology adopted to jointly develop the Basque WordNet and a hand-annotated corpus (the Basque Semcor). This joint development allows for better motivated sense distinctions, and a tighter coupling between both resources. The methodology involves edition, tagging and refereeing tasks. We are currently halfway through the nominal part of the 300.000 word corpus (roughly equivalent to a 500.000 word corpus for English). We present a detailed description of the task, including the main criteria for difficult cases in the edition of the senses and the tagging of the corpus, with special mention of multiword entries. Finally we give a detailed picture of the current figures, as well as an analysis of the agreement rates.", "phrases": ["methodology", "joint development", "basque wordnet"], "overall_score": 1.6532813478466202, "scores": [1.956321786485379, 0.8303015670663635, 0.7911478490470684], "rank_score": 1.1925904008662702} -{"id": "hendricks-etal-2018-localizing", "title": "Localizing Moments in Video with Temporal Language", "abstract": "Localizing moments in a longer video via natural language queries is a new, challenging task at the intersection of language and video understanding. Though moment localization with natural language is similar to other language and vision tasks like natural language object retrieval in images, moment localization offers an interesting opportunity to model temporal dependencies and reasoning in text. We propose a new model that explicitly reasons about different temporal segments in a video, and show that temporal context is important for localizing phrases which include temporal language. To benchmark whether our model, and other recent video localization models, can effectively reason about temporal language, we collect the novel TEMPOral reasoning in video and language (TEMPO) dataset. 
Our dataset consists of two parts: a dataset with real videos and template sentences (TEMPO - Template Language) which allows for controlled studies on temporal language, and a human language dataset which consists of temporal sentences annotated by humans (TEMPO - Human Language).", "phrases": ["moment", "video", "temporal language"], "overall_score": 1.6532446371325016, "scores": [1.7714551624427861, 0.9529077140787354, 0.8533288825294798], "rank_score": 1.192563919683667} -{"id": "feng-hirst-2012-text", "title": "Text-level Discourse Parsing with Rich Linguistic Features", "abstract": "In this paper, we develop an RST-style text-level discourse parser, based on the HILDA discourse parser (Hernault et al., 2010b). We significantly improve its tree-building step by incorporating our own rich linguistic features. We also analyze the difficulty of extending traditional sentence-level discourse parsing to text-level parsing by comparing discourse-parsing performance under different discourse conditions.", "phrases": ["discourse", "feature engineering", "pdtb", "well performance"], "overall_score": 4.015026545815925, "scores": [2.868236800952613, 0.8304830965712576, 0.5404253186538089, 0.5302920397488978], "rank_score": 1.1923593139816444} -{"id": "zampieri-etal-2019-semeval", "title": "SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval)", "abstract": "We present the results and the main findings of SemEval-2019 Task 6 on Identifying and Categorizing Offensive Language in Social Media (OffensEval). The task was based on a new dataset, the Offensive Language Identification Dataset (OLID), which contains over 14,000 English tweets, and it featured three sub-tasks. In sub-task A, systems were asked to discriminate between offensive and non-offensive posts. In sub-task B, systems had to identify the type of offensive content in the post. Finally, in sub-task C, systems had to detect the target of the offensive posts. OffensEval attracted a large number of participants and it was one of the most popular tasks in SemEval-2019. In total, nearly 800 teams signed up to participate in the task and 115 of them submitted results, which are presented and analyzed in this report.", "phrases": ["categorizing offensive language", "social media", "english tweet", "semeval-2019 task", "offenseval task"], "overall_score": 4.820463654383161, "scores": [1.935507814272892, 1.789727893861888, 0.854184222657373, 0.8330257124949422, 0.5489722748252713], "rank_score": 1.1922835836224732} -{"id": "dugan-etal-2020-roft", "title": "RoFT: A Tool for Evaluating Human Detection of Machine-Generated Text", "abstract": "In recent years, large neural networks for natural language generation (NLG) have made leaps and bounds in their ability to generate fluent text. However, the tasks of evaluating quality differences between NLG systems and understanding how humans perceive the generated text remain both crucial and difficult. In this system demonstration, we present Real or Fake Text (RoFT), a website that tackles both of these challenges by inviting users to try their hand at detecting machine-generated text in a variety of domains. We introduce a novel evaluation task based on detecting the boundary at which a text passage that starts off human-written transitions to being machine-generated. 
We show preliminary results of using RoFT to evaluate detection of machine-generated news articles.", "phrases": ["human detection", "machine-generated text", "boundary", "roft", "annotator"], "overall_score": 2.3197142156229527, "scores": [1.9928254715554374, 1.7098951627040397, 0.861574282037971, 0.8282748992399206, 0.5679166353635404], "rank_score": 1.1920972901801818} -{"id": "provilkov-etal-2020-bpe", "title": "BPE-Dropout: Simple and Effective Subword Regularization", "abstract": "Subword segmentation is widely used to address the open vocabulary problem in machine translation. The dominant approach to subword segmentation is Byte Pair Encoding (BPE), which keeps the most frequent words intact while splitting the rare ones into multiple tokens. While multiple segmentations are possible even with the same vocabulary, BPE splits words into unique sequences; this may prevent a model from better learning the compositionality of words and being robust to segmentation errors. So far, the only way to overcome this BPE imperfection, its deterministic nature, was to create another subword segmentation algorithm (Kudo, 2018). In contrast, we show that BPE itself incorporates the ability to produce multiple segmentations of the same word. We introduce BPE-dropout, a simple and effective subword regularization method based on and compatible with conventional BPE. It stochastically corrupts the segmentation procedure of BPE, which leads to producing multiple segmentations within the same fixed BPE framework. Using BPE-dropout during training and the standard BPE during inference improves translation quality up to 2.3 BLEU compared to BPE and up to 0.9 BLEU compared to the previous subword regularization.", "phrases": ["segmentation", "bpe", "tokenization", "same word", "downstream task"], "overall_score": 4.129086771572883, "scores": [1.8866132239835127, 1.8623225327753505, 1.1309699413634704, 0.544887440077412, 0.5322198705486734], "rank_score": 1.1914026017496837} -{"id": "li-jurafsky-2015-multi", "title": "Do Multi-Sense Embeddings Improve Natural Language Understanding?", "abstract": "Learning a distinct representation for each sense of an ambiguous word could lead to more powerful and fine-grained models of vector-space representations. Yet while \u2018multi-sense\u2019 methods have been proposed and tested on artificial word-similarity tasks, we don\u2019t know if they improve real natural language understanding tasks. In this paper we introduce a multi-sense embedding model based on Chinese Restaurant Processes that achieves state-of-the-art performance on matching human word similarity judgments, and propose a pipelined architecture for incorporating multi-sense embeddings into language understanding. We then test the performance of our model on part-of-speech tagging, named entity recognition, sentiment analysis, semantic relation identification and semantic relatedness, controlling for embedding dimensionality. We find that multi-sense embeddings do improve performance on some tasks (part-of-speech tagging, semantic relation identification, semantic relatedness) but not on others (named entity recognition, various forms of sentiment analysis). We discuss how these differences may be caused by the different role of word sense information in each of the tasks. 
The results highlight the importance of testing embedding models in real applications.", "phrases": ["multi-sense embedding", "tagging", "entity recognition", "sentiment analysis", "semantic relation identification"], "overall_score": 4.011621046816699, "scores": [1.369799405945906, 1.3401893328373997, 1.1502302361121437, 1.0794892322989555, 1.0170316359348692], "rank_score": 1.191347968625855} -{"id": "malmi-etal-2019-encode", "title": "Encode, Tag, Realize: High-Precision Text Editing", "abstract": "We propose LaserTagger - a sequence tagging approach that casts text generation as a text editing task. Target texts are reconstructed from the inputs using three main edit operations: keeping a token, deleting it, and adding a phrase before the token. To predict the edit operations, we propose a novel model, which combines a BERT encoder with an autoregressive Transformer decoder. This approach is evaluated on English text on four tasks: sentence fusion, sentence splitting, abstractive summarization, and grammar correction. LaserTagger achieves new state-of-the-art results on three of these tasks, performs comparably to a set of strong seq2seq baselines with a large number of training examples, and outperforms them when the number of examples is limited. Furthermore, we show that at inference time tagging can be more than two orders of magnitude faster than comparable seq2seq models, making it more attractive for running in a live environment.", "phrases": ["lasertagger", "text generation", "edit operation", "sequence tagging model"], "overall_score": 3.9254329115938336, "scores": [1.7423573085521535, 1.3462102817919828, 1.1476050116322813, 0.5279380881898559], "rank_score": 1.1910276725415685} -{"id": "rahman-ng-2012-resolving", "title": "Resolving Complex Cases of Definite Pronouns: The Winograd Schema Challenge", "abstract": "We examine the task of resolving complex cases of definite pronouns, specifically those for which traditional linguistic constraints on coreference (e.g., Binding Constraints, gender and number agreement) as well as commonly-used resolution heuristics (e.g., string-matching facilities, syntactic salience) are not useful. Being able to solve this task has broader implications in artificial intelligence: a restricted version of it, sometimes referred to as the Winograd Schema Challenge, has been suggested as a conceptually and practically appealing alternative to the Turing Test. We employ a knowledge-rich approach to this task, which yields a pronoun resolver that outperforms state-of-the-art resolvers by nearly 18 points in accuracy on our dataset.", "phrases": ["pronoun", "winograd schema challenge", "coreference resolution problem"], "overall_score": 3.5672940616719777, "scores": [2.139139762768597, 0.8658860225778034, 0.5673502508872187], "rank_score": 1.190792012077873} -{"id": "corley-mihalcea-2005-measuring", "title": "Measuring the Semantic Similarity of Texts", "abstract": "This paper presents a knowledge-based method for measuring the semantic similarity of texts. While there is a large body of previous work focused on finding the semantic similarity of concepts and words, the application of these word-oriented methods to text similarity has not yet been explored. 
In this paper, we introduce a method that combines word-to-word similarity metrics into a text-to-text metric, and we show that this method outperforms the traditional text similarity metrics based on lexical matching.", "phrases": ["semantic similarity", "paraphrase", "implication", "recognition", "sts"], "overall_score": 3.441115479990754, "scores": [3.802481530483783, 0.5481113572005291, 0.5452266758302078, 0.5345139982896605, 0.5223876938440273], "rank_score": 1.1905442511296414} -{"id": "morin-etal-2007-bilingual", "title": "Bilingual Terminology Mining - Using Brain, not brawn comparable corpora", "abstract": "Current research in text mining favours the quantity of texts over their quality. But for bilingual terminology mining, and for many language pairs, large comparable corpora are not available. More importantly, as terms are defined vis-a-vis a specific domain with a restricted register, it is expected that the quality rather than the quantity of the corpus matters more in terminology mining. Our hypothesis, therefore, is that the quality of the corpus is more important than the quantity and ensures the quality of the acquired terminological resources. We show how important the type of discourse is as a characteristic of the comparable corpus.", "phrases": ["comparable corpora", "quantity", "specific domain", "lexicon extraction"], "overall_score": 2.852597568944143, "scores": [3.09541606674636, 0.5794411485565584, 0.5453369036938613, 0.5383082247942894], "rank_score": 1.1896255859477673} -{"id": "roark-etal-2004-discriminative", "title": "Discriminative Language Modeling with Conditional Random Fields and the Perceptron Algorithm", "abstract": "This paper describes discriminative language modeling for a large vocabulary speech recognition task. We contrast two parameter estimation methods: the perceptron algorithm, and a method based on conditional random fields (CRFs). The models are encoded as deterministic weighted finite state automata, and are applied by intersecting the automata with word-lattices that are the output from a baseline recognizer. The perceptron algorithm has the benefit of automatically selecting a relatively small feature set in just a couple of passes over the training data. However, using the feature set output from the perceptron algorithm (initialized with their weights), CRF training provides an additional 0.5% reduction in word error rate, for a total 1.8% absolute reduction from the baseline of 39.2%.", "phrases": ["conditional random field", "perceptron algorithm", "candidate"], "overall_score": 2.613701759495088, "scores": [2.38346007044541, 0.6235983563695405, 0.5615823754790691], "rank_score": 1.1895469340980065} -{"id": "duan-etal-2010-mixture", "title": "Mixture Model-based Minimum Bayes Risk Decoding using Multiple Machine Translation Systems", "abstract": "We present Mixture Model-based Minimum Bayes Risk (MMMBR) decoding, an approach that makes use of multiple SMT systems to improve translation accuracy. Unlike existing MBR decoding methods defined on the basis of single SMT systems, an MMMBR decoder reranks translation outputs in the combined search space of multiple systems using the MBR decision rule and a mixture distribution of component SMT models for translation hypotheses. MMMBR decoding is a general method that is independent of specific SMT models and can be applied to various commonly used search spaces. 
Experimental results on the NIST Chinese-to-English MT evaluation tasks show that our approach brings significant improvements to single system-based MBR decoding and outperforms a state-of-the-art system combination method.", "phrases": ["minimum bayes risk", "translation accuracy", "mixture"], "overall_score": 2.1311518852531495, "scores": [2.1563850525074835, 0.8206318607791901, 0.5912386285768474], "rank_score": 1.189418513954507} -{"id": "terra-clarke-2003-frequency", "title": "Frequency Estimates for Statistical Word Similarity Measures", "abstract": "Statistical measures of word similarity have application in many areas of natural language processing, such as language modeling and information retrieval. We report a comparative study of two methods for estimating word co-occurrence frequencies required by word similarity measures. Our frequency estimates are generated from a terabyte-sized corpus of Web data, and we study the impact of corpus size on the effectiveness of the measures. We base the evaluation on one TOEFL question set and two practice question sets, each consisting of a number of multiple choice questions seeking the best synonym for a given target word. For two question sets, a context for the target word is provided, and we examine a number of word similarity measures that exploit this context. Our best combination of similarity measure and frequency estimation method answers 6-8% more questions than the best results previously reported for the same question sets.", "phrases": ["word similarity measure", "web data", "frequency estimate"], "overall_score": 2.6129028585784297, "scores": [2.105758636739389, 0.89368659620192, 0.5681047829239145], "rank_score": 1.1891833386217412} -{"id": "wieting-gimpel-2018-paranmt", "title": "ParaNMT-50M: Pushing the Limits of Paraphrastic Sentence Embeddings with Millions of Machine Translations", "abstract": "We describe ParaNMT-50M, a dataset of more than 50 million English-English sentential paraphrase pairs. We generated the pairs automatically by using neural machine translation to translate the non-English side of a large parallel corpus, following Wieting et al. (2017). Our hope is that ParaNMT-50M can be a valuable resource for paraphrase generation and can provide a rich source of semantic knowledge to improve downstream natural language understanding tasks. To show its utility, we use ParaNMT-50M to train paraphrastic sentence embeddings that outperform all supervised systems on every SemEval semantic textual similarity competition, in addition to showing how it can be used for paraphrase generation.", "phrases": ["paraphrase", "parallel corpus", "back-translation", "pivot language"], "overall_score": 4.157298097789218, "scores": [1.7324356437486848, 1.359035256087553, 1.0982236723817433, 0.5662489224908872], "rank_score": 1.188985873677217} -{"id": "gu-etal-2018-meta", "title": "Meta-Learning for Low-Resource Neural Machine Translation", "abstract": "In this paper, we propose to extend the recently introduced model-agnostic meta-learning algorithm (MAML, Finn et al., 2017) for low-resource neural machine translation (NMT). We frame low-resource translation as a meta-learning problem where we learn to adapt to low-resource languages based on multilingual high-resource language tasks. We use the universal lexical representation (Gu et al., 2018b) to overcome the input-output mismatch across different languages.
We evaluate the proposed meta-learning strategy using eighteen European languages (Bg, Cs, Da, De, El, Es, Et, Fr, Hu, It, Lt, Nl, Pl, Pt, Sk, Sl, Sv and Ru) as source tasks and five diverse languages (Ro, Lv, Fi, Tr and Ko) as target tasks. We show that the proposed approach significantly outperforms the multilingual, transfer learning based approach (Zoph et al., 2016) and enables us to train a competitive NMT system with only a fraction of training examples. For instance, the proposed approach can achieve as high as 22.04 BLEU on Romanian-English WMT'16 by seeing only 16,000 translated words (~600 parallel sentences).", "phrases": ["machine translation", "meta-learning algorithm", "cross-lingual transfer"], "overall_score": 4.155624160601242, "scores": [2.0588046254188352, 0.9068649055014365, 0.5998518536738346], "rank_score": 1.1885071281980355} -{"id": "sharma-etal-2016-shallow", "title": "Shallow Parsing Pipeline - Hindi-English Code-Mixed Social Media Text", "abstract": "In this study, the problem of shallow parsing of Hindi-English code-mixed social media text (CSMT) has been addressed. We have annotated the data, developed a language identifier, a normalizer, a part-of-speech tagger and a shallow parser. To the best of our knowledge, we are the first to attempt shallow parsing on CSMT. The pipeline developed has been made available to the research community with the goal of enabling better text analysis of Hindi English CSMT. The pipeline is accessible at 1.", "phrases": ["language identifier", "shallow parsing", "medium text", "pos tag", "code-mixed data"], "overall_score": 3.294836371226116, "scores": [1.7673911684485915, 1.4299910479426006, 1.1164187065470423, 1.0662899000891506, 0.5617142936082467], "rank_score": 1.1883610233271265} -{"id": "huang-carley-2019-syntax", "title": "Syntax-Aware Aspect Level Sentiment Classification with Graph Attention Networks", "abstract": "Aspect level sentiment classification aims to identify the sentiment expressed towards an aspect given a context sentence. Previous neural network based methods largely ignore the syntax structure in one sentence. In this paper, we propose a novel target-dependent graph attention network (TD-GAT) for aspect level sentiment classification, which explicitly utilizes the dependency relationship among words. Using the dependency graph, it propagates sentiment features directly from the syntactic context of an aspect target. In our experiments, we show our method outperforms multiple baselines with GloVe embeddings. We also demonstrate that using BERT representations further substantially boosts the performance.", "phrases": ["graph attention network", "dependency tree", "recent effort"], "overall_score": 3.7762394209437624, "scores": [1.5627578929210106, 1.469081891441965, 0.5328318252399913], "rank_score": 1.1882238698676557} -{"id": "tseng-etal-2005-conditional", "title": "A Conditional Random Field Word Segmenter for Sighan Bakeoff 2005", "abstract": "We present a Chinese word segmentation system submitted to the closed track of Sighan bakeoff 2005. Our segmenter was built using a conditional random field sequence model that provides a framework to use a large number of linguistic features such as character identity, morphological and character reduplication features. Because our morphological features were extracted from the training corpora automatically, our system was not biased toward any particular variety of Mandarin.
Thus, our system does not overfit the variety of Mandarin most familiar to the system's designers. Our final system achieved an F-score of 0.947 (AS), 0.943 (HK), 0.950 (PK) and 0.964 (MSR).", "phrases": ["random field", "word segmentation", "sighan bakeoff", "crf", "end"], "overall_score": 3.5594077723728326, "scores": [0.8617560464630624, 1.7749443620610943, 1.5341352531416383, 1.2107150908305762, 0.5592467676874417], "rank_score": 1.1881595040367625} -{"id": "rashkin-etal-2018-modeling", "title": "Modeling Naive Psychology of Characters in Simple Commonsense Stories", "abstract": "Understanding a narrative requires reading between the lines and reasoning about the unspoken but obvious implications about events and people's mental states \u2014 a capability that is trivial for humans but remarkably hard for machines. To facilitate research addressing this challenge, we introduce a new annotation framework to explain naive psychology of story characters as fully-specified chains of mental states with respect to motivations and emotional reactions. Our work presents a new large-scale dataset with rich low-level annotations and establishes baseline performance on several new tasks, suggesting avenues for future research.", "phrases": ["naive psychology", "story", "mental state", "chain", "emotional reaction"], "overall_score": 3.2941052666843285, "scores": [0.914236201648133, 1.7924976725643598, 1.5448200494005215, 0.8577371968704438, 0.8311955450311636], "rank_score": 1.1880973331029243} -{"id": "marjou-2021-oteann", "title": "OTEANN: Estimating the Transparency of Orthographies with an Artificial Neural Network", "abstract": "To transcribe spoken language to written medium, most alphabets enable an unambiguous sound-to-letter rule. However, some writing systems have distanced themselves from this simple concept and little work exists in Natural Language Processing (NLP) on measuring such distance. In this study, we use an Artificial Neural Network (ANN) model to evaluate the transparency between written words and their pronunciation, hence its name Orthographic Transparency Estimation with an ANN (OTEANN). Based on datasets derived from Wikimedia dictionaries, we trained and tested this model to score the percentage of false predictions in phoneme-to-grapheme and grapheme-to-phoneme translation tasks. The scores obtained on 17 orthographies were in line with the estimations of other studies. Interestingly, the model also provided insight into typical mistakes made by learners who only consider the phonemic rule in reading and writing.", "phrases": ["transparency", "orthography", "artificial neural network", "oteann", "purpose"], "overall_score": 1.646523724272717, "scores": [2.1152137059891434, 1.9121108234988748, 0.8146340065842987, 0.5756946819508739, 0.5209258112624993], "rank_score": 1.187715805857138} -{"id": "prakash-etal-2016-neural", "title": "Neural Paraphrase Generation with Stacked Residual LSTM Networks", "abstract": "In this paper, we propose a novel neural approach for paraphrase generation. Conventional paraphrase generation methods either leverage hand-written rules and thesauri-based alignments, or use statistical machine learning principles. To the best of our knowledge, this work is the first to explore deep learning models for paraphrase generation. Our primary contribution is a stacked residual LSTM network, where we add residual connections between LSTM layers. This allows for efficient training of deep LSTMs.
We evaluate our model and other state-of-the-art deep learning models on three different datasets: PPDB, WikiAnswers, and MSCOCO. Evaluation results demonstrate that our model outperforms sequence to sequence, attention-based, and bi-directional LSTM models on BLEU, METEOR, TER, and an embedding-based sentence similarity metric.", "phrases": ["lstm network", "neural paraphrase generation", "syntax"], "overall_score": 4.494026583577254, "scores": [0.9881309080963278, 1.7300261134683008, 0.8445819146371552], "rank_score": 1.1875796454005945} -{"id": "cui-etal-2013-bilingual", "title": "Bilingual Data Cleaning for SMT using Graph-based Random Walk", "abstract": "The quality of bilingual data is a key factor in Statistical Machine Translation (SMT). Low-quality bilingual data tends to produce incorrect translation knowledge and also degrades translation modeling performance. Previous work often used supervised learning methods to filter low-quality data, but a fair amount of human labeled examples are needed which are not easy to obtain. To reduce the reliance on labeled examples, we propose an unsupervised method to clean bilingual data. The method leverages the mutual reinforcement between the sentence pairs and the extracted phrase pairs, based on the observation that better sentence pairs often lead to better phrase extraction and vice versa. End-to-end experiments show that the proposed method substantially improves the performance in large-scale Chinese-to-English translation tasks.", "phrases": ["graph-based random walk", "sentence pair", "bilingual data cleaning"], "overall_score": 2.7343761934151125, "scores": [1.44344441536126, 1.2746456977497853, 0.8444833636323612], "rank_score": 1.187524492247802} -{"id": "reddy-etal-2014-large", "title": "Large-scale Semantic Parsing without Question-Answer Pairs", "abstract": "In this paper we introduce a novel semantic parsing approach to query Freebase in natural language without requiring manual annotations or question-answer pairs. Our key insight is to represent natural language via semantic graphs whose topology shares many commonalities with Freebase. Given this representation, we conceptualize semantic parsing as a graph matching problem. Our model converts sentences to semantic graphs using CCG and subsequently grounds them to Freebase guided by denotations as a form of weak supervision. Evaluation experiments on a subset of the Free917 and WebQuestions benchmark datasets show our semantic parser improves over the state of the art.", "phrases": ["semantic parsing", "question-answer pair", "freebase", "distant supervision"], "overall_score": 3.723428527302925, "scores": [2.500864148118873, 1.1159103693527421, 0.5851773355196471, 0.5480853288819002], "rank_score": 1.1875092954682906} -{"id": "eck-etal-2005-low", "title": "Low Cost Portability for Statistical Machine Translation based on N-gram Frequency and TF-IDF", "abstract": "Statistical machine translation relies heavily on the available training data. In some cases it is necessary to limit the amount of training data that can be created for or actually used by the systems. We introduce weighting schemes which allow us to sort sentences based on the frequency of unseen n-grams. A second approach uses TF-IDF to rank the sentences.
After sorting we can select smaller training corpora and we are able to show that systems trained on much less training data achieve a very competitive performance compared to baseline systems using all available training data.", "phrases": ["statistical machine translation", "tf-idf", "weighting scheme"], "overall_score": 2.4687147082490157, "scores": [1.8066660940665704, 0.9261590927179424, 0.8287772801759866], "rank_score": 1.1872008223201664} -{"id": "och-etal-2004-smorgasbord", "title": "A Smorgasbord of Features for Statistical Machine Translation", "abstract": "We describe a methodology for rapid experimentation in statistical machine translation which we use to add a large number of features to a baseline system exploiting features from a wide range of levels of syntactic representation. Feature values were combined in a log-linear model to select the highest scoring candidate translation from an n-best list. Feature weights were optimized directly against the BLEU evaluation metric on held-out data. We present results for a small selection of features at each level of syntactic representation.", "phrases": ["statistical machine translation", "list", "distortion model", "och"], "overall_score": 4.644017749049474, "scores": [2.083820593730694, 1.5959954240095526, 0.5374515625930689, 0.5311889012422227], "rank_score": 1.1871141203938844} -{"id": "goswami-etal-2020-unsupervised-relation", "title": "Unsupervised Relation Extraction from Language Models using Constrained Cloze Completion", "abstract": "We show that state-of-the-art self-supervised language models can be readily used to extract relations from a corpus without the need to train a fine-tuned extractive head. We introduce RE-Flex, a simple framework that performs constrained cloze completion over pretrained language models to perform unsupervised relation extraction. RE-Flex uses contextual matching to ensure that language model predictions match supporting evidence from the input corpus that is relevant to a target relation. We perform an extensive experimental study over multiple relation extraction benchmarks and demonstrate that RE-Flex outperforms competing unsupervised relation extraction methods based on pretrained language models by up to 27.8 F1 points compared to the next-best method. Our results show that constrained inference queries against a language model can enable accurate unsupervised relation extraction.", "phrases": ["fine-tuned extractive head", "unsupervised relation extraction", "template"], "overall_score": 1.910342760193548, "scores": [2.4951402316488522, 0.535537035156148, 0.5302108148688592], "rank_score": 1.1869626938912865} -{"id": "guo-diab-2012-modeling", "title": "Modeling Sentences in the Latent Space", "abstract": "Sentence Similarity is the process of computing a similarity score between two sentences. Previous sentence similarity work finds that latent semantics approaches to the problem do not perform well due to insufficient information in single sentences. In this paper, we show that by carefully handling words that are not in the sentences (missing words), we can train a reliable latent variable model on sentences. In the process, we propose a new evaluation framework for sentence similarity: Concept Definition Retrieval. The new framework allows for large scale tuning and testing of Sentence Similarity models. Experiments on the new task and previous data sets show significant improvement of our model over baselines and other traditional latent variable models.
Our results indicate comparable and even better performance than current state of the art systems addressing the problem of sentence similarity.", "phrases": ["latent space", "missing word", "wtmf", "paraphrase", "cross-lingual setting"], "overall_score": 3.6133888410206945, "scores": [2.17433850220774, 1.378386220147043, 0.9047357454211202, 0.8783704376721457, 0.5984148012819828], "rank_score": 1.1868491413460063} -{"id": "ding-etal-2020-self", "title": "Self-Attention with Cross-Lingual Position Representation", "abstract": "Position encoding (PE), an essential part of self-attention networks (SANs), is used to preserve the word order information for natural language processing tasks, generating fixed position indices for input sequences. However, in cross-lingual scenarios, e.g., machine translation, the PEs of source and target sentences are modeled independently. Due to word order divergences in different languages, modeling the cross-lingual positional relationships might help SANs tackle this problem. In this paper, we augment SANs with cross-lingual position representations to model the bilingually aware latent structure for the input sentence. Specifically, we utilize bracketing transduction grammar (BTG)-based reordering information to encourage SANs to learn bilingual diagonal alignments. Experimental results on WMT'14 English\u21d2German, WAT'17 Japanese\u21d2English, and WMT'17 Chinese\u21d4English translation tasks demonstrate that our approach significantly and consistently improves translation quality over strong baselines. Extensive analyses confirm that the performance gains come from the cross-lingual information.", "phrases": ["cross-lingual position representation", "position representation", "self-attention"], "overall_score": 1.303374780673616, "scores": [1.7210845324682755, 0.9329442415791689, 0.905119783048384], "rank_score": 1.186382852365276} -{"id": "cowan-collins-2005-morphology", "title": "Morphology and Reranking for the Statistical Parsing of Spanish", "abstract": "We present two methods for incorporating detailed features in a Spanish parser, building on a baseline model that is a lexicalized PCFG. The first method exploits Spanish morphology, and achieves an F1 constituency score of 83.6%. This is an improvement over 81.2% accuracy for the baseline, which makes little or no use of morphological information. The second model uses a reranking approach to add arbitrary global features of parse trees to the morphological model. The reranking model reaches 85.1% F1 accuracy on the Spanish parsing task. The resulting model for Spanish parsing combines an approach that specifically targets morphological information with an approach that makes use of general structural features.", "phrases": ["spanish", "morphology", "pronoun"], "overall_score": 2.6066695723545457, "scores": [1.7020118681092646, 1.3236943363885596, 0.533333138919741], "rank_score": 1.1863464478058552} -{"id": "tromble-eisner-2009-learning", "title": "Learning Linear Ordering Problems for Better Translation", "abstract": "We apply machine learning to the Linear Ordering Problem in order to learn sentence-specific reordering models for machine translation. We demonstrate that even when these models are used as a mere preprocessing step for German-English translation, they significantly outperform Moses' integrated lexicalized reordering model. \n \nOur models are trained on automatically aligned bitext. Their form is simple but novel.
They assess, based on features of the input sentence, how strongly each pair of input word tokens w_i, w_j would like to reverse their relative order. Combining all these pairwise preferences to find the best global reordering is NP-hard. However, we present a non-trivial O(n^3) algorithm, based on chart parsing, that at least finds the best reordering within a certain exponentially large neighborhood. We show how to iterate this reordering process within a local search algorithm, which we use in training.", "phrases": ["linear ordering problem", "machine translation", "learning objective", "minimal syntactic information", "interpolation"], "overall_score": 3.9520598182061257, "scores": [3.6523412646377125, 0.6137664688684347, 0.5785572185929649, 0.5428747486959481, 0.5425582360784632], "rank_score": 1.1860195873747048} -{"id": "cao-rei-2016-joint", "title": "A Joint Model for Word Embedding and Word Morphology", "abstract": "This paper presents a joint model for performing unsupervised morphological analysis on words, and learning a character-level composition function from morphemes to word embeddings. Our model splits individual words into segments, and weights each segment according to its ability to predict context words. Our morphological analysis is comparable to dedicated morphological analyzers at the task of morpheme boundary recovery, and also performs better than word-based embedding models at the task of syntactic analogy answering. Finally, we show that incorporating morphology explicitly into character-level models helps them produce embeddings for unseen words which correlate better with human judgments.", "phrases": ["joint model", "word embedding", "segmentation"], "overall_score": 3.2116878578980965, "scores": [1.7173880512585613, 1.0139020358205049, 0.8266437982575863], "rank_score": 1.1859779617788841} -{"id": "martin-2017-community2vec", "title": "community2vec: Vector representations of online communities encode semantic relationships", "abstract": "Vector embeddings of words have been shown to encode meaningful semantic relationships that enable solving of complex analogies. This vector embedding concept has been extended successfully to many different domains and in this paper we both create and visualize vector representations of an unstructured collection of online communities based on user participation. Further, we quantitatively and qualitatively show that these representations allow solving of semantically meaningful community analogies and also other more general types of relationships. These results could help improve community recommendation engines and also serve as a tool for sociological studies of community relatedness.", "phrases": ["vector representation", "online community", "semantic relationship"], "overall_score": 1.302720481273043, "scores": [1.8679788322809965, 0.8573497619801369, 0.8320332558936147], "rank_score": 1.1857872833849161} -{"id": "costa-jussa-fonollosa-2006-statistical", "title": "Statistical Machine Reordering", "abstract": "Reordering is currently one of the most important problems in statistical machine translation systems. This paper presents a novel strategy for dealing with it: statistical machine reordering (SMR). It consists in using the powerful techniques developed for statistical machine translation (SMT) to translate the source language (S) into a reordered source language (S'), which allows for an improved translation into the target language (T).
The SMT task changes from S2T to S'2T which leads to a monotonized word alignment and shorter translation units. In addition, the use of classes in SMR helps to infer new word reorderings. Experiments are reported in the EsEn WMT06 tasks and the ZhEn IWSLT05 task and show significant improvement in translation quality.", "phrases": ["statistical machine", "source sentence", "smt technique", "example costa-jussa\u0300"], "overall_score": 3.427227273261646, "scores": [2.078464002837567, 1.2020220455564605, 0.9119197535174272, 0.5505512435258376], "rank_score": 1.185739261359323} -{"id": "jin-chen-2008-fourth", "title": "The Fourth International Chinese Language Processing Bakeoff: Chinese Word Segmentation, Named Entity Recognition and Chinese POS Tagging", "abstract": "The Fourth International Chinese Language Processing Bakeoff was held in 2007 to assess the state of the art in three important tasks: Chinese word segmentation, named entity recognition and Chinese POS tagging. Twenty-eight groups submitted result sets in the three tasks across two tracks and a total of seven corpora. Strong results have been found in all the tasks as well as continuing challenges.", "phrases": ["chinese word segmentation", "entity recognition", "pos tagging", "cws"], "overall_score": 2.6050609690981523, "scores": [2.4902488908566225, 0.8723585581121434, 0.8591598456242107, 0.5206900690623466], "rank_score": 1.1856143409138307} -{"id": "misra-etal-2017-mapping", "title": "Mapping Instructions and Visual Observations to Actions with Reinforcement Learning", "abstract": "We propose to directly map raw visual observations and text input to actions for instruction execution. While existing approaches assume access to structured environment representations or use a pipeline of separately trained models, we learn a single model to jointly reason about linguistic and visual input. We use reinforcement learning in a contextual bandit setting to train a neural network agent. To guide the agent's exploration, we use reward shaping with different forms of supervision. Our approach does not require intermediate representations, planning procedures, or training different models. We evaluate in a simulated environment, and show significant improvements over supervised learning and common reinforcement learning variants.", "phrases": ["visual observation", "action", "reinforcement learning", "environment", "mapping instruction"], "overall_score": 3.128684732495242, "scores": [1.9502050759486316, 1.3715294638291207, 1.1692738760943255, 0.8564246677408156, 0.5802227830964615], "rank_score": 1.185531173341871} -{"id": "kim-etal-2020-efficient", "title": "Efficient Dialogue State Tracking by Selectively Overwriting Memory", "abstract": "Recent works in dialogue state tracking (DST) focus on an open vocabulary-based setting to resolve scalability and generalization issues of the predefined ontology-based approaches. However, they are inefficient in that they predict the dialogue state at every turn from scratch. Here, we consider dialogue state as an explicit fixed-sized memory and propose a selectively overwriting mechanism for more efficient DST. This mechanism consists of two steps: (1) predicting state operation on each of the memory slots, and (2) overwriting the memory with new values, of which only a few are generated according to the predicted state operations. Our method decomposes DST into two sub-tasks and guides the decoder to focus only on one of the tasks, thus reducing the burden of the decoder. 
This enhances the effectiveness of training and DST performance. Our SOM-DST (Selectively Overwriting Memory for Dialogue State Tracking) model achieves state-of-the-art joint goal accuracy with 51.72% in MultiWOZ 2.0 and 53.01% in MultiWOZ 2.1 in an open vocabulary-based DST setting. In addition, we analyze the accuracy gaps between the current and the ground truth-given situations and suggest that it is a promising direction to improve state operation prediction to boost the DST performance.", "phrases": ["dialogue state tracking", "memory", "state operation", "previous turn"], "overall_score": 3.286592930563536, "scores": [1.993086353417962, 1.6910275892068325, 0.5312918577025768, 0.5261455220173672], "rank_score": 1.1853878305861847} -{"id": "gonzalez-agirre-etal-2019-pharmaconer", "title": "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track", "abstract": "One of the biomedical entity types of relevance for medicine or biosciences are chemical compounds and drugs. The correct detection of these entities is critical for other text mining applications building on them, such as adverse drug-reaction detection, medication-related fake news or drug-target extraction. Although a significant effort was made to detect mentions of drugs/chemicals in English texts, so far only very limited attempts were made to recognize them in medical documents in other languages. Taking into account the growing amount of medical publications and clinical records written in Spanish, we have organized the first shared task on detecting drug and chemical entities in Spanish medical documents. Additionally, we included a clinical concept-indexing sub-track asking teams to return SNOMED-CT identifiers related to drugs/chemicals for a collection of documents. For this task, named PharmaCoNER, we generated annotation guidelines together with a corpus of 1,000 manually annotated clinical case studies. A total of 22 teams participated in the sub-track 1 (77 system runs), and 7 teams in the sub-track 2 (19 system runs). Top scoring teams used sophisticated deep learning approaches yielding very competitive results with F-measures above 0.91. These results indicate that there is a real interest in promoting biomedical text mining efforts beyond English. We foresee that the PharmaCoNER annotation guidelines, corpus and participant systems will foster the development of new resources for clinical and biomedical text mining systems of Spanish medical data.", "phrases": ["compound", "protein", "entity recognition track", "pharmaconer"], "overall_score": 2.306236273820796, "scores": [2.5513129423512493, 0.8758305499006681, 0.7884518110146189, 0.5250886896514536], "rank_score": 1.1851709982294976} -{"id": "ionescu-etal-2014-characters", "title": "Can characters reveal your native language? A language-independent approach to native language identification", "abstract": "A common approach in text mining tasks such as text categorization, authorship identification or plagiarism detection is to rely on features like words, part-of-speech tags, stems, or some other high-level linguistic features. In this work, an approach that uses character n-grams as features is proposed for the task of native language identification. Instead of doing standard feature selection, the proposed approach combines several string kernels using multiple kernel learning. Kernel Ridge Regression and Kernel Discriminant Analysis are independently used in the learning stage.
The empirical results obtained in all the experiments conducted in this work indicate that the proposed approach achieves state of the art performance in native language identification, reaching an accuracy that is 1.7% above the top scoring system of the 2013 NLI Shared Task. Furthermore, the proposed approach has an important advantage in that it is language independent and linguistic theory neutral. In the cross-corpus experiment, the proposed approach shows that it can also be topic independent, improving the state of the art system by 32.3%.", "phrases": ["character", "native language identification", "kernel discriminant analysis", "text analysis task"], "overall_score": 3.425384440581092, "scores": [2.0184525307807926, 1.3493998435233787, 0.8433620430967519, 0.5291923226294338], "rank_score": 1.185101685007589} -{"id": "rothe-etal-2016-ultradense", "title": "Ultradense Word Embeddings by Orthogonal Transformation", "abstract": "Embeddings are generic representations that are useful for many NLP tasks. In this paper, we introduce DENSIFIER, a method that learns an orthogonal transformation of the embedding space that focuses the information relevant for a task in an ultradense subspace of a dimensionality that is smaller by a factor of 100 than the original space. We show that ultradense embeddings generated by DENSIFIER reach state of the art on a lexicon creation task in which words are annotated with three types of lexical information - sentiment, concreteness and frequency. On the SemEval2015 10B sentiment analysis task we show that no information is lost when the ultradense subspace is used, but training is an order of magnitude more efficient due to the compactness of the ultradense space.", "phrases": ["orthogonal transformation", "ultradense subspace", "sentiment analysis task", "ultradense word embeddings", "downstream task"], "overall_score": 3.5501156325440473, "scores": [2.9059036045940614, 1.0163726253636076, 0.9133602352472994, 0.569016063279464, 0.5206360293150982], "rank_score": 1.1850577115599061} -{"id": "ozgur-radev-2009-detecting", "title": "Detecting Speculations and their Scopes in Scientific Text", "abstract": "Distinguishing speculative statements from factual ones is important for most biomedical text mining applications. We introduce an approach which is based on solving two sub-problems to identify speculative sentence fragments. The first sub-problem is identifying the speculation keywords in the sentences and the second one is resolving their linguistic scopes. We formulate the first sub-problem as a supervised classification task, where we classify the potential keywords as real speculation keywords or not by using a diverse set of linguistic features that represent the contexts of the keywords. 
After detecting the actual speculation keywords, we use the syntactic structures of the sentences to determine their scopes.", "phrases": ["scope", "syntactic structure", "speculation cue", "heuristic rule"], "overall_score": 2.6032812643832766, "scores": [2.4543288887266237, 0.909584258409798, 0.8488619749529981, 0.5264423274793189], "rank_score": 1.1848043623921845} -{"id": "mohammad-etal-2016-semeval", "title": "SemEval-2016 Task 6: Detecting Stance in Tweets", "abstract": "Here for the first time we present a shared task on detecting stance from tweets: given a tweet and a target entity (person, organization, etc.), automatic natural language systems must determine whether the tweeter is in favor of the given target, against the given target, or whether neither inference is likely. The target of interest may or may not be referred to in the tweet, and it may or may not be the target of opinion. Two tasks are proposed. Task A is a traditional supervised classification task where 70% of the annotated data for a target is used as training and the rest for testing. For Task B, we use as test data all of the instances for a new target (not used in task A) and no training data is provided. Our shared task received submissions from 19 teams for Task A and from 9 teams for Task B. The highest classification F-score obtained was 67.82 for Task A and 56.28 for Task B. However, systems found it markedly more difficult to infer stance towards the target of interest from tweets that express opinion towards another entity.", "phrases": ["tweets", "stance detection", "semeval"], "overall_score": 4.536048634856197, "scores": [0.868287233301096, 2.129875540176256, 0.5561387385544102], "rank_score": 1.1847671706772542} -{"id": "pereira-2009-zac", "title": "ZAC.PB: An Annotated Corpus for Zero Anaphora Resolution in Portuguese", "abstract": "This paper describes the methodology adopted in the construction of an annotated corpus for the study of zero anaphora in Portuguese, the ZAC corpus. To our knowledge, no such corpus exists at this time for the Portuguese language. The purpose of this linguistic resource is to promote the use of automatic discovery of linguistic parameters for anaphora resolution systems. Because of the complexity of the linguistic phenomena involved, a detailed description of the different situations is provided. This paper will only focus on the annotation of subject zero anaphors. The main issues regarding zero anaphora in Portuguese are: indefinite subjects, either without verbal agreement marks or with first person plural or third person plural verbal agreement; position of the anaphor relative to its antecedent, i.e. anaphoric and cataphoric relations; coreference chains inside the same sentence and spanning several sentences; and determining the head of the antecedent noun phrase for a given anaphor. Finally, preliminary observations taken from the ZAC corpus are presented.", "phrases": ["annotated corpus", "portuguese", "anaphor"], "overall_score": 1.9067920910525886, "scores": [2.13734471770862, 0.8466705907354752, 0.5702543088947415], "rank_score": 1.1847565391129455} -{"id": "ghosh-etal-2011-shallow", "title": "Shallow Discourse Parsing with Conditional Random Fields", "abstract": "Parsing discourse is a challenging natural language processing task. In this paper we take a data driven approach to identify arguments of explicit discourse connectives.
In contrast to previous work we do not make any assumptions on the span of arguments and consider parsing as a token-level sequence labeling task. We design the argument segmentation task as a cascade of decisions based on conditional random fields (CRFs). We train the CRFs on lexical, syntactic and semantic features extracted from the Penn Discourse Treebank and evaluate feature combinations on the commonly used test split. We show that the best combination of features includes syntactic and semantic features. The comparative error analysis investigates the performance variability over connective types and argument positions.", "phrases": ["discourse", "conditional random fields", "sequence labeling task", "crfs", "linear tagging approach"], "overall_score": 3.0382249289277246, "scores": [1.9304874099967828, 1.4596599518523836, 1.061764856238132, 0.9077688244849421, 0.5629016394004277], "rank_score": 1.1845165363945338} -{"id": "sasano-etal-2008-fully", "title": "A Fully-Lexicalized Probabilistic Model for Japanese Zero Anaphora Resolution", "abstract": "This paper presents a probabilistic model for Japanese zero anaphora resolution. First, this model recognizes discourse entities and links all mentions to them. Zero pronouns are then detected by case structure analysis based on automatically constructed case frames. Their appropriate antecedents are selected from the entities with high salience scores, based on the case frames and several preferences on the relation between a zero pronoun and an antecedent. Case structure and zero anaphora relation are simultaneously determined based on probabilistic evaluation metrics.", "phrases": ["probabilistic model", "anaphora resolution", "predicate"], "overall_score": 2.727063486370604, "scores": [2.1117465686886168, 0.9119418865670087, 0.5293574165361653], "rank_score": 1.184348623930597} -{"id": "levy-goldberg-2014-linguistic", "title": "Linguistic Regularities in Sparse and Explicit Word Representations", "abstract": "Recent work has shown that neuralembedded word representations capture many relational similarities, which can be recovered by means of vector arithmetic in the embedded space. We show that Mikolov et al.\u2019s method of first adding and subtracting word vectors, and then searching for a word similar to the result, is equivalent to searching for a word that maximizes a linear combination of three pairwise word similarities. Based on this observation, we suggest an improved method of recovering relational similarities, improving the state-of-the-art results on two recent word-analogy datasets. Moreover, we demonstrate that analogy recovery is not restricted to neural word embeddings, and that a similar amount of relational similarities can be recovered from traditional distributional word representations.", "phrases": ["regularity", "word embedding", "closely-related vector"], "overall_score": 4.276098479897616, "scores": [1.6925143172030728, 1.31498142957133, 0.5451453683205862], "rank_score": 1.184213705031663} -{"id": "akbik-etal-2014-exploratory", "title": "Exploratory Relation Extraction in Large Text Corpora", "abstract": "In this paper, we propose and demonstrate Exploratory Relation Extraction (ERE), a novel approach to identifying and extracting relations from large text corpora based on user-driven and data-guided incremental exploration. 
We draw upon ideas from the information seeking paradigm of Exploratory Search (ES) to enable an exploration process in which users begin with a vaguely defined information need and progressively sharpen their definition of extraction tasks as they identify relations of interest in the underlying data. This process extends the application of Relation Extraction to use cases characterized by imprecise information needs and uncertainty regarding the information content of available data. We present an interactive workflow that allows users to build extractors based on entity types and human-readable extraction patterns derived from subtrees in dependency trees. In order to evaluate the viability of our approach on large text corpora, we conduct experiments on a dataset of over 160 million sentences with mentions of over 6 million FREEBASE entities extracted from the CLUEWEB09 corpus. Our experiments indicate that even non-expert users can intuitively use our approach to identify relations and create high precision extractors with minimal effort.", "phrases": ["large text corpora", "paradigm", "exploratory relation extraction"], "overall_score": 1.3006139896609545, "scores": [2.0897785749414477, 0.9257025436147552, 0.5361284977108948], "rank_score": 1.1838698720890326} -{"id": "ferres-saggion-2022-alexsis", "title": "ALEXSIS: A Dataset for Lexical Simplification in Spanish", "abstract": "Lexical Simplification is the process of reducing the lexical complexity of a text by replacing difficult words with easier to read (or understand) expressions while preserving the original information and meaning. In this paper we introduce ALEXSIS, a new dataset for this task, and we use ALEXSIS to benchmark Lexical Simplification systems in Spanish. The paper describes the evaluation of three kinds of approaches to Lexical Simplification: a thesaurus-based approach, a single transformers-based approach, and a combination of transformers. We also report state of the art results on a previous Lexical Simplification dataset for Spanish.", "phrases": ["lexical simplification", "spanish", "alexsis"], "overall_score": 1.3004802219264953, "scores": [1.7989123236495244, 0.9398244557755777, 0.8125075549245102], "rank_score": 1.183748111449871} -{"id": "guo-etal-2019-autosem", "title": "AutoSeM: Automatic Task Selection and Mixing in Multi-Task Learning", "abstract": "Multi-task learning (MTL) has achieved success over a wide range of problems, where the goal is to improve the performance of a primary task using a set of relevant auxiliary tasks. However, when the usefulness of the auxiliary tasks w.r.t. the primary task is not known a priori, the success of MTL models depends on the correct choice of these auxiliary tasks and also a balanced mixing ratio of these tasks during alternate training. These two problems could be resolved via manual intuition or hyper-parameter tuning over all combinatorial task choices, but this introduces inductive bias or is not scalable when the number of candidate auxiliary tasks is very large. To address these issues, we present AutoSeM, a two-stage MTL pipeline, where the first stage automatically selects the most useful auxiliary tasks via a Beta-Bernoulli multi-armed bandit with Thompson Sampling, and the second stage learns the training mixing ratio of these selected auxiliary tasks via a Gaussian Process based Bayesian optimization framework.
We conduct several MTL experiments on the GLUE language understanding tasks, and show that our AutoSeM framework can successfully find relevant auxiliary tasks and automatically learn their mixing ratio, achieving significant performance boosts on several primary tasks. Finally, we present ablations for each stage of AutoSeM and analyze the learned auxiliary task choices.", "phrases": ["multi-task learning", "ratio", "useful auxiliary task", "thompson sampling", "autosem"], "overall_score": 2.460958644308679, "scores": [2.233798560402944, 0.9530081709942052, 1.1179869334018253, 1.0419725096048524, 0.5705885455577688], "rank_score": 1.1834709439923192} -{"id": "harashima-etal-2016-large", "title": "A Large-scale Recipe and Meal Data Collection as Infrastructure for Food Research", "abstract": "Everyday meals are an important part of our daily lives and, currently, there are many Internet sites that help us plan these meals. Allied to the growth in the amount of food data such as recipes available on the Internet is an increase in the number of studies on these data, such as recipe analysis and recipe search. However, there are few publicly available resources for food research; those that do exist do not include a wide range of food data or any meal data (that is, likely combinations of recipes). In this study, we construct a large-scale recipe and meal data collection as the underlying infrastructure to promote food research. Our corpus consists of approximately 1.7 million recipes and 36000 meals in cookpad, one of the largest recipe sites in the world. We made the corpus available to researchers in February 2015 and as of February 2016, 82 research groups at 56 universities have made use of it to enhance their studies.", "phrases": ["recipe", "meal data collection", "food research"], "overall_score": 1.2997844136471421, "scores": [1.7362344084597199, 0.9095129913998079, 0.9035968785198485], "rank_score": 1.183114759459792} -{"id": "wang-etal-2018-joint-embedding", "title": "Joint Embedding of Words and Labels for Text Classification", "abstract": "Word embeddings are effective intermediate representations for capturing semantic regularities between words, when learning the representations of text sequences. We propose to view text classification as a label-word joint embedding problem: each label is embedded in the same space with the word vectors. We introduce an attention framework that measures the compatibility of embeddings between text sequences and labels. The attention is learned on a training set of labeled samples to ensure that, given a text sequence, the relevant words are weighted higher than the irrelevant ones. Our method maintains the interpretability of word embeddings, and enjoys a built-in ability to leverage alternative sources of information, in addition to input text sequences. 
Extensive results on several large text datasets show that the proposed framework outperforms the state-of-the-art methods by a large margin, in terms of both accuracy and speed.", "phrases": ["text classification", "same space", "state-of-the-art method", "previous label", "deep learning"], "overall_score": 3.601569255743801, "scores": [3.7509799161347823, 0.5625087357335068, 0.5518408847417091, 0.5277068079871825, 0.5217981317696884], "rank_score": 1.1829668952733736} -{"id": "toutanova-etal-2015-representing", "title": "Representing Text for Joint Embedding of Text and Knowledge Bases", "abstract": "Models that learn to represent textual and knowledge base relations in the same continuous latent space are able to perform joint inferences among the two kinds of relations and obtain high accuracy on knowledge base completion (Riedel et al., 2013). In this paper we propose a model that captures the compositional structure of textual relations, and jointly optimizes entity, knowledge base, and textual relation representations. The proposed model significantly improves performance over a model that does not share parameters among textual relations with common sub-structure.", "phrases": ["knowledge base completion", "textual relation", "schema"], "overall_score": 4.673649364722605, "scores": [1.4181347446682497, 1.2990902454829591, 0.8312648254052978], "rank_score": 1.1828299385188357} -{"id": "morey-etal-2018-dependency", "title": "A Dependency Perspective on RST Discourse Parsing and Evaluation", "abstract": "Computational text-level discourse analysis mostly happens within Rhetorical Structure Theory (RST), whose structures have classically been presented as constituency trees, and relies on data from the RST Discourse Treebank (RST-DT); as a result, the RST discourse parsing community has largely borrowed from the syntactic constituency parsing community. The standard evaluation procedure for RST discourse parsers is thus a simplified variant of PARSEVAL, and most RST discourse parsers use techniques that originated in syntactic constituency parsing. In this article, we isolate a number of conceptual and computational problems with the constituency hypothesis. We then examine the consequences, for the implementation and evaluation of RST discourse parsers, of adopting a dependency perspective on RST structures, a view advocated so far only by a few approaches to discourse parsing. While doing that, we show the importance of the notion of headedness of RST structures. We analyze RST discourse parsing as dependency parsing by adapting to RST a recent proposal in syntactic parsing that relies on head-ordered dependency trees, a representation isomorphic to headed constituency trees. We show how to convert the original trees from the RST corpus, RST-DT, and their binarized versions used by all existing RST parsers to head-ordered dependency trees. We also propose a way to convert existing simple dependency parser output to constituent trees. This allows us to evaluate and to compare approaches from both constituent-based and dependency-based perspectives in a unified framework, using constituency and dependency metrics. We thus propose an evaluation framework to compare extant approaches easily and uniformly, something the RST parsing community has lacked up to now. We can also compare parsers' predictions to each other across frameworks. This allows us to characterize families of parsing strategies across the different frameworks, in particular with respect to the notion of headedness.
Our experiments provide evidence for the conceptual similarities between dependency parsers and shift-reduce constituency parsers, and confirm that dependency parsing constitutes a viable approach to RST discourse parsing.", "phrases": ["dependency perspective", "rst", "edu"], "overall_score": 2.835831155475747, "scores": [1.8609842647540378, 0.8629046091238037, 0.8240114714524615], "rank_score": 1.1826334484434344} -{"id": "mukherjee-liu-2012-aspect", "title": "Aspect Extraction through Semi-Supervised Modeling", "abstract": "Aspect extraction is a central problem in sentiment analysis. Current methods either extract aspects without categorizing them, or extract and categorize them using unsupervised topic modeling. By categorizing, we mean the synonymous aspects should be clustered into the same category. In this paper, we solve the problem in a different setting where the user provides some seed words for a few aspect categories and the model extracts and clusters aspect terms into categories simultaneously. This setting is important because categorizing aspects is a subjective task. For different application purposes, different categorizations may be needed. Some form of user guidance is desired. In this paper, we propose two statistical models to solve this seeded problem, which aim to discover exactly what the user wants. Our experimental results show that the two proposed models are indeed able to perform the task effectively.", "phrases": ["extraction", "sentiment analysis", "topic model", "aspect category"], "overall_score": 3.6004364318381326, "scores": [2.2638205320348526, 1.3056146349466502, 0.5978392862367452, 0.563104784230107], "rank_score": 1.1825948093620886} -{"id": "somasundaran-etal-2007-detecting", "title": "Detecting Arguing and Sentiment in Meetings", "abstract": "This paper analyzes opinion categories like Sentiment and Arguing in meetings. We first annotate the categories manually. We then develop genre-specific lexicons using interesting function word combinations for detecting the opinions. We analyze relations between dialog structure information and opinion expression in context of multiparty discourse. Finally we show that classifiers using lexical and discourse knowledge have significant improvement over baseline.", "phrases": ["arguing", "meeting", "dialog structure information"], "overall_score": 2.7230055553683994, "scores": [2.173766185280738, 0.8302725021105613, 0.5437201732738874], "rank_score": 1.1825862868883954} -{"id": "mullen-collier-2004-sentiment", "title": "Sentiment Analysis using Support Vector Machines with Diverse Information Sources", "abstract": "This paper introduces an approach to sentiment analysis which uses support vector machines (SVMs) to bring together diverse sources of potentially pertinent information, including several favorability measures for phrases and adjectives and, where available, knowledge of the topic of the text. Models using the features introduced are further combined with unigram models which have been shown to be effective in the past (Pang et al., 2002) and lemmatized versions of the unigram models. Experiments on movie review data from Epinions.com demonstrate that hybrid SVMs which combine unigram-style feature-based SVMs with those based on real-valued favorability measures obtain superior performance, producing the best results yet published using this data.
Further experiments using a feature set enriched with topic information on a smaller dataset of music reviews hand-annotated for topic are also reported, the results of which suggest that incorporating topic information into such models may also yield improvement.", "phrases": ["support vector machines", "svm", "diverse source", "favorability measure", "sentiment analysis"], "overall_score": 3.8970894561495637, "scores": [1.9412775249567635, 0.9218692539747874, 1.3487799040538722, 0.8635488249278989, 0.8366639798553128], "rank_score": 1.182427897553727} -{"id": "shekhar-etal-2019-beyond", "title": "Beyond task success: A closer look at jointly learning to see, ask, and GuessWhat", "abstract": "We propose a grounded dialogue state encoder which addresses a foundational issue on how to integrate visual grounding with dialogue system components. As a test-bed, we focus on the GuessWhat?! game, a two-player game where the goal is to identify an object in a complex visual scene by asking a sequence of yes/no questions. Our visually-grounded encoder leverages synergies between guessing and asking questions, as it is trained jointly using multi-task learning. We further enrich our model via a cooperative learning regime. We show that the introduction of both the joint architecture and cooperative learning lead to accuracy improvements over the baseline system. We compare our approach to an alternative system which extends the baseline with reinforcement learning. Our in-depth analysis shows that the linguistic skills of the two models differ dramatically, despite approaching comparable performance levels. This points at the importance of analyzing the linguistic output of competing systems beyond numeric comparison solely based on task success.", "phrases": ["task success", "guesswhat", "reinforcement learning", "gdse"], "overall_score": 2.597175918290136, "scores": [1.8416541236170534, 1.8182818851884264, 0.5370384146783256, 0.5311283750727128], "rank_score": 1.1820256996391296} -{"id": "qiu-etal-2006-paraphrase", "title": "Paraphrase Recognition via Dissimilarity Significance Classification", "abstract": "We propose a supervised, two-phase framework to address the problem of paraphrase recognition (PR). Unlike most PR systems that focus on sentence similarity, our framework detects dissimilarities between sentences and makes its paraphrase judgment based on the significance of such dissimilarities. The ability to differentiate significant dissimilarities not only reveals what makes two sentences a non-paraphrase, but also helps to recall additional paraphrases that contain extra but insignificant information. Experimental results show that while being accurate at discerning non-paraphrasing dissimilarities, our implemented system is able to achieve higher paraphrase recall (93%), at an overall performance comparable to the alternatives.", "phrases": ["dissimilarity", "significance", "paraphrase recognition"], "overall_score": 2.596905350253516, "scores": [2.058845084337698, 0.9513529899844775, 0.5355096021346091], "rank_score": 1.1819025588189283} -{"id": "maegaard-etal-2006-kunsti", "title": "KUNSTI - Knowledge Generation for Norwegian Language Technology", "abstract": "KUNSTI is the Norwegian national language technology programme, running 2001-2006 inclusive. The goal of the programme is to boost Norwegian language technology research. 
In this paper we describe the background, the objectives, the methodology applied in the management of the programme, the projects selected, and our first conclusions. We also describe national programmes from Sweden, France and Germany and compare objectives and methods.", "phrases": ["knowledge generation", "norwegian language technology", "kunsti"], "overall_score": 1.6383596177957962, "scores": [1.8016259141071074, 0.8898799546024532, 0.8539740749705388], "rank_score": 1.1818266478933663}
-{"id": "oostdijk-etal-2008-coi", "title": "From D-Coi to SoNaR: a reference corpus for Dutch", "abstract": "The computational linguistics community in The Netherlands and Belgium has long recognized the dire need for a major reference corpus of written Dutch. In part to answer this need, the STEVIN programme was established. To pave the way for the effective building of a 500-million-word reference corpus of written Dutch, a pilot project was established. The Dutch Corpus Initiative project or D-Coi was highly successful in that it not only realized about 10% of the projected large reference corpus, but also established the best practices and developed all the protocols and the necessary tools for building the larger corpus within the confines of a necessarily limited budget. We outline the steps involved in an endeavour of this kind, including the major highlights and possible pitfalls. Once converted to a suitable XML format, further linguistic annotation based on the state-of-the-art tools developed either before or during the pilot by the consortium partners proved easily and fruitfully applicable. Linguistic enrichment of the corpus includes PoS tagging, syntactic parsing and semantic annotation, involving both semantic role labeling and spatiotemporal annotation. D-Coi is expected to be followed by SoNaR, during which the 500-million-word reference corpus of Dutch should be built.", "phrases": ["sonar", "reference corpus", "dutch"], "overall_score": 1.2983234899347376, "scores": [1.7887742082580211, 0.9031959804407772, 0.8533847194701574], "rank_score": 1.1817849693896518}
-{"id": "ravi-knight-2011-bayesian", "title": "Bayesian Inference for Zodiac and Other Homophonic Ciphers", "abstract": "We introduce a novel Bayesian approach for deciphering complex substitution ciphers. Our method uses a decipherment model which combines information from letter n-gram language models as well as word dictionaries. Bayesian inference is performed on our model using an efficient sampling technique. We evaluate the quality of the Bayesian decipherment output on simple and homophonic letter substitution ciphers and show that unlike a previous approach, our method consistently produces almost 100% accurate decipherments. The new method can be applied on more complex substitution ciphers and we demonstrate its utility by cracking the famous Zodiac-408 cipher in a fully automated fashion, which has never been done before.", "phrases": ["decipherment", "zodiac-408 cipher", "bayesian inference"], "overall_score": 2.7207828022684493, "scores": [0.8290750755092605, 1.5498845467914175, 1.165903250146683], "rank_score": 1.1816209574824537}
-{"id": "fraser-etal-2013-automatic", "title": "Automatic speech recognition in the diagnosis of primary progressive aphasia", "abstract": "Narrative speech can provide a valuable source of information about an individual\u2019s linguistic abilities across lexical, syntactic, and pragmatic levels.
However, analysis of narrative speech is typically done by hand, and is therefore extremely time-consuming. Use of automatic speech recognition (ASR) software could make this type of analysis more efficient and widely available. In this paper, we present the results of an initial attempt to use ASR technology to generate transcripts of spoken narratives from participants with semantic dementia (SD), progressive nonfluent aphasia (PNFA), and healthy controls. We extract text features from the transcripts and use these features, alone and in combination with acoustic features from the speech signals, to classify transcripts as patient versus control, and SD versus PNFA. Additionally, we generate artificially noisy transcripts by applying insertions, substitutions, and deletions to manually-transcribed data, allowing experiments to be conducted across a wider range of noise levels than are produced by a tuned ASR system. We find that reasonably good classification accuracies can be achieved by selecting appropriate features from the noisy transcripts. We also find that the choice of using ASR data or manually transcribed data as the training set can have a strong effect on the accuracy of the classifiers.", "phrases": ["diagnosis", "aphasia", "automatic speech recognition"], "overall_score": 1.63662532440343, "scores": [1.8670002633669491, 0.8852466659977719, 0.7894799296004579], "rank_score": 1.1805756196550596} -{"id": "vilar-etal-2007-translate", "title": "Can We Translate Letters?", "abstract": "Current statistical machine translation systems handle the translation process as the transformation of a string of symbols into another string of symbols. Normally the symbols dealt with are the words in different languages, sometimes with some additional information included, like morphological data. In this work we try to push the approach to the limit, working not on the level of words, but treating both the source and target sentences as a string of letters. We try to find out if a nearly unmodified state-of-the-art translation system is able to cope with the problem and whether it is capable to further generalize translation rules, for example at the level of word suffixes and translation of unseen words. Experiments are carried out for the translation of Catalan to Spanish.", "phrases": ["letter", "translation system", "csmt", "address", "spelling-variant oov"], "overall_score": 3.475724404557896, "scores": [3.378600266070109, 0.8248743329741692, 0.6177150474340117, 0.5506974978876568, 0.5302973280422065], "rank_score": 1.1804368944816308} -{"id": "hsieh-etal-2019-robustness", "title": "On the Robustness of Self-Attentive Models", "abstract": "This work examines the robustness of self-attentive neural networks against adversarial input perturbations. Specifically, we investigate the attention and feature extraction mechanisms of state-of-the-art recurrent neural networks and self-attentive architectures for sentiment analysis, entailment and machine translation under adversarial attacks. We also propose a novel attack algorithm for generating more natural adversarial examples that could mislead neural models but not humans. Experimental results show that, compared to recurrent neural models, self-attentive models are more robust against adversarial perturbation. 
In addition, we provide theoretical explanations for their superior robustness to support our claims.", "phrases": ["robustness", "self-attentive model", "machine translation", "attack"], "overall_score": 2.9332467455386437, "scores": [2.4659611656422893, 1.0739837884462509, 0.6508819009929475, 0.5308744543643116], "rank_score": 1.1804253273614498}
-{"id": "jiao-etal-2006-semi", "title": "Semi-Supervised Conditional Random Fields for Improved Sequence Segmentation and Labeling", "abstract": "We present a new semi-supervised training procedure for conditional random fields (CRFs) that can be used to train sequence segmentors and labelers from a combination of labeled and unlabeled training data. Our approach is based on extending the minimum entropy regularization framework to the structured prediction case, yielding a training objective that combines unlabeled conditional entropy with labeled conditional likelihood. Although the training objective is no longer concave, it can still be used to improve an initial model (e.g. obtained from supervised training) by iterative ascent. We apply our new training algorithm to the problem of identifying gene and protein mentions in biological texts, and show that incorporating unlabeled data improves the performance of the supervised CRF in this case.", "phrases": ["crf", "conditional entropy", "unlabeled data"], "overall_score": 2.7178095581471875, "scores": [1.3981755252456374, 1.0784561972529312, 1.064357359403147], "rank_score": 1.1803296939672385}
-{"id": "ostermann-etal-2018-mcscript", "title": "MCScript: A Novel Dataset for Assessing Machine Comprehension Using Script Knowledge", "abstract": "We introduce a large dataset of narrative texts and questions about these texts, intended to be used in a machine comprehension task that requires reasoning using commonsense knowledge. Our dataset complements similar datasets in that we focus on stories about everyday activities, such as going to the movies or working in the garden, and that the questions require commonsense knowledge, or more specifically, script knowledge, to be answered. We show that our mode of data collection via crowdsourcing results in a substantial amount of such inference questions. The dataset forms the basis of a shared task on commonsense and script knowledge organized at SemEval 2018 and provides challenging test cases for the broader natural language understanding community.", "phrases": ["machine comprehension", "script knowledge", "story", "mcscript"], "overall_score": 2.7174509055270994, "scores": [1.9249104053613721, 1.3295117268967933, 0.9030951516921841, 0.5631784485033058], "rank_score": 1.180173933113414}
-{"id": "li-etal-2014-text", "title": "Text-level Discourse Dependency Parsing", "abstract": "Previous research on text-level discourse parsing mainly made use of constituency structure to parse the whole document into one discourse tree. In this paper, we present the limitations of constituency-based discourse parsing and first propose to use dependency structure to directly represent the relations between elementary discourse units (EDUs). The state-of-the-art dependency parsing techniques, the Eisner algorithm and maximum spanning tree (MST) algorithm, are adopted to parse an optimal discourse dependency tree based on the arc-factored model and the large-margin learning techniques.
Experiments show that our discourse dependency parsers achieve a competitive performance on text-level discourse parsing.", "phrases": ["dependency structure", "discourse unit", "edu", "rst tree"], "overall_score": 3.4746911563059912, "scores": [2.0689667769120064, 1.2043387311735925, 0.8866796952477747, 0.5603587139851821], "rank_score": 1.180085979329639} -{"id": "mi-etal-2008-forest", "title": "Forest-Based Translation", "abstract": "Among syntax-based translation models, the tree-based approach, which takes as input a parse tree of the source sentence, is a promising direction being faster and simpler than its string-based counterpart. However, current tree-based systems suffer from a major drawback: they only use the 1-best parse to direct the translation, which potentially introduces translation mistakes due to parsing errors. We propose a forest-based approach that translates a packed forest of exponentially many parses, which encodes many more alternatives than standard n-best lists. Large-scale experiments show an absolute improvement of 1.7 BLEU points over the 1-best baseline. This result is also 0.8 points higher than decoding with 30-best parses, and takes even less time.", "phrases": ["parse tree", "forest", "alternative"], "overall_score": 3.410128491215694, "scores": [1.6756911158161514, 1.3357101704419598, 0.5280691800232039], "rank_score": 1.1798234887604384} -{"id": "neelakantan-etal-2015-compositional", "title": "Compositional Vector Space Models for Knowledge Base Completion", "abstract": "Knowledge base (KB) completion adds new facts to a KB by making inferences from existing facts, for example by inferring with high likelihood nationality(X,Y) from bornIn(X,Y). Most previous methods infer simple one-hop relational synonyms like this, or use as evidence a multi-hop relational path treated as an atomic feature, like bornIn(X,Z)\u2192 containedIn(Z,Y). This paper presents an approach that reasons about conjunctions of multi-hop relations non-atomically, composing the implications of a path using a recurrent neural network (RNN) that takes as inputs vector embeddings of the binary relation in the path. Not only does this allow us to generalize to paths unseen at training time, but also, with a single high-capacity RNN, to predict new relation types not seen when the compositional model was trained (zero-shot learning). We assemble a new dataset of over 52M relational triples, and show that our method improves over a traditional classifier by 11%, and a method leveraging pre-trained embeddings by 7%.", "phrases": ["knowledge base completion", "path", "reasoning", "recurrent neural network", "compositional model"], "overall_score": 3.3423255199635684, "scores": [1.5267603188264194, 1.692902481146541, 0.9281235031774124, 0.8875093571951099, 0.863175640757586], "rank_score": 1.1796942602206135} -{"id": "garcia-etal-2021-probing", "title": "Probing for idiomaticity in vector space models", "abstract": "Contextualised word representation models have been successfully used for capturing different word usages and they may be an attractive alternative for representing idiomaticity in language. In this paper, we propose probing measures to assess if some of the expected linguistic properties of noun compounds, especially those related to idiomatic meanings, and their dependence on context and sensitivity to lexical choice, are readily available in some standard and widely used representations. 
For that, we constructed the Noun Compound Senses Dataset, which contains noun compounds and their paraphrases, in context-neutral and context-informative naturalistic sentences, in two languages: English and Portuguese. Results obtained using four types of probing measures with models like ELMo, BERT and some of its variants indicate that idiomaticity is not yet accurately represented by contextualised models.", "phrases": ["idiomaticity", "vector space model", "noun compound", "paraphrase", "bert"], "overall_score": 2.716051516730694, "scores": [3.020075350615403, 0.9611704038274587, 0.8651594299205317, 0.5292273988873956, 0.5221983481547003], "rank_score": 1.179566186281098}
-{"id": "bansal-etal-2020-self", "title": "Self-Supervised Meta-Learning for Few-Shot Natural Language Classification Tasks", "abstract": "Self-supervised pre-training of transformer models has revolutionized NLP applications. Such pre-training with language modeling objectives provides a useful initial point for parameters that generalize well to new tasks with fine-tuning. However, fine-tuning is still data inefficient \u2014 when there are few labeled examples, accuracy can be low. Data efficiency can be improved by optimizing pre-training directly for future fine-tuning with few examples; this can be treated as a meta-learning problem. However, standard meta-learning techniques require many training tasks in order to generalize; unfortunately, finding a diverse set of such supervised tasks is usually difficult. This paper proposes a self-supervised approach to generate a large, rich, meta-learning task distribution from unlabeled text. This is achieved using a cloze-style objective, but creating separate multi-class classification tasks by gathering tokens-to-be blanked from among only a handful of vocabulary terms. This yields as many unique meta-training tasks as the number of subsets of vocabulary terms. We meta-train a transformer model on this distribution of tasks using a recent meta-learning framework. On 17 NLP tasks, we show that this meta-training leads to better few-shot generalization than language-model pre-training followed by finetuning. Furthermore, we show how the self-supervised tasks can be combined with supervised tasks for meta-learning, providing substantial accuracy gains over previous supervised meta-learning.", "phrases": ["meta-learning", "generalization", "unlabeled text"], "overall_score": 3.02486981696751, "scores": [2.401880187594661, 0.5808201123515975, 0.5552289868461453], "rank_score": 1.1793097622641346}
-{"id": "bel-etal-2012-automatic", "title": "Automatic lexical semantic classification of nouns", "abstract": "The work we present here addresses cue-based noun classification in English and Spanish. Its main objective is to automatically acquire lexical semantic information by classifying nouns into previously known noun lexical classes. This is achieved by using particular aspects of linguistic contexts as cues that identify a specific lexical class. Here we concentrate on the task of identifying such cues and the theoretical background that allows for an assessment of the complexity of the task.
The results show that, despite the a priori complexity of the task, cue-based classification is a useful tool in the automatic acquisition of lexical semantic classes.", "phrases": ["noun", "semantic class", "most approach"], "overall_score": 2.1129327866559358, "scores": [2.388622601528877, 0.6147757892241528, 0.5343523335106214], "rank_score": 1.1792502414212171}
-{"id": "pool-nissim-2016-distant", "title": "Distant supervision for emotion detection using Facebook reactions", "abstract": "We exploit the Facebook reaction feature in a distant supervised fashion to train a support vector machine classifier for emotion detection, using several feature combinations and combining different Facebook pages. We test our models on existing benchmarks for emotion detection and show that employing only information that is derived completely automatically, thus without relying on any handcrafted lexicon as is usually done, we can achieve competitive results. The results also show that there is large room for improvement, especially by gearing the collection of Facebook pages, with a view to the target domain.", "phrases": ["emotion detection", "facebook reaction", "distant supervision"], "overall_score": 2.294668384031694, "scores": [1.74971728017726, 0.9324159286831908, 0.8555456276660355], "rank_score": 1.1792262788421621}
-{"id": "sirts-etal-2017-idea", "title": "Idea density for predicting Alzheimer's disease from transcribed speech", "abstract": "Idea Density (ID) measures the rate at which ideas or elementary predications are expressed in an utterance or in a text. Lower ID is found to be associated with an increased risk of developing Alzheimer's disease (AD) (Snowdon et al., 1996; Engelman et al., 2010). ID has been used in two different versions: propositional idea density (PID) counts the expressed ideas and can be applied to any text while semantic idea density (SID) counts pre-defined information content units and is naturally more applicable to normative domains, such as picture description tasks. In this paper, we develop DEPID, a novel dependency-based method for computing PID, and its version DEPID-R that enables to exclude repeating ideas\u2014a feature characteristic to AD speech. We conduct the first comparison of automatically extracted PID and SID in the diagnostic classification task on two different AD datasets covering both closed-topic and free-recall domains. While SID performs better on the normative dataset, adding PID leads to a small but significant improvement (+1.7 F-score). On the free-topic dataset, PID performs better than SID as expected (77.6 vs 72.3 in F-score) but adding the features derived from the word embedding clustering underlying the automatic SID increases the results considerably, leading to an F-score of 84.8.", "phrases": ["alzheimer", "disease", "idea density"], "overall_score": 1.294982143609784, "scores": [1.8374640361122316, 0.8525978586091328, 0.8461687399634364], "rank_score": 1.1787435448949337}
-{"id": "barrett-etal-2018-sequence", "title": "Sequence Classification with Human Attention", "abstract": "Learning attention functions requires large volumes of data, but many NLP tasks simulate human behavior, and in this paper, we show that human attention really does provide a good inductive bias on many attention functions in NLP. Specifically, we use estimated human attention derived from eye-tracking corpora to regularize attention functions in recurrent neural networks.
We show substantial improvements across a range of tasks, including sentiment analysis, grammatical error detection, and detection of abusive language.", "phrases": ["human attention", "sentiment analysis", "rationale"], "overall_score": 3.1092660173247073, "scores": [2.4771397075218147, 0.5296405054873474, 0.5277387030062007], "rank_score": 1.178172972005121}
-{"id": "tamburini-melandri-2012-anita", "title": "AnIta: a powerful morphological analyser for Italian", "abstract": "In this paper we present AnIta, a powerful morphological analyser for Italian implemented within the framework of finite-state-automata models. It is provided with a large lexicon containing more than 110,000 lemmas that enable it to cover relevant portions of Italian texts. We describe our design choices for the management of inflectional phenomena as well as some interesting new features to explicitly handle derivational and compositional processes in Italian, namely the wordform segmentation structure and Derivation Graph. Two different evaluation experiments, for testing coverage (Recall) and Precision, are described in detail, comparing the AnIta performances with some other freely available tools to handle Italian morphology. The experimental results show that the AnIta Morphological Analyser obtains the best performances among the tested systems, with Recall = 97.21% and Precision = 98.71%. This tool was a fundamental building block for designing a performant PoS-tagger and Lemmatiser for the Italian language that participated in two EVALITA evaluation campaigns, ranking, in both cases, together with the best performing systems.", "phrases": ["powerful morphological analyser", "italian", "anita"], "overall_score": 1.294055277582565, "scores": [1.7309807558918657, 0.9466572612487698, 0.8560616080967585], "rank_score": 1.1778998750791312}
-{"id": "daume-iii-jagarlamudi-2011-domain", "title": "Domain Adaptation for Machine Translation by Mining Unseen Words", "abstract": "We show that unseen words account for a large part of the translation error when moving to new domains. Using an extension of a recent approach to mining translations from comparable corpora (Haghighi et al., 2008), we are able to find translations for otherwise OOV terms. We show several approaches to integrating such translations into a phrase-based translation system, yielding consistent improvements in translation quality (between 0.5 and 1.5 Bleu points) on four domains and two language pairs.", "phrases": ["machine translation", "new domain", "induction", "dictionary mining technique"], "overall_score": 3.586074953359951, "scores": [2.1417714748000063, 1.120114232014839, 0.8605402362798862, 0.5890846819233327], "rank_score": 1.177877656254516}
-{"id": "wang-lu-2018-neural", "title": "Neural Segmental Hypergraphs for Overlapping Mention Recognition", "abstract": "In this work, we propose a novel segmental hypergraph representation to model overlapping entity mentions that are prevalent in many practical datasets. We show that our model built on top of such a new representation is able to capture features and interactions that cannot be captured by previous models while maintaining a low time complexity for inference. We also present a theoretical analysis to formally assess how our representation is better than alternative representations reported in the literature in terms of representational power.
Coupled with neural networks for feature learning, our model achieves the state-of-the-art performance in three benchmark datasets annotated with overlapping mentions.", "phrases": ["hypergraph", "ambiguity", "neural segmental hypergraph"], "overall_score": 3.5857506494061013, "scores": [1.6408750014612443, 1.278267589166747, 0.6141708167325941], "rank_score": 1.1777711357868619} -{"id": "liu-etal-2020-norm", "title": "Norm-Based Curriculum Learning for Neural Machine Translation", "abstract": "A neural machine translation (NMT) system is expensive to train, especially with high-resource settings. As the NMT architectures become deeper and wider, this issue gets worse and worse. In this paper, we aim to improve the efficiency of training an NMT by introducing a novel norm-based curriculum learning method. We use the norm (aka length or module) of a word embedding as a measure of 1) the difficulty of the sentence, 2) the competence of the model, and 3) the weight of the sentence. The norm-based sentence difficulty takes the advantages of both linguistically motivated and model-based sentence difficulties. It is easy to determine and contains learning-dependent features. The norm-based model competence makes NMT learn the curriculum in a fully automated way, while the norm-based sentence weight further enhances the learning of the vector representation of the NMT. Experimental results for the WMT'14 English-German and WMT'17 Chinese-English translation tasks demonstrate that the proposed method outperforms strong baselines in terms of BLEU score (+1.17/+1.56) and training speedup (2.22x/3.33x).", "phrases": ["curriculum", "neural machine translation", "norm", "training example"], "overall_score": 2.7117430485591365, "scores": [1.953647747394504, 1.3037484132118586, 0.9056733223557648, 0.547710686352811], "rank_score": 1.1776950423287347} -{"id": "chen-etal-2017-neural", "title": "Neural Machine Translation with Source Dependency Representation", "abstract": "Source dependency information has been successfully introduced into statistical machine translation. However, there are only a few preliminary attempts for Neural Machine Translation (NMT), such as concatenating representations of source word and its dependency label together. In this paper, we propose a novel NMT with source dependency representation to improve translation performance of NMT, especially long sentences. Empirical results on NIST Chinese-to-English translation task show that our method achieves 1.6 BLEU improvements on average over a strong NMT system.", "phrases": ["source dependency representation", "neural machine translation", "cnn"], "overall_score": 1.8953428912141017, "scores": [2.108772005602252, 0.8772393016603601, 0.5469169465790059], "rank_score": 1.1776427512805394} -{"id": "gardent-etal-2017-webnlg", "title": "The WebNLG Challenge: Generating Text from RDF Data", "abstract": "The WebNLG challenge consists in mapping sets of RDF triples to text. It provides a common benchmark on which to train, evaluate and compare \u201cmicroplanners\u201d, i.e. generation systems that verbalise a given content by making a range of complex interacting choices including referring expression generation, aggregation, lexicalisation, surface realisation and sentence segmentation. 
In this paper, we introduce the microplanning task, describe data preparation, introduce our evaluation methodology, analyse participant results and provide a brief description of the participating systems.", "phrases": ["webnlg challenge", "rdf data", "generation task", "table"], "overall_score": 3.9229040834542332, "scores": [2.410610994588184, 0.8743234482798714, 0.8885809111103582, 0.5355642616037297], "rank_score": 1.1772699038955359}
-{"id": "cherry-bergsma-2005-expectation", "title": "An Expectation Maximization Approach to Pronoun Resolution", "abstract": "We propose an unsupervised Expectation Maximization approach to pronoun resolution. The system learns from a fixed list of potential antecedents for each pronoun. We show that unsupervised learning is possible in this context, as the performance of our system is comparable to supervised methods. Our results indicate that a probabilistic gender/number model, determined automatically from unlabeled text, is a powerful feature for this task.", "phrases": ["expectation maximization approach", "pronoun resolution", "coreference resolution"], "overall_score": 2.586318968749693, "scores": [2.1005366776508176, 0.8431471620315649, 0.5875696272051772], "rank_score": 1.1770844889625198}
-{"id": "li-etal-2018-one", "title": "One Sentence One Model for Neural Machine Translation", "abstract": "Neural machine translation (NMT) has become a new state of the art and achieves promising translation results using a simple encoder-decoder neural network. This neural network is trained once on the parallel corpus and the fixed network is used to translate all the test sentences. We argue that the general fixed network cannot best fit the specific test sentences. In this paper, we propose the dynamic NMT which learns a general network as usual, and then fine-tunes the network for each test sentence. The fine-tuning work is done on a small set of the bilingual training data that is obtained through similarity search according to the test sentence. Extensive experiments demonstrate that this method can significantly improve the translation performance, especially when highly similar sentences are available.", "phrases": ["neural machine translation", "similar sentence", "nmt model", "training corpus"], "overall_score": 3.0189144949373077, "scores": [2.0806939403241143, 1.2174555164300316, 0.8375881983813341, 0.5722141586579097], "rank_score": 1.1769879534483474}
-{"id": "janssen-2012-neotag", "title": "NeoTag: a POS Tagger for Grammatical Neologism Detection", "abstract": "POS Taggers typically fail to correctly tag grammatical neologisms: for known words, a tagger will only take known tags into account, and hence discard any possibility that the word is used in a novel or deviant grammatical category in the text at hand. Grammatical neologisms are relatively rare, and therefore do not pose a significant problem for the overall performance of a tagger. But for studies on neologisms and grammaticalization processes, this makes traditional taggers rather unfit. This article describes a modified POS tagger that explicitly considers new tags for known words, hence making it better fit for neologism research. This tagger, called NeoTag, has an overall accuracy that is comparable to other taggers, but scores much better for grammatical neologisms. To achieve this, the tagger applies a system of lexical smoothing, which adds new categories to known words based on known homographs.
NeoTag also lemmatizes words as part of the tagging system, achieving a high accuracy on lemmatization for both known and unknown words, without the need for an external lexicon. The use of NeoTag is not restricted to grammatical neologism detection, and it can be used for other purposes as well.", "phrases": ["pos tagger", "grammatical neologism detection", "neotag"], "overall_score": 1.2925945977524052, "scores": [1.6831480925909796, 0.9560353264229258, 0.8905275019866262], "rank_score": 1.1765703070001772} -{"id": "yin-etal-2018-structvae", "title": "StructVAE: Tree-structured Latent Variable Models for Semi-supervised Semantic Parsing", "abstract": "Semantic parsing is the task of transducing natural language (NL) utterances into formal meaning representations (MRs), commonly represented as tree structures. Annotating NL utterances with their corresponding MRs is expensive and time-consuming, and thus the limited availability of labeled data often becomes the bottleneck of data-driven, supervised models. We introduce StructVAE, a variational auto-encoding model for semi-supervised semantic parsing, which learns both from limited amounts of parallel data, and readily-available unlabeled NL utterances. StructVAE models latent MRs not observed in the unlabeled data as tree-structured latent variables. Experiments on semantic parsing on the ATIS domain and Python code generation show that with extra unlabeled data, StructVAE outperforms strong supervised models.", "phrases": ["latent variable", "semi-supervised semantic parsing", "limited amount", "unlabeled data", "logical form"], "overall_score": 2.820461831320957, "scores": [3.1003155849732766, 1.1437968620429255, 0.5724323007568942, 0.5364946293929403, 0.528080335016902], "rank_score": 1.176223942436588} -{"id": "miculicich-werlen-popescu-belis-2017-validation", "title": "Validation of an Automatic Metric for the Accuracy of Pronoun Translation (APT)", "abstract": "In this paper, we define and assess a reference-based metric to evaluate the accuracy of pronoun translation (APT). The metric automatically aligns a candidate and a reference translation using GIZA++ augmented with specific heuristics, and then counts the number of identical or different pronouns, with provision for legitimate variations and omitted pronouns. All counts are then combined into one score. The metric is applied to the results of seven systems (including the baseline) that participated in the DiscoMT 2015 shared task on pronoun translation from English to French. The APT metric reaches around 0.993-0.999 Pearson correlation with human judges (depending on the parameters of APT), while other automatic metrics such as BLEU, METEOR, or those specific to pronouns used at DiscoMT 2015 reach only 0.972-0.986 Pearson correlation.", "phrases": ["automatic metric", "pronoun translation", "apt"], "overall_score": 1.6302977999086117, "scores": [1.9382952688722739, 0.8002409041322386, 0.7894976536460003], "rank_score": 1.1760112755501708} -{"id": "mcclosky-etal-2008-self", "title": "When is Self-Training Effective for Parsing?", "abstract": "Self-training has been shown capable of improving on state-of-the-art parser performance (McClosky et al., 2006) despite the conventional wisdom on the matter and several studies to the contrary (Charniak, 1997; Steedman et al., 2003). However, it has remained unclear when and why self-training is helpful. 
In this paper, we test four hypotheses (namely, presence of a phase transition, impact of search errors, value of non-generative reranker features, and effects of unknown words). From these experiments, we gain a better understanding of why self-training works for parsing. Since improvements from self-training are correlated with unknown bigrams and biheads but not unknown words, the benefit of self-training appears most influenced by seeing known words in new combinations.", "phrases": ["self-training", "known word", "factor", "new context"], "overall_score": 2.286998500360617, "scores": [2.7988283942384555, 0.8270664216362842, 0.5479515929018978, 0.5272925445730693], "rank_score": 1.1752847383374267}
-{"id": "pantel-etal-2007-isp", "title": "ISP: Learning Inferential Selectional Preferences", "abstract": "Semantic inference is a key component for advanced natural language understanding. However, existing collections of automatically acquired inference rules have shown disappointing results when used in applications such as textual entailment and question answering. This paper presents ISP, a collection of methods for automatically learning admissible argument values to which an inference rule can be applied, which we call inferential selectional preferences, and methods for filtering out incorrect inferences. We evaluate ISP and present empirical evidence of its effectiveness.", "phrases": ["inferential selectional preferences", "inference rule", "isp", "semantic class", "argument type"], "overall_score": 3.396202545586466, "scores": [2.601529598255167, 0.9752414987947677, 0.9402090650025189, 0.8358999312679799, 0.5221471178132722], "rank_score": 1.175005442226741}
-{"id": "ljubesic-etal-2018-predicting", "title": "Predicting Concreteness and Imageability of Words Within and Across Languages via Word Embeddings", "abstract": "The notions of concreteness and imageability, traditionally important in psycholinguistics, are gaining significance in semantic-oriented natural language processing tasks. In this paper we investigate the predictability of these two concepts via supervised learning, using word embeddings as explanatory variables. We perform predictions both within and across languages by exploiting collections of cross-lingual embeddings aligned to a single vector space. We show that the notions of concreteness and imageability are highly predictable both within and across languages, with a moderate loss of up to 20% in correlation when predicting across languages. We further show that the cross-lingual transfer via word embeddings is more efficient than the simple transfer via bilingual dictionaries.", "phrases": ["concreteness", "imageability", "word embedding"], "overall_score": 1.2908667647760483, "scores": [1.9024261082118192, 0.8168014838320212, 0.8057651049002725], "rank_score": 1.1749975656480378}
-{"id": "boudin-2016-pke", "title": "pke: an open source python-based keyphrase extraction toolkit", "abstract": "We describe pke, an open source python-based keyphrase extraction toolkit. It provides an end-to-end keyphrase extraction pipeline in which each component can be easily modified or extended to develop new approaches.
pke also allows for easy benchmarking of state-of-the-art keyphrase extraction approaches, and ships with supervised models trained on the SemEval-2010 dataset.", "phrases": ["open source", "keyphrase extraction toolkit", "pke"], "overall_score": 1.290807173145684, "scores": [1.7197226296126575, 0.9993242006060961, 0.805783138806751], "rank_score": 1.1749433230085016} -{"id": "shain-etal-2016-memory-access", "title": "Memory access during incremental sentence processing causes reading time latency", "abstract": "Studies on the role of memory as a predictor of reading time latencies (1) differ in their predictions about when memory effects should occur in processing and (2) have had mixed results, with strong positive effects emerging from isolated constructed stimuli and weak or even negative effects emerging from naturally-occurring stimuli. Our study addresses these concerns by comparing several implementations of prominent sentence processing theories on an exploratory corpus and evaluating the most successful of these on a confirmatory corpus, using a new self-paced reading corpus of seemingly natural narratives constructed to contain an unusually high proportion of memory-intensive constructions. We show highly significant and complementary broad-coverage latency effects both for predictors based on the Dependency Locality Theory and for predictors based on a left-corner parsing model of sentence processing. Our results indicate that memory access during sentence processing does take time, but suggest that stimuli requiring many memory access events may be necessary in order to observe the effect.", "phrases": ["sentence processing", "time latency", "memory access"], "overall_score": 1.2908029478206606, "scores": [1.853771247956652, 0.8640411469221123, 0.8070060359769957], "rank_score": 1.17493947695192} -{"id": "wadden-etal-2020-fact", "title": "Fact or Fiction: Verifying Scientific Claims", "abstract": "We introduce scientific claim verification, a new task to select abstracts from the research literature containing evidence that SUPPORTS or REFUTES a given scientific claim, and to identify rationales justifying each decision. To study this task, we construct SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts annotated with labels and rationales. We develop baseline models for SciFact, and demonstrate that simple domain adaptation techniques substantially improve performance compared to models trained on Wikipedia or political news. We show that our system is able to verify claims related to COVID-19 by identifying evidence from the CORD-19 corpus. Our experiments indicate that SciFact will provide a challenging testbed for the development of new systems designed to retrieve and reason over corpora containing specialized domain knowledge. Data and code for this new task are publicly available at . A leaderboard and COVID-19 fact-checking demo are available at .", "phrases": ["claim", "scifact", "wikipedia"], "overall_score": 3.1815247298404183, "scores": [1.6500862046970808, 1.3437297244969153, 0.530702997979693], "rank_score": 1.1748396423912297} -{"id": "guo-etal-2013-improved", "title": "Improved Information Structure Analysis of Scientific Documents Through Discourse and Lexical Constraints", "abstract": "Inferring the information structure of scientific documents is useful for many downstream applications. 
Existing feature-based machine learning approaches to this task require substantial training data and suffer from limited performance. Our idea is to guide feature-based models with declarative domain knowledge encoded as posterior distribution constraints. We explore a rich set of discourse and lexical constraints which we incorporate through the Generalized Expectation (GE) criterion. Our constrained model improves the performance of existing fully and lightly supervised models. Even a fully unsupervised version of this model outperforms lightly supervised feature-based models, showing that our approach can be useful even when no labeled data is available.", "phrases": ["information structure analysis", "scientific document", "discourse"], "overall_score": 1.890007049815217, "scores": [1.8207943137279181, 0.8564584779836609, 0.8457294281308492], "rank_score": 1.174327406614143} -{"id": "deng-byrne-2005-hmm", "title": "HMM Word and Phrase Alignment for Statistical Machine Translation", "abstract": "Estimation and alignment procedures for word and phrase alignment hidden Markov models (HMMs) are developed for the alignment of parallel text. The development of these models is motivated by an analysis of the desirable features of IBM Model 4, one of the original and most effective models for word alignment. These models are formulated to capture the desirable aspects of Model 4 in an HMM alignment formalism. Alignment behavior is analyzed and compared to human-generated reference alignments, and the ability of these models to capture different types of alignment phenomena is evaluated. In analyzing alignment performance, Chinese-English word alignments are shown to be comparable to those of IBM Model 4 even when models are trained over large parallel texts. In translation performance, phrase-based statistical machine translation systems based on these HMM alignments can equal and exceed systems based on Model 4 alignments, and this is shown in Arabic-English and Chinese-English translation. These alignment models can also be used to generate posterior statistics over collections of parallel text, and this is used to refine and extend phrase translation tables with a resulting improvement in translation quality.", "phrases": ["phrase alignment", "markov model", "hmm model"], "overall_score": 3.517273236097383, "scores": [2.0469911992268512, 0.9175452151252946, 0.557747536534421], "rank_score": 1.1740946502955223} -{"id": "elfardy-diab-2013-sentence", "title": "Sentence Level Dialect Identification in Arabic", "abstract": "This paper introduces a supervised approach for performing sentence level dialect identification between Modern Standard Arabic and Egyptian Dialectal Arabic. We use token level labels to derive sentence-level features. These features are then used with other core and meta features to train a generative classifier that predicts the correct label for each sentence in the given input text. 
The system achieves an accuracy of 85.5% on an Arabic online-commentary dataset outperforming a previously proposed approach achieving 80.9% and reflecting a significant gain over a majority baseline of 51.9% and two strong baseline systems of 78.5% and 80.4%, respectively.", "phrases": ["dialect", "arabic", "egyptian dialectal arabic", "sentence level"], "overall_score": 3.731118896903612, "scores": [1.305366992651851, 1.8679115163787186, 0.9602083707361739, 0.5626184678196626], "rank_score": 1.1740263368966015} -{"id": "schuster-manning-2016-enhanced", "title": "Enhanced English Universal Dependencies: An Improved Representation for Natural Language Understanding Tasks", "abstract": "Many shallow natural language understanding tasks use dependency trees to extract relations between content words. However, strict surface-structure dependency trees tend to follow the linguistic structure of sentences too closely and frequently fail to provide direct relations between content words. To mitigate this problem, the original Stanford Dependencies representation also defines two dependency graph representations which contain additional and augmented relations that explicitly capture otherwise implicit relations between content words. In this paper, we revisit and extend these dependency graph representations in light of the recent Universal Dependencies (UD) initiative and provide a detailed account of an enhanced and an enhanced++ English UD representation. We further present a converter from constituency to basic, i.e., strict surface structure, UD trees, and a converter from basic UD trees to enhanced and enhanced++ English UD graphs. We release both converters as part of Stanford CoreNLP and the Stanford Parser.", "phrases": ["content word", "implicit relation", "enhanced universal dependencies"], "overall_score": 3.9119176384127687, "scores": [1.480098365586443, 1.0925371044832153, 0.949283091471828], "rank_score": 1.173972853847162} -{"id": "das-etal-2010-probabilistic", "title": "Probabilistic Frame-Semantic Parsing", "abstract": "This paper contributes a formalization of frame-semantic parsing as a structure prediction problem and describes an implemented parser that transforms an English sentence into a frame-semantic representation. It finds words that evoke FrameNet frames, selects frames for them, and locates the arguments for each frame. The system uses two feature-based, discriminative probabilistic (log-linear) models, one with latent variables to permit disambiguation of new predicate words. The parser is demonstrated to significantly outperform previously published results.", "phrases": ["frame-semantic parsing", "frame", "disambiguation", "predicate", "srl"], "overall_score": 3.1789890958065468, "scores": [2.1533558253470932, 1.3456888064475638, 0.9257669882209203, 0.8619029904066419, 0.58280194158383], "rank_score": 1.17390331040121} -{"id": "wang-etal-2018-joint", "title": "Joint Training of Candidate Extraction and Answer Selection for Reading Comprehension", "abstract": "While sophisticated neural-based techniques have been developed in reading comprehension, most approaches model the answer in an independent manner, ignoring its relations with other answer candidates. This problem can be even worse in open-domain scenarios, where candidates from multiple passages should be combined to answer a single question. In this paper, we formulate reading comprehension as an extract-then-select two-stage procedure. 
We first extract answer candidates from passages, then select the final answer by combining information from all the candidates. Furthermore, we regard candidate extraction as a latent variable and train the two-stage process jointly with reinforcement learning. As a result, our approach has improved the state-of-the-art performance significantly on two challenging open-domain reading comprehension datasets. Further analysis demonstrates the effectiveness of our model components, especially the information fusion of all the candidates and the joint training of the extract-then-select procedure.", "phrases": ["candidate extraction", "answer selection", "reading comprehension"], "overall_score": 1.6268192012504818, "scores": [1.7307564132058697, 0.8961985148661641, 0.8935510630284885], "rank_score": 1.1735019970335074}
-{"id": "gupta-etal-2010-capturing", "title": "Capturing the Stars: Predicting Ratings for Service and Product Reviews", "abstract": "Bloggers, professional reviewers, and consumers continuously create opinion-rich web reviews about products and services, with the result that textual reviews are now abundant on the web and often convey a useful overall rating (number of stars). However, an overall rating cannot express the multiple or conflicting opinions that might be contained in the text, or explicitly rate the different aspects of the evaluated entity. This work addresses the task of automatically predicting ratings, for given aspects of a textual review, by assigning a numerical score to each evaluated aspect in the reviews. We handle this task as both a regression and a classification modeling problem and explore several combinations of syntactic and semantic features. Our results suggest that classification techniques perform better than ranking modeling when handling evaluative text.", "phrases": ["star", "rating", "service"], "overall_score": 1.289063715178854, "scores": [1.8073697678855178, 0.8895254736916033, 0.8231738359542318], "rank_score": 1.1733563591771177}
-{"id": "li-2010-understanding", "title": "Understanding the Semantic Structure of Noun Phrase Queries", "abstract": "Determining the semantic intent of web queries not only involves identifying their semantic class, which is a primary focus of previous works, but also understanding their semantic structure. In this work, we formally define the semantic structure of noun phrase queries as comprised of intent heads and intent modifiers. We present methods that automatically identify these constituents as well as their semantic roles based on Markov and semi-Markov conditional random fields. We show that the use of semantic features and syntactic features significantly contribute to improving the understanding performance.", "phrases": ["semantic structure", "query", "intent modifier", "conditional random field", "wonderland"], "overall_score": 3.09647077035682, "scores": [2.43540581880276, 1.5194933914197182, 0.8405161782008409, 0.5474002883244642, 0.5238071048199197], "rank_score": 1.1733245563135406}
-{"id": "abdul-mageed-etal-2018-tweet", "title": "You Tweet What You Speak: A City-Level Dataset of Arabic Dialects", "abstract": "Arabic has a wide range of varieties or dialects. Although a number of pioneering works have targeted some Arabic dialects, other dialects remain largely without investigation. A serious bottleneck for studying these dialects is the lack of any data that can be exploited in computational models.
In this work, we aim to bridge this gap: We present a considerably large dataset of > 1/4 billion tweets representing a wide range of dialects. Our dataset is more nuanced than previously reported work in that it is labeled at the fine-grained level of city. More specifically, the data represent 29 major Arab cities from 10 Arab countries with varying dialects (e.g., Egyptian, Gulf, KSA, Levantine, Yemeni).", "phrases": ["arabic dialect", "large dataset", "country"], "overall_score": 2.813397418870832, "scores": [1.8625005750975654, 1.1185850889527906, 0.5387478968051593], "rank_score": 1.1732778536185051}
-{"id": "stanojevic-simaan-2015-beer", "title": "BEER 1.1: ILLC UvA submission to metrics and tuning task", "abstract": "We describe the submissions of ILLC UvA to the metrics and tuning tasks on WMT15. Both submissions are based on the BEER evaluation metric originally presented at WMT14 (Stanojevic and Sima\u2019an, 2014a). The main changes introduced this year are: (i) extending the learning-to-rank trained sentence level metric to the corpus level (but still decomposable to sentence level), (ii) incorporating syntactic ingredients based on dependency trees, and (iii) a technique for finding parameters of BEER that avoid \u201cgaming of the metric\u201d during tuning.", "phrases": ["illc", "submission", "beer"], "overall_score": 1.2888352380876844, "scores": [1.8100847989557955, 0.8944197891488194, 0.8149405829944341], "rank_score": 1.1731483903663495}
-{"id": "koponen-2012-comparing", "title": "Comparing human perceptions of post-editing effort with post-editing operations", "abstract": "Post-editing performed by translators is an increasingly common use of machine translated texts. While high quality MT may increase productivity, post-editing poor translations can be a frustrating task which requires more effort than translating from scratch. For this reason, estimating whether machine translations are of sufficient quality to be used for post-editing and finding means to reduce post-editing effort are an important field of study. Post-editing effort consists of different aspects, of which temporal effort, or the time spent on post-editing, is the most visible and involves not only the technical effort needed to perform the editing, but also the cognitive effort required to detect and plan necessary corrections. Cognitive effort is difficult to examine directly, but ways to reduce the cognitive effort in particular may prove valuable in reducing the frustration associated with post-editing work. In this paper, we describe an experiment aimed at studying the relationship between technical post-editing effort and cognitive post-editing effort by comparing cases where the edit distance and a manual score reflecting perceived effort differ. We present results of an error analysis performed on such sentences and discuss the clues they may provide about edits requiring great cognitive effort compared to the technical effort, on one hand, or little cognitive effort, on the other.", "phrases": ["post-editing effort", "cognitive effort", "edit distance", "error analysis"], "overall_score": 3.095695872989837, "scores": [2.4633838650708504, 0.855925495442115, 0.8356101061867718, 0.5372042522513035], "rank_score": 1.1730309297377601}
-{"id": "shu-etal-2017-doc", "title": "DOC: Deep Open Classification of Text Documents", "abstract": "Traditional supervised learning makes the closed-world assumption that the classes that appear in the test data must have appeared in training.
This also applies to text learning or text classification. As learning is used increasingly in dynamic open environments where some new/test documents may not belong to any of the training classes, identifying these novel documents during classification presents an important problem. This problem is called open-world classification or open classification. This paper proposes a novel deep learning based approach. It outperforms existing state-of-the-art techniques dramatically.", "phrases": ["doc", "multi-class classifier", "unknown intent detection", "final layer", "space risk"], "overall_score": 3.5705377686424384, "scores": [1.9351581660284447, 1.1420247880070082, 1.0537806075265503, 0.8815094017690254, 0.8513986974611135], "rank_score": 1.1727743321584285} -{"id": "dou-etal-2014-beyond", "title": "Beyond Parallel Data: Joint Word Alignment and Decipherment Improves Machine Translation", "abstract": "Inspired by previous work, where decipherment is used to improve machine translation, we propose a new idea to combine word alignment and decipherment into a single learning process. We use EM to estimate the model parameters, not only to maximize the probability of parallel corpus, but also the monolingual corpus. We apply our approach to improve Malagasy-English machine translation, where only a small amount of parallel data is available. In our experiments, we observe gains of 0.9 to 2.1 Bleu over a strong baseline.", "phrases": ["parallel data", "decipherment", "machine translation"], "overall_score": 1.8868002053643986, "scores": [1.723248070806078, 0.9294646891838043, 0.8642918863915701], "rank_score": 1.1723348821271509} -{"id": "duong-etal-2013-simpler", "title": "Simpler unsupervised POS tagging with bilingual projections", "abstract": "We present an unsupervised approach to part-of-speech tagging based on projections of tags in a word-aligned bilingual parallel corpus. In contrast to the existing state-of-the-art approach of Das and Petrov, we have developed a substantially simpler method by automatically identifying \u201cgood\u201d training sentences from the parallel corpus and applying self-training. In experimental results on eight languages, our method achieves state-of-the-art results.", "phrases": ["projection", "training sentence", "pos tagger", "european language"], "overall_score": 3.0056606646533255, "scores": [2.219926375499188, 1.0048940156055695, 0.8746641341031498, 0.5877981393168218], "rank_score": 1.171820666131182} -{"id": "lievers-huang-2016-lexicon", "title": "A lexicon of perception for the identification of synaesthetic metaphors in corpora", "abstract": "Synaesthesia is a type of metaphor associating linguistic expressions that refer to two different sensory modalities. Previous studies, based on the analysis of poetic texts, have shown that synaesthetic transfers tend to go from the lower toward the higher senses (e.g., sweet music vs. musical sweetness). In non-literary language synaesthesia is rare, and finding a sufficient number of examples manually would be too time-consuming. In order to verify whether the directionality also holds for conventional synaesthesia found in non-literary texts, an automatic procedure for the identification of instances of synaesthesia is therefore highly desirable. In this paper, we first focus on the preliminary step of this procedure, that is, the creation of a controlled lexicon of perception. 
Next, we present the results of a small pilot study that applies the extraction procedure to English and Italian corpus data.", "phrases": ["perception", "identification", "synaesthetic aspect"], "overall_score": 1.6244207477660921, "scores": [2.18837608710106, 0.7816809777286383, 0.545258570849226], "rank_score": 1.1717718785596414} -{"id": "tromble-etal-2008-lattice", "title": "Lattice Minimum Bayes-Risk Decoding for Statistical Machine Translation", "abstract": "We present Minimum Bayes-Risk (MBR) decoding over translation lattices that compactly encode a huge number of translation hypotheses. We describe conditions on the loss function that will enable efficient implementation of MBR decoders on lattices. We introduce an approximation to the BLEU score (Papineni et al., 2001) that satisfies these conditions. The MBR decoding under this approximate BLEU is realized using Weighted Finite State Automata. Our experiments show that the Lattice MBR decoder yields moderate, consistent gains in translation performance over N-best MBR decoding on Arabic-to-English, Chinese-to-English and English-to-Chinese translation tasks. We conduct a range of experiments to understand why Lattice MBR improves upon N-best MBR and study the impact of various parameters on MBR performance.", "phrases": ["mbr", "approximation", "translation task", "lattice minimum bayes-risk", "compact representation"], "overall_score": 3.77159441044468, "scores": [0.8808463308448188, 1.6352746054418856, 1.5701554705637766, 0.9318686378067691, 0.8404133708403582], "rank_score": 1.1717116830995216} -{"id": "yu-etal-2016-building", "title": "Building Chinese Affective Resources in Valence-Arousal Dimensions", "abstract": "An increasing amount of research has recently focused on representing affective states as continuous numerical values on multiple dimensions, such as the valence-arousal (VA) space. Compared to the categorical approach that represents affective states as several classes (e.g., positive and negative), the dimensional approach can provide more fine-grained sentiment analysis. However, affective resources with valence-arousal ratings are still very rare, especially for the Chinese language. Therefore, this study builds 1) an affective lexicon called Chinese valence-arousal words (CVAW) containing 1,653 words, and 2) an affective corpus called Chinese valence-arousal text (CVAT) containing 2,009 sentences extracted from web texts. To improve the annotation quality, a corpus cleanup procedure is used to remove outlier ratings and improper texts. Experiments using CVAW words to predict the VA ratings of the CVAT corpus show results comparable to those obtained using English affective resources.", "phrases": ["dimension", "affective state", "chinese sentence"], "overall_score": 3.0919897661494544, "scores": [1.6758872736450892, 0.9466160589972409, 0.892376467184028], "rank_score": 1.1716265999421194} -{"id": "pado-lapata-2007-dependency", "title": "Dependency-Based Construction of Semantic Space Models", "abstract": "Traditionally, vector-based semantic space models use word co-occurrence counts from large corpora to represent lexical meaning. In this article we present a novel framework for constructing semantic spaces that takes syntactic relations into account. We introduce a formalization for this class of models, which allows linguistic knowledge to guide the construction process. 
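The Tromble et al. (2008) record above performs MBR decoding over lattices with a linear approximation to BLEU; the underlying decision rule is easiest to see in its N-best form, which the lattice method generalizes. A sketch assuming hypotheses with posterior probabilities are given; the unigram-overlap gain below is a stand-in for their BLEU approximation.

def gain(hyp, other):
    """Stand-in gain: unigram overlap. Tromble et al. use a linear
    approximation to BLEU; any sentence-level gain fits here."""
    h, o = set(hyp.split()), set(other.split())
    return len(h & o) / max(len(h), 1)

def mbr_decode(nbest):
    """nbest: list of (hypothesis, posterior_probability) pairs.
    Return the hypothesis with the highest expected gain under the
    posterior, i.e. the minimum Bayes-risk candidate."""
    best, best_score = None, float("-inf")
    for hyp, _ in nbest:
        expected = sum(p * gain(hyp, other) for other, p in nbest)
        if expected > best_score:
            best, best_score = hyp, expected
    return best

nbest = [("the cat sat", 0.5), ("a cat sat", 0.3), ("cat sat down", 0.2)]
print(mbr_decode(nbest))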
We evaluate our framework on a range of tasks relevant for cognitive science and natural language processing: semantic priming, synonymy detection, and word sense disambiguation. In all cases, our framework obtains results that are comparable or superior to the state of the art.", "phrases": ["semantic space", "co-occurrence", "pado\u0301", "target word", "statistic"], "overall_score": 4.628968869540189, "scores": [1.6544868509433506, 1.5168257254622626, 0.9614619002277162, 0.8670328157651502, 0.8578026137832605], "rank_score": 1.171521981236348} -{"id": "purver-battersby-2012-experimenting", "title": "Experimenting with Distant Supervision for Emotion Classification", "abstract": "We describe a set of experiments using automatically labelled data to train supervised classifiers for multi-class emotion detection in Twitter messages with no manual intervention. By cross-validating between models trained on different labellings for the same six basic emotion classes, and testing on manually labelled data, we conclude that the method is suitable for some emotions (happiness, sadness and anger) but less able to distinguish others; and that different labelling conventions are more suitable for some emotions than others.", "phrases": ["distant supervision", "emotion classification", "twitter message", "hashtag", "community"], "overall_score": 3.449255866414843, "scores": [2.8127980331812457, 0.925579714739702, 1.069546331953158, 0.5257467516500569, 0.5235669832518708], "rank_score": 1.1714475629552066} -{"id": "toutanova-etal-2008-applying", "title": "Applying Morphology Generation Models to Machine Translation", "abstract": "We improve the quality of statistical machine translation (SMT) by applying models that predict word forms from their stems using extensive morphological and syntactic information from both the source and target languages. Our inflection generation models are trained independently of the SMT system. We investigate different ways of combining the inflection prediction component with the SMT system by training the base MT system on fully inflected forms or on word stems. We applied our inflection generation models in translating English into two morphologically complex languages, Russian and Arabic, and show that our model improves the quality of SMT over both phrasal and syntax-based SMT systems according to BLEU and human judgements.", "phrases": ["morphological generation", "machine translation", "inflection", "morpho-syntactic information"], "overall_score": 3.860169386652591, "scores": [2.409681907557509, 1.0977786294479819, 0.6034450424031494, 0.5739978834650228], "rank_score": 1.1712258657184158} -{"id": "kong-zhou-2011-combining", "title": "Combining Dependency and Constituent-based Syntactic Information for Anaphoricity Determination in Coreference Resolution", "abstract": "This paper systematically explores the effectiveness of dependency and constituent-based syntactic information for anaphoricity determination. In particular, this paper proposes two ways to combine dependency and constituent-based syntactic information to explore their complementary advantage. One is a dependency-driven constituent-based structured representation, and the other uses a composite kernel. 
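The Purver and Battersby (2012) record above trains emotion classifiers on automatically labelled tweets; one of the labelling conventions it compares is emotion hashtags. A minimal sketch of that distant-labelling step; the hashtag-to-emotion map and cleanup rule are illustrative assumptions, not the paper's exact lists.

import re

# Illustrative hashtag-to-class map for the six basic emotions
HASHTAG_LABELS = {
    "#happy": "happiness", "#sad": "sadness", "#angry": "anger",
    "#scared": "fear", "#surprised": "surprise", "#disgusted": "disgust",
}

def distant_label(tweet):
    """Return (cleaned_text, label) if exactly one known hashtag occurs,
    else None. The marker hashtag is removed so a classifier cannot
    simply memorize it."""
    found = [h for h in HASHTAG_LABELS if h in tweet.lower()]
    if len(found) != 1:
        return None
    label = HASHTAG_LABELS[found[0]]
    text = re.sub(found[0], "", tweet, flags=re.IGNORECASE).strip()
    return text, label

print(distant_label("Exams are over at last #happy"))
# ('Exams are over at last', 'happiness')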
Evaluation on the Automatic Content Extraction (ACE) 2003 corpus shows that dependency and constituent-based syntactic information are quite complementary and proper combination can much improve the performance of anaphoricity determination, and further improve the performance of coreference resolution.", "phrases": ["constituent-based syntactic information", "anaphoricity determination", "coreference resolution"], "overall_score": 1.285828875527168, "scores": [1.715485918642445, 0.9024001955847942, 0.8933495294756767], "rank_score": 1.1704118812343054} -{"id": "chen-bansal-2018-fast", "title": "Fast Abstractive Summarization with Reinforce-Selected Sentence Rewriting", "abstract": "Inspired by how humans summarize long documents, we propose an accurate and fast summarization model that first selects salient sentences and then rewrites them abstractively (i.e., compresses and paraphrases) to generate a concise overall summary. We use a novel sentence-level policy gradient method to bridge the non-differentiable computation between these two neural networks in a hierarchical way, while maintaining language fluency. Empirically, we achieve the new state-of-the-art on all metrics (including human evaluation) on the CNN/Daily Mail dataset, as well as significantly higher abstractiveness scores. Moreover, by first operating at the sentence-level and then the word-level, we enable parallel decoding of our neural generative model that results in substantially faster (10-20x) inference speed as well as 4x faster training convergence than previous long-paragraph encoder-decoder models. We also demonstrate the generalization of our model on the test-only DUC-2002 dataset, where we achieve higher scores than a state-of-the-art model.", "phrases": ["summarization", "extractor", "content selection", "extract-then-rewrite architecture", "pipeline"], "overall_score": 4.937996686257101, "scores": [2.4397222229568327, 1.243317709143543, 1.0622841005373818, 0.5706747695275758, 0.5353907807943289], "rank_score": 1.1702779165919326} -{"id": "kumar-etal-2009-efficient", "title": "Efficient Minimum Error Rate Training and Minimum Bayes-Risk Decoding for Translation Hypergraphs and Lattices", "abstract": "Minimum Error Rate Training (MERT) and Minimum Bayes-Risk (MBR) decoding are used in most current state-of-the-art Statistical Machine Translation (SMT) systems. The algorithms were originally developed to work with N-best lists of translations, and recently extended to lattices that encode many more hypotheses than typical N-best lists. We here extend lattice-based MERT and MBR algorithms to work with hypergraphs that encode a vast number of translations produced by MT systems based on Synchronous Context Free Grammars. These algorithms are more efficient than the lattice-based versions presented earlier. We show how MERT can be employed to optimize parameters for MBR decoding. Our experiments show speedups from MERT and MBR as well as performance improvements from MBR decoding on several language pairs.", "phrases": ["minimum bayes-risk", "lattice", "mbr", "context free grammars", "loss function"], "overall_score": 3.087985541426541, "scores": [3.1202495429195958, 0.8211501961530272, 0.8249318919995605, 0.5608058226585174, 0.5234090781188558], "rank_score": 1.1701093063699115} -{"id": "kodaira-etal-2016-controlled", "title": "Controlled and Balanced Dataset for Japanese Lexical Simplification", "abstract": "We propose a new dataset for evaluating a Japanese lexical simplification method. 
Previous datasets have several deficiencies. All of them substitute only a single target word, and some of them extract sentences only from newswire corpus. In addition, most of these datasets do not allow ties and integrate simplification ranking from all the annotators without considering the quality. In contrast, our dataset has the following advantages: (1) it is the first controlled and balanced dataset for Japanese lexical simplification with high correlation with human judgment and (2) the consistency of the simplification ranking is improved by allowing candidates to have ties and by considering the reliability of annotators.", "phrases": ["balanced dataset", "japanese lexical simplification", "annotator"], "overall_score": 1.8831238388952172, "scores": [2.10871569074237, 0.8431209477963745, 0.5583152430840699], "rank_score": 1.1700506272076048} -{"id": "ribeiro-etal-2018-semantically", "title": "Semantically Equivalent Adversarial Rules for Debugging NLP models", "abstract": "Complex machine learning models for NLP are often brittle, making different predictions for input instances that are extremely similar semantically. To automatically detect this behavior for individual instances, we present semantically equivalent adversaries (SEAs) \u2013 semantic-preserving perturbations that induce changes in the model's predictions. We generalize these adversaries into semantically equivalent adversarial rules (SEARs) \u2013 simple, universal replacement rules that induce adversaries on many instances. We demonstrate the usefulness and flexibility of SEAs and SEARs by detecting bugs in black-box state-of-the-art models for three domains: machine comprehension, visual question-answering, and sentiment analysis. Via user studies, we demonstrate that we generate high-quality local adversaries for more instances than humans, and that SEARs induce four times as many mistakes as the bugs discovered by human experts. SEARs are also actionable: retraining models using data augmentation significantly reduces bugs, while maintaining accuracy.", "phrases": ["equivalent adversarial rules", "nlp model", "semantic-preserving perturbation", "attack", "paraphrase"], "overall_score": 4.425735602516884, "scores": [1.7048509039686208, 0.9237368478085933, 1.1039094139844419, 1.071230570742401, 1.0439385152719503], "rank_score": 1.1695332503552014} -{"id": "tomokiyo-boitet-2016-corpus", "title": "Corpus and dictionary development for classifiers/quantifiers towards a French-Japanese machine translation", "abstract": "Although quantifiers/classifiers expressions occur frequently in everyday communications or written documents, there is no description for them in classical bilingual paper dictionaries, nor in machine-readable dictionaries. The paper describes a corpus and dictionary development for quantifiers/classifiers, and their usage in the framework of French-Japanese machine translation (MT). They often cause problems of lexical ambiguity and of set phrase recognition during analysis, in particular for a long-distance language pair like French and Japanese. For the development of a dictionary aiming at ambiguity resolution for expressions including quantifiers and classifiers which may be ambiguous with common nouns, we have annotated our corpus with UWs (interlingual lexemes) of UNL (Universal Networking Language) found on the UNL-jp dictionary. The extraction of potential classifiers/quantifiers from corpus is made by UNLexplorer web service. 
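The Ribeiro et al. (2018) record above generalizes individual adversaries into simple, universal replacement rules (SEARs) and measures how often a rule flips a model's prediction. A hedged sketch of applying one candidate rule and computing its flip rate; the rule, model interface, and toy data are placeholders, and the paper's check that the rewrite is semantically equivalent is omitted here.

def apply_rule(text, rule):
    """rule: (pattern, replacement), e.g. ("What is", "What's"),
    in the spirit of the paper's replacement rules."""
    pattern, replacement = rule
    return text.replace(pattern, replacement)

def flip_rate(model_predict, texts, rule):
    """Fraction of affected instances whose prediction changes."""
    flips, affected = 0, 0
    for t in texts:
        t2 = apply_rule(t, rule)
        if t2 == t:
            continue  # rule does not apply to this instance
        affected += 1
        if model_predict(t2) != model_predict(t):
            flips += 1
    return flips / affected if affected else 0.0

# toy stand-in model: predicts positive iff "is" appears as a word
toy_model = lambda t: "is" in t.split()
texts = ["What is the color?", "What is it?", "Nothing here"]
print(flip_rate(toy_model, texts, ("What is", "What's")))  # 1.0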
Keywords : classifiers, quantifiers, phraseology study, corpus annotation, UNL (Universal Networking Language), UWs dictionary, Tori Bank, French-Japanese machine translation (MT).", "phrases": ["dictionary development", "quantifier", "french-japanese machine translation"], "overall_score": 1.2846792649585528, "scores": [1.7113136423491209, 0.9527866204249241, 0.8439961190241754], "rank_score": 1.1693654605994068} -{"id": "zhang-etal-2017-corpus", "title": "A Corpus of Annotated Revisions for Studying Argumentative Writing", "abstract": "This paper presents ArgRewrite, a corpus of between-draft revisions of argumentative essays. Drafts are manually aligned at the sentence level, and the writer's purpose for each revision is annotated with categories analogous to those used in argument mining and discourse analysis. The corpus should enable advanced research in writing comparison and revision analysis, as demonstrated via our own studies of student revision behavior and of automatic revision purpose prediction.", "phrases": ["revision", "argumentative writing", "argrewrite corpus"], "overall_score": 2.8026555542853075, "scores": [1.472883303035495, 1.5047610522891577, 0.5287500891014925], "rank_score": 1.1687981481420484} -{"id": "kreuz-caucci-2007-lexical", "title": "Lexical Influences on the Perception of Sarcasm", "abstract": "Speakers and listeners make use of a variety of pragmatic factors to produce and identify sarcastic statements. It is also possible that lexical factors play a role, although this possibility has not been investigated previously. College students were asked to read excerpts from published works that originally contained the phrase said sarcastically, although the word sarcastically was deleted. The participants rated the characters' statements in these excerpts as more likely to be sarcastic than those from similar excerpts that did not originally contain the word sarcastically. The use of interjections, such as gee or gosh, predicted a significant amount of the variance in the participants' ratings of sarcastic intent. This outcome suggests that sarcastic statements may be more formulaic than previously realized. It also suggests that computer software could be written to recognize such lexical factors, greatly increasing the likelihood that non-literal intent could be correctly interpreted by such programs, even if they are unable to identify the pragmatic components of nonliteral language.", "phrases": ["sarcasm", "factor", "lexical feature"], "overall_score": 2.997386075973426, "scores": [2.0424953183163157, 0.9377452898826696, 0.5255433176168373], "rank_score": 1.1685946419386075} -{"id": "xiao-etal-2016-transg", "title": "TransG : A Generative Model for Knowledge Graph Embedding", "abstract": "Recently, knowledge graph embedding, which projects symbolic entities and relations into continuous vector space, has become a new, hot topic in artificial intelligence. This paper addresses a new issue of multiple relation semantics that a relation may have multiple meanings revealed by the entity pairs associated with the corresponding triples, and proposes a novel Gaussian mixture model for embedding, TransG. The new model can discover latent semantics for a relation and leverage a mixture of relation component vectors for embedding a fact triple. To the best of our knowledge, this is the first generative model for knowledge graph embedding, which is able to deal with multiple relation semantics. 
Extensive experiments show that the proposed model achieves substantial improvements against the state-of-the-art baselines.", "phrases": ["generative model", "knowledge graph", "relation semantic"], "overall_score": 2.2737519731225544, "scores": [1.7194503538133663, 0.9144355661743925, 0.8715461898551332], "rank_score": 1.1684773699476307} -{"id": "zhang-wallace-2017-sensitivity", "title": "A Sensitivity Analysis of (and Practitioners' Guide to) Convolutional Neural Networks for Sentence Classification", "abstract": "Convolutional Neural Networks (CNNs) have recently achieved remarkably strong performance on the practically important task of sentence classification (Kim, 2014; Kalchbrenner et al., 2014; Johnson and Zhang, 2014; Zhang et al., 2016). However, these models require practitioners to specify an exact model architecture and set accompanying hyperparameters, including the filter region size, regularization parameters, and so on. It is currently unknown how sensitive model performance is to changes in these configurations for the task of sentence classification. We thus conduct a sensitivity analysis of one-layer CNNs to explore the effect of architecture components on model performance; our aim is to distinguish between important and comparatively inconsequential design decisions for sentence classification. We focus on one-layer CNNs (to the exclusion of more complex models) due to their comparative simplicity and strong empirical performance, which makes it a modern standard baseline method akin to Support Vector Machine (SVMs) and logistic regression. We derive practical advice from our extensive empirical results for those interested in getting the most out of CNNs for sentence classification in real world settings.", "phrases": ["convolutional neural networks", "sentence classification", "cnn", "model performance", "architecture component"], "overall_score": 3.5573086951371775, "scores": [1.947421812222543, 0.9033096994043104, 1.9209988192110037, 0.5363313946518252, 0.5340839113104127], "rank_score": 1.1684291273600191} -{"id": "gui-etal-2017-part", "title": "Part-of-Speech Tagging for Twitter with Adversarial Neural Networks", "abstract": "In this work, we study the problem of part-of-speech tagging for Tweets. In contrast to newswire articles, Tweets are usually informal and contain numerous out-of-vocabulary words. Moreover, there is a lack of large scale labeled datasets for this domain. To tackle these challenges, we propose a novel neural network to make use of out-of-domain labeled data, unlabeled in-domain data, and labeled in-domain data. Inspired by adversarial neural networks, the proposed method tries to learn common features through adversarial discriminator. In addition, we hypothesize that domain-specific features of target domain should be preserved in some degree. Hence, the proposed method adopts a sequence-to-sequence autoencoder to perform this task. Experimental results on three different datasets show that our method achieves better performance than state-of-the-art methods.", "phrases": ["twitter", "adversarial discriminator", "part-of-speech tagging"], "overall_score": 2.6900853626457932, "scores": [1.813721822150657, 0.8277103240024887, 0.8634355403841821], "rank_score": 1.168289228845776} -{"id": "zhou-etal-2006-paraeval", "title": "ParaEval: Using Paraphrases to Evaluate Summaries Automatically", "abstract": "ParaEval is an automated evaluation method for comparing reference and peer summaries. 
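The Zhang and Wallace record above studies one-layer CNNs whose key design choices are the filter region sizes and the number of feature maps per size. A minimal PyTorch sketch of that architecture, with convolution over word embeddings followed by max-over-time pooling; the embedding dimension, region sizes, and filter counts are example settings, not the paper's recommendations.

import torch
import torch.nn as nn

class OneLayerCNN(nn.Module):
    """Conv filters over word embeddings + max-over-time pooling,
    the architecture whose sensitivity the paper analyses."""
    def __init__(self, vocab_size, emb_dim=128, n_filters=100,
                 region_sizes=(3, 4, 5), n_classes=2, dropout=0.5):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.convs = nn.ModuleList(
            nn.Conv1d(emb_dim, n_filters, k) for k in region_sizes)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(n_filters * len(region_sizes), n_classes)

    def forward(self, token_ids):                 # (batch, seq_len)
        x = self.emb(token_ids).transpose(1, 2)   # (batch, emb_dim, seq_len)
        pooled = [torch.relu(c(x)).max(dim=2).values for c in self.convs]
        return self.fc(self.dropout(torch.cat(pooled, dim=1)))

model = OneLayerCNN(vocab_size=10000)
logits = model(torch.randint(0, 10000, (4, 20)))  # 4 sentences, 20 tokens
print(logits.shape)                               # torch.Size([4, 2])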
It facilitates a tiered-comparison strategy where recall-oriented global optimal and local greedy searches for paraphrase matching are enabled in the top tiers. We utilize a domain-independent paraphrase table extracted from a large bilingual parallel corpus using methods from Machine Translation (MT). We show that the quality of ParaEval's evaluations, measured by correlating with human judgments, closely resembles that of ROUGE's.", "phrases": ["paraphrase", "machine translation", "summarization"], "overall_score": 2.429166047939157, "scores": [2.0300758879903062, 0.8699122731298049, 0.6045576497375534], "rank_score": 1.1681819369525548} -{"id": "hashimoto-etal-2012-excitatory", "title": "Excitatory or Inhibitory: A New Semantic Orientation Extracts Contradiction and Causality from the Web", "abstract": "We propose a new semantic orientation, Excitation, and its automatic acquisition method. Excitation is a semantic property of predicates that classifies them into excitatory, inhibitory and neutral. We show that Excitation is useful for extracting contradiction pairs (e.g., destroy cancer b develop cancer) and causality pairs (e.g., increase in crime \u21d2 heighten anxiety). Our experiments show that with automatically acquired Excitation knowledge we can extract one million contradiction pairs and 500,000 causality pairs with about 70% precision from a 600 million page Web corpus. Furthermore, by combining these extracted causality and contradiction pairs, we can generate one million plausible causality hypotheses that are not written in any single sentence in our corpus with reasonable precision.", "phrases": ["causality", "predicate", "excitatory"], "overall_score": 2.5656654783368045, "scores": [1.7565437733347036, 0.8676329309485169, 0.8788773368934814], "rank_score": 1.1676846803922338} -{"id": "koper-schulte-im-walde-2014-rank", "title": "A Rank-based Distance Measure to Detect Polysemy and to Determine Salient Vector-Space Features for German Prepositions", "abstract": "This paper addresses vector space models of prepositions, a notoriously ambiguous word class. We propose a rank-based distance measure to explore the vector-spatial properties of the ambiguous objects, focusing on two research tasks: (i) to distinguish polysemous from monosemous prepositions in vector space; and (ii) to determine salient vector-space features for a classification of preposition senses. The rank-based measure predicts the polysemy vs. monosemy of prepositions with a precision of up to 88%, and suggests preposition-subcategorised nouns as more salient preposition features than preposition-subcategorising verbs.", "phrases": ["rank-based distance measure", "polysemy", "preposition"], "overall_score": 1.2828252721293263, "scores": [1.7853680759886768, 0.8771369855792739, 0.8405285892331299], "rank_score": 1.16767788360036} -{"id": "yasunaga-etal-2017-graph", "title": "Graph-based Neural Multi-Document Summarization", "abstract": "We propose a neural multi-document summarization system that incorporates sentence relation graphs. We employ a Graph Convolutional Network (GCN) on the relation graphs, with sentence embeddings obtained from Recurrent Neural Networks as input node features. Through multiple layer-wise propagation, the GCN generates high-level hidden sentence features for salience estimation. We then use a greedy heuristic to extract salient sentences that avoid redundancy. 
In our experiments on DUC 2004, we consider three types of sentence relation graphs and demonstrate the advantage of combining sentence relations in graphs with the representation power of deep neural networks. Our model improves upon other traditional graph-based extractive approaches and the vanilla GRU sequence model with no graph, and it achieves competitive results against other state-of-the-art multi-document summarization systems.", "phrases": ["summarization", "sentence relation graph", "graph convolutional network"], "overall_score": 3.4377936276782437, "scores": [2.440020909777052, 0.5354535792773699, 0.5271896707422982], "rank_score": 1.16755471993224} -{"id": "liu-etal-2018-negpar", "title": "NegPar: A parallel corpus annotated for negation", "abstract": "Although the existence of English corpora annotated for negation has allowed for extensive work on monolingual negation detection, little is understood on how negation-related phenomena translate across languages. The current study fills this gap by presenting NegPar, the first English-Chinese parallel corpus annotated for negation in the narrative domain (a collection of stories from Conan Doyle\u2019s Sherlock Holmes). While we followed the annotation guidelines in the CONANDOYLE-NEG corpus (Morante and Daelemans, 2012), we reannotated certain scope-related phenomena to ensure more consistent and interpretable semantic representation. To both ease the annotation process and analyze how similar negation is signaled in the two languages, we experimented with first projecting the annotations from English and then manually correcting the projection output in Chinese. Results show that projecting negation via word-alignment offers limited help to the annotation process, as negation can be rendered in different ways across languages.", "phrases": ["parallel corpus", "negation", "annotation process"], "overall_score": 1.282352201131382, "scores": [1.9925006788751862, 0.9375977950890683, 0.5716433534989002], "rank_score": 1.1672472758210517} -{"id": "mohammad-etal-2013-nrc", "title": "NRC-Canada: Building the State-of-the-Art in Sentiment Analysis of Tweets", "abstract": "In this paper, we describe how we created two state-of-the-art SVM classifiers, one to detect the sentiment of messages such as tweets and SMS (message-level task) and one to detect the sentiment of a term within a message (term-level task). Among submissions from 44 teams in a competition, our submissions stood first in both tasks on tweets, obtaining an F-score of 69.02 in the message-level task and 88.93 in the term-level task. We implemented a variety of surface-form, semantic, and sentiment features. We also generated two large word\u2010sentiment association lexicons, one from tweets with sentiment-word hashtags, and one from tweets with emoticons. In the message-level task, the lexicon-based features provided a gain of 5 F-score points over all others. Both of our systems can be replicated using freely available resources. 
1", "phrases": ["sentiment analysis", "hashtag", "lexicon-based feature", "n-gram", "twitter data"], "overall_score": 4.957839360984227, "scores": [2.3517067307710264, 1.5695953659357589, 0.8328694714999665, 0.5478488843302926, 0.5327976165926506], "rank_score": 1.166963613825939} -{"id": "rahimi-etal-2017-continuous", "title": "Continuous Representation of Location for Geolocation and Lexical Dialectology using Mixture Density Networks", "abstract": "We propose a method for embedding two-dimensional locations in a continuous vector space using a neural network-based model incorporating mixtures of Gaussian distributions, presenting two model variants for text-based geolocation and lexical dialectology. Evaluated over Twitter data, the proposed model outperforms conventional regression-based geolocation and provides a better estimate of uncertainty. We also show the effectiveness of the representation for predicting words from location in lexical dialectology, and evaluate it using the DARE dataset.", "phrases": ["location", "lexical dialectology", "mixture density networks"], "overall_score": 2.0907308149002928, "scores": [1.7954929639363473, 0.8792701922074685, 0.8258140990181909], "rank_score": 1.1668590850540022} -{"id": "bruni-etal-2013-vsem", "title": "VSEM: An open library for visual semantics representation", "abstract": "VSEM is an open library for visual semantics. Starting from a collection of tagged images, it is possible to automatically construct an image-based representation of concepts by using off-theshelf VSEM functionalities. VSEM is entirely written in MATLAB and its objectoriented design allows a large flexibility and reusability. The software is accompanied by a website with supporting documentation and examples.", "phrases": ["open library", "visual semantic representation", "vsem"], "overall_score": 1.2812489315394784, "scores": [1.7095681436223513, 0.9570583013172261, 0.8321026647432915], "rank_score": 1.1662430365609564} -{"id": "dredze-crammer-2008-online", "title": "Online Methods for Multi-Domain Learning and Adaptation", "abstract": "NLP tasks are often domain specific, yet systems can learn behaviors across multiple domains. We develop a new multi-domain online learning framework based on parameter combination from multiple classifiers. Our algorithms draw from multi-task learning and domain adaptation to adapt multiple source domain classifiers to a new target domain, learn across multiple similar domains, and learn across a large number of disparate domains. We evaluate our algorithms on two popular NLP domain adaptation tasks: sentiment classification and spam filtering.", "phrases": ["multi-domain learning", "adaptation", "learning framework"], "overall_score": 2.990788728720283, "scores": [1.9948154214331602, 0.9116148401012735, 0.5916373163145748], "rank_score": 1.1660225259496695} -{"id": "zhu-etal-2020-attend", "title": "Attend, Translate and Summarize: An Efficient Method for Neural Cross-Lingual Summarization", "abstract": "Cross-lingual summarization aims at summarizing a document in one language (e.g., Chinese) into another language (e.g., English). In this paper, we propose a novel method inspired by the translation pattern in the process of obtaining a cross-lingual summary. We first attend to some words in the source text, then translate them into the target language, and summarize to get the final summary. Specifically, we first employ the encoder-decoder attention distribution to attend to the source words. 
Second, we present three strategies to acquire the translation probability, which helps obtain the translation candidates for each source word. Finally, each summary word is generated either from the neural distribution or from the translation candidates of source words. Experimental results on Chinese-to-English and English-to-Chinese summarization tasks have shown that our proposed method can significantly outperform the baselines, achieving comparable performance with the state-of-the-art.", "phrases": ["summarization", "translation pattern", "probabilistic bilingual lexicon"], "overall_score": 2.424601537202553, "scores": [2.3588010892652016, 0.5788578663305237, 0.5603016582581561], "rank_score": 1.165986871284627} -{"id": "shah-etal-2016-shef", "title": "SHEF-Multimodal: Grounding Machine Translation on Images", "abstract": "This paper describes the University of Sheffield\u2019s submission for the WMT16 Multimodal Machine Translation shared task, where we participated in Task 1 to develop German-to-English and English-to-German statistical machine translation (SMT) systems in the domain of image descriptions. Our proposed systems are standard phrase-based SMT systems based on the Moses decoder, trained only on the provided data. We investigate how image features can be used to re-rank the n-best list produced by the SMT model, with the aim of improving performance by grounding the translations on images. Our submissions are able to outperform the strong, text-only baseline system for both directions.", "phrases": ["image", "multimodal machine translation", "phrase-based smt system", "textual feature", "input bitext"], "overall_score": 2.795787212877868, "scores": [2.9380636550152746, 1.1519920640797754, 0.6442022907665423, 0.553125072405106, 0.5422860542322302], "rank_score": 1.1659338272997857} -{"id": "jung-etal-2019-earlier", "title": "Earlier Isn't Always Better: Sub-aspect Analysis on Corpus and System Biases in Summarization", "abstract": "Despite the recent developments on neural summarization systems, the underlying logic behind the improvements from the systems and its corpus-dependency remains largely unexplored. Position of sentences in the original text, for example, is a well known bias for news summarization. Following in the spirit of the claim that summarization is a combination of sub-functions, we define three sub-aspects of summarization: position, importance, and diversity and conduct an extensive analysis of the biases of each sub-aspect with respect to the domain of nine different summarization corpora (e.g., news, academic papers, meeting minutes, movie script, books, posts). We find that while position exhibits substantial bias in news articles, this is not the case, for example, with academic papers and meeting minutes. Furthermore, our empirical study shows that different types of summarization systems (e.g., neural-based) are composed of different degrees of the sub-aspects. &#13;
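The SHEF-Multimodal record above re-ranks an SMT n-best list using image features. A generic sketch of such re-scoring: add a weighted image-text cosine similarity to each hypothesis's model score and re-sort; the text encoder, feature weight, and toy data are placeholders rather than the submitted system's actual features.

import numpy as np

def rerank_nbest(nbest, image_vec, text_vec_fn, weight=0.5):
    """nbest: list of (hypothesis, smt_score) pairs.
    Adds a weighted image-text cosine similarity to each SMT score
    and returns the best hypothesis under the combined score."""
    def cosine(a, b):
        return float(a @ b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-9)

    rescored = [(hyp, score + weight * cosine(text_vec_fn(hyp), image_vec))
                for hyp, score in nbest]
    return max(rescored, key=lambda p: p[1])[0]

# toy stand-ins: a random "image" vector and a placeholder text encoder
rng = np.random.default_rng(0)
image_vec = rng.normal(size=16)
text_vec_fn = lambda s: rng.normal(size=16)
nbest = [("ein hund l\u00e4uft", -2.1), ("der hund rennt", -2.3)]
print(rerank_nbest(nbest, image_vec, text_vec_fn))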
Our study provides useful lessons regarding consideration of underlying sub-aspects when collecting a new summarization dataset or developing a new system.", "phrases": ["sub-aspect", "summarization", "dataset bias"], "overall_score": 2.6846392310444656, "scores": [2.42043614777828, 0.5449630874606015, 0.53237277659192], "rank_score": 1.1659240039436005} -{"id": "zheng-etal-2019-simultaneous", "title": "Simultaneous Translation with Flexible Policy via Restricted Imitation Learning", "abstract": "Simultaneous translation is widely useful but remains one of the most difficult tasks in NLP. Previous work either uses fixed-latency policies, or train a complicated two-staged model using reinforcement learning. We propose a much simpler single model that adds a \u201cdelay\u201d token to the target vocabulary, and design a restricted dynamic oracle to greatly simplify training. Experiments on Chinese - English simultaneous translation show that our work leads to flexible policies that achieve better BLEU scores and lower latencies compared to both fixed and RL-learned policies.", "phrases": ["policy", "imitation learning", "target vocabulary", "simultaneous translation"], "overall_score": 3.075772485521733, "scores": [2.70552175196958, 0.8516677800234626, 0.5754063873359659, 0.5293300661442093], "rank_score": 1.1654814963683044} -{"id": "wubben-etal-2012-sentence", "title": "Sentence Simplification by Monolingual Machine Translation", "abstract": "In this paper we describe a method for simplifying sentences using Phrase Based Machine Translation, augmented with a re-ranking heuristic based on dissimilarity, and trained on a monolingual parallel corpus. We compare our system to a word-substitution baseline and two state-of-the-art systems, all trained and tested on paired sentences from the English part of Wikipedia and Simple Wikipedia. Human test subjects judge the output of the different systems. Analysing the judgements shows that by relatively careful phrase-based paraphrasing our model achieves similar simplification results to state-of-the-art systems, while generating better formed output. We also argue that text readability metrics such as the Flesch-Kincaid grade level should be used with caution when evaluating the output of simplification systems.", "phrases": ["dissimilarity", "sentence simplification", "pbmt", "fluency", "adequacy"], "overall_score": 4.511711571408355, "scores": [1.5941795195827422, 1.3849253050906571, 1.078919914472439, 0.8888827849522852, 0.8803687639268286], "rank_score": 1.1654552576049904} -{"id": "teufel-etal-2006-automatic", "title": "Automatic classification of citation function", "abstract": "Citation function is defined as the author's reason for citing a given paper (e.g. acknowledgement of the use of the cited method). The automatic recognition of the rhetorical function of citations in scientific text has many applications, from improvement of impact factor calculations to text summarisation and more informative citation indexers. We show that our annotation scheme for citation function is reliable, and present a supervised machine learning framework to automatically classify citation function, using both shallow and linguistically-inspired features. 
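The Zheng et al. (2019) record above adds a "delay" token to the target vocabulary so a single model decides, step by step, whether to read another source word or write a translation token. A schematic decode loop under that policy; the model interface, token names, and the end-of-source fallback are assumptions for illustration (the paper instead restricts the policy with a dynamic oracle during training).

DELAY, EOS = "<delay>", "</s>"

def simultaneous_decode(model_step, source_words, max_len=100):
    """model_step(src_prefix, tgt_prefix) -> next target token.
    Emitting DELAY acts as a READ (reveal one more source word);
    any other token is a WRITE (append to the output)."""
    read = 1                              # source words consumed so far
    output = []
    while len(output) < max_len:
        tok = model_step(source_words[:read], output)
        if tok == DELAY and read < len(source_words):
            read += 1                     # READ: grow the visible prefix
            continue
        if tok == DELAY:                  # source exhausted: delaying is
            tok = EOS                     # invalid, so finish (a crude
                                          # stand-in for the paper's oracle)
        if tok == EOS:
            break
        output.append(tok)                # WRITE
    return output

# toy policy that lags one word behind the source and echoes it
def toy_step(src_prefix, tgt_prefix):
    if len(tgt_prefix) >= len(src_prefix) - 1:
        return DELAY
    return src_prefix[len(tgt_prefix)]

print(simultaneous_decode(toy_step, "a b c d".split()))
# ['a', 'b', 'c'] -- the toy policy's last word is cut off by the fallback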
We find, amongst other things, a strong relationship between citation function and sentiment classification.", "phrases": ["citation function", "automatic classification", "table", "explicit sentiment", "neutral"], "overall_score": 4.109722356758962, "scores": [3.3525817699375633, 0.8606569697228662, 0.5426442947891043, 0.5404898523580919, 0.530771456589166], "rank_score": 1.1654288686793584} -{"id": "tian-etal-2020-improving", "title": "Improving Constituency Parsing with Span Attention", "abstract": "Constituency parsing is a fundamental and important task for natural language understanding, where a good representation of contextual information can help this task. N-grams, which is a conventional type of feature for contextual information, have been demonstrated to be useful in many tasks, and thus could also be beneficial for constituency parsing if they are appropriately modeled. In this paper, we propose span attention for neural chart-based constituency parsing to leverage n-gram information. Considering that current chart-based parsers with Transformer-based encoder represent spans by subtraction of the hidden states at the span boundaries, which may cause information loss especially for long spans, we incorporate n-grams into span representations by weighting them according to their contributions to the parsing process. Moreover, we propose categorical span attention to further enhance the model by weighting n-grams within different length categories, and thus benefit long-sentence parsing. Experimental results on three widely used benchmark datasets demonstrate the effectiveness of our approach in parsing Arabic, Chinese, and English, where state-of-the-art performance is obtained by our approach on all of them.", "phrases": ["constituency", "span attention", "hidden state"], "overall_score": 1.6156267695267925, "scores": [2.1673534686072573, 0.8062477442942757, 0.5226838795841068], "rank_score": 1.16542836416188} -{"id": "hou-etal-2021-learning", "title": "Learning to Bridge Metric Spaces: Few-shot Joint Learning of Intent Detection and Slot Filling", "abstract": "In this paper, we investigate few-shot joint learning for dialogue language understanding. Most existing few-shot models learn a single task each time with only a few examples. However, dialogue language understanding contains two closely related tasks, i.e., intent detection and slot filling, and often benefits from jointly learning the two tasks. This calls for new few-shot learning techniques that are able to capture task relations from only a few examples and jointly learn multiple tasks. To achieve this, we propose a similarity-based few-shot learning scheme, named Contrastive Prototype Merging network (ConProm), that learns to bridge metric spaces of intent and slot on data-rich domains, and then adapt the bridged metric space to the specific few-shot domain. 
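The Tian et al. (2020) record above represents a chart span by subtracting boundary hidden states and enriches it with attention-weighted n-gram vectors. A numpy sketch of one plausible reading of that combination: here the attended n-gram summary is simply added to the boundary-subtraction vector, whereas the paper weights n-grams within length categories and integrates them into the full parser; dimensions are arbitrary.

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def span_representation(h, i, j, ngram_vecs):
    """h: (seq_len, d) encoder hidden states.
    Boundary subtraction gives the base span vector for span (i, j);
    span attention then mixes in vectors of n-grams inside the span."""
    base = h[j] - h[i]                       # chart-parser span encoding
    if len(ngram_vecs) == 0:
        return base
    V = np.stack(ngram_vecs)                 # (m, d) n-gram embeddings
    scores = V @ base                        # relevance of each n-gram
    attended = softmax(scores) @ V           # weighted n-gram summary
    return base + attended                   # enriched span representation

rng = np.random.default_rng(0)
h = rng.normal(size=(10, 8))
ngrams_in_span = [rng.normal(size=8) for _ in range(3)]
print(span_representation(h, 2, 6, ngrams_in_span).shape)  # (8,)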
Experiments on two public datasets, Snips and FewJoint, show that our model significantly outperforms the strong baselines in one and five shots settings.", "phrases": ["few-shot joint learning", "intent detection", "slot filling"], "overall_score": 1.614307419732632, "scores": [1.8041096613574963, 0.8722962140918086, 0.8170240879284849], "rank_score": 1.1644766544592633} -{"id": "qi-etal-2020-prophetnet", "title": "ProphetNet: Predicting Future N-gram for Sequence-to-SequencePre-training", "abstract": "This paper presents a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of optimizing one-step-ahead prediction in the traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction that predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large-scale dataset (160GB), respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pre-training corpus.", "phrases": ["future n-gram prediction", "future token", "prophetnet", "summarization task", "generation model"], "overall_score": 3.545068570458206, "scores": [2.556762916094283, 1.2790338910492067, 0.8756946960389954, 0.5605081082711534, 0.5500441457752816], "rank_score": 1.1644087514457842} -{"id": "al-khatib-etal-2016-news", "title": "A News Editorial Corpus for Mining Argumentation Strategies", "abstract": "Many argumentative texts, and news editorials in particular, follow a specific strategy to persuade their readers of some opinion or attitude. This includes decisions such as when to tell an anecdote or where to support an assumption with statistics, which is reflected by the composition of different types of argumentative discourse units in a text. While several argument mining corpora have recently been published, they do not allow the study of argumentation strategies due to incomplete or coarse-grained unit annotations. This paper presents a novel corpus with 300 editorials from three diverse news portals that provides the basis for mining argumentation strategies. Each unit in all editorials has been assigned one of six types by three annotators with a high Fleiss' Kappa agreement of 0.56. We investigate various challenges of the annotation process and we conduct a first corpus analysis. Our results reveal different strategies across the news portals, exemplifying the benefit of studying editorials\u2014a so far underresourced text genre in argument mining.", "phrases": ["mining argumentation strategy", "news editorial", "decision", "discourse unit"], "overall_score": 3.228389271922945, "scores": [1.0131987197955703, 1.9277221489031482, 1.1810606555836227, 0.5355996683800232], "rank_score": 1.164395298165591} -{"id": "shaw-etal-2018-self", "title": "Self-Attention with Relative Position Representations", "abstract": "Relying entirely on an attention mechanism, the Transformer introduced by Vaswani et al. 
(2017) achieves state-of-the-art results for machine translation. In contrast to recurrent and convolutional neural networks, it does not explicitly model relative or absolute position information in its structure. Instead, it requires adding representations of absolute positions to its inputs. In this work we present an alternative approach, extending the self-attention mechanism to efficiently consider representations of the relative positions, or distances between sequence elements. On the WMT 2014 English-to-German and English-to-French translation tasks, this approach yields improvements of 1.3 BLEU and 0.3 BLEU over absolute position representations, respectively. Notably, we observe that combining relative and absolute position representations yields no further improvement in translation quality. We describe an efficient implementation of our method and cast it as an instance of relation-aware self-attention mechanisms that can generalize to arbitrary graph-labeled inputs.", "phrases": ["relative position representations", "sequence element", "self-attention", "san"], "overall_score": 3.8374194026555637, "scores": [1.601815814582778, 0.9234006486929709, 1.2898732934793211, 0.8422031356662909], "rank_score": 1.1643232231053402} -{"id": "wu-dredze-2020-languages", "title": "Are All Languages Created Equal in Multilingual BERT?", "abstract": "Multilingual BERT (mBERT) trained on 104 languages has shown surprisingly good cross-lingual performance on several NLP tasks, even without explicit cross-lingual signals. However, these evaluations have focused on cross-lingual transfer with high-resource languages, covering only a third of the languages covered by mBERT. We explore how mBERT performs on a much wider set of languages, focusing on the quality of representation for low-resource languages, measured by within-language performance. We consider three tasks: Named Entity Recognition (99 languages), Part-of-speech Tagging and Dependency Parsing (54 languages each). mBERT does better than or comparable to baselines on high resource languages but does much worse for low resource languages. Furthermore, monolingual BERT models for these languages do even worse. Paired with similar languages, the performance gap between monolingual BERT and mBERT can be narrowed. We find that better models for low resource languages require more efficient pretraining techniques or more data.", "phrases": ["multilingual bert", "low-resource language", "monolingual ability"], "overall_score": 4.294652953649099, "scores": [0.9257985385327459, 2.0344284404889326, 0.5324214041014852], "rank_score": 1.1642161277077212} -{"id": "freedman-etal-2011-extreme", "title": "Extreme Extraction \u2013 Machine Reading in a Week", "abstract": "We report on empirical results in extreme extraction. It is extreme in that (1) from receipt of the ontology specifying the target concepts and relations, development is limited to one week and that (2) relatively little training data is assumed. We are able to surpass human recall and achieve an F1 of 0.51 on a question-answering task with less than 50 hours of effort using a hybrid approach that mixes active learning, bootstrapping, and limited (5 hours) manual rule writing. We compare the performance of three systems: extraction with handwritten rules, bootstrapped extraction, and a combination. 
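The Shaw et al. (2018) record completed above adds learned embeddings of clipped relative distances into the attention logits (and, in the paper, also into the value aggregation, omitted here). A numpy sketch of the logit computation for a single head; sizes and the clipping distance are example settings.

import numpy as np

def relative_attention_logits(Q, K, rel_emb, k=4):
    """Q, K: (n, d) query/key matrices for one head.
    rel_emb: (2k+1, d) learned embeddings for clipped distances -k..k.
    Returns (n, n) attention logits with relative-position terms."""
    n, d = Q.shape
    logits = Q @ K.T                                  # content-content term
    for i in range(n):
        for j in range(n):
            dist = int(np.clip(j - i, -k, k))         # clip long distances
            logits[i, j] += Q[i] @ rel_emb[dist + k]  # content-position term
    return logits / np.sqrt(d)

rng = np.random.default_rng(0)
n, d, k = 6, 8, 4
Q, K = rng.normal(size=(n, d)), rng.normal(size=(n, d))
rel_emb = rng.normal(size=(2 * k + 1, d))
logits = relative_attention_logits(Q, K, rel_emb, k)
print(logits.shape)  # (6, 6); a row-wise softmax would give attention weights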
We show that while the recall of the handwritten rules surpasses that of the learned system, the learned system is able to improve the overall recall and F1.", "phrases": ["week", "ontology", "active learning", "extreme extraction"], "overall_score": 2.2652810432701465, "scores": [2.1039831470756125, 0.8830283095843487, 1.104453447158576, 0.5650317887340531], "rank_score": 1.1641241731381478} -{"id": "jiang-etal-2020-smart", "title": "SMART: Robust and Efficient Fine-Tuning for Pre-trained Natural Language Models through Principled Regularized Optimization", "abstract": "Transfer learning has fundamentally changed the landscape of natural language processing (NLP). Many state-of-the-art models are first pre-trained on a large text corpus and then fine-tuned on downstream tasks. However, due to limited data resources from downstream tasks and the extremely high complexity of pre-trained models, aggressive fine-tuning often causes the fine-tuned model to overfit the training data of downstream tasks and fail to generalize to unseen data. To address such an issue in a principled manner, we propose a new learning framework for robust and efficient fine-tuning for pre-trained models to attain better generalization performance. The proposed framework contains two important ingredients: 1. Smoothness-inducing regularization, which effectively manages the complexity of the model; 2. Bregman proximal point optimization, which is an instance of trust-region methods and can prevent aggressive updating. Our experiments show that the proposed framework achieves new state-of-the-art performance on a number of NLP tasks including GLUE, SNLI, SciTail and ANLI. Moreover, it also outperforms the state-of-the-art T5 model, which is the largest pre-trained model containing 11 billion parameters, on GLUE.", "phrases": ["fine-tuning", "downstream task", "unseen data", "smart", "adversarial training"], "overall_score": 3.3646627174903925, "scores": [2.4719239629869283, 0.8299724327800831, 1.104153667031638, 0.8558912481494192, 0.5585257419466316], "rank_score": 1.16409341057894} -{"id": "erkan-radev-2004-lexpagerank", "title": "LexPageRank: Prestige in Multi-Document Text Summarization", "abstract": "Multidocument extractive summarization relies on the concept of sentence centrality to identify the most important sentences in a document. Central-ity is typically de\ufb01ned in terms of the presence of particular important words or in terms of similarity to a centroid pseudo-sentence. We are now considering an approach for computing sentence importance based on the concept of eigenvector centrality (prestige) that we call LexPageRank. In this model, a sentence connectivity matrix is constructed based on cosine similarity. If the cosine similarity be-tween two sentences exceeds a particular prede\ufb01ned threshold, a corresponding edge is added to the connectivity matrix. We provide an evaluation of our method on DUC 2004 data. 
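The Jiang et al. (2020) record above regularizes fine-tuning with a smoothness-inducing adversarial term: predictions should change little under a small perturbation of the input embeddings. A condensed PyTorch sketch of that term using a single gradient step to find the perturbation; the paper uses an iterative inner solver and adds Bregman proximal point updates, both omitted here, and the step sizes and weight are illustrative.

import torch
import torch.nn.functional as F

def smoothness_loss(model, embeds, eps=1e-3, step=1e-3):
    """Symmetrized-KL change in predictions under a small adversarial
    perturbation of the input embeddings (smoothness-inducing term)."""
    with torch.no_grad():
        p = F.softmax(model(embeds), dim=-1)          # clean predictions
    noise = torch.randn_like(embeds) * eps
    noise.requires_grad_(True)
    q_log = F.log_softmax(model(embeds + noise), dim=-1)
    adv_loss = F.kl_div(q_log, p, reduction="batchmean")
    grad, = torch.autograd.grad(adv_loss, noise)
    # move the perturbation in the direction that most changes the output
    noise = (noise + step * grad.sign()).detach()
    q_log = F.log_softmax(model(embeds + noise), dim=-1)
    q = q_log.exp()
    return (F.kl_div(q_log, p, reduction="batchmean")
            + F.kl_div(p.log(), q, reduction="batchmean"))

model = torch.nn.Sequential(torch.nn.Linear(16, 4))   # stand-in classifier
embeds = torch.randn(8, 16)
total = F.cross_entropy(model(embeds), torch.randint(0, 4, (8,))) \
        + 1.0 * smoothness_loss(model, embeds)        # lambda_s = 1.0
total.backward()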
The results show that our approach outperforms centroid-based summarization and is quite successful compared to other summarization systems.", "phrases": ["prestige", "text summarization", "eigenvector centrality", "lexpagerank"], "overall_score": 2.985760939074153, "scores": [2.360376504893701, 0.8900904121502234, 0.8415682966179774, 0.5642141276959846], "rank_score": 1.1640623353394715} -{"id": "peng-etal-2017-deep", "title": "Deep Multitask Learning for Semantic Dependency Parsing", "abstract": "We present a deep neural architecture that parses sentences into three semantic dependency graph formalisms. By using efficient, nearly arc-factored inference and a bidirectional-LSTM composed with a multi-layer perceptron, our base system is able to significantly improve the state of the art for semantic dependency parsing, without using hand-engineered features or syntax. We then explore two multitask learning approaches\u2014one that shares parameters across formalisms, and one that uses higher-order structures to predict the graphs jointly. We find that both approaches improve performance across formalisms on average, achieving a new state of the art. Our code is open-source and available at .", "phrases": ["semantic dependency parsing", "multi-task learning", "bilstm"], "overall_score": 3.543906429643143, "scores": [2.0321099320205662, 0.9195763480599786, 0.5403948283377009], "rank_score": 1.1640270361394152} -{"id": "sap-etal-2019-social", "title": "Social IQa: Commonsense Reasoning about Social Interactions", "abstract": "We introduce Social IQa, the first large-scale benchmark for commonsense reasoning about social situations. Social IQa contains 38,000 multiple choice questions for probing emotional and social intelligence in a variety of everyday situations (e.g., Q: \u201cJordan wanted to tell Tracy a secret, so Jordan leaned towards Tracy. Why did Jordan do this?\u201d A: \u201cMake sure no one else could hear\u201d). Through crowdsourcing, we collect commonsense questions along with correct and incorrect answers about social interactions, using a new framework that mitigates stylistic artifacts in incorrect answers by asking workers to provide the right answer to a different but related question. Empirical results show that our benchmark is challenging for existing question-answering models based on pretrained language models, compared to human performance (20% gap). Notably, we further establish Social IQa as a resource for transfer learning of commonsense knowledge, achieving state-of-the-art performance on multiple commonsense reasoning tasks (Winograd Schemas, COPA).", "phrases": ["commonsense reasoning", "multiple choice question", "human performance", "social iqa", "life"], "overall_score": 3.746800443608468, "scores": [2.1498780853073276, 2.0302596614614576, 0.5537626009523465, 0.5445665067772393, 0.5415781665951089], "rank_score": 1.1640090042186961} -{"id": "cao-etal-2020-incremental", "title": "Incremental Event Detection via Knowledge Consolidation Networks", "abstract": "Conventional approaches to event detection usually require a fixed set of pre-defined event types. Such a requirement is often challenged in real-world applications, as new events continually occur. Due to huge computation cost and storage budget, it is infeasible to store all previous data and re-train the model with all previous data and new data, every time new events arrive. &#13;
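The Erkan and Radev record completed above scores sentences by eigenvector centrality over a cosine-similarity graph thresholded at a fixed value. A compact numpy sketch: build the binary connectivity matrix, row-normalize it, and run damped power iteration; the threshold and damping values are illustrative.

import numpy as np

def lexpagerank(sim, threshold=0.2, damping=0.85, iters=50):
    """sim: (n, n) cosine similarities between sentence vectors.
    Returns PageRank-style prestige scores; high scorers go in the summary."""
    n = sim.shape[0]
    adj = (sim >= threshold).astype(float)   # edge iff similarity passes threshold
    np.fill_diagonal(adj, 0.0)
    row_sums = adj.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0            # guard against isolated sentences
    P = adj / row_sums                       # row-stochastic transition matrix
    scores = np.full(n, 1.0 / n)
    for _ in range(iters):
        scores = (1 - damping) / n + damping * (P.T @ scores)
    return scores

sim = np.array([[1.0, 0.5, 0.1],
                [0.5, 1.0, 0.3],
                [0.1, 0.3, 1.0]])
print(lexpagerank(sim).round(3))             # the middle sentence is most central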
We formulate such challenging scenarios as incremental event detection, which requires a model to learn new classes incrementally without performance degradation on previous classes. However, existing incremental learning methods cannot handle semantic ambiguity and training data imbalance problems between old and new classes in the task of incremental event detection. In this paper, we propose a Knowledge Consolidation Network (KCN) to address the above issues. Specifically, we devise two components, prototype enhanced retrospection and hierarchical distillation, to mitigate the adverse effects of semantic ambiguity and class imbalance, respectively. Experimental results demonstrate the effectiveness of the proposed method, outperforming the state-of-the-art model by 19% and 13.4% of whole F1 score on ACE benchmark and TAC KBP benchmark, respectively.", "phrases": ["event detection", "knowledge consolidation network", "catastrophic forgetting"], "overall_score": 2.0840395833439165, "scores": [2.4081516076812997, 0.5590241635909781, 0.522198141580743], "rank_score": 1.1631246376176736} -{"id": "ma-etal-2019-sentence", "title": "Sentence-Level Evidence Embedding for Claim Verification with Hierarchical Attention Networks", "abstract": "Claim verification is generally a task of verifying the veracity of a given claim, which is critical to many downstream applications. It is cumbersome and inefficient for human fact-checkers to find consistent pieces of evidence, from which solid verdict could be inferred against the claim. In this paper, we propose a novel end-to-end hierarchical attention network focusing on learning to represent coherent evidence as well as their semantic relatedness with the claim. Our model consists of three main components: 1) A coherence-based attention layer embeds coherent evidence considering the claim and sentences from relevant articles; 2) An entailment-based attention layer attends on sentences that can semantically infer the claim on top of the first attention; and 3) An output layer predicts the verdict based on the embedded evidence. Experimental results on three public benchmark datasets show that our proposed model outperforms a set of state-of-the-art baselines.", "phrases": ["claim verification", "hierarchical attention networks", "sentence-level evidence"], "overall_score": 2.982277752832681, "scores": [1.7287983586242421, 0.8813259336016486, 0.8779887313203089], "rank_score": 1.1627043411820666} -{"id": "wadden-etal-2019-entity", "title": "Entity, Relation, and Event Extraction with Contextualized Span Representations", "abstract": "We examine the capabilities of a unified, multi-task framework for three information extraction tasks: named entity recognition, relation extraction, and event extraction. Our framework (called DyGIE++) accomplishes all tasks by enumerating, refining, and scoring text spans designed to capture local (within-sentence) and global (cross-sentence) context. Our framework achieves state-of-the-art results across all tasks, on four datasets from a variety of domains. We perform experiments comparing different techniques to construct span representations. Contextualized embeddings like BERT perform well at capturing relationships among entities in the same or adjacent sentences, while dynamic span graph updates model long-range cross-sentence relationships. For instance, propagating span representations via predicted coreference links can enable the model to disambiguate challenging entity mentions. 
Our code is publicly available at and can be easily adapted for new tasks or datasets.", "phrases": ["event extraction", "span representation", "multi-task framework", "entity recognition", "argument extraction"], "overall_score": 4.4515528320656355, "scores": [2.712084289871494, 1.1013849149661938, 0.896554636892921, 0.5624244535588991, 0.5410405785284448], "rank_score": 1.1626977747635903} -{"id": "niehues-2012-continuous", "title": "Continuous space language models using restricted Boltzmann machines", "abstract": "We present a novel approach for continuous space language models in statistical machine translation by using Restricted Boltzmann Machines (RBMs). The probability of an n-gram is calculated by the free energy of the RBM instead of a feedforward neural net. Therefore, the calculation is much faster and can be integrated into the translation process instead of using the language model only in a re-ranking step. Furthermore, it is straightforward to introduce additional word factors into the language model. We observed a faster convergence in training if we include automatically generated word classes as an additional word factor. We evaluated the RBM-based language model on the German to English and English to French translation task of TED lectures. Instead of replacing the conventional n-gram-based language model, we trained the RBM-based language model on the more important but smaller in-domain data and combined them in a log-linear way. With this approach we could show improvements of about half a BLEU point on the translation task.", "phrases": ["space language model", "boltzmann machine", "probability"], "overall_score": 2.417709339069578, "scores": [2.006983551189002, 0.9265462949436399, 0.5544874276539724], "rank_score": 1.162672424595538} -{"id": "kryscinski-etal-2019-neural", "title": "Neural Text Summarization: A Critical Evaluation", "abstract": "Text summarization aims at compressing long documents into a shorter form that conveys the most important parts of the original document. Despite increased interest in the community and notable research effort, progress on benchmark datasets has stagnated. We critically evaluate key ingredients of the current research setup: datasets, evaluation metrics, and models, and highlight three primary shortcomings: 1) automatically collected datasets leave the task underconstrained and may contain noise detrimental to training and evaluation, 2) current evaluation protocol is weakly correlated with human judgment and does not account for important characteristics such as factual correctness, 3) models overfit to layout biases of current datasets and offer limited diversity in their outputs.", "phrases": ["evaluation metric", "neural text summarization", "source document", "tendency"], "overall_score": 4.476448541898743, "scores": [2.0456903730250784, 1.2345434739748007, 0.8313976237027876, 0.5390458590812722], "rank_score": 1.162669332445985} -{"id": "badlani-etal-2019-ensemble", "title": "An Ensemble of Humour, Sarcasm, and Hate Speechfor Sentiment Classification in Online Reviews", "abstract": "Due to the nature of online user reviews, sentiment analysis on such data requires a deep semantic understanding of the text. Many online reviews are sarcastic, humorous, or hateful. Signals from such language nuances may reinforce or completely alter the sentiment of a review as predicted by a machine learning model that attempts to detect sentiment alone. 
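The Niehues (2012) record above scores an n-gram by the free energy of a Restricted Boltzmann Machine rather than by a feed-forward net. A numpy sketch of the free-energy computation for a binary RBM, F(v) = -a.v - sum_j log(1 + exp(c_j + v.W_:,j)); how an n-gram and its word factors are encoded into the visible vector is left abstract, and all parameters are random placeholders.

import numpy as np

def free_energy(v, W, a, c):
    """F(v) for an RBM with visible bias a, hidden bias c, and weights W
    (visible x hidden). Lower free energy means higher unnormalized
    probability, so -F(v) can serve as an n-gram score in a log-linear
    MT combination."""
    hidden_in = c + v @ W                            # (n_hidden,)
    return -a @ v - np.sum(np.logaddexp(0.0, hidden_in))  # log(1+exp(x))

rng = np.random.default_rng(0)
n_visible, n_hidden = 30, 16                         # e.g. one-hot word factors
W = rng.normal(scale=0.1, size=(n_visible, n_hidden))
a = rng.normal(scale=0.1, size=n_visible)
c = rng.normal(scale=0.1, size=n_hidden)
v = (rng.random(n_visible) < 0.1).astype(float)      # sparse n-gram encoding
print(free_energy(v, W, a, c))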
Thus, having a model that is explicitly aware of these features should help it perform better on reviews that are characterized by them. We propose a composite two-step model that extracts features pertaining to sarcasm, humour, hate speech, as well as sentiment, in the first step, feeding them in conjunction to inform sentiment classification in the second step. We show that this multi-step approach leads to a better empirical performance for sentiment classification than a model that predicts sentiment alone. A qualitative analysis reveals that the conjunctive approach can better capture the nuances of sentiment as expressed in online reviews.", "phrases": ["humour", "sarcasm", "sentiment classification", "review", "composite two-step model"], "overall_score": 2.262227878083619, "scores": [2.321625544119094, 1.7868513832050177, 0.6226320033969558, 0.5585709403285045, 0.5230959119994804], "rank_score": 1.1625551566098102} -{"id": "schulte-im-walde-etal-2016-ghost", "title": "GhoSt-NN: A Representative Gold Standard of German Noun-Noun Compounds", "abstract": "This paper presents a novel gold standard of German noun-noun compounds (Ghost-NN) including 868 compounds annotated with corpus frequencies of the compounds and their constituents, productivity and ambiguity of the constituents, semantic relations between the constituents, and compositionality ratings of compound-constituent pairs. Moreover, a subset of the compounds containing 180 compounds is balanced for the productivity of the modifiers (distinguishing low/mid/high productivity) and the ambiguity of the heads (distinguishing between heads with 1, 2 and >2 senses).", "phrases": ["german noun-noun compound", "semantic relation", "ghost-nn"], "overall_score": 1.6115983965004514, "scores": [2.0079959090189785, 0.9042205046397579, 0.5753511081449733], "rank_score": 1.1625225072679033} -{"id": "perez-etal-2022-robertuito", "title": "RoBERTuito: a pre-trained language model for social media text in Spanish", "abstract": "Since BERT appeared, Transformer language models and transfer learning have become state-of-the-art for natural language processing tasks. Recently, some works geared towards pre-training specially-crafted models for particular domains, such as scientific papers, medical documents, user-generated texts, among others. These domain-specific models have been shown to improve performance significantly in most tasks; however, for languages other than English, such models are not widely available. In this work, we present RoBERTuito, a pre-trained language model for user-generated text in Spanish, trained on over 500 million tweets. Experiments on a benchmark of tasks involving user-generated text showed that RoBERTuito outperformed other pre-trained language models in Spanish. In addition to this, our model has some cross-lingual abilities, achieving top results for English-Spanish tasks of the Linguistic Code-Switching Evaluation benchmark (LinCE) and also competitive performance against monolingual models in English Twitter tasks. 
To facilitate further research, we make RoBERTuito publicly available at the HuggingFace model hub together with the dataset used to pre-train it.", "phrases": ["language model", "spanish", "social medium text"], "overall_score": 1.6112560359313857, "scores": [1.8114565241706952, 0.8541781493830263, 0.8211919654072077], "rank_score": 1.1622755463203098} -{"id": "zhou-etal-2021-defense", "title": "Defense against Synonym Substitution-based Adversarial Attacks via Dirichlet Neighborhood Ensemble", "abstract": "Although deep neural networks have achieved prominent performance on many NLP tasks, they are vulnerable to adversarial examples. We propose Dirichlet Neighborhood Ensemble (DNE), a randomized method for training a robust model to defend against synonym substitution-based attacks. During training, DNE forms virtual sentences by sampling embedding vectors for each word in an input sentence from a convex hull spanned by the word and its synonyms, and it augments them with the training data. In such a way, the model is robust to adversarial attacks while maintaining the performance on the original clean data. DNE is agnostic to the network architectures and scales to large models (e.g., BERT) for NLP applications. Through extensive experimentation, we demonstrate that our method consistently outperforms recently proposed defense methods by a significant margin across different network architectures and multiple data sets.", "phrases": ["synonyms", "attack", "dirichlet neighborhood ensemble"], "overall_score": 2.081961483795319, "scores": [1.874189182642897, 1.053348041659814, 0.5583572602269993], "rank_score": 1.16196482817657} -{"id": "kolluru-etal-2020-openie6", "title": "OpenIE6: Iterative Grid Labeling and Coordination Analysis for Open Information Extraction", "abstract": "A recent state-of-the-art neural open information extraction (OpenIE) system generates extractions iteratively, requiring repeated encoding of partial outputs. This comes at a significant computational cost. On the other hand, sequence labeling approaches for OpenIE are much faster, but worse in extraction quality. In this paper, we bridge this trade-off by presenting an iterative labeling-based system that establishes a new state of the art for OpenIE, while extracting 10x faster. This is achieved through a novel Iterative Grid Labeling (IGL) architecture, which treats OpenIE as a 2-D grid labeling task. We improve its performance further by applying coverage (soft) constraints on the grid at training time. Moreover, on observing that the best OpenIE systems falter at handling coordination structures, our OpenIE system also incorporates a new coordination analyzer built with the same IGL architecture. This IGL based coordination analyzer helps our OpenIE system handle complicated coordination structures, while also establishing a new state of the art on the task of coordination analysis, with a 12.3 pts improvement in F1 over previous analyzers. 
Our OpenIE system - OpenIE6 - beats the previous systems by as much as 4 pts in F1, while being much faster.", "phrases": ["iterative grid labeling", "open information extraction", "openie6"], "overall_score": 2.0818956326385063, "scores": [1.6787121841745323, 0.9572755758055898, 0.8497964678584238], "rank_score": 1.161928075946182} -{"id": "shutova-etal-2016-black", "title": "Black Holes and White Rabbits: Metaphor Identification with Visual Features", "abstract": "Metaphor is pervasive in our communication, which makes it an important problem for natural language processing (NLP). Numerous approaches to metaphor processing have thus been proposed, all of which relied on linguistic features and textual data to construct their models. Human metaphor comprehension is, however, known to rely on both our linguistic and perceptual experience, and vision can play a particularly important role when metaphorically projecting imagery across domains. In this paper, we present the first metaphor identification method that simultaneously draws knowledge from linguistic and visual data. Our results demonstrate that it outperforms linguistic and visual models in isolation, as well as being competitive with the best-performing metaphor identification methods, that rely on hand-crafted knowledge about domains and perception.", "phrases": ["metaphor identification", "visual feature", "experience"], "overall_score": 3.7399589929251964, "scores": [2.152946645384517, 0.8098185817214029, 0.522885537081293], "rank_score": 1.1618835880624043} -{"id": "klein-nabi-2019-attention", "title": "Attention Is (not) All You Need for Commonsense Reasoning", "abstract": "The recently introduced BERT model exhibits strong performance on several language understanding benchmarks. In this paper, we describe a simple re-implementation of BERT for commonsense reasoning. We show that the attentions produced by BERT can be directly utilized for tasks such as the Pronoun Disambiguation Problem and Winograd Schema Challenge. Our proposed attention-guided commonsense reasoning method is conceptually simple yet empirically powerful. Experimental analysis on multiple datasets demonstrates that our proposed system performs remarkably well on all cases while outperforming the previously reported state of the art by a margin. While results suggest that BERT seems to implicitly learn to establish complex relationships between entities, solving commonsense reasoning tasks might require more than unsupervised models learned from huge text corpora.", "phrases": ["commonsense reasoning", "huge text corpora", "maximum attention score"], "overall_score": 2.415943242164802, "scores": [2.3635881515237505, 0.5917730767193595, 0.530108106297254], "rank_score": 1.1618231115134547} -{"id": "u-maji-2006-computational", "title": "Computational Complexity of Statistical Machine Translation", "abstract": "In this paper we study a set of problems that are of considerable importance to Statistical Machine Translation (SMT) but which have not been addressed satisfactorily by the SMT research community. Over the last decade, a variety of SMT algorithms have been built and empirically tested whereas little is known about the computational complexity of some of the fundamental problems of SMT. Our work aims at providing useful insights into the computational complexity of those problems. We prove that while IBM Models 1-2 are conceptually and computationally simple, computations involving the higher (and more useful) models are hard. 
Since it is unlikely that there exists a polynomial time solution for any of these hard problems (unless P = NP and P^#P = P), our results highlight and justify the need for developing polynomial time approximations for these computations. We also discuss some practical ways of dealing with complexity.", "phrases": ["statistical machine translation", "computational complexity", "viterbi alignment"], "overall_score": 1.8695810476111, "scores": [2.001899236095166, 0.9626370376100062, 0.5203717799088283], "rank_score": 1.1616360178713334} -{"id": "wang-ng-2013-beam", "title": "A Beam-Search Decoder for Normalization of Social Media Text with Application to Machine Translation", "abstract": "Social media texts are written in an informal style, which hinders other natural language processing (NLP) applications such as machine translation. Text normalization is thus important for processing of social media text. Previous work mostly focused on normalizing words by replacing an informal word with its formal form. In this paper, to further improve other downstream NLP applications, we argue that other normalization operations should also be performed, e.g., missing word recovery and punctuation correction. A novel beam-search decoder is proposed to effectively integrate various normalization operations. Empirical results show that our system obtains statistically significant improvements over two strong baselines in both normalization and translation tasks, for both Chinese and English.", "phrases": ["beam-search decoder", "machine translation", "punctuation correction", "social medium text"], "overall_score": 2.2602785966184817, "scores": [2.385271476162811, 0.8969696347916369, 0.8416110438487846, 0.522361541581024], "rank_score": 1.161553424096064} -{"id": "chambers-jurafsky-2009-unsupervised", "title": "Unsupervised Learning of Narrative Schemas and their Participants", "abstract": "We describe an unsupervised system for learning narrative schemas, coherent sequences or sets of events (arrested(POLICE, SUSPECT), convicted(JUDGE, SUSPECT)) whose arguments are filled with participant semantic roles defined over words (Judge = {judge, jury, court}, Police = {police, agent, authorities}). Unlike most previous work in event structure or semantic role learning, our system does not use supervised techniques, hand-built knowledge, or predefined classes of events or roles. Our unsupervised learning algorithm uses coreferring arguments in chains of verbs to learn both rich narrative event structure and argument roles. By jointly addressing both tasks, we improve on previous results in narrative/frame learning and induce rich frame-specific semantic roles.", "phrases": ["narrative schemas", "semantic role", "text corpora", "protagonist"], "overall_score": 3.870319822857589, "scores": [2.457033451102913, 0.8350996054336995, 0.825421438386165, 0.5284026317051933], "rank_score": 1.1614892816569928} -{"id": "sun-etal-2019-pullnet", "title": "PullNet: Open Domain Question Answering with Iterative Retrieval on Knowledge Bases and Text", "abstract": "We consider open-domain question answering (QA) where answers are drawn from either a corpus, a knowledge base (KB), or a combination of both of these. We focus on a setting in which a corpus is supplemented with a large but incomplete KB, and on questions that require non-trivial (e.g., \u201cmulti-hop\u201d) reasoning. 
We describe PullNet, an integrated framework for (1) learning what to retrieve and (2) reasoning with this heterogeneous information to find the best answer. PullNet uses an iterative process to construct a question-specific subgraph that contains information relevant to the question. In each iteration, a graph convolutional network (graph CNN) is used to identify subgraph nodes that should be expanded using retrieval (or \u201cpull\u201d) operations on the corpus and/or KB. After the subgraph is complete, another graph CNN is used to extract the answer from the subgraph. This retrieve-and-reason process allows us to answer multi-hop questions using large KBs and corpora. PullNet is weakly supervised, requiring question-answer pairs but not gold inference paths. Experimentally PullNet improves over the prior state of the art, and in the setting where a corpus is used with incomplete KB these improvements are often dramatic. PullNet is also often superior to prior systems in a KB-only setting or a text-only setting.", "phrases": ["pullnet", "answer entity", "query", "unstructured text"], "overall_score": 3.5352811397886157, "scores": [2.925601277706479, 0.597364661912288, 0.5642587476707768, 0.5575512499601264], "rank_score": 1.1611939843124175} -{"id": "vala-etal-2015-mr", "title": "Mr. Bennet, his coachman, and the Archbishop walk into a bar but only one of them gets recognized: On The Difficulty of Detecting Characters in Literary Texts", "abstract": "Characters are fundamental to literary analysis. Current approaches are heavily reliant on NER to identify characters, causing many to be overlooked. We propose a novel technique for character detection, achieving significant improvements over state of the art on multiple datasets.", "phrases": ["character", "literary text", "novel technique"], "overall_score": 2.6722216515913724, "scores": [1.412710940733685, 1.5438986623107864, 0.5249837500811088], "rank_score": 1.160531117708527} -{"id": "zheng-etal-2019-chid", "title": "ChID: A Large-scale Chinese IDiom Dataset for Cloze Test", "abstract": "Cloze-style reading comprehension in Chinese is still limited due to the lack of various corpora. In this paper we propose a large-scale Chinese cloze test dataset ChID, which studies the comprehension of idiom, a unique language phenomenon in Chinese. In this corpus, the idioms in a passage are replaced by blank symbols and the correct answer needs to be chosen from well-designed candidate idioms. We carefully study how the design of candidate idioms and the representation of idioms affect the performance of state-of-the-art models. Results show that the machine accuracy is substantially worse than that of humans, indicating a large space for further research.", "phrases": ["idiom dataset", "chid", "multiple-choice mrc dataset", "chengyu"], "overall_score": 2.54984949306631, "scores": [2.63218670782447, 0.8866621107536827, 0.5881651386094884, 0.5349321039793808], "rank_score": 1.1604865152917556} -{"id": "kementchedjhieva-lopez-2018-indicatements", "title": "\u2018Indicatements\u2019 that character language models learn English morpho-syntactic units and regularities", "abstract": "Character language models have access to surface morphological patterns, but it is not clear whether or how they learn abstract morphological regularities. 
We instrument a character language model with several probes, finding that it can develop a specific unit to identify word boundaries and, by extension, morpheme boundaries, which allows it to capture linguistic properties and regularities of these units. Our language model proves surprisingly good at identifying the selectional restrictions of English derivational morphemes, a task that requires both morphological and syntactic awareness. Thus we conclude that, when morphemes overlap extensively with the words of a language, a character language model can perform morphological abstraction.", "phrases": ["character language model", "unit", "regularity"], "overall_score": 1.6087282698401844, "scores": [1.7160317798889317, 0.9432147062491706, 0.8221099594163718], "rank_score": 1.160452148518158} -{"id": "zhang-etal-2020-top", "title": "A Top-down Neural Architecture towards Text-level Parsing of Discourse Rhetorical Structure", "abstract": "Due to its great importance in deep natural language understanding and various down-stream applications, text-level parsing of discourse rhetorical structure (DRS) has been drawing more and more attention in recent years. However, all the previous studies on text-level discourse parsing adopt bottom-up approaches, which much limit the DRS determination on local information and fail to well benefit from global information of the overall discourse. In this paper, we justify from both computational and perceptive points-of-view that the top-down architecture is more suitable for text-level DRS parsing. On the basis, we propose a top-down neural architecture toward text-level DRS parsing. In particular, we cast discourse parsing as a recursive split point ranking task, where a split point is classified to different levels according to its rank and the elementary discourse units (EDUs) associated with it are arranged accordingly. In this way, we can determine the complete DRS as a hierarchical tree structure via an encoder-decoder with an internal stack. Experimentation on both the English RST-DT corpus and the Chinese CDTB corpus shows the great effectiveness of our proposed top-down approach towards text-level DRS parsing.", "phrases": ["top-down neural architecture", "text-level parsing", "rhetorical structure"], "overall_score": 2.4130035571030497, "scores": [1.7950183475358512, 0.8465304096480029, 0.839679508296145], "rank_score": 1.1604094218266663} -{"id": "isozaki-etal-2010-head", "title": "Head Finalization: A Simple Reordering Rule for SOV Languages", "abstract": "English is a typical SVO (Subject-Verb-Object) language, while Japanese is a typical SOV language. Conventional Statistical Machine Translation (SMT) systems work well within each of these language families. However, SMT-based translation from an SVO language to an SOV language does not work well because their word orders are completely different. Recently, a few groups have proposed rule-based preprocessing methods to mitigate this problem (Xu et al., 2009; Hong et al., 2009). These methods rewrite SVO sentences to derive more SOV-like sentences by using a set of handcrafted rules. In this paper, we propose an alternative single reordering rule: Head Finalization. This is a syntax-based preprocessing approach that offers the advantage of simplicity. We do not have to be concerned about part-of-speech tags or rule weights because the powerful Enju parser allows us to implement the rule at a general level. 
Our experiments show that its result, Head Final English (HFE), follows almost the same order as Japanese. We also show that this rule improves automatic evaluation scores.", "phrases": ["word order", "preprocessing method", "head finalization", "translation quality", "english-to-japanese translation"], "overall_score": 3.476124946126435, "scores": [2.6172682536831946, 1.5660536541021493, 0.5477927788434704, 0.5455643829357191, 0.5251159987286181], "rank_score": 1.1603590136586304} -{"id": "waseem-2016-racist", "title": "Are You a Racist or Am I Seeing Things? Annotator Influence on Hate Speech Detection on Twitter", "abstract": "Hate speech in the form of racism and sexism is commonplace on the internet (Waseem and Hovy, 2016). For this reason, there has been both an academic and an industry interest in detection of hate speech. The volume of data to be reviewed for creating data sets encourages a use of crowd sourcing for the annotation efforts. In this paper, we provide an examination of the influence of annotator knowledge of hate speech on classification models by comparing classification results obtained from training on expert and amateur annotations. We provide an evaluation on our own data set and run our models on the data set released by Waseem and Hovy (2016). We find that amateur annotators are more likely than expert annotators to label items as hate speech, and that systems trained on expert annotations outperform systems trained on amateur annotations.", "phrases": ["racist", "hate speech detection", "twitter", "abusive language"], "overall_score": 4.515826605420037, "scores": [1.793083185789134, 0.8269584774272386, 0.7945816039960837, 1.2267283466966952], "rank_score": 1.1603379034772878} -{"id": "graham-2015-evaluating", "title": "Re-evaluating Automatic Summarization with BLEU and 192 Shades of ROUGE", "abstract": "We provide an analysis of current evaluation methodologies applied to summarization metrics and identify the following areas of concern: (1) movement away from evaluation by correlation with human assessment; (2) omission of important components of human assessment from evaluations, in addition to large numbers of metric variants; (3) absence of methods of significance testing improvements over a baseline. We outline an evaluation methodology that overcomes all such challenges, providing the first method of significance testing suitable for evaluation of summarization metrics. Our evaluation reveals for the first time which metric variants significantly outperform others, optimal metric variants distinct from current recommended best variants, as well as machine translation metric BLEU to have performance on-par with ROUGE for the purpose of evaluation of summarization systems. 
We subsequently replicate a recent large-scale evaluation that relied on, what we now know to be, suboptimal ROUGE variants revealing distinct conclusions about the relative performance of state-of-the-art summarization systems.", "phrases": ["rouge", "summarization system", "evaluation metric", "human judgment", "bleu metric"], "overall_score": 3.531895244716752, "scores": [2.8544373995468226, 0.9118272268929927, 0.8359995954734925, 0.6043319883465206, 0.5938130771779879], "rank_score": 1.160081857487563} -{"id": "hasan-ng-2013-stance", "title": "Stance Classification of Ideological Debates: Data, Models, Features, and Constraints", "abstract": "Determining the stance expressed in a post written for a two-sided debate in an online debate forum is a relatively new and challenging problem in opinion mining. We seek to gain a better understanding of how to improve machine learning approaches to stance classification of ideological debates, specifically by examining how the performance of a learning-based stance classification system varies with the amount and quality of the training data, the complexity of the underlying model, the richness of the feature set, as well as the application of extra-linguistic constraints.", "phrases": ["online debate forum", "stance classification", "politician", "collective classification"], "overall_score": 3.8233120719032083, "scores": [2.9829569204107194, 0.5769460243550487, 0.5598321156530502, 0.5204364375574857], "rank_score": 1.1600428744940758} -{"id": "zhang-etal-2017-ordinal", "title": "Ordinal Common-sense Inference", "abstract": "Humans have the capacity to draw common-sense inferences from natural language: various things that are likely but not certain to hold based on established discourse, and are rarely stated explicitly. We propose an evaluation of automated common-sense inference based on an extension of recognizing textual entailment: predicting ordinal human responses on the subjective likelihood of an inference holding in a given context. We describe a framework for extracting common-sense knowledge from corpora, which is then used to construct a dataset for this ordinal entailment task. We train a neural sequence-to-sequence model on this dataset, which we use to score and generate possible inferences. Further, we annotate subsets of previously established datasets via our ordinal annotation protocol in order to then analyze the distinctions between these and what we have constructed.", "phrases": ["common-sense inference", "human response", "joci", "plausibility", "situation"], "overall_score": 3.686425486561892, "scores": [1.0022818852129525, 1.4666976681954218, 1.3021836993886766, 1.1292645494009927, 0.8993881910985322], "rank_score": 1.1599631986593153} -{"id": "jia-liang-2016-data", "title": "Data Recombination for Neural Semantic Parsing", "abstract": "Modeling crisp logical regularities is crucial in semantic parsing, making it difficult for neural models with no task-specific prior knowledge to achieve good results. In this paper, we introduce data recombination, a novel framework for injecting such prior knowledge into a model. From the training data, we induce a high-precision synchronous context-free grammar, which captures important conditional independence properties commonly found in semantic parsing. We then train a sequence-to-sequence recurrent neural network (RNN) model with a novel attention-based copying mechanism on datapoints sampled from this grammar, thereby teaching the model about these structural properties. 
Data recombination improves the accuracy of our RNN model on three semantic parsing datasets, leading to new state-of-the-art performance on the standard GeoQuery dataset for models with comparable supervision.", "phrases": ["neural semantic parsing", "context-free grammar", "data recombination", "generalization", "brevity"], "overall_score": 4.055695802555699, "scores": [2.2893917438108145, 0.9740902414729395, 1.118217635197418, 0.8929893824032117, 0.5249492522302277], "rank_score": 1.159927651022922} -{"id": "demberg-etal-2007-phonological", "title": "Phonological Constraints and Morphological Preprocessing for Grapheme-to-Phoneme Conversion", "abstract": "Grapheme-to-phoneme conversion (g2p) is a core component of any text-to-speech system. We show that adding simple syllabification and stress assignment constraints, namely \u2018one nucleus per syllable\u2019 and \u2018one main stress per word\u2019, to a joint n-gram model for g2p conversion leads to a dramatic improvement in conversion accuracy. Secondly, we assessed morphological preprocessing for g2p conversion. While morphological information has been incorporated in some past systems, its contribution has never been quantitatively assessed for German. We compare the relevance of morphological preprocessing with respect to the morphological segmentation method, training set size, the g2p conversion algorithm, and two languages, English and German.", "phrases": ["morphological preprocessing", "grapheme-to-phoneme conversion", "german"], "overall_score": 2.078195155572233, "scores": [2.093070898807914, 0.8615206141238458, 0.5249968881847976], "rank_score": 1.1598628003721858} -{"id": "petrov-etal-2012-universal", "title": "A Universal Part-of-Speech Tagset", "abstract": "To facilitate future research in unsupervised induction of syntactic structure and to standardize best-practices, we propose a tagset that consists of twelve universal part-of-speech categories. In addition to the tagset, we develop a mapping from 25 different treebank tagsets to this universal set. As a result, when combined with the original treebank data, this universal tagset and mapping produce a dataset consisting of common parts-of-speech for 22 different languages. We highlight the use of this resource via three experiments, that (1) compare tagging accuracies across languages, (2) present an unsupervised grammar induction approach that does not use gold standard part-of-speech tags, and (3) use the universal tags to transfer dependency parsers between languages, achieving state-of-the-art results.", "phrases": ["universal part-of-speech tagset", "mapping", "treebank", "universal set", "pos tag"], "overall_score": 4.464931922690065, "scores": [1.0334889096661157, 1.637777614473408, 1.3192332379258809, 1.2650173572917418, 0.5428734679374124], "rank_score": 1.159678117458912} -{"id": "whitelaw-etal-2009-using", "title": "Using the Web for Language Independent Spellchecking and Autocorrection", "abstract": "We have designed, implemented and evaluated an end-to-end system spellchecking and autocorrection system that does not require any manually annotated training data. The World Wide Web is used as a large noisy corpus from which we infer knowledge about misspellings and word usage. This is used to build an error model and an n-gram language model. A small secondary set of news texts with artificially inserted misspellings are used to tune confidence classifiers. Because no manual annotation is required, our system can easily be instantiated for new languages. 
When evaluated on human typed data with real misspellings in English and German, our web-based systems outperform baselines which use candidate corrections based on hand-curated dictionaries. Our system achieves 3.8% total error rate in English. We show similar improvements in preliminary results on artificial data for Russian and Arabic.", "phrases": ["web", "error model", "edit distance", "spelling correction", "human-compiled lexicon"], "overall_score": 3.5836828664253924, "scores": [1.9967512536045466, 1.4016660844952917, 1.2372638105728204, 0.580807800543738, 0.5803949831473265], "rank_score": 1.1593767864727444} -{"id": "reiter-2007-architecture", "title": "An Architecture for Data-to-Text Systems", "abstract": "I present an architecture for data-to-text systems, that is NLG systems which produce texts from non-linguistic input data; this essentially extends the architecture of Reiter and Dale (2000) to systems whose input is raw data instead of AI knowledge bases. This architecture is being used in the BabyTalk project, and is based on experiences in several projects at Aberdeen; it also seems to be compatible with many data-to-text systems developed elsewhere. It consists of four stages which are organised in a pipeline: Signal Analysis, Data Interpretation, Document Planning, and Microplanning and Realisation.", "phrases": ["data-to-text system", "nlg system", "document planning", "natural language generation"], "overall_score": 3.5290902924171, "scores": [1.3538239096860463, 1.263288219491309, 1.1540749079771628, 0.8654551484173089], "rank_score": 1.1591605463929568} -{"id": "chaganty-etal-2018-price", "title": "The price of debiasing automatic metrics in natural language evaluation", "abstract": "For evaluating generation systems, automatic metrics such as BLEU cost nothing to run but have been shown to correlate poorly with human judgment, leading to systematic bias against certain model improvements. On the other hand, averaging human judgments, the unbiased gold standard, is often too expensive. In this paper, we use control variates to combine automatic metrics with human evaluation to obtain an unbiased estimator with lower cost than human evaluation alone. In practice, however, we obtain only a 7-13% cost reduction on evaluating summarization and open-response question answering systems. We then prove that our estimator is optimal: there is no unbiased estimator with lower cost. Our theory further highlights the two fundamental bottlenecks\u2014the automatic metric and the prompt shown to human evaluators\u2014both of which need to be improved to obtain greater cost savings.", "phrases": ["automatic metric", "judgment", "summarization", "natural language generation"], "overall_score": 3.2135379149992933, "scores": [2.6711991263614343, 0.8360525052488237, 0.58560672014744, 0.5432968619204414], "rank_score": 1.159038803419535} -{"id": "sajous-etal-2020-englawi", "title": "ENGLAWI: From Human- to Machine-Readable Wiktionary", "abstract": "This paper introduces ENGLAWI, a large, versatile, XML-encoded machine-readable dictionary extracted from Wiktionary. ENGLAWI contains 752,769 articles encoding the full body of information included in Wiktionary: simple words, compounds and multiword expressions, lemmas and inflectional paradigms, etymologies, phonemic transcriptions in IPA, definition glosses and usage examples, translations, semantic and morphological relations, spelling variants, etc. 
It is fully documented, released under a free license and supplied with G-PeTo, a series of scripts allowing easy information extraction from ENGLAWI. Additional resources extracted from ENGLAWI, such as an inflectional lexicon, a lexicon of diatopic variants and the inclusion dates of headwords in Wiktionary's nomenclature are also provided. The paper describes the content of the resource and illustrates how it can be - and has been - used in previous studies. We finally introduce an ongoing work that computes lexicographic word embeddings from ENGLAWI's definitions.", "phrases": ["wiktionary", "machine-readable dictionary", "englawi"], "overall_score": 1.6067433026010511, "scores": [1.9580321866483148, 0.8944507752424252, 0.6245779300753972], "rank_score": 1.1590202973220458} -{"id": "schwarm-ostendorf-2005-reading", "title": "Reading Level Assessment Using Support Vector Machines and Statistical Language Models", "abstract": "Reading proficiency is a fundamental component of language competency. However, finding topical texts at an appropriate reading level for foreign and second language learners is a challenge for teachers. This task can be addressed with natural language processing technology to assess reading level. Existing measures of reading level are not well suited to this task, but previous work and our own pilot experiments have shown the benefit of using statistical language models. In this paper, we also use support vector machines to combine features from traditional reading level measures, statistical language models, and other language processing tools to produce a better method of assessing reading level.", "phrases": ["support vector machines", "language model", "reading level measure", "readability", "verb phrase"], "overall_score": 4.385902287933938, "scores": [0.8440879618056228, 2.0598609943587434, 1.478726531681117, 0.8256372683607835, 0.5867222608320386], "rank_score": 1.1590070034076612} -{"id": "yao-etal-2013-answer", "title": "Answer Extraction as Sequence Tagging with Tree Edit Distance", "abstract": "Our goal is to extract answers from preretrieved sentences for Question Answering (QA). We construct a linear-chain Conditional Random Field based on pairs of questions and their possible answer sentences, learning the association between questions and answer types. This casts answer extraction as an answer sequence tagging problem for the first time, where knowledge of shared structure between question and source sentence is incorporated through features based on Tree Edit Distance (TED). Our model is free of manually created question and answer templates, fast to run (processing 200 QA pairs per second excluding parsing time), and yields an F1 of 63.3% on a new public dataset based on prior TREC QA evaluations. The developed system is open-source, and includes an implementation of the TED model that is state of the art in the task of ranking QA pairs.", "phrases": ["tree edit distance", "conditional random field", "answer extraction", "pre-selected sentence", "search engine"], "overall_score": 3.7306172790158607, "scores": [2.3579977644597747, 0.931457590780152, 1.3922346113446529, 0.5673091839844931, 0.5459079567416196], "rank_score": 1.1589814214621386} -{"id": "herdagdelen-etal-2009-measuring", "title": "Measuring semantic relatedness with vector space models and random walks", "abstract": "Both vector space models and graph random walk models can be used to determine similarity between concepts. 
Noting that vectors can be regarded as local views of a graph, we directly compare vector space models and graph random walk models on standard tasks of predicting human similarity ratings, concept categorization, and semantic priming, varying the size of the dataset from which vector space and graph are extracted.", "phrases": ["vector space model", "random walk model", "semantic priming"], "overall_score": 1.606647012022787, "scores": [2.3272965776560275, 0.5807802848926161, 0.5687756525078749], "rank_score": 1.1589508383521727} -{"id": "romanello-etal-2009-citations", "title": "Citations in the Digital Library of Classics: Extracting Canonical References by Using Conditional Random Fields", "abstract": "Scholars of Classics cite ancient texts by using abridged citations called canonical references. In the scholarly digital library, canonical references create a complex textile of links between ancient and modern sources reflecting the deep hypertextual nature of texts in this field. This paper aims to demonstrate the suitability of Conditional Random Fields (CRF) for extracting this particular kind of reference from unstructured texts in order to enhance the capabilities of navigating and aggregating scholarly electronic resources. In particular, we developed a parser which recognizes word level n-grams of a text as being canonical references by using a CRF model trained with both positive and negative examples.", "phrases": ["digital library", "conditional random fields", "crf", "unstructured text"], "overall_score": 1.2730854358198875, "scores": [1.7854229502358645, 1.7750006412491415, 0.5511859213524665, 0.5236396972848656], "rank_score": 1.1588123025305845} -{"id": "garg-etal-2019-jointly", "title": "Jointly Learning to Align and Translate with Transformer Models", "abstract": "The state of the art in machine translation (MT) is governed by neural approaches, which typically provide superior translation accuracy over statistical approaches. However, on the closely related task of word alignment, traditional statistical word alignment models often remain the go-to solution. In this paper, we present an approach to train a Transformer model to produce both accurate translations and alignments. We extract discrete alignments from the attention probabilities learnt during regular neural machine translation model training and leverage them in a multi-task framework to optimize towards translation and alignment objectives. We demonstrate that our approach produces competitive results compared to GIZA++ trained IBM alignment models without sacrificing translation accuracy and outperforms previous attempts on Transformer model based word alignment. Finally, by incorporating IBM model alignments into our multi-task training, we report significantly better alignment accuracies compared to GIZA++ on three publicly available data sets.", "phrases": ["word alignment", "multi-task framework", "training pipeline"], "overall_score": 3.8175729797399014, "scores": [2.0443789710643427, 0.8876367859361924, 0.5428889196694238], "rank_score": 1.1583015588899863} -{"id": "chakravarthi-etal-2022-overview", "title": "Overview of The Shared Task on Homophobia and Transphobia Detection in Social Media Comments", "abstract": "Homophobia and Transphobia Detection is the task of identifying homophobia, transphobia, and non-anti-LGBT+ content from the given corpus. Homophobia and transphobia are both toxic languages directed at LGBTQ+ individuals that are described as hate speech. 
This paper summarizes our findings on the \u201cHomophobia and Transphobia Detection in social media comments\u201d shared task held at LT-EDI 2022 - ACL 2022. This shared task focused on three sub-tasks for Tamil, English, and Tamil-English (code-mixed) languages. It received 10 systems for Tamil, 13 systems for English, and 11 systems for Tamil-English. The best systems for Tamil, English, and Tamil-English scored 0.570, 0.870, and 0.610, respectively, on average macro F1-score.", "phrases": ["transphobia detection", "hate speech", "social medium"], "overall_score": 3.680679610158148, "scores": [1.8722088640468975, 1.075266943239861, 0.5269898310959368], "rank_score": 1.1581552127942316} -{"id": "wisniewski-etal-2013-design", "title": "Design and Analysis of a Large Corpus of Post-Edited Translations: Quality Estimation, Failure Analysis and the Variability of Post-Edition", "abstract": "Machine Translation (MT) is now often used to produce approximate translations that are then corrected by trained professional post-editors. As a result, more and more datasets of post-edited translations are being collected. These datasets are very useful for training, adapting or testing existing MT systems. In this work, we present the design and content of one such corpus of post-edited translations, and consider less studied possible uses of these data, notably the development of an automatic Quality Estimation (QE) system and the detection of frequent errors in automatic translations. Both applications require a careful assessment of the variability in post-editions, that we study here.", "phrases": ["post-edited translation", "quality estimation", "design"], "overall_score": 1.8638858772336033, "scores": [1.7624217350585083, 0.9175454544730404, 0.794325039141028], "rank_score": 1.1580974095575256} -{"id": "bayerl-paul-2011-determines", "title": "What Determines Inter-Coder Agreement in Manual Annotations? A Meta-Analytic Investigation", "abstract": "Recent discussions of annotator agreement have mostly centered around its calculation and interpretation, and the correct choice of indices. Although these discussions are important, they only consider the \u201cback-end\u201d of the story, namely, what to do once the data are collected. Just as important in our opinion is to know how agreement is reached in the first place and what factors influence coder agreement as part of the annotation process or setting, as this knowledge can provide concrete guidelines for the planning and set-up of annotation projects. To investigate whether there are factors that consistently impact annotator agreement we conducted a meta-analytic investigation of annotation studies reporting agreement percentages. Our meta-analysis synthesized factors reported in 96 annotation studies from three domains (word-sense disambiguation, prosodic transcriptions, and phonetic transcriptions) and was based on a total of 346 agreement indices. Our analysis identified seven factors that influence reported agreement values: annotation domain, number of categories in a coding scheme, number of annotators in a project, whether annotators received training, the intensity of annotator training, the annotation purpose, and the method used for the calculation of percentage agreements. Based on our results we develop practical recommendations for the assessment, interpretation, calculation, and reporting of coder agreement. 
We also briefly discuss theoretical implications for the concept of annotation quality.", "phrases": ["agreement", "meta-analytic investigation", "factor", "annotation study", "scheme"], "overall_score": 2.253420258153537, "scores": [1.8461683489781742, 1.770357677499034, 1.040225968110066, 0.6085160199087098, 0.5248766621416073], "rank_score": 1.1580289353275184} -{"id": "nguyen-etal-2015-improving-topic", "title": "Improving Topic Models with Latent Feature Word Representations", "abstract": "Probabilistic topic models are widely used to discover latent topics in document collections, while latent feature vector representations of words have been used to obtain high performance in many NLP tasks. In this paper, we extend two different Dirichlet multinomial topic models by incorporating latent feature vector representations of words trained on very large corpora to improve the word-topic mapping learnt on a smaller corpus. Experimental results show that by using information from the external corpora, our new models produce significant improvements on topic coherence, document clustering and document classification tasks, especially on datasets with few or short documents.", "phrases": ["dirichlet", "word embedding", "lftm", "mixture"], "overall_score": 2.9697969830377544, "scores": [1.8568574002603788, 1.1787857805354627, 1.0553723011828906, 0.5403383097029639], "rank_score": 1.157838447920424} -{"id": "xu-etal-2021-document", "title": "Document-level Event Extraction via Heterogeneous Graph-based Interaction Model with a Tracker", "abstract": "Document-level event extraction aims to recognize event information from a whole piece of article. Existing methods are not effective due to two challenges of this task: a) the target event arguments are scattered across sentences; b) the correlation among events in a document is non-trivial to model. In this paper, we propose Heterogeneous Graph-based Interaction Model with a Tracker (GIT) to solve the aforementioned two challenges. For the first challenge, GIT constructs a heterogeneous graph interaction network to capture global interactions among different sentences and entity mentions. For the second, GIT introduces a Tracker module to track the extracted events and hence capture the interdependency among the events. Experiments on a large-scale dataset (Zheng et al., 2019) show GIT outperforms the previous methods by 2.8 F1. Further analysis reveals GIT is effective in extracting multiple correlated events and event arguments that scatter across the document.", "phrases": ["graph-based interaction model", "tracker", "entity mention", "document-level event extraction"], "overall_score": 2.407287549535438, "scores": [2.4482647997603992, 0.8535939995463706, 0.8009260066618619, 0.5278576069760639], "rank_score": 1.157660603236174} -{"id": "prabhakaran-etal-2019-perturbation", "title": "Perturbation Sensitivity Analysis to Detect Unintended Model Biases", "abstract": "Data-driven statistical Natural Language Processing (NLP) techniques leverage large amounts of language data to build models that can understand language. However, most language data reflect the public discourse at the time the data was produced, and hence NLP models are susceptible to learning incidental associations around named referents at a particular point in time, in addition to general linguistic meaning. 
An NLP system designed to model notions such as sentiment and toxicity should ideally produce scores that are independent of the identity of such entities mentioned in text and their social associations. For example, in a general purpose sentiment analysis system, a phrase such as I hate Katy Perry should be interpreted as having the same sentiment as I hate Taylor Swift. Based on this idea, we propose a generic evaluation framework, Perturbation Sensitivity Analysis, which detects unintended model biases related to named entities, and requires no new annotations or corpora. We demonstrate the utility of this analysis by employing it on two different NLP models \u2014 a sentiment model and a toxicity model \u2014 applied on online comments in English language from four different genres.", "phrases": ["nlp model", "toxicity", "perturbation sensitivity analysis"], "overall_score": 3.0534846955178434, "scores": [2.0515003169611457, 0.888857705194919, 0.530750386045514], "rank_score": 1.157036136067193} -{"id": "asahara-matsumoto-2004-japanese", "title": "Japanese Unknown Word Identification by Character-based Chunking", "abstract": "We introduce a character-based chunking for unknown word identification in Japanese text. A major advantage of our method is an ability to detect low frequency unknown words of unrestricted character type patterns. The method is built upon SVM-based chunking, by use of character n-gram and surrounding context of n-best word segmentation candidates from statistical morphological analysis as features. It is applied to newspapers and patent texts, achieving 95% precision and 55-70% recall for newspapers and more than 85% precision for patent texts.", "phrases": ["unknown word identification", "character-based chunking", "japanese text"], "overall_score": 1.8620825704405954, "scores": [2.0311428880080764, 0.9153849800824728, 0.524402988057354], "rank_score": 1.1569769520493012} -{"id": "obeid-etal-2019-adida", "title": "ADIDA: Automatic Dialect Identification for Arabic", "abstract": "This demo paper describes ADIDA, a web-based system for automatic dialect identification for Arabic text. The system distinguishes among the dialects of 25 Arab cities (from Rabat to Muscat) in addition to Modern Standard Arabic. The results are presented with either a point map or a heat map visualizing the automatic identification probabilities over a geographical map of the Arab World.", "phrases": ["automatic dialect identification", "arab city", "adida"], "overall_score": 1.861407763443349, "scores": [1.6976765406518628, 0.9357469957179966, 0.8362494762936212], "rank_score": 1.1565576708878267} -{"id": "elsahar-etal-2018-zero", "title": "Zero-Shot Question Generation from Knowledge Graphs for Unseen Predicates and Entity Types", "abstract": "We present a neural model for question generation from knowledge graphs triples in a \u201cZero-shot\u201d setup, that is generating questions for predicate, subject types or object types that were not seen at training time. Our model leverages triples occurrences in the natural language corpus in an encoder-decoder architecture, paired with an original part-of-speech copy action mechanism to generate questions. 
Benchmark and human evaluation show that our model outperforms state-of-the-art on this task.", "phrases": ["question generation", "unseen predicate", "zero-shot learning"], "overall_score": 2.966272877304107, "scores": [2.4034595694431418, 0.5417725940037872, 0.5241613378420206], "rank_score": 1.1564645004296497} -{"id": "yu-etal-2021-improving", "title": "Improving Math Word Problems with Pre-trained Knowledge and Hierarchical Reasoning", "abstract": "The recent algorithms for math word problems (MWP) neglect to use outside knowledge not present in the problems. Most of them only capture the word-level relationship and ignore to build hierarchical reasoning like the human being for mining the contextual structure between words and sentences. In this paper, we propose a Reasoning with Pre-trained Knowledge and Hierarchical Structure (RPKHS) network, which contains a pre-trained knowledge encoder and a hierarchical reasoning encoder. Firstly, our pre-trained knowledge encoder aims at reasoning the MWP by using outside knowledge from the pre-trained transformer-based models. Secondly, the hierarchical reasoning encoder is presented for seamlessly integrating the word-level and sentence-level reasoning to bridge the entity and context domain on MWP. Extensive experiments show that our RPKHS significantly outperforms state-of-the-art approaches on two large-scale commonly-used datasets, and boosts performance from 77.4% to 83.9% on Math23K, from 75.5% to 82.2% on Math23K with 5-fold cross-validation and from 83.7% to 89.8% on MAWPS. More extensive ablations are shown to demonstrate the effectiveness and interpretability of our proposed method.", "phrases": ["math word problem", "pre-trained knowledge", "hierarchical reasoning"], "overall_score": 1.2704279736116804, "scores": [1.8163075909727864, 0.8460744182945006, 0.8067981192889011], "rank_score": 1.156393376185396} -{"id": "lin-etal-2021-batch", "title": "In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval", "abstract": "We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the ColBERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT's expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacher\u2013student setup is that we can efficiently add in-batch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using ColBERT as the teacher reduces training cost compared to a full cross-encoder. 
Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently.", "phrases": ["knowledge distillation", "dense retrieval", "in-batch negative"], "overall_score": 1.2703846941562111, "scores": [1.7747174953722449, 0.8576046535861378, 0.8367397956235799], "rank_score": 1.156353981527321} -{"id": "wang-jiang-2019-explicit", "title": "Explicit Utilization of General Knowledge in Machine Reading Comprehension", "abstract": "To bridge the gap between Machine Reading Comprehension (MRC) models and human beings, which is mainly reflected in the hunger for data and the robustness to noise, in this paper, we explore how to integrate the neural networks of MRC models with the general knowledge of human beings. On the one hand, we propose a data enrichment method, which uses WordNet to extract inter-word semantic connections as general knowledge from each given passage-question pair. On the other hand, we propose an end-to-end MRC model named as Knowledge Aided Reader (KAR), which explicitly uses the above extracted general knowledge to assist its attention mechanisms. Based on the data enrichment method, KAR is comparable in performance with the state-of-the-art MRC models, and significantly more robust to noise than them. When only a subset (20%-80%) of the training examples are available, KAR outperforms the state-of-the-art MRC models by a large margin, and is still reasonably robust to noise.", "phrases": ["general knowledge", "machine reading comprehension", "downstream task"], "overall_score": 2.249960417348354, "scores": [2.041410527577309, 0.8900272005192676, 0.5373150585220378], "rank_score": 1.1562509288728715} -{"id": "han-sun-2011-generative", "title": "A Generative Entity-Mention Model for Linking Entities with Knowledge Base", "abstract": "Linking entities with knowledge base (entity linking) is a key issue in bridging the textual data with the structural knowledge base. Due to the name variation problem and the name ambiguity problem, the entity linking decisions are critically depending on the heterogenous knowledge of entities. In this paper, we propose a generative probabilistic model, called entity-mention model, which can leverage heterogenous entity knowledge (including popularity knowledge, name knowledge and context knowledge) for the entity linking task. In our model, each name mention to be linked is modeled as a sample generated through a three-step generative story, and the entity knowledge is encoded in the distribution of entities in document P(e), the distribution of possible names of a specific entity P(s|e), and the distribution of possible contexts of a specific entity P(c|e). To find the referent entity of a name mention, our method combines the evidence from all the three distributions P(e), P(s|e) and P(c|e). 
Experimental results show that our method can significantly outperform the traditional methods.", "phrases": ["entity-mention model", "knowledge base", "probabilistic model"], "overall_score": 2.8725335449034453, "scores": [2.0533712799571413, 0.868955851228921, 0.5456504829611516], "rank_score": 1.1559925380490712} -{"id": "kumar-etal-2020-noisy", "title": "Noisy Text Data: Achilles' Heel of BERT", "abstract": "Owing to the phenomenal success of BERT on various NLP tasks and benchmark datasets, industry practitioners are actively experimenting with fine-tuning BERT to build NLP applications for solving industry use cases. For most datasets that are used by practitioners to build industrial NLP applications, it is hard to guarantee absence of any noise in the data. While BERT has performed exceedingly well for transferring the learnings from one use case to another, it remains unclear how BERT performs when fine-tuned on noisy text. In this work, we explore the sensitivity of BERT to noise in the data. We work with most commonly occurring noise (spelling mistakes, typos) and show that this results in significant degradation in the performance of BERT. We present experimental results to show that BERT's performance on fundamental NLP tasks like sentiment analysis and textual similarity drops significantly in the presence of (simulated) noise on benchmark datasets viz. IMDB Movie Review, STS-B, SST-2. Further, we identify shortcomings in the existing BERT pipeline that are responsible for this drop in performance. Our findings suggest that practitioners need to be wary of the presence of noise in their datasets while fine-tuning BERT to solve industry use cases.", "phrases": ["bert", "benchmark dataset", "noise", "spelling mistake", "presence"], "overall_score": 2.2482869650669914, "scores": [2.5544573350737227, 1.2299991002695556, 0.9201451936271847, 0.5408746996406754, 0.5314783939860834], "rank_score": 1.1553909445194446} -{"id": "kurokawa-etal-2009-automatic", "title": "Automatic Detection of Translated Text and its Impact on Machine Translation", "abstract": "We investigate the possibility of automatically detecting whether a piece of text is an original or a translation. On a large parallel English-French corpus where reference information is available, we find that this is possible with around 90% accuracy. We further study the implication this has on Machine Translation performance. After separating our corpus according to translation direction, we train direction-specific phrase-based MT systems and show that they yield improved translation performance. This suggests that taking directionality into account when training SMT systems may have a significant effect on output quality.", "phrases": ["machine translation", "automatic detection", "french-translated-to-english text"], "overall_score": 3.570925382543367, "scores": [2.093803081756551, 0.7874382558927747, 0.5845072922315163], "rank_score": 1.155249543293614} -{"id": "yang-etal-2019-exploring", "title": "Exploring Pre-trained Language Models for Event Extraction and Generation", "abstract": "Traditional approaches to the task of ACE event extraction usually depend on manually annotated data, which is often laborious to create and limited in size. Therefore, in addition to the difficulty of event extraction itself, insufficient training data hinders the learning process as well. 
To promote event extraction, we first propose an event extraction model to overcome the roles overlap problem by separating the argument prediction in terms of roles. Moreover, to address the problem of insufficient training data, we propose a method to automatically generate labeled data by editing prototypes and screen out generated samples by ranking their quality. Experiments on the ACE2005 dataset demonstrate that our extraction model can surpass most existing extraction methods. Besides, incorporating our generation method exhibits further significant improvement. It obtains new state-of-the-art results on the event extraction task, including pushing the F1 score of trigger classification to 81.1%, and the F1 score of argument classification to 58.9%.", "phrases": ["event extraction", "argument extraction", "prior study"], "overall_score": 3.515737981453216, "scores": [2.3768148341255113, 0.5664281047415114, 0.5210816506559405], "rank_score": 1.154774863174321} -{"id": "al-sallab-etal-2015-deep", "title": "Deep Learning Models for Sentiment Analysis in Arabic", "abstract": "In this paper, a deep learning framework is proposed for text sentiment classification in Arabic. Four different architectures are explored. Three are based on Deep Belief Networks and Deep Auto Encoders, where the input data model is based on the ordinary Bag-of-Words, with features based on the recently developed Arabic Sentiment Lexicon in combination with other standard lexicon features. The fourth model, based on the Recursive Auto Encoder, is proposed to tackle the lack of context handling in the first three models. The evaluation is carried out using the Linguistic Data Consortium Arabic Tree Bank dataset, with benchmarking against state-of-the-art systems in sentiment classification with reported results on the same dataset. The results show a large improvement of the fourth model over the state of the art, with the advantage of using no lexicon resources that are scarce and costly in terms of their development.", "phrases": ["sentiment analysis", "arabic", "deep learning model"], "overall_score": 2.6586260115418834, "scores": [1.7979155290591387, 0.8164719157404304, 0.8494923739717034], "rank_score": 1.154626606257091} -{"id": "varadi-etal-2008-clarin", "title": "CLARIN: Common Language Resources and Technology Infrastructure", "abstract": "The paper provides a general introduction to the CLARIN project, a large-scale European research infrastructure project designed to establish an integrated and interoperable infrastructure of language resources and technologies. The goal is to make language resources and technology much more accessible to all researchers working with language material, particularly non-expert users in the Humanities and Social Sciences. CLARIN intends to build a virtual, distributed infrastructure consisting of a federation of trusted digital archives and repositories where language resources and tools are accessible through web services. The CLARIN project consists of 32 partners from 22 countries and is currently engaged in the preparatory phase of developing the infrastructure. 
The paper describes the objectives of the project in terms of its technical, legal, linguistic and user dimensions.", "phrases": ["language resource", "technology", "infrastructure", "humanity", "clarin"], "overall_score": 2.400821728987443, "scores": [2.5884879181459395, 1.22381435861543, 0.8748013197211189, 0.5545663802204798, 0.5310860274114516], "rank_score": 1.1545512008228838} -{"id": "papandrea-etal-2017-supwsd", "title": "SupWSD: A Flexible Toolkit for Supervised Word Sense Disambiguation", "abstract": "In this demonstration we present SupWSD, a Java API for supervised Word Sense Disambiguation (WSD). This toolkit includes the implementation of a state-of-the-art supervised WSD system, together with a Natural Language Processing pipeline for preprocessing and feature extraction. Our aim is to provide an easy-to-use tool for the research community, designed to be modular, fast and scalable for training and testing on large datasets. The source code of SupWSD is available at .", "phrases": ["toolkit", "word sense disambiguation", "supwsd"], "overall_score": 1.2677080213193415, "scores": [1.6643951132853934, 0.9850313691529442, 0.8123262243047293], "rank_score": 1.1539175689143555} -{"id": "zhou-etal-2007-tree", "title": "Tree Kernel-Based Relation Extraction with Context-Sensitive Structured Parse Tree Information", "abstract": "This paper proposes a tree kernel with context-sensitive structured parse tree information for relation extraction. It resolves two critical problems in previous tree kernels for relation extraction in two ways. First, it automatically determines a dynamic context-sensitive tree span for relation extraction by extending the widely-used Shortest Path-enclosed Tree (SPT) to include necessary context information outside SPT. Second, it proposes a context-sensitive convolution tree kernel, which enumerates both context-free and context-sensitive sub-trees by considering their ancestor node paths as their contexts. Moreover, this paper evaluates the complementary nature between our tree kernel and a state-of-the-art linear kernel. Evaluation on the ACE RDC corpora shows that our dynamic context-sensitive tree span is much more suitable for relation extraction than SPT and our tree kernel outperforms Collins and Duffy\u2019s state-of-the-art convolution tree kernel. It also shows that our tree kernel achieves much better performance than the state-of-the-art linear kernels. Finally, it shows that feature-based and tree kernel-based methods complement each other well and that the composite kernel can effectively integrate both flat and structured features.", "phrases": ["relation extraction", "tree kernel", "shortest path-enclosed tree", "syntactic feature", "cs-spt"], "overall_score": 3.5128133124053447, "scores": [2.7774045447685505, 1.1071884863541772, 0.829198528424385, 0.5326011647591901, 0.5226784260316322], "rank_score": 1.153814230067587} -{"id": "xie-etal-2012-exploring", "title": "Exploring Content Features for Automated Speech Scoring", "abstract": "Most previous research on automated speech scoring has focused on restricted, predictable speech. For automated scoring of unrestricted spontaneous speech, speech proficiency has been evaluated primarily on aspects of pronunciation, fluency, vocabulary and language usage but not on aspects of content and topicality. In this paper, we explore features representing the accuracy of the content of a spoken response. 
Content features are generated using three similarity measures, including a lexical matching method (Vector Space Model) and two semantic similarity measures (Latent Semantic Analysis and Pointwise Mutual Information). All of the features exhibit moderately high correlations with human proficiency scores on human speech transcriptions. The correlations decrease somewhat due to recognition errors when evaluated on the output of an automatic speech recognition system; however, the additional use of word confidence scores can achieve correlations at a similar level as for human transcriptions.", "phrases": ["content feature", "speech scoring", "similarity measure", "latent semantic analysis", "sample response"], "overall_score": 3.04489477889267, "scores": [2.7367336888227722, 1.050637921463985, 0.8963869364787833, 0.5485416920685258, 0.5366058488115892], "rank_score": 1.153781217529131} -{"id": "chang-etal-2007-guiding", "title": "Guiding Semi-Supervision with Constraint-Driven Learning", "abstract": "Over the last few years, two of the main research directions in machine learning for natural language processing have been the study of semi-supervised learning algorithms as a way to train classifiers when labeled data is scarce, and the study of ways to exploit knowledge and global information in structured learning tasks. In this paper, we suggest a method for incorporating domain knowledge in semi-supervised learning algorithms. Our novel framework unifies and can exploit several kinds of task-specific constraints. The experimental results presented in the information extraction domain demonstrate that applying constraints helps the model to generate better feedback during learning, and hence the framework allows for high-performance learning with significantly less training data than was possible before on these tasks.", "phrases": ["constraint-driven learning", "semi-supervised learning", "domain knowledge", "declarative constraint", "hard constraint"], "overall_score": 3.7587235038407067, "scores": [2.057489461207913, 1.459575721677529, 0.8723178344003107, 0.8305685510970092, 0.5483297885030539], "rank_score": 1.1536562713771632} -{"id": "zhang-clark-2010-fast", "title": "A Fast Decoder for Joint Word Segmentation and POS-Tagging Using a Single Discriminative Model", "abstract": "We show that the standard beam-search algorithm can be used as an efficient decoder for the global linear model of Zhang and Clark (2008) for joint word segmentation and POS-tagging, achieving a significant speed improvement. Such decoding is enabled by: (1) separating full word features from partial word features so that feature templates can be instantiated incrementally, according to whether the current character is separated or appended; (2) deciding the POS-tag of a potential word when its first character is processed. Early-update is used with perceptron training so that the linear model gives a high score to a correct partial candidate as well as a full output. Effective scoring of partial structures allows the decoder to give high accuracy with a small beam-size of 16. In our 10-fold cross-validation experiments with the Chinese Treebank, our system performed over 10 times as fast as Zhang and Clark (2008) with little accuracy loss. 
The accuracy of our system on the standard CTB 5 test was competitive with the best in the literature.", "phrases": ["decoding", "joint word segmentation", "pos-tagging"], "overall_score": 2.6561333152624806, "scores": [1.9663713696785556, 0.9286167237461892, 0.5656440326289114], "rank_score": 1.1535440420178855} -{"id": "stoyanov-cardie-2008-topic", "title": "Topic Identification for Fine-Grained Opinion Analysis", "abstract": "Within the area of general-purpose fine-grained subjectivity analysis, opinion topic identification has, to date, received little attention due to both the difficulty of the task and the lack of appropriately annotated resources. In this paper, we provide an operational definition of opinion topic and present an algorithm for opinion topic identification that, following our new definition, treats the task as a problem in topic coreference resolution. We develop a methodology for the manual annotation of opinion topics and use it to annotate topic information for a portion of an existing general-purpose opinion corpus. In experiments using the corpus, our topic identification approach statistically significantly outperforms several non-trivial baselines according to three evaluation measures.", "phrases": ["opinion", "subjectivity analysis", "topic identification"], "overall_score": 3.455418903960278, "scores": [1.9678803459938197, 0.930032445962631, 0.5624287089824209], "rank_score": 1.1534471669796238} -{"id": "britz-etal-2017-effective", "title": "Effective Domain Mixing for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) models are often trained on heterogeneous mixtures of domains, from news to parliamentary proceedings, each with unique distributions and language. In this work we show that training NMT systems on naively mixed data can degrade performance versus models fit to each constituent domain. We demonstrate that this problem can be circumvented, and propose three models that do so by jointly learning domain discrimination and translation. We demonstrate the efficacy of these techniques by merging pairs of domains in three languages: Chinese, French, and Japanese. After training on composite data, each approach outperforms its domain-specific counterparts, with a model based on a discriminator network doing so most reliably. We obtain consistent performance improvements and an average increase of 1.1 BLEU.", "phrases": ["neural machine translation", "discriminator network", "domain adaptation"], "overall_score": 3.7114489190406563, "scores": [1.9861588398637, 0.9180379046668692, 0.554882562319633], "rank_score": 1.1530264356167341} -{"id": "chang-etal-2020-convokit", "title": "ConvoKit: A Toolkit for the Analysis of Conversations", "abstract": "This paper describes the design and functionality of ConvoKit, an open-source toolkit for analyzing conversations and the social interactions embedded within. ConvoKit provides a unified framework for representing and manipulating conversational data, as well as a large and diverse collection of conversational datasets. 
By providing an intuitive interface for exploring and interacting with conversational data, this toolkit lowers the technical barriers for the broad adoption of computational methods for conversational analysis.", "phrases": ["toolkit", "conversation", "convokit"], "overall_score": 1.2659010627357297, "scores": [1.7285182249375255, 0.8951438721841987, 0.8331563158702616], "rank_score": 1.152272804330662} -{"id": "wang-etal-2021-semeval", "title": "SemEval-2021 Task 9: Fact Verification and Evidence Finding for Tabular Data in Scientific Documents (SEM-TAB-FACTS)", "abstract": "Understanding tables is an important and relevant task that involves understanding table structure as well as being able to compare and contrast information within cells. In this paper, we address this challenge by presenting a new dataset and tasks that address this goal in a shared task, SemEval-2021 Task 9: Fact Verification and Evidence Finding for Tabular Data in Scientific Documents (SEM-TAB-FACTS). Our dataset contains 981 manually-generated tables and an auto-generated dataset of 1980 tables, providing over 180K statements and over 16M evidence annotations. SEM-TAB-FACTS featured two sub-tasks. In sub-task A, the goal was to determine if a statement is supported, refuted or unknown in relation to a table. In sub-task B, the focus was on identifying the specific cells of a table that provide evidence for the statement. 69 teams signed up to participate in the task, with 19 successful submissions to subtask A and 12 successful submissions to subtask B. We present our results and main findings from the competition.", "phrases": ["fact verification", "scientific documents", "sem-tab-facts"], "overall_score": 2.5316774724246374, "scores": [1.7636812234480148, 0.8625683159208436, 0.8303986774837239], "rank_score": 1.152216072284194} -{"id": "oraby-etal-2016-creating", "title": "Creating and Characterizing a Diverse Corpus of Sarcasm in Dialogue", "abstract": "The use of irony and sarcasm in social media allows us to study them at scale for the first time. However, their diversity has made it difficult to construct a high-quality corpus of sarcasm in dialogue. Here, we describe the process of creating a large-scale, highly-diverse corpus of online debate forums dialogue, and our novel methods for operationalizing classes of sarcasm in the form of rhetorical questions and hyperbole. We show that we can use lexico-syntactic cues to reliably retrieve sarcastic utterances with high accuracy. To demonstrate the properties and quality of our corpus, we conduct supervised learning experiments with simple features, and show that we achieve both higher precision and F than previous work on sarcasm in debate forums dialogue. We apply a weakly-supervised linguistic pattern learner and qualitatively analyze the linguistic differences in each class.", "phrases": ["sarcasm", "debate forum", "rhetorical question"], "overall_score": 2.064328882119899, "scores": [2.3721274231439984, 0.5608064010655981, 0.523437833213722], "rank_score": 1.1521238858077727} -{"id": "hashimoto-etal-2017-joint", "title": "A Joint Many-Task Model: Growing a Neural Network for Multiple NLP Tasks", "abstract": "Transfer and multi-task learning have traditionally focused on either a single source-target pair or very few, similar tasks. Ideally, the linguistic levels of morphology, syntax and semantics would benefit each other by being trained in a single model. 
We introduce a joint many-task model together with a strategy for successively growing its depth to solve increasingly complex tasks. Higher layers include shortcut connections to lower-level task predictions to reflect linguistic hierarchies. We use a simple regularization term to allow for optimizing all model weights to improve one task's loss without exhibiting catastrophic interference with the other tasks. Our single end-to-end model obtains state-of-the-art or competitive results on five different tasks spanning tagging, parsing, relatedness, and entailment.", "phrases": ["joint many-task model", "depth", "complex task", "relatedness", "neural architecture"], "overall_score": 4.220754558967554, "scores": [2.4315357796557757, 1.1169141535264027, 0.8356928759343197, 0.8251480493861552, 0.5511616282550795], "rank_score": 1.1520904973515464} -{"id": "wang-etal-2017-instance", "title": "Instance Weighting for Neural Machine Translation Domain Adaptation", "abstract": "Instance weighting has been widely applied to phrase-based machine translation domain adaptation. However, it is challenging to apply to Neural Machine Translation (NMT) directly, because NMT is not a linear model. In this paper, two instance weighting techniques, i.e., sentence weighting and domain weighting with a dynamic weight learning strategy, are proposed for NMT domain adaptation. Empirical results on the IWSLT English-German/French tasks show that the proposed methods can substantially improve NMT performance by up to 2.7-6.7 BLEU points, outperforming the existing baselines by up to 1.6-3.6 BLEU points.", "phrases": ["weight learning strategy", "instance weighting", "language model", "sentence pair", "out-of-domain data"], "overall_score": 3.708243579823285, "scores": [2.866143992616733, 0.9009805919852962, 0.8954989951997093, 0.5740111868278776, 0.5235184383718884], "rank_score": 1.1520306410003007} -{"id": "barrault-etal-2018-findings", "title": "Findings of the Third Shared Task on Multimodal Machine Translation", "abstract": "We present the results from the third shared task on multimodal machine translation. In this task, a source sentence in English is supplemented by an image and participating systems are required to generate a translation for such a sentence into German, French or Czech. The image can be used in addition to (or instead of) the source sentence. This year, the task was extended with a third target language (Czech) and a new test set. In addition, a variant of this task was introduced with its own test set where the source sentence is given in multiple languages: English, French and German, and participating systems are required to generate a translation in Czech. Seven teams submitted 45 different systems to the two variants of the task. Compared to last year, the performance of the multimodal submissions improved, but text-only systems remain competitive.", "phrases": ["multimodal machine translation", "image", "french", "change"], "overall_score": 3.1938092441446857, "scores": [2.0450686361034287, 1.4975696167749535, 0.5372320571059658, 0.5278224480885192], "rank_score": 1.1519231895182167} -{"id": "cohan-etal-2018-smhd", "title": "SMHD: a Large-Scale Resource for Exploring Online Language Usage for Multiple Mental Health Conditions", "abstract": "Mental health is a significant and growing public health concern. 
As language usage can be leveraged to obtain crucial insights into mental health conditions, there is a need for large-scale, labeled, mental health-related datasets of users who have been diagnosed with one or more such conditions. In this paper, we investigate the creation of high-precision patterns to identify self-reported diagnoses of nine different mental health conditions, and obtain high-quality labeled data without the need for manual labelling. We introduce the SMHD (Self-reported Mental Health Diagnoses) dataset and make it available. SMHD is a novel large dataset of social media posts from users with one or multiple mental health conditions along with matched control users. We examine distinctions in users' language, as measured by linguistic and psychological variables. We further explore text classification methods to identify individuals with mental conditions through their language.", "phrases": ["mental health condition", "insight", "smhd", "disorder"], "overall_score": 2.954574178164553, "scores": [2.0912998783879666, 1.108964526610076, 0.8860736556800881, 0.5212759954350357], "rank_score": 1.1519035140282916} -{"id": "tan-etal-2017-abstractive", "title": "Abstractive Document Summarization with a Graph-Based Attentional Neural Model", "abstract": "Abstractive summarization is the ultimate goal of document summarization research, but it has previously been less investigated due to the immaturity of text generation techniques. Recently, impressive progress has been made on abstractive sentence summarization using neural models. Unfortunately, attempts at abstractive document summarization are still in a primitive stage, and the evaluation results are worse than those of extractive methods on benchmark datasets. In this paper, we review the difficulties of neural abstractive document summarization, and propose a novel graph-based attention mechanism in the sequence-to-sequence framework. The intuition is to address the saliency factor of summarization, which has been overlooked by prior works. Experimental results demonstrate that our model is able to achieve considerable improvement over previous neural abstractive models. The data-driven neural abstractive method is also competitive with state-of-the-art extractive methods.", "phrases": ["attentional neural model", "graph-based attention mechanism", "abstractive document summarization", "input text"], "overall_score": 3.8382251240165077, "scores": [1.9315177100030476, 0.8392160802801774, 1.3100060214149114, 0.5266906293760797], "rank_score": 1.151857610268554} -{"id": "eisenstein-etal-2010-latent", "title": "A Latent Variable Model for Geographic Lexical Variation", "abstract": "The rapid growth of geotagged social media raises new computational possibilities for investigating geographic linguistic variation. In this paper, we present a multi-level generative model that reasons jointly about latent topics and geographical regions. High-level topics such as \"sports\" or \"entertainment\" are rendered differently in each geographic region, revealing topic-specific regional distinctions. Applied to a new dataset of geotagged microblogs, our model recovers coherent topics and their regional variants, while identifying geographic areas of linguistic consistency. 
The model also enables prediction of an author's geographic location from raw text, outperforming both text regression and supervised topic models.", "phrases": ["latent variable model", "variation", "regional distinction", "location", "demographic language variation"], "overall_score": 4.0947471519380905, "scores": [1.3992448790527205, 1.7576315668839644, 1.3886814689436004, 0.6181921506459575, 0.5948241740697394], "rank_score": 1.1517148479191965} -{"id": "peyrard-2019-simple", "title": "A Simple Theoretical Model of Importance for Summarization", "abstract": "Research on summarization has mainly been driven by empirical approaches, crafting systems to perform well on standard datasets with the notion of information Importance remaining latent. We argue that establishing theoretical models of Importance will advance our understanding of the task and help to further improve summarization systems. To this end, we propose simple but rigorous definitions of several concepts that were previously used only intuitively in summarization: Redundancy, Relevance, and Informativeness. Importance arises as a single quantity naturally unifying these concepts. Additionally, we provide intuitions to interpret the proposed quantities and experiments to demonstrate the potential of the framework to inform and guide subsequent works.", "phrases": ["theoretical model", "summarization", "notion", "source document"], "overall_score": 2.9534688115714443, "scores": [2.6256151525306355, 0.8940339575716326, 0.5639226931569246, 0.5223184502535162], "rank_score": 1.1514725633781773} -{"id": "ansell-etal-2021-mad-g", "title": "MAD-G: Multilingual Adapter Generation for Efficient Cross-Lingual Transfer", "abstract": "Adapter modules have emerged as a general parameter-efficient means to specialize a pretrained encoder to new domains. Massively multilingual transformers (MMTs) have particularly benefited from additional training of language-specific adapters. However, this approach is not viable for the vast majority of languages, due to limitations in their corpus size or compute budgets. In this work, we propose MAD-G (Multilingual ADapter Generation), which contextually generates language adapters from language representations based on typological features. In contrast to prior work, our time- and space-efficient MAD-G approach enables (1) sharing of linguistic knowledge across languages and (2) zero-shot inference by generating language adapters for unseen languages. We thoroughly evaluate MAD-G in zero-shot cross-lingual transfer on part-of-speech tagging, dependency parsing, and named entity recognition. While offering (1) improved fine-tuning efficiency (by a factor of around 50 in our experiments), (2) a smaller parameter budget, and (3) increased language coverage, MAD-G remains competitive with more expensive methods for language-specific adapter training across the board. Moreover, it offers substantial benefits for low-resource languages, particularly on the NER task in low-resource African languages. 
Finally, we demonstrate that MAD-G's transfer performance can be further improved via: (i) multi-source training, i.e., by generating and combining adapters of multiple languages with available task-specific training data; and (ii) by further fine-tuning generated MAD-G adapters for languages with monolingual data.", "phrases": ["multilingual adapter generation", "cross-lingual transfer", "part-of-speech tagging", "dependency parsing", "mad-g"], "overall_score": 1.8530726817589789, "scores": [2.958564849187767, 0.9411403731988269, 0.781887469056427, 0.5420968324872778, 0.5332044433442984], "rank_score": 1.1513787934549196} -{"id": "akkaya-etal-2009-subjectivity", "title": "Subjectivity Word Sense Disambiguation", "abstract": "Many approaches to opinion and sentiment analysis rely on lexicons of words that may be used to express subjectivity. These are compiled as lists of keywords, rather than word meanings (senses). However, many keywords have both subjective and objective senses. False hits -- subjectivity clues used with objective senses -- are a significant source of error in subjectivity and sentiment analysis. This talk will focus on sense-level opinion and sentiment analysis. First, I will give the results of a study showing that even words judged in previous work to be reliable opinion clues have significant degrees of subjectivity sense ambiguity. Then, we will consider the task of distinguishing between the subjective and objective senses of words in a dictionary, and the related task of creating \"usage inventories\" of opinion clues. Given such distinctions, the next step is to automatically determine which word instances in a corpus are being used with subjective senses, and which are being used with objective senses (we call this task \"SWSD\"). We will see evidence that SWSD is more feasible than full word sense disambiguation, because it is more coarse grained -- often, the exact sense need not be pinpointed, and that SWSD can be exploited to improve the performance of opinion and sentiment analysis systems via sense-aware classification. Finally, I will discuss experiments in acquiring SWSD data, via token-based context discrimination where the context vector representation is adapted to distinguish between subjective and objective contexts, and the clustering process is enriched by pair-wise constraints, making it semi-supervised.", "phrases": ["sentiment analysis", "word instance", "swsd", "subjectivity"], "overall_score": 2.8601513311445204, "scores": [1.50432935506509, 1.2310730369591396, 0.97167126046521, 0.896964622169343], "rank_score": 1.1510095686646957} -{"id": "thorne-vlachos-2018-automated", "title": "Automated Fact Checking: Task Formulations, Methods and Future Directions", "abstract": "The recently increased focus on misinformation has stimulated research in fact checking, the task of assessing the truthfulness of a claim. Research in automating this task has been conducted in a variety of disciplines including natural language processing, machine learning, knowledge representation, databases, and journalism. While there has been substantial progress, relevant papers and articles have been published in research communities that are often unaware of each other and use inconsistent terminology, thus impeding understanding and further progress. In this paper we survey automated fact checking research stemming from natural language processing and related disciplines, unifying the task formulations and methodologies across papers and authors. 
Furthermore, we highlight the use of evidence as an important distinguishing factor among them, cutting across task formulations and methods. We conclude by proposing avenues for future NLP research on automated fact checking.", "phrases": ["misinformation", "truthfulness", "claim", "knowledge representation", "survey"], "overall_score": 4.1245483751754755, "scores": [1.8706300888520802, 1.243002579175475, 1.2082013906136122, 0.8631915529796225, 0.5698600831544933], "rank_score": 1.1509771389550567} -{"id": "mathias-etal-2018-eyes", "title": "Eyes are the Windows to the Soul: Predicting the Rating of Text Quality Using Gaze Behaviour", "abstract": "Predicting a reader's rating of text quality is a challenging task that involves estimating different subjective aspects of the text, like structure, clarity, etc. Such subjective aspects are better handled using cognitive information. One such source of cognitive information is gaze behaviour. In this paper, we show that gaze behaviour does indeed help in effectively predicting the rating of text quality. To do this, we first model text quality as a function of three properties - organization, coherence and cohesion. Then, we demonstrate how capturing gaze behaviour helps in predicting each of these properties, and hence the overall quality, by reporting improvements obtained by adding gaze features to traditional textual features for score prediction. We also hypothesize that if a reader has fully understood the text, the corresponding gaze behaviour would give a better indication of the assigned rating, as opposed to partial understanding. Our experiments validate this hypothesis by showing greater agreement between the given rating and the predicted rating when the reader has a full understanding of the text.", "phrases": ["rating", "text quality", "gaze behaviour"], "overall_score": 1.5955821837549728, "scores": [1.8444449833009864, 0.8124976393745563, 0.7959651330755806], "rank_score": 1.1509692519170411} -{"id": "shi-zhou-2005-error", "title": "Error Detection Using Linguistic Features", "abstract": "Recognition errors hinder the proliferation of speech recognition (SR) systems. Based on the observation that recognition errors may result in ungrammatical sentences, especially in dictation applications where an acceptable level of accuracy of generated documents is indispensable, we propose to incorporate two kinds of linguistic features into error detection: lexical features of words, and syntactic features from a robust lexicalized parser. Transformation-based learning is chosen to predict recognition errors by integrating word confidence scores with linguistic features. The experimental results on a dictation data corpus show that linguistic features alone are not as useful as word confidence scores in detecting errors. However, linguistic features provide complementary information when combined with word confidence scores, which collectively reduce the classification error rate by 12.30% and improve the F measure by 53.62%.", "phrases": ["linguistic feature", "speech recognition", "error detection"], "overall_score": 1.2639850557995413, "scores": [1.9685121201428244, 0.946999843499675, 0.5360743753340633], "rank_score": 1.1505287796588544} -{"id": "luyckx-daelemans-2008-authorship", "title": "Authorship Attribution and Verification with Many Authors and Limited Data", "abstract": "Most studies in statistical or machine learning based authorship attribution focus on two or a few authors. 
This leads to an overestimation of the importance of the features extracted from the training data and found to be discriminating for these small sets of authors. Most studies also use sizes of training data that are unrealistic for situations in which stylometry is applied (e.g., forensics), and thereby overestimate the accuracy of their approach in these situations. A more realistic interpretation of the task is as an authorship verification problem that we approximate by pooling data from many different authors as negative examples. In this paper, we show, on the basis of a new corpus with 145 authors, what the effect is of many authors on feature selection and learning, and show the robustness of a memory-based learning approach in doing authorship attribution and verification with many authors and limited training data when compared to eager learning methods such as SVMs and maximum entropy learning.", "phrases": ["verification", "authorship attribution", "essay"], "overall_score": 2.758320067467156, "scores": [2.109609263507432, 0.7855795240817399, 0.5557376545587773], "rank_score": 1.1503088140493165} -{"id": "titov-klementiev-2011-bayesian", "title": "A Bayesian Model for Unsupervised Semantic Parsing", "abstract": "We propose a non-parametric Bayesian model for unsupervised semantic parsing. Following Poon and Domingos (2009), we consider a semantic parsing setting where the goal is to (1) decompose the syntactic dependency tree of a sentence into fragments, (2) assign each of these fragments to a cluster of semantically equivalent syntactic structures, and (3) predict predicate-argument relations between the fragments. We use hierarchical Pitman-Yor processes to model statistical dependencies between meaning representations of predicates and those of their arguments, as well as the clusters of their syntactic realizations. We develop a modification of the Metropolis-Hastings split-merge sampler, resulting in an efficient inference algorithm for the model. The method is experimentally evaluated by using the induced semantic representation for the question answering task in the biomedical domain.", "phrases": ["bayesian model", "unsupervised semantic parsing", "fragment"], "overall_score": 2.3915583776249365, "scores": [2.1085460913387712, 0.8197680304616489, 0.5219752895955304], "rank_score": 1.150096470465317} -{"id": "el-mekki-etal-2021-domain", "title": "Domain Adaptation for Arabic Cross-Domain and Cross-Dialect Sentiment Analysis from Contextualized Word Embedding", "abstract": "Finetuning deep pre-trained language models has shown state-of-the-art performance on a wide range of Natural Language Processing (NLP) applications. Nevertheless, their generalization performance drops under domain shift. In the case of the Arabic language, diglossia makes building and annotating corpora for each dialect and/or domain a more challenging task. Unsupervised Domain Adaptation tackles this issue by transferring the learned knowledge from labeled source domain data to unlabeled target domain data. In this paper, we propose a new unsupervised domain adaptation method for Arabic cross-domain and cross-dialect sentiment analysis from Contextualized Word Embedding. Several experiments are performed adopting the coarse-grained and the fine-grained taxonomies of Arabic dialects. Our method yields very promising results and outperforms several domain adaptation methods for most of the evaluated datasets. 
On average, our method increases the performance by an improvement rate of 20.8% over the zero-shot transfer learning from BERT.", "phrases": ["arabic cross-domain", "cross-dialect sentiment analysis", "contextualized word"], "overall_score": 1.5942097537653146, "scores": [1.8408337601698381, 0.8110869045048735, 0.7980170941663408], "rank_score": 1.1499792529470174} -{"id": "passonneau-carpenter-2013-benefits", "title": "The Benefits of a Model of Annotation", "abstract": "Standard agreement measures for interannotator reliability are neither necessary nor sufficient to ensure a high-quality corpus. In a case study of word sense annotation, conventional methods for evaluating labels from trained annotators are contrasted with a probabilistic annotation model applied to crowdsourced data. The annotation model provides far more information, including a certainty measure for each gold standard label; the crowdsourced data was collected at less than half the cost of the conventional approach.", "phrases": ["annotator", "reliability", "probabilistic model"], "overall_score": 3.3859294222642755, "scores": [1.6749808219418392, 1.2423554355637294, 0.5324850288803558], "rank_score": 1.1499404287953081} -{"id": "pramanick-etal-2021-momenta-multimodal", "title": "MOMENTA: A Multimodal Framework for Detecting Harmful Memes and Their Targets", "abstract": "Internet memes have become powerful means to transmit political, psychological, and socio-cultural ideas. Although memes are typically humorous, recent days have witnessed an escalation of harmful memes used for trolling, cyberbullying, and abuse. Detecting such memes is challenging as they can be highly satirical and cryptic. Moreover, while previous work has focused on specific aspects of memes such as hate speech and propaganda, there has been little work on harm in general. Here, we aim to bridge this gap. In particular, we focus on two tasks: (i) detecting harmful memes, and (ii) identifying the social entities they target. We further extend the recently released HarMeme dataset, which covered COVID-19, with additional memes and a new topic: US politics. To solve these tasks, we propose MOMENTA (MultimOdal framework for detecting harmful MemEs aNd Their tArgets), a novel multimodal deep neural network that uses global and local perspectives to detect harmful memes. MOMENTA systematically analyzes the local and the global perspective of the input meme (in both modalities) and relates it to the background context. MOMENTA is interpretable and generalizable, and our experiments show that it outperforms several strong rivaling approaches.", "phrases": ["multimodal framework", "harmful memes", "momenta"], "overall_score": 2.5266282000221176, "scores": [1.7316732977036273, 0.9026426307811657, 0.8154382196577424], "rank_score": 1.1499180493808452} -{"id": "christopoulou-etal-2019-connecting", "title": "Connecting the Dots: Document-level Neural Relation Extraction with Edge-oriented Graphs", "abstract": "Document-level relation extraction is a complex human process that requires logical inference to extract relationships between named entities in text. Existing approaches use graph-based neural models with words as nodes and edges as relations between them, to encode relations across sentences. These models are node-based, i.e., they form pair representations based solely on the two target node representations. However, entity relations can be better expressed through unique edge representations formed as paths between nodes. 
We thus propose an edge-oriented graph neural model for document-level relation extraction. The model utilises different types of nodes and edges to create a document-level graph. An inference mechanism on the graph edges enables learning intra- and inter-sentence relations using multi-instance learning internally. Experiments on two document-level biomedical datasets for chemical-disease and gene-disease associations show the usefulness of the proposed edge-oriented approach.", "phrases": ["relation extraction", "edge", "document-level graph", "logical reasoning"], "overall_score": 3.5006712902070416, "scores": [1.4647353766872482, 1.4481717738153261, 1.1560201736189406, 0.530376982960169], "rank_score": 1.149826076770421} -{"id": "nozza-etal-2021-honest", "title": "HONEST: Measuring Hurtful Sentence Completion in Language Models", "abstract": "Language models have revolutionized the field of NLP. However, language models capture and proliferate hurtful stereotypes, especially in text generation. Our results show that 4.3% of the time, language models complete a sentence with a hurtful word. These cases are not random, but follow language and gender-specific patterns. We propose a score to measure hurtful sentence completions in language models (HONEST). It uses a systematic template- and lexicon-based bias evaluation methodology for six languages. Our findings suggest that these models replicate and amplify deep-seated societal stereotypes about gender roles. Sentence completions refer to sexual promiscuity 9% of the time when the target is female, and to homosexuality 4% of the time when the target is male. The results raise questions about the use of these models in production settings.", "phrases": ["hurtful sentence completion", "language model", "honest"], "overall_score": 2.0600198550668254, "scores": [2.027533760313575, 0.8461624807725281, 0.5754606749719632], "rank_score": 1.1497189720193555} -{"id": "hsu-etal-2018-emotionlines", "title": "EmotionLines: An Emotion Corpus of Multi-Party Conversations", "abstract": "Feeling emotion is a critical characteristic to distinguish people from machines. Among all the multi-modal resources for emotion detection, textual datasets are those containing the least additional information in addition to semantics, and hence are adopted widely for testing the developed systems. However, most of the textual emotional datasets consist of emotion labels of only individual words, sentences or documents, which makes it challenging to discuss the contextual flow of emotions. In this paper, we introduce EmotionLines, the first dataset with emotion labeling on all utterances in each dialogue based only on their textual content. Dialogues in EmotionLines are collected from Friends TV scripts and private Facebook messenger dialogues. Then one of seven emotions, six Ekman's basic emotions plus the neutral emotion, is labeled on each utterance by 5 Amazon MTurkers. A total of 29,245 utterances from 2,000 dialogues are labeled in EmotionLines. 
We also provide several strong baselines for emotion detection models on EmotionLines in this paper.", "phrases": ["conversation", "emotion detection", "textual content", "few year"], "overall_score": 3.322903940667612, "scores": [2.3032853135162132, 1.2117671306806788, 0.5528833808749811, 0.5306476361591175], "rank_score": 1.1496458653077477} -{"id": "yao-etal-2012-unsupervised", "title": "Unsupervised Relation Discovery with Sense Disambiguation", "abstract": "To discover relation types from text, most methods cluster shallow or syntactic patterns of relation mentions, but consider only one possible sense per pattern. In practice this assumption is often violated. In this paper we overcome this issue by inducing clusters of pattern senses from feature representations of patterns. In particular, we employ a topic model to partition entity pairs associated with patterns into sense clusters using local and global features. We merge these sense clusters into semantic relations using hierarchical agglomerative clustering. We compare against several baselines: a generative latent-variable model, a clustering method that does not disambiguate between path senses, and our own approach but with only local features. Experimental results show our proposed approach discovers dramatically more accurate clusters than models without sense disambiguation, and that incorporating global features, such as the document theme, is crucial.", "phrases": ["sense disambiguation", "cluster", "probabilistic model"], "overall_score": 2.948465697936937, "scores": [2.0435727562672072, 0.8534155552336413, 0.5515776682052261], "rank_score": 1.1495219932353582} -{"id": "song-etal-2016-amr", "title": "AMR-to-text generation as a Traveling Salesman Problem", "abstract": "The task of AMR-to-text generation is to generate grammatical text that sustains the semantic meaning for a given AMR graph. We attack the task by first partitioning the AMR graph into smaller fragments, and then generating the translation for each fragment, before finally deciding the order by solving an asymmetric generalized traveling salesman problem (AGTSP). A Maximum Entropy classifier is trained to estimate the traveling costs, and a TSP solver is used to find the optimized solution. The final model reports a BLEU score of 22.44 on the SemEval-2016 Task 8 dataset.", "phrases": ["salesman problem", "agtsp", "amr-to-text generation", "small fragment"], "overall_score": 2.525707430986959, "scores": [2.591372148776054, 0.9559301596212278, 0.5299243381047761, 0.5207693108323944], "rank_score": 1.149498989333613} -{"id": "wang-etal-2020-heterogeneous", "title": "Heterogeneous Graph Neural Networks for Extractive Document Summarization", "abstract": "As a crucial step in extractive document summarization, learning cross-sentence relations has been explored by a plethora of approaches. An intuitive way is to put them in a graph-based neural network, which has a more complex structure for capturing inter-sentence relationships. In this paper, we present a heterogeneous graph-based neural network for extractive summarization (HETERSUMGRAPH), which contains semantic nodes of different granularity levels apart from sentences. These additional nodes act as the intermediary between sentences and enrich the cross-sentence relations. Besides, our graph structure extends naturally from a single-document setting to a multi-document setting by introducing document nodes. 
To our knowledge, we are the first to introduce different types of nodes into graph-based neural networks for extractive document summarization and perform a comprehensive qualitative analysis to investigate their benefits. The code will be released on GitHub.", "phrases": ["extractive document summarization", "graph-based neural network", "inter-sentence relationship", "heterogeneous graph", "multiple document"], "overall_score": 3.499050777386398, "scores": [2.025933108645014, 1.4765986656755432, 1.1627786950970618, 0.5582936354481288, 0.5228649210003473], "rank_score": 1.149293805173219} -{"id": "chollampatt-etal-2016-adapting", "title": "Adapting Grammatical Error Correction Based on the Native Language of Writers with Neural Network Joint Models", "abstract": "An important aspect of the task of grammatical error correction (GEC) that has not yet been adequately explored is adaptation based on the native language (L1) of writers, despite the marked influences of L1 on second language (L2) writing. In this paper, we adapt a neural network joint model (NNJM) using L1-specific learner text and integrate it into a statistical machine translation (SMT) based GEC system. Specifically, we train an NNJM on general learner text (not L1-specific) and subsequently train on L1-specific data using a Kullback-Leibler divergence regularized objective function in order to preserve generalization of the model. We incorporate this adapted NNJM as a feature in an SMT-based English GEC system and show that adaptation achieves significant F0.5 score gains on English texts written by L1 Chinese, Russian, and Spanish writers.", "phrases": ["grammatical error correction", "native language", "neural language model"], "overall_score": 2.5247295562354752, "scores": [1.6879692994208542, 0.8670639023255409, 0.8921286163181508], "rank_score": 1.1490539393548487} -{"id": "xu-etal-2018-double", "title": "Double Embeddings and CNN-based Sequence Labeling for Aspect Extraction", "abstract": "One key task of fine-grained sentiment analysis of product reviews is to extract product aspects or features that users have expressed opinions on. This paper focuses on supervised aspect extraction using deep learning. Unlike other highly sophisticated supervised deep learning models, this paper proposes a novel and yet simple CNN model employing two types of pre-trained embeddings for aspect extraction: general-purpose embeddings and domain-specific embeddings. Without using any additional supervision, this model achieves surprisingly good results, outperforming state-of-the-art sophisticated existing methods. To our knowledge, this paper is the first to report such double embeddings based CNN model for aspect extraction and achieve very good results.", "phrases": ["aspect extraction", "sentiment analysis", "cnn", "convolutional neural network", "sequence tagging problem"], "overall_score": 3.9080871710530984, "scores": [2.3980071737826902, 1.2564184821174067, 1.0415281277077544, 0.5259162249246846, 0.5232937272210595], "rank_score": 1.149032747150719} -{"id": "wiseman-etal-2018-learning", "title": "Learning Neural Templates for Text Generation", "abstract": "While neural, encoder-decoder models have had significant empirical success in text generation, there remain several unaddressed problems with this style of generation. Encoder-decoder models are largely (a) uninterpretable, and (b) difficult to control in terms of their phrasing or content. 
This work proposes a neural generation system using a hidden semi-Markov model (HSMM) decoder, which learns latent, discrete templates jointly with learning to generate. We show that this model learns useful templates, and that these templates make generation both more interpretable and controllable. Furthermore, we show that this approach scales to real data sets and achieves strong performance nearing that of encoder-decoder text generation models.", "phrases": ["text generation", "control", "model decoder"], "overall_score": 4.1793274746093445, "scores": [2.375794028257763, 0.5363521759919367, 0.5346401736628231], "rank_score": 1.1489287926375076} -{"id": "dione-2012-morphological", "title": "A Morphological Analyzer For Wolof Using Finite-State Techniques", "abstract": "This paper reports on the design and implementation of a morphological analyzer for Wolof. The main motivation for this work is to obtain a linguistically motivated tool using finite-state techniques. The finite-state technology is especially attractive in dealing with human language morphologies. Finite-state transducers (FST) are fast, efficient and can be fully reversible, enabling users to perform analysis as well as generation. Hence, I use this approach to construct a new FST tool for Wolof, as a first step towards a computational grammar for the language in the Lexical Functional Grammar framework. This article focuses on the methods used to model complex morphological issues and on developing strategies to limit ambiguities. It discusses experimental evaluations conducted to assess the performance of the analyzer with respect to various statistical criteria. In particular, I also wanted to create morphosyntactically annotated resources for Wolof, obtained by automatically analyzing text corpora with a computational morphology.", "phrases": ["morphological analyzer", "wolof", "finite-state technique"], "overall_score": 1.2621946287486923, "scores": [1.8374271040413321, 0.8222440052573103, 0.7870260788756309], "rank_score": 1.1488990627247577} -{"id": "lee-yeung-2016-annotated", "title": "An Annotated Corpus of Direct Speech", "abstract": "We propose a scheme for annotating direct speech in literary texts, based on the Text Encoding Initiative (TEI) and the coreference annotation guidelines from the Message Understanding Conference (MUC). The scheme encodes the speakers and listeners of utterances in a text, as well as the quotative verbs that report the utterances. We measure inter-annotator agreement on this annotation task. We then present statistics on a manually annotated corpus that consists of books from the New Testament. Finally, we visualize the corpus as a conversational network.", "phrases": ["annotated corpus", "direct speech", "new testament"], "overall_score": 1.2620386662778502, "scores": [1.9587760666579226, 0.9307390852416217, 0.5567561467982027], "rank_score": 1.1487570995659155} -{"id": "kiperwasser-ballesteros-2018-scheduled", "title": "Scheduled Multi-Task Learning: From Syntax to Translation", "abstract": "Neural encoder-decoder models of machine translation have achieved impressive results, while learning linguistic knowledge of both the source and target languages in an implicit end-to-end manner. We propose a framework in which our model begins learning syntax and translation interleaved, gradually putting more focus on translation. 
Using this approach, we achieve considerable improvements in terms of BLEU score on a relatively large parallel corpus (WMT14 English to German) and in a low-resource (WIT German to English) setup.", "phrases": ["multi-task learning", "syntax", "part-of-speech tagging"], "overall_score": 2.6450171098728625, "scores": [1.6747760373983425, 0.9306564143438816, 0.8407165543301894], "rank_score": 1.1487163353574712} -{"id": "suhr-etal-2019-executing", "title": "Executing Instructions in Situated Collaborative Interactions", "abstract": "We study a collaborative scenario where a user not only instructs a system to complete tasks, but also acts alongside it. This allows the user to adapt to the system abilities by changing their language or deciding to simply accomplish some tasks themselves, and requires the system to effectively recover from errors as the user strategically assigns it new goals. We build a game environment to study this scenario, and learn to map user instructions to system actions. We introduce a learning approach focused on recovery from cascading errors between instructions, and modeling methods to explicitly reason about instructions with multiple goals. We evaluate with a new evaluation protocol using recorded interactions and online games with human users, and observe how users adapt to the system abilities.", "phrases": ["instruction", "collaborative scenario", "environment"], "overall_score": 2.2350646174409436, "scores": [2.3686511965893478, 0.5468634026092551, 0.5302734067779429], "rank_score": 1.148596001992182} -{"id": "chieu-ng-2003-named", "title": "Named Entity Recognition with a Maximum Entropy Approach", "abstract": "The named entity recognition (NER) task involves identifying noun phrases that are names, and assigning a class to each name. This task has its origins in the Message Understanding Conferences (MUC) in the 1990s, a series of conferences aimed at evaluating systems that extract information from natural language texts. It became evident that in order to achieve good performance in information extraction, a system needs to be able to recognize names. A separate subtask on NER was created in MUC-6 and MUC-7 (Chinchor, 1998).", "phrases": ["entity recognition", "global feature", "same token", "gazetteer"], "overall_score": 3.3803304926761677, "scores": [2.6087256657759257, 0.882648140621761, 0.5724950166103522, 0.5282867850298992], "rank_score": 1.1480389020094846} -{"id": "klementiev-etal-2012-inducing", "title": "Inducing Crosslingual Distributed Representations of Words", "abstract": "Distributed representations of words have proven extremely useful in numerous natural language processing tasks. Their appeal is that they can help alleviate data sparsity problems common to supervised learning. Methods for inducing these representations require only unlabeled language data, which are plentiful for many natural languages. In this work, we induce distributed representations for a pair of languages jointly. We treat it as a multitask learning problem where each task corresponds to a single word, and task relatedness is derived from co-occurrence statistics in bilingual parallel data. These representations can be used for a number of crosslingual learning tasks, where a learner can be trained on annotations present in one language and applied to test data in another. 
We show that our representations are informative by using them for crosslingual document classification, where classifiers trained on these representations substantially outperform strong baselines (e.g. machine translation) when applied to a new language.", "phrases": ["bilingual parallel data", "new language", "word embedding", "cross-lingual representation", "mapping"], "overall_score": 4.660401120337505, "scores": [2.467298586563752, 0.9593551351265623, 0.8873119932559411, 0.847771300178348, 0.5770470390202034], "rank_score": 1.1477568108289613} -{"id": "yan-etal-2021-partition", "title": "A Partition Filter Network for Joint Entity and Relation Extraction", "abstract": "In joint entity and relation extraction, existing work either sequentially encodes task-specific features, leading to an imbalance in inter-task feature interaction where features extracted later have no direct contact with those that come first, or encodes entity features and relation features in a parallel manner, meaning that feature representation learning for each task is largely independent of each other except for input sharing. We propose a partition filter network to model two-way interaction between tasks properly, where feature encoding is decomposed into two steps: partition and filter. In our encoder, we leverage two gates: entity and relation gate, to segment neurons into two task partitions and one shared partition. The shared partition represents inter-task information valuable to both tasks and is evenly shared across two tasks to ensure proper two-way interaction. The task partitions represent intra-task information and are formed through concerted efforts of both gates, making sure that encoding of task-specific features is dependent upon each other. Experiment results on six public datasets show that our model performs significantly better than previous approaches. In addition, contrary to what previous work has claimed, our auxiliary experiments suggest that relation prediction is contributory to named entity prediction in a non-negligible way. The source code can be found at .", "phrases": ["partition filter network", "joint entity", "relation extraction"], "overall_score": 1.5910977415111838, "scores": [1.7196403837242713, 0.903960345397844, 0.819602502749607], "rank_score": 1.1477344106239074} -{"id": "zhang-nivre-2011-transition", "title": "Transition-based Dependency Parsing with Rich Non-local Features", "abstract": "Transition-based dependency parsers generally use heuristic decoding algorithms but can accommodate arbitrarily rich feature representations. In this paper, we show that we can improve the accuracy of such parsers by considering even richer feature sets than those employed in previous systems. In the standard Penn Treebank setup, our novel features improve attachment score from 91.4% to 92.9%, giving the best results so far for transition-based parsing and rivaling the best results overall. For the Chinese Treebank, they give a significant improvement of the state of the art. &#13;
An open source release of our parser is freely available.", "phrases": ["dependency parsing", "non-local feature", "transition-based model", "graph-based model", "linear time"], "overall_score": 4.144198312379142, "scores": [2.95584965076812, 1.1227616012229316, 0.5817268893948763, 0.5439894630847172, 0.5341001480234621], "rank_score": 1.1476855504988215} -{"id": "lai-nguyen-2019-extending", "title": "Extending Event Detection to New Types with Learning from Keywords", "abstract": "Traditional event detection classifies a word or a phrase in a given sentence for a set of predefined event types. The limitation of such a predefined set is that it prevents the adaptation of the event detection models to new event types. We study a novel formulation of event detection that describes types via several keywords to match the contexts in documents. This facilitates the operation of the models to new types. We introduce a novel feature-based attention mechanism for convolutional neural networks for event detection in the new formulation. Our extensive experiments demonstrate the benefits of the new formulation for new type extension for event detection as well as the proposed attention mechanism for this problem.", "phrases": ["event detection", "new type", "keyword"], "overall_score": 2.0562561633136927, "scores": [2.0240634552969228, 0.8609379108660083, 0.557853880807676], "rank_score": 1.147618415656869} -{"id": "choi-etal-2018-ultra", "title": "Ultra-Fine Entity Typing", "abstract": "We introduce a new entity typing task: given a sentence with an entity mention, the goal is to predict a set of free-form phrases (e.g. skyscraper, songwriter, or criminal) that describe appropriate types for the target entity. This formulation allows us to use a new type of distant supervision at large scale: head words, which indicate the type of the noun phrases they appear in. We show that these ultra-fine types can be crowd-sourced, and introduce new evaluation sets that are much more diverse and fine-grained than existing benchmarks. We present a model that can predict ultra-fine types, and is trained using a multitask objective that pools our new head-word supervision with prior supervision from entity linking. Experimental results demonstrate that our model is effective in predicting entity types at varying granularity; it achieves state of the art performance on an existing fine-grained entity typing benchmark, and sets baselines for our newly-introduced datasets.", "phrases": ["entity type", "noun phrase", "granularity", "ultra-fine entity typing", "large type set"], "overall_score": 4.046064606629615, "scores": [2.4141568686511503, 1.3661397341104238, 0.8898383695456131, 0.544291680784352, 0.5224578463599506], "rank_score": 1.1473768998902978} -{"id": "poon-domingos-2009-unsupervised", "title": "Unsupervised Semantic Parsing", "abstract": "We present the first unsupervised approach to the problem of learning a semantic parser, using Markov logic. Our USP system transforms dependency trees into quasi-logical forms, recursively induces lambda forms from these, and clusters them to abstract away syntactic variations of the same meaning. The MAP semantic parse of a sentence is obtained by recursively assigning its parts to lambda-form clusters and composing them. We evaluate our approach by using it to extract a knowledge base from biomedical abstracts and answer questions. &#13;
USP substantially outperforms TextRunner, DIRT and an informed baseline on both precision and recall on this task.", "phrases": ["usp", "cluster", "unsupervised semantic parsing", "formalism", "negation"], "overall_score": 3.823024121303952, "scores": [2.6876395242336426, 1.3008897958850851, 0.5868191853328374, 0.5827016705849218, 0.5784286469618968], "rank_score": 1.1472957645996769} -{"id": "irvine-callison-burch-2017-comprehensive", "title": "A Comprehensive Analysis of Bilingual Lexicon Induction", "abstract": "Bilingual lexicon induction is the task of inducing word translations from monolingual corpora in two languages. In this article we present the most comprehensive analysis of bilingual lexicon induction to date. We present experiments on a wide range of languages and data sizes. We examine translation into English from 25 foreign languages: Albanian, Azeri, Bengali, Bosnian, Bulgarian, Cebuano, Gujarati, Hindi, Hungarian, Indonesian, Latvian, Nepali, Romanian, Serbian, Slovak, Somali, Spanish, Swedish, Tamil, Telugu, Turkish, Ukrainian, Uzbek, Vietnamese, and Welsh. We analyze the behavior of bilingual lexicon induction on low-frequency words, rather than testing solely on high-frequency words, as previous research has done. Low-frequency words are more relevant to statistical machine translation, where systems typically lack translations of rare words that fall outside of their training data. We systematically explore a wide range of features and phenomena that affect the quality of the translations discovered by bilingual lexicon induction. We provide illustrative examples of the highest ranking translations for orthogonal signals of translation equivalence like contextual similarity and temporal similarity. We analyze the effects of frequency and burstiness, and the sizes of the seed bilingual dictionaries and the monolingual training corpora. Additionally, we introduce a novel discriminative approach to bilingual lexicon induction. Our discriminative model is capable of combining a wide variety of features that individually provide only weak indications of translation equivalence. When feature weights are discriminatively set, these signals produce dramatically higher translation quality than previous approaches that combined signals in an unsupervised fashion (e.g., using minimum reciprocal rank). We also directly compare our model's performance against a sophisticated generative approach, the matching canonical correlation analysis (MCCA) algorithm used by Haghighi et al. (2008). Our algorithm achieves an accuracy of 42% versus MCCA's 15%.", "phrases": ["comprehensive analysis", "bilingual lexicon induction", "bli"], "overall_score": 2.2323638664188374, "scores": [2.1266641533403825, 0.7888087708129572, 0.5261513474029651], "rank_score": 1.1472080905187683} -{"id": "zhu-etal-2021-enhancing", "title": "Enhancing Factual Consistency of Abstractive Summarization", "abstract": "Automatic abstractive summaries are found to often distort or fabricate facts in the article. This inconsistency between summary and original text has seriously impacted its applicability. We propose a fact-aware summarization model FASum to extract and integrate factual relations into the summary generation process via graph attention. We then design a factual corrector model FC to automatically correct factual errors from summaries generated by existing systems. 
Empirical results show that the fact-aware summarization can produce abstractive summaries with higher factual consistency compared with existing systems, and the correction model improves the factual consistency of given summaries via modifying only a few keywords.", "phrases": ["factual consistency", "graph attention", "knowledge graph"], "overall_score": 2.5205809073003356, "scores": [2.3868159883272875, 0.5337080293375791, 0.5209734059022777], "rank_score": 1.1471658078557148} -{"id": "zhang-mcdonald-2012-generalized", "title": "Generalized Higher-Order Dependency Parsing with Cube Pruning", "abstract": "State-of-the-art graph-based parsers use features over higher-order dependencies that rely on decoding algorithms that are slow and difficult to generalize. On the other hand, transition-based dependency parsers can easily utilize such features without increasing the linear complexity of the shift-reduce system beyond a constant. In this paper, we attempt to address this imbalance for graph-based parsing by generalizing the Eisner (1996) algorithm to handle arbitrary features over higher-order dependencies. The generalization is at the cost of asymptotic efficiency. To account for this, cube pruning for decoding is utilized (Chiang, 2007). For the first time, label tuple and structural features such as valencies can be scored efficiently with third-order features in a graph-based parser. Our parser achieves the state-of-the-art unlabeled accuracy of 93.06% and labeled accuracy of 91.86% on the standard test set for English, at a faster speed than a reimplementation of the third-order model of Koo et al. (2010).", "phrases": ["cube pruning", "dependency parser", "higher-order feature"], "overall_score": 2.7502043580670725, "scores": [1.8799965543446633, 0.9711135469051899, 0.5896627998004332], "rank_score": 1.1469243003500955} -{"id": "chernodub-etal-2019-targer", "title": "TARGER: Neural Argument Mining at Your Fingertips", "abstract": "We present TARGER, an open source neural argument mining framework for tagging arguments in free input texts and for keyword-based retrieval of arguments from an argument-tagged web-scale corpus. The currently available models are pre-trained on three recent argument mining datasets and enable the use of neural argument mining without any reproducibility effort on the user's side. The open source code ensures portability to other domains and use cases.", "phrases": ["neural argument mining", "input text", "retrieval", "targer"], "overall_score": 2.231591413999871, "scores": [2.1902025314025573, 0.9875172594985594, 0.8516708422956143, 0.557853880807676], "rank_score": 1.1468111285011018} -{"id": "kuhlmann-nivre-2006-mildly", "title": "Mildly Non-Projective Dependency Structures", "abstract": "Syntactic parsing requires a fine balance between expressivity and complexity, so that naturally occurring structures can be accurately parsed without compromising efficiency. In dependency-based parsing, several constraints have been proposed that restrict the class of permissible structures, such as projectivity, planarity, multi-planarity, well-nestedness, gap degree, and edge degree. While projectivity is generally taken to be too restrictive for natural language syntax, it is not clear which of the other proposals strikes the best balance between expressivity and complexity. &#13;
In this paper, we review and compare the different constraints theoretically, and provide an experimental evaluation using data from two treebanks, investigating how large a proportion of the structures found in the treebanks are permitted under different constraints. The results indicate that a combination of the well-nestedness constraint and a parametric constraint on discontinuity gives a very good fit with the linguistic data.", "phrases": ["dependency structure", "planarity", "gap degree", "edge degree", "non-projective structure"], "overall_score": 3.2487820717409073, "scores": [1.2703053053069013, 1.2681684314097874, 1.2558839739423904, 1.0746378842248727, 0.864392041731043], "rank_score": 1.146677527322999} -{"id": "ringger-etal-2008-assessing", "title": "Assessing the Costs of Machine-Assisted Corpus Annotation through a User Study", "abstract": "Fixed, limited budgets often constrain the amount of expert annotation that can go into the construction of annotated corpora. Estimating the cost of annotation is the first step toward using annotation resources wisely. We present here a study of the cost of annotation. This study includes the participation of annotators at various skill levels and with varying backgrounds. Conducted over the web, the study consists of tests that simulate machine-assisted pre-annotation, requiring correction by the annotator rather than annotation from scratch. The study also includes tests representative of an annotation scenario involving Active Learning as it progresses from a na\u00efve model to a knowledgeable model; in particular, annotators encounter pre-annotation of varying degrees of accuracy. The annotation interface lists tags considered likely by the annotation model in preference to other tags. We present the experimental parameters of the study and report both descriptive and inferential statistics on the results of the study. We conclude with a model for estimating the hourly cost of annotation for annotators of various skill levels. We also present models for two granularities of annotation: sentence at a time and word at a time.", "phrases": ["cost", "corpus annotation", "user study", "background", "active learning"], "overall_score": 2.7494803053484653, "scores": [2.534128814059038, 0.9361899109399971, 0.8581128874934594, 0.8578730728405086, 0.5468070492336842], "rank_score": 1.1466223469133374} -{"id": "frermann-lapata-2016-bayesian", "title": "A Bayesian Model of Diachronic Meaning Change", "abstract": "Word meanings change over time and an automated procedure for extracting this information from text would be useful for historical exploratory studies, information retrieval or question answering. We present a dynamic Bayesian model of diachronic meaning change, which infers temporal word representations as a set of senses and their prevalence. Unlike previous work, we explicitly model language change as a smooth, gradual process. We experimentally show that this modeling decision is beneficial: our model performs competitively on meaning change detection tasks whilst inducing discernible word senses and their development over time. 
Application of our model to the SemEval-2015 temporal classification benchmark datasets further reveals that it performs on par with highly optimized task-specific systems.", "phrases": ["diachronic meaning change", "gradual process", "word sense"], "overall_score": 3.313302846908889, "scores": [2.026050976956396, 0.8733290790295688, 0.5395922885133119], "rank_score": 1.146324114833092} -{"id": "okuma-etal-2009-bypassed", "title": "Bypassed alignment graph for learning coordination in Japanese sentences", "abstract": "Past work on English coordination has focused on coordination scope disambiguation. In Japanese, detecting whether coordination exists in a sentence is also a problem, and the state-of-the-art alignment-based method specialized for scope disambiguation does not perform well on Japanese sentences. To take the detection of coordination into account, this paper introduces a 'bypass' to the alignment graph used by this method, so as to explicitly represent the non-existence of coordinate structures in a sentence. We also present an effective feature decomposition scheme based on the distance between words in conjuncts.", "phrases": ["alignment graph", "coordination", "japanese sentence"], "overall_score": 1.588852986907148, "scores": [1.725388559340766, 0.8927461062887982, 0.8202108217392752], "rank_score": 1.1461151624562798} -{"id": "vashishtha-etal-2020-temporal", "title": "Temporal Reasoning in Natural Language Inference", "abstract": "We introduce five new natural language inference (NLI) datasets focused on temporal reasoning. We recast four existing datasets annotated for event duration\u2014how long an event lasts\u2014and event ordering\u2014how events are temporally arranged\u2014into more than one million NLI examples. We use these datasets to investigate how well neural models trained on a popular NLI corpus capture these forms of temporal reasoning.", "phrases": ["natural language inference", "nli", "temporal reasoning", "duration"], "overall_score": 1.8445148385679442, "scores": [2.5144846433129673, 0.9737303689818023, 0.5527717483084175, 0.5432592654601998], "rank_score": 1.1460615065158466} -{"id": "lee-etal-2020-postech", "title": "POSTECH-ETRI's Submission to the WMT2020 APE Shared Task: Automatic Post-Editing with Cross-lingual Language Model", "abstract": "This paper describes POSTECH-ETRI's submission to WMT2020 for the shared task on automatic post-editing (APE) for 2 language pairs: English-German (En-De) and English-Chinese (En-Zh). We propose APE systems based on a cross-lingual language model, which jointly adopts translation language modeling (TLM) and masked language modeling (MLM) training objectives in the pre-training stage; the APE models then utilize jointly learned language representations between the source language and the target language. In addition, we created 19 million new synthetic triplets as additional training data for our final ensemble model. &#13;
According to experimental results on the WMT2020 APE development data set, our models showed an improvement over the baseline by TER of -3.58 and a BLEU score of +5.3 for the En-De subtask; and TER of -5.29 and a BLEU score of +7.32 for the En-Zh subtask.", "phrases": ["automatic post-editing", "language model", "postech-etri"], "overall_score": 1.2588549255066495, "scores": [1.7973730944676725, 0.8520536188610414, 0.7881506881569582], "rank_score": 1.1458591338285575} -{"id": "luan-etal-2018-multi", "title": "Multi-Task Identification of Entities, Relations, and Coreference for Scientific Knowledge Graph Construction", "abstract": "We introduce a multi-task setup of identifying entities, relations, and coreference clusters in scientific articles. We create SciERC, a dataset that includes annotations for all three tasks and develop a unified framework called SciIE with shared span representations. The multi-task setup reduces cascading errors between tasks and leverages cross-sentence relations through coreference links. Experiments show that our multi-task model outperforms previous models in scientific information extraction without using any domain-specific features. We further show that the framework supports construction of a scientific knowledge graph, which we use to analyze information in scientific literature.", "phrases": ["coreference", "knowledge graph", "scientific article", "information extraction", "multi-task learning"], "overall_score": 4.073373576643679, "scores": [1.3718931445022733, 1.253749326491374, 1.236733075655515, 0.9551896690023404, 0.9109506795506855], "rank_score": 1.1457031790404377} -{"id": "feng-hirst-2011-classifying", "title": "Classifying arguments by scheme", "abstract": "Argumentation schemes are structures or templates for various kinds of arguments. Given the text of an argument with premises and conclusion identified, we classify it as an instance of one of five common schemes, using features specific to each scheme. We achieve accuracies of 63--91% in one-against-others classification and 80--94% in pairwise classification (baseline = 50% in both cases).", "phrases": ["scheme", "argumentation scheme", "one-against-other classification", "monologic text", "araucaria corpus"], "overall_score": 3.5923313191554658, "scores": [1.8908925629783495, 1.8357070801793771, 0.8881487273019594, 0.5632031547486075, 0.5505414519202583], "rank_score": 1.1456985954257104} -{"id": "liu-etal-2021-fast", "title": "Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders", "abstract": "Previous work has indicated that pretrained Masked Language Models (MLMs) are not effective as universal lexical and sentence encoders off-the-shelf, i.e., without further task-specific fine-tuning on NLI, sentence similarity, or paraphrasing tasks using annotated task data. In this work, we demonstrate that it is possible to turn MLMs into effective lexical and sentence encoders even without any additional data, relying simply on self-supervision. We propose an extremely simple, fast, and effective contrastive learning technique, termed Mirror-BERT, which converts MLMs (e.g., BERT and RoBERTa) into such encoders in 20-30 seconds with no access to additional external knowledge. Mirror-BERT relies on identical and slightly modified string pairs as positive (i.e., synonymous) fine-tuning examples, and aims to maximise their similarity during \u201cidentity fine-tuning\u201d. 
We report huge gains over off-the-shelf MLMs with Mirror-BERT both in lexical-level and in sentence-level tasks, across different domains and different languages. Notably, in sentence similarity (STS) and question-answer entailment (QNLI) tasks, our self-supervised Mirror-BERT model even matches the performance of the Sentence-BERT models from prior work which rely on annotated task data. Finally, we delve deeper into the inner workings of MLMs, and suggest some evidence on why this simple Mirror-BERT fine-tuning approach can yield effective universal lexical and sentence encoders.", "phrases": ["masked language models", "sentence encoder", "fine-tuning", "contrastive learning technique"], "overall_score": 2.5169190783537543, "scores": [1.9077629234215925, 0.9173525970264382, 1.2259958622348581, 0.5308855680432182], "rank_score": 1.1454992376815267} -{"id": "sinha-etal-2019-clutrr", "title": "CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text", "abstract": "The recent success of natural language understanding (NLU) systems has been troubled by results highlighting the failure of these models to generalize in a systematic and robust way. In this work, we introduce a diagnostic benchmark suite, named CLUTRR, to clarify some key issues related to the robustness and systematicity of NLU systems. Motivated by the classic work on inductive logic programming, CLUTRR requires that an NLU system infer kinship relations between characters in short stories. Successful performance on this task requires both extracting relationships between entities, as well as inferring the logical rules governing these relationships. CLUTRR allows us to precisely measure a model's ability for systematic generalization by evaluating on held-out combinations of logical rules, and allows us to evaluate a model's robustness by adding curated noise facts. Our empirical results highlight a substantial performance gap between state-of-the-art NLU models (e.g., BERT and MAC) and a graph neural network model that works directly with symbolic inputs\u2014with the graph-based model exhibiting both stronger generalization and greater robustness.", "phrases": ["generalization", "story", "clutrr", "synthetic dataset"], "overall_score": 2.381943334626033, "scores": [2.6349370202094025, 0.8801684532798753, 0.5423486461413138, 0.5244363290940738], "rank_score": 1.1454726121811662} -{"id": "regneri-etal-2013-grounding", "title": "Grounding Action Descriptions in Videos", "abstract": "Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. 
Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.", "phrases": ["action", "video", "visual information", "language description"], "overall_score": 3.1015679965211764, "scores": [1.6313397353441492, 1.3288865753860608, 1.072164450288887, 0.5488655174041017], "rank_score": 1.1453140696057997} -{"id": "luu-etal-2014-taxonomy", "title": "Taxonomy Construction Using Syntactic Contextual Evidence", "abstract": "Taxonomies are the backbone of many structured, semantic knowledge resources. Recent works for extracting taxonomic relations from text focused on collecting lexical-syntactic patterns to extract the taxonomic relations by matching the patterns to text. These approaches, however, often show low coverage due to the lack of contextual analysis across sentences. To address this issue, we propose a novel approach that collectively utilizes contextual information of terms in syntactic structures such that if the set of contexts of a term includes most of the contexts of another term, a subsumption relation between the two terms is inferred. We apply this method to the task of taxonomy construction from scratch, where we introduce another novel graph-based algorithm for taxonomic structure induction. Our experiment results show that the proposed method is well complementary with previous methods of linguistic pattern matching and significantly improves recall and thus F-measure.", "phrases": ["contextual information", "recall", "taxonomy construction"], "overall_score": 2.7458622188813484, "scores": [2.0148149837267693, 0.8384312187372926, 0.582094260520666], "rank_score": 1.145113487661576} -{"id": "lee-etal-2019-latent", "title": "Latent Retrieval for Weakly Supervised Open Domain Question Answering", "abstract": "Recent work on open domain question answering (QA) assumes strong supervision of the supporting evidence and/or assumes a blackbox information retrieval (IR) system to retrieve evidence candidates. We argue that both are suboptimal, since gold evidence is not always available, and QA is fundamentally different from IR. We show for the first time that it is possible to jointly learn the retriever and reader from question-answer string pairs and without any IR system. In this setting, evidence retrieval from all of Wikipedia is treated as a latent variable. Since this is impractical to learn from scratch, we pre-train the retriever with an Inverse Cloze Task. We evaluate on open versions of five QA datasets. On datasets where the questioner already knows the answer, a traditional IR system such as BM25 is sufficient. On datasets where a user is genuinely seeking an answer, we show that learned retrieval is crucial, outperforming BM25 by up to 19 points in exact match.", "phrases": ["retriever", "domain question", "inverse cloze task", "self-supervised task", "sentence encoder"], "overall_score": 4.958908898382524, "scores": [2.058292869914322, 1.6303380249529913, 0.9568706675698261, 0.5548331911438211, 0.5249183487662049], "rank_score": 1.145050620469433} -{"id": "purandare-litman-2006-humor", "title": "Humor: Prosody Analysis and Automatic Recognition for F*R*I*E*N*D*S*", "abstract": "We analyze humorous spoken conversations from a classic comedy television show, FRIENDS, by examining acoustic-prosodic and linguistic features and their utility in automatic humor recognition. &#13;
Using a simple annotation scheme, we automatically label speaker turns in our corpus that are followed by laughs as humorous and the rest as non-humorous. Our humor-prosody analysis reveals significant differences in prosodic characteristics (such as pitch, tempo, energy etc.) of humorous and non-humorous speech, even when accounting for gender and speaker differences. Humor recognition was carried out using standard supervised learning classifiers, and shows promising results significantly above the baseline.", "phrases": ["conversation", "comedy television show", "characteristic", "humor"], "overall_score": 2.8452939596163502, "scores": [2.138862638131289, 1.342792104920692, 0.5524469314178462, 0.5460204156040128], "rank_score": 1.14503052251846} -{"id": "weeds-etal-2004-characterising", "title": "Characterising Measures of Lexical Distributional Similarity", "abstract": "This work investigates the variation in a word's distributionally nearest neighbours with respect to the similarity measure used. We identify one type of variation as being the relative frequency of the neighbour words with respect to the frequency of the target word. We then demonstrate a three-way connection between relative frequency of similar words, a concept of distributional generality and the semantic relation of hyponymy. Finally, we consider the impact that this has on one application of distributional similarity methods (judging the compositionality of collocations).", "phrases": ["similarity measure", "target word", "distributional inclusion hypothesis", "entailment", "generality"], "overall_score": 4.037634644032837, "scores": [1.7001967184210596, 1.386804010037814, 1.1472565626809867, 0.902245122519234, 0.5884293053861753], "rank_score": 1.144986343809054} -{"id": "swanson-etal-2015-argument", "title": "Argument Mining: Extracting Arguments from Online Dialogue", "abstract": "Online forums are now one of the primary venues for public dialogue on current social and political issues. The related corpora are often huge, covering any topic imaginable. Our aim is to use these dialogue corpora to automatically discover the semantic aspects of arguments that conversants are making across multiple dialogues on a topic. We frame this goal as consisting of two tasks: argument extraction and argument facet similarity. We focus here on the argument extraction task, and show that we can train regressors to predict the quality of extracted arguments with RRSE values as low as .73 for some topics. A secondary goal is to develop regressors that are topic independent: we report results of cross-domain training and domain-adaptation with RRSE values for several topics as low as .72, when trained on topic independent features.", "phrases": ["regressor", "argument mining", "online forum dialogue"], "overall_score": 3.1733017111469013, "scores": [2.3151905180219123, 0.5940039101998079, 0.5243855532153519], "rank_score": 1.144526660479024} -{"id": "elliott-etal-2017-findings", "title": "Findings of the Second Shared Task on Multimodal Machine Translation and Multilingual Image Description", "abstract": "We present the results from the second shared task on multimodal machine translation and multilingual image description. Nine teams submitted 19 systems to two tasks. The multimodal translation task, in which the source sentence is supplemented by an image, was extended with a new language (French) and two new test sets. The multilingual image description task was changed such that at test time, only the image is given. &#13;
Compared to last year, multimodal systems improved, but text-only systems remain competitive.", "phrases": ["multimodal machine translation", "multilingual image description", "edition"], "overall_score": 3.428661572907285, "scores": [1.9542318503597946, 0.9298889507774543, 0.549425250198996], "rank_score": 1.1445153504454149} -{"id": "lusetti-etal-2018-encoder", "title": "Encoder-Decoder Methods for Text Normalization", "abstract": "Text normalization is the task of mapping non-canonical language, typical of speech transcription and computer-mediated communication, to a standardized writing. It is an up-stream task necessary to enable the subsequent direct employment of standard natural language processing tools and indispensable for languages such as Swiss German, with strong regional variation and no written standard. Text normalization has been addressed with a variety of methods, most successfully with character-level statistical machine translation (CSMT). In the meantime, machine translation has changed and the new methods, known as neural encoder-decoder (ED) models, resulted in remarkable improvements. Text normalization, however, has not yet followed. A number of neural methods have been tried, but CSMT remains the state-of-the-art. In this work, we normalize Swiss German WhatsApp messages using the ED framework. We exploit the flexibility of this framework, which allows us to learn from the same training data in different ways. In particular, we modify the decoding stage of a plain ED model to include target-side language models operating at different levels of granularity: characters and words. Our systematic comparison shows that our approach results in an improvement over the CSMT state-of-the-art.", "phrases": ["text normalization", "non-canonical language", "speech transcription", "german whatsapp message", "encoder-decoder"], "overall_score": 2.3797707785254216, "scores": [3.056775271613543, 0.8892936869822401, 0.6160444425259554, 0.5947371238041029, 0.5652886427926491], "rank_score": 1.144427833543698} -{"id": "munteanu-marcu-2006-extracting", "title": "Extracting Parallel Sub-Sentential Fragments from Non-Parallel Corpora", "abstract": "We present a novel method for extracting parallel sub-sentential fragments from comparable, non-parallel bilingual corpora. By analyzing potentially similar sentence pairs using a signal processing-inspired approach, we detect which segments of the source sentence are translated into segments in the target sentence, and which are not. This method enables us to extract useful machine translation training data even from very non-parallel corpora, which contain no parallel sentence pairs. We evaluate the quality of the extracted data by showing that it improves the performance of a state-of-the-art statistical machine translation system.", "phrases": ["parallel sub-sentential fragment", "fragment", "non-parallel corpora", "parallel data", "word translation probability"], "overall_score": 3.966217745895209, "scores": [0.9146634379067692, 1.9351588191173241, 1.284364408074809, 1.045120614085614, 0.5427353939043051], "rank_score": 1.1444085346177642} -{"id": "lapata-2006-automatic", "title": "Automatic Evaluation of Information Ordering: Kendall's Tau", "abstract": "This article considers the automatic evaluation of information ordering, a task underlying many text-based applications such as concept-to-text generation and multidocument summarization. We propose an evaluation method based on Kendall's \u03c4, a metric of rank correlation. &#13;
The method is inexpensive, robust, and representation independent. We show that Kendall's \u03c4 correlates reliably with human ratings and reading times.", "phrases": ["information ordering", "reading time", "automatic evaluation"], "overall_score": 2.226515394230634, "scores": [2.023922406217636, 0.8623784951114147, 0.5463068097385129], "rank_score": 1.1442025703558547} -{"id": "shinyama-sekine-2003-paraphrase", "title": "Paraphrase Acquisition for Information Extraction", "abstract": "We are trying to find paraphrases from Japanese news articles which can be used for Information Extraction. We focused on the fact that a single event can be reported in more than one article in different ways. However, certain kinds of noun phrases such as names, dates and numbers behave as \"anchors\" which are unlikely to change across articles. Our key idea is to identify these anchors among comparable articles and extract portions of expressions which share the anchors. This way we can extract expressions which convey the same information. Obtained paraphrases are generalized as templates and stored for future use. In this paper, first we describe our basic idea of paraphrase acquisition. Our method is divided into roughly four steps, each of which is explained in turn. Then we illustrate several issues which we encounter in real texts. To solve these problems, we introduce two techniques: coreference resolution and structural restriction of possible portions of expressions. Finally we discuss the experimental results and conclusions.", "phrases": ["information extraction", "news article", "paraphrase acquisition", "same event"], "overall_score": 2.743064286147947, "scores": [1.9789270951012836, 1.0013586855400627, 1.0439332269786414, 0.5515676287108978], "rank_score": 1.1439466590827214} -{"id": "wang-etal-2018-modeling", "title": "Modeling Semantic Plausibility by Injecting World Knowledge", "abstract": "Distributional data tells us that a man can swallow candy, but not that a man can swallow a paintball, since this is never attested. However both are physically plausible events. This paper introduces the task of semantic plausibility: recognizing plausible but possibly novel events. We present a new crowdsourced dataset of semantic plausibility judgments of single events such as man swallow paintball. Simple models based on distributional representations perform poorly on this task, despite doing well on selection preference, but injecting manually elicited knowledge about entity properties provides a substantial performance boost. Our error analysis shows that our new dataset is a great testbed for semantic plausibility models: more sophisticated knowledge representation and propagation could address many of the remaining errors.", "phrases": ["semantic plausibility", "world knowledge", "distributional data"], "overall_score": 1.5853668373377467, "scores": [2.011268064307719, 0.8704803670343241, 0.5490528799834378], "rank_score": 1.1436004371084936} -{"id": "chiu-etal-2016-intrinsic", "title": "Intrinsic Evaluation of Word Vectors Fails to Predict Extrinsic Performance", "abstract": "The quality of word representations is frequently assessed using correlation with human judgements of word similarity. Here, we question whether such intrinsic evaluation can predict the merits of the representations for downstream tasks. &#13;
We study the correlation between results on ten word similarity benchmarks and tagger performance on three standard sequence labeling tasks using a variety of word vectors induced from an unannotated corpus of 3.8 billion words, and demonstrate that most intrinsic evaluations are poor predictors of downstream performance. We argue that this issue can be traced in part to a failure to distinguish specific similarity from relatedness in intrinsic evaluation datasets. We make our evaluation tools openly available to facilitate further study.", "phrases": ["word vector", "poor predictor", "intrinsic evaluation"], "overall_score": 3.017829745753135, "scores": [2.0350185290510905, 0.8537274876376242, 0.5418309304893489], "rank_score": 1.1435256490593544} -{"id": "daya-etal-2004-learning", "title": "Learning Hebrew Roots: Machine Learning with Linguistic Constraints", "abstract": "The morphology of Semitic languages is unique in the sense that the major word-formation mechanism is an inherently non-concatenative process of interdigitation, whereby two morphemes, a root and a pattern, are interwoven. Identifying the root of a given word in a Semitic language is an important task, in some cases a crucial part of morphological analysis. It is also a non-trivial task, which many humans find challenging. We present a machine learning approach to the problem of extracting roots of Hebrew words. Given the large number of potential roots (thousands), we address the problem as one of combining several classifiers, each predicting the value of one of the root\u2019s consonants. We show that when these predictors are combined by enforcing some fairly simple linguistics constraints, high accuracy, which compares favorably with human performance on this task, can be achieved.", "phrases": ["hebrew root", "machine learning", "linguistic constraint", "morphology"], "overall_score": 1.8402901266580511, "scores": [2.272504240166095, 0.9062109893208043, 0.868068347535865, 0.5269626046483552], "rank_score": 1.14343654541778} -{"id": "rajagopal-etal-2022-curie", "title": "CURIE: An Iterative Querying Approach for Reasoning About Situations", "abstract": "Predicting the effects of unexpected situations is an important reasoning task, e.g., would cloudy skies help or hinder plant growth? Given a context, the goal of such situational reasoning is to elicit the consequences of a new situation (st) that arises in that context. We propose CURIE, a method to iteratively build a graph of relevant consequences explicitly in a structured situational graph (st graph) using natural language queries over a finetuned language model. Across multiple domains, CURIE generates st graphs that humans find relevant and meaningful in eliciting the consequences of a new situation (75% of the graphs were judged correct by humans). We present a case study of a situation reasoning end task (WIQA-QA), where simply augmenting their input with st graphs improves accuracy by 3 points. 
We show that these improvements mainly come from a hard subset of the data, that requires background knowledge and multi-hop reasoning.", "phrases": ["reasoning", "situation", "curie"], "overall_score": 1.2558930367728676, "scores": [1.7440806400188051, 0.8705892521672547, 0.8148194273684355], "rank_score": 1.143163106518165} -{"id": "cai-etal-2017-crf", "title": "CRF Autoencoder for Unsupervised Dependency Parsing", "abstract": "Unsupervised dependency parsing, which tries to discover linguistic dependency structures from unannotated data, is a very challenging task. Almost all previous work on this task focuses on learning generative models. In this paper, we develop an unsupervised dependency parsing model based on the CRF autoencoder. The encoder part of our model is discriminative and globally normalized which allows us to use rich features as well as universal linguistic priors. We propose an exact algorithm for parsing as well as a tractable learning algorithm. We evaluated the performance of our model on eight multilingual treebanks and found that our model achieved comparable performance with state-of-the-art approaches.", "phrases": ["unsupervised dependency parsing", "generative model", "crf autoencoder", "head"], "overall_score": 2.63199996162395, "scores": [2.5284017787098474, 0.9453258507787178, 0.5774499821814526, 0.5210746271413897], "rank_score": 1.143063059702852} -{"id": "kocisky-etal-2018-narrativeqa", "title": "The NarrativeQA Reading Comprehension Challenge", "abstract": "Reading comprehension (RC)\u2014in contrast to information retrieval\u2014requires integrating information and reasoning about events, entities, and their relations across a full document. Question answering is conventionally used to assess RC ability, in both artificial agents and children learning to read. However, existing RC datasets and tasks are dominated by questions that can be solved by selecting answers using superficial information (e.g., local context similarity or global term frequency); they thus fail to test for the essential integrative aspect of RC. To encourage progress on deeper comprehension of language, we present a new dataset and set of tasks in which the reader must answer questions about stories by reading entire books or movie scripts. These tasks are designed so that successfully answering their questions requires understanding the underlying narrative rather than relying on shallow pattern matching or salience. We show that although humans solve the tasks easily, standard RC models struggle on the tasks presented here. We provide an analysis of the dataset and the challenges it presents.", "phrases": ["narrativeqa", "comprehension", "question answering", "book", "annotator"], "overall_score": 4.062215771589789, "scores": [2.396942078458482, 1.1766497231866564, 1.0765038254670196, 0.5322601524703608, 0.5304685365380761], "rank_score": 1.142564863224119} -{"id": "pivovarova-etal-2013-event", "title": "Event representation across genre", "abstract": "This paper describes an approach for investigating the representation of events and their distribution in a corpus. We collect and analyze statistics about subject-verb-object triplets and their content, which helps us compare corpora belonging to the same domain but to different genre/text type. We argue that event structure is strongly related to the genre of the corpus, and propose statistical properties that are able to capture these genre differences. 
The results obtained can be used for the improvement of Information Extraction.", "phrases": ["genre", "statistic", "event representation"], "overall_score": 1.5835536715912826, "scores": [1.9032484361308908, 0.9699887981789075, 0.5536403091695825], "rank_score": 1.1422925144931269} -{"id": "xie-etal-2018-noising", "title": "Noising and Denoising Natural Language: Diverse Backtranslation for Grammar Correction", "abstract": "Translation-based methods for grammar correction that directly map noisy, ungrammatical text to their clean counterparts are able to correct a broad range of errors; however, such techniques are bottlenecked by the need for a large parallel corpus of noisy and clean sentence pairs. In this paper, we consider synthesizing parallel data by noising a clean monolingual corpus. While most previous approaches introduce perturbations using features computed from local context windows, we instead develop error generation processes using a neural sequence transduction model trained to translate clean examples to their noisy counterparts. Given a corpus of clean examples, we propose beam search noising procedures to synthesize additional noisy examples that human evaluators were nearly unable to discriminate from nonsynthesized examples. Surprisingly, when trained on additional data synthesized using our best-performing noising scheme, our model approaches the same performance as when trained on additional nonsynthesized data.", "phrases": ["grammar correction", "noise", "back-translation"], "overall_score": 3.5301757046866173, "scores": [0.9516206137225396, 1.3042778507442074, 1.1703007139216288], "rank_score": 1.1420663927961252} -{"id": "zarrella-marsh-2016-mitre", "title": "MITRE at SemEval-2016 Task 6: Transfer Learning for Stance Detection", "abstract": "We describe MITRE's submission to the SemEval-2016 Task 6, Detecting Stance in Tweets. This effort achieved the top score in Task A on supervised stance detection, producing an average F1 score of 67.8 when assessing whether a tweet author was in favor or against a topic. We employed a recurrent neural network initialized with features learned via distant supervision on two large unlabeled datasets. We trained embeddings of words and phrases with the word2vec skip-gram method, then used those features to learn sentence representations via a hashtag prediction auxiliary task. These sentence vectors were then fine-tuned for stance detection on several hundred labeled examples. The result was a high performing system that used transfer learning to maximize the value of the available training data.", "phrases": ["semeval-2016 task", "transfer learning", "stance detection", "sentence representation", "auxiliary task"], "overall_score": 2.738366626986539, "scores": [2.9530122708301265, 0.87918126211882, 0.8143290414510309, 0.5350001907557306, 0.52841515008701], "rank_score": 1.1419875830485435} -{"id": "iyyer-etal-2014-political", "title": "Political Ideology Detection Using Recursive Neural Networks", "abstract": "An individual\u2019s words often reveal their political ideology. Existing automated techniques to identify ideology from text focus on bags of words or wordlists, ignoring syntax. Taking inspiration from recent work in sentiment analysis that successfully models the compositional aspect of language, we apply a recursive neural network (RNN) framework to the task of identifying the political position evinced by a sentence. 
To show the importance of modeling subsentential elements, we crowdsource political annotations at a phrase and sentence level. Our model outperforms existing models on our newly annotated dataset and an existing dataset.", "phrases": ["recursive neural network", "political ideology detection", "text classification task"], "overall_score": 3.627036606377742, "scores": [1.490339305280316, 1.4106819029953477, 0.5228068324035344], "rank_score": 1.1412760135597326} -{"id": "agirre-etal-2014-random", "title": "Random Walks for Knowledge-Based Word Sense Disambiguation", "abstract": "Word Sense Disambiguation (WSD) systems automatically choose the intended meaning of a word in context. In this article we present a WSD algorithm based on random walks over large Lexical Knowledge Bases (LKB). We show that our algorithm performs better than other graph-based methods when run on a graph built from WordNet and eXtended WordNet. Our algorithm and LKB combination compares favorably to other knowledge-based approaches in the literature that use similar knowledge on a variety of English data sets and a data set on Spanish. We include a detailed analysis of the factors that affect the algorithm. The algorithm and the LKBs used are publicly available, and the results easily reproducible.", "phrases": ["word sense disambiguation", "graph-based algorithm", "random walk"], "overall_score": 3.718364748176789, "scores": [2.027610505680763, 0.8382667924429155, 0.5579298587091972], "rank_score": 1.1412690522776254} -{"id": "habernal-gurevych-2016-makes", "title": "What makes a convincing argument? Empirical analysis and detecting attributes of convincingness in Web argumentation", "abstract": "This article tackles a new challenging task in computational argumentation. Given a pair of two arguments to a certain controversial topic, we aim to directly assess qualitative properties of the arguments in order to explain why one argument is more convincing than the other one. We approach this task in a fully empirical manner by annotating 26k explanations written in natural language. These explanations describe convincingness of arguments in the given argument pair, such as their strengths or flaws. We create a new crowd-sourced corpus containing 9,111 argument pairs, multi-labeled with 17 classes, which was cleaned and curated by employing several strict quality measures. We propose two tasks on this data set, namely (1) predicting the full label distribution and (2) classifying types of flaws in less convincing arguments. Our experiments with feature-rich SVM learners and Bidirectional LSTM neural networks with convolution and attention mechanism reveal that such a novel fine-grained analysis of Web argument convincingness is a very challenging task. We release the new UKPConvArg2 corpus and software under permissive licenses to the research community.", "phrases": ["convincingness", "empirical manner", "argument pair"], "overall_score": 3.577915335742177, "scores": [2.31452171361383, 0.5884834690777981, 0.5202975785433405], "rank_score": 1.1411009204116562} -{"id": "sun-korhonen-2009-improving", "title": "Improving Verb Clustering with Automatically Acquired Selectional Preferences", "abstract": "In previous research in automatic verb classification, syntactic features have proved the most useful features, although manual classifications rely heavily on semantic features. 
We show, in contrast with previous work, that considerable additional improvement can be obtained by using semantic features in automatic classification: verb selectional preferences acquired from corpus data using a fully unsupervised method. We report these promising results using a new framework for verb clustering which incorporates a recent subcategorization acquisition system, rich syntactic-semantic feature sets, and a variation of spectral clustering which performs particularly well in high dimensional feature space.", "phrases": ["verb clustering", "selectional preference", "feature space"], "overall_score": 2.3726847997436713, "scores": [2.0493416243205886, 0.8401285224211862, 0.5335904474410431], "rank_score": 1.1410201980609394} -{"id": "peled-reichart-2017-sarcasm", "title": "Sarcasm SIGN: Interpreting Sarcasm with Sentiment Based Monolingual Machine Translation", "abstract": "Sarcasm is a form of speech in which speakers say the opposite of what they truly mean in order to convey a strong sentiment. In other words, \u201cSarcasm is the giant chasm between what I say, and the person who doesn't get it.\u201d. In this paper we present the novel task of sarcasm interpretation, defined as the generation of a non-sarcastic utterance conveying the same message as the original sarcastic one. We introduce a novel dataset of 3000 sarcastic tweets, each interpreted by five human judges. Addressing the task as monolingual machine translation (MT), we experiment with MT algorithms and evaluation measures. We then present SIGN: an MT based sarcasm interpretation algorithm that targets sentiment words, a defining element of textual sarcasm. We show that while the scores of n-gram based automatic measures are similar for all interpretation models, SIGN's interpretations are scored higher by humans for adequacy and sentiment polarity. We conclude with a discussion on future research directions for our new task.", "phrases": ["machine translation", "non-sarcastic utterance", "sarcasm"], "overall_score": 2.219564754095306, "scores": [2.2594418251752364, 0.5823194261342012, 0.5801306924262651], "rank_score": 1.140630647911901} -{"id": "callison-burch-etal-2007-meta", "title": "(Meta-) Evaluation of Machine Translation", "abstract": "This paper evaluates the translation quality of machine translation systems for 8 language pairs: translating French, German, Spanish, and Czech to English and back. We carried out an extensive human evaluation which allowed us not only to rank the different MT systems, but also to perform higher-level analysis of the evaluation process. We measured timing and intra- and inter-annotator agreement for three types of subjective evaluation. We measured the correlation of automatic evaluation metrics with human judgments. This meta-evaluation reveals surprising facts about the most commonly used methodologies.", "phrases": ["machine translation", "inter-annotator agreement", "judgment", "gim\u00e9nez", "fluency"], "overall_score": 3.716282526679766, "scores": [1.4778252266609426, 1.216697332924709, 1.0657601618328667, 1.0503987439556268, 0.8924683389845103], "rank_score": 1.1406299608717312} -{"id": "muzny-zettlemoyer-2013-automatic", "title": "Automatic Idiom Identification in Wiktionary", "abstract": "Online resources, such as Wiktionary, provide an accurate but incomplete source of idiomatic phrases. In this paper, we study the problem of automatically identifying idiomatic dictionary entries with such resources. 
We train an idiom classifier on a newly gathered corpus of over 60,000 Wiktionary multi-word definitions, incorporating features that model whether phrase meanings are constructed compositionally. Experiments demonstrate that the learned classifier can provide high quality idiom labels, more than doubling the number of idiomatic entries from 7,764 to 18,155 at precision levels of over 65%. These gains also translate to idiom detection in sentences, by simply using known word sense disambiguation algorithms to match phrases to their definitions. In a set of Wiktionary definition example sentences, the more complete set of idioms boosts detection recall by over 28 percentage points.", "phrases": ["wiktionary", "precision level", "graph-based feature", "wordnet"], "overall_score": 2.6263303917627785, "scores": [2.640996533013959, 0.8346989841397928, 0.562710558489859, 0.5239971115459101], "rank_score": 1.1406007967973801} -{"id": "chisholm-hachey-2015-entity", "title": "Entity Disambiguation with Web Links", "abstract": "Entity disambiguation with Wikipedia relies on structured information from redirect pages, article text, inter-article links, and categories. We explore whether web links can replace a curated encyclopaedia, obtaining entity prior, name, context, and coherence models from a corpus of web pages with links to Wikipedia. Experiments compare web link models to Wikipedia models on well-known conll and tac data sets. Results show that using 34 million web links approaches Wikipedia performance. Combining web link and Wikipedia models produces the best-known disambiguation accuracy of 88.7 on standard newswire test data.", "phrases": ["web link", "wikipedia", "entity disambiguation"], "overall_score": 2.9251884309512617, "scores": [1.833926546158092, 0.9995913007329776, 0.5878227216177491], "rank_score": 1.1404468561696064} -{"id": "breit-etal-2021-wic", "title": "WiC-TSV: An Evaluation Benchmark for Target Sense Verification of Words in Context", "abstract": "We present WiC-TSV, a new multi-domain evaluation benchmark for Word Sense Disambiguation. More specifically, we introduce a framework for Target Sense Verification of Words in Context which grounds its uniqueness in the formulation as binary classification task thus being independent of external sense inventories, and the coverage of various domains. This makes the dataset highly flexible for the evaluation of a diverse set of models and systems in and across domains. WiC-TSV provides three different evaluation settings, depending on the input signals provided to the model. We set baseline performance on the dataset using state-of-the-art language models. Experimental results show that even though these models can perform decently on the task, there remains a gap between machine and human performance, especially in out-of-domain settings. WiC-TSV data is available at .", "phrases": ["evaluation benchmark", "target sense verification", "wic-tsv"], "overall_score": 1.5809561298135502, "scores": [1.8160243141310994, 0.8136423276445401, 0.7915897107418864], "rank_score": 1.1404187841725086} -{"id": "fan-etal-2018-multi", "title": "Multi-grained Attention Network for Aspect-Level Sentiment Classification", "abstract": "We propose a novel multi-grained attention network (MGAN) model for aspect level sentiment classification. Existing approaches mostly adopt coarse-grained attention mechanism, which may bring information loss if the aspect has multiple words or larger context. 
We propose a fine-grained attention mechanism, which can capture the word-level interaction between aspect and context. And then we leverage the fine-grained and coarse-grained attention mechanisms to compose the MGAN framework. Moreover, unlike previous works which train each aspect with its context separately, we design an aspect alignment loss to depict the aspect-level interactions among the aspects that have the same context. We evaluate the proposed approach on three datasets: laptop and restaurant are from SemEval 2014, and the last one is a twitter dataset. Experimental results show that the multi-grained attention network consistently outperforms the state-of-the-art methods on all three datasets. We also conduct experiments to evaluate the effectiveness of aspect alignment loss, which indicates the aspect-level interactions can bring extra useful information and further improve the performance.", "phrases": ["sentiment classification", "aspect-level interaction", "multi-grained attention network"], "overall_score": 3.4718993127837985, "scores": [1.9779098915740614, 0.8956997007312987, 0.5475174157582939], "rank_score": 1.1403756693545513} -{"id": "ritter-etal-2011-named", "title": "Named Entity Recognition in Tweets: An Experimental Study", "abstract": "People tweet more than 100 Million times daily, yielding a noisy, informal, but sometimes informative corpus of 140-character messages that mirrors the zeitgeist in an unprecedented manner. The performance of standard NLP tools is severely degraded on tweets. This paper addresses this issue by re-building the NLP pipeline beginning with part-of-speech tagging, through chunking, to named-entity recognition. Our novel T-ner system doubles F1 score compared with the Stanford NER system. T-ner leverages the redundancy inherent in tweets to achieve this performance, using LabeledLDA to exploit Freebase dictionaries as a source of distant supervision. LabeledLDA outperforms co-training, increasing F1 by 25% over ten common entity types. \n \nOur NLP tools are available at: http://github.com/aritter/twitter_nlp", "phrases": ["entity recognition", "tweets", "standard pos tagger", "noisy text", "dependency parser"], "overall_score": 5.131388368275082, "scores": [2.0765813009491287, 1.3759733525479294, 0.8792521581236745, 0.8278095813395445, 0.5421673981511159], "rank_score": 1.1403567582222787} -{"id": "gkotsis-etal-2016-dont", "title": "Don't Let Notes Be Misunderstood: A Negation Detection Method for Assessing Risk of Suicide in Mental Health Records", "abstract": "Mental Health Records (MHRs) contain free-text documentation about patients\u2019 suicide and suicidality. In this paper, we address the problem of determining whether grammatic variants (inflections) of the word \u201csuicide\u201d are affirmed or negated. To achieve this, we populate and annotate a dataset with over 6,000 sentences originating from a large repository of MHRs. The resulting dataset has high Inter-Annotator Agreement (0.93). Furthermore, we develop and propose a negation detection method that leverages syntactic features of text. Using parse trees, we build a set of basic rules that rely on minimum domain knowledge and render the problem as binary classification (affirmed vs. negated). Since the overall goal is to identify patients who are expected to be at high risk of suicide, we focus on the evaluation of positive (affirmed) cases as determined by our classifier. 

Our negation detection approach yields a recall (sensitivity) value of 94.6% for the positive cases and an overall accuracy value of 91.9%. We believe that our approach can be integrated with other clinical Natural Language Processing tools in order to further advance information extraction capabilities.", "phrases": ["negation detection method", "suicide", "mental health records", "parse tree"], "overall_score": 2.21876392737581, "scores": [2.2920560520834115, 0.8879019263310363, 0.8584490246368947, 0.5224694145015635], "rank_score": 1.1402191043882266} -{"id": "niemann-gurevych-2011-peoples", "title": "The People's Web meets Linguistic Knowledge: Automatic Sense Alignment of Wikipedia and WordNet", "abstract": "We propose a method to automatically align WordNet synsets and Wikipedia articles to obtain a sense inventory of higher coverage and quality. For each WordNet synset, we first extract a set of Wikipedia articles as alignment candidates; in a second step, we determine which article (if any) is a valid alignment, i.e. is about the same sense or concept. In this paper, we go significantly beyond state-of-the-art word overlap approaches, and apply a threshold-based Personalized PageRank method for the disambiguation step. We show that WordNet synsets can be aligned to Wikipedia articles with a performance of up to 0.78 F1-Measure based on a comprehensive, well-balanced reference dataset consisting of 1,815 manually annotated sense alignment candidates. The fully-aligned resource as well as the reference dataset is publicly available.", "phrases": ["wordnet", "alignment candidate", "personalized pagerank method"], "overall_score": 2.218582060776119, "scores": [2.351407228923788, 0.5345770033466589, 0.5343926980618918], "rank_score": 1.140125643444113} -{"id": "kominek-black-2006-learning", "title": "Learning Pronunciation Dictionaries: Language Complexity and Word Selection Strategies", "abstract": "The speed with which pronunciation dictionaries can be bootstrapped depends on the efficiency of learning algorithms and on the ordering of words presented to the user. This paper presents an active-learning word selection strategy that is mindful of human limitations. Learning rates approach that of an oracle system that knows the final LTS rule set.", "phrases": ["pronunciation dictionary", "word selection strategy", "letter-to-sound rule"], "overall_score": 2.370793776916099, "scores": [1.996931741594388, 0.8861714769475293, 0.5372292063853541], "rank_score": 1.1401108083090905} -{"id": "ionescu-etal-2016-string", "title": "String Kernels for Native Language Identification: Insights from Behind the Curtains", "abstract": "The most common approach in text mining classification tasks is to rely on features like words, part-of-speech tags, stems, or some other high-level linguistic features. Recently, an approach that uses only character p-grams as features has been proposed for the task of native language identification (NLI). The approach obtained state-of-the-art results by combining several string kernels using multiple kernel learning. Despite the fact that the approach based on string kernels performs so well, several questions about this method remain unanswered. First, it is not clear why such a simple approach can compete with far more complex approaches that take words, lemmas, syntactic information, or even semantics into account. Second, although the approach is designed to be language independent, all experiments to date have been on English. 
This work is an extensive study that aims to systematically present the string kernel approach and to clarify the open questions mentioned above. A broad set of native language identification experiments were conducted to compare the string kernels approach with other state-of-the-art methods. The empirical results obtained in all of the experiments conducted in this work indicate that the proposed approach achieves state-of-the-art performance in NLI, reaching an accuracy that is 1.7% above the top scoring system of the 2013 NLI Shared Task. Furthermore, the results obtained on both the Arabic and the Norwegian corpora demonstrate that the proposed approach is language independent. In the Arabic native language identification task, string kernels show an increase of more than 17% over the best accuracy reported so far. The results of string kernels on Norwegian native language identification are also significantly better than the state-of-the-art approach. In addition, in a cross-corpus experiment, the proposed approach shows that it can also be topic independent, improving the state-of-the-art system by 32.3%. To gain additional insights about the string kernels approach, the features selected by the classifier as being more discriminating are analyzed in this work. The analysis also offers information about localized language transfer effects, since the features used by the proposed model are p-grams of various lengths. The features captured by the model typically include stems, function words, and word prefixes and suffixes, which have the potential to generalize over purely word-based features. By analyzing the discriminating features, this article offers insights into two kinds of language transfer effects, namely, word choice (lexical transfer) and morphological differences. The goal of the current study is to give a full view of the string kernels approach and shed some light on why this approach works so well.", "phrases": ["native language identification", "nli shared task", "string kernel", "text analysis task"], "overall_score": 2.504615699330536, "scores": [1.9588112961865465, 1.1835121100505441, 0.8838385450843189, 0.533436962990716], "rank_score": 1.1398997285780312} -{"id": "wallace-etal-2014-humans", "title": "Humans Require Context to Infer Ironic Intent (so Computers Probably do, too)", "abstract": "Automatically detecting verbal irony (roughly, sarcasm) is a challenging task because ironists say something other than \u2010 and often opposite to \u2010 what they actually mean. Discerning ironic intent exclusively from the words and syntax comprising texts (e.g., tweets, forum posts) is therefore not always possible: additional contextual information about the speaker and/or the topic at hand is often necessary. We introduce a new corpus that provides empirical evidence for this claim. 

We show that annotators frequently require context to make judgements concerning ironic intent, and that machine learning approaches tend to misclassify those same comments for which annotators required additional context.", "phrases": ["intent", "irony", "contextual information", "annotator", "sarcastic intent"], "overall_score": 3.414083412275827, "scores": [2.200625261736538, 1.3025463146943983, 1.0908724489051314, 0.5786823197815493, 0.5255188592602844], "rank_score": 1.1396490408755802} -{"id": "bohnet-etal-2013-joint", "title": "Joint Morphological and Syntactic Analysis for Richly Inflected Languages", "abstract": "Joint morphological and syntactic analysis has been proposed as a way of improving parsing accuracy for richly inflected languages. Starting from a transition-based model for joint part-of-speech tagging and dependency parsing, we explore different ways of integrating morphological features into the model. We also investigate the use of rule-based morphological analyzers to provide hard or soft lexical constraints and the use of word clusters to tackle the sparsity of lexical features. Evaluation on five morphologically rich languages (Czech, Finnish, German, Hungarian, and Russian) shows consistent improvements in both morphological and syntactic accuracy for joint prediction over a pipeline model, with further improvements thanks to lexical constraints and word clusters. The final results improve the state of the art in dependency parsing for all languages.", "phrases": ["syntactic analysis", "tagging", "morphological analyzer", "czech", "transition-based parser"], "overall_score": 3.0859434840701043, "scores": [2.491999811719305, 1.265876026498492, 0.8607472261311511, 0.5496699577088985, 0.5294290563845818], "rank_score": 1.1395444156884857} -{"id": "morris-etal-2020-textattack", "title": "TextAttack: A Framework for Adversarial Attacks, Data Augmentation, and Adversarial Training in NLP", "abstract": "While there has been substantial research using adversarial attacks to analyze NLP models, each attack is implemented in its own code repository. It remains challenging to develop NLP attacks and utilize them to improve model performance. This paper introduces TextAttack, a Python framework for adversarial attacks, data augmentation, and adversarial training in NLP. TextAttack builds attacks from four components: a goal function, a set of constraints, a transformation, and a search method. TextAttack's modular design enables researchers to easily construct attacks from combinations of novel and existing components. TextAttack provides implementations of 16 adversarial attacks from the literature and supports a variety of models and datasets, including BERT and other transformers, and all GLUE tasks. TextAttack also includes data augmentation and adversarial training modules for using components of adversarial attacks to improve model accuracy and robustness. TextAttack is democratizing NLP: anyone can try data augmentation and adversarial training on any model or dataset, with just a few lines of code. 
Code and tutorials are available at .", "phrases": ["data augmentation", "adversarial training", "nlp model", "textattack"], "overall_score": 2.7325031461776357, "scores": [2.2842135131065864, 0.8903731912404695, 0.8198030447474234, 0.563779537404466], "rank_score": 1.1395423216247362} -{"id": "condoravdi-etal-2003-entailment", "title": "Entailment, intensionality and text understanding", "abstract": "We argue that the detection of entailment and contradiction relations between texts is a minimal metric for the evaluation of text understanding systems. Intensionality, which is widespread in natural language, raises a number of detection issues that cannot be brushed aside. We describe a contexted clausal representation, derived from approaches in formal semantics, that permits an extended range of intensional entailments and contradictions to be tractably detected.", "phrases": ["intensionality", "text understanding", "contradiction", "entailment", "nli"], "overall_score": 3.084256450406832, "scores": [0.927602301013724, 0.8796842283902285, 1.6119381041872296, 1.2346218487644236, 1.04076074677091], "rank_score": 1.1389214458253032} -{"id": "kummerfeld-etal-2019-large", "title": "A Large-Scale Corpus for Conversation Disentanglement", "abstract": "Disentangling conversations mixed together in a single stream of messages is a difficult task, made harder by the lack of large manually annotated datasets. We created a new dataset of 77,563 messages manually annotated with reply-structure graphs that both disentangle conversations and define internal conversation structure. Our data is 16 times larger than all previously released datasets combined, the first to include adjudication of annotation disagreements, and the first to include context. We use our data to re-examine prior work, in particular, finding that 89% of conversations in a widely used dialogue corpus are either missing messages or contain extra messages. Our manually-annotated data presents an opportunity to develop robust data-driven methods for conversation disentanglement, which will help advance dialogue research.", "phrases": ["conversation disentanglement", "message", "large-scale dataset"], "overall_score": 2.919934533421575, "scores": [1.9964029057706563, 0.8257686260455787, 0.5930240059756196], "rank_score": 1.1383985125972849} -{"id": "rossiello-etal-2017-centroid", "title": "Centroid-based Text Summarization through Compositionality of Word Embeddings", "abstract": "The textual similarity is a crucial aspect for many extractive text summarization methods. A bag-of-words representation does not allow to grasp the semantic relationships between concepts when comparing strongly related sentences with no words in common. To overcome this issue, in this paper we propose a centroid-based method for text summarization that exploits the compositional capabilities of word embeddings. The evaluations on multi-document and multilingual datasets prove the effectiveness of the continuous vector representation of words compared to the bag-of-words model. Despite its simplicity, our method achieves good performance even in comparison to more complex deep learning models. 
Our method is unsupervised and it can be adopted in other summarization tasks.", "phrases": ["text summarization", "word embedding", "semantic relationship", "centroid-based method"], "overall_score": 2.2150731746552643, "scores": [1.9868131399566578, 1.1985777966504108, 0.8479837905508107, 0.519915003574288], "rank_score": 1.1383224326830417} -{"id": "yang-etal-2021-pcfgs", "title": "PCFGs Can Do Better: Inducing Probabilistic Context-Free Grammars with Many Symbols", "abstract": "Probabilistic context-free grammars (PCFGs) with neural parameterization have been shown to be effective in unsupervised phrase-structure grammar induction. However, due to the cubic computational complexity of PCFG representation and parsing, previous approaches cannot scale up to a relatively large number of (nonterminal and preterminal) symbols. In this work, we present a new parameterization form of PCFGs based on tensor decomposition, which has at most quadratic computational complexity in the symbol number and therefore allows us to use a much larger number of symbols. We further use neural parameterization for the new form to improve unsupervised parsing performance. We evaluate our model across ten languages and empirically demonstrate the effectiveness of using more symbols.", "phrases": ["context-free grammar", "complexity", "pcfg"], "overall_score": 1.2503359157481841, "scores": [1.8650918444408924, 0.9856391303929274, 0.5635834160893373], "rank_score": 1.1381047969743856} -{"id": "delpech-etal-2012-extraction", "title": "Extraction of Domain-Specific Bilingual Lexicon from Comparable Corpora: Compositional Translation and Ranking", "abstract": "This paper proposes a method for extracting translations of morphologically constructed terms from comparable corpora. The method is based on compositional translation and exploits translation equivalences at the morpheme-level, which allows for the generation of \"fertile\" translations (translation pairs in which the target term has more words than the source term). Ranking methods relying on corpus-based and translation-based features are used to select the best candidate translation. We obtain an average precision of 91% on the Top1 candidate translation. The method was tested on two language pairs (English-French and English-German) and with a small specialized comparable corpora (400k words per language).", "phrases": ["comparable corpora", "compositional translation", "morpheme-level", "target term"], "overall_score": 2.5002628525922796, "scores": [2.2882864905092926, 0.8704364907257783, 0.8412569150554784, 0.5516947543242644], "rank_score": 1.1379186626537034} -{"id": "su-etal-2012-translation", "title": "Translation Model Adaptation for Statistical Machine Translation with Monolingual Topic Information", "abstract": "To adapt a translation model trained from the data in one domain to another, previous works paid more attention to the studies of parallel corpus while ignoring the in-domain monolingual corpora which can be obtained more easily. In this paper, we propose a novel approach for translation model adaptation by utilizing in-domain monolingual topic information instead of the in-domain bilingual corpora, which incorporates the topic information into translation probability estimation. Our method establishes the relationship between the out-of-domain bilingual corpus and the in-domain monolingual corpora via topic mapping and phrase-topic distribution probability estimation from in-domain monolingual corpora. 
Experimental result on the NIST Chinese-English translation task shows that our approach significantly outperforms the baseline system.", "phrases": ["topic information", "in-domain monolingual corpora", "distribution probability estimation", "translation model adaptation"], "overall_score": 2.7268403691049135, "scores": [2.549353831539812, 0.9081674813069924, 0.5625859117639809, 0.5286158160292009], "rank_score": 1.1371807601599966} -{"id": "nikolaus-fourtassi-2021-evaluating", "title": "Evaluating the Acquisition of Semantic Knowledge from Cross-situational Learning in Artificial Neural Networks", "abstract": "When learning their native language, children acquire the meanings of words and sentences from highly ambiguous input without much explicit supervision. One possible learning mechanism is cross-situational learning, which has been successfully tested in laboratory experiments with children. Here we use Artificial Neural Networks to test if this mechanism scales up to more natural language and visual scenes using a large dataset of crowd-sourced images with corresponding descriptions. We evaluate learning using a series of tasks inspired by methods commonly used in laboratory studies of language acquisition. We show that the model acquires rich semantic knowledge both at the word- and sentence-level, mirroring the patterns and trajectory of learning in early childhood. Our work highlights the usefulness of low-level co-occurrence statistics across modalities in facilitating the early acquisition of higher-level semantic knowledge.", "phrases": ["semantic knowledge", "cross-situational learning", "artificial neural networks"], "overall_score": 1.576083673729248, "scores": [1.7635114930796254, 0.8296645549540164, 0.8175361021392265], "rank_score": 1.1369040500576226} -{"id": "ghader-monz-2017-attention", "title": "What does Attention in Neural Machine Translation Pay Attention to?", "abstract": "Attention in neural machine translation provides the possibility to encode relevant parts of the source sentence at each translation step. As a result, attention is considered to be an alignment model as well. However, there is no work that specifically studies attention and provides analysis of what is being learned by attention models. Thus, the question still remains that how attention is similar or different from the traditional alignment. In this paper, we provide detailed analysis of attention and compare it to traditional alignment. We answer the question of whether attention is only capable of modelling translational equivalent or it captures more information. We show that attention is different from alignment in some cases and is capturing useful information other than alignments.", "phrases": ["neural machine translation", "source sentence", "attention weight"], "overall_score": 3.078429568068103, "scores": [1.9824933973093164, 0.8867096861847817, 0.541106186417306], "rank_score": 1.1367697566371346} -{"id": "wright-etal-2017-vectors", "title": "Vectors for Counterspeech on Twitter", "abstract": "A study of conversations on Twitter found that some arguments between strangers led to favorable change in discourse and even in attitudes. 
The authors propose that such exchanges can be usefully distinguished according to whether individuals or groups take part on each side, since the opportunity for a constructive exchange of views seems to vary accordingly.", "phrases": ["counterspeech", "twitter", "stranger", "attitude"], "overall_score": 2.036792678758644, "scores": [1.952141521339839, 0.9284803229621367, 0.8404252231538916, 0.8259754849320532], "rank_score": 1.13675563809698} -{"id": "surdeanu-manning-2010-ensemble", "title": "Ensemble Models for Dependency Parsing: Cheap and Good?", "abstract": "Previous work on dependency parsing used various kinds of combination models but a systematic analysis and comparison of these approaches is lacking. In this paper we implemented such a study for English dependency parsing and find several non-obvious facts: (a) the diversity of base parsers is more important than complex models for learning (e.g., stacking, supervised meta-classification), (b) approximate, linear-time re-parsing algorithms guarantee well-formed dependency trees without significant performance loss, and (c) the simplest scoring model for re-parsing (unweighted voting) performs essentially as well as other more complex models. This study proves that fast and accurate ensemble parsers can be built with minimal effort.", "phrases": ["dependency parsing", "voting", "linear combination"], "overall_score": 2.915542785801575, "scores": [1.99504768211935, 0.8799710238583113, 0.5350401834738777], "rank_score": 1.1366862964838462} -{"id": "alsentzer-etal-2019-publicly", "title": "Publicly Available Clinical BERT Embeddings", "abstract": "Contextual word embedding models such as ELMo and BERT have dramatically improved performance for many natural language processing (NLP) tasks in recent months. However, these models have been minimally explored on specialty corpora, such as clinical text; moreover, in the clinical domain, no publicly-available pre-trained BERT models yet exist. In this work, we address this need by exploring and releasing BERT models for clinical text: one for generic clinical text and another for discharge summaries specifically. We demonstrate that using a domain-specific model yields performance improvements on 3/5 clinical NLP tasks, establishing a new state-of-the-art on the MedNLI dataset. We find that these domain-specific models are not as performant on 2 clinical de-identification tasks, and argue that this is a natural consequence of the differences between de-identified source text and synthetically non de-identified task text.", "phrases": ["clinicalbert", "biomedical domain", "pre-trained model"], "overall_score": 4.400311007988308, "scores": [1.5408104381603227, 1.0450715321133426, 0.8241535682500714], "rank_score": 1.1366785128412455} -{"id": "deutsch-roth-2019-summary", "title": "Summary Cloze: A New Task for Content Selection in Topic-Focused Summarization", "abstract": "A key challenge in topic-focused summarization is determining what information should be included in the summary, a problem known as content selection. In this work, we propose a new method for studying content selection in topic-focused summarization called the summary cloze task. The goal of the summary cloze task is to generate the next sentence of a summary conditioned on the beginning of the summary, a topic, and a reference document(s). The main challenge is deciding what information in the references is relevant to the topic and partial summary and should be included in the summary. 
Although the cloze task does not address all aspects of the traditional summarization problem, the more narrow scope of the task allows us to collect a large-scale dataset of nearly 500k summary cloze instances from Wikipedia. We report experimental results on this new dataset using various extractive models and a two-step abstractive model that first extractively selects a small number of sentences and then abstractively summarizes them. Our results show that the topic and partial summary help the models identify relevant content, but the task remains a significant challenge.", "phrases": ["content selection", "summarization", "cloze task"], "overall_score": 1.8292049307382952, "scores": [1.9722831203182898, 0.8443993100741735, 0.5929643474167305], "rank_score": 1.136548925936398} -{"id": "chiang-scheffler-2008-flexible", "title": "Flexible Composition and Delayed Tree-Locality", "abstract": "Flexible composition is an extension of TAG that has been used in a variety of TAG-analyses. In this paper, we present a dedicated study of the formal and linguistic properties of TAGs with flexible composition (TAG-FC). We start by presenting a survey of existing applications of flexible composition. In the main part of the paper, we discuss a formal definition of TAG-FCs and give a proof of equivalence of TAG-FC to tree-local MCTAG, via a formalism called delayed tree-local MCTAG. We then proceed to argue that delayed tree-locality is more intuitive for the analysis of many cases where flexible composition has been employed.", "phrases": ["flexible composition", "derivation", "standard tag"], "overall_score": 2.036415280914415, "scores": [2.018276252172882, 0.8454726716592232, 0.5458861012169297], "rank_score": 1.1365450083496784} -{"id": "davidov-rappoport-2010-extraction", "title": "Extraction and Approximation of Numerical Attributes from the Web", "abstract": "We present a novel framework for automated extraction and approximation of numerical object attributes such as height and weight from the Web. Given an object-attribute pair, we discover and analyze attribute information for a set of comparable objects in order to infer the desired value. This allows us to approximate the desired numerical values even when no exact values can be found in the text. \n \nOur framework makes use of relation defining patterns and WordNet similarity information. First, we obtain from the Web and WordNet a list of terms similar to the given object. Then we retrieve attribute values for each term in this list, and information that allows us to compare different objects in the list and to infer the attribute value range. Finally, we combine the retrieved data for all terms from the list to select or approximate the requested value. \n \nWe evaluate our method using automated question answering, WordNet enrichment, and comparison with answers given in Wikipedia and by leading search engines. In all of these, our framework provides a significant improvement.", "phrases": ["numerical attribute", "web", "object", "height", "value"], "overall_score": 2.3631201545594838, "scores": [1.8345119587712868, 1.2554407638116087, 1.1005406058876244, 0.9654543618637883, 0.5261551896785722], "rank_score": 1.136420576002576} -{"id": "wang-etal-2017-learning", "title": "Learning to Rank Semantic Coherence for Topic Segmentation", "abstract": "Topic segmentation plays an important role for discourse parsing and information retrieval. 

Due to the absence of training data, previous work mainly adopts unsupervised methods to rank semantic coherence between paragraphs for topic segmentation. In this paper, we present an intuitive and simple idea to automatically create a \u201cquasi\u201d training dataset, which includes a large amount of text pairs from the same or different documents with different semantic coherence. With the training corpus, we design a symmetric CNN neural network to model text pairs and rank the semantic coherence within the learning to rank framework. Experiments show that our algorithm is able to achieve competitive performance over strong baselines on several real-world datasets.", "phrases": ["topic segmentation", "text pair", "cnn", "coherence score"], "overall_score": 2.0354639088496844, "scores": [2.0149100641646167, 1.100086886847653, 0.8358836916339674, 0.5931755073159559], "rank_score": 1.1360140374905483} -{"id": "lee-etal-2012-joint", "title": "Joint Entity and Event Coreference Resolution across Documents", "abstract": "We introduce a novel coreference resolution system that models entities and events jointly. Our iterative method cautiously constructs clusters of entity and event mentions using linear regression to model cluster merge operations. As clusters are built, information flows between entity and event clusters through features that model semantic role dependencies. Our system handles nominal and verbal events as well as entities, and our joint formulation allows information from event coreference to help entity coreference, and vice versa. In a cross-document domain with comparable documents, joint coreference resolution performs significantly better (over 3 CoNLL F1 points) than two strong baselines that resolve entities and events separately.", "phrases": ["event coreference resolution", "cluster", "joint entity", "ecb corpus", "cross-document coreference"], "overall_score": 4.1019604211147564, "scores": [2.4879032606026796, 0.8729534911093738, 0.8779476178462623, 0.8341144882801311, 0.6070225315117039], "rank_score": 1.13598827787003} -{"id": "li-etal-2008-pnr2", "title": "PNR2: Ranking Sentences with Positive and Negative Reinforcement for Query-Oriented Update Summarization", "abstract": "Query-oriented update summarization is an emerging summarization task very recently. It brings new challenges to the sentence ranking algorithms that require not only to locate the important and query-relevant information, but also to capture the new information when document collections evolve. In this paper, we propose a novel graph based sentence ranking algorithm, namely PNR2, for update summarization. Inspired by the intuition that \"a sentence receives a positive influence from the sentences that correlate to it in the same collection, whereas a sentence receives a negative influence from the sentences that correlates to it in the different (perhaps previously read) collection\", PNR2 models both the positive and the negative mutual reinforcement in the ranking process. 
Automatic evaluation on the DUC 2007 data set pilot task demonstrates the effectiveness of the algorithm.", "phrases": ["reinforcement", "update summarization", "ranking process"], "overall_score": 1.8263210143763422, "scores": [2.3166958632003753, 0.558315862805942, 0.5292594178507881], "rank_score": 1.1347570479523685} -{"id": "anthonio-etal-2020-wikihowtoimprove", "title": "wikiHowToImprove: A Resource and Analyses on Edits in Instructional Texts", "abstract": "Instructional texts, such as articles in wikiHow, describe the actions necessary to accomplish a certain goal. In wikiHow and other resources, such instructions are subject to revision edits on a regular basis. Do these edits improve instructions only in terms of style and correctness, or do they provide clarifications necessary to follow the instructions and to accomplish the goal? We describe a resource and first studies towards answering this question. Specifically, we create wikiHowToImprove, a collection of revision histories for about 2.7 million sentences from about 246000 wikiHow articles. We describe human annotation studies on categorizing a subset of sentence-level edits and provide baseline models for the task of automatically distinguishing \u201colder\u201d from \u201cnewer\u201d revisions of a sentence.", "phrases": ["edit", "instructional text", "wikihowtoimprove"], "overall_score": 2.4930690382086333, "scores": [1.1894568127662037, 1.10913697454969, 1.1053400625836154], "rank_score": 1.1346446166331698} -{"id": "budanitsky-hirst-2006-evaluating", "title": "Evaluating WordNet-based Measures of Lexical Semantic Relatedness", "abstract": "The quantification of lexical semantic relatedness has many applications in NLP, and many different measures have been proposed. We evaluate five of these measures, all of which use WordNet as their central resource, by comparing their performance in detecting and correcting real-word spelling errors. An information-content-based measure proposed by Jiang and Conrath is found superior to those proposed by Hirst and St-Onge, Leacock and Chodorow, Lin, and Resnik. In addition, we explain why distributional similarity is not an adequate proxy for lexical semantic relatedness.", "phrases": ["lexical semantic relatedness", "central resource", "overview", "distance", "knowledge-based measure"], "overall_score": 4.343056127431707, "scores": [2.0399282680792914, 1.3302378450147374, 0.8772342526328234, 0.8765490448493256, 0.5478485951267523], "rank_score": 1.134359601140586} -{"id": "xiong-etal-2017-deeppath", "title": "DeepPath: A Reinforcement Learning Method for Knowledge Graph Reasoning", "abstract": "We study the problem of learning to reason in large scale knowledge graphs (KGs). More specifically, we describe a novel reinforcement learning framework for learning multi-hop relational paths: we use a policy-based agent with continuous states based on knowledge graph embeddings, which reasons in a KG vector-space by sampling the most promising relation to extend its path. In contrast to prior work, our approach includes a reward function that takes the accuracy, diversity, and efficiency into consideration. 
Experimentally, we show that our proposed method outperforms a path-ranking based algorithm and knowledge graph embedding methods on Freebase and Never-Ending Language Learning datasets.", "phrases": ["reinforcement learning method", "knowledge graph reasoning", "deeppath"], "overall_score": 3.695683052711456, "scores": [1.6219705402356934, 0.9133036441749742, 0.8676480521621965], "rank_score": 1.1343074121909547} -{"id": "falke-gurevych-2017-bringing", "title": "Bringing Structure into Summaries: Crowdsourcing a Benchmark Corpus of Concept Maps", "abstract": "Concept maps can be used to concisely represent important information and bring structure into large document collections. Therefore, we study a variant of multi-document summarization that produces summaries in the form of concept maps. However, suitable evaluation datasets for this task are currently missing. To close this gap, we present a newly created corpus of concept maps that summarize heterogeneous collections of web documents on educational topics. It was created using a novel crowdsourcing approach that allows us to efficiently determine important elements in large document collections. We release the corpus along with a baseline system and proposed evaluation protocol to enable further research on this variant of summarization.", "phrases": ["concept map", "large document collection", "summarization"], "overall_score": 2.35858264760077, "scores": [1.5126021961515992, 1.3359811726215005, 0.5541321204472931], "rank_score": 1.1342384964067975} -{"id": "shutova-teufel-2010-metaphor", "title": "Metaphor Corpus Annotated for Source - Target Domain Mappings", "abstract": "Besides making our thoughts more vivid and filling our communication with richer imagery, metaphor also plays an important structural role in our cognition. Although there is a consensus in the linguistics and NLP research communities that the phenomenon of metaphor is not restricted to similarity-based extensions of meanings of isolated words, but rather involves reconceptualization of a whole area of experience (target domain) in terms of another (source domain), there still has been no proposal for a comprehensive procedure for annotation of cross-domain mappings. However, a corpus annotated for conceptual mappings could provide a new starting point for both linguistic and cognitive experiments. The annotation scheme we present in this paper is a step towards filling this gap. We test our procedure in an experimental setting involving multiple annotators and estimate their agreement on the task. The associated corpus annotated for source \u2015 target domain mappings will be publicly available.", "phrases": ["annotator", "target domain", "metaphor", "language understanding"], "overall_score": 3.8945184665545125, "scores": [2.3538273064106496, 1.088571876056674, 0.5550891775917961, 0.5389507524312983], "rank_score": 1.1341097781226046} -{"id": "vu-etal-2014-acquiring", "title": "Acquiring a Dictionary of Emotion-Provoking Events", "abstract": "This paper is concerned with the discovery and aggregation of events that provoke a particular emotion in the person who experiences them, or emotion-provoking events. We first describe the creation of a small manually-constructed dictionary of events through a survey of 30 subjects. Next, we describe first attempts at automatically acquiring and aggregating these events from web data, with a baseline from previous work and some simple extensions using seed expansion and clustering. 
Finally, we propose several evaluation measures for evaluating the automatically acquired events, and perform an evaluation of the effectiveness of automatic event extraction.", "phrases": ["dictionary", "emotion-provoking event", "psychologist"], "overall_score": 2.3581454573181992, "scores": [1.945592103135531, 0.8858931598547624, 0.5705994939775095], "rank_score": 1.134028252322601} -{"id": "kao-jurafsky-2015-computational", "title": "A computational analysis of poetic style: Imagism and its influence on modern professional and amateur poetry", "abstract": "How do standards of poetic beauty change as a function of time and expertise? Here we use computational methods to compare the stylistic features of 359 English poems written by 19th century professional poets, Imagist poets, contemporary professional poets, and contemporary amateur poets. Building upon techniques designed to analyze style and sentiment in texts, we examine elements of poetic craft such as imagery, sound devices, emotive language, and diction. We find that contemporary professional poets use significantly more concrete words than 19th century poets, fewer emotional words, and more complex sound devices. These changes are consistent with the tenets of Imagism, an early 20th-century literary movement. Further analyses show that contemporary amateur poems resemble 19th century professional poems more than contemporary professional poems on several dimensions. The stylistic similarities between contemporary amateur poems and 19th century professional poems suggest that elite standards of poetic beauty in the past \u201ctrickled down\u201d to influence amateur works in the present. Our results highlight the influence of Imagism on the modern aesthetic and reveal the dynamics between \u201chigh\u201d and \u201clow\u201d art. We suggest that computational linguistics may shed light on the forces and trends that shape poetic style.", "phrases": ["poetic style", "imagism", "stylistic feature"], "overall_score": 2.031713077607314, "scores": [2.06302286115222, 0.8070691856357642, 0.5316699293593584], "rank_score": 1.1339206587157808} -{"id": "jiang-etal-2009-mining", "title": "Mining Bilingual Data from the Web with Adaptively Learnt Patterns", "abstract": "Mining bilingual data (including bilingual sentences and terms) from the Web can benefit many NLP applications, such as machine translation and cross language information retrieval. In this paper, based on the observation that bilingual data in many web pages appear collectively following similar patterns, an adaptive pattern-based bilingual data mining method is proposed. Specifically, given a web page, the method contains four steps: 1) preprocessing: parse the web page into a DOM tree and segment the inner text of each node into snippets; 2) seed mining: identify potential translation pairs (seeds) using a word based alignment model which takes both translation and transliteration into consideration; 3) pattern learning: learn generalized patterns with the identified seeds; 4) pattern based mining: extract all bilingual data in the page using the learned patterns. 

Our experiments on Chinese web pages produced more than 7.5 million pairs of bilingual sentences and more than 5 million pairs of bilingual terms, both with over 80% accuracy.", "phrases": ["bilingual data", "web", "potential translation pair", "adaptive pattern-based method"], "overall_score": 2.490630034536508, "scores": [2.202162502497715, 0.9370148992464755, 0.8551842347283158, 0.5397766764276618], "rank_score": 1.133534578225042} -{"id": "paperno-etal-2016-lambada", "title": "The LAMBADA dataset: Word prediction requiring a broad discourse context", "abstract": "We introduce LAMBADA, a dataset to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse. We show that LAMBADA exemplifies a wide range of linguistic phenomena, and that none of several state-of-the-art language models reaches accuracy above 1% on this novel benchmark. We thus propose LAMBADA as a challenging test set, meant to encourage the development of new models capable of genuine understanding of broad context in natural language text.", "phrases": ["lambada dataset", "word prediction task", "broad context"], "overall_score": 2.7179741598810465, "scores": [1.9289006057903795, 0.8431981406615364, 0.6283510447215827], "rank_score": 1.1334832637244996} -{"id": "shen-etal-2017-conditional", "title": "A Conditional Variational Framework for Dialog Generation", "abstract": "Deep latent variable models have been shown to facilitate the response generation for open-domain dialog systems. However, these latent variables are highly randomized, leading to uncontrollable generated responses. In this paper, we propose a framework allowing conditional response generation based on specific attributes. These attributes can be either manually assigned or automatically detected. Moreover, the dialog states for both speakers are modeled separately in order to reflect personal features. We validate this framework on two different scenarios, where the attribute refers to genericness and sentiment states respectively. The experiment result testified the potential of our model, where meaningful responses can be generated in accordance with the specified attributes.", "phrases": ["conditional variational framework", "latent variable", "response generation", "dialogue generation", "generative adversarial network"], "overall_score": 3.450451359622799, "scores": [1.7599633897776876, 1.5357000105972798, 0.9446972283703453, 0.8733982284389126, 0.552895651368049], "rank_score": 1.1333309017104547} -{"id": "wang-etal-2018-watch", "title": "Watch, Listen, and Describe: Globally and Locally Aligned Cross-Modal Attentions for Video Captioning", "abstract": "A major challenge for video captioning is to combine audio and visual cues. Existing multi-modal fusion methods have shown encouraging results in video understanding. However, the temporal structures of multiple modalities at different granularities are rarely explored, and how to selectively fuse the multi-modal representations at different levels of details remains uncharted. 
In this paper, we propose a novel hierarchically aligned cross-modal attention (HACA) framework to learn and selectively fuse both global and local temporal dynamics of different modalities. Furthermore, for the first time, we validate the superior performance of the deep audio features on the video captioning task. Finally, our HACA model significantly outperforms the previous best systems and achieves new state-of-the-art results on the widely used MSR-VTT dataset.", "phrases": ["cross-modal attention", "video captioning", "local temporal dynamic"], "overall_score": 2.0305104934505165, "scores": [1.9208570450977531, 0.9426930863494113, 0.5361983197084863], "rank_score": 1.13324948371855} -{"id": "lapshinova-koltunski-etal-2018-parcorfull", "title": "ParCorFull: a Parallel Corpus Annotated with Full Coreference", "abstract": "ParCorFull is a parallel corpus annotated with full coreference chains that has been created to address an important problem that machine translation and other multilingual natural language processing (NLP) technologies face -- translation of coreference across languages. Our corpus contains parallel texts for the language pair English-German, two major European languages. Despite being typologically very close, these languages still have systemic differences in the realisation of coreference, and thus pose problems for multilingual coreference resolution and machine translation. Our parallel corpus covers the genres of planned speech (public lectures) and newswire. It is richly annotated for coreference in both languages, including annotation of both nominal coreference and reference to antecedents expressed as clauses, sentences and verb phrases. This resource supports research in the areas of natural language processing, contrastive linguistics and translation studies on the mechanisms involved in coreference translation in order to develop a better understanding of the phenomenon.", "phrases": ["parallel corpus", "coreference chain", "parcorfull", "pronoun", "incongruence"], "overall_score": 2.3562685463710653, "scores": [2.75005337399972, 0.9664026779112476, 0.8986290536908402, 0.5279542009585309, 0.5225889381932988], "rank_score": 1.1331256489507275} -{"id": "siddharthan-teufel-2007-whose", "title": "Whose Idea Was This, and Why Does it Matter? Attributing Scientific Work to Citations", "abstract": "Scientific papers revolve around citations, and for many discourse level tasks one needs to know whose work is being talked about at any point in the discourse. In this paper, we introduce the scientific attribution task, which links different linguistic expressions to citations. We discuss the suitability of different evaluation metrics and evaluate our classification approach to deciding attribution both intrinsically and in an extrinsic evaluation where information about scientific attribution is shown to improve performance on Argumentative Zoning, a rhetorical classification task.", "phrases": ["citation", "scientific attribution", "argumentative zoning"], "overall_score": 2.489488106565721, "scores": [1.9343217971308133, 0.9393369704630862, 0.5253858256317371], "rank_score": 1.1330148644085456} -{"id": "chen-etal-2015-event", "title": "Event Extraction via Dynamic Multi-Pooling Convolutional Neural Networks", "abstract": "Traditional approaches to the task of ACE event extraction primarily rely on elaborately designed features and complicated natural language processing (NLP) tools. 
These traditional approaches lack generalization, take a large amount of human effort and are prone to error propagation and data sparsity problems. This paper proposes a novel event-extraction method, which aims to automatically extract lexical-level and sentence-level features without using complicated NLP tools. We introduce a word-representation model to capture meaningful semantic regularities for words and adopt a framework based on a convolutional neural network (CNN) to capture sentence-level clues. However, CNN can only capture the most important information in a sentence and may miss valuable facts when considering multiple-event sentences. We propose a dynamic multi-pooling convolutional neural network (DMCNN), which uses a dynamic multi-pooling layer according to event triggers and arguments, to reserve more crucial information. The experimental results show that our approach significantly outperforms other state-of-the-art methods.", "phrases": ["convolutional neural networks", "event extraction", "significant progress", "trigger word", "feature representation"], "overall_score": 4.287062274789638, "scores": [2.179098821509146, 1.382988972664949, 1.0445629470302935, 0.530382023364729, 0.5274062428792067], "rank_score": 1.1328878014896646} -{"id": "son-etal-2017-recognizing", "title": "Recognizing Counterfactual Thinking in Social Media Texts", "abstract": "Counterfactual statements, describing events that did not occur and their consequents, have been studied in areas including problem-solving, affect management, and behavior regulation. People with more counterfactual thinking tend to perceive life events as more personally meaningful. Nevertheless, counterfactuals have not been studied in computational linguistics. We create a counterfactual tweet dataset and explore approaches for detecting counterfactuals using rule-based and supervised statistical approaches. A combined rule-based and statistical approach yielded the best results (F1 = 0.77) outperforming either approach used alone.", "phrases": ["counterfactual thinking", "counterfactual tweet dataset", "social medium"], "overall_score": 2.6084402693929922, "scores": [1.904174488163978, 0.924523608500636, 0.5697955494502104], "rank_score": 1.132831215371608} -{"id": "munro-etal-2010-crowdsourcing", "title": "Crowdsourcing and language studies: the new generation of linguistic data", "abstract": "We present a compendium of recent and current projects that utilize crowdsourcing technologies for language studies, finding that the quality is comparable to controlled laboratory experiments, and in some cases superior. While crowdsourcing has primarily been used for annotation in recent language studies, the results here demonstrate that far richer data may be generated in a range of linguistic disciplines from semantics to psycholinguistics. For these, we report a number of successful methods for evaluating data quality in the absence of a 'correct' response for any given data point.", "phrases": ["language study", "crowdsourcing", "survey"], "overall_score": 2.607992186129917, "scores": [1.9268988437743781, 0.8980061196227495, 0.573004882451937], "rank_score": 1.1326366152830214} -{"id": "maccartney-etal-2008-phrase", "title": "A Phrase-Based Alignment Model for Natural Language Inference", "abstract": "The alignment problem---establishing links between corresponding phrases in two related sentences---is as important in natural language inference (NLI) as it is in machine translation (MT). 
But the tools and techniques of MT alignment do not readily transfer to NLI, where one cannot assume semantic equivalence, and for which large volumes of bitext are lacking. We present a new NLI aligner, the MANLI system, designed to address these challenges. It uses a phrase-based alignment representation, exploits external lexical resources, and capitalizes on a new set of supervised training data. We compare the performance of MANLI to existing NLI and MT aligners on an NLI alignment task over the well-known Recognizing Textual Entailment data. We show that MANLI significantly outperforms existing aligners, achieving gains of 6.2% in F1 over a representative NLI aligner and 10.5% over GIZA++.", "phrases": ["aligner", "natural language inference", "giza++", "unit", "predicate"], "overall_score": 3.5509163794729255, "scores": [2.550337288836067, 1.160992739291209, 0.8519699658769425, 0.5542207406750055, 0.544930118257005], "rank_score": 1.1324901705872459} -{"id": "shang-etal-2015-neural", "title": "Neural Responding Machine for Short-Text Conversation", "abstract": "We propose Neural Responding Machine (NRM), a neural network-based response generator for Short-Text Conversation. NRM takes the general encoder-decoder framework: it formalizes the generation of response as a decoding process based on the latent representation of the input text, while both encoding and decoding are realized with recurrent neural networks (RNN). The NRM is trained with a large amount of one-round conversation data collected from a microblogging service. Empirical study shows that NRM can generate grammatically correct and content-wise appropriate responses to over 75% of the input text, outperforming state-of-the-arts in the same setting, including retrieval-based and SMT-based models.", "phrases": ["short-text conversation", "neural responding machine", "dialog system", "previous utterance", "smt phrase table"], "overall_score": 4.67348522904277, "scores": [2.790311731052746, 0.8882770439802274, 0.8752278720435295, 0.5731737773194842, 0.5349105376582312], "rank_score": 1.1323801924108436} -{"id": "gerz-etal-2016-simverb", "title": "SimVerb-3500: A Large-Scale Evaluation Set of Verb Similarity", "abstract": "Verbs play a critical role in the meaning of sentences, but these ubiquitous words have received little attention in recent distributional semantics research. We introduce SimVerb-3500, an evaluation resource that provides human ratings for the similarity of 3,500 verb pairs. SimVerb-3500 covers all normed verb types from the USF free-association database, providing at least three examples for every VerbNet class. This broad coverage facilitates detailed analyses of how syntactic and semantic phenomena together influence human understanding of verb meaning. Further, with significantly larger development and test sets than existing benchmarks, SimVerb-3500 enables more robust evaluation of representation learning architectures and promotes the development of methods tailored to verbs. 
We hope that SimVerb-3500 will enable a richer understanding of the diversity and complexity of verb semantics and guide the development of systems that can effectively represent and interpret this meaning.", "phrases": ["human understanding", "verb semantic", "simverb-3500"], "overall_score": 2.8132665357994617, "scores": [2.238069848804727, 0.612557556755104, 0.545797811507549], "rank_score": 1.13214173902246} -{"id": "albrecht-hwa-2007-regression", "title": "Regression for Sentence-Level MT Evaluation with Pseudo References", "abstract": "Many automatic evaluation metrics for machine translation (MT) rely on making comparisons to human translations, a resource that may not always be available. We present a method for developing sentence-level MT evaluation metrics that do not directly rely on human reference translations. Our metrics are developed using regression learning and are based on a set of weaker indicators of fluency and adequacy ( pseudo references). Experimental results suggest that they rival standard reference-based metrics in terms of correlations with human judgments on new test instances.", "phrases": ["human reference translation", "fluency", "regression", "state-of-the-art correlation"], "overall_score": 3.3328701231101565, "scores": [2.472943100442598, 0.9375068029993077, 0.5885071837681044, 0.5287239368384893], "rank_score": 1.1319202560121249} -{"id": "junczys-dowmunt-2018-dual", "title": "Dual Conditional Cross-Entropy Filtering of Noisy Parallel Corpora", "abstract": "In this work we introduce dual conditional cross-entropy filtering for noisy parallel data. For each sentence pair of the noisy parallel corpus we compute cross-entropy scores according to two inverse translation models trained on clean data. We penalize divergent cross-entropies and weigh the penalty by the cross-entropy average of both models. Sorting or thresholding according to these scores results in better subsets of parallel data. We achieve higher BLEU scores with models trained on parallel data filtered only from Paracrawl than with models trained on clean WMT data. We further evaluate our method in the context of the WMT2018 shared task on parallel corpus filtering and achieve the overall highest ranking scores of the shared task, scoring top in three out of four subtasks.", "phrases": ["filtering", "sentence pair", "dual cross-entropy", "high-resource language"], "overall_score": 3.3905155596046526, "scores": [1.7654213526183768, 1.634665294897222, 0.5803175179133121, 0.5467234280957401], "rank_score": 1.1317818983811627} -{"id": "zhang-etal-2021-supporting", "title": "Supporting Clustering with Contrastive Learning", "abstract": "Unsupervised clustering aims at discovering the semantic categories of data according to some distance measured in the representation space. However, different categories often overlap with each other in the representation space at the beginning of the learning process, which poses a significant challenge for distance-based clustering in achieving good separation between different categories. To this end, we propose Supporting Clustering with Contrastive Learning (SCCL) \u2013 a novel framework to leverage contrastive learning to promote better separation. We assess the performance of SCCL on short text clustering and show that SCCL significantly advances the state-of-the-art results on most benchmark datasets with 3%-11% improvement on Accuracy and 4%-15% improvement on Normalized Mutual Information. 
Furthermore, our quantitative analysis demonstrates the effectiveness of SCCL in leveraging the strengths of both bottom-up instance discrimination and top-down clustering to achieve better intra-cluster and inter-cluster distances when evaluated with the ground truth cluster labels.", "phrases": ["clustering", "contrastive learning", "sccl", "loss", "remarkable success"], "overall_score": 2.4864783254181715, "scores": [2.591338418413533, 0.9035405458016732, 1.0701833234081095, 0.5579001946889187, 0.5352627875703414], "rank_score": 1.131645053976515} -{"id": "jarrold-etal-2014-aided", "title": "Aided diagnosis of dementia type through computer-based analysis of spontaneous speech", "abstract": "This pilot study evaluates the ability of machine-learned algorithms to assist with the differential diagnosis of dementia subtypes based on brief (< 10 min) spontaneous speech samples. We analyzed recordings of a brief spontaneous speech sample from 48 participants from 5 different groups: 4 types of dementia plus healthy controls. Recordings were analyzed using a speech recognition system optimized for speaker-independent spontaneous speech. Lexical and acoustic features were automatically extracted. The resulting feature profiles were used as input to a machine learning system that was trained to identify the diagnosis assigned to each research participant. Between-group lexical and acoustic differences were detected in accordance with expectations from the prior research literature, suggesting that classifications were based on features consistent with human-observed symptomatology. Machine learning algorithms were able to identify participants' diagnostic group with accuracy comparable to existing diagnostic methods in use today. Results suggest this clinical speech analytic approach offers promise as an additional, objective and easily obtained source of diagnostic information for clinicians.", "phrases": ["diagnosis", "dementia", "spontaneous speech"], "overall_score": 2.0276356509241293, "scores": [1.6696902816918118, 0.8343123450505051, 0.890932383922419], "rank_score": 1.131645003554912} -{"id": "mirza-tonelli-2014-analysis", "title": "An Analysis of Causality between Events and its Relation to Temporal Information", "abstract": "In this work we present an annotation framework to capture causality between events, inspired by TimeML, and a language resource covering both temporal and causal relations. This data set is then used to build an automatic extraction system for causal signals and causal links between given event pairs. The evaluation and analysis of the system\u2019s performance provides an insight into explicit causality in text and the connection between temporal and causal relations.", "phrases": ["causality", "temporal information", "broader-coverage linguistic approach"], "overall_score": 3.0641181847212744, "scores": [1.9290191637313407, 0.8963433438676774, 0.5690924956436916], "rank_score": 1.1314850010809032} -{"id": "elsner-charniak-2008-talking", "title": "You Talking to Me? A Corpus and Algorithm for Conversation Disentanglement", "abstract": "When multiple conversations occur simultaneously, a listener must decide which conversation each utterance is part of in order to interpret and respond to it appropriately. We refer to this task as disentanglement. We present a corpus of Internet Relay Chat (IRC) dialogue in which the various conversations have been manually disentangled, and evaluate annotator reliability. 
This is, to our knowledge, the first such corpus for internet chat. We propose a graph-theoretic model for disentanglement, using discourse-based features which have not been previously applied to this task. The model\u2019s predicted disentanglements are highly correlated with manual annotations.", "phrases": ["conversation disentanglement", "chat", "thread"], "overall_score": 3.3310683391134592, "scores": [1.9609085786441276, 0.8572071168812031, 0.5758092891824237], "rank_score": 1.1313083282359182} -{"id": "sharma-etal-2022-disarm", "title": "DISARM: Detecting the Victims Targeted by Harmful Memes", "abstract": "Internet memes have emerged as an increasingly popular means of communication on the web. Although memes are typically intended to elicit humour, they have been increasingly used to spread hatred, trolling, and cyberbullying, as well as to target specific individuals, communities, or society on political, socio-cultural, and psychological grounds. While previous work has focused on detecting harmful, hateful, and offensive memes in general, identifying whom these memes attack (i.e., the `victims') remains a challenging and underexplored area. We attempt to address this problem in this paper. To this end, we create a dataset in which we annotate each meme with its victim(s) such as the name of the targeted person(s), organization(s), and community(ies). We then propose DISARM (Detecting vIctimS targeted by hARmful Memes), a framework that uses named-entity recognition and person identification to detect all entities a meme is referring to, and then, incorporates a novel contextualized multimodal deep neural network to classify whether the meme intends to harm these entities. We perform several systematic experiments on three different test sets, corresponding to entities that are (i) all seen while training, (ii) not seen as a harmful target while training, and (iii) not seen at all while training. The evaluation shows that DISARM significantly outperforms 10 unimodal and multimodal systems. Finally, we demonstrate that DISARM is interpretable and comparatively more generalizable and that it can reduce the relative error rate of harmful target identification by up to 9 % absolute over multimodal baseline systems.", "phrases": ["victim", "meme", "disarm"], "overall_score": 1.820692292535553, "scores": [1.748733295163404, 0.8245040069805396, 0.820541877163359], "rank_score": 1.1312597264357676} -{"id": "xing-etal-2015-normalized", "title": "Normalized Word Embedding and Orthogonal Transform for Bilingual Word Translation", "abstract": "Word embedding has been found to be highly powerful to translate words from one language to another by a simple linear transform. However, we found some inconsistence among the objective functions of the embedding and the transform learning, as well as the distance measurement. This paper proposes a solution which normalizes the word vectors on a hypersphere and constrains the linear transform as an orthogonal transform. 
The experimental results confirmed that the proposed solution can offer better performance on a word similarity task and an English-to-Spanish word translation task.", "phrases": ["word embedding", "orthogonal transform", "improved result"], "overall_score": 4.401889124547207, "scores": [0.8710765394752988, 1.9992482468074055, 0.5228605003176596], "rank_score": 1.1310617622001213} -{"id": "choshen-abend-2018-reference", "title": "Reference-less Measure of Faithfulness for Grammatical Error Correction", "abstract": "We propose USim, a semantic measure for Grammatical Error Correction (GEC) that measures the semantic faithfulness of the output to the source, thereby complementing existing reference-less measures (RLMs) for measuring the output's grammaticality. USim operates by comparing the semantic symbolic structure of the source and the correction, without relying on manually-curated references. Our experiments establish the validity of USim, by showing that the semantic structures can be consistently applied to ungrammatical text, that valid corrections obtain a high USim similarity score to the source, and that invalid corrections obtain a lower score.", "phrases": ["faithfulness", "grammatical error correction", "reference-less measure"], "overall_score": 1.5679181228697374, "scores": [1.6852325281953662, 0.9058044517726969, 0.8020045706080909], "rank_score": 1.1310138501920513} -{"id": "lin-etal-2018-multi-lingual", "title": "A Multi-lingual Multi-task Architecture for Low-resource Sequence Labeling", "abstract": "We propose a multi-lingual multi-task architecture to develop supervised models with a minimal amount of labeled data for sequence labeling. In this new architecture, we combine various transfer models using two layers of parameter sharing. On the first layer, we construct the basis of the architecture to provide universal word representation and feature extraction capability for all models. On the second level, we adopt different parameter sharing strategies for different transfer schemes. This architecture proves to be particularly effective for low-resource settings, when there are less than 200 training sentences for the target task. Using Name Tagging as a target task, our approach achieved 4.3%-50.5% absolute F-score gains compared to the mono-lingual single-task baseline model.", "phrases": ["multi-task architecture", "name tagging", "cross-lingual transfer"], "overall_score": 2.9843288087662025, "scores": [1.9151636127636555, 0.8846024627068615, 0.5927280268094973], "rank_score": 1.1308313674266715} -{"id": "haponchyk-moschitti-2017-dont", "title": "Don't understand a measure? Learn it: Structured Prediction for Coreference Resolution optimizing its measures", "abstract": "An interesting aspect of structured prediction is the evaluation of an output structure against the gold standard. Especially in the loss-augmented setting, the need of finding the max-violating constraint has severely limited the expressivity of effective loss functions. In this paper, we trade off exact computation for enabling the use and study of more complex loss functions for coreference resolution. Most interestingly, we show that such functions can be (i) automatically learned also from controversial but commonly accepted coreference measures, e.g., MELA, and (ii) successfully used in learning algorithms. 
The accurate model comparison on the standard CoNLL-2012 setting shows the benefit of more expressive loss functions.", "phrases": ["structured prediction", "coreference resolution", "expressive loss function"], "overall_score": 1.2416297391911528, "scores": [1.9355425199423544, 0.8956194467488598, 0.5593783139834961], "rank_score": 1.1301800935582367} -{"id": "nadejde-tetreault-2019-personalizing", "title": "Personalizing Grammatical Error Correction: Adaptation to Proficiency Level and L1", "abstract": "Grammar error correction (GEC) systems have become ubiquitous in a variety of software applications, and have started to approach human-level performance for some datasets. However, very little is known about how to efficiently personalize these systems to the user's characteristics, such as their proficiency level and first language, or to emerging domains of text. We present the first results on adapting a general purpose neural GEC system to both the proficiency level and the first language of a writer, using only a few thousand annotated sentences. Our study is the broadest of its kind, covering five proficiency levels and twelve different languages, and comparing three different adaptation scenarios: adapting to the proficiency level only, to the first language only, or to both aspects simultaneously. We show that tailoring to both scenarios achieves the largest performance improvement (3.6 F0.5) relative to a strong baseline.", "phrases": ["proficiency level", "learner data", "native language"], "overall_score": 2.19902614214481, "scores": [2.3372154327931867, 0.5318385350233652, 0.5211737000113452], "rank_score": 1.1300758892759657} -{"id": "nakov-2008-improving", "title": "Improving English-Spanish Statistical Machine Translation: Experiments in Domain Adaptation, Sentence Paraphrasing, Tokenization, and Recasing", "abstract": "We describe the experiments of the UC Berkeley team on improving English-Spanish machine translation of news text, as part of the WMT'08 Shared Translation Task. We experiment with domain adaptation, combining a small in-domain news bi-text and a large out-of-domain one from the Europarl corpus, building two separate phrase translation models and two separate language models. We further add a third phrase translation model trained on a version of the news bi-text augmented with monolingual sentence-level syntactic paraphrases on the source-language side, and we combine all models in a log-linear model using minimum error rate training. Finally, we experiment with different tokenization and recasing rules, achieving 35.09% Bleu score on the WMT'07 news test data when translating from English to Spanish, which is a sizable improvement over the highest Bleu score achieved on that dataset at WMT'07: 33.10% (in fact, by our system). On the WMT'08 English to Spanish news translation, we achieve 21.92%, which makes our team the second best on Bleu score.", "phrases": ["domain adaptation", "tokenization", "phrase table"], "overall_score": 2.34937115040511, "scores": [1.7270384537705261, 0.7887257280820937, 0.873661926044431], "rank_score": 1.1298087026323504} -{"id": "patro-etal-2019-deep", "title": "A deep-learning framework to detect sarcasm targets", "abstract": "In this paper we propose a deep learning framework for sarcasm target detection in predefined sarcastic texts. Identification of sarcasm targets can help in many core natural language processing tasks such as aspect based sentiment analysis, opinion mining etc. 
To begin with, we perform an empirical study of the socio-linguistic features and identify those that are statistically significant in indicating sarcasm targets (p-values in the range (0.05, 0.001)). Finally, we present a deep-learning framework augmented with socio-linguistic features to detect sarcasm targets in sarcastic book-snippets and tweets. We achieve a huge improvement in the performance in terms of exact match and Dice scores compared to the current state-of-the-art baseline.", "phrases": ["deep-learning framework", "sarcasm target", "socio-linguistic feature"], "overall_score": 1.5658327274513677, "scores": [1.9633525002944947, 0.8884338955860598, 0.5367422702530376], "rank_score": 1.129509555377864} -{"id": "zhang-etal-2017-end", "title": "End-to-End Neural Relation Extraction with Global Optimization", "abstract": "Neural networks have shown promising results for relation extraction. State-of-the-art models cast the task as an end-to-end problem, solved incrementally using a local classifier. Yet previous work using statistical models has demonstrated that global optimization can achieve better performances compared to local classification. We build a globally optimized neural model for end-to-end relation extraction, proposing novel LSTM features in order to better learn context representations. In addition, we present a novel method to integrate syntactic information to facilitate global learning, yet requiring little background on syntactic grammars thus being easy to extend. Experimental results show that our proposed model is highly effective, achieving the best performances on two standard benchmarks.", "phrases": ["relation extraction", "global optimization", "entity recognition"], "overall_score": 3.1997261723509225, "scores": [1.7065869920979324, 1.1434897278831053, 0.5380121216837928], "rank_score": 1.1293629472216102} -{"id": "bramsen-etal-2011-extracting", "title": "Extracting Social Power Relationships from Natural Language", "abstract": "Sociolinguists have long argued that social context influences language use in all manner of ways, resulting in lects. This paper explores a text classification problem we will call lect modeling, an example of what has been termed computational sociolinguistics. In particular, we use machine learning techniques to identify social power relationships between members of a social network, based purely on the content of their interpersonal communication. We rely on statistical methods, as opposed to language-specific engineering, to extract features which represent vocabulary and grammar usage indicative of social power lect. We then apply support vector machines to model the social power lects representing superior-subordinate communication in the Enron email corpus. 
Our results validate the treatment of lect modeling as a text classification problem -- albeit a hard one -- and constitute a case for future research in computational sociolinguistics.", "phrases": ["enron email corpus", "power relation", "subordinate", "organizational structure", "mean"], "overall_score": 3.130542109304305, "scores": [1.6671991373214337, 1.1433852014606751, 1.0933141530812647, 0.8999215452075334, 0.8417019334133388], "rank_score": 1.129104394096849} -{"id": "foster-etal-2010-discriminative", "title": "Discriminative Instance Weighting for Domain Adaptation in Statistical Machine Translation", "abstract": "We describe a new approach to SMT adaptation that weights out-of-domain phrase pairs according to their relevance to the target domain, determined by both how similar to it they appear to be, and whether they belong to general language or not. This extends previous work on discriminative weighting by using a finer granularity, focusing on the properties of instances rather than corpus components, and using a simpler training procedure. We incorporate instance weighting into a mixture-model framework, and find that it yields consistent improvements over a wide range of baselines.", "phrases": ["domain adaptation", "phrase pair", "perplexity", "perform weighting"], "overall_score": 3.8768709243169406, "scores": [1.8374139983558915, 1.2472615822781554, 0.8820766102763512, 0.549130593106201], "rank_score": 1.12897069600415} -{"id": "ratinov-roth-2009-design", "title": "Design Challenges and Misconceptions in Named Entity Recognition", "abstract": "We analyze some of the fundamental design challenges and misconceptions that underlie the development of an efficient and robust NER system. In particular, we address issues such as the representation of text chunks, the inference approach needed to combine local NER decisions, the sources of prior knowledge and how to use them within an NER system. In the process of comparing several solutions to these challenges we reach some surprising conclusions, as well as develop an NER system that achieves 90.8 F1 score on the CoNLL-2003 NER shared task, the best reported result for this dataset.", "phrases": ["named entity recognition", "ner system", "conditional random fields", "wikipedia", "tagging schema"], "overall_score": 4.961158995797531, "scores": [0.9686741201994025, 1.4862020151538764, 1.352609437574069, 1.2861204169250506, 0.5511959195320032], "rank_score": 1.1289603818768803} -{"id": "yang-etal-2020-csp", "title": "CSP:Code-Switching Pre-training for Neural Machine Translation", "abstract": "This paper proposes a new pre-training method, called Code-Switching Pre-training (CSP for short) for Neural Machine Translation (NMT). Unlike traditional pre-training method which randomly masks some fragments of the input sentence, the proposed CSP randomly replaces some words in the source sentence with their translation words in the target language. Specifically, we firstly perform lexicon induction with unsupervised word embedding mapping between the source and target languages, and then randomly replace some words in the input sentence with their translation words according to the extracted translation lexicons. CSP adopts the encoder-decoder framework: its encoder takes the code-mixed sentence as input, and its decoder predicts the replaced fragment of the input sentence. 
In this way, CSP is able to pre-train the NMT model by explicitly making the most of the alignment information extracted from the source and target monolingual corpus. Additionally, we relieve the pretrain-finetune discrepancy caused by the artificial symbols like [mask]. To verify the effectiveness of the proposed method, we conduct extensive experiments on unsupervised and supervised NMT. Experimental results show that CSP achieves significant improvements over baselines without pre-training or with other pre-training methods.", "phrases": ["code-switching", "neural machine translation", "lexicon induction", "other pre-training method", "csp"], "overall_score": 2.707121903514363, "scores": [1.9738691039303229, 1.7354474410340628, 0.822549528140263, 0.5835343702070871, 0.5293871631860269], "rank_score": 1.1289575212995526} -{"id": "glockner-etal-2018-breaking", "title": "Breaking NLI Systems with Sentences that Require Simple Lexical Inferences", "abstract": "We create a new NLI test set that shows the deficiency of state-of-the-art models in inferences that require lexical and world knowledge. The new examples are simpler than the SNLI test set, containing sentences that differ by at most one word from sentences in the training set. Yet, the performance on the new test set is substantially worse across systems trained on SNLI, demonstrating that these systems are limited in their generalization ability, failing to capture many simple inferences.", "phrases": ["snli", "natural language inference", "hypernym", "premise", "entailment"], "overall_score": 4.563938583942521, "scores": [1.4650624214526942, 1.1490854032403897, 1.1224943771516755, 1.0415133370124066, 0.8660204579351736], "rank_score": 1.128835199358468} -{"id": "hiraoka-etal-2019-stochastic", "title": "Stochastic Tokenization with a Language Model for Neural Text Classification", "abstract": "For unsegmented languages such as Japanese and Chinese, tokenization of a sentence has a significant impact on the performance of text classification. Sentences are usually segmented with words or subwords by a morphological analyzer or byte pair encoding and then encoded with word (or subword) representations for neural networks. However, segmentation is potentially ambiguous, and it is unclear whether the segmented tokens achieve the best performance for the target task. In this paper, we propose a method to simultaneously learn tokenization and text classification to address these problems. Our model incorporates a language model for unsupervised tokenization into a text classifier and then trains both models simultaneously. To make the model robust against infrequent tokens, we sampled segmentation for each sentence stochastically during training, which resulted in improved performance of text classification. We conducted experiments on sentiment analysis as a text classification task and show that our method achieves better performance than previous methods.", "phrases": ["tokenization", "language model", "text classification task"], "overall_score": 2.0221476833654637, "scores": [1.8255830412897673, 0.9476973697893757, 0.612465920547614], "rank_score": 1.1285821105422522} -{"id": "hernandez-mena-etal-2020-masri", "title": "MASRI-HEADSET: A Maltese Corpus for Speech Recognition", "abstract": "Maltese, the national language of Malta, is spoken by approximately 500,000 people. Speech processing for Maltese is still in its early stages of development. 
In this paper, we present the first spoken Maltese corpus designed purposely for Automatic Speech Recognition (ASR). The MASRI-HEADSET corpus was developed by the MASRI project at the University of Malta. It consists of 8 hours of speech paired with text, recorded by using short text snippets in a laboratory environment. The speakers were recruited from different geographical locations all over the Maltese islands, and were roughly evenly distributed by gender. This paper also presents some initial results achieved in baseline experiments for Maltese ASR using Sphinx and Kaldi. The MASRI HEADSET Corpus is publicly available for research/academic purposes.", "phrases": ["maltese corpus", "speech recognition", "asr"], "overall_score": 1.239648522074261, "scores": [1.9194045146071619, 0.9258160766617267, 0.5399095447970433], "rank_score": 1.1283767120219772} -{"id": "florian-etal-2003-named", "title": "Named Entity Recognition through Classifier Combination", "abstract": "This paper presents a classifier-combination experimental framework for named entity recognition in which four diverse classifiers (robust linear classifier, maximum entropy, transformation-based learning, and hidden Markov model) are combined under different conditions. When no gazetteer or other additional training resources are used, the combined system attains a performance of 91.6F on the English development data; integrating name, location and person gazetteers, and named entity systems trained on additional, more general, data reduces the F-measure error by a factor of 15 to 21% on the English data.", "phrases": ["entity recognition", "gazetteer", "location", "linguistic feature", "large annotated training"], "overall_score": 3.4339510582696926, "scores": [3.0806276843320335, 0.8593234424120578, 0.595274009519312, 0.5688712549353465, 0.5354597764960911], "rank_score": 1.1279112335389683} -{"id": "verhoeven-etal-2014-automatic", "title": "Automatic Compound Processing: Compound Splitting and Semantic Analysis for Afrikaans and Dutch", "abstract": "Compounding, the process of combining several simplex words into a complex whole, is a productive process in a wide range of languages. In particular, concatenative compounding, in which the components are \u201cglued\u201d together, leads to problems, for instance, in computational tools that rely on a predefined lexicon. Here we present the AuCoPro project, which focuses on compounding in the closely related languages Afrikaans and Dutch. The project consists of subprojects focusing on compound splitting (identifying the boundaries of the components) and compound semantics (identifying semantic relations between the components). We describe the developed datasets as well as results showing the effectiveness of the developed datasets.", "phrases": ["compound splitting", "semantic analysis", "dutch"], "overall_score": 1.2390672036850956, "scores": [1.6905288752848984, 0.848278768993614, 0.8447350753844854], "rank_score": 1.1278475732209994} -{"id": "patra-etal-2019-bilingual", "title": "Bilingual Lexicon Induction with Semi-supervision in Non-Isometric Embedding Spaces", "abstract": "Recent work on bilingual lexicon induction (BLI) has frequently depended either on aligned bilingual lexicons or on distribution matching, often with an assumption about the isometry of the two spaces. 
We propose a technique to quantitatively estimate this assumption of the isometry between two embedding spaces and empirically show that this assumption weakens as the languages in question become increasingly etymologically distant. We then propose Bilingual Lexicon Induction with Semi-Supervision (BLISS) \u2014 a semi-supervised approach that relaxes the isometric assumption while leveraging both limited aligned bilingual lexicons and a larger set of unaligned word embeddings, as well as a novel hubness filtering technique. Our proposed method obtains state of the art results on 15 of 18 language pairs on the MUSE dataset, and does particularly well when the embedding spaces don't appear to be isometric. In addition, we also show that adding supervision stabilizes the learning procedure, and is effective even with minimal supervision.", "phrases": ["semi-supervision", "isometric assumption", "bilingual lexicon induction"], "overall_score": 3.1948679411773813, "scores": [1.9856670670756584, 0.8676002716058565, 0.5296772756517596], "rank_score": 1.1276482047777583} -{"id": "lareau-etal-2011-collocations", "title": "Collocations in Multilingual Natural Language Generation: Lexical Functions meet Lexical Functional Grammar", "abstract": "In a collocation, the choice of one lexical item depends on the choice made for another. This poses a problem for simple approaches to lexicalisation in natural language generation systems. In the Meaning-Text framework, recurrent patterns of collocations have been characterised by lexical functions, which offer an elegant way of describing these relationships. Previous work has shown that using lexical functions in the context of multilingual natural language generation allows for a more efficient development of linguistic resources. We propose a way to encode lexical functions in the Lexical Functional Grammar framework.", "phrases": ["language generation", "lexical function", "collocation"], "overall_score": 1.2382460547974994, "scores": [1.8913705844633966, 0.886296536821322, 0.6036332725931065], "rank_score": 1.1271001312926083} -{"id": "do-etal-2011-minimally", "title": "Minimally Supervised Event Causality Identification", "abstract": "This paper develops a minimally supervised approach, based on focused distributional similarity methods and discourse connectives, for identifying of causality relations between events in context. While it has been shown that distributional similarity can help identifying causality, we observe that discourse connectives and the particular discourse relation they evoke in context provide additional information towards determining causality between events. We show that combining discourse relation predictions and distributional similarity methods in a global inference procedure provides additional improvements towards determining event causality.", "phrases": ["causality", "supervised approach", "similarity method", "discourse relation", "pmi"], "overall_score": 3.869866464875482, "scores": [1.9668770247343823, 1.060828167285826, 0.897508387979183, 0.8672371208382115, 0.8422040524198239], "rank_score": 1.1269309506514853} -{"id": "peldszus-stede-2015-joint", "title": "Joint prediction in MST-style discourse parsing for argumentation mining", "abstract": "We introduce a new approach to argumentation mining that we applied to a parallel German/English corpus of short texts annotated with argumentation structure. 
We focus on structure prediction, which we break into a number of subtasks: relation identification, central claim identification, role classification, and function classification. Our new model jointly predicts different aspects of the structure by combining the different subtask predictions in the edge weights of an evidence graph; we then apply a standard MST decoding algorithm. This model not only outperforms two reasonable baselines and two data-driven models of global argument structure for the difficult subtask of relation identification, but also improves the results for central claim identification and function classification, and it compares favorably to a complex mstparser pipeline.", "phrases": ["argumentation mining", "short text", "discourse unit"], "overall_score": 3.7942703645266755, "scores": [1.9798827451770067, 0.8773052355492331, 0.523213287322059], "rank_score": 1.1268004226827664} -{"id": "elson-etal-2010-extracting", "title": "Extracting Social Networks from Literary Fiction", "abstract": "We present a method for extracting social networks from literature, namely, nineteenth-century British novels and serials. We derive the networks from dialogue interactions, and thus our method depends on the ability to determine when two characters are in conversation. Our approach involves character name chunking, quoted speech attribution and conversation detection given the set of quotes. We extract features from the social networks and examine their correlation with one another, as well as with metadata such as the novel's setting. Our results provide evidence that the majority of novels in this time period do not fit two characterizations provided by literary scholars. Instead, our results suggest an alternative explanation for differences in social networks.", "phrases": ["social network", "literary fiction", "british novel", "narrative text", "network extraction"], "overall_score": 4.098365645914593, "scores": [0.929195605946932, 2.166337631615078, 0.8804070056456323, 0.8297930430905142, 0.827625575808999], "rank_score": 1.126671772421431} -{"id": "teufel-etal-2006-annotation", "title": "An annotation scheme for citation function", "abstract": "We study the interplay of the discourse structure of a scientific argument with formal citations. One subproblem of this is to classify academic citations in scientific articles according to their rhetorical function, e.g., as a rival approach, as a part of the solution, or as a flawed approach that justifies the current research. Here, we introduce our annotation scheme with 12 categories, and present an agreement study.", "phrases": ["annotation scheme", "citation function", "rhetorical function"], "overall_score": 2.593841596002937, "scores": [1.850086291648495, 0.9773100684317007, 0.552076916145402], "rank_score": 1.1264910920751994} -{"id": "xue-li-2018-aspect", "title": "Aspect Based Sentiment Analysis with Gated Convolutional Networks", "abstract": "Aspect based sentiment analysis (ABSA) can provide more detailed information than general sentiment analysis, because it aims to predict the sentiment polarities of the given aspects or entities in text. We summarize previous approaches into two subtasks: aspect-category sentiment analysis (ACSA) and aspect-term sentiment analysis (ATSA). Most previous approaches employ long short-term memory and attention mechanisms to predict the sentiment polarity of the concerned targets, which are often complicated and need more training time. 
We propose a model based on convolutional neural networks and gating mechanisms, which is more accurate and efficient. First, the novel Gated Tanh-ReLU Units can selectively output the sentiment features according to the given aspect or entity. The architecture is much simpler than attention layer used in the existing models. Second, the computations of our model could be easily parallelized during training, because convolutional layers do not have time dependency as in LSTM layers, and gating units also work independently. The experiments on SemEval datasets demonstrate the efficiency and effectiveness of our models.", "phrases": ["convolutional neural network", "sentiment classification", "aspect information"], "overall_score": 3.753526899113466, "scores": [1.4284765570382023, 1.4281819658297503, 0.5226600822642176], "rank_score": 1.1264395350440568} -{"id": "shuster-etal-2021-retrieval-augmentation", "title": "Retrieval Augmentation Reduces Hallucination in Conversation", "abstract": "Despite showing increasingly human-like conversational abilities, state-of-the-art dialogue models often suffer from factual incorrectness and hallucination of knowledge (Roller et al., 2020). In this work we explore the use of neural-retrieval-in-the-loop architectures - recently shown to be effective in open-domain QA (Lewis et al., 2020b; Izacard and Grave, 2020) - for knowledge-grounded dialogue, a task that is arguably more challenging as it requires querying based on complex multi-turn dialogue context and generating conversationally coherent responses. We study various types of architectures with multiple components - retrievers, rankers, and encoder-decoders - with the goal of maximizing knowledgeability while retaining conversational ability. We demonstrate that our best models obtain state-of-the-art performance on two knowledge-grounded conversational tasks. The models exhibit open-domain conversational capabilities, generalize effectively to scenarios not within the training data, and, as verified by human evaluations, substantially reduce the well-known problem of knowledge hallucination in state-of-the-art chatbots.", "phrases": ["hallucination", "conversation", "knowledge-grounded dialogue"], "overall_score": 3.25297663434914, "scores": [1.390693041944029, 1.4343438876775598, 0.5513211033501705], "rank_score": 1.125452677657253} -{"id": "sennrich-zhang-2019-revisiting", "title": "Revisiting Low-Resource Neural Machine Translation: A Case Study", "abstract": "It has been shown that the performance of neural machine translation (NMT) drops starkly in low-resource conditions, underperforming phrase-based statistical machine translation (PBSMT) and requiring large amounts of auxiliary data to achieve competitive results. In this paper, we re-assess the validity of these results, arguing that they are the result of lack of system adaptation to low-resource settings. We discuss some pitfalls to be aware of when training low-resource NMT systems, and recent techniques that have shown to be especially helpful in low-resource settings, resulting in a set of best practices for low-resource NMT. In our experiments on German\u2013English with different amounts of IWSLT14 training data, we show that, without the use of any auxiliary monolingual or multilingual data, an optimized NMT system can outperform PBSMT with far less data than previously claimed. 
We also apply these techniques to a low-resource Korean\u2013English dataset, surpassing previously reported results by 4 BLEU.", "phrases": ["low-resource setting", "low-resource nmt", "nmt model", "hyperparameter"], "overall_score": 3.9001465242948608, "scores": [1.345531347229249, 1.2207853888118347, 1.061869795808477, 0.8731911076228572], "rank_score": 1.1253444098681045} -{"id": "rentoumi-etal-2010-united", "title": "United we Stand: Improving Sentiment Analysis by Joining Machine Learning and Rule Based Methods", "abstract": "In the past, we have successfully used machine learning approaches for sentiment analysis. In the course of those experiments, we observed that our machine learning method, although able to cope well with figurative language, could not always reach a certain decision about the polarity orientation of sentences, yielding erroneous evaluations. We support the conjecture that these cases bearing mild figurativeness could be better handled by a rule-based system. These two systems, acting complementarily, could bridge the gap between machine learning and rule-based approaches. Experimental results using the corpus of the Affective Text Task of SemEval '07 provide evidence in favor of this direction.", "phrases": ["sentiment analysis", "machine learning", "rule-based system"], "overall_score": 2.016244425992929, "scores": [2.030386435378588, 0.8139744365958066, 0.5315014476397261], "rank_score": 1.1252874398713735} -{"id": "tang-etal-2018-self", "title": "Why Self-Attention? A Targeted Evaluation of Neural Machine Translation Architectures", "abstract": "Recently, non-recurrent architectures (convolutional, self-attentional) have outperformed RNNs in neural machine translation. CNNs and self-attentional networks can connect distant words via shorter network paths than RNNs, and it has been speculated that this improves their ability to model long-range dependencies. However, this theoretical argument has not been tested empirically, nor have alternative explanations for their strong performance been explored in-depth. We hypothesize that the strong performance of CNNs and self-attentional networks could also be due to their ability to extract semantic features from the source text, and we evaluate RNNs, CNNs and self-attention networks on two tasks: subject-verb agreement (where capturing long-range dependencies is required) and word sense disambiguation (where semantic feature extraction is required). Our experimental results show that: 1) self-attentional networks and CNNs do not outperform RNNs in modeling subject-verb agreement over long distances; 2) self-attentional networks perform distinctly better than RNNs and CNNs on word sense disambiguation.", "phrases": ["self-attention", "cnns", "long-range dependency", "agreement", "word sense disambiguation"], "overall_score": 3.576141219244825, "scores": [0.9026092897444459, 1.4809573926181718, 1.208166663962641, 1.12779217286243, 0.906781349965146], "rank_score": 1.125261373830567} -{"id": "friedrich-etal-2016-situation", "title": "Situation entity types: automatic classification of clause-level aspect", "abstract": "This paper describes the first robust approach to automatically labeling clauses with their situation entity type (Smith, 2003), capturing aspectual phenomena at the clause level which are relevant for interpreting both semantics at the clause level and discourse structure. 
Previous work on this task used a small data set from a limited domain, and relied mainly on words as features, an approach which is impractical in larger settings. We provide a new corpus of texts from 13 genres (40,000 clauses) annotated with situation entity types. We show that our sequence labeling approach using distributional information in the form of Brown clusters, as well as syntactic-semantic features targeted to the task, is robust across genres, reaching accuracies of up to 76%.", "phrases": ["automatic classification", "clause-level aspect", "genre", "situation entity type"], "overall_score": 2.4724044787918937, "scores": [1.9276250435372786, 0.9540604587330567, 0.7950033867022451, 0.8242701923959439], "rank_score": 1.125239770342131} -{"id": "kumar-jena-etal-2020-c", "title": "C-Net: Contextual Network for Sarcasm Detection", "abstract": "Automatic Sarcasm Detection in conversations is a difficult and tricky task. Classifying an utterance as sarcastic or not in isolation can be futile since most of the time the sarcastic nature of a sentence heavily relies on its context. This paper presents our proposed model, C-Net, which takes contextual information of a sentence in a sequential manner to classify it as sarcastic or non-sarcastic. Our model showcases competitive performance in the Sarcasm Detection shared task organised on CodaLab and achieved 75.0% F1-score on the Twitter dataset and 66.3% F1-score on Reddit dataset.", "phrases": ["sarcasm detection", "contextual information", "c-net"], "overall_score": 1.8106740524137188, "scores": [1.9955604092738595, 0.8462584756905187, 0.5332862466314175], "rank_score": 1.1250350438652652} -{"id": "he-etal-2010-improving", "title": "Improving the Post-Editing Experience using Translation Recommendation: A User Study", "abstract": "We report findings from a user study with professional post-editors using a translation recommendation framework (He et al., 2010) to integrate Statistical Machine Translation (SMT) output with Translation Memory (TM) systems. The framework recommends SMT outputs to a TM user when it predicts that SMT outputs are more suitable for post-editing than the hits provided by the TM. We analyze the effectiveness of the model as well as the reaction of potential users. Based on the performance statistics and the users' comments, we find that translation recommendation can reduce the workload of professional post-editors and improve the acceptance of MT in the localization industry.", "phrases": ["translation recommendation", "user study", "post-editor"], "overall_score": 2.0152668980190316, "scores": [1.6604088069944714, 0.8562686571742321, 0.857548149195467], "rank_score": 1.1247418711213901} -{"id": "li-etal-2020-active-learning", "title": "Active Learning for Coreference Resolution using Discrete Annotation", "abstract": "We improve upon pairwise annotation for active learning in coreference resolution, by asking annotators to identify mention antecedents if a presented mention pair is deemed not coreferent. This simple modification, when combined with a novel mention clustering algorithm for selecting which examples to label, is much more efficient in terms of the performance obtained per annotation budget. In experiments with existing benchmark coreference datasets, we show that the signal from this additional question leads to significant performance gains per human-annotation hour. Future work can use our annotation protocol to effectively develop coreference models for new domains. 
Our code is publicly available.", "phrases": ["coreference resolution", "discrete annotation", "mention", "active learning"], "overall_score": 1.8093408684720336, "scores": [1.8424019325703738, 0.9161513281495303, 0.8442430781902133, 0.8940304215222912], "rank_score": 1.1242066901081023} -{"id": "weeds-weir-2003-general", "title": "A General Framework for Distributional Similarity", "abstract": "We present a general framework for distributional similarity based on the concepts of precision and recall. Different parameter settings within this framework approximate different existing similarity measures as well as many more which have, until now, been unexplored. We show that optimal parameter settings outperform two existing state-of-the-art similarity measures on two evaluation tasks for high and low frequency nouns.", "phrases": ["distributional similarity", "dih", "inclusion hypothesis", "research area", "weight"], "overall_score": 3.6619769720415043, "scores": [2.7100095381119313, 0.900102145653528, 0.872769008103321, 0.5757773941634055, 0.5611523298146127], "rank_score": 1.1239620831693597} -{"id": "tuggener-etal-2020-ledgar", "title": "LEDGAR: A Large-Scale Multi-label Corpus for Text Classification of Legal Provisions in Contracts", "abstract": "We present LEDGAR, a multilabel corpus of legal provisions in contracts. The corpus was crawled and scraped from the public domain (SEC filings) and is, to the best of our knowledge, the first freely available corpus of its kind. Since the corpus was constructed semi-automatically, we apply and discuss various approaches to noise removal. Due to the rather large labelset of over 12'000 labels annotated in almost 100'000 provisions in over 60'000 contracts, we believe the corpus to be of interest for research in the field of Legal NLP, (large-scale or extreme) text classification, as well as for legal studies. We discuss several methods to sample subcorpora from the corpus and implement and evaluate different automatic classification approaches. Finally, we perform transfer experiments to evaluate how well the classifiers perform on contracts stemming from outside the corpus.", "phrases": ["text classification", "contract", "ledgar"], "overall_score": 1.8086042935967086, "scores": [1.7817465066390852, 0.8024243702042617, 0.787076214375085], "rank_score": 1.123749030406144} -{"id": "fan-etal-2018-hierarchical", "title": "Hierarchical Neural Story Generation", "abstract": "We explore story generation: creative systems that can build coherent and fluent passages of text about a topic. We collect a large dataset of 300K human-written stories paired with writing prompts from an online forum. Our dataset enables hierarchical story generation, where the model first generates a premise, and then transforms it into a passage of text. We gain further improvements with a novel form of model fusion that improves the relevance of the story to the prompt, and adding a new gated multi-scale self-attention mechanism to model long-range context. Experiments show large improvements over strong baselines on both automated and human evaluations. 
Human judges prefer stories generated by our approach to those from a strong non-hierarchical model by a factor of two to one.", "phrases": ["story generation", "coherence", "language model", "generation model", "perplexity"], "overall_score": 4.522973374927901, "scores": [2.135150678174124, 1.0662856859804202, 1.0397796031029773, 0.8322892974783886, 0.5446043098114384], "rank_score": 1.1236219149094697} -{"id": "luo-etal-2020-grace", "title": "GRACE: Gradient Harmonized and Cascaded Labeling for Aspect-based Sentiment Analysis", "abstract": "In this paper, we focus on the imbalance issue, which is rarely studied in aspect term extraction and aspect sentiment classification when regarding them as sequence labeling tasks. Besides, previous works usually ignore the interaction between aspect terms when labeling polarities. We propose a GRadient hArmonized and CascadEd labeling model (GRACE) to solve these problems. Specifically, a cascaded labeling module is developed to enhance the interchange between aspect terms and improve the attention of sentiment tokens when labeling sentiment polarities. The polarities sequence is designed to depend on the generated aspect terms labels. To alleviate the imbalance issue, we extend the gradient harmonized mechanism used in object detection to the aspect-based sentiment analysis by adjusting the weight of each label dynamically. The proposed GRACE adopts a post-pretraining BERT as its backbone. Experimental results demonstrate that the proposed model achieves consistency improvement on multiple benchmark datasets and generates state-of-the-art results.", "phrases": ["gradient", "sentiment analysis", "aspect term"], "overall_score": 1.8083043871843718, "scores": [1.9892100575509957, 0.8179567693460181, 0.5635212373281684], "rank_score": 1.1235626880750607} -{"id": "he-etal-2019-unlearn", "title": "Unlearn Dataset Bias in Natural Language Inference by Fitting the Residual", "abstract": "Statistical natural language inference (NLI) models are susceptible to learning dataset bias: superficial cues that happen to associate with the label on a particular dataset, but are not useful in general, e.g., negation words indicate contradiction. As exposed by several recent challenge datasets, these models perform poorly when such association is absent, e.g., predicting that \u201cI love dogs.\u201d contradicts \u201cI don't love cats.\u201d. Our goal is to design learning algorithms that guard against known dataset bias. We formalize the concept of dataset bias under the framework of distribution shift and present a simple debiasing algorithm based on residual fitting, which we call DRiFt. We first learn a biased model that only uses features that are known to relate to dataset bias. Then, we train a debiased model that fits to the residual of the biased model, focusing on examples that cannot be predicted well by biased features only. We use DRiFt to train three high-performing NLI models on two benchmark datasets, SNLI and MNLI. 
Our debiased models achieve significant gains over baseline models on two challenge test sets, while maintaining reasonable performance on the original test sets.", "phrases": ["dataset bias", "natural language inference", "residual", "training objective", "adversarial dataset"], "overall_score": 3.6164098416954777, "scores": [2.7087075547494286, 0.9470879059777026, 0.8282936062285233, 0.5678953582459311, 0.5655200056249055], "rank_score": 1.123500886165298} -{"id": "khashabi-etal-2018-looking", "title": "Looking Beyond the Surface: A Challenge Set for Reading Comprehension over Multiple Sentences", "abstract": "We present a reading comprehension challenge in which questions can only be answered by taking into account information from multiple sentences. We solicit and verify questions and answers for this challenge through a 4-step crowdsourcing experiment. Our challenge dataset contains 6,500+ questions for 1000+ paragraphs across 7 different domains (elementary school science, news, travel guides, fiction stories, etc) bringing in linguistic diversity to the texts and to the questions wordings. On a subset of our dataset, we found human solvers to achieve an F1-score of 88.1%. We analyze a range of baselines, including a recent state-of-art reading comprehension system, and demonstrate the difficulty of this challenge, despite a high human performance. The dataset is the first to study multi-sentence inference at scale, with an open-ended set of question types that requires reasoning skills.", "phrases": ["reading comprehension", "multiple sentence", "reasoning", "correct answer"], "overall_score": 3.7824186287922714, "scores": [2.2799243767148867, 1.0411155582000917, 0.6318854425026293, 0.5401976741527853], "rank_score": 1.1232807628925983} -{"id": "borin-etal-2012-open", "title": "The open lexical infrastructure of Spr\u00e5kbanken", "abstract": "We present our ongoing work on Karp, Spr\u00e5kbanken's (the Swedish Language Bank) open lexical infrastructure, which has two main functions: (1) to support the work on creating, curating, and integrating our various lexical resources; and (2) to publish daily versions of the resources, making them searchable and downloadable. An important requirement on the lexical infrastructure is also that we maintain a strong bidirectional connection to our corpus infrastructure. At the heart of the infrastructure is the SweFN++ project with the goal to create free Swedish lexical resources geared towards language technology applications. The infrastructure currently hosts 15 Swedish lexical resources, including historical ones, some of which have been created from scratch using existing free resources, both external and in-house. The resources are integrated through links to a pivot lexical resource, SALDO, a large morphological and lexical-semantic resource for modern Swedish. 
SALDO has been selected as the pivot partly because of its size and quality, but also because its form and sense units have been assigned persistent identifiers (PIDs) to which the lexical information in other lexical resources and in corpora are linked.", "phrases": ["lexical infrastructure", "spr\u00e5kbanken", "strong bidirectional connection"], "overall_score": 1.2339906581538762, "scores": [1.9708144811584762, 0.8639788029017775, 0.5348868229679249], "rank_score": 1.1232267023427263} -{"id": "huang-lee-2008-contrastive", "title": "Contrastive Approach towards Text Source Classification based on Top-Bag-of-Word Similarity", "abstract": "This paper proposes a method to automatically classify texts from different varieties of the same language. We show that similarity measure is a robust tool for studying comparable corpora of language variations. We take LDC\u2019s Chinese Gigaword Corpus composed of three varieties of Chinese from Mainland China, Singapore, and Taiwan, as the comparable corpora. Top-bag-of-word similarity measures reflect distances among the three varieties of the same language. A Top-bag-of-word similarity based contrastive approach was taken to solve the text source classification problem. Our results show that a contrastive approach using similarity to rule out identity of source and to arrive at the actual source by inference is more robust than direct confirmation of source by similarity. We show that this approach is robust when applied to other texts.", "phrases": ["top-bag-of-word similarity", "same language", "chinese"], "overall_score": 2.5859376305429373, "scores": [1.9882308338129964, 0.8328703227711042, 0.5480741738882022], "rank_score": 1.1230584434907676} -{"id": "zhao-etal-2006-improved", "title": "An Improved Chinese Word Segmentation System with Conditional Random Field", "abstract": "In this paper, we describe a Chinese word segmentation system that we developed for the Third SIGHAN Chinese Language Processing Bakeoff (Bakeoff2006). We took part in six tracks, namely the closed and open track on three corpora, Academia Sinica (CKIP), City University of Hong Kong (CityU), and University of Pennsylvania/University of Colorado (UPUC). Based on a conditional random field based approach, our word segmenter achieved the highest F measures in four tracks, and the third highest in the other two tracks. We found that the use of a 6-tag set, tone feature of Chinese character and assistant segmenters trained on other corpora further improve Chinese word segmentation performance.", "phrases": ["word segmentation system", "conditional random field", "chinese character"], "overall_score": 2.790522059662986, "scores": [1.8682619781578929, 0.9663774929135313, 0.5343265943955329], "rank_score": 1.1229886884889857} -{"id": "chen-etal-2015-long", "title": "Long Short-Term Memory Neural Networks for Chinese Word Segmentation", "abstract": "Currently most of state-of-the-art methods for Chinese word segmentation are based on supervised learning, whose features are mostly extracted from a local context. These methods cannot utilize the long distance information which is also crucial for word segmentation. In this paper, we propose a novel neural network model for Chinese word segmentation, which adopts the long short-term memory (LSTM) neural network to keep the previous important information in memory cell and avoids the limit of window size of local context. 
Experiments on PKU, MSRA and CTB6 benchmark datasets show that our model outperforms the previous neural network models and state-of-the-art methods.", "phrases": ["chinese word segmentation", "long distance information", "neural network model", "long short-term memory", "context window"], "overall_score": 3.418434313266779, "scores": [2.435271958878383, 1.2056696458661469, 0.8461383656889478, 0.5827305909389539, 0.5442625538563626], "rank_score": 1.1228146230457585} -{"id": "jiang-etal-2019-challenge", "title": "A Challenge Dataset and Effective Models for Aspect-Based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis (ABSA) has attracted increasing attention recently due to its broad applications. In existing ABSA datasets, most sentences contain only one aspect or multiple aspects with the same sentiment polarity, which makes ABSA task degenerate to sentence-level sentiment analysis. In this paper, we present a new large-scale Multi-Aspect Multi-Sentiment (MAMS) dataset, in which each sentence contains at least two different aspects with different sentiment polarities. The release of this dataset would push forward the research in this field. In addition, we propose simple yet effective CapsNet and CapsNet-BERT models which combine the strengths of recent NLP advances. Experiments on our new dataset show that the proposed model significantly outperforms the state-of-the-art baseline methods.", "phrases": ["challenge dataset", "sentiment analysis", "semeval dataset"], "overall_score": 3.1789355392617025, "scores": [2.0184520069216307, 0.7860181012506358, 0.5616041896889676], "rank_score": 1.1220247659537448} -{"id": "gamon-etal-2005-sentence", "title": "Sentence-level MT evaluation without reference translations: beyond language modeling", "abstract": "In this paper we investigate the possibility of evaluating MT quality and fluency at the sentence level in the absence of reference translations. We measure the correlation between automatically-generated scores and human judgments, and we evaluate the performance of our system when used as a classifier for identifying highly dysfluent and ill-formed sentences. We show that we can substantially improve on the correlation between language model perplexity scores and human judgment by combining these perplexity scores with class probabilities from a machine-learned classifier. The classifier uses linguistic features and has been trained to distinguish human translations from machine translations. We show that this approach also performs well in identifying dysfluent sentences.", "phrases": ["reference translation", "sentence level", "judgment", "perplexity score", "svm classifier"], "overall_score": 3.517876444581868, "scores": [2.740099679787631, 0.9303638523647987, 0.8398138140297976, 0.5610664363631376, 0.5384201052496019], "rank_score": 1.1219527775589935} -{"id": "pei-etal-2014-max", "title": "Max-Margin Tensor Neural Network for Chinese Word Segmentation", "abstract": "Recently, neural network models for natural language processing tasks have been increasingly focused on for their ability to alleviate the burden of manual feature engineering. In this paper, we propose a novel neural network model for Chinese word segmentation called Max-Margin Tensor Neural Network (MMTNN). By exploiting tag embeddings and tensor-based transformation, MMTNN has the ability to model complicated interactions between tags and context characters. 
Furthermore, a new tensor factorization approach is proposed to speed up the model and avoid overfitting. Experiments on the benchmark dataset show that our model achieves better performance than previous neural network models and that our model can achieve a competitive performance with minimal feature engineering. Despite Chinese word segmentation being a specific case, MMTNN can be easily generalized and applied to other sequence labeling tasks.", "phrases": ["chinese word segmentation", "neural network model", "tensor model"], "overall_score": 3.4671719378566017, "scores": [1.9241832888449388, 0.8970315451772497, 0.5438362678379304], "rank_score": 1.1216837006200395} -{"id": "mccoy-etal-2012-linking", "title": "Linking Uncertainty in Physicians' Narratives to Diagnostic Correctness", "abstract": "In the medical domain, misdiagnoses and diagnostic uncertainty put lives at risk and incur substantial financial costs. Clearly, medical reasoning and decision-making need to be better understood. We explore a possible link between linguistic expression and diagnostic correctness. We report on an unusual data set of spoken diagnostic narratives used to computationally model and predict diagnostic correctness based on automatically extracted and linguistically motivated features that capture physicians' uncertainty. A multimodal data set was collected as dermatologists viewed images of skin conditions and explained their diagnostic process and observations aloud. We discuss experimentation and analysis in initial and secondary pilot studies. In both cases, we experimented with computational modeling using features from the acoustic-prosodic and lexical-structural linguistic modalities.", "phrases": ["uncertainty", "narrative", "diagnostic correctness"], "overall_score": 1.23223723876811, "scores": [1.7749854493334523, 0.8026232389530235, 0.7872833254247459], "rank_score": 1.1216306712370738} -{"id": "kafle-etal-2017-data", "title": "Data Augmentation for Visual Question Answering", "abstract": "Data augmentation is widely used to train deep neural networks for image classification tasks. Simply flipping images can help learning tremendously by increasing the number of training images by a factor of two. However, little work has been done studying data augmentation in natural language processing. Here, we describe two methods for data augmentation for Visual Question Answering (VQA). The first uses existing semantic annotations to generate new questions. The second method is a generative approach using recurrent neural networks. Experiments show that the proposed data augmentation improves performance of both baseline and state-of-the-art VQA algorithms.", "phrases": ["visual question answering", "new question", "data augmentation"], "overall_score": 1.8050986526163277, "scores": [1.8347771624718952, 0.9938382986497588, 0.5360970984693743], "rank_score": 1.1215708531970094} -{"id": "lee-seneff-2008-correcting", "title": "Correcting Misuse of Verb Forms", "abstract": "This paper proposes a method to correct English verb form errors made by non-native speakers. A basic approach is template matching on parse trees. The proposed method improves on this approach in two ways. To improve recall, irregularities in parse trees caused by verb form errors are taken into account; to improve precision, n-gram counts are utilized to filter proposed corrections. 
Evaluation on non-native corpora, representing two genres and mother tongues, shows promising results.", "phrases": ["misuse", "verb form error", "parse tree", "correction"], "overall_score": 3.036562201831745, "scores": [0.84294792144968, 1.6008182743122674, 1.1459699374448928, 0.8955015490131226], "rank_score": 1.1213094205549907} -{"id": "pustejovsky-etal-2010-iso", "title": "ISO-TimeML: An International Standard for Semantic Annotation", "abstract": "In this paper, we present ISO-TimeML, a revised and interoperable version of the temporal markup language, TimeML. We describe the changes and enrichments made, while framing the effort in a more general methodology of semantic annotation. In particular, we assume a principled distinction between the annotation of an expression and the representation which that annotation denotes. This involves not only the specification of an annotation language for a particular phenomenon, but also the development of a meta-model that allows one to interpret the syntactic expressions of the specification semantically.", "phrases": ["standard", "semantic annotation", "timeml"], "overall_score": 3.4657257763736062, "scores": [0.9266311968401196, 1.5350284305866646, 0.9019879076708415], "rank_score": 1.121215845032542} -{"id": "ion-2012-pexacc", "title": "PEXACC: A Parallel Sentence Mining Algorithm from Comparable Corpora", "abstract": "Extracting parallel data from comparable corpora in order to enrich existing statistical translation models is an avenue that attracted a lot of research in recent years. There are experiments that convincingly show how parallel data extracted from comparable corpora is able to improve statistical machine translation. Yet, the existing body of research on parallel sentence mining from comparable corpora does not take into account the degree of comparability of the corpus being processed or the computation time it takes to extract parallel sentences from a corpus of a given size. We will show that the performance of a parallel sentence extractor crucially depends on the degree of comparability such that it is more difficult to process a weakly comparable corpus than a strongly comparable corpus. In this paper we describe PEXACC, a distributed (running on multiple CPUs), trainable parallel sentence/phrase extractor from comparable corpora. PEXACC is freely available for download with the ACCURAT Toolkit, a collection of MT-related tools developed in the ACCURAT project.", "phrases": ["parallel sentence", "comparable corpora", "pexacc"], "overall_score": 1.2316403610011686, "scores": [1.8864210321389068, 0.8523707305430026, 0.6244703463583979], "rank_score": 1.1210873696801025} -{"id": "margatina-etal-2021-active", "title": "Active Learning by Acquiring Contrastive Examples", "abstract": "Common acquisition functions for active learning use either uncertainty or diversity sampling, aiming to select difficult and diverse data points from the pool of unlabeled data, respectively. In this work, leveraging the best of both worlds, we propose an acquisition function that opts for selecting contrastive examples, i.e. data points that are similar in the model feature space and yet the model outputs maximally different predictive likelihoods. We compare our approach, CAL (Contrastive Active Learning), with a diverse set of acquisition functions in four natural language understanding tasks and seven datasets. 
Our experiments show that CAL performs consistently better than or on par with the best performing baseline across all tasks, on both in-domain and out-of-domain data. We also conduct an extensive ablation study of our method and we further analyze all actively acquired datasets, showing that CAL achieves a better trade-off between uncertainty and diversity compared to other strategies.", "phrases": ["contrastive example", "likelihood", "active learning"], "overall_score": 2.1811852207889344, "scores": [1.837807687722304, 0.9570407631882454, 0.5678739571839474], "rank_score": 1.1209074693648322} -{"id": "gonen-goldberg-2019-language", "title": "Language Modeling for Code-Switching: Evaluation, Integration of Monolingual Data, and Discriminative Training", "abstract": "We focus on the problem of language modeling for code-switched language, in the context of automatic speech recognition (ASR). Language modeling for code-switched language is challenging for (at least) three reasons: (1) lack of available large-scale code-switched data for training; (2) lack of a replicable evaluation setup that is ASR directed yet isolates language modeling performance from the other intricacies of the ASR system; and (3) the reliance on generative modeling. We tackle these three issues: we propose an ASR-motivated evaluation setup which is decoupled from an ASR system and the choice of vocabulary, and provide an evaluation dataset for English-Spanish code-switching. This setup lends itself to a discriminative training approach, which we demonstrate to work better than generative language modeling. Finally, we explore a variety of training protocols and verify the effectiveness of training with large amounts of monolingual data followed by fine-tuning with small amounts of code-switched data, for both the generative and discriminative cases.", "phrases": ["code-switching", "monolingual data", "language modeling"], "overall_score": 2.0074623334146424, "scores": [1.701711696809613, 0.8435155866845374, 0.8159308985460739], "rank_score": 1.1203860606800748} -{"id": "firat-etal-2016-multi", "title": "Multi-Way, Multilingual Neural Machine Translation with a Shared Attention Mechanism", "abstract": "We propose multi-way, multilingual neural machine translation. The proposed approach enables a single neural translation model to translate between multiple languages, with a number of parameters that grows only linearly with the number of languages. This is made possible by having a single attention mechanism that is shared across all language pairs. We train the proposed multi-way, multilingual model on ten language pairs from WMT'15 simultaneously and observe clear performance improvements over models trained on only one language pair. In particular, we observe that the proposed model significantly improves the translation quality of low-resource language pairs.", "phrases": ["machine translation", "single attention mechanism", "multi-way multilingual nmt", "main goal"], "overall_score": 5.065632561983988, "scores": [2.428100702633433, 0.9497924135417865, 0.5742577121886049, 0.5289366666997907], "rank_score": 1.1202718737659036} -{"id": "abend-rappoport-2017-state", "title": "The State of the Art in Semantic Representation", "abstract": "Semantic representation has been receiving growing attention in NLP in the past few years, and many proposals for semantic schemes (e.g., AMR, UCCA, GMB, UDS) have been put forth. 
Yet, little has been done to assess the achievements and the shortcomings of these new contenders, compare them with syntactic schemes, and clarify the general goals of research on semantic representation. We address these gaps by critically surveying the state of the art in the field.", "phrases": ["art", "semantic representation", "annotator"], "overall_score": 2.6853583956452463, "scores": [2.008051307125328, 0.8221353060409413, 0.5294576875350735], "rank_score": 1.1198814335671143} -{"id": "zhang-etal-2020-seqmix", "title": "SeqMix: Augmenting Active Sequence Labeling via Sequence Mixup", "abstract": "Active learning is an important technique for low-resource sequence labeling tasks. However, current active sequence labeling methods use the queried samples alone in each iteration, which is an inefficient way of leveraging human annotations. We propose a simple but effective data augmentation method to improve label efficiency of active sequence labeling. Our method, SeqMix, simply augments the queried samples by generating extra labeled sequences in each iteration. The key difficulty is to generate plausible sequences along with token-level labels. In SeqMix, we address this challenge by performing mixup for both sequences and token-level labels of the queried samples. Furthermore, we design a discriminator during sequence mixup, which judges whether the generated sequences are plausible or not. Our experiments on Named Entity Recognition and Event Detection tasks show that SeqMix can improve the standard active sequence labeling method by 2.27%\u20133.75% in terms of F_1 scores. The code and data for SeqMix can be found at .", "phrases": ["sequence mixup", "active learning", "sequence labeling task"], "overall_score": 1.8019146779907658, "scores": [1.8776356520676918, 0.873835847286114, 0.6073061162403838], "rank_score": 1.1195925385313965} -{"id": "dagan-etal-2006-direct", "title": "Direct Word Sense Matching for Lexical Substitution", "abstract": "This paper investigates conceptually and empirically the novel sense matching task, which requires recognizing whether the senses of two synonymous words match in context. We suggest direct approaches to the problem, which avoid the intermediate step of explicit word sense disambiguation, and demonstrate their appealing advantages and stimulating potential for future research.", "phrases": ["lexical substitution", "indirect approach", "direction"], "overall_score": 2.8714710781908828, "scores": [1.973781069538579, 0.8241178148622856, 0.5606131304710812], "rank_score": 1.1195040049573153} -{"id": "gerber-chai-2012-semantic", "title": "Semantic Role Labeling of Implicit Arguments for Nominal Predicates", "abstract": "Nominal predicates often carry implicit arguments. Recent work on semantic role labeling has focused on identifying arguments within the local context of a predicate; implicit arguments, however, have not been systematically examined. To address this limitation, we have manually annotated a corpus of implicit arguments for ten predicates from NomBank. Through analysis of this corpus, we find that implicit arguments add 71% to the argument structures that are present in NomBank. Using the corpus, we train a discriminative model that is able to identify implicit arguments with an F1 score of 50%, significantly outperforming an informed baseline model. 
This article describes our investigation, explores a wide variety of features important for the task, and discusses future directions for work on implicit argument identification.", "phrases": ["implicit argument", "nombank", "semantic role labeling"], "overall_score": 3.171712417766084, "scores": [0.9464746292130576, 1.5430753850091026, 0.8688759487431807], "rank_score": 1.119475320988447} -{"id": "su-etal-2017-sample", "title": "Sample-efficient Actor-Critic Reinforcement Learning with Supervised Data for Dialogue Management", "abstract": "Deep reinforcement learning (RL) methods have significant potential for dialogue policy optimisation. However, they suffer from poor performance in the early stages of learning. This is especially problematic for on-line learning with real users. Two approaches are introduced to tackle this problem. Firstly, to speed up the learning process, two sample-efficient neural network algorithms: trust region actor-critic with experience replay (TRACER) and episodic natural actor-critic with experience replay (eNACER) are presented. For TRACER, the trust region helps to control the learning step size and avoid catastrophic model changes. For eNACER, the natural gradient identifies the steepest ascent direction in policy space to speed up the convergence. Both models employ off-policy learning with experience replay to improve sample-efficiency. Secondly, to mitigate the cold start issue, a corpus of demonstration data is utilised to pre-train the models prior to on-line reinforcement learning. Combining these two approaches, we demonstrate a practical approach to learn deep RL-based dialogue policies and demonstrate their effectiveness in a task-oriented information seeking domain.", "phrases": ["reinforcement learning", "dialogue policy", "action"], "overall_score": 2.871322562548072, "scores": [2.27837259314711, 0.5569516897062508, 0.5230140260827782], "rank_score": 1.1194461029787133} -{"id": "mihalcea-strapparava-2005-making", "title": "Making Computers Laugh: Investigations in Automatic Humor Recognition", "abstract": "Humor is one of the most interesting and puzzling aspects of human behavior. Despite the attention it has received in fields such as philosophy, linguistics, and psychology, there have been only a few attempts to create computational models for humor recognition or generation. In this paper, we bring empirical evidence that computational approaches can be successfully applied to the task of humor recognition. Through experiments performed on very large data sets, we show that automatic classification techniques can be effectively used to distinguish between humorous and non-humorous texts, with significant improvements observed over a priori known baselines.", "phrases": ["automatic humor recognition", "alliteration", "stylistic feature", "joke", "human-centric feature"], "overall_score": 3.647259283978825, "scores": [0.9994327139059034, 1.6196044421279578, 1.3692811191989174, 1.0791362387205987, 0.5297695728959133], "rank_score": 1.1194448173698581} -{"id": "xue-etal-2014-interlingua", "title": "Not an Interlingua, But Close: Comparison of English AMRs to Chinese and Czech", "abstract": "Abstract Meaning Representations (AMRs) are rooted, directional and labeled graphs that abstract away from morpho-syntactic idiosyncrasies such as word category (verbs and nouns), word order, and function words (determiners, some prepositions). 
Because these syntactic idiosyncrasies account for many of the cross-lingual differences, it would be interesting to see if this representation can serve, e.g., as a useful, minimally divergent transfer layer in machine translation. To answer this question, we have translated 100 English sentences that have existing AMRs into Chinese and Czech to create AMRs for them. A cross-linguistic comparison of English to Chinese and Czech AMRs reveals both cases where the AMRs for the language pairs align well structurally and cases of linguistic divergence. We found that the level of compatibility of AMR between English and Chinese is higher than between English and Czech. We believe this kind of comparison is beneficial to further refining the annotation standards for each of the three languages and will lead to more compatible annotation guidelines between the languages.", "phrases": ["interlingua", "english amr", "chinese", "word order", "non-english language"], "overall_score": 2.954103061553783, "scores": [2.174877387912232, 1.455309236032107, 0.832428328587088, 0.5994791942547362, 0.5347965088337621], "rank_score": 1.1193781311239852} -{"id": "bicici-yuret-2011-instance", "title": "Instance Selection for Machine Translation using Feature Decay Algorithms", "abstract": "We present an empirical study of instance selection techniques for machine translation. In an active learning setting, instance selection minimizes the human effort by identifying the most informative sentences for translation. In a transductive learning setting, selection of training instances relevant to the test set improves the final translation quality. After reviewing the state of the art in the field, we generalize the main ideas in a class of instance selection algorithms that use feature decay. Feature decay algorithms increase the diversity of the training set by devaluing features that are already included. We show that the feature decay rate has a very strong effect on the final translation quality whereas the initial feature values, inclusion of higher order features, or sentence length normalizations do not. We evaluate the best instance selection methods using a standard Moses baseline using the whole 1.6 million sentence English-German section of the Europarl corpus. We show that selecting the best 3000 training sentences for a specific test sentence is sufficient to obtain a score within 1 BLEU of the baseline, using 5% of the training data is sufficient to exceed the baseline, and a ~ 2 BLEU improvement over the baseline is possible with an optimally selected subset of the training data. In out-of-domain translation, we are able to reduce the training set size to about 7% and achieve performance similar to the baseline.", "phrases": ["machine translation", "feature decay algorithm", "fda", "bic\u0327ici", "similar one"], "overall_score": 2.4589872855675377, "scores": [2.447706719557164, 0.9471352527287322, 0.8424604814794739, 0.8249555966266272, 0.533408662358556], "rank_score": 1.1191333425501107} -{"id": "redkar-etal-2017-hindi", "title": "Hindi Shabdamitra: A Wordnet based E-Learning Tool for Language Learning and Teaching", "abstract": "In today's technology-driven digital era, the education domain is undergoing a transformation from traditional approaches to more learner-controlled and flexible methods of learning. 
This transformation has opened new avenues for interdisciplinary research in the field of educational technology and natural language processing in developing quality digital aids for learning and teaching. The tool presented here - Hindi Shabdamitra, developed using Hindi Wordnet for Hindi language learning - is one such e-learning tool. It has been developed as a teaching and learning aid suitable for a formal school-based curriculum and an informal setup for self-learning users. Besides vocabulary, it also provides word-based grammar along with images and pronunciation for better learning and retention. This aid demonstrates how a rich lexical resource like wordnet can be systematically remodeled for practical usage in the educational domain.", "phrases": ["wordnet", "language learning", "teaching"], "overall_score": 1.5512961424202008, "scores": [1.6895082666731251, 0.877652098399376, 0.7899105123572033], "rank_score": 1.1190236258099013} -{"id": "guo-etal-2019-star", "title": "Star-Transformer", "abstract": "Although Transformer has achieved great success on many NLP tasks, its heavy structure with fully-connected attention connections leads to dependencies on large training data. In this paper, we present Star-Transformer, a lightweight alternative by careful sparsification. To reduce model complexity, we replace the fully-connected structure with a star-shaped topology, in which every two non-adjacent nodes are connected through a shared relay node. Thus, complexity is reduced from quadratic to linear, while preserving the capacity to capture both local composition and long-range dependency. The experiments on four tasks (22 datasets) show that Star-Transformer achieved significant improvements over the standard Transformer for the modestly sized datasets.", "phrases": ["transformer", "star-shaped topology", "star-transformer"], "overall_score": 2.4583272800127376, "scores": [2.1972248392658003, 0.5864443775445821, 0.5728296664212944], "rank_score": 1.1188329610772254} -{"id": "cai-etal-2011-language", "title": "Language-Independent Parsing with Empty Elements", "abstract": "We present a simple, language-independent method for integrating recovery of empty elements into syntactic parsing. This method outperforms the best published method we are aware of on English and a recently published method on Chinese.", "phrases": ["empty element", "syntactic parsing", "chinese"], "overall_score": 2.4581785901540285, "scores": [1.8175188843194168, 1.0072102588890062, 0.5315667250102556], "rank_score": 1.118765289406226} -{"id": "ferret-2017-turning", "title": "Turning Distributional Thesauri into Word Vectors for Synonym Extraction and Expansion", "abstract": "In this article, we propose to investigate a new problem consisting of turning a distributional thesaurus into dense word vectors. More precisely, we propose a method for performing this task by associating graph embedding and distributed representation adaptation. We have applied and evaluated it for English nouns at a large scale with regard to its ability to retrieve synonyms. 
In this context, we have also illustrated the usefulness of the developed method for three different tasks: the improvement of already existing word embeddings, the fusion of heterogeneous representations, and the expansion of synsets.", "phrases": ["distributional thesauri", "word vector", "thesaurus"], "overall_score": 1.5509185966463046, "scores": [1.862765909762964, 0.8987583380311227, 0.5947296045120547], "rank_score": 1.118751284102047} -{"id": "mohammad-etal-2009-generating", "title": "Generating High-Coverage Semantic Orientation Lexicons From Overtly Marked Words and a Thesaurus", "abstract": "Sentiment analysis often relies on a semantic orientation lexicon of positive and negative words. A number of approaches have been proposed for creating such lexicons, but they tend to be computationally expensive, and usually rely on significant manual annotation and large corpora. Most of these methods use WordNet. In contrast, we propose a simple approach to generate a high-coverage semantic orientation lexicon, which includes both individual words and multi-word expressions, using only a Roget-like thesaurus and a handful of affixes. Further, the lexicon has properties that support the Pollyanna Hypothesis. Using the General Inquirer as a gold standard, we show that our lexicon has 14 percentage points more correct entries than the leading WordNet-based high-coverage lexicon (SentiWordNet). In an extrinsic evaluation, we obtain significantly higher performance in determining phrase polarity using our thesaurus-based lexicon than with any other. Additionally, we explore the use of visualization techniques to gain insight into our algorithm beyond the evaluations mentioned above.", "phrases": ["thesaurus", "entry", "sentiment lexicon"], "overall_score": 2.778733168822321, "scores": [2.2652073869550633, 0.5589401293051193, 0.5305859531754462], "rank_score": 1.1182444898118762} -{"id": "he-etal-2010-exploring", "title": "Exploring English Lexicon Knowledge for Chinese Sentiment Analysis", "abstract": "This paper presents a weakly-supervised method for Chinese sentiment analysis by incorporating lexical prior knowledge obtained from English sentiment lexicons through machine translation. A mechanism is introduced to incorporate the prior information about polarity bearing words obtained from existing sentiment lexicons into latent Dirichlet allocation (LDA) where sentiment labels are considered as topics. Experiments on Chinese product reviews on mobile phones, digital cameras, MP3 players, and monitors demonstrate the feasibility and effectiveness of the proposed approach and show that the weakly supervised LDA model performs as well as supervised classifiers such as Naive Bayes and Support Vector Machines with an average of 83% accuracy achieved over a total of 5484 review documents. Moreover, the LDA model is able to extract highly domain-salient polarity words from text.", "phrases": ["sentiment analysis", "lexical prior knowledge", "machine translation", "latent dirichlet allocation"], "overall_score": 1.7991189173441018, "scores": [2.5328801368465, 0.8556966999532445, 0.5478371922443054, 0.5350077100477788], "rank_score": 1.1178554347729572} -{"id": "duan-etal-2008-searching", "title": "Searching Questions by Identifying Question Topic and Question Focus", "abstract": "This paper is concerned with the problem of question search. In question search, given a question as a query, we aim to return questions semantically equivalent or close to the queried question. 
In this paper, we propose to conduct question search by identifying question topic and question focus. More specifically, we first summarize questions in a data structure consisting of question topic and question focus. Then we model question topic and question focus in a language modeling framework for search. We also propose to use the MDL-based tree cut model for identifying question topic and question focus automatically. Experimental results indicate that our approach of identifying question topic and question focus for search significantly outperforms the baseline methods such as Vector Space Model (VSM) and Language Model for Information Retrieval (LMIR).", "phrases": ["question topic", "language model", "many study", "yahoo"], "overall_score": 3.3487690978721476, "scores": [2.0170822461785876, 1.0888882647297846, 0.8392889071497787, 0.5261269303612037], "rank_score": 1.1178465871048386} -{"id": "hu-etal-2020-systematic", "title": "A Systematic Assessment of Syntactic Generalization in Neural Language Models", "abstract": "While state-of-the-art neural network models continue to achieve lower perplexity scores on language modeling benchmarks, it remains unknown whether optimizing for broad-coverage predictive performance leads to human-like syntactic knowledge. Furthermore, existing work has not provided a clear picture about the model properties required to produce proper syntactic generalizations. We present a systematic evaluation of the syntactic knowledge of neural language models, testing 20 combinations of model types and data sizes on a set of 34 English-language syntactic test suites. We find substantial differences in syntactic generalization performance by model architecture, with sequential models underperforming other architectures. Factorially manipulating model architecture and training dataset size (1M-40M words), we find that variability in syntactic generalization performance is substantially greater by architecture than by dataset size for the corpora tested in our experiments. Our results also reveal a dissociation between perplexity and syntactic generalization performance.", "phrases": ["syntactic generalization", "perplexity", "generalization ability"], "overall_score": 3.401671776977267, "scores": [1.9652628836924078, 0.8579762076129673, 0.5286873732480345], "rank_score": 1.117308821517803} -{"id": "tillmann-2004-unigram", "title": "A Unigram Orientation Model for Statistical Machine Translation", "abstract": "In this paper, we present a unigram segmentation model for statistical machine translation where the segmentation units are blocks: pairs of phrases without internal structure. The segmentation model uses a novel orientation component to handle swapping of neighbor blocks. During training, we collect block unigram counts with orientation: we count how often a block occurs to the left or to the right of some predecessor block. The orientation model is shown to improve translation performance over two models: 1) no block re-ordering is used, and 2) the block swapping is controlled only by a language model. 
We show experimental results on a standard Arabic-English translation task.", "phrases": ["orientation", "statistical machine translation", "swap", "distortion", "several researcher"], "overall_score": 4.0917436285890965, "scores": [2.0901857444748755, 1.5620478074552417, 0.8769233422203571, 0.5278477740556929, 0.5273747196933117], "rank_score": 1.116875877579896} -{"id": "chollampatt-ng-2018-neural", "title": "Neural Quality Estimation of Grammatical Error Correction", "abstract": "Grammatical error correction (GEC) systems deployed in language learning environments are expected to accurately correct errors in learners' writing. However, in practice, they often produce spurious corrections and fail to correct many errors, thereby misleading learners. This necessitates the estimation of the quality of output sentences produced by GEC systems so that instructors can selectively intervene and re-correct the sentences which are poorly corrected by the system and ensure that learners get accurate feedback. We propose the first neural approach to automatic quality estimation of GEC output sentences that does not employ any hand-crafted features. Our system is trained in a supervised manner on learner sentences and corresponding GEC system outputs with quality score labels computed using human-annotated references. Our neural quality estimation models for GEC show significant improvements over a strong feature-based baseline. We also show that a state-of-the-art GEC system can be improved when quality scores are used as features for re-ranking the N-best candidates.", "phrases": ["quality estimation", "grammatical error correction", "neural approach"], "overall_score": 2.4537820851923513, "scores": [1.9014712439248387, 0.8545046584308063, 0.5943171589487664], "rank_score": 1.116764353768137} -{"id": "gala-etal-2014-model", "title": "A model to predict lexical complexity and to grade words (Un mod\u00e8le pour pr\u00e9dire la complexit\u00e9 lexicale et graduer les mots) [in French]", "abstract": "Analysing lexical complexity is a task that has mainly attracted the attention of psycholinguists and language teachers. More recently, this issue has seen a growing interest in the field of Natural Language Processing (NLP) and, in particular, that of automatic text simplification. The aim of this task is to identify words and structures which may be difficult to understand by a target audience and provide automated tools to simplify these contents. This article focuses on the lexical issue by identifying a set of predictors of lexical complexity whose efficiency is assessed with a correlational analysis. The best of those variables are integrated into a model able to predict the difficulty of words for learners of French. Keywords: lexical complexity, morphological analysis, graded words, lexical resources.", "phrases": ["lexical complexity", "french", "learner", "foreign language"], "overall_score": 1.5480081158459207, "scores": [2.1310946160543818, 0.9232031433323279, 0.8822341609192311, 0.5300753436676149], "rank_score": 1.116651815993389} -{"id": "li-etal-2018-self", "title": "A Self-Attentive Model with Gate Mechanism for Spoken Language Understanding", "abstract": "Spoken Language Understanding (SLU), which typically involves intent determination and slot filling, is a core component of spoken dialogue systems. Joint learning has been shown to be effective in SLU given that slot tags and intents are supposed to share knowledge with each other. 
However, most existing joint learning methods only consider joint learning by sharing parameters at the surface level rather than the semantic level. In this work, we propose a novel self-attentive model with gate mechanism to fully utilize the semantic correlation between slot and intent. Our model first obtains intent-augmented embeddings based on a neural network with a self-attention mechanism. Then the intent semantic representation is utilized as the gate for labelling slot tags. The objectives of both tasks are optimized simultaneously via joint learning in an end-to-end way. We conduct experiments on the popular ATIS benchmark. The results show that our model achieves state-of-the-art performance and outperforms other popular methods by a large margin in terms of both intent detection error rate and slot filling F1-score. This paper gives a new perspective for research on SLU.", "phrases": ["self-attentive model", "spoken language understanding", "part-of-speech tag"], "overall_score": 2.9459765556813053, "scores": [1.9420857828015123, 0.8534841228584445, 0.5533265233283349], "rank_score": 1.116298809662764} -{"id": "daume-iii-campbell-2007-bayesian", "title": "A Bayesian Model for Discovering Typological Implications", "abstract": "A standard form of analysis for linguistic typology is the universal implication. These implications state facts about the range of extant languages, such as \u201cif objects come after verbs, then adjectives come after nouns.\u201d Such implications are typically discovered by painstaking hand analysis over a small sample of languages. We propose a computational model for assisting in this process. Our model is able to discover both well-known implications as well as some novel implications that deserve further study. Moreover, through a careful application of hierarchical analysis, we are able to cope with the well-known sampling problem: languages are not independent.", "phrases": ["bayesian model", "implication", "linguistic typology", "daume\u0301"], "overall_score": 2.6767561409118517, "scores": [1.469521600571302, 1.3074965891973513, 1.1273331655291503, 0.5608247035182212], "rank_score": 1.1162940147040064} -{"id": "lin-etal-2014-cmu", "title": "The CMU Submission for the Shared Task on Language Identification in Code-Switched Data", "abstract": "We describe the CMU submission for the 2014 shared task on language identification in code-switched data. We participated in all four language pairs: Spanish\u2010English, Mandarin\u2010English, Nepali\u2010English, and Modern Standard Arabic\u2010Arabic dialects. After describing our CRF-based baseline system, we discuss three extensions for learning from unlabeled data: semi-supervised learning, word embeddings, and word lists.", "phrases": ["cmu submission", "language identification", "code-switched data"], "overall_score": 1.796501841193677, "scores": [1.767749120803371, 0.7926608999978374, 0.7882780410016779], "rank_score": 1.1162293539342956} -{"id": "zhou-etal-2008-context", "title": "Context-Sensitive Convolution Tree Kernel for Pronoun Resolution", "abstract": "This paper proposes a context-sensitive convolution tree kernel for pronoun resolution. It resolves two critical problems in previous research in two ways. First, given a parse tree and a pair of an anaphor and an antecedent candidate, it implements a dynamic-expansion scheme to automatically determine a proper tree span for pronoun resolution by taking predicate- and antecedent competitor-related information into consideration. 
Second, it applies a context-sensitive convolution tree kernel, which enumerates both context-free and context-sensitive sub-trees by considering their ancestor node paths as their contexts. Evaluation on the ACE 2003 corpus shows that our dynamic-expansion tree span scheme can well cover necessary structured information in the parse tree for pronoun resolution and the context-sensitive tree kernel substantially outperforms previous tree kernels.", "phrases": ["convolution tree kernel", "pronoun resolution", "parse tree structure"], "overall_score": 1.5471607825503697, "scores": [1.9133329869231106, 0.9013153104754695, 0.5334734852663793], "rank_score": 1.1160405942216531} -{"id": "nguyen-etal-2009-building", "title": "Building a Large Syntactically-Annotated Corpus of Vietnamese", "abstract": "A treebank is an important resource for both research and application of natural language processing. For Vietnamese, we still lack such corpora. This paper presents up-to-date results of a project for Vietnamese treebank construction. Since Vietnamese is an isolating language and has no word delimiter, there are many ambiguities in sentence analysis. We systematically applied many linguistic techniques to handle such ambiguities. Annotators are supported by automatic-labeling tools and a tree-editor tool. Raw texts are extracted from Tuoi Tre (Youth), an online Vietnamese daily newspaper. The current annotation agreement is around 90 percent.", "phrases": ["vietnamese", "treebank", "processing", "syllable", "underscore-based representation"], "overall_score": 2.4521549431897953, "scores": [2.670670935918761, 0.9482327538526285, 0.8433087404270494, 0.5699062317765702, 0.548000385670629], "rank_score": 1.1160238095291277} -{"id": "nguyen-etal-2017-distinguishing", "title": "Distinguishing Antonyms and Synonyms in a Pattern-based Neural Network", "abstract": "Distinguishing between antonyms and synonyms is a key task to achieve high performance in NLP systems. While they are notoriously difficult to distinguish by distributional co-occurrence models, pattern-based methods have proven effective to differentiate between the relations. In this paper, we present a novel neural network model AntSynNET that exploits lexico-syntactic patterns from syntactic parse trees. In addition to the lexical and syntactic information, we successfully integrate the distance between the related words along the syntactic path as a new pattern feature. The results from classification experiments show that AntSynNET improves the performance over prior pattern-based methods.", "phrases": ["synonyms", "pattern-based neural network", "syntactic parse tree"], "overall_score": 1.9992121847012727, "scores": [1.905832371445954, 0.9036112560672815, 0.5379010675243098], "rank_score": 1.1157815650125151} -{"id": "chen-etal-2020-joint-aspect", "title": "Joint Aspect Extraction and Sentiment Analysis with Directional Graph Convolutional Networks", "abstract": "End-to-end aspect-based sentiment analysis (EASA) consists of two sub-tasks: the first extracts the aspect terms in a sentence and the second predicts the sentiment polarities for such terms. For EASA, compared to pipeline and multi-task approaches, joint aspect extraction and sentiment analysis provides a one-step solution to predict both aspect terms and their sentiment polarities through a single decoding process, which avoids mismatches between the results of aspect terms and sentiment polarities, as well as error propagation. 
Previous studies, especially recent ones, for this task focus on using powerful encoders (e.g., Bi-LSTM and BERT) to model contextual information from the input, with limited efforts paid to using advanced neural architectures (such as attentions and graph convolutional networks) or leveraging extra knowledge (such as syntactic information). To extend such efforts, in this paper, we propose directional graph convolutional networks (D-GCN) to jointly perform aspect extraction and sentiment analysis with encoding syntactic information, where dependencies among words are integrated into our model to enhance its ability to represent input sentences and help EASA accordingly. Experimental results on three benchmark datasets demonstrate the effectiveness of our approach, where D-GCN achieves state-of-the-art performance on all datasets.", "phrases": ["sentiment analysis", "input sentence", "joint aspect extraction"], "overall_score": 1.5467815858844076, "scores": [1.9500400603126973, 0.8567367331042406, 0.5405243915237644], "rank_score": 1.1157670616469006} -{"id": "tiedemann-2010-context", "title": "Context Adaptation in Statistical Machine Translation Using Models with Exponentially Decaying Cache", "abstract": "We report results from a domain adaptation task for statistical machine translation (SMT) using cache-based adaptive language and translation models. We apply an exponential decay factor and integrate the cache models in a standard phrase-based SMT decoder. Without the need for any domain-specific resources we obtain a 2.6% relative improvement on average in BLEU scores using our dynamic adaptation procedure.", "phrases": ["machine translation", "cache-based translation model", "cache-model"], "overall_score": 3.1604706212946305, "scores": [1.9052316359522796, 0.9139518032056441, 0.527338941083895], "rank_score": 1.1155074600806063} -{"id": "martins-etal-2010-turbo", "title": "Turbo Parsers: Dependency Parsing by Approximate Variational Inference", "abstract": "We present a unified view of two state-of-the-art non-projective dependency parsers, both approximate: the loopy belief propagation parser of Smith and Eisner (2008) and the relaxed linear program of Martins et al. (2009). By representing the model assumptions with a factor graph, we shed light on the optimization problems tackled in each method. We also propose a new aggressive online algorithm to learn the model parameters, which makes use of the underlying variational representation. The algorithm does not require a learning rate parameter and provides a single framework for a wide family of convex loss functions, including CRFs and structured SVMs. Experiments show state-of-the-art performance for 14 languages.", "phrases": ["factor graph", "turbo parser", "exact inference"], "overall_score": 3.0206386019964016, "scores": [1.930510725039385, 0.8537090473289812, 0.5620681961120159], "rank_score": 1.115429322826794} -{"id": "mann-mccallum-2008-generalized", "title": "Generalized Expectation Criteria for Semi-Supervised Learning of Conditional Random Fields", "abstract": "This paper presents a semi-supervised training method for linear-chain conditional random fields that makes use of labeled features rather than labeled instances. This is accomplished by using generalized expectation criteria to express a preference for parameter settings in which the model\u2019s distribution on unlabeled data matches a target distribution. 
We induce target conditional probability distributions of labels given features from both annotated feature occurrences in context and ad hoc feature majority label assignment. The use of generalized expectation criteria allows for a dramatic reduction in annotation time by shifting from traditional instance-labeling to feature-labeling, and the methods presented outperform traditional CRF training and other semi-supervised methods when limited human effort is available.", "phrases": ["expectation", "semi-supervised learning", "penalty"], "overall_score": 2.450385474023106, "scores": [1.89227797569424, 0.9243411745475755, 0.5290363179768267], "rank_score": 1.115218489406214} -{"id": "guillou-2012-improving", "title": "Improving Pronoun Translation for Statistical Machine Translation", "abstract": "Machine Translation is a well--established field, yet the majority of current systems translate sentences in isolation, losing valuable contextual information from previously translated sentences in the discourse. One important type of contextual information concerns who or what a coreferring pronoun corefers to (i.e., its antecedent). Languages differ significantly in how they achieve coreference, and awareness of antecedents is important in choosing the correct pronoun. Disregarding a pronoun's antecedent in translation can lead to inappropriate coreferring forms in the target text, seriously degrading a reader's ability to understand it. \n \nThis work assesses the extent to which source-language annotation of coreferring pronouns can improve English--Czech Statistical Machine Translation (SMT). As with previous attempts that use this method, the results show little improvement. This paper attempts to explain why and to provide insight into the factors affecting performance.", "phrases": ["pronoun translation", "anaphora resolution", "imperfect coreference"], "overall_score": 2.859879514982042, "scores": [1.9388757308691944, 0.8760452233005649, 0.5300334091542684], "rank_score": 1.1149847877746757} -{"id": "huang-etal-2011-nonparametric", "title": "Nonparametric Bayesian Machine Transliteration with Synchronous Adaptor Grammars", "abstract": "Machine transliteration is defined as automatic phonetic translation of names across languages. In this paper, we propose synchronous adaptor grammar, a novel nonparametric Bayesian learning approach, for machine transliteration. This model provides a general framework without heuristic or restriction to automatically learn syllable equivalents between languages. The proposed model outperforms the state-of-the-art EM-based model in the English to Chinese transliteration task.", "phrases": ["synchronous adaptor grammar", "grapheme-based transliteration", "nonparametric bayesian"], "overall_score": 1.9975321582765886, "scores": [2.1962771278226123, 0.6214843611193815, 0.5267702842940422], "rank_score": 1.114843924412012} -{"id": "salazar-etal-2020-masked", "title": "Masked Language Model Scoring", "abstract": "Pretrained masked language models (MLMs) require finetuning for most NLP tasks. Instead, we evaluate MLMs out of the box via their pseudo-log-likelihood scores (PLLs), which are computed by masking tokens one by one. We show that PLLs outperform scores from autoregressive language models like GPT-2 in a variety of tasks. 
By rescoring ASR and NMT hypotheses, RoBERTa reduces an end-to-end LibriSpeech model's WER by 30% relative and adds up to +1.7 BLEU on state-of-the-art baselines for low-resource translation pairs, with further gains from domain adaptation. We attribute this success to PLL's unsupervised expression of linguistic acceptability without a left-to-right bias, greatly improving on scores from GPT-2 (+10 points on island effects, NPI licensing in BLiMP). One can finetune MLMs to give scores without masking, enabling computation in a single inference pass. In all, PLLs and their associated pseudo-perplexities (PPPLs) enable plug-and-play use of the growing number of pretrained MLMs; e.g., we use a single cross-lingual model to rescore translations in multiple languages. We release our library for language model scoring at .", "phrases": ["language model", "scoring", "mlm"], "overall_score": 2.770241225879815, "scores": [1.398721045275231, 1.0044951623363334, 0.9412650341077066], "rank_score": 1.1148270805730902} -{"id": "iyyer-etal-2017-search", "title": "Search-based Neural Structured Learning for Sequential Question Answering", "abstract": "Recent work in semantic parsing for question answering has focused on long and complicated questions, many of which would seem unnatural if asked in a normal conversation between two humans. In an effort to explore a conversational QA setting, we present a more realistic task: answering sequences of simple but inter-related questions. We collect a dataset of 6,066 question sequences that inquire about semi-structured tables from Wikipedia, with 17,553 question-answer pairs in total. To solve this sequential question answering task, we propose a novel dynamic neural semantic parsing framework trained using a weakly supervised reward-guided search. Our model effectively leverages the sequential context to outperform state-of-the-art QA systems that are designed to answer highly complex questions.", "phrases": ["sequential question", "semantic parsing", "query"], "overall_score": 3.631867105899625, "scores": [1.2547082425220606, 1.24100771534413, 0.8484456378842405], "rank_score": 1.1147205319168103} -{"id": "esteve-etal-2010-epac", "title": "The EPAC Corpus: Manual and Automatic Annotations of Conversational Speech in French Broadcast News", "abstract": "This paper presents the EPAC corpus which is composed of a set of 100 hours of manually transcribed conversational speech and of the outputs of automatic tools (automatic segmentation, transcription, POS tagging, etc.) applied to the entire French ESTER 1 audio corpus: this concerns about 1700 hours of audio recordings from radio shows. This corpus was built during the EPAC project funded by the French Research Agency (ANR) from 2007 to 2010. This corpus significantly increases the amount of easily available manually transcribed French audio recordings and it is now included as a part of the ESTER 1 corpus in the ELRA catalog without additional cost. By providing a large set of automatic outputs of speech processing tools, the EPAC corpus should be useful to researchers who want to work on such data without having to develop and deal with such tools. These automatic annotations are various: segmentation and speaker diarization, one-best hypotheses from the LIUM automatic speech recognition system with confidence measures, but also word-lattices and confusion networks, named entities, part-of-speech tags, chunks, etc. 
The 100 hours of speech manually transcribed were split into three data sets in order to get an official training corpus, an official development corpus and an official test corpus. These data sets were used to develop and to evaluate some automatic tools which have been used to process the 1700 hours of audio recording. For example, on the EPAC test data set our ASR system yields a word error rate equal to 17.25%.", "phrases": ["epac corpus", "conversational speech", "hour"], "overall_score": 1.2243734431529494, "scores": [1.9080113636290266, 0.8923484927356492, 0.5430583516292601], "rank_score": 1.1144727359979787} -{"id": "liang-etal-2017-neural", "title": "Neural Symbolic Machines: Learning Semantic Parsers on Freebase with Weak Supervision", "abstract": "Harnessing the statistical power of neural networks to perform language understanding and symbolic reasoning is difficult, when it requires executing efficient discrete operations against a large knowledge-base. In this work, we introduce a Neural Symbolic Machine, which contains (a) a neural \u201cprogrammer\u201d, i.e., a sequence-to-sequence model that maps language utterances to programs and utilizes a key-variable memory to handle compositionality (b) a symbolic \u201ccomputer\u201d, i.e., a Lisp interpreter that performs program execution, and helps find good programs by pruning the search space. We apply REINFORCE to directly optimize the task reward of this structured prediction problem. To train with weak supervision and improve the stability of REINFORCE, we augment it with an iterative maximum-likelihood training process. NSM outperforms the state-of-the-art on the WebQuestionsSP dataset when trained from question-answer pairs only, without requiring any feature engineering or domain-specific knowledge.", "phrases": ["weak supervision", "neural symbolic machines", "semantic parsing", "natural language question", "knowledge graph"], "overall_score": 4.242288192094581, "scores": [2.176990722125727, 0.8470410604706257, 1.4155819473256928, 0.5700068746086017, 0.562567898514898], "rank_score": 1.114437700609109} -{"id": "kong-zhou-2010-tree", "title": "A Tree Kernel-Based Unified Framework for Chinese Zero Anaphora Resolution", "abstract": "This paper proposes a unified framework for zero anaphora resolution, which can be divided into three sub-tasks: zero anaphor detection, anaphoricity determination and antecedent identification. In particular, all the three sub-tasks are addressed using tree kernel-based methods with appropriate syntactic parse tree structures. Experimental results on a Chinese zero anaphora corpus show that the proposed tree kernel-based methods significantly outperform the feature-based ones. This indicates the critical role of the structural information in zero anaphora resolution and the necessity of tree kernel-based methods in modeling such structural information. To the best of our knowledge, this is the first systematic work dealing with all the three sub-tasks in Chinese zero anaphora resolution via a unified framework. 
Moreover, we release a Chinese zero anaphora corpus of 100 documents, which adds a layer of annotation to the manually-parsed sentences in the Chinese Treebank (CTB) 6.0.", "phrases": ["unified framework", "anaphora resolution", "pronoun", "azp resolution"], "overall_score": 3.4443936390584065, "scores": [2.416443455536649, 0.9719850521030656, 0.5457717831889202, 0.523057985020907], "rank_score": 1.1143145689623855} -{"id": "bohnet-kuhn-2012-best", "title": "The Best of Both Worlds \u2013 A Graph-based Completion Model for Transition-based Parsers", "abstract": "Transition-based dependency parsers are often forced to make attachment decisions at a point when only partial information about the relevant graph configuration is available. In this paper, we describe a model that takes into account complete structures as they become available to rescore the elements of a beam, combining the advantages of transition-based and graph-based approaches. We also propose an efficient implementation that allows for the use of sophisticated features and show that the completion model leads to a substantial increase in accuracy. We apply the new transition-based parser on typologically different languages such as English, Chinese, Czech, and German and report competitive labeled and unlabeled attachment scores.", "phrases": ["completion model", "transition-based parser", "search"], "overall_score": 2.1681820232183093, "scores": [1.9776932758107002, 0.8355248928654855, 0.5294572743871587], "rank_score": 1.1142251476877816} -{"id": "liu-zhang-2012-unsupervised", "title": "Unsupervised Domain Adaptation for Joint Segmentation and POS-Tagging", "abstract": "We report an empirical investigation on type-supervised domain adaptation for joint Chinese word segmentation and POS-tagging, making use of domain-specific tag dictionaries and only unlabeled target domain data to improve target-domain accuracies, given a set of annotated source domain sentences. Previous work on POS-tagging of other languages showed that type-supervision can be a competitive alternative to token-supervision, while semi-supervised techniques such as label propagation are important to the effectiveness of type-supervision. We report similar findings using a novel approach for joint Chinese segmentation and POS-tagging, under a cross-domain setting. With the help of unlabeled sentences and a lexicon of 3,000 words, we obtain 33% error reduction in target-domain tagging. In addition, combined type- and token-supervision can lead to improved cost-effectiveness.", "phrases": ["pos-tagging", "unsupervised domain adaptation", "cws", "character clustering", "newswire"], "overall_score": 2.8577164644129653, "scores": [2.4885647346969626, 0.9141583026088081, 1.081819634426857, 0.55565114138543, 0.5305135696607832], "rank_score": 1.1141414765557682} -{"id": "xu-etal-2013-gathering", "title": "Gathering and Generating Paraphrases from Twitter with Application to Normalization", "abstract": "We present a new and unique paraphrase resource, which contains meaning-preserving transformations between informal user-generated text. Sentential paraphrases are extracted from a comparable corpus of temporally and topically related messages on Twitter which often express semantically identical information through distinct surface forms. 
We demonstrate the utility of this new resource on the task of paraphrasing and normalizing noisy text, showing improvement over several state-of-the-art paraphrase and normalization systems.", "phrases": ["twitter", "user-generated text", "text normalization", "paraphrase research"], "overall_score": 2.3167393803369567, "scores": [2.7641625628714004, 0.5785009478469769, 0.5654735264844968, 0.5483275161895228], "rank_score": 1.1141161383480993} -{"id": "brooke-hirst-2012-robust", "title": "Robust, Lexicalized Native Language Identification", "abstract": "Previous approaches to the task of native language identification (Koppel et al., 2005) have been limited to small, within-corpus evaluations. Because these are restrictive and unreliable, we apply cross-corpus evaluation to the task. We demonstrate the efficacy of lexical features, which had previously been avoided due to the within-corpus topic confounds, and provide a detailed evaluation of various options, including a simple bias adaptation technique and a number of classifier algorithms. Using a new web corpus as a training set, we reach high classification accuracy for a 7-language task, performance which is robust across two independent test sets. Although we show that even higher accuracy is possible using cross-validation, we present strong evidence calling into question the validity of cross-validation evaluation using the standard dataset.", "phrases": ["native language identification", "cross-corpus evaluation", "nli"], "overall_score": 2.9398606311425297, "scores": [1.6116708976261405, 1.1339601407112172, 0.5963129938953087], "rank_score": 1.1139813440775554} -{"id": "pettersson-etal-2013-normalisation", "title": "Normalisation of Historical Text Using Context-Sensitive Weighted Levenshtein Distance and Compound Splitting", "abstract": "Natural language processing for historical text imposes a variety of challenges, such as dealing with a high degree of spelling variation. Furthermore, there is often not enough linguistically annotated data available for training part-of-speech taggers and other tools aimed at handling this specific kind of text. In this paper we present a Levenshtein-based approach to normalisation of historical text to a modern spelling. This enables us to apply standard NLP tools trained on contemporary corpora on the normalised version of the historical input text. In its basic version, no annotated historical data is needed, since the only data used for the Levenshtein comparisons are a contemporary dictionary or corpus. In addition, a (small) corpus of manually normalised historical text can optionally be included to learn normalisation for frequent words and weights for edit operations in a supervised fashion, which improves precision. We show that this method is successful both in terms of normalisation accuracy, and by the performance of a standard modern tagger applied to the historical text. We also compare our method to a previously implemented approach using a set of hand-written normalisation rules, and we see that the Levenshtein-based approach clearly outperforms the hand-crafted rules. 
Furthermore, the experiments were carried out on Swedish data with promising results, and we believe that our method could be successfully applied to analyse historical text in other languages, including those with fewer resources.", "phrases": ["historical text", "compound splitting", "spelling", "normalisation", "edit distance calculation"], "overall_score": 2.4476565036868827, "scores": [1.766040554147501, 1.2658902431331824, 0.8817449895315729, 1.080752308103909, 0.5754543124940762], "rank_score": 1.1139764814820483} -{"id": "ruder-etal-2016-hierarchical", "title": "A Hierarchical Model of Reviews for Aspect-based Sentiment Analysis", "abstract": "Opinion mining from customer reviews has become pervasive in recent years. Sentences in reviews, however, are usually classified independently, even though they form part of a review's argumentative structure. Intuitively, sentences in a review build and elaborate upon each other; knowledge of the review structure and sentential context should thus inform the classification of each sentence. We demonstrate this hypothesis for the task of aspect-based sentiment analysis by modeling the interdependencies of sentences in a review with a hierarchical bidirectional LSTM. We show that the hierarchical model outperforms two non-hierarchical baselines, obtains results competitive with the state-of-the-art, and outperforms the state-of-the-art on five multilingual, multi-domain datasets without any hand-engineered features or external resources.", "phrases": ["hierarchical model", "sentiment analysis", "aspect category"], "overall_score": 2.9396214584828897, "scores": [1.9753890812879824, 0.7978749919410648, 0.5684080748081276], "rank_score": 1.1138907160123914} -{"id": "nerima-wehrli-2008-generating", "title": "Generating Bilingual Dictionaries by Transitivity", "abstract": "Recently, the LATL has undertaken the development of a multilingual translation system based on a symbolic parsing technology and on a transfer-based translation model. A crucial component of the system is the lexical database, notably the bilingual dictionaries containing the information for the lexical transfer from one language to another. As the number of necessary bilingual dictionaries is a quadratic function of the number of languages considered, we will face the problem of getting a large number of dictionaries. In this paper we discuss a solution to derive a bilingual dictionary by transitivity using existing ones and to check the generated translations in a parallel corpus. Our first experiments concern the generation of two bilingual dictionaries, and the quality of the entries is very promising. The number of generated entries could, however, be improved, and we conclude the paper with the possible ways we plan to explore.", "phrases": ["bilingual dictionary", "transitivity", "database"], "overall_score": 1.5441752047283692, "scores": [1.9489243712630764, 0.8433837098701513, 0.5493527840547501], "rank_score": 1.1138869550626591} -{"id": "peshterliev-etal-2019-active", "title": "Active Learning for New Domains in Natural Language Understanding", "abstract": "We explore active learning (AL) for improving the accuracy of new domains in a natural language understanding (NLU) system. We propose an algorithm called Majority-CRF that uses an ensemble of classification models to guide the selection of relevant utterances, as well as a sequence labeling model to help prioritize informative examples. 
Experiments with three domains show that Majority-CRF achieves 6.6%-9% relative error rate reduction compared to random sampling with the same annotation budget, and statistically significant improvements compared to other AL approaches. Additionally, case studies with human-in-the-loop AL on six new domains show 4.6%-9% improvement on an existing NLU system.", "phrases": ["new domain", "natural language understanding", "active learning"], "overall_score": 1.5440076833638992, "scores": [1.8925259558445162, 0.9170058626389898, 0.5317665233418255], "rank_score": 1.1137661139417772} -{"id": "salaberri-etal-2015-ixagroupehuspaceeval", "title": "IXAGroupEHUSpaceEval: (X-Space) A WordNet-based approach towards the Automatic Recognition of Spatial Information following the ISO-Space Annotation Scheme", "abstract": "This paper presents X-Space, a system that follows the ISO-Space annotation scheme in order to capture spatial information, as well as our contribution to the SemEval-2015 task 8 (SpaceEval). Our system is the only participant system that reported results for all three evaluation configurations in SpaceEval.", "phrases": ["spatial information", "iso-space annotation scheme", "wordnet"], "overall_score": 1.5436399837840025, "scores": [1.847299753802646, 0.9018558318923283, 0.5913470385896761], "rank_score": 1.1135008747615502} -{"id": "gu-etal-2018-universal", "title": "Universal Neural Machine Translation for Extremely Low Resource Languages", "abstract": "In this paper, we propose a new universal machine translation approach focusing on languages with a limited amount of parallel data. Our proposed approach utilizes a transfer-learning approach to share lexical and sentence level representations across multiple source languages into one target language. The lexical part is shared through a Universal Lexical Representation to support multi-lingual word-level sharing. The sentence-level sharing is represented by a model of experts from all source languages that share the source encoders with all other languages. This enables the low-resource language to utilize the lexical and sentence representations of the higher resource languages. Our approach is able to achieve 23 BLEU on Romanian-English WMT2016 using a tiny parallel corpus of 6k sentences, compared to the 18 BLEU of a strong baseline system which uses multi-lingual training and back-translation. Furthermore, we show that the proposed approach can achieve almost 20 BLEU on the same dataset through fine-tuning a pre-trained multi-lingual system in a zero-shot setting.", "phrases": ["machine translation", "resource language", "transfer learning"], "overall_score": 4.285519296617142, "scores": [1.8978564640982711, 0.8738168573915025, 0.5675643441366387], "rank_score": 1.1130792218754708} -{"id": "pasupat-etal-2018-mapping", "title": "Mapping natural language commands to web elements", "abstract": "The web provides a rich, open-domain environment with textual, structural, and spatial properties. We propose a new task for grounding language in this environment: given a natural language command (e.g., \u201cclick on the second article\u201d), choose the correct element on the web page (e.g., a hyperlink or text box). We collected a dataset of over 50,000 commands that capture various phenomena such as functional references (e.g. \u201cfind who made this site\u201d), relational reasoning (e.g. \u201carticle by john\u201d), and visual reasoning (e.g. \u201ctop-most article\u201d). 
We also implemented and analyzed three baseline models that capture different phenomena present in the dataset.", "phrases": ["natural language command", "hyperlink", "mapping"], "overall_score": 1.5430230501863222, "scores": [1.9631278956789178, 0.8321335268925228, 0.5439061311503135], "rank_score": 1.1130558512405846} -{"id": "greene-etal-2010-automatic", "title": "Automatic Analysis of Rhythmic Poetry with Applications to Generation and Translation", "abstract": "We employ statistical methods to analyze, generate, and translate rhythmic poetry. We first apply unsupervised learning to reveal word-stress patterns in a corpus of raw poetry. We then use these word-stress patterns, in addition to rhyme and discourse models, to generate English love poetry. Finally, we translate Italian poetry into English, choosing target realizations that conform to desired rhythmic patterns.", "phrases": ["rhythmic poetry", "statistical method", "rhyme scheme", "finite-state transducer"], "overall_score": 3.3877741810128112, "scores": [2.4988369139308104, 0.8392024049057037, 0.5585028535521549, 0.5544339663138046], "rank_score": 1.1127440346756186} -{"id": "zhou-etal-2019-learning", "title": "Learning to Discriminate Perturbations for Blocking Adversarial Attacks in Text Classification", "abstract": "Adversarial attacks against machine learning models have threatened various real-world applications such as spam filtering and sentiment analysis. In this paper, we propose a novel framework, learning to discriminate perturbations (DISP), to identify and adjust malicious perturbations, thereby blocking adversarial attacks for text classification models. To identify adversarial attacks, a perturbation discriminator validates how likely a token in the text is perturbed and provides a set of potential perturbations. For each potential perturbation, an embedding estimator learns to restore the embedding of the original word based on the context and a replacement token is chosen based on approximate kNN search. DISP can block adversarial attacks for any NLP model without modifying the model structure or training procedure. Extensive experiments on two benchmark datasets demonstrate that DISP significantly outperforms baseline methods in blocking adversarial attacks for text classification. In addition, in-depth analysis shows the robustness of DISP across different situations.", "phrases": ["perturbation", "attack", "text classification", "adversarial input"], "overall_score": 3.2149743189811364, "scores": [0.8987148335557003, 1.6036916274762403, 1.3805524942817875, 0.5662601601141682], "rank_score": 1.112304778856974} -{"id": "chen-etal-2010-twin", "title": "A Twin-Candidate Based Approach for Event Pronoun Resolution using Composite Kernel", "abstract": "Event Anaphora Resolution is an important task for cascaded event template extraction and other NLP studies. In this paper, we provide a first systematic study of resolving pronouns to their event verb antecedents for general purposes. First, we explore various positional, lexical and syntactic features useful for the event pronoun resolution. We further explore tree kernels to model structural information embedded in syntactic parses. A composite kernel is then used to combine the above diverse information. In addition, we employed a twin-candidate-based preference learning model to capture pairwise candidates' preference knowledge. 
Besides, we also look into the incorporation of negative training instances with anaphoric pronouns whose antecedents are not verbs. Although these negative training instances are not used in previous studies on anaphora resolution, our study shows that they are very useful for the final resolution through a random sampling strategy. Our experiments demonstrate that it's meaningful to keep certain training data as development data to help the SVM select a more accurate hyperplane, which provides significant improvement over the default setting with all training data.", "phrases": ["event pronoun resolution", "composite kernel", "antecedent"], "overall_score": 1.5419788743912224, "scores": [2.01344653263163, 0.8029505760458746, 0.5205108041821395], "rank_score": 1.1123026376198812} -{"id": "dai-etal-2019-style", "title": "Style Transformer: Unpaired Text Style Transfer without Disentangled Latent Representation", "abstract": "Disentangling the content and style in the latent space is prevalent in unpaired text style transfer. However, two major issues exist in most of the current neural models. 1) It is difficult to completely strip the style information from the semantics of a sentence. 2) The recurrent neural network (RNN) based encoder and decoder, mediated by the latent representation, cannot deal well with the issue of long-term dependency, resulting in poor preservation of non-stylistic semantic content. In this paper, we propose the Style Transformer, which makes no assumption about the latent representation of the source sentence and leverages the power of the attention mechanism in the Transformer to achieve better style transfer and better content preservation.", "phrases": ["latent representation", "style transformer", "output sentence"], "overall_score": 3.332118021453598, "scores": [1.9273577444000234, 0.8627039710648744, 0.5468032482728685], "rank_score": 1.112288321245922} -{"id": "szpektor-etal-2008-contextual", "title": "Contextual Preferences", "abstract": "The validity of semantic inferences depends on the contexts in which they are applied. We propose a generic framework for handling contextual considerations within applied inference, termed Contextual Preferences. This framework defines the various context-aware components needed for inference and their relationships. Contextual preferences extend and generalize previous notions, such as selectional preferences, while experiments show that the extended framework allows improving inference quality on real application data.", "phrases": ["generic framework", "contextual preferences", "context matching"], "overall_score": 1.9916757124833984, "scores": [2.1972245773362196, 0.570241459994593, 0.5672601020122217], "rank_score": 1.1115753797810113} -{"id": "chen-etal-2019-semantically", "title": "Semantically Conditioned Dialog Response Generation via Hierarchical Disentangled Self-Attention", "abstract": "Semantically controlled neural response generation on limited domains has achieved great performance. However, moving towards multi-domain, large-scale scenarios is shown to be difficult because the possible combinations of semantic inputs grow exponentially with the number of domains. To alleviate this scalability issue, we exploit the structure of dialog acts to build a multi-layer hierarchical graph, where each act is represented as a root-to-leaf route on the graph. 
Then, we incorporate such a graph-structure prior as an inductive bias to build a hierarchical disentangled self-attention network, where we disentangle attention heads to model designated nodes on the dialog act graph. By activating different (disentangled) heads at each layer, combinatorially many dialog act semantics can be modeled to control the neural response generation. On the large-scale Multi-Domain-WOZ dataset, our model can yield a significant improvement over the baselines on various automatic and human evaluation metrics.", "phrases": ["response generation", "hierarchical disentangled self-attention", "dialog act"], "overall_score": 3.2126371390653135, "scores": [1.3106892380286896, 1.1014095385819125, 0.9223897336878645], "rank_score": 1.1114961700994888} -{"id": "sajjad-etal-2020-arabench", "title": "AraBench: Benchmarking Dialectal Arabic-English Machine Translation", "abstract": "Low-resource machine translation suffers from the scarcity of training data and the unavailability of standard evaluation sets. While a number of research efforts target the former, the unavailability of evaluation benchmarks remains a major hindrance in tracking the progress in low-resource machine translation. In this paper, we introduce AraBench, an evaluation suite for dialectal Arabic to English machine translation. Compared to Modern Standard Arabic, Arabic dialects are challenging due to their spoken nature, non-standard orthography, and a large variation in dialectness. To this end, we pool together already available Dialectal Arabic-English resources and additionally build novel test sets. AraBench offers 4 coarse, 15 fine-grained and 25 city-level dialect categories, belonging to diverse genres, such as media, chat, religion and travel, with varying levels of dialectness. We report strong baselines using several training settings: fine-tuning, back-translation and data augmentation. The evaluation suite opens a wide range of research frontiers to push efforts in low-resource machine translation, particularly Arabic dialect translation. The evaluation suite and the dialectal system are publicly available for research purposes.", "phrases": ["machine translation", "evaluation benchmark", "dialectal arabic"], "overall_score": 1.9912575456122126, "scores": [1.870459698305996, 0.9271942425069598, 0.536372048406636], "rank_score": 1.1113419964065308} -{"id": "parikh-etal-2020-totto", "title": "ToTTo: A Controlled Table-To-Text Generation Dataset", "abstract": "We present ToTTo, an open-domain English table-to-text dataset with over 120,000 training examples that proposes a controlled generation task: given a Wikipedia table and a set of highlighted table cells, produce a one-sentence description. To obtain generated targets that are natural but also faithful to the source table, we introduce a dataset construction process where annotators directly revise existing candidate sentences from Wikipedia. We present systematic analyses of our dataset and annotation process as well as results achieved by several state-of-the-art baselines. 
While usually fluent, existing methods often hallucinate phrases that are not supported by the table, suggesting that this dataset can serve as a useful research benchmark for high-precision conditional text generation.", "phrases": ["table-to-text generation dataset", "wikipedia table", "table cell", "annotator", "totto"], "overall_score": 3.211729210603353, "scores": [1.6502493443421289, 1.0122564326890193, 1.270777184854215, 1.1002756954446908, 0.5223515847162786], "rank_score": 1.1111820484092667} -{"id": "lee-etal-2019-countering", "title": "Countering Language Drift via Visual Grounding", "abstract": "Emergent multi-agent communication protocols are very different from natural language and not easily interpretable by humans. We find that agents that were initially pretrained to produce natural language can also experience detrimental language drift: when a non-linguistic reward is used in a goal-based task, e.g. some scalar success metric, the communication protocol may easily and radically diverge from natural language. We recast translation as a multi-agent communication game and examine auxiliary training constraints for their effectiveness in mitigating language drift. We show that a combination of syntactic (language model likelihood) and semantic (visual grounding) constraints gives the best communication performance, allowing pre-trained agents to retain English syntax while learning to accurately convey the intended meaning.", "phrases": ["language drift", "visual grounding", "agent"], "overall_score": 1.787897853909619, "scores": [1.8737450810385696, 0.9251679095556815, 0.53373719758036], "rank_score": 1.1108833960582036} -{"id": "haghighi-klein-2006-prototype", "title": "Prototype-Driven Learning for Sequence Models", "abstract": "We investigate prototype-driven learning for primarily unsupervised sequence modeling. Prior knowledge is specified declaratively, by providing a few canonical examples of each target annotation label. This sparse prototype information is then propagated across a corpus using distributional similarity features in a log-linear generative model. On part-of-speech induction in English and Chinese, as well as an information extraction task, prototype features provide substantial error rate reductions over competitive baselines and outperform previous work. For example, we can achieve an English part-of-speech tagging accuracy of 80.5% using only three examples of each tag and no dictionary constraints. We also compare to semi-supervised learning and discuss the system's error trends.", "phrases": ["prior knowledge", "prototype", "prototype-driven learning"], "overall_score": 3.6606459480743183, "scores": [0.9381272854566034, 1.8659313983803523, 0.5280048528928784], "rank_score": 1.1106878455766112} -{"id": "garcia-etal-2019-comparison", "title": "A comparison of statistical association measures for identifying dependency-based collocations in various languages.", "abstract": "This paper presents an exploration of different statistical association measures to automatically identify collocations from corpora in English, Portuguese, and Spanish. To evaluate the impact of the association metrics we manually annotated corpora with three different syntactic patterns of collocations (adjective-noun, verb-object and nominal compounds). 
We took advantage of the PARSEME 1.1 Shared Task corpora by selecting a subset of 155k tokens in the three aforementioned languages, in which we annotated 1,526 collocations with the corresponding Lexical Functions according to the Meaning-Text Theory. Using the resulting gold standard, we have carried out a comparison between frequency data and several well-known association measures, both symmetric and asymmetric. The results show that the combination of dependency triples with raw frequency information is as powerful as the best association measures in most syntactic patterns and languages. Furthermore, and despite the asymmetric behaviour of collocations, directional approaches perform worse than the symmetric ones in the extraction of these phraseological combinations.", "phrases": ["statistical association measure", "collocation", "portuguese"], "overall_score": 1.2201139369811838, "scores": [1.8846213139889019, 0.9105714763573804, 0.5365939088368532], "rank_score": 1.1105955663943785} -{"id": "zaidan-callison-burch-2014-arabic", "title": "Arabic Dialect Identification", "abstract": "The written form of the Arabic language, Modern Standard Arabic (MSA), differs in a non-trivial manner from the various spoken regional dialects of Arabic\u2014the true \u201cnative languages\u201d of Arabic speakers. Those dialects, in turn, differ quite a bit from each other. However, due to MSA's prevalence in written form, almost all Arabic data sets have predominantly MSA content. In this article, we describe the creation of a novel Arabic resource with dialect annotations. We have created a large monolingual data set rich in dialectal Arabic content called the Arabic On-line Commentary Data set (Zaidan and Callison-Burch 2011). We describe our annotation effort to identify the dialect level (and dialect itself) in each of more than 100,000 sentences from the data set by crowdsourcing the annotation task, and delve into interesting annotator behaviors (like over-identification of one's own dialect). Using this new annotated data set, we consider the task of Arabic dialect identification: Given the word sequence forming an Arabic sentence, determine the variety of Arabic in which it is written. We use the data to train and evaluate automatic classifiers for dialect identification, and establish that classifiers using dialectal data significantly and dramatically outperform baselines that use MSA-only data, achieving near-human classification accuracy. Finally, we apply our classifiers to discover dialectal data from a large Web crawl consisting of 3.5 million pages mined from on-line Arabic newspapers.", "phrases": ["arabic resource", "zaidan", "arabic dialect identification", "language variation"], "overall_score": 3.8485396777669143, "scores": [2.7725885569806152, 0.5834296372106996, 0.5647685721975702, 0.5210285198341045], "rank_score": 1.1104538215557473} -{"id": "bhagat-etal-2007-ledir", "title": "LEDIR: An Unsupervised Algorithm for Learning Directionality of Inference Rules", "abstract": "Semantic inference is a core component of many natural language applications. In response, several researchers have developed algorithms for automatically learning inference rules from textual corpora. However, these rules are often either imprecise or underspecified in directionality. In this paper we propose an algorithm called LEDIR that filters incorrect inference rules and identifies the directionality of correct ones. 
Based on an extension to Harris\u2019s distributional hypothesis, we use selectional preferences to gather evidence of inference directionality and plausibility. Experiments provide empirical evidence that our approach can classify inference rules significantly better than several baselines.", "phrases": ["directionality", "inference rule", "selectional preference", "ledir", "distributional similarity method"], "overall_score": 3.006163200295269, "scores": [1.8229096570228447, 1.2841290065728075, 0.9858916138566208, 0.8877932888607489, 0.5696964352654634], "rank_score": 1.110084000315697} -{"id": "kondrak-2009-identification", "title": "Identification of Cognates and Recurrent Sound Correspondences in Word Lists", "abstract": "Identification of cognates and recurrent sound correspondences is a component of two principal tasks of historical linguistics: demonstrating the relatedness of languages, and reconstructing the histories of language families. We propose methods for detecting and quantifying three characteristics of cognates: recurrent sound correspondences, phonetic similarity, and semantic affinity. The ultimate goal is to identify cognates and correspondences directly from lists of words representing pairs of languages that are known to be related. The proposed solutions are language independent, and are evaluated against authentic linguistic data. The results of evaluation experiments involving the Indo-European, Algonquian, and Totonac language families indicate that our methods are more accurate than comparable programs, and achieve high precision and recall on various test sets.", "phrases": ["cognate", "phonetic similarity", "semantic affinity", "identification"], "overall_score": 2.3083524621409857, "scores": [2.0888446229602295, 0.9358674512351767, 0.8876091523654129, 0.528010306445353], "rank_score": 1.110082883251543} -{"id": "chen-he-2013-automated", "title": "Automated Essay Scoring by Maximizing Human-Machine Agreement", "abstract": "Previous approaches for automated essay scoring (AES) learn a rating model by minimizing either the classification, regression, or pairwise classification loss, depending on the learning algorithm used. In this paper, we argue that the current AES systems can be further improved by taking into account the agreement between human and machine raters. To this end, we propose a rank-based approach that utilizes listwise learning-to-rank algorithms for learning a rating model, where the agreement between the human and machine raters is directly incorporated into the loss function. Various linguistic and statistical features are utilized to facilitate the learning algorithms. Experiments on the publicly available English essay dataset, Automated Student Assessment Prize (ASAP), show that our proposed approach outperforms the state-of-the-art algorithms, and achieves performance comparable to professional human raters, which suggests the effectiveness of our proposed method for automated essay scoring.", "phrases": ["essay", "machine rater", "ranking problem"], "overall_score": 3.4795238213616737, "scores": [1.7358156791419743, 1.040129787275514, 0.5532175762232171], "rank_score": 1.1097210142135685} -{"id": "nicolai-etal-2015-inflection", "title": "Inflection Generation as Discriminative String Transduction", "abstract": "We approach the task of morphological inflection generation as discriminative string transduction. 
Our supervised system learns to generate word-forms from lemmas accompanied by morphological tags, and refines them by referring to the other forms within a paradigm. Results of experiments on six diverse languages with varying amounts of training data demonstrate that our approach improves the state of the art in terms of predicting inflected word-forms.", "phrases": ["discriminative string transduction", "inflection generation", "word form", "semi-supervised learning"], "overall_score": 3.005051963068493, "scores": [2.051844361465589, 0.941556764324701, 0.9133206838805914, 0.5319728080956568], "rank_score": 1.1096736544416346} -{"id": "rink-harabagiu-2011-generative", "title": "A generative model for unsupervised discovery of relations and argument classes from clinical texts", "abstract": "This paper presents a generative model for the automatic discovery of relations between entities in electronic medical records. The model discovers relation instances and their types by determining which context tokens express the relation. Additionally, the valid semantic classes for each type of relation are determined. We show that the model produces clusters of relation trigger words which better correspond with manually annotated relations than several existing clustering techniques. The discovered relations reveal some of the implicit semantic structure present in patient records.", "phrases": ["generative model", "discovery", "clinical text"], "overall_score": 1.2188771617072398, "scores": [1.7361860824520534, 0.8068299110209406, 0.7853934216035438], "rank_score": 1.1094698050255125} -{"id": "zesch-melamud-2014-automatic", "title": "Automatic Generation of Challenging Distractors Using Context-Sensitive Inference Rules", "abstract": "Automatically generating challenging distractors for multiple-choice gap-fill items is still an unsolved problem. We propose to employ context-sensitive lexical inference rules in order to generate distractors that are semantically similar to the gap target word in some sense, but not in the particular sense induced by the gap-fill context. We hypothesize that such distractors should be particularly hard to distinguish from the correct answer. We focus on verbs as they are especially difficult to master for language learners and find that our approach is quite effective. In our test set of 20 items, our proposed method decreases the number of invalid distractors in 90% of the cases, and fully eliminates all of them in 65%. Further analysis on that dataset does not support our hypothesis regarding item difficulty as measured by average error rate of language learners. We conjecture that this may be due to limitations in our evaluation setting, which we plan to address in future work.", "phrases": ["distractor", "inference rule", "unsolved problem", "target word", "gap-fill context"], "overall_score": 2.6603713521219365, "scores": [2.2407164743465726, 1.3390434492853238, 0.8508223869008532, 0.5687978385508966, 0.5479249861761883], "rank_score": 1.109461027051967} -{"id": "kartsaklis-sadrzadeh-2016-distributional", "title": "Distributional Inclusion Hypothesis for Tensor-based Composition", "abstract": "According to the distributional inclusion hypothesis, entailment between words can be measured via the feature inclusions of their distributional vectors. In recent work, we showed how this hypothesis can be extended from words to phrases and sentences in the setting of compositional distributional semantics. 
This paper focuses on inclusion properties of tensors; its main contribution is a theoretical and experimental analysis of how feature inclusion works in different concrete models of verb tensors. We present results for relational, Frobenius, projective, and holistic methods and compare them to the simple vector addition, multiplication, min, and max models. The degrees of entailment thus obtained are evaluated via a variety of existing word-based measures, such as Weeds' and Clarke's, KL-divergence, APinc, balAPinc, and two of our previously proposed metrics at the phrase/sentence level. We perform experiments on three entailment datasets, investigating which version of tensor-based composition achieves the highest performance when combined with the sentence-level measures.", "phrases": ["tensor-based composition", "entailment", "distributional inclusion hypothesis"], "overall_score": 1.7850884070095494, "scores": [1.7690062518266498, 0.9812195218897515, 0.5771875919407992], "rank_score": 1.1091377885524} -{"id": "hill-etal-2016-learning", "title": "Learning Distributed Representations of Sentences from Unlabelled Data", "abstract": "Unsupervised methods for learning distributed representations of words are ubiquitous in today's NLP research, but far less is known about the best ways to learn distributed phrase or sentence representations from unlabelled data. This paper is a systematic comparison of models that learn such representations. We find that the optimal approach depends critically on the intended application. Deeper, more complex models are preferable for representations to be used in supervised systems, but shallow log-linear models work best for building representation spaces that can be decoded with simple spatial distance metrics. We also propose two new unsupervised representation-learning objectives designed to optimise the trade-off between training time, domain portability and performance.", "phrases": ["unlabelled data", "sentence representation", "sequential denoising autoencoder", "bag-of-word", "hypothesis"], "overall_score": 4.444674918527955, "scores": [0.8691565171711119, 2.1979045511908835, 1.1099335237293153, 0.8468543501981952, 0.5218278371047584], "rank_score": 1.109135355878853} -{"id": "gollapalli-li-2015-emnlp", "title": "EMNLP versus ACL: Analyzing NLP research over time", "abstract": "The conferences ACL (Association for Computational Linguistics) and EMNLP (Empirical Methods in Natural Language Processing) rank among the premier venues that track the research developments in Natural Language Processing and Computational Linguistics. In this paper, we present a study on the research papers of approximately two decades from these two NLP conferences. We apply keyphrase extraction and corpus analysis tools to the proceedings from these venues and propose probabilistic and vector-based representations to represent the topics published in a venue for a given year. Next, similarity metrics are studied over pairs of venue representations to capture the progress of the two venues with respect to each other and over time.", "phrases": ["venue", "keyphrase extraction", "emnlp"], "overall_score": 1.5375383283159045, "scores": [2.256705133387976, 0.5476152491845052, 0.5229779995846126], "rank_score": 1.1090994607190312} -{"id": "zhang-etal-2019-improving", "title": "Improving Deep Transformer with Depth-Scaled Initialization and Merged Attention", "abstract": "The general trend in NLP is towards increasing model capacity and performance via deeper neural networks. 
However, simply stacking more layers of the popular Transformer architecture for machine translation results in poor convergence and high computational overhead. Our empirical analysis suggests that convergence is poor due to gradient vanishing caused by the interaction between residual connections and layer normalization. We propose depth-scaled initialization (DS-Init), which decreases parameter variance at the initialization stage, and reduces output variance of residual connections so as to ease gradient back-propagation through normalization layers. To address computational cost, we propose a merged attention sublayer (MAtt) which combines a simplified average-based self-attention sublayer and the encoder-decoder attention sublayer on the decoder side. Results on WMT and IWSLT translation tasks with five translation directions show that deep Transformers with DS-Init and MAtt can substantially outperform their base counterpart in terms of BLEU (+1.1 BLEU on average for 12-layer models), while matching the decoding speed of the baseline model thanks to the efficiency improvements of MAtt. Source code for reproduction will be released soon.", "phrases": ["deep transformer", "initialization", "attention sublayer", "deep model"], "overall_score": 2.755887892159281, "scores": [2.1525077419572, 0.8074434976465218, 0.8933455411507695, 0.582906715895009], "rank_score": 1.1090508741623752} -{"id": "cettolo-etal-2015-iwslt", "title": "The IWSLT 2015 Evaluation Campaign", "abstract": "The IWSLT 2015 Evaluation Campaign featured three tracks: automatic speech recognition (ASR), spoken language translation (SLT), and machine translation (MT). For ASR we offered two tasks, on English and German, while for SLT and MT a number of tasks were proposed, involving English, German, French, Chinese, Czech, Thai, and Vietnamese. All tracks involved the transcription or translation of TED talks, either made available by the official TED website or by other TEDx events. A notable change with respect to previous evaluations was the use of unsegmented speech in the SLT track in order to better fit a real application scenario. Thus, on the one hand, participants were encouraged to develop advanced methods for sentence segmentation; on the other hand, organisers had to cope with the automatic evaluation of SLT outputs not matching the sentence-wise arrangement of the human references. A new evaluation server was also developed to allow participants to score their MT and SLT systems on selected dev and test sets. This year 16 teams participated in the evaluation, for a total of 63 primary submissions. All runs were evaluated with objective metrics, and submissions for two of the MT translation tracks were also evaluated with human post-editing.", "phrases": ["iwslt", "evaluation campaign", "ted talk", "participant", "cascade system"], "overall_score": 3.321154421358039, "scores": [2.9367359523903906, 0.9490272016321124, 0.5642316038527782, 0.5528351251985392, 0.5403130250505817], "rank_score": 1.1086285816248804} -{"id": "georgi-etal-2012-measuring", "title": "Measuring the Divergence of Dependency Structures Cross-Linguistically to Improve Syntactic Projection Algorithms", "abstract": "Syntactic parses can provide valuable information for many NLP tasks, such as machine translation, semantic analysis, etc. However, most of the world's languages do not have large amounts of syntactically annotated corpora available for building parsers. 
Syntactic projection techniques attempt to address this issue by using parallel corpora between resource-poor and resource-rich languages, bootstrapping the resource-poor language with the syntactic analysis of the resource-rich language. In this paper, we investigate the possibility of using small, parallel, annotated corpora to automatically detect divergent structural patterns between two languages. These patterns can then be used to improve structural projection algorithms, allowing for better performing NLP tools for resource-poor languages, in particular those that may not have large amounts of annotated data necessary for traditional, fully-supervised methods. While this detection process is not exhaustive, we demonstrate that important instances of divergence are picked up with minimal prior knowledge of a given language pair.", "phrases": ["divergence", "parallel corpora", "resource-rich language"], "overall_score": 1.5366226242102266, "scores": [2.2061867284464816, 0.569412106870537, 0.5497179241818002], "rank_score": 1.1084389198329396} -{"id": "chrupala-etal-2008-learning", "title": "Learning Morphology with Morfette", "abstract": "Morfette is a modular, data-driven, probabilistic system which learns to perform joint morphological tagging and lemmatization from morphologically annotated corpora. The system is composed of two learning modules which are trained to predict morphological tags and lemmas using the Maximum Entropy classifier. The third module dynamically combines the predictions of the Maximum-Entropy models and outputs a probability distribution over tag-lemma pair sequences. The lemmatization module exploits the idea of recasting lemmatization as a classification task by using class labels which encode mappings from word forms to lemmas. Experimental evaluation results and error analysis on three morphologically rich languages show that the system achieves high accuracy with no language-specific feature engineering or additional resources.", "phrases": ["morfette", "probabilistic system", "joint morphological tagging", "classification task", "edit tree"], "overall_score": 2.92497558064136, "scores": [2.67470670996948, 1.1247936279268056, 0.5971775472217007, 0.592412960209714, 0.5526144215824832], "rank_score": 1.1083410533820366} -{"id": "roberts-etal-2010-linguistic", "title": "A Linguistic Resource for Semantic Parsing of Motion Events", "abstract": "This paper presents a corpus of annotated motion events and their event structure. We consider motion events triggered by a set of motion evoking words and contemplate both literal and figurative interpretations of them. Figurative motion events are extracted into the same event structure but are marked as figurative in the corpus. To represent the event structure of motion, we use the FrameNet annotation standard, which encodes motion in over 70 frames. In order to acquire a diverse set of texts that are different from FrameNet's, we crawled blog and news feeds for five different domains: sports, newswire, finance, military, and gossip. We then annotated these documents with an automatic FrameNet parser. Its output was manually corrected to account for missing and incorrect frames as well as missing and incorrect frame elements. 
The corpus, UTD-MotionEvent, may act as a resource for semantic parsing, detection of figurative language, spatial reasoning, and other tasks.", "phrases": ["semantic parsing", "motion event", "framenet"], "overall_score": 1.217596841175251, "scores": [1.8591871658998411, 0.9069187116987597, 0.5588073435653207], "rank_score": 1.1083044070546404} -{"id": "yeniterzi-oflazer-2010-syntax", "title": "Syntax-to-Morphology Mapping in Factored Phrase-Based Statistical Machine Translation from English to Turkish", "abstract": "We present a novel scheme to apply factored phrase-based SMT to a language pair with very disparate morphological structures. Our approach relies on syntactic analysis on the source side (English) and then encodes a wide variety of local and non-local syntactic structures as complex structural tags which appear as additional factors in the training data. On the target side (Turkish), we only perform morphological analysis and disambiguation but treat the complete complex morphological tag as a factor, instead of separating morphemes. We incrementally explore capturing various syntactic substructures as complex tags on the English side, and evaluate how our translations improve in BLEU scores. Our maximal set of source and target side transformations, coupled with some additional techniques, provide a 39% relative improvement from a baseline 17.08 to 23.78 BLEU, all averaged over 10 training and test sets. Now that the syntactic analysis on the English side is available, we also experiment with more long distance constituent reordering to bring the English constituent order close to Turkish, but find that these transformations do not provide any additional consistent tangible gains when averaged over the 10 sets.", "phrases": ["factor", "statistical machine translation", "morphological structure", "syntax-to-morphology mapping", "rich language"], "overall_score": 3.0711975445418394, "scores": [1.4875205828372204, 1.4350625050681436, 1.2475018868656773, 0.8310972559559504, 0.5373196031490999], "rank_score": 1.1077003667752183} -{"id": "cai-etal-2016-bidirectional", "title": "Bidirectional Recurrent Convolutional Neural Network for Relation Classification", "abstract": "Relation classification is an important semantic processing task in the field of natural language processing (NLP). In this paper, we present a novel model BRCNN to classify the relation of two entities in a sentence. Some state-of-the-art systems concentrate on modeling the shortest dependency path (SDP) between two entities leveraging convolutional or recurrent neural networks. We further explore how to make full use of the dependency relations information in the SDP, by combining convolutional neural networks and two-channel recurrent neural networks with long short-term memory (LSTM) units. We propose a bidirectional architecture to learn relation representations with directional information along the SDP forwards and backwards at the same time, which benefits classifying the direction of relations. Experimental results show that our method outperforms the state-of-the-art approaches on the SemEval-2010 Task 8 dataset.", "phrases": ["relation classification", "state-of-the-art system", "rnn"], "overall_score": 2.9997030706270764, "scores": [1.9138578938031294, 0.8609553291831347, 0.5482821938632755], "rank_score": 1.10769847228318} -{"id": "ethayarajh-2020-classifier", "title": "Is Your Classifier Actually Biased? 
Measuring Fairness under Uncertainty with Bernstein Bounds", "abstract": "Most NLP datasets are not annotated with protected attributes such as gender, making it difficult to measure classification bias using standard measures of fairness (e.g., equal opportunity). However, manually annotating a large dataset with a protected attribute is slow and expensive. Instead of annotating all the examples, can we annotate a subset of them and use that sample to estimate the bias? While it is possible to do so, the smaller this annotated sample is, the less certain we are that the estimate is close to the true bias. In this work, we propose using Bernstein bounds to represent this uncertainty about the bias estimate as a confidence interval. We provide empirical evidence that a 95% confidence interval derived this way consistently bounds the true bias. In quantifying this uncertainty, our method, which we call Bernstein-bounded unfairness, helps prevent classifiers from being deemed biased or unbiased when there is insufficient evidence to make either claim. Our findings suggest that the datasets currently used to measure specific biases are too small to conclusively identify bias except in the most egregious cases. For example, consider a co-reference resolution system that is 5% more accurate on gender-stereotypical sentences \u2013 to claim it is biased with 95% confidence, we need a bias-specific dataset that is 3.8 times larger than WinoBias, the largest available.", "phrases": ["fairness", "uncertainty", "claim"], "overall_score": 1.53480856994405, "scores": [1.8579570136233896, 0.8976117905955225, 0.5658222646393317], "rank_score": 1.1071303562860813} -{"id": "swayamdipta-etal-2018-syntactic", "title": "Syntactic Scaffolds for Semantic Structures", "abstract": "We introduce the syntactic scaffold, an approach to incorporating syntactic information into semantic tasks. Syntactic scaffolds avoid expensive syntactic processing at runtime, only making use of a treebank during training, through a multitask objective. We improve over strong baselines on PropBank semantics, frame semantics, and coreference resolution, achieving competitive performance on all three tasks.", "phrases": ["scaffold", "semantic task", "coreference resolution", "srl", "multi-task learning"], "overall_score": 3.470620848785738, "scores": [1.5452340008762597, 1.442717851230492, 0.8801289673955625, 0.836739415574597, 0.8295877557815248], "rank_score": 1.106881598171687} -{"id": "kotani-etal-2014-listenability", "title": "A Listenability Measuring Method for an Adaptive Computer-assisted Language Learning and Teaching System", "abstract": "In teaching and learning of English as a foreign language, the Internet serves as a source of authentic listening material, enabling learners to practice English in real contexts. An adaptive computer-assisted language learning and teaching system can pick up news clips as authentic materials from the Internet according to learner listening proficiency if it is equipped with a listenability measuring method that takes into account both linguistic features of a news clip and the listening proficiency. Therefore, we developed a method for measuring listening proficiency-based listenability. With our method, listenability is measured through multiple regression analysis using both learner and linguistic features as independent variables. Learner features account for learner listening proficiency, and linguistic features explain lexical, syntactic, and phonological complexities of sentences. 
A cross validation test showed that listenability measured with our method exhibited higher correlation (r = 0.57) than listenability measured with other methods using either learner features (r = 0.43) or other linguistic features (r = 0.32, r = 0.36). A comparison of our method with other methods showed a statistically significant difference (p < 0.003 after Bonferroni correction). These results suggest the effectiveness of learner and linguistic features for measuring listening proficiency-based listenability.", "phrases": ["listenability", "teaching system", "learner"], "overall_score": 1.7814406548451265, "scores": [2.210185008706202, 0.5642013201106275, 0.5462276092832566], "rank_score": 1.1068713127000287} -{"id": "wang-etal-2020-asking", "title": "Asking and Answering Questions to Evaluate the Factual Consistency of Summaries", "abstract": "Practical applications of abstractive summarization models are limited by frequent factual inconsistencies with respect to their input. Existing automatic evaluation metrics for summarization are largely insensitive to such errors. We propose QAGS (pronounced \u201ckags\u201d), an automatic evaluation protocol that is designed to identify factual inconsistencies in a generated summary. QAGS is based on the intuition that if we ask questions about a summary and its source, we will receive similar answers if the summary is factually consistent with the source. To evaluate QAGS, we collect human judgments of factual consistency on model-generated summaries for the CNN/DailyMail (Hermann et al., 2015) and XSUM (Narayan et al., 2018) summarization datasets. QAGS has substantially higher correlations with these judgments than other automatic evaluation metrics. Also, QAGS offers a natural form of interpretability: The answers and questions generated while computing QAGS indicate which tokens of a summary are inconsistent and why. We believe QAGS is a promising tool in automatically generating usable and factually consistent text. Code for QAGS will be available at .", "phrases": ["factual consistency", "summarizer", "evaluation metric", "similar answer", "question generation"], "overall_score": 3.9030717397335377, "scores": [2.763301495415274, 1.0931791363427266, 0.562295799298248, 0.5592910984586937, 0.5560684620941123], "rank_score": 1.106827198321811} -{"id": "cheng-etal-2016-semi", "title": "Semi-Supervised Learning for Neural Machine Translation", "abstract": "While end-to-end neural machine translation (NMT) has made remarkable progress recently, NMT systems only rely on parallel corpora for parameter estimation. Since parallel corpora are usually limited in quantity, quality, and coverage, especially for low-resource languages, it is appealing to exploit monolingual corpora to improve NMT. We propose a semi-supervised approach for training NMT models on the concatenation of labeled (parallel corpora) and unlabeled (monolingual corpora) data. The central idea is to reconstruct the monolingual corpora using an autoencoder, in which the source-to-target and target-to-source translation models serve as the encoder and decoder, respectively. Our approach can not only exploit the monolingual corpora of the target language, but also of the source language. 
Experiments on the Chinese-English dataset show that our approach achieves significant improvements over state-of-the-art SMT and NMT systems.", "phrases": ["neural machine translation", "target-to-source translation model", "semi-supervised learning", "monolingual data", "usage"], "overall_score": 3.688070573548878, "scores": [0.9642380270944283, 0.9563433901655543, 2.092571481741073, 0.9617153170971864, 0.5591117096341122], "rank_score": 1.1067959851464708} -{"id": "wang-etal-2010-character-based", "title": "A Character-Based Joint Model for Chinese Word Segmentation", "abstract": "The character-based tagging approach is a dominant technique for Chinese word segmentation, and both discriminative and generative models can be adopted in that framework. However, generative and discriminative character-based approaches are significantly different and complement each other. A simple joint model combining the character-based generative model and the discriminative one is thus proposed in this paper to take advantage of both approaches. Experiments on the Second SIGHAN Bakeoff show that this joint approach achieves 21% relative error reduction over the discriminative model and 14% over the generative one. In addition, closed tests also show that the proposed joint model outperforms all the existing approaches reported in the literature and achieves the best F-score in four out of five corpora.", "phrases": ["joint model", "chinese word segmentation", "character-based tagging approach"], "overall_score": 1.9829251597247968, "scores": [1.7894292941397214, 0.946683661400522, 0.5839618543544721], "rank_score": 1.1066916032982383} -{"id": "tron-etal-2006-morphdb", "title": "Morphdb.hu: Hungarian lexical database and morphological grammar", "abstract": "This paper describes morphdb.hu, a Hungarian lexical database and morphological grammar. Morphdb.hu is the outcome of a several-year collaborative effort and represents the resource with the widest coverage and broadest range of applicability presently available for Hungarian. The grammar resource is the formalization of well-founded theoretical decisions handling inflection and productive derivation. The lexical database was created by merging three independent lexical databases, and the resulting resource was further extended.", "phrases": ["hungarian", "lexical database", "morphdb.hu"], "overall_score": 1.5338320589226897, "scores": [1.861707321361968, 0.8881591728801512, 0.5694113632042905], "rank_score": 1.1064259524821365} -{"id": "fukui-etal-2016-multimodal", "title": "Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding", "abstract": "Modeling textual or visual information with vector representations trained from large language or visual datasets has been successfully explored in recent years. However, tasks such as visual question answering require combining these vector representations with each other. Approaches to multimodal pooling include element-wise product or sum, as well as concatenation of the visual and textual representations. We hypothesize that these methods are not as expressive as an outer product of the visual and textual vectors. As the outer product is typically infeasible due to its high dimensionality, we instead propose utilizing Multimodal Compact Bilinear pooling (MCB) to efficiently and expressively combine multimodal features. We extensively evaluate MCB on the visual question answering and grounding tasks. We consistently show the benefit of MCB over ablations without MCB. 
For visual question answering, we present an architecture which uses MCB twice, once for predicting attention over spatial features and again to combine the attended representation with the question representation. This model outperforms the state-of-the-art on the Visual7W dataset and the VQA challenge.", "phrases": ["visual question", "image", "multimodal compact bilinear"], "overall_score": 3.5162583372006555, "scores": [1.3777174268702808, 1.050187212223278, 0.8913516022165658], "rank_score": 1.1064187471033748}
-{"id": "zou-lu-2019-text2math", "title": "Text2Math: End-to-end Parsing Text into Math Expressions", "abstract": "We propose Text2Math, a model for semantically parsing text into math expressions. The model can be used to solve different math related problems including arithmetic word problems and equation parsing problems. Unlike previous approaches, we tackle the problem from an end-to-end structured prediction perspective where our algorithm aims to predict the complete math expression at once as a tree structure, where minimal manual efforts are involved in the process. Empirical results on benchmark datasets demonstrate the efficacy of our approach.", "phrases": ["math expression", "word problem", "text2math"], "overall_score": 1.9822052183726708, "scores": [1.833101860872793, 0.9312110635940507, 0.554556464670526], "rank_score": 1.1062897963791232}
-{"id": "zhou-etal-2016-attention", "title": "Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification", "abstract": "Relation classification is an important semantic processing task in the field of natural language processing (NLP). State-of-the-art systems still rely on lexical resources such as WordNet or NLP systems like dependency parser and named entity recognizers (NER) to get high-level features. Another challenge is that important information can appear at any position in the sentence. To tackle these problems, we propose Attention-Based Bidirectional Long Short-Term Memory Networks (AttBLSTM) to capture the most important semantic information in a sentence. The experimental results on the SemEval-2010 relation classification task show that our method outperforms most of the existing methods, with only word vectors.", "phrases": ["relation classification", "deep neural network", "self-attention", "argument representation", "powerful encoder"], "overall_score": 3.994702077578375, "scores": [2.84051916298912, 1.0825903205470193, 0.5560462347362992, 0.5263682913729969, 0.52589788115727], "rank_score": 1.1062843781605411}
-{"id": "hogan-2007-coordinate", "title": "Coordinate Noun Phrase Disambiguation in a Generative Parsing Model", "abstract": "In this paper we present methods for improving the disambiguation of noun phrase (NP) coordination within the framework of a lexicalised history-based parsing model. As well as reducing noise in the data, we look at modelling two main sources of information for disambiguation: symmetry in conjunct structure, and the dependency between conjunct lexical heads. Our changes to the baseline model result in an increase in NP coordination dependency f-score from 69.9% to 73.8%, which represents a relative reduction in f-score error of 13%.", "phrases": ["generative parsing model", "conjunct", "coordination disambiguation"], "overall_score": 2.919511432775063, "scores": [1.4161119326350053, 1.3146474013981757, 0.5880523492287635], "rank_score": 1.106270561087315}
-{"id": "macher-etal-2021-read", "title": "Do we read what we hear?
Modeling orthographic influences on spoken word recognition", "abstract": "Theories and models of spoken word recognition aim to explain the process of accessing lexical knowledge given an acoustic realization of a word form. There is consensus that phonological and semantic information is crucial for this process. However, there is accumulating evidence that orthographic information could also have an impact on auditory word recognition. This paper presents two models of spoken word recognition that instantiate different hypotheses regarding the influence of orthography on this process. We show that these models reproduce human-like behavior in different ways and provide testable hypotheses for future research on the source of orthographic effects in spoken word recognition.", "phrases": ["influence", "word recognition", "orthographic effect"], "overall_score": 1.2149204002114224, "scores": [1.888832617789772, 0.8052195017640623, 0.623552496951004], "rank_score": 1.105868205501613} -{"id": "martschat-strube-2014-recall", "title": "Recall Error Analysis for Coreference Resolution", "abstract": "We present a novel method for coreference resolution error analysis which we apply to perform a recall error analysis of four state-of-the-art English coreference resolution systems. Our analysis highlights differences between the systems and identifies that the majority of recall errors for nouns and names are shared by all systems. We characterize this set of common challenging errors in terms of a broad range of lexical and semantic properties.", "phrases": ["coreference resolution", "name", "recall error analysis"], "overall_score": 2.2995669773132237, "scores": [1.5128422321713069, 0.9420268853368876, 0.8627047568536169], "rank_score": 1.1058579581206038} -{"id": "othman-etal-2003-chart", "title": "A chart parser for analyzing modern standard Arabic sentence", "abstract": "The parsing of Arabic sentence is a necessary prerequisite for many natural language processing applications such as machine translation and information retrieval. In this paper we report our attempt to develop an efficient chart parser for Analyzing Modern Standard Arabic (MSA) sentence. From a practical point of view, the parser is able to satisfy syntactic constraints reducing parsing ambiguity. Lexical semantic features are also used to disambiguate the sentence structure. We explain also an Arabic morphological analyzer based on ATN technique. Both the Arabic parser and the Arabic morphological analyzer are implemented in Prolog. The linguistic rules were acquired from a set of sentences from MSA sentence in the Agriculture domain.", "phrases": ["chart parser", "arabic sentence", "prolog"], "overall_score": 1.2144618560763007, "scores": [1.9110065283860207, 0.8810839941827748, 0.5242619393592606], "rank_score": 1.1054508206426854} -{"id": "reimers-etal-2019-classification", "title": "Classification and Clustering of Arguments with Contextualized Word Embeddings", "abstract": "We experiment with two recent contextualized word embedding methods (ELMo and BERT) in the context of open-domain argument search. For the first time, we show how to leverage the power of contextualized word embeddings to classify and cluster topic-dependent arguments, achieving impressive results on both tasks and across multiple datasets. For argument classification, we improve the state-of-the-art for the UKP Sentential Argument Mining Corpus by 20.8 percentage points and for the IBM Debater - Evidence Sentences dataset by 7.4 percentage points. 
For the understudied task of argument clustering, we propose a pre-training step which improves by 7.8 percentage points over strong baselines on a novel dataset, and by 12.3 percentage points for the Argument Facet Similarity (AFS) Corpus.", "phrases": ["clustering", "argument classification", "unseen topic"], "overall_score": 3.1949483073961207, "scores": [1.570136259185741, 1.2199008563078149, 0.5260916475292847], "rank_score": 1.1053762543409469}
-{"id": "fan-etal-2019-strategies", "title": "Strategies for Structuring Story Generation", "abstract": "Writers often rely on plans or sketches to write long stories, but most current language models generate word by word from left to right. We explore coarse-to-fine models for creating narrative texts of several hundred words, and introduce new models which decompose stories by abstracting over actions and entities. The model first generates the predicate-argument structure of the text, where different mentions of the same entity are marked with placeholder tokens. It then generates a surface realization of the predicate-argument structure, and finally replaces the entity placeholders with context-sensitive names and references. Human judges prefer the stories from our models to a wide range of previous approaches to hierarchical text generation. Extensive analysis shows that our methods can help improve the diversity and coherence of events and entities in generated stories.", "phrases": ["story", "language model", "predicate-argument structure", "coherence"], "overall_score": 3.641055016686892, "scores": [1.730070815844129, 0.9146904446236348, 0.8977891764898518, 0.876424366368842], "rank_score": 1.1047437008316143}
-{"id": "pradhan-etal-2012-conll", "title": "CoNLL-2012 Shared Task: Modeling Multilingual Unrestricted Coreference in OntoNotes", "abstract": "The CoNLL-2012 shared task involved predicting coreference in three languages -- English, Chinese and Arabic -- using OntoNotes data. It was a follow-on to the English-only task organized in 2011. Until the creation of the OntoNotes corpus, resources in this subfield of language processing have tended to be limited to noun phrase coreference, often on a restricted set of entities, such as ACE entities. OntoNotes provides a large-scale corpus of general anaphoric coreference not restricted to noun phrases or to a specified set of entity types and covering multiple languages. OntoNotes also provides additional layers of integrated annotation, capturing additional shallow semantic structure. This paper briefly describes the OntoNotes annotation (coreference and other layers) and then describes the parameters of the shared task including the format, pre-processing information, evaluation criteria, and presents and discusses the results achieved by the participating systems. Being a task that has a complex evaluation history, and multiple evaluation conditions, it has, in the past, been difficult to judge the improvement in new algorithms over previously reported results.
Having a standard test set and evaluation parameters, all based on a resource that provides multiple integrated annotation layers (parses, semantic roles, word senses, named entities and coreference) that could support joint models, should help to energize ongoing research in the task of entity and event coreference.", "phrases": ["conll-2012", "coreference resolution", "mention", "discourse deixis", "identity anaphora"], "overall_score": 4.364753678020012, "scores": [0.786012027976289, 1.9462669301394055, 1.6926111491055988, 0.5670013887880093, 0.5313740741376107], "rank_score": 1.1046531140293827} -{"id": "pennacchiotti-pantel-2009-entity", "title": "Entity Extraction via Ensemble Semantics", "abstract": "Combining information extraction systems yields significantly higher quality resources than each system in isolation. In this paper, we generalize such a mixing of sources and features in a framework called Ensemble Semantics. We show very large gains in entity extraction by combining state-of-the-art distributional and pattern-based systems with a large set of features from a webcrawl, query logs, and Wikipedia. Experimental results on a web-scale extraction of actors, athletes and musicians show significantly higher mean average precision scores (29% gain) compared with the current state of the art.", "phrases": ["ensemble semantic", "query log", "wikipedia", "entity extraction"], "overall_score": 2.426079150705631, "scores": [2.071372128471323, 0.9349239430996404, 0.8760664395966136, 0.5342623085799988], "rank_score": 1.104156204936894} -{"id": "zhang-etal-2009-hpsg", "title": "HPSG Supertagging: A Sequence Labeling View", "abstract": "Supertagging is a widely used speed-up technique for deep parsing. In another aspect, supertagging has been exploited in other NLP tasks than parsing for utilizing the rich syntactic information given by the supertags. However, the performance of supertagger is still a bottleneck for such applications. In this paper, we investigated the relationship between supertagging and parsing, not just to speed up the deep parser; We started from a sequence labeling view of HPSG supertagging, examining how well a supertagger can do when separated from parsing. Comparison of two types of supertagging model, point-wise model and sequential model, showed that the former model works competitively well despite its simplicity, which indicates the true dependency among supertag assignments is far more complex than the crude first-order approximation made in the sequential model. We then analyzed the limitation of separated supertagging by using a CFG-filter. The results showed that big gains could be acquired by resorting to a light-weight parser.", "phrases": ["sequence labeling view", "sequential model", "hpsg"], "overall_score": 1.5299645198690093, "scores": [1.8771990154564187, 0.9002599241272575, 0.5334493987429497], "rank_score": 1.1036361127755419} -{"id": "chklovski-etal-2004-senseval", "title": "The Senseval-3 Multilingual English-Hindi lexical sample task", "abstract": "This paper describes the English\u2010Hindi Multilingual lexical sample task in SENSEVAL\u20103. Rather than tagging an English word with a sense from an English dictionary, this task seeks to assign the most appropriate Hindi translation to an ambiguous target word. 
Training data was solicited via the Open Mind Word Expert (OMWE) from Web users who are fluent in English and Hindi.", "phrases": ["senseval-3", "lexical sample task", "distinction"], "overall_score": 1.7760560654303095, "scores": [1.963564532290191, 0.7984677559117429, 0.5485447493630949], "rank_score": 1.103525679188343} -{"id": "li-etal-2021-document", "title": "Document-Level Event Argument Extraction by Conditional Generation", "abstract": "Event extraction has long been treated as a sentence-level task in the IE community. We argue that this setting does not match human informative seeking behavior and leads to incomplete and uninformative extraction results. We propose a document-level neural event argument extraction model by formulating the task as conditional generation following event templates. We also compile a new document-level event extraction benchmark dataset WikiEvents which includes complete event and coreference annotation. On the task of argument extraction, we achieve an absolute gain of 7.6% F1 and 5.7% F1 over the next best model on the RAMS and WikiEvents dataset respectively. On the more challenging task of informative argument extraction, which requires implicit coreference reasoning, we achieve a 9.3% F1 gain over the best baseline. To demonstrate the portability of our model, we also create the first end-to-end zero-shot event extraction framework and achieve 97% of fully supervised model's trigger extraction performance and 82% of the argument extraction performance given only access to 10 out of the 33 types on ACE.", "phrases": ["conditional generation", "event extraction", "template", "wikievents"], "overall_score": 2.830347305974515, "scores": [1.8938070534244424, 1.04783028599925, 0.8683192174190888, 0.6039275578527782], "rank_score": 1.1034710286738898} -{"id": "tsuboi-etal-2008-training", "title": "Training Conditional Random Fields Using Incomplete Annotations", "abstract": "We address corpus building situations, where complete annotations to the whole corpus is time consuming and unrealistic. Thus, annotation is done only on crucial part of sentences, or contains unresolved label ambiguities. We propose a parameter estimation method for Conditional Random Fields (CRFs), which enables us to use such incomplete annotations. We show promising results of our method as applied to two types of NLP tasks: a domain adaptation task of a Japanese word segmentation using partial annotations, and a part-of-speech tagging task using ambiguous tags in the Penn treebank corpus.", "phrases": ["conditional random fields", "incomplete annotation", "parameter estimation method", "crf", "japanese word segmentation"], "overall_score": 2.9879548113233882, "scores": [2.4131270653889714, 0.9545423544608134, 1.0542306082352888, 0.5551266914224549, 0.5397742801697563], "rank_score": 1.1033601999354568} -{"id": "klerke-etal-2016-improving", "title": "Improving sentence compression by learning to predict gaze", "abstract": "We show how eye-tracking corpora can be used to improve sentence compression models, presenting a novel multi-task learning algorithm based on multi-layer LSTMs. 
We obtain performance competitive with or better than state-of-the-art approaches.", "phrases": ["sentence compression", "gaze", "multi-task learning approach", "function"], "overall_score": 2.829788875201032, "scores": [2.456939659063913, 0.8673477349429696, 0.5541947536711681, 0.5345311026133309], "rank_score": 1.1032533125728454} -{"id": "rottger-etal-2021-hatecheck", "title": "HateCheck: Functional Tests for Hate Speech Detection Models", "abstract": "Detecting online hate is a difficult task that even state-of-the-art models struggle with. Typically, hate speech detection models are evaluated by measuring their performance on held-out test data using metrics such as accuracy and F1 score. However, this approach makes it difficult to identify specific model weak points. It also risks overestimating generalisable model performance due to increasingly well-evidenced systematic gaps and biases in hate speech datasets. To enable more targeted diagnostic insights, we introduce HateCheck, a suite of functional tests for hate speech detection models. We specify 29 model functionalities motivated by a review of previous research and a series of interviews with civil society stakeholders. We craft test cases for each functionality and validate their quality through a structured annotation process. To illustrate HateCheck's utility, we test near-state-of-the-art transformer models as well as two popular commercial models, revealing critical model weaknesses.", "phrases": ["hate speech detection", "speech detection model", "online hate", "review", "hatecheck"], "overall_score": 2.829787384824328, "scores": [2.9298186986087202, 0.8437533004081432, 0.6360102286538445, 0.5840000705365859, 0.5226813593818268], "rank_score": 1.103252731517824} -{"id": "wu-etal-2019-extract", "title": "Extract and Edit: An Alternative to Back-Translation for Unsupervised Neural Machine Translation", "abstract": "The overreliance on large parallel corpora significantly limits the applicability of machine translation systems to the majority of language pairs. Back-translation has been dominantly used in previous approaches for unsupervised neural machine translation, where pseudo sentence pairs are generated to train the models with a reconstruction loss. However, the pseudo sentences are usually of low quality as translation errors accumulate during training. To avoid this fundamental issue, we propose an alternative but more effective approach, extract-edit, to extract and then edit real sentences from the target monolingual corpora. Furthermore, we introduce a comparative translation loss to evaluate the translated target sentences and thus train the unsupervised translation systems. 
Experiments show that the proposed approach consistently outperforms the previous state-of-the-art unsupervised machine translation systems across two benchmarks (English-French and English-German) and two low-resource language pairs (English-Romanian and English-Russian) by more than 2 (up to 3.63) BLEU points.", "phrases": ["alternative", "back-translation", "parallel corpora", "extract-edit", "real sentence"], "overall_score": 1.9766498307489617, "scores": [2.983913193937323, 0.8029849086375898, 0.5866524801492384, 0.5748342187888378, 0.5675615760456099], "rank_score": 1.1031892755117199}
-{"id": "hahn-powell-etal-2016-causal", "title": "This before That: Causal Precedence in the Biomedical Domain", "abstract": "Causal precedence between biochemical interactions is crucial in the biomedical domain, because it transforms collections of individual interactions, e.g., bindings and phosphorylations, into the causal mechanisms needed to inform meaningful search and inference. Here, we analyze causal precedence in the biomedical domain as distinct from open-domain, temporal precedence. First, we describe a novel, hand-annotated text corpus of causal precedence in the biomedical domain. Second, we use this corpus to investigate a battery of models of precedence, covering rule-based, feature-based, and latent representation models. The highest-performing individual model achieved a micro F1 of 43 points, approaching the best performers on the simpler temporal-only precedence tasks. Feature-based and latent representation models each outperform the rule-based models, but their performance is complementary to one another. We apply a sieve-based architecture to capitalize on this lack of overlap, achieving a micro F1 score of 46 points.", "phrases": ["causal precedence", "biomedical domain", "event mention"], "overall_score": 1.9766083214532972, "scores": [1.8337291822189234, 0.9118132750447246, 0.5639558689344785], "rank_score": 1.103166108732709}
-{"id": "dong-etal-2010-hownet", "title": "HowNet and Its Computation of Meaning", "abstract": "The presentation will mainly cover (1) What is HowNet? HowNet is an on-line common-sense knowledgebase unveiling inter-conceptual relationships and inter-attribute relationships of concepts as connoting in lexicons of the Chinese and their English equivalents. (2) How it functions in the computation of meaning and as an NLP platform? The presentation will show 9 HowNet-based application tools. All of them are not merely demonstration of some methodology or algorithm, but are real application tools that can be tested by users themselves. Apart from the tools that are specially designed to deal with Chinese, most of the tools are bilingual, even the WSD tool.", "phrases": ["computation", "english equivalent", "hownet"], "overall_score": 1.2117265210798007, "scores": [1.930357361959892, 0.8126071442293591, 0.5659185281034669], "rank_score": 1.1029610114309059}
-{"id": "gardent-etal-2003-bridges", "title": "Which bridges for bridging definite descriptions?", "abstract": "This paper presents a corpus study of bridging definite descriptions in the French corpus PAROLE.
It proposes a typology of bridging relations; describes a system for annotating NPs which allows for a user-friendly collection of all relevant information on the bridging definite descriptions occurring in the corpus and discusses the results of the corpus study.", "phrases": ["definite description", "corpus study", "bridging relation"], "overall_score": 2.29227227152834, "scores": [1.8990595273091828, 0.8682763919326202, 0.5397139192594123], "rank_score": 1.1023499461670718}
-{"id": "cao-etal-2018-joint", "title": "Joint Representation Learning of Cross-lingual Words and Entities via Attentive Distant Supervision", "abstract": "Joint representation learning of words and entities benefits many NLP tasks, but has not been well explored in cross-lingual settings. In this paper, we propose a novel method for joint representation learning of cross-lingual words and entities. It captures mutually complementary knowledge, and enables cross-lingual inferences among knowledge bases and texts. Our method does not require parallel corpus, and automatically generates comparable data via distant supervision using multi-lingual knowledge bases. We utilize two types of regularizers to align cross-lingual words and entities, and design knowledge attention and cross-lingual attention to further reduce noises. We conducted a series of experiments on three tasks: word translation, entity relatedness, and cross-lingual entity linking. The results, both qualitative and quantitative, demonstrate the significance of our method.", "phrases": ["cross-lingual word", "distant supervision", "joint representation learning"], "overall_score": 1.5280483351142118, "scores": [1.8703041121349724, 0.8741823376095907, 0.5622751832173023], "rank_score": 1.1022538776539552}
-{"id": "koehn-etal-2005-edinburgh", "title": "Edinburgh System Description for the 2005 IWSLT Speech Translation Evaluation", "abstract": "Our participation in the IWSLT 2005 speech translation task is our first effort to work on limited domain speech data. We adapted our statistical machine translation system that performed successfully in previous DARPA competitions on open domain text translations. We participated in the supplied corpora transcription track. We achieved the highest BLEU score in 2 out of 5 language pairs and had competitive results for the other language pairs.", "phrases": ["bleu score", "reordering model", "phrase-based smt"], "overall_score": 3.6721655791728876, "scores": [2.2322639637884207, 0.5412224462405149, 0.5325822012699033], "rank_score": 1.1020228704329462}
-{"id": "liu-etal-2015-fine", "title": "Fine-grained Opinion Mining with Recurrent Neural Networks and Word Embeddings", "abstract": "The tasks in fine-grained opinion mining can be regarded as either a token-level sequence labeling problem or as a semantic compositional task. We propose a general class of discriminative models based on recurrent neural networks (RNNs) and word embeddings that can be successfully applied to such tasks without any task-specific feature engineering effort. Our experimental results on the task of opinion target identification show that RNNs, without using any hand-crafted features, outperform feature-rich CRF-based models.
Our framework is flexible, allows us to incorporate other linguistic features, and achieves results that rival the top performing systems in SemEval-2014.", "phrases": ["recurrent neural networks", "linguistic feature", "fine-grained opinion mining", "sentiment analysis"], "overall_score": 3.7477349509131583, "scores": [1.968001619389752, 1.3264988164106968, 0.5787417717664808, 0.534305523851881], "rank_score": 1.1018869328547027} -{"id": "song-etal-2015-light", "title": "From Light to Rich ERE: Annotation of Entities, Relations, and Events", "abstract": "We describe the evolution of the Entities, Relations and Events (ERE) annotation task, created to support research and technology development within the DARPA DEFT program. We begin by describing the specification for Light ERE annotation, including the motivation for the task within the context of DEFT. We discuss the transition from Light ERE to a more complex Rich ERE specification, enabling more comprehensive treatment of phenomena of interest to DEFT.", "phrases": ["rich ere", "entities", "event type", "automatic content extraction", "trigger"], "overall_score": 3.1214398613703804, "scores": [2.719833129536834, 0.8034259027217868, 0.8965527379034599, 0.5561263441169679, 0.5327184574521857], "rank_score": 1.1017313143462468} -{"id": "elsner-etal-2013-joint", "title": "A Joint Learning Model of Word Segmentation, Lexical Acquisition, and Phonetic Variability", "abstract": "We present a cognitive model of early lexical acquisition which jointly performs word segmentation and learns an explicit model of phonetic variation. We define the model as a Bayesian noisy channel; we sample segmentations and word forms simultaneously from the posterior, using beam sampling to control the size of the search space. Compared to a pipelined approach in which segmentation is performed first, our model is qualitatively more similar to human learners. On data with variable pronunciations, the pipelined approach learns to treat syllables or morphemes as words. In contrast, our joint model, like infant learners, tends to learn multiword collocations. We also conduct analyses of the phonetic variations that the model learns to accept and its patterns of word recognition errors, and relate these to developmental evidence.", "phrases": ["word segmentation", "lexical acquisition", "learner"], "overall_score": 2.4196470579964466, "scores": [1.8253099797017336, 0.9221682078058957, 0.556208312663253], "rank_score": 1.101228833390294} -{"id": "fraser-etal-2021-understanding", "title": "Understanding and Countering Stereotypes: A Computational Approach to the Stereotype Content Model", "abstract": "Stereotypical language expresses widely-held beliefs about different social categories. Many stereotypes are overtly negative, while others may appear positive on the surface, but still lead to negative consequences. In this work, we present a computational approach to interpreting stereotypes in text through the Stereotype Content Model (SCM), a comprehensive causal theory from social psychology. The SCM proposes that stereotypes can be understood along two primary dimensions: warmth and competence. We present a method for defining warmth and competence axes in semantic embedding space, and show that the four quadrants defined by this subspace accurately represent the warmth and competence concepts, according to annotated lexicons. 
We then apply our computational SCM model to textual stereotype data and show that it compares favourably with survey-based studies in the psychological literature. Furthermore, we explore various strategies to counter stereotypical beliefs with anti-stereotypes. It is known that countering stereotypes with anti-stereotypical examples is one of the most effective ways to reduce biased thinking, yet the problem of generating anti-stereotypes has not been previously studied. Thus, a better understanding of how to generate realistic and effective anti-stereotypes can contribute to addressing pressing societal concerns of stereotyping, prejudice, and discrimination.", "phrases": ["stereotype", "computational approach", "scm"], "overall_score": 1.5264085417274273, "scores": [1.9881521239739517, 0.7919651382520138, 0.5230957880551059], "rank_score": 1.101071016760357}
-{"id": "lagarda-etal-2009-statistical", "title": "Statistical Post-Editing of a Rule-Based Machine Translation System", "abstract": "Automatic post-editing (APE) systems aim at correcting the output of machine translation systems to produce better quality translations, i.e., produce translations that can be manually post-edited with an increase in productivity. In this work, we present an APE system that uses statistical models to enhance a commercial rule-based machine translation (RBMT) system. In addition, a procedure for effortless human evaluation has been established. We have tested the APE system with two corpora of different complexity. For the Parliament corpus, we show that the APE system significantly complements and improves the RBMT system. Results for the Protocols corpus, although less conclusive, are promising as well. Finally, several possible sources of errors have been identified which will help develop future system enhancements.", "phrases": ["post-editing", "ape system", "statistical information"], "overall_score": 2.2890352901067543, "scores": [2.2331738807558517, 0.5442123976995128, 0.5249935830014797], "rank_score": 1.1007932871522814}
-{"id": "liu-etal-2016-effective", "title": "Effective Crowd Annotation for Relation Extraction", "abstract": "Can crowdsourced annotation of training data boost performance for relation extraction over methods based solely on distant supervision? While crowdsourcing has been shown effective for many NLP tasks, previous researchers found only minimal improvement when applying the method to relation extraction. This paper demonstrates that a much larger boost is possible, e.g., raising F1 from 0.40 to 0.60. Furthermore, the gains are due to a simple, generalizable technique, Gated Instruction, which combines an interactive tutorial, feedback to correct errors during training, and improved screening.", "phrases": ["relation extraction", "crowdsourcing", "distant supervision", "correct error", "screening"], "overall_score": 2.1418781896827523, "scores": [2.976418184985988, 0.9214938690389699, 0.5514936752341588, 0.5317399579309074, 0.5223925689894212], "rank_score": 1.1007076512358889}
-{"id": "procopio-etal-2021-sgl", "title": "SGL: Speaking the Graph Languages of Semantic Parsing via Multilingual Translation", "abstract": "Graph-based semantic parsing aims to represent textual meaning through directed graphs. As one of the most promising general-purpose meaning representations, these structures and their parsing have gained a significant interest momentum during recent years, with several diverse formalisms being proposed.
Yet, owing to this very heterogeneity, most of the research effort has focused mainly on solutions specific to a given formalism. In this work, instead, we reframe semantic parsing towards multiple formalisms as Multilingual Neural Machine Translation (MNMT), and propose SGL, a many-to-many seq2seq architecture trained with an MNMT objective. Backed by several experiments, we show that this framework is indeed effective once the learning procedure is enhanced with large parallel corpora coming from Machine Translation: we report competitive performances on AMR and UCCA parsing, especially once paired with pre-trained architectures. Furthermore, we find that models trained under this configuration scale remarkably well to tasks such as cross-lingual AMR parsing: SGL outperforms all its competitors by a large margin without even explicitly seeing non-English to AMR examples at training time and, once these examples are included as well, sets an unprecedented state of the art in this task. We release our code and our models for research purposes at .", "phrases": ["semantic parsing", "seq2seq architecture", "sgl"], "overall_score": 1.7713759710963908, "scores": [1.9664682836234693, 0.8007314759662049, 0.5346535596552608], "rank_score": 1.1006177730816449}
-{"id": "xie-etal-2021-factual-consistency", "title": "Factual Consistency Evaluation for Text Summarization via Counterfactual Estimation", "abstract": "Although significant progress has been achieved in text summarization, factual inconsistency in generated summaries still severely limits its practical applications. Among the key factors to ensure factual consistency, a reliable automatic evaluation metric is the first and the most crucial one. However, existing metrics either neglect the intrinsic cause of the factual inconsistency or rely on auxiliary tasks, leading to an unsatisfactory correlation with human judgments or increasing the inconvenience of usage in practice. In light of these challenges, we propose a novel metric to evaluate the factual consistency in text summarization via counterfactual estimation, which formulates the causal relationship among the source document, the generated summary, and the language prior. We remove the effect of language prior, which can cause factual inconsistency, from the total causal effect on the generated summary, and provide a simple yet effective way to evaluate consistency without relying on other auxiliary tasks. We conduct a series of experiments on three public abstractive text summarization datasets, and demonstrate the advantages of the proposed metric in both improving the correlation with human judgments and the convenience of usage. The source code is available at .", "phrases": ["text summarization", "counterfactual estimation", "causal relationship"], "overall_score": 1.771190591603893, "scores": [1.8258453637649865, 0.930436475051468, 0.5452259321639612], "rank_score": 1.100502590326805}
-{"id": "batchkarov-etal-2016-critique", "title": "A critique of word similarity as a method for evaluating distributional semantic models", "abstract": "This paper aims to re-think the role of the word similarity task in distributional semantics research. We argue that while it is a valuable tool, it should be used with care because it provides only an approximate measure of the quality of a distributional model. Word similarity evaluations assume there exists a single notion of similarity that is independent of a particular application.
Further, the small size and low inter-annotator agreement of existing data sets make it challenging to find significant differences between models.", "phrases": ["critique", "word similarity", "inter-annotator agreement", "consequence"], "overall_score": 3.1805378154736847, "scores": [1.472304569626612, 0.847177709143427, 1.545176299265595, 0.5369036871433277], "rank_score": 1.1003905662947404}
-{"id": "fan-etal-2018-controllable", "title": "Controllable Abstractive Summarization", "abstract": "Current models for document summarization disregard user preferences such as the desired length, style, the entities that the user might be interested in, or how much of the document the user has already read. We present a neural summarization model with a simple but effective mechanism to enable users to specify these high level attributes in order to control the shape of the final summaries to better suit their needs. With user input, our system can produce high quality summaries that follow user preferences. Without user input, we set the control variables automatically \u2013 on the full text CNN-Dailymail dataset, we outperform state of the art abstractive systems (both in terms of F1-ROUGE1 40.38 vs. 39.53 F1-ROUGE and human evaluation).", "phrases": ["length", "attribute", "controllable abstractive summarization", "special token", "speaker style"], "overall_score": 4.002413608601841, "scores": [1.0397208947842924, 2.144258743829283, 0.8823361170085762, 0.8780301256642293, 0.5571232700352438], "rank_score": 1.1002938302643248}
-{"id": "bergsma-etal-2008-discriminative", "title": "Discriminative Learning of Selectional Preference from Unlabeled Text", "abstract": "We present a discriminative method for learning selectional preferences from unlabeled text. Positive examples are taken from observed predicate-argument pairs, while negatives are constructed from unobserved combinations. We train a Support Vector Machine classifier to distinguish the positive from the negative instances. We show how to partition the examples for efficient training with 57 thousand features and 6.5 million training instances. The model outperforms other recent approaches, achieving excellent correlation with human plausibility judgments. Compared to Mutual Information, it identifies 66% more verb-object pairs in unseen text, and resolves 37% more pronouns correctly in a pronoun resolution experiment.", "phrases": ["selectional preference", "unlabeled text", "svm classifier", "predicate"], "overall_score": 2.6374546682673796, "scores": [2.380197523992264, 0.9205454041115751, 0.5539097642395782, 0.5449634179789333], "rank_score": 1.0999040275805876}
-{"id": "wang-etal-2020-building", "title": "Building a Bridge: A Method for Image-Text Sarcasm Detection Without Pretraining on Image-Text Data", "abstract": "Sarcasm detection in social media with text and image is becoming more challenging. Previous works of image-text sarcasm detection were mainly to fuse the summaries of text and image: different sub-models read the text and image respectively to get the summaries, and fuse the summaries. Recently, some multi-modal models based on the architecture of BERT are proposed such as ViLBERT. However, they can only be pretrained on the image-text data. In this paper, we propose an image-text model for sarcasm detection using the pretrained BERT and ResNet without any further pretraining. BERT and ResNet have been pretrained on much larger text or image data than image-text data.
We connect the vector spaces of BERT and ResNet to utilize more data. We use the pretrained Multi-Head Attention of BERT to model the text and image. Besides, we propose a 2D-Intra-Attention to extract the relationships between words and images. In experiments, our model outperforms the state-of-the-art model.", "phrases": ["sarcasm detection", "image-text data", "bert"], "overall_score": 1.2083395533210513, "scores": [1.876625389674362, 0.8966184384067093, 0.5263903534716442], "rank_score": 1.0998780605175718} -{"id": "gao-etal-2020-explicit", "title": "Explicit Memory Tracker with Coarse-to-Fine Reasoning for Conversational Machine Reading", "abstract": "The goal of conversational machine reading is to answer user questions given a knowledge base text which may require asking clarification questions. Existing approaches are limited in their decision making due to struggles in extracting question-related rules and reasoning about them. In this paper, we present a new framework of conversational machine reading that comprises a novel Explicit Memory Tracker (EMT) to track whether conditions listed in the rule text have already been satisfied to make a decision. Moreover, our framework generates clarification questions by adopting a coarse-to-fine reasoning strategy, utilizing sentence-level entailment scores to weight token-level distributions. On the ShARC benchmark (blind, held-out) testset, EMT achieves new state-of-the-art results of 74.6% micro-averaged decision accuracy and 49.5 BLEU4. We also show that EMT is more interpretable by visualizing the entailment-oriented reasoning process as the conversation flows. Code and models are released at .", "phrases": ["conversational machine reading", "reasoning strategy", "explicit memory tracker"], "overall_score": 1.7692262876377034, "scores": [1.8687620017277313, 0.8897263875225431, 0.539357909701278], "rank_score": 1.0992820996505175} -{"id": "ferreira-freitas-2021-star", "title": "STAR: Cross-modal [STA]tement [R]epresentation for selecting relevant mathematical premises", "abstract": "Mathematical statements written in natural language are usually composed of two different modalities: mathematical elements and natural language. These two modalities have several distinct linguistic and semantic properties. State-of-the-art representation techniques have demonstrated an inability in capturing such an entangled style of discourse. In this work, we propose STAR, a model that uses cross-modal attention to learn how to represent mathematical text for the task of Natural Language Premise Selection. This task uses conjectures written in both natural and mathematical language to recommend premises that most likely will be relevant to prove a particular statement. We found that STAR not only outperforms baselines that do not distinguish between natural language and mathematical elements, but it also achieves better performance than state-of-the-art models.", "phrases": ["premise", "mathematical text", "star"], "overall_score": 1.2075538953948142, "scores": [1.8434035512875355, 0.8625140902570335, 0.5915711300186329], "rank_score": 1.0991629238544005} -{"id": "nivre-etal-2006-labeled", "title": "Labeled Pseudo-Projective Dependency Parsing with Support Vector Machines", "abstract": "We use SVM classifiers to predict the next action of a deterministic parser that builds labeled projective dependency graphs in an incremental fashion. 
Non-projective dependencies are captured indirectly by projectivizing the training data for the classifiers and applying an inverse transformation to the output of the parser. We present evaluation results and an error analysis focusing on Swedish and Turkish.", "phrases": ["dependency parsing", "non-projective dependency", "transformation"], "overall_score": 3.17653405831205, "scores": [2.155591434796885, 0.610731856119853, 0.5306927932261989], "rank_score": 1.0990053613809792} -{"id": "artzi-zettlemoyer-2013-weakly", "title": "Weakly Supervised Learning of Semantic Parsers for Mapping Instructions to Actions", "abstract": "The context in which language is used provides a strong signal for learning to recover its meaning. In this paper, we show it can be used within a grounded CCG semantic parsing approach that learns a joint model of meaning and context for interpreting and executing natural language instructions, using various types of weak supervision. The joint nature provides crucial benefits by allowing situated cues, such as the set of visible objects, to directly influence learning. It also enables algorithms that learn while executing instructions, for example by trying to replicate human actions. Experiments on a benchmark navigational dataset demonstrate strong performance under differing forms of supervision, including correctly executing 60% more instruction sets relative to the previous state of the art.", "phrases": ["mapping instruction", "semantic parsing", "weak supervision", "program"], "overall_score": 3.9967698871562183, "scores": [1.2982392011918336, 2.0196168527411293, 0.5523039409245536, 0.5248093190315042], "rank_score": 1.0987423284722553} -{"id": "maccartney-manning-2008-modeling", "title": "Modeling Semantic Containment and Exclusion in Natural Language Inference", "abstract": "We propose an approach to natural language inference based on a model of natural logic, which identifies valid inferences by their lexical and syntactic features, without full semantic interpretation. We greatly extend past work in natural logic, which has focused solely on semantic containment and monotonicity, to incorporate both semantic exclusion and implicativity. Our system decomposes an inference problem into a sequence of atomic edits linking premise to hypothesis; predicts a lexical entailment relation for each edit using a statistical classifier; propagates these relations upward through a syntax tree according to semantic properties of intermediate nodes; and composes the resulting entailment relations across the edit sequence. We evaluate our system on the FraCaS test suite, and achieve a 27% reduction in error from previous work. We also show that hybridizing an existing RTE system with our natural logic system yields significant gains on the RTE3 test suite.", "phrases": ["exclusion", "natural language inference", "atomic edit", "entailment"], "overall_score": 2.9742059197202346, "scores": [2.493630093390882, 0.7862835281284626, 0.5721829659903466, 0.5410360339013827], "rank_score": 1.0982831553527685} -{"id": "qazvinian-radev-2008-scientific", "title": "Scientific Paper Summarization Using Citation Summary Networks", "abstract": "Quickly moving to a new area of research is painful for researchers due to the vast amount of scientific literature in each field of study. One possible way to overcome this problem is to summarize a scientific topic. In this paper, we propose a model of summarizing a single article, which can be further used to summarize an entire topic. 
Our model is based on analyzing others' viewpoint of the target article's contributions and the study of its citation summary network using a clustering approach.", "phrases": ["scientific paper summarization", "citation network", "c-lexrank"], "overall_score": 3.872639162417862, "scores": [1.4462838629824404, 0.9785154149303669, 0.8697922438993814], "rank_score": 1.0981971739373961} -{"id": "komatani-etal-2006-multi", "title": "Multi-Domain Spoken Dialogue System with Extensibility and Robustness against Speech Recognition Errors", "abstract": "We developed a multi-domain spoken dialogue system that can handle user requests across multiple domains. Such systems need to satisfy two requirements: extensibility and robustness against speech recognition errors. Extensibility is required to allow for the modification and addition of domains independent of other domains. Robustness against speech recognition errors is required because such errors are inevitable in speech recognition. However, the systems should still behave appropriately, even when their inputs are erroneous. Our system was constructed on an extensible architecture and is equipped with a robust and extensible domain selection method. Domain selection was based on three choices: (I) the previous domain, (II) the domain in which the speech recognition result can be accepted with the highest recognition score, and (III) other domains. With the third choice we newly introduced, our system can prevent dialogues from continuously being stuck in an erroneous domain. Our experimental results, obtained with 10 subjects, showed that our method reduced the domain selection errors by 18.3%, compared to a conventional method.", "phrases": ["spoken dialogue system", "speech recognition error", "multiple domain"], "overall_score": 1.9676703883692845, "scores": [1.970258928517498, 0.8015036494188603, 0.5227706819609944], "rank_score": 1.0981777532991175} -{"id": "choi-etal-2017-coarse", "title": "Coarse-to-Fine Question Answering for Long Documents", "abstract": "We present a framework for question answering that can efficiently scale to longer documents while maintaining or even improving performance of state-of-the-art models. While most successful approaches for reading comprehension rely on recurrent neural networks (RNNs), running them over long documents is prohibitively slow because it is difficult to parallelize over sequences. Inspired by how people first skim the document, identify relevant parts, and carefully read these parts to produce an answer, we combine a coarse, fast model for selecting relevant sentences and a more expensive RNN for producing the answer from those sentences. We treat sentence selection as a latent variable trained jointly from the answer only using reinforcement learning. Experiments demonstrate state-of-the-art performance on a challenging subset of the WikiReading dataset and on a new dataset, while speeding up the model by 3.5x-6.7x.", "phrases": ["long document", "relevant sentence", "coarse-to-fine framework"], "overall_score": 3.697507502684248, "scores": [2.1831120868897314, 0.5750177390925668, 0.5360632203403652], "rank_score": 1.098064348774221} -{"id": "tran-etal-2020-revisiting", "title": "Revisiting Unsupervised Relation Extraction", "abstract": "Unsupervised relation extraction (URE) extracts relations between named entities from raw text without manually-labelled data and existing knowledge bases (KBs). 
URE methods can be categorised into generative and discriminative approaches, which rely either on hand-crafted features or surface form. However, we demonstrate that by using only named entities to induce relation types, we can outperform existing methods on two popular datasets. We conduct a comparison and evaluation of our findings with other URE techniques, to ascertain the important features in URE. We conclude that entity types provide a strong inductive bias for URE.", "phrases": ["unsupervised relation extraction", "important feature", "entity type"], "overall_score": 1.9670930761478511, "scores": [2.172569931679762, 0.5627648874406477, 0.5582318285200832], "rank_score": 1.0978555492134976} -{"id": "levy-etal-2014-context", "title": "Context Dependent Claim Detection", "abstract": "While discussing a concrete controversial topic, most humans will find it challenging to swiftly raise a diverse set of convincing and relevant claims that should set the basis of their arguments. Here, we formally define the challenging task of automatic claim detection in a given context and discuss its associated unique difficulties. Further, we outline a preliminary solution to this task, and assess its performance over annotated real world data, collected specifically for that purpose over hundreds of Wikipedia articles. We report promising results of a supervised learning approach, which is based on a cascade of classifiers designed to properly handle the skewed data which is inherent to the defined task. These results demonstrate the viability of the introduced task.", "phrases": ["claim", "detection", "wikipedia", "argumentation mining", "cdcs"], "overall_score": 3.6580976514224406, "scores": [2.318299826145018, 1.0955403593050679, 0.9347027813539371, 0.6129029484118296, 0.527559396811202], "rank_score": 1.0978010624054109} -{"id": "madnani-etal-2012-exploring", "title": "Exploring Grammatical Error Correction with Not-So-Crummy Machine Translation", "abstract": "To date, most work in grammatical error correction has focused on targeting specific error types. We present a probe study into whether we can use round-trip translations obtained from Google Translate via 8 different pivot languages for whole-sentence grammatical error correction. We develop a novel alignment algorithm for combining multiple round-trip translations into a lattice using the TERp machine translation metric. We further implement six different methods for extracting whole-sentence corrections from the lattice. Our preliminary experiments yield fairly satisfactory results but leave significant room for improvement. Most importantly, though, they make it clear the methods we propose have strong potential and require further study.", "phrases": ["grammatical error correction", "round-trip translation", "different pivot language"], "overall_score": 1.9668926605708086, "scores": [1.9023183241893002, 0.8466973888968722, 0.5442153723644989], "rank_score": 1.0977436951502237} -{"id": "he-etal-2008-improving", "title": "Improving Statistical Machine Translation using Lexicalized Rule Selection", "abstract": "This paper proposes a novel lexicalized approach for rule selection for syntax-based statistical machine translation (SMT). We build maximum entropy (MaxEnt) models which combine rich context information for selecting translation rules during decoding. We successfully integrate the MaxEnt-based rule selection models into the state-of-the-art syntax-based SMT model. 
Experiments show that our lexicalized approach for rule selection achieves statistically significant improvements over the state-of-the-art SMT system.", "phrases": ["statistical machine translation", "context information", "entropy model", "source-side"], "overall_score": 3.288499854888401, "scores": [2.0518966164169683, 1.249348634815532, 0.5482069596280007, 0.5414606673281559], "rank_score": 1.0977282195471643} -{"id": "roark-etal-2007-syntactic", "title": "Syntactic complexity measures for detecting Mild Cognitive Impairment", "abstract": "We consider the diagnostic utility of various syntactic complexity measures when extracted from spoken language samples of healthy and cognitively impaired subjects. We examine measures calculated from manually built parse trees, as well as the same measures calculated from automatic parses. We show statistically significant differences between clinical subject groups for a number of syntactic complexity measures, and these differences are preserved with automatic parsing. Different measures show different patterns for our data set, indicating that using multiple, complementary measures is important for such an application.", "phrases": ["mild cognitive impairment", "language sample", "syntactic complexity measure"], "overall_score": 2.4114548263671605, "scores": [1.8429400582895417, 0.9112985340577459, 0.5382625719497103], "rank_score": 1.0975003880989993} -{"id": "neelakantan-etal-2014-efficient", "title": "Efficient Non-parametric Estimation of Multiple Embeddings per Word in Vector Space", "abstract": "There is rising interest in vector-space word embeddings and their use in NLP, especially given recent methods for their fast estimation at very large scale. Nearly all this work, however, assumes a single vector per word type\u2014ignoring polysemy and thus jeopardizing their usefulness for downstream tasks. We present an extension to the Skip-gram model that efficiently learns multiple embeddings per word type. It differs from recent related work by jointly performing word sense discrimination and embedding learning, by non-parametrically estimating the number of senses per word type, and by its efficiency and scalability. We present new state-of-the-art results in the word similarity in context task and demonstrate its scalability by training with one machine on a corpus of nearly 1 billion tokens in less than 6 hours.", "phrases": ["vector space", "skip-gram model", "multi-prototype embedding", "polysemous word", "assumption"], "overall_score": 4.596796505642723, "scores": [1.3685031536433487, 2.0561780853602847, 0.9793328316697976, 0.5508106177867136, 0.5310647089790509], "rank_score": 1.097177879487839} -{"id": "erk-2007-simple", "title": "A Simple, Similarity-based Model for Selectional Preferences", "abstract": "We propose a new, simple model for the automatic induction of selectional preferences, using corpus-based semantic similarity metrics. Focusing on the task of semantic role labeling, we compute selectional preferences for semantic roles. 
In evaluations the similarity-based model shows lower error rates than both Resnik\u2019s WordNet-based model and the EM-based clustering model, but has coverage problems.", "phrases": ["similarity-based model", "selectional preferences", "simple model", "induction"], "overall_score": 3.4400464033015106, "scores": [1.8477963722879336, 0.9371947899556318, 1.0577177418948889, 0.5458131806099775], "rank_score": 1.097130521187108} -{"id": "nogueira-dos-santos-etal-2018-fighting", "title": "Fighting Offensive Language on Social Media with Unsupervised Text Style Transfer", "abstract": "We introduce a new approach to tackle the problem of offensive language in online social media. Our approach uses unsupervised text style transfer to translate offensive sentences into non-offensive ones. We propose a new method for training encoder-decoders using non-parallel data that combines a collaborative classifier, attention and the cycle consistency loss. Experimental results on data from Twitter and Reddit show that our method outperforms a state-of-the-art text style transfer system in two out of three quantitative metrics and produces reliable non-offensive transferred sentences.", "phrases": ["offensive language", "style transfer", "social medium"], "overall_score": 3.170470809309214, "scores": [1.8731569181647758, 0.8807247262272007, 0.5368412191786186], "rank_score": 1.0969076211901985} -{"id": "baroni-lenci-2011-blessed", "title": "How we BLESSed distributional semantic evaluation", "abstract": "We introduce BLESS, a data set specifically designed for the evaluation of distributional semantic models. BLESS contains a set of tuples instantiating different, explicitly typed semantic relations, plus a number of controlled random tuples. It is thus possible to assess the ability of a model to detect truly related word pairs, as well as to perform in-depth analyses of the types of semantic relations that a model favors. We discuss the motivations for BLESS, describe its construction and structure, and present examples of its usage in the evaluation of distributional semantic models.", "phrases": ["bless", "word pair", "hypernymy"], "overall_score": 3.169466356352122, "scores": [1.2755957475014126, 1.1197284344920515, 0.8943561309560754], "rank_score": 1.0965601043165132} -{"id": "kanojia-etal-2019-utilizing", "title": "Utilizing Wordnets for Cognate Detection among Indian Languages", "abstract": "Automatic Cognate Detection (ACD) is a challenging task which has been utilized to help NLP applications like Machine Translation, Information Retrieval and Computational Phylogenetics. Unidentified cognate pairs can pose a challenge to these applications and result in a degradation of performance. In this paper, we detect cognate word pairs among ten Indian languages with Hindi and use deep learning methodologies to predict whether a word pair is cognate or not. We identify IndoWordnet as a potential resource to detect cognate word pairs based on orthographic similarity-based methods and train neural network models using the data obtained from it. We identify parallel corpora as another potential resource and perform the same experiments for them. We also validate the contribution of Wordnets through further experimentation and report improved performance of up to 26%. We discuss the nuances of cognate detection among closely related Indian languages and release the lists of detected cognates as a dataset. 
We also observe the behaviour of, to an extent, unrelated Indian language pairs and release the lists of detected cognates among them as well.", "phrases": ["wordnet", "cognate detection", "indian language"], "overall_score": 1.2046451339032243, "scores": [1.6483060887818193, 0.829576554448176, 0.8116631219015662], "rank_score": 1.0965152550438537} -{"id": "pavlick-etal-2015-framenet", "title": "FrameNet+: Fast Paraphrastic Tripling of FrameNet", "abstract": "We increase the lexical coverage of FrameNet through automatic paraphrasing. We use crowdsourcing to manually filter out bad paraphrases in order to ensure a high-precision resource. Our expanded FrameNet contains an additional 22K lexical units, a 3-fold increase over the current FrameNet, and achieves 40% better coverage when evaluated in a practical setting on New York Times data.", "phrases": ["framenet", "paraphrasing", "crowdsourcing", "lexical unit"], "overall_score": 2.409034210571428, "scores": [2.2985890774296656, 0.990520236962177, 0.5631379600076609, 0.533347599096757], "rank_score": 1.096398718374065} -{"id": "zhu-etal-2020-babywalk", "title": "BabyWalk: Going Farther in Vision-and-Language Navigation by Taking Baby Steps", "abstract": "Learning to follow instructions is of fundamental importance to autonomous agents for vision-and-language navigation (VLN). In this paper, we study how an agent can navigate long paths when learning from a corpus that consists of shorter ones. We show that existing state-of-the-art agents do not generalize well. To this end, we propose BabyWalk, a new VLN agent that is learned to navigate by decomposing long instructions into shorter ones (BabySteps) and completing them sequentially. A special design memory buffer is used by the agent to turn its past experiences into contexts for future steps. The learning process is composed of two phases. In the first phase, the agent uses imitation learning from demonstration to accomplish BabySteps. In the second phase, the agent uses curriculum-based reinforcement learning to maximize rewards on navigation tasks with increasingly longer instructions. We create two new benchmark datasets (of long navigation tasks) and use them in conjunction with existing ones to examine BabyWalk's generalization ability. Empirical results show that BabyWalk achieves state-of-the-art results on several metrics, in particular, is able to follow long instructions better. The codes and the datasets are released on our project page: .", "phrases": ["vision-and-language navigation", "long instruction", "babywalk"], "overall_score": 2.4088763556813895, "scores": [1.8192163189681008, 0.9427591487009788, 0.5270051588835738], "rank_score": 1.096326875517551} -{"id": "nagata-etal-2020-supervised", "title": "A Supervised Word Alignment Method based on Cross-Language Span Prediction using Multilingual BERT", "abstract": "We present a novel supervised word alignment method based on cross-language span prediction. We first formalize a word alignment problem as a collection of independent predictions from a token in the source sentence to a span in the target sentence. Since this step is equivalent to a SQuAD v2.0 style question answering task, we solve it using the multilingual BERT, which is fine-tuned on manually created gold word alignment data. It is nontrivial to obtain accurate alignment from a set of independently predicted spans. We greatly improved the word alignment accuracy by adding to the question the source token's context and symmetrizing two directional predictions. 
In experiments using five word alignment datasets from among Chinese, Japanese, German, Romanian, French, and English, we show that our proposed method significantly outperformed previous supervised and unsupervised word alignment methods without any bitexts for pretraining. For example, we achieved 86.7 F1 score for the Chinese-English data, which is 13.3 points higher than the previous state-of-the-art supervised method.", "phrases": ["cross-language span prediction", "multilingual bert", "neural word alignment"], "overall_score": 2.279519277280634, "scores": [1.7861187661673865, 0.9571188481441318, 0.5454135426320462], "rank_score": 1.0962170523145216} -{"id": "nguyen-etal-2017-hierarchical", "title": "Hierarchical Embeddings for Hypernymy Detection and Directionality", "abstract": "We present a novel neural model HyperVec to learn hierarchical embeddings for hypernymy detection and directionality. While previous embeddings have shown limitations on prototypical hypernyms, HyperVec represents an unsupervised measure where embeddings are learned in a specific order and capture the hypernym\u2013hyponym distributional hierarchy. Moreover, our model is able to generalize over unseen hypernymy pairs, when using only small sets of training data, and by mapping to other languages. Results on benchmark datasets show that HyperVec outperforms both state-of-the-art unsupervised measures and embedding models on hypernymy detection and directionality, and on predicting graded lexical entailment.", "phrases": ["hypernymy detection", "directionality", "word embedding"], "overall_score": 3.1683270633228786, "scores": [1.9286838938678557, 0.7798392262968995, 0.579974687773658], "rank_score": 1.096165935979471} -{"id": "jansen-etal-2018-worldtree", "title": "WorldTree: A Corpus of Explanation Graphs for Elementary Science Questions supporting Multi-hop Inference", "abstract": "Developing methods of automated inference that are able to provide users with compelling human-readable justifications for why the answer to a question is correct is critical for domains such as science and medicine, where user trust and detecting costly errors are limiting factors to adoption. One of the central barriers to training question answering models on explainable inference tasks is the lack of gold explanations to serve as training data. In this paper we present a corpus of explanations for standardized science exams, a recent challenge task for question answering. We manually construct a corpus of detailed explanations for nearly all publicly available standardized elementary science questions (approximately 1,680 3rd through 5th grade questions) and represent these as \"explanation graphs\" -- sets of lexically overlapping sentences that describe how to arrive at the correct answer to a question through a combination of domain and world knowledge. We also provide an explanation-centered tablestore, a collection of semi-structured tables that contain the knowledge to construct these elementary science explanations. 

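The SQuAD-style formulation in nagata-etal-2020-supervised can be exercised with an off-the-shelf question-answering pipeline. A sketch under stated assumptions: the checkpoint name is a stand-in for a multilingual BERT model fine-tuned on gold alignments in SQuAD v2.0 format, and the boundary marker symbol is an illustrative choice:

```python
from transformers import pipeline

# Stand-in checkpoint; the paper fine-tunes multilingual BERT on gold
# word-alignment data recast as SQuAD v2.0 examples.
qa = pipeline("question-answering", model="bert-base-multilingual-cased")

def align_token(src_tokens, i, tgt_sentence, marker="¶"):
    """Ask, for one marked source token in its sentence context, which
    target-sentence span it aligns to; the predicted answer span is the
    alignment, and its score can feed the symmetrization step."""
    question = " ".join(
        src_tokens[:i] + [marker, src_tokens[i], marker] + src_tokens[i + 1:])
    pred = qa(question=question, context=tgt_sentence)
    return pred["answer"], pred["score"]
```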
Together, these two knowledge resources map out a substantial portion of the knowledge required for answering and explaining elementary science exams, and provide both structured and free-text training data for the explainable inference task.", "phrases": ["explanation", "semi-structured table", "worldtree", "semantic drift", "science exam question"], "overall_score": 3.3880212094443745, "scores": [1.636645926000502, 1.8588366674191943, 0.8379038235262557, 0.5824748523797276, 0.5645248149278715], "rank_score": 1.0960772168507102} -{"id": "kiela-etal-2018-dynamic", "title": "Dynamic Meta-Embeddings for Improved Sentence Representations", "abstract": "While one of the first steps in many NLP systems is selecting what pre-trained word embeddings to use, we argue that such a step is better left for neural networks to figure out by themselves. To that end, we introduce dynamic meta-embeddings, a simple yet effective method for the supervised learning of embedding ensembles, which leads to state-of-the-art performance within the same model class on a variety of tasks. We subsequently show how the technique can be used to shed new light on the usage of word embeddings in NLP systems.", "phrases": ["meta-embedding", "ensemble", "natural language inference"], "overall_score": 3.0389672811405153, "scores": [1.8955571720083497, 0.8325637341967219, 0.5601063632388569], "rank_score": 1.0960757564813095} -{"id": "dong-etal-2020-multi", "title": "Multi-Fact Correction in Abstractive Text Summarization", "abstract": "Pre-trained neural abstractive summarization systems have dominated extractive strategies on news summarization performance, at least in terms of ROUGE. However, system-generated abstractive summaries often face the pitfall of factual inconsistency: generating incorrect facts with respect to the source text. To address this challenge, we propose Span-Fact, a suite of two factual correction models that leverages knowledge learned from question answering models to make corrections in system-generated summaries via span selection. Our models employ single or multi-masking strategies to either iteratively or auto-regressively replace entities in order to ensure semantic consistency w.r.t. the source text, while retaining the syntactic structure of summaries generated by abstractive summarization models. Experiments show that our models significantly boost the factual consistency of system-generated summaries without sacrificing summary quality in terms of both automatic metrics and human evaluation.", "phrases": ["correction", "abstractive summarization system", "consistency"], "overall_score": 2.626776697432872, "scores": [2.130504640832128, 0.6309295834870791, 0.5249186792845367], "rank_score": 1.0954509678679145} -{"id": "agarwal-etal-2020-history", "title": "History for Visual Dialog: Do we really need it?", "abstract": "Visual Dialogue involves \u201cunderstanding\u201d the dialogue history (what has been discussed previously) and the current question (what is asked), in addition to grounding information in the image, to accurately generate the correct response. In this paper, we show that co-attention models which explicitly encode dialog history outperform models that don't, achieving state-of-the-art performance (72% NDCG on val set). However, we also expose shortcomings of the crowdsourcing dataset collection procedure, by showing that dialogue history is indeed only required for a small amount of the data, and that the current evaluation metric encourages generic replies. 

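The mechanism behind dynamic meta-embeddings in kiela-etal-2018-dynamic is a projection of each embedding set into a shared space followed by a learned, per-token attention mixture. A minimal PyTorch sketch; the dimensions and single-vector attention are illustrative simplifications:

```python
import torch
import torch.nn as nn

class DynamicMetaEmbedding(nn.Module):
    def __init__(self, in_dims, out_dim=256):
        super().__init__()
        # one projection per pre-trained embedding set
        self.projections = nn.ModuleList([nn.Linear(d, out_dim) for d in in_dims])
        self.attention = nn.Linear(out_dim, 1)

    def forward(self, embeddings):
        # embeddings: list of (batch, seq, in_dim_i) tensors, one per set
        projected = torch.stack(
            [proj(e) for proj, e in zip(self.projections, embeddings)], dim=2)
        weights = torch.softmax(self.attention(projected), dim=2)
        return (weights * projected).sum(dim=2)  # (batch, seq, out_dim)
```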
To that end, we propose a challenging subset (VisdialConv) of the VisdialVal set and report a benchmark NDCG of 63%.", "phrases": ["visual dialog", "dialogue history", "history"], "overall_score": 2.4069387670760927, "scores": [1.4860242446242322, 1.239948115363265, 0.5603627628347466], "rank_score": 1.095445040940748} -{"id": "freitag-etal-2019-ape", "title": "APE at Scale and Its Implications on MT Evaluation Biases", "abstract": "In this work, we train an Automatic Post-Editing (APE) model and use it to reveal biases in standard MT evaluation procedures. The goal of our APE model is to correct typical errors introduced by the translation process, and convert the \u201ctranslationese\u201d output into natural text. Our APE model is trained entirely on monolingual data that has been round-trip translated through English, to mimic errors that are similar to the ones introduced by NMT. We apply our model to the output of existing NMT systems, and demonstrate that, while the human-judged quality improves in all cases, BLEU scores drop with forward-translated test sets. We verify these results for the WMT18 English to German, WMT15 English to French, and WMT16 English to Romanian tasks. Furthermore, we selectively apply our APE model on the output of the top submissions of the most recent WMT evaluation campaigns. We see quality improvements on all tasks of up to 2.5 BLEU points.", "phrases": ["monolingual data", "bleu score", "test set", "ape"], "overall_score": 2.8091753954602785, "scores": [2.2560059218570645, 1.045513600382118, 0.5474552369971251, 0.531892078993116], "rank_score": 1.095216709557356} -{"id": "jain-etal-2018-mixed", "title": "A Mixed Hierarchical Attention Based Encoder-Decoder Approach for Standard Table Summarization", "abstract": "Structured data summarization involves generation of natural language summaries from structured input data. In this work, we consider summarizing structured data occurring in the form of tables as they are prevalent across a wide variety of domains. We formulate the standard table summarization problem, which deals with tables conforming to a single predefined schema. To this end, we propose a mixed hierarchical attention based encoder-decoder model which is able to leverage the structure in addition to the content of the tables. Our experiments on the publicly available weathergov dataset show around 18 BLEU (around 30%) improvement over the current state-of-the-art.", "phrases": ["mixed hierarchical attention", "table", "encoder-decoder model"], "overall_score": 1.7626542868635373, "scores": [2.2222386817484234, 0.533667871360266, 0.5296895048300361], "rank_score": 1.0951986859795753} -{"id": "fu-etal-2014-learning", "title": "Learning Semantic Hierarchies via Word Embeddings", "abstract": "Semantic hierarchy construction aims to build structures of concepts linked by hypernym\u2010hyponym (\u201cis-a\u201d) relations. A major challenge for this task is the automatic discovery of such relations. This paper proposes a novel and effective method for the construction of semantic hierarchies based on word embeddings, which can be used to measure the semantic relationship between words. We identify whether a candidate word pair has hypernym\u2010hyponym relation by using the word-embedding-based semantic projections between words and their hypernyms. Our result, an F-score of 73.74%, outperforms the state-of-the-art methods on a manually labeled test dataset. 

Moreover, combining our method with a previous manually-built hierarchy extension method can further improve the F-score to 80.29%.", "phrases": ["word embedding", "hypernym", "projection learning", "chinese thesaurus", "cluster"], "overall_score": 3.8614779980912224, "scores": [2.0939840274451784, 1.4231008138491616, 0.8454948047088049, 0.5712533005525909, 0.541327592384817], "rank_score": 1.0950321077881104} -{"id": "eck-etal-2005-low-cost", "title": "Low Cost Portability for Statistical Machine Translation based on N-gram Coverage", "abstract": "Statistical machine translation relies heavily on the available training data. However, in some cases, it is necessary to limit the amount of training data that can be created for or actually used by the systems. To solve that problem, we introduce a weighting scheme that tries to select more informative sentences first. This selection is based on the previously unseen n-grams the sentences contain, and it allows us to sort the sentences according to their estimated importance. After sorting, we can construct smaller training corpora, and we are able to demonstrate that systems trained on much less training data show a very competitive performance compared to baseline systems using all available training data.", "phrases": ["statistical machine translation", "n-gram coverage", "sentence pair"], "overall_score": 3.0359673054168144, "scores": [1.8083155775725996, 0.9325699295684113, 0.5440957247283892], "rank_score": 1.0949937439564668} -{"id": "giulianelli-etal-2018-hood", "title": "Under the Hood: Using Diagnostic Classifiers to Investigate and Improve how Language Models Track Agreement Information", "abstract": "How do neural language models keep track of number agreement between subject and verb? We show that `diagnostic classifiers', trained to predict number from the internal states of a language model, provide a detailed understanding of how, when, and where this information is represented. Moreover, they give us insight into when and where number information is corrupted in cases where the language model ends up making agreement errors. To demonstrate the causal role played by the representations we find, we then use agreement information to influence the course of the LSTM during the processing of difficult sentences. Results from such an intervention reveal a large increase in the language model's accuracy. Together, these results show that diagnostic classifiers give us an unrivalled detailed look into the representation of linguistic information in neural models, and demonstrate that this knowledge can be used to improve their performance.", "phrases": ["diagnostic classifier", "language model", "track", "agreement", "subject-verb agreement"], "overall_score": 3.2236785061667472, "scores": [1.3414659689454806, 1.332400202582535, 1.1859587720016544, 1.0687678786521986, 0.5455883868295656], "rank_score": 1.0948362418022868} -{"id": "berzak-etal-2015-contrastive", "title": "Contrastive Analysis with Predictive Power: Typology Driven Estimation of Grammatical Error Distributions in ESL", "abstract": "This work examines the impact of crosslinguistic transfer on grammatical errors in English as Second Language (ESL) texts. Using a computational framework that formalizes the theory of Contrastive Analysis (CA), we demonstrate that language specific error distributions in ESL writing can be predicted from the typological properties of the native language and their relation to the typology of English. 

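The weighting scheme in eck-etal-2005-low-cost sorts sentences by how many previously unseen n-grams they contribute. A small sketch; the greedy re-scoring loop is one reasonable reading of that scheme, and bigrams are an arbitrary choice here:

```python
def ngrams(tokens, n):
    return {tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)}

def select_by_coverage(sentences, n=2):
    """Order sentences so the most informative ones (most unseen n-grams)
    come first; truncating the result gives a smaller training corpus."""
    remaining = [(s, ngrams(s.split(), n)) for s in sentences]
    seen, ordered = set(), []
    while remaining:
        idx, (sent, grams) = max(
            enumerate(remaining), key=lambda p: len(p[1][1] - seen))
        seen |= grams
        ordered.append(sent)
        remaining.pop(idx)
    return ordered
```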
Our typology-driven model enables us to obtain accurate estimates of such distributions without access to any ESL data for the target languages. Furthermore, we present a strategy for adjusting our method to low-resource languages that lack typological documentation using a bootstrapping approach which approximates native language typology from ESL texts. Finally, we show that our framework is instrumental for linguistic inquiry seeking to identify first language factors that contribute to a wide range of difficulties in second language acquisition.", "phrases": ["esl", "native language", "contrastive analysis"], "overall_score": 1.5176016309339349, "scores": [1.9171136784930671, 0.813893129086185, 0.553147712910834], "rank_score": 1.0947181734966953} -{"id": "kao-jurafsky-2012-computational", "title": "A Computational Analysis of Style, Affect, and Imagery in Contemporary Poetry", "abstract": "What makes a poem beautiful? We use computational methods to compare the stylistic and content features employed by award-winning poets and amateur poets. Building upon existing techniques designed to quantitatively analyze style and affect in texts, we examined elements of poetic craft such as diction, sound devices, emotive language, and imagery. Results showed that the most important indicator of high-quality poetry we could detect was the frequency of references to concrete objects. This result highlights the influence of Imagism in contemporary professional poetry, and suggests that concreteness may be one of the most appealing features of poetry to the modern aesthetic. We also report on other features that characterize high-quality poetry and argue that methods from computational linguistics may provide important insights into the analysis of beauty in verbal art.", "phrases": ["style", "contemporary poetry", "imagism"], "overall_score": 2.275530889210399, "scores": [1.8083730891449554, 0.9318617733851454, 0.5426622667233956], "rank_score": 1.0942990430844988} -{"id": "pimentel-etal-2020-information", "title": "Information-Theoretic Probing for Linguistic Structure", "abstract": "The success of neural networks on a diverse set of NLP tasks has led researchers to question how much these networks actually \u201cknow\u201d about natural language. Probes are a natural way of assessing this. When probing, a researcher chooses a linguistic task and trains a supervised model to predict annotations in that linguistic task from the network's learned representations. If the probe does well, the researcher may conclude that the representations encode knowledge related to the task. A commonly held belief is that using simpler models as probes is better; the logic is that simpler models will identify linguistic structure, but not learn the task itself. We propose an information-theoretic operationalization of probing as estimating mutual information that contradicts this received wisdom: one should always select the highest performing probe one can, even if it is more complex, since it will result in a tighter estimate, and thus reveal more of the linguistic information inherent in the representation. The experimental portion of our paper focuses on empirically estimating the mutual information between a linguistic property and BERT, comparing these estimates to several baselines. We evaluate on a set of ten typologically diverse languages often underrepresented in NLP research\u2014plus English\u2014totalling eleven languages. 

Our implementation is available in .", "phrases": ["probing", "linguistic structure", "estimate", "bert", "diagnostic classifier"], "overall_score": 3.7215566043153587, "scores": [2.5358576112384728, 0.894603970489212, 0.8919659580484443, 0.5828153688910591, 0.5657077400373648], "rank_score": 1.0941901297409105} -{"id": "baldwin-etal-2015-shared", "title": "Shared Tasks of the 2015 Workshop on Noisy User-generated Text: Twitter Lexical Normalization and Named Entity Recognition", "abstract": "This paper presents the results of the two shared tasks associated with W-NUT 2015: (1) a text normalization task with 10 participants; and (2) a named entity tagging task with 8 participants. We outline the task, annotation process and dataset statistics, and provide a high-level overview of the participating systems for each shared task.", "phrases": ["workshop", "noisy user-generated text", "lexical normalization", "named entity recognition", "english tweet"], "overall_score": 3.825107605435663, "scores": [1.8541927201546586, 1.251406683254217, 0.8751101232243342, 0.9151467259534716, 0.5740412639960708], "rank_score": 1.0939795033165505} -{"id": "herzig-etal-2011-annotation", "title": "An Annotation Scheme for Automated Bias Detection in Wikipedia", "abstract": "BiasML is a novel annotation scheme with the purpose of identifying the presence as well as nuances of biased language within the subset of Wikipedia articles dedicated to service providers. Whereas Wikipedia currently uses only manual flagging to detect possible bias, our scheme provides a foundation for the automating of bias flagging by improving upon the methodology of annotation schemes in classic sentiment analysis. We also address challenges unique to the task of identifying biased writing within the specific context of Wikipedia's neutrality policy. We perform a detailed analysis of inter-annotator agreement, which shows that although the agreement scores for intra-sentential tags were relatively low, the agreement scores on the sentence and entry levels were encouraging (74.8% and 66.7%, respectively). Based on an analysis of our first implementation of our scheme, we suggest possible improvements to our guidelines, in hope that further rounds of annotation after incorporating them could provide appropriate data for use within a machine learning framework for automated detection of bias within Wikipedia.", "phrases": ["annotation scheme", "wikipedia", "biased language"], "overall_score": 1.2015887436806683, "scores": [1.7877516351743916, 0.9270387259432226, 0.5664092651965991], "rank_score": 1.0937332087714045} -{"id": "luu-malamud-2020-non", "title": "Non-Topical Coherence in Social Talk: A Call for Dialogue Model Enrichment", "abstract": "Current models of dialogue mainly focus on utterances within a topically coherent discourse segment, rather than new-topic utterances (NTUs), which begin a new topic not correlating with the content of prior discourse. As a result, these models may sufficiently account for discourse context of task-oriented but not social conversations. We conduct a pilot annotation study of NTUs as a first step towards a model capable of rationalizing conversational coherence in social talk. We start with the naturally occurring social dialogues in the Disco-SPICE corpus, annotated with discourse relations in the Penn Discourse Treebank and Cognitive approach to Coherence Relations frameworks. 
We first annotate content-based coherence relations that are not available in Disco-SPICE, and then heuristically identify NTUs, which lack a coherence relation to prior discourse. Based on the interaction between NTUs and their discourse context, we construct a classification for NTUs that actually convey certain non-topical coherence in social talk. This classification introduces new sequence-based social intents that traditional taxonomies of speech acts do not capture. The new findings advocate the development of a Bayesian game-theoretic model for social talk.", "phrases": ["social talk", "new-topic utterance", "prior discourse", "non-topical coherence"], "overall_score": 1.51582540777707, "scores": [2.2926769307696877, 0.9184881753849019, 0.5847033309170621, 0.5778791602352881], "rank_score": 1.093436899326735} -{"id": "quirk-2004-training", "title": "Training a Sentence-Level Machine Translation Confidence Measure", "abstract": "We present a supervised method for training a sentence level confidence measure on translation output using a human-annotated corpus. We evaluate a variety of machine learning methods. The resultant measure, while trained on a very small dataset, correlates well with human judgments, and proves to be effective on one task-based evaluation. Although the experiments have only been run on one MT system, we believe the nature of the features gathered are general enough that the approach will also work well on other systems.", "phrases": ["translation output", "confidence estimation", "small set"], "overall_score": 3.822649723962659, "scores": [1.3609790851721668, 1.3328051230072577, 0.5860454419181059], "rank_score": 1.0932765500325103} -{"id": "hovy-etal-2013-events", "title": "Events are Not Simple: Identity, Non-Identity, and Quasi-Identity", "abstract": "Despite considerable theoretical and computational work on coreference, deciding when two entities or events are identical is very difficult. In a project to build corpora containing coreference links between events, we have identified three levels of event identity (full, partial, and none). Event coreference annotation on two corpora was performed to validate the findings.", "phrases": ["identity", "coreference", "event relation"], "overall_score": 3.0310252023393134, "scores": [1.4410617074466832, 0.9435940464582098, 0.8949780172634032], "rank_score": 1.0932112570560986} -{"id": "li-etal-2015-hierarchical", "title": "A Hierarchical Neural Autoencoder for Paragraphs and Documents", "abstract": "Natural language generation of coherent long texts like paragraphs or longer documents is a challenging problem for recurrent network models. In this paper, we explore an important step toward this generation task: training an LSTM (Long short-term memory) auto-encoder to preserve and reconstruct multi-sentence paragraphs. We introduce an LSTM model that hierarchically builds an embedding for a paragraph from embeddings for sentences and words, then decodes this embedding to reconstruct the original paragraph. We evaluate the reconstructed paragraph using standard metrics like ROUGE and Entity Grid, showing that neural models are able to encode texts in a way that preserves syntactic, semantic, and discourse coherence. 

While only a first step toward generating coherent text units from neural models, our work has the potential to significantly impact natural language generation and summarization.", "phrases": ["autoencoder", "paragraph", "natural language generation"], "overall_score": 3.6026220446948596, "scores": [1.5389273011022992, 1.2076344947190836, 0.5326861079704613], "rank_score": 1.0930826345972815} -{"id": "roller-etal-2012-supervised", "title": "Supervised Text-based Geolocation Using Language Models on an Adaptive Grid", "abstract": "The geographical properties of words have recently begun to be exploited for geolocating documents based solely on their text, often in the context of social media and online content. One common approach for geolocating texts is rooted in information retrieval. Given training documents labeled with latitude/longitude coordinates, a grid is overlaid on the Earth and pseudo-documents constructed by concatenating the documents within a given grid cell; then a location for a test document is chosen based on the most similar pseudo-document. Uniform grids are normally used, but they are sensitive to the dispersion of documents over the earth. We define an alternative grid construction using k-d trees that more robustly adapts to data, especially with larger training sets. We also provide a better way of choosing the locations for pseudo-documents. We evaluate these strategies on existing Wikipedia and Twitter corpora, as well as a new, larger Twitter corpus. The adaptive grid achieves competitive results with a uniform grid on small training sets and outperforms it on the large Twitter corpus. The two grid constructions can also be combined to produce consistently strong results across all training sets.", "phrases": ["adaptive grid", "location", "similar pseudo-document", "geolocation prediction"], "overall_score": 2.6208650710689434, "scores": [2.129694870919231, 1.1201299316355993, 0.5766248018513357, 0.5454929083464685], "rank_score": 1.0929856281881587} -{"id": "erk-etal-2010-flexible", "title": "A Flexible, Corpus-Driven Model of Regular and Inverse Selectional Preferences", "abstract": "We present a vector space\u2013based model for selectional preferences that predicts plausibility scores for argument headwords. It does not require any lexical resources (such as WordNet). It can be trained either on one corpus with syntactic annotation, or on a combination of a small semantically annotated primary corpus and a large, syntactically analyzed generalization corpus. Our model is able to predict inverse selectional preferences, that is, plausibility scores for predicates given argument heads. We evaluate our model on one NLP task (pseudo-disambiguation) and one cognitive task (prediction of human plausibility judgments), gauging the influence of different parameters and comparing our model against other model classes. We obtain consistent benefits from using the disambiguation and semantic role information provided by a semantically tagged primary corpus. As for parameters, we identify settings that yield good performance across a range of experimental conditions. 

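The adaptive grid of roller-etal-2012-supervised can be approximated with a k-d-style recursive median split over training coordinates, with leaves serving as grid cells. A minimal sketch; the leaf-capacity parameter is an assumption:

```python
def kd_cells(points, max_leaf=100, depth=0):
    """Recursively split (lat, lon) points at the median, alternating
    axes, until each leaf holds at most max_leaf documents; each leaf
    becomes one cell of the adaptive grid."""
    if len(points) <= max_leaf:
        return [points]
    axis = depth % 2  # alternate between latitude and longitude
    points = sorted(points, key=lambda p: p[axis])
    mid = len(points) // 2
    return (kd_cells(points[:mid], max_leaf, depth + 1)
            + kd_cells(points[mid:], max_leaf, depth + 1))
```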
However, frequency remains a major influence on prediction quality, and we also identify more robust parameter settings suitable for applications with many infrequent items.", "phrases": ["selectional preference", "plausibility", "predicate", "distributional similarity metric", "thematic fit"], "overall_score": 3.426802693098787, "scores": [2.012022825319324, 0.8856600033895343, 0.8713410989933295, 0.8629008111448815, 0.8326088515670229], "rank_score": 1.0929067180828185} -{"id": "neves-etal-2016-scielo", "title": "The Scielo Corpus: a Parallel Corpus of Scientific Publications for Biomedicine", "abstract": "The biomedical scientific literature is a rich source of information not only in the English language, for which it is more abundant, but also in other languages, such as Portuguese, Spanish and French. We present the first freely available parallel corpus of scientific publications for the biomedical domain. Documents from the \u201cBiological Sciences\u201d and \u201cHealth Sciences\u201d categories were retrieved from the Scielo database and parallel titles and abstracts are available for the following language pairs: Portuguese/English (about 86,000 documents in total), Spanish/English (about 95,000 documents) and French/English (about 2,000 documents). Additionally, monolingual data was also collected for all four languages. Sentences in the parallel corpus were automatically aligned and a manual analysis of 200 documents by native experts found that a minimum of 79% of sentences were correctly aligned in all language pairs. We demonstrate the utility of the corpus by running baseline machine translation experiments. We show that for all language pairs, a statistical machine translation system trained on the parallel corpora achieves performance that rivals or exceeds the state of the art in the biomedical domain. Furthermore, the corpora are currently being used in the biomedical task in the First Conference on Machine Translation (WMT'16).", "phrases": ["scielo corpus", "parallel corpus", "scientific publication"], "overall_score": 1.758928746881549, "scores": [1.756307250923198, 0.9428195715835099, 0.579524811009294], "rank_score": 1.0928838778386674} -{"id": "bogin-etal-2019-global", "title": "Global Reasoning over Database Structures for Text-to-SQL Parsing", "abstract": "State-of-the-art semantic parsers rely on auto-regressive decoding, emitting one symbol at a time. When tested against complex databases that are unobserved at training time (zero-shot), the parser often struggles to select the correct set of database constants in the new database, due to the local nature of decoding. In this work, we propose a semantic parser that globally reasons about the structure of the output query to make a more contextually-informed selection of database constants. We use message-passing through a graph neural network to softly select a subset of database constants for the output query, conditioned on the question. Moreover, we train a model to rank queries based on the global alignment of database constants to question words. 

We apply our techniques to the current state-of-the-art model for Spider, a zero-shot semantic parsing dataset with complex databases, increasing accuracy from 39.4% to 47.4%.", "phrases": ["database", "text-to-sql", "global reasoning"], "overall_score": 2.803109293507632, "scores": [1.4595456195903436, 1.2808494016485719, 0.5381601112668539], "rank_score": 1.0928517108352565} -{"id": "lai-etal-2017-natural", "title": "Natural Language Inference from Multiple Premises", "abstract": "We define a novel textual entailment task that requires inference over multiple premise sentences. We present a new dataset for this task that minimizes trivial lexical inferences, emphasizes knowledge of everyday events, and presents a more challenging setting for textual entailment. We evaluate several strong neural baselines and analyze how the multiple premise task differs from standard textual entailment.", "phrases": ["premise", "entailment", "natural language inference"], "overall_score": 2.4005542113346543, "scores": [1.4049580543289744, 0.9869177405594752, 0.8857421183131301], "rank_score": 1.0925393044005265} -{"id": "nopp-hanbury-2015-detecting", "title": "Detecting Risks in the Banking System by Sentiment Analysis", "abstract": "In November 2014, the European Central Bank (ECB) started to directly supervise the largest banks in the Eurozone via the Single Supervisory Mechanism (SSM). While supervisory risk assessments are usually based on quantitative data and surveys, this work explores whether sentiment analysis is capable of measuring a bank\u2019s attitude and opinions towards risk by analyzing text data. For realizing this study, a collection consisting of more than 500 CEO letters and outlook sections extracted from bank annual reports is built up. Based on these data, two distinct experiments are conducted. The evaluations find promising opportunities, but also limitations for risk sentiment analysis in banking supervision. At the level of individual banks, predictions are relatively inaccurate. In contrast, the analysis of aggregated figures revealed strong and significant correlations between uncertainty or negativity in textual disclosures and the quantitative risk indicator\u2019s future evolution. Risk sentiment analysis should therefore rather be used for macroprudential analyses than for assessments of individual banks.", "phrases": ["bank", "sentiment analysis", "textual disclosure"], "overall_score": 1.5144620786197063, "scores": [1.8915500062262773, 0.8614197958123729, 0.5243905936199117], "rank_score": 1.0924534652195208} -{"id": "kobayashi-etal-2020-efficient", "title": "Efficient Estimation of Influence of a Training Instance", "abstract": "Understanding the influence of a training instance on a neural network model leads to improving interpretability. However, it is difficult and inefficient to evaluate the influence, which shows how a model's prediction would be changed if a training instance were not used. In this paper, we propose an efficient method for estimating the influence. Our method is inspired by dropout, which zero-masks a sub-network and prevents the sub-network from learning each training instance. By switching between dropout masks, we can use sub-networks that learned or did not learn each training instance and estimate its influence. 
Through experiments with BERT and VGGNet on classification datasets, we demonstrate that the proposed method can capture training influences, enhance the interpretability of error predictions, and cleanse the training dataset for improving generalization.", "phrases": ["influence", "training instance", "dropout mask"], "overall_score": 1.1997473543911743, "scores": [1.8575871690552326, 0.8799003254070351, 0.5386838175635826], "rank_score": 1.0920571040086167} -{"id": "bonafonte-etal-2006-tc", "title": "TC-STAR: Specifications of Language Resources and Evaluation for Speech Synthesis", "abstract": "In the framework of the EU funded project TC-STAR (Technology and Corpora for Speech to Speech Translation), research on TTS aims at providing a synthesized voice sounding like the source speaker speaking the target language. To progress in this direction, research is focused on naturalness, intelligibility, expressivity and voice conversion within the TC-STAR framework. For this purpose, specifications on large, high quality TTS databases have been developed and the data have been recorded for UK English, Spanish and Mandarin. The development of speech technology in TC-STAR is evaluation driven. Assessment of speech synthesis is needed to determine how well a system or technique performs in comparison to previous versions as well as other approaches (systems & methods). Apart from testing the whole system, all components of the system will be evaluated separately. This approach grants better assessment of each component as well as identification of the best techniques in the different speech synthesis processes. This paper describes the specifications of Language Resources for speech synthesis and the specifications for evaluation of speech synthesis activities.", "phrases": ["specification", "language resources", "speech synthesis", "tc-star"], "overall_score": 1.757457519814246, "scores": [1.744262943007075, 0.9266978789135593, 0.8156410546264857, 0.8812771357132086], "rank_score": 1.0919697530650823} -{"id": "xu-etal-2013-mining", "title": "Mining Opinion Words and Opinion Targets in a Two-Stage Framework", "abstract": "This paper proposes a novel two-stage method for mining opinion words and opinion targets. In the first stage, we propose a Sentiment Graph Walking algorithm, which naturally incorporates syntactic patterns in a Sentiment Graph to extract opinion word/target candidates. Then random walking is employed to estimate confidence of candidates, which improves extraction accuracy by considering confidence of patterns. In the second stage, we adopt a self-learning strategy to refine the results from the first stage, especially for filtering out high-frequency noise terms and capturing the long-tail terms, which are not investigated by previous methods. The experimental results on three real world datasets demonstrate the effectiveness of our approach compared with state-of-the-art unsupervised methods.", "phrases": ["opinion target", "two-stage framework", "syntactic pattern"], "overall_score": 1.955648977268321, "scores": [1.9317377308509598, 0.8143600895168238, 0.5283076076848023], "rank_score": 1.0914684760175286} -{"id": "kim-etal-2021-self", "title": "Self-Guided Contrastive Learning for BERT Sentence Representations", "abstract": "Although BERT and its variants have reshaped the NLP landscape, it still remains unclear how best to derive sentence embeddings from such pre-trained Transformers. 

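A rough sketch of the mask-switching idea in kobayashi-etal-2020-efficient: fix a deterministic dropout mask per training instance, then compare a test loss under the sub-network that learned the instance against the complementary sub-network. The seeding scheme and the model_loss_fn hook are hypothetical scaffolding, not the paper's code:

```python
import torch

def instance_mask(instance_id, dim, p=0.5):
    # deterministic per-instance mask: each training example always
    # updates (and is "stored in") the same sub-network
    g = torch.Generator().manual_seed(instance_id)
    return (torch.rand(dim, generator=g) > p).float() / (1.0 - p)

def estimated_influence(model_loss_fn, test_batch, instance_id, dim, p=0.5):
    """model_loss_fn(batch, mask=...) is a hypothetical hook that applies
    the mask to the dropout layer and returns a scalar loss tensor."""
    m = instance_mask(instance_id, dim, p)
    flipped = (m == 0).float() / (1.0 - p)  # the sub-network that never saw it
    loss_learned = model_loss_fn(test_batch, mask=m)
    loss_not_learned = model_loss_fn(test_batch, mask=flipped)
    # a positive value means the instance helped this test batch
    return (loss_not_learned - loss_learned).item()
```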
In this work, we propose a contrastive learning method that utilizes self-guidance for improving the quality of BERT sentence representations. Our method fine-tunes BERT in a self-supervised fashion, does not rely on data augmentation, and enables the usual [CLS] token embeddings to function as sentence vectors. Moreover, we redesign the contrastive learning objective (NT-Xent) and apply it to sentence representation learning. We demonstrate with extensive experiments that our approach is more effective than competitive baselines on diverse sentence-related tasks. We also show it is efficient at inference and robust to domain shifts.", "phrases": ["contrastive learning", "sentence representation", "self-guidance"], "overall_score": 2.711817034868859, "scores": [2.14003307975698, 0.5747048208619402, 0.559208468875745], "rank_score": 1.091315456498222} -{"id": "severyn-etal-2014-opinion", "title": "Opinion Mining on YouTube", "abstract": "This paper defines a systematic approach to Opinion Mining (OM) on YouTube comments by (i) modeling classifiers for predicting the opinion polarity and the type of comment and (ii) proposing robust shallow syntactic structures for improving model adaptability. We rely on the tree kernel technology to automatically extract and learn features with better generalization power than bag-of-words. An extensive empirical evaluation on our manually annotated YouTube comments corpus shows a high classification accuracy and highlights the benefits of structural models in a cross-domain setting.", "phrases": ["youtube", "systematic approach", "opinion mining", "social medium", "facebook"], "overall_score": 2.1232623236162667, "scores": [2.009572463166101, 1.4482886065118539, 0.8671421058827589, 0.569409751927423, 0.5612920151245875], "rank_score": 1.0911409885225447} -{"id": "malik-2006-punjabi", "title": "Punjabi Machine Transliteration", "abstract": "Machine Transliteration is to transcribe a word written in a script with approximate phonetic equivalence in another language. It is useful for machine translation, cross-lingual information retrieval, multilingual text and speech processing. Punjabi Machine Transliteration (PMT) is a special case of machine transliteration and is a process of converting a word from Shahmukhi (based on Arabic script) to Gurmukhi (derivation of Landa, Shardha and Takri, old scripts of Indian subcontinent), two scripts of Punjabi, irrespective of the type of word. The Punjabi Machine Transliteration System uses transliteration rules (character mappings and dependency rules) for transliteration of Shahmukhi words into Gurmukhi. The PMT system can transliterate every word written in Shahmukhi.", "phrases": ["shahmukhi", "arabic script", "punjabi machine transliteration"], "overall_score": 1.7556485396105865, "scores": [2.1972248392658003, 0.5455561199774241, 0.5297563521626415], "rank_score": 1.0908457704686219} -{"id": "fung-cheung-2004-mining", "title": "Mining Very-Non-Parallel Corpora: Parallel Sentence and Lexicon Extraction via Bootstrapping and EM", "abstract": "We present a method capable of extracting parallel sentences from far more disparate \u201cvery-non-parallel corpora\u201d than previous \u201ccomparable corpora\u201d methods, by exploiting bootstrapping on top of IBM Model 4 EM. Step 1 of our method, like previous methods, uses similarity measures to find matching documents in a corpus first, and then extracts parallel sentences as well as new word translations from these documents. 

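The NT-Xent objective that kim-etal-2021-self redesigns is standard enough to sketch; here z1[i] and z2[i] are two views of sentence i (in the paper's setting, roughly a [CLS]-style vector and a self-guidance signal from intermediate BERT layers). Batch layout and temperature are illustrative:

```python
import torch
import torch.nn.functional as F

def nt_xent(z1, z2, temperature=0.1):
    """Normalized temperature-scaled cross-entropy: each embedding must
    pick its paired view out of all 2N-1 other embeddings in the batch."""
    z = F.normalize(torch.cat([z1, z2], dim=0), dim=1)  # (2N, d)
    sim = z @ z.t() / temperature                       # cosine logits
    n = z1.size(0)
    sim.masked_fill_(torch.eye(2 * n, dtype=torch.bool), float("-inf"))
    targets = torch.cat([torch.arange(n, 2 * n), torch.arange(0, n)])
    return F.cross_entropy(sim, targets)
```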
But unlike previous methods, we extend this with an iterative bootstrapping framework based on the principle of \u201cfind-one-get-more\u201d, which claims that documents found to contain one pair of parallel sentences must contain others even if the documents are judged to be of low similarity. We re-match documents based on extracted sentence pairs, and refine the mining process iteratively until convergence. This novel \u201cfind-one-get-more\u201d principle allows us to add more parallel sentences from dissimilar documents, to the baseline set. Experimental results show that our proposed method is nearly 50% more effective than the baseline method without iteration. We also show that our method is effective in boosting the performance of the IBM Model 4 EM lexical learner as the latter, though stronger than Model 1 used in previous work, does not perform well on data from very-non-parallel corpus. The most challenging task is to extract bilingual sentences and lexicon from very-non-parallel data. Recent work (Munteanu et al., 2004, Zhao and Vogel, 2002) on extracting parallel sentences from comparable data, and others on extracting paraphrasing sentences from monolingual corpora (Barzilay and Elhadad 2003) are based on the \u201cfind-topic-extract-sentence\u201d principle which claims that parallel sentences only exist in document pairs with high similarity. They all use lexical information (e.g. word overlap, cosine similarity) to match documents first, before extracting sentences from these documents.", "phrases": ["lexicon extraction", "bootstrapping", "comparable corpora", "parallel sentence pair", "cross-lingual signal"], "overall_score": 3.8456550778554988, "scores": [1.9295597863862106, 0.8273379331295347, 1.4920578349431328, 0.6141651152913706, 0.5896046698888288], "rank_score": 1.0905450679278155} -{"id": "yang-etal-2017-identifying-semantic", "title": "Identifying Semantic Edit Intentions from Revisions in Wikipedia", "abstract": "Most studies on human editing focus merely on syntactic revision operations, failing to capture the intentions behind revision changes, which are essential for facilitating the single and collaborative writing process. In this work, we develop in collaboration with Wikipedia editors a 13-category taxonomy of the semantic intention behind edits in Wikipedia articles. Using labeled article edits, we build a computational classifier of intentions that achieved a micro-averaged F1 score of 0.621. We use this model to investigate edit intention effectiveness: how different types of edits predict the retention of newcomers and changes in the quality of articles, two key concerns for Wikipedia today. Our analysis shows that the types of edits that users make in their first session predict their subsequent survival as Wikipedia editors, and articles in different stages need different types of edits.", "phrases": ["edit", "wikipedia", "semantic intention"], "overall_score": 2.6143293371876735, "scores": [1.9202661885602854, 0.828761957295507, 0.5217519005180287], "rank_score": 1.0902600154579403} -{"id": "clark-etal-2019-bam", "title": "BAM! Born-Again Multi-Task Networks for Natural Language Understanding", "abstract": "It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. 

We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.", "phrases": ["natural language understanding", "counterpart", "multi-task model", "teacher", "bam"], "overall_score": 3.669587498345207, "scores": [0.9463989611724722, 0.8140966457489877, 1.8710185408157063, 1.2586909672965718, 0.5586590234639278], "rank_score": 1.0897728276995333} -{"id": "bender-koller-2020-climbing", "title": "Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data", "abstract": "The success of the large neural language models on many NLP tasks is exciting. However, we find that these successes sometimes lead to hype in which these models are being described as \u201cunderstanding\u201d language or capturing \u201cmeaning\u201d. In this position paper, we argue that a system trained only on form has a priori no way to learn meaning. In keeping with the ACL 2020 theme of \u201cTaking Stock of Where We've Been and Where We're Going\u201d, we argue that a clear understanding of the distinction between form and meaning will help guide the field towards better science around natural language understanding.", "phrases": ["nlu", "language understanding", "reason"], "overall_score": 4.284177709190655, "scores": [0.8502039349893394, 1.5802782582832198, 0.83836396831741], "rank_score": 1.0896153871966565} -{"id": "zheng-etal-2018-multi", "title": "Multi-Reference Training with Pseudo-References for Neural Translation and Text Generation", "abstract": "Neural text generation, including neural machine translation, image captioning, and summarization, has been quite successful recently. However, during training time, typically only one reference is considered for each example, even though there are often multiple references available, e.g., 4 references in NIST MT evaluations, and 5 references in image captioning data. We first investigate several different ways of utilizing multiple human references during training. But more importantly, we then propose an algorithm to generate exponentially many pseudo-references by first compressing existing human references into lattices and then traversing them to generate new pseudo-references. These approaches lead to substantial improvements over strong baselines in both machine translation (+1.5 BLEU) and image captioning (+3.1 BLEU / +11.7 CIDEr).", "phrases": ["pseudo-reference", "text generation", "image captioning"], "overall_score": 1.7535379968553302, "scores": [1.8263448634756632, 0.900420308147761, 0.541838077948274], "rank_score": 1.0895344165238994} -{"id": "zayed-etal-2020-contextual", "title": "Contextual Modulation for Relation-Level Metaphor Identification", "abstract": "Identifying metaphors in text is very challenging and requires comprehending the underlying comparison. The automation of this cognitive process has gained wide attention lately. However, the majority of existing approaches concentrate on word-level identification by treating the task as either single-word classification or sequential labelling without explicitly modelling the interaction between the metaphor components. On the other hand, while existing relation-level approaches implicitly model this interaction, they ignore the context where the metaphor occurs. 
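Teacher annealing as described in clark-etal-2019-bam is a one-line interpolation between the distillation loss and the supervised loss, with the mixing weight tied to training progress. A minimal sketch (the linear schedule matches the paper's description; the KL formulation of distillation is an assumption):

```python
import torch.nn.functional as F

def bam_loss(student_logits, teacher_logits, gold_labels, progress):
    """progress runs from 0.0 to 1.0 over training: start by imitating
    the single-task teacher, end with pure supervised learning."""
    distill = F.kl_div(F.log_softmax(student_logits, dim=-1),
                       F.softmax(teacher_logits, dim=-1),
                       reduction="batchmean")
    supervised = F.cross_entropy(student_logits, gold_labels)
    return (1.0 - progress) * distill + progress * supervised
```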
In this work, we address these limitations by introducing a novel architecture for identifying relation-level metaphoric expressions of certain grammatical relations based on contextual modulation. In a methodology inspired by works in visual reasoning, our approach is based on conditioning the neural network computation on the deep contextualised features of the candidate expressions using feature-wise linear modulation. We demonstrate that the proposed architecture achieves state-of-the-art results on benchmark datasets. The proposed methodology is generic and could be applied to other textual classification problems that benefit from contextual interaction.", "phrases": ["metaphoric expression", "grammatical relation", "contextual modulation"], "overall_score": 1.5103853167673542, "scores": [1.7420904990635238, 0.9825011973621274, 0.543946413072001], "rank_score": 1.089512703165884} -{"id": "valitutti-etal-2013-everything", "title": "\u201cLet Everything Turn Well in Your Wife\u201d: Generation of Adult Humor Using Lexical Constraints", "abstract": "We propose a method for automated generation of adult humor by lexical replacement and present empirical evaluation results of the obtained humor. We propose three types of lexical constraints as building blocks of humorous word substitution: constraints concerning the similarity of sounds or spellings of the original word and the substitute, a constraint requiring the substitute to be a taboo word, and constraints concerning the position and context of the replacement. Empirical evidence from extensive user studies indicates that these constraints can increase the effectiveness of humor generation significantly.", "phrases": ["adult humor", "lexical constraint", "creativity"], "overall_score": 2.1194834985029223, "scores": [1.933766768348997, 0.8085599265991371, 0.5252704747339407], "rank_score": 1.0891990565606917} -{"id": "meaney-etal-2021-semeval", "title": "SemEval 2021 Task 7: HaHackathon, Detecting and Rating Humor and Offense", "abstract": "SemEval 2021 Task 7, HaHackathon, was the first shared task to combine the previously separate domains of humor detection and offense detection. We collected 10,000 texts from Twitter and the Kaggle Short Jokes dataset, and had each annotated for humor and offense by 20 annotators aged 18-70. Our subtasks were binary humor detection, prediction of humor and offense ratings, and a novel controversy task: to predict if the variance in the humor ratings was higher than a specific threshold. The subtasks attracted 36-58 submissions, with most of the participants choosing to use pre-trained language models. Many of the highest performing teams also implemented additional optimization techniques, including task-adaptive training and adversarial training. The results suggest that the participating systems are well suited to humor detection, but that humor controversy is a more challenging task. We discuss which models excel in this task, which auxiliary techniques boost their performance, and analyze the errors which were not captured by the best systems.", "phrases": ["hahackathon", "humor", "offense", "semeval"], "overall_score": 1.752977210222087, "scores": [1.7265661947362576, 0.8671750804293983, 0.8298316939429258, 0.9331709516827441], "rank_score": 1.0891859801978314} -{"id": "mueller-etal-2013-efficient", "title": "Efficient Higher-Order CRFs for Morphological Tagging", "abstract": "Training higher-order conditional random fields is prohibitive for huge tag sets. 
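The conditioning mechanism in zayed-etal-2020-contextual follows the standard feature-wise linear modulation recipe from visual reasoning: a context encoding predicts a per-feature scale and shift for the candidate expression's features. A minimal module as a sketch:

```python
import torch.nn as nn

class FiLM(nn.Module):
    """Feature-wise linear modulation: features -> gamma * features + beta,
    with gamma and beta predicted from the context representation."""
    def __init__(self, context_dim, feature_dim):
        super().__init__()
        self.to_gamma_beta = nn.Linear(context_dim, 2 * feature_dim)

    def forward(self, features, context):
        gamma, beta = self.to_gamma_beta(context).chunk(2, dim=-1)
        return gamma * features + beta
```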
We present an approximated conditional random field using coarse-to-fine decoding and early updating. We show that our implementation yields fast and accurate morphological taggers across six languages with different morphological properties and that across languages higher-order models give significant improvements over first-order models.", "phrases": ["crf", "morphological tagging", "random field", "coarse-to-fine decoding", "structured prediction model"], "overall_score": 3.08566730616568, "scores": [1.6274089714966264, 1.5095109790208874, 0.8998713856928011, 0.873088169297584, 0.5356463540943892], "rank_score": 1.0891051719204576} -{"id": "deriu-etal-2020-spot", "title": "Spot The Bot: A Robust and Efficient Framework for the Evaluation of Conversational Dialogue Systems", "abstract": "The lack of time efficient and reliable evaluation methods is hampering the development of conversational dialogue systems (chat bots). Evaluations that require humans to converse with chat bots are time and cost intensive, put high cognitive demands on the human judges, and tend to yield low quality results. In this work, we introduce Spot The Bot, a cost-efficient and robust evaluation framework that replaces human-bot conversations with conversations between bots. Human judges then only annotate for each entity in a conversation whether they think it is human or not (assuming there are human participants in these conversations). These annotations then allow us to rank chat bots regarding their ability to mimic conversational behaviour of humans. Since we expect that all bots are eventually recognized as such, we incorporate a metric that measures which chat bot is able to uphold human-like behavior the longest, i.e. Survival Analysis. This metric has the ability to correlate a bot's performance to certain of its characteristics (e.g. fluency or sensibleness), yielding interpretable results. The comparably low cost of our framework allows for frequent evaluations of chatbots during their evaluation cycle. We empirically validate our claims by applying Spot The Bot to three domains, evaluating several state-of-the-art chat bots, and drawing comparisons to related work. The framework is released as a ready-to-use tool.", "phrases": ["bot", "conversation", "human judge", "chatbot"], "overall_score": 2.1189068049797988, "scores": [1.6890339122022098, 1.3093667440611205, 0.8304144710210756, 0.5267956515760074], "rank_score": 1.0889026947151033} -{"id": "mani-etal-2006-machine", "title": "Machine Learning of Temporal Relations", "abstract": "This paper investigates a machine learning approach for temporally ordering and anchoring events in natural language texts. To address data sparseness, we used temporal reasoning as an over-sampling method to dramatically expand the amount of training data, resulting in predictive accuracy on link labeling as high as 93% using a Maximum Entropy classifier on human annotated data. 

This method compared favorably against a series of increasingly sophisticated baselines involving expansion of rules derived from human intuitions.", "phrases": ["temporal relation", "reasoning", "series", "machine learning", "relation extraction"], "overall_score": 3.665686497560241, "scores": [1.7434437117214645, 2.0053277633436726, 0.5772491922948874, 0.5597889416959595, 0.557262046419806], "rank_score": 1.0886143310951581} -{"id": "anderson-etal-2017-guided", "title": "Guided Open Vocabulary Image Captioning with Constrained Beam Search", "abstract": "Existing image captioning models do not generalize well to out-of-domain images containing novel scenes or objects. This limitation severely hinders the use of these models in real world applications dealing with images in the wild. We address this problem using a flexible approach that enables existing deep captioning architectures to take advantage of image taggers at test time, without re-training. Our method uses constrained beam search to force the inclusion of selected tag words in the output, and fixed, pretrained word embeddings to facilitate vocabulary expansion to previously unseen tag words. Using this approach we achieve state of the art results for out-of-domain captioning on MSCOCO (and improved results for in-domain captioning). Perhaps surprisingly, our results significantly outperform approaches that incorporate the same tag predictions into the learning algorithm. We also show that we can significantly improve the quality of generated ImageNet captions by leveraging ground-truth labels.", "phrases": ["image", "caption", "constrained beam search", "vocabulary expansion"], "overall_score": 3.627003909061806, "scores": [2.2926445576357497, 0.8916090135121439, 0.5915717084257135, 0.5780538391736416], "rank_score": 1.0884697796868121} -{"id": "sumita-etal-2005-measuring", "title": "Measuring Non-native Speakers' Proficiency of English by Using a Test with Automatically-Generated Fill-in-the-Blank Questions", "abstract": "This paper proposes the automatic generation of Fill-in-the-Blank Questions (FBQs) together with testing based on Item Response Theory (IRT) to measure English proficiency. First, the proposal generates an FBQ from a given sentence in English. The position of a blank in the sentence is determined, and the word at that position is considered as the correct choice. The candidates for incorrect choices for the blank are hypothesized through a thesaurus. Then, each of the candidates is verified by using the Web. Finally, the blanked sentence, the correct choice and the incorrect choices surviving the verification are together laid out to form the FBQ. Second, the proficiency of non-native speakers who took the test consisting of such FBQs is estimated through IRT. \n \nOur experimental results suggest that: (1) the generated questions plus IRT estimate the non-native speakers' English proficiency; (2) while on the other hand, the test can be completed almost perfectly by English native speakers; and (3) the number of questions can be reduced by using item information in IRT. 
\n \nThe proposed method provides teachers and testers with a tool that reduces time and expenditure for testing English proficiency.", "phrases": ["proficiency", "fill-in-the-blank question", "english language learning"], "overall_score": 2.7044415015961856, "scores": [1.8524182507067828, 0.8340253518515286, 0.5785983681252734], "rank_score": 1.088347323561195} -{"id": "stratos-2017-sub", "title": "A Sub-Character Architecture for Korean Language Processing", "abstract": "We introduce a novel sub-character architecture that exploits a unique compositional structure of the Korean language. Our method decomposes each character into a small set of primitive phonetic units called jamo letters from which character- and word-level representations are induced. The jamo letters divulge syntactic and semantic information that is difficult to access with conventional character-level units. They greatly alleviate the data sparsity problem, reducing the observation space to 1.6% of the original while increasing accuracy in our experiments. We apply our architecture to dependency parsing and achieve dramatic improvement over strong lexical baselines.", "phrases": ["sub-character architecture", "korean language processing", "primitive phonetic unit"], "overall_score": 2.39096651771646, "scores": [1.789782899073866, 0.9370391777935437, 0.5377051940979299], "rank_score": 1.0881757569884465} -{"id": "lazic-etal-2015-plato", "title": "Plato: A Selective Context Model for Entity Resolution", "abstract": "We present Plato, a probabilistic model for entity resolution that includes a novel approach for handling noisy or uninformative features, and supplements labeled training data derived from Wikipedia with a very large unlabeled text corpus. Training and inference in the proposed model can easily be distributed across many servers, allowing it to scale to over 10^7 entities. We evaluate Plato on three standard datasets for entity resolution. Our approach achieves the best results to-date on TAC KBP 2011 and is highly competitive on both the CoNLL 2003 and TAC KBP 2012 datasets.", "phrases": ["context model", "entity resolution", "wikipedia", "plato", "textual feature"], "overall_score": 2.505186922138269, "scores": [1.711150853114624, 1.3389780256546198, 0.9375753611572977, 0.9093766137345809, 0.5428634284430841], "rank_score": 1.0879888564208415} -{"id": "luong-etal-2015-bilingual", "title": "Bilingual Word Representations with Monolingual Quality in Mind", "abstract": "Recent work in learning bilingual representations tends to tailor towards achieving good performance on bilingual tasks, most often the crosslingual document classification (CLDC) evaluation, but to the detriment of preserving clustering structures of word representations monolingually. In this work, we propose a joint model to learn word representations from scratch that utilizes both the context co-occurrence information through the monolingual component and the meaning equivalent signals from the bilingual constraint. Specifically, we extend the recently popular skipgram model to learn high quality bilingual representations efficiently. Our learned embeddings achieve a new state-of-the-art accuracy of 80.3 for the German to English CLDC task and a highly competitive performance of 90.7 for the other classification direction. At the same time, our models outperform the best embeddings from past bilingual representation work by a large margin in the monolingual word similarity evaluation. 
1", "phrases": ["scratch", "word embedding", "bilingual model", "joint training"], "overall_score": 3.9850685361764056, "scores": [1.955943241266267, 1.0030420425041853, 0.8590315873765982, 0.5330151802845544], "rank_score": 1.0877580128579012} -{"id": "fabbri-etal-2021-summeval", "title": "SummEval: Re-evaluating Summarization Evaluation", "abstract": "The scarcity of comprehensive up-to-date studies on evaluation metrics for text summarization and the lack of consensus regarding evaluation protocols continue to inhibit progress. We address the existing shortcomings of summarization evaluation methods along five dimensions: 1) we re-evaluate 14 automatic evaluation metrics in a comprehensive and consistent fashion using neural summarization model outputs along with expert and crowd-sourced human annotations; 2) we consistently benchmark 23 recent summarization models using the aforementioned automatic evaluation metrics; 3) we assemble the largest collection of summaries generated by models trained on the CNN/DailyMail news dataset and share it in a unified format; 4) we implement and share a toolkit that provides an extensible and unified API for evaluating summarization models across a broad range of automatic metrics; and 5) we assemble and share the largest and most diverse, in terms of model types, collection of human judgments of model-generated summaries on the CNN/Daily Mail dataset annotated by both expert judges and crowd-source workers. We hope that this work will help promote a more complete evaluation protocol for text summarization as well as advance research in developing evaluation metrics that better correlate with human judgments.", "phrases": ["evaluation metric", "summarization model", "summeval", "human judgement", "insight"], "overall_score": 3.699495747581964, "scores": [2.2891904878103246, 1.1701013287379736, 0.8294772869808023, 0.6257943201659847, 0.5239562099023505], "rank_score": 1.087703926719487} -{"id": "yang-etal-2017-semi", "title": "Semi-Supervised QA with Generative Domain-Adaptive Nets", "abstract": "We study the problem of semi-supervised question answering\u2014utilizing unlabeled text to boost the performance of question answering models. We propose a novel training framework, the Generative Domain-Adaptive Nets. In this framework, we train a generative model to generate questions based on the unlabeled text, and combine model-generated questions with human-generated questions for training question answering models. We develop novel domain adaptation algorithms, based on reinforcement learning, to alleviate the discrepancy between the model-generated data distribution and the human-generated data distribution. Experiments show that our proposed framework obtains substantial improvement from unlabeled text.", "phrases": ["generative domain-adaptive net", "unlabeled text", "question generation", "passage", "rule-based method"], "overall_score": 3.4566516585671967, "scores": [1.956790247540725, 1.3286981689150454, 1.1038282717339862, 0.5252406041397047, 0.5237578575884823], "rank_score": 1.0876630299835888} -{"id": "antypas-etal-2021-covid", "title": "COVID-19 and Misinformation: A Large-Scale Lexical Analysis on Twitter", "abstract": "Social media is often used by individuals and organisations as a platform to spread misinformation. With the recent coronavirus pandemic we have seen a surge of misinformation on Twitter, posing a danger to public health. 
In this paper, we compile a large COVID-19 Twitter misinformation corpus and perform an analysis to discover patterns with respect to vocabulary usage. Among others, our analysis reveals that the variety of topics and vocabulary usage are considerably more limited and negative in tweets related to misinformation than in randomly extracted tweets. In addition to our qualitative analysis, our experimental results show that a simple linear model based only on lexical features is effective in identifying misinformation-related tweets (with accuracy over 80%), providing evidence that the vocabulary used in misinformation largely differs from generic tweets.", "phrases": ["misinformation", "twitter", "lexical feature"], "overall_score": 1.1949124807969218, "scores": [1.8095928952029634, 0.9003852318897992, 0.5529905101292741], "rank_score": 1.0876562124073457} -{"id": "tan-etal-2017-neural", "title": "Neural Post-Editing Based on Quality Estimation", "abstract": "Automatic post-editing (APE) is a challenging task in the WMT evaluation campaign. We find that only a small number of edit operations are required for most machine translation outputs, through analysis of the training set of the WMT17 APE en-de task. Based on this statistical analysis, two neural post-editing (NPE) models are trained depending on the number of edits: single edit and minor edits. The improved quality estimation (QE) approach is exploited to rank models, and select the best translation as the post-edited output from the n-best list of translation hypotheses generated by the best APE model and the raw translation system. Experimental results on the WMT16 APE test set show that the proposed approach significantly outperformed the baseline. Our approach can bring considerable relief from the overcorrection problem in APE.", "phrases": ["quality estimation", "ape", "neural post-editing"], "overall_score": 1.5076170078812192, "scores": [1.750575314940716, 0.9726894221532106, 0.5392826341512117], "rank_score": 1.087515790415046} -{"id": "zeng-etal-2020-double", "title": "Double Graph Based Reasoning for Document-level Relation Extraction", "abstract": "Document-level relation extraction aims to extract relations among entities within a document. Different from sentence-level relation extraction, it requires reasoning over multiple sentences across paragraphs. In this paper, we propose Graph Aggregation-and-Inference Network (GAIN), a method to recognize such relations for long paragraphs. GAIN constructs two graphs, a heterogeneous mention-level graph (MG) and an entity-level graph (EG). The former captures complex interaction among different mentions and the latter aggregates mentions underlying for the same entities. Based on the graphs we propose a novel path reasoning mechanism to infer relations between entities. Experiments on the public dataset, DocRED, show GAIN achieves a significant performance improvement (2.85 on F1) over the previous state-of-the-art. 
Our code is available at .", "phrases": ["document-level relation extraction", "graph aggregation-and-inference network", "same entity", "path reasoning mechanism", "double graph"], "overall_score": 2.9449357255687594, "scores": [2.5634998508814597, 1.2484915052429744, 0.5555690488947705, 0.5472561410170103, 0.5225562995080341], "rank_score": 1.0874745691088497} -{"id": "nivre-nilsson-2005-pseudo", "title": "Pseudo-Projective Dependency Parsing", "abstract": "In order to realize the full potential of dependency-based syntactic parsing, it is desirable to allow non-projective dependency structures. We show how a data-driven deterministic dependency parser, in itself restricted to projective structures, can be combined with graph transformation techniques to produce non-projective structures. Experiments using data from the Prague Dependency Treebank show that the combined system can handle non-projective constructions with a precision sufficient to yield a significant improvement in overall parsing accuracy. This leads to the best reported performance for robust non-projective parsing of Czech.", "phrases": ["non-projectivity", "transformation", "pseudo-projective parsing", "dependency tree", "transition-based parsing"], "overall_score": 3.661573719372781, "scores": [1.520175358431923, 1.4506386933411122, 1.3158553516441145, 0.584476347452702, 0.5658189594560138], "rank_score": 1.0873929420651731} -{"id": "collins-etal-2005-discriminative", "title": "Discriminative Syntactic Language Modeling for Speech Recognition", "abstract": "We describe a method for discriminative training of a language model that makes use of syntactic features. We follow a reranking approach, where a baseline recogniser is used to produce 1000-best output for each acoustic input, and a second \"reranking\" model is then used to choose an utterance from these 1000-best lists. The reranking model makes use of syntactic features together with a parameter estimation method that is based on the perceptron algorithm. We describe experiments on the Switchboard speech recognition task. The syntactic features provide an additional 0.3% reduction in test-set error rate beyond the model of (Roark et al., 2004a; Roark et al., 2004b) (significant at p < 0.001), which makes use of a discriminatively trained n-gram model, giving a total reduction of 1.2% over the baseline Switchboard system.", "phrases": ["speech recognition", "syntactic feature", "reduction", "n-gram"], "overall_score": 2.607075296707472, "scores": [2.2687701715341944, 0.9867225199694651, 0.555631971322186, 0.5378147196101284], "rank_score": 1.0872348456089935} -{"id": "wang-etal-2020-global", "title": "Global-to-Local Neural Networks for Document-Level Relation Extraction", "abstract": "Relation extraction (RE) aims to identify the semantic relations between named entities in text. Recent years have witnessed it raised to the document level, which requires complex reasoning with entities and mentions throughout an entire document. In this paper, we propose a novel model to document-level RE, by encoding the document information in terms of entity global and local representations as well as context relation representations. Entity global representations model the semantic information of all entities in the document, entity local representations aggregate the contextual information of multiple mentions of specific entities, and context relation representations encode the topic information of other relations. 
Experimental results demonstrate that our model achieves superior performance on two public datasets for document-level RE. It is particularly effective in extracting relations between entities that are far apart in the document and have multiple mentions.", "phrases": ["relation extraction", "specific entity", "document graph"], "overall_score": 2.1156024599352468, "scores": [1.8698033027763916, 0.8672451751728225, 0.5245653138730567], "rank_score": 1.0872045972740902} -{"id": "kaljahi-etal-2014-syntax", "title": "Syntax and Semantics in Quality Estimation of Machine Translation", "abstract": "We employ syntactic and semantic information in estimating the quality of machine translation from a new data set which contains source text from English customer support forums and target text consisting of its machine translation into French. These translations have been both post-edited and evaluated by professional translators. We find that quality estimation using syntactic and semantic information on this data set can hardly improve over a baseline which uses only surface features. However, the performance can be improved when they are combined with such surface features. We also introduce a novel metric to measure translation adequacy based on predicate-argument structure match using word alignments. While word alignments can be reliably used, the two main factors affecting the performance of all semantic-based methods seem to be the low quality of semantic role labelling (especially on ill-formed text) and the lack of nominal predicate annotation.", "phrases": ["quality estimation", "machine translation", "semantic information"], "overall_score": 1.1943988258572416, "scores": [1.812524542036602, 0.9067909250487296, 0.5422505235115623], "rank_score": 1.087188663532298} -{"id": "xiao-etal-2019-lattice", "title": "Lattice-Based Transformer Encoder for Neural Machine Translation", "abstract": "Neural machine translation (NMT) takes deterministic sequences for source representations. However, either word-level or subword-level segmentations have multiple choices to split a source sequence with different word segmentors or different subword vocabulary sizes. We hypothesize that the diversity in segmentations may affect the NMT performance. To integrate different segmentations with the state-of-the-art NMT model, Transformer, we propose lattice-based encoders to explore effective word or subword representation in an automatic way during training. We propose two methods: 1) lattice positional encoding and 2) lattice-aware self-attention. These two methods can be used together and are complementary to each other, further improving translation performance. Experimental results show the superiority of lattice-based encoders over the conventional Transformer encoder in both word-level and subword-level representations.", "phrases": ["transformer encoder", "encoding", "neural machine translation"], "overall_score": 1.7494966337324676, "scores": [1.932749695786565, 0.7951133873345456, 0.5332070461761612], "rank_score": 1.087023376432424} -{"id": "liu-etal-2018-efficient-low", "title": "Efficient Low-rank Multimodal Fusion With Modality-Specific Factors", "abstract": "Multimodal research is an emerging field of artificial intelligence, and one of the main research problems in this field is multimodal fusion. The fusion of multimodal data is the process of integrating multiple unimodal representations into one compact multimodal representation. 
Previous research in this field has exploited the expressiveness of tensors for multimodal representation. However, these methods often suffer from an exponential increase in dimensions and in computational complexity introduced by the transformation of the input into a tensor. In this paper, we propose the Low-rank Multimodal Fusion method, which performs multimodal fusion using low-rank tensors to improve efficiency. We evaluate our model on three different tasks: multimodal sentiment analysis, speaker trait analysis, and emotion recognition. Our model achieves competitive results on all these tasks while drastically reducing computational complexity. Additional experiments also show that our model can perform robustly for a wide range of low-rank settings, and is indeed much more efficient in both training and inference compared to other methods that utilize tensor representations.", "phrases": ["low-rank multimodal fusion", "factor", "complexity", "lmf"], "overall_score": 2.9436762423051026, "scores": [1.6143139809801148, 1.3286535473663432, 0.849537949718771, 0.5555324439895243], "rank_score": 1.0870094805136883} -{"id": "abu-farha-magdy-2021-benchmarking", "title": "Benchmarking Transformer-based Language Models for Arabic Sentiment and Sarcasm Detection", "abstract": "The introduction of transformer-based language models has been a revolutionary step for natural language processing (NLP) research. These models, such as BERT, GPT and ELECTRA, led to state-of-the-art performance in many NLP tasks. Most of these models were initially developed for English and other languages followed later. Recently, several Arabic-specific models started emerging. However, there are limited direct comparisons between these models. In this paper, we evaluate the performance of 24 of these models on Arabic sentiment and sarcasm detection. Our results show that the models achieving the best performance are those that are trained on only Arabic data, including dialectal Arabic, and use a larger number of parameters, such as the recently released MARBERT. However, we noticed that AraELECTRA is one of the top performing models while being much more efficient in its computational cost. Finally, the experiments on AraGPT2 variants showed low performance compared to BERT models, which indicates that it might not be suitable for classification tasks.", "phrases": ["language model", "arabic sentiment", "sarcasm detection"], "overall_score": 1.5067377187317408, "scores": [1.7935984272780754, 0.9343249819102413, 0.5327211429136316], "rank_score": 1.086881517367316} -{"id": "bastings-etal-2018-jump", "title": "Jump to better conclusions: SCAN both left and right", "abstract": "Lake and Baroni (2018) recently introduced the SCAN data set, which consists of simple commands paired with action sequences and is intended to test the strong generalization abilities of recurrent sequence-to-sequence models. Their initial experiments suggested that such models may fail because they lack the ability to extract systematic rules. Here, we take a closer look at SCAN and show that it does not always capture the kind of generalization that it was designed for. To mitigate this we propose a complementary dataset, which requires mapping actions back to the original commands, called NACS. 
We show that models that do well on SCAN do not necessarily do well on NACS, and that NACS exhibits properties more closely aligned with realistic use-cases for sequence-to-sequence models.", "phrases": ["conclusion", "scan", "generalization ability", "nacs"], "overall_score": 2.2600739426978795, "scores": [2.3517051911999007, 0.9094138359002695, 0.5534763307622209, 0.5328679343677399], "rank_score": 1.0868658230575328} -{"id": "scarton-etal-2010-simplifica", "title": "SIMPLIFICA: a tool for authoring simplified texts in Brazilian Portuguese guided by readability assessments", "abstract": "SIMPLIFICA is an authoring tool for producing simplified texts in Portuguese. It provides functionalities for lexical and syntactic simplification and for readability assessment. This tool is the first of its kind for Portuguese; it brings innovative aspects for simplification tools in general, since the authoring process is guided by readability assessment based on the levels of literacy of the Brazilian population.", "phrases": ["portuguese", "readability assessment", "simplifica"], "overall_score": 1.1939850792901652, "scores": [1.654586636271387, 0.8220543083922559, 0.7837952208675462], "rank_score": 1.086812055177063} -{"id": "chen-etal-2020-question", "title": "Question Directed Graph Attention Network for Numerical Reasoning over Text", "abstract": "Numerical reasoning over texts, such as addition, subtraction, sorting and counting, is a challenging machine reading comprehension task, since it requires both natural language understanding and arithmetic computation. To address this challenge, we propose a heterogeneous graph representation for the context of the passage and question needed for such reasoning, and design a question directed graph attention network to drive multi-step numerical reasoning over this context graph. Our model, which combines deep learning and graph reasoning, achieves remarkable results in benchmark datasets such as DROP.", "phrases": ["graph attention network", "numerical reasoning", "passage"], "overall_score": 2.6059001646076347, "scores": [1.7951702666927427, 0.851479175324829, 0.613584890359905], "rank_score": 1.086744777459159} -{"id": "nakagawa-etal-2010-dependency", "title": "Dependency Tree-based Sentiment Classification using CRFs with Hidden Variables", "abstract": "In this paper, we present a dependency tree-based method for sentiment classification of Japanese and English subjective sentences using conditional random fields with hidden variables. Subjective sentences often contain words which reverse the sentiment polarities of other words. Therefore, interactions between words need to be considered in sentiment classification, which is difficult to be handled with simple bag-of-words approaches, and the syntactic dependency structures of subjective sentences are exploited in our method. In the method, the sentiment polarity of each dependency subtree in a sentence, which is not observable in training data, is represented by a hidden variable. The polarity of the whole sentence is calculated in consideration of interactions between the hidden variables. Sum-product belief propagation is used for inference. 
Experimental results of sentiment classification for Japanese and English subjective sentences showed that the method performs better than other methods based on bag-of-features.", "phrases": ["sentiment classification", "variable", "syntactic structure"], "overall_score": 3.4531698768027037, "scores": [1.476175348631708, 1.2120098106234418, 0.571517219440529], "rank_score": 1.0865674595652262} -{"id": "ma-etal-2016-letter", "title": "Letter Sequence Labeling for Compound Splitting", "abstract": "For languages such as German where compounds occur frequently and are written as single tokens, a wide variety of NLP applications benefits from recognizing and splitting compounds. As the traditional word frequency-based approach to compound splitting has several drawbacks, this paper introduces a letter sequence labeling approach, which can utilize rich word form features to build discriminative learning models that are optimized for splitting. Experiments show that the proposed method significantly outperforms state-of-the-art compound splitters.", "phrases": ["compound splitting", "letter sequence", "conditional random fields"], "overall_score": 1.9465777995028282, "scores": [0.9618778783166652, 1.2416912688863766, 1.0556481187307734], "rank_score": 1.0864057553112716} -{"id": "cao-etal-2018-rst", "title": "The RST Spanish-Chinese Treebank", "abstract": "Discourse analysis is necessary for different tasks of Natural Language Processing (NLP). As two of the most spoken languages in the world, discourse analysis between Spanish and Chinese is important for NLP research. This paper aims to present the first open Spanish-Chinese parallel corpus annotated with discourse information, whose theoretical framework is based on the Rhetorical Structure Theory (RST). We have evaluated and harmonized each annotation part to obtain a high annotated-quality corpus. The corpus is already available to the public.", "phrases": ["rst", "spanish-chinese treebank", "discourse analysis"], "overall_score": 1.193507656461457, "scores": [1.734935892562667, 0.9799352102819802, 0.5442613557274099], "rank_score": 1.0863774861906856} -{"id": "lee-etal-2013-deterministic", "title": "Deterministic Coreference Resolution Based on Entity-Centric, Precision-Ranked Rules", "abstract": "We propose a new deterministic approach to coreference resolution that combines the global information and precise features of modern machine-learning models with the transparency and modularity of deterministic, rule-based systems. Our sieve architecture applies a battery of deterministic coreference models one at a time from highest to lowest precision, where each model builds on the previous model's cluster output. The two stages of our sieve-based architecture, a mention detection stage that heavily favors recall, followed by coreference sieves that are precision-oriented, offer a powerful way to achieve both high precision and high recall. Further, our approach makes use of global information through an entity-centric model that encourages the sharing of features across all mentions that point to the same real-world entity. Despite its simplicity, our approach gives state-of-the-art performance on several corpora and genres, and has also been incorporated into hybrid state-of-the-art coreference systems for Chinese and Arabic. 
Our system thus offers a new paradigm for combining knowledge in rule-based systems that has implications throughout computational linguistics.", "phrases": ["coreference resolution", "rule-based system", "mention", "entity-centric model", "semantic knowledge"], "overall_score": 3.7643850201520213, "scores": [1.6346643033422266, 1.5927439798100378, 1.1042566234919922, 0.5633749003367663, 0.5358197935889984], "rank_score": 1.0861719201140043} -{"id": "kondratyuk-straka-2019-75", "title": "75 Languages, 1 Model: Parsing Universal Dependencies Universally", "abstract": "We present UDify, a multilingual multi-task model capable of accurately predicting universal part-of-speech, morphological features, lemmas, and dependency trees simultaneously for all 124 Universal Dependencies treebanks across 75 languages. By leveraging a multilingual BERT self-attention model pretrained on 104 languages, we found that fine-tuning it on all datasets concatenated together with simple softmax classifiers for each UD task can meet or exceed state-of-the-art UPOS, UFeats, Lemmas, (and especially) UAS, and LAS scores, without requiring any recurrent or language-specific components. We evaluate UDify for multilingual learning, showing that low-resource languages benefit the most from cross-linguistic annotations. We also evaluate for zero-shot learning, with results suggesting that multilingual training provides strong UD predictions even for languages that neither UDify nor BERT have ever been trained on.", "phrases": ["universal dependency", "udify", "bert", "low-resource language", "multilingual training"], "overall_score": 3.7291448857150677, "scores": [0.9543910183796429, 1.482658896722705, 1.248782839822636, 1.1809277045846582, 0.5629989770491413], "rank_score": 1.0859518873117566} -{"id": "banea-mihalcea-2011-word", "title": "Word Sense Disambiguation with Multilingual Features", "abstract": "This paper explores the role played by a multilingual feature representation for the task of word sense disambiguation. We translate the context of an ambiguous word in multiple languages, and show through experiments on standard datasets that by using a multilingual vector space we can obtain error rate reductions of up to 25%, as compared to a monolingual classifier.", "phrases": ["multiple language", "vector space", "word sense disambiguation"], "overall_score": 1.9451523980275875, "scores": [2.1214229424276185, 0.5797386150551737, 0.5556691133197214], "rank_score": 1.085610223600838} -{"id": "zheng-lapata-2019-sentence", "title": "Sentence Centrality Revisited for Unsupervised Summarization", "abstract": "Single document summarization has enjoyed renewed interest in recent years thanks to the popularity of neural network models and the availability of large-scale datasets. In this paper we develop an unsupervised approach arguing that it is unrealistic to expect large-scale and high-quality training data to be available or created for different types of summaries, domains, or languages. We revisit a popular graph-based ranking algorithm and modify how node (aka sentence) centrality is computed in two ways: (a) we employ BERT, a state-of-the-art neural representation learning model to better capture sentential meaning and (b) we build graphs with directed edges arguing that the contribution of any two nodes to their respective centrality is influenced by their relative position in a document. 
Experimental results on three news summarization datasets representative of different languages and writing styles show that our approach outperforms strong baselines by a wide margin.", "phrases": ["centrality", "document summarization", "bert", "position", "unsupervised model"], "overall_score": 2.939607162891824, "scores": [1.7196085953437052, 1.4682567721372852, 1.0454069255905312, 0.6388242204159532, 0.5554379570614225], "rank_score": 1.0855068941097796} -{"id": "hassan-etal-2007-supertagged", "title": "Supertagged Phrase-Based Statistical Machine Translation", "abstract": "Until quite recently, extending Phrase-based Statistical Machine Translation (PBSMT) with syntactic structure caused system performance to deteriorate. In this work we show that incorporating lexical syntactic descriptions in the form of supertags can yield significantly better PBSMT systems. We describe a novel PBSMT model that integrates supertags into the target language model and the target side of the translation model. Two kinds of supertags are employed: those from Lexicalized Tree-Adjoining Grammar and Combinatory Categorial Grammar. Despite the differences between these two approaches, the supertaggers give similar improvements. In addition to supertagging, we also explore the utility of a surface global grammaticality measure based on combinatory operators. We perform various experiments on the Arabic to English NIST 2005 test set addressing issues such as sparseness, scalability and the utility of system subcomponents. Our best result (0.4688 BLEU) improves by 6.1% relative to a state-of-the-art PBSMT model, which compares very favourably with the leading systems on the NIST 2005 task.", "phrases": ["statistical machine translation", "target side", "supertag"], "overall_score": 2.697170803848582, "scores": [0.9789959598126026, 1.4519169375580407, 0.8253512412584978], "rank_score": 1.085421379543047} -{"id": "howes-etal-2014-linguistic", "title": "Linguistic Indicators of Severity and Progress in Online Text-based Therapy for Depression", "abstract": "Mental illnesses such as depression and anxiety are highly prevalent, and therapy is increasingly being offered online. This new setting is a departure from face-to-face therapy, and offers both a challenge and an opportunity \u2010 it is not yet known what features or approaches are likely to lead to successful outcomes in such a different medium, but online text-based therapy provides large amounts of data for linguistic analysis. We present an initial investigation into the application of computational linguistic techniques, such as topic and sentiment modelling, to online therapy for depression and anxiety. We find that important measures such as symptom severity can be predicted with comparable accuracy to face-to-face data, using general features such as discussion topic and sentiment; however, measures of patient progress are captured only by finer-grained lexical features, suggesting that aspects of style or dialogue structure may also be important.", "phrases": ["online text-based therapy", "depression", "lexical feature"], "overall_score": 1.5047110801691368, "scores": [1.817123501617127, 0.907664484999663, 0.5314708333792436], "rank_score": 1.0854196066653445} -{"id": "jagarlamudi-etal-2012-incorporating", "title": "Incorporating Lexical Priors into Topic Models", "abstract": "Topic models have great potential for helping users understand document corpora. 
This potential is stymied by their purely unsupervised nature, which often leads to topics that are neither entirely meaningful nor effective in extrinsic tasks (Chang et al., 2009). We propose a simple and effective way to guide topic models to learn topics of specific interest to a user. We achieve this by providing sets of seed words that a user believes are representative of the underlying topics in a corpus. Our model uses these seeds to improve both topic-word distributions (by biasing topics to produce appropriate seed words) and to improve document-topic distributions (by biasing documents to select topics related to the seed words they contain). Extrinsic evaluation on a document clustering task reveals a significant improvement when using seed information, even over other models that use seed information naively.", "phrases": ["prior", "seed word", "domain knowledge"], "overall_score": 2.9393021309004905, "scores": [1.7359713532762495, 0.9465856096834673, 0.5736258024530049], "rank_score": 1.085394255137574} -{"id": "wang-etal-2019-second", "title": "Second-Order Semantic Dependency Parsing with End-to-End Neural Networks", "abstract": "Semantic dependency parsing aims to identify semantic relationships between words in a sentence that form a graph. In this paper, we propose a second-order semantic dependency parser, which takes into consideration not only individual dependency edges but also interactions between pairs of edges. We show that second-order parsing can be approximated using mean field (MF) variational inference or loopy belief propagation (LBP). We can unfold both algorithms as recurrent layers of a neural network and therefore can train the parser in an end-to-end manner. Our experiments show that our approach achieves state-of-the-art performance.", "phrases": ["semantic dependency", "recurrent layer", "second-order information"], "overall_score": 2.6969858134218154, "scores": [1.87855581068521, 0.8344400667491249, 0.5430449243220309], "rank_score": 1.0853469339187887} -{"id": "an-etal-2020-multimodal", "title": "Multimodal Topic-Enriched Auxiliary Learning for Depression Detection", "abstract": "From the perspective of health psychology, human beings with long-term and sustained negativity are highly likely to be diagnosed with depression. Inspired by this, we argue that the global topic information derived from user-generated contents (e.g., texts and images) is crucial to boost the performance of the depression detection task, though this information has been neglected by almost all previous studies on depression detection. To this end, we propose a new Multimodal Topic-enriched Auxiliary Learning (MTAL) approach, aiming at capturing the topic information inside different modalities (i.e., texts and images) for depression detection. Especially, in our approach, a modality-agnostic topic model is proposed to be capable of mining the topical clues from either the discrete textual signals or the continuous visual signals. On this basis, the topic modeling w.r.t. the two modalities are cast as two auxiliary tasks for improving the performance of the primary task (i.e., depression detection). Finally, the detailed evaluation demonstrates the great advantage of our MTAL approach to depression detection over the state-of-the-art baselines. 
This justifies the importance of the multimodal topic information to depression detection and the effectiveness of our approach in capturing such information.", "phrases": ["auxiliary learning", "depression detection", "topic model"], "overall_score": 1.1922689546047485, "scores": [1.8376579949668495, 0.8623155933413951, 0.5557763252035973], "rank_score": 1.085249971170614} -{"id": "kovelamudi-etal-2011-domain", "title": "Domain Independent Model for Product Attribute Extraction from User Reviews using Wikipedia", "abstract": "The world of E-commerce is expanding, posing a large arena of products, their descriptions, customer and professional reviews that are pertinent to them. Most of the product attribute extraction techniques in literature work on structured descriptions using several text analysis tools. However, attributes in these descriptions are limited compared to those in customer reviews of a product, where users discuss deeper and more specific attributes. In this paper, we propose a novel supervised domain independent model for product attribute extraction from user reviews. The user generated content contains unstructured and semi-structured text where conventional language grammar dependent tools like parts-of-speech taggers, named entity recognizers, parsers do not perform at expected levels. We used Wikipedia and the Web to identify product attributes from customer reviews and achieved an F1 score of 0.73.", "phrases": ["product attribute extraction", "user review", "wikipedia"], "overall_score": 1.192239245256818, "scores": [1.8178229845627811, 0.9008874544949613, 0.5369583466124483], "rank_score": 1.0852229285567303} -{"id": "shutova-etal-2013-statistical", "title": "Statistical Metaphor Processing", "abstract": "Metaphor is highly frequent in language, which makes its computational processing indispensable for real-world NLP applications addressing semantic tasks. Previous approaches to metaphor modeling rely on task-specific hand-coded knowledge and operate on a limited domain or a subset of phenomena. We present the first integrated open-domain statistical model of metaphor processing in unrestricted text. Our method first identifies metaphorical expressions in running text and then paraphrases them with their literal paraphrases. Such a text-to-text model of metaphor interpretation is compatible with other NLP applications that can benefit from metaphor resolution. Our approach is minimally supervised, relies on the state-of-the-art parsing and lexical acquisition technologies (distributional clustering and selectional preference induction), and operates with a high accuracy.", "phrases": ["metaphor", "selectional preference", "increase"], "overall_score": 3.1353675339691334, "scores": [1.840042726050649, 0.8663994045562651, 0.5478460336096809], "rank_score": 1.0847627214055315} -{"id": "dasigi-etal-2012-genre", "title": "Genre Independent Subgroup Detection in Online Discussion Threads: A Study of Implicit Attitude using Textual Latent Semantics", "abstract": "We describe an unsupervised approach to the problem of automatically detecting subgroups of people holding similar opinions in a discussion thread. An intuitive way of identifying this is to detect the attitudes of discussants towards each other or named entities or topics mentioned in the discussion. 
Sentiment tags play an important role in this detection, but we also note another dimension to the detection of people\u2019s attitudes in a discussion: if two persons share the same opinion, they tend to use similar language content. We consider the latter to be an implicit attitude. In this paper, we investigate the impact of implicit and explicit attitude in two genres of social media discussion data, more formal Wikipedia discussions and a debate discussion forum that is much more informal. Experimental results strongly suggest that implicit attitude is an important complement to explicit attitudes (expressed via sentiment) and that it can improve sub-group detection performance independently of genre.", "phrases": ["subgroup", "discussion thread", "implicit attitude"], "overall_score": 1.7454185937704585, "scores": [2.175575773147314, 0.5553793313723204, 0.5225135386988581], "rank_score": 1.0844895477394976} -{"id": "briakou-carpuat-2021-beyond", "title": "Beyond Noise: Mitigating the Impact of Fine-grained Semantic Divergences on Neural Machine Translation", "abstract": "While it has been shown that Neural Machine Translation (NMT) is highly sensitive to noisy parallel training samples, prior work treats all types of mismatches between source and target as noise. As a result, it remains unclear how samples that are mostly equivalent but contain a small number of semantically divergent tokens impact NMT training. To close this gap, we analyze the impact of different types of fine-grained semantic divergences on Transformer models. We show that models trained on synthetic divergences output degenerated text more frequently and are less confident in their predictions. Based on these findings, we introduce a divergent-aware NMT framework that uses factors to help NMT recover from the degradation caused by naturally occurring divergences, improving both translation quality and model calibration on EN-FR tasks.", "phrases": ["noise", "neural machine translation", "divergence output"], "overall_score": 1.7449442911939816, "scores": [1.907810987499678, 0.7832149752786848, 0.5615585781591799], "rank_score": 1.084194846979181} -{"id": "wei-etal-2020-novel", "title": "A Novel Cascade Binary Tagging Framework for Relational Triple Extraction", "abstract": "Extracting relational triples from unstructured text is crucial for large-scale knowledge graph construction. However, few existing works excel in solving the overlapping triple problem where multiple relational triples in the same sentence share the same entities. In this work, we introduce a fresh perspective to revisit the relational triple extraction task and propose a novel cascade binary tagging framework (CasRel) derived from a principled problem formulation. Instead of treating relations as discrete labels as in previous works, our new framework models relations as functions that map subjects to objects in a sentence, which naturally handles the overlapping problem. Experiments show that the CasRel framework already outperforms state-of-the-art methods even when its encoder module uses a randomly initialized BERT encoder, showing the power of the new tagging framework. It enjoys further performance boost when employing a pre-trained BERT encoder, outperforming the strongest baseline by 17.5 and 30.2 absolute gain in F1-score on two public datasets NYT and WebNLG, respectively. In-depth analysis on different scenarios of overlapping triples shows that the method delivers consistent performance gain across all these scenarios. 
The source code and data are released online.", "phrases": ["binary tagging framework", "relational triple extraction", "unstructured text"], "overall_score": 3.247724588103391, "scores": [1.549297427527711, 1.1800583587424553, 0.5229955170561977], "rank_score": 1.084117101108788} -{"id": "long-etal-2017-world", "title": "World Knowledge for Reading Comprehension: Rare Entity Prediction with Hierarchical LSTMs Using External Descriptions", "abstract": "Humans interpret texts with respect to some background information, or world knowledge, and we would like to develop automatic reading comprehension systems that can do the same. In this paper, we introduce a task and several models to drive progress towards this goal. In particular, we propose the task of rare entity prediction: given a web document with several entities removed, models are tasked with predicting the correct missing entities conditioned on the document context and the lexical resources. This task is challenging due to the diversity of language styles and the extremely large number of rare entities. We propose two recurrent neural network architectures which make use of external knowledge in the form of entity descriptions. Our experiments show that our hierarchical LSTM model performs significantly better at the rare entity prediction task than those that do not make use of external resources.", "phrases": ["rare entity prediction", "reading comprehension system", "world knowledge"], "overall_score": 1.9423329472976543, "scores": [1.8420433509741931, 0.8309286016565695, 0.5791380219315112], "rank_score": 1.0840366581874246} -{"id": "liu-etal-2010-pem", "title": "PEM: A Paraphrase Evaluation Metric Exploiting Parallel Texts", "abstract": "We present PEM, the first fully automatic metric to evaluate the quality of paraphrases, and consequently, that of paraphrase generation systems. Our metric is based on three criteria: adequacy, fluency, and lexical dissimilarity. The key component in our metric is a robust and shallow semantic similarity measure based on pivot language N-grams that allows us to approximate adequacy independently of lexical similarity. Human evaluation shows that PEM achieves high correlation with human judgments.", "phrases": ["paraphrase", "automatic metric", "lexical dissimilarity", "pem"], "overall_score": 2.253909542419356, "scores": [2.112379511294004, 1.128924198148827, 0.5581293265224353, 0.5361724566490235], "rank_score": 1.0839013731535725} -{"id": "weller-seppi-2019-humor", "title": "Humor Detection: A Transformer Gets the Last Laugh", "abstract": "Much previous work has been done in attempting to identify humor in text. In this paper we extend that capability by proposing a new task: assessing whether or not a joke is humorous. We present a novel way of approaching this problem by building a model that learns to identify humorous jokes based on ratings gleaned from Reddit pages, consisting of almost 16,000 labeled instances. Using these ratings to determine the level of humor, we then employ a Transformer architecture for its advantages in learning from sentence context. We demonstrate the effectiveness of this approach and show results that are comparable to human performance. We further demonstrate our model's increased capabilities on humor identification problems, such as the previously created datasets for short jokes and puns. 
These experiments show that this method outperforms all previous work done on these tasks, with an F-measure of 93.1% for the Puns dataset and 98.6% on the Short Jokes dataset.", "phrases": ["joke", "human performance", "humor detection", "bert", "annotator"], "overall_score": 3.4438713916800157, "scores": [1.5406883260644066, 1.7520578363683144, 1.0530803218110603, 0.550819624411255, 0.521561976421621], "rank_score": 1.0836416170153313} -{"id": "beigman-klebanov-flor-2013-associative", "title": "Associative Texture Is Lost In Translation", "abstract": "We present a suggestive finding regarding the loss of associative texture in the process of machine translation, using comparisons between (a) original and backtranslated texts, (b) reference and system translations, and (c) better and worse MT systems. We represent the amount of association in a text using word association profile \u2010 a distribution of pointwise mutual information between all pairs of content word types in a text. We use the average of the distribution, which we term lexical tightness, as a single measure of the amount of association in a text. We show that the lexical tightness of human-composed texts is higher than that of the machine translated materials; human references are tighter than machine translations, and better MT systems produce lexically tighter translations. While the phenomenon of the loss of associative texture has been theoretically predicted by translation scholars, we present a measure capable of quantifying the extent of this phenomenon.", "phrases": ["loss", "machine translation", "lexical tightness", "associative texture"], "overall_score": 1.7437057002665846, "scores": [2.041643906833844, 0.8966228040663368, 0.8713820909727321, 0.5240522667925284], "rank_score": 1.0834252671663605} -{"id": "xu-etal-2020-end", "title": "End-to-End Slot Alignment and Recognition for Cross-Lingual NLU", "abstract": "Natural language understanding (NLU) in the context of goal-oriented dialog systems typically includes intent classification and slot labeling tasks. Existing methods to expand an NLU system to new languages use machine translation with slot label projection from source to the translated utterances, and thus are sensitive to projection errors. In this work, we propose a novel end-to-end model that learns to align and predict target slot labels jointly for cross-lingual transfer. We introduce MultiATIS++, a new multilingual NLU corpus that extends the Multilingual ATIS corpus to nine languages across four language families, and evaluate our method using the corpus. Results show that our method outperforms a simple label projection method using fast-align on most languages, and achieves competitive performance to the more complex, state-of-the-art projection method with only half of the training time. We release our MultiATIS++ corpus to the community to continue future research on cross-lingual NLU.", "phrases": ["slot alignment", "cross-lingual nlu", "natural language understanding"], "overall_score": 3.3969202516707493, "scores": [1.8501537385155633, 0.848211865733334, 0.5517634195076948], "rank_score": 1.0833763412521973} -{"id": "barbosa-feng-2010-robust", "title": "Robust Sentiment Detection on Twitter from Biased and Noisy Data", "abstract": "In this paper, we propose an approach to automatically detect sentiments on Twitter messages (tweets) that explores some characteristics of how tweets are written and meta-information of the words that compose these messages. 
Moreover, we leverage sources of noisy labels as our training data. These noisy labels were provided by a few sentiment detection websites over twitter data. In our experiments, we show that since our features are able to capture a more abstract representation of tweets, our solution is more effective than previous ones and also more robust regarding biased and noisy data, which is the kind of data provided by these sources.", "phrases": ["twitter", "sentiment analysis", "machine translation", "pos tagging"], "overall_score": 3.968890768434525, "scores": [1.740337169920926, 1.541384999138232, 0.5311903059451328, 0.5204561447130189], "rank_score": 1.0833421549293274} -{"id": "toutanova-etal-2005-joint", "title": "Joint Learning Improves Semantic Role Labeling", "abstract": "Despite much recent progress on accurate semantic role labeling, previous work has largely used independent classifiers, possibly combined with separate label sequence models via Viterbi decoding. This stands in stark contrast to the linguistic observation that a core argument frame is a joint structure, with strong dependencies between arguments. We show how to build a joint model of argument frames, incorporating novel features that model these interactions into discriminative log-linear models. This system achieves an error reduction of 22% on all arguments and 32% on core arguments over a state-of-the art independent classifier for gold-standard parse trees on PropBank.", "phrases": ["semantic role labeling", "argument frame", "strong dependency", "error reduction", "joint learning"], "overall_score": 3.348160899646114, "scores": [2.903385543814114, 0.9033060430452649, 0.5677547226957524, 0.5214957074960963, 0.5199669362671713], "rank_score": 1.0831817906636796} -{"id": "bach-etal-2011-goodness", "title": "Goodness: A Method for Measuring Machine Translation Confidence", "abstract": "State-of-the-art statistical machine translation (MT) systems have made significant progress towards producing user-acceptable translation output. However, there is still no efficient way for MT systems to inform users which words are likely translated correctly and how confident it is about the whole sentence. We propose a novel framework to predict word-level and sentence-level MT errors with a large number of novel features. Experimental results show that the MT error prediction accuracy is increased from 69.1 to 72.2 in F-score. The Pearson correlation between the proposed confidence measure and the human-targeted translation edit rate (HTER) is 0.6. Improvements between 0.4 and 0.9 TER reduction are obtained with the n-best list reranking task using the proposed confidence measure. Also, we present a visualization prototype of MT errors at the word and sentence levels with the objective to improve post-editor productivity.", "phrases": ["machine translation", "f-score", "goodness"], "overall_score": 3.1306750684374856, "scores": [1.4425915399048221, 1.2617512527388273, 0.5450749266011226], "rank_score": 1.0831392397482573} -{"id": "michel-etal-2019-evaluation", "title": "On Evaluation of Adversarial Perturbations for Sequence-to-Sequence Models", "abstract": "Adversarial examples \u2014 perturbations to the input of a model that elicit large changes in the output \u2014 have been shown to be an effective way of assessing the robustness of sequence-to-sequence (seq2seq) models. 
However, these perturbations only indicate weaknesses in the model if they do not change the input so significantly that it legitimately results in changes in the expected output. This fact has largely been ignored in the evaluations of the growing body of related literature. Using the example of untargeted attacks on machine translation (MT), we propose a new evaluation framework for adversarial attacks on seq2seq models that takes the semantic equivalence of the pre- and post-perturbation input into account. Using this framework, we demonstrate that existing methods may not preserve meaning in general, breaking the aforementioned assumption that source side perturbations should not result in changes in the expected output. We further use this framework to demonstrate that adding additional constraints on attacks allows for adversarial perturbations that are more meaning-preserving, but nonetheless largely change the output sequence. Finally, we show that performing untargeted adversarial training with meaning-preserving attacks is beneficial to the model in terms of adversarial robustness, without hurting test performance. A toolkit implementing our evaluation framework is released at .", "phrases": ["perturbation", "sequence-to-sequence model", "attack", "machine translation", "adversarial training"], "overall_score": 3.068450024598336, "scores": [1.400352644732521, 1.3771358386853199, 1.2456218512144774, 0.8308898732103143, 0.5611409269321658], "rank_score": 1.0830282269549598} -{"id": "sapkota-etal-2016-domain", "title": "Domain Adaptation for Authorship Attribution: Improved Structural Correspondence Learning", "abstract": "We present the first domain adaptation model for authorship attribution to leverage unlabeled data. The model includes extensions to structural correspondence learning needed to make it appropriate for the task. For example, we propose a median-based classification instead of the standard binary classification used in previous work. Our results show that punctuation-based character n-grams form excellent pivot features. We also show how singular value decomposition plays a critical role in achieving domain adaptation, and that replacing (instead of concatenating) non-pivot features with correspondence features yields better performance.", "phrases": ["authorship attribution", "pivot feature", "domain adaptation"], "overall_score": 1.7429153299642952, "scores": [1.8131248846359052, 0.9101298005791241, 0.5255478622438994], "rank_score": 1.0829341824863097} -{"id": "ettinger-etal-2016-probing", "title": "Probing for semantic evidence of composition by means of simple classification tasks", "abstract": "We propose a diagnostic method for probing specific information captured in vector representations of sentence meaning, via simple classification tasks with strategically constructed sentence sets. 
We identify some key types of semantic information that we might expect to be captured in sentence composition, and illustrate example classification tasks for targeting this information.", "phrases": ["classification task", "linguistic knowledge", "body"], "overall_score": 2.8575660183535203, "scores": [1.8087860211291618, 0.8936758343522185, 0.5459321672094236], "rank_score": 1.0827980075636015} -{"id": "kobayashi-etal-2016-dynamic", "title": "Dynamic Entity Representation with Max-pooling Improves Machine Reading", "abstract": "We propose a novel neural network model for machine reading, DER Network, which explicitly implements a reader building dynamic meaning representations for entities by gathering and accumulating information around the entities as it reads a document. Evaluated on a recent large scale dataset (Hermann et al., 2015), our model exhibits better results than previous research, and we find that max-pooling is suited for modeling the accumulation of information on entities. Further analysis suggests that our model can put together multiple pieces of information encoded in different sentences to answer complicated questions. Our code for the model is available at https://github.com/soskek/der-network", "phrases": ["machine reading", "dynamic entity representation", "discourse"], "overall_score": 2.2516030364372077, "scores": [1.8023776520041404, 0.9158479742931299, 0.5301509084212215], "rank_score": 1.0827921782394974} -{"id": "botha-etal-2020-entity", "title": "Entity Linking in 100 Languages", "abstract": "We propose a new formulation for multilingual entity linking, where language-specific mentions resolve to a language-agnostic Knowledge Base. We train a dual encoder in this new setting, building on prior work with improved feature representation, negative mining, and an auxiliary entity-pairing task, to obtain a single entity retrieval model that covers 100+ languages and 20 million entities. The model outperforms state-of-the-art results from a far more limited cross-lingual linking task. Rare entities and low-resource languages pose challenges at this large scale, so we advocate for an increased focus on zero- and few-shot evaluation. To this end, we provide Mewsli-9, a large new multilingual dataset matched to our setting, and show how frequency-based analysis provided key insights for our model and training enhancements.", "phrases": ["formulation", "entity linking", "wikidata item", "hyperlink"], "overall_score": 2.596320443223668, "scores": [2.6567084993350574, 0.5735199539572476, 0.5609051434172218, 0.5398652966553743], "rank_score": 1.0827497233412253} -{"id": "chen-etal-2020-seqvat", "title": "SeqVAT: Virtual Adversarial Training for Semi-Supervised Sequence Labeling", "abstract": "Virtual adversarial training (VAT) is a powerful technique to improve model robustness in both supervised and semi-supervised settings. It is effective and can be easily adopted on lots of image classification and text classification tasks. However, its benefits to sequence labeling tasks such as named entity recognition (NER) have not been shown as significant, mostly because the previous approach cannot combine VAT with the conditional random field (CRF). CRF can significantly boost accuracy for sequence models by putting constraints on label transitions, which makes it an essential component in most state-of-the-art sequence labeling model architectures. In this paper, we propose SeqVAT, a method which naturally applies VAT to sequence labeling models with CRF. 
Empirical studies show that SeqVAT not only significantly improves the sequence labeling performance over baselines under supervised settings, but also outperforms state-of-the-art approaches under semi-supervised settings.", "phrases": ["virtual adversarial training", "image classification", "seqvat"], "overall_score": 1.9397764780851674, "scores": [1.9190275979403217, 0.7849313569756947, 0.543870641744437], "rank_score": 1.0826098655534846} -{"id": "hermann-etal-2014-semantic", "title": "Semantic Frame Identification with Distributed Word Representations", "abstract": "We present a novel technique for semantic frame identification using distributed representations of predicates and their syntactic context; this technique leverages automatic syntactic parses and a generic set of word embeddings. Given labeled data annotated with frame-semantic parses, we learn a model that projects the set of word representations for the syntactic context around a predicate to a low dimensional representation. The latter is used for semantic frame identification; with a standard argument identification method inspired by prior work, we achieve state-of-the-art results on FrameNet-style frame-semantic analysis. Additionally, we report strong results on PropBank-style semantic role labeling in comparison to prior work.", "phrases": ["frame", "word embedding", "wsabie algorithm"], "overall_score": 3.1262739567108686, "scores": [1.3817631776384056, 1.2999408565718045, 0.5631456445588752], "rank_score": 1.0816165595896952} -{"id": "zhong-etal-2020-extractive", "title": "Extractive Summarization as Text Matching", "abstract": "This paper creates a paradigm shift with regard to the way we build neural extractive summarization systems. Instead of following the commonly used framework of extracting sentences individually and modeling the relationship between sentences, we formulate the extractive summarization task as a semantic text matching problem, in which a source document and candidate summaries (extracted from the original text) will be matched in a semantic space. Notably, this paradigm shift to a semantic matching framework is well-grounded in our comprehensive analysis of the inherent gap between sentence-level and summary-level extractors based on the property of the dataset. Besides, even instantiating the framework with a simple form of a matching model, we have driven the state-of-the-art extractive result on CNN/DailyMail to a new level (44.41 in ROUGE-1). Experiments on the other five datasets also show the effectiveness of the matching framework. We believe the power of this matching-based summarization framework has not been fully exploited. To encourage more instantiations in the future, we have released our codes, processed dataset, as well as generated summaries in .", "phrases": ["paradigm", "semantic space", "extractive summarization", "matchsum"], "overall_score": 3.292751316022053, "scores": [2.3077511484561115, 0.9153537812871317, 0.5615497367938044, 0.5414771106151627], "rank_score": 1.0815329442880526} -{"id": "chambers-etal-2014-dense", "title": "Dense Event Ordering with a Multi-Pass Architecture", "abstract": "The past 10 years of event ordering research has focused on learning partial orderings over document events and time expressions. The most popular corpus, the TimeBank, contains a small subset of the possible ordering graph. Many evaluations follow suit by only testing certain pairs of events (e.g., only main verbs of neighboring sentences). 
This has led most research to focus on specific learners for partial labelings. This paper attempts to nudge the discussion from identifying some relations to all relations. We present new experiments on strongly connected event graphs that contain \u223c10 times more relations per document than the TimeBank. We also describe a shift away from the single learner to a sieve-based architecture that naturally blends multiple learners into a precision-ranked cascade of sieves. Each sieve adds labels to the event graph one at a time, and earlier sieves inform later ones through transitive closure. This paper thus describes innovations in both approach and task. We experiment on the densest event graphs to date and show a 14% gain over state-of-the-art.", "phrases": ["ordering", "sieve", "temporal relation extraction", "caevo", "task-specific model"], "overall_score": 3.844947525746899, "scores": [1.3522390010745455, 1.2407785386337147, 1.160242627937201, 1.072626514916736, 0.5813863315687533], "rank_score": 1.0814546028261902} -{"id": "lee-etal-2010-qualia", "title": "Qualia Modification in Noun-Noun Compounds: A Cross-Language Survey", "abstract": "In analyzing the formation of a given compound, both its internal syntactic structure and semantic relations need to be considered. The Generative Lexicon Theory (GL Theory) provides us with an explanatory model of compounds that captures the qualia modification relations in the semantic composition within a compound, which can be applied to natural language processing tasks. In this paper, we primarily discuss the qualia structure of noun-noun compounds found in Chinese as well as a couple of other languages like German, Spanish, Japanese and Italian. We briefly review the construction of compounds and focus on the noun-noun construction. While analyzing the semantic relationship between the words that compose a compound, we use the GL Theory to demonstrate that the proposed qualia structure enables compositional interpretation within the compound. Besides, we attempt to examine whether or not for each semantic head, its modifier can fit in one of the four quales. Finally, our analysis reveals the potentials and limits of qualia-based treatment of composition of nominal compounds and suggests a path for future work.", "phrases": ["noun-noun compound", "other language", "qualia modification"], "overall_score": 1.187581130887046, "scores": [1.9010200702218283, 0.8157146156127058, 0.5262141045712146], "rank_score": 1.0809829301352496} -{"id": "bingel-sogaard-2016-text", "title": "Text Simplification as Tree Labeling", "abstract": "We present a new, structured approach to text simplification using conditional random fields over top-down traversals of dependency graphs that jointly predicts possible compressions and paraphrases. Our model reaches readability scores comparable to word-based compression approaches across a range of metrics and human judgements while maintaining more of the important information.", "phrases": ["structured approach", "paraphrase", "text simplification"], "overall_score": 1.4985405853710216, "scores": [2.1037010489170394, 0.5715944781005861, 0.5676100796108008], "rank_score": 1.0809685355428087} -{"id": "lewis-etal-2019-unsupervised", "title": "Unsupervised Question Answering by Cloze Translation", "abstract": "Obtaining training data for Question Answering (QA) is time-consuming and resource-intensive, and existing QA datasets are only available for limited domains and languages. 
In this work, we explore to what extent high quality training data is actually required for Extractive QA, and investigate the possibility of unsupervised Extractive QA. We approach this problem by first learning to generate context, question and answer triples in an unsupervised manner, which we then use to synthesize Extractive QA training data automatically. To generate such triples, we first sample random context paragraphs from a large corpus of documents and then random noun phrases or Named Entity mentions from these paragraphs as answers. Next we convert answers in context to \u201cfill-in-the-blank\u201d cloze questions and finally translate them into natural questions. We propose and compare various unsupervised ways to perform cloze-to-natural question translation, including training an unsupervised NMT model using non-aligned corpora of natural questions and cloze questions as well as a rule-based approach. We find that modern QA models can learn to answer human questions surprisingly well using only synthetic training data. We demonstrate that, without using the SQuAD training data at all, our approach achieves 56.4 F1 on SQuAD v1 (64.5 F1 when the answer is a Named Entity mention), outperforming early supervised models.", "phrases": ["cloze translation", "triple", "large corpus", "noun phrase", "unsupervised question"], "overall_score": 3.5210297834164597, "scores": [2.1680662646834907, 0.923696875747842, 0.9082242545436412, 0.848515573082327, 0.5550044822692737], "rank_score": 1.080701490065315} -{"id": "louis-etal-2010-discourse", "title": "Discourse indicators for content selection in summarization", "abstract": "We present analyses aimed at eliciting which specific aspects of discourse provide the strongest indication for text importance. In the context of content selection for single document summarization of news, we examine the benefits of both the graph structure of text provided by discourse relations and the semantic sense of these relations. We find that structure information is the most robust indicator of importance. Semantic sense only provides constraints on content selection but is not indicative of important content by itself. However, sense features complement structure information and lead to improved performance. Further, both types of discourse information prove complementary to non-discourse features. While our results establish the usefulness of discourse features, we also find that lexical overlap provides a simple and cheap alternative to discourse for computing text structure with comparable performance for the task of content selection.", "phrases": ["indicator", "content selection", "document summarization", "discourse relation"], "overall_score": 2.487633012096024, "scores": [1.7255857923151443, 1.4366666843337654, 0.6167180802009444, 0.5424906037648197], "rank_score": 1.0803652901536684} -{"id": "francois-miltsakaki-2012-nlp", "title": "Do NLP and machine learning improve traditional readability formulas?", "abstract": "Readability formulas are methods used to match texts with the readers' reading level. Several methodological paradigms have previously been investigated in the field. The most popular paradigm dates several decades back and gave rise to well known readability formulas such as the Flesch formula (among several others). This paper compares this approach (henceforth \"classic\") with an emerging paradigm which uses sophisticated NLP-enabled features and machine learning techniques. 
Our experiments, carried out on a corpus of texts for French as a foreign language, yield four main results: (1) the new readability formula performed better than the \"classic\" formula; (2) \"non-classic\" features were slightly more informative than \"classic\" features; (3) modern machine learning algorithms did not improve the explanatory power of our readability model, but allowed us to better classify new observations; and (4) combining \"classic\" and \"non-classic\" features resulted in a significant gain in performance.", "phrases": ["machine learning", "readability formula", "significant gain"], "overall_score": 2.590423807674811, "scores": [1.9319724197553994, 0.7883121257046442, 0.5205873604907414], "rank_score": 1.0802906353169284} -{"id": "marcheggiani-titov-2020-graph", "title": "Graph Convolutions over Constituent Trees for Syntax-Aware Semantic Role Labeling", "abstract": "Semantic role labeling (SRL) is the task of identifying predicates and labeling argument spans with semantic roles. Even though most semantic-role formalisms are built upon constituent syntax, and only syntactic constituents can be labeled as arguments (e.g., FrameNet and PropBank), all the recent work on syntax-aware SRL relies on dependency representations of syntax. In contrast, we show how graph convolutional networks (GCNs) can be used to encode constituent structures and inform an SRL system. Nodes in our SpanGCN correspond to constituents. The computation is done in 3 stages. First, initial node representations are produced by `composing' word representations of the first and last words in the constituent. Second, graph convolutions relying on the constituent tree are performed, yielding syntactically-informed constituent representations. Finally, the constituent representations are `decomposed' back into word representations, which are used as input to the SRL classifier. We evaluate SpanGCN against alternatives, including a model using GCNs over dependency trees, and show its effectiveness on standard English SRL benchmarks CoNLL-2005, CoNLL-2012, and FrameNet.", "phrases": ["semantic role labeling", "graph convolution", "pos tag"], "overall_score": 2.1009529318790214, "scores": [1.8703547955088664, 0.8471953712167823, 0.5214785205428429], "rank_score": 1.079676229089497} -{"id": "tatsumi-2009-correlation", "title": "Correlation between Automatic Evaluation Metric Scores, Post-Editing Speed, and Some Other Factors", "abstract": "This paper summarises the results of a pilot project conducted to investigate the correlation between automatic evaluation metric scores and post-editing speed on a segment-by-segment basis. Firstly, the results from the comparison of various automatic metrics and post-editing speed will be reported. Secondly, further analysis is carried out by taking into consideration other relevant variables, such as text length and structures, and by means of multiple regression. It has been found that different automatic metrics achieve different levels and types of correlation with post-editing speed. 
We suggest that some of the source text characteristics and machine translation errors may be able to account for the gap between the automatic metric scores and post-editing speed, and may also help with understanding the human post-editing process.", "phrases": ["speed", "automatic metric", "post-editing time"], "overall_score": 2.0999015975840103, "scores": [1.7017279364436255, 0.9464745515411879, 0.5892053624292293], "rank_score": 1.0791359501380142} -{"id": "peng-dredze-2015-named", "title": "Named Entity Recognition for Chinese Social Media with Jointly Trained Embeddings", "abstract": "We consider the task of named entity recognition for Chinese social media. The long line of work in Chinese NER has focused on formal domains, and NER for social media has been largely restricted to English. We present a new corpus of Weibo messages annotated for both name and nominal mentions. Additionally, we evaluate three types of neural embeddings for representing Chinese text. Finally, we propose a joint training objective for the embeddings that makes use of both (NER) labeled and unlabeled raw text. Our methods yield a 9% improvement over a state-of-the-art baseline.", "phrases": ["entity recognition", "neural embedding", "chinese text"], "overall_score": 3.057088993478162, "scores": [1.8056165422361135, 0.8934490688175996, 0.537989233289316], "rank_score": 1.0790182814476763} -{"id": "li-etal-2021-structurallm", "title": "StructuralLM: Structural Pre-training for Form Understanding", "abstract": "Large pre-trained language models achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, they almost exclusively focus on text-only representation, while neglecting cell-level layout information that is important for form image understanding. In this paper, we propose a new pre-training approach, StructuralLM, to jointly leverage cell and layout information from scanned documents. Specifically, we pre-train StructuralLM with two new designs to make the most of the interactions of cell and layout information: 1) each cell as a semantic unit; 2) classification of cell positions. The pre-trained StructuralLM achieves new state-of-the-art results in different types of downstream tasks, including form understanding (from 78.95 to 85.14), document visual question answering (from 72.59 to 83.94) and document image classification (from 94.43 to 96.08).", "phrases": ["form understanding", "pre-training approach", "structurallm"], "overall_score": 1.495615574390937, "scores": [1.72815505957368, 0.916376844938793, 0.5920438538626823], "rank_score": 1.0788585861250517} -{"id": "gkatzia-etal-2015-game", "title": "A Game-Based Setup for Data Collection and Task-Based Evaluation of Uncertain Information Presentation", "abstract": "Decision-making is often dependent on uncertain data, e.g. data associated with confidence scores, such as probabilities. A concrete example of such data is weather data. We will demo a game-based setup for exploring the effectiveness of different approaches (graphics vs NLG) to communicating uncertainty in rainfall and temperature predictions (www.macs.hw.ac.uk/InteractionLab/weathergame/). The game incorporates a natural language extension of the MetOffice Weather game. 
The extended version of the game can be used in three ways: (1) to compare the effectiveness of different information presentations of uncertain data; (2) to collect data for the development of effective data-driven approaches; and (3) to serve as a task-based evaluation setup for Natural Language Generation (NLG).", "phrases": ["game-based setup", "task-based evaluation", "decision-making"], "overall_score": 1.18505528998175, "scores": [1.8535324991437092, 0.8528795841721963, 0.5296393486731862], "rank_score": 1.0786838106630305} -{"id": "wang-etal-2019-self", "title": "Self-Attention with Structural Position Representations", "abstract": "Although self-attention networks (SANs) have advanced the state-of-the-art on various NLP tasks, one criticism of SANs concerns their ability to encode the positions of input words (Shaw et al., 2018). In this work, we propose to augment SANs with structural position representations to model the latent structure of the input sentence, which is complementary to the standard sequential positional representations. Specifically, we use a dependency tree to represent the grammatical structure of a sentence, and propose two strategies to encode the positional relationships among words in the dependency tree. Experimental results on NIST Chinese-to-English and WMT14 English-to-German translation tasks show that the proposed approach consistently boosts performance over both the absolute and relative sequential position representations.", "phrases": ["position", "dependency tree", "self-attention"], "overall_score": 2.369809348382456, "scores": [1.0054288328795529, 1.363495232487981, 0.8667160774195101], "rank_score": 1.078546714262348} -{"id": "selfridge-etal-2011-stability", "title": "Stability and Accuracy in Incremental Speech Recognition", "abstract": "Conventional speech recognition approaches usually wait until the user has finished talking before returning a recognition hypothesis. This results in spoken dialogue systems that are unable to react while the user is still speaking. Incremental Speech Recognition (ISR), where partial phrase results are returned during user speech, has been used to create more reactive systems. However, ISR output is unstable and so prone to revision as more speech is decoded. This paper tackles the problem of stability in ISR. We first present a method that increases the stability and accuracy of ISR output, without adding delay. Given that some revisions are unavoidable, we next present a pair of methods for predicting the stability and accuracy of ISR results. Taken together, we believe these approaches give ISR more utility for real spoken dialogue systems.", "phrases": ["incremental speech recognition", "isr", "delay", "stability"], "overall_score": 1.7358046399728115, "scores": [2.230043541635424, 0.9900707615004224, 0.5554344866189387, 0.5385154597883247], "rank_score": 1.0785160623857775} -{"id": "bergsma-etal-2010-creating", "title": "Creating Robust Supervised Classifiers via Web-Scale N-Gram Data", "abstract": "In this paper, we systematically assess the value of using web-scale N-gram data in state-of-the-art supervised NLP classifiers. We compare classifiers that include or exclude features for the counts of various N-grams, where the counts are obtained from a web-scale auxiliary corpus. We show that including N-gram count features can advance the state-of-the-art accuracy on standard data sets for adjective ordering, spelling correction, noun compound bracketing, and verb part-of-speech disambiguation. 
More importantly, when operating on new domains, or when labeled training data is not plentiful, we show that using web-scale N-gram features is essential for achieving robust performance.", "phrases": ["web-scale n-gram data", "ordering", "spelling correction", "noun compound bracketing", "disambiguation"], "overall_score": 2.369567485957434, "scores": [2.119537835234257, 1.258697874254776, 0.890930223003377, 0.5631130058736105, 0.5599042512789645], "rank_score": 1.078436637928997} -{"id": "lo-etal-2012-fully", "title": "Fully Automatic Semantic MT Evaluation", "abstract": "We introduce the first fully automatic, fully semantic frame based MT evaluation metric, MEANT, that outperforms all other commonly used automatic metrics in correlating with human judgment on translation adequacy. Recent work on HMEANT, which is a human metric, indicates that machine translation can be better evaluated via semantic frames than other evaluation paradigms, requiring only minimal effort from monolingual humans to annotate and align semantic frames in the reference and machine translations. We propose a surprisingly effective Occam's razor automation of HMEANT that combines standard shallow semantic parsing with a simple maximum weighted bipartite matching algorithm for aligning semantic frames. The matching criterion is based on lexical similarity scoring of the semantic role fillers through a simple context vector model which can readily be trained using any publicly available large monolingual corpus. Sentence level correlation analysis, following standard NIST MetricsMATR protocol, shows that this fully automated version of HMEANT achieves significantly higher Kendall correlation with human adequacy judgments than BLEU, NIST, METEOR, PER, CDER, WER, or TER. Furthermore, we demonstrate that performing the semantic frame alignment automatically actually tends to be just as good as performing it manually. Despite its high performance, fully automated MEANT is still able to preserve HMEANT's virtues of simplicity, representational transparency, and inexpensiveness.", "phrases": ["semantic frame", "meant", "role filler", "development set", "propose hmeant"], "overall_score": 2.9898821260362394, "scores": [1.7420994046692726, 1.3295165475322712, 0.9158416251314607, 0.8382670543724964, 0.5661355133882902], "rank_score": 1.0783720290187584} -{"id": "chiruzzo-etal-2020-development", "title": "Development of a Guarani - Spanish Parallel Corpus", "abstract": "This paper presents the development of a Guarani - Spanish parallel corpus with sentence-level alignment. The Guarani sentences of the corpus use the Jopara Guarani dialect, the dialect of Guarani spoken in Paraguay, which is based on Guarani grammar and may include several Spanish loanwords or neologisms. The corpus has around 14,500 sentence pairs aligned using a semi-automatic process, containing 228,000 Guarani tokens and 336,000 Spanish tokens extracted from web sources.", "phrases": ["guarani", "spanish parallel corpus", "web source"], "overall_score": 1.932151056034178, "scores": [1.7392160838433004, 0.9689128370645412, 0.5269331885168255], "rank_score": 1.0783540364748891} -{"id": "lewis-etal-2015-joint", "title": "Joint A* CCG Parsing and Semantic Role Labelling", "abstract": "Joint models of syntactic and semantic parsing have the potential to improve performance on both tasks\u2014but to date, the best results have been achieved with pipelines. 
We introduce a joint model using CCG, which is motivated by the close link between CCG syntax and semantics. Semantic roles are recovered by labelling the deep dependency structures produced by the grammar. Furthermore, because CCG is lexicalized, we show it is possible to factor the parsing model over words and introduce a new A* parsing algorithm\u2014which we demonstrate is faster and more accurate than adaptive supertagging. Our joint model is the first to substantially improve both syntactic and semantic accuracy over a comparable pipeline, and also achieves state-of-the-art results for a non-ensemble semantic role labelling model.", "phrases": ["ccg", "semantic role labeling", "joint model"], "overall_score": 2.9199809544846125, "scores": [1.3713069550331694, 1.2553477117358018, 0.6081239425376184], "rank_score": 1.0782595364355299} -{"id": "wang-fu-2020-item", "title": "Item-based Collaborative Filtering with BERT", "abstract": "In e-commerce, recommender systems have become an indispensable part of helping users explore the available inventory. In this work, we present a novel approach for item-based collaborative filtering, by leveraging BERT to understand items, and score relevancy between different items. Our proposed method could address problems that plague traditional recommender systems such as cold start, and \u201cmore of the same\u201d recommended content. We conducted experiments on a large-scale real-world dataset with full cold-start scenario, and the proposed approach significantly outperforms the popular Bi-LSTM model.", "phrases": ["bert", "e-commerce", "item-based collaborative filtering"], "overall_score": 1.1845271822086805, "scores": [1.6877534694462355, 1.0168333885755252, 0.5300224607345277], "rank_score": 1.078203106252096} -{"id": "saito-etal-2014-morphological", "title": "Morphological Analysis for Japanese Noisy Text based on Character-level and Word-level Normalization", "abstract": "Social media texts are often written in a non-standard style and include many lexical variants such as insertions, phonetic substitutions, abbreviations that mimic spoken language. The normalization of such a variety of non-standard tokens is one promising solution for handling noisy text. A normalization task is very difficult to conduct in Japanese morphological analysis because there are no explicit boundaries between words. To address this issue, in this paper we propose a novel method for normalizing and morphologically analyzing Japanese noisy text. We generate both character-level and word-level normalization candidates and use discriminative methods to formulate a cost function. Experimental results show that the proposed method achieves acceptable levels in both accuracy and recall for word segmentation, POS tagging, and normalization. These levels exceed those achieved with the conventional rule-based system.", "phrases": ["japanese noisy text", "non-standard token", "morphological analysis"], "overall_score": 2.369024769595026, "scores": [1.7681862812738058, 0.8858249904488297, 0.580557639481361], "rank_score": 1.0781896370679989} -{"id": "li-etal-2014-weakly", "title": "Weakly Supervised User Profile Extraction from Twitter", "abstract": "While user attribute extraction on social media has received considerable attention, existing approaches, mostly supervised, encounter great difficulty in obtaining gold standard data and are therefore limited to predicting unary predicates (e.g., gender). 
In this paper, we present a weakly-supervised approach to user profile extraction from Twitter. Users\u2019 profiles from social media websites such as Facebook or Google Plus are used as a distant source of supervision for extraction of their attributes from user-generated text. In addition to traditional linguistic features used in distant supervision for information extraction, our approach also takes into account network information, a unique opportunity offered by social media. We test our algorithm on three attribute domains: spouse, education and job; experimental results demonstrate our approach is able to make accurate predictions for users\u2019 attributes based on their tweets.", "phrases": ["twitter", "facebook", "distant supervision", "job"], "overall_score": 2.241674538852758, "scores": [2.38458994736265, 0.8440145096826437, 0.5430244735002511, 0.5404413901076924], "rank_score": 1.0780175801633094} -{"id": "mirza-tonelli-2016-catena", "title": "CATENA: CAusal and TEmporal relation extraction from NAtural language texts", "abstract": "We present CATENA, a sieve-based system to perform temporal and causal relation extraction and classification from English texts, exploiting the interaction between the temporal and the causal model. We evaluate the performance of each sieve, showing that the rule-based, the machine-learned and the reasoning components all contribute to achieving state-of-the-art performance on TempEval-3 and TimeBank-Dense data. Although causal relations are much sparser than temporal ones, the architecture and the selected features are mostly suitable to serve both tasks. The effects of the interaction between the temporal and the causal components, although limited, yield promising results and confirm the tight connection between the temporal and the causal dimension of texts.", "phrases": ["causal", "temporal relation extraction", "natural language text"], "overall_score": 3.281394468921264, "scores": [1.51764212448154, 0.9037752344746431, 0.8119907068831662], "rank_score": 1.0778026886131162} -{"id": "dong-etal-2019-editnts", "title": "EditNTS: An Neural Programmer-Interpreter Model for Sentence Simplification through Explicit Editing", "abstract": "We present the first sentence simplification model that learns explicit edit operations (ADD, DELETE, and KEEP) via a neural programmer-interpreter approach. Most current neural sentence simplification systems are variants of sequence-to-sequence models adopted from machine translation. These methods learn to simplify sentences as a byproduct of the fact that they are trained on complex-simple sentence pairs. By contrast, our neural programmer-interpreter is directly trained to predict explicit edit operations on targeted parts of the input sentence, resembling the way that humans perform simplification and revision. 
Our model outperforms previous state-of-the-art neural sentence simplification models (without external knowledge) by large margins on three benchmark text simplification corpora in terms of SARI (+0.95 WikiLarge, +1.89 WikiSmall, +1.41 Newsela), and is judged by humans to produce overall better and simpler output sentences.", "phrases": ["sentence simplification", "neural programmer-interpreter approach", "editnt", "interpreter"], "overall_score": 3.5522067311131096, "scores": [1.848860261333857, 1.3228724013638593, 0.6016395447009293, 0.5377716695974122], "rank_score": 1.0777859692490144} -{"id": "alam-etal-2022-survey", "title": "A Survey on Multimodal Disinformation Detection", "abstract": "Recent years have witnessed the proliferation of offensive content online such as fake news, propaganda, misinformation, and disinformation. While initially this was mostly about textual content, over time images and videos gained popularity, as they are much easier to consume, attract more attention, and spread further than text. As a result, researchers started leveraging different modalities and combinations thereof to tackle online multimodal offensive content. In this study, we offer a survey on the state-of-the-art on multimodal disinformation detection covering various combinations of modalities: text, images, speech, video, social media network structure, and temporal information. Moreover, while some studies focused on factuality, others investigated how harmful the content is. While these two components in the definition of disinformation \u2013 (i) factuality, and (ii) harmfulness \u2013 are equally important, they are typically studied in isolation. Thus, we argue for the need to tackle disinformation detection by taking into account multiple modalities as well as both factuality and harmfulness, in the same framework. Finally, we discuss current challenges and future research directions.", "phrases": ["survey", "multimodal disinformation detection", "factuality", "harmfulness"], "overall_score": 1.7342118192294012, "scores": [2.2222281051618062, 1.0204247387966194, 0.5357927737153743, 0.5316599311798217], "rank_score": 1.0775263872134053} -{"id": "bonin-etal-2010-contrastive-filtering", "title": "Contrastive Filtering of Domain-Specific Multi-Word Terms from Different Types of Corpora", "abstract": "In this paper we tackle the challenging task of Multi-word term (MWT) extraction from different types of specialized corpora. Contrastive filtering of previously extracted MWTs results in a considerable increment of acquired domain-specific terms.", "phrases": ["filtering", "multi-word term", "wikipedia", "sublanguage"], "overall_score": 1.4936965744105266, "scores": [2.253836234267998, 0.9421394888010509, 0.5604869550979185, 0.5534346028228317], "rank_score": 1.0774743202474497} -{"id": "flanigan-etal-2016-generation", "title": "Generation from Abstract Meaning Representation using Tree Transducers", "abstract": "Language generation from purely semantic representations is a challenging task. This paper addresses generating English from the Abstract Meaning Representation (AMR), consisting of re-entrant graphs whose nodes are concepts and edges are relations. The new method is trained statistically from AMR-annotated English and consists of two major steps: (i) generating an appropriate spanning tree for the AMR, and (ii) applying tree-to-string transducers to generate English. The method relies on discriminative learning and an argument realization model to overcome data sparsity. 
Initial tests on held-out data show good promise despite the complexity of the task. The system is available open-source as part of JAMR at:", "phrases": ["language generation", "semantic representation", "tree-to-string transducer", "amr graph", "statistical method"], "overall_score": 3.5509957453931205, "scores": [1.6130460494997383, 1.5301535662031827, 1.0438813356005499, 0.6057100432161471, 0.5943017072167549], "rank_score": 1.0774185403472747} -{"id": "iyyer-etal-2016-feuding", "title": "Feuding Families and Former Friends: Unsupervised Learning for Dynamic Fictional Relationships", "abstract": "Understanding how a fictional relationship between two characters changes over time (e.g., from best friends to sworn enemies) is a key challenge in digital humanities scholarship. We present a novel unsupervised neural network for this task that incorporates dictionary learning to generate interpretable, accurate relationship trajectories. While previous work on characterizing literary relationships relies on plot summaries annotated with predefined labels, our model jointly learns a set of global relationship descriptors as well as a trajectory over these descriptors for each relationship in a dataset of raw text from novels. We find that our model learns descriptors of events (e.g., marriage or murder) as well as interpersonal states (love, sadness). Our model outperforms topic model baselines on two crowdsourced tasks, and we also find interesting correlations to annotations in an existing dataset.", "phrases": ["friend", "relationship trajectory", "fictional character"], "overall_score": 3.1139057048999335, "scores": [0.8245599058934044, 1.2584945027076517, 1.1489579057939], "rank_score": 1.077337438131652} -{"id": "wang-etal-2020-formality", "title": "Formality Style Transfer with Shared Latent Space", "abstract": "Conventional approaches for formality style transfer borrow models from neural machine translation, which typically requires massive parallel data for training. However, the dataset for formality style transfer is considerably smaller than translation corpora. Moreover, we observe that informal and formal sentences closely resemble each other, which is different from the translation task where two languages have different vocabularies and grammars. In this paper, we present a new approach, Sequence-to-Sequence with Shared Latent Space (S2S-SLS), for formality style transfer, where we propose two auxiliary losses and adopt joint training of bi-directional transfer and auto-encoding. Experimental results show that S2S-SLS (with either RNN or Transformer architectures) consistently outperforms baselines in various settings, especially when we have limited data.", "phrases": ["shared latent space", "sequence-to-sequence", "loss", "formality style transfer"], "overall_score": 2.3667957969214, "scores": [2.070812515921842, 0.887868461349942, 0.8265245548159054, 0.5234952194590798], "rank_score": 1.0771751878866922} -{"id": "light-etal-2004-language", "title": "The Language of Bioscience: Facts, Speculations, and Statements In Between", "abstract": "We explore the use of speculative language in MEDLINE abstracts. Results from a manual annotation experiment suggest that the notion of speculative sentence can be reliably annotated by humans. In addition, an experiment with automated methods also suggests that reliable automated methods might also be developed. 
Distributional observations are also presented as well as a discussion of possible uses for a system that can recognize speculative language.", "phrases": ["statement", "speculative language", "medline abstract", "biomedicine"], "overall_score": 3.97329491733154, "scores": [0.8583297901486997, 1.686492139284709, 1.2429601781264992, 0.5206209907310015], "rank_score": 1.0771007745727272} -{"id": "hofmann-etal-2020-appraisal", "title": "Appraisal Theories for Emotion Classification in Text", "abstract": "Automatic emotion categorization has been predominantly formulated as text classification in which textual units are assigned to an emotion from a predefined inventory, for instance following the fundamental emotion classes proposed by Paul Ekman (fear, joy, anger, disgust, sadness, surprise) or Robert Plutchik (adding trust, anticipation). This approach ignores existing psychological theories to some degree, which provide explanations regarding the perception of events. For instance, the description that somebody discovers a snake is associated with fear, based on the appraisal as being an unpleasant and non-controllable situation. This emotion reconstruction is even possible without having access to explicit reports of a subjective feeling (for instance expressing this with the words \u201cI am afraid.\u201d). Automatic classification approaches therefore need to learn properties of events as latent variables (for instance that the uncertainty and the mental or physical effort associated with the encounter of a snake leads to fear). With this paper, we propose to make such interpretations of events explicit, following theories of cognitive appraisal of events, and show their potential for emotion classification when being encoded in classification models. Our results show that high quality appraisal dimension assignments in event descriptions lead to an improvement in the classification of discrete emotion categories. We make our corpus of appraisal-annotated emotion-associated event descriptions publicly available.", "phrases": ["emotion classification", "latent variable", "appraisal"], "overall_score": 2.4800371202929927, "scores": [0.9531829325621417, 1.7208200616267215, 0.5571963145865704], "rank_score": 1.0770664362584779} -{"id": "kozareva-ravi-2019-proseqo", "title": "ProSeqo: Projection Sequence Networks for On-Device Text Classification", "abstract": "We propose a novel on-device sequence model for text classification using recurrent projections. Our model ProSeqo uses dynamic recurrent projections without the need to store or look up any pre-trained embeddings. This results in fast and compact neural networks that can perform on-device inference for complex short and long text classification tasks. We conducted exhaustive evaluation on multiple text classification tasks. Results show that ProSeqo outperformed state-of-the-art neural and on-device approaches for short text classification tasks such as dialog act and intent prediction. To the best of our knowledge, ProSeqo is the first on-device long text classification neural model. 
It achieved comparable results to previous neural approaches for news article, answers and product categorization, while preserving a small memory footprint and maintaining high accuracy.", "phrases": ["projection sequence networks", "text classification task", "proseqo"], "overall_score": 1.4922752048500587, "scores": [1.7883946722953727, 0.8577715446862985, 0.5831808395364412], "rank_score": 1.076449018839371} -{"id": "soricut-echihabi-2010-trustrank", "title": "TrustRank: Inducing Trust in Automatic Translations via Ranking", "abstract": "The adoption of Machine Translation technology for commercial applications is hampered by the lack of trust associated with machine-translated output. In this paper, we describe TrustRank, an MT system enhanced with a capability to rank the quality of translation outputs from good to bad. This enables the user to set a quality threshold, granting the user control over the quality of the translations. \n \nWe quantify the gains we obtain in translation quality, and show that our solution works on a wide variety of domains and language pairs.", "phrases": ["trust", "reference translation", "bleu score"], "overall_score": 3.169140195963828, "scores": [1.8223630099876154, 0.8754634122191068, 0.5311148651359007], "rank_score": 1.076313762447541} -{"id": "neveol-etal-2014-language", "title": "Language Resources for French in the Biomedical Domain", "abstract": "The biomedical domain offers a wealth of linguistic resources for Natural Language Processing, including terminologies and corpora. While many of these resources are prominently available for English, other languages including French benefit from substantial coverage thanks to the contribution of an active community over the past decades. However, access to terminological resources in languages other than English may not be as straight-forward as access to their English counterparts. Herein, we review the extent of resource coverage for French and give pointers to access French-language resources. We also discuss the sources and methods for making additional material available for French.", "phrases": ["french", "biomedical domain", "linguistic resource"], "overall_score": 1.4918845389242752, "scores": [1.7840872403383312, 0.8550010160848207, 0.5894133824043026], "rank_score": 1.0761672129424849} -{"id": "habash-roth-2009-catib", "title": "CATiB: The Columbia Arabic Treebank", "abstract": "The Columbia Arabic Treebank (CATiB) is a database of syntactic analyses of Arabic sentences. CATiB contrasts with previous approaches to Arabic treebanking in its emphasis on speed with some constraints on linguistic richness. Two basic ideas inspire the CATiB approach: no annotation of redundant information and using representations and terminology inspired by traditional Arabic syntax. 
We describe CATiB's representation and annotation procedure, and report on inter-annotator agreement and speed.", "phrases": ["columbia arabic treebank", "syntax", "catib"], "overall_score": 2.477828389631675, "scores": [1.699065160324727, 0.9795933716973215, 0.5496630581387223], "rank_score": 1.0761071967202571} -{"id": "pang-lee-2005-seeing", "title": "Seeing Stars: Exploiting Class Relationships for Sentiment Categorization with Respect to Rating Scales", "abstract": "We address the rating-inference problem, wherein rather than simply decide whether a review is \"thumbs up\" or \"thumbs down\", as in previous sentiment analysis work, one must determine an author's evaluation with respect to a multi-point scale (e.g., one to five \"stars\"). This task represents an interesting twist on standard multi-class text categorization because there are several different degrees of similarity between class labels; for example, \"three stars\" is intuitively closer to \"four stars\" than to \"one star\". We first evaluate human performance at the task. Then, we apply a meta-algorithm, based on a metric labeling formulation of the problem, that alters a given n-ary classifier's output in an explicit attempt to ensure that similar items receive similar labels. We show that the meta-algorithm can provide significant improvements over both multi-class and regression versions of SVMs when we employ a novel similarity measure appropriate to the problem.", "phrases": ["rating scale", "sentiment analysis", "movie review", "multi-class classification"], "overall_score": 4.38653491555186, "scores": [0.8764540520762031, 1.5555676655301702, 1.3180212586209894, 0.5530786758942804], "rank_score": 1.0757804130304107} -{"id": "whitelaw-patrick-2004-selecting", "title": "Selecting Systemic Features for Text Classification", "abstract": "Systemic features use linguistically derived language models as a basis for text classification. The graph structure of these models allows for feature representations not available with traditional bag-of-words approaches. This paper explores the set of possible representations, and proposes feature selection methods that aim to produce the most compact and effective set of attributes for a given classification problem. We show that small sets of systemic features can outperform larger sets of word-based features in the task of identifying financial scam documents.", "phrases": ["systemic feature", "text classification", "financial scam document"], "overall_score": 1.7313282682268496, "scores": [2.101771151765353, 0.578375970602767, 0.5470570863516869], "rank_score": 1.0757347362399357} -{"id": "qian-etal-2017-linguistically", "title": "Linguistically Regularized LSTM for Sentiment Classification", "abstract": "This paper deals with sentence-level sentiment classification. Though a variety of neural network models have been proposed recently, previous models either depend on expensive phrase-level annotation, most of which has remarkably degraded performance when trained with only sentence-level annotation; or do not fully employ linguistic resources (e.g., sentiment lexicons, negation words, intensity words). In this paper, we propose simple models trained with sentence-level annotation, but also attempt to model the linguistic role of sentiment lexicons, negation words, and intensity words. 
Results show that our models are able to capture the linguistic role of sentiment words, negation words, and intensity words in sentiment expression.", "phrases": ["sentiment classification", "neural network model", "linguistic role"], "overall_score": 2.3621946613105003, "scores": [2.0630731516317424, 0.6013945479874865, 0.5607756628607412], "rank_score": 1.0750811208266569} -{"id": "liu-gildea-2005-syntactic", "title": "Syntactic Features for Evaluation of Machine Translation", "abstract": "Automatic evaluation of machine translation, based on computing n-gram similarity between system output and human reference translations, has revolutionized the development of MT systems. We explore the use of syntactic information, including constituent labels and head-modifier dependencies, in computing similarity between output and reference. Our results show that adding syntactic information to the evaluation metric improves both sentence-level and corpus-level correlation with human judgments.", "phrases": ["machine translation", "adequacy judgment", "dependency information", "meteor", "stm"], "overall_score": 3.6547841189101122, "scores": [1.9474642448146404, 0.8958760428313397, 0.8723694345277377, 0.8287326866648472, 0.8283479775929771], "rank_score": 1.0745580772863084} -{"id": "mi-etal-2016-supervised", "title": "Supervised Attentions for Neural Machine Translation", "abstract": "In this paper, we improve the attention or alignment accuracy of neural machine translation by utilizing the alignments of training sentence pairs. We simply compute the distance between the machine attentions and the \"true\" alignments, and minimize this cost in the training procedure. Our experiments on a large-scale Chinese-to-English task show that our model improves both translation and alignment qualities significantly over the large-vocabulary neural machine translation system, and even beats a state-of-the-art traditional syntax-based system.", "phrases": ["neural machine translation", "nmt attention model", "usefulness", "extra term"], "overall_score": 3.458289483128418, "scores": [2.5925530732828785, 0.6125026907120261, 0.549876159833147, 0.5425802155475276], "rank_score": 1.0743780348438947} -{"id": "chang-etal-2012-learning", "title": "Learning to Find Translations and Transliterations on the Web", "abstract": "In recent years, state-of-the-art cross-linguistic systems have been based on parallel corpora. Nevertheless, it is difficult at times to find translations of a certain technical term or named entity even with very large parallel corpora. In this paper, we present a new method for learning to find translations on the Web for a given term. In our approach, we use a small set of terms and translations to obtain mixed-code snippets returned by a search engine. We then automatically annotate the data with translation tags, automatically generate features to augment the tagged data, and automatically train a conditional random fields model for identifying translations. At runtime, we obtain mixed-code webpages containing the given term and run the model to extract translations as output. 
Preliminary experiments and evaluation results show our method cleanly combines various features, resulting in a system that outperforms previous works.", "phrases": ["transliteration", "web", "technical term"], "overall_score": 1.180213352406284, "scores": [1.8177210939558313, 0.8804579511475642, 0.5246504223434938], "rank_score": 1.0742764891489631} -{"id": "galley-manning-2008-simple", "title": "A Simple and Effective Hierarchical Phrase Reordering Model", "abstract": "While phrase-based statistical machine translation systems currently deliver state-of-the-art performance, they remain weak on word order changes. Current phrase reordering models can properly handle swaps between adjacent phrases, but they typically lack the ability to perform the kind of long-distance re-orderings possible with syntax-based systems. In this paper, we present a novel hierarchical phrase reordering model aimed at improving non-local reorderings, which seamlessly integrates with a standard phrase-based system with little loss of computational efficiency. We show that this model can successfully handle the key examples often used to motivate syntax-based systems, such as the rotation of a prepositional phrase around a noun phrase. We contrast our model with reordering models commonly used in phrase-based systems, and show that our approach provides statistically significant BLEU point gains for two language pairs: Chinese-English (+0.53 on MT05 and +0.71 on MT08) and Arabic-English (+0.55 on MT05).", "phrases": ["reordering model", "hrm", "orientation", "phrase-based decoding", "shift reduce algorithm"], "overall_score": 3.7226315011489697, "scores": [2.5167048752391223, 0.8613497951318914, 0.8422035285606623, 0.6190027881694754, 0.5313610186635048], "rank_score": 1.0741244011529312} -{"id": "li-nenkova-2014-reducing", "title": "Reducing Sparsity Improves the Recognition of Implicit Discourse Relations", "abstract": "The earliest work on automatic detection of implicit discourse relations relied on lexical features. More recently, researchers have demonstrated that syntactic features are superior to lexical features for the task. In this paper we re-examine the two classes of state of the art representations: syntactic production rules and word pair features. In particular, we focus on the need to reduce sparsity in instance representation, demonstrating that different representation choices even for the same class of features may exacerbate sparsity issues and reduce performance. We present results that clearly reveal that lexicalization of the syntactic features is necessary for good performance. We introduce a novel, less sparse, syntactic representation which leads to improvement in discourse relation recognition. Finally, we demonstrate that classifiers trained on different representations, especially lexical ones, behave rather differently and thus could likely be combined in future systems.", "phrases": ["sparsity", "implicit discourse relation", "syntactic representation"], "overall_score": 1.9244305945400373, "scores": [1.7660506384363637, 0.8992699184365537, 0.5568149377464707], "rank_score": 1.0740451648731295} -{"id": "bao-etal-2020-plato", "title": "PLATO: Pre-trained Dialogue Generation Model with Discrete Latent Variable", "abstract": "Pre-training models have been proved effective for a wide range of natural language processing tasks. 
Inspired by this, we propose a novel dialogue generation pre-training framework to support various kinds of conversations, including chit-chat, knowledge grounded dialogues, and conversational question answering. In this framework, we adopt flexible attention mechanisms to fully leverage the bi-directional context and the uni-directional characteristic of language generation. We also introduce discrete latent variables to tackle the inherent one-to-many mapping problem in response generation. Two reciprocal tasks of response generation and latent act recognition are designed and carried out simultaneously within a shared network. Comprehensive experiments on three publicly available datasets verify the effectiveness and superiority of the proposed framework.", "phrases": ["dialogue generation model", "discrete latent variable", "open-domain conversational data", "original pre-training task"], "overall_score": 3.268894880651703, "scores": [2.1858185362496325, 0.951802130258882, 0.5953011120225192, 0.5618665799296211], "rank_score": 1.0736970896151639} -{"id": "yang-etal-2015-humor", "title": "Humor Recognition and Humor Anchor Extraction", "abstract": "Humor is an essential component in personal communication. How to create computational models to discover the structures behind humor, recognize humor and even extract humor anchors remains a challenge. In this work, we first identify several semantic structures behind humor and design sets of features for each structure, and next employ a computational approach to recognize humor. Furthermore, we develop a simple and effective method to extract anchors that enable humor in a sentence. Experiments conducted on two datasets demonstrate that our humor recognizer is effective in automatically distinguishing between humorous and non-humorous texts and our extracted humor anchors correlate quite well with human annotations.", "phrases": ["semantic structure", "humor recognition", "incongruity", "phonetic style", "interpersonal effect"], "overall_score": 3.6146224311882276, "scores": [0.9692266022483931, 1.2143691124300349, 1.0653836188233696, 1.0643728937647414, 1.0538958757947636], "rank_score": 1.0734496206122606} -{"id": "socher-etal-2014-grounded", "title": "Grounded Compositional Semantics for Finding and Describing Images with Sentences", "abstract": "Previous work on Recursive Neural Networks (RNNs) shows that these models can produce compositional feature vectors for accurately representing and classifying sentences or images. However, the sentence vectors of previous models cannot accurately represent visually grounded meaning. We introduce the DT-RNN model which uses dependency trees to embed sentences into a vector space in order to retrieve images that are described by those sentences. Unlike previous RNN-based models which use constituency trees, DT-RNNs naturally focus on the action and agents in a sentence. They are better able to abstract from the details of word order and syntactic expression. DT-RNNs outperform other recursive and recurrent neural networks, kernelized CCA and a bag-of-words baseline on the tasks of finding an image that fits a sentence description and vice versa. 
They also give more similar representations to sentences that describe the same image.", "phrases": ["image", "dependency tree", "composition function", "multimodal embedding"], "overall_score": 4.034094804327049, "scores": [2.0897149213169155, 1.126367390963646, 0.5460972611161551, 0.5280412925389587], "rank_score": 1.0725552164839187} -{"id": "plank-van-noord-2011-effective", "title": "Effective Measures of Domain Similarity for Parsing", "abstract": "It is well known that parsing accuracy suffers when a model is applied to out-of-domain data. It is also known that the most beneficial data to parse a given domain is data that matches the domain (Sekine, 1997; Gildea, 2001). Hence, an important task is to select appropriate domains. However, most previous work on domain adaptation relied on the implicit assumption that domains are somehow given. As more and more data becomes available, automatic ways to select data that is beneficial for a new (unknown) target domain are becoming attractive. This paper evaluates various ways to automatically acquire related training data for a given test set. The results show that an unsupervised technique based on topic models is effective -- it outperforms random data selection on both languages examined, English and Dutch. Moreover, the technique works better than manually assigned labels gathered from meta-data that is available for English.", "phrases": ["plank", "van", "domain similarity measure"], "overall_score": 2.4692503091202953, "scores": [1.3099819429911717, 1.2906351693412372, 0.6165282387341199], "rank_score": 1.072381783688843} -{"id": "freitag-etal-2021-experts", "title": "Experts, Errors, and Context: A Large-Scale Study of Human Evaluation for Machine Translation", "abstract": "Human evaluation of modern high-quality machine translation systems is a difficult problem, and there is increasing evidence that inadequate evaluation procedures can lead to erroneous conclusions. While there has been considerable research on human evaluation, the field still lacks a commonly accepted standard procedure. As a step toward this goal, we propose an evaluation methodology grounded in explicit error analysis, based on the Multidimensional Quality Metrics (MQM) framework. We carry out the largest MQM research study to date, scoring the outputs of top systems from the WMT 2020 shared task in two language pairs using annotations provided by professional translators with access to full document context. We analyze the resulting data extensively, finding among other results a substantially different ranking of evaluated systems from the one established by the WMT crowd workers, exhibiting a clear preference for human over machine output. Surprisingly, we also find that automatic metrics based on pre-trained embeddings can outperform human crowd workers. We make our corpus publicly available for further research.", "phrases": ["human evaluation", "machine translation", "wmt", "professional translator", "document context"], "overall_score": 2.9726575910207482, "scores": [1.9077389568649448, 1.4476948448932696, 0.901009862615956, 0.5645470422856848, 0.5398072493733528], "rank_score": 1.0721595912066415} -{"id": "nakov-hearst-2005-search", "title": "Search Engine Statistics Beyond the n-Gram: Application to Noun Compound Bracketing", "abstract": "In order to achieve the long-range goal of semantic interpretation of noun compounds, it is often necessary to first determine their syntactic structure. 
This paper describes an unsupervised method for noun compound bracketing which extracts statistics from Web search engines using a \u03c72 measure, a new set of surface features, and paraphrases. On a gold standard, the system achieves results of 89.34% (baseline 66.80%), which is a sizable improvement over the state of the art (80.70%).", "phrases": ["noun compound bracketing", "paraphrase", "marker", "hyphen", "search engine"], "overall_score": 3.156603622163386, "scores": [1.9222461886297793, 1.1152573477586987, 0.860148127697357, 0.8540959173198933, 0.608532669769674], "rank_score": 1.0720560502350804} -{"id": "heinz-etal-2011-tier", "title": "Tier-based Strictly Local Constraints for Phonology", "abstract": "Beginning with Goldsmith (1976), the phonological tier has a long history in phonological theory to describe non-local phenomena. This paper defines a class of formal languages, the Tier-based Strictly Local languages, which begin to describe such phenomena. Then this class is located within the Subregular Hierarchy (McNaughton and Papert, 1971). It is found that these languages contain the Strictly Local languages, are star-free, are incomparable with other known sub-star-free classes, and have other interesting properties.", "phrases": ["local", "phonology", "tier", "formal language"], "overall_score": 2.4681538071329556, "scores": [1.4500408171841326, 1.1148902245216576, 0.8802410732561653, 0.8424502007434256], "rank_score": 1.0719055789263454} -{"id": "bos-2016-squib", "title": "Squib: Expressive Power of Abstract Meaning Representations", "abstract": "The syntax of abstract meaning representations (AMRs) can be defined recursively, and a systematic translation to first-order logic (FOL) can be specified, including a proper treatment of negation. AMRs without recurrent variables are in the decidable two-variable fragment of FOL. The current definition of AMRs has limited expressive power for universal quantification (up to one universal quantifier per sentence). A simple extension of the AMR syntax and translation to FOL provides the means to represent projection and scope phenomena.", "phrases": ["expressive power", "amr", "first-order logic", "negation", "syntactic idiosyncrasy"], "overall_score": 2.6635255360783843, "scores": [0.9217738168064816, 1.6670427880502492, 1.590857739904958, 0.5901389114573835, 0.5895943825057517], "rank_score": 1.071881527744965} -{"id": "li-etal-2020-bert-vision", "title": "What Does BERT with Vision Look At?", "abstract": "Pre-trained visually grounded language models such as ViLBERT, LXMERT, and UNITER have achieved significant performance improvement on vision-and-language tasks but what they learn during pre-training remains unclear. In this work, we demonstrate that certain attention heads of a visually grounded language model actively ground elements of language to image regions. Specifically, some heads can map entities to image regions, performing the task known as entity grounding. Some heads can even detect the syntactic relations between non-entity words and image regions, tracking, for example, associations between verbs and regions corresponding to their arguments. We denote this ability as syntactic grounding. 
We verify grounding both quantitatively and qualitatively, using Flickr30K Entities as a testbed.", "phrases": ["bert", "attention head", "image region"], "overall_score": 2.4678734302696457, "scores": [0.9037520568766261, 1.2542548774599334, 1.0573445040687097], "rank_score": 1.0717838128017565} -{"id": "cui-etal-2020-edge", "title": "Edge-Enhanced Graph Convolution Networks for Event Detection with Syntactic Relation", "abstract": "Event detection (ED), a key subtask of information extraction, aims to recognize instances of specific event types in text. Previous studies on the task have verified the effectiveness of integrating syntactic dependency into graph convolutional networks. However, these methods usually ignore dependency label information, which conveys rich and useful linguistic knowledge for ED. In this paper, we propose a novel architecture named Edge-Enhanced Graph Convolution Networks (EE-GCN), which simultaneously exploits syntactic structure and typed dependency label information to perform ED. Specifically, an edge-aware node update module is designed to generate expressive word representations by aggregating syntactically-connected words through specific dependency types. Furthermore, to fully explore clues hidden from dependency edges, a node-aware edge update module is introduced, which refines the relation representations with contextual information. These two modules are complementary to each other and work in a mutual promotion way. We conduct experiments on the widely used ACE2005 dataset and the results show significant improvement over competitive baseline methods.", "phrases": ["convolutional network", "event detection", "syntactic structure"], "overall_score": 2.4666777091771626, "scores": [2.142063362672487, 0.543432002603354, 0.5282981879123462], "rank_score": 1.0712645177293958} -{"id": "misra-etal-2018-mapping", "title": "Mapping Instructions to Actions in 3D Environments with Visual Goal Prediction", "abstract": "We propose to decompose instruction execution to goal prediction and action generation. We design a model that maps raw visual observations to goals using LINGUNET, a language-conditioned image generation network, and then generates the actions required to complete them. Our model is trained from demonstration only without external resources. To evaluate our approach, we introduce two benchmarks for instruction following: LANI, a navigation task; and CHAI, where an agent executes household instructions. Our evaluation demonstrates the advantages of our model decomposition, and illustrates the challenges posed by our new benchmarks.", "phrases": ["environment", "goal prediction", "natural language instruction", "vision-and-language navigation"], "overall_score": 2.5679014386406944, "scores": [2.018871803302619, 0.8628735289428603, 0.8312565746235011, 0.5705904047233852], "rank_score": 1.0708980778980912} -{"id": "loukina-etal-2016-textual", "title": "Textual complexity as a predictor of difficulty of listening items in language proficiency tests", "abstract": "In this paper we explore to what extent the difficulty of listening items in an English language proficiency test can be predicted by the textual properties of the prompt. 
We show that a system based on multiple text complexity features can predict item difficulty for several different item types and for some items achieves higher accuracy than human estimates of item difficulty.", "phrases": ["language proficiency test", "text complexity feature", "comprehension question"], "overall_score": 2.083045494307615, "scores": [1.7922064025206599, 0.8466632725689667, 0.5725512047267574], "rank_score": 1.0704736266054613} -{"id": "nan-etal-2021-dart", "title": "DART: Open-Domain Structured Data Record to Text Generation", "abstract": "We present DART, an open domain structured DAta Record to Text generation dataset with over 82k instances (DARTs). Data-to-text annotations can be a costly process, especially when dealing with tables which are the major source of structured data and contain nontrivial structures. To this end, we propose a procedure of extracting semantic triples from tables that encodes their structures by exploiting the semantic dependencies among table headers and the table title. Our dataset construction framework effectively merged heterogeneous sources from open domain semantic parsing and spoken dialogue systems by utilizing techniques including tree ontology annotation, question-answer pair to declarative sentence conversion, and predicate unification, all with minimum post-editing. We present systematic evaluation on DART as well as new state-of-the-art results on WebNLG 2017 to show that DART (1) poses new challenges to existing data-to-text datasets and (2) facilitates out-of-domain generalization. Our data and code can be found at .", "phrases": ["text generation", "data-to-text dataset", "dart"], "overall_score": 1.483985284518743, "scores": [1.6815019961401418, 0.9274023787377796, 0.6025029412131601], "rank_score": 1.0704691053636939} -{"id": "sun-etal-2019-patient", "title": "Patient Knowledge Distillation for BERT Model Compression", "abstract": "Pre-trained language models such as BERT have proven to be highly effective for natural language processing (NLP) tasks. However, the high demand for computing resources in training such models hinders their application in practice. In order to alleviate this resource hunger in large-scale model training, we propose a Patient Knowledge Distillation approach to compress an original large model (teacher) into an equally-effective lightweight shallow network (student). Different from previous knowledge distillation methods, which only use the output from the last layer of the teacher network for distillation, our student model patiently learns from multiple intermediate layers of the teacher model for incremental knowledge extraction, following two strategies: (i) PKD-Last: learning from the last k layers; and (ii) PKD-Skip: learning from every k layers. These two patient distillation schemes enable the exploitation of rich information in the teacher's hidden layers, and encourage the student model to patiently learn from and imitate the teacher through a multi-layer distillation process. 
Empirically, this translates into improved results on multiple NLP tasks with a significant gain in training efficiency, without sacrificing model accuracy.", "phrases": ["bert model compression", "distillation method", "patient knowledge distillation"], "overall_score": 4.2082239178329965, "scores": [1.88989353381909, 0.7992962001103864, 0.5217033556380463], "rank_score": 1.0702976965225075} -{"id": "baldwin-etal-2013-noisy", "title": "How Noisy Social Media Text, How Diffrnt Social Media Sources?", "abstract": "While various claims have been made about social media text being noisy, there has never been a systematic study to investigate just how linguistically noisy or otherwise it is over a range of social media sources. We explore this question empirically over popular social media text types, in the form of YouTube comments, Twitter posts, web user forum posts, blog posts and Wikipedia, which we compare to a reference corpus of edited English text. We first extract out various descriptive statistics from each data type (including the distribution of languages, average sentence length and proportion of out-of-vocabulary words), and then investigate the proportion of grammatical sentences in each, based on a linguistically-motivated parser. We also investigate the relative similarity between different data types.", "phrases": ["comment", "blog", "social medium", "disfluency", "twitter text"], "overall_score": 3.674483099446698, "scores": [2.1354236662533763, 0.9196663208709998, 0.8627410341005644, 0.8549701072959728, 0.57736892256058], "rank_score": 1.0700340102162986} -{"id": "mubarak-etal-2021-arabic", "title": "Arabic Offensive Language on Twitter: Analysis and Experiments", "abstract": "Detecting offensive language on Twitter has many applications ranging from detecting/predicting bullying to measuring polarization. In this paper, we focus on building a large Arabic offensive tweet dataset. We introduce a method for building a dataset that is not biased by topic, dialect, or target. We produce the largest Arabic dataset to date with special tags for vulgarity and hate speech. We thoroughly analyze the dataset to determine which topics, dialects, and gender are most associated with offensive tweets and how Arabic speakers use offensive language. Lastly, we conduct many experiments to produce strong results (F1 = 83.2) on the dataset using SOTA techniques.", "phrases": ["twitter", "offensive tweet", "arabic offensive language"], "overall_score": 2.565436812225421, "scores": [1.693008169732413, 0.9551452198980069, 0.5614573569200678], "rank_score": 1.0698702488501626} -{"id": "jia-etal-2009-noisy", "title": "A Noisy Channel Model for Grapheme-based Machine Transliteration", "abstract": "Machine transliteration is an important Natural Language Processing task. This paper proposes a Noisy Channel Model for Grapheme-based machine transliteration. Moses, a phrase-based Statistical Machine Translation tool, is employed for the implementation of the system. Experiments are carried out on the NEWS 2009 Machine Transliteration Shared Task English-Chinese track. English-Chinese back transliteration is studied as well.", "phrases": ["noisy channel model", "grapheme-based machine transliteration", "machine translation"], "overall_score": 1.9167933105346473, "scores": [1.7070259860754333, 0.9477746197841933, 0.5545475406755676], "rank_score": 1.0697827155117314} -{"id": "narisawa-etal-2013-204", "title": "Is a 204 cm Man Tall or Small ? 
Acquisition of Numerical Common Sense from the Web", "abstract": "This paper presents novel methods for modeling numerical common sense: the ability to infer whether a given number (e.g., three billion) is large, small, or normal for a given context (e.g., number of people facing a water shortage). We first discuss the necessity of numerical common sense in solving textual entailment problems. We explore two approaches for acquiring numerical common sense. Both approaches start with extracting numerical expressions and their context from the Web. One approach estimates the distribution of numbers co-occurring within a context and examines whether a given value is large, small, or normal, based on the distribution. Another approach utilizes textual patterns with which speakers explicitly express their judgment about the value of a numerical expression. Experimental results demonstrate the effectiveness of both approaches.", "phrases": ["numerical common sense", "value", "object"], "overall_score": 2.2237450826143146, "scores": [1.8465408128421397, 0.834702127294763, 0.5269430627519879], "rank_score": 1.0693953342962967} -{"id": "nguyen-etal-2021-trankit", "title": "Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing", "abstract": "We introduce Trankit, a light-weight Transformer-based Toolkit for multilingual Natural Language Processing (NLP). It provides a trainable pipeline for fundamental NLP tasks over 100 languages, and 90 pretrained pipelines for 56 languages. Built on a state-of-the-art pretrained language model, Trankit significantly outperforms prior multilingual NLP pipelines over sentence segmentation, part-of-speech tagging, morphological feature tagging, and dependency parsing while maintaining competitive performance for tokenization, multi-word token expansion, and lemmatization over 90 Universal Dependencies treebanks. Despite the use of a large pretrained transformer, our toolkit is still efficient in memory usage and speed. This is achieved by our novel plug-and-play mechanism with Adapters where a multilingual pretrained transformer is shared across pipelines for different languages. Our toolkit along with pretrained models and code is publicly available at: . A demo website for our toolkit is also available at: . Finally, we create a demo video for Trankit at: .", "phrases": ["light-weight transformer-based toolkit", "fundamental nlp task", "language model", "trankit"], "overall_score": 1.9160818802845718, "scores": [2.2618992289721302, 0.8239880265498166, 0.5967001134914233, 0.5949552659030874], "rank_score": 1.0693856587291144} -{"id": "choi-palmer-2011-statistical", "title": "Statistical Dependency Parsing in Korean: From Corpus Generation To Automatic Parsing", "abstract": "This paper gives two contributions to dependency parsing in Korean. First, we build a Korean dependency Treebank from an existing constituent Treebank. For a morphologically rich language like Korean, dependency parsing shows some advantages over constituent parsing. Since there is not much training data available, we automatically generate dependency trees by applying head-percolation rules and heuristics to the constituent trees. Second, we show how to extract useful features for dependency parsing from rich morphology in Korean. Once we build the dependency Treebank, any statistical parsing approach can be applied. The challenging part is how to extract features from tokens consisting of multiple morphemes. 
We suggest a way of selecting important morphemes and use only these as features to avoid sparsity. Our parsing approach is evaluated on three different genres using both gold-standard and automatic morphological analysis. We also test the impact of fine vs. coarse-grained morphologies on dependency parsing. With automatic morphological analysis, we achieve labeled attachment scores of 80%+. To the best of our knowledge, this is the first time that Korean dependency parsing has been evaluated on labeled edges with such a large variety of data.", "phrases": ["dependency parsing", "korean", "important morpheme"], "overall_score": 2.223380256232927, "scores": [1.6819150590891387, 0.9676354043695504, 0.5581092062189873], "rank_score": 1.069219889892559} -{"id": "hwang-etal-2015-aligning", "title": "Aligning Sentences from Standard Wikipedia to Simple Wikipedia", "abstract": "This work improves monolingual sentence alignment for text simplification, specifically for text in standard and simple Wikipedia. We introduce a method that improves over past efforts by using a greedy (vs. ordered) search over the document and a word-level semantic similarity score based on Wiktionary (vs. WordNet) that also accounts for structural similarity through syntactic dependencies. Experiments show improved performance on a hand-aligned set, with the largest gain coming from structural similarity. Resulting datasets of manually and automatically aligned sentence pairs are made available.", "phrases": ["simple wikipedia", "text simplification", "sentence pair", "match"], "overall_score": 2.8947500435558418, "scores": [1.962043507214208, 0.9436922045686325, 0.8296977662054809, 0.540336657111305], "rank_score": 1.0689425337749066} -{"id": "lukasik-etal-2016-hawkes", "title": "Hawkes Processes for Continuous Time Sequence Classification: an Application to Rumour Stance Classification in Twitter", "abstract": "Classification of temporal textual data sequences is a common task in various domains such as social media and the Web. In this paper we propose to use Hawkes Processes for classifying sequences of temporal textual data, which exploit both temporal and textual information. Our experiments on rumour stance classification on four Twitter datasets show the importance of using the temporal information of tweets along with the textual content.", "phrases": ["rumour stance classification", "hawkes processes", "temporal sequence"], "overall_score": 2.655899194848073, "scores": [1.7825884792767204, 0.8835039461211971, 0.5403449613843914], "rank_score": 1.0688124622607698} -{"id": "han-zhao-2010-structural", "title": "Structural Semantic Relatedness: A Knowledge-Based Method to Named Entity Disambiguation", "abstract": "The name ambiguity problem has raised urgent demands for efficient, high-quality named entity disambiguation methods. In recent years, the increasing availability of large-scale, rich semantic knowledge sources (such as Wikipedia and WordNet) creates new opportunities to enhance the named entity disambiguation by developing algorithms which can exploit these knowledge sources at best. The problem is that these knowledge sources are heterogeneous and most of the semantic knowledge within them is embedded in complex structures, such as graphs and networks. This paper proposes a knowledge-based method, called Structural Semantic Relatedness (SSR), which can enhance the named entity disambiguation by capturing and leveraging the structural semantic knowledge in multiple knowledge sources. 
Empirical results show that, in comparison with the classical BOW-based methods and social network-based methods, our method can significantly improve the disambiguation performance by 8.7% and 14.7%, respectively.", "phrases": ["knowledge-based method", "entity disambiguation", "wikipedia"], "overall_score": 1.9150163947661967, "scores": [1.6618685405485358, 0.9662878811308234, 0.578216578137259], "rank_score": 1.0687909999388727} -{"id": "cai-lam-2020-amr", "title": "AMR Parsing via Graph-Sequence Iterative Inference", "abstract": "We propose a new end-to-end model that treats AMR parsing as a series of dual decisions on the input sequence and the incrementally constructed graph. At each time step, our model performs multiple rounds of attention, reasoning, and composition that aim to answer two critical questions: (1) which part of the input sequence to abstract; and (2) where in the output graph to construct the new concept. We show that the answers to these two questions are mutually causal. We design a model based on iterative inference that helps achieve better answers in both perspectives, leading to greatly improved parsing accuracy. Our experimental results significantly outperform all previously reported Smatch scores by large margins. Remarkably, without the help of any large-scale pre-trained language model (e.g., BERT), our model already surpasses previous state-of-the-art using BERT. With the help of BERT, we can push the state-of-the-art results to 80.2% on LDC2017T10 (AMR 2.0) and 75.4% on LDC2014T12 (AMR 1.0).", "phrases": ["graph-sequence iterative inference", "decision", "new concept", "amr"], "overall_score": 2.962394227051202, "scores": [1.9416095948235312, 0.9328724158141907, 0.8553071451841286, 0.5440423047030128], "rank_score": 1.0684578651312158} -{"id": "de-marneffe-etal-2012-happen", "title": "Did It Happen? The Pragmatic Complexity of Veridicality Assessment", "abstract": "Natural language understanding depends heavily on assessing veridicality\u2014whether events mentioned in a text are viewed as happening or not\u2014but little consideration is given to this property in current relation and event extraction systems. Furthermore, the work that has been done has generally assumed that veridicality can be captured by lexical semantic properties whereas we show that context and world knowledge play a significant role in shaping veridicality. We extend the FactBank corpus, which contains semantically driven veridicality annotations, with pragmatically informed ones. Our annotations are more complex than the lexical assumption predicts but systematic enough to be included in computational work on textual understanding. They also indicate that veridicality judgments are not always categorical, and should therefore be modeled as distributions. We build a classifier to automatically assign event veridicality distributions based on our new annotations. 
The classifier relies not only on lexical features like hedges or negations, but also on structural features and approximations of world knowledge, thereby providing a nuanced picture of the diverse factors that shape veridicality. \u201cAll I know is what I read in the papers\u201d \u2014 Will Rogers", "phrases": ["veridicality assessment", "factbank", "uncertainty"], "overall_score": 3.200057274418311, "scores": [0.9452331403866494, 1.4182247746966261, 0.8411581676034988], "rank_score": 1.0682053608955913} -{"id": "miao-blunsom-2016-language", "title": "Language as a Latent Variable: Discrete Generative Models for Sentence Compression", "abstract": "In this work we explore deep generative models of text in which the latent representation of a document is itself drawn from a discrete language model distribution. We formulate a variational auto-encoder for inference in this model and apply it to the task of compressing sentences. In this application the generative model first draws a latent summary sentence from a background language model, and then subsequently draws the observed sentence conditioned on this latent summary. In our empirical evaluation we show that generative formulations of both abstractive and extractive compression yield state-of-the-art results when trained on a large amount of supervised data. Further, we explore semi-supervised compression scenarios where we show that it is possible to achieve performance competitive with previously proposed supervised models while training on a fraction of the supervised data.", "phrases": ["latent variable", "sentence compression", "deep generative model", "auto-encoder", "prior"], "overall_score": 3.2000297246822784, "scores": [1.8448852413017365, 1.7703649256236416, 0.6150172328655289, 0.5598551279919014, 0.550858295056075], "rank_score": 1.0681961645677767} -{"id": "guerini-etal-2015-echoes", "title": "Echoes of Persuasion: The Effect of Euphony in Persuasive Communication", "abstract": "While the effect of various lexical, syntactic, semantic and stylistic features have been addressed in persuasive language from a computational point of view, the persuasive effect of phonetics has received little attention. By modeling a notion of euphony and analyzing four datasets comprising persuasive and non-persuasive sentences in different domains (political speeches, movie quotes, slogans and tweets), we explore the impact of sounds on different forms of persuasiveness. We conduct a series of analyses and prediction experiments within and across datasets. Our results highlight the positive role of phonetic devices on persuasion.", "phrases": ["persuasion", "euphony", "alliteration"], "overall_score": 1.9137068396820225, "scores": [1.774017095673058, 0.9047990150073772, 0.5253642593105875], "rank_score": 1.0680601233303408} -{"id": "kurita-etal-2020-weight", "title": "Weight Poisoning Attacks on Pretrained Models", "abstract": "Recently, NLP has seen a surge in the usage of large pre-trained models. Users download weights of models pre-trained on large datasets, then fine-tune the weights on a task of their choice. This raises the question of whether downloading untrusted pre-trained weights can pose a security threat. In this paper, we show that it is possible to construct \u201cweight poisoning\u201d attacks where pre-trained weights are injected with vulnerabilities that expose \u201cbackdoors\u201d after fine-tuning, enabling the attacker to manipulate the model prediction simply by injecting an arbitrary keyword. 
We show that by applying a regularization method which we call RIPPLe and an initialization procedure we call Embedding Surgery, such attacks are possible even with limited knowledge of the dataset and fine-tuning procedure. Our experiments on sentiment classification, toxicity detection, and spam detection show that this attack is widely applicable and poses a serious threat. Finally, we outline practical defenses against such attacks.", "phrases": ["attacker", "pre-trained model", "backdoor", "weight"], "overall_score": 3.0870549789766786, "scores": [1.4622807464057261, 1.3588650763117989, 0.8742151868015767, 0.5768298884762143], "rank_score": 1.068047724498829} -{"id": "bisazza-tump-2018-lazy", "title": "The Lazy Encoder: A Fine-Grained Analysis of the Role of Morphology in Neural Machine Translation", "abstract": "Neural sequence-to-sequence models have proven very effective for machine translation, but at the expense of model interpretability. To shed more light into the role played by linguistic structure in the process of neural machine translation, we perform a fine-grained analysis of how various source-side morphological features are captured at different levels of the NMT encoder while varying the target language. Differently from previous work, we find no correlation between the accuracy of source morphology encoding and translation quality. We do find that morphological features are only captured in context and only to the extent that they are directly transferable to the target words.", "phrases": ["fine-grained analysis", "neural machine translation", "nmt encoder"], "overall_score": 1.1733518484834435, "scores": [1.7531424867626049, 0.9087491222202388, 0.5422010283913761], "rank_score": 1.0680308791247397} -{"id": "francois-fairon-2012-ai", "title": "An \u201cAI readability\u201d Formula for French as a Foreign Language", "abstract": "This paper presents a new readability formula for French as a foreign language (FFL), which relies on 46 textual features representative of the lexical, syntactic, and semantic levels as well as some of the specificities of the FFL context. We report comparisons between several techniques for feature selection and various learning algorithms. Our best model, based on support vector machines (SVM), significantly outperforms previous FFL formulas. We also found that semantic features behave poorly in our case, in contrast with some previous readability studies on English as a first language.", "phrases": ["french", "foreign language", "readability formula"], "overall_score": 2.818515064333819, "scores": [1.7254071563410063, 0.9017572341424748, 0.576837696971803], "rank_score": 1.068000695818428} -{"id": "li-etal-2021-kfcnet-knowledge", "title": "KFCNet: Knowledge Filtering and Contrastive Learning for Generative Commonsense Reasoning", "abstract": "Pre-trained language models have led to substantial gains over a broad range of natural language processing (NLP) tasks, but have been shown to have limitations for natural language generation tasks with high-quality requirements on the output, such as commonsense generation and ad keyword generation. In this work, we present a novel Knowledge Filtering and Contrastive learning Network (KFCNet) which references external knowledge and achieves better generation performance. Specifically, we propose a BERT-based filter model to remove low-quality candidates, and apply contrastive learning separately to each of the encoder and decoder, within a general encoder\u2013decoder architecture. 
The encoder contrastive module helps to capture global target semantics during encoding, and the decoder contrastive module enhances the utility of retrieved prototypes while learning general features. Extensive experiments on the CommonGen benchmark show that our model outperforms the previous state of the art by a large margin: +6.6 points (42.5 vs. 35.9) for BLEU-4, +3.7 points (33.3 vs. 29.6) for SPICE, and +1.3 points (18.3 vs. 17.0) for CIDEr. We further verify the effectiveness of the proposed contrastive module on ad keyword generation, and show that our model has potential commercial value.", "phrases": ["contrastive learning", "external knowledge", "kfcnet"], "overall_score": 1.1733184736753324, "scores": [1.7462122229477444, 0.896225410795414, 0.5615638664524887], "rank_score": 1.0680005000652157} -{"id": "kim-hovy-2006-automatic", "title": "Automatic Identification of Pro and Con Reasons in Online Reviews", "abstract": "In this paper, we present a system that automatically extracts the pros and cons from online reviews. Although many approaches have been developed for extracting opinions from text, our focus here is on extracting the reasons of the opinions, which may themselves be in the form of either fact or opinion. Leveraging online review sites with author-generated pros and cons, we propose a system for aligning the pros and cons to their sentences in review texts. A maximum entropy model is then trained on the resulting labeled set to subsequently extract pros and cons from online review sites that do not explicitly provide them. Our experimental results show that our resulting system identifies pros and cons with 66% precision and 76% recall.", "phrases": ["reason", "review", "subjectivity analysis resource"], "overall_score": 3.1443724748273265, "scores": [1.4045518512767416, 1.254319630193098, 0.5448347224034907], "rank_score": 1.0679020679577766} -{"id": "vu-haffari-2018-automatic", "title": "Automatic Post-Editing of Machine Translation: A Neural Programmer-Interpreter Approach", "abstract": "Automated Post-Editing (PE) is the task of automatically correcting common and repetitive errors found in machine translation (MT) output. In this paper, we present a neural programmer-interpreter approach to this task, resembling the way that humans perform post-editing using discrete edit operations, which we refer to as programs. Our model outperforms previous neural models for inducing PE programs on the WMT17 APE task for German-English by up to +1 BLEU score and -0.7 TER scores.", "phrases": ["machine translation", "neural programmer-interpreter approach", "automatic post-editing"], "overall_score": 1.4803630413900593, "scores": [1.4041596929665505, 0.9217878225207914, 0.8776211123057702], "rank_score": 1.0678562092643709} -{"id": "wang-etal-2021-cline", "title": "CLINE: Contrastive Learning with Semantic Negative Examples for Natural Language Understanding", "abstract": "Although pre-trained language models have proven useful for learning high-quality semantic representations, these models are still vulnerable to simple perturbations. Recent works aiming to improve the robustness of pre-trained models mainly focus on adversarial training from perturbed examples with similar semantics, neglecting the utilization of different or even opposite semantics. Different from the image processing field, the text is discrete and a few word substitutions can cause significant semantic changes. 
To study the impact on semantics caused by small perturbations, we conduct a series of pilot experiments and surprisingly find that adversarial training is useless or even harmful for the model to detect these semantic changes. To address this problem, we propose Contrastive Learning with semantIc Negative Examples (CLINE), which constructs semantic negative examples in an unsupervised manner to improve the robustness under semantically adversarial attacks. By comparing with similar and opposite semantic examples, the model can effectively perceive the semantic changes caused by small perturbations. Empirical results show that our approach yields substantial improvements on a range of sentiment analysis, reasoning, and reading comprehension tasks. And CLINE also ensures the compactness within the same semantics and separability across different semantics at the sentence level.", "phrases": ["contrastive learning", "semantic negative examples", "pre-trained language model", "adversarial training"], "overall_score": 2.3460544754370454, "scores": [1.9572766507723596, 0.9142449397265299, 0.8291873309348038, 0.5702327012588003], "rank_score": 1.0677354056731234} -{"id": "zhou-etal-2009-generating", "title": "Generating Chinese Couplets and Quatrain Using a Statistical Approach", "abstract": "We propose a novel statistical approach to automatically generate Chinese couplets and Chinese poetry. For Chinese couplets, the system takes as input the first sentence and generates as output an N-best list of second sentences using a phrase-based SMT model. A comprehensive evaluation using both human judgments and BLEU scores has been conducted and the results demonstrate that this approach is very successful. We then extended this approach to generate classic Chinese poetry using the quatrain as a case study. Given a few keywords describing a user's intention, a statistical model is used to generate the first sentence. Then a phrase-based SMT model is used to generate the other three quatrain sentences one by one. Evaluation using human judgment over individual lines as well as the quality of the generated poem as a whole demonstrates promising results.", "phrases": ["couplet", "quatrain", "classic chinese poetry"], "overall_score": 1.4800581581134589, "scores": [1.7317922137333321, 0.9262355147586617, 0.5448811189143165], "rank_score": 1.0676362824687702} -{"id": "sporleder-lapata-2005-discourse", "title": "Discourse Chunking and its Application to Sentence Compression", "abstract": "In this paper we consider the problem of analysing sentence-level discourse structure. We introduce discourse chunking (i.e., the identification of intra-sentential nucleus and satellite spans) as an alternative to full-scale discourse parsing. Our experiments show that the proposed modelling approach yields results comparable to state-of-the-art while exploiting knowledge-lean features and small amounts of discourse annotations. We also demonstrate how discourse chunking can be successfully applied to a sentence compression task.", "phrases": ["discourse chunking", "sentence length", "low-resourced language"], "overall_score": 2.738149831042231, "scores": [2.0830209227203658, 0.5820698021641132, 0.537486928054571], "rank_score": 1.0675258843130166} -{"id": "xiao-etal-2011-document", "title": "Document-level Consistency Verification in Machine Translation", "abstract": "Translation consistency is an important issue in document-level translation. 
However, the consistency in Machine Translation (MT) output is generally overlooked in most MT systems due to the lack of the use of document contexts. To address this issue, we present a simple and effective approach that incorporates document contexts into an existing Statistical Machine Translation (SMT) system for document-level translation. Experimental results show that our approach effectively reduces the errors caused by inconsistent translations (25% error reduction). More interestingly, it is observed that as a \u201cbonus\u201d our approach is able to improve the BLEU score of the SMT system.", "phrases": ["consistency", "machine translation", "smt system", "hard constraint"], "overall_score": 2.8163720361800753, "scores": [1.9794740040660939, 1.1750811658135416, 0.564991093664451, 0.5492083475437557], "rank_score": 1.0671886527719605} -{"id": "bao-etal-2018-deriving", "title": "Deriving Machine Attention from Human Rationales", "abstract": "Attention-based models are successful when trained on large amounts of data. In this paper, we demonstrate that even in the low-resource scenario, attention can be learned effectively. To this end, we start with discrete human-annotated rationales and map them into continuous attention. Our central hypothesis is that this mapping is general across domains, and thus can be transferred from resource-rich domains to low-resource ones. Our model jointly learns a domain-invariant representation and induces the desired mapping between rationales and attention. Our empirical results validate this hypothesis and show that our approach delivers significant gains over state-of-the-art baselines, yielding over 15% average error reduction on benchmark datasets.", "phrases": ["machine attention", "rationale", "mapping", "training example"], "overall_score": 2.8899409990336262, "scores": [0.9491370163478512, 2.2313564337505474, 0.5441933515806431, 0.5439800019974695], "rank_score": 1.067166700919128} -{"id": "wang-etal-2019-make", "title": "Does it Make Sense? And Why? A Pilot Study for Sense Making and Explanation", "abstract": "Introducing common sense to natural language understanding systems has received increasing research attention. It remains a fundamental question how to evaluate whether a system has the sense-making capability. Existing benchmarks measure common sense knowledge indirectly or without reasoning. In this paper, we release a benchmark to directly test whether a system can differentiate natural language statements that make sense from those that do not make sense. In addition, a system is asked to identify the most crucial reason why a statement does not make sense. We evaluate models trained over large-scale language modeling tasks as well as human performance, showing that there are different challenges for system sense-making.", "phrases": ["explanation", "capability", "natural language statement", "human performance"], "overall_score": 3.2983693020588603, "scores": [2.1132042956673316, 1.0861364517288448, 0.5428420273811004, 0.5261109828516947], "rank_score": 1.0670734394072428} -{"id": "rehm-hegele-2018-language", "title": "Language Technology for Multilingual Europe: An Analysis of a Large-Scale Survey regarding Challenges, Demands, Gaps and Needs", "abstract": "We present the analysis of a large-scale survey titled \u201cLanguage Technology for Multilingual Europe\u201d, conducted between May and June 2017. A total of 634 participants in 52 countries responded to the survey. 
Its main purpose was to collect input, feedback and ideas from the European Language Technology research and innovation community in order to assess the most prominent research areas, projects and applications, but, more importantly, to identify the biggest challenges, obstacles and gaps Europe is currently facing with regard to its multilingual setup and technological solutions. Participants were encouraged to share concrete suggestions and recommendations on how present challenges can be turned into opportunities in the context of a potential long-term, large-scale, Europe-wide research, development and innovation funding programme, currently titled Human Language Project.", "phrases": ["multilingual europe", "survey", "country"], "overall_score": 1.172181128896129, "scores": [1.702169549716935, 0.9358058692958475, 0.5629203136861741], "rank_score": 1.0669652442329856} -{"id": "frank-etal-2003-integrated", "title": "Integrated Shallow and Deep Parsing: TopP Meets HPSG", "abstract": "We present a novel, data-driven method for integrated shallow and deep parsing. Mediated by an XML-based multi-layer annotation architecture, we interleave a robust, but accurate stochastic topological field parser of German with a constraint-based HPSG parser. Our annotation-based method for dovetailing shallow and deep phrasal constraints is highly flexible, allowing targeted and fine-grained guidance of constraint-based parsing. We conduct systematic experiments that demonstrate substantial performance gains.", "phrases": ["deep parsing", "hpsg", "field parser", "german"], "overall_score": 2.2185265280901225, "scores": [2.0144218274259167, 0.8608017987243796, 0.8483222690516613, 0.5439970650063485], "rank_score": 1.0668857400520766} -{"id": "hamdan-etal-2015-lsislif", "title": "Lsislif: Feature Extraction and Label Weighting for Sentiment Analysis in Twitter", "abstract": "This paper describes our sentiment analysis systems which have been built for SemEval2015 Task 10 Subtask B and E. For subtask B, a Logistic Regression classifier has been trained after extracting several groups of features including lexical, syntactic, lexicon-based, Z score and semantic features. A weighting schema has been adapted for positive and negative labels in order to take into account the unbalanced distribution of tweets between the positive and negative classes. This system is ranked third over 40 participants; it achieves an average F1 of 64.27 on the Twitter 2015 data set, just 0.57% less than the first system. We also present our participation in Subtask E, in which our system achieved the second rank with the Kendall metric but the first with Spearman for ranking Twitter terms according to their association with positive sentiment.", "phrases": ["feature extraction", "twitter", "sentiment lexicon"], "overall_score": 2.3441314047675372, "scores": [1.3632209834923799, 1.2498419814206276, 0.5875175705679196], "rank_score": 1.0668601784936425} -{"id": "ghosh-etal-2010-clause", "title": "Clause Identification and Classification in Bengali", "abstract": "This paper reports on the development of clause identification and classification techniques for the Bengali language. A syntactic rule-based model has been used to identify the clause boundary. For clause type identification a Conditional Random Field (CRF) based statistical model has been used. 
The clause identification system and the clause classification system demonstrated precision values of 73% and 78%, respectively.", "phrases": ["bengali", "clause identification", "dependency relation"], "overall_score": 1.9114155173545846, "scores": [1.6882827636619984, 0.9780523712901201, 0.5340088010195123], "rank_score": 1.0667813119905436} -{"id": "collins-thompson-callan-2004-language", "title": "A Language Modeling Approach to Predicting Reading Difficulty", "abstract": "We demonstrate a new research approach to the problem of predicting the reading difficulty of a text passage, by recasting readability in terms of statistical language modeling. We derive a measure based on an extension of multinomial na\u00efve Bayes classification that combines multiple language models to estimate the most likely grade level for a given passage. The resulting classifier is not specific to any particular subject and can be trained with relatively little labeled data. We perform predictions for individual Web pages in English and compare our performance to widely-used semantic variables from traditional readability measures. We show that with minimal changes, the classifier may be retrained for use with French Web documents. For both English and French, the classifier maintains consistently good correlation with labeled grade level (0.63 to 0.79) across all test sets. Some traditional semantic variables such as type-token ratio gave the best performance on commercial calibrated test passages, while our language modeling approach gave better accuracy for Web documents and very short passages (less than 10 words).", "phrases": ["language modeling approach", "reading difficulty", "grade level", "complexity"], "overall_score": 3.297354184267899, "scores": [1.9934192659152195, 0.8608723850456135, 0.8738501879306654, 0.5388382935689051], "rank_score": 1.0667450331151007} -{"id": "choi-etal-2010-multilingual", "title": "Multilingual Propbank Annotation Tools: Cornerstone and Jubilee", "abstract": "This paper demonstrates two annotation tools related to Propbank: Cornerstone and Jubilee. Propbank is a corpus in which the arguments of each verb predicate are annotated with their semantic roles. Propbank annotation also requires the choice of a sense ID for each predicate, defined in the corresponding frameset file. Jubilee expedites the annotation process by displaying several resources of syntactic and semantic information simultaneously; easy access to each of these resources allows the annotator to quickly absorb and apply the necessary syntactic and semantic information pertinent to each predicate for consistent and efficient annotation. Cornerstone is a user-friendly XML editor, customized to allow frame authors to create and edit frameset files. Both tools have been successfully adapted to many Propbank projects; they run platform independently, are light enough to run as X11 applications and support multiple languages such as Arabic, Chinese, English, Hindi and Korean.", "phrases": ["propbank", "cornerstone", "semantic role"], "overall_score": 1.4784647628744745, "scores": [1.6677310484270707, 0.9778487386647141, 0.5538808852003376], "rank_score": 1.0664868907640408} -{"id": "rogati-etal-2003-unsupervised", "title": "Unsupervised Learning of Arabic Stemming Using a Parallel Corpus", "abstract": "This paper presents an unsupervised learning approach to building a non-English (Arabic) stemmer. 
The stemming model is based on statistical machine translation and it uses an English stemmer and a small (10 K sentences) parallel corpus as its sole training resources. No parallel text is needed after the training phase. Monolingual, unannotated text can be used to further improve the stemmer by allowing it to adapt to a desired domain or genre. Examples and results will be given for Arabic, but the approach is applicable to any language that needs affix removal. Our resource-frugal approach results in 87.5% agreement with a state-of-the-art, proprietary Arabic stemmer built using rules, affix lists, and human-annotated text, in addition to an unsupervised component. Task-based evaluation using Arabic information retrieval indicates an improvement of 22-38% in average precision over unstemmed text, and 96% of the performance of the proprietary stemmer above.", "phrases": ["arabic", "stemmer", "statistical machine translation"], "overall_score": 2.3427833810054923, "scores": [1.2124948325773384, 1.0632892067843693, 0.9229559599592622], "rank_score": 1.0662466664403232} -{"id": "blanchard-etal-2018-getting", "title": "Getting the subtext without the text: Scalable multimodal sentiment classification from visual and acoustic modalities", "abstract": "In the last decade, video blogs (vlogs) have become an extremely popular method through which people express sentiment. The ubiquitousness of these videos has increased the importance of multimodal fusion models, which incorporate video and audio features with traditional text features for automatic sentiment detection. Multimodal fusion offers a unique opportunity to build models that learn from the full depth of expression available to human viewers. In the detection of sentiment in these videos, acoustic and video features provide clarity to otherwise ambiguous transcripts. In this paper, we present a multimodal fusion model that exclusively uses high-level video and audio features to analyze spoken sentences for sentiment. We discard traditional transcription features in order to minimize human intervention and to maximize the deployability of our model on at-scale real-world data. We select high-level features for our model that have been successful in non-affect domains in order to test their generalizability in the sentiment detection domain. We train and test our model on the newly released CMU Multimodal Opinion Sentiment and Emotion Intensity (CMU-MOSEI) dataset, obtaining an F1 score of 0.8049 on the validation set and an F1 score of 0.6325 on the held-out challenge test set.", "phrases": ["sentiment classification", "modality", "video feature"], "overall_score": 1.910440123878515, "scores": [1.811009260661935, 0.8348141958225439, 0.5528873470949627], "rank_score": 1.0662369345264804} -{"id": "severyn-etal-2013-learning-semantic", "title": "Learning Semantic Textual Similarity with Structural Representations", "abstract": "Measuring semantic textual similarity (STS) is a cornerstone of many NLP applications. 
Different from the majority of approaches, where a large number of pairwise similarity features are used to represent a text pair, our model features the following: (i) it directly encodes input texts into relational syntactic structures; (ii) relies on tree kernels to handle feature engineering automatically; (iii) combines both structural and feature vector representations in a single scoring model, i.e., in Support Vector Regression (SVR); and (iv) delivers significant improvement over the best STS systems.", "phrases": ["semantic textual similarity", "edition distance", "syntactic dependency"], "overall_score": 1.7157760788298324, "scores": [2.0729329668435947, 0.5638270907294529, 0.5614547954029964], "rank_score": 1.0660716176586813} -{"id": "snow-etal-2006-effectively", "title": "Effectively Using Syntax for Recognizing False Entailment", "abstract": "Recognizing textual entailment is a challenging problem and a fundamental component of many applications in natural language processing. We present a novel framework for recognizing textual entailment that focuses on the use of syntactic heuristics to recognize false entailment. We give a thorough analysis of our system, which demonstrates state-of-the-art performance on a widely-used test set.", "phrases": ["false entailment", "heuristic", "syntactic clue"], "overall_score": 2.074275911157988, "scores": [2.0719448374998466, 0.5898581361345239, 0.5360978834504122], "rank_score": 1.0659669523615942} -{"id": "luo-etal-2015-joint", "title": "Joint Entity Recognition and Disambiguation", "abstract": "Extracting named entities in text and linking extracted names to a given knowledge base are fundamental tasks in applications for text understanding. Existing systems typically run a named entity recognition (NER) model to extract entity names first, then run an entity linking model to link extracted names to a knowledge base. NER and linking models are usually trained separately, and the mutual dependency between the two tasks is ignored. We propose JERL, Joint Entity Recognition and Linking, to jointly model NER and linking tasks and capture the mutual dependency between them. It allows the information from each task to improve the performance of the other. To the best of our knowledge, JERL is the first model to jointly optimize NER and linking tasks together completely. In experiments on the CoNLL\u201903/AIDA data set, JERL outperforms state-of-the-art NER and linking systems, and we find improvements of 0.4% absolute F1 for NER on CoNLL\u201903, and 0.36% absolute precision@1 for linking on AIDA.", "phrases": ["joint entity recognition", "conditional random fields", "lexicon feature", "task-specific resource"], "overall_score": 3.6252871655250534, "scores": [0.9574365795479652, 1.585231641772811, 0.8777262218680507, 0.8431477846996184], "rank_score": 1.0658855569721113} -{"id": "denero-etal-2006-generative", "title": "Why Generative Phrase Models Underperform Surface Heuristics", "abstract": "We investigate why weights from generative models underperform heuristic estimates in phrase-based machine translation. We first propose a simple generative, phrase-based model and verify that its estimates are inferior to those given by surface statistics. The performance gap stems primarily from the addition of a hidden segmentation variable, which increases the capacity for overfitting during maximum likelihood training with EM. 
In particular, while word level models benefit greatly from re-estimation, phrase-level models do not: the crucial difference is that distinct word alignments cannot all be correct, while distinct segmentations can. Alternate segmentations rather than alternate alignments compete, resulting in increased determinization of the phrase table, decreased generalization, and decreased final BLEU score. We also show that interpolation of the two methods can result in a modest increase in BLEU score.", "phrases": ["generative model", "phrase-based model", "probability"], "overall_score": 3.341342126512614, "scores": [1.3992866102523998, 1.2243438489829652, 0.5733221387356685], "rank_score": 1.0656508659903443} -{"id": "berg-kirkpatrick-etal-2011-jointly", "title": "Jointly Learning to Extract and Compress", "abstract": "We learn a joint model of sentence extraction and compression for multi-document summarization. Our model scores candidate summaries according to a combined linear model whose features factor over (1) the n-gram types in the summary and (2) the compressions used. We train the model using a margin-based objective whose loss captures end summary quality. Because of the exponentially large set of candidate summaries, we use a cutting-plane algorithm to incrementally detect and add active constraints efficiently. Inference in our model can be cast as an ILP and thereby solved in reasonable time; we also present a fast approximation scheme which achieves similar performance. Our jointly extracted and compressed summaries outperform both unlearned baselines and our learned extraction-only system on both ROUGE and Pyramid, without a drop in judged linguistic quality. We achieve the highest published ROUGE results to date on the TAC 2008 data set.", "phrases": ["compress", "joint model", "summarization"], "overall_score": 3.8186840924620036, "scores": [0.9184977191017324, 1.6898701223890664, 0.5885044156770757], "rank_score": 1.065624085722625} -{"id": "zubiaga-etal-2016-stance", "title": "Stance Classification in Rumours as a Sequential Task Exploiting the Tree Structure of Social Media Conversations", "abstract": "Rumour stance classification, the task that determines if each tweet in a collection discussing a rumour is supporting, denying, questioning or simply commenting on the rumour, has been attracting substantial interest. Here we introduce a novel approach that makes use of the sequence of transitions observed in tree-structured conversation threads in Twitter. The conversation threads are formed by harvesting users' replies to one another, which results in a nested tree-like structure. Previous work addressing the stance classification task has treated each tweet as a separate unit. Here we analyse tweets by virtue of their position in a sequence and test two sequential classifiers, Linear-Chain CRF and Tree CRF, each of which makes different assumptions about the conversational structure. We experiment with eight Twitter datasets, collected during breaking news, and show that exploiting the sequential structure of Twitter conversations achieves significant improvements over the non-sequential methods. 
Our work is the first to model Twitter conversations as a tree structure in this manner, introducing a novel way of tackling NLP tasks on Twitter conversations.", "phrases": ["twitter conversation", "stance classification", "rumor"], "overall_score": 2.5551235580963017, "scores": [1.8134695839643007, 0.841511379643272, 0.5417268998444165], "rank_score": 1.0655692878173297} -{"id": "li-etal-2019-rumor-detection", "title": "Rumor Detection by Exploiting User Credibility Information, Attention and Multi-task Learning", "abstract": "In this study, we propose a new multi-task learning approach for rumor detection and stance classification tasks. This neural network model has a shared layer and two task specific layers. We incorporate the user credibility information into the rumor detection layer, and we also apply an attention mechanism in the rumor detection process. The attended information includes not only the hidden states in the rumor detection layer, but also the hidden states from the stance detection layer. The experiments on two datasets show that our proposed model outperforms the state-of-the-art rumor detection approaches.", "phrases": ["user credibility information", "rumor detection", "auxiliary task"], "overall_score": 1.908734636999967, "scores": [1.8508263736791803, 0.8039627677796002, 0.5410661110695759], "rank_score": 1.065285084176119} -{"id": "shen-etal-2008-new", "title": "A New String-to-Dependency Machine Translation Algorithm with a Target Dependency Language Model", "abstract": "In this paper, we propose a novel string-to-dependency algorithm for statistical machine translation. With this new framework, we employ a target dependency language model during decoding to exploit long distance word relations, which are unavailable with a traditional n-gram language model. Our experiments show that the string-to-dependency decoder achieves 1.48 point improvement in BLEU and 2.53 point improvement in TER compared to a standard hierarchical string-to-string system on the NIST 04 Chinese-English evaluation set.", "phrases": ["dependency language model", "translation quality", "string", "syntax-based system", "target-side"], "overall_score": 4.325410025526429, "scores": [2.2041331881127877, 1.090151257905155, 0.9320397433054579, 0.5643201001361162, 0.5356341662309042], "rank_score": 1.0652556911380842} -{"id": "zhang-etal-2022-treemix", "title": "TreeMix: Compositional Constituency-based Data Augmentation for Natural Language Understanding", "abstract": "Data augmentation is an effective approach to tackle over-fitting. Many previous works have proposed different data augmentation strategies for NLP, such as noise injection, word replacement, back-translation etc. Though effective, they missed one important characteristic of language\u2013compositionality, meaning of a complex expression is built from its sub-parts. Motivated by this, we propose a compositional data augmentation approach for natural language understanding called TreeMix. Specifically, TreeMix leverages constituency parsing tree to decompose sentences into constituent sub-structures and the Mixup data augmentation technique to recombine them to generate new sentences. Compared with previous approaches, TreeMix introduces greater diversity to the samples generated and encourages models to learn compositionality of NLP data. 
Extensive experiments on text classification and SCAN demonstrate that TreeMix outperforms current state-of-the-art data augmentation methods.", "phrases": ["data augmentation", "natural language understanding", "treemix"], "overall_score": 1.1697052708454652, "scores": [1.7258639615300047, 0.9198976088386532, 0.5483732929784764], "rank_score": 1.0647116211157115} -{"id": "gardiner-dras-2007-exploring", "title": "Exploring Approaches to Discriminating among Near-Synonyms", "abstract": "Near-synonyms are words that mean approximately the same thing, and which tend to be assigned to the same leaf in ontologies such as WordNet. However, they can differ from each other subtly in both meaning and usage\u2014consider the pair of near-synonyms frugal and stingy\u2014and therefore choosing the appropriate near-synonym for a given context is not a trivial problem. Initial work by Edmonds (1997) suggested that corpus statistics methods would not be particularly effective, and led to subsequent work adopting methods based on specific lexical resources. In earlier work (Gardiner and Dras, 2007) we discussed the hypothesis that some kind of corpus statistics approach may still be effective in some situations, particularly if the near-synonyms differ in sentiment from each other, and we presented some preliminary confirmation of the truth of this hypothesis. This suggests that problems involving this type of near-synonym may be particularly amenable to corpus statistics methods. In this paper we investigate whether this result extends to a different corpus statistics method and in addition we analyse the results with respect to a possible confounding factor discussed in the previous work: the skewness of the sets of near-synonyms. Our results show that the relationship between success in prediction and the nature of the near-synonyms is method dependent and that skewness is a more significant factor.", "phrases": ["near-synonyms", "different corpus", "web"], "overall_score": 1.7135754784267458, "scores": [2.099429763242155, 0.5667028481048157, 0.5279803119067427], "rank_score": 1.0647043077512377} -{"id": "erk-pado-2008-structured", "title": "A Structured Vector Space Model for Word Meaning in Context", "abstract": "We address the task of computing vector space representations for the meaning of word occurrences, which can vary widely according to context. This task is a crucial step towards a robust, vector-based compositional account of sentence meaning. We argue that existing models for this task do not take syntactic structure sufficiently into account. \n \nWe present a novel structured vector space model that addresses these issues by incorporating the selectional preferences for words' argument positions. This makes it possible to integrate syntax into the computation of word meaning in context. In addition, the model performs at and above the state of the art for modeling the contextual adequacy of paraphrases.", "phrases": ["vector space", "syntactic context", "denotation"], "overall_score": 4.076325256525965, "scores": [1.4142103405423037, 1.2251076768477427, 0.5547590724079161], "rank_score": 1.0646923632659873} -{"id": "zhang-vogel-2007-pandora", "title": "PanDoRA: a large-scale two-way statistical machine translation system for hand-held devices", "abstract": "The statistical machine translation (SMT) approach has taken a lead place in the field of Machine Translation for its better translation quality and lower cost in training compared to other approaches. 
However, due to the high demand for computing resources, an SMT system cannot be directly run on hand-held devices. Most existing hand-held translation systems are either interlingua-based, which require non-trivial human efforts to write grammar rules, or use the client/server architecture, which is constrained by the availability of wireless connections. In this paper we present PanDoRA, a two-way phrase-based statistical machine translation system for stand-alone hand-held devices. Powered by special designs such as integerized computation and compact data structure, PanDoRA can translate dialogue speech on off-the-shelf PDAs in real time. PanDoRA uses a 64K-word vocabulary and millions of phrase pairs for each translation direction. To our knowledge, PanDoRA is the first large-scale SMT system with built-in reordering models running on hand-held devices. We have successfully developed several speech-to-speech translation systems using PanDoRA and our experiments show that PanDoRA's translation quality is comparable to that of the state-of-the-art phrase-based statistical machine translation systems such as Pharaoh and STTK.", "phrases": ["two-way", "device", "smt system"], "overall_score": 1.475945102165639, "scores": [1.7837958436796382, 0.8590815747517625, 0.5511306008466823], "rank_score": 1.064669339759361} -{"id": "karamanolakis-etal-2020-cross", "title": "Cross-Lingual Text Classification with Minimal Resources by Transferring a Sparse Teacher", "abstract": "Cross-lingual text classification alleviates the need for manually labeled documents in a target language by leveraging labeled documents from other languages. Existing approaches for transferring supervision across languages require expensive cross-lingual resources, such as parallel corpora, while less expensive cross-lingual representation learning approaches train classifiers without target labeled documents. In this work, we propose a cross-lingual teacher-student method, CLTS, that generates \u201cweak\u201d supervision in the target language using minimal cross-lingual resources, in the form of a small number of word translations. Given a limited translation budget, CLTS extracts and transfers only the most important task-specific seed words across languages and initializes a teacher classifier based on the translated seed words. Then, CLTS iteratively trains a more powerful student that also exploits the context of the seed words in unlabeled target documents and outperforms the teacher. CLTS is simple and surprisingly effective in 18 diverse languages: by transferring just 20 seed words, even a bag-of-words logistic regression student outperforms state-of-the-art cross-lingual methods (e.g., based on multilingual BERT). Moreover, CLTS can accommodate any type of student classifier: leveraging a monolingual BERT student leads to further improvements and outperforms even more expensive approaches by up to 12% in accuracy. Finally, CLTS addresses emerging tasks in low-resource languages using just a small number of word translations.", "phrases": ["teacher", "seed word", "cross-lingual text classification"], "overall_score": 1.713343990762315, "scores": [2.0794510844629914, 0.55934311378116, 0.5548872308910695], "rank_score": 1.0645604763784071} -{"id": "li-ji-2014-incremental", "title": "Incremental Joint Extraction of Entity Mentions and Relations", "abstract": "We present an incremental joint framework to simultaneously extract entity mentions and relations using structured perceptron with efficient beam-search. 
A segment-based decoder based on the idea of semi-Markov chain is adopted to the new framework as opposed to traditional token-based tagging. In addition, by virtue of the inexact search, we developed a number of new and effective global features as soft constraints to capture the interdependency among entity mentions and relations. Experiments on Automatic Content Extraction (ACE) corpora demonstrate that our joint model significantly outperforms a strong pipelined baseline, which attains better performance than the best-reported end-to-end system.", "phrases": ["joint extraction", "entity mention", "feature engineering", "subtask", "end-to-end relation extraction"], "overall_score": 3.952760976383545, "scores": [0.8899682029970424, 1.605483175426724, 1.349302569485515, 0.8469038548889753, 0.6303891446998032], "rank_score": 1.064409389499612} -{"id": "huang-papineni-2007-hierarchical", "title": "Hierarchical System Combination for Machine Translation", "abstract": "Given multiple translations of the same source sentence, how to combine them to produce a translation that is better than any single system output? We propose a hierarchical system combination framework for machine translation. This framework integrates multiple MT systems\u2019 output at the word-, phrase- and sentence- levels. By boosting common word and phrase translation pairs, pruning unused phrases, and exploring decoding paths adopted by other MT systems, this framework achieves better translation quality with much less redecoding time. The full sentence translation hypotheses from multiple systems are additionally selected based on N-gram language models trained on word/word-POS mixed stream, which further improves the translation quality. We consistently observed significant improvements on several test sets in multiple languages covering different genres.", "phrases": ["machine translation", "system output", "path"], "overall_score": 2.551629179855377, "scores": [2.082910126507667, 0.5607723576774233, 0.5486535725238383], "rank_score": 1.0641120189029762} -{"id": "barker-2010-cosubstitution", "title": "Cosubstitution, Derivational Locality, and Quantifier Scope", "abstract": "Quantifier scope challenges the mantra of Tree Adjoining Grammar (TAG) that all syntactic dependencies are local once syntactic recursion has been factored out. The reason is that on current TAG analyses, a quantifier and the furthest reaches of its scope domain are in general not part of any (unicomponent) elementary tree. In this paper, I consider a novel basic TAG operation called COSUBSTITUTION. In normal substitution, the root of one tree (the argument) replaces a matching non-terminal on the frontier of another tree (the functor). In cosubstitution, the syntactic result is the same, leaving weak and strong generative capacity unchanged, but the derivational and semantic roles are reversed: the embedded subtree is viewed as the functor, and the embedding matrix is viewed as its semantic argument, i.e., as its nuclear scope. On this view, a quantifier taking scope amounts to entering a derivation at the exact moment that its nuclear scope has been constructed. 
Thus the relationship of a quantifier and its scope is constrained by DERIVATIONAL LOCALITY rather than by elementary-tree locality.", "phrases": ["derivational locality", "quantifier scope", "substitution", "cosubstitution"], "overall_score": 1.1687771925226673, "scores": [1.9050444872667638, 0.9198282619611635, 0.8590334223622992, 0.5715612196934492], "rank_score": 1.0638668478209188} -{"id": "ribeiro-etal-2018-local", "title": "Local String Transduction as Sequence Labeling", "abstract": "We show that the general problem of string transduction can be reduced to the problem of sequence labeling. While character deletions and insertions are allowed in string transduction, they do not exist in sequence labeling. We show how to overcome this difference. Our approach can be used with any sequence labeling algorithm and it works best for problems in which string transduction imposes a strong notion of locality (no long range dependencies). We experiment with spelling correction for social media, OCR correction, and morphological inflection, and we see that it behaves better than seq2seq models and yields state-of-the-art results in several cases.", "phrases": ["string transduction", "general problem", "character deletion"], "overall_score": 1.9057485936300482, "scores": [2.0623789072777057, 0.5679975710400386, 0.5604791466023299], "rank_score": 1.0636185416400246} -{"id": "mi-huang-2008-forest", "title": "Forest-based Translation Rule Extraction", "abstract": "Translation rule extraction is a fundamental problem in machine translation, especially for linguistically syntax-based systems that need parse trees from either or both sides of the bi-text. The current dominant practice only uses 1-best trees, which adversely affects the rule set quality due to parsing errors. So we propose a novel approach which extracts rules from a packed forest that compactly encodes exponentially many parses. Experiments show that this method improves translation quality by over 1 BLEU point on a state-of-the-art tree-to-string system, and is 0.5 points better than (and twice as fast as) extracting on 30-best parses. When combined with our previous work on forest-based decoding, it achieves a 2.5 BLEU points improvement over the baseline, and even outperforms the hierarchical system of Hiero by 0.7 points.", "phrases": ["translation rule extraction", "forest", "decoding", "tree-to-string model"], "overall_score": 3.1862565833058754, "scores": [0.9896034912088476, 1.879246507357744, 0.8381788495861459, 0.5473654599552513], "rank_score": 1.0635985770269971} -{"id": "paul-etal-2009-importance", "title": "On the Importance of Pivot Language Selection for Statistical Machine Translation", "abstract": "Recent research on multilingual statistical machine translation focuses on the usage of pivot languages in order to overcome resource limitations for certain language pairs. Due to the richness of available language resources, English is in general the pivot language of choice. In this paper, we investigate the appropriateness of languages other than English as pivot languages. 
Experimental results using state-of-the-art statistical machine translation techniques to translate between twelve languages revealed that the translation quality of 61 out of 110 language pairs improved when a non-English pivot language was chosen.", "phrases": ["pivot language", "statistical machine translation", "recent research"], "overall_score": 2.0695763752564122, "scores": [0.9196342890151915, 1.4092191039400006, 0.8618022130004107], "rank_score": 1.0635518686518675} -{"id": "minkov-etal-2007-generating", "title": "Generating Complex Morphology for Machine Translation", "abstract": "We present a novel method for predicting inflected word forms for generating morphologically rich languages in machine translation. We utilize a rich set of syntactic and morphological knowledge sources from both source and target sentences in a probabilistic model, and evaluate their contribution in generating Russian and Arabic sentences. Our results show that the proposed model substantially outperforms the commonly used baseline of a trigram target language model; in particular, the use of morphological and syntactic features leads to large gains in prediction accuracy. We also show that the proposed method is effective with a relatively small amount of data.", "phrases": ["machine translation", "inflection", "arabic sentence", "post-processing step", "morphological generation"], "overall_score": 3.3784784039089435, "scores": [1.9789669083975725, 1.3242122914213514, 0.82500588710615, 0.6103527515932845, 0.5767881192220338], "rank_score": 1.0630651915480784} -{"id": "bruni-etal-2011-distributional", "title": "Distributional semantics from text and images", "abstract": "We present a distributional semantic model combining text- and image-based features. We evaluate this multimodal semantic model on simulating similarity judgments, concept clustering and the BLESS benchmark. When integrated with the same core text-based model, image-based features are at least as good as further text-based features, and they capture different qualitative aspects of the tasks, suggesting that the two sources of information are complementary.", "phrases": ["image", "semantic model", "distributional approach"], "overall_score": 2.210539803430806, "scores": [1.9702973158924395, 0.611591699560017, 0.6072457966448312], "rank_score": 1.0630449373657627} -{"id": "finkel-manning-2009-nested", "title": "Nested Named Entity Recognition", "abstract": "Many named entities contain other named entities inside them. Despite this fact, the field of named entity recognition has almost entirely ignored nested named entity recognition, but due to technological, rather than ideological reasons. In this paper, we present a new technique for recognizing nested named entities, by using a discriminative constituency parser. To train the model, we transform each sentence into a tree, with constituents for each named entity (and no other syntactic structure). We present results on both newspaper and biomedical corpora which contain nested named entities. In three out of four sets of experiments, our model outperforms a standard semi-CRF on the more traditional top-level entities. 
At the same time, we improve the overall F-score by up to 30% over the flat model, which is unable to recover any nested entities.", "phrases": ["named entity recognition", "constituency tree", "node", "parsing method", "crf-based constituency parser"], "overall_score": 3.838495964992697, "scores": [1.0188173042047273, 1.400013184436948, 1.114857668465976, 0.9253495379514443, 0.8560872369582885], "rank_score": 1.0630249864034766} -{"id": "sahlgren-lenci-2016-effects", "title": "The Effects of Data Size and Frequency Range on Distributional Semantic Models", "abstract": "This paper investigates the effects of data size and frequency range on distributional semantic models. We compare the performance of a number of representative models for several test settings over data of varying sizes, and over test items of various frequency. Our results show that neural network-based models underperform when the data is small, and that the most reliable model over data of varying sizes and frequency ranges is the inverted factorized model.", "phrases": ["data size", "frequency range", "distributional semantic model"], "overall_score": 1.904395420282868, "scores": [1.7819273690146606, 0.7817935192206144, 0.6248690754109169], "rank_score": 1.0628633212153973} -{"id": "blunsom-osborne-2008-probabilistic", "title": "Probabilistic Inference for Machine Translation", "abstract": "We advance the state-of-the-art for discriminatively trained machine translation systems by presenting novel probabilistic inference and search methods for synchronous grammars. By approximating the intractable space of all candidate translations produced by intersecting an ngram language model with a synchronous grammar, we are able to train and decode models incorporating millions of sparse, heterogeneous features. Further, we demonstrate the power of the discriminative training paradigm by extracting structured syntactic features, and achieving increases in translation performance.", "phrases": ["synchronous grammar", "probabilistic inference", "derivation"], "overall_score": 2.0681096647699126, "scores": [2.053201287659171, 0.5761796349731997, 0.5590134630599862], "rank_score": 1.062798128564119} -{"id": "li-etal-2019-understanding", "title": "Understanding Data Augmentation in Neural Machine Translation: Two Perspectives towards Generalization", "abstract": "Many Data Augmentation (DA) methods have been proposed for neural machine translation. Existing works measure the superiority of DA methods in terms of their performance on a specific test set, but we find that some DA methods do not exhibit consistent improvements across translation tasks. Based on the observation, this paper makes an initial attempt to answer a fundamental question: what benefits, which are consistent across different methods and tasks, does DA in general obtain? Inspired by recent theoretic advances in deep learning, the paper understands DA from two perspectives towards the generalization ability of a model: input sensitivity and prediction margin, which are defined independent of specific test set thereby may lead to findings with relatively low variance. 
Extensive experiments show that relatively consistent benefits across five DA methods and four translation tasks are achieved regarding both perspectives.", "phrases": ["data augmentation", "neural machine translation", "prediction margin"], "overall_score": 1.9042352737840227, "scores": [1.7270442162213047, 0.9230971708921962, 0.5381804381442592], "rank_score": 1.0627739417525868} -{"id": "zhang-etal-2016-mgnc", "title": "MGNC-CNN: A Simple Approach to Exploiting Multiple Word Embeddings for Sentence Classification", "abstract": "We introduce a novel, simple convolution neural network (CNN) architecture - multi-group norm constraint CNN (MGNC-CNN) that capitalizes on multiple sets of word embeddings for sentence classification. MGNC-CNN extracts features from input embedding sets independently and then joins these at the penultimate layer in the network to form a final feature vector. We then adopt a group regularization strategy that differentially penalizes weights associated with the subcomponents generated from the respective embedding sets. This model is much simpler than comparable alternative architectures and requires substantially less training time. Furthermore, it is flexible in that it does not require input word embeddings to be of the same dimensionality. We show that MGNC-CNN consistently outperforms baseline models.", "phrases": ["sentence classification", "mgnc-cnn", "convolutional layer"], "overall_score": 2.803296614823565, "scores": [1.7198035658531394, 0.9328039985195092, 0.534094653156196], "rank_score": 1.0622340725096149} -{"id": "williams-2012-extracting", "title": "Extracting fine-grained durations for verbs from Twitter", "abstract": "This paper presents recent work on a new method to automatically extract fine-grained duration information for common verbs using a large corpus of Twitter tweets. Regular expressions were used to extract verbs and durations from each tweet in a corpus of more than 14 million tweets with 90.38% precision covering 486 verb lemmas. Descriptive statistics for each verb lemma were found as well as the most typical fine-grained duration measure. Mean durations were compared with previous work by Gusev et al. (2011) and it was found that there is a small positive correlation.", "phrases": ["duration", "twitter", "verb lemmas"], "overall_score": 1.16671574095617, "scores": [1.72361948695172, 0.8854962279956726, 0.5768555862765113], "rank_score": 1.0619904337413015} -{"id": "ballesteros-etal-2015-improved", "title": "Improved Transition-based Parsing by Modeling Characters instead of Words with LSTMs", "abstract": "We present extensions to a continuous-state dependency parsing method that makes it applicable to morphologically rich languages. Starting with a high-performance transition-based parser that uses long short-term memory (LSTM) recurrent neural networks to learn representations of the parser state, we replace lookup-based word representations with representations constructed from the orthographic representations of the words, also using LSTMs. This allows statistical sharing across word forms that are similar on the surface. 
Experiments for morphologically rich languages show that the parsing model benefits from incorporating the character-based encodings of words.", "phrases": ["dependency parsing", "pos tag", "character-based representation", "bidirectional lstm"], "overall_score": 3.5745162667086405, "scores": [1.3639214493671667, 1.1109265660571925, 0.9159296989530208, 0.8553787829244905], "rank_score": 1.0615391243254677} -{"id": "zhou-etal-2020-hierarchy", "title": "Hierarchy-Aware Global Model for Hierarchical Text Classification", "abstract": "Hierarchical text classification is an essential yet challenging subtask of multi-label text classification with a taxonomic hierarchy. Existing methods have difficulties in modeling the hierarchical label structure in a global view. Furthermore, they cannot make full use of the mutual interactions between the text feature space and the label space. In this paper, we formulate the hierarchy as a directed graph and introduce hierarchy-aware structure encoders for modeling label dependencies. Based on the hierarchy encoder, we propose a novel end-to-end hierarchy-aware global model (HiAGM) with two variants. A multi-label attention variant (HiAGM-LA) learns hierarchy-aware label embeddings through the hierarchy encoder and conducts inductive fusion of label-aware text features. A text feature propagation model (HiAGM-TP) is proposed as the deductive variant that directly feeds text features into hierarchy encoders. Compared with previous works, both HiAGM-LA and HiAGM-TP achieve significant and consistent improvements on three benchmark datasets.", "phrases": ["hierarchical text classification", "hierarchy-aware global model", "gcn"], "overall_score": 2.332436783578707, "scores": [1.6842633887462295, 0.978855159003258, 0.5214946333115179], "rank_score": 1.0615377270203352} -{"id": "naseem-etal-2010-using", "title": "Using Universal Linguistic Knowledge to Guide Grammar Induction", "abstract": "We present an approach to grammar induction that utilizes syntactic universals to improve dependency parsing across a range of languages. Our method uses a single set of manually-specified language-independent rules that identify syntactic dependencies between pairs of syntactic categories that commonly occur across languages. During inference of the probabilistic model, we use posterior expectation constraints to require that a minimum proportion of the dependencies we infer be instances of these rules. We also automatically refine the syntactic categories given in our coarsely tagged input. Across six languages our approach outperforms state-of-the-art unsupervised methods by a significant margin.", "phrases": ["grammar induction", "noun", "other work"], "overall_score": 3.498598980188039, "scores": [1.5224440064120857, 1.12727879526357, 0.5348392283281465], "rank_score": 1.0615206766679341} -{"id": "ammar-etal-2016-many", "title": "Many Languages, One Parser", "abstract": "We train one multilingual model for dependency parsing and use it to parse sentences in several languages. The parsing model uses (i) multilingual word clusters and embeddings; (ii) token-level language information; and (iii) language-specific features (fine-grained POS tags). This input representation enables the parser not only to parse effectively in multiple languages, but also to generalize across languages based on linguistic universals and typological similarities, making it more effective to learn from limited annotations. 
Our parser's performance compares favorably to strong baselines in a range of data scenarios, including when the target language has a large treebank, a small treebank, or no treebank for training.", "phrases": ["dependency parsing", "pos tag", "multiple language", "word embedding", "single model"], "overall_score": 3.9669263209476435, "scores": [1.755394810607291, 1.1188076104196716, 0.9619221759836609, 0.9093446583257175, 0.5612147564645305], "rank_score": 1.0613368023601744} -{"id": "chen-etal-2020-recall", "title": "Recall and Learn: Fine-tuning Deep Pretrained Language Models with Less Forgetting", "abstract": "Deep pretrained language models have achieved great success in the way of pretraining first and then fine-tuning. But such a sequential transfer learning paradigm often confronts the catastrophic forgetting problem and leads to sub-optimal performance. To fine-tune with less forgetting, we propose a recall and learn mechanism, which adopts the idea of multi-task learning and jointly learns pretraining tasks and downstream tasks. Specifically, we introduce a Pretraining Simulation mechanism to recall the knowledge from pretraining tasks without data, and an Objective Shifting mechanism to focus the learning on downstream tasks gradually. Experiments show that our method achieves state-of-the-art performance on the GLUE benchmark. Our method also enables BERT-base to achieve better average performance than direct fine-tuning of BERT-large. Further, we provide the open-source RecAdam optimizer, which integrates the proposed mechanisms into the Adam optimizer, to facilitate the NLP community.", "phrases": ["fine-tuning", "language model", "forgetting", "recadam", "recall"], "overall_score": 2.4435805401953985, "scores": [1.7902032960511267, 0.8868714528020832, 1.0491496325501917, 0.9965760488713773, 0.5833672931903648], "rank_score": 1.0612335446930288} -{"id": "borg-etal-2009-evolutionary", "title": "Evolutionary Algorithms for Definition Extraction", "abstract": "Books and other text-based learning material contain implicit information which can aid the learner but which usually can only be accessed through a semantic analysis of the text. Definitions of new concepts appearing in the text are one such instance. If extracted and presented to the learner in form of a glossary, they can provide an excellent reference for the study of the main text. One way of extracting definitions is by reading through the text and annotating definitions manually --- a tedious and boring job. In this paper, we explore the use of machine learning to extract definitions from nontechnical texts, reducing human expert input to a minimum. We report on experiments we have conducted on the use of genetic programming to learn the typical linguistic forms of definitions and a genetic algorithm to learn the relative importance of these forms. Results are very positive, showing the feasibility of exploring further the use of these techniques in definition extraction. 
The genetic program is able to learn similar rules derived by a human linguistic expert, and the genetic algorithm is able to rank candidate definitions in an order of confidence.", "phrases": ["definition extraction", "programming", "linguistic form", "genetic algorithm", "weight"], "overall_score": 2.3305250907985147, "scores": [0.9609016924237095, 1.5880067770001944, 1.3161252026639545, 0.867394867928277, 0.5709098506910648], "rank_score": 1.06066767814144} -{"id": "joanis-etal-2020-nunavut", "title": "The Nunavut Hansard Inuktitut\u2013English Parallel Corpus 3.0 with Preliminary Machine Translation Results", "abstract": "The Inuktitut language, a member of the Inuit-Yupik-Unangan language family, is spoken across Arctic Canada and noted for its morphological complexity. It is an official language of two territories, Nunavut and the Northwest Territories, and has recognition in additional regions. This paper describes a newly released sentence-aligned Inuktitut\u2013English corpus based on the proceedings of the Legislative Assembly of Nunavut, covering sessions from April 1999 to June 2017. With approximately 1.3 million aligned sentence pairs, this is, to our knowledge, the largest parallel corpus of a polysynthetic language or an Indigenous language of the Americas released to date. The paper describes the alignment methodology used, the evaluation of the alignments, and preliminary experiments on statistical and neural machine translation (SMT and NMT) between Inuktitut and English, in both directions.", "phrases": ["parallel corpus", "inuktitut", "official language"], "overall_score": 2.205538187348292, "scores": [1.2087916393986968, 1.080879309772901, 0.8922480562070173], "rank_score": 1.0606396684595383} -{"id": "koo-collins-2010-efficient", "title": "Efficient Third-Order Dependency Parsers", "abstract": "We present algorithms for higher-order dependency parsing that are \"third-order\" in the sense that they can evaluate substructures containing three dependencies, and \"efficient\" in the sense that they require only O(n4) time. Importantly, our new parsers can utilize both sibling-style and grandchild-style interactions. We evaluate our parsers on the Penn Treebank and Prague Dependency Treebank, achieving unlabeled attachment scores of 93.04% and 87.38%, respectively.", "phrases": ["third-order", "dependency parsing", "projective tree", "graph-based model", "global feature"], "overall_score": 3.9891392256163765, "scores": [2.2134004957435756, 1.3993495403160383, 0.5671732583205426, 0.5618019635957553, 0.5612885446821037], "rank_score": 1.0606027605316029} -{"id": "cimiano-wenderoth-2007-automatic", "title": "Automatic Acquisition of Ranked Qualia Structures from the Web", "abstract": "This paper presents an approach for the automatic acquisition of qualia structures for nouns from the Web and thus opens the possibility to explore the impact of qualia structures for natural language processing at a larger scale. The approach builds on earlier work based on the idea of matching specific lexico-syntactic patterns conveying a certain semantic relation on the World Wide Web using standard search engines. In our approach, the qualia elements are actually ranked for each qualia role with respect to some measure. The specific contribution of the paper lies in the extensive analysis and quantitative comparison of different measures for ranking the qualia elements. 
Further, for the first time, we present a quantitative evaluation of such an approach for learning qualia structures with respect to a handcrafted gold standard.", "phrases": ["acquisition", "qualia structure", "web"], "overall_score": 1.9003233438001048, "scores": [1.7388145457958608, 0.9212803309481837, 0.5216770794306687], "rank_score": 1.0605906520582378} -{"id": "moldovan-etal-2004-models", "title": "Models for the Semantic Classification of Noun Phrases", "abstract": "This paper presents an approach for detecting semantic relations in noun phrases. A learning algorithm, called semantic scattering, is used to automatically label complex nominals, genitives and adjectival noun phrases with the corresponding semantic relation.", "phrases": ["noun phrase", "semantic scattering", "complex nominal", "scheme", "inventory"], "overall_score": 3.0642330935505493, "scores": [3.0852869943722863, 0.574047461214792, 0.5568545586314946, 0.5514783061317303, 0.5330921497410711], "rank_score": 1.0601518940182748} -{"id": "zhang-etal-2016-towards", "title": "Towards Constructing Sports News from Live Text Commentary", "abstract": "In this paper, we investigate the possibility to automatically generate sports news from live text commentary scripts. As a preliminary study, we treat this task as a special kind of document summarization based on sentence extraction. We formulate the task in a supervised learning to rank framework, utilizing both traditional sentence features for generic document summarization and novelly designed task-specific features. To tackle the problem of local redundancy, we also propose a probabilistic sentence selection algorithm. Experiments on our collected data from football live commentary scripts and corresponding sports news demonstrate the feasibility of this task. Evaluation results show that our methods are indeed appropriate for this task, outperforming several baseline methods in different aspects.", "phrases": ["sport news", "text commentary script", "document summarization"], "overall_score": 2.0628976020833627, "scores": [1.4452105916323916, 0.9044062380084137, 0.830742144926716], "rank_score": 1.0601196581891736} -{"id": "chen-etal-2018-effects", "title": "Effects of Stimulus Duration and Vowel Quality in Tone Perception by English Musicians and Non-musicians", "abstract": "The link between music and language has been a subject of great interest, and evidence suggesting a connection between musical abilities and prosodic processing skills in language is growing. Acoustic fundamental frequency (F0), perceived as pitch, differentiates notes in music and word meaning in lexical tone languages. This study examines categorical perception of pitch stimuli among 14 English musicians and 15 English non-musicians, both groups having no exposure to tonal languages. The stimuli consist of continua of falling and rising F0 contours produced on high and low vowels with 9 different durations. The results revealed that musicians were more sensitive to variation in stimulus duration than non-musicians were, and music experience enhanced the sharpness of category boundaries. Significant main effects of vowel quality and pitch directions as well as two-way interactions between vowel and pitch direction, vowel and duration, group and duration, and pitch direction and duration on identification rate were also found. 
Formulae for minimum duration required for English musicians and non-musicians to perceive rising and falling F0 were derived, revealing that musicians require less time to perceive a pitch fall and rise if the change is less than 12 semitones.", "phrases": ["stimulus duration", "vowel quality", "non-musician"], "overall_score": 1.4694526230621403, "scores": [1.802280869024017, 0.8321350761972032, 0.5455420729483229], "rank_score": 1.0599860060565145} -{"id": "chakravarthi-etal-2020-sentiment", "title": "A Sentiment Analysis Dataset for Code-Mixed Malayalam-English", "abstract": "There is an increasing demand for sentiment analysis of text from social media which are mostly code-mixed. Systems trained on monolingual data fail for code-mixed data due to the complexity of mixing at different levels of the text. However, very few resources are available for code-mixed data to create models specific for this data. Although much research in multilingual and cross-lingual sentiment analysis has used semi-supervised or unsupervised methods, supervised methods still perform better. Only a few datasets for popular languages such as English-Spanish, English-Hindi, and English-Chinese are available. There are no resources available for Malayalam-English code-mixed data. This paper presents a new gold standard corpus for sentiment analysis of code-mixed text in Malayalam-English annotated by voluntary annotators. This gold standard corpus obtained a Krippendorff's alpha above 0.8 for the dataset. We use this new corpus to provide the benchmark for sentiment analysis in Malayalam-English code-mixed texts.", "phrases": ["sentiment analysis", "code-mixed text", "non-native script", "malayalam language"], "overall_score": 3.7375116482710253, "scores": [1.5258060663275574, 1.3242984365389543, 0.8439158277130593, 0.5454913383843923], "rank_score": 1.059877917240991} -{"id": "lu-etal-2019-distilling", "title": "Distilling Discrimination and Generalization Knowledge for Event Detection via Delta-Representation Learning", "abstract": "Event detection systems rely on discrimination knowledge to distinguish ambiguous trigger words and generalization knowledge to detect unseen/sparse trigger words. Current neural event detection approaches focus on trigger-centric representations, which work well on distilling discrimination knowledge, but poorly on learning generalization knowledge. To address this problem, this paper proposes a Delta-learning approach to distill discrimination and generalization knowledge by effectively decoupling, incrementally learning and adaptively fusing event representation. Experiments show that our method significantly outperforms previous approaches on unseen/sparse trigger words, and achieves state-of-the-art performance on both ACE2005 and KBP2017 datasets.", "phrases": ["discrimination", "generalization knowledge", "event detection"], "overall_score": 2.3287360195435154, "scores": [1.7755740051016171, 0.8616172907358958, 0.5423690143335107], "rank_score": 1.0598534367236745} -{"id": "rieger-etal-2021-rollinglda-update", "title": "RollingLDA: An Update Algorithm of Latent Dirichlet Allocation to Construct Consistent Time Series from Textual Data", "abstract": "We propose a rolling version of the Latent Dirichlet Allocation, called RollingLDA. By a sequential approach, it enables the construction of LDA-based time series of topics that are consistent with previous states of LDA models. 
After an initial modeling, updates can be computed efficiently, allowing for real-time monitoring and detection of events or structural breaks. For this purpose, we propose suitable similarity measures for topics and provide simulation evidence of superiority over other commonly used approaches. The adequacy of the resulting method is illustrated by an application to an example corpus. In particular, we compute the similarity of sequentially obtained topic and word distributions over consecutive time periods. For a representative example corpus consisting of The New York Times articles from 1980 to 2020, we analyze the effect of several tuning parameter choices and we run the RollingLDA method on the full dataset of approximately 4 million articles to demonstrate its feasibility.", "phrases": ["latent dirichlet allocation", "time series", "rollinglda"], "overall_score": 1.164362245639084, "scores": [1.6955488866666364, 0.9114752167634859, 0.5725204665219004], "rank_score": 1.0598481899840075} -{"id": "kang-etal-2017-detecting", "title": "Detecting and Explaining Causes From Text For a Time Series Event", "abstract": "Explaining underlying causes or effects about events is a challenging but valuable task. We define a novel problem of generating explanations of a time series event by (1) searching cause and effect relationships of the time series with textual data and (2) constructing a connecting chain between them to generate an explanation. To detect causal features from text, we propose a novel method based on the Granger causality of time series between features extracted from text such as N-grams, topics, sentiments, and their composition. The generation of the sequence of causal entities requires a commonsense causative knowledge base with efficient reasoning. To ensure good interpretability and appropriate lexical usage we combine symbolic and neural representations, using a neural reasoning algorithm trained on commonsense causal tuples to predict the next cause step. Our quantitative and human analysis show empirical evidence that our method successfully extracts meaningful causality relationships between time series with textual features and generates appropriate explanation between them.", "phrases": ["time series event", "granger causality", "news", "stock price"], "overall_score": 1.898916509219313, "scores": [1.9817453264264153, 1.1409985285867856, 0.5755846606611777, 0.5408934152412131], "rank_score": 1.059805482728898} -{"id": "fang-etal-2019-implicit", "title": "Implicit Deep Latent Variable Models for Text Generation", "abstract": "Deep latent variable models (LVM) such as variational auto-encoder (VAE) have recently played an important role in text generation. One key factor is the exploitation of smooth latent structures to guide the generation. However, the representation power of VAEs is limited due to two reasons: (1) the Gaussian assumption is often made on the variational posteriors; and meanwhile (2) a notorious \u201cposterior collapse\u201d issue occurs. In this paper, we advocate sample-based representations of variational distributions for natural language, leading to implicit latent features, which can provide flexible representation power compared with Gaussian-based posteriors. We further develop an LVM to directly match the aggregated posterior to the prior. It can be viewed as a natural extension of VAEs with a regularization of maximizing mutual information, mitigating the \u201cposterior collapse\u201d issue. 
We demonstrate the effectiveness and versatility of our models in various text generation scenarios, including language modeling, unaligned style transfer, and dialog response generation. The source code to reproduce our experimental results is available on GitHub.", "phrases": ["text generation", "posterior", "sample-based representation", "implicit latent feature"], "overall_score": 2.062279539277435, "scores": [1.947670776289139, 0.836810463973402, 0.8299334373458486, 0.6247934693425188], "rank_score": 1.059802036737727} -{"id": "pasupat-liang-2015-compositional", "title": "Compositional Semantic Parsing on Semi-Structured Tables", "abstract": "Two important aspects of semantic parsing for question answering are the breadth of the knowledge source and the depth of logical compositionality. While existing work trades off one aspect for another, this paper simultaneously makes progress on both fronts through a new task: answering complex questions on semi-structured tables using question-answer pairs as supervision. The central challenge arises from two compounding factors: the broader domain results in an open-ended set of relations, and the deeper compositionality results in a combinatorial explosion in the space of logical forms. We propose a logical-form driven parsing algorithm guided by strong typing constraints and show that it obtains significant improvements over natural baselines. For evaluation, we created a new dataset of 22,033 complex questions on Wikipedia tables, which is made publicly available.", "phrases": ["semantic parsing", "table", "wikipedia", "natural language question"], "overall_score": 3.4524590434335423, "scores": [1.6257140321763917, 1.2334550825763841, 0.8567923513899307, 0.5226594625423454], "rank_score": 1.0596552321712631} -{"id": "das-petrov-2011-unsupervised", "title": "Unsupervised Part-of-Speech Tagging with Bilingual Graph-Based Projections", "abstract": "We describe a novel approach for inducing unsupervised part-of-speech taggers for languages that have no labeled training data, but have translated text in a resource-rich language. Our method does not assume any knowledge about the target language (in particular no tagging dictionary is assumed), making it applicable to a wide array of resource-poor languages. We use graph-based label propagation for cross-lingual knowledge transfer and use the projected labels as features in an unsupervised model (Berg-Kirkpatrick et al., 2010). Across eight European languages, our approach results in an average absolute improvement of 10.4% over a state-of-the-art baseline, and 16.7% over vanilla hidden Markov models induced with the Expectation Maximization algorithm.", "phrases": ["resource-rich language", "label propagation", "pos tag", "annotation projection", "parallel corpus"], "overall_score": 4.144905129363241, "scores": [1.4992652566810298, 1.359973744379208, 1.3445291089790794, 0.5565680818674116, 0.5373130340972555], "rank_score": 1.059529845200797} -{"id": "de-cao-etal-2008-combining", "title": "Combining Word Sense and Usage for Modeling Frame Semantics", "abstract": "Models of lexical semantics are core paradigms in most NLP applications, such as dialogue, information extraction and document understanding. Unfortunately, the coverage of currently available resources (e.g. FrameNet) is still unsatisfactory. This paper presents a largely applicable approach for extending frame semantic resources, combining word sense information derived from WordNet and corpus-based distributional information. 
We report a large scale evaluation over the English FrameNet, and results on extending FrameNet to the Italian language, as the basis of the development of a full FrameNet for Italian.", "phrases": ["frame", "wordnet", "italian"], "overall_score": 2.5406177681860447, "scores": [1.355303292267845, 1.2951592269650045, 0.5280971914518235], "rank_score": 1.0595199035615577} -{"id": "sokolova-bobicev-2011-sentiments", "title": "Sentiments and Opinions in Health-related Web messages", "abstract": "In this work, we analyze sentiments and opinions expressed in user-written Web messages. The messages discuss health-related topics: medications, treatment, illness and cure, etc. Recognition of sentiments and opinions is a challenging task for humans as well as an automated text analysis. In this work, we apply both the approaches. The paper presents the annotation model, discusses characteristics of subjectivity annotations in health-related messages, and reports the results of the annotation agreement. For external evaluation of the labeling results, we apply Machine Learning methods on the annotated data and present the obtained results.", "phrases": ["opinion", "web message", "medication"], "overall_score": 2.061425035654963, "scores": [1.756100195589538, 0.8944763566336845, 0.5275121740045469], "rank_score": 1.0593629087425898} -{"id": "manuvinakurike-etal-2017-using", "title": "Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game", "abstract": "We apply Reinforcement Learning (RL) to the problem of incremental dialogue policy learning in the context of a fast-paced dialogue game. We compare the policy learned by RL with a high-performance baseline policy which has been shown to perform very efficiently (nearly as well as humans) in this dialogue game. The RL policy outperforms the baseline policy in offline simulations (based on real user data). We provide a detailed comparison of the RL policy and the baseline policy, including information about how much effort and time it took to develop each one of them. We also highlight the cases where the RL policy performs better, and show that understanding the RL policy can provide valuable insights which can inform the creation of an even better rule-based policy.", "phrases": ["reinforcement learning", "fast-paced dialogue game", "dialogue policy learning"], "overall_score": 2.2021421946651163, "scores": [1.7671409203166424, 0.8896021952593711, 0.5202765079996885], "rank_score": 1.0590065411919005} -{"id": "liang-etal-2021-super", "title": "Super Tickets in Pre-Trained Language Models: From Model Compression to Improving Generalization", "abstract": "The Lottery Ticket Hypothesis suggests that an over-parametrized network consists of \u201clottery tickets\u201d, and training a certain collection of them (i.e., a subnetwork) can match the performance of the full model. In this paper, we study such a collection of tickets, which is referred to as \u201cwinning tickets\u201d, in extremely over-parametrized models, e.g., pre-trained language models. We observe that at certain compression ratios, the generalization performance of the winning tickets can not only match but also exceed that of the full model. In particular, we observe a phase transition phenomenon: As the compression ratio increases, generalization performance of the winning tickets first improves then deteriorates after a certain threshold. We refer to the tickets on the threshold as \u201csuper tickets\u201d. 
We further show that the phase transition is task and model dependent \u2014 as the model size becomes larger and the training data set becomes smaller, the transition becomes more pronounced. Our experiments on the GLUE benchmark show that the super tickets improve single task fine-tuning by 0.9 points on BERT-base and 1.0 points on BERT-large, in terms of task-average score. We also demonstrate that adaptively sharing the super tickets across tasks benefits multi-task learning.", "phrases": ["generalization", "model size", "super ticket"], "overall_score": 2.0606254955585093, "scores": [1.8058611844646253, 0.8465767028718498, 0.5244181919006166], "rank_score": 1.058952026412364} -{"id": "poria-etal-2016-deeper", "title": "A Deeper Look into Sarcastic Tweets Using Deep Convolutional Neural Networks", "abstract": "Sarcasm detection is a key task for many natural language processing tasks. In sentiment analysis, for example, sarcasm can flip the polarity of an \u201capparently positive\u201d sentence and, hence, negatively affect polarity detection performance. To date, most approaches to sarcasm detection have treated the task primarily as a text categorization problem. Sarcasm, however, can be expressed in very subtle ways and requires a deeper understanding of natural language that standard text categorization techniques cannot grasp. In this work, we develop models based on a pre-trained convolutional neural network for extracting sentiment, emotion and personality features for sarcasm detection. Such features, along with the network's baseline features, allow the proposed models to outperform the state of the art on benchmark datasets. We also address the often ignored generalizability issue of classifying data that have not been seen by the models at learning phase.", "phrases": ["convolutional neural network", "sarcasm", "emotion"], "overall_score": 2.792772784450515, "scores": [1.404170905711832, 1.2117394799537264, 0.5588286619977215], "rank_score": 1.0582463492210934} -{"id": "cano-basave-etal-2014-automatic", "title": "Automatic Labelling of Topic Models Learned from Twitter by Summarisation", "abstract": "Latent topics derived by topic models such as Latent Dirichlet Allocation (LDA) are the result of hidden thematic structures which provide further insights into the data. The automatic labelling of such topics derived from social media poses however new challenges since topics may characterise novel events happening in the real world. Existing automatic topic labelling approaches which depend on external knowledge sources become less applicable here since relevant articles/concepts of the extracted topics may not exist in external sources. In this paper we propose to address the problem of automatic labelling of latent topics learned from Twitter as a summarisation problem. We introduce a framework which apply summarisation algorithms to generate topic labels. These algorithms are independent of external sources and only rely on the identification of dominant terms in documents related to the latent topic. We compare the efficiency of existing state of the art summarisation algorithms. 
Our results suggest that summarisation algorithms generate better topic labels which capture event-related context compared to the top-n terms returned by LDA.", "phrases": ["twitter", "latent topic", "automatic labelling"], "overall_score": 1.1625529655411084, "scores": [1.7986154264696335, 0.7854454162686142, 0.5905430940623769], "rank_score": 1.0582013122668747} -{"id": "lin-etal-2017-reasoning", "title": "Reasoning with Heterogeneous Knowledge for Commonsense Machine Comprehension", "abstract": "Reasoning with commonsense knowledge is critical for natural language understanding. Traditional methods for commonsense machine comprehension mostly only focus on one specific kind of knowledge, neglecting the fact that commonsense reasoning requires simultaneously considering different kinds of commonsense knowledge. In this paper, we propose a multi-knowledge reasoning method, which can exploit heterogeneous knowledge for commonsense machine comprehension. Specifically, we first mine different kinds of knowledge (including event narrative knowledge, entity semantic knowledge and sentiment coherent knowledge) and encode them as inference rules with costs. Then we propose a multi-knowledge reasoning model, which selects inference rules for a specific reasoning context using attention mechanism, and reasons by summarizing all valid inference rules. Experiments on RocStories show that our method outperforms traditional models significantly.", "phrases": ["heterogeneous knowledge", "different kind", "inference rule"], "overall_score": 1.8959312867051112, "scores": [2.0027688423035803, 0.6059900748727602, 0.5656592777869655], "rank_score": 1.058139398321102} -{"id": "list-2012-lexstat", "title": "LexStat: Automatic Detection of Cognates in Multilingual Wordlists", "abstract": "In this paper, a new method for automatic cognate detection in multilingual wordlists will be presented. The main idea behind the method is to combine different approaches to sequence comparison in historical linguistics and evolutionary biology into a new framework which closely models the most important aspects of the comparative method. The method is implemented as a Python program and provides a convenient tool which is publicly available, easily applicable, and open for further testing and improvement. Testing the method on a large gold standard of IPA-encoded wordlists showed that its results are highly consistent and outperform previous methods.", "phrases": ["automatic detection", "cognate detection", "lexstat"], "overall_score": 2.436426078940374, "scores": [1.3045929907404459, 1.2964610594910508, 0.5733251547154461], "rank_score": 1.058126401648981} -{"id": "mcdonald-etal-2013-universal", "title": "Universal Dependency Annotation for Multilingual Parsing", "abstract": "We present a new collection of treebanks with homogeneous syntactic dependency annotation for six languages: German, English, Swedish, Spanish, French and Korean. To show the usefulness of such a resource, we present a case study of crosslingual transfer parsing with more reliable evaluation than has been possible before. This \u2018universal\u2019 treebank is made freely available in order to facilitate research on multilingual dependency parsing. 
1", "phrases": ["swedish", "universal dependency annotation", "schema", "project", "different language"], "overall_score": 4.23989319645083, "scores": [1.995421881257212, 1.1058079939118537, 1.0696668885146803, 0.5894356097621158, 0.5298356765622723], "rank_score": 1.058033610001627} -{"id": "barkarson-steingrimsson-2019-compiling", "title": "Compiling and Filtering ParIce: An English-Icelandic Parallel Corpus", "abstract": "We present ParIce, a new English-Icelandic parallel corpus. This is the first parallel corpus built for the purposes of language technology development and research for Icelandic, although some Icelandic texts can be found in various other multilingual parallel corpora. We map out which Icelandic texts are available for these purposes, collect aligned data and align other bilingual texts we acquired. We describe the alignment process and how we filter the data to weed out noise and bad alignments. In total we collected 43 million Icelandic words in 4.3 million aligned segment pairs, but after filtering, our corpus includes 38.8 million Icelandic words in 3.5 million segment pairs. We estimate that approximately 5% of the corpus data is noise or faulty alignments while more than 50% of the segments we deleted were faulty. We estimate that our filtering process reduced the number of faulty segments in the corpus by more than 60% while only reducing the number of good alignments by approximately 8%.", "phrases": ["parice", "english-icelandic parallel corpus", "language technology development"], "overall_score": 1.7026567781565243, "scores": [2.0656502767777036, 0.5862872160778138, 0.521822920644573], "rank_score": 1.0579201378333636} -{"id": "wu-etal-2009-domain", "title": "Domain adaptive bootstrapping for named entity recognition", "abstract": "Bootstrapping is the process of improving the performance of a trained classifier by iteratively adding data that is labeled by the classifier itself to the training set, and retraining the classifier. It is often used in situations where labeled training data is scarce but unlabeled data is abundant. In this paper, we consider the problem of domain adaptation: the situation where training data may not be scarce, but belongs to a different domain from the target application domain. As the distribution of unlabeled data is different from the training data, standard bootstrapping often has difficulty selecting informative data to add to the training set. We propose an effective domain adaptive bootstrapping algorithm that selects unlabeled target domain data that are informative about the target domain and easy to automatically label correctly. We call these instances bridges, as they are used to bridge the source domain to the target domain. We show that the method outperforms supervised, transductive and bootstrapping algorithms on the named entity recognition task.", "phrases": ["bootstrapping", "entity recognition", "unlabeled data", "domain adaptation"], "overall_score": 2.4347060451685993, "scores": [1.7843836136590605, 0.9372638889443726, 0.9546718997033995, 0.5531981995860156], "rank_score": 1.057379400473212} -{"id": "nie-etal-2020-adversarial", "title": "Adversarial NLI: A New Benchmark for Natural Language Understanding", "abstract": "We introduce a new large-scale NLI benchmark dataset, collected via an iterative, adversarial human-and-model-in-the-loop procedure. 
We show that training models on this new dataset leads to state-of-the-art performance on a variety of popular NLI benchmarks, while posing a more difficult challenge with its new test set. Our analysis sheds light on the shortcomings of current state-of-the-art models, and shows that non-expert annotators are successful at finding their weaknesses. The data collection method can be applied in a never-ending learning scenario, becoming a moving target for NLU, rather than a static benchmark that will quickly saturate.", "phrases": ["natural language understanding", "state-of-the-art model", "adversarial nli", "hamlet", "crowdworker"], "overall_score": 4.04754086927885, "scores": [1.9071971600435387, 0.922498209702797, 1.1006612450787294, 0.8315582520182425, 0.523956085957976], "rank_score": 1.0571741905602567} -{"id": "belz-etal-2015-describing", "title": "Describing Spatial Relationships between Objects in Images in English and French", "abstract": "The context for the work we report here is the automatic description of spatial relationships between pairs of objects in images. We investigate the task of selecting prepositions for such spatial relationships. We describe the two datasets of object pairs and prepositions we have created for English and French, and report results for predicting prepositions for object pairs in both of these languages, using two methods: (a) an existing approach which manually fixes the mapping from geometrical features to prepositions, and (b) a Naive Bayes classifier trained on the English and French datasets. For the latter we use features based on object class labels and geometrical measurements of object bounding boxes. We evaluate the automatically generated prepositions on unseen data in terms of accuracy against the human-selected prepositions.", "phrases": ["spatial relationship", "object", "french"], "overall_score": 1.1612006326912223, "scores": [1.753512069401181, 0.8185901250993197, 0.5988089030778565], "rank_score": 1.0569703658594525} -{"id": "xu-etal-2007-seed", "title": "A Seed-driven Bottom-up Machine Learning Framework for Extracting Relations of Various Complexity", "abstract": "A minimally supervised machine learning framework is described for extracting relations of various complexity. Bootstrapping starts from a small set of n-ary relation instances as \u201cseeds\u201d, in order to automatically learn pattern rules from parsed data, which then can extract new instances of the relation and its projections. We propose a novel rule representation enabling the composition of n-ary relation rules on top of the rules for projections of the relation. The compositional approach to rule construction is supported by a bottom-up pattern extraction method. In comparison to other automatic approaches, our rules cannot only localize relation arguments but also assign their exact target argument roles. The method is evaluated in two tasks: the extraction of Nobel Prize awards and management succession events. Performance for the new Nobel Prize task is strong. 
For the management succession task the results compare favorably with those of existing pattern acquisition approaches.", "phrases": ["various complexity", "seed", "relation extraction"], "overall_score": 2.5342641819800624, "scores": [1.701754653260872, 0.9022450570368388, 0.5666110466381598], "rank_score": 1.0568702523119569} -{"id": "alberti-etal-2015-improved", "title": "Improved Transition-Based Parsing and Tagging with Neural Networks", "abstract": "We extend and improve upon recent work in structured training for neural network transition-based dependency parsing. We do this by experimenting with novel features, additional transition systems and by testing on a wider array of languages. In particular, we introduce set-valued features to encode the predicted morphological properties and part-of-speech confusion sets of the words being parsed. We also investigate the use of joint parsing and part-of-speech tagging in the neural paradigm. Finally, we conduct a multi-lingual evaluation that demonstrates the robustness of the overall structured neural approach, as well as the benefits of the extensions proposed in this work. Our research further demonstrates the breadth of the applicability of neural network methods to dependency parsing, as well as the ease with which new features can be added to neural parsing models.", "phrases": ["tagging", "transition system", "dependency parser"], "overall_score": 2.322053044433716, "scores": [1.7665492213935075, 0.8806252584688737, 0.5232611711653777], "rank_score": 1.0568118836759195} -{"id": "qadir-etal-2016-automatically", "title": "Automatically Inferring Implicit Properties in Similes", "abstract": "A simile is a figure of speech comparing two fundamentally different things. Sometimes, a simile will explain the basis of a comparison by explicitly mentioning a shared property. For example, "my room is as cold as Antarctica" gives "cold" as the property shared by the room and Antarctica. But most similes do not give an explicit property (e.g., "my room feels like Antarctica") leaving the reader to infer that the room is cold. We tackle the problem of automatically inferring implicit properties evoked by similes. Our approach involves three steps: (1) generating candidate properties from different sources, (2) evaluating properties based on the influence of multiple simile components, and (3) aggregated ranking of the properties. We also present an analysis showing that the difficulty of inferring an implicit property for a simile correlates with its interpretive diversity.", "phrases": ["implicit property", "simile", "interpretation"], "overall_score": 2.0561975298245296, "scores": [1.4305794477664169, 1.1912457432370427, 0.5482043154813463], "rank_score": 1.0566765021616018} -{"id": "kobayashi-ng-2020-bridging", "title": "Bridging Resolution: A Survey of the State of the Art", "abstract": "Bridging reference resolution is an anaphora resolution task that is arguably more challenging and less studied than entity coreference resolution. Given that significant progress has been made on coreference resolution in recent years, we believe that bridging resolution will receive increasing attention in the NLP community. Nevertheless, progress on bridging resolution is currently hampered in part by the scarcity of large annotated corpora for model training as well as the lack of standardized evaluation protocols. 
This paper presents a survey of the current state of research on bridging reference resolution and discusses future research directions.", "phrases": ["resolution", "survey", "anaphora resolution"], "overall_score": 1.8928587727882236, "scores": [1.7316840368164421, 0.9041398556246916, 0.5334498945204474], "rank_score": 1.0564245956538605} -{"id": "niklaus-etal-2018-survey", "title": "A Survey on Open Information Extraction", "abstract": "We provide a detailed overview of the various approaches that were proposed to date to solve the task of Open Information Extraction. We present the major challenges that such systems face, show the evolution of the suggested approaches over time and depict the specific issues they address. In addition, we provide a critique of the commonly applied evaluation procedures for assessing the performance of Open IE systems and highlight some directions for future work.", "phrases": ["survey", "open information extraction", "rule-based system", "oie system", "predicate"], "overall_score": 2.7094121282766457, "scores": [2.714064502265555, 0.8842720913816837, 0.5696165737735436, 0.5658645709858015, 0.5477916633441007], "rank_score": 1.0563218803501369} -{"id": "zhou-etal-2010-active", "title": "Active Deep Networks for Semi-Supervised Sentiment Classification", "abstract": "This paper presents a novel semi-supervised learning algorithm called Active Deep Networks (ADN), to address the semi-supervised sentiment classification problem with active learning. First, we propose the semi-supervised learning method of ADN. ADN is constructed by Restricted Boltzmann Machines (RBM) with unsupervised learning using labeled data and abundant of unlabeled data. Then the constructed structure is fine-tuned by gradient-descent based supervised learning with an exponential loss function. Second, we apply active learning in the semi-supervised learning framework to identify reviews that should be labeled as training data. Then ADN architecture is trained by the selected labeled data and all unlabeled data. Experiments on five sentiment classification datasets show that ADN outperforms the semi-supervised learning algorithm and deep learning techniques applied for sentiment classification.", "phrases": ["semi-supervised sentiment classification", "active learning", "active deep networks"], "overall_score": 2.0548140628681333, "scores": [1.004035945999787, 0.9064658189546181, 1.257394857403554], "rank_score": 1.0559655407859865} -{"id": "pradhan-etal-2011-conll", "title": "CoNLL-2011 Shared Task: Modeling Unrestricted Coreference in OntoNotes", "abstract": "The CoNLL-2011 shared task involved predicting coreference using OntoNotes data. Resources in this field have tended to be limited to noun phrase coreference, often on a restricted set of entities, such as ace entities. OntoNotes provides a large-scale corpus of general anaphoric coreference not restricted to noun phrases or to a specified set of entity types. OntoNotes also provides additional layers of integrated annotation, capturing additional shallow semantic structure. This paper briefly describes the OntoNotes annotation (coreference and other layers) and then describes the parameters of the shared task including the format, pre-processing information, and evaluation criteria, and presents and discusses the results achieved by the participating systems. 
Having a standard test set and evaluation parameters, all based on a new resource that provides multiple integrated annotation layers (parses, semantic roles, word senses, named entities and coreference) that could support joint models, should help to energize ongoing research in the task of entity and event coreference.", "phrases": ["unrestricted coreference", "ontonotes", "noun phrase"], "overall_score": 2.99175171768218, "scores": [1.7479827359494848, 0.8847408490057516, 0.5351476845612939], "rank_score": 1.0559570898388435} -{"id": "khapra-etal-2010-words", "title": "All Words Domain Adapted WSD: Finding a Middle Ground between Supervision and Unsupervision", "abstract": "In spite of decades of research on word sense disambiguation (WSD), all-words general purpose WSD has remained a distant goal. Many supervised WSD systems have been built, but the effort of creating the training corpus - annotated sense marked corpora - has always been a matter of concern. Therefore, attempts have been made to develop unsupervised and knowledge based techniques for WSD which do not need sense marked corpora. However such approaches have not proved effective, since they typically do not better Wordnet first sense baseline accuracy. Our research reported here proposes to stick to the supervised approach, but with far less demand on annotation. We show that if we have ANY sense marked corpora, be it from mixed domain or a specific domain, a small amount of annotation in ANY other domain can deliver the goods almost as if exhaustive sense marking were available in that domain. We have tested our approach across Tourism and Health domain corpora, using also the well known mixed domain SemCor corpus. Accuracy figures close to self domain training lend credence to the viability of our approach. Our contribution thus lies in finding a convenient middle ground between pure supervised and pure unsupervised WSD. Finally, our approach is not restricted to any specific set of target words, a departure from a commonly observed practice in domain specific WSD.", "phrases": ["wsd", "middle ground", "semi-supervised approach"], "overall_score": 1.6993337581426113, "scores": [1.786995182544903, 0.7821623776788973, 0.598408728007636], "rank_score": 1.0558554294104787} -{"id": "rudra-etal-2016-understanding", "title": "Understanding Language Preference for Expression of Opinion and Sentiment: What do Hindi-English Speakers do on Twitter?", "abstract": "Linguistic research on multilingual societies has indicated that there is usually a preferred language for expression of emotion and sentiment (Dewaele, 2010). Paucity of data has limited such studies to participant interviews and speech transcriptions from small groups of speakers. In this paper, we report a study on 430,000 unique tweets from Indian users, specifically Hindi-English bilinguals, to understand the language of preference, if any, for expressing opinion and sentiment. To this end, we develop classifiers for opinion detection in these languages, and further classifying opinionated tweets into positive, negative and neutral sentiments. Our study indicates that Hindi (i.e., the native language) is preferred over English for expression of negative opinion and swearing. 
As an aside, we explore some common pragmatic functions of code-switching through sentiment detection.", "phrases": ["opinion", "twitter", "hindi-english bilingual", "emotion expression"], "overall_score": 2.531716896472084, "scores": [1.7557331012819797, 1.3813305655238113, 0.5530933426452538, 0.533074797528652], "rank_score": 1.0558079517449241} -{"id": "yimam-etal-2017-multilingual", "title": "Multilingual and Cross-Lingual Complex Word Identification", "abstract": "Complex Word Identification (CWI) is an important task in lexical simplification and text accessibility. Due to the lack of CWI datasets, previous works largely depend on Simple English Wikipedia and edit histories for obtaining `gold standard' annotations, which are of doubtable quality, and limited only to English. We collect complex words/phrases (CP) for English, German and Spanish, annotated by both native and non-native speakers, and propose language independent features that can be used to train multilingual and cross-lingual CWI models. We show that the performance of cross-lingual CWI systems (using a model trained on one language and applying it on the other languages) is comparable to the performance of monolingual CWI systems.", "phrases": ["complex word identification", "spanish", "non-native speaker", "difficulty"], "overall_score": 2.4309776877766964, "scores": [2.5652069780651794, 0.5662591272443813, 0.5529220102050096, 0.538652666210811], "rank_score": 1.0557601954313454} -{"id": "giampiccolo-etal-2007-third", "title": "The Third PASCAL Recognizing Textual Entailment Challenge", "abstract": "This paper presents the Third PASCAL Recognising Textual Entailment Challenge (RTE-3), providing an overview of the dataset creating methodology and the submitted systems. In creating this year\u2019s dataset, a number of longer texts were introduced to make the challenge more oriented to realistic scenarios. Additionally, a pool of resources was offered so that the participants could share common tools. A pilot task was also set up, aimed at differentiating unknown entailments from identified contradictions and providing justifications for overall system decisions. 26 participants submitted 44 runs, using different approaches and generally presenting new entailment models and achieving higher scores than in the previous challenges.", "phrases": ["pascal", "textual entailment challenge", "hypothesis", "question answering"], "overall_score": 3.751718874944176, "scores": [1.9983071338234195, 0.8449432399613289, 0.8271590137431138, 0.5505206705801466], "rank_score": 1.055232514527002} -{"id": "temnikova-2010-cognitive", "title": "Cognitive Evaluation Approach for a Controlled Language Post-Editing Experiment", "abstract": "In emergency situations it is crucial that instructions are straightforward to understand. For this reason a controlled language for crisis management (CLCM), based on psycholinguistic studies of human comprehension under stress, was developed. In order to test the impact of CLCM machine translatability of this particular kind of sub-language text, a previous experiment involving machine translation and human post-editing has been conducted. Employing two automatic evaluation metrics, a previous evaluation of the experiment has proved that instructions written according to this CL can improve machine translation (MT) performance. This paper presents a new cognitive evaluation approach for MT post-editing, which is tested on the previous controlled and uncontrolled textual data. 
The presented evaluation approach allows a deeper look into the post-editing process and specifically how much effort post-editors put into correcting the different kinds of MT errors. The method is based on existing MT error classification, which is enriched with a new error ranking motivated by the cognitive effort involved in the detection and correction of these MT errors. The preliminary results of applying this approach to a subset of the original data confirmed once again the positive impact of CLCM on emergency instructions' machine translatability and thus the validity of the approach.", "phrases": ["post-editor", "cognitive evaluation approach", "edit operation"], "overall_score": 1.8902291686039616, "scores": [1.7418948376666306, 0.8915788261279513, 0.5313972930504193], "rank_score": 1.0549569856150003} -{"id": "kiddon-etal-2016-globally", "title": "Globally Coherent Text Generation with Neural Checklist Models", "abstract": "Recurrent neural networks can generate locally coherent text but often have difficulties representing what has already been generated and what still needs to be said \u2013 especially when constructing long texts. We present the neural checklist model, a recurrent neural network that models global coherence by storing and updating an agenda of text strings which should be mentioned somewhere in the output. The model generates output by dynamically adjusting the interpolation among a language model and a pair of attention models that encourage references to agenda items. Evaluations on cooking recipes and dialogue system responses demonstrate high coherence with greatly improved semantic coverage of the agenda.", "phrases": ["coherence", "neural checklist model", "language model", "recipe", "story"], "overall_score": 3.6223549069746315, "scores": [1.7150822851583585, 1.3710777295765693, 1.095830141252469, 0.5549526735207649, 0.5373268332376079], "rank_score": 1.0548539325491537} -{"id": "okanohara-tsujii-2007-discriminative", "title": "A discriminative language model with pseudo-negative samples", "abstract": "In this paper, we propose a novel discriminative language model, which can be applied quite generally. Compared to the well known N-gram language models, discriminative language models can achieve more accurate discrimination because they can employ overlapping features and nonlocal information. However, discriminative language models have been used only for re-ranking in specific applications because negative examples are not available. We propose sampling pseudo-negative examples taken from probabilistic language models. However, this approach requires prohibitive computational cost if we are dealing with quite a few features and training samples. We tackle the problem by estimating the latent information in sentences using a semi-Markov class model, and then extracting features from them. We also use an online margin-based algorithm with efficient kernel computation. 
Experimental results show that pseudo-negative examples can be treated as real negative examples and our model can classify these sentences correctly.", "phrases": ["language model", "sample", "specific application", "negative example", "start symbol"], "overall_score": 2.6197513189567085, "scores": [2.4485460708607563, 0.8278285669356882, 0.9254450112836632, 0.5387852453766521, 0.5307225398760604], "rank_score": 1.054265486866564} -{"id": "shaalan-2014-survey", "title": "A Survey of Arabic Named Entity Recognition and Classification", "abstract": "As more and more Arabic textual information becomes available through the Web in homes and businesses, via Internet and Intranet services, there is an urgent need for technologies and tools to process the relevant information. Named Entity Recognition (NER) is an Information Extraction task that has become an integral part of many other Natural Language Processing (NLP) tasks, such as Machine Translation and Information Retrieval. Arabic NER has begun to receive attention in recent years. The characteristics and peculiarities of Arabic, a member of the Semitic languages family, make dealing with NER a challenge. The performance of an Arabic NER component affects the overall performance of the NLP system in a positive manner. This article attempts to describe and detail the recent increase in interest and progress made in Arabic NER research. The importance of the NER task is demonstrated, the main characteristics of the Arabic language are highlighted, and the aspects of standardization in annotating named entities are illustrated. Moreover, the different Arabic linguistic resources are presented and the approaches used in Arabic NER field are explained. The features of common tools used in Arabic NER are described, and standard evaluation metrics are illustrated. In addition, a review of the state of the art of Arabic NER research is discussed. Finally, we present our conclusions. Throughout the presentation, illustrative examples are used for clarification.", "phrases": ["arabic", "entity recognition", "affix"], "overall_score": 2.7822536582534467, "scores": [1.3978672303414315, 0.9401535359313791, 0.8247604590888956], "rank_score": 1.0542604084539022} -{"id": "yang-cardie-2013-joint", "title": "Joint Inference for Fine-grained Opinion Extraction", "abstract": "This paper addresses the task of fine-grained opinion extraction \u2010 the identification of opinion-related entities: the opinion expressions, the opinion holders, and the targets of the opinions, and the relations between opinion expressions and their targets and holders. Most existing approaches tackle the extraction of opinion entities and opinion relations in a pipelined manner, where the interdependencies among different extraction stages are not captured. We propose a joint inference model that leverages knowledge from predictors that optimize subtasks of opinion extraction, and seeks a globally optimal solution. 
Experimental results demonstrate that our joint inference approach significantly outperforms traditional pipeline methods and baselines that tackle subtasks in isolation for the problem of opinion extraction.", "phrases": ["opinion expression", "joint inference", "pipelined approach"], "overall_score": 3.4741991656140256, "scores": [0.8832760124166331, 1.7228104850717043, 0.5562658641677768], "rank_score": 1.0541174538853715} -{"id": "geiger-etal-2019-posing", "title": "Posing Fair Generalization Tasks for Natural Language Inference", "abstract": "Deep learning models for semantics are generally evaluated using naturalistic corpora. Adversarial testing methods, in which models are evaluated on new examples with known semantic properties, have begun to reveal that good performance at these naturalistic tasks can hide serious shortcomings. However, we should insist that these evaluations be fair \u2013 that the models are given data sufficient to support the requisite kinds of generalization. In this paper, we define and motivate a formal notion of fairness in this sense. We then apply these ideas to natural language inference by constructing very challenging but provably fair artificial datasets and showing that standard neural models fail to generalize in the required ways; only task-specific models that jointly compose the premise and hypothesis are able to achieve high performance, and even these models do not solve the task perfectly.", "phrases": ["natural language inference", "synthetic data", "such perfection"], "overall_score": 2.4271303402105353, "scores": [2.0481668701504865, 0.5669803595591489, 0.5471207111305574], "rank_score": 1.0540893136133977} -{"id": "mohammad-etal-2007-cross", "title": "Cross-Lingual Distributional Profiles of Concepts for Measuring Semantic Distance", "abstract": "We present the idea of estimating semantic distance in one, possibly resource-poor, language using a knowledge source in another, possibly resource-rich, language. We do so by creating cross-lingual distributional profiles of concepts, using a bilingual lexicon and a bootstrapping algorithm, but without the use of any sense-annotated data or word-aligned corpora. The cross-lingual measures of semantic distance are evaluated on two tasks: (1) estimating semantic distance between words and ranking the word pairs according to semantic distance, and (2) solving Reader\u2019s Digest \u2018Word Power\u2019 problems. In task (1), cross-lingual measures are superior to conventional monolingual measures based on a wordnet. In task (2), cross-lingual measures are able to solve more problems correctly, and despite scores being affected by many tied answers, their overall performance is again better than the best monolingual measures.", "phrases": ["semantic distance", "bilingual lexicon", "cross-lingual distributional profile", "thesaurus"], "overall_score": 1.696417393631351, "scores": [2.474415244912831, 0.6429173594368989, 0.5538924120271589, 0.5449485446540026], "rank_score": 1.0540433902577226} -{"id": "dong-de-melo-2019-robust", "title": "A Robust Self-Learning Framework for Cross-Lingual Text Classification", "abstract": "Based on massive amounts of data, recent pretrained contextual representation models have made significant strides in advancing a number of different English NLP tasks. However, for other languages, relevant training data may be lacking, while state-of-the-art deep learning methods are known to be data-hungry. 
In this paper, we present an elegantly simple robust self-learning framework to include unlabeled non-English samples in the fine-tuning process of pretrained multilingual representation models. We leverage a multilingual model's own predictions on unlabeled non-English data in order to obtain additional information that can be used during further fine-tuning. Compared with original multilingual models and other cross-lingual classification models, we observe significant gains in effectiveness on document and sentiment classification for a range of diverse languages.", "phrases": ["self-learning framework", "learning method", "unlabeled data"], "overall_score": 1.8879637940030198, "scores": [1.7225036669373086, 0.8726836190599647, 0.5658906819340133], "rank_score": 1.0536926559770954} -{"id": "hanselowski-etal-2018-ukp", "title": "UKP-Athene: Multi-Sentence Textual Entailment for Claim Verification", "abstract": "The Fact Extraction and VERification (FEVER) shared task was launched to support the development of systems able to verify claims by extracting supporting or refuting facts from raw text. The shared task organizers provide a large-scale dataset for the consecutive steps involved in claim verification, in particular, document retrieval, fact extraction, and claim classification. In this paper, we present our claim verification pipeline approach, which, according to the preliminary results, scored third in the shared task, out of 23 competing systems. For the document retrieval, we implemented a new entity linking approach. In order to be able to rank candidate facts and classify a claim on the basis of several selected facts, we introduce two extensions to the Enhanced LSTM (ESIM).", "phrases": ["claim verification", "mediawiki api", "evidence sentence"], "overall_score": 3.830650431431823, "scores": [1.76211946832221, 0.8253141782228078, 0.5717908473044636], "rank_score": 1.0530748312831604} -{"id": "zhang-etal-2020-dialogpt", "title": "DIALOGPT : Large-Scale Generative Pre-training for Conversational Response Generation", "abstract": "We present a large, tunable neural conversational response generation model, DIALOGPT (dialogue generative pre-trained transformer). Trained on 147M conversation-like exchanges extracted from Reddit comment chains over a period spanning from 2005 through 2017, DialoGPT extends the Hugging Face PyTorch transformer to attain a performance close to human both in terms of automatic and human evaluation in single-turn dialogue settings. We show that conversational systems that leverage DialoGPT generate more relevant, contentful and context-consistent responses than strong baseline systems. The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.", "phrases": ["conversational response generation", "response generation model", "open-domain dialogue system", "pre-trained language model", "generation task"], "overall_score": 4.574178708381025, "scores": [0.923635647226877, 1.3060917906425922, 1.2803510922950465, 1.167911066382753, 0.5871849038669682], "rank_score": 1.0530349000828474} -{"id": "songyot-chiang-2014-improving", "title": "Improving Word Alignment using Word Similarity", "abstract": "We show that semantic relationships can be used to improve word alignment, in addition to the lexical and syntactic features that are typically used. 
In this paper, we present a method based on a neural network to automatically derive word similarity from monolingual data. We present an extension to word alignment models that exploits word similarity. Our experiments, in both large-scale and resource-limited settings, show improvements in word alignment tasks as well as translation tasks.", "phrases": ["word alignment", "semantic knowledge", "few study", "giza++", "feedforward neural network"], "overall_score": 2.4238323741910532, "scores": [3.056513958361483, 0.5806583236281839, 0.5516454657780355, 0.5400062214090933, 0.5344611566713648], "rank_score": 1.052657025169632} -{"id": "wang-etal-2010-structuring", "title": "Re-structuring, Re-labeling, and Re-aligning for Syntax-Based Machine Translation", "abstract": "This article shows that the structure of bilingual material from standard parsing and alignment tools is not optimal for training syntax-based statistical machine translation (SMT) systems. We present three modifications to the MT training data to improve the accuracy of a state-of-the-art syntax MT system: re-structuring changes the syntactic structure of training parse trees to enable reuse of substructures; re-labeling alters bracket labels to enrich rule application context; and re-aligning unifies word alignment across sentences to remove bad word alignments and refine good ones. Better structures, labels, and word alignments are learned by the EM algorithm. We show that each individual technique leads to improvement as measured by BLEU, and we also show that the greatest improvement is achieved by combining them. We report an overall 1.48 BLEU improvement on the NIST08 evaluation set over a strong baseline in Chinese/English translation.", "phrases": ["machine translation", "alignment tool", "syntax-based model", "rule extraction"], "overall_score": 2.6151975836487713, "scores": [1.8856336718188784, 0.9034858174613146, 0.8353530877855649, 0.5852591388067663], "rank_score": 1.0524329289681311} -{"id": "eyal-etal-2019-question", "title": "Question Answering as an Automatic Evaluation Metric for News Article Summarization", "abstract": "Recent work in the field of automatic summarization and headline generation focuses on maximizing ROUGE scores for various news datasets. We present an alternative, extrinsic, evaluation metric for this task, Answering Performance for Evaluation of Summaries. APES utilizes recent progress in the field of reading-comprehension to quantify the ability of a summary to answer a set of manually created questions regarding central entities in the source article. We first analyze the strength of this metric by comparing it to known manual evaluation metrics. We then present an end-to-end neural abstractive model that maximizes APES, while increasing ROUGE scores to competitive results.", "phrases": ["evaluation metric", "summarization", "apes", "question answering"], "overall_score": 3.0413889210766603, "scores": [0.907513892584739, 1.31422137637894, 1.154233804665173, 0.8330243373646427], "rank_score": 1.0522483527483737} -{"id": "roark-hollingshead-2008-classifying", "title": "Classifying Chart Cells for Quadratic Complexity Context-Free Inference", "abstract": "In this paper, we consider classifying word positions by whether or not they can either start or end multi-word constituents. This provides a mechanism for "closing" chart cells during context-free inference, which is demonstrated to improve efficiency and accuracy when used to constrain the well-known Charniak parser. 
Additionally, we present a method for "closing" a sufficient number of chart cells to ensure quadratic worst-case complexity of context-free inference. Empirical results show that this O(n^2) bound can be achieved without impacting parsing accuracy.", "phrases": ["chart cell", "complexity", "multi-word constituent"], "overall_score": 2.6147307206397365, "scores": [1.2122229154602235, 0.9788546792199599, 0.9656575537361309], "rank_score": 1.0522450494721047} -{"id": "goh-etal-2004-chinese", "title": "Chinese Word Segmentation by Classification of Characters", "abstract": "During the process of Chinese word segmentation, two main problems occur: segmentation ambiguities and unknown word occurrences. This paper describes a method to solve the segmentation problem. First, we use a dictionary-based approach to segment the text. We apply the Maximum Matching algorithm to segment the text forwards (FMM) and backwards (BMM). Based on the difference between FMM and BMM, and the context, we apply a classification method based on Support Vector Machines to re-assign the word boundaries. In so doing, we use the output of a dictionary-based approach, and then apply a machine-learning-based approach to solve the segmentation problem. Experimental results show that our model can achieve an F-measure of 99.0 for overall segmentation, given the condition that there are no unknown words in the text, and an F-measure of 95.1 if unknown words exist.", "phrases": ["word segmentation", "dictionary-based approach", "machine-learning-based approach"], "overall_score": 1.8849528481986895, "scores": [2.049454123075563, 0.563644975128634, 0.5429375471789891], "rank_score": 1.0520122151277287} -{"id": "fersini-etal-2022-semeval", "title": "SemEval-2022 Task 5: Multimedia Automatic Misogyny Identification", "abstract": "The paper describes the SemEval-2022 Task 5: Multimedia Automatic Misogyny Identification (MAMI), which explores the detection of misogynous memes on the web by taking advantage of available texts and images. The task has been organised in two related sub-tasks: the first one is focused on recognising whether a meme is misogynous or not (Sub-task A), while the second one is devoted to recognising types of misogyny (Sub-task B). MAMI has been one of the most popular tasks at SemEval-2022 with more than 400 participants, 65 teams involved in Sub-task A and 41 in Sub-task B from 13 countries. The MAMI challenge received 4214 submitted runs (of which 166 uploaded on the leader-board), denoting an enthusiastic participation for the proposed problem. The collection and annotation is described for the task dataset. The paper provides an overview of the systems proposed for the challenge, reports the results achieved in both sub-tasks and outlines a description of the main errors for a comprehension of the systems capabilities and for detailing future research perspectives.", "phrases": ["misogyny", "meme", "semeval-2022 task"], "overall_score": 2.6139961286618276, "scores": [0.9118105482684874, 1.6846346739270812, 0.5594030615435892], "rank_score": 1.0519494279130528} -{"id": "escalante-etal-2011-local", "title": "Local Histograms of Character N-grams for Authorship Attribution", "abstract": "This paper proposes the use of local histograms (LH) over character n-grams for authorship attribution (AA). LHs are enriched histogram representations that preserve sequential information in documents; they have been successfully used for text categorization and document visualization using word histograms. 
In this work we explore the suitability of LHs over n-grams at the character-level for AA. We show that LHs are particularly helpful for AA, because they provide useful information for uncovering, to some extent, the writing style of authors. We report experimental results in AA data sets that confirm that LHs over character n-grams are more helpful for AA than the usual global histograms, yielding results far superior to state of the art approaches. We found that LHs are even more advantageous in challenging conditions, such as having imbalanced and small training sets. Our results motivate further research on the use of LHs for modeling the writing style of authors for related tasks, such as authorship verification and plagiarism detection.", "phrases": ["histogram", "authorship attribution", "text analysis task", "impressive performance level"], "overall_score": 2.187153659931473, "scores": [1.8951755045195817, 0.901136243638715, 0.836844776748493, 0.574037793553587], "rank_score": 1.0517985796150942} -{"id": "chen-etal-2018-best", "title": "The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation", "abstract": "The past year has witnessed rapid advances in sequence-to-sequence (seq2seq) modeling for Machine Translation (MT). The classic RNN-based approaches to MT were first out-performed by the convolutional seq2seq model, which was then out-performed by the more recent Transformer model. Each of these new approaches consists of a fundamental architecture accompanied by a set of modeling and training techniques that are in principle applicable to other seq2seq architectures. In this paper, we tease apart the new architectures and their accompanying techniques in two ways. First, we identify several key modeling and training techniques, and apply them to the RNN architecture, yielding a new RNMT+ model that outperforms all of the three fundamental architectures on the benchmark WMT'14 English to French and English to German tasks. Second, we analyze the properties of each fundamental seq2seq architecture and devise new hybrid architectures intended to combine their strengths. Our hybrid models obtain further improvements, outperforming the RNMT+ model on both benchmark datasets.", "phrases": ["neural machine translation", "rnmt+ model", "strength", "multi-head attention", "rnn-based encoder"], "overall_score": 3.677426197633258, "scores": [2.684705550949328, 0.8414884608049479, 0.6007007900090494, 0.5760877508769607, 0.5557307963033926], "rank_score": 1.0517426697887358} -{"id": "levy-etal-2017-zero", "title": "Zero-Shot Relation Extraction via Reading Comprehension", "abstract": "We show that relation extraction can be reduced to answering simple reading comprehension questions, by associating one or more natural-language questions with each relation slot. This reduction has several advantages: we can (1) learn relation-extraction models by extending recent neural reading-comprehension techniques, (2) build very large training sets for those models by combining relation-specific crowd-sourced questions with distant supervision, and even (3) do zero-shot learning by extracting new relation types that are only specified at test-time, for which we have no labeled training examples. 
Experiments on a Wikipedia slot-filling task demonstrate that the approach can generalize to new questions for known relation types with high accuracy, and that zero-shot generalization to unseen relation types is possible, at lower accuracy levels, setting the bar for future work on this task.", "phrases": ["comprehension", "slot-filling task", "zero-shot relation extraction", "relation extraction task", "language question"], "overall_score": 3.955531234825177, "scores": [2.080050608497126, 0.8698749607374548, 0.8909215138448138, 0.8700195987755572, 0.5474699037480985], "rank_score": 1.0516673171206101} -{"id": "wu-etal-2019-depth", "title": "Depth Growing for Neural Machine Translation", "abstract": "While very deep neural networks have shown effectiveness for computer vision and text classification applications, how to increase the network depth of the neural machine translation (NMT) models for better translation quality remains a challenging problem. Directly stacking more blocks to the NMT model results in no improvement and even drop in performance. In this work, we propose an effective two-stage approach with three specially designed components to construct deeper NMT models, which result in significant improvements over the strong Transformer baselines on WMT14 English\u2192German and English\u2192French translation tasks.", "phrases": ["neural machine translation", "nmt model", "depth"], "overall_score": 1.8840318854363707, "scores": [1.3243008670742102, 0.9840406817681809, 0.8461530992278705], "rank_score": 1.0514982160234205} -{"id": "fabbri-etal-2019-multi", "title": "Multi-News: A Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model", "abstract": "Automatic generation of summaries from multiple news articles is a valuable tool as the number of online publications grows rapidly. Single document summarization (SDS) systems have benefited from advances in neural encoder-decoder model thanks to the availability of large datasets. However, multi-document summarization (MDS) of news articles has been limited to datasets of a couple of hundred examples. In this paper, we introduce Multi-News, the first large-scale MDS news dataset. Additionally, we propose an end-to-end model which incorporates a traditional extractive summarization model with a standard SDS model and achieves competitive results on MDS datasets. We benchmark several methods on Multi-News and hope that this work will promote advances in summarization in the multi-document setting.", "phrases": ["summarization", "large dataset", "mds news dataset", "multi-new"], "overall_score": 3.38458069429364, "scores": [0.7913708249766553, 2.294712208047486, 0.582020307043927, 0.5378131083332609], "rank_score": 1.0514791121003322} -{"id": "vu-etal-2018-sentence", "title": "Sentence Simplification with Memory-Augmented Neural Networks", "abstract": "Sentence simplification aims to simplify the content and structure of complex sentences, and thus make them easier to interpret for human readers, and easier to process for downstream NLP applications. Recent advances in neural machine translation have paved the way for novel approaches to the task. In this paper, we adapt an architecture with augmented memory capacities called Neural Semantic Encoders (Munkhdalai and Yu, 2017) for sentence simplification. 
Our experiments demonstrate the effectiveness of our approach on different simplification datasets, both in terms of automatic evaluation measures and human judgments.", "phrases": ["memory-augmented neural network", "neural semantic encoders", "sentence simplification"], "overall_score": 2.3098382288915653, "scores": [1.9752358524831866, 0.5896823416968006, 0.5888398504690558], "rank_score": 1.051252681549681} -{"id": "johansson-moschitti-2013-relational", "title": "Relational Features in Fine-Grained Opinion Analysis", "abstract": "Fine-grained opinion analysis methods often make use of linguistic features but typically do not take the interaction between opinions into account. This article describes a set of experiments that demonstrate that relational features, mainly derived from dependency-syntactic and semantic role structures, can significantly improve the performance of automatic systems for a number of fine-grained opinion analysis tasks: marking up opinion expressions, finding opinion holders, and determining the polarities of opinion expressions. These features make it possible to model the way opinions expressed in natural-language discourse interact in a sentence over arbitrary distances. The use of relations requires us to consider multiple opinions simultaneously, which makes the search for the optimal analysis intractable. However, a reranker can be used as a sufficiently accurate and efficient approximation. A number of feature sets and machine learning approaches for the rerankers are evaluated. For the task of opinion expression extraction, the best model shows a 10-point absolute improvement in soft recall on the MPQA corpus over a conventional sequence labeler based on local contextual features, while precision decreases only slightly. Significant improvements are also seen for the extended tasks where holders and polarities are considered: 10 and 7 points in recall, respectively. In addition, the systems outperform previously published results for unlabeled (6 F-measure points) and polarity-labeled (10\u201315 points) opinion expression extraction. Finally, as an extrinsic evaluation, the extracted MPQA-style opinion expressions are used in practical opinion mining tasks. In all scenarios considered, the machine learning features derived from the opinion expressions lead to statistically significant improvements.", "phrases": ["opinion expression", "reranker", "relational feature"], "overall_score": 2.774264474074092, "scores": [1.6763833159862847, 0.9440103835269673, 0.5333056645834106], "rank_score": 1.0512331213655541} -{"id": "liu-etal-2018-stochastic", "title": "Stochastic Answer Networks for Machine Reading Comprehension", "abstract": "We propose a simple yet robust stochastic answer network (SAN) that simulates multi-step reasoning in machine reading comprehension. Compared to previous work such as ReasoNet which used reinforcement learning to determine the number of steps, the unique feature is the use of a kind of stochastic prediction dropout on the answer module (final layer) of the neural network during the training. 
We show that this simple trick improves robustness and achieves results competitive to the state-of-the-art on the Stanford Question Answering Dataset (SQuAD), the Adversarial SQuAD, and the Microsoft MAchine Reading COmprehension Dataset (MS MARCO).", "phrases": ["machine reading comprehension", "multi-step reasoning", "final layer", "stochastic answer network", "memory network"], "overall_score": 2.309775513993743, "scores": [2.582488459561398, 1.0216370782725204, 0.5572675826018636, 0.5483804817521929, 0.5463470916602003], "rank_score": 1.051224138769635} -{"id": "womack-etal-2012-disfluencies", "title": "Disfluencies as Extra-Propositional Indicators of Cognitive Processing", "abstract": "We explore filled pause usage in spontaneous medical narration. Expert physicians viewed images of dermatological conditions and provided a description while working toward a diagnosis. The narratives were analyzed for differences in filled pauses used by attending (experienced) and resident (in-training) physicians and by male and female physicians. Attending physicians described more and used more filled pauses than residents. No difference was found by speaker gender. Acoustic speech features were examined for two types of filled pauses: nasal (e.g. um) and non-nasal (e.g. uh). Nasal filled pauses were more often followed by longer silent pauses. Scores capturing diagnostic correctness and diagnostic thoroughness for each narrative were compared against filled pauses. The number of filled and silent pauses trends upward as correctness scores increase, indicating a tentative relationship between filled pause usage and expertise. Also, we report on a computational model for predicting types of filled pause.", "phrases": ["extra-propositional indicator", "cognitive processing", "disfluency"], "overall_score": 1.8835303748394925, "scores": [1.3230489091601532, 0.9320891493400244, 0.8985168943897467], "rank_score": 1.0512183176299748} -{"id": "weiss-etal-2018-practical", "title": "On the Practical Computational Power of Finite Precision RNNs for Language Recognition", "abstract": "While Recurrent Neural Networks (RNNs) are famously known to be Turing complete, this relies on infinite precision in the states and unbounded computation time. We consider the case of RNNs with finite precision whose computation time is linear in the input length. Under these limitations, we show that different RNN variants have different computational power. In particular, we show that the LSTM and the Elman-RNN with ReLU activation are strictly stronger than the RNN with a squashing activation and the GRU. This is achieved because LSTMs and ReLU-RNNs can easily implement counting behavior. We show empirically that the LSTM does indeed learn to effectively use the counting mechanism.", "phrases": ["power", "rnn", "formal language", "k-counter machine"], "overall_score": 3.2489441879197405, "scores": [1.4624550431571228, 1.0936375652689259, 1.0572287400229987, 0.5910132563893549], "rank_score": 1.0510836512096007} -{"id": "tang-etal-2016-effective", "title": "Effective LSTMs for Target-Dependent Sentiment Classification", "abstract": "Target-dependent sentiment classification remains a challenge: modeling the semantic relatedness of a target with its context words in a sentence. Different context words have different influences on determining the sentiment polarity of a sentence towards the target. Therefore, it is desirable to integrate the connections between target word and context words when building a learning system. 
In this paper, we develop two target-dependent long short-term memory (LSTM) models, where target information is automatically taken into account. We evaluate our methods on a benchmark dataset from Twitter. Empirical results show that modeling sentence representation with standard LSTM does not perform well. Incorporating target information into LSTM can significantly boost the classification accuracy. The target-dependent LSTM models achieve state-of-the-art performances without using a syntactic parser or external sentiment lexicons.", "phrases": ["sentiment classification", "semantic relatedness", "target-dependent lstm", "deep learning", "input sentence"], "overall_score": 4.000879412719916, "scores": [2.0209137520872287, 0.9235997173865907, 0.8834182790718487, 0.8498322930852487, 0.5773372754303107], "rank_score": 1.0510202634122456} -{"id": "troiano-etal-2018-computational", "title": "A Computational Exploration of Exaggeration", "abstract": "Several NLP studies address the problem of figurative language, but among non-literal phenomena, they have neglected exaggeration. This paper presents a first computational approach to this figure of speech. We explore the possibility of automatically detecting exaggerated sentences. First, we introduce HYPO, a corpus containing overstatements (or hyperboles) collected on the web and validated via crowdsourcing. Then, we evaluate a number of models trained on HYPO, and bring evidence that the task of hyperbole identification can be successfully performed based on a small set of semantic features.", "phrases": ["exaggeration", "figure", "counterpart"], "overall_score": 2.18532417216128, "scores": [2.0730129863305438, 0.5516047707084333, 0.5281385888728808], "rank_score": 1.0509187819706194} -{"id": "martin-etal-2020-controllable", "title": "Controllable Sentence Simplification", "abstract": "Text simplification aims at making a text easier to read and understand by simplifying grammar and structure while keeping the underlying information identical. It is often considered an all-purpose generic task where the same simplification is suitable for all; however, multiple audiences can benefit from simplified text in different ways. We adapt a discrete parametrization mechanism that provides explicit control on simplification systems based on Sequence-to-Sequence models. As a result, users can condition the simplifications returned by a model on attributes such as length, amount of paraphrasing, lexical complexity and syntactic complexity. We also show that carefully chosen values of these attributes allow out-of-the-box Sequence-to-Sequence models to outperform their standard counterparts on simplification benchmarks. Our model, which we call ACCESS (as shorthand for AudienCe-CEntric Sentence Simplification), establishes the state of the art at 41.87 SARI on the WikiLarge test set, a +1.42 improvement over the best previously reported score.", "phrases": ["simplification", "attribute", "length", "paraphrasing", "syntactic complexity"], "overall_score": 2.695476277405432, "scores": [1.4037374733051975, 1.133739850243076, 0.9750271022550315, 0.8924629039457077, 0.8494761343376909], "rank_score": 1.0508886928173407} -{"id": "nuhn-etal-2013-beam", "title": "Beam Search for Solving Substitution Ciphers", "abstract": "In this paper we address the problem of solving substitution ciphers using a beam search approach.
We present a conceptually consistent and easy-to-implement method that improves the current state of the art for decipherment of substitution ciphers and is able to use high-order n-gram language models. We show experiments with 1:1 substitution ciphers in which the guaranteed optimal solution for 3-gram language models has 38.6% decipherment error, while our approach achieves 4.13% decipherment error in a fraction of the time by using a 6-gram language model. We also apply our approach to the famous Zodiac-408 cipher and obtain slightly better (and near to optimal) results than previously published. Unlike the previous state-of-the-art approach that uses additional word lists to evaluate possible decipherments, our approach only uses a letter-based 6-gram language model. Furthermore, we use our algorithm to solve large vocabulary substitution ciphers and improve the best published decipherment error rate based on the Gigaword corpus from 7.8% to 6.0%.", "phrases": ["art", "decipherment", "beam search"], "overall_score": 2.5194363760748324, "scores": [1.3801862606674826, 1.2472226347082815, 0.5246508354914086], "rank_score": 1.050686576955724} -{"id": "luan-etal-2017-multi", "title": "Multi-Task Learning for Speaker-Role Adaptation in Neural Conversation Models", "abstract": "Building a persona-based conversation agent is challenging owing to the lack of large amounts of speaker-specific conversation data for model training. This paper addresses the problem by proposing a multi-task learning approach to training neural conversation models that leverages both conversation data across speakers and other types of data pertaining to the speaker and speaker roles to be modeled. Experiments show that our approach leads to significant improvements over baseline model quality, generating responses that capture more precisely speakers' traits and speaking styles. The model offers the benefits of being algorithmically simple and easy to implement, and not relying on large quantities of data representing specific individual speakers.", "phrases": ["conversation data", "multi-task learning", "autoencoder"], "overall_score": 2.519017378414129, "scores": [1.7304616540000233, 0.8897542901501609, 0.5313195799276561], "rank_score": 1.05051184135928} -{"id": "shen-evang-2022-drs", "title": "DRS Parsing as Sequence Labeling", "abstract": "We present the first fully trainable semantic parser for English, German, Italian, and Dutch discourse representation structures (DRSs) that is competitive in accuracy with recent sequence-to-sequence models and at the same time compositional in the sense that the output maps each token to one of a finite set of meaning fragments, and the meaning of the utterance is a function of the meanings of its parts. We argue that this property makes the system more transparent and more useful for human-in-the-loop annotation. We achieve this simply by casting DRS parsing as a sequence labeling task, where tokens are labeled with both fragments (lists of abstracted clauses with relative referent indices indicating unification) and symbols like word senses or names. We give a comprehensive error analysis that highlights areas for future work.", "phrases": ["sequence-to-sequence model", "same time", "drs parsing"], "overall_score": 1.1537108649736074, "scores": [2.03438505235981, 0.577031587288192, 0.5390420168056651], "rank_score": 1.0501528854845559} -{"id": "cybulska-vossen-2014-using", "title": "Using a sledgehammer to crack a nut?
Lexical diversity and event coreference resolution", "abstract": "In this paper we examine the representativeness of the EventCorefBank (ECB, Bejan and Harabagiu, 2010) with regard to the language population of large-volume streams of news. The ECB corpus is one of the data sets used for evaluation of the task of event coreference resolution. Our analysis shows that the ECB in most cases covers one seminal event per domain, which considerably reduces the event, and thus the language, diversity that one comes across in the news. We augmented the corpus with a new corpus component, consisting of 502 texts, describing different instances of event types that were already captured by the 43 topics of the ECB, making it more representative of news articles on the web. The new \u201cECB+\u201d corpus is available for further research.", "phrases": ["diversity", "event coreference resolution", "news article", "ecb+"], "overall_score": 3.0918936486436004, "scores": [1.268457611402257, 0.9446931767194755, 1.4239580748666925, 0.5632072862277548], "rank_score": 1.050079037304045} -{"id": "al-shargi-etal-2016-morphologically", "title": "Morphologically Annotated Corpora and Morphological Analyzers for Moroccan and Sanaani Yemeni Arabic", "abstract": "We present new language resources for Moroccan and Sanaani Yemeni Arabic. The resources include corpora for each dialect which have been morphologically annotated, and morphological analyzers for each dialect which are derived from these corpora. These are the first sets of resources for Moroccan and Yemeni Arabic. The resources will be made available to the public.", "phrases": ["morphological analyzer", "moroccan", "arabic"], "overall_score": 1.689963216157104, "scores": [1.8063568861963797, 0.8124442813213673, 0.5312983854396297], "rank_score": 1.0500331843191255} -{"id": "lin-etal-2019-sequence", "title": "Sequence-to-Nuggets: Nested Entity Mention Detection via Anchor-Region Networks", "abstract": "Sequential labeling-based NER approaches restrict each word to belong to at most one entity mention, which will face a serious problem when recognizing nested entity mentions. In this paper, we propose to resolve this problem by modeling and leveraging the head-driven phrase structures of entity mentions, i.e., although a mention can nest other mentions, they will not share the same head word. Specifically, we propose Anchor-Region Networks (ARNs), a sequence-to-nuggets architecture for nested mention detection. ARNs first identify anchor words (i.e., possible head words) of all mentions, and then recognize the mention boundaries for each anchor word by exploiting regular phrase structures. Furthermore, we also design Bag Loss, an objective function which can train ARNs in an end-to-end manner without using any anchor word annotation. Experiments show that ARNs achieve the state-of-the-art performance on three standard nested entity mention detection benchmarks.", "phrases": ["mention", "head-driven phrase structure", "sequence-to-nugget architecture"], "overall_score": 2.843245843250088, "scores": [1.16606933560841, 1.135391863494969, 0.8483096309493854], "rank_score": 1.0499236100175882} -{"id": "georgila-2013-reinforcement", "title": "Reinforcement Learning of Two-Issue Negotiation Dialogue Policies", "abstract": "We use hand-crafted simulated negotiators (SNs) to train and evaluate dialogue policies for two-issue negotiation between two agents. These SNs differ in their goals and in the use of strong and weak arguments to persuade their counterparts.
They may also make irrational moves, i.e., moves not consistent with their goals, to generate a variety of negotiation patterns. Different versions of these SNs interact with each other to generate corpora for Reinforcement Learning (RL) of argumentation dialogue policies for each of the two agents. We evaluate the learned policies against hand-crafted SNs similar to the ones used for training but with the modification that these SNs no longer make irrational moves and thus are harder to beat. The learned policies generally do as well as, or better than, the hand-crafted SNs, showing that RL can be successfully used for learning argumentation dialogue policies in two-issue negotiation scenarios.", "phrases": ["negotiation", "argumentation dialogue policy", "reinforcement learning"], "overall_score": 2.042734149805518, "scores": [1.708180178773439, 0.8965815501573532, 0.544511351530621], "rank_score": 1.0497576934871378} -{"id": "dinu-wang-2009-inference", "title": "Inference Rules and their Application to Recognizing Textual Entailment", "abstract": "In this paper, we explore ways of improving an inference rule collection and its application to the task of recognizing textual entailment. For this purpose, we start with an automatically acquired collection and we propose methods to refine it and obtain more rules using a hand-crafted lexical resource. Following this, we derive a dependency-based structure representation from texts, which aims to provide a proper base for the inference rule application. The evaluation of our approach on the recognizing textual entailment data shows promising results on precision and the error analysis suggests possible improvements.", "phrases": ["textual entailment", "inference rule collection", "hand-crafted lexical resource"], "overall_score": 1.880448449143545, "scores": [2.0098812781419206, 0.5715156081636615, 0.5670979001408935], "rank_score": 1.0494982621488251} -{"id": "yang-etal-2021-mtag", "title": "MTAG: Modal-Temporal Attention Graph for Unaligned Human Multimodal Language Sequences", "abstract": "Human communication is multimodal in nature; it is through multiple modalities such as language, voice, and facial expressions that opinions and emotions are expressed. Data in this domain exhibits complex multi-relational and temporal interactions. Learning from this data is a fundamentally challenging research problem. In this paper, we propose Modal-Temporal Attention Graph (MTAG). MTAG is an interpretable graph-based neural model that provides a suitable framework for analyzing multimodal sequential data. We first introduce a procedure to convert unaligned multimodal sequence data into a graph with heterogeneous nodes and edges that captures the rich interactions across modalities and through time. Then, a novel graph fusion operation, called MTAG fusion, along with a dynamic pruning and read-out technique, is designed to efficiently process this modal-temporal graph and capture various interactions.
By learning to focus only on the important interactions within the graph, MTAG achieves state-of-the-art performance on multimodal sentiment analysis and emotion recognition benchmarks, while utilizing significantly fewer model parameters.", "phrases": ["attention graph", "multimodal sentiment analysis", "mtag"], "overall_score": 1.152990968207462, "scores": [1.8013067529128444, 0.786895441504989, 0.5602906272088325], "rank_score": 1.0494976072088886} -{"id": "wu-etal-2020-tod", "title": "TOD-BERT: Pre-trained Natural Language Understanding for Task-Oriented Dialogue", "abstract": "The underlying difference of linguistic patterns between general text and task-oriented dialogue makes existing pre-trained language models less useful in practice. In this work, we unify nine human-human and multi-turn task-oriented dialogue datasets for language modeling. To better model dialogue behavior during pre-training, we incorporate user and system tokens into the masked language modeling. We propose a contrastive objective function to simulate the response selection task. Our pre-trained task-oriented dialogue BERT (TOD-BERT) outperforms strong baselines like BERT on four downstream task-oriented dialogue applications, including intention recognition, dialogue state tracking, dialogue act prediction, and response selection. We also show that TOD-BERT has a stronger few-shot ability that can mitigate the data scarcity problem for task-oriented dialogue.", "phrases": ["task-oriented dialogue", "pre-trained language model", "dialogue dataset"], "overall_score": 3.1429984655654297, "scores": [1.9662743247688514, 0.5952407924269667, 0.5859608705399579], "rank_score": 1.049158662578592} -{"id": "wolf-etal-2020-transformers", "title": "Transformers: State-of-the-Art Natural Language Processing", "abstract": "Recent progress in natural language processing has been driven by advances in both model architecture and model pretraining. Transformer architectures have facilitated building higher-capacity models and pretraining has made it possible to effectively utilize this capacity for a wide variety of tasks. Transformers is an open-source library with the goal of opening up these advances to the wider machine learning community. The library consists of carefully engineered state-of-the-art Transformer architectures under a unified API. Backing this library is a curated collection of pretrained models made by and available for the community. Transformers is designed to be extensible by researchers, simple for practitioners, and fast and robust in industrial deployments. The library is available at .", "phrases": ["api", "transformer", "language model", "pre-trained model", "huggingface transformers library"], "overall_score": 3.4959930308535245, "scores": [2.4315437929106363, 1.1121309748582524, 0.6005449506156082, 0.5816092661835488, 0.5199370243581438], "rank_score": 1.0491532017852376} -{"id": "kim-etal-2010-cross", "title": "A Cross-lingual Annotation Projection Approach for Relation Detection", "abstract": "While extensive studies on relation extraction have been conducted in the last decade, statistical systems based on supervised learning are still limited because they require large amounts of training data to achieve high performance. In this paper, we develop a cross-lingual annotation projection method that leverages parallel corpora to bootstrap a relation detector without significant annotation efforts for a resource-poor language.
In order to make our method more reliable, we introduce three simple projection noise reduction methods. The merit of our method is demonstrated through a novel Korean relation detection task.", "phrases": ["annotation projection approach", "parallel corpora", "relation detector"], "overall_score": 2.606621575408014, "scores": [1.3822799947719893, 1.2307955857792332, 0.5338694875426608], "rank_score": 1.0489816893646278} -{"id": "santus-etal-2016-cogalex", "title": "The CogALex-V Shared Task on the Corpus-Based Identification of Semantic Relations", "abstract": "The shared task of the 5th Workshop on Cognitive Aspects of the Lexicon (CogALex-V) aims at providing a common benchmark for testing current corpus-based methods for the identification of lexical semantic relations (synonymy, antonymy, hypernymy, part-whole meronymy) and at gaining a better understanding of their respective strengths and weaknesses. The shared task uses a challenging dataset extracted from EVALution 1.0, which contains word pairs holding the above-mentioned relations as well as semantically unrelated control items (random). The task is split into two subtasks: (i) identification of related word pairs vs. unrelated ones; (ii) classification of the word pairs according to their semantic relation. This paper describes the subtasks, the dataset, the evaluation metrics, the seven participating systems and their results. The best performing system in subtask 1 is GHHH (F1 = 0.790), while the best system in subtask 2 is LexNet (F1 = 0.445). The dataset and the task description are available at .", "phrases": ["cogalex-v", "identification", "meronymy"], "overall_score": 1.6879554259898537, "scores": [1.7497079351337386, 0.8254006205851158, 0.5712484667219885], "rank_score": 1.0487856741469475} -{"id": "anastasopoulos-etal-2019-neural", "title": "Neural Machine Translation of Text from Non-Native Speakers", "abstract": "Neural Machine Translation (NMT) systems are known to degrade when confronted with noisy data, especially when the system is trained only on clean data. In this paper, we show that augmenting training data with sentences containing artificially-introduced grammatical errors can make the system more robust to such errors. In combination with an automatic grammar error correction system, we can recover 1.0 BLEU out of 2.4 BLEU lost due to grammatical errors. We also present a set of Spanish translations of the JFLEG grammar error correction corpus, which allows for testing NMT robustness to real grammatical errors.", "phrases": ["grammatical error", "spanish translation", "neural machine translation", "noise"], "overall_score": 2.4144836548592403, "scores": [2.021658155955998, 1.0637049988457672, 0.56019556187365, 0.5488289951284383], "rank_score": 1.0485969279509635} -{"id": "bittar-etal-2011-french", "title": "French TimeBank: An ISO-TimeML Annotated Reference Corpus", "abstract": "This article presents the main points in the creation of the French TimeBank (Bittar, 2010), a reference corpus annotated according to the ISO-TimeML standard for temporal annotation. A number of improvements were made to the markup language to deal with linguistic phenomena not yet covered by ISO-TimeML, including cross-language modifications and others specific to French. An automatic pre-annotation system was used to speed up the annotation process. 
A preliminary evaluation of the methodology adopted for this project yields positive results in terms of data quality and annotation time.", "phrases": ["markup language", "project", "french timebank", "linguistic phenomenon"], "overall_score": 2.0393958127198366, "scores": [2.539767477844438, 0.5737057878892992, 0.5449352412911478, 0.5337600033452539], "rank_score": 1.0480421275925347} -{"id": "susanto-etal-2014-system", "title": "System Combination for Grammatical Error Correction", "abstract": "Different approaches to high-quality grammatical error correction have been proposed recently, many of which have their own strengths and weaknesses. Most of these approaches are based on classification or statistical machine translation (SMT). In this paper, we propose to combine the output from a classification-based system and an SMT-based system to improve the correction quality. We adopt the system combination technique of Heafield and Lavie (2010). We achieve an F0.5 score of 39.39% on the test set of the CoNLL-2014 shared task, outperforming the best system in the shared task.", "phrases": ["grammatical error correction", "classification-based system", "gec system combination"], "overall_score": 2.6875667050910876, "scores": [2.0535928723825383, 0.5487665271637292, 0.5410555344829585], "rank_score": 1.047804978009742} -{"id": "davidov-rappoport-2006-efficient", "title": "Efficient Unsupervised Discovery of Word Categories Using Symmetric Patterns and High Frequency Words", "abstract": "We present a novel approach for discovering word categories, sets of words sharing a significant aspect of their meaning. We utilize meta-patterns of high-frequency words and content words in order to discover pattern candidates. Symmetric patterns are then identified using graph-based measures, and word categories are created based on graph clique sets. Our method is the first pattern-based method that requires no corpus annotation or manually provided seed patterns or words. We evaluate our algorithm on very large corpora in two languages, using both human judgments and WordNet-based evaluation. Our fully unsupervised results are superior to previous work that used a POS-tagged corpus, and computation time for huge corpora is orders of magnitude faster than previously reported.", "phrases": ["discovery", "symmetric pattern", "high frequency word"], "overall_score": 2.687303466577222, "scores": [0.879961553928, 1.3211039683695, 0.9420415243501298], "rank_score": 1.0477023488825432} -{"id": "hwa-2004-sample", "title": "Sample Selection for Statistical Parsing", "abstract": "Corpus-based statistical parsing relies on using large quantities of annotated text as training examples. Building this kind of resource is expensive and labor-intensive. This work proposes to use sample selection to find helpful training examples and reduce human effort spent on annotating less informative ones. We consider several criteria for predicting whether unlabeled data might be a helpful training example. Experiments are performed across two syntactic learning tasks and within the single task of parsing across two learning models to compare the effect of different predictive criteria.
We find that sample selection can significantly reduce the size of annotated training corpora and that uncertainty is a robust predictive criterion that can be easily applied to different learning models.", "phrases": ["unlabeled data", "uncertainty", "sample selection", "active learning", "tree entropy"], "overall_score": 2.7648878678518325, "scores": [2.00139999831407, 1.5077042948356225, 0.6090258857502946, 0.5645165106547853, 0.5557538499570353], "rank_score": 1.0476801079023617} -{"id": "tillmann-ney-2003-word", "title": "Word Reordering and a Dynamic Programming Beam Search Algorithm for Statistical Machine Translation", "abstract": "In this article, we describe an efficient beam search algorithm for statistical machine translation based on dynamic programming (DP). The search algorithm uses the translation model presented in Brown et al. (1993). Starting from a DP-based solution to the traveling-salesman problem, we present a novel technique to restrict the possible word reorderings between source and target language in order to achieve an efficient search algorithm. Word reordering restrictions especially useful for the translation direction German to English are presented. The restrictions are generalized, and a set of four parameters to control the word reordering is introduced, which then can easily be adopted to new translation directions. The beam search procedure has been successfully tested on the Verbmobil task (German to English, 8,000-word vocabulary) and on the Canadian Hansards task (French to English, 100,000-word vocabulary). For the medium-sized Verbmobil task, a sentence can be translated in a few seconds, only a small number of search errors occur, and there is no performance degradation as measured by the word error criterion used in this article.", "phrases": ["beam search algorithm", "statistical machine translation", "cost", "tsp"], "overall_score": 2.6871487119273816, "scores": [2.5444953773215957, 0.5808821671683919, 0.5362977230967736, 0.5288927903912449], "rank_score": 1.0476420144945016} -{"id": "roesiger-2018-rule", "title": "Rule- and Learning-based Methods for Bridging Resolution in the ARRAU Corpus", "abstract": "We present two systems for bridging resolution, which we submitted to the CRAC shared task on bridging anaphora resolution in the ARRAU corpus (track 2): a rule-based approach following Hou et al. 2014 and a learning-based approach. The re-implementation of Hou et al. 2014 achieves very poor performance when being applied to ARRAU. We found that the reasons for this lie in the different bridging annotations: whereas the rule-based system suggests many referential bridging pairs, ARRAU contains mostly lexical bridging. We describe the differences between these two types of bridging and adapt the rule-based approach to be able to handle lexical bridging. The modified rule-based approach achieves reasonable performance on all (sub)-tasks and outperforms a simple learning-based approach.", "phrases": ["bridging resolution", "arrau corpus", "anaphora resolution"], "overall_score": 2.1777658699664593, "scores": [1.7229902011337335, 0.849083663761117, 0.5697781559229996], "rank_score": 1.0472840069392835} -{"id": "augenstein-sogaard-2017-multi", "title": "Multi-Task Learning of Keyphrase Boundary Classification", "abstract": "Keyphrase boundary classification (KBC) is the task of detecting keyphrases in scientific articles and labelling them with respect to predefined types. 
Although important in practice, this task is so far underexplored, partly due to the lack of labelled data. To overcome this, we explore several auxiliary tasks, including semantic super-sense tagging and identification of multi-word expressions, and cast the task as a multi-task learning problem with deep recurrent neural networks. Our multi-task models perform significantly better than previous state-of-the-art approaches on two scientific KBC datasets, particularly for long keyphrases.", "phrases": ["keyphrase boundary classification", "super-sense tagging", "recurrent neural network", "multi-task learning"], "overall_score": 2.3009253686853675, "scores": [2.138175971369819, 0.9607987566207512, 0.5639915235995208, 0.5258188046463882], "rank_score": 1.0471962640591197} -{"id": "khanuja-etal-2020-gluecos", "title": "GLUECoS: An Evaluation Benchmark for Code-Switched NLP", "abstract": "Code-switching is the use of more than one language in the same conversation or utterance. Recently, multilingual contextual embedding models, trained on multiple monolingual corpora, have shown promising results on cross-lingual and multilingual tasks. We present an evaluation benchmark, GLUECoS, for code-switched languages, that spans several NLP tasks in English-Hindi and English-Spanish. Specifically, our evaluation benchmark includes Language Identification from text, POS tagging, Named Entity Recognition, Sentiment Analysis, Question Answering and a new task for code-switching, Natural Language Inference. We present results on all these tasks using cross-lingual word embedding models and multilingual models. In addition, we fine-tune multilingual models on artificially generated code-switched data. Although multilingual models perform significantly better than cross-lingual models, our results show that in most tasks, across both language pairs, multilingual models fine-tuned on code-switched data perform best, showing that multilingual models can be further optimized for code-switching tasks.", "phrases": ["evaluation benchmark", "gluecos", "code-mixed version"], "overall_score": 2.685819894922048, "scores": [1.6985465397546486, 0.8811283597112871, 0.5616969413958275], "rank_score": 1.047123946953921} -{"id": "xu-etal-2018-skeleton", "title": "A Skeleton-Based Model for Promoting Coherence Among Sentences in Narrative Story Generation", "abstract": "Narrative story generation is a challenging problem because it demands generated sentences with tight semantic connections, which has not been well studied by most existing generative models. To address this problem, we propose a skeleton-based model to promote the coherence of generated stories. Different from traditional models that generate a complete sentence at a stroke, the proposed model first generates the most critical phrases, called skeleton, and then expands the skeleton to a complete and fluent sentence. The skeleton is not manually defined, but learned by a reinforcement learning method. Compared to the state-of-the-art models, our skeleton-based model can generate significantly more coherent text according to human evaluation and automatic evaluation.
The G-score is improved by 20.1% in human evaluation.", "phrases": ["coherence", "narrative story generation", "fluent sentence"], "overall_score": 2.763100869936652, "scores": [1.6942671342627578, 0.919308315310459, 0.5274334693267883], "rank_score": 1.0470029729666683} -{"id": "wei-etal-2021-towards", "title": "Towards Propagation Uncertainty: Edge-enhanced Bayesian Graph Convolutional Networks for Rumor Detection", "abstract": "Detecting rumors on social media is a very critical task with significant implications for the economy, public health, etc. Previous works generally capture effective features from texts and the propagation structure. However, the uncertainty caused by unreliable relations in the propagation structure is common and inevitable due to wily rumor producers and the limited collection of spread data. Most approaches neglect it and may seriously limit the learning of features. To address this issue, this paper makes the first attempt to explore propagation uncertainty for rumor detection. Specifically, we propose a novel Edge-enhanced Bayesian Graph Convolutional Network (EBGCN) to capture robust structural features. The model adaptively rethinks the reliability of latent relations by adopting a Bayesian approach. Besides, we design a new edge-wise consistency training framework to optimize the model by enforcing consistency on relations. Experiments on three public benchmark datasets demonstrate that the proposed model achieves better performance than baseline methods on both rumor detection and early rumor detection tasks.", "phrases": ["propagation uncertainty", "rumor detection", "robust structural feature"], "overall_score": 1.4513762782726225, "scores": [1.7254114781790901, 0.8834938446546816, 0.5319347158579175], "rank_score": 1.0469466795638964} -{"id": "ma-etal-2017-blend", "title": "Blend: a Novel Combined MT Metric Based on Direct Assessment \u2014 CASICT-DCU submission to WMT17 Metrics Task", "abstract": "Existing metrics to evaluate the quality of Machine Translation hypotheses take different perspectives into account. DPM-Fcomb, a metric combining the merits of a range of metrics, achieved the best performance for evaluation of to-English language pairs in the previous two years of WMT Metrics Shared Tasks. This year, we submit a novel combined metric, Blend, to the WMT17 Metrics task. Compared to DPM-Fcomb, Blend includes the following adaptations: i) We use DA human evaluation to guide the training process with a vast reduction in required training data, while still achieving improved performance when evaluated on WMT16 to-English language pairs; ii) We carry out experiments to explore the contribution of metrics incorporated in Blend, in order to find a trade-off between performance and efficiency.", "phrases": ["wmt17 metrics task", "blend", "regression"], "overall_score": 2.0371143062143, "scores": [1.6657466699225678, 0.9544563370649638, 0.520405988556169], "rank_score": 1.0468696651812335} -{"id": "narasimhan-etal-2015-language", "title": "Language Understanding for Text-based Games using Deep Reinforcement Learning", "abstract": "In this paper, we consider the task of learning control policies for text-based games. In these games, all interactions in the virtual world are through text and the underlying state is not observed. The resulting language barrier makes such environments challenging for automatic game players.
We employ a deep reinforcement learning framework to jointly learn state representations and action policies using game rewards as feedback. This framework enables us to map text descriptions into vector representations that capture the semantics of the game states. We evaluate our approach on two game worlds, comparing against baselines using bag-of-words and bag-of-bigrams for state representations. Our algorithm outperforms the baselines on both worlds, demonstrating the importance of learning expressive representations.", "phrases": ["text-based games", "deep reinforcement learning", "language representation learning"], "overall_score": 3.282354324769728, "scores": [1.7445629294225045, 0.8439410877219315, 0.5520098209240477], "rank_score": 1.0468379460228279} -{"id": "batliner-etal-2004-stupid", "title": "\u201cYou Stupid Tin Box\u201d - Children Interacting with the AIBO Robot: A Cross-linguistic Emotional Speech Corpus", "abstract": "This paper deals with databases that combine different aspects: children's speech, emotional speech, human-robot communication, cross-linguistics, and read vs. spontaneous speech: in a Wizard-of-Oz scenario, German and English children had to instruct Sony's AIBO robot to fulfil specific tasks. In one experimental condition, strictly parallel for German and English, the AIBO behaved `disobedient' by following its own script irrespective of the child's commands. By that, reactions of different children to the same sequence of AIBO's actions could be obtained. In addition, both the German and the English children were recorded reading texts. The data are transliterated orthographically; emotional user states and some other phenomena will be annotated. We report preliminary word recognition rates and classification results.", "phrases": ["aibo robot", "emotional speech corpus", "english child"], "overall_score": 1.4511253355344738, "scores": [1.687747706995457, 0.9067144513697106, 0.54583482956071], "rank_score": 1.0467656626419592} -{"id": "wan-yang-2006-improved", "title": "Improved Affinity Graph Based Multi-Document Summarization", "abstract": "This paper describes an affinity graph based approach to multi-document summarization. We incorporate a diffusion process to acquire semantic relationships between sentences, and then compute information richness of sentences by a graph rank algorithm on differentiated intra-document links and inter-document links between sentences. A greedy algorithm is employed to impose diversity penalty on sentences and the sentences with both high information richness and high information novelty are chosen into the summary. Experimental results on task 2 of DUC 2002 and task 2 of DUC 2004 demonstrate that the proposed approach outperforms existing state-of-the-art systems.", "phrases": ["affinity graph", "summarization", "inter-document link"], "overall_score": 2.2998177471623746, "scores": [1.813387207111124, 0.7935012428564254, 0.5331880413720831], "rank_score": 1.0466921637798776} -{"id": "callison-burch-etal-2008-meta", "title": "Further Meta-Evaluation of Machine Translation", "abstract": "This paper analyzes the translation quality of machine translation systems for 10 language pairs translating between Czech, English, French, German, Hungarian, and Spanish. We report the translation quality of over 30 diverse translation systems based on a large-scale manual evaluation involving hundreds of hours of effort.
We use the human judgments of the systems to analyze automatic evaluation metrics for translation quality, and we report the strength of the correlation with human judgments at both the system level and the sentence level. We validate our manual evaluation methodology by measuring intra- and inter-annotator agreement, and collecting timing information.", "phrases": ["translation quality", "human judgment", "evaluation metric", "improved correlation", "wmt"], "overall_score": 3.186116662912876, "scores": [1.4502762698953404, 1.2485677648185545, 1.092676087441735, 0.876922098054848, 0.5640970828917378], "rank_score": 1.0465078606204432} -{"id": "yimam-etal-2020-exploring", "title": "Exploring Amharic Sentiment Analysis from Social Media Texts: Building Annotation Tools and Classification Models", "abstract": "This paper presents the study of sentiment analysis for Amharic social media texts. As the number of social media users is ever-increasing, social media platforms would like to understand the latent meaning and sentiments of a text to enhance decision-making procedures. However, low-resource languages such as Amharic have received less attention due to several reasons such as lack of well-annotated datasets, unavailability of computing resources, and fewer or no expert researchers in the area. This research addresses three main research questions. We first explore the suitability of existing tools for the sentiment analysis task. Annotation tools that support large-scale annotation tasks in Amharic are scarce. Also, the existing crowdsourcing platforms do not support Amharic text annotation. Hence, we build a social-network-friendly annotation tool called `ASAB' using the Telegram bot. We collect 9.4k tweets, where each tweet is annotated by three Telegram users. Moreover, we explore the suitability of machine learning approaches for Amharic sentiment analysis. The FLAIR deep learning text classifier, based on network embeddings that are computed from a distributional thesaurus, outperforms other supervised classifiers. We further investigate the challenges in building a sentiment analysis system for Amharic and we found that the widespread usage of sarcasm and figurative speech are the main issues in dealing with the problem. To advance the sentiment analysis research in Amharic and other related low-resource languages, we release the dataset, the annotation tool, source code, and models publicly under a permissive license.", "phrases": ["amharic sentiment analysis", "low-resource language", "social medium text"], "overall_score": 1.450504284977925, "scores": [2.0561745812961645, 0.5523119559940995, 0.5304664707985024], "rank_score": 1.0463176693629221} -{"id": "park-fung-2017-one", "title": "One-step and Two-step Classification for Abusive Language Detection on Twitter", "abstract": "Automatic abusive language detection is a difficult but important task for online social media. Our research explores a two-step approach of performing classification on abusive language and then classifying it into specific types, and compares it with a one-step approach of doing one multi-class classification for detecting sexist and racist languages.
With a public English Twitter corpus of 20 thousand tweets annotated for sexism and racism, our approach shows a promising performance of 0.827 F-measure by using HybridCNN in one step and 0.824 F-measure by using logistic regression in two steps.", "phrases": ["abusive language detection", "twitter", "text classification"], "overall_score": 3.68924279898341, "scores": [1.7242970326493456, 0.8723642595533667, 0.5419084370381547], "rank_score": 1.0461899097469558} -{"id": "diao-etal-2012-finding", "title": "Finding Bursty Topics from Microblogs", "abstract": "Microblogs such as Twitter reflect the general public's reactions to major events. Bursty topics from microblogs reveal what events have attracted the most online attention. Although bursty event detection from text streams has been studied before, previous work may not be suitable for microblogs because compared with other text streams such as news articles and scientific publications, microblog posts are particularly diverse and noisy. To find topics that have bursty patterns on microblogs, we propose a topic model that simultaneously captures two observations: (1) posts published around the same time are more likely to have the same topic, and (2) posts published by the same user are more likely to have the same topic. The former helps find event-driven posts while the latter helps identify and filter out \"personal\" posts. Our experiments on a large Twitter dataset show that there are more meaningful and unique bursty topics in the top-ranked results returned by our model than an LDA baseline and two degenerate variations of our model. We also show some case studies that demonstrate the importance of considering both the temporal information and users' personal interests for bursty topic detection from microblogs.", "phrases": ["microblog", "event detection", "bursty topic detection"], "overall_score": 2.408858788611284, "scores": [1.9747842858858051, 0.641161522114031, 0.5225164307342612], "rank_score": 1.0461540795780324} -{"id": "chen-etal-2013-exploiting", "title": "Exploiting Domain Knowledge in Aspect Extraction", "abstract": "Aspect extraction is one of the key tasks in sentiment analysis. In recent years, statistical models have been used for the task. However, such models without any domain knowledge often produce aspects that are not interpretable in applications. To tackle the issue, some knowledge-based topic models have been proposed, which allow the user to input some prior domain knowledge to generate coherent aspects. However, existing knowledge-based topic models have several major shortcomings, e.g., little work has been done to incorporate the cannot-link type of knowledge or to automatically adjust the number of topics based on domain knowledge. This paper proposes a more advanced topic model, called MC-LDA (LDA with m-set and c-set), to address these problems, which is based on an Extended generalized Polya urn (E-GPU) model (which is also proposed in this paper).
Experiments on real-life product reviews from a variety of domains show that MC-LDA outperforms the existing state-of-the-art models markedly.", "phrases": ["domain knowledge", "aspect extraction", "sentiment analysis", "topic model"], "overall_score": 2.175336193167495, "scores": [1.785420985764008, 0.9869867775760289, 0.8720849790029371, 0.5399695751890555], "rank_score": 1.0461155793830073} -{"id": "contractor-etal-2010-unsupervised", "title": "Unsupervised cleansing of noisy text", "abstract": "In this paper we look at the problem of cleansing noisy text using a statistical machine translation model. Noisy text is produced in informal communications such as Short Message Service (SMS), Twitter and chat. A typical Statistical Machine Translation system is trained on parallel text comprising noisy and clean sentences. In this paper we propose an unsupervised method for the translation of noisy text to clean text. Our method has two steps. For a given noisy sentence, a weighted list of possible clean tokens for each noisy token is obtained. The clean sentence is then obtained by maximizing the product of the weighted lists and the language model scores.", "phrases": ["noisy text", "language model", "candidate"], "overall_score": 2.6831664539136697, "scores": [2.041819137723431, 0.5592080970426219, 0.5372411050452987], "rank_score": 1.0460894466037838} -{"id": "zaidan-etal-2007-using", "title": "Using \u201cAnnotator Rationales\u201d to Improve Machine Learning for Text Categorization", "abstract": "We propose a new framework for supervised machine learning. Our goal is to learn from smaller amounts of supervised training data, by collecting a richer kind of training data: annotations with \u201crationales.\u201d When annotating an example, the human teacher will also highlight evidence supporting this annotation\u2014thereby teaching the machine learner why the example belongs to the category. We provide some rationale-annotated data and present a learning method that exploits the rationales during training to boost performance significantly on a sample task, namely sentiment classification of movie reviews. We hypothesize that in some situations, providing rationales is a more fruitful use of an annotator\u2019s time than annotating more examples.", "phrases": ["annotator rationales", "machine learning", "separate word"], "overall_score": 3.8048445952363044, "scores": [1.7155309705303508, 0.8790478979719407, 0.5433629655868003], "rank_score": 1.0459806113630308} -{"id": "das-etal-2017-chains", "title": "Chains of Reasoning over Entities, Relations, and Text using Recurrent Neural Networks", "abstract": "Our goal is to combine the rich multi-step inference of symbolic logical reasoning with the generalization capabilities of neural networks. We are particularly interested in complex reasoning about entities and relations in text and large-scale knowledge bases (KBs). Neelakantan et al. (2015) use RNNs to compose the distributed semantics of multi-hop paths in KBs; however, for multiple reasons, the approach lacks accuracy and practicality. This paper proposes three significant modeling advances: (1) we learn to jointly reason about relations, entities, and entity-types; (2) we use neural attention modeling to incorporate multiple paths; (3) we learn to share strength in a single RNN that represents logical composition across all relations. On a large-scale Freebase+ClueWeb prediction task, we achieve 25% error reduction, and a 53% error reduction on sparse relations due to shared strength.
On chains of reasoning in WordNet we reduce error in mean quantile by 84% versus previous state-of-the-art.", "phrases": ["reasoning", "chain", "knowledge base completion"], "overall_score": 2.6823958144284243, "scores": [1.805614184869886, 0.7947385175741032, 0.5370142868401045], "rank_score": 1.0457889964280314} -{"id": "ahlberg-etal-2015-paradigm", "title": "Paradigm classification in supervised learning of morphology", "abstract": "Supervised morphological paradigm learning by identifying and aligning the longest common subsequence found in inflection tables has recently been proposed as a simple yet competitive way to induce morphological patterns. We combine this non-probabilistic strategy of inflection table generalization with a discriminative classifier to permit the reconstruction of complete inflection tables of unseen words. Our system learns morphological paradigms from labeled examples of inflection patterns (inflection tables) and then produces inflection tables from unseen lemmas or base forms. We evaluate the approach on datasets covering 11 different languages and show that this approach results in consistently higher accuracies vis-\u00e0-vis other methods on the same task, thus indicating that the general method is a viable approach to quickly creating high-accuracy morphological resources.", "phrases": ["supervised learning", "morphology", "inflection"], "overall_score": 3.232559878310027, "scores": [0.9309338018538403, 0.9192718136921914, 1.2871436060390264], "rank_score": 1.045783073861686} -{"id": "dasgupta-etal-2018-hyte", "title": "HyTE: Hyperplane-based Temporally aware Knowledge Graph Embedding", "abstract": "Knowledge Graph (KG) embedding has emerged as an active area of research resulting in the development of several KG embedding methods. Relational facts in KG often show temporal dynamics, e.g., the fact (Cristiano_Ronaldo, playsFor, Manchester_United) is valid only from 2003 to 2009. Most of the existing KG embedding methods ignore this temporal dimension while learning embeddings of the KG elements. In this paper, we propose HyTE, a temporally aware KG embedding method which explicitly incorporates time in the entity-relation space by associating each timestamp with a corresponding hyperplane. HyTE not only performs KG inference using temporal guidance, but also predicts temporal scopes for relational facts with missing time annotations. Through extensive experimentation on temporal datasets extracted from real-world KGs, we demonstrate the effectiveness of our model over both traditional as well as temporal KG embedding methods.", "phrases": ["knowledge graph", "temporal hyperplane", "link prediction"], "overall_score": 3.0218504276143534, "scores": [1.768282409429977, 0.8299431942227352, 0.5382398901291908], "rank_score": 1.045488497927301} -{"id": "saha-etal-2021-multiprover", "title": "multiPRover: Generating Multiple Proofs for Improved Interpretability in Rule Reasoning", "abstract": "We focus on a type of linguistic formal reasoning where the goal is to reason over explicit knowledge in the form of natural language facts and rules (Clark et al., 2020). A recent work, named PRover (Saha et al., 2020), performs such reasoning by answering a question and also generating a proof graph that explains the answer. However, compositional reasoning is not always unique and there may be multiple ways of reaching the correct answer.
Thus, in our work, we address a new and challenging problem of generating multiple proof graphs for reasoning over natural language rule-bases. Each proof provides a different rationale for the answer, thereby improving the interpretability of such reasoning systems. In order to jointly learn from all proof graphs and exploit the correlations between multiple proofs for a question, we pose this task as a set generation problem over structured output spaces where each proof is represented as a directed graph. We propose two variants of a proof-set generation model, multiPRover. Our first model, Multilabel-multiPRover, generates a set of proofs via multi-label classification and implicit conditioning between the proofs; while the second model, Iterative-multiPRover, generates proofs iteratively by explicitly conditioning on the previously generated proofs. Experiments on multiple synthetic, zero-shot, and human-paraphrased datasets reveal that both multiPRover models significantly outperform PRover on datasets containing multiple gold proofs. Iterative-multiPRover obtains state-of-the-art proof F1 in zero-shot scenarios where all examples have single correct proofs. It also generalizes better to questions requiring higher depths of reasoning where multiple proofs are more frequent.", "phrases": ["proof", "interpretability", "multiprover"], "overall_score": 1.6824022381335766, "scores": [1.7162440738142088, 0.867870284425537, 0.5518914953612652], "rank_score": 1.0453352845336703} -{"id": "caswell-etal-2019-tagged", "title": "Tagged Back-Translation", "abstract": "Recent work in Neural Machine Translation (NMT) has shown significant quality gains from noised-beam decoding during back-translation, a method to generate synthetic parallel data. We show that the main role of such synthetic noise is not to diversify the source side, as previously suggested, but simply to indicate to the model that the given source is synthetic. We propose a simpler alternative to noising techniques, consisting of tagging back-translated source sentences with an extra token. Our results on WMT outperform noised back-translation in English-Romanian and match performance on English-German, redefining the state-of-the-art on the former.", "phrases": ["noise", "tagged back-translation", "synthetic data", "nmt system", "monolingual sentence"], "overall_score": 3.405751173347471, "scores": [1.0397207088677307, 1.2623721841594737, 1.2091083125978168, 1.1283161270479076, 0.5870791380007939], "rank_score": 1.0453192941347447} -{"id": "scialom-etal-2019-answers", "title": "Answers Unite! Unsupervised Metrics for Reinforced Summarization Models", "abstract": "Abstractive summarization approaches based on Reinforcement Learning (RL) have recently been proposed to overcome classical likelihood maximization. RL makes it possible to consider complex, possibly non-differentiable, metrics that globally assess the quality and relevance of the generated outputs. ROUGE, the most used summarization metric, is known to suffer from bias towards lexical similarity as well as from sub-optimal accounting for fluency and readability of the generated abstracts. We thus explore and propose alternative evaluation measures: the reported human-evaluation analysis shows that the proposed metrics, based on Question Answering, favorably compare to ROUGE \u2013 with the additional property of not requiring reference summaries.
Training an RL-based model on these metrics leads to improvements (in terms of both human and automated metrics) over current approaches that use ROUGE as reward.", "phrases": ["summarization", "reward", "source document", "evaluation metric"], "overall_score": 3.2770548547445686, "scores": [1.3030429079299406, 1.125578691594401, 0.8971921080103094, 0.8547774580892592], "rank_score": 1.0451477914059777} -{"id": "han-etal-2018-hierarchical", "title": "Hierarchical Relation Extraction with Coarse-to-Fine Grained Attention", "abstract": "Distantly supervised relation extraction employs existing knowledge graphs to automatically collect training data. While distant supervision is effective to scale relation extraction up to large-scale corpora, it inevitably suffers from the wrong labeling problem. Many efforts have been devoted to identifying valid instances from noisy data. However, most existing methods handle each relation in isolation, regardless of rich semantic correlations located in relation hierarchies. In this paper, we aim to incorporate the hierarchical information of relations for distantly supervised relation extraction and propose a novel hierarchical attention scheme. The multiple layers of our hierarchical attention scheme provide coarse-to-fine granularity to better identify valid instances, which is especially effective for extracting those long-tail relations. The experimental results on a large-scale benchmark dataset demonstrate that our models are capable of modeling the hierarchical information of relations and significantly outperform other baselines. The source code of this paper can be obtained from .", "phrases": ["relation extraction", "hierarchy", "attention scheme", "long-tail relation", "deep neural network"], "overall_score": 3.020594335945723, "scores": [1.5669567311127686, 1.488810452887563, 1.0819510980933285, 0.5564191007293551, 0.53113221734832], "rank_score": 1.0450539200342672} -{"id": "zhang-barzilay-2015-hierarchical", "title": "Hierarchical Low-Rank Tensors for Multilingual Transfer Parsing", "abstract": "Accurate multilingual transfer parsing typically relies on careful feature engineering. In this paper, we propose a hierarchical tensor-based approach for this task. This approach induces a compact feature representation by combining atomic features. However, unlike traditional tensor models, it enables us to incorporate prior knowledge about desired feature interactions, eliminating invalid feature combinations. To this end, we use a hierarchical structure that uses intermediate embeddings to capture desired feature combinations. Algebraically, this hierarchical tensor is equivalent to the sum of traditional tensors with shared components, and thus can be effectively trained with standard online algorithms. In both unsupervised and semi-supervised transfer scenarios, our hierarchical tensor consistently improves UAS and LAS over state-of-the-art multilingual transfer parsers and the traditional tensor model across 10 different languages.", "phrases": ["tensor", "multilingual transfer", "dependency parsing", "linguistic typology"], "overall_score": 3.1306694641093187, "scores": [1.860831428843613, 0.8581144987703269, 0.9243383588045814, 0.5368882767261078], "rank_score": 1.0450431407861573} -{"id": "lei-etal-2021-mtvr", "title": "mTVR: Multilingual Moment Retrieval in Videos", "abstract": "We introduce mTVR, a large-scale multilingual video moment retrieval dataset, containing 218K English and Chinese queries from 21.8K TV show video clips.
The dataset is collected by extending the popular TVR dataset (in English) with paired Chinese queries and subtitles. Compared to existing moment retrieval datasets, mTVR is multilingual, larger, and comes with diverse annotations. We further propose mXML, a multilingual moment retrieval model that learns and operates on data from both languages, via encoder parameter sharing and language neighborhood constraints. We demonstrate the effectiveness of mXML on the newly collected mTVR dataset, where mXML outperforms strong monolingual baselines while using fewer parameters. In addition, we also provide detailed dataset analyses and model ablations. Data and code are publicly available at ", "phrases": ["multilingual moment retrieval", "query", "mtvr"], "overall_score": 1.1480887793138588, "scores": [1.6960739245114456, 0.9172781064574099, 0.5217542967759341], "rank_score": 1.0450354425815964} -{"id": "sperber-etal-2017-toward", "title": "Toward Robust Neural Machine Translation for Noisy Input Sequences", "abstract": "Translating noisy inputs, such as the output of a speech recognizer, is a difficult but important challenge for neural machine translation. One way to increase robustness of neural models is by introducing artificial noise to the training data. In this paper, we experiment with appropriate forms of such noise, exploring a middle ground between general-purpose regularizers and highly task-specific forms of noise induction. We show that with a simple generative noise model, moderate gains can be achieved in translating erroneous speech transcripts, provided that type and amount of noise are properly calibrated. The optimal amount of noise at training time is much smaller than the amount of noise in our test data, indicating limitations due to trainability issues. We note that unlike our baseline model, models trained on noisy data are able to generate outputs of proper length even for noisy inputs, while gradually reducing output length for higher amounts of noise, as might also be expected from a human translator. We discuss these findings in detail and give suggestions for future work.", "phrases": ["speech recognizer", "noise", "insertion"], "overall_score": 2.829141381369119, "scores": [1.332838218848653, 1.2082934833896684, 0.5930140904256657], "rank_score": 1.044715264221329} -{"id": "finkel-etal-2007-infinite", "title": "The Infinite Tree", "abstract": "Historically, unsupervised learning techniques have lacked a principled technique for selecting the number of unseen components. Research into non-parametric priors, such as the Dirichlet process, has enabled instead the use of infinite models, in which the number of hidden categories is not fixed, but can grow with the amount of training data. Here we develop the infinite tree, a new infinite model capable of representing recursive branching structure over an arbitrarily large set of hidden categories. Specifically, we develop three infinite tree models, each of which enforces different independence assumptions, and for each model we define a simple direct assignment sampling inference procedure. 
We demonstrate the utility of our models by doing unsupervised learning of part-of-speech tags from treebank dependency skeleton structure, achieving an accuracy of 75.34%, and by doing unsupervised splitting of part-of-speech tags, which increases the accuracy of a generative dependency parser from 85.11% to 87.35%.", "phrases": ["infinite tree", "dirichlet process", "pos tag"], "overall_score": 2.4053636995663874, "scores": [2.0368655254904384, 0.5513594847914501, 0.5456835347943311], "rank_score": 1.0446361816920733} -{"id": "imamura-etal-2012-grammar", "title": "Grammar Error Correction Using Pseudo-Error Sentences and Domain Adaptation", "abstract": "This paper presents grammar error correction for Japanese particles that uses discriminative sequence conversion, which corrects erroneous particles by substitution, insertion, and deletion. The error correction task is hindered by the difficulty of collecting large error corpora. We tackle this problem by using pseudo-error sentences generated automatically. Furthermore, we apply domain adaptation: the pseudo-error sentences are from the source domain, and the real-error sentences are from the target domain. Experiments show that stable improvement is achieved by using domain adaptation.", "phrases": ["domain adaptation", "particle", "grammar error correction", "learner"], "overall_score": 2.595271480744864, "scores": [1.8543880921195484, 0.9340716396089206, 0.8530751777433296, 0.5361213915667611], "rank_score": 1.0444140752596398} -{"id": "wang-etal-2021-x", "title": "X-Class: Text Classification with Extremely Weak Supervision", "abstract": "In this paper, we explore text classification with extremely weak supervision, i.e., only relying on the surface text of class names. This is a more challenging setting than the seed-driven weak supervision, which allows a few seed words per class. We opt to attack this problem from a representation learning perspective\u2014ideal document representations should lead to nearly the same results between clustering and the desired classification. In particular, one can classify the same corpus differently (e.g., based on topics and locations), so document representations should be adaptive to the given class names. We propose a novel framework X-Class to realize the adaptive representations. Specifically, we first estimate class representations by incrementally adding the most similar word to each class until inconsistency arises. Following a tailored mixture of class attention mechanisms, we obtain the document representation via a weighted average of contextualized word representations. With the prior of each document assigned to its nearest class, we then cluster and align the documents to classes. Finally, we pick the most confident documents from each cluster to train a text classifier. Extensive experiments demonstrate that X-Class can rival and even outperform seed-driven weakly supervised methods on 7 benchmark datasets.", "phrases": ["text classification", "weak supervision", "x-class"], "overall_score": 2.031919304006812, "scores": [0.9465493676708346, 0.9179814907822605, 1.2680690280215001], "rank_score": 1.0441999621581983} -{"id": "gui-etal-2019-lexicon", "title": "A Lexicon-Based Graph Neural Network for Chinese NER", "abstract": "Recurrent neural networks (RNN) used for Chinese named entity recognition (NER) that sequentially track character and word information have achieved great success. 
However, the characteristic of chain structure and the lack of global semantics determine that RNN-based models are vulnerable to word ambiguities. In this work, we try to alleviate this problem by introducing a lexicon-based graph neural network with global semantics, in which lexicon knowledge is used to connect characters to capture the local composition, while a global relay node can capture global sentence semantics and long-range dependency. Based on the multiple graph-based interactions among characters, potential words, and the whole-sentence semantics, word ambiguities can be effectively tackled. Experiments on four NER datasets show that the proposed model achieves significant improvements against other baseline models.", "phrases": ["graph neural network", "chinese ner", "lexicon knowledge"], "overall_score": 2.8950570264189825, "scores": [1.6666160475691107, 0.9052305893191183, 0.5606666744408318], "rank_score": 1.0441711037763537} -{"id": "zens-ney-2003-comparative", "title": "A Comparative Study on Reordering Constraints in Statistical Machine Translation", "abstract": "In statistical machine translation, the generation of a translation hypothesis is computationally expensive. If arbitrary word-reorderings are permitted, the search problem is NP-hard. On the other hand, if we restrict the possible word-reorderings in an appropriate way, we obtain a polynomial-time search algorithm. In this paper, we compare two different reordering constraints, namely the ITG constraints and the IBM constraints. This comparison includes a theoretical discussion on the permitted number of reorderings for each of these constraints. We show a connection between the ITG constraints and the Schr\u00f6der numbers, known since 1870. We evaluate these constraints on two tasks: the Verbmobil task and the Canadian Hansards task. The evaluation consists of two parts: First, we check how many of the Viterbi alignments of the training corpus satisfy each of these constraints. Second, we restrict the search to each of these constraints and compare the resulting translation hypotheses. The experiments will show that the baseline ITG constraints are not sufficient on the Canadian Hansards task. Therefore, we present an extension to the ITG constraints. These extended ITG constraints increase the alignment coverage from about 87% to 96%.", "phrases": ["comparative study", "statistical machine translation", "different reordering constraint", "alignment coverage", "normal form itg"], "overall_score": 3.4402682740857258, "scores": [1.9964364327570048, 1.2579224435073861, 0.8403710037306561, 0.5981757538985122, 0.5262062547608345], "rank_score": 1.0438223777308786} -{"id": "mi-etal-2020-continual", "title": "Continual Learning for Natural Language Generation in Task-oriented Dialog Systems", "abstract": "Natural language generation (NLG) is an essential component of task-oriented dialog systems. Despite the recent success of neural approaches for NLG, they are typically developed in an offline manner for particular domains. To better fit real-life applications where new data come in a stream, we study NLG in a \u201ccontinual learning\u201d setting to expand its knowledge to new domains or functionalities incrementally. The major challenge towards this goal is catastrophic forgetting, meaning that a continually trained model tends to forget the knowledge it has learned before. 
To this end, we propose a method called ARPER (Adaptively Regularized Prioritized Exemplar Replay) by replaying prioritized historical exemplars, together with an adaptive regularization technique based on Elastic Weight Consolidation. Extensive experiments to continually learn new domains and intents are conducted on MultiWoZ-2.0 to benchmark ARPER with a wide range of techniques. Empirical results demonstrate that ARPER significantly outperforms other methods by effectively mitigating the detrimental catastrophic forgetting issue.", "phrases": ["natural language generation", "dialog system", "continual learning"], "overall_score": 1.8694391625860638, "scores": [1.7370666897028637, 0.8541803184095786, 0.5388145788785988], "rank_score": 1.043353862330347} -{"id": "ueffing-ney-2007-word", "title": "Word-Level Confidence Estimation for Machine Translation", "abstract": "This article introduces and evaluates several different word-level confidence measures for machine translation. These measures provide a method for labeling each word in an automatically generated translation as correct or incorrect. All approaches to confidence estimation presented here are based on word posterior probabilities. Different concepts of word posterior probabilities as well as different ways of calculating them will be introduced and compared. They can be divided into two categories: System-based methods that explore knowledge provided by the translation system that generated the translations, and direct methods that are independent of the translation system. The system-based techniques make use of system output, such as word graphs or N-best lists. The word posterior probability is determined by summing the probabilities of the sentences in the translation hypothesis space that contains the target word. The direct confidence measures take other knowledge sources, such as word or phrase lexica, into account. They can be applied to output from nonstatistical machine translation systems as well. Experimental assessment of the different confidence measures on various translation tasks and in several language pairs will be presented. Moreover, the application of confidence measures for rescoring of translation hypotheses will be investigated.", "phrases": ["machine translation", "word-level confidence measure", "system-dependent", "n-b list", "smt system"], "overall_score": 2.824834907974201, "scores": [2.9024615462943486, 0.6520607772528764, 0.5632053444325555, 0.5562740858112801, 0.5416233236621903], "rank_score": 1.04312501549065} -{"id": "guillou-hardmeier-2016-protest", "title": "PROTEST: A Test Suite for Evaluating Pronouns in Machine Translation", "abstract": "We present PROTEST, a test suite for the evaluation of pronoun translation by MT systems. The test suite comprises 250 hand-selected pronoun tokens and an automatic evaluation method which compares the translations of pronouns in MT output with those in the reference translation. Pronoun translations that do not match the reference are referred for manual evaluation. PROTEST is designed to support analysis of system performance at the level of individual pronoun groups, rather than to provide a single aggregate measure over all pronouns. We wish to encourage detailed analyses to highlight issues in the handling of specific linguistic mechanisms by MT systems, thereby contributing to a better understanding of those problems involved in translating pronouns. 
We present two use cases for PROTEST: a) for measuring improvement/degradation of an incremental system change, and b) for comparing the performance of a group of systems whose design may be largely unrelated. Following the latter use case, we demonstrate the application of PROTEST to the evaluation of the systems submitted to the DiscoMT 2015 shared task on pronoun translation.", "phrases": ["machine translation", "evaluation method", "protest"], "overall_score": 2.2919714602379617, "scores": [1.7334054380217736, 0.8756094951088846, 0.5203485609960198], "rank_score": 1.0431211647088927} -{"id": "cherry-lin-2003-probability", "title": "A Probability Model to Improve Word Alignment", "abstract": "Word alignment plays a crucial role in statistical machine translation. Word-aligned corpora have been found to be an excellent source of translation-related knowledge. We present a statistical model for computing the probability of an alignment given a sentence pair. This model allows easy integration of context-specific features. Our experiments show that this model can be an effective tool for improving an existing word alignment.", "phrases": ["word alignment", "integration", "context-specific feature", "knowledge source", "many researcher"], "overall_score": 3.1756062402135043, "scores": [3.045524987858561, 0.5651616411236571, 0.551682525145988, 0.5291772840453371, 0.5237316640106875], "rank_score": 1.0430556204368462} -{"id": "kottur-etal-2017-natural", "title": "Natural Language Does Not Emerge `Naturally' in Multi-Agent Dialog", "abstract": "A number of recent works have proposed techniques for end-to-end learning of communication protocols among cooperative multi-agent populations, and have simultaneously found the emergence of grounded human-interpretable language in the protocols developed by the agents, learned without any human supervision! In this paper, using a Task & Talk reference game between two agents as a testbed, we present a sequence of `negative' results culminating in a `positive' one \u2013 showing that while most agent-invented languages are effective (i.e. achieve near-perfect task rewards), they are decidedly not interpretable or compositional. In essence, we find that natural language does not emerge `naturally', despite the semblance of ease of natural-language-emergence that one may gather from recent literature. We discuss how it is possible to coax the invented languages to become more and more human-like and compositional by increasing restrictions on how two agents may communicate.", "phrases": ["agent", "human language", "compositionality", "emergent communication"], "overall_score": 3.269631603464426, "scores": [1.661744413932414, 1.141712696072211, 0.8392621248501371, 0.5284019706685297], "rank_score": 1.042780301380823} -{"id": "ganesan-etal-2010-opinosis", "title": "Opinosis: A Graph Based Approach to Abstractive Summarization of Highly Redundant Opinions", "abstract": "We present a novel graph-based summarization framework (Opinosis) that generates concise abstractive summaries of highly redundant opinions. Evaluation results on summarizing user reviews show that Opinosis summaries have better agreement with human summaries compared to the baseline extractive method. 
The summaries are readable, reasonably well-formed and are informative enough to convey the major opinions.", "phrases": ["abstractive summarization", "redundancy", "graph-based method"], "overall_score": 3.5107490736769886, "scores": [0.9390723613115789, 1.0963819416074003, 1.0923514358103297], "rank_score": 1.0426019129097697} -{"id": "chan-etal-2020-poison", "title": "Poison Attacks against Text Datasets with Conditional Adversarially Regularized Autoencoder", "abstract": "This paper demonstrates a fatal vulnerability in natural language inference (NLI) and text classification systems. More concretely, we present a `backdoor poisoning' attack on NLP models. Our poisoning attack utilizes conditional adversarially regularized autoencoder (CARA) to generate poisoned training samples by poison injection in latent space. Just by adding 1% poisoned data, our experiments show that a victim BERT finetuned classifier's predictions can be steered to the poison target class with success rates of >80% when the input hypothesis is injected with the poison signature, demonstrating that NLI and text classification systems face a huge security risk.", "phrases": ["attack", "conditional", "autoencoder"], "overall_score": 2.0282475923649583, "scores": [1.6605929434898072, 0.865975144117687, 0.6003711392878757], "rank_score": 1.04231307563179} -{"id": "bost-etal-2020-serial", "title": "Serial Speakers: a Dataset of TV Series", "abstract": "For over a decade, TV series have been drawing increasing interest, both from the audience and from various academic fields. But while most viewers are hooked on the continuous plots of TV serials, the few annotated datasets available to researchers focus on standalone episodes of classical TV series. We aim at filling this gap by providing the multimedia/speech processing communities with \u201cSerial Speakers\u201d, an annotated dataset of 155 episodes from three popular American TV serials: \u201cBreaking Bad\u201d, \u201cGame of Thrones\u201d and \u201cHouse of Cards\u201d. \u201cSerial Speakers\u201d is suitable both for investigating multimedia retrieval in realistic use case scenarios, and for addressing lower level speech related tasks in especially challenging conditions. We publicly release annotations for every speech turn (boundaries, speaker) and scene boundary, along with annotations for shot boundaries, recurring shots, and interacting speakers in a subset of episodes. Because of copyright restrictions, the textual content of the speech turns is encrypted in the public version of the dataset, but we provide the users with a simple online tool to recover the plain text from their own subtitle files.", "phrases": ["continuous plot", "game", "serial speakers"], "overall_score": 1.145021439756312, "scores": [2.0258423500452505, 0.5505284377609438, 0.5503595015786052], "rank_score": 1.0422434297949332} -{"id": "pitler-etal-2013-finding", "title": "Finding Optimal 1-Endpoint-Crossing Trees", "abstract": "Dependency parsing algorithms capable of producing the types of crossing dependencies seen in natural language sentences have traditionally been orders of magnitude slower than algorithms for projective trees. For 95.8\u201399.8% of dependency parses in various natural language treebanks, whenever an edge is crossed, the edges that cross it all have a common vertex. The optimal dependency tree that satisfies this 1-Endpoint-Crossing property can be found with an O(n^4) parsing algorithm that recursively combines forests over intervals with one exterior point. 
1-Endpoint-Crossing trees also have natural connections to linguistics and another class of graphs that has been studied in NLP.", "phrases": ["1-endpoint-crossing tree", "vertex", "interval", "non-projective tree", "restriction"], "overall_score": 2.4982126268186167, "scores": [2.5748095271588114, 0.9170103549212034, 0.5957758189765593, 0.5753384245039906, 0.5462438046815146], "rank_score": 1.0418355860484159} -{"id": "rimell-2014-distributional", "title": "Distributional Lexical Entailment by Topic Coherence", "abstract": "Automatic detection of lexical entailment, or hypernym detection, is an important NLP task. Recent hypernym detection measures have been based on the Distributional Inclusion Hypothesis (DIH). This paper assumes that the DIH sometimes fails, and investigates other ways of quantifying the relationship between the cooccurrence contexts of two terms. We consider the top features in a context vector as a topic, and introduce a new entailment detection measure based on Topic Coherence (TC). Our measure successfully detects hypernyms, and a TC-based family of measures contributes to multi-way relation classification.", "phrases": ["topic coherence", "distributional inclusion hypothesis", "dih", "top feature"], "overall_score": 2.0267242342728533, "scores": [2.5220437628202785, 0.5873232257888243, 0.5302144092557175, 0.5265394998688665], "rank_score": 1.0415302244334217} -{"id": "su-etal-2018-global", "title": "Global Relation Embedding for Relation Extraction", "abstract": "We study the problem of textual relation embedding with distant supervision. To combat the wrong labeling problem of distant supervision, we propose to embed textual relations with global statistics of relations, i.e., the co-occurrence statistics of textual and knowledge base relations collected from the entire corpus. This approach turns out to be more robust to the training noise introduced by distant supervision. On a popular relation extraction dataset, we show that the learned textual relation embedding can be used to augment existing relation extraction models and significantly improve their performance. Most remarkably, for the top 1,000 relational facts discovered by the best existing model, the precision can be improved from 83.9% to 89.3%.", "phrases": ["relation extraction", "distant supervision", "labeling problem", "global statistic"], "overall_score": 2.1647639741062226, "scores": [1.567253387859975, 0.8924528851392403, 0.863020251033748, 0.8413991428178788], "rank_score": 1.0410314167127106} -{"id": "roth-lapata-2016-neural", "title": "Neural Semantic Role Labeling with Dependency Path Embeddings", "abstract": "This paper introduces a novel model for semantic role labeling that makes use of neural sequence modeling techniques. Our approach is motivated by the observation that complex syntactic structures and related phenomena, such as nested subordinations and nominal predicates, are not handled well by existing models. Our model treats such instances as subsequences of lexicalized dependency paths and learns suitable embedding representations. 
We experimentally demonstrate that such embeddings can improve results over previous state-of-the-art semantic role labelers, and showcase qualitative improvements obtained by our method.", "phrases": ["dependency path", "predicate", "lstm model"], "overall_score": 3.3914779738960568, "scores": [1.4156646325373827, 0.8540001820580935, 0.8531505479802186], "rank_score": 1.0409384541918982} -{"id": "eriguchi-etal-2016-tree", "title": "Tree-to-Sequence Attentional Neural Machine Translation", "abstract": "Most of the existing Neural Machine Translation (NMT) models focus on the conversion of sequential data and do not directly use syntactic information. We propose a novel end-to-end syntactic NMT model, extending a sequence-to-sequence model with the source-side phrase structure. Our model has an attention mechanism that enables the decoder to generate a translated word while softly aligning it with phrases as well as words of the source sentence. Experimental results on the WAT'15 English-to-Japanese dataset demonstrate that our proposed model considerably outperforms sequence-to-sequence attentional NMT models and compares favorably with the state-of-the-art tree-to-string SMT system.", "phrases": ["neural machine translation", "syntactic information", "nmt model", "tree-based encoder", "annotation vector"], "overall_score": 3.9853355443332568, "scores": [1.570224755469079, 1.5612185853903793, 0.9326692910530254, 0.607887002208513, 0.5326345057959099], "rank_score": 1.0409268279833812} -{"id": "platt-etal-2010-translingual", "title": "Translingual Document Representations from Discriminative Projections", "abstract": "Representing documents by vectors that are independent of language enhances machine translation and multilingual text categorization. We use discriminative training to create a projection of documents from multiple languages into a single translingual vector space. We explore two variants to create these projections: Oriented Principal Component Analysis (OPCA) and Coupled Probabilistic Latent Semantic Analysis (CPLSA). Both of these variants start with a basic model of documents (PCA and PLSA). Each model is then made discriminative by encouraging comparable document pairs to have similar vector representations. We evaluate these algorithms on two tasks: parallel document retrieval for Wikipedia and Europarl documents, and cross-lingual text classification on Reuters. The two discriminative variants, OPCA and CPLSA, significantly outperform their corresponding baselines. The largest differences in performance are observed on the task of retrieval when the documents are only comparable and not parallel. The OPCA method is shown to perform best.", "phrases": ["principal component analysis", "cplsa", "document pair", "different language"], "overall_score": 2.746999331709699, "scores": [1.3513383145387197, 1.3100241521801976, 0.9547499547184922, 0.5474944860490257], "rank_score": 1.0409017268716088} -{"id": "marelli-etal-2014-sick", "title": "A SICK cure for the evaluation of compositional distributional semantic models", "abstract": "Shared and internationally recognized benchmarks are fundamental for the development of any computational system. We aim to help the research community working on compositional distributional semantic models (CDSMs) by providing SICK (Sentences Involving Compositional Knowledge), a large size English benchmark tailored for them. 
SICK consists of about 10,000 English sentence pairs that include many examples of the lexical, syntactic and semantic phenomena that CDSMs are expected to account for, but do not require dealing with other aspects of existing sentential data sets (idiomatic multiword expressions, named entities, telegraphic language) that are not within the scope of CDSMs. By means of crowdsourcing techniques, each pair was annotated for two crucial semantic tasks: relatedness in meaning (with a 5-point rating scale as gold score) and entailment relation between the two elements (with three possible gold labels: entailment, contradiction, and neutral). The SICK data set was used in SemEval-2014 Task 1, and it is freely available for research purposes.", "phrases": ["distributional semantic model", "sentence pair", "entailment relation", "compositional knowledge", "linguistic capacity"], "overall_score": 3.8131138703112475, "scores": [0.9173310926774758, 1.517377418759903, 1.3578029847626227, 0.871097504483141, 0.5404986937234674], "rank_score": 1.040821538881322} -{"id": "ha-etal-2016-toward", "title": "Toward Multilingual Neural Machine Translation with Universal Encoder and Decoder", "abstract": "In this paper, we present our first attempts in building a multilingual Neural Machine Translation framework under a unified approach in which the information shared among languages can be helpful in the translation of individual language pairs. We are then able to employ attention-based Neural Machine Translation for many-to-many multilingual translation tasks. Our approach does not require any special treatment on the network architecture and it allows us to learn a minimal number of free parameters in a standard way of training. Our approach has shown its effectiveness in an under-resourced translation scenario with considerable improvements up to 2.6 BLEU points. In addition, we point out a novel way to make use of monolingual data with Neural Machine Translation using the same approach with a 3.15-BLEU-score gain in IWSLT'16 English\u2192German translation task.", "phrases": ["neural machine translation", "universal encoder", "multilingual nmt", "zero-shot translation", "language tag"], "overall_score": 4.26041943477138, "scores": [0.8164103773585294, 1.800732341342045, 1.051242144108784, 0.9967393619650359, 0.5376858587755199], "rank_score": 1.0405620167099827} -{"id": "abdul-rauf-schwenk-2009-use", "title": "On the Use of Comparable Corpora to Improve SMT performance", "abstract": "We present a simple and effective method for extracting parallel sentences from comparable corpora. We employ a statistical machine translation (SMT) system built from small amounts of parallel texts to translate the source side of the non-parallel corpus. The target side texts are used, along with other corpora, in the language model of this SMT system. We then use information retrieval techniques and simple filters to create French/English parallel data from comparable news corpora. 
We evaluate the quality of the extracted data by showing that it significantly improves the performance of an SMT system.", "phrases": ["comparable corpora", "parallel sentence", "small amount", "smt system", "rich-resources"], "overall_score": 3.1679392039801306, "scores": [2.7109131767359775, 0.8732404158664518, 0.5587758203794259, 0.5378267422144475, 0.5219304217319892], "rank_score": 1.0405373153856583} -{"id": "anastasopoulos-2019-analysis", "title": "An Analysis of Source-Side Grammatical Errors in NMT", "abstract": "The quality of Neural Machine Translation (NMT) has been shown to significantly degrade when confronted with source-side noise. We present the first large-scale study of state-of-the-art English-to-German NMT on real grammatical noise, by evaluating on several Grammar Correction corpora. We present methods for evaluating NMT robustness without true references, and we use them for extensive analysis of the effects that different grammatical errors have on the NMT output. We also introduce a technique for visualizing the divergence distribution caused by a source-side error, which allows for additional insights.", "phrases": ["grammatical error", "source-side error", "nmt system"], "overall_score": 2.024572361858164, "scores": [1.997143773590084, 0.5972313803949918, 0.5268979883144893], "rank_score": 1.0404243807665217} -{"id": "toutanova-etal-2003-feature", "title": "Feature-Rich Part-of-Speech Tagging with a Cyclic Dependency Network", "abstract": "We present a new part-of-speech tagger that demonstrates the following ideas: (i) explicit use of both preceding and following tag contexts via a dependency network representation, (ii) broad use of lexical features, including jointly conditioning on multiple consecutive words, (iii) effective use of priors in conditional loglinear models, and (iv) fine-grained modeling of unknown word features. Using these ideas together, the resulting tagger gives a 97.24% accuracy on the Penn Treebank WSJ, an error reduction of 4.4% on the best previous single automatically learned tagging result.", "phrases": ["part-of-speech", "cyclic dependency network", "tagger", "lexical feature", "pos"], "overall_score": 3.8608781854246144, "scores": [1.2549363480621287, 1.4191515606826897, 1.3597697919065772, 0.6075950718919553, 0.556882032967825], "rank_score": 1.039666961102235} -{"id": "zhou-etal-2003-fast", "title": "A Fast Algorithm for Feature Selection in Conditional Maximum Entropy Modeling", "abstract": "This paper describes a fast algorithm that selects features for conditional maximum entropy modeling. Berger et al. (1996) presents an incremental feature selection (IFS) algorithm, which computes the approximate gains for all candidate features at each selection stage, and is very time-consuming for any problems with large feature spaces. In this new algorithm, instead, we only compute the approximate gains for the top-ranked features based on the models obtained from previous stages. Experiments on WSJ data in Penn Treebank are conducted to show that the new algorithm greatly speeds up the feature selection process while maintaining the same quality of selected features. One variant of this new algorithm with look-ahead functionality is also tested to further confirm the good quality of the selected features. 
The new algorithm is easy to implement, and given a feature space of size F, it only uses O(F) more space than the original IFS algorithm.", "phrases": ["feature selection", "ifs", "gain"], "overall_score": 1.1418250124785423, "scores": [2.041070150086997, 0.5418731542062357, 0.5350584446117094], "rank_score": 1.0393339163016473} -{"id": "bansal-etal-2014-structured", "title": "Structured Learning for Taxonomy Induction with Belief Propagation", "abstract": "We present a structured learning approach to inducing hypernym taxonomies using a probabilistic graphical model formulation. Our model incorporates heterogeneous relational evidence about both hypernymy and siblinghood, captured by semantic features based on patterns and statistics from Web n-grams and Wikipedia abstracts. For efficient inference over taxonomy structures, we use loopy belief propagation along with a directed spanning tree algorithm for the core hypernymy factor. To train the system, we extract sub-structures of WordNet and discriminatively learn to reproduce them, using adaptive subgradient stochastic optimization. On the task of reproducing sub-hierarchies of WordNet, our approach achieves a 51% error reduction over a chance baseline, including a 15% error reduction due to the non-hypernym-factored sibling features. On a comparison setup, we find up to 29% relative error reduction over previous work on ancestor F1.", "phrases": ["belief propagation", "taxonomy structure", "structured learning problem"], "overall_score": 2.491953990240128, "scores": [1.9082038818709497, 0.6169576646767041, 0.5925150490594471], "rank_score": 1.0392255318690335} -{"id": "schlechtweg-etal-2018-diachronic", "title": "Diachronic Usage Relatedness (DURel): A Framework for the Annotation of Lexical Semantic Change", "abstract": "We propose a framework that extends synchronic polysemy annotation to diachronic changes in lexical meaning, to counteract the lack of resources for evaluating computational models of lexical semantic change. Our framework exploits an intuitive notion of semantic relatedness, and distinguishes between innovative and reductive meaning changes with high inter-annotator agreement. The resulting test set for German comprises ratings from five annotators for the relatedness of 1,320 use pairs across 22 target words.", "phrases": ["change", "diachronic usage relatedness", "semantic change detection"], "overall_score": 2.881066648502584, "scores": [1.5152679620195382, 1.0414412013864924, 0.5606662612929172], "rank_score": 1.039125141566316} -{"id": "li-etal-2016-persona", "title": "A Persona-Based Neural Conversation Model", "abstract": "We present persona-based models for handling the issue of speaker consistency in neural response generation. A speaker model encodes personas in distributed embeddings that capture individual characteristics such as background information and speaking style. A dyadic speaker-addressee model captures properties of interactions between two interlocutors. 
Our models yield qualitative performance improvements in both perplexity and BLEU scores over baseline sequence-to-sequence models, with similar gains in speaker consistency as measured by human judges.", "phrases": ["neural conversation model", "neural response generation", "agent", "dialog system", "dialogue generation"], "overall_score": 4.640606885034972, "scores": [1.5648309976138999, 1.3555694284002657, 0.8653397684369454, 0.8420427037980217, 0.5678086384986265], "rank_score": 1.0391183073495518} -{"id": "snyder-barzilay-2007-multiple", "title": "Multiple Aspect Ranking Using the Good Grief Algorithm", "abstract": "We address the problem of analyzing multiple related opinions in a text. For instance, in a restaurant review such opinions may include food, ambience and service. We formulate this task as a multiple aspect ranking problem, where the goal is to produce a set of numerical scores, one for each aspect. We present an algorithm that jointly learns ranking models for individual aspects by modeling the dependencies between assigned ranks. This algorithm guides the prediction of individual rankers by analyzing meta-relations between opinions, such as agreement and contrast. We prove that our agreement-based joint model is more expressive than individual ranking models. Our empirical results further confirm the strength of the model: the algorithm provides significant improvement over both individual rankers and a state-of-the-art joint ranking model.", "phrases": ["good grief algorithm", "review", "service", "sentiment classification", "contrastive rst relation"], "overall_score": 3.2580337104168406, "scores": [1.3314321945835692, 1.3111946897358635, 0.8783447030908275, 0.8384839975478335, 0.8359514004306164], "rank_score": 1.0390813970777422} -{"id": "ueffing-etal-2007-transductive", "title": "Transductive learning for statistical machine translation", "abstract": "Statistical machine translation systems are usually trained on large amounts of bilingual text and monolingual text in the target language. In this paper we explore the use of transductive semi-supervised methods for the effective use of monolingual data from the source language in order to improve translation quality. We propose several algorithms with this aim, and present the strengths and weaknesses of each one. We present detailed experimental evaluations on the French\u2010English EuroParl data set and on data from the NIST Chinese\u2010English large-data track. We show a significant improvement in translation quality on both tasks.", "phrases": ["monolingual data", "transductive", "semi-supervised learning", "conventional smt"], "overall_score": 3.302172313313452, "scores": [1.5134251752722037, 1.1571246006246325, 0.9275369118811039, 0.5581327969649192], "rank_score": 1.0390548711857148} -{"id": "jain-etal-2020-learning", "title": "Learning to Faithfully Rationalize by Construction", "abstract": "In many settings it is important for one to be able to understand why a model made a particular prediction. In NLP this often entails extracting snippets of an input text `responsible for' corresponding model output; when such a snippet comprises tokens that indeed informed the model's prediction, it is a faithful explanation. In some settings, faithfulness may be critical to ensure transparency. Lei et al. (2016) proposed a model to produce faithful rationales for neural text classification by defining independent snippet extraction and prediction modules. 
However, the discrete selection over input tokens performed by this method complicates training, leading to high variance and requiring careful hyperparameter tuning. We propose a simpler variant of this approach that provides faithful explanations by construction. In our scheme, named FRESH, arbitrary feature importance scores (e.g., gradients from a trained model) are used to induce binary labels over token inputs, which an extractor can be trained to predict. An independent classifier module is then trained exclusively on snippets provided by the extractor; these snippets thus constitute faithful explanations, even if the classifier is arbitrarily complex. In both automatic and manual evaluations we find that variants of this simple framework yield predictive performance superior to `end-to-end' approaches, while being more general and easier to train. Code is available at .", "phrases": ["input text", "rationale", "predictive performance"], "overall_score": 3.257779367767073, "scores": [2.0243788331211547, 0.5714800774429936, 0.5211419289367015], "rank_score": 1.0390002798336166} -{"id": "callison-burch-etal-2004-statistical", "title": "Statistical Machine Translation with Word- and Sentence-Aligned Parallel Corpora", "abstract": "The parameters of statistical translation models are typically estimated from sentence-aligned parallel corpora. We show that significant improvements in the alignment and translation quality of such models can be achieved by additionally including word-aligned data during training. Incorporating word-level alignments into the parameter estimation of the IBM models reduces alignment error rate and increases the Bleu score when compared to training the same models only on sentence-aligned data. On the Verbmobil data set, we attain a 38% reduction in the alignment error rate and a higher Bleu score with half as many training examples. We discuss how varying the ratio of word-aligned to sentence-aligned data affects the expected performance gain.", "phrases": ["parallel corpora", "translation quality", "alignment error rate", "aer"], "overall_score": 2.282847919316089, "scores": [2.4495249009003666, 0.5807271540707801, 0.5750552116084341, 0.5505681825903421], "rank_score": 1.0389688622924806} -{"id": "dong-etal-2015-multi", "title": "Multi-Task Learning for Multiple Language Translation", "abstract": "In this paper, we investigate the problem of learning a machine translation model that can simultaneously translate sentences from one source language to multiple target languages. Our solution is inspired by the recently proposed neural machine translation model which generalizes machine translation as a sequence learning problem. We extend the neural machine translation to a multi-task learning framework which shares source language representation and separates the modeling of different target language translation. Our framework can be applied to situations where either large amounts of parallel data or limited parallel data is available. 
Experiments show that our multi-task learning model is able to achieve significantly higher translation quality over individually learned models in both situations on the publicly available data sets.", "phrases": ["multiple language", "multi-task learning", "paradigm", "attentional decoder", "parameter sharing"], "overall_score": 3.663558138964772, "scores": [1.5387902533567932, 1.4351795778604057, 1.1221903829160074, 0.5498350103008385, 0.5485362385160512], "rank_score": 1.0389062925900192} -{"id": "pham-etal-2020-priming", "title": "Priming Neural Machine Translation", "abstract": "Priming is a well known and studied psychology phenomenon based on the prior presentation of one stimulus (cue) to influence the processing of a response. In this paper, we propose a framework to mimic the process of priming in the context of neural machine translation (NMT). We evaluate the effect of using similar translations as priming cues on the NMT network. We propose a method to inject priming cues into the NMT network and compare our framework to other mechanisms that perform micro-adaptation during inference. Overall, experiments conducted in a multi-domain setting confirm that adding priming cues in the NMT decoder can go a long way towards improving the translation accuracy. Besides, we show the suitability of our framework to gather valuable information for an NMT network from monolingual resources.", "phrases": ["neural machine translation", "similar translation", "priming"], "overall_score": 1.4400718206621648, "scores": [1.9297841290722066, 0.6252024444633233, 0.561390137754339], "rank_score": 1.038792237096623} -{"id": "moraes-etal-2016-university", "title": "University of Houston at CL-SciSumm 2016: SVMs with tree kernels and Sentence Similarity", "abstract": "This paper describes the University of Houston team\u2019s efforts toward the problem of identifying reference spans in a reference document given sentences from other documents that cite the reference document. We investigated the following approaches: cosine similarity with multiple incremental modifications and SVMs with a tree kernel. Although the best performing approach in our experiments is quite simple, it is not the best under every metric used for comparison. We also present a brief analysis of the dataset which includes information on its sparsity and frequency of section titles.", "phrases": ["svm", "tree kernel", "incremental modification"], "overall_score": 1.4399906012449535, "scores": [1.8129988965075172, 0.7816736650105474, 0.5215283874961525], "rank_score": 1.0387336496714057} -{"id": "suhara-etal-2020-opiniondigest", "title": "OpinionDigest: A Simple Framework for Opinion Summarization", "abstract": "We present OpinionDigest, an abstractive opinion summarization framework, which does not rely on gold-standard summaries for training. The framework uses an Aspect-based Sentiment Analysis model to extract opinion phrases from reviews, and trains a Transformer model to reconstruct the original reviews from these extractions. At summarization time, we merge extractions from multiple reviews and select the most popular ones. The selected opinions are used as input to the trained Transformer model, which verbalizes them into an opinion summary. OpinionDigest can also generate customized summaries, tailored to specific user needs, by filtering the selected opinions according to their aspect and/or sentiment. Automatic evaluation on Yelp data shows that our framework outperforms competitive baselines. 
Human studies on two corpora verify that OpinionDigest produces informative summaries and shows promising customization capabilities.", "phrases": ["opinion summarization framework", "opiniondigest", "pipeline framework"], "overall_score": 2.021138775654342, "scores": [1.9014332641356158, 0.6699384725048608, 0.5446078628835052], "rank_score": 1.038659866507994} -{"id": "ben-abacha-etal-2019-overview", "title": "Overview of the MEDIQA 2019 Shared Task on Textual Inference, Question Entailment and Question Answering", "abstract": "This paper presents the MEDIQA 2019 shared task organized at the ACL-BioNLP workshop. The shared task is motivated by a need to develop relevant methods, techniques and gold standards for inference and entailment in the medical domain, and their application to improve domain specific information retrieval and question answering systems. MEDIQA 2019 includes three tasks: Natural Language Inference (NLI), Recognizing Question Entailment (RQE), and Question Answering (QA) in the medical domain. 72 teams participated in the challenge, achieving an accuracy of 98% in the NLI task, 74.9% in the RQE task, and 78.3% in the QA task. In this paper, we describe the tasks, the datasets, and the participants' approaches and results. We hope that this shared task will attract further research efforts in textual inference, question entailment, and question answering in the medical domain.", "phrases": ["textual inference", "question answering", "mediqa challenge", "medical nli"], "overall_score": 2.9426069572931857, "scores": [1.8577413146135615, 0.9202382699517548, 0.8476777258355901, 0.5287872724138194], "rank_score": 1.0386111457036815} -{"id": "ye-etal-2007-sentence", "title": "Sentence Level Machine Translation Evaluation as a Ranking", "abstract": "The paper proposes formulating MT evaluation as a ranking problem, as is often done in the practice of assessment by humans. Under the ranking scenario, the study also investigates the relative utility of several features. The results show greater correlation with human assessment at the sentence level, even when using an n-gram match score as a baseline feature. The feature contributing the most to the rank order correlation between automatic ranking and human assessment was the dependency structure relation rather than BLEU score and reference language model feature.", "phrases": ["ranking", "human assessment", "sentence level"], "overall_score": 2.0207960132112763, "scores": [1.9780040552583762, 0.6062554810931914, 0.5311916280184601], "rank_score": 1.0384837214566758} -{"id": "stoyanov-etal-2009-conundrums", "title": "Conundrums in Noun Phrase Coreference Resolution: Making Sense of the State-of-the-Art", "abstract": "We aim to shed light on the state-of-the-art in NP coreference resolution by teasing apart the differences in the MUC and ACE task definitions, the assumptions made in evaluation methodologies, and inherent differences in text corpora. First, we examine three subproblems that play a role in coreference resolution: named entity recognition, anaphoricity determination, and coreference element detection. We measure the impact of each subproblem on coreference resolution and confirm that certain assumptions regarding these subproblems in the evaluation methodology can dramatically simplify the overall task. 
Second, we measure the performance of a state-of-the-art coreference resolver on several classes of anaphora and use these results to develop a quantitative measure for estimating coreference resolution performance on new data sets.", "phrases": ["coreference resolution", "mention", "state-of-the-art system"], "overall_score": 3.458604544315866, "scores": [1.4037845134904008, 1.1797687420542202, 0.5302453127197403], "rank_score": 1.0379328560881205} -{"id": "krishnakumaran-zhu-2007-hunting", "title": "Hunting Elusive Metaphors Using Lexical Resources.", "abstract": "In this paper we propose algorithms to automatically classify sentences into metaphoric or normal usages. Our algorithms only need the WordNet and bigram counts, and do not require training. We present empirical results on a test set derived from the Master Metaphor List. We also discuss issues that make classification of metaphors a tough problem in general.", "phrases": ["metaphor", "hyponymy relation", "sentence level"], "overall_score": 2.81076789030171, "scores": [1.7007383696936, 0.8645902569413495, 0.5484628634463927], "rank_score": 1.0379304966937808} -{"id": "cassidy-etal-2014-annotation", "title": "An Annotation Framework for Dense Event Ordering", "abstract": "Today\u2019s event ordering research is heavily dependent on annotated corpora. Current corpora influence shared evaluations and drive algorithm development. Partly due to this dependence, most research focuses on partial orderings of a document\u2019s events. For instance, the TempEval competitions and the TimeBank only annotate small portions of the event graph, focusing on the most salient events or on specific types of event pairs (e.g., only events in the same sentence). Deeper temporal reasoners struggle with this sparsity because the entire temporal picture is not represented. This paper proposes a new annotation process with a mechanism to force annotators to label connected graphs. It generates 10 times more relations per document than the TimeBank, and our TimeBank-Dense corpus is larger than all current corpora. We hope this process and its dense corpus encourages research on new global models with deeper reasoning.", "phrases": ["annotator", "ordering", "event pair", "timebank-dense", "temporal relation"], "overall_score": 3.3398358958321217, "scores": [1.337180393239709, 1.1824232174064464, 1.1311475949668102, 0.9262745422662081, 0.6108660465625617], "rank_score": 1.037578358888347} -{"id": "liu-etal-2020-unsupervised", "title": "Unsupervised Paraphrasing by Simulated Annealing", "abstract": "We propose UPSA, a novel approach that accomplishes Unsupervised Paraphrasing by Simulated Annealing. We model paraphrase generation as an optimization problem and propose a sophisticated objective function, involving semantic similarity, expression diversity, and language fluency of paraphrases. UPSA searches the sentence space towards this objective by performing a sequence of local editing. We evaluate our approach on various datasets, namely, Quora, Wikianswers, MSCOCO, and Twitter. Extensive results show that UPSA achieves the state-of-the-art performance compared with previous unsupervised methods in terms of both automatic and human evaluations. 
Further, our approach outperforms most existing domain-adapted supervised models, showing the generalizability of UPSA.", "phrases": ["simulated annealing", "upsa", "sophisticated objective function", "sentence space", "unsupervised paraphrasing"], "overall_score": 2.487457967083364, "scores": [2.0436515970710425, 0.9035417232732302, 0.8771816702694681, 0.8344576160310416, 0.5279201162555645], "rank_score": 1.0373505445800695} -{"id": "jiang-etal-2019-improved", "title": "Improved Differentiable Architecture Search for Language Modeling and Named Entity Recognition", "abstract": "In this paper, we study differentiable neural architecture search (NAS) methods for natural language processing. In particular, we improve differentiable architecture search by removing the softmax-local constraint. Also, we apply differentiable NAS to named entity recognition (NER). It is the first time that differentiable NAS methods are adopted in NLP tasks other than language modeling. On both the PTB language modeling and CoNLL-2003 English NER data, our method outperforms strong baselines. It achieves a new state-of-the-art on the NER task.", "phrases": ["differentiable architecture search", "language modeling", "entity recognition", "help"], "overall_score": 2.0182423638219955, "scores": [1.8728685336962625, 0.888345151413973, 0.8632910975402914, 0.5241808384235965], "rank_score": 1.0371714052685308} -{"id": "palogiannidi-etal-2016-tweester", "title": "Tweester at SemEval-2016 Task 4: Sentiment Analysis in Twitter Using Semantic-Affective Model Adaptation", "abstract": "We describe our submission to SemEval-2016 Task 4: Sentiment Analysis in Twitter. The proposed system ranked first for the subtask B. Our system comprises multiple independent models such as neural networks, semantic-affective models and topic modeling that are combined in a probabilistic way. The novelty of the system is the employment of a topic modeling approach in order to adapt the semantic-affective space for each tweet. In addition, significant enhancements were made in the main system dealing with the data preprocessing and feature extraction including the employment of word embeddings. Each model is used to predict a tweet\u2019s sentiment (positive, negative or neutral) and a late fusion scheme is adopted for the final decision.", "phrases": ["sentiment analysis", "twitter", "semantic-affective model"], "overall_score": 1.6683036382412084, "scores": [1.7667264167549508, 0.8103792440991053, 0.5326203348224342], "rank_score": 1.0365753318921636} -{"id": "gildea-2020-efficient", "title": "Efficient Outside Computation", "abstract": "Weighted deduction systems provide a framework for describing parsing algorithms that can be used with a variety of operations for combining the values of partial derivations. For some operations, inside values can be computed efficiently, but outside values cannot. We view outside values as functions from inside values to the total value of all derivations, and we analyze outside computation in terms of function composition. 
This viewpoint helps explain why efficient outside computation is possible in many settings, despite the lack of a general outside algorithm for semiring operations.", "phrases": ["computation", "deduction system", "outside value"], "overall_score": 1.1385476382818605, "scores": [1.980771734174404, 0.5861471589347158, 0.5421332721333582], "rank_score": 1.0363507217474928} -{"id": "she-chai-2017-interactive", "title": "Interactive Learning of Grounded Verb Semantics towards Human-Robot Communication", "abstract": "To enable human-robot communication and collaboration, previous works represent grounded verb semantics as the potential change of state to the physical world caused by these verbs. Grounded verb semantics are acquired mainly based on the parallel data of the use of a verb phrase and its corresponding sequences of primitive actions demonstrated by humans. The rich interaction between teachers and students that is considered important in learning new skills has not yet been explored. To address this limitation, this paper presents a new interactive learning approach that allows robots to proactively engage in interaction with human partners by asking good questions to learn models for grounded verb semantics. The proposed approach uses reinforcement learning to allow the robot to acquire an optimal policy for its question-asking behaviors by maximizing the long-term reward. Our empirical results have shown that the interactive learning approach leads to more reliable models for grounded verb semantics, especially in the noisy environment which is full of uncertainties. Compared to previous work, the models acquired from interactive learning result in a 48% to 145% performance gain when applied in new situations.", "phrases": ["verb semantic", "human-robot communication", "interactive learning"], "overall_score": 2.0156751544711127, "scores": [1.7448061849577534, 0.8034128885624725, 0.5593372883955621], "rank_score": 1.035852120638596} -{"id": "turcan-mckeown-2019-dreaddit", "title": "Dreaddit: A Reddit Dataset for Stress Analysis in Social Media", "abstract": "Stress is a nigh-universal human experience, particularly in the online world. While stress can be a motivator, too much stress is associated with many negative health outcomes, making its identification useful across a range of domains. However, existing computational research typically only studies stress in domains such as speech, or in short genres such as Twitter. We present Dreaddit, a new text corpus of lengthy multi-domain social media data for the identification of stress. Our dataset consists of 190K posts from five different categories of Reddit communities; we additionally label 3.5K total segments taken from 3K posts using Amazon Mechanical Turk. We present preliminary supervised learning methods for identifying stress, both neural and traditional, and analyze the complexity and diversity of the data and characteristics of each category.", "phrases": ["stress", "computational research", "dreaddit"], "overall_score": 1.6669956358748907, "scores": [1.6909208529026372, 0.8803418506623965, 0.5360251694174174], "rank_score": 1.0357626243274838} -{"id": "bao-etal-2019-generating", "title": "Generating Sentences from Disentangled Syntactic and Semantic Spaces", "abstract": "Variational auto-encoders (VAEs) are widely used in natural language generation due to the regularization of the latent space. However, generating sentences from the continuous latent space does not explicitly model the syntactic information. 
In this paper, we propose to generate sentences from disentangled syntactic and semantic spaces. Our proposed method explicitly models syntactic information in the VAE's latent space by using the linearized tree sequence, leading to better performance of language generation. Additionally, the advantage of sampling in the disentangled syntactic and semantic latent spaces enables us to perform novel applications, such as unsupervised paraphrase generation and syntax transfer generation. Experimental results show that our proposed model achieves similar or better performance in various tasks, compared with state-of-the-art related work.", "phrases": ["latent space", "paraphrase", "text generation"], "overall_score": 2.9935409500546357, "scores": [1.5832111781394795, 0.910355117166233, 0.6135159772877258], "rank_score": 1.0356940908644796} -{"id": "plank-etal-2016-multilingual", "title": "Multilingual Part-of-Speech Tagging with Bidirectional Long Short-Term Memory Models and Auxiliary Loss", "abstract": "Bidirectional long short-term memory (bi-LSTM) networks have recently proven successful for various NLP sequence modeling tasks, but little is known about their reliance on input representations, target languages, data set size, and label noise. We address these issues and evaluate bi-LSTMs with word, character, and unicode byte embeddings for POS tagging. We compare bi-LSTMs to traditional POS taggers across languages and data sizes. We also present a novel bi-LSTM model, which combines the POS tagging loss function with an auxiliary loss function that accounts for rare words. The model obtains state-of-the-art performance across 22 languages, and works especially well for morphologically complex languages. Our analysis suggests that bi-LSTMs are less sensitive to training data size and label corruptions (at small noise levels) than previously assumed.", "phrases": ["part-of-speech tagging", "long short-term memory", "auxiliary loss", "bi-lstms", "pos"], "overall_score": 3.918720020418219, "scores": [1.861284073592897, 0.8136434431439099, 1.0522665030485987, 0.8793608588997264, 0.5711983518799301], "rank_score": 1.0355506461130124} -{"id": "springorum-etal-2013-regular", "title": "Regular Meaning Shifts in German Particle Verbs: A Case Study", "abstract": "This paper provides a corpus-based study on German particle verbs. We hypothesize that there are regular mechanisms in meaning shifts of a base verb in combination with a particle that do not only apply to the individual verb, but across a semantically coherent set of verbs. For example, the syntactically similar base verbs brummen \u2018hum\u2019 and donnern \u2018rumble\u2019 both describe an irritating, displeasing loud sound. Combined with the particle auf, they result in near-synonyms roughly meaning \u2018forcefully assigning a task\u2019 (in one of their senses). Covering 6 base verb groups and 3 particles with 4 particle meanings, we demonstrate that corpus-based information on the verbs\u2019 subcategorization frames plus conceptual properties of the nominal complements is a sufficient basis for defining such meaning shifts. 
While the paper is considerably more extensive than earlier related work, we view it as a case study toward a more automatic approach to identify and formalize meaning shifts in German particle verbs.", "phrases": ["particle", "base verb", "non-literal meaning"], "overall_score": 1.855375008020645, "scores": [1.1705825634290667, 1.0613639375016652, 0.8745670237110506], "rank_score": 1.0355045082139276} -{"id": "chen-etal-2014-unified", "title": "A Unified Model for Word Sense Representation and Disambiguation", "abstract": "Most word representation methods assume that each word owns a single semantic vector. This is usually problematic because lexical ambiguity is ubiquitous, which is also the problem to be resolved by word sense disambiguation. In this paper, we present a unified model for joint word sense representation and disambiguation, which will assign distinct representations for each word sense. The basic idea is that both word sense representation (WSR) and word sense disambiguation (WSD) will benefit from each other: (1) high-quality WSR will capture rich information about words and senses, which should be helpful for WSD, and (2) high-quality WSD will provide reliable disambiguated corpora for learning better sense representations. Experimental results show that our model improves the performance of contextual word similarity compared to existing WSR methods, outperforms state-of-the-art supervised methods on domain-specific WSD, and achieves competitive performance on coarse-grained all-words WSD.", "phrases": ["word sense representation", "knowledge base", "gloss"], "overall_score": 3.681214777589832, "scores": [2.0207937883392004, 0.5463121806614045, 0.5391002706616439], "rank_score": 1.0354020798874162} -{"id": "zens-etal-2004-reordering", "title": "Reordering Constraints for Phrase-Based Statistical Machine Translation", "abstract": "In statistical machine translation, the generation of a translation hypothesis is computationally expensive. If arbitrary reorderings are permitted, the search problem is NP-hard. On the other hand, if we restrict the possible reorderings in an appropriate way, we obtain a polynomial-time search algorithm. We investigate different reordering constraints for phrase-based statistical machine translation, namely the IBM constraints and the ITG constraints. We present efficient dynamic programming algorithms for both constraints. We evaluate the constraints with respect to translation quality on two Japanese-English tasks. We show that the reordering constraints improve translation quality compared to an unconstrained search that permits arbitrary phrase reorderings. The ITG constraints perform best on both tasks and yield statistically significant improvements compared to the unconstrained search.", "phrases": ["statistical machine translation", "itg constraint", "efficiency", "flat reordering model", "expressiveness"], "overall_score": 3.2001360192155928, "scores": [2.0454335040095497, 1.419671983373412, 0.60012998485004, 0.5689669813071925, 0.5422648184294124], "rank_score": 1.0352934543939214} -{"id": "dos-santos-etal-2015-classifying", "title": "Classifying Relations by Ranking with Convolutional Neural Networks", "abstract": "Relation classification is an important semantic processing task for which state-of-the-art systems still rely on costly handcrafted features. In this work we tackle the relation classification task using a convolutional neural network that performs classification by ranking (CR-CNN). 
We propose a new pairwise ranking loss function that makes it easy to reduce the impact of artificial classes. We perform experiments using the SemEval-2010 Task 8 dataset, which is designed for the task of classifying the relationship between two nominals marked in a sentence. Using CR-CNN, we outperform the state-of-the-art for this dataset and achieve an F1 of 84.1 without using any costly handcrafted features. Additionally, our experimental results show that: (1) our approach is more effective than CNN followed by a softmax classifier; (2) omitting the representation of the artificial class Other improves both precision and recall; and (3) using only word embeddings as input features is enough to achieve state-of-the-art results if we consider only the text between the two target nominals.", "phrases": ["convolutional neural networks", "relation extraction", "cnn model", "learning model"], "overall_score": 3.4853523861539455, "scores": [0.873507026685547, 1.5630484124934019, 0.8549991159970517, 0.8486844521795786], "rank_score": 1.0350597518388949} -{"id": "taghipour-ng-2015-one", "title": "One Million Sense-Tagged Instances for Word Sense Disambiguation and Induction", "abstract": "Supervised word sense disambiguation (WSD) systems are usually the best performing systems when evaluated on standard benchmarks. However, these systems need annotated training data to function properly. While there are some publicly available open source WSD systems, very few large annotated datasets are available to the research community. The two main goals of this paper are to extract and annotate a large number of samples and release them for public use, and also to evaluate this dataset against some word sense disambiguation and induction tasks. We show that the open source IMS WSD system trained on our dataset achieves state-of-the-art results in standard disambiguation tasks and a recent word sense induction task, outperforming several task submissions and strong baselines.", "phrases": ["sense-tagged instance", "omsti", "english word"], "overall_score": 2.7314890977498423, "scores": [1.4594075172188417, 1.0738974405320696, 0.5717686612614419], "rank_score": 1.0350245396707844} -{"id": "wang-etal-2018-target", "title": "Target-Sensitive Memory Networks for Aspect Sentiment Classification", "abstract": "Aspect sentiment classification (ASC) is a fundamental task in sentiment analysis. Given an aspect/target and a sentence, the task classifies the sentiment polarity expressed on the target in the sentence. Memory networks (MNs) have been used for this task recently and have achieved state-of-the-art results. In MNs, the attention mechanism plays a crucial role in detecting the sentiment context for the given target. However, we found an important problem with the current MNs in performing the ASC task. Simply improving the attention mechanism will not solve it. The problem is referred to as target-sensitive sentiment, which means that the sentiment polarity of the (detected) context is dependent on the given target and it cannot be inferred from the context alone. To tackle this problem, we propose the target-sensitive memory networks (TMNs). 
Several alternative techniques are designed for the implementation of TMNs and their effectiveness is experimentally evaluated.", "phrases": ["memory network", "aspect sentiment classification", "absa task"], "overall_score": 2.8028387074986365, "scores": [1.9643883008219571, 0.5872160552197399, 0.5534031209517283], "rank_score": 1.0350024923311416} -{"id": "chen-etal-2018-collective", "title": "Collective Event Detection via a Hierarchical and Bias Tagging Networks with Gated Multi-level Attention Mechanisms", "abstract": "Traditional approaches to the task of ACE event detection primarily regard multiple events in one sentence as independent ones and recognize them separately by using sentence-level information. However, events in one sentence are usually interdependent and sentence-level information is often insufficient to resolve ambiguities for some types of events. This paper proposes a novel framework dubbed Hierarchical and Bias Tagging Networks with Gated Multi-level Attention Mechanisms (HBTNGMA) to solve the two problems simultaneously. Firstly, we propose hierarchical and bias tagging networks to detect multiple events in one sentence collectively. Then, we devise a gated multi-level attention to automatically extract and dynamically fuse the sentence-level and document-level information. The experimental results on the widely used ACE 2005 dataset show that our approach significantly outperforms other state-of-the-art methods.", "phrases": ["event detection", "bias tagging networks", "multi-level attention mechanisms"], "overall_score": 2.3827671501027536, "scores": [1.3619204048998779, 0.9033180036773967, 0.8392294662726151], "rank_score": 1.0348226249499632} -{"id": "ladhak-etal-2020-wikilingua", "title": "WikiLingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization", "abstract": "We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of cross-lingual abstractive summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high-quality, collaborative resource of how-to guides on a diverse set of topics written by human authors. We create gold-standard article-summary alignments across languages by aligning the images that are used to describe each how-to step in an article. As a set of baselines for further studies, we evaluate the performance of existing cross-lingual abstractive summarization methods on our dataset. We further propose a method for direct cross-lingual summarization (i.e., without requiring translation at inference time) by leveraging synthetic data and Neural Machine Translation as a pre-training step. Our method significantly outperforms the baseline approaches, while being more cost efficient during inference.", "phrases": ["multilingual dataset", "abstractive summarization system", "wikilingua"], "overall_score": 1.1364371185282218, "scores": [1.9185324200677292, 0.6073517277701714, 0.5774047837995797], "rank_score": 1.03442964387916} -{"id": "schulte-im-walde-2006-experiments", "title": "Experiments on the Automatic Induction of German Semantic Verb Classes", "abstract": "This article presents clustering experiments on German verbs: A statistical grammar model for German serves as the source for a distributional verb description at the lexical syntax-semantics interface, and the unsupervised clustering algorithm k-means uses the empirical verb properties to perform an automatic induction of verb classes. 
Various evaluation measures are applied to compare the clustering results to gold standard German semantic verb classes under different criteria. The primary goals of the experiments are (1) to empirically utilize and investigate the well-established relationship between verb meaning and verb behavior within a cluster analysis and (2) to investigate the required technical parameters of a cluster analysis with respect to this specific linguistic task. The clustering methodology is developed on a small-scale verb set and then applied to a larger-scale verb set including 883 German verbs.", "phrases": ["automatic induction", "verb class", "grammar model"], "overall_score": 3.2874544439346196, "scores": [1.6499889863387662, 0.9208586243230188, 0.5324237177298078], "rank_score": 1.0344237761305308} -{"id": "camgoz-etal-2016-bosphorussign", "title": "BosphorusSign: A Turkish Sign Language Recognition Corpus in Health and Finance Domains", "abstract": "There are as many sign languages as there are deaf communities in the world. Linguists have been collecting corpora of different sign languages and annotating them extensively in order to study and understand their properties. On the other hand, the field of computer vision has approached the sign language recognition problem as a grand challenge and research efforts have intensified in the last 20 years. However, corpora collected for studying linguistic properties are often not suitable for sign language recognition as the statistical methods used in the field require large amounts of data. Recently, with the availability of inexpensive depth cameras, groups from the computer vision community have started collecting corpora with a large number of repetitions for sign language recognition research. In this paper, we present the BosphorusSign Turkish Sign Language corpus, which consists of 855 sign and phrase samples from the health, finance and everyday life domains. The corpus is collected using the state-of-the-art Microsoft Kinect v2 depth sensor, and will be the first of its kind in this sign language research field. Furthermore, there will be annotations rendered by linguists so that the corpus will appeal both to the linguistic and sign language recognition research communities.", "phrases": ["sign language", "health", "bosphorussign"], "overall_score": 1.1364016124942005, "scores": [1.7091515446240129, 0.7825505714595902, 0.6114898585990327], "rank_score": 1.034397324894212} -{"id": "foster-kuhn-2007-mixture", "title": "Mixture-Model Adaptation for SMT", "abstract": "We describe a mixture-model approach to adapting a Statistical Machine Translation System for new domains, using weights that depend on text distances to mixture components. We investigate a number of variants on this approach, including cross-domain versus dynamic adaptation; linear versus log-linear mixtures; language and translation model adaptation; different methods of assigning weights; and granularity of the source unit being adapted to. 
The best methods achieve gains of approximately one BLEU percentage point over a state-of-the-art non-adapted baseline system.", "phrases": ["adaptation", "mixture-model approach", "sub-model", "mixture weight", "corpus level"], "overall_score": 3.8903858472462187, "scores": [2.4159434120941943, 1.0167666286814465, 0.5843963207016162, 0.578120686506247, 0.5765076744175059], "rank_score": 1.034346944480202} -{"id": "he-etal-2021-realformer", "title": "RealFormer: Transformer Likes Residual Attention", "abstract": "Transformer is the backbone of modern NLP models. In this paper, we propose RealFormer, a simple and generic technique to create Residual Attention Layer Transformer networks that significantly outperform the canonical Transformer and its variants (BERT, ETC, etc.) on a wide spectrum of tasks including Masked Language Modeling, GLUE, SQuAD, Neural Machine Translation, WikiHop, HotpotQA, Natural Questions, and OpenKP. We also observe empirically that RealFormer stabilizes training and leads to models with sparser attention. Source code and pre-trained checkpoints for RealFormer can be found at https://github.com/google-research/google-research/tree/master/realformer.", "phrases": ["transformer", "realformer", "motivation"], "overall_score": 1.6647046908353587, "scores": [1.6725988787223358, 0.8650575527645683, 0.5653611089368952], "rank_score": 1.0343391801412665} -{"id": "abu-sheikha-inkpen-2011-generation", "title": "Generation of Formal and Informal Sentences", "abstract": "This paper addresses the task of using natural language generation (NLG) techniques to generate sentences with formal and with informal style. We studied the main characteristics of each style, which helped us to choose parameters that can produce sentences in one of the two styles. We collected some ready-made parallel lists of formal and informal words and phrases from different sources. In addition, we added two more parallel lists: one that contains most of the contractions in English (short forms) and their full forms, and another one that consists of some common abbreviations and their full forms. These parallel lists might help to generate sentences in the preferred style, by changing words or expressions for that style. Our NLG system is built on top of the SimpleNLG package (Gatt and Reiter, 2009). We used templates from which we generated valid English texts with formal or informal style. In order to evaluate the quality of the generated sentences and their level of formality, we used human judges. The evaluation results show that our system can generate formal and informal style successfully, with high accuracy. The main contribution of our work consists in designing a set of parameters that led to good results for the task of generating texts with different formality levels.", "phrases": ["informal sentence", "list", "different formality level"], "overall_score": 2.0125625846013615, "scores": [1.4321257178664808, 1.1137388639128507, 0.5568931466467316], "rank_score": 1.034252576142021} -{"id": "conroy-dang-2008-mind", "title": "Mind the Gap: Dangers of Divorcing Evaluations of Summary Content from Linguistic Quality", "abstract": "In this paper, we analyze the state of current human and automatic evaluation of topic-focused summarization in the Document Understanding Conference main task for 2005-2007. 
The analyses show that while ROUGE has very strong correlation with responsiveness for both human and automatic summaries, there is a significant gap in responsiveness between humans and systems which is not accounted for by the ROUGE metrics. In addition to teasing out gaps in the current automatic evaluation, we propose a method to maximize the strength of current automatic evaluations by using the method of canonical correlation. We apply this new evaluation method, which we call ROSE (ROUGE Optimal Summarization Evaluation), to find the optimal linear combination of ROUGE scores to maximize correlation with human responsiveness.", "phrases": ["gap", "linguistic quality", "responsiveness"], "overall_score": 2.149941898582353, "scores": [1.6654001370871063, 0.9107778437407948, 0.5255325344562625], "rank_score": 1.0339035050947212} -{"id": "williams-etal-2018-latent", "title": "Do latent tree learning models identify meaningful structure in sentences?", "abstract": "Recent work on the problem of latent tree learning has made it possible to train neural networks that learn to both parse a sentence and use the resulting parse to interpret the sentence, all without exposure to ground-truth parse trees at training time. Surprisingly, these models often perform better at sentence understanding tasks than models that use parse trees from conventional parsers. This paper aims to investigate what these latent tree learning models learn. We replicate two such models in a shared codebase and find that (i) only one of these models outperforms conventional tree-structured models on sentence classification, (ii) its parsing strategies are not especially consistent across random restarts, (iii) the parses it produces tend to be shallower than standard Penn Treebank (PTB) parses, and (iv) they do not resemble those of PTB or any other semantic or syntactic formalism that the authors are aware of.", "phrases": ["latent tree", "syntactic formalism", "brief survey"], "overall_score": 3.2414209285089104, "scores": [1.6092042030052476, 0.9533933559368863, 0.5387517390807665], "rank_score": 1.0337830993409667} -{"id": "geva-etal-2021-transformer", "title": "Transformer Feed-Forward Layers Are Key-Value Memories", "abstract": "Feed-forward layers constitute two-thirds of a transformer model's parameters, yet their role in the network remains under-explored. We show that feed-forward layers in transformer-based language models operate as key-value memories, where each key correlates with textual patterns in the training examples, and each value induces a distribution over the output vocabulary. Our experiments show that the learned patterns are human-interpretable, and that lower layers tend to capture shallow patterns, while upper layers learn more semantic ones. The values complement the keys' input patterns by inducing output distributions that concentrate probability mass on tokens likely to appear immediately after each pattern, particularly in the upper layers. 
Finally, we demonstrate that the output of a feed-forward layer is a composition of its memories, which is subsequently refined throughout the model's layers via residual connections to produce the final output distribution.", "phrases": ["feed-forward layer", "key-value memory", "transformer"], "overall_score": 2.0109481534829152, "scores": [2.005735456736262, 0.5640380027399294, 0.5304953085229516], "rank_score": 1.0334229226663811} -{"id": "mysore-gopinath-etal-2018-supervised", "title": "Supervised and Unsupervised Methods for Robust Separation of Section Titles and Prose Text in Web Documents", "abstract": "The text in many web documents is organized into a hierarchy of section titles and corresponding prose content, a structure which provides potentially exploitable information on discourse structure and topicality. However, this organization is generally discarded during text collection, and collecting it is not straightforward: the same visual organization can be implemented in a myriad of different ways in the underlying HTML. To remedy this, we present a flexible system for automatically extracting the hierarchical section titles and prose organization of web documents irrespective of differences in HTML representation. This system uses features from syntax, semantics, discourse and markup to build two models which classify HTML text into section titles and prose text. When tested on three different domains of web text, our domain-independent system achieves an overall precision of 0.82 and a recall of 0.98. The domain-dependent variation produces very high precision (0.99) at the expense of recall (0.75). These results exhibit a robust level of accuracy suitable for enhancing question answering, information extraction, and summarization.", "phrases": ["prose text", "web document", "header"], "overall_score": 1.8512603939195835, "scores": [1.733191441554221, 0.8334832811299895, 0.5329495723956932], "rank_score": 1.0332080983599676} -{"id": "lechelle-etal-2019-wire57", "title": "WiRe57 : A Fine-Grained Benchmark for Open Information Extraction", "abstract": "We build a reference for the task of Open Information Extraction, on five documents. We tentatively resolve a number of issues that arise, including coreference and granularity, and we take steps toward addressing inference, a significant problem. We seek to better pinpoint the requirements for the task. We produce our annotation guidelines specifying what is correct to extract and what is not. In turn, we use this reference to score existing Open IE systems. We address the non-trivial problem of evaluating the extractions produced by systems against the reference tuples, and share our evaluation script. Among seven compared extractors, we find the MinIE system to perform best.", "phrases": ["open information extraction", "wire57", "scorer"], "overall_score": 2.0104647666783557, "scores": [1.6185849996483286, 0.9285096977788749, 0.5524288355391804], "rank_score": 1.0331745109887946} -{"id": "quan-ren-2009-construction", "title": "Construction of a Blog Emotion Corpus for Chinese Emotional Expression Analysis", "abstract": "There is plenty of evidence that emotion analysis has many valuable applications. In this study a blog emotion corpus is constructed for Chinese emotional expression analysis. 
This corpus contains manual annotation of eight emotional categories (expect, joy, love, surprise, anxiety, sorrow, angry and hate), emotion intensity, emotion holder/target, emotional word/phrase, degree word, negative word, conjunction, rhetoric, punctuation and other linguistic expressions that indicate emotion. Annotation agreement analyses for emotion classes and emotional words and phrases are described. Then, using this corpus, we explore emotion expressions in Chinese and present the analyses on them.", "phrases": ["blog emotion corpus", "emotional expression analysis", "negative word"], "overall_score": 2.1482771191944634, "scores": [1.9901501228166585, 0.5831748902064688, 0.5259837332939538], "rank_score": 1.033102915439027} -{"id": "pan-etal-2021-contrastive", "title": "Contrastive Learning for Many-to-many Multilingual Neural Machine Translation", "abstract": "Existing multilingual machine translation approaches mainly focus on English-centric directions, while the non-English directions still lag behind. In this work, we aim to build a many-to-many translation system with an emphasis on the quality of non-English language directions. Our intuition is based on the hypothesis that a universal cross-language representation leads to better multilingual translation performance. To this end, we propose mRASP2, a training method to obtain a single unified multilingual translation model. mRASP2 is empowered by two techniques: a) a contrastive learning scheme to close the gap among representations of different languages, and b) data augmentation on both multiple parallel and monolingual data to further align token representations. For English-centric directions, mRASP2 achieves competitive or even better performance than a strong pre-trained model mBART on tens of WMT benchmarks. For non-English directions, mRASP2 achieves an average improvement of 10+ BLEU compared with the multilingual baseline.", "phrases": ["translation system", "mrasp2", "contrastive learning"], "overall_score": 2.797560646199662, "scores": [2.0127670507305186, 0.5643103911601197, 0.5220829559421125], "rank_score": 1.0330534659442503} -{"id": "zhang-etal-2016-keyphrase", "title": "Keyphrase Extraction Using Deep Recurrent Neural Networks on Twitter", "abstract": "Keyphrases can provide highly condensed and valuable information that allows users to quickly acquire the main ideas. The task of automatically extracting them has received considerable attention in recent decades. Different from previous studies, which are usually focused on automatically extracting keyphrases from documents or articles, in this study, we considered the problem of automatically extracting keyphrases from tweets. Because of the length limitations of Twitter-like sites, the performances of existing methods usually drop sharply. We proposed a novel deep recurrent neural network (RNN) model to combine keywords and context information to address this problem. To evaluate the proposed method, we also constructed a large-scale dataset collected from Twitter. 
The experimental results showed that the proposed method performs significantly better than previous methods.", "phrases": ["recurrent neural network", "twitter", "keyphrase extraction"], "overall_score": 2.926816400534199, "scores": [1.3709844780451148, 0.8562223639503851, 0.8719064739935893], "rank_score": 1.033037771996363} -{"id": "liu-etal-2009-joint", "title": "Joint Decoding with Multiple Translation Models", "abstract": "Current SMT systems usually decode with single translation models and cannot benefit from the strengths of other models in the decoding phase. We instead propose joint decoding, a method that combines multiple translation models in one decoder. Our joint decoder draws connections among multiple models by integrating the translation hypergraphs they produce individually. Therefore, one model can share translations and even derivations with other models. Comparable to the state-of-the-art system combination technique, joint decoding achieves an absolute improvement of 1.5 BLEU points over individual decoding.", "phrases": ["translation hypergraph", "joint decoding", "tree-to-string model"], "overall_score": 2.147770209501856, "scores": [1.969460829084655, 0.5663981102029011, 0.562718490929822], "rank_score": 1.0328591434057925} -{"id": "balikas-etal-2016-modeling", "title": "Modeling topic dependencies in semantically coherent text spans with copulas", "abstract": "The exchangeability assumption in topic models like Latent Dirichlet Allocation (LDA) often results in inferring inconsistent topics for the words of text spans like noun-phrases, which are usually expected to be topically coherent. We propose copulaLDA, which extends LDA by integrating part of the text structure into the model and relaxes the conditional independence assumption between the word-specific latent topics given the per-document topic distributions. To this end, we assume that the words of text spans like noun-phrases are topically bound and we model this dependence with copulas. We demonstrate empirically the effectiveness of copulaLDA on both intrinsic and extrinsic evaluation tasks on several publicly available corpora.", "phrases": ["text span", "copula", "topic distribution"], "overall_score": 1.4317858900929443, "scores": [1.673691387004052, 0.8223142817176081, 0.6024399361561618], "rank_score": 1.0328152016259406} -{"id": "gimenez-marquez-2007-context", "title": "Context-aware Discriminative Phrase Selection for Statistical Machine Translation", "abstract": "In this work we revise the application of discriminative learning to the problem of phrase selection in Statistical Machine Translation. Inspired by common techniques used in Word Sense Disambiguation, we train classifiers based on local context to predict possible phrase translations. Our work extends that of Vickrey et al. (2005) in two main aspects. First, we move from word translation to phrase translation. Second, we move from the 'blank-filling' task to the 'full translation' task. 
We report results on a set of highly frequent source phrases, obtaining a significant improvement, especially with respect to adequacy, according to a rigorous process of manual evaluation.", "phrases": ["statistical machine translation", "recent approach", "smt system"], "overall_score": 2.26875642324738, "scores": [2.0375485068724988, 0.5354828714645252, 0.5246352598150228], "rank_score": 1.0325555460506823} -{"id": "ji-etal-2017-dynamic", "title": "Dynamic Entity Representations in Neural Language Models", "abstract": "Understanding a long document requires tracking how entities are introduced and evolve over time. We present a new type of language model, EntityNLM, that can explicitly model entities, dynamically update their representations, and contextually generate their mentions. Our model is generative and flexible; it can model an arbitrary number of entities in context while generating each entity mention at an arbitrary length. In addition, it can be used for several different tasks such as language modeling, coreference resolution, and entity prediction. Experimental results with all these tasks demonstrate that our model consistently outperforms strong baselines and prior work.", "phrases": ["entity representation", "language model", "coreference", "dynamic representation"], "overall_score": 3.190943131188818, "scores": [1.7528695574519604, 0.9530197133898071, 0.845623084721025, 0.5777652966699848], "rank_score": 1.0323194130581943} -{"id": "bryant-etal-2017-automatic", "title": "Automatic Annotation and Evaluation of Error Types for Grammatical Error Correction", "abstract": "Until now, error type performance for Grammatical Error Correction (GEC) systems could only be measured in terms of recall because system output is not annotated. To overcome this problem, we introduce ERRANT, a grammatical ERRor ANnotation Toolkit designed to automatically extract edits from parallel original and corrected sentences and classify them according to a new, dataset-agnostic, rule-based framework. This not only facilitates error type evaluation at different levels of granularity, but can also be used to reduce annotator workload and standardise existing GEC datasets. Human experts rated the automatic edits as \u201cGood\u201d or \u201cAcceptable\u201d in at least 95% of cases, so we applied ERRANT to the system output of the CoNLL-2014 shared task to carry out a detailed error type analysis for the first time.", "phrases": ["grammatical error correction", "edit", "rule-based framework"], "overall_score": 2.9245786555428963, "scores": [0.9355178845418756, 1.294419510927327, 0.8668064431249025], "rank_score": 1.0322479461980352} -{"id": "tillmann-zhang-2005-localized", "title": "A Localized Prediction Model for Statistical Machine Translation", "abstract": "In this paper, we present a novel training method for a localized phrase-based prediction model for statistical machine translation (SMT). The model predicts blocks with orientation to handle local phrase re-ordering. We use a maximum likelihood criterion to train a log-linear block bigram model which uses real-valued features (e.g. a language model score) as well as binary features based on the block identities themselves, e.g. block bigram features. Our training algorithm can easily handle millions of features. 
The best system obtains an 18.6% improvement over the baseline on a standard Arabic-English translation task.", "phrases": ["statistical machine translation", "block", "orientation"], "overall_score": 2.3766164236193457, "scores": [0.9847924043990566, 1.0576381696065094, 1.0540236211300023], "rank_score": 1.0321513983785229} -{"id": "bjorne-salakoski-2011-generalizing", "title": "Generalizing Biomedical Event Extraction", "abstract": "We present a system for extracting biomedical events (detailed descriptions of biomolecular interactions) from research articles. This system was developed for the BioNLP'11 Shared Task and extends our BioNLP'09 Shared Task winning Turku Event Extraction System. It uses support vector machines to first detect event-defining words, followed by detection of their relationships. The theme of the BioNLP'11 Shared Task is generalization, extending event extraction to varied biomedical domains. Our current system successfully predicts events for every domain case introduced in the BioNLP'11 Shared Task, being the only system to participate in all eight tasks and all of their subtasks, with best performance in four tasks.", "phrases": ["biomedical event extraction", "support vector machine", "hand-crafted feature"], "overall_score": 2.3759070806378775, "scores": [2.042910796484268, 0.5305061743131093, 0.5221130331103059], "rank_score": 1.0318433346358944} -{"id": "zhao-etal-2020-spanmlt", "title": "SpanMlt: A Span-based Multi-Task Learning Framework for Pair-wise Aspect and Opinion Terms Extraction", "abstract": "Aspect terms extraction and opinion terms extraction are two key problems of fine-grained Aspect Based Sentiment Analysis (ABSA). The aspect-opinion pairs can provide a global profile about a product or service for consumers and opinion mining systems. However, traditional methods cannot directly output aspect-opinion pairs without given aspect terms or opinion terms. Although some recent co-extraction methods have been proposed to extract both terms jointly, they fail to extract them as pairs. To this end, this paper proposes an end-to-end method to solve the task of Pair-wise Aspect and Opinion Terms Extraction (PAOTE). Furthermore, this paper treats the problem from a perspective of joint term and relation extraction rather than under the sequence tagging formulation performed in most prior works. We propose a multi-task learning framework based on shared spans, where the terms are extracted under the supervision of span boundaries. Meanwhile, the pair-wise relations are jointly identified using the span representations. Extensive experiments show that our model consistently outperforms state-of-the-art methods.", "phrases": ["multi-task learning framework", "pair-wise aspect", "opinion terms extraction", "spanmlt"], "overall_score": 2.4738828728363, "scores": [1.272169382751253, 0.880950692008083, 0.8691547199776828, 1.104482367512608], "rank_score": 1.0316892905624067} -{"id": "postolache-etal-2006-transferring", "title": "Transferring Coreference Chains through Word Alignment", "abstract": "This paper investigates the problem of automatically annotating resources with NP coreference information using a parallel corpus, English-Romanian, in order to transfer, through word alignment, coreference chains from the English part to the Romanian part of the corpus. 
The results show that we can detect Romanian referential expressions and coreference chains with over 80% F-measure; thus, it is worthwhile to use our method as a preprocessing step, followed by manual correction, as part of an annotation effort for creating a large Romanian corpus with coreference information.", "phrases": ["word alignment", "coreference resolution", "english-romanian corpus", "annotation projection"], "overall_score": 2.792785658912534, "scores": [1.9795402722500484, 0.9845122272014813, 0.5812428039831714, 0.5798655340945829], "rank_score": 1.0312902093823209} -{"id": "pustejovsky-stubbs-2011-increasing", "title": "Increasing Informativeness in Temporal Annotation", "abstract": "In this paper, we discuss some of the challenges of adequately applying a specification language to an annotation task, as embodied in a specific guideline. In particular, we discuss some issues with TimeML motivated by error analysis on annotated TLINKs in TimeBank. We introduce a document level information structure we call a narrative container (NC), designed to increase informativeness and accuracy of temporal relation identification. The narrative container is the default interval containing the events being discussed in the text, when no explicit temporal anchor is given. By exploiting this notion in the creation of a new temporal annotation over TimeBank, we were able to reduce inconsistencies and increase informativeness when compared to existing TLINKs in TimeBank.", "phrases": ["informativeness", "temporal annotation", "narrative container"], "overall_score": 2.374222989999126, "scores": [1.0061648145748767, 0.8348167986544067, 1.2523542168640955], "rank_score": 1.0311119433644598} -{"id": "deschacht-moens-2007-text", "title": "Text Analysis for Automatic Image Annotation", "abstract": "We present a novel approach to automatically annotate images using associated text. We detect and classify all entities (persons and objects) in the text after which we determine the salience (the importance of an entity in a text) and visualness (the extent to which an entity can be perceived visually) of these entities. We combine these measures to compute the probability that an entity is present in the image. The suitability of our approach was successfully tested on 50 image-text pairs of Yahoo! News.", "phrases": ["image", "salience", "news article"], "overall_score": 2.144105071490074, "scores": [1.685097986224745, 0.8793182298604434, 0.5288735376984178], "rank_score": 1.0310965845945355} -{"id": "almeida-martins-2013-fast", "title": "Fast and Robust Compressive Summarization with Dual Decomposition and Multi-Task Learning", "abstract": "We present a dual decomposition framework for multi-document summarization, using a model that jointly extracts and compresses sentences. Compared with previous work based on integer linear programming, our approach does not require external solvers, is significantly faster, and is modular in the three qualities a summary should have: conciseness, informativeness, and grammaticality. In addition, we propose a multi-task learning framework to take advantage of existing data for extractive summarization and sentence compression. 
Experiments on the TAC2008 dataset yield the highest published ROUGE scores to date, with runtimes that rival those of extractive summarizers.", "phrases": ["summarization", "dual decomposition", "approximation"], "overall_score": 2.720402209915201, "scores": [1.3896469606805182, 1.1532150645369987, 0.5496083573548103], "rank_score": 1.0308234608574425} -{"id": "tan-etal-2020-detecting", "title": "Detecting Cross-Modal Inconsistency to Defend Against Neural Fake News", "abstract": "Large-scale dissemination of disinformation online intended to mislead or deceive the general population is a major societal problem. Rapid progression in image, video, and natural language generative models has only exacerbated this situation and intensified our need for an effective defense mechanism. While existing approaches have been proposed to defend against neural fake news, they are generally constrained to the very limited setting where articles only have text and metadata such as the title and authors. In this paper, we introduce the more realistic and challenging task of defending against machine-generated news that also includes images and captions. To identify the possible weaknesses that adversaries can exploit, we create a NeuralNews dataset, which is comprised of 4 different types of generated articles, and conduct a series of human user study experiments based on this dataset. Coupled with providing a relatively effective approach based on detecting visual-semantic inconsistencies, the valuable insights gleaned from our user study experiments and, consequently, this paper will serve as an effective first line of defense and a valuable reference for future work in defending against machine-generated disinformation.", "phrases": ["inconsistency", "large-scale dissemination", "video", "neural fake news"], "overall_score": 1.6590315628428636, "scores": [2.1741066191624867, 0.8897514862583636, 0.5311583696113232, 0.528240595093031], "rank_score": 1.0308142675313012} -{"id": "mou-etal-2016-sequence", "title": "Sequence to Backward and Forward Sequences: A Content-Introducing Approach to Generative Short-Text Conversation", "abstract": "Using neural networks to generate replies in human-computer dialogue systems has attracted increasing attention over the past few years. However, the performance is not satisfactory: the neural network tends to generate safe, universally relevant replies which carry little meaning. In this paper, we propose a content-introducing approach to neural network-based generative dialogue systems. We first use pointwise mutual information (PMI) to predict a noun as a keyword, reflecting the main gist of the reply. We then propose seq2BF, a \u201csequence to backward and forward sequences\u201d model, which generates a reply containing the given keyword. 
Experimental results show that our approach significantly outperforms traditional sequence-to-sequence models in terms of human evaluation and the entropy measure, and that the predicted keyword can appear at an appropriate position in the reply.", "phrases": ["content-introducing approach", "generative short-text conversation", "dialogue system", "traditional sequence-to-sequence model", "response generation"], "overall_score": 3.6034258300164144, "scores": [2.1572736510425137, 0.993130452327429, 0.8863409548173433, 0.5759008840751223, 0.5402470040138057], "rank_score": 1.0305785892552428} -{"id": "zirn-etal-2011-fine", "title": "Fine-Grained Sentiment Analysis with Structural Features", "abstract": "Sentiment analysis is the problem of determining the polarity of a text with respect to a particular topic. For most applications, however, it is not only necessary to derive the polarity of a text as a whole but also to extract negative and positive utterances on a more fine-grained level. Sentiment analysis systems working on the (sub-)sentence level, however, are difficult to develop since shorter textual segments rarely carry enough information to determine their polarity out of context. In this paper, therefore, we present a fully automatic framework for fine-grained sentiment analysis on the subsentence level combining multiple sentiment lexicons and neighborhood as well as discourse relations to overcome this problem. We use Markov logic to integrate polarity scores from different sentiment lexicons with information about relations between neighboring segments, and evaluate the approach on product reviews. The experiments show that the use of structural features improves the accuracy of polarity predictions, achieving accuracy scores of up to 69%.", "phrases": ["structural feature", "discourse relation", "polarity prediction", "fine-grained sentiment analysis"], "overall_score": 2.14293135014681, "scores": [2.1611109054120434, 0.8507298309700163, 0.563975080312514, 0.5463127590684852], "rank_score": 1.0305321439407646} -{"id": "novikova-etal-2017-e2e", "title": "The E2E Dataset: New Challenges For End-to-End Generation", "abstract": "This paper describes the E2E data, a new dataset for training end-to-end, data-driven natural language generation systems in the restaurant domain, which is ten times bigger than existing, frequently used datasets in this area. The E2E dataset poses new challenges: (1) its human reference texts show more lexical richness and syntactic variation, including discourse phenomena; (2) generating from this set requires content selection. As such, learning from this dataset promises more natural, varied and less template-like system utterances. We also establish a baseline on this dataset, which illustrates some of the difficulties associated with this data.", "phrases": ["e2e dataset", "end-to-end", "restaurant domain", "nlg", "creation"], "overall_score": 3.5381533245143975, "scores": [1.9695621958324432, 1.2206266247031128, 0.8269708173392747, 0.5839354128879285, 0.5505742971794804], "rank_score": 1.0303338695884479} -{"id": "imran-etal-2016-twitter", "title": "Twitter as a Lifeline: Human-annotated Twitter Corpora for NLP of Crisis-related Messages", "abstract": "Microblogging platforms such as Twitter provide active communication channels during mass convergence and emergency events such as earthquakes and typhoons. 
During the sudden onset of a crisis situation, affected people post useful information on Twitter that can be used for situational awareness and other humanitarian disaster response efforts, if processed in a timely and effective manner. Processing social media information poses multiple challenges such as parsing noisy, brief and informal messages, learning information categories from the incoming stream of messages and classifying them into different classes, among others. One of the basic necessities of many of these tasks is the availability of data, in particular human-annotated data. In this paper, we present human-annotated Twitter corpora collected during 19 different crises that took place between 2013 and 2015. To demonstrate the utility of the annotations, we train machine learning classifiers. Moreover, we publish the largest word2vec word embeddings to date, trained on 52 million crisis-related tweets. To deal with the language issues of tweets, we present human-annotated normalized lexical resources for different lexical variations.", "phrases": ["crisis-related tweet", "twitter", "crisisnlp"], "overall_score": 2.2624837971921865, "scores": [1.9531269004229883, 0.5997864523589308, 0.5361888999360302], "rank_score": 1.029700750905983} -{"id": "ju-etal-2018-neural", "title": "A Neural Layered Model for Nested Named Entity Recognition", "abstract": "Entity mentions embedded in longer entity mentions are referred to as nested entities. Most named entity recognition (NER) systems deal only with the flat entities and ignore the inner nested ones, which fails to capture finer-grained semantic information in underlying texts. To address this issue, we propose a novel neural model to identify nested entities by dynamically stacking flat NER layers. Each flat NER layer is based on the state-of-the-art flat NER model that captures sequential context representation with a bidirectional Long Short-Term Memory (LSTM) layer and feeds it to the cascaded CRF layer. Our model merges the output of the LSTM layer in the current flat NER layer to build a new representation for detected entities and subsequently feeds them into the next flat NER layer. This allows our model to extract outer entities by taking full advantage of information encoded in their corresponding inner entities, in an inside-to-outside way. Our model dynamically stacks the flat NER layers until no outer entities are extracted. Extensive evaluation shows that our dynamic model outperforms state-of-the-art feature-based systems on nested NER, achieving 74.7% and 72.2% on GENIA and ACE2005 datasets, respectively, in terms of F-score.", "phrases": ["entity mention", "novel neural model", "ner layer", "sequence labeling model"], "overall_score": 3.271739427167275, "scores": [1.8106697931358318, 1.1630394740608478, 0.6053166850865199, 0.5388897304842907], "rank_score": 1.0294789206918726} -{"id": "hai-etal-2016-deceptive", "title": "Deceptive Review Spam Detection via Exploiting Task Relatedness and Unlabeled Data", "abstract": "Existing work on detecting deceptive reviews primarily focuses on feature engineering and applies off-the-shelf supervised classification algorithms to the problem. Then, one real challenge would be to manually recognize plentiful ground truth spam review data for model building, which is rather difficult and often requires domain expertise in practice. 
In this paper, we propose to exploit the relatedness of multiple review spam detection tasks and readily available unlabeled data to address the scarcity of labeled opinion spam data. We first develop a multi-task learning method based on logistic regression (MTL-LR), which can boost the learning for a task by sharing the knowledge contained in the training signals of other related tasks. To leverage the unlabeled data, we introduce a graph Laplacian regularizer into each base model. We then propose a novel semi-supervised multi-task learning method via Laplacian regularized logistic regression (SMTL-LLR) to further improve the review spam detection performance. We also develop a stochastic alternating method to cope with the optimization for SMTL-LLR. Experimental results on real-world review data demonstrate the benefit of SMTL-LLR over several well-established baseline methods.", "phrases": ["relatedness", "unlabeled data", "opinion spam data", "deceptive review"], "overall_score": 1.8445785222564677, "scores": [2.424669259037981, 0.5873587978242837, 0.5802200563202241, 0.5256673859356347], "rank_score": 1.029478874779531} -{"id": "grishina-stede-2015-knowledge", "title": "Knowledge-lean projection of coreference chains across languages", "abstract": "Common technologies for automatic coreference resolution require either a language-specific rule set or large collections of manually annotated data, which is typically limited to newswire texts in major languages. This makes it difficult to develop coreference resolvers for a large number of the so-called low-resourced languages. We apply a direct projection algorithm on a multi-genre and multilingual corpus (English, German, Russian) to automatically produce coreference annotations for two target languages without exploiting any linguistic knowledge of the languages. Our evaluation of the projected annotations shows promising results, and the error analysis reveals structural differences of referring expressions and coreference chains for the three languages, which can now be targeted with more linguistically-informed projection algorithms.", "phrases": ["coreference chain", "linguistic knowledge", "knowledge-lean projection"], "overall_score": 2.261733821048133, "scores": [1.7106919524889788, 0.8185367050739435, 0.558849608596999], "rank_score": 1.029359422053307} -{"id": "vincze-etal-2011-multiword", "title": "Multiword Expressions and Named Entities in the Wiki50 Corpus", "abstract": "Multiword expressions (MWEs) and named entities (NEs) exhibit unique and idiosyncratic features; thus, they often pose a problem to NLP systems. In order to facilitate their identification, we developed the first corpus of Wikipedia articles in which several types of multiword expressions and named entities are manually annotated at the same time. 
The corpus can be used for training or testing MWE-detectors or NER systems, which we illustrate with experiments, and it also makes it possible to investigate the co-occurrences of different types of MWEs and NEs within the same domain.", "phrases": ["wiki50 corpus", "different type", "multiword expressions", "sequence tagger"], "overall_score": 2.557847771593229, "scores": [1.804245368260922, 0.9399836003523369, 0.8351438715328628, 0.5380418270188628], "rank_score": 1.0293536667912462} -{"id": "verga-etal-2016-multilingual", "title": "Multilingual Relation Extraction using Compositional Universal Schema", "abstract": "Universal schema builds a knowledge base (KB) of entities and relations by jointly embedding all relation types from input KBs as well as textual patterns expressing relations from raw text. In most previous applications of universal schema, each textual pattern is represented as a single embedding, preventing generalization to unseen patterns. Recent work employs a neural network to capture patterns' compositional semantics, providing generalization to all possible input text. In response, this paper introduces significant further improvements to the coverage and flexibility of universal schema relation extraction: predictions for entities unseen in training and multilingual transfer learning to domains with no annotation. We evaluate our model through extensive experiments on the English and Spanish TAC KBP benchmark, outperforming the top system from TAC 2013 slot-filling using no handwritten patterns or additional annotation. We also consider a multilingual setting in which English training data entities overlap with the seed KB, but Spanish text does not. Despite having no annotation for Spanish data, we train an accurate predictor, with additional improvements obtained by tying word embeddings across languages. Furthermore, we find that multilingual training improves English relation extraction accuracy. Our approach is thus suited to broad-coverage automated knowledge base construction in a variety of languages and domains.", "phrases": ["universal schema", "knowledge base", "relation type", "textual pattern", "multilingual transfer"], "overall_score": 2.915543272745225, "scores": [1.5191534559744957, 0.926014053298055, 0.9218670532092827, 0.9185574419904807, 0.859702258068359], "rank_score": 1.0290588525081348} -{"id": "brockett-etal-2006-correcting", "title": "Correcting ESL Errors Using Phrasal SMT Techniques", "abstract": "This paper presents a pilot study of the use of phrasal Statistical Machine Translation (SMT) techniques to identify and correct writing errors made by learners of English as a Second Language (ESL). Using examples of mass noun errors found in the Chinese Learner Error Corpus (CLEC) to guide creation of an engineered training set, we show that application of the SMT paradigm can capture errors not well addressed by widely-used proofing tools designed for native speakers. 
Our system was able to correct 61.81% of mistakes in a set of naturally-occurring examples of mass noun errors found on the World Wide Web, suggesting that efforts to collect alignable corpora of pre- and post-editing ESL writing samples can enable the development of SMT-based writing assistance tools capable of repairing many of the complex syntactic and lexical problems found in the writing of ESL learners.", "phrases": ["esl error", "machine translation", "uncountable noun", "smt system", "regular expression"], "overall_score": 3.8456606579280104, "scores": [0.8486245737982542, 2.09634921808439, 1.0891780466771857, 0.5603039305716871, 0.5500072929812865], "rank_score": 1.0288926124225608} -{"id": "kartsaklis-etal-2018-mapping", "title": "Mapping Text to Knowledge Graph Entities using Multi-Sense LSTMs", "abstract": "This paper addresses the problem of mapping natural language text to knowledge base entities. The mapping process is approached as a composition of a phrase or a sentence into a point in a multi-dimensional entity space obtained from a knowledge graph. The compositional model is an LSTM equipped with a dynamic disambiguation mechanism on the input word embeddings (a Multi-Sense LSTM), addressing polysemy issues. Further, the knowledge base space is prepared by collecting random walks from a graph enhanced with textual features, which act as a set of semantic bridges between text and knowledge base entities. The ideas of this work are demonstrated on large-scale text-to-entity mapping and entity classification tasks, with state of the art results.", "phrases": ["knowledge graph", "multi-sense lstm", "textual feature"], "overall_score": 2.26051543588864, "scores": [1.676737497064521, 0.8530369360245258, 0.5566403001229087], "rank_score": 1.0288049110706519} -{"id": "cherry-etal-2018-revisiting", "title": "Revisiting Character-Based Neural Machine Translation with Capacity and Compression", "abstract": "Translating characters instead of words or word-fragments has the potential to simplify the processing pipeline for neural machine translation (NMT), and improve results by eliminating hyper-parameters and manual feature engineering. However, it results in longer sequences in which each symbol contains less information, creating both modeling and computational challenges. In this paper, we show that the modeling problem can be solved by standard sequence-to-sequence architectures of sufficient depth, and that deep models operating at the character level outperform identical models operating over word fragments. This result implies that alternative architectures for handling character input are better viewed as methods for reducing computation time than as improved ways of modeling longer sequences. From this perspective, we evaluate several techniques for character-level NMT, verify that they do not match the performance of our deep character baseline model, and evaluate the performance versus computation time tradeoffs they offer.
Within this framework, we also perform the first evaluation for NMT of conditional computation over time, in which the model learns which timesteps can be skipped, rather than having them be dictated by a fixed schedule specified before training begins.", "phrases": ["neural machine translation", "character", "rich language", "translation quality", "long sequence"], "overall_score": 3.081743206673869, "scores": [2.0239882815072194, 0.8855747453109684, 0.8564112438497972, 0.8423592456964762, 0.5352222577599051], "rank_score": 1.0287111548248733} -{"id": "pasupat-etal-2021-controllable", "title": "Controllable Semantic Parsing via Retrieval Augmentation", "abstract": "In practical applications of semantic parsing, we often want to rapidly change the behavior of the parser, such as enabling it to handle queries in a new domain, or changing its predictions on certain targeted queries. While we can introduce new training examples exhibiting the target behavior, a mechanism for enacting such behavior changes without expensive model re-training would be preferable. To this end, we propose ControllAble Semantic Parser via Exemplar Retrieval (CASPER). Given an input query, the parser retrieves related exemplars from a retrieval index, augments them to the query, and then applies a generative seq2seq model to produce an output parse. The exemplars act as a control mechanism over the generic generative model: by manipulating the retrieval index or how the augmented query is constructed, we can manipulate the behavior of the parser. On the MTOP dataset, in addition to achieving state-of-the-art on the standard setup, we show that CASPER can parse queries in a new domain, adapt the prediction toward the specified patterns, or adapt to new semantic schemas without having to further re-train the model.", "phrases": ["semantic parsing", "exemplar retrieval", "casper"], "overall_score": 1.6555684689759909, "scores": [1.992711270258188, 0.5723289724634169, 0.5209473362688574], "rank_score": 1.028662526330154} -{"id": "zhao-etal-2004-language", "title": "Language Model Adaptation for Statistical Machine Translation via Structured Query Models", "abstract": "We explore unsupervised language model adaptation techniques for Statistical Machine Translation. The hypotheses from the machine translation output are converted into queries at different levels of representation power and used to extract similar sentences from a very large monolingual text collection. Specific language models are then built from the retrieved data and interpolated with a general background model. Experiments show significant improvements when translating with these adapted language models.", "phrases": ["query", "similar sentence", "language model adaptation", "data selection approach", "training corpus"], "overall_score": 3.2688795690886443, "scores": [1.5474573067398603, 1.3008984295828403, 0.8820507447302425, 0.8536243785919722, 0.5588643579775553], "rank_score": 1.028579043524494} -{"id": "moradi-samwald-2021-evaluating", "title": "Evaluating the Robustness of Neural Language Models to Input Perturbations", "abstract": "High-performance neural language models have obtained state-of-the-art results on a wide range of Natural Language Processing (NLP) tasks. However, results for common benchmark datasets often do not reflect model reliability and robustness when applied to noisy, real-world data.
In this study, we design and implement various types of character-level and word-level perturbation methods to simulate realistic scenarios in which input texts may be slightly noisy or different from the data distribution on which NLP systems were trained. Conducting comprehensive experiments on different NLP tasks, we investigate the ability of high-performance language models such as BERT, XLNet, RoBERTa, and ELMo in handling different types of input perturbations. The results suggest that language models are sensitive to input perturbations and their performance can decrease even when small changes are introduced. We highlight that models need to be further improved and that current benchmarks are not reflecting model robustness well. We argue that evaluations on perturbed inputs should routinely complement widely-used benchmarks in order to yield a more realistic understanding of NLP systems' robustness.", "phrases": ["robustness", "input perturbation", "different type"], "overall_score": 1.4255970920714283, "scores": [1.7216835654196743, 0.839080918939869, 0.5242882981962212], "rank_score": 1.0283509275185883} -{"id": "oda-etal-2014-optimizing", "title": "Optimizing Segmentation Strategies for Simultaneous Speech Translation", "abstract": "In this paper, we propose new algorithms for learning segmentation strategies for simultaneous speech translation. In contrast to previously proposed heuristic methods, our method finds a segmentation that directly maximizes the performance of the machine translation system. We describe two methods based on greedy search and dynamic programming that search for the optimal segmentation strategy. An experimental evaluation finds that our algorithm is able to segment the input two to three times more frequently than conventional methods in terms of number of words, while maintaining the same score of automatic evaluation.", "phrases": ["segmentation strategy", "simultaneous speech translation", "dynamic programming", "bleu score", "translation quality"], "overall_score": 3.1307652217257558, "scores": [2.013605130992833, 0.8521350503150372, 0.8640865663573793, 0.845398480105448, 0.5664107525290922], "rank_score": 1.028327196059958} -{"id": "putthividhya-hu-2011-bootstrapped", "title": "Bootstrapped Named Entity Recognition for Product Attribute Extraction", "abstract": "We present a named entity recognition (NER) system for extracting product attributes and values from listing titles. Information extraction from short listing titles presents a unique challenge, with the lack of informative context and grammatical structure. In this work, we combine supervised NER with bootstrapping to expand the seed list, and output normalized results. Focusing on listings from eBay's clothing and shoes categories, our bootstrapped NER system is able to identify new brands corresponding to spelling variants and typographical errors of the known brands, as well as identifying novel brands. Among the top 300 new brands predicted, our system achieves 90.33% precision.
To output normalized attribute values, we explore several string comparison algorithms and find n-gram substring matching to work well in practice.", "phrases": ["entity recognition", "product attribute extraction", "title", "brand", "e-commerce domain"], "overall_score": 2.7845890845072074, "scores": [0.9549597371416829, 1.224061685250014, 1.1260738080554293, 0.9546313661007634, 0.8815907309038786], "rank_score": 1.0282634654903535} -{"id": "reiplinger-etal-2012-extracting", "title": "Extracting glossary sentences from scholarly articles: A comparative evaluation of pattern bootstrapping and deep analysis", "abstract": "The paper reports on a comparative study of two approaches to extracting definitional sentences from a corpus of scholarly discourse: one based on bootstrapping lexico-syntactic patterns and another based on deep analysis. Computational Linguistics was used as the target domain and the ACL Anthology as the corpus. Definitional sentences extracted for a set of well-defined concepts were rated by domain experts. Results show that both methods extract high-quality definition sentences intended for automated glossary construction.", "phrases": ["scholarly article", "deep analysis", "lexical-syntactic pattern"], "overall_score": 2.465575904621266, "scores": [1.6863387877800768, 0.817434591696574, 0.5809016677499678], "rank_score": 1.0282250157422061} -{"id": "tantug-etal-2008-bleu", "title": "BLEU+: a Tool for Fine-Grained BLEU Computation", "abstract": "We present a tool, BLEU+, which implements various extensions to BLEU computation to allow for a better understanding of the translation performance, especially for morphologically complex languages. BLEU+ takes into account both \u201ccloseness\u201d in morphological structure and \u201ccloseness\u201d of the root words in the WordNet hierarchy while comparing tokens in the candidate and reference sentence. In addition to gauging performance at a finer level of granularity, BLEU+ also allows the computation of various upper bound oracle scores: comparing all tokens considering only the roots allows us to get an upper bound when all errors due to morphological structure are fixed, while comparing tokens in an error-tolerant way considering minor morpheme edit operations allows us to get a (more realistic) upper bound when tokens that differ in morpheme insertions/deletions and substitutions are fixed. We use BLEU+ in the fine-grained evaluation of the output of our English-to-Turkish statistical MT system.", "phrases": ["various extension", "translation performance", "bleu+"], "overall_score": 1.12956850136262, "scores": [1.999000330459133, 0.5536335335437808, 0.5318988133041264], "rank_score": 1.0281775591023468} -{"id": "callison-burch-etal-2005-scaling", "title": "Scaling Phrase-Based Statistical Machine Translation to Larger Corpora and Longer Phrases", "abstract": "In this paper we describe a novel data structure for phrase-based statistical machine translation which allows for the retrieval of arbitrarily long phrases while simultaneously using less memory than is required by current decoder implementations. We detail the computational complexity and average retrieval times for looking up phrase translations in our suffix array-based data structure.
We show how sampling can be used to reduce the retrieval time by orders of magnitude with no loss in translation quality.", "phrases": ["machine translation", "data structure", "memory"], "overall_score": 2.464814930715218, "scores": [2.0362848276096988, 0.5240994069696006, 0.5233387603437665], "rank_score": 1.0279076649743553} -{"id": "chen-etal-2018-detecting", "title": "Detecting Free Translation in Parallel Corpora from Attention Scores", "abstract": "In this study, we propose a method for extracting free translation examples from bilingual parallel corpora based on an innovative use of attention scores. Preliminary results show that the approach is promising and that paraphrases at both sentential and sub-sentential levels covering diverse surface forms could be identified. The extracted data, upon further filtering, have great potential to supplement the example sentences available in existing bilingual dictionaries in an effective and systematic way.", "phrases": ["parallel corpora", "attention score", "free translation example"], "overall_score": 1.1291751879678997, "scores": [1.9187719546694144, 0.5931044458746201, 0.5715822489223097], "rank_score": 1.0278195498221148} -{"id": "iida-etal-2013-investigation", "title": "Investigation of annotator's behaviour using eye-tracking data", "abstract": "This paper presents an analysis of an annotator\u2019s behaviour during her/his annotation process for eliciting useful information for natural language processing (NLP) tasks. Text annotation is essential for machine learning-based NLP where annotated texts are used for both training and evaluating supervised systems. Since an annotator\u2019s behaviour during annotation can be seen as reflecting her/his cognitive process during her/his attempt to understand the text for annotation, analysing the process of text annotation has potential to reveal useful information for NLP tasks, in particular semantic and discourse processing that require deeper language understanding. We conducted an experiment for collecting annotator actions and eye gaze during the annotation of predicate-argument relations in Japanese texts. Our analysis of the collected data suggests that the insights obtained into human annotation behaviour are useful for exploring effective linguistic features in machine learning-based approaches.", "phrases": ["annotator", "behaviour", "eye-tracking data"], "overall_score": 1.4244833856028973, "scores": [1.4322687314176237, 0.8582147077970479, 0.7921592351423602], "rank_score": 1.0275475581190106} -{"id": "gildea-2003-loosely", "title": "Loosely Tree-Based Alignment for Machine Translation", "abstract": "We augment a model of translation based on re-ordering nodes in syntactic trees in order to allow alignments not conforming to the original tree structure, while keeping computational complexity polynomial in the sentence length. This is done by adding a new subtree cloning operation to either tree-to-string or tree-to-tree alignment algorithms.", "phrases": ["tree-based alignment", "subtree", "well result", "translation model"], "overall_score": 3.6531955693280267, "scores": [0.9126485361839614, 1.1096653907326468, 1.0837947293480792, 1.0039762143542788], "rank_score": 1.0275212176547415} -{"id": "mirkin-etal-2018-listening", "title": "Listening Comprehension over Argumentative Content", "abstract": "This paper presents a task for machine listening comprehension in the argumentation domain and a corresponding dataset in English.
We recorded 200 spontaneous speeches arguing for or against 50 controversial topics. For each speech, we formulated a question, aimed at confirming or rejecting the occurrence of potential arguments in the speech. Labels were collected by listening to the speech and marking which arguments were mentioned by the speaker. We applied baseline methods addressing the task, to be used as a benchmark for future work over this dataset. All data used in this work is freely available for research.", "phrases": ["argumentative content", "machine listening comprehension", "controversial topic"], "overall_score": 1.8410538165967274, "scores": [1.9270913476378027, 0.6132744510167668, 0.5421692986315237], "rank_score": 1.0275116990953646} -{"id": "li-etal-2013-listwise", "title": "Listwise Approach to Learning to Rank for Automatic Evaluation of Machine Translation", "abstract": "The listwise approach to learning to rank has been applied successfully to information retrieval. However, it has not drawn much attention in research on the automatic evaluation of machine translation. In this paper, we present the listwise approach to learning to rank for the automatic evaluation of machine translation. Unlike previous automatic metrics that give absolute scores to translation outputs, our approach directly ranks the translation outputs relative to each other using features extracted from the translation outputs. Two representative listwise approaches, ListNet and ListMLE, are applied to automatic evaluation of machine translation. When evaluated using the dataset of the WMT 2012 Metrics task, the proposed approach achieves higher segment-level correlation with human judgments than the pairwise approach, RankNet, and with all the other metrics that were evaluated during the workshop, and it achieves a system-level correlation comparable to the performance of most competitors.", "phrases": ["automatic evaluation", "translation output", "listwise approach"], "overall_score": 1.4239746710735688, "scores": [1.6564135950978003, 0.87719152110402, 0.5479366782621756], "rank_score": 1.0271805981546653} -{"id": "lao-etal-2012-reading", "title": "Reading The Web with Learned Syntactic-Semantic Inference Rules", "abstract": "We study how to extend a large knowledge base (Freebase) by reading relational information from a large Web text corpus. Previous studies on extracting relational knowledge from text show the potential of syntactic patterns for extraction, but they do not exploit background knowledge of other relations in the knowledge base. We describe a distributed, Web-scale implementation of a path-constrained random walk model that learns syntactic-semantic inference rules for binary relations from a graph representation of the parsed text and the knowledge base. Experiments show significant accuracy improvements in binary relation prediction over methods that consider only text, or only the existing knowledge base.", "phrases": ["inference rule", "pra", "knowledge base completion", "syntactic information", "large corpus"], "overall_score": 2.847475275707853, "scores": [1.8342662688244518, 1.0901574551238764, 1.0872291453337581, 0.5712239257358527, 0.5521712791291294], "rank_score": 1.0270096148294134} -{"id": "guo-etal-2019-densely", "title": "Densely Connected Graph Convolutional Networks for Graph-to-Sequence Learning", "abstract": "We focus on graph-to-sequence learning, which can be framed as transducing graph structures to sequences for text generation.
To capture structural information associated with graphs, we investigate the problem of encoding graphs using graph convolutional networks (GCNs). Unlike various existing approaches where shallow architectures were used for capturing local structural information only, we introduce a dense connection strategy, proposing a novel Densely Connected Graph Convolutional Network (DCGCN). Such a deep architecture is able to integrate both local and non-local features to learn a better structural representation of a graph. Our model outperforms the state-of-the-art neural models significantly on AMR-to-text generation and syntax-based neural machine translation.", "phrases": ["graph convolutional networks", "graph-to-sequence learning", "non-local feature", "gnn", "dense connectivity"], "overall_score": 3.076644554436161, "scores": [0.9695516463703174, 0.9535110133212394, 1.4055402592395232, 0.9271307900659994, 0.8793122054800839], "rank_score": 1.0270091828954326} -{"id": "apostolova-etal-2011-automatic", "title": "Automatic Extraction of Lexico-Syntactic Patterns for Detection of Negation and Speculation Scopes", "abstract": "Detecting the linguistic scope of negated and speculated information in text is an important Information Extraction task. This paper presents ScopeFinder, a linguistically motivated rule-based system for the detection of negation and speculation scopes. The system rule set consists of lexico-syntactic patterns automatically extracted from a corpus annotated with negation/speculation cues and their scopes (the BioScope corpus). The system performs on par with state-of-the-art machine learning systems. Additionally, the intuitive and linguistically motivated rules will allow for manual adaptation of the rule set to new domains and corpora.", "phrases": ["lexico-syntactic pattern", "detection", "negation", "speculation scope", "rule-based system"], "overall_score": 1.4236700672795044, "scores": [1.7529516710629574, 1.6852270276741683, 0.6096441202899167, 0.5518389016317183, 0.5351426441567341], "rank_score": 1.026960872963099} -{"id": "chi-etal-2021-infoxlm", "title": "InfoXLM: An Information-Theoretic Framework for Cross-Lingual Language Model Pre-Training", "abstract": "In this work, we present an information-theoretic framework that formulates cross-lingual language model pre-training as maximizing mutual information between multilingual-multi-granularity texts. The unified view helps us to better understand the existing methods for learning cross-lingual representations. More importantly, inspired by the framework, we propose a new pre-training task based on contrastive learning. Specifically, we regard a bilingual sentence pair as two views of the same meaning and encourage their encoded representations to be more similar than the negative examples. By leveraging both monolingual and parallel corpora, we jointly train the pretext tasks to improve the cross-lingual transferability of pre-trained models. Experimental results on several benchmarks show that our approach achieves considerably better performance. 
The code and pre-trained models are available at .", "phrases": ["contrastive learning", "parallel corpora", "pre-trained model", "infoxlm"], "overall_score": 2.6335428991598087, "scores": [1.5664563507091216, 1.0826957558948618, 0.8911140320867369, 0.5667044593816832], "rank_score": 1.026742649518101} -{"id": "recasens-etal-2013-linguistic", "title": "Linguistic Models for Analyzing and Detecting Biased Language", "abstract": "Unbiased language is a requirement for reference sources like encyclopedias and scientific texts. Bias is, nonetheless, ubiquitous, making it crucial to understand its nature and linguistic realization and hence detect bias automatically. To this end we analyze real instances of human edits designed to remove bias from Wikipedia articles. The analysis uncovers two classes of bias: framing bias, such as praising or perspective-specific words, which we link to the literature on subjectivity; and epistemological bias, related to whether propositions that are presupposed or entailed in the text are uncontroversially accepted as true. We identify common linguistic cues for these classes, including factive verbs, implicatives, hedges, and subjective intensifiers. These insights help us develop features for a model to solve a new prediction task of practical importance: given a biased sentence, identify the bias-inducing word. Our linguistically-informed model performs almost as well as humans tested on the same task.", "phrases": ["edit", "wikipedia", "linguistic indicator", "neutral point", "other work"], "overall_score": 3.382497079527576, "scores": [1.4078879817944479, 1.0635898132071366, 0.9671457721321127, 0.8490911633362399, 0.8437544790912571], "rank_score": 1.026293841912239} -{"id": "chang-etal-2014-typed", "title": "Typed Tensor Decomposition of Knowledge Bases for Relation Extraction", "abstract": "While relation extraction has traditionally been viewed as a task relying solely on textual data, recent work has shown that by taking as input existing facts in the form of entity-relation triples from both knowledge bases and textual data, the performance of relation extraction can be improved significantly. Following this new paradigm, we propose a tensor decomposition approach for knowledge base embedding that is highly scalable, and is especially suitable for relation extraction. By leveraging relational domain knowledge about entity type information, our learning algorithm is significantly faster than previous approaches and is better able to discover new relations missing from the database. In addition, when applied to a relation extraction task, our approach alone is comparable to several existing systems, and improves the weighted mean average precision of a state-of-the-art method by 10 points when used as a subcomponent.", "phrases": ["relation extraction", "knowledge basis", "tensor factorization"], "overall_score": 2.4608074227976076, "scores": [1.9121150143721684, 0.5905440443025808, 0.5760501544167191], "rank_score": 1.0262364043638226} -{"id": "yessenalina-etal-2010-multi", "title": "Multi-Level Structured Models for Document-Level Sentiment Classification", "abstract": "In this paper, we investigate structured models for document-level sentiment classification. When predicting the sentiment of a subjective document (e.g., as positive or negative), it is well known that not all sentences are equally discriminative or informative. But identifying the useful sentences automatically is itself a difficult learning problem.
This paper proposes a joint two-level approach for document-level sentiment classification that simultaneously extracts useful (i.e., subjective) sentences and predicts document-level sentiment based on the extracted sentences. Unlike previous joint learning methods for the task, our approach (1) does not rely on gold standard sentence-level subjectivity annotations (which may be expensive to obtain), and (2) optimizes directly for document-level performance. Empirical evaluations on movie reviews and U.S. Congressional floor debates show improved performance over previous approaches.", "phrases": ["sentiment classification", "previous approach", "latent variable"], "overall_score": 2.4603698745325953, "scores": [2.00565137734081, 0.543830401137541, 0.5286800192151521], "rank_score": 1.026053932564501} -{"id": "wang-etal-2013-conditional", "title": "Conditional Random Field-based Parser and Language Model for Traditional Chinese Spelling Checker", "abstract": "This paper describes our Chinese spelling check system submitted to SIGHAN Bake-off 2013 evaluation. The main idea is to exchange potential error characters with their confusable ones and rescore the modified sentence using a conditional random field (CRF)-based word segmentation/part of speech (POS) tagger and a tri-gram language model (LM) to detect and correct possible spelling errors. Experimental results on the Bakeoff 2013 tasks showed the proposed method achieved 0.50 location detection and 0.24 error location F-scores in subtask1 and 0.49 location and 0.40 correction accuracies and 0.40 correction precision in subtask2.", "phrases": ["language model", "chinese spelling checker", "error character", "random field"], "overall_score": 1.65057528922114, "scores": [1.813323820152559, 0.9199768712660967, 0.844269825541575, 0.5246698402954868], "rank_score": 1.0255600893139294} -{"id": "korhonen-etal-2003-clustering", "title": "Clustering Polysemic Subcategorization Frame Distributions Semantically", "abstract": "Previous research has demonstrated the utility of clustering in inducing semantic verb classes from undisambiguated corpus data. We describe a new approach which involves clustering subcategorization frame (SCF) distributions using the Information Bottleneck and nearest neighbour methods. In contrast to previous work, we particularly focus on clustering polysemic verbs. A novel evaluation scheme is proposed which accounts for the effect of polysemy on the clusters, offering us a good insight into the potential and limitations of semantically classifying undisambiguated SCF data.", "phrases": ["subcategorization frame", "corpus data", "information bottleneck", "clustering method"], "overall_score": 2.964240846340591, "scores": [1.4683776806090632, 0.9188751625720492, 0.8796049772690765, 0.8353699822435297], "rank_score": 1.0255569506734297} -{"id": "qian-etal-2009-semi", "title": "Semi-Supervised Learning for Semantic Relation Classification using Stratified Sampling Strategy", "abstract": "This paper presents a new approach to selecting the initial seed set using stratified sampling strategy in bootstrapping-based semi-supervised learning for semantic relation classification. First, the training data is partitioned into several strata according to relation types/subtypes, then relation instances are randomly sampled from each stratum to form the initial seed set.
We also investigate different augmentation strategies in iteratively adding reliable instances to the labeled set, and find that the bootstrapping procedure may stop at a reasonable point to significantly decrease the training time without degrading too much in performance. Experiments on the ACE RDC 2003 and 2004 corpora show the stratified sampling strategy contributes more than the bootstrapping procedure itself. This suggests that a proper sampling strategy is critical in semi-supervised learning.", "phrases": ["semantic relation classification", "stratified sampling strategy", "seed set", "semi-supervised learning"], "overall_score": 1.837081302279719, "scores": [1.7294809471119315, 0.93259707338641, 0.9060968365519607, 0.5330035295133586], "rank_score": 1.0252945966409153} -{"id": "toutanova-etal-2008-global", "title": "A Global Joint Model for Semantic Role Labeling", "abstract": "We present a model for semantic role labeling that effectively captures the linguistic intuition that a semantic argument frame is a joint structure, with strong dependencies among the arguments. We show how to incorporate these strong dependencies in a statistical joint model with a rich set of features over multiple argument phrases. The proposed model substantially outperforms a similar state-of-the-art local model that does not include dependencies among different arguments. We evaluate the gains from incorporating this joint information on the Propbank corpus, when using correct syntactic parse trees as input, and when using automatically derived parse trees. The gains amount to 24.1% error reduction on all arguments and 36.8% on core arguments for gold-standard parse trees on Propbank. For automatic parse trees, the error reductions are 8.3% and 10.3% on all and core arguments, respectively. We also present results on the CoNLL 2005 shared task data set. Additionally, we explore considering multiple syntactic analyses to cope with parser noise and uncertainty.", "phrases": ["semantic role labeling", "argument frame", "joint structure", "strong dependency", "re-ranking model"], "overall_score": 3.299799007044325, "scores": [1.5451910920063654, 1.0655965552586284, 1.059538484755163, 0.9014860505939372, 0.5538888176403006], "rank_score": 1.025140200050879} -{"id": "yang-cardie-2012-extracting", "title": "Extracting Opinion Expressions with semi-Markov Conditional Random Fields", "abstract": "Extracting opinion expressions from text is usually formulated as a token-level sequence labeling task tackled using Conditional Random Fields (CRFs). CRFs, however, do not readily model potentially useful segment-level information like syntactic constituent structure. Thus, we propose a semi-CRF-based approach to the task that can perform sequence labeling at the segment level. We extend the original semi-CRF model (Sarawagi and Cohen, 2004) to allow the modeling of arbitrarily long expressions while accounting for their likely syntactic structure when modeling segment boundaries. We evaluate performance on two opinion extraction tasks, and, in contrast to previous sequence labeling approaches to the task, explore the usefulness of segment-level syntactic parse features.
Experimental results demonstrate that our approach outperforms state-of-the-art methods for both opinion expression tasks.", "phrases": ["opinion expression", "conditional random fields", "strong baseline", "named-entity tagger", "dependency parser"], "overall_score": 2.9031259079681226, "scores": [1.9123625378260696, 1.211222105951549, 0.8324416750120468, 0.6040382814939295, 0.563315737555375], "rank_score": 1.024676067567794} -{"id": "lukin-etal-2017-argument", "title": "Argument Strength is in the Eye of the Beholder: Audience Effects in Persuasion", "abstract": "Americans spend about a third of their time online, with many participating in online conversations on social and political issues. We hypothesize that social media arguments on such issues may be more engaging and persuasive than traditional media summaries, and that particular types of people may be more or less convinced by particular styles of argument, e.g. emotional arguments may resonate with some personalities while factual arguments resonate with others. We report a set of experiments testing at large scale how audience variables interact with argument style to affect the persuasiveness of an argument, an under-researched topic within natural language processing. We show that belief change is affected by personality factors, with conscientious, open and agreeable people being more convinced by emotional arguments.", "phrases": ["persuasion", "audience variable", "belief change"], "overall_score": 3.166780529222583, "scores": [1.4214574570033418, 1.0769044136851549, 0.575145443113014], "rank_score": 1.0245024379338368} -{"id": "iida-etal-2007-annotating", "title": "Annotating a Japanese Text Corpus with Predicate-Argument and Coreference Relations", "abstract": "In this paper, we discuss how to annotate coreference and predicate-argument relations in Japanese written text. There have been research activities for building Japanese text corpora annotated with coreference and predicate-argument relations as are done in the Kyoto Text Corpus version 4.0 (Kawahara et al., 2002) and the GDA-Tagged Corpus (Hasida, 2005). However, there is still much room for refining their specifications. For this reason, we discuss issues in annotating these two types of relations, and propose a new specification for each. In accordance with the specification, we built a large-scale annotated corpus, and examined its reliability. As a result of our current work, we have released an annotated corpus named the NAIST Text Corpus, which is used as the evaluation data set in the coreference and zero-anaphora resolution tasks in Iida et al. (2005) and Iida et al. (2006).", "phrases": ["naist text corpus", "anaphor", "dative"], "overall_score": 2.9026026759334917, "scores": [1.6518569993515666, 0.8560346545949333, 0.5655825149044061], "rank_score": 1.0244913896169687} -{"id": "blunsom-etal-2008-discriminative", "title": "A Discriminative Latent Variable Model for Statistical Machine Translation", "abstract": "Large-scale discriminative machine translation promises to further the state-of-the-art, but has failed to deliver convincing gains over current heuristic frequency count systems. We argue that a principal reason for this failure is not dealing with multiple, equivalent translations. We present a translation model which models derivations as a latent variable, in both training and decoding, and is fully discriminative and globally optimised. Results show that accounting for multiple derivations does indeed improve performance.
Additionally, we show that regularisation is essential for maximum conditional likelihood models in order to avoid degenerate solutions.", "phrases": ["latent variable", "machine translation", "derivation", "hiero", "million"], "overall_score": 3.449621140773805, "scores": [1.698047040043972, 1.6664253994972114, 0.6597962282901998, 0.5648572750548656, 0.533116525468041], "rank_score": 1.024448493670858} -{"id": "bloem-etal-2019-evaluating", "title": "Evaluating the Consistency of Word Embeddings from Small Data", "abstract": "In this work, we address the evaluation of distributional semantic models trained on smaller, domain-specific texts, in particular philosophical text. Specifically, we inspect the behaviour of models using a pre-trained background space in learning. We propose a measure of consistency which can be used as an evaluation metric when no in-domain gold-standard data is available. This measure simply computes the ability of a model to learn similar embeddings from different parts of some homogeneous data. We show that in spite of being a simple evaluation, consistency actually depends on various combinations of factors, including the nature of the data itself, the model used to train the semantic space, and the frequency of the learnt terms, both in the background space and in the in-domain data of interest.", "phrases": ["consistency", "small data", "evaluation metric"], "overall_score": 1.4200359136401552, "scores": [1.6933202588279932, 0.8556221633172416, 0.5240757335940858], "rank_score": 1.0243393852464402} -{"id": "lee-etal-2018-transfer", "title": "Transfer Learning for Named-Entity Recognition with Neural Networks", "abstract": "Recent approaches based on artificial neural networks (ANNs) have shown promising results for named-entity recognition (NER). In order to achieve high performances, ANNs need to be trained on a large labeled dataset. However, labels might be difficult to obtain for the dataset on which the user wants to perform NER: label scarcity is particularly pronounced for patient note de-identification, which is an instance of NER. In this work, we analyze to what extent transfer learning may address this issue. In particular, we demonstrate that transferring an ANN model trained on a large labeled dataset to another dataset with a limited number of labels improves upon the state-of-the-art results on two different datasets for patient note de-identification.", "phrases": ["named-entity recognition", "different dataset", "transfer learning", "target domain"], "overall_score": 2.8396342913866968, "scores": [1.6840622098241458, 0.975284383548318, 0.8834708614352039, 0.5539088553141657], "rank_score": 1.0241815775304584} -{"id": "nguyen-dogruoz-2013-word", "title": "Word Level Language Identification in Online Multilingual Communication", "abstract": "Multilingual speakers switch between languages in online and spoken communication. Analyses of large scale multilingual data require automatic language identification at the word level. For our experiments with multilingual online discussions, we first tag the language of individual words using language models and dictionaries. Secondly, we incorporate context to improve the performance. We achieve an accuracy of 98%.
Besides word level accuracy, we use two new metrics to evaluate this task.", "phrases": ["language identification", "communication", "multilingual online discussion"], "overall_score": 3.211079615228215, "scores": [0.8215616295039244, 1.6421054259020633, 0.6086520695170348], "rank_score": 1.0241063749743409} -{"id": "chen-zechner-2011-computing", "title": "Computing and Evaluating Syntactic Complexity Features for Automated Scoring of Spontaneous Non-Native Speech", "abstract": "This paper focuses on identifying, extracting and evaluating features related to syntactic complexity of spontaneous spoken responses as part of an effort to expand the current feature set of an automated speech scoring system in order to cover additional aspects considered important in the construct of communicative competence. \n \nOur goal is to find effective features, selected from a large set of features proposed previously and some new features designed in analogous ways from a syntactic complexity perspective that correlate well with human ratings of the same spoken responses, and to build automatic scoring models based on the most promising features by using machine learning methods. \n \nOn human transcriptions with manually annotated clause and sentence boundaries, our best scoring model achieves an overall Pearson correlation with human rater scores of r=0.49 on an unseen test set, whereas correlations of models using sentence or clause boundaries from automated classifiers are around r=0.2.", "phrases": ["complexity", "non-native speech", "speech scoring", "scoring system", "syntactic competence"], "overall_score": 2.5431807950694973, "scores": [1.7031912887950966, 1.3084626040461924, 0.9413294687845951, 0.6042809232642583, 0.559991921266473], "rank_score": 1.023451241231323} -{"id": "wang-etal-2019-vizseq", "title": "VizSeq: a visual analysis toolkit for text generation tasks", "abstract": "Automatic evaluation of text generation tasks (e.g. machine translation, text summarization, image captioning and video description) usually relies heavily on task-specific metrics, such as BLEU and ROUGE. They, however, are abstract numbers and are not perfectly aligned with human assessment. This suggests inspecting detailed examples as a complement to identify system error patterns. In this paper, we present VizSeq, a visual analysis toolkit for instance-level and corpus-level system evaluation on a wide variety of text generation tasks. It supports multimodal sources and multiple text references, providing visualization in Jupyter notebook or a web app interface. It can be used locally or deployed onto public servers for centralized data hosting and benchmarking. It covers most common n-gram based metrics accelerated with multiprocessing, and also provides latest embedding-based metrics such as BERTScore.", "phrases": ["visual analysis toolkit", "text generation task", "interface", "n-gram", "vizseq"], "overall_score": 1.9910047162521214, "scores": [2.1964309841060627, 0.9561587750198511, 0.8695645420517124, 0.5635021912092987, 0.5302136242746794], "rank_score": 1.023174023332321} -{"id": "simard-isabelle-2009-phrase", "title": "Phrase-based Machine Translation in a Computer-assisted Translation Environment", "abstract": "We explore the problem of integrating a phrase-based MT system within a computer-assisted translation (CAT) environment. We argue that one way of achieving successful integration is to design an MT system that behaves more like the translation memory (TM) component of CAT systems. 
This implies producing MT output that is consistent with that of a TM when high-similarity material exists in the training data; it also implies providing the MT system with a component that is capable of filtering out machine translations that are less likely to be useful. We propose solutions to both problems, and evaluate their impact on three different data sets. Our results indicate that the proposed approach leads to systems that produce better output than a TM, for a larger portion of the source text.", "phrases": ["machine translation", "smt system", "large probability value", "second strand", "tm-based feature function"], "overall_score": 3.1624544084089656, "scores": [2.4775406262582815, 1.0592638240214416, 0.5289073331978439, 0.5270254031313962, 0.5227771683832558], "rank_score": 1.0231028709984435} -{"id": "mitra-etal-2014-thats", "title": "That's sick dude!: Automatic identification of word sense change across different timescales", "abstract": "In this paper, we propose an unsupervised method to identify noun sense changes based on rigorous analysis of time-varying text data available in the form of millions of digitized books. We construct distributional thesauri based networks from data at different time points and cluster each of them separately to obtain word-centric sense clusters corresponding to the different time points. Subsequently, we compare these sense clusters of two different time points to find if (i) there is birth of a new sense or (ii) if an older sense has got split into more than one sense or (iii) if a newer sense has been formed from the joining of older senses or (iv) if a particular sense has died. We conduct a thorough evaluation of the proposed methodology both manually as well as through comparison with WordNet. Manual evaluation indicates that the algorithm could correctly identify 60.4% birth cases from a set of 48 randomly picked samples and 57% split/join cases from a set of 21 randomly picked samples. Remarkably, in 44% cases the birth of a novel sense is attested by WordNet, while in 46% cases and 43% cases split and join are respectively confirmed by WordNet. Our approach can be applied for lexicography, as well as for applications like word sense disambiguation or semantic search.", "phrases": ["identification", "word sense", "different timescale"], "overall_score": 3.0645803769544457, "scores": [0.843008282360024, 0.8207993716012228, 1.4051385305909294], "rank_score": 1.0229820615173921} -{"id": "ettinger-etal-2018-assessing", "title": "Assessing Composition in Sentence Vector Representations", "abstract": "An important component of achieving language understanding is mastering the composition of sentence meaning, but an immediate challenge to solving this problem is the opacity of sentence vector representations produced by current neural sentence composition models. We present a method to address this challenge, developing tasks that directly target compositional meaning information in sentence vector representations with a high degree of precision and control. To enable the creation of these controlled tasks, we introduce a specialized sentence generation system that produces large, annotated sentence sets meeting specified syntactic, semantic and lexical constraints. We describe the details of the method and generation system, and then present results of experiments applying our method to probe for compositional information in embeddings from a number of existing sentence composition models.
We find that the method is able to extract useful information about the differing capacities of these models, and we discuss the implications of our results with respect to these systems' capturing of sentence information. We make available for public use the datasets used for these experiments, as well as the generation system.", "phrases": ["composition", "sentence generation system", "linguistic knowledge"], "overall_score": 3.162059902860034, "scores": [1.8807717349391821, 0.6009213283659395, 0.5872326637659125], "rank_score": 1.0229752423570113} -{"id": "schneider-etal-2014-discriminative", "title": "Discriminative Lexical Semantic Segmentation with Gaps: Running the MWE Gamut", "abstract": "We present a novel representation, evaluation measure, and supervised models for the task of identifying the multiword expressions (MWEs) in a sentence, resulting in a lexical semantic segmentation. Our approach generalizes a standard chunking representation to encode MWEs containing gaps, thereby enabling efficient sequence tagging algorithms for feature-rich discriminative models. Experiments on a new dataset of English web text offer the first linguistically-driven evaluation of MWE identification with truly heterogeneous expression types. Our statistical sequence model greatly outperforms a lookup-based segmentation procedure, achieving nearly 60% F1 for MWE identification.", "phrases": ["gap", "mwe", "multiword expression", "sequence tagging model", "supervised approach"], "overall_score": 3.16131531371453, "scores": [1.7900473169857318, 1.6043380895859005, 0.5793076604653048, 0.5703544559492753, 0.5696242583247577], "rank_score": 1.022734356262194} -{"id": "hill-korhonen-2014-concreteness", "title": "Concreteness and Subjectivity as Dimensions of Lexical Meaning", "abstract": "We quantify the lexical subjectivity of adjectives using a corpus-based method, and show for the first time that it correlates with noun concreteness in large corpora. These cognitive dimensions together influence how word meanings combine, and we exploit this fact to achieve performance improvements on the semantic classification of adjective-noun pairs.", "phrases": ["subjectivity", "concreteness", "semantic composition"], "overall_score": 1.8323105117684726, "scores": [1.6592346642947224, 0.873795073411706, 0.5348661655721878], "rank_score": 1.0226319677595388} -{"id": "mosbach-etal-2019-incom", "title": "incom.py - A Toolbox for Calculating Linguistic Distances and Asymmetries between Related Languages", "abstract": "Languages may be differently distant from each other and their mutual intelligibility may be asymmetric. In this paper we introduce incom.py, a toolbox for calculating linguistic distances and asymmetries between related languages. incom.py allows linguist experts to quickly and easily perform statistical analyses and compare those with experimental results. We demonstrate the efficacy of incom.py in an intercomprehension experiment on two Slavic languages: Bulgarian and Russian.
Using incom.py we were able to validate three methods to measure linguistic distances and asymmetries: Levenshtein distance, word adaptation surprisal, and conditional entropy as predictors of success in a reading intercomprehension experiment.", "phrases": ["toolbox", "asymmetry", "related language", "intelligibility"], "overall_score": 1.417521736445218, "scores": [1.7463480334354138, 0.909008165962783, 0.8693398260308621, 0.5654071336145976], "rank_score": 1.0225257897609141} -{"id": "jiang-etal-2012-iterative", "title": "Iterative Annotation Transformation with Predict-Self Reestimation for Chinese Word Segmentation", "abstract": "In this paper we first describe the technology of automatic annotation transformation, which is based on the annotation adaptation algorithm (Jiang et al., 2009). It can automatically transform a human-annotated corpus from one annotation guideline to another. We then propose two optimization strategies, iterative training and predict-self reestimation, to further improve the accuracy of annotation guideline transformation. Experiments on Chinese word segmentation show that, the iterative training strategy together with predict-self reestimation brings significant improvement over the simple annotation transformation baseline, and leads to classifiers with significantly higher accuracy and several times faster processing than annotation adaptation does. On the Penn Chinese Treebank 5.0, it achieves an F-measure of 98.43%, significantly outperforms previous works although using a single classifier with only local features.", "phrases": ["annotation transformation", "chinese word segmentation", "iterative training"], "overall_score": 1.4172621583657403, "scores": [1.9746750612505914, 0.55257285890226, 0.5397677111179119], "rank_score": 1.0223385437569212} -{"id": "li-etal-2012-employing", "title": "Employing Compositional Semantics and Discourse Consistency in Chinese Event Extraction", "abstract": "Current Chinese event extraction systems suffer much from two problems in trigger identification: unknown triggers and word segmentation errors to known triggers. To resolve these problems, this paper proposes two novel inference mechanisms to explore special characteristics in Chinese via compositional semantics inside Chinese triggers and discourse consistency between Chinese trigger mentions. Evaluation on the ACE 2005 Chinese corpus justifies the effectiveness of our approach over a strong baseline.", "phrases": ["compositional semantic", "discourse consistency", "chinese event extraction", "word segmentation error"], "overall_score": 2.246296516386415, "scores": [1.487808279740584, 0.8643257232057876, 0.9038089730816856, 0.8333914316722009], "rank_score": 1.0223336019250646} -{"id": "mitchell-2009-class", "title": "Class-Based Ordering of Prenominal Modifiers", "abstract": "This paper introduces a class-based approach to ordering prenominal modifiers. Modifiers are grouped into broad classes based on where they tend to occur prenominally, and a framework is developed to order sets of modifiers based on their classes. 
This system is developed to generate several orderings for modifiers with more flexible positional constraints, and lends itself to bootstrapping for the classification of previously unseen modifiers.", "phrases": ["ordering", "modifier", "class-based approach"], "overall_score": 1.9889462842737804, "scores": [1.6854500607122602, 0.853015064904525, 0.5278834700355267], "rank_score": 1.0221161985507707} -{"id": "lakomkin-etal-2018-kt", "title": "KT-Speech-Crawler: Automatic Dataset Construction for Speech Recognition from YouTube Videos", "abstract": "We describe KT-Speech-Crawler: an approach for automatic dataset construction for speech recognition by crawling YouTube videos. We outline several filtering and post-processing steps, which extract samples that can be used for training end-to-end neural speech recognition systems. In our experiments, we demonstrate that a single-core version of the crawler can obtain around 150 hours of transcribed speech within a day, containing an estimated 3.5% word error rate in the transcriptions. Automatically collected samples contain reading and spontaneous speech recorded in various conditions including background noise and music, distant microphone recordings, and a variety of accents and reverberation. When training a deep neural network on speech recognition, we observed around 40% word error rate reduction on the Wall Street Journal dataset by integrating 200 hours of the collected samples into the training set.", "phrases": ["automatic dataset construction", "speech recognition", "youtube video"], "overall_score": 1.1228397998701043, "scores": [1.7065215097027204, 0.8244034880928826, 0.5352334953831861], "rank_score": 1.0220528310595964} -{"id": "crysmann-etal-2008-hybrid", "title": "Hybrid Processing for Grammar and Style Checking", "abstract": "This paper presents an implemented hybrid approach to grammar and style checking, combining an industrial pattern-based grammar and style checker with bidirectional, large-scale HPSG grammars for German and English. Under this approach, deep processing is applied selectively based on the error hypotheses of a shallow system. We have conducted a comparative evaluation of the two components, supporting an integration scenario where the shallow system is best used for error detection, whereas the HPSG grammars add error correction for both grammar and controlled language style errors.", "phrases": ["style checking", "hybrid approach", "hpsg grammar"], "overall_score": 1.1226531347005007, "scores": [1.9378082108166876, 0.6049768296118521, 0.5228637228713946], "rank_score": 1.0218829210999782} -{"id": "talbot-brants-2008-randomized", "title": "Randomized Language Models via Perfect Hash Functions", "abstract": "We propose a succinct randomized language model which employs a perfect hash function to encode fingerprints of n-grams and their associated probabilities, backoff weights, or other parameters. The scheme can represent any standard n-gram model and is easily combined with existing model reduction techniques such as entropy-pruning. 
We demonstrate the space-savings of the scheme via machine translation experiments within a distributed language modeling framework.", "phrases": ["language model", "hash function", "bit"], "overall_score": 2.5388594216954368, "scores": [1.321219563989446, 1.1989335170050048, 0.5449834969675899], "rank_score": 1.0217121926540136} -{"id": "saeidi-etal-2018-interpretation", "title": "Interpretation of Natural Language Rules in Conversational Machine Reading", "abstract": "Most work in machine reading focuses on question answering problems where the answer is directly expressed in the text to read. However, many real-world question answering problems require the reading of text not because it contains the literal answer, but because it contains a recipe to derive an answer together with the reader's background knowledge. One example is the task of interpreting regulations to answer \u201cCan I...?\u201d or \u201cDo I have to...?\u201d questions such as \u201cI am working in Canada. Do I have to carry on paying UK National Insurance?\u201d after reading a UK government website about this topic. This task requires both the interpretation of rules and the application of background knowledge. It is further complicated due to the fact that, in practice, most questions are underspecified, and a human assistant will regularly have to ask clarification questions such as \u201cHow long have you been working abroad?\u201d when the answer cannot be directly derived from the question and text. In this paper, we formalise this task and develop a crowd-sourcing strategy to collect 37k task instances based on real-world rules and crowd-generated questions and scenarios. We analyse the challenges of this task and assess its difficulty by evaluating the performance of rule-based and machine-learning baselines. We observe promising results when no background knowledge is necessary, and substantial room for improvement whenever background knowledge is needed.", "phrases": ["conversational machine reading", "interpretation", "rule document", "user question"], "overall_score": 3.157348482460789, "scores": [2.2140343738520096, 0.7991775853440636, 0.5453058762854641, 0.5272862647247653], "rank_score": 1.0214510250515756} -{"id": "knight-etal-2006-unsupervised", "title": "Unsupervised Analysis for Decipherment Problems", "abstract": "We study a number of natural language decipherment problems using unsupervised learning. These include letter substitution ciphers, character code conversion, phonetic decipherment, and word-based ciphers with relevance to machine translation. Straightforward unsupervised learning techniques most often fail on the first try, so we describe techniques for understanding errors and significantly increasing performance.", "phrases": ["decipherment problem", "unsupervised learning", "cipher", "phonetic decipherment", "expectation-maximization"], "overall_score": 3.1569701858597576, "scores": [1.9110199763838678, 0.9198135252954363, 0.8658667707536112, 0.8458529934106142, 0.5640899354328127], "rank_score": 1.0213286402552684} -{"id": "reddy-knight-2011-know", "title": "What We Know About The Voynich Manuscript", "abstract": "The Voynich Manuscript is an undeciphered document from medieval Europe. 
We present current knowledge about the manuscript's text through a series of questions about its linguistic properties.", "phrases": ["voynich manuscript", "linguistic property", "letter sequence"], "overall_score": 1.8293468614803274, "scores": [1.9858335233242872, 0.5428894154469215, 0.5342108303498218], "rank_score": 1.0209779230403433} -{"id": "schilder-kondadadi-2008-fastsum", "title": "FastSum: Fast and Accurate Query-based Multi-document Summarization", "abstract": "We present a fast query-based multi-document summarizer called FastSum based solely on word-frequency features of clusters, documents and topics. Summary sentences are ranked by a regression SVM. The summarizer does not use any expensive NLP techniques such as parsing, tagging of names or even part of speech information. Still, the achieved accuracy is comparable to the best systems presented in recent academic competitions (i.e., Document Understanding Conference (DUC)). Because of a detailed feature analysis using Least Angle Regression (LARS), FastSum can rely on a minimal set of features leading to fast processing times: 1250 news documents in 60 seconds.", "phrases": ["query-based multi-document summarizer", "summarization", "fastsum"], "overall_score": 2.243163094252558, "scores": [1.7970090123502942, 0.6612261332231267, 0.604487414592047], "rank_score": 1.020907520055156} -{"id": "deriu-etal-2016-swisscheese", "title": "SwissCheese at SemEval-2016 Task 4: Sentiment Classification Using an Ensemble of Convolutional Neural Networks with Distant Supervision", "abstract": "In this paper, we propose a classifier for predicting message-level sentiments of English micro-blog messages from Twitter. Our method builds upon the convolutional sentence embedding approach proposed by (Severyn and Moschitti, 2015a; Severyn and Moschitti, 2015b). We leverage large amounts of data with distant supervision to train an ensemble of 2-layer convolutional neural networks whose predictions are combined using a random forest classifier. Our approach was evaluated on the datasets of the SemEval-2016 competition (Task 4) outperforming all other approaches for the Message Polarity Classification task.", "phrases": ["convolutional neural networks", "distant supervision", "negative emoticon"], "overall_score": 2.6185119642358425, "scores": [1.700651667795922, 0.8166843563981915, 0.5453115364118961], "rank_score": 1.0208825202020033} -{"id": "de-melo-bansal-2013-good", "title": "Good, Great, Excellent: Global Inference of Semantic Intensities", "abstract": "Adjectives like good, great, and excellent are similar in meaning, but differ in intensity. Intensity order information is very useful for language learners as well as in several NLP tasks, but is missing in most lexical resources (dictionaries, WordNet, and thesauri). In this paper, we present a primarily unsupervised approach that uses semantics from Web-scale data (e.g., phrases like good but not excellent) to rank words by assigning them positions on a continuous scale. We rely on Mixed Integer Linear Programming to jointly determine the ranks, such that individual decisions benefit from global information. When ranking English adjectives, our global algorithm achieves substantial improvements over previous work on both pairwise and rank correlation metrics (specifically, 70% pairwise accuracy as compared to only 56% by previous work). Moreover, our approach can incorporate external synonymy information (increasing its pairwise accuracy to 78%) and extends easily to new languages. 
We also make our code and data freely available.", "phrases": ["intensity", "adjective", "unsupervised approach", "web-scale data"], "overall_score": 2.8294332378958447, "scores": [1.5954489013551176, 0.9567414053216777, 0.9161415999839266, 0.613677394178016], "rank_score": 1.0205023252096845} -{"id": "rozen-etal-2019-diversify", "title": "Diversify Your Datasets: Analyzing Generalization via Controlled Variance in Adversarial Datasets", "abstract": "Phenomenon-specific \u201cadversarial\u201d datasets have been recently designed to perform targeted stress-tests for particular inference types. Recent work (Liu et al., 2019a) proposed that such datasets can be utilized for training NLI and other types of models, often allowing them to learn the phenomenon in focus and improve on the challenge dataset, indicating a \u201cblind spot\u201d in the original training data. Yet, although a model can improve in such a training process, it might still be vulnerable to other challenge datasets targeting the same phenomenon but drawn from a different distribution, such as having a different syntactic complexity level. In this work, we extend this method to draw conclusions about a model's ability to learn and generalize a target phenomenon rather than to \u201clearn\u201d a dataset, by controlling additional aspects in the adversarial datasets. We demonstrate our approach on two inference phenomena \u2013 dative alternation and numerical reasoning, elaborating, and in some cases contradicting, the results of Liu et al. Our methodology enables building better challenge datasets for creating more robust models, and may yield better model understanding and subsequent overarching improvements.", "phrases": ["adversarial dataset", "nli", "reasoning"], "overall_score": 2.1216353115655058, "scores": [2.001940882898521, 0.5323641004857104, 0.526567759186235], "rank_score": 1.0202909141901555} -{"id": "lichtarge-etal-2020-data", "title": "Data Weighted Training Strategies for Grammatical Error Correction", "abstract": "Recent progress in the task of Grammatical Error Correction (GEC) has been driven by addressing data sparsity, both through new methods for generating large and noisy pretraining data and through the publication of small and higher-quality finetuning data in the BEA-2019 shared task. Building upon recent work in Neural Machine Translation (NMT), we make use of both kinds of data by deriving example-level scores on our large pretraining data based on a smaller, higher-quality dataset. In this work, we perform an empirical study to discover how to best incorporate delta-log-perplexity, a type of example scoring, into a training schedule for GEC. In doing so, we perform experiments that shed light on the function and applicability of delta-log-perplexity. Models trained on scored data achieve state-of-the-art results on common GEC test sets.", "phrases": ["grammatical error correction", "neural machine translation", "example-level score"], "overall_score": 1.6420077709539183, "scores": [1.9296564384015433, 0.5668078703047436, 0.5642460640297942], "rank_score": 1.020236790912027} -{"id": "wang-etal-2018-denoising", "title": "Denoising Neural Machine Translation Training with Trusted Data and Online Data Selection", "abstract": "Measuring domain relevance of data and identifying or selecting well-fit domain data for machine translation (MT) is a well-studied topic, but denoising is not yet.
Denoising is concerned with a different type of data quality and tries to reduce the negative impact of data noise on MT training, in particular, neural MT (NMT) training. This paper generalizes methods for measuring and selecting data for domain MT and applies them to denoising NMT training. The proposed approach uses trusted data and a denoising curriculum realized by online data selection. Intrinsic and extrinsic evaluations of the approach show its significant effectiveness for NMT to train on data with severe noise.", "phrases": ["trusted data", "online data selection", "noisy data", "model training", "sentence pair"], "overall_score": 2.6924255935604062, "scores": [1.789602822487033, 0.8548062994726021, 1.3470728841277104, 0.5797046129817903, 0.5299257428076863], "rank_score": 1.0202224723753646} -{"id": "shavarani-etal-2015-learning", "title": "Learning segmentations that balance latency versus quality in spoken language translation", "abstract": "Segmentation of the incoming speech stream and translating segments incrementally is a commonly used technique that improves latency in spoken language translation. Previous work (Oda et al. 2014) [1] has explored creating training data for segmentation by finding segments that maximize translation quality with a user-defined bound on segment length. In this work, we provide a new algorithm, using Pareto-optimality, for finding good segment boundaries that can balance the trade-off between latency versus translation quality. We compare against the state-of-the-art greedy algorithm from (Oda et al. 2014) [1]. Our experimental results show that we can improve latency by up to 12% without harming the BLEU score for the same average segment length. Another benefit is that for any segment size, Pareto-optimal segments maximize latency and translation quality.", "phrases": ["segmentation", "latency", "spoken language translation"], "overall_score": 1.6418455124586318, "scores": [1.6645394364844406, 0.8738886514143954, 0.5219798342225925], "rank_score": 1.0201359740404763} -{"id": "li-etal-2021-improving-bert", "title": "Improving BERT with Syntax-aware Local Attention", "abstract": "Pre-trained Transformer-based neural language models, such as BERT, have achieved remarkable results on varieties of NLP tasks. Recent works have shown that attention-based models can benefit from more focused attention over local regions. Most of them restrict the attention scope within a linear span, or confine to certain tasks such as machine translation and question answering. In this paper, we propose a syntax-aware local attention, where the attention scopes are restrained based on the distances in the syntactic structure. The proposed syntax-aware local attention can be integrated with pretrained language models, such as BERT, to render the model to focus on syntactically relevant words. We conduct experiments on various single-sentence benchmarks, including sentence classification and sequence labeling tasks. Experimental results show consistent gains over BERT on all benchmark datasets.
The extensive studies verify that our model achieves better performance owing to more focused attention over syntactically relevant words.", "phrases": ["bert", "syntax-aware local attention", "syntax information", "similar effort"], "overall_score": 1.827593348776481, "scores": [1.7488660934608937, 0.964978656703793, 0.8315017407111747, 0.5346505849902746], "rank_score": 1.019999268966534} -{"id": "ni-etal-2017-weakly", "title": "Weakly Supervised Cross-Lingual Named Entity Recognition via Effective Annotation and Representation Projection", "abstract": "The state-of-the-art named entity recognition (NER) systems are supervised machine learning models that require large amounts of manually annotated data to achieve high accuracy. However, annotating NER data by humans is expensive and time-consuming, and can be quite difficult for a new language. In this paper, we present two weakly supervised approaches for cross-lingual NER with no human annotation in a target language. The first approach is to create automatically labeled NER data for a target language via annotation projection on comparable corpora, where we develop a heuristic scheme that effectively selects good-quality projection-labeled data from noisy data. The second approach is to project distributed representations of words (word embeddings) from a target language to a source language, so that the source-language NER system can be applied to the target language without re-training. We also design two co-decoding schemes that effectively combine the outputs of the two projection-based approaches. We evaluate the performance of the proposed approaches on both in-house and open NER data for several target languages. The results show that the combined systems outperform three other weakly supervised approaches on the CoNLL data.", "phrases": ["entity recognition", "representation projection", "cross-lingual ner", "source language", "parallel corpora"], "overall_score": 2.9473394627528546, "scores": [1.9046633797266304, 0.8129147121944899, 0.9548094127333446, 0.8521391723861701, 0.5740206892299166], "rank_score": 1.0197094732541103} -{"id": "hermjakob-etal-2008-name", "title": "Name Translation in Statistical Machine Translation - Learning When to Transliterate", "abstract": "We present a method to transliterate names in the framework of end-to-end statistical machine translation. The system is trained to learn when to transliterate. For Arabic to English MT, we developed and trained a transliterator on a bitext of 7 million sentences and Google\u2019s English terabyte ngrams and achieved better name translation accuracy than 3 out of 4 professional translators. The paper also includes a discussion of challenges in name translation evaluation.", "phrases": ["statistical machine translation", "name", "correct conversion", "transliteration module"], "overall_score": 3.1958819691405354, "scores": [0.9371655390832679, 1.4573787592795504, 1.0782768910579323, 0.6042164308747668], "rank_score": 1.0192594050738795} -{"id": "fitzgerald-etal-2015-semantic", "title": "Semantic Role Labeling with Neural Network Factors", "abstract": "We present a new method for semantic role labeling in which arguments and semantic roles are jointly embedded in a shared vector space for a given predicate. These embeddings belong to a neural network, whose output represents the potential functions of a graphical model designed for the SRL task.
We consider both local and structured learning methods and obtain strong results on standard PropBank and FrameNet corpora with a straightforward product-of-experts model. We further show how the model can learn jointly from PropBank and FrameNet annotations to obtain additional improvements on the smaller FrameNet dataset.", "phrases": ["factor", "graphical model", "propbank", "semantic role labeling", "feature representation"], "overall_score": 3.3207654333144947, "scores": [2.05913006372166, 1.090417242532667, 0.8626228383772068, 0.5424806469000744, 0.541523300552031], "rank_score": 1.0192348184167277} -{"id": "walter-pinkal-2006-automatic", "title": "Automatic Extraction of Definitions from German Court Decisions", "abstract": "This paper deals with the use of computational linguistic analysis techniques for information access and ontology learning within the legal domain. We present a rule-based approach for extracting and analysing definitions from parsed text and evaluate it on a corpus of about 6000 German court decisions. The results are applied to improve the quality of a text based ontology learning method on this corpus.", "phrases": ["definition", "german court decision", "parsed text"], "overall_score": 1.1192363766776277, "scores": [1.67236222534604, 0.8416748268077939, 0.5422815096051681], "rank_score": 1.0187728539196674} -{"id": "xu-etal-2012-learning", "title": "Learning from Bullying Traces in Social Media", "abstract": "We introduce the social study of bullying to the NLP community. Bullying, in both physical and cyber worlds (the latter known as cyberbullying), has been recognized as a serious national health issue among adolescents. However, previous social studies of bullying are handicapped by data scarcity, while the few computational studies narrowly restrict themselves to cyberbullying which accounts for only a small fraction of all bullying episodes. Our main contribution is to present evidence that social media, with appropriate natural language processing techniques, can be a valuable and abundant data source for the study of bullying in both worlds. We identify several key problems in using such data sources and formulate them as NLP tasks, including text classification, role labeling, sentiment analysis, and topic modeling. Since this is an introductory paper, we present baseline results on these tasks using off-the-shelf NLP solutions, and encourage the NLP community to contribute better models in the future.", "phrases": ["bullying", "cyberbullying", "social medium", "victim"], "overall_score": 3.2788577591913373, "scores": [1.6072673377719904, 1.35601018038972, 0.5754757961856428, 0.5357844281274964], "rank_score": 1.0186344356187125} -{"id": "schuster-etal-2019-cross-lingual", "title": "Cross-lingual Transfer Learning for Multilingual Task Oriented Dialog", "abstract": "One of the first steps in the utterance interpretation pipeline of many task-oriented conversational AI systems is to identify user intents and the corresponding slots. Since data collection for machine learning models for this task is time-consuming, it is desirable to make use of existing data in a high-resource language to train models in low-resource languages. However, development of such models has largely been hindered by the lack of multilingual training data. In this paper, we present a new data set of 57k annotated utterances in English (43k), Spanish (8.6k) and Thai (5k) across the domains weather, alarm, and reminder. 
We use this data set to evaluate three different cross-lingual transfer methods: (1) translating the training data, (2) using cross-lingual pre-trained embeddings, and (3) a novel method of using a multilingual machine translation encoder as contextual word representations. We find that given several hundred training examples in the target language, the latter two methods outperform translating the training data. Further, in very low-resource settings, multilingual contextual word representations give better results than using cross-lingual static embeddings. We also compare the cross-lingual methods to using monolingual resources in the form of contextual ELMo representations and find that given just small amounts of target language data, this method outperforms all cross-lingual methods, which highlights the need for more sophisticated cross-lingual methods.", "phrases": ["dialog", "cross-lingual transfer", "task-oriented dialogue dataset"], "overall_score": 3.4289057364584874, "scores": [1.5909579225338126, 0.9001460843407152, 0.5637856519936042], "rank_score": 1.018296552956044} -{"id": "madnani-etal-2007-using", "title": "Using Paraphrases for Parameter Tuning in Statistical Machine Translation", "abstract": "Most state-of-the-art statistical machine translation systems use log-linear models, which are defined in terms of hypothesis features and weights for those features. It is standard to tune the feature weights in order to maximize a translation quality metric, using held-out test sentences and their corresponding reference translations. However, obtaining reference translations is expensive. In this paper, we introduce a new full-sentence paraphrase technique, based on English-to-English decoding with an MT system, and we demonstrate that the resulting paraphrases can be used to drastically reduce the number of human reference translations needed for parameter tuning, without a significant decrease in translation quality.", "phrases": ["paraphrase", "parameter tuning", "statistical machine translation", "summarization"], "overall_score": 2.6872730640093034, "scores": [0.9330946480775312, 0.836617958303522, 1.7589159385836513, 0.5444516929717321], "rank_score": 1.0182700594841092} -{"id": "ott-etal-2018-scaling", "title": "Scaling Neural Machine Translation", "abstract": "Sequence to sequence learning models still require several days to reach state of the art performance on large benchmark datasets using a single machine. This paper shows that reduced precision and large batch training can speed up training by nearly 5x on a single 8-GPU machine with careful tuning and implementation. On WMT'14 English-German translation, we match the accuracy of Vaswani et al. (2017) in under 5 hours when training on 8 GPUs and we obtain a new state of the art of 29.3 BLEU after training for 85 minutes on 128 GPUs. We further improve these results to 29.8 BLEU by training on the much larger Paracrawl dataset.
On the WMT'14 English-French task, we obtain a state-of-the-art BLEU of 43.2 in 8.5 hours on 128 GPUs.", "phrases": ["neural machine translation", "gradient", "large batch size", "translation quality"], "overall_score": 2.88386910163062, "scores": [2.3278259346867665, 0.5935515132331639, 0.579473332779117, 0.5706662586805321], "rank_score": 1.0178792598448947} -{"id": "damonte-etal-2017-incremental", "title": "An Incremental Parser for Abstract Meaning Representation", "abstract": "Abstract Meaning Representation (AMR) is a semantic representation for natural language that embeds annotations related to traditional tasks such as named entity recognition, semantic role labeling, word sense disambiguation and co-reference resolution. We describe a transition-based parser for AMR that parses sentences left-to-right, in linear time. We further propose a test-suite that assesses specific subtasks that are helpful in comparing AMR parsers, and show that our parser is competitive with the state of the art on the LDC2015E86 dataset and that it outperforms state-of-the-art parsers for recovering named entities and handling polarity.", "phrases": ["abstract meaning representation", "amr", "entity recognition", "transition-based parser", "node"], "overall_score": 3.3912335436741805, "scores": [0.9595529297412383, 1.5322535079070134, 1.1703875576133078, 0.85969950780776, 0.566680042339922], "rank_score": 1.0177147090818484} -{"id": "kshirsagar-etal-2018-predictive", "title": "Predictive Embeddings for Hate Speech Detection on Twitter", "abstract": "We present a neural-network based approach to classifying online hate speech in general, as well as racist and sexist speech in particular. Using pre-trained word embeddings and max/mean pooling from simple, fully-connected transformations of these embeddings, we are able to predict the occurrence of hate speech on three commonly used publicly available datasets. Our models match or outperform state of the art F1 performance on all three datasets using significantly fewer parameters and minimal feature preprocessing compared to previous methods.", "phrases": ["hate speech detection", "twitter", "word embedding"], "overall_score": 1.9801423594197423, "scores": [1.2802115376368124, 0.89192887905604, 0.8806352117929459], "rank_score": 1.0175918761619327} -{"id": "woodsend-lapata-2012-multiple", "title": "Multiple Aspect Summarization Using Integer Linear Programming", "abstract": "Multi-document summarization involves many aspects of content selection and surface realization. The summaries must be informative, succinct, grammatical, and obey stylistic writing conventions. We present a method where such individual aspects are learned separately from data (without any hand-engineering) but optimized jointly using an integer linear programme. The ILP framework allows us to combine the decisions of the expert learners and to select and rewrite source content through a mixture of objective setting, soft and hard constraints. 
Experimental results on the TAC-08 data set show that our model achieves state-of-the-art performance using ROUGE and significantly improves the informativeness of the summaries.", "phrases": ["summarization", "content selection", "different aspect", "optimization ilp model"], "overall_score": 3.0478308905782225, "scores": [1.5770712790694528, 1.045494017170959, 0.9235750305235958, 0.5234234556662889], "rank_score": 1.017390945607574} -{"id": "wiseman-etal-2017-challenges", "title": "Challenges in Data-to-Document Generation", "abstract": "Recent neural models have shown significant progress on the problem of generating short descriptive texts conditioned on a small number of database records. In this work, we suggest a slightly more difficult data-to-text generation task, and investigate how effective current approaches are on this task. In particular, we introduce a new, large-scale corpus of data records paired with descriptive documents, propose a series of extractive evaluation methods for analyzing performance, and obtain baseline results using current neural generation methods. Experiments show that these models produce fluent text, but fail to convincingly approximate human-generated documents. Moreover, even templated baselines exceed the performance of these neural models on some metrics, though copy- and reconstruction-based extensions lead to noticeable improvements.", "phrases": ["data-to-text generation", "content selection", "structured data", "summarization model", "long document"], "overall_score": 4.469755261796188, "scores": [1.6530649531998094, 1.5006871852020667, 0.8513526633372795, 0.5424153282147535, 0.53816308593184], "rank_score": 1.0171366431771498} -{"id": "krichene-etal-2021-dot", "title": "DoT: An efficient Double Transformer for NLP tasks with tables", "abstract": "Transformer-based approaches have been successfully used to obtain state-of-the-art accuracy on natural language processing (NLP) tasks with semi-structured tables. These model architectures are typically deep, resulting in slow training and inference, especially for long inputs. To improve efficiency while maintaining a high accuracy, we propose a new architecture, DoT, a double transformer model, that decomposes the problem into two sub-tasks: A shallow pruning transformer that selects the top-K tokens, followed by a deep task-specific transformer that takes as input those K tokens. Additionally, we modify the task-specific attention to incorporate the pruning scores. The two transformers are jointly trained by optimizing the task-specific loss. We run experiments on three benchmarks, including entailment and question-answering. We show that for a small drop of accuracy, DoT improves training and inference time by at least 50%. We also show that the pruning transformer effectively selects relevant tokens enabling the end-to-end model to maintain similar accuracy as slower baseline models. 
Finally, we analyse the pruning and give some insight into its impact on the task model.", "phrases": ["table", "task model", "dot"], "overall_score": 1.4097285487092404, "scores": [1.700592602675441, 0.8058788238638056, 0.5442411527943789], "rank_score": 1.0169041931112084} -{"id": "shen-etal-2021-structformer", "title": "StructFormer: Joint Unsupervised Induction of Dependency and Constituency Structure from Masked Language Modeling", "abstract": "There are two major classes of natural language grammars \u2014 the dependency grammar that models one-to-one correspondences between words and the constituency grammar that models the assembly of one or several corresponded words. While previous unsupervised parsing methods mostly focus on only inducing one class of grammars, we introduce a novel model, StructFormer, that can induce dependency and constituency structure at the same time. To achieve this, we propose a new parsing framework that can jointly generate a constituency tree and dependency graph. Then we integrate the induced dependency relations into the transformer, in a differentiable manner, through a novel dependency-constrained self-attention mechanism. Experimental results show that our model can achieve strong results on unsupervised constituency parsing, unsupervised dependency parsing, and masked language modeling at the same time.", "phrases": ["constituency structure", "self-attention mechanism", "structformer", "dependency structure", "head"], "overall_score": 2.3412845099323705, "scores": [2.271817423072626, 0.7826810848858576, 0.9474690981596668, 0.5520245289898126, 0.5300425810379757], "rank_score": 1.0168069432291875} -{"id": "garimella-etal-2017-demographic", "title": "Demographic-aware word associations", "abstract": "Variations of word associations across different groups of people can provide insights into people's psychologies and their world views. To capture these variations, we introduce the task of demographic-aware word associations. We build a new gold standard dataset consisting of word association responses for approximately 300 stimulus words, collected from more than 800 respondents of different gender (male/female) and from different locations (India/United States), and show that there are significant variations in the word associations made by these groups. We also introduce a new demographic-aware word association model based on a neural net skip-gram architecture, and show how computational methods for measuring word associations that specifically account for writer demographics can outperform generic methods that are agnostic to such information.", "phrases": ["word association", "gender", "location"], "overall_score": 1.821514875982812, "scores": [1.9664193027918508, 0.5472709317123581, 0.5361301916173452], "rank_score": 1.0166068087071847} -{"id": "bapna-firat-2019-non", "title": "Non-Parametric Adaptation for Neural Machine Translation", "abstract": "Neural Networks trained with gradient descent are known to be susceptible to catastrophic forgetting caused by parameter shift during the training process. In the context of Neural Machine Translation (NMT) this results in poor performance on heterogeneous datasets and on sub-tasks like rare phrase translation. On the other hand, non-parametric approaches are immune to forgetting, perfectly complementing the generalization ability of NMT. 
However, attempts to combine non-parametric or retrieval based approaches with NMT have only been successful on narrow domains, possibly due to over-reliance on sentence level retrieval. We propose a novel n-gram level retrieval approach that relies on local phrase level similarities, allowing us to retrieve neighbors that are useful for translation even when overall sentence similarity is low. We complement this with an expressive neural network, allowing our model to extract information from the noisy retrieved context. We evaluate our Semi-parametric NMT approach on a heterogeneous dataset composed of WMT, IWSLT, JRC-Acquis and OpenSubtitles, and demonstrate gains on all 4 evaluation sets. The Semi-parametric nature of our approach also opens the door for non-parametric domain adaptation, demonstrating strong inference-time adaptation performance on new domains without the need for any parameter updates.", "phrases": ["adaptation", "neural machine translation", "n-gram"], "overall_score": 2.1131888210411356, "scores": [0.957096042379238, 1.17517528090852, 0.9164157092902838], "rank_score": 1.0162290108593472} -{"id": "neubig-hu-2018-rapid", "title": "Rapid Adaptation of Neural Machine Translation to New Languages", "abstract": "This paper examines the problem of adapting neural machine translation systems to new, low-resourced languages (LRLs) as effectively and rapidly as possible. We propose methods based on starting with massively multilingual \u201cseed models\u201d, which can be trained ahead-of-time, and then continuing training on data related to the LRL. We contrast a number of strategies, leading to a novel, simple, yet effective method of \u201csimilar-language regularization\u201d, where we jointly train on both a LRL of interest and a similar high-resourced language to prevent over-fitting to small LRL data. Experiments demonstrate that massively multilingual models, even without any explicit adaptation, are surprisingly effective, achieving BLEU scores of up to 15.5 with no data from the LRL, and that the proposed similar-language regularization method improves over other adaptation methods by 1.7 BLEU points average over 4 LRL settings.", "phrases": ["neural machine translation", "low-resource language", "rapid adaptation", "joint vocabulary", "mnmt model"], "overall_score": 3.72092860704373, "scores": [1.5479098228319723, 0.8446488720720743, 1.611684396602327, 0.5462143472351934, 0.5278365364324119], "rank_score": 1.015658795034796} -{"id": "xia-etal-2018-zero", "title": "Zero-shot User Intent Detection via Capsule Neural Networks", "abstract": "User intent detection plays a critical role in question-answering and dialog systems. Most previous works treat intent detection as a classification problem where utterances are labeled with predefined intents. However, it is labor-intensive and time-consuming to label users' utterances as intents are diversely expressed and novel intents will continually be involved. Instead, we study the zero-shot intent detection problem, which aims to detect emerging user intents where no labeled utterances are currently available. We propose two capsule-based architectures: IntentCapsNet that extracts semantic features from utterances and aggregates them to discriminate existing intents, and IntentCapsNet-ZSL which gives IntentCapsNet the zero-shot learning ability to discriminate emerging intents via knowledge transfer from existing intents. 
Experiments on two real-world datasets show that our model not only can better discriminate diversely expressed existing intents, but is also able to discriminate emerging intents when no labeled utterances are available.", "phrases": ["intent detection", "zero-shot learning", "capsule network"], "overall_score": 2.935600843690844, "scores": [1.4618960666820067, 1.0416957831315572, 0.5433527195185147], "rank_score": 1.0156481897773595} -{"id": "deng-wiebe-2014-sentiment", "title": "Sentiment Propagation via Implicature Constraints", "abstract": "Opinions may be expressed implicitly via inference over explicit sentiments and events that positively/negatively affect entities (goodFor/badFor events). We investigate how such inferences may be exploited to improve sentiment analysis, given goodFor/badFor event information. We apply Loopy Belief Propagation to propagate sentiments among entities. The graph-based model improves over explicit sentiment classification by 10 points in precision and, in an evaluation of the model itself, we find it has an 89% chance of propagating sentiments correctly.", "phrases": ["badfor event", "sentiment propagation", "agent", "generalized set", "theme"], "overall_score": 3.0426020581687268, "scores": [2.2428854577458703, 0.8797787020635738, 0.874753779502195, 0.5479044527248256, 0.5329052003096497], "rank_score": 1.0156455184692228} -{"id": "li-etal-2019-biomedical", "title": "Biomedical Event Extraction based on Knowledge-driven Tree-LSTM", "abstract": "Event extraction for the biomedical domain is more challenging than that in the general news domain since it requires broader acquisition of domain-specific knowledge and deeper understanding of complex contexts. To better encode contextual information and external background knowledge, we propose a novel knowledge base (KB)-driven tree-structured long short-term memory networks (Tree-LSTM) framework, incorporating two new types of features: (1) dependency structures to capture wide contexts; (2) entity properties (types and category descriptions) from external ontologies via entity linking. We evaluate our approach on the BioNLP shared task with the Genia dataset and achieve a new state-of-the-art result. In addition, both quantitative and qualitative studies demonstrate the advancement of the Tree-LSTM and the external knowledge representation for biomedical event extraction.", "phrases": ["knowledge base", "biomedical event extraction", "tree-lstm model", "gene ontology"], "overall_score": 1.8196258342672116, "scores": [2.078097563353961, 0.8612558278947623, 0.5693770306125753, 0.5534796359455388], "rank_score": 1.0155525144517095} -{"id": "vulic-moens-2013-study", "title": "A Study on Bootstrapping Bilingual Vector Spaces from Non-Parallel Data (and Nothing Else)", "abstract": "We present a new language pair agnostic approach to inducing bilingual vector spaces from non-parallel data without any other resource in a bootstrapping fashion. The paper systematically introduces and describes all key elements of the bootstrapping procedure: (1) starting point or seed lexicon, (2) the confidence estimation and selection of new dimensions of the space, and (3) convergence. We test the quality of the induced bilingual vector spaces, and analyze the influence of the different components of the bootstrapping approach in the task of bilingual lexicon extraction (BLE) for two language pairs.
Results reveal that, contrary to conclusions from prior work, the seeding of the bootstrapping process has a heavy impact on the quality of the learned lexicons. We also show that our approach outperforms the best performing fully corpus-based BLE methods on these test sets.", "phrases": ["vector space", "non-parallel data", "seed lexicon", "cross-lingual signal"], "overall_score": 2.6038519726001366, "scores": [1.785117540344596, 1.1276882248470808, 0.5885325923648611, 0.5593296864739308], "rank_score": 1.0151670110076172} -{"id": "koehn-etal-2019-findings", "title": "Findings of the WMT 2019 Shared Task on Parallel Corpus Filtering for Low-Resource Conditions", "abstract": "Following the WMT 2018 Shared Task on Parallel Corpus Filtering, we posed the challenge of assigning sentence-level quality scores for very noisy corpora of sentence pairs crawled from the web, with the goal of sub-selecting 2% and 10% of the highest-quality data to be used to train machine translation systems. This year, the task tackled the low resource condition of Nepali-English and Sinhala-English. Eleven participants from companies, national research labs, and universities participated in this task.", "phrases": ["shared task", "parallel corpus filtering", "low-resource condition", "low resource language"], "overall_score": 2.110916430404445, "scores": [1.9217995986944338, 0.8201599425735745, 0.7848201788718373, 0.5337651676941881], "rank_score": 1.0151362219585085} -{"id": "kang-etal-2020-dynamic", "title": "Dynamic Context Selection for Document-level Neural Machine Translation via Reinforcement Learning", "abstract": "Document-level neural machine translation has yielded attractive improvements. However, the majority of existing methods roughly use all context sentences in a fixed scope. They neglect the fact that different source sentences need different sizes of context. To address this problem, we propose an effective approach to select dynamic context so that the document-level translation model can utilize the more useful selected context sentences to produce better translations. Specifically, we introduce a selection module that is independent of the translation module to score each candidate context sentence. Then, we propose two strategies to explicitly select a variable number of context sentences and feed them into the translation module. We train the two modules end-to-end via reinforcement learning. A novel reward is proposed to encourage the selection and utilization of dynamic context sentences. Experiments demonstrate that our approach can select adaptive context sentences for different source sentences, and significantly improves the performance of document-level translation methods.", "phrases": ["neural machine translation", "reinforcement learning", "context sentence", "dynamic context selection"], "overall_score": 2.433640636641736, "scores": [1.7943040655688793, 0.8860567044568135, 0.8315891632000569, 0.5476779650379633], "rank_score": 1.0149069745659283} -{"id": "song-etal-2014-applying", "title": "Applying Argumentation Schemes for Essay Scoring", "abstract": "Under the framework of the argumentation scheme theory (Walton, 1996), we developed annotation protocols for an argumentative writing task to support identification and classification of the arguments being made in essays.
Each annotation protocol defined argumentation schemes (i.e., reasoning patterns) in a given writing prompt and listed questions to help evaluate an argument based on these schemes, to make the argument structure in a text explicit and classifiable. We report findings based on an annotation of 600 essays. Most annotation categories were applied reliably by human annotators, and some categories significantly contributed to essay score. An NLP system to identify sentences containing scheme-relevant critical questions was developed based on the human annotations.", "phrases": ["argumentation scheme", "essay scoring", "annotator", "student", "writing support system"], "overall_score": 2.9334043821618487, "scores": [1.9350079227209496, 1.1709696003955983, 0.8481996860078245, 0.5771437156322534, 0.5431204064460545], "rank_score": 1.014888266240536} -{"id": "anthonio-etal-2022-clarifying", "title": "Clarifying Implicit and Underspecified Phrases in Instructional Text", "abstract": "Natural language inherently consists of implicit and underspecified phrases, which represent potential sources of misunderstanding. In this paper, we present a data set of such phrases in English from instructional texts together with multiple possible clarifications. Our data set, henceforth called CLAIRE, is based on a corpus of revision histories from wikiHow, from which we extract human clarifications that resolve an implicit or underspecified phrase. We show how language modeling can be used to generate alternate clarifications, which may or may not be compatible with the human clarification. Based on plausibility judgements for each clarification, we define the task of distinguishing between plausible and implausible clarifications. We provide several baseline models for this task and analyze to what extent different clarifications represent multiple readings as a first step to investigate misunderstandings caused by implicit/underspecified language in instructional texts.", "phrases": ["instructional text", "clarification", "underspecified language"], "overall_score": 1.6332236885960432, "scores": [1.977001388822891, 0.5360949087854261, 0.5312405034167742], "rank_score": 1.0147789336750304} -{"id": "sassano-kurohashi-2010-using", "title": "Using Smaller Constituents Rather Than Sentences in Active Learning for Japanese Dependency Parsing", "abstract": "We investigate active learning methods for Japanese dependency parsing. We propose active learning methods of using partial dependency relations in a given sentence for parsing and evaluate their effectiveness empirically. Furthermore, we utilize syntactic constraints of Japanese to obtain more labeled examples from precious labeled ones that annotators give. Experimental results show that our proposed methods improve considerably the learning curve of Japanese dependency parsing. In order to achieve an accuracy of over 88.3%, one of our methods requires only 34.4% of labeled examples as compared to passive learning.", "phrases": ["active learning", "japanese dependency parsing", "partial annotation"], "overall_score": 2.521490106544171, "scores": [1.2979970800355374, 0.8939564850772261, 0.8522132329751548], "rank_score": 1.014722266029306} -{"id": "hathout-sajous-2016-wiktionnaires", "title": "Wiktionnaire's Wikicode GLAWIfied: a Workable French Machine-Readable Dictionary", "abstract": "GLAWI is a free, large-scale and versatile Machine-Readable Dictionary (MRD) that has been extracted from the French language edition of Wiktionary, called Wiktionnaire. 
In (Sajous and Hathout, 2015), we introduced GLAWI, gave the rationale behind the creation of this lexicographic resource and described the extraction process, focusing on the conversion and standardization of the heterogeneous data provided by this collaborative dictionary. In the current article, we describe the content of GLAWI and illustrate how it is structured. We also suggest various applications, ranging from linguistic studies, NLP applications to psycholinguistic experimentation. They all can take advantage of the diversity of the lexical knowledge available in GLAWI. Besides this diversity and extensive lexical coverage, GLAWI is also remarkable because it is the only free lexical resource of contemporary French that contains definitions. This unique material opens way to the renewal of MRD-based methods, notably the automated extraction and acquisition of semantic relations.", "phrases": ["glawi", "creation", "wiktionnaire"], "overall_score": 1.114425168198728, "scores": [1.9464335519140046, 0.5600332360579473, 0.5367137217321287], "rank_score": 1.0143935032346934} -{"id": "wang-etal-2017-sentence", "title": "Sentence Embedding for Neural Machine Translation Domain Adaptation", "abstract": "Although new corpora are becoming increasingly available for machine translation, only those that belong to the same or similar domains are typically able to improve translation performance. Recently Neural Machine Translation (NMT) has become prominent in the field. However, most of the existing domain adaptation methods only focus on phrase-based machine translation. In this paper, we exploit the NMT's internal embedding of the source sentence and use the sentence embedding similarity to select the sentences which are close to in-domain data. The empirical adaptation results on the IWSLT English-French and NIST Chinese-English tasks show that the proposed methods can substantially improve NMT performance by 2.4-9.0 BLEU points, outperforming the existing state-of-the-art baseline by 2.3-4.5 BLEU points.", "phrases": ["domain adaptation", "translation performance", "sentence embedding"], "overall_score": 2.6761075657419244, "scores": [1.5080228532615143, 0.9361006340445264, 0.5979940927603996], "rank_score": 1.0140391933554802} -{"id": "plank-agic-2018-distant", "title": "Distant Supervision from Disparate Sources for Low-Resource Part-of-Speech Tagging", "abstract": "a cross-lingual neural part-of-speech tagger that learns from disparate sources of distant supervision, and realistically scales to hundreds of low-resource languages. The model exploits annotation projection, instance selection, tag dictionaries, morphological lexicons, and distributed representations, all in a uniform framework. The approach is simple, yet surprisingly effective, resulting in a new state of the art without access to any gold annotated data.", "phrases": ["low-resource language", "annotation projection", "distant supervision", "pos tagger"], "overall_score": 2.108508562338962, "scores": [1.767885062255831, 1.176220545132821, 0.5844005761251381, 0.5274069452306618], "rank_score": 1.013978282186113} -{"id": "pareti-2012-database", "title": "A Database of Attribution Relations", "abstract": "The importance of attribution is becoming evident due to its relevance in particular for Opinion Analysis and Information Extraction applications. 
Attribution would make it possible not only to identify different perspectives on a given topic or retrieve the statements of a specific source of interest, but also to select more relevant and reliable information. However, the scarce and partial resources available to date to conduct attribution studies have determined that only a portion of attribution structures has been identified and addressed. This paper presents the collection and further annotation of a database of over 9800 attribution relations from the Penn Discourse TreeBank (PDTB). The aim is to build a large and complete resource that fills a key gap in the field and enables the training and testing of robust attribution extraction systems.", "phrases": ["database", "attribution", "perspective"], "overall_score": 2.1073000721691852, "scores": [0.8923839408267341, 1.2271929170030356, 0.9206145059536687], "rank_score": 1.0133971212611461} -{"id": "li-etal-2010-learning", "title": "Learning the Scope of Negation via Shallow Semantic Parsing", "abstract": "In this paper we present a simplified shallow semantic parsing approach to learning the scope of negation (SoN). This is done by formulating it as a shallow semantic parsing problem with the negation signal as the predicate and the negation scope as its arguments. Our parsing approach to SoN learning differs from the state-of-the-art chunking ones in two aspects. First, we extend SoN learning from the chunking level to the parse tree level, where structured syntactic information is available. Second, we focus on determining whether a constituent, rather than a word, is negated or not, via a simplified shallow semantic parsing framework. Evaluation on the BioScope corpus shows that structured syntactic information is effective in capturing the domination relationship between a negation signal and its dominated arguments. It also shows that our parsing approach much outperforms the state-of-the-art chunking ones.", "phrases": ["scope", "negation", "shallow semantic parsing", "bioscope corpus"], "overall_score": 1.9714080730467953, "scores": [1.7000618023798528, 0.9444227300944845, 0.8754215334650721, 0.5325072975529603], "rank_score": 1.0131033408730925} -{"id": "wallace-etal-2015-sparse", "title": "Sparse, Contextually Informed Models for Irony Detection: Exploiting User Communities, Entities and Sentiment", "abstract": "Automatically detecting verbal irony (roughly, sarcasm) in online content is important for many practical applications (e.g., sentiment detection), but it is difficult. Previous approaches have relied predominantly on signal gleaned from word counts and grammatical cues. But such approaches fail to exploit the context in which comments are embedded. We thus propose a novel strategy for verbal irony classification that exploits contextual features, specifically by combining noun phrases and sentiment extracted from comments with the forum type (e.g., conservative or liberal) to which they were posted. We show that this approach improves verbal irony classification performance. Furthermore, because this method generates a very large feature space (and we expect predictive contextual features to be strong but few), we propose a mixed regularization strategy that places a sparsity-inducing \u21131 penalty on the contextual feature weights on top of the \u21132 penalty applied to all model coefficients.
This increases model sparsity and reduces the variance of model performance.", "phrases": ["irony", "noun phrase", "sarcasm detection"], "overall_score": 2.8679980823423117, "scores": [1.2855726567674655, 0.8877149064336802, 0.8635448959841862], "rank_score": 1.0122774863951105} -{"id": "khapra-etal-2010-everybody", "title": "Everybody loves a rich cousin: An empirical study of transliteration through bridge languages", "abstract": "Most state of the art approaches for machine transliteration are data driven and require significant parallel names corpora between languages. As a result, developing transliteration functionality among n languages could be a resource intensive task requiring parallel names corpora in the order of nC2. In this paper, we explore ways of reducing this high resource requirement by leveraging the available parallel data between subsets of the n languages, transitively. We propose, and show empirically, that reasonable quality transliteration engines may be developed between two languages, X and Y, even when no direct parallel names data exists between them, but only transitively through language Z. Such systems alleviate the need for O(nC2) corpora, significantly. In addition we show that the performance of such transitive transliteration systems is in par with direct transliteration systems, in practical applications, such as CLIR systems.", "phrases": ["bridge language", "machine transliteration", "pivot language"], "overall_score": 2.330426357018806, "scores": [1.2835468179545375, 1.2047007313764906, 0.5480263726744664], "rank_score": 1.012091307335165} -{"id": "yao-etal-2012-probabilistic", "title": "Probabilistic Databases of Universal Schema", "abstract": "In data integration we transform information from a source into a target schema. A general problem in this task is loss of fidelity and coverage: the source expresses more knowledge than can fit into the target schema, or knowledge that is hard to fit into any schema at all. This problem is taken to an extreme in information extraction (IE) where the source is natural language. To address this issue, one can either automatically learn a latent schema emergent in text (a brittle and ill-defined task), or manually extend schemas. We propose instead to store data in a probabilistic database of universal schema. This schema is simply the union of all source schemas, and the probabilistic database learns how to predict the cells of each source relation in this union. For example, the database could store Freebase relations and relations that correspond to natural language surface patterns. The database would learn to predict what freebase relations hold true based on what surface patterns appear, and vice versa. We describe an analogy between such databases and collaborative filtering models, and use it to implement our paradigm with probabilistic PCA, a scalable and effective collaborative filtering method.", "phrases": ["universal schema", "information extraction", "freebase entity"], "overall_score": 1.8130945217219214, "scores": [1.944465282078724, 0.5524772564747884, 0.5387794199910542], "rank_score": 1.0119073195148556} -{"id": "kwong-tsou-2007-extending", "title": "Extending a Thesaurus in the Pan-Chinese Context", "abstract": "In this paper, we address a unique problem in Chinese language processing and report on our study on extending a Chinese thesaurus with region-specific words, mostly from the financial domain, from various Chinese speech communities. 
With the larger goal of automatically constructing a Pan-Chinese lexical resource, this work aims at taking an existing semantic classificatory structure as leverage and incorporating new words into it. In particular, it is important to see if the classification could accommodate new words from heterogeneous data sources, and whether simple similarity measures and clustering methods could cope with such variation. We use the cosine function for similarity and test it on automatically classifying 120 target words from four regions, using different datasets for the extraction of feature vectors. The automatic classification results were evaluated against human judgement, and the performance was encouraging, with accuracy reaching over 85% in some cases. Thus while human judgement is not straightforward and it is difficult to create a Pan-Chinese lexicon manually, it is observed that combining simple clustering methods with the appropriate data sources appears to be a promising approach toward its automatic construction.", "phrases": ["thesaurus", "classificatory structure", "new word"], "overall_score": 1.1114266279954401, "scores": [1.975062978959827, 0.536249591364706, 0.5236797726325957], "rank_score": 1.011664114319043} -{"id": "vulic-etal-2020-multi", "title": "Multi-SimLex: A Large-Scale Evaluation of Multilingual and Crosslingual Lexical Semantic Similarity", "abstract": "We introduce Multi-SimLex, a large-scale lexical resource and evaluation benchmark covering data sets for 12 typologically diverse languages, including major languages (e.g., Mandarin Chinese, Spanish, Russian) as well as less-resourced ones (e.g., Welsh, Kiswahili). Each language data set is annotated for the lexical relation of semantic similarity and contains 1,888 semantically aligned concept pairs, providing a representative coverage of word classes (nouns, verbs, adjectives, adverbs), frequency ranks, similarity intervals, lexical fields, and concreteness levels. Additionally, owing to the alignment of concepts across languages, we provide a suite of 66 crosslingual semantic similarity data sets. Because of its extensive size and language coverage, Multi-SimLex provides entirely novel opportunities for experimental evaluation and analysis. On its monolingual and crosslingual benchmarks, we evaluate and analyze a wide array of recent state-of-the-art monolingual and crosslingual representation models, including static and contextualized word embeddings (such as fastText, monolingual and multilingual BERT, XLM), externally informed lexical representations, as well as fully unsupervised and (weakly) supervised crosslingual word embeddings. We also present a step-by-step data set creation protocol for creating consistent, Multi-Simlex\u2013style resources for additional languages. We make these contributions\u2014the public release of Multi-SimLex data sets, their creation protocol, strong baseline results, and in-depth analyses which can be helpful in guiding future developments in multilingual lexical semantics and representation learning\u2014available via a Web site that will encourage community effort in further expansion of Multi-Simlex to many more languages.
Such a large-scale semantic resource could inspire significant further advances in NLP across languages.", "phrases": ["major language", "concept pair", "coverage", "word embedding", "multi-simlex"], "overall_score": 2.222820961933788, "scores": [2.8348921172020662, 0.5746219020754512, 0.5638115563678586, 0.5616431082225364, 0.5232783994334225], "rank_score": 1.011649416660267} -{"id": "yu-siskind-2013-grounded", "title": "Grounded Language Learning from Video Described with Sentences", "abstract": "We present a method that learns representations for word meanings from short video clips paired with sentences. Unlike prior work on learning language from symbolic input, our input consists of video of people interacting with multiple complex objects in outdoor environments. Unlike prior computer-vision approaches that learn from videos with verb labels or images with noun labels, our labels are sentences containing nouns, verbs, prepositions, adjectives, and adverbs. The correspondence between words and concepts in the video is learned in an unsupervised fashion, even when the video depicts simultaneous events described by multiple sentences or when different aspects of a single event are described with multiple sentences. The learned word meanings can be subsequently used to automatically generate descriptions of new video.", "phrases": ["video clip", "object", "environment", "noun", "language acquisition"], "overall_score": 3.0296812751671545, "scores": [1.3726921145444906, 1.3176515444971133, 0.8883780466499915, 0.8610343009517603, 0.61690626907611], "rank_score": 1.0113324551438931} -{"id": "caglayan-etal-2021-cross", "title": "Cross-lingual Visual Pre-training for Multimodal Machine Translation", "abstract": "Pre-trained language models have been shown to improve performance in many natural language tasks substantially. Although the early focus of such models was single language pre-training, recent advances have resulted in cross-lingual and visual pre-training methods. In this paper, we combine these two approaches to learn visually-grounded cross-lingual representations. Specifically, we extend the translation language modelling (Lample and Conneau, 2019) with masked region classification and perform pre-training with three-way parallel vision & language corpora. We show that when fine-tuned for multimodal machine translation, these models obtain state-of-the-art performance. We also provide qualitative insights into the usefulness of the learned grounded representations.", "phrases": ["multimodal machine translation", "visual pre-training method", "cross-lingual representation"], "overall_score": 1.8117432689600264, "scores": [1.9038165613917495, 0.5655628903784559, 0.5640800611976503], "rank_score": 1.0111531709892851} -{"id": "szekely-etal-2012-winktalk", "title": "WinkTalk: a demonstration of a multimodal speech synthesis platform linking facial expressions to expressive synthetic voices", "abstract": "This paper describes a demonstration of the WinkTalk system, which is a speech synthesis platform using expressive synthetic voices. With the help of a webcamera and facial expression analysis, the system allows the user to control the expressive features of the synthetic speech for a particular utterance with their facial expressions. Based on a personalised mapping between three expressive synthetic voices and the user's facial expressions, the system selects a voice that matches their face at the moment of sending a message.
The WinkTalk system is an early research prototype that aims to demonstrate that facial expressions can be used as a more intuitive control over expressive speech synthesis than manual selection of voice types, thereby contributing to an improved communication experience for users of speech-generating devices.", "phrases": ["demonstration", "speech synthesis platform", "facial expression", "expressive feature"], "overall_score": 1.1107988830320357, "scores": [1.8006506193128207, 0.923367555545, 0.7807114848319014, 0.5396412052264175], "rank_score": 1.0110927162290348} -{"id": "zou-etal-2018-adversarial", "title": "Adversarial Feature Adaptation for Cross-lingual Relation Classification", "abstract": "Relation Classification aims to classify the semantic relationship between two marked entities in a given sentence. It plays a vital role in a variety of natural language processing applications. Most existing methods focus on exploiting mono-lingual data, e.g., in English, due to the lack of annotated data in other languages. In this paper, we come up with a feature adaptation approach for cross-lingual relation classification, which employs a generative adversarial network (GAN) to transfer feature representations from one language with rich annotated data to another language with scarce annotated data. Such a feature adaptation approach enables feature imitation via the competition between a relation classification network and a rival discriminator. Experimental results on the ACE 2005 multilingual training corpus, treating English as the source language and Chinese as the target, demonstrate the effectiveness of our proposed approach, yielding an improvement of 5.7% over the state-of-the-art.", "phrases": ["cross-lingual relation classification", "feature adaptation approach", "generative adversarial network"], "overall_score": 1.6271515502274243, "scores": [1.95937156355998, 0.5394942898279347, 0.5341524525494685], "rank_score": 1.0110061019791277} -{"id": "wang-etal-2011-new", "title": "A New Unsupervised Approach to Word Segmentation", "abstract": "This article proposes ESA, a new unsupervised approach to word segmentation. ESA is an iterative process consisting of 3 phases: Evaluation, Selection, and Adjustment. In Evaluation, both certainty and uncertainty of character sequence co-occurrence in corpora are considered as the statistical evidence supporting goodness measurement. Additionally, the statistical data of character sequences with various lengths become comparable with each other by using a simple process called Balancing. In Selection, a local maximum strategy is adopted without thresholds, and the strategy can be implemented with dynamic programming. In Adjustment, a part of the statistical data is updated to improve successive results. In our experiment, ESA was evaluated on the SIGHAN Bakeoff-2 data set. The results suggest that ESA is effective on Chinese corpora. It is noteworthy that the F-measures of the results are basically monotone increasing and can rapidly converge to relatively high values.
Furthermore, the empirical formulae based on the results can be used to predict the parameter in ESA, avoiding the usually time-consuming step of parameter estimation.", "phrases": ["word segmentation", "esa", "iterative process", "adjustment"], "overall_score": 1.401447273511513, "scores": [2.1208781288994554, 0.8506764266419258, 0.549541964484911, 0.522625543098545], "rank_score": 1.0109305157812094} -{"id": "bitton-etal-2021-automatic", "title": "Automatic Generation of Contrast Sets from Scene Graphs: Probing the Compositional Consistency of GQA", "abstract": "Recent works have shown that supervised models often exploit data artifacts to achieve good test scores while their performance severely degrades on samples outside their training distribution. Contrast sets (Gardner et al., 2020) quantify this phenomenon by perturbing test samples in a minimal way such that the output label is modified. While most contrast sets were created manually, requiring intensive annotation effort, we present a novel method which leverages rich semantic input representation to automatically generate contrast sets for the visual question answering task. Our method computes the answers to perturbed questions, thus vastly reducing annotation cost and enabling thorough evaluation of models' performance on various semantic aspects (e.g., spatial or relational reasoning). We demonstrate the effectiveness of our approach on the GQA dataset and its semantic scene graph image representation. We find that, despite GQA's compositionality and carefully balanced label distribution, two high-performing models drop 13-17% in accuracy compared to the original test set. Finally, we show that our automatic perturbation can be applied to the training set to mitigate the degradation in performance, opening the door to more robust models.", "phrases": ["contrast set", "scene graph", "compositional consistency", "gqa"], "overall_score": 1.8112399919238555, "scores": [1.7845344850976288, 0.8466506976633804, 0.8195035951388174, 0.5928003690093689], "rank_score": 1.010872286727299} -{"id": "bird-etal-2008-acl", "title": "The ACL Anthology Reference Corpus: A Reference Dataset for Bibliographic Research in Computational Linguistics", "abstract": "The ACL Anthology is a digital archive of conference and journal papers in natural language processing and computational linguistics. Its primary purpose is to serve as a reference repository of research results, but we believe that it can also be an object of study and a platform for research in its own right. We describe an enriched and standardized reference corpus derived from the ACL Anthology that can be used for research in scholarly document processing. This corpus, which we call the ACL Anthology Reference Corpus (ACL ARC), brings together the recent activities of a number of research groups around the world. Our goal is to make the corpus widely available, and to encourage other researchers to use it as a standard testbed for experiments in both bibliographic and bibliometric research.", "phrases": ["acl anthology", "reference corpus", "research result"], "overall_score": 3.2122033189082293, "scores": [1.8825178401501674, 0.6110265545274395, 0.5386918326331286], "rank_score": 1.0107454091035786} -{"id": "filippova-strube-2008-sentence", "title": "Sentence Fusion via Dependency Graph Compression", "abstract": "We present a novel unsupervised sentence fusion method which we apply to a corpus of biographies in German.
Given a group of related sentences, we align their dependency trees and build a dependency graph. Using integer linear programming we compress this graph to a new tree, which we then linearize. We use GermaNet and Wikipedia for checking semantic compatibility of co-arguments. In an evaluation with human judges our method outperforms the fusion approach of Barzilay & McKeown (2005) with respect to readability.", "phrases": ["biography", "integer linear programming", "sentence fusion", "cluster", "fusion system"], "overall_score": 2.976054190299986, "scores": [2.539066268436916, 0.8549418189012412, 0.5561623706151335, 0.5534968228987921, 0.5500190263820652], "rank_score": 1.0107372614468295} -{"id": "yancheva-etal-2015-using", "title": "Using linguistic features longitudinally to predict clinical scores for Alzheimer's disease and related dementias", "abstract": "We use a set of 477 lexicosyntactic, acoustic, and semantic features extracted from 393 speech samples in DementiaBank to predict clinical MMSE scores, an indicator of the severity of cognitive decline associated with dementia. We use a bivariate dynamic Bayes net to represent the longitudinal progression of observed linguistic features and MMSE scores over time, and obtain a mean absolute error (MAE) of 3.83 in predicting MMSE, comparable to within-subject interrater standard deviation of 3.9 to 4.8 [1]. When focusing on individuals with more longitudinal samples, we improve MAE to 2.91, which suggests the importance of longitudinal data collection. Index Terms: Alzheimer\u2019s disease, dementia, Mini-Mental State Examination (MMSE), dynamic Bayes network, feature selection", "phrases": ["linguistic feature", "dementia", "cognitive impairment"], "overall_score": 1.8108138864178254, "scores": [1.7134447014188983, 0.7980863170994561, 0.5203723996307005], "rank_score": 1.0106344727163517} -{"id": "geng-etal-2018-adaptive", "title": "Adaptive Multi-pass Decoder for Neural Machine Translation", "abstract": "Although end-to-end neural machine translation (NMT) has achieved remarkable progress in recent years, the idea of adopting a multi-pass decoding mechanism into conventional NMT is not well explored. In this paper, we propose a novel architecture called adaptive multi-pass decoder, which introduces a flexible multi-pass polishing mechanism to extend the capacity of NMT via reinforcement learning. More specifically, we adopt an extra policy network to automatically choose a suitable and effective number of decoding passes, according to the complexity of source sentences and the quality of the generated translations. Extensive experiments on Chinese-English translation demonstrate the effectiveness of our proposed adaptive multi-pass decoder upon the conventional NMT with a significant improvement of about 1.55 BLEU.", "phrases": ["neural machine translation", "mechanism", "source sentence", "adaptive multi-pass decoder"], "overall_score": 1.8106432796459566, "scores": [2.0026132561325567, 0.9459290880490346, 0.5516721551333279, 0.5419425217411211], "rank_score": 1.01053925526401} -{"id": "labutov-lipson-2012-humor", "title": "Humor as Circuits in Semantic Networks", "abstract": "This work presents a first step toward a general implementation of the Semantic-Script Theory of Humor (SSTH). Of the scarce amount of research in computational humor, none has focused on humor generation beyond simple puns and punning riddles.
We propose an algorithm for mining simple humorous scripts from a semantic network (ConceptNet) by specifically searching for dual scripts that jointly maximize overlap and incongruity metrics in line with Raskin's Semantic-Script Theory of Humor. Initial results show that a more relaxed constraint of this form is capable of generating humor of deeper semantic content than wordplay riddles. We evaluate these metrics through user-assessed quality of the generated two-liners.", "phrases": ["riddle", "humor", "semantic script theory"], "overall_score": 2.220267610704416, "scores": [1.9053885317712074, 0.5985371343795383, 0.5275363431575594], "rank_score": 1.0104873364361018} -{"id": "rosti-etal-2007-improved", "title": "Improved Word-Level System Combination for Machine Translation", "abstract": "Recently, confusion network decoding has been applied in machine translation system combination. Due to errors in the hypothesis alignment, decoding may result in ungrammatical combination outputs. This paper describes an improved confusion network based method to combine outputs from multiple MT systems. In this approach, arbitrary features may be added log-linearly into the objective function, thus allowing language model expansion and re-scoring. Also, a novel method to automatically select the hypothesis which other hypotheses are aligned against is proposed. A generic weight tuning algorithm may be used to optimize various automatic evaluation metrics including TER, BLEU and METEOR. The experiments using the 2005 Arabic to English and Chinese to English NIST MT evaluation tasks show significant improvements in BLEU scores compared to earlier confusion network decoding based methods.", "phrases": ["machine translation", "hypothesis", "system combination method"], "overall_score": 3.4348704612056826, "scores": [1.9069771587789377, 0.5861074141053174, 0.5366165080277896], "rank_score": 1.0099003603040149} -{"id": "shah-specia-2014-quality", "title": "Quality estimation for translation selection", "abstract": "We describe experiments on quality estimation to select the best translation among multiple options for a given source sentence. We consider a realistic and challenging setting where the translation systems used are unknown, and no relative quality assessments are available for the training of prediction models. Our findings indicate that prediction errors are higher in this blind setting. However, these errors do not have a negative impact on performance when the predictions are used to select the best translation, compared to non-blind settings. This holds even when test conditions (text domains, MT systems) are different from model building conditions. In addition, we experiment with quality prediction for translations produced by both translation systems and human translators.
Although the latter are on average of much higher quality, we show that automatically distinguishing the two types of translation is not a trivial problem.", "phrases": ["option", "source sentence", "quality estimation"], "overall_score": 1.3997503376826468, "scores": [1.8958595336199657, 0.5917843556574319, 0.5414754167087122], "rank_score": 1.0097064353287033} -{"id": "goodman-etal-2016-noise", "title": "Noise reduction and targeted exploration in imitation learning for Abstract Meaning Representation parsing", "abstract": "Semantic parsers map natural language statements into meaning representations, and must abstract over syntactic phenomena, resolve anaphora, and identify word senses to eliminate ambiguous interpretations. Abstract meaning representation (AMR) is a recent example of one such semantic formalism which, similar to a dependency parse, utilizes a graph to represent relationships between concepts (Banarescu et al., 2013). As with dependency parsing, transition-based systems are a common approach to this problem. However, when trained in the traditional manner these systems are susceptible to the accumulation of errors when they find undesirable states during greedy decoding. Imitation learning algorithms have been shown to help these systems recover from such errors. To effectively use these methods for AMR parsing we find it highly beneficial to introduce two novel extensions: noise reduction and targeted exploration. The former mitigates the noise in the feature representation, a result of the complexity of the task. The latter targets the exploration steps of imitation learning towards areas which are likely to provide the most information in the context of a large action-space. We achieve state-of-the-art results, and improve upon standard transition-based parsing by 4.7 F1 points.", "phrases": ["exploration", "imitation", "amr", "dependency tree"], "overall_score": 2.218472852401618, "scores": [1.8849996712684365, 0.7955139135804936, 0.8329655996561376, 0.5252028424202972], "rank_score": 1.0096705067313412} -{"id": "chaimongkol-etal-2014-corpus", "title": "Corpus for Coreference Resolution on Scientific Papers", "abstract": "The ever-growing number of published scientific papers prompts the need for automatic knowledge extraction to help scientists keep up with the state-of-the-art in their respective fields. To construct a good knowledge extraction system, annotated corpora in the scientific domain are required to train machine learning models. As described in this paper, we have constructed an annotated corpus for coreference resolution in multiple scientific domains, based on an existing corpus. We have modified the annotation scheme from Message Understanding Conference to better suit scientific texts. Then we applied that to the corpus. The annotated corpus is then compared with corpora in general domains in terms of distribution of resolution classes and performance of the Stanford Dcoref coreference resolver.
Through these comparisons, we demonstrate quantitatively that our manually annotated corpus differs from a general-domain corpus, suggesting deep differences between general-domain and scientific texts and indicating that coreference resolution for the two may call for different approaches.", "phrases": ["coreference resolution", "scientific paper", "annotated corpus"], "overall_score": 1.1088371898625358, "scores": [1.880783783699901, 0.5743674855895522, 0.5727700491771969], "rank_score": 1.00930710615555} -{"id": "yin-etal-2016-abcnn", "title": "ABCNN: Attention-Based Convolutional Neural Network for Modeling Sentence Pairs", "abstract": "How to model a pair of sentences is a critical issue in many NLP tasks such as answer selection (AS), paraphrase identification (PI) and textual entailment (TE). Most prior work (i) deals with one individual task by fine-tuning a specific system; (ii) models each sentence's representation separately, rarely considering the impact of the other sentence; or (iii) relies fully on manually designed, task-specific linguistic features. This work presents a general Attention Based Convolutional Neural Network (ABCNN) for modeling a pair of sentences. We make three contributions. (i) The ABCNN can be applied to a wide variety of tasks that require modeling of sentence pairs. (ii) We propose three attention schemes that integrate mutual influence between sentences into CNNs; thus, the representation of each sentence takes into consideration its counterpart. These interdependent sentence pair representations are more powerful than isolated sentence representations. (iii) ABCNNs achieve state-of-the-art performance on AS, PI and TE tasks. We release code at: .", "phrases": ["convolutional neural network", "sentence pair", "cnns", "attention-based cnn model"], "overall_score": 3.7478468873684365, "scores": [1.4028952606562055, 1.1629301551226068, 0.9416102572952639, 0.5294827656134984], "rank_score": 1.0092296096718938} -{"id": "bilmes-kirchhoff-2003-factored", "title": "Factored Language Models and Generalized Parallel Backoff", "abstract": "We introduce factored language models (FLMs) and generalized parallel backoff (GPB). An FLM represents words as bundles of features (e.g., morphological classes, stems, data-driven clusters, etc.), and induces a probability model covering sequences of bundles rather than just words. GPB extends standard backoff to general conditional probability tables where variables might be heterogeneous types, where no obvious natural (temporal) backoff order exists, and where multiple dynamic backoff strategies are allowed. These methodologies were implemented during the JHU 2002 workshop as extensions to the SRI language modeling toolkit. This paper provides initial perplexity results on both CallHome Arabic and on Penn Treebank Wall Street Journal articles. Significantly, FLMs with GPB can produce bigrams with significantly lower perplexity, sometimes lower than highly-optimized baseline trigrams.
In a multi-pass speech recognition context, where bigrams are used to create first-pass bigram lattices or N-best lists, these results are highly relevant.", "phrases": ["language model", "perplexity", "research work"], "overall_score": 2.588318231613794, "scores": [1.9386035599889317, 0.5484847602858741, 0.5402442359227769], "rank_score": 1.009110852065861} -{"id": "marcheggiani-perez-beltrachini-2018-deep", "title": "Deep Graph Convolutional Encoders for Structured Data to Text Generation", "abstract": "Most previous work on neural text generation from graph-structured data relies on standard sequence-to-sequence methods. These approaches linearise the input graph to be fed to a recurrent neural network. In this paper, we propose an alternative encoder based on graph convolutional networks that directly exploits the input structure. We report results on two graph-to-sequence datasets that empirically show the benefits of explicitly encoding the input graph structure.", "phrases": ["text generation", "input graph", "convolutional network"], "overall_score": 2.3234148187092396, "scores": [1.888325522121251, 0.5866721873047717, 0.5521409953869787], "rank_score": 1.009046234937667} -{"id": "ethayarajh-etal-2019-understanding", "title": "Understanding Undesirable Word Embedding Associations", "abstract": "Word embeddings are often criticized for capturing undesirable word associations such as gender stereotypes. However, methods for measuring and removing such biases remain poorly understood. We show that for any embedding model that implicitly does matrix factorization, debiasing vectors post hoc using subspace projection (Bolukbasi et al., 2016) is, under certain conditions, equivalent to training on an unbiased corpus. We also prove that WEAT, the most common association test for word embeddings, systematically overestimates bias. Given that the subspace projection method is provably effective, we use it to derive a new measure of association called the relational inner product association (RIPA). Experiments with RIPA reveal that, on average, skipgram with negative sampling (SGNS) does not make most words any more gendered than they are in the training corpus. However, for gender-stereotyped words, SGNS actually amplifies the gender association in the corpus.", "phrases": ["word embedding", "association", "weat", "subspace projection method", "gender bias"], "overall_score": 2.970190374236276, "scores": [0.8385884879402864, 1.7599432894732334, 1.0791253316156495, 0.830692378306355, 0.5353793779118821], "rank_score": 1.008745773049481} -{"id": "aramaki-etal-2011-twitter", "title": "Twitter Catches The Flu: Detecting Influenza Epidemics using Twitter", "abstract": "With the recent rise in popularity and scale of social media, a growing need exists for systems that can extract useful information from huge amounts of data. We address the issue of detecting influenza epidemics. First, the proposed system extracts influenza-related tweets using the Twitter API. Then, only tweets that mention actual influenza patients are extracted by a support vector machine (SVM)-based classifier. The experimental results demonstrate the feasibility of the proposed approach (0.89 correlation to the gold standard). Especially at the outbreak and early spread (early epidemic stage), the proposed method shows high correlation (0.97 correlation), which outperforms the state-of-the-art methods.
This paper shows that Twitter texts reflect the real world, and that NLP techniques can be applied to extract only those tweets that contain useful information.", "phrases": ["flu", "influenza epidemic", "twitter", "flu-related tweet", "social medium"], "overall_score": 2.9698577269010844, "scores": [2.2051033891040217, 0.7826124816746145, 0.9021957487932442, 0.6161749559522228, 0.5370774158414774], "rank_score": 1.0086327982731162} -{"id": "shimaoka-etal-2017-neural", "title": "Neural Architectures for Fine-grained Entity Type Classification", "abstract": "In this work, we investigate several neural network architectures for fine-grained entity type classification and make three key contributions. Despite being a natural comparison and addition, previous work on attentive neural architectures has not considered hand-crafted features; we combine these with learnt features and establish that they complement each other. Additionally, through quantitative analysis we establish that the attention mechanism learns to attend over syntactic heads and the phrase containing the mention, both of which are known to be strong hand-crafted features for our task. We introduce parameter sharing between labels through a hierarchical encoding method that, in low-dimensional projections, shows clear clusters for each type hierarchy. Lastly, despite using the same evaluation dataset, the literature frequently compares models trained using different data. We demonstrate that the choice of training data has a drastic impact on performance, which decreases by as much as 9.85% loose micro F1 score for a previously proposed method. Despite this discrepancy, our best model achieves state-of-the-art results with 75.36% loose micro F1 score on the well-established Figer (GOLD) dataset and we report the best results for models trained using publicly available data for the OntoNotes dataset with 64.93% loose micro F1 score.", "phrases": ["entity typing", "hand-crafted feature", "neural architecture", "relevant expression"], "overall_score": 3.284469700366589, "scores": [1.401645912058993, 0.8919071548575439, 0.8847933438889043, 0.8540322029493521], "rank_score": 1.0080946534386983} -{"id": "plank-2009-structural", "title": "Structural Correspondence Learning for Parse Disambiguation", "abstract": "The paper presents an application of Structural Correspondence Learning (SCL) (Blitzer et al., 2006) for domain adaptation of a stochastic attribute-value grammar (SAVG). So far, SCL has been applied successfully in NLP for Part-of-Speech tagging and Sentiment Analysis (Blitzer et al., 2006; Blitzer et al., 2007). An attempt was made in the CoNLL 2007 shared task to apply SCL to non-projective dependency parsing (Shimizu and Nakagawa, 2007), however, without any clear conclusions.
We report on our exploration of applying SCL to adapt a syntactic disambiguation model and show promising initial results on Wikipedia domains.", "phrases": ["structural correspondence learning", "many nlp task", "engineering problem"], "overall_score": 1.9610235225617574, "scores": [1.9414648787301128, 0.5613872870337273, 0.5204480470138899], "rank_score": 1.0077667375925765} -{"id": "wu-etal-2021-polyjuice", "title": "Polyjuice: Generating Counterfactuals for Explaining, Evaluating, and Improving Models", "abstract": "While counterfactual examples are useful for analysis and training of NLP models, current generation methods either rely on manual labor to create very few counterfactuals, or only instantiate limited types of perturbations such as paraphrases or word substitutions. We present Polyjuice, a general-purpose counterfactual generator that allows for control over perturbation types and locations, trained by finetuning GPT-2 on multiple datasets of paired sentences. We show that Polyjuice produces diverse sets of realistic counterfactuals, which in turn are useful in various distinct applications: improving training and evaluation on three different tasks (with around 70% less annotation effort than manual generation), augmenting state-of-the-art explanation techniques, and supporting systematic counterfactual error analysis by revealing behaviors easily missed by human experts.", "phrases": ["counterfactual", "perturbation", "gpt-2", "expert", "polyjuice"], "overall_score": 2.5848016026218366, "scores": [1.8461614078442816, 1.2323006646730081, 0.8676543091781067, 0.5654109345754131, 0.5271717814375898], "rank_score": 1.0077398195416798} -{"id": "itagaki-aikawa-2008-post", "title": "Post-MT Term Swapper: Supplementing a Statistical Machine Translation System with a User Dictionary", "abstract": "A statistical machine translation (SMT) system requires homogeneous training data in order to get domain-sensitive (or context-sensitive) terminology translations. If the data contains various domains, it is difficult for an SMT to learn context-sensitive terminology mappings probabilistically. Yet, terminology translation accuracy is an important issue for MT users. This paper explores an approach to tackle this terminology translation problem for an SMT. We propose a way to identify terminology translations from MT output and automatically swap them with user-defined translations. Our approach is simple and can be applied to any type of MT system. We call our prototype \u201cTerm Swapper\u201d. Term Swapper allows MT users to draw on their own dictionaries without affecting any parts of the MT output except for the terminology translation(s) in question. Using an SMT developed at Microsoft Research, called MSR-MT (Quirk et al., 2005; Menezes & Quirk, 2005), we conducted initial experiments to investigate the coverage rate of Term Swapper and its impact on the overall quality of MT output. The results from our experiments show high coverage and positive impact on the overall MT quality.", "phrases": ["term swapper", "user-defined translation", "post-processing step"], "overall_score": 1.8050702411864095, "scores": [1.8432906596381902, 0.6247700851705443, 0.5542259050239398], "rank_score": 1.007428883277558} -{"id": "barrett-etal-2016-weakly", "title": "Weakly Supervised Part-of-speech Tagging Using Eye-tracking Data", "abstract": "For many of the world\u2019s languages, there are no or very few linguistically annotated resources.
On the other hand, raw text, and often also dictionaries, can be harvested from the web for many of these languages, and part-of-speech taggers can be trained with these resources. At the same time, previous research shows that eye-tracking data, which can be obtained without explicit annotation, contains clues to part-of-speech information. In this work, we bring these two ideas together and show that given raw text, a dictionary, and eye-tracking data obtained from naive participants reading text, we can train a weakly supervised PoS tagger using a second-order HMM with maximum entropy emissions. The best model uses type-level aggregates of eye-tracking data and significantly outperforms a baseline that does not have access to eye-tracking data.", "phrases": ["part-of-speech tagging", "eye-tracking data", "participant", "token level average", "dundee corpus"], "overall_score": 2.96612809276873, "scores": [2.3565666200831035, 1.04824807596833, 0.5557895459368691, 0.543725626826362, 0.5325007698159074], "rank_score": 1.0073661277261143} -{"id": "akhtar-etal-2016-hybrid", "title": "A Hybrid Deep Learning Architecture for Sentiment Analysis", "abstract": "In this paper, we propose a novel hybrid deep learning architecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperform the state-of-the-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.", "phrases": ["sentiment analysis", "hindi dataset", "neural network architecture"], "overall_score": 1.804637565530572, "scores": [1.9553360145078584, 0.5437106708718484, 0.5225155218088489], "rank_score": 1.007187402396185} -{"id": "thurmair-2009-comparing", "title": "Comparing different architectures of hybrid Machine Translation systems", "abstract": "The contribution discusses variants of architectures of hybrid MT systems. The three main types of architectures are: coupling of systems (serial or parallel), architecture adaptations (integrating novel components into SMT or RMT architectures, either by pre/post-editing, or by system core modifications), and genuine hybrid systems, combining components of different paradigms. The interest is to investigate which resources are required for which types of systems, and to what extent the proposals contribute to an overall increase in MT quality.", "phrases": ["different architecture", "hybrid system", "linguistic information"], "overall_score": 2.414348430378467, "scores": [1.346200308210928, 0.8397643748214126, 0.8346198159239816], "rank_score": 1.0068614996521075} -{"id": "scialom-etal-2020-mlsum", "title": "MLSUM: The Multilingual Summarization Corpus", "abstract": "We present MLSUM, the first large-scale MultiLingual SUMmarization dataset.
Obtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages \u2013 namely, French, German, Spanish, Russian, Turkish. Together with English news articles from the popular CNN/Daily Mail dataset, the collected data form a large-scale multilingual dataset which can enable new research directions for the text summarization community. We report cross-lingual comparative analyses based on state-of-the-art systems. These highlight existing biases which motivate the use of a multi-lingual dataset.", "phrases": ["multilingual summarization", "different language", "research direction", "mlsum"], "overall_score": 2.212230675447389, "scores": [2.2752731574907052, 0.6475281314802266, 0.5635245838262778, 0.5409924054815857], "rank_score": 1.0068295695696987} -{"id": "zhao-etal-2012-identifying", "title": "Identifying Event-related Bursts via Social Media Activities", "abstract": "Activities on social media increase at a dramatic rate. When an external event happens, there is a surge in the degree of activities related to the event. These activities may be temporally correlated with one another, but they may also capture different aspects of an event and therefore exhibit different bursty patterns. In this paper, we propose to identify event-related bursts via social media activities. We study how to correlate multiple types of activities to derive a global bursty pattern. To model smoothness of one state sequence, we propose a novel function which can capture the state context. Experiments on a large Twitter dataset show that our methods are very effective.", "phrases": ["event-related burst", "activity", "state context"], "overall_score": 1.61951013055268, "scores": [1.9684383869658157, 0.525811037465591, 0.5245252385253266], "rank_score": 1.006258220985578} -{"id": "zhang-etal-2020-bert", "title": "BERT-XML: Large Scale Automated ICD Coding Using BERT Pretraining", "abstract": "ICD coding is the task of classifying and coding all diagnoses, symptoms and procedures associated with a patient's visit. The process is often manual, extremely time-consuming and expensive for hospitals as clinical interactions are usually recorded in free-text medical notes. In this paper, we propose a machine learning model, BERT-XML, for large-scale automated ICD coding of EHR notes, utilizing recently developed unsupervised pretraining that has achieved state-of-the-art performance on a variety of NLP tasks. We train a BERT model from scratch on EHR notes, learning with vocabulary better suited for EHR tasks and thus outperform off-the-shelf models. We further adapt the BERT architecture for ICD coding with multi-label attention. We demonstrate the effectiveness of BERT-based models on the large-scale ICD code classification task using millions of EHR notes to predict thousands of unique codes.", "phrases": ["icd", "bert", "bert-xml"], "overall_score": 1.8028382601193924, "scores": [1.95615218708178, 0.5336717549506647, 0.5287256307449397], "rank_score": 1.0061831909257946} -{"id": "ohki-etal-2011-recognizing", "title": "Recognizing Confinement in Web Texts", "abstract": "In the Recognizing Textual Entailment (RTE) task, sentence pairs are classified into one of three semantic relations: Entailment, Contradiction or Unknown. While we find some sentence pairs hold full entailments or contradictions, there are a number of pairs that partially entail or contradict one another depending on a specific situation.
These partial contradiction sentence pairs contain useful information for opinion mining and other such tasks, but it is difficult for Internet users to access this knowledge because current frameworks do not differentiate between full contradictions and partial contradictions. In this paper, under current approaches to semantic relation recognition, we define a new semantic relation known as Confinement in order to recognize this useful information. This information is classified as either Contradiction or Entailment. We provide a series of semantic templates to recognize Confinement relations in Web texts, and then implement a system for recognizing Confinement between sentence pairs. We show that our proposed system obtains an F-score of 61% for recognizing Confinement in Japanese-language Web texts, and that it outperforms a baseline that does not use a manually compiled list of lexico-syntactic patterns to instantiate the semantic templates.", "phrases": ["confinement", "web text", "semantic relation", "contradiction"], "overall_score": 1.3947227985769768, "scores": [1.6933894082373369, 0.9046428450956043, 0.8428474824351765, 0.583439594075445], "rank_score": 1.0060798324608906} -{"id": "kemps-snijders-etal-2008-isocat", "title": "ISOcat: Corralling Data Categories in the Wild", "abstract": "To achieve true interoperability for valuable linguistic resources, different levels of variation need to be addressed. ISO Technical Committee 37, Terminology and other language and content resources, is developing a Data Category Registry. This registry will provide a reusable set of data categories. A new implementation of the registry, dubbed ISOcat, is currently under construction. This paper briefly describes the new data model for data categories that will be introduced in this implementation. It goes on to sketch the standardization process. Completed data categories can be reused by the community. This is done by either making a selection of data categories using the ISOcat web interface, or by other tools which interact with the ISOcat system using one of its various Application Programming Interfaces. Linguistic resources that use data categories from the registry should include persistent references, e.g. in the metadata or schemata of the resource, which point back to their origin. These data category references can then be used to determine if two or more resources share common semantics, thus providing a level of interoperability close to the source data and a promising layer for semantic alignment on higher levels.", "phrases": ["data category", "registry", "isocat"], "overall_score": 1.3945251116365052, "scores": [1.822665669618285, 0.6340311675126407, 0.5611148572987456], "rank_score": 1.0059372314765571} -{"id": "qin-etal-2017-adversarial", "title": "Adversarial Connective-exploiting Networks for Implicit Discourse Relation Classification", "abstract": "Implicit discourse relation classification is highly challenging due to the lack of connectives as strong linguistic cues, which motivates the use of annotated implicit connectives to improve the recognition. We propose a feature imitation framework in which an implicit relation network is driven to learn from another neural network with access to connectives, and thus encouraged to extract similarly salient features for accurate classification. We develop an adversarial model to enable an adaptive imitation scheme through competition between the implicit network and a rival feature discriminator.
Our method effectively transfers discriminability of connectives to the implicit features, and achieves state-of-the-art performance on the PDTB benchmark.", "phrases": ["discourse relation", "implicit connective", "various nlp task"], "overall_score": 3.3503500720538506, "scores": [1.3041423016894496, 1.1518385383146568, 0.5603556980054045], "rank_score": 1.005445512669837} -{"id": "gulordava-etal-2018-colorless", "title": "Colorless Green Recurrent Networks Dream Hierarchically", "abstract": "Recurrent neural networks (RNNs) achieved impressive results in a variety of linguistic processing tasks, suggesting that they can induce non-trivial properties of language. We investigate to what extent RNNs learn to track abstract hierarchical syntactic structure. We test whether RNNs trained with a generic language modeling objective in four languages (Italian, English, Hebrew, Russian) can predict long-distance number agreement in various constructions. We include in our evaluation nonsensical sentences where RNNs cannot rely on semantic or lexical cues (\u201cThe colorless green ideas I ate with the chair sleep furiously\u201d), and, for Italian, we compare model performance to human intuitions. Our language-model-trained RNNs make reliable predictions about long-distance agreement, and do not lag much behind human performance. We thus bring support to the hypothesis that RNNs are not just shallow-pattern extractors, but they also acquire deeper grammatical competence.", "phrases": ["hebrew", "neural language model", "linguistic knowledge", "awareness", "perplexity"], "overall_score": 4.2253325953127945, "scores": [1.5572794204923535, 1.3531056173034133, 1.0417746943832733, 0.5408730057342249, 0.5315112392453055], "rank_score": 1.0049087954317142} -{"id": "augenstein-etal-2017-semeval", "title": "SemEval 2017 Task 10: ScienceIE - Extracting Keyphrases and Relations from Scientific Publications", "abstract": "We describe the SemEval task of extracting keyphrases and relations between them from scientific documents, which is crucial for understanding which publications describe which processes, tasks and materials. Although this was a new task, we had a total of 26 submissions across 3 evaluation scenarios. We expect the task and the findings reported in this paper to be relevant for researchers working on understanding scientific content, as well as the broader knowledge base population and information extraction communities.", "phrases": ["scienceie", "information extraction", "semeval"], "overall_score": 3.193151964750298, "scores": [1.399210599018827, 0.7845582224364942, 0.8304834239832337], "rank_score": 1.0047507484795182} -{"id": "thompson-etal-2011-promoting", "title": "Promoting Interoperability of Resources in META-SHARE", "abstract": "META-NET is a Network of Excellence aiming to improve significantly on the number of language technologies that can assist European citizens, by enabling enhanced communication and cooperation across languages. A major outcome will be META-SHARE, a searchable network of repositories that collect resources such as language data, tools and related web services, covering a large number of European languages. These resources are intended to facilitate the development and evaluation of a wide range of new language processing applications and services. An important aim of META-SHARE is the promotion of interoperability amongst resources.
In this paper, we describe our planned efforts to help to achieve this aim, through the adoption of the UIMA framework and the integration of the U-Compare system within the META-SHARE network. U-Compare facilitates the rapid construction and evaluation of NLP applications that make use of interoperable components, and, as such, can help to speed up the development of a new generation of European language technology applications.", "phrases": ["interoperability", "meta-net", "european language"], "overall_score": 1.392680498649831, "scores": [1.9064633598062162, 0.5860263957992363, 0.5213301178118671], "rank_score": 1.0046066244724399} -{"id": "ringland-etal-2019-nne", "title": "NNE: A Dataset for Nested Named Entity Recognition in English Newswire", "abstract": "Named entity recognition (NER) is widely used in natural language processing applications and downstream tasks. However, most NER tools target flat annotation from popular datasets, eschewing the semantic information available in nested entity mentions. We describe NNE\u2014a fine-grained, nested named entity dataset over the full Wall Street Journal portion of the Penn Treebank (PTB). Our annotation comprises 279,795 mentions of 114 entity types with up to 6 layers of nesting. We hope the public release of this large dataset for English newswire will encourage development of new techniques for nested NER.", "phrases": ["english newswire", "entity mention", "penn treebank"], "overall_score": 2.5767006423759593, "scores": [1.8194210169355332, 0.6290360439642272, 0.5652874033489049], "rank_score": 1.0045814880828885} -{"id": "toral-2019-post", "title": "Post-editese: an Exacerbated Translationese", "abstract": "Post-editing (PE) machine translation (MT) is widely used for dissemination because it leads to higher productivity than human translation from scratch (HT). In addition, PE translations are found to be of equal or better quality than HTs. However, most such studies measure quality solely as the number of errors. We conduct a set of computational analyses in which we compare PE against HT on three different datasets that cover five translation directions with measures that address different translation universals and laws of translation: simplification, normalisation and interference. We find that PEs are simpler and more normalised and have a higher degree of interference from the source language than HTs.", "phrases": ["source language", "post-editese", "lexical density"], "overall_score": 2.5760010414438166, "scores": [1.9025956011987097, 0.5767272212194006, 0.5336033789707746], "rank_score": 1.004308733796295} -{"id": "hua-etal-2021-dyploc", "title": "DYPLOC: Dynamic Planning of Content Using Mixed Language Models for Text Generation", "abstract": "We study the task of long-form opinion text generation, which faces at least two distinct challenges. First, existing neural generation models fall short of coherence, thus requiring efficient content planning. Second, diverse types of information are needed to guide the generator to cover both subjective and objective content. To this end, we propose DYPLOC, a generation framework that conducts dynamic planning of content while generating the output based on a novel design of mixed language models. To enrich the generation with diverse content, we further propose to use large pre-trained models to predict relevant concepts and to generate claims.
We experiment with two challenging tasks on newly collected datasets: (1) argument generation with Reddit ChangeMyView, and (2) writing articles using New York Times' Opinion section. Automatic evaluation shows that our model significantly outperforms competitive comparisons. Human judges further confirm that our generations are more coherent and have richer content.", "phrases": ["planning", "mixed language model", "dyploc"], "overall_score": 1.392206336945285, "scores": [1.6742656676100607, 0.7847448826643753, 0.5537832170332923], "rank_score": 1.004264589102576} -{"id": "imamura-etal-2009-discriminative", "title": "Discriminative Approach to Predicate-Argument Structure Analysis with Zero-Anaphora Resolution", "abstract": "This paper presents a predicate-argument structure analysis that simultaneously conducts zero-anaphora resolution. By adding noun phrases as candidate arguments that are not only in the sentence of the target predicate but also outside of the sentence, our analyzer identifies arguments regardless of whether they appear in the sentence or not. Because we adopt discriminative models based on maximum entropy for argument identification, we can easily add new features. We add language model scores as well as contextual features. We also use contextual information to restrict candidate arguments.", "phrases": ["structure analysis", "zero-anaphora resolution", "predicate"], "overall_score": 2.7191755776704225, "scores": [0.924021548036643, 0.7811876791184347, 1.3071155553364184], "rank_score": 1.0041082608304988} -{"id": "murray-etal-2006-incorporating", "title": "Incorporating Speaker and Discourse Features into Speech Summarization", "abstract": "We have explored the usefulness of incorporating speech and discourse features in an automatic speech summarization system applied to meeting recordings from the ICSI Meetings corpus. By analyzing speaker activity, turn-taking and discourse cues, we hypothesize that such a system can outperform solely text-based methods inherited from the field of text summarization. The summarization methods are described, two evaluation methods are applied and compared, and the results clearly show that utilizing such features is advantageous and efficient. Even simple methods relying on discourse cues and speaker activity can outperform text summarization approaches.", "phrases": ["discourse feature", "text summarization", "broadcast news"], "overall_score": 2.4949434579742227, "scores": [1.8321325904588657, 0.6276282011299475, 0.5523565346541004], "rank_score": 1.004039108747638} -{"id": "eisele-chen-2010-multiun", "title": "MultiUN: A Multilingual Corpus from United Nation Documents", "abstract": "This paper describes the acquisition, preparation and properties of a corpus extracted from the official documents of the United Nations (UN). This corpus is available in all 6 official languages of the UN, consisting of around 300 million words per language. We describe the methods we used for crawling, document formatting, and sentence alignment. This corpus also includes a common test set for machine translation.
We present the results of a French-Chinese machine translation experiment performed on this corpus.", "phrases": ["multilingual corpus", "united nations", "official document", "multiun"], "overall_score": 1.9533833188522227, "scores": [0.9729146290815373, 0.8355440422713338, 1.1125893211548687, 1.0943138057757777], "rank_score": 1.0038404495708795} -{"id": "aharoni-etal-2014-benchmark", "title": "A Benchmark Dataset for Automatic Detection of Claims and Evidence in the Context of Controversial Topics", "abstract": "We describe a novel and unique argumentative structure dataset. This corpus consists of data extracted from hundreds of Wikipedia articles using a meticulously monitored manual annotation process. The result is 2,683 argument elements, collected in the context of 33 controversial topics, organized under a simple claim-evidence structure. The obtained data are publicly available for academic research.", "phrases": ["claim", "controversial topic", "argumentative structure dataset", "academic research"], "overall_score": 2.574771313382246, "scores": [1.8214420655813546, 1.1074204275935142, 0.5647917084807959, 0.5216629910867758], "rank_score": 1.00382929818561} -{"id": "vincze-etal-2013-dependency", "title": "Dependency Parsing for Identifying Hungarian Light Verb Constructions", "abstract": "Light verb constructions (LVCs) are verb and noun combinations in which the verb has lost its meaning to some degree and the noun is used in one of its original senses. They often share their syntactic pattern with other constructions (e.g. verb-object pairs), thus LVC detection can be viewed as classifying certain syntactic patterns as light verb constructions or not. In this paper, we explore a novel way to detect LVCs in texts: we apply a dependency parser to carry out the task. We present our experiments on a Hungarian treebank, which has been manually annotated for dependency relations and light verb constructions. Our results outperformed those achieved by state-of-the-art techniques for Hungarian LVC detection, especially due to the high precision and the treatment of long-distance dependencies.", "phrases": ["light verb construction", "dependency parser", "hungarian lvc detection"], "overall_score": 2.4070418761268835, "scores": [1.2373301621279282, 1.226785057381601, 0.5473280700689671], "rank_score": 1.0038144298594986} -{"id": "mizumoto-matsumoto-2016-discriminative", "title": "Discriminative Reranking for Grammatical Error Correction with Statistical Machine Translation", "abstract": "Research on grammatical error correction has received considerable attention. For dealing with all types of errors, grammatical error correction methods that employ statistical machine translation (SMT) have been proposed in recent years. An SMT system generates candidates with scores for all candidates and selects the sentence with the highest score as the correction result. However, the 1-best result of an SMT system is not always the best result. Thus, we propose a reranking approach for grammatical error correction. The reranking approach is used to re-score N-best results of the SMT and reorder the results.
Our experiments show that our reranking system using parts of speech and syntactic features improves performance and achieves state-of-the-art quality, with an F0.5 score of 40.0.", "phrases": ["correction", "statistical machine translation", "re-ranking"], "overall_score": 2.0871412380265695, "scores": [1.9154255423445035, 0.5531715102307232, 0.5425112611605568], "rank_score": 1.0037027712452613} -{"id": "utiyama-isahara-2003-reliable", "title": "Reliable Measures for Aligning Japanese-English News Articles and Sentences", "abstract": "We have aligned Japanese and English news articles and sentences to make a large parallel corpus. We first used a method based on cross-language information retrieval (CLIR) to align the Japanese and English articles and then used a method based on dynamic programming (DP) matching to align the Japanese and English sentences in these articles. However, the results included many incorrect alignments. To remove these, we propose two measures (scores) that evaluate the validity of alignments. The measure for article alignment uses similarities in sentences aligned by DP matching and that for sentence alignment uses similarities in articles aligned by CLIR. They enhance each other to improve the accuracy of alignment. Using these measures, we have successfully constructed a large-scale article and sentence alignment corpus available to the public.", "phrases": ["news article", "parallel sentence", "comparable corpora"], "overall_score": 3.146166197913548, "scores": [1.533326510619632, 0.9120238050434188, 0.5648604976086006], "rank_score": 1.0034036044238837} -{"id": "peng-mccallum-2004-accurate", "title": "Accurate Information Extraction from Research Papers using Conditional Random Fields", "abstract": "With the increasing use of research paper search engines, such as CiteSeer, for both literature search and hiring decisions, the accuracy of such systems is of paramount importance. This paper employs Conditional Random Fields (CRFs) for the task of extracting various common fields from the headers and citation of research papers. The basic theory of CRFs is becoming well-understood, but best-practices for applying them to real-world data require additional exploration. This paper makes an empirical exploration of several factors, including variations on Gaussian, exponential and hyperbolic-L1 priors for improved regularization, and several classes of features and Markov order. On a standard benchmark data set, we achieve new state-of-the-art performance, reducing error in average F1 by 36%, and word error rate by 78% in comparison with the previous best SVM results. Accuracy compares even more favorably against HMMs.", "phrases": ["research paper", "conditional random fields", "crfs"], "overall_score": 2.4054655237551112, "scores": [1.918695471231807, 0.5619176036970919, 0.5288580446516149], "rank_score": 1.0031570398601712} -{"id": "chisholm-etal-2017-learning", "title": "Learning to generate one-sentence biographies from Wikidata", "abstract": "We investigate the generation of one-sentence Wikipedia biographies from facts derived from Wikidata slot-value pairs. We train a recurrent neural network sequence-to-sequence model with attention to select facts and generate textual summaries. Our model incorporates a novel secondary objective that helps ensure it generates sentences that contain the input facts.
The model achieves a BLEU score of 41, improving significantly upon the vanilla sequence-to-sequence model and scoring roughly twice that of a simple template baseline. Human preference evaluation suggests the model is nearly as good as the Wikipedia reference. Manual analysis explores content selection, suggesting the model can trade the ability to infer knowledge against the risk of hallucinating incorrect information.", "phrases": ["biography", "wikipedia", "natural language generation"], "overall_score": 2.781250300829303, "scores": [1.8465653471471397, 0.5928706041548752, 0.5699360610560146], "rank_score": 1.0031240041193432} -{"id": "ding-luo-2021-attentionrank", "title": "AttentionRank: Unsupervised Keyphrase Extraction using Self and Cross Attentions", "abstract": "Keyword or keyphrase extraction aims to identify words or phrases that present the main topics of a document. This paper proposes the AttentionRank, a hybrid attention model, to identify keyphrases from a document in an unsupervised manner. AttentionRank calculates self-attention and cross-attention using a pre-trained language model. The self-attention is designed to determine the importance of a candidate within the context of a sentence. The cross-attention is calculated to identify the semantic relevance between a candidate and sentences within a document. We evaluate the AttentionRank on three publicly available datasets against seven baselines. The results show that the AttentionRank is an effective and robust unsupervised keyphrase extraction model on both long and short documents. Source code is available on GitHub.", "phrases": ["self-attention", "language model", "attentionrank"], "overall_score": 1.102040702017874, "scores": [1.9330389970086115, 0.5404648982240413, 0.5358581337154866], "rank_score": 1.0031206763160465} -{"id": "mullenbach-etal-2018-explainable", "title": "Explainable Prediction of Medical Codes from Clinical Text", "abstract": "Clinical notes are text documents that are created by clinicians for each patient encounter. They are typically accompanied by medical codes, which describe the diagnosis and treatment. Annotating these codes is labor intensive and error prone; furthermore, the connection between the codes and the text is not annotated, obscuring the reasons and details behind specific diagnoses and treatments. We present an attentional convolutional network that predicts medical codes from clinical text. Our method aggregates information across the document using a convolutional neural network, and uses an attention mechanism to select the most relevant segments for each of the thousands of possible codes. The method is accurate, achieving precision@8 of 0.71 and a Micro-F1 of 0.54, which are both better than the prior state of the art. Furthermore, through an interpretability evaluation by a physician, we show that the attention mechanism identifies meaningful explanations for each code assignment.", "phrases": ["clinical text", "interpretability", "icd code", "medical code prediction", "document representation"], "overall_score": 3.839044967746772, "scores": [0.8405208427097285, 1.2896288639028717, 1.0405702029526303, 1.0096886365829878, 0.8331779590638099], "rank_score": 1.0027173010424055} -{"id": "passos-etal-2014-lexicon", "title": "Lexicon Infused Phrase Embeddings for Named Entity Resolution", "abstract": "Most state-of-the-art approaches for named-entity recognition (NER) use semi-supervised information in the form of word clusters and lexicons.
Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.", "phrases": ["word embedding", "hidden markov models", "ner system", "task-specific resource", "entity recognition"], "overall_score": 3.564016304001175, "scores": [1.50288464237756, 1.256026096825062, 0.8503573964124532, 0.8422314895434178, 0.5606908022790529], "rank_score": 1.0024380854875092} -{"id": "peng-etal-2004-chinese", "title": "Chinese Segmentation and New Word Detection using Conditional Random Fields", "abstract": "Chinese word segmentation is a difficult, important and widely-studied sequence modeling problem. This paper demonstrates the ability of linear-chain conditional random fields (CRFs) to perform robust and accurate Chinese word segmentation by providing a principled framework that easily supports the integration of domain knowledge in the form of multiple lexicons of characters and words. We also present a probabilistic new word detection method, which further improves performance. Our system is evaluated on four datasets used in a recent comprehensive Chinese word segmentation competition. State-of-the-art performance is obtained.", "phrases": ["new word detection", "conditional random field", "cws", "position", "character-based sequence"], "overall_score": 3.857973895839112, "scores": [0.9524910750641125, 1.5027910422540183, 1.1998512837828157, 0.8243627190203783, 0.5306675085738166], "rank_score": 1.0020327257390282} -{"id": "richman-schone-2008-mining", "title": "Mining Wiki Resources for Multilingual Named Entity Recognition", "abstract": "In this paper, we describe a system by which the multilingual characteristics of Wikipedia can be utilized to annotate a large corpus of text with Named Entity Recognition (NER) tags requiring minimal human intervention and no linguistic expertise. This process, though of value in languages for which resources exist, is particularly useful for less commonly taught languages. We show how the Wikipedia format can be used to identify possible named entities and discuss in detail the process by which we use the Category structure inherent to Wikipedia to determine the named entity type of a proposed entity. We further describe the methods by which English language data can be used to bootstrap the NER process in other languages. 
We demonstrate the system by using the generated corpus as training sets for a variant of BBN's Identifinder in French, Ukrainian, Spanish, Polish, Russian, and Portuguese, achieving overall F-scores as high as 84.7% on independent, human-annotated corpora, comparable to a system trained on up to 40,000 words of human-annotated newswire.", "phrases": ["wikipedia", "entity type", "other language", "human-annotated corpora"], "overall_score": 2.7134397981253846, "scores": [1.829174424232998, 1.06059597815774, 0.5596423155010172, 0.5585481345636107], "rank_score": 1.0019902131138414} -{"id": "gaddy-klein-2020-digital", "title": "Digital Voicing of Silent Speech", "abstract": "In this paper, we consider the task of digitally voicing silent speech, where silently mouthed words are converted to audible speech based on electromyography (EMG) sensor measurements that capture muscle impulses. While prior work has focused on training speech synthesis models from EMG collected during vocalized speech, we are the first to train from EMG collected during silently articulated speech. We introduce a method of training on silent EMG by transferring audio targets from vocalized to silent signals. Our method greatly improves intelligibility of audio generated from silent EMG compared to a baseline that only trains with vocalized data, decreasing transcription word error rate from 64% to 4% in one data condition and 88% to 68% in another. To spur further development on this task, we share our new dataset of silent and vocalized facial EMG measurements.", "phrases": ["silent speech", "signal", "intelligibility"], "overall_score": 1.0998889495172839, "scores": [1.933061391987774, 0.5410996586802531, 0.5293251496840238], "rank_score": 1.001162066784017} -{"id": "cohan-etal-2020-specter", "title": "SPECTER: Document-level Representation Learning using Citation-informed Transformers", "abstract": "Representation learning is a critical ingredient for natural language processing systems. Recent Transformer language models like BERT learn powerful textual representations, but these models are targeted towards token- and sentence-level training objectives and do not leverage information on inter-document relatedness, which limits their document-level representation power. For applications on scientific documents, such as classification and recommendation, accurate embeddings of documents are a necessity. We propose SPECTER, a new method to generate document-level embedding of scientific papers based on pretraining a Transformer language model on a powerful signal of document-level relatedness: the citation graph. Unlike existing pretrained language models, Specter can be easily applied to downstream applications without task-specific fine-tuning. Additionally, to encourage further research on document-level models, we introduce SciDocs, a new evaluation benchmark consisting of seven document-level tasks ranging from citation prediction, to document classification and recommendation. 
We show that Specter outperforms a variety of competitive baselines on the benchmark.", "phrases": ["scientific document", "recommendation", "document-level embedding", "citation graph", "specter"], "overall_score": 1.9481443288798932, "scores": [2.2023672586042617, 0.8309232037494771, 0.8287218820696373, 0.5906327884746677, 0.5530955736439933], "rank_score": 1.0011481413084076} -{"id": "pimentel-etal-2020-pareto", "title": "Pareto Probing: Trading Off Accuracy for Complexity", "abstract": "The question of how to probe contextual word representations in a way that is principled and useful has seen significant recent attention. In our contribution to this discussion, we argue, first, for a probe metric that reflects the trade-off between probe complexity and performance: the Pareto hypervolume. To measure complexity, we present a number of parametric and non-parametric metrics. Our experiments with such metrics show that probe's performance curves often fail to align with widely accepted rankings between language representations (with, e.g., non-contextual representations outperforming contextual ones). These results lead us to argue, second, that common simplistic probe tasks such as POS labeling and dependency arc labeling, are inadequate to evaluate the properties encoded in contextual word representations. We propose full dependency parsing as an example probe task, and demonstrate it with the Pareto hypervolume. In support of our arguments, the results of this illustrative experiment conform closer to accepted rankings among contextual word representations.", "phrases": ["probe", "complexity", "trade-off", "pareto hypervolume"], "overall_score": 2.1997068463076337, "scores": [2.321352866495363, 0.5825474837831396, 0.5622248204864951, 0.5383937464126413], "rank_score": 1.00112972929441} -{"id": "wilcox-etal-2019-structural", "title": "Structural Supervision Improves Learning of Non-Local Grammatical Dependencies", "abstract": "State-of-the-art LSTM language models trained on large corpora learn sequential contingencies in impressive detail, and have been shown to acquire a number of non-local grammatical dependencies with some success. Here we investigate whether supervision with hierarchical structure enhances learning of a range of grammatical dependencies, a question that has previously been addressed only for subject-verb agreement. Using controlled experimental methods from psycholinguistics, we compare the performance of word-based LSTM models versus Recurrent Neural Network Grammars (RNNGs) (Dyer et al. 2016) which represent hierarchical syntactic structure and use neural control to deploy it in left-to-right processing, on two classes of non-local grammatical dependencies in English\u2014Negative Polarity licensing and Filler-Gap Dependencies\u2014tested in a range of configurations. Using the same training data for both models, we find that the RNNG outperforms the LSTM on both types of grammatical dependencies and even learns many of the Island Constraints on the filler-gap dependency. 
Structural supervision thus provides data efficiency advantages over purely string-based training of neural language models in acquiring human-like generalizations about non-local grammatical dependencies.", "phrases": ["non-local grammatical dependency", "language model", "filler-gap dependency", "structural supervision"], "overall_score": 1.7937749671947296, "scores": [1.928359625046766, 0.8822419533242614, 0.6423682858582049, 0.5515296191027415], "rank_score": 1.0011248708329936} -{"id": "andersen-etal-2013-developing", "title": "Developing and testing a self-assessment and tutoring system", "abstract": "Automated feedback on writing may be a useful complement to teacher comments in the process of learning a foreign language. This paper presents a self-assessment and tutoring system which combines an holistic score with detection and correction of frequent errors and furthermore provides a qualitative assessment of each individual sentence, thus making the language learner aware of potentially problematic areas rather than providing a panacea. The system has been tested by learners in a range of educational institutions, and their feedback has guided its development.", "phrases": ["tutoring system", "assessment", "language learner"], "overall_score": 2.3049341687846936, "scores": [1.9570939548897182, 0.526088011827635, 0.5198786052429991], "rank_score": 1.0010201906534508} -{"id": "liang-etal-2007-infinite", "title": "The Infinite PCFG Using Hierarchical Dirichlet Processes", "abstract": "We present a nonparametric Bayesian model of tree structures based on the hierarchical Dirichlet process (HDP). Our HDP-PCFG model allows the complexity of the grammar to grow as more training data is available. In addition to presenting a fully Bayesian model for the PCFG, we also develop an efficient variational inference procedure. On synthetic data, we recover the correct grammar without having to specify its complexity in advance. We also show that our techniques can be applied to full-scale parsing applications by demonstrating its effectiveness in learning state-split grammars.", "phrases": ["infinite pcfg", "hierarchical dirichlet process", "bayesian model", "variable grammar"], "overall_score": 2.567158305547795, "scores": [1.9799314640790444, 0.8958722313004955, 0.5712882528661782, 0.5563528731186217], "rank_score": 1.000861205341085} -{"id": "ramachandran-etal-2017-unsupervised", "title": "Unsupervised Pretraining for Sequence to Sequence Learning", "abstract": "This work presents a general unsupervised learning method to improve the accuracy of sequence to sequence (seq2seq) models. In our method, the weights of the encoder and decoder of a seq2seq model are initialized with the pretrained weights of two language models and then fine-tuned with labeled data. We apply this method to challenging benchmarks in machine translation and abstractive summarization and find that it significantly improves the subsequent supervised models. Our main result is that pretraining improves the generalization of seq2seq models. We achieve state-of-the-art results on the WMT English\u2192German task, surpassing a range of methods using both phrase-based machine translation and neural machine translation. Our method achieves a significant improvement of 1.3 BLEU from the previous best models on both WMT'14 and WMT'15 English\u2192German. 
We also conduct human evaluations on abstractive summarization and find that our method outperforms a purely supervised learning baseline in a statistically significant manner.", "phrases": ["seq2seq model", "language model", "fine-tune", "unsupervised pretraining"], "overall_score": 2.9465255625746747, "scores": [0.9615873527030177, 1.6411534110651438, 0.8290164218833006, 0.5710774234852847], "rank_score": 1.0007086522841866} -{"id": "durmus-etal-2019-role", "title": "The Role of Pragmatic and Discourse Context in Determining Argument Impact", "abstract": "Research in the social sciences and psychology has shown that the persuasiveness of an argument depends not only the language employed, but also on attributes of the source/communicator, the audience, and the appropriateness and strength of the argument's claims given the pragmatic and discourse context of the argument. Among these characteristics of persuasive arguments, prior work in NLP does not explicitly investigate the effect of the pragmatic and discourse context when determining argument quality. This paper presents a new dataset to initiate the study of this aspect of argumentation: it consists of a diverse collection of arguments covering 741 controversial topics and comprising over 47,000 claims. We further propose predictive models that incorporate the pragmatic and discourse context of argumentative claims and show that they outperform models that rely only on claim-specific linguistic features for predicting the perceived impact of individual claims within a particular line of argument.", "phrases": ["discourse context", "argument impact", "influence"], "overall_score": 1.79293127556638, "scores": [1.8729231460138693, 0.602617383185544, 0.5264214635096244], "rank_score": 1.0006539975696793} -{"id": "li-yarowsky-2008-mining", "title": "Mining and Modeling Relations between Formal and Informal Chinese Phrases from Web Corpora", "abstract": "We present a novel method for discovering and modeling the relationship between informal Chinese expressions (including colloquialisms and instant-messaging slang) and their formal equivalents. Specifically, we proposed a bootstrapping procedure to identify a list of candidate informal phrases in web corpora. Given an informal phrase, we retrieve contextual instances from the web using a search engine, generate hypotheses of formal equivalents via this data, and rank the hypotheses using a conditional log-linear model. In the log-linear model, we incorporate as feature functions both rule-based intuitions and data co-occurrence phenomena (either as an explicit or indirect definition, or through formal/informal usages occurring in free variation in a discourse). We test our system on manually collected test examples, and find that the (formal-informal) relationship discovery and extraction process using our method achieves an average 1-best precision of 62%. 
Given the ubiquity of informal conversational style on the internet, this work has clear applications for text normalization in text-processing systems including machine translation aspiring to broad coverage.", "phrases": ["chinese", "web corpora", "formal equivalent", "conditional log-linear model", "informal word"], "overall_score": 2.398948103252046, "scores": [1.8732618209619054, 1.1420103278299922, 0.9280301907642354, 0.532772042736728, 0.52612093971644], "rank_score": 1.0004390644018604} -{"id": "zhou-etal-2021-amr", "title": "AMR Parsing with Action-Pointer Transformer", "abstract": "Abstract Meaning Representation parsing is a sentence-to-graph prediction task where target nodes are not explicitly aligned to sentence tokens. However, since graph nodes are semantically based on one or more sentence tokens, implicit alignments can be derived. Transition-based parsers operate over the sentence from left to right, capturing this inductive bias via alignments at the cost of limited expressiveness. In this work, we propose a transition-based system that combines hard-attention over sentences with a target-side action pointer mechanism to decouple source tokens from node representations and address alignments. We model the transitions as well as the pointer mechanism through straightforward modifications within a single Transformer architecture. Parser state and graph structure information are efficiently encoded using attention heads. We show that our action-pointer approach leads to increased expressiveness and attains large gains (+1.6 points) against the best transition-based AMR parser in very similar conditions. While using no graph re-categorization, our single model yields the second best Smatch score on AMR 2.0 (81.8), which is further improved to 83.4 with silver data and ensemble decoding.", "phrases": ["transition-based amr parser", "amr", "pre-trained language model"], "overall_score": 1.6099707161568997, "scores": [1.830741351490193, 0.6345763161861445, 0.5356754810223786], "rank_score": 1.0003310495662388} -{"id": "gu-etal-2019-improved", "title": "Improved Zero-shot Neural Machine Translation via Ignoring Spurious Correlations", "abstract": "Zero-shot translation, translating between language pairs on which a Neural Machine Translation (NMT) system has never been trained, is an emergent property when training the system in multilingual settings. However, naive training for zero-shot NMT easily fails, and is sensitive to hyper-parameter setting. The performance typically lags far behind the more conventional pivot-based approach which translates twice using a third language as a pivot. In this work, we address the degeneracy problem due to capturing spurious correlations by quantitatively analyzing the mutual information between language IDs of the source and decoded sentences. Inspired by this analysis, we propose to use two simple but effective approaches: (1) decoder pre-training; (2) back-translation. 
These methods show significant improvement (4-22 BLEU points) over the vanilla zero-shot translation on three challenging multilingual datasets, and achieve similar or better results than the pivot-based approach.", "phrases": ["zero-shot translation", "different language", "mnmt model"], "overall_score": 2.833855206323609, "scores": [1.611708490235854, 0.8654664768716805, 0.5235046805463275], "rank_score": 1.000226549217954} -{"id": "de-marneffe-etal-2014-universal", "title": "Universal Stanford dependencies: A cross-linguistic typology", "abstract": "Revisiting the now de facto standard Stanford dependency representation, we propose an improved taxonomy to capture grammatical relations across languages, including morphologically rich ones. We suggest a two-layered taxonomy: a set of broadly attested universal grammatical relations, to which language-specific relations can be added. We emphasize the lexicalist stance of the Stanford Dependencies, which leads to a particular, partially new treatment of compounding, prepositions, and morphology. We show how existing dependency schemes for several languages map onto the universal taxonomy proposed here and close with consideration of practical implications of dependency representation choices for NLP applications, in particular parsing.", "phrases": ["morphology", "rich one", "universal dependencies", "annotation scheme", "final sentence"], "overall_score": 3.688439802887264, "scores": [2.3355011793387868, 0.9335347063881465, 0.597840401736115, 0.5678161577906747, 0.5647116404149185], "rank_score": 0.9998808171337282} -{"id": "grundkiewicz-etal-2019-neural", "title": "Neural Grammatical Error Correction Systems with Unsupervised Pre-training on Synthetic Data", "abstract": "Considerable effort has been made to address the data sparsity problem in neural grammatical error correction. In this work, we propose a simple and surprisingly effective unsupervised synthetic error generation method based on confusion sets extracted from a spellchecker to increase the amount of training data. Synthetic data is used to pre-train a Transformer sequence-to-sequence model, which not only improves over a strong baseline trained on authentic error-annotated data, but also enables the development of a practical GEC system in a scenario where little genuine error-annotated data is available. The developed systems placed first in the BEA19 shared task, achieving 69.47 and 64.24 F0.5 in the restricted and low-resource tracks respectively, both on the W&I+LOCNESS test set. On the popular CoNLL 2014 test set, we report state-of-the-art results of 64.16 M2 for the submitted system, and 61.30 M2 for the constrained system trained on the NUCLE and Lang-8 data.", "phrases": ["error correction", "synthetic data", "confusion", "spellchecker", "fine-tuning"], "overall_score": 3.176694911039167, "scores": [2.1290893613353834, 0.8848003505051919, 0.8741018367754649, 0.5570929449783015, 0.5527775323792239], "rank_score": 0.9995724051947132} -{"id": "kang-etal-2018-dataset", "title": "A Dataset of Peer Reviews (PeerRead): Collection, Insights and NLP Applications", "abstract": "Peer reviewing is a central component in the scientific publishing process. We present the first public dataset of scientific peer reviews available for research purposes (PeerRead v1), providing an opportunity to study this important artifact. The dataset consists of 14.7K paper drafts and the corresponding accept/reject decisions in top-tier venues including ACL, NIPS and ICLR. 
The dataset also includes 10.7K textual peer reviews written by experts for a subset of the papers. We describe the data collection process and report interesting observed phenomena in the peer reviews. We also propose two novel NLP tasks based on this dataset and provide simple baseline models. In the first task, we show that simple models can predict whether a paper is accepted with up to 21% error reduction compared to the majority baseline. In the second task, we predict the numerical scores of review aspects and show that simple models can outperform the mean baseline for aspects with high variance such as `originality' and `impact'.", "phrases": ["peerread", "public dataset", "research purpose", "venue"], "overall_score": 2.483661216498288, "scores": [1.742961568104959, 0.85223955689803, 0.8492551967362458, 0.553538881356513], "rank_score": 0.9994988007739369} -{"id": "reiter-2018-structured", "title": "A Structured Review of the Validity of BLEU", "abstract": "The BLEU metric has been widely used in NLP for over 15 years to evaluate NLP systems, especially in machine translation and natural language generation. I present a structured review of the evidence on whether BLEU is a valid evaluation technique\u2014in other words, whether BLEU scores correlate with real-world utility and user-satisfaction of NLP systems; this review covers 284 correlations reported in 34 papers. Overall, the evidence supports using BLEU for diagnostic evaluation of MT systems (which is what it was originally proposed for), but does not support using BLEU outside of MT, for evaluation of individual texts, or for scientific hypothesis testing.", "phrases": ["structured review", "rouge", "human judgment"], "overall_score": 2.7066871367031964, "scores": [1.891042910557756, 0.5717106139794205, 0.5357364616545948], "rank_score": 0.9994966620639237} -{"id": "muller-etal-2020-domain", "title": "Domain Robustness in Neural Machine Translation", "abstract": "Translating text that diverges from the training domain is a key challenge for neural machine translation (NMT). Domain robustness - the generalization of models to unseen test domains - is low compared to statistical machine translation. In this paper, we investigate the performance of NMT on out-of-domain test sets, and ways to improve it. \nWe observe that hallucination (translations that are fluent but unrelated to the source) is common in out-of-domain settings, and we empirically compare methods that improve adequacy (reconstruction), out-of-domain translation (subword regularization), or robustness against adversarial examples (defensive distillation), as well as noisy channel models. \nIn experiments on German to English OPUS data, and German to Romansh, a low-resource scenario, we find that several methods improve domain robustness, reconstruction standing out as a method that not only improves automatic scores, but also shows improvements in manual assessments of adequacy, albeit at some loss in fluency. 
However, out-of-domain performance is still relatively low and domain robustness remains an open problem.", "phrases": ["neural machine translation", "out-of-domain translation", "adversarial example", "domain robustness"], "overall_score": 2.706672249622267, "scores": [2.034203273230702, 0.8924915245437334, 0.549752835180596, 0.521517025928497], "rank_score": 0.9994911647208821} -{"id": "baldini-soares-etal-2019-matching", "title": "Matching the Blanks: Distributional Similarity for Relation Learning", "abstract": "General purpose relation extractors, which can model arbitrary relations, are a core aspiration in information extraction. Efforts have been made to build general purpose extractors that represent relations with their surface forms, or which jointly embed surface forms with relations from an existing knowledge graph. However, both of these approaches are limited in their ability to generalize. In this paper, we build on extensions of Harris' distributional hypothesis to relations, as well as recent advances in learning text representations (specifically, BERT), to build task agnostic relation representations solely from entity-linked text. We show that these representations significantly outperform previous work on exemplar based relation extraction (FewRel) even without using any of that task's training data. We also show that models initialized with our task agnostic representations, and then tuned on supervised relation extraction datasets, significantly outperform the previous methods on SemEval 2010 Task 8, KBP37, and TACRED", "phrases": ["relation extraction", "fewrel", "language model", "distant supervision", "external knowledge"], "overall_score": 3.9864997860284905, "scores": [1.7533225741404765, 1.2954596796468885, 0.8705545899444388, 0.5442823023266873, 0.5332669526237991], "rank_score": 0.999377219736458} -{"id": "hamilton-etal-2016-inducing", "title": "Inducing Domain-Specific Sentiment Lexicons from Unlabeled Corpora", "abstract": "A word's sentiment depends on the domain in which it is used. Computational social science research thus requires sentiment lexicons that are specific to the domains being studied. We combine domain-specific word embeddings with a label propagation framework to induce accurate domain-specific sentiment lexicons using small sets of seed words. We show that our approach achieves state-of-the-art performance on inducing sentiment lexicons from domain-specific corpora and that our purely corpus-based approach outperforms methods that rely on hand-curated resources (e.g., WordNet). Using our framework, we induce and release historical sentiment lexicons for 150 years of English and community-specific sentiment lexicons for 250 online communities from the social media forum Reddit. The historical lexicons we induce show that more than 5% of sentiment-bearing (non-neutral) English words completely switched polarity during the last 150 years, and the community-specific lexicons highlight how sentiment varies drastically between different communities.", "phrases": ["word embedding", "small set", "hand-curated resource"], "overall_score": 3.2936871048170095, "scores": [1.875159599237266, 0.588645629634335, 0.5342379741678205], "rank_score": 0.999347734346474} -{"id": "takala-etal-2014-gold", "title": "Gold-standard for Topic-specific Sentiment Analysis of Economic Texts", "abstract": "Public opinion, as measured by media sentiment, can be an important indicator in the financial and economic context. 
These are domains where traditional sentiment estimation techniques often struggle, and existing annotated sentiment text collections are of less use. Though considerable progress has been made in analyzing sentiments at sentence-level, performing topic-dependent sentiment analysis is still a relatively uncharted territory. The computation of topic-specific sentiments has commonly relied on naive aggregation methods without much consideration to the relevance of the sentences to the given topic. Clearly, the use of such methods leads to a substantial increase in noise-to-signal ratio. To foster development of methods for measuring topic-specific sentiments in documents, we have collected and annotated a corpus of financial news that have been sampled from Thomson Reuters newswire. In this paper, we describe the annotation process and evaluate the quality of the dataset using a number of inter-annotator agreement metrics. The annotations of 297 documents and over 9000 sentences can be used for research purposes when developing methods for detecting topic-wise sentiment in financial text.", "phrases": ["sentiment analysis", "financial news", "thomson reuters newswire", "different topic"], "overall_score": 2.0778846369198063, "scores": [2.0011561418743007, 0.8701274482804713, 0.5733553558280139, 0.5523662023153054], "rank_score": 0.9992512870745229} -{"id": "ouchi-etal-2018-span", "title": "A Span Selection Model for Semantic Role Labeling", "abstract": "We present a simple and accurate span-based model for semantic role labeling (SRL). Our model directly takes into account all possible argument spans and scores them for each label. At decoding time, we greedily select higher scoring labeled spans. One advantage of our model is to allow us to design and use span-level features, that are difficult to use in token-based BIO tagging approaches. Experimental results demonstrate that our ensemble model achieves the state-of-the-art results, 87.4 F1 and 87.0 F1 on the CoNLL-2005 and 2012 datasets, respectively.", "phrases": ["span", "semantic role labeling", "state-of-the-art result", "srl model"], "overall_score": 2.2999571448996363, "scores": [2.0476261165308265, 0.8978080354196728, 0.5275797236886074, 0.5224209109363726], "rank_score": 0.9988586966438698} -{"id": "cotterell-etal-2018-languages", "title": "Are All Languages Equally Hard to Language-Model?", "abstract": "For general modeling methods applied to diverse languages, a natural question is: how well should we expect our models to work on languages with differing typological profiles? In this work, we develop an evaluation framework for fair cross-linguistic comparison of language models, using translated text so that all models are asked to predict approximately the same information. We then conduct a study on 21 languages, demonstrating that in some languages, the textual expression of the information is harder to predict with both n-gram and LSTM language models. 
We show complex inflectional morphology to be a cause of performance differences among languages.", "phrases": ["language modeling", "morphology", "typology effect"], "overall_score": 2.561865935652364, "scores": [1.5893634216626702, 0.869696639602372, 0.5373335262338268], "rank_score": 0.9987978624996229} -{"id": "nilsson-etal-2006-graph", "title": "Graph Transformations in Data-Driven Dependency Parsing", "abstract": "Transforming syntactic representations in order to improve parsing accuracy has been exploited successfully in statistical parsing systems using constituency-based representations. In this paper, we show that similar transformations can give substantial improvements also in data-driven dependency parsing. Experiments on the Prague Dependency Treebank show that systematic transformations of coordinate structures and verb groups result in a 10% error reduction for a deterministic data-driven dependency parser. Combining these transformations with previously proposed techniques for recovering non-projective dependencies leads to state-of-the-art accuracy for the given data set.", "phrases": ["transformation", "data-driven dependency parsing", "verb group", "maltparser"], "overall_score": 2.3949535614668105, "scores": [0.9656461591322482, 1.2693430317270877, 1.1707246863117384, 0.5893789671830044], "rank_score": 0.9987732110885197} -{"id": "roy-roth-2015-solving", "title": "Solving General Arithmetic Word Problems", "abstract": "This paper presents a novel approach to automatically solving arithmetic word problems. This is the first algorithmic approach that can handle arithmetic problems with multiple steps and operations, without depending on additional annotations or predefined templates. We develop a theory for expression trees that can be used to represent and evaluate the target arithmetic expressions; we use it to uniquely decompose the target arithmetic problem to multiple classification problems; we then compose an expression tree, combining these with world knowledge through a constrained inference framework. Our classifiers gain from the use of quantity schemas that supports better extraction of features. Experimental results show that our method outperforms existing systems, achieving state of the art performance on benchmark datasets of arithmetic word problems.", "phrases": ["word problem", "quantity", "many researcher", "target math problem", "mwp"], "overall_score": 3.2916519351042672, "scores": [1.8766401618620367, 1.0968319423161388, 0.8577358217401444, 0.6014153293275981, 0.5610279309774835], "rank_score": 0.9987302372446804} -{"id": "chen-etal-2017-teacher", "title": "A Teacher-Student Framework for Zero-Resource Neural Machine Translation", "abstract": "While end-to-end neural machine translation (NMT) has made remarkable progress recently, it still suffers from the data scarcity problem for low-resource language pairs and domains. In this paper, we propose a method for zero-resource NMT by assuming that parallel sentences have close probabilities of generating a sentence in a third language. Based on the assumption, our method is able to train a source-to-target NMT model (\u201cstudent\u201d) without parallel corpora available guided by an existing pivot-to-target NMT model (\u201cteacher\u201d) on a source-pivot parallel corpus. 
Experimental results show that the proposed method significantly improves over a baseline pivot-based model by +3.0 BLEU points across various language pairs.", "phrases": ["teacher-student framework", "neural machine translation", "third language", "student", "zero-shot translation"], "overall_score": 3.0397919447230977, "scores": [1.6176895015431223, 1.1386864702262987, 1.089740836766649, 0.5885697343623966, 0.5575445982786991], "rank_score": 0.9984462282354329} -{"id": "tan-etal-2019-learning", "title": "Learning to Navigate Unseen Environments: Back Translation with Environmental Dropout", "abstract": "A grand goal in AI is to build a robot that can accurately navigate based on natural language instructions, which requires the agent to perceive the scene, understand and ground language, and act in the real-world environment. One key challenge here is to learn to navigate in new environments that are unseen during training. Most of the existing approaches perform dramatically worse in unseen environments as compared to seen ones. In this paper, we present a generalizable navigational agent. Our agent is trained in two stages. The first stage is training via mixed imitation and reinforcement learning, combining the benefits from both off-policy and on-policy optimization. The second stage is fine-tuning via newly-introduced `unseen' triplets (environment, path, instruction). To generate these unseen triplets, we propose a simple but effective `environmental dropout' method to mimic unseen environments, which overcomes the problem of limited seen environment variability. Next, we apply semi-supervised learning (via back-translation) on these dropout environments to generate new paths and instructions. Empirically, we show that our agent is substantially better at generalizability when fine-tuned with these triplets, outperforming the state-of-art approaches by a large margin on the private unseen test set of the Room-to-Room task, and achieving the top rank on the leaderboard.", "phrases": ["unseen environment", "environmental dropout", "natural language instruction", "path", "learning method"], "overall_score": 2.634331060026311, "scores": [2.5278659297073567, 0.8485810693228317, 0.5540043337972629, 0.5354351941951638, 0.525159007426543], "rank_score": 0.9982091068898317} -{"id": "liu-etal-2011-insertion", "title": "Insertion, Deletion, or Substitution? Normalizing Text Messages without Pre-categorization nor Supervision", "abstract": "Most text message normalization approaches are based on supervised learning and rely on human labeled training data. In addition, the nonstandard words are often categorized into different types and specific models are designed to tackle each type. In this paper, we propose a unified letter transformation approach that requires neither pre-categorization nor human supervision. Our approach models the generation process from the dictionary words to nonstandard tokens under a sequence labeling framework, where each letter in the dictionary word can be retained, removed, or substituted by other letters/digits. To avoid the expensive and time consuming hand labeling process, we automatically collected a large set of noisy training pairs using a novel web-based approach and performed character-level alignment for model training. 
Experiments on both Twitter and SMS messages show that our system significantly outperformed the state-of-the-art deletion-based abbreviation system and the jazzy spell checker (absolute accuracy gain of 21.69% and 18.16% over jazzy spell checker on the two test sets respectively).", "phrases": ["substitution", "pre-categorization", "noisy training pair", "sms", "conditional random field"], "overall_score": 3.129571915121298, "scores": [2.520841667647541, 0.857396984786792, 0.5475562516622798, 0.5392670171600343, 0.5254941117001913], "rank_score": 0.9981112065913678} -{"id": "seo-etal-2018-phrase", "title": "Phrase-Indexed Question Answering: A New Challenge for Scalable Document Comprehension", "abstract": "We formalize a new modular variant of current question answering tasks by enforcing complete independence of the document encoder from the question encoder. This formulation addresses a key challenge in machine comprehension by building a standalone representation of the document discourse. It additionally leads to a significant scalability advantage since the encoding of the answer candidate phrases in the document can be pre-computed and indexed offline for efficient retrieval. We experiment with baseline models for the new task, which achieve a reasonable accuracy but significantly underperform unconstrained QA models. We invite the QA research community to engage in Phrase-Indexed Question Answering (PIQA, pika) for closing the gap. The leaderboard is at: ", "phrases": ["question answering", "encoding", "text span"], "overall_score": 2.3931740429522024, "scores": [1.9026348660877548, 0.5480761983129844, 0.5433822182796274], "rank_score": 0.9980310942267888} -{"id": "gabriel-etal-2021-go", "title": "GO FIGURE: A Meta Evaluation of Factuality in Summarization", "abstract": "While neural language models can generate text with remarkable fluency and coherence, controlling for factual correctness in generation remains an open research question. This major discrepancy between the surface-level fluency and the content-level correctness of neural generation has motivated a new line of research that seeks automatic metrics for evaluating the factuality of machine text. In this paper, we introduce GO FIGURE, a meta-evaluation framework for evaluating factuality evaluation metrics. We propose five necessary and intuitive conditions to evaluate factuality metrics on diagnostic factuality data across three different summarization tasks. Our benchmark analysis on ten factuality metrics reveals that our meta-evaluation framework provides a robust and efficient evaluation that is extensible to multiple types of factual consistency and standard generation metrics, including QA metrics. It also reveals that while QA metrics generally improve over standard metrics that measure factuality across domains, performance is highly dependent on the way in which questions are generated.", "phrases": ["factuality", "meta-evaluation framework", "evaluation metric"], "overall_score": 2.5597423778747035, "scores": [1.906138567125965, 0.5514347603415164, 0.536336517685968], "rank_score": 0.9979699483844833} -{"id": "szpektor-dagan-2008-learning", "title": "Learning Entailment Rules for Unary Templates", "abstract": "Most work on unsupervised entailment rule acquisition focused on rules between templates with two variables, ignoring unary rules - entailment rules between templates with a single variable. 
In this paper we investigate two approaches for unsupervised learning of such rules and compare the proposed methods with a binary rule learning method. The results show that the learned unary rule-sets outperform the binary rule-set. In addition, a novel directional similarity measure for learning entailment, termed Balanced-Inclusion, is the best performing measure.", "phrases": ["entailment rule", "unsupervised learning", "similarity measure"], "overall_score": 3.083760199819388, "scores": [1.2236142678463402, 1.2196726333997105, 0.5496453340931798], "rank_score": 0.9976440784464101} -{"id": "guo-etal-2021-parameter", "title": "Parameter-Efficient Transfer Learning with Diff Pruning", "abstract": "The large size of pretrained networks makes them difficult to deploy for multiple tasks in storage-constrained settings. Diff pruning enables parameter-efficient transfer learning that scales well with new tasks. The approach learns a task-specific \u201cdiff\u201d vector that extends the original pretrained parameters. This diff vector is adaptively pruned during training with a differentiable approximation to the L0-norm penalty to encourage sparsity. As the number of tasks increases, diff pruning remains parameter-efficient, as it requires storing only a small diff vector for each task. Since it does not require access to all tasks during training, it is attractive in on-device deployment settings where tasks arrive in stream or even from different providers. Diff pruning can match the performance of finetuned baselines on the GLUE benchmark while only modifying 0.5% of the pretrained model's parameters per task and scales favorably in comparison to popular pruning approaches.", "phrases": ["diff pruning", "new task", "parameter-efficient transfer", "difference-vector"], "overall_score": 2.191805154241215, "scores": [2.313159317050172, 0.5988341877302388, 0.5521407888130214, 0.5259997634330458], "rank_score": 0.9975335142566196} -{"id": "mihalcea-2005-unsupervised", "title": "Unsupervised Large-Vocabulary Word Sense Disambiguation with Graph-based Algorithms for Sequence Data Labeling", "abstract": "This paper introduces a graph-based algorithm for sequence data labeling, using random walks on graphs encoding label dependencies. The algorithm is illustrated and tested in the context of an unsupervised word sense disambiguation problem, and shown to significantly outperform the accuracy achieved through individual label assignment, as measured on standard sense-annotated data sets.", "phrases": ["word sense disambiguation", "graph-based algorithm", "sequence data labeling", "wsd", "node"], "overall_score": 3.0363831188788293, "scores": [0.9199849276504343, 0.8898644615556502, 1.226316327453818, 1.1199276544165409, 0.8305394769135351], "rank_score": 0.9973265695979958} -{"id": "jarvis-etal-2013-maximizing", "title": "Maximizing Classification Accuracy in Native Language Identification", "abstract": "This paper reports our contribution to the 2013 NLI Shared Task. The purpose of the task was to train a machine-learning system to identify the native-language affiliations of 1,100 texts written in English by nonnative speakers as part of a high-stakes test of general academic English proficiency. We trained our system on the new TOEFL11 corpus, which includes 11,000 essays written by nonnative speakers from 11 native-language backgrounds. 
Our final system used an SVM classifier with over 400,000 unique features consisting of lexical and POS n-grams occurring in at least two texts in the training set. Our system identified the correct native-language affiliations of 83.6% of the texts in the test set. This was the highest classification accuracy achieved in the 2013 NLI Shared Task.", "phrases": ["classification accuracy", "native language identification", "lexical feature", "part-of-speech tag"], "overall_score": 2.764798884075619, "scores": [1.8736372970160506, 0.9723485751235473, 0.5800477323249846, 0.5627280346466526], "rank_score": 0.9971904097778088} -{"id": "daume-iii-etal-2010-frustratingly", "title": "Frustratingly Easy Semi-Supervised Domain Adaptation", "abstract": "In this work, we propose a semi-supervised extension to a well-known supervised domain adaptation approach (EA) (Daume III, 2007). Our proposed approach (EA++) builds on the notion of augmented space (introduced in EA) and harnesses unlabeled data in target domain to ameliorate the transfer of information from source to target. This semi-supervised approach to domain adaptation is extremely simple to implement, and can be applied as a pre-processing step to any supervised learner. Experimental results on sequential labeling tasks demonstrate the efficacy of the proposed method.", "phrases": ["domain adaptation", "unlabeled data", "performance drop"], "overall_score": 2.4774563336720763, "scores": [1.2776658756739085, 1.1776523505461556, 0.5356870904787829], "rank_score": 0.9970017722329491} -{"id": "de-vassimon-manela-etal-2021-stereotype", "title": "Stereotype and Skew: Quantifying Gender Bias in Pre-trained and Fine-tuned Language Models", "abstract": "This paper proposes two intuitive metrics, skew and stereotype, that quantify and analyse the gender bias present in contextual language models when tackling the WinoBias pronoun resolution task. We find evidence that gender stereotype correlates approximately negatively with gender skew in out-of-the-box models, suggesting that there is a trade-off between these two forms of bias. We investigate two methods to mitigate bias. The first approach is an online method which is effective at removing skew at the expense of stereotype. The second, inspired by previous work on ELMo, involves the fine-tuning of BERT using an augmented gender-balanced dataset. We show that this reduces both skew and stereotype relative to its unaugmented fine-tuned counterpart. However, we find that existing gender bias benchmarks do not fully probe professional bias as pronoun resolution may be obfuscated by cross-correlations from other manifestations of gender prejudice.", "phrases": ["skew", "gender bias", "language model", "stereotype"], "overall_score": 1.604276491210878, "scores": [1.734801522687692, 0.8584220047632704, 0.8253390202310275, 0.568609567046148], "rank_score": 0.9967930286820346} -{"id": "rosario-hearst-2005-multi", "title": "Multi-way Relation Classification: Application to Protein-Protein Interactions", "abstract": "We address the problem of multi-way relation classification, applied to identification of the interactions between proteins in bioscience text. A major impediment to such work is the acquisition of appropriately labeled training data; for our experiments we have identified a database that serves as a proxy for training data. We use two graphical models and a neural net for the classification of the interactions, achieving an accuracy of 64% for a 10-way distinction between relation types. 
We also provide evidence that the exploitation of the sentences surrounding a citation to a paper can yield higher accuracy than other sentences.", "phrases": ["relation classification", "protein", "graphical model"], "overall_score": 1.6040209780141657, "scores": [1.8845029218183587, 0.5699301117260422, 0.5354697746756278], "rank_score": 0.9966342694066762} -{"id": "nguyen-etal-2017-aggregating", "title": "Aggregating and Predicting Sequence Labels from Crowd Annotations", "abstract": "Despite sequences being core to NLP, scant work has considered how to handle noisy sequence labels from multiple annotators for the same text. Given such annotations, we consider two complementary tasks: (1) aggregating sequential crowd labels to infer a best single set of consensus annotations; and (2) using crowd annotations as training data for a model that can predict sequences in unannotated text. For aggregation, we propose a novel Hidden Markov Model variant. To predict sequences in unannotated text, we propose a neural approach using Long Short Term Memory. We evaluate a suite of methods across two different applications and text genres: Named-Entity Recognition in news articles and Information Extraction from biomedical abstracts. Results show improvement over strong baselines. Our source code and data are available online.", "phrases": ["crowd annotation", "hidden markov model", "information extraction", "labeling task", "noise"], "overall_score": 2.9343552131084802, "scores": [1.9912882068454418, 1.318059918400938, 0.6014578009332338, 0.5400613766557115, 0.5320092890565287], "rank_score": 0.9965753183783708} -{"id": "nivre-2003-efficient", "title": "An Efficient Algorithm for Projective Dependency Parsing", "abstract": "This paper presents a deterministic parsing algorithm for projective dependency grammar. The running time of the algorithm is linear in the length of the input string, and the dependency graph produced is guaranteed to be projective and acyclic. The algorithm has been experimentally evaluated in parsing unrestricted Swedish text, achieving an accuracy above 85% with a very simple grammar.", "phrases": ["projective dependency parsing", "transition-based parser", "nivre", "linear time", "natural language text"], "overall_score": 3.815225321928374, "scores": [0.9992625382798206, 1.280384475926367, 1.2624760763224623, 0.8870943297742566, 0.5532619069944691], "rank_score": 0.9964958654594751} -{"id": "poesio-etal-2004-learning", "title": "Learning to Resolve Bridging References", "abstract": "We use machine learning techniques to find the best combination of local focus and lexical distance features for identifying the anchor of mereological bridging references. 
We find that using first mention, utterance distance, and lexical distance computed using either Google or WordNet results in an accuracy significantly higher than obtained in previous experiments.", "phrases": ["bridging reference", "anaphora resolution", "antecedent", "semantic relatedness", "web"], "overall_score": 2.8802066304835114, "scores": [1.4945109688717055, 1.2346738428109054, 1.0725046362818866, 0.6276682764776778, 0.5530578119245858], "rank_score": 0.9964831072733522} -{"id": "liang-etal-2020-xglue", "title": "XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation", "abstract": "In this paper, we introduce XGLUE, a new benchmark dataset to train large-scale cross-lingual pre-trained models using multilingual and bilingual corpora, and evaluate their performance across a diverse set of cross-lingual tasks. Comparing to GLUE (Wang et al.,2019), which is labeled in English and includes natural language understanding tasks only, XGLUE has three main advantages: (1) it provides two corpora with different sizes for cross-lingual pre-training; (2) it provides 11 diversified tasks that cover both natural language understanding and generation scenarios; (3) for each task, it provides labeled data in multiple languages. We extend a recent cross-lingual pre-trained model Unicoder (Huang et al., 2019) to cover both understanding and generation tasks, which is evaluated on XGLUE as a strong baseline. We also evaluate the base versions (12-layer) of Multilingual BERT, XLM and XLM-R for comparison.", "phrases": ["cross-lingual pre-training", "pre-trained model", "diverse set", "generation task"], "overall_score": 2.823245845963525, "scores": [1.9752995013713326, 0.9306426764036111, 0.5573036504148207, 0.522681813844533], "rank_score": 0.9964819105085744} -{"id": "bouamor-etal-2019-madar", "title": "The MADAR Shared Task on Arabic Fine-Grained Dialect Identification", "abstract": "In this paper, we present the results and findings of the MADAR Shared Task on Arabic Fine-Grained Dialect Identification. This shared task was organized as part of The Fourth Arabic Natural Language Processing Workshop, collocated with ACL 2019. The shared task includes two subtasks: the MADAR Travel Domain Dialect Identification subtask (Subtask 1) and the MADAR Twitter User Dialect Identification subtask (Subtask 2). This shared task is the first to target a large set of dialect labels at the city and country levels. The data for the shared task was created or collected under the Multi-Arabic Dialect Applications and Resources (MADAR) project. A total of 21 teams from 15 countries participated in the shared task.", "phrases": ["madar shared task", "arabic", "dialect"], "overall_score": 3.1663810130665873, "scores": [1.9023182798626528, 0.563408406632652, 0.5232544781691588], "rank_score": 0.9963270548881544} -{"id": "hiraoka-etal-2020-optimizing", "title": "Optimizing Word Segmentation for Downstream Task", "abstract": "In traditional NLP, we tokenize a given sentence as a preprocessing, and thus the tokenization is unrelated to a target downstream task. To address this issue, we propose a novel method to explore a tokenization which is appropriate for the downstream task. Our proposed method, optimizing tokenization (OpTok), is trained to assign a high probability to such appropriate tokenization based on the downstream task loss. OpTok can be used for any downstream task which uses a vector representation of a sentence such as text classification. 
Experimental results demonstrate that OpTok improves the performance of sentiment analysis and textual entailment. In addition, we introduce OpTok into BERT, the state-of-the-art contextualized embeddings and report a positive effect.", "phrases": ["downstream task", "tokenizer", "optok"], "overall_score": 1.380473568511987, "scores": [1.8633921833907712, 0.583601258854484, 0.5404101148105463], "rank_score": 0.9958011856852672} -{"id": "marcheggiani-etal-2017-simple", "title": "A Simple and Accurate Syntax-Agnostic Neural Model for Dependency-based Semantic Role Labeling", "abstract": "We introduce a simple and accurate neural model for dependency-based semantic role labeling. Our model predicts predicate-argument dependencies relying on states of a bidirectional LSTM encoder. The semantic role labeler achieves competitive performance on English, even without any kind of syntactic information and only using local inference. However, when automatically predicted part-of-speech tags are provided as input, it substantially outperforms all previous local models and approaches the best reported results on the English CoNLL-2009 dataset. We also consider Chinese, Czech and Spanish where our approach also achieves competitive results. Syntactic parsers are unreliable on out-of-domain data, so standard (i.e., syntactically-informed) SRL models are hindered when tested in this setting. Our syntax-agnostic model appears more robust, resulting in the best reported results on standard out-of-domain test sets.", "phrases": ["syntax-agnostic neural model", "semantic role labeling", "dependency srl"], "overall_score": 3.3526237494646307, "scores": [0.9298248095636904, 1.204207680655036, 0.8528958214628442], "rank_score": 0.9956427705605235} -{"id": "mager-etal-2020-gpt", "title": "GPT-too: A Language-Model-First Approach for AMR-to-Text Generation", "abstract": "Abstract Meaning Representations (AMRs) are broad-coverage sentence-level semantic graphs. Existing approaches to generating text from AMR have focused on training sequence-to-sequence or graph-to-sequence models on AMR annotated data only. In this paper, we propose an alternative approach that combines a strong pre-trained language model with cycle consistency-based re-scoring. Despite the simplicity of the approach, our experimental results show these models outperform all previous techniques on the English LDC2017T10 dataset, including the recent use of transformer architectures. In addition to the standard evaluation metrics, we provide human evaluation experiments that further substantiate the strength of our approach.", "phrases": ["amr-to-text generation", "language model", "pre-trained generative model"], "overall_score": 2.6960588309992133, "scores": [1.5611262001399937, 0.864653120040753, 0.5609365426587423], "rank_score": 0.9955719542798297} -{"id": "miyao-tsujii-2008-feature", "title": "Feature Forest Models for Probabilistic HPSG Parsing", "abstract": "Probabilistic modeling of lexicalized grammars is difficult because these grammars exploit complicated data structures, such as typed feature structures. This prevents us from applying common methods of probabilistic modeling in which a complete structure is divided into sub-structures under the assumption of statistical independence among sub-structures. For example, part-of-speech tagging of a sentence is decomposed into tagging of each word, and CFG parsing is split into applications of CFG rules. 
These methods have relied on the structure of the target problem, namely lattices or trees, and cannot be applied to graph structures including typed feature structures. This article proposes the feature forest model as a solution to the problem of probabilistic modeling of complex data structures including typed feature structures. The feature forest model provides a method for probabilistic modeling without the independence assumption when probabilistic events are represented with feature forests. Feature forests are generic data structures that represent ambiguous trees in a packed forest structure. Feature forest models are maximum entropy models defined over feature forests. A dynamic programming algorithm is proposed for maximum entropy estimation without unpacking feature forests. Thus probabilistic modeling of any data structures is possible when they are represented by feature forests. This article also describes methods for representing HPSG syntactic structures and predicate-argument structures with feature forests. Hence, we describe a complete strategy for developing probabilistic models for HPSG parsing. The effectiveness of the proposed methods is empirically evaluated through parsing experiments on the Penn Treebank, and the promise of applicability to parsing of real-world sentences is discussed.", "phrases": ["hpsg", "english sentence", "head information"], "overall_score": 2.2922528867180407, "scores": [1.9012114097806376, 0.5552789777438292, 0.5300479519608674], "rank_score": 0.9955127798284448} -{"id": "kim-zhang-2014-credibility", "title": "Credibility Adjusted Term Frequency: A Supervised Term Weighting Scheme for Sentiment Analysis and Text Classification", "abstract": "We provide a simple but novel supervised weighting scheme for adjusting term frequency in tf-idf for sentiment analysis and text classification. We compare our method to baseline weighting schemes and find that it outperforms them on multiple benchmarks. The method is robust and works well on both snippets and longer documents.", "phrases": ["weighting scheme", "sentiment analysis", "text classification", "tf-idf"], "overall_score": 1.0936759392926532, "scores": [1.9281147208886733, 0.9183892058019251, 0.5916776808658453, 0.5438453570920547], "rank_score": 0.9955067411621246} -{"id": "agirre-etal-2008-improving", "title": "Improving Parsing and PP Attachment Performance with Sense Information", "abstract": "To date, parsers have made limited use of semantic information, but there is evidence to suggest that semantic features can enhance parse disambiguation. This paper shows that semantic classes help to obtain significant improvement in both parsing and PP attachment tasks. We devise a gold-standard sense- and parse tree-annotated dataset based on the intersection of the Penn Treebank and SemCor, and experiment with different approaches to both semantic representation and disambiguation. For the Bikel parser, we achieved a maximal error reduction rate over the baseline parser of 6.9% and 20.5%, for parsing and PP-attachment respectively, using an unsupervised WSD strategy. 
This demonstrates that word sense information can indeed enhance the performance of syntactic disambiguation.", "phrases": ["semantic class", "penn treebank", "pp-attachment", "prepositional phrase attachment"], "overall_score": 2.9303493015138593, "scores": [1.368271245001612, 1.1296394398188307, 0.9194877503792568, 0.5634608351030329], "rank_score": 0.9952148175756831} -{"id": "li-etal-2015-tree", "title": "When Are Tree Structures Necessary for Deep Learning of Representations?", "abstract": "Recursive neural models, which use syntactic parse trees to recursively generate representations bottom-up, are a popular architecture. However there have not been rigorous evaluations showing for exactly which tasks this syntax-based method is appropriate. In this paper, we benchmark recursive neural models against sequential recurrent neural models, enforcing apples-to-apples comparison as much as possible. We investigate 4 tasks: (1) sentiment classification at the sentence level and phrase level; (2) matching questions to answer phrases; (3) discourse parsing; (4) semantic relation extraction. Our goal is to understand better when, and why, recursive models can outperform simpler models. We find that recursive models help mainly on tasks (like semantic relation extraction) that require long-distance connection modeling, particularly on very long sequences. We then introduce a method for allowing recurrent models to achieve similar performance: breaking long sentences into clause-like units at punctuation and processing them separately before combining. Our results thus help understand the limitations of both classes of models, and suggest directions for improving recurrent models.", "phrases": ["tree structure", "recurrent model", "various task"], "overall_score": 3.242449388416271, "scores": [1.2670832093046305, 1.1797305671868978, 0.5387785936952247], "rank_score": 0.9951974567289178} -{"id": "shah-etal-2018-adversarial", "title": "Adversarial Domain Adaptation for Duplicate Question Detection", "abstract": "We address the problem of detecting duplicate questions in forums, which is an important step towards automating the process of answering new questions. As finding and annotating such potential duplicates manually is very tedious and costly, automatic methods based on machine learning are a viable alternative. However, many forums do not have annotated data, i.e., questions labeled by experts as duplicates, and thus a promising solution is to use domain adaptation from another forum that has such annotations. Here we focus on adversarial domain adaptation, deriving important findings about when it performs well and what properties of the domains are important in this regard. Our experiments with StackExchange data show an average improvement of 5.6% over the best baseline across multiple pairs of domains.", "phrases": ["duplicate question detection", "adversarial domain adaptation", "target domain", "supervised training"], "overall_score": 2.186574248086482, "scores": [1.9560882762640532, 0.9662288216364108, 0.5305322852613211, 0.5277619219190093], "rank_score": 0.9951528262701987} -{"id": "he-etal-2008-indirect", "title": "Indirect-HMM-based Hypothesis Alignment for Combining Outputs from Machine Translation Systems", "abstract": "This paper presents a new hypothesis alignment method for combining outputs of multiple machine translation (MT) systems. An indirect hidden Markov model (IHMM) is proposed to address the synonym matching and word ordering issues in hypothesis alignment. 
Unlike traditional HMMs whose parameters are trained via maximum likelihood estimation (MLE), the parameters of the IHMM are estimated indirectly from a variety of sources including word semantic similarity, word surface similarity, and a distance-based distortion penalty. The IHMM-based method significantly outperforms the state-of-the-art TER-based alignment model in our experiments on NIST benchmark datasets. Our combined SMT system using the proposed method achieved the best Chinese-to-English translation result in the constrained training track of the 2008 NIST Open MT Evaluation.", "phrases": ["hypothesis alignment", "ihmm", "synonym matching", "translation result", "system combination"], "overall_score": 3.029359556608215, "scores": [1.9105924177187004, 1.098523783027013, 0.8406341119946176, 0.5907442557820655, 0.5346035274427854], "rank_score": 0.9950196191930363} -{"id": "li-etal-2012-wiki", "title": "Wiki-ly Supervised Part-of-Speech Tagging", "abstract": "Despite significant recent work, purely unsupervised techniques for part-of-speech (POS) tagging have not achieved useful accuracies required by many language processing tasks. Use of parallel text between resource-rich and resource-poor languages is one source of weak supervision that significantly improves accuracy. However, parallel text is not always available and techniques for using it require multiple complex algorithmic steps. In this paper we show that we can build POS-taggers exceeding state-of-the-art bilingual methods by using simple hidden Markov models and a freely available and naturally growing resource, the Wiktionary. Across eight languages for which we have labeled data to evaluate results, we achieve accuracy that significantly exceeds best unsupervised and parallel text methods. We achieve highest accuracy reported for several languages and show that our approach yields better out-of-domain taggers than those trained using fully supervised Penn Treebank.", "phrases": ["markov model", "wiktionary", "dictionary", "pos tagging"], "overall_score": 2.87551814762151, "scores": [1.6607982225780686, 0.8726737967006829, 0.8472458039567722, 0.5987261908653249], "rank_score": 0.9948610035252122} -{"id": "rubinstein-etal-2015-well", "title": "How Well Do Distributional Models Capture Different Types of Semantic Knowledge?", "abstract": "In recent years, distributional models (DMs) have shown great success in representing lexical semantics. In this work we show that the extent to which DMs represent semantic knowledge is highly dependent on the type of knowledge. We pose the task of predicting properties of concrete nouns in a supervised setting, and compare between learning taxonomic properties (e.g., animacy) and attributive properties (e.g., size, color). We employ four state-of-the-art DMs as sources of feature representation for this task, and show that they all yield poor results when tested on attributive properties, achieving no more than an average F-score of 0.37 in the binary property prediction task, compared to 0.73 on taxonomic properties. 
Our results suggest that the distributional hypothesis may not be equally applicable to all types of semantic information.", "phrases": ["semantic knowledge", "taxonomic property", "attributive property", "distributional hypothesis", "word embedding"], "overall_score": 2.5517353340700604, "scores": [0.9155907897162022, 1.1310115040436937, 1.0853751027515557, 0.9652361090405469, 0.8770276556759297], "rank_score": 0.9948482322455856} -{"id": "patwardhan-riloff-2009-unified", "title": "A Unified Model of Phrasal and Sentential Evidence for Information Extraction", "abstract": "Information Extraction (IE) systems that extract role fillers for events typically look at the local context surrounding a phrase when deciding whether to extract it. Often, however, role fillers occur in clauses that are not directly linked to an event word. We present a new model for event extraction that jointly considers both the local context around a phrase along with the wider sentential context in a probabilistic framework. Our approach uses a sentential event recognizer and a plausible role-filler recognizer that is conditioned on event sentences. We evaluate our system on two IE data sets and show that our model performs well in comparison to existing IE systems that rely on local phrasal context.", "phrases": ["sentential evidence", "information extraction", "role filler", "local context", "probabilistic framework"], "overall_score": 3.028607338212637, "scores": [0.9167504752554911, 0.9125865639967499, 1.1467492170416818, 1.1441950540031551, 0.8535814221407131], "rank_score": 0.994772546487558} -{"id": "melamed-etal-2004-generalized", "title": "Generalized Multitext Grammars", "abstract": "Generalized Multitext Grammar (GMTG) is a synchronous grammar formalism that is weakly equivalent to Linear Context-Free Rewriting Systems (LCFRS), but retains much of the notational and intuitive simplicity of Context-Free Grammar (CFG). GMTG allows both synchronous and independent rewriting. Such flexibility facilitates more perspicuous modeling of parallel text than what is possible with other synchronous formalisms. This paper investigates the generative capacity of GMTG, proves that each component grammar of a GMTG retains its generative power, and proposes a generalization of Chomsky Normal Form, which is necessary for synchronous CKY-style parsing.", "phrases": ["lcfrs", "generalized multitext grammar", "machine translation"], "overall_score": 2.290354155458504, "scores": [1.0293061588624624, 1.0615792701948294, 0.8931790849021407], "rank_score": 0.9946881713198109} -{"id": "dickinson-meurers-2003-detecting", "title": "Detecting Errors in Part-of-Speech Annotation", "abstract": "We propose a new method for detecting errors in \u201cgold-standard\u201d part-of-speech annotation. The approach locates errors with high precision based on n-grams occurring in the corpus with multiple taggings. Two further techniques, closed-class analysis and finite-state tagging guide patterns, are discussed. 
The success of the three approaches is illustrated for the Wall Street Journal corpus as part of the Penn Treebank.", "phrases": ["part-of-speech annotation", "pos tag", "variation", "annotation error", "word ngram"], "overall_score": 2.471124612218381, "scores": [2.1262412680321034, 0.8774631136040891, 0.8368935611329259, 0.5832291778424662, 0.5484413797548261], "rank_score": 0.9944537000732823} -{"id": "pruthi-etal-2019-combating", "title": "Combating Adversarial Misspellings with Robust Word Recognition", "abstract": "To combat adversarial spelling mistakes, we propose placing a word recognition model in front of the downstream classifier. Our word recognition models build upon the RNN semi-character architecture, introducing several new backoff strategies for handling rare and unseen words. Trained to recognize words corrupted by random adds, drops, swaps, and keyboard mistakes, our method achieves 32% relative (and 3.3% absolute) error reduction over the vanilla semi-character model. Notably, our pipeline confers robustness on the downstream classifier, outperforming both adversarial training and off-the-shelf spell checkers. Against a BERT model fine-tuned for sentiment analysis, a single adversarially-chosen character attack lowers accuracy from 90.3% to 45.8%. Our defense restores accuracy to 75%. Surprisingly, better word recognition does not always entail greater robustness. Our analysis reveals that robustness also depends upon a quantity that we denote the sensitivity.", "phrases": ["misspelling", "robustness", "word recognition model", "attack", "input sentence"], "overall_score": 2.9777122268994423, "scores": [1.4492382505215753, 1.0798793265600561, 0.9556466706385245, 0.9387794533502517, 0.5463801021785883], "rank_score": 0.9939847606497991} -{"id": "henderson-etal-2013-multilingual", "title": "Multilingual Joint Parsing of Syntactic and Semantic Dependencies with a Latent Variable Model", "abstract": "Current investigations in data-driven models of parsing have shifted from purely syntactic analysis to richer semantic representations, showing that the successful recovery of the meaning of text requires structured analyses of both its grammar and its semantics. In this article, we report on a joint generative history-based model to predict the most likely derivation of a dependency parser for both syntactic and semantic dependencies, in multiple languages. Because these two dependency structures are not isomorphic, we propose a weak synchronization at the level of meaningful subsequences of the two derivations. These synchronized subsequences encompass decisions about the left side of each individual word. We also propose novel derivations for semantic dependency structures, which are appropriate for the relatively unconstrained nature of these graphs. To train a joint model of these synchronized derivations, we make use of a latent variable model of parsing, the Incremental Sigmoid Belief Network (ISBN) architecture. This architecture induces latent feature representations of the derivations, which are used to discover correlations both within and between the two derivations, providing the first application of ISBNs to a multi-task learning problem. This joint model achieves competitive performance on both syntactic and semantic dependency parsing for several languages. 
Because of the general nature of the approach, this extension of the ISBN architecture to weakly synchronized syntactic-semantic derivations is also an exemplification of its applicability to other problems where two independent, but related, representations are being learned.", "phrases": ["latent variable model", "semantic representation", "multi-task learning"], "overall_score": 2.6916072809572142, "scores": [1.827142570014135, 0.5929473257226432, 0.5616945038231305], "rank_score": 0.9939281331866362} -{"id": "rashtchian-etal-2010-collecting", "title": "Collecting Image Annotations Using Amazon's Mechanical Turk", "abstract": "Crowd-sourcing approaches such as Amazon's Mechanical Turk (MTurk) make it possible to annotate or collect large amounts of linguistic data at a relatively low cost and high speed. However, MTurk offers only limited control over who is allowed to participate in a particular task. This is particularly problematic for tasks requiring free-form text entry. Unlike multiple-choice tasks there is no correct answer, and therefore control items for which the correct answer is known cannot be used. Furthermore, MTurk has no effective built-in mechanism to guarantee workers are proficient English writers. We describe our experience in creating corpora of images annotated with multiple one-sentence descriptions on MTurk and explore the effectiveness of different quality control strategies for collecting linguistic data using Mechanical Turk. We find that the use of a qualification test provides the highest improvement of quality, whereas refining the annotations through follow-up tasks works rather poorly. Using our best setup, we construct two image corpora, totaling more than 40,000 descriptive captions for 9000 images.", "phrases": ["amazon", "whole image", "human-created description"], "overall_score": 2.2880569673815536, "scores": [1.8615535686980103, 0.5711635235107172, 0.5483544534335641], "rank_score": 0.9936905152140972} -{"id": "stahlberg-etal-2022-uncertainty", "title": "Uncertainty Determines the Adequacy of the Mode and the Tractability of Decoding in Sequence-to-Sequence Models", "abstract": "In many natural language processing (NLP) tasks the same input (e.g. source sentence) can have multiple possible outputs (e.g. translations). To analyze how this ambiguity (also known as intrinsic uncertainty) shapes the distribution learned by neural sequence models we measure sentence-level uncertainty by computing the degree of overlap between references in multi-reference test sets from two different NLP tasks: machine translation (MT) and grammatical error correction (GEC). At both the sentence- and the task-level, intrinsic uncertainty has major implications for various aspects of search such as the inductive biases in beam search and the complexity of exact search. In particular, we show that well-known pathologies such as a high number of beam search errors, the inadequacy of the mode, and the drop in system performance with large beam sizes apply to tasks with high level of ambiguity such as MT but not to less uncertain tasks such as GEC. 
Furthermore, we propose a novel exact n-best search algorithm for neural sequence models, and show that intrinsic uncertainty affects model uncertainty as the model tends to overly spread out the probability mass for uncertain tasks and sentences.", "phrases": ["mode", "source sentence", "uncertainty"], "overall_score": 1.0916596874172635, "scores": [1.6479586391928247, 0.7972377939121561, 0.5358179757381736], "rank_score": 0.9936714696143848} -{"id": "johnson-etal-2007-bayesian", "title": "Bayesian Inference for PCFGs via Markov Chain Monte Carlo", "abstract": "This paper presents two Markov chain Monte Carlo (MCMC) algorithms for Bayesian inference of probabilistic context free grammars (PCFGs) from terminal strings, providing an alternative to maximum-likelihood estimation using the Inside-Outside algorithm. We illustrate these methods by estimating a sparse grammar describing the morphology of the Bantu language Sesotho, demonstrating that with suitable priors Bayesian techniques can infer linguistic structure in situations where maximum likelihood methods such as the Inside-Outside algorithm only produce a trivial grammar.", "phrases": ["pcfg", "terminal string", "bayesian inference", "induction", "recent research"], "overall_score": 2.976477723691891, "scores": [2.0959478361739254, 0.9217940817116997, 0.8384293197478315, 0.561519824884777, 0.550172304258435], "rank_score": 0.9935726733553338} -{"id": "wang-etal-2018-learning-ask", "title": "Learning to Ask Questions in Open-domain Conversational Systems with Typed Decoders", "abstract": "Asking good questions in open-domain conversational systems is quite significant but remains largely untouched. This task, substantially different from traditional question generation, requires questioning not only with various patterns but also on diverse and relevant topics. We observe that a good question is a natural composition of interrogatives, topic words, and ordinary words. Interrogatives lexicalize the pattern of questioning, topic words address the key information for topic transition in dialogue, and ordinary words play syntactical and grammatical roles in making a natural sentence. We devise two typed decoders (soft typed decoder and hard typed decoder) in which a type distribution over the three types is estimated and the type distribution is used to modulate the final generation distribution. Extensive experiments show that the typed decoders outperform state-of-the-art baselines and can generate more meaningful questions.", "phrases": ["conversation", "typed decoder", "good question"], "overall_score": 2.814880875734501, "scores": [0.823439944526118, 1.1137986877309056, 1.043349696863858], "rank_score": 0.9935294430402939} -{"id": "feng-etal-2009-lattice", "title": "Lattice-based System Combination for Statistical Machine Translation", "abstract": "Current system combination methods usually use confusion networks to find consensus translations among different systems. Requiring one-to-one mappings between the words in candidate translations, confusion networks have difficulty in handling more general situations in which several words are connected to another several words. Instead, we propose a lattice-based system combination model that allows for such phrase alignments and uses lattices to encode all candidate translations. 
Experiments show that our approach achieves significant improvements over the state-of-the-art baseline system on Chinese-to-English translation test sets.", "phrases": ["system combination", "lattice", "hypothesis"], "overall_score": 2.1826544291104155, "scores": [0.8422646161134859, 1.3029176596278504, 0.8349242435793219], "rank_score": 0.9933688397735528} -{"id": "du-etal-2010-facilitating", "title": "Facilitating Translation Using Source Language Paraphrase Lattices", "abstract": "For resource-limited language pairs, coverage of the test set by the parallel corpus is an important factor that affects translation quality in two respects: 1) out-of-vocabulary words; 2) the same information in an input sentence can be expressed in different ways, while current phrase-based SMT systems cannot automatically select an alternative way to transfer the same information. Therefore, given limited data, in order to facilitate translation from the input side, this paper proposes a novel method to reduce the translation difficulty using source-side lattice-based paraphrases. We utilise the original phrases from the input sentence and the corresponding paraphrases to build a lattice with estimated weights for each edge to improve translation quality. Compared to the baseline system, our method achieves relative improvements of 7.07%, 6.78% and 3.63% in terms of BLEU score on small, medium and large-scale English-to-Chinese translation tasks respectively. The results show that the proposed method is effective not only for resource-limited language pairs, but also for resource-sufficient pairs to some extent.", "phrases": ["paraphrase", "lattice", "translation task"], "overall_score": 2.3819376097705507, "scores": [1.5400453352284393, 0.8438124310110197, 0.5961776466384388], "rank_score": 0.9933451376259659} -{"id": "tsvetkov-wintner-2010-extraction", "title": "Extraction of Multi-word Expressions from Small Parallel Corpora", "abstract": "We present a general, novel methodology for extracting multi-word expressions (MWEs) of various types, along with their translations, from small, word-aligned parallel corpora. Unlike existing approaches, we focus on misalignments; these typically indicate expressions in the source language that are translated to the target in a non-compositional way. We introduce a simple algorithm that proposes MWE candidates based on such misalignments, relying on 1:1 alignments as anchors that delimit the search space. We use a large monolingual corpus to rank and filter these candidates. Evaluation of the quality of the extraction algorithm reveals significant improvements over na\u00efve alignment-based methods. The extracted MWEs, with their translations, are used in the training of a statistical machine translation system, showing a small but significant improvement in its performance.", "phrases": ["parallel corpora", "mwe", "extraction", "hebrew multiword expression"], "overall_score": 2.5478351715012058, "scores": [0.8557098539621459, 1.5714835345357192, 0.9383470076122721, 0.607770287922598], "rank_score": 0.9933276710081838} -{"id": "brazinskas-etal-2020-unsupervised", "title": "Unsupervised Opinion Summarization as Copycat-Review Generation", "abstract": "Opinion summarization is the task of automatically creating summaries that reflect subjective information expressed in multiple documents, such as product reviews. 
While the majority of previous work has focused on the extractive setting, i.e., selecting fragments from input reviews to produce a summary, we let the model generate novel sentences and hence produce abstractive summaries. Recent progress in summarization has seen the development of supervised models which rely on large quantities of document-summary pairs. Since such training data is expensive to acquire, we instead consider the unsupervised setting, in other words, we do not use any summaries in training. We define a generative model for a review collection which capitalizes on the intuition that when generating a new review given a set of other reviews of a product, we should be able to control the \u201camount of novelty\u201d going into the new review or, equivalently, vary the extent to which it deviates from the input. At test time, when generating summaries, we force the novelty to be minimal, and produce a text reflecting consensus opinions. We capture this intuition by defining a hierarchical variational autoencoder model. Both individual reviews and the products they correspond to are associated with stochastic latent codes, and the review generator (\u201cdecoder\u201d) has direct access to the text of input reviews through the pointer-generator mechanism. Experiments on Amazon and Yelp datasets, show that setting at test time the review's latent code to its mean, allows the model to produce fluent and coherent summaries reflecting common opinions.", "phrases": ["review", "novel sentence", "unsupervised opinion summarization"], "overall_score": 2.6897316646500307, "scores": [0.9449891558855976, 1.457059542488545, 0.5776578782121515], "rank_score": 0.9932355255287648} -{"id": "narasimhan-etal-2015-unsupervised", "title": "An Unsupervised Method for Uncovering Morphological Chains", "abstract": "Most state-of-the-art systems today produce morphological analysis based only on orthographic patterns. In contrast, we propose a model for unsupervised morphological analysis that integrates orthographic and semantic views of words. We model word formation in terms of morphological chains, from base words to the observed words, breaking the chains into parent-child relations. We use log-linear models with morpheme and word-level features to predict possible parents, including their modifications, for each word. The limited set of candidate parents for each word render contrastive estimation feasible. Our model consistently matches or outperforms five state-of-the-art systems on Arabic, English and Turkish.", "phrases": ["unsupervised method", "chain", "log-linear model", "turkish", "word form"], "overall_score": 2.8693795321908575, "scores": [0.848049967678429, 1.609114403406939, 1.073519905967577, 0.859472414861165, 0.5735292497853294], "rank_score": 0.9927371883398879} -{"id": "koncel-kedziorski-etal-2019-text", "title": "Text Generation from Knowledge Graphs with Graph Transformers", "abstract": "Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce. In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. 
Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.", "phrases": ["knowledge graph", "graph transformer", "text generation"], "overall_score": 2.618918483407461, "scores": [0.8926608945313824, 0.8788549785531513, 1.2055908998735967], "rank_score": 0.9923689243193768} -{"id": "bojar-etal-2013-chimera", "title": "Chimera \u2013 Three Heads for English-to-Czech Translation", "abstract": "This paper describes our WMT submissions CU-BOJAR and CU-DEPFIX, the latter dubbed \u201cCHIMERA\u201d because it combines three diverse approaches: TectoMT, a system with transfer at the deep syntactic level of representation, factored phrase-based translation using Moses, and finally automatic rule-based correction of frequent grammatical and meaning errors. We do not use any off-the-shelf system combination method.", "phrases": ["moses", "chimera", "depfix"], "overall_score": 1.9309144741042883, "scores": [1.9136259551592887, 0.5416224147367779, 0.5216328726037911], "rank_score": 0.9922937474999527} -{"id": "wolfson-etal-2020-break", "title": "Break It Down: A Question Understanding Benchmark", "abstract": "Understanding natural language questions entails the ability to break down a question into the requisite steps for computing its answer. In this work, we introduce a Question Decomposition Meaning Representation (QDMR) for questions. QDMR constitutes the ordered list of steps, expressed through natural language, that are necessary for answering a question. We develop a crowdsourcing pipeline, showing that quality QDMRs can be annotated at scale, and release the Break dataset, containing over 83K pairs of questions and their QDMRs. We demonstrate the utility of QDMR by showing that (a) it can be used to improve open-domain question answering on the HotpotQA dataset, (b) it can be deterministically converted to a pseudo-SQL formal language, which can alleviate annotation in semantic parsing applications. Last, we use Break to train a sequence-to-sequence model with copying that parses questions into QDMR structures, and show that it substantially outperforms several natural baselines.", "phrases": ["question decomposition", "qdmr", "formal language", "break"], "overall_score": 2.8107388865167837, "scores": [1.8228837259943407, 0.9795028239677678, 0.6080121033970973, 0.5578713569644695], "rank_score": 0.9920675025809188} -{"id": "li-etal-2020-optimus", "title": "Optimus: Organizing Sentences via Pre-trained Modeling of a Latent Space", "abstract": "When trained effectively, the Variational Autoencoder (VAE) can be both a powerful generative model and an effective representation learning framework for natural language. In this paper, we propose the first large-scale language VAE model Optimus (Organizing sentences via Pre-Trained Modeling of a Universal Space). 
A universal latent embedding space for sentences is first pre-trained on large text corpus, and then fine-tuned for various language generation and understanding tasks. Compared with GPT-2, Optimus enables guided language generation from an abstract level using the latent vectors. Compared with BERT, Optimus can generalize better on low-resource language understanding tasks due to the smooth latent space structure. Extensive experimental results on a wide range of language tasks demonstrate the effectiveness of Optimus. It achieves new state-of-the-art on VAE language modeling benchmarks.", "phrases": ["modeling", "vae", "language generation", "gpt-2", "optimus"], "overall_score": 2.5441287460385573, "scores": [2.173336511449405, 0.8576761694901799, 0.8284204011220815, 0.5785696543451987, 0.5214104750812847], "rank_score": 0.9918826422976299} -{"id": "fan-etal-2019-using", "title": "Using Local Knowledge Graph Construction to Scale Seq2Seq Models to Multi-Document Inputs", "abstract": "Query-based open-domain NLP tasks require information synthesis from long and diverse web results. Current approaches extractively select portions of web text as input to Sequence-to-Sequence models using methods such as TF-IDF ranking. We propose constructing a local graph structured knowledge base for each query, which compresses the web search information and reduces redundancy. We show that by linearizing the graph into a structured input sequence, models can encode the graph representations within a standard Sequence-to-Sequence setting. For two generative tasks with very long text input, long-form question answering and multi-document summarization, feeding graph representations as input can achieve better performance than using retrieved text portions.", "phrases": ["knowledge graph", "seq2seq model", "multi-document input"], "overall_score": 2.378328975212763, "scores": [1.3315329719898004, 1.0518179896723532, 0.5921696987175131], "rank_score": 0.9918402201265556} -{"id": "kasewa-etal-2018-wronging", "title": "Wronging a Right: Generating Better Errors to Improve Grammatical Error Detection", "abstract": "Grammatical error correction, like other machine learning tasks, greatly benefits from large quantities of high quality training data, which is typically expensive to produce. While writing a program to automatically generate realistic grammatical errors would be difficult, one could learn the distribution of naturally-occurring errors and attempt to introduce them into other datasets. Initial work on inducing errors in this way using statistical machine translation has shown promise; we investigate cheaply constructing synthetic samples, given a small corpus of human-annotated data, using an off-the-rack attentive sequence-to-sequence model and a straight-forward post-processing procedure. Our approach yields error-filled artificial data that helps a vanilla bi-directional LSTM to outperform the previous state of the art at grammatical error detection, and a previously introduced model to gain further improvements of over 5% F0.5 score. 
When attempting to determine if a given sentence is synthetic, a human annotator at best achieves 39.39 F1 score, indicating that our model generates mostly human-like instances.", "phrases": ["grammatical error detection", "error correction", "artificial data", "back-translation"], "overall_score": 2.866106058446053, "scores": [2.0535432367269677, 0.8641718244359452, 0.5256382590076453, 0.5230652564242064], "rank_score": 0.9916046441486912} -{"id": "wu-etal-2018-word", "title": "Word Mover's Embedding: From Word2Vec to Document Embedding", "abstract": "While the celebrated Word2Vec technique yields semantically rich representations for individual words, there has been relatively less success in extending it to generate unsupervised sentence or document embeddings. Recent work has demonstrated that a distance measure between documents called Word Mover's Distance (WMD) that aligns semantically similar words yields unprecedented KNN classification accuracy. However, WMD is expensive to compute, and it is hard to extend its use beyond a KNN classifier. In this paper, we propose the Word Mover's Embedding (WME), a novel approach to building an unsupervised document (sentence) embedding from pre-trained word embeddings. In our experiments on 9 benchmark text classification datasets and 22 textual similarity tasks, the proposed technique consistently matches or outperforms state-of-the-art techniques, with significantly higher accuracy on problems of short length.", "phrases": ["wmd", "word mover", "transport"], "overall_score": 2.060315278359302, "scores": [1.9157807188561329, 0.5287480646767103, 0.5278778512238863], "rank_score": 0.9908022115855765} -{"id": "aharoni-goldberg-2020-unsupervised", "title": "Unsupervised Domain Clusters in Pretrained Language Models", "abstract": "The notion of \u201cin-domain data\u201d in NLP is often over-simplistic and vague, as textual data varies in many nuanced linguistic aspects such as topic, style or level of formality. In addition, domain labels are many times unavailable, making it challenging to build domain-specific systems. We show that massive pre-trained language models implicitly learn sentence representations that cluster by domains without supervision \u2013 suggesting a simple data-driven definition of domains in textual data. We harness this property and propose domain data selection methods based on such models, which require only a small set of in-domain monolingual data. We evaluate our data selection methods for neural machine translation across five diverse domains, where they outperform an established approach as measured by both BLEU and precision and recall with respect to an oracle selection.", "phrases": ["cluster", "language model", "data selection method"], "overall_score": 2.6829821395981837, "scores": [1.5699438484243837, 0.879827290000821, 0.5224582595078654], "rank_score": 0.9907431326443566} -{"id": "seraji-etal-2012-basic", "title": "A Basic Language Resource Kit for Persian", "abstract": "Persian, with about 100,000,000 speakers worldwide, belongs to the group of languages with less developed linguistically annotated resources and tools. The few existing resources and tools are neither open source nor freely available. Thus, our goal is to develop open source resources such as corpora and treebanks, and tools for data-driven linguistic analysis of Persian. We do this by exploring the reusability of existing resources and adapting state-of-the-art methods for the linguistic annotation. 
We present fully functional tools for text normalization, sentence segmentation, tokenization, part-of-speech tagging, and parsing. As for resources, we describe the Uppsala PErsian Corpus (UPEC) which is a modified version of the Bijankhan corpus with additional sentence segmentation and consistent tokenization modified for more appropriate syntactic annotation. The corpus consists of 2,782,109 tokens and is annotated with parts of speech and morphological features. A treebank is derived from UPEC with an annotation scheme based on Stanford Typed Dependencies and is planned to consist of 10,000 sentences of which 215 have already been annotated.", "phrases": ["persian", "sentence segmentation", "tokenization"], "overall_score": 1.5943583788889304, "scores": [1.8434622235136455, 0.5727674050305426, 0.5556620484903793], "rank_score": 0.9906305590115224} -{"id": "elazar-goldberg-2018-adversarial", "title": "Adversarial Removal of Demographic Attributes from Text Data", "abstract": "Recent advances in Representation Learning and Adversarial Training seem to succeed in removing unwanted features from the learned representation. We show that demographic information of authors is encoded in\u2014and can be recovered from\u2014the intermediate representations learned by text-based neural classifiers. The implication is that decisions of classifiers trained on textual data are not agnostic to\u2014and likely condition on\u2014demographic attributes. When attempting to remove such demographic information using adversarial training, we find that while the adversarial component achieves chance-level development-set accuracy during training, a post-hoc classifier, trained on the encoded sentences from the first part, still manages to reach substantially higher classification accuracies on the same data. This behavior is consistent across several tasks, demographic properties and datasets. We explore several techniques to improve the effectiveness of the adversarial component. Our main conclusion is a cautionary one: do not rely on the adversarial training to achieve invariant representation to sensitive features.", "phrases": ["attribute", "text data", "demographic information", "post-hoc classifier", "gender"], "overall_score": 3.369032182855932, "scores": [0.8377516774963694, 1.4786372230130536, 1.180147020284959, 0.9046388969586018, 0.5515400717449844], "rank_score": 0.9905429778995936} -{"id": "kim-etal-2012-multilingual", "title": "Multilingual Named Entity Recognition using Parallel Data and Metadata from Wikipedia", "abstract": "In this paper we propose a method to automatically label multi-lingual data with named entity tags. We build on prior work utilizing Wikipedia metadata and show how to effectively combine the weak annotations stemming from Wikipedia metadata with information obtained through English-foreign language parallel Wikipedia sentences. The combination is achieved using a novel semi-CRF model for foreign sentence tagging in the context of a parallel English sentence. 
The model outperforms both standard annotation projection methods and methods based solely on Wikipedia metadata.", "phrases": ["wikipedia", "entity tag", "english-foreign language", "parallel corpora"], "overall_score": 2.4611004095210385, "scores": [1.9738138209881657, 0.8895799760141068, 0.576385217375576, 0.5218996422123409], "rank_score": 0.9904196641475473} -{"id": "jiang-bansal-2019-avoiding", "title": "Avoiding Reasoning Shortcuts: Adversarial Evaluation, Training, and Model Development for Multi-Hop QA", "abstract": "Multi-hop question answering requires a model to connect multiple pieces of evidence scattered in a long context to answer the question. In this paper, we show that in the multi-hop HotpotQA (Yang et al., 2018) dataset, the examples often contain reasoning shortcuts through which models can directly locate the answer by word-matching the question with a sentence in the context. We demonstrate this issue by constructing adversarial documents that create contradicting answers to the shortcut but do not affect the validity of the original answer. The performance of strong baseline models drops significantly on our adversarial test, indicating that they are indeed exploiting the shortcuts rather than performing multi-hop reasoning. After adversarial training, the baseline's performance improves but is still limited on the adversarial test. Hence, we use a control unit that dynamically attends to the question at different reasoning hops to guide the model's multi-hop reasoning. We show that our 2-hop model trained on the regular data is more robust to the adversaries than the baseline. After adversarial training, it not only achieves significant improvements over its counterpart trained on regular data, but also outperforms the adversarially-trained baseline significantly. Finally, we sanity-check that these improvements are not obtained by exploiting potential new shortcuts in the adversarial data, but indeed due to robust multi-hop reasoning skills of the models.", "phrases": ["reasoning shortcut", "adversarial evaluation", "multi-hop reasoning"], "overall_score": 2.8060662895550097, "scores": [0.8361806205783704, 1.0686378196886372, 1.0664364023397186], "rank_score": 0.9904182808689087} -{"id": "elsner-2012-character", "title": "Character-based kernels for novelistic plot structure", "abstract": "Better representations of plot structure could greatly improve computational methods for summarizing and generating stories. Current representations lack abstraction, focusing too closely on events. We present a kernel for comparing novelistic plots at a higher level, in terms of the cast of characters they depict and the social relationships between them. Our kernel compares the characters of different novels to one another by measuring their frequency of occurrence over time and the descriptive and emotional language associated with them. 
Given a corpus of 19th-century novels as training data, our method can accurately distinguish held-out novels in their original form from artificially disordered or reversed surrogates, demonstrating its ability to robustly represent important aspects of plot structure.", "phrases": ["kernel", "plot structure", "rich representation", "literary text"], "overall_score": 2.2779378135101243, "scores": [1.8745808983310548, 0.905059112277104, 0.5980412742522633, 0.5795020052444001], "rank_score": 0.9892958225262056} -{"id": "ramage-etal-2009-labeled", "title": "Labeled LDA: A supervised topic model for credit attribution in multi-labeled corpora", "abstract": "A significant portion of the world's text is tagged by readers on social bookmarking websites. Credit attribution is an inherent problem in these corpora because most pages have multiple tags, but the tags do not always apply with equal specificity across the whole document. Solving the credit attribution problem requires associating each word in a document with the most appropriate tags and vice versa. This paper introduces Labeled LDA, a topic model that constrains Latent Dirichlet Allocation by defining a one-to-one correspondence between LDA's latent topics and user tags. This allows Labeled LDA to directly learn word-tag correspondences. We demonstrate Labeled LDA's improved expressiveness over traditional LDA with visualizations of a corpus of tagged web pages from del.icio.us. Labeled LDA outperforms SVMs by more than 3 to 1 when extracting tag-specific document snippets. As a multi-label text classifier, our model is competitive with a discriminative baseline on a variety of datasets.", "phrases": ["topic model", "credit attribution", "labeled lda"], "overall_score": 3.057470704832097, "scores": [0.9042196990013242, 0.8232982141339653, 1.2398991485523518], "rank_score": 0.989139020562547} -{"id": "braud-denis-2015-comparing", "title": "Comparing Word Representations for Implicit Discourse Relation Classification", "abstract": "This paper presents a detailed comparative framework for assessing the usefulness of unsupervised word representations for identifying so-called implicit discourse relations. Specifically, we compare standard one-hot word pair representations against low-dimensional ones based on Brown clusters and word embeddings. We also consider various word vector combination schemes for deriving discourse segment representations from word vectors, and compare representations based either on all words or limited to head words. Our main finding is that denser representations systematically outperform sparser ones and give state-of-the-art performance or above without the need for additional hand-crafted features.", "phrases": ["discourse relation classification", "word embedding", "one-hot representation"], "overall_score": 2.8580319687954616, "scores": [1.393277460684749, 1.0441995614019566, 0.5289565804292813], "rank_score": 0.9888112008386623} -{"id": "ishiwatari-etal-2020-relation", "title": "Relation-aware Graph Attention Networks with Relational Position Encodings for Emotion Recognition in Conversations", "abstract": "Interest in emotion recognition in conversations (ERC) has been increasing in various fields, because it can be used to analyze user behaviors and detect fake news. Many recent ERC methods use graph-based neural networks to take the relationships between the utterances of the speakers into account. 
In particular, the state-of-the-art method considers self- and inter-speaker dependencies in conversations by using relational graph attention networks (RGAT). However, graph-based neural networks do not take sequential information into account. In this paper, we propose relational position encodings that provide RGAT with sequential information reflecting the relational graph structure. Accordingly, our RGAT model can capture both the speaker dependency and the sequential information. Experiments on four ERC datasets show that our model is beneficial to recognizing emotions expressed in conversations. In addition, our approach empirically outperforms the state-of-the-art on all of the benchmark datasets.", "phrases": ["graph attention network", "relational position encoding", "emotion recognition", "conversation"], "overall_score": 2.2766226959922706, "scores": [1.6690806405923886, 0.8130673496915918, 0.9421830972885781, 0.5305676094080316], "rank_score": 0.9887246742451475} -{"id": "dai-etal-2021-apo", "title": "APo-VAE: Text Generation in Hyperbolic Space", "abstract": "Natural language often exhibits inherent hierarchical structure ingrained with complex syntax and semantics. However, most state-of-the-art deep generative models learn embeddings only in Euclidean vector space, without accounting for this structural property of language. In this paper, we investigate text generation in a hyperbolic latent space to learn continuous hierarchical representations. An Adversarial Poincar\u00e9 Variational Autoencoder (APo-VAE) is presented, where both the prior and variational posterior of latent variables are defined over a Poincar\u00e9 ball via wrapped normal distributions. By adopting the primal-dual formulation of Kullback-Leibler divergence, an adversarial learning procedure is introduced to empower robust model training. Extensive experiments in language modeling, unaligned style transfer, and dialog-response generation demonstrate the effectiveness of the proposed APo-VAE model over VAEs in Euclidean latent space, thanks to its superb capabilities in capturing latent language hierarchies in hyperbolic space.", "phrases": ["text generation", "hyperbolic space", "inherent hierarchical structure", "apo-vae"], "overall_score": 1.3704735894289053, "scores": [1.6902415385347085, 0.9039167789502343, 0.8249861505970452, 0.5352064341947704], "rank_score": 0.9885877255691895} -{"id": "ide-etal-2010-manually", "title": "The Manually Annotated Sub-Corpus: A Community Resource for and by the People", "abstract": "The Manually Annotated Sub-Corpus (MASC) project provides data and annotations to serve as the base for a communitywide annotation effort of a subset of the American National Corpus. The MASC infrastructure enables the incorporation of contributed annotations into a single, usable format that can then be analyzed as it is or ported to any of a variety of other formats. MASC includes data from a much wider variety of genres than existing multiply-annotated corpora of English, and the project is committed to a fully open model of distribution, without restriction, for all data and annotations produced or contributed. As such, MASC is the first large-scale, open, community-based effort to create much needed language resources for NLP. 
This paper describes the MASC project, its corpus and annotations, and serves as a call for contributions of data and annotations from the language processing community.", "phrases": ["manually annotated sub-corpus", "masc", "language resource"], "overall_score": 2.3704447137611155, "scores": [1.0124433408056492, 1.367972960459197, 0.585240381891437], "rank_score": 0.9885522277187611} -{"id": "sun-etal-2018-answer", "title": "Answer-focused and Position-aware Neural Question Generation", "abstract": "In this paper, we focus on the problem of question generation (QG). Recent neural network-based approaches employ the sequence-to-sequence model which takes an answer and its context as input and generates a relevant question as output. However, we observe two major issues with these approaches: (1) The generated interrogative words (or question words) do not match the answer type. (2) The model copies the context words that are far from and irrelevant to the answer, instead of the words that are close and relevant to the answer. To address these two issues, we propose an answer-focused and position-aware neural question generation model. (1) By answer-focused, we mean that we explicitly model question word generation by incorporating the answer embedding, which can help generate an interrogative word matching the answer type. (2) By position-aware, we mean that we model the relative distance between the context words and the answer. Hence the model can be aware of the position of the context words when copying them to generate a question. We conduct extensive experiments to examine the effectiveness of our model. The experimental results show that our model significantly improves the baseline and outperforms the state-of-the-art system.", "phrases": ["question generation", "answer location information", "input text"], "overall_score": 3.484333560077903, "scores": [1.5721831718348216, 0.8632796922835777, 0.5287833061938378], "rank_score": 0.9880820567707458} -{"id": "yang-mitchell-2016-joint", "title": "Joint Extraction of Events and Entities within a Document Context", "abstract": "Events and entities are closely related; entities are often actors or participants in events and events without entities are uncommon. The interpretation of events and entities is highly contextually dependent. Existing work in information extraction typically models events separately from entities, and performs inference at the sentence level, ignoring the rest of the document. In this paper, we propose a novel approach that models the dependencies among variables of events, entities, and their relations, and performs joint inference of these variables across a document. The goal is to enable access to document-level contextual information and facilitate context-aware predictions. We demonstrate that our approach substantially outperforms the state-of-the-art methods for event extraction as well as a strong baseline for entity extraction.", "phrases": ["document context", "event extraction", "joint extraction", "well-defined feature", "entity mention"], "overall_score": 2.95981766902896, "scores": [1.6815636151801523, 0.853212622706271, 0.9657798748503869, 0.8599283687790258, 0.5795725709082383], "rank_score": 0.9880114104848149} -{"id": "sultan-etal-2014-dls", "title": "DLS@CU: Sentence Similarity from Word Alignment", "abstract": "We present an algorithm for computing the semantic similarity between two sentences. 
It adopts the hypothesis that semantic similarity is a monotonically increasing function of the degree to which (1) the two sentences contain similar semantic units, and (2) such units occur in similar semantic contexts. With a simplistic operationalization of the notion of semantic units with individual words, we experimentally show that this hypothesis can lead to state-of-the-art results for sentence-level semantic similarity. At the SemEval 2014 STS task (task 10), our system demonstrated the best performance (measured by correlation with human annotations) among 38 system runs.", "phrases": ["word alignment", "semeval", "sts"], "overall_score": 2.368651757206752, "scores": [1.5279347783257433, 0.8983774048460407, 0.537101337105741], "rank_score": 0.9878045067591749} -{"id": "bisk-etal-2020-experience", "title": "Experience Grounds Language", "abstract": "Language understanding research is held back by a failure to relate language to the physical world it describes and to the social interactions it facilitates. Despite the incredible effectiveness of language processing models to tackle tasks after being trained on text alone, successful linguistic communication relies on a shared experience of the world. It is this shared experience that makes utterances meaningful. Natural language processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large, text-only corpora requires the parallel tradition of research on the broader physical and social context of language to address the deeper questions of communication.", "phrases": ["grounding", "successful linguistic communication", "experience", "language model", "limitation"], "overall_score": 3.3593581549940805, "scores": [1.574332465216883, 1.262012114332196, 0.8966175654747197, 0.6184975495845357, 0.5870336917301722], "rank_score": 0.9876986772677014} -{"id": "sangati-etal-2009-generative", "title": "A generative re-ranking model for dependency parsing", "abstract": "We propose a framework for dependency parsing based on a combination of discriminative and generative models. We use a discriminative model to obtain a k-best list of candidate parses, and subsequently rerank those candidates using a generative model. We show how this approach allows us to evaluate a variety of generative models, without needing different parser implementations. Moreover, we present empirical results that show a small improvement over state-of-the-art dependency parsing of English sentences.", "phrases": ["dependency parsing", "generative model", "list"], "overall_score": 1.921660809568761, "scores": [0.9293295898156831, 1.1275591574385149, 0.9057261666487009], "rank_score": 0.9875383046342995} -{"id": "flor-2012-four", "title": "Four types of context for automatic spelling correction", "abstract": "This paper presents an investigation on using four types of contextual information for improving the accuracy of automatic correction of single-token non-word misspellings. The task is framed as contextually-informed re-ranking of correction candidates. Immediate local context is captured by word n-gram statistics from a Web-scale language model. The second approach measures how well a candidate correction fits in the semantic fabric of the local lexical neighborhood, using a very large Distributional Semantic Model. 
In the third approach, recognizing a misspelling as an instance of a recurring word can be useful for reranking. The fourth approach looks at context beyond the text itself. If the approximate topic can be known in advance, spelling correction can be biased towards the topic. Effectiveness of proposed methods is demonstrated with an annotated corpus of 3,000 student essays from international high-stakes English language assessments. The paper also describes an implemented system that achieves high accuracy on this task.", "phrases": ["spelling correction", "contextual information", "student essay", "ranking candidate correction", "edit distance"], "overall_score": 2.2736494680299715, "scores": [2.4906208892390573, 0.8693767581017616, 0.5359055630960992, 0.5209637382410727, 0.5203001400604118], "rank_score": 0.9874334177476806} -{"id": "chen-dolan-2011-collecting", "title": "Collecting Highly Parallel Data for Paraphrase Evaluation", "abstract": "A lack of standard datasets and evaluation metrics has prevented the field of paraphrasing from making the kind of rapid progress enjoyed by the machine translation community over the last 15 years. We address both problems by presenting a novel data collection framework that produces highly parallel text data relatively inexpensively and on a large scale. The highly parallel nature of this data allows us to use simple n-gram comparisons to measure both the semantic adequacy and lexical dissimilarity of paraphrase candidates. In addition to being simple and efficient to compute, experiments show that these metrics correlate highly with human judgments.", "phrases": ["paraphrase", "n-gram", "human judgment", "crowdsourcing"], "overall_score": 2.9578990751967766, "scores": [1.7025349817303568, 0.8599699500999853, 0.8535889526161625, 0.5333899880728097], "rank_score": 0.9873709681298286} -{"id": "van-durme-lall-2011-efficient", "title": "Efficient Online Locality Sensitive Hashing via Reservoir Counting", "abstract": "We describe a novel mechanism called Reservoir Counting for application in online Locality Sensitive Hashing.
This technique allows for significant savings in the streaming setting, making it possible to maintain a larger number of signatures, or an increased level of approximation accuracy, at a similar memory footprint.", "phrases": ["locality sensitive hashing", "reservoir counting", "streaming setting"], "overall_score": 1.3681466637571547, "scores": [1.8013819267025475, 0.614199902345792, 0.545145781468501], "rank_score": 0.9869092035056136} -{"id": "correia-etal-2019-adaptively", "title": "Adaptively Sparse Transformers", "abstract": "Attention mechanisms have become ubiquitous in NLP. Recent architectures, notably the Transformer, learn powerful context-aware word representations through layered, multi-headed attention. The multiple heads learn diverse types of word relationships. However, with standard softmax attention, all attention heads are dense, assigning a non-zero weight to all context words. In this work, we introduce the adaptively sparse Transformer, wherein attention heads have flexible, context-dependent sparsity patterns. This sparsity is accomplished by replacing softmax with alpha-entmax: a differentiable generalization of softmax that allows low-scoring words to receive precisely zero weight. Moreover, we derive a method to automatically learn the alpha parameter \u2013 which controls the shape and sparsity of alpha-entmax \u2013 allowing attention heads to choose between focused or spread-out behavior. Our adaptively sparse Transformer improves interpretability and head diversity when compared to softmax Transformers on machine translation datasets. Findings of the quantitative and qualitative analysis of our approach include that heads in different layers learn different sparsity preferences and tend to be more diverse in their attention distributions than softmax Transformers. Furthermore, at no cost in accuracy, sparsity in attention heads helps to uncover different head specializations.", "phrases": ["sparse transformer", "head", "alternative"], "overall_score": 2.7956161572069393, "scores": [1.003218161017344, 1.1068342533320763, 0.8501371136349603], "rank_score": 0.9867298426614601} -{"id": "mori-etal-2014-flow", "title": "Flow Graph Corpus from Recipe Texts", "abstract": "In this paper, we present our attempt at annotating procedural texts with a flow graph as a representation of understanding. The domain we focus on is cooking recipes. The flow graphs are directed acyclic graphs with a special root node corresponding to the final dish. The vertex labels are recipe named entities, such as foods, tools, cooking actions, etc. The arc labels denote relationships among them. We converted 266 Japanese recipe texts into flow graphs manually. 200 recipes are randomly selected from a web site and 66 are of the same dish. We detail the annotation framework and report some statistics on our corpus. The most typical usage of our corpus may be automatic conversion from texts to flow graphs which can be seen as an entire understanding of procedural texts.
With our corpus, one can also try word segmentation, named entity recognition, predicate-argument structure analysis, and coreference resolution.", "phrases": ["recipe", "procedural text", "action", "flow graph", "workflow"], "overall_score": 2.7354603239013544, "scores": [1.5435525548392168, 1.050493520087269, 0.9247774182644775, 0.8629826641388963, 0.5512376474713924], "rank_score": 0.9866087609602504} -{"id": "chen-etal-2007-multi", "title": "Multi-Engine Machine Translation with an Open-Source SMT Decoder", "abstract": "We describe an architecture that allows us to combine statistical machine translation (SMT) with rule-based machine translation (RBMT) in a multi-engine setup. We use a variant of standard SMT technology to align translations from one or more RBMT systems with the source text. We incorporate phrases extracted from these alignments into the phrase table of the SMT system and use the open-source decoder Moses to find good combinations of phrases from SMT training data with the phrases derived from RBMT. First experiments based on this hybrid architecture achieve promising results.", "phrases": ["machine translation", "rbmt", "multi-engine setup", "smt system", "good combination"], "overall_score": 2.1677441316970114, "scores": [1.9976310935752515, 0.9187538891761166, 0.8517746318920253, 0.6157836635621693, 0.5489710766963186], "rank_score": 0.9865828709803763} -{"id": "poliak-etal-2018-collecting", "title": "Collecting Diverse Natural Language Inference Problems for Sentence Representation Evaluation", "abstract": "We present a large-scale collection of diverse natural language inference (NLI) datasets that help provide insight into how well a sentence representation captures distinct types of reasoning. The collection results from recasting 13 existing datasets from 7 semantic phenomena into a common NLI structure, resulting in over half a million labeled context-hypothesis pairs in total. We refer to our collection as the DNC: Diverse Natural Language Inference Collection. The DNC is available online at , and will grow over time as additional resources are recast and added from novel sources.", "phrases": ["natural language inference", "nli", "reasoning", "test set", "semantic phenomenon"], "overall_score": 3.1748510169296473, "scores": [1.4721542518671733, 1.4533328546591273, 0.878348893964121, 0.568484755061104, 0.5592938665497226], "rank_score": 0.9863229244202497} -{"id": "pradhan-etal-2004-shallow", "title": "Shallow Semantic Parsing using Support Vector Machines", "abstract": "In this paper, we propose a machine learning algorithm for shallow semantic parsing, extending the work of Gildea and Jurafsky (2002), Surdeanu et al. (2003) and others. Our algorithm is based on Support Vector Machines, which we show give an improvement in performance over earlier classifiers.
We show performance improvements through a number of new features and measure their ability to generalize to a new test set drawn from the AQUAINT corpus.", "phrases": ["support vector machines", "gildea", "shallow semantic parsing", "central meaning", "predicate"], "overall_score": 3.448290580062965, "scores": [2.105866158832327, 0.8894763917193318, 0.8441983842483988, 0.5500353870394891, 0.5414734749135129], "rank_score": 0.986209959350612} -{"id": "alinejad-etal-2018-prediction", "title": "Prediction Improves Simultaneous Neural Machine Translation", "abstract": "Simultaneous speech translation aims to maintain translation quality while minimizing the delay between reading input and incrementally producing the output. We propose a new general-purpose prediction action which predicts future words in the input to improve quality and minimize delay in simultaneous translation. We train this agent using reinforcement learning with a novel reward function. Our agent with prediction has better translation quality and less delay compared to an agent-based simultaneous translation system without prediction.", "phrases": ["agent", "reinforcement learning", "predict operation", "next source word"], "overall_score": 2.6020413444756967, "scores": [1.2826809435992965, 1.2004685267674424, 0.8727035257081092, 0.5880421444752693], "rank_score": 0.9859737851375293} -{"id": "pustejovsky-yocum-2013-capturing", "title": "Capturing Motion in ISO-SpaceBank", "abstract": "This paper presents the first description of the motion subcorpus of ISO-SpaceBank (MotionBank) and discusses how motion-events are represented in ISO-Space 1.5, a specification language for the representation of spatial information in language. We present data from this subcorpus with examples from the pilot annotation, focusing specifically on the annotation of motion-events and their various participants. These data inform further discussion of outstanding issues concerning semantic annotation, such as quantification and measurement. We address these questions briefly as they impact the design of ISO-Space.", "phrases": ["motion", "iso-spacebank", "spatial information", "pilot annotation"], "overall_score": 1.0831411743109693, "scores": [1.9514869593173005, 0.9287672541889259, 0.5394058761741796, 0.524010249649599], "rank_score": 0.9859175848325011} -{"id": "miyao-tsujii-2005-probabilistic", "title": "Probabilistic Disambiguation Models for Wide-Coverage HPSG Parsing", "abstract": "This paper reports the development of log-linear models for the disambiguation in wide-coverage HPSG parsing. The estimation of log-linear models requires high computational cost, especially with wide-coverage grammars. Using techniques to reduce the estimation cost, we trained the models using 20 sections of the Penn Treebank. A series of experiments empirically evaluated the estimation techniques, and also examined the performance of the disambiguation models on the parsing of real-world sentences.", "phrases": ["hpsg", "real-world sentence", "probabilistic model"], "overall_score": 2.6015095426550348, "scores": [1.2770019850057976, 1.1573454694998544, 0.5229693647931944], "rank_score": 0.9857722730996156} -{"id": "chen-etal-2020-mpdd", "title": "MPDD: A Multi-Party Dialogue Dataset for Analysis of Emotions and Interpersonal Relationships", "abstract": "A dialogue dataset is an indispensable resource for building a dialogue system.
Additional information like emotions and interpersonal relationships labeled on conversations enables the system to capture the emotion flow of the participants in the dialogue. However, there is no publicly available Chinese dialogue dataset with emotion and relation labels. In this paper, we collect the conversations from TV series scripts and annotate emotion and interpersonal relationship labels on each utterance. This dataset contains 25,548 utterances from 4,142 dialogues. We also set up some experiments to observe the effects of the responded utterance on the current utterance, and the correlation between emotion and relation types in emotion and relation classification tasks.", "phrases": ["dialogue dataset", "emotion", "interpersonal relationship"], "overall_score": 1.0828958898217202, "scores": [1.854171083461816, 0.5636276642310063, 0.5392842041132877], "rank_score": 0.9856943172687034} -{"id": "wu-2005-recognizing", "title": "Recognizing Paraphrases and Textual Entailment Using Inversion Transduction Grammars", "abstract": "We present first results using paraphrase as well as textual entailment data to test the language universal constraint posited by Wu's (1995, 1997) Inversion Transduction Grammar (ITG) hypothesis. In machine translation and alignment, the ITG Hypothesis provides a strong inductive bias, and has been shown empirically across numerous language pairs and corpora to yield both efficiency and accuracy gains for various language acquisition tasks. Monolingual paraphrase and textual entailment recognition datasets, however, potentially facilitate closer tests of certain aspects of the hypothesis than bilingual parallel corpora, which simultaneously exhibit many irrelevant dimensions of cross-lingual variation. We investigate this using simple generic Bracketing ITGs containing no language-specific linguistic knowledge. Experimental results on the MSR Paraphrase Corpus show that, even in the absence of any thesaurus to accommodate lexical variation between the paraphrases, an uninterpolated average precision of at least 76% is obtainable from the Bracketing ITG's structure matching bias alone. This is consistent with experimental results on the Pascal Recognising Textual Entailment Challenge Corpus, which show surprisingly strong results for a number of the task subsets.", "phrases": ["paraphrase", "inversion transduction grammar", "machine translation"], "overall_score": 1.3659999993905936, "scores": [1.8408421419164254, 0.5928631674924099, 0.5223768280538695], "rank_score": 0.9853607124875682} -{"id": "jiampojamarn-etal-2007-applying", "title": "Applying Many-to-Many Alignments and Hidden Markov Models to Letter-to-Phoneme Conversion", "abstract": "Letter-to-phoneme conversion generally requires aligned training data of letters and phonemes. Typically, the alignments are limited to one-to-one alignments. We present a novel technique of training with many-to-many alignments. A letter chunking bigram prediction manages double letters and double phonemes automatically as opposed to preprocessing with fixed lists. We also apply an HMM method in conjunction with a local classification model to predict a global phoneme sequence given a word. The many-to-many alignments result in significant improvements over the traditional one-to-one approach.
Our system achieves state-of-the-art performance on several languages and data sets.", "phrases": ["many-to-many alignment", "letter", "phoneme", "transliteration unit"], "overall_score": 2.668152089351782, "scores": [1.9405735323664879, 0.9002341581622753, 0.5672109787251587, 0.5330487278952316], "rank_score": 0.9852668492872884} -{"id": "peng-etal-2019-transfer", "title": "Transfer Learning in Biomedical Natural Language Processing: An Evaluation of BERT and ELMo on Ten Benchmarking Datasets", "abstract": "Inspired by the success of the General Language Understanding Evaluation benchmark, we introduce the Biomedical Language Understanding Evaluation (BLUE) benchmark to facilitate research in the development of pre-training language representations in the biomedicine domain. The benchmark consists of five tasks with ten datasets that cover both biomedical and clinical texts with different dataset sizes and difficulties. We also evaluate several baselines based on BERT and ELMo and find that the BERT model pre-trained on PubMed abstracts and MIMIC-III clinical notes achieves the best results. We make the datasets, pre-trained models, and codes publicly available at .", "phrases": ["biomedical domain", "language model", "entity recognition"], "overall_score": 3.473810131893064, "scores": [1.3505440143452336, 1.0796604407948251, 0.5250890614845769], "rank_score": 0.9850978388748786} -{"id": "sun-etal-2012-fast", "title": "Fast Online Training with Frequency-Adaptive Learning Rates for Chinese Word Segmentation and New Word Detection", "abstract": "We present a joint model for Chinese word segmentation and new word detection. We present high dimensional new features, including word-based features and enriched edge (label-transition) features, for the joint modeling. As we know, training a word segmentation system on large-scale datasets is already costly. In our case, adding high dimensional new features will further slow down the training speed. To solve this problem, we propose a new training method, adaptive online gradient descent based on feature frequency information, for very fast online training of the parameters, even given large-scale datasets with high dimensional features. Compared with existing training methods, our training method is an order of magnitude faster in terms of training time, and can achieve equal or even higher accuracies. The proposed fast training method is a general purpose optimization method, and it is not limited to the specific task discussed in this paper.", "phrases": ["chinese word segmentation", "joint model", "specific task", "fast online training"], "overall_score": 2.4468274797968133, "scores": [1.9977739761616038, 0.8712731772253209, 0.545695722657816, 0.5239603826962894], "rank_score": 0.9846758146852576} -{"id": "saluja-etal-2014-graph", "title": "Graph-based Semi-Supervised Learning of Translation Models from Monolingual Data", "abstract": "Statistical phrase-based translation learns translation rules from bilingual corpora, and has traditionally only used monolingual evidence to construct features that rescore existing translation candidates. In this work, we present a semi-supervised graph-based approach for generating new translation rules that leverages bilingual and monolingual data. The proposed technique first constructs phrase graphs using both source and target language monolingual corpora.
Next, graph propagation identifies translations of phrases that were not observed in the bilingual corpus, assuming that similar phrases have similar translations. We report results on a large Arabic-English system and a medium-sized Urdu-English system. Our proposed approach significantly improves the performance of competitive phrase-based systems, leading to consistent improvements between 1 and 4 BLEU points on standard evaluation sets.", "phrases": ["monolingual data", "new translation rule", "test data"], "overall_score": 1.9160370812216356, "scores": [1.857709359204698, 0.5718127028291535, 0.5244227778424703], "rank_score": 0.9846482799587739} -{"id": "chan-etal-2012-community", "title": "Community Answer Summarization for Multi-Sentence Question with Group L1 Regularization", "abstract": "We present a novel answer summarization method for community Question Answering services (cQAs) to address the problem of \"incomplete answer\", i.e., the \"best answer\" of a complex multi-sentence question misses valuable information that is contained in other answers. In order to automatically generate a novel and non-redundant community answer summary, we segment the complex original multi-sentence question into several sub questions and then propose a general Conditional Random Field (CRF) based answer summary method with group L1 regularization. Various textual and non-textual QA features are explored. Specifically, we explore four different types of contextual factors, namely, the information novelty and non-redundancy modeling for local and non-local sentence interactions under question segmentation. To further unleash the potential of the abundant cQA features, we introduce the group L1 regularization for feature learning. Experimental results on a Yahoo! Answers dataset show that our proposed method significantly outperforms state-of-the-art methods on cQA summarization task.", "phrases": ["multi-sentence question", "answer summarization method", "good answer"], "overall_score": 1.584523070160639, "scores": [1.8134080305128015, 0.6112576281561555, 0.5288929556504107], "rank_score": 0.9845195381064559} -{"id": "shi-etal-2006-dom", "title": "A DOM Tree Alignment Model for Mining Parallel Data from the Web", "abstract": "This paper presents a new web mining scheme for parallel data acquisition. Based on the Document Object Model (DOM), a web page is represented as a DOM tree. Then a DOM tree alignment model is proposed to identify the translationally equivalent texts and hyperlinks between two parallel DOM trees. By tracing the identified parallel hyperlinks, parallel web documents are recursively mined. Compared with previous mining schemes, the benchmarks show that this new mining scheme improves the mining coverage, reduces mining bandwidth, and enhances the quality of mined parallel sentences.", "phrases": ["web", "document object model", "parallel document", "metadata information"], "overall_score": 2.789032576577841, "scores": [1.926098539183903, 0.937336352324571, 0.5399043391333175, 0.5342852796040586], "rank_score": 0.9844061275614625} -{"id": "gorinski-lapata-2015-movie", "title": "Movie Script Summarization as Graph-based Scene Extraction", "abstract": "In this paper we study the task of movie script summarization, which we argue could enhance script browsing, give readers a rough idea of the script\u2019s plotline, and speed up reading time. We formalize the process of generating a shorter version of a screenplay as the task of finding an optimal chain of scenes.
We develop a graph-based model that selects a chain by jointly optimizing its logical progression, diversity, and importance. Human evaluation based on a question-answering task shows that our model produces summaries which are more informative compared to competitive baselines.", "phrases": ["scene", "script browsing", "movie script summarization", "screenplay summarization", "graph-based approach"], "overall_score": 2.5244249736887547, "scores": [2.0253569945319394, 1.2355610032202482, 0.5700393893494919, 0.5552634433822349, 0.5347827096934096], "rank_score": 0.9842007080354648} -{"id": "rehbein-van-genabith-2007-treebank", "title": "Treebank Annotation Schemes and Parser Evaluation for German", "abstract": "Recent studies focussed on the question whether less-configurational languages like German are harder to parse than English, or whether the lower parsing scores are an artefact of treebank encoding schemes and data structures, as claimed by K\u00fcbler et al. (2006). This claim is based on the assumption that PARSEVAL metrics fully reflect parse quality across treebank encoding schemes. In this paper we present new experiments to test this claim. We use the PARSEVAL metric, the Leaf-Ancestor metric as well as a dependency-based evaluation, and present novel approaches measuring the effect of controlled error insertion on treebank trees and parser output. We also provide extensive post-parsing cross-treebank conversion. The results of the experiments show that, contrary to K\u00fcbler et al. (2006), the question whether or not German is harder to parse than English remains undecided.", "phrases": ["german", "treebank annotation scheme", "parser error"], "overall_score": 2.046096582121029, "scores": [1.706403641391339, 0.6363885895141664, 0.6091011613003608], "rank_score": 0.9839644640686221} -{"id": "hahn-keller-2016-modeling", "title": "Modeling Human Reading with Neural Attention", "abstract": "When humans read text, they fixate some words and skip others. However, there have been few attempts to explain skipping behavior with computational models, as most existing work has focused on predicting reading times (e.g., using surprisal). In this paper, we propose a novel approach that models both skipping and reading, using an unsupervised architecture that combines neural attention with autoencoding, trained on raw text using reinforcement learning. Our model explains human reading behavior as a tradeoff between precision of language understanding (encoding the input accurately) and economy of attention (fixating as few words as possible). We evaluate the model on the Dundee eye-tracking corpus, showing that it accurately predicts skipping behavior and reading times, is competitive with surprisal, and captures known qualitative features of human reading.", "phrases": ["human reading", "neural attention", "novel approach", "reinforcement learning"], "overall_score": 2.16177734331997, "scores": [1.9417298205011404, 0.93541488676673, 0.5369202543747089, 0.5214041126033977], "rank_score": 0.9838672685614943} -{"id": "fazly-stevenson-2006-automatically", "title": "Automatically Constructing a Lexicon of Verb Phrase Idiomatic Combinations", "abstract": "We investigate the lexical and syntactic flexibility of a class of idiomatic expressions. We develop measures that draw on such linguistic properties, and demonstrate that these statistical, corpus-based measures can be successfully used for distinguishing idiomatic combinations from non-idiomatic ones.
We also propose a means for automatically determining which syntactic forms a particular idiom can appear in, and hence should be included in its lexical representation.", "phrases": ["idiomatic expression", "lexical fixedness", "statistical measure"], "overall_score": 3.0844012382044848, "scores": [1.147586006828203, 0.9121902612920476, 0.8913386367023138], "rank_score": 0.9837049682741882} -{"id": "mir-etal-2019-evaluating", "title": "Evaluating Style Transfer for Text", "abstract": "Research in the area of style transfer for text is currently bottlenecked by a lack of standard evaluation practices. This paper aims to alleviate this issue by experimentally identifying best practices with a Yelp sentiment dataset. We specify three aspects of interest (style transfer intensity, content preservation, and naturalness) and show how to obtain more reliable measures of them from human evaluation than in previous work. We propose a set of metrics for automated evaluation and demonstrate that they are more strongly correlated and in agreement with human judgment: direction-corrected Earth Mover's Distance, Word Mover's Distance on style-masked texts, and adversarial classification for the respective aspects. We also show that the three examined models exhibit tradeoffs between aspects of interest, demonstrating the importance of evaluating style transfer models at specific points of their tradeoff plots. We release software with our evaluation metrics to facilitate research.", "phrases": ["style transfer", "standard evaluation practice", "content preservation", "distance"], "overall_score": 2.1604550918507015, "scores": [2.010561509263382, 0.8404657567565277, 0.5552071726362469, 0.5268275052802341], "rank_score": 0.9832654859840977} -{"id": "biadsy-etal-2009-spoken", "title": "Spoken Arabic Dialect Identification Using Phonotactic Modeling", "abstract": "The Arabic language is a collection of multiple variants, among which Modern Standard Arabic (MSA) has a special status as the formal written standard language of the media, culture and education across the Arab world. The other variants are informal spoken dialects that are the media of communication for daily life. Arabic dialects differ substantially from MSA and each other in terms of phonology, morphology, lexical choice and syntax. In this paper, we describe a system that automatically identifies the Arabic dialect (Gulf, Iraqi, Levantine, Egyptian and MSA) of a speaker given a sample of his/her speech. The phonotactic approach we use proves to be effective in identifying these dialects with considerable overall accuracy --- 81.60% using 30s test utterances.", "phrases": ["dialect", "modern standard arabic", "phonotactic approach", "speech data"], "overall_score": 2.9934972030098885, "scores": [1.9799609240511649, 0.8916355338822048, 0.5309465899902257, 0.5304182151220604], "rank_score": 0.983240315761414} -{"id": "kahn-etal-2005-effective", "title": "Effective Use of Prosody in Parsing Conversational Speech", "abstract": "We identify a set of prosodic cues for parsing conversational speech and show how such features can be effectively incorporated into a statistical parsing model. On the Switchboard corpus of conversational speech, the system achieves improved parse accuracy over a state-of-the-art system which uses only lexical and syntactic features. 
Since removal of edit regions is known to improve downstream parse accuracy, we explore alternatives for edit detection and show that PCFGs are not competitive with more specialized techniques.", "phrases": ["conversational speech", "textual information", "posterior"], "overall_score": 2.6621577337531357, "scores": [1.8885633541806606, 0.5403552900822599, 0.5202413077973523], "rank_score": 0.9830533173534244} -{"id": "vilar-etal-2007-human", "title": "Human Evaluation of Machine Translation Through Binary System Comparisons", "abstract": "We introduce a novel evaluation scheme for the human evaluation of different machine translation systems. Our method is based on direct comparison of two sentences at a time by human judges. These binary judgments are then used to decide between all possible rankings of the systems. The advantages of this new method are the lower dependency on extensive evaluation guidelines, and a tighter focus on a typical evaluation task, namely the ranking of systems. Furthermore we argue that machine translation evaluations should be regarded as statistical processes, both for human and automatic evaluation. We show how confidence ranges for state-of-the-art evaluation measures such as WER and TER can be computed accurately and efficiently without having to resort to Monte Carlo estimates. We give an example of our new evaluation scheme, as well as a comparison with classical automatic and human evaluation on data from a recent international evaluation campaign.", "phrases": ["human evaluation", "adequacy", "fluency"], "overall_score": 1.9128714147370545, "scores": [1.8805293191121075, 0.5445330004813537, 0.5240020280060955], "rank_score": 0.9830214491998522} -{"id": "sun-etal-2020-knowledge", "title": "Knowledge Association with Hyperbolic Knowledge Graph Embeddings", "abstract": "Capturing associations for knowledge graphs (KGs) through entity alignment, entity type inference and other related tasks benefits NLP applications with comprehensive knowledge representations. Recent related methods built on Euclidean embeddings are challenged by the hierarchical structures and different scales of KGs. They also depend on high embedding dimensions to realize enough expressiveness. Differently, we explore with low-dimensional hyperbolic embeddings for knowledge association. We propose a hyperbolic relational graph neural network for KG embedding and capture knowledge associations with a hyperbolic transformation. Extensive experiments on entity alignment and type inference demonstrate the effectiveness and efficiency of our method.", "phrases": ["entity alignment", "hyperbolic relational graph", "knowledge association"], "overall_score": 1.5811874179032488, "scores": [1.8260601460212815, 0.5893674816709746, 0.5319133147959338], "rank_score": 0.9824469808293967} -{"id": "lala-specia-2018-multimodal", "title": "Multimodal Lexical Translation", "abstract": "Inspired by the tasks of Multimodal Machine Translation and Visual Sense Disambiguation we introduce a task called Multimodal Lexical Translation (MLT). The aim of this new task is to correctly translate an ambiguous word given its context - an image and a sentence in the source language. To facilitate the task, we introduce the MLT dataset, where each data point is a 4-tuple consisting of an ambiguous source word, its visual context (an image), its textual context (a source sentence), and its translation that conforms with the visual and textual contexts.
The dataset has been created from the Multi30K corpus using word-alignment followed by human inspection for translations from English to German and English to French. We also introduce a simple heuristic to quantify the extent of the ambiguity of a word from the distribution of its translations and use it to select subsets of the MLT Dataset which are difficult to translate. These form a valuable multimodal and multilingual language resource with several potential uses including evaluation of lexical disambiguation within (Multimodal) Machine Translation systems.", "phrases": ["mlt", "ambiguity", "visual context", "multimodal lexical translation"], "overall_score": 1.9116652060544217, "scores": [2.197224315406639, 0.584242299159, 0.5813710037811163, 0.5667687038824258], "rank_score": 0.9824015805572952} -{"id": "nguyen-etal-2017-word", "title": "From Word Segmentation to POS Tagging for Vietnamese", "abstract": "This paper presents an empirical comparison of two strategies for Vietnamese Part-of-Speech (POS) tagging from unsegmented text: (i) a pipeline strategy where we consider the output of a word segmenter as the input of a POS tagger, and (ii) a joint strategy where we predict a combined segmentation and POS tag for each syllable. We also make a comparison between state-of-the-art (SOTA) feature-based and neural network-based models. On the benchmark Vietnamese treebank (Nguyen et al., 2009), experimental results show that the pipeline strategy produces better scores of POS tagging from unsegmented text than the joint strategy, and the highest accuracy is obtained by using a feature-based model.", "phrases": ["word segmentation", "pos tagging", "vietnamese"], "overall_score": 1.5811141778310955, "scores": [1.4804318843671445, 0.8271687490584473, 0.6396037892162825], "rank_score": 0.9824014742139582} -{"id": "zhu-etal-2018-msmo", "title": "MSMO: Multimodal Summarization with Multimodal Output", "abstract": "Multimodal summarization has drawn much attention due to the rapid growth of multimedia data. The output of the current multimodal summarization systems is usually represented in texts. However, we have found through experiments that multimodal output can significantly improve user satisfaction for informativeness of summaries. In this paper, we propose a novel task, multimodal summarization with multimodal output (MSMO). To handle this task, we first collect a large-scale dataset for MSMO research. We then propose a multimodal attention model to jointly generate text and select the most relevant image from the multimodal input. Finally, to evaluate multimodal outputs, we construct a novel multimodal automatic evaluation (MMAE) method which considers both intra-modality salience and inter-modality relevance. The experimental results show the effectiveness of MMAE.", "phrases": ["summarization", "multimodal output", "msmo", "video"], "overall_score": 1.9115964759834956, "scores": [1.9457402243135005, 0.8502407464685431, 0.5993275689700254, 0.534156501399033], "rank_score": 0.9823662602877754} -{"id": "gupta-etal-2017-entity", "title": "Entity Linking via Joint Encoding of Types, Descriptions, and Context", "abstract": "For accurate entity linking, we need to capture various information aspects of an entity, such as its description in a KB, contexts in which it is mentioned, and structured knowledge. Additionally, a linking system should work on texts from different domains without requiring domain-specific training data or hand-engineered features.
In this work we present a neural, modular entity linking system that learns a unified dense representation for each entity using multiple sources of information, such as its description, contexts around its mentions, and its fine-grained types. We show that the resulting entity linking system is effective at combining these sources, and performs competitively, sometimes out-performing current state-of-the-art systems across datasets, without requiring any domain-specific training data or hand-engineered features. We also show that our model can effectively \u201cembed\u201d entities that are new to the KB, and is able to link their mentions accurately.", "phrases": ["joint encoding", "domain-specific training data", "hand-engineered feature", "entity linking"], "overall_score": 3.07948686809721, "scores": [2.021567135426653, 0.8128921543183448, 0.5484582775045391, 0.5456329654895664], "rank_score": 0.9821376331847759} -{"id": "bramsen-etal-2006-inducing", "title": "Inducing Temporal Graphs", "abstract": "We consider the problem of constructing a directed acyclic graph that encodes temporal relations found in a text. The unit of our analysis is a temporal segment, a fragment of text that maintains temporal coherence. The strength of our approach lies in its ability to simultaneously optimize pairwise ordering preferences and global constraints on the graph topology. Our learning method achieves 83% F-measure in temporal segmentation and 84% accuracy in inferring temporal relations between two segments.", "phrases": ["acyclic graph", "segment", "integer linear programming", "global information", "temporal structure"], "overall_score": 2.658593373983307, "scores": [1.2380551825717738, 1.1301150556982833, 1.0653224729319875, 0.8533684079090887, 0.6218244231680069], "rank_score": 0.981737108455828} -{"id": "zettlemoyer-collins-2007-online", "title": "Online Learning of Relaxed CCG Grammars for Parsing to Logical Form", "abstract": "We consider the problem of learning to parse sentences to lambda-calculus representations of their underlying semantics and present an algorithm that learns a weighted combinatory categorial grammar (CCG). A key idea is to introduce non-standard CCG combinators that relax certain parts of the grammar\u2014for example allowing flexible word order, or insertion of lexical items\u2014with learned costs. We also present a new, online algorithm for inducing a weighted CCG. Results for the approach on ATIS data show 86% F-measure in recovering fully correct semantic analyses and 95.9% F-measure by a partial-match criterion, a more than 5% improvement over the 90.3% partial-match figure reported by He and Young (2006).", "phrases": ["ccg", "semantic parser", "learning algorithm", "much work", "query"], "overall_score": 3.6693600399317456, "scores": [1.7538200985882582, 1.5605373871085504, 0.5494389667097654, 0.5225140344763558, 0.5223096088881407], "rank_score": 0.981724019154214} -{"id": "wang-cho-2016-larger", "title": "Larger-Context Language Modelling with Recurrent Neural Network", "abstract": "In this work, we propose a novel method to incorporate corpus-level discourse information into language modelling. We call this larger-context language model. We introduce a late fusion approach to a recurrent language model based on long short-term memory units (LSTM), which helps the LSTM unit keep intra-sentence dependencies and inter-sentence dependencies separate from each other.
Through the evaluation on three corpora (IMDB, BBC, and Penn Treebank), we demonstrate that the proposed model improves perplexity significantly. In the experiments, we evaluate the proposed approach while varying the number of context sentences and observe that the proposed late fusion is superior to the usual way of incorporating additional inputs to the LSTM. By analyzing the trained larger-context language model, we discover that content words, including nouns, adjectives and verbs, benefit most from an increasing number of context sentences. This analysis suggests that the larger-context language model improves the unconditional language model by capturing the theme of a document better and more easily.", "phrases": ["language modelling", "corpus-level discourse information", "context information", "thread"], "overall_score": 2.4391911178812875, "scores": [2.286647119583591, 0.5507995867373899, 0.5497782437773527, 0.5391859162243703], "rank_score": 0.981602716580676} -{"id": "jehl-etal-2014-source", "title": "Source-side Preordering for Translation using Logistic Regression and Depth-first Branch-and-Bound Search", "abstract": "We present a simple preordering approach for machine translation based on a feature-rich logistic regression model to predict whether two children of the same node in the source-side parse tree should be swapped or not. Given the pair-wise children regression scores we conduct an efficient depth-first branch-and-bound search through the space of possible children permutations, avoiding using a cascade of classifiers or limiting the list of possible ordering outcomes. We report experiments in translating English to Japanese and Korean, demonstrating superior performance as (a) the number of crossing links drops by more than 10% absolute with respect to other state-of-the-art preordering approaches, (b) BLEU scores improve by 2.2 points over the baseline with lexicalised reordering model, and (c) decoding can be carried out 80 times faster.", "phrases": ["depth-first branch-and-bound search", "logistic regression model", "parse tree"], "overall_score": 1.7587418390046043, "scores": [1.8246334155944037, 0.5868631029561746, 0.5332210105756796], "rank_score": 0.9815725097087525} -{"id": "shwartz-dagan-2016-path", "title": "Path-based vs. Distributional Information in Recognizing Lexical Semantic Relations", "abstract": "Recognizing various semantic relations between terms is beneficial for many NLP tasks. While path-based and distributional information sources are considered complementary for this task, the superior results the latter showed recently suggested that the former's contribution might have become obsolete. We follow the recent success of an integrated neural method for hypernymy detection (Shwartz et al., 2016) and extend it to recognize multiple relations. The empirical results show that this method is effective in the multiclass setting as well.
We further show that the path-based information source always contributes to the classification, and analyze the cases in which it mostly complements the distributional information.", "phrases": ["distributional information", "multiple relation", "path-based approach"], "overall_score": 2.040373703831858, "scores": [1.862235371396957, 0.5538668381712363, 0.5275348145102748], "rank_score": 0.9812123413594894} -{"id": "liu-etal-2013-modeling", "title": "Modeling Collaborative Referring for Situated Referential Grounding", "abstract": "In situated dialogue, because humans and agents have mismatched capabilities of perceiving the shared physical world, referential grounding becomes difficult. Humans and agents will need to make extra efforts by collaborating with each other to mediate a shared perceptual basis and to come to a mutual understanding of intended referents in the environment. In this paper, we have extended our previous graph-matching based approach to explicitly incorporate collaborative referring behaviors into the referential grounding algorithm. In addition, hypergraph-based representations have been used to account for group descriptions that are likely to occur in spatial communications. Our empirical results have shown that incorporating the most prevalent pattern of collaboration with our hypergraph-based approach significantly improves reference resolution in situated dialogue by an absolute gain of over 18%.", "phrases": ["referential grounding", "grounding algorithm", "communication", "reference resolution"], "overall_score": 1.7580298561514482, "scores": [1.9631265860310134, 0.8611524966751178, 0.5550715774906281, 0.5453499178531758], "rank_score": 0.9811751445124839} -{"id": "fevry-phang-2018-unsupervised", "title": "Unsupervised Sentence Compression using Denoising Auto-Encoders", "abstract": "In sentence compression, the task of shortening sentences while retaining the original meaning, models tend to be trained on large corpora containing pairs of verbose and compressed sentences. To remove the need for paired corpora, we emulate a summarization task and add noise to extend sentences and train a denoising auto-encoder to recover the original, constructing an end-to-end training regime without the need for any examples of compressed sentences. We conduct a human evaluation of our model on a standard text summarization dataset and show that it performs comparably to a supervised baseline based on grammatical correctness and retention of meaning. Despite being exposed to no target data, our unsupervised models learn to generate imperfect but reasonably readable sentence summaries. Although we underperform supervised models based on ROUGE scores, our models are competitive with a supervised baseline based on human evaluation for grammatical correctness and retention of meaning.", "phrases": ["sentence compression", "noise", "autoencoder"], "overall_score": 2.8357471098915408, "scores": [1.248789563209583, 1.1608116152453856, 0.5337023278963556], "rank_score": 0.9811011687837747} -{"id": "habash-etal-2005-morphological", "title": "Morphological Analysis and Generation for Arabic Dialects", "abstract": "We present Magead, a morphological analyzer and generator for the Arabic language family. Our work is novel in that it explicitly addresses the need for processing the morphology of the dialects. 
Magead provides an analysis to a root+pattern representation, it has separate phonological and orthographic representations, and it allows for combining morphemes from different dialects.", "phrases": ["arabic dialect", "language family", "levantine arabic"], "overall_score": 2.5163376345062236, "scores": [1.5280762202994012, 0.8535935363838274, 0.5614733044295769], "rank_score": 0.9810476870376018} -{"id": "moore-2004-improving", "title": "Improving IBM Word Alignment Model 1", "abstract": "We investigate a number of simple methods for improving the word-alignment accuracy of IBM Model 1. We demonstrate reduction in alignment error rate of approximately 30% resulting from (1) giving extra weight to the probability of alignment to the null word, (2) smoothing probability estimates for rare words, and (3) using a simple heuristic estimation method to initialize, or replace, EM training of model parameters.", "phrases": ["ibm model", "alignment error rate", "null word"], "overall_score": 2.4374627215091156, "scores": [1.8188142570614994, 0.573627992136953, 0.5502792269387706], "rank_score": 0.9809071587124077} -{"id": "decadt-etal-2004-gambl", "title": "GAMBL, genetic algorithm optimization of memory-based WSD", "abstract": "GAMBL is a word expert approach to WSD in which each word expert is trained using memory-based learning. Joint feature selection and algorithm parameter optimization are achieved with a genetic algorithm (GA). We use a cascaded classifier approach in which the GA optimizes local context features and the output of a separate keyword classifier (rather than also optimizing the keyword features together with the local context features). A further innovation on earlier versions of memory-based WSD is the use of grammatical relation and chunk features. This paper presents the architecture of the system briefly, and discusses its performance on the English lexical sample and all words tasks in SENSEVAL-3.", "phrases": ["genetic algorithm", "wsd", "gambl", "memory-based classifier"], "overall_score": 1.9076458520575996, "scores": [1.669132371684606, 0.8785127681353694, 0.8407815128662397, 0.532917512117509], "rank_score": 0.9803360412009311} -{"id": "hu-etal-2019-improved", "title": "Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting", "abstract": "Lexically-constrained sequence decoding allows for explicit positive or negative phrase-based constraints to be placed on target output strings in generation tasks such as machine translation or monolingual text rewriting. We describe vectorized dynamic beam allocation, which extends work in lexically-constrained decoding to work with batching, leading to a five-fold improvement in throughput when working with positive constraints.
Faster decoding enables faster exploration of constraint strategies: we illustrate this via data augmentation experiments with a monolingual rewriter applied to the tasks of natural language inference, question answering and machine translation, showing improvements in all three.", "phrases": ["decoding", "lexical constraint", "unconstrained generation", "same approach"], "overall_score": 2.9845586484721065, "scores": [1.3400551455734608, 0.9312547403516571, 0.8253489493746654, 0.8245586423468525], "rank_score": 0.9803043694116589} -{"id": "zhang-vogel-2005-efficient", "title": "An efficient phrase-to-phrase alignment model for arbitrarily long phrase and large corpora", "abstract": "Most statistical machine translation (SMT) systems use phrase-to-phrase translations to capture local context information, leading to better lexical choices and more reliable word reordering. Long phrases capture more contexts than short phrases and result in better translation quality. On the other hand, the increasing amount of bilingual data poses serious problems for storing all possible phrases. In this paper, we describe a novel phrase-to-phrase alignment model which allows for arbitrarily long phrases and works for very large bilingual corpora. This model is very efficient in both time and space and the resulting translations are better than the state-of-the-art systems.", "phrases": ["long phrase", "bilingual data", "suffix array", "fly"], "overall_score": 2.3505712887332284, "scores": [1.968523776009172, 0.8543611210205018, 0.5603012037954499, 0.5378713621892397], "rank_score": 0.9802643657535908} -{"id": "bhatia-etal-2019-joint", "title": "Joint Entity Extraction and Assertion Detection for Clinical Text", "abstract": "Negative medical findings are prevalent in clinical reports, yet discriminating them from positive findings remains a challenging task for information extraction. Most of the existing systems treat this task as a pipeline of two separate tasks, i.e., named entity recognition (NER) and rule-based negation detection. We consider this as a multi-task problem and present a novel end-to-end neural model to jointly extract entities and negations. We extend a standard hierarchical encoder-decoder NER model and first adopt a shared encoder followed by separate decoders for the two tasks. This architecture performs considerably better than the previous rule-based and machine learning-based systems. To overcome the problem of increased parameter size especially for low-resource settings, we propose the Conditional Softmax Shared Decoder architecture which achieves state-of-the-art results for NER and negation detection on the 2010 i2b2/VA challenge dataset and a proprietary de-identified clinical dataset.", "phrases": ["assertion detection", "negation", "joint entity extraction"], "overall_score": 1.577453639630973, "scores": [1.5122160567670928, 0.880059408011607, 0.5481056970740971], "rank_score": 0.9801270539509322} -{"id": "trivedi-etal-2018-iit", "title": "IIT (BHU) Submission for the ACL Shared Task on Named Entity Recognition on Code-switched Data", "abstract": "This paper describes the best-performing system for the shared task on Named Entity Recognition (NER) on code-switched data for the language pair Spanish-English (ENG-SPA). We introduce a gated neural architecture for the NER task.
Our final model achieves an F1 score of 63.76%, outperforming the baseline by 10%.", "phrases": ["entity recognition", "noisy mixed-language text", "word embedding"], "overall_score": 1.7560488807874859, "scores": [1.8361199444681113, 0.5712748668737405, 0.5328138119909085], "rank_score": 0.9800695411109203} -{"id": "eric-etal-2017-key", "title": "Key-Value Retrieval Networks for Task-Oriented Dialogue", "abstract": "Neural task-oriented dialogue systems often struggle to smoothly interface with a knowledge base. In this work, we seek to address this problem by proposing a new neural dialogue agent that is able to effectively sustain grounded, multi-domain discourse through a novel key-value retrieval mechanism. The model is end-to-end differentiable and does not need to explicitly model dialogue state or belief trackers. We also release a new dataset of 3,031 dialogues that are grounded through underlying knowledge bases and span three distinct tasks in the in-car personal assistant space: calendar scheduling, weather information retrieval, and point-of-interest navigation. Our architecture is simultaneously trained on data from all domains and significantly outperforms a competitive rule-based system and other existing neural dialogue architectures on the provided domains according to both automatic and human evaluation metrics.", "phrases": ["dialogue system", "multi-domain discourse", "key-value retrieval mechanism"], "overall_score": 3.5389368948834323, "scores": [1.4773923384701861, 0.8971008255513839, 0.5657040217061321], "rank_score": 0.9800657285759007} -{"id": "sedoc-etal-2019-chateval", "title": "ChatEval: A Tool for Chatbot Evaluation", "abstract": "Open-domain dialog systems (i.e. chatbots) are difficult to evaluate. The current best practice for analyzing and comparing these dialog systems is the use of human judgments. However, the lack of standardization in evaluation procedures, and the fact that model parameters and code are rarely published hinder systematic human evaluation experiments. We introduce a unified framework for human evaluation of chatbots that augments existing tools and provides a web-based hub for researchers to share and compare their dialog systems. Researchers can submit their trained models to the ChatEval web interface and obtain comparisons with baselines and prior work. The evaluation code is open-source to ensure standardization and transparency. In addition, we introduce open-source baseline models and evaluation datasets. ChatEval can be found at .", "phrases": ["dialog system", "human evaluation", "chateval"], "overall_score": 1.9071066779386914, "scores": [1.883322536162268, 0.5294071595451004, 0.5274471858375578], "rank_score": 0.9800589605149755} -{"id": "johnson-2007-doesnt", "title": "Why Doesn't EM Find Good HMM POS-Taggers?", "abstract": "This paper investigates why the HMMs estimated by Expectation-Maximization (EM) produce such poor results as Part-of-Speech (POS) taggers. We find that the HMMs estimated by EM generally assign a roughly equal number of word tokens to each hidden state, while the empirical distribution of tokens to POS tags is highly skewed. This motivates a Bayesian approach using a sparse prior to bias the estimator toward such a skewed distribution. We investigate Gibbs Sampling (GS) and Variational Bayes (VB) estimators and show that VB converges faster than GS for this task and that VB significantly improves 1-to-1 tagging accuracy over EM. 
We also show that EM does nearly as well as VB when the number of hidden HMM states is dramatically reduced. We also point out the high variance in all of these estimators, and that they require many more iterations to approach convergence than usually thought.", "phrases": ["hmm", "hidden state", "pos tagging"], "overall_score": 3.1927263783582607, "scores": [1.417498340232786, 0.9837361298534294, 0.5385737962738865], "rank_score": 0.9799360887867006} -{"id": "hopkins-may-2011-tuning", "title": "Tuning as Ranking", "abstract": "We offer a simple, effective, and scalable method for statistical machine translation parameter tuning based on the pairwise approach to ranking (Herbrich et al., 1999). Unlike the popular MERT algorithm (Och, 2003), our pairwise ranking optimization (PRO) method is not limited to a handful of parameters and can easily handle systems with thousands of features. Moreover, unlike recent approaches built upon the MIRA algorithm of Crammer and Singer (2003) (Watanabe et al., 2007; Chiang et al., 2008b), PRO is easy to implement. It uses off-the-shelf linear binary classifier software and can be built on top of an existing MERT framework in a matter of hours. We establish PRO's scalability and effectiveness by comparing it to MERT and MIRA and demonstrate parity on both phrase-based and syntax-based systems in a variety of language pairs, using large scale data scenarios.", "phrases": ["pairwise ranking optimization", "pro", "thousand"], "overall_score": 2.7156186603254815, "scores": [1.4357912031515323, 0.8588283955042556, 0.6437375819920389], "rank_score": 0.9794523935492756} -{"id": "duh-kirchhoff-2008-beyond", "title": "Beyond Log-Linear Models: Boosted Minimum Error Rate Training for N-best Re-ranking", "abstract": "Current re-ranking algorithms for machine translation rely on log-linear models, which have the potential problem of underfitting the training data. We present BoostedMERT, a novel boosting algorithm that uses Minimum Error Rate Training (MERT) as a weak learner and builds a re-ranker far more expressive than log-linear models. BoostedMERT is easy to implement, inherits the efficient optimization properties of MERT, and can quickly boost the BLEU score on N-best re-ranking tasks. In this paper, we describe the general algorithm and present preliminary results on the IWSLT 2007 Arabic-English task.", "phrases": ["log-linear model", "boostedmert", "weak learner", "bleu score", "beam search"], "overall_score": 2.5846749527892827, "scores": [1.763640886292564, 1.1501231481726422, 0.8757311697331285, 0.5537822667930883, 0.5536888127347734], "rank_score": 0.9793932567452395} -{"id": "li-etal-2012-active-learning", "title": "Active Learning for Imbalanced Sentiment Classification", "abstract": "Active learning is a promising way for sentiment classification to reduce the annotation cost. In this paper, we focus on the imbalanced class distribution scenario for sentiment classification, wherein the number of positive samples is quite different from that of negative samples. This scenario poses new challenges to active learning. To address these challenges, we propose a novel active learning approach, named co-selecting, by taking both the imbalanced class distribution issue and uncertainty into account.
Specifically, our co-selecting approach employs two feature subspace classifiers to collectively select the most informative minority-class samples for manual annotation by leveraging a certainty measurement and an uncertainty measurement, and in the meanwhile, automatically label the most informative majority-class samples, to reduce human-annotation efforts. Extensive experiments across four domains demonstrate the great potential and effectiveness of our proposed co-selecting approach to active learning for imbalanced sentiment classification.", "phrases": ["imbalanced sentiment classification", "sample", "manual annotation", "active learning"], "overall_score": 1.5761213280473731, "scores": [1.8168090551553193, 1.00033242611984, 0.5595363843756769, 0.5405191032304557], "rank_score": 0.979299242220323} -{"id": "haghighi-klein-2007-unsupervised", "title": "Unsupervised Coreference Resolution in a Nonparametric Bayesian Model", "abstract": "We present an unsupervised, nonparametric Bayesian approach to coreference resolution which models both global entity identity across a corpus as well as the sequential anaphoric structure within each document. While most existing coreference work is driven by pairwise decisions, our model is fully generative, producing each mention from a combination of global entity properties and local attentional state. Despite being unsupervised, our system achieves a 70.3 MUC F1 measure on the MUC-6 test set, broadly in the range of some recent supervised results.", "phrases": ["nonparametric bayesian model", "unsupervised coreference resolution", "pronoun", "good result"], "overall_score": 3.026687018635195, "scores": [1.9622336679510841, 0.873864792122319, 0.5511346083814553, 0.5294870210370203], "rank_score": 0.9791800223729696} -{"id": "marge-rudnicky-2010-comparing", "title": "Comparing Spoken Language Route Instructions for Robots across Environment Representations", "abstract": "Spoken language interaction between humans and robots in natural environments will necessarily involve communication about space and distance. The current study examines people's close-range route instructions for robots and how the presentation format (schematic, virtual or natural) and the complexity of the route affect the content of instructions. We find that people have a general preference for providing metric-based instructions. At the same time, presentation format appears to have less impact on the formulation of these instructions. We conclude that understanding of spatial language requires handling both landmark-based and metric-based expressions.", "phrases": ["route instruction", "robot", "communication"], "overall_score": 1.5759072576701716, "scores": [1.8176325617575047, 0.5896870928978201, 0.5301790437942154], "rank_score": 0.9791662328165134} -{"id": "lin-etal-2009-recognizing", "title": "Recognizing Implicit Discourse Relations in the Penn Discourse Treebank", "abstract": "We present an implicit discourse relation classifier in the Penn Discourse Treebank (PDTB). Our classifier considers the context of the two arguments, word pair information, as well as the arguments' internal constituent and dependency parses. Our results on the PDTB yield a significant 14.1% improvement over the baseline.
In our error analysis, we discuss four challenges in recognizing implicit relations in the PDTB.", "phrases": ["penn discourse treebank", "implicit relation", "cross-argument word pair"], "overall_score": 3.941448609204895, "scores": [0.9283697645801513, 1.437803262148702, 0.571295978732184], "rank_score": 0.9791563351536791} -{"id": "shen-etal-2018-baseline", "title": "Baseline Needs More Love: On Simple Word-Embedding-Based Models and Associated Pooling Mechanisms", "abstract": "Many deep learning architectures have been proposed to model the compositionality in text sequences, requiring a substantial number of parameters and expensive computations. However, there has not been a rigorous evaluation regarding the added value of sophisticated compositional functions. In this paper, we conduct a point-by-point comparative study between Simple Word-Embedding-based Models (SWEMs), consisting of parameter-free pooling operations, relative to word-embedding-based RNN/CNN models. Surprisingly, SWEMs exhibit comparable or even superior performance in the majority of cases considered. Based upon this understanding, we propose two additional pooling strategies over learned word embeddings: (i) a max-pooling operation for improved interpretability; and (ii) a hierarchical pooling operation, which preserves spatial (n-gram) information within text sequences. We present experiments on 17 datasets encompassing three tasks: (i) (long) document classification; (ii) text sequence matching; and (iii) short text tasks, including classification and tagging.", "phrases": ["simple", "superior performance", "word embedding", "hierarchical pooling operation", "text sequence matching"], "overall_score": 3.0265719424468855, "scores": [1.6673216524922054, 1.5992927832976405, 0.5547988998668975, 0.5425619130949044, 0.5317387184871633], "rank_score": 0.9791427934477621} -{"id": "romeo-etal-2014-choosing", "title": "Choosing which to use? A study of distributional models for nominal lexical semantic classification", "abstract": "This paper empirically evaluates the performances of different state-of-the-art distributional models in a nominal lexical semantic classification task. We consider models that exploit various types of distributional features, which thereby provide different representations of nominal behavior in context. The experiments presented in this work demonstrate the advantages and disadvantages of each model considered. This analysis also considers a combined strategy that we found to be capable of leveraging the bottlenecks of each model, especially when large robust data is not available.", "phrases": ["distributional model", "classification task", "bottleneck"], "overall_score": 1.356517682939087, "scores": [1.8522952747685748, 0.5588718772696036, 0.5243948490434336], "rank_score": 0.9785206670272041} -{"id": "he-etal-2018-unsupervised", "title": "Unsupervised Learning of Syntactic Structure with Invertible Neural Projections", "abstract": "Unsupervised learning of syntactic structure is typically performed using generative models with discrete latent variables and multinomial parameters. In most cases, these models have not leveraged continuous word representations. In this work, we propose a novel generative model that jointly learns discrete syntactic structure and continuous word representations in an unsupervised fashion by cascading an invertible neural network with a structured generative prior.
We show that the invertibility condition allows for efficient exact inference and marginal likelihood computation in our model so long as the prior is well-behaved. In experiments we instantiate our approach with both Markov and tree-structured priors, evaluating on two tasks: part-of-speech (POS) induction, and unsupervised dependency parsing without gold POS annotation. On the Penn Treebank, our Markov-structured model surpasses state-of-the-art results on POS induction. Similarly, we find that our tree-structured model achieves state-of-the-art performance on unsupervised dependency parsing for the difficult training condition where neither gold POS annotation nor punctuation-based constraints are available.", "phrases": ["syntactic structure", "part-of-speech", "induction", "unsupervised learning"], "overall_score": 2.345119773841912, "scores": [1.9776097202744098, 0.8577079612222194, 0.5456957639726074, 0.530950184377084], "rank_score": 0.9779909074615802} -{"id": "derczynski-etal-2013-twitter", "title": "Twitter Part-of-Speech Tagging for All: Overcoming Sparse and Noisy Data", "abstract": "Part-of-speech information is a pre-requisite in many NLP algorithms. However, Twitter text is difficult to part-of-speech tag: it is noisy, with linguistic errors and idiosyncratic style. We present a detailed error analysis of existing taggers, motivating a series of tagger augmentations which are demonstrated to improve performance. We identify and evaluate techniques for improving English part-of-speech tagging performance in this genre. Further, we present a novel approach to system combination for the case where available taggers use different tagsets, based on vote-constrained bootstrapping with unlabeled data. Coupled with assigning prior probabilities to some tokens and handling of unknown words and slang, we reach 88.7% tagging accuracy (90.5% on development data). This is a new high in PTB-compatible tweet part-of-speech tagging, reducing token error by 26.8% and sentence error by 12.2%. The model, training data and tools are made available.", "phrases": ["probability", "pos tagger", "english tweet", "gate twitie"], "overall_score": 2.8796057439256013, "scores": [1.6254599773976455, 0.9095017505918309, 0.8381225347262636, 0.5388402353641044], "rank_score": 0.977981124519961} -{"id": "bentivogli-etal-2016-neural", "title": "Neural versus Phrase-Based Machine Translation Quality: a Case Study", "abstract": "Within the field of Statistical Machine Translation (SMT), the neural approach (NMT) has recently emerged as the first technology able to challenge the long-standing dominance of phrase-based approaches (PBMT). In particular, at the IWSLT 2015 evaluation campaign, NMT outperformed well established state-of-the-art PBMT systems on English-German, a language pair known to be particularly hard because of morphology and syntactic differences. To understand in what respects NMT provides better translation quality than PBMT, we perform a detailed analysis of neural versus phrase-based SMT outputs, leveraging high quality post-edits performed by professional translators on the IWSLT data.
For the first time, our analysis provides useful insights on what linguistic phenomena are best modeled by neural models -- such as the reordering of verbs -- while pointing out other aspects that remain to be improved.", "phrases": ["machine translation", "phrase-based smt", "post-editing", "professional translator", "negation"], "overall_score": 3.7859136004328904, "scores": [1.8344054965555214, 0.8549909961800454, 0.8351838812763374, 0.8241899764618092, 0.5410733824728754], "rank_score": 0.9779687465893179} -{"id": "yu-etal-2016-retrofitting", "title": "Retrofitting Word Vectors of MeSH Terms to Improve Semantic Similarity Measures", "abstract": "Estimation of the semantic relatedness between biomedical concepts has utility for many informatics applications. Automated methods fall into two broad categories: methods based on distributional statistics drawn from text corpora, and methods based on the structure of existing knowledge resources. In the former case, taxonomic structure is disregarded. In the latter, semantically relevant empirical information is not considered. In this paper, we present a method that retrofits the context vector representation of MeSH terms by using additional linkage information from UMLS/MeSH hierarchy such that linked concepts have similar vector representations. We evaluated the method relative to previously published physician and coder\u2019s ratings on sets of MeSH terms. Our experimental results demonstrate that the retrofitted word vector measures obtain a higher correlation with physician judgments. The results also demonstrate a clear improvement on the correlation with experts\u2019 ratings from the retrofitted vector representation in comparison to the vector representation without retrofitting.", "phrases": ["mesh term", "biomedical concept", "additional linkage information"], "overall_score": 1.5739732445235686, "scores": [1.859161234871337, 0.542652640376982, 0.5320798134055754], "rank_score": 0.9779645628846314} -{"id": "virga-khudanpur-2003-transliteration", "title": "Transliteration of Proper Names in Cross-Lingual Information Retrieval", "abstract": "We address the problem of transliterating English names using Chinese orthography in support of cross-lingual speech and text processing applications. We demonstrate the application of statistical machine translation techniques to \"translate\" the phonemic representation of an English name, obtained by using an automatic text-to-speech system, to a sequence of initials and finals, commonly used sub-word units of pronunciation for Chinese. We then use another statistical translation model to map the initial/final sequence to Chinese characters. We also present an evaluation of this module in retrieval of Mandarin spoken documents from the TDT corpus using English text queries.", "phrases": ["name", "sub-word unit", "mandarin", "text query", "transliteration"], "overall_score": 3.3579832442674973, "scores": [1.8498348359499452, 1.3877033135107408, 0.5820231577645387, 0.5456144151481944, 0.524159974453902], "rank_score": 0.9778671393654642} -{"id": "shardlow-2013-cw", "title": "The CW Corpus: A New Resource for Evaluating the Identification of Complex Words", "abstract": "The task of identifying complex words (CWs) is important for lexical simplification, however it is often carried out with no evaluation of success.
There is no basis for comparison of current techniques and, prior to this work, there has been no standard corpus or evaluation technique for the CW identification task. This paper addresses these shortcomings with a new corpus for evaluating a system\u2019s performance in identifying CWs. Simple Wikipedia edit histories were mined for instances of single word lexical simplifications. The corpus contains 731 sentences, each with one annotated CW. This paper describes the method used to produce the CW corpus and presents the results of evaluation, showing its validity.", "phrases": ["identification", "complex word", "simple wikipedia", "edit history"], "overall_score": 2.032841659694089, "scores": [1.9323590278167306, 0.8800427374932471, 0.5575942173432598, 0.540364792484299], "rank_score": 0.977590193784384} -{"id": "holla-etal-2020-learning", "title": "Learning to Learn to Disambiguate: Meta-Learning for Few-Shot Word Sense Disambiguation", "abstract": "The success of deep learning methods hinges on the availability of large training datasets annotated for the task of interest. In contrast to human intelligence, these methods lack versatility and struggle to learn and adapt quickly to new tasks, where labeled data is scarce. Meta-learning aims to solve this problem by training a model on a large number of few-shot tasks, with an objective to learn new tasks quickly from a small number of examples. In this paper, we propose a meta-learning framework for few-shot word sense disambiguation (WSD), where the goal is to learn to disambiguate unseen words from only a few labeled instances. Meta-learning approaches have so far been typically tested in an N-way, K-shot classification setting where each task has N classes with K examples per class. Owing to its nature, WSD deviates from this controlled setup and requires the models to handle a large number of highly unbalanced classes. We extend several popular meta-learning approaches to this scenario, and analyze their strengths and weaknesses in this new challenging setting.", "phrases": ["word sense disambiguation", "meta-learning approach", "n-way"], "overall_score": 1.9020923581788063, "scores": [1.8408678110153485, 0.5472468451889285, 0.5443316735024992], "rank_score": 0.9774821099022587} -{"id": "farajian-etal-2017-multi", "title": "Multi-Domain Neural Machine Translation through Unsupervised Adaptation", "abstract": "We investigate the application of Neural Machine Translation (NMT) under the following three conditions posed by real-world application scenarios. First, we operate with an input stream of sentences coming from many different domains and with no predefined order. Second, the sentences are presented without domain information. Third, the input stream should be processed by a single generic NMT model. To tackle the weaknesses of current NMT technology in this unsupervised multi-domain setting, we explore an efficient instance-based adaptation method that, by exploiting the similarity between the training instances and each test sentence, dynamically sets the hyperparameters of the learning algorithm and updates the generic model on-the-fly.
The results of our experiments with multi-domain data show that local adaptation outperforms not only the original generic NMT system, but also a strong phrase-based system and even single-domain NMT models specifically optimized on each domain and applicable only by violating two of our aforementioned assumptions.", "phrases": ["neural machine translation", "adaptation", "learning algorithm", "n-gram", "similar sentence"], "overall_score": 3.256416234529662, "scores": [0.9645169639090673, 1.4911802824601317, 0.9395555506983037, 0.9200340045201149, 0.5709922737000561], "rank_score": 0.9772558150575348} -{"id": "tatu-srikanth-2008-experiments", "title": "Experiments with Reasoning for Temporal Relations between Events", "abstract": "Few attempts have been made to investigate the utility of temporal reasoning within machine learning frameworks for temporal relation classification between events in news articles. This paper presents three settings where temporal reasoning aids machine learned classifiers of temporal relations: (1) expansion of the dataset used for learning; (2) detection of inconsistencies among the automatically identified relations; and (3) selection among multiple temporal relations. Feature engineering is another effort in our work to improve classification accuracy.", "phrases": ["reasoning", "temporal information processing", "summarization"], "overall_score": 1.9015829612366315, "scores": [1.8075514160498358, 0.6008408471521474, 0.5232687317722176], "rank_score": 0.9772203316580669} -{"id": "melville-sindhwani-2009-active", "title": "Active Dual Supervision: Reducing the Cost of Annotating Examples and Features", "abstract": "When faced with the task of building machine learning or NLP models, it is often worthwhile to turn to active learning to obtain human annotations at minimal costs. Traditional active learning schemes query a human for labels of intelligently chosen examples. However, human effort can also be expended in collecting alternative forms of annotations. For example, one may attempt to learn a text classifier by labeling class-indicating words, instead of, or in addition to, documents. Learning from two different kinds of supervision brings a new, unexplored dimension to the problem of active learning. In this paper, we demonstrate the value of such active dual supervision in the context of sentiment analysis. We show how interleaving queries for both documents and words significantly reduces human effort -- more than what is possible through traditional one-dimensional active learning, or by passive combinations of supervisory inputs.", "phrases": ["cost", "annotator", "active dual supervision"], "overall_score": 2.146905418593386, "scores": [0.9326964767746972, 0.8179309889161381, 1.18066882610128], "rank_score": 0.9770987639307052} -{"id": "koller-etal-2008-regular", "title": "Regular Tree Grammars as a Formalism for Scope Underspecification", "abstract": "We propose the use of regular tree grammars (RTGs) as a formalism for the underspecified processing of scope ambiguities. By applying standard results on RTGs, we obtain a novel algorithm for eliminating equivalent readings and the first efficient algorithm for computing the best reading of a scope ambiguity.
We also show how to derive RTGs from more traditional underspecified descriptions.", "phrases": ["scope underspecification", "reading", "regular tree grammars"], "overall_score": 1.9009251054344014, "scores": [0.9365102451756935, 0.9051549832507202, 1.0889815535289338], "rank_score": 0.9768822606517825} -{"id": "su-etal-2018-natural", "title": "Natural Language Generation by Hierarchical Decoding with Linguistic Patterns", "abstract": "Natural language generation (NLG) is a critical component in spoken dialogue systems. Classic NLG can be divided into two phases: (1) sentence planning: deciding on the overall sentence structure, (2) surface realization: determining specific word forms and flattening the sentence structure into a string. Many simple NLG models are based on recurrent neural networks (RNN) and sequence-to-sequence (seq2seq) model, which basically contains an encoder-decoder structure; these NLG models generate sentences from scratch by jointly optimizing sentence planning and surface realization using a simple cross entropy loss training criterion. However, the simple encoder-decoder architecture usually suffers from generating complex and long sentences, because the decoder has to learn all grammar and diction knowledge. This paper introduces a hierarchical decoding NLG model based on linguistic patterns in different levels, and shows that the proposed method outperforms the traditional one with a smaller model size. Furthermore, the design of the hierarchical decoding is flexible and easily-extendible in various NLG systems.", "phrases": ["hierarchical decoding", "nlg", "sequence-to-sequence", "natural language generation"], "overall_score": 2.24896421205147, "scores": [1.8366930463910063, 0.978892962037457, 0.5483516853425353, 0.5429132953963937], "rank_score": 0.9767127472918481} -{"id": "ganchev-das-2013-cross", "title": "Cross-Lingual Discriminative Learning of Sequence Models with Posterior Regularization", "abstract": "We present a framework for cross-lingual transfer of sequence information from a resource-rich source language to a resource-impoverished target language that incorporates soft constraints via posterior regularization. To this end, we use automatically word aligned bitext between the source and target language pair, and learn a discriminative conditional random field model on the target side. Our posterior regularization constraints are derived from simple intuitions about the task at hand and from cross-lingual alignment information. We show improvements over strong baselines for two tasks: part-of-speech tagging and named-entity segmentation.", "phrases": ["posterior regularization", "lingual projection", "cross"], "overall_score": 2.1455368699168442, "scores": [1.8285066992711903, 0.579069191488915, 0.521851840998605], "rank_score": 0.9764759105862367} -{"id": "press-wolf-2017-using", "title": "Using the Output Embedding to Improve Language Models", "abstract": "We study the topmost weight matrix of neural network language models. We show that this matrix constitutes a valid word embedding. When training language models, we recommend tying the input embedding and this output embedding. We analyze the resulting update rules and show that the tied embedding evolves in a more similar way to the output embedding than to the input embedding in the untied model. We also offer a new method of regularizing the output embedding. Our methods lead to a significant reduction in perplexity, as we are able to show on a variety of neural network language models.
Finally, we show that weight tying can reduce the size of neural translation models to less than half of their original size without harming their performance.", "phrases": ["output embedding", "language model", "perplexity"], "overall_score": 3.1424866880853126, "scores": [1.442531707589237, 0.9568199187135369, 0.5294535147411346], "rank_score": 0.9762683803479696} -{"id": "zhou-etal-2018-dataset", "title": "A Dataset for Document Grounded Conversations", "abstract": "This paper introduces a document grounded dataset for conversations. We define \u201cDocument Grounded Conversations\u201d as conversations that are about the contents of a specified document. In this dataset the specified documents were Wikipedia articles about popular movies. The dataset contains 4112 conversations with an average of 21.43 turns per conversation. This positions this dataset to not only provide a relevant chat history while generating responses but also provide a source of information that the models could use. We describe two neural architectures that provide benchmark performance on the task of generating the next response. We also evaluate our models for engagement and fluency, and find that the information from the document helps in generating more engaging and fluent responses.", "phrases": ["conversation", "wikipedia article", "popular movie", "knowledge grounding", "participant"], "overall_score": 3.17945029466474, "scores": [1.4213875334862691, 1.3502528508526495, 1.0411126661646883, 0.5353256686829654, 0.5312277371462086], "rank_score": 0.9758612912665562} -{"id": "ravi-knight-2011-deciphering", "title": "Deciphering Foreign Language", "abstract": "In this work, we tackle the task of machine translation (MT) without parallel training data. We frame the MT problem as a decipherment task, treating the foreign text as a cipher for English and present novel methods for training translation models from non-parallel text.", "phrases": ["foreign language", "decipherment", "monolingual data", "other work"], "overall_score": 3.4402824666401206, "scores": [0.9026536618304893, 1.4257889926019114, 1.0472526225848557, 0.5266651794645315], "rank_score": 0.975590114120447} -{"id": "lambert-banchs-2005-data", "title": "Data Inferred Multi-word Expressions for Statistical Machine Translation", "abstract": "This paper presents a strategy for detecting and using multi-word expressions in Statistical Machine Translation. Performance of the proposed strategy is evaluated in terms of alignment quality as well as translation accuracy. Evaluations are performed by using the Verbmobil corpus. Results from translation tasks from English-to-Spanish and from Spanish-to-English are presented and discussed.", "phrases": ["multi-word expression", "statistical machine translation", "mwes", "unit", "semantic perspective"], "overall_score": 2.5022839500857375, "scores": [0.9488705979150289, 1.2606493777373948, 1.0748553652871935, 1.0064271549246666, 0.587040302096808], "rank_score": 0.9755685595922184} -{"id": "gupta-etal-2021-vita", "title": "ViTA: Visual-Linguistic Translation by Aligning Object Tags", "abstract": "Multimodal Machine Translation (MMT) enriches the source text with visual information for translation. It has gained popularity in recent years, and several pipelines have been proposed in the same direction. Yet, the task lacks quality datasets to illustrate the contribution of visual modality in the translation systems. 
In this paper, we propose our system under the team name Volta for the Multimodal Translation Task of WAT 2021 from English to Hindi. We also participate in the textual-only subtask of the same language pair for which we use mBART, a pretrained multilingual sequence-to-sequence model. For multimodal translation, we propose to enhance the textual input by bringing the visual information to a textual domain by extracting object tags from the image. We also explore the robustness of our system by systematically degrading the source text. Finally, we achieve a BLEU score of 44.6 and 51.6 on the test set and challenge set of the multimodal task.", "phrases": ["object tag", "visual information", "textual domain"], "overall_score": 1.0715635923739073, "scores": [1.7903740741378393, 0.5742892766892913, 0.5614742959845722], "rank_score": 0.9753792156039008} -{"id": "small-etal-2003-hitiqa", "title": "HITIQA: An Interactive Question Answering System: A Preliminary Report", "abstract": "HITIQA is an interactive question answering technology designed to allow intelligence analysts and other users of information systems to pose questions in natural language and obtain relevant answers, or the assistance they require in order to perform their tasks. Our objective in HITIQA is to allow the user to submit exploratory, analytical, non-factual questions, such as \"What has been Russia's reaction to U.S. bombing of Kosovo?\" The distinguishing property of such questions is that one cannot generally anticipate what might constitute the answer. While certain types of things may be expected (e.g., diplomatic statements), the answer is heavily conditioned by what information is in fact available on the topic. From a practical viewpoint, analytical questions are often under-specified, thus casting a broad net on a space of possible answers. Therefore, clarification dialogue is often needed to negotiate with the user the exact scope and intent of the question.", "phrases": ["interactive question", "objective", "hitiqa"], "overall_score": 1.3521344228209973, "scores": [1.7742101377741428, 0.6264841945540232, 0.5253821073005044], "rank_score": 0.9753588132095569} -{"id": "dasgupta-etal-2013-summarization", "title": "Summarization Through Submodularity and Dispersion", "abstract": "We propose a new optimization framework for summarization by generalizing the submodular framework of (Lin and Bilmes, 2011). In our framework the summarization desideratum is expressed as a sum of a submodular function and a nonsubmodular function, which we call dispersion; the latter uses inter-sentence dissimilarities in different ways in order to ensure non-redundancy of the summary. We consider three natural dispersion functions and show that a greedy algorithm can obtain an approximately optimal summary in all three cases. We conduct experiments on two corpora\u2014DUC 2004 and user comments on news articles\u2014and show that the performance of our algorithm outperforms those that rely only on submodularity.", "phrases": ["submodularity", "function", "greedy algorithm", "summarization"], "overall_score": 1.7467027032346438, "scores": [1.9329080322181875, 0.9029239845111059, 0.5387583907621938, 0.5248229529126907], "rank_score": 0.9748533401010444} -{"id": "zhu-etal-2021-hitsz", "title": "HITSZ-HLT at SemEval-2021 Task 5: Ensemble Sequence Labeling and Span Boundary Detection for Toxic Span Detection", "abstract": "This paper presents the winning system that participated in SemEval-2021 Task 5: Toxic Spans Detection. 
This task aims to locate those spans that contribute to the text's toxicity within a text, which is crucial for semi-automated moderation in online discussions. We formalize this task as the Sequence Labeling (SL) problem and the Span Boundary Detection (SBD) problem separately and employ three state-of-the-art models. Next, we integrate predictions of these models to produce a more credible and complementary result. Our system achieves a char-level score of 70.83%, ranking 1/91. In addition, we also explore the lexicon-based method, which is strongly interpretable and flexible in practice.", "phrases": ["semeval-2021 task", "sequence labeling", "span boundary detection", "conditional random fields"], "overall_score": 1.5682773182355307, "scores": [1.7031299145251133, 0.8490366888432107, 0.8235061308220599, 0.5220292053984044], "rank_score": 0.974425484897197} -{"id": "rosti-etal-2008-incremental", "title": "Incremental Hypothesis Alignment for Building Confusion Networks with Application to Machine Translation System Combination", "abstract": "Confusion network decoding has been the most successful approach in combining outputs from multiple machine translation (MT) systems in the recent DARPA GALE and NIST Open MT evaluations. Due to the varying word order between outputs from different MT systems, the hypothesis alignment presents the biggest challenge in confusion network decoding. This paper describes an incremental alignment method to build confusion networks based on the translation edit rate (TER) algorithm. This new algorithm yields significant BLEU score improvements over other recent alignment methods on the GALE test sets and was used in BBN's submission to the WMT08 shared translation task.", "phrases": ["confusion network", "translation system", "system combination"], "overall_score": 2.5707646302608027, "scores": [1.7974787830535446, 0.5698624794123988, 0.5550256767573001], "rank_score": 0.9741223130744144} -{"id": "smith-etal-2005-logarithmic", "title": "Logarithmic Opinion Pools for Conditional Random Fields", "abstract": "Recent work on Conditional Random Fields (CRFs) has demonstrated the need for regularisation to counter the tendency of these models to overfit. The standard approach to regularising CRFs involves a prior distribution over the model parameters, typically requiring search over a hyperparameter space. In this paper we address the overfitting problem from a different perspective, by factoring the CRF distribution into a weighted product of individual \"expert\" CRF distributions. We call this model a logarithmic opinion pool (LOP) of CRFs (LOP-CRFs). We apply the LOP-CRF to two sequencing tasks. Our results show that unregularised expert CRFs with an unregularised CRF under a LOP can outperform the unregularised CRF, and attain a performance level close to that of the regularised CRF. LOP-CRFs therefore provide a viable alternative to CRF regularisation without the need for hyperparameter search.", "phrases": ["conditional random fields", "crfs", "lop", "logarithmic opinion pool"], "overall_score": 2.1399192285006454, "scores": [0.9420653700651459, 1.527221223240033, 0.8592671930345708, 0.5671230608489013], "rank_score": 0.9739192117971629} -{"id": "tsarfaty-etal-2013-parsing", "title": "Parsing Morphologically Rich Languages: Introduction to the Special Issue", "abstract": "Parsing is a key task in natural language processing.
It involves predicting, for each natural language sentence, an abstract representation of the grammatical entities in the sentence and the relations between these entities. This representation provides an interface to compositional semantics and to the notions of \u201cwho did what to whom.\u201d The last two decades have seen great advances in parsing English, leading to major leaps also in the performance of applications that use parsers as part of their backbone, such as systems for information extraction, sentiment analysis, text summarization, and machine translation. Attempts to replicate the success of parsing English for other languages have often yielded unsatisfactory results. In particular, parsing languages with complex word structure and flexible word order has been shown to require non-trivial adaptation. This special issue reports on methods that successfully address the challenges involved in parsing a range of morphologically rich languages (MRLs). This introduction characterizes MRLs, describes the challenges in parsing MRLs, and outlines the contributions of the articles in the special issue. These contributions present up-to-date research efforts that address parsing in varied, cross-lingual settings. They show that parsing MRLs addresses challenges that transcend particular representational and algorithmic choices.", "phrases": ["rich language", "special issue", "syntactic analysis"], "overall_score": 2.4979036462354145, "scores": [1.7283412915056628, 0.6043657838459465, 0.5888753398749322], "rank_score": 0.973860805075514} -{"id": "nawaz-etal-2010-evaluating", "title": "Evaluating a meta-knowledge annotation scheme for bio-events", "abstract": "The correct interpretation of biomedical texts by text mining systems requires the recognition of a range of types of high-level information (or meta-knowledge) about the text. Examples include expressions of negation and speculation, as well as pragmatic/rhetorical intent (e.g. whether the information expressed represents a hypothesis, generally accepted knowledge, new experimental knowledge, etc.) Although such types of information have previously been annotated at the text-span level (most commonly sentences), annotation at the level of the event is currently quite sparse. In this paper, we focus on the evaluation of the multi-dimensional annotation scheme that we have developed specifically for enriching bio-events with meta-knowledge information. Our annotation scheme is intended to be general enough to allow integration with different types of bio-event annotation, whilst being detailed enough to capture important subtleties in the nature of the meta-knowledge expressed in the text. To our knowledge, our scheme is unique within the field with regards to the diversity of meta-knowledge aspects annotated for each event, whilst the evaluation results have confirmed its feasibility and soundness.", "phrases": ["annotation scheme", "bio-event", "meta-knowledge aspect"], "overall_score": 1.069864446917655, "scores": [1.7530729444588897, 0.6051420474629579, 0.5632827683517785], "rank_score": 0.9738325867578753} -{"id": "qu-etal-2021-rocketqa", "title": "RocketQA: An Optimized Training Approach to Dense Passage Retrieval for Open-Domain Question Answering", "abstract": "In open-domain question answering, dense passage retrieval has become a new paradigm to retrieve relevant passages for finding answers. 
Typically, the dual-encoder architecture is adopted to learn dense representations of questions and passages for semantic matching. However, it is difficult to effectively train a dual-encoder due to the challenges including the discrepancy between training and inference, the existence of unlabeled positives and limited training data. To address these challenges, we propose an optimized training approach, called RocketQA, to improving dense passage retrieval. We make three major technical contributions in RocketQA, namely cross-batch negatives, denoised hard negatives and data augmentation. The experiment results show that RocketQA significantly outperforms previous state-of-the-art models on both MSMARCO and Natural Questions. We also conduct extensive experiments to examine the effectiveness of the three strategies in RocketQA. Besides, we demonstrate that the performance of end-to-end QA can be improved based on our RocketQA retriever.", "phrases": ["retriever", "open-domain question answering", "pre-trained language model"], "overall_score": 3.209155551712556, "scores": [0.9162406300713021, 1.4682154813764663, 0.5366431560682906], "rank_score": 0.9736997558386863} -{"id": "belz-varges-2007-generation", "title": "Generation of repeated references to discourse entities", "abstract": "Generation of Referring Expressions is a thriving subfield of Natural Language Generation which has traditionally focused on the task of selecting a set of attributes that unambiguously identify a given referent. In this paper, we address the complementary problem of generating repeated, potentially different referential expressions that refer to the same entity in the context of a piece of discourse longer than a sentence. We describe a corpus of short encyclopaedic texts we have compiled and annotated for reference to the main subject of the text, and report results for our experiments in which we set human subjects and automatic methods the task of selecting a referential expression from a wide range of choices in a full-text context. We find that our human subjects agree on choice of expression to a considerable degree, with three identical expressions selected in 50% of cases. We tested automatic selection strategies based on most frequent choice heuristics, involving different combinations of information about syntactic MSR type and domain type. We find that more information generally produces better results, achieving a best overall test set accuracy of 53.9% when both syntactic MSR type and domain type are known.", "phrases": ["reference", "wikipedia article", "subdomain"], "overall_score": 2.2415230284736323, "scores": [1.837867014772366, 0.5453445056154926, 0.537231726587634], "rank_score": 0.9734810823251641} -{"id": "kanayama-nasukawa-2006-fully", "title": "Fully Automatic Lexicon Expansion for Domain-oriented Sentiment Analysis", "abstract": "This paper proposes an unsupervised lexicon building method for the detection of polar clauses, which convey positive or negative aspects in a specific domain. The lexical entries to be acquired are called polar atoms, the minimum human-understandable syntactic structures that specify the polarity of clauses. As a clue to obtain candidate polar atoms, we use context coherency, the tendency for same polarities to appear successively in contexts. Using the overall density and precision of coherency in the corpus, the statistical estimation picks up appropriate polar atoms among candidates, without any manual tuning of the threshold values. 
The experimental results show that the precision of polarity assignment with the automatically acquired lexicon was 94% on average, and our method is robust for corpora in diverse domains and for the size of the initial lexicon.", "phrases": ["sentiment analysis", "polarity", "syntactic feature", "seed word", "co-occurrence"], "overall_score": 3.2418576565705024, "scores": [0.9189275581922315, 1.7169435567598628, 1.1162526210853154, 0.5703235524852525, 0.5419865219940413], "rank_score": 0.9728867621033407} -{"id": "li-etal-2016-visualizing", "title": "Visualizing and Understanding Neural Models in NLP", "abstract": "While neural networks have been successfully applied to many NLP tasks, the resulting vector-based models are very difficult to interpret. For example, it's not clear how they achieve compositionality, building sentence meaning from the meanings of words and phrases. In this paper we describe four strategies for visualizing compositionality in neural models for NLP, inspired by similar work in computer vision. We first plot unit values to visualize compositionality of negation, intensification, and concessive clauses, allowing us to see well-known markedness asymmetries in negation. We then introduce three simple and straightforward methods for visualizing a unit's salience, the amount it contributes to the final composed meaning: (1) gradient back-propagation, (2) the variance of a token from the average word node, (3) LSTM-style gates that measure information flow. We test our methods on sentiment using simple recurrent nets and LSTMs. Our general-purpose methods may have wide applications for understanding compositionality and other semantic properties of deep networks, and also shed light on why LSTMs outperform simple recurrent nets.", "phrases": ["negation", "rnn", "recurrent network", "saliency method", "visualization"], "overall_score": 3.8622146473211028, "scores": [1.311766048667313, 1.2379206887467415, 0.9207059193773846, 0.8298770570035712, 0.5636228304004038], "rank_score": 0.972778508839083} -{"id": "neubig-etal-2011-pointwise", "title": "Pointwise Prediction for Robust, Adaptable Japanese Morphological Analysis", "abstract": "We present a pointwise approach to Japanese morphological analysis (MA) that ignores structure information during learning and tagging. Despite the lack of structure, it is able to outperform the current state-of-the-art structured approach for Japanese MA, and achieves accuracy similar to that of structured predictors using the same feature set. We also find that the method is both robust to out-of-domain data, and can be easily adapted through the use of a combination of partial annotation and active learning.", "phrases": ["japanese morphological analysis", "pointwise prediction", "pos tagging", "state-of-the-art model", "rich character-level"], "overall_score": 2.6967769468495093, "scores": [2.0121045364241565, 0.89638951314715, 0.827339810636294, 0.5787710226388446, 0.5486785266578889], "rank_score": 0.9726566819008667} -{"id": "kober-etal-2020-aspectuality", "title": "Aspectuality Across Genre: A Distributional Semantics Approach", "abstract": "The interpretation of the lexical aspect of verbs in English plays a crucial role in tasks such as recognizing textual entailment and learning discourse-level inferences. We show that two elementary dimensions of aspectual class, states vs. events, and telic vs. atelic events, can be modelled effectively with distributional semantics.
We find that a verb's local context is most indicative of its aspectual class, and we demonstrate that closed class words tend to be stronger discriminating contexts than content words. Our approach outperforms previous work on three datasets. Further, we present a new dataset of human-human conversations annotated with lexical aspects and present experiments that show the correlation of telicity with genre and discourse goals.", "phrases": ["genre", "distributional semantic", "telicity"], "overall_score": 1.3481834756983793, "scores": [1.7579409057189455, 0.5836993814842356, 0.575886134694566], "rank_score": 0.972508807299249} -{"id": "sil-etal-2012-linking", "title": "Linking Named Entities to Any Database", "abstract": "Existing techniques for disambiguating named entities in text mostly focus on Wikipedia as a target catalog of entities. Yet for many types of entities, such as restaurants and cult movies, relational databases exist that contain far more extensive information than Wikipedia. This paper introduces a new task, called Open-Database Named-Entity Disambiguation (Open-DB NED), in which a system must be able to resolve named entities to symbols in an arbitrary database, without requiring labeled data for each new database. We introduce two techniques for Open-DB NED, one based on distant supervision and the other based on domain adaptation. In experiments on two domains, one with poor coverage by Wikipedia and the other with near-perfect coverage, our Open-DB NED strategies outperform a state-of-the-art Wikipedia NED system by over 25% in accuracy.", "phrases": ["database", "wikipedia", "mention"], "overall_score": 2.4158869497701625, "scores": [1.781711800969623, 0.567820702417737, 0.5671407848944438], "rank_score": 0.9722244294272678} -{"id": "pantel-etal-2009-web", "title": "Web-Scale Distributional Similarity and Entity Set Expansion", "abstract": "Computing the pairwise semantic similarity between all words on the Web is a computationally challenging task. Parallelization and optimizations are necessary. We propose a highly scalable implementation based on distributional similarity, implemented in the MapReduce framework and deployed over a 200 billion word crawl of the Web. The pairwise similarity between 500 million terms is computed in 50 hours using 200 quad-core nodes. We apply the learned similarity matrix to the task of automatic set expansion and present a large empirical study to quantify the effect on expansion performance of corpus size, corpus quality, seed composition and seed size. We make public an experimental testbed for set expansion analysis that includes a large collection of diverse entity sets extracted from Wikipedia.", "phrases": ["distributional similarity", "mapreduce framework", "quad-core node", "node", "memory"], "overall_score": 3.089191513495839, "scores": [1.423994910208364, 0.8944364123726052, 0.8716715231595689, 0.8454545330357495, 0.8246364354323642], "rank_score": 0.9720387628417303} -{"id": "yu-etal-2020-named", "title": "Named Entity Recognition as Dependency Parsing", "abstract": "Named Entity Recognition (NER) is a fundamental task in Natural Language Processing, concerned with identifying spans of text expressing references to entities. NER research is often focused on flat entities only (flat NER), ignoring the fact that entity references can be nested, as in [Bank of [China]] (Finkel and Manning, 2009). 
In this paper, we use ideas from graph-based dependency parsing to provide our model a global view on the input via a biaffine model (Dozat and Manning, 2017). The biaffine model scores pairs of start and end tokens in a sentence which we use to explore all spans, so that the model is able to predict named entities accurately. We show that the model works well for both nested and flat NER through evaluation on 8 corpora and achieving SoTA performance on all of them, with accuracy gains of up to 2.2 percentage points.", "phrases": ["entity recognition", "span", "graph-based dependency", "biaffine model", "language model"], "overall_score": 2.5649280799326477, "scores": [0.955646574892548, 1.284031925438048, 1.060562595806229, 0.9469384943112644, 0.6123739538217922], "rank_score": 0.9719107088539763} -{"id": "khot-etal-2017-answering", "title": "Answering Complex Questions Using Open Information Extraction", "abstract": "While there has been substantial progress in factoid question-answering (QA), answering complex questions remains challenging, typically requiring both a large body of knowledge and inference techniques. Open Information Extraction (Open IE) provides a way to generate semi-structured knowledge for QA, but to date such knowledge has only been used to answer simple questions with retrieval-based methods. We overcome this limitation by presenting a method for reasoning with Open IE knowledge, allowing more complex questions to be handled. Using a recently proposed support graph optimization framework for QA, we develop a new inference model for Open IE, in particular one that can work effectively with multiple short facts, noise, and the relational structure of tuples. Our model significantly outperforms a state-of-the-art structured solver on complex questions of varying difficulty, while also removing the reliance on manually curated knowledge.", "phrases": ["open information extraction", "knowledge representation", "oie task"], "overall_score": 2.330492355956495, "scores": [1.8268476373061004, 0.5657971452461154, 0.5230276186491732], "rank_score": 0.9718908004004629} -{"id": "goldwater-etal-2006-contextual", "title": "Contextual Dependencies in Unsupervised Word Segmentation", "abstract": "Developing better methods for segmenting continuous text into words is important for improving the processing of Asian languages, and may shed light on how humans learn to segment speech. We propose two new Bayesian word segmentation methods that assume unigram and bigram models of word dependencies respectively. The bigram model greatly outperforms the unigram model (and previous probabilistic models), demonstrating the importance of such dependencies for word segmentation. We also show that previous probabilistic models rely crucially on sub-optimal search procedures.", "phrases": ["word segmentation", "dirichlet process", "hdp"], "overall_score": 2.562611221162853, "scores": [1.32987417447929, 1.0387484794125383, 0.5444757381803702], "rank_score": 0.9710327973573994} -{"id": "sagae-lavie-2005-classifier", "title": "A Classifier-Based Parser with Linear Run-Time Complexity", "abstract": "We present a classifier-based parser that produces constituent trees in linear time. The parser uses a basic bottom-up shift-reduce algorithm, but employs a classifier to determine parser actions instead of a grammar. This can be seen as an extension of the deterministic dependency parser of Nivre and Scholz (2004) to full constituent parsing. 
We show that, with an appropriate feature set used in classification, a very simple one-path greedy parser can perform at the same level of accuracy as more complex parsers. We evaluate our parser on section 23 of the WSJ section of the Penn Treebank, and obtain precision and recall of 87.54% and 87.61%, respectively.", "phrases": ["classifier-based parser", "constituent", "linear time"], "overall_score": 2.9553710119557315, "scores": [0.973769845265056, 1.078791260211788, 0.8595911999260795], "rank_score": 0.9707174351343077} -{"id": "shen-etal-2019-towards", "title": "Towards Generating Long and Coherent Text with Multi-Level Latent Variable Models", "abstract": "Variational autoencoders (VAEs) have received much attention recently as an end-to-end architecture for text generation with latent variables. However, previous works typically focus on synthesizing relatively short sentences (up to 20 words), and the posterior collapse issue has been widely identified in text-VAEs. In this paper, we propose to leverage several multi-level structures to learn a VAE model for generating long and coherent text. In particular, a hierarchy of stochastic layers between the encoder and decoder networks is employed to abstract more informative and semantic-rich latent codes. Besides, we utilize a multi-level decoder structure to capture the coherent long-term structure inherent in long-form texts, by generating intermediate sentence representations as high-level plan vectors. Extensive experimental results demonstrate that the proposed multi-level VAE model produces more coherent and less repetitive long text compared to baselines, and can also mitigate the posterior-collapse issue.", "phrases": ["coherent text", "latent variable", "variational autoencoder"], "overall_score": 2.234342232339624, "scores": [0.901144639071889, 1.1584302306648047, 0.8515126368287823], "rank_score": 0.970362502188492} -{"id": "manning-etal-2014-stanford", "title": "The Stanford CoreNLP Natural Language Processing Toolkit", "abstract": "We describe the design and use of the Stanford CoreNLP toolkit, an extensible pipeline that provides core natural language analysis. This toolkit is quite widely used, both in the research NLP community and also among commercial and government users of open source NLP technology. We suggest that this follows from a simple, approachable design, straightforward interfaces, the inclusion of robust and good quality analysis components, and not requiring use of a large amount of associated baggage.", "phrases": ["stanford corenlp", "tagging", "dependency parse", "meta language", "rule match"], "overall_score": 4.094393905290176, "scores": [2.186348152466993, 0.9113592896968081, 0.597140901001663, 0.5817746492938206, 0.5751206129233379], "rank_score": 0.9703487210765246} -{"id": "hasan-ney-2009-comparison", "title": "Comparison of Extended Lexicon Models in Search and Rescoring for SMT", "abstract": "We show how the integration of an extended lexicon model into the decoder can improve translation performance. The model is based on lexical triggers that capture long-distance dependencies on the sentence level. The results are compared to variants of the model that are applied in reranking of n-best lists. We present how a combined application of these models in search and rescoring gives promising results.
Experiments are reported on the GALE Chinese-English task with improvements of up to +0.9% BLEU and -1.5% TER absolute on a competitive baseline.", "phrases": ["lexicon model", "search", "translation performance"], "overall_score": 1.5615986498417302, "scores": [1.7404746554792738, 0.6020666983299825, 0.5682860309141124], "rank_score": 0.9702757949077897} -{"id": "eger-etal-2017-neural", "title": "Neural End-to-End Learning for Computational Argumentation Mining", "abstract": "We investigate neural techniques for end-to-end computational argumentation mining (AM). We frame AM both as a token-based dependency parsing and as a token-based sequence tagging problem, including a multi-task learning setup. Contrary to models that operate on the argument component level, we find that framing AM as dependency parsing leads to subpar performance results. In contrast, less complex (local) tagging models based on BiLSTMs perform robustly across classification scenarios, being able to catch long-range dependencies inherent to the AM problem. Moreover, we find that jointly learning `natural' subtasks, in a multi-task learning setup, improves performance.", "phrases": ["computational argumentation mining", "neural end-to-end model", "raw text", "token level"], "overall_score": 3.529354616640672, "scores": [1.9321872020116944, 0.8993100706010444, 0.5252921649994647, 0.5241958770076932], "rank_score": 0.9702463286549742} -{"id": "elsner-charniak-2010-disentangling", "title": "Disentangling Chat", "abstract": "When multiple conversations occur simultaneously, a listener must decide which conversation each utterance is part of in order to interpret and respond to it appropriately. We refer to this task as disentanglement. We present a corpus of Internet Relay Chat dialogue in which the various conversations have been manually disentangled, and evaluate annotator reliability. We propose a graph-based clustering model for disentanglement, using lexical, timing, and discourse-based features. The model's predicted disentanglements are highly correlated with manual annotations. We conclude by discussing two extensions to the model, specificity tuning and conversation start detection, both of which are promising but do not currently yield practical improvements.", "phrases": ["chat", "discourse-based feature", "thread", "disentanglement", "coherence model"], "overall_score": 2.688868065018523, "scores": [1.5213968195405974, 1.3137343398955672, 0.8899560414098089, 0.5845703799180976, 0.5393631979945867], "rank_score": 0.9698041557517314} -{"id": "koo-etal-2007-structured", "title": "Structured Prediction Models via the Matrix-Tree Theorem", "abstract": "This paper provides an algorithmic framework for learning statistical models involving directed spanning trees, or equivalently non-projective dependency structures. We show how partition functions and marginals for directed spanning trees can be computed by an adaptation of Kirchhoff\u2019s Matrix-Tree Theorem. To demonstrate an application of the method, we perform experiments which use the algorithm in training both log-linear and max-margin dependency parsers. 
The new training methods give improvements in accuracy over perceptron-trained models.", "phrases": ["matrix-tree theorem", "dependency structure", "kirchhoff"], "overall_score": 2.0165489113518262, "scores": [1.4687624304224123, 0.850186159948974, 0.5903165237459317], "rank_score": 0.9697550380391059} -{"id": "siddiqua-etal-2019-tweet", "title": "Tweet Stance Detection Using an Attention based Neural Ensemble Model", "abstract": "Stance detection in Twitter aims at mining user stances expressed in a tweet towards single or multiple target entities. To tackle this problem, most prior studies have explored traditional deep learning models, e.g., LSTM and GRU. However, compared to these traditional approaches, the recently proposed densely connected Bi-LSTM and nested LSTM architectures effectively address the vanishing-gradient and overfitting problems and better capture long-term dependencies. In this paper, we propose a neural ensemble model that adopts the strengths of these two LSTM variants to learn better long-term dependencies, where each module is coupled with an attention mechanism that amplifies the contribution of important elements in the final representation. We also employ a multi-kernel convolution on top of them to extract the higher-level tweet representations. Results of extensive experiments on single and multi-target stance detection datasets show that our proposed method achieves substantial improvement over the current state-of-the-art deep learning based methods.", "phrases": ["stance detection", "neural ensemble model", "tweet representation"], "overall_score": 1.737144666616143, "scores": [1.7985533491589725, 0.5869600687717649, 0.5230432769551421], "rank_score": 0.9695188982952931} -{"id": "williams-etal-2020-predicting", "title": "Predicting Declension Class from Form and Meaning", "abstract": "The noun lexica of many natural languages are divided into several declension classes with characteristic morphological properties. Class membership is far from deterministic, but the phonological form of a noun and/or its meaning can often provide imperfect clues. Here, we investigate the strength of those clues. More specifically, we operationalize this by measuring how much information, in bits, we can glean about declension class from knowing the form and/or meaning of nouns. We know that form and meaning are often also indicative of grammatical gender\u2014which, as we quantitatively verify, can itself share information with declension class\u2014so we also control for gender. We find for two Indo-European languages (Czech and German) that form and meaning respectively share significant amounts of information with class (and contribute additional information above and beyond gender). The three-way interaction between class, form, and meaning (given gender) is also significant. Our study is important for two reasons: First, we introduce a new method that provides additional quantitative support for a classic linguistic finding that form and meaning are relevant for the classification of nouns into declensions. 
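As a toy illustration of the "information in bits" measurement in the williams-etal-2020-predicting record, a naive plug-in mutual-information estimate over (form clue, declension class) pairs; the paper itself uses stronger estimators and controls for gender, so this count-based version is only a sketch:

from collections import Counter
from math import log2

def mi_bits(pairs):
    # Plug-in estimate of I(X; Y) in bits from a list of (x, y) samples.
    n = len(pairs)
    joint = Counter(pairs)
    px = Counter(x for x, _ in pairs)
    py = Counter(y for _, y in pairs)
    return sum(c / n * log2(c * n / (px[x] * py[y])) for (x, y), c in joint.items())

For example, mi_bits([(noun[-2:], cls) for noun, cls in lexicon]) would measure how much a crude "form" clue (the final two characters) tells us about the class.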
Second, we show not only that individual declension classes vary in the strength of their clues within a language, but also that these variations themselves vary across languages.", "phrases": ["declension class", "noun", "strength", "gender", "czech"], "overall_score": 1.3437336593185953, "scores": [2.0009122854345316, 0.901751385259336, 0.8581420745200392, 0.5642741167732053, 0.521414854449181], "rank_score": 0.9692989432872586} -{"id": "bauer-koller-2010-sentence", "title": "Sentence Generation as Planning with Probabilistic LTAG", "abstract": "We present PCRISP, a sentence generation system for probabilistic TAG grammars which performs sentence planning and surface realization in an integrated fashion, in the style of the SPUD system. PCRISP operates by converting the generation problem into a metric planning problem and solving it using an off-the-shelf planner. We evaluate PCRISP on the WSJ corpus and identify trade-offs between coverage, efficiency, and accuracy.", "phrases": ["planning", "pcrisp", "sentence generation"], "overall_score": 1.7363157571875403, "scores": [0.9593755653414389, 0.8676788523392407, 1.0801144077235452], "rank_score": 0.9690562751347415} -{"id": "forascu-2008-gmt", "title": "GMT to +2 or how can TimeML be used in Romanian", "abstract": "The paper describes the construction and usage of the Romanian version of the TimeBank corpus. The success rate of 96.53% for the automatic import of the temporal annotation from English to Romanian shows that the automatic transfer is an enterprise worth doing if temporality is to be studied in a language other than the one for which TimeML, the annotation standard used, was developed. A preliminary study identifies the main situations that occurred during the automatic transfer, as well as temporal elements not (yet) marked in the English corpus.", "phrases": ["romanian", "temporal annotation", "preliminary study"], "overall_score": 1.064394742907165, "scores": [1.8190734363817482, 0.5534626968810343, 0.534025409565685], "rank_score": 0.9688538476094891} -{"id": "dou-etal-2021-gsum", "title": "GSum: A General Framework for Guided Neural Abstractive Summarization", "abstract": "Neural abstractive summarization models are flexible and can produce coherent summaries, but they are sometimes unfaithful and can be difficult to control. While previous studies attempt to provide different types of guidance to control the output and increase faithfulness, it is not clear how these strategies compare and contrast to each other. In this paper, we propose a general and extensible guided summarization framework (GSum) that can effectively take different kinds of external guidance as input, and we perform experiments across several different varieties. Experiments demonstrate that this model is effective, achieving state-of-the-art performance according to ROUGE on 4 popular summarization datasets when using highlighted sentences as guidance. 
In addition, we show that our guided model can generate more faithful summaries and demonstrate how different types of guidance generate qualitatively different summaries, lending a degree of controllability to the learned models.", "phrases": ["summarization", "highlighted sentence", "gsum"], "overall_score": 2.5561077315065317, "scores": [1.7229083481397185, 0.6150270657858997, 0.567770009168598], "rank_score": 0.9685684743647386} -{"id": "sanguinetti-bosco-2011-building", "title": "Building the multilingual TUT parallel treebank", "abstract": "The paper introduces an ongoing project for the development of a parallel treebank for Italian, English and French annotated in the pure dependency format of the Turin University Treebank, i.e. Parallel\u2010TUT. We hypothesize that the major features of this annotation format can be of some help in addressing the typical issues related to parallel corpora, e.g. alignment at various levels. Therefore, benefitting from the tools previously used for TUT, we applied the TUT format to a multilingual sample set of sentences from the JRC-Acquis Multilingual Parallel Corpus and the whole text of the Universal Declaration of Human Rights.", "phrases": ["tut", "parallel treebank", "ongoing project", "pure dependency format"], "overall_score": 1.0640045419259974, "scores": [2.00832083266402, 0.794733187966003, 0.5449236318347435, 0.526017033015882], "rank_score": 0.9684986713701621} -{"id": "clark-curran-2007-formalism", "title": "Formalism-Independent Parser Evaluation with CCG and DepBank", "abstract": "A key question facing the parsing community is how to compare parsers which use different grammar formalisms and produce different output. Evaluating a parser on the same resource used to create it can lead to non-comparable accuracy scores and an over-optimistic view of parser performance. In this paper we evaluate a CCG parser on DepBank, and demonstrate the difficulties in converting the parser output into DepBank grammatical relations. In addition we present a method for measuring the effectiveness of the conversion, which provides an upper bound on parsing accuracy. The CCG parser obtains an F-score of 81.9% on labelled dependencies, against an upper bound of 84.8%. We compare the CCG parser against the RASP parser, outperforming RASP by over 5% overall and on the majority of dependency types.", "phrases": ["depbank", "ccg parser", "lexical category"], "overall_score": 2.1278805600991006, "scores": [1.7613858035662553, 0.6100939970542807, 0.5338407324477946], "rank_score": 0.9684401776894435} -{"id": "faruqui-kumar-2015-multilingual", "title": "Multilingual Open Relation Extraction Using Cross-lingual Projection", "abstract": "Open domain relation extraction systems identify relation and argument phrases in a sentence without relying on any underlying schema. However, current state-of-the-art relation extraction systems are available only for English because of their heavy reliance on linguistic tools such as part-of-speech taggers and dependency parsers. We present a cross-lingual annotation projection method for language independent relation extraction. We evaluate our method on a manually annotated test set and present results on three typologically different languages. 
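The cross-lingual annotation projection in the faruqui-kumar-2015-multilingual record can be pictured with a small helper that maps an English token span onto target-language tokens through word alignments; project_span and its contiguity assumption are illustrative simplifications, not the authors' implementation:

def project_span(span, alignment):
    # span: half-open (start, end) over source tokens; alignment: (src_i, tgt_j) pairs.
    tgt = sorted(j for i, j in alignment if span[0] <= i < span[1])
    if not tgt:
        return None                # unaligned span: the annotation is dropped
    return (tgt[0], tgt[-1] + 1)   # smallest contiguous target span covering all links

For example, project_span((0, 2), [(0, 1), (1, 2), (2, 0)]) returns (1, 3); real projection systems additionally filter noisy or crossing alignments.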
We release these manual annotations and extracted relations in ten languages from Wikipedia.", "phrases": ["cross-lingual projection", "relation extraction system", "wikipedia", "source language"], "overall_score": 2.406446840409426, "scores": [1.8541767313303579, 0.8632017682332755, 0.6046866345165363, 0.5516366657274515], "rank_score": 0.9684254499519053} -{"id": "li-etal-2019-improving", "title": "Improving Relation Extraction with Knowledge-attention", "abstract": "While attention mechanisms have been proven to be effective in many NLP tasks, majority of them are data-driven. We propose a novel knowledge-attention encoder which incorporates prior knowledge from external lexical resources into deep neural networks for relation extraction task. Furthermore, we present three effective ways of integrating knowledge-attention with self-attention to maximize the utilization of both knowledge and data. The proposed relation extraction system is end-to-end and fully attention-based. Experiment results show that the proposed knowledge-attention mechanism has complementary strengths with self-attention, and our integrated models outperform existing CNN, RNN, and self-attention based models. State-of-the-art performance is achieved on TACRED, a complex and large-scale relation extraction dataset.", "phrases": ["relation extraction", "knowledge-attention", "knowledge basis"], "overall_score": 1.5580824972892362, "scores": [1.488804561642536, 0.8667435680898445, 0.5487251297426718], "rank_score": 0.9680910864916842} -{"id": "bollmann-etal-2017-learning", "title": "Learning attention for historical text normalization by learning to pronounce", "abstract": "Automated processing of historical texts often relies on pre-normalization to modern word forms. Training encoder-decoder architectures to solve such problems typically requires a lot of training data, which is not available for the named task. We address this problem by using several novel encoder-decoder architectures, including a multi-task learning (MTL) architecture using a grapheme-to-phoneme dictionary as auxiliary data, pushing the state-of-the-art by an absolute 2% increase in performance. We analyze the induced models across 44 different texts from Early New High German. Interestingly, we observe that, as previously conjectured, multi-task learning can learn to focus attention during decoding, in ways remarkably similar to recently proposed attention mechanisms. This, we believe, is an important step toward understanding how MTL works.", "phrases": ["historical text normalization", "grapheme-to-phoneme dictionary", "auxiliary task"], "overall_score": 1.8831456320930369, "scores": [1.8114063784375865, 0.5557879759747931, 0.5360419019079645], "rank_score": 0.9677454187734481} -{"id": "poesio-etal-2004-centering", "title": "Centering: A Parametric Theory and Its Instantiations", "abstract": "Centering theory is the best-known framework for theorizing about local coherence and salience; however, its claims are articulated in terms of notions which are only partially specified, such as utterance, realization, or ranking. A great deal of research has attempted to arrive at more detailed specifications of these parameters of the theory; as a result, the claims of centering can be instantiated in many different ways. We investigated in a systematic fashion the effect on the theory's claims of these different ways of setting the parameters. 
Doing this required, first of all, clarifying what the theory's claims are (one of our conclusions being that what has become known as Constraint 1 is actually a central claim of the theory). Secondly, we had to clearly identify these parametric aspects: For example, we argue that the notion of pronoun used in Rule 1 should be considered a parameter. Thirdly, we had to find appropriate methods for evaluating these claims. We found that while the theory's main claim about salience and pronominalization, Rule 1, a preference for pronominalizing the backward-looking center (CB), is verified with most instantiations, Constraint 1, a claim about (entity) coherence and CB uniqueness, is much more instantiation-dependent: It is not verified if the parameters are instantiated according to very mainstream views (vanilla instantiation), it holds only if indirect realization is allowed, and is violated by between 20% and 25% of utterances in our corpus even with the most favorable instantiations. We also found a trade-off between Rule 1, on the one hand, and Constraint 1 and Rule 2, on the other: Setting the parameters to minimize the violations of local coherence leads to increased violations of salience, and vice versa. Our results suggest that entity coherence (continuous reference to the same entities) must be supplemented at least by an account of relational coherence.", "phrases": ["instantiation", "centering theory", "salience"], "overall_score": 2.6815792535967575, "scores": [0.8678137657908002, 1.5058782427391024, 0.5278338096561747], "rank_score": 0.9671752727286925} -{"id": "cetinoglu-kuhn-2013-towards", "title": "Towards Joint Morphological Analysis and Dependency Parsing of Turkish", "abstract": "Turkish is an agglutinative language with rich morphology-syntax interactions. As an extension of this property, the Turkish Treebank is designed to represent sublexical dependencies, which brings extra challenges to parsing raw text. In this work, we use a joint POS tagging and parsing approach to parse Turkish raw text, and we show it outperforms a pipeline approach. Then we experiment with incorporating morphological feature prediction into the joint system. Our results show statistically significant improvements with the joint systems and achieve the state-of-the-art accuracy for Turkish dependency parsing.", "phrases": ["dependency parsing", "agglutinative language", "joint pos tagging"], "overall_score": 1.5563637981422747, "scores": [1.8079540018155988, 0.5644191316912802, 0.5286964625021588], "rank_score": 0.9670231986696792} -{"id": "cahill-etal-2013-robust", "title": "Robust Systems for Preposition Error Correction Using Wikipedia Revisions", "abstract": "We show that existing methods for training preposition error correction systems, whether using well-edited text or error-annotated corpora, do not generalize across very different test sets. We present a new, large error-annotated corpus and use it to train systems that generalize across three different test sets, each from a different domain and with different error characteristics. 
This new corpus is automatically extracted from Wikipedia revisions and contains over one million instances of preposition corrections.", "phrases": ["preposition error", "wikipedia revision", "error-annotated corpus"], "overall_score": 2.4012455221322546, "scores": [1.3913175153643251, 0.9399567613337673, 0.5677225797879855], "rank_score": 0.9663322854953593} -{"id": "hershcovich-etal-2018-multitask", "title": "Multitask Parsing Across Semantic Representations", "abstract": "The ability to consolidate information of different types is at the core of intelligence, and has tremendous practical value in allowing learning for one task to benefit from generalizations learned for others. In this paper we tackle the challenging task of improving semantic parsing performance, taking UCCA parsing as a test case, and AMR, SDP and Universal Dependencies (UD) parsing as auxiliary tasks. We experiment on three languages, using a uniform transition-based system and learning architecture for all parsing tasks. Despite notable conceptual, formal and domain differences, we show that multitask learning significantly improves UCCA parsing in both in-domain and out-of-domain settings.", "phrases": ["ucca", "amr", "sdp", "formalism", "semantic graphbank"], "overall_score": 2.478484396651589, "scores": [1.2983700032302186, 1.188556315571229, 0.8722386007021043, 0.8537978414568886, 0.6184862293316717], "rank_score": 0.9662897980584224} -{"id": "hoang-kan-2010-towards", "title": "Towards Automated Related Work Summarization", "abstract": "We introduce the novel problem of automatic related work summarization. Given multiple articles (e.g., conference/journal papers) as input, a related work summarization system creates a topic-biased summary of related work specific to the target paper. Our prototype Related Work Summarization system, ReWoS, takes in a set of keywords arranged in a hierarchical fashion that describes a target paper's topics, to drive the creation of an extractive summary using two different strategies for locating appropriate sentences for general topics as well as detailed ones. Our initial results show an improvement over generic multi-document summarization baselines in a human evaluation.", "phrases": ["work summarization system", "work section", "work generation"], "overall_score": 2.737478396165136, "scores": [1.3421475514182726, 0.8998774100731606, 0.6566043301304754], "rank_score": 0.9662097638739695} -{"id": "dobrovoljc-nivre-2016-universal", "title": "The Universal Dependencies Treebank of Spoken Slovenian", "abstract": "This paper presents the construction of an open-source dependency treebank of spoken Slovenian, the first syntactically annotated collection of spontaneous speech in Slovenian. The treebank has been manually annotated using the Universal Dependencies annotation scheme, a one-layer syntactic annotation scheme with a high degree of cross-modality, cross-framework and cross-language interoperability. In this original application of the scheme to spoken language transcripts, we address a wide spectrum of syntactic particularities in speech, either by extending the scope of application of existing universal labels or by proposing new speech-specific extensions. 
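A hedged sketch of the kind of revision mining behind the cahill-etal-2013-robust record: align two versions of a sentence and keep one-token substitutions where both sides are prepositions. The toy preposition list and pre-tokenized, lowercased input are assumptions; the actual corpus construction involves considerably more filtering:

import difflib

PREPOSITIONS = {"in", "on", "at", "of", "for", "to", "with", "by", "from", "about"}

def preposition_edits(old_tokens, new_tokens):
    # Yield (before, after, position) for one-token preposition substitutions
    # between two revisions of the same sentence.
    matcher = difflib.SequenceMatcher(a=old_tokens, b=new_tokens)
    for op, i1, i2, j1, j2 in matcher.get_opcodes():
        if op == "replace" and i2 - i1 == 1 and j2 - j1 == 1:
            before, after = old_tokens[i1], new_tokens[j1]
            if before in PREPOSITIONS and after in PREPOSITIONS and before != after:
                yield before, after, i1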
The initial analysis of the resulting treebank and its comparison with the written Slovenian UD treebank confirms significant syntactic differences between the two language modalities, with spoken data consisting of shorter and more elliptic sentences, fewer and simpler nominal phrases, and more relations marking disfluencies, interaction, deixis and modality.", "phrases": ["dependency treebank", "spoken slovenian", "disfluency"], "overall_score": 1.5547810815310212, "scores": [1.7389963249249691, 0.6283154313713317, 0.5308076483464975], "rank_score": 0.9660398015475996} -{"id": "meng-etal-2020-text", "title": "Text Classification Using Label Names Only: A Language Model Self-Training Approach", "abstract": "Current text classification methods typically require a good number of human-labeled documents as training data, which can be costly and difficult to obtain in real applications. Humans can perform classification without seeing any labeled examples but only based on a small set of words describing the categories to be classified. In this paper, we explore the potential of only using the label name of each class to train classification models on unlabeled data, without using any labeled documents. We use pre-trained neural language models both as general linguistic knowledge sources for category understanding and as representation learning models for document classification. Our method (1) associates semantically related words with the label names, (2) finds category-indicative words and trains the model to predict their implied categories, and (3) generalizes the model via self-training. We show that our model achieves around 90% accuracy on four benchmark datasets including topic and sentiment classification without using any labeled documents but learning from unlabeled data supervised by at most 3 words (1 in most cases) per class as the label name.", "phrases": ["self-training", "unlabeled data", "text classification", "lotclass"], "overall_score": 2.3157076336652147, "scores": [1.8821136693809317, 0.86971458177866, 0.5699814660118449, 0.5410906520557117], "rank_score": 0.965725092306787} -{"id": "ando-zhang-2005-high", "title": "A High-Performance Semi-Supervised Learning Method for Text Chunking", "abstract": "In machine learning, whether one can build a more accurate classifier by using unlabeled data (semi-supervised learning) is an important issue. Although a number of semi-supervised methods have been proposed, their effectiveness on NLP tasks is not always clear. This paper presents a novel semi-supervised method that employs a learning paradigm which we call structural learning. The idea is to find \"what good classifiers are like\" by learning from thousands of automatically generated auxiliary classification problems on unlabeled data. By doing so, the common predictive structure shared by the multiple classification problems can be discovered, which can then be used to improve performance on the target problem. 
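The "common predictive structure" idea in the ando-zhang-2005-high record can be sketched as follows, assuming the linear predictors for the auxiliary problems have already been trained; the SVD step is the core of their alternating structure optimization, but everything else here is simplified:

import numpy as np

def shared_structure(aux_weights, h):
    # aux_weights: (k, d) array, one trained linear predictor per auxiliary problem.
    W = np.asarray(aux_weights, dtype=float).T      # stack predictors as columns: (d, k)
    U, _, _ = np.linalg.svd(W, full_matrices=False)
    return U[:, :h].T                               # theta: (h, d) shared structure

The target problem then trains on augmented features such as concat(x, theta @ x), letting generalizations from the auxiliary problems carry over.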
The method produces performance higher than the previous best results on CoNLL'00 syntactic chunking and CoNLL'03 named entity chunking (English and German).", "phrases": ["learning method", "chunking", "unlabeled data"], "overall_score": 2.892522458231689, "scores": [1.2421951365754063, 1.0479214264292425, 0.606526588754846], "rank_score": 0.9655477172531648} -{"id": "pouliquen-etal-2011-tapta", "title": "Tapta: A user-driven translation system for patent documents based on domain-aware Statistical Machine Translation", "abstract": "This paper presents a study conducted in the course of implementing a project in the World Intellectual Property Organization (WIPO) on assisted translation of patent abstracts and titles from English to French. The tool (called \u2018Tapta\u2019) is trained on an extensive corpus of manually translated patents. These patents are classified, each class belonging to one of the 32 predefined domains. The trained Statistical Machine Translation (SMT) tool uses this additional information to propose more accurate translations according to the context. The performance of the SMT system was shown to be above the current state of the art, but, in order to produce an acceptable translation, a human has to supervise the process. Therefore, a graphical user interface was built in which the translator drives the automatic translation process. A significant experiment with human operators was conducted within WIPO, the output was judged to be successful and a project to use Tapta in production is now under discussion.", "phrases": ["patent", "statistical machine translation", "input text"], "overall_score": 1.553889040245606, "scores": [1.8131802827422543, 0.5524387924039258, 0.530837560255525], "rank_score": 0.9654855451339017} -{"id": "lakretz-etal-2019-emergence", "title": "The emergence of number and syntax units in LSTM language models", "abstract": "Recent work has shown that LSTMs trained on a generic language modeling objective capture syntax-sensitive generalizations such as long-distance number agreement. We have however no mechanistic understanding of how they accomplish this remarkable feat. Some have conjectured it depends on heuristics that do not truly take hierarchical structure into account. We present here a detailed study of the inner mechanics of number tracking in LSTMs at the single neuron level. We discover that long-distance number information is largely managed by two \u201cnumber units\u201d. Importantly, the behaviour of these units is partially controlled by other units independently shown to track syntactic structure. We conclude that LSTMs are, to some extent, implementing genuinely syntactic processing mechanisms, paving the way to a more general understanding of grammatical encoding in LSTMs.", "phrases": ["lstm language model", "syntactic structure", "individual neuron"], "overall_score": 2.7899430784897294, "scores": [0.9297380485015942, 1.064510058872436, 0.9015140770590879], "rank_score": 0.965254061477706} -{"id": "suzuki-etal-2009-empirical", "title": "An Empirical Study of Semi-supervised Structured Conditional Models for Dependency Parsing", "abstract": "This paper describes an empirical study of high-performance dependency parsers based on a semi-supervised learning approach. We describe an extension of semi-supervised structured conditional models (SS-SCMs) to the dependency parsing problem, whose framework is originally proposed in (Suzuki and Isozaki, 2008). 
Moreover, we introduce two extensions related to dependency parsing: The first extension is to combine SS-SCMs with another semi-supervised approach, described in (Koo et al., 2008). The second extension is to apply the approach to second-order parsing models, such as those described in (Carreras, 2007), using a two-stage semi-supervised learning approach. We demonstrate the effectiveness of our proposed methods on dependency parsing experiments using two widely used test collections: the Penn Treebank for English, and the Prague Dependency Treebank for Czech. Our best results on test data in the above datasets achieve 93.79% parent-prediction accuracy for English, and 88.05% for Czech.", "phrases": ["empirical study", "dependency parsing", "semi-supervised learning approach"], "overall_score": 2.6136066831784155, "scores": [1.5015459296556901, 0.8099788417975318, 0.5838499325843681], "rank_score": 0.9651249013458633} -{"id": "heilman-smith-2010-tree", "title": "Tree Edit Models for Recognizing Textual Entailments, Paraphrases, and Answers to Questions", "abstract": "We describe tree edit models for representing sequences of tree transformations involving complex reordering phenomena and demonstrate that they offer a simple, intuitive, and effective method for modeling pairs of semantically related sentences. To efficiently extract sequences of edits, we employ a tree kernel as a heuristic in a greedy search routine. We describe a logistic regression model that uses 33 syntactic features of edit sequences to classify the sentence pairs. The approach leads to competitive performance in recognizing textual entailment, paraphrase identification, and answer selection for question answering.", "phrases": ["textual entailment", "tree transformation", "edit sequence", "ted", "pre-selected sentence"], "overall_score": 3.429559204362274, "scores": [1.355844241397201, 1.1368540765948287, 0.913631070433896, 0.854305591949362, 0.5624641157587145], "rank_score": 0.9646198192268006} -{"id": "nie-etal-2019-revealing", "title": "Revealing the Importance of Semantic Retrieval for Machine Reading at Scale", "abstract": "Machine Reading at Scale (MRS) is a challenging task in which a system is given an input query and is asked to produce a precise output by \u201creading\u201d information from a large knowledge base. The task has gained popularity with its natural combination of information retrieval (IR) and machine comprehension (MC). Advancements in representation learning have led to separated progress in both IR and MC; however, very few studies have examined the relationship and combined design of retrieval and comprehension at different levels of granularity, for development of MRS systems. In this work, we give general guidelines on system design for MRS by proposing a simple yet effective pipeline system with special consideration on hierarchical semantic retrieval at both paragraph and sentence level, and their potential effects on the downstream task. The system is evaluated on both fact verification and open-domain multihop QA, achieving state-of-the-art results on the leaderboard test sets of both FEVER and HOTPOTQA. 
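A minimal stand-in for the paragraph-then-sentence retrieval cascade in the nie-etal-2019-revealing record, with TF-IDF similarity deliberately replacing the paper's neural retrieval modules:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def hierarchical_retrieve(query, paragraphs, k_para=5, k_sent=5):
    # Stage 1: rank whole paragraphs; stage 2: rank sentences inside the survivors.
    # paragraphs: list of lists of sentence strings.
    para_texts = [" ".join(p) for p in paragraphs]
    vec = TfidfVectorizer().fit(para_texts + [query])
    q = vec.transform([query])
    p_scores = cosine_similarity(q, vec.transform(para_texts))[0]
    top_paras = p_scores.argsort()[::-1][:k_para]
    sentences = [s for i in top_paras for s in paragraphs[i]]
    s_scores = cosine_similarity(q, vec.transform(sentences))[0]
    return [sentences[i] for i in s_scores.argsort()[::-1][:k_sent]]

The two-level design mirrors the abstract's point: the paragraph stage filters upstream information cheaply, and only the surviving sentences reach the downstream reader.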
To further demonstrate the importance of semantic retrieval, we present ablation and analysis studies to quantify the contribution of neural retrieval modules at both paragraph-level and sentence-level, and illustrate that intermediate semantic retrieval modules are vital for not only effectively filtering upstream information and thus saving downstream computation, but also for shaping upstream data distribution and providing better data for downstream modeling.", "phrases": ["semantic retrieval", "scale", "fact verification", "complex question", "hyperlink"], "overall_score": 2.6744709328225422, "scores": [1.9842582788250687, 0.8542507807864381, 0.8927250299737411, 0.5486708421066746, 0.54315250803903], "rank_score": 0.9646114879461904} -{"id": "shen-etal-2019-multi", "title": "Multi-Task Learning for Conversational Question Answering over a Large-Scale Knowledge Base", "abstract": "We consider the problem of conversational question answering over a large-scale knowledge base. To handle huge entity vocabulary of a large-scale knowledge base, recent neural semantic parsing based approaches usually decompose the task into several subtasks and then solve them sequentially, which leads to following issues: 1) errors in earlier subtasks will be propagated and negatively affect downstream ones; and 2) each subtask cannot naturally share supervision signals with others. To tackle these issues, we propose an innovative multi-task learning framework where a pointer-equipped semantic parsing model is designed to resolve coreference in conversations, and naturally empower joint learning with a novel type-aware entity detection model. The proposed framework thus enables shared supervisions and alleviates the effect of error propagation. Experiments on a large-scale conversational question answering dataset containing 1.6M question answering pairs over 12.8M entities show that the proposed framework improves overall F1 score from 67% to 79% compared with previous state-of-the-art work.", "phrases": ["conversational question", "large-scale knowledge base", "semantic parsing", "joint learning", "mention"], "overall_score": 2.610513942807852, "scores": [1.7908777647218097, 0.8965516943610825, 1.0526464338709967, 0.5458839115329815, 0.533954430753932], "rank_score": 0.9639828470481605} -{"id": "specia-gimenez-2010-combining", "title": "Combining Confidence Estimation and Reference-based Metrics for Segment-level MT Evaluation", "abstract": "We describe an effort to improve standard reference-based metrics for Machine Translation (MT) evaluation by enriching them with Confidence Estimation (CE) features and using a learning mechanism trained on human annotations. Reference-based MT evaluation metrics compare the system output against reference translations looking for overlaps at different levels (lexical, syntactic, and semantic). These metrics aim at comparing MT systems or analyzing the progress of a given system and are known to have reasonably good correlation with human judgments at the corpus level, but not at the segment level. CE metrics, on the other hand, target the system in use, providing a quality score to the end-user for each translated segment. They cannot rely on reference translations, and use instead information extracted from the input text, system output and possibly external corpora to train machine learning algorithms. These metrics correlate better with human judgments at the segment level. 
However, they are usually highly biased by difficulty level of the input segment, and therefore are less appropriate for comparing multiple systems translating the same input segments. We show that these two classes of metrics are complementary and can be combined to provide MT evaluation metrics that achieve higher correlation with human judgments at the segment level.", "phrases": ["confidence estimation", "reference-based metric", "human annotation"], "overall_score": 1.3362837474854177, "scores": [1.7459144090143204, 0.5918348423326136, 0.5540256522296637], "rank_score": 0.9639249678588659} -{"id": "zeman-etal-2012-hamledt", "title": "HamleDT: To Parse or Not to Parse?", "abstract": "We propose HamleDT \u2015 HArmonized Multi-LanguagE Dependency Treebank. HamleDT is a compilation of existing dependency treebanks (or dependency conversions of other treebanks), transformed so that they all conform to the same annotation style. While the license terms prevent us from directly redistributing the corpora, most of them are easily acquirable for research purposes. What we provide instead is the software that normalizes tree structures in the data obtained by the user from their original providers.", "phrases": ["treebank", "hamledt", "clause", "verb group"], "overall_score": 2.117781437583562, "scores": [1.836819951272927, 0.931401158987351, 0.5643226616531877, 0.5228317039080019], "rank_score": 0.9638438689553668} -{"id": "ma-etal-2020-simuleval", "title": "SIMULEVAL: An Evaluation Toolkit for Simultaneous Translation", "abstract": "Simultaneous translation on both text and speech focuses on a real-time and low-latency scenario where the model starts translating before reading the complete source input. Evaluating simultaneous translation models is more complex than offline models because the latency is another factor to consider in addition to translation quality. The research community, despite its growing focus on novel modeling approaches to simultaneous translation, currently lacks a universal evaluation procedure. Therefore, we present SimulEval, an easy-to-use and general evaluation toolkit for both simultaneous text and speech translation. A server-client scheme is introduced to create a simultaneous translation scenario, where the server sends source input and receives predictions for evaluation and the client executes customized policies. Given a policy, it automatically performs simultaneous decoding and collectively reports several popular latency metrics. We also adapt latency metrics from text simultaneous translation to the speech task. Additionally, SimulEval is equipped with a visualization interface to provide better understanding of the simultaneous decoding process of a system. SimulEval has already been extensively used for the IWSLT 2020 shared task on simultaneous speech translation. Code will be released upon publication.", "phrases": ["evaluation toolkit", "latency", "speech translation", "simuleval"], "overall_score": 1.7265817504109597, "scores": [1.8101995241122069, 0.8901636632755074, 0.6076790648630227, 0.5464522378045026], "rank_score": 0.96362362251381} -{"id": "popovic-ney-2007-word", "title": "Word Error Rates: Decomposition over POS classes and Applications for Error Analysis", "abstract": "Evaluation and error analysis of machine translation output are important but difficult tasks. 
In this work, we propose a novel method for obtaining more details about actual translation errors in the generated output by introducing the decomposition of Word Error Rate (WER) and Position independent word Error Rate (PER) over different Part-of-Speech (POS) classes. Furthermore, we investigate two possible aspects of the use of these decompositions for automatic error analysis: estimation of inflectional errors and distribution of missing words over POS classes. The obtained results are shown to correspond to the results of a human error analysis. The results obtained on the European Parliament Plenary Session corpus in Spanish and English give a better overview of the nature of translation errors as well as ideas of where to put efforts for possible improvements of the translation system.", "phrases": ["decomposition", "pos class", "error analysis", "different part-of-speech", "content word"], "overall_score": 2.003418901544038, "scores": [1.9563207387670556, 0.8640361271749482, 0.8595692338929298, 0.5861300132962539, 0.5511480770034759], "rank_score": 0.9634408380269328} -{"id": "blinov-etal-2019-large", "title": "Large Dataset and Language Model Fun-Tuning for Humor Recognition", "abstract": "The task of humor recognition has attracted a lot of attention recently due to the urge to process large amounts of user-generated texts and the rise of conversational agents. We collected a dataset of jokes and funny dialogues in Russian from various online resources and complemented them carefully with unfunny texts with similar lexical properties. The dataset comprises more than 300,000 short texts, which is significantly larger than any previous humor-related corpus. Manual annotation of 2,000 items proved the reliability of the corpus construction approach. Further, we applied language model fine-tuning for text classification and obtained an F1 score of 0.91 on a test set, which constitutes a considerable gain over baseline methods. The dataset is freely available for the research community.", "phrases": ["humor recognition", "funny dialogue", "russian", "various online resource"], "overall_score": 2.218071214052989, "scores": [1.9297787595157994, 0.8301310632145983, 0.5576494139046694, 0.5356251182915713], "rank_score": 0.9632960887316595} -{"id": "moschitti-etal-2008-tree", "title": "Tree Kernels for Semantic Role Labeling", "abstract": "The availability of large scale data sets of manually annotated predicate-argument structures has recently favored the use of machine learning approaches to the design of automated semantic role labeling (SRL) systems. The main research in this area relates to the design choices for feature representation and for effective decompositions of the task in different learning models. Regarding the former choice, structural properties of full syntactic parses are largely employed as they represent ways to encode different principles suggested by the linking theory between syntax and semantics. The latter choice relates to several learning schemes over global views of the parses. For example, re-ranking stages operating over alternative predicate-argument sequences of the same sentence have shown to be very effective. In this article, we propose several kernel functions to model parse tree properties in kernel-based machines, for example, perceptrons or support vector machines. In particular, we define different kinds of tree kernels as general approaches to feature engineering in SRL. 
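For the moschitti-etal-2008-tree record, a compact Collins-and-Duffy-style subset-tree kernel over parses encoded as nested tuples, e.g. ("S", ("NP", "we"), ("VP", "run")); the encoding, the decay parameter lam, and the assumption that word strings never collide with nonterminal labels are all illustrative choices, not the paper's exact kernels:

def tree_kernel(t1, t2, lam=1.0):
    # Weighted count of tree fragments shared by the two parses.
    return sum(_common(a, b, lam) for a in _nodes(t1) for b in _nodes(t2))

def _nodes(t):
    return [] if isinstance(t, str) else [t] + [n for c in t[1:] for n in _nodes(c)]

def _production(n):
    return (n[0],) + tuple(c if isinstance(c, str) else c[0] for c in n[1:])

def _common(n1, n2, lam):
    if _production(n1) != _production(n2):
        return 0.0
    result = lam
    for c1, c2 in zip(n1[1:], n2[1:]):
        if not (isinstance(c1, str) or isinstance(c2, str)):
            result *= 1 + _common(c1, c2, lam)  # recurse below matching productions
    return result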
Moreover, we extensively experiment with such kernels to investigate their contribution to individual stages of an SRL architecture both in isolation and in combination with other traditional manually coded features. The results for boundary recognition, classification, and re-ranking stages provide systematic evidence about the significant impact of tree kernels on the overall accuracy, especially when the amount of training data is small. As a conclusive result, tree kernels allow for a general and easily portable feature engineering method which is applicable to a large family of natural language processing tasks.", "phrases": ["semantic role labeling", "srl", "different kind", "tree kernels"], "overall_score": 2.2179730998862692, "scores": [1.504965878710424, 0.9720599706477033, 0.8294298122442736, 0.5465582515594257], "rank_score": 0.9632534782904567} -{"id": "bisazza-federico-2010-chunk", "title": "Chunk-Based Verb Reordering in VSO Sentences for Arabic-English Statistical Machine Translation", "abstract": "In Arabic-to-English phrase-based statistical machine translation, a large number of syntactic disfluencies are due to wrong long-range reordering of the verb in VSO sentences, where the verb is anticipated with respect to the English word order. In this paper, we propose a chunk-based reordering technique to automatically detect and displace clause-initial verbs in the Arabic side of a word-aligned parallel corpus. This method is applied to preprocess the training data, and to collect statistics about verb movements. From this analysis, specific verb reordering lattices are then built on the test sentences before decoding them. The application of our reordering methods on the training and test sets results in consistent BLEU score improvements on the NIST-MT 2009 Arabic-English benchmark.", "phrases": ["machine translation", "chunk-based reordering technique", "clause-initial verb"], "overall_score": 1.5499768765390691, "scores": [1.7350664644587197, 0.5979835161737821, 0.5561143628274403], "rank_score": 0.9630547811533141} -{"id": "lu-roth-2015-joint", "title": "Joint Mention Extraction and Classification with Mention Hypergraphs", "abstract": "We present a novel model for the task of joint mention extraction and classification. Unlike existing approaches, our model is able to effectively capture overlapping mentions with unbounded lengths. The model is highly scalable, with a time complexity that is linear in the number of words in the input sentence and linear in the number of possible mention classes. Our model can be extended to additionally capture mention heads explicitly in a joint manner under the same time complexity. We demonstrate the effectiveness of our model through extensive experiments on standard datasets.", "phrases": ["hypergraph", "unbounded length", "joint mention extraction", "multiple node", "hand-crafted feature"], "overall_score": 3.366932552834878, "scores": [0.9998833136791183, 1.9280275185086841, 0.832352749919349, 0.53029658437596, 0.524147786590417], "rank_score": 0.9629415906147056} -{"id": "roy-etal-2020-lareqa", "title": "LAReQA: Language-Agnostic Answer Retrieval from a Multilingual Pool", "abstract": "We present LAReQA, a challenging new benchmark for language-agnostic answer retrieval from a multilingual candidate pool. Unlike previous cross-lingual tasks, LAReQA tests for \u201cstrong\u201d cross-lingual alignment, requiring semantically related cross-language pairs to be closer in representation space than unrelated same-language pairs. 
This level of alignment is important for the practical task of cross-lingual information retrieval. Building on multilingual BERT (mBERT), we study different strategies for achieving strong alignment. We find that augmenting training data via machine translation is effective, and improves significantly over using mBERT out-of-the-box. Interestingly, model performance on zero-shot variants of our task that only target \u201cweak\u201d alignment is not predictive of performance on LAReQA. This finding underscores our claim that language-agnostic retrieval is a substantively new kind of cross-lingual evaluation, and suggests that measuring both weak and strong alignment will be important for improving cross-lingual systems going forward. We release our dataset and evaluation code at .", "phrases": ["language-agnostic answer retrieval", "candidate pool", "same-language pair", "lareqa"], "overall_score": 1.5490710273386017, "scores": [1.7744555657913972, 0.9580296533795793, 0.5664479358414191, 0.5510346265860874], "rank_score": 0.9624919453996208} -{"id": "wang-etal-2008-semi", "title": "Semi-Supervised Convex Training for Dependency Parsing", "abstract": "We present a novel semi-supervised training algorithm for learning dependency parsers. By combining a supervised large margin loss with an unsupervised least squares loss, a discriminative, convex, semi-supervised learning algorithm can be obtained that is applicable to large-scale problems. To demonstrate the benefits of this approach, we apply the technique to learning dependency parsers from combined labeled and unlabeled corpora. Using a stochastic gradient descent algorithm, a parsing model can be efficiently learned from semi-supervised data that significantly outperforms corresponding supervised methods.", "phrases": ["dependency parsing", "least square loss", "learning algorithm", "semi-supervised approach"], "overall_score": 2.1142602821389267, "scores": [1.8343422849245659, 0.8441903299137877, 0.6168549561050989, 0.5535777172604989], "rank_score": 0.9622413220509878} -{"id": "schwenk-etal-2012-large", "title": "Large, Pruned or Continuous Space Language Models on a GPU for Statistical Machine Translation", "abstract": "Language models play an important role in large vocabulary speech recognition and statistical machine translation systems. The dominant approach for several decades has been back-off language models. Some years ago, there was a clear tendency to build huge language models trained on hundreds of billions of words. Lately, this tendency has changed and recent works concentrate on data selection. Continuous space methods are a very competitive approach, but they have a high computational complexity and are not yet in widespread use. This paper presents an experimental comparison of all these approaches on a large statistical machine translation task. We also describe an open-source implementation to train and use continuous space language models (CSLM) for such large tasks. We describe an efficient implementation of the CSLM using graphical processing units from Nvidia. By these means, we are able to train a CSLM on more than 500 million words in 20 hours. 
This CSLM provides an improvement of up to 1.8 BLEU points with respect to the best back-off language model that we were able to build.", "phrases": ["statistical machine translation", "implementation", "network-based language"], "overall_score": 2.5390362219847478, "scores": [1.8098782682453252, 0.5523876033772891, 0.5240331793588672], "rank_score": 0.9620996836604938} -{"id": "bannard-etal-2003-statistical", "title": "A Statistical Approach to the Semantics of Verb-Particles", "abstract": "This paper describes a distributional approach to the semantics of verb-particle constructions (e.g. put up, make off). We report first on a framework for implementing and evaluating such models. We then go on to report on the implementation of some techniques for using statistical models acquired from corpus data to infer the meaning of verb-particle constructions.", "phrases": ["verb-particle construction", "statistical model", "compositionality", "vpc"], "overall_score": 2.9735224183344253, "scores": [1.6041910915578348, 1.069609213065782, 0.6087244530316979, 0.5653970528054778], "rank_score": 0.9619804526151982} -{"id": "chiticariu-etal-2010-domain", "title": "Domain Adaptation of Rule-Based Annotators for Named-Entity Recognition Tasks", "abstract": "Named-entity recognition (NER) is an important task required in a wide variety of applications. While rule-based systems are appealing due to their well-known \"explainability,\" most, if not all, state-of-the-art results for NER tasks are based on machine learning techniques. Motivated by these results, we explore the following natural question in this paper: Are rule-based systems still a viable approach to named-entity recognition? Specifically, we have designed and implemented a high-level language NERL on top of SystemT, a general-purpose algebraic information extraction system. NERL is tuned to the needs of NER tasks and simplifies the process of building, understanding, and customizing complex rule-based named-entity annotators. We show that these customized annotators match or outperform the best published results achieved with machine learning techniques. These results confirm that we can reap the benefits of rule-based extractors' explainability without sacrificing accuracy. We conclude by discussing lessons learned while building and customizing complex rule-based annotators and outlining several research directions towards facilitating rule development.", "phrases": ["domain adaptation", "entity recognition", "rule-based ner system"], "overall_score": 2.3058764811919645, "scores": [1.7512440269814027, 0.582702992658249, 0.5509285302015813], "rank_score": 0.961625183280411} -{"id": "joty-mohiuddin-2018-modeling", "title": "Modeling Speech Acts in Asynchronous Conversations: A Neural-CRF Approach", "abstract": "Participants in an asynchronous conversation (e.g., forum, e-mail) interact with each other at different times, performing certain communicative acts, called speech acts (e.g., question, request). In this article, we propose a hybrid approach to speech act recognition in asynchronous conversations. Our approach works in two main steps: a long short-term memory recurrent neural network (LSTM-RNN) first encodes each sentence separately into a task-specific distributed representation, and this is then used in a conditional random field (CRF) model to capture the conversational dependencies between sentences. 
The LSTM-RNN model uses pretrained word embeddings learned from a large conversational corpus and is trained to classify sentences into speech act types. The CRF model can consider arbitrary graph structures to model conversational dependencies in an asynchronous conversation. In addition, to mitigate the problem of limited annotated data in the asynchronous domains, we adapt the LSTM-RNN model to learn from synchronous conversations (e.g., meetings), using domain adversarial training of neural networks. Empirical evaluation shows the effectiveness of our approach over existing ones: (i) LSTM-RNNs provide better task-specific representations, (ii) conversational word embeddings benefit the LSTM-RNNs more than the off-the-shelf ones, (iii) adversarial training gives better domain-invariant representations, and (iv) the global CRF model improves over local models.", "phrases": ["conversation", "speech act recognition", "word embedding"], "overall_score": 1.5474298353765867, "scores": [1.7538495656661035, 0.6029763260938731, 0.527590754737931], "rank_score": 0.9614722154993025} -{"id": "punyakanok-etal-2004-semantic", "title": "Semantic Role Labeling Via Integer Linear Programming Inference", "abstract": "We present a system for the semantic role labeling task. The system combines a machine learning technique with an inference procedure based on integer linear programming that supports the incorporation of linguistic and structural constraints into the decision process. The system is tested on the data provided in CoNLL-2004 shared task on semantic role labeling and achieves very competitive results.", "phrases": ["integer linear programming", "ilp", "semantic role labeling", "srl"], "overall_score": 2.5357180983462784, "scores": [1.0569250763056621, 1.0398949451879091, 0.9106402275149857, 0.8359092297681], "rank_score": 0.9608423696941644} -{"id": "baker-etal-2010-modality", "title": "A Modality Lexicon and its use in Automatic Tagging", "abstract": "This paper describes our resource-building results for an eight-week JHU Human Language Technology Center of Excellence Summer Camp for Applied Language Exploration (SCALE-2009) on Semantically-Informed Machine Translation. Specifically, we describe the construction of a modality annotation scheme, a modality lexicon, and two automated modality taggers that were built using the lexicon and annotation scheme. Our annotation scheme is based on identifying three components of modality: a trigger, a target and a holder. We describe how our modality lexicon was produced semi-automatically, expanding from an initial hand-selected list of modality trigger words and phrases. The resulting expanded modality lexicon is being made publicly available. We demonstrate that one tagger\u2015a structure-based tagger\u2015results in precision around 86% (depending on genre) for tagging of a standard LDC data set. 
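The inference step in the punyakanok-etal-2004-semantic record a few lines above can be sketched with an off-the-shelf ILP solver; this assumes PuLP and keeps only the no-overlap constraint, whereas the full system encodes several more linguistic constraints:

from pulp import LpProblem, LpMaximize, LpVariable, lpSum

def decode_no_overlap(spans, scores):
    # spans: list of half-open (start, end) candidate arguments;
    # scores: per-span classifier scores. Selects the best non-overlapping subset.
    prob = LpProblem("srl_decoding", LpMaximize)
    x = [LpVariable(f"x{i}", cat="Binary") for i in range(len(spans))]
    prob += lpSum(scores[i] * x[i] for i in range(len(spans)))  # objective
    for i in range(len(spans)):
        for j in range(i + 1, len(spans)):
            (s1, e1), (s2, e2) = spans[i], spans[j]
            if s1 < e2 and s2 < e1:          # the two spans overlap
                prob += x[i] + x[j] <= 1
    prob.solve()
    return [spans[i] for i in range(len(spans)) if x[i].value() == 1]

Keeping the classifier and the structural constraints separate is exactly what lets such systems bolt new linguistic constraints onto the same learned scores.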
In a machine translation application, using the structure-based tagger to annotate English modalities on an English-Urdu training corpus improved the translation quality score for Urdu by 0.3 BLEU points in the face of sparse training data.", "phrases": ["modality lexicon", "machine translation", "annotation scheme", "tagger", "requirement"], "overall_score": 2.601906922691314, "scores": [1.9961481792532818, 0.8640330672404911, 0.8267414979912425, 0.5635145443319496, 0.5535854018117131], "rank_score": 0.9608045381257355} -{"id": "velardi-etal-2013-ontolearn", "title": "OntoLearn Reloaded: A Graph-Based Algorithm for Taxonomy Induction", "abstract": "In 2004 we published in this journal an article describing OntoLearn, one of the first systems to automatically induce a taxonomy from documents and Web sites. Since then, OntoLearn has continued to be an active area of research in our group and has become a reference work within the community. In this paper we describe our next-generation taxonomy learning methodology, which we name OntoLearn Reloaded. Unlike many taxonomy learning approaches in the literature, our novel algorithm learns both concepts and relations entirely from scratch via the automated extraction of terms, definitions, and hypernyms. This results in a very dense, cyclic and potentially disconnected hypernym graph. The algorithm then induces a taxonomy from this graph via optimal branching and a novel weighting policy. Our experiments show that we obtain high-quality results, both when building brand-new taxonomies and when reconstructing sub-hierarchies of existing taxonomies.", "phrases": ["graph-based algorithm", "taxonomy", "hypernym graph", "ontolearn reloaded"], "overall_score": 2.721824117165133, "scores": [0.8129595800580309, 0.7905096594631642, 1.6993963599390125, 0.5398723614847164], "rank_score": 0.9606844902362309} -{"id": "wang-manning-2010-probabilistic", "title": "Probabilistic Tree-Edit Models with Structured Latent Variables for Textual Entailment and Question Answering", "abstract": "A range of Natural Language Processing tasks involve making judgments about the semantic relatedness of a pair of sentences, such as Recognizing Textual Entailment (RTE) and answer selection for Question Answering (QA). A key challenge that these tasks face in common is the lack of explicit alignment annotation between a sentence pair. We capture the alignment by using a novel probabilistic model that models tree-edit operations on dependency parse trees. Unlike previous tree-edit models which require a separate alignment-finding phase and resort to ad-hoc distance metrics, our method treats alignments as structured latent variables, and offers a principled framework for incorporating complex linguistic features. 
We demonstrate the robustness of our model by conducting experiments for RTE and QA, and show that our model performs competitively on both tasks with the same set of general features.", "phrases": ["textual entailment", "question answering", "tree-edit operation", "dependency parse tree", "passage"], "overall_score": 3.0121410789823093, "scores": [0.9003777952273339, 0.8837055416461961, 1.338463360726144, 1.1435198876808816, 0.5372289584966052], "rank_score": 0.9606591087554321} -{"id": "poesio-etal-2008-anawiki", "title": "ANAWIKI: Creating Anaphorically Annotated Resources through Web Cooperation", "abstract": "The ability to make progress in Computational Linguistics depends on the availability of large annotated corpora, but creating such corpora by hand annotation is very expensive and time consuming; in practice, it is unfeasible to think of annotating more than one million words. However, the success of Wikipedia and other projects shows that another approach might be possible: take advantage of the willingness of Web users to contribute to collaborative resource creation. AnaWiki is a recently started project that will develop tools to allow and encourage large numbers of volunteers over the Web to collaborate in the creation of semantically annotated corpora (in the first instance, of a corpus annotated with information about anaphora).", "phrases": ["creation", "anawiki", "web community"], "overall_score": 1.5458964248696851, "scores": [1.7895785940008047, 0.5664419451966554, 0.5255378227495712], "rank_score": 0.9605194539823437} -{"id": "iyer-etal-2017-learning", "title": "Learning a Neural Semantic Parser from User Feedback", "abstract": "We present an approach to rapidly and easily build natural language interfaces to databases for new domains, whose performance improves over time based on user feedback, and requires minimal intervention. To achieve this, we adapt neural sequence models to map utterances directly to SQL with its full expressivity, bypassing any intermediate meaning representations. These models are immediately deployed online to solicit feedback from real users to flag incorrect queries. Finally, the popularity of SQL facilitates gathering annotations for incorrect predictions using the crowd, which is directly used to improve our models. This complete feedback loop, without intermediate representations or database specific engineering, opens up new ways of building high quality semantic parsers. Experiments suggest that this approach can be deployed quickly for any new target domain, as we show by learning a semantic parser for an online academic database from scratch.", "phrases": ["feedback", "semantic parsing", "text-to-sql", "neural network-based approach", "encoder-decoder model"], "overall_score": 3.6346387843839567, "scores": [1.8043812112952897, 1.0446623504185806, 0.8250267759902227, 0.5902614511288964, 0.5380686403185296], "rank_score": 0.9604800858303036} -{"id": "zang-etal-2020-word", "title": "Word-level Textual Adversarial Attacking as Combinatorial Optimization", "abstract": "Adversarial attacks are carried out to reveal the vulnerability of deep neural networks. Textual adversarial attacking is challenging because text is discrete and a small perturbation can bring significant change to the original input. Word-level attacking, which can be regarded as a combinatorial optimization problem, is a well-studied class of textual attack methods. 
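Before the sememe-and-PSO search described next in the zang-etal-2020-word record, word-level attacks are often introduced via a greedy baseline like the following; predict_proba and substitutes are assumed callables standing in for the victim model and the substitution source, not anything from the paper's released code:

def greedy_word_attack(tokens, label, predict_proba, substitutes, max_edits=5):
    # Greedily swap words to minimize the victim model's confidence in `label`.
    # predict_proba(tokens) -> {label: probability}; substitutes(word) -> candidates.
    tokens = list(tokens)
    for _ in range(max_edits):
        base = predict_proba(tokens)[label]
        best_gain, best_edit = 0.0, None
        for i, word in enumerate(tokens):
            for sub in substitutes(word):
                cand = tokens[:i] + [sub] + tokens[i + 1:]
                gain = base - predict_proba(cand)[label]
                if gain > best_gain:
                    best_gain, best_edit = gain, (i, sub)
        if best_edit is None:
            break                          # no substitution helps any further
        tokens[best_edit[0]] = best_edit[1]
        probs = predict_proba(tokens)
        if max(probs, key=probs.get) != label:
            break                          # prediction flipped: attack succeeded
    return tokens

Greedy search like this gets stuck in local optima, which is precisely the gap that the population-based particle swarm optimization in the record is meant to close.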
However, existing word-level attack models are far from perfect, largely because unsuitable search space reduction methods and inefficient optimization algorithms are employed. In this paper, we propose a novel attack model, which incorporates the sememe-based word substitution method and particle swarm optimization-based search algorithm to solve the two problems separately. We conduct exhaustive experiments to evaluate our attack model by attacking BiLSTM and BERT on three benchmark datasets. Experimental results demonstrate that our model consistently achieves much higher attack success rates and crafts more high-quality adversarial examples as compared to baseline methods. Also, further experiments show our model has higher transferability and can bring more robustness enhancement to victim models by adversarial training. All the code and data of this paper can be obtained on .", "phrases": ["combinatorial optimization problem", "attack model", "adversarial example", "victim model"], "overall_score": 2.9670963276195637, "scores": [1.7080566012686977, 1.07363740523453, 0.5333048796023725, 0.5246071657568202], "rank_score": 0.9599015129656052} -{"id": "briakou-etal-2021-ola", "title": "Ol\u00e1, Bonjour, Salve! XFORMAL: A Benchmark for Multilingual Formality Style Transfer", "abstract": "We take the first step towards multilingual style transfer by creating and releasing XFORMAL, a benchmark of multiple formal reformulations of informal text in Brazilian Portuguese, French, and Italian. Results on XFORMAL suggest that state-of-the-art style transfer approaches perform close to simple baselines, indicating that style transfer is even more challenging when moving multilingual.", "phrases": ["xformal", "multilingual style transfer", "multiple formal reformulation", "informal text", "brazilian portuguese"], "overall_score": 1.3305258158817526, "scores": [2.1276798659094447, 1.002483608637818, 0.560819456539704, 0.5596190965882085, 0.5482554631931916], "rank_score": 0.9597714981736732} -{"id": "park-caragea-2022-calibration", "title": "On the Calibration of Pre-trained Language Models using Mixup Guided by Area Under the Margin and Saliency", "abstract": "A well-calibrated neural model produces confidence (probability outputs) closely approximated by the expected accuracy. While prior studies have shown that mixup training as a data augmentation technique can improve model calibration on image classification tasks, little is known about using mixup for model calibration on natural language understanding (NLU) tasks. In this paper, we explore mixup for model calibration on several NLU tasks and propose a novel mixup strategy for pre-trained language models that improves model calibration further. Our proposed mixup is guided by both the Area Under the Margin (AUM) statistic (Pleiss et al., 2020) and the saliency map of each sample (Simonyan et al., 2013). Moreover, we combine our mixup strategy with model miscalibration correction techniques (i.e., label smoothing and temperature scaling) and provide detailed analyses of their impact on our proposed mixup. We focus on systematically designing experiments on three NLU tasks: natural language inference, paraphrase detection, and commonsense reasoning. 
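For readers unfamiliar with mixup, the following minimal PyTorch sketch shows the core interpolation that the calibration abstract above builds on. The AUM- and saliency-guided pairing from the paper is simplified here to random pairing, and all tensors and sizes are illustrative stand-ins, not the paper's setup:

```python
# A minimal mixup sketch: interpolate features and one-hot labels, then
# train with a soft-target cross-entropy.
import torch
import torch.nn.functional as F

def mixup_batch(embeddings, labels, num_classes, alpha=0.4):
    """Interpolate a batch of sentence features and one-hot labels."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    perm = torch.randperm(embeddings.size(0))
    mixed_x = lam * embeddings + (1 - lam) * embeddings[perm]
    y = F.one_hot(labels, num_classes).float()
    mixed_y = lam * y + (1 - lam) * y[perm]
    return mixed_x, mixed_y

x = torch.randn(8, 768)          # e.g. pooled encoder features (placeholder)
y = torch.randint(0, 3, (8,))    # e.g. NLI labels (placeholder)
mx, my = mixup_batch(x, y, num_classes=3)
logits = torch.nn.Linear(768, 3)(mx)   # stand-in classifier head
loss = -(my * F.log_softmax(logits, dim=-1)).sum(dim=-1).mean()
```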
Our method achieves the lowest expected calibration error compared to strong baselines on both in-domain and out-of-domain test samples while maintaining competitive accuracy.", "phrases": ["pre-trained language model", "margin", "saliency map"], "overall_score": 1.0540650987931246, "scores": [1.72443199251151, 0.6288454175163646, 0.5250767909915091], "rank_score": 0.9594514003397946} -{"id": "kim-etal-2010-chunk", "title": "Chunk-Based EBMT", "abstract": "Corpus driven machine translation approaches such as Phrase-Based Statistical Machine Translation and Example-Based Machine Translation have been successful by using word alignment to find translation fragments for matched source parts in a bilingual training corpus. However, they still cannot properly deal with systematic translation for insertion or deletion words between two distant languages. In this work, we used syntactic chunks as translation units to alleviate this problem, improve alignments and show improvement in BLEU for Korean to English and Chinese to English translation tasks.", "phrases": ["ebmt", "chunk", "translation unit"], "overall_score": 1.5437059073853854, "scores": [1.4130783951944168, 0.9402469793093465, 0.5241498523299908], "rank_score": 0.9591584089445847} -{"id": "ding-etal-2020-daga", "title": "DAGA: Data Augmentation with a Generation Approach for Low-resource Tagging Tasks", "abstract": "Data augmentation techniques have been widely used to improve machine learning performance as they facilitate generalization. In this work, we propose a novel augmentation method to generate high quality synthetic data for low-resource tagging tasks with language models trained on the linearized labeled sentences. Our method is applicable to both supervised and semi-supervised settings. For the supervised settings, we conduct extensive experiments on named entity recognition (NER), part of speech (POS) tagging and end-to-end target based sentiment analysis (E2E-TBSA) tasks. For the semi-supervised settings, we evaluate our method on the NER task under the conditions of given unlabeled data only and unlabeled data plus a knowledge base. The results show that our method can consistently outperform the baselines, particularly when the given gold training data are less.", "phrases": ["data augmentation", "language model", "semi-supervised setting"], "overall_score": 2.65923919781038, "scores": [0.8378036101892526, 1.4964718160694723, 0.5430779761552104], "rank_score": 0.9591178008046451} -{"id": "zhu-etal-2019-graph", "title": "Graph Neural Networks with Generated Parameters for Relation Extraction", "abstract": "In this paper, we propose a novel graph neural network with generated parameters (GP-GNNs). The parameters in the propagation module, i.e. the transition matrices used in message passing procedure, are produced by a generator taking natural language sentences as inputs. We verify GP-GNNs in relation extraction from text, both on bag- and instance-settings. Experimental results on a human-annotated dataset and two distantly supervised datasets show that multi-hop reasoning mechanism yields significant improvements. 
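The distinguishing idea in the GP-GNN abstract above is that the message-passing parameters are generated from the input text rather than learned as constants. A toy PyTorch sketch of such a layer follows; the dimensions, generator, and update rule are hypothetical simplifications, not the paper's architecture:

```python
# Toy "generated parameters" message passing: each edge's transition
# matrix is produced by a generator from a sentence encoding.
import torch
import torch.nn as nn

class GeneratedParamLayer(nn.Module):
    def __init__(self, hidden_dim, sent_dim):
        super().__init__()
        # maps a sentence encoding to a hidden_dim x hidden_dim matrix
        self.generator = nn.Linear(sent_dim, hidden_dim * hidden_dim)
        self.hidden_dim = hidden_dim

    def forward(self, node_states, edges, sent_encodings):
        # node_states: (num_nodes, hidden); edges: list of (src, dst);
        # sent_encodings: (num_edges, sent_dim), one per entity-pair sentence
        new_states = node_states.clone()
        for k, (src, dst) in enumerate(edges):
            A = self.generator(sent_encodings[k]).view(self.hidden_dim, self.hidden_dim)
            new_states[dst] = new_states[dst] + torch.tanh(A @ node_states[src])
        return new_states

layer = GeneratedParamLayer(hidden_dim=16, sent_dim=32)
h = torch.randn(3, 16)
out = layer(h, edges=[(0, 1), (1, 2)], sent_encodings=torch.randn(2, 32))
```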
We also perform a qualitative analysis to demonstrate that our model could discover more accurate relations by multi-hop relational reasoning.", "phrases": ["relation extraction", "reasoning", "gnn", "previous approach"], "overall_score": 2.208285769508337, "scores": [1.9153057095612656, 0.8700355109975938, 0.5289008880903738, 0.5219431880025548], "rank_score": 0.959046324162947} -{"id": "cotterell-heigold-2017-cross", "title": "Cross-lingual Character-Level Neural Morphological Tagging", "abstract": "Even for common NLP tasks, sufficient supervision is not available in many languages \u2013 morphological tagging is no exception. In the work presented here, we explore a transfer learning scheme, whereby we train character-level recurrent neural taggers to predict morphological taggings for high-resource languages and low-resource languages together. Learning joint character representations among multiple related languages successfully enables knowledge transfer from the high-resource languages to the low-resource ones.", "phrases": ["morphological tagging", "high-resource language", "language family"], "overall_score": 2.823812962328468, "scores": [1.1837645434264523, 1.1491554731267302, 0.5441777759042573], "rank_score": 0.9590325974858133} -{"id": "sogaard-2011-data", "title": "Data point selection for cross-language adaptation of dependency parsers", "abstract": "We consider a very simple, yet effective, approach to cross language adaptation of dependency parsers. We first remove lexical items from the treebanks and map part-of-speech tags into a common tagset. We then train a language model on tag sequences in otherwise unlabeled target data and rank labeled source data by perplexity per word of tag sequences from less similar to most similar to the target. We then train our target language parser on the most similar data points in the source labeled data. The strategy achieves much better results than a non-adapted baseline and state-of-the-art unsupervised dependency parsing, and results are comparable to more complex projection-based cross language adaptation algorithms.", "phrases": ["dependency parser", "perplexity", "data point selection", "cross-lingual transfer"], "overall_score": 2.3827957903974544, "scores": [1.890964724577873, 0.848088018601377, 0.5756230007876659, 0.5209545250425739], "rank_score": 0.9589075672523725} -{"id": "mccallum-li-2003-early", "title": "Early results for Named Entity Recognition with Conditional Random Fields, Feature Induction and Web-Enhanced Lexicons", "abstract": "Models for many natural language tasks benefit from the flexibility to use overlapping, non-independent features. For example, the need for labeled data can be drastically reduced by taking advantage of domain knowledge in the form of word lists, part-of-speech tags, character n-grams, and capitalization patterns. While it is difficult to capture such inter-dependent features with a generative probabilistic model, conditionally-trained models, such as conditional maximum entropy models, handle them well. 
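The overlapping, non-independent features mentioned in the abstract above (word lists, capitalization patterns, affixes) are easy to picture as a per-token feature dictionary of the kind fed to conditionally-trained models such as CRFs. A small illustrative sketch, with a made-up location lexicon:

```python
# Several features fire on the same token; conditional models handle
# this overlap naturally, unlike generative models.
def token_features(tokens, i, lexicon=frozenset({"london", "paris"})):
    w = tokens[i]
    feats = {
        "word.lower": w.lower(),
        "word.istitle": w.istitle(),
        "word.isupper": w.isupper(),
        "in_location_lexicon": w.lower() in lexicon,
        "prefix3": w[:3],
        "suffix3": w[-3:],
    }
    if i > 0:
        feats["prev.word.lower"] = tokens[i - 1].lower()
    else:
        feats["BOS"] = True
    return feats

tokens = ["Unions", "met", "in", "London", "."]
print(token_features(tokens, 3))  # lexicon, shape, and affix features all fire
```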
There has been significant work with such models for greedy sequence modeling in NLP (Ratnaparkhi, 1996; Borthwick et al., 1998).", "phrases": ["named entity recognition", "conditional random field", "formal text"], "overall_score": 2.962850189448184, "scores": [0.9363608922045138, 1.389450526189733, 0.5497720465586317], "rank_score": 0.9585278216509594} -{"id": "king-abney-2013-labeling", "title": "Labeling the Languages of Words in Mixed-Language Documents using Weakly Supervised Methods", "abstract": "In this paper we consider the problem of labeling the languages of words in mixed-language documents. This problem is approached in a weakly supervised fashion, as a sequence labeling problem with monolingual text samples for training data. Among the approaches evaluated, a conditional random field model trained with generalized expectation criteria was the most accurate and performed consistently as the amount of training data was varied.", "phrases": ["mixed-language document", "semi-supervised method", "language identification", "minority language", "web page"], "overall_score": 3.0457894262163836, "scores": [1.895275430654675, 1.208900958336938, 0.594116369062201, 0.5505500040820934, 0.5430669864206782], "rank_score": 0.9583819497113171} -{"id": "muzerelle-etal-2014-ancor", "title": "ANCOR_Centre, a large free spoken French coreference corpus: description of the resource and reliability measures", "abstract": "This article presents ANCOR_Centre, a French coreference corpus, available under the Creative Commons Licence. With a size of around 500,000 words, the corpus is large enough to serve the needs of data-driven approaches in NLP and represents one of the largest coreference resources currently available. The corpus focuses exclusively on spoken language; it aims at representing a certain variety of spoken genres. ANCOR_Centre includes anaphora as well as coreference relations which involve nominal and pronominal mentions. The paper describes in detail the annotation scheme and the reliability measures computed on the resource.", "phrases": ["french coreference corpus", "reliability measure", "spoken language", "ancor_centre", "substantial dataset"], "overall_score": 2.297855571239757, "scores": [2.033978668615125, 0.8250797285997346, 0.8201771088694321, 0.5573758273555264, 0.5547896866683987], "rank_score": 0.9582802040216434} -{"id": "kolomiyets-etal-2012-extracting", "title": "Extracting Narrative Timelines as Temporal Dependency Structures", "abstract": "We propose a new approach to characterizing the timeline of a text: temporal dependency structures, where all the events of a narrative are linked via partial ordering relations like BEFORE, AFTER, OVERLAP and IDENTITY. We annotate a corpus of children's stories with temporal dependency trees, achieving agreement (Krippendorff's Alpha) of 0.856 on the event words, 0.822 on the links between events, and of 0.700 on the ordering relation labels. We compare two parsing models for temporal dependency structures, and show that a deterministic non-projective dependency parser outperforms a graph-based maximum spanning tree parser, achieving labeled attachment accuracy of 0.647 and labeled tree edit distance of 0.596.
Our analysis of the dependency parser errors gives some insights into future research directions.", "phrases": ["timeline", "dependency structure", "story"], "overall_score": 2.457583527087775, "scores": [1.3857436000935155, 0.9287097015793497, 0.559970148371366], "rank_score": 0.9581411500147438} -{"id": "yamada-etal-2020-luke", "title": "LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention", "abstract": "Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering). Our source code and pretrained representations are available at .", "phrases": ["entity-aware self-attention", "contextualized representation", "wikipedia", "entity information", "pre-trained language model"], "overall_score": 3.320556719648479, "scores": [1.488062973516761, 1.040826685178103, 0.8832746761791489, 0.8385006300762173, 0.5398857474771541], "rank_score": 0.9581101424854769} -{"id": "jain-sharma-2016-explicit", "title": "Explicit Argument Identification for Discourse Parsing In Hindi: A Hybrid Pipeline", "abstract": "Shallow discourse parsing enables us to study discourse as a coherent piece of information rather than a sequence of clauses, sentences and paragraphs. In this paper, we identify arguments of explicit discourse relations in Hindi. This is the first such work carried out for Hindi. Building upon previous work carried out on discourse connective identification in Hindi, we propose a hybrid pipeline which makes use of both sub-tree extraction and linear tagging approaches. We report state-of-the-art performance for this task.", "phrases": ["discourse", "sub-tree extraction", "linear tagging approach"], "overall_score": 1.0525651410546764, "scores": [1.8177630026887668, 0.5357202662563367, 0.5207749709588264], "rank_score": 0.9580860799679766} -{"id": "chrupala-etal-2020-analyzing", "title": "Analyzing analytical methods: The case of phonology in neural models of spoken language", "abstract": "Given the fast development of analysis techniques for NLP and speech processing systems, few systematic studies have been conducted to compare the strengths and weaknesses of each method. As a step in this direction we study the case of representations of phonology in neural network models of spoken language. We use two commonly applied analytical techniques, diagnostic classifiers and representational similarity analysis, to quantify to what extent neural activation patterns encode phonemes and phoneme sequences.
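Representational similarity analysis, one of the two techniques named in the abstract above, reduces to correlating the pairwise-dissimilarity structure of two representations. A compact sketch with random stand-in data (not the paper's phoneme setup):

```python
# RSA in miniature: compare the similarity structure of model activations
# with that of a reference representation via rank correlation.
import numpy as np
from scipy.stats import spearmanr
from scipy.spatial.distance import pdist

rng = np.random.default_rng(0)
activations = rng.normal(size=(50, 128))   # 50 stimuli x model features
reference = rng.normal(size=(50, 20))      # 50 stimuli x label features

# condensed pairwise dissimilarity vectors, one entry per stimulus pair
d_act = pdist(activations, metric="cosine")
d_ref = pdist(reference, metric="cosine")

rsa_score, _ = spearmanr(d_act, d_ref)
print(f"RSA (Spearman) = {rsa_score:.3f}")
```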
We manipulate two factors that can affect the outcome of analysis. First, we investigate the role of learning by comparing neural activations extracted from trained versus randomly-initialized models. Second, we examine the temporal scope of the activations by probing both local activations corresponding to a few milliseconds of the speech signal, and global activations pooled over the whole utterance. We conclude that reporting analysis results with randomly initialized models is crucial, and that global-scope methods tend to yield more consistent and interpretable results and we recommend their use as a complement to local-scope diagnostic methods.", "phrases": ["phonology", "representational similarity analysis", "neural activation pattern", "temporal scope"], "overall_score": 1.3277187332664668, "scores": [1.9273920571751144, 0.8353341633733488, 0.5429235001498878, 0.5253367436594656], "rank_score": 0.9577466160894541} -{"id": "ganchev-etal-2009-dependency", "title": "Dependency Grammar Induction via Bitext Projection Constraints", "abstract": "Broad-coverage annotated treebanks necessary to train parsers do not exist for many resource-poor languages. The wide availability of parallel text and accurate parsers in English has opened up the possibility of grammar induction through partial transfer across bitext. We consider generative and discriminative models for dependency grammar induction that use word-level alignments and a source language parser (English) to constrain the space of possible target trees. Unlike previous approaches, our framework does not require full projected parses, allowing partial, approximate transfer through linear expectation constraints on the space of distributions over trees. We consider several types of constraints that range from generic dependency conservation to language-specific annotation rules for auxiliary verb analysis. We evaluate our approach on Bulgarian and Spanish CoNLL shared task data and show that we consistently outperform unsupervised methods and can outperform supervised learning for limited training data.", "phrases": ["bitext", "projection", "dependency grammar induction", "parallel corpora", "hand-written rule"], "overall_score": 3.002067513434492, "scores": [2.0526795239341222, 1.0914820899681266, 0.5617137565159576, 0.5604889382079092, 0.5208674747769374], "rank_score": 0.9574463566806106} -{"id": "desai-etal-2020-compressive", "title": "Compressive Summarization with Plausibility and Salience Modeling", "abstract": "Compressive summarization systems typically rely on a seed set of syntactic rules to determine under what circumstances deleting a span is permissible, then learn which compressions to actually apply by optimizing for ROUGE. In this work, we propose to relax these explicit syntactic constraints on candidate spans, and instead leave the decision about what to delete to two data-driven criteria: plausibility and salience. Deleting a span is plausible if removing it maintains the grammaticality and factuality of a sentence, and it is salient if it removes important information from the summary. Each of these is judged by a pre-trained Transformer model, and only deletions that are both plausible and not salient can be applied. When integrated into a simple extraction-compression pipeline, our method achieves strong in-domain results on benchmark datasets, and human evaluation shows that the plausibility model generally selects for grammatical and factual deletions. 
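The two data-driven deletion criteria in the compression abstract above can be pictured as a simple gate: delete a candidate span only if deletion is plausible and the span is not salient. In the sketch below, both scorers are hypothetical placeholders for the paper's pre-trained Transformer judges:

```python
def is_plausible(sentence, span):
    # placeholder judge: only comma-terminated modifiers are safe to drop
    return span.endswith(",")

def is_salient(sentence, span):
    # placeholder judge: long spans are assumed to carry key content
    return len(span.split()) > 4

def compress(sentence, candidate_spans):
    for span in candidate_spans:
        if is_plausible(sentence, span) and not is_salient(sentence, span):
            sentence = sentence.replace(span, "").replace("  ", " ").strip()
    return sentence

s = "The markets rallied, shrugging off weak data, as oil prices rose."
print(compress(s, ["shrugging off weak data,"]))
# -> "The markets rallied, as oil prices rose."
```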
Furthermore, the flexibility of our approach allows it to generalize cross-domain, and we show that our system fine-tuned on only 500 samples from a new domain can match or exceed a strong in-domain extractive model.", "phrases": ["plausibility", "salience", "compressive summarization system"], "overall_score": 1.3272952454856795, "scores": [1.6630402825284583, 0.6497339281970416, 0.5595491919610339], "rank_score": 0.9574411342288446} -{"id": "zhou-etal-2022-distributed", "title": "Distributed NLI: Learning to Predict Human Opinion Distributions for Language Reasoning", "abstract": "We introduce distributed NLI, a new NLU task with a goal to predict the distribution of human judgements for natural language inference. We show that by applying additional distribution estimation methods, namely, Monte Carlo (MC) Dropout, Deep Ensemble, Re-Calibration, and Distribution Distillation, models can capture human judgement distribution more effectively than the softmax baseline. We show that MC Dropout is able to achieve decent performance without any distribution annotations while Re-Calibration can give further improvements with extra distribution annotations, suggesting the value of multiple annotations for one example in modeling the distribution of human judgements. Despite these improvements, the best results are still far below the estimated human upper-bound, indicating that predicting the distribution of human judgements is still an open, challenging problem with a large room for improvements. We showcase the common errors for MC Dropout and Re-Calibration. Finally, we give guidelines on the usage of these methods with different levels of data availability and encourage future work on modeling the human opinion distribution for language reasoning.", "phrases": ["nli", "language reasoning", "judgement", "distribution estimation method"], "overall_score": 1.3272095743054135, "scores": [1.8241988744197775, 0.9303953874913468, 0.5382371633529535, 0.5366859168774665], "rank_score": 0.9573793355353861} -{"id": "sarawgi-etal-2011-gender", "title": "Gender Attribution: Tracing Stylometric Evidence Beyond Topic and Genre", "abstract": "Sociolinguistic theories (e.g., Lakoff (1973)) postulate that women's language styles differ from that of men. In this paper, we explore statistical techniques that can learn to identify the gender of authors in modern English text, such as web blogs and scientific papers. Although recent work has shown the efficacy of statistical approaches to gender attribution, we conjecture that the reported performance might be overly optimistic due to non-stylistic factors such as topic bias in gender that can make the gender detection task easier. Our work is the first that consciously avoids gender bias in topics, thereby providing stronger evidence to gender-specific styles in language beyond topic. In addition, our comparative study provides new insights into robustness of various stylometric techniques across topic and genre.", "phrases": ["genre", "topic bias", "gender attribution"], "overall_score": 2.103403919428156, "scores": [0.9785058837561339, 0.8074569456111467, 1.0859383059889338], "rank_score": 0.9573003784520715} -{"id": "zhang-etal-2016-generating", "title": "Generating Abbreviations for Chinese Named Entities Using Recurrent Neural Network with Dynamic Dictionary", "abstract": "Chinese named entities occur frequently in formal and informal environments. 
Various approaches have formalized the problem as a sequence labelling task and utilize a character-based methodology, in which the character is treated as the basic classification unit. One of the main drawbacks of these methods is that some of the generated abbreviations may not follow the conventional wisdom of Chinese. To address this problem, we propose a novel neural network architecture to perform this task. It combines a recurrent neural network (RNN) with an architecture determining whether a given sequence of characters can be a word or not. For demonstrating the effectiveness of the proposed method, we evaluate it on Chinese named entity generation and opinion target extraction tasks. Experimental results show that the proposed method can achieve better performance than state-of-the-art methods.", "phrases": ["abbreviation", "chinese", "recurrent neural network"], "overall_score": 1.0514923387180553, "scores": [1.8079838617878154, 0.5359918283806976, 0.5273530294277877], "rank_score": 0.957109573198767} -{"id": "specia-2011-exploiting", "title": "Exploiting Objective Annotations for Minimising Translation Post-editing Effort", "abstract": "With the noticeable improvement of the overall quality of Machine Translation (MT) systems in recent years, post-editing of MT output is starting to become a common practice among human translators. However, it is well known that the quality of a given MT system can vary significantly across translation segments and that post-editing bad quality translations is a tedious task that may require more effort than translating texts from scratch. Previous research dedicated to learning quality estimation models to flag such segments has shown that models based on human annotation achieve more promising results. However, it is not clear yet what is the most appropriate form of human annotation for building such models. We experiment with models based on three annotation types (post-editing time, post-editing distance and post-editing effort scores) and show that estimations resulting from using post-editing time, a simple and objective annotation, can minimise translation post-editing effort in a practical, task-based scenario. We also discuss some perspectives on the effectiveness, reliability and cost of each type of annotation.", "phrases": ["annotator", "translator", "post-editing time"], "overall_score": 3.0003534827856067, "scores": [1.4692170138293512, 0.8524253304532463, 0.5490567635738364], "rank_score": 0.9568997026188114} -{"id": "poudyal-etal-2020-echr", "title": "ECHR: Legal Corpus for Argument Mining", "abstract": "In this paper, we publicly release an annotated corpus of 42 decisions of the European Court of Human Rights (ECHR). The corpus is annotated in terms of three types of clauses useful in argument mining: premise, conclusion, and non-argument parts of the text. Furthermore, relationships among the premises and conclusions are mapped. We present baselines for three tasks that lead from unstructured texts to structured arguments. The tasks are argument clause recognition, clause relation prediction, and premise/conclusion recognition. Despite a straightforward application of the bidirectional encoders from Transformers (BERT), we obtained very promising results (F1 0.765 on argument recognition, 0.511 on relation prediction, and 0.859/0.628 on premise/conclusion recognition). The results suggest the usefulness of pre-trained language models based on deep neural network architectures in argument mining.
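The BERT baseline described in the abstract above amounts to standard three-way sequence classification over clauses. A minimal sketch using the Hugging Face transformers API; the model choice, label order, and example clause are illustrative, not taken from the paper:

```python
# Classify a clause as premise / conclusion / non-argument with a
# standard sequence-classification head (freshly initialized here).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=3
)

clause = "The applicant was denied access to the case file."  # made-up example
inputs = tok(clause, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
label = ["premise", "conclusion", "non-argument"][logits.argmax(-1).item()]
print(label)  # untrained head, so the prediction is arbitrary until fine-tuned
```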
Because of the simplicity of the baselines, there is ample space for improvement in future work based on the released corpus.", "phrases": ["argument mining", "human rights", "echr", "legal domain"], "overall_score": 1.3265425928392909, "scores": [1.7481353099303285, 0.9693822144104812, 0.5847217159992681, 0.5253536000943871], "rank_score": 0.9568982101086162} -{"id": "cook-stevenson-2006-classifying", "title": "Classifying Particle Semantics in English Verb-Particle Constructions", "abstract": "Previous computational work on learning the semantic properties of verb-particle constructions (VPCs) has focused on their compositionality, and has left unaddressed the issue of which meaning of the component words is being used in a given VPC. We develop a feature space for use in classification of the sense contributed by the particle in a VPC, and test this on VPCs using the particle up. The features that capture linguistic properties of VPCs that are relevant to the semantics of the particle outperform linguistically uninformed word co-occurrence features in our experiments on unseen test VPCs.", "phrases": ["particle", "verb-particle construction", "compositionality"], "overall_score": 2.2030456057345855, "scores": [1.2937724632455563, 1.0261427909222636, 0.5503963956873918], "rank_score": 0.9567705499517373} -{"id": "ghazvininejad-etal-2017-hafez", "title": "Hafez: an Interactive Poetry Generation System", "abstract": "Hafez is an automatic poetry generation system that integrates a Recurrent Neural Network (RNN) with a Finite State Acceptor (FSA). It generates sonnets given arbitrary topics. Furthermore, Hafez enables users to revise and polish generated poems by adjusting various style configurations. Experiments demonstrate that such \u201cpolish\u201d mechanisms consider the user\u2019s intention and lead to a better poem. For evaluation, we build a web interface where users can rate the quality of each poem from 1 to 5 stars. We also speed up the whole system by a factor of 10, via vocabulary pruning and GPU computation, so that adequate feedback can be collected at a fast pace. Based on such feedback, the system learns to adjust its parameters to improve poetry quality.", "phrases": ["poetry generation", "hafez", "speaker style"], "overall_score": 2.71013720681482, "scores": [1.6696957822130096, 0.6452135942322512, 0.5547691945318274], "rank_score": 0.9565595236590294} -{"id": "schlangen-skantze-2009-general", "title": "A General, Abstract Model of Incremental Dialogue Processing", "abstract": "We present a general model and conceptual framework for specifying architectures for incremental processing in dialogue systems, in particular with respect to the topology of the network of modules that make up the system, the way information flows through this network, how information increments are 'packaged', and how these increments are processed by the modules.
This model enables the precise specification of incremental systems and hence facilitates detailed comparisons between systems, as well as giving guidance on designing new systems.", "phrases": ["abstract model", "incremental dialogue processing", "incrementality", "processor", "system response"], "overall_score": 3.038911616622114, "scores": [1.6357910816501064, 1.5380294134068258, 0.558326398077768, 0.5265391693505347, 0.5224028976872898], "rank_score": 0.956217792034505} -{"id": "kotani-etal-2011-compiling", "title": "Compiling Learner Corpus Data of Linguistic Output and Language Processing in Speaking, Listening, Writing, and Reading", "abstract": "A learner\u2019s language data of speaking, writing, listening, and reading have been compiled for a learner corpus in this study. The language data consist of linguistic output and language processing. Linguistic output refers to data of pronunciation, sentences, listening comprehension rate, and reading comprehension rate. Language processing refers to processing time and learners\u2019 self-judgment of their difficulty of processing in speaking, listening, and reading and the fluency of their writing. This learner corpus will contribute to making the language learning process more clearly visible.", "phrases": ["learner corpus", "writing", "pronunciation"], "overall_score": 1.0502473976675941, "scores": [1.7328733280782813, 0.6027809897597823, 0.5322748192213342], "rank_score": 0.9559763790197993} -{"id": "xie-etal-2018-large", "title": "Large-scale Cloze Test Dataset Created by Teachers", "abstract": "Cloze tests are widely adopted in language exams to evaluate students' language proficiency. In this paper, we propose the first large-scale human-created cloze test dataset CLOTH, containing questions used in middle-school and high-school language exams. With missing blanks carefully created by teachers and candidate choices purposely designed to be nuanced, CLOTH requires a deeper language understanding and a wider attention span than previously automatically-generated cloze datasets. We test the performance of dedicatedly designed baseline models including a language model trained on the One Billion Word Corpus and show humans outperform them by a significant margin. We investigate the source of the performance gap, trace model deficiencies to some distinct properties of CLOTH, and identify the limited ability of comprehending the long-term context to be the key bottleneck.", "phrases": ["cloze test", "teacher", "middle-school"], "overall_score": 1.987603425571705, "scores": [1.7160120042055775, 0.5815961693946514, 0.5698974317259861], "rank_score": 0.9558352017754049} -{"id": "zhou-etal-2021-rica", "title": "RICA: Evaluating Robust Inference Capabilities Based on Commonsense Axioms", "abstract": "Pre-trained language models (PTLMs) have achieved impressive performance on commonsense inference benchmarks, but their ability to employ commonsense to make robust inferences, which is crucial for effective communications with humans, is debated. In the pursuit of advancing fluid human-AI communication, we propose a new challenge, RICA: Robust Inference using Commonsense Axioms, that evaluates robust commonsense inference despite textual perturbations. To generate data for this challenge, we develop a systematic and scalable procedure using commonsense knowledge bases and probe PTLMs across two different evaluation settings. 
Extensive experiments on our generated probe sets with more than 10k statements show that PTLMs perform no better than random guessing on the zero-shot setting, are heavily impacted by statistical biases, and are not robust to perturbation attacks. We also find that fine-tuning on similar statements offers limited gains, as PTLMs still fail to generalize to unseen inferences. Our new large-scale benchmark exposes a significant gap between PTLMs and human-level language understanding and offers a new challenge for PTLMs to demonstrate commonsense.", "phrases": ["commonsense axioms", "perturbation", "random guessing", "rica"], "overall_score": 1.7125253173303245, "scores": [1.8309021762528335, 0.9267037042991572, 0.545409080634567, 0.5200993501738466], "rank_score": 0.955778577840101} -{"id": "hosseini-etal-2018-learning", "title": "Learning Typed Entailment Graphs with Global Soft Constraints", "abstract": "This paper presents a new method for learning typed entailment graphs from text. We extract predicate-argument structures from multiple-source news corpora, and compute local distributional similarity scores to learn entailments between predicates with typed arguments (e.g., person contracted disease). Previous work has used transitivity constraints to improve local decisions, but these constraints are intractable on large graphs. We instead propose a scalable method that learns globally consistent similarity scores based on new soft constraints that consider both the structures across typed entailment graphs and inside each graph. Learning takes only a few hours to run over 100K predicates and our results show large improvements over local similarity scores on two entailment data sets. We further show improvements over paraphrases and entailments from the Paraphrase Database, and prior state-of-the-art entailment graphs. We show that the entailment graphs improve performance in a downstream task.", "phrases": ["entailment graph", "similarity score", "edge"], "overall_score": 2.291702618310401, "scores": [1.4757140213816509, 0.8420817313055681, 0.5493469173543607], "rank_score": 0.9557142233471932} -{"id": "winata-etal-2018-bilingual", "title": "Bilingual Character Representation for Efficiently Addressing Out-of-Vocabulary Words in Code-Switching Named Entity Recognition", "abstract": "We propose an LSTM-based model with hierarchical architecture on named entity recognition from code-switching Twitter data. Our model uses bilingual character representation and transfer learning to address out-of-vocabulary words. In order to mitigate data noise, we propose to use token replacement and normalization. In the 3rd Workshop on Computational Approaches to Linguistic Code-Switching Shared Task, we achieved second place with 62.76% harmonic mean F1-score for English-Spanish language pair without using any gazetteer and knowledge-based information.", "phrases": ["out-of-vocabulary word", "code-switching", "entity recognition", "bilingual character representation"], "overall_score": 1.8597211338475343, "scores": [1.8430486367054868, 0.9104721969134677, 0.5493606338651301, 0.51994896433288], "rank_score": 0.9557076079542411} -{"id": "gupta-etal-2019-simple", "title": "Simple, Fast, Accurate Intent Classification and Slot Labeling for Goal-Oriented Dialogue Systems", "abstract": "With the advent of conversational assistants, like Amazon Alexa, Google Now, etc., dialogue systems are gaining a lot of traction, especially in industrial settings.
These systems typically consist of a Spoken Language Understanding component which, in turn, consists of two tasks - Intent Classification (IC) and Slot Labeling (SL). Generally, these two tasks are modeled jointly to achieve the best performance. However, this joint modeling adds to model obfuscation. In this work, we first design a framework for modularization of the joint IC-SL task to enhance architecture transparency. Then, we explore a number of self-attention, convolutional, and recurrent models, contributing a large-scale analysis of modeling paradigms for IC+SL across two datasets. Finally, using this framework, we propose a class of `label-recurrent' models that are otherwise non-recurrent, with a 10-dimensional representation of the label history, and show that our proposed systems are easy to interpret, highly accurate (achieving over 30% error reduction in SL over the state-of-the-art on the Snips dataset), as well as fast, at 2x the inference speed and 2/3 to 1/2 the training time of comparable recurrent models, thus giving an edge in critical real-world systems.", "phrases": ["intent classification", "slot labeling", "dialogue system"], "overall_score": 1.7121237317899105, "scores": [1.7060686334574346, 0.5856822849010465, 0.5749124276890988], "rank_score": 0.9555544486825266} -{"id": "nguyen-2018-comparing", "title": "Comparing Automatic and Human Evaluation of Local Explanations for Text Classification", "abstract": "Text classification models are becoming increasingly complex and opaque; however, for many applications it is essential that the models are interpretable. Recently, a variety of approaches have been proposed for generating local explanations. While robust evaluations are needed to drive further progress, so far it is unclear which evaluation approaches are suitable. This paper is a first step towards more robust evaluations of local explanations. We evaluate a variety of local explanation approaches using automatic measures based on word deletion. Furthermore, we show that an evaluation using a crowdsourcing experiment correlates moderately with these automatic measures and that a variety of other factors also impact the human judgements.", "phrases": ["human evaluation", "explanation", "text classification"], "overall_score": 1.7120944194173926, "scores": [0.8874523181128167, 0.8233389298609632, 1.1558230194340249], "rank_score": 0.9555380891359349} -{"id": "denkowski-lavie-2014-meteor", "title": "Meteor Universal: Language Specific Translation Evaluation for Any Target Language", "abstract": "This paper describes Meteor Universal, released for the 2014 ACL Workshop on Statistical Machine Translation. Meteor Universal brings language specific evaluation to previously unsupported target languages by (1) automatically extracting linguistic resources (paraphrase tables and function word lists) from the bitext used to train MT systems and (2) using a universal parameter set learned from pooling human judgments of translation quality from several language directions.
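Of the two resources the Meteor Universal abstract says are extracted automatically, the function word list is the simpler to picture. In this rough sketch it is approximated as the tokens whose relative frequency in a corpus meets a cutoff; the helper name and threshold value are assumptions for illustration, not the tool's actual procedure:

```python
# Approximate a function word list by relative-frequency thresholding.
from collections import Counter

def function_words(corpus_tokens, rel_freq=1e-3):
    counts = Counter(corpus_tokens)
    total = sum(counts.values())
    return {w for w, c in counts.items() if c / total >= rel_freq}

corpus = ("the cat sat on the mat because the mat was warm".split()) * 50
# toy corpus, so one content word leaks in alongside the determiner
print(function_words(corpus, rel_freq=0.15))  # {'the', 'mat'}
```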
Meteor Universal is shown to significantly outperform baseline BLEU on two new languages, Russian (WMT13) and Hindi (WMT14).", "phrases": ["meteor universal", "synonym", "paraphrase match", "late version", "stem"], "overall_score": 3.036658790683353, "scores": [1.729972981829554, 1.1192537275380114, 0.8385708272038845, 0.549710239630586, 0.5400368356695757], "rank_score": 0.9555089223743224} -{"id": "yih-etal-2014-semantic", "title": "Semantic Parsing for Single-Relation Question Answering", "abstract": "We develop a semantic parsing framework based on semantic similarity for open domain question answering (QA). We focus on single-relation questions and decompose each question into an entity mention and a relation pattern. Using convolutional neural network models, we measure the similarity of entity mentions with entities in the knowledge base (KB) and the similarity of relation patterns and relations in the KB. We score relational triples in the KB using these measures and select the top scoring relational triple to answer the question. When evaluated on an open-domain QA task, our method achieves higher precision across different recall points compared to the previous approach, and can improve F1 by 7 points.", "phrases": ["question answering", "knowledge base", "semantic parsing", "kbqa"], "overall_score": 3.0360556694329155, "scores": [1.4843666232718373, 0.9284490889797821, 0.8870805129888669, 0.5213803565983], "rank_score": 0.9553191454596967} -{"id": "siddharthan-etal-2011-information", "title": "Information Status Distinctions and Referring Expressions: An Empirical Study of References to People in News Summaries", "abstract": "Although there has been much theoretical work on using various information status distinctions to explain the form of references in written text, there have been few studies that attempt to automatically learn these distinctions for generating references in the context of computer-regenerated text. In this article, we present a model for generating references to people in news summaries that incorporates insights from both theory and a corpus analysis of human written summaries. In particular, our model captures how two properties of a person referred to in the summary\u2014familiarity to the reader and global salience in the news story\u2014affect the content and form of the initial reference to that person in a summary. We demonstrate that these two distinctions can be learned from a typical input for multi-document summarization and that they can be used to make regeneration decisions that improve the quality of extractive summaries.", "phrases": ["distinction", "news summary", "hearer-old"], "overall_score": 2.1994857062293347, "scores": [1.7873301904788075, 0.5582196819713897, 0.5201236432712335], "rank_score": 0.9552245052404769} -{"id": "hermet-etal-2008-using", "title": "Using the Web as a Linguistic Resource to Automatically Correct Lexico-Syntactic Errors", "abstract": "This paper presents an algorithm for correcting language errors typical of second-language learners. We focus on preposition errors, which are very common among second-language learners but are not addressed well by current commercial grammar correctors and editing aids. The algorithm takes as input a sentence containing a preposition error (and possibly other errors as well), and outputs the correct preposition for that particular sentence context. We use a two-phase hybrid rule-based and statistical approach. 
In the first phase, rule-based processing is used to generate a short expression that captures the context of use of the preposition in the input sentence. In the second phase, Web searches are used to evaluate the frequency of this expression, when alternative prepositions are used instead of the original one. We tested this algorithm on a corpus of 133 French sentences written by intermediate second-language learners, and found that it could address 69.9% of those cases. In contrast, we found that the best French grammar and spell checker currently on the market, Antidote, addressed only 3% of those cases. We also showed that performance degrades gracefully when using a corpus of frequent n-grams to evaluate frequencies.", "phrases": ["web", "correction", "learner", "preposition error", "french-as-a-second-language"], "overall_score": 1.9853654637413762, "scores": [1.9885800342590232, 0.8737767166832376, 0.8355864670421003, 0.54665616761522, 0.5291954625535858], "rank_score": 0.9547589696306336} -{"id": "tam-etal-2021-improving", "title": "Improving and Simplifying Pattern Exploiting Training", "abstract": "Recently, pre-trained language models (LMs) have achieved strong performance when fine-tuned on difficult benchmarks like SuperGLUE. However, performance can suffer when there are very few labeled examples available for fine-tuning. Pattern Exploiting Training (PET) is a recent approach that leverages patterns for few-shot learning. However, PET uses task-specific unlabeled data. In this paper, we focus on few-shot learning without any unlabeled data and introduce ADAPET, which modifies PET's objective to provide denser supervision during fine-tuning. As a result, ADAPET outperforms PET on SuperGLUE without any task-specific unlabeled data.", "phrases": ["language model", "fine-tuning", "plm"], "overall_score": 2.5196229002028923, "scores": [1.0983953766551107, 0.9269253063098244, 0.838909895046292], "rank_score": 0.9547435260037423} -{"id": "ye-etal-2017-jointly", "title": "Jointly Extracting Relations with Class Ties via Effective Deep Ranking", "abstract": "Connections between relations in relation extraction, which we call class ties, are common. In a distantly supervised scenario, one entity tuple may have multiple relation facts. Exploiting class ties between relations of one entity tuple will be promising for distantly supervised relation extraction. However, previous models either are not effective at modeling this property or ignore it altogether. In this work, to effectively leverage class ties, we propose to perform joint relation extraction with a unified model that integrates a convolutional neural network (CNN) with a general pairwise ranking framework, in which three novel ranking loss functions are introduced. Additionally, an effective method is presented to relieve the severe class imbalance problem from NR (not relation) for model training. Experiments on a widely used dataset show that leveraging class ties will enhance extraction and demonstrate the effectiveness of our model to learn class ties.
Our model outperforms the baselines significantly, achieving state-of-the-art performance.", "phrases": ["class tie", "relation extraction", "entity tuple"], "overall_score": 1.9839638569414262, "scores": [1.763888278781675, 0.5691031135451005, 0.5292634253855611], "rank_score": 0.9540849392374455} -{"id": "xia-etal-2019-generalized", "title": "Generalized Data Augmentation for Low-Resource Translation", "abstract": "Low-resource language pairs with a paucity of parallel data pose challenges for machine translation in terms of both adequacy and fluency. Data augmentation utilizing a large amount of monolingual data is regarded as an effective way to alleviate the problem. In this paper, we propose a general framework of data augmentation for low-resource machine translation not only using target-side monolingual data, but also by pivoting through a related high-resource language. Specifically, we experiment with a two-step pivoting method to convert high-resource data to the low-resource language, making best use of available resources to better approximate the true distribution of the low-resource language. First, we inject low-resource words into high-resource sentences through an induced bilingual dictionary. Second, we further edit the high-resource data injected with low-resource words using a modified unsupervised machine translation framework. Extensive experiments on four low-resource datasets show that under extreme low-resource settings, our data augmentation techniques improve translation quality by up to 1.5 to 8 BLEU points compared to supervised back-translation baselines.", "phrases": ["data augmentation", "low-resource language", "monolingual data", "two-step pivoting method"], "overall_score": 2.6450106600393504, "scores": [1.4140109021241567, 0.9315731157571776, 0.9283033178346645, 0.5420564266212159], "rank_score": 0.9539859405843036} -{"id": "zhang-etal-2019-mitigating", "title": "Mitigating Uncertainty in Document Classification", "abstract": "The uncertainty measurement of classifiers' predictions is especially important in applications such as medical diagnoses that need to ensure limited human resources can focus on the most uncertain predictions returned by machine learning models. However, few existing uncertainty models attempt to improve overall prediction accuracy where human resources are involved in the text classification task. In this paper, we propose a novel neural-network-based model that applies a new dropout-entropy method for uncertainty measurement. We also design a metric learning method on feature representations, which can boost the performance of dropout-based uncertainty methods with smaller prediction variance in accurate prediction trials. Extensive experiments on real-world data sets demonstrate that our method can achieve a considerable improvement in overall prediction accuracy compared to existing approaches. 
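The dropout-based uncertainty idea in the abstract above can be sketched in a few lines: keep dropout active at inference, average the predictive distribution over several stochastic passes, and use its entropy to decide which examples to route to human experts. A minimal PyTorch illustration; the architecture, sizes, and 30% cutoff are placeholders echoing the abstract, not the paper's exact model:

```python
# Monte Carlo dropout: entropy of the averaged predictive distribution
# serves as the uncertainty score for each document.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(300, 64), nn.ReLU(), nn.Dropout(0.5), nn.Linear(64, 4))
x = torch.randn(16, 300)   # hypothetical document features

model.train()  # keep dropout stochastic during inference
with torch.no_grad():
    probs = torch.stack([torch.softmax(model(x), dim=-1) for _ in range(20)]).mean(0)

entropy = -(probs * probs.clamp_min(1e-12).log()).sum(-1)
# hand the most uncertain 30% of predictions over to human review
uncertain_idx = entropy.argsort(descending=True)[: int(0.3 * len(x))]
```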
In particular, our model improved the accuracy from 0.78 to 0.92 when 30% of the most uncertain predictions were handed over to human experts in \u201c20NewsGroup\u201d data.", "phrases": ["uncertainty", "text classification", "dropout-entropy method"], "overall_score": 1.535372452268289, "scores": [1.665953069110322, 0.6302465673544253, 0.5657419899994971], "rank_score": 0.9539805421547481} -{"id": "williams-reiter-2005-generating", "title": "Generating Readable Texts for Readers with Low Basic Skills", "abstract": "Most NLG systems generate texts for readers with good reading ability, but SkillSum adapts its output for readers with poor literacy. Evaluation with lowskilled readers confirms that SkillSum's knowledge-based microplanning choices enhance readability. We also discuss future readability improvements.", "phrases": ["reader", "most nlg system", "reading ability"], "overall_score": 1.3223613234745548, "scores": [1.756348635796972, 0.5837819697523928, 0.5215155799107954], "rank_score": 0.9538820618200533} -{"id": "littell-etal-2017-uriel", "title": "URIEL and lang2vec: Representing languages as typological, geographical, and phylogenetic vectors", "abstract": "We introduce the URIEL knowledge base for massively multilingual NLP and the lang2vec utility, which provides information-rich vector identifications of languages drawn from typological, geographical, and phylogenetic databases and normalized to have straightforward and consistent formats, naming, and semantics. The goal of URIEL and lang2vec is to enable multilingual NLP, especially on less-resourced languages and make possible types of experiments (especially but not exclusively related to NLP tasks) that are otherwise difficult or impossible due to the sparsity and incommensurability of the data sources. lang2vec vectors have been shown to reduce perplexity in multilingual language modeling, when compared to one-hot language identification vectors.", "phrases": ["lang2vec", "distance", "typological database", "language feature"], "overall_score": 2.517036991661734, "scores": [1.8487529391171895, 0.8308892838687574, 0.5683491186006938, 0.5670633196604293], "rank_score": 0.9537636653117675} -{"id": "sedoc-etal-2017-predicting", "title": "Predicting Emotional Word Ratings using Distributional Representations and Signed Clustering", "abstract": "Inferring the emotional content of words is important for text-based sentiment analysis, dialogue systems and psycholinguistics, but word ratings are expensive to collect at scale and across languages or domains. We develop a method that automatically extends word-level ratings to unrated words using signed clustering of vector space word representations along with affect ratings. We use our method to determine a word's valence and arousal, which determine its position on the circumplex model of affect, the most popular dimensional model of emotion. Our method achieves superior out-of-sample word rating prediction on both affective dimensions across three different languages when compared to state-of-the-art word similarity based methods. 
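The core task in the word-rating abstract above, extending human affect ratings to unrated words through the embedding space, can be illustrated with a much simpler stand-in for signed clustering: a k-nearest-neighbour average over rated words. All vectors and scores below are synthetic:

```python
# Propagate valence ratings from rated words to their nearest
# neighbours in a word-embedding space (cosine similarity).
import numpy as np

def extend_ratings(emb, rated, k=3):
    """emb: word -> vector; rated: word -> valence score."""
    out = {}
    rated_words = list(rated)
    M = np.stack([emb[w] for w in rated_words])
    M = M / np.linalg.norm(M, axis=1, keepdims=True)
    for w, v in emb.items():
        if w in rated:
            continue
        sims = M @ (v / np.linalg.norm(v))
        top = np.argsort(sims)[-k:]          # k most similar rated words
        out[w] = float(np.mean([rated[rated_words[i]] for i in top]))
    return out

rng = np.random.default_rng(1)
emb = {w: rng.normal(size=50) for w in ["joy", "grief", "delight", "sorrow", "picnic"]}
print(extend_ratings(emb, {"joy": 8.2, "grief": 2.0, "delight": 8.0, "sorrow": 2.4}))
```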
Our method can assist building word ratings for new languages and improve downstream tasks such as sentiment analysis and emotion detection.", "phrases": ["emotion", "rating", "clustering"], "overall_score": 1.8558404306601726, "scores": [1.6851595808071, 0.603262265765667, 0.5727181164843137], "rank_score": 0.953713321019027} -{"id": "herbelot-vecchi-2015-building", "title": "Building a shared world: mapping distributional to model-theoretic semantic spaces", "abstract": "In this paper, we introduce an approach to automatically map a standard distributional semantic space onto a set-theoretic model. We predict that there is a functional relationship between distributional information and vectorial concept representations in which dimensions are predicates and weights are generalised quantifiers. In order to test our prediction, we learn a model of such relationship over a publicly available dataset of feature norms annotated with natural language quantifiers. Our initial experimental results show that, at least for domain-specific data, we can indeed map between formalisms, and generate high-quality vector representations which encapsulate set overlap information. We further investigate the generation of natural language quantifiers from such vectors.", "phrases": ["semantic space", "functional relationship", "quantifier"], "overall_score": 2.6436833183415347, "scores": [0.9274722213927671, 1.4010368354713363, 0.5320125529250551], "rank_score": 0.9535072032630527} -{"id": "roller-etal-2014-inclusive", "title": "Inclusive yet Selective: Supervised Distributional Hypernymy Detection", "abstract": "We test the Distributional Inclusion Hypothesis, which states that hypernyms tend to occur in a superset of contexts in which their hyponyms are found. We find that this hypothesis only holds when it is applied to relevant dimensions. We propose a robust supervised approach that achieves accuracies of .84 and .85 on two existing datasets and that can be interpreted as selecting the dimensions that are relevant for distributional inclusion.", "phrases": ["hypernymy", "distributional inclusion hypothesis", "word representation"], "overall_score": 2.9020580166831342, "scores": [1.4584891228500392, 0.8609780515742732, 0.5401517734194573], "rank_score": 0.9532063159479232} -{"id": "amdal-etal-2008-rundkast", "title": "RUNDKAST: an Annotated Norwegian Broadcast News Speech Corpus", "abstract": "This paper describes the Norwegian broadcast news speech corpus RUNDKAST. The corpus contains recordings of approximately 77 hours of broadcast news shows from the Norwegian broadcasting company NRK. The corpus covers both read and spontaneous speech as well as spontaneous dialogues and multipart discussions, including frequent occurrences of non-speech material (e.g. music, jingles). The recordings have large variations in speaking styles, dialect use and recording/transmission quality. RUNDKAST has been annotated for research in speech technology. The entire corpus has been manually segmented and transcribed using hierarchical levels. A subset of one hour of read and spontaneous speech from 10 different speakers has been manually annotated using broad phonetic labels. We provide a description of the database content, the annotation tools and strategies, and the conventions used for the different levels of annotation. A corpus of this kind has up to this point not been available for Norwegian, but is considered a necessary part of the infrastructure for language technology research in Norway. 
The RUNDKAST corpus is planned to be included in a future national Norwegian language resource bank.", "phrases": ["recording", "broadcast news show", "rundkast"], "overall_score": 1.0468413977486535, "scores": [1.7317173018732097, 0.5811936806961084, 0.545717330293757], "rank_score": 0.9528761042876916} -{"id": "vanhainen-salvi-2014-free", "title": "Free Acoustic and Language Models for Large Vocabulary Continuous Speech Recognition in Swedish", "abstract": "This paper presents results for large vocabulary continuous speech recognition (LVCSR) in Swedish. We trained acoustic models on the public domain NST Swedish corpus and made them freely available to the community. The training procedure corresponds to the reference recogniser (RefRec) developed for the SpeechDat databases during the COST249 action. We describe the modifications we made to the procedure in order to train on the NST database, and the language models we created based on the N-gram data available at the Norwegian Language Council. Our tests include medium vocabulary isolated word recognition and LVCSR. Because no previous results are available for LVCSR in Swedish, we use as baseline the performance of the SpeechDat models on the same tasks. We also compare our best results to the ones obtained in similar conditions on resource rich languages such as American English. We tested the acoustic models with HTK and Julius and plan to make them available in CMU Sphinx format as well in the near future. We believe that the free availability of these resources will boost research in speech and language technology in Swedish, even in research groups that do not have resources to develop ASR systems.", "phrases": ["language model", "speech recognition", "swedish"], "overall_score": 1.0467803480046785, "scores": [1.6804257274924383, 0.6056993426851552, 0.5723365330702567], "rank_score": 0.9528205344159502} -{"id": "wu-mooney-2019-faithful", "title": "Faithful Multimodal Explanation for Visual Question Answering", "abstract": "AI systems' ability to explain their reasoning is critical to their utility and trustworthiness. Deep neural networks have enabled significant progress on many challenging problems such as visual question answering (VQA). However, most of them are opaque black boxes with limited explanatory capability. This paper presents a novel approach to developing a high-performing VQA system that can elucidate its answers with integrated textual and visual explanations that faithfully reflect important aspects of its underlying reasoning while capturing the style of comprehensible human explanations. Extensive experimental evaluation demonstrates the advantages of this approach compared to competing methods using both automated metrics and human evaluation.", "phrases": ["explanation", "visual question", "vqa"], "overall_score": 1.85354415755591, "scores": [1.3755448047951517, 0.837291825060605, 0.644763180375598], "rank_score": 0.9525332700771182} -{"id": "tandon-etal-2018-reasoning", "title": "Reasoning about Actions and State Changes by Injecting Commonsense Knowledge", "abstract": "Comprehending procedural text, e.g., a paragraph describing photosynthesis, requires modeling actions and the state changes they produce, so that questions about entities at different timepoints can be answered. Although several recent systems have shown impressive progress in this task, their predictions can be globally inconsistent or highly improbable. 
In this paper, we show how the predicted effects of actions in the context of a paragraph can be improved in two ways: (1) by incorporating global, commonsense constraints (e.g., a non-existent entity cannot be destroyed), and (2) by biasing reading with preferences from large-scale corpora (e.g., trees rarely move). Unlike earlier methods, we treat the problem as a neural structured prediction task, allowing hard and soft constraints to steer the model away from unlikely predictions. We show that the new model significantly outperforms earlier systems on a benchmark dataset for procedural text comprehension (+8% relative gain), and that it also avoids some of the nonsensical predictions that earlier systems make.", "phrases": ["state change", "commonsense knowledge", "recent system", "structured prediction task"], "overall_score": 2.3665048517864347, "scores": [1.8312314050709224, 0.9099314460692117, 0.5354320129562203, 0.5328115809921689], "rank_score": 0.9523516112721307} -{"id": "asano-etal-2017-reference", "title": "Reference-based Metrics can be Replaced with Reference-less Metrics in Evaluating Grammatical Error Correction Systems", "abstract": "In grammatical error correction (GEC), automatically evaluating system outputs requires gold-standard references, which must be created manually and thus tend to be both expensive and limited in coverage. To address this problem, a reference-less approach has recently emerged; however, previous reference-less metrics that only consider the criterion of grammaticality, have not worked as well as reference-based metrics. This study explores the potential of extending a prior grammaticality-based method to establish a reference-less evaluation method for GEC systems. Further, we empirically show that a reference-less metric that combines fluency and meaning preservation with grammaticality provides a better estimate of manual scores than that of commonly used reference-based metrics. To our knowledge, this is the first study that provides empirical evidence that a reference-less metric can replace reference-based metrics in evaluating GEC systems.", "phrases": ["reference-less metric", "grammaticality", "error correction", "manual evaluation"], "overall_score": 2.192519558907087, "scores": [1.4432657246533318, 0.9199054625783138, 0.8840213719317507, 0.5616040244298017], "rank_score": 0.9521991458982996} -{"id": "wuebker-etal-2010-training", "title": "Training Phrase Translation Models with Leaving-One-Out", "abstract": "Several attempts have been made to learn phrase translation probabilities for phrase-based statistical machine translation that go beyond pure counting of phrases in word-aligned training data. Most approaches report problems with over-fitting. We describe a novel leaving-one-out approach to prevent over-fitting that allows us to train phrase models that show improved translation performance on the WMT08 Europarl German-English task. In contrast to most previous work where phrase models were trained separately from other models used in translation, we include all components such as single word lexica and reordering models in training. Using this consistent training of phrase models we are able to achieve improvements of up to 1.4 points in BLEU. 
As a side effect, the phrase table size is reduced by more than 80%.", "phrases": ["phrase translation model", "leaving-one-out", "pure counting", "training procedure"], "overall_score": 2.7517032420300507, "scores": [0.8738943528556189, 1.4825646686905163, 0.8961366627642833, 0.5555002597669658], "rank_score": 0.952023986019346} -{"id": "xue-2008-labeling", "title": "Labeling Chinese Predicates with Semantic Roles", "abstract": "In this article we report work on Chinese semantic role labeling, taking advantage of two recently completed corpora, the Chinese PropBank, a semantically annotated corpus of Chinese verbs, and the Chinese Nombank, a companion corpus that annotates the predicate-argument structure of nominalized predicates. Because the semantic role labels are assigned to the constituents in a parse tree, we first report experiments in which semantic role labels are automatically assigned to hand-crafted parses in the Chinese Treebank. This gives us a measure of the extent to which semantic role labels can be bootstrapped from the syntactic annotation provided in the treebank. We then report experiments using automatic parses with decreasing levels of human annotation in the input to the syntactic parser: parses that use gold-standard segmentation and POS-tagging, parses that use only gold-standard segmentation, and fully automatic parses. These experiments gauge how successful semantic role labeling for Chinese can be in more realistic situations. Our results show that when hand-crafted parses are used, semantic role labeling accuracy for Chinese is comparable to what has been reported for the state-of-the-art English semantic role labeling systems trained and tested on the English PropBank, even though the Chinese PropBank is significantly smaller in size. When an automatic parser is used, however, the accuracy of our system is significantly lower than the English state of the art. This indicates that an improvement in Chinese parsing is critical to high-performance semantic role labeling for Chinese.", "phrases": ["semantic role", "chinese srl", "statistical classifier", "systematic research", "svm"], "overall_score": 2.5777849902310717, "scores": [1.9949866525270123, 1.148238284756, 0.5642759759388216, 0.5289519531726361, 0.5230323698501929], "rank_score": 0.9518970472489325} -{"id": "riedel-mccallum-2011-robust", "title": "Robust Biomedical Event Extraction with Dual Decomposition and Minimal Domain Adaptation", "abstract": "We present a joint model for biomedical event extraction and apply it to four tracks of the BioNLP 2011 Shared Task. Our model decomposes into three sub-models that concern (a) event triggers and outgoing arguments, (b) event triggers and incoming arguments and (c) protein-protein bindings. For efficient decoding we employ dual decomposition. Our results are very competitive: With minimal adaptation of our model we come in second for two of the tasks---right behind a version of the system presented here that includes predictions of the Stanford event extractor as features. 
We also show that for the Infectious Diseases task using data from the Genia track is a very effective way to improve accuracy.", "phrases": ["biomedical event extraction", "dual decomposition", "joint model"], "overall_score": 2.1912742008143846, "scores": [1.6940566738445466, 0.6285723680595107, 0.5323458393478787], "rank_score": 0.9516582937506453} -{"id": "wang-etal-2020-maven", "title": "MAVEN: A Massive General Domain Event Detection Dataset", "abstract": "Event detection (ED), which means identifying event trigger words and classifying event types, is the first and most fundamental step for extracting event knowledge from plain text. Most existing datasets exhibit the following issues that limit further development of ED: (1) Data scarcity. Existing small-scale datasets are not sufficient for training and stably benchmarking increasingly sophisticated modern neural methods. (2) Low coverage. Limited event types of existing datasets cannot well cover general-domain events, which restricts the applications of ED models. To alleviate these problems, we present a MAssive eVENt detection dataset (MAVEN), which contains 4,480 Wikipedia documents, 118,732 event mention instances, and 168 event types. MAVEN alleviates the data scarcity problem and covers much more general event types. We reproduce the recent state-of-the-art ED models and conduct a thorough evaluation on MAVEN. The experimental results show that existing ED methods cannot achieve promising results on MAVEN as on the small datasets, which suggests that ED in the real world remains a challenging task and requires further research efforts. We also discuss further directions for general domain ED with empirical analyses. The source code and dataset can be obtained from .", "phrases": ["event detection", "data scarcity", "maven"], "overall_score": 1.7051287921089413, "scores": [1.6995349310279777, 0.6111100517210092, 0.5443065127944913], "rank_score": 0.9516504985144927} -{"id": "giorgi-etal-2021-declutr", "title": "DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations", "abstract": "Sentence embeddings are an important component of many natural language processing (NLP) systems. Like word embeddings, sentence embeddings are typically learned on large text corpora and then transferred to various downstream tasks, such as clustering and retrieval. Unlike word embeddings, the highest performing solutions for learning sentence embeddings require labelled data, limiting their usefulness to languages and domains where labelled data is abundant. In this paper, we present DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations. Inspired by recent advances in deep metric learning (DML), we carefully design a self-supervised objective for learning universal sentence embeddings that does not require labelled training data. When used to extend the pretraining of transformer-based language models, our approach closes the performance gap between unsupervised and supervised pretraining for universal sentence encoders. Importantly, our experiments suggest that the quality of the learned embeddings scale with both the number of trainable parameters and the amount of unlabelled training data. 
Our code and pretrained models are publicly available and can be easily adapted to new domains or used to embed unseen text.", "phrases": ["deep contrastive learning", "unsupervised textual representations", "same document", "various nlp task"], "overall_score": 3.355483149121371, "scores": [0.9465507930311404, 0.9065502250736002, 1.3884774863170295, 0.5645929017042213], "rank_score": 0.9515428515314978} -{"id": "grenager-manning-2006-unsupervised", "title": "Unsupervised Discovery of a Statistical Verb Lexicon", "abstract": "This paper demonstrates how unsupervised techniques can be used to learn models of deep linguistic structure. Determining the semantic roles of a verb's dependents is an important step in natural language understanding. We present a method for learning models of verb argument patterns directly from unannotated text. The learned models are similar to existing verb lexicons such as VerbNet and PropBank, but additionally include statistics about the linkings used by each verb. The method is based on a structured probabilistic model of the domain, and unsupervised learning is performed with the EM algorithm. The learned models can also be used discriminatively as semantic role labelers, and when evaluated relative to the PropBank annotation, the best learned model reduces 28% of the error between an informed baseline and an oracle upper bound.", "phrases": ["semantic role", "important step", "linking", "unsupervised learning", "graphical model"], "overall_score": 2.7502504008020185, "scores": [1.5347200003115147, 0.9023553948727709, 0.8999355584401089, 0.8804624692343769, 0.5401332643928768], "rank_score": 0.9515213374503295} -{"id": "voita-etal-2018-context", "title": "Context-Aware Neural Machine Translation Learns Anaphora Resolution", "abstract": "Standard machine translation systems process sentences in isolation and hence ignore extra-sentential information, even though extended context can both prevent mistakes in ambiguous cases and improve translation coherence. We introduce a context-aware neural machine translation model designed in such a way that the flow of information from the extended context to the translation model can be controlled and analyzed. We experiment with an English-Russian subtitles dataset, and observe that much of what is captured by our model deals with improving pronoun translation. We measure correspondences between induced attention distributions and coreference relations and observe that the model implicitly captures anaphora. It is consistent with gains for sentences where pronouns need to be gendered in translation. Besides improvements in anaphoric cases, the model also improves in overall BLEU, both over its context-agnostic version (+0.7) and over simple concatenation of the context and source sentences (+0.6).", "phrases": ["neural machine translation", "anaphora resolution", "additional encoder", "context sentence", "translation quality"], "overall_score": 3.621460870378016, "scores": [1.2522237520980157, 1.1755960307448947, 0.9031765441087287, 0.8258462881663001, 0.5998975478332053], "rank_score": 0.9513480325902288} -{"id": "hu-etal-2020-ocnli", "title": "OCNLI: Original Chinese Natural Language Inference", "abstract": "Despite the tremendous recent progress on natural language inference (NLI), driven largely by large-scale investment in new datasets (e.g., SNLI, MNLI) and advances in modeling, most progress has been limited to English due to a lack of reliable datasets for most of the world's languages. 
In this paper, we present the first large-scale NLI dataset (consisting of ~56,000 annotated sentence pairs) for Chinese called the Original Chinese Natural Language Inference dataset (OCNLI). Unlike recent attempts at extending NLI to other languages, our dataset does not rely on any automatic translation or non-expert annotation. Instead, we elicit annotations from native speakers specializing in linguistics. We follow closely the annotation protocol used for MNLI, but create new strategies for eliciting diverse hypotheses. We establish several baseline results on our dataset using state-of-the-art pre-trained models for Chinese, and find even the best performing models to be far outpaced by human performance (~12% absolute performance gap), making it a challenging new resource that we hope will help to accelerate progress in Chinese NLU. To the best of our knowledge, this is the first human-elicited MNLI-style corpus for a non-English language.", "phrases": ["chinese", "natural language inference", "large-scale nli dataset", "human performance", "ocnli"], "overall_score": 1.703638383007491, "scores": [2.382710950646397, 0.6535184044108825, 0.6119670444405614, 0.5851423005764769, 0.520754726711004], "rank_score": 0.9508186853570646} -{"id": "burkett-klein-2008-two", "title": "Two Languages are Better than One (for Syntactic Parsing)", "abstract": "We show that jointly parsing a bitext can substantially improve parse quality on both sides. In a maximum entropy bitext parsing model, we define a distribution over source trees, target trees, and node-to-node alignments between them. Features include monolingual parse scores and various measures of syntactic divergence. Using the translated portion of the Chinese treebank, our model is trained iteratively to maximize the marginal likelihood of training tree pairs, with alignments treated as latent variables. The resulting bitext parser outperforms state-of-the-art monolingual parser baselines by 2.5 F1 at predicting English side trees and 1.8 F1 at predicting Chinese side trees (the highest published numbers on these corpora). Moreover, these improved trees yield a 2.4 BLEU increase when used in a downstream MT evaluation.", "phrases": ["syntactic parsing", "bitext", "side", "parallel data", "log-linear model"], "overall_score": 3.0598900129494173, "scores": [0.951520528640193, 1.281687588707564, 1.1010968682400348, 0.8275777736604942, 0.5911586431405531], "rank_score": 0.950608280477768} -{"id": "zhang-etal-2019-integrating", "title": "Integrating Semantic Knowledge to Tackle Zero-shot Text Classification", "abstract": "Insufficient or even unavailable training data of emerging classes is a big challenge of many classification tasks, including text classification. Recognising text documents of classes that have never been seen in the learning stage, so-called zero-shot text classification, is therefore difficult and only limited previous works tackled this problem. In this paper, we propose a two-phase framework together with data augmentation and feature augmentation to solve this problem. Four kinds of semantic knowledge (word embeddings, class descriptions, class hierarchy, and a general knowledge graph) are incorporated into the proposed framework to deal with instances of unseen classes effectively. 
Experimental results show that each and the combination of the two phases achieve the best overall accuracy compared with baselines and recent approaches in classifying real-world texts under the zero-shot scenario.", "phrases": ["semantic knowledge", "zero-shot text classification", "word embedding"], "overall_score": 1.8496262885544, "scores": [1.692430745971434, 0.6285794328888528, 0.5305494722145744], "rank_score": 0.9505198836916203} -{"id": "goyal-etal-2010-automatically", "title": "Automatically Producing Plot Unit Representations for Narrative Text", "abstract": "In the 1980s, plot units were proposed as a conceptual knowledge structure for representing and summarizing narrative stories. Our research explores whether current NLP technology can be used to automatically produce plot unit representations for narrative text. We create a system called AESOP that exploits a variety of existing resources to identify affect states and applies \"projection rules\" to map the affect states onto the characters in a story. We also use corpus-based techniques to generate a new type of affect knowledge base: verbs that impart positive or negative states onto their patients (e.g., being eaten is an undesirable state, but being fed is a desirable state). We harvest these \"patient polarity verbs\" from a Web corpus using two techniques: co-occurrence with Evil/Kind Agent patterns, and bootstrapping over conjunctions of verbs. We evaluate the plot unit representations produced by our system on a small collection of Aesop's fables.", "phrases": ["plot unit representation", "narrative text", "aesop", "character", "polarity verb"], "overall_score": 2.633756127908067, "scores": [0.9496045345281747, 1.2879931618981462, 1.0476721329774863, 0.8815951182243578, 0.5827686831766932], "rank_score": 0.9499267261609716} -{"id": "rashkin-etal-2020-plotmachines", "title": "PlotMachines: Outline-Conditioned Generation with Dynamic Plot State Tracking", "abstract": "We propose the task of outline-conditioned story generation: given an outline as a set of phrases that describe key characters and events to appear in a story, the task is to generate a coherent narrative that is consistent with the provided outline. This task is challenging as the input only provides a rough sketch of the plot, and thus, models need to generate a story by interweaving the key points provided in the outline. This requires the model to keep track of the dynamic states of the latent plot, conditioning on the input outline while generating the full story. We present PlotMachines, a neural narrative model that learns to transform an outline into a coherent story by tracking the dynamic plot states. In addition, we enrich PlotMachines with high-level discourse structure so that the model can learn different writing styles corresponding to different parts of the narrative. 
Comprehensive experiments over three fiction and non-fiction datasets demonstrate that large-scale language models, such as GPT-2 and Grover, despite their impressive generation performance, are not sufficient in generating coherent narratives for the given outline, and dynamic plot state tracking is important for composing narratives with tighter, more consistent plots.", "phrases": ["plot state tracking", "story generation", "outline", "plotmachines"], "overall_score": 2.506768074959125, "scores": [0.9128427570186822, 1.093429669238227, 0.9617682268725176, 0.8314494857597955], "rank_score": 0.9498725347223057} -{"id": "yang-etal-2015-hierarchical", "title": "A Hierarchical Distance-dependent Bayesian Model for Event Coreference Resolution", "abstract": "We present a novel hierarchical distance-dependent Bayesian model for event coreference resolution. While existing generative models for event coreference resolution are completely unsupervised, our model allows for the incorporation of pairwise distances between event mentions \u2014 information that is widely used in supervised coreference models to guide the generative clustering processing for better event clustering both within and across documents. We model the distances between event mentions using a feature-rich learnable distance function and encode them as Bayesian priors for nonparametric clustering. Experiments on the ECB+ corpus show that our model outperforms state-of-the-art methods for both within- and cross-document event coreference resolution.", "phrases": ["distance-dependent bayesian model", "event coreference resolution", "cluster", "complex structure", "semantic role"], "overall_score": 2.572210174449652, "scores": [2.0120412804303816, 0.8374228530710252, 0.8458551543296562, 0.5332207626869307, 0.5206521420837732], "rank_score": 0.9498384385203533} -{"id": "peyrard-2019-studying", "title": "Studying Summarization Evaluation Metrics in the Appropriate Scoring Range", "abstract": "In summarization, automatic evaluation metrics are usually compared based on their ability to correlate with human judgments. Unfortunately, the few existing human judgment datasets have been created as by-products of the manual evaluations performed during the DUC/TAC shared tasks. However, modern systems are typically better than the best systems submitted at the time of these shared tasks. We show that, surprisingly, evaluation metrics which behave similarly on these datasets (average-scoring range) strongly disagree in the higher-scoring range in which current systems now operate. It is problematic because metrics disagree yet we can't decide which one to trust. This is a call for collecting human judgments for high-scoring summaries as this would resolve the debate over which metrics to trust. This would also be greatly beneficial to further improve summarization systems and metrics alike.", "phrases": ["summarization", "evaluation metric", "higher-scoring range"], "overall_score": 2.2769599125322517, "scores": [1.3550391970465085, 0.8671994684609645, 0.62645944699393], "rank_score": 0.9495660375004676} -{"id": "poon-vanderwende-2010-joint", "title": "Joint Inference for Knowledge Extraction from Biomedical Literature", "abstract": "Knowledge extraction from online repositories such as PubMed holds the promise of dramatically speeding up biomedical research and drug design. 
After initially focusing on recognizing proteins and binary interactions, the community has recently shifted their attention to the more ambitious task of recognizing complex, nested event structures. State-of-the-art systems use a pipeline architecture in which the candidate events are identified first, and subsequently the arguments. This fails to leverage joint inference among events and arguments for mutual disambiguation. Some joint approaches have been proposed, but they still lag much behind in accuracy. In this paper, we present the first joint approach for bio-event extraction that obtains state-of-the-art results. Our system is based on Markov logic and adopts a novel formulation by jointly predicting events and arguments, as well as individual dependency edges that compose the argument paths. On the BioNLP'09 Shared Task dataset, it reduced F1 errors by more than 10% compared to the previous best joint approach.", "phrases": ["knowledge extraction", "markov logic", "joint inference", "event extraction", "mlns"], "overall_score": 2.6327227273044165, "scores": [1.8879975862860294, 0.9439092285512499, 0.8583588212481907, 0.5321405874638342, 0.5253638048478813], "rank_score": 0.9495540056794372} -{"id": "chrupala-etal-2017-representations", "title": "Representations of language in a model of visually grounded speech signal", "abstract": "We present a visually grounded model of speech perception which projects spoken utterances and images to a joint semantic space. We use a multi-layer recurrent highway network to model the temporal nature of spoken speech, and show that it learns to extract both form and meaning-based linguistic knowledge from the input signal. We carry out an in-depth analysis of the representations used by different components of the trained model and show that encoding of semantic aspects tends to become richer as we go up the hierarchy of layers, whereas encoding of form-related aspects of the language input tends to initially increase and then plateau or decrease.", "phrases": ["speech signal", "image", "semantic information"], "overall_score": 2.276652855971766, "scores": [0.8001081597072316, 1.2188099028354513, 0.8293958923635538], "rank_score": 0.9494379849687457} -{"id": "lazaridou-etal-2015-combining", "title": "Combining Language and Vision with a Multimodal Skip-gram Model", "abstract": "We extend the SKIP-GRAM model of Mikolov et al. (2013a) by taking visual information into account. Like SKIP-GRAM, our multimodal models (MMSKIP-GRAM) build vector-based word representations by learning to predict linguistic contexts in text corpora. However, for a restricted set of words, the models are also exposed to visual representations of the objects they denote (extracted from natural images), and must predict linguistic and visual features jointly. The MMSKIP-GRAM models achieve good performance on a variety of semantic benchmarks. Moreover, since they propagate visual information to all words, we use them to improve image labeling and retrieval in the zero-shot setup, where the test concepts are never seen during model training. 
Finally, the MMSKIP-GRAM models discover intriguing visual properties of abstract words, paving the way to realistic implementations of embodied theories of meaning.", "phrases": ["vision", "multimodal skip-gram model", "word embedding", "fusion approach"], "overall_score": 3.2273420026067514, "scores": [1.6835114010242098, 0.999933635095134, 0.5753045876897732, 0.5367866423390809], "rank_score": 0.9488840665370495} -{"id": "venugopalan-etal-2015-translating", "title": "Translating Videos to Natural Language Using Deep Recurrent Neural Networks", "abstract": "Solving the visual symbol grounding problem has long been a goal of artificial intelligence. The field appears to be advancing closer to this goal with recent breakthroughs in deep learning for natural language grounding in static images. In this paper, we propose to translate videos directly to sentences using a unified deep neural network with both convolutional and recurrent structure. Described video datasets are scarce, and most existing methods have been applied to toy domains with a small vocabulary of possible words. By transferring knowledge from 1.2M+ images with category labels and 100,000+ images with captions, our method is able to create sentence descriptions of open-domain videos with large vocabularies. We compare our approach with recent work using language generation metrics, subject, verb, and object prediction accuracy, and a human evaluation.", "phrases": ["video", "image captioning", "cnn-rnn encoder-decoder framework"], "overall_score": 2.433391810073158, "scores": [1.2457740920848954, 1.0517852683575055, 0.5485691250900647], "rank_score": 0.9487094951774885} -{"id": "lu-etal-2015-deep", "title": "Deep Multilingual Correlation for Improved Word Embeddings", "abstract": "Word embeddings have been found useful for many NLP tasks, including part-of-speech tagging, named entity recognition, and parsing. Adding multilingual context when learning embeddings can improve their quality, for example via canonical correlation analysis (CCA) on embeddings from two languages. In this paper, we extend this idea to learn deep non-linear transformations of word embeddings of the two languages, using the recently proposed deep canonical correlation analysis. The resulting embeddings, when evaluated on multiple word and bigram similarity tasks, consistently improve over monolingual embeddings and over embeddings transformed with linear CCA.", "phrases": ["multilingual context", "canonical correlation analysis", "semantic space"], "overall_score": 2.7419459940543294, "scores": [1.3809305563226568, 0.9336037248326998, 0.5314103485245252], "rank_score": 0.9486482098932939} -{"id": "sheth-etal-2021-bootstrapping", "title": "Bootstrapping Multilingual AMR with Contextual Word Alignments", "abstract": "We develop high performance multilingual Abstract Meaning Representation (AMR) systems by projecting English AMR annotations to other languages with weak supervision. We achieve this goal by bootstrapping transformer-based multilingual word embeddings, in particular those from cross-lingual RoBERTa (XLM-R large). We develop a novel technique for foreign-text-to-English AMR alignment, using the contextual word alignment between English and foreign language tokens. This word alignment is weakly supervised and relies on the contextualized XLM-R word embeddings. 
We achieve a highly competitive performance that surpasses the best published results for German, Italian, Spanish and Chinese.", "phrases": ["amr", "word alignment", "annotation projection"], "overall_score": 1.699068378415937, "scores": [1.6889401414122662, 0.5866858211859582, 0.5691783890951667], "rank_score": 0.9482681172311304} -{"id": "denkowski-etal-2012-cmu", "title": "The CMU-Avenue French-English Translation System", "abstract": "This paper describes the French-English translation system developed by the Avenue research group at Carnegie Mellon University for the Seventh Workshop on Statistical Machine Translation (NAACL WMT12). We present a method for training data selection, a description of our hierarchical phrase-based translation system, and a discussion of the impact of data size on best practice for system building.", "phrases": ["french-english translation system", "word alignment", "qe-clean system"], "overall_score": 2.273710554426893, "scores": [0.975916066052565, 1.3460251369401195, 0.522691646764904], "rank_score": 0.9482109499191962} -{"id": "das-etal-2022-container", "title": "CONTaiNER: Few-Shot Named Entity Recognition via Contrastive Learning", "abstract": "Named Entity Recognition (NER) in Few-Shot setting is imperative for entity tagging in low resource domains. Existing approaches only learn class-specific semantic features and intermediate representations from source domains. This affects generalizability to unseen target domains, resulting in suboptimal performances. To this end, we present CONTaiNER, a novel contrastive learning technique that optimizes the inter-token distribution distance for Few-Shot NER. Instead of optimizing class-specific attributes, CONTaiNER optimizes a generalized objective of differentiating between token categories based on their Gaussian-distributed embeddings. This effectively alleviates overfitting issues originating from training domains. Our experiments in several traditional test domains (OntoNotes, CoNLL'03, WNUT '17, GUM) and a new large scale Few-Shot NER dataset (Few-NERD) demonstrate that on average, CONTaiNER outperforms previous methods by 3%-13% absolute F1 points while showing consistent performance trends, even in challenging scenarios where previous approaches could not achieve appreciable performance.", "phrases": ["entity recognition", "contrastive learning", "few-shot ner"], "overall_score": 1.9714646100864426, "scores": [1.3267894600218448, 0.9033039979630869, 0.6141287582748732], "rank_score": 0.9480740720866017} -{"id": "cook-stevenson-2009-unsupervised", "title": "An Unsupervised Model for Text Message Normalization", "abstract": "Cell phone text messaging users express themselves briefly and colloquially using a variety of creative forms. We analyze a sample of creative, non-standard text message word forms to determine frequent word formation processes in texting language. Drawing on these observations, we construct an unsupervised noisy-channel model for text message normalization. 
On a test set of 303 text message forms that differ from their standard form, our model achieves 59% accuracy, which is on par with the best supervised results reported on this dataset.", "phrases": ["text message normalization", "word formation process", "noisy channel model", "probabilistic model", "variation"], "overall_score": 3.0507476163085023, "scores": [1.0015444161851355, 1.6500179127238779, 0.9424957102433198, 0.57818522021053, 0.5665969169794756], "rank_score": 0.9477680352684675} -{"id": "yu-etal-2020-ch", "title": "CH-SIMS: A Chinese Multimodal Sentiment Analysis Dataset with Fine-grained Annotation of Modality", "abstract": "Previous studies in multimodal sentiment analysis have used limited datasets, which only contain unified multimodal annotations. However, the unified annotations do not always reflect the independent sentiment of single modalities and limit the model to capture the difference between modalities. In this paper, we introduce a Chinese single- and multi-modal sentiment analysis dataset, CH-SIMS, which contains 2,281 refined video segments in the wild with both multimodal and independent unimodal annotations. It allows researchers to study the interaction between modalities or use independent unimodal annotations for unimodal sentiment analysis. Furthermore, we propose a multi-task learning framework based on late fusion as the baseline. Extensive experiments on the CH-SIMS show that our methods achieve state-of-the-art performance and learn more distinctive unimodal representations. The full dataset and codes are available for use at .", "phrases": ["multimodal sentiment analysis", "unimodal annotation", "ch-sims"], "overall_score": 1.313846254210561, "scores": [1.6606636644766362, 0.6383481500737943, 0.5442073986097443], "rank_score": 0.9477397377200583} -{"id": "xiong-etal-2018-one", "title": "One-Shot Relational Learning for Knowledge Graphs", "abstract": "Knowledge graphs (KG) are the key components of various natural language processing applications. To further expand KGs' coverage, previous studies on knowledge graph completion usually require a large number of positive examples for each relation. However, we observe long-tail relations are actually more common in KGs and those newly added relations often do not have many known triples for training. In this work, we aim at predicting new facts under a challenging setting where only one training instance is available. We propose a one-shot relational learning framework, which utilizes the knowledge distilled by embedding models and learns a matching metric by considering both the learned embeddings and one-hop graph structures. Empirically, our model yields considerable performance improvements over existing embedding models, and also eliminates the need of re-training the embedding models when dealing with newly added relations.", "phrases": ["relational learning", "knowledge graph", "triple", "graph structure", "one-shot learning"], "overall_score": 2.5662826479236314, "scores": [0.8593207873943989, 1.4468565581444486, 1.2551428053090763, 0.6077062913106043, 0.5692214804226744], "rank_score": 0.9476495845162406} -{"id": "geva-etal-2020-injecting", "title": "Injecting Numerical Reasoning Skills into Language Models", "abstract": "Large pre-trained language models (LMs) are known to encode substantial amounts of linguistic information. However, high-level reasoning skills, such as numerical reasoning, are difficult to learn from a language-modeling objective only. 
Consequently, existing models for numerical reasoning have used specialized architectures with limited flexibility. In this work, we show that numerical reasoning is amenable to automatic data generation, and thus one can inject this skill into pre-trained LMs, by generating large amounts of data, and training in a multi-task setup. We show that pre-training our model, GenBERT, on this data, dramatically improves performance on DROP (49.3 \u2013 72.3 F1), reaching performance that matches state-of-the-art models of comparable size, while using a simple and general-purpose encoder-decoder architecture. Moreover, GenBERT generalizes well to math word problem datasets, while maintaining high performance on standard RC tasks. Our approach provides a general recipe for injecting skills into large pre-trained LMs, whenever the skill is amenable to automatic data augmentation.", "phrases": ["numerical reasoning", "skill", "language model", "large amount", "drop"], "overall_score": 2.6270925206811873, "scores": [1.3325925421825278, 1.2042033012871396, 1.0713981435366209, 0.5710013629541805, 0.558421339468576], "rank_score": 0.9475233378858089} -{"id": "darwish-mubarak-2016-farasa", "title": "Farasa: A New Fast and Accurate Arabic Word Segmenter", "abstract": "In this paper, we present Farasa (meaning insight in Arabic), which is a fast and accurate Arabic segmenter. Segmentation involves breaking Arabic words into their constituent clitics. Our approach is based on SVMrank using linear kernels. The features that we utilized account for: likelihood of stems, prefixes, suffixes, and their combination; presence in lexicons containing valid stems and named entities; and underlying stem templates. Farasa outperforms or equalizes state-of-the-art Arabic segmenters, namely QATARA and MADAMIRA. Meanwhile, Farasa is nearly one order of magnitude faster than QATARA and two orders of magnitude faster than MADAMIRA. The segmenter should be able to process one billion words in less than 5 hours. Farasa is written entirely in native Java, with no external dependencies, and is open-source.", "phrases": ["segmentation", "arabic segmenter", "farasa"], "overall_score": 1.3135394634412692, "scores": [1.654038155729092, 0.6409240446926364, 0.547593104456275], "rank_score": 0.9475184349593345} -{"id": "zhang-etal-2007-improved", "title": "Improved chunk-level reordering for statistical machine translation", "abstract": "Inspired by previous chunk-level reordering approaches to statistical machine translation, this paper presents two methods to improve the reordering at the chunk level. By introducing a new lattice weighting factor and by reordering the training source data, an improvement is reported on TER and BLEU. Compared to the previous chunklevel reordering approach, the BLEU score improves 1.4% absolutely. The translation results are reported on IWSLT Chinese-English task.", "phrases": ["statistical machine translation", "chunk", "option", "psmt system", "discrepancy"], "overall_score": 2.834996402281584, "scores": [0.934579501683119, 1.2245592740306432, 1.2085328435449014, 0.8401539295905285, 0.5238996912676136], "rank_score": 0.9463450480233611} -{"id": "seo-etal-2015-solving", "title": "Solving Geometry Problems: Combining Text and Diagram Interpretation", "abstract": "This paper introduces GEOS, the first automated system to solve unaltered SAT geometry questions by combining text understanding and diagram interpretation. 
We model the problem of understanding geometry questions as submodular optimization, and identify a formal problem description likely to be compatible with both the question text and diagram. GEOS then feeds the description to a geometric solver that attempts to determine the correct answer. In our experiments, GEOS achieves a 49% score on official SAT questions, and a score of 61% on practice questions. Finally, we show that by integrating textual and visual information, GEOS boosts the accuracy of dependency and semantic parsing of the question text.", "phrases": ["geometry problem", "diagram interpretation", "geos", "reasoning"], "overall_score": 2.2688916348596173, "scores": [1.4467699715432962, 0.9103784949664039, 0.853635772528739, 0.5740209784334569], "rank_score": 0.946201304367974} -{"id": "boston-etal-2008-surprising", "title": "Surprising Parser Actions and Reading Difficulty", "abstract": "An incremental dependency parser's probability model is entered as a predictor in a linear mixed-effects model of German readers' eye-fixation durations. This dependency-based predictor improves a baseline that takes into account word length, n-gram probability, and Cloze predictability that are typically applied in models of human reading. This improvement obtains even when the dependency parser explores a tiny fraction of its search space, as suggested by narrow-beam accounts of human sentence processing such as Garden Path theory.", "phrases": ["dependency parser", "predictor", "surprisal"], "overall_score": 2.3499892402072304, "scores": [1.1134547834066733, 0.8903327616294633, 0.8333281756784261], "rank_score": 0.9457052402381875} -{"id": "uryupina-poesio-2012-domain", "title": "Domain-specific vs. Uniform Modeling for Coreference Resolution", "abstract": "Several corpora annotated for coreference have been made available in the past decade. These resources differ with respect to their size and the underlying structure: the number of domains and their similarity. Our study compares domain-specific models, learned from small heterogeneous subsets of the investigated corpora, against uniform models, that utilize all the available data. We show that for knowledge-poor baseline systems, domain-specific and uniform modeling yield the same results. Systems, relying on large amounts of linguistic knowledge, however, exhibit differences in their performance: with all the designed features in use, domain-specific models suffer from over-fitting, whereas with pre-selected feature sets they tend to outperform union models.", "phrases": ["uniform modeling", "coreference resolution", "domain adaptation"], "overall_score": 1.6941072005752904, "scores": [1.5044564911580705, 0.802879493947143, 0.5291617083689514], "rank_score": 0.9454992311580549} -{"id": "ma-xia-2014-unsupervised", "title": "Unsupervised Dependency Parsing with Transferring Distribution via Parallel Guidance and Entropy Regularization", "abstract": "We present a novel approach for inducing unsupervised dependency parsers for languages that have no labeled training data, but have translated text in a resource-rich language. We train probabilistic parsing models for resource-poor languages by transferring cross-lingual knowledge from a resource-rich language with entropy regularization. Our method can be used as a purely monolingual dependency parser, requiring no human translations for the test data, thus making it applicable to a wide range of resource-poor languages. 
We perform experiments on three data sets \u2014 Version 1.0 and Version 2.0 of Google Universal Dependency Treebanks and Treebanks from CoNLL shared-tasks, across ten languages. We obtain state-of-the-art performance on all three data sets when compared with previously studied unsupervised and projected parsing systems.", "phrases": ["entropy regularization", "dependency parser", "resource-rich language", "parallel data", "annotation projection"], "overall_score": 2.732726226320295, "scores": [0.8546095997503929, 1.4669477622310985, 0.9916659169488051, 0.8770918939056326, 0.5369767730094459], "rank_score": 0.9454583891690749} -{"id": "barikeri-etal-2021-redditbias", "title": "RedditBias: A Real-World Resource for Bias Evaluation and Debiasing of Conversational Language Models", "abstract": "Text representation models are prone to exhibit a range of societal biases, reflecting the non-controlled and biased nature of the underlying pretraining data, which consequently leads to severe ethical issues and even bias amplification. Recent work has predominantly focused on measuring and mitigating bias in pretrained language models. Surprisingly, the landscape of bias measurements and mitigation resources and methods for conversational language models is still very scarce: it is limited to only a few types of bias, artificially constructed resources, and completely ignores the impact that debiasing methods may have on the final performance in dialog tasks, e.g., conversational response generation. In this work, we present REDDITBIAS, the first conversational data set grounded in the actual human conversations from Reddit, allowing for bias measurement and mitigation across four important bias dimensions: gender, race, religion, and queerness. Further, we develop an evaluation framework which simultaneously 1) measures bias on the developed REDDITBIAS resource, and 2) evaluates model capability in dialog tasks after model debiasing. We use the evaluation framework to benchmark the widely used conversational DialoGPT model along with the adaptations of four debiasing methods. Our results indicate that DialoGPT is biased with respect to religious groups and that some debiasing techniques can remove this bias while preserving downstream task performance.", "phrases": ["language model", "conversational data", "redditbias"], "overall_score": 1.5214679353899594, "scores": [1.665259480902191, 0.6077578108555728, 0.5630062484524407], "rank_score": 0.9453411800700682} -{"id": "yuan-etal-2020-emotion", "title": "Emotion-Cause Pair Extraction as Sequence Labeling Based on A Novel Tagging Scheme", "abstract": "The task of emotion-cause pair extraction deals with finding all emotions and the corresponding causes in unannotated emotion texts. Most recent studies are based on the likelihood of Cartesian product among all clause candidates, resulting in a high computational cost. Targeting this issue, we regard the task as a sequence labeling problem and propose a novel tagging scheme with coding the distance between linked components into the tags, so that emotions and the corresponding causes can be extracted simultaneously. Accordingly, an end-to-end model is presented to process the input texts from left to right, always with linear time complexity, leading to a speed up. 
Experimental results show that our proposed model achieves the best performance, outperforming the state-of-the-art method by 2.26% (p < 0.001) in F1 measure.", "phrases": ["novel tagging scheme", "emotion", "cause"], "overall_score": 1.5214516722830944, "scores": [1.7378061169095969, 0.5726882045752862, 0.5254989042160023], "rank_score": 0.9453310752336285} -{"id": "shen-etal-2021-directed", "title": "Directed Acyclic Graph Network for Conversational Emotion Recognition", "abstract": "The modeling of conversational context plays a vital role in emotion recognition from conversation (ERC). In this paper, we put forward a novel idea of encoding the utterances with a directed acyclic graph (DAG) to better model the intrinsic structure within a conversation, and design a directed acyclic neural network, namely DAG-ERC, to implement this idea. In an attempt to combine the strengths of conventional graph-based neural models and recurrence-based neural models, DAG-ERC provides a more intuitive way to model the information flow between long-distance conversation background and nearby context. Extensive experiments are conducted on four ERC benchmarks with state-of-the-art models employed as baselines for comparison. The empirical results demonstrate the superiority of this new model and confirm the motivation of the directed acyclic graph architecture for ERC.", "phrases": ["acyclic graph", "conversation", "graph-based neural model"], "overall_score": 1.8389956089896662, "scores": [1.1487969433663159, 1.1252669301779357, 0.5611065117108677], "rank_score": 0.9450567950850397} -{"id": "chen-etal-2019-multi-source", "title": "Multi-Source Cross-Lingual Model Transfer: Learning What to Share", "abstract": "Modern NLP applications have enjoyed a great boost utilizing neural network models. Such deep neural models, however, are not applicable to most human languages due to the lack of annotated training data for various NLP tasks. Cross-lingual transfer learning (CLTL) is a viable method for building NLP models for a low-resource target language by leveraging labeled data from other (source) languages. In this work, we focus on the multilingual transfer setting where training data in multiple source languages is leveraged to further boost target language performance. Unlike most existing methods that rely only on language-invariant features for CLTL, our approach coherently utilizes both language-invariant and language-specific features at instance level. Our model leverages adversarial networks to learn language-invariant features, and mixture-of-experts models to dynamically exploit the similarity between the target language and each individual source language. This enables our model to learn effectively what to share between various languages in the multilingual setup. Moreover, when coupled with unsupervised multilingual embeddings, our model can operate in a zero-resource setting where neither target language training data nor cross-lingual resources are available. 
Our model achieves significant performance gains over prior art, as shown in an extensive set of experiments over multiple text classification and sequence tagging tasks including a large-scale industry dataset.", "phrases": ["various nlp task", "cross-lingual transfer learning", "source language", "language-invariant feature", "mixture-of-expert model"], "overall_score": 2.962756880367732, "scores": [1.6418310149601143, 1.0405037687679397, 0.8314221141185969, 0.6596387776198911, 0.5511496056507605], "rank_score": 0.9449090562234606} -{"id": "szarvas-etal-2012-cross", "title": "Cross-Genre and Cross-Domain Detection of Semantic Uncertainty", "abstract": "Uncertainty is an important linguistic phenomenon that is relevant in various Natural Language Processing applications, in diverse genres from medical to community generated, newswire or scientific discourse, and domains from science to humanities. The semantic uncertainty of a proposition can be identified in most cases by using a finite dictionary (i.e., lexical cues) and the key steps of uncertainty detection in an application include the steps of locating the (genre- and domain-specific) lexical cues, disambiguating them, and linking them with the units of interest for the particular application (e.g., identified events in information extraction). In this study, we focus on the genre and domain differences of the context-dependent semantic uncertainty cue recognition task. We introduce a unified subcategorization of semantic uncertainty as different domain applications can apply different uncertainty categories. Based on this categorization, we normalized the annotation of three corpora and present results with a state-of-the-art uncertainty cue recognition model for four fine-grained categories of semantic uncertainty. Our results reveal the domain and genre dependence of the problem; nevertheless, we also show that even a distant source domain data set can contribute to the recognition and disambiguation of uncertainty cues, efficiently reducing the annotation costs needed to cover a new domain. Thus, the unified subcategorization and domain adaptation for training the models offer an efficient solution for cross-domain and cross-genre semantic uncertainty recognition.", "phrases": ["detection", "semantic uncertainty", "unified subcategorization", "wikipedia article"], "overall_score": 2.4933095621188457, "scores": [1.9036651660940196, 0.7907160268465786, 0.5542245003210297, 0.5304854756025806], "rank_score": 0.944772792216052} -{"id": "alkhouli-ney-2017-biasing", "title": "Biasing Attention-Based Recurrent Neural Networks Using External Alignment Information", "abstract": "This work explores extending attention-based neural models to include alignment information as input. We modify the attention component to have dependence on the current source position. The attention model is then used as a lexical model together with an additional alignment model to generate translation. The attention model is trained using external alignment information, and it is applied in decoding by performing beam search over the lexical and alignment hypotheses. The alignment model is used to score these alignment candidates. We demonstrate that the attention layer is capable of using the alignment information to improve over the baseline attention model that uses no such alignments. 
Our experiments are performed on two tasks: WMT 2016 English \u2192 Romanian and WMT 2017 German \u2192 English.", "phrases": ["external alignment information", "attention component", "neural machine translation"], "overall_score": 2.0757181210885074, "scores": [1.672437922994905, 0.5852423650014278, 0.57641979785604], "rank_score": 0.9447000286174575} -{"id": "marino-etal-2006-n", "title": "N-gram-based Machine Translation", "abstract": "This article describes in detail an n-gram approach to statistical machine translation. This approach consists of a log-linear combination of a translation model based on n-grams of bilingual units, which are referred to as tuples, along with four specific feature functions. Translation performance, which happens to be in the state of the art, is demonstrated with Spanish-to-English and English-to-Spanish translations of the European Parliament Plenary Sessions (EPPS).", "phrases": ["machine translation", "bilingual unit", "tuple", "n-gram-based approach", "smt framework"], "overall_score": 3.112506809136659, "scores": [0.9526106813854306, 1.463607598395841, 1.0700420268212671, 0.6616090387105108, 0.5740069727191471], "rank_score": 0.9443752636064392} -{"id": "danilevsky-etal-2020-survey", "title": "A Survey of the State of Explainable AI for Natural Language Processing", "abstract": "Recent years have seen important advances in the quality of state-of-the-art models, but this has come at the expense of models becoming less interpretable. This survey presents an overview of the current state of Explainable AI (XAI), considered within the domain of Natural Language Processing (NLP). We discuss the main categorization of explanations, as well as the various ways explanations can be arrived at and visualized. We detail the operations and explainability techniques currently available for generating explanations for NLP model predictions, to serve as a resource for model developers in the community. Finally, we point out the current gaps and encourage directions for future work in this important research area.", "phrases": ["survey", "explanation method", "model output", "xai method"], "overall_score": 2.4214391915906535, "scores": [2.152393052096067, 0.5684020428485724, 0.5343883600087871, 0.5210145967493776], "rank_score": 0.944049512925701} -{"id": "habash-2007-syntactic", "title": "Syntactic preprocessing for statistical machine translation", "abstract": "We describe an approach to automatic source-language syntactic preprocessing in the context of Arabic-English phrase-based machine translation. Source-language labeled dependencies, that are word aligned with target language words in a parallel corpus, are used to automatically extract syntactic reordering rules in the same spirit of Xia and McCord (2004) and Zhang et al. (2007). The extracted rules are used to reorder the source-language side of the training and test data. Our results show that when using monotonic decoding and translations for unigram source-language phrases only, source-language reordering gives very significant gains over no reordering (25% relative increase in BLEU score). With decoder distortion turned on and with access to all phrase translations, the differences in BLEU scores are diminished. However, an analysis of sentence-level BLEU scores shows reordering outperforms no-reordering in over 40% of the sentences. 
These results suggest that the approach holds big promise but much more work on Arabic parsing may be needed.", "phrases": ["parallel corpus", "side", "syntactic preprocessing", "other language", "parse quality"], "overall_score": 2.917608452965004, "scores": [2.10848702621829, 0.8585052398838847, 0.5870997127669482, 0.5856164704382278, 0.579748654549502], "rank_score": 0.9438914207713707} -{"id": "grefenstette-2013-towards", "title": "Towards a Formal Distributional Semantics: Simulating Logical Calculi with Tensors", "abstract": "The development of compositional distributional models of semantics reconciling the empirical aspects of distributional semantics with the compositional aspects of formal semantics is a popular topic in the contemporary literature. This paper seeks to bring this reconciliation one step further by showing how the mathematical constructs commonly used in compositional distributional models, such as tensors and matrices, can be used to simulate different aspects of predicate logic. This paper discusses how the canonical isomorphism between tensors and multilinear maps can be exploited to simulate a full-blown quantifier-free predicate calculus using tensors. It provides tensor interpretations of the set of logical connectives required to model propositional calculi. It suggests a variant of these tensor calculi capable of modelling quantifiers, using few non-linear operations. It finally discusses the relation between these variants, and how this relation should constitute the subject of future work.", "phrases": ["logical calculi", "distributional model", "tensor calculus"], "overall_score": 2.7278340758926176, "scores": [0.9004123757077979, 1.053170718574806, 0.8777143695545173], "rank_score": 0.9437658212790404} -{"id": "xie-etal-2013-semantic", "title": "Semantic Frames to Predict Stock Price Movement", "abstract": "Semantic frames are a rich linguistic resource. There has been much work on semantic frame parsers, but less that applies them to general NLP problems. We address a task to predict change in stock price from financial news. Semantic frames help to generalize from specific sentences to scenarios, and to detect the (positive or negative) roles of specific companies. We introduce a novel tree representation, and use it to train predictive models with tree kernels using support vector machines. Our experiments test multiple text representations on two binary classification tasks, change of price and polarity. Experiments show that features derived from semantic frame parsing have significantly better performance across years on the polarity task.", "phrases": ["stock price movement", "semantic frame parser", "news", "specific company", "predictive model"], "overall_score": 2.5556803436213604, "scores": [0.9386081896293647, 1.747842993347235, 0.9248668017339418, 0.5818186082319493, 0.5255357983247889], "rank_score": 0.9437344782534559} -{"id": "denis-baldridge-2008-specialized", "title": "Specialized Models and Ranking for Coreference Resolution", "abstract": "This paper investigates two strategies for improving coreference resolution: (1) training separate models that specialize in particular types of mentions (e.g., pronouns versus proper nouns) and (2) using a ranking loss function rather than a classification function. In addition to being conceptually simple, these modifications of the standard single-model, classification-based approach also deliver significant performance improvements. 
Specifically, we show that on the ACE corpus both strategies produce f-score gains of more than 3% across the three coreference evaluation metrics (MUC, B3, and CEAF).", "phrases": ["coreference resolution", "mention", "classification function", "ranker", "candidate antecedent"], "overall_score": 2.778508655773253, "scores": [0.9458774032449002, 1.7590445901300895, 0.9210135556700902, 0.5474763488555685, 0.5448191054123134], "rank_score": 0.9436462006625923} -{"id": "joty-etal-2015-codra", "title": "CODRA: A Novel Discriminative Framework for Rhetorical Analysis", "abstract": "Clauses and sentences rarely stand on their own in an actual discourse; rather, the relationship between them carries important information that allows the discourse to express a meaning as a whole beyond the sum of its individual parts. Rhetorical analysis seeks to uncover this coherence structure. In this article, we present CODRA\u2014 a COmplete probabilistic Discriminative framework for performing Rhetorical Analysis in accordance with Rhetorical Structure Theory, which posits a tree representation of a discourse. CODRA comprises a discourse segmenter and a discourse parser. First, the discourse segmenter, which is based on a binary classifier, identifies the elementary discourse units in a given text. Then the discourse parser builds a discourse tree by applying an optimal parsing algorithm to probabilities inferred from two Conditional Random Fields: one for intra-sentential parsing and the other for multi-sentential parsing. We present two approaches to combine these two stages of parsing effectively. By conducting a series of empirical evaluations over two different data sets, we demonstrate that CODRA significantly outperforms the state-of-the-art, often by a wide margin. We also show that a reranking of the k-best parse hypotheses generated by CODRA can potentially improve the accuracy even further.", "phrases": ["rhetorical analysis", "conditional random fields", "multi-sentential parsing", "codra", "unlabelled structure"], "overall_score": 2.8725260181961345, "scores": [1.7855574510756298, 0.9470811510092966, 0.8523735993610289, 0.5673380217089422, 0.5651811417052329], "rank_score": 0.9435062729720262} -{"id": "nepveu-etal-2004-adaptive", "title": "Adaptive Language and Translation Models for Interactive Machine Translation", "abstract": "We describe experiments carried out with adaptive language and translation models in the context of an interactive computer-assisted translation program. We developed cache-based language models which were then extended to the bilingual case for a cache-based translation model. We present the improvements we obtained in two contexts: in a theoretical setting, we achieved a drop in perplexity for the new models and, in a more practical situation simulating a user working with the system, we showed that fewer keystrokes would be needed to enter a translation.", "phrases": ["interactive machine translation", "adaptation", "model extension", "imt system", "cache-based translation model"], "overall_score": 2.615023767454634, "scores": [1.577288401220217, 0.9036590183966503, 0.8355871873484476, 0.8333957535102848, 0.5659219159163679], "rank_score": 0.9431704552783936} -{"id": "denero-etal-2009-fast", "title": "Fast Consensus Decoding over Translation Forests", "abstract": "The minimum Bayes risk (MBR) decoding objective improves BLEU scores for machine translation output relative to the standard Viterbi objective of maximizing model score. 
However, MBR targeting BLEU is prohibitively slow to optimize over k-best lists for large k. In this paper, we introduce and analyze an alternative to MBR that is equally effective at improving performance, yet is asymptotically faster --- running 80 times faster than MBR in experiments with 1000-best lists. Furthermore, our fast decoding procedure can select output sentences based on distributions over entire forests of translations, in addition to k-best lists. We evaluate our procedure on translation forests from two large-scale, state-of-the-art hierarchical machine translation systems. Our forest-based decoding objective consistently outperforms k-best list MBR, giving improvements of up to 1.0 BLEU.", "phrases": ["consensus", "forest", "minimum bayes risk"], "overall_score": 1.6899278877043031, "scores": [1.7748084553910812, 0.5336906771251598, 0.5210010041829826], "rank_score": 0.9431667122330745} -{"id": "imamura-etal-2018-enhancement", "title": "Enhancement of Encoder and Attention Using Target Monolingual Corpora in Neural Machine Translation", "abstract": "A large-scale parallel corpus is required to train encoder-decoder neural machine translation. The method of using synthetic parallel texts, in which target monolingual corpora are automatically translated into source sentences, is effective in improving the decoder, but is unreliable for enhancing the encoder. In this paper, we propose a method that enhances the encoder and attention using target monolingual corpora by generating multiple source sentences via sampling. By using multiple source sentences, diversity close to that of humans is achieved. Our experimental results show that the translation quality is improved by increasing the number of synthetic source sentences for each given target sentence, and quality close to that using a manually created parallel corpus was achieved.", "phrases": ["target monolingual corpora", "neural machine translation", "back-translation"], "overall_score": 2.5536765156474894, "scores": [0.9007580565680636, 0.8713465522949794, 1.0568789689983769], "rank_score": 0.9429945259538067} -{"id": "qin-etal-2021-neural", "title": "Neural-Symbolic Solver for Math Word Problems with Auxiliary Tasks", "abstract": "Previous math word problem solvers following the encoder-decoder paradigm fail to explicitly incorporate essential math symbolic constraints, leading to unexplainable and unreasonable predictions. Herein, we propose Neural-Symbolic Solver (NS-Solver) to explicitly and seamlessly incorporate different levels of symbolic constraints by auxiliary tasks. Our NS-Solver consists of a problem reader to encode problems, a programmer to generate symbolic equations, and a symbolic executor to obtain answers. Along with target expression supervision, our solver is also optimized via 4 new auxiliary objectives to enforce different symbolic reasoning: a) self-supervised number prediction task predicting both number quantity and number locations; b) commonsense constant prediction task predicting what prior knowledge (e.g. how many legs a chicken has) is required; c) program consistency checker computing the semantic loss between predicted equation and target equation to ensure reasonable equation mapping; d) duality exploiting task exploiting the quasi-duality between symbolic equation generation and problem's part-of-speech generation to enhance the understanding ability of a solver. 
Besides, to provide a more realistic and challenging benchmark for developing a universal and scalable solver, we also construct a new large-scale MWP benchmark CM17K consisting of 4 kinds of MWPs (arithmetic, one-unknown linear, one-unknown non-linear, equation set) with more than 17K samples. Extensive experiments on Math23K and our CM17K demonstrate the superiority of our NS-Solver compared to state-of-the-art methods.", "phrases": ["math word problem", "auxiliary task", "neural-symbolic solver"], "overall_score": 1.307215499492579, "scores": [1.4117814508748492, 0.8878851318683019, 0.529203394993549], "rank_score": 0.9429566592455666} -{"id": "chen-ji-2020-learning", "title": "Learning Variational Word Masks to Improve the Interpretability of Neural Text Classifiers", "abstract": "To build an interpretable neural text classifier, most of the prior work has focused on designing inherently interpretable models or finding faithful explanations. A new line of work on improving model interpretability has just started, and many existing methods require either prior information or human annotations as additional inputs in training. To address this limitation, we propose the variational word mask (VMASK) method to automatically learn task-specific important words and reduce irrelevant information on classification, which ultimately improves the interpretability of model predictions. The proposed method is evaluated with three neural text classifiers (CNN, LSTM, and BERT) on seven benchmark text classification datasets. Experiments show the effectiveness of VMASK in improving both model prediction accuracy and interpretability.", "phrases": ["variational word mask", "interpretability", "neural text classifier", "information bottleneck"], "overall_score": 1.8348921499928446, "scores": [1.692486667936945, 0.9549284597278399, 0.6014565614894894, 0.5229204480800889], "rank_score": 0.9429480343085908} -{"id": "ciaramita-altun-2006-broad", "title": "Broad-Coverage Sense Disambiguation and Information Extraction with a Supersense Sequence Tagger", "abstract": "In this paper we approach word sense disambiguation and information extraction as a unified tagging problem. The task consists of annotating text with the tagset defined by the 41 Wordnet supersense classes for nouns and verbs. Since the tagset is directly related to Wordnet synsets, the tagger returns partial word sense disambiguation. Furthermore, since the noun tags include the standard named entity detection classes -- person, location, organization, time, etc. -- the tagger, as a by-product, returns extended named entity information. We cast the problem of supersense tagging as a sequential labeling task and investigate it empirically with a discriminatively-trained Hidden Markov Model. Experimental evaluation on the main sense-annotated datasets available, i.e., Semcor and Senseval, shows considerable improvements over the best known \"first-sense\" baseline.", "phrases": ["information extraction", "tagset", "supersense tagging", "sense-grouping"], "overall_score": 2.776174498018194, "scores": [1.7719558408365765, 0.8839431204694724, 0.5619994069842111, 0.5535154971845385], "rank_score": 0.9428534663686997} -{"id": "huang-wang-2017-deep", "title": "Deep Residual Learning for Weakly-Supervised Relation Extraction", "abstract": "Deep residual learning (ResNet) is a new method for training very deep neural networks using identity mapping for shortcut connections. 
ResNet has won the ImageNet ILSVRC 2015 classification task, and achieved state-of-the-art performances in many computer vision tasks. However, the effect of residual learning on noisy natural language processing tasks is still not well understood. In this paper, we design a novel convolutional neural network (CNN) with residual learning, and investigate its impacts on the task of distantly supervised noisy relation extraction. Contrary to popular belief that ResNet only works well for very deep networks, we found that even with 9 layers of CNNs, using identity mapping could significantly improve the performance for distantly-supervised relation extraction.", "phrases": ["relation extraction", "identity mapping", "cnn", "deep residual learning"], "overall_score": 1.6891791248109935, "scores": [1.8833252864228671, 0.8253512287519081, 0.5345141635488264, 0.5278046000986023], "rank_score": 0.9427488197055511} -{"id": "keller-2004-entropy", "title": "The Entropy Rate Principle as a Predictor of Processing Effort: An Evaluation against Eye-tracking Data", "abstract": "This paper provides evidence for Genzel and Charniak\u2019s (2002) entropy rate principle, which predicts that the entropy of a sentence increases with its position in the text. We show that this principle holds for individual sentences (not just for averages), but we also find that the entropy rate effect is partly an artifact of sentence length, which also correlates with sentence position. Secondly, we evaluate a set of predictions that the entropy rate principle makes for human language processing; using a corpus of eye-tracking data, we show that entropy and processing effort are correlated, and that processing effort is constant throughout a text.", "phrases": ["entropy rate principle", "processing effort", "eye-tracking data", "sentence position", "signal"], "overall_score": 1.8342609768779399, "scores": [1.8396595298588976, 0.9191678863342377, 0.906783426414077, 0.5243484938473993, 0.5231590410008532], "rank_score": 0.9426236754910929} -{"id": "headden-iii-etal-2009-improving", "title": "Improving Unsupervised Dependency Parsing with Richer Contexts and Smoothing", "abstract": "Unsupervised grammar induction models tend to employ relatively simple models of syntax when compared to their supervised counterparts. Traditionally, the unsupervised models have been kept simple due to tractability and data sparsity concerns. In this paper, we introduce basic valence frames and lexical information into an unsupervised dependency grammar inducer and show how this additional information can be leveraged via smoothing. Our model produces state-of-the-art results on the task of unsupervised grammar induction, improving over the best previous work by almost 10 percentage points.", "phrases": ["smoothing", "grammar induction", "dependency model"], "overall_score": 2.8236999757220347, "scores": [1.3196938859002718, 0.9004380048585701, 0.6075907338388505], "rank_score": 0.9425742081992309} -{"id": "lyu-titov-2018-amr", "title": "AMR Parsing as Graph Prediction with Latent Alignment", "abstract": "Abstract meaning representations (AMRs) are broad-coverage sentence-level semantic representations. AMRs represent sentences as rooted labeled directed acyclic graphs. AMR parsing is challenging partly due to the lack of annotated alignments between nodes in the graphs and words in the corresponding sentences. 
We introduce a neural parser which treats alignments as latent variables within a joint probabilistic model of concepts, relations and alignments. As exact inference requires marginalizing over alignments and is infeasible, we use the variational autoencoding framework and a continuous relaxation of the discrete alignments. We show that joint modeling is preferable to using a pipeline of align and parse. The parser achieves the best reported results on the standard benchmark (74.4% on LDC2016E25).", "phrases": ["node", "latent variable", "probabilistic model", "amr parsing", "pipeline design"], "overall_score": 3.0706735453754885, "scores": [0.9552231189373318, 1.4040147682248965, 1.2972629713180415, 0.5304534153243966, 0.5254192079832484], "rank_score": 0.942474696357583} -{"id": "tafjord-etal-2021-proofwriter", "title": "ProofWriter: Generating Implications, Proofs, and Abductive Statements over Natural Language", "abstract": "Transformers have been shown to emulate logical deduction over natural language theories (logical rules expressed in natural language), reliably assigning true/false labels to candidate implications. However, their ability to generate implications of a theory has not yet been demonstrated, and methods for reconstructing proofs of answers are imperfect. In this work we show that a generative model, called ProofWriter, can reliably generate both implications of a theory and the natural language proof(s) that support them. In particular, iterating a 1-step implication generator results in proofs that are highly reliable, and represent actual model decisions (rather than post-hoc rationalizations). On the RuleTaker dataset, the accuracy of ProofWriter's proofs exceeds previous methods by +9% absolute, and in a way that generalizes to proof depths unseen in training and on out-of-domain problems. We also show that generative techniques can perform a type of abduction with high precision: Given a theory and an unprovable conclusion, identify a missing fact that allows the conclusion to be proved, along with a proof. These results significantly improve the viability of neural methods for systematically reasoning over natural language.", "phrases": ["abduction", "depth", "generative technique", "proofwriter", "entailment tree"], "overall_score": 2.1697877607164946, "scores": [2.445238078136989, 0.5974170903826689, 0.5799649787976616, 0.5507210060040058, 0.5382931035806098], "rank_score": 0.9423268513803869} -{"id": "clarke-grieve-2017-dimensions", "title": "Dimensions of Abusive Language on Twitter", "abstract": "In this paper, we use a new categorical form of multidimensional register analysis to identify the main dimensions of functional linguistic variation in a corpus of abusive language, consisting of racist and sexist Tweets. By analysing the use of a wide variety of parts-of-speech and grammatical constructions, as well as various features related to Twitter and computer-mediated communication, we discover three dimensions of linguistic variation in this corpus, which we interpret as being related to the degree of interactive, antagonistic and attitudinal language exhibited by individual Tweets. 
We then demonstrate that there is a significant functional difference between racist and sexist Tweets, with sexist Tweets tending to be more interactive and attitudinal than racist Tweets.", "phrases": ["abusive language", "twitter", "sexist tweet", "multidimensional linguistic variation"], "overall_score": 1.95936517901572, "scores": [1.824418764302899, 0.8742611662317238, 0.5429293255354857, 0.5274126466718853], "rank_score": 0.9422554756854985} -{"id": "hendrickx-etal-2013-semeval", "title": "SemEval-2013 Task 4: Free Paraphrases of Noun Compounds", "abstract": "In this paper, we describe SemEval-2013 Task 4: the definition, the data, the evaluation and the results. The task is to capture some of the meaning of English noun compounds via paraphrasing. Given a two-word noun compound, the participating system is asked to produce an explicitly ranked list of its free-form paraphrases. The list is automatically compared and evaluated against a similarly ranked list of paraphrases proposed by human annotators, recruited and managed through Amazon\u2019s Mechanical Turk. The comparison of raw paraphrases is sensitive to syntactic and morphological variation. The \u201cgold\u201d ranking is based on the relative popularity of paraphrases among annotators. To make the ranking more reliable, highly similar paraphrases are grouped, so as to downplay superficial differences in syntax and morphology. Three systems participated in the task. They all beat a simple baseline on one of the two evaluation measures, but not on both measures. This shows that the task is difficult.", "phrases": ["paraphrase", "noun compound", "annotator", "semeval-2013 task"], "overall_score": 2.485673525573774, "scores": [0.9521541322822434, 1.3781271437937566, 0.880085552567537, 0.5571504551680339], "rank_score": 0.9418793209528927} -{"id": "he-etal-2019-pun", "title": "Pun Generation with Surprise", "abstract": "We tackle the problem of generating a pun sentence given a pair of homophones (e.g., \u201cdied\u201d and \u201cdyed\u201d). Puns are by their very nature statistically anomalous and not amenable to most text generation methods that are supervised by a large corpus. In this paper, we propose an unsupervised approach to pun generation based on lots of raw (unhumorous) text and a surprisal principle. Specifically, we posit that in a pun sentence, there is a strong association between the pun word (e.g., \u201cdyed\u201d) and the distant context, but a strong association between the alternative word (e.g., \u201cdied\u201d) and the immediate context. We instantiate the surprisal principle in two ways: (i) as a measure based on the ratio of probabilities given by a language model, and (ii) a retrieve-and-edit approach based on words suggested by a skip-gram model. Based on human evaluation, our retrieve-and-edit approach generates puns successfully 30% of the time, doubling the success rate of a neural generation baseline.", "phrases": ["surprisal principle", "strong association", "immediate context", "pun generation"], "overall_score": 1.0346410700213802, "scores": [2.1014335245356404, 0.562438294014043, 0.5611158901685324, 0.5420958409322824], "rank_score": 0.9417708874126245} -{"id": "tars-fishel-2018-multi", "title": "Multi-Domain Neural Machine Translation", "abstract": "We present an approach to neural machine translation (NMT) that supports multiple domains in a single model and allows switching between the domains when translating. 
The core idea is to treat text domains as distinct languages and use multilingual NMT methods to create multi-domain translation systems; we show that this approach results in significant translation quality gains over fine-tuning. We also explore whether the knowledge of pre-specified text domains is necessary; it turns out that it is after all, but also that when it is not known, quite high translation quality can be reached, and even higher than with known domains in some cases.", "phrases": ["neural machine translation", "domain token", "multi-domain nmt", "cluster"], "overall_score": 2.168333146921354, "scores": [2.042165408629312, 0.5863605498326807, 0.5833695241891045, 0.55488499989233], "rank_score": 0.9416951206358568} -{"id": "flachs-etal-2020-grammatical", "title": "Grammatical Error Correction in Low Error Density Domains: A New Benchmark and Analyses", "abstract": "Evaluation of grammatical error correction (GEC) systems has primarily focused on essays written by non-native learners of English, which however is only part of the full spectrum of GEC applications. We aim to broaden the target domain of GEC and release CWEB, a new benchmark for GEC consisting of website text generated by English speakers of varying levels of proficiency. Website data is a common and important domain that contains far fewer grammatical errors than learner essays, which we show presents a challenge to state-of-the-art GEC systems. We demonstrate that a factor behind this is the inability of systems to rely on a strong internal language model in low error density domains. We hope this work shall facilitate the development of open-domain GEC models that generalize to different topics and genres.", "phrases": ["new benchmark", "website text", "learner essay", "language model", "grammatical error correction"], "overall_score": 1.8323526333437794, "scores": [2.0180963065508397, 0.790146874279228, 0.8426198656294197, 0.5359564216044042, 0.521395436497188], "rank_score": 0.9416429809122159} -{"id": "bose-etal-2018-adversarial", "title": "Adversarial Contrastive Estimation", "abstract": "Learning by contrasting positive and negative samples is a general strategy adopted by many methods. Noise contrastive estimation (NCE) for word embeddings and translating embeddings for knowledge graphs are examples in NLP employing this approach. In this work, we view contrastive learning as an abstraction of all such methods and augment the negative sampler into a mixture distribution containing an adversarially learned sampler. The resulting adaptive sampler finds harder negative examples, which forces the main model to learn a better representation of the data. We evaluate our proposal on learning word embeddings, order embeddings and knowledge graph embeddings and observe both faster convergence and improved results on multiple metrics.", "phrases": ["contrastive estimation", "sampler", "hard negative example"], "overall_score": 1.515441622350734, "scores": [0.977142867470604, 0.9425124082540989, 0.9051351880319131], "rank_score": 0.9415968212522053} -{"id": "xue-etal-2021-mt5", "title": "mT5: A Massively Multilingual Pre-trained Text-to-Text Transformer", "abstract": "The recent \u201cText-to-Text Transfer Transformer\u201d (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. 
We detail the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. We also describe a simple technique to prevent \u201caccidental translation\u201d in the zero-shot setting, where a generative model chooses to (partially) translate its prediction into the wrong language. All of the code and model checkpoints used in this work are publicly available.", "phrases": ["pre-training", "text-to-text transformer", "language model", "cross-lingual transfer", "input text"], "overall_score": 3.5624797302187208, "scores": [0.9231342302601486, 1.4941903855489427, 1.1830128621103682, 0.5625964470358068, 0.5441236948422173], "rank_score": 0.9414115239594969} -{"id": "vajjala-meurers-2014-assessing", "title": "Assessing the relative reading level of sentence pairs for text simplification", "abstract": "While the automatic analysis of the readability of texts has a long history, the use of readability assessment for text simplification has received only little attention so far. In this paper, we explore readability models for identifying differences in the reading levels of simplified and unsimplified versions of sentences. Our experiments show that a relative ranking is preferable to an absolute binary one and that the accuracy of identifying relative simplification depends on the initial reading level of the unsimplified version. The approach is particularly successful in classifying the relative reading level of harder sentences. In terms of practical relevance, the approach promises to be useful for identifying particularly relevant targets for simplification and to evaluate simplifications given specific readability constraints.", "phrases": ["relative reading level", "text simplification", "language learner", "different level"], "overall_score": 1.9575858198776706, "scores": [1.868647014641739, 0.8456570975858183, 0.5279428807056669, 0.5233521463362042], "rank_score": 0.9413997848173571} -{"id": "bastings-etal-2019-interpretable", "title": "Interpretable Neural Predictions with Differentiable Binary Variables", "abstract": "The success of neural networks comes hand in hand with a desire for more interpretability. We focus on text classifiers and make them more interpretable by having them provide a justification\u2013a rationale\u2013for their predictions. We approach this problem by jointly training two neural network models: a latent model that selects a rationale (i.e. a short and informative part of the input text), and a classifier that learns from the words in the rationale alone. Previous work proposed to assign binary latent masks to input positions and to promote short selections via sparsity-inducing penalties such as L0 regularisation. We propose a latent model that mixes discrete and continuous behaviour allowing at the same time for binary selections and gradient-based training without REINFORCE. In our formulation, we can tractably compute the expected value of penalties such as L0, which allows us to directly optimise the model towards a pre-specified text selection rate. 
We show that our approach is competitive with previous work on rationale extraction, and explore further uses in attention mechanisms.", "phrases": ["latent model", "rationale", "mask", "text classification", "interpretable model"], "overall_score": 3.201827556226884, "scores": [1.7778957428877467, 1.296466081015229, 0.5539260009526276, 0.5465506909525859, 0.5320737814460201], "rank_score": 0.941382459450842} -{"id": "roth-schulte-im-walde-2014-combining", "title": "Combining Word Patterns and Discourse Markers for Paradigmatic Relation Classification", "abstract": "Distinguishing between paradigmatic relations such as synonymy, antonymy and hypernymy is an important prerequisite in a range of NLP applications. In this paper, we explore discourse relations as an alternative set of features to lexico-syntactic patterns. We demonstrate that statistics over discourse relations, collected via explicit discourse markers as proxies, can be utilized as salient indicators for paradigmatic relations in multiple languages, outperforming patterns in terms of recall and F1-score. In addition, we observe that markers and patterns provide complementary information, leading to significant classification improvements when applied in combination.", "phrases": ["discourse marker", "paradigmatic relation", "antonymy"], "overall_score": 1.686433245361695, "scores": [0.9757942285149395, 0.9677705396648298, 0.8800841774372374], "rank_score": 0.941216315205669} -{"id": "herbig-etal-2020-mmpe-multi", "title": "MMPE: A Multi-Modal Interface using Handwriting, Touch Reordering, and Speech Commands for Post-Editing Machine Translation", "abstract": "The shift from traditional translation to post-editing (PE) of machine-translated (MT) text can save time and reduce errors, but it also affects the design of translation interfaces, as the task changes from mainly generating text to correcting errors within otherwise helpful translation proposals. Since this paradigm shift offers potential for modalities other than mouse and keyboard, we present MMPE, the first prototype to combine traditional input modes with pen, touch, and speech modalities for PE of MT. Users can directly cross out or hand-write new text, drag and drop words for reordering, or use spoken commands to update the text in place. All text manipulations are logged in an easily interpretable format to simplify subsequent translation process research. The results of an evaluation with professional translators suggest that pen and touch interaction are suitable for deletion and reordering tasks, while speech and multi-modal combinations of select & speech are considered suitable for replacements and insertions. Overall, experiment participants were enthusiastic about the new modalities and saw them as useful extensions to mouse & keyboard, but not as a complete substitute.", "phrases": ["translator", "modality", "mmpe"], "overall_score": 1.0339609169379451, "scores": [1.736143911789537, 0.5453156678910435, 0.5419957765073316], "rank_score": 0.9411517853959707} -{"id": "teh-2006-hierarchical", "title": "A Hierarchical Bayesian Language Model Based On Pitman-Yor Processes", "abstract": "We propose a new hierarchical Bayesian n-gram model of natural languages. Our model makes use of a generalization of the commonly used Dirichlet distributions called Pitman-Yor processes which produce power-law distributions more closely resembling those in natural languages. 
We show that an approximation to the hierarchical Pitman-Yor language model recovers the exact formulation of interpolated Kneser-Ney, one of the best smoothing methods for n-gram language models. Experiments verify that our model gives cross entropy results superior to interpolated Kneser-Ney and comparable to modified Kneser-Ney.", "phrases": ["bayesian language model", "pitman-yor process", "formulation", "dirichlet process"], "overall_score": 2.609237495949807, "scores": [0.9780862081043376, 1.426381730221351, 0.8243383595693595, 0.5355276980132749], "rank_score": 0.9410834989770808} -{"id": "barnes-etal-2019-sentiment", "title": "Sentiment Analysis Is Not Solved! Assessing and Probing Sentiment Classification", "abstract": "Neural methods for sentiment analysis have led to quantitative improvements over previous approaches, but these advances are not always accompanied with a thorough analysis of the qualitative differences. Therefore, it is not clear what outstanding conceptual challenges for sentiment analysis remain. In this work, we attempt to discover what challenges still prove a problem for sentiment classifiers for English and to provide a challenging dataset. We collect the subset of sentences that an (oracle) ensemble of state-of-the-art sentiment classifiers misclassify and then annotate them for 18 linguistic and paralinguistic phenomena, such as negation, sarcasm, modality, etc. Finally, we provide a case study that demonstrates the usefulness of the dataset to probe the performance of a given sentiment classifier with respect to linguistic phenomena.", "phrases": ["ensemble", "negation", "sarcasm", "sentiment analysis"], "overall_score": 1.3041089078395205, "scores": [2.0893839780279007, 0.5811994234521234, 0.5507497197840804, 0.5415297869742924], "rank_score": 0.9407157270595993} -{"id": "cowan-etal-2006-discriminative", "title": "A Discriminative Model for Tree-to-Tree Translation", "abstract": "This paper proposes a statistical, tree-to-tree model for producing translations. Two main contributions are as follows: (1) a method for the extraction of syntactic structures with alignment information from a parallel corpus of translations, and (2) use of a discriminative, feature-based model for prediction of these target-language syntactic structures---which we call aligned extended projections, or AEPs. An evaluation of the method on translation from German to English shows similar performance to the phrase-based model of Koehn et al. (2003).", "phrases": ["discriminative model", "clause", "modifier"], "overall_score": 2.6077640490267644, "scores": [1.4227625538811133, 0.8730336879447678, 0.5258599541786966], "rank_score": 0.9405520653348592} -{"id": "mohammad-etal-2013-computing", "title": "Computing Lexical Contrast", "abstract": "Knowing the degree of semantic contrast between words has widespread application in natural language processing, including machine translation, information retrieval, and dialogue systems. Manually created lexicons focus on opposites, such as hot and cold. Opposites are of many kinds such as antipodals, complementaries, and gradable. Existing lexicons often do not classify opposites into the different kinds, however. They also do not explicitly list word pairs that are not opposites but yet have some degree of contrast in meaning, such as warm and cold or tropical and freezing. 
We propose an automatic method to identify contrasting word pairs that is based on the hypothesis that if a pair of words, A and B, are contrasting, then there is a pair of opposites, C and D, such that A and C are strongly related and B and D are strongly related. (For example, there exists the pair of opposites hot and cold such that tropical is related to hot, and freezing is related to cold.) We will call this the contrast hypothesis. We begin with a large crowdsourcing experiment to determine the amount of human agreement on the concept of oppositeness and its different kinds. In the process, we flesh out key features of different kinds of opposites. We then present an automatic and empirical measure of lexical contrast that relies on the contrast hypothesis, corpus statistics, and the structure of a Roget-like thesaurus. We show how, using four different data sets, we evaluated our approach on two different tasks, solving \u201cmost contrasting word\u201d questions and distinguishing synonyms from opposites. The results are analyzed across four parts of speech and across five different kinds of opposites. We show that the proposed measure of lexical contrast obtains high precision and large coverage, outperforming existing methods.", "phrases": ["opposite", "word pair", "automatic method", "ranking", "negation"], "overall_score": 2.863185362876449, "scores": [1.7547221540164613, 1.2154072943278997, 0.5857734253310389, 0.5777749643311898, 0.5685134275263872], "rank_score": 0.9404382531065952} -{"id": "wu-fung-2009-semantic-roles", "title": "Semantic Roles for SMT: A Hybrid Two-Pass Model", "abstract": "We present results on a novel hybrid semantic SMT model that incorporates the strengths of both semantic role labeling and phrase-based statistical machine translation. The approach avoids major complexity limitations via a two-pass architecture. The first pass is performed using a conventional phrase-based SMT model. The second pass is performed by a re-ordering strategy guided by shallow semantic parsers that produce both semantic frame and role labels. Evaluation on a Wall Street Journal newswire genre test set showed the hybrid model to yield an improvement of roughly half a point in BLEU score over a strong pure phrase-based SMT baseline -- to our knowledge, the first successful application of semantic role labeling to SMT.", "phrases": ["smt model", "semantic role", "cross-lingual match"], "overall_score": 2.9055181673910147, "scores": [1.679218112292958, 0.5870415828553437, 0.5536803845173126], "rank_score": 0.9399800265552046} -{"id": "yamada-etal-2016-joint", "title": "Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation", "abstract": "Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. 
By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.", "phrases": ["named entity disambiguation", "skip-gram model", "knowledge graph"], "overall_score": 3.556959693537717, "scores": [0.878539911953368, 1.1022610364341987, 0.8390574923650997], "rank_score": 0.9399528135842221} -{"id": "groschwitz-etal-2018-amr", "title": "AMR dependency parsing with a typed semantic algebra", "abstract": "We present a semantic parser for Abstract Meaning Representations which learns to parse strings into tree representations of the compositional structure of an AMR graph. This allows us to use standard neural techniques for supertagging and dependency tree parsing, constrained by a linguistically principled type system. We present two approximative decoding algorithms, which achieve state-of-the-art accuracy and outperform strong baselines.", "phrases": ["algebra", "compositional structure", "amr", "dependency parser"], "overall_score": 2.71673561653932, "scores": [0.8534902581049786, 1.1397954418590528, 1.1376773151297463, 0.6287410563531004], "rank_score": 0.9399260178617195} -{"id": "shen-lapata-2007-using", "title": "Using Semantic Roles to Improve Question Answering", "abstract": "Shallow semantic parsing, the automatic identification and labeling of sentential constituents, has recently received much attention. Our work examines whether semantic role information is beneficial to question answering. We introduce a general framework for answer extraction which exploits semantic role annotations in the FrameNet paradigm. We view semantic role assignment as an optimization problem in a bipartite graph and answer extraction as an instance of graph matching. Experimental results on the TREC datasets demonstrate improvements over state-of-the-art models.", "phrases": ["question answering", "role labeling", "predicate argument structure"], "overall_score": 3.0251143892626344, "scores": [0.9457832055203387, 1.3069469104402651, 0.5666837606711547], "rank_score": 0.9398046255439195} -{"id": "shen-etal-2009-effective", "title": "Effective Use of Linguistic and Contextual Information for Statistical Machine Translation", "abstract": "Current methods of using lexical features in machine translation have difficulty in scaling up to realistic MT tasks due to a prohibitively large number of parameters involved. In this paper, we propose methods of using new linguistic and contextual features that do not suffer from this problem and apply them in a state-of-the-art hierarchical MT system. The features used in this work are non-terminal labels, non-terminal length distribution, source string context and source dependency LM scores. The effectiveness of our techniques is demonstrated by significant improvements over a strong base-line. On Arabic-to-English translation, improvements in lower-cased BLEU are 2.0 on NIST MT06 and 1.7 on MT08 newswire data on decoding output. 
On Chinese-to-English translation, the improvements are 1.0 on MT06 and 0.8 on MT08 newswire data.", "phrases": ["contextual information", "statistical machine translation", "string-to-dependency language model", "constraint modeling"], "overall_score": 2.716336814864406, "scores": [1.7095322829905781, 0.9508988236580871, 0.5696716877053704, 0.5290493734509326], "rank_score": 0.939788041951242} -{"id": "li-etal-2010-report", "title": "Report of NEWS 2010 Transliteration Generation Shared Task", "abstract": "This report documents the Transliteration Generation Shared Task conducted as a part of the Named Entities Workshop (NEWS 2010), an ACL 2010 workshop. The shared task features machine transliteration of proper names from English to 9 languages and from 3 languages to English. In total, 12 tasks are provided. 7 teams from 5 different countries participated in the evaluations. Finally, 33 standard and 8 non-standard runs are submitted, where diverse transliteration methodologies are explored and reported on the evaluation data. We report the results with 4 performance metrics. We believe that the shared task has successfully achieved its objective by providing a common benchmarking platform for the research community to evaluate the state-of-the-art technologies that benefit the future research and development.", "phrases": ["transliteration", "named entities workshop", "news", "direction", "quality metric"], "overall_score": 2.064752762475281, "scores": [1.7902212382274147, 1.2525965559847771, 0.5776670500958588, 0.5476502014980925, 0.5304123484216711], "rank_score": 0.9397094788455629} -{"id": "li-etal-2014-recursive", "title": "Recursive Deep Models for Discourse Parsing", "abstract": "Text-level discourse parsing remains a challenge: most approaches employ features that fail to capture the intentional, semantic, and syntactic aspects that govern discourse coherence. In this paper, we propose a recursive model for discourse parsing that jointly models distributed representations for clauses, sentences, and entire discourses. The learned representations can to some extent learn the semantic and intentional import of words and larger discourse units automatically. The proposed framework obtains comparable performance regarding standard discourse parsing evaluations when compared against current state-of-the-art systems.", "phrases": ["discourse", "recursive neural network", "edu", "network model"], "overall_score": 2.8137548642011594, "scores": [1.227044431642477, 1.152707553638528, 0.8539834840473145, 0.5232823243386125], "rank_score": 0.939254448416733} -{"id": "lenci-benotto-2012-identifying", "title": "Identifying hypernyms in distributional semantic spaces", "abstract": "In this paper we apply existing directional similarity measures to identify hypernyms with a state-of-the-art distributional semantic model. We also propose a new directional measure that achieves the best performance in hypernym identification.", "phrases": ["hypernym", "semantic relation", "distributional inclusion hypothesis"], "overall_score": 2.9846181388270034, "scores": [0.9184625808715835, 1.353910389029993, 0.5450287779790457], "rank_score": 0.9391339159602076} -{"id": "bamman-etal-2013-learning", "title": "Learning Latent Personas of Film Characters", "abstract": "We present two latent variable models for learning character types, or personas, in film, in which a persona is defined as a set of mixtures over latent lexical classes. 
These lexical classes capture the stereotypical actions of which a character is the agent and patient, as well as attributes by which they are described. As the first attempt to solve this problem explicitly, we also present a new dataset for the text-driven analysis of film, along with a benchmark testbed to help drive future work in this area.", "phrases": ["persona", "character", "new dataset", "text-driven analysis", "movie plot summary"], "overall_score": 2.7648848833818063, "scores": [1.7965330863018938, 1.2519255058055583, 0.5552939336983429, 0.5480480216251989, 0.5432957051062801], "rank_score": 0.9390192505074548} -{"id": "liu-etal-2012-locally", "title": "Locally Training the Log-Linear Model for SMT", "abstract": "In statistical machine translation, minimum error rate training (MERT) is a standard method for tuning a single weight with regard to a given development set. However, due to the diversity and uneven distribution of source sentences, there are two problems suffered by this method. First, its performance is highly dependent on the choice of a development set, which may lead to an unstable performance for testing. Second, translations become inconsistent at the sentence level since tuning is performed globally on a document level. In this paper, we propose a novel local training method to address these two problems. Unlike a global training method, such as MERT, in which a single weight is learned and used for all the input sentences, we perform training and testing in one step by learning a sentence-wise weight for each input sentence. We propose efficient incremental training methods to put the local training into practice. In NIST Chinese-to-English translation tasks, our local training method significantly outperforms MERT with the maximal improvements up to 2.0 BLEU points, meanwhile its efficiency is comparable to that of the global method.", "phrases": ["log-linear model", "machine translation", "weight", "local training method"], "overall_score": 1.9525142065795726, "scores": [0.9636855450454377, 0.9592802232664399, 0.948556105920185, 0.8843215432314022], "rank_score": 0.9389608543658663} -{"id": "koehn-etal-2018-findings", "title": "Findings of the WMT 2018 Shared Task on Parallel Corpus Filtering", "abstract": "We posed the shared task of assigning sentence-level quality scores for a very noisy corpus of sentence pairs crawled from the web, with the goal of sub-selecting 1% and 10% of high-quality data to be used to train machine translation systems. Seventeen participants from companies, national research labs, and universities participated in this task.", "phrases": ["shared task", "parallel corpus filtering", "sentence pair", "wmt18"], "overall_score": 3.021837814994318, "scores": [1.8879826833986104, 0.8077337133992333, 0.5311471733028337, 0.5282832319578324], "rank_score": 0.9387867005146274} -{"id": "hu-wan-2014-automatic", "title": "Automatic Generation of Related Work Sections in Scientific Papers: An Optimization Approach", "abstract": "In this paper, we investigate a challenging task of automatic related work generation. Given multiple reference papers as input, the task aims to generate a related work section for a target paper. The generated related work section can be used as a draft for the author to complete his or her final related work section. We propose our Automatic Related Work Generation system called ARWG to address this task. 
It first exploits a PLSA model to split the sentence set of the given papers into different topic-biased parts, and then applies regression models to learn the importance of the sentences. Finally, it employs an optimization framework to generate the related work section. Our evaluation results on a test set of 150 target papers along with their reference papers show that our proposed ARWG system can generate related work sections with better quality. A user study is also performed to show ARWG can achieve an improvement over generic multi-document summarization baselines.", "phrases": ["work section", "work generation", "topic-biased part"], "overall_score": 2.161012261467944, "scores": [1.0673696208495411, 0.9199391205294527, 0.8282383600633922], "rank_score": 0.9385157004807954} -{"id": "thompson-post-2020-paraphrase", "title": "Paraphrase Generation as Zero-Shot Multilingual Translation: Disentangling Semantic Similarity from Lexical and Syntactic Diversity", "abstract": "Recent work has shown that a multilingual neural machine translation (NMT) model can be used to judge how well a sentence paraphrases another sentence in the same language (Thompson and Post, 2020); however, attempting to generate paraphrases from such a model using standard beam search produces trivial copies or near copies. We introduce a simple paraphrase generation algorithm which discourages the production of n-grams that are present in the input. Our approach enables paraphrase generation in many languages from a single multilingual NMT model. Furthermore, the amount of lexical diversity between the input and output can be controlled at generation time. We conduct a human evaluation to compare our method to a paraphraser trained on the large English synthetic paraphrase database ParaBank 2 (Hu et al., 2019c) and find that our method produces paraphrases that better preserve meaning and are more grammatical, for the same level of lexical diversity. Additional smaller human assessments demonstrate our approach also works in two non-English languages.", "phrases": ["multilingual translation", "n-gram", "paraphrase generation"], "overall_score": 1.8248542715908387, "scores": [0.892575248968656, 0.8626294824696213, 1.0581640242723946], "rank_score": 0.9377895852368908} -{"id": "junczys-dowmunt-etal-2018-approaching", "title": "Approaching Neural Grammatical Error Correction as a Low-Resource Machine Translation Task", "abstract": "Previously, neural methods in grammatical error correction (GEC) did not reach state-of-the-art results compared to phrase-based statistical machine translation (SMT) baselines. We demonstrate parallels between neural GEC and low-resource neural MT and successfully adapt several methods from low-resource MT to neural GEC. We further establish guidelines for trustable results in neural GEC and propose a set of model-independent methods for neural GEC that can be easily applied in most GEC settings. Proposed methods include adding source-side noise, domain-adaptation techniques, a GEC-specific training-objective, transfer learning with monolingual data, and ensembling of independently trained GEC models and language models. The combined effects of these methods result in better than state-of-the-art neural GEC models that outperform previously best neural GEC systems by more than 10% M2 on the CoNLL-2014 benchmark and 5.9% on the JFLEG test set. 
Non-neural state-of-the-art systems are outperformed by more than 2% on the CoNLL-2014 benchmark and by 4% on JFLEG.", "phrases": ["error correction", "machine translation", "neural gec system", "ungrammatical text"], "overall_score": 2.8078282923357705, "scores": [1.2110242260596888, 1.1483898274111277, 0.8485090903252009, 0.5411812967082064], "rank_score": 0.9372761101260559} -{"id": "baldwin-2005-bootstrapping", "title": "Bootstrapping Deep Lexical Resources: Resources for Courses", "abstract": "We propose a range of deep lexical acquisition methods which make use of morphological, syntactic and ontological language resources to model word similarity and bootstrap from a seed lexicon. The different methods are deployed in learning lexical items for a precision grammar, and shown to each have strengths and weaknesses over different word classes. A particular focus of this paper is the relative accessibility of different language resource types, and predicted \"bang for the buck\" associated with each in deep lexical acquisition applications.", "phrases": ["lexical acquisition", "item", "dla", "deep grammar", "secondary language resource"], "overall_score": 2.598495359578827, "scores": [1.622213549320898, 1.0411958741547178, 0.9239512923664837, 0.5637651185422414, 0.5349196269123555], "rank_score": 0.9372090922593393} -{"id": "ganitkevitch-callison-burch-2014-multilingual", "title": "The Multilingual Paraphrase Database", "abstract": "We release a massive expansion of the paraphrase database (PPDB) that now includes a collection of paraphrases in 23 different languages. The resource is derived from large volumes of bilingual parallel data. Our collection is extracted and ranked using state of the art methods. The multilingual PPDB has over a billion paraphrase pairs in total, covering the following languages: Arabic, Bulgarian, Chinese, Czech, Dutch, Estonian, Finnish, French, German, Greek, Hungarian, Italian, Latvian, Lithuanian, Polish, Portuguese, Romanian, Russian, Slovak, Slovenian, and Swedish.", "phrases": ["multilingual paraphrase database", "ppdb", "parallel text"], "overall_score": 2.1574801072030496, "scores": [0.9803116913192901, 1.2569473440699053, 0.5736860807337659], "rank_score": 0.9369817053743205} -{"id": "bunt-2020-annotation", "title": "Annotation of Quantification: The Current State of ISO 24617-12", "abstract": "This paper discusses the current state of developing an ISO standard annotation scheme for quantification phenomena in natural language, as part of the ISO Semantic Annotation Framework (ISO 24617). A proposed approach that combines ideas from the theory of generalised quantifiers and from neo-Davidsonian event semantics was adopted by the ISO organisation in 2019 as a starting point for developing such an annotation scheme. This scheme consists of (1) a conceptual `metamodel' that visualises the types of entities, functions and relations that go into annotations of quantification; (2) an abstract syntax which defines `annotation structures' as triples and other set-theoretic constructs; (3) an XML-based representation of annotation structures (`concrete syntax'); and (4) a compositional semantics of annotation structures. The latter three components together define the interpreted markup language QuantML. 
The focus in this paper is on the structuring of the semantic information needed to characterise quantification in natural language and the representation of these structures in QuantML.", "phrases": ["current state", "iso", "annotation scheme", "quantification phenomenon"], "overall_score": 1.028697280394485, "scores": [1.7466682423480002, 0.7881170992314895, 0.6271063539988352, 0.5835507721793024], "rank_score": 0.9363606169394069} -{"id": "he-etal-2010-bridging", "title": "Bridging SMT and TM with Translation Recommendation", "abstract": "We propose a translation recommendation framework to integrate Statistical Machine Translation (SMT) output with Translation Memory (TM) systems. The framework recommends SMT outputs to a TM user when it predicts that SMT outputs are more suitable for post-editing than the hits provided by the TM. We describe an implementation of this framework using an SVM binary classifier. We exploit methods to fine-tune the classifier and investigate a variety of features of different types. We rely on automatic MT evaluation metrics to approximate human judgements in our experiments. Experimental results show that our system can achieve 0.85 precision at 0.89 recall, excluding exact matches. Furthermore, it is possible for the end-user to achieve a desired balance between precision and recall by adjusting confidence levels.", "phrases": ["translation memory", "smt output", "translation recommendation system", "post-editor", "sentence level"], "overall_score": 2.850213627795066, "scores": [1.7159555762506766, 0.9881533757872414, 0.8480846334394371, 0.5724734502892026, 0.5562208310450697], "rank_score": 0.9361775733623254} -{"id": "huang-etal-2019-improving", "title": "Improving Event Coreference Resolution by Learning Argument Compatibility from Unlabeled Data", "abstract": "Argument compatibility is a linguistic condition that is frequently incorporated into modern event coreference resolution systems. If two event mentions have incompatible arguments in any of the argument roles, they cannot be coreferent. On the other hand, if these mentions have compatible arguments, then this may be used as information towards deciding their coreferent status. One of the key challenges in leveraging argument compatibility lies in the paucity of labeled data. In this work, we propose a transfer learning framework for event coreference resolution that utilizes a large amount of unlabeled data to learn argument compatibility of event mentions. In addition, we adopt an interactive inference network based model to better capture the compatible and incompatible relations between the context words of event mentions. Our experiments on the KBP 2017 English dataset confirm the effectiveness of our model in learning argument compatibility, which in turn improves the performance of the overall event coreference model.", "phrases": ["event coreference resolution", "unlabeled data", "mention"], "overall_score": 1.9466914807799771, "scores": [0.9214439388539548, 0.8008470540953544, 1.0861911525127568], "rank_score": 0.936160715154022} -{"id": "kim-hovy-2007-crystal", "title": "Crystal: Analyzing Predictive Opinions on the Web", "abstract": "In this paper, we present an election prediction system (Crystal) based on web users\u2019 opinions posted on an election prediction website. Given a prediction message, Crystal first identifies which party the message predicts to win and then aggregates prediction analysis results of a large number of opinions to project the election results. 
We collect past election prediction messages from the Web and automatically build a gold standard. We focus on capturing lexical patterns that people frequently use when they express their predictive opinions about a coming election. To predict election results, we apply SVM-based supervised learning. To improve performance, we propose a novel technique which generalizes n-gram feature patterns. Experimental results show that Crystal significantly outperforms several baselines as well as a non-generalized n-gram approach. Crystal predicts future elections with 81.68% accuracy.", "phrases": ["opinion", "web", "election prediction", "crystal"], "overall_score": 2.3262706963289426, "scores": [0.8923463237090967, 0.8101190228850043, 1.1800629033695174, 0.86211253407132], "rank_score": 0.9361601960087347} -{"id": "tu-etal-2020-empirical", "title": "An Empirical Study on Robustness to Spurious Correlations using Pre-trained Language Models", "abstract": "Recent work has shown that pre-trained language models such as BERT improve robustness to spurious correlations in the dataset. Intrigued by these results, we find that the key to their success is generalization from a small amount of counterexamples where the spurious correlations do not hold. When such minority examples are scarce, pre-trained models perform as poorly as models trained from scratch. In the case of extreme minority, we propose to use multi-task learning (MTL) to improve generalization. Our experiments on natural language inference and paraphrase identification show that MTL with the right auxiliary tasks significantly improves performance on challenging examples without hurting the in-distribution performance. Further, we show that the gain from MTL mainly comes from improved generalization from the minority examples. Our results highlight the importance of data diversity for overcoming spurious correlations.", "phrases": ["robustness", "language model", "pre-trained model", "auxiliary task", "plm"], "overall_score": 2.470425169713164, "scores": [1.7598908404835667, 0.8646747947135681, 0.8363449496258403, 0.6187965034156441, 0.6007997389346305], "rank_score": 0.9361013654346498} -{"id": "huang-chiang-2007-forest", "title": "Forest Rescoring: Faster Decoding with Integrated Language Models", "abstract": "Efficient decoding has been a fundamental problem in machine translation, especially with an integrated language model which is essential for achieving good translation quality. We develop faster approaches for this problem based on k-best parsing algorithms and demonstrate their effectiveness on both phrase-based and syntax-based MT systems. In both cases, our methods achieve significant speed improvements, often by more than a factor of ten, over the conventional beam-search method at the same levels of search error and translation accuracy.", "phrases": ["machine translation", "forest rescoring", "hiero search refinements"], "overall_score": 2.974821056866488, "scores": [0.8948564451799117, 1.39167696498087, 0.521620147648017], "rank_score": 0.9360511859362662} -{"id": "titov-klementiev-2012-bayesian", "title": "A Bayesian Approach to Unsupervised Semantic Role Induction", "abstract": "We introduce two Bayesian models for unsupervised semantic role labeling (SRL) task. The models treat SRL as clustering of syntactic signatures of arguments with clusters corresponding to semantic roles. The first model induces these clusterings independently for each predicate, exploiting the Chinese Restaurant Process (CRP) as a prior.
In a more refined hierarchical model, we inject the intuition that the clusterings are similar across different predicates, even though they are not necessarily identical. This intuition is encoded as a distance-dependent CRP with a distance between two syntactic signatures indicating how likely they are to correspond to a single semantic role. These distances are automatically induced within the model and shared across predicates. Both models achieve state-of-the-art results when evaluated on PropBank, with the coupled model consistently outperforming the factored counterpart in all experimental set-ups.", "phrases": ["semantic role", "bayesian model", "argument signature"], "overall_score": 2.400349468225479, "scores": [1.2628966333331195, 0.9843529085339306, 0.5602321667788963], "rank_score": 0.9358272362153155} -{"id": "yang-kirchhoff-2006-phrase", "title": "Phrase-Based Backoff Models for Machine Translation of Highly Inflected Languages", "abstract": "We propose a backoff model for phrase-based machine translation that translates unseen word forms in foreign-language text by hierarchical morphological abstractions at the word and the phrase level. The model is evaluated on the Europarl corpus for German-English and Finnish-English translation and shows improvements over state-of-the-art phrase-based models.", "phrases": ["backoff model", "machine translation", "french"], "overall_score": 3.0116615669781766, "scores": [0.9107477872299973, 1.3745933180618797, 0.521534708659248], "rank_score": 0.9356252713170417} -{"id": "dinu-lapata-2010-measuring", "title": "Measuring Distributional Similarity in Context", "abstract": "The computation of meaning similarity as operationalized by vector-based models has found widespread use in many tasks ranging from the acquisition of synonyms and paraphrases to word sense disambiguation and textual entailment. Vector-based models are typically directed at representing words in isolation and thus best suited for measuring similarity out of context. In this paper we propose a probabilistic framework for measuring similarity in context. Central to our approach is the intuition that word meaning is represented as a probability distribution over a set of latent senses and is modulated by context. Experimental results on lexical substitution and word similarity show that our algorithm outperforms previously proposed models.", "phrases": ["sense disambiguation", "probabilistic framework", "substitution", "latent variable"], "overall_score": 2.933207313272925, "scores": [1.3863788483883541, 1.1765583348679152, 0.5927657885289049, 0.5862363988843003], "rank_score": 0.9354848426673686} -{"id": "strassel-tracey-2016-lorelei", "title": "LORELEI Language Packs: Data, Tools, and Resources for Technology Development in Low Resource Languages", "abstract": "In this paper, we describe the textual linguistic resources in nearly 3 dozen languages being produced by Linguistic Data Consortium for DARPA's LORELEI (Low Resource Languages for Emergent Incidents) Program. The goal of LORELEI is to improve the performance of human language technologies for low-resource languages and enable rapid re-training of such technologies for new languages, with a focus on the use case of deployment of resources in sudden emergencies such as natural disasters. Representative languages have been selected to provide broad typological coverage for training, and surprise incident languages for testing will be selected over the course of the program.
Our approach treats the full set of language packs as a coherent whole, maintaining LORELEI-wide specifications, tagsets, and guidelines, while allowing for adaptation to the specific needs created by each language. Each representative language corpus, therefore, both stands on its own as a resource for the specific language and forms part of a large multilingual resource for broader cross-language technology development.", "phrases": ["technology development", "low resource languages", "lorelei"], "overall_score": 2.751980615114569, "scores": [0.920638548308954, 0.8218673383034387, 1.0614040954789783], "rank_score": 0.9346366606971236} -{"id": "bhandari-armstrong-2019-tkol", "title": "Tkol, Httt, and r/radiohead: High Affinity Terms in Reddit Communities", "abstract": "Language is an important marker of a cultural group, large or small. One aspect of language variation between communities is the employment of highly specialized terms with unique significance to the group. We study these high affinity terms across a wide variety of communities by leveraging the rich diversity of Reddit.com. We provide a systematic exploration of high affinity terms, the often rapid semantic shifts they undergo, and their relationship to subreddit characteristics across 2600 diverse subreddits. Our results show that high affinity terms are effective signals of loyal communities, they undergo more semantic shift than low affinity terms, and that they are a partial barrier to entry for new users. We conclude that Reddit is a robust and valuable data source for testing further theories about high affinity terms across communities.", "phrases": ["high affinity term", "reddit", "community"], "overall_score": 1.025776860959008, "scores": [1.6819510744065054, 0.5619885411940534, 0.5571673942325384], "rank_score": 0.9337023366110323} -{"id": "akbik-etal-2015-generating", "title": "Generating High Quality Proposition Banks for Multilingual Semantic Role Labeling", "abstract": "Semantic role labeling (SRL) is crucial to natural language understanding as it identifies the predicate-argument structure in text with semantic labels. Unfortunately, resources required to construct SRL models are expensive to obtain and simply do not exist for most languages. In this paper, we present a two-stage method to enable the construction of SRL models for resource-poor languages by exploiting monolingual SRL and multilingual parallel data. Experimental results show that our method outperforms existing methods. We use our method to generate Proposition Banks with high to reasonable quality for 7 languages in three language families and release these resources to the research community.", "phrases": ["annotation projection", "propbanks", "new target language"], "overall_score": 2.842554931269006, "scores": [1.4185650305334088, 0.8519535297957442, 0.5304674623534978], "rank_score": 0.9336620075608836} -{"id": "zhong-etal-2019-searching", "title": "Searching for Effective Neural Extractive Summarization: What Works and What's Next", "abstract": "The recent years have seen remarkable success in the use of deep neural networks on text summarization. However, there is no clear understanding of why they perform so well, or how they might be improved. In this paper, we seek to better understand how neural extractive summarization systems could benefit from different types of model architectures, transferable knowledge and learning schemas.
Besides, we find an effective way to improve the current framework and achieve the state-of-the-art result on CNN/DailyMail by a large margin based on our observations and analysis. Hopefully, our work could provide more hints for future research on extractive summarization.", "phrases": ["summarization", "deep neural network", "sentence embedding"], "overall_score": 2.6434595581216, "scores": [1.7175569375682218, 0.5608852296877311, 0.5206335504276097], "rank_score": 0.9330252392278543} -{"id": "shen-etal-2010-string", "title": "String-to-Dependency Statistical Machine Translation", "abstract": "We propose a novel string-to-dependency algorithm for statistical machine translation. This algorithm employs a target dependency language model during decoding to exploit long distance word relations, which cannot be modeled with a traditional n-gram language model. Experiments show that the algorithm achieves significant improvement in MT performance over a state-of-the-art hierarchical string-to-string system on NIST MT06 and MT08 newswire evaluation sets.", "phrases": ["machine translation", "string-to-dependency model", "neighbouring word", "target side"], "overall_score": 2.3928698398844315, "scores": [1.983385791391265, 0.6664740206657802, 0.5437324437669554, 0.5380523209758973], "rank_score": 0.9329111441999745} -{"id": "oepen-lonning-2006-discriminant", "title": "Discriminant-Based MRS Banking", "abstract": "We present an approach to discriminant-based MRS banking, i.e. the construction of an annotated corpus where each input item is paired with a logical-form semantics. Semantic annotations are produced by parsing with a broad-coverage precision grammar, followed by manual disambiguation. The selection of the preferred analysis for each item (and hence its semantic form) builds on a notion of semantic discriminants, essentially localized dependencies extracted from a full-fledged, underspecified semantic representation.", "phrases": ["discriminant-based mrs banking", "eds", "elementary dependency structures", "flavor"], "overall_score": 2.2356856949558797, "scores": [1.0397207708399179, 1.2664579377271978, 0.874376535423379, 0.5488581633712193], "rank_score": 0.9323533518404284} -{"id": "white-2011-glue", "title": "Glue Rules for Robust Chart Realization", "abstract": "This paper shows how glue rules can be used to increase the robustness of statistical chart realization in a manner inspired by dependency realization. Unlike the use of glue rules in MT---but like previous work with XLE on improving robustness with hand-crafted grammars---they are invoked here as a fall-back option when no grammatically complete realization can be found. The method works with Combinatory Categorial Grammar (CCG) and has been implemented in OpenCCG. As the techniques are not overly tied to CCG, they are expected to be applicable to other grammar-based chart realizers where robustness is a common problem. Unlike an earlier robustness technique of greedily assembling fragments, glue rules enable n-best outputs and are compatible with disjunctive inputs. 
Experimental results indicate that glue rules yield improved realizations in comparison to greedy fragment assembly, though a sizeable gap remains between the quality of grammatically complete realizations and fragmentary ones.", "phrases": ["robustness", "realization", "ccg", "glue rule"], "overall_score": 1.291769995748205, "scores": [1.9964687810602395, 0.5910834089052783, 0.5779146496411646, 0.561793494063503], "rank_score": 0.9318150834175463} -{"id": "adelani-etal-2021-masakhaner", "title": "MasakhaNER: Named Entity Recognition for African Languages", "abstract": "We take a step towards addressing the under-representation of the African continent in NLP research by bringing together different stakeholders to create the first large, publicly available, high-quality dataset for named entity recognition (NER) in ten African languages. We detail the characteristics of these languages to help researchers and practitioners better understand the challenges they pose for NER tasks. We analyze our datasets and conduct an extensive empirical evaluation of state-of-the-art methods across both supervised and transfer learning settings. Finally, we release the data, code, and models to inspire future research on African NLP.", "phrases": ["entity recognition", "african language", "masakhaner"], "overall_score": 1.937550161563139, "scores": [0.9012471410695369, 0.8101237327712323, 1.0839231357199814], "rank_score": 0.9317646698535835} -{"id": "lambert-banchs-2006-grouping", "title": "Grouping Multi-word Expressions According to Part-Of-Speech in Statistical Machine Translation", "abstract": "This paper studies a strategy for identifying and using multi-word expressions in Statistical Machine Translation. The performance of the proposed strategy for various types of multi-word expressions (like nouns or verbs) is evaluated in terms of alignment quality as well as translation accuracy. Evaluations are performed by using real-life data, namely the European Parliament corpus. Results from translation tasks from English-to-Spanish and from Spanish-to-English are presented and discussed.", "phrases": ["part-of-speech", "mwes", "further study"], "overall_score": 1.8128012529025852, "scores": [1.442977853295375, 0.826030424661636, 0.5257783987803263], "rank_score": 0.9315955589124458} -{"id": "ma-etal-2020-entity", "title": "Entity-Aware Dependency-Based Deep Graph Attention Network for Comparative Preference Classification", "abstract": "This paper studies the task of comparative preference classification (CPC). Given two entities in a sentence, our goal is to classify whether the first (or the second) entity is preferred over the other or no comparison is expressed at all between the two entities. Existing works either do not learn entity-aware representations well and fail to deal with sentences involving multiple entity pairs or use sequential modeling approaches that are unable to capture long-range dependencies between the entities. Some also use traditional machine learning approaches that do not generalize well. This paper proposes a novel Entity-aware Dependency-based Deep Graph Attention Network (ED-GAT) that employs a multi-hop graph attention over a dependency graph sentence representation to leverage both the semantic information from word embeddings and the syntactic information from the dependency graph to solve the problem.
Empirical evaluation shows that the proposed model achieves the state-of-the-art performance in comparative preference classification.", "phrases": ["graph attention network", "comparative preference classification", "dependency graph"], "overall_score": 1.9370739191639161, "scores": [1.6783777000997897, 0.5819398671449264, 0.5342893697684146], "rank_score": 0.9315356456710436} -{"id": "vania-lopez-2017-characters", "title": "From Characters to Words to in Between: Do We Capture Morphology?", "abstract": "Words can be represented by composing the representations of subword units such as word segments, characters, and/or character n-grams. While such representations are effective and may capture the morphological regularities of words, they have not been systematically compared, and it is not understood how they interact with different morphological typologies. On a language modeling task, we present experiments that systematically vary (1) the basic unit of representation, (2) the composition of these representations, and (3) the morphological typology of the language modeled. Our results extend previous findings that character representations are effective across typologies, and we find that a previously unstudied combination of character trigram representations composed with bi-LSTMs outperforms most others. But we also find room for improvement: none of the character-level models match the predictive accuracy of a model with access to true morphological analyses, even when learned from an order of magnitude more data.", "phrases": ["character", "segmentation", "character-aware nlms", "bpe", "bilstm"], "overall_score": 2.836028613215499, "scores": [2.4159559717908023, 0.5683368894224173, 0.5642803139919265, 0.5613947236961927, 0.5476240079202978], "rank_score": 0.9315183813643273} -{"id": "zhu-etal-2007-unified", "title": "A Unified Tagging Approach to Text Normalization", "abstract": "This paper addresses the issue of text normalization, an important yet often overlooked problem in natural language processing. By text normalization, we mean converting \u2018informally inputted\u2019 text into the canonical form, by eliminating \u2018noises\u2019 in the text and detecting paragraph and sentence boundaries in the text. Previously, text normalization issues were often undertaken in an ad-hoc fashion or studied separately. This paper first gives a formalization of the entire problem. It then proposes a unified tagging approach to perform the task using Conditional Random Fields (CRF). The paper shows that with the introduction of a small set of tags, most of the text normalization tasks can be performed within the approach. The accuracy of the proposed method is high, because the subtasks of normalization are interdependent and should be performed together. Experimental results on email data cleaning show that the proposed method significantly outperforms the approach of using cascaded models and that of employing independent models.", "phrases": ["text normalization", "noise", "conditional random fields", "crf"], "overall_score": 1.4988796609004733, "scores": [2.0808252980089095, 0.5649048283798526, 0.5576949841196657, 0.5218000735648877], "rank_score": 0.9313062960183288} -{"id": "zampieri-etal-2020-semeval", "title": "SemEval-2020 Task 12: Multilingual Offensive Language Identification in Social Media (OffensEval 2020)", "abstract": "We present the results and the main findings of SemEval-2020 Task 12 on Multilingual Offensive Language Identification in Social Media (OffensEval-2020). 
The task included three subtasks corresponding to the hierarchical taxonomy of the OLID schema from OffensEval-2019, and it was offered in five languages: Arabic, Danish, English, Greek, and Turkish. OffensEval-2020 was one of the most popular tasks at SemEval-2020, attracting a large number of participants across all subtasks and languages: a total of 528 teams signed up to participate in the task, 145 teams submitted official runs on the test data, and 70 teams submitted system description papers.", "phrases": ["offensive language identification", "social media", "semeval-2020 task", "multilingual dataset"], "overall_score": 3.255419040596775, "scores": [0.8293235839799786, 0.8262256563134626, 1.516395952614256, 0.5522498598625136], "rank_score": 0.9310487631925528} -{"id": "chen-etal-2020-uncertain", "title": "Uncertain Natural Language Inference", "abstract": "We introduce Uncertain Natural Language Inference (UNLI), a refinement of Natural Language Inference (NLI) that shifts away from categorical labels, targeting instead the direct prediction of subjective probability assessments. We demonstrate the feasibility of collecting annotations for UNLI by relabeling a portion of the SNLI dataset under a probabilistic scale, where items even with the same categorical label differ in how likely people judge them to be true given a premise. We describe a direct scalar regression modeling approach, and find that existing categorically-labeled NLI data can be used in pre-training. Our best models correlate well with humans, demonstrating models are capable of more subtle inferences than the categorical bin assignment employed in current NLI tasks.", "phrases": ["natural language inference", "nli", "judgement"], "overall_score": 1.6680147715180023, "scores": [1.3259945140376803, 0.9170232549530603, 0.5497925386952028], "rank_score": 0.9309367692286478} -{"id": "basaldella-etal-2020-cometa", "title": "COMETA: A Corpus for Medical Entity Linking in the Social Media", "abstract": "Whilst there has been growing progress in Entity Linking (EL) for general language, existing datasets fail to address the complex nature of health terminology in layman's language. Meanwhile, there is a growing need for applications that can understand the public's voice in the health domain. To address this we introduce a new corpus called COMETA, consisting of 20k English biomedical entity mentions from Reddit expert-annotated with links to SNOMED CT, a widely-used medical knowledge graph. Our corpus satisfies a combination of desirable properties, from scale and coverage to diversity and quality, that to the best of our knowledge has not been met by any of the existing resources in the field. Through benchmark experiments on 20 EL baselines from string- to neural-based models we shed light on the ability of these systems to perform complex inference on entities and concepts under 2 challenging evaluation scenarios. 
Our experimental results on COMETA illustrate that no golden bullet exists and even the best mainstream techniques still have a significant performance gap to fill, while the best solution relies on combining different views of data.", "phrases": ["entity linking", "health terminology", "cometa"], "overall_score": 1.935805265825473, "scores": [1.6511328328183286, 0.6080633337385255, 0.5335804905762979], "rank_score": 0.9309255523777172} -{"id": "feng-etal-2010-comparison", "title": "A Comparison of Features for Automatic Readability Assessment", "abstract": "Several sets of explanatory variables - including shallow, language modeling, POS, syntactic, and discourse features - are compared and evaluated in terms of their impact on predicting the grade level of reading material for primary school students. We find that features based on in-domain language models have the highest predictive power. Entity-density (a discourse feature) and POS-features, in particular nouns, are individually very useful but highly correlated. Average sentence length (a shallow feature) is more useful - and less expensive to compute - than individual syntactic features. A judicious combination of features examined here results in a significant improvement over the state of the art.", "phrases": ["automatic readability assessment", "grade level", "lexical chain", "reader", "discourse-based feature"], "overall_score": 3.0330350457685893, "scores": [1.6120994318564421, 1.0926625361901314, 0.883981951529833, 0.5342013279477827, 0.5316667481204149], "rank_score": 0.9309223991289208} -{"id": "lee-2015-morphological", "title": "Morphological Paradigms: Computational Structure and Unsupervised Learning", "abstract": "This thesis explores the computational structure of morphological paradigms from the perspective of unsupervised learning. Three topics are studied: (i) stem identification, (ii) paradigmatic similarity, and (iii) paradigm induction. All the three topics progress in terms of the scope of data in question. The first and second topics explore structure when morphological paradigms are given, first within a paradigm and then across paradigms. The third topic asks where morphological paradigms come from in the first place, and explores strategies of paradigm induction from child-directed speech. This research is of interest to linguists and natural language processing researchers, for both theoretical questions and applied areas.", "phrases": ["computational structure", "unsupervised learning", "morphological paradigms"], "overall_score": 1.0226899030210659, "scores": [0.9527869302858601, 0.9228825612078827, 0.9170079077211678], "rank_score": 0.9308924664049703} -{"id": "settles-2011-closing", "title": "Closing the Loop: Fast, Interactive Semi-Supervised Annotation With Queries on Features and Instances", "abstract": "This paper describes DUALIST, an active learning annotation paradigm which solicits and learns from labels on both features (e.g., words) and instances (e.g., documents). We present a novel semi-supervised training algorithm developed for this setting, which is (1) fast enough to support real-time interactive speeds, and (2) at least as accurate as preexisting methods for learning with mixed feature and instance labels. 
Human annotators in user studies were able to produce near-state-of-the-art classifiers---on several corpora in a variety of application domains---with only a few minutes of effort.", "phrases": ["annotator", "active learning", "few minute", "text classification"], "overall_score": 2.6368820829792314, "scores": [1.1781061522157097, 1.1004597115259176, 0.8886199386179044, 0.5556289140276168], "rank_score": 0.9307036790967871} -{"id": "axelrod-etal-2015-class", "title": "Class-based N-gram language difference models for data selection", "abstract": "We present a simple method for representing text that explicitly encodes differences between two corpora in a domain adaptation or data selection scenario. We do this by replacing every word in the corpora with its part-of-speech tag plus a suffix that indicates the relative bias of the word, or how much likelier it is to be in the task corpus versus the pool. By changing the representation of the text, we can use basic n-gram models to create language difference models that characterize the difference between the corpora. This process enables us to use common models with robust statistics that are tailored to computing the similarity score via cross-entropy difference. These improvements come despite using zero of the original words in the texts during our selection process. We replace the entire vocabulary during the selection process from 3.6M to under 200 automatically-derived tags, greatly reducing the model size for selection. When used to select data for machine translation systems, our language difference models lead to MT system improvements of up to +1.8 BLEU when used in isolation, and up to +1.3 BLEU when used in a multimodel translation system. Language models trained on data selected with our method have 35% fewer OOV\u2019s on the task data than the most common approach. These LMs also have a lower perplexity on in-domain data than the baselines.", "phrases": ["part-of-speech tag", "cross-entropy difference", "language model", "in-domain data"], "overall_score": 1.8108212946532203, "scores": [0.9489436307350493, 0.9343057615917671, 0.9214983218418443, 0.9175645324318821], "rank_score": 0.9305780616501358} -{"id": "pyysalo-etal-2015-universal", "title": "Universal Dependencies for Finnish", "abstract": "There has been substantial recent interest in annotation schemes that can be applied consistently to many languages. Building on several recent efforts to unify morphological and syntactic annotation, the Universal Dependencies (UD) project seeks to introduce a cross-linguistically applicable part-of-speech tagset, feature inventory, and set of dependency relations as well as a large number of uniformly annotated treebanks. We present Universal Dependencies for Finnish, one of the ten languages in the recent first release of UD project treebank data. We detail the mapping of previously introduced annotation to the UD standard, describing specific challenges and their resolution. We additionally present parsing experiments comparing the performance of a state-of-the-art parser trained on a language-specific annotation schema to performance on the corresponding UD annotation. The results show improvement compared to the source annotation, indicating that the conversion is accurate and supporting the feasibility of UD as a parsing target.
The introduced tools and resources are available under open licenses from http://bionlp.utu.fi/ud-finnish.html.", "phrases": ["finnish", "treebank", "universal dependencies"], "overall_score": 1.8102956806502912, "scores": [0.974972291613521, 0.9094306303630039, 0.9065209264793882], "rank_score": 0.9303079494853043} -{"id": "jager-etal-2017-using", "title": "Using support vector machines and state-of-the-art algorithms for phonetic alignment to identify cognates in multi-lingual wordlists", "abstract": "Most current approaches in phylogenetic linguistics require as input multilingual word lists partitioned into sets of etymologically related words (cognates). Cognate identification is so far done manually by experts, which is time consuming and as of yet only available for a small number of well-studied language families. Automatizing this step will greatly expand the empirical scope of phylogenetic methods in linguistics, as raw wordlists (in phonetic transcription) are much easier to obtain than wordlists in which cognate words have been fully identified and annotated, even for under-studied languages. A couple of different methods have been proposed in the past, but they are either disappointing regarding their performance or not applicable to larger datasets. Here we present a new approach that uses support vector machines to unify different state-of-the-art methods for phonetic alignment and cognate detection within a single framework. Training and evaluating these methods on a typologically broad collection of gold-standard data shows it to be superior to the existing state of the art.", "phrases": ["support vector machine", "phonetic alignment", "cognate detection"], "overall_score": 1.6666772057354189, "scores": [0.8699650682976604, 0.8691860779044117, 1.0514196324529577], "rank_score": 0.9301902595516767} -{"id": "pradhan-etal-2013-towards", "title": "Towards Robust Linguistic Analysis using OntoNotes", "abstract": "Large-scale linguistically annotated corpora have played a crucial role in advancing the state of the art of key natural language technologies such as syntactic, semantic and discourse analyzers, and they serve as training data as well as evaluation benchmarks. Up till now, however, most of the evaluation has been done on monolithic corpora such as the Penn Treebank, the Proposition Bank. As a result, it is still unclear how the state-of-the-art analyzers perform in general on data from a variety of genres or domains. The completion of the OntoNotes corpus, a large-scale, multi-genre, multilingual corpus manually annotated with syntactic, semantic and discourse information, makes it possible to perform such an evaluation. This paper presents an analysis of the performance of publicly available, state-of-the-art tools on all layers and languages in the OntoNotes v5.0 corpus.
This should set the benchmark for future development of various NLP components in syntax and semantics, and possibly encourage research towards an integrated system that makes use of the various layers jointly to improve overall performance.", "phrases": ["ontonotes", "discourse analyzer", "syntax", "similar corpora ontonotes", "human-annotated corpus"], "overall_score": 2.3856682618207814, "scores": [2.1277302824891313, 0.828781143637304, 0.5800465341960319, 0.5698510352151605, 0.5441082844249974], "rank_score": 0.9301034559925249} -{"id": "kobayashi-etal-2020-attention", "title": "Attention is Not Only a Weight: Analyzing Transformers with Vector Norms", "abstract": "Attention is a key component of Transformers, which have recently achieved considerable success in natural language processing. Hence, attention is being extensively studied to investigate various linguistic capabilities of Transformers, focusing on analyzing the parallels between attention weights and specific linguistic phenomena. This paper shows that attention weights alone are only one of the two factors that determine the output of attention and proposes a norm-based analysis that incorporates the second factor, the norm of the transformed input vectors. The findings of our norm-based analyses of BERT and a Transformer-based neural machine translation system include the following: (i) contrary to previous studies, BERT pays poor attention to special tokens, and (ii) reasonable word alignment can be extracted from attention mechanisms of Transformer. These findings provide insights into the inner workings of Transformers.", "phrases": ["weight", "transformer", "attention weight", "norm-based analysis", "input vector"], "overall_score": 1.933970591245371, "scores": [1.7289023446678389, 0.8076112563573034, 0.9681517781697537, 0.5868578559776574, 0.5586930668521027], "rank_score": 0.9300432604049312} -{"id": "soni-etal-2014-modeling", "title": "Modeling Factuality Judgments in Social Media Text", "abstract": "How do journalists mark quoted content as certain or uncertain, and how do readers interpret these signals? Predicates such as thinks, claims, and admits offer a range of options for framing quoted content according to the author\u2019s own perceptions of its credibility. We gather a new dataset of direct and indirect quotes from Twitter, and obtain annotations of the perceived certainty of the quoted statements. We then compare the ability of linguistic and extra-linguistic features to predict readers\u2019 assessment of the certainty of quoted content. We see that readers are indeed influenced by such framing devices \u2014 and we find no evidence that they consider other factors, such as the source, journalist, or the content itself. In addition, we examine the impact of specific framing devices on perceptions of credibility.", "phrases": ["factuality", "journalist", "twitter"], "overall_score": 2.141393049256168, "scores": [1.187907425456333, 1.0509043543736898, 0.5511737748037729], "rank_score": 0.929995184877932} -{"id": "he-etal-2013-identification", "title": "Identification of Speakers in Novels", "abstract": "Speaker identification is the task of attributing utterances to characters in a literary narrative. It is challenging to automate because the speakers of the majority of utterances are not explicitly identified in novels. In this paper, we present a supervised machine learning approach for the task that incorporates several novel features. 
The experimental results show that our method is more accurate and general than previous approaches to the problem.", "phrases": ["novel", "speaker identification", "literary text"], "overall_score": 2.4539785021930984, "scores": [1.6856319708061591, 0.5540965070970422, 0.5498795476460479], "rank_score": 0.9298693418497498} -{"id": "dai-adel-2020-analysis", "title": "An Analysis of Simple Data Augmentation for Named Entity Recognition", "abstract": "Simple yet effective data augmentation techniques have been proposed for sentence-level and sentence-pair natural language processing tasks. Inspired by these efforts, we design and compare data augmentation for named entity recognition, which is usually modeled as a token-level sequence labeling problem. Through experiments on two data sets from the biomedical and materials science domains (i2b2-2010 and MaSciP), we show that simple augmentation can boost performance for both recurrent and transformer-based models, especially for small training sets.", "phrases": ["data augmentation", "named entity recognition", "synonym replacement"], "overall_score": 2.4531202631862836, "scores": [0.9555286418202845, 1.3061203777751595, 0.5269833859884668], "rank_score": 0.9295441351946371} -{"id": "pan-etal-2019-reinforced", "title": "Reinforced Dynamic Reasoning for Conversational Question Generation", "abstract": "This paper investigates a new task named Conversational Question Generation (CQG) which is to generate a question based on a passage and a conversation history (i.e., previous turns of question-answer pairs). CQG is a crucial task for developing intelligent agents that can drive question-answering style conversations or test user understanding of a given passage. Towards that end, we propose a new approach named Reinforced Dynamic Reasoning network, which is based on the general encoder-decoder framework but incorporates a reasoning procedure in a dynamic manner to better understand what has been asked and what to ask next about the passage. To encourage producing meaningful questions, we leverage a popular question answering (QA) model to provide feedback and fine-tune the question generator using a reinforcement learning mechanism. Empirical results on the recently released CoQA dataset demonstrate the effectiveness of our method in comparison with various baselines and model variants. Moreover, to show the applicability of our method, we also apply it to create multi-turn question-answering conversations for passages in SQuAD.", "phrases": ["conversational question generation", "dynamic reasoning network", "feedback", "answer-unaware cqg"], "overall_score": 1.9321999644145122, "scores": [2.0658867991892094, 0.5773511985150375, 0.5462916058952504, 0.527237471956034], "rank_score": 0.9291917688888829} -{"id": "rink-harabagiu-2010-utd", "title": "UTD: Classifying Semantic Relations by Combining Lexical and Semantic Resources", "abstract": "This paper describes our system for SemEval-2010 Task 8 on multi-way classification of semantic relations between nominals. First, the type of semantic relation is classified. Then a relation type-specific classifier determines the relation direction. Classification is performed using SVM classifiers and a number of features that capture the context, semantic role affiliation, and possible pre-existing relations of the nominals.
This approach achieved an F1 score of 82.19% and an accuracy of 77.92%.", "phrases": ["svm", "support vector machine", "relation classification", "framenet"], "overall_score": 2.632559987740571, "scores": [1.0832935809274955, 1.071148354307367, 1.0094221887168704, 0.5528485525057683], "rank_score": 0.9291781691143752} -{"id": "collins-2003-head", "title": "Head-Driven Statistical Models for Natural Language Parsing", "abstract": "This article describes three statistical models for natural language parsing. The models extend methods from probabilistic context-free grammars to lexicalized grammars, leading to approaches in which a parse tree is represented as the sequence of decisions corresponding to a head-centered, top-down derivation of the tree. Independence assumptions then lead to parameters that encode the X-bar schema, subcategorization, ordering of complements, placement of adjuncts, bigram lexical dependencies, wh-movement, and preferences for close attachment. All of these preferences are expressed by probabilities conditioned on lexical heads. The models are evaluated on the Penn Wall Street Journal Treebank, showing that their accuracy is competitive with other models in the literature. To gain a better understanding of the models, we also give results on different constituent types, as well as a breakdown of precision/recall results in recovering various types of dependencies. We analyze various characteristics of the models through experiments on parsing accuracy, by collecting frequencies of various structures in the treebank, and through linguistically motivated examples. Finally, we compare the models to others that have been applied to parsing the treebank, aiming to give some explanation of the difference in performance of the various models.", "phrases": ["head", "treebank", "generative model", "state-of-the-art parser", "production rule"], "overall_score": 3.5756942723201424, "scores": [1.4597058699774446, 1.198983012125191, 0.8596240066060807, 0.5851706425234283, 0.5400972378947112], "rank_score": 0.9287161538253711} -{"id": "nguyen-daume-iii-2019-help", "title": "Help, Anna! Visual Navigation with Natural Multimodal Assistance via Retrospective Curiosity-Encouraging Imitation Learning", "abstract": "Mobile agents that can leverage help from humans can potentially accomplish more complex tasks than they could entirely on their own. We develop \u201cHelp, Anna!\u201d (HANNA), an interactive photo-realistic simulator in which an agent fulfills object-finding tasks by requesting and interpreting natural language-and-vision assistance. An agent solving tasks in a HANNA environment can leverage simulated human assistants, called ANNA (Automatic Natural Navigation Assistants), which, upon request, provide natural language and visual instructions to direct the agent towards the goals. To address the HANNA problem, we develop a memory-augmented neural agent that hierarchically models multiple levels of decision-making, and an imitation learning algorithm that teaches the agent to avoid repeating past mistakes while simultaneously predicting its own chances of making future progress. 
Empirically, our approach is able to ask for help more effectively than competitive baselines and, thus, attains higher task success rate on both previously seen and previously unseen environments.", "phrases": ["help", "navigation task", "natural language instruction"], "overall_score": 1.8070593498765446, "scores": [1.6577468166643154, 0.566566550607742, 0.5616210461238891], "rank_score": 0.9286448044653156} -{"id": "guo-etal-2013-linking", "title": "Linking Tweets to News: A Framework to Enrich Short Text Data in Social Media", "abstract": "Many current Natural Language Processing [NLP] techniques work well assuming a large context of text as input data. However they become ineffective when applied to short texts such as Twitter feeds. To overcome the issue, we want to find a related newswire document to a given tweet to provide contextual support for NLP tasks. This requires robust modeling and understanding of the semantics of short texts. The contribution of the paper is two-fold: 1. we introduce the Linking-Tweets-to-News task as well as a dataset of linked tweet-news pairs, which can benefit many NLP applications; 2. in contrast to previous research which focuses on lexical features within the short texts (text-to-word information), we propose a graph based latent variable model that models the inter short text correlations (text-to-text information). This is motivated by the observation that a tweet usually only covers one aspect of an event. We show that using tweet specific feature (hashtag) and news specific feature (named entities) as well as temporal constraints, we are able to extract text-to-text correlations, and thus completes the semantic picture of a short text. Our experiments show significant improvement of our new model over baselines with three evaluation metrics in the new task.", "phrases": ["twitter feed", "contextual support", "news"], "overall_score": 1.8067494850696648, "scores": [1.7034930798889587, 0.5517663528578894, 0.5301972636172556], "rank_score": 0.9284855654547014} -{"id": "lo-etal-2014-xmeant", "title": "XMEANT: Better semantic MT evaluation without reference translations", "abstract": "We introduce XMEANT\u2014a new cross-lingual version of the semantic frame based MT evaluation metric MEANT\u2014which can correlate even more closely with human adequacy judgments than monolingual MEANT and eliminates the need for expensive human references. Previous work established that MEANT reflects translation adequacy with state-of-the-art accuracy, and optimizing MT systems against MEANT robustly improves translation quality. However, to go beyond tuning weights in the loglinear SMT model, a cross-lingual objective function that can deeply integrate semantic frame criteria into the MT training pipeline is needed. We show that cross-lingual XMEANT outperforms monolingual MEANT by (1) replacing the monolingual context vector model in MEANT with simple translation probabilities, and (2) incorporating bracketing ITG constraints.", "phrases": ["reference", "semantic frame", "xmeant"], "overall_score": 1.8065849729346533, "scores": [1.405819769908766, 0.8402211800104111, 0.5391621189044811], "rank_score": 0.9284010229412193} -{"id": "sorodoc-etal-2016-look", "title": "\u201cLook, some Green Circles!\u201d: Learning to Quantify from Images", "abstract": "In this paper, we investigate whether a neural network model can learn the meaning of natural language quantifiers (no, some and all) from their use in visual contexts.
We show that memory networks perform well in this task, and that explicit counting is not necessary to the system\u2019s performance, supporting psycholinguistic evidence on the acquisition of quantifiers.", "phrases": ["quantifier", "image", "dot"], "overall_score": 1.663341723163442, "scores": [0.9142720835445285, 1.299218791649538, 0.5714951986566732], "rank_score": 0.9283286912835799} -{"id": "bamman-etal-2014-distributed", "title": "Distributed Representations of Geographically Situated Language", "abstract": "We introduce a model for incorporating contextual information (such as geography) in learning vector-space representations of situated language. In contrast to approaches to multimodal representation learning that have used properties of the object being described (such as its color), our model includes information about the subject (i.e., the speaker), allowing us to learn the contours of a word\u2019s meaning that are shaped by the context in which it is uttered. In a quantitative evaluation on the task of judging geographically informed semantic similarity between representations learned from 1.1 billion words of geo-located tweets, our joint model outperforms comparable independent models that learn meaning in isolation.", "phrases": ["situated language", "contextual information", "word embedding"], "overall_score": 2.5734707209044125, "scores": [1.2578848677161538, 0.9895136415829789, 0.5371515758921738], "rank_score": 0.9281833617304355} -{"id": "addawood-bashir-2016-evidence", "title": "\u201cWhat Is Your Evidence?\u201d A Study of Controversial Topics on Social Media", "abstract": "In recent years, social media has revolutionized how people communicate and share information. One function of social media, besides connecting with friends, is sharing opinions with others. Micro blogging sites, like Twitter, have often provided an online forum for social activism. When users debate about controversial topics on social media, they typically share different types of evidence to support their claims. Classifying these types of evidence can provide an estimate for how adequately the arguments have been supported. We first introduce a manually built gold standard dataset of 3000 tweets related to the recent FBI and Apple encryption debate. We develop a framework for automatically classifying six evidence types typically used on Twitter to discuss the debate. Our findings show that a Support Vector Machine (SVM) classifier trained with n-gram and additional features is capable of capturing the different forms of representing evidence on Twitter, and exhibits significant improvements over the unigram baseline, achieving a macro-averaged F1 of 82.8%.", "phrases": ["twitter", "claim", "evidence type", "expert opinion", "argumentative tweet"], "overall_score": 1.9298532982106762, "scores": [1.381448527320213, 0.9301581721614379, 0.8923177949579181, 0.8566207220320802, 0.5797710884812725], "rank_score": 0.9280632609905842} -{"id": "li-etal-2019-coherent", "title": "Coherent Comments Generation for Chinese Articles with a Graph-to-Sequence Model", "abstract": "Automatic article commenting is helpful in encouraging user engagement on online news platforms. However, the news documents are usually too long for models under traditional encoder-decoder frameworks, which often results in general and irrelevant comments. In this paper, we propose to generate comments with a graph-to-sequence model that models the input news as a topic interaction graph.
By organizing the article into graph structure, our model can better understand the internal structure of the article and the connection between topics, which makes it better able to generate coherent and informative comments. We collect and release a large scale news-comment corpus from a popular Chinese online news platform Tencent Kuaibao. Extensive experiment results show that our model can generate much more coherent and informative comments compared with several strong baseline models.", "phrases": ["graph-to-sequence model", "input news", "topic interaction graph"], "overall_score": 1.019268046645421, "scores": [1.694309828784436, 0.5577519159023172, 0.5312715308251714], "rank_score": 0.927777758503975} -{"id": "watanabe-etal-2007-online", "title": "Online Large-Margin Training for Statistical Machine Translation", "abstract": "We achieved a state of the art performance in statistical machine translation by using a large number of features with an online large-margin training algorithm. The millions of parameters were tuned only on a small development set consisting of less than 1K sentences. Experiments on Arabic-to-English translation indicated that a model trained with sparse binary features outperformed a conventional SMT system with a small number of features.", "phrases": ["statistical machine translation", "large number", "smt system", "mira"], "overall_score": 2.8672159928431813, "scores": [0.9487294872447483, 1.2685731936772418, 0.9508373816045786, 0.5422146622725625], "rank_score": 0.9275886811997828} -{"id": "oortwijn-etal-2021-interrater", "title": "Interrater Disagreement Resolution: A Systematic Procedure to Reach Consensus in Annotation Tasks", "abstract": "We present a systematic procedure for interrater disagreement resolution. The procedure is general, but of particular use in multiple-annotator tasks geared towards ground truth construction. We motivate our proposal by arguing that, barring cases in which the researchers' goal is to elicit different viewpoints, interrater disagreement is a sign of poor quality in the design or the description of a task. Consensus among annotators, we maintain, should be striven for, through a systematic procedure for disagreement resolution such as the one we describe.", "phrases": ["systematic procedure", "annotation task", "interrater disagreement resolution"], "overall_score": 1.2856925511592234, "scores": [0.9608552132833009, 0.9443666452650581, 0.8770715429495786], "rank_score": 0.9274311338326459} -{"id": "mizumoto-etal-2011-mining", "title": "Mining Revision Log of Language Learning SNS for Automated Japanese Error Correction of Second Language Learners", "abstract": "We present an attempt to extract a large-scale Japanese learners\u2019 corpus from the revision log of a language learning SNS. This corpus is easy to obtain in large-scale, covers a wide variety of topics and styles, and can be a great source of knowledge for both language learners and instructors. We also demonstrate that the extracted learners\u2019 corpus of Japanese as a second language can be used as training data for learners\u2019 error correction using an SMT approach. We evaluate different granularities of tokenization to alleviate the problem of word segmentation errors caused by erroneous input from language learners.
Experimental results show that the character-wise model outperforms the word-wise model.", "phrases": ["revision log", "learner", "grammatical error"], "overall_score": 3.0897213965297725, "scores": [1.3687719102127973, 0.8642429383171454, 0.5486764196035236], "rank_score": 0.9272304227111553} -{"id": "sorodoc-etal-2020-probing", "title": "Probing for Referential Information in Language Models", "abstract": "Language models keep track of complex information about the preceding context \u2013 including, e.g., syntactic relations in a sentence. We investigate whether they also capture information beneficial for resolving pronominal anaphora in English. We analyze two state of the art models with LSTM and Transformer architectures, via probe tasks and analysis on a coreference annotated corpus. The Transformer outperforms the LSTM in all analyses. Our results suggest that language models are more successful at learning grammatical constraints than they are at learning truly referential information, in the sense of capturing the fact that we use language to refer to entities in the world. However, we find traces of the latter aspect, too.", "phrases": ["referential information", "language model", "coreference"], "overall_score": 1.9280367454000547, "scores": [0.8842404855662058, 0.9723725769179299, 0.9249559887562204], "rank_score": 0.9271896837467853} -{"id": "lalor-etal-2019-learning", "title": "Learning Latent Parameters without Human Response Patterns: Item Response Theory with Artificial Crowds", "abstract": "Incorporating Item Response Theory (IRT) into NLP tasks can provide valuable information about model performance and behavior. Traditionally, IRT models are learned using human response pattern (RP) data, presenting a significant bottleneck for large data sets like those required for training deep neural networks (DNNs). In this work we propose learning IRT models using RPs generated from artificial crowds of DNN models. We demonstrate the effectiveness of learning IRT models using DNN-generated data through quantitative and qualitative analyses for two NLP tasks. Parameters learned from human and machine RPs for natural language inference and sentiment analysis exhibit medium to large positive correlations. We demonstrate a use-case for latent difficulty item parameters, namely training set filtering, and show that using difficulty to sample training data outperforms baseline methods. Finally, we highlight cases where human expectation about item difficulty does not match difficulty as estimated from the machine RPs.", "phrases": ["item response theory", "artificial crowd", "difficulty", "irt parameter"], "overall_score": 1.8038690387031935, "scores": [1.7692887428795943, 0.8841372399023115, 0.5308098793452372, 0.5237853732396042], "rank_score": 0.9270053088416869} -{"id": "logeswaran-etal-2019-zero", "title": "Zero-Shot Entity Linking by Reading Entity Descriptions", "abstract": "We present the zero-shot entity linking task, where mentions must be linked to unseen entities without in-domain labeled data. The goal is to enable robust transfer to highly specialized domains, and so no metadata or alias tables are assumed. In this setting, entities are only identified by text descriptions, and models must rely strictly on language understanding to resolve the new entities. First, we show that strong reading comprehension models pre-trained on large unlabeled data can be used to generalize to unseen entities. 
Second, we propose a simple and effective adaptive pre-training strategy, which we term domain-adaptive pre-training (DAP), to address the domain shift problem associated with linking unseen entities in a new domain. We present experiments on a new dataset that we construct for this task and show that DAP improves over strong pre-training baselines, including BERT. The data and code are available at .", "phrases": ["entity linking", "pre-training", "zero-shot entity", "candidate", "wikipedia"], "overall_score": 3.0887251807397273, "scores": [0.9243816684165291, 1.716693542258442, 0.8747519459951291, 0.5652489805928338, 0.5535811463881912], "rank_score": 0.9269314567302249} -{"id": "andor-etal-2016-globally", "title": "Globally Normalized Transition-Based Neural Networks", "abstract": "We introduce a globally normalized transition-based neural network model that achieves state-of-the-art part-of-speech tagging, dependency parsing and sentence compression results. Our model is a simple feed-forward neural network that operates on a task-specific transition system, yet achieves comparable or better accuracies than recurrent models. We discuss the importance of global as opposed to local normalization: a key insight is that the label bias problem implies that globally normalized models can be strictly more expressive than locally normalized models.", "phrases": ["normalization", "dependency parsing", "action", "neural network architecture", "improved performance"], "overall_score": 3.2949912943904804, "scores": [1.3511046291194766, 1.259215896120093, 0.8458967356506157, 0.6163938004026623, 0.5612408674127423], "rank_score": 0.9267703857411181} -{"id": "yin-etal-2021-batchmixup", "title": "BatchMixup: Improving Training by Interpolating Hidden States of the Entire Mini-batch", "abstract": "Usually, we train a neural system on a sequence of mini-batches of labeled instances. Each mini-batch is composed of k samples, and each sample will learn a representation vector. Mixup implicitly generates synthetic samples through linearly interpolating inputs and their corresponding labels of random sample pairs in the same mini-batch. This means that Mixup only generates new points on the edges connecting every two original points in the representation space. We observed that the new points by the standard Mixup cover pretty limited regions in the entire space of the mini-batch. In this work, we propose BatchMixup\u2014improving the model learning by interpolating hidden states of the entire mini-batch. BatchMixup can generate new points scattered throughout the space corresponding to the mini-batch. In experiments, BatchMixup shows superior performance than competitive baselines in improving the performance of NLP tasks while using different ratios of training data.", "phrases": ["hidden state", "entire mini-batch", "sample", "mixup"], "overall_score": 1.4914841492484978, "scores": [2.050157011105768, 0.5794611036008406, 0.5475264636976268, 0.5297002466758194], "rank_score": 0.9267112062700137} -{"id": "hangya-fraser-2018-unsupervised", "title": "An Unsupervised System for Parallel Corpus Filtering", "abstract": "In this paper we describe LMU Munich's submission for the WMT 2018 Parallel Corpus Filtering shared task which addresses the problem of cleaning noisy parallel corpora. The task of mining and cleaning parallel sentences is important for improving the quality of machine translation systems, especially for low-resource languages.
We tackle this problem in a fully unsupervised fashion relying on bilingual word embeddings created without any bilingual signal. After pre-filtering noisy data we rank sentence pairs by calculating bilingual sentence-level similarities and then remove redundant data by employing monolingual similarity as well. Our unsupervised system achieved good performance during the official evaluation of the shared task, scoring only a few BLEU points behind the best systems, while not requiring any parallel training data.", "phrases": ["unsupervised system", "parallel corpus filtering", "sentence pair"], "overall_score": 1.4913798495883352, "scores": [1.0064263372049094, 0.890524217460704, 0.8829886490768631], "rank_score": 0.9266464012474921} -{"id": "wiseman-rush-2016-sequence", "title": "Sequence-to-Sequence Learning as Beam-Search Optimization", "abstract": "Sequence-to-Sequence (seq2seq) modeling has rapidly become an important general-purpose NLP tool that has proven effective for many text-generation and sequence-labeling tasks. Seq2seq builds on deep neural language modeling and inherits its remarkable accuracy in estimating local, next-word distributions. In this work, we introduce a model and beam-search training scheme, based on the work of Daume III and Marcu (2005), that extends seq2seq to learn global sequence scores. This structured approach avoids classical biases associated with local training and unifies the training loss with the test-time usage, while preserving the proven model architecture of seq2seq and its efficient training approach. We show that our system outperforms a highly-optimized attention-based seq2seq system and other baselines on three different sequence to sequence tasks: word ordering, parsing, and machine translation.", "phrases": ["beam search", "seq2seq model", "sequence-to-sequence", "improved performance", "contrastive learning"], "overall_score": 3.566210147404918, "scores": [2.0371205159888426, 0.8531106037191393, 0.6320190545382574, 0.5689753268950704, 0.5400386948351921], "rank_score": 0.9262528391953003} -{"id": "kudugunta-etal-2019-investigating", "title": "Investigating Multilingual NMT Representations at Scale", "abstract": "Multilingual Neural Machine Translation (NMT) models have yielded large empirical success in transfer learning settings. However, these black-box representations are poorly understood, and their mode of transfer remains elusive. In this work, we attempt to understand massively multilingual NMT representations (with 103 languages) using Singular Value Canonical Correlation Analysis (SVCCA), a representation similarity framework that allows us to compare representations across different languages, layers and models. Our analysis validates several empirical results and long-standing intuitions, and unveils new observations regarding how representations evolve in a multilingual translation model. We draw three major results from our analysis, with implications on cross-lingual transfer learning: (i) Encoder representations of different languages cluster based on linguistic similarity, (ii) Representations of a source language learned by the encoder are dependent on the target language, and vice-versa, and (iii) Representations of high resource and/or linguistically similar languages are more robust when fine-tuning on an arbitrary language pair, which is critical to determining how much cross-lingual transfer can be expected in a zero or few-shot setting. 
We further connect our findings with existing empirical observations in multilingual NMT and transfer learning.", "phrases": ["scale", "svcca", "different language"], "overall_score": 2.6771596809016316, "scores": [0.8176602324302111, 1.040226133369232, 0.9208146856358316], "rank_score": 0.9262336838117582} -{"id": "uehara-etal-2015-detecting", "title": "Detecting an Infant's Developmental Reactions in Reviews on Picture Books", "abstract": "We extracted the book reviews on picture books written on a Web site specialized in picture books, and found that those reviews reflect infants\u2019 behavioral expressions as well as their parents\u2019 reading activities in detail. Analysis of the reviews reveals that infants\u2019 reactions written in the reviews are coincident with the findings of developmental psychology concerning infants\u2019 behaviors. In order to examine how the stimuli of picture books induce varieties of infants\u2019 reactions, this paper proposes to detect an infant\u2019s developmental reactions in reviews on picture books and shows the effectiveness of the proposed method through experimental evaluation.", "phrases": ["infant", "developmental reaction", "review"], "overall_score": 1.017435084145297, "scores": [1.6907302991325703, 0.5505453768254482, 0.5370522964482609], "rank_score": 0.9261093241354263} -{"id": "huang-etal-2019-hubless", "title": "Hubless Nearest Neighbor Search for Bilingual Lexicon Induction", "abstract": "Bilingual Lexicon Induction (BLI) is the task of translating words from corpora in two languages. Recent advances in BLI work by aligning the two word embedding spaces. Following that, a key step is to retrieve the nearest neighbor (NN) in the target space given the source word. However, a phenomenon called hubness often degrades the accuracy of NN. Hubness appears as some data points, called hubs, being extraordinarily close to many of the other data points. Reducing hubness is necessary for retrieval tasks. One successful example is Inverted Softmax (ISF), recently proposed to improve NN. This work proposes a new method, Hubless Nearest Neighbor (HNN), to mitigate hubness. HNN differs from NN by imposing an additional equal preference assumption. Moreover, the HNN formulation explains why ISF works as well as it does. Empirical results demonstrate that HNN outperforms NN, ISF and other state-of-the-art methods. For reproducibility and follow-ups, we have published all code.", "phrases": ["bilingual lexicon induction", "bli", "hub", "other data point"], "overall_score": 1.490394498257507, "scores": [2.0603434525049376, 0.5902195166155499, 0.5270413506409053, 0.5265323524099415], "rank_score": 0.9260341680428337} -{"id": "zhong-ng-2010-makes", "title": "It Makes Sense: A Wide-Coverage Word Sense Disambiguation System for Free Text", "abstract": "Word sense disambiguation (WSD) systems based on supervised learning achieved the best performance in SensEval and SemEval workshops. However, there are few publicly available open source WSD systems. This limits the use of WSD in other applications, especially for researchers whose research interests are not in WSD. \n \nIn this paper, we present IMS, a supervised English all-words WSD system. The flexible framework of IMS allows users to integrate different preprocessing tools, additional features, and different classifiers. By default, we use linear support vector machines as the classifier with multiple knowledge-based features. 
In our implementation, IMS achieves state-of-the-art results on several SensEval and SemEval tasks.", "phrases": ["word sense disambiguation", "wsd system", "supervised approach", "lemma", "state-of-the-art system"], "overall_score": 3.3654658637011243, "scores": [1.7955752019782307, 1.0747496820506022, 0.6383973973052318, 0.559940360406713, 0.5572977010848483], "rank_score": 0.9251920685651251} -{"id": "turner-etal-2008-using", "title": "Using Spatial Reference Frames to Generate Grounded Textual Summaries of Georeferenced Data", "abstract": "Summarising georeferenced data (data that can be identified according to its location) in natural language is challenging because it requires linking events describing its non-geographic attributes to their underlying geography. This mapping is not straightforward as often the only explicit geographic information such data contains is latitude and longitude. In this paper we present an approach to generating textual summaries of georeferenced data based on spatial reference frames. This approach has been implemented in a data-to-text system we have deployed in the weather forecasting domain.", "phrases": ["georeferenced data", "explicit geographic information", "spatio-temporal data"], "overall_score": 1.6574513291421586, "scores": [1.7074195352706572, 0.5441584818966387, 0.5235455821898871], "rank_score": 0.9250411997857277} -{"id": "fan-etal-2019-eli5", "title": "ELI5: Long Form Question Answering", "abstract": "We introduce the first large-scale corpus for long form question answering, a task requiring elaborate and in-depth answers to open-ended questions. The dataset comprises 270K threads from the Reddit forum \u201cExplain Like I'm Five\u201d (ELI5) where an online community provides answers to questions which are comprehensible by five year olds. Compared to existing datasets, ELI5 comprises diverse questions requiring multi-sentence answers. We provide a large set of web documents to help answer the question. Automatic and human evaluations show that an abstractive model trained with a multi-task objective outperforms conventional Seq2Seq, language modeling, as well as a strong extractive baseline. However, our best model is still far from human performance since raters prefer gold responses in over 86% of cases, leaving ample opportunity for future improvement.", "phrases": ["multi-sentence answer", "eli5", "explanation"], "overall_score": 2.673549280190527, "scores": [1.5871778748248946, 0.6296197806529681, 0.5581560571925193], "rank_score": 0.9249845708901274} -{"id": "hewitt-manning-2019-structural", "title": "A Structural Probe for Finding Syntax in Word Representations", "abstract": "Recent work has improved our ability to detect linguistic knowledge in word representations. However, current methods for detecting syntactic knowledge do not test whether syntax trees are represented in their entirety. In this work, we propose a structural probe, which evaluates whether syntax trees are embedded in a linear transformation of a neural network's word representation space. The probe identifies a linear transformation under which squared L2 distance encodes the distance between words in the parse tree, and one in which squared L2 norm encodes depth in the parse tree. 
Using our probe, we show that such transformations exist for both ELMo and BERT but not in baselines, providing evidence that entire syntax trees are embedded implicitly in deep models' vector geometry.", "phrases": ["structural probe", "syntax tree", "distance", "elmo"], "overall_score": 2.9000234333138417, "scores": [0.8396643870824654, 1.3564087474904334, 0.9764369382239447, 0.5270960927396088], "rank_score": 0.9249015413841131} -{"id": "gamon-etal-2008-using", "title": "Using Contextual Speller Techniques and Language Modeling for ESL Error Correction", "abstract": "We present a modular system for detection and correction of errors made by nonnative (English as a Second Language = ESL) writers. We focus on two error types: the incorrect use of determiners and the choice of prepositions. We use a decision-tree approach inspired by contextual spelling systems for detection and correction suggestions, and a large language model trained on the Gigaword corpus to provide additional information to filter out spurious suggestions. We show how this system performs on a corpus of non-native English text and discuss strategies for future enhancements.", "phrases": ["language modeling", "esl", "grammatical error"], "overall_score": 3.1142374473484424, "scores": [1.6755546679637499, 0.5788317140675204, 0.520158182436906], "rank_score": 0.9248481881560587} -{"id": "bilu-slonim-2016-claim", "title": "Claim Synthesis via Predicate Recycling", "abstract": "Computational Argumentation has two main goals - the detection and analysis of arguments on the one hand, and the synthesis of arguments on the other. Much attention has been given to the former, but considerably less to the latter. A key component in synthesizing arguments is the synthesis of claims. One way to do so is by employing argumentation mining to detect claims within an appropriate corpus. In general, this appears to be a hard problem. Thus, it is interesting to explore if - for the sake of synthesis - there may be other ways to generate claims. Here we explore such a method: we extract the predicate of simple, manually-detected, claims, and attempt to generate novel claims from them. Surprisingly, this simple method yields fairly good results.", "phrases": ["synthesis", "predicate", "claim", "multiple interaction"], "overall_score": 2.031806709484238, "scores": [0.8506008048762419, 1.17282810497528, 1.141572804188279, 0.5338586217525031], "rank_score": 0.924715083948076} -{"id": "negri-etal-2018-escape", "title": "ESCAPE: a Large-scale Synthetic Corpus for Automatic Post-Editing", "abstract": "Training models for the automatic correction of machine-translated text usually relies on data consisting of (source, MT, human post-edit) triplets providing, for each source sentence, examples of translation errors with the corresponding corrections made by a human post-editor. Ideally, a large amount of data of this kind should allow the model to learn reliable correction patterns and effectively apply them at test stage on unseen (source, MT) pairs. In practice, however, their limited availability calls for solutions that also integrate in the training process other sources of knowledge. Along this direction, state-of-the-art results have been recently achieved by systems that, in addition to a limited amount of available training data, exploit artificial corpora that approximate elements of the \"gold\" training instances with automatic translations. 
Following this idea, we present eSCAPE, the largest freely-available Synthetic Corpus for Automatic Post-Editing released so far. eSCAPE consists of millions of entries in which the MT element of the training triplets has been obtained by translating the source side of publicly-available parallel corpora, and using the target side as an artificial human post-edit. Translations are obtained both with phrase-based and neural models. For each MT paradigm, eSCAPE contains 7.2 million triplets for English-German and 3.3 million for English-Italian, resulting in a total of 14.4 and 6.6 million instances, respectively. The usefulness of eSCAPE is proved through experiments in a general-domain scenario, the most challenging one for automatic post-editing. For both language directions, the models trained on our artificial data always improve MT quality with statistically significant gains. The current version of eSCAPE can be freely downloaded from: this http URL", "phrases": ["synthetic corpus", "automatic post-editing", "source sentence", "ape data", "escape"], "overall_score": 2.1281601713253853, "scores": [2.0774697181486688, 0.8825301771435426, 0.5711066743576485, 0.5520726194070887, 0.5380619060075194], "rank_score": 0.9242482190128938} -{"id": "eisner-2003-learning", "title": "Learning Non-Isomorphic Tree Mappings for Machine Translation", "abstract": "Often one may wish to learn a tree-to-tree mapping, training it on unaligned pairs of trees, or on a mixture of trees and strings. Unlike previous statistical formalisms (limited to isomorphic trees), synchronous TSG allows local distortion of the tree topology. We reformulate it to permit dependency trees, and sketch EM/Viterbi algorithms for alignment, training, and decoding.", "phrases": ["non-isomorphism", "machine translation", "tree fragment", "cross-lingual structure divergence", "synchronous grammar"], "overall_score": 3.361433333425863, "scores": [1.8155971856255775, 1.0753870040238853, 0.6077841283777419, 0.5720926931709751, 0.5495564659767185], "rank_score": 0.9240834954349797} -{"id": "singh-etal-2011-large", "title": "Large-Scale Cross-Document Coreference Using Distributed Inference and Hierarchical Models", "abstract": "Cross-document coreference, the task of grouping all the mentions of each entity in a document collection, arises in information extraction and automated knowledge base construction. For large collections, it is clearly impractical to consider all possible groupings of mentions into distinct entities. To solve the problem we propose two ideas: (a) a distributed inference technique that uses parallelism to enable large scale processing, and (b) a hierarchical model of coreference that represents uncertainty over multiple granularities of entities to facilitate more effective approximate inference. To evaluate these ideas, we constructed a labeled corpus of 1.5 million disambiguated mentions in Web pages by selecting link anchors referring to Wikipedia entities. 
We show that the combination of the hierarchical model with distributed inference quickly obtains high accuracy (with error reduction of 38%) on this large dataset, demonstrating the scalability of our approach.", "phrases": ["cross-document coreference", "knowledge base", "equivalence class", "ccr"], "overall_score": 2.71994422998951, "scores": [2.097914500616951, 0.5423203455091539, 0.5314037794726808, 0.5233868094462512], "rank_score": 0.9237563587612592} -{"id": "chen-etal-2016-neural-sentiment", "title": "Neural Sentiment Classification with User and Product Attention", "abstract": "Document-level sentiment classification aims to predict user\u2019s overall sentiment in a document about a product. However, most existing methods only focus on local text information and ignore the global user preference and product characteristics. Even though some works take such information into account, they usually suffer from high model complexity and only consider word-level preference rather than semantic levels. To address this issue, we propose a hierarchical neural network to incorporate global user and product information into sentiment classification. Our model first builds a hierarchical LSTM model to generate sentence and document representations. Afterwards, user and product information is considered via attentions over different semantic levels due to their ability to capture crucial semantic components. The experimental results show that our model achieves significant and consistent improvements compared to all state-of-the-art methods. The source code of this paper can be obtained from https://github.com/thunlp/NSC.", "phrases": ["sentiment classification", "product", "word-level preference", "attention model", "state-of-the-art model"], "overall_score": 2.7658269604806, "scores": [1.5065253044319018, 1.4597806281042542, 0.5877047265732983, 0.5395155256307526, 0.5227524208231628], "rank_score": 0.923255721112674} -{"id": "apidianaki-2009-data", "title": "Data-Driven Semantic Analysis for Multilingual WSD and Lexical Selection in Translation", "abstract": "A common way of describing the senses of ambiguous words in multilingual Word Sense Disambiguation (WSD) is by reference to their translation equivalents in another language. The theoretical soundness of the senses induced in this way can, however, be doubted. This type of cross-lingual sense identification has implications for multilingual WSD and MT evaluation as well. In this article, we first present some arguments in favour of a more thorough analysis of the semantic information that may be induced by the equivalents of ambiguous words found in parallel corpora. Then, we present an unsupervised WSD method and a lexical selection method that exploit the results of a data-driven sense induction method. Finally, we show how this automatically acquired information can be exploited for a multilingual WSD and MT evaluation more sensitive to lexical semantics.", "phrases": ["wsd", "sense inventory", "cross-lingual evidence idea", "machine translation"], "overall_score": 2.2929629628568993, "scores": [1.6780332627009749, 0.8320366009152658, 0.6027581013653056, 0.5781967470373514], "rank_score": 0.9227561780047243} -{"id": "beck-etal-2018-graph", "title": "Graph-to-Sequence Learning using Gated Graph Neural Networks", "abstract": "Many NLP applications can be framed as a graph-to-sequence learning problem. 
Previous work proposing neural architectures on graph-to-sequence obtained promising results compared to grammar-based approaches but still relies on linearisation heuristics and/or standard recurrent networks to achieve the best performance. In this work we propose a new model that encodes the full structural information contained in the graph. Our architecture couples the recently proposed Gated Graph Neural Networks with an input transformation that allows nodes and edges to have their own hidden representations, while tackling the parameter explosion problem present in previous work. Experimental results show that our model outperforms strong baselines in generation from AMR graphs and syntax-based neural machine translation.", "phrases": ["neural architecture", "gnn", "graph-to-sequence model", "sequential encoder", "amr-to-text problem"], "overall_score": 3.1680297551308865, "scores": [1.4670782456746976, 1.2174717118282896, 0.8634655313211892, 0.5355152209462496, 0.5292263660176086], "rank_score": 0.922551415157607} -{"id": "mishra-etal-2017-learning", "title": "Learning Cognitive Features from Gaze Data for Sentiment and Sarcasm Classification using Convolutional Neural Network", "abstract": "Cognitive NLP systems - i.e., NLP systems that make use of behavioral data - augment traditional text-based features with cognitive features extracted from eye-movement patterns, EEG signals, brain-imaging etc. Such extraction of features is typically manual. We contend that manual extraction of features may not be the best way to tackle text subtleties that characteristically prevail in complex classification tasks like Sentiment Analysis and Sarcasm Detection, and that even the extraction and choice of features should be delegated to the learning system. We introduce a framework to automatically extract cognitive features from the eye-movement/gaze data of human readers reading the text and use them as features along with textual features for the tasks of sentiment polarity and sarcasm detection. Our proposed framework is based on Convolutional Neural Network (CNN). The CNN learns features from both gaze and text and uses them to classify the input text. We test our technique on published sentiment and sarcasm labeled datasets, enriched with gaze information, to show that using a combination of automatically learned text and gaze features often yields better classification performance over (i) CNN based systems that rely on text input alone and (ii) existing systems that rely on handcrafted gaze and textual features.", "phrases": ["sarcasm classification", "convolutional neural network", "gaze feature"], "overall_score": 1.652851203715311, "scores": [1.3559994867248142, 0.8740274691137493, 0.5373945068660428], "rank_score": 0.9224738209015354} -{"id": "max-wisniewski-2010-mining", "title": "Mining Naturally-occurring Corrections and Paraphrases from Wikipedia's Revision History", "abstract": "Naturally-occurring instances of linguistic phenomena are important both for training and for evaluating automatic text processing. When available in large quantities, they also prove interesting material for linguistic studies. In this article, we present WiCoPaCo (Wikipedia Correction and Paraphrase Corpus), a new freely-available resource built by automatically mining Wikipedia\u2019s revision history. 
The WiCoPaCo corpus focuses on local modifications made by human revisors and includes various types of corrections (such as spelling or typographical corrections) and rewritings, which can be categorized broadly into meaning-preserving and meaning-altering revisions. We present an initial hand-built typology of these revisions, but the resource allows for any possible annotation scheme. We discuss the main motivations for building such a resource and describe the main technical details guiding its construction. We also present applications and data analysis on French and report initial results on spelling error correction and morphosyntactic rewriting. The WiCoPaCo corpus can be freely downloaded from .", "phrases": ["wikipedia", "revision history", "paraphrase corpus", "spelling error", "rewriting"], "overall_score": 1.9181793803987128, "scores": [1.350660248666418, 1.2850415845900405, 0.8551323726713079, 0.5892625421006298, 0.53214971803275], "rank_score": 0.9224492932122292} -{"id": "sun-etal-2011-semi", "title": "Semi-supervised Relation Extraction with Large-scale Word Clustering", "abstract": "We present a simple semi-supervised relation extraction system with large-scale word clustering. We focus on systematically exploring the effectiveness of different cluster-based features. We also propose several statistical methods for selecting clusters at an appropriate level of granularity. When training on different sizes of data, our semi-supervised approach consistently outperformed a state-of-the-art supervised baseline system.", "phrases": ["relation extraction", "large-scale word", "kernel", "brown cluster"], "overall_score": 2.2919843179385295, "scores": [1.9809025680000376, 0.6334732525685712, 0.5426131847511241, 0.5324603639498455], "rank_score": 0.9223623423173946} -{"id": "mcdonald-etal-2005-simple", "title": "Simple Algorithms for Complex Relation Extraction with Applications to Biomedical IE", "abstract": "A complex relation is any n-ary relation in which some of the arguments may be unspecified. We present here a simple two-stage method for extracting complex relations between named entities in text. The first stage creates a graph from pairs of entities that are likely to be related, and the second stage scores maximal cliques in that graph as potential complex relation instances. We evaluate the new method against a standard baseline for extracting genomic variation relations from biomedical text.", "phrases": ["complex relation", "maximal clique", "biomedical domain"], "overall_score": 2.123475103998528, "scores": [1.3420916242428134, 0.8457653124834253, 0.5787836236502443], "rank_score": 0.9222135201254944} -{"id": "goldman-etal-2018-weakly", "title": "Weakly Supervised Semantic Parsing with Abstract Examples", "abstract": "Training semantic parsers from weak supervision (denotations) rather than strong supervision (programs) complicates training in two ways. First, a large search space of potential programs needs to be explored at training time to find a correct program. Second, spurious programs that accidentally lead to a correct denotation add noise to training. In this work we propose that in closed worlds with clear semantic types, one can substantially alleviate these problems by utilizing an abstract representation, where tokens in both the language utterance and program are lifted to an abstract form. 
We show that these abstractions can be defined with a handful of lexical rules and that they result in sharing between different examples that alleviates the difficulties in training. To test our approach, we develop the first semantic parser for CNLVR, a challenging visual reasoning dataset, where the search space is large and overcoming spuriousness is critical, because denotations are either TRUE or FALSE, and thus random programs are likely to lead to a correct denotation. Our method substantially improves performance, and reaches 82.5% accuracy, a 14.7% absolute accuracy improvement compared to the best reported accuracy so far.", "phrases": ["abstract example", "semantic parser", "search space"], "overall_score": 2.21080986135474, "scores": [0.8945483814372833, 1.3119122847340687, 0.5594773042238687], "rank_score": 0.9219793234650736} -{"id": "pantel-ravichandran-2004-automatically", "title": "Automatically Labeling Semantic Classes", "abstract": "Systems that automatically discover semantic classes have emerged in part to address the limitations of broad-coverage lexical resources such as WordNet and Cyc. The current state of the art discovers many semantic classes but fails to label their concepts. We propose an algorithm for labeling semantic classes and for leveraging them to extract is-a relationships using a top-down approach.", "phrases": ["semantic class", "noun", "cluster"], "overall_score": 2.4328352438190683, "scores": [1.2970697806377467, 0.8903397027633557, 0.578163529945006], "rank_score": 0.9218576711153695} -{"id": "thadani-mckeown-2011-optimal", "title": "Optimal and Syntactically-Informed Decoding for Monolingual Phrase-Based Alignment", "abstract": "The task of aligning corresponding phrases across two related sentences is an important component of approaches for natural language problems such as textual inference, paraphrase detection and text-to-text generation. In this work, we examine a state-of-the-art structured prediction model for the alignment task which uses a phrase-based representation and is forced to decode alignments using an approximate search approach. We propose instead a straightforward exact decoding technique based on integer linear programming that yields order-of-magnitude improvements in decoding speed. This ILP-based decoding strategy permits us to consider syntactically-informed constraints on alignments which significantly increase the precision of the model.", "phrases": ["decoding", "phrase-based alignment", "integer linear programming", "semantic unit"], "overall_score": 2.025186326677395, "scores": [0.9483833106069848, 0.853171597118337, 1.3553850964560452, 0.5298680673587881], "rank_score": 0.9217020178850388} -{"id": "tu-etal-2019-multi", "title": "Multi-hop Reading Comprehension across Multiple Documents by Reasoning over Heterogeneous Graphs", "abstract": "Multi-hop reading comprehension (RC) across documents poses a new challenge over single-document RC because it requires reasoning over multiple documents to reach the final answer. In this paper, we propose a new model to tackle the multi-hop RC problem. We introduce a heterogeneous graph with different types of nodes and edges, which is named the Heterogeneous Document-Entity (HDE) graph. The advantage of the HDE graph is that it contains different granularity levels of information including candidates, documents and entities in specific document contexts. 
Our proposed model can do reasoning over the HDE graph with node representations initialized with co-attention and self-attention based context encoders. We employ Graph Neural Networks (GNN) based message passing algorithms to accumulate evidence on the proposed HDE graph. Evaluated on the blind test set of the Qangaroo WikiHop data set, our HDE graph based single model delivers competitive results, and the ensemble model achieves state-of-the-art performance.", "phrases": ["comprehension", "heterogeneous graph", "candidate", "multi-hop", "multi-hop reading comprehension"], "overall_score": 2.2900033109025255, "scores": [0.8725209872148246, 0.7963820199789524, 1.255112683296908, 1.0457139771207684, 0.638095964586635], "rank_score": 0.9215651264396177} -{"id": "yamamoto-etal-2003-learning", "title": "Learning Sequence-to-Sequence Correspondences from Parallel Corpora via Sequential Pattern Mining", "abstract": "We present an unsupervised extraction of sequence-to-sequence correspondences from parallel corpora by sequential pattern mining. The main characteristics of our method are two-fold. First, we propose a systematic way to enumerate all possible translation pair candidates of rigid and gapped sequences without falling into combinatorial explosion. Second, our method uses an efficient data structure and algorithm for calculating frequencies in a contingency table for each translation pair candidate. Our method is empirically evaluated using English-Japanese parallel corpora of 6 million words. Results indicate that it works well for multi-word translations, giving 56-84% accuracy at 19% token coverage and 11% type coverage.", "phrases": ["sequence-to-sequence correspondence", "parallel corpora", "sequential pattern mining"], "overall_score": 1.0122676951653427, "scores": [0.9529083957727947, 0.9347416828970514, 0.8765672132900513], "rank_score": 0.9214057639866325} -{"id": "peters-etal-2019-sparse", "title": "Sparse Sequence-to-Sequence Models", "abstract": "Sequence-to-sequence models are a powerful workhorse of NLP. Most variants employ a softmax transformation in both their attention mechanism and output layer, leading to dense alignments and strictly positive output probabilities. This density is wasteful, making models less interpretable and assigning probability mass to many implausible outputs. In this paper, we propose sparse sequence-to-sequence models, rooted in a new family of \u03b1-entmax transformations, which includes softmax and sparsemax as particular cases, and is sparse for any \u03b1 > 1. We provide fast algorithms to evaluate these transformations and their gradients, which scale well for large vocabulary sizes. Our models are able to produce sparse alignments and to assign nonzero probability to a short list of plausible outputs, sometimes rendering beam search exact. Experiments on morphological inflection and machine translation reveal consistent gains over dense models.", "phrases": ["probability", "\u03b1-entmax transformation", "sparse sequence-to-sequence model"], "overall_score": 2.120721581223988, "scores": [1.03694869293376, 1.155479941405622, 0.5706244067967685], "rank_score": 0.9210176803787169} -{"id": "mou-etal-2016-transferable", "title": "How Transferable are Neural Networks in NLP Applications?", "abstract": "Transfer learning aims to make use of valuable knowledge in a source domain to help model performance in a target domain. It is particularly important for neural networks, which are very likely to overfit. 
In some fields like image processing, many studies have shown the effectiveness of neural network-based transfer learning. For neural NLP, however, existing studies have only casually applied transfer learning, and conclusions are inconsistent. In this paper, we conduct systematic case studies and provide an illuminating picture on the transferability of neural networks in NLP.", "phrases": ["target domain", "semantic relatedness", "language inference"], "overall_score": 3.247399027019483, "scores": [1.3732862272940263, 0.8426812881161285, 0.5467100421033024], "rank_score": 0.9208925191711522} -{"id": "wenzek-etal-2020-ccnet", "title": "CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data", "abstract": "Pre-training text representations have led to significant improvements in many areas of natural language processing. The quality of these models benefits greatly from the size of the pretraining corpora as long as its quality is preserved. In this paper, we describe an automatic pipeline to extract massive high-quality monolingual datasets from Common Crawl for a variety of languages. Our pipeline follows the data processing introduced in fastText (Mikolov et al., 2017; Grave et al., 2018), that deduplicates documents and identifies their language. We augment this pipeline with a filtering step to select documents that are close to high quality corpora like Wikipedia.", "phrases": ["common crawl", "ccnet", "low-resource language"], "overall_score": 2.4301034487036706, "scores": [0.7929325860665726, 1.4120051982056299, 0.5575298075833512], "rank_score": 0.9208225306185179} -{"id": "yang-katiyar-2020-simple", "title": "Simple and Effective Few-Shot Named Entity Recognition with Structured Nearest Neighbor Learning", "abstract": "We present a simple few-shot named entity recognition (NER) system based on nearest neighbor learning and structured inference. Our system uses a supervised NER model trained on the source domain, as a feature extractor. Across several test domains, we show that a nearest neighbor classifier in this feature-space is far more effective than the standard meta-learning approaches. We further propose a cheap but effective method to capture the label dependencies between entity tags without expensive CRF training. We show that our method of combining structured decoding with nearest neighbor learning achieves state-of-the-art performance on standard few-shot NER evaluation tasks, improving F1 scores by 6% to 16% absolute points over prior meta-learning based systems.", "phrases": ["entity recognition", "neighbor", "few-shot ner", "annotated data", "viterbi decoder"], "overall_score": 2.8032374847293218, "scores": [1.9528559342716012, 0.9642043029391923, 0.5681718368304773, 0.5664443001397693, 0.5520628691163008], "rank_score": 0.9207478486594682} -{"id": "liu-etal-2008-understanding", "title": "Understanding and Summarizing Answers in Community-Based Question Answering Services", "abstract": "Community-based question answering (cQA) services have accumulated millions of questions and their answers over time. In the process of accumulation, cQA services assume that questions always have unique best answers. However, with an in-depth analysis of questions and answers on cQA services, we find that the assumption cannot be true. According to the analysis, at least 78% of the cQA best answers are reusable when similar questions are asked again, but no more than 48% of them are indeed the unique best answers. 
We conduct the analysis by proposing taxonomies for cQA questions and answers. To better reuse the cQA content, we also propose applying automatic summarization techniques to summarize answers. Our results show that question-type oriented summarization techniques can improve cQA answer quality significantly.", "phrases": ["cqa service", "good answer", "answer summarization"], "overall_score": 2.2868158407584533, "scores": [1.285746961532147, 0.9484737945494036, 0.5266264261901286], "rank_score": 0.9202823940905599} -{"id": "lu-etal-2007-improving", "title": "Improving Statistical Machine Translation Performance by Training Data Selection and Optimization", "abstract": "A parallel corpus is an indispensable resource for translation model training in statistical machine translation (SMT). Instead of collecting more and more parallel training corpora, this paper aims to improve SMT performance by exploiting the full potential of the existing parallel corpora. Two kinds of methods are proposed: offline data optimization and online model optimization. The offline method adapts the training data by redistributing the weight of each training sentence pair. The online method adapts the translation model by redistributing the weight of each predefined submodel. An information retrieval model is used for the weighting scheme in both methods. Experimental results show that without using any additional resource, both methods can improve SMT performance significantly.", "phrases": ["training data selection", "parallel corpus", "translation model", "tf-idf", "information retrieval method"], "overall_score": 2.9615601842412853, "scores": [0.9022510285301761, 1.3764192065178118, 0.9323290445275042, 0.8265716366580628, 0.5627310919412216], "rank_score": 0.9200604016349555} -{"id": "mayfield-etal-2003-named", "title": "Named Entity Recognition using Hundreds of Thousands of Features", "abstract": "We present an approach to named entity recognition that uses support vector machines to capture transition probabilities in a lattice. The support vector machines are trained with hundreds of thousands of features drawn from the CoNLL-2003 Shared Task training data. Margin outputs are converted to estimated probabilities using a simple static function. Performance is evaluated using the CoNLL-2003 Shared Task test set; Test B results were F\u03b2=1 = 84.67 for English, and F\u03b2=1 = 69.96 for German.", "phrases": ["entity recognition", "hundred", "dependent feature", "pos tag", "position"], "overall_score": 1.6481057578391725, "scores": [2.014118251041714, 0.8406108882977468, 0.5954223709354963, 0.5832826391826339, 0.5656925361941023], "rank_score": 0.9198253371303388} -{"id": "khapra-etal-2011-together", "title": "Together We Can: Bilingual Bootstrapping for WSD", "abstract": "Recent work on bilingual Word Sense Disambiguation (WSD) has shown that a resource deprived language (L1) can benefit from the annotation work done in a resource rich language (L2) via parameter projection. However, this method assumes the presence of sufficient annotated data in one resource rich language which may not always be possible. Instead, we focus on the situation where there are two resource deprived languages, both having a very small amount of seed annotated data and a large amount of untagged data. We then use bilingual bootstrapping, wherein a model trained using the seed annotated data of L1 is used to annotate the untagged data of L2 and vice versa using parameter projection. 
The untagged instances of L1 and L2 which get annotated with high confidence are then added to the seed data of the respective languages and the above process is repeated. Our experiments show that such a bilingual bootstrapping algorithm when evaluated on two different domains with small seed sizes using Hindi (L1) and Marathi (L2) as the language pair performs better than monolingual bootstrapping and significantly reduces annotation cost.", "phrases": ["bilingual bootstrapping", "wsd", "parameter projection", "untagged data", "vice"], "overall_score": 1.0098309668216587, "scores": [2.0939729842843535, 0.8732908676965533, 0.5529226299268818, 0.5401395029263895, 0.535612806483712], "rank_score": 0.9191877582635781} -{"id": "wang-etal-2020-inference", "title": "On the Inference Calibration of Neural Machine Translation", "abstract": "Confidence calibration, which aims to make model predictions equal to the true correctness measures, is important for neural machine translation (NMT) because it is able to offer useful indicators of translation errors in the generated output. While prior studies have shown that NMT models trained with label smoothing are well-calibrated on the ground-truth training data, we find that miscalibration still remains a severe challenge for NMT during inference due to the discrepancy between training and inference. By carefully designing experiments on three language pairs, our work provides in-depth analyses of the correlation between calibration and translation performance as well as linguistic properties of miscalibration and reports a number of interesting findings that might help humans better analyze, understand and improve NMT models. Based on these observations, we further propose a new graduated label smoothing method that can improve both inference calibration and translation performance.", "phrases": ["inference calibration", "neural machine translation", "miscalibration", "label smoothing method", "in-depth analysis"], "overall_score": 2.356347664303978, "scores": [1.730654915493291, 0.9265197708475134, 0.8303040677027482, 0.5547865054294551, 0.551095731162678], "rank_score": 0.9186721981271372} -{"id": "melamed-2003-multitext", "title": "Multitext Grammars and Synchronous Parsers", "abstract": "Multitext Grammars (MTGs) generate arbitrarily many parallel texts via production rules of arbitrary length. Both ordinary MTGs and their bilexical subclass admit relatively efficient parsers. Yet, MTGs are more expressive than other synchronous formalisms for which parsers have been described in the literature. The combination of greater expressive power and relatively low cost of inference makes MTGs an attractive foundation for practical models of translational equivalence.", "phrases": ["multitext grammars", "synchronous parsing", "discontinuous constituent"], "overall_score": 2.2028091848766342, "scores": [0.9770746360924841, 1.2194813459151843, 0.5593723646535238], "rank_score": 0.9186427822203974} -{"id": "duh-kirchhoff-2005-pos", "title": "POS Tagging of Dialectal Arabic: A Minimally Supervised Approach", "abstract": "Natural language processing technology for the dialects of Arabic is still in its infancy, due to the problem of obtaining large amounts of text data for spoken Arabic. In this paper we describe the development of a part-of-speech (POS) tagger for Egyptian Colloquial Arabic. We adopt a minimally supervised approach that only requires raw text data from several varieties of Arabic and a morphological analyzer for Modern Standard Arabic. 
No dialect-specific tools are used. We present several statistical modeling and cross-dialectal data sharing techniques to enhance the performance of the baseline tagger and compare the results to those obtained by a supervised tagger trained on hand-annotated data and by a state-of-the-art Modern Standard Arabic tagger applied to Egyptian Arabic.", "phrases": ["dialectal arabic", "pos tagging", "disambiguation tool"], "overall_score": 2.3553321874936266, "scores": [1.3098964754110562, 0.9071333833552604, 0.5377990199893682], "rank_score": 0.9182762929185616} -{"id": "zhang-etal-2013-wordtopic", "title": "WordTopic-MultiRank: A New Method for Automatic Keyphrase Extraction", "abstract": "Automatic keyphrase extraction aims to pick out a set of terms as a representation of a document without manual assignment efforts. Supervised and unsupervised graph-based ranking methods have been studied for this task. However, previous methods usually computed importance scores of words under the assumption of single relation between words. In this work, we propose WordTopic-MultiRank as a new method for keyphrase extraction, based on the idea that words relate with each other via multiple relations. First, we treat various latent topics in documents as heterogeneous relations between words and construct a multi-relational word network. Then, a novel ranking algorithm, named Biased-MultiRank, is applied to score the importance of words and topics simultaneously, as words and topics are considered to have mutual influence on each other. Experimental results on two different data sets show the outstanding performance and robustness of our proposed approach in automatic keyphrase extraction task.", "phrases": ["new method", "automatic keyphrase extraction", "wordtopic-multirank"], "overall_score": 1.0086234400585048, "scores": [0.9500141087134556, 0.9460791227142736, 0.8581726286819313], "rank_score": 0.9180886200365536} -{"id": "klein-etal-2003-named", "title": "Named Entity Recognition with Character-Level Models", "abstract": "We discuss two named-entity recognition models which use characters and character n-grams either exclusively or as an important part of their data representation. The first model is a character-level HMM with minimal context information, and the second model is a maximum-entropy conditional Markov model with substantially richer context features. Our best model achieves an overall F1 of 86.07% on the English test data (92.31% on the development data). This number represents a 25% error reduction over the same model without word-internal (substring) features.", "phrases": ["character-level model", "hmm", "conditional markov model", "test data", "rich context feature"], "overall_score": 2.3547647538508842, "scores": [1.4368433970295251, 1.1000863084405725, 0.8785256964311933, 0.5960337885345252, 0.5787861438525242], "rank_score": 0.9180550668576682} -{"id": "zhang-etal-2006-distributed", "title": "Distributed Language Modeling for N-best List Re-ranking", "abstract": "In this paper we describe a novel distributed language model for N-best list re-ranking. The model is based on the client/server paradigm where each server hosts a portion of the data and provides information to the client. This model allows for using an arbitrarily large corpus in a very efficient way. It also provides a natural platform for relevance weighting and selection. We applied this model on a 2.97 billion-word corpus and re-ranked the N-best list from Hiero, a state-of-the-art phrase-based system. 
Using BLEU as a metric, the re-ranked translation achieves a relative improvement of 4.8%, significantly better than the model-best translation.", "phrases": ["language model", "n-best list", "server"], "overall_score": 1.9090035096777318, "scores": [1.2730125451330323, 0.9414359431592098, 0.5396614081594484], "rank_score": 0.9180366321505634} -{"id": "jiang-etal-2016-unsupervised", "title": "Unsupervised Neural Dependency Parsing", "abstract": "Unsupervised dependency parsing aims to learn a dependency grammar from text annotated with only POS tags. Various features and inductive biases are often used to incorporate prior knowledge into learning. One useful type of prior information is that there exist correlations between the parameters of grammar rules involving different POS tags. Previous work employed manually designed features or special prior distributions to encode such information. In this paper, we propose a novel approach to unsupervised dependency parsing that uses a neural model to predict grammar rule probabilities based on distributed representation of POS tags. The distributed representation is automatically learned from data and captures the correlations between POS tags. Our experiments show that our approach outperforms previous approaches utilizing POS correlations and is competitive with recent state-of-the-art approaches on nine different languages.", "phrases": ["pos tag", "grammar rule probability", "neural dmv model"], "overall_score": 2.3542182283217565, "scores": [1.3393999250726418, 0.8282751611695014, 0.5858508905650532], "rank_score": 0.9178419922690656} -{"id": "chambers-etal-2007-classifying", "title": "Classifying Temporal Relations Between Events", "abstract": "This paper describes a fully automatic two-stage machine learning architecture that learns temporal relations between pairs of events. The first stage learns the temporal attributes of single event descriptions, such as tense, grammatical aspect, and aspectual class. These imperfect guesses, combined with other linguistic features, are then used in a second stage to classify the temporal relationship between two events. We present both an analysis of our new features and results on the TimeBank Corpus that is 3% higher than previous work that used perfect human tagged features.", "phrases": ["temporal relation", "stage", "linguistic feature"], "overall_score": 2.5997685691789605, "scores": [1.1519400074425177, 1.0348383291472478, 0.566034374778761], "rank_score": 0.9176042371228421} -{"id": "yoshikawa-etal-2017-stair", "title": "STAIR Captions: Constructing a Large-Scale Japanese Image Caption Dataset", "abstract": "In recent years, automatic generation of image descriptions (captions), that is, image captioning, has attracted a great deal of attention. In this paper, we particularly consider generating Japanese captions for images. Since most available caption datasets have been constructed for English language, there are few datasets for Japanese. To tackle this problem, we construct a large-scale Japanese image caption dataset based on images from MS-COCO, which is called STAIR Captions. STAIR Captions consists of 820,310 Japanese captions for 164,062 images. 
In the experiment, we show that a neural network trained using STAIR Captions can generate more natural and better Japanese captions, compared to those generated using English-Japanese machine translation after generating English captions.", "phrases": ["image caption dataset", "image description", "stair captions"], "overall_score": 1.272047896036519, "scores": [0.9491562277258867, 0.8983335186877878, 0.9052760406640139], "rank_score": 0.9175885956925628} -{"id": "grenager-etal-2005-unsupervised", "title": "Unsupervised Learning of Field Segmentation Models for Information Extraction", "abstract": "The applicability of many current information extraction techniques is severely limited by the need for supervised training data. We demonstrate that for certain field structured extraction tasks, such as classified advertisements and bibliographic citations, small amounts of prior knowledge can be used to learn effective models in a primarily unsupervised fashion. Although hidden Markov models (HMMs) provide a suitable generative model for field structured text, general unsupervised HMM learning fails to learn useful structure in either of our domains. However, one can dramatically improve the quality of the learned structure by exploiting simple prior knowledge of the desired solutions. In both domains, we found that unsupervised methods can attain accuracies with 400 unlabeled examples comparable to those attained by supervised methods on 50 labeled examples, and that semi-supervised methods can make good use of small amounts of labeled data.", "phrases": ["field segmentation", "information extraction", "markov model", "hmm", "unsupervised learning"], "overall_score": 2.3532039860526495, "scores": [0.8539583960071743, 0.8501452473280502, 1.3490018286078063, 0.9915221176089196, 0.542605252311161], "rank_score": 0.9174465683726224} -{"id": "barrachina-etal-2009-statistical", "title": "Statistical Approaches to Computer-Assisted Translation", "abstract": "Current machine translation (MT) systems are still not perfect. In practice, the output from these systems needs to be edited to correct errors. A way of increasing the productivity of the whole translation process (MT plus human work) is to incorporate the human correction activities within the translation process itself, thereby shifting the MT paradigm to that of computer-assisted translation. This model entails an iterative process in which the human translator activity is included in the loop: In each iteration, a prefix of the translation is validated (accepted or amended) by the human and the system computes its best (or n-best) translation suffix hypothesis to complete this prefix. A successful framework for MT is the so-called statistical (or pattern recognition) framework. Interestingly, within this framework, the adaptation of MT systems to the interactive scenario affects mainly the search process, allowing a great reuse of successful techniques and models. In this article, alignment templates, phrase-based models, and stochastic finite-state transducers are used to develop computer-assisted translation systems. These systems were assessed in a European project (TransType2) in two real tasks: the translation of printer manuals and the translation of the Bulletin of the European Union. 
In each task, the following three pairs of languages were involved (in both translation directions): English-Spanish, English-German, and English-French.", "phrases": ["machine translation", "translator activity", "prefix", "applicability"], "overall_score": 3.1792032347923525, "scores": [1.7561908176058876, 0.845999412046308, 0.5337482286296836, 0.5333581343685829], "rank_score": 0.9173241481626155} -{"id": "shimizu-etal-2008-metric", "title": "Metric Learning for Synonym Acquisition", "abstract": "The distance or similarity metric plays an important role in many natural language processing (NLP) tasks. Previous studies have demonstrated the effectiveness of a number of metrics such as the Jaccard coefficient, especially in synonym acquisition. While the existing metrics perform quite well, to further improve performance, we propose the use of a supervised machine learning algorithm that fine-tunes them. Given the known instances of similar or dissimilar words, we estimated the parameters of the Mahalanobis distance. We compared a number of metrics in our experiments, and the results show that the proposed metric has a higher mean average precision than other metrics.", "phrases": ["synonym acquisition", "mahalanobis distance", "metric learning"], "overall_score": 1.2714057701336783, "scores": [0.9775394894687577, 0.940798162781045, 0.8330385470444037], "rank_score": 0.9171253997647355} -{"id": "melamed-etal-2003-precision", "title": "Precision and Recall of Machine Translation", "abstract": "Machine translation can be evaluated using precision, recall, and the F-measure. These standard measures have significantly higher correlation with human judgments than recently proposed alternatives. More importantly, the standard measures have an intuitive interpretation, which can facilitate insights into how MT systems might be improved. The relevant software is publicly available.", "phrases": ["machine translation", "evaluation method", "gtm metric", "brevity penalty"], "overall_score": 2.0144637772071032, "scores": [2.063842045916321, 0.5476700325980002, 0.530510512366214, 0.5252653103850065], "rank_score": 0.9168219753163855} -{"id": "elfardy-diab-2012-token", "title": "Token Level Identification of Linguistic Code Switching", "abstract": "Typically native speakers of Arabic mix dialectal Arabic and Modern Standard Arabic in the same utterance. This phenomenon is known as linguistic code switching (LCS). It is a very challenging task to identify these LCS points in written text where we don\u2019t have an accompanying speech signal. In this paper, we address automatic identification of LCS points in Arabic social media text by identifying token level dialectal words. We present an unsupervised approach that employs a set of dictionaries, sound-change rules, and language models to tackle this problem. We tune and test the performance of our approach against human-annotated Egyptian and Levantine discussion fora datasets. Two types of annotations on the token level are obtained for each dataset: context sensitive and context insensitive annotation. We achieve a token level F\u03b2=1 score of 74% and 72.4% on the context-sensitive development and test datasets, respectively. 
On the context insensitive annotated data, we achieve a token level F\u03b2=1 score of 84.4% and 84.9% on the development and test datasets, respectively.", "phrases": ["identification", "linguistic code switching", "dialectal arabic", "sound-change rule"], "overall_score": 2.1981970148524863, "scores": [2.025952491433997, 0.5637565250656148, 0.550226922412764, 0.5269414927899119], "rank_score": 0.9167193579255719} -{"id": "malandrakis-etal-2019-controlled", "title": "Controlled Text Generation for Data Augmentation in Intelligent Artificial Agents", "abstract": "Data availability is a bottleneck during early stages of development of new capabilities for intelligent artificial agents. We investigate the use of text generation techniques to augment the training data of a popular commercial artificial agent across categories of functionality, with the goal of faster development of new functionality. We explore a variety of encoder-decoder generative models for synthetic training data generation and propose using conditional variational auto-encoders. Our approach requires only direct optimization, works well with limited data and significantly outperforms the previous controlled text generation techniques. Further, the generated data are used as additional training samples in an extrinsic intent classification task, leading to improved performance by up to 5% absolute f-score in low-resource cases, validating the usefulness of our approach.", "phrases": ["data augmentation", "text generation technique", "auto-encoder"], "overall_score": 2.0141482666091592, "scores": [1.2889544196335347, 0.8411765681565534, 0.6199041529750711], "rank_score": 0.916678380255053} -{"id": "yan-etal-2020-multi", "title": "Multi-Unit Transformers for Neural Machine Translation", "abstract": "Transformer models achieve remarkable success in Neural Machine Translation. Many efforts have been devoted to deepening the Transformer by stacking several units (i.e., a combination of Multihead Attentions and FFN) in a cascade, while the investigation over multiple parallel units draws little attention. In this paper, we propose the Multi-Unit Transformer (MUTE), which aims to promote the expressiveness of the Transformer by introducing diverse and complementary units. Specifically, we use several parallel units and show that modeling with multiple units improves model performance and introduces diversity. Further, to better leverage the advantage of the multi-unit setting, we design biased module and sequential dependency that guide and encourage complementariness among different units. Experimental results on three machine translation tasks, the NIST Chinese-to-English, WMT'14 English-to-German and WMT'18 Chinese-to-English, show that the MUTE models significantly outperform the Transformer-Base, by up to +1.52, +1.90 and +1.10 BLEU points, with only a mild drop in inference speed (about 3.1%). In addition, our methods also surpass the Transformer-Big model, with only 54% of its parameters. 
These results demonstrate the effectiveness of the MUTE, as well as its efficiency in both the inference process and parameter usage.", "phrases": ["transformer", "neural machine translation", "remarkable success", "model performance"], "overall_score": 1.7835213200355857, "scores": [2.006396566998322, 0.5643396420324835, 0.5527097348064145, 0.5427486559523684], "rank_score": 0.9165486499473972} -{"id": "waltinger-2010-germanpolarityclues", "title": "GermanPolarityClues: A Lexical Resource for German Sentiment Analysis", "abstract": "In this paper, we propose GermanPolarityClues, a new publicly available lexical resource for sentiment analysis for the German language. While sentiment analysis and polarity classification has been extensively studied at different document levels (e.g. sentences and phrases), only a few approaches explored the effect of a polarity-based feature selection and subjectivity resources for the German language. This paper evaluates four different English and three different German sentiment resources in a comparative manner by combining a polarity-based feature selection with SVM-based machine learning classifier. Using a semi-automatic translation approach, we were able to construct three different resources for a German sentiment analysis. The manually finalized GermanPolarityClues dictionary offers thereby a number of 10,141 polarity features, associated to three numerical polarity scores, determining the positive, negative and neutral direction of specific term features. While the results show that the size of dictionaries clearly correlate to polarity-based feature coverage, this property does not correlate to classification accuracy. Using a polarity-based feature selection, considering a minimum amount of prior polarity features, in combination with SVM-based machine learning methods exhibits for both languages the best performance (F1: 0.83-0.88).", "phrases": ["lexical resource", "german sentiment analysis", "germanpolarityclue"], "overall_score": 1.0066561473353508, "scores": [0.9796754228731895, 0.8949459330182451, 0.8742723831976091], "rank_score": 0.9162979130296813} -{"id": "fei-liu-2016-breaking", "title": "Breaking the Closed World Assumption in Text Classification", "abstract": "Existing research on multiclass text classification mostly makes the closed world assumption, which focuses on designing accurate classifiers under the assumption that all test classes are known at training time. A more realistic scenario is to expect unseen classes during testing (open world). In this case, the goal is to design a learning system that classifies documents of the known classes into their respective classes and also to reject documents from unknown classes. This problem is called open (world) classification. This paper approaches the problem by reducing the open space risk while balancing the empirical risk. It proposes to use a new learning strategy, called center-based similarity (CBS) space learning (or CBS learning), to provide a novel solution to the problem. 
Extensive experiments across two datasets show that CBS learning gives promising results on multiclass open text classification compared to state-of-the-art baselines.", "phrases": ["assumption", "text classification", "cbs learning", "unseen intent"], "overall_score": 1.7824540764124825, "scores": [2.0041691178427925, 0.5577801339048942, 0.5537755737968695, 0.5482759553297628], "rank_score": 0.9160001952185798} -{"id": "lu-ng-2011-probabilistic", "title": "A Probabilistic Forest-to-String Model for Language Generation from Typed Lambda Calculus Expressions", "abstract": "This paper describes a novel probabilistic approach for generating natural language sentences from their underlying semantics in the form of typed lambda calculus. The approach is built on top of a novel reduction-based weighted synchronous context free grammar formalism, which facilitates the transformation process from typed lambda calculus into natural language sentences. Sentences can then be generated based on such grammar rules with a log-linear model. To acquire such grammar rules automatically in an unsupervised manner, we also propose a novel approach with a generative model, which maps from sub-expressions of logical forms to word sequences in natural language sentences. Experiments on benchmark datasets for both English and Chinese generation tasks yield significant improvements over results obtained by two state-of-the-art machine translation models, in terms of both automatic metrics and human evaluation.", "phrases": ["language generation", "lambda calculus", "synchronous context-free grammar"], "overall_score": 2.196129703001707, "scores": [1.6500955019022667, 0.5686124177667597, 0.5288637460928384], "rank_score": 0.9158572219206217} -{"id": "mehdad-etal-2013-abstractive", "title": "Abstractive Meeting Summarization with Entailment and Fusion", "abstract": "We propose a novel end-to-end framework for abstractive meeting summarization. We cluster sentences in the input into communities and build an entailment graph over the sentence communities to identify and select the most relevant sentences. We then aggregate those selected sentences by means of a word graph model. We exploit a ranking strategy to select the best path in the word graph as an abstract sentence. Despite not relying on the syntactic structure, our approach significantly outperforms previous models for meeting summarization in terms of informativeness. Moreover, the longer sentences generated by our method are competitive with shorter sentences generated by the previous word graph model in terms of grammaticality.", "phrases": ["entailment graph", "ranking strategy", "abstractive meeting summarization", "non-redundant sentence"], "overall_score": 2.7883312041613637, "scores": [0.972911034694679, 1.3060497733135163, 0.828435985932142, 0.5560102082381335], "rank_score": 0.9158517505446176} -{"id": "quirk-etal-2015-language", "title": "Language to Code: Learning Semantic Parsers for If-This-Then-That Recipes", "abstract": "Using natural language to write programs is a touchstone problem for computational linguistics. We present an approach that learns to map natural-language descriptions of simple \u201cif-then\u201d rules to executable code. By training and testing on a large corpus of naturally-occurring programs (called \u201crecipes\u201d) and their natural language descriptions, we demonstrate the ability to effectively map language to code. 
We compare a number of semantic parsing approaches on the highly noisy training data collected from ordinary users, and find that loosely synchronous systems perform best.", "phrases": ["recipe", "executable code", "semantic parsing"], "overall_score": 2.108685515541738, "scores": [1.6621564011578875, 0.5587622278130308, 0.5264528214363533], "rank_score": 0.9157904834690905} -{"id": "chen-yih-2020-open", "title": "Open-Domain Question Answering", "abstract": "This tutorial provides a comprehensive and coherent overview of cutting-edge research in open-domain question answering (QA), the task of answering questions using a large collection of documents of diversified topics. We will start by first giving a brief historical background, discussing the basic setup and core technical challenges of the research problem, and then describe modern datasets with the common evaluation metrics and benchmarks. The focus will then shift to cutting-edge models proposed for open-domain QA, including two-stage retriever-reader approaches, dense retriever and end-to-end training, and retriever-free methods. Finally, we will cover some hybrid approaches using both text and large knowledge bases and conclude the tutorial with important open questions. We hope that the tutorial will not only help the audience to acquire up-to-date knowledge but also provide new perspectives to stimulate the advances of open-domain QA research in the next phase.", "phrases": ["retrieval", "open-domain question", "major step"], "overall_score": 2.107752504118009, "scores": [1.1483031489786146, 1.0534215780844447, 0.5444311182055779], "rank_score": 0.9153852817562124} -{"id": "mccarthy-carroll-2003-disambiguating", "title": "Disambiguating Nouns, Verbs, and Adjectives Using Automatically Acquired Selectional Preferences", "abstract": "Selectional preferences have been used by word sense disambiguation (WSD) systems as one source of disambiguating information. We evaluate WSD using selectional preferences acquired for English adjective-noun, subject, and direct object grammatical relationships with respect to a standard test corpus. The selectional preferences are specific to verb or adjective classes, rather than individual word forms, so they can be used to disambiguate the co-occurring adjectives and verbs, rather than just the nominal argument heads. We also investigate use of the one-sense-per-discourse heuristic to propagate a sense tag for a word to other occurrences of the same word within the current document in order to increase coverage. Although the preferences perform well in comparison with other unsupervised WSD systems on the same corpus, the results show that for many applications, further knowledge sources would be required to achieve an adequate level of accuracy and coverage. In addition to quantifying performance, we analyze the results to investigate the situations in which the selectional preferences achieve the best precision and in which the one-sense-per-discourse heuristic increases performance.", "phrases": ["adjective", "selectional preference", "word sense disambiguation"], "overall_score": 2.1946191770015546, "scores": [1.2482925417201112, 0.9429567063056119, 0.5544326029256859], "rank_score": 0.9152272836504697} -{"id": "braud-etal-2017-cross", "title": "Cross-lingual RST Discourse Parsing", "abstract": "Discourse parsing is an integral part of understanding information flow and argumentative structure in documents. 
Most previous research has focused on inducing and evaluating models from the English RST Discourse Treebank. However, discourse treebanks for other languages exist, including Spanish, German, Basque, Dutch and Brazilian Portuguese. The treebanks share the same underlying linguistic theory, but differ slightly in the way documents are annotated. In this paper, we present (a) a new discourse parser which is simpler, yet competitive (significantly better on 2/3 metrics) to state of the art for English, (b) a harmonization of discourse treebanks across languages, enabling us to present (c) what to the best of our knowledge are the first experiments on cross-lingual discourse parsing.", "phrases": ["other language", "basque", "discourse parser"], "overall_score": 2.010824843980049, "scores": [1.2650562707976085, 0.8912491877504544, 0.5891920177515831], "rank_score": 0.9151658254332152} -{"id": "druck-etal-2009-semi", "title": "Semi-supervised Learning of Dependency Parsers using Generalized Expectation Criteria", "abstract": "In this paper, we propose a novel method for semi-supervised learning of non-projective log-linear dependency parsers using directly expressed linguistic prior knowledge (e.g. a noun's parent is often a verb). Model parameters are estimated using a generalized expectation (GE) objective function that penalizes the mismatch between model predictions and linguistic expectation constraints. In a comparison with two prominent \"unsupervised\" learning methods that require indirect biasing toward the correct syntactic structure, we show that GE can attain better accuracy with as few as 20 intuitive constraints. We also present positive experimental results on longer sentences in multiple languages.", "phrases": ["dependency parser", "noun", "parent", "semi-supervised learning"], "overall_score": 1.9025334405677143, "scores": [1.938581688868931, 0.6259358233267842, 0.5554939386038702, 0.5396892956436936], "rank_score": 0.9149251866108197} -{"id": "hong-etal-2019-faspell", "title": "FASPell: A Fast, Adaptable, Simple, Powerful Chinese Spell Checker Based On DAE-Decoder Paradigm", "abstract": "We propose a Chinese spell checker \u2013 FASPell based on a new paradigm which consists of a denoising autoencoder (DAE) and a decoder. In comparison with previous state-of-the-art models, the new paradigm allows our spell checker to be Faster in computation, readily Adaptable to both simplified and traditional Chinese texts produced by either humans or machines, and to require much Simpler structure to be as much Powerful in both error detection and correction. These four achievements are made possible because the new paradigm circumvents two bottlenecks. First, the DAE curtails the amount of Chinese spell checking data needed for supervised learning (to 10k sentences) by leveraging the power of unsupervisedly pre-trained masked language model as in BERT, XLNet, MASS etc. 
Second, the decoder helps to eliminate the use of confusion set that is deficient in flexibility and sufficiency of utilizing the salient feature of Chinese character similarity.", "phrases": ["chinese spell checker", "dae-decoder paradigm", "autoencoder", "language model", "candidate"], "overall_score": 2.5916371418019346, "scores": [1.2786378974562767, 0.9539028014887906, 1.264053356972377, 0.542085264345665, 0.5349916799086868], "rank_score": 0.9147342000343592} -{"id": "min-etal-2020-syntactic", "title": "Syntactic Data Augmentation Increases Robustness to Inference Heuristics", "abstract": "Pretrained neural models such as BERT, when fine-tuned to perform natural language inference (NLI), often show high accuracy on standard datasets, but display a surprising lack of sensitivity to word order on controlled challenge sets. We hypothesize that this issue is not primarily caused by the pretrained model's limitations, but rather by the paucity of crowdsourced NLI examples that might convey the importance of syntactic structure at the fine-tuning stage. We explore several methods to augment standard training sets with syntactically informative examples, generated by applying syntactic transformations to sentences from the MNLI corpus. The best-performing augmentation method, subject/object inversion, improved BERT's accuracy on controlled examples that diagnose sensitivity to word order from 0.28 to 0.73, without affecting performance on the MNLI test set. This improvement generalized beyond the particular construction used for data augmentation, suggesting that augmentation causes BERT to recruit abstract syntactic representations.", "phrases": ["robustness", "nli example", "augmentation method", "object inversion", "syntactic data augmentation"], "overall_score": 2.784654240787653, "scores": [1.6092558305298825, 1.0079801658548635, 0.8244473222749922, 0.5702695953675869, 0.5612671849349115], "rank_score": 0.9146440197924475} -{"id": "irvine-etal-2013-measuring", "title": "Measuring Machine Translation Errors in New Domains", "abstract": "We develop two techniques for analyzing the effect of porting a machine translation system to a new domain. One is a macro-level analysis that measures how domain shift affects corpus-level evaluation; the second is a micro-level analysis for word-level errors. We apply these methods to understand what happens when a Parliament-trained phrase-based machine translation system is applied in four very different domains: news, medical texts, scientific articles and movie subtitles. We present quantitative and qualitative experiments that highlight opportunities for future research in domain adaptation for machine translation.", "phrases": ["machine translation error", "domain adaptation", "choice"], "overall_score": 2.2721071792188425, "scores": [0.9800054247700908, 0.92476739945801, 0.8383167555104623], "rank_score": 0.9143631932461878} -{"id": "hu-etal-2011-interactive", "title": "Interactive Topic Modeling", "abstract": "Topic models have been used extensively as a tool for corpus exploration, and a cottage industry has developed to tweak topic models to better encode human intuitions or to better model data. However, creating such extensions requires expertise in machine learning unavailable to potential end-users of topic modeling software. 
In this work, we develop a framework for allowing users to iteratively refine the topics discovered by models such as latent Dirichlet allocation (LDA) by adding constraints that enforce that sets of words must appear together in the same topic. We incorporate these constraints interactively by selectively removing elements in the state of a Markov Chain used for inference; we investigate a variety of methods for incorporating this information and demonstrate that these interactively added constraints improve topic usefulness for simulated and actual user sessions.", "phrases": ["exploration", "iteration", "interactive topic modeling", "itm", "user feedback"], "overall_score": 2.4758949507057193, "scores": [2.079441789568585, 0.8384576736249584, 0.5637471879227416, 0.5506055724766264, 0.5391086575643133], "rank_score": 0.9142721762314447} -{"id": "bourgonje-etal-2017-clickbait", "title": "From Clickbait to Fake News Detection: An Approach based on Detecting the Stance of Headlines to Articles", "abstract": "We present a system for the detection of the stance of headlines with regard to their corresponding article bodies. The approach can be applied in fake news, especially clickbait detection scenarios. The component is part of a larger platform for the curation of digital content; we consider veracity and relevancy an increasingly important part of curating online information. We want to contribute to the debate on how to deal with fake news and related online phenomena with technological means, by providing means to separate related from unrelated headlines and further classifying the related headlines. On a publicly available data set annotated for the stance of headlines with regard to their corresponding article bodies, we achieve a (weighted) accuracy score of 89.59.", "phrases": ["clickbait", "fake news", "propaganda"], "overall_score": 2.0078343880442726, "scores": [0.8110978116098227, 1.4033538493349378, 0.5269627699075211], "rank_score": 0.9138048102840939} -{"id": "wang-etal-2021-transprompt", "title": "TransPrompt: Towards an Automatic Transferable Prompting Framework for Few-shot Text Classification", "abstract": "Recent studies have shown that prompts improve the performance of large pre-trained language models for few-shot text classification. Yet, it is unclear how the prompting knowledge can be transferred across similar NLP tasks for the purpose of mutual reinforcement. Based on continuous prompt embeddings, we propose TransPrompt, a transferable prompting framework for few-shot learning across similar tasks. In TransPrompt, we employ a multi-task meta-knowledge acquisition procedure to train a meta-learner that captures cross-task transferable knowledge. Two de-biasing techniques are further designed to make it more task-agnostic and unbiased towards any tasks. After that, the meta-learner can be adapted to target tasks with high accuracy. Extensive experiments show that TransPrompt outperforms single-task and cross-task strong baselines over multiple NLP tasks and datasets. 
We further show that the meta-learner can effectively improve the performance on previously unseen tasks; and TransPrompt also outperforms strong fine-tuning baselines when learning with full training sets.", "phrases": ["prompt", "few-shot text classification", "language model", "similar task"], "overall_score": 1.6367562104435098, "scores": [2.043142929825036, 0.5418325417662164, 0.5347383376073661, 0.5342503272904713], "rank_score": 0.9134910341222724} -{"id": "fan-gardent-2020-multilingual", "title": "Multilingual AMR-to-Text Generation", "abstract": "Generating text from structured data is challenging because it requires bridging the gap between (i) structure and natural language (NL) and (ii) semantically underspecified input and fully specified NL output. Multilingual generation brings in an additional challenge: that of generating into languages with varied word order and morphological properties. In this work, we focus on Abstract Meaning Representations (AMRs) as structured input, where previous research has overwhelmingly focused on generating only into English. We leverage advances in cross-lingual embeddings, pretraining, and multilingual models to create multilingual AMR-to-text models that generate in twenty one different languages. Our multilingual models surpass baselines that generate into one language in eighteen languages, based on automatic metrics. We analyze the ability of our multilingual models to accurately capture morphology and word order using human evaluation, and find that native speakers judge our generations to be fluent.", "phrases": ["amr", "different language", "multilingual amr-to-text generation"], "overall_score": 1.266215422114446, "scores": [1.6479184330021646, 0.5532640553636257, 0.5389615769066646], "rank_score": 0.9133813550908183} -{"id": "lai-etal-2021-thank", "title": "Thank you BART! Rewarding Pre-Trained Models Improves Formality Style Transfer", "abstract": "Scarcity of parallel data causes formality style transfer models to have scarce success in preserving content. We show that fine-tuning pre-trained language (GPT-2) and sequence-to-sequence (BART) models boosts content preservation, and that this is possible even with limited amounts of parallel data. Augmenting these models with rewards that target style and content \u2013the two core aspects of the task\u2013 we achieve a new state-of-the-art.", "phrases": ["bart", "style transfer model", "sequence-to-sequence", "language model"], "overall_score": 1.6362536536800565, "scores": [1.6504176340978236, 0.8666651321160351, 0.5740638631870073, 0.5616955780077089], "rank_score": 0.9132105518521438} -{"id": "limsopatham-collier-2016-normalising", "title": "Normalising Medical Concepts in Social Media Texts by Learning Semantic Representation", "abstract": "Automatically recognising medical concepts mentioned in social media messages (e.g. tweets) enables several applications for enhancing health quality of people in a community, e.g. real-time monitoring of infectious diseases in population. However, the discrepancy between the type of language used in social media and medical ontologies poses a major challenge. Existing studies deal with this challenge by employing techniques, such as lexical term matching and statistical machine translation. In this work, we handle the medical concept normalisation at the semantic level. 
We investigate the use of neural networks to learn the transition between layman\u2019s language used in social media messages and formal medical language used in the descriptions of medical concepts in a standard ontology. We evaluate our approaches using three different datasets, where social media texts are extracted from Twitter messages and blog posts. Our experimental results show that our proposed approaches significantly and consistently outperform existing effective baselines, which achieved state-of-the-art performance on several medical concept normalisation tasks, by up to 44%.", "phrases": ["medical concept", "standard ontology", "social medium text"], "overall_score": 2.342136387750624, "scores": [1.1397832126807763, 1.0732728435145604, 0.5263388339266757], "rank_score": 0.9131316300406708} -{"id": "mao-etal-2019-hierarchical", "title": "Hierarchical Text Classification with Reinforced Label Assignment", "abstract": "While existing hierarchical text classification (HTC) methods attempt to capture label hierarchies for model training, they either make local decisions regarding each label or completely ignore the hierarchy information during inference. To solve the mismatch between training and inference as well as modeling label dependencies in a more principled way, we formulate HTC as a Markov decision process and propose to learn a Label Assignment Policy via deep reinforcement learning to determine where to place an object and when to stop the assignment process. The proposed method, HiLAP, explores the hierarchy during both training and inference time in a consistent manner and makes inter-dependent decisions. As a general framework, HiLAP can incorporate different neural encoders as base models for end-to-end training. Experiments on five public datasets and four base models show that HiLAP yields an average improvement of 33.4% in Macro-F1 over flat classifiers and outperforms state-of-the-art HTC methods by a large margin. Data and code can be found at .", "phrases": ["htc", "reinforcement learning", "hierarchical text classification"], "overall_score": 2.1895752830222004, "scores": [1.008179840242059, 0.8811566481060187, 0.8500349610984296], "rank_score": 0.9131238164821691} -{"id": "van-der-plas-etal-2011-scaling", "title": "Scaling up Automatic Cross-Lingual Semantic Role Annotation", "abstract": "Broad-coverage semantic annotations for training statistical learners are only available for a handful of languages. Previous approaches to cross-lingual transfer of semantic annotations have addressed this problem with encouraging results on a small scale. In this paper, we scale up previous efforts by using an automatic approach to semantic annotation that does not rely on a semantic ontology for the target language. Moreover, we improve the quality of the transferred semantic annotations by using a joint syntactic-semantic parser that learns the correlations between syntax and semantics of the target language and smooths out the errors from automatic transfer. 
We reach a labelled F-measure for predicates and arguments of only 4% and 9% points, respectively, lower than the upper bound from manual annotations.", "phrases": ["cross-lingual transfer", "previous effort", "joint syntactic-semantic parser", "word alignment", "annotation projection"], "overall_score": 2.6883524161173042, "scores": [1.343586884764053, 1.1438566032313975, 0.9834886063995283, 0.5711273317533857, 0.5230757916960324], "rank_score": 0.9130270435688794} -{"id": "ma-etal-2018-rumor", "title": "Rumor Detection on Twitter with Tree-structured Recursive Neural Networks", "abstract": "Automatic rumor detection is technically very challenging. In this work, we try to learn discriminative features from tweets content by following their non-sequential propagation structure and generate more powerful representations for identifying different type of rumors. We propose two recursive neural models based on a bottom-up and a top-down tree-structured neural networks for rumor representation learning and classification, which naturally conform to the propagation layout of tweets. Results on two public Twitter datasets demonstrate that our recursive neural models 1) achieve much better performance than state-of-the-art approaches; 2) demonstrate superior capacity on detecting rumors at very early stage.", "phrases": ["twitter", "recursive neural network", "rumor detection", "propagation tree"], "overall_score": 2.586631563944003, "scores": [0.8615181972085446, 0.8352915675806342, 1.4339470293318337, 0.5211130085826694], "rank_score": 0.9129674506759204} -{"id": "li-etal-2019-logic", "title": "A Logic-Driven Framework for Consistency of Neural Models", "abstract": "While neural models show remarkable accuracy on individual predictions, their internal beliefs can be inconsistent across examples. In this paper, we formalize such inconsistency as a generalization of prediction error. We propose a learning framework for constraining models using logic rules to regularize them away from inconsistency. Our framework can leverage both labeled and unlabeled examples and is directly compatible with off-the-shelf learning schemes without model redesign. We instantiate our framework on natural language inference, where experiments show that enforcing invariants stated in logic can help make the predictions of neural models both accurate and consistent.", "phrases": ["logic-driven framework", "consistency", "belief", "language inference"], "overall_score": 2.101804963390871, "scores": [1.39167743879125, 0.8565038416246998, 0.8733955436607089, 0.529632366473427], "rank_score": 0.9128022976375214} -{"id": "sulubacak-etal-2016-universal", "title": "Universal Dependencies for Turkish", "abstract": "The Universal Dependencies (UD) project was conceived after the substantial recent interest in unifying annotation schemes across languages. With its own annotation principles and abstract inventory for parts of speech, morphosyntactic features and dependency relations, UD aims to facilitate multilingual parser development, cross-lingual learning, and parsing research from a language typology perspective. This paper presents the Turkish IMST-UD Treebank, the first Turkish treebank to be in a UD release. The IMST-UD Treebank was automatically converted from the IMST Treebank, which was also recently released. We describe this conversion procedure in detail, complete with mapping tables. We also present our evaluation of the parsing performances of both versions of the IMST Treebank. 
Our findings suggest that the UD framework is at least as viable for Turkish as the original annotation framework of the IMST Treebank.", "phrases": ["treebank", "imst", "universal dependencies"], "overall_score": 1.8977107041380403, "scores": [0.9857467141099266, 1.2031640690223941, 0.5489070387695334], "rank_score": 0.9126059406339513} -{"id": "kumar-etal-2019-submodular", "title": "Submodular Optimization-based Diverse Paraphrasing and its Effectiveness in Data Augmentation", "abstract": "Inducing diversity in the task of paraphrasing is an important problem in NLP with applications in data augmentation and conversational agents. Previous paraphrasing approaches have mainly focused on the issue of generating semantically similar paraphrases while paying little attention towards diversity. In fact, most of the methods rely solely on top-k beam search sequences to obtain a set of paraphrases. The resulting set, however, contains many structurally similar sentences. In this work, we focus on the task of obtaining highly diverse paraphrases while not compromising on paraphrasing quality. We provide a novel formulation of the problem in terms of monotone submodular function maximization, specifically targeted towards the task of paraphrasing. Additionally, we demonstrate the effectiveness of our method for data augmentation on multiple tasks such as intent classification and paraphrase recognition. In order to drive further research, we have made the source code available.", "phrases": ["paraphrase", "data augmentation", "submodular function maximization"], "overall_score": 2.340360537932052, "scores": [0.8701776328997959, 1.3242598726889807, 0.5428803261927971], "rank_score": 0.9124392772605247} -{"id": "mordido-meinel-2020-mark", "title": "Mark-Evaluate: Assessing Language Generation using Population Estimation Methods", "abstract": "We propose a family of metrics to assess language generation derived from population estimation methods widely used in ecology. More specifically, we use mark-recapture and maximum-likelihood methods that have been applied over the past several decades to estimate the size of closed populations in the wild. We propose three novel metrics: ME_Petersen and ME_CAPTURE, which retrieve a single-valued assessment, and ME_Schnabel which returns a double-valued metric to assess the evaluation set in terms of quality and diversity, separately. In synthetic experiments, our family of methods is sensitive to drops in quality and diversity. Moreover, our methods show a higher correlation to human evaluation than existing metrics on several challenging tasks, namely unconditional language generation, machine translation, and text summarization.", "phrases": ["language generation", "population estimation method", "family", "ecology"], "overall_score": 1.0023092014582704, "scores": [2.010593202742665, 0.5802794669903643, 0.53271325178846, 0.5257786879838665], "rank_score": 0.912341152376339} -{"id": "reynolds-etal-2014-view", "title": "A VIEW of Russian: Visual Input Enhancement and Adaptive Feedback", "abstract": "We explore the challenges and opportunities which arise in developing automatic visual input enhancement activities for Russian with a focus on target selection and adaptive feedback. Russian, a language with a rich fusional morphology, has many syntactically relevant forms that are not transparent to the language learner, which makes it a good candidate for visual input enhancement (VIE). 
VIE essentially supports incidental focus on form by increasing the salience of language forms to support noticing by the learner. The freely available VIEW system (Meurers et al., 2010) was designed to automatically generate VIE activities from any web content. We extend VIEW to Russian and discuss connected research issues regarding target selection, ambiguity management, prompt generation, and distractor generation. We show that the same information and techniques used for target selection can often be repurposed for adaptive feedback. Authentic Text ICALL (ATICALL) systems incorporating only native-language NLP, without the NLP analysis specific to learner language that is characteristic of Intelligent Language Tutoring Systems (ILTS), thus can support some forms of adaptive feedback. ATICALL and ILTS represent a spectrum of possibilities rather than two categorically distinct enterprises.", "phrases": ["view", "visual input enhancement", "adaptive feedback"], "overall_score": 1.0013014781978051, "scores": [0.9626764519210728, 0.9108355398470888, 0.8607596576370757], "rank_score": 0.911423883135079} -{"id": "li-etal-2021-mtop", "title": "MTOP: A Comprehensive Multilingual Task-Oriented Semantic Parsing Benchmark", "abstract": "Scaling semantic parsing models for task-oriented dialog systems to new languages is often expensive and time-consuming due to the lack of available datasets. Available datasets suffer from several shortcomings: a) they contain few languages b) they contain small amounts of labeled examples per language c) they are based on the simple intent and slot detection paradigm for non-compositional queries. In this paper, we present a new multilingual dataset, called MTOP, comprising of 100k annotated utterances in 6 languages across 11 domains. We use this dataset and other publicly available datasets to conduct a comprehensive benchmarking study on using various state-of-the-art multilingual pre-trained models for task-oriented semantic parsing. We achieve an average improvement of +6.3 points on Slot F1 for the two existing multilingual datasets, over best results reported in their experiments. Furthermore, we demonstrate strong zero-shot performance using pre-trained models combined with automatic translation and alignment, and a proposed distant supervision method to reduce the noise in slot label projection.", "phrases": ["semantic parsing", "intent", "multilingual dataset", "pre-trained model", "mtop"], "overall_score": 2.3370135960358907, "scores": [1.84715883568815, 0.9977542081260306, 0.5951157738679653, 0.5915448538112552, 0.5240983327850223], "rank_score": 0.9111344008556846} -{"id": "ishigaki-etal-2017-summarizing", "title": "Summarizing Lengthy Questions", "abstract": "In this research, we propose the task of question summarization. We first analyzed question-summary pairs extracted from a Community Question Answering (CQA) site, and found that a proportion of questions cannot be summarized by extractive approaches but requires abstractive approaches. We created a dataset by regarding the question-title pairs posted on the CQA site as question-summary pairs. By using the data, we trained extractive and abstractive summarization models, and compared them based on ROUGE scores and manual evaluations. 
Our experimental results show an abstractive method using an encoder-decoder model with a copying mechanism achieves better scores for both ROUGE-2 F-measure and the evaluations by human judges.", "phrases": ["lengthy question", "site", "summarization"], "overall_score": 1.466268672268629, "scores": [1.5551093175147468, 0.6284419372628262, 0.5495805938149395], "rank_score": 0.9110439495308374} -{"id": "marie-etal-2021-scientific", "title": "Scientific Credibility of Machine Translation Research: A Meta-Evaluation of 769 Papers", "abstract": "This paper presents the first large-scale meta-evaluation of machine translation (MT). We annotated MT evaluations conducted in 769 research papers published from 2010 to 2020. Our study shows that practices for automatic MT evaluation have dramatically changed during the past decade and follow concerning trends. An increasing number of MT evaluations exclusively rely on differences between BLEU scores to draw conclusions, without performing any kind of statistical significance testing nor human evaluation, while at least 108 metrics claiming to be better than BLEU have been proposed. MT evaluations in recent papers tend to copy and compare automatic metric scores from previous work to claim the superiority of a method or an algorithm without confirming neither exactly the same training, validating, and testing data have been used nor the metric scores are comparable. Furthermore, tools for reporting standardized metric scores are still far from being widely adopted by the MT community. After showing how the accumulation of these pitfalls leads to dubious evaluation, we propose a guideline to encourage better automatic MT evaluation along with a simple meta-evaluation scoring method to assess its credibility.", "phrases": ["credibility", "machine translation", "meta-evaluation"], "overall_score": 1.4658071510180177, "scores": [0.9230704608795081, 0.850256549376282, 0.9589445605085836], "rank_score": 0.9107571902547912} -{"id": "huang-chiang-2005-better", "title": "Better k-best Parsing", "abstract": "We discuss the relevance of k-best parsing to recent applications in natural language processing, and develop efficient algorithms for k-best trees in the framework of hypergraph parsing. To demonstrate the efficiency, scalability and accuracy of these algorithms, we present experiments on Bikel's implementation of Collins' lexicalized PCFG model, and on Chiang's CFG-based decoder for hierarchical phrase-based translation. We show in particular how the improved output of our algorithms has the potential to improve results from parse reranking systems and other applications.", "phrases": ["k-best", "hypergraph", "chiang", "good parse", "weight"], "overall_score": 2.7727972949443216, "scores": [1.5081714487928801, 1.3100440096850348, 0.6206074959851302, 0.5601435878659753, 0.5547809692473975], "rank_score": 0.9107495023152836} -{"id": "bosselut-etal-2018-discourse", "title": "Discourse-Aware Neural Rewards for Coherent Text Generation", "abstract": "In this paper, we investigate the use of discourse-aware rewards with reinforcement learning to guide a model to generate long, coherent text. In particular, we propose to learn neural rewards to model cross-sentence ordering as a means to approximate desired discourse structure. 
Empirical results demonstrate that a generator trained with the learned reward produces more coherent and less repetitive text than models trained with cross-entropy or with reinforcement learning with commonly used scores as rewards.", "phrases": ["reward", "text generation", "discourse structure"], "overall_score": 2.097063988628229, "scores": [1.2427894227708949, 0.9224108845015179, 0.5670296481053778], "rank_score": 0.9107433184592635} -{"id": "devitt-ahmad-2007-sentiment", "title": "Sentiment Polarity Identification in Financial News: A Cohesion-based Approach", "abstract": "Text is not unadulterated fact. A text can make you laugh or cry but can it also make you short sell your stocks in company A and buy up options in company B? Research in the domain of finance strongly suggests that it can. Studies have shown that both the informational and affective aspects of news text affect the markets in profound ways, impacting on volumes of trades, stock prices, volatility and even future firm earnings. This paper aims to explore a computable metric of positive or negative polarity in financial news text which is consistent with human judgments and can be used in a quantitative analysis of news sentiment impact on financial markets. Results from a preliminary evaluation are presented and discussed.", "phrases": ["polarity", "financial news", "sentiment analysis"], "overall_score": 2.335913952262319, "scores": [0.8389900057412297, 1.267487937869403, 0.6256391004944155], "rank_score": 0.9107056813683494} -{"id": "neves-etal-2018-findings", "title": "Findings of the WMT 2018 Biomedical Translation Shared Task: Evaluation on Medline test sets", "abstract": "Machine translation enables the automatic translation of textual documents between languages and can facilitate access to information only available in a given language for non-speakers of this language, e.g. research results presented in scientific publications. In this paper, we provide an overview of the Biomedical Translation shared task in the Workshop on Machine Translation (WMT) 2018, which specifically examined the performance of machine translation systems for biomedical texts. This year, we provided test sets of scientific publications from two sources (EDP and Medline) and for six language pairs (English with each of Chinese, French, German, Portuguese, Romanian and Spanish). We describe the development of the various test sets, the submissions that we received and the evaluations that we carried out. We obtained a total of 39 runs from six teams and some of this year's BLEU scores were somewhat higher than last year's, especially for teams that made use of biomedical resources or state-of-the-art MT algorithms (e.g. Transformer). Finally, our manual evaluation scored automatic translations higher than the reference translations for German and Spanish.", "phrases": ["wmt", "biomedical translation", "medline", "scientific abstract"], "overall_score": 1.2624001395877034, "scores": [1.6749625312599061, 0.9070482335700318, 0.5396333967308288, 0.5208726804406633], "rank_score": 0.9106292105003575} -{"id": "minervini-riedel-2018-adversarially", "title": "Adversarially Regularising Neural NLI Models to Integrate Logical Background Knowledge", "abstract": "Adversarial examples are inputs to machine learning models designed to cause the model to make a mistake. They are useful for understanding the shortcomings of machine learning models, interpreting their results, and for regularisation. 
In NLP, however, most example generation strategies produce input text by using known, pre-specified semantic transformations, requiring significant manual effort and in-depth understanding of the problem and domain. In this paper, we investigate the problem of automatically generating adversarial examples that violate a set of given First-Order Logic constraints in Natural Language Inference (NLI). We reduce the problem of identifying such adversarial examples to a combinatorial optimisation problem, by maximising a quantity measuring the degree of violation of such constraints and by using a language model for generating linguistically-plausible examples. Furthermore, we propose a method for adversarially regularising neural NLI models for incorporating background knowledge. Our results show that, while the proposed method does not always improve results on the SNLI and MultiNLI datasets, it significantly and consistently increases the predictive accuracy on adversarially-crafted datasets \u2013 up to a 79.6% relative improvement \u2013 while drastically reducing the number of background knowledge violations. Furthermore, we show that adversarial examples transfer among model architectures, and that the proposed adversarial training procedure improves the robustness of NLI models to adversarial examples.", "phrases": ["nli", "adversarial example", "natural language inference"], "overall_score": 2.096393880579801, "scores": [1.310843777887099, 0.8849180223693878, 0.5355950824381696], "rank_score": 0.9104522942315522} -{"id": "mao-etal-2008-chinese", "title": "Chinese Word Segmentation and Named Entity Recognition Based on Conditional Random Fields", "abstract": "Chinese word segmentation (CWS), named entity recognition (NER) and part-of-speech tagging is the lexical processing in Chinese language. This paper describes the work on these tasks done by France Telecom Team (Beijing) at the fourth International Chinese Language Processing Bakeoff. In particular, we employ Conditional Random Fields with different features for these tasks. In order to improve NER's relatively low recall, we exploit non-local features and alleviate class imbalanced distribution on NER dataset to enhance the recall and keep its relatively high precision. Some other post-processing measures such as consistency checking and transformation-based error-driven learning are used to improve word segmentation performance. Our systems participated in most CWS and POS tagging evaluations and all the NER tracks. As a result, our NER system achieves the first ranks on MSRA open track and MSRA/CityU closed track. Our CWS system achieves the first rank on CityU open track, which means that our systems achieve state-of-the-art performance on Chinese lexical processing.", "phrases": ["entity recognition", "non-local feature", "ner system", "chinese word segmentation"], "overall_score": 1.6312734437275143, "scores": [1.9604776921799, 0.6169077564086032, 0.5394283927355331, 0.524910333696659], "rank_score": 0.9104310437551738} -{"id": "finkel-manning-2009-hierarchical", "title": "Hierarchical Bayesian Domain Adaptation", "abstract": "Multi-task learning is the problem of maximizing the performance of a system across a number of related tasks. When applied to multiple domains for the same task, it is similar to domain adaptation, but symmetric, rather than limited to improving performance on a target domain. We present a more principled, better performing model for this problem, based on the use of a hierarchical Bayesian prior. 
Each domain has its own domain-specific parameter for each feature but, rather than a constant prior over these parameters, the model instead links them via a hierarchical Bayesian global prior. This prior encourages the features to have similar weights across domains, unless there is good evidence to the contrary. We show that the method of (Daume III, 2007), which was presented as a simple \"preprocessing step,\" is actually equivalent, except our representation explicitly separates hyperparameters which were tied in his work. We demonstrate that allowing different values for these hyperparameters significantly improves performance over both a strong baseline and (Daume III, 2007) within both a conditional random field sequence model for named entity recognition and a discriminatively trained dependency parser.", "phrases": ["domain adaptation", "entity recognition", "bayesian extension"], "overall_score": 2.334635108620238, "scores": [1.517487880180654, 0.6356493026355247, 0.5774841081992105], "rank_score": 0.9102070970051298} -{"id": "ng-2004-learning", "title": "Learning Noun Phrase Anaphoricity to Improve Coreference Resolution: Issues in Representation and Optimization", "abstract": "Knowledge of the anaphoricity of a noun phrase might be profitably exploited by a coreference system to bypass the resolution of non-anaphoric noun phrases. Perhaps surprisingly, recent attempts to incorporate automatically acquired anaphoricity information into coreference systems, however, have led to the degradation in resolution performance. This paper examines several key issues in computing and using anaphoricity information to improve learning-based coreference systems. In particular, we present a new corpus-based approach to anaphoricity determination. Experiments on three standard coreference data sets demonstrate the effectiveness of our approach.", "phrases": ["noun phrase", "coreference system", "anaphoricity information"], "overall_score": 2.182529321227576, "scores": [1.2933769439583893, 0.8727690735857163, 0.5644102490111133], "rank_score": 0.9101854221850729} -{"id": "abdul-mageed-etal-2020-nadi", "title": "NADI 2020: The First Nuanced Arabic Dialect Identification Shared Task", "abstract": "We present the results and findings of the First Nuanced Arabic Dialect Identification Shared Task (NADI). This Shared Task includes two subtasks: country-level dialect identification (Subtask 1) and province-level sub-dialect identification (Subtask 2). The data for the shared task covers a total of 100 provinces from 21 Arab countries and is collected from the Twitter domain. As such, NADI is the first shared task to target naturally-occurring fine-grained dialectal text at the sub-country level. A total of 61 teams from 25 countries registered to participate in the tasks, thus reflecting the interest of the community in this area. We received 47 submissions for Subtask 1 from 18 teams and 9 submissions for Subtask 2 from 9 teams.", "phrases": ["dialect identification", "arab country", "nadi shared task"], "overall_score": 2.5770358747263837, "scores": [1.340081699405384, 0.8452620802762216, 0.5433980005299706], "rank_score": 0.9095805934038587} -{"id": "maurya-etal-2021-zmbart", "title": "ZmBART: An Unsupervised Cross-lingual Transfer Framework for Language Generation", "abstract": "Despite the recent advancement in NLP research, cross-lingual transfer for natural language generation is relatively understudied. 
In this work, we transfer supervision from high resource language (HRL) to multiple low-resource languages (LRLs) for natural language generation (NLG). We consider four NLG tasks (text summarization, question generation, news headline generation, and distractor generation) and three syntactically diverse languages, i.e., English, Hindi, and Japanese. We propose an unsupervised cross-lingual language generation framework (called ZmBART) that does not use any parallel or pseudo-parallel/back-translated data. In this framework, we further pre-train mBART sequence-to-sequence denoising auto-encoder model with an auxiliary task using monolingual data of three languages. The objective function of the auxiliary task is close to the target tasks which enriches the multi-lingual latent representation of mBART and provides good initialization for target tasks. Then, this model is fine-tuned with task-specific supervised English data and directly evaluated with low-resource languages in the Zero-shot setting. To overcome catastrophic forgetting and spurious correlation issues, we applied freezing model component and data augmentation approaches respectively. This simple modeling approach gave us promising results. We experimented with few-shot training (with 1000 supervised data points) which boosted the model performance further. We performed several ablations and cross-lingual transferability analyses to demonstrate the robustness of ZmBART.", "phrases": ["cross-lingual transfer", "language generation", "mbart"], "overall_score": 1.4635465842515842, "scores": [0.9480675003409549, 1.2598561432538993, 0.5201342198578509], "rank_score": 0.9093526211509015} -{"id": "cao-etal-2020-hypercore", "title": "HyperCore: Hyperbolic and Co-graph Representation for Automatic ICD Coding", "abstract": "The International Classification of Diseases (ICD) provides a standardized way for classifying diseases, which endows each disease with a unique code. ICD coding aims to assign proper ICD codes to a medical record. Since manual coding is very laborious and prone to errors, many methods have been proposed for the automatic ICD coding task. However, most of existing methods independently predict each code, ignoring two important characteristics: Code Hierarchy and Code Co-occurrence. In this paper, we propose a Hyperbolic and Co-graph Representation method (HyperCore) to address the above problem. Specifically, we propose a hyperbolic representation method to leverage the code hierarchy. Moreover, we propose a graph convolutional network to utilize the code co-occurrence. Experimental results on two widely used datasets demonstrate that our proposed model outperforms previous state-of-the-art methods.", "phrases": ["hyperbolic", "icd code", "code co-occurrence", "hypercore"], "overall_score": 2.093444876823474, "scores": [0.8912954613305513, 0.8359586362037789, 0.9689655278950525, 0.9404666072628874], "rank_score": 0.9091715581730675} -{"id": "pasini-navigli-2017-train", "title": "Train-O-Matic: Large-Scale Supervised Word Sense Disambiguation in Multiple Languages without Manual Training Data", "abstract": "Annotating large numbers of sentences with senses is the heaviest requirement of current Word Sense Disambiguation. We present Train-O-Matic, a language-independent method for generating millions of sense-annotated training instances for virtually all meanings of words in a language's vocabulary. 
The approach is fully automatic: no human intervention is required and the only type of human knowledge used is a WordNet-like resource. Train-O-Matic achieves consistently state-of-the-art performance across gold standard datasets and languages, while at the same time removing the burden of manual annotation. All the training data is available for research purposes at .", "phrases": ["word sense disambiguation", "multiple language", "large number", "train-o-matic"], "overall_score": 1.9975228185048952, "scores": [1.9748209560271237, 0.5691348019901613, 0.5481321385406407, 0.5443593544127869], "rank_score": 0.909111812742678} -{"id": "yao-etal-2013-automatic", "title": "Automatic Coupling of Answer Extraction and Information Retrieval", "abstract": "Information Retrieval (IR) and Answer Extraction are often designed as isolated or loosely connected components in Question Answering (QA), with repeated overengineering on IR, and not necessarily performance gain for QA. We propose to tightly integrate them by coupling automatically learned features for answer extraction to a shallow-structured IR model. Our method is very quick to implement, and significantly improves IR for QA (measured in Mean Average Precision and Mean Reciprocal Rank) by 10%-20% against an uncoupled retrieval baseline in both document and passage retrieval, which further leads to a downstream 20% improvement in QA F1.", "phrases": ["answer extraction", "information retrieval", "automatic coupling"], "overall_score": 1.4621285529963362, "scores": [0.9695010770655529, 0.9122952327446686, 0.8436183365709341], "rank_score": 0.9084715487937185} -{"id": "ruppenhofer-etal-2008-finding", "title": "Finding the Sources and Targets of Subjective Expressions", "abstract": "As many popular text genres such as blogs or news contain opinions by multiple sources and about multiple targets, finding the sources and targets of subjective expressions becomes an important sub-task for automatic opinion analysis systems. We argue that while automatic semantic role labeling systems (ASRL) have an important contribution to make, they cannot solve the problem for all cases. Based on the experience of manually annotating opinions, sources, and targets in various genres, we present linguistic phenomena that require knowledge beyond that of ASRL systems. In particular, we address issues relating to the attribution of opinions to sources; sources and targets that are realized as zero-forms; and inferred opinions. We also discuss in some depth that for arguing attitudes we need to be able to recover propositions and not only argued-about entities. A recurrent theme of the discussion is that close attention to specific discourse contexts is needed to identify sources and targets correctly.", "phrases": ["subjective expression", "semantic role labeling", "well-trained srl model", "role technique"], "overall_score": 2.4598665362999266, "scores": [2.0007485794465016, 0.5719365232592021, 0.5338827082759326, 0.5268456837884828], "rank_score": 0.9083533736925299} -{"id": "lui-cook-2013-classifying", "title": "Classifying English Documents by National Dialect", "abstract": "We investigate national dialect identification, the task of classifying English documents according to their country of origin. We use corpora of known national origin as a proxy for national dialect. 
In order to identify general (as opposed to corpus-specific) characteristics of national dialects of English, we make use of a variety of corpora of different sources, with inter-corpus variation in length, topic and register. The central intuition is that features that are predictive of national origin across different data sources are features that characterize a national dialect. We examine a number of classification approaches motivated by different areas of research, and evaluate the performance of each method across 3 national dialects: Australian, British, and Canadian English. Our results demonstrate that there are lexical and syntactic characteristics of each national dialect that are consistent across data sources.", "phrases": ["national dialect", "canadian english", "language identification", "text categorization", "statistical approach"], "overall_score": 2.516302859123529, "scores": [0.9122040096850933, 0.9501673002543749, 0.9371559483257621, 0.8821794831192292, 0.8561153289058344], "rank_score": 0.9075644140580588} -{"id": "lichtarge-etal-2019-corpora", "title": "Corpora Generation for Grammatical Error Correction", "abstract": "Grammatical Error Correction (GEC) has been recently modeled using the sequence-to-sequence framework. However, unlike sequence transduction problems such as machine translation, GEC suffers from the lack of plentiful parallel data. We describe two approaches for generating large parallel datasets for GEC using publicly available Wikipedia data. The first method extracts source-target pairs from Wikipedia edit histories with minimal filtration heuristics while the second method introduces noise into Wikipedia sentences via round-trip translation through bridge languages. Both strategies yield similar sized parallel corpora containing around 4B tokens. We employ an iterative decoding strategy that is tailored to the loosely supervised nature of our constructed corpora. We demonstrate that neural GEC models trained using either type of corpora give similar performance. Fine-tuning these models on the Lang-8 corpus and ensembling allows us to surpass the state of the art on both the CoNLL `14 benchmark and the JFLEG task. We present systematic analysis that compares the two approaches to data generation and highlights the effectiveness of ensembling.", "phrases": ["grammatical error correction", "gec", "wikipedia edit history", "noise", "data generation"], "overall_score": 2.7188143897606376, "scores": [2.0025092700889604, 0.8447113733324891, 0.5748243445536755, 0.5607440157304718, 0.5550236936473093], "rank_score": 0.9075625394705812} -{"id": "chan-etal-2019-neural", "title": "Neural Keyphrase Generation via Reinforcement Learning with Adaptive Rewards", "abstract": "Generating keyphrases that summarize the main points of a document is a fundamental task in natural language processing. Although existing generative models are capable of predicting multiple keyphrases for an input document as well as determining the number of keyphrases to generate, they still suffer from the problem of generating too few keyphrases. To address this problem, we propose a reinforcement learning (RL) approach for keyphrase generation, with an adaptive reward function that encourages a model to generate both sufficient and accurate keyphrases. Furthermore, we introduce a new evaluation method that incorporates name variations of the ground-truth keyphrases using the Wikipedia knowledge base. 
Thus, our evaluation method can more robustly evaluate the quality of predicted keyphrases. Extensive experiments on five real-world datasets of different scales demonstrate that our RL approach consistently and significantly improves the performance of the state-of-the-art generative models with both conventional and new evaluation methods.", "phrases": ["keyphrase", "reinforcement learning", "generative model", "recall", "pre-trained model"], "overall_score": 2.0897314695631835, "scores": [1.7783606739122566, 1.1606375973436955, 0.5378054650968381, 0.5314196030378154, 0.5295708900637132], "rank_score": 0.9075588458908637} -{"id": "gonen-etal-2020-greek", "title": "It's not Greek to mBERT: Inducing Word-Level Translations from Multilingual BERT", "abstract": "Recent works have demonstrated that multilingual BERT (mBERT) learns rich cross-lingual representations that allow for transfer across languages. We study the word-level translation information embedded in mBERT and present two simple methods that expose remarkable translation capabilities with no fine-tuning. The results suggest that most of this information is encoded in a non-linear way, while some of it can also be recovered with purely linear tools. As part of our analysis, we test the hypothesis that mBERT learns representations which contain both a language-encoding component and an abstract, cross-lingual component, and explicitly identify an empirical language-identity subspace within mBERT representations.", "phrases": ["mbert", "word-level translation", "multilingual bert"], "overall_score": 1.6260928901391207, "scores": [0.9417745965627495, 0.9161609338385481, 0.8646836348369218], "rank_score": 0.9075397217460731} -{"id": "zhu-etal-2020-return", "title": "The Return of Lexical Dependencies: Neural Lexicalized PCFGs", "abstract": "In this paper we demonstrate that context free grammar (CFG) based methods for grammar induction benefit from modeling lexical dependencies. This contrasts with the most popular current methods for grammar induction, which focus on discovering either constituents or dependencies. Previous approaches to marry these two disparate syntactic formalisms (e.g., lexicalized PCFGs) have been plagued by sparsity, making them unsuitable for unsupervised grammar induction. However, in this work, we present novel neural models of lexicalized PCFGs that allow us to overcome sparsity problems and effectively induce both constituents and dependencies within a single model. Experiments demonstrate that this unified framework yields stronger results on both representations than achieved when modeling either formalism alone.", "phrases": ["lexical dependency", "pcfg", "neural lpcfg", "equation"], "overall_score": 2.1759450772562428, "scores": [1.3227252808779668, 1.2098831761894489, 0.57697296159909, 0.5201768980374438], "rank_score": 0.9074395791759873} -{"id": "goldberg-elhadad-2010-efficient", "title": "An Efficient Algorithm for Easy-First Non-Directional Dependency Parsing", "abstract": "We present a novel deterministic dependency parsing algorithm that attempts to create the easiest arcs in the dependency structure first in a non-directional manner. Traditional deterministic parsing algorithms are based on a shift-reduce framework: they traverse the sentence from left to right and, at each step, perform one of a possible set of actions, until a complete tree is built.
A drawback of this approach is that it is extremely local: while decisions can be based on complex structures on the left, they can look only at a few words to the right. In contrast, our algorithm builds a dependency tree by iteratively selecting the best pair of neighbours to connect at each parsing step. This allows incorporation of features from already built structures both to the left and to the right of the attachment point. The parser learns both the attachment preferences and the order in which they should be performed. The result is a deterministic, best-first, O(n log n) parser, which is significantly more accurate than best-first transition-based parsers, and nears the performance of globally optimized parsing models.", "phrases": ["dependency parsing", "arc", "easy-first strategy"], "overall_score": 2.955816835760928, "scores": [1.6262614224740335, 0.563911579478018, 0.5314929781074739], "rank_score": 0.9072219933531751} -{"id": "feng-lapata-2010-visual", "title": "Visual Information in Semantic Representation", "abstract": "The question of how meaning might be acquired by young children and represented by adult speakers of a language is one of the most debated topics in cognitive science. Existing semantic representation models are primarily amodal, based on information provided by the linguistic input, despite ample evidence indicating that the cognitive system is also sensitive to perceptual information. In this work we exploit the vast resource of images and associated documents available on the web and develop a model of multimodal meaning representation which is based on the linguistic and visual context. Experimental results show that a closer correspondence to human data can be obtained by taking the visual modality into account.", "phrases": ["visual information", "latent topic", "word meaning", "joint bimodal representation", "lda"], "overall_score": 2.2542749984928547, "scores": [1.937010120145379, 0.8689709636986332, 0.6008002347121282, 0.578141674420316, 0.5510119860803595], "rank_score": 0.9071869958113631} -{"id": "hu-etal-2019-constrained", "title": "CAN: Constrained Attention Networks for Multi-Aspect Sentiment Analysis", "abstract": "Aspect-level sentiment classification is a fine-grained sentiment analysis task. To detect the sentiment towards a particular aspect in a sentence, previous studies have developed various attention-based methods for generating aspect-specific sentence representations. However, the attention may inherently introduce noise and downgrade the performance. In this paper, we propose constrained attention networks (CAN), a simple yet effective solution, to regularize the attention for multi-aspect sentiment analysis, which alleviates the drawback of the attention mechanism. Specifically, we introduce orthogonal regularization on multiple aspects and sparse regularization on each single aspect. Experimental results on two public datasets demonstrate the effectiveness of our approach. We further extend our approach to multi-task settings and outperform the state-of-the-art methods.", "phrases": ["attention network", "multi-aspect sentiment analysis", "multiple aspect"], "overall_score": 1.8861533514529052, "scores": [0.9705622887993628, 0.9057153620534909, 0.844866435644351], "rank_score": 0.9070480288324015} -{"id": "katti-etal-2018-chargrid", "title": "Chargrid: Towards Understanding 2D Documents", "abstract": "We introduce a novel type of text representation that preserves the 2D layout of a document.
This is achieved by encoding each document page as a two-dimensional grid of characters. Based on this representation, we present a generic document understanding pipeline for structured documents. This pipeline makes use of a fully convolutional encoder-decoder network that predicts a segmentation mask and bounding boxes. We demonstrate its capabilities on an information extraction task from invoices and show that it significantly outperforms approaches based on sequential text or document images.", "phrases": ["text representation", "document page", "chargrid"], "overall_score": 2.3931074732588744, "scores": [1.6007044123609409, 0.5602735228851621, 0.5594337584336546], "rank_score": 0.9068038978932526} -{"id": "lane-bird-2020-interactive", "title": "Interactive Word Completion for Morphologically Complex Languages", "abstract": "Text input technologies for low-resource languages support literacy, content authoring, and language learning. However, tasks such as word completion pose a challenge for morphologically complex languages thanks to the combinatorial explosion of possible words. We have developed a method for morphologically-aware text input in Kunwinjku, a polysynthetic language of northern Australia. We modify an existing finite state recognizer to map input morph prefixes to morph completions, respecting the morphosyntax and morphophonology of the language. We demonstrate the portability of the method by applying it to Turkish. We show that the space of proximal morph completions is many orders of magnitude smaller than the space of full word completions for Kunwinjku. We provide a visualization of the morph completion space to enable the text completion parameters to be fine-tuned. Finally, we report on a web services deployment, along with a web interface which helps users enter morphologically complex words and which retrieves corresponding entries from the lexicon.", "phrases": ["word completion", "polysynthetic language", "australia", "turkish"], "overall_score": 1.2570641002726377, "scores": [2.010467869438229, 0.5553845370360462, 0.5307051463488497, 0.5305627342626377], "rank_score": 0.9067800717714406} -{"id": "kurniawan-etal-2021-ppt", "title": "PPT: Parsimonious Parser Transfer for Unsupervised Cross-Lingual Adaptation", "abstract": "Cross-lingual transfer is a leading technique for parsing low-resource languages in the absence of explicit supervision. Simple `direct transfer' of a learned model based on a multilingual input encoding has provided a strong benchmark. This paper presents a method for unsupervised cross-lingual transfer that improves over direct transfer systems by using their output as implicit supervision as part of self-training on unlabelled text in the target language. The method assumes minimal resources and provides maximal flexibility by (a) accepting any pre-trained arc-factored dependency parser; (b) assuming no access to source language data; (c) supporting both projective and non-projective parsing; and (d) supporting multi-source transfer. With English as the source language, we show significant improvements over state-of-the-art transfer models on both distant and nearby languages, despite our conceptually simpler approach. We provide analyses of the choice of source languages for multi-source transfer, and the advantage of non-projective parsing. 
Our code is available online.", "phrases": ["cross-lingual transfer", "dependency parser", "ppt"], "overall_score": 1.6245932506014098, "scores": [0.8039133759463926, 0.9949905891185058, 0.9212043058873427], "rank_score": 0.9067027569840803} -{"id": "kiss-strunk-2006-unsupervised", "title": "Unsupervised Multilingual Sentence Boundary Detection", "abstract": "In this article, we present a language-independent, unsupervised approach to sentence boundary detection. It is based on the assumption that a large number of ambiguities in the determination of sentence boundaries can be eliminated once abbreviations have been identified. Instead of relying on orthographic clues, the proposed system is able to detect abbreviations with high accuracy using three criteria that only require information about the candidate type itself and are independent of context: Abbreviations can be defined as a very tight collocation consisting of a truncated word and a final period, abbreviations are usually short, and abbreviations sometimes contain internal periods. We also show the potential of collocational evidence for two other important subtasks of sentence boundary disambiguation, namely, the detection of initials and ordinal numbers. The proposed system has been tested extensively on eleven different languages and on different text genres. It achieves good results without any further amendments or language-specific resources. We evaluate its performance against three different baselines and compare it to other systems for sentence boundary detection proposed in the literature.", "phrases": ["sentence boundary", "boundary detection", "abbreviation", "ordinal number"], "overall_score": 2.513886383215571, "scores": [1.2109906505684183, 1.0187597163445123, 0.8742718945558303, 0.5227491569546362], "rank_score": 0.9066928546058493} -{"id": "bugliarello-okazaki-2020-enhancing", "title": "Enhancing Machine Translation with Dependency-Aware Self-Attention", "abstract": "Most neural machine translation models only rely on pairs of parallel sentences, assuming syntactic information is automatically learned by an attention mechanism. In this work, we investigate different approaches to incorporate syntactic knowledge in the Transformer model and also propose a novel, parameter-free, dependency-aware self-attention mechanism that improves its translation quality, especially for long sentences and in low-resource scenarios. We show the efficacy of each approach on WMT English-German and English-Turkish, and WAT English-Japanese translation tasks.", "phrases": ["self-attention", "translation task", "dependency structure"], "overall_score": 2.2524009695517564, "scores": [1.2935711249431623, 0.847989422036799, 0.5777379462780288], "rank_score": 0.9064328310859966} -{"id": "chiang-etal-2008-online", "title": "Online Large-Margin Training of Syntactic and Structural Translation Features", "abstract": "Minimum-error-rate training (MERT) is a bottleneck for current development in statistical machine translation because it is limited in the number of weights it can reliably optimize. Building on the work of Watanabe et al., we explore the use of the MIRA algorithm of Crammer et al. as an alternative to MERT. We first show that by parallel processing and exploiting more of the parse forest, we can obtain results using MIRA that match or surpass MERT in terms of both translation quality and computational cost. 
We then test the method on two classes of features that address deficiencies in the Hiero hierarchical phrase-based model: first, we simultaneously train a large number of Marton and Resnik's soft syntactic constraints, and, second, we introduce a novel structural distortion model. In both cases we obtain significant improvements in translation performance. Optimizing them in combination, for a total of 56 feature weights, we improve performance by 2.6 Bleu on a subset of the NIST 2006 Arabic-English evaluation data.", "phrases": ["mira", "alternative", "hierarchical phrase-based model", "large number", "margin"], "overall_score": 3.1123047798722463, "scores": [1.9356203725782903, 0.9044249659734444, 0.5977288517991343, 0.5712123162794485, 0.5226331450201763], "rank_score": 0.9063239303300987} -{"id": "ding-etal-2017-visualizing", "title": "Visualizing and Understanding Neural Machine Translation", "abstract": "While neural machine translation (NMT) has made remarkable progress in recent years, it is hard to interpret its internal workings due to the continuous representations and non-linearity of neural networks. In this work, we propose to use layer-wise relevance propagation (LRP) to compute the contribution of each contextual word to arbitrary hidden states in the attention-based encoder-decoder framework. We show that visualization with LRP helps to interpret the internal workings of NMT and analyze translation errors.", "phrases": ["neural machine translation", "layer-wise relevance propagation", "contextual word", "attention-based encoder-decoder framework", "translation error"], "overall_score": 2.6195580535712635, "scores": [1.5727517333748156, 0.8824382695451067, 0.8703734001568874, 0.6463517341077862, 0.5596093049826292], "rank_score": 0.9063048884334449} -{"id": "subramanian-lee-2020-hierarchical", "title": "Hierarchical Evidence Set Modeling for Automated Fact Extraction and Verification", "abstract": "Automated fact extraction and verification is a challenging task that involves finding relevant evidence sentences from a reliable corpus to verify the truthfulness of a claim. Existing models either (i) concatenate all the evidence sentences, leading to the inclusion of redundant and noisy information; or (ii) process each claim-evidence sentence pair separately and aggregate all of them later, missing the early combination of related sentences for more accurate claim verification. Unlike the prior works, in this paper, we propose Hierarchical Evidence Set Modeling (HESM), a framework to extract evidence sets (each of which may contain multiple evidence sentences), and verify a claim to be supported, refuted or not enough info, by encoding and attending the claim and evidence sets at different levels of hierarchy. Our experimental results show that HESM outperforms 7 state-of-the-art methods for fact extraction and claim verification. Our source code is available at .", "phrases": ["evidence set modeling", "automated fact extraction", "verification"], "overall_score": 1.2561685680684544, "scores": [0.9553027531978986, 0.9521771239636989, 0.810922368347827], "rank_score": 0.9061340818364748} -{"id": "eisenstein-etal-2011-discovering", "title": "Discovering Sociolinguistic Associations with Structured Sparsity", "abstract": "We present a method to discover robust and interpretable sociolinguistic associations from raw geotagged text data. 
Using aggregate demographic statistics about the authors' geographic communities, we solve a multi-output regression problem between demographics and lexical frequencies. By imposing a composite \u21131,\u221e regularizer, we obtain structured sparsity, driving entire rows of coefficients to zero. We perform two regression studies. First, we use term frequencies to predict demographic attributes; our method identifies a compact set of words that are strongly associated with author demographics. Next, we conjoin demographic attributes into features, which we use to predict term frequencies. The composite regularizer identifies a small number of features, which correspond to communities of authors united by shared demographic and linguistic properties.", "phrases": ["structured sparsity", "text data", "demographic", "sociolinguistic pattern"], "overall_score": 2.4524555945683364, "scores": [1.8603629677882667, 0.6422799135192413, 0.5619478048096597, 0.5578762734246551], "rank_score": 0.9056167398854558} -{"id": "wan-etal-2020-self", "title": "Self-Paced Learning for Neural Machine Translation", "abstract": "Recent studies have proven that the training of neural machine translation (NMT) can be facilitated by mimicking the learning process of humans. Nevertheless, the achievements of such curriculum learning rely on the quality of an artificial schedule drawn up with handcrafted features, e.g., sentence length or word rarity. We ameliorate this procedure in a more flexible manner by proposing self-paced learning, where the NMT model is allowed to 1) automatically quantify the learning confidence over training examples; and 2) flexibly govern its learning by regulating the loss in each iteration step. Experimental results over multiple translation tasks demonstrate that the proposed model yields better performance than strong baselines and those models trained with human-designed curricula on both translation quality and convergence speed.", "phrases": ["neural machine translation", "training example", "translation quality", "self-paced learning", "well performance"], "overall_score": 1.7620755031076651, "scores": [1.9241381059922427, 0.9374375350129392, 0.5625895061508391, 0.5593010966382306, 0.5441721570926168], "rank_score": 0.9055276801773736} -{"id": "angeli-etal-2015-leveraging", "title": "Leveraging Linguistic Structure For Open Domain Information Extraction", "abstract": "Relation triples produced by open domain information extraction (open IE) systems are useful for question answering, inference, and other IE tasks. Traditionally these are extracted using a large set of patterns; however, this approach is brittle on out-of-domain text and long-range dependencies, and gives no insight into the substructure of the arguments. We replace this large pattern set with a few patterns for canonically structured sentences, and shift the focus to a classifier which learns to extract self-contained clauses from longer sentences. We then run natural logic inference over these short clauses to determine the maximally specific arguments for each candidate triple.
We show that our approach outperforms a state-of-the-art open IE system on the end-to-end TAC-KBP 2013 Slot Filling task.", "phrases": ["domain information extraction", "relation triple", "openie system", "knowledge base", "argument span"], "overall_score": 2.8383672353060065, "scores": [2.0355154094659587, 0.8456633563940804, 0.5654701386715959, 0.544319816157346, 0.5352192417801275], "rank_score": 0.9052375924938216} -{"id": "zhang-clark-2011-syntactic", "title": "Syntactic Processing Using the Generalized Perceptron and Beam Search", "abstract": "We study a range of syntactic processing tasks using a general statistical framework that consists of a global linear model, trained by the generalized perceptron together with a generic beam-search decoder. We apply the framework to word segmentation, joint segmentation and POS-tagging, dependency parsing, and phrase-structure parsing. Both components of the framework are conceptually and computationally very simple. The beam-search decoder only requires the syntactic processing task to be broken into a sequence of decisions, such that, at each stage in the process, the decoder is able to consider the top-n candidates and generate all possibilities for the next stage. Once the decoder has been defined, it is applied to the training data, using trivial updates according to the generalized perceptron to induce a model. This simple framework performs surprisingly well, giving accuracy results competitive with the state-of-the-art on all the tasks we consider. The computational simplicity of the decoder and training algorithm leads to significantly higher test speeds and lower training times than their main alternatives, including log-linear and large-margin training algorithms and dynamic-programming for decoding. Moreover, the framework offers the freedom to define arbitrary features which can make alternative training and decoding algorithms prohibitively slow. We discuss how the general framework is applied to each of the problems studied in this article, making comparisons with alternative learning and decoding algorithms. We also show how the comparability of candidates considered by the beam is an important factor in the performance. We argue that the conceptual and computational simplicity of the framework, together with its language-independent nature, make it a competitive choice for a range of syntactic processing tasks and one that should be considered for comparison by developers of alternative approaches.", "phrases": ["generalized perceptron", "beam search", "segmentation", "shift-reduce parser"], "overall_score": 2.9128440461654974, "scores": [1.6354444092348452, 0.876908184264089, 0.571364354712074, 0.5359865814021804], "rank_score": 0.9049258824032972} -{"id": "kiritchenko-mohammad-2016-capturing", "title": "Capturing Reliable Fine-Grained Sentiment Associations by Crowdsourcing and Best\u2013Worst Scaling", "abstract": "Access to word-sentiment associations is useful for many applications, including sentiment analysis, stance detection, and linguistic analysis. However, manually assigning fine-grained sentiment association scores to words has many challenges with respect to keeping annotations consistent. We apply the annotation technique of Best-Worst Scaling to obtain real-valued sentiment association scores for words and phrases in three different domains: general English, English Twitter, and Arabic Twitter. 
We show that on all three domains the ranking of words by sentiment remains remarkably consistent even when the annotation process is repeated with a different set of annotators. We also, for the first time, determine the minimum difference in sentiment association that is perceptible to native speakers of a language.", "phrases": ["sentiment association", "crowdsourcing", "item"], "overall_score": 2.3878535529659404, "scores": [0.8612272997617738, 0.9808628933163198, 0.8723490040204317], "rank_score": 0.9048130656995085} -{"id": "barriere-balahur-2020-improving", "title": "Improving Sentiment Analysis over non-English Tweets using Multilingual Transformers and Automatic Translation for Data-Augmentation", "abstract": "Tweets are a specific kind of text data when compared to general text. Although sentiment analysis over tweets has become very popular in the last decade for English, it is still difficult to find huge annotated corpora for non-English languages. The recent rise of transformer models in Natural Language Processing has made it possible to achieve unparalleled performance in many tasks, but these models need a considerable quantity of text to adapt to the tweet domain. We propose the use of a multilingual transformer model that we pre-train over English tweets and to which we apply data augmentation using automatic translation to adapt the model to non-English languages. Our experiments in French, Spanish, German and Italian suggest that the proposed technique is an efficient way to improve the results of the transformers over small corpora of tweets in a non-English language.", "phrases": ["sentiment analysis", "automatic translation", "multilingual transformer model", "english tweet"], "overall_score": 0.994017037568034, "scores": [1.6831201970906193, 0.8386887589391946, 0.5568826526896972, 0.540481589399797], "rank_score": 0.9047932995298271} -{"id": "roder-etal-2014-n3", "title": "N3 - A Collection of Datasets for Named Entity Recognition and Disambiguation in the NLP Interchange Format", "abstract": "Extracting Linked Data following the Semantic Web principle from unstructured sources has become a key challenge for scientific research. Named Entity Recognition and Disambiguation are two basic operations in this extraction process. One step towards the realization of the Semantic Web vision and the development of highly accurate tools is the availability of data for validating the quality of processes for Named Entity Recognition and Disambiguation as well as for algorithm tuning. This article presents three novel, manually curated and annotated corpora (N3).
All of them are based on a free license and stored in the NLP Interchange Format to leverage the Linked Data character of our datasets.", "phrases": ["named entity recognition", "disambiguation", "nlp interchange format"], "overall_score": 1.2539806549655674, "scores": [0.9533254685927282, 0.8826135917075293, 0.8777284481340205], "rank_score": 0.9045558361447593} -{"id": "foster-etal-2003-statistical", "title": "Statistical machine translation: rapid development with limited resources", "abstract": "We describe an experiment in rapid development of a statistical machine translation (SMT) system from scratch, using limited resources: under this heading we include not only training data, but also computing power, linguistic knowledge, programming effort, and absolute time.", "phrases": ["rapid development", "limited resource", "statistical machine translation"], "overall_score": 0.9934707517353512, "scores": [1.0073940948804043, 0.8536000728207174, 0.8518939785067845], "rank_score": 0.9042960487359687} -{"id": "perret-etal-2016-integer", "title": "Integer Linear Programming for Discourse Parsing", "abstract": "In this paper we present the first, to the best of our knowledge, discourse parser that is able to predict non-tree DAG structures. We use Integer Linear Programming (ILP) to encode both the objective function and the constraints as global decoding over local scores. Our underlying data come from multi-party chat dialogues, which require the prediction of DAGs. We use the dependency parsing paradigm, as has been done in the past (Muller et al., 2012; Li et al., 2014; Afantenos et al., 2015), but we use the underlying formal framework of SDRT and exploit SDRT's notions of left and right distributive relations. We achieve an F-measure of 0.531 for fully labeled structures which beats the previous state of the art.", "phrases": ["discourse", "integer linear programming", "stac corpus", "approximation", "sdrt graph"], "overall_score": 1.8801748689439524, "scores": [1.8896101557505187, 0.9689515801448724, 0.585168783357812, 0.5451710248060918, 0.5319633883232007], "rank_score": 0.904172986476499} -{"id": "mazare-etal-2018-training", "title": "Training Millions of Personalized Dialogue Agents", "abstract": "Current dialogue systems fail at being engaging for users, especially when trained end-to-end without relying on proactive reengaging scripted strategies. Zhang et al. (2018) showed that the engagement level of end-to-end dialogue models increases when conditioning them on text personas providing some personalized back-story to the model. However, the dataset used in Zhang et al. (2018) is synthetic and only contains around 1k different personas. In this paper we introduce a new dataset providing 5 million personas and 700 million persona-based dialogues. Our experiments show that, at this scale, training using personas still improves the performance of end-to-end systems. In addition, we show that other tasks benefit from the wide coverage of our dataset by fine-tuning our model on the data from Zhang et al. 
(2018) and achieving state-of-the-art results.", "phrases": ["personalization", "agent", "conversation"], "overall_score": 2.8335492269207143, "scores": [1.2717967286291387, 0.9068421176279028, 0.5324641235958697], "rank_score": 0.9037009899509704} -{"id": "poon-2013-grounded", "title": "Grounded Unsupervised Semantic Parsing", "abstract": "We present the first unsupervised approach for semantic parsing that rivals the accuracy of supervised approaches in translating natural-language questions to database queries. Our GUSP system produces a semantic parse by annotating the dependency-tree nodes and edges with latent states, and learns a probabilistic grammar using EM. To compensate for the lack of example annotations or question-answer pairs, GUSP adopts a novel grounded-learning approach to leverage database for indirect supervision. On the challenging ATIS dataset, GUSP attained an accuracy of 84%, effectively tying with the best published results by supervised approaches.", "phrases": ["unsupervised semantic parsing", "query", "node"], "overall_score": 2.166902448074869, "scores": [1.620191582550746, 0.5621598736342974, 0.5286540735261062], "rank_score": 0.9036685099037164} -{"id": "birke-sarkar-2006-clustering", "title": "A Clustering Approach for Nearly Unsupervised Recognition of Nonliteral Language", "abstract": "In this paper we present TroFi (Trope Finder), a system for automatically classifying literal and nonliteral usages of verbs through nearly unsupervised word-sense disambiguation and clustering techniques. TroFi uses sentential context instead of selectional constraint violations or paths in semantic hierarchies. It also uses literal and nonliteral seed sets acquired and cleaned without human supervision in order to bootstrap learning. We adapt a word-sense disambiguation algorithm to our task and augment it with multiple seed set learners, a voting schema, and additional features like SuperTags and extrasentential context. Detailed experiments on hand-annotated data show that our enhanced algorithm outperforms the baseline by 24.4%. Using the TroFi algorithm, we also build the TroFi Example Base, an extensible resource of annotated literal/nonliteral examples which is freely available to the NLP research community.", "phrases": ["seed set", "sentence clustering approach", "non-literal classification", "language recognition", "new input sentence"], "overall_score": 3.1859270455883486, "scores": [1.36023860140219, 1.1790095414460877, 0.8557326497882158, 0.5873523527168137, 0.5349687915142101], "rank_score": 0.9034603873735035} -{"id": "fares-etal-2017-word", "title": "Word vectors, reuse, and replicability: Towards a community repository of large-text resources", "abstract": "This paper describes an emerging shared repository of large-text resources for creating word vectors, including pre-processed corpora and pre-trained vectors for a range of frameworks and configurations. This will facilitate reuse, rapid experimentation, and replicability of results.", "phrases": ["reuse", "community repository", "large-text resource"], "overall_score": 1.4536358917185186, "scores": [0.9382590383266151, 0.9077034035332319, 0.8636218431034393], "rank_score": 0.9031947616544288} -{"id": "styler-iv-etal-2014-temporal", "title": "Temporal Annotation in the Clinical Domain", "abstract": "This article discusses the requirements of a formal specification for the annotation of temporal information in clinical narratives. 
We discuss the implementation and extension of ISO-TimeML for annotating a corpus of clinical notes, known as the THYME corpus. To reflect the information task and the heavily inference-based reasoning demands in the domain, a new annotation guideline has been developed, \u201cthe THYME Guidelines to ISO-TimeML (THYME-TimeML)\u201d. To clarify what relations merit annotation, we distinguish between linguistically-derived and inferentially-derived temporal orderings in the text. We also apply a top performing TempEval 2013 system against this new resource to measure the difficulty of adapting systems to the clinical domain. The corpus is available to the community and has been proposed for use in a SemEval 2015 task.", "phrases": ["clinical domain", "narrative", "thyme-timeml", "temporal annotation", "relation type"], "overall_score": 2.9424781882716924, "scores": [1.7104255701052564, 0.9225462381478858, 0.8337457569126926, 0.5248972369077616, 0.5240251642893212], "rank_score": 0.9031279932725834} -{"id": "park-cardie-2018-corpus", "title": "A Corpus of eRulemaking User Comments for Measuring Evaluability of Arguments", "abstract": "eRulemaking is a means for government agencies to directly reach citizens to solicit their opinions and experiences regarding newly proposed rules. The effort, however, is partly hampered by citizens\u2019 comments that lack reasoning and evidence, which are largely ignored since government agencies are unable to evaluate the validity and strength. We present Cornell eRulemaking Corpus \u2013 CDCP , an argument mining corpus annotated with argumentative structure information capturing the evaluability of arguments. The corpus consists of 731 user comments on Consumer Debt Collection Practices (CDCP) rule by the Consumer Financial Protection Bureau (CFPB); the resulting dataset contains 4931 elementary unit and 1221 support relation annotations. It is a resource for building argument mining systems that can not only extract arguments from unstructured text, but also identify what additional information is necessary for readers to understand and evaluate a given argument. Immediate applications include providing real-time feedback to commenters, specifying which types of support for which propositions can be added to construct better-formed arguments.", "phrases": ["comment", "evaluability", "cornell erulemaking corpus", "argument model"], "overall_score": 2.383312331247524, "scores": [0.8330955211546072, 1.3239641205758748, 0.8985876688170662, 0.5567218555212791], "rank_score": 0.9030922915172068} -{"id": "kitaev-klein-2018-constituency", "title": "Constituency Parsing with a Self-Attentive Encoder", "abstract": "We demonstrate that replacing an LSTM encoder with a self-attentive architecture can lead to improvements to a state-of-the-art discriminative constituency parser. The use of attention makes explicit the manner in which information is propagated between different locations in the sentence, which we use to both analyze our model and propose potential improvements. For example, we find that separating positional and content information in the encoder can lead to improved parsing accuracy. Additionally, we evaluate different approaches for lexical representation. Our parser achieves new state-of-the-art results for single models trained on the Penn Treebank: 93.55 F1 without the use of any external data, and 95.13 F1 when using pre-trained word representations. 
Our parser also outperforms the previous best-published accuracy figures on 8 of the 9 languages in the SPMRL dataset.", "phrases": ["self-attentive encoder", "constituency parser", "chart-based parser"], "overall_score": 3.071113431024695, "scores": [0.886226198388837, 0.9484159735944315, 0.8742098172451694], "rank_score": 0.902950663076146} -{"id": "tsao-wible-2009-method", "title": "A Method for Unsupervised Broad-Coverage Lexical Error Detection and Correction", "abstract": "We describe and motivate an unsupervised lexical error detection and correction algorithm and its application in a tool called Lexbar appearing as a query box on the Web browser toolbar or as a search engine interface. Lexbar accepts as user input candidate strings of English to be checked for acceptability and, where errors are detected, offers corrections. We introduce the notion of hybrid n-gram and extract these from BNC as the knowledgebase against which to compare user input. An extended notion of edit distance is used to identify most likely candidates for correcting detected errors. Results are illustrated with four types of errors.", "phrases": ["lexical error detection", "correction", "n-gram"], "overall_score": 1.4531431286792316, "scores": [0.9405521951700025, 0.8415653839251784, 0.9265481931957988], "rank_score": 0.90288859076366} -{"id": "panyam-etal-2016-asm", "title": "ASM Kernel: Graph Kernel using Approximate Subgraph Matching for Relation Extraction", "abstract": "Kernel methods have been widely studied in several natural language processing tasks such as relation extraction and sentence classification. In this work, we present a new graph kernel that is derived from a distance measure described in prior work as Approximate Subgraph Matching (ASM). The classical ASM distance, shown to be effective for event extraction, is not a valid kernel and was primarily designed to work with rule based systems. We modify this distance suitably to render it a valid kernel (ASM kernel) and enable its use in powerful learning algorithms such as Support Vector Machine (SVM). We compare the ASM kernel with SVMs to the classical ASM with a rule based approach, for two relation extraction tasks and show an improved performance with the kernel based approach. Compared to other kernels such as the Subset tree kernel and the Partial tree kernel, ASM kernel outperforms in relation extraction tasks and is of comparable performance in a general sentence classification task. We describe the advantages of the ASM kernel such as its flexibility and ease of modification, which offers further directions for improvement.", "phrases": ["approximate subgraph matching", "relation extraction", "asm kernel"], "overall_score": 1.2515623267002374, "scores": [0.9166449366206699, 0.8985317677146775, 0.8932574388054804], "rank_score": 0.9028113810469426} -{"id": "lin-bilmes-2010-multi", "title": "Multi-document Summarization via Budgeted Maximization of Submodular Functions", "abstract": "We treat the text summarization problem as maximizing a submodular function under a budget constraint. We show, both theoretically and empirically, a modified greedy algorithm can efficiently solve the budgeted submodular maximization problem near-optimally, and we derive new approximation bounds in doing so. 
Experiments on the DUC'04 task show that our approach is superior to the best-performing method from the DUC'04 evaluation on ROUGE-1 scores.", "phrases": ["submodular function", "greedy algorithm", "multi-document summarization", "coverage"], "overall_score": 2.7904822012502195, "scores": [0.9786311915186756, 1.570315470412252, 0.5402165550124891, 0.5218932384196624], "rank_score": 0.9027641138407698} -{"id": "ghazvininejad-etal-2016-generating", "title": "Generating Topical Poetry", "abstract": "We describe Hafez, a program that generates any number of distinct poems on a user-supplied topic. Poems obey rhythmic and rhyme constraints. We describe the poetry-generation algorithm, give experimental data concerning its parameters, and show its generality with respect to language and poetic form.", "phrases": ["poem", "user-supplied topic", "rhyme", "language model"], "overall_score": 2.6577628263013624, "scores": [1.4161282685336518, 1.0934415678981715, 0.5588405193428746, 0.5421420721839422], "rank_score": 0.90263810698966} -{"id": "uresova-etal-2018-synonymy", "title": "Synonymy in Bilingual Context: The CzEngClass Lexicon", "abstract": "This paper describes CzEngClass, a bilingual lexical resource being built to investigate verbal synonymy in bilingual context and to relate semantic roles common to one synonym class to verb arguments (verb valency). In addition, the resource is linked to existing resources with the same or a similar aim: English and Czech WordNet, FrameNet, PropBank, VerbNet (SemLink), and valency lexicons for Czech and English (PDT-Vallex, Vallex, and EngVallex). There are several goals of this work and resource: (a) to provide gold standard data for automatic experiments in the future (such as automatic discovery of synonym classes, word sense disambiguation, assignment of classes to occurrences of verbs in text, coreferential linking of verb and event arguments in text, etc.), (b) to build a core (bilingual) lexicon linked to existing resources, for comparative studies and possibly for training automatic tools, and (c) to enrich the annotation of a parallel treebank, the Prague Czech English Dependency Treebank, which so far contained valency annotation but has not linked synonymous senses of verbs together. The method used for extracting the synonym classes is a semi-automatic process with a substantial amount of manual work during filtering, role assignment to classes and individual class members' arguments, and linking to the external lexical resources. We present the first version with 200 classes (about 1800 verbs) and evaluate interannotator agreement using several metrics.", "phrases": ["bilingual context", "czengclass lexicon", "synonymy"], "overall_score": 0.9912731101537886, "scores": [0.9318224830184534, 0.8892463509604027, 0.8858181735082364], "rank_score": 0.9022956691623643} -{"id": "zhila-gelbukh-2014-open", "title": "Open Information Extraction for Spanish Language based on Syntactic Constraints", "abstract": "Open Information Extraction (Open IE) supports the analysis of vast amounts of text through the extraction of assertions, or relations, in the form of tuples \u27e8argument 1; relation; argument 2\u27e9. Various approaches to Open IE have been designed to perform in a fast, unsupervised manner. All of them require language-specific information for their implementation. In this work, we introduce an approach to Open IE based on syntactic constraints over POS tag sequences targeted at the Spanish language.
We describe the rules specific to Spanish language constructions and their implementation in EXTRHECH, an Open IE system for Spanish. We also discuss language-specific issues of implementation. We compare EXTRHECH\u2019s performance with that of REVERB, a similar Open IE system for English, on a parallel dataset and show that these systems perform at a very similar level. We also compare EXTRHECH\u2019s performance on a dataset of grammatically correct sentences against its performance on a dataset of random texts extracted from the Web, drastically different in their quality from the first dataset. The latter experiment shows the robustness of EXTRHECH on texts from the Web.", "phrases": ["spanish language", "syntactic constraint", "open information extraction"], "overall_score": 0.9912247255462883, "scores": [0.9138294781834639, 0.9130652371707716, 0.879860167429722], "rank_score": 0.9022516275946525} -{"id": "chen-etal-2008-learning", "title": "Learning Reliable Information for Dependency Parsing Adaptation", "abstract": "In this paper, we focus on the adaptation problem that has a large amount of labeled data in the source domain and a large amount of unlabeled data in the target domain. Our aim is to learn reliable information from unlabeled target domain data for dependency parsing adaptation. Current state-of-the-art statistical parsers perform much better for shorter dependencies than for longer ones. Thus we propose an adaptation approach that learns reliable information on shorter dependencies in unlabeled target data to help parse longer-distance words. The unlabeled data is parsed by a dependency parser trained on labeled source domain data. The experimental results indicate that our proposed approach outperforms the baseline system, and is better than current state-of-the-art adaptation techniques.", "phrases": ["reliable information", "dependency parsing adaptation", "target domain data", "chinese", "unlabeled attachment score"], "overall_score": 2.0771839815139286, "scores": [1.8559638604779285, 0.9898565856214194, 0.5657052198350849, 0.5533468502057403, 0.5456751892064532], "rank_score": 0.9021095410693253} -{"id": "clarke-lapata-2010-discourse", "title": "Discourse Constraints for Document Compression", "abstract": "Sentence compression holds promise for many applications ranging from summarization to subtitle generation. The task is typically performed on isolated sentences without taking the surrounding context into account, even though most applications would operate over entire documents. In this article we present a discourse-informed model which is capable of producing document compressions that are coherent and informative. Our model is inspired by theories of local coherence and formulated within the framework of integer linear programming. Experimental results show significant improvements over a state-of-the-art discourse-agnostic approach.", "phrases": ["document compression", "summarization", "human evaluation"], "overall_score": 1.8756833729469176, "scores": [0.9227149264414756, 0.9551180967443738, 0.8282060772425528], "rank_score": 0.902013033476134} -{"id": "mohammad-2012-emotional", "title": "#Emotional Tweets", "abstract": "Detecting emotions in microblogs and social media posts has applications for industry, health, and security. However, there exists no microblog corpus with instances labeled for emotions for developing supervised systems. In this paper, we describe how we created such a corpus from Twitter posts using emotion-word hashtags.
We conduct experiments to show that the self-labeled hashtag annotations are consistent and match the annotations of trained judges. We also show how the Twitter emotion corpus can be used to improve emotion classification accuracy in a different domain. Finally, we extract a word-emotion association lexicon from this Twitter corpus, and show that it leads to significantly better results than the manually crafted WordNet Affect lexicon in an emotion classification task.", "phrases": ["emotion", "hashtag", "judge"], "overall_score": 1.9813365813954635, "scores": [1.2359238066261418, 0.9426399024775766, 0.5266717072015844], "rank_score": 0.9017451387684342} -{"id": "song-etal-2018-leveraging", "title": "Leveraging Context Information for Natural Question Generation", "abstract": "The task of natural question generation is to generate a corresponding question given the input passage (fact) and answer. It is useful for enlarging the training set of QA systems. Previous work has adopted sequence-to-sequence models that take a passage with an additional bit to indicate answer position as input. However, they do not explicitly model the information between the answer and other context within the passage. We propose a model that matches the answer with the passage before generating the question. Experiments show that our model outperforms the existing state of the art using rich features.", "phrases": ["natural question generation", "well quality answer", "word position"], "overall_score": 2.7870739588848448, "scores": [1.0012583525689671, 0.8571877995746157, 0.8465383321589023], "rank_score": 0.9016614947674951} -{"id": "vilar-etal-2006-aer", "title": "AER: do we need to \u201cimprove\u201d our alignments?", "abstract": "Currently, most statistical machine translation systems make use of alignments as a first step in the process of training the actual translation models. Several researchers have investigated how to improve the alignment quality, with the (intuitive) assumption that better alignments increase the translation quality. In this paper we will investigate this assumption and show that this is not always the case.", "phrases": ["translation quality", "aer", "well alignment"], "overall_score": 1.8747399994028824, "scores": [0.806682045382254, 1.305040589975899, 0.5929554647365636], "rank_score": 0.901559366698239} -{"id": "arabsorkhi-shamsfard-2006-unsupervised", "title": "Unsupervised Discovery of Persian Morphemes", "abstract": "This paper reports the present results of research on unsupervised Persian morpheme discovery. In this paper we present a method for discovering the morphemes of the Persian language through automatic analysis of corpora. We utilized a Minimum Description Length (MDL) based algorithm with some improvements and applied it to a Persian corpus. Our improvements include enhancing the cost function using some heuristics, preventing the split of high-frequency chunks, exploiting a penalty for first and last letters, and distinguishing pre-parts and post-parts.
Our improved approach has raised the precision, recall and F-measure of discovery by 32%, 17% and 23%, respectively.", "phrases": ["morpheme", "persian language", "automatic analysis"], "overall_score": 1.2496154294119153, "scores": [0.9221759991498927, 0.8985310920276031, 0.8835138833688582], "rank_score": 0.9014069915154513} -{"id": "kumar-byrne-2005-local", "title": "Local Phrase Reordering Models for Statistical Machine Translation", "abstract": "We describe stochastic models of local phrase movement that can be incorporated into a Statistical Machine Translation (SMT) system. These models provide properly formulated, non-deficient, probability distributions over reordered phrase sequences. They are implemented by Weighted Finite State Transducers. We describe EM-style parameter re-estimation procedures based on phrase alignment under the complete translation model incorporating reordering. Our experiments show that the reordering model yields substantial improvements in translation performance on Arabic-to-English and Chinese-to-English MT tasks. We also show that the procedure scales as the bitext size is increased.", "phrases": ["statistical machine translation", "state transducer", "orientation"], "overall_score": 2.5527360907950656, "scores": [0.9819007821437676, 0.8851005218048434, 0.8360102036215168], "rank_score": 0.9010038358567093} -{"id": "bethard-parker-2016-semantically", "title": "A Semantically Compositional Annotation Scheme for Time Normalization", "abstract": "We present a new annotation scheme for normalizing time expressions, such as \u201cthree days ago\u201d, to computer-readable forms, such as 2016-03-07. The annotation scheme addresses several weaknesses of the existing TimeML standard, allowing the representation of time expressions that align to more than one calendar unit (e.g., \u201cthe past three summers\u201d), that are defined relative to events (e.g., \u201cthree weeks postoperative\u201d), and that are unions or intersections of smaller time expressions (e.g., \u201cTuesdays and Thursdays\u201d). It achieves this by modeling time expression interpretation as the semantic composition of temporal operators like UNION, NEXT, and AFTER. We have applied the annotation scheme to 34 documents so far, producing 1104 annotations, and achieving inter-annotator agreement of 0.821.", "phrases": ["semantically compositional annotation", "time normalization", "scate"], "overall_score": 1.6139271961201138, "scores": [0.9225497705625569, 0.9432366435451429, 0.8364633417963834], "rank_score": 0.9007499186346944} -{"id": "zhang-etal-2023-survey", "title": "A Survey of Multi-task Learning in Natural Language Processing: Regarding Task Relatedness and Training Methods", "abstract": "Multi-task learning (MTL) has become increasingly popular in natural language processing (NLP) because it improves the performance of related tasks by exploiting their commonalities and differences. Nevertheless, it is still not understood very well how multi-task learning can be implemented based on the relatedness of training tasks. In this survey, we review recent advances in multi-task learning methods in NLP, with the aim of summarizing them into two general multi-task training methods based on their task relatedness: (i) joint training and (ii) multi-step training.
We present examples in various NLP downstream applications, summarize the task relationships and discuss future directions of this promising topic.", "phrases": ["survey", "multi-task learning", "task relatedness"], "overall_score": 0.9895689597964719, "scores": [0.9563870805575383, 0.9081747639891626, 0.8376716094304921], "rank_score": 0.9007444846590644} -{"id": "potash-etal-2017-heres", "title": "Here's My Point: Joint Pointer Architecture for Argument Mining", "abstract": "In order to determine argument structure in text, one must understand how individual components of the overall argument are linked. This work presents the first neural network-based approach to link extraction in argument mining. Specifically, we propose a novel architecture that applies Pointer Network sequence-to-sequence attention modeling to structural prediction in discourse parsing tasks. We then develop a joint model that extends this architecture to simultaneously address the link extraction task and the classification of argument components. The proposed joint model achieves state-of-the-art results on two separate evaluation corpora, showing far superior performance than the previously proposed corpus-specific and heavily feature-engineered models. Furthermore, our results demonstrate that jointly optimizing for both tasks is crucial for high performance.", "phrases": ["joint pointer architecture", "argument mining", "link", "joint model", "subtask"], "overall_score": 2.6521097052838027, "scores": [1.9835502831680372, 0.9240166522338533, 0.5364036542221138, 0.533323471258536, 0.5262968167837463], "rank_score": 0.9007181755332573} -{"id": "yin-etal-2016-simple", "title": "Simple Question Answering by Attentive Convolutional Neural Network", "abstract": "This work focuses on answering single-relation factoid questions over Freebase. Each question can acquire the answer from a single fact of form (subject, predicate, object) in Freebase. This task, simple question answering (SimpleQA), can be addressed via a two-step pipeline: entity linking and fact selection. In fact selection, we match the subject entity in a fact candidate with the entity mention in the question by a character-level convolutional neural network (char-CNN), and match the predicate in that fact with the question by a word-level CNN (word-CNN). This work makes two main contributions. (i) A simple and effective entity linker over Freebase is proposed. Our entity linker outperforms the state-of-the-art entity linker over SimpleQA task. (ii) A novel attentive maxpooling is stacked over word-CNN, so that the predicate representation can be matched with the predicate-focused question representation more effectively. Experiments show that our system sets new state-of-the-art in this task.", "phrases": ["convolutional neural network", "word-level cnn", "simple question"], "overall_score": 2.65125506725523, "scores": [0.9495538611760294, 0.8922756897777968, 0.8594542107552962], "rank_score": 0.9004279205697076} -{"id": "li-2010-query", "title": "Query Understanding in Web Search - by Large Scale Log Data Mining and Statistical Learning", "abstract": "Query understanding is an important component of web search, like document understanding, query document matching, ranking, and user understanding. The goal of query understanding is to predict the user\u2019s search intent from the given query. Needless to say, search log mining and statistical learning are fundamental technologies to address the task of query understanding. 
In this talk, I will first introduce a large-scale search log mining platform which we have developed at MSRA. I will then explain our approach to query understanding, as well as document understanding, query document matching, and user understanding. After that, I will describe in detail our methods for query understanding based on statistical learning. They include query refinement using CRF, named entity recognition in query using topic model, and context aware query topic prediction using HMM. This is joint work with Gu Xu, Daxin Jiang and other collaborators.", "phrases": ["web search", "statistical learning", "query understanding"], "overall_score": 1.2482080478067523, "scores": [0.9398622588097768, 0.9074398977932085, 0.8538731842497584], "rank_score": 0.9003917802842478} -{"id": "arora-etal-2018-linear", "title": "Linear Algebraic Structure of Word Senses, with Applications to Polysemy", "abstract": "Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 \u201cdiscourse atoms\u201d that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.", "phrases": ["polysemy", "linear algebraic structure", "different sense"], "overall_score": 2.7412423632941807, "scores": [0.908263384216875, 0.8858285228635008, 0.907063120711743], "rank_score": 0.9003850092640396} -{"id": "shin-etal-2021-constrained", "title": "Constrained Language Models Yield Few-Shot Semantic Parsers", "abstract": "We explore the use of large pretrained language models as few-shot semantic parsers. The goal in semantic parsing is to generate a structured meaning representation given a natural language input. However, language models are trained to generate natural language. To bridge the gap, we use language models to paraphrase inputs into a controlled sublanguage resembling English that can be automatically mapped to a target meaning representation. Our results demonstrate that with only a small amount of data and very little code to convert into English-like representations, our blueprint for rapidly bootstrapping semantic parsers leads to surprisingly effective performance on multiple community tasks, greatly exceeding baseline methods also trained on the same limited data.", "phrases": ["semantic parser", "gap", "canonical utterance", "gpt-3"], "overall_score": 2.0731536624557068, "scores": [1.9795935749197509, 0.5489500887822496, 0.5419554532708526, 0.5309376659952673], "rank_score": 0.90035919574203} -{"id": "farra-etal-2015-scoring", "title": "Scoring Persuasive Essays Using Opinions and their Targets", "abstract": "In this work, we investigate whether the analysis of opinion expressions can help in scoring persuasive essays. For this, we develop systems that predict holistic essay scores based on features extracted from opinion expressions, topical elements, and their combinations. 
Experiments on test taker essays show that essay scores produced using opinion features are indeed correlated with human scores. Moreover, we find that combining opinions with their targets (what the opinions are about) produces the best result when compared to using only opinions or only topics.", "phrases": ["persuasive essay", "opinion expression", "essay score"], "overall_score": 2.158143500491905, "scores": [1.1061607396014617, 0.9593600463062033, 0.6345264492328351], "rank_score": 0.9000157450468333} -{"id": "zhang-etal-2015-bidirectional", "title": "Bidirectional Long Short-Term Memory Networks for Relation Classification", "abstract": "Relation classification is an important semantic processing, which has achieved great attention in recent years. The main challenge is the fact that important information can appear at any position in the sentence. Therefore, we propose bidirectional long short-term memory networks (BLSTM) to model the sentence with complete, sequential information about all words. At the same time, we also use features derived from the lexical resources such as WordNet or NLP systems such as dependency parser and named entity recognizers (NER). The experimental results on SemEval-2010 show that BLSTM-based method only with word embeddings as input features is sufficient to achieve state-of-the-art performance, and importing more features could further improve the performance.", "phrases": ["short-term memory network", "relation classification", "sequential information", "powerful encoder"], "overall_score": 2.1577684832494333, "scores": [1.8433510344065756, 0.6567448417362796, 0.5595959603049829, 0.5397455663896816], "rank_score": 0.89985935070938} -{"id": "han-etal-2019-joint", "title": "Joint Event and Temporal Relation Extraction with Shared Representations and Structured Prediction", "abstract": "We propose a joint event and temporal relation extraction model with shared representation learning and structured prediction. The proposed method has two advantages over existing work. First, it improves event representation by allowing the event and relation modules to share the same contextualized embeddings and neural representation learner. Second, it avoids error propagation in the conventional pipeline systems by leveraging structured inference and learning methods to assign both the event labels and the temporal relation labels jointly. Experiments show that the proposed method can improve both event extraction and temporal relation extraction over state-of-the-art systems, with the end-to-end F1 improved by 10% and 6.8% on two benchmark datasets respectively.", "phrases": ["temporal relation extraction", "structured prediction", "joint event"], "overall_score": 1.9769748643818454, "scores": [0.9522293045453311, 0.8851056172996785, 0.8619451855784319], "rank_score": 0.8997600358078138} -{"id": "li-eisner-2009-first", "title": "First- and Second-Order Expectation Semirings with Applications to Minimum-Risk Training on Translation Forests", "abstract": "Many statistical translation models can be regarded as weighted logical deduction. Under this paradigm, we use weights from the expectation semiring (Eisner, 2002), to compute first-order statistics (e.g., the expected hypothesis length or feature counts) over packed forests of translations (lattices or hypergraphs). We then introduce a novel second-order expectation semiring, which computes second-order statistics (e.g., the variance of the hypothesis length or the gradient of entropy). 
This second-order semiring is essential for many interesting training paradigms such as minimum risk, deterministic annealing, active learning, and semi-supervised learning, where gradient descent optimization requires computing the gradient of entropy or risk. We use these semirings in an open-source machine translation toolkit, Joshua, enabling minimum-risk training for a benefit of up to 1.0 BLEU point.", "phrases": ["expectation", "semiring", "minimum-risk training", "translation forest", "risk"], "overall_score": 2.435786690890687, "scores": [0.8690760152999241, 0.7966102635444524, 1.3989479892969392, 0.9066993660063407, 0.5259734872256681], "rank_score": 0.899461424274665} -{"id": "long-etal-2017-cognition", "title": "A Cognition Based Attention Model for Sentiment Analysis", "abstract": "Attention models are proposed in sentiment analysis because some words are more important than others. However, most existing methods either use local context based text information or user preference information. In this work, we propose a novel attention model trained by cognition grounded eye-tracking data. A reading prediction model is first built using eye-tracking data as dependent data and other features in the context as independent data. The predicted reading time is then used to build a cognition based attention (CBA) layer for neural sentiment analysis. As a comprehensive model, we can capture attentions of words in sentences as well as sentences in documents. Different attention mechanisms can also be incorporated to capture other aspects of attentions. Evaluations show the CBA based method outperforms the state-of-the-art local context based attention methods significantly. This brings insight to how cognition grounded data can be brought into NLP tasks.", "phrases": ["cognition", "attention model", "sentiment analysis"], "overall_score": 1.6114877637645075, "scores": [0.9106320851564733, 0.918662868646772, 0.8688703827395877], "rank_score": 0.8993884455142777} -{"id": "rudinger-etal-2017-social", "title": "Social Bias in Elicited Natural Language Inferences", "abstract": "We analyze the Stanford Natural Language Inference (SNLI) corpus in an investigation of bias and stereotyping in NLP data. The SNLI human-elicitation protocol makes it prone to amplifying bias and stereotypical associations, which we demonstrate statistically (using pointwise mutual information) and with qualitative examples.", "phrases": ["social bias", "nli dataset", "hypothesis"], "overall_score": 2.4935351362363685, "scores": [0.9388230472024269, 0.8816508437426832, 0.877584190552836], "rank_score": 0.8993526938326487} -{"id": "stoia-etal-2008-scare", "title": "SCARE: a Situated Corpus with Annotated Referring Expressions", "abstract": "Even though a wealth of speech data is available for the dialog systems research community, the particular field of situated language has yet to find an appropriate free resource. The corpus required to answer research questions related to situated language should connect world information to the human language. In this paper we report on the release of a corpus of English spontaneous instruction giving situated dialogs. The corpus was collected using the Quake environment, a first-person virtual reality game, and consists of pairs of participants completing a direction giver-direction follower scenario. The corpus contains the collected audio and video, as well as word-aligned transcriptions and the positional/gaze information of the player. 
Referring expressions in the corpus are annotated with the IDs of their virtual world referents.", "phrases": ["instruction", "environment", "participant", "scare"], "overall_score": 2.0707642478134227, "scores": [1.3378589642616452, 0.853886635584796, 0.8517105246271127, 0.5538298201180754], "rank_score": 0.8993214861479073} -{"id": "li-etal-2018-towards", "title": "Towards Robust and Privacy-preserving Text Representations", "abstract": "Written text often provides sufficient clues to identify the author, their gender, age, and other important attributes. Consequently, the authorship of training and evaluation corpora can have unforeseen impacts, including differing model performance for different user groups, as well as privacy implications. In this paper, we propose an approach to explicitly obscure important author characteristics at training time, such that representations learned are invariant to these attributes. Evaluating on two tasks, we show that this leads to increased privacy in the learned representations, as well as more robust models to varying evaluation conditions, including out-of-domain corpora.", "phrases": ["robustness", "attribute", "privacy", "part-of-speech tagging", "high prediction accuracy"], "overall_score": 2.647080192671244, "scores": [1.6555141020632922, 1.1539495588998296, 0.5899030453128564, 0.5550763700064391, 0.5406071037362961], "rank_score": 0.8990100360037425} -{"id": "nouri-yangarber-2016-alignment", "title": "From alignment of etymological data to phylogenetic inference via population genetics", "abstract": "This paper presents a method for linking models for aligning linguistic etymological data with models for phylogenetic inference from population genetics. We begin with a large database of genetically related words\u2014sets of cognates\u2014from languages in a language family. We process the cognate sets to obtain a complete alignment of the data. We use the alignments as input to a model developed for phylogenetic reconstruction in population genetics. This is achieved via a natural novel projection of the linguistic data onto genetic primitives. As a result, we induce phylogenies based on aligned linguistic data. We place the method in the context of those reported in the literature, and illustrate its operation on data from the Uralic language family, which results in family trees that are very close to the \u201ctrue\u201d (expected) phylogenies.", "phrases": ["etymological data", "phylogenetic inference", "population genetic"], "overall_score": 0.9874618918399248, "scores": [0.94159834766232, 0.92458877946619, 0.8302925191270306], "rank_score": 0.8988265487518469} -{"id": "luong-etal-2015-pronoun", "title": "Pronoun Translation and Prediction with or without Coreference Links", "abstract": "The Idiap NLP Group has participated in both DiscoMT 2015 sub-tasks: pronoun-focused translation and pronoun prediction. The system for the first sub-task combines two knowledge sources: gram matical constraints from the hypothesized coreference links, and candidate translations from an SMT decoder. 
The system for the second sub-task avoids hypothesizing a coreference link, and uses instead a large set of source-side and target-side features from the noun phrases surrounding the pronoun to train a pronoun predictor.", "phrases": ["coreference link", "smt pronoun translation", "moses decoder"], "overall_score": 1.7483243429342183, "scores": [0.8253336906229273, 0.9988397752738541, 0.8712094793789534], "rank_score": 0.8984609817585784} -{"id": "wing-baldridge-2011-simple", "title": "Simple supervised document geolocation with geodesic grids", "abstract": "We investigate automatic geolocation (i.e. identification of the location, expressed as latitude/longitude coordinates) of documents. Geolocation can be an effective means of summarizing large document collections and it is an important component of geographic information retrieval. We describe several simple supervised methods for document geolocation using only the document's raw text as evidence. All of our methods predict locations in the context of geodesic grids of varying degrees of resolution. We evaluate the methods on geotagged Wikipedia articles and Twitter feeds. For Wikipedia, our best method obtains a median prediction error of just 11.8 kilometers. Twitter geolocation is more challenging: we obtain a median error of 479 km, an improvement on previous results for the dataset.", "phrases": ["document geolocation", "geodesic grid", "location", "wikipedia", "word distribution"], "overall_score": 1.8675996335776675, "scores": [1.8927688955307524, 0.9511788139999088, 0.5763437373249357, 0.5399109908147449, 0.5304254452105684], "rank_score": 0.8981255765761821} -{"id": "saito-etal-2006-using", "title": "Using Phrasal Patterns to Identify Discourse Relations", "abstract": "This paper describes a system which identifies discourse relations between two successive sentences in Japanese. On top of the lexical information previously proposed, we used phrasal pattern information. Adding phrasal information improves the system's accuracy 12%, from 53% to 65%.", "phrases": ["phrasal pattern", "discourse relation", "cross-argument word pair"], "overall_score": 1.8667932620130525, "scores": [0.9308025447613263, 1.1966831829429805, 0.5657276537668555], "rank_score": 0.8977377938237208} -{"id": "goyal-etal-2010-sketch", "title": "Sketch Techniques for Scaling Distributional Similarity to the Web", "abstract": "In this paper, we propose a memory, space, and time efficient framework to scale distributional similarity to the web. We exploit sketch techniques, especially the Count-Min sketch, which approximates the frequency of an item in the corpus without explicitly storing the item itself. These methods use hashing to deal with massive amounts of the streaming text. We store all item counts computed from 90 GB of web data in just 2 billion counters (8 GB main memory) of CM sketch. Our method returns semantic similarity between word pairs in O(K) time and can compute similarity between any word pairs that are stored in the sketch. In our experiments, we show that our framework is as effective as using the exact counts.", "phrases": ["distributional similarity", "web", "sketch technique"], "overall_score": 0.9862431534759581, "scores": [0.9331328229448534, 0.9047292343245772, 0.8552895585884774], "rank_score": 0.8977172052859693} -{"id": "gao-etal-2018-neural", "title": "Neural Metaphor Detection in Context", "abstract": "We present end-to-end neural models for detecting metaphorical word use in context. 
We show that relatively standard BiLSTM models which operate on complete sentences work well in this setting, in comparison to previous work that used more restricted forms of linguistic context. These models establish a new state-of-the-art on existing verb metaphor detection benchmarks, and show strong performance on jointly predicting the metaphoricity of all words in a running text.", "phrases": ["metaphor detection", "elmo embedding", "top", "concreteness score"], "overall_score": 2.924812066017029, "scores": [1.9623964069147013, 0.5578198787342925, 0.5372625061072824, 0.533344293913439], "rank_score": 0.8977057714174288} -{"id": "chang-etal-2010-discriminative", "title": "Discriminative Learning over Constrained Latent Representations", "abstract": "This paper proposes a general learning framework for a class of problems that require learning over latent intermediate representations. Many natural language processing (NLP) decision problems are defined over an expressive intermediate representation that is not explicit in the input, leaving the algorithm with both the task of recovering a good intermediate representation and learning to classify correctly. Most current systems separate the learning problem into two stages by solving the first step of recovering the intermediate representation heuristically and using it to learn the final classifier. This paper develops a novel joint learning algorithm for both tasks, that uses the final prediction to guide the selection of the best intermediate representation. We evaluate our algorithm on three different NLP tasks -- transliteration, paraphrase identification and textual entailment -- and show that our joint method significantly improves performance.", "phrases": ["constrained latent representations", "paraphrase identification", "lclr"], "overall_score": 2.3021801051596067, "scores": [1.5726775893261586, 0.5609020034930697, 0.5590818803546677], "rank_score": 0.8975538243912987} -{"id": "sharma-etal-2015-adjective", "title": "Adjective Intensity and Sentiment Analysis", "abstract": "For fine-grained sentiment analysis, we need to go beyond zero-one polarity and find a way to compare adjectives that share a common semantic property. In this paper, we present a semi-supervised approach to assign intensity levels to adjectives, viz. high, medium and low, where adjectives are compared when they belong to the same semantic category. For example, in the semantic category of EXPERTISE, expert, experienced and familiar are respectively of level high, medium and low. We obtain an overall accuracy of 77% for intensity assignment. We show the significance of considering intensity information of adjectives in predicting star-rating of reviews. Our intensity based prediction system results in an accuracy of 59% for a 5-star rated movie review corpus.", "phrases": ["intensity", "sentiment analysis", "semantic property", "adjective"], "overall_score": 1.8662013991422959, "scores": [0.96480693177303, 1.0941330122482862, 0.9697834030112498, 0.5610893247576144], "rank_score": 0.8974531679475451} -{"id": "zhang-etal-2020-multi", "title": "Multi-modal Multi-label Emotion Detection with Modality and Label Dependence", "abstract": "As an important research issue in the natural language processing community, multi-label emotion detection has been drawing more and more attention in the last few years. However, almost all existing studies focus on one modality (e.g., textual modality). 
In this paper, we focus on multi-label emotion detection in a multi-modal scenario. In this scenario, we need to consider both the dependence among different labels (label dependence) and the dependence between each predicting label and different modalities (modality dependence). Particularly, we propose a multi-modal sequence-to-set approach to effectively model both kinds of dependence in multi-modal multi-label emotion detection. The detailed evaluation demonstrates the effectiveness of our approach.", "phrases": ["multi-label emotion detection", "modality", "label dependence"], "overall_score": 0.9854709060913345, "scores": [1.0178345492599272, 0.850117669704741, 0.8230906073068067], "rank_score": 0.897014275423825} -{"id": "he-etal-2018-effective", "title": "Effective Attention Modeling for Aspect-Level Sentiment Classification", "abstract": "Aspect-level sentiment classification aims to determine the sentiment polarity of a review sentence towards an opinion target. A sentence could contain multiple sentiment-target pairs; thus the main challenge of this task is to separate different opinion contexts for different targets. To this end, attention mechanism has played an important role in previous state-of-the-art neural models. The mechanism is able to capture the importance of each context word towards a target by modeling their semantic associations. We build upon this line of research and propose two novel approaches for improving the effectiveness of attention. First, we propose a method for target representation that better captures the semantic meaning of the opinion target. Second, we introduce an attention model that incorporates syntactic information into the attention mechanism. We experiment on attention-based LSTM (Long Short-Term Memory) models using the datasets from SemEval 2014, 2015, and 2016. The experimental results show that the conventional attention-based LSTM can be substantially improved by incorporating the two approaches.", "phrases": ["aspect-level sentiment classification", "attention weight", "asc"], "overall_score": 2.6867113646185032, "scores": [0.9752548228150182, 0.8725170318465455, 0.842767004571461], "rank_score": 0.8968462864110082} -{"id": "pei-li-2018-s2spmn", "title": "S2SPMN: A Simple and Effective Framework for Response Generation with Relevant Information", "abstract": "How to generate relevant and informative responses is one of the core topics in response generation area. Following the task formulation of machine translation, previous works mainly consider response generation task as a mapping from a source sentence to a target sentence. To realize this mapping, existing works tend to design intuitive but complex models. However, the relevant information existing in the large dialogue corpus is mainly overlooked. In this paper, we propose Sequence to Sequence with Prototype Memory Network (S2SPMN) to exploit the relevant information provided by the large dialogue corpus to enhance response generation. Specifically, we devise two simple approaches in S2SPMN to select the relevant information (named prototypes) from the dialogue corpus. These prototypes are then saved into prototype memory network (PMN). Furthermore, a hierarchical attention mechanism is devised to extract the semantic information from the PMN to assist the response generation process. 
Empirical studies reveal the advantage of our model over several classical and strong baselines.", "phrases": ["response generation", "relevant information", "s2spmn"], "overall_score": 0.9852690614857497, "scores": [0.9438900791454016, 0.8864343009934933, 0.8601672654995209], "rank_score": 0.8968305485461386} -{"id": "girju-etal-2006-automatic", "title": "Automatic Discovery of Part-Whole Relations", "abstract": "An important problem in knowledge discovery from text is the automatic extraction of semantic relations. This paper presents a supervised, semantically intensive, domain independent approach for the automatic detection of part-whole relations in text. First an algorithm is described that identifies lexico-syntactic patterns that encode part-whole relations. A difficulty is that these patterns also encode other semantic relations, and a learning method is necessary to discriminate whether or not a pattern contains a part-whole relation. A large set of training examples has been annotated and fed into a specialized learning system that learns classification rules. The rules are learned through an iterative semantic specialization (ISS) method applied to noun phrase constituents. Classification rules have been generated this way for different patterns such as genitives, noun compounds, and noun phrases containing prepositional phrases to extract part-whole relations from them. The applicability of these rules has been tested on a test corpus obtaining an overall average precision of 80.95% and recall of 75.91%. The results demonstrate the importance of word sense disambiguation for this task. They also demonstrate that different lexico-syntactic patterns encode different semantic information and should be treated separately in the sense that different classification rules apply to different patterns.", "phrases": ["discovery", "part-whole relation", "extraction", "noun compound", "semantic information"], "overall_score": 2.5920395551109987, "scores": [0.8888005230456031, 1.5541812244526088, 0.9130928706276491, 0.582362517461709, 0.5454835712035953], "rank_score": 0.8967841413582331} -{"id": "xie-xing-2018-neural", "title": "A Neural Architecture for Automated ICD Coding", "abstract": "The International Classification of Diseases (ICD) provides a hierarchy of diagnostic codes for classifying diseases. Medical coding \u2013 which assigns a subset of ICD codes to a patient visit \u2013 is a mandatory process that is crucial for patient care and billing. Manual coding is time-consuming, expensive, and error prone. In this paper, we build a neural architecture for automated coding. It takes the diagnosis descriptions (DDs) of a patient as inputs and selects the most relevant ICD codes. This architecture contains four major ingredients: (1) tree-of-sequences LSTM encoding of code descriptions (CDs), (2) adversarial learning for reconciling the different writing styles of DDs and CDs, (3) isotonic constraints for incorporating the importance order among the assigned codes, and (4) attentional matching for performing many-to-one and one-to-many mappings from DDs to CDs. 
We demonstrate the effectiveness of the proposed methods on a clinical dataset with 59K patient visits.", "phrases": ["neural architecture", "icd", "code description", "tree lstm"], "overall_score": 2.427684872544328, "scores": [0.9103367257122232, 1.2712346749052699, 0.8553402137937107, 0.5489670691615456], "rank_score": 0.8964696708931873} -{"id": "mcdonald-etal-2010-distributed", "title": "Distributed Training Strategies for the Structured Perceptron", "abstract": "Perceptron training is widely applied in the natural language processing community for learning complex structured models. Like all structured prediction learning frameworks, the structured perceptron can be costly to train as training complexity is proportional to inference, which is frequently non-linear in example sequence length. In this paper we investigate distributed training strategies for the structured perceptron as a means to reduce training times when computing clusters are available. We look at two strategies and provide convergence bounds for a particular mode of distributed structured perceptron training based on iterative parameter mixing (or averaging). We present experiments on two structured prediction problems -- named-entity recognition and dependency parsing -- to highlight the efficiency of this method.", "phrases": ["training strategy", "structured perceptron", "particular mode", "iterative parameter mixing"], "overall_score": 1.7441443592334838, "scores": [1.940884704708535, 0.5827518267417716, 0.53341353750395, 0.5282015113002962], "rank_score": 0.8963128950636383} -{"id": "shmueli-etal-2020-reactive", "title": "Reactive Supervision: A New Method for Collecting Sarcasm Data", "abstract": "Sarcasm detection is an important task in affective computing, requiring large amounts of labeled data. We introduce reactive supervision, a novel data collection method that utilizes the dynamics of online conversations to overcome the limitations of existing data collection techniques. We use the new method to create and release a first-of-its-kind large dataset of tweets with sarcasm perspective labels and new contextual features. The dataset is expected to advance sarcasm detection research. Our method can be adapted to other affective computing domains, thus opening up new research opportunities.", "phrases": ["reactive supervision", "sarcasm detection task", "user history", "spirs dataset"], "overall_score": 1.4419571051897933, "scores": [1.898120378797053, 0.6203439076155239, 0.534691899781749, 0.5305971081691443], "rank_score": 0.8959383235908676} -{"id": "arisoy-etal-2012-deep", "title": "Deep Neural Network Language Models", "abstract": "In recent years, neural network language models (NNLMs) have shown success in both perplexity and word error rate (WER) compared to conventional n-gram language models. Most NNLMs are trained with one hidden layer. Deep neural networks (DNNs) with more hidden layers have been shown to capture higher-level discriminative information about input features, and thus produce better networks. Motivated by the success of DNNs in acoustic modeling, we explore deep neural network language models (DNN LMs) in this paper. Results on a Wall Street Journal (WSJ) task demonstrate that DNN LMs offer improvements over a single hidden layer NNLM. 
Furthermore, our preliminary results are competitive with a model M language model, considered to be one of the current state-of-the-art techniques for language modeling.", "phrases": ["language modeling", "word error rate", "speech recognition"], "overall_score": 1.9682432895508648, "scores": [1.1897242020966257, 0.93379997557115, 0.5638341968735865], "rank_score": 0.8957861248471207} -{"id": "yahya-etal-2014-renoun", "title": "ReNoun: Fact Extraction for Nominal Attributes", "abstract": "Search engines are increasingly relying on large knowledge bases of facts to provide direct answers to users\u2019 queries. However, the construction of these knowledge bases is largely manual and does not scale to the long and heavy tail of facts. Open information extraction tries to address this challenge, but typically assumes that facts are expressed with verb phrases, and therefore has had difficulty extracting facts for noun-based relations. We describe ReNoun, an open information extraction system that complements previous efforts by focusing on nominal attributes and on the long tail. ReNoun\u2019s approach is based on leveraging a large ontology of noun attributes mined from a text corpus and from user queries. ReNoun creates a seed set of training data by using specialized patterns and requiring that the facts mention an attribute in the ontology. ReNoun then generalizes from this seed set to produce a much larger set of extractions that are then scored. We describe experiments that show that we extract facts with high precision and for attributes that cannot be extracted with verb-based techniques.", "phrases": ["attribute", "information extraction", "renoun", "knowledge basis"], "overall_score": 1.9680529821290664, "scores": [1.856038117514099, 0.5925898701468072, 0.5820404686621665, 0.5521295925045319], "rank_score": 0.8956995122069011} -{"id": "hajishirzi-etal-2013-joint", "title": "Joint Coreference Resolution and Named-Entity Linking with Multi-Pass Sieves", "abstract": "Many errors in coreference resolution come from semantic mismatches due to inadequate world knowledge. Errors in named-entity linking (NEL), on the other hand, are often caused by superficial modeling of entity context. This paper demonstrates that these two tasks are complementary. We introduce NECO, a new model for named entity linking and coreference resolution, which solves both problems jointly, reducing the errors made on each. NECO extends the Stanford deterministic coreference system by automatically linking mentions to Wikipedia and introducing new NEL-informed mention-merging sieves. Linking improves mention-detection and enables new semantic attributes to be incorporated from Freebase, while coreference provides better context modeling by propagating named-entity links within mention clusters. 
Experiments show consistent improvements across a number of datasets and experimental conditions, including over 11% reduction in MUC coreference error and nearly 21% reduction in F1 NEL error on ACE 2004 newswire data.", "phrases": ["coreference resolution", "named-entity", "linking", "wikipedia", "joint model"], "overall_score": 2.0621652467336307, "scores": [1.9176192025841035, 0.9471471867048049, 0.5500425758132056, 0.5367742065868472, 0.5263517654564072], "rank_score": 0.8955869874290736} -{"id": "wang-etal-2015-chinese", "title": "Chinese Semantic Role Labeling with Bidirectional Recurrent Neural Networks", "abstract": "Traditional approaches to Chinese Semantic Role Labeling (SRL) almost heavily rely on feature engineering. Even worse, the long-range dependencies in a sentence can hardly be modeled by these methods. In this paper, we introduce bidirectional recurrent neural network (RNN) with long-short-term memory (LSTM) to capture bidirectional and long-range dependencies in a sentence with minimal feature engineering. Experimental results on Chinese Proposition Bank (CPB) show a significant improvement over the state-of-the-art methods. Moreover, our model makes it convenient to introduce heterogeneous resource, which makes a further improvement on our experimental performance.", "phrases": ["bidirectional", "recurrent neural network", "chinese srl"], "overall_score": 1.9663275406981202, "scores": [1.2340427195049943, 0.9106652417899568, 0.5400347286152105], "rank_score": 0.894914229970054} -{"id": "khodak-etal-2018-la", "title": "A La Carte Embedding: Cheap but Effective Induction of Semantic Feature Vectors", "abstract": "Motivations like domain adaptation, transfer learning, and feature learning have fueled interest in inducing embeddings for rare or unseen words, n-grams, synsets, and other textual features. This paper introduces a la carte embedding, a simple and general alternative to the usual word2vec-based approaches for building such representations that is based upon recent theoretical results for GloVe-like embeddings. Our method relies mainly on a linear transformation that is efficiently learnable using pretrained word vectors and linear regression. This transform is applicable on the fly in the future when a new text feature or rare word is encountered, even if only a single usage example is available. We introduce a new dataset showing how the a la carte method requires fewer examples of words in context to learn high-quality embeddings and we obtain state-of-the-art results on a nonce task and some unsupervised document classification tasks.", "phrases": ["transformation", "word vector", "irony"], "overall_score": 2.361367546173418, "scores": [1.2184880577691295, 0.9223074877994782, 0.5435351656376654], "rank_score": 0.8947769037354244} -{"id": "mao-etal-2021-banditmtl", "title": "BanditMTL: Bandit-based Multi-task Learning for Text Classification", "abstract": "Task variance regularization, which can be used to improve the generalization of Multi-task Learning (MTL) models, remains unexplored in multi-task text classification. Accordingly, to fill this gap, this paper investigates how the task might be effectively regularized, and consequently proposes a multi-task learning method based on adversarial multi-armed bandit. The proposed method, named BanditMTL, regularizes the task variance by means of a mirror gradient ascent-descent algorithm. Adopting BanditMTL in the multi-task text classification context is found to achieve state-of-the-art performance. 
The results of extensive experiments back up our theoretical analysis and validate the superiority of our proposals.", "phrases": ["multi-task learning", "text classification", "banditmtl"], "overall_score": 0.9825472230636427, "scores": [0.9441406946704849, 0.936816015947395, 0.8021023627195107], "rank_score": 0.8943530244457968} -{"id": "feng-etal-2004-accessor", "title": "Accessor Variety Criteria for Chinese Word Extraction", "abstract": "We are interested in the problem of word extraction from Chinese text collections. We define a word to be a meaningful string composed of several Chinese characters. For example, percent, and, more and more, are not recognized as traditional Chinese words from the viewpoint of some people. However, in our work, they are words because they are very widely used and have specific meanings. We start with the viewpoint that a word is a distinguished linguistic entity that can be used in many different language environments. We consider the characters that are directly before a string (predecessors) and the characters that are directly after a string (successors) as important factors for determining the independence of the string. We call such characters accessors of the string, consider the number of distinct predecessors and successors of a string in a large corpus (TREC 5 and TREC 6 documents), and use them as the measurement of the context independency of a string from the rest of the sentences in the document. Our experiments confirm our hypothesis and show that this simple rule gives quite good results for Chinese word extraction and is comparable to, and for long words outperforms, other iterative methods.", "phrases": ["chinese word extraction", "string", "factor", "accessor variety criteria"], "overall_score": 2.421735637653234, "scores": [0.9875417385125079, 0.8142774805912709, 1.2479628303958343, 0.527309153119242], "rank_score": 0.8942728006547137} -{"id": "brockett-dolan-2005-support", "title": "Support Vector Machines for Paraphrase Identification and Corpus Construction", "abstract": "The lack of readily-available large corpora of aligned monolingual sentence pairs is a major obstacle to the development of Statistical Machine Translation-based paraphrase models. In this paper, we describe the use of annotated datasets and Support Vector Machines to induce larger monolingual paraphrase corpora from a comparable corpus of news clusters found on the World Wide Web. Features include: morphological variants; WordNet synonyms and hypernyms; log-likelihood-based word pairings dynamically obtained from baseline sentence alignments; and formal string features such as word-based edit distance. Use of this technique dramatically reduces the Alignment Error Rate of the extracted corpora over heuristic methods based on position of the sentences in the text.", "phrases": ["paraphrase corpora", "world wide web", "edit distance", "support vector machines"], "overall_score": 1.439234216472998, "scores": [1.8624525110194798, 0.6455684056614328, 0.5465280917616495, 0.5224369823902562], "rank_score": 0.8942464977082045} -{"id": "muller-etal-2021-unseen", "title": "When Being Unseen from mBERT is just the Beginning: Handling New Languages With Multilingual Language Models", "abstract": "Transfer learning based on pretraining language models on a large amount of raw data has become a new norm to reach state-of-the-art performance in NLP. 
Still, it remains unclear how this approach should be applied for unseen languages that are not covered by any available large-scale multilingual language model and for which only a small amount of raw data is generally available. In this work, by comparing multilingual and monolingual models, we show that such models behave in multiple ways on unseen languages. Some languages greatly benefit from transfer learning and behave similarly to closely related high resource languages whereas others apparently do not. Focusing on the latter, we show that this failure to transfer is largely related to the impact of the script used to write such languages. We show that transliterating those languages significantly improves the potential of large-scale multilingual language models on downstream tasks. This result provides a promising direction towards making these massively multilingual models useful for a new set of unseen languages.", "phrases": ["unseen language", "dependency parsing", "maltese"], "overall_score": 2.3598856776482626, "scores": [1.5917609117065905, 0.5585292537039069, 0.5323560027865814], "rank_score": 0.8942153893990262} -{"id": "abdelali-etal-2016-farasa", "title": "Farasa: A Fast and Furious Segmenter for Arabic", "abstract": "In this paper, we present Farasa, a fast and accurate Arabic segmenter. Our approach is based on SVM-rank using linear kernels. We measure the performance of the segmenter in terms of accuracy and efficiency, in two NLP tasks, namely Machine Translation (MT) and Information Retrieval (IR). Farasa outperforms or is at par with the state-of-the-art Arabic segmenters (Stanford and MADAMIRA), while being more than one order of magnitude faster.", "phrases": ["segmenter", "arabic", "disambiguation"], "overall_score": 2.4788590615669976, "scores": [0.9355435410273811, 0.8966335431791514, 0.8500011721825003], "rank_score": 0.8940594187963443} -{"id": "roark-etal-2009-deriving", "title": "Deriving lexical and syntactic expectation-based measures for psycholinguistic modeling via incremental top-down parsing", "abstract": "A number of recent publications have made use of the incremental output of stochastic parsers to derive measures of high utility for psycholinguistic modeling, following the work of Hale (2001; 2003; 2006). In this paper, we present novel methods for calculating separate lexical and syntactic surprisal measures from a single incremental parser using a lexicalized PCFG. We also present an approximation to entropy measures that would otherwise be intractable to calculate for a grammar of that size. Empirical results demonstrate the utility of our methods in predicting human reading times.", "phrases": ["psycholinguistic modeling", "surprisal", "entropy", "processing difficulty", "pcfg parser"], "overall_score": 2.477074981456485, "scores": [0.9179496370780339, 1.5808341460309405, 0.8544230673663722, 0.5644421027153399, 0.5494307863810536], "rank_score": 0.893415947914348} -{"id": "wan-2008-using", "title": "Using Bilingual Knowledge and Ensemble Techniques for Unsupervised Chinese Sentiment Analysis", "abstract": "It is a challenging task to identify sentiment polarity of Chinese reviews because the resources for Chinese sentiment analysis are limited. Instead of leveraging only monolingual Chinese knowledge, this study proposes a novel approach to leverage reliable English resources to improve Chinese sentiment analysis. 
Rather than simply projecting English resources onto Chinese resources, our approach first translates Chinese reviews into English reviews by machine translation services, and then identifies the sentiment polarity of English reviews by directly leveraging English resources. Furthermore, our approach performs sentiment analysis for both Chinese reviews and English reviews, and then uses ensemble methods to combine the individual analysis results. Experimental results on a dataset of 886 Chinese product reviews demonstrate the effectiveness of the proposed approach. The individual analysis of the translated English reviews outperforms the individual analysis of the original Chinese reviews, and the combination of the individual analysis results further improves the performance.", "phrases": ["bilingual knowledge", "chinese", "sentiment analysis", "ensemble method", "english lexicon"], "overall_score": 2.6760858749850778, "scores": [0.7931737818191998, 1.7280969782946438, 0.828565772039452, 0.5677200595857055, 0.5489404624358362], "rank_score": 0.8932994108349674} -{"id": "philip-etal-2020-monolingual", "title": "Monolingual Adapters for Zero-Shot Neural Machine Translation", "abstract": "We propose a novel adapter layer formalism for adapting multilingual models. They are more parameter-efficient than existing adapter layers while obtaining as good or better performance. The layers are specific to one language (as opposed to bilingual adapters) allowing to compose them and generalize to unseen language-pairs. In this zero-shot setting, they obtain a median improvement of +2.77 BLEU points over a strong 20-language multilingual Transformer baseline trained on TED talks.", "phrases": ["adapter", "multilingual model", "zero-shot setting"], "overall_score": 2.14095267073632, "scores": [1.2199625974157564, 0.8967661450294556, 0.5618110941646711], "rank_score": 0.8928466122032944} -{"id": "abend-etal-2017-uccaapp", "title": "UCCAApp: Web-application for Syntactic and Semantic Phrase-based Annotation", "abstract": "We present UCCAApp, an open-source, flexible web-application for syntactic and semantic phrase-based annotation in general, and for UCCA annotation in particular. UCCAApp supports a variety of formal properties that have proven useful for syntactic and semantic representation, such as discontiguous phrases, multiple parents and empty elements, making it useful to a variety of other annotation schemes with similar formal properties. UCCAApp\u2019s user interface is intuitive and user friendly, so as to support annotation by users with no background in linguistics or formal representation. Indeed, a pilot version of the application has been successfully used in the compilation of the UCCA Wikipedia treebank by annotators with no previous linguistic training. The application and all accompanying resources are released as open source under the GNU public license, and are available online along with a live demo.1", "phrases": ["web-application", "semantic phrase-based annotation", "uccaapp"], "overall_score": 1.2374418520901591, "scores": [0.9682378360014335, 0.8928432167061586, 0.8167957823907978], "rank_score": 0.8926256116994633} -{"id": "li-etal-2011-composing", "title": "Composing Simple Image Descriptions using Web-scale N-grams", "abstract": "Studying natural language, and especially how people describe the world around them can help us better understand the visual world. 
In turn, it can also help us in the quest to generate natural language that describes this world in a human manner. We present a simple yet effective approach to automatically compose image descriptions given computer vision based inputs and using web-scale n-grams. Unlike most previous work that summarizes or retrieves pre-existing text relevant to an image, our method composes sentences entirely from scratch. Experimental results indicate that it is viable to generate simple textual descriptions that are pertinent to the specific content of an image, while permitting creativity in the description -- making for more human-like annotations than previous approaches.", "phrases": ["image", "textual description", "previous approach", "spatial relationship"], "overall_score": 2.474051199797463, "scores": [1.5763714371050976, 0.8778664196761994, 0.5925493816511623, 0.5225141584207302], "rank_score": 0.8923253492132974} -{"id": "cheng-etal-2020-ent", "title": "ENT-DESC: Entity Description Generation by Exploring Knowledge Graph", "abstract": "Previous works on knowledge-to-text generation take as input a few RDF triples or key-value pairs conveying the knowledge of some entities to generate a natural language description. Existing datasets, such as WIKIBIO, WebNLG, and E2E, basically have a good alignment between an input triple/pair set and its output text. However, in practice, the input knowledge could be more than enough, since the output description may only cover the most significant knowledge. In this paper, we introduce a large-scale and challenging dataset to facilitate the study of such a practical scenario in KG-to-text. Our dataset involves retrieving abundant knowledge of various types of main entities from a large knowledge graph (KG), which makes the current graph-to-sequence models severely suffer from the problems of information loss and parameter explosion while generating the descriptions. We address these challenges by proposing a multi-graph structure that is able to represent the original graph information more comprehensively. Furthermore, we also incorporate aggregation methods that learn to extract the rich graph information. Extensive experiments demonstrate the effectiveness of our model architecture.", "phrases": ["knowledge graph", "input knowledge", "multi-graph structure", "well text description"], "overall_score": 1.7358146404844845, "scores": [1.94045055642828, 0.5612287208640488, 0.5351664414766233, 0.5312833468555331], "rank_score": 0.8920322664061213} -{"id": "schwenk-li-2018-corpus", "title": "A Corpus for Multilingual Document Classification in Eight Languages", "abstract": "Cross-lingual document classification aims at training a document classifier on resources in one language and transferring it to a different language without any additional resources. Several approaches have been proposed in the literature and the current best practice is to evaluate them on a subset of the Reuters Corpus Volume 2. However, this subset covers only a few languages (English, German, French and Spanish) and almost all published works focus on the transfer between English and German. In addition, we have observed that the class prior distributions differ significantly between the languages. We argue that this complicates the evaluation of the multilinguality. In this paper, we propose a new subset of the Reuters corpus with balanced class priors for eight languages. 
By adding Italian, Russian, Japanese and Chinese, we cover languages which are very different with respect to syntax, morphology, etc. We provide strong baselines for all language transfer directions using multilingual word and sentence embeddings respectively. Our goal is to offer a freely available framework to evaluate cross-lingual document classification, and we hope to foster, by these means, research in this important area.", "phrases": ["multilingual document classification", "strong baseline", "sentence embedding"], "overall_score": 1.854395807026346, "scores": [1.593276058976003, 0.561349112166405, 0.520702463499789], "rank_score": 0.8917758782140656} -{"id": "shardlow-2014-open", "title": "Out in the Open: Finding and Categorising Errors in the Lexical Simplification Pipeline", "abstract": "Lexical simplification is the task of automatically reducing the complexity of a text by identifying difficult words and replacing them with simpler alternatives. Whilst this is a valuable application of natural language generation, rudimentary lexical simplification systems suffer from a high error rate which often results in nonsensical, non-simple text. This paper seeks to characterise and quantify the errors which occur in a typical baseline lexical simplification system. We expose 6 distinct categories of error and propose a classification scheme for these. We also quantify these errors for a moderate size corpus, showing the magnitude of each error type. We find that for 183 identified simplification instances, only 19 (10.38%) result in a valid simplification, with the rest causing errors of varying gravity.", "phrases": ["lexical simplification pipeline", "alternative", "cwi"], "overall_score": 2.671443917300209, "scores": [0.9471169709335048, 1.2071910561162942, 0.5209416348276339], "rank_score": 0.8917498872924776} -{"id": "lin-su-2021-fast", "title": "How Fast can BERT Learn Simple Natural Language Inference?", "abstract": "This paper empirically studies whether BERT can really learn to conduct natural language inference (NLI) without utilizing hidden dataset bias; and how efficiently it can learn if it could. This is done via creating a simple entailment judgment case which involves only binary predicates in plain English. The results show that the learning process of BERT is very slow. However, the efficiency of learning can be greatly improved (data reduction by a factor of 1,500) if task-related features are added. This suggests that domain knowledge greatly helps when conducting NLI with neural networks.", "phrases": ["bert", "natural language inference", "efficiency", "task-related feature"], "overall_score": 0.9796093839367024, "scores": [1.8482759653504657, 0.6272796282342786, 0.550484726711564, 0.5406752318274373], "rank_score": 0.8916788880309365} -{"id": "iida-etal-2003-incorporating", "title": "Incorporating Contextual Cues in Trainable Models for Coreference Resolution", "abstract": "We propose a method that incorporates various novel contextual cues into a machine learning for resolving coreference. Distinct characteristics of our model are (i) incorporating more linguistic features capturing contextual information that is more sophisticated than what is offered in Centering Theory, and (ii) a tournament model for selecting a referent. Our experiments show that this model significantly outperforms earlier machine learning approaches, such as Soon et al. 
(2001).", "phrases": ["coreference resolution", "candidate", "anaphor"], "overall_score": 2.052160957475504, "scores": [0.9080479688941279, 1.228263034437684, 0.5374155360949033], "rank_score": 0.8912421798089051} -{"id": "das-2019-nuclearity", "title": "Nuclearity in RST and signals of coherence relations", "abstract": "We investigate the relationship between the notion of nuclearity as proposed in Rhetorical Structure Theory (RST) and the signalling of coherence relations. RST relations are categorized as either mononuclear (comprising a nucleus and a satellite span) or multinuclear (comprising two or more nuclei spans). We examine how mononuclear relations (e.g., Antithesis, Condition) and multinuclear relations (e.g., Contrast, List) are indicated by relational signals, more particularly by discourse markers (e.g., because, however, if, therefore). We conduct a corpus study, examining the distribution of either type of relations in the RST Discourse Treebank (Carlson et al., 2002) and the distribution of discourse markers for those relations in the RST Signalling Corpus (Das et al., 2015). Our results show that discourse markers are used more often to signal multinuclear relations than mononuclear relations. The findings also suggest a complex relationship between the relation types and syntactic categories of discourse markers (subordinating and coordinating conjunctions).", "phrases": ["rst", "coherence relation", "nuclearity"], "overall_score": 0.9788056767690221, "scores": [0.9273353868034041, 0.8993141046060356, 0.8461924753111385], "rank_score": 0.8909473222401928} -{"id": "eom-etal-2012-using", "title": "Using semi-experts to derive judgments on word sense alignment: a pilot study", "abstract": "The overall goal of this project is to evaluate the performance of word sense alignment (WSA) systems, focusing on obtaining examples appropriate to language learners. Building a gold standard dataset based on human expert judgments is costly in time and labor, and thus we gauge the utility of using semi-experts in performing the annotation. In an online survey, we present a sense of a target word from one dictionary with senses from the other dictionary, asking for judgments of relatedness. We note the difficulty of agreement, yet the utility in using such results to evaluate WSA work. We find that one's treatment of related senses heavily impacts the results for WSA.", "phrases": ["semi-expert", "judgment", "word sense alignment"], "overall_score": 0.9787030069921313, "scores": [0.9341396230982917, 0.9051145154124711, 0.8333074660348705], "rank_score": 0.8908538681818777} -{"id": "gillick-etal-2016-multilingual", "title": "Multilingual Language Processing From Bytes", "abstract": "We describe an LSTM-based model which we call Byte-to-Span (BTS) that reads text as bytes and outputs span annotations of the form [start, length, label] where start positions, lengths, and labels are separate entries in our vocabulary. Because we operate directly on unicode bytes rather than language-specific words or characters, we can analyze text in many languages with a single model. Due to the small vocabulary size, these multilingual models are very compact, but produce results similar to or better than the state-of-the-art in Part-of-Speech tagging and Named Entity Recognition that use only the provided training datasets (no external data sources). 
Our models are learning \"from scratch\" in that they do not rely on any elements of the standard pipeline in Natural Language Processing (including tokenization), and thus can run in standalone fashion on raw text.", "phrases": ["byte", "part-of-speech tagging", "entity recognition", "tokenization", "multilingual language processing"], "overall_score": 2.52367911619306, "scores": [1.778948443891679, 0.9975073619936162, 0.569041761079761, 0.5631051560632303, 0.5451372706214573], "rank_score": 0.8907479987299487} -{"id": "williams-koehn-2011-agreement", "title": "Agreement Constraints for Statistical Machine Translation into German", "abstract": "Languages with rich inflectional morphology pose a difficult challenge for statistical machine translation. To address the problem of morphologically inconsistent output, we add unification-based constraints to the target-side of a string-to-tree model. By integrating constraint evaluation into the decoding process, implausible hypotheses can be penalised or filtered out during search. We use a simple heuristic process to extract agreement constraints for German and test our approach on an English-German system trained on WMT data, achieving a small improvement in translation accuracy as measured by BLEU.", "phrases": ["statistical machine translation", "string-to-tree model", "agreement constraint"], "overall_score": 1.9569624713405618, "scores": [0.9378956334208067, 0.8884930170526596, 0.8455673592026997], "rank_score": 0.8906520032253886} -{"id": "berant-etal-2015-efficient", "title": "Efficient Global Learning of Entailment Graphs", "abstract": "Entailment rules between predicates are fundamental to many semantic-inference applications. Consequently, learning such rules has been an active field of research in recent years. Methods for learning entailment rules between predicates that take into account dependencies between different rules (e.g., entailment is a transitive relation) have been shown to improve rule quality, but suffer from scalability issues, that is, the number of predicates handled is often quite small. In this article, we present methods for learning transitive graphs that contain tens of thousands of nodes, where nodes represent predicates and edges correspond to entailment rules (termed entailment graphs). Our methods are able to scale to a large number of predicates by exploiting structural properties of entailment graphs such as the fact that they exhibit a \u201ctree-like\u201d property. We apply our methods on two data sets and demonstrate that our methods find high-quality solutions faster than methods proposed in the past, and moreover our methods for the first time scale to large graphs containing 20,000 nodes and more than 100,000 edges.", "phrases": ["entailment graph", "approximation method", "treenode-fix"], "overall_score": 1.8519967126970507, "scores": [1.2331852969880568, 0.8677505028166731, 0.5709306733459678], "rank_score": 0.8906221577168992} -{"id": "kocisky-etal-2016-semantic", "title": "Semantic Parsing with Semi-Supervised Sequential Autoencoders", "abstract": "We present a novel semi-supervised approach for sequence transduction and apply it to semantic parsing. The unsupervised component is based on a generative model in which latent sentences generate the unpaired logical forms. 
We apply this method to a number of semantic parsing tasks focusing on domains with limited access to labelled training data and extend those datasets with synthetically generated logical forms.", "phrases": ["generative model", "semantic parsing", "natural language utterance", "program"], "overall_score": 1.8518539633414977, "scores": [1.9474427665890108, 0.5501378064075539, 0.5353847901495652, 0.5292486760050048], "rank_score": 0.8905535097877837} -{"id": "simianer-etal-2012-joint", "title": "Joint Feature Selection in Distributed Stochastic Learning for Large-Scale Discriminative Training in SMT", "abstract": "With a few exceptions, discriminative training in statistical machine translation (SMT) has been content with tuning weights for large feature sets on small development data. Evidence from machine learning indicates that increasing the training sample size results in better prediction. The goal of this paper is to show that this common wisdom can also be brought to bear upon SMT. We deploy local features for SCFG-based SMT that can be read off from rules at runtime, and present a learning algorithm that applies l1/l2 regularization for joint feature selection over distributed stochastic learning processes. We present experiments on learning on 1.5 million training sentences, and show significant improvements over tuning discriminative models on small development sets.", "phrases": ["tuning", "million", "joint feature selection", "inter alia"], "overall_score": 2.048597124450023, "scores": [1.965333997573608, 0.5406317273520147, 0.5286976193163201, 0.5241143629241143], "rank_score": 0.8896944267915142} -{"id": "ghosh-etal-2016-coarse", "title": "Coarse-grained Argumentation Features for Scoring Persuasive Essays", "abstract": "Scoring the quality of persuasive essays is an important goal of discourse analysis, addressed most recently with high-level persuasion-related features such as thesis clarity, or opinions and their targets. We investigate whether argumentation features derived from a coarse-grained argumentative structure of essays can help predict essay scores. We introduce a set of argumentation features related to argument components (e.g., the number of claims and premises), argument relations (e.g., the number of supported claims) and typology of argumentative structure (chains, trees). We show that these features are good predictors of human scores for TOEFL essays, both when the coarse-grained argumentative structure is manually annotated and automatically predicted.", "phrases": ["essay", "argument component", "automatic essay scoring"], "overall_score": 2.347814998685313, "scores": [1.1338062844277668, 0.9440248551363091, 0.5910934483996066], "rank_score": 0.8896415293212274} -{"id": "liang-etal-2018-multimodal", "title": "Multimodal Language Analysis with Recurrent Multistage Fusion", "abstract": "Computational modeling of human multimodal language is an emerging research area in natural language processing spanning the language, visual and acoustic modalities. Comprehending multimodal language requires modeling not only the interactions within each modality (intra-modal interactions) but more importantly the interactions between modalities (cross-modal interactions). In this paper, we propose the Recurrent Multistage Fusion Network (RMFN) which decomposes the fusion problem into multiple stages, each of them focused on a subset of multimodal signals for specialized, effective fusion. 
Cross-modal interactions are modeled using this multistage fusion approach which builds upon intermediate representations of previous stages. Temporal and intra-modal interactions are modeled by integrating our proposed fusion approach with a system of recurrent neural networks. The RMFN displays state-of-the-art performance in modeling human multimodal language across three public datasets relating to multimodal sentiment analysis, emotion recognition, and speaker traits recognition. We provide visualizations to show that each stage of fusion focuses on a different subset of multimodal signals, learning increasingly discriminative multimodal representations.", "phrases": ["fusion", "cross-modal interaction", "stage", "discriminative multimodal representation"], "overall_score": 1.9540625716796571, "scores": [1.0832436726593946, 1.0490871645854825, 0.8590406894295327, 0.5659572813778698], "rank_score": 0.88933220201307} -{"id": "yao-etal-2011-structured", "title": "Structured Relation Discovery using Generative Models", "abstract": "We explore unsupervised approaches to relation extraction between two named entities; for instance, the semantic bornIn relation between a person and location entity. Concretely, we propose a series of generative probabilistic models, broadly similar to topic models, each of which generates a corpus of observed triples of entity mention pairs and the surface syntactic dependency path between them. The output of each model is a clustering of observed relation tuples and their associated textual expressions to underlying semantic relation types. Our proposed models exploit entity type constraints within a relation as well as features on the dependency path between entity mentions. We examine the effectiveness of our approach via multiple evaluations and demonstrate 12% error reduction in precision over a state-of-the-art weakly supervised baseline.", "phrases": ["relation discovery", "topic model", "openre", "group"], "overall_score": 2.7882300518034295, "scores": [1.2994045621335444, 0.9079633726811168, 0.8293378094790008, 0.5202838207177795], "rank_score": 0.8892473912528605} -{"id": "muller-gurevych-2009-study", "title": "A Study on the Semantic Relatedness of Query and Document Terms in Information Retrieval", "abstract": "The use of lexical semantic knowledge in information retrieval has been a field of active study for a long time. Collaborative knowledge bases like Wikipedia and Wiktionary, which have been applied in computational methods only recently, offer new possibilities to enhance information retrieval. In order to find the most beneficial way to employ these resources, we analyze the lexical semantic relations that hold among query and document terms and compare how these relations are represented by a measure for semantic relatedness. We explore the potential of different indicators of document relevance that are based on semantic relatedness and compare the characteristics and performance of the knowledge bases Wikipedia, Wiktionary and WordNet.", "phrases": ["semantic relatedness", "query", "document term"], "overall_score": 0.9768161888553731, "scores": [0.9270682247043353, 0.8714442204620247, 0.8688967917345084], "rank_score": 0.8891364123002895} -{"id": "de-melo-2014-etymological", "title": "Etymological Wordnet: Tracing The History of Words", "abstract": "Research on the history of words has led to remarkable insights about language and also about the history of human civilization more generally. 
This paper presents the Etymological Wordnet, the first database that aims at making word origin information available as a large, machine-readable network of words in many languages. The information in this resource is obtained from Wiktionary. Extracting a network of etymological information from Wiktionary requires significant effort, as much of the etymological information is only given in prose. We rely on custom pattern matching techniques and mine a large network with over 500,000 word origin links as well as over 2 million derivational/compositional links.", "phrases": ["history", "wiktionary", "etymological wordnet"], "overall_score": 1.2322832747757313, "scores": [0.9687510896559192, 0.8347107642420879, 0.8632516003360319], "rank_score": 0.8889044847446796} -{"id": "clematide-etal-2012-mlsa", "title": "MLSA \u2014 A Multi-layered Reference Corpus for German Sentiment Analysis", "abstract": "In this paper, we describe MLSA, a publicly available multi-layered reference corpus for German-language sentiment analysis. The construction of the corpus is based on the manual annotation of 270 German-language sentences considering three different layers of granularity. The sentence-layer annotation, as the most coarse-grained annotation, focuses on aspects of objectivity, subjectivity and the overall polarity of the respective sentences. Layer 2 is concerned with polarity on the word- and phrase-level, annotating both subjective and factual language. The annotations on Layer 3 focus on the expression-level, denoting frames of private states such as objective and direct speech events. These three layers and their respective annotations are intended to be fully independent of each other. At the same time, exploring for and discovering interactions that may exist between different layers should also be possible. The reliability of the respective annotations was assessed using the average pairwise agreement and Fleiss' multi-rater measures. We believe that MLSA is a beneficial resource for sentiment analysis research, algorithms and applications that focus on the German language.", "phrases": ["multi-layered reference corpus", "german sentiment analysis", "subjectivity", "mlsa"], "overall_score": 1.4299241616604816, "scores": [0.9628101879010752, 0.9371712405244914, 0.8066252168865811, 0.8472406963299456], "rank_score": 0.8884618354105233} -{"id": "joulin-etal-2018-loss", "title": "Loss in Translation: Learning Bilingual Word Mapping with a Retrieval Criterion", "abstract": "Continuous word representations learned separately on distinct languages can be aligned so that their words become comparable in a common space. Existing works typically solve a quadratic problem to learn an orthogonal matrix aligning a bilingual lexicon, and use a retrieval criterion for inference. In this paper, we propose a unified formulation that directly optimizes a retrieval criterion in an end-to-end fashion. 
Our experiments on standard benchmarks show that our approach outperforms the state of the art on word translation, with the biggest improvements observed for distant language pairs such as English-Chinese.", "phrases": ["retrieval criterion", "loss", "monolingual word embedding", "new objective function", "semantic space"], "overall_score": 2.8597434230321594, "scores": [1.6407411720758127, 0.8352888408043968, 0.9127471890633252, 0.5279437070014964, 0.5254253225723865], "rank_score": 0.8884292463034835} -{"id": "white-etal-2017-inference", "title": "Inference is Everything: Recasting Semantic Resources into a Unified Evaluation Framework", "abstract": "We propose to unify a variety of existing semantic classification tasks, such as semantic role labeling, anaphora resolution, and paraphrase detection, under the heading of Recognizing Textual Entailment (RTE). We present a general strategy to automatically generate one or more sentential hypotheses based on an input sentence and pre-existing manual semantic annotations. The resulting suite of datasets enables us to probe a statistical RTE model's performance on different aspects of semantics. We demonstrate the value of this approach by investigating the behavior of a popular neural network RTE model.", "phrases": ["anaphora resolution", "nli", "natural language inference"], "overall_score": 2.405468858346918, "scores": [1.504047421106468, 0.5953763875725855, 0.5653741230962096], "rank_score": 0.8882659772584209} -{"id": "rodriguez-etal-2008-arabic", "title": "Arabic WordNet: Semi-automatic Extensions using Bayesian Inference", "abstract": "This presentation focuses on the semi-automatic extension of Arabic WordNet (AWN) using lexical and morphological rules and applying Bayesian inference. We briefly report on the current status of AWN and propose a way of extending its coverage by taking advantage of a limited set of highly productive Arabic morphological rules for deriving a range of semantically related word forms from verb entries. The application of this set of rules, combined with the use of bilingual Arabic-English resources and Princeton's WordNet, allows the generation of a graph representing the semantic neighbourhood of the original word. In previous work, a set of associations between the hypothesized Arabic words and English synsets was proposed on the basis of this graph. Here, a novel approach to extending AWN is presented whereby a Bayesian Network is automatically built from the graph and then the net is used as an inferencing mechanism for scoring the set of candidate associations. Both on its own and in combination with the previous technique, this new approach has led to improved results.", "phrases": ["semi-automatic extension", "bayesian inference", "arabic wordnet"], "overall_score": 0.9757285791321832, "scores": [0.965895163380464, 0.8843711229368477, 0.8141729954836323], "rank_score": 0.8881464272669813} -{"id": "kim-etal-2019-pivot", "title": "Pivot-based Transfer Learning for Neural Machine Translation between Non-English Languages", "abstract": "We present effective pre-training strategies for neural machine translation (NMT) using parallel corpora involving a pivot language, i.e., source-pivot and pivot-target, leading to a significant improvement in source-target translation. 
We propose three methods to increase the relation among source, pivot, and target languages in the pre-training: 1) step-wise training of a single model for different language pairs, 2) additional adapter component to smoothly connect pre-trained encoder and decoder, and 3) cross-lingual encoder training via autoencoding of the pivot language. Our methods greatly outperform multilingual models up to +2.6% BLEU in WMT 2019 French-German and German-Czech tasks. We show that our improvements are valid also in zero-shot/zero-resource scenarios.", "phrases": ["transfer learning", "neural machine translation", "pivot language", "resource language pair"], "overall_score": 1.8463354075338727, "scores": [0.913688553429745, 0.8325846844154227, 1.2835240721500873, 0.5218012716938405], "rank_score": 0.8878996454222738} -{"id": "raghu-etal-2019-disentangling", "title": "Disentangling Language and Knowledge in Task-Oriented Dialogs", "abstract": "The Knowledge Base (KB) used for real-world applications, such as booking a movie or restaurant reservation, keeps changing over time. End-to-end neural networks trained for these task-oriented dialogs are expected to be immune to any changes in the KB. However, existing approaches break down when asked to handle such changes. We propose an encoder-decoder architecture (BoSsNet) with a novel Bag-of-Sequences (BoSs) memory, which facilitates the disentangled learning of the response's language model and its knowledge incorporation. Consequently, the KB can be modified with new knowledge without a drop in interpretability. We find that BoSsNeT outperforms state-of-the-art models, with considerable improvements (10%) on bAbI OOV test sets and other human-human datasets. We also systematically modify existing datasets to measure disentanglement and show BoSsNeT to be robust to KB modifications.", "phrases": ["task-oriented dialog", "language model", "knowledge incorporation"], "overall_score": 1.42885511052659, "scores": [0.9597647506771272, 0.8607459819656421, 0.8428820571398483], "rank_score": 0.8877975965942059} -{"id": "marcheggiani-titov-2016-discrete", "title": "Discrete-State Variational Autoencoders for Joint Discovery and Factorization of Relations", "abstract": "We present a method for unsupervised open-domain relation discovery. In contrast to previous (mostly generative and agglomerative clustering) approaches, our model relies on rich contextual features and makes minimal independence assumptions. The model is composed of two parts: a feature-rich relation extractor, which predicts a semantic relation between two entities, and a factorization model, which reconstructs arguments (i.e., the entities) relying on the predicted relation. The two components are estimated jointly so as to minimize errors in recovering arguments. We study factorization models inspired by previous work in relation factorization and selectional preference modeling. Our models substantially outperform the generative and agglomerative-clustering counterparts and achieve state-of-the-art performance.", "phrases": ["variational autoencoder", "relation discovery", "vae"], "overall_score": 2.2766083775177086, "scores": [1.2431538125045682, 0.8458373431181585, 0.5737612736542492], "rank_score": 0.8875841430923254} -{"id": "gan-etal-2021-towards", "title": "Towards Robustness of Text-to-SQL Models against Synonym Substitution", "abstract": "Recently, there has been significant progress in studying neural networks to translate text descriptions into SQL queries. 
Despite achieving good performance on some public benchmarks, existing text-to-SQL models typically rely on the lexical matching between words in natural language (NL) questions and tokens in table schemas, which may render the models vulnerable to attacks that break the schema linking mechanism. In this work, we investigate the robustness of text-to-SQL models to synonym substitution. In particular, we introduce Spider-Syn, a human-curated dataset based on the Spider benchmark for text-to-SQL translation. NL questions in Spider-Syn are modified from Spider, by replacing their schema-related words with manually selected synonyms that reflect real-world question paraphrases. We observe that the accuracy dramatically drops by eliminating such explicit correspondence between NL questions and table schemas, even if the synonyms are not adversarially selected to conduct worst-case attacks. Finally, we present two categories of approaches to improve the model robustness. The first category of approaches utilizes additional synonym annotations for table schemas by modifying the model input, while the second category is based on adversarial training. We demonstrate that both categories of approaches significantly outperform their counterparts without the defense, and the first category of approaches is more effective.", "phrases": ["text-to-sql model", "synonym substitution", "schema-related word"], "overall_score": 1.2304018209249723, "scores": [0.9710766579532183, 0.8440404911102188, 0.847524758960375], "rank_score": 0.887547302674604} -{"id": "xiao-etal-2012-niutrans", "title": "NiuTrans: An Open Source Toolkit for Phrase-based and Syntax-based Machine Translation", "abstract": "We present a new open source toolkit for phrase-based and syntax-based machine translation. The toolkit supports several state-of-the-art models developed in statistical machine translation, including the phrase-based model, the hierarchical phrase-based model, and various syntax-based models. The key innovation provided by the toolkit is that the decoder can work with various grammars and offers different choices of decoding algorithms, such as phrase-based decoding, decoding as parsing/tree-parsing and forest-based decoding. Moreover, several useful utilities were distributed with the toolkit, including a discriminative reordering model, a simple and fast language model, and an implementation of minimum error rate training for weight tuning.", "phrases": ["open source toolkit", "syntax-based machine translation", "niutran"], "overall_score": 0.9749236971014128, "scores": [0.9836210061163847, 0.8656175334482115, 0.8130028366447045], "rank_score": 0.887413792069767} -{"id": "rottger-pierrehumbert-2021-temporal-adaptation", "title": "Temporal Adaptation of BERT and Performance on Downstream Document Classification: Insights from Social Media", "abstract": "Language use differs between domains and even within a domain, language use changes over time. For pre-trained language models like BERT, domain adaptation through continued pre-training has been shown to improve performance on in-domain downstream tasks. In this article, we investigate whether temporal adaptation can bring additional benefits. For this purpose, we introduce a corpus of social media comments sampled over three years. It contains unlabelled data for adaptation and evaluation on an upstream masked language modelling task as well as labelled data for fine-tuning and evaluation on a downstream document classification task. 
We find that temporality matters for both tasks: temporal adaptation improves upstream and temporal fine-tuning downstream task performance. Time-specific models generally perform better on past than on future test sets, which matches evidence on the bursty usage of topical words. However, adapting BERT to time and domain does not improve performance on the downstream task over only adapting to domain. Token-level analysis shows that temporal adaptation captures event-driven changes in language use in the downstream task, but not those changes that are actually relevant to task performance. Based on our findings, we discuss when temporal adaptation may be more effective.", "phrases": ["bert", "downstream task", "temporal adaptation", "upto-date corpora"], "overall_score": 1.726329262225699, "scores": [1.6577824390873106, 0.7894033939492516, 0.5515104077247059, 0.5499347442074576], "rank_score": 0.8871577462421814} -{"id": "mehryary-etal-2016-deep", "title": "Deep Learning with Minimal Training Data: TurkuNLP Entry in the BioNLP Shared Task 2016", "abstract": "We present the TurkuNLP entry to the BioNLP Shared Task 2016 Bacteria Biotopes event extraction (BB3-event) subtask. We propose a deep learning-based approach to event extraction using a combination of several Long Short-Term Memory (LSTM) networks over syntactic dependency graphs. Features for the proposed neural network are generated based on the shortest path connecting the two candidate entities in the dependency graph. We further detail how this network can be efficiently trained to have good generalization performance even when only a very limited number of training examples are available and part-of-speech (POS) and dependency type feature representations must be learned from scratch. Our method ranked second among the entries to the shared task, achieving an F-score of 52.1% with 62.3% precision and 44.8% recall.", "phrases": ["entry", "bionlp shared task", "deep learning"], "overall_score": 0.9743056813268133, "scores": [0.9355208592068618, 0.9037203271167737, 0.8213125632835213], "rank_score": 0.8868512498690523} -{"id": "tran-etal-2010-treematch", "title": "TreeMatch: A Fully Unsupervised WSD System Using Dependency Knowledge on a Specific Domain", "abstract": "Word sense disambiguation (WSD) is one of the main challenges in Computational Linguistics. TreeMatch is a WSD system originally developed using data from SemEval 2007 Task 7 (Coarse-grained English All-words Task) that has been adapted for use in SemEval 2010 Task 17 (All-words Word Sense Disambiguation on a Specific Domain). The system is based on a fully unsupervised method using dependency knowledge drawn from a domain specific knowledge base that was built for this task. When evaluated on the task, the system precision performs above the First Sense Baseline.", "phrases": ["wsd system", "dependency knowledge", "treematch"], "overall_score": 0.9742842123593202, "scores": [0.8984117895602359, 0.8873465935614339, 0.874736740796385], "rank_score": 0.886831707972685} -{"id": "srivastava-etal-2018-zero", "title": "Zero-shot Learning of Classifiers from Natural Language Quantification", "abstract": "Humans can efficiently learn new concepts using language. We present a framework through which a set of explanations of a concept can be used to learn a classifier without access to any labeled examples. 
We use semantic parsing to map explanations to probabilistic assertions grounded in latent class labels and observed attributes of unlabeled data, and leverage the differential semantics of linguistic quantifiers (e.g., 'usually' vs 'always') to drive model training. Experiments on three domains show that the learned classifiers outperform previous approaches for learning with limited data, and are comparable with fully supervised classifiers trained from a small number of labeled examples.", "phrases": ["quantifier", "zero-shot learning", "semantic parser"], "overall_score": 1.9483224629617588, "scores": [1.5005455223628397, 0.5932530138647618, 0.5663607616314082], "rank_score": 0.8867197659530032} -{"id": "shimbo-hara-2007-discriminative", "title": "A Discriminative Learning Model for Coordinate Conjunctions", "abstract": "We propose a sequence-alignment based method for detecting and disambiguating coordinate conjunctions. In this method, averaged perceptron learning is used to adapt the substitution matrix to the training data drawn from the target language and domain. To reduce the cost of training data construction, our method accepts training examples in which complete word-by-word alignment labels are missing, but instead only the boundaries of coordinated conjuncts are marked. We report promising empirical results in detecting and disambiguating coordinated noun phrases in the GENIA corpus, despite a relatively small number of training examples and minimal features employed.", "phrases": ["discriminative learning model", "conjunct", "coordination disambiguation", "coordinate structure", "alignment-based method"], "overall_score": 2.401071747510576, "scores": [0.9486854250196409, 1.4666716096818704, 0.8630403541290781, 0.591170831004038, 0.5636430746482263], "rank_score": 0.8866422588965707} -{"id": "morgan-etal-2022-isl", "title": "ISL-LEX v.1: An Online Lexical Resource of Israeli Sign Language", "abstract": "This paper describes a new online lexical resource and interactive tool for Israeli Sign Language, ISL-LEX v.1. The dataset contains 961 non-compound ISL signs with the following information: subjective frequency ratings from native signers, iconicity ratings from native and non-native signers (presented separately), and phonological properties in six domains. The selection of signs was also designed to reflect a broad distinction between those signs acquired early in childhood and those acquired later. ISL-LEX is an online interface built using the SIGN-LEX visualization (Caselli et al. 2022), and is intended for use by researchers, educators, and students. It is therefore offered in two text-based versions, English and Hebrew, with video instructions in ISL.", "phrases": ["online lexical resource", "israeli sign language", "isl-lex v.1"], "overall_score": 0.9740747878011532, "scores": [0.9170141049398889, 0.8916319703071097, 0.8512771693274683], "rank_score": 0.8866410815248223} -{"id": "bunescu-2008-learning", "title": "Learning with Probabilistic Features for Improved Pipeline Models", "abstract": "We present a novel learning framework for pipeline models aimed at improving the communication between consecutive stages in a pipeline. Our method exploits the confidence scores associated with outputs at any given stage in a pipeline in order to compute probabilistic features used at other stages downstream. We describe a simple method of integrating probabilistic features into the linear scoring functions used by state of the art machine learning algorithms. 
Experimental evaluation on dependency parsing and named entity recognition demonstrates the superiority of our approach over the baseline pipeline models, especially when upstream stages in the pipeline exhibit low accuracy.", "phrases": ["probabilistic feature", "pipeline model", "weight"], "overall_score": 1.7252641319255344, "scores": [1.1768139081679754, 0.9500363354639509, 0.5329808890076307], "rank_score": 0.8866103775465191} -{"id": "espla-etal-2019-paracrawl", "title": "ParaCrawl: Web-scale parallel corpora for the languages of the EU", "abstract": "We describe two projects funded by the Connecting Europe Facility, Provision of Web-Scale Parallel Corpora for Official European Languages (2016-EU-IA-0114, completed) and Broader Web-Scale Provision of Parallel Corpora for European Languages (2017-EU-IA-0178, ongoing), which aim at harvesting parallel corpora from the Internet for languages used in the European Union. In addition to parallel corpora, the project releases successive versions of the free/open-source web crawling software used.", "phrases": ["web-scale parallel corpora", "web", "paracrawl"], "overall_score": 1.426851640480502, "scores": [0.9807177950620871, 0.8147555960156077, 0.8641849209149877], "rank_score": 0.8865527706642276} -{"id": "rozovskaya-roth-2013-joint", "title": "Joint Learning and Inference for Grammatical Error Correction", "abstract": "State-of-the-art systems for grammatical error correction are based on a collection of independently-trained models for specific errors. Such models ignore linguistic interactions at the sentence level and thus do poorly on mistakes that involve grammatical dependencies among several words. In this paper, we identify linguistic structures with interacting grammatical properties and propose to address such dependencies via joint inference and joint learning. We show that it is possible to identify interactions well enough to facilitate a joint approach and, consequently, that joint methods correct incoherent predictions that independently-trained classifiers tend to produce. Furthermore, because the joint learning model considers interacting phenomena during training, it is able to identify mistakes that require making multiple changes simultaneously and that standard approaches miss. Overall, our model significantly outperforms the Illinois system that placed first in the CoNLL-2013 shared task on grammatical error correction.", "phrases": ["grammatical error correction", "such dependency", "illinois system", "joint learning", "linear programming"], "overall_score": 2.0411764855955075, "scores": [1.9102509925100652, 0.914950802938869, 0.5569076068237476, 0.5297142110753377, 0.5205348080759861], "rank_score": 0.8864716842848012} -{"id": "guo-etal-2018-effective", "title": "Effective Parallel Corpus Mining using Bilingual Sentence Embeddings", "abstract": "This paper presents an effective approach for parallel corpus mining using bilingual sentence embeddings. Our embedding models are trained to produce similar representations exclusively for bilingual sentence pairs that are translations of each other. This is achieved using a novel training method that introduces hard negatives consisting of sentences that are not translations but have some degree of semantic similarity. The quality of the resulting embeddings is evaluated on parallel corpus reconstruction and by assessing machine translation systems trained on gold vs. mined sentence pairs. 
We find that the sentence embeddings can be used to reconstruct the United Nations Parallel Corpus (Ziemski et al., 2016) at the sentence-level with a precision of 48.9% for en-fr and 54.9% for en-es. When adapted to document-level matching, we achieve a parallel document matching accuracy that is comparable to the significantly more computationally intensive approach of Uszkoreit et al. (2010). Using reconstructed parallel data, we are able to train NMT models that perform nearly as well as models trained on the original data (within 1-2 BLEU).", "phrases": ["parallel corpus mining", "sentence embedding", "semantic similarity", "different language"], "overall_score": 2.739776815119789, "scores": [0.920841879055195, 1.5654135117651518, 0.533074632269486, 0.5261105283889884], "rank_score": 0.8863601378697052} -{"id": "dale-kilgarriff-2010-helping", "title": "Helping Our Own: Text Massaging for Computational Linguistics as a New Shared Task", "abstract": "In this paper, we propose a new shared task called HOO: Helping Our Own. The aim is to use tools and techniques developed in computational linguistics to help people writing about computational linguistics. We describe a text-to-text generation scenario that poses challenging research questions, and delivers practical outcomes that are useful in the first case to our own community and potentially much more widely. Two specific factors make us optimistic that this task will generate useful outcomes: one is the availability of the ACL Anthology, a large corpus of the target text type; the other is that CL researchers who are non-native speakers of English will be motivated to use prototype systems, providing informed and precise feedback in large quantity. We lay out our plans in detail and invite comment and critique with the aim of improving the nature of the planned exercise.", "phrases": ["writing", "non-native speaker", "grammatical error"], "overall_score": 2.273270817582208, "scores": [1.1735949075050438, 0.9252465341437758, 0.5600073316836929], "rank_score": 0.8862829244441709} -{"id": "avraham-goldberg-2016-improving", "title": "Improving Reliability of Word Similarity Evaluation by Redesigning Annotation Task and Performance Measure", "abstract": "We suggest a new method for creating and using gold-standard datasets for word similarity evaluation. Our goal is to improve the reliability of the evaluation, and we do this by redesigning the annotation task to achieve higher inter-rater agreement, and by defining a performance measure which takes the reliability of each annotation decision in the dataset into account.", "phrases": ["reliability", "word similarity evaluation", "annotation task"], "overall_score": 0.9734181006517036, "scores": [0.9642049959186446, 0.8778029387030487, 0.8161220827436214], "rank_score": 0.8860433391217716} -{"id": "kotonya-toni-2020-explainable-automated", "title": "Explainable Automated Fact-Checking for Public Health Claims", "abstract": "Fact-checking is the task of verifying the veracity of claims by assessing their assertions against credible evidence. The vast majority of fact-checking studies focus exclusively on political claims. Very little research explores fact-checking for other topics, specifically subject matters for which expertise is required. We present the first study of explainable fact-checking for claims which require specific expertise. For our case study we choose the setting of public health. 
To support this case study we construct a new dataset PUBHEALTH of 11.8K claims accompanied by journalist-crafted, gold standard explanations (i.e., judgments) to support the fact-check labels for claims. We explore two tasks: veracity prediction and explanation generation. We also define and evaluate, with humans and computationally, three coherence properties of explanation quality. Our results indicate that, by training on in-domain data, gains can be made in explainable, automated fact-checking for claims which require specific expertise.", "phrases": ["automated fact-checking", "public health claim", "claim", "pubhealth", "explanation quality"], "overall_score": 2.3987911564016717, "scores": [0.9044005958157945, 1.510967540778434, 0.8635642787731689, 0.6158983947380936, 0.5341697221323048], "rank_score": 0.8858001064475591} -{"id": "buck-etal-2014-n", "title": "N-gram Counts and Language Models from the Common Crawl", "abstract": "We contribute 5-gram counts and language models trained on the Common Crawl corpus, a collection of over 9 billion web pages. This release improves upon the Google n-gram counts in two key ways: the inclusion of low-count entries and deduplication to reduce boilerplate. By preserving singletons, we were able to use Kneser-Ney smoothing to build large language models. This paper describes how the corpus was processed with emphasis on the problems that arise in working with data at this scale. Our unpruned Kneser-Ney English 5-gram language model, built on 975 billion deduplicated tokens, contains over 500 billion unique n-grams. We show gains of 0.5-1.4 BLEU by using large language models to translate into various languages.", "phrases": ["language model", "common crawl", "user-generated content"], "overall_score": 2.03957259276612, "scores": [1.4545295371523184, 0.6128209798655445, 0.5899748504204388], "rank_score": 0.8857751224794339} -{"id": "li-sporleder-2010-using", "title": "Using Gaussian Mixture Models to Detect Figurative Language in Context", "abstract": "We present a Gaussian Mixture model for detecting different types of figurative language in context. We show that this model performs well when the parameters are estimated in an unsupervised fashion using EM. Performance can be improved further by estimating the parameters from a small annotated data set.", "phrases": ["gaussian mixture models", "figurative language", "cohesion", "tokens", "idiom recognition"], "overall_score": 2.03846606199332, "scores": [1.9670167641657645, 0.8362965085684559, 0.5508785393038974, 0.5397701486906089, 0.5325108506250271], "rank_score": 0.8852945622707509} -{"id": "li-etal-2020-docbank", "title": "DocBank: A Benchmark Dataset for Document Layout Analysis", "abstract": "Document layout analysis usually relies on computer vision models to understand documents while ignoring textual information that is vital to capture. Meanwhile, high quality labeled datasets with both visual and textual information are still insufficient. In this paper, we present DocBank, a benchmark dataset that contains 500K document pages with fine-grained token-level annotations for document layout analysis. DocBank is constructed using a simple yet effective way with weak supervision from the LaTeX documents available on the arXiv.com. With DocBank, models from different modalities can be compared fairly and multi-modal approaches will be further investigated and boost the performance of document layout analysis. 
We build several strong baselines and manually split train/dev/test sets for evaluation. Experiment results show that models trained on DocBank accurately recognize the layout information for a variety of documents. The DocBank dataset is publicly available at .", "phrases": ["benchmark dataset", "document layout analysis", "docbank"], "overall_score": 1.4248253155604926, "scores": [0.9388791940040406, 0.8813923677863393, 0.8356096708175909], "rank_score": 0.885293744202657} -{"id": "meyer-etal-2011-multilingual", "title": "Multilingual Annotation and Disambiguation of Discourse Connectives for Machine Translation", "abstract": "Many discourse connectives can signal several types of relations between sentences. Their automatic disambiguation, i.e. the labeling of the correct sense of each occurrence, is important for discourse parsing, but could also be helpful to machine translation. We describe new approaches for improving the accuracy of manual annotation of three discourse connectives (two English, one French) by using parallel corpora. An appropriate set of labels for each connective can be found using information from their translations. Our results for automatic disambiguation are state-of-the-art, at up to 85% accuracy using surface features. Using feature analysis, contextual features are shown to be useful across languages and connectives.", "phrases": ["disambiguation", "discourse connective", "machine translation"], "overall_score": 1.5861650398280054, "scores": [0.8650704429795083, 0.8365846172668022, 0.9541116323299662], "rank_score": 0.8852555641920922} -{"id": "artetxe-etal-2020-cross", "title": "On the Cross-lingual Transferability of Monolingual Representations", "abstract": "State-of-the-art unsupervised multilingual models (e.g., multilingual BERT) have been shown to generalize in a zero-shot cross-lingual setting. This generalization ability has been attributed to the use of a shared subword vocabulary and joint training across multiple languages giving rise to deep multilingual abstractions. We evaluate this hypothesis by designing an alternative approach that transfers a monolingual model to new languages at the lexical level. More concretely, we first train a transformer-based masked language model on one language, and transfer it to a new language by learning a new embedding matrix with the same masked language modeling objective, freezing parameters of all other layers. This approach does not rely on a shared vocabulary or joint training. However, we show that it is competitive with multilingual BERT on standard cross-lingual classification benchmarks and on a new Cross-lingual Question Answering Dataset (XQuAD). Our results contradict common beliefs of the basis of the generalization ability of multilingual models and suggest that deep monolingual models learn some abstractions that generalize across languages. 
We also release XQuAD as a more comprehensive cross-lingual benchmark, which comprises 240 paragraphs and 1190 question-answer pairs from SQuAD v1.1 translated into ten languages by professional translators.", "phrases": ["cross-lingual transferability", "language model", "paragraph", "professional translator"], "overall_score": 3.7349660062852346, "scores": [0.9793663675755657, 1.4938820966661992, 0.5410167812085556, 0.5263999385032662], "rank_score": 0.8851662959883967} -{"id": "oco-roxas-2018-survey", "title": "A Survey of Machine Translation Work in the Philippines: From 1998 to 2018", "abstract": "In this paper, we present a survey covering the last 20 years of machine translation work in the Philippines. We detail the various approaches used and innovations applied. We also discuss the various mechanisms and support that keep the MT community thriving, as well as the challenges ahead.", "phrases": ["survey", "machine translation work", "philippines"], "overall_score": 1.22674046242783, "scores": [0.97147154473013, 0.8526621857394584, 0.8305848419341073], "rank_score": 0.884906190801232} -{"id": "shrestha-mckeown-2004-detection", "title": "Detection of Question-Answer Pairs in Email Conversations", "abstract": "While sentence extraction as an approach to summarization has been shown to work in documents of certain genres, because of the conversational nature of email communication where utterances are made in relation to one made previously, sentence extraction may not capture the necessary segments of dialogue that would make a summary coherent. In this paper, we present our work on the detection of question-answer pairs in an email conversation for the task of email summarization. We show that various features based on the structure of email-threads can be used to improve upon lexical similarity of discourse segments for question-answer pairing.", "phrases": ["question-answer pair", "email conversation", "email summarization"], "overall_score": 2.1984252499117143, "scores": [0.911514679420646, 0.8813083708157248, 0.8613111605187164], "rank_score": 0.8847114035850291} -{"id": "isonuma-etal-2017-extractive", "title": "Extractive Summarization Using Multi-Task Learning with Document Classification", "abstract": "The need for automatic document summarization that can be used for practical applications is increasing rapidly. In this paper, we propose a general framework for summarization that extracts sentences from a document using externally related information. Our work is aimed at single document summarization using small amounts of reference summaries. In particular, we address document summarization in the framework of multi-task learning using curriculum learning for sentence extraction and document classification. The proposed framework enables us to obtain better feature representations to extract sentences from documents. We evaluate our proposed summarization method on two datasets: financial report and news corpus. 
Experimental results demonstrate that our summarizers achieve performance that is comparable to state-of-the-art systems.", "phrases": ["summarization", "multi-task learning", "document classification"], "overall_score": 1.8393396474597932, "scores": [0.8662084382532732, 0.8519654944108266, 0.9354322552365981], "rank_score": 0.8845353959668993} -{"id": "isozaki-etal-2010-automatic", "title": "Automatic Evaluation of Translation Quality for Distant Language Pairs", "abstract": "Automatic evaluation of Machine Translation (MT) quality is essential to developing high-quality MT systems. Various evaluation metrics have been proposed, and BLEU is now used as the de facto standard metric. However, when we consider translation between distant language pairs such as Japanese and English, most popular metrics (e.g., BLEU, NIST, PER, and TER) do not work well. It is well known that Japanese and English have completely different word orders, and special care must be paid to word order in translation. Otherwise, translations with wrong word order often lead to misunderstanding and incomprehensibility. For instance, SMT-based Japanese-to-English translators tend to translate 'A because B' as 'B because A.' Thus, word order is the most important problem for distant language translation. However, conventional evaluation metrics do not significantly penalize such word order mistakes. Therefore, locally optimizing these metrics leads to inadequate translations. In this paper, we propose an automatic evaluation metric based on rank correlation coefficients modified with precision. Our meta-evaluation of the NTCIR-7 PATMT JE task data shows that this metric outperforms conventional metrics.", "phrases": ["translation quality", "distant language pair", "word order", "conventional metric", "automatic evaluation"], "overall_score": 2.334258091329128, "scores": [0.9448005125477258, 0.8634131207769109, 1.1168644105174674, 0.9706191548213398, 0.526825315596286], "rank_score": 0.8845045028519459} -{"id": "malmasi-dras-2015-language", "title": "Language Identification using Classifier Ensembles", "abstract": "In this paper we describe the language identification system we developed for the Discriminating Similar Languages (DSL) 2015 shared task. We constructed a classifier ensemble composed of several Support Vector Machine (SVM) base classifiers, each trained on a single feature type. Our feature types include character 1\u20136 grams and word unigrams and bigrams. Using this system we were able to outperform the other entries in the closed training track of the DSL 2015 shared task, achieving the best accuracy of 95.54%.", "phrases": ["ensemble", "feature type", "language identification"], "overall_score": 2.196591138386988, "scores": [0.937462075999075, 1.182005276975892, 0.5324525554542568], "rank_score": 0.8839733028097413} -{"id": "quirk-etal-2005-dependency", "title": "Dependency Treelet Translation: Syntactically Informed Phrasal SMT", "abstract": "We describe a novel approach to statistical machine translation that combines syntactic information in the source language with recent advances in phrasal translation. This method requires a source-language dependency parser, target language word segmentation and an unsupervised word alignment component. We align a parallel corpus, project the source dependency parse onto the target sentence, extract dependency treelet translation pairs, and train a tree-based ordering model. 
We describe an efficient decoder and show that using these tree-based models in combination with conventional SMT models provides a promising approach that incorporates the power of phrasal SMT with the linguistic generality available in a parser.", "phrases": ["phrasal smt", "generalization", "dependency treelet translation", "smt system", "source side"], "overall_score": 3.7550437899662237, "scores": [1.5651570017184624, 0.8677421865524811, 0.8421879437506018, 0.588519867409087, 0.5556562644195728], "rank_score": 0.883852652770041} -{"id": "welbl-etal-2018-constructing", "title": "Constructing Datasets for Multi-hop Reading Comprehension Across Documents", "abstract": "Most Reading Comprehension methods limit themselves to queries which can be answered using a single sentence, paragraph, or document. Enabling models to combine disjoint pieces of textual evidence would extend the scope of machine comprehension methods, but currently no resources exist to train and test this capability. We propose a novel task to encourage the development of models for text understanding across multiple documents and to investigate the limits of existing methods. In our task, a model learns to seek and combine evidence \u2014 effectively performing multihop, alias multi-step, inference. We devise a methodology to produce datasets for this task, given a collection of query-answer pairs and thematically linked documents. Two datasets from different domains are induced, and we identify potential pitfalls and devise circumvention strategies. We evaluate two previously proposed competitive models and find that one can integrate information across documents. However, both models struggle to select relevant information; and providing documents guaranteed to be relevant greatly improves their performance. While the models outperform several strong baselines, their best accuracy reaches 54.5% on an annotated test set, compared to human performance at 85.0%, leaving ample room for improvement.", "phrases": ["comprehension", "textual evidence", "multiple document"], "overall_score": 3.238019673779842, "scores": [0.8526634871553898, 1.2397924744200866, 0.5590782446530179], "rank_score": 0.883844735409498} -{"id": "he-etal-2017-deep", "title": "Deep Semantic Role Labeling: What Works and What's Next", "abstract": "We introduce a new deep learning model for semantic role labeling (SRL) that significantly improves the state of the art, along with detailed analyses to reveal its strengths and limitations. We use a deep highway BiLSTM architecture with constrained decoding, while observing a number of recent best practices for initialization and regularization. Our 8-layer ensemble model achieves 83.2 F1 on the CoNLL 2005 test set and 83.4 F1 on CoNLL 2012, roughly a 10% relative error reduction over the previous state of the art. 
Extensive empirical analysis of these gains shows that (1) deep models excel at recovering long-distance dependencies but can still make surprisingly obvious errors, and (2) that there is still room for syntactic parsers to improve these results.", "phrases": ["semantic role labeling", "neural srl model", "structured prediction task", "dependency srl", "output structure"], "overall_score": 3.525152761342399, "scores": [1.9174816924000373, 0.8709613665834954, 0.5609637277915324, 0.5381941546550287, 0.531008810066186], "rank_score": 0.883721950299256} -{"id": "shi-etal-2016-deep", "title": "Deep LSTM based Feature Mapping for Query Classification", "abstract": "Traditional convolutional neural network (CNN) based query classification uses linear feature mapping in its convolution operation. The recurrent neural network (RNN) differs from a CNN in representing word sequence with their ordering information kept explicitly. We propose using a deep long-short-term-memory (DLSTM) based feature mapping to learn feature representation for CNN. The DLSTM, which is a stack of LSTM units, has different order of feature representations at different depth of LSTM unit. The bottom LSTM unit equipped with input and output gates, extracts the first order feature representation from current word. To extract higher order nonlinear feature representation, the LSTM unit at higher position gets input from two parts. First part is the lower LSTM unit\u2019s memory cell from previous word. Second part is the lower LSTM unit\u2019s hidden output from current word. In this way, the DLSTM captures the nonlinear nonconsecutive interaction within n-grams. Using an architecture that combines a stack of the DLSTM layers with a traditional CNN layer, we have observed new state-of-the-art query classification accuracy on benchmark data sets for query classification.", "phrases": ["feature mapping", "query classification", "cnn", "deep lstm"], "overall_score": 1.7193391758491046, "scores": [0.9443000871359929, 0.9118208356515645, 0.8530603570422923, 0.8250809299310629], "rank_score": 0.8835655524402282} -{"id": "huang-etal-2017-moodswipe", "title": "MoodSwipe: A Soft Keyboard that Suggests Messages Based on User-Specified Emotions", "abstract": "We present MoodSwipe, a soft keyboard that suggests text messages given the user-specified emotions utilizing the real dialog data. The aim of MoodSwipe is to create a convenient user interface to enjoy the technology of emotion classification and text suggestion, and at the same time to collect labeled data automatically for developing more advanced technologies. While users select the MoodSwipe keyboard, they can type as usual but sense the emotion conveyed by their text and receive suggestions for their message as a benefit. In MoodSwipe, the detected emotions serve as the medium for suggested texts, where viewing the latter is the incentive to correcting the former. 
We conduct several experiments to show the superiority of the emotion classification models trained on the dialog data, and further to verify good emotion cues are important context for text suggestion.", "phrases": ["soft keyboard", "user-specified emotion", "moodswipe"], "overall_score": 0.9701688654689508, "scores": [0.9243552062925898, 0.8952556080377411, 0.8296464590753506], "rank_score": 0.8830857578018939} -{"id": "tu-etal-2020-engine", "title": "ENGINE: Energy-Based Inference Networks for Non-Autoregressive Machine Translation", "abstract": "We propose to train a non-autoregressive machine translation model to minimize the energy defined by a pretrained autoregressive model. In particular, we view our non-autoregressive translation system as an inference network (Tu and Gimpel, 2018) trained to minimize the autoregressive teacher energy. This contrasts with the popular approach of training a non-autoregressive model on a distilled corpus consisting of the beam-searched outputs of such a teacher model. Our approach, which we call ENGINE (ENerGy-based Inference NEtworks), achieves state-of-the-art non-autoregressive results on the IWSLT 2014 DE-EN and WMT 2016 RO-EN datasets, approaching the performance of autoregressive models.", "phrases": ["energy-based inference networks", "non-autoregressive machine translation", "engine"], "overall_score": 1.2241190961931083, "scores": [0.9799607428231113, 0.839787649762829, 0.8292974317169753], "rank_score": 0.8830152747676386} -{"id": "kreutzer-etal-2017-bandit", "title": "Bandit Structured Prediction for Neural Sequence-to-Sequence Learning", "abstract": "Bandit structured prediction describes a stochastic optimization framework where learning is performed from partial feedback. This feedback is received in the form of a task loss evaluation to a predicted output structure, without having access to gold standard structures. We advance this framework by lifting linear bandit learning to neural sequence-to-sequence learning problems using attention-based recurrent neural networks. Furthermore, we show how to incorporate control variates into our learning algorithms for variance reduction and improved generalization. We present an evaluation on a neural machine translation task that shows improvements of up to 5.89 BLEU points for domain adaptation from simulated bandit feedback.", "phrases": ["structured prediction", "stochastic optimization framework", "bandit feedback"], "overall_score": 2.194188264708786, "scores": [1.1455742243721516, 0.9366962619113742, 0.5667484596346034], "rank_score": 0.883006315306043} -{"id": "bansal-etal-2020-learning", "title": "Learning to Few-Shot Learn Across Diverse Natural Language Classification Tasks", "abstract": "Pre-trained transformer models have shown enormous success in improving performance on several downstream tasks. However, fine-tuning on a new task still requires large amounts of task-specific labeled data to achieve good performance. We consider this problem of learning to generalize to new tasks, with a few examples, as a meta-learning problem. While meta-learning has shown tremendous progress in recent years, its application is still limited to simulated problems or problems with limited diversity across tasks. We develop a novel method, LEOPARD, which enables optimization-based meta-learning across tasks with a different number of classes, and evaluate different methods on generalization to diverse NLP classification tasks. 
LEOPARD is trained with the state-of-the-art transformer architecture and shows better generalization to tasks not seen at all during training, with as few as 4 examples per label. Across 17 NLP tasks, including diverse domains of entity typing, natural language inference, sentiment analysis, and several other text classification tasks, we show that LEOPARD learns better initial parameters for few-shot learning than self-supervised pre-training or multi-task training, outperforming many strong baselines, for example, yielding 14.6% average relative gain in accuracy on unseen tasks with only 4 examples per label.", "phrases": ["few-shot learning", "language inference", "sentiment analysis", "maml", "diverse nlp task"], "overall_score": 2.501635948434058, "scores": [1.2133564280791136, 0.8868006412317311, 0.8843723575700867, 0.8255597372048527, 0.6047494743143688], "rank_score": 0.8829677276800305} -{"id": "miura-etal-2014-teamx", "title": "TeamX: A Sentiment Analyzer with Enhanced Lexicon Mapping and Weighting Scheme for Unbalanced Data", "abstract": "This paper describes the system that has been used by TeamX in SemEval-2014 Task 9 Subtask B. The system is a sentiment analyzer based on a supervised text categorization approach designed with the following two concepts. Firstly, since lexicon features were shown to be effective in SemEval-2013 Task 2, various lexicons and pre-processors for them are introduced to enhance lexical information. Secondly, since the distribution of sentiment on tweets is known to be unbalanced, a weighting scheme is introduced to bias the output of a machine learner. For the test run, the system was tuned towards Twitter texts and successfully achieved high-scoring results on Twitter data, average F1 70.96 on Twitter2014 and average F1 56.50 on Twitter2014Sarcasm.", "phrases": ["sentiment analyzer", "weighting scheme", "machine learner"], "overall_score": 1.9393117334874892, "scores": [0.9383920306403709, 0.8306718508849522, 0.8787925371916819], "rank_score": 0.8826188062390017} -{"id": "pedler-mitton-2010-large", "title": "A Large List of Confusion Sets for Spellchecking Assessed Against a Corpus of Real-word Errors", "abstract": "One of the methods that has been proposed for dealing with real-word errors (errors that occur when a correctly spelled word is substituted for the one intended) is the \u201cconfusion-set\u201d approach - a confusion set being a small group of words that are likely to be confused with one another. Using a list of confusion sets drawn up in advance, a spellchecker, on finding one of these words in a text, can assess whether one of the other members of its set would be a better fit and, if it appears to be so, propose that word as a correction. Much of the research using this approach has suffered from two weaknesses. The first is the small number of confusion sets used. The second is that systems have largely been tested on artificial errors. In this paper we address these two weaknesses. 
We describe the creation of a realistically sized list of confusion sets, then the assembly of a corpus of real-word errors, and then we assess the potential of that list in relation to that corpus.", "phrases": ["list", "confusion set", "real-word error"], "overall_score": 0.9693760342011175, "scores": [0.8997712734310949, 0.8917847937207732, 0.8555362078935793], "rank_score": 0.8823640916818158} -{"id": "turney-2006-expressing", "title": "Expressing Implicit Semantic Relations without Supervision", "abstract": "We present an unsupervised learning algorithm that mines large text corpora for patterns that express implicit semantic relations. For a given input word pair X:Y with some unspecified semantic relations, the corresponding output list of patterns (P1,..., Pm) is ranked according to how well each pattern Pi expresses the relations between X and Y. For example, given X = ostrich and Y = bird, the two highest ranking output patterns are \"X is the largest Y\" and \"Y such as the X\". The output patterns are intended to be useful for finding further pairs with the same relations, to support the construction of lexicons, ontologies, and semantic networks. The patterns are sorted by pertinence, where the pertinence of a pattern Pi for a word pair X:Y is the expected relational similarity between the given pair and typical pairs for Pi. The algorithm is empirically evaluated on two tasks, solving multiple-choice SAT word analogy questions and classifying semantic relations in noun-modifier pairs. On both tasks, the algorithm achieves state-of-the-art results, performing significantly better than several alternative pattern ranking algorithms, based on tf-idf.", "phrases": ["semantic relation", "web", "unsupervised algorithm"], "overall_score": 1.9386013579567376, "scores": [1.2268488474196373, 0.8854937435880912, 0.534543910198688], "rank_score": 0.8822955004021388} -{"id": "chairatanakul-etal-2021-cross-lingual", "title": "Cross-lingual Transfer for Text Classification with Dictionary-based Heterogeneous Graph", "abstract": "In cross-lingual text classification, it is required that task-specific training data in high-resource source languages are available, where the task is identical to that of a low-resource target language. However, collecting such training data can be infeasible because of the labeling cost, task characteristics, and privacy concerns. This paper proposes an alternative solution that uses only task-independent word embeddings of high-resource languages and bilingual dictionaries. First, we construct a dictionary-based heterogeneous graph (DHG) from bilingual dictionaries. This opens the possibility of using graph neural networks for cross-lingual transfer. The remaining challenge is the heterogeneity of DHG because multiple languages are considered. To address this challenge, we propose a dictionary-based heterogeneous graph neural network (DHGNet) that effectively handles the heterogeneity of DHG by two-step aggregations, which are word-level and language-level aggregations. Experimental results demonstrate that our method outperforms pretrained models even though it does not have access to large corpora. Furthermore, it can perform well even though dictionaries contain many incorrect translations. 
Its robustness allows the use of a wider range of dictionaries, such as automatically constructed and crowdsourced dictionaries, which are convenient for real-world applications.", "phrases": ["text classification", "heterogeneous graph", "cross-lingual transfer"], "overall_score": 0.9687143088795924, "scores": [0.9214249133924809, 0.9032841668631791, 0.8205762097550742], "rank_score": 0.8817617633369114} -{"id": "cao-etal-2017-parsing", "title": "Parsing to 1-Endpoint-Crossing, Pagenumber-2 Graphs", "abstract": "We study the Maximum Subgraph problem in deep dependency parsing. We consider two restrictions to deep dependency graphs: (a) 1-endpoint-crossing and (b) pagenumber-2. Our main contribution is an exact algorithm that obtains maximum subgraphs satisfying both restrictions simultaneously in O(n^5) time. Moreover, ignoring one linguistically rare structure decreases the complexity to O(n^4). We also extend our quartic-time algorithm into a practical parser with a discriminative disambiguation model and evaluate its performance on four linguistic data sets used in semantic dependency parsing.", "phrases": ["1-endpoint-crossing", "pagenumber-2", "dependency parsing", "appropriate graph class"], "overall_score": 1.715675459878877, "scores": [1.4295960674570483, 0.9015001293089078, 0.6027618610113298, 0.5928730417275723], "rank_score": 0.8816827748762146} -{"id": "dozat-manning-2018-simpler", "title": "Simpler but More Accurate Semantic Dependency Parsing", "abstract": "While syntactic dependency annotations concentrate on the surface or functional structure of a sentence, semantic dependency annotations aim to capture between-word relationships that are more closely related to the meaning of a sentence, using graph-structured representations. We extend the LSTM-based syntactic parser of Dozat and Manning (2017) to train on and generate these graph structures. The resulting system on its own achieves state-of-the-art performance, beating the previous, substantially more complex state-of-the-art system by 0.6% labeled F1. Adding linguistically richer input representations pushes the margin even higher, allowing us to beat it by 1.9% labeled F1.", "phrases": ["dependency parsing", "arc", "bilstms", "second-order information"], "overall_score": 2.8017576638845787, "scores": [1.5474128521803092, 0.8736339650616756, 0.5612874704975253, 0.5440473451075728], "rank_score": 0.8815954082117707} -{"id": "hasan-etal-2007-large", "title": "Are Very Large N-Best Lists Useful for SMT?", "abstract": "This paper describes an efficient method to extract large n-best lists from a word graph produced by a statistical machine translation system. The extraction is based on the k shortest paths algorithm which is efficient even for very large k. We show that, although we can generate large numbers of distinct translation hypotheses, these numerous candidates are not able to significantly improve overall system performance. 
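The biaffine arc scorer at the core of the Dozat and Manning (2018) record above can be sketched in a few lines of PyTorch; for semantic dependency graphs, each arc score is typically passed through a sigmoid and thresholded independently rather than decoded as a tree. Shapes and names here are illustrative, not the authors' code.

import torch
import torch.nn as nn

class BiaffineArcScorer(nn.Module):
    # Scores every head-dependent pair with a biaffine form over two MLP views.
    def __init__(self, hidden, arc_dim=256):
        super().__init__()
        self.head_mlp = nn.Sequential(nn.Linear(hidden, arc_dim), nn.ReLU())
        self.dep_mlp = nn.Sequential(nn.Linear(hidden, arc_dim), nn.ReLU())
        self.U = nn.Parameter(torch.zeros(arc_dim + 1, arc_dim))  # extra row acts as a bias

    def forward(self, h):  # h: (batch, seq_len, hidden), e.g. BiLSTM states
        head = self.head_mlp(h)
        dep = self.dep_mlp(h)
        head = torch.cat([head, torch.ones_like(head[..., :1])], dim=-1)
        # scores[b, i, j] = score of an arc with head i and dependent j
        return torch.einsum("bid,de,bje->bij", head, self.U, dep)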
We conclude that large n-best lists would benefit from better discriminating models.", "phrases": ["n-b list", "translation hypothesis", "different size"], "overall_score": 1.4177442122618513, "scores": [1.232211094205092, 0.8343938361781053, 0.5760770916607604], "rank_score": 0.8808940073479858} -{"id": "vilnis-etal-2018-probabilistic", "title": "Probabilistic Embedding of Knowledge Graphs with Box Lattice Measures", "abstract": "Embedding methods which enforce a partial order or lattice structure over the concept space, such as Order Embeddings (OE), are a natural way to model transitive relational data (e.g. entailment graphs). However, OE learns a deterministic knowledge base, limiting expressiveness of queries and the ability to use uncertainty for both prediction and learning (e.g. learning from expectations). Probabilistic extensions of OE have provided the ability to somewhat calibrate these denotational probabilities while retaining the consistency and inductive bias of ordered models, but lack the ability to model the negative correlations found in real-world knowledge. In this work we show that a broad class of models that assign probability measures to OE can never capture negative correlation, which motivates our construction of a novel box lattice and accompanying probability measure to capture anti-correlation and even disjoint concepts, while still providing the benefits of probabilistic modeling, such as the ability to perform rich joint and conditional queries over arbitrary sets of concepts, and both learning from and predicting calibrated uncertainty. We show improvements over previous approaches in modeling the Flickr and WordNet entailment graphs, and investigate the power of the model.", "phrases": ["lattice structure", "probability measure", "box embedding"], "overall_score": 2.0279608824189324, "scores": [1.2390813455149765, 0.8583781385547783, 0.5447371781808198], "rank_score": 0.8807322207501915} -{"id": "song-etal-2018-structure", "title": "Structure-Infused Copy Mechanisms for Abstractive Summarization", "abstract": "Seq2seq learning has produced promising results on summarization. However, in many cases, system summaries still struggle to keep the meaning of the original intact. They may miss out important words or relations that play critical roles in the syntactic structure of source sentences. In this paper, we present structure-infused copy mechanisms to facilitate copying important words and relations from the source sentence to summary sentence. The approach naturally combines source dependency structure with the copy mechanism of an abstractive sentence summarizer. Experimental results demonstrate the effectiveness of incorporating source-side syntactic information in the system, and our proposed approach compares favorably to state-of-the-art methods.", "phrases": ["abstractive summarization", "structure-infused copy mechanism", "detail"], "overall_score": 1.934869187854024, "scores": [1.5191632150399181, 0.5822562971328284, 0.540371237591769], "rank_score": 0.8805969165881719} -{"id": "pratapa-etal-2018-language", "title": "Language Modeling for Code-Mixing: The Role of Linguistic Theory based Synthetic Data", "abstract": "Training language models for Code-mixed (CM) language is known to be a difficult problem because of lack of data compounded by the increased confusability due to the presence of more than one language. 
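The k-shortest-paths extraction behind the n-best list study above (Hasan et al., 2007) can be illustrated with a best-first expansion of partial paths through a word graph. This toy sketch assumes an acyclic graph with non-negative edge costs and makes no claim to the paper's optimized implementation.

import heapq

def k_shortest_paths(graph, source, sink, k):
    # graph: {node: [(edge_cost, next_node), ...]}; returns up to k
    # lowest-cost source->sink paths, cheapest first.
    heap = [(0.0, [source])]
    found = []
    while heap and len(found) < k:
        cost, path = heapq.heappop(heap)
        node = path[-1]
        if node == sink:
            found.append((cost, path))
            continue
        for edge_cost, nxt in graph.get(node, []):
            heapq.heappush(heap, (cost + edge_cost, path + [nxt]))
    return found

# toy word graph: two hypotheses sharing a suffix
g = {"<s>": [(1.0, "the"), (1.5, "a")], "the": [(0.5, "cat")],
     "a": [(0.2, "cat")], "cat": [(0.0, "</s>")]}
print(k_shortest_paths(g, "<s>", "</s>", 2))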
We present a computational technique for creation of grammatically valid artificial CM data based on the Equivalence Constraint Theory. We show that when training examples are sampled appropriately from this synthetic data and presented in certain order (aka training curriculum) along with monolingual and real CM data, it can significantly reduce the perplexity of an RNN-based language model. We also show that randomly generated CM data does not help in decreasing the perplexity of the LMs.", "phrases": ["linguistic theory", "synthetic data", "code-mixed data"], "overall_score": 2.86866567173162, "scores": [1.2453265446351955, 0.8225878269519598, 0.5735042956512788], "rank_score": 0.8804728890794781} -{"id": "camacho-collados-etal-2015-nasari", "title": "NASARI: a Novel Approach to a Semantically-Aware Representation of Items", "abstract": "The semantic representation of individual word senses and concepts is of fundamental importance to several applications in Natural Language Processing. To date, concept modeling techniques have in the main based their representation either on lexicographic resources, such as WordNet, or on encyclopedic resources, such as Wikipedia. We propose a vector representation technique that combines the complementary knowledge of both these types of resource. Thanks to its use of explicit semantics combined with a novel cluster-based dimensionality reduction and an effective weighting scheme, our representation attains state-of-the-art performance on multiple datasets in two standard benchmarks: word similarity and sense clustering. We are releasing our vector representations at http://lcl.uniroma1.it/nasari/.", "phrases": ["wikipedia", "nasari", "sense inventory"], "overall_score": 1.5772685656657226, "scores": [1.2479863413051118, 0.8693986947041576, 0.5234860062605811], "rank_score": 0.8802903474232835} -{"id": "strassel-etal-2006-integrated", "title": "Integrated Linguistic Resources for Language Exploitation Technologies", "abstract": "Linguistic Data Consortium has recently embarked on an effort to create integrated linguistic resources and related infrastructure for language exploitation technologies within the DARPA GALE (Global Autonomous Language Exploitation) Program. GALE targets an end-to-end system consisting of three major engines: Transcription, Translation and Distillation. Multilingual speech or text from a variety of genres is taken as input and English text is given as output, with information of interest presented in an integrated and consolidated fashion to the end user. GALE's goals require a quantum leap in the performance of human language technology, while also demanding solutions that are more intelligent, more robust, more adaptable, more efficient and more integrated. LDC has responded to this challenge with a comprehensive approach to linguistic resource development designed to support GALE's research and evaluation needs and to provide lasting resources for the larger Human Language Technology community.", "phrases": ["linguistic data consortium", "darpa gale", "translation agency"], "overall_score": 1.5770596046117993, "scores": [1.255783248480776, 0.8566140428277685, 0.5281238808071159], "rank_score": 0.8801737240385535} -{"id": "gaspari-hutchins-2007-online", "title": "Online and free! 
Ten years of online machine translation: origins, developments, current use and future prospects", "abstract": "Marking the ten-year anniversary of the launch of Babel Fish, the first ever free online machine translation (MT) service that went live on the Internet in late 1997, this paper sketches the background that led to its development, giving an account of its origins and of the early stages of its evolution. Several competitors have entered the field of web-based MT over the last decade, and the paper offers a review of the most significant contributions in the literature with a particular focus on two key issues: firstly, the role that these online MT tools have played in meeting the translation needs of the users, and secondly the impact that they have had on the MT-related industry and business. Information coming from a variety of sources, including data on current usage supplied by the online MT providers themselves for the purposes of this study, testifies to the massive increase in the use of the leading multilingual online MT services over the last ten years. On this basis, the conclusion assesses the future prospects of Internet-based MT.", "phrases": ["online machine translation", "origin", "future prospect"], "overall_score": 0.966841039340713, "scores": [0.9660376374388633, 0.8384575407087085, 0.8356747416141629], "rank_score": 0.8800566399205781} -{"id": "jadhav-rajan-2018-extractive", "title": "Extractive Summarization with SWAP-NET: Sentences and Words from Alternating Pointer Networks", "abstract": "We present a new neural sequence-to-sequence model for extractive summarization called SWAP-NET (Sentences and Words from Alternating Pointer Networks). Extractive summaries comprising a salient subset of input sentences, often also contain important key words. Guided by this principle, we design SWAP-NET that models the interaction of key words and salient sentences using a new two-level pointer network based architecture. SWAP-NET identifies both salient sentences and key words in an input document, and then combines them to form the extractive summary. Experiments on large scale benchmark corpora demonstrate the efficacy of SWAP-NET that outperforms state-of-the-art extractive summarizers.", "phrases": ["swap-net", "pointer networks", "extractive summarization"], "overall_score": 1.7120539417277016, "scores": [0.9601662065058834, 0.8437627317671369, 0.8355361098313707], "rank_score": 0.8798216827014637} -{"id": "petrov-klein-2007-improved", "title": "Improved Inference for Unlexicalized Parsing", "abstract": "We present several improvements to unlexicalized parsing with hierarchically state-split PCFGs. First, we present a novel coarse-to-fine method in which a grammar\u2019s own hierarchical projections are used for incremental pruning, including a method for efficiently computing projections of a grammar without a treebank. In our experiments, hierarchical pruning greatly accelerates parsing with no loss in empirical accuracy. Second, we compare various inference procedures for state-split PCFGs from the standpoint of risk minimization, paying particular attention to their practical tradeoffs. 
Finally, we present multilingual experiments which show that parsing with hierarchical state-splitting is fast and accurate in multiple languages and domains, even without any language-specific tuning.", "phrases": ["procedure", "berkeley parser", "subcategorie", "dependency structure"], "overall_score": 3.152828755206084, "scores": [1.8192148740262364, 0.5891081487248901, 0.5839301245946198, 0.5270013166079667], "rank_score": 0.8798136159884282} -{"id": "rashkin-etal-2016-connotation", "title": "Connotation Frames: A Data-Driven Investigation", "abstract": "Through a particular choice of a predicate (e.g., \"x violated y\"), a writer can subtly connote a range of implied sentiments and presupposed facts about the entities x and y: (1) writer's perspective: projecting x as an \"antagonist\"and y as a \"victim\", (2) entities' perspective: y probably dislikes x, (3) effect: something bad happened to y, (4) value: y is something valuable, and (5) mental state: y is distressed by the event. We introduce connotation frames as a representation formalism to organize these rich dimensions of connotation using typed relations. First, we investigate the feasibility of obtaining connotative labels through crowdsourcing experiments. We then present models for predicting the connotation frames of verb predicates based on their distributional word representations and the interplay between different types of connotative relations. Empirical results confirm that connotation frames can be induced from various data sources that reflect how people use language and give rise to the connotative meanings. We conclude with analytical results that show the potential use of connotation frames for analyzing subtle biases in online news media.", "phrases": ["predicate", "writer", "connotation frames"], "overall_score": 2.1082958298301833, "scores": [0.999998086169834, 1.085775443080942, 0.5519094259807651], "rank_score": 0.8792276517438471} -{"id": "pyysalo-etal-2007-unification", "title": "On the unification of syntactic annotations under the Stanford dependency scheme: A case study on BioInfer and GENIA", "abstract": "Several incompatible syntactic annotation schemes are currently used by parsers and corpora in biomedical information extraction. The recently introduced Stanford dependency scheme has been suggested to be a suitable unifying syntax formalism. In this paper, we present a step towards such unification by creating a conversion from the Link Grammar to the Stanford scheme. Further, we create a version of the BioInfer corpus with syntactic annotation in this scheme. We present an application-oriented evaluation of the transformation and assess the suitability of the scheme and our conversion to the unification of the syntactic annotations of BioInfer and the GENIA Treebank. \n \nWe find that a highly reliable conversion is both feasible to create and practical, increasing the applicability of both the parser and the corpus to information extraction.", "phrases": ["unification", "syntactic annotation", "bioinfer"], "overall_score": 1.7105876598540728, "scores": [0.9118747514544385, 0.8641421615872671, 0.8611875755897712], "rank_score": 0.8790681628771589} -{"id": "wiseman-etal-2016-learning", "title": "Learning Global Features for Coreference Resolution", "abstract": "There is compelling evidence that coreference prediction would benefit from modeling global information about entity-clusters. 
Yet, state-of-the-art performance can be achieved with systems treating each mention prediction independently, which we attribute to the inherent difficulty of crafting informative cluster-level features. We instead propose to use recurrent neural networks (RNNs) to learn latent, global representations of entity clusters directly from their mentions. We show that such representations are especially useful for the prediction of pronominal mentions, and can be incorporated into an end-to-end coreference system that outperforms the state of the art without requiring any additional search.", "phrases": ["coreference resolution", "mention", "recurrent neural network", "entity-level feature"], "overall_score": 2.319651991856826, "scores": [0.9555580786092099, 1.507759050532563, 0.526456539767586, 0.5261059837619263], "rank_score": 0.8789699131678212} -{"id": "mundra-etal-2021-wassa", "title": "WASSA@IITK at WASSA 2021: Multi-task Learning and Transformer Finetuning for Emotion Classification and Empathy Prediction", "abstract": "This paper describes our contribution to the WASSA 2021 shared task on Empathy Prediction and Emotion Classification. The broad goal of this task was to model an empathy score, a distress score and the overall level of emotion of an essay written in response to a newspaper article associated with harm to someone. We have used the ELECTRA model abundantly and also advanced deep learning approaches like multi-task learning. Additionally, we also leveraged standard machine learning techniques like ensembling. Our system achieves a Pearson Correlation Coefficient of 0.533 on sub-task I and a macro F1 score of 0.5528 on sub-task II. We ranked 1st in Emotion Classification sub-task and 3rd in Empathy Prediction sub-task.", "phrases": ["multi-task learning", "emotion classification", "empathy prediction"], "overall_score": 0.9656336482111507, "scores": [0.9062769277279974, 0.8771105234553346, 0.853485424274376], "rank_score": 0.8789576251525694} -{"id": "bapna-etal-2018-training", "title": "Training Deeper Neural Machine Translation Models with Transparent Attention", "abstract": "While current state-of-the-art NMT models, such as RNN seq2seq and Transformers, possess a large number of parameters, they are still shallow in comparison to convolutional models used for both text and vision applications. In this work we attempt to train significantly (2-3x) deeper Transformer and Bi-RNN encoders for machine translation. We propose a simple modification to the attention mechanism that eases the optimization of deeper models, and results in consistent gains of 0.7-1.1 BLEU on the benchmark WMT'14 English-German and WMT'15 Czech-English tasks for both architectures.", "phrases": ["transparent attention", "optimization", "deep model"], "overall_score": 2.319054688624635, "scores": [0.8245936187632474, 0.9858557949864399, 0.8257813296302499], "rank_score": 0.8787435811266459} -{"id": "roy-roth-2018-mapping", "title": "Mapping to Declarative Knowledge for Word Problem Solving", "abstract": "Math word problems form a natural abstraction to a range of quantitative reasoning problems, such as understanding financial news, sports results, and casualties of war. Solving such problems requires the understanding of several mathematical concepts such as dimensional analysis, subset relationships, etc. In this paper, we develop declarative rules which govern the translation of natural language description of these concepts to math expressions. 
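Transparent attention, as in the Bapna et al. (2018) record above, amounts to a learned softmax-weighted mixture over all encoder layer outputs, with one mixture per decoder layer so that gradients reach the lowest encoder layers directly. A minimal PyTorch sketch with illustrative shapes:

import torch
import torch.nn as nn

class TransparentAttention(nn.Module):
    # One softmax-normalized weight vector per decoder layer over the
    # embedding layer plus every encoder layer output.
    def __init__(self, n_enc_layers, n_dec_layers):
        super().__init__()
        self.w = nn.Parameter(torch.zeros(n_dec_layers, n_enc_layers + 1))

    def forward(self, enc_states):
        # enc_states: list of (batch, seq_len, hidden) tensors
        stacked = torch.stack(enc_states, dim=0)   # (L+1, batch, seq_len, hidden)
        alphas = torch.softmax(self.w, dim=-1)     # (n_dec_layers, L+1)
        # one mixed encoder representation per decoder layer
        return torch.einsum("dl,lbth->dbth", alphas, stacked)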
We then present a framework for incorporating such declarative knowledge into word problem solving. Our method learns to map arithmetic word problem text to math expressions, by learning to select the relevant declarative knowledge for each operation of the solution expression. This provides a way to handle multiple concepts in the same problem while, at the same time, supporting interpretability of the answer expression. Our method models the mapping to declarative knowledge as a latent variable, thus removing the need for expensive annotations. Experimental evaluation suggests that our domain knowledge based solver outperforms all other systems, and that it generalizes better in the realistic case where the training data it is exposed to is biased in a different way than the test data.", "phrases": ["declarative knowledge", "math word problem", "mapping"], "overall_score": 2.1062932899243667, "scores": [0.9265106609359933, 0.8031898506606983, 0.9054770716173146], "rank_score": 0.878392527738002} -{"id": "reddy-waxmonsky-2009-substring", "title": "Substring-based Transliteration with Conditional Random Fields", "abstract": "Motivated by phrase-based translation research, we present a transliteration system where characters are grouped into substrings to be mapped atomically into the target language. We show how this substring representation can be incorporated into a Conditional Random Field model that uses local context and phonemic information.", "phrases": ["transliteration", "substring", "local context", "phonemic information"], "overall_score": 1.4133400122014625, "scores": [0.9421532466266118, 0.8889327480198319, 0.8546336587493738, 0.8269104425708893], "rank_score": 0.8781575239916768} -{"id": "cahill-etal-2007-pruning", "title": "Pruning the Search Space of a Hand-Crafted Parsing System with a Probabilistic Parser", "abstract": "The demand for deep linguistic analysis for huge volumes of data means that it is increasingly important that the time taken to parse such data is minimized. In the XLE parsing model which is a hand-crafted, unification-based parsing system, most of the time is spent on unification, searching for valid f-structures (dependency attribute-value matrices) within the space of the many valid c-structures (phrase structure trees). We carried out an experiment to determine whether pruning the search space at an earlier stage of the parsing process results in an improvement in the overall time taken to parse, while maintaining the quality of the f-structures produced. We retrained a state-of-the-art probabilistic parser and used it to pre-bracket input to the XLE, constraining the valid c-structure space for each sentence. We evaluated against the PARC 700 Dependency Bank and show that it is possible to decrease the time taken to parse by ~18% while maintaining accuracy.", "phrases": ["search space", "probabilistic parser", "c-structure"], "overall_score": 1.217123391388644, "scores": [0.9254149306939066, 0.83848728735857, 0.8700046033070538], "rank_score": 0.8779689404531767} -{"id": "yang-etal-2018-unsupervised", "title": "Unsupervised Neural Machine Translation with Weight Sharing", "abstract": "Unsupervised neural machine translation (NMT) is a recently proposed approach for machine translation which aims to train the model without using any labeled data. 
The models proposed for unsupervised NMT often use only one shared encoder to map the pairs of sentences from different languages to a shared-latent space, which is weak in keeping the unique and internal characteristics of each language, such as the style, terminology, and sentence structure. To address this issue, we introduce an extension by utilizing two independent encoders but sharing some partial weights which are responsible for extracting high-level representations of the input sentences. Besides, two different generative adversarial networks (GANs), namely the local GAN and global GAN, are proposed to enhance the cross-language translation. With this new approach, we achieve significant improvements on English-German, English-French and Chinese-to-English translation tasks.", "phrases": ["weight", "weakness", "independent encoder", "back-translation", "monolingual data"], "overall_score": 2.7526233172882777, "scores": [1.3506542378865762, 1.1058594721420307, 0.8649767340378904, 0.5458366060967434, 0.5221298069156444], "rank_score": 0.877891371415777} -{"id": "yuan-etal-2020-one", "title": "One Size Does Not Fit All: Generating and Evaluating Variable Number of Keyphrases", "abstract": "Different texts shall by nature correspond to different number of keyphrases. This desideratum is largely missing from existing neural keyphrase generation models. In this study, we address this problem from both modeling and evaluation perspectives. We first propose a recurrent generative model that generates multiple keyphrases as delimiter-separated sequences. Generation diversity is further enhanced with two novel techniques by manipulating decoder hidden states. In contrast to previous approaches, our model is capable of generating diverse keyphrases and controlling number of outputs. We further propose two evaluation metrics tailored towards the variable-number generation. We also introduce a new dataset StackEx that expands beyond the only existing genre (i.e., academic writing) in keyphrase generation tasks. With both previous and new evaluation metrics, our model outperforms strong baselines on all datasets.", "phrases": ["keyphrase", "generative model", "previous approach", "end"], "overall_score": 2.181061398722447, "scores": [1.5944469677489372, 0.8249580849576453, 0.5496550017543849, 0.5418346488205816], "rank_score": 0.8777236758203872} -{"id": "gu-etal-2020-token", "title": "Token-level Adaptive Training for Neural Machine Translation", "abstract": "There exists a token imbalance phenomenon in natural language as different tokens appear with different frequencies, which leads to different learning difficulties for tokens in Neural Machine Translation (NMT). The vanilla NMT model usually adopts trivial equal-weighted objectives for target tokens with different frequencies and tends to generate more high-frequency tokens and less low-frequency tokens compared with the golden token distribution. However, low-frequency tokens may carry critical semantic information that will affect the translation quality once they are neglected. In this paper, we explored target token-level adaptive objectives based on token frequencies to assign appropriate weights for each target token during training. We aimed that those meaningful but relatively low-frequency words could be assigned with larger weights in objectives to encourage the model to pay more attention to these tokens. 
Our method yields consistent improvements in translation quality on the ZH-EN, EN-RO, and EN-DE translation tasks, especially on sentences that contain more low-frequency tokens, where we obtain BLEU increases of 1.68, 1.02, and 0.52 over the baseline, respectively. Further analyses show that our method can also improve the lexical diversity of translation.", "phrases": ["neural machine translation", "low-frequency token", "weight"], "overall_score": 1.7077324359737134, "scores": [0.9806642510923363, 1.125528618067134, 0.5266097350143729], "rank_score": 0.8776008680579478} -{"id": "yu-etal-2018-transition", "title": "Transition-based Neural RST Parsing with Implicit Syntax Features", "abstract": "Syntax has been a useful source of information for statistical RST discourse parsing. Under the neural setting, a common approach integrates syntax by a recursive neural network (RNN), requiring discrete output trees produced by a supervised syntax parser. In this paper, we propose an implicit syntax feature extraction approach, using hidden-layer vectors extracted from a neural syntax parser. In addition, we propose a simple transition-based model as the baseline, further enhancing it with a dynamic oracle. Experiments on the standard dataset show that our baseline model with a dynamic oracle is highly competitive. When implicit syntax features are integrated, we are able to obtain further improvements, better than using explicit Tree-RNN.", "phrases": ["implicit syntax feature", "neural syntax parser", "shift-reduce parser", "transition"], "overall_score": 2.433068651255525, "scores": [1.7989479460725197, 0.626382270963456, 0.552991377739895, 0.5318544825328744], "rank_score": 0.8775440193271862} -{"id": "rumshisky-batiukova-2008-polysemy", "title": "Polysemy in Verbs: Systematic Relations between Senses and their Effect on Annotation", "abstract": "Sense inventories for polysemous predicates often comprise a number of related senses. In this paper, we examine different types of relations within sense inventories and give a qualitative analysis of the effects they have on decisions made by the annotators and on annotator error. We also discuss some common traps and pitfalls in the design of sense inventories. We use a data set developed specifically for the task of annotating sense distinctions dependent predominantly on the semantics of the arguments and only to a lesser extent on the syntactic frame.", "phrases": ["annotator", "sense inventory", "polysemy"], "overall_score": 1.4123035449335877, "scores": [0.9268679305952676, 0.8628852918172162, 0.8427873695963719], "rank_score": 0.8775135306696186} -{"id": "hermann-blunsom-2013-role", "title": "The Role of Syntax in Vector Space Models of Compositional Semantics", "abstract": "Modelling the compositional process by which the meaning of an utterance arises from the meaning of its parts is a fundamental task of Natural Language Processing. In this paper we draw upon recent advances in the learning of vector space representations of sentential semantics and the transparent interface between syntax and semantics provided by Combinatory Categorial Grammar to introduce Combinatory Categorial Autoencoders. This model leverages the CCG combinatory operators to guide a non-linear transformation of meaning within a sentence. 
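The token-level adaptive objective in the Gu et al. (2020) record above re-weights the per-token cross-entropy by target-token frequency. The sketch below uses one simple exponential weighting as an assumed stand-in for the paper's exact schemes; freq is a vocabulary-sized tensor of corpus counts.

import torch
import torch.nn.functional as F

def adaptive_token_weights(targets, freq, a=1.0):
    # Rarer target tokens receive larger loss weights, in (1, 1 + a].
    rel = freq[targets].float() / freq.float().max()
    return 1.0 + a * torch.exp(-5.0 * rel)

def weighted_nll(logits, targets, freq):
    # logits: (batch, seq_len, vocab); targets: (batch, seq_len)
    nll = F.cross_entropy(logits.transpose(1, 2), targets, reduction="none")
    return (adaptive_token_weights(targets, freq) * nll).mean()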
We use this model to learn high-dimensional embeddings for sentences and evaluate them in a range of tasks, demonstrating that the incorporation of syntax allows a concise model to learn representations that are both effective and general.", "phrases": ["syntax", "compositionality", "sentiment classification"], "overall_score": 2.671224273390151, "scores": [0.8589329654468293, 0.9203805373555765, 0.8528473644903872], "rank_score": 0.8773869557642643} -{"id": "hande-etal-2020-kancmd", "title": "KanCMD: Kannada CodeMixed Dataset for Sentiment Analysis and Offensive Language Detection", "abstract": "We introduce the Kannada CodeMixed Dataset (KanCMD), a multi-task learning dataset for sentiment analysis and offensive language identification. The KanCMD dataset highlights two real-world issues from social media text. First, it contains actual comments in code-mixed text posted by users on the YouTube social media platform, rather than monolingual text from textbooks. Second, it has been annotated for two tasks, namely sentiment analysis and offensive language detection, for the under-resourced Kannada language. Hence, KanCMD is meant to stimulate research on real-world code-mixed social media text and multi-task learning in the under-resourced Kannada language. KanCMD was obtained by crawling YouTube, and each comment was annotated by a minimum of three annotators. We release the 7,671 KanCMD comments for multi-task learning research purposes.", "phrases": ["kannada codemixed dataset", "sentiment analysis", "offensive language detection"], "overall_score": 0.9637277213316555, "scores": [0.9269461394955286, 0.8960495957002949, 0.8086725920354876], "rank_score": 0.8772227757437703} -{"id": "lee-etal-2019-sumbt", "title": "SUMBT: Slot-Utterance Matching for Universal and Scalable Belief Tracking", "abstract": "In goal-oriented dialog systems, belief trackers estimate the probability distribution of slot-values at every dialog turn. Previous neural approaches have modeled domain- and slot-dependent belief trackers, and have difficulty in adding new slot-values, resulting in a lack of flexibility in domain ontology configurations. In this paper, we propose a new universal and scalable belief tracker, called the slot-utterance matching belief tracker (SUMBT). The model learns the relations between domain-slot-types and slot-values appearing in utterances through attention mechanisms based on contextual semantic vectors. Furthermore, the model predicts slot-value labels in a non-parametric way. In our experiments on two dialog corpora, WOZ 2.0 and MultiWOZ, the proposed model showed performance improvements in comparison with slot-dependent methods and achieved state-of-the-art joint accuracy.", "phrases": ["ontology", "matching belief tracker", "slot", "sumbt"], "overall_score": 2.375374573594687, "scores": [1.5310486527851395, 0.8527073631294242, 0.5969541994589904, 0.5279021030064817], "rank_score": 0.877153079595009} -{"id": "phan-ogunbona-2020-modelling", "title": "Modelling Context and Syntactical Features for Aspect-based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis (ABSA) consists of two conceptual tasks, namely aspect extraction and aspect sentiment classification. Rather than considering the tasks separately, we build an end-to-end ABSA solution. Previous work on ABSA did not fully leverage the importance of syntactical information. Hence, the aspect extraction model often failed to detect the boundaries of multi-word aspect terms. 
On the other hand, the aspect sentiment classifier was unable to account for the syntactical correlation between aspect terms and the context words. This paper explores the grammatical aspects of the sentence and employs the self-attention mechanism for syntactical learning. We combine part-of-speech embeddings, dependency-based embeddings and contextualized embeddings (e.g. BERT, RoBERTa) to enhance the performance of the aspect extractor. We also propose a syntactic relative distance to de-emphasize the adverse effects of unrelated words that have weak syntactic connections with the aspect terms. This increases the accuracy of the aspect sentiment classifier. Our solutions outperform the state-of-the-art models on the SemEval-2014 dataset in both subtasks.", "phrases": ["sentiment analysis", "aspect term", "syntactic relative distance"], "overall_score": 1.9272005535192973, "scores": [0.9329823544743039, 1.140635619458475, 0.5577023381525481], "rank_score": 0.877106770695109} -{"id": "sun-etal-2020-colake", "title": "CoLAKE: Contextualized Language and Knowledge Embedding", "abstract": "With the emerging branch of incorporating factual knowledge into pre-trained language models such as BERT, most existing models consider shallow, static, and separately pre-trained entity embeddings, which limits the performance gains of these models. Few works explore the potential of deep contextualized knowledge representation when injecting knowledge. In this paper, we propose the Contextualized Language and Knowledge Embedding (CoLAKE), which jointly learns contextualized representation for both language and knowledge with the extended MLM objective. Instead of injecting only entity embeddings, CoLAKE extracts the knowledge context of an entity from large-scale knowledge bases. To handle the heterogeneity of knowledge context and language context, we integrate them in a unified data structure, word-knowledge graph (WK graph). CoLAKE is pre-trained on large-scale WK graphs with the modified Transformer encoder. We conduct experiments on knowledge-driven tasks, knowledge probing tasks, and language understanding tasks. Experimental results show that CoLAKE outperforms previous counterparts on most of the tasks. Besides, CoLAKE achieves surprisingly high performance on our synthetic task called word-knowledge graph completion, which shows the superiority of simultaneously contextualizing language and knowledge representation.", "phrases": ["contextualized language", "knowledge embedding", "colake"], "overall_score": 1.8234189536329741, "scores": [0.9439630823819367, 0.9063435478292498, 0.7803308516580484], "rank_score": 0.8768791606230782} -{"id": "tao-etal-2006-unsupervised", "title": "Unsupervised Named Entity Transliteration Using Temporal and Phonetic Correlation", "abstract": "In this paper we investigate unsupervised name transliteration using comparable corpora, corpora where texts in the two languages deal in some of the same topics --- and therefore share references to named entities --- but are not translations of each other. We present two distinct methods for transliteration, one approach using an unsupervised phonetic transliteration method, and the other using the temporal distribution of candidate pairs. Each of these approaches works quite well, but by combining the approaches one can achieve even better results. 
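The syntactic relative distance proposed in the Phan and Ogunbona (2020) record above is the shortest-path distance over the dependency tree between each context word and the aspect term, later used to down-weight syntactically remote words. A minimal BFS sketch; the toy tree and token indices are invented for illustration.

from collections import deque

def syntactic_relative_distance(edges, aspect_idx, n_tokens):
    # edges: list of (head, dependent) token-index pairs of a dependency tree.
    adj = {i: [] for i in range(n_tokens)}
    for h, d in edges:
        adj[h].append(d)
        adj[d].append(h)
    dist = [float("inf")] * n_tokens
    dist[aspect_idx] = 0
    q = deque([aspect_idx])
    while q:
        u = q.popleft()
        for v in adj[u]:
            if dist[v] == float("inf"):
                dist[v] = dist[u] + 1
                q.append(v)
    return dist

# "the battery life is great", aspect = token 1 ("battery")
print(syntactic_relative_distance([(2, 0), (2, 1), (4, 2), (4, 3)], 1, 5))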
We believe that the novelty of our approach lies in the phonetic-based scoring method, which is based on a combination of carefully crafted phonetic features, and empirical results from the pronunciation errors of second-language learners of English. Unlike previous approaches to transliteration, this method can in principle work with any pair of languages in the absence of a training dictionary, provided one has an estimate of the pronunciation of words in text.", "phrases": ["transliteration", "comparable corpora", "phonetic mapping"], "overall_score": 2.248881768059877, "scores": [1.208883110347021, 0.8494740389010441, 0.5719658567611489], "rank_score": 0.8767743353364046} -{"id": "ramanathan-etal-2009-case", "title": "Case markers and Morphology: Addressing the crux of the fluency problem in English-Hindi SMT", "abstract": "We report in this paper our work on accurately generating case markers and suffixes in English-to-Hindi SMT. Hindi is a relatively free word-order language, and makes use of a comparatively richer set of case markers and morphological suffixes for correct meaning representation. From our experience of large-scale English-Hindi MT, we are convinced that fluency and fidelity in the Hindi output get an order of magnitude facelift if accurate case markers and suffixes are produced. Now, the moot question is: what entity on the English side encodes the information contained in case markers and suffixes on the Hindi side? Our studies of correspondences in the two languages show that case markers and suffixes in Hindi are predominantly determined by the combination of suffixes and semantic relations on the English side. We, therefore, augment the aligned corpus of the two languages, with the correspondence of English suffixes and semantic relations with Hindi suffixes and case markers. Our results on 400 test sentences, translated using an SMT system trained on around 13000 parallel sentences, show that suffix + semantic relation \u2192 case marker/suffix is a very useful translation factor, in the sense of making a significant difference to output quality as indicated by subjective evaluation as well as BLEU scores.", "phrases": ["fluency", "suffix", "semantic relation", "english-hindi smt system"], "overall_score": 2.10225236743608, "scores": [0.9015472766334603, 0.8791841873974445, 0.8666735138626223, 0.8594243507830794], "rank_score": 0.8767073321691516} -{"id": "rosa-etal-2013-deepfix", "title": "Deepfix: Statistical Post-editing of Statistical Machine Translation Using Deep Syntactic Analysis", "abstract": "Deepfix is a statistical post-editing system for improving the quality of statistical machine translation outputs. It attempts to correct errors in verb-noun valency using deep syntactic analysis and a simple probabilistic model of valency. 
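The temporal cue in the Tao et al. (2006) record above scores a candidate transliteration pair by how strongly the two names' time-binned frequency profiles co-vary across a comparable corpus. A small NumPy sketch with invented daily counts:

import numpy as np

def temporal_similarity(freq_src, freq_tgt):
    # Pearson correlation between date-binned frequency profiles of a
    # candidate name pair over the same date range.
    x = (freq_src - freq_src.mean()) / (freq_src.std() + 1e-8)
    y = (freq_tgt - freq_tgt.mean()) / (freq_tgt.std() + 1e-8)
    return float((x * y).mean())

en = np.array([0, 3, 8, 2, 0, 0, 1], dtype=float)
zh = np.array([0, 2, 9, 3, 0, 1, 0], dtype=float)
print(temporal_similarity(en, zh))  # close to 1 for names that burst together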
On the English-to-Czech translation pair, we show that statistical post-editing of statistical machine translation leads to an improvement of the translation quality when helped by deep linguistic knowledge.", "phrases": ["post-editing", "statistical machine translation", "deepfix"], "overall_score": 1.4107356891848821, "scores": [0.9297382963903431, 0.8729251078476309, 0.8269546971238187], "rank_score": 0.8765393671205977} -{"id": "dellorletta-etal-2011-read", "title": "READ\u2013IT: Assessing Readability of Italian Texts with a View to Text Simplification", "abstract": "In this paper, we propose a new approach to readability assessment with a specific view to the task of text simplification: the intended audience includes people with low literacy skills and/or with mild cognitive impairment. READ-IT represents the first advanced readability assessment tool for what concerns Italian, which combines traditional raw text features with lexical, morpho-syntactic and syntactic information. In READ-IT readability assessment is carried out with respect to both documents and sentences where the latter represents an important novelty of the proposed approach creating the prerequisites for aligning the readability assessment step with the text simplification process. READ-IT shows a high accuracy in the document classification task and promising results in the sentence classification scenario.", "phrases": ["text simplification", "read-it", "readability assessment tool"], "overall_score": 2.177889639622075, "scores": [0.8677189483443665, 0.887165181725876, 0.8744576681110466], "rank_score": 0.8764472660604298} -{"id": "fritzinger-fraser-2010-avoid", "title": "How to Avoid Burning Ducks: Combining Linguistic Analysis and Corpus Statistics for German Compound Processing", "abstract": "Compound splitting is an important problem in many Nlp applications which must be solved in order to address issues of data sparsity. Previous work has shown that linguistic approaches for German compound splitting produce a correct splitting more often, but corpus-driven approaches work best for phrase-based statistical machine translation from German to English, a worrisome contradiction. We address this situation by combining linguistic analysis with corpus-driven statistics and obtaining better results in terms of both producing splittings according to a gold standard and statistical machine translation performance.", "phrases": ["linguistic analysis", "statistic", "compound splitting"], "overall_score": 2.0180541570570774, "scores": [0.8988576174750356, 1.2092403122434976, 0.5211914240568878], "rank_score": 0.876429784591807} -{"id": "li-etal-2020-graph-tree", "title": "Graph-to-Tree Neural Networks for Learning Structured Input-Output Translation with Applications to Semantic Parsing and Math Word Problem", "abstract": "The celebrated Seq2Seq technique and its numerous variants achieve excellent performance on many tasks such as neural machine translation, semantic parsing, and math word problem solving. However, these models either only consider input objects as sequences while ignoring the important structural information for encoding, or they simply treat output objects as sequence outputs instead of structural objects for decoding. In this paper, we present a novel Graph-to-Tree Neural Networks, namely Graph2Tree consisting of a graph encoder and a hierarchical tree decoder, that encodes an augmented graph-structured input and decodes a tree-structured output. 
In particular, we investigated our model for solving two problems, neural semantic parsing and math word problem solving. Our extensive experiments demonstrate that our Graph2Tree model outperforms or matches the performance of other state-of-the-art models on these tasks.", "phrases": ["semantic parsing", "math word problem", "graph-to-tree neural networks"], "overall_score": 1.7052840101458562, "scores": [0.9403972027297866, 0.8580595914124575, 0.8305710841085463], "rank_score": 0.8763426260835967} -{"id": "castillo-estrella-2012-semantic", "title": "Semantic Textual Similarity for MT evaluation", "abstract": "This paper describes the system used for our participation in the WMT12 Machine Translation evaluation shared task. \n \nWe also present a new approach to Machine Translation evaluation based on the recently defined Semantic Textual Similarity task. This problem is addressed using a textual entailment engine entirely based on WordNet semantic features. \n \nWe describe results for the Spanish-English, Czech-English and German-English language pairs according to our submission to the Eighth Workshop on Statistical Machine Translation. Our first experiments report a competitive score at the system level.", "phrases": ["semantic textual similarity", "pipeline", "sagan"], "overall_score": 1.5701773947515598, "scores": [0.9500645540738458, 0.85372410827988, 0.8252094063904687], "rank_score": 0.8763326895813982} -{"id": "gregoire-2007-design", "title": "Design and Implementation of a Lexicon of Dutch Multiword Expressions", "abstract": "This paper describes the design and implementation of a lexicon of Dutch multiword expressions (MWEs). No exhaustive research on a standard lexical representation of MWEs has been done for Dutch before. The approach taken is innovative, since it is based on the Equivalence Class Method. Furthermore, the selection of the lexical entries and their properties is corpus-based. The design of the lexicon and the standard representation will be tested in Dutch NLP systems. The purpose of the current paper is to give an overview of the decisions made in order to come to a standard lexical representation and to discuss the description fields this representation comprises.", "phrases": ["implementation", "syntactic fixedness", "idiomatic expression"], "overall_score": 2.312125560881675, "scores": [0.8235187731482511, 1.2399398228617766, 0.5648953259778134], "rank_score": 0.8761179739959472} -{"id": "wu-etal-2021-one", "title": "One Teacher is Enough? Pre-trained Language Model Distillation from Multiple Teachers", "abstract": "Pre-trained language models (PLMs) achieve great success in NLP. However, their huge model sizes hinder their applications in many practical systems. Knowledge distillation is a popular technique to compress PLMs, which learns a small student model from a large teacher PLM. However, the knowledge learned from a single teacher may be limited and even biased, resulting in a low-quality student model. In this paper, we propose a multi-teacher knowledge distillation framework named MT-BERT for pre-trained language model compression, which can train a high-quality student model from multiple teacher PLMs. In MT-BERT we design a multi-teacher co-finetuning method to jointly finetune multiple teacher PLMs in downstream tasks with shared pooling and prediction layers to align their output space for better collaborative teaching. 
In addition, we propose a multi-teacher hidden loss and a multi-teacher distillation loss to transfer the useful knowledge in both hidden states and soft labels from multiple teacher PLMs to the student model. Experiments on three benchmark datasets validate the effectiveness of MT-BERT in compressing PLMs.", "phrases": ["teacher", "plm", "knowledge distillation framework", "language model compression"], "overall_score": 1.4093069519251316, "scores": [1.8350189234750178, 0.5773475628133877, 0.5641267882268077, 0.5261132964800173], "rank_score": 0.8756516427488077} -{"id": "deri-knight-2015-make", "title": "How to Make a Frenemy: Multitape FSTs for Portmanteau Generation", "abstract": "A portmanteau is a type of compound word that fuses the sounds and meanings of two component words; for example, \u201cfrenemy\u201d (friend + enemy) or \u201csmog\u201d (smoke + fog). We develop a system, including a novel multitape FST, that takes an input of two words and outputs possible portmanteaux. Our system is trained on a list of known portmanteaux and their component words, and achieves 45% exact matches in cross-validated experiments.", "phrases": ["frenemy", "multitape fst", "portmanteau"], "overall_score": 1.8204961236278363, "scores": [0.8732297631199627, 0.8403796700672607, 0.9128112963282377], "rank_score": 0.8754735765051537} -{"id": "dusek-etal-2019-automatic", "title": "Automatic Quality Estimation for Natural Language Generation: Ranting (Jointly Rating and Ranking)", "abstract": "We present a recurrent neural network based system for automatic quality estimation of natural language generation (NLG) outputs, which jointly learns to assign numerical ratings to individual outputs and to provide pairwise rankings of two different outputs. The latter is trained using pairwise hinge loss over scores from two copies of the rating network. We use learning to rank and synthetic data to improve the quality of ratings assigned by our system: We synthesise training pairs of distorted system outputs and train the system to rank the less distorted one higher. This leads to a 12% increase in correlation with human ratings over the previous benchmark. We also establish the state of the art on the dataset of relative rankings from the E2E NLG Challenge (Dusek et al., 2019), where synthetic data lead to a 4% accuracy increase over the base model.", "phrases": ["natural language generation", "ranking", "automatic quality estimation"], "overall_score": 0.9615954398202622, "scores": [0.9059561596869906, 0.8682732895589734, 0.8516162191637026], "rank_score": 0.8752818894698889} -{"id": "simon-etal-2013-leveraging", "title": "Leveraging Lexical Cohesion and Disruption for Topic Segmentation", "abstract": "Topic segmentation classically relies on one of two criteria, either finding areas with coherent vocabulary use or detecting discontinuities. In this paper, we propose a segmentation criterion combining both lexical cohesion and disruption, enabling a trade-off between the two. We provide the mathematical formulation of the criterion and an efficient graph based decoding algorithm for topic segmentation. Experimental results on standard textual data sets and on a more challenging corpus of automatically transcribed broadcast news shows demonstrate the benefit of such a combination. Gains were observed in all conditions, with segments of either regular or varying length and abrupt or smooth topic shifts. Long segments benefit more than short segments. 
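The MT-BERT losses in the Wu et al. (2021) record above combine soft-label distillation and hidden-state matching against several teachers. The sketch below averages a temperature-scaled KL term and an MSE term over teachers, assuming equal teacher weights and hidden states already projected to a common size (both assumptions, not necessarily the paper's exact design).

import torch.nn.functional as F

def multi_teacher_distill_loss(student_logits, teacher_logits_list,
                               student_hidden, teacher_hidden_list, T=2.0):
    # Soft-label term: temperature-scaled KL to each teacher, averaged.
    kd = sum(F.kl_div(F.log_softmax(student_logits / T, dim=-1),
                      F.softmax(t / T, dim=-1),
                      reduction="batchmean") * T * T
             for t in teacher_logits_list)
    # Hidden term: MSE to each teacher's hidden states (matching sizes assumed).
    hid = sum(F.mse_loss(student_hidden, th) for th in teacher_hidden_list)
    n = len(teacher_logits_list)
    return kd / n + hid / n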
However the algorithm has proven robust on automatic transcripts with short segments and limited vocabulary reoccurrences.", "phrases": ["lexical cohesion", "disruption", "topic segmentation"], "overall_score": 0.9615400880108376, "scores": [0.9206702160966191, 0.9159877835474791, 0.7890365186009595], "rank_score": 0.8752315060816859} -{"id": "gu-etal-2020-train", "title": "Train No Evil: Selective Masking for Task-Guided Pre-Training", "abstract": "Recently, pre-trained language models mostly follow the pre-train-then-fine-tuning paradigm and have achieved great performance on various downstream tasks. However, since the pre-training stage is typically task-agnostic and the fine-tuning stage usually suffers from insufficient supervised data, the models cannot always well capture the domain-specific and task-specific patterns. In this paper, we propose a three-stage framework by adding a task-guided pre-training stage with selective masking between general pre-training and fine-tuning. In this stage, the model is trained by masked language modeling on in-domain unsupervised data to learn domain-specific patterns and we propose a novel selective masking strategy to learn task-specific patterns. Specifically, we design a method to measure the importance of each token in sequences and selectively mask the important tokens. Experimental results on two sentiment analysis tasks show that our method can achieve comparable or even better performance with less than 50% of computation cost, which indicates our method is both effective and efficient. The source code of this paper can be obtained from .", "phrases": ["selective masking", "pre-training", "downstream task", "important token", "plm"], "overall_score": 1.7031021043911838, "scores": [0.848748766061426, 1.2797573130479225, 0.88423746383595, 0.8364658301274015, 0.5268973685926173], "rank_score": 0.8752213483330635} -{"id": "yin-etal-2020-robustness", "title": "On the Robustness of Language Encoders against Grammatical Errors", "abstract": "We conduct a thorough study to diagnose the behaviors of pre-trained language encoders (ELMo, BERT, and RoBERTa) when confronted with natural grammatical errors. Specifically, we collect real grammatical errors from non-native speakers and conduct adversarial attacks to simulate these errors on clean text data. We use this approach to facilitate debugging models on downstream applications. Results confirm that the performance of all tested models is affected but the degree of impact varies. To interpret model behaviors, we further design a linguistic acceptability task to reveal their abilities in identifying ungrammatical sentences and the position of errors. We find that fixed contextual encoders with a simple classifier trained on the prediction of sentence correctness are able to locate error positions. We also design a cloze test for BERT and discover that BERT captures the interaction between errors and specific tokens in context. 
Our results shed light on understanding the robustness and behaviors of language encoders against grammatical errors.", "phrases": ["robustness", "language encoder", "grammatical error"], "overall_score": 0.9614681465447138, "scores": [0.9091560316014696, 0.9003456936343583, 0.8159963411757692], "rank_score": 0.875166022137199}
-{"id": "purandare-pedersen-2004-word", "title": "Word Sense Discrimination by Clustering Contexts in Vector and Similarity Spaces", "abstract": "This paper systematically compares unsupervised word sense discrimination techniques that cluster instances of a target word that occur in raw text using both vector and similarity spaces. The context of each instance is represented as a vector in a high dimensional feature space. Discrimination is achieved by clustering these context vectors directly in vector space and also by finding pairwise similarities among the vectors and then clustering in similarity space. We employ two different representations of the context in which a target word occurs. First order context vectors represent the context of each instance of a target word as a vector of features that occur in that context. Second order context vectors are an indirect representation of the context based on the average of vectors that represent the words that occur in the context. We evaluate the discriminated clusters by carrying out experiments using sense\u2013tagged instances of 24 SENSEVAL2 words and the well known Line, Hard and Serve sense\u2013tagged corpora.", "phrases": ["cluster", "word sense discrimination", "probability distribution"], "overall_score": 2.426443460504635, "scores": [1.5120754005821775, 0.5755667300416778, 0.5378213299767642], "rank_score": 0.8751544868668732}
-{"id": "sagae-tsujii-2008-shift", "title": "Shift-Reduce Dependency DAG Parsing", "abstract": "Most data-driven dependency parsing approaches assume that sentence structure is represented as trees. Although trees have several desirable properties from both computational and linguistic perspectives, the structure of linguistic phenomena that goes beyond shallow syntax often cannot be fully captured by tree representations. We present a parsing approach that is nearly as simple as current data-driven transition-based dependency parsing frameworks, but outputs directed acyclic graphs (DAGs). We demonstrate the benefits of DAG parsing in two experiments where its advantages over dependency tree parsing can be clearly observed: predicate-argument analysis of English and syntactic analysis of Danish with a representation that includes long-distance dependencies and anaphoric reference links.", "phrases": ["dag", "dependency parsing", "predicate-argument analysis", "long-distance dependency"], "overall_score": 2.3697128879348446, "scores": [1.4117893898212843, 1.0029234538864564, 0.5481886571753776, 0.5373480690404258], "rank_score": 0.875062392480886}
-{"id": "tebbifakhr-etal-2020-automatic", "title": "Automatic Translation for Multiple NLP tasks: a Multi-task Approach to Machine-oriented NMT Adaptation", "abstract": "Although machine translation (MT) traditionally pursues \u201chuman-oriented\u201d objectives, humans are not the only possible consumers of MT output. For instance, when automatic translations are used to feed downstream Natural Language Processing (NLP) components in cross-lingual settings, they should ideally pursue \u201cmachine-oriented\u201d objectives that maximize the performance of these components. Tebbifakhr et al.
(2019) recently proposed a reinforcement learning approach to adapt a generic neural MT (NMT) system by exploiting the reward from a downstream sentiment classifier. But what if the downstream NLP tasks to serve are more than one? How to avoid the costs of adapting and maintaining one dedicated NMT system for each task? We address this problem by proposing a multi-task approach to machine-oriented NMT adaptation, which is capable of serving multiple downstream tasks with a single system. Through experiments with Spanish and Italian data covering three different tasks, we show that our approach can outperform a generic NMT system, and compete with single-task models in most of the settings.", "phrases": ["multi-task approach", "machine-oriented nmt adaptation", "automatic translation"], "overall_score": 1.2128639770277958, "scores": [0.9028766997322635, 0.8701797399541611, 0.8516328277098753], "rank_score": 0.8748964224654333}
-{"id": "fath-etal-2020-fintan", "title": "Fintan - Flexible, Integrated Transformation and Annotation eNgineering", "abstract": "We introduce the Flexible and Integrated Transformation and Annotation eNgineering (Fintan) platform for converting heterogeneous linguistic resources to RDF. With its modular architecture, workflow management and visualization features, Fintan facilitates the development of complex transformation pipelines by integrating generic RDF converters and augmenting them with extended graph processing capabilities: Existing converters can be easily deployed to the system by means of an ontological data structure which renders their properties and the dependencies between transformation steps. Development of subsequent graph transformation steps for resource transformation, annotation engineering or entity linking is further facilitated by a novel visual rendering of SPARQL queries. A graphical workflow manager allows users to easily manage the converter modules and combine them into new transformation pipelines. Employing the stream-based graph processing approach first implemented with CoNLL-RDF, we address common challenges and scalability issues when transforming resources and showcase the performance of Fintan by means of a purely graph-based transformation of the Universal Morphology data to RDF.", "phrases": ["flexible", "integrated transformation", "annotation engineering"], "overall_score": 0.9608227612407311, "scores": [0.9397572779246405, 0.8738685724257389, 0.810109851001297], "rank_score": 0.8745785671172254}
-{"id": "tan-bond-2011-building", "title": "Building and Annotating the Linguistically Diverse NTU-MC (NTU-Multilingual Corpus)", "abstract": "The NTU-MC compilation taps into the linguistic diversity of multilingual texts available within Singapore. The current version of NTU-MC contains 375,000 words (15,000 sentences) in 6 languages (English, Chinese, Japanese, Korean, Indonesian and Vietnamese) from 6 language families (Indo-European, Sino-Tibetan, Japonic, Korean as a language isolate, Austronesian and Austro-Asiatic). The NTU-MC is annotated with a layer of monolingual annotation (POS tags) and cross-lingual annotation (sentence-level alignments). The diverse language data and cross-lingual annotations provide valuable information on linguistic diversity for traditional linguistic research as well as natural language processing tasks. This paper describes the corpus compilation process with the evaluation of the monolingual and cross-lingual annotations of the corpus data.
The corpus is available under the Creative Commons Attribution 3.0 Unported license (CC BY).", "phrases": ["ntu-mc", "language family", "indo-european"], "overall_score": 1.921640319300756, "scores": [1.2208324904295886, 0.8675082179543889, 0.5353878887589257], "rank_score": 0.8745761990476345}
-{"id": "zoph-knight-2016-multi", "title": "Multi-Source Neural Translation", "abstract": "We build a multi-source machine translation model and train it to maximize the probability of a target English string given French and German sources. Using the neural encoder-decoder framework, we explore several combination methods and report up to +4.8 Bleu increases on top of a very strong attention-based neural translation model.", "phrases": ["machine translation", "multi-source nmt", "well performance"], "overall_score": 2.9135073807404814, "scores": [1.4811267742943617, 0.5740256056901021, 0.5678925488401109], "rank_score": 0.8743483096081915}
-{"id": "chen-etal-2020-logical", "title": "Logical Natural Language Generation from Open-Domain Tables", "abstract": "Neural natural language generation (NLG) models have recently shown remarkable progress in fluency and coherence. However, existing studies on neural NLG are primarily focused on surface-level realizations with limited emphasis on logical inference, an important aspect of human thinking and language. In this paper, we suggest a new NLG task where a model is tasked with generating natural language statements that can be logically entailed by the facts in an open-domain semi-structured table. To facilitate the study of the proposed logical NLG problem, we use the existing TabFact dataset (CITATION) featured with a wide range of logical/symbolic inferences as our testbed, and propose new automatic metrics to evaluate the fidelity of generation models w.r.t. logical inference. The new task poses challenges to the existing monotonic generation frameworks due to the mismatch between sequence order and logical order. In our experiments, we comprehensively survey different generation architectures (LSTM, Transformer, Pre-Trained LM) trained with different algorithms (RL, Adversarial Training, Coarse-to-Fine) on the dataset and made the following observations: 1) Pre-Trained LM can significantly boost both the fluency and logical fidelity metrics, 2) RL and Adversarial Training are trading fluency for fidelity, 3) Coarse-to-Fine generation can help partially alleviate the fidelity issue while maintaining high language fluency. The code and data are available at .", "phrases": ["table", "logicnlg", "factual correctness rate"], "overall_score": 2.1724575952061014, "scores": [1.5063994603996307, 0.5682001374626373, 0.5481841538631068], "rank_score": 0.8742612505751249}
-{"id": "de-marneffe-etal-2006-generating", "title": "Generating Typed Dependency Parses from Phrase Structure Parses", "abstract": "This paper describes a system for extracting typed dependency parses of English sentences from phrase structure parses. In order to capture inherent relations occurring in corpus texts that can be critical in real-world applications, many NP relations are included in the set of grammatical relations used. We provide a comparison of our system with Minipar and the Link parser.
The typed dependency extraction facility described here is integrated in the Stanford Parser, available for download.", "phrases": ["stanford parser", "dependency relation", "formalism", "modifier"], "overall_score": 3.3836431403778353, "scores": [1.8078758229321468, 0.6097280719461925, 0.5526996126825033, 0.5259170512205141], "rank_score": 0.8740551396953392}
-{"id": "knowles-koehn-2016-neural", "title": "Neural Interactive Translation Prediction", "abstract": "We present an interactive translation prediction method based on neural machine translation. Even with the same translation quality of the underlying machine translation systems, the neural prediction method yields much higher word prediction accuracy (61.6% vs. 43.3%) than the traditional method based on search graphs, mainly due to better recovery from errors. We also develop efficient means to enable practical deployment.", "phrases": ["translator", "neural machine translation", "decoding"], "overall_score": 2.366206760439457, "scores": [1.1062968305245782, 0.8647108755133299, 0.6502953548983864], "rank_score": 0.8737676869787648}
-{"id": "saers-wu-2011-linear", "title": "Linear Transduction Grammars and Zipper Finite-State Transducers", "abstract": "We examine how the recently explored class of linear transductions relates to finite-state models. Linear transductions have been neglected historically, but gained recent interest in statistical machine translation modeling, due to empirical studies demonstrating that their attractive balance of generative capacity and complexity characteristics leads to improved accuracy and speed in learning alignment and translation models. Such work has until now characterized the class of linear transductions in terms of either (a) linear inversion transduction grammars (LITGs) which are linearized restrictions of inversion transduction grammars or (b) linear transduction grammars (LTGs) which are bilingualized generalizations of linear grammars. In this paper, we offer a new alternative characterization of linear transductions, as relating four finite-state languages to each other. We introduce the devices of zipper finite-state automata (ZFSAs) and zipper finite-state transducers (ZFSTs) in order to construct the bridge between linear transductions and finite-state models.", "phrases": ["zipper", "transducer", "linear transduction grammars"], "overall_score": 0.9598207096649503, "scores": [0.954085743385439, 0.8414488142410336, 0.8254648237710669], "rank_score": 0.8736664604658465}
-{"id": "lu-roth-2012-automatic", "title": "Automatic Event Extraction with Structured Preference Modeling", "abstract": "This paper presents a novel sequence labeling model based on the latent-variable semi-Markov conditional random fields for jointly extracting argument roles of events from texts. The model takes in coarse mention and type information and predicts argument roles for a given event template. This paper addresses the event extraction problem in a primarily unsupervised setting, where no labeled training instances are available. Our key contribution is a novel learning framework called structured preference modeling (PM), which allows arbitrary preference to be assigned to certain structures during the learning procedure. We establish and discuss connections between this framework and other existing works. We show empirically that the structured preferences are crucial to the success of our task.
Our model, trained without annotated data and with a small number of structured preferences, yields performance competitive with some baseline supervised approaches.", "phrases": ["event extraction", "conditional random field", "annotated data"], "overall_score": 1.7000342604451264, "scores": [1.5248041959754508, 0.5555609098808502, 0.5405692593873055], "rank_score": 0.8736447884145355}
-{"id": "pustejovsky-etal-2019-modeling", "title": "Modeling Quantification and Scope in Abstract Meaning Representations", "abstract": "In this paper, we propose an extension to Abstract Meaning Representations (AMRs) to encode scope information of quantifiers and negation, in a way that overcomes the semantic gaps of the schema while maintaining its cognitive simplicity. Specifically, we address three phenomena not previously part of the AMR specification: quantification, negation (generally), and modality. The resulting representation, which we call \u201cUniform Meaning Representation\u201d (UMR), adopts the predicative core of AMR and embeds it under a \u201cscope\u201d graph when appropriate. UMR representations differ from other treatments of quantification and modal scope phenomena in two ways: (a) they are more transparent; and (b) they specify default scope when possible.", "phrases": ["quantification", "scope", "abstract meaning representations"], "overall_score": 0.9595666904755741, "scores": [0.9752178873914401, 0.8282795385420264, 0.8168083007726145], "rank_score": 0.8734352422353603}
-{"id": "mayn-etal-2021-familiar", "title": "Familiar words but strange voices: Modelling the influence of speech variability on word recognition", "abstract": "We present a deep neural model of spoken word recognition which is trained to retrieve the meaning of a word (in the form of a word embedding) given its spoken form, a task which resembles that faced by a human listener. Furthermore, we investigate the influence of variability in speech signals on the model's performance. To this end, we conduct a set of controlled experiments using word-aligned read speech data in German. Our experiments show that (1) the model is more sensitive to dialectical variation than gender variation, and (2) recognition performance of word cognates from related languages reflects the degree of relatedness between languages in our study. Our work highlights the feasibility of modeling human speech perception using deep neural networks.", "phrases": ["influence", "speech variability", "word recognition"], "overall_score": 0.959444231121882, "scores": [0.9303864634963882, 0.8811788736013957, 0.8084059876861037], "rank_score": 0.8733237749279624}
-{"id": "xuan-bach-etal-2012-reranking", "title": "A Reranking Model for Discourse Segmentation using Subtree Features", "abstract": "This paper presents a discriminative reranking model for the discourse segmentation task, the first step in a discourse parsing system. Our model exploits subtree features to rerank N-best outputs of a base segmenter, which uses syntactic and lexical features in a CRF framework.
Experimental results on the RST Discourse Treebank corpus show that our model outperforms existing discourse segmenters in both settings that use gold standard Penn Treebank parse trees and Stanford parse trees.", "phrases": ["reranking model", "discourse segmentation", "subtree feature"], "overall_score": 1.4054425000609676, "scores": [0.898293422682662, 0.8653517347372613, 0.8561064139881124], "rank_score": 0.8732505238026785}
-{"id": "lyu-etal-2004-toward", "title": "Toward Constructing A Multilingual Speech Corpus for Taiwanese (Min-nan), Hakka, and Mandarin", "abstract": "The Formosa speech database (ForSDat) is a multilingual speech corpus collected at Chang Gung University and sponsored by the National Science Council of Taiwan. It is expected that a multilingual speech corpus will be collected, covering the three most frequently used languages in Taiwan: Taiwanese (Min-nan), Hakka, and Mandarin. This 3-year project has the goal of collecting a phonetically abundant speech corpus of more than 1,800 speakers and hundreds of hours of speech. Recently, the first version of this corpus containing speech of 600 speakers of Taiwanese and Mandarin was finished and is ready to be released. It contains about 49 hours of speech and 247,000 utterances.", "phrases": ["multilingual speech corpus", "taiwanese", "formosa speech database"], "overall_score": 1.210511518764162, "scores": [0.936616465504574, 0.8234154035399822, 0.8595665785454798], "rank_score": 0.873199482530012}
-{"id": "carpuat-etal-2013-sensespotting", "title": "SenseSpotting: Never let your parallel data tie you to an old domain", "abstract": "Words often gain new senses in new domains. Being able to automatically identify, from a corpus of monolingual text, which word tokens are being used in a previously unseen sense has applications to machine translation and other tasks sensitive to lexical semantics. We define a task, SENSESPOTTING, in which we build systems to spot tokens that have new senses in new domain text. Instead of difficult and expensive annotation, we build a gold standard by leveraging cheaply available parallel corpora, targeting our approach to the problem of domain adaptation for machine translation. Our system is able to achieve F-measures of as much as 80%, when applied to word types it has never seen before. Our approach is based on a large set of novel features that capture varied aspects of how words change when used in new domains.", "phrases": ["parallel data", "domain adaptation", "central interest"], "overall_score": 1.9185888649961973, "scores": [0.9165720573285092, 1.1430244407015213, 0.5599657690034697], "rank_score": 0.8731874223445001}
-{"id": "iyyer-etal-2015-deep", "title": "Deep Unordered Composition Rivals Syntactic Methods for Text Classification", "abstract": "Many existing deep learning models for natural language processing tasks focus on learning the compositionality of their inputs, which requires many expensive computations. We present a simple deep neural network that competes with and, in some cases, outperforms such models on sentiment analysis and factoid question answering tasks while taking only a fraction of the training time. While our model is syntactically-ignorant, we show significant improvements over previous bag-of-words models by deepening our network and applying a novel variant of dropout. Moreover, our model performs better than syntactic models on datasets with high syntactic variance.
We show that our model makes similar errors to syntactically-aware models, indicating that for the tasks we consider, nonlinearly transforming the input is more important than tailoring a network to incorporate word order and syntax.", "phrases": ["text classification", "compositionality", "sentiment analysis", "average", "neural architecture"], "overall_score": 3.1044126999756063, "scores": [1.50192202779259, 0.9066085419241817, 0.8383928460536985, 0.5801794025654133, 0.5387323211287735], "rank_score": 0.8731670278929314}
-{"id": "che-etal-2013-named", "title": "Named Entity Recognition with Bilingual Constraints", "abstract": "Different languages contain complementary cues about entities, which can be used to improve Named Entity Recognition (NER) systems. We propose a method that formulates the problem of exploring such signals on unannotated bilingual text as a simple Integer Linear Program, which encourages entity tags to agree via bilingual constraints. Bilingual NER experiments on the large OntoNotes 4.0 Chinese-English corpus show that the proposed method can improve strong baselines for both Chinese and English. In particular, Chinese performance improves by over 5% absolute F1 score. We can then annotate a large amount of bilingual text (80k sentence pairs) using our method, and add it as uptraining data to the original monolingual NER training corpus. The Chinese model retrained on this new combined dataset outperforms the strong baseline by over 3% F1 score.", "phrases": ["entity recognition", "bilingual constraint", "cue", "ner performance"], "overall_score": 1.6989827876960668, "scores": [0.9479890435519451, 1.4464388790330431, 0.5495660096935491, 0.5484238209684495], "rank_score": 0.8731044383117468}
-{"id": "abdul-mageed-etal-2012-samar", "title": "SAMAR: A System for Subjectivity and Sentiment Analysis of Arabic Social Media", "abstract": "In this work, we present SAMAR, a system for Subjectivity and Sentiment Analysis (SSA) for Arabic social media genres. We investigate: how to best represent lexical information; whether standard features are useful; how to treat Arabic dialects; and whether genre-specific features have a measurable impact on performance. Our results suggest that we need individualized solutions for each domain and task, but that lemmatization is a feature in all the best approaches.", "phrases": ["subjectivity", "sentiment analysis", "arabic social medium"], "overall_score": 1.564065320543556, "scores": [0.8889108955110268, 0.8160701500507382, 0.913783382485159], "rank_score": 0.8729214760156414}
-{"id": "panicheva-etal-2010-personal", "title": "Personal Sense and Idiolect: Combining Authorship Attribution and Opinion Analysis", "abstract": "Subjectivity analysis and authorship attribution are very popular areas of research. However, work in these two areas has been done separately. We believe that by combining information about subjectivity in texts and authorship, the performance of both tasks can be improved. In this paper, a personalized approach to opinion mining is presented, in which the notions of personal sense and idiolect are introduced; the approach is applied to the polarity classification task. It is assumed that different authors express their private states in text individually, and opinion mining results could be improved by analyzing texts by different authors separately. The hypothesis is tested on a corpus of movie reviews by ten authors.
The results of applying the personalized approach to opinion mining are presented, confirming that the approach increases the performance of the opinion mining task. Automatic authorship attribution is further applied to model the personalized approach, classifying documents by their assumed authorship. Although the automatic authorship classification imposes a number of limitations on the dataset for further experiments, after overcoming these issues the authorship attribution technique modeling the personalized approach confirms the increase over the baseline with no authorship information used.", "phrases": ["idiolect", "authorship attribution", "personal sense"], "overall_score": 0.958967738054774, "scores": [0.9168521096425181, 0.8771007318497551, 0.8247173152489217], "rank_score": 0.872890052247065}
-{"id": "xie-etal-2018-neural", "title": "Neural Cross-Lingual Named Entity Recognition with Minimal Resources", "abstract": "For languages with no annotated resources, unsupervised transfer of natural language processing models such as named-entity recognition (NER) from resource-rich languages would be an appealing capability. However, differences in words and word order across languages make it a challenging problem. To improve mapping of lexical items across languages, we propose a method that finds translations based on bilingual word embeddings. To improve robustness to word order differences, we propose to use self-attention, which allows for a degree of flexibility with respect to word order. We demonstrate that these methods achieve state-of-the-art or competitive NER performance on commonly tested languages under a cross-lingual setting, with much lower resource requirements than past approaches. We also evaluate the challenges of applying these methods to Uyghur, a low-resource language.", "phrases": ["entity recognition", "word embedding", "low-resource language", "self-attention layer"], "overall_score": 2.8438317526072683, "scores": [0.8898552896719429, 1.1523792663054728, 0.9058892832951738, 0.5432788486713586], "rank_score": 0.872850671985987}
-{"id": "shibata-etal-2014-large", "title": "A Large Scale Database of Strongly-related Events in Japanese", "abstract": "The knowledge about the relation between events is quite useful for coreference resolution, anaphora resolution, and several NLP applications such as dialogue systems. This paper presents a large scale database of strongly-related events in Japanese, which has been acquired with our proposed method (Shibata and Kurohashi, 2011). In languages where omitted arguments or zero anaphora are often utilized, such as Japanese, coreference-based event extraction methods are hard to apply, so our method extracts strongly-related events in a two-phrase construct. This method first calculates the co-occurrence measure between predicate-arguments (events), and regards an event pair whose mutual information is high as strongly-related events. To calculate the co-occurrence measure efficiently, we adopt an association rule mining method. Then, we identify the remaining arguments by using case frames. The database contains approximately 100,000 unique events, with approximately 340,000 strongly-related event pairs, which is much larger than an existing automatically-constructed event database.
We evaluated 100 randomly chosen event pairs, and the accuracy was approximately 68%.", "phrases": ["large scale database", "strongly-related event", "japanese"], "overall_score": 1.2097008516527434, "scores": [0.9641045390031747, 0.8432166328534291, 0.8105229576012488], "rank_score": 0.8726147098192842}
-{"id": "schatzmann-etal-2007-agenda", "title": "Agenda-Based User Simulation for Bootstrapping a POMDP Dialogue System", "abstract": "This paper investigates the problem of bootstrapping a statistical dialogue manager without access to training data and proposes a new probabilistic agenda-based method for simulating user behaviour. In experiments with a statistical POMDP dialogue system, the simulator was realistic enough to successfully test the prototype system and train a dialogue policy. An extensive study with human subjects showed that the learned policy was highly competitive, with task completion rates above 90%.", "phrases": ["user simulation", "pomdp dialogue system", "dialogue management", "agenda-based user simulator", "human conversational data"], "overall_score": 2.8084615913342965, "scores": [1.8184038079140006, 0.8683081179281863, 0.601177273499123, 0.5487869366707174, 0.5258121116501693], "rank_score": 0.8724976495324392}
-{"id": "wang-etal-2008-chinese", "title": "Chinese Word Sense Disambiguation with PageRank and HowNet", "abstract": "Word sense disambiguation is a basic problem in natural language processing. This paper proposes an unsupervised word sense disambiguation method based on PageRank and HowNet. In the method, a free text is first represented as a sememe graph with sememes as vertices and relatedness of sememes as weighted edges based on HowNet. Then UW-PageRank is applied on the sememe graph to score the importance of sememes. The score of each definition of a word can be computed from the scores of the sememes it contains. Finally, the highest scored definition is assigned to the word. This approach is tested on SENSEVAL-3 and the experimental results prove it practical and effective.", "phrases": ["word sense disambiguation", "pagerank", "hownet"], "overall_score": 0.9583907311853118, "scores": [0.9460385929038374, 0.8556222252894289, 0.815433695688076], "rank_score": 0.8723648379604474}
-{"id": "chen-etal-2021-finqa", "title": "FinQA: A Dataset of Numerical Reasoning over Financial Data", "abstract": "The sheer volume of financial statements makes it difficult for humans to access and analyze a business's financials. Robust numerical reasoning likewise faces unique challenges in this domain. In this work, we focus on answering deep questions over financial data, aiming to automate the analysis of a large corpus of financial documents. In contrast to existing tasks on the general domain, the finance domain includes complex numerical reasoning and understanding of heterogeneous representations. To facilitate analytical progress, we propose a new large-scale dataset, FinQA, with Question-Answering pairs over Financial reports, written by financial experts. We also annotate the gold reasoning programs to ensure full explainability. We further introduce baselines and conduct comprehensive experiments on our dataset. The results demonstrate that popular, large, pre-trained models fall far short of expert humans in acquiring finance knowledge and in complex multi-step numerical reasoning on that knowledge. Our dataset \u2013 the first of its kind \u2013 should therefore enable significant, new community research into complex application domains.
The dataset and code are publicly available at .", "phrases": ["numerical reasoning", "financial data", "finqa"], "overall_score": 1.4037502049043096, "scores": [0.8724034259756844, 0.8722525856720116, 0.8719411134590865], "rank_score": 0.8721990417022609}
-{"id": "torrens-urrutia-2018-approach", "title": "An Approach to Measuring Complexity with a Fuzzy Grammar & Degrees of Grammaticality", "abstract": "This paper presents an approach to evaluate the complexity of a given natural language input by means of a Fuzzy Grammar with some fuzzy logic formulations. Usually, the approaches in linguistics have described a natural language grammar by means of discrete terms. However, a grammar can be explained in terms of degrees by following the concepts of linguistic gradience & fuzziness. Understanding a grammar as a fuzzy or gradient object allows us to establish degrees of grammaticality for every linguistic input. This shall be meaningful for linguistic complexity, considering that the less grammatical an input is, the more complex its processing will be. In this regard, the degree of complexity of a linguistic input (which is a linguistic representation of a natural language expression) depends on the chosen grammar. The bases of the fuzzy grammar are shown here. Some of these are described by Fuzzy Type Theory. The linguistic inputs are characterized by constraints through a Property Grammar.", "phrases": ["complexity", "fuzzy grammar", "grammaticality"], "overall_score": 1.2087764131915413, "scores": [0.9310173403622014, 0.8537171382823598, 0.8311091266379166], "rank_score": 0.8719478684274926}
-{"id": "green-etal-2010-improved", "title": "Improved Models of Distortion Cost for Statistical Machine Translation", "abstract": "The distortion cost function used in Moses-style machine translation systems has two flaws. First, it does not estimate the future cost of known required moves, thus increasing search errors. Second, all distortion is penalized linearly, even when appropriate re-orderings are performed. Because the cost function does not effectively constrain search, translation quality decreases at higher distortion limits, which are often needed when translating between languages of different typologies such as Arabic and English. To address these problems, we introduce a method for estimating future linear distortion cost, and a new discriminative distortion model that predicts word movement during translation. In combination, these extensions give a statistically significant improvement over a baseline distortion parameterization. When we triple the distortion limit, our model achieves a +2.32 BLEU average gain over Moses.", "phrases": ["distortion cost", "word movement", "length", "jump"], "overall_score": 1.9152217851399602, "scores": [1.8060809433829565, 0.5704735664930958, 0.5702368740527393, 0.5398286091205451], "rank_score": 0.8716549982623341}
-{"id": "arthur-etal-2015-semantic", "title": "Semantic Parsing of Ambiguous Input through Paraphrasing and Verification", "abstract": "We propose a new method for semantic parsing of ambiguous and ungrammatical input, such as search queries. We do so by building on an existing semantic parsing framework that uses synchronous context free grammars (SCFG) to jointly model the input sentence and output meaning representation. We generalize this SCFG framework to allow not one, but multiple outputs.
Using this formalism, we construct a grammar that takes an ambiguous input string and jointly maps it into both a meaning representation and a natural language paraphrase that is less ambiguous than the original input. This paraphrase can be used to disambiguate the meaning representation via verification using a language model that calculates the probability of each paraphrase.", "phrases": ["ambiguous input", "verification", "semantic parsing"], "overall_score": 0.9575250733670716, "scores": [0.9342607787242901, 0.8849661179062654, 0.7955037501417909], "rank_score": 0.8715768822574489}
-{"id": "fujii-etal-2006-test", "title": "Test Collections for Patent Retrieval and Patent Classification in the Fifth NTCIR Workshop", "abstract": "This paper describes the test collections produced for the Patent Retrieval Task in the Fifth NTCIR Workshop. We performed the invalidity search task, in which each participant group searches a patent collection for the patents that can invalidate the demand in an existing claim. For this purpose, we performed both document and passage retrieval tasks. We also performed the automatic patent classification task using the F-term classification system. The test collections will be available to the public for research purposes.", "phrases": ["fifth ntcir workshop", "patent retrieval task", "information retrieval"], "overall_score": 1.4026629460211875, "scores": [0.790908636404432, 1.289369942950674, 0.5342918899706944], "rank_score": 0.8715234897752668}
-{"id": "vu-etal-2020-exploring", "title": "Exploring and Predicting Transferability across NLP Tasks", "abstract": "Recent advances in NLP demonstrate the effectiveness of training large-scale language models and transferring them to downstream tasks. Can fine-tuning these models on tasks other than language modeling further improve performance? In this paper, we conduct an extensive study of the transferability between 33 NLP tasks across three broad classes of problems (text classification, question answering, and sequence labeling). Our results show that transfer learning is more beneficial than previously thought, especially when target task data is scarce, and can improve performance even with low-data source tasks that differ substantially from the target task (e.g., part-of-speech tagging transfers well to the DROP QA dataset). We also develop task embeddings that can be used to predict the most transferable source tasks for a given target task, and we validate their effectiveness in experiments controlled for source and target data size. Overall, our experiments reveal that factors such as data size, task and domain similarity, and task complexity all play a role in determining transferability.", "phrases": ["transferability", "task data", "task embedding", "relatedness"], "overall_score": 2.4163105552331126, "scores": [1.1645979506248427, 0.8873040698861205, 0.8763856007908766, 0.5577116339806297], "rank_score": 0.8714998138206174}
-{"id": "veale-2011-creative", "title": "Creative Language Retrieval: A Robust Hybrid of Information Retrieval and Linguistic Creativity", "abstract": "Information retrieval (IR) and figurative language processing (FLP) could scarcely be more different in their treatment of language and meaning. IR views language as an open-ended set of mostly stable signs with which texts can be indexed and retrieved, focusing more on a text's potential relevance than its potential meaning.
In contrast, FLP views language as a system of unstable signs that can be used to talk about the world in creative new ways. There is another key difference: IR is practical, scalable and robust, and in daily use by millions of casual users. FLP is neither scalable nor robust, and not yet practical enough to migrate beyond the lab. This paper thus presents a mutually beneficial hybrid of IR and FLP, one that enriches IR with new operators to enable the non-literal retrieval of creative expressions, and which also transplants FLP into a robust, scalable framework in which practical applications of linguistic creativity can be implemented.", "phrases": ["hybrid", "information retrieval", "linguistic creativity"], "overall_score": 1.2079680987029602, "scores": [0.9213466425200327, 0.8804797653574626, 0.8122679704487505], "rank_score": 0.8713647927754153}
-{"id": "choi-etal-2014-lexical", "title": "Lexical Acquisition for Opinion Inference: A Sense-Level Lexicon of Benefactive and Malefactive Events", "abstract": "Opinion inference arises when opinions are expressed toward states and events which positively or negatively affect entities, i.e., benefactive and malefactive events. This paper addresses creating a lexicon of such events, which would be helpful for inferring opinions. Verbs may be ambiguous, in that some meanings may be benefactive and others may be malefactive or neither. Thus, we use WordNet to create a sense-level lexicon. We begin with seed senses culled from FrameNet and expand the lexicon using WordNet relationships. The evaluations show that the accuracy of the approach is well above baseline accuracy.", "phrases": ["opinion inference", "sense-level lexicon", "malefactive event"], "overall_score": 0.9572231688485089, "scores": [0.9009668408667793, 0.8821975104425913, 0.8307418794565012], "rank_score": 0.8713020769219573}
-{"id": "majidi-crane-2013-active", "title": "Active Learning for Dependency Parsing by A Committee of Parsers", "abstract": "Data-driven dependency parsers need a large annotated corpus to learn how to generate the dependency graph of a given sentence. But annotations on structured corpora are expensive to collect and require a labor-intensive process. Active learning is a machine learning approach that allows only informative examples to be selected for annotation and is usually used when unannotated data is abundant and acquisition of more labeled data is expensive. We provide a novel framework in which a committee of dependency parsers collaborate to improve their efficiency using active learning techniques. Queries are made up only of uncertain tokens, and the annotations of the remaining tokens of selected sentences are voted among committee members.", "phrases": ["dependency parsing", "committee", "active learning"], "overall_score": 1.402148640253585, "scores": [0.9124808394453671, 0.8625366481331785, 0.8385943133258843], "rank_score": 0.87120393363481}
-{"id": "rahimi-etal-2019-massively", "title": "Massively Multilingual Transfer for NER", "abstract": "In cross-lingual transfer, NLP models over one or more source languages are applied to a low-resource target language. While most prior work has used a single source model or a few carefully selected models, here we consider a \u201cmassive\u201d setting with many such models. This setting raises the problem of poor transfer, particularly from distant languages. We propose two techniques for modulating the transfer, suitable for zero-shot or few-shot learning, respectively.
Evaluating on named entity recognition, we show that our techniques are much more effective than strong baselines, including standard ensembling, and our unsupervised method rivals oracle selection of the single best individual model.", "phrases": ["multilingual transfer", "single-source transfer", "inter alia"], "overall_score": 2.3588163721449122, "scores": [1.5241414813947083, 0.5463638241507475, 0.5426106232340526], "rank_score": 0.8710386429265028}
-{"id": "che-etal-2010-ltp", "title": "LTP: A Chinese Language Technology Platform", "abstract": "LTP (Language Technology Platform) is an integrated Chinese processing platform which includes a suite of high performance natural language processing (NLP) modules and relevant corpora. Especially for the syntactic and semantic parsing modules, we achieved good results in some relevant evaluations, such as CoNLL and SemEval. Based on an XML internal data representation, users can easily use these modules and corpora by invoking DLL (Dynamic Link Library) or Web service APIs (Application Program Interfaces), and view the processing results directly with the visualization tool.", "phrases": ["ltp", "word segmentation", "pos tagging"], "overall_score": 1.6947914625619271, "scores": [0.8426850354315287, 0.9132210196750788, 0.8569455147123314], "rank_score": 0.8709505232729797}
-{"id": "laban-etal-2020-summary", "title": "The Summary Loop: Learning to Write Abstractive Summaries Without Examples", "abstract": "This work presents a new approach to unsupervised abstractive summarization based on maximizing a combination of coverage and fluency for a given length constraint. It introduces a novel method that encourages the inclusion of key terms from the original document into the summary: key terms are masked out of the original document and must be filled in by a coverage model using the current generated summary. A novel unsupervised training procedure leverages this coverage model along with a fluency model to generate and score summaries. When tested on popular news summarization datasets, the method outperforms previous unsupervised methods by more than 2 R-1 points, and approaches results of competitive supervised methods. Our model attains higher levels of abstraction with copied passages roughly two times shorter than prior work, and learns to compress and merge sentences without supervision.", "phrases": ["summarization", "fluency", "length constraint", "unsupervised method"], "overall_score": 2.0053147703215677, "scores": [1.5395350582704523, 0.8742064776430136, 0.5431022692525973, 0.526744751752911], "rank_score": 0.8708971392297435}
-{"id": "yao-van-durme-2014-information", "title": "Information Extraction over Structured Data: Question Answering with Freebase", "abstract": "Answering natural language questions using the Freebase knowledge base has recently been explored as a platform for advancing the state of the art in open domain semantic parsing. Those efforts map questions to sophisticated meaning representations that systems then attempt to match against viable answer candidates in the knowledge base.
Here we show that relatively modest information extraction techniques, when paired with a web-scale corpus, can outperform these sophisticated approaches by roughly 34% relative gain.", "phrases": ["natural language question", "knowledge base", "information extraction", "query", "topic graph"], "overall_score": 2.9616981947613477, "scores": [0.8843036971971616, 1.3877230191988763, 0.9616644372761067, 0.5670788953368152, 0.5531351532142258], "rank_score": 0.8707810404446372}
-{"id": "kumar-etal-2018-aggression", "title": "Aggression-annotated Corpus of Hindi-English Code-mixed Data", "abstract": "As interaction over the web has increased, incidents of aggression and related events like trolling, cyberbullying, flaming, hate speech, etc. have increased manifold across the globe. While most of these behaviours, like bullying or hate speech, predate the Internet, the reach and extent of the Internet have given them an unprecedented power and influence to affect the lives of billions of people. So it is of utmost importance that preventive measures be taken to safeguard people using the web so that the web remains a viable medium of communication and connection in general. In this paper, we discuss the development of an aggression tagset and an annotated corpus of Hindi-English code-mixed data from two of the most popular social networking and social media platforms in India, Twitter and Facebook. The corpus is annotated using a hierarchical tagset of 3 top-level tags and 10 level 2 tags. The final dataset contains approximately 18k tweets and 21k Facebook comments and is being released for further research in the field.", "phrases": ["hindi-english code-mixed data", "aggression", "facebook", "distinction"], "overall_score": 2.2977440496284074, "scores": [0.84989500363609, 1.5560194176298532, 0.5509178296705894, 0.5258416930408649], "rank_score": 0.8706684859943493}
-{"id": "liu-etal-2019-original", "title": "Original Semantics-Oriented Attention and Deep Fusion Network for Sentence Matching", "abstract": "Sentence matching is a key issue in natural language inference and paraphrase identification. Despite the recent progress on multi-layered neural networks with cross-sentence attention, one sentence learns attention to the intermediate representations of another sentence, which are propagated from preceding layers and therefore are uncertain and unstable for matching, particularly at the risk of error propagation. In this paper, we present an original semantics-oriented attention and deep fusion network (OSOA-DFN) for sentence matching. Unlike existing models, each attention layer of OSOA-DFN is oriented to the original semantic representation of another sentence, which captures the relevant information from a fixed matching target. The multiple attention layers allow one sentence to repeatedly read the important information of another sentence for better matching. We then additionally design deep fusion to propagate the attention information at each matching layer. Finally, we introduce a self-attention mechanism to capture global context to enhance attention-aware representation within each sentence.
Experimental results on three sentence matching benchmark datasets, SNLI, SciTail and Quora, show that OSOA-DFN has the ability to model sentence matching more precisely.", "phrases": ["deep fusion network", "sentence matching", "original semantics-oriented attention"], "overall_score": 0.9564092544191524, "scores": [0.8947754475312263, 0.8849555826344394, 0.8319526300780524], "rank_score": 0.8705612200812394}
-{"id": "li-etal-2020-hitrans", "title": "HiTrans: A Transformer-Based Context- and Speaker-Sensitive Model for Emotion Detection in Conversations", "abstract": "Emotion detection in conversations (EDC) is to detect the emotion for each utterance in conversations that have multiple speakers. Different from the traditional non-conversational emotion detection, the model for EDC should be context-sensitive (e.g., understanding the whole conversation rather than one utterance) and speaker-sensitive (e.g., understanding which utterance belongs to which speaker). In this paper, we propose a transformer-based context- and speaker-sensitive model for EDC, namely HiTrans, which consists of two hierarchical transformers. We utilize BERT as the low-level transformer to generate local utterance representations, and feed them into another high-level transformer so that utterance representations could be sensitive to the global context of the conversation. Moreover, we exploit an auxiliary task to make our model speaker-sensitive, called pairwise utterance speaker verification (PUSV), which aims to classify whether two utterances belong to the same speaker. We evaluate our model on three benchmark datasets, namely EmoryNLP, MELD and IEMOCAP. Results show that our model outperforms previous state-of-the-art models.", "phrases": ["speaker-sensitive model", "emotion detection", "conversation"], "overall_score": 0.956394956903803, "scores": [0.8949081299840461, 0.8616571388522728, 0.855079348929456], "rank_score": 0.8705482059219251}
-{"id": "dreyer-etal-2008-latent", "title": "Latent-Variable Modeling of String Transductions with Finite-State Methods", "abstract": "String-to-string transduction is a central problem in computational linguistics and natural language processing. It occurs in tasks as diverse as name transliteration, spelling correction, pronunciation modeling and inflectional morphology. We present a conditional log-linear model for string-to-string transduction, which employs overlapping features over latent alignment sequences, and which learns latent classes and latent string pair regions from incomplete training data. We evaluate our approach on morphological tasks and demonstrate that latent variables can dramatically improve results, even when trained on small data sets. On the task of generating morphological forms, we outperform a baseline method, reducing the error rate by up to 48%.
On a lemmatization task, we reduce the error rates in Wicentowski (2002) by 38-92%.", "phrases": ["string transduction", "latent variable", "lemmatization", "finite-state machine"], "overall_score": 2.4135939467570706, "scores": [0.9157926951021373, 1.0868888767111755, 0.8736860890482643, 0.6057123568444697], "rank_score": 0.8705200044265117}
-{"id": "bonial-etal-2020-infoforager", "title": "InfoForager: Leveraging Semantic Search with AMR for COVID-19 Research", "abstract": "This paper examines how Abstract Meaning Representation (AMR) can be utilized for finding answers to research questions in medical scientific documents, in particular, to advance the study of UV (ultraviolet) inactivation of the novel coronavirus that causes the disease COVID-19. We describe the development of a proof-of-concept prototype tool, InfoForager, which uses AMR to conduct a semantic search, targeting the meaning of the user question, and matching this to sentences in medical documents that may contain information to answer that question. This work was conducted as a sprint over a period of six weeks, and reveals both promising results and challenges in reducing the user search time relating to COVID-19 research, and in general, domain adaptation of AMR for this task.", "phrases": ["semantic search", "covid-19 research", "infoforager"], "overall_score": 0.9562365722059868, "scores": [0.8851275554539514, 0.8821633637674378, 0.8439211946498367], "rank_score": 0.8704040379570753}
-{"id": "haagsma-etal-2020-magpie", "title": "MAGPIE: A Large Corpus of Potentially Idiomatic Expressions", "abstract": "Given the limited size of existing idiom corpora, we aim to enable progress in automatic idiom processing and linguistic analysis by creating the largest-to-date corpus of idioms for English. Using a fixed idiom list, automatic pre-extraction, and a strictly controlled crowdsourced annotation procedure, we show that it is feasible to build a high-quality corpus comprising more than 50K instances, an order of magnitude larger than previous resources. Crucial ingredients of crowdsourcing were the selection of crowdworkers, clear and comprehensive instructions, and an interface that breaks down the task in small, manageable steps. Analysis of the resulting corpus revealed strong effects of genre on idiom distribution, providing new evidence for existing theories on what influences idiom usage. The corpus also contains rich metadata, and is made publicly available.", "phrases": ["potential idiomatic expression", "crowdsourcing", "magpie"], "overall_score": 1.6936569197714868, "scores": [1.42602190736159, 0.6149828176442307, 0.5700977258350537], "rank_score": 0.8703674836136249}
-{"id": "zhang-etal-2007-grammar", "title": "A Grammar-driven Convolution Tree Kernel for Semantic Role Classification", "abstract": "Convolution tree kernel has shown promising results in semantic role classification. However, it only carries out hard matching, which may lead to over-fitting and a less accurate similarity measure. To remove the constraint, this paper proposes a grammar-driven convolution tree kernel for semantic role classification by introducing more linguistic knowledge into the standard tree kernel. The proposed grammar-driven tree kernel displays two advantages over the previous one: 1) grammar-driven approximate substructure matching and 2) grammar-driven approximate tree node matching. The two improvements enable the grammar-driven tree kernel to explore more linguistically motivated structure features than the previous one.
Experiments on the CoNLL-2005 SRL shared task show that the grammar-driven tree kernel significantly outperforms the previous non-grammar-driven one in SRL. Moreover, we present a composite kernel to integrate feature-based and tree kernel-based methods. Experimental results show that the composite kernel outperforms the previously best-reported methods.", "phrases": ["tree kernel", "semantic role classification", "grammar-driven tree kernel"], "overall_score": 1.8093440408577763, "scores": [0.9068934890185647, 1.0878126754485418, 0.6156255105404056], "rank_score": 0.8701105583358374}
-{"id": "vulic-etal-2019-really", "title": "Do We Really Need Fully Unsupervised Cross-Lingual Embeddings?", "abstract": "Recent efforts in cross-lingual word embedding (CLWE) learning have predominantly focused on fully unsupervised approaches that project monolingual embeddings into a shared cross-lingual space without any cross-lingual signal. The lack of any supervision makes such approaches conceptually attractive. Yet, their only core difference from (weakly) supervised projection-based CLWE methods is in the way they obtain a seed dictionary used to initialize an iterative self-learning procedure. The fully unsupervised methods have arguably become more robust, and their primary use case is CLWE induction for pairs of resource-poor and distant languages. In this paper, we question the ability of even the most robust unsupervised CLWE approaches to induce meaningful CLWEs in these more challenging settings. A series of bilingual lexicon induction (BLI) experiments with 15 diverse languages (210 language pairs) shows that fully unsupervised CLWE methods still fail for a large number of language pairs (e.g., they yield zero BLI performance for 87/210 pairs). Even when they succeed, they never surpass the performance of weakly supervised methods (seeded with 500-1,000 translation pairs) using the same self-learning procedure in any BLI setup, and the gaps are often substantial. These findings call for revisiting the main motivations behind fully unsupervised CLWE methods.", "phrases": ["cross-lingual embedding", "distant language", "low-resource language"], "overall_score": 2.465104842541251, "scores": [0.9956217342533317, 1.0817250235543807, 0.5328747926231246], "rank_score": 0.8700738501436124}
-{"id": "plank-etal-2014-learning", "title": "Learning part-of-speech taggers with inter-annotator agreement loss", "abstract": "In natural language processing (NLP) annotation projects, we use inter-annotator agreement measures and annotation guidelines to ensure consistent annotations. However, annotation guidelines often make linguistically debatable and even somewhat arbitrary decisions, and inter-annotator agreement is often less than perfect. While annotation projects usually specify how to deal with linguistically debatable phenomena, annotator disagreements typically still stem from these \u201chard\u201d cases. This indicates that some errors are more debatable than others. In this paper, we use small samples of doubly-annotated part-of-speech (POS) data for Twitter to estimate annotation reliability and show how those metrics of likely inter-annotator agreement can be implemented in the loss functions of POS taggers. We find that these cost-sensitive algorithms perform better across annotation projects and, more surprisingly, even on data annotated according to the same guidelines.
Finally, we show that POS tagging models sensitive to inter-annotator agreement perform better on the downstream task of chunking.", "phrases": ["part-of-speech", "tagger", "inter-annotator agreement loss", "pos", "loss function"], "overall_score": 2.764955453897326, "scores": [0.9175136007688137, 0.8805882166850828, 1.4138546338174236, 0.5923473523208528, 0.5457726921143327], "rank_score": 0.8700152991413012} -{"id": "fan-etal-2017-transfer", "title": "Transfer Learning for Neural Semantic Parsing", "abstract": "The goal of semantic parsing is to map natural language to a machine interpretable meaning representation language (MRL). One of the constraints that limits full exploration of deep learning technologies for semantic parsing is the lack of sufficient annotated training data. In this paper, we propose using sequence-to-sequence models in a multi-task setup for semantic parsing, with a focus on transfer learning. We explore three multi-task architectures for the sequence-to-sequence model and compare their performance with that of the independently trained model. Our experiments show that the multi-task setup aids transfer learning from an auxiliary task with large labeled data to the target task with smaller labeled data. We see an absolute accuracy gain ranging from 1.0% to 4.4% on our in-house data set, and we also see good gains ranging from 2.5% to 7.0% on the ATIS semantic parsing tasks with syntactic and semantic auxiliary tasks.", "phrases": ["semantic parsing", "transfer learning", "different domain"], "overall_score": 2.0861314888488764, "scores": [0.8498005580227798, 1.228089117459697, 0.5320635353777345], "rank_score": 0.8699844036200705} -{"id": "li-srikumar-2019-augmenting", "title": "Augmenting Neural Networks with First-order Logic", "abstract": "Today, the dominant paradigm for training neural networks involves minimizing task loss on a large dataset. Using world knowledge to inform a model, and yet retain the ability to perform end-to-end training remains an open question. In this paper, we present a novel framework for introducing declarative knowledge to neural network architectures in order to guide training and prediction. Our framework systematically compiles logical statements into computation graphs that augment a neural network without extra learnable parameters or manual redesign. We evaluate our modeling strategy on three tasks: machine comprehension, natural language inference, and text chunking. Our experiments show that knowledge-augmented networks can strongly improve over baselines, especially in low-data regimes.", "phrases": ["first-order logic", "declarative knowledge", "neuron"], "overall_score": 2.161475589341143, "scores": [1.4722034975496023, 0.6139161523579463, 0.5234056489911635], "rank_score": 0.8698417662995707} -{"id": "kiela-etal-2018-learning", "title": "Learning Visually Grounded Sentence Representations", "abstract": "We investigate grounded sentence representations, where we train a sentence encoder to predict the image features of a given caption\u2014i.e., we try to \u201cimagine\u201d how a sentence would be depicted visually\u2014and use the resultant features as sentence representations. We examine the quality of the learned representations on a variety of standard sentence representation quality benchmarks, showing improved performance for grounded models over non-grounded ones. &#13;
In addition, we thoroughly analyze the extent to which grounding contributes to improved performance, and show that the system also learns improved word embeddings.", "phrases": ["sentence representation", "image feature", "caption"], "overall_score": 2.161456813683899, "scores": [1.1100598643616437, 0.884992868747115, 0.6144498981490032], "rank_score": 0.869834210419254} -{"id": "dieng-etal-2020-topic", "title": "Topic Modeling in Embedding Spaces", "abstract": "Topic modeling analyzes documents to learn meaningful patterns of words. However, existing topic models fail to learn interpretable topics when working with large and heavy-tailed vocabularies. To this end, we develop the embedded topic model (etm), a generative model of documents that marries traditional topic models with word embeddings. More specifically, the etm models each word with a categorical distribution whose natural parameter is the inner product between the word's embedding and an embedding of its assigned topic. To fit the etm, we develop an efficient amortized variational inference algorithm. The etm discovers interpretable topics even with large vocabularies that include rare words and stop words. It outperforms existing document models, such as latent Dirichlet allocation, in terms of both topic quality and predictive performance.", "phrases": ["etm", "topic modeling", "pre-trained word embedding"], "overall_score": 2.230848995363424, "scores": [0.9687662728417861, 1.043642701364994, 0.5968226531629361], "rank_score": 0.8697438757899053} -{"id": "specia-etal-2009-improving", "title": "Improving the Confidence of Machine Translation Quality Estimates", "abstract": "We investigate the problem of estimating the quality of the output of machine translation systems at the sentence level when reference translations are not available. The focus is on automatically identifying a threshold to map a continuous predicted score into \u201cgood\u201d / \u201cbad\u201d categories for filtering out bad-quality cases in a translation post-edition task. We use the theory of Inductive Confidence Machines (ICM) to identify this threshold according to a confidence level that is expected for a given task. Experiments show that this approach gives improved estimates when compared to those based on classification or regression algorithms without ICM.", "phrases": ["confidence", "reference translation", "post-editing effort", "unseen machine"], "overall_score": 2.647887542742862, "scores": [0.8459772459049529, 1.1819802402122586, 0.8925202010415182, 0.5584095234382144], "rank_score": 0.869721802649236} -{"id": "lee-etal-2015-event", "title": "Event Detection and Factuality Assessment with Non-Expert Supervision", "abstract": "Events are communicated in natural language with varying degrees of certainty. For example, if you are \u201choping for a raise,\u201d it may be somewhat less likely than if you are \u201cexpecting\u201d one. To study these distinctions, we present scalable, high-quality annotation schemes for event detection and fine-grained factuality assessment. We find that non-experts, with very little training, can reliably provide judgments about what events are mentioned and the extent to which the author thinks they actually happened. &#13;
We also show how such data enables the development of regression models for fine-grained scalar factuality predictions that outperform strong baselines.", "phrases": ["factuality assessment", "certainty", "event detection"], "overall_score": 2.002069817127215, "scores": [0.886863148528997, 0.8711918697156994, 0.850408603645509], "rank_score": 0.8694878739634019} -{"id": "wong-etal-2008-extractive", "title": "Extractive Summarization Using Supervised and Semi-Supervised Learning", "abstract": "It is difficult to identify sentence importance from a single point of view. In this paper, we propose a learning-based approach to combine various sentence features. They are categorized as surface, content, relevance and event features. Surface features are related to extrinsic aspects of a sentence. Content features measure a sentence based on content-conveying words. Event features represent sentences by the events they contain. Relevance features evaluate a sentence from its relatedness with other sentences. Experiments show that the combined features improved summarization performance significantly. Although the evaluation results are encouraging, the supervised learning approach requires much labeled data. Therefore we investigate co-training by combining labeled and unlabeled data. Experiments show that this semi-supervised learning approach achieves comparable performance to its supervised counterpart and saves about half of the labeling time cost.", "phrases": ["summarization", "unlabeled data", "machine learning technique"], "overall_score": 2.5127496858321434, "scores": [1.205964220329359, 0.8685129143441257, 0.5335780530036008], "rank_score": 0.8693517292256953} -{"id": "wang-hua-2014-semiparametric", "title": "A Semiparametric Gaussian Copula Regression Model for Predicting Financial Risks from Earnings Calls", "abstract": "An earnings call summarizes the financial performance of a company, and it is an important indicator of the future financial risks of the company. We quantitatively study how earnings calls are correlated with the financial risks, with a special focus on the financial crisis of 2009. In particular, we perform a text regression task: given the transcript of an earnings call, we predict the volatility of stock prices from the week after the call is made. We propose the use of copula: a powerful statistical framework that separately models the uniform marginals and their complex multivariate stochastic dependencies, while not requiring any prior assumptions on the distributions of the covariate and the dependent variable. By performing probability integral transform, our approach moves beyond the standard count-based bag-of-words models in NLP, and improves previous work on text regression by incorporating the correlation among local features in the form of semiparametric Gaussian copula. &#13;
In experiments, we show that our model significantly outperforms strong linear and non-linear discriminative baselines on three datasets under various settings.", "phrases": ["gaussian copula", "financial risk", "earning call", "volatility"], "overall_score": 1.910114888062026, "scores": [1.7444931791086404, 0.6179538469287335, 0.5645343173299107, 0.5503416535886884], "rank_score": 0.8693307492389932} -{"id": "levy-etal-2021-collecting-large", "title": "Collecting a Large-Scale Gender Bias Dataset for Coreference Resolution and Machine Translation", "abstract": "Recent works have found evidence of gender bias in models of machine translation and coreference resolution using mostly synthetic diagnostic datasets. While these quantify bias in a controlled experiment, they often do so on a small scale and consist mostly of artificial, out-of-distribution sentences. In this work, we find grammatical patterns indicating stereotypical and non-stereotypical gender-role assignments (e.g., female nurses versus male dancers) in corpora from three domains, resulting in a first large-scale gender bias dataset of 108K diverse real-world English sentences. We manually verify the quality of our corpus and use it to evaluate gender bias in various coreference resolution and machine translation models. We find that all tested models tend to over-rely on gender stereotypes when presented with natural inputs, which may be especially harmful when deployed in commercial systems. Finally, we show that our dataset lends itself to finetuning a coreference resolution model, finding it mitigates bias on a held-out set. Our dataset and models are publicly available at github.com/SLAB-NLP/BUG. We hope they will spur future research into gender bias evaluation and mitigation techniques in realistic settings.", "phrases": ["gender bias dataset", "coreference resolution", "machine translation"], "overall_score": 0.9550562782721821, "scores": [0.8895758570798062, 0.8700455081966609, 0.8483676990822625], "rank_score": 0.8693296881195766} -{"id": "iwai-etal-2019-applying", "title": "Applying Machine Translation to Psychology: Automatic Translation of Personality Adjectives", "abstract": "We introduce our approach to apply machine translation to psychology, especially to translate English adjectives in a psychological personality questionnaire. We first extend seed English personality adjectives with a word2vec model trained with web sentences, and then feed the acquired words to a phrase-based machine translation model. We use Moses trained with bilingual corpora that consist of TED subtitles, movie subtitles and Wikipedia. We collect Japanese translations whose translation probabilities are higher than .01 and filter them based on human evaluations. This resulted in 507 Japanese personality descriptors. We conducted a web-survey (N=17,751) and finalized a personality questionnaire. Statistical analyses supported the five-factor structure, reliability and criterion-validity of the newly developed questionnaire. This shows the potential applicability of machine translation to psychology. &#13;
We discuss further issues related to the application of machine translation to psychology.", "phrases": ["machine translation", "personality adjective", "questionnaire"], "overall_score": 1.2051229618759223, "scores": [0.9227718169093357, 0.8595971833493622, 0.8255683808810206], "rank_score": 0.8693124603799062} -{"id": "hendrickx-etal-2012-modality", "title": "Modality in Text: a Proposal for Corpus Annotation", "abstract": "We present an annotation scheme for modality in Portuguese. In our annotation scheme we have tried to combine a more theoretical linguistic viewpoint with a practical annotation scheme that will also be useful for NLP research but is not geared towards one specific application. Our notion of modality focuses on the attitude and opinion of the speaker, or of the subject of the sentence. We validated the annotation scheme on a corpus sample of approximately 2000 sentences that we fully annotated with modal information using the MMAX2 annotation tool to produce XML annotation. We discuss our main findings and give attention to the difficult cases that we encountered as they illustrate the complexity of modality and its interactions with other elements in the text.", "phrases": ["annotation scheme", "portuguese", "modality"], "overall_score": 1.6908820995676146, "scores": [0.8954889333225924, 1.1166922104666024, 0.5946433805422477], "rank_score": 0.8689415081104809} -{"id": "hassan-etal-2017-synthetic", "title": "Synthetic Data for Neural Machine Translation of Spoken-Dialects", "abstract": "In this paper, we introduce a novel approach to generate synthetic data for training Neural Machine Translation systems. The proposed approach supports language variants and dialects with very limited parallel training data. This is achieved using seed data to project words from a closely-related resource-rich language to an under-resourced language variant via word embedding representations. The proposed approach is based on localized embedding projection of distributed representations which utilizes monolingual embeddings and approximate nearest neighbor queries to transform parallel data across language variants. Our approach is language independent and can be used to generate data for any variant of the source language such as slang or spoken dialect or even for a different language that is related to the source language. We report experimental results on Levantine to English translation using Neural Machine Translation. We show that the synthetic data can provide significant improvements over a very large scale system by more than 2.8 Bleu points and it can be used to provide a reliable translation system for a spoken dialect which does not have sufficient parallel data.", "phrases": ["neural machine translation", "dialect", "synthetic data"], "overall_score": 1.5566990506500056, "scores": [0.9391382377465847, 0.7989212683777568, 0.8683713414056775], "rank_score": 0.8688102825100064} -{"id": "liu-etal-2021-enriching", "title": "Enriching Non-Autoregressive Transformer with Syntactic and Semantic Structures for Neural Machine Translation", "abstract": "Non-autoregressive models have boosted the efficiency of neural machine translation through parallelized decoding, at the cost of effectiveness when compared with the autoregressive counterparts. In this paper, we claim that the syntactic and semantic structures of natural language are critical for non-autoregressive machine translation and can further improve the performance. &#13;
However, these structures are rarely considered in the existing non-autoregressive models. Inspired by this intuition, we propose to incorporate the explicit syntactic and semantic structure of languages into a non-autoregressive Transformer for the task of neural machine translation. Moreover, we also consider the intermediate latent alignment within target sentences to better learn the long-term token dependencies. Experimental results on two real-world datasets (i.e., WMT14 En-De and WMT16 En-Ro) show that our model achieves a significantly faster speed while maintaining translation quality when compared with several state-of-the-art non-autoregressive models.", "phrases": ["non-autoregressive transformer", "semantic structure", "neural machine translation"], "overall_score": 1.2043864418299084, "scores": [0.8826335467518114, 0.862286838246529, 0.8614231354145288], "rank_score": 0.8687811734709564} -{"id": "jiao-etal-2020-exploiting", "title": "Exploiting Unsupervised Data for Emotion Recognition in Conversations", "abstract": "Emotion Recognition in Conversations (ERC) aims to predict the emotional state of speakers in conversations, which is essentially a text classification task. Unlike the sentence-level text classification problem, the available supervised data for the ERC task is limited, which potentially prevents the models from reaching their full potential. In this paper, we propose a novel approach to leverage unsupervised conversation data, which is more accessible. Specifically, we propose the Conversation Completion (ConvCom) task, which attempts to select the correct answer from candidate answers to fill a masked utterance in a conversation. Then, we Pre-train a basic COntext-Dependent Encoder (Pre-CODE) on the ConvCom task. Finally, we fine-tune the Pre-CODE on the datasets of ERC. Experimental results demonstrate that pre-training on unsupervised data achieves significant improvement of performance on the ERC datasets, particularly on the minority emotion classes.", "phrases": ["unsupervised data", "emotion recognition", "conversation"], "overall_score": 1.398235924587849, "scores": [0.9384540028275824, 0.8795213035100494, 0.7883431737704372], "rank_score": 0.8687728267026896} -{"id": "schmitt-etal-2011-modeling", "title": "Modeling and Predicting Quality in Spoken Human-Computer Interaction", "abstract": "In this work we describe the modeling and prediction of Interaction Quality (IQ) in Spoken Dialogue Systems (SDS) using Support Vector Machines. The model can be employed to estimate the quality of the ongoing interaction at arbitrary points in a spoken human-computer interaction. We show that the use of 52 completely automatic features characterizing the system-user exchange significantly outperforms state-of-the-art approaches. The model is evaluated on publicly available data from the CMU Let's Go Bus Information system. It reaches a performance of 61.6% unweighted average recall when discriminating between 5 classes (good to very poor). &#13;
It can further be shown that incorporating knowledge about the user's emotional state hardly improves the performance.", "phrases": ["human-computer interaction", "interaction quality", "spoken dialogue systems", "average recall", "modeling"], "overall_score": 2.2280716727401955, "scores": [0.8988201862739598, 0.7940709531734608, 1.5304423457959824, 0.5750929320130501, 0.5448789705451598], "rank_score": 0.8686610775603226} -{"id": "nagoudi-etal-2022-arat5", "title": "AraT5: Text-to-Text Transformers for Arabic Language Generation", "abstract": "Transfer learning with a unified Transformer framework (T5) that converts all language problems into a text-to-text format was recently proposed as a simple and effective transfer learning approach. Although a multilingual version of the T5 model (mT5) was also introduced, it is not clear how well it can fare on non-English tasks involving diverse data. To investigate this question, we apply mT5 on a language with a wide variety of dialects\u2013Arabic. For evaluation, we introduce a novel benchmark for ARabic language GENeration (ARGEN), covering seven important tasks. For model comparison, we pre-train three powerful Arabic T5-style models and evaluate them on ARGEN. Although pre-trained with ~49x less data, our new models perform significantly better than mT5 on all ARGEN tasks (in 52 out of 59 test sets) and set several new SOTAs. Our models also establish new SOTA on the recently-proposed, large Arabic language understanding evaluation benchmark ARLUE (Abdul-Mageed et al., 2021). Our new models are publicly available. We also link to ARGEN datasets through our repository.", "phrases": ["text-to-text transformer", "arabic language generation", "arat5"], "overall_score": 1.2042054815294256, "scores": [0.9482730001137483, 0.8299535932351708, 0.8277253212717939], "rank_score": 0.8686506382069042} -{"id": "liao-grishman-2011-acquiring", "title": "Acquiring Topic Features to improve Event Extraction: in Pre-selected and Balanced Collections", "abstract": "Event extraction is a particularly challenging type of information extraction (IE) that may require inferences from the whole article. However, most current event extraction systems rely on local information at the phrase or sentence level, and do not consider the article as a whole, thus limiting extraction performance. Moreover, most annotated corpora are artificially enriched to include enough positive samples of the events of interest; event identification on a more balanced collection, such as unfiltered newswire, may perform much worse. In this paper, we investigate the use of unsupervised topic models to extract topic features to improve event extraction both on test data similar to the training data, and on more balanced collections. We compare this unsupervised approach to a supervised multi-label text classifier, and show that unsupervised topic modeling can get better results for both collections, and especially for a more balanced collection. &#13;
We show that the unsupervised topic model can improve trigger, argument and role labeling by 3.5%, 6.9% and 6% respectively on a pre-selected corpus, and by 16.8%, 12.5% and 12.7% on a balanced corpus.", "phrases": ["topic feature", "event extraction", "balanced collection"], "overall_score": 1.3979995066224067, "scores": [0.9024154407428483, 0.8859421178826594, 0.8175202372593002], "rank_score": 0.8686259319616026} -{"id": "yoshimura-etal-2019-filtering", "title": "Filtering Pseudo-References by Paraphrasing for Automatic Evaluation of Machine Translation", "abstract": "In this paper, we introduce our participation in the WMT 2019 Metric Shared Task. We propose an improved version of sentence BLEU using pseudo-references that are filtered by paraphrasing for automatic evaluation of machine translation (MT). We use the outputs of off-the-shelf MT systems as pseudo-references filtered by paraphrasing in addition to a single human reference (gold reference). We use BERT fine-tuned with a paraphrase corpus to filter pseudo-references by checking the paraphrasability with the gold reference. Our experimental results on the WMT 2016 and 2017 datasets show that our method achieved higher correlation with human evaluation than the sentence BLEU (SentBLEU) baselines with a single reference and with unfiltered pseudo-references.", "phrases": ["pseudo-reference", "paraphrasing", "machine translation"], "overall_score": 1.20361507157317, "scores": [0.8806482057623035, 0.8618178327337123, 0.8622082038507247], "rank_score": 0.8682247474489135} -{"id": "wan-etal-2009-improving", "title": "Improving Grammaticality in Statistical Sentence Generation: Introducing a Dependency Spanning Tree Algorithm with an Argument Satisfaction Model", "abstract": "Abstract-like text summarisation requires a means of producing novel summary sentences. In order to improve the grammaticality of the generated sentence, we model a global (sentence) level syntactic structure. We couch statistical sentence generation as a spanning tree problem in order to search for the best dependency tree spanning a set of chosen words. We also introduce a new search algorithm for this task that models argument satisfaction to improve the linguistic validity of the generated tree. We treat the allocation of modifiers to heads as a weighted bipartite graph matching (or assignment) problem, a well-studied problem in graph theory. Using BLEU to measure performance on a string regeneration task, we found an improvement, illustrating the benefit of the spanning tree approach armed with an argument satisfaction model.", "phrases": ["statistical sentence generation", "argument satisfaction model", "n-gram language model"], "overall_score": 2.0816605727121433, "scores": [0.9228122227753977, 0.7911876971634454, 0.8903597403762905], "rank_score": 0.8681198867717113} -{"id": "perez-almendros-etal-2020-dont", "title": "Don't Patronize Me! An Annotated Dataset with Patronizing and Condescending Language towards Vulnerable Communities", "abstract": "In this paper, we introduce a new annotated dataset which is aimed at supporting the development of NLP models to identify and categorize language that is patronizing or condescending towards vulnerable communities (e.g. refugees, homeless people, poor families). &#13;
While the prevalence of such language in the general media has long been shown to have harmful effects, it differs from other types of harmful language, in that it is generally used unconsciously and with good intentions. We furthermore believe that the often subtle nature of patronizing and condescending language (PCL) presents an interesting technical challenge for the NLP community. Our analysis of the proposed dataset shows that identifying PCL is hard for standard NLP models, with language models such as BERT achieving the best results.", "phrases": ["patronizing", "vulnerable community", "refugee", "poor family"], "overall_score": 2.4593608459004392, "scores": [0.7800478866512406, 1.6196939299662914, 0.5463029674629057, 0.5261411013346795], "rank_score": 0.8680464713537792} -{"id": "parton-etal-2012-automatic", "title": "Can Automatic Post-Editing Make MT More Meaningful", "abstract": "Automatic post-editors (APEs) enable the re-use of black box machine translation (MT) systems for a variety of tasks where different aspects of translation are important. In this paper, we describe APEs that target adequacy errors, a critical problem for tasks such as cross-lingual question-answering, and compare different approaches for post-editing: a rule-based system and a feedback approach that uses a computer in the loop to suggest improvements to the MT system. We test the APEs on two different MT systems and across two different genres. Human evaluation shows that the APEs significantly improve adequacy, regardless of approach, MT system or genre: 30-56% of the post-edited sentences have improved adequacy compared to the original MT.", "phrases": ["ape", "adequacy", "text analysis"], "overall_score": 1.9987479772847594, "scores": [0.8819950192119171, 0.8804994667876717, 0.8416411657505821], "rank_score": 0.868045217250057} -{"id": "levenberg-etal-2010-stream", "title": "Stream-based Translation Models for Statistical Machine Translation", "abstract": "Typical statistical machine translation systems are trained with static parallel corpora. Here we account for scenarios with a continuous incoming stream of parallel training data. Such scenarios include daily governmental proceedings, sustained output from translation agencies, or crowd-sourced translations. We show incorporating recent sentence pairs from the stream improves performance compared with a static baseline. Since frequent batch retraining is computationally demanding we introduce a fast incremental alternative using an online version of the EM algorithm. To bound our memory requirements we use a novel data-structure and associated training regime. When compared to frequent batch retraining, our online time and space-bounded model achieves the same performance with significantly less computational overhead.", "phrases": ["statistical machine translation", "stream", "sentence pair", "online version", "smt model"], "overall_score": 2.1566130868484037, "scores": [1.538367499013305, 1.1365851173023307, 0.5730316544368124, 0.5578730921857115, 0.5335673937874005], "rank_score": 0.8678849513451119} -{"id": "bergsma-etal-2008-distributional", "title": "Distributional Identification of Non-Referential Pronouns", "abstract": "We present an automatic approach to determining whether a pronoun in text refers to a preceding noun phrase or is instead nonreferential. We extract the surrounding textual context of the pronoun and gather, from a large corpus, the distribution of words that occur within that context. 
We learn to reliably classify these distributions as representing either referential or non-referential pronoun instances. Despite its simplicity, experimental results on classifying the English pronoun it show the system achieves the highest performance yet attained on this important task.", "phrases": ["pronoun", "textual context", "distributional method"], "overall_score": 1.6887328574738325, "scores": [1.1792285924704846, 0.8723571238374379, 0.5519253321754827], "rank_score": 0.867837016161135} -{"id": "ghaeini-etal-2018-dr", "title": "DR-BiLSTM: Dependent Reading Bidirectional LSTM for Natural Language Inference", "abstract": "We present a novel deep learning architecture to address the natural language inference (NLI) task. Existing approaches mostly rely on simple reading mechanisms for independent encoding of the premise and hypothesis. Instead, we propose a novel dependent reading bidirectional LSTM network (DR-BiLSTM) to efficiently model the relationship between a premise and a hypothesis during encoding and inference. We also introduce a sophisticated ensemble strategy to combine our proposed models, which noticeably improves final predictions. Finally, we demonstrate how the results can be improved further with an additional preprocessing step. Our evaluation shows that DR-BiLSTM obtains the best single model and ensemble model results achieving the new state-of-the-art scores on the Stanford NLI dataset.", "phrases": ["natural language inference", "nli", "dr-bilstm"], "overall_score": 1.8045442966788976, "scores": [0.9172868445358068, 0.8113175030134876, 0.8748027603338135], "rank_score": 0.8678023692943694} -{"id": "barron-cedeno-etal-2013-plagiarism", "title": "Plagiarism Meets Paraphrasing: Insights for the Next Generation in Automatic Plagiarism Detection", "abstract": "Although paraphrasing is the linguistic mechanism underlying many plagiarism cases, little attention has been paid to its analysis in the framework of automatic plagiarism detection. Therefore, state-of-the-art plagiarism detectors find it difficult to detect cases of paraphrase plagiarism. In this article, we analyze the relationship between paraphrasing and plagiarism, paying special attention to which paraphrase phenomena underlie acts of plagiarism and which of them are detected by plagiarism detection systems. With this aim in mind, we created the P4P corpus, a new resource that uses a paraphrase typology to annotate a subset of the PAN-PC-10 corpus for automatic plagiarism detection. The results of the Second International Competition on Plagiarism Detection were analyzed in the light of this annotation. The presented experiments show that (i) more complex paraphrase phenomena and a high density of paraphrase mechanisms make plagiarism detection more difficult, (ii) lexical substitutions are the paraphrase mechanisms used the most when plagiarizing, and (iii) paraphrase mechanisms tend to shorten the plagiarized text. &#13;
For the first time, the paraphrase mechanisms behind plagiarism have been analyzed, providing critical insights for the improvement of automatic plagiarism detection systems.", "phrases": ["paraphrasing", "insight", "automatic plagiarism detection"], "overall_score": 1.2030206733619182, "scores": [0.9575934931259847, 0.862095530104607, 0.7836989160886194], "rank_score": 0.8677959797730704} -{"id": "drozdov-etal-2019-unsupervised-latent", "title": "Unsupervised Latent Tree Induction with Deep Inside-Outside Recursive Auto-Encoders", "abstract": "We introduce the deep inside-outside recursive autoencoder (DIORA), a fully-unsupervised method for discovering syntax that simultaneously learns representations for constituents within the induced tree. Our approach predicts each word in an input sentence conditioned on the rest of the sentence. During training we use dynamic programming to consider all possible binary trees over the sentence, and for inference we use the CKY algorithm to extract the highest scoring parse. DIORA outperforms previously reported results for unsupervised binary constituency parsing on the benchmark WSJ dataset.", "phrases": ["recursive autoencoder", "diora", "dynamic programming", "latent representation", "inside-outside algorithm"], "overall_score": 2.3499369437338693, "scores": [1.379063955093754, 0.9577127711722517, 0.881605988301963, 0.5924403519164615, 0.527975643335306], "rank_score": 0.8677597419639472} -{"id": "han-etal-2020-continual", "title": "Continual Relation Learning via Episodic Memory Activation and Reconsolidation", "abstract": "Continual relation learning aims to continually train a model on new data to learn incessantly emerging novel relations while avoiding catastrophically forgetting old relations. Some pioneering work has proved that storing a handful of historical relation examples in episodic memory and replaying them in subsequent training is an effective solution for such a challenging problem. However, these memory-based methods usually suffer from overfitting the few memorized examples of old relations, which may gradually cause inevitable confusion among existing relations. Inspired by the mechanism in human long-term memory formation, we introduce episodic memory activation and reconsolidation (EMAR) to continual relation learning. Every time neural models are activated to learn both new and memorized data, EMAR utilizes relation prototypes for memory reconsolidation exercise to keep a stable understanding of old relations. The experimental results show that EMAR could get rid of catastrophically forgetting old relations and outperform the state-of-the-art continual learning models.", "phrases": ["episodic memory activation", "reconsolidation", "continual relation learning"], "overall_score": 2.080211301311675, "scores": [0.8963691862697446, 0.8629082953398859, 0.8432689993516228], "rank_score": 0.8675154936537511} -{"id": "bronner-monz-2012-user", "title": "User Edits Classification Using Document Revision Histories", "abstract": "Document revision histories are a useful and abundant source of data for natural language processing, but selecting relevant data for the task at hand is not trivial. In this paper we introduce a scalable approach for automatically distinguishing between factual and fluency edits in document revision histories. 
The approach is based on supervised machine learning using language model probabilities, string similarity measured over different representations of user edits, comparison of part-of-speech tags and named entities, and a set of adaptive features extracted from large amounts of unlabeled user edits. Applied to contiguous edit segments, our method achieves statistically significant improvements over a simple yet effective edit-distance baseline. It reaches high classification accuracy (88%) and is shown to generalize to additional sets of unseen data.", "phrases": ["fluency edit", "wikipedia revision", "revision analysis"], "overall_score": 2.2246441993282526, "scores": [1.1457049443723764, 0.8668161345193939, 0.5894533338076583], "rank_score": 0.8673248042331427} -{"id": "sadat-habash-2006-combination", "title": "Combination of Arabic Preprocessing Schemes for Statistical Machine Translation", "abstract": "Statistical machine translation is quite robust when it comes to the choice of input representation. It only requires consistency between training and testing. As a result, there is a wide range of possible preprocessing choices for data used in statistical machine translation. This is even more so for morphologically rich languages such as Arabic. In this paper, we study the effect of different word-level preprocessing schemes for Arabic on the quality of phrase-based statistical machine translation. We also present and evaluate different methods for combining preprocessing schemes resulting in improved translation quality.", "phrases": ["arabic", "preprocessing scheme", "statistical machine translation", "wide range", "rich language"], "overall_score": 2.3486756655937304, "scores": [0.8939128566574293, 1.1671357530059456, 0.8693153356150528, 0.8473910439093523, 0.5587149636915841], "rank_score": 0.8672939905758728} -{"id": "batchelor-2014-gdbank", "title": "gdbank: The beginnings of a corpus of dependency structures and type-logical grammar in Scottish Gaelic", "abstract": "We present gdbank, a small handbuilt corpus of 32 sentences with dependency structures and categorial grammar type assignments. The sentences have been chosen to illustrate as broad a range of the unusual features of Scottish Gaelic as possible, particularly nouns being used to represent psychological states where more thoroughly-studied languages such as English and French would prefer a verb, and prepositions marking aspect, as is also seen in Welsh and, for example, Irish Gaelic. We provide hand-built dependency trees, building on previous work on Irish Gaelic and using the Universal Dependency Scheme. We also provide a tentative categorial grammar account of the words in the sentences, based largely on previous work on English.", "phrases": ["dependency structure", "scottish gaelic", "gdbank"], "overall_score": 0.9525802663362116, "scores": [0.9282535047569426, 0.8585433463058305, 0.8144309237268066], "rank_score": 0.86707592492986} -{"id": "swanson-charniak-2014-data", "title": "Data Driven Language Transfer Hypotheses", "abstract": "Language transfer, the preferential second language behavior caused by similarities to the speaker\u2019s native language, requires considerable expertise to be detected by humans alone. Our goal in this work is to replace expert intervention by data-driven methods wherever possible. We define a computational methodology that produces a concise list of lexicalized syntactic patterns that are controlled for redundancy and ranked by relevancy to language transfer. 
We demonstrate the ability of our methodology to detect hundreds of such candidate patterns from currently available data sources, and validate the quality of the proposed patterns through classification experiments.", "phrases": ["language transfer", "list", "syntactic pattern"], "overall_score": 1.6869866409196914, "scores": [1.05658902179181, 0.8994414282858395, 0.6447884650279803], "rank_score": 0.8669396383685433} -{"id": "zeng-etal-2019-automatic", "title": "Automatic Generation of Personalized Comment Based on User Profile", "abstract": "Comments on social media are very diverse, in terms of content, style and vocabulary, which makes generating comments much more challenging than other existing natural language generation (NLG) tasks. Besides, since different users have different expression habits, it is necessary to take the user's profile into consideration when generating comments. In this paper, we introduce the task of automatic generation of personalized comment (AGPC) for social media. Based on tens of thousands of users' real comments and corresponding user profiles on weibo, we propose the Personalized Comment Generation Network (PCGN) for AGPC. The model utilizes user feature embedding with a gated memory and attends to user description to model the personality of users. In addition, external user representation is taken into consideration during decoding to enhance comment generation. Experimental results show that our model can generate natural, human-like and personalized comments.", "phrases": ["personalized comment", "user profile", "automatic generation"], "overall_score": 0.9523964184443919, "scores": [0.9182188442592807, 0.8657319960779911, 0.8167748977637075], "rank_score": 0.8669085793669931} -{"id": "lebret-etal-2016-neural", "title": "Neural Text Generation from Structured Data with Application to the Biography Domain", "abstract": "This paper introduces a neural model for concept-to-text generation that scales to large, rich domains. We experiment with a new dataset of biographies from Wikipedia that is an order of magnitude larger than existing resources with over 700k samples. The dataset is also vastly more diverse with a 400k vocabulary, compared to a few hundred words for Weathergov or Robocup. Our model builds upon recent work on conditional neural language models for text generation. To deal with the large vocabulary, we extend these models to mix a fixed vocabulary with copy actions that transfer sample-specific words from the input database to the generated output sentence. Our neural model significantly outperforms a classical Kneser-Ney language model adapted to this task by nearly 15 BLEU.", "phrases": ["neural text generation", "first sentence", "table-to-text generation", "generation model"], "overall_score": 3.473534008126404, "scores": [0.9462031290608839, 1.064326786457456, 0.9328303777452468, 0.5238173508882052], "rank_score": 0.866794411037948} -{"id": "henderson-2003-inducing", "title": "Inducing History Representations for Broad Coverage Statistical Parsing", "abstract": "We present a neural network method for inducing representations of parse histories and using these history representations to estimate the probabilities needed by a statistical left-corner parser. The resulting statistical parser achieves performance (89.1% F-measure) on the Penn Treebank which is only 0.6% below the best current parser for this task, despite using a smaller vocabulary size and less prior linguistic knowledge. &#13;
Crucial to this success is the use of structurally determined soft biases in inducing the representation of the parse history, and no use of hard independence assumptions.", "phrases": ["statistical parser", "approximation", "derivation history", "isbn"], "overall_score": 2.2874665541993187, "scores": [1.4472261763383496, 0.8851100822345445, 0.6022974414403667, 0.5324627188929596], "rank_score": 0.866774104726555} -{"id": "baeza-yates-etal-2015-cassa", "title": "CASSA: A Context-Aware Synonym Simplification Algorithm", "abstract": "We present a new context-aware method for lexical simplification that uses two free language resources and real web frequencies. We compare it with the state-of-the-art method for lexical simplification in Spanish and the established simplification baseline, that is, the most frequent synonym. Our method improves upon the other methods in the detection of complex words, in meaning preservation, and in simplicity. Although we use Spanish, the method can be extended to other languages since it does not require alignment of parallel corpora.", "phrases": ["simplification", "spanish", "cassa"], "overall_score": 1.5530410975072297, "scores": [0.7927961233103329, 0.9086842028876099, 0.8988258937708473], "rank_score": 0.8667687399895967} -{"id": "zaghouani-etal-2014-large", "title": "Large Scale Arabic Error Annotation: Guidelines and Framework", "abstract": "We present annotation guidelines and a web-based annotation framework developed as part of an effort to create a manually annotated Arabic corpus of errors and corrections for various text types. Such a corpus will be invaluable for developing Arabic error correction tools, both for training models and as a gold standard for evaluating error correction algorithms. We summarize the guidelines we created. We also describe issues encountered during the training of the annotators, as well as problems that are specific to the Arabic language that arose during the annotation process. Finally, we present the annotation tool that was developed as part of this project, the annotation pipeline, and the quality of the resulting annotations.", "phrases": ["arabic", "qalb corpus", "spelling error"], "overall_score": 2.7166566864664756, "scores": [1.2042717185818212, 0.8470538095740108, 0.5479361824846779], "rank_score": 0.8664205702135034} -{"id": "zhang-etal-2020-multi-task", "title": "A Multi-task Learning Framework for Opinion Triplet Extraction", "abstract": "The state-of-the-art Aspect-based Sentiment Analysis (ABSA) approaches are mainly based on either detecting aspect terms and their corresponding sentiment polarities, or co-extracting aspect and opinion terms. However, the extraction of aspect-sentiment pairs lacks opinion terms as a reference, while co-extraction of aspect and opinion terms would not lead to meaningful pairs without determining their sentiment dependencies. To address the issue, we present a novel view of ABSA as an opinion triplet extraction task, and propose a multi-task learning framework to jointly extract aspect terms and opinion terms, and simultaneously parse sentiment dependencies between them with a biaffine scorer. At the inference phase, the extraction of triplets is facilitated by a triplet decoding method based on the above outputs. We evaluate the proposed framework on four SemEval benchmarks for ABSA. &#13;
The results demonstrate that our approach significantly outperforms a range of strong baselines and state-of-the-art approaches.", "phrases": ["multi-task learning framework", "aspect term", "sentiment polarity"], "overall_score": 1.9034640311600861, "scores": [0.8840896452625331, 0.8853919184635366, 0.8294298777266688], "rank_score": 0.8663038138175795} -{"id": "li-etal-2013-modeling", "title": "Modeling Syntactic and Semantic Structures in Hierarchical Phrase-based Translation", "abstract": "Incorporating semantic structure into a linguistics-free translation model is challenging, since semantic structures are closely tied to syntax. In this paper, we propose a two-level approach to exploiting predicate-argument structure reordering in a hierarchical phrase-based translation model. First, we introduce linguistically motivated constraints into a hierarchical model, guiding translation phrase choices in favor of those that respect syntactic boundaries. Second, based on such translation phrases, we propose a predicate-argument structure reordering model that predicts reordering not only between an argument and its predicate, but also between two arguments. Experiments on Chinese-to-English translation demonstrate that both advances significantly improve translation accuracy.", "phrases": ["phrase-based translation", "predicate-argument structure", "extraction"], "overall_score": 1.9944057646261872, "scores": [0.9619677999603091, 1.0986374813331503, 0.5378729734661072], "rank_score": 0.8661594182531888} -{"id": "liu-etal-2010-automatic", "title": "Automatic Keyphrase Extraction via Topic Decomposition", "abstract": "Existing graph-based ranking methods for keyphrase extraction compute a single importance score for each word via a single random walk. Motivated by the fact that both documents and words can be represented by a mixture of semantic topics, we propose to decompose traditional random walk into multiple random walks specific to various topics. We thus build a Topical PageRank (TPR) on word graph to measure word importance with respect to different topics. After that, given the topic distribution of the document, we further calculate the ranking scores of words and extract the top ranked ones as keyphrases. Experimental results show that TPR outperforms state-of-the-art keyphrase extraction methods on two datasets under various evaluation metrics.", "phrases": ["mixture", "pagerank", "automatic keyphrase extraction", "article topic information", "similar meaning"], "overall_score": 2.7524112072468983, "scores": [0.968340833776579, 1.3296032682297243, 0.9508775877952388, 0.552363640798234, 0.5291554285206472], "rank_score": 0.8660681518240846} -{"id": "anastasiou-2008-identification", "title": "Identification of idioms by machine translation: a hybrid research system vs. three commercial systems", "abstract": "We compare three commercial Machine Translation (MT) systems, Power Translator Pro, SYSTRAN, and T1 Langenscheidt, with the research hybrid, statistical and rule-based system, METIS-II, with respect to identification of idioms. Firstly, we make a distinction between continuous (adjacent constituents) and discontinuous idioms (non-adjacent constituents). Secondly, we describe our idiom resources within METIS-II, the system\u2019s identification process, and we evaluate the results with simple techniques. From the translation outputs of the commercial systems we deduce that they cannot identify discontinuous idioms. 
We prove that, within METIS-II, the identification of discontinuous idioms is feasible, even with low resources.", "phrases": ["idiom", "machine translation", "commercial system"], "overall_score": 0.9514051921302328, "scores": [0.9102939649030474, 0.8828981060190174, 0.8048269079580774], "rank_score": 0.8660063262933807} -{"id": "zhao-2009-character", "title": "Character-Level Dependencies in Chinese: Usefulness and Learning", "abstract": "We investigate the possibility of exploiting character-based dependency for Chinese information processing. As Chinese text is made up of character sequences rather than word sequences, the word is not so natural a concept in Chinese as in English, nor is it easy to define without argument for such a language. Therefore we propose a character-level dependency scheme to represent primary linguistic relationships within a Chinese sentence. The usefulness of character dependencies is verified through two specialized dependency parsing tasks. The first is to handle trivial character dependencies that are equally transformed from traditional word boundaries. The second furthermore considers the case that annotated internal character dependencies inside a word are involved. Both of these results from character-level dependency parsing are positive. This study provides an alternative way to formularize basic character- and word-level representation for Chinese.", "phrases": ["chinese", "usefulness", "word boundary", "character-level dependency", "internal structure"], "overall_score": 1.9939748439082734, "scores": [1.4486703033935444, 0.8936570354686202, 0.8533894913285727, 0.5718807482907118, 0.5622637803348554], "rank_score": 0.865972271763261} -{"id": "mesgar-strube-2018-neural", "title": "A Neural Local Coherence Model for Text Quality Assessment", "abstract": "We propose a local coherence model that captures the flow of what semantically connects adjacent sentences in a text. We represent the semantics of a sentence by a vector and capture its state at each word of the sentence. We model what relates two adjacent sentences based on the two most similar semantic states, each of which is in one of the sentences. We encode the perceived coherence of a text by a vector, which represents patterns of changes in salient information that relates adjacent sentences. Our experiments demonstrate that our approach is beneficial for two downstream tasks: Readability assessment, in which our model achieves new state-of-the-art results; and essay scoring, in which the combination of our coherence vectors and other task-dependent features significantly improves the performance of a strong essay scorer.", "phrases": ["local coherence model", "adjacent sentence", "readability assessment", "essay scoring"], "overall_score": 1.6846019931606262, "scores": [1.0773417720677023, 0.9435553463626395, 0.8896212954054855, 0.5523382735162687], "rank_score": 0.8657141718380239} -{"id": "wang-etal-2018-semi-autoregressive", "title": "Semi-Autoregressive Neural Machine Translation", "abstract": "Existing approaches to neural machine translation are typically autoregressive models. While these models attain state-of-the-art translation quality, they suffer from low parallelizability and are thus slow at decoding long sequences. In this paper, we propose a novel model for fast sequence generation \u2014 the semi-autoregressive Transformer (SAT). &#13;
The SAT keeps the autoregressive property globally but relaxes it locally, and is thus able to produce multiple successive words in parallel at each time step. Experiments conducted on English-German and Chinese-English translation tasks show that the SAT achieves a good balance between translation quality and decoding speed. On WMT'14 English-German translation, the SAT achieves a 5.58x speedup while maintaining 88% translation quality, significantly better than the previous non-autoregressive methods. When producing two words at each time step, the SAT is almost lossless (only 1% degradation in BLEU score).", "phrases": ["translation quality", "semi-autoregressive transformer", "parallel", "time step"], "overall_score": 1.9931687547524162, "scores": [1.2808471972470155, 1.0967506348065172, 0.5506589511872112, 0.5342319835230567], "rank_score": 0.8656221916909501} -{"id": "che-liu-2010-jointly", "title": "Jointly Modeling WSD and SRL with Markov Logic", "abstract": "Semantic role labeling (SRL) and word sense disambiguation (WSD) are two fundamental tasks in natural language processing for finding a sentence-level semantic representation. To date, they have mostly been modeled in isolation. However, this approach neglects logical constraints between them. We therefore exploit pipeline systems to verify that automatic all-words sense disambiguation can help semantic role labeling and vice versa. We further propose a Markov logic model that jointly labels semantic roles and disambiguates all word senses. By evaluating our model on the OntoNotes 3.0 data, we show that this joint approach leads to higher performance for word sense disambiguation and semantic role labeling than the pipeline approaches.", "phrases": ["wsd", "srl", "markov logic"], "overall_score": 0.9509351215977975, "scores": [0.8996710644043738, 0.8720560098941768, 0.8250082746678796], "rank_score": 0.8655784496554767} -{"id": "carpuat-simard-2012-trouble", "title": "The Trouble with SMT Consistency", "abstract": "SMT typically models translation at the sentence level, ignoring wider document context. Does this hurt the consistency of translated documents? Using a phrase-based SMT system in various data conditions, we show that SMT translates documents remarkably consistently, even without document knowledge. Nevertheless, translation inconsistencies often indicate translation errors. However, unlike in human translation, these errors are rarely due to terminology inconsistency. They are more often symptoms of deeper issues with SMT models instead.", "phrases": ["trouble", "consistency", "smt system", "translation error"], "overall_score": 2.07516325833825, "scores": [0.8235824185845173, 1.498510841354278, 0.6136837153411117, 0.5258642096022185], "rank_score": 0.8654102962205314} -{"id": "gui-etal-2017-question", "title": "A Question Answering Approach for Emotion Cause Extraction", "abstract": "Emotion cause extraction aims to identify the reasons behind a certain emotion expressed in text. It is a much more difficult task compared to emotion classification. Inspired by recent advances in using deep memory networks for question answering (QA), we propose a new approach which considers emotion cause identification as a reading comprehension task in QA. Inspired by convolutional neural networks, we propose a new mechanism to store relevant context in different memory slots to model context information. Our proposed approach can extract both word-level sequence features and lexical features. &#13;
Performance evaluation shows that our method achieves the state-of-the-art performance on a recently released emotion cause dataset, outperforming a number of competitive baselines by at least 3.01% in F-measure.", "phrases": ["emotion", "extraction", "clause"], "overall_score": 2.219648997075779, "scores": [1.5056410487262009, 0.5454312666775887, 0.545059640128277], "rank_score": 0.8653773185106889} -{"id": "hua-etal-2019-argument-generation", "title": "Argument Generation with Retrieval, Planning, and Realization", "abstract": "Automatic argument generation is an appealing but challenging task. In this paper, we study the specific problem of counter-argument generation, and present a novel framework, CANDELA. It consists of a powerful retrieval system and a novel two-step generation model, where a text planning decoder first decides on the main talking points and a proper language style for each sentence, then a content realization decoder reflects the decisions and constructs an informative paragraph-level argument. Furthermore, our generation model is empowered by a retrieval system indexed with 12 million articles collected from Wikipedia and popular English news media, which provides access to high-quality content with diversity. Automatic evaluation on a large-scale dataset collected from Reddit shows that our model yields significantly higher BLEU, ROUGE, and METEOR scores than the state-of-the-art and non-trivial comparisons. Human evaluation further indicates that our system arguments are more appropriate for refutation and richer in content.", "phrases": ["retrieval", "argument generation", "stance"], "overall_score": 2.0744369781954983, "scores": [0.9486433459045243, 0.7967984730770137, 0.8498804226457294], "rank_score": 0.8651074138757558} -{"id": "belz-gatt-2007-attribute", "title": "The attribute selection for GRE challenge: overview and evaluation results", "abstract": "The Attribute Selection for Generating Referring Expressions (ASGRE) Challenge was the first shared-task evaluation challenge in the field of Natural Language Generation. Six teams submitted a total of 22 systems. All submitted systems were tested automatically for minimality, uniqueness and \u2018humanlikeness\u2019. In addition, the output of 15 systems was tested in a task-based experiment where subjects were asked to identify referents, and the speed and accuracy of identification was measured. This report describes the", "phrases": ["attribute selection", "nlg", "expression generation"], "overall_score": 2.218793764305927, "scores": [1.495936969611416, 0.5640817137893093, 0.5351129801364555], "rank_score": 0.8650438878457268} -{"id": "huang-yates-2014-improving", "title": "Improving Word Alignment Using Linguistic Code Switching Data", "abstract": "Linguistic Code Switching (LCS) is a situation where two or more languages show up in the context of a single conversation. For example, in English-Chinese code switching, there might be a sentence that is entirely in Chinese except for the final English word \u201cmeeting\u201d (\u201cWe will have a meeting in 15 minutes\u201d). Traditional machine translation (MT) systems treat LCS data as noise, or just as regular sentences. However, if LCS data is processed intelligently, it can provide a useful signal for training word alignment and MT models. Moreover, LCS data is from non-news sources which can enhance the diversity of training data for MT. In this paper, we first extract constraints from this code switching data and then incorporate them into a word alignment model training procedure. &#13;
We also show that by using the code switching data, we can jointly train a word alignment model and a language model using co-training. Our techniques for incorporating LCS data improve by 2.64 BLEU points over a baseline MT system trained using only standard sentence-aligned corpora.", "phrases": ["word alignment", "noise", "code-switching"], "overall_score": 1.3922195834128468, "scores": [1.507285035960448, 0.5495325033976634, 0.5382864518991824], "rank_score": 0.8650346637524312} -{"id": "mohammad-2011-even", "title": "Even the Abstract have Color: Consensus in Word-Colour Associations", "abstract": "Colour is a key component in the successful dissemination of information. Since many real-world concepts are associated with colour, for example danger with red, linguistic information is often complemented with the use of appropriate colours in information visualization and product marketing. Yet, there is no comprehensive resource that captures concept--colour associations. We present a method to create a large word--colour association lexicon by crowdsourcing. A word-choice question was used to obtain sense-level annotations and to ensure data quality. We focus especially on abstract concepts and emotions to show that even they tend to have strong colour associations. Thus, using the right colours can not only improve semantic coherence, but also inspire the desired emotional response.", "phrases": ["color", "association", "crowdsourcing"], "overall_score": 1.199122232387505, "scores": [0.874592035739246, 0.8799722680238202, 0.8403872433646686], "rank_score": 0.8649838490425784} -{"id": "duboue-mckeown-2003-statistical", "title": "Statistical Acquisition of Content Selection Rules for Natural Language Generation", "abstract": "A Natural Language Generation system produces text using as input semantic data. One of its very first tasks is to decide which pieces of information to convey in the output. This task, called Content Selection, is quite domain dependent, requiring considerable re-engineering to transport the system from one scenario to another. In this paper, we present a method to acquire content selection rules automatically from a corpus of text and associated semantics. Our proposed technique was evaluated by comparing its output with information selected by human authors in unseen texts, where we were able to filter half the input data set without loss of recall.", "phrases": ["content selection rule", "statistical approach", "biographical summary"], "overall_score": 2.1490060666051343, "scores": [1.1588759346352298, 0.8801518862338866, 0.5554431627251483], "rank_score": 0.8648236611980883} -{"id": "hong-ong-2009-automatically", "title": "Automatically Extracting Word Relationships as Templates for Pun Generation", "abstract": "Computational models can be built to capture the syntactic structures and semantic patterns of human punning riddles. This model is then used as rules by a computer to generate its own puns. This paper presents T-PEG, a system that utilizes phonetic and semantic linguistic resources to automatically extract word relationships in puns and store the knowledge in template form. 

Given a set of training examples, it is able to extract 69.2% usable templates, resulting in computer-generated puns that received an average score of 2.13 as compared to 2.70 for human-generated puns from user feedback.", "phrases": ["template", "pun generation", "riddle", "linguistic resource", "creativity"], "overall_score": 1.9903756152902259, "scores": [1.4259030568142803, 0.902366526879957, 0.8330578643509912, 0.6008822445732047, 0.5598360405582403], "rank_score": 0.8644091466353346} -{"id": "huang-etal-2008-quality", "title": "Quality Assurance of Automatic Annotation of Very Large Corpora: a Study based on heterogeneous Tagging System", "abstract": "We propose a set of heuristics for improving annotation quality of very large corpora efficiently. The Xinhua News portion of the Chinese Gigaword Corpus was tagged independently with both the Peking University ICL tagset and the Academia Sinica CKIP tagset. The corpus-based POS tags mapping will serve as the basis of the possible contrast in grammatical systems between PRC and Taiwan. And it can serve as the basic model for mapping between the CKIP and ICL tagging systems for any data.", "phrases": ["large corpora", "heterogeneous tagging system", "quality assurance"], "overall_score": 0.9496254691917831, "scores": [0.904131760467671, 0.8639243913214059, 0.8251029061977516], "rank_score": 0.8643863526622763} -{"id": "liu-etal-2018-exploiting-contextual", "title": "Exploiting Contextual Information via Dynamic Memory Network for Event Detection", "abstract": "The task of event detection involves identifying and categorizing event triggers. Contextual information has been shown effective on the task. However, existing methods which utilize contextual information only process the context once. We argue that the context can be better exploited by processing the context multiple times, allowing the model to perform complex reasoning and to generate better context representation, thus improving the overall performance. Meanwhile, dynamic memory network (DMN) has demonstrated promising capability in capturing contextual information and has been applied successfully to various tasks. In light of the multi-hop mechanism of the DMN to model the context, we propose the trigger detection dynamic memory network (TD-DMN) to tackle the event detection problem. We performed a five-fold cross-validation on the ACE-2005 dataset and experimental results show that the multi-hop mechanism does improve the performance and the proposed model achieves best F1 score compared to the state-of-the-art methods.", "phrases": ["contextual information", "dynamic memory network", "event detection"], "overall_score": 0.9495596098433, "scores": [0.8816507918070113, 0.879648036632897, 0.8316803862596324], "rank_score": 0.8643264048998468} -{"id": "strapparava-etal-2004-pattern", "title": "Pattern abstraction and term similarity for Word Sense Disambiguation: IRST at Senseval-3", "abstract": "This paper summarizes IRST\u2019s participation in Senseval-3. We participated both in the English all-words task and in some lexical sample tasks (English, Basque, Catalan, Italian, Spanish). We followed two perspectives. On one hand, for the all-words task, we tried to refine the Domain Driven Disambiguation that we presented at Senseval-2. The refinements consist of both exploiting a new technique (Domain Relevance Estimation) for domain detection in texts, and experimenting with the use of Latent Semantic Analysis to avoid reliance on manually annotated domain resources (e.g. 

WORDNET DOMAINS). On the other hand, for the lexical sample tasks, we explored the direction of pattern abstraction and we demonstrated the feasibility of leveraging external knowledge using kernel methods.", "phrases": ["word sense disambiguation", "senseval-3", "kernel method", "pattern abstraction"], "overall_score": 1.390487720049201, "scores": [0.9421211450336363, 0.8575697632447378, 0.8207960870753006, 0.8353473908171816], "rank_score": 0.8639585965427141} -{"id": "qian-etal-2016-speculation", "title": "Speculation and Negation Scope Detection via Convolutional Neural Networks", "abstract": "Speculation and negation are important information to identify text factuality. In this paper, we propose a Convolutional Neural Network (CNN)-based model with probabilistic weighted average pooling to address speculation and negation scope detection. In particular, our CNN-based model extracts those meaningful features from various syntactic paths between the cues and the candidate tokens in both constituency and dependency parse trees. Evaluation on BioScope shows that our CNN-based model significantly outperforms the state-of-the-art systems on Abstracts, a sub-corpus in BioScope, and achieves comparable performances on Clinical Records, another sub-corpus in BioScope.", "phrases": ["negation scope detection", "convolutional neural network", "candidate token", "speculation"], "overall_score": 1.796508699373611, "scores": [0.9226198611062931, 0.7863723962449239, 1.2120842522840873, 0.5346757456982826], "rank_score": 0.8639380638333968} -{"id": "rozovskaya-roth-2010-generating", "title": "Generating Confusion Sets for Context-Sensitive Error Correction", "abstract": "In this paper, we consider the problem of generating candidate corrections for the task of correcting errors in text. We focus on the task of correcting errors in preposition usage made by non-native English speakers, using discriminative classifiers. The standard approach to the problem assumes that the set of candidate corrections for a preposition consists of all preposition choices participating in the task. We determine likely preposition confusions using an annotated corpus of non-native text and use this knowledge to produce smaller sets of candidates. We propose several methods of restricting candidate sets. These methods exclude candidate prepositions that are not observed as valid corrections in the annotated corpus and take into account the likelihood of each preposition confusion in the non-native text. We find that restricting candidates to those that are observed in the non-native data improves both the precision and the recall compared to the approach that views all prepositions as possible candidates. Furthermore, the approach that takes into account the likelihood of each preposition confusion is shown to be the most effective.", "phrases": ["confusion set", "error correction", "non-native data", "preposition error", "writer"], "overall_score": 2.5435076805064756, "scores": [1.4028011624542858, 0.9043285221620675, 0.8551349919671165, 0.5844338758470664, 0.5724734502892026], "rank_score": 0.8638344005439478} -{"id": "ippolito-etal-2019-unsupervised", "title": "Unsupervised Hierarchical Story Infilling", "abstract": "Story infilling involves predicting words to go into a missing span from a story. This challenging task has the potential to transform interactive tools for creative writing. 

However, state-of-the-art conditional language models have trouble balancing fluency and coherence with novelty and diversity. We address this limitation with a hierarchical model which first selects a set of rare words and then generates text conditioned on that set. By relegating the high entropy task of picking rare words to a word-sampling model, the second-stage model conditioned on those words can achieve high fluency and coherence by searching for likely sentences, without sacrificing diversity.", "phrases": ["story", "span", "rare word", "text infilling"], "overall_score": 1.3900722071083846, "scores": [1.2310922070223838, 1.0554784801969797, 0.614752115848638, 0.5534788922792923], "rank_score": 0.8637004238368234} -{"id": "bhatia-etal-2010-empty", "title": "Empty Categories in a Hindi Treebank", "abstract": "We are in the process of creating a multi-representational and multi-layered treebank for Hindi/Urdu (Palmer et al., 2009), which has three main layers: dependency structure, predicate-argument structure (PropBank), and phrase structure. This paper discusses an important issue in treebank design which is often neglected: the use of empty categories (ECs). All three levels of representation make use of ECs. We make a high-level distinction between two types of ECs, trace and silent, on the basis of whether they are postulated to mark displacement or not. Each type is further refined into several subtypes based on the underlying linguistic phenomena which the ECs are introduced to handle. This paper discusses the stages at which we add ECs to the Hindi/Urdu treebank and why. We investigate methodically the different types of ECs and their role in our syntactic and semantic representations. We also examine our decisions whether or not to coindex each type of ECs with other elements in the representation.", "phrases": ["hindi treebank", "predicate", "hutb"], "overall_score": 2.3945575410507502, "scores": [1.512291885380748, 0.5542107011806773, 0.5244596306364654], "rank_score": 0.863654072399297} -{"id": "schuff-etal-2017-annotation", "title": "Annotation, Modelling and Analysis of Fine-Grained Emotions on a Stance and Sentiment Detection Corpus", "abstract": "There is a rich variety of data sets for sentiment analysis (viz., polarity and subjectivity classification). For the more challenging task of detecting discrete emotions following the definitions of Ekman and Plutchik, however, there are much fewer data sets, and notably no resources for the social media domain. This paper contributes to closing this gap by extending the SemEval 2016 stance and sentiment dataset with emotion annotation. We (a) analyse annotation reliability and annotation merging; (b) investigate the relation between emotion annotation and the other annotation layers (stance, sentiment); (c) report modelling results as a baseline for future work.", "phrases": ["stance", "emotion annotation", "inter-annotator agreement"], "overall_score": 2.070872019380765, "scores": [0.7856149102006377, 0.9660418044312346, 0.8392054170958835], "rank_score": 0.8636207105759186} -{"id": "tseng-etal-2015-introduction", "title": "Introduction to SIGHAN 2015 Bake-off for Chinese Spelling Check", "abstract": "This paper introduces the SIGHAN 2015 Bake-off for Chinese Spelling Check, including task description, data preparation, performance metrics, and evaluation results. The competition reveals current state-of-the-art NLP techniques in dealing with Chinese spelling checking. 

All data sets with gold standards and evaluation tool used in this bake-off are publicly available for future research.", "phrases": ["sighan", "bake-off", "chinese spelling check", "language model", "correct candidate"], "overall_score": 2.496128043720562, "scores": [0.959027219677123, 0.8023917728337885, 0.7947929291544749, 1.2300128931127292, 0.5317803637969694], "rank_score": 0.863601035715017} -{"id": "stroppa-etal-2007-exploiting", "title": "Exploiting source similarity for SMT using context-informed features", "abstract": "In this paper, we introduce context informed features in a log-linear phrase-based SMT framework; these features enable us to exploit source similarity in addition to target similarity modeled by the language model. We present a memory-based classification framework that enables the estimation of these features while avoiding sparseness problems. We evaluate the performance of our approach on Italian-to-English and Chinese-to-English translation tasks using a state-of-the-art phrase-based SMT system, and report significant improvements for both BLEU and NIST scores when adding the context-informed features.", "phrases": ["source similarity", "context-informed feature", "phrase-based smt", "log-linear pb-smt system", "part-of-speech tag"], "overall_score": 2.4465270034187423, "scores": [1.7650330420147702, 0.8895350174084339, 0.6135419642915632, 0.5264360889458063, 0.5230373276251697], "rank_score": 0.8635166880571485} -{"id": "kiela-etal-2014-improving", "title": "Improving Multi-Modal Representations Using Image Dispersion: Why Less is Sometimes More", "abstract": "Models that learn semantic representations from both linguistic and perceptual input outperform text-only models in many contexts and better reflect human concept acquisition. However, experiments suggest that while the inclusion of perceptual input improves representations of certain concepts, it degrades the representations of others. We propose an unsupervised method to determine whether to include perceptual input for a concept, and show that it significantly improves the ability of multi-modal models to learn and represent word meanings. The method relies solely on image data, and can be applied to a variety of other NLP tasks.", "phrases": ["image dispersion", "inclusion", "unsupervised method", "visual information", "filter"], "overall_score": 2.338430586378998, "scores": [1.7129909507588525, 0.9408932250676932, 0.570085207453237, 0.5537452074251359, 0.5398393922811199], "rank_score": 0.8635107965972078} -{"id": "francois-etal-2014-flelex", "title": "FLELex: a graded lexical resource for French foreign learners", "abstract": "In this paper we present FLELex, the first graded lexicon for French as a foreign language (FFL) that reports word frequencies by difficulty level (according to the CEFR scale). It has been obtained from a tagged corpus of 777,000 words from available textbooks and simplified readers intended for FFL learners. 

Our goal is to freely provide this resource to the community to be used for a variety of purposes going from the assessment of the lexical difficulty of a text, to the selection of simpler words within text simplification systems, and also as a dictionary in assistive tools for writing.", "phrases": ["lexical resource", "french", "learner"], "overall_score": 1.7953168936283994, "scores": [0.8898597516694222, 0.8554306692587581, 0.8448043583336902], "rank_score": 0.8633649264206236} -{"id": "huang-etal-2019-ana", "title": "ANA at SemEval-2019 Task 3: Contextual Emotion detection in Conversations through hierarchical LSTMs and BERT", "abstract": "This paper describes the system submitted by ANA Team for the SemEval-2019 Task 3: EmoContext. We propose a novel Hierarchical LSTMs for Contextual Emotion Detection (HRLCE) model. It classifies the emotion of an utterance given its conversational context. The results show that, in this task, our HRLCE outperforms the most recent state-of-the-art text classification framework: BERT. We combine the results generated by BERT and HRLCE to achieve an overall score of 0.7709 which ranked 5th on the final leader board of the competition among 165 Teams.", "phrases": ["semeval-2019 task", "contextual emotion detection", "conversation"], "overall_score": 1.3892351192819308, "scores": [0.8998177525715034, 0.8724639728025901, 0.8172592104067654], "rank_score": 0.863180311926953} -{"id": "liu-etal-2011-automatic", "title": "Automatic Keyphrase Extraction by Bridging Vocabulary Gap", "abstract": "Keyphrase extraction aims to select a set of terms from a document as a short summary of the document. Most methods extract keyphrases according to their statistical properties in the given document. Appropriate keyphrases, however, are not always statistically significant or even do not appear in the given document. This makes a large vocabulary gap between a document and its keyphrases. In this paper, we consider that a document and its keyphrases both describe the same object but are written in two different languages. By regarding keyphrase extraction as a problem of translating from the language of documents to the language of keyphrases, we use word alignment models in statistical machine translation to learn translation probabilities between the words in documents and the words in keyphrases. According to the translation model, we suggest keyphrases given a new document. The suggested keyphrases are not necessarily statistically frequent in the document, which indicates that our method is more flexible and reliable. Experiments on news articles demonstrate that our method outperforms existing unsupervised methods on precision, recall and F-measure.", "phrases": ["keyphrase", "vocabulary gap", "large set"], "overall_score": 2.2136227918342346, "scores": [1.4640431806160117, 0.5831469614074322, 0.5418934810836411], "rank_score": 0.8630278743690282} -{"id": "qadir-riloff-2014-learning", "title": "Learning Emotion Indicators from Tweets: Hashtags, Hashtag Patterns, and Phrases", "abstract": "We present a weakly supervised approach for learning hashtags, hashtag patterns, and phrases associated with five emotions: AFFECTION, ANGER/RAGE, FEAR/ANXIETY, JOY, and SADNESS/DISAPPOINTMENT. Starting with seed hashtags to label an initial set of tweets, we train emotion classifiers and use them to learn new emotion hashtags and hashtag patterns. This process then repeats in a bootstrapping framework. 

Emotion phrases are also extracted from the learned hashtags and used to create phrase-based emotion classifiers. We show that the learned set of emotion indicators yields a substantial improvement in F-scores, ranging from +5% to +18% over baseline classifiers.", "phrases": ["emotion", "hashtag", "social medium"], "overall_score": 1.5462730679254924, "scores": [1.173074671650799, 0.8928470891584162, 0.5230525314684323], "rank_score": 0.8629914307592158} -{"id": "williams-2012-belief", "title": "A belief tracking challenge task for spoken dialog systems", "abstract": "Belief tracking is a promising technique for adding robustness to spoken dialog systems, but current research is fractured across different teams, techniques, and domains. This paper amplifies past informal discussions (Raux, 2011) to call for a belief tracking challenge task, based on the Spoken dialog challenge corpus (Black et al., 2011). Benefits, limitations, evaluation design issues, and next steps are presented.", "phrases": ["belief", "challenge task", "spoken dialog system"], "overall_score": 1.5462719024967755, "scores": [0.9335940819342687, 0.8688791916333404, 0.7864990673955843], "rank_score": 0.8629907803210645} -{"id": "velldal-oepen-2005-maximum", "title": "Maximum Entropy Models for Realization Ranking", "abstract": "In this paper we describe and evaluate different statistical models for the task of realization ranking, i.e. the problem of discriminating between competing surface realizations generated for a given input semantics. Three models are trained and tested: an n-gram language model, a discriminative maximum entropy model using structural features, and a combination of these two. Our realization component forms part of a larger, hybrid MT system.", "phrases": ["realization ranking", "log-linear model", "disambiguation model", "hpsg grammar"], "overall_score": 1.9871067831750646, "scores": [0.958582445289506, 1.090014836463707, 0.8338841212137754, 0.56947664057482], "rank_score": 0.862989510885452} -{"id": "chi-etal-2017-speaker", "title": "Speaker Role Contextual Modeling for Language Understanding and Dialogue Policy Learning", "abstract": "Language understanding (LU) and dialogue policy learning are two essential components in conversational systems. Human-human dialogues are not well-controlled and often random and unpredictable due to their own goals and speaking habits. This paper proposes a role-based contextual model to consider different speaker roles independently based on the various speaking patterns in the multi-turn dialogues. The experiments on the benchmark dataset show that the proposed role-based model successfully learns role-specific behavioral patterns for contextual encoding and then significantly improves language understanding and dialogue policy learning tasks.", "phrases": ["language understanding", "dialogue policy learning", "conversation"], "overall_score": 1.3888213559475147, "scores": [0.8866188541670091, 0.8207774954191371, 0.8813733293517749], "rank_score": 0.8629232263126404} -{"id": "ganea-hofmann-2017-deep", "title": "Deep Joint Entity Disambiguation with Local Neural Attention", "abstract": "We propose a novel deep learning model for joint document-level entity disambiguation, which leverages learned neural representations. Key components are entity embeddings, a neural attention mechanism over local context windows, and a differentiable joint inference stage for disambiguation. 

Our approach thereby combines benefits of deep learning with more traditional approaches such as graphical models and probabilistic mention-entity maps. Extensive experiments show that we are able to obtain competitive or state-of-the-art accuracy at moderate computational costs.", "phrases": ["disambiguation", "deep learning", "mention", "wikipedia"], "overall_score": 3.0914664770635967, "scores": [1.8321593072761122, 0.5525240661335288, 0.5349601980375833, 0.5311170135050574], "rank_score": 0.8626901462380705} -{"id": "afrin-litman-2018-annotation", "title": "Annotation and Classification of Sentence-level Revision Improvement", "abstract": "Studies of writing revisions rarely focus on revision quality. To address this issue, we introduce a corpus of between-draft revisions of student argumentative essays, annotated as to whether each revision improves essay quality. We demonstrate a potential usage of our annotations by developing a machine learning model to predict revision improvement. With the goal of expanding training data, we also extract revisions from a dataset edited by expert proofreaders. Our results indicate that blending expert and non-expert revisions increases model performance, with expert data particularly important for predicting low-quality revisions.", "phrases": ["revision", "student", "argumentative essay"], "overall_score": 1.7933619866975627, "scores": [1.154402864791886, 0.9058142404702609, 0.5270573394652058], "rank_score": 0.8624248149091177} -{"id": "ustun-etal-2020-udapter", "title": "UDapter: Language Adaptation for Truly Universal Dependency Parsing", "abstract": "Recent advances in multilingual dependency parsing have brought the idea of a truly universal parser closer to reality. However, cross-language interference and restrained model capacity remain major obstacles. To address this, we propose a novel multilingual task adaptation approach based on contextual parameter generation and adapter modules. This approach enables learning adapters via language embeddings while sharing model parameters across languages. It also allows for an easy but effective integration of existing linguistic typology features into the parsing network. The resulting parser, UDapter, outperforms strong monolingual and multilingual baselines on the majority of both high-resource and low-resource (zero-shot) languages, showing the success of the proposed adaptation approach. Our in-depth analyses show that soft parameter sharing via typological features is key to this success.", "phrases": ["adapter", "dependency parsing", "udapter", "multilingual bert"], "overall_score": 2.211308515636897, "scores": [0.7913937546859237, 1.4593510897232727, 0.6305580808821418, 0.5671994932131288], "rank_score": 0.8621256046261168} -{"id": "wu-etal-2020-perturbed", "title": "Perturbed Masking: Parameter-free Probing for Analyzing and Interpreting BERT", "abstract": "By introducing a small set of additional parameters, a probe learns to solve specific linguistic tasks (e.g., dependency parsing) in a supervised manner using feature representations (e.g., contextualized embeddings). The effectiveness of such probing tasks is taken as evidence that the pre-trained model encodes linguistic knowledge. However, this approach of evaluating a language model is undermined by the uncertainty of the amount of knowledge that is learned by the probe itself. Complementary to those works, we propose a parameter-free probing technique for analyzing pre-trained language models (e.g., BERT). 

Our method does not require direct supervision from the probing tasks, nor do we introduce additional parameters to the probing process. Our experiments on BERT show that syntactic trees recovered from BERT using our method are significantly better than linguistically-uninformed baselines. We further feed the empirically induced dependency structures into a downstream sentiment classification task and find its improvement compatible with or even superior to a human-designed dependency schema.", "phrases": ["bert", "perturbed", "masking technique"], "overall_score": 2.5380143624597866, "scores": [0.8770825739989022, 1.0957021480284814, 0.6131215036587289], "rank_score": 0.8619687418953709} -{"id": "yeh-chen-2019-flowdelta", "title": "FlowDelta: Modeling Flow Information Gain in Reasoning for Conversational Machine Comprehension", "abstract": "Conversational machine comprehension requires deep understanding of the dialogue flow, and the prior work proposed FlowQA to implicitly model the context representations in reasoning for better understanding. This paper proposes to explicitly model the information gain through the dialogue reasoning in order to allow the model to focus on more informative cues. The proposed model achieves state-of-the-art performance on the conversational QA dataset QuAC and the sequential instruction understanding dataset SCONE, which shows the effectiveness of the proposed mechanism and demonstrates its capability of generalization to different QA models and tasks.", "phrases": ["reasoning", "conversational machine comprehension", "flowdelta"], "overall_score": 1.5443953761296814, "scores": [0.9433038222543807, 0.8313285081806454, 0.811198082608731], "rank_score": 0.8619434710145857} -{"id": "li-etal-2021-search", "title": "Search from History and Reason for Future: Two-stage Reasoning on Temporal Knowledge Graphs", "abstract": "Temporal Knowledge Graphs (TKGs) have been developed and used in many different areas. Reasoning on TKGs that predicts potential facts (events) in the future brings great challenges to existing models. When facing a prediction task, human beings usually search useful historical information (i.e., clues) in their memories and then reason for future meticulously. Inspired by this mechanism, we propose CluSTeR to predict future facts in a two-stage manner, Clue Searching and Temporal Reasoning, accordingly. Specifically, at the clue searching stage, CluSTeR learns a beam search policy via reinforcement learning (RL) to induce multiple clues from historical facts. At the temporal reasoning stage, it adopts a graph convolution network based sequence method to deduce answers from clues. Experiments on four datasets demonstrate the substantial advantages of CluSTeR compared with the state-of-the-art methods. Moreover, the clues found by CluSTeR further provide interpretability for the results.", "phrases": ["reason", "future", "temporal knowledge graphs"], "overall_score": 0.9469324300505945, "scores": [0.9456039199827359, 0.8417833401076013, 0.7984178683010377], "rank_score": 0.8619350427971249} -{"id": "da-san-martino-etal-2020-semeval", "title": "SemEval-2020 Task 11: Detection of Propaganda Techniques in News Articles", "abstract": "We present the results and the main findings of SemEval-2020 Task 11 on Detection of Propaganda Techniques in News Articles. The task featured two subtasks. Subtask SI is about Span Identification: given a plain-text document, spot the specific text fragments containing propaganda. 

Subtask TC is about Technique Classification: given a specific text fragment, in the context of a full document, determine the propaganda technique it uses, choosing from an inventory of 14 possible propaganda techniques. The task attracted a large number of participants: 250 teams signed up to participate and 44 made a submission on the test set. In this paper, we present the task, analyze the results, and discuss the system submissions and the methods they used. For both subtasks, the best systems used pre-trained Transformers and ensembles.", "phrases": ["propaganda techniques", "semeval-2020 task", "news articles"], "overall_score": 2.5377719567867247, "scores": [0.87820761708554, 0.8552392371724618, 0.8522123916046772], "rank_score": 0.8618864152875596} -{"id": "salehi-etal-2014-using", "title": "Using Distributional Similarity of Multi-way Translations to Predict Multiword Expression Compositionality", "abstract": "We predict the compositionality of multiword expressions using distributional similarity between each component word and the overall expression, based on translations into multiple languages. We evaluate the method over English noun compounds, English verb particle constructions and German noun compounds. We show that the estimation of compositionality is improved when using translations into multiple languages, as compared to simply using distributional similarity in the source language. We further find that string similarity complements distributional similarity. 1 Compositionality of MWEs Multiword expressions (hereafter MWEs) are combinations of words which are lexically, syntactically, semantically or statistically idiosyncratic (Sag et al., 2002; Baldwin and Kim, 2009). Much research has been carried out on the extraction and identification of MWEs in English (Schone and Jurafsky, 2001; Pecina, 2008; Fazly et al., 2009) and other languages (Dias, 2003; Evert and Krenn, 2005; Salehi et al., 2012). (In this paper, we follow Baldwin and Kim (2009) in considering MWE \u201cidentification\u201d to be a token-level disambiguation task, and MWE \u201cextraction\u201d to be a type-level lexicon induction task.) However, considerably less work has addressed the task of predicting the meaning of MWEs, especially in non-English languages. As a step in this direction, the focus of this study is on predicting the compositionality of MWEs. An MWE is fully compositional if its meaning is predictable from its component words, and it is non-compositional (or idiomatic) if not. For example, stand up \u201crise to one\u2019s feet\u201d is compositional, because its meaning is clear from the meaning of the components stand and up. However, the meaning of strike up \u201cto start playing\u201d is largely unpredictable from the component words strike and up. In this study, following McCarthy et al. (2003) and Reddy et al. (2011), we consider compositionality to be graded, and aim to predict the degree of compositionality. For example, in the dataset of Reddy et al. (2011), climate change is judged to be 99% compositional, while silver screen is 48% compositional and ivory tower is 9% compositional. Formally, we model compositionality prediction as a regression task. An explicit handling of MWEs has been shown to be useful in NLP applications (Ramisch, 2012). As an example, Carpuat and Diab (2010) proposed two strategies for integrating MWEs into statistical machine translation. 

They show that even a large scale bilingual corpus cannot capture all the necessary information to translate MWEs, and that in adding the facility to model the compositionality of MWEs into their system, they could improve translation quality. Acosta et al. (2011) showed that treating non-compositional MWEs as a single unit in information retrieval improves retrieval effectiveness. For example, while searching for documents related to ivory tower, we are almost certainly not interested in documents relating to elephant tusks. Our approach is to use a large-scale multi-way translation lexicon to source translations of MWEs and their component words, and then model the relative similarity between each of the component words and the MWE, using distributional similarity based on monolingual corpora for the source language and each of the target languages. Our hypothesis is that using distributional similarity in more than one language will improve the prediction of compositionality. Importantly, in order to make the method as language-independent and", "phrases": ["distributional similarity", "german noun compound", "source language", "non-compositional mwe"], "overall_score": 2.2105065670116764, "scores": [1.8063646131190148, 0.5735295389888697, 0.5416847174423212, 0.5256729221176922], "rank_score": 0.8618129479169745} -{"id": "snyder-etal-2010-statistical", "title": "A Statistical Model for Lost Language Decipherment", "abstract": "In this paper we propose a method for the automatic decipherment of lost languages. Given a non-parallel corpus in a known related language, our model produces both alphabetic mappings and translations of words into their corresponding cognates. We employ a non-parametric Bayesian framework to simultaneously capture both low-level character mappings and high-level morphemic correspondences. This formulation enables us to encode some of the linguistic intuitions that have guided human decipherers. When applied to the ancient Semitic language Ugaritic, the model correctly maps 29 of 30 letters to their Hebrew counterparts, and deduces the correct Hebrew cognate for 60% of the Ugaritic words which have cognates in Hebrew.", "phrases": ["decipherment", "hebrew", "dead language"], "overall_score": 2.2102996518040143, "scores": [1.178854445718893, 0.8421227232849707, 0.5642196638780421], "rank_score": 0.8617322776273019} -{"id": "emelin-etal-2021-moral", "title": "Moral Stories: Situated Reasoning about Norms, Intents, Actions, and their Consequences", "abstract": "In social settings, much of human behavior is governed by unspoken rules of conduct rooted in societal norms. For artificial systems to be fully integrated into social environments, adherence to such norms is a central prerequisite. To investigate whether language generation models can serve as behavioral priors for systems deployed in social settings, we evaluate their ability to generate action descriptions that achieve predefined goals under normative constraints. Moreover, we examine if models can anticipate likely consequences of actions that either observe or violate known norms, or explain why certain actions are preferable by generating relevant norm hypotheses. For this purpose, we introduce Moral Stories, a crowd-sourced dataset of structured, branching narratives for the study of grounded, goal-oriented social reasoning. 
Finally, we propose decoding strategies that combine multiple expert models to significantly improve the quality of generated actions, consequences, and norms compared to strong baselines.", "phrases": ["action", "goal-oriented social reasoning", "moral story"], "overall_score": 1.6767021860293543, "scores": [0.9314802726006713, 1.0494545357112721, 0.6040286138327244], "rank_score": 0.8616544740482226} -{"id": "peng-etal-2014-classifying", "title": "Classifying Idiomatic and Literal Expressions Using Topic Models and Intensity of Emotions", "abstract": "We describe an algorithm for automatic classification of idiomatic and literal expressions. Our starting point is that words in a given text segment, such as a paragraph, that are high-ranking representatives of a common topic of discussion are less likely to be a part of an idiomatic expression. Our additional hypothesis is that contexts in which idioms occur, typically, are more affective and therefore, we incorporate a simple analysis of the intensity of the emotions expressed by the contexts. We investigate the bag of words topic representation of one to three paragraphs containing an expression that should be classified as idiomatic or literal (a target phrase). We extract topics from paragraphs containing idioms and from paragraphs containing literals using an unsupervised clustering method, Latent Dirichlet Allocation (LDA) (Blei et al., 2003). Since idiomatic expressions exhibit the property of non-compositionality, we assume that they usually present different semantics than the words used in the local topic. We treat idioms as semantic outliers, and the identification of a semantic shift as outlier detection. Thus, this topic representation allows us to differentiate idioms from literals using local semantic contexts. Our results are encouraging.", "phrases": ["emotion", "paragraph", "idiom", "word topic representation"], "overall_score": 2.209545685452107, "scores": [0.8576663159124133, 1.485126251652203, 0.5777581905258512, 0.5252025532167569], "rank_score": 0.8614383278268062} -{"id": "delbrouck-etal-2020-transformer", "title": "A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis", "abstract": "Understanding expressed sentiment and emotions are two crucial factors in human multimodal language. This paper describes a Transformer-based joint-encoding (TBJE) for the task of Emotion Recognition and Sentiment Analysis. In addition to using the Transformer architecture, our approach relies on a modular co-attention and a glimpse layer to jointly encode one or more modalities. The proposed solution has also been submitted to the ACL20: Second Grand-Challenge on Multimodal Language to be evaluated on the CMU-MOSEI dataset. The code to replicate the presented experiments is open-source.", "phrases": ["joint-encoding", "emotion recognition", "sentiment analysis"], "overall_score": 1.67627754031526, "scores": [0.8946943879103536, 0.8799807653060356, 0.8096335947425765], "rank_score": 0.8614362493196551} -{"id": "wubben-etal-2010-paraphrase", "title": "Paraphrase Generation as Monolingual Translation: Data and Evaluation", "abstract": "In this paper we investigate the automatic generation and evaluation of sentential paraphrases. We describe a method for generating sentential paraphrases by using a large aligned monolingual corpus of news headlines acquired automatically from Google News and a standard Phrase-Based Machine Translation (PBMT) framework. 

The output of this system is compared to a word substitution baseline. Human judges prefer the PBMT paraphrasing system over the word substitution system. We demonstrate that BLEU correlates well with human judgements provided that the generated paraphrased sentence is sufficiently different from the source sentence.", "phrases": ["monolingual translation", "paraphrase generation", "phrase-based smt framework", "machine paclic"], "overall_score": 2.06456646531274, "scores": [1.4638578317622355, 0.904640118319367, 0.5415856032575743, 0.5338808077955248], "rank_score": 0.8609910902836753} -{"id": "fu-etal-2020-sscr", "title": "SSCR: Iterative Language-Based Image Editing via Self-Supervised Counterfactual Reasoning", "abstract": "Iterative Language-Based Image Editing (ILBIE) tasks follow iterative instructions to edit images step by step. Data scarcity is a significant issue for ILBIE as it is challenging to collect large-scale examples of images before and after instruction-based changes. Yet, humans still accomplish these editing tasks even when presented with an unfamiliar image-instruction pair. Such ability results from counterfactual thinking, the ability to think about possible alternatives to events that have happened already. In this paper, we introduce a Self-Supervised Counterfactual Reasoning (SSCR) framework that incorporates counterfactual thinking to overcome data scarcity. SSCR allows the model to consider out-of-distribution instructions paired with previous images. With the help of cross-task consistency (CTC), we train these counterfactual instructions in a self-supervised scenario. Extensive results show that SSCR improves the correctness of ILBIE in terms of both object identity and position, establishing a new state of the art (SOTA) on two ILBIE datasets (i-CLEVR and CoDraw). Even with only 50% of the training data, SSCR achieves a comparable result to using complete data.", "phrases": ["image editing", "self-supervised counterfactual reasoning", "sscr"], "overall_score": 0.9458337908155637, "scores": [0.8953545156485306, 0.8757451522266906, 0.8117053869332445], "rank_score": 0.8609350182694886} -{"id": "szpektor-etal-2004-scaling", "title": "Scaling Web-based Acquisition of Entailment Relations", "abstract": "Paraphrase recognition is a critical step for natural language interpretation. Accordingly, many NLP applications would benefit from high coverage knowledge bases of paraphrases. However, the scalability of state-of-the-art paraphrase acquisition approaches is still limited. We present a fully unsupervised learning algorithm for Web-based extraction of entailment relations, an extended model of paraphrases. We focus on increased scalability and generality with respect to prior work, eventually aiming at a full scale knowledge base. Our current implementation of the algorithm takes as its input a verb lexicon and for each verb searches the Web for related syntactic entailment templates. 

Experiments show promising results with respect to the ultimate goal, achieving much better scalability than prior Web-based methods.", "phrases": ["many nlp application", "paraphrase acquisition", "web", "entailment rule"], "overall_score": 2.89839590230504, "scores": [1.4486717982433914, 0.9275695221139194, 0.5442446232368627, 0.5225093245901277], "rank_score": 0.8607488170460753} -{"id": "mohammady-ardehaly-culotta-2015-inferring", "title": "Inferring latent attributes of Twitter users with label regularization", "abstract": "Inferring latent attributes of online users has many applications in public health, politics, and marketing. Most existing approaches rely on supervised learning algorithms, which require manual data annotation and therefore are costly to develop and adapt over time. In this paper, we propose a lightly supervised approach based on label regularization to infer the age, ethnicity, and political orientation of Twitter users. Our approach learns from a heterogeneous collection of soft constraints derived from Census demographics, trends in baby names, and Twitter accounts that are emblematic of class labels. To counteract the imprecision of such constraints, we compare several constraint selection algorithms that optimize classification accuracy on a tuning set. We find that using no user-annotated data, our approach is within 2% of a fully supervised baseline for three of four tasks. Using a small set of labeled data for tuning further improves accuracy on all tasks.", "phrases": ["latent attribute", "twitter user", "label regularization"], "overall_score": 0.9453948797888029, "scores": [0.8776195010289027, 0.8632184041646923, 0.8407686075142], "rank_score": 0.8605355042359317} -{"id": "pan-etal-2011-annotating", "title": "Annotating and Learning Event Durations in Text", "abstract": "This article presents our work on constructing a corpus of news articles in which events are annotated for estimated bounds on their duration, and automatically learning from this corpus. We describe the annotation guidelines, the event classes we categorized to reduce gross discrepancies in inter-annotator judgments, and our use of normal distributions to model vague and implicit temporal information and to measure inter-annotator agreement for these event duration distributions. We then show that machine learning techniques applied to this data can produce coarse-grained event duration information automatically, considerably outperforming a baseline and approaching human performance. The methods described here should be applicable to other kinds of vague but substantive information in texts.", "phrases": ["duration", "annotating", "timebank", "news story", "full-length weblog"], "overall_score": 1.9811710598936318, "scores": [0.8804932133220875, 1.4597244526879278, 0.886670527712445, 0.5448531074856968, 0.5303169938829483], "rank_score": 0.860411659018221} -{"id": "cho-etal-2012-segmentation", "title": "Segmentation and punctuation prediction in speech language translation using a monolingual translation system", "abstract": "In spoken language translation (SLT), finding proper segmentation and reconstructing punctuation marks are not only significant but also challenging tasks. In this paper we present our recent work on speech translation quality analysis for German-English by improving sentence segmentation and punctuation. 
From oracle experiments, we show an upper bound on translation quality if we had human-generated segmentation and punctuation on the output stream of speech recognition systems. In our oracle experiments we gain 1.78 BLEU points of improvement on the lecture test set. We build a monolingual translation system from German to German implementing segmentation and punctuation prediction as a machine translation task. Using the monolingual translation system we get an improvement of 1.53 BLEU points on the lecture test set, which is comparable to the upper bound drawn by the oracle experiments.", "phrases": ["punctuation prediction", "monolingual translation system", "segmentation"], "overall_score": 1.1927543691536033, "scores": [0.9122226633134439, 0.8766452982459377, 0.7923032585054397], "rank_score": 0.8603904066882738} -{"id": "mukerjee-etal-2006-detecting", "title": "Detecting Complex Predicates in Hindi using POS Projection across Parallel Corpora", "abstract": "Complex Predicates or CPs are multiword complexes functioning as single verbal units. CPs are particularly pervasive in Hindi and other Indo-Aryan languages, but a usage account driven by corpus-based identification of these constructs has not been possible since single-language systems based on rules and statistical approaches require reliable tools (POS taggers, parsers, etc.) that are unavailable for Hindi. This paper highlights the development of the first such database based on the simple idea of projecting POS tags across an English-Hindi parallel corpus. The CP types considered include adjective-verb (AV), noun-verb (NV), adverb-verb (Adv-V), and verb-verb (VV) composites. CPs are hypothesized where a verb in English is projected onto a multi-word sequence in Hindi. While this process misses some CPs, those that are detected appear to be more reliable (83% precision, 46% recall). The resulting database lists usage instances of 1439 CPs in 4400 sentences.", "phrases": ["complex predicates", "pos projection", "serial verb"], "overall_score": 1.7888257249205164, "scores": [0.8628037482600601, 0.8397857905972127, 0.8781404635001614], "rank_score": 0.8602433341191448} -{"id": "dhingra-etal-2019-handling", "title": "Handling Divergent Reference Texts when Evaluating Table-to-Text Generation", "abstract": "Automatically constructed datasets for generating text from semi-structured data (tables), such as WikiBio, often contain reference texts that diverge from the information in the corresponding semi-structured data. We show that metrics which rely solely on the reference texts, such as BLEU and ROUGE, show poor correlation with human judgments when those references diverge. We propose a new metric, PARENT, which aligns n-grams from the reference and generated texts to the semi-structured data before computing their precision and recall. Through a large scale human evaluation study of table-to-text models for WikiBio, we show that PARENT correlates with human judgments better than existing text generation metrics. We also adapt and evaluate the information extraction based evaluation proposed by Wiseman et al (2017), and show that PARENT has comparable correlation to it, while being easier to use. 

We show that PARENT is also applicable when the reference texts are elicited from humans using the data from the WebNLG challenge.", "phrases": ["semi-structured data", "table", "data-to-text generation"], "overall_score": 2.328609447711862, "scores": [1.0480137236733962, 0.9965633452867062, 0.5350753836762138], "rank_score": 0.8598841508787721} -{"id": "cabezas-garcia-san-martin-2017-semantic", "title": "Semantic annotation to characterize contextual variation in terminological noun compounds: a pilot study", "abstract": "Noun compounds (NCs) are semantically complex and not fully compositional, as is often assumed. This paper presents a pilot study regarding the semantic annotation of environmental NCs with a view to accessing their semantics and exploring their domain-based contextual variation. Our results showed that the semantic annotation of NCs afforded important insights into how context impacts their conceptualization.", "phrases": ["contextual variation", "noun compound", "semantic annotation"], "overall_score": 0.9446322775534228, "scores": [0.8861016342925419, 0.8544835483216046, 0.8389388786867803], "rank_score": 0.8598413537669756} -{"id": "zhang-litman-2015-annotation", "title": "Annotation and Classification of Argumentative Writing Revisions", "abstract": "This paper explores the annotation and classification of students\u2019 revision behaviors in argumentative writing. A sentence-level revision schema is proposed to capture why and how students make revisions. Based on the proposed schema, a small corpus of student essays and revisions was annotated. Studies show that manual annotation is reliable with the schema and the annotated information helpful for revision analysis. Furthermore, features and methods are explored for the automatic classification of revisions. Intrinsic evaluations demonstrate promising performance in high-level revision classification (surface vs. text-based). Extrinsic evaluations demonstrate that our method for automatic revision classification can be used to predict a writer\u2019s improvement.", "phrases": ["argumentative writing", "revision", "student essay"], "overall_score": 1.9794141146639117, "scores": [1.0519856450961562, 0.9940364451378726, 0.5329237919658132], "rank_score": 0.8596486273999474} -{"id": "gaillard-etal-2010-query", "title": "Query translation using Wikipedia-based resources for analysis and disambiguation", "abstract": "This work investigates query translation using only Wikipedia-based resources in a two-step approach: analysis and disambiguation. After arguing that data mined from Wikipedia is particularly relevant to query translation, both from a lexical and a semantic perspective, we detail the implementation of the approach. In the analysis phase, lexical units are extracted from queries and associated to several possible translations using a Wikipedia-based bilingual dictionary. During the second phase, one translation is chosen amongst the many candidates, based on topic homogeneity, asserted with the help of semantic information carried by categories of Wikipedia articles. 

We report promising results regarding translation accuracy.", "phrases": ["wikipedia-based resource", "disambiguation", "query translation"], "overall_score": 0.9442115674906466, "scores": [0.8879597463817045, 0.8776925042654379, 0.8127229702472574], "rank_score": 0.8594584069648} -{"id": "wen-etal-2016-multi", "title": "Multi-domain Neural Network Language Generation for Spoken Dialogue Systems", "abstract": "Moving from limited-domain natural language generation (NLG) to open domain is difficult because the number of semantic input combinations grows exponentially with the number of domains. Therefore, it is important to leverage existing resources and exploit similarities between domains to facilitate domain adaptation. In this paper, we propose a procedure to train multi-domain, Recurrent Neural Network-based (RNN) language generators via multiple adaptation steps. In this procedure, a model is first trained on counterfeited data synthesised from an out-of-domain dataset, and then fine tuned on a small set of in-domain utterances with a discriminative objective function. Corpus-based evaluation results show that the proposed procedure can achieve competitive performance in terms of BLEU score and slot error rate while significantly reducing the data needed to train generators in new, unseen domains. In subjective testing, human judges confirm that the procedure greatly improves generator performance when only a small amount of data is available in the domain.", "phrases": ["language generation", "dialogue system", "domain adaptation", "attentive encoder-decoder"], "overall_score": 3.0306540856637327, "scores": [1.4831849110788633, 0.8375799475995375, 0.5863928993144052, 0.5305555041741297], "rank_score": 0.8594283155417339} -{"id": "feng-etal-2020-doc2dial", "title": "doc2dial: A Goal-Oriented Document-Grounded Dialogue Dataset", "abstract": "We introduce doc2dial, a new dataset of goal-oriented dialogues that are grounded in the associated documents. Inspired by how the authors compose documents for guiding end users, we first construct dialogue flows based on the content elements that correspond to higher-level relations across text sections as well as lower-level relations between discourse units within a section. Then we present these dialogue flows to crowd contributors to create conversational utterances. The dataset includes over 4500 annotated conversations with an average of 14 turns that are grounded in over 450 documents from four domains. Compared to the prior document-grounded dialogue datasets, this dataset covers a variety of dialogue scenes in information-seeking conversations. For evaluating the versatility of the dataset, we introduce multiple dialogue modeling tasks and present baseline approaches.", "phrases": ["document-grounded dialogue dataset", "goal-oriented dialogue", "associated document", "doc2dial", "knowledge identification"], "overall_score": 2.1354450574544783, "scores": [1.693519194344647, 0.9564884050836292, 0.5963985981432435, 0.5279223059395126, 0.5225030447418236], "rank_score": 0.8593663096505713} -{"id": "hartung-frank-2010-structured", "title": "A Structured Vector Space Model for Hidden Attribute Meaning in Adjective-Noun Phrases", "abstract": "We present an approach to model hidden attributes in the compositional semantics of adjective-noun phrases in a distributional model. 

For the representation of adjective meanings, we reformulate the pattern-based approach for attribute learning of Almuhareb (2006) in a structured vector space model (VSM). This model is complemented by a structured vector space representing attribute dimensions of noun meanings. The combination of these representations along the lines of compositional semantic principles exposes the underlying semantic relations in adjective-noun phrases. We show that our compositional VSM outperforms simple pattern-based approaches by circumventing their inherent sparsity problems.", "phrases": ["attribute", "adjective-noun phrase", "compositional vsm"], "overall_score": 1.5397454470091774, "scores": [0.9420363670815309, 1.104334130040798, 0.5316743913568377], "rank_score": 0.8593482961597223} -{"id": "rabinovich-etal-2018-native", "title": "Native Language Cognate Effects on Second Language Lexical Choice", "abstract": "We present a computational analysis of cognate effects on the spontaneous linguistic productions of advanced non-native speakers. Introducing a large corpus of highly competent non-native English speakers, and using a set of carefully selected lexical items, we show that the lexical choices of non-natives are affected by cognates in their native language. This effect is so powerful that we are able to reconstruct the phylogenetic language tree of the Indo-European language family solely from the frequencies of specific lexical items in the English of authors with various native languages. We quantitatively analyze non-native lexical choice, highlighting cognate facilitation as one of the important phenomena shaping the language of non-native speakers.", "phrases": ["cognate", "lexical choice", "native language"], "overall_score": 1.8881689301182019, "scores": [1.0863399683916475, 0.9089936072049863, 0.5826945644407882], "rank_score": 0.8593427133458075} -{"id": "zhang-etal-2015-shallow", "title": "Shallow Convolutional Neural Network for Implicit Discourse Relation Recognition", "abstract": "Implicit discourse relation recognition remains a serious challenge due to the absence of discourse connectives. In this paper, we propose a Shallow Convolutional Neural Network (SCNN) for implicit discourse relation recognition, which contains only one hidden layer but is effective in relation recognition. The shallow structure alleviates the overfitting problem, while the convolution and nonlinear operations help preserve the recognition and generalization ability of our model. Experiments on the benchmark data set show that our model achieves comparable and even better performance when compared against current state-of-the-art systems.", "phrases": ["convolutional neural network", "discourse relation", "relation classification"], "overall_score": 2.6938287727705776, "scores": [1.1482174621010968, 0.8963111078651279, 0.5328916903728377], "rank_score": 0.8591400867796875} -{"id": "baziotis-etal-2020-language", "title": "Language Model Prior for Low-Resource Neural Machine Translation", "abstract": "The scarcity of large parallel corpora is an important obstacle for neural machine translation. A common solution is to exploit the knowledge of language models (LM) trained on abundant monolingual data. In this work, we propose a novel approach to incorporate an LM as prior in a neural translation model (TM). 
Specifically, we add a regularization term, which pushes the output distributions of the TM to be probable under the LM prior, while avoiding wrong predictions when the TM \u201cdisagrees\u201d with the LM. This objective relates to knowledge distillation, where the LM can be viewed as teaching the TM about the target language. The proposed approach does not compromise decoding speed, because the LM is used only at training time, unlike previous work that requires it during inference. We present an analysis of the effects that different methods have on the distributions of the TM. Results on two low-resource machine translation datasets show clear improvements even with limited monolingual data.", "phrases": ["regularization term", "output distribution", "language model"], "overall_score": 1.8877182126803782, "scores": [1.4906491800507407, 0.5575764519829257, 0.5291871169657081], "rank_score": 0.8591375829997915} -{"id": "dasgupta-etal-2018-automatic-extraction", "title": "Automatic Extraction of Causal Relations from Text using Linguistically Informed Deep Neural Networks", "abstract": "In this paper we have proposed a linguistically informed recursive neural network architecture for automatic extraction of cause-effect relations from text. These relations can be expressed in arbitrarily complex ways. The architecture uses word level embeddings and other linguistic features to detect causal events and their effects mentioned within a sentence. The extracted events and their relations are used to build a causal-graph after clustering and appropriate generalization, which is then used for predictive purposes. We have evaluated the performance of the proposed extraction model with respect to two baseline systems, one a rule-based classifier, and the other a conditional random field (CRF) based supervised model. We have also compared our results with related work reported in the past by other authors on the SEMEVAL data set, and found that the proposed bi-directional LSTM model enhanced with an additional linguistic layer performs better. We have also worked extensively on creating new annotated datasets from publicly available data, which we are willing to share with the community.", "phrases": ["extraction", "causal relation", "other linguistic feature", "lstm model"], "overall_score": 2.0598285640852807, "scores": [1.759322715222708, 0.5711894691997631, 0.5653250824387295, 0.5402236611566227], "rank_score": 0.859015232004456} -{"id": "lai-hockenmaier-2014-illinois", "title": "Illinois-LH: A Denotational and Distributional Approach to Semantics", "abstract": "This paper describes and analyzes our SemEval 2014 Task 1 system. Its features are based on distributional and denotational similarities; word alignment; negation; and hypernym/hyponym, synonym, and antonym relations.", "phrases": ["negation", "hypernym", "synonym", "rte"], "overall_score": 2.134569614163897, "scores": [0.8862151631361411, 0.8586623321499981, 0.8528272613950572, 0.8383512647327389], "rank_score": 0.8590140053534838} -{"id": "muntes-mulero-etal-2012-multiplying", "title": "Multiplying the Potential of Crowdsourcing with Machine Translation", "abstract": "Machine Translation (MT) is said to be the next lingua franca. With the evolution of new technologies and the capacity to produce a humungous number of written digital documents, human translators will not be able to translate documentation fast enough. However, some applications require a level of quality that is still beyond that provided by MT. 
Thanks to the increased capacity of communication provided by new technologies, people can also interact and collaborate to work remotely. With this, crowd computing is becoming more common and it has been proposed as a feasible solution for translation. In this paper, we discuss the relationship between crowdsourcing and MT, and the main challenges for the MT community to multiply the potential of the crowd.", "phrases": ["potential", "crowdsourcing", "machine translation"], "overall_score": 0.9436417547677969, "scores": [0.9159445889329927, 0.8732154475447168, 0.7876591867401836], "rank_score": 0.8589397410726312} -{"id": "shi-etal-2021-refine-imitate", "title": "Refine and Imitate: Reducing Repetition and Inconsistency in Persuasion Dialogues via Reinforcement Learning and Human Demonstration", "abstract": "A persuasion dialogue system reflects the machine's ability to make strategic moves beyond verbal communication, and therefore differentiates itself from task-oriented or open-domain dialogues and has its own unique values. However, the repetition and inconsistency problems still persist in dialogue response generation and could substantially impact user experience and impede the persuasion outcome. Besides, although reinforcement learning (RL) approaches have achieved great success in strategic tasks such as games, they require a sophisticated user simulator to provide real-time feedback to the dialogue system, which limits the application of RL on persuasion dialogues. To address these issues towards a better persuasion dialogue system, we apply RL to refine a language model baseline without user simulators, and distill sentence-level information about repetition, inconsistency, and task relevance through rewards. Moreover, to better accomplish the persuasion task, the model learns from human demonstration to imitate human persuasion behavior and selects the most persuasive responses. Experiments show that our model outperforms previous state-of-the-art dialogue models on both automatic metrics and human evaluation results on a donation persuasion task, and generates more diverse, consistent and persuasive conversations according to the user feedback. We will make the code and model publicly available.", "phrases": ["repetition", "reinforcement learning", "human demonstration", "persuasive response"], "overall_score": 1.1905901962106948, "scores": [0.8467170698758839, 0.8252261069059281, 0.820187024419386, 0.9431869424071772], "rank_score": 0.8588292859020937} -{"id": "gabbard-etal-2006-fully", "title": "Fully Parsing the Penn Treebank", "abstract": "We present a two-stage parser that recovers Penn Treebank style syntactic analyses of new sentences including skeletal syntactic structure, and, for the first time, both function tags and empty categories. The accuracy of the first-stage parser on the standard Parseval metric matches that of the (Collins, 2003) parser on which it is based, despite the data fragmentation caused by the greatly enriched space of possible node labels. This first stage simultaneously achieves near state-of-the-art performance on recovering function tags with minimal modifications to the underlying parser, modifying less than ten lines of code. 
The second stage achieves state-of-the-art performance on the recovery of empty categories by combining a linguistically-informed architecture and a rich feature set with the power of modern machine learning methods.", "phrases": ["penn treebank", "function tag", "empty category", "machine learning method", "ptb"], "overall_score": 2.132639539898888, "scores": [0.9626392686087458, 1.1208619471109416, 1.0984129767562787, 0.5560251228778558, 0.5532471162991213], "rank_score": 0.8582372863305887} -{"id": "hagiwara-etal-2006-selection", "title": "Selection of Effective Contextual Information for Automatic Synonym Acquisition", "abstract": "Various methods have been proposed for automatic synonym acquisition, as synonyms are one of the most fundamental kinds of lexical knowledge. Whereas many methods are based on contextual clues of words, little attention has been paid to which categories of contextual information are useful for the purpose. This study has experimentally investigated the impact of contextual information selection, by extracting three kinds of word relationships from corpora: dependency, sentence co-occurrence, and proximity. The evaluation result shows that while dependency and proximity perform relatively well by themselves, a combination of two or more kinds of contextual information gives more stable performance. We have further investigated useful selection of dependency relations and modification categories, and found that modification has the greatest contribution, even greater than the widely adopted subject-object combination.", "phrases": ["contextual information", "automatic synonym acquisition", "selection"], "overall_score": 1.6698024971240433, "scores": [0.9392654666469299, 0.8476009172098945, 0.7874598222139243], "rank_score": 0.8581087353569162} -{"id": "miyao-etal-2006-semantic", "title": "Semantic Retrieval for the Accurate Identification of Relational Concepts in Massive Textbases", "abstract": "This paper introduces a novel framework for the accurate retrieval of relational concepts from huge texts. Prior to retrieval, all sentences are annotated with predicate argument structures and ontological identifiers by applying a deep parser and a term recognizer. During the run time, user requests are converted into queries of region algebra on these annotations. Structural matching with pre-computed semantic annotations establishes the accurate and efficient retrieval of relational concepts. This framework was applied to a text retrieval system for MEDLINE. Experiments on the retrieval of biomedical correlations revealed that the cost is sufficiently small for real-time applications and that the retrieval precision is significantly improved.", "phrases": ["relational concept", "semantic retrieval", "biomedicine"], "overall_score": 1.784342494305393, "scores": [1.461956124781688, 0.5829979802693758, 0.5293079627307705], "rank_score": 0.858087355927278} -{"id": "fisch-etal-2020-capwap", "title": "CapWAP: Image Captioning with a Purpose", "abstract": "The traditional image captioning task uses generic reference captions to provide textual information about images. Different user populations, however, will care about different visual aspects of images. In this paper, we propose a new task, Captioning with A Purpose (CapWAP). Our goal is to develop systems that can be tailored to be useful for the information needs of an intended population, rather than merely provide generic information about an image. 
In this task, we use question-answer (QA) pairs\u2014a natural expression of information need\u2014from users, instead of reference captions, for both training and post-inference evaluation. We show that it is possible to use reinforcement learning to directly optimize for the intended information need, by rewarding outputs that allow a question answering model to provide correct answers to sampled user questions. We convert several visual question answering datasets into CapWAP datasets, and demonstrate that under a variety of scenarios our purposeful captioning system learns to anticipate and fulfill specific information needs better than its generic counterparts, as measured by QA performance on user questions from unseen images, when using the caption alone as context.", "phrases": ["image", "caption", "purpose", "visual question"], "overall_score": 1.3810004600223609, "scores": [0.7862556406442175, 1.0911702459220782, 0.9953620052641479, 0.5594674299887062], "rank_score": 0.8580638304547874} -{"id": "poria-etal-2014-rule", "title": "A Rule-Based Approach to Aspect Extraction from Product Reviews", "abstract": "Sentiment analysis is a rapidly growing research field that has attracted both academia and industry because of the challenging research problems it poses and the potential benefits it can provide in many real life applications. Aspect-based opinion mining, in particular, is one of the fundamental challenges within this research field. In this work, we aim to solve the problem of aspect extraction from product reviews by proposing a novel rule-based approach that exploits common-sense knowledge and sentence dependency trees to detect both explicit and implicit aspects. Two popular review datasets were used for evaluating the system against state-of-the-art aspect extraction techniques, obtaining higher detection accuracy for both datasets.", "phrases": ["rule-based approach", "aspect extraction", "product review"], "overall_score": 1.5368358977935697, "scores": [0.9308698465566381, 0.8311097463597887, 0.8111937445556262], "rank_score": 0.8577244458240177} -{"id": "kouno-etal-2015-unsupervised", "title": "Unsupervised Domain Adaptation for Word Sense Disambiguation using Stacked Denoising Autoencoder", "abstract": "In this paper, we propose an unsupervised domain adaptation method for Word Sense Disambiguation (WSD) using a Stacked Denoising Autoencoder (SdA). SdA is an unsupervised learning method that obtains an abstract feature set from input data using a neural network. The abstract feature set absorbs the difference between domains, and thus SdA can solve the domain adaptation problem. However, SdA cannot cope with every domain adaptation problem. In particular, the difficulty of domain adaptation for WSD depends on the combination of a source domain, a target domain and a target word. As a result, any domain adaptation method for WSD has an adverse effect on part of the problems. Therefore, we define a similarity between two domains and use it to judge whether to apply SdA, which avoids the adverse effects of SdA. In the experiments, we used three domains from the Balanced Corpus of Contemporary Written Japanese and 16 target words. In comparison with the baseline, our method achieved higher average accuracies for all combinations of two domains. 
Furthermore, we have obtained better results than conventional domain adaptation methods.", "phrases": ["word sense disambiguation", "stacked denoising autoencoder", "unsupervised domain adaptation"], "overall_score": 0.9420135854920518, "scores": [0.859898306207023, 0.8595425858524289, 0.8529322605313261], "rank_score": 0.8574577175302593} -{"id": "sugiyama-yoshinaga-2019-data", "title": "Data augmentation using back-translation for context-aware neural machine translation", "abstract": "A single sentence does not always convey information that is enough to translate it into other languages. Some target languages need to add or specialize words that are omitted or ambiguous in the source languages (e.g., zero pronouns in translating Japanese to English or epicene pronouns in translating English to French). To translate such ambiguous sentences, we need contexts beyond a single sentence, and have so far explored context-aware neural machine translation (NMT). However, a large amount of parallel corpora is not easily available to train accurate context-aware NMT models. In this study, we first obtain large-scale pseudo parallel corpora by back-translating monolingual data, and then investigate its impact on the translation accuracy of context-aware NMT models. We evaluated context-aware NMT models trained with small parallel corpora and the large-scale pseudo parallel corpora on English-Japanese and English-French datasets to demonstrate the large impact of the data augmentation for context-aware NMT models.", "phrases": ["back-translation", "neural machine translation", "data augmentation"], "overall_score": 1.3797647029843265, "scores": [0.9100190562805771, 0.8653855715514788, 0.7964834064772304], "rank_score": 0.8572960114364289} -{"id": "williams-etal-2013-dialog", "title": "The Dialog State Tracking Challenge", "abstract": "In a spoken dialog system, dialog state tracking deduces information about the user\u2019s goal as the dialog progresses, synthesizing evidence such as dialog acts over multiple turns with external data sources. Recent approaches have been shown to overcome ASR and SLU errors in some applications. However, there are currently no common testbeds or evaluation measures for this task, hampering progress. The dialog state tracking challenge seeks to address this by providing a heterogeneous corpus of 15K human-computer dialogs in a standard format, along with a suite of 11 evaluation metrics. The challenge received a total of 27 entries from 9 research groups. The results show that the suite of performance metrics cluster into 4 natural groups. Moreover, the dialog systems that benefit most from dialog state tracking are those with less discriminative speech recognition confidence scores. Finally, generalization is a key problem: in 2 of the 4 test sets, fewer than half of the entries outperformed simple baselines. Spoken dialog systems interact with users via natural language to help them achieve a goal. As the interaction progresses, the dialog manager maintains a representation of the state of the dialog in a process called dialog state tracking (DST). For example, in a bus schedule information system, the dialog state might indicate the user\u2019s desired bus route, origin, and destination. 
Dialog state tracking is difficult because automatic speech recognition (ASR) and spoken language understanding (SLU) errors are common, and can cause the system to misunderstand the user\u2019s needs. At the same time, state tracking is crucial because the system relies on the estimated dialog state to choose actions \u2013 for example, which bus schedule information to present to the user. Most commercial systems use hand-crafted heuristics for state tracking, selecting the SLU result with the highest confidence score, and discarding alternatives. In contrast, statistical approaches compute scores for many hypotheses for the dialog state (Figure 1). By exploiting correlations between turns and information from external data sources \u2013 such as maps, bus timetables, or models of past dialogs \u2013 statistical approaches can overcome some SLU errors. Numerous techniques for dialog state tracking have been proposed, including heuristic scores (Higashinaka et al., 2003), Bayesian networks (Paek and Horvitz, 2000; Williams and Young, 2007), kernel density estimators (Ma et al., 2012), and discriminative models (Bohus and Rudnicky, 2006). Techniques have been fielded which scale to realistically sized dialog problems and operate in real time (Young et al., 2010; Thomson and Young, 2010; Williams, 2010; Mehta et al., 2010). In end-to-end dialog systems, dialog state tracking has been shown to improve overall system performance (Young et al., 2010; Thomson and Young, 2010). Despite this progress, direct comparisons between methods have not been possible because past studies use different domains and system components, for speech recognition, spoken language understanding, dialog control, etc. Moreover, there is little agreement on how to evaluate dialog state tracking. Together these issues limit progress in this research area. The Dialog State Tracking Challenge (DSTC) provides a first common testbed and evaluation", "phrases": ["dialog state tracking", "spoken language understanding", "template", "task-oriented dialogue", "user goal"], "overall_score": 3.1833617665713105, "scores": [1.4986615266454129, 1.0816152914682249, 0.6167119242970148, 0.5589961521623584, 0.5301333083200533], "rank_score": 0.8572236405786129} -{"id": "atanasova-etal-2020-diagnostic", "title": "A Diagnostic Study of Explainability Techniques for Text Classification", "abstract": "Recent developments in machine learning have introduced models that approach human performance at the cost of increased architectural complexity. Efforts to make the rationales behind the models' predictions transparent have inspired an abundance of new explainability techniques. Provided with an already trained model, they compute saliency scores for the words of an input instance. However, there exists no definitive guide on (i) how to choose such a technique given a particular application task and model architecture, and (ii) the benefits and drawbacks of using each such technique. In this paper, we develop a comprehensive list of diagnostic properties for evaluating existing explainability techniques. We then employ the proposed list to compare a set of diverse explainability techniques on downstream text classification tasks and neural network architectures. 
We also compare the saliency scores assigned by the explainability techniques with human annotations of salient input regions to find relations between a model's performance and the agreement of its rationales with human ones. Overall, we find that the gradient-based explanations perform best across tasks and model architectures, and we present further insights into the properties of the reviewed explainability techniques.", "phrases": ["diagnostic property", "explanation", "faithfulness"], "overall_score": 1.9731747203103978, "scores": [1.2004300213817882, 0.8255991576067702, 0.5447874995968356], "rank_score": 0.8569388928617979} -{"id": "quan-etal-2020-risawoz", "title": "RiSAWOZ: A Large-Scale Multi-Domain Wizard-of-Oz Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling", "abstract": "In order to alleviate the shortage of multi-domain data and to capture discourse phenomena for task-oriented dialogue modeling, we propose RiSAWOZ, a large-scale multi-domain Chinese Wizard-of-Oz dataset with Rich Semantic Annotations. RiSAWOZ contains 11.2K human-to-human (H2H) multi-turn semantically annotated dialogues, with more than 150K utterances spanning over 12 domains, which is larger than all previous annotated H2H conversational datasets. Both single- and multi-domain dialogues are constructed, accounting for 65% and 35%, respectively. Each dialogue is labeled with comprehensive dialogue annotations, including dialogue goal in the form of natural language description, domain, dialogue states and acts at both the user and system side. In addition to traditional dialogue annotations, we especially provide linguistic annotations on discourse phenomena, e.g., ellipsis and coreference, in dialogues, which are useful for dialogue coreference and ellipsis resolution tasks. Apart from the fully annotated dataset, we also present a detailed description of the data collection procedure, statistics and analysis of the dataset. A series of benchmark models and results are reported, including natural language understanding (intent detection & slot filling), dialogue state tracking and dialogue context-to-text generation, as well as coreference and ellipsis resolution, which facilitate the baseline comparison for future research on this corpus.", "phrases": ["large-scale multi-domain", "rich semantic annotations", "task-oriented dialogue modeling"], "overall_score": 1.378961892604416, "scores": [0.9012814736612521, 0.869603956362779, 0.7995061618806589], "rank_score": 0.8567971973015633} -{"id": "filatova-hatzivassiloglou-2004-formal", "title": "A Formal Model for Information Selection in Multi-Sentence Text Extraction", "abstract": "Selecting important information while accounting for repetitions is a hard task for both summarization and question answering. We propose a formal model that represents a collection of documents in a two-dimensional space of textual and conceptual units with an associated mapping between these two dimensions. This representation is then used to describe the task of selecting textual units for a summary or answer as a formal optimization task. 
We provide approximation algorithms and empirically validate the performance of the proposed model when used with two very different sets of features, words and atomic events.", "phrases": ["formal model", "text summarization", "maximum coverage problem"], "overall_score": 1.9725739577641253, "scores": [0.8108046831643123, 0.9274454329749928, 0.8317838388697477], "rank_score": 0.8566779850030176} -{"id": "dos-santos-guimaraes-2015-boosting", "title": "Boosting Named Entity Recognition with Neural Character Embeddings", "abstract": "Most state-of-the-art named entity recognition (NER) systems rely on handcrafted features and on the output of other NLP tasks such as part-of-speech (POS) tagging and text chunking. In this work we propose a language-independent NER system that uses automatically learned features only. Our approach is based on the CharWNN deep neural network, which uses word-level and character-level representations (embeddings) to perform sequential classification. We perform an extensive number of experiments using two annotated corpora in two different languages: HAREM I corpus, which contains texts in Portuguese; and the SPA CoNLL-2002 corpus, which contains texts in Spanish. Our experimental results shed light on the contribution of neural character embeddings for NER. Moreover, we demonstrate that the same neural network which has been successfully applied to POS tagging can also achieve state-of-the-art results for language-independent NER, using the same hyperparameters, and without any handcrafted features. For the HAREM I corpus, CharWNN outperforms the state-of-the-art system by 7.9 points in the F1-score for the total scenario (ten NE classes), and by 7.2 points in the F1 for the selective scenario (five NE classes).", "phrases": ["entity recognition", "character embedding", "deep neural network", "pos tagging"], "overall_score": 2.3740521580287073, "scores": [0.8912571005466673, 1.111164043478587, 0.9024902231245123, 0.5201219080499915], "rank_score": 0.8562583187999395} -{"id": "yang-etal-2020-efficient", "title": "Efficient Transfer Learning for Quality Estimation with Bottleneck Adapter Layer", "abstract": "The Predictor-Estimator framework for quality estimation (QE) is commonly used for its strong performance, where the predictor and the estimator perform feature extraction and quality evaluation, respectively. However, training the predictor from scratch is computationally expensive. In this paper, we propose an efficient transfer learning framework to transfer knowledge from an NMT dataset into QE models. A Predictor-Estimator-like model named BAL-QE is also proposed, aiming to extract high-quality features with a pre-trained NMT model, and to make classification with a fine-tuned Bottleneck Adapter Layer (BAL). The experiment shows that BAL-QE achieves 97% of the SOTA performance in WMT19 En-De and En-Ru QE tasks by only training 3% of parameters within 4 hours on 4 Titan XP GPUs. 
Compared with the commonly used NuQE baseline, BAL-QE achieves performance improvements of 47% (En-Ru) and 75% (En-De).", "phrases": ["quality estimation", "bottleneck adapter layer", "efficient transfer learning"], "overall_score": 0.9406883646804346, "scores": [0.9051082562215628, 0.8374959802519347, 0.8261501122172517], "rank_score": 0.8562514495635831} -{"id": "huang-etal-2019-cross", "title": "Cross-lingual Multi-Level Adversarial Transfer to Enhance Low-Resource Name Tagging", "abstract": "We focus on improving name tagging for low-resource languages using annotations from related languages. Previous studies either directly project annotations from a source language to a target language using cross-lingual representations or use a shared encoder in a multitask network to transfer knowledge. These approaches inevitably introduce noise to the target language annotation due to mismatched source-target sentence structures. To effectively transfer the resources, we develop a new neural architecture that leverages multi-level adversarial transfer: (1) word-level adversarial training, which projects source language words into the same semantic space as those of the target language without using any parallel corpora or bilingual gazetteers, and (2) sentence-level adversarial training, which yields language-agnostic sequential features. Our neural architecture outperforms previous approaches on CoNLL data sets. Moreover, on 10 low-resource languages, our approach achieves up to 16% absolute F-score gain over all high-performing baselines on cross-lingual transfer without using any target-language resources.", "phrases": ["multi-level adversarial transfer", "name tagging", "cross-lingual transfer"], "overall_score": 1.971528129279825, "scores": [0.7966502356052038, 0.9119892958211422, 0.8600318309634607], "rank_score": 0.8562237874632688} -{"id": "schneider-waibel-2019-kits", "title": "KIT's Submission to the IWSLT 2019 Shared Task on Text Translation", "abstract": "In this paper, we describe KIT's submission for the IWSLT 2019 shared task on text translation. Our system is based on the transformer model [1] using our in-house implementation. We augment the available training data using back-translation and employ fine-tuning for the final model. For our best results, we used a 12-layer transformer-big configuration, achieving state-of-the-art results on the WMT2018 test set. We also experiment with student-teacher models to improve performance of smaller models.", "phrases": ["iwslt", "text translation", "kit"], "overall_score": 0.9404964148632503, "scores": [0.958043597121702, 0.8208247182257924, 0.7893618725838198], "rank_score": 0.8560767293104381} -{"id": "li-etal-2019-findings", "title": "Findings of the First Shared Task on Machine Translation Robustness", "abstract": "We share the findings of the first shared task on improving robustness of Machine Translation (MT). The task provides a testbed representing challenges facing MT models deployed in the real world, and facilitates new approaches to improve models' robustness to noisy input and domain mismatch. We focus on two language pairs (English-French and English-Japanese), and the submitted systems are evaluated on a blind test set consisting of noisy comments on Reddit and professionally sourced translations. As a new task, we received 23 submissions by 11 participating teams from universities, companies, national labs, etc. 
All submitted systems achieved large improvements over baselines, with the best improvement reaching +22.33 BLEU. We evaluated submissions by both human judgment and automatic evaluation (BLEU), which shows high correlations (Pearson's r = 0.94 and 0.95). Furthermore, we conducted a qualitative analysis of the submitted systems using compare-mt, which revealed their salient differences in handling challenges in this task. Such analysis provides additional insights when there is occasional disagreement between human judgment and BLEU, e.g. systems better at producing colloquial expressions received higher scores from human judgment.", "phrases": ["first shared task", "machine translation robustness", "noise", "nmt model"], "overall_score": 2.4252108916506336, "scores": [0.9651206349846946, 0.8354223908678376, 1.0733947221494098, 0.5500343954844936], "rank_score": 0.8559930358716089} -{"id": "davani-etal-2022-dealing", "title": "Dealing with Disagreements: Looking Beyond the Majority Vote in Subjective Annotations", "abstract": "Majority voting and averaging are common approaches used to resolve annotator disagreements and derive single ground truth labels from multiple annotations. However, annotators may systematically disagree with one another, often reflecting their individual biases and values, especially in the case of subjective tasks such as detecting affect, aggression, and hate speech. Annotator disagreements may capture important nuances in such tasks that are often ignored while aggregating annotations to a single ground truth. In order to address this, we investigate the efficacy of multi-annotator models. In particular, our multi-task based approach treats predicting each annotator's judgements as separate subtasks, while sharing a common learned representation of the task. We show that this approach yields the same or better performance than aggregating labels in the data prior to training across seven different binary classification tasks. Our approach also provides a way to estimate uncertainty in predictions, which we demonstrate correlates better with annotation disagreements than traditional methods. Being able to model uncertainty is especially useful in deployment scenarios where knowing when not to make a prediction is important.", "phrases": ["disagreement", "single ground truth", "value", "separate subtask"], "overall_score": 1.779392827162268, "scores": [1.4618910025396497, 0.8404810796370074, 0.562035020334462, 0.5584211742094101], "rank_score": 0.8557070691801323} -{"id": "maruf-etal-2019-selective", "title": "Selective Attention for Context-aware Neural Machine Translation", "abstract": "Despite the progress made in sentence-level NMT, current systems still fall short at achieving fluent, good quality translation for a full document. Recent works in context-aware NMT consider only a few previous sentences as context and may not scale to entire documents. To this end, we propose a novel and scalable top-down approach to hierarchical attention for context-aware NMT which uses sparse attention to selectively focus on relevant sentences in the document context and then attends to key words in those sentences. We also propose single-level attention approaches based on sentence or word-level information in the context. The document-level context representation, produced from these attention modules, is integrated into the encoder or decoder of the Transformer model depending on whether we use monolingual or bilingual context. 
Our experiments and evaluation on English-German datasets in different document MT settings show that our selective attention approach not only significantly outperforms context-agnostic baselines but also surpasses context-aware baselines in most cases.", "phrases": ["machine translation", "context-aware nmt", "document context", "attention module"], "overall_score": 2.4243284536840766, "scores": [1.2016148469716894, 1.0799033717686943, 0.6084655332335281, 0.5327425439756153], "rank_score": 0.8556815739873818} -{"id": "zhao-etal-2017-generative", "title": "Generative Encoder-Decoder Models for Task-Oriented Spoken Dialog Systems with Chatting Capability", "abstract": "Generative encoder-decoder models offer great promise in developing domain-general dialog systems. However, they have mainly been applied to open-domain conversations. This paper presents a practical and novel framework for building task-oriented dialog systems based on encoder-decoder models. This framework enables encoder-decoder models to accomplish slot-value independent decision-making and interact with external databases. Moreover, this paper shows the flexibility of the proposed method by interleaving chatting capability with a slot-filling system for better out-of-domain recovery. The models were trained on both real-user data from a bus information system and human-human chat data. Results show that the proposed framework achieves good performance in both offline evaluation metrics and in task success rate with human users.", "phrases": ["encoder-decoder model", "chatting capability", "task-oriented dialog system"], "overall_score": 2.194381544493779, "scores": [0.786002918064769, 0.931389699568189, 0.8491861782916924], "rank_score": 0.8555262653082168} -{"id": "banerjee-etal-2010-combining", "title": "Combining Multi-Domain Statistical Machine Translation Models using Automatic Classifiers", "abstract": "This paper presents a set of experiments on Domain Adaptation of Statistical Machine Translation systems. The experiments focus on Chinese-English and two domain-specific corpora. The paper presents a novel approach for combining multiple domain-trained translation models to achieve improved translation quality for both domain-specific as well as combined sets of sentences. We train a statistical classifier to classify sentences according to the appropriate domain and utilize the corresponding domain-specific MT models to translate them. Experimental results show that the method achieves a statistically significant absolute improvement of 1.58 BLEU (2.86% relative improvement) score over a translation model trained on combined data, and considerable improvements over a model using multiple decoding paths of the Moses decoder, for the combined domain test set. 
Furthermore, even for domain-specific test sets, our approach works almost as well as dedicated domain-specific models and perfect classification.", "phrases": ["translation model", "statistical classifier", "domain-specific model"], "overall_score": 2.316420642260584, "scores": [0.9864648467443062, 0.9851901009567153, 0.5944946472929401], "rank_score": 0.8553831983313206} -{"id": "wang-etal-2018-label", "title": "Label-Free Distant Supervision for Relation Extraction via Knowledge Graph Embedding", "abstract": "Distant supervision is an effective method to generate large scale labeled data for relation extraction, which assumes that if a pair of entities appears in some relation of a Knowledge Graph (KG), all sentences containing those entities in a large unlabeled corpus are then labeled with that relation to train a relation classifier. However, when the pair of entities has multiple relationships in the KG, this assumption may produce noisy relation labels. This paper proposes a label-free distant supervision method, which makes no use of the relation labels under this inadequate assumption, but only uses the prior knowledge derived from the KG to supervise the learning of the classifier directly and softly. Specifically, we make use of the type information and the translation law derived from typical KG embedding model to learn embeddings for certain sentence patterns. As the supervision signal is only determined by the two aligned entities, neither hard relation labels nor extra noise-reduction model for the bag of sentences is needed in this way. The experiments show that the approach performs well in current distant supervision dataset.", "phrases": ["distant supervision", "relation extraction", "knowledge graph"], "overall_score": 1.1855654504500508, "scores": [0.8644794142300721, 0.8561383296645263, 0.8449963501257691], "rank_score": 0.8552046980067892} -{"id": "durrett-denero-2013-supervised", "title": "Supervised Learning of Complete Morphological Paradigms", "abstract": "We describe a supervised approach to predicting the set of all inflected forms of a lexical item. Our system automatically acquires the orthographic transformation rules of morphological paradigms from labeled examples, and then learns the contexts in which those transformations apply using a discriminative sequence model. Because our approach is completely data-driven and the model is trained on examples extracted from Wiktionary, our method can extend to new languages without change. Our end-to-end system is able to predict complete paradigms with 86.1% accuracy and individual inflected forms with 94.9% accuracy, averaged across three languages and two parts of speech.", "phrases": ["paradigm", "inflection", "semi-supervised learning"], "overall_score": 2.422974307440302, "scores": [1.1074602550524955, 0.8741258688145077, 0.584024735467096], "rank_score": 0.8552036197780332} -{"id": "yeh-etal-2015-condition", "title": "Condition Random Fields-based Grammatical Error Detection for Chinese as Second Language", "abstract": "Chinese is not easy for foreign learners to acquire as a second language, because Chinese has many special rules that differ from other languages. People learning Chinese as a foreign language often make grammatical errors, such as missing, redundant, selection and disorder errors. In this paper, we propose conditional random fields (CRFs) to detect these grammatical errors. Features based on statistical word and part-of-speech (POS) patterns were adopted here. 
The part-of-speech relationships between words are helpful for Chinese grammatical error detection. Finally, we use the CRF output to determine which error types occur in each sentence. The experimental results show that the performance of the proposed model is acceptable in terms of precision and recall.", "phrases": ["grammatical error detection", "chinese", "second language"], "overall_score": 0.9394285493279241, "scores": [0.9199298343760032, 0.838167015095061, 0.8072172991631997], "rank_score": 0.8551047162114213} -{"id": "dou-etal-2019-investigating", "title": "Investigating Meta-Learning Algorithms for Low-Resource Natural Language Understanding Tasks", "abstract": "Learning general representations of text is a fundamental problem for many natural language understanding (NLU) tasks. Previously, researchers have proposed to use language model pre-training and multi-task learning to learn robust representations. However, these methods can achieve sub-optimal performance in low-resource scenarios. Inspired by the recent success of optimization-based meta-learning algorithms, in this paper, we explore the model-agnostic meta-learning algorithm (MAML) and its variants for low-resource NLU tasks. We validate our methods on the GLUE benchmark and show that our proposed models can outperform several strong baselines. We further empirically demonstrate that the learned representations can be adapted to new tasks efficiently and effectively.", "phrases": ["meta-learning algorithm", "natural language understanding", "maml", "low-resource nlu task", "new task"], "overall_score": 2.5175620526434064, "scores": [1.2527223961083709, 0.9691345379570946, 0.9076378596945183, 0.6005345392881567, 0.5450839745404554], "rank_score": 0.8550226615177193} -{"id": "xu-etal-2019-alter", "title": "ALTER: Auxiliary Text Rewriting Tool for Natural Language Generation", "abstract": "In this paper, we describe ALTER, an auxiliary text rewriting tool that facilitates the rewriting process for natural language generation tasks, such as paraphrasing, text simplification, fairness-aware text rewriting, and text style transfer. Our tool is characterized by two features, i) recording of word-level revision histories and ii) flexible auxiliary edit support and feedback to annotators. The text rewriting assist and traceable rewriting history are potentially beneficial to the future research of natural language generation.", "phrases": ["auxiliary text", "natural language generation", "alter"], "overall_score": 1.1849914171501283, "scores": [0.9301703045073945, 0.8436794411475246, 0.7905221158727938], "rank_score": 0.8547906205092376} -{"id": "durmus-etal-2020-feqa", "title": "FEQA: A Question Answering Evaluation Framework for Faithfulness Assessment in Abstractive Summarization", "abstract": "Neural abstractive summarization models are prone to generate content inconsistent with the source document, i.e. unfaithful. Existing automatic metrics do not capture such mistakes effectively. We tackle the problem of evaluating faithfulness of a generated summary given its source document. We first collected human annotations of faithfulness for outputs from numerous models on two datasets. We find that current models exhibit a trade-off between abstractiveness and faithfulness: outputs with less word overlap with the source document are more likely to be unfaithful. Next, we propose an automatic question answering (QA) based metric for faithfulness, FEQA, which leverages recent advances in reading comprehension. 
Given question-answer pairs generated from the summary, a QA model extracts answers from the document; non-matched answers indicate unfaithful information in the summary. Among metrics based on word overlap, embedding similarity, and learned language understanding models, our QA-based metric has significantly higher correlation with human faithfulness scores, especially on highly abstractive summaries.", "phrases": ["evaluation framework", "abstractive summarization", "question generation"], "overall_score": 2.962437470064409, "scores": [1.4417791325686253, 0.5675835141998827, 0.5549736614348338], "rank_score": 0.854778769401114} -{"id": "mao-etal-2020-jass", "title": "JASS: Japanese-specific Sequence to Sequence Pre-training for Neural Machine Translation", "abstract": "Neural machine translation (NMT) needs large parallel corpora for state-of-the-art translation quality. Low-resource NMT is typically addressed by transfer learning which leverages large monolingual or parallel corpora for pre-training. Monolingual pre-training approaches such as MASS (MAsked Sequence to Sequence) are extremely effective in boosting NMT quality for languages with small parallel corpora. However, they do not account for linguistic information obtained using syntactic analyzers which is known to be invaluable for several Natural Language Processing (NLP) tasks. To this end, we propose JASS, Japanese-specific Sequence to Sequence, as a novel pre-training alternative to MASS for NMT involving Japanese as the source or target language. JASS is joint BMASS (Bunsetsu MASS) and BRSS (Bunsetsu Reordering Sequence to Sequence) pre-training which focuses on Japanese linguistic units called bunsetsus. In our experiments on ASPEC Japanese\u2013English and News Commentary Japanese\u2013Russian translation we show that JASS can give results that are competitive with if not better than those given by MASS. Furthermore, we show for the first time that joint MASS and JASS pre-training gives results that significantly surpass the individual methods indicating their complementary nature. We will release our code, pre-trained models and bunsetsu annotated data as resources for researchers to use in their own NLP tasks.", "phrases": ["japanese-specific sequence", "neural machine translation", "jass"], "overall_score": 0.9390439234705537, "scores": [0.940693553729032, 0.8404840312305248, 0.7830862590458465], "rank_score": 0.8547546146684678} -{"id": "yancheva-rudzicz-2016-vector", "title": "Vector-space topic models for detecting Alzheimer's disease", "abstract": "Semantic deficit is a symptom of language impairment in Alzheimer\u2019s disease (AD). We present a generalizable method for automatic generation of information content units (ICUs) for a picture used in a standard clinical task, achieving high recall, 96.8%, of human-supplied ICUs. We use the automatically generated topic model to extract semantic features, and train a random forest classifier to achieve an F-score of 0.74 in binary classification of controls versus people with AD using a set of only 12 features. This is comparable to results (0.72 F-score) with a set of 85 manual features. Adding semantic information to a set of standard lexicosyntactic and acoustic features improves F-score to 0.80. 
While control and dementia subjects discuss the same topics in the same contexts, controls are more informative per second of speech.", "phrases": ["topic model", "alzheimer", "disease"], "overall_score": 0.9387524130187054, "scores": [0.8763214935677577, 0.8497501126623895, 0.8373962050305241], "rank_score": 0.8544892704202237} -{"id": "jin-etal-2018-unsupervised", "title": "Unsupervised Grammar Induction with Depth-bounded PCFG", "abstract": "There has been recent interest in applying cognitively- or empirically-motivated bounds on recursion depth to limit the search space of grammar induction models (Ponvert et al., 2011; Noji and Johnson, 2016; Shain et al., 2016). This work extends this depth-bounding approach to probabilistic context-free grammar induction (DB-PCFG), which has a smaller parameter space than hierarchical sequence models, and therefore more fully exploits the space reductions of depth-bounding. Results for this model on grammar acquisition from transcribed child-directed speech and newswire text exceed or are competitive with those of other models when evaluated on parse accuracy. Moreover, grammars acquired from this model demonstrate a consistent use of category labels, something which has not been demonstrated by other acquisition models.", "phrases": ["induction", "pcfg", "search space", "grammar acquisition"], "overall_score": 1.8772942234076395, "scores": [1.3284414826294495, 0.9119902125746752, 0.620151339372462, 0.5569906495546111], "rank_score": 0.8543934210327995} -{"id": "tinsley-etal-2012-iptranslator", "title": "IPTranslator: Facilitating Patent Search with Machine Translation", "abstract": "Intellectual Property professionals frequently need to carry out patent searches for a variety of reasons. During a typical search, they will retrieve approximately 30% of their results in a foreign language. The machine translation (MT) options currently available to patent searchers for these foreign-language patents vary in their quality, consistency, and general level of service. In this article, we introduce IPTranslator, an MT web service designed to cater for the needs of patent searchers. At the core of IPTranslator is a set of MT systems developed specifically for translating patent text. We describe the challenges faced in adapting MT technology to such a complex domain, and how the systems were evaluated to ensure that the quality was fit for purpose. Finally, we present the framework through which the IPTranslator service is delivered to users, and the value-adding features which address many of the issues with existing solutions.", "phrases": ["patent search", "machine translation", "iptranslator"], "overall_score": 0.9385276923885614, "scores": [0.9178383970019892, 0.8239819532754699, 0.8210338123854439], "rank_score": 0.8542847208876344} -{"id": "tannier-etal-2011-grawltcq", "title": "GrawlTCQ: Terminology and Corpora Building by Ranking Simultaneously Terms, Queries and Documents using Graph Random Walks", "abstract": "In this paper, we present GrawlTCQ, a new bootstrapping algorithm for building specialized terminology, corpora and queries, based on a graph model. We model links between documents, terms and queries, and use a random walk with restart algorithm to compute relevance propagation. We have evaluated GrawlTCQ on an AFP English corpus of 57,441 news articles over 10 categories. For corpora building, GrawlTCQ outperforms the BootCaT tool, which is widely used in the domain. For 1,000 documents retrieved, we improve mean precision by 25%. 
GrawlTCQ has also been shown to be faster and more robust than BootCaT over iterations.", "phrases": ["terminology", "corpora building", "grawltcq"], "overall_score": 1.1842900340829445, "scores": [0.8968815723136094, 0.86155141430089, 0.8044210521040293], "rank_score": 0.8542846795728428} -{"id": "goyal-durrett-2020-evaluating", "title": "Evaluating Factuality in Generation with Dependency-level Entailment", "abstract": "Despite significant progress in text generation models, a serious limitation is their tendency to produce text that is factually inconsistent with information in the input. Recent work has studied whether textual entailment systems can be used to identify factual errors; however, these sentence-level entailment models are trained to solve a different problem than generation filtering and they do not localize which part of a generation is non-factual. In this paper, we propose a new formulation of entailment that decomposes it at the level of dependency arcs. Rather than focusing on aggregate decisions, we instead ask whether the semantic relationship manifested by individual dependency arcs in the generated output is supported by the input. Human judgments on this task are difficult to obtain; we therefore propose a method to automatically create data based on existing entailment or paraphrase corpora. Experiments show that our dependency arc entailment model trained on this data can identify factual inconsistencies in paraphrasing and summarization better than sentence-level methods or those based on question generation, while additionally localizing the erroneous parts of the generation.", "phrases": ["dependency-level entailment", "factual error", "dependency arc"], "overall_score": 2.122510033093694, "scores": [0.9154069802089329, 1.0832707751626016, 0.5638048633716397], "rank_score": 0.8541608729143914} -{"id": "cai-lam-2019-core", "title": "Core Semantic First: A Top-down Approach for AMR Parsing", "abstract": "We introduce a novel scheme for parsing a piece of text into its Abstract Meaning Representation (AMR): Graph Spanning based Parsing (GSP). One novel characteristic of GSP is that it constructs a parse graph incrementally in a top-down fashion. Starting from the root, at each step, a new node and its connections to existing nodes will be jointly predicted. The output graph spans the nodes by the distance to the root, following the intuition of first grasping the main ideas then digging into more details. The core semantic first principle emphasizes capturing the main ideas of a sentence, which is of great interest. We evaluate our model on the latest AMR sembank and achieve the state-of-the-art performance in the sense that no heuristic graph re-categorization is adopted. More importantly, the experiments show that our parser is especially good at obtaining the core semantics.", "phrases": ["amr", "more detail", "core semantic"], "overall_score": 2.1221552132522006, "scores": [1.1013791308953875, 0.9341761064492474, 0.5264990113732216], "rank_score": 0.8540180829059523} -{"id": "hale-etal-2006-pcfgs", "title": "PCFGs with Syntactic and Prosodic Indicators of Speech Repairs", "abstract": "A grammatical method of combining two kinds of speech repair cues is presented. One cue, prosodic disjuncture, is detected by a decision tree-based ensemble classifier that uses acoustic cues to identify where normal prosody seems to be interrupted (Lickley, 1996). 
The other cue, syntactic parallelism, codifies the expectation that repairs continue a syntactic category that was left unfinished in the reparandum (Levelt, 1983). The two cues are combined in a Treebank PCFG whose states are split using a few simple tree transformations. Parsing performance on the Switchboard and Fisher corpora suggests that these two cues help to locate speech repairs in a synergistic way.", "phrases": ["speech repair", "repair", "constituent"], "overall_score": 1.6616721691740965, "scores": [1.0491784702746407, 0.9690031802722995, 0.5436100693546083], "rank_score": 0.8539305733005161} -{"id": "hara-etal-2009-coordinate", "title": "Coordinate Structure Analysis with Global Structural Constraints and Alignment-Based Local Features", "abstract": "We propose a hybrid approach to coordinate structure analysis that combines a simple grammar to ensure consistent global structure of coordinations in a sentence, and features based on sequence alignment to capture local symmetry of conjuncts. The weight of the alignment-based features, which in turn determines the score of coordinate structures, is optimized by perceptron training on a given corpus. A bottom-up chart parsing algorithm efficiently finds the best scoring structure, taking both nested and non-overlapping flat coordinations into account. We demonstrate that our approach outperforms existing parsers in coordination scope detection on the Genia corpus.", "phrases": ["alignment-based local feature", "sequence alignment", "local symmetry", "coordination", "dual decomposition"], "overall_score": 2.047255187417274, "scores": [0.9627164239818241, 1.616724197837643, 0.5896595772466982, 0.555134375973669, 0.5446240582817631], "rank_score": 0.8537717266643193} -{"id": "li-etal-2022-ultra", "title": "Ultra-fine Entity Typing with Indirect Supervision from Natural Language Inference", "abstract": "The task of ultra-fine entity typing (UFET) seeks to predict diverse and free-form words or phrases that describe the appropriate types of entities mentioned in sentences. A key challenge for this task lies in the large number of types and the scarcity of annotated data per type. Existing systems formulate the task as a multi-way classification problem and train directly or distantly supervised classifiers. This causes two issues: (i) the classifiers do not capture the type semantics because types are often converted into indices; (ii) systems developed in this way are limited to predicting within a pre-defined type set, and often fall short of generalizing to types that are rarely seen or unseen in training. This work presents LITE\ud83c\udf7b, a new approach that formulates entity typing as a natural language inference (NLI) problem, making use of (i) the indirect supervision from NLI to infer type information meaningfully represented as textual hypotheses and alleviate the data scarcity issue, as well as (ii) a learning-to-rank objective to avoid the pre-defining of a type set. Experiments show that, with limited training data, LITE obtains state-of-the-art performance on the UFET task. 
In addition, LITE demonstrates its strong generalizability by not only yielding best results on other fine-grained entity typing benchmarks, more importantly, a pre-trained LITE system works well on new data containing unseen types.", "phrases": ["indirect supervision", "natural language inference", "ultra-fine entity typing"], "overall_score": 0.9379250407693454, "scores": [0.8867659761394493, 0.8605757241854319, 0.8138667909066202], "rank_score": 0.8537361637438338} -{"id": "chen-durrett-2019-understanding", "title": "Understanding Dataset Design Choices for Multi-hop Reasoning", "abstract": "Learning multi-hop reasoning has been a key challenge for reading comprehension models, leading to the design of datasets that explicitly focus on it. Ideally, a model should not be able to perform well on a multi-hop question answering task without doing multi-hop reasoning. In this paper, we investigate two recently proposed datasets, WikiHop and HotpotQA. First, we explore sentence-factored models for these tasks; by design, these models cannot do multi-hop reasoning, but they are still able to solve a large number of examples in both datasets. Furthermore, we find spurious correlations in the unmasked version of WikiHop, which make it easy to achieve high performance considering only the questions and answers. Finally, we investigate one key difference between these datasets, namely span-based vs. multiple-choice formulations of the QA task. Multiple-choice versions of both datasets can be easily gamed, and two models we examine only marginally exceed a baseline in this setting. Overall, while these datasets are useful testbeds, high-performing models may not be learning as much multi-hop reasoning as previously thought.", "phrases": ["multi-hop reasoning", "reasoning", "hotpotqa", "large number"], "overall_score": 2.311536507240995, "scores": [1.066765929116518, 0.9604054727457619, 0.846108178304755, 0.5410389672515773], "rank_score": 0.8535796368546531} -{"id": "navigli-vannella-2013-semeval", "title": "SemEval-2013 Task 11: Word Sense Induction and Disambiguation within an End-User Application", "abstract": "In this paper we describe our Semeval-2013 task on Word Sense Induction and Disambiguation within an end-user application, namely Web search result clustering and diversification. Given a target query, induction and disambiguation systems are requested to cluster and diversify the search results returned by a search engine for that query. The task enables the end-to-end evaluation and comparison of systems.", "phrases": ["disambiguation", "end-user application", "semeval-2013 task"], "overall_score": 0.9374705840314506, "scores": [0.900187664556969, 0.8571655186675571, 0.802614314958065], "rank_score": 0.853322499394197} -{"id": "voutilainen-purtonen-2011-double", "title": "A double-blind experiment on interannotator agreement: the case of dependency syntax and Finnish", "abstract": "Manually performed treebanking is an expensive effort compared with automatic annotation. In return, manual treebanking is generally believed to provide higher-quality/value syntactic annotation than automatic methods. Unfortunately, there is little or no empirical evidence for or against this belief, though arguments have been voiced for the high degree of subjectivity in other levels of linguistic analysis (e.g. morphological annotation). We report a double-blind annotation experiment at the level of dependency syntax, using a small Finnish corpus as the analysis data. &#13;
The results suggest that an interannotator agreement can be reached as a result of reviews and negotiations that is much higher than the corresponding labelled attachment scores (LAS) reported for state-of-the-art dependency parsers.", "phrases": ["double-blind experiment", "interannotator agreement", "dependency syntax"], "overall_score": 1.1828660191230074, "scores": [0.9369130024203811, 0.8181112040365492, 0.8047482032803188], "rank_score": 0.8532574699124164} -{"id": "sudo-etal-2003-improved", "title": "An Improved Extraction Pattern Representation Model for Automatic IE Pattern Acquisition", "abstract": "Several approaches have been described for the automatic unsupervised acquisition of patterns for information extraction. Each approach is based on a particular model for the patterns to be acquired, such as a predicate-argument structure or a dependency chain. The effect of these alternative models has not been previously studied. In this paper, we compare the prior models and introduce a new model, the Subtree model, based on arbitrary subtrees of dependency trees. We describe a discovery procedure for this model and demonstrate experimentally an improvement in recall using Subtree patterns.", "phrases": ["event-specific document", "extractor", "common word pattern"], "overall_score": 2.59769509103555, "scores": [0.8689815718466576, 0.8534234786034619, 0.8373019093494676], "rank_score": 0.853235653266529} -{"id": "abrahamsson-etal-2014-medical", "title": "Medical text simplification using synonym replacement: Adapting assessment of word difficulty to a compounding language", "abstract": "Medical texts can be difficult to understand for laymen, due to a frequent occurrence of specialised medical terms. Replacing these difficult terms with easier synonyms can, however, lead to improved readability. In this study, we have adapted a method for assessing difficulty of words to make it more suitable to medical Swedish. The difficulty of a word was assessed not only by measuring the frequency of the word in a general corpus, but also by measuring the frequency of substrings of words, thereby adapting the method to the compounding nature of Swedish. All words having a MeSH synonym that was assessed as easier, were replaced in a corpus of medical text. According to the readability measure LIX, the replacement resulted in a slightly more difficult text, while the readability increased according to the OVIX measure and to a preliminary reader study.", "phrases": ["simplification", "synonym replacement", "medical swedish", "medical text"], "overall_score": 1.8747032311440768, "scores": [1.3768050117083188, 0.954731357718252, 0.5605286004077247, 0.5207918687085394], "rank_score": 0.8532142096357087} -{"id": "vaswani-etal-2011-rule", "title": "Rule Markov Models for Fast Tree-to-String Translation", "abstract": "Most statistical machine translation systems rely on composed rules (rules that can be formed out of smaller rules in the grammar). Though this practice improves translation by weakening independence assumptions in the translation model, it nevertheless results in huge, redundant grammars, making both training and decoding inefficient. Here, we take the opposite approach, where we only use minimal rules (those that cannot be formed out of other rules), and instead rely on a rule Markov model of the derivation history to capture dependencies between minimal rules. &#13;
Large-scale experiments on a state-of-the-art tree-to-string translation system show that our approach leads to a slimmer model, a faster decoder, yet the same translation quality (measured using BLEU) as composed rules.", "phrases": ["translation model", "rule markov model", "fast decoder"], "overall_score": 1.8743463513896699, "scores": [1.430136922791471, 0.5994380034076362, 0.5295804337805438], "rank_score": 0.8530517866598837} -{"id": "friedrich-palmer-2014-situation", "title": "Situation Entity Annotation", "abstract": "This paper presents an annotation scheme for a new semantic annotation task with relevance for analysis and computation at both the clause level and the discourse level. More specifically, we label the finite clauses of texts with the type of situation entity (e.g., eventualities, statements about kinds, or statements of belief) they introduce to the discourse, following and extending work by Smith (2003). We take a feature-driven approach to annotation, with the result that each clause is also annotated with fundamental aspectual class, whether the main NP referent is specific or generic, and whether the situation evoked is episodic or habitual. This annotation is performed (so far) on three sections of the MASC corpus, with each clause labeled by at least two annotators. In this paper we present the annotation scheme, statistics of the corpus in its current version, and analyses of both inter-annotator agreement and intra-annotator consistency.", "phrases": ["annotation scheme", "clause", "situation entity"], "overall_score": 1.659740524139745, "scores": [0.9743513239364443, 0.9225458437180497, 0.6619165447034544], "rank_score": 0.8529379041193161} -{"id": "xu-carpuat-2021-editor", "title": "EDITOR: An Edit-Based Transformer with Repositioning for Neural Machine Translation with Soft Lexical Constraints", "abstract": "We introduce an Edit-Based TransfOrmer with Repositioning (EDITOR), which makes sequence generation flexible by seamlessly allowing users to specify preferences in output lexical choice. Building on recent models for non-autoregressive sequence generation (Gu et al., 2019), EDITOR generates new sequences by iteratively editing hypotheses. It relies on a novel reposition operation designed to disentangle lexical choice from word positioning decisions, while enabling efficient oracles for imitation learning and parallel edits at decoding time. Empirically, EDITOR uses soft lexical constraints more effectively than the Levenshtein Transformer (Gu et al., 2019) while speeding up decoding dramatically compared to constrained beam search (Post and Vilar, 2018). EDITOR also achieves comparable or better translation quality with faster decoding speed than the Levenshtein Transformer on standard Romanian-English, English-German, and English-Japanese machine translation tasks.", "phrases": ["edit-based transformer", "lexical constraint", "editor"], "overall_score": 1.7733487192563444, "scores": [0.8814736752959609, 0.7958962819755886, 0.8810314457663734], "rank_score": 0.8528004676793076} -{"id": "lo-etal-2013-improving", "title": "Improving machine translation by training against an automatic semantic frame based evaluation metric", "abstract": "We present the first ever results showing that tuning a machine translation system against a semantic frame based objective function, MEANT, produces more robustly adequate translations than tuning against BLEU or TER as measured across commonly used metrics and human subjective evaluation. &#13;
Moreover, for informal web forum data, human evaluators preferred MEANT-tuned systems over BLEU- or TER-tuned systems by a significantly wider margin than that for formal newswire\u2014even though automatic semantic parsing might be expected to fare worse on informal language. We argue that by preserving the meaning of the translations as captured by semantic frames right in the training process, an MT system is constrained to make more accurate choices of both lexical and reordering rules. As a result, MT systems tuned against semantic frame based MT evaluation metrics produce output that is more adequate. Tuning a machine translation system against a semantic frame based objective function is independent of the translation model paradigm, so, any translation model can benefit from the semantic knowledge incorporated to improve translation adequacy through our approach.", "phrases": ["machine translation", "semantic frame", "evaluation metric"], "overall_score": 1.372342788014348, "scores": [0.9137487284235273, 0.8489243952399834, 0.7953804254892399], "rank_score": 0.8526845163842501} -{"id": "vossen-etal-2008-kyoto", "title": "KYOTO: a System for Mining, Structuring and Distributing Knowledge across Languages and Cultures", "abstract": "We outline work performed within the framework of a current EC project. The goal is to construct a language-independent information system for a specific domain (environment/ecology/biodiversity) anchored in a language-independent ontology that is linked to wordnets in seven languages. For each language, information extraction and identification of lexicalized concepts with ontological entries is carried out by text miners (\u201cKybots\u201d). The mapping of language-specific lexemes to the ontology allows for crosslinguistic identification and translation of equivalent terms. The infrastructure developed within this project enables long-range knowledge sharing and transfer across many languages and cultures, addressing the need for global and uniform transition of knowledge beyond the specific domains addressed here.", "phrases": ["culture", "project", "specific domain", "information extraction", "kyoto"], "overall_score": 1.5267593091783833, "scores": [1.7615032789832656, 0.8432541879988792, 0.5622907175788967, 0.5532107592826238, 0.5402440293488195], "rank_score": 0.852100594638497} -{"id": "yoshikawa-etal-2016-joint", "title": "Joint Transition-based Dependency Parsing and Disfluency Detection for Automatic Speech Recognition Texts", "abstract": "Joint dependency parsing with disfluency detection is an important task in speech language processing. Recent methods show high performance for this task, although most authors make the unrealistic assumption that input texts are transcribed by human annotators. In real-world applications, the input text is typically the output of an automatic speech recognition (ASR) system, which implies that the text contains not only disfluency noises but also recognition errors from the ASR system. In this work, we propose a parsing method that handles both disfluency and ASR errors using an incremental shift-reduce algorithm with several novel features suited to ASR output texts. Because the gold dependency information is usually annotated only on transcribed texts, we also introduce an alignment-based method for transferring the gold dependency annotation to the ASR output texts to construct training data for our parser. &#13;
We conducted an experiment on the Switchboard corpus and show that our method outperforms conventional methods in terms of dependency parsing and disfluency detection.", "phrases": ["disfluency detection", "asr output text", "transition-based dependency parser"], "overall_score": 1.9616430531548037, "scores": [1.39086945151673, 0.6218278109809078, 0.5430949978492978], "rank_score": 0.8519307534489785} -{"id": "goutte-etal-2014-nrc", "title": "The NRC System for Discriminating Similar Languages", "abstract": "We describe the system built by the National Research Council Canada for the \u201cDiscriminating between similar languages\u201d (DSL) shared task. Our system uses various statistical classifiers and makes predictions based on a two-stage process: we first predict the language group, then discriminate between languages or variants within the group. Language groups are predicted using a generative classifier with 99.99% accuracy on the five target groups. Within each group (except English), we use a voting combination of discriminative classifiers trained on a variety of feature spaces, achieving an average accuracy of 95.71%, with per-group accuracy between 90.95% and 100% depending on the group. This approach turns out to reach the best performance among all systems submitted to the open and closed tasks.", "phrases": ["dsl", "language group", "two-step classification approach", "submission track", "good result"], "overall_score": 2.361917179702914, "scores": [1.6454808930423706, 0.8748970549829187, 0.6274879374128922, 0.5581374242215643, 0.5534044430250555], "rank_score": 0.8518815505369602} -{"id": "deng-etal-2021-compression", "title": "Compression, Transduction, and Creation: A Unified Framework for Evaluating Natural Language Generation", "abstract": "Natural language generation (NLG) spans a broad range of tasks, each of which serves for specific objectives and desires different properties of generated text. The complexity makes automatic evaluation of NLG particularly challenging. Previous work has typically focused on a single task and developed individual evaluation metrics based on specific intuitions. In this paper, we propose a unifying perspective based on the nature of information change in NLG tasks, including compression (e.g., summarization), transduction (e.g., text rewriting), and creation (e.g., dialog). Information alignment between input, context, and output text plays a common central role in characterizing the generation. With automatic alignment prediction models, we develop a family of interpretable metrics that are suitable for evaluating key aspects of different NLG tasks, often without need of gold reference data. Experiments show the uniformly designed metrics achieve stronger or comparable correlations with human judgement compared to state-of-the-art metrics in each of diverse tasks, including text summarization, style transfer, and knowledge-grounded dialog.", "phrases": ["transduction", "creation", "compression"], "overall_score": 1.1806541366575845, "scores": [0.8708874003599293, 0.8529600240711969, 0.8311383775102804], "rank_score": 0.8516619339804689} -{"id": "li-2009-use", "title": "On the Use of Virtual Evidence in Conditional Random Fields", "abstract": "Virtual evidence (VE), first introduced by (Pearl, 1988), provides a convenient way of incorporating prior knowledge into Bayesian networks. This work generalizes the use of VE to undirected graphical models and, in particular, to conditional random fields (CRFs). &#13;
We show that VE can be naturally encoded into a CRF model as potential functions. More importantly, we propose a novel semi-supervised machine learning objective for estimating a CRF model integrated with VE. The objective can be optimized using the Expectation-Maximization algorithm while maintaining the discriminative nature of CRFs. When evaluated on the CLASSIFIEDS data, our approach significantly outperforms the best known solutions reported on this task.", "phrases": ["virtual evidence", "conditional random field", "prior knowledge", "function"], "overall_score": 1.1804423702116702, "scores": [1.737446225665512, 0.6242675733618419, 0.5242659055792421, 0.5200570025125854], "rank_score": 0.8515091767797953} -{"id": "yamada-etal-2019-incorporating", "title": "Incorporating Textual Information on User Behavior for Personality Prediction", "abstract": "Several recent studies have shown that textual information of user posts and user behaviors such as liking and sharing the specific posts are useful for predicting the personality of social media users. However, less attention has been paid to the textual information derived from the user behaviors. In this paper, we investigate the effect of textual information on user behaviors for personality prediction. Our experiments on the personality prediction of Twitter users show that the textual information of user behaviors is more useful than the co-occurrence information of the user behaviors. They also show that taking user behaviors into account is crucial for predicting the personality of users who do not post frequently.", "phrases": ["textual information", "user behavior", "personality prediction"], "overall_score": 1.180278244360514, "scores": [0.944216300738883, 0.8150321778871327, 0.7949238763860528], "rank_score": 0.8513907850040229} -{"id": "stewart-2014-now", "title": "Now We Stronger than Ever: African-American English Syntax in Twitter", "abstract": "African American English (AAE) is a well-established dialect that exhibits a distinctive syntax, including constructions like habitual be. Using data mined from the social media service Twitter, the proposed senior thesis project intends to study the demographic distribution of a sub-set of AAE syntactic constructions. This study expands on previous sociolinguistic Twitter work (Eisenstein et al., 2011) by adding part-of-speech tags to the data, thus enabling detection of short-range syntactic features. Through an analysis of ethnic and gender data associated with AAE tweets, this project will provide a more accurate description of the dialect\u2019s speakers and distribution.", "phrases": ["african-american english", "twitter", "dialect"], "overall_score": 1.5253085797943704, "scores": [0.8748055299241897, 1.1147555383014514, 0.564311713233447], "rank_score": 0.8512909271530292} -{"id": "dimitrov-etal-2021-semeval", "title": "SemEval-2021 Task 6: Detection of Persuasion Techniques in Texts and Images", "abstract": "We describe SemEval-2021 task 6 on Detection of Persuasion Techniques in Texts and Images: the data, the annotation guidelines, the evaluation setup, the results, and the participating systems. The task focused on memes and had three subtasks: (i) detecting the techniques in the text, (ii) detecting the text spans where the techniques are used, and (iii) detecting techniques in the entire meme, i.e., both in the text and in the image. It was a popular task, attracting 71 registrations, and 22 teams that eventually made an official submission on the test set. &#13;
The evaluation results for the third subtask confirmed the importance of both modalities, the text and the image. Moreover, some teams reported benefits when not just combining the two modalities, e.g., by using early or late fusion, but rather modeling the interaction between them in a joint model.", "phrases": ["persuasion techniques", "texts", "semeval-2021 task"], "overall_score": 1.6561482320574454, "scores": [0.8586959218307453, 0.8568709648617407, 0.8377086068262574], "rank_score": 0.8510918311729144} -{"id": "awasthi-etal-2019-parallel", "title": "Parallel Iterative Edit Models for Local Sequence Transduction", "abstract": "We present a Parallel Iterative Edit (PIE) model for the problem of local sequence transduction arising in tasks like Grammatical error correction (GEC). Recent approaches are based on the popular encoder-decoder (ED) model for sequence to sequence learning. The ED model auto-regressively captures full dependency among output tokens but is slow due to sequential decoding. The PIE model does parallel decoding, giving up the advantage of modeling full dependency in the output, yet it achieves accuracy competitive with the ED model for four reasons: 1. predicting edits instead of tokens, 2. labeling sequences instead of generating sequences, 3. iteratively refining predictions to capture dependencies, and 4. factorizing logits over edits and their token argument to harness pre-trained language models like BERT. Experiments on tasks spanning GEC, OCR correction and spell correction demonstrate that the PIE model is an accurate and significantly faster alternative for local sequence transduction.", "phrases": ["local sequence transduction", "gec", "alternative", "parallel iterative edit", "text-editing method"], "overall_score": 2.504874084563673, "scores": [0.9162180102229699, 1.5850520970369302, 0.648322697549861, 0.5643686450160985, 0.5396062115980387], "rank_score": 0.8507135322847796} -{"id": "chambers-jurafsky-2011-template", "title": "Template-Based Information Extraction without the Templates", "abstract": "Standard algorithms for template-based information extraction (IE) require predefined template schemas, and often labeled data, to learn to extract their slot fillers (e.g., an embassy is the Target of a Bombing template). This paper describes an approach to template-based IE that removes this requirement and performs extraction without knowing the template structure in advance. Our algorithm instead learns the template structure automatically from raw text, inducing template schemas as sets of linked events (e.g., bombings include detonate, set off, and destroy events) associated with semantic roles. We also solve the standard IE task, using the induced syntactic patterns to extract role fillers from specific documents. 
We evaluate on the MUC-4 terrorism dataset and show that we induce template structure very similar to hand-created gold structure, and we extract role fillers with an F1 score of .40, approaching the performance of algorithms that require full knowledge of the templates.", "phrases": ["information extraction", "semantic role", "event template", "frame"], "overall_score": 2.504638827880914, "scores": [0.9230713904623162, 1.1010979424246132, 0.8504964810198834, 0.5278687206549705], "rank_score": 0.8506336336404458} -{"id": "kadlec-etal-2016-text", "title": "Text Understanding with the Attention Sum Reader Network", "abstract": "Several large cloze-style context-question-answer datasets have been introduced recently: the CNN and Daily Mail news data and the Children's Book Test. Thanks to the size of these datasets, the associated text comprehension task is well suited for deep-learning techniques that currently seem to outperform all alternative approaches. We present a new, simple model that uses attention to directly pick the answer from the context as opposed to computing the answer using a blended representation of words in the document as is usual in similar models. This makes the model particularly suitable for question-answering problems where the answer is a single word from the document. Ensemble of our models sets new state of the art on all evaluated datasets.", "phrases": ["reader", "attention-sum", "human-level performance"], "overall_score": 2.8641428026819877, "scores": [1.4671627699434406, 0.5549601101832303, 0.5296067099879215], "rank_score": 0.8505765300381976} -{"id": "solorio-liu-2008-learning", "title": "Learning to Predict Code-Switching Points", "abstract": "Predicting possible code-switching points can help develop more accurate methods for automatically processing mixed-language text, such as multilingual language models for speech recognition systems and syntactic analyzers. We present in this paper exploratory results on learning to predict potential code-switching points in Spanish-English. We trained different learning algorithms using a transcription of code-switched discourse. To evaluate the performance of the classifiers, we used two different criteria: 1) measuring precision, recall, and F-measure of the predictions against the reference in the transcription, and 2) rating the naturalness of artificially generated code-switched sentences. Average scores for the code-switched sentences generated by our machine learning approach were close to the scores of those generated by humans.", "phrases": ["point", "learning algorithm", "discourse", "code alternation point", "past"], "overall_score": 2.8630520755152955, "scores": [2.0081186403312583, 0.5948341309344848, 0.5629439457468974, 0.5429034624760227, 0.5424628815397404], "rank_score": 0.8502526122056807} -{"id": "davidov-etal-2007-fully", "title": "Fully Unsupervised Discovery of Concept-Specific Relationships by Web Mining", "abstract": "We present a web mining method for discovering and enhancing relationships in which a specified concept (word class) participates. We discover a whole range of relationships focused on the given concept, rather than generic known relationships as in most previous work. Our method is based on clustering patterns that contain concept words and other words related to them. 
We evaluate the method on three different rich concepts and find that in each case the method generates a broad variety of relationships with good precision.", "phrases": ["unsupervised discovery", "concept word", "seed", "concept acquisition method"], "overall_score": 2.357304193375711, "scores": [1.4265041852023261, 0.8285566699865176, 0.585989005912952, 0.559821208548101], "rank_score": 0.8502177674124741} -{"id": "wang-etal-2020-galileo", "title": "Galileo at SemEval-2020 Task 12: Multi-lingual Learning for Offensive Language Identification Using Pre-trained Language Models", "abstract": "This paper describes Galileo's performance in SemEval-2020 Task 12 on detecting and categorizing offensive language in social media. For Offensive Language Identification, we proposed a multi-lingual method using Pre-trained Language Models, ERNIE and XLM-R. For offensive language categorization, we proposed a knowledge distillation method trained on soft labels generated by several supervised models. Our team participated in all three sub-tasks. In Sub-task A - Offensive Language Identification, we ranked first in terms of average F1 scores in all languages. We are also the only team which ranked among the top three across all languages. We also took the first place in Sub-task B - Automatic Categorization of Offense Types and Sub-task C - Offence Target Identification.", "phrases": ["semeval-2020 task", "offensive language identification", "pre-trained language models"], "overall_score": 0.9340189141869235, "scores": [0.9000330439498763, 0.8491857939256415, 0.8013231244375132], "rank_score": 0.8501806541043436} -{"id": "qin-etal-2018-robust", "title": "Robust Distant Supervision Relation Extraction via Deep Reinforcement Learning", "abstract": "Distant supervision has become the standard method for relation extraction. However, even though it is an efficient method, it does not come at no cost\u2014the resulting distantly-supervised training samples are often very noisy. To combat the noise, most of the recent state-of-the-art approaches focus on selecting one-best sentence or calculating soft attention weights over the set of the sentences of one specific entity pair. However, these methods are suboptimal, and the false positive problem is still a key stumbling bottleneck for the performance. We argue that those incorrectly-labeled candidate sentences must be treated with a hard decision, rather than being dealt with soft attention weights. To do this, our paper describes a radical solution\u2014we explore a deep reinforcement learning strategy to generate the false-positive indicator, where we automatically recognize false positives for each relation type without any supervised information. Unlike the removal operation in the previous studies, we redistribute them into the negative examples. &#13;
The experimental results show that the proposed strategy significantly improves the performance of distant supervision compared to state-of-the-art systems.", "phrases": ["distant supervision", "relation extraction", "deep reinforcement learning"], "overall_score": 2.701367152373156, "scores": [0.8696703285752825, 1.0782871371262182, 0.6020627321100009], "rank_score": 0.8500067326038337} -{"id": "alam-etal-2021-fighting-covid", "title": "Fighting the COVID-19 Infodemic: Modeling the Perspective of Journalists, Fact-Checkers, Social Media Platforms, Policy Makers, and the Society", "abstract": "With the emergence of the COVID-19 pandemic, the political and the medical aspects of disinformation merged as the problem got elevated to a whole new level to become the first global infodemic. Fighting this infodemic has been declared one of the most important focus areas of the World Health Organization, with dangers ranging from promoting fake cures, rumors, and conspiracy theories to spreading xenophobia and panic. Addressing the issue requires solving a number of challenging problems such as identifying messages containing claims, determining their check-worthiness and factuality, and their potential to do harm as well as the nature of that harm, to mention just a few. To address this gap, we release a large dataset of 16K manually annotated tweets for fine-grained disinformation analysis that (i) focuses on COVID-19, (ii) combines the perspectives and the interests of journalists, fact-checkers, social media platforms, policy makers, and society, and (iii) covers Arabic, Bulgarian, Dutch, and English. Finally, we show strong evaluation results using pretrained Transformers, thus confirming the practical utility of the dataset in monolingual vs. multilingual, and single task vs. multitask settings.", "phrases": ["covid-19 infodemic", "journalist", "fact-checker"], "overall_score": 1.767500299639783, "scores": [0.8282027550020713, 0.8808666920600202, 0.8408944699979803], "rank_score": 0.8499879723533573} -{"id": "wen-etal-2015-stochastic", "title": "Stochastic Language Generation in Dialogue using Recurrent Neural Networks with Convolutional Sentence Reranking", "abstract": "The natural language generation (NLG) component of a spoken dialogue system (SDS) usually needs a substantial amount of handcrafting or a well-labeled dataset to be trained on. These limitations add significantly to development costs and make cross-domain, multi-lingual dialogue systems intractable. Moreover, human languages are context-aware. The most natural response should be directly learned from data rather than depending on predefined syntaxes or rules. This paper presents a statistical language generator based on a joint recurrent and convolutional neural network structure which can be trained on dialogue act-utterance pairs without any semantic alignments or predefined grammar trees. Objective metrics suggest that this new model outperforms previous methods under the same experimental conditions. &#13;
Results of an evaluation by human judges indicate that it produces not only high quality but linguistically varied utterances which are preferred compared to n-gram and rule-based systems.", "phrases": ["dialogue system", "stochastic language generation", "backward rnn reranker"], "overall_score": 2.5022366962428944, "scores": [0.891269618928484, 1.1279473512192078, 0.5302364713543648], "rank_score": 0.8498178138340188} -{"id": "chen-etal-2020-conditional", "title": "Conditional Causal Relationships between Emotions and Causes in Texts", "abstract": "The causal relationships between emotions and causes in text have recently received a lot of attention. Most of the existing works focus on the extraction of the causally related clauses from documents. However, none of these works has considered the possibility that the causal relationships among the extracted emotion and cause clauses may only be valid under a specific context, without which the extracted clauses may not be causally related. To address such an issue, we propose a new task of determining whether or not an input pair of emotion and cause has a valid causal relationship under different contexts, and construct a corresponding dataset via manual annotation and negative sampling based on an existing benchmark dataset. Furthermore, we propose a prediction aggregation module with low computational overhead to fine-tune the prediction results based on the characteristics of the input clauses. Experiments demonstrate the effectiveness and generality of our aggregation module.", "phrases": ["causal relationship", "emotion", "clause"], "overall_score": 1.367651720332398, "scores": [1.1346091134556961, 0.8402405627993939, 0.574459700204123], "rank_score": 0.849769792153071} -{"id": "krishna-etal-2021-hurdles", "title": "Hurdles to Progress in Long-form Question Answering", "abstract": "The task of long-form question answering (LFQA) involves retrieving documents relevant to a given question and using them to generate a paragraph-length answer. While many models have recently been proposed for LFQA, we show in this paper that the task formulation raises fundamental challenges regarding evaluation and dataset creation that currently preclude meaningful modeling progress. To demonstrate these challenges, we first design a new system that relies on sparse attention and contrastive retriever learning to achieve state-of-the-art performance on the ELI5 LFQA dataset. While our system tops the public leaderboard, a detailed analysis reveals several troubling trends: (1) our system's generated answers are not actually grounded in the documents that it retrieves; (2) ELI5 contains significant train / validation overlap, as at least 81% of ELI5 validation questions occur in paraphrased form in the training set; (3) ROUGE-L is not an informative metric of generated answer quality and can be easily gamed; and (4) human evaluations used for other text generation tasks are unreliable for LFQA. 
We offer suggestions to mitigate each of these issues, which we hope will lead to more rigorous LFQA research and meaningful progress in the future.", "phrases": ["long-form question", "lfqa", "retriever", "human evaluation"], "overall_score": 2.0368034779692783, "scores": [1.2769698040671489, 1.0573694582027602, 0.5331144597284673, 0.5301983791166255], "rank_score": 0.8494130252787504} -{"id": "liakata-etal-2010-corpora", "title": "Corpora for the Conceptualisation and Zoning of Scientific Papers", "abstract": "We present two complementary annotation schemes for sentence-based annotation of full scientific papers, CoreSC and AZ-II, applied to primary research articles in chemistry. AZ-II is the extension of AZ for chemistry papers. AZ has been shown to have been reliably annotated by independent human coders and useful for various information access tasks. Like AZ, AZ-II follows the rhetorical structure of a scientific paper and the knowledge claims made by the authors. The CoreSC scheme takes a different view of scientific papers, treating them as the humanly readable representations of scientific investigations. It seeks to retrieve the structure of the investigation from the paper as generic high-level Core Scientific Concepts (CoreSC). CoreSCs have been annotated by 16 chemistry experts over a total of 265 full papers in physical chemistry and biochemistry. We describe the differences and similarities between the two schemes in detail and present the two corpora produced using each scheme. There are 36 shared papers in the corpora, which allows us to quantitatively compare aspects of the annotation schemes. We show the correlation between the two schemes, their strengths and weaknesses and discuss the benefits of combining a rhetorical based analysis of the papers with a content-based one.", "phrases": ["annotation scheme", "research article", "rhetorical structure"], "overall_score": 2.300098872059428, "scores": [1.4704280166387758, 0.5563861315257586, 0.5212540572807628], "rank_score": 0.8493560684817657} -{"id": "liu-etal-2020-context", "title": "How Does Context Matter? On the Robustness of Event Detection with Context-Selective Mask Generalization", "abstract": "Event detection (ED) aims to identify and classify event triggers in texts, which is a crucial subtask of event extraction (EE). Despite many advances in ED, the existing studies are typically centered on improving the overall performance of an ED model, which rarely consider the robustness of an ED model. This paper aims to fill this research gap by stressing the importance of robustness modeling in ED models. We first pinpoint three stark cases demonstrating the brittleness of the existing ED models. After analyzing the underlying reason, we propose a new training mechanism, called context-selective mask generalization for ED, which can effectively mine context-specific patterns for learning and robustify an ED model. The experimental results have confirmed the effectiveness of our model regarding defending against adversarial attacks, exploring unseen predicates, and tackling ambiguity cases. &#13;
Moreover, a deeper analysis suggests that our approach can learn a complementary predictive bias with most ED models that use full context for feature learning.", "phrases": ["robustness", "event detection", "context-selective mask generalization"], "overall_score": 1.1774141203166908, "scores": [0.8860522424593343, 0.8726568922213795, 0.7892651339995828], "rank_score": 0.8493247562267655} -{"id": "sennrich-2017-grammatical", "title": "How Grammatical is Character-level Neural Machine Translation? Assessing MT Quality with Contrastive Translation Pairs", "abstract": "Analysing translation quality in regards to specific linguistic phenomena has historically been difficult and time-consuming. Neural machine translation has the attractive property that it can produce scores for arbitrary translations, and we propose a novel method to assess how well NMT systems model specific linguistic phenomena such as agreement over long distances, the production of novel words, and the faithful translation of polarity. The core idea is that we measure whether a reference translation is more probable under a NMT model than a contrastive translation which introduces a specific type of error. We present LingEval97, a large-scale data set of 97000 contrastive translation pairs based on the WMT English-German translation task, with errors automatically created with simple rules. We report results for a number of systems, and find that recently introduced character-level NMT systems perform better at transliteration than models with byte-pair encoding (BPE) segmentation, but perform more poorly at morphosyntactic agreement, and translating discontiguous units of meaning.", "phrases": ["contrastive translation pair", "polarity", "incorrect translation"], "overall_score": 2.662854352439719, "scores": [1.1575004825974662, 0.8320322790771818, 0.558251576990408], "rank_score": 0.8492614462216853} -{"id": "mann-yarowsky-2003-unsupervised", "title": "Unsupervised Personal Name Disambiguation", "abstract": "This paper presents a set of algorithms for distinguishing personal names with multiple real referents in text, based on little or no supervision. The approach utilizes an unsupervised clustering technique over a rich feature space of biographic facts, which are automatically extracted via a language-independent bootstrapping process. The induced clusters of named entities are then partitioned and linked to their real referents via the automatically extracted biographic data. Performance is evaluated based on both a test set of hand-labeled multi-referent personal names and via automatically generated pseudonames.", "phrases": ["name", "disambiguation", "agglomerative clustering algorithm", "local biographical information"], "overall_score": 2.8880765729175133, "scores": [1.7117988537656519, 0.5976172605473621, 0.5605813180816459, 0.526543548718431], "rank_score": 0.8491352452782727} -{"id": "manion-sainudiin-2013-daebak", "title": "DAEBAK!: Peripheral Diversity for Multilingual Word Sense Disambiguation", "abstract": "We introduce Peripheral Diversity (PD) as a knowledge-based approach to achieve multilingual Word Sense Disambiguation (WSD). PD exploits the frequency and diverse use of word senses in semantic subgraphs derived from larger sense inventories such as BabelNet, Wikipedia, and WordNet in order to achieve WSD. PD\u2019s f-measure scores for SemEval 2013 Task 12 outperform the Most Frequent Sense (MFS) baseline for two of the five languages: English, French, German, Italian, and Spanish. &#13;
Despite remaining under-developed and under-explored, PD proves robust and competitive, and encourages further development.", "phrases": ["peripheral diversity", "word sense disambiguation", "daebak"], "overall_score": 1.1770640015277234, "scores": [0.9517051437858961, 0.8109113992706906, 0.784600053662862], "rank_score": 0.8490721989064829} -{"id": "jwalapuram-etal-2019-evaluating", "title": "Evaluating Pronominal Anaphora in Machine Translation: An Evaluation Measure and a Test Suite", "abstract": "The ongoing neural revolution in machine translation has made it easier to model larger contexts beyond the sentence-level, which can potentially help resolve some discourse-level ambiguities such as pronominal anaphora, thus enabling better translations. Unfortunately, even when the resulting improvements are seen as substantial by humans, they remain virtually unnoticed by traditional automatic evaluation measures like BLEU, as only a few words end up being affected. Thus, specialized evaluation measures are needed. With this aim in mind, we contribute an extensive, targeted dataset that can be used as a test suite for pronoun translation, covering multiple source languages and different pronoun errors drawn from real system translations, for English. We further propose an evaluation measure to differentiate good and bad pronoun translations. We also conduct a user study to report correlations with human judgments.", "phrases": ["pronominal anaphora", "machine translation", "evaluation measure"], "overall_score": 0.9319122774252839, "scores": [0.9265766613153735, 0.8363440412360472, 0.7818686295115147], "rank_score": 0.8482631106876451} -{"id": "feng-etal-2019-misleading", "title": "Misleading Failures of Partial-input Baselines", "abstract": "Recent work establishes dataset difficulty and removes annotation artifacts via partial-input baselines (e.g., hypothesis-only model for SNLI or question-only model for VQA). A successful partial-input baseline indicates that the dataset is cheatable. But the converse is not necessarily true: failures of partial-input baselines do not mean the dataset is free of artifacts. We first design artificial datasets to illustrate how the trivial patterns that are only visible in the full input can evade any partial-input baseline. Next, we identify such artifacts in the SNLI dataset\u2014a hypothesis-only model augmented with trivial patterns in the premise can solve 15% of previously-thought \u201chard\u201d examples. Our work provides a caveat for the use and creation of partial-input baselines for datasets.", "phrases": ["partial-input baseline", "artifact", "premise", "future dataset creation"], "overall_score": 1.953175964494979, "scores": [1.3082335330716106, 0.9192612467742187, 0.5828670123804021, 0.5826523820386929], "rank_score": 0.8482535435662311} -{"id": "villemonte-de-la-clergerie-etal-2008-passage", "title": "PASSAGE: from French Parser Evaluation to Large Sized Treebank", "abstract": "In this paper we present the PASSAGE project which aims at building automatically a French Treebank of large size by combining the output of several parsers, using the EASY annotation scheme. &#13;
We also present the results of the first evaluation campaign of the project and the preliminary results we have obtained with our ROVER procedure for combining parsers automatically.", "phrases": ["passage", "parse combination algorithm", "new option"], "overall_score": 1.5187933564860527, "scores": [1.4207374780679864, 0.602052279467758, 0.520174377835164], "rank_score": 0.8476547117903027} -{"id": "fernandez-etal-2014-gplsi", "title": "GPLSI: Supervised Sentiment Analysis in Twitter using Skipgrams", "abstract": "In this paper we describe the system submitted for the SemEval 2014 Task 9 (Sentiment Analysis in Twitter) Subtask B. Our contribution consists of a supervised approach using machine learning techniques, which uses the terms in the dataset as features. In this work we do not employ any external knowledge and resources. The novelty of our approach lies in the use of words, ngrams and skipgrams (not-adjacent ngrams) as features, and how they are weighted.", "phrases": ["sentiment analysis", "twitter", "skipgram"], "overall_score": 1.1744802073403702, "scores": [0.8843793652377469, 0.8675968631355598, 0.7896489277549836], "rank_score": 0.8472083853760969} -{"id": "pasunuru-bansal-2017-multi", "title": "Multi-Task Video Captioning with Video and Entailment Generation", "abstract": "Video captioning, the task of describing the content of a video, has seen some promising improvements in recent years with sequence-to-sequence models, but accurately learning the temporal and logical dynamics involved in the task still remains a challenge, especially given the lack of sufficient annotated data. We improve video captioning by sharing knowledge with two related directed-generation tasks: a temporally-directed unsupervised video prediction task to learn richer context-aware video encoder representations, and a logically-directed language entailment generation task to learn better video-entailing caption decoder representations. For this, we present a many-to-many multi-task learning model that shares parameters across the encoders and decoders of the three tasks. We achieve significant improvements and the new state-of-the-art on several standard video captioning datasets using diverse automatic and human evaluations. We also show mutual multi-task improvements on the entailment generation task.", "phrases": ["video", "entailment generation", "multi-task learning"], "overall_score": 2.031134802503128, "scores": [0.8584104779364491, 0.8294840299726692, 0.8532525040695635], "rank_score": 0.8470490039928938} -{"id": "mellebeek-etal-2006-multi", "title": "Multi-Engine Machine Translation by Recursive Sentence Decomposition", "abstract": "In this paper, we present a novel approach to combine the outputs of multiple MT engines into a consensus translation. In contrast to previous Multi-Engine Machine Translation (MEMT) techniques, we do not rely on word alignments of output hypotheses, but prepare the input sentence for multi-engine processing. We do this by using a recursive decomposition algorithm that produces simple chunks as input to the MT engines. A consensus translation is produced by combining the best chunk translations, selected through majority voting, a trigram language model score and a confidence score assigned to each MT engine. &#13;
We report statistically significant relative improvements of up to 9% BLEU score in experiments (English\u2192Spanish) carried out on an 800-sentence test set extracted from the Penn-II Treebank.", "phrases": ["recursive decomposition algorithm", "chunk", "multi-engine machine translation"], "overall_score": 1.647524672255382, "scores": [0.9980727342575468, 0.947083603299054, 0.5948242566993224], "rank_score": 0.8466601980853077} -{"id": "ling-etal-2013-microblogs", "title": "Microblogs as Parallel Corpora", "abstract": "In the ever-expanding sea of microblog data, there is a surprising amount of naturally occurring parallel text: some users post multilingual messages targeting international audiences while others \u201cretweet\u201d translations. We present an efficient method for detecting these messages and extracting parallel segments from them. We have been able to extract over 1M Chinese-English parallel segments from Sina Weibo (the Chinese counterpart of Twitter) using only their public APIs. As a supplement to existing parallel training data, our automatically extracted parallel data yields substantial translation quality improvements in translating microblog text and modest improvements in translating edited news commentary. The resources described in this paper are available at http://www.cs.cmu.edu/~lingwang/utopia.", "phrases": ["parallel data", "microblog", "social medium", "machine translation"], "overall_score": 2.39831322741977, "scores": [0.9273283839462492, 1.0530340492446089, 0.8419276512296343, 0.5637072778341773], "rank_score": 0.8464993405636675} -{"id": "cao-gete-2018-using", "title": "Using Discourse Information for Education with a Spanish-Chinese Parallel Corpus", "abstract": "Nowadays, with the fruitful achievements in Natural Language Processing (NLP) studies, the concern of using NLP technologies for education has called much attention. As two of the most spoken languages in the world, Spanish and Chinese occupy important positions in both NLP studies and bilingual education. In this paper, we present a Spanish-Chinese parallel corpus with annotated discourse information that aims to serve for bilingual language education. The theoretical framework of this work is Rhetorical Structure Theory (RST). The corpus is composed of 100 Spanish-Chinese parallel texts, and all the discourse markers (DM) have been annotated to form the education source. With pedagogical aim, we also present two programs that generate automatic exercises for both Spanish and Chinese students using our corpus. The reliability of this work has been evaluated using Kappa coefficient.", "phrases": ["discourse information", "education", "spanish-chinese parallel corpus"], "overall_score": 1.1734475487823655, "scores": [0.8583916383915368, 0.8535057511517814, 0.8274930495141251], "rank_score": 0.8464634796858143} -{"id": "chen-etal-2021-improving", "title": "Improving Faithfulness in Abstractive Summarization with Contrast Candidate Generation and Selection", "abstract": "Despite significant progress in neural abstractive summarization, recent studies have shown that the current models are prone to generating summaries that are unfaithful to the original context. To address the issue, we study contrast candidate generation and selection as a model-agnostic post-processing technique to correct the extrinsic hallucinations (i.e. information not present in the source text) in unfaithful summaries. &#13;
We learn a discriminative correction model by generating alternative candidate summaries where named entities and quantities in the generated summary are replaced with ones with compatible semantic types from the source document. This model is then used to select the best candidate as the final output summary. Our experiments and analysis across a number of neural summarization systems show that our proposed method is effective in identifying and correcting extrinsic hallucinations. We analyze the typical hallucination phenomenon by different types of neural summarization systems, in the hope of providing insights for future work in this direction.", "phrases": ["faithfulness", "abstractive summarization", "selection"], "overall_score": 1.7597417647051605, "scores": [0.9391386715518952, 0.8094321231619519, 0.7901999224714811], "rank_score": 0.8462569057284428} -{"id": "iosif-mishra-2014-speaker", "title": "From Speaker Identification to Affective Analysis: A Multi-Step System for Analyzing Children's Stories", "abstract": "We propose a multi-step system for the analysis of children\u2019s stories that is intended to be part of a larger text-to-speech-based storytelling system. A hybrid approach is adopted, where pattern-based and statistical methods are used along with utilization of external knowledge sources. This system performs the following story analysis tasks: identification of characters in each story; attribution of quotes to specific story characters; identification of character age, gender and other salient personality attributes; and finally, affective analysis of the quoted material. The different types of analyses were evaluated using several datasets. For the quote attribution, as well as for the gender and age estimation, substantial improvement over baseline was realized, whereas results for personality attribute estimation and valence estimation are more modest.", "phrases": ["affective analysis", "multi-step system", "story"], "overall_score": 0.929575132943954, "scores": [0.8726345822339834, 0.8437855995042179, 0.8219870685691314], "rank_score": 0.8461357501024441} -{"id": "liang-etal-2020-towards", "title": "Towards Debiasing Sentence Representations", "abstract": "As natural language processing methods are increasingly deployed in real-world scenarios such as healthcare, legal systems, and social science, it becomes necessary to recognize the role they potentially play in shaping social biases and stereotypes. Previous work has revealed the presence of social biases in widely used word embeddings involving gender, race, religion, and other social constructs. While some methods were proposed to debias these word-level embeddings, there is a need to perform debiasing at the sentence-level given the recent shift towards new contextualized sentence representations such as ELMo and BERT. In this paper, we investigate the presence of social biases in sentence-level representations and propose a new method, Sent-Debias, to reduce these biases. We show that Sent-Debias is effective in removing biases, and at the same time, preserves performance on sentence-level downstream tasks such as sentiment analysis, linguistic acceptability, and natural language understanding. &#13;
We hope that our work will inspire future research on characterizing and removing social biases from widely adopted sentence representations for fairer NLP.", "phrases": ["sentence representation", "gender", "bert", "sent-debias"], "overall_score": 2.396161987778558, "scores": [1.2330250782267191, 1.0631667497424395, 0.5632184412214529, 0.5235499202429919], "rank_score": 0.8457400473584009} -{"id": "ge-etal-2021-baco", "title": "BACO: A Background Knowledge- and Content-Based Framework for Citing Sentence Generation", "abstract": "In this paper, we focus on the problem of citing sentence generation, which entails generating a short text to capture the salient information in a cited paper and the connection between the citing and cited paper. We present BACO, a BAckground knowledge- and COntent-based framework for citing sentence generation, which considers two types of information: (1) background knowledge by leveraging structural information from a citation network; and (2) content, which represents in-depth information about what to cite and why to cite. First, a citation network is encoded to provide background knowledge. Second, we apply salience estimation to identify what to cite by estimating the importance of sentences in the cited paper. During the decoding stage, both types of information are combined to facilitate the text generation, and then we conduct a joint training for the generator and citation function classification to make the model aware of why to cite. Our experimental results show that our framework outperforms comparative baselines.", "phrases": ["background knowledge-", "sentence generation", "cited paper"], "overall_score": 1.3610652153832172, "scores": [0.8813056067242432, 0.8030304581951903, 0.8526960346750525], "rank_score": 0.8456773665314953} -{"id": "ebert-etal-2015-cis", "title": "CIS-positive: A Combination of Convolutional Neural Networks and Support Vector Machines for Sentiment Analysis in Twitter", "abstract": "This paper describes our automatic sentiment analysis system \u2013 CIS-positive \u2013 for SemEval 2015 Task 10 \u201cSentiment Analysis in Twitter\u201d, subtask B \u201cMessage Polarity Classification\u201d. In this system, we propose to normalize the Twitter data in a way that maximizes the coverage of sentiment lexicons and minimizes distracting elements. Furthermore, we integrate the output of Convolutional Neural Networks into Support Vector Machines for the polarity classification. Our system achieves a macro F1 score of the positive and negative class of 59.57 on the SemEval 2015 test data.", "phrases": ["convolutional neural networks", "sentiment analysis", "twitter"], "overall_score": 0.9289980352966168, "scores": [0.8955941001242903, 0.8422948585408451, 0.7989424008935959], "rank_score": 0.8456104531862438} -{"id": "abdul-mageed-ungar-2017-emonet", "title": "EmoNet: Fine-Grained Emotion Detection with Gated Recurrent Neural Networks", "abstract": "Accurate detection of emotion from natural language has applications ranging from building emotional chatbots to better understanding individuals and their lives. However, progress on emotion detection has been hampered by the absence of large labeled datasets. In this work, we build a very large dataset for fine-grained emotions and develop deep learning models on it. We achieve a new state-of-the-art on 24 fine-grained types of emotions (with an average accuracy of 87.58%). 
We also extend the task beyond emotion types to model Robert Plutchik's 8 primary emotion dimensions, acquiring a superior accuracy of 95.68%.", "phrases": ["emotion", "deep learning", "distant supervision", "hashtag", "writer"], "overall_score": 2.6137618925887, "scores": [1.5167729466832585, 1.0885206457152459, 0.5567512716528088, 0.540661969779374, 0.5252549816871378], "rank_score": 0.8455923631035651} -{"id": "berant-etal-2014-modeling", "title": "Modeling Biological Processes for Reading Comprehension", "abstract": "Machine reading calls for programs that read and understand text, but most current work only attempts to extract facts from redundant web-scale corpora. In this paper, we focus on a new reading comprehension task that requires complex reasoning over a single document. The input is a paragraph describing a biological process, and the goal is to answer questions that require an understanding of the relations between entities and events in the process. To answer the questions, we first predict a rich structure representing the process in the paragraph. Then, we map the question to a formal query, which is executed against the predicted structure. We demonstrate that answering questions via predicted structures substantially improves accuracy over baselines that use shallower representations.", "phrases": ["biological process", "reading comprehension task", "reasoning", "event argument", "processbank dataset"], "overall_score": 2.6871839971482854, "scores": [1.6824520139591201, 0.8702996014974835, 0.6018728906431764, 0.5505694220340863, 0.5225255199883856], "rank_score": 0.8455438896244504} -{"id": "yu-etal-2019-gumdrop", "title": "GumDrop at the DISRPT2019 Shared Task: A Model Stacking Approach to Discourse Unit Segmentation and Connective Detection", "abstract": "In this paper we present GumDrop, Georgetown University's entry at the DISRPT 2019 Shared Task on automatic discourse unit segmentation and connective detection. Our approach relies on model stacking, creating a heterogeneous ensemble of classifiers, which feed into a metalearner for each final task. The system encompasses three trainable component stacks: one for sentence splitting, one for discourse unit segmentation and one for connective detection. The flexibility of each ensemble allows the system to generalize well to datasets of different sizes and with varying levels of homogeneity.", "phrases": ["discourse unit segmentation", "connective detection", "gumdrop"], "overall_score": 0.9288454376883468, "scores": [0.8884659971790354, 0.8340958142283881, 0.8138528471644976], "rank_score": 0.845471552857307} -{"id": "sujana-etal-2020-rumor", "title": "Rumor Detection on Twitter Using Multiloss Hierarchical BiLSTM with an Attenuation Factor", "abstract": "Social media platforms such as Twitter have become a breeding ground for unverified information or rumors. These rumors can threaten people's health, endanger the economy, and affect the stability of a country. Many researchers have developed models to classify rumors using traditional machine learning or vanilla deep learning models. However, previous studies on rumor detection have achieved low precision and are time consuming. Inspired by the hierarchical model and multitask learning, a multiloss hierarchical BiLSTM model with an attenuation factor is proposed in this paper. The model is divided into two BiLSTM modules: post level and event level. By means of this hierarchical structure, the model can extract deep information from limited quantities of text. 
Each module has a loss function that helps to learn bilateral features and reduce the training time. An attenuation factor is added at the post level to increase the accuracy. The results on two rumor datasets demonstrate that our model achieves better performance than that of state-of-the-art machine learning and vanilla deep learning models.", "phrases": ["twitter", "bilstm model", "rumor detection"], "overall_score": 1.1717231761687792, "scores": [0.8499163220684909, 0.8272285522191064, 0.8585139490424478], "rank_score": 0.8452196077766816} -{"id": "krahmer-etal-2003-graph", "title": "Graph-Based Generation of Referring Expressions", "abstract": "This article describes a new approach to the generation of referring expressions. We propose to formalize a scene (consisting of a set of objects with various properties and relations) as a labeled directed graph and describe content selection (which properties to include in a referring expression) as a subgraph construction problem. Cost functions are used to guide the search process and to give preference to some solutions over others. The current approach has four main advantages: (1) Graph structures have been studied extensively, and by moving to a graph perspective we get direct access to the many theories and algorithms for dealing with graphs; (2) many existing generation algorithms can be reformulated in terms of graphs, and this enhances comparison and integration of the various approaches; (3) the graph perspective allows us to solve a number of problems that have plagued earlier algorithms for the generation of referring expressions; and (4) the combined use of graphs and cost functions paves the way for an integration of rule-based generation techniques with more recent stochastic approaches.", "phrases": ["referring expression", "natural language generation", "graph-based approach"], "overall_score": 2.7850510743970194, "scores": [1.4024571834322375, 0.5898231838209366, 0.5427823688222114], "rank_score": 0.8450209120251285} -{"id": "benamara-saint-dizier-2003-dynamic", "title": "Dynamic Generation of Cooperative Natural Language Responses in WEBCOOP", "abstract": "We present in this paper a formal approach for the dynamic generation of cooperative NL responses in WEBCOOP, a system that provides intelligent responses in French to natural language queries on the Web. The system integrates reasoning procedures and NLG techniques paired with hypertext links. Content determination is organized in two steps: providing explanations that report user misconceptions and then offering flexible solutions that reflect the cooperative know-how of the system.", "phrases": ["natural language response", "webcoop", "dynamic generation", "cooperative question"], "overall_score": 1.3596568195294285, "scores": [0.830982951264754, 0.7986805684026272, 0.9198395872887306, 0.8297060169872775], "rank_score": 0.8448022809858474} -{"id": "chen-etal-2018-attacking", "title": "Attacking Visual Language Grounding with Adversarial Examples: A Case Study on Neural Image Captioning", "abstract": "Visual language grounding is widely studied in modern neural image captioning systems, which typically adopt an encoder-decoder framework consisting of two principal components: a convolutional neural network (CNN) for image feature extraction and a recurrent neural network (RNN) for language caption generation. 
To study the robustness of language grounding to adversarial perturbations in machine vision and perception, we propose Show-and-Fool, a novel algorithm for crafting adversarial examples in neural image captioning. The proposed algorithm provides two evaluation approaches, which check if we can mislead neural image captioning systems to output some randomly chosen captions or keywords. Our extensive experiments show that our algorithm can successfully craft visually-similar adversarial examples with randomly targeted captions or keywords, and the adversarial examples can be made highly transferable to other image captioning systems. Consequently, our approach leads to new robustness implications of neural image captioning and novel insights in visual language grounding.", "phrases": ["visual language grounding", "adversarial example", "neural image captioning"], "overall_score": 1.1704516362004411, "scores": [0.862054814377609, 0.8438912620834136, 0.8269610802591015], "rank_score": 0.8443023855733748} -{"id": "hasan-ng-2010-conundrums", "title": "Conundrums in Unsupervised Keyphrase Extraction: Making Sense of the State-of-the-Art", "abstract": "State-of-the-art approaches for unsupervised keyphrase extraction are typically evaluated on a single dataset with a single parameter setting. Consequently, it is unclear how effective these approaches are on a new dataset from a different domain, and how sensitive they are to changes in parameter settings. To gain a better understanding of state-of-the-art unsupervised keyphrase extraction algorithms, we conduct a systematic evaluation and analysis of these algorithms on a variety of standard evaluation datasets.", "phrases": ["unsupervised keyphrase extraction", "tf-idf", "ranking"], "overall_score": 2.097739014381967, "scores": [0.9684021862419184, 1.0441625240713241, 0.5200121346490442], "rank_score": 0.8441922816540957} -{"id": "mostafazadeh-etal-2016-caters", "title": "CaTeRS: Causal and Temporal Relation Scheme for Semantic Annotation of Event Structures", "abstract": "Learning commonsense causal and temporal relation between events is one of the major steps towards deeper language understanding. This is even more crucial for understanding stories and script learning. A prerequisite for learning scripts is a semantic framework which enables capturing rich event structures. In this paper we introduce a novel semantic annotation framework, called Causal and Temporal Relation Scheme (CaTeRS), which is unique in simultaneously capturing a comprehensive set of temporal and causal relations between events. By annotating a total of 1,600 sentences in the context of 320 five-sentence short stories sampled from ROCStories corpus, we demonstrate that these stories are indeed full of causal and temporal relations. Furthermore, we show that the CaTeRS annotation scheme enables high inter-annotator agreement for broad-coverage event entity annotation and moderate agreement on semantic link annotation.", "phrases": ["causal", "temporal relation scheme", "caters annotation scheme", "commonsense reasoning standpoint"], "overall_score": 2.339785962187374, "scores": [0.9174683610721492, 0.7883520357932083, 1.1365522307283171, 0.5332249767956612], "rank_score": 0.843899401097334} -{"id": "wang-etal-2020-combining", "title": "Combining Self-Training and Self-Supervised Learning for Unsupervised Disfluency Detection", "abstract": "Most existing approaches to disfluency detection heavily rely on human-annotated corpora, which is expensive to obtain in practice. 
There have been several proposals to alleviate this issue with, for instance, self-supervised learning techniques, but they still require human-annotated corpora. In this work, we explore the unsupervised learning paradigm which can potentially work with unlabeled text corpora that are cheaper and easier to obtain. Our model builds upon the recent work on Noisy Student Training, a semi-supervised learning approach that extends the idea of self-training. Experimental results on the commonly used English Switchboard test set show that our approach achieves competitive performance compared to the previous state-of-the-art supervised systems using contextualized word embeddings (e.g. BERT and ELECTRA).", "phrases": ["self-training", "disfluency detection", "pseudo label", "teacher"], "overall_score": 1.511833064716879, "scores": [1.6866895114888318, 0.6110259761203588, 0.5439342665233075, 0.5334306418276203], "rank_score": 0.8437700989900296} -{"id": "dusek-jurcicek-2016-sequence", "title": "Sequence-to-Sequence Generation for Spoken Dialogue via Deep Syntax Trees and Strings", "abstract": "We present a natural language generator based on the sequence-to-sequence approach that can be trained to produce natural language strings as well as deep syntax dependency trees from input dialogue acts, and we use it to directly compare two-step generation with separate sentence planning and surface realization stages to a joint, one-step approach. We were able to train both setups successfully using very little training data. The joint setup offers better performance, surpassing state-of-the-art with regards to n-gram-based scores while providing more relevant outputs.", "phrases": ["language generator", "sentence planning", "encoder-decoder model"], "overall_score": 2.7157330612039767, "scores": [1.4584436340279994, 0.5510949461816399, 0.5215311555871812], "rank_score": 0.8436899119322736} -{"id": "badaro-etal-2018-ema", "title": "EMA at SemEval-2018 Task 1: Emotion Mining for Arabic", "abstract": "While significant progress has been achieved for Opinion Mining in Arabic (OMA), very limited efforts have been put towards the task of Emotion mining in Arabic. In fact, businesses are interested in learning a fine-grained representation of how users are feeling towards their products or services. In this work, we describe the methods used by the team Emotion Mining in Arabic (EMA), as part of the SemEval-2018 Task 1 for Affect Mining for Arabic tweets. EMA participated in all 5 subtasks. For the five tasks, several preprocessing steps were evaluated and eventually the best system included diacritics removal, elongation adjustment, replacement of emojis by the corresponding Arabic word, character normalization and light stemming. Moreover, several features were evaluated along with different classification and regression techniques. For the 5 subtasks, word embeddings feature turned out to perform best along with Ensemble technique. EMA achieved the 1st place in subtask 5, and 3rd place in subtasks 1 and 3.", "phrases": ["semeval-2018 task", "emotion mining", "arabic"], "overall_score": 0.9262325554109907, "scores": [0.8688035855649423, 0.8440301417549545, 0.8164458874218016], "rank_score": 0.8430932049138994} -{"id": "yang-etal-2020-ted", "title": "TED: A Pretrained Unsupervised Summarization Model with Theme Modeling and Denoising", "abstract": "Text summarization aims to extract essential information from a piece of text and transform the text into a concise version. 
Existing unsupervised abstractive summarization models leverage the recurrent neural network framework, while the recently proposed transformer exhibits much more capability. Moreover, most previous summarization models ignore abundant unlabeled corpora resources available for pretraining. In order to address these issues, we propose TED, a transformer-based unsupervised abstractive summarization system with pretraining on large-scale data. We first leverage the lead bias in news articles to pretrain the model on millions of unlabeled corpora. Next, we finetune TED on target domains through theme modeling and a denoising autoencoder to enhance the quality of generated summaries. Notably, TED outperforms all unsupervised abstractive baselines on NYT, CNN/DM and English Gigaword datasets with various document styles. Further analysis shows that the summaries generated by TED are highly abstractive, and each component in the objective function of TED is highly effective.", "phrases": ["theme modeling", "news article", "ted"], "overall_score": 2.0206920946527394, "scores": [0.8637607847471674, 0.7996639430692994, 0.864657441878837], "rank_score": 0.8426940565651013} -{"id": "voita-etal-2019-bottom", "title": "The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives", "abstract": "We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We chose the Transformers for our analysis as they have been shown effective with various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and observe that the choice of the objective determines this process. For example, as you go from bottom to top layers, information about the past in left-to-right language models vanishes and predictions about the future are formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.", "phrases": ["evolution", "machine translation", "information flow", "transformer layer", "gpt-2"], "overall_score": 2.480628507832474, "scores": [1.7843773273491201, 0.8387934673476052, 0.5402677440391258, 0.5283807761805034, 0.5205765360153751], "rank_score": 0.8424791701863459} -{"id": "akbik-etal-2019-pooled", "title": "Pooled Contextualized Embeddings for Named Entity Recognition", "abstract": "Contextual string embeddings are a recent type of contextualized word embedding that were shown to yield state-of-the-art results when utilized in a range of sequence labeling tasks. They are based on character-level language models which treat text as distributions over characters and are capable of generating embeddings for any string of characters within any textual context. However, such purely character-based approaches struggle to produce meaningful embeddings if a rare string is used in an underspecified context. 
To address this drawback, we propose a method in which we dynamically aggregate contextualized embeddings of each unique string that we encounter. We then use a pooling operation to distill a \u201cglobal\u201d word representation from all contextualized instances. We evaluate these \u201cpooled contextualized embeddings\u201d on common named entity recognition (NER) tasks such as CoNLL-03 and WNUT and show that our approach significantly improves the state-of-the-art for NER. We make all code and pre-trained models available to the research community for use and reproduction.", "phrases": ["entity recognition", "language model", "underspecified context", "pre-trained model"], "overall_score": 2.2814274238871577, "scores": [1.1544523599120724, 1.1268261504081771, 0.55178878678966, 0.5367778009737054], "rank_score": 0.8424612745209038} -{"id": "hopkins-kiela-2017-automatically", "title": "Automatically Generating Rhythmic Verse with Neural Networks", "abstract": "We propose two novel methodologies for the automatic generation of rhythmic poetry in a variety of forms. The first approach uses a neural language model trained on a phonetic encoding to learn an implicit representation of both the form and content of English poetry. This model can effectively learn common poetic devices such as rhyme, rhythm and alliteration. The second approach considers poetry generation as a constraint satisfaction problem where a generative neural language model is tasked with learning a representation of content, and a discriminative weighted finite state machine constrains it on the basis of form. By manipulating the constraints of the latter model, we can generate coherent poetry with arbitrary forms and themes. A large-scale extrinsic evaluation demonstrated that participants consider machine-generated poems to be written by humans 54% of the time. In addition, participants rated a machine-generated poem to be the best amongst all evaluated.", "phrases": ["language model", "rhythm", "poetry generation"], "overall_score": 1.939826859427271, "scores": [1.1326590552981073, 0.830888890974386, 0.5638203564184426], "rank_score": 0.8424561008969786} -{"id": "rastogi-etal-2016-weighting", "title": "Weighting Finite-State Transductions With Neural Context", "abstract": "How should one apply deep learning to tasks such as morphological reinflection, which stochastically edit one string to get another? A recent approach to such sequence-to-sequence tasks is to compress the input string into a vector that is then used to generate the output string, using recurrent neural networks. In contrast, we propose to keep the traditional architecture, which uses a finite-state transducer to score all possible output strings, but to augment the scoring function with the help of recurrent networks. A stack of bidirectional LSTMs reads the input string from left-to-right and right-to-left, in order to summarize the input context in which a transducer arc is applied. We combine these learned features with the transducer to define a probability distribution over aligned output strings, in the form of a weighted finite-state automaton. This reduces hand-engineering of features, allows learned features to examine unbounded context in the input string, and still permits exact inference through dynamic programming. 
We illustrate our method on the tasks of morphological reinflection and lemmatization.", "phrases": ["morphology", "transducer", "possible output string", "bidirectional lstm", "finite-state transducer"], "overall_score": 2.386439805478185, "scores": [1.357451356269359, 1.1966746720959367, 0.5578001715787593, 0.5523805798627385, 0.5472359380839793], "rank_score": 0.8423085435781547} -{"id": "barreto-etal-2006-open", "title": "Open Resources and Tools for the Shallow Processing of Portuguese: The TagShare Project", "abstract": "This paper presents the TagShare project and the linguistic resources and tools for the shallow processing of Portuguese developed in its scope. These resources include a 1 million token corpus that has been accurately hand annotated with a variety of linguistic information, as well as several state of the art shallow processing tools capable of automatically producing that type of annotation. At present, the linguistic annotations in the corpus are sentence and paragraph boundaries, token boundaries, morphosyntactic POS categories, values of inflection features, lemmas and named entities. Hence, the set of tools comprises a sentence chunker, a tokenizer, a POS tagger, nominal and verbal analyzers and lemmatizers, a verbal conjugator, a nominal \u201cinflector\u201d, and a named-entity recognizer, some of which underlie several online services.", "phrases": ["shallow processing", "portuguese", "tagshare project"], "overall_score": 1.1674015133398057, "scores": [0.8658592249783363, 0.8391946379034022, 0.821252698150675], "rank_score": 0.8421021870108044} -{"id": "louis-nenkova-2013-automatically", "title": "Automatically Assessing Machine Summary Content Without a Gold Standard", "abstract": "The most widely adopted approaches for evaluation of summary content follow some protocol for comparing a summary with gold-standard human summaries, which are traditionally called model summaries. This evaluation paradigm falls short when human summaries are not available and becomes less accurate when only a single model is available. We propose three novel evaluation techniques. Two of them are model-free and do not rely on a gold standard for the assessment. The third technique improves standard automatic evaluations by expanding the set of available model summaries with chosen system summaries. We show that quantifying the similarity between the source text and its summary with appropriately chosen measures produces summary scores which replicate human assessments accurately. We also explore ways of increasing evaluation quality when only one human model summary is available as a gold standard. We introduce pseudomodels, which are system summaries deemed to contain good content according to automatic evaluation. Combining the pseudomodels with the single human model to form the gold-standard leads to higher correlations with human judgments compared to using only the one available model. Finally, we explore the feasibility of another measure\u2014similarity between a system summary and the pool of all other system summaries for the same input. 
This method of comparison with the consensus of systems produces impressively accurate rankings of system summaries, achieving correlation with human rankings above 0.9.", "phrases": ["gold standard", "model summary", "pyramid"], "overall_score": 2.7102554823630114, "scores": [0.855274189486049, 1.132435624905814, 0.5382548047689131], "rank_score": 0.8419882063869254} -{"id": "rubino-etal-2015-abu", "title": "Abu-MaTran at WMT 2015 Translation Task: Morphological Segmentation and Web Crawling", "abstract": "This paper presents the machine translation systems submitted by the Abu-MaTran project for the Finnish\u2010English language pair at the WMT 2015 translation task. We tackle the lack of resources and complex morphology of the Finnish language by (i) crawling parallel and monolingual data from the Web and (ii) applying rule-based and unsupervised methods for morphological segmentation. Several statistical machine translation approaches are evaluated and then combined to obtain our final submissions, which are the top performing English-to-Finnish unconstrained (all automatic metrics) and constrained (BLEU), and Finnish-to-English constrained (TER) systems.", "phrases": ["wmt", "translation task", "morphological segmentation"], "overall_score": 0.9249601900172291, "scores": [0.9168449828409887, 0.821317149225375, 0.7876430119993215], "rank_score": 0.8419350480218951} -{"id": "wu-etal-2018-neural", "title": "Neural Metaphor Detecting with CNN-LSTM Model", "abstract": "Metaphors are figurative language widely used in daily life and literature. It's an important task to detect the metaphors evoked by texts. Thus, the metaphor shared task aims to extract metaphors from plain texts at word level. We propose to use a CNN-LSTM model for this task. Our model combines CNN and LSTM layers to utilize both local and long-range contextual information for identifying metaphorical information. In addition, we compare the performance of the softmax classifier and conditional random field (CRF) for sequential labeling in this task. We also incorporated some additional features such as part of speech (POS) tags and word cluster to improve the performance of the model. Our best model achieved 65.06% F-score in the all POS testing subtask and 67.15% in the verbs testing subtask.", "phrases": ["metaphor", "cnn-lstm model", "word2vec"], "overall_score": 2.432549864421161, "scores": [0.8566618087299019, 1.1311190877606927, 0.5370325892927277], "rank_score": 0.8416044952611074} -{"id": "cheng-etal-2018-towards", "title": "Towards Robust Neural Machine Translation", "abstract": "Small perturbations in the input can severely distort intermediate representations and thus impact translation quality of neural machine translation (NMT) models. In this paper, we propose to improve the robustness of NMT models with adversarial stability training. The basic idea is to make both the encoder and decoder in NMT models robust against input perturbations by enabling them to behave similarly for the original input and its perturbed counterpart. 
Experimental results on Chinese-English, English-German and English-French translation tasks show that our approaches can not only achieve significant improvements over strong NMT systems but also improve the robustness of NMT models.", "phrases": ["neural machine translation", "adversarial stability training", "noise", "feature level"], "overall_score": 2.6387390682406453, "scores": [1.1073828311332725, 1.0399175945065018, 0.6738898604762581, 0.5450912459437549], "rank_score": 0.8415703830149469} -{"id": "touileb-etal-2020-gender", "title": "Gender and sentiment, critics and authors: a dataset of Norwegian book reviews", "abstract": "Gender bias in models and datasets is widely studied in NLP. The focus has usually been on analysing how females and males express themselves, or how females and males are described. However, a less studied aspect is the combination of these two perspectives, how female and male describe the same or opposite gender. In this paper, we present a new gender annotated sentiment dataset of critics reviewing the works of female and male authors. We investigate if this newly annotated dataset contains differences in how the works of male and female authors are critiqued, in particular in terms of positive and negative sentiment. We also explore the differences in how this is done by male and female critics. We show that there are differences in how critics assess the works of authors of the same or opposite gender. For example, male critics rate crime novels written by females, and romantic and sentimental works written by males, more negatively.", "phrases": ["critic", "norwegian book review", "gender"], "overall_score": 0.9244016110564397, "scores": [0.8628248807758993, 0.8375665665731686, 0.8238883752727805], "rank_score": 0.8414266075406162} -{"id": "al-onaizan-papineni-2006-distortion", "title": "Distortion Models for Statistical Machine Translation", "abstract": "In this paper, we argue that n-gram language models are not sufficient to address word reordering required for Machine Translation. We propose a new distortion model that can be used with existing phrase-based SMT decoders to address those n-gram language model limitations. We present empirical results in Arabic to English Machine Translation that show statistically significant improvements when our proposed model is used. We also propose a novel metric to measure word order similarity (or difference) between any pair of languages based on word alignments.", "phrases": ["distortion model", "scope", "deterministic choice"], "overall_score": 2.4319483634227437, "scores": [1.4318038979680658, 0.5474694492853923, 0.5449158233391549], "rank_score": 0.8413963901975375} -{"id": "ma-etal-2022-template", "title": "Template-free Prompt Tuning for Few-shot NER", "abstract": "Prompt-based methods have been successfully applied in sentence-level few-shot learning tasks, mostly owing to the sophisticated design of templates and label words. However, when applied to token-level labeling tasks such as NER, it would be time-consuming to enumerate the template queries over all potential entity spans. In this work, we propose a more elegant method to reformulate NER tasks as LM problems without any templates. Specifically, we discard the template construction process while maintaining the word prediction paradigm of pre-training models to predict a class-related pivot word (or label word) at the entity position. 
Meanwhile, we also explore principled ways to automatically search for appropriate label words that the pre-trained models can easily adapt to. While avoiding the complicated template-based process, the proposed LM objective also reduces the gap between different objectives used in pre-training and fine-tuning, thus it can better benefit the few-shot performance. Experimental results demonstrate the effectiveness of the proposed method over bert-tagger and template-based method under few-shot settings. Moreover, the decoding speed of the proposed method is up to 1930.12 times faster than the template-based method.", "phrases": ["prompt tuning", "few-shot setting", "low-resource ner"], "overall_score": 2.089544219649793, "scores": [1.3859254651958333, 0.6009541323103701, 0.5358037634499064], "rank_score": 0.8408944536520365} -{"id": "williams-koehn-2012-ghkm", "title": "GHKM Rule Extraction and Scope-3 Parsing in Moses", "abstract": "We developed a string-to-tree system for English--German, achieving competitive results against a hierarchical model baseline. We provide details of our implementation of GHKM rule extraction and scope-3 parsing in the Moses toolkit. We compare systems trained on the same data using different grammar extraction methods.", "phrases": ["scope-3", "moses", "ghkm rule extraction"], "overall_score": 1.16534281142421, "scores": [0.9121922969417103, 0.8086951499116326, 0.8009639956126224], "rank_score": 0.8406171474886551} -{"id": "zwarts-etal-2010-detecting", "title": "Detecting Speech Repairs Incrementally Using a Noisy Channel Approach", "abstract": "Unrehearsed spoken language often contains disfluencies. In order to correctly interpret a spoken utterance, any such disfluencies must be identified and removed or otherwise dealt with. Operating on transcripts of speech which contain disfluencies, our particular focus here is the identification and correction of speech repairs using a noisy channel model. Our aim is to develop a high-accuracy mechanism that can identify speech repairs in an incremental fashion, as the utterance is processed word-by-word. \n \nWe also address the issue of the evaluation of such incremental systems. We propose a novel approach to evaluation, which evaluates performance in detecting and correcting disfluencies incrementally, rather than only assessing performance once the processing of an utterance is complete. This demonstrates some shortcomings in our basic incremental model, and so we then demonstrate a technique that improves performance on the detection of disfluencies as they happen.", "phrases": ["repair", "noisy channel model", "detection"], "overall_score": 1.747946314131615, "scores": [1.092471413964771, 0.892209028699471, 0.5370730364735811], "rank_score": 0.8405844930459411} -{"id": "wang-etal-2007-chinese", "title": "Chinese Syntactic Reordering for Statistical Machine Translation", "abstract": "Syntactic reordering approaches are an effective method for handling word-order differences between source and target languages in statistical machine translation (SMT) systems. This paper introduces a reordering approach for translation from Chinese to English. We describe a set of syntactic reordering rules that exploit systematic differences between Chinese and English word order. The resulting system is used as a preprocessor for both training and test sentences, transforming Chinese sentences to be much closer to English in terms of their word order. 
We evaluated the reordering approach within the MOSES phrase-based SMT system (Koehn et al., 2007). The reordering approach improved the BLEU score for the MOSES system from 28.52 to 30.86 on the NIST 2006 evaluation data. We also conducted a series of experiments to analyze the accuracy and impact of different types of reordering rules.", "phrases": ["statistical machine translation", "clause restructuring", "structural difference", "preprocessing step", "localizer phrase"], "overall_score": 3.2357759753291133, "scores": [0.9719077669559119, 0.9202812660444352, 0.8819734755038924, 0.8641049014280386, 0.5638779492377578], "rank_score": 0.8404290718340072} -{"id": "tsarfaty-etal-2010-statistical", "title": "Statistical Parsing of Morphologically Rich Languages (SPMRL) What, How and Whither", "abstract": "The term Morphologically Rich Languages (MRLs) refers to languages in which significant information concerning syntactic units and relations is expressed at word-level. There is ample evidence that the application of readily available statistical parsing models to such languages is susceptible to serious performance degradation. The first workshop on statistical parsing of MRLs hosts a variety of contributions which show that despite language-specific idiosyncrasies, the problems associated with parsing MRLs cut across languages and parsing frameworks. In this paper we review the current state-of-affairs with respect to parsing MRLs and point out central challenges. We synthesize the contributions of researchers working on parsing Arabic, Basque, French, German, Hebrew, Hindi and Korean to point out shared solutions across languages. The overarching analysis suggests itself as a source of directions for future investigations.", "phrases": ["morphologically rich languages", "parsing model", "characteristic", "constituency parser"], "overall_score": 2.9384216638168814, "scores": [1.4166825134856844, 0.8241624738558202, 0.5996143349376487, 0.5210911530579795], "rank_score": 0.8403876188342831} -{"id": "dibia-2020-neuralqa", "title": "NeuralQA: A Usable Library for Question Answering (Contextual Query Expansion + BERT) on Large Datasets", "abstract": "Existing tools for Question Answering (QA) have challenges that limit their use in practice. They can be complex to set up or integrate with existing infrastructure, do not offer configurable interactive interfaces, and do not cover the full set of subtasks that frequently comprise the QA pipeline (query expansion, retrieval, reading, and explanation/sensemaking). To help address these issues, we introduce NeuralQA - a usable library for QA on large datasets. NeuralQA integrates well with existing infrastructure (e.g., ElasticSearch instances and reader models trained with the HuggingFace Transformers API) and offers helpful defaults for QA subtasks. It introduces and implements contextual query expansion (CQE) using a masked language model (MLM) as well as relevant snippets (RelSnip) - a method for condensing large documents into smaller passages that can be speedily processed by a document reader model. Finally, it offers a flexible user interface to support workflows for research explorations (e.g., visualization of gradient-based explanations to support qualitative inspection of model behaviour) and large scale search deployment. 
Code and documentation for NeuralQA are available as open source on GitHub.", "phrases": ["question answering", "large dataset", "neuralqa"], "overall_score": 0.9227483870871298, "scores": [0.8913651800411642, 0.8255220240998631, 0.8028781305590243], "rank_score": 0.8399217782333506} -{"id": "kovatchev-etal-2020-decomposing", "title": "Decomposing and Comparing Meaning Relations: Paraphrasing, Textual Entailment, Contradiction, and Specificity", "abstract": "In this paper, we present a methodology for decomposing and comparing multiple meaning relations (paraphrasing, textual entailment, contradiction, and specificity). The methodology includes SHARel - a new typology that consists of 26 linguistic and 8 reason-based categories. We use the typology to annotate a corpus of 520 sentence pairs in English and we demonstrate that unlike previous typologies, SHARel can be applied to all relations of interest with a high inter-annotator agreement. We analyze and compare the frequency and distribution of the linguistic and reason-based phenomena involved in paraphrasing, textual entailment, contradiction, and specificity. This comparison allows for a much more in-depth analysis of the workings of the individual relations and the way they interact and compare with each other. We release all resources (typology, annotation guidelines, and annotated corpus) to the community.", "phrases": ["meaning relation", "contradiction", "specificity"], "overall_score": 0.9225957213954534, "scores": [0.9160770854692509, 0.80736708593969, 0.7959042763877389], "rank_score": 0.8397828159322266} -{"id": "liu-etal-2009-weighted", "title": "Weighted Alignment Matrices for Statistical Machine Translation", "abstract": "Current statistical machine translation systems usually extract rules from bilingual corpora annotated with 1-best alignments. They are prone to learn noisy rules due to alignment mistakes. We propose a new structure called weighted alignment matrix to encode all possible alignments for a parallel text compactly. The key idea is to assign a probability to each word pair to indicate how well they are aligned. We design new algorithms for extracting phrase pairs from weighted alignment matrices and estimating their probabilities. Our experiments on multiple language pairs show that using weighted matrices achieves consistent improvements over using n-best lists in significantly less extraction time.", "phrases": ["alignment matrix", "n-best list", "parallel sentence", "posterior probability"], "overall_score": 1.9335542666333552, "scores": [1.6174472370624413, 0.6047299737327928, 0.5896295827080879, 0.5471210003340977], "rank_score": 0.839731948459355} -{"id": "llorens-etal-2010-tipsem", "title": "TIPSem (English and Spanish): Evaluating CRFs and Semantic Roles in TempEval-2", "abstract": "This paper presents TIPSem, a system to extract temporal information from natural language texts for English and Spanish. TIPSem learns CRF models from training data. Although the used features include different language analysis levels, the approach is focused on semantic information. For Spanish, TIPSem achieved the best F1 score in all the tasks. 
For English, it obtained the best F1 in tasks B (events) and D (event-dct links); and was among the best systems in the rest.", "phrases": ["spanish", "tempeval-2", "semantic information"], "overall_score": 2.3282018318898605, "scores": [0.7953946171201114, 0.8782320733710629, 0.8455372373009021], "rank_score": 0.8397213092640254} -{"id": "petukhova-etal-2014-interoperability", "title": "Interoperability of Dialogue Corpora through ISO 24617-2-based Querying", "abstract": "This paper explores a way of achieving interoperability: developing a query format for accessing existing annotated corpora whose expressions make use of the annotation language defined by the standard. The interpretation of expressions in the query implements a mapping from ISO 24617-2 concepts to those of the annotation scheme used in the corpus. We discuss two possible ways to query existing annotated corpora using DiAML. One way is to transform corpora into DiAML compliant format, and subsequently query these data using XQuery or XPath. The second approach is to define a DiAML query that can be directly used to retrieve requested information from the annotated data. Both approaches are valid. The first one presents a standard way of querying XML data. The second approach is a DiAML-oriented querying of dialogue act annotated data, for which we designed an interface. The proposed approach is tested on two important types of existing dialogue corpora: spoken two-person dialogue corpora collected and annotated within the HCRC Map Task paradigm, and multiparty face-to-face dialogues of the AMI corpus. We present the results and evaluate them with respect to accuracy and completeness through statistical comparisons between retrieved and manually constructed reference annotations.", "phrases": ["dialogue corpora", "querying", "interoperability"], "overall_score": 1.1639278744665715, "scores": [0.8933270955439061, 0.8261861180580216, 0.7992762450661043], "rank_score": 0.8395964862226774} -{"id": "kallmeyer-maier-2010-data", "title": "Data-Driven Parsing with Probabilistic Linear Context-Free Rewriting Systems", "abstract": "This paper presents the first efficient implementation of a weighted deductive CYK parser for Probabilistic Linear Context-Free Rewriting Systems (PLCFRSs). LCFRS, an extension of CFG, can describe discontinuities in a straightforward way and is therefore a natural candidate to be used for data-driven parsing. To speed up parsing, we use different context-summary estimates of parse items, some of them allowing for A* parsing. We evaluate our parser with grammars extracted from the German NeGra treebank. Our experiments show that data-driven LCFRS parsing is feasible and yields output of competitive quality.", "phrases": ["lcfrs", "constituent", "data-driven parsing"], "overall_score": 1.9331003179821198, "scores": [1.058668808394628, 0.8437749750809584, 0.6161606197195812], "rank_score": 0.839534801065056} -{"id": "zhang-komachi-2018-neural", "title": "Neural Machine Translation of Logographic Language Using Sub-character Level Information", "abstract": "Recent neural machine translation (NMT) systems have been greatly improved by encoder-decoder models with attention mechanisms and sub-word units. However, important differences between languages with logographic and alphabetic writing systems have long been overlooked. 
This study focuses on these differences and uses a simple approach to improve the performance of NMT systems utilizing decomposed sub-character level information for logographic languages. Our results indicate that our approach not only improves the translation capabilities of NMT systems between Chinese and English, but also further improves NMT systems between Chinese and Japanese, because it utilizes the shared information brought by similar sub-character units.", "phrases": ["logographic language", "sub-character level information", "neural machine translation"], "overall_score": 1.351117036126071, "scores": [0.8975444268280237, 0.8232066812134539, 0.7977375376298297], "rank_score": 0.839496215223769} -{"id": "lopes-etal-2019-unbabels", "title": "Unbabel's Submission to the WMT2019 APE Shared Task: BERT-Based Encoder-Decoder for Automatic Post-Editing", "abstract": "This paper describes Unbabel's submission to the WMT2019 APE Shared Task for the English-German language pair. Following the recent rise of large, powerful, pre-trained models, we adapt the BERT pretrained model to perform Automatic Post-Editing in an encoder-decoder framework. Analogously to dual-encoder architectures we develop a BERT-based encoder-decoder (BED) model in which a single pretrained BERT encoder receives both the source src and machine translation mt strings. Furthermore, we explore a conservativeness factor to constrain the APE system to perform fewer edits. As the official results show, when trained on a weighted combination of in-domain and artificial training data, our BED system with the conservativeness penalty improves significantly the translations of a strong NMT system by -0.78 and +1.23 in terms of TER and BLEU, respectively. Finally, our submission achieves a new state-of-the-art, ex-aequo, in English-German APE of NMT.", "phrases": ["submission", "encoder-decoder", "automatic post-editing"], "overall_score": 1.844453282498434, "scores": [0.9048409082059323, 0.8316166168789917, 0.781883069031135], "rank_score": 0.8394468647053531} -{"id": "grossman-etal-2020-segbo", "title": "SegBo: A Database of Borrowed Sounds in the World's Languages", "abstract": "Phonological segment borrowing is a process through which languages acquire new contrastive speech sounds as the result of borrowing new words from other languages. Despite the fact that phonological segment borrowing is documented in many of the world's languages, to date there has been no large-scale quantitative study of the phenomenon. In this paper, we present SegBo, a novel cross-linguistic database of borrowed phonological segments. We describe our data aggregation pipeline and the resulting language sample. We also present two short case studies based on the database. The first deals with the impact of large colonial languages on the sound systems of the world's languages; the second deals with universals of borrowing in the domain of rhotic consonants.", "phrases": ["database", "world", "phonological segment", "cross-linguistic database", "segbo"], "overall_score": 0.921991343213402, "scores": [1.6629293553509694, 0.8208456648250698, 0.5898575577274432, 0.5825262066655302, 0.5400046514470173], "rank_score": 0.839232687203206} -{"id": "gupta-etal-2020-infotabs", "title": "INFOTABS: Inference on Tables as Semi-structured Data", "abstract": "In this paper, we observe that semi-structured tabulated text is ubiquitous; understanding them requires not only comprehending the meaning of text fragments, but also implicit relationships between them. 
We argue that such data can prove as a testing ground for understanding how we reason about information. To study this, we introduce a new dataset called INFOTABS, comprising of human-written textual hypotheses based on premises that are tables extracted from Wikipedia info-boxes. Our analysis shows that the semi-structured, multi-domain and heterogeneous nature of the premises admits complex, multi-faceted reasoning. Experiments reveal that, while human annotators agree on the relationships between a table-hypothesis pair, several standard modeling strategies are unsuccessful at the task, suggesting that reasoning about tables can pose a difficult modeling challenge.", "phrases": ["table", "infotabs", "natural language inference", "claim"], "overall_score": 1.9320291957494495, "scores": [0.9014751987184753, 1.3581825345392562, 0.5685090068436994, 0.5281117342584225], "rank_score": 0.8390696185899634} -{"id": "faruqui-etal-2018-wikiatomicedits", "title": "WikiAtomicEdits: A Multilingual Corpus of Wikipedia Edits for Modeling Language and Discourse", "abstract": "We release a corpus of 43 million atomic edits across 8 languages. These edits are mined from Wikipedia edit history and consist of instances in which a human editor has inserted a single contiguous phrase into, or deleted a single contiguous phrase from, an existing sentence. We use the collected data to show that the language generated during editing differs from the language that we observe in standard corpora, and that models trained on edits encode different aspects of semantics and discourse than models trained on raw text. We release the full corpus as a resource to aid ongoing research in semantics, discourse, and representation learning.", "phrases": ["discourse", "atomic edit", "wikiatomicedit", "sentence-to-sentence generation task"], "overall_score": 2.0113641143693655, "scores": [1.3934006735036475, 0.8474080597632923, 0.5752219581068245, 0.5391852551877067], "rank_score": 0.8388039866403678} -{"id": "han-etal-2019-micron", "title": "MICRON: Multigranular Interaction for Contextualizing RepresentatiON in Non-factoid Question Answering", "abstract": "This paper studies the problem of non-factoid question answering, where the answer may span over multiple sentences. Existing solutions can be categorized into representation- and interaction-focused approaches. We combine their complementary strength, by a hybrid approach allowing multi-granular interactions, but represented at word level, enabling an easy integration with strong word-level signals. Specifically, we propose MICRON: Multigranular Interaction for Contextualizing RepresentatiON, a novel approach which derives contextualized uni-gram representation from n-grams. Our contributions are as follows: First, we enable multi-granular matches between question and answer n-grams. Second, by contextualizing word representation with surrounding n-grams, MICRON can naturally utilize word-based signals for query term weighting, known to be effective in information retrieval. 
We validate MICRON in two public non-factoid question answering datasets: WikiPassageQA and InsuranceQA, showing our model achieves the state of the art among baselines with reported performances on both datasets.", "phrases": ["multigranular interaction", "contextualizing representation", "non-factoid question answering"], "overall_score": 0.9214903419031866, "scores": [0.9267692708732269, 0.8006950363201245, 0.7888656612808174], "rank_score": 0.8387766561580564} -{"id": "hessel-schofield-2021-effective", "title": "How effective is BERT without word ordering? Implications for language understanding and data privacy", "abstract": "Ordered word sequences contain the rich structures that define language. However, it's often not clear if or how modern pretrained language models utilize these structures. We show that the token representations and self-attention activations within BERT are surprisingly resilient to shuffling the order of input tokens, and that for several GLUE language understanding tasks, shuffling only minimally degrades performance, e.g., by 4% for QNLI. While bleak from the perspective of language understanding, our results have positive implications for cases where copyright or ethics necessitates the consideration of bag-of-words data (vs. full documents). We simulate such a scenario for three sensitive classification tasks, demonstrating minimal performance degradation vs. releasing full language sequences.", "phrases": ["bert", "implication", "language understanding"], "overall_score": 0.9212459905575228, "scores": [0.9037832288867935, 0.8292713414261593, 0.7826081436215097], "rank_score": 0.8385542379781542} -{"id": "zhou-zhao-2019-head", "title": "Head-Driven Phrase Structure Grammar Parsing on Penn Treebank", "abstract": "Head-driven phrase structure grammar (HPSG) enjoys a uniform formalism representing rich contextual syntactic and even semantic meanings. This paper makes the first attempt to formulate a simplified HPSG by integrating constituent and dependency formal representations into head-driven phrase structure. Then two parsing algorithms are respectively proposed for two converted tree representations, division span and joint span. As HPSG encodes both constituent and dependency structure information, the proposed HPSG parsers may be regarded as a sort of joint decoder for both types of structures and thus are evaluated in terms of extracted or converted constituent and dependency parsing trees. Our parser achieves new state-of-the-art performance for both parsing tasks on Penn Treebank (PTB) and Chinese Penn Treebank, verifying the effectiveness of joint learning constituent and dependency structures. In detail, we report 95.84 F1 of constituent parsing and 97.00% UAS of dependency parsing on PTB.", "phrases": ["penn treebank", "hpsg", "constituent"], "overall_score": 2.21276410403968, "scores": [0.9232432393374537, 1.0688512519013937, 0.5233083526572414], "rank_score": 0.8384676146320297} -{"id": "allen-etal-2014-detecting", "title": "Detecting Disagreement in Conversations using Pseudo-Monologic Rhetorical Structure", "abstract": "Casual online forums such as Reddit, Slashdot and Digg are continuing to increase in popularity as a means of communication. Detecting disagreement in this domain is a considerable challenge. Many topics are unique to the conversation on the forum, and the appearance of disagreement may be much more subtle than on political blogs or social media sites such as twitter. 
In this analysis we present a crowd-sourced annotated corpus for topic level disagreement detection in Slashdot, showing that disagreement detection in this domain is difficult even for humans. We then proceed to show that a new set of features determined from the rhetorical structure of the conversation significantly improves the performance on disagreement detection over a baseline consisting of unigram/bigram features, discourse markers, structural features and meta-post features.", "phrases": ["disagreement", "conversation", "rhetorical structure"], "overall_score": 0.920616491381706, "scores": [0.8492049433314899, 0.8327232542260276, 0.8320155318480722], "rank_score": 0.8379812431351965} -{"id": "banerjee-2019-asu", "title": "ASU at TextGraphs 2019 Shared Task: Explanation ReGeneration using Language Models and Iterative Re-Ranking", "abstract": "In this work we describe the system from the Natural Language Processing group at Arizona State University for the TextGraphs 2019 Shared Task. The task focuses on Explanation Regeneration, an intermediate step towards general multi-hop inference on large graphs. Our approach consists of modeling the explanation regeneration task as a learning to rank problem, for which we use state-of-the-art language models and explore dataset preparation techniques. We utilize an iterative reranking based approach to further improve the rankings. Our system secured 2nd rank in the task with a mean average precision (MAP) of 41.3% on the test set.", "phrases": ["textgraphs", "shared task", "explanation regeneration"], "overall_score": 1.3484894753547505, "scores": [0.9120589327948311, 0.8081243860674147, 0.7934075409093616], "rank_score": 0.8378636199238692} -{"id": "zhang-etal-2019-hibert", "title": "HIBERT: Document Level Pre-training of Hierarchical Bidirectional Transformers for Document Summarization", "abstract": "Neural extractive summarization models usually employ a hierarchical encoder for document encoding and they are trained using sentence-level labels, which are created heuristically using rule-based methods. Training the hierarchical encoder with these inaccurate labels is challenging. Inspired by the recent work on pre-training transformer sentence encoders (Devlin et al., 2018), we propose Hibert (as shorthand for HIerarchical Bidirectional Encoder Representations from Transformers) for document encoding and a method to pre-train it using unlabeled data. We apply the pre-trained Hibert to our summarization model and it outperforms its randomly initialized counterpart by 1.25 ROUGE on the CNN/Dailymail dataset and by 2.0 ROUGE on a version of New York Times dataset. We also achieve the state-of-the-art performance on these two datasets.", "phrases": ["document summarization", "document encoding", "hierarchical transformer encoder", "important sentence"], "overall_score": 2.929287685752398, "scores": [1.4357538027143961, 0.8304023567779614, 0.5567309860901949, 0.5282140709969044], "rank_score": 0.8377753041448642} -{"id": "strubell-etal-2019-energy", "title": "Energy and Policy Considerations for Deep Learning in NLP", "abstract": "Recent progress in hardware and methodology for training neural networks has ushered in a new generation of large networks trained on abundant data. These models have obtained notable gains in accuracy across many NLP tasks. However, these accuracy improvements depend on the availability of exceptionally large computational resources that necessitate similarly substantial energy consumption. 
As a result these models are costly to train and develop, both financially, due to the cost of hardware and electricity or cloud compute time, and environmentally, due to the carbon footprint required to fuel modern tensor processing hardware. In this paper we bring this issue to the attention of NLP researchers by quantifying the approximate financial and environmental costs of training a variety of recently successful neural network models for NLP. Based on these findings, we propose actionable recommendations to reduce costs and improve equity in NLP research and practice.", "phrases": ["deep learning", "computational resource", "pretraining", "nlp model"], "overall_score": 3.691717968273339, "scores": [0.9325718507062148, 1.3573429024823782, 0.5396220351631733, 0.5214531532608777], "rank_score": 0.837747485403161} -{"id": "christodoulopoulos-etal-2010-two", "title": "Two Decades of Unsupervised POS Induction: How Far Have We Come?", "abstract": "Part-of-speech (POS) induction is one of the most popular tasks in research on unsupervised NLP. Many different methods have been proposed, yet comparisons are difficult to make since there is little consensus on evaluation framework, and many papers evaluate against only one or two competitor systems. Here we evaluate seven different POS induction systems spanning nearly 20 years of work, using a variety of measures. We show that some of the oldest (and simplest) systems stand up surprisingly well against more recent approaches. Since most of these systems were developed and tested using data from the WSJ corpus, we compare their generalization abilities by testing on both WSJ and the multilingual Multext-East corpus. Finally, we introduce the idea of evaluating systems based on their ability to produce cluster prototypes that are useful as input to a prototype-driven learner. In most cases, the prototype-driven learner outperforms the unsupervised system used to initialize it, yielding state-of-the-art results on WSJ and improvements on non-English corpora.", "phrases": ["pos", "induction", "tagger", "bible corpus"], "overall_score": 2.696221741488209, "scores": [1.3948295117512233, 0.8268198804183112, 0.5716488896809577, 0.5572152367610655], "rank_score": 0.8376283796528895} -{"id": "zuo-etal-2020-knowdis", "title": "KnowDis: Knowledge Enhanced Data Augmentation for Event Causality Detection via Distant Supervision", "abstract": "Modern models of event causality detection (ECD) are mainly based on supervised learning from small hand-labeled corpora. However, hand-labeled training data is expensive to produce, has low coverage of causal expressions, and is limited in size, which makes it hard for supervised methods to detect causal relations between events. To solve this data lacking problem, we investigate a data augmentation framework for ECD, dubbed as Knowledge Enhanced Distant Data Augmentation (KnowDis). 
Experimental results on two benchmark datasets EventStoryLine corpus and Causal-TimeBank show that 1) KnowDis can augment available training data assisted by the lexical and causal commonsense knowledge for ECD via distant supervision, and 2) our method outperforms previous methods by a large margin, assisted by automatically labeled training data.", "phrases": ["event causality detection", "distant supervision", "data lacking problem", "knowdis"], "overall_score": 1.499841338963537, "scores": [0.8582059077464639, 0.8424766849381237, 0.8023104033519797, 0.8453165616290379], "rank_score": 0.8370773894164013} -{"id": "vincze-etal-2014-automatic", "title": "Automatic Error Detection concerning the Definite and Indefinite Conjugation in the HunLearner Corpus", "abstract": "In this paper we present the results of automatic error detection, concerning the definite and indefinite conjugation in the extended version of the HunLearner corpus, the learners\u2019 corpus of the Hungarian language. We present the most typical structures that trigger definite or indefinite conjugation in Hungarian and we also discuss the most frequent types of errors made by language learners in the corpus texts. We also illustrate the error types with sentences taken from the corpus. Our results highlight grammatical structures that might pose problems for learners of Hungarian, which can be fruitfully applied in the teaching and practicing of such constructions from the language teacher\u2019s or learners\u2019 point of view. On the other hand, these results may be exploited in extending the functionalities of a grammar checker, concerning the definiteness of the verb. Our automatic system was able to achieve perfect recall, i.e. it could find all the mismatches between the type of the object and the conjugation of the verb, which is promising for future studies in this area.", "phrases": ["indefinite conjugation", "hunlearner corpus", "automatic error detection"], "overall_score": 1.1602533454162878, "scores": [0.8563331702211193, 0.8414851299427395, 0.8130193212465028], "rank_score": 0.836945873803454} -{"id": "berant-etal-2011-global", "title": "Global Learning of Typed Entailment Rules", "abstract": "Extensive knowledge bases of entailment rules between predicates are crucial for applied semantic inference. In this paper we propose an algorithm that utilizes transitivity constraints to learn a globally-optimal set of entailment rules for typed predicates. We model the task as a graph learning problem and suggest methods that scale the algorithm to larger graphs. We apply the algorithm over a large data set of extracted predicate instances, from which a resource of typed entailment rules has been recently released (Schoenmackers et al., 2010). Our results show that using global transitivity information substantially improves performance over this resource and several baselines, and that our scaling methods allow us to increase the scope of global learning of entailment-rule graphs.", "phrases": ["transitivity", "global learning", "entailment graph", "inference rule"], "overall_score": 2.319888243747295, "scores": [0.8177219567286738, 1.4316838895692674, 0.5642030553318694, 0.533282363041019], "rank_score": 0.8367228161677074} -{"id": "joulin-etal-2017-bag", "title": "Bag of Tricks for Efficient Text Classification", "abstract": "This paper explores a simple and efficient baseline for text classification. 
Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.", "phrases": ["bag", "learning model", "fasttext n-gram model", "tweet text", "document representation"], "overall_score": 3.6341606503370794, "scores": [1.4481626838658614, 0.8963109769003375, 0.644654398529646, 0.6018845000995807, 0.5921405717895237], "rank_score": 0.8366306262369898} -{"id": "clark-etal-2019-dont", "title": "Don't Take the Easy Way Out: Ensemble Based Methods for Avoiding Known Dataset Biases", "abstract": "State-of-the-art models often make use of superficial patterns in the data that do not generalize well to out-of-domain or adversarial settings. For example, textual entailment models often learn that particular key words imply entailment, irrespective of context, and visual question answering models learn to predict prototypical answers, without considering evidence in the image. In this paper, we show that if we have prior knowledge of such biases, we can train a model to be more robust to domain shift. Our method has two stages: we (1) train a naive model that makes predictions exclusively based on dataset biases, and (2) train a robust model as part of an ensemble with the naive one in order to encourage it to focus on other patterns in the data that are more likely to generalize. Experiments on five datasets with out-of-domain test sets show significantly improved robustness in all settings, including a 12 point gain on a changing priors visual question answering dataset and a 9 point gain on an adversarial question answering test set.", "phrases": ["ensemble", "adversarial setting", "prior knowledge", "robustness", "vqa model"], "overall_score": 2.974115659259982, "scores": [1.7326975345527427, 0.8352652758935858, 0.5472432094872788, 0.5362831802901746, 0.5311050322155298], "rank_score": 0.8365188464878625} -{"id": "brantley-etal-2020-active", "title": "Active Imitation Learning with Noisy Guidance", "abstract": "Imitation learning algorithms provide state-of-the-art results on many structured prediction tasks by learning near-optimal search policies. Such algorithms assume training-time access to an expert that can provide the optimal action at any queried state; unfortunately, the number of such queries is often prohibitive, frequently rendering these approaches impractical. To combat this query complexity, we consider an active learning setting in which the learning algorithm has additional access to a much cheaper noisy heuristic that provides noisy guidance. Our algorithm, LEAQI, learns a difference classifier that predicts when the expert is likely to disagree with the heuristic, and queries the expert only when necessary. 
We apply LEAQI to three sequence labelling tasks, demonstrating significantly fewer queries to the expert and comparable (or better) accuracies over a passive approach.", "phrases": ["imitation", "noisy guidance", "prediction task"], "overall_score": 1.3460477140753564, "scores": [0.9278747307487059, 1.0162892620203514, 0.5648754122483228], "rank_score": 0.8363464683391267} -{"id": "muller-etal-2012-constrained", "title": "Constrained Decoding for Text-Level Discourse Parsing", "abstract": "This paper presents a novel approach to document-based discourse analysis by performing a global A* search over the space of possible structures while optimizing a global criterion over the set of potential coherence relations. Existing approaches to discourse analysis have so far relied on greedy search strategies or restricted themselves to sentence-level discourse parsing. Another advantage of our approach, over other global alternatives (like Maximum Spanning Tree decoding algorithms), is its flexibility in being able to integrate constraints (including linguistically motivated ones like the Right Frontier Constraint). Finally, our paper provides the first discourse parsing system for French; our evaluation is carried out on the Annodis corpus. While using a lot less training data than previous work on English, our system manages to achieve state-of-the-art results, with F1-scores of 66.2 and 46.8 when compared to unlabeled and labeled reference structures.", "phrases": ["discourse", "difficulty", "attachment"], "overall_score": 1.925709441620792, "scores": [1.4066249655544638, 0.5705715651784729, 0.5317784220017701], "rank_score": 0.8363249842449022} -{"id": "dogruoz-etal-2021-survey", "title": "A Survey of Code-switching: Linguistic and Social Perspectives for Language Technologies", "abstract": "The analysis of data in which multiple languages are represented has gained popularity among computational linguists in recent years. So far, much of this research focuses mainly on the improvement of computational methods and largely ignores linguistic and social aspects of C-S discussed across a wide range of languages within the long-established literature in linguistics. To fill this gap, we offer a survey of code-switching (C-S) covering the literature in linguistics with a reflection on the key issues in language technologies. From the linguistic perspective, we provide an overview of structural and functional patterns of C-S focusing on the literature from European and Indian contexts as highly multilingual areas. From the language technologies perspective, we discuss how massive language models fail to represent diverse C-S types due to lack of appropriate training data, lack of robust evaluation benchmarks for C-S (across multilingual situations and types of C-S) and lack of end-to-end systems that cover sociolinguistic aspects of C-S as well. Our survey will be a step towards an outcome of mutual benefit for computational scientists and linguists with a shared interest in multilingualism and C-S.", "phrases": ["survey", "code-switching", "linguistic"], "overall_score": 1.1583095360723699, "scores": [0.8716629822828813, 0.8331447270712531, 0.8018234259048717], "rank_score": 0.8355437117530019} -{"id": "cai-etal-2017-pay", "title": "Pay Attention to the Ending: Strong Neural Baselines for the ROC Story Cloze Task", "abstract": "We consider the ROC story cloze task (Mostafazadeh et al., 2016) and present several findings. 
We develop a model that uses hierarchical recurrent networks with attention to encode the sentences in the story and score candidate endings. By discarding the large training set and only training on the validation set, we achieve an accuracy of 74.7%. Even when we discard the story plots (sentences before the ending) and only train to choose the better of two endings, we can still reach 72.5%. We then analyze this \u201cending-only\u201d task setting. We estimate human accuracy to be 78% and find several types of clues that lead to this high accuracy, including those related to sentiment, negation, and general ending likelihood regardless of the story context.", "phrases": ["story", "hierarchical recurrent network", "negation"], "overall_score": 2.3165387441336236, "scores": [1.1433685102849196, 0.8262729714535011, 0.5369027369031238], "rank_score": 0.8355147395471816} -{"id": "yin-etal-2021-docnli", "title": "DocNLI: A Large-scale Dataset for Document-level Natural Language Inference", "abstract": "Natural language inference (NLI) is formulated as a unified framework for solving various NLP problems such as relation extraction, question answering, summarization, etc. It has been studied intensively in the past few years thanks to the availability of large-scale labeled datasets. However, most existing studies focus on merely sentence-level inference, which limits the scope of NLI's application in downstream NLP problems. This work presents DocNLI -- a newly-constructed large-scale dataset for document-level NLI. DocNLI is transformed from a broad range of NLP problems and covers multiple genres of text. The premises always stay in the document granularity, whereas the hypotheses vary in length from single sentences to passages with hundreds of words. Additionally, DocNLI has pretty limited artifacts which unfortunately widely exist in some popular sentence-level NLI datasets. Our experiments demonstrate that, even without fine-tuning, a model pretrained on DocNLI shows promising performance on popular sentence-level benchmarks, and generalizes well to out-of-domain NLP tasks that rely on inference at document granularity. Task-specific fine-tuning can bring further improvements. Data, code, and pretrained models can be found at https://github.com/salesforce/DocNLI.", "phrases": ["large-scale dataset", "natural language inference", "summarization", "docnli"], "overall_score": 1.344542241203201, "scores": [0.9360646031774555, 0.9244379391625172, 0.902612264409432, 0.5785294550530942], "rank_score": 0.8354110654506248} -{"id": "stab-etal-2018-argumentext", "title": "ArgumenText: Searching for Arguments in Heterogeneous Sources", "abstract": "Argument mining is a core technology for enabling argument search in large corpora. However, most current approaches fall short when applied to heterogeneous texts. In this paper, we present an argument retrieval system capable of retrieving sentential arguments for any given controversial topic. 
By analyzing the highest-ranked results extracted from Web sources, we found that our system covers 89% of arguments found in expert-curated lists of arguments from an online debate portal, and also identifies additional valid arguments.", "phrases": ["retrieval", "controversial topic", "argumentext", "common crawl", "con"], "overall_score": 2.261902960323984, "scores": [1.9610891655976619, 0.5741446336043396, 0.5562432236620488, 0.5484509647864482, 0.5363294528566259], "rank_score": 0.8352514881014249} -{"id": "bonin-etal-2010-contrastive", "title": "A Contrastive Approach to Multi-word Extraction from Domain-specific Corpora", "abstract": "In this paper, we present a novel approach to multi-word terminology extraction combining a well-known automatic term recognition approach, the C-NC value method, with a contrastive ranking technique, aimed at refining obtained results either by filtering noise due to common words or by discerning between semantically different types of terms within heterogeneous terminologies. Differently from other contrastive methods proposed in the literature that focus on single terms to overcome the multi-word terms' sparsity problem, the proposed contrastive function is able to handle variation in low frequency events by directly operating on pre-selected multi-word terms. This methodology has been tested in two case studies carried out in the History of Art and Legal domains. Evaluation of achieved results showed that the proposed two-stage approach significantly improves multi-word term extraction results. In particular, for what concerns the legal domain it provides an answer to a well-known problem in the semi-automatic construction of legal ontologies, namely that of singling out law terms from terms of the specific domain being regulated.", "phrases": ["contrastive approach", "different type", "multi-word term"], "overall_score": 1.3440239647374204, "scores": [1.3445916022077562, 0.6239555227418362, 0.5367200015804329], "rank_score": 0.8350890421766751} -{"id": "bicici-van-genabith-2013-cngl", "title": "CNGL-CORE: Referential Translation Machines for Measuring Semantic Similarity", "abstract": "We invent referential translation machines (RTMs), a computational model for identifying the translation acts between any two data sets with respect to a reference corpus selected in the same domain, which can be used for judging the semantic similarity between text. RTMs make quality and semantic similarity judgments possible by using retrieved relevant training data as interpretants for reaching shared semantics. An MTPP (machine translation performance predictor) model derives features measuring the closeness of the test sentences to the training data, the difficulty of translating them, and the presence of acts of translation involved. We view semantic similarity as paraphrasing between any two given texts. Each view is modeled by an RTM model, giving us a new perspective on the binary relationship between the two. 
Our prediction model is the 15th on some tasks and 30th overall out of 89 submissions in total according to the official results of the Semantic Textual Similarity (STS 2013) challenge.", "phrases": ["referential translation machine", "semantic similarity", "sts task"], "overall_score": 1.156974805913806, "scores": [0.9344589657789926, 1.0127331095835765, 0.5565506470254094], "rank_score": 0.8345809074626596} -{"id": "liu-etal-2011-recognizing", "title": "Recognizing Named Entities in Tweets", "abstract": "The challenges of Named Entity Recognition (NER) for tweets lie in the insufficient information in a tweet and the unavailability of training data. We propose to combine a K-Nearest Neighbors (KNN) classifier with a linear Conditional Random Fields (CRF) model under a semi-supervised learning framework to tackle these challenges. The KNN based classifier conducts pre-labeling to collect global coarse evidence across tweets while the CRF model conducts sequential labeling to capture fine-grained information encoded in a tweet. The semi-supervised learning plus the gazetteers alleviate the lack of training data. Extensive experiments show the advantages of our method over the baselines as well as the effectiveness of KNN and semi-supervised learning.", "phrases": ["semi-supervised learning framework", "twitter", "nlp tool", "social medium data", "presence"], "overall_score": 2.865831841382007, "scores": [1.1565854425958924, 1.0865172914766552, 0.8636297611683807, 0.536126597230487, 0.5298877331995299], "rank_score": 0.834549365134189} -{"id": "donoso-sanchez-2017-dialectometric", "title": "Dialectometric analysis of language variation in Twitter", "abstract": "In the last few years, microblogging platforms such as Twitter have given rise to a deluge of textual data that can be used for the analysis of informal communication between millions of individuals. In this work, we propose an information-theoretic approach to geographic language variation using a corpus based on Twitter. We test our models with tens of concepts and their associated keywords detected in Spanish tweets geolocated in Spain. We employ dialectometric measures (cosine similarity and Jensen-Shannon divergence) to quantify the linguistic distance on the lexical level between cells created in a uniform grid over the map. This can be done for a single concept or in the general case taking into account an average of the considered variants. The latter permits an analysis of the dialects that naturally emerge from the data. Interestingly, our results reveal the existence of two dialect macrovarieties. The first group includes a region-specific speech spoken in small towns and rural areas whereas the second cluster encompasses cities that tend to use a more uniform variety. Since the results obtained with the two different metrics qualitatively agree, our work suggests that social media corpora can be efficiently used for dialectometric analyses.", "phrases": ["language variation", "twitter", "dialectometric analysis"], "overall_score": 1.1556658327972922, "scores": [0.8903886223150852, 0.8163478474216329, 0.794173579115483], "rank_score": 0.8336366829507337} -{"id": "yasui-etal-2019-using", "title": "Using Semantic Similarity as Reward for Reinforcement Learning in Sentence Generation", "abstract": "Traditional model training for sentence generation employs cross-entropy loss as the loss function. 
While cross-entropy loss has convenient properties for supervised learning, it is unable to evaluate sentences as a whole, and lacks flexibility. We present the approach of training the generation model using the estimated semantic similarity between the output and reference sentences to alleviate the problems faced by the training with cross-entropy loss. We use the BERT-based scorer fine-tuned to the Semantic Textual Similarity (STS) task for semantic similarity estimation, and train the model with the estimated scores through reinforcement learning (RL). Our experiments show that reinforcement learning with semantic similarity reward improves the BLEU scores from the baseline LSTM NMT model.", "phrases": ["semantic similarity", "reinforcement learning", "sentence generation"], "overall_score": 0.9157539064113402, "scores": [0.9164883948757737, 0.7995087647125219, 0.7846682230687947], "rank_score": 0.8335551275523635} -{"id": "rabinovich-etal-2017-personalized", "title": "Personalized Machine Translation: Preserving Original Author Traits", "abstract": "The language that we produce reflects our personality, and various personal and demographic characteristics can be detected in natural language texts. We focus on one particular personal trait of the author, gender, and study how it is manifested in original texts and in translations. We show that author's gender has a powerful, clear signal in original texts, but this signal is obfuscated in human and machine translation. We then propose simple domain-adaptation techniques that help retain the original gender traits in the translation, without harming the quality of the translation, thereby creating more personalized machine translation systems.", "phrases": ["machine translation", "author trait", "gender", "speaker-specific data"], "overall_score": 2.361619874643332, "scores": [1.6019029544616115, 0.6020547583552465, 0.58657910507958, 0.5436559700879363], "rank_score": 0.8335481969960936} -{"id": "ni-wang-2017-learning", "title": "Learning to Explain Non-Standard English Words and Phrases", "abstract": "We describe a data-driven approach for automatically explaining new, non-standard English expressions in a given sentence, building on a large dataset that includes 15 years of crowdsourced examples from UrbanDictionary.com. Unlike prior studies that focus on matching keywords from a slang dictionary, we investigate the possibility of learning a neural sequence-to-sequence model that generates explanations of unseen non-standard English expressions given context. We propose a dual encoder approach\u2014a word-level encoder learns the representation of context, and a second character-level encoder learns the hidden representation of the target non-standard expression. Our model can produce reasonable definitions of new non-standard English expressions given their context with certain confidence.", "phrases": ["english expression", "sequence-to-sequence model", "definition", "translation task"], "overall_score": 1.9986952335107493, "scores": [1.3752041979697252, 0.8613687850265028, 0.5553342569348219, 0.5421753719058705], "rank_score": 0.8335206529592302} -{"id": "hashimoto-etal-2013-simple", "title": "Simple Customization of Recursive Neural Networks for Semantic Relation Classification", "abstract": "In this paper, we present a recursive neural network (RNN) model that works on a syntactic tree. Our model differs from previous RNN models in that the model allows for an explicit weighting of important phrases for the target task. 
We also propose to average parameters in training. Our experimental results on semantic relation classification show that both phrase categories and task-specific weighting significantly improve the prediction accuracy of the model. We also show that averaging the model parameters is effective in stabilizing the learning and improves generalization capacity. The proposed model marks scores competitive with state-of-the-art RNN-based models.", "phrases": ["recursive neural networks", "relation classification", "important phrase"], "overall_score": 1.9191939090245749, "scores": [0.8384854901651408, 1.0670690970563568, 0.5949313859536153], "rank_score": 0.8334953243917043} -{"id": "gella-etal-2017-image", "title": "Image Pivoting for Learning Multilingual Multimodal Representations", "abstract": "In this paper we propose a model to learn multimodal multilingual representations for matching images and sentences in different languages, with the aim of advancing multilingual versions of image search and image understanding. Our model learns a common representation for images and their descriptions in two different languages (which need not be parallel) by considering the image as a pivot between two languages. We introduce a new pairwise ranking loss function which can handle both symmetric and asymmetric similarity between the two modalities. We evaluate our models on image-description ranking for German and English, and on semantic textual similarity of image descriptions in English. In both cases we achieve state-of-the-art performance.", "phrases": ["pivot", "different language", "image"], "overall_score": 2.3101538944085673, "scores": [1.4198423248861767, 0.5552393981735968, 0.5245539523054012], "rank_score": 0.8332118917883916} -{"id": "smith-eisner-2006-minimum", "title": "Minimum Risk Annealing for Training Log-Linear Models", "abstract": "When training the parameters for a natural language system, one would prefer to minimize 1-best loss (error) on an evaluation set. Since the error surface for many natural language problems is piecewise constant and riddled with local minima, many systems instead optimize log-likelihood, which is conveniently differentiable and convex. We propose training instead to minimize the expected loss, or risk. We define this expectation using a probability distribution over hypotheses that we gradually sharpen (anneal) to focus on the 1-best hypothesis. Besides the linear loss functions used in previous work, we also describe techniques for optimizing nonlinear functions such as precision or the BLEU metric. We present experiments training log-linear combinations of models for dependency parsing and for machine translation. In machine translation, annealed minimum risk training achieves significant improvements in BLEU over standard minimum error training. We also show improvements in labeled dependency parsing.", "phrases": ["log-linear model", "loss", "machine translation", "minimum risk training"], "overall_score": 2.495406490872845, "scores": [1.1547580893689824, 0.9340029055139119, 0.6512110972514151, 0.5919765107525791], "rank_score": 0.832987150721722} -{"id": "eck-etal-2007-translation", "title": "Translation Model Pruning via Usage Statistics for Statistical Machine Translation", "abstract": "We describe a new pruning approach to remove phrase pairs from translation models of statistical machine translation systems. The approach applies the original translation system to a large amount of text and calculates usage statistics for the phrase pairs. 
Using these statistics the relevance of each phrase pair can be estimated. The approach is tested against a strong baseline based on previous work and shows significant improvements.", "phrases": ["usage statistic", "pruning approach", "phrase pair", "large amount"], "overall_score": 1.7320360596552304, "scores": [1.1430485685397422, 1.0958974843625722, 0.5628216539641334, 0.5299654050075017], "rank_score": 0.8329332779684874} -{"id": "gupta-etal-2014-text", "title": "Text Summarization through Entailment-based Minimum Vertex Cover", "abstract": "Sentence Connectivity is a textual characteristic that may be incorporated intelligently for the selection of sentences of a well-meaning summary. However, the existing summarization methods do not utilize its potential fully. The present paper introduces a novel method for single-document text summarization. It poses the text summarization task as an optimization problem, and attempts to solve it using Weighted Minimum Vertex Cover (WMVC), a graph-based algorithm. Textual entailment, an established indicator of semantic relationships between text units, is used to measure sentence connectivity and construct the graph on which WMVC operates. Experiments on a standard summarization dataset show that the suggested algorithm outperforms related methods.", "phrases": ["summarization", "textual entailment recognition", "non-redundant sentence"], "overall_score": 1.9174012158956544, "scores": [1.387508519774645, 0.5661098155879931, 0.5445319676115667], "rank_score": 0.8327167676580682} -{"id": "palaskar-etal-2019-multimodal", "title": "Multimodal Abstractive Summarization for How2 Videos", "abstract": "In this paper, we study abstractive summarization for open-domain videos. Unlike the traditional text news summarization, the goal is less to \u201ccompress\u201d text information but rather to provide a fluent textual summary of information that has been collected and fused from different source modalities, in our case video and audio transcripts (or text). We show how a multi-source sequence-to-sequence model with hierarchical attention can integrate information from different modalities into a coherent output, compare various models trained with different modalities and present pilot experiments on the How2 corpus of instructional videos. We also propose a new evaluation metric (Content F1) for abstractive summarization task that measures semantic adequacy rather than fluency of the summaries, which is covered by metrics like ROUGE and BLEU.", "phrases": ["video", "modality", "hierarchical attention", "multimodal abstractive summarization"], "overall_score": 1.8296162926072108, "scores": [0.9604825745215981, 1.3014524251888042, 0.5407857075798396, 0.5280563311230554], "rank_score": 0.8326942596033244} -{"id": "li-etal-2009-collaborative", "title": "Collaborative Decoding: Partial Hypothesis Re-ranking Using Translation Consensus between Decoders", "abstract": "This paper presents collaborative decoding (co-decoding), a new method to improve machine translation accuracy by leveraging translation consensus between multiple machine translation decoders. Different from system combination and MBR decoding, which post-process the n-best lists or word lattice of machine translation decoders, in our method multiple machine translation decoders collaborate by exchanging partial translation results. 
Using an iterative decoding approach, n-gram agreement statistics between translations of multiple decoders are employed to re-rank both full and partial hypotheses explored in decoding. Experimental results on data sets for the NIST Chinese-to-English machine translation task show that the co-decoding method can bring significant improvements to all baseline decoders, and the outputs from co-decoding can be used to further improve the result of system combination.", "phrases": ["hypothesis", "translation consensus", "multiple decoder"], "overall_score": 1.996202200409277, "scores": [0.8555382529757573, 1.0971719630499726, 0.5447327161833406], "rank_score": 0.8324809774030234} -{"id": "schmitt-etal-2018-joint", "title": "Joint Aspect and Polarity Classification for Aspect-based Sentiment Analysis with End-to-End Neural Networks", "abstract": "In this work, we propose a new model for aspect-based sentiment analysis. In contrast to previous approaches, we jointly model the detection of aspects and the classification of their polarity in an end-to-end trainable neural network. We conduct experiments with different neural architectures and word representations on the recent GermEval 2017 dataset. We were able to show considerable performance gains by using the joint modeling approach in all settings compared to pipeline approaches. The combination of a convolutional neural network and fasttext embeddings outperformed the best submission of the shared task in 2017, establishing a new state of the art.", "phrases": ["sentiment analysis", "convolutional neural network", "aspect category"], "overall_score": 1.9167434658659914, "scores": [0.8836749893579008, 1.0831565810789667, 0.5304617609122744], "rank_score": 0.832431110449714} -{"id": "volkova-etal-2017-separating", "title": "Separating Facts from Fiction: Linguistic Models to Classify Suspicious and Trusted News Posts on Twitter", "abstract": "Pew research polls report 62 percent of U.S. adults get news on social media (Gottfried and Shearer, 2016). In a December poll, 64 percent of U.S. adults said that \u201cmade-up news\u201d has caused a \u201cgreat deal of confusion\u201d about the facts of current events (Barthel et al., 2016). Fabricated stories in social media, ranging from deliberate propaganda to hoaxes and satire, contribute to this confusion in addition to having serious effects on global stability. In this work we build predictive models to classify 130 thousand news posts as suspicious or verified, and predict four sub-types of suspicious news \u2013 satire, hoaxes, clickbait and propaganda. We show that neural network models trained on tweet content and social network interactions outperform lexical models. Unlike previous work on deception detection, we find that adding syntax and grammar features to our models does not improve performance. 
Incorporating linguistic features improves classification results; however, social interaction features are most informative for finer-grained separation between four types of suspicious news posts.", "phrases": ["twitter", "news post", "linguistic feature", "misinformation", "social medium"], "overall_score": 2.6094330406805537, "scores": [1.3765293963068717, 0.8403078132192765, 0.8328235028585277, 0.5577705075584807, 0.5536879864389439], "rank_score": 0.83222384127642} -{"id": "fried-klein-2018-policy", "title": "Policy Gradient as a Proxy for Dynamic Oracles in Constituency Parsing", "abstract": "Dynamic oracles provide strong supervision for training constituency parsers with exploration, but must be custom defined for a given parser's transition system. We explore using a policy gradient method as a parser-agnostic alternative. In addition to directly optimizing for a tree-level metric such as F1, policy gradient has the potential to reduce exposure bias by allowing exploration during training; moreover, it does not require a dynamic oracle for supervision. On four constituency parsers in three languages, the method substantially outperforms static oracle likelihood training in almost all settings. For parsers where a dynamic oracle is available (including a novel oracle which we define for the transition system of Dyer et al., 2016), policy gradient typically recaptures a substantial fraction of the performance gain afforded by the dynamic oracle.", "phrases": ["dynamic oracle", "constituency parser", "policy gradient"], "overall_score": 1.730438016241134, "scores": [0.8265366327488899, 1.0965719070185993, 0.5733858048293304], "rank_score": 0.8321647815322732} -{"id": "zhang-etal-2021-need", "title": "When Do You Need Billions of Words of Pretraining Data?", "abstract": "NLP is currently dominated by language models like RoBERTa which are pretrained on billions of words. But what exact knowledge or skills do Transformer LMs learn from large-scale pretraining that they cannot learn from less data? To explore this question, we adopt five styles of evaluation: classifier probing, information-theoretic probing, unsupervised relative acceptability judgments, unsupervised language model knowledge probing, and fine-tuning on NLU tasks. We then draw learning curves that track the growth of these different measures of model ability with respect to pretraining data volume using the MiniBERTas, a group of RoBERTa models pretrained on 1M, 10M, 100M and 1B words. We find that these LMs require only about 10M to 100M words to learn to reliably encode most syntactic and semantic features we test. They need a much larger quantity of data in order to acquire enough commonsense knowledge and other skills required to master typical downstream NLU tasks. 
The results suggest that, while the ability to encode linguistic features is almost certainly necessary for language understanding, it is likely that other, unidentified, forms of knowledge are the major drivers of recent improvements in language understanding among large pretrained models.", "phrases": ["billion", "roberta", "skill", "semantic feature"], "overall_score": 1.8280149131462926, "scores": [0.841239162331697, 1.0932310277208184, 0.8420234519738294, 0.5513681195828682], "rank_score": 0.8319654404023032} -{"id": "aghajanyan-etal-2021-muppet", "title": "Muppet: Massive Multi-task Representations with Pre-Finetuning", "abstract": "We propose pre-finetuning, an additional large-scale learning stage between language model pre-training and fine-tuning. Pre-finetuning is massively multi-task learning (around 50 datasets, over 4.8 million total labeled examples), and is designed to encourage learning of representations that generalize better to many different tasks. We show that pre-finetuning consistently improves performance for pretrained discriminators (e.g. RoBERTa) and generation models (e.g. BART) on a wide range of tasks (sentence prediction, commonsense reasoning, MRC, etc.), while also significantly improving sample efficiency during fine-tuning. We also show that large-scale multi-tasking is crucial; pre-finetuning can hurt performance when few tasks are used up until a critical point (usually above 15) after which performance improves linearly in the number of tasks.", "phrases": ["language model", "muppet", "downstream task"], "overall_score": 2.0670689796222956, "scores": [1.3796231448098575, 0.5830100441884862, 0.5329160660998075], "rank_score": 0.8318497516993837} -{"id": "patwardhan-riloff-2007-effective", "title": "Effective Information Extraction with Semantic Affinity Patterns and Relevant Regions", "abstract": "We present an information extraction system that decouples the tasks of finding relevant regions of text and applying extraction patterns. We create a self-trained relevant sentence classifier to identify relevant regions, and use a semantic affinity measure to automatically learn domain-relevant extraction patterns. We then distinguish primary patterns from secondary patterns and apply the patterns selectively in the relevant regions. The resulting IE system achieves good performance on the MUC-4 terrorism corpus and ProMed disease outbreak stories. This approach requires only a few seed extraction patterns and a collection of relevant and irrelevant documents for training.", "phrases": ["information extraction", "relevant region", "affinity measure", "common word pattern", "event-specific document"], "overall_score": 2.404244009068317, "scores": [0.9184107101508875, 0.9222685257743272, 0.8947691284226771, 0.8672217979577318, 0.5563865446736733], "rank_score": 0.8318113413958594} -{"id": "gardner-etal-2013-improving", "title": "Improving Learning and Inference in a Large Knowledge-Base using Latent Syntactic Cues", "abstract": "Automatically constructed Knowledge Bases (KBs) are often incomplete and there is a genuine need to improve their coverage. Path Ranking Algorithm (PRA) is a recently proposed method which aims to improve KB coverage by performing inference directly over the KB graph. For the first time, we demonstrate that addition of edges labeled with latent features mined from a large dependency parsed corpus of 500 million Web documents can significantly outperform previous PRA-based approaches on the KB inference task. 
We present extensive experimental results validating this finding. The resources presented in this paper are publicly available.", "phrases": ["path", "pra", "edge label", "pre-trained vector representation", "relation extraction"], "overall_score": 2.3052133009162996, "scores": [1.6054970934311301, 0.8519386652920312, 0.5826913005722617, 0.5741577717080285, 0.5428649157755772], "rank_score": 0.8314299493558058} -{"id": "dreyer-eisner-2009-graphical", "title": "Graphical Models over Multiple Strings", "abstract": "We study graphical modeling in the case of string-valued random variables. Whereas a weighted finite-state transducer can model the probabilistic relationship between two strings, we are interested in building up joint models of three or more strings. This is needed for inflectional paradigms in morphology, cognate modeling or language reconstruction, and multiple-string alignment. We propose a Markov Random Field in which each factor (potential function) is a weighted finite-state machine, typically a transducer that evaluates the relationship between just two of the strings. The full joint distribution is then a product of these factors. Though decoding is actually undecidable in general, we can still do efficient joint inference using approximate belief propagation; the necessary computations and messages are all finite-state. We demonstrate the methods by jointly predicting morphological forms.", "phrases": ["paradigm", "markov random field", "message", "graphical model"], "overall_score": 1.728367971837611, "scores": [1.299468104282832, 0.8896082644088383, 0.5837201628243472, 0.551880670885899], "rank_score": 0.8311693006004791} -{"id": "luong-manning-2016-achieving", "title": "Achieving Open Vocabulary Neural Machine Translation with Hybrid Word-Character Models", "abstract": "Nearly all previous work on neural machine translation (NMT) has used quite restricted vocabularies, perhaps with a subsequent method to patch in unknown words. This paper presents a novel word-character solution to achieving open vocabulary NMT. We build hybrid systems that translate mostly at the word level and consult the character components for rare words. Our character-level recurrent neural networks compute source word representations and recover unknown target words when needed. The twofold advantage of such a hybrid approach is that it is much faster and easier to train than character-based ones; at the same time, it never produces unknown words as in the case of word-based models. On the WMT'15 English to Czech translation task, this hybrid approach offers an additional boost of +2.1-11.4 BLEU points over models that already handle unknown words. Our best system achieves a new state-of-the-art result with 20.7 BLEU score. 
We demonstrate that our character models can successfully learn to not only generate well-formed words for Czech, a highly-inflected language with a very complex vocabulary, but also build correct representations for English source words.", "phrases": ["open vocabulary", "machine translation", "recurrent neural network", "character-based embedding"], "overall_score": 2.954990260169164, "scores": [1.6513836160749478, 0.5625891343177158, 0.5575553401244824, 0.5530299657551321], "rank_score": 0.8311395140680695} -{"id": "beck-etal-2021-investigating", "title": "Investigating label suggestions for opinion mining in German Covid-19 social media", "abstract": "This work investigates the use of interactively updated label suggestions to improve upon the efficiency of gathering annotations on the task of opinion mining in German Covid-19 social media data. We develop guidelines to conduct a controlled annotation study with social science students and find that suggestions from a model trained on a small, expert-annotated dataset already lead to a substantial improvement \u2013 in terms of inter-annotator agreement (+.14 Fleiss' \u03ba) and annotation quality \u2013 compared to students that do not receive any label suggestions. We further find that label suggestions from interactively trained models do not lead to an improvement over suggestions from a static model. Nonetheless, our analysis of suggestion bias shows that annotators remain capable of reflecting upon the suggested label in general. Finally, we confirm the quality of the annotated data in transfer learning experiments between different annotator groups. To facilitate further research in opinion mining on social media data, we release our collected data consisting of 200 expert and 2,785 student annotations.", "phrases": ["opinion mining", "german covid-19", "social medium"], "overall_score": 0.9130080799357025, "scores": [0.9138248302694231, 0.7949438934025221, 0.7843985820822373], "rank_score": 0.8310557685847275} -{"id": "costa-branco-2012-aspectual", "title": "Aspectual Type and Temporal Relation Classification", "abstract": "In this paper we investigate the relevance of aspectual type for the problem of temporal information processing, i.e. the problems of the recent TempEval challenges. \n \nFor a large list of verbs, we obtain several indicators about their lexical aspect by querying the web for expressions where these verbs occur in contexts associated with specific aspectual types. \n \nWe then proceed to extend existing solutions for the problem of temporal information processing with the information extracted this way. The improved performance of the resulting models shows that (i) aspectual type can be data-mined with unsupervised methods with a level of noise that does not prevent this information from being useful and that (ii) temporal information processing can profit from information about aspectual type.", "phrases": ["temporal relation classification", "information processing", "aspectual type"], "overall_score": 1.4884560951460115, "scores": [0.9767111692144886, 0.9715317816960996, 0.5439265406573018], "rank_score": 0.8307231638559633} -{"id": "borgeaud-emerson-2020-leveraging", "title": "Leveraging Sentence Similarity in Natural Language Generation: Improving Beam Search using Range Voting", "abstract": "We propose a method for natural language generation, choosing the most representative output rather than the most likely output. 
By viewing the language generation process from the voting theory perspective, we define representativeness using range voting and a similarity measure. The proposed method can be applied when generating from any probabilistic language model, including n-gram models and neural network models. We evaluate different similarity measures on an image captioning task and a machine translation task, and show that our method generates longer and more diverse sentences, providing a solution to the common problem of short outputs being preferred over longer and more informative ones. The generated sentences obtain higher BLEU scores, particularly when the beam size is large. We also perform a human evaluation on both tasks and find that the outputs generated using our method are rated higher.", "phrases": ["natural language generation", "beam search", "range voting"], "overall_score": 1.1514530956233495, "scores": [0.9093050747117133, 0.7990395113109563, 0.7834489202854082], "rank_score": 0.8305978354360258} -{"id": "chen-etal-2021-geoqa", "title": "GeoQA: A Geometric Question Answering Benchmark Towards Multimodal Numerical Reasoning", "abstract": "Automatic math problem solving has recently attracted increasing attention as a long-standing AI benchmark. In this paper, we focus on solving geometric problems, which requires a comprehensive understanding of textual descriptions, visual diagrams, and theorem knowledge. However, the existing methods were highly dependent on handcraft rules and were merely evaluated on small-scale datasets. Therefore, we propose a Geometric Question Answering dataset GeoQA, containing 4,998 geometric problems with corresponding annotated programs, which illustrate the solving process of the given problems. Compared with another publicly available dataset GeoS, GeoQA is 25 times larger, in which the program annotations can provide a practical testbed for future research on explicit and explainable numerical reasoning. Moreover, we introduce a Neural Geometric Solver (NGS) to address geometric problems by comprehensively parsing multimodal information and generating interpretable programs. We further add multiple self-supervised auxiliary tasks on NGS to enhance cross-modal semantic representation. Extensive experiments on GeoQA validate the effectiveness of our proposed NGS and auxiliary tasks. However, the results are still significantly lower than human performance, which leaves large room for future research. Our benchmark and code are released at https://github.com/chen-judge/GeoQA .", "phrases": ["numerical reasoning", "geometric problem", "small-scale dataset", "geoqa"], "overall_score": 0.9122825951629984, "scores": [1.708934666931071, 0.5602518739344295, 0.5265221889712388, 0.525872885708428], "rank_score": 0.8303954038862917} -{"id": "tsai-etal-2016-cross", "title": "Cross-Lingual Named Entity Recognition via Wikification", "abstract": "Named Entity Recognition (NER) models for language L are typically trained using annotated data in that language. We study cross-lingual NER, where a model for NER in L is trained on another, source, language (or multiple source languages). We introduce a language independent method for NER, building on cross-lingual wikification, a technique that grounds words and phrases in non-English text into English Wikipedia entries. Thus, mentions in any language can be described using a set of categories and FreeBase types, yielding, as we show, strong language-independent features. 
With this insight, we propose an NER model that can be applied to all languages in Wikipedia. When trained on English, our model outperforms comparable approaches on the standard CoNLL datasets (Spanish, German, and Dutch) and also performs very well on low-resource languages (e.g., Turkish, Tagalog, Yoruba, Bengali, and Tamil) that have significantly smaller Wikipedia. Moreover, our method allows us to train on multiple source languages, typically improving NER results on the target languages. Finally, we show that our language-independent features can be used also to enhance monolingual NER systems, yielding improved results for all 9 languages.", "phrases": ["entity recognition", "cross-lingual ner", "wikipedia", "low-resource language", "name tagging"], "overall_score": 2.399591306232595, "scores": [0.8631026401189812, 1.1996631362224415, 0.9543464521991962, 0.5750892549966089, 0.5588065998990742], "rank_score": 0.8302016166872604} -{"id": "matusov-etal-2004-symmetric", "title": "Symmetric Word Alignments for Statistical Machine Translation", "abstract": "In this paper, we address the word alignment problem for statistical machine translation. We aim at creating a symmetric word alignment allowing for reliable one-to-many and many-to-one word relationships. We perform the iterative alignment training in the source-to-target and the target-to-source direction with the well-known IBM and HMM alignment models. Using these models, we robustly estimate the local costs of aligning a source word and a target word in each sentence pair. Then, we use efficient graph algorithms to determine the symmetric alignment with minimal total costs (i.e. maximal alignment probability). We evaluate the automatic alignments created in this way on the German-English Verbmobil task and the French-English Canadian Hansards task. We show statistically significant improvements of the alignment quality compared to the best results reported so far. On the Verbmobil task, we achieve an improvement of more than 1% absolute over the baseline error rate of 4.7%.", "phrases": ["statistical machine translation", "cost", "symmetric alignment"], "overall_score": 1.9110483689494544, "scores": [0.9886931817508978, 0.8727739847653572, 0.628406117338618], "rank_score": 0.8299577612849577} -{"id": "mohammad-etal-2008-computing", "title": "Computing Word-Pair Antonymy", "abstract": "Knowing the degree of antonymy between words has widespread applications in natural language processing. Manually-created lexicons have limited coverage and do not include most semantically contrasting word pairs. We present a new automatic and empirical measure of antonymy that combines corpus statistics with the structure of a published thesaurus. The approach is evaluated on a set of closest-opposite questions, obtaining a precision of over 80%. Along the way, we discuss what humans consider antonymous and how antonymy manifests itself in utterances.", "phrases": ["word-pair antonymy", "corpus statistic", "thesaurus"], "overall_score": 2.0620161074849657, "scores": [1.0208103917176365, 0.8978932934982388, 0.5707452938766224], "rank_score": 0.829816326364166} -{"id": "chaturvedi-etal-2017-story", "title": "Story Comprehension for Predicting What Happens Next", "abstract": "Automatic story comprehension is a fundamental challenge in Natural Language Understanding, and can enable computers to learn about social norms, human behavior and commonsense. 
In this paper, we present a story comprehension model that explores three distinct semantic aspects: (i) the sequence of events described in the story, (ii) its emotional trajectory, and (iii) its plot consistency. We judge the model's understanding of real-world stories by inquiring if, like humans, it can develop an expectation of what will happen next in a given story. Specifically, we use it to predict the correct ending of a given short story from possible alternatives. The model uses a hidden variable to weigh the semantic aspects in the context of the story. Our experiments demonstrate the potential of our approach to characterize these semantic aspects, and the strength of the hidden variable based approach. The model outperforms the state-of-the-art approaches and achieves best results on a publicly available dataset.", "phrases": ["semantic aspect", "story comprehension", "sentiment trajectory", "event sequence", "coherence model"], "overall_score": 2.128050873333716, "scores": [0.9972206166833886, 0.8704633729679085, 0.8685264691999346, 0.8609011097598992, 0.5512176511123188], "rank_score": 0.8296658439446899} -{"id": "zhang-etal-2013-chinese", "title": "Chinese Parsing Exploiting Characters", "abstract": "Characters play an important role in the Chinese language, yet computational processing of Chinese has been dominated by word-based approaches, with leaves in syntax trees being words. We investigate Chinese parsing from the character-level, extending the notion of phrase-structure trees by annotating internal structures of words. We demonstrate the importance of character-level information to Chinese processing by building a joint segmentation, part-of-speech (POS) tagging and phrase-structure parsing system that integrates character-structure features. Our joint system significantly outperforms a state-of-the-art word-based baseline on the standard CTB5 test, and gives the best published results for Chinese parsing.", "phrases": ["character", "internal structure", "segmentation", "chinese", "pos tag"], "overall_score": 2.2465340328749828, "scores": [1.32908150509538, 1.096545052404141, 0.6009562393647352, 0.5654405572809003, 0.5558577153428018], "rank_score": 0.8295762138975915} -{"id": "striegnitz-etal-2011-report", "title": "Report on the Second Second Challenge on Generating Instructions in Virtual Environments (GIVE-2.5)", "abstract": "GIVE-2.5 evaluates eight natural language generation (NLG) systems that guide human users through solving a task in a virtual environment. The data is collected via the Internet, and to date, 536 interactions of subjects with one of the NLG systems have been recorded. The systems are compared using both task performance measures and subjective ratings by human users.", "phrases": ["generating instruction", "virtual environments", "give-2.5"], "overall_score": 1.6137550512943877, "scores": [0.8482619745308796, 0.8457793687111865, 0.7938767943109272], "rank_score": 0.8293060458509979} -{"id": "lee-yeung-2018-personalizing", "title": "Personalizing Lexical Simplification", "abstract": "A lexical simplification (LS) system aims to substitute complex words with simple words in a text, while preserving its meaning and grammaticality. Despite individual users' differences in vocabulary knowledge, current systems do not consider these variations; rather, they are trained to find one optimal substitution or ranked list of substitutions for all users. 
We evaluate the performance of a state-of-the-art LS system on individual learners of English at different proficiency levels, and measure the benefits of using complex word identification (CWI) models to personalize the system. Experimental results show that even a simple personalized CWI model, based on graded vocabulary lists, can help the system avoid some unnecessary simplifications and produce more readable output.", "phrases": ["lexical simplification", "individual user", "learner", "cwi"], "overall_score": 1.7239420397808611, "scores": [0.9947029346057338, 1.1540866413779414, 0.5882053379015929, 0.5791685948772022], "rank_score": 0.8290408771906176} -{"id": "gao-huang-2017-detecting", "title": "Detecting Online Hate Speech Using Context Aware Models", "abstract": "In the wake of a polarizing election, the cyber world is laden with hate speech. Context accompanying a hate speech text is useful for identifying hate speech, which, however, has been largely overlooked in existing datasets and hate speech detection models. In this paper, we provide an annotated corpus of hate speech with context information preserved. Then we propose two types of hate speech detection models that incorporate context information, a logistic regression model with context features and a neural network model with learning components for context. Our evaluation shows that both models outperform a strong baseline by around 3% to 4% in F1 score and combining these two models further improves the performance by another 7% in F1 score.", "phrases": ["hate speech", "annotated corpus", "context information", "logistic regression model", "news article"], "overall_score": 2.561402251430725, "scores": [1.3680945951421695, 0.8592091101500178, 0.8328660009330202, 0.5448020424034345, 0.5382943017095625], "rank_score": 0.8286532100676409} -{"id": "xia-mccord-2004-improving", "title": "Improving a Statistical MT System with Automatically Learned Rewrite Patterns", "abstract": "Current clump-based statistical MT systems have two limitations with respect to word ordering: First, they lack a mechanism for expressing and using generalization that accounts for reorderings of linguistic phrases. Second, the ordering of target words in such systems does not respect linguistic phrase boundaries. To address these limitations, we propose to use automatically learned rewrite patterns to preprocess the source sentences so that they have a word order similar to that of the target language. Our system is a hybrid one. The basic model is statistical, but we use broad-coverage rule-based parsers in two ways - during training for learning rewrite patterns, and at runtime for reordering the source sentences. Our experiments show 10% relative improvement in Bleu measure.", "phrases": ["source sentence", "rewrite pattern extraction", "structure-based reordering", "psmt system", "principle"], "overall_score": 3.577587621374156, "scores": [1.8983316733035462, 0.5900336000539154, 0.5693845085898321, 0.5648186457248371, 0.520567033613336], "rank_score": 0.8286270922570933} -{"id": "wu-etal-2019-neural", "title": "Neural News Recommendation with Heterogeneous User Behavior", "abstract": "News recommendation is important for online news platforms to help users find news of interest and alleviate information overload. Existing news recommendation methods usually rely on the news click history to model user interest. 
However, these methods may suffer from the data sparsity problem, since the news click behaviors of many users in online news platforms are usually very limited. Fortunately, some other kinds of user behaviors such as webpage browsing and search queries can also provide useful clues about users' news reading interests. In this paper, we propose a neural news recommendation approach which can exploit heterogeneous user behaviors. Our approach contains two major modules, i.e., news representation and user representation. In the news representation module, we learn representations of news from their titles via CNN networks, and apply attention networks to select important words. In the user representation module, we propose an attentive multi-view learning framework to learn unified representations of users from their heterogeneous behaviors such as search queries, clicked news and browsed webpages. In addition, we use word- and record-level attentions to select informative words and behavior records. Experiments on a real-world dataset validate the effectiveness of our approach.", "phrases": ["heterogeneous user behavior", "news", "short-term user interest"], "overall_score": 1.6121333929314035, "scores": [0.7846236650661896, 1.1641059740879665, 0.536688395764955], "rank_score": 0.8284726783063703} -{"id": "li-sporleder-2010-linguistic", "title": "Linguistic Cues for Distinguishing Literal and Non-Literal Usages", "abstract": "We investigate the effectiveness of different linguistic cues for distinguishing literal and non-literal usages of potentially idiomatic expressions. We focus specifically on features that generalize across different target expressions. While idioms on the whole are frequent, instances of each particular expression can be relatively infrequent and it will often not be feasible to extract and annotate a sufficient number of examples for each expression one might want to disambiguate. We experimented with a number of different features and found that features encoding lexical cohesion as well as some syntactic features can generalize well across idioms.", "phrases": ["usage", "idiomatic expression", "lexical context"], "overall_score": 1.907573115988441, "scores": [1.051443429772847, 0.8521079372836541, 0.5817940672458135], "rank_score": 0.8284484781007716} -{"id": "jin-etal-2004-segmentation", "title": "Segmentation of Chinese Long Sentences Using Commas", "abstract": "The comma is the most common form of punctuation. As such, it may have the greatest effect on the syntactic analysis of a sentence. As Chinese is an isolating language, its sentences provide fewer cues for parsing. The clues for segmentation of a long Chinese sentence are even fewer. However, the average frequency of comma usage in Chinese is higher than in other languages. The comma plays an important role in long Chinese sentence segmentation. This paper proposes a method for classifying commas in Chinese sentences by their context, then segments a long sentence according to the classification results. 
Experimental results show that accuracy for the comma classification reaches 87.1 percent, and with our segmentation model, our parser's dependency parsing accuracy improves by 9.6 percent.", "phrases": ["chinese", "long sentence", "segmentation"], "overall_score": 1.8196964943562497, "scores": [0.7999133191506386, 0.8568994805884974, 0.8277258948384636], "rank_score": 0.8281795648591999} -{"id": "moro-navigli-2015-semeval", "title": "SemEval-2015 Task 13: Multilingual All-Words Sense Disambiguation and Entity Linking", "abstract": "In this paper we present the Multilingual All-Words Sense Disambiguation and Entity Linking task. Word Sense Disambiguation (WSD) and Entity Linking (EL) are well-known problems in the Natural Language Processing field and both address the lexical ambiguity of language. Their main difference lies in the kind of meaning inventories that are used: EL uses encyclopedic knowledge, while WSD uses lexicographic information. Our aim with this task is to analyze whether, and if so, how, using a resource that integrates both kinds of inventories (i.e., BabelNet 2.5.1) might enable WSD and EL to be solved by means of similar (even the same) methods. Moreover, we investigate this task in a multilingual setting and for some specific domains.", "phrases": ["entity linking", "wsd", "multilingual setting"], "overall_score": 1.7220912083674262, "scores": [0.8768121893460984, 1.0418542666716528, 0.5657859902524173], "rank_score": 0.8281508154233895} -{"id": "ellsworth-janin-2007-mutaphrase", "title": "Mutaphrase: Paraphrasing with FrameNet", "abstract": "We describe a preliminary version of Mutaphrase, a system that generates paraphrases of semantically labeled input sentences using the semantics and syntax encoded in FrameNet, a freely available lexicosemantic database. The algorithm generates a large number of paraphrases with a wide range of syntactic and semantic distances from the input. For example, given the input \"I like eating cheese\", the system outputs the syntactically distant \"Eating cheese is liked by me\", the semantically distant \"I fear sipping juice\", and thousands of other sentences. The wide range of generated paraphrases makes the algorithm ideal for a range of statistical machine learning problems such as machine translation and language modeling as well as other semantics-dependent tasks such as query and language generation.", "phrases": ["paraphrasing", "framenet", "machine translation", "mutaphrase"], "overall_score": 0.9097887730836912, "scores": [0.9488559105066597, 0.9376632996909509, 0.8655439724619914, 0.5604385341623106], "rank_score": 0.8281254292054783} -{"id": "hernandez-farias-etal-2015-valento", "title": "ValenTo: Sentiment Analysis of Figurative Language Tweets with Irony and Sarcasm", "abstract": "This paper describes the system used by the ValenTo team in Task 11, Sentiment Analysis of Figurative Language in Twitter, at SemEval 2015. Our system used a regression model and additional external resources to assign polarity values. A distinctive feature of our approach is that we used not only word-sentiment lexicons providing polarity annotations, but also novel resources for dealing with emotions and psycholinguistic information. These are important aspects to tackle in figurative language such as irony and sarcasm, which were represented in the dataset. The system also exploited novel and standard structural features of tweets. 
Considering the different kinds of figurative language in the dataset, our submission obtained good results in recognizing sentiment polarity in both ironic and sarcastic tweets.", "phrases": ["sentiment analysis", "irony", "sarcasm"], "overall_score": 0.909629463586271, "scores": [0.9053832887884075, 0.7979108118652731, 0.7806471577015759], "rank_score": 0.8279804194517522} -{"id": "gonzalez-rubio-etal-2010-balancing", "title": "Balancing User Effort and Translation Error in Interactive Machine Translation via Confidence Measures", "abstract": "This work deals with the application of confidence measures within an interactive-predictive machine translation system in order to reduce human effort. If a small loss in translation quality can be tolerated for the sake of efficiency, user effort can be saved by interactively translating only those initial translations which the confidence measure classifies as incorrect. We apply confidence estimation as a way to achieve a balance between user effort savings and final translation error. Empirical results show that our proposal allows us to obtain almost perfect translations while significantly reducing user effort.", "phrases": ["user effort", "translation error", "confidence measure"], "overall_score": 1.1469214542424757, "scores": [0.8659425775701358, 0.8277527129785414, 0.7882915509384899], "rank_score": 0.827328947162389} -{"id": "pantel-etal-2004-towards", "title": "Towards Terascale Semantic Acquisition", "abstract": "Although vast amounts of textual data are freely available, many NLP algorithms exploit only a minute percentage of it. In this paper, we study the challenges of working at the terascale. We present an algorithm, designed for the terascale, for mining is-a relations that achieves similar performance to a state-of-the-art linguistically-rich method. We focus on the accuracy of these two systems as a function of processing time and corpus size.", "phrases": ["terascale", "large corpora", "generic pattern"], "overall_score": 2.662691250977254, "scores": [1.3444663656836133, 0.6081387332329662, 0.529029542351025], "rank_score": 0.8272115470892015} -{"id": "arase-tsujii-2017-monolingual", "title": "Monolingual Phrase Alignment on Parse Forests", "abstract": "We propose an efficient method to conduct phrase alignment on parse forests for paraphrase detection. Unlike previous studies, our method identifies syntactic paraphrases under linguistically motivated grammar. In addition, it allows phrases to non-compositionally align to handle paraphrases with non-homographic phrase correspondences. A dataset that provides gold parse trees and their phrase alignments is created. The experimental results confirm that the proposed method conducts highly accurate phrase alignment compared to human performance.", "phrases": ["phrase alignment", "parse forest", "paraphrase"], "overall_score": 1.6092081230895867, "scores": [0.9228139579966396, 0.9975028866932072, 0.5605913162611826], "rank_score": 0.8269693869836764} -{"id": "ive-etal-2019-distilling", "title": "Distilling Translations with Visual Awareness", "abstract": "Previous work on multimodal machine translation has shown that visual information is only needed in very specific cases, for example in the presence of ambiguous words where the textual context is not sufficient. As a consequence, models tend to learn to ignore this information. We propose a translate-and-refine approach to this problem where images are only used by a second-stage decoder. 
This approach is trained jointly to generate a good first draft translation and to improve over this draft by (i) making better use of the target language textual context (both left and right-side contexts) and (ii) making use of visual context. This approach leads to state-of-the-art results. Additionally, we show that it has the ability to recover from erroneous or missing words in the source language.", "phrases": ["ambiguous word", "translate-and-refine approach", "image", "visual context", "well use"], "overall_score": 1.9037874963686265, "scores": [1.1836214289887852, 0.9211416392351248, 0.9136615852300648, 0.5686906679818121, 0.5469067005107203], "rank_score": 0.8268044043893015} -{"id": "chang-lai-2004-preliminary", "title": "A Preliminary Study on Probabilistic Models for Chinese Abbreviations", "abstract": "Chinese abbreviations are widely used in modern Chinese texts. They are a special form of unknown words, including many named entities. This results in difficulties for correct Chinese processing. In this study, the Chinese abbreviation problem is regarded as an error recovery problem in which the suspect root words are the \u201cerrors\u201d to be recovered from a set of candidates. Such a problem is mapped to an HMM-based generation model for both abbreviation identification and root word recovery, and is integrated as part of a unified word segmentation model when the input extends to a complete sentence. Two major experiments are conducted to test the abbreviation models. In the first experiment, an attempt is made to guess the abbreviations of the root words. An accuracy rate of 72% is observed. In contrast, a second experiment is conducted to guess the root words from abbreviations. Some submodels could achieve as high as 51% accuracy with the simple HMM-based model. Some quantitative observations regarding heuristic abbreviation knowledge about Chinese are also reported.", "phrases": ["abbreviation", "markov model", "full-form phrase", "news article"], "overall_score": 1.8165448288852104, "scores": [1.3583774642485755, 0.8871928807790507, 0.5392735448970873, 0.5221368304301951], "rank_score": 0.8267451800887272} -{"id": "federmann-etal-2019-multilingual", "title": "Multilingual Whispers: Generating Paraphrases with Translation", "abstract": "Naturally occurring paraphrase data, such as multiple news stories about the same event, is a useful but rare resource. This paper compares translation-based paraphrase gathering using human, automatic, or hybrid techniques to monolingual paraphrasing by experts and non-experts. We gather translations, paraphrases, and empirical human quality assessments of these approaches. Neural machine translation techniques, especially when pivoting through related languages, provide a relatively robust source of paraphrases with diversity comparable to expert human paraphrases. Surprisingly, human translators do not reliably outperform neural systems. 
The resulting data release will not only be a useful test set, but will also allow additional explorations in translation and paraphrase quality assessments and relationships.", "phrases": ["paraphrasing", "multilingual whisper", "back-translation"], "overall_score": 1.6084501365238033, "scores": [0.940491400454348, 0.971073406196925, 0.5681747701806721], "rank_score": 0.8265798589439818} -{"id": "koufakou-etal-2020-hurtbert", "title": "HurtBERT: Incorporating Lexical Features with BERT for the Detection of Abusive Language", "abstract": "The detection of abusive or offensive remarks in social texts has received significant attention in research. In several related shared tasks, BERT has been shown to be the state-of-the-art. In this paper, we propose to utilize lexical features derived from a hate lexicon towards improving the performance of BERT in such tasks. We explore different ways to utilize the lexical features in the form of lexicon-based encodings at the sentence level or embeddings at the word level. We provide an extensive dataset evaluation that addresses in-domain as well as cross-domain detection of abusive content to render a complete picture. Our results indicate that our proposed models combining BERT with lexical features help improve over a baseline BERT model in many of our in-domain and cross-domain experiments.", "phrases": ["lexical feature", "detection", "abusive language"], "overall_score": 1.145413417646122, "scores": [0.8674083437420624, 0.8151269953335663, 0.7961880470329804], "rank_score": 0.8262411287028697} -{"id": "lu-etal-2016-joint", "title": "Joint Inference for Event Coreference Resolution", "abstract": "Event coreference resolution is a challenging problem since it relies on several components of the information extraction pipeline that typically yield noisy outputs. We hypothesize that exploiting the inter-dependencies between these components can significantly improve the performance of an event coreference resolver, and subsequently propose a novel joint inference based event coreference resolver using Markov Logic Networks (MLNs). However, the rich features that are important for this task are typically very hard to explicitly encode as MLN formulas since they significantly increase the size of the MLN, thereby making joint inference and learning infeasible. To address this problem, we propose a novel solution where we implicitly encode rich features into our model by augmenting the MLN distribution with low dimensional unit clauses. Our approach achieves state-of-the-art results on two standard evaluation corpora.", "phrases": ["event coreference resolution", "markov logic networks", "joint inference"], "overall_score": 1.4803544985000703, "scores": [0.9973592484661807, 0.9177890671409689, 0.5634564144203451], "rank_score": 0.8262015766758316} -{"id": "shlain-etal-2020-syntactic", "title": "Syntactic Search by Example", "abstract": "We present a system that allows a user to search a large linguistically annotated corpus using syntactic patterns over dependency graphs. In contrast to previous attempts to this effect, we introduce a light-weight query language that does not require the user to know the details of the underlying syntactic representations, and instead lets the user query the corpus by providing an example sentence coupled with simple markup. Search is performed at an interactive speed due to an efficient linguistic graph-indexing and retrieval engine. This allows for rapid exploration, development and refinement of syntax-based queries. 
We demonstrate the system using queries over two corpora: the English Wikipedia, and a collection of English PubMed abstracts. A demo of the Wikipedia system is available at .", "phrases": ["search", "example sentence", "wikipedia"], "overall_score": 1.4801782981188227, "scores": [0.9230912835344111, 0.9874646974367973, 0.5677537311407571], "rank_score": 0.826103237370655} -{"id": "miwa-etal-2010-evaluating", "title": "Evaluating Dependency Representations for Event Extraction", "abstract": "The detailed analyses of sentence structure provided by parsers have been applied to address several information extraction tasks. In a recent bio-molecular event extraction task, state-of-the-art performance was achieved by systems building specifically on dependency representations of parser output. While intrinsic evaluations have shown significant advances in both general and domain-specific parsing, the question of how these translate into practical advantage is seldom considered. In this paper, we analyze how event extraction performance is affected by parser and dependency representation, further considering the relation between intrinsic evaluation and performance at the extraction task. We find that good intrinsic evaluation results do not always imply good extraction performance, and that the types and structures of different dependency representations have specific advantages and disadvantages for the event extraction task.", "phrases": ["dependency representation", "event extraction", "pipeline"], "overall_score": 1.815077216387438, "scores": [0.9293572294111795, 1.02100209548615, 0.5278723976714117], "rank_score": 0.8260772408562471} -{"id": "finkel-etal-2006-solving", "title": "Solving the Problem of Cascading Errors: Approximate Bayesian Inference for Linguistic Annotation Pipelines", "abstract": "The end-to-end performance of natural language processing systems for compound tasks, such as question answering and textual entailment, is often hampered by use of a greedy 1-best pipeline architecture, which causes errors to propagate and compound at each stage. We present a novel architecture, which models these pipelines as Bayesian networks, with each low level task corresponding to a variable in the network, and then we perform approximate inference to find the best labeling. Our approach is extremely simple to apply but gains the benefits of sampling the entire distribution over labels at each stage in the pipeline. We apply our method to two tasks -- semantic role labeling and recognizing textual entailment -- and achieve useful performance gains from the superior pipeline architecture.", "phrases": ["linguistic annotation", "bayesian network", "variable", "pipeline approach"], "overall_score": 2.5143697900259236, "scores": [1.293511821503322, 0.8689517773568362, 0.6011656640427187, 0.539837657059878], "rank_score": 0.8258667299906887} -{"id": "jhamtani-etal-2017-shakespearizing", "title": "Shakespearizing Modern Language Using Copy-Enriched Sequence to Sequence Models", "abstract": "Variations in writing styles are commonly used to adapt the content to a specific context, audience, or purpose. However, applying stylistic variations is still by and large a manual process, and there have been few efforts towards automating it. In this paper we explore automated methods to transform text from modern English to Shakespearean English using an end-to-end trainable neural model with pointers to enable copy action. 
To tackle the limited amount of parallel data, we pre-train embeddings of words by leveraging external dictionaries mapping Shakespearean words to modern English words as well as additional text. Our methods are able to get a BLEU score of 31+, an improvement of 6 points above the strongest baseline. We publicly release our code to foster further research in this area.", "phrases": ["style", "external dictionary mapping", "parallel corpus", "sequence-to-sequence"], "overall_score": 2.552477116459984, "scores": [1.2963783050880504, 0.8475402128056451, 0.5954037379645415, 0.5637409080744374], "rank_score": 0.8257657909831686} -{"id": "saha-etal-2021-towards", "title": "Towards Sentiment and Emotion aided Multi-modal Speech Act Classification in Twitter", "abstract": "Speech Act Classification, determining the communicative intent of an utterance, has been investigated widely over the years as a standalone task. This holds true for discussion in any forum, including social media platforms such as Twitter. But the emotional state of the tweeter, which has a considerable effect on the communication, has not received the attention it deserves. Closely related to emotion is sentiment, and understanding of one helps understand the other. In this work, we first create a new multi-modal, emotion-TA ('TA' means tweet act, i.e., speech act in Twitter) dataset called EmoTA, collected from an open-source Twitter dataset. We propose a Dyadic Attention Mechanism (DAM) based multi-modal, adversarial multi-tasking framework. DAM incorporates intra-modal and inter-modal attention to fuse multiple modalities and learns generalized features across all the tasks. Experimental results indicate that the proposed framework boosts the performance of the primary task, i.e., TA classification (TAC), by benefitting from the two secondary tasks, i.e., Sentiment and Emotion Analysis, compared to its uni-modal and single-task TAC (tweet act classification) variants.", "phrases": ["emotion", "speech act classification", "twitter"], "overall_score": 0.9071183886959908, "scores": [0.8564426131037348, 0.8242450871823699, 0.7963965214707599], "rank_score": 0.8256947405856215} -{"id": "zhang-etal-2021-textoir", "title": "TEXTOIR: An Integrated and Visualized Platform for Text Open Intent Recognition", "abstract": "TEXTOIR is the first integrated and visualized platform for text open intent recognition. It is composed of two main modules: open intent detection and open intent discovery. Each module integrates most of the state-of-the-art algorithms and benchmark intent datasets. It also contains an overall framework connecting the two modules in a pipeline scheme. In addition, this platform has visualized tools for data and model management, training, evaluation and analysis of the performance from different aspects. 
TEXTOIR provides useful toolkits and convenient visualized interfaces for each sub-module, and designs a framework to implement a complete process to both identify known intents and discover open intents.", "phrases": ["visualized platform", "intent", "textoir"], "overall_score": 1.144569704228373, "scores": [0.8206871399701828, 0.8206155001217663, 0.8355949142710827], "rank_score": 0.8256325181210107} -{"id": "koomen-etal-2005-generalized", "title": "Generalized Inference with Multiple Semantic Role Labeling Systems", "abstract": "We present an approach to semantic role labeling (SRL) that takes the output of multiple argument classifiers and combines them into a coherent predicate-argument output by solving an optimization problem. The optimization stage, which is solved via integer linear programming, takes into account both the recommendation of the classifiers and a set of problem specific constraints, and is thus used both to clean the classification results and to ensure structural integrity of the final role labeling. We illustrate a significant improvement in overall SRL performance through this inference.", "phrases": ["semantic role labeling", "srl", "optimization problem", "previous research"], "overall_score": 2.2358220252758083, "scores": [1.5186876372134606, 0.656259475566039, 0.5697957973389592, 0.5577394801500835], "rank_score": 0.8256205975671356} -{"id": "van-hee-etal-2018-semeval", "title": "SemEval-2018 Task 3: Irony Detection in English Tweets", "abstract": "This paper presents the first shared task on irony detection: given a tweet, automatic natural language processing systems should determine whether the tweet is ironic (Task A) and which type of irony (if any) is expressed (Task B). The ironic tweets were collected using irony-related hashtags (i.e. #irony, #sarcasm, #not) and were subsequently manually annotated to minimise the amount of noise in the corpus. Prior to distributing the data, hashtags that were used to collect the tweets were removed from the corpus. For both tasks, a training corpus of 3,834 tweets was provided, as well as a test set containing 784 tweets. Our shared tasks received submissions from 43 teams for the binary classification Task A and from 31 teams for the multiclass Task B. The highest classification scores obtained for both subtasks are respectively F1= 0.71 and F1= 0.51 and demonstrate that fine-grained irony classification is much more challenging than binary irony detection.", "phrases": ["english tweets", "sarcasm detection", "semeval"], "overall_score": 2.4723155233870453, "scores": [0.9049382045398544, 1.0410276403238343, 0.5298717443752293], "rank_score": 0.8252791964129726} -{"id": "rios-kavuluru-2018-shot", "title": "Few-Shot and Zero-Shot Multi-Label Learning for Structured Label Spaces", "abstract": "Large multi-label datasets contain labels that occur thousands of times (frequent group), those that occur only a few times (few-shot group), and labels that never appear in the training dataset (zero-shot group). Multi-label few- and zero-shot label prediction is mostly unexplored on datasets with large label spaces, especially for text classification. In this paper, we perform a fine-grained evaluation to understand how state-of-the-art methods perform on infrequent labels. Furthermore, we develop few- and zero-shot methods for multi-label text classification when there is a known structure over the label space, and evaluate them on two publicly available medical text datasets: MIMIC II and MIMIC III. 
For few-shot labels we achieve improvements of 6.2% and 4.8% in R@10 for MIMIC II and MIMIC III, respectively, over prior efforts; the corresponding R@10 improvements for zero-shot labels are 17.3% and 19%.", "phrases": ["label space", "zero-shot learning", "promising result"], "overall_score": 2.5508397815741852, "scores": [1.0006373325048172, 0.9525424776935024, 0.5225284533385803], "rank_score": 0.8252360878456333} -{"id": "paetzel-etal-2014-multimodal", "title": "A Multimodal Corpus of Rapid Dialogue Games", "abstract": "This paper presents a multimodal corpus of spoken human-human dialogues collected as participants played a series of Rapid Dialogue Games (RDGs). The corpus consists of a collection of about 11 hours of spoken audio, video, and Microsoft Kinect data taken from 384 game interactions (dialogues). The games used for collecting the corpus required participants to give verbal descriptions of linguistic expressions or visual images and were specifically designed to engage players in a fast-paced conversation under time pressure. As a result, the corpus contains many examples of participants attempting to communicate quickly in specific game situations, and it also includes a variety of spontaneous conversational phenomena such as hesitations, filled pauses, overlapping speech, and low-latency responses. The corpus has been created to facilitate research in incremental speech processing for spoken dialogue systems. Potentially, the corpus could be used in several areas of speech and language research, including speech recognition, natural language understanding, natural language generation, and dialogue management.", "phrases": ["multimodal corpus", "rapid dialogue games", "series", "audio"], "overall_score": 1.4783327145110343, "scores": [0.9870772569693577, 0.8797219694522403, 0.8812022238530861, 0.5522913399131538], "rank_score": 0.8250731975469595} -{"id": "kiomourtzis-etal-2014-nomad", "title": "NOMAD: Linguistic Resources and Tools Aimed at Policy Formulation and Validation", "abstract": "The NOMAD project (Policy Formulation and Validation through non Moderated Crowd-sourcing) is a project that supports policy making, by providing rich, actionable information related to how citizens perceive different policies. NOMAD automatically analyzes citizen contributions to the informal web (e.g. forums, social networks, blogs, newsgroups and wikis) using a variety of tools. These tools comprise text retrieval, topic classification, argument detection and sentiment analysis, as well as argument summarization. NOMAD provides decision-makers with a full arsenal of solutions starting from describing a domain and a policy to applying content search and acquisition, categorization and visualization. These solutions work in a collaborative manner in the policy-making arena. 
NOMAD, thus, embeds editing, analysis and visualization technologies into a concrete framework, applicable in a variety of policy-making and decision support settings. In this paper we provide an overview of the linguistic tools and resources of NOMAD.", "phrases": ["policy formulation", "validation", "nomad"], "overall_score": 0.906423415977661, "scores": [0.8557784571733891, 0.8392665256405675, 0.78014146465393], "rank_score": 0.8250621491559622} -{"id": "niehues-cho-2017-exploiting", "title": "Exploiting Linguistic Resources for Neural Machine Translation Using Multi-task Learning", "abstract": "Linguistic resources such as part-of-speech (POS) tags have been extensively used in statistical machine translation (SMT) frameworks and have yielded better performances. However, usage of such linguistic annotations in neural machine translation (NMT) systems has been left under-explored. \nIn this work, we show that multi-task learning is a successful and easy approach to introduce additional knowledge into an end-to-end neural attentional model. By jointly training several natural language processing (NLP) tasks in one system, we are able to leverage common information and improve the performance of the individual task. \nWe analyze the impact of three design decisions in multi-task learning: the tasks used in training, the training schedule, and the degree of parameter sharing across the tasks, which is defined by the network architecture. The experiments are conducted for a German-to-English translation task. As additional linguistic resources, we exploit POS information and named-entities (NE). Experiments show that the translation quality can be improved by up to 1.5 BLEU points under the low-resource condition. The performance of the POS tagger is also improved using the multi-task learning scheme.", "phrases": ["neural machine translation", "multi-task learning", "part-of-speech tagging", "dependency parsing"], "overall_score": 2.0494893944661423, "scores": [0.9233396680607548, 0.9154251380597859, 0.9184224172915537, 0.5419136013870891], "rank_score": 0.8247752061997959} -{"id": "yang-etal-2020-hw", "title": "HW-TSC's Participation at WMT 2020 Automatic Post Editing Shared Task", "abstract": "The paper presents the submission by HW-TSC in the WMT 2020 Automatic Post Editing Shared Task. We participate in the English-German and English-Chinese language pairs. Our system is built based on the Transformer pre-trained on WMT 2019 and WMT 2020 News Translation corpora, and fine-tuned on the APE corpus. Bottleneck Adapter Layers are integrated into the model to prevent over-fitting. We further collect external translations as the augmented MT candidates to improve the performance. The experiment demonstrates that pre-trained NMT models are effective when fine-tuning with the APE corpus of a limited size, and the performance can be further improved with external MT augmentation. Our system achieves competitive results in both directions in the final evaluation.", "phrases": ["automatic post", "shared task", "hw-tsc"], "overall_score": 1.1431281665942985, "scores": [0.8308448772316468, 0.8290794135623653, 0.8138537147751186], "rank_score": 0.8245926685230436} -{"id": "gururangan-etal-2019-variational", "title": "Variational Pretraining for Semi-supervised Text Classification", "abstract": "We introduce VAMPIRE, a lightweight pretraining framework for effective text classification when data and computing resources are limited. 
We pretrain a unigram document model as a variational autoencoder on in-domain, unlabeled data and use its internal states as features in a downstream classifier. Empirically, we show the relative strength of VAMPIRE against computationally expensive contextual embeddings and other popular semi-supervised baselines under low resource settings. We also find that fine-tuning to in-domain data is crucial to achieving decent performance from contextual embeddings when working with limited supervision. We accompany this paper with code to pretrain and use VAMPIRE embeddings in downstream tasks.", "phrases": ["semi-supervised text classification", "vampire", "variational autoencoder", "unlabeled data"], "overall_score": 1.8115782874519053, "scores": [0.9970081760256275, 0.8974929996163082, 0.8252035129748997, 0.5782345500715503], "rank_score": 0.8244848096720964} -{"id": "negri-etal-2012-chinese", "title": "Chinese Whispers: Cooperative Paraphrase Acquisition", "abstract": "We present a framework for the acquisition of sentential paraphrases based on crowdsourcing. The proposed method maximizes the lexical divergence between an original sentence s and its valid paraphrases by running a sequence of paraphrasing jobs carried out by a crowd of non-expert workers. Instead of collecting direct paraphrases of s, at each step of the sequence workers manipulate semantically equivalent reformulations produced in the previous round. We applied this method to paraphrase English sentences extracted from Wikipedia. Our results show that, keeping at each round n the most promising paraphrases (i.e. those most lexically dissimilar from the ones acquired at round n-1), the monotonic increase of divergence allows us to collect good-quality paraphrases in a cost-effective manner.", "phrases": ["paraphrase", "crowdsourcing", "previous round"], "overall_score": 1.4772071729021434, "scores": [1.4209746508736165, 0.5316300192707942, 0.5207303922988256], "rank_score": 0.824445020814412} -{"id": "machacek-bojar-2013-results", "title": "Results of the WMT13 Metrics Shared Task", "abstract": "This paper presents the results of the WMT17 Metrics Shared Task. We asked participants of this task to score the outputs of the MT systems involved in the WMT17 news translation task and Neural MT training task. We collected scores of 14 metrics from 8 research groups. In addition to that, we computed scores of 7 standard metrics (BLEU, SentBLEU, NIST, WER, PER, TER and CDER) as baselines. The collected scores were evaluated in terms of system-level correlation (how well each metric\u2019s scores correlate with the WMT17 official manual ranking of systems) and in terms of segment-level correlation (how often a metric agrees with humans in judging the quality of a particular sentence). This year, we build upon two types of manual judgements: direct assessment (DA) and HUME manual semantic judgements.", "phrases": ["direct assessment", "evaluation metric", "wmt", "high correlation", "translation quality"], "overall_score": 2.746291000877384, "scores": [1.1715300768567392, 0.932930827739502, 0.8759931647963715, 0.5791956147508264, 0.5611823243532231], "rank_score": 0.8241664016993324} -{"id": "chambers-jurafsky-2008-jointly", "title": "Jointly Combining Implicit Constraints Improves Temporal Ordering", "abstract": "Previous work on ordering events in text has typically focused on local pairwise decisions, ignoring globally inconsistent labels. 
However, temporal ordering is the type of domain in which global constraints should be relatively easy to represent and reason over. This paper presents a framework that informs local decisions with two types of implicit global constraints: transitivity (A before B and B before C implies A before C) and time expression normalization (e.g. last month is before yesterday). We show how these constraints can be used to create a more densely-connected network of events, and how global consistency can be enforced by incorporating these constraints into an integer linear programming framework. We present results on two event ordering tasks, showing a 3.6% absolute increase in the accuracy of before/after classification over a pairwise model.", "phrases": ["temporal ordering", "integer linear programming", "global information", "relation extraction", "transitivity constraint"], "overall_score": 2.3817285627365345, "scores": [0.9705293195957663, 1.0991155761000915, 0.8962614722095572, 0.5830874681077092, 0.5711138218165736], "rank_score": 0.8240215315659395} -{"id": "wu-etal-2019-generating", "title": "Generating Question Relevant Captions to Aid Visual Question Answering", "abstract": "Visual question answering (VQA) and image captioning require a shared body of general knowledge connecting language and vision. We present a novel approach to better VQA performance that exploits this connection by jointly generating captions that are targeted to help answer a specific visual question. The model is trained using an existing caption dataset by automatically determining question-relevant captions using an online gradient-based method. Experimental results on the VQA v2 challenge demonstrate that our approach obtains state-of-the-art VQA performance (e.g. 68.4% in the Test-standard set using a single model) by simultaneously generating question-relevant captions.", "phrases": ["caption", "visual question", "vision-language task"], "overall_score": 1.810194938909239, "scores": [1.2776753175007347, 0.6251466281800414, 0.5687437161740652], "rank_score": 0.8238552206182804} -{"id": "chen-2009-performance", "title": "Performance Prediction for Exponential Language Models", "abstract": "We investigate the task of performance prediction for language models belonging to the exponential family. First, we attempt to empirically discover a formula for predicting test set cross-entropy for n-gram language models. We build models over varying domains, data set sizes, and n-gram orders, and perform linear regression to see whether we can model test set performance as a simple function of training set performance and various model statistics. Remarkably, we find a simple relationship that predicts test set performance with a correlation of 0.9997. We analyze why this relationship holds and show that it holds for other exponential language models as well, including class-based models and minimum discrimination information models. Finally, we discuss how this relationship can be applied to improve language model performance.", "phrases": ["exponential language model", "cross-entropy", "performance prediction"], "overall_score": 1.1419230644175253, "scores": [0.8697738841001129, 1.0485021476617524, 0.552894081405973], "rank_score": 0.8237233710559461} -{"id": "jing-etal-2018-automatic", "title": "On the Automatic Generation of Medical Imaging Reports", "abstract": "Medical imaging is widely used in clinical practice for diagnosis and treatment. 
Report-writing can be error-prone for inexperienced physicians, and time-consuming and tedious for experienced physicians. To address these issues, we study the automatic generation of medical imaging reports. This task presents several challenges. First, a complete report contains multiple heterogeneous forms of information, including findings and tags. Second, abnormal regions in medical images are difficult to identify. Third, the reports are typically long, containing multiple sentences. To cope with these challenges, we (1) build a multi-task learning framework which jointly performs the prediction of tags and the generation of paragraphs, (2) propose a co-attention mechanism to localize regions containing abnormalities and generate narrations for them, (3) develop a hierarchical LSTM model to generate long paragraphs. We demonstrate the effectiveness of the proposed methods on two publicly available datasets.", "phrases": ["automatic generation", "image", "radiologist", "medical report"], "overall_score": 2.424500842345877, "scores": [0.8372985368634789, 1.3609933371205019, 0.5505837995815195, 0.5447919615943148], "rank_score": 0.8234169087899538} -{"id": "rodriguez-etal-2010-anaphoric", "title": "Anaphoric Annotation of Wikipedia and Blogs in the Live Memories Corpus", "abstract": "The Live Memories corpus is an Italian corpus annotated for anaphoric relations. This annotation effort aims to contribute to two significant issues for CL research: the lack of annotated anaphoric resources for Italian and the increasing interest in the social Web. The Live Memories Corpus contains texts from the Italian Wikipedia about the region Trentino/S\u00fcd Tirol and from blog sites with users' comments. It is planned to add a set of articles from local newspapers. The corpus includes manually annotated information about morphosyntactic agreement, anaphoricity, and semantic class of the NPs. The anaphoric annotation includes discourse deixis, bridging relations and marks cases of ambiguity with the annotation of alternative interpretations. For the annotation of the anaphoric links the corpus takes into account specific phenomena of the Italian language like incorporated clitics and phonetically non-realized pronouns. Reliability studies for the annotation of the mentioned phenomena and for annotation of anaphoric links in general offer satisfactory results. The Wikipedia and blogs dataset will be distributed under a Creative Commons Attribution licence.", "phrases": ["wikipedia", "live memories corpus", "anaphoric annotation"], "overall_score": 0.9044972910209259, "scores": [0.8828847819987669, 0.7967532953525366, 0.790288666643568], "rank_score": 0.823308914664957} -{"id": "glavas-vulic-2018-explicit", "title": "Explicit Retrofitting of Distributional Word Vectors", "abstract": "Semantic specialization of distributional word vectors, referred to as retrofitting, is a process of fine-tuning word vectors using external lexical knowledge in order to better embed some semantic relation. Existing retrofitting models integrate linguistic constraints directly into learning objectives and, consequently, specialize only the vectors of words from the constraints. In this work, in contrast, we transform external lexico-semantic relations into training examples which we use to learn an explicit retrofitting model (ER). The ER model allows us to learn a global specialization function and specialize the vectors of words unobserved in the training data as well. 
We report large gains over original distributional vector spaces in (1) intrinsic word similarity evaluation and on (2) two downstream tasks \u2212 lexical simplification and dialog state tracking. Finally, we also successfully specialize vector spaces of new languages (i.e., unseen in the training data) by coupling ER with shared multilingual distributional vector spaces.", "phrases": ["distributional word vector", "specialization", "explicit retrofitting model"], "overall_score": 1.3249138952587243, "scores": [0.9533910971389852, 0.9221389361142027, 0.5941158319699119], "rank_score": 0.8232152884076999} -{"id": "stab-etal-2018-cross", "title": "Cross-topic Argument Mining from Heterogeneous Sources", "abstract": "Argument mining is a core technology for automating argument search in large document collections. Despite its usefulness for this task, most current approaches are designed for use only with specific text types and fall short when applied to heterogeneous texts. In this paper, we propose a new sentential annotation scheme that is reliably applicable by crowd workers to arbitrary Web texts. We source annotations for over 25,000 instances covering eight controversial topics. We show that integrating topic information into bidirectional long short-term memory networks outperforms vanilla BiLSTMs by more than 3 percentage points in F1 in two- and three-label cross-topic settings. We also show that these results can be further improved by leveraging additional data for topic relevance using multi-task learning.", "phrases": ["argumentation", "heterogeneous source", "web text", "topic information", "claim detection"], "overall_score": 2.4236602442576056, "scores": [0.8399069462232113, 1.1910897385844357, 0.9598572541330476, 0.5647678698461152, 0.560035301797521], "rank_score": 0.8231314221168662} -{"id": "nakov-hearst-2005-using", "title": "Using the Web as an Implicit Training Set: Application to Structural Ambiguity Resolution", "abstract": "Recent work has shown that very large corpora can act as training data for NLP algorithms even without explicit labels. In this paper we show how the use of surface features and paraphrases in queries against search engines can be used to infer labels for structural ambiguity resolution tasks. Using unsupervised algorithms, we achieve 84% precision on PP-attachment and 80% on noun compound coordination.", "phrases": ["web", "paraphrase", "unsupervised algorithm", "noun"], "overall_score": 2.422343940599176, "scores": [1.3689125495782137, 0.845963527693732, 0.5527697651984267, 0.5230916565759585], "rank_score": 0.8226843747615827} -{"id": "frontini-etal-2012-verb", "title": "Verb interpretation for basic action types: annotation, ontology induction and creation of prototypical scenes", "abstract": "In the last 20 years dictionaries and lexicographic resources such as WordNet have started to be enriched with multimodal content. Short videos depicting basic actions support the user\u2019s need (especially in second language acquisition) to fully understand the range of applicability of verbs. The IMAGACT project has among its results a repository of action verbs ontologically organised around prototypical action scenes in the form of both video recordings and 3D animations. 
The creation of the IMAGACT ontology, which consists in deriving action types from corpus instances of action verbs, intra- and cross-linguistically validating them and producing the prototypical scenes thereof, is the preliminary step for the creation of a resource that users can browse by verb, learning how to match different action prototypes with the correct verbs in the target language. The mapping of IMAGACT types onto WordNet synsets allows for a mutual enrichment of both resources.", "phrases": ["action type", "creation", "prototypical scene"], "overall_score": 0.9033963823390143, "scores": [0.8380024789380144, 0.8256217373490864, 0.8032962569061404], "rank_score": 0.822306824397747} -{"id": "huang-carley-2018-parameterized", "title": "Parameterized Convolutional Neural Networks for Aspect Level Sentiment Classification", "abstract": "We introduce a novel parameterized convolutional neural network for aspect level sentiment classification. Using parameterized filters and parameterized gates, we incorporate aspect information into convolutional neural networks (CNN). Experiments demonstrate that our parameterized filters and parameterized gates effectively capture the aspect-specific features, and our CNN-based models achieve excellent results on SemEval 2014 datasets.", "phrases": ["convolutional neural network", "sentiment classification", "cnn"], "overall_score": 1.7089023601354625, "scores": [1.0065790740815583, 0.8439994487317449, 0.614846437517574], "rank_score": 0.8218083201102925} -{"id": "roy-etal-2015-reasoning", "title": "Reasoning about Quantities in Natural Language", "abstract": "Little work from the Natural Language Processing community has targeted the role of quantities in Natural Language Understanding. This paper takes some key steps towards facilitating reasoning about quantities expressed in natural language. We investigate two different tasks of numerical reasoning. First, we consider Quantity Entailment, a new task formulated to understand the role of quantities in general textual inference tasks. Second, we consider the problem of automatically understanding and solving elementary school math word problems. 
In order to address these quantitative reasoning problems, we first develop a computational approach which we show to successfully recognize and normalize textual expressions of quantities. We then use these capabilities to further develop algorithms to assist reasoning in the context of the aforementioned tasks.", "phrases": ["quantity", "word problem", "operand"], "overall_score": 1.9703238115422836, "scores": [1.3497669815472584, 0.5846061172127229, 0.5306934542628625], "rank_score": 0.8216888510076146} -{"id": "korkontzelos-etal-2013-semeval", "title": "SemEval-2013 Task 5: Evaluating Phrasal Semantics", "abstract": "This paper describes SemEval-2013 Task 5: \u201cEvaluating Phrasal Semantics\u201d. Its first subtask is about computing the semantic similarity of words and compositional phrases of minimal length. The second one addresses deciding the compositionality of phrases in a given context. The paper discusses the importance and background of these subtasks and their structure. It then introduces the participating systems and discusses the evaluation results.", "phrases": ["evaluating phrasal semantics", "semantic similarity", "semeval-2013 task"], "overall_score": 1.3224490000642117, "scores": [0.9657897486900173, 0.9503741671211545, 0.5488873729287916], "rank_score": 0.8216837629133211} -{"id": "wang-etal-2020-sentiment", "title": "Sentiment Forecasting in Dialog", "abstract": "Sentiment forecasting in dialog aims to predict the polarity of the next utterance to come, and can help speakers revise their utterances during sentimental utterance generation. However, the polarity of the next utterance is normally hard to predict, since its content is not yet available. In this study, we propose a Neural Sentiment Forecasting (NSF) model to address these inherent challenges. In particular, we employ a neural simulation model to simulate the next utterance based on the context (previous utterances encountered). Moreover, we employ a sequence influence model to learn both pair-wise and seq-wise influence. Empirical studies illustrate the importance of the proposed sentiment forecasting task, and justify the effectiveness of our NSF model over several strong baselines.", "phrases": ["dialog", "next utterance", "sentiment forecasting"], "overall_score": 0.9026429184097413, "scores": [0.9834055288214503, 0.8961039453084794, 0.5853535017904937], "rank_score": 0.8216209919734744} -{"id": "li-2015-abstractive", "title": "Abstractive Multi-document Summarization with Semantic Information Extraction", "abstract": "This paper proposes a novel approach to generate abstractive summary for multiple documents by extracting semantic information from texts. The concept of Basic Semantic Unit (BSU) is defined to describe the semantics of an event or action. A semantic link network on BSUs is constructed to capture the semantic information of texts. Summary structure is planned with sentences generated based on the semantic link network. 
Experiments demonstrate that the approach is effective in generating informative, coherent and compact summaries.", "phrases": ["basic semantic unit", "compact summary", "abstractive multi-document summarization"], "overall_score": 1.5984794557255129, "scores": [1.0157452189402782, 0.8888412691137209, 0.5597813397743283], "rank_score": 0.8214559426094424} -{"id": "bicici-way-2014-rtm", "title": "RTM-DCU: Referential Translation Machines for Semantic Similarity", "abstract": "We use referential translation machines (RTMs) for predicting the semantic similarity of text. RTMs are a computational model for identifying the translation acts between any two data sets with respect to interpretants selected in the same domain, which are effective when making monolingual and bilingual similarity judgments. RTMs judge the quality or the semantic similarity of text by using retrieved relevant training data as interpretants for reaching shared semantics. We derive features measuring the closeness of the test sentences to the training data via interpretants, the difficulty of translating them, and the presence of the acts of translation, which may ubiquitously be observed in communication. RTMs provide a language-independent approach to all similarity tasks and achieve top performance when predicting monolingual cross-level semantic similarity (Task 3) and good results in semantic relatedness and entailment (Task 1) and multilingual semantic textual similarity (STS) (Task 10). RTMs remove the need to access any task- or domain-specific information or resource.", "phrases": ["referential translation machine", "semantic similarity", "sts task"], "overall_score": 1.138447786536386, "scores": [0.9119999352726058, 0.9945614175002867, 0.5570881111476991], "rank_score": 0.8212164879735306} -{"id": "banko-etzioni-2008-tradeoffs", "title": "The Tradeoffs Between Open and Traditional Relation Extraction", "abstract": "Traditional Information Extraction (IE) takes a relation name and hand-tagged examples of that relation as input. Open IE is a relation-independent extraction paradigm that is tailored to massive and heterogeneous corpora such as the Web. An Open IE system extracts a diverse set of relational tuples from text without any relation-specific input. How is Open IE possible? We analyze a sample of English sentences to demonstrate that numerous relationships are expressed using a compact set of relation-independent lexico-syntactic patterns, which can be learned by an Open IE system. What are the tradeoffs between Open IE and traditional IE? We consider this question in the context of two tasks. First, when the number of relations is massive, and the relations themselves are not pre-specified, we argue that Open IE is necessary. We then present a new model for Open IE called O-CRF and show that it achieves increased precision and nearly double the recall of the model employed by TEXTRUNNER, the previous state-of-the-art Open IE system. Second, when the number of target relations is small, and their names are known in advance, we show that O-CRF is able to match the precision of a traditional extraction system, though at substantially lower recall. 
Finally, we show how to combine the two types of systems into a hybrid that achieves higher precision than a traditional extractor, with comparable recall.", "phrases": ["traditional relation extraction", "hand-tagged example", "tuple", "textrunner", "low recall"], "overall_score": 3.007957123431693, "scores": [0.9514411422683751, 1.2198243623216969, 0.8412781313515271, 0.5623226539127063, 0.530369835501244], "rank_score": 0.8210472250711097} -{"id": "pichotta-denero-2013-identifying", "title": "Identifying Phrasal Verbs Using Many Bilingual Corpora", "abstract": "We address the problem of identifying multiword expressions in a language, focusing on English phrasal verbs. Our polyglot ranking approach integrates frequency statistics from translated corpora in 50 different languages. Our experimental evaluation demonstrates that combining statistical evidence from many parallel corpora using a novel ranking-oriented boosting algorithm produces a comprehensive set of English phrasal verbs, achieving performance comparable to a human-curated set.", "phrases": ["phrasal verb", "bilingual corpora", "frequency statistic"], "overall_score": 1.3213897940410393, "scores": [0.9263081978003733, 1.007410111159193, 0.529358614665118], "rank_score": 0.8210256412082281} -{"id": "elliott-etal-2016-multi30k", "title": "Multi30K: Multilingual English-German Image Descriptions", "abstract": "We introduce the Multi30K dataset to stimulate multilingual multimodal research. Recent advances in image description have been demonstrated on English-language datasets almost exclusively, but image description should not be limited to English. This dataset extends the Flickr30K dataset with i) German translations created by professional translators over a subset of the English descriptions, and ii) descriptions crowdsourced independently of the original English descriptions. We outline how the data can be used for multilingual image description and multimodal machine translation, but we anticipate the data will be useful for a broader range of tasks.", "phrases": ["english-german image descriptions", "multimodal machine translation", "mmt model", "french"], "overall_score": 2.7917872872397043, "scores": [0.9527334482882966, 0.9332168548417877, 0.8461694043442782, 0.5511796415041623], "rank_score": 0.8208248372446312} -{"id": "spitkovsky-etal-2010-viterbi", "title": "Viterbi Training Improves Unsupervised Dependency Parsing", "abstract": "We show that Viterbi (or \"hard\") EM is well-suited to unsupervised grammar induction. It is more accurate than standard inside-outside re-estimation (classic EM), significantly faster, and simpler. Our experiments with Klein and Manning's Dependency Model with Valence (DMV) attain state-of-the-art performance --- 44.8% accuracy on Section 23 (all sentences) of the Wall Street Journal corpus --- without clever initialization; with a good initializer, Viterbi training improves to 47.9%. This generalizes to the Brown corpus, our held-out set, where accuracy reaches 50.8% --- a 7.5% gain over previous best results. We find that classic EM learns better from short sentences but cannot cope with longer ones, where Viterbi thrives. 
However, we explain that both algorithms optimize the wrong objectives and prove that there are fundamental disconnects between the likelihoods of sentences, best parses, and true parses, beyond the well-established discrepancies between likelihood, accuracy and extrinsic performance.", "phrases": ["unsupervised dependency parsing", "objective", "viterbi training"], "overall_score": 2.1658851333936493, "scores": [0.9678373717276727, 0.9409825300379993, 0.5532923559957856], "rank_score": 0.8207040859204859} -{"id": "forbes-choi-2017-verb", "title": "Verb Physics: Relative Physical Knowledge of Actions and Objects", "abstract": "Learning commonsense knowledge from natural language text is nontrivial due to reporting bias: people rarely state the obvious, e.g., \u201cMy house is bigger than me.\u201d However, while rarely stated explicitly, this trivial everyday knowledge does influence the way people talk about the world, which provides indirect clues to reason about the world. For example, a statement like, \u201cTyler entered his house\u201d implies that his house is bigger than Tyler. In this paper, we present an approach to infer relative physical knowledge of actions and objects along five dimensions (e.g., size, weight, and strength) from unstructured natural language text. We frame knowledge acquisition as joint inference over two closely related problems: learning (1) relative physical knowledge of object pairs and (2) physical implications of actions when applied to those object pairs. Empirical results demonstrate that it is possible to extract knowledge of actions and objects from language and that joint inference over different types of knowledge improves performance.", "phrases": ["relative physical knowledge", "object", "dimension"], "overall_score": 2.221658490053648, "scores": [0.8770640443149259, 1.0557083556967428, 0.5283989133739606], "rank_score": 0.8203904377952097} -{"id": "ostling-2016-morphological", "title": "Morphological reinflection with convolutional neural networks", "abstract": "We present a system for morphological reinflection based on an encoder-decoder neural network model with extra convolutional layers. In spite of its simplicity, the method performs reasonably well on all the languages of the SIGMORPHON 2016 shared task, particularly for the most challenging problem of limited-resources reinflection (track 2, task 3). We also find that using only convolution achieves surprisingly good results in this task, surpassing the accuracy of our encoder-decoder model for several languages.", "phrases": ["convolutional neural network", "morphological reinflection", "neural sequence-to-sequence model"], "overall_score": 1.3202651399483274, "scores": [0.9641935310640104, 0.9174634652693596, 0.5793235666600225], "rank_score": 0.8203268543311308} -{"id": "culotta-etal-2006-integrating", "title": "Integrating Probabilistic Extraction Models and Data Mining to Discover Relations and Patterns in Text", "abstract": "In order for relation extraction systems to obtain human-level performance, they must be able to incorporate relational patterns inherent in the data (for example, that one's sister is likely one's mother's daughter, or that children are likely to attend the same college as their parents). Hand-coding such knowledge can be time-consuming and inadequate. Additionally, there may exist many interesting, unknown relational patterns that both improve extraction performance and provide insight into text. 
We describe a probabilistic extraction model that provides mutual benefits to both \"top-down\" relational pattern discovery and \"bottom-up\" relation extraction.", "phrases": ["relation extraction", "contextual pattern", "wikipedia"], "overall_score": 2.103905571659631, "scores": [1.3947745438755148, 0.5363327580399438, 0.5296495534266803], "rank_score": 0.8202522851140465} -{"id": "dahlmeier-etal-2012-nus", "title": "NUS at the HOO 2012 Shared Task", "abstract": "This paper describes the submission of the National University of Singapore (NUS) to the HOO 2012 shared task. Our system uses a pipeline of confidence-weighted linear classifiers to correct determiner and preposition errors. Our system achieves the highest correction F1 score on the official test set among all 14 participating teams, based on gold-standard edits both before and after revision.", "phrases": ["hoo", "preposition error", "word usage", "ill-formed grammatical construction", "error correction"], "overall_score": 1.7047753981817668, "scores": [0.8629612195877646, 0.9419588855673724, 0.9192210405835586, 0.8399380996159099, 0.5350391092892993], "rank_score": 0.8198236709287811} -{"id": "gupta-ji-2009-predicting", "title": "Predicting Unknown Time Arguments based on Cross-Event Propagation", "abstract": "Many events in news articles don't include time arguments. This paper describes two methods, one based on rules and the other based on statistical learning, to predict the unknown time argument for an event by the propagation from its related events. The results are promising - the rule based approach was able to correctly predict 74% of the unknown event time arguments with 70% precision.", "phrases": ["propagation", "time information", "cross-event information", "ace event task", "more clue"], "overall_score": 2.2195396711898185, "scores": [0.8076055549160799, 1.2540472849939062, 0.945569650321754, 0.5570565466470127, 0.5337610775298322], "rank_score": 0.819608022881717} -{"id": "sagae-lavie-2006-best", "title": "A Best-First Probabilistic Shift-Reduce Parser", "abstract": "Recently proposed deterministic classifier-based parsers (Nivre and Scholz, 2004; Sagae and Lavie, 2005; Yamada and Matsumoto, 2003) offer attractive alternatives to generative statistical parsers. Deterministic parsers are fast, efficient, and simple to implement, but generally less accurate than optimal (or nearly optimal) statistical parsers. We present a statistical shift-reduce parser that bridges the gap between deterministic and probabilistic parsers. The parsing model is essentially the same as one previously used for deterministic parsing, but the parser performs a best-first search instead of a greedy search. Using the standard sections of the WSJ corpus of the Penn Treebank for training and testing, our parser has 88.1% precision and 87.8% recall (using automatically assigned part-of-speech tags). 
Perhaps more interestingly, the parsing model is significantly different from the generative models used by other well-known accurate parsers, allowing for a simple combination that produces precision and recall of 90.9% and 90.7%, respectively.", "phrases": ["shift-reduce parser", "best-first search", "penn treebank"], "overall_score": 2.0361365892165657, "scores": [0.9689222568369974, 0.91014563898395, 0.5791370303765158], "rank_score": 0.819401642065821} -{"id": "venkatapathy-joshi-2006-using", "title": "Using Information about Multi-word Expressions for the Word-Alignment Task", "abstract": "It is well known that multi-word expressions are problematic in natural language processing. In previous literature, it has been suggested that information about their degree of compositionality can be helpful in various applications but it has not been proven empirically. In this paper, we propose a framework in which information about the multi-word expressions can be used in the word-alignment task. We have shown that even simple features like point-wise mutual information are useful for word-alignment task in English-Hindi parallel corpora. The alignment error rate which we achieve (AER = 0.5040) is significantly better (about 10% decrease in AER) than the alignment error rates of the state-of-the-art models (Och and Ney, 2003) (Best AER = 0.5518) on the English-Hindi dataset.", "phrases": ["word-alignment task", "verb-based multi-word expression", "compositionality information"], "overall_score": 1.4680760752920876, "scores": [0.9467819492894394, 0.9694493118308783, 0.5418153134981716], "rank_score": 0.8193488582061631} -{"id": "dev-etal-2021-oscar", "title": "OSCaR: Orthogonal Subspace Correction and Rectification of Biases in Word Embeddings", "abstract": "Language representations are known to carry stereotypical biases and, as a result, lead to biased predictions in downstream tasks. While existing methods are effective at mitigating biases by linear projection, such methods are too aggressive: they not only remove bias, but also erase valuable information from word embeddings. We develop new measures for evaluating specific information retention that demonstrate the tradeoff between bias removal and information retention. To address this challenge, we propose OSCaR (Orthogonal Subspace Correction and Rectification), a bias-mitigating method that focuses on disentangling biased associations between concepts instead of removing concepts wholesale. Our experiments on gender biases show that OSCaR is a well-balanced approach that ensures that semantic information is retained in the embeddings and bias is also effectively mitigated.", "phrases": ["orthogonal subspace correction", "rectification", "oscar"], "overall_score": 1.1356134993855518, "scores": [0.8788692321562099, 0.7974839474397603, 0.7811627662991757], "rank_score": 0.8191719819650487} -{"id": "daume-iii-marcu-2005-large", "title": "A Large-Scale Exploration of Effective Global Features for a Joint Entity Detection and Tracking Model", "abstract": "Entity detection and tracking (EDT) is the task of identifying textual mentions of real-world entities in documents, extending the named entity detection and coreference resolution task by considering mentions other than names (pronouns, definite descriptions, etc.). Like NE tagging and coreference resolution, most solutions to the EDT task separate out the mention detection aspect from the coreference aspect. By doing so, these solutions are limited to using only local features for learning. 
In contrast, by modeling both aspects of the EDT task simultaneously, we are able to learn using highly complex, non-local features. We develop a new joint EDT model and explore the utility of many features, demonstrating their effectiveness on this task.", "phrases": ["entity detection", "coreference resolution", "search optimization framework"], "overall_score": 2.100935863841219, "scores": [1.2993163524341727, 0.6075645402610558, 0.5504025515913215], "rank_score": 0.81909448142885} -{"id": "reiter-belz-2009-investigation", "title": "An Investigation into the Validity of Some Metrics for Automatically Evaluating Natural Language Generation Systems", "abstract": "There is growing interest in using automatically computed corpus-based evaluation metrics to evaluate Natural Language Generation (NLG) systems, because these are often considerably cheaper than the human-based evaluations which have traditionally been used in NLG. We review previous work on NLG evaluation and on validation of automatic metrics in NLP, and then present the results of two studies of how well some metrics which are popular in other areas of NLP (notably BLEU and ROUGE) correlate with human judgments in the domain of computer-generated weather forecasts. Our results suggest that, at least in this domain, metrics may provide a useful measure of language quality, although the evidence for this is not as strong as we would ideally like to see; however, they do not provide a useful measure of content quality. We also discuss a number of caveats which must be kept in mind when interpreting this and other validation studies.", "phrases": ["nlg", "automatic metric", "text quality"], "overall_score": 2.2707227225170183, "scores": [1.0418755851040535, 0.8704263754146139, 0.5446683477382236], "rank_score": 0.8189901027522971} -{"id": "belz-etal-2020-disentangling", "title": "Disentangling the Properties of Human Evaluation Methods: A Classification System to Support Comparability, Meta-Evaluation and Reproducibility Testing", "abstract": "Current standards for designing and reporting human evaluations in NLP mean it is generally unclear which evaluations are comparable and can be expected to yield similar results when applied to the same system outputs. This has serious implications for reproducibility testing and meta-evaluation, in particular given that human evaluation is considered the gold standard against which the trustworthiness of automatic metrics is gauged. Using examples from NLG, we propose a classification system for evaluations based on disentangling (i) what is being evaluated (which aspect of quality), and (ii) how it is evaluated in specific (a) evaluation modes and (b) experimental designs. 
We show that this approach provides a basis for determining comparability, hence for comparison of evaluations across papers, meta-evaluation experiments, reproducibility testing.", "phrases": ["classification system", "meta-evaluation", "reproducibility testing", "natural language generation"], "overall_score": 1.5935336607600146, "scores": [0.9784227790530833, 0.9154476959359308, 0.8469787164502909, 0.5348080356605833], "rank_score": 0.8189143067749721} -{"id": "do-etal-2022-text", "title": "Text-to-Speech for Under-Resourced Languages: Phoneme Mapping and Source Language Selection in Transfer Learning", "abstract": "We propose a new approach for phoneme mapping in cross-lingual transfer learning for text-to-speech (TTS) in under-resourced languages (URLs), using phonological features from the PHOIBLE database and a language-independent mapping rule. This approach was validated through our experiment, in which we pre-trained acoustic models in Dutch, Finnish, French, Japanese, and Spanish, and fine-tuned them with 30 minutes of Frisian training data. The experiment showed an improvement in both naturalness and pronunciation accuracy in the synthesized Frisian speech when our mapping approach was used. Since this improvement also depended on the source language, we then experimented on finding a good criterion for selecting source languages. As an alternative to the traditionally used language family criterion, we tested a novel idea of using Angular Similarity of Phoneme Frequencies (ASPF), which measures the similarity between the phoneme systems of two languages. ASPF was empirically confirmed to be more effective than language family as a criterion for source language selection, and also to affect the phoneme mapping's effectiveness. Thus, a combination of our phoneme mapping approach and the ASPF measure can be beneficially adopted by other studies involving multilingual or cross-lingual TTS for URLs.", "phrases": ["phoneme mapping", "source language selection", "text-to-speech"], "overall_score": 0.8995007632956511, "scores": [0.8549048352502684, 0.8112123362117897, 0.7901654659353915], "rank_score": 0.8187608791324833} -{"id": "bao-etal-2016-constraint", "title": "Constraint-Based Question Answering with Knowledge Graph", "abstract": "WebQuestions and SimpleQuestions are two benchmark data-sets commonly used in recent knowledge-based question answering (KBQA) work. Most questions in them are `simple' questions which can be answered based on a single relation in the knowledge base. Such data-sets lack the capability of evaluating KBQA systems on complicated questions. Motivated by this issue, we release a new data-set, namely ComplexQuestions, aiming to measure the quality of KBQA systems on `multi-constraint' questions which require multiple knowledge base relations to get the answer. Beside, we propose a novel systematic KBQA approach to solve multi-constraint questions. 
Compared to state-of-the-art methods, our approach not only obtains comparable results on the two existing benchmark data-sets, but also achieves significant improvements on the ComplexQuestions.", "phrases": ["knowledge graph", "complex question", "query graph", "reasoning", "semantic parsing"], "overall_score": 2.452407653197738, "scores": [0.8788857787301955, 0.9251982081361094, 0.8963219124603379, 0.8458539756465423, 0.5469090554538344], "rank_score": 0.8186337860854038} -{"id": "zhang-etal-2007-partial", "title": "Partial Parse Selection for Robust Deep Processing", "abstract": "This paper presents an approach to partial parse selection for robust deep processing. The work is based on a bottom-up chart parser for HPSG parsing. Following the definition of partial parses in (Kasper et al., 1999), different partial parse selection methods are presented and evaluated on the basis of multiple metrics, from both the syntactic and semantic viewpoints. The application of the partial parsing in spontaneous speech texts processing shows promising competence of the method.", "phrases": ["robust deep processing", "definition", "partial parse selection"], "overall_score": 1.317497554755061, "scores": [0.9778411700761033, 0.9466886191754988, 0.5312919816469512], "rank_score": 0.8186072569661844} -{"id": "kuhlmann-satta-2009-treebank", "title": "Treebank Grammar Techniques for Non-Projective Dependency Parsing", "abstract": "An open problem in dependency parsing is the accurate and efficient treatment of non-projective structures. We propose to attack this problem using chart-parsing algorithms developed for mildly context-sensitive grammar formalisms. In this paper, we provide two key tools for this approach. First, we show how to reduce non-projective dependency parsing to parsing with Linear Context-Free Rewriting Systems (LCFRS), by presenting a technique for extracting LCFRS from dependency treebanks. For efficient parsing, the extracted grammars need to be transformed in order to minimize the number of nonterminal symbols per production. Our second contribution is an algorithm that computes this transformation for a large, empirically relevant class of grammars.", "phrases": ["non-projective dependency", "chart-parsing algorithm", "lcfrs", "dependency treebank"], "overall_score": 1.88463881008777, "scores": [1.0569620943588232, 1.0054695403770872, 0.6236949916667991, 0.5878263160046073], "rank_score": 0.818488235601829} -{"id": "andreas-etal-2016-learning", "title": "Learning to Compose Neural Networks for Question Answering", "abstract": "We describe a question answering model that applies to both images and structured knowledge bases. The model uses natural language strings to automatically assemble neural networks from a collection of composable modules. Parameters for these modules are learned jointly with network-assembly parameters via reinforcement learning, with only (world, question, answer) triples as supervision. 
Our approach, which we term a dynamic neural model network, achieves state-of-the-art results on benchmark datasets in both visual and structured domains.", "phrases": ["module", "reinforcement learning", "neural modular network"], "overall_score": 2.4908488767613632, "scores": [1.0578082212882176, 0.8346107793534424, 0.5620042408148136], "rank_score": 0.8181410804854913} -{"id": "lu-ng-2010-better", "title": "Better Punctuation Prediction with Dynamic Conditional Random Fields", "abstract": "This paper focuses on the task of inserting punctuation symbols into transcribed conversational speech texts, without relying on prosodic cues. We investigate limitations associated with previous methods, and propose a novel approach based on dynamic conditional random fields. Different from previous work, our proposed approach is designed to jointly perform both sentence boundary and sentence type prediction, and punctuation prediction on speech utterances. \n \nWe performed evaluations on a transcribed conversational speech domain consisting of both English and Chinese texts. Empirical results show that our method outperforms an approach based on linear-chain conditional random fields and other previous approaches.", "phrases": ["punctuation prediction", "conditional random field", "prosodic cue", "machine translation"], "overall_score": 2.03266233792314, "scores": [1.310288943947459, 0.9088107803575546, 0.5273311325883063, 0.5255831450758185], "rank_score": 0.8180035004922847} -{"id": "xiong-etal-2008-linguistically", "title": "Linguistically Annotated BTG for Statistical Machine Translation", "abstract": "Bracketing Transduction Grammar (BTG) is a natural choice for effective integration of desired linguistic knowledge into statistical machine translation (SMT). In this paper, we propose a Linguistically Annotated BTG (LABTG) for SMT. It conveys linguistic knowledge of source-side syntax structures to BTG hierarchical structures through linguistic annotation. From the linguistically annotated data, we learn annotated BTG rules and train linguistically motivated phrase translation model and reordering model. We also present an annotation algorithm that captures syntactic information for BTG nodes. The experiments show that the LABTG approach significantly outperforms a baseline BTG-based system and a state-of-the-art phrase-based system on the NIST MT-05 Chinese-to-English translation task. Moreover, we empirically demonstrate that the proposed method achieves better translation selection and phrase reordering.", "phrases": ["statistical machine translation", "labtg", "linguistically annotated btg"], "overall_score": 1.31622895676499, "scores": [0.9920851674457325, 0.937891915089574, 0.5234800156158173], "rank_score": 0.8178190327170413} -{"id": "li-etal-2012-exploiting", "title": "Exploiting Multiple Treebanks for Parsing with Quasi-synchronous Grammars", "abstract": "We present a simple and effective framework for exploiting multiple monolingual treebanks with different annotation guidelines for parsing. Several types of transformation patterns (TP) are designed to capture the systematic annotation inconsistencies among different tree-banks. Based on such TPs, we design quasi-synchronous grammar features to augment the baseline parsing models. Our approach can significantly advance the state-of-the-art parsing accuracy on two widely used target tree-banks (Penn Chinese Treebank 5.1 and 6.0) using the Chinese Dependency Treebank as the source treebank. 
The improvements are respectively 1.37% and 1.10% with automatic part-of-speech tags. Moreover, an indirect comparison indicates that our approach also outperforms previous work based on treebank conversion.", "phrases": ["treebank", "several type", "annotation inconsistency", "quasi-synchronous grammar feature"], "overall_score": 1.7004736075125586, "scores": [1.1902736061936514, 1.0121141045016382, 0.5364421182929764, 0.532189958639646], "rank_score": 0.817754946906978} -{"id": "pham-etal-2017-nnvlp", "title": "NNVLP: A Neural Network-Based Vietnamese Language Processing Toolkit", "abstract": "This paper demonstrates neural network-based toolkit namely NNVLP for essential Vietnamese language processing tasks including part-of-speech (POS) tagging, chunking, Named Entity Recognition (NER). Our toolkit is a combination of bidirectional Long Short-Term Memory (Bi-LSTM), Convolutional Neural Network (CNN), Conditional Random Field (CRF), using pre-trained word embeddings as input, which outperforms previously published toolkits on these three tasks. We provide both of API and web demo for this toolkit.", "phrases": ["neural network-based toolkit", "language processing task", "entity recognition", "nnvlp", "feature-based model"], "overall_score": 1.3160058247562685, "scores": [1.7701825775742372, 0.6230842764192254, 0.6099437351576885, 0.5568933945354805, 0.5282979813383888], "rank_score": 0.8176803930050041} -{"id": "salesky-etal-2021-robust", "title": "Robust Open-Vocabulary Translation from Visual Text Representations", "abstract": "Machine translation models have discrete vocabularies and commonly use subword segmentation techniques to achieve an `open vocabulary.' This approach relies on consistent and correct underlying unicode sequences, and makes models susceptible to degradation from common types of noise and variation. Motivated by the robustness of human language processing, we propose the use of visual text representations, which dispense with a finite set of text embeddings in favor of continuous vocabularies created by processing visually rendered text with sliding windows. We show that models using visual text representations approach or match performance of traditional text models on small and larger datasets. More importantly, models with visual embeddings demonstrate significant robustness to varied types of noise, achieving e.g., 25.9 BLEU on a character permuted German\u2013English task where subword models degrade to 1.9.", "phrases": ["open-vocabulary translation", "visual text representation", "noise"], "overall_score": 1.1331334578869297, "scores": [0.990565795331868, 0.9265282610371613, 0.5350549741692255], "rank_score": 0.8173830101794183} -{"id": "wang-etal-2021-secoco-self", "title": "Secoco: Self-Correcting Encoding for Neural Machine Translation", "abstract": "This paper presents Self-correcting Encoding (Secoco), a framework that effectively deals with noisy input for robust neural machine translation by introducing self-correcting predictors. Different from previous robust approaches, Secoco enables NMT to explicitly correct noisy inputs and delete specific errors simultaneously with the translation decoding process. Secoco is able to achieve significant improvements over strong baselines on two real-world test sets and a benchmark WMT dataset with good interpretability. 
We will make our code and dataset publicly available soon.", "phrases": ["self-correcting encoding", "neural machine translation", "noisy input", "secoco"], "overall_score": 0.8979211091206019, "scores": [0.9450017362396016, 0.931861649440771, 0.8492028362771248, 0.543225841793897], "rank_score": 0.8173230159378486} -{"id": "maletti-2012-every", "title": "Every sensible extended top-down tree transducer is a multi bottom-up tree transducer", "abstract": "A tree transformation is sensible if the size of each output tree is uniformly bounded by a linear function in the size of the corresponding input tree. Every sensible tree transformation computed by an arbitrary weighted extended top-down tree transducer can also be computed by a weighted multi bottom-up tree transducer. This further motivates weighted multi bottom-up tree transducers as suitable translation models for syntax-based machine translation.", "phrases": ["top-down tree transducer", "sensible translation", "mbot"], "overall_score": 1.315385628208145, "scores": [0.9991179571670561, 0.9155380487472582, 0.5372291237557711], "rank_score": 0.8172950432233618} -{"id": "sultan-etal-2014-back", "title": "Back to Basics for Monolingual Alignment: Exploiting Word Similarity and Contextual Evidence", "abstract": "We present a simple, easy-to-replicate monolingual aligner that demonstrates state-of-the-art performance while relying on almost no supervision and a very small number of external resources. Based on the hypothesis that words with similar meanings represent potential pairs for alignment if located in similar contexts, we propose a system that operates by finding such pairs. In two intrinsic evaluations on alignment test data, our system achieves F1 scores of 88\u201392%, demonstrating 1\u20133% absolute improvement over the previous best system. Moreover, in two extrinsic evaluations our aligner outperforms existing aligners, and even a naive application of the aligner approaches state-of-the-art performance in each extrinsic task.", "phrases": ["monolingual alignment", "contextual evidence", "dependency type", "equivalence"], "overall_score": 2.030770713854801, "scores": [0.9748802629155119, 0.8478470707374985, 0.881375817682793, 0.5648658685314922], "rank_score": 0.8172422549668239} -{"id": "zens-ney-2006-n", "title": "N-Gram Posterior Probabilities for Statistical Machine Translation", "abstract": "Word posterior probabilities are a common approach for confidence estimation in automatic speech recognition and machine translation. We will generalize this idea and introduce n-gram posterior probabilities and show how these can be used to improve translation quality. Additionally, we will introduce a sentence length model based on posterior probabilities. \n \nWe will show significant improvements on the Chinese-English NIST task. The absolute improvements of the BLEU score is between 1.1% and 1.6%.", "phrases": ["posterior probability", "n-gram", "performance improvement"], "overall_score": 1.4642113897818985, "scores": [1.2369118911790422, 0.651749800817449, 0.5629141164674529], "rank_score": 0.8171919361546479} -{"id": "yih-etal-2011-learning", "title": "Learning Discriminative Projections for Text Similarity Measures", "abstract": "Traditional text similarity measures consider each term similar only to itself and do not model semantic relatedness of terms. We propose a novel discriminative training method that projects the raw term vectors into a common, low-dimensional vector space. 
Our approach operates by finding the optimal matrix to minimize the loss of the pre-selected similarity function (e.g., cosine) of the projected vectors, and is able to efficiently handle a large number of training examples in the high-dimensional space. Evaluated on two very different tasks, cross-lingual document retrieval and ad relevance measure, our method not only outperforms existing state-of-the-art approaches, but also achieves high accuracy at low dimensions and is thus more efficient.", "phrases": ["text similarity measure", "different language", "s2net", "query"], "overall_score": 2.030278761717402, "scores": [0.9397919823494789, 0.8744246649838597, 0.8655575628834203, 0.5884029052344232], "rank_score": 0.8170442788627955} -{"id": "yamamoto-isahara-2007-extracting", "title": "Extracting Word Sets with Non-Taxonomical Relation", "abstract": "At least two kinds of relations exist among related words: taxonomical relations and thematic relations. Both relations identify related words useful to language understanding and generation, information retrieval, and so on. However, although words with taxonomical relations are easy to identify from linguistic resources such as dictionaries and thesauri, words with thematic relations are difficult to identify because they are rarely maintained in linguistic resources. In this paper, we sought to extract thematically (non-taxonomically) related word sets among words in documents by employing case-marking particles derived from syntactic analysis. We then verified the usefulness of word sets with non-taxonomical relation that seems to be a thematic relation for information retrieval.", "phrases": ["word set", "non-taxonomical relation", "thematic relation"], "overall_score": 0.8975224108013642, "scores": [0.9578014717862666, 0.9276567125940958, 0.5654221308839027], "rank_score": 0.8169601050880884} -{"id": "komninos-manandhar-2017-feature", "title": "Feature-Rich Networks for Knowledge Base Completion", "abstract": "We propose jointly modelling Knowledge Bases and aligned text with Feature-Rich Networks. Our models perform Knowledge Base Completion by learning to represent and compose diverse feature types from partially aligned and noisy resources. We perform experiments on Freebase utilizing additional entity type information and syntactic textual relations. Our evaluation suggests that the proposed models can better incorporate side information than previously proposed combinations of bilinear models with convolutional neural networks, showing large improvements when scoring the plausibility of unobserved facts with associated textual mentions.", "phrases": ["knowledge base completion", "mention", "feature-rich networks"], "overall_score": 0.8972526492154577, "scores": [0.9591946065547813, 0.9098150438402768, 0.5811340221372194], "rank_score": 0.8167145575107592} -{"id": "losnegaard-etal-2016-parseme", "title": "PARSEME Survey on MWE Resources", "abstract": "This paper summarizes the preliminary results of an ongoing survey on multiword resources carried out within the IC1207 Cost Action PARSEME (PARSing and Multi-word Expressions). Despite the availability of language resource catalogs and the inventory of multiword datasets on the SIGLEX-MWE website, multiword resources are scattered and difficult to find. In many cases, language resources such as corpora, treebanks, or lexical databases include multiwords as part of their data or take them into account in their annotations. 
However, these resources need to be centralized to make them accessible. The aim of this survey is to create a portal where researchers can easily find multiword(-aware) language resources for their research. We report on the design of the survey and analyze the data gathered so far. We also discuss the problems we have detected upon examination of the data as well as possible ways of enhancing the survey.", "phrases": ["survey", "mwe resource", "treebank"], "overall_score": 1.698264653463485, "scores": [0.950058170938563, 0.9103937517794081, 0.5896260709508125], "rank_score": 0.8166926645562613} -{"id": "brooke-etal-2014-unsupervised", "title": "Unsupervised Multiword Segmentation of Large Corpora using Prediction-Driven Decomposition of n-grams", "abstract": "We present a new, efficient unsupervised approach to the segmentation of corpora into multiword units. Our method involves initial decomposition of common n-grams into segments which maximize within-segment predictability of words, and then further refinement of these segments into a multiword lexicon. Evaluating in four large, distinct corpora, we show that this method creates segments which correspond well to known multiword expressions; our model is particularly strong with regards to longer (3+ word) multiword units, which are often ignored or minimized in relevant work.", "phrases": ["segmentation", "large corpora", "decomposition", "predictability"], "overall_score": 1.7940903909176085, "scores": [1.3428362483805072, 0.8210113164814862, 0.5730835045001127, 0.5291718304928625], "rank_score": 0.8165257249637421} -{"id": "dhingra-etal-2022-time", "title": "Time-Aware Language Models as Temporal Knowledge Bases", "abstract": "Many facts come with an expiration date, from the name of the President to the basketball team Lebron James plays for. However, most language models (LMs) are trained on snapshots of data collected at a specific moment in time. This can limit their utility, especially in the closed-book setting where the pretraining corpus must contain the facts the model should memorize. We introduce a diagnostic dataset aimed at probing LMs for factual knowledge that changes over time and highlight problems with LMs at either end of the spectrum\u2014those trained on specific slices of temporal data, as well as those trained on a wide range of temporal data. To mitigate these problems, we propose a simple technique for jointly modeling text with its timestamp. This improves memorization of seen facts from the training time period, as well as calibration on predictions about unseen facts from future time periods. We also show that models trained with temporal context can be efficiently \u201crefreshed\u201d as new data arrives, without the need for retraining from scratch.", "phrases": ["language model", "factual knowledge", "time period"], "overall_score": 1.6971799693845593, "scores": [1.3408924036246048, 0.5678931685619829, 0.5397275531405988], "rank_score": 0.8161710417757289} -{"id": "becker-etal-2016-argumentative", "title": "Argumentative texts and clause types", "abstract": "Argumentative texts have been thoroughly analyzed for their argumentative structure, and recent efforts aim at their automatic classification. This work investigates linguistic properties of argumentative texts and text passages in terms of their semantic clause types. We annotate argumentative texts with Situation Entity (SE) classes, which combine notions from lexical aspect (states, events) with genericity and habituality of clauses. 
We analyse the correlation of SE classes with argumentative text genres, components of argument structures, and some functions of those components. Our analysis reveals interesting relations between the distribution of SE types and the argumentative text genre, compared to other genres like fiction or report. We also see tendencies in the correlations between argument components (such as premises and conclusions) and SE types, as well as between argumentative functions (such as support and rebuttal) and SE types. The observed tendencies can be deployed for automatic recognition and fine-grained classification of argumentative text passages.", "phrases": ["clause type", "support", "argumentative text"], "overall_score": 1.1313267254231463, "scores": [1.001754625844157, 0.8960085081401736, 0.5504760506053543], "rank_score": 0.8160797281965616} -{"id": "giuliano-gliozzo-2007-instance", "title": "Instance Based Lexical Entailment for Ontology Population", "abstract": "In this paper we propose an instance based method for lexical entailment and apply it to automatic ontology population from text. The approach is fully unsupervised and based on kernel methods. We demonstrate the effectiveness of our technique largely surpassing both the random and most frequent baselines and outperforming current state-of-the-art unsupervised approaches on a benchmark ontology available in the literature.", "phrases": ["lexical entailment", "ontology population", "occurrence"], "overall_score": 1.461859089931444, "scores": [0.9290073964143706, 0.9954138673211558, 0.5232160140982962], "rank_score": 0.8158790926112743} -{"id": "ma-etal-2011-extraction", "title": "Extraction of Broad-Scale, High-Precision Japanese-English Parallel Translation Expressions Using Lexical Information and Rules", "abstract": "Extraction was attempted of broad-scale, high-precision Japanese-English parallel translation expressions from large aligned parallel corpora. To acquire broad-scale parallel translation expressions, a new method was used to extract single Japanese and English word n-grams, by which as many parallel translation expressions as possible could then be extracted. To achieve high extraction precision, first, hand-crafted rules were used to prune the unnecessary words often found in expressions extracted on the basis of word n-grams, and lexical information was used to refine the parallel translation expressions. Computer experiments with aligned parallel corpora consisting of about 280,000 pairs of Japanese-English parallel sentences found that more than 125,000 pairs of parallel translation expressions could be extracted with a precision of 0.96. These figures show that the proposed methods for extracting a broad range of parallel translation expressions have reached a level high enough for practical use.", "phrases": ["parallel translation expression", "lexical information", "extraction"], "overall_score": 0.8961730733342264, "scores": [0.8576242987694839, 0.8080785266488781, 0.7814928301682642], "rank_score": 0.8157318851955422} -{"id": "kaneko-etal-2020-encoder", "title": "Encoder-Decoder Models Can Benefit from Pre-trained Masked Language Models in Grammatical Error Correction", "abstract": "This paper investigates how to effectively incorporate a pre-trained masked language model (MLM), such as BERT, into an encoder-decoder (EncDec) model for grammatical error correction (GEC). 
The answer to this question is not as straightforward as one might expect because the previous common methods for incorporating a MLM into an EncDec model have potential drawbacks when applied to GEC. For example, the distribution of the inputs to a GEC model can be considerably different (erroneous, clumsy, etc.) from that of the corpora used for pre-training MLMs; however, this issue is not addressed in the previous methods. Our experiments show that our proposed method, where we first fine-tune a MLM with a given GEC corpus and then use the output of the fine-tuned MLM as additional features in the GEC model, maximizes the benefit of the MLM. The best-performing model achieves state-of-the-art performances on the BEA-2019 and CoNLL-2014 benchmarks. Our code is publicly available at: .", "phrases": ["language model", "grammatical error correction", "encoder-decoder model", "fine-tuned bert", "english gec task"], "overall_score": 2.20876806006947, "scores": [1.8747049219875862, 0.5774032964670867, 0.5522580815060171, 0.543983431125162, 0.5298022528959695], "rank_score": 0.8156303967963643} -{"id": "hardmeier-etal-2010-fbk", "title": "FBK at WMT 2010: Word Lattices for Morphological Reduction and Chunk-Based Reordering", "abstract": "FBK participated in the WMT 2010 Machine Translation shared task with phrase-based Statistical Machine Translation systems based on the Moses decoder for English-German and German-English translation. Our work concentrates on exploiting the available language modelling resources by using linear mixtures of large 6-gram language models and on addressing linguistic differences between English and German with methods based on word lattices. In particular, we use lattices to integrate a morphological analyser for German into our system, and we present some initial work on rule-based word reordering.", "phrases": ["wmt", "morphological reduction", "german\u2192english smt system", "edge"], "overall_score": 1.4611934037078327, "scores": [1.3258150819810912, 0.8218466395929102, 0.5942392805668372, 0.5201292620828739], "rank_score": 0.8155075660559281} -{"id": "aguilar-etal-2017-multi", "title": "A Multi-task Approach for Named Entity Recognition in Social Media Data", "abstract": "Named Entity Recognition for social media data is challenging because of its inherent noisiness. In addition to improper grammatical structures, it contains spelling inconsistencies and numerous informal abbreviations. We propose a novel multi-task approach by employing a more general secondary task of Named Entity (NE) segmentation together with the primary task of fine-grained NE categorization. The multi-task neural network architecture learns higher order feature representations from word and character sequences along with basic Part-of-Speech tags and gazetteer information. This neural network acts as a feature extractor to feed a Conditional Random Fields classifier. 
We were able to obtain the first position in the 3rd Workshop on Noisy User-generated Text (WNUT-2017) with a 41.86% entity F1-score and a 40.24% surface F1-score.", "phrases": ["multi-task approach", "part-of-speech tag", "gazetteer", "feature extractor", "noisy user-generated text"], "overall_score": 2.4009259706395927, "scores": [1.9027140997859613, 0.5631670869356503, 0.5621002563902, 0.5253619456822649, 0.523708279838713], "rank_score": 0.815410333726558} -{"id": "schwartz-etal-2015-extracting", "title": "Extracting Human Temporal Orientation from Facebook Language", "abstract": "People vary widely in their temporal orientation\u2014how often they emphasize the past, present, and future\u2014and this affects their finances, health, and happiness. Traditionally, temporal orientation has been assessed by self-report questionnaires. In this paper, we develop a novel behavior-based assessment using human language on Facebook. We first create a past, present, and future message classifier, engineering features and evaluating a variety of classification techniques. Our message classifier achieves an accuracy of 71.8%, compared with 52.8% from the most frequent class and 58.6% from a model based entirely on time expression features. We quantify a users\u2019 overall temporal orientation based on their distribution of messages and validate it against known human correlates: conscientiousness, age, and gender. We then explore social scientific questions, finding novel associations with the factors openness to experience, satisfaction with life, depression, IQ, and one\u2019s number of friends. Further, demonstrating how one can track orientation over time, we find differences in future orientation around birthdays.", "phrases": ["human temporal orientation", "human correlate", "age", "gender"], "overall_score": 1.3122273121787142, "scores": [0.9191968893178527, 0.8929942935828526, 0.8752858894456872, 0.5738536122131944], "rank_score": 0.8153326711398967} -{"id": "jiang-etal-2011-relaxed", "title": "Relaxed Cross-lingual Projection of Constituent Syntax", "abstract": "We propose a relaxed correspondence assumption for cross-lingual projection of constituent syntax, which allows a supposed constituent of the target sentence to correspond to an unrestricted treelet in the source parse. Such a relaxed assumption fundamentally tolerates the syntactic non-isomorphism between languages, and enables us to learn the target-language-specific syntactic idiosyncrasy rather than a strained grammar directly projected from the source language syntax. Based on this assumption, a novel constituency projection method is also proposed in order to induce a projected constituent tree-bank from the source-parsed bilingual corpus. Experiments show that, the parser trained on the projected treebank dramatically outperforms previous projected and unsupervised parsers.", "phrases": ["cross-lingual projection", "constituent syntax", "non-isomorphism"], "overall_score": 1.1298322437829063, "scores": [0.9545751997200355, 0.9391136347882617, 0.5513162282047765], "rank_score": 0.8150016875710246} -{"id": "han-etal-2020-explaining", "title": "Explaining Black Box Predictions and Unveiling Data Artifacts through Influence Functions", "abstract": "Modern deep learning models for NLP are notoriously opaque. This has motivated the development of methods for interpreting such models, e.g., via gradient-based saliency maps or the visualization of attention weights. 
Such approaches aim to provide explanations for a particular model prediction by highlighting important words in the corresponding input text. While this might be useful for tasks where decisions are explicitly influenced by individual tokens in the input, we suspect that such highlighting is not suitable for tasks where model decisions should be driven by more complex reasoning. In this work, we investigate the use of influence functions for NLP, providing an alternative approach to interpreting neural text classifiers. Influence functions explain the decisions of a model by identifying influential training examples. Despite the promise of this approach, influence functions have not yet been extensively evaluated in the context of NLP, a gap addressed by this work. We conduct a comparison between influence functions and common word-saliency methods on representative tasks. As suspected, we find that influence functions are particularly useful for natural language inference, a task in which `saliency maps' may not have clear interpretation. Furthermore, we develop a new quantitative measure based on influence functions that can reveal artifacts in training data.", "phrases": ["data artifact", "influence function", "explanation"], "overall_score": 1.6947010774506712, "scores": [0.9467967249256306, 0.8983576291626867, 0.5997824861389492], "rank_score": 0.8149789467424222} -{"id": "lison-etal-2020-named", "title": "Named Entity Recognition without Labelled Data: A Weak Supervision Approach", "abstract": "Named Entity Recognition (NER) performance often degrades rapidly when applied to target domains that differ from the texts observed during training. When in-domain labelled data is available, transfer learning techniques can be used to adapt existing NER models to the target domain. But what should one do when there is no hand-labelled data for the target domain? This paper presents a simple but powerful approach to learn NER models in the absence of labelled data through weak supervision. The approach relies on a broad spectrum of labelling functions to automatically annotate texts from the target domain. These annotations are then merged together using a hidden Markov model which captures the varying accuracies and confusions of the labelling functions. A sequence labelling model can finally be trained on the basis of this unified annotation. We evaluate the approach on two English datasets (CoNLL 2003 and news articles from Reuters and Bloomberg) and demonstrate an improvement of about 7 percentage points in entity-level F1 scores compared to an out-of-domain neural NER model.", "phrases": ["entity recognition", "weak supervision approach", "target domain", "hidden markov model"], "overall_score": 2.09029782382203, "scores": [0.9367132040888112, 0.9183914987728519, 0.8608013145895962, 0.5438820446268839], "rank_score": 0.8149470155195357} -{"id": "dichy-farghaly-2003-roots", "title": "Roots & patterns vs. stems plus grammar-lexis specifications: on what basis should a multilingual database centred on Arabic be built?", "abstract": "Machine translation engines draw on various types of databases. This paper is concerned with Arabic as a source or target language, and focuses on lexical databases. The non-concatenative nature of Arabic morphology, the complex structure of Arabic word-forms, and the general use of vowel-free writing present a real challenge to NLP developers. 
We show here how and why a stem-grounded lexical database, the items of which are associated with grammar-lexis specifications \u2013 as opposed to a root-&-pattern database \u2013, is motivated both linguistically and with regards to efficiency, economy and modularity. Arguments in favour of databases relying on stems associated with grammar-lexis specifications (such as DIINAR.1 or the Arabic dB under development at SYSTRAN), rather than on roots and patterns, are the following: (a) The latter include huge numbers of rule-generated word-forms, which do not actually appear in the language. (b) Rule-generated lemmas \u2013 as opposed to existing ones \u2013 are widely under-specified with regards to grammar-lexis relations. (c) In a Semitic language such as Arabic, the mapping of grammar-lexis specifications that need to be associated with every lexical entry of the database is decisive. (d) These specifications can only be included in a stem-based dB. Points (a) to (d) are crucial in the context of machine translation involving Arabic.", "phrases": ["grammar-lexis specification", "arabic", "root"], "overall_score": 1.311452777481409, "scores": [0.8336011522300658, 0.8309977006453103, 0.7799554241479211], "rank_score": 0.8148514256744325} -{"id": "basile-etal-2014-enhanced", "title": "An Enhanced Lesk Word Sense Disambiguation Algorithm through a Distributional Semantic Model", "abstract": "This paper describes a new Word Sense Disambiguation (WSD) algorithm which extends two well-known variations of the Lesk WSD method. Given a word and its context, Lesk algorithm exploits the idea of maximum number of shared words (maximum overlaps) between the context of a word and each definition of its senses (gloss) in order to select the proper meaning. The main contribution of our approach relies on the use of a word similarity function defined on a distributional semantic space to compute the gloss-context overlap. As sense inventory we adopt BabelNet, a large multilingual semantic network built exploiting both WordNet and Wikipedia. Besides linguistic knowledge, BabelNet also represents encyclopedic concepts coming from Wikipedia. The evaluation performed on SemEval-2013 Multilingual Word Sense Disambiguation shows that our algorithm goes beyond the most frequent sense baseline and the simplified version of the Lesk algorithm. Moreover, when compared with the other participants in SemEval-2013 task, our approach is able to outperform the best system for English.", "phrases": ["lesk algorithm", "overlap", "lexical knowledge"], "overall_score": 2.0897980475478604, "scores": [1.041820223283478, 0.8388247679325165, 0.5636115101475399], "rank_score": 0.814752167121178} -{"id": "declerck-etal-2012-ontology", "title": "Ontology-Based Incremental Annotation of Characters in Folktales", "abstract": "We present on-going work on the automated ontology-based detection and recognition of characters in folktales, restricting ourselves for the time being to the analysis of referential nominal phrases occurring in such texts. Focus of the presently reported work was to investigate the interaction between an ontology and linguistic analysis of indefinite and definite nominal phrases for both the incremental annotation of characters in folktales text, including some inference based co-reference resolution, and the incremental population of the ontology. 
This in depth study was done at this early stage using only a very small textual base, but the demonstrated feasibility and the promising results of our small-scale experiment are encouraging us to deploy the strategy on a larger text base, covering more linguistic phenomena in a multilingual fashion.", "phrases": ["incremental annotation", "folktale", "ontology-based method"], "overall_score": 1.310641178865123, "scores": [0.8446759539178856, 1.0581812112256481, 0.5401842881603477], "rank_score": 0.8143471511012939} -{"id": "luan-etal-2021-sparse", "title": "Sparse, Dense, and Attentional Representations for Text Retrieval", "abstract": "Dual encoders perform retrieval by encoding documents and queries into dense low-dimensional vectors, scoring each document by its inner product with the query. We investigate the capacity of this architecture relative to sparse bag-of-words models and attentional neural networks. Using both theoretical and empirical analysis, we establish connections between the encoding dimension, the margin between gold and lower-ranked documents, and the document length, suggesting limitations in the capacity of fixed-length encodings to support precise retrieval of long documents. Building on these insights, we propose a simple neural model that combines the efficiency of dual encoders with some of the expressiveness of more costly attentional architectures, and explore sparse-dense hybrids to capitalize on the precision of sparse retrieval. These models outperform strong alternatives in large-scale retrieval.", "phrases": ["query", "efficiency", "me-bert", "bi-encoder", "text sequence"], "overall_score": 2.5529371286545532, "scores": [1.1642204160603502, 0.8670776057234751, 0.8533448342468124, 0.6043495471328971, 0.5820358827203128], "rank_score": 0.8142056571767696} -{"id": "zhang-etal-2013-punctuation", "title": "Punctuation Prediction with Transition-based Parsing", "abstract": "Punctuations are not available in automatic speech recognition outputs, which could create barriers to many subsequent text processing tasks. This paper proposes a novel method to predict punctuation symbols for the stream of words in transcribed speech texts. Our method jointly performs parsing and punctuation prediction by integrating a rich set of syntactic features when processing words from left to right. It can exploit a global view to capture long-range dependencies for punctuation prediction with linear complexity. The experimental results on the test data sets of IWSLT and TDT4 show that our method can achieve high-level performance in punctuation prediction over the stream of words in transcribed speech text.", "phrases": ["transition-based parsing", "syntactic feature", "punctuation prediction"], "overall_score": 1.584091393582333, "scores": [0.959711640512687, 0.9002909721930505, 0.5821832112667104], "rank_score": 0.8140619413241493} -{"id": "bonial-etal-2020-dialogue", "title": "Dialogue-AMR: Abstract Meaning Representation for Dialogue", "abstract": "This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker's intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. 
We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present \u201cDialogue-AMR\u201d, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the \u201cDialAMR\u201d corpus\u2014a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.", "phrases": ["abstract meaning representation", "amr", "dialogue-amr"], "overall_score": 0.8943274986121165, "scores": [0.9846625726668484, 0.9183839381660122, 0.5391094012305597], "rank_score": 0.8140519706878068} -{"id": "lin-etal-2019-kagnet", "title": "KagNet: Knowledge-Aware Graph Networks for Commonsense Reasoning", "abstract": "Commonsense reasoning aims to empower machines with the human ability to make presumptions about ordinary situations in our daily life. In this paper, we propose a textual inference framework for answering commonsense questions, which effectively utilizes external, structured commonsense knowledge graphs to perform explainable inferences. The framework first grounds a question-answer pair from the semantic space to the knowledge-based symbolic space as a schema graph, a related sub-graph of external knowledge graphs. It represents schema graphs with a novel knowledge-aware graph network module named KagNet, and finally scores answers with graph representations. Our model is based on graph convolutional networks and LSTMs, with a hierarchical path-based attention mechanism. The intermediate attention scores make it transparent and interpretable, which thus produce trustworthy inferences. Using ConceptNet as the only external resource for Bert-based models, we achieved state-of-the-art performance on the CommonsenseQA, a large-scale dataset for commonsense reasoning.", "phrases": ["commonsense reasoning", "knowledge graph", "kagnet", "subgraph", "question answering"], "overall_score": 2.1481461807942814, "scores": [0.8996217345433535, 0.7879639659568899, 1.2984832422836667, 0.5485758594010751, 0.5352671256234462], "rank_score": 0.8139823855616862} -{"id": "ni-mcauley-2018-personalized", "title": "Personalized Review Generation By Expanding Phrases and Attending on Aspect-Aware Representations", "abstract": "In this paper, we focus on the problem of building assistive systems that can help users to write reviews. We cast this problem using an encoder-decoder framework that generates personalized reviews by expanding short phrases (e.g. review summaries, product titles) provided as input to the system. We incorporate aspect-level information via an aspect encoder that learns aspect-aware user and item representations. An attention fusion layer is applied to control generation by attending on the outputs of multiple encoders. Experimental results show that our model successfully learns representations capable of generating coherent and diverse reviews. 
In addition, the learned aspect-aware representations discover those aspects that users are more inclined to discuss and bias the generated text toward their personalized aspect preferences.", "phrases": ["review", "aspect-aware representation", "item representation"], "overall_score": 1.7884920206788943, "scores": [0.8330048558447167, 1.0681228721277012, 0.5408056626241217], "rank_score": 0.8139777968655132} -{"id": "kulick-etal-2004-integrated", "title": "Integrated Annotation for Biomedical Information Extraction", "abstract": "We describe an approach to two areas of biomedical information extraction, drug development and cancer genomics. We have developed a framework which includes corpus annotation integrated at multiple levels: a Treebank containing syntactic structure, a Propbank containing predicate-argument structure, and annotation of entities and relations among the entities. Crucial to this approach is the proper characterization of entities as relation components, which allows the integration of the entity annotation with the syntactic structure while retaining the capacity to annotate and extract more complex events. We are training statistical taggers using this annotation for such extraction as well as using them for improving the annotation process.", "phrases": ["biomedical information extraction", "integrated annotation", "abstract"], "overall_score": 1.874027490778584, "scores": [0.9747397099949161, 0.9353365539220947, 0.5315631306233973], "rank_score": 0.8138797981801361} -{"id": "wang-etal-2006-capitalizing", "title": "Capitalizing Machine Translation", "abstract": "We present a probabilistic bilingual capitalization model for capitalizing machine translation outputs using conditional random fields. Experiments carried out on three language pairs and a variety of experiment conditions show that our model significantly outperforms a strong monolingual capitalization model baseline, especially when working with small datasets and/or European language pairs.", "phrases": ["machine translation", "bilingual capitalization model", "conditional random field"], "overall_score": 1.1282154403074858, "scores": [0.9627272071423989, 0.891398618576328, 0.5873804054602247], "rank_score": 0.813835410392984} -{"id": "lease-johnson-2006-early", "title": "Early Deletion of Fillers In Processing Conversational Speech", "abstract": "This paper evaluates the benefit of deleting fillers (e.g. you know, like) early in parsing conversational speech. Readability studies have shown that disfluencies (fillers and speech repairs) may be deleted from transcripts without compromising meaning (Jones et al., 2003), and deleting repairs prior to parsing has been shown to improve its accuracy (Charniak and Johnson, 2001). We explore whether this strategy of early deletion is also beneficial with regard to fillers. Reported experiments measure the effect of early deletion under in-domain and out-of-domain parser training conditions using a state-of-the-art parser (Charniak, 2000). While early deletion is found to yield only modest benefit for in-domain parsing, significant improvement is achieved for out-of-domain adaptation. 
This suggests a potentially broader role for disfluency modeling in adapting text-based tools for processing conversational speech.", "phrases": ["disfluency", "early deletion", "pcfg parser"], "overall_score": 1.5835176957754837, "scores": [0.8397957681193537, 1.0802218261813785, 0.5212837626158329], "rank_score": 0.8137671189721883} -{"id": "vanmassenhove-etal-2018-getting", "title": "Getting Gender Right in Neural Machine Translation", "abstract": "Speakers of different languages must attend to and encode strikingly different aspects of the world in order to use their language correctly (Sapir, 1921; Slobin, 1996). One such difference is related to the way gender is expressed in a language. Saying \u201cI am happy\u201d in English, does not encode any additional knowledge of the speaker that uttered the sentence. However, many other languages do have grammatical gender systems and so such knowledge would be encoded. In order to correctly translate such a sentence into, say, French, the inherent gender information needs to be retained/recovered. The same sentence would become either \u201cJe suis heureux\u201d, for a male speaker or \u201cJe suis heureuse\u201d for a female one. Apart from morphological agreement, demographic factors (gender, age, etc.) also influence our use of language in terms of word choices or syntactic constructions (Tannen, 1991; Pennebaker et al., 2003). We integrate gender information into NMT systems. Our contribution is two-fold: (1) the compilation of large datasets with speaker information for 20 language pairs, and (2) a simple set of experiments that incorporate gender information into NMT for multiple language pairs. Our experiments show that adding a gender feature to an NMT system significantly improves the translation quality for some language pairs.", "phrases": ["gender", "morphological agreement", "speaker information", "translation quality"], "overall_score": 2.1469927584888326, "scores": [1.2994564101977581, 0.8795814036068001, 0.5479436191471433, 0.5271998754957924], "rank_score": 0.8135453271118734} -{"id": "peng-etal-2020-learning", "title": "Learning from Context or Names? An Empirical Study on Neural Relation Extraction", "abstract": "Neural models have achieved remarkable success on relation extraction (RE) benchmarks. However, there is no clear understanding what information in text affects existing RE models to make decisions and how to further improve the performance of these models. To this end, we empirically study the effect of two main information sources in text: textual context and entity mentions (names). We find that (i) while context is the main source to support the predictions, RE models also heavily rely on the information from entity mentions, most of which is type information, and (ii) existing datasets may leak shallow heuristics via entity mentions and thus contribute to the high performance on RE benchmarks. Based on the analyses, we propose an entity-masked contrastive pre-training framework for RE to gain a deeper understanding on both textual context and type information while avoiding rote memorization of entities or use of superficial cues in mentions. We carry out extensive experiments to support our views, and show that our framework can improve the effectiveness and robustness of neural models in different RE scenarios. 
All the code and datasets are released at .", "phrases": ["textual context", "shallow heuristic", "cue", "entity type"], "overall_score": 2.0864953577015357, "scores": [1.3114182065777502, 0.8412341926643399, 0.5733492412388757, 0.5278565327914855], "rank_score": 0.8134645433181127} -{"id": "van-halteren-teufel-2003-examining", "title": "Examining the consensus between human summaries: initial experiments with factoid analysis", "abstract": "We present a new approach to summary evaluation which combines two novel aspects, namely (a) content comparison between gold standard summary and system summary via factoids, a pseudo-semantic representation based on atomic information units which can be robustly marked in text, and (b) use of a gold standard consensus summary, in our case based on 50 individual summaries of one text. Even though future work on more than one source text is imperative, our experiments indicate that (1) ranking with regard to a single gold standard summary is insufficient as rankings based on any two randomly chosen summaries are very dissimilar (correlations average \u03c1 = 0.20), (2) a stable consensus summary can only be expected if a larger number of summaries are collected (in the range of at least 30--40 summaries), and (3) similarity measurement using unigrams shows a similarly low ranking correlation when compared with factoid-based ranking.", "phrases": ["factoid", "ranking", "large number", "model summary"], "overall_score": 2.0213828908390394, "scores": [1.3281684435927714, 0.853759992632456, 0.5375285320495856, 0.5344002999835231], "rank_score": 0.8134643170645841} -{"id": "haghighi-klein-2009-simple", "title": "Simple Coreference Resolution with Rich Syntactic and Semantic Features", "abstract": "Coreference systems are driven by syntactic, semantic, and discourse constraints. We present a simple approach which completely modularizes these three aspects. In contrast to much current work, which focuses on learning and on the discourse component, our system is deterministic and is driven entirely by syntactic and semantic compatibility as learned from a large, unlabeled corpus. Despite its simplicity and discourse naivete, our system substantially outperforms all unsupervised systems and most supervised ones. Primary contributions include (1) the presentation of a simple-to-reproduce, high-performing baseline and (2) the demonstration that most remaining errors can be attributed to syntactic and semantic factors external to the coreference phenomenon (and perhaps best addressed by non-coreference systems).", "phrases": ["coreference resolution", "semantic compatibility", "pronoun", "rule-based system", "nominal"], "overall_score": 2.58519123741277, "scores": [1.481462433277092, 0.8964338218737551, 0.5880288411124145, 0.5561196924355406, 0.5452104804319499], "rank_score": 0.8134510538261503} -{"id": "izacard-grave-2021-leveraging", "title": "Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering", "abstract": "Generative models for open domain question answering have proven to be competitive, without resorting to external knowledge. While promising, this approach requires to use models with billions of parameters, which are expensive to train and query. In this paper, we investigate how much these models can benefit from retrieving text passages, potentially containing evidence. We obtain state-of-the-art results on the Natural Questions and TriviaQA open benchmarks. 
Interestingly, we observe that the performance of this method significantly improves when increasing the number of retrieved passages. This is evidence that sequence-to-sequence models offer a flexible framework to efficiently aggregate and combine evidence from multiple passages.", "phrases": ["passage retrieval", "generative reader", "knowledge-intensive nlp task"], "overall_score": 2.9588167247234844, "scores": [0.8534416719102047, 1.0450856617720268, 0.5416760826509031], "rank_score": 0.8134011387777115} -{"id": "berend-etal-2020-prosperamnet", "title": "ProsperAMnet at the FinSim Task: Detecting hypernyms of financial concepts via measuring the information stored in sparse word representations", "abstract": "In this paper we propose and carefully evaluate the application of an information theoretic approach for the detection of hypernyms for financial concepts. Our algorithm is based on the application of sparse word embeddings, meaning that \u2013 unlike in the case of traditional word embeddings \u2013 most of the coefficients in the embeddings are exactly zero. We apply an approach that quantifies the extent to which the individual dimensions for such word representations convey the property that some word is the hyponym of a certain top-level concept according to an external ontology. Our experimental results demonstrate that substantial improvements can be gained by our approach compared to the direct utilization of the traditional dense word embeddings. Our team ranked second and fourth according to average rank score and mean accuracy that were the two evaluation criteria applied at the shared task.", "phrases": ["hypernyms", "financial concept", "word representation"], "overall_score": 0.8935895350050399, "scores": [0.8269164602843092, 0.8140734061787834, 0.7991508753313754], "rank_score": 0.8133802472648227} -{"id": "schwartz-etal-2017-effect", "title": "The Effect of Different Writing Tasks on Linguistic Style: A Case Study of the ROC Story Cloze Task", "abstract": "A writer's style depends not just on personal traits but also on her intent and mental state. In this paper, we show how variants of the same writing task can lead to measurable differences in writing style. We present a case study based on the story cloze task (Mostafazadeh et al., 2016a), where annotators were assigned similar writing tasks with different constraints: (1) writing an entire story, (2) adding a story ending for a given story context, and (3) adding an incoherent ending to a story. We show that a simple linear classifier informed by stylistic features is able to successfully distinguish among the three cases, without even looking at the story context. In addition, combining our stylistic features with language model predictions reaches state of the art performance on the story cloze challenge. Our results demonstrate that different task framings can dramatically affect the way people write.", "phrases": ["style", "story", "task framing"], "overall_score": 2.350735330874476, "scores": [0.8393071793953782, 1.0788688493901768, 0.521719798925053], "rank_score": 0.8132986092368695} -{"id": "miao-etal-2021-generative", "title": "A Generative Framework for Simultaneous Machine Translation", "abstract": "We propose a generative framework for simultaneous machine translation. Conventional approaches use a fixed number of source words to translate or learn dynamic policies for the number of source words by reinforcement learning.
Here we formulate simultaneous translation as a structural sequence-to-sequence learning problem. A latent variable is introduced to model read or translate actions at every time step, which is then integrated out to consider all the possible translation policies. A re-parameterised Poisson prior is used to regularise the policies which allows the model to explicitly balance translation quality and latency. The experiments demonstrate the effectiveness and robustness of the generative framework, which achieves the best BLEU scores given different average translation latencies on benchmark datasets.", "phrases": ["generative framework", "simultaneous machine translation", "simt policy"], "overall_score": 1.1271344925067615, "scores": [0.9761593688595573, 0.9141457222548043, 0.5488619230172433], "rank_score": 0.8130556713772016} -{"id": "verhoeven-daelemans-2014-clips", "title": "CLiPS Stylometry Investigation (CSI) corpus: A Dutch corpus for the detection of age, gender, personality, sentiment and deception in text", "abstract": "We present the CLiPS Stylometry Investigation (CSI) corpus, a new Dutch corpus containing reviews and essays written by university students. It is designed to serve multiple purposes: detection of age, gender, authorship, personality, sentiment, deception, topic and genre. Another major advantage is its planned yearly expansion with each year's new students. The corpus currently contains about 305,000 tokens spread over 749 documents. The average review length is 128 tokens; the average essay length is 1126 tokens. The corpus will be made available on the CLiPS website (www.clips.uantwerpen.be/datasets) and can freely be used for academic research purposes. An initial deception detection experiment was performed on this data. Deception detection is the task of automatically classifying a text as being either truthful or deceptive, in our case by examining the writing style of the author. This task has never been investigated for Dutch before. We performed a supervised machine learning experiment using the SVM algorithm in a 10-fold cross-validation setup. The only features were the token unigrams present in the training data. Using this simple method, we reached a state-of-the-art F-score of 72.2%.", "phrases": ["detection", "personality", "clips stylometry investigation"], "overall_score": 1.127111048160131, "scores": [0.865952307203528, 0.7882205308119455, 0.784943441552201], "rank_score": 0.8130387598558914} -{"id": "li-etal-2016-commonsense", "title": "Commonsense Knowledge Base Completion", "abstract": "We enrich a curated resource of commonsense knowledge by formulating the problem as one of knowledge base completion (KBC). Most work in KBC focuses on knowledge bases like Freebase that relate entities drawn from a fixed set. However, the tuples in ConceptNet (Speer and Havasi, 2012) define relations between an unbounded set of phrases. We develop neural network models for scoring tuples on arbitrary phrases and evaluate them by their ability to distinguish true held-out tuples from false ones. We find strong performance from a bilinear model using a simple additive architecture to model phrases. 
We manually evaluate our trained model\u2019s ability to assign quality scores to novel tuples, finding that it can propose tuples at the same quality level as medium-confidence tuples from ConceptNet.", "phrases": ["knowledge base completion", "conceptnet", "neural network model", "sparsity", "wikipedia"], "overall_score": 2.349677944611479, "scores": [1.7027047841849605, 0.6669795897690517, 0.5879403448290765, 0.5659968609481023, 0.5410423137496867], "rank_score": 0.8129327786961756} -{"id": "chahuneau-etal-2013-translating", "title": "Translating into Morphologically Rich Languages with Synthetic Phrases", "abstract": "Translation into morphologically rich languages is an important but recalcitrant problem in MT. We present a simple and effective approach that deals with the problem in two phases. First, a discriminative model is learned to predict inflections of target words from rich source-side annotations. Then, this model is used to create additional sentence-specific word- and phrase-level translations that are added to a standard translation model as \u201csynthetic\u201d phrases. Our approach relies on morphological analysis of the target language, but we show that an unsupervised Bayesian model of morphology can successfully be used in place of a supervised analyzer. We report significant improvements in translation quality when translating from English to Russian, Hebrew and Swahili.", "phrases": ["rich language", "synthetic phrase", "morphological analysis", "translation quality", "swahili"], "overall_score": 2.084610041057516, "scores": [1.4807135896313461, 0.9428011201345884, 0.5765005682733724, 0.538908652658786, 0.5247236321539863], "rank_score": 0.8127295125704158} -{"id": "wang-jiang-2016-learning", "title": "Learning Natural Language Inference with LSTM", "abstract": "Natural language inference (NLI) is a fundamentally important task in natural language processing that has many applications. The recently released Stanford Natural Language Inference (SNLI) corpus has made it possible to develop and evaluate learning-centered methods such as deep neural networks for natural language inference (NLI). In this paper, we propose a special long short-term memory (LSTM) architecture for NLI. Our model builds on top of a recently proposed neural attention model for NLI but is based on a significantly different idea. Instead of deriving sentence embeddings for the premise and the hypothesis to be used for classification, our solution uses a match-LSTM to perform word-by-word matching of the hypothesis with the premise. This LSTM is able to place more emphasis on important word-level matching results. In particular, we observe that this LSTM remembers important mismatches that are critical for predicting the contradiction or the neutral relationship label. On the SNLI corpus, our model achieves an accuracy of 86.1%, outperforming the state of the art.", "phrases": ["natural language inference", "nli", "attention model", "matching"], "overall_score": 2.019352114884349, "scores": [0.96508940100234, 0.8791249258297777, 0.8370099888316127, 0.5693639751384695], "rank_score": 0.81264707270055} -{"id": "wilson-wiebe-2005-annotating", "title": "Annotating Attributions and Private States", "abstract": "This paper describes extensions to a corpus annotation scheme for the manual annotation of attributions, as well as opinions, emotions, sentiments, speculations, evaluations and other private states in language.
It discusses the scheme with respect to the "Pie in the Sky" Check List of Desirable Semantic Information for Annotation. We believe that the scheme is a good foundation for adding private state annotations to other layers of semantic meaning.", "phrases": ["private state", "manual annotation", "opinion", "subjectivity"], "overall_score": 2.0840687612273157, "scores": [1.2574754385826032, 0.8948978668116637, 0.5546547112446519, 0.5430459158770262], "rank_score": 0.8125184831289862} -{"id": "alikaniotis-etal-2016-automatic", "title": "Automatic Text Scoring Using Neural Networks", "abstract": "Automated Text Scoring (ATS) provides a cost-effective and consistent alternative to human marking. However, in order to achieve good performance, the predictive features of the system need to be manually engineered by human experts. We introduce a model that forms word representations by learning the extent to which specific words contribute to the text's score. Using Long-Short Term Memory networks to represent the meaning of texts, we demonstrate that a fully automated framework is able to achieve excellent results over similar approaches. In an attempt to make our results more interpretable, and inspired by recent advances in visualizing neural networks, we introduce a novel method for identifying the regions of the text that the model has found more discriminative.", "phrases": ["automatic text scoring", "neural network model", "essay scoring"], "overall_score": 2.4339253231887463, "scores": [0.9605004844837022, 0.9320363382209068, 0.5448558755767255], "rank_score": 0.8124642327604449} -{"id": "naplava-straka-2019-grammatical", "title": "Grammatical Error Correction in Low-Resource Scenarios", "abstract": "Grammatical error correction in English is a long studied problem with many existing systems and datasets. However, there has been only limited research on error correction of other languages. In this paper, we present a new dataset AKCES-GEC on grammatical error correction for Czech. We then make experiments on Czech, German and Russian and show that when utilizing synthetic parallel corpus, Transformer neural machine translation model can reach new state-of-the-art results on these datasets. AKCES-GEC is published under CC BY-NC-SA 4.0 license at , and the source code of the GEC model is available at .", "phrases": ["other language", "czech", "grammatical error correction", "pre-training"], "overall_score": 1.8702348923366643, "scores": [0.934832658067878, 1.2274511790587623, 0.5496306673422064, 0.5370162699500953], "rank_score": 0.8122326936047355} -{"id": "bodrumlu-etal-2009-new", "title": "A New Objective Function for Word Alignment", "abstract": "We develop a new objective function for word alignment that measures the size of the bilingual dictionary induced by an alignment. A word alignment that results in a small dictionary is preferred over one that results in a large dictionary. In order to search for the alignment that minimizes this objective, we cast the problem as an integer linear program.
We then extend our objective function to align corpora at the sub-word level, which we demonstrate on a small Turkish-English corpus.", "phrases": ["new objective function", "word alignment", "mdl"], "overall_score": 1.4542406447103582, "scores": [0.98986612933825, 0.9052809727073212, 0.5397343700811921], "rank_score": 0.8116271573755878} -{"id": "liu-lapata-2018-learning", "title": "Learning Structured Text Representations", "abstract": "In this paper, we focus on learning structure-aware document representations from data without recourse to a discourse parser or additional annotations. Drawing inspiration from recent efforts to empower neural networks with a structural bias (Cheng et al., 2016; Kim et al., 2017), we propose a model that can encode a document while automatically inducing rich structural dependencies. Specifically, we embed a differentiable non-projective parsing algorithm into a neural model and use attention mechanisms to incorporate the structural biases. Experimental evaluations across different tasks and datasets show that the proposed model achieves state-of-the-art results on document modeling tasks while inducing intermediate structures which are both interpretable and meaningful.", "phrases": ["document representation", "discourse parser", "attention weight", "latent structure", "summarization"], "overall_score": 2.702958107750694, "scores": [1.047095956895585, 0.9257661369497826, 0.8791193598261847, 0.6068458281485681, 0.5969833677017713], "rank_score": 0.8111621299043783} -{"id": "gargett-etal-2010-give", "title": "The GIVE-2 Corpus of Giving Instructions in Virtual Environments", "abstract": "We present the GIVE-2 Corpus, a new corpus of human instruction giving. The corpus was collected by asking one person in each pair of subjects to guide the other person towards completing a task in a virtual 3D environment with typed instructions. This is the same setting as that of the recent GIVE Challenge, and thus the corpus can serve as a source of data and as a point of comparison for NLG systems that participate in the GIVE Challenge. The instruction-giving data we collect is multilingual (45 German and 63 English dialogues), and can easily be extended to further languages by using our software, which we have made available. We analyze the corpus to study the effects of learning by repeated participation in the task and the effects of the participants' spatial navigation abilities. Finally, we present a novel annotation scheme for situated referring expressions and compare the referring expressions in the German and English data.", "phrases": ["give-2 corpus", "instruction", "virtual environments", "hearer"], "overall_score": 1.8675327588706894, "scores": [0.819305408084115, 0.8082239134000763, 1.068572129170193, 0.5481352371500012], "rank_score": 0.8110591719510964} -{"id": "xiong-etal-2015-hanspeller", "title": "HANSpeller: A Unified Framework for Chinese Spelling Correction", "abstract": "The number of people learning Chinese as a Foreign Language (CFL) has been booming in recent decades. The problem of spelling error correction for CFL learners increasingly is becoming important. Compared to the regular text spelling check task, more error types need to be considered in CFL cases. In this paper, we propose a unified framework for Chinese spelling correction. 
Instead of conventional methods, which focus on rules or statistics separately, our approach is based on extended HMM and ranker-based models, together with a rule-based model for further polishing, and a final decision-making step is adopted to decide whether to output the corrections or not. Experimental results on the test data of foreigner's Chinese essays provided by the SIGHAN 2014 bake-off illustrate the performance of our approach.", "phrases": ["unified framework", "chinese spelling correction", "hmm", "conditional random field", "machine learning algorithm"], "overall_score": 1.867504084195226, "scores": [0.9738298963144639, 0.8150518850426659, 0.8698027865650105, 0.8538457745701837, 0.542703250996538], "rank_score": 0.8110467186977723} -{"id": "amigo-etal-2011-corroborating", "title": "Corroborating Text Evaluation Results with Heterogeneous Measures", "abstract": "Automatically produced texts (e.g. translations or summaries) are usually evaluated with n-gram based measures such as BLEU or ROUGE, while the wide set of more sophisticated measures that have been proposed in the last years remains largely ignored for practical purposes. In this paper we first present an in-depth analysis of the state of the art in order to clarify this issue. After this, we formalize and verify empirically a set of properties that every text evaluation measure based on similarity to human-produced references satisfies. These properties imply that corroborating system improvements with additional measures always increases the overall reliability of the evaluation process. In addition, the greater the heterogeneity of the measures (which is measurable) the higher their combined reliability. These results support the use of heterogeneous measures in order to consolidate text evaluation results.", "phrases": ["text evaluation result", "heterogeneous measure", "score increase"], "overall_score": 1.1240435138629108, "scores": [0.998504990263347, 0.8834951460706131, 0.5504778684561792], "rank_score": 0.8108260015967131} -{"id": "popat-etal-2019-stancy", "title": "STANCY: Stance Classification Based on Consistency Cues", "abstract": "Controversial claims are abundant in online media and discussion forums. A better understanding of such claims requires analyzing them from different perspectives. Stance classification is a necessary step for inferring these perspectives in terms of supporting or opposing the claim. In this work, we present a neural network model for stance classification leveraging BERT representations and augmenting them with a novel consistency constraint. Experiments on the Perspectrum dataset, consisting of claims and users' perspectives from various debate websites, demonstrate the effectiveness of our approach over state-of-the-art baselines.", "phrases": ["stance classification", "claim", "consistency constraint"], "overall_score": 1.3048761295877116, "scores": [0.9836016088217875, 0.8626130815003202, 0.5860806834352336], "rank_score": 0.8107651245857804} -{"id": "eisenstein-davis-2006-gesture", "title": "Gesture Improves Coreference Resolution", "abstract": "Coreference resolution, like many problems in natural language processing, has most often been explored using datasets of written text. While spontaneous spoken language poses well-known challenges, it also offers additional modalities that may help disambiguate some of the inherent disfluency. We explore features of hand gesture that are correlated with coreference. 
Combining these features with a traditional textual model yields a statistically significant improvement in overall performance.", "phrases": ["coreference resolution", "gesture", "co-reference resolution"], "overall_score": 1.3048162737348348, "scores": [0.9641022460322479, 0.9067111668437884, 0.5613703892840143], "rank_score": 0.8107279340533502} -{"id": "xie-etal-2011-novel", "title": "A novel dependency-to-string model for statistical machine translation", "abstract": "Dependency structure, as a first step towards semantics, is believed to be helpful to improve translation quality. However, previous works on dependency structure based models typically resort to insertion operations to complete translations, which make it difficult to specify ordering information in translation rules. In our model of this paper, we handle this problem by directly specifying the ordering information in head-dependents rules which represent the source side as head-dependents relations and the target side as strings. The head-dependents rules require only substitution operation, thus our model requires no heuristics or separate ordering models of the previous works to control the word order of translations. Large-scale experiments show that our model performs well on long distance reordering, and outperforms the state-of-the-art constituency-to-string model (+1.47 BLEU on average) and hierarchical phrase-based model (+0.46 BLEU on average) on two Chinese-English NIST test sets without resort to phrases or parse forest. For the first time, a source dependency structure based model catches up with and surpasses the state-of-the-art translation models.", "phrases": ["statistical machine translation", "source side", "head-dependents relation", "long distance", "tree-based model"], "overall_score": 2.247176532795256, "scores": [0.935829170838239, 1.1543259366501608, 0.8616291430298656, 0.5651271019579844, 0.5355766973559635], "rank_score": 0.8104976099664427} -{"id": "wilkens-etal-2016-multiword", "title": "Multiword Expressions in Child Language", "abstract": "The goal of this work is to introduce CHILDES-MWE, which contains English CHILDES corpora automatically annotated with Multiword Expressions (MWEs) information. The result is a resource with almost 350,000 sentences annotated with more than 70,000 distinct MWEs of various types from both longitudinal and latitudinal corpora. This resource can be used for large scale language acquisition studies of how MWEs feature in child language. Focusing on compound nouns (CN), we then verify in a longitudinal study if there are differences in the distribution and compositionality of CNs in child-directed and child-produced sentences across ages. Moreover, using additional latitudinal data, we investigate if there are further differences in CN usage and in compositionality preferences. The results obtained for the child-produced sentences reflect CN distribution and compositionality in child-directed sentences.", "phrases": ["child language", "multiword expressions", "english verb-particle constructions"], "overall_score": 1.7807639337340395, "scores": [0.9343450608988978, 0.9312921250402972, 0.5657445928313599], "rank_score": 0.8104605929235184} -{"id": "dickinson-meurers-2005-detecting", "title": "Detecting Errors in Discontinuous Structural Annotation", "abstract": "Consistency of corpus annotation is an essential property for the many uses of annotated corpora in computational and theoretical linguistics. 
While some research addresses the detection of inconsistencies in positional annotation (e.g., part-of-speech) and continuous structural annotation (e.g., syntactic constituency), no approach has yet been developed for automatically detecting annotation errors in discontinuous structural annotation. This is significant since the annotation of potentially discontinuous stretches of material is increasingly relevant, from tree-banks for free-word order languages to semantic and discourse annotation. In this paper we discuss how the variation n-gram error detection approach (Dickinson and Meurers, 2003a) can be extended to discontinuous structural annotation. We exemplify the approach by showing how it successfully detects errors in the syntactic annotation of the German TIGER corpus (Brants et al., 2002).", "phrases": ["discontinuous structural annotation", "constituency annotation", "variation nucleus"], "overall_score": 1.8658360121372914, "scores": [0.9928298252472659, 0.8876109858724789, 0.5505260415030383], "rank_score": 0.8103222842075944} -{"id": "duh-etal-2010-n", "title": "N-Best Reranking by Multitask Learning", "abstract": "We propose a new framework for N-best reranking on sparse feature sets. The idea is to reformulate the reranking problem as a Multitask Learning problem, where each N-best list corresponds to a distinct task. \n \nThis is motivated by the observation that N-best lists often show significant differences in feature distributions. Training a single reranker directly on this heterogeneous data can be difficult. \n \nOur proposed meta-algorithm solves this challenge by using multitask learning (such as \u21131/\u21132 regularization) to discover common feature representations across N-best lists. This meta-algorithm is simple to implement, and its modular approach allows one to plug in different learning algorithms from existing literature. As a proof of concept, we show statistically significant improvements on a machine translation system involving millions of features.", "phrases": ["multitask learning", "n-best reranking", "language task"], "overall_score": 1.7802180712406057, "scores": [1.0093343201176221, 0.882544058913478, 0.5387581015586534], "rank_score": 0.8102121601965845} -{"id": "qiu-etal-2019-graph", "title": "Graph-Based Semi-Supervised Learning for Natural Language Understanding", "abstract": "Semi-supervised learning is an efficient method to augment training data automatically from unlabeled data. Development of many natural language understanding (NLU) applications has a challenge where unlabeled data is relatively abundant while labeled data is rather limited. In this work, we propose transductive graph-based semi-supervised learning models as well as their inductive variants for NLU. We evaluate the approach's applicability using publicly available NLU data and models. In order to find similar utterances and construct a graph, we use a paraphrase detection model.
Results show that applying the inductive graph-based semi-supervised learning can improve the error rate of the NLU model by 5%.", "phrases": ["natural language understanding", "unlabeled data", "graph-based semi-supervised learning"], "overall_score": 1.3039629060606692, "scores": [0.949577638598925, 0.940436307179917, 0.5405791749372594], "rank_score": 0.8101977069053672} -{"id": "feldman-etal-2006-cross", "title": "A Cross-language Approach to Rapid Creation of New Morpho-syntactically Annotated Resources", "abstract": "We take a novel approach to rapid, low-cost development of morpho-syntactically annotated resources without using parallel corpora or bilingual lexicons. The overall research question is how to exploit language resources and properties to facilitate and automate the creation of morphologically annotated corpora for new languages. This portability issue is especially relevant to minority languages, for which such resources are likely to remain unavailable in the foreseeable future. We compare the performance of our system on languages that belong to different language families (Romance vs. Slavic), as well as different language pairs within the same language family (Portuguese via Spanish vs. Catalan via Spanish). We show that across language families, the most difficult category is the category of nominals (the noun homonymy is challenging for morphological analysis and the order variation of adjectives within a sentence makes it challenging to create a reliable model), whereas different language families present different challenges with respect to their morpho-syntactic descriptions: for the Slavic languages, case is the most challenging category; for the Romance languages, gender is more challenging than case. In addition, we present an alternative evaluation metric for our system, where we measure how much human labor will be needed to convert the result of our tagging to a high precision annotated resource.", "phrases": ["parallel corpora", "tagger", "czech"], "overall_score": 2.077726374701337, "scores": [1.3490614214379772, 0.5520392370555774, 0.5290366484951585], "rank_score": 0.8100457689962378} -{"id": "suzuki-isozaki-2008-semi", "title": "Semi-Supervised Sequential Labeling and Segmentation Using Giga-Word Scale Unlabeled Data", "abstract": "This paper provides evidence that the use of more unlabeled data in semi-supervised learning can improve the performance of Natural Language Processing (NLP) tasks, such as part-of-speech tagging, syntactic chunking, and named entity recognition. We first propose a simple yet powerful semi-supervised discriminative model appropriate for handling large scale unlabeled data. Then, we describe experiments performed on widely used test collections, namely, PTB III data, CoNLL\u201900 and \u201903 shared task data for the above three NLP tasks, respectively. We incorporate up to 1G-words (one billion tokens) of unlabeled data, which is the largest amount of unlabeled data ever used for these tasks, to investigate the performance improvement.
In addition, our results are superior to the best reported results for all of the above test collections.", "phrases": ["unlabeled data", "part-of-speech tagging", "chunking", "entity recognition", "probability model"], "overall_score": 2.0766100048319447, "scores": [1.0980941091956797, 0.9283065919544251, 0.8861936849105116, 0.5894667611148875, 0.5459914952499807], "rank_score": 0.809610528485097} -{"id": "xia-yarowsky-2017-deriving", "title": "Deriving Consensus for Multi-Parallel Corpora: an English Bible Study", "abstract": "What can you do with multiple noisy versions of the same text? We present a method which generates a single consensus between multi-parallel corpora. By maximizing a function of linguistic features between word pairs, we jointly learn a single corpus-wide multiway alignment: a consensus between 27 versions of the English Bible. We additionally produce English paraphrases, word-level distributions of tags, and consensus dependency parses. Our method is language independent and applicable to any multi-parallel corpora. Given the Bible's unique role as alignable bitext for over 800 of the world's languages, this consensus alignment and resulting resources offer value for multilingual annotation projection, and also shed potential insights into the Bible itself.", "phrases": ["consensus", "multi-parallel corpora", "paraphrase"], "overall_score": 0.8891936844801944, "scores": [0.9810733295001195, 0.9003123525976385, 0.5467512329504024], "rank_score": 0.8093789716827202} -{"id": "degaetano-ortlieb-2018-stylistic", "title": "Stylistic variation over 200 years of court proceedings according to gender and social class", "abstract": "We present an approach to detect stylistic variation across social variables (here: gender and social class), considering also diachronic change in language use. For detection of stylistic variation, we use relative entropy, measuring the difference between probability distributions at different linguistic levels (here: lexis and grammar). In addition, by relative entropy, we can determine which linguistic units are related to stylistic variation.", "phrases": ["gender", "social class", "stylistic variation"], "overall_score": 0.8889503316911582, "scores": [0.8284958834475817, 0.8091002620994342, 0.7898762417376755], "rank_score": 0.8091574624282304} -{"id": "guevara-2010-regression", "title": "A Regression Model of Adjective-Noun Compositionality in Distributional Semantics", "abstract": "In this paper we explore the computational modelling of compositionality in distributional models of semantics. In particular, we model the semantic composition of pairs of adjacent English Adjectives and Nouns from the British National Corpus. We build a vector-based semantic space from a lemmatised version of the BNC, where the most frequent A-N lemma pairs are treated as single tokens. We then extrapolate three different models of compositionality: a simple additive model, a pointwise-multiplicative model and a Partial Least Squares Regression (PLSR) model. We propose two evaluation methods for the implemented models. 
Our study leads to the conclusion that regression-based models of compositionality generally out-perform additive and multiplicative approaches, and also show a number of advantages that make them very promising for future research.", "phrases": ["compositionality", "adjective-noun phrase", "distributional semantic model"], "overall_score": 2.603234343946559, "scores": [0.948165241503165, 0.9180035918917782, 0.5600518277141108], "rank_score": 0.8087402203696846} -{"id": "mcclosky-etal-2011-event", "title": "Event Extraction as Dependency Parsing", "abstract": "Nested event structures are a common occurrence in both open domain and domain specific extraction tasks, e.g., a \"crime\" event can cause a \"investigation\" event, which can lead to an \"arrest\" event. However, most current approaches address event extraction with highly local models that extract each event and argument independently. We propose a simple approach for the extraction of such structures by taking the tree of event-argument relations and using it directly as the representation in a reranking dependency parser. This provides a simple framework that captures global properties of both nested and flat event structures. We explore a rich feature space that models both the events to be parsed and context from the original supporting text. Our approach obtains competitive results in the extraction of biomedical events from the BioNLP'09 shared task with a F1 score of 53.5% in development and 48.6% in testing.", "phrases": ["reranking dependency parser", "event extraction", "feature-based method"], "overall_score": 2.241772498354923, "scores": [0.9844679799990044, 0.8356077488205442, 0.6055698208138832], "rank_score": 0.8085485165444773} -{"id": "metallinou-etal-2013-discriminative", "title": "Discriminative state tracking for spoken dialog systems", "abstract": "In spoken dialog systems, statistical state tracking aims to improve robustness to speech recognition errors by tracking a posterior distribution over hidden dialog states. Current approaches based on generative or discriminative models have different but important shortcomings that limit their accuracy. In this paper we discuss these limitations and introduce a new approach for discriminative state tracking that overcomes them by leveraging the problem structure. An offline evaluation with dialog data collected from real users shows improvements in both state tracking accuracy and the quality of the posterior probabilities. Features that encode speech recognition error patterns are particularly helpful, and training requires relatively few dialogs.", "phrases": ["spoken dialog system", "discriminative state tracking", "maximum-entropy model"], "overall_score": 1.4486388747731718, "scores": [0.9527359271757851, 0.9487269463850726, 0.5240393765775884], "rank_score": 0.8085007500461487} -{"id": "srikumar-roth-2011-joint", "title": "A Joint Model for Extended Semantic Role Labeling", "abstract": "This paper presents a model that extends semantic role labeling. Existing approaches independently analyze relations expressed by verb predicates or those expressed as nominalizations. However, sentences express relations via other linguistic phenomena as well. Furthermore, these phenomena interact with each other, thus restricting the structures they articulate. In this paper, we use this intuition to define a joint inference model that captures the inter-dependencies between verb semantic role labeling and relations expressed using prepositions. 
The scarcity of jointly labeled data presents a crucial technical challenge for learning a joint model. The key strength of our model is that we use existing structure predictors as black boxes. By enforcing consistency constraints between their predictions, we show improvements in the performance of both tasks without retraining the individual models.", "phrases": ["joint model", "semantic role labeling", "preposition"], "overall_score": 1.4485507836942195, "scores": [0.8463477156401034, 1.037317820041948, 0.5416892207545919], "rank_score": 0.8084515854788811} -{"id": "kober-etal-2021-data", "title": "Data Augmentation for Hypernymy Detection", "abstract": "The automatic detection of hypernymy relationships represents a challenging problem in NLP. The successful application of state-of-the-art supervised approaches using distributed representations has generally been impeded by the limited availability of high quality training data. We have developed two novel data augmentation techniques which generate new training examples from existing ones. First, we combine the linguistic principles of hypernym transitivity and intersective modifier-noun composition to generate additional pairs of vectors, such as \u201csmall dog - dog\u201d or \u201csmall dog - animal\u201d, for which a hypernymy relationship can be assumed. Second, we use generative adversarial networks (GANs) to generate pairs of vectors for which the hypernymy relation can also be assumed. We furthermore present two complementary strategies for extending an existing dataset by leveraging linguistic resources such as WordNet. Using an evaluation across 3 different datasets for hypernymy detection and 2 different vector spaces, we demonstrate that both of the proposed automatic data augmentation and dataset extension strategies substantially improve classifier performance.", "phrases": ["hypernymy detection", "data augmentation", "entailment"], "overall_score": 1.1207489531422754, "scores": [1.0032880656445184, 0.8809614131964705, 0.541098956328798], "rank_score": 0.808449478389929} -{"id": "hughes-ramage-2007-lexical", "title": "Lexical Semantic Relatedness with Random Graph Walks", "abstract": "Many systems for tasks such as question answering, multi-document summarization, and information retrieval need robust numerical measures of lexical relatedness. Standard thesaurus-based measures of word pair similarity are based on only a single path between those words in the thesaurus graph. By contrast, we propose a new model of lexical semantic relatedness that incorporates information from every explicit or implicit path connecting the two words in the entire graph. Our model uses a random walk over nodes and edges derived from WordNet links and corpus statistics. We treat the graph as a Markov chain and compute a word-specific stationary distribution via a generalized PageRank algorithm. Semantic relatedness of a word pair is scored by a novel divergence measure, ZKL, that outperforms existing measures on certain classes of distributions. 
In our experiments, the resulting relatedness measure is the WordNet-based measure most highly correlated with human similarity judgments by rank ordering at \u03c1 = .90.", "phrases": ["random walk", "lexical semantic relatedness", "thesauri", "knowledge-based measure"], "overall_score": 2.380248970985106, "scores": [0.9914497666102529, 1.1561817970831882, 0.5585114470287815, 0.5274087630814867], "rank_score": 0.8083879434509272} -{"id": "liu-etal-2005-using-conditional", "title": "Using Conditional Random Fields for Sentence Boundary Detection in Speech", "abstract": "Sentence boundary detection in speech is important for enriching speech recognition output, making it easier for humans to read and downstream modules to process. In previous work, we have developed hidden Markov model (HMM) and maximum entropy (Maxent) classifiers that integrate textual and prosodic knowledge sources for detecting sentence boundaries. In this paper, we evaluate the use of a conditional random field (CRF) for this task and relate results with this model to our prior work. We evaluate across two corpora (conversational telephone speech and broadcast news speech) on both human transcriptions and speech recognition output. In general, our CRF model yields a lower error rate than the HMM and Maxent models on the NIST sentence boundary detection task in speech, although it is interesting to note that the best results are achieved by three-way voting among the classifiers. This probably occurs because each model has different strengths and weaknesses for modeling the knowledge sources.", "phrases": ["conditional random fields", "boundary detection", "low error rate", "speech processing task", "phone"], "overall_score": 1.861220132756094, "scores": [0.9058104011026691, 1.1547260290907984, 0.8638983699535402, 0.5906862498148355, 0.5264671163542035], "rank_score": 0.8083176332632094} -{"id": "xu-etal-2021-document-graph", "title": "Document Graph for Neural Machine Translation", "abstract": "Previous works have shown that contextual information can improve the performance of neural machine translation (NMT). However, most existing document-level NMT methods failed to leverage contexts beyond a few previous sentences. How to make use of the whole document as global contexts is still a challenge. To address this issue, we hypothesize that a document can be represented as a graph that connects relevant contexts regardless of their distances. We employ several types of relations, including adjacency, syntactic dependency, lexical consistency, and coreference, to construct the document graph. Then, we incorporate both source and target graphs into the conventional Transformer architecture with graph convolutional networks. Experiments on various NMT benchmarks, including IWSLT English\u2013French, Chinese-English, WMT English\u2013German and Opensubtitle English\u2013Russian, demonstrate that using document graphs can significantly improve the translation quality.
Extensive analysis verifies that the document graph is beneficial for capturing discourse phenomena.", "phrases": ["neural machine translation", "relevant context", "distance", "document graph"], "overall_score": 1.3007797934211427, "scores": [0.9545756954975332, 0.8877671987960384, 0.850133643068014, 0.5404031739255787], "rank_score": 0.8082199278217911} -{"id": "ye-ling-2018-hybrid", "title": "Hybrid semi-Markov CRF for Neural Sequence Labeling", "abstract": "This paper proposes hybrid semi-Markov conditional random fields (SCRFs) for neural sequence labeling in natural language processing. Based on conventional conditional random fields (CRFs), SCRFs have been designed for the tasks of assigning labels to segments by extracting features from and describing transitions between segments instead of words. In this paper, we improve the existing SCRF methods by employing word-level and segment-level information simultaneously. First, word-level labels are utilized to derive the segment scores in SCRFs. Second, a CRF output layer and an SCRF output layer are integrated into a unified neural network and trained jointly. Experimental results on CoNLL 2003 named entity recognition (NER) shared task show that our model achieves state-of-the-art performance when no external knowledge is used.", "phrases": ["crf", "random field", "segment", "word-level label"], "overall_score": 1.775764273559264, "scores": [1.2465488390242216, 0.8308974036857636, 0.5974627845420396, 0.5578315708202798], "rank_score": 0.8081851495180761} -{"id": "li-etal-2020-connecting", "title": "Connecting the Dots: Event Graph Schema Induction with Path Language Modeling", "abstract": "Event schemas can guide our understanding and ability to make predictions with respect to what might happen next. We propose a new Event Graph Schema, where two event types are connected through multiple paths involving entities that fill important roles in a coherent story. We then introduce Path Language Model, an auto-regressive language model trained on event-event paths, and select salient and coherent paths to probabilistically construct these graph schemas. We design two evaluation metrics, instance coverage and instance coherence, to evaluate the quality of graph schema induction, by checking when coherent event instances are covered by the schema graph. Intrinsic evaluations show that our approach is highly effective at inducing salient and coherent schemas. Extrinsic evaluations show the induced schema repository provides significant improvement to downstream end-to-end Information Extraction over a state-of-the-art joint neural extraction model, when used as additional global features to unfold instance graphs.", "phrases": ["schema", "event type", "story", "auto-regressive language model"], "overall_score": 1.4476253253653806, "scores": [0.9453034643852175, 0.8877139896801473, 0.843754151679281, 0.554968703659857], "rank_score": 0.8079350773511258} -{"id": "huang-etal-2006-statistical", "title": "Statistical Syntax-Directed Translation with Extended Domain of Locality", "abstract": "In syntax-directed translation, the source-language input is first parsed into a parse-tree, which is then recursively converted into a string in the target-language. We model this conversion by an extended tree-to-string transducer that has multi-level trees on the source-side, which gives our system more expressive power and flexibility. 
We also define a direct probability model and use a linear-time dynamic programming algorithm to search for the best derivation. The model is then extended to the general log-linear framework in order to incorporate other features like n-gram language models. We devise a simple-yet-effective algorithm to generate non-duplicate k-best translations for n-gram rescoring. Preliminary experiments on English-to-Chinese translation show a significant improvement in terms of translation quality compared to a state-of-the-art phrase-based system.", "phrases": ["syntax-directed translation", "string", "translation quality", "tree-based model"], "overall_score": 2.917247342493817, "scores": [0.9696839569900141, 1.137022888832793, 0.5624948539635714, 0.5623836345449225], "rank_score": 0.8078963335828252} -{"id": "finch-sumita-2008-dynamic", "title": "Dynamic Model Interpolation for Statistical Machine Translation", "abstract": "This paper presents a technique for class-dependent decoding for statistical machine translation (SMT). The approach differs from previous methods of class-dependent translation in that the class-dependent forms of all models are integrated directly into the decoding process. We employ probabilistic mixture weights between models that can change dynamically on a segment-by-segment basis depending on the characteristics of the source segment. The effectiveness of this approach is demonstrated by evaluating its performance on travel conversation data. We used the approach to tackle the translation of questions and declarative sentences using class-dependent models. To achieve this, our system integrated two sets of models specifically built to deal with sentences that fall into one of two classes of dialog sentence: questions and declarations, with a third set of models built to handle the general class. The technique was thoroughly evaluated on data from 17 language pairs using 6 machine translation evaluation metrics. We found the results were corpus-dependent, but in most cases our system was able to improve translation performance, and for some languages the improvements were substantial.", "phrases": ["statistical machine translation", "declarative sentence", "general model"], "overall_score": 1.6798085112590515, "scores": [0.9761710196307531, 0.8874627337297191, 0.5598176554760342], "rank_score": 0.8078171362788354} -{"id": "merrill-etal-2020-formal", "title": "A Formal Hierarchy of RNN Architectures", "abstract": "We develop a formal hierarchy of the expressive capacity of RNN architectures. The hierarchy is based on two formal properties: space complexity, which measures the RNN's memory, and rational recurrence, defined as whether the recurrent update can be described by a weighted finite-state machine. We place several RNN variants within this hierarchy. For example, we prove the LSTM is not rational, which formally separates it from the related QRNN (Bradbury et al., 2016). We also show how these models' expressive capacity is expanded by stacking multiple layers or composing them with different pooling functions. Our results build on the theory of \u201csaturated\u201d RNNs (Merrill, 2019). While formally extending these findings to unsaturated RNNs is left to future work, we hypothesize that the practical learnable capacity of unsaturated RNNs obeys a similar hierarchy. 
Experimental findings from training unsaturated networks on formal languages support this conjecture.", "phrases": ["formal hierarchy", "rnn architecture", "finite-state machine"], "overall_score": 1.3000547452059679, "scores": [0.9807918518258049, 0.8776436701819151, 0.5648727681016684], "rank_score": 0.8077694300364628} -{"id": "wu-yarowsky-2020-computational", "title": "Computational Etymology and Word Emergence", "abstract": "We developed an extensible, comprehensive Wiktionary parser that improves over several existing parsers. We predict the etymology of a word across the full range of etymology types and languages in Wiktionary, showing improvements over a strong baseline. We also model word emergence and show the application of etymology in modeling this phenomenon. We release our parser to further research in this understudied field.", "phrases": ["etymology", "word emergence", "wiktionary parser"], "overall_score": 1.1190741339185366, "scores": [0.9613845797064616, 0.8974546924151092, 0.5628847829655061], "rank_score": 0.8072413516956923} -{"id": "duong-etal-2015-cross", "title": "Cross-lingual Transfer for Unsupervised Dependency Parsing Without Parallel Data", "abstract": "Cross-lingual transfer has been shown to produce good results for dependency parsing of resource-poor languages. Although this avoids the need for a target language treebank, most approaches have still used large parallel corpora. However, parallel data is scarce for low-resource languages, and we report a new method that does not need parallel data. Our method learns syntactic word embeddings that generalise over the syntactic contexts of a bilingual vocabulary, and incorporates these into a neural network parser. We show empirical improvements over a baseline delexicalised parser on both the CoNLL and Universal Dependency Treebank datasets. We analyse the importance of the source languages, and show that combining multiple source-languages leads to a substantial improvement.", "phrases": ["dependency parsing", "parallel data", "cross-lingual transfer", "pos tag", "similar approach"], "overall_score": 2.005675997766397, "scores": [0.9663649125595273, 0.7831371382115472, 0.9336820417773734, 0.8289505465937174, 0.5235823523542992], "rank_score": 0.8071433982992928} -{"id": "xie-etal-2021-knowledge-interactive", "title": "Knowledge-Interactive Network with Sentiment Polarity Intensity-Aware Multi-Task Learning for Emotion Recognition in Conversations", "abstract": "Emotion Recognition in Conversation (ERC) has gained much attention from the NLP community recently. Some models concentrate on leveraging commonsense knowledge or multi-task learning to help complicated emotional reasoning. However, these models neglect direct utterance-knowledge interaction. In addition, these models utilize emotion-indirect auxiliary tasks, which provide limited affective information for the ERC task. To address the above issues, we propose a Knowledge-Interactive Network with sentiment polarity intensity-aware multi-task learning, namely KI-Net, which leverages both commonsense knowledge and sentiment lexicon to augment semantic information. Specifically, we use a self-matching module for internal utterance-knowledge interaction. Considering correlations with the ERC task, a phrase-level Sentiment Polarity Intensity Prediction (SPIP) task is devised as an auxiliary task. Experiments show that all knowledge integration, self-matching and SPIP modules improve the model performance respectively on three datasets. 
Moreover, our KI-Net model shows 1.04% performance improvement over the state-of-the-art model on the IEMOCAP dataset.", "phrases": ["intensity-aware multi-task learning", "emotion recognition", "conversation"], "overall_score": 0.8866515862510528, "scores": [0.8318369899767158, 0.80090115581479, 0.7884570166783447], "rank_score": 0.8070650541566168} -{"id": "hassan-etal-2006-graph", "title": "Graph Based Semi-Supervised Approach for Information Extraction", "abstract": "Classification techniques deploy supervised labeled instances to train classifiers for various classification problems. However, labeled instances are limited, expensive, and time consuming to obtain, due to the need for experienced human annotators. Meanwhile, a large amount of unlabeled data is usually easy to obtain. Semi-supervised learning addresses the problem of utilizing unlabeled data along with supervised labeled data, to build better classifiers. In this paper we introduce a semi-supervised approach based on mutual reinforcement in graphs to obtain more labeled data to enhance the classifier accuracy. The approach has been used to supplement a maximum entropy model for semi-supervised training of the ACE Relation Detection and Characterization (RDC) task. ACE RDC is considered a hard task in information extraction due to a lack of large amounts of training data and inconsistencies in the available data. The proposed approach provides 10% relative improvement over the state-of-the-art supervised baseline system.", "phrases": ["semi-supervised approach", "information extraction", "review"], "overall_score": 1.1187820335527585, "scores": [0.9516062361751065, 0.9204271611783755, 0.5490585401098698], "rank_score": 0.8070306458211173} -{"id": "soricut-brill-2004-unified", "title": "A Unified Framework For Automatic Evaluation Using 4-Gram Co-occurrence Statistics", "abstract": "In this paper we propose a unified framework for automatic evaluation of NLP applications using N-gram co-occurrence statistics. The automatic evaluation metrics proposed to date for Machine Translation and Automatic Summarization are particular instances from the family of metrics we propose. We show that different members of the same family of metrics explain best the variations obtained with human evaluations, according to the application being evaluated (Machine Translation, Automatic Summarization, and Automatic Question Answering) and the evaluation guidelines used by humans for evaluating such applications.", "phrases": ["unified framework", "automatic evaluation", "n-gram co-occurrence statistic"], "overall_score": 1.118500500932577, "scores": [0.9315147911089482, 0.8447795714149033, 0.644188326367024], "rank_score": 0.8068275629636251} -{"id": "xu-durrett-2018-spherical", "title": "Spherical Latent Spaces for Stable Variational Autoencoders", "abstract": "A hallmark of variational autoencoders (VAEs) for text processing is their combination of powerful encoder-decoder models, such as LSTMs, with simple latent distributions, typically multivariate Gaussians. These models pose a difficult optimization problem: there is an especially bad local optimum where the variational posterior always equals the prior and the model does not use the latent variable at all, a kind of \u201ccollapse\u201d which is encouraged by the KL divergence term of the objective. In this work, we experiment with another choice of latent distribution, namely the von Mises-Fisher (vMF) distribution, which places mass on the surface of the unit hypersphere. 
With this choice of prior and posterior, the KL divergence term now only depends on the variance of the vMF distribution, giving us the ability to treat it as a fixed hyperparameter. We show that doing so not only averts the KL collapse, but consistently gives better likelihoods than Gaussians across a range of modeling conditions, including recurrent language modeling and bag-of-words document modeling. An analysis of the properties of our vMF representations shows that they learn richer and more nuanced structures in their latent representations than their Gaussian counterparts.", "phrases": ["variational autoencoder", "vae", "language modeling", "text generation"], "overall_score": 2.184815700383809, "scores": [1.5015727328392932, 0.6481686760072448, 0.5508947347021553, 0.5265059522581893], "rank_score": 0.8067855239517207} -{"id": "kate-mooney-2007-semi", "title": "Semi-Supervised Learning for Semantic Parsing using Support Vector Machines", "abstract": "We present a method for utilizing unannotated sentences to improve a semantic parser which maps natural language (NL) sentences into their formal meaning representations (MRs). Given NL sentences annotated with their MRs, the initial supervised semantic parser learns the mapping by training Support Vector Machine (SVM) classifiers for every production in the MR grammar. Our new method applies the learned semantic parser to the unannotated sentences and collects unlabeled examples which are then used to retrain the classifiers using a variant of transductive SVMs. Experimental results show the improvements obtained over the purely supervised parser, particularly when the annotated training set is small.", "phrases": ["semantic parsing", "svm", "semi-supervised learning"], "overall_score": 1.2984392279832424, "scores": [0.9434312370712875, 0.933015199733526, 0.543850521440989], "rank_score": 0.8067656527486008} -{"id": "pado-etal-2009-robust", "title": "Robust Machine Translation Evaluation with Entailment Features", "abstract": "Existing evaluation metrics for machine translation lack crucial robustness: their correlations with human quality judgments vary considerably across languages and genres. We believe that the main reason is their inability to properly capture meaning: A good translation candidate means the same thing as the reference translation, regardless of formulation. We propose a metric that evaluates MT output based on a rich set of features motivated by textual entailment, such as lexical-semantic (in-)compatibility and argument structure overlap. We compare this metric against a combination metric of four state-of-the-art scores (BLEU, NIST, TER, and METEOR) in two different settings. The combination metric outperforms the individual scores, but is bested by the entailment-based metric. Combining the entailment and traditional features yields further improvements.", "phrases": ["machine translation", "entailment feature", "rich set"], "overall_score": 1.4450655204091047, "scores": [0.8935454855316395, 0.9912152016225613, 0.5347585818551885], "rank_score": 0.8065064230031297} -{"id": "eguchi-lavrenko-2006-sentiment", "title": "Sentiment Retrieval using Generative Models", "abstract": "Ranking documents or sentences according to both topic and sentiment relevance should serve a critical function in helping users when topics and sentiment polarities of the targeted text are not explicitly given, as is often the case on the web. 
In this paper, we propose several sentiment information retrieval models in the framework of probabilistic language models, assuming that a user both inputs query terms expressing a certain topic and also specifies a sentiment polarity of interest in some manner. We combine sentiment relevance models and topic relevance models with model parameters estimated from training data, considering the topic dependence of the sentiment. Our experiments prove that our models are effective.", "phrases": ["probabilistic language model", "dependence", "sentiment retrieval"], "overall_score": 1.6769493175673411, "scores": [0.9590613663522766, 0.9178893905945286, 0.5423757073297295], "rank_score": 0.8064421547588448} -{"id": "zhang-xue-2018-structured", "title": "Structured Interpretation of Temporal Relations", "abstract": "Temporal relations between events and time expressions in a document are often modeled in an unstructured manner where relations between individual pairs of time expressions and events are considered in isolation. This often results in inconsistent and incomplete annotation and computational modeling. We propose a novel annotation approach where events and time expressions in a document form a dependency tree in which each dependency relation corresponds to an instance of temporal anaphora where the antecedent is the parent and the anaphor is the child. We annotate a corpus of 235 documents using this approach in the two genres of news and narratives, with 48 documents doubly annotated. We report a stable and high inter-annotator agreement on the doubly annotated subset, validating our approach, and perform a quantitative comparison between the two genres of the entire corpus. We make this corpus publicly available.", "phrases": ["parent", "inter-annotator agreement", "temporal dependency tree"], "overall_score": 1.676874852192377, "scores": [1.0219232363395965, 0.8659439090151708, 0.531351888094589], "rank_score": 0.8064063444831188} -{"id": "izumi-etal-2003-automatic", "title": "Automatic Error Detection in the Japanese Learners' English Spoken Data", "abstract": "This paper describes a method of detecting grammatical and lexical errors made by Japanese learners of English and other techniques that improve the accuracy of error detection with a limited amount of training data. In this paper, we demonstrate to what extent the proposed methods hold promise by conducting experiments using our learner corpus, which contains information on learners' errors.", "phrases": ["japanese learners", "error type", "preposition error", "native english speaker"], "overall_score": 2.595603687162316, "scores": [1.285254435115677, 0.8786085971435316, 0.5397862201444924, 0.5218292418076685], "rank_score": 0.8063696235528424} -{"id": "huang-etal-2005-machine", "title": "Machine Translation as Lexicalized Parsing with Hooks", "abstract": "We adapt the \"hook\" trick for speeding up bilexical parsing to the decoding problem for machine translation models that are based on combining a synchronous context free grammar as the translation model with an n-gram language model. 
This dynamic programming technique yields lower complexity algorithms than have previously been described for an important class of translation models.", "phrases": ["complexity", "machine translation", "hook trick"], "overall_score": 1.7716594292678578, "scores": [0.9438558085258736, 0.9172749692802549, 0.5578200853082499], "rank_score": 0.8063169543714594} -{"id": "piperidis-2012-meta", "title": "The META-SHARE Language Resources Sharing Infrastructure: Principles, Challenges, Solutions", "abstract": "Language resources have become a key factor in the development cycle of language technology. The current prevailing methodologies, the sheer number of languages and the vast volumes of digital content together with the wide palette of useful content processing applications, render new models for managing the underlying language resources indispensable. This paper presents META-SHARE, an open resource exchange infrastructure, which aims to boost visibility, documentation, identification, openness and sharing, collaboration, preservation and interoperability of language data and basic language processing tools. META-SHARE is implemented as a network of distributed repositories of language resources. It offers providers and consumers of resources the necessary functionalities for describing, storing, searching, licensing and downloading language resources in a single integrated technical platform. META-SHARE favours and aligns itself with the growing open data and open source tools movement. To this end, it has prepared the necessary underlying legal framework consisting of a Charter for language resource sharing, as well as a set of licensing templates aiming to act as recommended licence models in an attempt to facilitate the legal interoperability of language resources. In its current version, META-SHARE features 13 resource repositories, with over 1200 resource packages.", "phrases": ["meta-share", "language resource", "metadata schema"], "overall_score": 1.7715468368476777, "scores": [1.3131493589076593, 0.5629885244068983, 0.5426592507436179], "rank_score": 0.8062657113527251} -{"id": "yamamoto-sumita-2007-bilingual", "title": "Bilingual Cluster Based Models for Statistical Machine Translation", "abstract": "We propose a domain specific model for statistical machine translation. It is well-known that domain specific language models perform well in automatic speech recognition. We show that domain specific language and translation models also benefit statistical machine translation. However, there are two problems with using domain specific models. The first is the data sparseness problem. We employ an adaptation technique to overcome this problem. The second issue is domain prediction. In order to perform adaptation, the domain must be provided, however in many cases, the domain is not known or changes dynamically. For these cases, not only the translation target sentence but also the domain must be predicted. This paper focuses on the domain prediction problem for statistical machine translation. In the proposed method, a bilingual training corpus, is automatically clustered into sub-corpora. Each sub-corpus is deemed to be a domain. The domain of a source sentence is predicted by using its similarity to the sub-corpora. The predicted domain (sub-corpus) specific language and translation models are then used for the translation decoding. This approach gave an improvement of 2.7 in BLEU score on the IWSLT05 Japanese to English evaluation corpus (improving the score from 52.4 to 55.1). 
This is a substantial gain and indicates the validity of the proposed bilingual cluster based models.", "phrases": ["cluster", "statistical machine translation", "training corpus"], "overall_score": 1.7714180520728942, "scores": [0.9722818930501077, 0.8812260594449433, 0.5651133441324235], "rank_score": 0.8062070988758249} -{"id": "li-etal-2013-recursive", "title": "Recursive Autoencoders for ITG-Based Translation", "abstract": "While inversion transduction grammar (ITG) is well suited for modeling ordering shifts between languages, how to make applying the two reordering rules (i.e., straight and inverted) dependent on actual blocks being merged remains a challenge. Unlike previous work that only uses boundary words, we propose to use recursive autoencoders to make full use of the entire merging blocks alternatively. The recursive autoencoders are capable of generating vector space representations for variable-sized phrases, which enable predicting orders to exploit syntactic and semantic information from a neural language modeling\u2019s perspective. Experiments on the NIST 2008 dataset show that our system significantly improves over the MaxEnt classifier by 1.07 BLEU points.", "phrases": ["block", "recursive autoencoder", "machine translation"], "overall_score": 1.933156598672613, "scores": [0.9899056192007176, 0.8338213890791625, 0.5948397497461252], "rank_score": 0.8061889193420018} -{"id": "schwenk-etal-2007-smooth", "title": "Smooth Bilingual N-Gram Translation", "abstract": "We address the problem of smoothing translation probabilities in a bilingual N-gram-based statistical machine translation system. It is proposed to project the bilingual tuples onto a continuous space and to estimate the translation probabilities in this representation. A neural network is used to perform the projection and the probability estimation. Smoothing probabilities is most important for tasks with a limited amount of training material. We consider here the BTEC task of the 2006 IWSLT evaluation. Improvements in all official automatic measures are reported when translating from Italian to English. Using a continuous space model for the translation model and the target language model, an improvement of 1.5 BLEU on the test data is observed.", "phrases": ["translation probability", "tuple", "continuous space"], "overall_score": 1.297465143760405, "scores": [0.9799247270401183, 0.8812328041316502, 0.5573237294034772], "rank_score": 0.8061604201917486} -{"id": "choudhary-etal-2018-neural", "title": "Neural Machine Translation for English-Tamil", "abstract": "A huge amount of valuable resources is available on the web in English, which are often translated into local languages to facilitate knowledge sharing among local people who are not much familiar with English. However, translating such content manually is a very tedious, costly, and time-consuming process. To this end, machine translation is an efficient approach to translate text without any human involvement. Neural machine translation (NMT) is one of the most recent and effective translation techniques amongst all existing machine translation systems. In this paper, we apply NMT to the English-Tamil language pair. We propose a novel neural machine translation technique using word-embedding along with Byte-Pair-Encoding (BPE) to develop an efficient translation system that overcomes the OOV (Out Of Vocabulary) problem for languages which do not have many translations available online. We use the BLEU score for evaluating the system performance. 
Experimental results confirm that our proposed MIDAS translator (8.33 BLEU score) outperforms Google translator (3.75 BLEU score).", "phrases": ["english-tamil", "vocabulary", "neural machine translation"], "overall_score": 1.2973447212986202, "scores": [0.9975669792377138, 0.863905985581804, 0.5567838277084906], "rank_score": 0.8060855975093361} -{"id": "amidei-etal-2019-agreement", "title": "Agreement is overrated: A plea for correlation to assess human evaluation reliability", "abstract": "Inter-Annotator Agreement (IAA) is used as a means of assessing the quality of NLG evaluation data, in particular, its reliability. According to existing scales of IAA interpretation \u2013 see, for example, Lommel et al. (2014), Liu et al. (2016), Sedoc et al. (2018) and Amidei et al. (2018a) \u2013 most data collected for NLG evaluation fail the reliability test. We confirmed this trend by analysing papers published over the last 10 years in NLG-specific conferences (in total 135 papers that included some sort of human evaluation study). Following Sampson and Babarczy (2008), Lommel et al. (2014), Joshi et al. (2016) and Amidei et al. (2018b), such phenomena can be explained in terms of irreducible human language variability. Using three case studies, we show the limits of considering IAA as the only criterion for checking evaluation reliability. Given human language variability, we propose that for human evaluation of NLG, correlation coefficients and agreement coefficients should be used together to obtain a better assessment of the evaluation data reliability. This is illustrated using the three case studies.", "phrases": ["evaluation reliability", "case study", "agreement"], "overall_score": 1.2973081276586307, "scores": [0.9594858758346754, 0.9278257727208087, 0.5308769332518], "rank_score": 0.8060628606024279} -{"id": "cui-etal-2019-cross", "title": "Cross-Lingual Machine Reading Comprehension", "abstract": "Though the community has made great progress on the Machine Reading Comprehension (MRC) task, most of the previous works are solving English-based MRC problems, and there are few efforts on other languages, mainly due to the lack of large-scale training data. In this paper, we propose the Cross-Lingual Machine Reading Comprehension (CLMRC) task for languages other than English. Firstly, we present several back-translation approaches for the CLMRC task, which are straightforward to adopt. However, exactly aligning the answer into the source language is difficult and could introduce additional noise. In this context, we propose a novel model called Dual BERT, which takes advantage of the large-scale training data provided by a rich-resource language (such as English), learns the semantic relations between the passage and question in a bilingual context, and then utilizes the learned knowledge to improve the reading comprehension performance of the low-resource language. We conduct experiments on two Chinese machine reading comprehension datasets, CMRC 2018 and DRCD. The results show consistent and significant improvements over various state-of-the-art systems by a large margin, which demonstrates the potential of the CLMRC task. 
Resources available: ", "phrases": ["machine reading comprehension", "mrc", "cross-lingual mrc"], "overall_score": 1.8559951727553896, "scores": [0.9489019558417578, 0.8309548972287597, 0.6382885328296969], "rank_score": 0.8060484619667382} -{"id": "pan-etal-2006-annotated", "title": "An Annotated Corpus of Typical Durations of Events", "abstract": "In this paper, we present our work on generating an annotated corpus for extracting information about the typical durations of events from texts. We include the annotation guidelines, the event classes we categorized, the way we use normal distributions to model vague and implicit temporal information, and how we evaluate inter-annotator agreement. The experimental results show that our guidelines are effective in improving the inter-annotator agreement.", "phrases": ["annotated corpus", "duration", "guideline"], "overall_score": 1.4441526736837114, "scores": [0.8880572286321883, 0.9615769527961036, 0.5683566792075335], "rank_score": 0.8059969535452751} -{"id": "belinkov-2022-probing", "title": "Probing Classifiers: Promises, Shortcomings, and Advances", "abstract": "Probing classifiers have emerged as one of the prominent methodologies for interpreting and analyzing deep neural network models of natural language processing. The basic idea is simple\u2014a classifier is trained to predict some linguistic property from a model's representations\u2014and has been used to examine a wide variety of models and properties. However, recent studies have demonstrated various methodological limitations of this approach. This squib critically reviews the probing classifiers framework, highlighting their promises, shortcomings, and advances.", "phrases": ["methodology", "linguistic property", "probing classifier"], "overall_score": 1.7705492640988874, "scores": [0.9503194456798467, 0.8858438779552894, 0.5812717656519949], "rank_score": 0.8058116964290436} -{"id": "sterckx-etal-2016-supervised", "title": "Supervised Keyphrase Extraction as Positive Unlabeled Learning", "abstract": "The problem of noisy and unbalanced training data for supervised keyphrase extraction results from the subjectivity of keyphrase assignment, which we quantify by crowdsourcing keyphrases for news and fashion magazine articles with many annotators per document. We show that annotators exhibit substantial disagreement, meaning that single annotator data could lead to very different training sets for supervised keyphrase extractors. Thus, annotations from single authors or readers lead to noisy training data and poor extraction performance of the resulting supervised extractor. We provide a simple but effective solution to still work with such data by reweighting the importance of unlabeled candidate phrases in a two stage Positive Unlabeled Learning setting. We show that performance of trained keyphrase extractors approximates a classi-\ufb01er trained on articles labeled by multiple an-notators, leading to higher average F 1 scores and better rankings of keyphrases. We apply this strategy to a variety of test collections from different backgrounds and show improvements over strong baseline models.", "phrases": ["positive unlabeled learning", "annotator", "supervised keyphrase extraction"], "overall_score": 1.1167492483314219, "scores": [0.9503203132904677, 0.9322991730824842, 0.5340734173533781], "rank_score": 0.80556430124211} -{"id": "chiticariu-etal-2013-rule", "title": "Rule-Based Information Extraction is Dead! 
Long Live Rule-Based Information Extraction Systems!", "abstract": "The rise of \u201cBig Data\u201d analytics over unstructured text has led to renewed interest in information extraction (IE). We surveyed the landscape of IE technologies and identified a major disconnect between industry and academia: while rule-based IE dominates the commercial world, it is widely regarded as dead-end technology by academia. We believe the disconnect stems from the way in which the two communities measure the benefits and costs of IE, as well as academia\u2019s perception that rule-based IE is devoid of research challenges. We make a case for the importance of rule-based IE to industry practitioners. We then lay out a research agenda in advancing the state-of-the-art in rule-based IE systems, which we believe has the potential to bridge the gap between academic research and industry practice.", "phrases": ["information extraction", "machine learning", "rule-based system", "high level"], "overall_score": 2.327736509769071, "scores": [1.0495964107051952, 1.0129281161565178, 0.6137604369088795, 0.5450812890790095], "rank_score": 0.8053415632124006} -{"id": "li-etal-2020-multi-encoder", "title": "Does Multi-Encoder Help? A Case Study on Context-Aware Neural Machine Translation", "abstract": "In encoder-decoder neural models, multiple encoders are in general used to represent the contextual information in addition to the individual sentence. In this paper, we investigate multi-encoder approaches in document-level neural machine translation (NMT). Surprisingly, we find that the context encoder not only encodes the surrounding sentences but also behaves as a noise generator. This makes us rethink the real benefits of multi-encoder in context-aware translation - some of the improvements come from robust training. We compare several methods that introduce noise and/or well-tuned dropout setup into the training of these encoders. Experimental results show that noisy training plays an important role in multi-encoder-based NMT, especially when the training data is small. Also, we establish a new state-of-the-art on the IWSLT Fr-En task by careful use of noise generation and dropout methods.", "phrases": ["neural machine translation", "context encoder", "regularization"], "overall_score": 1.9309745816860888, "scores": [0.9200801169299911, 0.9691545100876342, 0.5266022157223246], "rank_score": 0.8052789475799833} -{"id": "luo-etal-2017-unsupervised", "title": "Unsupervised Learning of Morphological Forests", "abstract": "This paper focuses on unsupervised modeling of morphological families, collectively comprising a forest over the language vocabulary. This formulation enables us to capture edge-wise properties reflecting single-step morphological derivations, along with global distributional properties of the entire forest. These global properties constrain the size of the affix set and encourage formation of tight morphological families. The resulting objective is solved using Integer Linear Programming (ILP) paired with contrastive estimation. We train the model by alternating between optimizing the local log-linear model and the global ILP objective. We evaluate our system on three tasks: root detection, clustering of morphological families, and segmentation. 
Our experiments demonstrate that our model yields consistent gains in all three tasks compared with the best published results.", "phrases": ["morphological family", "integer linear programming", "unsupervised learning"], "overall_score": 1.295780145774953, "scores": [0.9275279343890702, 0.9344722833227911, 0.5533401985243129], "rank_score": 0.8051134720787246} -{"id": "finch-etal-2005-using", "title": "Using Machine Translation Evaluation Techniques to Determine Sentence-level Semantic Equivalence", "abstract": "The task of machine translation (MT) evaluation is closely related to the task of sentence-level semantic equivalence classification. This paper investigates the utility of applying standard MT evaluation methods (BLEU, NIST, WER and PER) to building classifiers to predict semantic equivalence and entailment. We also introduce a novel classification method based on PER which leverages part of speech information of the words contributing to the word matches and non-matches in the sentence. Our results show that MT evaluation techniques are able to produce useful features for paraphrase classification and to a lesser extent entailment. Our technique gives a substantial improvement in paraphrase classification accuracy over all of the other models used in the experiments.", "phrases": ["wer", "classification accuracy", "paraphrase identification"], "overall_score": 1.853337797237455, "scores": [1.0407513269984539, 0.8392226389658243, 0.5347091693645852], "rank_score": 0.8048943784429544} -{"id": "filippova-strube-2008-dependency", "title": "Dependency Tree Based Sentence Compression", "abstract": "We present a novel unsupervised method for sentence compression which relies on a dependency tree representation and shortens sentences by removing subtrees. An automatic evaluation shows that our method obtains results comparable or superior to the state of the art. We demonstrate that the choice of the parser affects the performance of the system. We also apply the method to German and report the results of an evaluation with humans.", "phrases": ["sentence compression", "unsupervised method", "weight"], "overall_score": 2.1796101079270906, "scores": [1.315912630733363, 0.570204979033721, 0.5284721644992446], "rank_score": 0.8048632580887762} -{"id": "bohnet-2010-top", "title": "Top Accuracy and Fast Dependency Parsing is not a Contradiction", "abstract": "In addition to a high accuracy, short parsing and training times are the most important properties of a parser. However, parsing and training times are still relatively long. To determine why, we analyzed the time usage of a dependency parser. We illustrate that the mapping of the features onto their weights in the support vector machine is the major factor in time complexity. To resolve this problem, we implemented the passive-aggressive perceptron algorithm as a Hash Kernel. The Hash Kernel substantially improves the parsing times and takes into account the features of negative examples built during the training. This has led to a higher accuracy. We could further increase the parsing and training speed with a parallel feature extraction and a parallel parsing algorithm. 
We are convinced that the Hash Kernel and the parallelization can be applied successfully to other NLP applications as well, such as transition-based dependency parsers, phrase structure parsers, and machine translation.", "phrases": ["dependency parser", "feature extraction", "head", "execution time"], "overall_score": 2.557585732954644, "scores": [1.2879186369557634, 0.873007494986683, 0.5335744173019511, 0.5245584969324635], "rank_score": 0.8047647615442152} -{"id": "agirre-lopez-de-lacalle-2007-ubc", "title": "UBC-ALM: Combining k-NN with SVD for WSD", "abstract": "This work describes the University of the Basque Country system (UBC-ALM) for the lexical sample and all-words WSD subtasks of SemEval-2007 task 17, where it performed in the second and fifth positions respectively. The system is based on a combination of k-Nearest Neighbor classifiers, with each classifier learning from a distinct set of features: local features (syntactic, collocation features), topical features (bag-of-words, domain information) and latent features learned from a reduced space using Singular Value Decomposition.", "phrases": ["svd", "feature-to-document matrix", "unlabeled data"], "overall_score": 1.1149540345214934, "scores": [1.319902021693453, 0.5535237601428334, 0.5393822027986648], "rank_score": 0.8042693282116504} -{"id": "choshen-abend-2018-automatic", "title": "Automatic Metric Validation for Grammatical Error Correction", "abstract": "Metric validation in Grammatical Error Correction (GEC) is currently done by observing the correlation between human and metric-induced rankings. However, such correlation studies are costly, methodologically troublesome, and suffer from low inter-rater agreement. We propose MAEGE, an automatic methodology for GEC metric validation, that overcomes many of the difficulties in the existing methodology. Experiments with MAEGE shed new light on metric quality, showing for example that the standard M^2 metric fares poorly on corpus-level ranking. Moreover, we use MAEGE to perform a detailed analysis of metric behavior, showing that some types of valid edits are consistently penalized by existing metrics.", "phrases": ["metric validation", "grammatical error correction", "maege"], "overall_score": 1.5649961643660815, "scores": [0.956389621417214, 0.9269426690530447, 0.5294145135779829], "rank_score": 0.8042489346827472} -{"id": "lee-dernoncourt-2016-sequential", "title": "Sequential Short-Text Classification with Recurrent and Convolutional Neural Networks", "abstract": "Recent approaches based on artificial neural networks (ANNs) have shown promising results for short-text classification. However, many short texts occur in sequences (e.g., sentences in a document or utterances in a dialog), and most existing ANN-based systems do not leverage the preceding short texts when classifying a subsequent one. In this work, we present a model based on recurrent neural networks and convolutional neural networks that incorporates the preceding short texts. 
Our model achieves state-of-the-art results on three different datasets for dialog act prediction.", "phrases": ["recurrent", "sequential short-text classification", "conversation", "deep"], "overall_score": 1.9983677225157903, "scores": [0.9771893466110126, 0.8086533806574521, 0.8449886257938165, 0.5859779748636283], "rank_score": 0.8042023319814774} -{"id": "cai-zhao-2016-neural", "title": "Neural Word Segmentation Learning for Chinese", "abstract": "Most previous approaches to Chinese word segmentation formalize this problem as a character-based sequence labeling task where only contextual information within fixed sized local windows and simple interactions between adjacent tags can be captured. In this paper, we propose a novel neural framework which thoroughly eliminates context windows and can utilize complete segmentation history. Our model employs a gated combination neural network over characters to produce distributed representations of word candidates, which are then given to a long short-term memory (LSTM) language scoring model. Experiments on the benchmark datasets show that, without the feature engineering that most existing approaches rely on, our models achieve performance competitive with or better than previous state-of-the-art methods.", "phrases": ["word segmentation", "chinese", "novel neural framework"], "overall_score": 2.2296898873755078, "scores": [0.8314783569293228, 0.9744471901630346, 0.60664636033533], "rank_score": 0.8041906358092291} -{"id": "liu-etal-2017-learning", "title": "Learning Character-level Compositionality with Visual Features", "abstract": "Previous work has modeled the compositionality of words by creating character-level models of meaning, reducing problems of sparsity for rare words. However, in many writing systems compositionality has an effect even at the character level: the meaning of a character is derived from the sum of its parts. In this paper, we model this effect by creating embeddings for characters based on their visual characteristics, creating an image for the character and running it through a convolutional neural network to produce a visual character embedding. Experiments on a text classification task demonstrate that such a model allows for better processing of instances with rare characters in languages such as Chinese, Japanese, and Korean. Additionally, qualitative analyses demonstrate that our proposed model learns to focus on the parts of characters that carry topical content, resulting in embeddings that are coherent in visual space.", "phrases": ["compositionality", "visual feature", "convolutional neural network", "chinese", "japanese"], "overall_score": 1.9977060969402833, "scores": [0.8599812249935119, 0.8585170267150228, 0.8574007483238448, 0.8549331752250733, 0.5888481960569336], "rank_score": 0.8039360742628773} -{"id": "yatskar-etal-2014-see", "title": "See No Evil, Say No Evil: Description Generation from Densely Labeled Images", "abstract": "This paper studies generation of descriptive sentences from densely annotated images. Previous work studied generation from automatically detected visual information but produced a limited class of sentences, hindered by currently unreliable recognition of activities and attributes. Instead, we collect human annotations of objects, parts, attributes and activities in images. These annotations allow us to build a significantly more comprehensive model of language generation and allow us to study what visual information is required to generate human-like descriptions. 
Experiments demonstrate high quality output and that activity annotations and relative spatial location of objects contribute most to producing high quality sentences.", "phrases": ["image", "attribute", "human-like description"], "overall_score": 1.439867948040816, "scores": [0.9436782568184523, 0.8723774233799536, 0.5947611276979495], "rank_score": 0.8036056026321186} -{"id": "pavlopoulos-etal-2021-semeval", "title": "SemEval-2021 Task 5: Toxic Spans Detection", "abstract": "The Toxic Spans Detection task of SemEval-2021 required participants to predict the spans of toxic posts that were responsible for the toxic label of the posts. The task could be addressed as supervised sequence labeling, using training data with gold toxic spans provided by the organisers. It could also be treated as rationale extraction, using classifiers trained on potentially larger external datasets of posts manually annotated as toxic or not, without toxic span annotations. For the supervised sequence labeling approach and evaluation purposes, posts previously labeled as toxic were crowd-annotated for toxic spans. Participants submitted their predicted spans for a held-out test set and were scored using character-based F1. This overview summarises the work of the 36 teams that provided system descriptions.", "phrases": ["toxic spans detection", "offensive span identification", "english language"], "overall_score": 2.17563658833968, "scores": [1.3411836364313459, 0.5348245615771731, 0.5341796789970501], "rank_score": 0.8033959590018563} -{"id": "mcdonald-nivre-2011-analyzing", "title": "Analyzing and Integrating Dependency Parsers", "abstract": "There has been a rapid increase in the volume of research on data-driven dependency parsers in the past five years. This increase has been driven by the availability of treebanks in a wide variety of languages\u2014due in large part to the CoNLL shared tasks\u2014as well as the straightforward mechanisms by which dependency theories of syntax can encode complex phenomena in free word order languages. In this article, our aim is to take a step back and analyze the progress that has been made through an analysis of the two predominant paradigms for data-driven dependency parsing, which are often called graph-based and transition-based dependency parsing. Our analysis covers both theoretical and empirical aspects and sheds light on the kinds of errors each type of parser makes and how they relate to theoretical expectations. Using these observations, we present an integrated system based on a stacking learning framework and show that such a system can learn to overcome the shortcomings of each non-integrated system.", "phrases": ["dependency parser", "sentence length", "discontinuous construction"], "overall_score": 2.365123848424439, "scores": [1.3285980069372272, 0.5440525094565071, 0.5371027831234426], "rank_score": 0.8032510998390588} -{"id": "mao-etal-2020-tchebycheff", "title": "Tchebycheff Procedure for Multi-task Text Classification", "abstract": "Multi-task Learning methods have achieved great progress in text classification. However, existing methods assume that multi-task text classification problems are convex multiobjective optimization problems, which is unrealistic in real-world applications. To address this issue, this paper presents a novel Tchebycheff procedure to optimize the multi-task classification problems without convex assumption. 
The extensive experiments back up our theoretical analysis and validate the superiority of our proposals.", "phrases": ["multi-task text classification", "optimization problem", "tchebycheff procedure"], "overall_score": 1.1135230828646978, "scores": [1.0171291198528984, 0.8427059820308063, 0.5498762424627299], "rank_score": 0.8032371147821449} -{"id": "sharoff-etal-2010-web", "title": "The Web Library of Babel: evaluating genre collections", "abstract": "We present experiments in automatic genre classification on web corpora, comparing a wide variety of features on several different genre-annotated datasets (HGC, I-EN, KI-04, KRYS-I, MGC and SANTINIS). We investigate the performance of several types of features (POS n-grams, character n-grams and word n-grams) and show that simple character n-grams perform best on current collections because of their ability to generalise both lexical and syntactic phenomena related to genres. However, we also show that these impressive results might not be transferrable to the wider web due to the lack of comparability between different annotation labels (many webpages cannot be described in terms of the genre labels in individual collections), lack of representativeness of existing collections (many genres are represented by webpages coming from a small number of sources) as well as problems in the reliability of genre annotation (many pages from the web are difficult to interpret in terms of the labels available). This suggests that more research is needed to understand genres on the Web.", "phrases": ["web", "genre collection", "mgc", "character n-gram", "register"], "overall_score": 2.1190828130860666, "scores": [0.9332420799109074, 1.12990955592549, 0.8936330743481451, 0.533959388528909, 0.5241039102818713], "rank_score": 0.8029696017990645} -{"id": "huang-etal-2010-soft", "title": "Soft Syntactic Constraints for Hierarchical Phrase-Based Translation Using Latent Syntactic Distributions", "abstract": "In this paper, we present a novel approach to enhance hierarchical phrase-based machine translation systems with linguistically motivated syntactic features. Rather than directly using treebank categories as in previous studies, we learn a set of linguistically-guided latent syntactic categories automatically from a source-side parsed, word-aligned parallel corpus, based on the hierarchical structure among phrase pairs as well as the syntactic structure of the source side. In our model, each X nonterminal in a SCFG rule is decorated with a real-valued feature vector computed based on its distribution of latent syntactic categories. These feature vectors are utilized at decoding time to measure the similarity between the syntactic analysis of the source side and the syntax of the SCFG rules that are applied to derive translations. Our approach maintains the advantages of hierarchical phrase-based translation systems while at the same time naturally incorporates soft syntactic constraints.", "phrases": ["latent syntactic distribution", "feature vector", "translation rule", "soft constraint modeling"], "overall_score": 1.925155448402078, "scores": [1.4923630059631434, 0.5997755452539816, 0.5883296541091392, 0.530940516715879], "rank_score": 0.8028521805105359} -{"id": "stanovsky-etal-2018-supervised", "title": "Supervised Open Information Extraction", "abstract": "We present data and methods that enable a supervised learning approach to Open Information Extraction (Open IE). 
Central to the approach is a novel formulation of Open IE as a sequence tagging problem, addressing challenges such as encoding multiple extractions for a predicate. We also develop a bi-LSTM transducer, extending recent deep Semantic Role Labeling models to extract Open IE tuples and provide confidence scores for tuning their precision-recall tradeoff. Furthermore, we show that the recently released Question-Answer Meaning Representation dataset can be automatically converted into an Open IE corpus which significantly increases the amount of available training data. Our supervised model outperforms the existing state-of-the-art Open IE systems on benchmark datasets.", "phrases": ["open information extraction", "sequence tagging", "predicate", "confidence score"], "overall_score": 1.9948358696077653, "scores": [1.000605165715758, 1.0947412486083714, 0.5626676737363085, 0.5531099511914265], "rank_score": 0.8027810098129661} -{"id": "alegria-etal-2008-spelling", "title": "Spelling Correction: from Two-Level Morphology to Open Source", "abstract": "Basque is a highly inflected and agglutinative language (Alegria et al., 1996). Two-level morphology has been applied successfully to this kind of language and there are two-level based descriptions for very different languages. After doing the morphological description for a language, it is easy to develop a spelling checker/corrector for this language. However, what happens if we want to use the speller in the \u201cfree world\u201d (OpenOffice, Mozilla, emacs, LaTeX, etc.)? Ispell and similar tools (aspell, hunspell, myspell) are the usual mechanisms for these purposes, but they do not fit the two-level model. In the absence of two-level morphology based mechanisms, an automatic conversion from two-level description to hunspell is described in this paper.", "phrases": ["two-level morphology", "agglutinative language", "spelling correction"], "overall_score": 0.8818524928395908, "scores": [0.9649255465393528, 0.9102717168878384, 0.5328929298165819], "rank_score": 0.8026967310812577} -{"id": "alfter-volodina-2018-towards", "title": "Towards Single Word Lexical Complexity Prediction", "abstract": "In this paper we present work-in-progress where we investigate the usefulness of previously created word lists to the task of single-word lexical complexity analysis and prediction of the complexity level for learners of Swedish as a second language. The word lists used map each word to a single CEFR level, and the task consists of predicting CEFR levels for unseen words. In contrast to previous work on word-level lexical complexity, we experiment with topics as additional features and show that linking words to topics significantly increases accuracy of classification.", "phrases": ["complexity level", "learner", "swedish", "second language"], "overall_score": 1.2917240790424787, "scores": [1.1121693149847407, 0.8776012814579863, 0.6293306184274392, 0.5912719696135672], "rank_score": 0.8025932961209333} -{"id": "garmash-monz-2016-ensemble", "title": "Ensemble Learning for Multi-Source Neural Machine Translation", "abstract": "In this paper we describe and evaluate methods to perform ensemble prediction in neural machine translation (NMT). 
We compare two methods of ensemble set induction: sampling parameter initializations for an NMT system, which is a relatively established method in NMT (Sutskever et al., 2014), and NMT systems translating from different source languages into the same target language, i.e., multi-source ensembles, a method recently introduced by Firat et al. (2016). We are motivated by the observation that for different language pairs, systems make different types of mistakes. We propose several methods with different degrees of parameterization to combine individual predictions of NMT systems so that they mutually compensate for each other's mistakes and improve overall performance. We find that the biggest improvements can be obtained from a context-dependent weighting scheme for multi-source ensembles. This result offers stronger support for the linguistic motivation of using multi-source ensembles than previous approaches. Evaluation is carried out for German and French into English translation. The best multi-source ensemble method achieves an improvement of up to 2.2 BLEU points over the strongest single-source ensemble baseline, and a 2 BLEU improvement over a multi-source ensemble baseline.", "phrases": ["neural machine translation", "nmt system", "ensemble"], "overall_score": 1.7634291754072877, "scores": [0.9364823576914483, 0.8741284881103162, 0.5971027674491322], "rank_score": 0.8025712044169655} -{"id": "ali-renals-2018-word", "title": "Word Error Rate Estimation for Speech Recognition: e-WER", "abstract": "Measuring the performance of automatic speech recognition (ASR) systems requires manually transcribed data in order to compute the word error rate (WER), which is often time-consuming and expensive. In this paper, we propose a novel approach to estimate WER, or e-WER, which does not require a gold-standard transcription of the test set. Our e-WER framework uses a comprehensive set of features: ASR recognised text, character recognition results to complement recognition output, and internal decoder features. We report results for the two feature sets, black-box and glass-box, using 24 unseen Arabic broadcast programs. Our system achieves 16.9% WER root mean squared error (RMSE) across 1,400 sentences. The estimated overall WER (e-WER) was 25.3% for the three-hour test set, while the actual WER was 28.5%.", "phrases": ["speech recognition", "e-wer", "word error rate"], "overall_score": 1.1120669942057877, "scores": [0.9128853938834838, 0.8796968707164197, 0.6139780419155748], "rank_score": 0.8021867688384927} -{"id": "vulic-moens-2013-cross", "title": "Cross-Lingual Semantic Similarity of Words as the Similarity of Their Semantic Word Responses", "abstract": "We propose a new approach to identifying semantically similar words across languages. The approach is based on the idea that two words in different languages are similar if they are likely to generate similar words (which includes both source and target language words) as their top semantic word responses. Semantic word responding is a concept from cognitive science which addresses detecting most likely words that humans output as free word associations given some cue word. The method consists of two main steps: (1) it utilizes a probabilistic multilingual topic model trained on comparable data to learn and quantify the semantic word responses, (2) it provides ranked lists of similar words according to the similarity of their semantic word response vectors. 
We evaluate our approach in the task of bilingual lexicon extraction (BLE) for a variety of language pairs. We show that in the cross-lingual settings without any language pair dependent knowledge the response-based method of similarity is more robust and outperforms current state-of-the-art methods that directly operate in the semantic space of latent cross-lingual concepts/topics.", "phrases": ["semantic word response", "cross-lingual semantic similarity", "non-parallel data"], "overall_score": 1.6677631733620721, "scores": [0.997493232334932, 0.8768641840111688, 0.5317162432406012], "rank_score": 0.8020245531955674} -{"id": "zhang-etal-2019-broad", "title": "Broad-Coverage Semantic Parsing as Transduction", "abstract": "We unify different broad-coverage semantic parsing tasks into a transduction parsing paradigm, and propose an attention-based neural transducer that incrementally builds meaning representation via a sequence of semantic relations. By leveraging multiple attention mechanisms, the neural transducer can be effectively trained without relying on a pre-trained aligner. Experiments separately conducted on three broad-coverage semantic parsing tasks \u2013 AMR, SDP and UCCA \u2013 demonstrate that our attention-based neural transducer improves the state of the art on both AMR and UCCA, and is competitive with the state of the art on SDP.", "phrases": ["transduction", "semantic relation", "amr", "sdp", "node"], "overall_score": 2.116431431583061, "scores": [0.8431236745726118, 1.2126962855249617, 0.8331446940070423, 0.5908639860477581, 0.5299960192679841], "rank_score": 0.8019649318840717} -{"id": "agarwal-etal-2011-scisumm", "title": "SciSumm: A Multi-Document Summarization System for Scientific Articles", "abstract": "In this demo, we present SciSumm, an interactive multi-document summarization system for scientific articles. The document collection to be summarized is a list of papers cited together within the same source article, otherwise known as a co-citation. At the heart of the approach is a topic based clustering of fragments extracted from each article based on queries generated from the context surrounding the co-cited list of papers. This analysis enables the generation of an overview of common themes from the co-cited papers that relate to the context in which the co-citation was found. SciSumm is currently built over the 2008 ACL Anthology; however, the generalizable nature of the summarization techniques and the extensible architecture make it possible to use the system with other corpora where a citation network is available. Evaluation results on the same corpus demonstrate that our system performs better than an existing widely used multi-document summarization system (MEAD).", "phrases": ["multi-document summarization system", "fragment", "scisumm"], "overall_score": 0.8809749279900487, "scores": [0.9666364746838881, 0.906772891142251, 0.5322844455677478], "rank_score": 0.8018979371312956} -{"id": "loureiro-etal-2022-timelms", "title": "TimeLMs: Diachronic Language Models from Twitter", "abstract": "Despite its importance, the time variable has been largely neglected in the NLP and language model literature. In this paper, we present TimeLMs, a set of language models specialized on diachronic Twitter data. We show that a continual learning strategy contributes to enhancing Twitter-based language models' capacity to deal with future and out-of-distribution tweets, while making them competitive with standardized and more monolithic benchmarks.
We also perform a number of qualitative analyses showing how they cope with trends and peaks in activity involving specific named entities or concept drift. TimeLMs is available at github.com/cardiffnlp/timelms.", "phrases": ["language model", "twitter data", "timelms"], "overall_score": 1.2898423956100542, "scores": [0.8516780674065396, 0.9383210765837682, 0.6142732774154505], "rank_score": 0.801424140468586} -{"id": "federico-etal-2014-matecat", "title": "The MateCat Tool", "abstract": "We present a new web-based CAT tool providing translators with a professional work environment, integrating translation memories, terminology bases, concordancers, and machine translation. The tool is completely developed as open source software and has already been successfully deployed for business, research and education. The MateCat Tool is today probably the best available open source platform for investigating, integrating, and evaluating under realistic conditions the impact of new machine translation technology on human post-editing.", "phrases": ["matecat tool", "translator", "post-editing", "wordbee", "machine-translated document"], "overall_score": 1.8451361420708776, "scores": [0.9668277828258101, 1.3300303484786855, 0.6093068676471116, 0.5643807502500006, 0.5361164751065757], "rank_score": 0.8013324448616366} -{"id": "poncelas-etal-2018-data", "title": "Data Selection with Feature Decay Algorithms Using an Approximated Target Side", "abstract": "Data selection techniques applied to neural machine translation (NMT) aim to increase the performance of a model by retrieving a subset of sentences for use as training data. Among the possible data selection techniques are transductive learning methods, which select the data based on the test set, i.e. the document to be translated. A limitation of these methods to date is that using the source-side test set does not by itself guarantee that sentences are selected with correct translations, or translations that are suitable given the test-set domain. Some corpora, such as subtitle corpora, may contain parallel sentences with inaccurate translations caused by localization or length restrictions. In order to try to fix this problem, in this paper we propose to use an approximated target-side in addition to the source-side when selecting suitable sentence-pairs for training a model. This approximated target-side is built by pre-translating the source-side. In this work, we explore the performance of this general idea for one specific data selection approach called Feature Decay Algorithms (FDA). We train German-English NMT models on data selected by using the test set (source), the approximated target side, and a mixture of both. Our findings reveal that models built using a combination of outputs of FDA (using the test set and an approximated target side) perform better than those solely using the test set.
We obtain a statistically significant improvement of more than 1.5 BLEU points over a model trained with all data, and more than 0.5 BLEU points over a strong FDA baseline that uses source-side information only.", "phrases": ["feature decay algorithms", "approximated target side", "target-side"], "overall_score": 0.8803036256026864, "scores": [0.9596894544696652, 0.8848381453396736, 0.5593330742868317], "rank_score": 0.8012868913653902} -{"id": "shao-etal-2018-greedy", "title": "Greedy Search with Probabilistic N-gram Matching for Neural Machine Translation", "abstract": "Neural machine translation (NMT) models are usually trained with the word-level loss using the teacher forcing algorithm, which not only evaluates the translation improperly but also suffers from exposure bias. Sequence-level training under the reinforcement framework can mitigate the problems of the word-level loss, but its performance is unstable due to the high variance of the gradient estimation. On these grounds, we present a method with a differentiable sequence-level training objective based on probabilistic n-gram matching which can avoid the reinforcement framework. In addition, this method performs greedy search during training, using the predicted words as context just as at inference, to alleviate the problem of exposure bias. Experiment results on the NIST Chinese-to-English translation tasks show that our method significantly outperforms the reinforcement-based algorithms and achieves an improvement of 1.5 BLEU points on average over a strong baseline system.", "phrases": ["probabilistic n-gram matching", "neural machine translation", "loss"], "overall_score": 1.5590073627600833, "scores": [0.9448223267576243, 0.9221494302331706, 0.5365421414031358], "rank_score": 0.8011712994646435} -{"id": "bauer-etal-2018-commonsense", "title": "Commonsense for Generative Multi-Hop Question Answering Tasks", "abstract": "Reading comprehension QA tasks have seen a recent surge in popularity, yet most works have focused on fact-finding extractive QA. We instead focus on a more challenging multi-hop generative task (NarrativeQA), which requires the model to reason, gather, and synthesize disjoint pieces of information within the context to generate an answer. This type of multi-step reasoning also often requires understanding implicit relations, which humans resolve via external, background commonsense knowledge. We first present a strong generative baseline that uses a multi-attention mechanism to perform multiple hops of reasoning and a pointer-generator decoder to synthesize the answer. This model performs substantially better than previous generative models, and is competitive with current state-of-the-art span prediction models. We next introduce a novel system for selecting grounded multi-hop relational commonsense information from ConceptNet via a pointwise mutual information and term-frequency based scoring function. Finally, we effectively use this extracted commonsense information to fill in gaps of reasoning between context hops, using a selectively-gated attention mechanism. This boosts the model's performance significantly (also verified via human evaluation), establishing a new state-of-the-art for the task.
We also show that our background knowledge enhancements are generalizable and improve performance on QAngaroo-WikiHop, another multi-hop reasoning dataset.", "phrases": ["narrativeqa", "multi-step reasoning", "commonsense knowledge"], "overall_score": 2.43888321339332, "scores": [1.31034695654627, 0.5492293767726162, 0.5436411793925885], "rank_score": 0.8010725042371583} -{"id": "moudjari-etal-2020-algerian", "title": "An Algerian Corpus and an Annotation Platform for Opinion and Emotion Analysis", "abstract": "In this paper, we address the lack of resources for opinion and emotion analysis related to North African dialects, targeting Algerian dialect. We present TWIFIL (TWItter proFILing), a collaborative annotation platform for crowdsourcing annotation of tweets at different levels of granularity. The platform allowed the creation of the largest Algerian dialect dataset annotated for sentiment (9,000 tweets), emotion (about 5,000 tweets) and extra-linguistic information including author profiling (age and gender). The annotation also resulted in the creation of the largest Algerian dialect subjectivity lexicon of about 9,000 entries, which can constitute a valuable resource for the development of future NLP applications for Algerian dialect. To test the validity of the dataset, a set of deep learning experiments was conducted to classify a given tweet as positive, negative or neutral. We discuss our results and provide an error analysis to better identify classification errors.", "phrases": ["annotation platform", "opinion", "emotion analysis", "dialect"], "overall_score": 0.8800126768389411, "scores": [0.9068181308389155, 0.9014983143443052, 0.8591815978619218, 0.5365901905056205], "rank_score": 0.8010220583876907} -{"id": "wang-etal-2019-tell", "title": "Can You Tell Me How to Get Past Sesame Street? Sentence-Level Pretraining Beyond Language Modeling", "abstract": "Natural language understanding has recently seen a surge of progress with the use of sentence encoders like ELMo (Peters et al., 2018a) and BERT (Devlin et al., 2019) which are pretrained on variants of language modeling. We conduct the first large-scale systematic study of candidate pretraining tasks, comparing 19 different tasks both as alternatives and complements to language modeling. Our primary results support the use of language modeling, especially when combined with pretraining on additional labeled-data tasks. However, our results are mixed across pretraining tasks and show some concerning trends: In ELMo's pretrain-then-freeze paradigm, random baselines are worryingly strong and results vary strikingly across target tasks. In addition, fine-tuning BERT on an intermediate task often negatively impacts downstream transfer. In a more positive trend, we see modest gains from multitask training, suggesting the development of more sophisticated multitask and transfer learning techniques as an avenue for further research.", "phrases": ["language modeling", "bert", "target task"], "overall_score": 2.0540704267609478, "scores": [0.8317458908615148, 1.0430414885194594, 0.5276816059643832], "rank_score": 0.800822995115119} -{"id": "lee-etal-2004-supervised", "title": "Supervised Word Sense Disambiguation with Support Vector Machines and multiple knowledge sources", "abstract": "We participated in the SENSEVAL-3 English lexical sample task and multilingual lexical sample task. We adopted a supervised learning approach with Support Vector Machines, using only the official training data provided.
No other external resources were used. The knowledge sources used were part-of-speech of neighboring words, single words in the surrounding context, local collocations, and syntactic relations. For the translation and sense subtask of the multilingual lexical sample task, the English sense given for the target word was also used as an additional knowledge source. For the English lexical sample task, we obtained fine-grained and coarse-grained scores (for both recall and precision) of 0.724 and 0.788, respectively. For the multilingual lexical sample task, we obtained recall (and precision) of 0.634 for the translation subtask, and 0.673 for the translation and sense subtask.", "phrases": ["word sense disambiguation", "support vector machines", "knowledge source", "wsd approach"], "overall_score": 1.6651121389558008, "scores": [0.9757955301484386, 0.8836198341112826, 0.821818752108665, 0.5217645841590113], "rank_score": 0.8007496751318495} -{"id": "de-clercq-etal-2013-normalization", "title": "Normalization of Dutch User-Generated Content", "abstract": "This paper describes a phrase-based machine translation approach to normalize Dutch user-generated content (UGC). We compiled a corpus of three different social media genres (text messages, message board posts and tweets) to have a sample of this recent domain. We describe the various characteristics of this noisy text material and explain how it has been manually normalized using newly developed guidelines. For the automatic normalization task we focus on text messages, and find that a cascaded SMT system where a token-based module is followed by a translation at the character level gives the best word error rate reduction. After these initial experiments, we investigate the system\u2019s robustness on the complete domain of UGC by testing it on the other two social media genres, and find that the cascaded approach performs best on these genres as well. To our knowledge, we deliver the first proof-of-concept system for Dutch UGC normalization, which can serve as a baseline for future work.", "phrases": ["dutch user-generated content", "ugc", "genre", "phrase-based method"], "overall_score": 1.4346725170641452, "scores": [0.915203091713007, 0.8893036403063124, 0.860142234281788, 0.5381749432769931], "rank_score": 0.8007059773945251} -{"id": "moiron-tiedemann-2006-identifying", "title": "Identifying idiomatic expressions using automatic word-alignment", "abstract": "For NLP applications that require some sort of semantic interpretation it would be helpful to know what expressions exhibit an idiomatic meaning and what expressions exhibit a literal meaning. We investigate whether automatic word-alignment in existing parallel corpora facilitates the classification of candidate expressions along a continuum ranging from literal and transparent expressions to idiomatic and opaque expressions. Our method relies on two criteria: (i) meaning predictability that is measured as semantic entropy and (ii), the overlap between the meaning of an expression and the meaning of its component words. We approximate the mentioned overlap as the proportion of default alignments.
We obtain a significant improvement over the baseline with both measures.", "phrases": ["automatic word-alignment", "parallel corpora", "predictability", "mwe"], "overall_score": 1.9199711777756614, "scores": [0.9532043749389169, 0.876975531689341, 0.8383953998671118, 0.5341853804382736], "rank_score": 0.8006901717334108} -{"id": "goldfarb-tarrant-etal-2021-intrinsic", "title": "Intrinsic Bias Metrics Do Not Correlate with Application Bias", "abstract": "Natural Language Processing (NLP) systems learn harmful societal biases that cause them to amplify inequality as they are deployed in more and more situations. To guide efforts at debiasing these systems, the NLP community relies on a variety of metrics that quantify bias in models. Some of these metrics are intrinsic, measuring bias in word embedding spaces, and some are extrinsic, measuring bias in downstream tasks that the word embeddings enable. Do these intrinsic and extrinsic metrics correlate with each other? We compare intrinsic and extrinsic metrics across hundreds of trained models covering different tasks and experimental conditions. Our results show no reliable correlation between these metrics that holds in all scenarios across tasks and languages. We urge researchers working on debiasing to focus on extrinsic measures of bias, and to make using these measures more feasible via creation of new challenge sets and annotated test data. To aid this effort, we release code, a new intrinsic metric, and an annotated test set focused on gender bias in hate speech.", "phrases": ["application bias", "downstream task", "intrinsic metric"], "overall_score": 1.4343553564242888, "scores": [0.954706332868175, 0.9249117226570571, 0.5219688444880604], "rank_score": 0.8005289666710974} -{"id": "cieri-etal-2018-introducing", "title": "Introducing NIEUW: Novel Incentives and Workflows for Eliciting Linguistic Data", "abstract": "This paper introduces the NIEUW (Novel Incentives and Workflows) project funded by the United States National Science Foundation and part of the Linguistic Data Consortium\u2019s strategy to provide an order-of-magnitude improvement in the scale, cost, variety, linguistic diversity and quality of Language Resources available for education, research and technology development. Notwithstanding decades of effort and progress in collecting and distributing Language Resources, it remains the case that demand still far exceeds supply for all of the approximately 7000 languages in the world, even the most well documented languages with global economic and political influence. The absence of Language Resources, regardless of the language, stifles teaching and technology building, inhibiting the creation of language-enabled applications and, as a result, commerce and communication. Project-oriented approaches which focus intensive funding and effort on problems of limited scope over short durations can only address part of the problem. The HLT community instead requires approaches that do not rely upon highly constrained resources such as project funding and can be sustained across many languages and many years. In this paper, we describe a new initiative to harness the power of alternative incentives to elicit linguistic data and annotation.
We also describe changes to the workflows necessary to collect data from workforces attracted by these incentives.", "phrases": ["nieuw", "novel incentives", "workflows"], "overall_score": 1.109505451147097, "scores": [0.816831602315006, 0.7984704826879803, 0.785714933310797], "rank_score": 0.8003390061045944} -{"id": "chakraborty-etal-2011-semantic", "title": "Semantic Clustering: an Attempt to Identify Multiword Expressions in Bengali", "abstract": "One of the key issues in both natural language understanding and generation is the appropriate processing of Multiword Expressions (MWEs). MWE can be defined as a semantic issue of a phrase where the meaning of the phrase may not be obtained from its constituents in a straightforward manner. This paper presents an approach to identifying bigram noun-noun MWEs from a medium-size Bengali corpus by clustering the semantically related nouns and incorporating a vector space model for similarity measurement. Additional inclusion of the English WordNet::Similarity module also improves the results considerably. The present approach also contributes to locating clusters of the synonymous noun words present in a document. Experimental results are satisfactory, as shown by an analysis of the Precision, Recall and F-score values.", "phrases": ["multiword expressions", "medium-size bengali corpus", "semantic clustering"], "overall_score": 1.1092986426089275, "scores": [0.9132020097879474, 0.9108116185828251, 0.5765558474643651], "rank_score": 0.8001898252783791} -{"id": "han-etal-2012-automatically", "title": "Automatically Constructing a Normalisation Dictionary for Microblogs", "abstract": "Microblog normalisation methods often utilise complex models and struggle to differentiate between correctly-spelled unknown words and lexical variants of known words. In this paper, we propose a method for constructing a dictionary of lexical variants of known words that facilitates lexical normalisation via simple string substitution (e.g. tomorrow for tmrw). We use context information to generate possible variant and normalisation pairs and then rank these by string similarity. Highly-ranked pairs are selected to populate the dictionary. We show that a dictionary-based approach achieves state-of-the-art performance for both F-score and word error rate on a standard dataset. Compared with other methods, this approach offers a fast, lightweight and easy-to-use solution, and is thus suitable for high-volume microblog pre-processing.", "phrases": ["microblog", "distributional similarity", "noisy text", "oov word"], "overall_score": 2.7213695750739784, "scores": [0.8875621328285556, 1.2548924280741733, 0.5339252211963597, 0.5241043647445774], "rank_score": 0.8001210367109165} -{"id": "bhowmick-etal-2008-agreement", "title": "An Agreement Measure for Determining Inter-Annotator Reliability of Human Judgements on Affective Text", "abstract": "An affective text may be judged to belong to multiple affect categories as it may evoke different affects with varying degrees of intensity. For affect classification of text, it is often required to annotate text corpus with affect categories. This task is often performed by a number of human judges. This paper presents a new agreement measure inspired by the Kappa coefficient to compute inter-annotator reliability when the annotators have freedom to categorize a text into more than one class. The extended reliability coefficient has been applied to measure the quality of an affective text corpus.
An analysis of the factors that influence corpus quality has been provided.", "phrases": ["inter-annotator reliability", "affective text", "factor"], "overall_score": 0.87900963238015, "scores": [0.9584867602324515, 0.9098049423737613, 0.532035441319532], "rank_score": 0.8001090479752483} -{"id": "rimell-etal-2009-unbounded", "title": "Unbounded Dependency Recovery for Parser Evaluation", "abstract": "This paper introduces a new parser evaluation corpus containing around 700 sentences annotated with unbounded dependencies, from seven different grammatical constructions. We run a series of off-the-shelf parsers on the corpus to evaluate how well state-of-the-art parsing technology is able to recover such dependencies. The overall results range from 25% accuracy to 59%. These low scores call into question the validity of using Parseval scores as a general measure of parsing capability. We discuss the importance of parsers being able to recover unbounded dependencies, given their relatively low frequency in corpora. We also analyse the various errors made on these constructions by one of the more successful parsers.", "phrases": ["unbounded dependency", "ccg parser", "linguistic capacity"], "overall_score": 2.4356097249450257, "scores": [1.2491350374958912, 0.5981198963004389, 0.5527369612539961], "rank_score": 0.7999972983501088} -{"id": "aji-etal-2020-neural", "title": "In Neural Machine Translation, What Does Transfer Learning Transfer?", "abstract": "Transfer learning improves quality for low-resource machine translation, but it is unclear what exactly it transfers. We perform several ablation studies that limit information transfer, then measure the quality impact across three language pairs to gain a black-box understanding of transfer learning. Word embeddings play an important role in transfer learning, particularly if they are properly aligned. Although transfer learning can be performed without embeddings, results are sub-optimal. In contrast, transferring only the embeddings but nothing else yields catastrophic results. We then investigate diagonal alignments with auto-encoders over real languages and randomly generated sequences, finding even randomly generated sequences as parents yield noticeable but smaller gains. Finally, transfer learning can eliminate the need for a warm-up phase when training transformer models in high resource language pairs.", "phrases": ["transfer learning", "parent", "vocabulary"], "overall_score": 1.9875126898120083, "scores": [0.9462655317356713, 0.8827258027424824, 0.5705105019166739], "rank_score": 0.7998339454649425} -{"id": "tsai-chen-2003-context", "title": "Context-rule Model for Pos Tagging", "abstract": "Part-of-speech tagging for a large corpus is a labour-intensive and time-consuming task. In order to achieve fast and high-quality tagging, algorithms should have high precision and, in particular, their tagging results should require less manual proofreading. In this paper, we propose a context-rule model to achieve both of the above goals for pos tagging. We compare the tagging precision of the Markov bi-gram model and the context-rule classifier. According to the experiments, the context-rule classifier performs better than the other algorithms.
Also, it mitigates the data sparseness problem by utilizing more context features, and reduces the amount of corpus that needs to be manually proofread by introducing a confidence measure.", "phrases": ["pos tagging", "markov bi-gram model", "context-rule model"], "overall_score": 0.8787053827664771, "scores": [0.9567252627831515, 0.8835325152995016, 0.5592385460439384], "rank_score": 0.7998321080421972} -{"id": "colombo-etal-2019-affect", "title": "Affect-Driven Dialog Generation", "abstract": "The majority of current systems for end-to-end dialog generation focus on response quality without an explicit control over the affective content of the responses. In this paper, we present an affect-driven dialog system, which generates emotional responses in a controlled manner using a continuous representation of emotions. The system achieves this by modeling emotions at a word and sequence level using: (1) a vector representation of the desired emotion, (2) an affect regularizer, which penalizes neutral words, and (3) an affect sampling method, which forces the neural network to generate diverse words that are emotionally relevant. During inference, we use a re-ranking procedure that aims to extract the most emotionally relevant responses using a human-in-the-loop optimization process. We study the performance of our system in terms of both quantitative (BLEU score and response diversity), and qualitative (emotional appropriateness) measures.", "phrases": ["dialog system", "emotion", "response generation"], "overall_score": 1.987456685909363, "scores": [0.9435769555530594, 0.9017586538052045, 0.5540986141514074], "rank_score": 0.799811407836557} -{"id": "huang-etal-2020-grade", "title": "GRADE: Automatic Graph-Enhanced Coherence Metric for Evaluating Open-Domain Dialogue Systems", "abstract": "Automatically evaluating dialogue coherence is a challenging but high-demand ability for developing high-quality open-domain dialogue systems. However, current evaluation metrics consider only surface features or utterance-level semantics, without explicitly considering the fine-grained topic transition dynamics of dialogue flows. Here, we first consider that the graph structure formed by the topics in a dialogue can accurately depict the underlying communication logic, which is a more natural way to produce persuasive metrics. Capitalizing on the topic-level dialogue graph, we propose a new evaluation metric GRADE, which stands for Graph-enhanced Representations for Automatic Dialogue Evaluation. Specifically, GRADE incorporates both coarse-grained utterance-level contextualized representations and fine-grained topic-level graph representations to evaluate dialogue coherence. The graph representations are obtained by reasoning over topic-level dialogue graphs enhanced with the evidence from a commonsense graph, including k-hop neighboring representations and hop-attention weights. Experimental results show that our GRADE significantly outperforms other state-of-the-art metrics on measuring diverse dialogue models in terms of the Pearson and Spearman correlations with human judgments.
In addition, we release a new large-scale human evaluation benchmark to facilitate future research on automatic metrics.", "phrases": ["coherence metric", "topic-level graph representation", "dialogue topic transition"], "overall_score": 1.8412164742456023, "scores": [1.3079064654205654, 0.5563535754700768, 0.5346304233720351], "rank_score": 0.7996301547542258} -{"id": "tonelli-pighin-2009-new", "title": "New Features for FrameNet - WordNet Mapping", "abstract": "Many applications in the context of natural language processing or information retrieval could be greatly improved if they were able to fully exploit the rich semantic information annotated in high-quality, publicly available resources such as the FrameNet and the WordNet databases. Nevertheless, the practical use of such resources is often hampered by the limited coverage of semantic phenomena that they provide. \n \nA natural solution to this problem would be to automatically establish anchors between these resources that would allow us 1) to jointly use the encoded information, thus possibly overcoming limitations of the individual corpora, and 2) to extend each resource coverage by exploiting the information encoded in the others. \n \nIn this paper, we present a supervised learning framework for the mapping of FrameNet lexical units onto WordNet synsets based on a reduced set of novel and semantically rich features. The automatically learnt mapping, which we call MapNet, can be used 1) to extend frame sets in the English FrameNet, 2) to populate frame sets in the Italian FrameNet via MultiWordNet and 3) to add frame labels to the MultiSemCor corpus. Our evaluation on these tasks shows that the proposed approach is viable and can result in accurate automatic annotations.", "phrases": ["framenet", "mapping", "wordnet synset"], "overall_score": 1.28694081158365, "scores": [0.886487844963244, 0.9799996389002408, 0.5323763709787782], "rank_score": 0.7996212849474209} -{"id": "yannakoudakis-etal-2011-new", "title": "A New Dataset and Method for Automatically Grading ESOL Texts", "abstract": "We demonstrate how supervised discriminative machine learning techniques can be used to automate the assessment of 'English as a Second or Other Language' (ESOL) examination scripts. In particular, we use rank preference learning to explicitly model the grade relationships between scripts. A number of different features are extracted and ablation tests are used to investigate their contribution to overall performance. A comparison between regression and rank preference models further supports our method. Experimental results on the first publicly available dataset show that our system can achieve levels of performance close to the upper bound for the task, as defined by the agreement between human examiners on the same corpus. Finally, using a set of 'outlier' texts, we test the validity of our model and identify cases where the model's scores diverge from that of a human examiner.", "phrases": ["learner", "essay score", "grammatical feature", "cambridge fce corpus"], "overall_score": 3.127649122612795, "scores": [1.5733687284706932, 0.5561431179223065, 0.5376152931116818, 0.5308592918358405], "rank_score": 0.7994966078351304} -{"id": "beigman-klebanov-beigman-2014-difficult", "title": "Difficult Cases: From Data to Learning, and Back", "abstract": "This article contributes to the ongoing discussion in the computational linguistics community regarding instances that are difficult to annotate reliably. Is it worthwhile to identify those?
What information can be inferred from them regarding the nature of the task? What should be done with them when building supervised machine learning systems? We address these questions in the context of a subjective semantic task. In this setting, we show that the presence of such instances in training data misleads a machine learner into misclassifying clear-cut cases. We also show that, by considering machine learning outcomes with and without the difficult cases, it is possible to identify specific weaknesses of the problem representation.", "phrases": ["presence", "machine learner", "difficult case", "beigman"], "overall_score": 1.5554750298189326, "scores": [0.9131016148446648, 1.1315901590130832, 0.5999662956462186, 0.5527660881819855], "rank_score": 0.799356039421488} -{"id": "rastogi-etal-2015-multiview", "title": "Multiview LSA: Representation Learning via Generalized CCA", "abstract": "Multiview LSA (MVLSA) is a generalization of Latent Semantic Analysis (LSA) that supports the fusion of arbitrary views of data and relies on Generalized Canonical Correlation Analysis (GCCA). We present an algorithm for fast approximate computation of GCCA, which, when coupled with methods for handling missing values, is general enough to approximate some recent algorithms for inducing vector representations of words. Experiments across a comprehensive collection of test-sets show our approach to be competitive with the state of the art.", "phrases": ["generalized cca", "art", "multiview lsa"], "overall_score": 1.7561808161013959, "scores": [0.9627078098478017, 0.8642289226493631, 0.5708802693003692], "rank_score": 0.7992723339325113} -{"id": "schwitter-2010-controlled", "title": "Controlled Natural Languages for Knowledge Representation", "abstract": "This paper presents a survey of research in controlled natural languages that can be used as high-level knowledge representation languages. Over the past 10 years or so, a number of machine-oriented controlled natural languages have emerged that can be used as high-level interface languages to various kinds of knowledge systems. These languages are relevant to the area of computational linguistics since they have two very interesting properties: firstly, they look informal like natural languages and are therefore easier to write and understand by humans than formal languages; secondly, they are precisely defined subsets of natural languages and can be translated automatically (and often deterministically) into a formal target language and then be used for automated reasoning. We present and compare the most mature of these novel languages, show how they can balance the disadvantages of natural languages and formal languages for knowledge representation, and discuss how domain specialists can be supported writing specifications in controlled natural language.", "phrases": ["knowledge representation", "cnl", "full natural language", "ambiguity"], "overall_score": 1.5549054924467114, "scores": [0.9142214522675767, 1.086202142247289, 0.6352483012694747, 0.5605815246556033], "rank_score": 0.7990633551099859} -{"id": "sood-etal-2020-interpreting", "title": "Interpreting Attention Models with Human Visual Attention in Machine Reading Comprehension", "abstract": "While neural networks with attention mechanisms have achieved superior performance on many natural language processing tasks, it remains unclear to what extent learned attention resembles human visual attention.
In this paper, we propose a new method that leverages eye-tracking data to investigate the relationship between human visual attention and neural attention in machine reading comprehension. To this end, we introduce a novel 23-participant eye-tracking dataset, MQA-RC, in which participants read movie plots and answered pre-defined questions. We compare state-of-the-art networks based on long short-term memory (LSTM), convolutional neural network (CNN) and XLNet Transformer architectures. We find that higher similarity to human attention correlates significantly with performance for the LSTM and CNN models. However, we show this relationship does not hold true for the XLNet models \u2013 despite the fact that the XLNet performs best on this challenging task. Our results suggest that different architectures seem to learn rather different neural attention strategies and similarity of neural to human attention does not guarantee the best performance.", "phrases": ["human visual attention", "machine reading comprehension", "cnn"], "overall_score": 1.554707684213853, "scores": [0.9530876193382104, 0.920050494224504, 0.5237469917983245], "rank_score": 0.7989617017870129} -{"id": "dhingra-etal-2018-embedding", "title": "Embedding Text in Hyperbolic Spaces", "abstract": "Natural language text exhibits hierarchical structure in a variety of respects. Ideally, we could incorporate our prior knowledge of this hierarchical structure into unsupervised learning algorithms that work on text data. Recent work by Nickel and Kiela (2017) proposed using hyperbolic instead of Euclidean embedding spaces to represent hierarchical data and demonstrated encouraging results when embedding graphs. In this work, we extend their method with a re-parameterization technique that allows us to learn hyperbolic embeddings of arbitrarily parameterized objects. We apply this framework to learn word and sentence embeddings in hyperbolic space in an unsupervised manner from text corpora. The resulting embeddings seem to encode certain intuitive notions of hierarchy, such as word-context frequency and phrase constituency. However, the implicit continuous hierarchy in the learned hyperbolic space makes interrogating the model's learned hierarchies more difficult than for models that learn explicit edges between items. The learned hyperbolic embeddings show improvements over Euclidean embeddings in some \u2013 but not all \u2013 downstream tasks, suggesting that hierarchical organization is more useful for some tasks than others.", "phrases": ["hyperbolic space", "language text", "hierarchical structure"], "overall_score": 1.4315411493991108, "scores": [0.9269246514858722, 0.8695700621324273, 0.6003802698567915], "rank_score": 0.7989583278250304} -{"id": "junczys-dowmunt-etal-2016-neural", "title": "Is Neural Machine Translation Ready for Deployment? A Case Study on 30 Translation Directions", "abstract": "In this paper we provide the largest published comparison of translation quality for phrase-based SMT and neural machine translation across 30 translation directions. For ten directions we also include hierarchical phrase-based MT. Experiments are performed for the recently published United Nations Parallel Corpus v1.0 and its large six-way sentence-aligned subcorpus. In the second part of the paper we investigate aspects of translation speed, introducing AmuNMT, our efficient neural machine translation decoder.
We demonstrate that current neural machine translation could already be used for in-production systems when comparing words-per-second ratios.", "phrases": ["neural machine translation", "translation direction", "translation quality"], "overall_score": 2.1631707697348914, "scores": [0.8373205989621262, 0.9266174080875378, 0.6324401348929639], "rank_score": 0.7987927139808759} -{"id": "xu-yang-2017-cross", "title": "Cross-lingual Distillation for Text Classification", "abstract": "Cross-lingual text classification (CLTC) is the task of classifying documents written in different languages into the same taxonomy of categories. This paper presents a novel approach to CLTC that builds on model distillation, which adapts and extends a framework originally proposed for model compression. Using soft probabilistic predictions for the documents in a label-rich language as the (induced) supervisory labels in a parallel corpus of documents, we train classifiers successfully for new languages in which labeled training data are not available. An adversarial feature adaptation technique is also applied during the model training to reduce distribution mismatch. We conducted experiments on two benchmark CLTC datasets, treating English as the source language and German, French, Japanese and Chinese as the unlabeled target languages. The proposed approach achieved performance better than or comparable to the other state-of-the-art methods.", "phrases": ["text classification", "source language", "french", "chinese", "knowledge distillation"], "overall_score": 1.7548856942072775, "scores": [1.8401560197512519, 0.5609571174248965, 0.537977995666035, 0.5333138035973309, 0.5210095563448177], "rank_score": 0.7986828985568664} -{"id": "berant-etal-2012-efficient", "title": "Efficient Tree-based Approximation for Entailment Graph Learning", "abstract": "Learning entailment rules is fundamental in many semantic-inference applications and has been an active field of research in recent years. In this paper we address the problem of learning transitive graphs that describe entailment rules between predicates (termed entailment graphs). We first identify that entailment graphs exhibit a \"tree-like\" property and are very similar to a novel type of graph termed forest-reducible graph. We utilize this property to develop an iterative efficient approximation algorithm for learning the graph edges, where each iteration takes linear time. We compare our approximation algorithm to a recently-proposed state-of-the-art exact algorithm and show that it is more efficient and scalable both theoretically and empirically, while its output quality is close to that given by the optimal solution of the exact algorithm.", "phrases": ["entailment graph", "approximation method", "treenode-fix"], "overall_score": 1.430586607506205, "scores": [0.982836925602379, 0.8518031167339425, 0.560636721217013], "rank_score": 0.7984255878511114} -{"id": "becker-etal-2011-discuss", "title": "DISCUSS: A dialogue move taxonomy layered over semantic representations", "abstract": "In this paper we describe DISCUSS, a dialogue move taxonomy layered over semantic representations. We designed this scheme to enable development of computational models of tutorial dialogues and to provide an intermediate representation suitable for question and tutorial act generation. As such, DISCUSS captures semantic and pragmatic elements across four dimensions: Dialogue Act, Rhetorical Form, Predicate Type, Semantic Roles.
Together these dimensions provide a summary of an utterance's propositional content and how it may change the underlying information state of the conversation. This taxonomy builds on previous work both in general dialogue act taxonomies and in tutorial act and tutorial question categorization. The types and values found within our taxonomy are based on preliminary observations and on-going annotation from our corpus of multimodal tutorial dialogues for elementary school science education.", "phrases": ["dialogue move taxonomy", "semantic representation", "discuss", "human tutor"], "overall_score": 1.2848791454900088, "scores": [0.9939320005968226, 0.8521978901128697, 0.8271942196273913, 0.5200370887830947], "rank_score": 0.7983402997800446} -{"id": "vulic-etal-2018-post", "title": "Post-Specialisation: Retrofitting Vectors of Words Unseen in Lexical Resources", "abstract": "Word vector specialisation (also known as retrofitting) is a portable, light-weight approach to fine-tuning arbitrary distributional word vector spaces by injecting external knowledge from rich lexical resources such as WordNet. By design, these post-processing methods only update the vectors of words occurring in external lexicons, leaving the representations of all unseen words intact. In this paper, we show that constraint-driven vector space specialisation can be extended to unseen words. We propose a novel post-specialisation method that: a) preserves the useful linguistic knowledge for seen words; while b) propagating this external signal to unseen words in order to improve their vector representations as well. Our post-specialisation approach expresses a non-linear specialisation function in the form of a deep neural network by learning to predict specialised vectors from their original distributional counterparts. The learned function is then used to specialise vectors of unseen words. This approach, applicable to any post-processing model, yields considerable gains over the initial specialisation models both in intrinsic word similarity tasks, and in two downstream tasks: dialogue state tracking and lexical text simplification. The positive effects persist across three languages, demonstrating the importance of specialising the full vocabulary of distributional word vector spaces.", "phrases": ["retrofitting", "vocabulary", "post-specialization"], "overall_score": 1.660074135126563, "scores": [0.9036459219176078, 0.8758112547024727, 0.6155235456350469], "rank_score": 0.7983269074183759} -{"id": "cherry-2013-improved", "title": "Improved Reordering for Phrase-Based Translation using Sparse Features", "abstract": "There have been many recent investigations into methods to tune SMT systems using large numbers of sparse features. However, there have not been nearly so many examples of helpful sparse features, especially for phrase-based systems. We use sparse features to address reordering, which is often considered a weak point of phrase-based translation. Using a hierarchical reordering model as our baseline, we show that simple features coupling phrase orientation to frequent words or word clusters can improve translation quality, with boosts of up to 1.2 BLEU points in Chinese-English and 1.8 in Arabic-English. We compare this solution to a more traditional maximum entropy approach, where a probability model with similar features is trained on word-aligned bitext.
We show that sparse decoder features outperform maximum entropy handily, indicating that there are major advantages to optimizing reordering features directly for BLEU with the decoder in the loop.", "phrases": ["reordering", "phrase-based translation", "sparse feature"], "overall_score": 1.2847996413188147, "scores": [0.9507052845174256, 0.8736884812497023, 0.5704789374159874], "rank_score": 0.7982909010610385} -{"id": "duran-aluisio-2012-propbank", "title": "Propbank-Br: a Brazilian Treebank annotated with semantic role labels", "abstract": "This paper reports the annotation of a Brazilian Portuguese Treebank with semantic role labels following Propbank guidelines. A different language and a different parser output impact the task and require some decisions on how to annotate the corpus. Therefore, a new annotation guide \u2015 called Propbank-Br \u2015 has been generated to deal with specific language phenomena and parser problems. In this phase of the project, the corpus was annotated by a single linguist. The annotation task reported here is part of a larger project for the Brazilian Portuguese language. This project aims to build Brazilian verb frame files and a broader and distributed annotation of semantic role labels in Brazilian Portuguese, allowing inter-annotator agreement measures. The corpus, available on the web, is already being used to build a semantic tagger for the Portuguese language.", "phrases": ["semantic role label", "portuguese", "propbank-br"], "overall_score": 1.1065885105031494, "scores": [0.942739875350756, 0.9145469921669988, 0.537417767093643], "rank_score": 0.7982348782037992} -{"id": "zhang-etal-2020-every", "title": "Every Document Owns Its Structure: Inductive Text Classification via Graph Neural Networks", "abstract": "Text classification is fundamental in natural language processing (NLP) and Graph Neural Networks (GNN) are recently applied in this task. However, the existing graph-based works can neither capture the contextual word relationships within each document nor fulfil the inductive learning of new words. Therefore in this work, to overcome such problems, we propose TextING for inductive text classification via GNN. We first build individual graphs for each document and then use GNN to learn the fine-grained word representations based on their local structure, which can also effectively produce embeddings for unseen words in the new document. Finally, the word nodes are aggregated as the document embedding. Extensive experiments on four benchmark datasets show that our method outperforms state-of-the-art text classification methods.", "phrases": ["inductive text classification", "graph neural networks", "gnn"], "overall_score": 1.6598059177447295, "scores": [0.9433147913315171, 0.9235090380484039, 0.5277699369885552], "rank_score": 0.7981979221228254} -{"id": "suhr-etal-2020-exploring", "title": "Exploring Unexplored Generalization Challenges for Cross-Database Semantic Parsing", "abstract": "We study the task of cross-database semantic parsing (XSP), where a system that maps natural language utterances to executable SQL queries is evaluated on databases unseen during training. Recently, several datasets, including Spider, were proposed to support development of XSP systems. We propose a challenging evaluation setup for cross-database semantic parsing, focusing on variation across database schemas and in-domain language use.
We re-purpose eight semantic parsing datasets that have been well-studied in the setting where in-domain training data is available, and instead use them as additional evaluation data for XSP systems. We build a system that performs well on Spider, and find that it struggles to generalize to our re-purposed set. Our setup uncovers several generalization challenges for cross-database semantic parsing, demonstrating the need to use and develop diverse training and evaluation datasets.", "phrases": ["cross-database semantic parsing", "database", "state-of-the-art model"], "overall_score": 2.106449876447707, "scores": [1.0003887588380154, 0.8701776077952037, 0.5239817011286901], "rank_score": 0.7981826892539697} -{"id": "xia-lewis-2007-multilingual", "title": "Multilingual Structural Projection across Interlinear Text", "abstract": "This paper explores the potential for annotating and enriching data for low-density languages via the alignment and projection of syntactic structure from parsed data for resource-rich languages such as English. We seek to develop enriched resources for a large number of the world\u2019s languages, most of which have no significant digital presence. We do this by tapping the body of Web-based linguistic data, most of which exists in small, analyzed chunks embedded in scholarly papers, journal articles, Web pages, and other online documents. By harvesting and enriching these data, we can provide the means for knowledge discovery across the resulting corpus that can lead to building computational resources such as grammars and transfer rules, which, in turn, can be used as bootstraps for building additional tools and resources for the languages represented.", "phrases": ["projection", "interlinear text", "syntactic structure"], "overall_score": 1.8375276642185365, "scores": [0.9333209705052277, 0.8780255418965645, 0.5827378623422533], "rank_score": 0.7980281249146818} -{"id": "hardmeier-etal-2012-tree", "title": "Tree Kernels for Machine Translation Quality Estimation", "abstract": "This paper describes Uppsala University's submissions to the Quality Estimation (QE) shared task at WMT 2012. We present a QE system based on Support Vector Machine regression, using a number of explicitly defined features extracted from the Machine Translation input, output and models in combination with tree kernels over constituency and dependency parse trees for the input and output sentences. We confirm earlier results suggesting that tree kernels can be a useful tool for QE system construction especially in the early stages of system design.", "phrases": ["quality estimation", "dependency parse tree", "output sentence", "tree kernel"], "overall_score": 1.659392243669669, "scores": [1.446860395343451, 0.595756194450609, 0.5791990025637274, 0.5701803554180024], "rank_score": 0.7979989869439474} -{"id": "wang-etal-2015-building", "title": "Building a Semantic Parser Overnight", "abstract": "How do we build a semantic parser in a new domain starting with zero training examples? We introduce a new methodology for this setting: First, we use a simple grammar to generate logical forms paired with canonical utterances. The logical forms are meant to cover the desired set of compositional operators, and the canonical utterances are meant to capture the meaning of the logical forms (although clumsily). We then use crowdsourcing to paraphrase these canonical utterances into natural utterances. The resulting data is used to train the semantic parser.
We further study the role of compositionality in the resulting paraphrases. Finally, we test our methodology on seven domains and show that we can build an adequate semantic parser in just a few hours.", "phrases": ["canonical utterance", "compositionality", "semantic parsing", "sql", "data collection"], "overall_score": 2.2124146969017637, "scores": [1.8582950980971515, 0.5471500033177127, 0.5339676928019953, 0.5298259262714843, 0.5205609190241979], "rank_score": 0.7979599279025084} -{"id": "taghipour-etal-2011-parallel", "title": "Parallel Corpus Refinement as an Outlier Detection Algorithm", "abstract": "Filtering noisy parallel corpora or removing mistranslations out of training sets can improve the quality of statistical machine translation. Discriminative methods for filtering the corpora, such as a maximum entropy model, need properly labeled training data, which are usually unavailable. Generating all possible sentence pairs (the Cartesian product) to generate labeled data produces an imbalanced training set, containing only a few correct translations and thus inappropriate for training a classifier. In order to treat this problem effectively, unsupervised methods are utilized and the problem is modeled as an outlier detection procedure. The experiments show that a filtered corpus results in improved translation quality, even with some sentence pairs removed.", "phrases": ["outlier detection", "sentence pair", "improved translation quality", "small portion"], "overall_score": 1.8369821913292101, "scores": [0.9765140764855044, 0.8544369496341572, 0.8259291888786384, 0.5342847011969779], "rank_score": 0.7977912290488195} -{"id": "li-etal-2011-clustering", "title": "Clustering Comparable Corpora For Bilingual Lexicon Extraction", "abstract": "We study in this paper the problem of enhancing the comparability of bilingual corpora in order to improve the quality of bilingual lexicons extracted from comparable corpora. We introduce a clustering-based approach for enhancing corpus comparability which exploits the homogeneity feature of the corpus, and finally preserves most of the vocabulary of the original corpus. Our experiments illustrate the well-foundedness of this method and show that the bilingual lexicons obtained from the homogeneous corpus are of better quality than the lexicons obtained with previous approaches.", "phrases": ["comparability", "bilingual lexicon extraction", "clustering-based approach"], "overall_score": 1.1059663346914324, "scores": [0.9949327274759141, 0.8506575022297095, 0.5477679899685859], "rank_score": 0.7977860732247365} -{"id": "wu-etal-2016-bilingually", "title": "Bilingually-constrained Synthetic Data for Implicit Discourse Relation Recognition", "abstract": "To alleviate the shortage of labeled data, we propose to use bilingually-constrained synthetic implicit data for implicit discourse relation recognition. These data are extracted from a bilingual sentence-aligned corpus according to the implicit/explicit mismatch between different languages.
Incorporating these data via a multi-task neural network model achieves significant improvements over baselines on both the English PDTB and Chinese CDTB data sets.", "phrases": ["discourse relation recognition", "synthetic implicit data", "english-chinese corpus"], "overall_score": 1.5522788211381915, "scores": [1.179641575126062, 0.6186117436681707, 0.5948872204415293], "rank_score": 0.7977135130785874} -{"id": "huang-etal-2012-tweet", "title": "Tweet Ranking Based on Heterogeneous Networks", "abstract": "Ranking tweets is a fundamental task to make it easier to distill the vast amounts of information shared by users. In this paper, we explore the novel idea of ranking tweets on a topic using heterogeneous networks. We construct heterogeneous networks by harnessing cross-genre linkages between tweets and semantically-related web documents from formal genres, and inferring implicit links between tweets and users. To rank tweets effectively by capturing the semantics and importance of different linkages, we introduce Tri-HITS, a model to iteratively propagate ranking scores across heterogeneous networks. We show that integrating both formal genre and inferred social networks with tweet networks produces a higher-quality ranking than the tweet networks alone.", "phrases": ["web document", "social network", "tweet ranking"], "overall_score": 1.5520795996234047, "scores": [0.9777560822630619, 0.8500001244641768, 0.5650771936898835], "rank_score": 0.797611133472374} -{"id": "yao-wan-2020-multimodal", "title": "Multimodal Transformer for Multimodal Machine Translation", "abstract": "Multimodal Machine Translation (MMT) aims to introduce information from another modality, generally static images, to improve the translation quality. Previous works propose various incorporation methods, but most of them do not consider the relative importance of multiple modalities. Equally treating all modalities may encode too much useless information from less important modalities. In this paper, we introduce multimodal self-attention in the Transformer to solve the issues above in MMT. The proposed method learns the representation of images based on the text, which avoids encoding irrelevant information in images. Experiments and visualization analysis demonstrate that our model benefits from visual information and substantially outperforms previous works and competitive baselines in terms of various metrics.", "phrases": ["multimodal machine translation", "multimodal self-attention", "noise"], "overall_score": 1.658581143386531, "scores": [1.0053987763687553, 0.8560764978454738, 0.5313515162614657], "rank_score": 0.797608930158565} -{"id": "narayan-cohen-2015-diversity", "title": "Diversity in Spectral Learning for Natural Language Parsing", "abstract": "We describe an approach to create a diverse set of predictions with spectral learning of latent-variable PCFGs (L-PCFGs). Our approach works by creating multiple spectral models where noise is added to the underlying features in the training set before the estimation of each model. We describe three ways to decode with multiple models. In addition, we describe a simple variant of the spectral algorithm for L-PCFGs that is fast and leads to compact models. Our experiments for natural language parsing, for English and German, show that we get a significant improvement over baselines comparable to state of the art. 
For English, we achieve an F1 score of 90.18, and for German we achieve an F1 score of 83.38.", "phrases": ["spectral learning", "natural language parsing", "l-pcfg"], "overall_score": 1.105500219350617, "scores": [0.921867394809171, 0.9184904063836414, 0.551991725045382], "rank_score": 0.7974498420793982} -{"id": "mohler-mihalcea-2009-text", "title": "Text-to-Text Semantic Similarity for Automatic Short Answer Grading", "abstract": "In this paper, we explore unsupervised techniques for the task of automatic short answer grading. We compare a number of knowledge-based and corpus-based measures of text similarity, evaluate the effect of domain and size on the corpus-based measures, and also introduce a novel technique to improve the performance of the system by integrating automatic feedback from the student answers. Overall, our system significantly and consistently outperforms other unsupervised methods for short answer grading that have been proposed in the past.", "phrases": ["short answer", "text similarity", "student"], "overall_score": 2.304514424068369, "scores": [0.9522844770563673, 0.9111431977926087, 0.528494143968309], "rank_score": 0.797307272939095} -{"id": "fader-etal-2013-paraphrase", "title": "Paraphrase-Driven Learning for Open Question Answering", "abstract": "We study question answering as a machine learning problem, and induce a function that maps open-domain questions to queries over a database of web extractions. Given a large, community-authored, question-paraphrase corpus, we demonstrate that it is possible to learn a semantic lexicon and linear ranking function without manually annotating questions. Our approach automatically generalizes a seed lexicon and includes a scalable, parallelized perceptron parameter estimation scheme. Experiments show that our approach more than quadruples the recall of the seed lexicon, with only an 8% loss in precision.", "phrases": ["open-domain question", "paraphrasing", "search query log", "knowledge basis"], "overall_score": 2.834503917881713, "scores": [0.9062314288101562, 0.9038593290436036, 0.8333757158973499, 0.5455365367662653], "rank_score": 0.7972507526293438} -{"id": "hardy-etal-2019-highres", "title": "HighRES: Highlight-based Reference-less Evaluation of Summarization", "abstract": "There has been substantial progress in summarization research enabled by the availability of novel, often large-scale, datasets and recent advances in neural network-based approaches. However, manual evaluation of the system-generated summaries is inconsistent due to the difficulty the task poses to human non-expert readers. To address this issue, we propose a novel approach for manual evaluation, Highlight-based Reference-less Evaluation of Summarization (HighRES), in which summaries are assessed by multiple annotators against the source document via manually highlighted salient content in the latter. Thus summary assessment on the source document by human judges is facilitated, while the highlights can be used for evaluating multiple systems. To validate our approach, we employ crowd-workers to augment a recently proposed dataset with highlights and to compare two state-of-the-art systems. 
We demonstrate that HighRES improves inter-annotator agreement in comparison to using the source document directly, while the highlights help emphasize differences among systems that would be ignored under other evaluation approaches.", "phrases": ["highlight-based reference-less evaluation", "summarization", "evaluation protocol"], "overall_score": 1.5512080552366894, "scores": [0.9478300848917477, 0.8913079177401807, 0.5523517421382893], "rank_score": 0.7971632482567393} -{"id": "sharma-etal-2019-entity", "title": "An Entity-Driven Framework for Abstractive Summarization", "abstract": "Abstractive summarization systems aim to produce more coherent and concise summaries than their extractive counterparts. Popular neural models have achieved impressive results for single-document summarization, yet their outputs are often incoherent and unfaithful to the input. In this paper, we introduce SENECA, a novel System for ENtity-drivEn Coherent Abstractive summarization framework that leverages entity information to generate informative and coherent abstracts. Our framework takes a two-step approach: (1) an entity-aware content selection module first identifies salient sentences from the input, then (2) an abstract generation module conducts cross-sentence information compression and abstraction to generate the final summary, which is trained with rewards to promote coherence, conciseness, and clarity. The two components are further connected using reinforcement learning. Automatic evaluation shows that our model significantly outperforms previous state-of-the-art based on ROUGE and our proposed coherence measures on New York Times and CNN/Daily Mail datasets. Human judges further rate our system summaries as more informative and coherent than those by popular summarization models.", "phrases": ["abstractive summarization", "coherence", "entity information"], "overall_score": 1.911319321442102, "scores": [0.9597325251397772, 0.8339886311165338, 0.5975250459327914], "rank_score": 0.7970820673963676} -{"id": "nilsson-etal-2007-generalizing", "title": "Generalizing Tree Transformations for Inductive Dependency Parsing", "abstract": "Previous studies in data-driven dependency parsing have shown that tree transformations can improve parsing accuracy for specific parsers and data sets. We investigate to what extent this can be generalized across languages/treebanks and parsers, focusing on pseudo-projective parsing, as a way of capturing non-projective dependencies, and transformations used to facilitate parsing of coordinate structures and verb groups. The results indicate that the beneficial effect of pseudo-projective parsing is independent of parsing strategy but sensitive to language or treebank specific properties. By contrast, the construction specific transformations appear to be more sensitive to parsing strategy but have a constant positive effect over several languages.", "phrases": ["transformation", "treebank", "verb group"], "overall_score": 1.7513184807701871, "scores": [1.2543059141672366, 0.5767684533812919, 0.5601038017217855], "rank_score": 0.7970593897567714} -{"id": "p-v-s-meyer-2017-joint", "title": "Joint Optimization of User-desired Content in Multi-document Summaries by Learning from User Feedback", "abstract": "In this paper, we propose an extractive multi-document summarization (MDS) system using joint optimization and active learning for content selection grounded in user feedback. 
Our method interactively obtains user feedback to gradually improve the results of a state-of-the-art integer linear programming (ILP) framework for MDS. Our method complements fully automatic methods in producing high-quality summaries with a minimum number of iterations and rounds of feedback. We conduct multiple simulation-based experiments and analyze the effect of feedback-based concept selection in the ILP setup in order to maximize the user-desired content in the summary.", "phrases": ["user-desired content", "user feedback", "joint optimization"], "overall_score": 1.1049382423250405, "scores": [0.8081178170155702, 0.7980544633952293, 0.7849611036255563], "rank_score": 0.7970444613454518} -{"id": "kang-etal-2019-dual", "title": "Dual Attention Networks for Visual Reference Resolution in Visual Dialog", "abstract": "Visual dialog (VisDial) is a task which requires a dialog agent to answer a series of questions grounded in an image. Unlike in visual question answering (VQA), the series of questions should be able to capture a temporal context from a dialog history and utilize visually-grounded information. Visual reference resolution is a problem that addresses these challenges, requiring the agent to resolve ambiguous references in a given question and to find the references in a given image. In this paper, we propose Dual Attention Networks (DAN) for visual reference resolution in VisDial. DAN consists of two kinds of attention modules, REFER and FIND. Specifically, the REFER module learns latent relationships between a given question and a dialog history by employing a multi-head attention mechanism. The FIND module takes image features and reference-aware representations (i.e., the output of the REFER module) as input, and performs visual grounding via a bottom-up attention mechanism. We qualitatively and quantitatively evaluate our model on VisDial v1.0 and v0.9 datasets, showing that DAN outperforms the previous state-of-the-art model by a significant margin.", "phrases": ["visual reference resolution", "visual dialog", "multi-head attention mechanism", "dual attention networks"], "overall_score": 1.1047281953933068, "scores": [0.914284230093222, 0.861912092430461, 0.8599963462071916, 0.5513791093174004], "rank_score": 0.7968929445120687} -{"id": "zarriess-kuhn-2009-exploiting", "title": "Exploiting Translational Correspondences for Pattern-Independent MWE Identification", "abstract": "Based on a study of verb translations in the Europarl corpus, we argue that a wide range of MWE patterns can be identified in translations that exhibit a correspondence between a single lexical item in the source language and a group of lexical items in the target language. We show that these correspondences can be reliably detected on dependency-parsed, word-aligned sentences. We propose an extraction method that combines word alignment with syntactic filters and is independent of the structural pattern of the translation.", "phrases": ["correspondence", "mwe pattern", "parallel corpora"], "overall_score": 1.8345700923019224, "scores": [1.2451480530676626, 0.5866947038661252, 0.558388246320605], "rank_score": 0.7967436677514642} -{"id": "horvat-byrne-2014-graph", "title": "A Graph-Based Approach to String Regeneration", "abstract": "The string regeneration problem is the problem of generating a fluent sentence from a bag of words. We explore the N-gram language model approach to string regeneration. The approach computes the highest probability permutation of the input bag of words under an N-gram language model. 
We describe a graph-based approach for finding the optimal permutation. The evaluation of the approach on a number of datasets yielded promising results, which were confirmed by conducting a manual evaluation study.", "phrases": ["graph-based approach", "string regeneration", "n-gram language model"], "overall_score": 1.1040819891113873, "scores": [0.9767328594800126, 0.8584874473929658, 0.5540601087657533], "rank_score": 0.7964268052129105} -{"id": "lucking-2017-indexicals", "title": "Indexicals as Weak Descriptors", "abstract": "Indexicals have a couple of uses that are in conflict with the traditional view that they directly refer to indices in the utterance situation. But how do they refer instead? It is argued that indexicals have both an indexical and a descriptive aspect \u2013 which is why they are called weak descriptors here. The indexical aspect anchors them in the actual situation of utterance, the weak descriptive aspect singles out the referent. Descriptive uses of \u201ctoday\u201d are then attributed to calendric coercion, which is triggered by quantificational elements. This account provides a grammatically motivated formal link to descriptive uses. With regard to some uses of \u201cI\u201d, a tentative contiguity rule is proposed as the reference rule for the first person pronoun, which is oriented along recent hearer-oriented accounts in philosophy, but finally has to be criticized. 1 Descriptive Indexicals Indexicals have descriptive uses as exemplified in (1a) (taken from Nunberg, 2004, p. 265): (1) a. Today is always the biggest party day of the year. b. *November 1, 2000 is always the biggest party day of the year. According to Nunberg (2004), today in (1a) is interpreted as picking out a day type or day property instead of referring to a concrete day, since \u201c[. . . ] the interpretations of these uses of indexicals are the very things that their linguistic meanings pick out of the context.\u201d (p. 272). The full date in (1b), to the contrary, refers to a particular day and has no such type or property reading. The interpretations of both sentences in (1) diverge, even if both sentences are produced on November 1, 2000. Based on these observations (and criticizing his earlier account which rests on distinguishing the index from the referent and bridging between both by means of a salient relation (Nunberg, 1993)), Nunberg (2004) comes up with his granularization of context hypothesis: indexical expressions are evaluated in contexts which are \u201cindividuated by the conversationally relevant properties\u201d (p. 273). However, the \u201cconversationally relevant properties\u201d seem to be restricted by the linguistic meaning of the indexical in question. For instance, descriptive uses of today always rest on a temporal interpretation (combining today with atemporal descriptions sounds awkward, e.g., \u201c*Today is always 2+2 = 4\u201d or \u201c*Today is always the largest tree in the park\u201d). Thus, a more restrictive account should be possible. Accordingly, contrary to the context granularization account \u2013 at least with respect to the kinds of examples in (1) \u2013 it is argued in the following that descriptive interpretations of indexical expressions are functional abstractions over indices that follow from the grammar of descriptive constructions in addition to type raising (Section 2). Part of the argument is that indexicals have a weak descriptive content that allows for functional abstraction in the first place. 
This re-analysis is spelled out more precisely in Section 3. Contrary to this line, however, in Section 4 it is suggested that \u2018I\u2019, instead of exhibiting descriptive use, should be interpreted according to an addressee-oriented semantic rule resting on a contiguity relation.", "phrases": ["weak descriptor", "indexical", "modality"], "overall_score": 1.4268225115141007, "scores": [0.9632960498488133, 0.8847599984115999, 0.5409183693752637], "rank_score": 0.7963248058785589} -{"id": "zhou-etal-2016-multi", "title": "Multi-view Response Selection for Human-Computer Conversation", "abstract": "In this paper, we study the task of response selection for multi-turn human-computer conversation. Previous approaches take the word as a unit and view context and response as sequences of words. This kind of approach does not explicitly take each utterance as a unit, and it is therefore difficult to catch utterance-level discourse information and dependencies. In this paper, we propose a multi-view response selection model that integrates information from two different views, i.e., word sequence view and utterance sequence view. We jointly model the two views via deep neural networks. Experimental results on a public corpus for context-sensitive response selection demonstrate the effectiveness of the proposed multi-view model, which significantly outperforms other single-view baselines.", "phrases": ["response selection", "conversation", "multi-view model", "multi-turn context"], "overall_score": 2.4244229107207924, "scores": [0.8633105568070758, 1.2249384852220502, 0.5493921570510251, 0.5476503667572584], "rank_score": 0.7963228914593524} -{"id": "zhao-etal-2021-closer", "title": "A Closer Look at Few-Shot Crosslingual Transfer: The Choice of Shots Matters", "abstract": "Few-shot crosslingual transfer has been shown to outperform its zero-shot counterpart with pretrained encoders like multilingual BERT. Despite its growing popularity, little to no attention has been paid to standardizing and analyzing the design of few-shot experiments. In this work, we highlight a fundamental risk posed by this shortcoming, illustrating that the model exhibits a high degree of sensitivity to the selection of few shots. We conduct a large-scale experimental study on 40 sets of sampled few shots for six diverse NLP tasks across up to 40 languages. We provide an analysis of success and failure cases of few-shot transfer, which highlights the role of lexical features. Additionally, we show that a straightforward full model finetuning approach is quite effective for few-shot transfer, outperforming several state-of-the-art few-shot approaches. As a step towards standardizing few-shot crosslingual experimental designs, we make our sampled few shots publicly available.", "phrases": ["crosslingual transfer", "shot", "few-shot cross-lingual transfer"], "overall_score": 1.2814141249589783, "scores": [0.909359486292085, 0.8707065745837257, 0.6084960235496363], "rank_score": 0.796187361475149} -{"id": "savary-waszczuk-2020-polish", "title": "Polish corpus of verbal multiword expressions", "abstract": "This paper describes a manually annotated corpus of verbal multi-word expressions in Polish. It is among the 4 biggest datasets in release 1.2 of the PARSEME multilingual corpus. We describe the data sources, as well as the annotation process and its outcomes. 
We also present interesting phenomena encountered during the annotation task and put forward enhancements for the PARSEME annotation guidelines.", "phrases": ["verbal multiword expression", "parseme", "polish corpus"], "overall_score": 1.103747372610257, "scores": [0.9343093029468768, 0.9078986159229481, 0.5463483724187361], "rank_score": 0.7961854304295203} -{"id": "xu-etal-2019-clickbait", "title": "Clickbait? Sensational Headline Generation with Auto-tuned Reinforcement Learning", "abstract": "Sensational headlines are headlines that capture people's attention and generate reader interest. Conventional abstractive headline generation methods, unlike human writers, do not optimize for maximal reader attention. In this paper, we propose a model that generates sensational headlines without labeled data. We first train a sensationalism scorer by classifying online headlines with many comments (\u201cclickbait\u201d) against a baseline of headlines generated from a summarization model. The score from the sensationalism scorer is used as the reward for a reinforcement learner. However, maximizing the noisy sensationalism reward will generate unnatural phrases instead of sensational headlines. To effectively leverage this noisy reward, we propose a novel loss function, Auto-tuned Reinforcement Learning (ARL), to dynamically balance reinforcement learning (RL) with maximum likelihood estimation (MLE). Human evaluation shows that 60.8% of samples generated by our model are sensational, which is significantly better than the Pointer-Gen baseline and other RL models.", "phrases": ["sensational headline", "auto-tuned reinforcement learning", "clickbait"], "overall_score": 1.1037197663193552, "scores": [0.9426316719118847, 0.8610221098499165, 0.584842768338288], "rank_score": 0.7961655167000297} -{"id": "lin-etal-2011-sentence", "title": "Sentence Subjectivity Detection with Weakly-Supervised Learning", "abstract": "This paper presents a hierarchical Bayesian model based on latent Dirichlet allocation (LDA), called subjLDA, for sentence-level subjectivity detection, which automatically identifies whether a given sentence expresses opinion or states facts. In contrast to most of the existing methods relying on either labelled corpora for classifier training or linguistic pattern extraction for subjectivity classification, we view the problem as weakly-supervised generative model learning, where the only input to the model is a small set of domain independent subjectivity lexical clues. A mechanism is introduced to incorporate the prior information about the subjectivity lexical clues into model learning by modifying the Dirichlet priors of topic-word distributions. The subjLDA model has been evaluated on the Multi-Perspective Question Answering (MPQA) dataset and promising results have been observed in the preliminary experiments. We have also explored adding neutral words as prior information for model learning. 
It was found that while incorporating subjectivity clues bearing positive or negative polarity can achieve a significant performance gain, the prior lexical information from neutral words is less effective.", "phrases": ["subjectivity detection", "latent dirichlet allocation", "opinion"], "overall_score": 1.1036915939053724, "scores": [0.989358841200698, 0.8568491901089746, 0.5422275524875025], "rank_score": 0.7961451945990583} -{"id": "spitkovsky-etal-2011-lateen", "title": "Lateen EM: Unsupervised Training with Multiple Objectives, Applied to Dependency Grammar Induction", "abstract": "We present new training methods that aim to mitigate local optima and slow convergence in unsupervised training by using additional imperfect objectives. In its simplest form, lateen EM alternates between the two objectives of ordinary \"soft\" and \"hard\" expectation maximization (EM) algorithms. Switching objectives when stuck can help escape local optima. We find that applying a single such alternation already yields state-of-the-art results for English dependency grammar induction. More elaborate lateen strategies track both objectives, with each validating the moves proposed by the other. Disagreements can signal earlier opportunities to switch or terminate, saving iterations. De-emphasizing fixed points in these ways eliminates some guesswork from tuning EM. An evaluation against a suite of unsupervised dependency parsing tasks, for a variety of languages, showed that lateen strategies significantly speed up training of both EM algorithms, and improve accuracy for hard EM.", "phrases": ["unsupervised training", "grammar induction", "iteration"], "overall_score": 1.4262483807483606, "scores": [0.8887992216296716, 0.8823066499307307, 0.6169072606311055], "rank_score": 0.7960043773971693} -{"id": "pouran-ben-veyseh-etal-2021-modeling", "title": "Modeling Document-Level Context for Event Detection via Important Context Selection", "abstract": "The task of Event Detection (ED) in Information Extraction aims to recognize and classify trigger words of events in text. The recent progress has featured advanced transformer-based language models (e.g., BERT) as a critical component in state-of-the-art models for ED. However, the length limit for input texts is a barrier for such ED models as they cannot encode long-range document-level context that has been shown to be beneficial for ED. To address this issue, we propose a novel method to model document-level context for ED that dynamically selects relevant sentences in the document for the event prediction of the target sentence. The target sentence will then be augmented with the selected sentences and consumed entirely by transformer-based language models for improved representation learning for ED. To this end, the REINFORCE algorithm is employed to train the relevant sentence selection for ED. Several information types are then introduced to form the reward function for the training process, including ED performance, sentence similarity, and discourse relations. 
Our extensive experiments on multiple benchmark datasets reveal the effectiveness of the proposed model, leading to new state-of-the-art performance.", "phrases": ["document-level context", "event detection", "information extraction"], "overall_score": 0.8744878880719996, "scores": [0.9433843241255685, 0.8849290585383129, 0.5596661541356979], "rank_score": 0.7959931789331932} -{"id": "huang-mi-2010-efficient", "title": "Efficient Incremental Decoding for Tree-to-String Translation", "abstract": "Syntax-based translation models should in principle be efficient with a polynomially-sized search space, but in practice they are often embarrassingly slow, partly due to the cost of language model integration. In this paper we borrow from phrase-based decoding the idea to generate a translation incrementally left-to-right, and show that for tree-to-string models, with a clever encoding of derivation history, this method runs in average-case polynomial-time in theory, and linear-time with beam search in practice (whereas phrase-based decoding is exponential-time in theory and quadratic-time in practice). Experiments show that, with comparable translation quality, our tree-to-string system (in Python) can run more than 30 times faster than the phrase-based system Moses (in C++).", "phrases": ["tree-to-string model", "beam search", "time complexity", "source sentence"], "overall_score": 1.9779183128660278, "scores": [1.0614433445308789, 1.0041521000678182, 0.5606597335558642, 0.5576363584305636], "rank_score": 0.7959728841462812} -{"id": "bar-etal-2012-ukp", "title": "UKP: Computing Semantic Textual Similarity by Combining Multiple Content Similarity Measures", "abstract": "We present the UKP system which performed best in the Semantic Textual Similarity (STS) task at SemEval-2012 in two out of three metrics. It uses a simple log-linear regression model, trained on the training data, to combine multiple text similarity measures of varying complexity. These range from simple character and word n-grams and common subsequences to complex features such as Explicit Semantic Analysis vector comparisons and aggregation of word similarity based on lexical-semantic resources. Further, we employ a lexical substitution system and statistical machine translation to add additional lexemes, which alleviates lexical gaps. Our final models, one per dataset, consist of a log-linear combination of about 20 features, out of the possible 300+ features implemented.", "phrases": ["semantic textual similarity", "complexity", "machine translation"], "overall_score": 2.3842609062542053, "scores": [1.3132813742939455, 0.5440042124652735, 0.5303719425556092], "rank_score": 0.7958858431049428} -{"id": "yang-eisenstein-2016-part", "title": "Part-of-Speech Tagging for Historical English", "abstract": "As more historical texts are digitized, there is interest in applying natural language processing tools to these archives. However, the performance of these tools is often unsatisfactory, due to language change and genre differences. Spelling normalization heuristics are the dominant solution for dealing with historical texts, but this approach fails to account for changes in usage and vocabulary. In this empirical paper, we assess the capability of domain adaptation techniques to cope with historical texts, focusing on the classic benchmark task of part-of-speech tagging. We evaluate several domain adaptation methods on the task of tagging Early Modern English and Modern British English texts in the Penn Corpora of Historical English. 
We demonstrate that the Feature Embedding method for unsupervised domain adaptation outperforms word embeddings and Brown clusters, showing the importance of embedding the entire feature space, rather than just individual words. Feature Embeddings also give better performance than spelling normalization, but the combination of the two methods is better still, yielding a 5% raw improvement in tagging accuracy on Early Modern English texts.", "phrases": ["historical english", "domain adaptation method", "part-of-speech tagging"], "overall_score": 1.1031099364804005, "scores": [0.99571177984135, 0.8622028039606702, 0.5292622685713999], "rank_score": 0.7957256174578067} -{"id": "kiela-etal-2015-visual", "title": "Visual Bilingual Lexicon Induction with Transferred ConvNet Features", "abstract": "This paper is concerned with the task of bilingual lexicon induction using image-based features. By applying features from a convolutional neural network (CNN), we obtain state-of-the-art performance on a standard dataset, obtaining a 79% relative improvement over previous work which uses bags of visual words based on SIFT features. The CNN image-based approach is also compared with state-of-the-art linguistic approaches to bilingual lexicon induction, even outperforming these for one of three language pairs on another standard dataset. Furthermore, we shed new light on the type of visual similarity metric to use for genuine similarity versus relatedness tasks, and experiment with using multiple layers from the same network in an attempt to improve performance.", "phrases": ["bilingual lexicon induction", "convolutional neural network", "image", "van"], "overall_score": 1.832108542138028, "scores": [1.2664960086509063, 0.8426234016787612, 0.5528550389280298, 0.52072407113573], "rank_score": 0.7956746300983568} -{"id": "shi-etal-2016-neural", "title": "Why Neural Translations are the Right Length", "abstract": "We investigate how neural, encoder-decoder translation systems output target strings of appropriate lengths, finding that a collection of hidden units learns to explicitly implement this functionality.", "phrases": ["right length", "neuron", "mechanism"], "overall_score": 1.9075660919773636, "scores": [0.871922211941987, 0.9503682657252803, 0.564260069744104], "rank_score": 0.7955168491371237} -{"id": "mizumoto-etal-2015-grammatical", "title": "Grammatical Error Correction Considering Multi-word Expressions", "abstract": "Multi-word expressions (MWEs) have been recognized as important linguistic information and much research has been conducted, especially on their extraction and interpretation. On the other hand, they have hardly been used in real application areas. While those who are learning English as a second language (ESL) use MWEs in their writings just like native speakers, MWEs haven\u2019t been taken into consideration in grammatical error correction tasks. In this paper, we investigate a grammatical error correction method using MWEs. 
Our method is a straightforward application of MWEs to grammatical error correction, but experimental results show that MWEs have a beneficial effect on grammatical error correction.", "phrases": ["multi-word expression", "consideration", "grammatical error correction"], "overall_score": 1.1028114410735896, "scores": [0.9371554995889397, 0.9290135316609045, 0.5203618643588744], "rank_score": 0.7955102985362396} -{"id": "ma-etal-2014-prune", "title": "Prune-and-Score: Learning for Greedy Coreference Resolution", "abstract": "We propose a novel search-based approach for greedy coreference resolution, where the mentions are processed in order and added to previous coreference clusters. Our method is distinguished by the use of two functions to make each coreference decision: a pruning function that prunes bad coreference decisions from further consideration, and a scoring function that then selects the best among the remaining decisions. Our framework reduces learning of these functions to rank learning, which helps leverage powerful off-the-shelf rank-learners. We show that our Prune-and-Score approach is superior to using a single scoring function to make both decisions and outperforms several state-of-the-art approaches on multiple benchmark corpora including OntoNotes.", "phrases": ["greedy coreference resolution", "mention", "decision"], "overall_score": 1.280190825628209, "scores": [0.9606502712601924, 0.8886395178540728, 0.5369920594822913], "rank_score": 0.7954272828655188} -{"id": "jiang-etal-2020-hover", "title": "HoVer: A Dataset for Many-Hop Fact Extraction And Claim Verification", "abstract": "We introduce HoVer (HOppy VERification), a dataset for many-hop evidence extraction and fact verification. It challenges models to extract facts from several Wikipedia articles that are relevant to a claim and classify whether the claim is supported or not-supported by the facts. In HoVer, the claims require evidence to be extracted from as many as four English Wikipedia articles and embody reasoning graphs of diverse shapes. Moreover, most of the 3/4-hop claims are written in multiple sentences, which adds to the complexity of understanding long-range dependency relations such as coreference. We show that the performance of an existing state-of-the-art semantic-matching model degrades significantly on our dataset as the number of reasoning hops increases, hence demonstrating the necessity of many-hop reasoning to achieve strong results. We hope that the introduction of this challenging dataset and the accompanying evaluation task will encourage research in many-hop fact retrieval and information verification.", "phrases": ["claim", "many-hop evidence extraction", "wikipedia article"], "overall_score": 1.546810722194321, "scores": [0.9604833313136689, 0.89613875820093, 0.5280883087716566], "rank_score": 0.7949034660954185} -{"id": "kirchhoff-bilmes-2014-submodularity", "title": "Submodularity for Data Selection in Machine Translation", "abstract": "We introduce submodular optimization to the problem of training data subset selection for statistical machine translation (SMT). By explicitly formulating data selection as a submodular program, we obtain fast scalable selection algorithms with mathematical performance guarantees, resulting in a unified framework that clarifies existing approaches and also makes both new and many previous approaches easily accessible. 
We present a new class of submodular functions designed specifically for SMT and evaluate them on two different translation tasks. Our results show that our best submodular method significantly outperforms several baseline methods, including the widely-used cross-entropy based data selection method. In addition, our approach easily scales to large data sets and is applicable to other data selection problems in natural language processing.", "phrases": ["data selection", "machine translation", "submodular optimization"], "overall_score": 1.2792370201611571, "scores": [0.9206999627464806, 0.8703114308519856, 0.59349255702573], "rank_score": 0.7948346502080654} -{"id": "popovic-2016-chrf", "title": "chrF deconstructed: beta parameters and n-gram weights", "abstract": "Character n-gram F-score (CHRF) is shown to correlate very well with human rankings of different machine translation outputs, especially for morphologically rich target languages. However, only two versions have been explored so far, namely CHRF1 (standard F-score, \u03b2 = 1) and CHRF3 (\u03b2 = 3), both with uniform n-gram weights. In this work, we investigated CHRF in more detail, namely \u03b2 parameters in the range from 1/6 to 6, and we found that CHRF2 is the most promising version. Then we investigated different n-gram weights for CHRF2 and found that the uniform weights are the best option. Apart from this, CHRF scores were systematically compared with WORDF scores, and a preliminary experiment carried out on a small amount of data with direct human scores indicates that the main advantage of CHRF is that it does not penalise acceptable variations in high-quality translations too harshly.", "phrases": ["n-gram weight", "preliminary experiment", "chrf"], "overall_score": 0.8732110216085908, "scores": [0.9267307241727814, 0.8886091529314939, 0.5691528978688271], "rank_score": 0.7948309249910341} -{"id": "reidsma-op-den-akker-2008-exploiting", "title": "Exploiting `Subjective' Annotations", "abstract": "Many interesting phenomena in conversation can only be annotated as a subjective task, requiring interpretative judgements from annotators. This leads to data which is annotated with lower levels of agreement not only due to errors in the annotation, but also due to the differences in how annotators interpret conversations. This paper constitutes an attempt to find out how subjective annotations with a low level of agreement can profitably be used for machine learning purposes. We analyse the (dis)agreements between annotators for two different cases in a multimodal annotated corpus and explicitly relate the results to the way machine-learning algorithms perform on the annotated data. Finally, we present two new concepts, namely 'subjective entity' classifiers and 'consensus objective' classifiers, and give recommendations for using subjective data in machine-learning applications.", "phrases": ["annotator", "reidsma", "slip"], "overall_score": 1.5464666470220925, "scores": [0.9619094069165945, 0.8606607238870762, 0.561609808500608], "rank_score": 0.7947266464347597} -{"id": "saunders-etal-2019-domain", "title": "Domain Adaptive Inference for Neural Machine Translation", "abstract": "We investigate adaptive ensemble weighting for Neural Machine Translation, addressing the case of improving performance on a new and potentially unknown domain without sacrificing performance on the original domain. 
We adapt sequentially across two Spanish-English and three English-German tasks, comparing unregularized fine-tuning, L2 and Elastic Weight Consolidation. We then report a novel scheme for adaptive NMT ensemble decoding by extending Bayesian Interpolation with source information, and report strong improvements across test domains without access to the domain label.", "phrases": ["neural machine translation", "ensemble weighting", "fine-tuning", "forgetting"], "overall_score": 1.829777356764293, "scores": [0.9889749692861485, 1.0561548033334147, 0.5700874384519766, 0.5634316255454606], "rank_score": 0.7946622091542501} -{"id": "yamamura-etal-2016-kyutech", "title": "The Kyutech corpus and topic segmentation using a combined method", "abstract": "Summarization of multi-party conversation is one of the important tasks in natural language processing. In this paper, we explain a Japanese corpus and a topic segmentation task. To the best of our knowledge, the corpus is the first Japanese corpus annotated for summarization tasks and freely available to anyone. We call it \u201cthe Kyutech corpus.\u201d The task of the corpus is a decision-making task with four participants and it contains utterances with time information, topic segmentation and reference summaries. As a case study for the corpus, we describe a method combined with LCSeg and TopicTiling for a topic segmentation task. We discuss the effectiveness and the problems of the combined method through the experiment with the Kyutech corpus.", "phrases": ["kyutech corpus", "topic segmentation", "decision-making task"], "overall_score": 0.8729151159946167, "scores": [0.9637339453236499, 0.8810981238414591, 0.5388526711163382], "rank_score": 0.7945615800938158} -{"id": "tran-nguyen-2017-natural", "title": "Natural Language Generation for Spoken Dialogue System using RNN Encoder-Decoder Networks", "abstract": "Natural language generation (NLG) is a critical component in a spoken dialogue system. This paper presents a Recurrent Neural Network based Encoder-Decoder architecture, in which an LSTM-based decoder is introduced to select, aggregate semantic elements produced by an attention mechanism over the input elements, and to produce the required utterances. The proposed generator can be jointly trained on both sentence planning and surface realization to produce natural language sentences. The proposed model was extensively evaluated on four different NLG datasets. The experimental results showed that the proposed generators not only consistently outperform the previous methods across all the NLG domains but also show an ability to generalize from a new, unseen domain and learn from multi-domain datasets.", "phrases": ["spoken dialogue system", "recurrent neural network", "natural language generation"], "overall_score": 1.5456253549276355, "scores": [0.924456654763055, 0.9132184324175584, 0.5452078362852955], "rank_score": 0.7942943078219695} -{"id": "broeder-etal-2012-standardizing", "title": "Standardizing a Component Metadata Infrastructure", "abstract": "This paper describes the status of the standardization efforts of a Component Metadata approach for describing Language Resources with metadata. Different linguistic and Language & Technology communities such as CLARIN, META-SHARE and NaLiDa use this component approach and see its standardization as a matter for cooperation that has the potential to create a large interoperable domain of joint metadata. 
Starting with an overview of the component metadata approach together with the related semantic interoperability tools and services, such as the ISOcat data category registry and the relation registry, we explain the standardization plan and efforts for component metadata within ISO TC37/SC4. Finally, we present information about uptake and plans for the use of component metadata within the three mentioned linguistic and L&T communities.", "phrases": ["component metadata infrastructure", "clarin", "meta-share"], "overall_score": 1.6513775575955962, "scores": [0.9913790563446445, 0.8300375543542357, 0.561017602279615], "rank_score": 0.7941447376594984} -{"id": "andreas-2020-good", "title": "Good-Enough Compositional Data Augmentation", "abstract": "We propose a simple data augmentation protocol aimed at providing a compositional inductive bias in conditional and unconditional sequence models. Under this protocol, synthetic training examples are constructed by taking real training examples and replacing (possibly discontinuous) fragments with other fragments that appear in at least one similar environment. The protocol is model-agnostic and useful for a variety of tasks. Applied to neural sequence-to-sequence models, it reduces error rate by as much as 87% on diagnostic tasks from the SCAN dataset and 16% on a semantic parsing task. Applied to n-gram language models, it reduces perplexity by roughly 1% on small corpora in several languages.", "phrases": ["data augmentation", "training example", "compositional generalization", "geca", "sentence fragment"], "overall_score": 2.295149455477328, "scores": [1.4003177615543057, 0.8445948801514072, 0.6199903356300865, 0.5699993966313448, 0.5354337068626708], "rank_score": 0.794067216165963} -{"id": "white-rajkumar-2009-perceptron", "title": "Perceptron Reranking for CCG Realization", "abstract": "This paper shows that discriminative reranking with an averaged perceptron model yields substantial improvements in realization quality with CCG. The paper confirms the utility of including language model log probabilities as features in the model, which prior work on discriminative training with log linear models for HPSG realization had called into question. The perceptron model allows the combination of multiple n-gram models to be optimized and then augmented with both syntactic features and discriminative n-gram features. The full model yields a state-of-the-art BLEU score of 0.8506 on Section 23 of the CCGbank, to our knowledge the best score reported to date using a reversible, corpus-engineered grammar.", "phrases": ["ccg", "perceptron reranking", "surface realization"], "overall_score": 1.903845551504702, "scores": [0.9426763538588642, 0.8873485979148646, 0.551870837965528], "rank_score": 0.7939652632464189} -{"id": "may-knight-2006-better", "title": "A Better N-Best List: Practical Determinization of Weighted Finite Tree Automata", "abstract": "Ranked lists of output trees from syntactic statistical NLP applications frequently contain multiple repeated entries. This redundancy leads to misrepresentation of tree weight and reduced information for debugging and tuning purposes. It is chiefly due to nondeterminism in the weighted automata that produce the results. We introduce an algorithm that determinizes such automata while preserving proper weights, returning the sum of the weight of all multiply derived trees. 
We also demonstrate our algorithm's effectiveness on two large-scale tasks.", "phrases": ["n-best list", "determinization", "wta"], "overall_score": 1.4220995062272535, "scores": [0.9449895277187209, 0.8833958666267002, 0.5526811449707143], "rank_score": 0.7936888464387118} -{"id": "yoshimoto-etal-2015-coordination", "title": "Coordination-Aware Dependency Parsing (Preliminary Report)", "abstract": "Coordinate structures pose difficulties for dependency parsers. In this paper, we propose a set of parsing rules specifically designed to handle coordination, which are intended to be used in combination with Eisner and Satta\u2019s dependency rules. The new rules are compatible with existing similarity-based approaches to coordination structure analysis, and thus the syntactic and semantic similarity of conjuncts can be incorporated to the parse scoring function. Although we are yet to implement such a scoring function, we analyzed the time complexity of the proposed rules as well as their coverage of the Penn Treebank converted to the Stanford basic dependencies.", "phrases": ["dependency parsing", "coordination", "new rule"], "overall_score": 1.0997546288602953, "scores": [0.9423615971200169, 0.8968886625908972, 0.5406655641662322], "rank_score": 0.7933052746257153} -{"id": "han-etal-2021-improving", "title": "Improving Multimodal Fusion with Hierarchical Mutual Information Maximization for Multimodal Sentiment Analysis", "abstract": "In multimodal sentiment analysis (MSA), the performance of a model highly depends on the quality of synthesized embeddings. These embeddings are generated from the upstream process called multimodal fusion, which aims to extract and combine the input unimodal raw data to produce a richer multimodal representation. Previous work either back-propagates the task loss or manipulates the geometric property of feature spaces to produce favorable fusion results, which neglects the preservation of critical task-related information that flows from input to the fusion results. In this work, we propose a framework named MultiModal InfoMax (MMIM), which hierarchically maximizes the Mutual Information (MI) in unimodal input pairs (inter-modality) and between multimodal fusion result and unimodal input in order to maintain task-related information through multimodal fusion. The framework is jointly trained with the main task (MSA) to improve the performance of the downstream MSA task. To address the intractable issue of MI bounds, we further formulate a set of computationally simple parametric and non-parametric methods to approximate their truth value. Experimental results on the two widely used datasets demonstrate the efficacy of our approach.", "phrases": ["multimodal fusion", "mutual information", "multimodal sentiment analysis"], "overall_score": 1.0997265907972211, "scores": [0.9718664934792289, 0.8639151574655113, 0.544073497370576], "rank_score": 0.7932850494384387} -{"id": "novikova-etal-2018-rankme", "title": "RankME: Reliable Human Ratings for Natural Language Generation", "abstract": "Human evaluation for natural language generation (NLG) often suffers from inconsistent user ratings. While previous research tends to attribute this problem to individual user preferences, we show that the quality of human judgements can also be improved by experimental design. We present a novel rank-based magnitude estimation method (RankME), which combines the use of continuous scales and relative assessments. 
We show that RankME significantly improves the reliability and consistency of human ratings compared to traditional evaluation methods. In addition, we show that it is possible to evaluate NLG systems according to multiple, distinct criteria, which is important for error analysis. Finally, we demonstrate that RankME, in combination with Bayesian estimation of system quality, is a cost-effective alternative for ranking multiple NLG systems.", "phrases": ["natural language generation", "human judgement", "rankme"], "overall_score": 0.8714417229961471, "scores": [0.9619629041575194, 0.8518318204030113, 0.5658665954105837], "rank_score": 0.7932204399903715} -{"id": "wei-etal-2020-uncertainty", "title": "Uncertainty-Aware Semantic Augmentation for Neural Machine Translation", "abstract": "As a sequence-to-sequence generation task, neural machine translation (NMT) naturally contains intrinsic uncertainty, where a single sentence in one language has multiple valid counterparts in the other. However, the dominant methods for NMT only observe one of them from the parallel corpora for the model training but have to deal with adequate variations under the same meaning at inference. This leads to a discrepancy of the data distribution between the training and the inference phases. To address this problem, we propose uncertainty-aware semantic augmentation, which explicitly captures the universal semantic information among multiple semantically-equivalent source sentences and enhances the hidden representations with this information for better translations. Extensive experiments on various translation tasks reveal that our approach significantly outperforms the strong baselines and the existing methods.", "phrases": ["semantic augmentation", "neural machine translation", "source sentence"], "overall_score": 1.421178781076875, "scores": [0.9192413233760833, 0.9035735769774569, 0.5567100394909174], "rank_score": 0.7931749799481526} -{"id": "zhao-etal-2021-relation", "title": "A Relation-Oriented Clustering Method for Open Relation Extraction", "abstract": "The clustering-based unsupervised relation discovery method has gradually become one of the important methods of open relation extraction (OpenRE). However, high-dimensional vectors can encode complex linguistic information, which leads to the problem that the derived clusters cannot explicitly align with the relational semantic classes. In this work, we propose a relation-oriented clustering model and use it to identify the novel relations in the unlabeled data. Specifically, to enable the model to learn to cluster relational data, our method leverages the readily available labeled data of pre-defined relations to learn a relation-oriented representation. We minimize the distance between instances with the same relation by gathering them towards their corresponding relation centroids to form a cluster structure, so that the learned representation is cluster-friendly. To reduce the clustering bias on predefined classes, we optimize the model by minimizing a joint objective on both labeled and unlabeled data. 
Experimental results show that our method reduces the error rate by 29.2% and 15.7% on two datasets, respectively, compared with current SOTA methods.", "phrases": ["clustering method", "open relation extraction", "semantic class"], "overall_score": 1.0995034972202282, "scores": [0.9691100945364355, 0.8615820905335596, 0.5486801792495478], "rank_score": 0.7931241214398477} -{"id": "schwenk-etal-2006-continuous", "title": "Continuous Space Language Models for Statistical Machine Translation", "abstract": "Statistical machine translation systems are based on one or more translation models and a language model of the target language. While many different translation models and phrase extraction algorithms have been proposed, a standard word n-gram back-off language model is used in most systems. \n \nIn this work, we propose to use a new statistical language model that is based on a continuous representation of the words in the vocabulary. A neural network is used to perform the projection and the probability estimation. We consider the translation of European Parliament Speeches. This task is part of an international evaluation organized by the TC-STAR project in 2006. The proposed method achieves consistent improvements in the BLEU score on the development and test data. \n \nWe also present algorithms to improve the estimation of the language model probabilities when splitting long sentences into shorter chunks.", "phrases": ["statistical machine translation", "cslm", "reranking"], "overall_score": 2.0924070471706955, "scores": [0.9678763522334287, 0.8733694816674146, 0.537338773212344], "rank_score": 0.7928615357043958} -{"id": "labutov-lipson-2014-generating", "title": "Generating Code-switched Text for Lexical Learning", "abstract": "A vast majority of L1 vocabulary acquisition occurs through incidental learning during reading (Nation, 2001; Schmitt et al., 2001). We propose a probabilistic approach to generating code-mixed text as an L2 technique for increasing retention in adult lexical learning through reading. Our model takes as input a bilingual dictionary and an English text, and generates a code-switched text that optimizes a defined \u201clearnability\u201d metric by constructing a factor graph over lexical mentions. Using an artificial language vocabulary, we evaluate a set of algorithms for generating code-switched text automatically by presenting it to Mechanical Turk subjects and measuring recall in a sentence completion task.", "phrases": ["code-switched text", "lexical learning", "incidental learning"], "overall_score": 1.2760072823407456, "scores": [0.954464889226799, 0.8742355717184055, 0.5497832428671212], "rank_score": 0.7928279012707753} -{"id": "ma-etal-2021-contrastive", "title": "Contrastive Fine-tuning Improves Robustness for Neural Rankers", "abstract": "The performance of state-of-the-art neural rankers can deteriorate substantially when exposed to noisy inputs or applied to a new domain. In this paper, we present a novel method for fine-tuning neural rankers that can significantly improve their robustness to out-of-domain data and query perturbations. Specifically, a contrastive loss that compares data points in the representation space is combined with the standard ranking loss during fine-tuning. We use relevance labels to denote similar/dissimilar pairs, which allows the model to learn the underlying matching semantics across different query-document pairs and leads to improved robustness. 
In experiments with four passage ranking datasets, the proposed contrastive fine-tuning method obtains improvements on robustness to query reformulations, noise perturbations, and zero-shot transfer for both BERT and BART based rankers. Additionally, our experiments show that contrastive fine-tuning outperforms data augmentation for robustifying neural rankers.", "phrases": ["fine-tuning", "robustness", "neural ranker"], "overall_score": 0.8707894007495316, "scores": [0.9223434651513298, 0.8219548430317815, 0.6335817038961913], "rank_score": 0.7926266706931008} -{"id": "gatt-etal-2009-tuna", "title": "The TUNA-REG Challenge 2009: Overview and Evaluation Results", "abstract": "The GREC Task at REG '08 required participating systems to select coreference chains to the main subject of short encyclopaedic texts collected from Wikipedia. Three teams submitted a total of 6 systems, and we additionally created four baseline systems. Systems were tested automatically using a range of existing intrinsic metrics. We also evaluated systems extrinsically by applying coreference resolution tools to the outputs and measuring the success of the tools. In addition, systems were tested in a reading/comprehension experiment involving human subjects. This report describes the GREC Task and the evaluation methods, gives brief descriptions of the participating systems, and presents the evaluation results.", "phrases": ["evaluation result", "tuna", "expression generation", "recent reg challenge"], "overall_score": 2.091722013537028, "scores": [0.8518329359023812, 0.8624273079451039, 0.851584536498725, 0.6045630619752366], "rank_score": 0.7926019605803617} -{"id": "huang-etal-2010-self", "title": "Self-Training with Products of Latent Variable Grammars", "abstract": "We study self-training with products of latent variable grammars in this paper. We show that increasing the quality of the automatically parsed data used for self-training gives higher accuracy self-trained grammars. Our generative self-trained grammars reach F scores of 91.6 on the WSJ test set and surpass even discriminative reranking systems without self-training. Additionally, we show that multiple self-trained grammars can be combined in a product model to achieve even higher accuracy. The product model is most effective when the individual underlying grammars are most diverse. Combining multiple grammars that were self-trained on disjoint sets of unlabeled data results in a final test accuracy of 92.5% on the WSJ test set and 89.6% on our Broadcast News test set.", "phrases": ["product", "latent variable grammar", "self-training"], "overall_score": 0.8705861339572349, "scores": [0.9354888815582606, 0.8124036895387438, 0.6294323767588406], "rank_score": 0.7924416492852817} -{"id": "malmasi-dras-2014-chinese", "title": "Chinese Native Language Identification", "abstract": "We present the first application of Native Language Identification (NLI) to non-English data. Motivated by theories of language transfer, NLI is the task of identifying a writer\u2019s native language (L1) based on their writings in a second language (the L2). An NLI system was applied to Chinese learner texts using topic-independent syntactic models to assess their accuracy. We find that models using part-of-speech tags, context-free grammar production rules and function words are highly effective, achieving a maximum accuracy of 71%. Interestingly, we also find that when applied to equivalent English data, the model performance is almost identical. 
This finding suggests that a systematic pattern of cross-linguistic transfer may exist, where the degree of transfer is independent of the L1 and L2.", "phrases": ["native language identification", "nli", "non-english dataset", "syntactic feature"], "overall_score": 1.5415098978808894, "scores": [0.9965741847985853, 1.0535529630255267, 0.5703272708164852, 0.5482631064296143], "rank_score": 0.7921793812675528} -{"id": "ehara-2014-machine", "title": "A machine translation system combining rule-based machine translation and statistical post-editing", "abstract": "System architecture, experimental settings and evaluation results of the EIWA in the WAT2014 Japanese to English (ja-en) and Chinese to Japanese (zh-ja) tasks are described. Our system combines rule-based machine translation (RBMT) and statistical post-editing (SPE). Evaluation results for ja-en task show 19.86 BLEU score, 0.7067 RIBES score, and 22.50 human evaluation score. Evaluation results for zh-ja task show 33.57 BLEU score, 0.8114 RIBES score, and 15.00 human evaluation score.", "phrases": ["rule-based machine translation", "statistical post-editing", "spe"], "overall_score": 0.8701392193478074, "scores": [0.9533439363045172, 0.8772725807248927, 0.5454880332010744], "rank_score": 0.7920348500768282} -{"id": "raghavan-etal-2014-cross", "title": "Cross-narrative Temporal Ordering of Medical Events", "abstract": "Cross-narrative temporal ordering of medical events is essential to the task of generating a comprehensive timeline over a patient\u2019s history. We address the problem of aligning multiple medical event sequences, corresponding to different clinical narratives, comparing the following approaches: (1) A novel weighted finite state transducer representation of medical event sequences that enables composition and search for decoding, and (2) Dynamic programming with iterative pairwise alignment of multiple sequences using global and local alignment algorithms. The cross-narrative coreference and temporal relation weights used in both these approaches are learned from a corpus of clinical narratives. We present results using both approaches and observe that the finite state transducer approach performs significantly better than the dynamic programming one by 6.8% for the problem of multiple-sequence alignment.", "phrases": ["medical event", "timeline", "cross-narrative temporal ordering"], "overall_score": 1.0978666503578203, "scores": [0.9826932205016412, 0.8581000592507066, 0.5350368782905597], "rank_score": 0.7919433860143025} -{"id": "otrusina-smrz-2010-new", "title": "A New Approach to Pseudoword Generation", "abstract": "Sense-tagged corpora are used to evaluate word sense disambiguation (WSD) systems. Manual creation of such resources is often prohibitively expensive. That is why the concept of pseudowords - conflations of two or more unambiguous words - has been integrated into WSD evaluation experiments. This paper presents a new method of pseudoword generation which takes into account semantic relatedness of the candidate words forming parts of the pseudowords to the particular senses of the word to be disambiguated. We compare the new approach to its alternatives and show that the results on pseudowords, which are more similar to real ambiguous words, better correspond to the actual results. 
Two techniques assessing the similarity are studied - the first one takes advantage of manually created dictionaries (wordnets), the second one builds on the automatically computed statistical data obtained from large corpora. Pros and cons of the two techniques are discussed and the results on a standard task are demonstrated.", "phrases": ["new approach", "pseudoword generation", "ambiguous word"], "overall_score": 0.870033417734221, "scores": [1.0082701337188262, 0.8112770351752385, 0.5562684669996396], "rank_score": 0.7919385452979014} -{"id": "muller-2007-resolving", "title": "Resolving It, This, and That in Unrestricted Multi-Party Dialog", "abstract": "We present an implemented system for the resolution of it, this, and that in transcribed multi-party dialog. The system handles NP-anaphoric as well as discourse-deictic anaphors, i", "phrases": ["multi-party dialog", "anaphor", "pronoun"], "overall_score": 1.9677436361255558, "scores": [0.9792613247182422, 0.8538151942916198, 0.5425583600228376], "rank_score": 0.7918782930109} -{"id": "hautli-janisz-etal-2022-qt30", "title": "QT30: A Corpus of Argument and Conflict in Broadcast Debate", "abstract": "Broadcast political debate is a core pillar of democracy: it is the public's easiest access to opinions that shape policies and enables the general public to make informed choices. With QT30, we present the largest corpus of analysed dialogical argumentation ever created (19,842 utterances, 280,000 words) and also the largest corpus of analysed broadcast political debate to date, using 30 episodes of BBC's 'Question Time' from 2020 and 2021. Question Time is the prime institution in UK broadcast political debate and features questions from the public on current political issues, which are responded to by a weekly panel of five figures of UK politics and society. QT30 is highly argumentative and combines language of well-versed political rhetoric with direct, often combative, justification-seeking of the general public. QT30 is annotated with Inference Anchoring Theory, a framework well-known in argument mining, which encodes the way arguments and conflicts are created and reacted to in dialogical settings. The resource is freely available at .", "phrases": ["argumentation", "conflict", "broadcast", "episode", "question time"], "overall_score": 1.0976000652630098, "scores": [0.8024111081561984, 0.9368141957051509, 0.8585188602220888, 0.824915652365548, 0.5360956111368812], "rank_score": 0.7917510855171734} -{"id": "liang-etal-2015-measuring", "title": "Measuring Prerequisite Relations Among Concepts", "abstract": "A prerequisite relation describes a basic relation among concepts in cognition, education and other areas. However, as a semantic relation, it has not been well studied in computational linguistics. We investigate the problem of measuring prerequisite relations among concepts and propose a simple link-based metric, namely reference distance (RefD), that effectively models the relation by measuring how differently two concepts refer to each other. 
Evaluations on two datasets that include seven domains show that our single metric based method outperforms existing supervised learning based methods.", "phrases": ["prerequisite relation", "reference distance", "refd"], "overall_score": 1.0975296282093177, "scores": [1.0033897540227608, 0.8414441947057846, 0.5302668790408899], "rank_score": 0.7917002759231452} -{"id": "zhou-etal-2018-relevant", "title": "Relevant Emotion Ranking from Text Constrained with Emotion Relationships", "abstract": "Text might contain or invoke multiple emotions with varying intensities. As such, emotion detection, to predict multiple emotions associated with a given text, can be cast into a multi-label classification problem. We would like to go one step further so that a ranked list of relevant emotions is generated where top ranked emotions are more intensely associated with text compared to lower ranked emotions, whereas the rankings of irrelevant emotions are not important. A novel framework of relevant emotion ranking is proposed to tackle the problem. In the framework, the objective loss function is designed elaborately so that both emotion prediction and rankings of only relevant emotions can be achieved. Moreover, we observe that some emotions co-occur more often while other emotions rarely co-exist. Such information is incorporated into the framework as constraints to improve the accuracy of emotion detection. Experimental results on two real-world corpora show that the proposed framework can effectively deal with emotion detection and performs remarkably better than the state-of-the-art emotion detection approaches and multi-label learning methods.", "phrases": ["intensity", "relevant emotion ranking", "relevant label"], "overall_score": 1.2737039411331792, "scores": [0.9879497014209213, 0.8508228452776196, 0.5354177180383702], "rank_score": 0.7913967549123037} -{"id": "auli-etal-2009-systematic", "title": "A Systematic Analysis of Translation Model Search Spaces", "abstract": "Translation systems are complex, and most metrics do little to pinpoint causes of error or isolate system differences. We use a simple technique to discover induction errors, which occur when good translations are absent from model search spaces. Our results show that a common pruning heuristic drastically increases induction error, and also strongly suggest that the search spaces of phrase-based and hierarchical phrase-based models are highly overlapping despite the well known structural differences.", "phrases": ["search space", "phrase-based model", "limit"], "overall_score": 1.8220236597935093, "scores": [0.9376137357506887, 0.9147587427618409, 0.5215119855239372], "rank_score": 0.7912948213454888} -{"id": "shishtla-etal-2009-language", "title": "A Language-Independent Transliteration Schema Using Character Aligned Models at NEWS 2009", "abstract": "In this paper we present a statistical transliteration technique that is language independent. This technique uses statistical alignment models and Conditional Random Fields (CRF). Statistical alignment models maximize the probability of the observed (source, target) word pairs using the expectation maximization algorithm and then the character level alignments are set to maximum posterior predictions of the model. 
CRF has efficient training and decoding processes which are conditioned on both source and target languages and produce a globally optimal solution.", "phrases": ["alignment model", "statistical transliteration technique", "giza++"], "overall_score": 1.4161933103474786, "scores": [0.9893741640811775, 0.858180643650819, 0.5236227995351526], "rank_score": 0.7903925357557163} -{"id": "li-etal-2018-deep", "title": "A Deep Relevance Model for Zero-Shot Document Filtering", "abstract": "In the era of big data, focused analysis for diverse topics with a short response time becomes an urgent demand. As a fundamental task, information filtering therefore becomes a critical necessity. In this paper, we propose a novel deep relevance model for zero-shot document filtering, named DAZER. DAZER estimates the relevance between a document and a category by taking a small set of seed words relevant to the category. With pre-trained word embeddings from a large external corpus, DAZER is devised to extract the relevance signals by modeling the hidden feature interactions in the word embedding space. The relevance signals are extracted through a gated convolutional process. The gate mechanism controls which convolution filters output the relevance signals in a category dependent manner. Experiments on two document collections of two different tasks (i.e., topic categorization and sentiment analysis) demonstrate that DAZER significantly outperforms the existing alternative solutions, including the state-of-the-art deep relevance ranking models.", "phrases": ["deep relevance model", "zero-shot document filtering", "seed word"], "overall_score": 0.8682629541390452, "scores": [0.9453473551276802, 0.9001310219778577, 0.525502622547235], "rank_score": 0.7903269998842576} -{"id": "zhang-etal-2019-generating-fluent", "title": "Generating Fluent Adversarial Examples for Natural Languages", "abstract": "Efficiently building an adversarial attacker for natural language processing (NLP) tasks is a real challenge. Firstly, as the sentence space is discrete, it is difficult to make small perturbations along the direction of gradients. Secondly, the fluency of the generated examples cannot be guaranteed. In this paper, we propose MHA, which addresses both problems by performing Metropolis-Hastings sampling, whose proposal is designed with the guidance of gradients. Experiments on IMDB and SNLI show that our proposed MHA outperforms the baseline model on attacking capability. Adversarial training with MHA also leads to better robustness and performance.", "phrases": ["adversarial example", "fluency", "metropolis-hasting sampling"], "overall_score": 2.2383769006939294, "scores": [0.9672063433476837, 0.8807606105797768, 0.5221795499245795], "rank_score": 0.7900488346173468} -{"id": "varanasi-etal-2020-copybert", "title": "CopyBERT: A Unified Approach to Question Generation with Self-Attention", "abstract": "Contextualized word embeddings provide better initialization for neural networks that deal with various natural language understanding (NLU) tasks including Question Answering (QA) and more recently, Question Generation (QG). Apart from providing meaningful word representations, pre-trained transformer models (Vaswani et al., 2017), such as BERT (Devlin et al., 2019) also provide self-attentions which encode syntactic information that can be probed for dependency parsing (Hewitt and Manning, 2019) and POS tagging (Coenen et al., 2019). 
In this paper, we show that the information from self-attentions of BERT is useful for language modeling of questions conditioned on paragraph and answer phrases. To control the attention span, we use a semi-diagonal mask and utilize a shared model for encoding and decoding, unlike sequence-to-sequence. We further employ copy-mechanism over self-attentions to achieve state-of-the-art results for Question Generation on SQuAD v1.1 (Rajpurkar et al., 2016).", "phrases": ["question generation", "self-attention", "bert"], "overall_score": 1.0952219638711373, "scores": [0.9327196543727143, 0.9154323268335024, 0.521954962718125], "rank_score": 0.7900356479747807} -{"id": "zhang-etal-2021-crowdsourcing", "title": "Crowdsourcing Learning as Domain Adaptation: A Case Study on Named Entity Recognition", "abstract": "Crowdsourcing is regarded as one prospective solution for effective supervised learning, aiming to build large-scale annotated training data by crowd workers. Previous studies focus on reducing the influences from the noises of the crowdsourced annotations for supervised models. We take a different point in this work, regarding all crowdsourced annotations as gold-standard with respect to the individual annotators. In this way, we find that crowdsourcing could be highly similar to domain adaptation, and then the recent advances of cross-domain methods can be almost directly applied to crowdsourcing. Here we take named entity recognition (NER) as a study case, suggesting an annotator-aware representation learning model that is inspired by the domain adaptation methods which attempt to capture effective domain-aware features. We investigate both unsupervised and supervised crowdsourcing learning, assuming that no or only small-scale expert annotations are available. Experimental results on a benchmark crowdsourced NER dataset show that our method is highly effective, leading to a new state-of-the-art performance. In addition, under the supervised setting, we can achieve impressive performance gains with only a very small scale of expert annotations.", "phrases": ["domain adaptation", "entity recognition", "annotator", "crowdsourcing learning"], "overall_score": 0.8679193958141899, "scores": [0.8818450126417322, 0.8808200546374411, 0.8605745467138749, 0.5368175044883122], "rank_score": 0.7900142796203401} -{"id": "luu-etal-2016-learning", "title": "Learning Term Embeddings for Taxonomic Relation Identification Using Dynamic Weighting Neural Network", "abstract": "Taxonomic relation identification aims to recognize the \u2018is-a\u2019 relation between two terms. Previous works on identifying taxonomic relations are mostly based on statistical and linguistic approaches, but the accuracy of these approaches is far from satisfactory. In this paper, we propose a novel supervised learning approach for identifying taxonomic relations using term embeddings. For this purpose, we first design a dynamic weighting neural network to learn term embeddings based on not only the hypernym and hyponym terms, but also the contextual information between them. We then apply such embeddings as features to identify taxonomic relations using a supervised method. 
The experimental results show that our proposed approach significantly outperforms other state-of-the-art methods by 9% to 13% in terms of accuracy for both general and specific domain datasets.", "phrases": ["term embedding", "taxonomic relation", "weighting", "linguistic approach", "hypernymy"], "overall_score": 1.8940943639425634, "scores": [1.0409369130417565, 0.9608079275467345, 0.8461687495203262, 0.5621224424332217, 0.539457478348731], "rank_score": 0.7898987021781539} -{"id": "vecchi-etal-2021-towards", "title": "Towards Argument Mining for Social Good: A Survey", "abstract": "This survey builds an interdisciplinary picture of Argument Mining (AM), with a strong focus on its potential to address issues related to Social and Political Science. More specifically, we focus on AM challenges related to its applications to social media and in the multilingual domain, and then proceed to the widely debated notion of argument quality. We propose a novel definition of argument quality which is integrated with that of deliberative quality from the Social Science literature. Under our definition, the quality of a contribution needs to be assessed at multiple levels: the contribution itself, its preceding context, and the consequential effect on the development of the upcoming discourse. The latter has not received the deserved attention within the community. We finally define an application of AM for Social Good: (semi-)automatic moderation, a highly integrative application which (a) represents a challenging testbed for the integrated notion of quality we advocate, (b) allows the empirical quantification of argument/deliberative quality to benefit from the developments in other NLP fields (i.e. hate speech detection, fact checking, debiasing), and (c) has a clearly beneficial potential at the level of its societal impact thanks to its real-world application (even if extremely ambitious).", "phrases": ["argument mining", "social good", "deliberative quality"], "overall_score": 1.0946705921668582, "scores": [0.945702393788215, 0.8519983396700487, 0.5712130186309035], "rank_score": 0.7896379173630557} -{"id": "ning-etal-2017-structured", "title": "A Structured Learning Approach to Temporal Relation Extraction", "abstract": "Identifying temporal relations between events is an essential step towards natural language understanding. However, the temporal relation between two events in a story depends on, and is often dictated by, relations among other events. Consequently, effectively identifying temporal relations between events is a challenging problem even for human annotators. This paper suggests that it is important to take these dependencies into account while learning to identify these relations and proposes a structured learning approach to address this challenge. As a byproduct, this provides a new perspective on handling missing relations, a known issue that hurts existing methods. 
As we show, the proposed approach results in significant improvements on the two commonly used data sets for this problem.", "phrases": ["structured learning approach", "temporal relation extraction", "annotator"], "overall_score": 2.08386775080881, "scores": [0.97275721972602, 0.869557663138932, 0.5265625122077178], "rank_score": 0.7896257983575566} -{"id": "peng-etal-2018-learning", "title": "Learning Joint Semantic Parsers from Disjoint Data", "abstract": "We present a new approach to learning a semantic parser from multiple datasets, even when the target semantic formalisms are drastically different and the underlying corpora do not overlap. We handle such \u201cdisjoint\u201d data by treating annotations for unobserved formalisms as latent structured variables. Building on state-of-the-art baselines, we show improvements both in frame-semantic parsing and semantic dependency parsing by modeling them jointly.", "phrases": ["semantic parser", "disjoint data", "latent structured variable"], "overall_score": 2.0836971645977216, "scores": [0.8337773391583081, 1.007122774409003, 0.5277833642957844], "rank_score": 0.7895611592876985} -{"id": "sajjad-etal-2013-translating", "title": "Translating Dialectal Arabic to English", "abstract": "We present a dialectal Egyptian Arabic to English statistical machine translation system that leverages dialectal to Modern Standard Arabic (MSA) adaptation. In contrast to previous work, we first narrow down the gap between Egyptian and MSA by applying an automatic character-level transformational model that changes Egyptian to EG′, which looks similar to MSA. The transformations include morphological, phonological and spelling changes. The transformation reduces the out-of-vocabulary (OOV) words from 5.2% to 2.6% and gives a gain of 1.87 BLEU points. Further, adapting large MSA/English parallel data increases the lexical coverage, reduces OOVs to 0.7% and leads to an absolute BLEU improvement of 2.73 points.", "phrases": ["dialectal arabic", "egyptian arabic", "pivot language"], "overall_score": 1.8174485020806292, "scores": [0.9732279604600786, 0.8448391949679429, 0.5498564113628223], "rank_score": 0.7893078555969479} -{"id": "forster-etal-2012-rwth", "title": "RWTH-PHOENIX-Weather: A Large Vocabulary Sign Language Recognition and Translation Corpus", "abstract": "This paper introduces the RWTH-PHOENIX-Weather corpus, a video-based, large vocabulary corpus of German Sign Language suitable for statistical sign language recognition and translation. In contrast to most available sign language data collections, the RWTH-PHOENIX-Weather corpus has not been recorded for linguistic research but for the use in statistical pattern recognition. The corpus contains weather forecasts recorded from German public TV which are manually annotated using glosses distinguishing sign variants, and time boundaries have been marked on the sentence and the gloss level. Further, the spoken German weather forecast has been transcribed in a semi-automatic fashion using a state-of-the-art automatic speech recognition system. Moreover, an additional translation of the glosses into spoken German has been created to capture allowable translation variability. 
In addition to the corpus, experimental baseline results for hand and head tracking, statistical sign language recognition and translation are presented.", "phrases": ["translation corpus", "german sign language", "weather forecast"], "overall_score": 1.0939979765359926, "scores": [0.924290507329141, 0.8487815625716778, 0.594386113335737], "rank_score": 0.7891527277455186} -{"id": "saraclar-sproat-2004-lattice", "title": "Lattice-Based Search for Spoken Utterance Retrieval", "abstract": "Recent work on spoken document retrieval has suggested that it is adequate to take the single-best output of ASR, and perform text retrieval on this output. This is reasonable enough for the task of retrieving broadcast news stories, where word error rates are relatively low, and the stories are long enough to contain much redundancy. But it is patently not reasonable if one\u2019s task is to retrieve a short snippet of speech in a domain where WERs can be as high as 50%; such would be the situation with teleconference speech, where one\u2019s task is to find if and when a participant uttered a certain phrase. In this paper we propose an indexing procedure for spoken utterance retrieval that works on lattices rather than just single-best text. We demonstrate that this procedure can improve F scores by over five points compared to single-best retrieval on tasks with poor WER and low redundancy. The representation is flexible so that we can represent both word lattices, as well as phone lattices, the latter being important for improving performance when searching for phrases containing OOV words.", "phrases": ["search", "spoken utterance retrieval", "word error rate"], "overall_score": 1.8169349017802652, "scores": [0.9732691719645743, 0.853740625741313, 0.5402446077559002], "rank_score": 0.7890848018205959} -{"id": "kim-etal-2020-beyond", "title": "Beyond Domain APIs: Task-oriented Conversational Modeling with Unstructured Knowledge Access", "abstract": "Most prior work on task-oriented dialogue systems is restricted to a limited coverage of domain APIs, while users oftentimes have domain related requests that are not covered by the APIs. In this paper, we propose to expand coverage of task-oriented dialogue systems by incorporating external unstructured knowledge sources. We define three sub-tasks: knowledge-seeking turn detection, knowledge selection, and knowledge-grounded response generation, which can be modeled individually or jointly. We introduce an augmented version of MultiWOZ 2.1, which includes new out-of-API-coverage turns and responses grounded on external knowledge sources. We present baselines for each sub-task using both conventional and neural approaches. Our experimental results demonstrate the need for further research in this direction to enable more informative conversational systems.", "phrases": ["domain api", "conversational modeling", "unstructured knowledge access", "task-oriented dialog"], "overall_score": 1.8167260975514168, "scores": [0.8885504032980175, 0.8494145952408265, 0.8361676683912432, 0.5818438102547486], "rank_score": 0.788994119296209} -{"id": "zhang-etal-2019-lattice", "title": "Lattice Transformer for Speech Translation", "abstract": "Recent advances in sequence modeling have highlighted the strengths of the transformer architecture, especially in achieving state-of-the-art machine translation results. However, depending on the up-stream systems, e.g., speech recognition, or word segmentation, the input to the translation system can vary greatly. 
The goal of this work is to extend the attention mechanism of the transformer to naturally consume the lattice in addition to the traditional sequential input. We first propose a general lattice transformer for speech translation where the input is the output of the automatic speech recognition (ASR) which contains multiple paths and posterior scores. To leverage the extra information from the lattice structure, we develop a novel controllable lattice attention mechanism to obtain latent representations. On the LDC Spanish-English speech translation corpus, our experiments show that the lattice transformer generalizes significantly better and outperforms both a transformer baseline and a lattice LSTM. Additionally, we validate our approach on the WMT 2017 Chinese-English translation task with lattice inputs from different BPE segmentations. In this task, we also observe improvements over strong baselines.", "phrases": ["speech translation", "asr", "lattice transformer"], "overall_score": 1.0937563390142289, "scores": [0.9521621266943937, 0.8831339101913571, 0.531639232469293], "rank_score": 0.788978423118348} -{"id": "baan-etal-2019-realization", "title": "On the Realization of Compositionality in Neural Networks", "abstract": "We present a detailed comparison of two types of sequence-to-sequence models trained to conduct a compositional task. The models are architecturally identical at inference time, but differ in the way that they are trained: our baseline model is trained with a task-success signal only, while the other model receives additional supervision on its attention mechanism (Attentive Guidance), which has been shown to be an effective method for encouraging more compositional solutions. We first confirm that the models with attentive guidance indeed infer more compositional solutions than the baseline, by training them on the lookup table task presented by Liska et al. (2019). We then do an in-depth analysis of the structural differences between the two model types, focusing in particular on the organisation of the parameter space and the hidden layer activations and find noticeable differences in both these aspects. Guided networks focus more on the components of the input rather than the sequence as a whole and develop small functional groups of neurons with specific purposes that use their gates more selectively. Results from parameter heat maps, component swapping and graph analysis also indicate that guided networks exhibit a more modular structure with a small number of specialized, strongly connected neurons.", "phrases": ["compositionality", "in-depth analysis", "neuron"], "overall_score": 1.4136346679739467, "scores": [0.8736185766225277, 0.9310297428417089, 0.562245271308275], "rank_score": 0.7889645302575038} -{"id": "schwenk-2012-continuous", "title": "Continuous Space Translation Models for Phrase-Based Statistical Machine Translation", "abstract": "This paper presents a new approach to perform the estimation of the translation model probabilities of a phrase-based statistical machine translation system. We use neural networks to directly learn the translation probability of phrase pairs using continuous representations. The system can be easily trained on the same data used to build standard phrase-based systems. We provide experimental evidence that the approach seems to be able to infer meaningful translation probabilities for phrase pairs not seen in the training data, or even predict a list of the most likely translations given a source phrase. 
The approach can be used to rescore n-best lists, but we also discuss an integration into the Moses decoder. A preliminary evaluation on the English/French IWSLT task achieved improvements in the BLEU score and a human analysis showed that the new model often chooses semantically better translations. Several extensions of this work are discussed.", "phrases": ["translation probability", "phrase-based smt", "maximum length"], "overall_score": 2.280396887596439, "scores": [1.261775876354546, 0.5722640669260106, 0.5328495906003253], "rank_score": 0.7889631779602939} -{"id": "demir-etal-2008-generating", "title": "Generating Textual Summaries of Bar Charts", "abstract": "Information graphics, such as bar charts and line graphs, play an important role in multimodal documents. This paper presents a novel approach to producing a brief textual summary of a simple bar chart. It outlines our approach to augmenting the core message of the graphic to produce a brief summary. Our method simultaneously constructs both the discourse and sentence structures of the textual summary using a bottom-up approach. The result is then realized in natural language. An evaluation study validates our generation methodology.", "phrases": ["textual summary", "bar chart", "message"], "overall_score": 1.413471904300901, "scores": [1.20406291362571, 0.6268630511918428, 0.5356951055483289], "rank_score": 0.7888736901219605} -{"id": "felice-buttery-2019-entropy", "title": "Entropy as a Proxy for Gap Complexity in Open Cloze Tests", "abstract": "This paper presents a pilot study of entropy as a measure of gap complexity in open cloze tests aimed at learners of English. Entropy is used to quantify the information content in each gap, which can be used to estimate complexity. Our study shows that average gap entropy correlates positively with proficiency levels while individual gap entropy can capture contextual complexity. To the best of our knowledge, this is the first unsupervised information-theoretical approach to evaluating the quality of cloze tests.", "phrases": ["gap complexity", "open cloze test", "contextual complexity", "entropy"], "overall_score": 1.0935728483415852, "scores": [0.9030965770524899, 0.88910084026483, 0.8026769068671485, 0.5605099261219783], "rank_score": 0.7888460625766117} -{"id": "van-noord-bos-2017-dealing", "title": "Dealing with Co-reference in Neural Semantic Parsing", "abstract": "Linguistic phenomena like pronouns, control constructions, or co-reference give rise to co-indexed variables in meaning representations. We review three different methods for dealing with co-indexed variables in the output of neural semantic parsing of abstract meaning representations: (a) copying concepts during training and restoring co-indexation in a post-processing step; (b) explicit indexing of co-indexation; and (c) using absolute paths to designate co-indexing. The second method gives the best results and outperforms the baseline by 2.9 F-score points.", "phrases": ["co-reference", "neural semantic parsing", "van"], "overall_score": 1.2695900522952095, "scores": [0.9455206913353108, 0.8978501975997253, 0.5231510672460986], "rank_score": 0.7888406520603782} -{"id": "sangati-zuidema-2011-accurate", "title": "Accurate Parsing with Compact Tree-Substitution Grammars: Double-DOP", "abstract": "We present a novel approach to Data-Oriented Parsing (DOP). 
Like other DOP models, our parser utilizes syntactic fragments of arbitrary size from a treebank to analyze new sentences, but, crucially, it uses only those which are encountered at least twice. This criterion allows us to work with a relatively small but representative set of fragments, which can be employed as the symbolic backbone of several probabilistic generative models. For parsing we define a transform-backtransform approach that allows us to use standard PCFG technology, making our results easily replicable. According to standard Parseval metrics, our best model is on par with many state-of-the-art parsers, while offering some complementary benefits: a simple generative probability model, and an explicit representation of the larger units of grammar.", "phrases": ["dop model", "fragment", "treebank"], "overall_score": 1.4133542868817788, "scores": [0.9739606559666099, 0.8704312211118596, 0.5220322626929735], "rank_score": 0.7888080465904809} -{"id": "suhr-etal-2017-corpus", "title": "A Corpus of Natural Language for Visual Reasoning", "abstract": "We present a new visual reasoning language dataset, containing 92,244 pairs of examples of natural statements grounded in synthetic images with 3,962 unique sentences. We describe a method of crowdsourcing linguistically-diverse data, and present an analysis of our data. The data demonstrates a broad set of linguistic phenomena, requiring visual and set-theoretic reasoning. We experiment with various models, and show the data presents a strong challenge for future research.", "phrases": ["reasoning", "image", "quantifier", "creation", "fig"], "overall_score": 2.1868718596722614, "scores": [1.667046848644593, 0.595130812452062, 0.5793229469381503, 0.5530356671963556, 0.5492002085298353], "rank_score": 0.7887472967521992} -{"id": "boleda-etal-2007-modelling", "title": "Modelling Polysemy in Adjective Classes by Multi-Label Classification", "abstract": "This paper assesses the role of multi-label classification in modelling polysemy for language acquisition tasks. We focus on the acquisition of semantic classes for Catalan adjectives, and show that polysemy acquisition naturally suits architectures used for multi-label classification. Furthermore, we explore the performance of information drawn from different levels of linguistic description, using feature sets based on morphology, syntax, semantics, and n-gram distribution. Finally, we demonstrate that ensemble classifiers are a powerful and adequate way to combine different types of linguistic evidence: a simple, majority voting ensemble classifier improves the accuracy from 62.5% (best single classifier) to 84%.", "phrases": ["polysemy", "adjective", "multi-label classification"], "overall_score": 1.2694082144863226, "scores": [0.888809570984936, 0.8871314880996227, 0.5902419505473204], "rank_score": 0.7887276698772929} -{"id": "shao-etal-2019-aggregating", "title": "Aggregating Bidirectional Encoder Representations Using MatchLSTM for Sequence Matching", "abstract": "In this work, we propose an aggregation method to combine the Bidirectional Encoder Representations from Transformer (BERT) with a MatchLSTM layer for Sequence Matching. Given a sentence pair, we extract its output representations from BERT. Then we extend BERT with a MatchLSTM layer to get further interaction of the sentence pair for sequence matching tasks. Taking natural language inference as an example, we split the BERT output into two parts, one from the premise sentence and one from the hypothesis sentence. 
At each position of the hypothesis sentence, both the weighted representation of the premise sentence and the representation of the current token are fed into the LSTM. We jointly train the aggregation layer and pre-trained layer for sequence matching. We conduct an experiment on two publicly available datasets, WikiQA and SNLI. Experiments show that our model achieves significant improvement compared with state-of-the-art methods on both datasets.", "phrases": ["bidirectional encoder representations", "sequence matching", "bert"], "overall_score": 0.8664678897396806, "scores": [0.9307524092618722, 0.8910707501797223, 0.5442560261193097], "rank_score": 0.7886930618536346} -{"id": "peng-etal-2017-composite", "title": "Composite Task-Completion Dialogue Policy Learning via Hierarchical Deep Reinforcement Learning", "abstract": "Building a dialogue agent to fulfill complex tasks, such as travel planning, is challenging because the agent has to learn to collectively complete multiple subtasks. For example, the agent needs to reserve a hotel and book a flight so that there is enough time for the commute between arrival and hotel check-in. This paper addresses this challenge by formulating the task in the mathematical framework of options over Markov Decision Processes (MDPs), and proposing a hierarchical deep reinforcement learning approach to learning a dialogue manager that operates at different temporal scales. The dialogue manager consists of: (1) a top-level dialogue policy that selects among subtasks or options, (2) a low-level dialogue policy that selects primitive actions to complete the subtask given by the top-level policy, and (3) a global state tracker that helps ensure all cross-subtask constraints be satisfied. Experiments on a travel planning task with simulated and real users show that our approach leads to significant improvements over three baselines, two based on handcrafted rules and the other based on flat deep reinforcement learning.", "phrases": ["reinforcement learning", "dialogue agent", "dialog policy learning"], "overall_score": 2.1864952451919772, "scores": [1.2191381752101023, 0.5945200972044883, 0.5521761129597319], "rank_score": 0.7886114617914409} -{"id": "bykh-meurers-2014-exploring", "title": "Exploring Syntactic Features for Native Language Identification: A Variationist Perspective on Feature Encoding and Ensemble Optimization", "abstract": "In this paper, we systematically explore lexicalized and non-lexicalized local syntactic features for the task of Native Language Identification (NLI). We investigate different types of feature representations in single- and cross-corpus settings, including two representations inspired by a variationist perspective on the choices made in the linguistic system. To combine the different models, we use a probabilities-based ensemble classifier and propose a technique to optimize and tune it. 
Combining the best performing syntactic features with four types of n-grams outperforms the best approach of the NLI Shared Task 2013.", "phrases": ["syntactic feature", "native language identification", "variationist perspective", "n-gram"], "overall_score": 1.6398298094832042, "scores": [0.8940479560255503, 0.8885799020591301, 0.7981111679465279, 0.5736267526932088], "rank_score": 0.7885914446811042} -{"id": "kohonen-etal-2010-semi", "title": "Semi-Supervised Learning of Concatenative Morphology", "abstract": "We consider morphology learning in a semi-supervised setting, where a small set of linguistic gold standard analyses is available. We extend Morfessor Baseline, which is a method for unsupervised morphological segmentation, to this task. We show that known linguistic segmentations can be exploited by adding them into the data likelihood function and optimizing separate weights for unlabeled and labeled data. Experiments on English and Finnish are presented with varying amounts of labeled data. Results of the linguistic evaluation of Morpho Challenge improve rapidly already with small amounts of labeled data, surpassing the state-of-the-art unsupervised methods at 1000 labeled words for English and at 100 labeled words for Finnish.", "phrases": ["morfessor", "segmentation", "semi-supervised version", "extension"], "overall_score": 2.1351472133403075, "scores": [1.0964637448945194, 0.9769214424661178, 0.5536500594603705, 0.5267426446985458], "rank_score": 0.7884444728798884} -{"id": "zhou-etal-2016-text", "title": "Text Classification Improved by Integrating Bidirectional LSTM with Two-dimensional Max Pooling", "abstract": "Recurrent Neural Network (RNN) is one of the most popular architectures used in Natural Language Processing (NLP) tasks because its recurrent structure is very suitable to process variable-length text. RNN can utilize distributed representations of words by first converting the tokens comprising each text into vectors, which form a matrix. And this matrix includes two dimensions: the time-step dimension and the feature vector dimension. Then most existing models usually utilize one-dimensional (1D) max pooling operation or attention-based operation only on the time-step dimension to obtain a fixed-length vector. However, the features on the feature vector dimension are not mutually independent, and simply applying 1D pooling operation over the time-step dimension independently may destroy the structure of the feature representation. On the other hand, applying two-dimensional (2D) pooling operation over the two dimensions may sample more meaningful features for sequence modeling tasks. To integrate the features on both dimensions of the matrix, this paper explores applying 2D max pooling operation to obtain a fixed-length representation of the text. This paper also utilizes 2D convolution to sample more meaningful information of the matrix. Experiments are conducted on six text classification tasks, including sentiment analysis, question classification, subjectivity classification and newsgroup classification. Compared with the state-of-the-art models, the proposed models achieve excellent performance on 4 out of 6 tasks. 
Specifically, one of the proposed models achieves the highest accuracy on the Stanford Sentiment Treebank binary classification and fine-grained classification tasks.", "phrases": ["bidirectional lstm", "fixed-length representation", "text classification"], "overall_score": 1.732171898788905, "scores": [0.9056666876005256, 0.900895882712422, 0.5584736439945824], "rank_score": 0.7883454047691766} -{"id": "yoon-etal-2021-self", "title": "Self-Adapter at SemEval-2021 Task 10: Entropy-based Pseudo-Labeler for Source-free Domain Adaptation", "abstract": "Source-free domain adaptation is an emerging line of work in deep learning research since it is closely related to the real-world environment. We study domain adaptation in the sequence labeling problem where the model trained on the source domain data is given. We propose two methods: Self-Adapter and Selective Classifier Training. Self-Adapter is a training method that uses sentence-level pseudo-labels filtered by the self-entropy threshold to provide supervision to the whole model. Selective Classifier Training uses token-level pseudo-labels and supervises only the classification layer of the model. The proposed methods are evaluated on data provided by SemEval-2021 task 10 and Self-Adapter achieves 2nd rank performance.", "phrases": ["semeval-2021 task", "pseudo-label", "source-free domain adaptation"], "overall_score": 0.8660603424688136, "scores": [0.9148111796010814, 0.8914200254268464, 0.5587350839950321], "rank_score": 0.7883220963409867} -{"id": "razmara-etal-2012-mixing", "title": "Mixing Multiple Translation Models in Statistical Machine Translation", "abstract": "Statistical machine translation is often faced with the problem of combining training data from many diverse sources into a single translation model which then has to translate sentences in a new domain. We propose a novel approach, ensemble decoding, which combines a number of translation systems dynamically at the decoding step. In this paper, we evaluate performance on a domain adaptation setting where we translate sentences from the medical domain. Our experimental results show that ensemble decoding outperforms various strong baselines including mixture models, the current state-of-the-art for domain adaptation in machine translation.", "phrases": ["statistical machine translation", "ensemble decoding", "strong baseline"], "overall_score": 1.7315183321167122, "scores": [0.9547740064966099, 0.8623156604612675, 0.5470541943162837], "rank_score": 0.7880479537580537} -{"id": "pinnis-etal-2012-accurat", "title": "ACCURAT Toolkit for Multi-Level Alignment and Information Extraction from Comparable Corpora", "abstract": "The lack of parallel corpora and linguistic resources for many languages and domains is one of the major obstacles for the further advancement of automated translation. A possible solution is to exploit comparable corpora (non-parallel bi- or multi-lingual text resources) which are much more widely available than parallel translation data. Our presented toolkit deals with parallel content extraction from comparable corpora. It consists of tools bundled in two workflows: (1) alignment of comparable documents and extraction of parallel sentences and (2) extraction and bilingual mapping of terms and named entities. The toolkit pairs similar bilingual comparable documents and extracts parallel sentences and bilingual terminological and named entity dictionaries from comparable corpora. 
This demonstration focuses on the English, Latvian, Lithuanian, and Romanian languages.", "phrases": ["toolkit", "comparable corpora", "parallel sentence"], "overall_score": 1.0924328884646133, "scores": [0.9164902540413901, 0.861196871417853, 0.5863841405786125], "rank_score": 0.7880237553459519} -{"id": "zhang-etal-2018-global", "title": "Global Attention for Name Tagging", "abstract": "Many name tagging approaches use local contextual information with much success, but can fail when the local context is ambiguous or limited. We present a new framework to improve name tagging by utilizing local, document-level, and corpus-level contextual information. For each word, we retrieve document-level context from other sentences within the same document and corpus-level context from sentences in other documents. We propose a model that learns to incorporate document-level and corpus-level contextual information alongside local contextual information via document-level and corpus-level attentions, which dynamically weight their respective contextual information and determine the influence of this information through gating mechanisms. Experiments on benchmark datasets show the effectiveness of our approach, which achieves state-of-the-art results for Dutch, German, and Spanish on the CoNLL-2002 and CoNLL-2003 datasets. We will make our code and pre-trained models publicly available for research purposes.", "phrases": ["name tagging", "contextual information", "global attention"], "overall_score": 1.0923649895893768, "scores": [0.934742736424234, 0.8972729886480373, 0.5319086049097057], "rank_score": 0.787974776660659} -{"id": "shutova-etal-2012-unsupervised", "title": "Unsupervised Metaphor Paraphrasing using a Vector Space Model", "abstract": "We present the first fully unsupervised approach to metaphor interpretation, and a system that produces literal paraphrases for metaphorical expressions. Such a form of interpretation is directly transferable to other NLP applications that can benefit from a metaphor processing component. Our method is different from previous work in that it does not rely on any manually annotated data or lexical resources. First, our method computes candidate paraphrases according to the context in which the metaphor appears, using a vector space model. It then uses a selectional preference model to measure the degree of literalness of the paraphrases. The system identifies correct paraphrases with a precision of 0.52 at top rank, which is a promising result for a fully unsupervised approach.", "phrases": ["paraphrase", "vector space model", "linguistic metaphor"], "overall_score": 1.4113995824816772, "scores": [0.8268269724459758, 0.9190804498810385, 0.6172438935520383], "rank_score": 0.7877171052930176} -{"id": "rohrdantz-etal-2011-towards", "title": "Towards Tracking Semantic Change by Visual Analytics", "abstract": "This paper presents a new approach to detecting and tracking changes in word meaning by visually modeling and representing diachronic development in word contexts. Previous studies have shown that computational models are capable of clustering and disambiguating senses; a more recent trend investigates whether changes in word meaning can be tracked by automatic methods. The aim of our study is to offer a new instrument for investigating the diachronic development of word senses in a way that allows for a better understanding of the nature of semantic change in general. 
For this purpose we combine techniques from the field of Visual Analytics with unsupervised methods from Natural Language Processing, allowing for an interactive visual exploration of semantic change.", "phrases": ["semantic change", "visual analytics", "new approach"], "overall_score": 1.637967709319282, "scores": [0.9361348796377533, 0.8974274853107556, 0.5295255264226744], "rank_score": 0.7876959637903944} -{"id": "beltagy-etal-2016-representing", "title": "Representing Meaning with a Combination of Logical and Distributional Models", "abstract": "NLP tasks differ in the semantic information they require, and at this time no single semantic representation fulfills all requirements. Logic-based representations characterize sentence structure, but do not capture the graded aspect of meaning. Distributional models give graded similarity ratings for words and phrases, but do not capture sentence structure in the same detail as logic-based approaches. It has therefore been argued that the two are complementary. We adopt a hybrid approach that combines logical and distributional semantics using probabilistic logic, specifically Markov Logic Networks. In this article, we focus on the three components of a practical system: 1) Logical representation focuses on representing the input problems in probabilistic logic; 2) knowledge base construction creates weighted inference rules by integrating distributional information with other sources; and 3) probabilistic inference involves solving the resulting MLN inference problems efficiently. To evaluate our approach, we use the task of textual entailment, which can utilize the strengths of both logic-based and distributional representations. In particular we focus on the SICK data set, where we achieve state-of-the-art results. We also release a lexical entailment data set of 10,213 rules extracted from the SICK data set, which is a valuable resource for evaluating lexical entailment systems.", "phrases": ["markov logic networks", "distributional information", "boxer"], "overall_score": 1.813341103053027, "scores": [0.9489554175661875, 0.886722127839872, 0.526894559186797], "rank_score": 0.7875240348642855} -{"id": "shnarch-etal-2011-probabilistic", "title": "A Probabilistic Modeling Framework for Lexical Entailment", "abstract": "Recognizing entailment at the lexical level is an important and commonly-addressed component in textual inference. Yet, this task has been mostly approached by simplified heuristic methods. This paper proposes an initial probabilistic modeling framework for lexical entailment, with suitable EM-based parameter estimation. Our model considers prominent entailment factors, including differences in lexical-resources reliability and the impacts of transitivity and multiple evidence. Evaluations show that the proposed model outperforms most prior systems while pointing at required future improvements.", "phrases": ["probabilistic modeling framework", "lexical entailment", "heuristic method"], "overall_score": 1.5318948889214932, "scores": [0.972704791255639, 0.8507178703378845, 0.538292070710823], "rank_score": 0.7872382441014487} -{"id": "niu-bansal-2018-polite", "title": "Polite Dialogue Generation Without Parallel Data", "abstract": "Stylistic dialogue response generation, with valuable applications in personality-based conversational agents, is a challenging task because the response needs to be fluent, contextually-relevant, as well as paralinguistically accurate. 
Moreover, parallel datasets for regular-to-stylistic pairs are usually unavailable. We present three weakly-supervised models that can generate diverse, polite (or rude) dialogue responses without parallel data. Our late fusion model (Fusion) merges the decoder of an encoder-attention-decoder dialogue model with a language model trained on stand-alone polite utterances. Our label-finetuning (LFT) model prepends to each source sequence a politeness-score scaled label (predicted by our state-of-the-art politeness classifier) during training, and at test time is able to generate polite, neutral, and rude responses by simply scaling the label embedding by the corresponding score. Our reinforcement learning model (Polite-RL) encourages politeness generation by assigning rewards proportional to the politeness classifier score of the sampled response. We also present two retrieval-based, polite dialogue model baselines. Human evaluation validates that while the Fusion and the retrieval-based models achieve politeness with poorer context-relevance, the LFT and Polite-RL models can produce significantly more polite responses without sacrificing dialogue quality.", "phrases": ["dialogue generation", "language model", "politeness"], "overall_score": 2.3583452789454076, "scores": [0.9480761764471646, 0.8573941346019285, 0.5562346715002137], "rank_score": 0.7872349941831023} -{"id": "andrew-2006-hybrid", "title": "A Hybrid Markov/Semi-Markov Conditional Random Field for Sequence Segmentation", "abstract": "Markov order-1 conditional random fields (CRFs) and semi-Markov CRFs are two popular models for sequence segmentation and labeling. Both models have advantages in terms of the type of features they most naturally represent. We propose a hybrid model that is capable of representing both types of features, and describe efficient algorithms for its training and inference. We demonstrate that our hybrid model achieves error reductions of 18% and 25% over a standard order-1 CRF and a semi-Markov CRF (resp.) on the task of Chinese word segmentation. We also propose the use of a powerful feature for the semi-Markov CRF: the log conditional odds that a given token sequence constitutes a chunk according to a generative model, which reduces error by an additional 13%. Our best system achieves 96.8% F-measure, the highest reported score on this test set.", "phrases": ["conditional random field", "sequence segmentation", "semi-markov crf"], "overall_score": 1.9560395342468455, "scores": [0.915038431611586, 0.8795616279234462, 0.566904588231585], "rank_score": 0.7871682159222058} -{"id": "cherry-lin-2006-soft", "title": "Soft Syntactic Constraints for Word Alignment through Discriminative Training", "abstract": "Word alignment methods can gain valuable guidance by ensuring that their alignments maintain cohesion with respect to the phrases specified by a monolingual dependency tree. However, this hard constraint can also rule out correct alignments, and its utility decreases as alignment models become more complex. We use a publicly available structured output SVM to create a max-margin syntactic aligner with a soft cohesion constraint. 
The resulting aligner is the first, to our knowledge, to use a discriminative learning method to train an ITG bitext parser.", "phrases": ["word alignment", "cohesion", "reason itg", "more attention"], "overall_score": 2.0189886097422156, "scores": [1.5267891965627078, 0.5575004740814045, 0.539868147375986, 0.5244245956932951], "rank_score": 0.7871456034283484} -{"id": "zhou-etal-2022-exsum", "title": "ExSum: From Local Explanations to Model Understanding", "abstract": "Interpretability methods are developed to understand the working mechanisms of black-box models, which is crucial to their responsible deployment. Fulfilling this goal requires both that the explanations generated by these methods are correct and that people can easily and reliably understand them. While the former has been addressed in prior work, the latter is often overlooked, resulting in informal model understanding derived from a handful of local explanations. In this paper, we introduce explanation summary (ExSum), a mathematical framework for quantifying model understanding, and propose metrics for its quality assessment. On two domains, ExSum highlights various limitations in the current practice, helps develop accurate model understanding, and reveals easily overlooked properties of the model. We also connect understandability to other properties of explanations such as human alignment, robustness, and counterfactual similarity and plausibility.", "phrases": ["model understanding", "black-box model", "exsum"], "overall_score": 0.8646320887688046, "scores": [0.951666721029825, 0.8517870145116574, 0.5576123958515085], "rank_score": 0.7870220437976636} -{"id": "kajiwara-komachi-2018-complex", "title": "Complex Word Identification Based on Frequency in a Learner Corpus", "abstract": "We introduce the TMU systems for the Complex Word Identification (CWI) Shared Task 2018. TMU systems use random forest classifiers and regressors whose features are the number of characters, the number of words, and the frequency of target words in various corpora. Our simple systems performed best on 5 tracks out of 12 tracks. Our ablation analysis revealed the usefulness of a learner corpus for CWI task.", "phrases": ["learner corpus", "cwi task", "complex word identification"], "overall_score": 1.4101345085047685, "scores": [0.9453058337622485, 0.8788932153926609, 0.5368341130344849], "rank_score": 0.7870110540631314} -{"id": "zeng-etal-2019-variational", "title": "A Variational Approach to Weakly Supervised Document-Level Multi-Aspect Sentiment Classification", "abstract": "In this paper, we propose a variational approach to weakly supervised document-level multi-aspect sentiment classification. Instead of using user-generated ratings or annotations provided by domain experts, we use target-opinion word pairs as \u201csupervision.\u201d These word pairs can be extracted by using dependency parsers and simple rules. Our objective is to predict an opinion word given a target word while our ultimate goal is to learn a sentiment polarity classifier to predict the sentiment polarity of each aspect given a document. By introducing a latent variable, i.e., the sentiment polarity, to the objective function, we can inject the sentiment polarity classifier to the objective via the variational lower bound. We can learn a sentiment polarity classifier by optimizing the lower bound. 
We show that our method can outperform weakly supervised baselines on TripAdvisor and BeerAdvocate datasets and can be comparable to the state-of-the-art supervised method with hundreds of labels per aspect.", "phrases": ["variational approach", "multi-aspect sentiment classification", "word pair"], "overall_score": 1.0909706428009895, "scores": [0.9856220260692571, 0.8289461733598605, 0.5463387047575311], "rank_score": 0.7869689680622162} -{"id": "chen-etal-2019-numeracy", "title": "Numeracy-600K: Learning Numeracy for Detecting Exaggerated Information in Market Comments", "abstract": "In this paper, we attempt to answer the question of whether neural network models can learn numeracy, which is the ability to predict the magnitude of a numeral at some specific position in a text description. A large benchmark dataset, called Numeracy-600K, is provided for the novel task. We explore several neural network models including CNN, GRU, BiGRU, CRNN, CNN-capsule, GRU-capsule, and BiGRU-capsule in the experiments. The results show that the BiGRU model gets the best micro-averaged F1 score of 80.16%, and the GRU-capsule model gets the best macro-averaged F1 score of 64.71%. Besides discussing the challenges through comprehensive experiments, we also present an important application scenario, i.e., detecting exaggerated information, for the task.", "phrases": ["exaggerated information", "market comment", "network model", "magnitude", "bigru model"], "overall_score": 1.6362030969480517, "scores": [0.8579876417031049, 1.059353559748524, 0.9413974395108251, 0.5519934602666239, 0.523504721861119], "rank_score": 0.7868473646180394} -{"id": "das-bandyopadhyay-2011-dr", "title": "Dr Sentiment Knows Everything!", "abstract": "Sentiment analysis is one of the hot demanding research areas since last few decades. Although a formidable amount of research have been done, the existing reported solutions or available systems are still far from perfect or do not meet the satisfaction level of end users'. The main issue is the various conceptual rules that govern sentiment and there are even more clues (possibly unlimited) that can convey these concepts from realization to verbalization of a human being. Human psychology directly relates to the unrevealed clues and governs the sentiment realization of us. Human psychology relates many things like social psychology, culture, pragmatics and many more endless intelligent aspects of civilization. Proper incorporation of human psychology into computational sentiment knowledge representation may solve the problem. In the present paper we propose a template based online interactive gaming technology, called Dr Sentiment to automatically create the PsychoSentiWordNet involving internet population. The PsychoSentiWordNet is an extension of SentiWordNet that presently holds human psychological knowledge on a few aspects along with sentiment knowledge.", "phrases": ["extension", "sentiwordnet", "human psychological knowledge"], "overall_score": 0.8643656058305632, "scores": [0.9221347452409091, 0.8670995423258712, 0.5711041541553686], "rank_score": 0.7867794805740496} -{"id": "zhang-etal-2008-bayesian", "title": "Bayesian Learning of Non-Compositional Phrases with Synchronous Parsing", "abstract": "We combine the strengths of Bayesian modeling and synchronous grammar in unsupervised learning of basic translation phrase pairs. 
The structured space of a synchronous grammar is a natural fit for phrase pair probability estimation, though the search space can be prohibitively large. Therefore we explore efficient algorithms for pruning this space that lead to empirically effective results. Incorporating a sparse prior using Variational Bayes biases the models toward generalizable, parsimonious parameter sets, leading to significant improvements in word alignment. This preference for sparse solutions together with effective pruning methods forms a phrase alignment regimen that produces better end-to-end translations than standard word alignment approaches.", "phrases": ["synchronous grammar", "word alignment", "bayesian learning"], "overall_score": 2.431199936114272, "scores": [0.8757022674731403, 0.8936419144714987, 0.5902480651364587], "rank_score": 0.7865307490270325} -{"id": "pradhan-etal-2007-towards", "title": "Towards Robust Semantic Role Labeling", "abstract": "Most semantic role labeling (SRL) research has been focused on training and evaluating on the same corpus. This strategy, although appropriate for initiating research, can lead to over-training to the particular corpus. This article describes the operation of ASSERT, a state-of-the-art SRL system, and analyzes the robustness of the system when trained on one genre of data and used to label a different genre. As a starting point, results are first presented for training and testing the system on the PropBank corpus, which is annotated Wall Street Journal (WSJ) data. Experiments are then presented to evaluate the portability of the system to another source of data. These experiments are based on comparisons of performance using PropBanked WSJ data and PropBanked Brown Corpus data. The results indicate that whereas syntactic parses and argument identification transfer relatively well to a new corpus, argument classification does not. An analysis of the reasons for this is presented and these generally point to the nature of the more lexical/semantic features dominating the classification task where more general structural features are dominant in the argument identification task.", "phrases": ["propbank", "reason", "semantic feature", "new domain", "marked decrease"], "overall_score": 2.2280857926118816, "scores": [1.0727529381786474, 0.8892864184363716, 0.8259550544247471, 0.6197031565145484, 0.5243850574378541], "rank_score": 0.7864165249984337} -{"id": "yang-etal-2020-program", "title": "Program Enhanced Fact Verification with Verbalization and Graph Attention Network", "abstract": "Performing fact verification based on structured data is important for many real-life applications and is a challenging research problem, particularly when it involves both symbolic operations and informal inference based on language understanding. In this paper, we present a Program-enhanced Verbalization and Graph Attention Network (ProgVGAT) to integrate programs and execution into textual inference models. Specifically, a verbalization with program execution model is proposed to accumulate evidences that are embedded in operations over the tables. Built on that, we construct the graph attention verification networks, which are designed to fuse different sources of evidences from verbalized program execution, program structures, and the original statements and tables, to make the final verification decision. 
To support the above framework, we propose a program selection module optimized with a new training strategy based on margin loss, to produce more accurate programs, which is shown to be effective in enhancing the final verification results. Experimental results show that the proposed framework achieves the new state-of-the-art performance, a 74.4% accuracy, on the benchmark dataset TABFACT.", "phrases": ["fact verification", "verbalization", "graph attention network", "reasoning"], "overall_score": 1.7279029774181558, "scores": [0.8664069971410988, 0.8381035555753563, 0.911234742181115, 0.5298648448050531], "rank_score": 0.7864025349256558} -{"id": "goel-etal-2021-robustness", "title": "Robustness Gym: Unifying the NLP Evaluation Landscape", "abstract": "Despite impressive performance on standard benchmarks, natural language processing (NLP) models are often brittle when deployed in real-world systems. In this work, we identify challenges with evaluating NLP systems and propose a solution in the form of Robustness Gym (RG), a simple and extensible evaluation toolkit that unifies 4 standard evaluation paradigms: subpopulations, transformations, evaluation sets, and adversarial attacks. By providing a common platform for evaluation, RG enables practitioners to compare results from disparate evaluation paradigms with a single click, and to easily develop and share novel evaluation methods using a built-in set of abstractions. RG is under active development and we welcome feedback & contributions from the community.", "phrases": ["nlp system", "subpopulation", "evaluation set", "robustness gym"], "overall_score": 1.6351076372484654, "scores": [0.8459530767519404, 0.8933242593723256, 0.8291329150643826, 0.5768719882487267], "rank_score": 0.7863205598593438} -{"id": "cuadros-rigau-2008-knownet", "title": "KnowNet: Building a Large Net of Knowledge from the Web", "abstract": "This paper presents a new fully automatic method for building highly dense and accurate knowledge bases from existing semantic resources. Basically, the method uses a wide-coverage and accurate knowledge-based Word Sense Disambiguation algorithm to assign the most appropriate senses to large sets of topically related words acquired from the web. KnowNet, the resulting knowledge-base which connects large sets of semantically-related concepts is a major step towards the autonomous acquisition of knowledge from raw corpora. In fact, KnowNet is several times larger than any available knowledge resource encoding relations between synsets, and the knowledge KnowNet contains outperform any other resource when is empirically evaluated in a common framework.", "phrases": ["web", "knownet", "knowledge basis"], "overall_score": 1.0900681404589068, "scores": [0.8837219642758072, 0.8605688452726514, 0.6146630411582193], "rank_score": 0.7863179502355594} -{"id": "el-baff-etal-2018-challenge", "title": "Challenge or Empower: Revisiting Argumentation Quality in a News Editorial Corpus", "abstract": "News editorials are said to shape public opinion, which makes them a powerful tool and an important source of political argumentation. However, rarely do editorials change anyone's stance on an issue completely, nor do they tend to argue explicitly (but rather follow a subtle rhetorical strategy). So, what does argumentation quality mean for editorials then? 
We develop the notion that an effective editorial challenges readers with opposing stance, and at the same time empowers the arguing skills of readers that share the editorial's stance \u2014 or even challenges both sides. To study argumentation quality based on this notion, we introduce a new corpus with 1000 editorials from the New York Times, annotated for their perceived effect along with the annotators' political orientations. Analyzing the corpus, we find that annotators with different orientation disagree on the effect significantly. While only 1% of all editorials changed anyone's stance, more than 5% meet our notion. We conclude that our corpus serves as a suitable resource for studying the argumentation quality of news editorials.", "phrases": ["argumentation quality", "news editorial", "ideology"], "overall_score": 1.5300962006403351, "scores": [0.9443549944938623, 0.8934769643179599, 0.521109744714143], "rank_score": 0.7863139011753217} -{"id": "al-kuwatly-etal-2020-identifying", "title": "Identifying and Measuring Annotator Bias Based on Annotators' Demographic Characteristics", "abstract": "Machine learning is recently used to detect hate speech and other forms of abusive language in online platforms. However, a notable weakness of machine learning models is their vulnerability to bias, which can impair their performance and fairness. One type is annotator bias caused by the subjective perception of the annotators. In this work, we investigate annotator bias using classification models trained on data from demographically distinct annotator groups. To do so, we sample balanced subsets of data that are labeled by demographically distinct annotators. We then train classifiers on these subsets, analyze their performances on similarly grouped test sets, and compare them statistically. Our findings show that the proposed approach successfully identifies bias and that demographic features, such as first language, age, and education, correlate with significant performance differences.", "phrases": ["annotator", "background", "educational background"], "overall_score": 1.5300135417226874, "scores": [1.2203450729457468, 0.5774445699437695, 0.5610246257941656], "rank_score": 0.7862714228945605} -{"id": "chu-etal-2014-constructing", "title": "Constructing a Chinese\u2014Japanese Parallel Corpus from Wikipedia", "abstract": "Parallel corpora are crucial for statistical machine translation (SMT). However, they are quite scarce for most language pairs, such as Chinese\u2015Japanese. As comparable corpora are far more available, many studies have been conducted to automatically construct parallel corpora from comparable corpora. This paper presents a robust parallel sentence extraction system for constructing a Chinese\u2015Japanese parallel corpus from Wikipedia. The system is inspired by previous studies that mainly consist of a parallel sentence candidate filter and a binary classifier for parallel sentence identification. We improve the system by using the common Chinese characters for filtering and two novel feature sets for classification. Experiments show that our system performs significantly better than the previous studies for both accuracy in parallel sentence extraction and SMT performance. Using the system, we construct a Chinese\u2015Japanese parallel corpus with more than 126k highly accurate parallel sentences from Wikipedia. 
The constructed parallel corpus is freely available at .", "phrases": ["parallel corpus", "wikipedia", "sentence extraction system"], "overall_score": 1.408453917068908, "scores": [0.9212212727853039, 0.8856142850123105, 0.5513837365740456], "rank_score": 0.7860730981238867} -{"id": "teufel-van-halteren-2004-evaluating", "title": "Evaluating Information Content by Factoid Analysis: Human annotation and stability", "abstract": "We present a new approach to intrinsic summary evaluation, based on initial experiments in van Halteren and Teufel (2003), which combines two novel aspects: comparison of information content (rather than string similarity) in gold standard and system summary, measured in shared atomic information units which we call factoids, and comparison to more than one gold standard summary (in our data: 20 and 50 summaries respectively). In this paper, we show that factoid annotation is highly reproducible, introduce a weighted factoid score, estimate how many summaries are required for stable system rankings, and show that the factoid scores cannot be sufficiently approximated by unigrams and the DUC information overlap measure.", "phrases": ["information content", "factoid analysis", "van"], "overall_score": 1.4081504838028194, "scores": [0.9534363988078368, 0.880929311603495, 0.5233455359695683], "rank_score": 0.7859037487936334} -{"id": "jaffe-etal-2020-coreference", "title": "Coreference information guides human expectations during natural reading", "abstract": "Models of human sentence processing effort tend to focus on costs associated with retrieving structures and discourse referents from memory (memory-based) and/or on costs associated with anticipating upcoming words and structures based on contextual cues (expectation-based) (Levy, 2008). Although evidence suggests that expectation and memory may play separable roles in language comprehension (Levy et al., 2013), theories of coreference processing have largely focused on memory: how comprehenders identify likely referents of linguistic expressions. In this study, we hypothesize that coreference tracking also informs human expectations about upcoming words, and we test this hypothesis by evaluating the degree to which incremental surprisal measures generated by a novel coreference-aware semantic parser explain human response times in a naturalistic self-paced reading experiment. Results indicate (1) that coreference information indeed guides human expectations and (2) that coreference effects on memory retrieval may exist independently of coreference effects on expectations. Together, these findings suggest that the language processing system exploits coreference information both to retrieve referents from memory and to anticipate upcoming material.", "phrases": ["human expectation", "memory retrieval", "coreference information"], "overall_score": 0.8633792179237495, "scores": [0.9331200566742879, 0.8759653394078531, 0.5485594987436513], "rank_score": 0.7858816316085974} -{"id": "cheng-etal-2016-long", "title": "Long Short-Term Memory-Networks for Machine Reading", "abstract": "In this paper we address the question of how to render sequence-level networks better at handling structured input. We propose a machine reading simulator which processes text incrementally from left to right and performs shallow reasoning with memory and attention. The reader extends the Long Short-Term Memory architecture with a memory network in place of a single memory cell. 
This enables adaptive memory usage during recurrence with neural attention, offering a way to weakly induce relations among tokens. The system is initially designed to process a single sequence but we also demonstrate how to integrate it with an encoder-decoder architecture. Experiments on language modeling, sentiment analysis, and natural language inference show that our model matches or outperforms the state of the art.", "phrases": ["machine reading", "long short-term memory-network", "mechanism", "strong memorization capability", "context information"], "overall_score": 2.6727158580366526, "scores": [1.016284253024644, 0.9164478650653374, 0.8697517757791404, 0.578512102840675, 0.5480847917896111], "rank_score": 0.7858161576998816} -{"id": "rogers-etal-2018-whats", "title": "What's in Your Embedding, And How It Predicts Task Performance", "abstract": "Attempts to find a single technique for general-purpose intrinsic evaluation of word embeddings have so far not been successful. We present a new approach based on scaled-up qualitative analysis of word vector neighborhoods that quantifies interpretable characteristics of a given model (e.g. its preference for synonyms or shared morphological forms as nearest neighbors). We analyze 21 such factors and show how they correlate with performance on 14 extrinsic and intrinsic task datasets (and also explain the lack of correlation between some of them). Our approach enables multi-faceted evaluation, parameter search, and generally \u2013 a more principled, hypothesis-driven approach to development of distributional semantic representations.", "phrases": ["task performance", "intrinsic evaluation", "word embedding", "factor"], "overall_score": 1.726549060460681, "scores": [0.8233739441467377, 1.2211086529217756, 0.5614757833170654, 0.5371869826684673], "rank_score": 0.7857863407635114} -{"id": "wang-etal-2019-evidence", "title": "Evidence Sentence Extraction for Machine Reading Comprehension", "abstract": "Remarkable success has been achieved in the last few years on some limited machine reading comprehension (MRC) tasks. However, it is still difficult to interpret the predictions of existing MRC models. In this paper, we focus on extracting evidence sentences that can explain or support the answers of multiple-choice MRC tasks, where the majority of answer options cannot be directly extracted from reference documents. Due to the lack of ground truth evidence sentence labels in most cases, we apply distant supervision to generate imperfect labels and then use them to train an evidence sentence extractor. To denoise the noisy labels, we apply a recently proposed deep probabilistic logic learning framework to incorporate both sentence-level and cross-sentence linguistic indicators for indirect supervision. We feed the extracted evidence sentences into existing MRC models and evaluate the end-to-end performance on three challenging multiple-choice MRC datasets: MultiRC, RACE, and DREAM, achieving comparable or better performance than the same models that take as input the full reference document. 
To the best of our knowledge, this is the first work extracting evidence sentences for multiple-choice MRC.", "phrases": ["machine reading comprehension", "mrc", "noisy label"], "overall_score": 1.8091999686288198, "scores": [0.9620707977354547, 0.8597593587169837, 0.5353465326526599], "rank_score": 0.7857255630350327} -{"id": "ramteke-etal-2013-detecting", "title": "Detecting Turnarounds in Sentiment Analysis: Thwarting", "abstract": "Thwarting and sarcasm are two uncharted territories in sentiment analysis, the former because of the lack of training corpora and the latter because of the enormous amount of world knowledge it demands. In this paper, we propose a working definition of thwarting amenable to machine learning and create a system that detects if the document is thwarted or not. We focus on identifying thwarting in product reviews, especially in the camera domain. An ontology of the camera domain is created. Thwarting is looked upon as the phenomenon of polarity reversal at a higher level of ontology compared to the polarity expressed at the lower level. This notion of thwarting defined with respect to an ontology is novel, to the best of our knowledge. A rule based implementation building upon this idea forms our baseline. We show that machine learning with annotated corpora (thwarted/nonthwarted) is more effective than the rule based system. Because of the skewed distribution of thwarting, we adopt the Area-under-the-Curve measure of performance. To the best of our knowledge, this is the first attempt at the difficult problem of thwarting detection, which we hope will at least provide a baseline system to compare against.", "phrases": ["sentiment analysis", "thwarting", "sarcasm"], "overall_score": 1.0891990025185911, "scores": [0.9511573716231335, 0.86882631308761, 0.5370893145014219], "rank_score": 0.7856909997373885} -{"id": "lee-2011-toward", "title": "Toward a Parallel Corpus of Spoken Cantonese and Written Chinese", "abstract": "We introduce a parallel corpus of spoken Cantonese and written Chinese. This sentence-aligned corpus consists of transcriptions of Cantonese spoken in television programs in Hong Kong, and their corresponding Chinese (Mandarin) subtitles. Preliminary evaluation shows that the corpus reflects known syntactic differences between Cantonese and Mandarin, facilitates quantitative analyses on these differences, and already reveals some phenomena not yet discussed in the literature.", "phrases": ["parallel corpus", "spoken cantonese", "chinese"], "overall_score": 0.8631539980152916, "scores": [0.9045311712142492, 0.8665480458391922, 0.5859506657864638], "rank_score": 0.7856766276133017} -{"id": "tan-2022-diversity", "title": "On the Diversity and Limits of Human Explanations", "abstract": "A growing effort in NLP aims to build datasets of human explanations. However, it remains unclear whether these datasets serve their intended goals. This problem is exacerbated by the fact that the term explanation is overloaded and refers to a broad range of notions with different properties and ramifications. Our goal is to provide an overview of the diversity of explanations, discuss human limitations in providing explanations, and ultimately provide implications for collecting and using human explanations in NLP. Inspired by prior work in psychology and cognitive sciences, we group existing human explanations in NLP into three categories: proximal mechanism, evidence, and procedure. 
These three types differ in nature and have implications for the resultant explanations. For instance, procedure is not considered explanation in psychology and connects with a rich body of work on learning from instructions. The diversity of explanations is further evidenced by proxy questions that are needed for annotators to interpret and answer \u201cwhy is [input] assigned [label]\u201d. Finally, giving explanations may require different, often deeper, understandings than predictions, which casts doubt on whether humans can provide valid explanations in some tasks.", "phrases": ["diversity", "human explanation", "instruction"], "overall_score": 1.089059346260257, "scores": [0.9535982701608332, 0.8475937904083651, 0.5555787165559756], "rank_score": 0.7855902590417246} -{"id": "tackstrom-etal-2013-token", "title": "Token and Type Constraints for Cross-Lingual Part-of-Speech Tagging", "abstract": "We consider the construction of part-of-speech taggers for resource-poor languages. Recently, manually constructed tag dictionaries from Wiktionary and dictionaries projected via bitext have been used as type constraints to overcome the scarcity of annotated data in this setting. In this paper, we show that additional token constraints can be projected from a resource-rich source language to a resource-poor target language via word-aligned bitext. We present several models to this end; in particular a partially observed conditional random field model, where coupled token and type constraints provide a partial signal for training. Averaged across eight previously studied Indo-European languages, our model achieves a 25% relative error reduction over the prior state of the art. We further present successful results on seven additional languages from different families, empirically demonstrating the applicability of coupled token and type constraints across a diverse set of languages.", "phrases": ["type constraint", "tagging", "resource-poor language", "supervised language"], "overall_score": 2.270124054733534, "scores": [0.8624781463884509, 0.8765944896316027, 0.8380298771370387, 0.5645335736636641], "rank_score": 0.7854090217051891} -{"id": "gimpel-smith-2009-cube", "title": "Cube Summing, Approximate Inference with Non-Local Features, and Dynamic Programming without Semirings", "abstract": "We introduce cube summing, a technique that permits dynamic programming algorithms for summing over structures (like the forward and inside algorithms) to be extended with non-local features that violate the classical structural independence assumptions. It is inspired by cube pruning (Chiang, 2007; Huang and Chiang, 2007) in its computation of non-local features dynamically using scored k-best lists, but also maintains additional residual quantities used in calculating approximate marginals. When restricted to local features, cube summing reduces to a novel semiring (k-best+residual) that generalizes many of the semirings of Goodman (1999). 
When non-local features are included, cube summing does not reduce to any semiring, but is compatible with generic techniques for solving dynamic programming equations.", "phrases": ["semiring", "dynamic programming algorithm", "cube summing"], "overall_score": 1.263689100660498, "scores": [0.8880550596056358, 0.8863951965433629, 0.5810722978387568], "rank_score": 0.7851741846625853} -{"id": "ji-etal-2016-latent", "title": "A Latent Variable Recurrent Neural Network for Discourse-Driven Language Models", "abstract": "This paper presents a novel latent variable recurrent neural network architecture for jointly modeling sequences of words and (possibly latent) discourse relations between adjacent sentences. A recurrent neural network generates individual words, thus reaping the benefits of discriminatively-trained vector representations. The discourse relations are represented with a latent variable, which can be predicted or marginalized, depending on the task. The resulting model can therefore employ a training objective that includes not only discourse relation classification, but also word prediction. As a result, it outperforms state-of-the-art alternatives for two tasks: implicit discourse relation classification in the Penn Discourse Treebank, and dialog act classification in the Switchboard corpus. Furthermore, by marginalizing over latent discourse relations at test time, we obtain a discourse informed language model, which improves over a strong LSTM baseline.", "phrases": ["latent variable", "recurrent neural network", "discourse relation classification", "network model"], "overall_score": 2.125995923819218, "scores": [1.1224241420061694, 0.9231452040814246, 0.5730311173445233, 0.5216602643105386], "rank_score": 0.785065181935664} -{"id": "wang-goutte-2017-detecting", "title": "Detecting Changes in Twitter Streams using Temporal Clusters of Hashtags", "abstract": "Detecting events from social media data has important applications in public security, political issues, and public health. Many studies have focused on detecting specific or unspecific events from Twitter streams. However, not much attention has been paid to detecting changes, and their impact, in online conversations related to an event. We propose methods for detecting such changes, using clustering of temporal profiles of hashtags, and three change point detection algorithms. The methods were tested on two Twitter datasets: one covering the 2014 Ottawa shooting event, and one covering the Sochi winter Olympics. We compare our approach to a baseline consisting of detecting change from raw counts in the conversation. We show that our method produces large gains in change detection accuracy on both datasets.", "phrases": ["hashtag", "temporal profile", "twitter dataset"], "overall_score": 1.0881790654992518, "scores": [0.8998044626848946, 0.8711751011214672, 0.583886248286074], "rank_score": 0.7849552706974786} -{"id": "zhu-etal-2020-convlab", "title": "ConvLab-2: An Open-Source Toolkit for Building, Evaluating, and Diagnosing Dialogue Systems", "abstract": "We present ConvLab-2, an open-source toolkit that enables researchers to build task-oriented dialogue systems with state-of-the-art models, perform an end-to-end evaluation, and diagnose the weakness of systems. As the successor of ConvLab, ConvLab-2 inherits ConvLab's framework but integrates more powerful dialogue models and supports more datasets. 
Besides, we have developed an analysis tool and an interactive tool to assist researchers in diagnosing dialogue systems. The analysis tool presents rich statistics and summarizes common mistakes from simulated dialogues, which facilitates error analysis and system improvement. The interactive tool provides a user interface that allows developers to diagnose an assembled dialogue system by interacting with the system and modifying the output of each system component.", "phrases": ["open-source toolkit", "dialogue system", "convlab-2"], "overall_score": 1.724610434634044, "scores": [0.8984995421773274, 0.8372425759784269, 0.6189699842250448], "rank_score": 0.784904034126933} -{"id": "zhao-etal-2012-novel", "title": "A Novel Burst-based Text Representation Model for Scalable Event Detection", "abstract": "Mining retrospective events from text streams has been an important research topic. Classic text representation model (i.e., vector space model) cannot model temporal aspects of documents. To address it, we proposed a novel burst-based text representation model, denoted as BurstVSM. BurstVSM corresponds dimensions to bursty features instead of terms, which can capture semantic and temporal information. Meanwhile, it significantly reduces the number of non-zero entries in the representation. We test it via scalable event detection, and experiments in a 10-year news archive show that our methods are both effective and efficient.", "phrases": ["text representation model", "scalable event detection", "research topic"], "overall_score": 1.406201707894934, "scores": [0.9155330936099083, 0.9118417202786547, 0.5270735348634639], "rank_score": 0.7848161162506756} -{"id": "lacerra-etal-2021-genesis", "title": "GeneSis: A Generative Approach to Substitutes in Context", "abstract": "The lexical substitution task aims at generating a list of suitable replacements for a target word in context, ideally keeping the meaning of the modified text unchanged. While its usage has increased in recent years, the paucity of annotated data prevents the finetuning of neural models on the task, hindering the full fruition of recently introduced powerful architectures such as language models. Furthermore, lexical substitution is usually evaluated in a framework that is strictly bound to a limited vocabulary, making it impossible to credit appropriate, but out-of-vocabulary, substitutes. To assess these issues, we proposed GeneSis (Generating Substitutes in contexts), the first generative approach to lexical substitution. Thanks to a seq2seq model, we generate substitutes for a word according to the context it appears in, attaining state-of-the-art results on different benchmarks. Moreover, our approach allows silver data to be produced for further improving the performances of lexical substitution systems. Along with an extensive analysis of GeneSis results, we also present a human evaluation of the generated substitutes in order to assess their quality. We release the fine-tuned models, the generated datasets, and the code to reproduce the experiments at .", "phrases": ["generative approach", "substitute", "genesis"], "overall_score": 0.8621581722887872, "scores": [0.8982781775246079, 0.8886547024890944, 0.5673776839087576], "rank_score": 0.7847701879741532} -{"id": "gao-johnson-2008-comparison", "title": "A comparison of Bayesian estimators for unsupervised Hidden Markov Model POS taggers", "abstract": "There is growing interest in applying Bayesian techniques to NLP problems. 
There are a number of different estimators for Bayesian models, and it is useful to know what kinds of tasks each does well on. This paper compares a variety of different Bayesian estimators for Hidden Markov Model POS taggers with various numbers of hidden states on data sets of different sizes. Recent papers have given contradictory results when comparing Bayesian estimators to Expectation Maximization (EM) for unsupervised HMM POS tagging, and we show that the difference in reported results is largely due to differences in the size of the training data and the number of states in the HMM. We investigate a variety of samplers for HMMs, including some that these earlier papers did not study. We find that all of the Gibbs samplers do well with small data sets and few states, and that Variational Bayes does well on large data sets and is competitive with the Gibbs samplers. In terms of times of convergence, we find that Variational Bayes was the fastest of all the estimators, especially on large data sets, and that explicit Gibbs samplers (both pointwise and sentence-blocked) were generally faster than their collapsed counterparts on large data sets.", "phrases": ["bayesian estimator", "hmm", "pos induction"], "overall_score": 1.8811835443424674, "scores": [0.9023099640802142, 0.8754468451731182, 0.5757866073619042], "rank_score": 0.7845144722050789} -{"id": "el-kholy-habash-2011-automatic", "title": "Automatic Error Analysis for Morphologically Rich Languages", "abstract": "This paper presents AMEANA, an open-source tool for error analysis for natural language processing tasks targeting morphologically rich languages. Unlike standard evaluation metrics such as BLEU or WER, AMEANA automatically provides a detailed error analysis that can help researchers and developers better understand the strengths and weaknesses of their systems. AMEANA is easily adaptable to any language provided the existence of a morphological analyzer. In this paper, we focus on usability in the context of Machine Translation (MT) and demonstrate it specifically for English-to-Arabic MT.", "phrases": ["error analysis", "rich language", "morphological analyzer"], "overall_score": 1.526427688761781, "scores": [0.9420146110856977, 0.8827320235700276, 0.5285393423501819], "rank_score": 0.784428659001969} -{"id": "marasovic-etal-2022-shot", "title": "Few-Shot Self-Rationalization with Natural Language Prompts", "abstract": "Self-rationalization models that predict task labels and generate free-text elaborations for their predictions could enable more intuitive interaction with NLP systems. These models are, however, currently trained with a large amount of human-written free-text explanations for each task which hinders their broader usage. We propose to study a more realistic setting of self-rationalization using few training examples. We present FEB\u2014a standardized collection of four existing English-language datasets and associated metrics. We identify the right prompting approach by extensively exploring natural language prompts on FEB. Then, by using this prompt and scaling the model size, we demonstrate that making progress on few-shot self-rationalization is possible. We show there is still ample room for improvement in this task: the average plausibility of generated explanations assessed by human annotators is at most 51% (with GPT-3), while plausibility of human explanations is 76%. 
We hope that FEB and our proposed approach will spur the community to take on the few-shot self-rationalization challenge.", "phrases": ["self-rationalization", "natural language prompt", "explanation"], "overall_score": 0.8617050696541979, "scores": [0.9199512767527783, 0.8746143457266422, 0.5585076460679659], "rank_score": 0.7843577561824621} -{"id": "bhatia-etal-2016-morphological", "title": "Morphological Priors for Probabilistic Neural Word Embeddings", "abstract": "Word embeddings allow natural language processing systems to share statistical information across related words. These embeddings are typically based on distributional statistics, making it difficult for them to generalize to rare or unseen words. We propose to improve word embeddings by incorporating morphological information, capturing shared sub-word features. Unlike previous work that constructs word embeddings directly from morphemes, we combine morphological and distributional information in a unified probabilistic framework, in which the word embedding is a latent variable. The morphological information provides a prior distribution on the latent word embeddings, which in turn condition a likelihood function over an observed corpus. This approach yields improvements on intrinsic word similarity evaluations, and also in the downstream task of part-of-speech tagging.", "phrases": ["word embedding", "morphological information", "probabilistic framework"], "overall_score": 1.7233867034869392, "scores": [1.257562346031244, 0.5531654782711679, 0.5423134459389777], "rank_score": 0.7843470900804631} -{"id": "khaltar-etal-2006-extracting", "title": "Extracting Loanwords from Mongolian Corpora and Producing a Japanese-Mongolian Bilingual Dictionary", "abstract": "This paper proposes methods for extracting loanwords from Cyrillic Mongolian corpora and producing a Japanese-Mongolian bilingual dictionary. We extract loanwords from Mongolian corpora using our own handcrafted rules. To complement the rule-based extraction, we also extract words in Mongolian corpora that are phonetically similar to Japanese Katakana words as loanwords. In addition, we correspond the extracted loanwords to Japanese words and produce a bilingual dictionary. We propose a stemming method for Mongolian to extract loanwords correctly. We verify the effectiveness of our methods experimentally.", "phrases": ["loanword", "mongolian corpora", "japanese-mongolian bilingual dictionary"], "overall_score": 0.8616306998799117, "scores": [0.9502597044913749, 0.8336274284374435, 0.568983052761076], "rank_score": 0.7842900618966314} -{"id": "zarcone-etal-2013-fitting", "title": "Fitting, Not Clashing! A Distributional Semantic Model of Logical Metonymy", "abstract": "Logical metonymy interpretation (e.g. begin the book \u2192 writing) has received wide attention in linguistics. Experimental results have shown higher processing costs for metonymic conditions compared with non-metonymic ones (read the book). According to a widely held interpretation, it is the type clash between the event-selecting verb and the entity-denoting object (begin the book) that triggers coercion mechanisms and leads to additional processing effort. We propose an alternative explanation and argue that the extra processing effort is an effect of thematic fit. This is a more economical hypothesis that does not need to postulate a separate type clash mechanism: entity-denoting objects simply have a low fit as objects of event-selecting verbs. 
We test linguistic datasets from psycholinguistic experiments and find that a structured distributional model of thematic fit, which does not encode any explicit argument type information, is able to replicate all significant experimental findings. This result provides evidence for a graded account of coercion phenomena in which thematic fit accounts for both the trigger of the coercion and the retrieval of the covert event.", "phrases": ["logical metonymy", "object", "thematic fit"], "overall_score": 1.0871422619496571, "scores": [0.9492899637058891, 0.8773233086903116, 0.5260088526871701], "rank_score": 0.7842073750277904} -{"id": "baker-sato-2003-framenet", "title": "The FrameNet Data and Software", "abstract": "The FrameNet project has developed a lexical knowledge base providing a unique level of detail as to the possible syntactic realizations of the specific semantic roles evoked by each predicator, for roughly 7,000 lexical units, on the basis of annotating more than 100,000 example sentences extracted from corpora. An interim version of the FrameNet data was released in October, 2002 and is being widely used. A new, more portable version of the FrameNet software is also being made available to researchers elsewhere, including the Spanish FrameNet project. This demo and poster will briefly explain the principles of Frame Semantics and demonstrate the new unified tools for lexicon building and annotation and also FrameSQL, a search tool for finding patterns in annotated sentences. We will discuss the content and format of the data releases and how the software and data can be used by other NLP researchers.", "phrases": ["framenet data", "software", "lexical unit"], "overall_score": 1.2618948375809018, "scores": [0.9561728427063482, 0.8584534866343738, 0.5375517096476028], "rank_score": 0.7840593463294416} -{"id": "oraby-etal-2017-serious", "title": "Are you serious?: Rhetorical Questions and Sarcasm in Social Media Dialog", "abstract": "Effective models of social dialog must understand a broad range of rhetorical and figurative devices. Rhetorical questions (RQs) are a type of figurative language whose aim is to achieve a pragmatic goal, such as structuring an argument, being persuasive, emphasizing a point, or being ironic. While there are computational models for other forms of figurative language, rhetorical questions have received little attention to date. We expand a small dataset from previous work, presenting a corpus of 10,270 RQs from debate forums and Twitter that represent different discourse functions. We show that we can clearly distinguish between RQs and sincere questions (0.76 F1). We then show that RQs can be used both sarcastically and non-sarcastically, observing that non-sarcastic (other) uses of RQs are frequently argumentative in forums, and persuasive in tweets. We present experiments to distinguish between these uses of RQs using SVM and LSTM models that represent linguistic features and post-level context, achieving results as high as 0.76 F1 for \u201csarcastic\u201d and 0.77 F1 for \u201cother\u201d in forums, and 0.83 F1 for both \u201csarcastic\u201d and \u201cother\u201d in tweets. 
We supplement our quantitative experiments with an in-depth characterization of the linguistic variation in RQs.", "phrases": ["rhetorical question", "sarcasm", "debate forum", "twitter"], "overall_score": 1.404792736979102, "scores": [0.856705499121886, 1.2073706584607284, 0.5417960608053446, 0.5302468000522333], "rank_score": 0.7840297546100481} -{"id": "huck-etal-2017-target", "title": "Target-side Word Segmentation Strategies for Neural Machine Translation", "abstract": "For efficiency considerations, state-of-the-art neural machine translation (NMT) requires the vocabulary to be restricted to a limited-size set of several thousand symbols. This is highly problematic when translating into inflected or compounding languages. A typical remedy is the use of subword units, where words are segmented into smaller components. Byte pair encoding, a purely corpus-based approach, has proved effective recently. In this paper, we investigate word segmentation strategies that incorporate more linguistic knowledge. We demonstrate that linguistically informed target word segmentation is better suited for NMT, leading to improved translation quality on the order of magnitude of +0.5 BLEU and \u22120.9 TER for a medium-scale English \u2192 German translation task. Our work is important in that it shows that linguistic knowledge can be used to improve NMT results over results based only on the language-agnostic byte pair encoding vocabulary reduction technique.", "phrases": ["segmentation", "neural machine translation", "linguistic knowledge", "bpe"], "overall_score": 2.0104715587367803, "scores": [0.9280767600790153, 1.0853177578209894, 0.567771579130674, 0.5541341035572838], "rank_score": 0.7838250501469906} -{"id": "erk-2009-supporting", "title": "Supporting inferences in semantic space: representing words as regions", "abstract": "Semantic space models represent the meaning of a word as a vector in high-dimensional space. They offer a framework in which the meaning representation of a word can be computed from its context, but the question remains how they support inferences. While there has been some work on paraphrase-based inferences in semantic space, it is not clear how semantic space models would support inferences involving hyponymy, like horse ran \u2192 animal moved. In this paper, we first discuss what a point in semantic space stands for, contrasting semantic space with Gardenforsian conceptual space. Building on this, we propose an extension of the semantic space representation from a point to a region. We present a model for learning a region representation for word meaning in semantic space, based on the fact that points at close distance tend to represent similar meanings. We show that this model can be used to predict, with high precision, when a hyponymy-based inference rule is applicable. Moving beyond paraphrase-based and hyponymy-based inference rules, we last discuss in what way semantic space models can support inferences.", "phrases": ["semantic space", "region", "hyponymy"], "overall_score": 1.2614908225174706, "scores": [0.9450732521436437, 0.8702865800049138, 0.5360651208207731], "rank_score": 0.7838083176564435} -{"id": "batchelor-2019-universal", "title": "Universal dependencies for Scottish Gaelic: syntax", "abstract": "We present universal dependencies for Scottish Gaelic and a treebank of 1021 sentences (20 021 tokens) drawn from the Annotated Reference Corpus Of Scottish Gaelic (ARCOSG). 
The tokens are annotated for coarse part-of-speech, fine-grained part-of-speech, syntactic features and dependency relations. We discuss how the annotations differ from the treebanks developed for two other Celtic languages, Irish and Breton, and in preliminary dependency parsing experiments we obtain a mean labelled attachment score of 0.792. We also discuss some difficult cases for future investigation, including cosubordination. The treebank is available, along with documentation, from https://universaldependencies.org/.", "phrases": ["scottish gaelic", "celtic language", "universal dependency"], "overall_score": 0.8610776886288034, "scores": [0.9519964750379774, 0.8543970351482574, 0.5449665579030853], "rank_score": 0.7837866893631067} -{"id": "zenkel-etal-2020-end", "title": "End-to-End Neural Word Alignment Outperforms GIZA++", "abstract": "Word alignment was once a core unsupervised learning task in natural language processing because of its essential role in training statistical machine translation (MT) models. Although unnecessary for training neural MT models, word alignment still plays an important role in interactive applications of neural machine translation, such as annotation transfer and lexicon injection. While statistical MT methods have been replaced by neural approaches with superior performance, the twenty-year-old GIZA++ toolkit remains a key component of state-of-the-art word alignment systems. Prior work on neural word alignment has only been able to outperform GIZA++ by using its output during training. We present the first end-to-end neural word alignment method that consistently outperforms GIZA++ on three data sets. Our approach repurposes a Transformer model trained for supervised translation to also serve as an unsupervised word alignment model in a manner that is tightly integrated and does not affect translation quality.", "phrases": ["word alignment", "giza++", "end-to-end", "loss function"], "overall_score": 1.879316407722592, "scores": [1.1878058736988892, 0.8680197009433893, 0.5561389451283676, 0.5229787432508591], "rank_score": 0.7837358157553764} -{"id": "li-etal-2019-cnm", "title": "CNM: An Interpretable Complex-valued Network for Matching", "abstract": "This paper seeks to model human language by the mathematical framework of quantum physics. With the well-designed mathematical formulations in quantum physics, this framework unifies different linguistic units in a single complex-valued vector space, e.g. words as particles in quantum states and sentences as mixed systems. A complex-valued network is built to implement this framework for semantic matching. With well-constrained complex-valued components, the network admits interpretations to explicit physical meanings. The proposed complex-valued network for matching (CNM) achieves comparable performances to strong CNN and RNN baselines on two benchmarking question answering (QA) datasets.", "phrases": ["complex-valued network", "unit", "cnm"], "overall_score": 1.0863610604224627, "scores": [0.9618178892394444, 0.8630545497017051, 0.5260591327883943], "rank_score": 0.7836438572431813} -{"id": "scarton-etal-2016-word", "title": "Word embeddings and discourse information for Quality Estimation", "abstract": "In this paper we present the results of the University of Sheffield (SHEF) submissions for the WMT16 shared task on document-level Quality Estimation (Task 3). 
Our submissions explore discourse and document-aware information and word embeddings as features, with Support Vector Regression and Gaussian Process used to train the Quality Estimation models. The use of word embeddings (combined with baseline features) and a Gaussian Process model with two kernels led to the winning submission in the shared task.", "phrases": ["quality estimation", "wmt16", "word embedding"], "overall_score": 1.0862528785086354, "scores": [0.9432794052126193, 0.8651413391616782, 0.542276717089357], "rank_score": 0.7835658204878849} -{"id": "severyn-etal-2015-distributional", "title": "Distributional Neural Networks for Automatic Resolution of Crossword Puzzles", "abstract": "Automatic resolution of Crossword Puzzles (CPs) heavily depends on the quality of the answer candidate lists produced by a retrieval system for each clue of the puzzle grid. Previous work has shown that such lists can be generated using Information Retrieval (IR) search algorithms applied to the databases containing previously solved CPs and reranked with tree kernels (TKs) applied to a syntactic tree representation of the clues. In this paper, we create a labelled dataset of 2 million clues on which we apply an innovative Distributional Neural Network (DNN) for reranking clue pairs. Our DNN is computationally efficient and can thus take advantage of such large datasets showing a large improvement over the TK approach, when the latter uses small training data. In contrast, when data is scarce, TKs outperform DNNs.", "phrases": ["automatic resolution", "crossword puzzles", "database", "distributional neural network"], "overall_score": 1.0860798501271405, "scores": [0.898925724908781, 0.7869833800386422, 0.8311810079394265, 0.6166739146888583], "rank_score": 0.783441006893927} -{"id": "amiri-etal-2016-learning", "title": "Learning Text Pair Similarity with Context-sensitive Autoencoders", "abstract": "We present a pairwise context-sensitive Autoencoder for computing text pair similarity. Our model encodes input text into context-sensitive representations and uses them to compute similarity between text pairs. Our model outperforms the state-of-the-art models in two semantic retrieval tasks and a contextual word similarity task. For retrieval, our unsupervised approach that merely ranks inputs with respect to the cosine similarity between their hidden representations shows comparable performance with the state-of-the-art supervised models and in some cases outperforms them.", "phrases": ["text pair similarity", "autoencoder", "input text"], "overall_score": 0.8606513200784708, "scores": [0.9029940130826549, 0.8961850669015392, 0.5510166959665875], "rank_score": 0.7833985919835939} -{"id": "zhang-etal-2017-macro", "title": "Macro Grammars and Holistic Triggering for Efficient Semantic Parsing", "abstract": "To learn a semantic parser from denotations, a learning algorithm must search over a combinatorially large space of logical forms for ones consistent with the annotated denotations. We propose a new online learning algorithm that searches faster as training progresses. The two key ideas are using macro grammars to cache the abstract patterns of useful logical forms found thus far, and holistic triggering to efficiently retrieve the most relevant patterns based on sentence similarity. 
On the WikiTableQuestions dataset, we first expand the search space of an existing model to improve the state-of-the-art accuracy from 38.7% to 42.7%, and then use macro grammars and holistic triggering to achieve an 11x speedup and an accuracy of 43.7%.", "phrases": ["holistic triggering", "semantic parser", "search space"], "overall_score": 1.4035817281693908, "scores": [0.8407169227100657, 0.986421824810652, 0.5229228856527858], "rank_score": 0.7833538777245012} -{"id": "hessel-etal-2019-unsupervised", "title": "Unsupervised Discovery of Multimodal Links in Multi-image, Multi-sentence Documents", "abstract": "Images and text co-occur constantly on the web, but explicit links between images and sentences (or other intra-document textual units) are often not present. We present algorithms that discover image-sentence relationships without relying on explicit multimodal annotation in training. We experiment on seven datasets of varying difficulty, ranging from documents consisting of groups of images captioned post hoc by crowdworkers to naturally-occurring user-generated multimodal documents. We find that a structured training objective based on identifying whether collections of images and sentences co-occur in documents can suffice to predict links between specific sentences and specific images within the same document at test time.", "phrases": ["image", "image-sentence relationship", "multimodal document"], "overall_score": 1.720995228305252, "scores": [1.2305282289387445, 0.5700511227502706, 0.54919669677256], "rank_score": 0.783258682820525} -{"id": "wu-etal-2021-text", "title": "A Text-Centered Shared-Private Framework via Cross-Modal Prediction for Multimodal Sentiment Analysis", "abstract": "Multimodal fusion is a core problem for multimodal sentiment analysis. Previous works usually treat all three modal features equally and implicitly explore the interactions between different modalities. In this paper, we break this kind of methods in two ways. Firstly, we observe that textual modality plays the most important role in multimodal sentiment analysis, and this can be seen from the previous works. Secondly, we observe that comparing to the textual modality, the other two kinds of nontextual modalities (visual and acoustic) can provide two kinds of semantics, shared and private semantics. The shared semantics from the other two modalities can obviously enhance the textual semantics and make the sentiment analysis model more robust, and the private semantics can be complementary to the textual semantics and meanwhile provide different views to improve the performance of sentiment analysis together with the shared semantics. Motivated by these two observations, we propose a text-centered shared-private framework (TCSP) for multimodal fusion, which consists of the cross-modal prediction and sentiment regression parts. Experiments on the MOSEI and MOSI datasets demonstrate the effectiveness of our shared-private framework, which outperforms all baselines. 
Furthermore, our approach provides a new way to utilize the unlabeled data for multimodal sentiment analysis.", "phrases": ["text-centered shared-private framework", "cross-modal prediction", "multimodal sentiment analysis"], "overall_score": 1.2605728119898654, "scores": [0.9414924991665627, 0.8404268928739158, 0.5677943848955678], "rank_score": 0.7832379256453489} -{"id": "ma-etal-2011-consistent", "title": "Consistent Translation using Discriminative Learning - A Translation Memory-inspired Approach", "abstract": "We present a discriminative learning method to improve the consistency of translations in phrase-based Statistical Machine Translation (SMT) systems. Our method is inspired by Translation Memory (TM) systems which are widely used by human translators in industrial settings. We constrain the translation of an input sentence using the most similar 'translation example' retrieved from the TM. Differently from previous research which used simple fuzzy match thresholds, these constraints are imposed using discriminative learning to optimise the translation performance. We observe that using this method can benefit the SMT system by not only producing consistent translations, but also improved translation outputs. We report a 0.9 point improvement in terms of BLEU score on English--Chinese technical documents.", "phrases": ["discriminative learning", "input sentence", "smt system", "consistent translation"], "overall_score": 2.0669222204540865, "scores": [0.9070109882855176, 0.8556468282477518, 0.8332146292051286, 0.5369465305820866], "rank_score": 0.7832047440801212} -{"id": "rozovskaya-roth-2019-grammar", "title": "Grammar Error Correction in Morphologically Rich Languages: The Case of Russian", "abstract": "Until now, most of the research in grammar error correction focused on English, and the problem has hardly been explored for other languages. We address the task of correcting writing mistakes in morphologically rich languages, with a focus on Russian. We present a corrected and error-tagged corpus of Russian learner writing and develop models that make use of existing state-of-the-art methods that have been well studied for English. Although impressive results have recently been achieved for grammar error correction of non-native English writing, these results are limited to domains where plentiful training data are available. Because annotation is extremely costly, these approaches are not suitable for the majority of domains and languages. We thus focus on methods that use \u201cminimal supervision\u201d; that is, those that do not rely on large amounts of annotated training data, and show how existing minimal-supervision approaches extend to a highly inflectional language such as Russian. The results demonstrate that these methods are particularly useful for correcting mistakes in grammatical phenomena that involve rich morphology.", "phrases": ["other language", "russian learner writing", "grammar error correction"], "overall_score": 1.403242186661127, "scores": [0.9304406891601983, 0.8730219011136295, 0.546030537727924], "rank_score": 0.7831643760005839} -{"id": "leong-mihalcea-2011-going", "title": "Going Beyond Text: A Hybrid Image-Text Approach for Measuring Word Relatedness", "abstract": "Traditional approaches to semantic relatedness are often restricted to text-based methods, which typically disregard other multimodal knowledge sources. 
In this paper, we propose a novel image-based metric to estimate the relatedness of words, and demonstrate the promise of this method through comparative evaluations on three standard datasets. We also show that a hybrid image-text approach can lead to improvements in word relatedness, confirming the applicability of visual cues as a possible orthogonal information source.", "phrases": ["hybrid image-text approach", "word relatedness", "image"], "overall_score": 1.5233875973566589, "scores": [0.9250598300611846, 0.8705829929763463, 0.5529562601671418], "rank_score": 0.7828663610682242} -{"id": "roark-bacchiani-2003-supervised", "title": "Supervised and unsupervised PCFG adaptation to novel domains", "abstract": "This paper investigates adapting a lexicalized probabilistic context-free grammar (PCFG) to a novel domain, using maximum a posteriori (MAP) estimation. The MAP framework is general enough to include some previous model adaptation approaches, such as corpus mixing in Gildea (2001), for example. Other approaches falling within this framework are more effective. In contrast to the results in Gildea (2001), we show F-measure parsing accuracy gains of as much as 2.5% for high accuracy lexicalized parsing through the use of out-of-domain treebanks, with the largest gains when the amount of in-domain data is small. MAP adaptation can also be based on either supervised or unsupervised adaptation data. Even when no in-domain treebank is available, unsupervised techniques provide a substantial accuracy gain over unadapted grammars, as much as nearly 5% F-measure improvement.", "phrases": ["pcfg adaptation", "novel domain", "posteriori", "treebank"], "overall_score": 1.8024653061183211, "scores": [0.8729234965707634, 0.8497674029026215, 0.8741757009172639, 0.5343363446863209], "rank_score": 0.7828007362692424} -{"id": "wang-cohen-2015-joint", "title": "Joint Information Extraction and Reasoning: A Scalable Statistical Relational Learning Approach", "abstract": "A standard pipeline for statistical relational learning involves two steps: one first constructs the knowledge base (KB) from text, and then performs the learning and reasoning tasks using probabilistic first-order logics. However, a key issue is that information extraction (IE) errors from text affect the quality of the KB, and propagate to the reasoning task. In this paper, we propose a statistical relational learning model for joint information extraction and reasoning. More specifically, we incorporate context-based entity extraction with structure learning (SL) in a scalable probabilistic logic framework. We then propose a latent context invention (LCI) approach to improve the performance. In experiments, we show that our approach outperforms state-of-the-art baselines over three real-world Wikipedia datasets from multiple domains; that joint learning and inference for IE and SL significantly improve both tasks; that latent context invention further improves the results.", "phrases": ["reasoning", "structure learning", "joint information extraction"], "overall_score": 0.8598362331015592, "scores": [0.9355853102815617, 0.8406364828110651, 0.5717482104396621], "rank_score": 0.7826566678440963} -{"id": "ye-etal-2021-crossfit", "title": "CrossFit: A Few-shot Learning Challenge for Cross-task Generalization in NLP", "abstract": "Humans can learn a new language task efficiently with only few examples, by leveraging their knowledge obtained when learning prior tasks. 
In this paper, we explore whether and how such cross-task generalization ability can be acquired, and further applied to build better few-shot learners across diverse NLP tasks. We introduce CrossFit, a problem setup for studying cross-task generalization ability, which standardizes seen/unseen task partitions, data access during different learning stages, and the evaluation protocols. To instantiate different seen/unseen task partitions in CrossFit and facilitate in-depth analysis, we present the NLP Few-shot Gym, a repository of 160 diverse few-shot NLP tasks created from open-access NLP datasets and converted to a unified text-to-text format. Our analysis reveals that the few-shot learning ability on unseen tasks can be improved via an upstream learning stage using a set of seen tasks. We also observe that the selection of upstream learning tasks can significantly influence few-shot performance on unseen tasks, calling for further analysis on task similarity and transferability.", "phrases": ["cross-task generalization", "problem setup", "crossfit"], "overall_score": 1.6273065692854405, "scores": [0.9575337519375129, 0.8385619018719727, 0.5516114637046522], "rank_score": 0.7825690391713792} -{"id": "wang-etal-2013-lattice", "title": "A Lattice-based Framework for Joint Chinese Word Segmentation, POS Tagging and Parsing", "abstract": "For the cascaded task of Chinese word segmentation, POS tagging and parsing, the pipeline approach suffers from error propagation while the joint learning approach suffers from inefficient decoding due to the large combined search space. In this paper, we present a novel lattice-based framework in which a Chinese sentence is first segmented into a word lattice, and then a lattice-based POS tagger and a lattice-based parser are used to process the lattice from two different viewpoints: sequential POS tagging and hierarchical tree building. A strategy is designed to exploit the complementary strengths of the tagger and parser, and encourage them to predict agreed structures. Experimental results on Chinese Treebank show that our lattice-based framework significantly improves the accuracy of the three sub-tasks.", "phrases": ["pos tagging", "chinese sentence", "word lattice"], "overall_score": 1.2593830021750831, "scores": [0.8824619457654228, 0.9098275909904056, 0.5552064289700003], "rank_score": 0.7824986552419428} -{"id": "rutherford-xue-2014-discovering", "title": "Discovering Implicit Discourse Relations Through Brown Cluster Pair Representation and Coreference Patterns", "abstract": "Sentences form coherent relations in a discourse without discourse connectives more frequently than with connectives. Senses of these implicit discourse relations that hold between a sentence pair, however, are challenging to infer. Here, we employ Brown cluster pairs to represent discourse relation and incorporate coreference patterns to identify senses of implicit discourse relations in naturally occurring text. Our system improves the baseline performance by as much as 25%. 
Feature analyses suggest that Brown cluster pairs and coreference patterns can reveal many key linguistic characteristics of each type of discourse relation.", "phrases": ["discourse relation", "coreference pattern", "linguistically-informed feature"], "overall_score": 2.5485732148611997, "scores": [1.227957323274894, 0.5826556459072194, 0.5360699959661671], "rank_score": 0.7822276550494268} -{"id": "nomoto-2004-multi", "title": "Multi-Engine Machine Translation with Voted Language Model", "abstract": "The paper describes a particular approach to multi-engine machine translation (MEMT), where we make use of voted language models to selectively combine translation outputs from multiple off-the-shelf MT systems. Experiments are done using large corpora from three distinct domains. The study found that the use of voted language models leads to an improved performance of MEMT systems.", "phrases": ["machine translation", "hypothesis", "such setup"], "overall_score": 1.8753744621309425, "scores": [0.9574024328728117, 0.8604578594267096, 0.5284153979757589], "rank_score": 0.7820918967584266} -{"id": "dou-knight-2012-large", "title": "Large Scale Decipherment for Out-of-Domain Machine Translation", "abstract": "We apply slice sampling to Bayesian decipherment and use our new decipherment framework to improve out-of-domain machine translation. Compared with the state of the art algorithm, our approach is highly scalable and produces better results, which allows us to decipher ciphertext with billions of tokens and hundreds of thousands of word types with high accuracy. We decipher a large amount of monolingual data to improve out-of-domain translation and achieve significant gains of up to 3.8 BLEU points.", "phrases": ["decipherment", "monolingual data", "phrase table", "non parallel data"], "overall_score": 2.215619565155173, "scores": [1.1179280185091829, 0.9119475835353922, 0.5586356392919534, 0.5395547333678616], "rank_score": 0.7820164936760976} -{"id": "kim-etal-2019-effective", "title": "Effective Cross-lingual Transfer of Neural Machine Translation Models without Shared Vocabularies", "abstract": "Transfer learning or multilingual model is essential for low-resource neural machine translation (NMT), but the applicability is limited to cognate languages by sharing their vocabularies. This paper shows effective techniques to transfer a pretrained NMT model to a new, unrelated language without shared vocabularies. We relieve the vocabulary mismatch by using cross-lingual word embedding, train a more language-agnostic encoder by injecting artificial noises, and generate synthetic data easily from the pretraining data without back-translation. Our methods do not require restructuring the vocabulary or retraining the model. We improve plain NMT transfer by up to +5.1% BLEU in five low-resource translation tasks, outperforming multilingual joint training by a large margin. 
We also provide extensive ablation studies on pretrained embedding, synthetic data, vocabulary size, and parameter freezing for a better understanding of NMT transfer.", "phrases": ["cross-lingual transfer", "vocabulary mismatch", "source language"], "overall_score": 1.9430408126814773, "scores": [0.9454639248118251, 0.8722252422934811, 0.5281222695302484], "rank_score": 0.7819371455451849} -{"id": "castilho-2021-towards", "title": "Towards Document-Level Human MT Evaluation: On the Issues of Annotator Agreement, Effort and Misevaluation", "abstract": "Document-level human evaluation of machine translation (MT) has been raising interest in the community. However, little is known about the issues of using document-level methodologies to assess MT quality. In this article, we compare the inter-annotator agreement (IAA) scores, the effort to assess the quality in different document-level methodologies, and the issue of misevaluation when sentences are evaluated out of context.", "phrases": ["misevaluation", "inter-annotator agreement", "single sentence"], "overall_score": 1.2582529784503886, "scores": [0.8013095525285139, 0.9936412588827686, 0.5504387846634445], "rank_score": 0.781796532024909} -{"id": "gao-etal-2019-rebuttal", "title": "Does My Rebuttal Matter? Insights from a Major NLP Conference", "abstract": "Peer review is a core element of the scientific process, particularly in conference-centered fields such as ML and NLP. However, only few studies have evaluated its properties empirically. Aiming to fill this gap, we present a corpus that contains over 4k reviews and 1.2k author responses from ACL-2018. We quantitatively and qualitatively assess the corpus. This includes a pilot study on paper weaknesses given by reviewers and on quality of author responses. We then focus on the role of the rebuttal phase, and propose a novel task to predict after-rebuttal (i.e., final) scores from initial reviews and author responses. Although author responses do have a marginal (and statistically significant) influence on the final scores, especially for borderline papers, our results suggest that a reviewer's final score is largely determined by her initial score and the distance to the other reviewers' initial scores. In this context, we discuss the conformity bias inherent to peer reviewing, a bias that has largely been overlooked in previous research. We hope our analyses will help better assess the usefulness of the rebuttal phase in NLP conferences.", "phrases": ["nlp conference", "influence", "conformity bias"], "overall_score": 1.5212125394840426, "scores": [0.9153159430659191, 0.8760447649237985, 0.5538850993090679], "rank_score": 0.7817486024329284} -{"id": "schwartz-etal-2014-machine", "title": "Machine Translation and Monolingual Postediting: The AFRL WMT-14 System", "abstract": "This paper describes the AFRL statistical MT system and the improvements that were developed during the WMT14 evaluation campaign. As part of these efforts we experimented with a number of extensions to the standard phrase-based model that improve performance on Russian to English and Hindi to English translation tasks. 
In addition, we describe our efforts to make use of monolingual English speakers to correct the output of machine translation, and present the results of monolingual postediting of the entire 3003 sentences of the WMT14 Russian-English test set.", "phrases": ["monolingual postediting", "machine translation", "post-editor"], "overall_score": 1.0836800237436102, "scores": [0.9364565152893811, 0.8689213327206442, 0.5397518462379858], "rank_score": 0.7817098980826703} -{"id": "dunietz-etal-2017-corpus", "title": "The BECauSE Corpus 2.0: Annotating Causality and Overlapping Relations", "abstract": "Language of cause and effect captures an essential component of the semantics of a text. However, causal language is also intertwined with other semantic relations, such as temporal precedence and correlation. This makes it difficult to determine when causation is the primary intended meaning. This paper presents BECauSE 2.0, a new version of the BECauSE corpus with exhaustively annotated expressions of causal language, but also seven semantic relations that are frequently co-present with causation. The new corpus shows high inter-annotator agreement, and yields insights both about the linguistic expressions of causation and about the process of annotating co-present semantic relations.", "phrases": ["because corpus", "causal language", "new version"], "overall_score": 1.7174677196333528, "scores": [0.8868176609435836, 0.8574316560143849, 0.600710416355463], "rank_score": 0.7816532444378105} -{"id": "forbes-etal-2020-social", "title": "Social Chemistry 101: Learning to Reason about Social and Moral Norms", "abstract": "Social norms\u2014the unspoken commonsense rules about acceptable social behavior\u2014are crucial in understanding the underlying causes and intents of people's actions in narratives. For example, underlying an action such as \u201cwanting to call cops on my neighbor\u201d are social norms that inform our conduct, such as \u201cIt is expected that you report crimes.\u201d We present SOCIAL CHEMISTRY, a new conceptual formalism to study people's everyday social norms and moral judgments over a rich spectrum of real life situations described in natural language. We introduce SOCIAL-CHEM-101, a large-scale corpus that catalogs 292k rules-of-thumb such as \u201cIt is rude to run a blender at 5am\u201d as the basic conceptual units. Each rule-of-thumb is further broken down with 12 different dimensions of people's judgments, including social judgments of good and bad, moral foundations, expected cultural pressure, and assumed legality, which together amount to over 4.5 million annotations of categorical labels and free-text descriptions. Comprehensive empirical results based on state-of-the-art neural models demonstrate that computational modeling of social norms is a promising research direction. 
Our model framework, Neural Norm Transformer, learns and generalizes SOCIAL-CHEM-101 to successfully reason about previously unseen situations, generating relevant (and potentially novel) attribute-aware social rules-of-thumb.", "phrases": ["action", "narrative", "social norm", "different dimension", "social chemistry"], "overall_score": 2.0048232295276684, "scores": [0.9195303616572378, 1.3590545409933992, 0.5645333257749153, 0.5339574880485012, 0.5310389285491709], "rank_score": 0.7816229290046449} -{"id": "subramanya-etal-2010-efficient", "title": "Efficient Graph-Based Semi-Supervised Learning of Structured Tagging Models", "abstract": "We describe a new scalable algorithm for semi-supervised training of conditional random fields (CRF) and its application to part-of-speech (POS) tagging. The algorithm uses a similarity graph to encourage similar n-grams to have similar POS tags. We demonstrate the efficacy of our approach on a domain adaptation task, where we assume that we have access to large amounts of unlabeled data from the target domain, but no additional labeled data. The similarity graph is used during training to smooth the state posteriors on the target domain. Standard inference can be used at test time. Our approach is able to scale to very large problems and yields significantly improved target domain accuracy.", "phrases": ["n-gram", "pos tagging", "label propagation"], "overall_score": 2.2136519293063577, "scores": [0.8901651266977206, 0.878946093408454, 0.574854793554992], "rank_score": 0.7813220045537222} -{"id": "chen-etal-2016-implicit", "title": "Implicit Discourse Relation Detection via a Deep Architecture with Gated Relevance Network", "abstract": "Word pairs, which are one of the most easily accessible features between two text segments, have been proven to be very useful for detecting the discourse relations held between text segments. However, because of the data sparsity problem, the performance achieved by using word pair features is limited. In this paper, in order to overcome the data sparsity problem, we propose the use of word embeddings to replace the original words. Moreover, we adopt a gated relevance network to capture the semantic interaction between word pairs, and then aggregate those semantic interactions using a pooling layer to select the most informative interactions. Experimental results on Penn Discourse Tree Bank show that the proposed method without using manually designed features can achieve better performance on recognizing the discourse level relations in all of the relations.", "phrases": ["gated relevance network", "word embedding", "semantic interaction", "discourse argument"], "overall_score": 2.0617489622665968, "scores": [0.903406004183237, 1.0741825952228254, 0.591955770727259, 0.5554335363787347], "rank_score": 0.781244476628014} -{"id": "muller-etal-2021-first", "title": "First Align, then Predict: Understanding the Cross-Lingual Ability of Multilingual BERT", "abstract": "Multilingual pretrained language models have demonstrated remarkable zero-shot cross-lingual transfer capabilities. Such transfer emerges by fine-tuning on a task of interest in one language and evaluating on a distinct language, not seen during the fine-tuning. Despite promising results, we still lack a proper understanding of the source of this transfer. 
Using a novel layer ablation technique and analyses of the model's internal representations, we show that multilingual BERT, a popular multilingual language model, can be viewed as the stacking of two sub-networks: a multilingual encoder followed by a task-specific language-agnostic predictor. While the encoder is crucial for cross-lingual transfer and remains mostly unchanged during fine-tuning, the task predictor has little importance on the transfer and can be reinitialized during fine-tuning. We present extensive experiments with three distinct tasks, seventeen typologically diverse languages and multiple domains to support our hypothesis.", "phrases": ["cross-lingual ability", "bert", "language model", "fine-tuning"], "overall_score": 1.5202242781031927, "scores": [0.9460553253943844, 0.783215904861493, 0.8600722990837016, 0.5356194168503479], "rank_score": 0.7812407365474816} -{"id": "pareti-etal-2013-automatically", "title": "Automatically Detecting and Attributing Indirect Quotations", "abstract": "Direct quotations are used for opinion mining and information extraction as they have an easy to extract span and they can be attributed to a speaker with high accuracy. However, simply focusing on direct quotations ignores around half of all reported speech, which is in the form of indirect or mixed speech. This work presents the first large-scale experiments in indirect and mixed quotation extraction and attribution. We propose two methods of extracting all quote types from news articles and evaluate them on two large annotated corpora, one of which is a contribution of this work. We further show that direct quotation attribution methods can be successfully applied to indirect and mixed quotation attribution.", "phrases": ["attribution", "indirect quotation", "information extraction", "news article"], "overall_score": 2.115307992357302, "scores": [0.9311892512095261, 1.1310507530955942, 0.5360438023883723, 0.5261900180477851], "rank_score": 0.7811184561853195} -{"id": "feng-etal-2011-learning", "title": "Learning General Connotation of Words using Graph-based Algorithms", "abstract": "In this paper, we introduce a connotation lexicon, a new type of lexicon that lists words with connotative polarity, i.e., words with positive connotation (e.g., award, promotion) and words with negative connotation (e.g., cancer, war). Connotation lexicons differ from much studied sentiment lexicons: the latter concerns words that express sentiment, while the former concerns words that evoke or associate with a specific polarity of sentiment. Understanding the connotation of words would seem to require common sense and world knowledge. However, we demonstrate that much of the connotative polarity of words can be inferred from natural language text in a nearly unsupervised manner. The key linguistic insight behind our approach is selectional preference of connotative predicates. We present graph-based algorithms using PageRank and HITS that collectively learn connotation lexicon together with connotative predicates. 
Our empirical study demonstrates that the resulting connotation lexicon is of great value for sentiment analysis complementing existing sentiment lexicons.", "phrases": ["graph-based algorithm", "connotation lexicon", "pagerank", "sentiment analysis"], "overall_score": 1.6242675075553663, "scores": [0.857629628377584, 1.2010302426723274, 0.5372148701527125, 0.5285554964336484], "rank_score": 0.7811075594090681} -{"id": "lin-etal-2020-mintl", "title": "MinTL: Minimalist Transfer Learning for Task-Oriented Dialogue Systems", "abstract": "In this paper, we propose Minimalist Transfer Learning (MinTL) to simplify the system design process of task-oriented dialogue systems and alleviate the over-dependency on annotated data. MinTL is a simple yet effective transfer learning framework, which allows us to plug-and-play pre-trained seq2seq models, and jointly learn dialogue state tracking and dialogue response generation. Unlike previous approaches, which use a copy mechanism to \u201ccarryover\u201d the old dialogue states to the new one, we introduce Levenshtein belief spans (Lev), that allows efficient dialogue state tracking with a minimal generation length. We instantiate our learning framework with two pre-trained backbones: T5 and BART, and evaluate them on MultiWOZ. Extensive experiments demonstrate that: 1) our systems establish new state-of-the-art results on end-to-end response generation, 2) MinTL-based systems are more robust than baseline methods in the low resource setting, and they achieve competitive results with only 20% training data, and 3) Lev greatly improves the inference efficiency.", "phrases": ["minimalist transfer learning", "task-oriented dialogue system", "mintl"], "overall_score": 1.399366563283259, "scores": [0.9044905174594384, 0.8146888519699809, 0.623824678797237], "rank_score": 0.7810013494088853} -{"id": "mirkin-meunier-2015-personalized", "title": "Personalized Machine Translation: Predicting Translational Preferences", "abstract": "Machine Translation (MT) has advanced in recent years to produce better translations for clients\u2019 specific domains, and sophisticated tools allow professional translators to obtain translations according to their prior edits. We suggest that MT should be further personalized to the end-user level \u2010 the receiver or the author of the text \u2010 as done in other applications. As a step in that direction, we propose a method based on a recommender systems approach where the user\u2019s preferred translation is predicted based on preferences of similar users. In our experiments, this method outperforms a set of non-personalized methods, suggesting that user preference information can be employed to provide better-suited translations for each user.", "phrases": ["recommender system approach", "preferred translation", "similar user"], "overall_score": 1.2569247577504896, "scores": [0.930760086338226, 0.858098135832852, 0.5540555641386911], "rank_score": 0.7809712621032564} -{"id": "glavas-ponzetto-2017-dual", "title": "Dual Tensor Model for Detecting Asymmetric Lexico-Semantic Relations", "abstract": "Detection of lexico-semantic relations is one of the central tasks of computational semantics. Although some fundamental relations (e.g., hypernymy) are asymmetric, most existing models account for asymmetry only implicitly and use the same concept representations to support detection of symmetric and asymmetric relations alike. 
In this work, we propose the Dual Tensor model, a neural architecture with which we explicitly model the asymmetry and capture the translation between unspecialized and specialized word embeddings via a pair of tensors. Although our Dual Tensor model needs only unspecialized embeddings as input, our experiments on hypernymy and meronymy detection suggest that it can outperform more complex and resource-intensive models. We further demonstrate that the model can account for polysemy and that it exhibits stable performance across languages.", "phrases": ["lexico-semantic relation", "hypernymy", "dual tensor model"], "overall_score": 1.0825889452339048, "scores": [0.9084339316760811, 0.8741397627169765, 0.5601948595221949], "rank_score": 0.7809228513050841} -{"id": "kovatchev-etal-2018-etpc", "title": "ETPC - A Paraphrase Identification Corpus Annotated with Extended Paraphrase Typology and Negation", "abstract": "We present the Extended Paraphrase Typology (EPT) and the Extended Typology Paraphrase Corpus (ETPC). The EPT typology addresses several practical limitations of existing paraphrase typologies: it is the first typology that copes with the non-paraphrase pairs in the paraphrase identification corpora and distinguishes between contextual and habitual paraphrase types. ETPC is the largest corpus to date annotated with atomic paraphrase types. It is the first corpus with detailed annotation of both the paraphrase and the non-paraphrase pairs and the first corpus annotated with paraphrase and negation. Both new resources contribute to better understanding the paraphrase phenomenon, and allow for studying the relationship between paraphrasing and negation. To the developers of Paraphrase Identification systems ETPC corpus offers better means for evaluation and error analysis. Furthermore, the EPT typology and ETPC corpus emphasize the relationship with other areas of NLP such as Semantic Similarity, Textual Entailment, Summarization and Simplification.", "phrases": ["extended paraphrase typology", "negation", "etpc corpus"], "overall_score": 1.519369126673665, "scores": [0.9394317999974057, 0.8516397685948429, 0.5513322583438686], "rank_score": 0.7808012756453723} -{"id": "long-etal-2016-simpler", "title": "Simpler Context-Dependent Logical Forms via Model Projections", "abstract": "We consider the task of learning a context-dependent mapping from utterances to denotations. With only denotations at training time, we must search over a combinatorially large space of logical forms, which is even larger with context-dependent utterances. To cope with this challenge, we perform successive projections of the full model onto simpler models that operate over equivalence classes of logical forms. Though less expressive, we find that these simpler models are much faster and can be surprisingly effective. Moreover, they can be used to bootstrap the full model. Finally, we collected three new context-dependent semantic parsing datasets, and develop a new left-to-right parser.", "phrases": ["logical form", "scone", "instruction", "deterministic domain", "paragraph"], "overall_score": 2.060553474318777, "scores": [1.102370024854108, 1.0468816983869993, 0.6208864947719565, 0.5737848230853896, 0.5600343515573172], "rank_score": 0.780791478531154} -{"id": "hodosh-hockenmaier-2016-focused", "title": "Focused Evaluation for Image Description with Binary Forced-Choice Tasks", "abstract": "Current evaluation metrics for image description may be too coarse. 
We therefore propose a series of binary forced-choice tasks that each focus on a different aspect of the captions. We evaluate a number of different off-the-shelf image description systems. Our results indicate strengths and shortcomings of both generation and ranking based approaches.", "phrases": ["image description", "binary forced-choice task", "caption"], "overall_score": 1.2557833118234047, "scores": [0.9149079181853186, 0.8804143846999545, 0.5454638227332704], "rank_score": 0.7802620418728479} -{"id": "madnani-etal-2013-automated", "title": "Automated Scoring of a Summary-Writing Task Designed to Measure Reading Comprehension", "abstract": "We introduce a cognitive framework for measuring reading comprehension that includes the use of novel summary writing tasks. We derive NLP features from the holistic rubric used to score the summaries written by students for such tasks and use them to design a preliminary, automated scoring system. Our results show that the automated approach performs well on summaries written by students for two different passages.", "phrases": ["scoring", "reading comprehension", "student"], "overall_score": 1.5182369484430684, "scores": [0.9086872739774018, 0.8916795380517872, 0.540291541359015], "rank_score": 0.7802194511294015} -{"id": "yin-schutze-2015-multichannel", "title": "Multichannel Variable-Size Convolution for Sentence Classification", "abstract": "We propose MVCNN, a convolution neural network (CNN) architecture for sentence classification. It (i) combines diverse versions of pretrained word embeddings and (ii) extracts features of multigranular phrases with variable-size convolution filters. We also show that pretraining MVCNN is critical for good performance. MVCNN achieves state-of-the-art performance on four tasks: on small-scale binary, small-scale multi-class and large-scale Twitter sentiment prediction and on subjectivity classification.", "phrases": ["sentence classification", "cnn", "deep neural network"], "overall_score": 2.1128549380473136, "scores": [0.908814193016811, 0.8382091024527338, 0.5936145596049537], "rank_score": 0.7802126183581661} -{"id": "kozareva-2015-everyone", "title": "Everyone Likes Shopping! Multi-class Product Categorization for e-Commerce", "abstract": "Online shopping caters the needs of millions of users on a daily basis. To build an accurate system that can retrieve relevant products for a query like \u201cMB252 with travel bags\u201d one requires product and query categorization mechanisms, which classify the text as Home&Garden>Kitchen&Dining>Kitchen Appliances>Blenders. One of the biggest challenges in e-Commerce is that providers like Amazon, e-Bay, Google, Yahoo! and Walmart organize products into different product taxonomies making it hard and time-consuming for sellers to categorize goods for each shopping platform. To address this challenge, we propose an automatic product categorization mechanism, which for a given product title assigns the correct product category from a taxonomy. We conducted an empirical evaluation on 445,408 product titles and used a rich product taxonomy of 319 categories organized into 6 levels. 
We compared performance against multiple algorithms and found that the best performing system reaches .88 f-score.", "phrases": ["shopping", "e-commerce", "product taxonomy"], "overall_score": 1.2556468337263522, "scores": [0.8914925328858838, 0.8699539752761496, 0.579085221628007], "rank_score": 0.7801772432633468} -{"id": "zhou-etal-2019-improving", "title": "Improving Robustness of Neural Machine Translation with Multi-task Learning", "abstract": "While neural machine translation (NMT) achieves remarkable performance on clean, in-domain text, performance is known to degrade drastically when facing text which is full of typos, grammatical errors and other varieties of noise. In this work, we propose a multi-task learning algorithm for transformer-based MT systems that is more resilient to this noise. We describe our submission to the WMT 2019 Robustness shared task based on this method. Our model achieves a BLEU score of 32.8 on the shared task French to English dataset, which is 7.1 BLEU points higher than the baseline vanilla transformer trained with clean text.", "phrases": ["neural machine translation", "multi-task learning", "noisy input"], "overall_score": 1.3976718945376239, "scores": [0.9281713916088873, 0.890498437030824, 0.5214967816806746], "rank_score": 0.7800555367734621} -{"id": "tsuruoka-tsujii-2005-bidirectional", "title": "Bidirectional Inference with the Easiest-First Strategy for Tagging Sequence Data", "abstract": "This paper presents a bidirectional inference algorithm for sequence labeling problems such as part-of-speech tagging, named entity recognition and text chunking. The algorithm can enumerate all possible decomposition structures and find the highest probability sequence together with the corresponding decomposition structure in polynomial time. We also present an efficient decoding algorithm based on the easiest-first strategy, which gives comparably good performance to full bidirectional inference with significantly lower computational cost. Experimental results of part-of-speech tagging and text chunking show that the proposed bidirectional inference methods consistently outperform unidirectional inference methods and bidirectional MEMMs give comparable performance to that achieved by state-of-the-art learning algorithms including kernel support vector machines.", "phrases": ["easiest-first strategy", "bidirectional inference", "pos tagger"], "overall_score": 1.7135681533718825, "scores": [0.9249227475830728, 0.8471760978665596, 0.5675365805967679], "rank_score": 0.7798784753488001} -{"id": "xu-etal-2015-semantic", "title": "Semantic Relation Classification via Convolutional Neural Networks with Simple Negative Sampling", "abstract": "Syntactic features play an essential role in identifying relationship in a sentence. Previous neural network models directly work on raw word sequences or constituent parse trees, thus often suffer from irrelevant information introduced when subjects and objects are in a long distance. In this paper, we propose to learn more robust relation representations from shortest dependency paths through a convolution neural network. We further take the relation directionality into account and propose a straightforward negative sampling strategy to improve the assignment of subjects and objects. 
Experimental results show that our method outperforms the state-of-the-art approaches on the SemEval-2010 Task 8 dataset.", "phrases": ["convolutional neural network", "semantic relation classification", "cnns"], "overall_score": 2.4103209698597228, "scores": [0.9022750117666269, 0.897157402340847, 0.5398958282862738], "rank_score": 0.7797760807979159} -{"id": "hashimoto-etal-2016-word", "title": "Word Embeddings as Metric Recovery in Semantic Spaces", "abstract": "Continuous word representations have been remarkably useful across NLP tasks but remain poorly understood. We ground word embeddings in semantic spaces studied in the cognitive-psychometric literature, taking these spaces as the primary objects to recover. To this end, we relate log co-occurrences of words in large corpora to semantic similarity assessments and show that co-occurrences are indeed consistent with an Euclidean semantic space hypothesis. Framing word embedding as metric recovery of a semantic space unifies existing word embedding algorithms, ties them to manifold learning, and demonstrates that existing algorithms are consistent metric recovery methods given co-occurrence counts from random walks. Furthermore, we propose a simple, principled, direct metric recovery algorithm that performs on par with the state-of-the-art word embedding and manifold learning methods. Finally, we complement recent focus on analogies by constructing two new inductive reasoning datasets\u2014series completion and classification\u2014and demonstrate that word embeddings can be used to solve them as well.", "phrases": ["metric recovery", "semantic space", "co-occurrence count"], "overall_score": 1.2548226924352608, "scores": [0.8358814188585133, 0.9763080033877723, 0.5268061042182505], "rank_score": 0.7796651754881787} -{"id": "escolano-etal-2021-multilingual", "title": "Multilingual Machine Translation: Closing the Gap between Shared and Language-specific Encoder-Decoders", "abstract": "State-of-the-art multilingual machine translation relies on a universal encoder-decoder, which requires retraining the entire system to add new languages. In this paper, we propose an alternative approach that is based on language-specific encoder-decoders, and can thus be more easily extended to new languages by learning their corresponding modules. So as to encourage a common interlingua representation, we simultaneously train the N initial languages. Our experiments show that the proposed approach outperforms the universal encoder-decoder by 3.28 BLEU points on average, while allowing to add new languages without the need to retrain the rest of the modules. All in all, our work closes the gap between shared and language-specific encoder-decoders, advancing toward modular multilingual machine translation systems that can be flexibly extended in lifelong learning settings.", "phrases": ["gap", "language-specific encoder-decoder", "mnmt model"], "overall_score": 1.7129665835223464, "scores": [0.9701139819970747, 0.821409735673069, 0.547290349664351], "rank_score": 0.7796046891114982} -{"id": "fei-etal-2022-cqg", "title": "CQG: A Simple and Effective Controlled Generation Framework for Multi-hop Question Generation", "abstract": "Multi-hop question generation focuses on generating complex questions that require reasoning over multiple pieces of information of the input passage. Current models with state-of-the-art performance have been able to generate the correct questions corresponding to the answers. 
However, most models can not ensure the complexity of generated questions, so they may generate shallow questions that can be answered without multi-hop reasoning. To address this challenge, we propose the CQG, which is a simple and effective controlled framework. CQG employs a simple method to generate the multi-hop questions that contain key entities in multi-hop reasoning chains, which ensure the complexity and quality of the questions. In addition, we introduce a novel controlled Transformer-based decoder to guarantee that key entities appear in the questions. Experiment results show that our model greatly improves performance, which also outperforms the state-of-the-art model by about 25% (5 BLEU points) on HotpotQA.", "phrases": ["multi-hop question generation", "complex question", "cqg"], "overall_score": 1.0807027921821886, "scores": [0.9880628006625823, 0.8011718503285299, 0.5494521874430373], "rank_score": 0.7795622794780498} -{"id": "tran-etal-2015-joint", "title": "Joint Graphical Models for Date Selection in Timeline Summarization", "abstract": "Automatic timeline summarization (TLS) generates precise, dated overviews over (often prolonged) events, such as wars or economic crises. One subtask of TLS selects the most important dates for an event within a certain time frame. Date selection has up to now been handled via supervised machine learning approaches that estimate the importance of each date separately, using features such as the frequency of date mentions in news corpora. This approach neglects interactions between different dates that occur due to connections between subevents. We therefore suggest a joint graphical model for date selection. Even unsupervised versions of this model perform as well as supervised state-of-the-art approaches. With parameter tuning on training data, it outperforms prior supervised models by a considerable margin.", "phrases": ["graphical model", "date selection", "timeline summarization"], "overall_score": 0.8563874908435087, "scores": [0.911431278482753, 0.875492119786306, 0.5516290638058202], "rank_score": 0.7795174873582931} -{"id": "carpuat-etal-2017-detecting", "title": "Detecting Cross-Lingual Semantic Divergence for Neural Machine Translation", "abstract": "Parallel corpora are often not as parallel as one might assume: non-literal translations and noisy translations abound, even in curated corpora routinely used for training and evaluation. We use a cross-lingual textual entailment system to distinguish sentence pairs that are parallel in meaning from those that are not, and show that filtering out divergent examples from training improves translation quality.", "phrases": ["semantic divergence", "neural machine translation", "translation quality"], "overall_score": 2.0568493957283263, "scores": [0.9036112560672815, 0.8768045047948841, 0.5577479909971272], "rank_score": 0.779387917286431} -{"id": "yang-etal-2018-breaking", "title": "Breaking the Beam Search Curse: A Study of (Re-)Scoring Methods and Stopping Criteria for Neural Machine Translation", "abstract": "Beam search is widely used in neural machine translation, and usually improves translation quality compared to greedy search. It has been widely observed that, however, beam sizes larger than 5 hurt translation quality. We explain why this happens, and propose several methods to address this problem. Furthermore, we discuss the optimal stopping criteria for these methods. 
Results show that our hyperparameter-free methods outperform the widely-used hyperparameter-free heuristic of length normalization by +2.0 BLEU, and achieve the best results among all methods on Chinese-to-English translation.", "phrases": ["beam search", "neural machine translation", "reward", "guarantee", "variation"], "overall_score": 1.7945362078857776, "scores": [0.8596128623127267, 1.0821928722530363, 0.8942368220319992, 0.5311712185114718, 0.529572088192666], "rank_score": 0.77935717266038} -{"id": "mueller-etal-2020-cross", "title": "Cross-Linguistic Syntactic Evaluation of Word Prediction Models", "abstract": "A range of studies have concluded that neural word prediction models can distinguish grammatical from ungrammatical sentences with high accuracy. However, these studies are based primarily on monolingual evidence from English. To investigate how these models' ability to learn syntax varies by language, we introduce CLAMS (Cross-Linguistic Assessment of Models on Syntax), a syntactic evaluation suite for monolingual and multilingual models. CLAMS includes subject-verb agreement challenge sets for English, French, German, Hebrew and Russian, generated from grammars we develop. We use CLAMS to evaluate LSTM language models as well as monolingual and multilingual BERT. Across languages, monolingual LSTMs achieved high accuracy on dependencies without attractors, and generally poor accuracy on agreement across object relative clauses. On other constructions, agreement accuracy was generally higher in languages with richer morphology. Multilingual models generally underperformed monolingual models. Multilingual BERT showed high syntactic accuracy on English, but noticeable deficiencies in other languages.", "phrases": ["syntactic evaluation", "hebrew", "multilingual bert"], "overall_score": 1.7939780432802874, "scores": [0.9374256363529947, 0.8462976843564984, 0.5536209738471726], "rank_score": 0.7791147648522219} -{"id": "wang-etal-2016-memory", "title": "Memory-enhanced Decoder for Neural Machine Translation", "abstract": "We propose to enhance the RNN decoder in a neural machine translator (NMT) with external memory, as a natural but powerful extension to the state in the decoding RNN. This memory-enhanced RNN decoder is called \\textsc{MemDec}. At each time during decoding, \\textsc{MemDec} will read from this memory and write to this memory once, both with content-based addressing. Unlike the unbounded memory in previous work\\cite{RNNsearch} to store the representation of source sentence, the memory in \\textsc{MemDec} is a matrix with pre-determined size designed to better capture the information important for the decoding process at each time step. Our empirical study on Chinese-English translation shows that it can improve by $4.8$ BLEU upon Groundhog and $5.3$ BLEU upon Moses, yielding the best performance achieved with the same training set.", "phrases": ["neural machine translation", "memory", "good performance"], "overall_score": 1.3958089816343113, "scores": [0.9432194781075858, 0.866502277399143, 0.5273257203506232], "rank_score": 0.779015825285784} -{"id": "ravi-2013-scalable", "title": "Scalable Decipherment for Machine Translation via Hash Sampling", "abstract": "In this paper, we propose a new Bayesian inference method to train statistical machine translation systems using only non-parallel corpora. 
Following a probabilistic decipherment approach, we first introduce a new framework for decipherment training that is flexible enough to incorporate any number/type of features (besides simple bag-of-words) as side-information used for estimating translation models. In order to perform fast, efficient Bayesian inference in this framework, we then derive a hash sampling strategy that is inspired by the work of Ahmed et al. (2012). The new translation hash sampler enables us to scale elegantly to complex models (for the first time) and large vocabulary/corpora sizes. We show empirical results on the OPUS data\u2014our method yields the best BLEU scores compared to existing approaches, while achieving significant computational speedups (several orders faster). We also report for the first time\u2014BLEU score results for a large-scale MT task using only non-parallel data (EMEA corpus).", "phrases": ["decipherment", "hash sampling", "translation model"], "overall_score": 1.711622072412242, "scores": [0.8801608564820722, 0.8713668791723848, 0.5854505915504584], "rank_score": 0.7789927757349718} -{"id": "finkel-manning-2008-enforcing", "title": "Enforcing Transitivity in Coreference Resolution", "abstract": "A desirable quality of a coreference resolution system is the ability to handle transitivity constraints, such that even if it places high likelihood on a particular mention being coreferent with each of two other mentions, it will also consider the likelihood of those two mentions being coreferent when making a final assignment. This is exactly the kind of constraint that integer linear programming (ILP) is ideal for, but, surprisingly, previous work applying ILP to coreference resolution has not encoded this type of constraint. We train a coreference classifier over pairs of mentions, and show how to encode this type of constraint on top of the probabilities output from our pairwise classifier to extract the most probable legal entity assignments. We present results on two commonly used datasets which show that enforcement of transitive closure consistently improves performance, including improvements of up to 3.6% using the b3 scorer, and up to 16.5% using cluster f-measure.", "phrases": ["transitivity", "resolution", "mention-pair model"], "overall_score": 1.793538898792855, "scores": [1.2682392603554227, 0.5406591603735537, 0.5278737197447388], "rank_score": 0.7789240468245717} -{"id": "jans-etal-2012-skip", "title": "Skip N-grams and Ranking Functions for Predicting Script Events", "abstract": "In this paper, we extend current state-of-the-art research on unsupervised acquisition of scripts, that is, stereotypical and frequently observed sequences of events. We design, evaluate and compare different methods for constructing models for script event prediction: given a partial chain of events in a script, predict other events that are likely to belong to the script. Our work aims to answer key questions about how best to (1) identify representative event chains from a source text, (2) gather statistics from the event chains, and (3) choose ranking functions for predicting new script events. 
We make several contributions, introducing skip-grams for collecting event statistics, designing improved methods for ranking event predictions, defining a more reliable evaluation metric for measuring predictiveness, and providing a systematic analysis of the various event prediction models.", "phrases": ["script event", "event prediction", "skip-gram"], "overall_score": 2.506734767954721, "scores": [0.8227881830332145, 0.9696991926510071, 0.5437954488239537], "rank_score": 0.7787609415027251} -{"id": "bui-etal-2009-extracting", "title": "Extracting Decisions from Multi-Party Dialogue Using Directed Graphical Models and Semantic Similarity", "abstract": "We use directed graphical models (DGMs) to automatically detect decision discussions in multi-party dialogue. Our approach distinguishes between different dialogue act (DA) types based on their role in the formulation of a decision. DGMs enable us to model dependencies, including sequential ones. We summarize decisions by extracting suitable phrases from DAs that concern the issue under discussion and its resolution. Here we use a semantic-similarity metric to improve results on both manual and ASR transcripts.", "phrases": ["multi-party dialogue", "directed graphical models", "discourse relation"], "overall_score": 1.9347986811244868, "scores": [0.9113810810111116, 0.8719671417777154, 0.552512580621499], "rank_score": 0.7786202678034421} -{"id": "wu-etal-2018-hard", "title": "Hard Non-Monotonic Attention for Character-Level Transduction", "abstract": "Character-level string-to-string transduction is an important component of various NLP tasks. The goal is to map an input string to an output string, where the strings may be of different lengths and have characters taken from different alphabets. Recent approaches have used sequence-to-sequence models with an attention mechanism to learn which parts of the input string the model should focus on during the generation of the output string. Both soft attention and hard monotonic attention have been used, but hard non-monotonic attention has only been used in other sequence modeling tasks and has required a stochastic approximation to compute the gradient. In this work, we introduce an exact, polynomial-time algorithm for marginalizing over the exponential number of non-monotonic alignments between two strings, showing that hard attention models can be viewed as neural reparameterizations of the classical IBM Model 1. We compare soft and hard non-monotonic attention experimentally and find that the exact algorithm significantly improves performance over the stochastic approximation and outperforms soft attention.", "phrases": ["transduction", "character", "hard non-monotonic attention"], "overall_score": 1.710671053131989, "scores": [0.959612918818459, 0.8096626596983787, 0.5664042661068307], "rank_score": 0.7785599482078895} -{"id": "qian-etal-2015-transition", "title": "A Transition-based Model for Joint Segmentation, POS-tagging and Normalization", "abstract": "We propose a transition-based model for joint word segmentation, POS tagging and text normalization. Different from previous methods, the model can be trained on standard text corpora, overcoming the lack of annotated microblog corpora. To evaluate our model, we develop an annotated corpus based on microblogs. 
Experimental results show that our joint model can help improve the performance of word segmentation on microblogs, giving an error reduction in segmentation accuracy of 12.02%, compared to the traditional approach.", "phrases": ["transition-based model", "normalization", "pos tagging", "joint model"], "overall_score": 1.618678780831049, "scores": [0.8175139160962047, 0.8155149412255103, 0.9096972155415386, 0.5709537269996106], "rank_score": 0.778419949965716} -{"id": "luo-etal-2018-marrying", "title": "Marrying Up Regular Expressions with Neural Networks: A Case Study for Spoken Language Understanding", "abstract": "The success of many natural language processing (NLP) tasks is bound by the number and quality of annotated data, but there is often a shortage of such training data. In this paper, we ask the question: \u201cCan we combine a neural network (NN) with regular expressions (RE) to improve supervised learning for NLP?\u201d. In answer, we develop novel methods to exploit the rich expressiveness of REs at different levels within a NN, showing that the combination significantly enhances the learning effectiveness when a small number of training examples are available. We evaluate our approach by applying it to spoken language understanding for intent detection and slot filling. Experimental results show that our approach is highly effective in exploiting the available training data, giving a clear boost to the RE-unaware NN.", "phrases": ["regular expression", "spoken language understanding", "different level"], "overall_score": 1.70986655265781, "scores": [0.9129284025814086, 0.8971530150203678, 0.5244999951877358], "rank_score": 0.7781938042631708} -{"id": "cattle-ma-2018-recognizing", "title": "Recognizing Humour using Word Associations and Humour Anchor Extraction", "abstract": "This paper attempts to marry the interpretability of statistical machine learning approaches with the more robust models of joke structure and joke semantics capable of being learned by neural models. Specifically, we explore the use of semantic relatedness features based on word associations, rather than the more common Word2Vec similarity, on a binary humour identification task and identify several factors that make word associations a better fit for humour. We also explore the effects of using joke structure, in the form of humour anchors (Yang et al., 2015), for improving the performance of semantic features and show that, while an intriguing idea, humour anchors contain several pitfalls that can hurt performance.", "phrases": ["humour", "word association", "factor"], "overall_score": 1.709569439865528, "scores": [0.9102300096058451, 0.9035782131209588, 0.5203675244853064], "rank_score": 0.7780585824040368} -{"id": "aroca-ouellette-etal-2021-prost", "title": "PROST: Physical Reasoning about Objects through Space and Time", "abstract": "We present a new probing dataset named PROST: Physical Reasoning about Objects Through Space and Time. This dataset contains 18,736 multiple-choice questions made from 14 manually curated templates, covering 10 physical reasoning concepts. All questions are designed to probe both causal and masked language models in a zero-shot setting. 
We conduct an extensive analysis which demonstrates that state-of-the-art pretrained models are inadequate at physical reasoning: they are influenced by the order in which answer options are presented to them, they struggle when the superlative in a question is inverted (e.g., most<->least), and increasing the amount of pretraining data and parameters only yields minimal improvements. These results provide support for the hypothesis that current pretrained models' ability to reason about physical interactions is inherently limited by a lack of real world experience. By highlighting these limitations, we hope to motivate the development of models with a human-like understanding of the physical world.", "phrases": ["physical reasoning", "object", "limitation", "prost"], "overall_score": 0.8546816270537968, "scores": [0.9419512172963023, 0.8263128511808692, 0.8059625482887284, 0.5376323561205607], "rank_score": 0.777964743221615} -{"id": "cohen-smith-2007-joint", "title": "Joint Morphological and Syntactic Disambiguation", "abstract": "In morphologically rich languages, should morphological and syntactic disambiguation be treated sequentially or as a single problem? We describe several efficient, probabilistically interpretable ways to apply joint inference to morphological and syntactic disambiguation using lattice parsing. Joint inference is shown to compare favorably to pipeline parsing methods across a variety of component models. State-of-the-art performance on Hebrew Treebank parsing is demonstrated using the new method. The benefits of joint inference are modest with the current component models, but appear to increase as components themselves improve.", "phrases": ["morphology", "syntactic disambiguation", "joint inference", "semitic language"], "overall_score": 1.994801431982371, "scores": [0.9751271601093625, 1.0746558974739555, 0.5368867480788233, 0.5241930676018729], "rank_score": 0.7777157183160036} -{"id": "shapira-etal-2018-evaluating", "title": "Evaluating Multiple System Summary Lengths: A Case Study", "abstract": "Practical summarization systems are expected to produce summaries of varying lengths, per user needs. While a couple of early summarization benchmarks tested systems across multiple summary lengths, this practice was mostly abandoned due to the assumed cost of producing reference summaries of multiple lengths. In this paper, we raise the research question of whether reference summaries of a single length can be used to reliably evaluate system summaries of multiple lengths. For that, we have analyzed a couple of datasets as a case study, using several variants of the ROUGE metric that are standard in summarization evaluation. Our findings indicate that the evaluation protocol in question is indeed competitive. This result paves the way to practically evaluating varying-length summaries with simple, possibly existing, summarization benchmarks.", "phrases": ["length", "case study", "summarization system"], "overall_score": 1.2515772316887703, "scores": [0.8180989955156686, 0.9121135814072545, 0.6027333951200039], "rank_score": 0.7776486573476423} -{"id": "brantley-etal-2019-non", "title": "Non-Monotonic Sequential Text Generation", "abstract": "Standard sequential generation methods assume a pre-specified generation order, such as text generation methods which generate words from left to right. 
In this work, we propose a framework for training models of text generation that operate in non-monotonic orders; the model directly learns good orders, without any additional annotation. Our framework operates by generating a word at an arbitrary position, and then recursively generating words to its left and then words to its right, yielding a binary tree. Learning is framed as imitation learning, including a coaching method which moves from imitating an oracle to reinforcing the policy's own preferences. Experimental results demonstrate that using the proposed method, it is possible to learn policies which generate text without pre-specifying a generation order while achieving competitive performance with conventional left-to-right generation.", "phrases": ["text generation", "arbitrary position", "policy"], "overall_score": 2.2031648692325834, "scores": [1.2738016205977307, 0.5370300690904477, 0.5220299077498595], "rank_score": 0.777620532479346} -{"id": "liu-etal-2016-jointly", "title": "Jointly Learning Grounded Task Structures from Language Instruction and Visual Demonstration", "abstract": "To enable language-based communication and collaboration with cognitive robots, this paper presents an approach where an agent can learn task models jointly from language instruction and visual demonstration using an And-Or Graph (AoG) representation. The learned AoG captures a hierarchical task structure where linguistic labels (for language communication) are grounded to corresponding state changes from the physical environment (for perception and action). Our empirical results on a cloth-folding domain have shown that, although state detection through visual processing is full of uncertainties and error prone, by a tight integration with language the agent is able to learn an effective AoG for task representation. The learned AoG can be further applied to infer and interpret on-going actions from new visual demonstration using linguistic labels at different levels of granularity.", "phrases": ["language instruction", "visual demonstration", "action"], "overall_score": 1.0778431260006927, "scores": [0.9155199555062195, 0.8662826809335525, 0.5506957626664148], "rank_score": 0.7774994663687288} -{"id": "cer-etal-2010-parsing", "title": "Parsing to Stanford Dependencies: Trade-offs between Speed and Accuracy", "abstract": "We investigate a number of approaches to generating Stanford Dependencies, a widely used semantically-oriented dependency representation. We examine algorithms specifically designed for dependency parsing (Nivre, Nivre Eager, Covington, Eisner, and RelEx) as well as dependencies extracted from constituent parse trees created by phrase structure parsers (Charniak, Charniak-Johnson, Bikel, Berkeley and Stanford). We found that constituent parsers systematically outperform algorithms designed specifically for dependency parsing. The most accurate method for generating dependencies is the Charniak-Johnson reranking parser, with 89% (labeled) attachment F1 score. The fastest methods are Nivre, Nivre Eager, and Covington, used with a linear classifier to make local parsing decisions, which can parse the entire Penn Treebank development set (section 22) in less than 10 seconds on an Intel Xeon E5520. However, this speed comes with a substantial drop in F1 score (about 76% for labeled attachment) compared to competing methods. 
By tuning how much of the search space is explored by the Charniak-Johnson parser, we are able to arrive at a balanced configuration that is both fast and nearly as good as the most accurate approaches.", "phrases": ["stanford dependencies", "trade-off", "setup"], "overall_score": 2.0516022621028824, "scores": [0.9513501670975485, 0.8334583063385432, 0.5473904967188848], "rank_score": 0.7773996567183254} -{"id": "joseph-etal-2017-constance", "title": "ConStance: Modeling Annotation Contexts to Improve Stance Classification", "abstract": "Manual annotations are a prerequisite for many applications of machine learning. However, weaknesses in the annotation process itself are easy to overlook. In particular, scholars often choose what information to give to annotators without examining these decisions empirically. For subjective tasks such as sentiment analysis, sarcasm, and stance detection, such choices can impact results. Here, for the task of political stance detection on Twitter, we show that providing too little context can result in noisy and uncertain annotations, whereas providing too strong a context may cause it to outweigh other signals. To characterize and reduce these biases, we develop ConStance, a general model for reasoning about annotations across information conditions. Given conflicting labels produced by multiple annotators seeing the same instances with different contexts, ConStance simultaneously estimates gold standard labels and also learns a classifier for new instances. We show that the classifier learned by ConStance outperforms a variety of baselines at predicting political stance, while the model's interpretable parameters shed light on the effects of each context.", "phrases": ["annotator", "stance detection", "constance"], "overall_score": 1.392893259067125, "scores": [0.8257364478676149, 0.9232854018895734, 0.5831437388536972], "rank_score": 0.7773885295369619} -{"id": "marin-etal-2011-detecting", "title": "Detecting Forum Authority Claims in Online Discussions", "abstract": "This paper explores the problem of detecting sentence-level forum authority claims in online discussions. Using a maximum entropy model, we explore a variety of strategies for extracting lexical features in a sparse training scenario, comparing knowledge- and data-driven methods (and combinations). The augmentation of lexical features with parse context is also investigated. We find that certain markup features perform remarkably well alone, but are outperformed by data-driven selection of lexical features augmented with parse context.", "phrases": ["authority claim", "online discussion", "wikipedia discussion"], "overall_score": 1.7898339637123297, "scores": [0.8767786004206297, 0.9018372326794588, 0.5533292087897808], "rank_score": 0.7773150139632898} -{"id": "derczynski-2016-complementarity", "title": "Complementarity, F-score, and NLP Evaluation", "abstract": "This paper addresses the problem of quantifying the differences between entity extraction systems, where in general only a small proportion of a document should be selected. Comparing overall accuracy is not very useful in these cases, as small differences in accuracy may correspond to huge differences in selections over the target minority class. Conventionally, one may use per-token complementarity to describe these differences, but it is not very useful when the set is heavily skewed. 
In such situations, which are common in information retrieval and entity recognition, metrics like precision and recall are typically used to describe performance. However, precision and recall fail to describe the differences between sets of objects selected by different decision strategies, instead just describing the proportional amount of correct and incorrect objects selected. This paper presents a method for measuring complementarity for precision, recall and F-score, quantifying the difference between entity extraction approaches.", "phrases": ["f-score", "recall", "complementarity"], "overall_score": 0.8539449154249164, "scores": [0.894504071323427, 0.8625707948083321, 0.5748076120631284], "rank_score": 0.7772941593982958} -{"id": "hedderich-etal-2020-transfer", "title": "Transfer Learning and Distant Supervision for Multilingual Transformer Models: A Study on African Languages", "abstract": "Multilingual transformer models like mBERT and XLM-RoBERTa have obtained great improvements for many NLP tasks on a variety of languages. However, recent works also showed that results from high-resource languages could not be easily transferred to realistic, low-resource scenarios. In this work, we study trends in performance for different amounts of available resources for the three African languages Hausa, isiXhosa and Yor\u00f9b\u00e1 on both NER and topic classification. We show that in combination with transfer learning or distant supervision, these models can achieve with as little as 10 or 100 labeled sentences the same performance as baselines with much more supervised training data. However, we also find settings where this does not hold. Our discussions and additional experiments on assumptions such as time and hardware restrictions highlight challenges and opportunities in low-resource learning.", "phrases": ["distant supervision", "transfer learning", "few-shot cross-lingual transfer"], "overall_score": 1.251003286993768, "scores": [0.8882592579624977, 0.8322451388016908, 0.6113717396102076], "rank_score": 0.7772920454581321} -{"id": "eshghi-etal-2015-feedback", "title": "Feedback in Conversation as Incremental Semantic Update", "abstract": "In conversation, interlocutors routinely indicate whether something said or done has been processed and integrated. Such feedback includes backchannels such as \u2018okay\u2019 or \u2018mhm\u2019, the production of a next relevant turn, and repair initiation via clarification requests. Importantly, such feedback can be produced not only at sentence/turn boundaries, but also sub-sententially. In this paper, we extend an existing model of incremental semantic processing in dialogue, based around the Dynamic Syntax (DS) grammar framework, to provide a low-level, integrated account of backchannels, clarification requests and their responses; demonstrating that they can be accounted for as part of the core semantic structure-building mechanisms of the grammar, rather than via higher level pragmatic phenomena such as intention recognition, or treatment as an \u201cunofficial\u201d part of the conversation. The end result is an incremental model in which words, not turns, are seen as procedures for contextual update and backchannels serve to align participant semantic processing contexts and thus ease the production and interpretation of subsequent conversational actions. 
We also show how clarification requests and their following responses and repair can be modelled within the same DS framework, wherein the divergence and re-alignment effort in participants\u2019 semantic processing drives conversations forward.", "phrases": ["conversation", "mechanism", "incremental semantic grammar"], "overall_score": 1.0775506623752236, "scores": [0.8855490282991768, 0.8403021817332883, 0.6060142853405641], "rank_score": 0.7772884984576764} -{"id": "benajiba-etal-2010-arabic", "title": "Arabic Named Entity Recognition: Using Features Extracted from Noisy Data", "abstract": "Building an accurate Named Entity Recognition (NER) system for languages with complex morphology is a challenging task. In this paper, we present research that explores the feature space using both gold and bootstrapped noisy features to build an improved highly accurate Arabic NER system. We bootstrap noisy features by projection from an Arabic-English parallel corpus that is automatically tagged with a baseline NER system. The feature space covers lexical, morphological, and syntactic features. The proposed approach yields an improvement of up to 1.64 F-measure (absolute).", "phrases": ["arabic ner system", "syntactic feature", "parallel corpora"], "overall_score": 1.3920781070538766, "scores": [0.9414584036207675, 0.8464312029603356, 0.542911147027237], "rank_score": 0.7769335845361134} -{"id": "zhang-etal-2006-word", "title": "Word Segmentation and Named Entity Recognition for SIGHAN Bakeoff3", "abstract": "We have participated in three open tracks of Chinese word segmentation and named entity recognition tasks of SIGHAN Bakeoff3. We take a probabilistic feature based Maximum Entropy (ME) model as our basic frame to combine multiple sources of knowledge. Our named entity recognizer achieved the highest F measure for MSRA, and word segmenter achieved the medium F measure for MSRA. We find effective combining of the external multi-knowledge is crucial to improve performance of word segmentation and named entity recognition.", "phrases": ["entity recognition", "sighan bakeoff3", "word segmentation"], "overall_score": 0.8532767751335413, "scores": [0.9051078843884395, 0.8066800622722632, 0.6182700290278865], "rank_score": 0.7766859918961964} -{"id": "zhou-etal-2011-unsupervised", "title": "Unsupervised Discovery of Discourse Relations for Eliminating Intra-sentence Polarity Ambiguities", "abstract": "Polarity classification of opinionated sentences with both positive and negative sentiments is a key challenge in sentiment analysis. This paper presents a novel unsupervised method for discovering intra-sentence level discourse relations for eliminating polarity ambiguities. Firstly, a discourse scheme with discourse constraints on polarity was defined empirically based on Rhetorical Structure Theory (RST). Then, a small set of cue-phrase-based patterns were utilized to collect a large number of discourse instances which were later converted to semantic sequential representations (SSRs). Finally, an unsupervised method was adopted to generate, weigh and filter new SSRs without cue phrases for recognizing discourse relations. 
Experimental results showed that the proposed methods not only effectively recognized the defined discourse relations but also achieved significant improvement by integrating discourse information in sentence-level polarity classification.", "phrases": ["discourse relation", "polarity ambiguity", "sentiment analysis"], "overall_score": 1.5113250587646612, "scores": [0.9305695980505546, 0.8574969419624112, 0.5419357874301107], "rank_score": 0.7766674424810255} -{"id": "muller-eberstein-etal-2021-genre", "title": "Genre as Weak Supervision for Cross-lingual Dependency Parsing", "abstract": "Recent work has shown that monolingual masked language models learn to represent data-driven notions of language variation which can be used for domain-targeted training data selection. Dataset genre labels are already frequently available, yet remain largely unexplored in cross-lingual setups. We harness this genre metadata as a weak supervision signal for targeted data selection in zero-shot dependency parsing. Specifically, we project treebank-level genre information to the finer-grained sentence level, with the goal to amplify information implicitly stored in unsupervised contextualized representations. We demonstrate that genre is recoverable from multilingual contextual embeddings and that it provides an effective signal for training data selection in cross-lingual, zero-shot scenarios. For 12 low-resource language treebanks, six of which are test-only, our genre-specific methods significantly outperform competitive baselines as well as recent embedding-based methods for data selection. Moreover, genre-based data selection provides new state-of-the-art results for three of these target languages.", "phrases": ["dependency parsing", "cross-lingual setup", "genre"], "overall_score": 1.0766460897354522, "scores": [0.8662898697072691, 0.838674815197072, 0.6249432767764047], "rank_score": 0.7766359872269154} -{"id": "ovrelid-etal-2010-syntactic", "title": "Syntactic Scope Resolution in Uncertainty Analysis", "abstract": "We show how the use of syntactic structure enables the resolution of hedge scope in a hybrid, two-stage approach to uncertainty analysis. In the first stage, a Maximum Entropy classifier, combining surface-oriented and syntactic features, identifies cue words. With a small set of hand-crafted rules operating over dependency representations in stage two, we attain the best overall result (in terms of both combined ranks and average F1) in the 2010 CoNLL Shared Task.", "phrases": ["uncertainty analysis", "syntactic feature", "heuristic rule"], "overall_score": 1.2497595910249566, "scores": [0.888879599556485, 0.8407704463414489, 0.5999078352162824], "rank_score": 0.7765192937047387} -{"id": "auli-lopez-2011-training", "title": "Training a Log-Linear Parser with Loss Functions via Softmax-Margin", "abstract": "Log-linear parsing models are often trained by optimizing likelihood, but we would prefer to optimise for a task-specific metric like F-measure. Softmax-margin is a convex objective for such models that minimises a bound on expected risk for a given loss function, but its naive application requires the loss to decompose over the predicted structure, which is not true of F-measure. We use softmax-margin to optimise a log-linear CCG parser for a variety of loss functions, and demonstrate a novel dynamic programming algorithm that enables us to use it with F-measure, leading to substantial gains in accuracy on CCG-Bank. 
When we embed our loss-trained parser into a larger model that includes supertagging features incorporated via belief propagation, we obtain further improvements and achieve a labelled/unlabelled dependency F-measure of 89.3%/94.0% on gold part-of-speech tags, and 87.2%/92.8% on automatic part-of-speech tags, the best reported results for this task.", "phrases": ["softmax-margin", "likelihood", "task-specific metric"], "overall_score": 1.706162767881651, "scores": [0.8806291183286424, 0.881390944116087, 0.567504355059418], "rank_score": 0.7765081391680492} -{"id": "wing-baldridge-2014-hierarchical", "title": "Hierarchical Discriminative Classification for Text-Based Geolocation", "abstract": "Text-based document geolocation is commonly rooted in language-based information retrieval techniques over geodesic grids. These methods ignore the natural hierarchy of cells in such grids and fall afoul of independence assumptions. We demonstrate the effectiveness of using logistic regression models on a hierarchy of nodes in the grid, which improves upon the state of the art accuracy by several percent and reduces mean error distances by hundreds of kilometers on data from Twitter, Wikipedia, and Flickr. We also show that logistic regression performs feature selection effectively, assigning high weights to geocentric terms.", "phrases": ["grid", "feature selection", "city", "different level"], "overall_score": 1.991559208154857, "scores": [1.1021194093290247, 0.9117836811001767, 0.5615883661238329, 0.5303152173469149], "rank_score": 0.7764516684749874} -{"id": "kavumba-etal-2019-choosing", "title": "When Choosing Plausible Alternatives, Clever Hans can be Clever", "abstract": "Pretrained language models, such as BERT and RoBERTa, have shown large improvements in the commonsense reasoning benchmark COPA. However, recent work found that many improvements in benchmarks of natural language understanding are not due to models learning the task, but due to their increasing ability to exploit superficial cues, such as tokens that occur more often in the correct answer than the wrong one. Are BERT's and RoBERTa's good performance on COPA also caused by this? We find superficial cues in COPA, as well as evidence that BERT exploits these cues. To remedy this problem, we introduce Balanced COPA, an extension of COPA that does not suffer from easy-to-exploit single token cues. We analyze BERT's and RoBERTa's performance on original and Balanced COPA, finding that BERT relies on superficial cues when they are present, but still achieves comparable performance once they are made ineffective, suggesting that BERT learns the task to a certain degree when forced to. In contrast, RoBERTa does not appear to rely on superficial cues.", "phrases": ["plausible alternatives", "cue", "balanced copa", "fine-tuning", "commonsense knowledge"], "overall_score": 1.7875502395481766, "scores": [0.9168035854199315, 1.2808677711407124, 0.5852766975931429, 0.5526489194333642, 0.5460190522158941], "rank_score": 0.7763232051606089} -{"id": "hovy-2011-invited", "title": "Invited Keynote: What are Subjectivity, Sentiment, and Affect?", "abstract": "Pragmatics \u2014the aspects of text that signal interpersonal and situational information, complementing semantics\u2014 has been almost totally ignored in Natural Language Processing. But in the past five to eight years there has been a surge of research on the general topic of \u2018opinion\u2019, also called \u2018sentiment\u2019. 
Generally, research focuses on determining the author\u2019s opinion/sentiment about some topic within a given fragment of text. Since opinions may differ, it is granted that the author\u2019s opinion is \u2018subjective\u2019, and the effectiveness of an opinion-determination system is measured by comparing against a gold-standard set of human annotations. But what does \u2018subjectivity\u2019 actually mean? What are \u2018opinion\u2019 and \u2018sentiment\u2019? Lately, researchers are also starting to talk about \u2018affect\u2019, and even \u2018emotion\u2019. What are these notions, and how do they differ from one another? Unfortunately, a survey of the research done to date shows a disturbing lack of clarity on these questions. Very few papers bother to define their terms, but simply take a set of valences such as Good\u2013Neutral\u2013Bad to be sufficient. More recent work acknowledges the need to specify what the opinion actually applies to, and attempts also to determine the theme. Lately, several identify the holder of the opinion. Some even try to estimate the strength of the expressed opinion. The trouble is, the same aspect of the same object can be considered Good by one person and Bad by another, and we can often understand both their points of view. There is much more to opinion/sentiment than simply matching words and phrases that attach to the theme, and computing a polarity score. People give reasons why they like or dislike something, and these reasons pertain to their goals and plans (in the case of opinions) or their deeper emotional states (in the case of affect). In this talk I outline a model of sentiment/opinion and of affect, and show that they appear in text in a fairly structured way, with various components. I show how proper understanding requires the reader to build some kind of person profile of the author, and claim that for systems to do adequate understanding of sentiments, opinions, and affects, they will need to do so as well. This is not a trivial challenge, and it opens the door to a whole new line of research with many fascinating and practical aspects.", "phrases": ["subjectivity", "affect", "election"], "overall_score": 1.249088038823995, "scores": [0.9054472440856097, 0.8784289817382595, 0.5444298787618337], "rank_score": 0.7761020348619009} -{"id": "kann-etal-2016-neural", "title": "Neural Morphological Analysis: Encoding-Decoding Canonical Segments", "abstract": "Canonical morphological segmentation aims to divide words into a sequence of standardized segments. In this work, we propose a character-based neural encoder-decoder model for this task. Additionally, we extend our model to include morpheme-level and lexical information through a neural reranker. We set the new state of the art for the task, improving previous results by up to 21% accuracy. Our experiments cover three languages: English, German and Indonesian.", "phrases": ["canonical segmentation", "encoder-decoder model", "morpheme", "allomorph"], "overall_score": 2.048134634865028, "scores": [1.0992399336224292, 0.9583507006664064, 0.5257345224717805, 0.5210176127291553], "rank_score": 0.7760856923724428} -{"id": "vu-etal-2009-feature", "title": "Feature-Based Method for Document Alignment in Comparable News Corpora", "abstract": "In this paper, we present a feature-based method to align documents with similar content across two sets of bilingual comparable corpora from daily news texts. 
We evaluate the contribution of each individual feature and investigate the incorporation of these diverse statistical and heuristic features for the task of bilingual document alignment. Experimental results on the English-Chinese and English-Malay comparable news corpora show that our proposed Discrete Fourier Transform-based term frequency distribution feature is very effective. It contributes 4.1% and 8% to performance improvement over Pearson's correlation method on the two comparable corpora. In addition, when more heuristic and statistical features as well as a bilingual dictionary are utilized, our method shows an absolute performance improvement of 23.2% and 15.3% on the two sets of bilingual corpora when comparing with a prior information retrieval-based method.", "phrases": ["document alignment", "feature-based method", "independent unit"], "overall_score": 1.0757655712412069, "scores": [0.948727380190383, 0.8469059611025046, 0.5323691408902702], "rank_score": 0.776000827394386} -{"id": "johansson-nugues-2008-effect", "title": "The Effect of Syntactic Representation on Semantic Role Labeling", "abstract": "Almost all automatic semantic role labeling (SRL) systems rely on a preliminary parsing step that derives a syntactic structure from the sentence being analyzed. This makes the choice of syntactic representation an essential design decision. In this paper, we study the influence of syntactic representation on the performance of SRL systems. Specifically, we compare constituent-based and dependency-based representations for SRL of English in the FrameNet paradigm. \n \nContrary to previous claims, our results demonstrate that the systems based on dependencies perform roughly as well as those based on constituents: For the argument classification task, dependency-based systems perform slightly higher on average, while the opposite holds for the argument identification task. This is remarkable because dependency parsers are still in their infancy while constituent parsing is more mature. Furthermore, the results show that dependency-based semantic role classifiers rely less on lexicalized features, which makes them more robust to domain changes and makes them learn more efficiently with respect to the amount of training data.", "phrases": ["semantic role labeling", "srl", "framenet", "argument identification task"], "overall_score": 1.613405573995131, "scores": [0.9698546283935946, 1.0507351289878109, 0.5564183570631085, 0.5265281796160025], "rank_score": 0.775884073515129} -{"id": "che-etal-2019-hit", "title": "HIT-SCIR at MRP 2019: A Unified Pipeline for Meaning Representation Parsing via Efficient Training and Effective Encoding", "abstract": "This paper describes our system (HIT-SCIR) for CoNLL 2019 shared task: Cross-Framework Meaning Representation Parsing. We extended the basic transition-based parser with two improvements: a) Efficient Training by realizing Stack LSTM parallel training; b) Effective Encoding via adopting deep contextualized word embeddings BERT. Generally, we proposed a unified pipeline to meaning representation parsing, including framework-specific transition-based parsers, BERT-enhanced word representation, and post-processing. 
In the final evaluation, our system was ranked first according to ALL-F1 (86.2%) and especially ranked first in UCCA framework (81.67%).", "phrases": ["unified pipeline", "meaning representation parsing", "transition-based parser"], "overall_score": 1.5097747356491595, "scores": [0.9406467027555, 0.8427303371003804, 0.5442351621496151], "rank_score": 0.7758707340018317} -{"id": "fukuda-etal-2020-improving", "title": "Improving Speech Recognition for the Elderly: A New Corpus of Elderly Japanese Speech and Investigation of Acoustic Modeling for Speech Recognition", "abstract": "In an aging society like Japan, a highly accurate speech recognition system is needed for use in electronic devices for the elderly, but this level of accuracy cannot be obtained using conventional speech recognition systems due to the unique features of the speech of elderly people. S-JNAS, a corpus of elderly Japanese speech, is widely used for acoustic modeling in Japan, but the average age of its speakers is 67.6 years old. Since average life expectancy in Japan is now 84.2 years, we are constructing a new speech corpus, which currently consists of the utterances of 221 speakers with an average age of 79.2, collected from four regions of Japan. In addition, we expand on our previous study (Fukuda, 2019) by further investigating the construction of acoustic models suitable for elderly speech. We create new acoustic models and train them using a combination of existing Japanese speech corpora (JNAS, S-JNAS, CSJ), with and without our `super-elderly' speech data, and conduct speech recognition experiments. Our new acoustic models achieve word error rates (WER) as low as 13.38%, exceeding the results of our previous study in which we used the CSJ acoustic model adapted for elderly speech (17.4% WER).", "phrases": ["speech recognition", "elderly japanese speech", "adaptation"], "overall_score": 1.0754031288970685, "scores": [0.9032714005926136, 0.9000614272116191, 0.5238853137201805], "rank_score": 0.7757393805081377} -{"id": "branavan-etal-2010-reading", "title": "Reading between the Lines: Learning to Map High-Level Instructions to Commands", "abstract": "In this paper, we address the task of mapping high-level instructions to sequences of commands in an external environment. Processing these instructions is challenging---they posit goals to be achieved without specifying the steps required to complete them. We describe a method that fills in missing information using an automatically derived environment model that encodes states, transitions, and commands that cause these transitions to happen. We present an efficient approximate approach for learning this environment model as part of a policy-gradient reinforcement learning algorithm for text interpretation. This design enables learning for mapping high-level instructions, which previous statistical methods cannot handle.", "phrases": ["high-level instruction", "command", "environment"], "overall_score": 1.2482350886430649, "scores": [0.8765321370320895, 0.8395945789706281, 0.6105894853484325], "rank_score": 0.7755720671170501} -{"id": "cocos-etal-2018-learning", "title": "Learning Scalar Adjective Intensity from Paraphrases", "abstract": "Adjectives like \u201cwarm\u201d, \u201chot\u201d, and \u201cscalding\u201d all describe temperature but differ in intensity. Understanding these differences between adjectives is a necessary part of reasoning about natural language. 
We propose a new paraphrase-based method to automatically learn the relative intensity relation that holds between a pair of scalar adjectives. Our approach analyzes over 36k adjectival pairs from the Paraphrase Database under the assumption that, for example, paraphrase pair \u201creally hot\u201d \u2013 \u201cscalding\u201d suggests that \u201chot\u201d < \u201cscalding\u201d. We show that combining this paraphrase evidence with existing, complementary pattern- and lexicon-based approaches improves the quality of systems for automatically ordering sets of scalar adjectives and inferring the polarity of indirect answers to \u201cyes/no\u201d questions.", "phrases": ["adjective", "intensity", "paraphrase"], "overall_score": 1.2480270843562284, "scores": [0.8353551510447131, 0.9482561965501144, 0.5427171327664735], "rank_score": 0.7754428267871004} -{"id": "mayhew-etal-2019-named", "title": "Named Entity Recognition with Partially Annotated Training Data", "abstract": "Supervised machine learning assumes the availability of fully-labeled data, but in many cases, such as low-resource languages, the only data available is partially annotated. We study the problem of Named Entity Recognition (NER) with partially annotated training data in which a fraction of the named entities are labeled, and all other tokens, entities or otherwise, are labeled as non-entity by default. In order to train on this noisy dataset, we need to distinguish between the true and false negatives. To this end, we introduce a constraint-driven iterative algorithm that learns to detect false negatives in the noisy set and downweigh them, resulting in a weighted training set. With this set, we train a weighted NER model. We evaluate our algorithm with weighted variants of neural and non-neural NER models on data in 8 languages from several language and script families, showing strong ability to learn from partial data. Finally, to show real-world efficacy, we evaluate on a Bengali NER corpus annotated by non-speakers, outperforming the prior state-of-the-art by over 5 points F1.", "phrases": ["low-resource language", "ner model", "noisy label", "training procedure"], "overall_score": 1.859390623718396, "scores": [1.0828025959456145, 0.9131942373754371, 0.5534410479303018, 0.5522665923530607], "rank_score": 0.7754261184011035} -{"id": "kaneko-etal-2022-gender", "title": "Gender Bias in Masked Language Models for Multiple Languages", "abstract": "Masked Language Models (MLMs) pre-trained by predicting masked tokens on large corpora have been used successfully in natural language processing tasks for a variety of languages. Unfortunately, it was reported that MLMs also learn discriminative biases regarding attributes such as gender and race. Because most studies have focused on MLMs in English, the bias of MLMs in other languages has rarely been investigated. Manual annotation of evaluation data for languages other than English has been challenging due to the cost and difficulty in recruiting annotators. Moreover, the existing bias evaluation methods require the stereotypical sentence pairs consisting of the same context with attribute words (e.g. He/She is a nurse). We propose Multilingual Bias Evaluation (MBE) score, to evaluate bias in various languages using only English attribute word lists and parallel corpora between the target language and English without requiring manually annotated data. 
We evaluated MLMs in eight languages using the MBE and confirmed that gender-related biases are encoded in MLMs for all those languages. We manually created datasets for gender bias in Japanese and Russian to evaluate the validity of the MBE. The results show that the bias scores reported by the MBE significantly correlate with those computed from the above manually created datasets and the existing English datasets for gender bias.", "phrases": ["masked language models", "mlm", "gender bias"], "overall_score": 0.8518905364601204, "scores": [0.9149697664281556, 0.8912143397374913, 0.5200884430688973], "rank_score": 0.7754241830781815} -{"id": "bond-etal-2016-cili", "title": "CILI: the Collaborative Interlingual Index", "abstract": "This paper introduces the motivation for and design of the Collaborative InterLingual Index (CILI). It is designed to make possible coordination between multiple loosely coupled wordnet projects. The structure of the CILI is based on the Interlingual index first proposed in the EuroWordNet project with several pragmatic extensions: an explicit open license, definitions in English and links to wordnets in the Global Wordnet Grid.", "phrases": ["collaborative interlingual index", "wordnet", "cili"], "overall_score": 1.5083818006971184, "scores": [0.9546811721601671, 0.818013287980755, 0.5527702609759244], "rank_score": 0.7751549070389488} -{"id": "huang-etal-2016-attention", "title": "Attention-based Multimodal Neural Machine Translation", "abstract": "We present a novel neural machine translation (NMT) architecture associating visual and textual features for translation tasks with multiple modalities. Transformed global and regional visual features are concatenated with text to form attendable sequences which are dissipated over parallel long short-term memory (LSTM) threads to assist the encoder generating a representation for attention-based decoding. Experiments show that the proposed NMT outperforms the text-only baseline.", "phrases": ["image feature", "convolutional neural network", "encode word sequence", "multimodal system"], "overall_score": 2.430261695343591, "scores": [1.0450226980298198, 0.9339757958022942, 0.5651942798089218, 0.5561308474292386], "rank_score": 0.7750809052675687} -{"id": "gamon-2004-linguistic", "title": "Linguistic correlates of style: authorship classification with deep linguistic analysis features", "abstract": "The identification of authorship falls into the category of style classification, an interesting sub-field of text categorization that deals with properties of the form of linguistic expression as opposed to the content of a text. Various feature sets and classification methods have been proposed in the literature, geared towards abstracting away from the content of a text, and focusing on its stylistic properties. We demonstrate that in a realistically difficult authorship attribution scenario, deep linguistic analysis features such as context free production frequencies and semantic relationship frequencies achieve significant error reduction over more commonly used "shallow" features such as function word frequencies and part of speech trigrams. 
Modern machine learning techniques like support vector machines allow us to explore large feature vectors, combining these different feature sets to achieve high classification accuracy in style-based tasks.", "phrases": ["authorship classification", "linguistic analysis feature", "support vector machine"], "overall_score": 1.9258636287579647, "scores": [0.9054292101791311, 0.889624133413644, 0.5300202710505796], "rank_score": 0.7750245382144515} -{"id": "hoyle-etal-2019-unsupervised", "title": "Unsupervised Discovery of Gendered Language through Latent-Variable Modeling", "abstract": "Studying the ways in which language is gendered has long been an area of interest in sociolinguistics. Studies have explored, for example, the speech of male and female characters in film and the language used to describe male and female politicians. In this paper, we aim not to merely study this phenomenon qualitatively, but instead to quantify the degree to which the language used to describe men and women is different and, moreover, different in a positive or negative way. To that end, we introduce a generative latent-variable model that jointly represents adjective (or verb) choice, with its sentiment, given the natural gender of a head (or dependent) noun. We find that there are significant differences between descriptions of male and female nouns and that these differences align with common gender stereotypes: Positive adjectives used to describe women are more often related to their bodies than adjectives used to describe men.", "phrases": ["latent-variable model", "adjective", "woman"], "overall_score": 1.5078065743029796, "scores": [0.8519666262747867, 0.8480946522459045, 0.6245166189248492], "rank_score": 0.7748592991485136} -{"id": "duong-etal-2015-neural", "title": "A Neural Network Model for Low-Resource Universal Dependency Parsing", "abstract": "Accurate dependency parsing requires large treebanks, which are only available for a few languages. We propose a method that takes advantage of shared structure across languages to build a mature parser using less training data. We propose a model for learning a shared \u201cuniversal\u201d parser that operates over an interlingual continuous representation of language, along with language-specific mapping components. Compared with supervised learning, our methods give a consistent 8-10% improvement across several treebanks in low-resource simulations.", "phrases": ["neural network model", "treebank", "low-resource language"], "overall_score": 1.8579548225323885, "scores": [0.854796879700146, 0.8885239414265238, 0.5811612072700095], "rank_score": 0.7748273427988931} -{"id": "blunsom-etal-2009-gibbs", "title": "A Gibbs Sampler for Phrasal Synchronous Grammar Induction", "abstract": "We present a phrasal synchronous grammar model of translational equivalence. Unlike previous approaches, we do not resort to heuristics or constraints from a word-alignment model, but instead directly induce a synchronous grammar from parallel sentence-aligned corpora. We use a hierarchical Bayesian prior to bias towards compact grammars with small translation units. Inference is performed using a novel Gibbs sampler over synchronous derivations. This sampler side-steps the intractability issues of previous models which required inference over derivation forests. 
Instead, each sampling iteration is highly efficient, allowing the model to be applied to larger translation corpora than previous approaches.", "phrases": ["gibbs sampler", "phrasal", "grammar induction", "word-alignment model", "parallel corpus"], "overall_score": 2.358853895075924, "scores": [1.638435558822796, 0.6010025119311865, 0.5639069109065814, 0.5395319276029679, 0.5310539671332676], "rank_score": 0.7747861752793599} -{"id": "zhao-etal-2019-data", "title": "Data Augmentation with Atomic Templates for Spoken Language Understanding", "abstract": "Spoken Language Understanding (SLU) converts user utterances into structured semantic representations. Data sparsity is one of the main obstacles of SLU due to the high cost of human annotation, especially when domain changes or a new domain comes. In this work, we propose a data augmentation method with atomic templates for SLU, which involves minimum human efforts. The atomic templates produce exemplars for fine-grained constituents of semantic representations. We propose an encoder-decoder model to generate the whole utterance from atomic exemplars. Moreover, the generator could be transferred from source domains to help a new domain which has little data. Experimental results show that our method achieves significant improvements on DSTC 2&3 dataset which is a domain adaptation setting of SLU.", "phrases": ["spoken language understanding", "new domain", "data augmentation"], "overall_score": 1.702297717049964, "scores": [0.9480453142979333, 0.8502015800462254, 0.5260003418401263], "rank_score": 0.774749078728095} -{"id": "jiang-etal-2020-x", "title": "X-FACTR: Multilingual Factual Knowledge Retrieval from Pretrained Language Models", "abstract": "Language models (LMs) have proven surprisingly successful at capturing factual knowledge by completing cloze-style fill-in-the-blank questions such as \u201cPunta Cana is located in _.\u201d However, while knowledge is both written and queried in many languages, studies on LMs' factual representation ability have almost invariably been performed on English. To assess factual knowledge retrieval in LMs in different languages, we create a multilingual benchmark of cloze-style probes for typologically diverse languages. To properly handle language variations, we expand probing methods from single- to multi-word entities, and develop several decoding algorithms to generate multi-token predictions. Extensive experimental results provide insights about how well (or poorly) current state-of-the-art LMs perform at this task in languages with more or fewer available resources. We further propose a code-switching-based method to improve the ability of multilingual LMs to access knowledge, and verify its effectiveness on several benchmark languages. Benchmark data and code have been released at .", "phrases": ["factual knowledge retrieval", "language model", "x-factr"], "overall_score": 1.8577490392537248, "scores": [0.792685998733658, 0.891898511181376, 0.6396400636031969], "rank_score": 0.7747415245060769} -{"id": "przepiorkowski-2007-slavic", "title": "Slavic Information Extraction and Partial Parsing", "abstract": "Information Extraction (IE) often involves some amount of partial syntactic processing. This is clear in cases of interesting high-level IE tasks, such as finding information about who did what to whom (when, where, how and why), but it is also true in case of simpler IE tasks, such as finding company names in texts. 
The aim of this paper is to give an overview of Slavonic phenomena which pose particular problems for IE and partial parsing, and some phenomena which seem easier to treat in Slavonic than in Germanic or Romance; I also mention various tools which have been used for the partial processing of Slavonic.", "phrases": ["partial parsing", "slavic language", "word order", "rich inflection", "nes"], "overall_score": 1.6110000137788894, "scores": [0.944396515859294, 0.9321067972781549, 0.8923176639931276, 0.5646088492137303, 0.5402063915737864], "rank_score": 0.7747272435836186} -{"id": "botha-etal-2018-learning", "title": "Learning To Split and Rephrase From Wikipedia Edit History", "abstract": "Split and rephrase is the task of breaking down a sentence into shorter ones that together convey the same meaning. We extract a rich new dataset for this task by mining Wikipedia's edit history: WikiSplit contains one million naturally occurring sentence rewrites, providing sixty times more distinct split examples and a ninety times larger vocabulary than the WebSplit corpus introduced by Narayan et al. (2017) as a benchmark for this task. Incorporating WikiSplit as training data produces a model with qualitatively better predictions that score 32 BLEU points above the prior best result on the WebSplit benchmark.", "phrases": ["split", "wikipedia edit history", "new dataset", "sentence-to-sentence generation task"], "overall_score": 1.9250600292100548, "scores": [0.9663890817125398, 0.7857180319201577, 0.824458716211759, 0.5222387540207623], "rank_score": 0.7747011459663047} -{"id": "kim-lee-2016-recurrent-neural", "title": "Recurrent Neural Network based Translation Quality Estimation", "abstract": "This paper describes the recurrent neural network based model for translation quality estimation. The recurrent neural network based quality estimation model consists of two parts. The first part using two bidirectional recurrent neural networks generates the quality information about whether each word in translation is properly translated. The second part using another recurrent neural network predicts the final quality of translation. We apply this model to the sentence, word and phrase levels of the WMT16 Quality Estimation Shared Task. Our results achieve excellent performance, especially in sentence- and phrase-level QE.", "phrases": ["translation quality estimation", "recurrent neural network", "predictor-estimator architecture"], "overall_score": 1.2467806114501214, "scores": [0.9602139250900362, 0.8414607748731654, 0.5223303489134609], "rank_score": 0.7746683496255541} -{"id": "lee-etal-2018-improving", "title": "Improving Large-Scale Fact-Checking using Decomposable Attention Models and Lexical Tagging", "abstract": "Fact-checking of textual sources needs to effectively extract relevant information from large knowledge bases. In this paper, we extend an existing pipeline approach to better tackle this problem. We propose a neural ranker using a decomposable attention model that dynamically selects sentences to achieve promising improvement in evidence retrieval F1 by 38.80%, with (x65) speedup compared to a TF-IDF method. Moreover, we incorporate lexical tagging methods into our pipeline framework to simplify the tasks and render the model more generalizable. 
As a result, our framework achieves promising performance on a large-scale fact extraction and verification dataset with speedup.", "phrases": ["fact-checking", "decomposable attention model", "lexical tagging"], "overall_score": 0.851042671105426, "scores": [0.8672609119086863, 0.8654430197690238, 0.5912533366426123], "rank_score": 0.7746524227734408} -{"id": "laokulrat-etal-2013-uttime", "title": "UTTime: Temporal Relation Classification using Deep Syntactic Features", "abstract": "In this paper, we present a system, UTTime, which we submitted to TempEval-3 for Task C: Annotating temporal relations. The system uses logistic regression classifiers and exploits features extracted from a deep syntactic parser, including paths between event words in phrase structure trees and their path lengths, and paths between event words in predicate-argument structures and their subgraphs. UTTime achieved an F1 score of 34.9 based on the graph-based evaluation for Task C (ranked 2nd) and 56.45 for Task C-relation-only (ranked 1st) in the TempEval-3 evaluation.", "phrases": ["deep syntactic parser", "uttime", "predicate-argument structure feature"], "overall_score": 1.701939348570782, "scores": [0.8112837281714573, 0.9623461090102631, 0.5501280974315574], "rank_score": 0.774585978204426} -{"id": "sun-etal-2018-extracting", "title": "Extracting Entities and Relations with Joint Minimum Risk Training", "abstract": "We investigate the task of joint entity relation extraction. Unlike prior efforts, we propose a new lightweight joint learning paradigm based on minimum risk training (MRT). Specifically, our algorithm optimizes a global loss function which is flexible and effective to explore interactions between the entity model and the relation model. We implement a strong and simple neural network where the MRT is executed. Experiment results on the benchmark ACE05 and NYT datasets show that our model is able to achieve state-of-the-art joint extraction performances.", "phrases": ["minimum risk training", "joint learning paradigm", "loss function"], "overall_score": 1.5071443817103811, "scores": [0.8893676166064344, 0.8684150836456791, 0.56577429816643], "rank_score": 0.7745189994728477} -{"id": "ji-grishman-2004-applying", "title": "Applying Coreference to Improve Name Recognition", "abstract": "We present a novel method of applying the results of coreference resolution to improve Name Recognition for Chinese. We consider first some methods for gauging the confidence of individual tags assigned by a statistical name tagger. For names with low confidence, we show how these names can be filtered using coreference features to improve accuracy. In addition, we present rules which use coreference information to correct some name tagging errors. Finally, we show how these gains can be magnified by clustering documents and using cross-document coreference in these clusters. These combined methods yield an absolute improvement of about 3.1% in tagger F score.", "phrases": ["coreference", "name recognition", "chinese"], "overall_score": 1.0737089980264654, "scores": [0.9207140304329776, 0.8567405134076606, 0.546097426375321], "rank_score": 0.7745173234053198} -{"id": "gimenez-marquez-2007-linguistic", "title": "Linguistic Features for Automatic Evaluation of Heterogenous MT Systems", "abstract": "Evaluation results recently reported by Callison-Burch et al. (2006) and Koehn and Monz (2006), revealed that, in certain cases, the BLEU metric may not be a reliable MT quality indicator. 
This happens, for instance, when the systems under evaluation are based on different paradigms, and therefore, do not share the same lexicon. The reason is that, while MT quality aspects are diverse, BLEU limits its scope to the lexical dimension. In this work, we suggest using metrics which take into account linguistic features at more abstract levels. We provide experimental results showing that metrics based on deeper linguistic information (syntactic/shallow-semantic) are able to produce more reliable system rankings than metrics based on lexical matching alone, especially when the systems under evaluation are of a different nature.", "phrases": ["translation quality", "improved correlation", "human judgment", "gim\u00e9nez", "predicate-argument structure"], "overall_score": 2.0973487094386183, "scores": [1.172845622446865, 1.06856279202732, 0.5601320610391539, 0.5369359126806778, 0.5339568270118376], "rank_score": 0.7744866430411709} -{"id": "kok-brockett-2010-hitting", "title": "Hitting the Right Paraphrases in Good Time", "abstract": "We present a random-walk-based approach to learning paraphrases from bilingual parallel corpora. The corpora are represented as a graph in which a node corresponds to a phrase, and an edge exists between two nodes if their corresponding phrases are aligned in a phrase table. We sample random walks to compute the average number of steps it takes to reach a ranking of paraphrases with better ones being \"closer\" to a phrase of interest. This approach allows \"feature\" nodes that represent domain knowledge to be built into the graph, and incorporates truncation techniques to prevent the graph from growing too large for efficiency. Current approaches, by contrast, implicitly presuppose the graph to be bipartite, are limited to finding paraphrases that are of length two away from a phrase, and do not generally permit easy incorporation of domain knowledge. Manual evaluation of generated output shows that our approach outperforms the state-of-the-art system of Callison-Burch (2008).", "phrases": ["paraphrase", "parallel corpora", "random walk"], "overall_score": 1.7015798290140356, "scores": [0.9505398296007356, 0.8364803672191385, 0.5362468645884687], "rank_score": 0.7744223538027809} -{"id": "dungs-etal-2018-rumour", "title": "Can Rumour Stance Alone Predict Veracity?", "abstract": "Prior manual studies of rumours suggested that crowd stance can give insights into the actual rumour veracity. Even though numerous studies of automatic veracity classification of social media rumours have been carried out, none explored the effectiveness of leveraging crowd stance to determine veracity. We use stance as an additional feature to those commonly used in earlier studies. We also model the veracity of a rumour using variants of Hidden Markov Models (HMM) and the collective stance information. This paper demonstrates that HMMs that use stance and tweets' times as the only features for modelling true and false rumours achieve F1 scores in the range of 80%, outperforming those approaches where stance is used jointly with content and user-based features.", "phrases": ["stance", "veracity", "rumor detection", "claim-level"], "overall_score": 1.924362161853447, "scores": [1.0879172018709717, 0.920531801688516, 0.5593655063981391, 0.5298667039706695], "rank_score": 0.774420303482074} -{"id": "srikanth-murthy-2008-named", "title": "Named Entity Recognition for Telugu", "abstract": "This paper is about Named Entity Recognition (NER) for Telugu. 
Not much work has been done in NER for Indian languages in general and Telugu in particular. Adequate annotated corpora are not yet available in Telugu. We recognize that named entities are usually nouns. In this paper we therefore start with our experiments in building a CRF (Conditional Random Fields) based Noun Tagger. Trained on manually tagged data of 13,425 words and tested on a test data set of 6,223 words, this Noun Tagger has given an F-Measure of about 92%. We then develop a rule-based NER system for Telugu. Our focus is mainly on identifying person, place and organization names. A manually checked Named Entity tagged corpus of 72,157 words has been developed using this rule-based tagger through bootstrapping. We have then developed a CRF based NER system for Telugu and tested it on several data sets from the Eenaadu and Andhra Prabha newspaper corpora that we have developed. Good performance has been obtained using the majority tag concept. We have obtained overall F-measures between 80% and 97% in various experiments.", "phrases": ["entity recognition", "telugu", "indian language"], "overall_score": 1.0733589365119605, "scores": [0.9473732878998116, 0.8439601751555927, 0.5314609591440813], "rank_score": 0.7742648073998285} -{"id": "wachsmuth-etal-2014-modeling", "title": "Modeling Review Argumentation for Robust Sentiment Analysis", "abstract": "Most text classification approaches model text at the lexical and syntactic level only, lacking domain robustness and explainability. In tasks like sentiment analysis, such approaches can result in limited effectiveness if the texts to be classified consist of a series of arguments. In this paper, we claim that even a shallow model of the argumentation of a text allows for an effective and more robust classification, while providing intuitive explanations of the classification results. Here, we apply this idea to the supervised prediction of sentiment scores for reviews. We combine existing approaches from sentiment analysis with novel features that compare the overall argumentation structure of the given review text to a learned set of common sentiment flow patterns. Our evaluation in two domains demonstrates the benefit of modeling argumentation for text classification in terms of effectiveness and robustness.", "phrases": ["review", "argumentation", "global sentiment"], "overall_score": 1.5065665916797077, "scores": [1.1100967171556388, 0.6140448479333889, 0.598524657312513], "rank_score": 0.7742220741338469} -{"id": "kato-etal-2004-stochastically", "title": "Stochastically Evaluating the Validity of Partial Parse Trees in Incremental Parsing", "abstract": "This paper proposes a method for evaluating the validity of partial parse trees constructed in incremental parsing. Our method is based on stochastic incremental parsing, and it incrementally evaluates the validity for each partial parse tree on a word-by-word basis. In our method, the incremental parser returns partial parse trees at the point where the validity for the partial parse tree becomes greater than a threshold. 
Our technique is effective for improving the accuracy of incremental parsing.", "phrases": ["validity", "incremental parser", "transition-based parsing"], "overall_score": 1.7827091663509014, "scores": [0.8247240082451405, 0.8954755525022234, 0.6024627006062641], "rank_score": 0.7742207537845426} -{"id": "kim-hassan-2020-fastformers", "title": "FastFormers: Highly Efficient Transformer Models for Natural Language Understanding", "abstract": "Transformer-based models are the state-of-the-art for Natural Language Understanding (NLU) applications. Models are getting bigger and better on various tasks. However, Transformer models remain computationally challenging since they are not efficient at inference time compared to traditional approaches. In this paper, we present FastFormers, a set of recipes to achieve efficient inference-time performance for Transformer-based models on various NLU tasks. We show how carefully utilizing knowledge distillation, structured pruning and numerical optimization can lead to drastic improvements on inference efficiency. We provide effective recipes that can guide practitioners to choose the best settings for various NLU tasks and pretrained models. Applying the proposed recipes to the SuperGLUE benchmark, we achieve from 9.8x up to 233.9x speed-up compared to out-of-the-box models on CPU. On GPU, we also achieve up to 12.4x speed-up with the presented methods. We show that FastFormers can drastically reduce the cost of serving 100 million requests from 4,223 USD to just 18 USD on an Azure F16s_v2 instance. This translates to a sustainable runtime by reducing energy consumption 6.9x - 125.8x according to the metrics used in the SustaiNLP 2020 shared task.", "phrases": ["natural language understanding", "pruning", "fastformers"], "overall_score": 1.0731380766390028, "scores": [0.9519563790328516, 0.801102751339789, 0.5692573416616742], "rank_score": 0.7741054906781049} -{"id": "wang-etal-2021-dynamic", "title": "Dynamic Connected Networks for Chinese Spelling Check", "abstract": "Chinese spelling check (CSC) is a task to detect and correct spelling errors in Chinese text. Most state-of-the-art works on the CSC task adopt a BERT-based non-autoregressive language model, which relies on the output independence assumption. The inappropriate independence assumption prevents BERT-based models from learning the dependencies among target tokens, resulting in an incoherent problem. To address the above issue, we propose a novel architecture named Dynamic Connected Networks (DCN), which generates the candidate Chinese characters via a Pinyin Enhanced Candidate Generator and then utilizes an attention-based network to model the dependencies between two adjacent Chinese characters. The experimental results show that our proposed method achieves a new state-of-the-art performance on three human-annotated datasets.", "phrases": ["chinese spelling check", "language model", "dynamic connected networks"], "overall_score": 1.2458578355484415, "scores": [0.9572611362859694, 0.8214306822723465, 0.5435931716048954], "rank_score": 0.7740949967210705} -{"id": "haque-etal-2009-dependency", "title": "Dependency Relations as Source Context in Phrase-Based SMT", "abstract": "The Phrase-Based Statistical Machine Translation (PB-SMT) model has recently begun to include source context modeling, under the assumption that the proper lexical choice of an ambiguous word can be determined from the context in which it appears. 
Various types of lexical and syntactic features such as words, parts-of-speech, and supertags have been explored as effective source context in SMT. In this paper, we show that position-independent syntactic dependency relations of the head of a source phrase can be modeled as useful source context to improve target phrase selection and thereby improve overall performance of PB-SMT. On a Dutch\u2014English translation task, by combining dependency relations and syntactic contextual features (part-of-speech), we achieved a 1.0 BLEU (Papineni et al., 2002) point improvement (3.1% relative) over the baseline.", "phrases": ["source context", "phrase-based smt", "supertag"], "overall_score": 0.8501802585962253, "scores": [0.9725948525955258, 0.8256965377790507, 0.5233108728595214], "rank_score": 0.7738674210780326} -{"id": "geertzen-etal-2007-multidimensional", "title": "A Multidimensional Approach to Utterance Segmentation and Dialogue Act Classification", "abstract": "In this paper we present a multidimensional approach to utterance segmentation and automatic dialogue act classification. We show that the use of multiple dimensions in distinguishing and annotating units not only supports a more accurate analysis of human communication, but can also help to solve some notorious problems concerning the segmentation of dialogue into functional units. We introduce the use of per-dimension segmentation for dialogue act taxonomies that feature multi-functionality and show that better classification results are obtained when using a separate segmentation for each dimension than when using one segmentation that fits all dimensions. Three machine learning techniques are applied and compared on the task of automatic classification of multiple communicative functions of utterances. The results are encouraging and indicate that communicative functions in important dimensions are easily machine-learnable.", "phrases": ["multidimensional approach", "dialogue act classification", "communicative function"], "overall_score": 1.0725184242297379, "scores": [0.9707517997478555, 0.7861800965480066, 0.56404362155157], "rank_score": 0.7736585059491441} -{"id": "cao-etal-2017-quasi", "title": "Quasi-Second-Order Parsing for 1-Endpoint-Crossing, Pagenumber-2 Graphs", "abstract": "We propose a new Maximum Subgraph algorithm for first-order parsing to 1-endpoint-crossing, pagenumber-2 graphs. Our algorithm has two characteristics: (1) it separates the construction for noncrossing edges and crossing edges; (2) in a single construction step, whether to create a new arc is deterministic. These two characteristics make our algorithm relatively easy to extend to incorporate crossing-sensitive second-order features. We then introduce a new algorithm for quasi-second-order parsing. Experiments demonstrate that second-order features are helpful for Maximum Subgraph parsing.", "phrases": ["1-endpoint-crossing", "pagenumber-2 graph", "quasi-second-order", "dynamic programming"], "overall_score": 1.0724617491024628, "scores": [0.8841199496620795, 0.8338771763519058, 0.8485452366073579, 0.5279281313251105], "rank_score": 0.7736176234866134} -{"id": "jiang-bansal-2019-self", "title": "Self-Assembling Modular Networks for Interpretable Multi-Hop Reasoning", "abstract": "Multi-hop QA requires a model to connect multiple pieces of evidence scattered in a long context to answer the question. 
The recently proposed HotpotQA (Yang et al., 2018) dataset is comprised of questions embodying four different multi-hop reasoning paradigms (two bridge entity setups, checking multiple properties, and comparing two entities), making it challenging for a single neural network to handle all four. In this work, we present an interpretable, controller-based Self-Assembling Neural Modular Network (Hu et al., 2017, 2018) for multi-hop reasoning, where we design four novel modules (Find, Relocate, Compare, NoOp) to perform unique types of language reasoning. Based on a question, our layout controller RNN dynamically infers a series of reasoning modules to construct the entire network. Empirically, we show that our dynamic, multi-hop modular network achieves significant improvements over the static, single-hop baseline (on both regular and adversarial evaluation). We further demonstrate the interpretability of our model via three analyses. First, the controller can softly decompose the multi-hop question into multiple single-hop sub-questions to promote compositional reasoning behavior of the main network. Second, the controller can predict layouts that conform to the layouts designed by human experts. Finally, the intermediate module can infer the entity that connects two distantly-located supporting facts by addressing the sub-question from the controller.", "phrases": ["modular network", "interpretable multi-hop reasoning", "single-hop sub-question"], "overall_score": 1.922256973453148, "scores": [0.8924804315222225, 0.8654873657557531, 0.5627515427630014], "rank_score": 0.7735731133469924} -{"id": "emami-etal-2019-knowref", "title": "The KnowRef Coreference Corpus: Removing Gender and Number Cues for Difficult Pronominal Anaphora Resolution", "abstract": "We introduce a new benchmark for coreference resolution and NLI, KnowRef, that targets common-sense understanding and world knowledge. Previous coreference resolution tasks can largely be solved by exploiting the number and gender of the antecedents, or have been handcrafted and do not reflect the diversity of naturally occurring text. We present a corpus of over 8,000 annotated text passages with ambiguous pronominal anaphora. These instances are both challenging and realistic. We show that various coreference systems, whether rule-based, feature-rich, or neural, perform significantly worse on the task than humans, who display high inter-annotator agreement. To explain this performance gap, we show empirically that state-of-the art models often fail to capture context, instead relying on the gender or number of candidate antecedents to make a decision. We then use problem-specific insights to propose a data-augmentation trick called antecedent switching to alleviate this tendency in models. 
Finally, we show that antecedent switching yields promising results on other tasks as well: we use it to achieve state-of-the-art results on the GAP coreference task.", "phrases": ["gender", "coreference resolution", "pronoun", "language bias", "commonsense reasoning"], "overall_score": 1.8547118202905792, "scores": [1.676476091342835, 0.5636017598567519, 0.5591743428579873, 0.5383986628728267, 0.5297236721625853], "rank_score": 0.7734749058185972} -{"id": "kawahara-kurohashi-2006-fully", "title": "A Fully-Lexicalized Probabilistic Model for Japanese Syntactic and Case Structure Analysis", "abstract": "In this paper, we propose a probabilistic model that performs syntactic and case structure analysis in an integrated manner based on case frames. We use large-scale case frames that were automatically constructed from approximately 500 million web sentences. The probabilistic model takes predicate-argument structures as its basic units and defines the probability of generating them, exploiting the lexical preferences provided by the case frames. We conducted experiments on web text and confirmed that parsing accuracy improves, in particular for dependencies related to predicate-argument structures. We also investigated how often the lexical preferences are actually used and found that they are applied in as many as 60.7% of cases, confirming the high coverage of the case frames.", "phrases": ["probabilistic model", "japanese", "case structure analysis", "predicate-argument structure"], "overall_score": 1.921920232835059, "scores": [0.8591494962689463, 0.8099265992437125, 0.8383608906448351, 0.5863134096556085], "rank_score": 0.7734375989532756} -{"id": "tackstrom-etal-2012-cross", "title": "Cross-lingual Word Clusters for Direct Transfer of Linguistic Structure", "abstract": "It has been established that incorporating word cluster features derived from large unlabeled corpora can significantly improve prediction of linguistic structure. While previous work has focused primarily on English, we extend these results to other languages along two dimensions. First, we show that these results hold true for a number of languages across families. Second, and more interestingly, we provide an algorithm for inducing cross-lingual clusters and we show that features derived from these clusters significantly improve the accuracy of cross-lingual structure prediction. Specifically, we show that by augmenting direct-transfer systems with cross-lingual cluster features, the relative error of delexicalized dependency parsers, trained on English treebanks and transferred to foreign languages, can be reduced by up to 13%. 
When applying the same method to direct transfer of named-entity recognizers, we observe relative improvements of up to 26%.", "phrases": ["cluster", "linguistic structure", "transfer method"], "overall_score": 2.45794561463375, "scores": [0.8990750159077735, 0.8755944424919259, 0.5455671510267478], "rank_score": 0.773412203142149} -{"id": "kirchhoff-yang-2005-improved", "title": "Improved Language Modeling for Statistical Machine Translation", "abstract": "Statistical machine translation systems use a combination of one or more translation models and a language model. While there is a significant body of research addressing the improvement of translation models, the problem of optimizing language models for a specific translation task has not received much attention. Typically, standard word trigram models are used as an out-of-the-box component in a statistical machine translation system. In this paper we apply language modeling techniques that have proved beneficial in automatic speech recognition to the ACL05 machine translation shared data task and demonstrate improvements over a baseline system with a standard language model.", "phrases": ["language model", "pos information", "n-best list"], "overall_score": 1.6991917671784467, "scores": [1.260616335417027, 0.5368383684580068, 0.5225517961957633], "rank_score": 0.7733355000235992} -{"id": "linmei-etal-2019-heterogeneous", "title": "Heterogeneous Graph Attention Networks for Semi-supervised Short Text Classification", "abstract": "Short text classification has found rich and critical applications in news and tweet tagging to help users find relevant information. Due to lack of labeled training data in many practical use cases, there is a pressing need for studying semi-supervised short text classification. Most existing studies focus on long texts and achieve unsatisfactory performance on short texts due to the sparsity and limited labeled data. In this paper, we propose a novel heterogeneous graph neural network based method for semi-supervised short text classification, leveraging full advantage of few labeled data and large unlabeled data through information propagation along the graph. In particular, we first present a flexible HIN (heterogeneous information network) framework for modeling the short texts, which can integrate any type of additional information as well as capture their relations to address the semantic sparsity. Then, we propose Heterogeneous Graph ATtention networks (HGAT) to embed the HIN for short text classification based on a dual-level attention mechanism, including node-level and type-level attentions. The attention mechanism can learn the importance of different neighboring nodes as well as the importance of different node (information) types to a current node. Extensive experimental results have demonstrated that our proposed model outperforms state-of-the-art methods across six benchmark datasets significantly.", "phrases": ["text classification", "state-of-the-art method", "heterogeneous graph", "neural graph"], "overall_score": 1.8537554340354576, "scores": [1.1634600586380566, 0.8456625706053378, 0.5434876536274699, 0.5396939642151302], "rank_score": 0.7730760617714986} -{"id": "niculae-danescu-niculescu-mizil-2014-brighter", "title": "Brighter than Gold: Figurative Language in User Generated Comparisons", "abstract": "Comparisons are common linguistic devices used to indicate the likeness of two things. 
Often, this likeness is not meant in the literal sense\u2014for example, \u201cI slept like a log\u201d does not imply that logs actually sleep. In this paper we propose a computational study of figurative comparisons, or similes. Our starting point is a new large dataset of comparisons extracted from product reviews and annotated for figurativeness. We use this dataset to characterize figurative language in naturally occurring comparisons and reveal linguistic patterns indicative of this phenomenon. We operationalize these insights and apply them to a new task with high relevance to text understanding: distinguishing between figurative and literal comparisons. Finally, we apply this framework to explore the social context in which figurative language is produced, showing that similes are more likely to accompany opinions showing extreme sentiment, and that they are uncommon in reviews deemed helpful.", "phrases": ["figurative language", "simile", "tenor"], "overall_score": 1.3851663244029526, "scores": [0.9082899083130016, 0.8872434986705494, 0.5236947285871094], "rank_score": 0.7730760451902201} -{"id": "wang-etal-2020-docstruct", "title": "DocStruct: A Multimodal Method to Extract Hierarchy Structure in Document for General Form Understanding", "abstract": "Form understanding depends on both textual contents and organizational structure. Although modern OCR performs well, it is still challenging to realize general form understanding because forms are commonly used and of various formats. The table detection and handcrafted features in previous works cannot apply to all forms because of their requirements on formats. Therefore, we concentrate on the most elementary components, the key-value pairs, and adopt multimodal methods to extract features. We consider the form structure as a tree-like or graph-like hierarchy of text fragments. The parent-child relation corresponds to the key-value pairs in forms. We utilize the state-of-the-art models and design targeted extraction modules to extract multimodal features from semantic contents, layout information, and visual images. A hybrid fusion method of concatenation and feature shifting is designed to fuse the heterogeneous features and provide an informative joint representation. We adopt an asymmetric algorithm and negative sampling in our model as well. We validate our method on two benchmarks, MedForm and FUNSD, and extensive experiments demonstrate the effectiveness of our method.", "phrases": ["multimodal method", "general form understanding", "graph-like hierarchy", "docstruct"], "overall_score": 1.07150090999418, "scores": [0.8922748078050546, 0.838554960987005, 0.7817024201054135, 0.5791659094157564], "rank_score": 0.7729245245783074} -{"id": "garneau-etal-2020-robust", "title": "A Robust Self-Learning Method for Fully Unsupervised Cross-Lingual Mappings of Word Embeddings: Making the Method Robustly Reproducible as Well", "abstract": "In this paper, we reproduce the experiments of Artetxe et al. (2018b) regarding the robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings. We show that the reproduction of their method is indeed feasible with some minor assumptions. We further investigate the robustness of their model by introducing four new languages that are less similar to English than the ones proposed by the original paper. In order to assess the stability of their model, we also conduct a grid search over sensible hyperparameters. 
We then propose key recommendations that apply to any research project in order to deliver fully reproducible research.", "phrases": ["robust self-learning method", "unsupervised cross-lingual mapping", "new language"], "overall_score": 0.8491175900942561, "scores": [0.9415870687242475, 0.8286461040293824, 0.548467242814289], "rank_score": 0.7729001385226396} -{"id": "lin-etal-2018-multi", "title": "Multi-Hop Knowledge Graph Reasoning with Reward Shaping", "abstract": "Multi-hop reasoning is an effective approach for query answering (QA) over incomplete knowledge graphs (KGs). The problem can be formulated in a reinforcement learning (RL) setup, where a policy-based agent sequentially extends its inference path until it reaches a target. However, in an incomplete KG environment, the agent receives low-quality rewards corrupted by false negatives in the training data, which harms generalization at test time. Furthermore, since no golden action sequence is used for training, the agent can be misled by spurious search trajectories that incidentally lead to the correct answer. We propose two modeling advances to address both issues: (1) we reduce the impact of false negative supervision by adopting a pretrained one-hop embedding model to estimate the reward of unobserved facts; (2) we counter the sensitivity to spurious paths of on-policy RL by forcing the agent to explore a diverse set of paths using randomly generated edge masks. Our approach significantly improves over existing path-based KGQA models on several benchmark datasets and is comparable or better than embedding-based models.", "phrases": ["knowledge graph", "multi-hop reasoning", "multihopkg", "markov decision process", "path-based method"], "overall_score": 2.275652974041259, "scores": [1.4377523238615044, 0.838885470112878, 0.5427252304656024, 0.5229824615820918, 0.5219780576865591], "rank_score": 0.7728647087417272} -{"id": "stymne-2011-definite", "title": "Definite Noun Phrases in Statistical Machine Translation into Scandinavian Languages", "abstract": "In this thesis I aim to improve phrase-based statistical machine translation (PBSMT) in a number of ways by the use of text harmonization strategies. PBSMT systems are built by training statistical models on large corpora of human translations. This architecture generally performs well for languages with similar structure. If the languages are different for example with respect to word order or morphological complexity, however, the standard methods do not tend to work well. I address this problem through text harmonization, by making texts more similar before training and applying a PBSMT system. I investigate how text harmonization can be used to improve PBSMT with a focus on four areas: compounding, definiteness, word order, and unknown words. For the first three areas, the focus is on linguistic differences between languages, which I address by applying transformation rules, using either rule-based or machine learning-based techniques, to the source or target data. For the last area, unknown words, I harmonize the translation input to the training data by replacing unknown words with known alternatives. I show that translation into languages with closed compounds can be improved by splitting and merging compounds. I develop new merging algorithms that outperform previously suggested algorithms and show how part-of-speech tags can be used to improve the order of compound parts. 
Scandinavian definite noun phrases are identified as a problem for PBSMT in translation into Scandinavian languages and I propose a preprocessing approach that addresses this problem and gives large improvements over a baseline. Several previous proposals for how to handle differences in reordering exist; I propose two types of extensions, iterating reordering and word alignment and using automatically induced word classes, which allow these methods to be used for less-resourced languages. Finally, I identify several ways of replacing unknown words in the translation input, most notably a spell checking-inspired algorithm, which can be trained using character-based PBSMT techniques. Overall, I present several approaches for extending PBSMT by the use of pre- and postprocessing techniques for text harmonization, and show experimentally that these methods work. Text harmonization methods are an efficient way to improve statistical machine translation within the phrase-based approach, without resorting to more complex models.", "phrases": ["statistical machine translation", "scandinavian language", "danish"], "overall_score": 1.3847593988670286, "scores": [0.9261583490516959, 0.8650965332703243, 0.5272919248511971], "rank_score": 0.7728489357244058} -{"id": "roy-goldwasser-2020-weakly", "title": "Weakly Supervised Learning of Nuanced Frames for Analyzing Polarization in News Media", "abstract": "In this paper, we suggest a minimally supervised approach for identifying nuanced frames in news article coverage of politically divisive topics. We suggest breaking the broad policy frames suggested by Boydstun et al. (2014) into fine-grained subframes which can capture differences in political ideology in a better way. We evaluate the suggested subframes and their embedding, learned using minimal supervision, over three topics, namely, immigration, gun-control, and abortion. We demonstrate the ability of the subframes to capture ideological differences and analyze political discourse in news media.", "phrases": ["polarization", "ideology", "news medium"], "overall_score": 1.607035277983442, "scores": [1.2437998553320266, 0.5399165269968025, 0.5347454437514998], "rank_score": 0.7728206086934429} -{"id": "tsakalidis-liakata-2020-sequential", "title": "Sequential Modelling of the Evolution of Word Representations for Semantic Change Detection", "abstract": "Semantic change detection concerns the task of identifying words whose meaning has changed over time. Current state-of-the-art approaches operating on neural embeddings detect the level of semantic change in a word by comparing its vector representation in two distinct time periods, without considering its evolution through time. In this work, we propose three variants of sequential models for detecting semantically shifted words, effectively accounting for the changes in the word representations over time. Through extensive experimentation under various settings with synthetic and real data we showcase the importance of sequential modelling of word vectors through time for semantic change detection. 
Finally, we compare different approaches in a quantitative manner, demonstrating that temporal modelling of word representations yields a clear-cut advantage in performance.", "phrases": ["evolution", "semantic change detection", "word vector", "sequential modelling"], "overall_score": 1.2431862089197592, "scores": [0.9362363900804058, 0.8055742305636611, 0.8009879788490734, 0.5469414875651417], "rank_score": 0.7724350217645706} -{"id": "ravi-kozareva-2018-self-governing", "title": "Self-Governing Neural Networks for On-Device Short Text Classification", "abstract": "Deep neural networks reach state-of-the-art performance for wide range of natural language processing, computer vision and speech applications. Yet, one of the biggest challenges is running these complex networks on devices such as mobile phones or smart watches with tiny memory footprint and low computational capacity. We propose on-device Self-Governing Neural Networks (SGNNs), which learn compact projection vectors with local sensitive hashing. The key advantage of SGNNs over existing work is that they surmount the need for pre-trained word embeddings and complex networks with huge parameters. We conduct extensive evaluation on dialog act classification and show significant improvement over state-of-the-art results. Our findings show that SGNNs are effective at capturing low-dimensional semantic text representations, while maintaining high accuracy.", "phrases": ["short text classification", "device", "low computational capacity"], "overall_score": 1.697054729144741, "scores": [0.9231867854023842, 0.8737320576897031, 0.5201698332081017], "rank_score": 0.772362892100063} -{"id": "tebbifakhr-etal-2019-machine", "title": "Machine Translation for Machines: the Sentiment Classification Use Case", "abstract": "We propose a neural machine translation (NMT) approach that, instead of pursuing adequacy and fluency (\u201chuman-oriented\u201d quality criteria), aims to generate translations that are best suited as input to a natural language processing component designed for a specific downstream task (a \u201cmachine-oriented\u201d criterion). Towards this objective, we present a reinforcement learning technique based on a new candidate sampling strategy, which exploits the results obtained on the downstream task as weak feedback. Experiments in sentiment classification of Twitter data in German and Italian show that feeding an English classifier with \u201cmachine-oriented\u201d translations significantly improves its performance. Classification results outperform those obtained with translations produced by general-purpose NMT models as well as by an approach based on reinforcement learning. Moreover, our results on both languages approximate the classification accuracy computed on gold standard English tweets.", "phrases": ["adequacy", "machine translation", "reward"], "overall_score": 1.5028946108317331, "scores": [0.933906483730002, 0.83922132931792, 0.5438773347406559], "rank_score": 0.7723350492628592} -{"id": "zhu-etal-2008-learning", "title": "Learning a Stopping Criterion for Active Learning for Word Sense Disambiguation and Text Classification", "abstract": "In this paper, we address the problem of knowing when to stop the process of active learning. We propose a new statistical learning approach, called minimum expected error strategy, to defining a stopping criterion through estimation of the classifier\u2019s expected error on future unlabeled examples in the active learning process. 
In experiments on active learning for word sense disambiguation and text classification, the results show that the proposed stopping criterion can reduce human labeling costs by approximately 50% in word sense disambiguation, with a degradation of 0.5% in average accuracy, and by approximately 90% in text classification, with a degradation of 2% in average accuracy.", "phrases": ["stopping criterion", "active learning", "word sense disambiguation", "future unlabeled example"], "overall_score": 1.502744183990892, "scores": [0.9075658252776223, 0.8369395939551498, 0.8086666427055154, 0.5358589186965247], "rank_score": 0.772257745158703} -{"id": "yu-etal-2013-compound", "title": "Compound Embedding Features for Semi-supervised Learning", "abstract": "To solve the data sparsity problem, recently there has been a trend in discriminative methods of NLP to use representations of lexical items learned from unlabeled data as features. In this paper, we investigated the usage of word representations learned by neural language models, i.e. word embeddings. The direct usage has disadvantages such as a large amount of computation, inadequacy in dealing with word ambiguity and rare words, and the problem of linear non-separability. To overcome these problems, we instead built compound features from continuous word embeddings based on clustering. Experiments showed that the compound features not only improved the performance on several NLP tasks, but also ran faster, suggesting the potential of embeddings.", "phrases": ["usage", "word embedding", "compound"], "overall_score": 1.2427812734043604, "scores": [0.8005080662313074, 0.9749174192430515, 0.5411247780734695], "rank_score": 0.7721834211826094} -{"id": "allman-etal-2012-linguists", "title": "Linguist's Assistant: A Multi-Lingual Natural Language Generator based on Linguistic Universals, Typologies, and Primitives", "abstract": "Linguist's Assistant (LA) is a large scale semantic analyzer and multi-lingual natural language generator designed and developed entirely from a linguist's perspective. The system incorporates extensive typological, semantic, syntactic, and discourse research into its semantic representational system and its transfer and synthesizing grammars. LA has been tested with English, Korean, Kewa (Papua New Guinea), Jula (Cote d'Ivoire), and North Tanna (Vanuatu), and proof-of-concept lexicons and grammars have been developed for Spanish, Urdu, Tagalog, Chinantec (Mexico), and Angas (Nigeria). This paper will summarize the major components of the NLG system, and then present the results of experiments that were performed to determine the quality of the generated texts. The experiments indicate that when experienced mother-tongue translators use the drafts generated by LA, their productivity is typically quadrupled without any loss of quality.", "phrases": ["assistant", "productivity", "linguist"], "overall_score": 1.0704228184688354, "scores": [0.9435338630133098, 0.8346784767325507, 0.5382281980432035], "rank_score": 0.772146845929688} -{"id": "isard-konrad-2022-dgs", "title": "MY DGS \u2013 ANNIS: ANNIS and the Public DGS Corpus", "abstract": "In 2018 the DGS-Korpus project published the first full release of the Public DGS Corpus. The data have already been published in two different ways to fulfil the needs of different user groups, and we have now published the third portal MY DGS \u2013 ANNIS using the ANNIS browser-based corpus software. ANNIS is a corpus query tool for visualization and querying of multi-layer corpus data. 
It has its own query language, AQL, and is accessed from a web browser without requiring a login. It allows more complex queries and visualizations than those provided by the existing research portal. We introduce ANNIS and its query language AQL, describe the structure of MY DGS \u2013 ANNIS, and give some example queries. The use cases with queries over multiple annotation tiers and metadata illustrate the research potential of this powerful tool and show how students and researchers can explore the Public DGS Corpus.", "phrases": ["annis", "public dgs corpus", "project"], "overall_score": 1.2426728811620686, "scores": [0.9355931187771503, 0.8323858156666609, 0.5483692854437033], "rank_score": 0.7721160732958382} -{"id": "ayan-etal-2008-improving", "title": "Improving Alignments for Better Confusion Networks for Combining Machine Translation Systems", "abstract": "The state-of-the-art system combination method for machine translation (MT) is the word-based combination using confusion networks. One of the crucial steps in confusion network decoding is the alignment of different hypotheses to each other when building a network. In this paper, we present new methods to improve alignment of hypotheses using word synonyms and a two-pass alignment strategy. We demonstrate that combination with the new alignment technique yields up to 2.9 BLEU point improvement over the best input system and up to 1.3 BLEU point improvement over a state-of-the-art combination method on two different language pairs.", "phrases": ["confusion network", "hypothesis", "word-level combination"], "overall_score": 1.5022307060733833, "scores": [0.8886057289381435, 0.8396928025634459, 0.5876830776225658], "rank_score": 0.7719938697080518} -{"id": "chen-etal-2009-cognitive", "title": "A Cognitive-based Annotation System for Emotion Computing", "abstract": "Emotion computing is very important for expressive information extraction. In this paper, we provide a robust and versatile emotion annotation scheme based on cognitive emotion theories, which not only can annotate both explicit and implicit emotion expressions, but also can encode different levels of emotion information for the given emotion content. In addition, motivated by a cognitive framework, an automatic emotion annotation system is developed, and large and comparatively high-quality emotion corpora are created for emotion computing, one in Chinese and the other in English. Such an annotation system can be easily adapted for different kinds of emotion applications and be extended to other languages.", "phrases": ["annotation system", "emotion computing", "neutral-sentence"], "overall_score": 1.502148558482695, "scores": [0.9275672867279495, 0.8361780797186947, 0.5521095961454583], "rank_score": 0.7719516541973676} -{"id": "moon-etal-2019-unified", "title": "A Unified Neural Coherence Model", "abstract": "Recently, neural approaches to coherence modeling have achieved state-of-the-art results in several evaluation tasks. However, we show that most of these models often fail on harder tasks with more realistic application scenarios. In particular, the existing models underperform on tasks that require the model to be sensitive to local contexts such as candidate ranking in conversational dialogue and in machine translation. In this paper, we propose a unified coherence model that incorporates sentence grammar, inter-sentence coherence relations, and global coherence patterns into a common neural framework. 
With extensive experiments on local and global discrimination tasks, we demonstrate that our proposed model outperforms existing models by a good margin, and establish a new state-of-the-art.", "phrases": ["coherence model", "sentence grammar", "common neural framework", "discrimination task"], "overall_score": 1.8510432595992166, "scores": [0.9954499481209176, 0.8682612654993263, 0.6493538321154777, 0.5747149429858514], "rank_score": 0.7719449971803932} -{"id": "brooke-etal-2012-unsupervised", "title": "Unsupervised Stylistic Segmentation of Poetry with Change Curves and Extrinsic Features", "abstract": "The identification of stylistic inconsistency is a challenging task relevant to a number of genres, including literature. In this work, we carry out stylistic segmentation of a well-known poem, The Waste Land by T.S. Eliot, which is traditionally analyzed in terms of numerous voices which appear throughout the text. Our method, adapted from work in topic segmentation and plagiarism detection, predicts breaks based on a curve of stylistic change which combines information from a diverse set of features, most notably co-occurrence in larger corpora via reduced-dimensionality vectors. We show that this extrinsic information is more useful than (within-text) distributional features. We achieve well above baseline performance on both artificial mixed-style texts and The Waste Land itself.", "phrases": ["segmentation", "poetry", "voice"], "overall_score": 1.242292080751722, "scores": [0.9040945771553441, 0.8751610800004133, 0.5363827489376278], "rank_score": 0.7718794686977951} -{"id": "goyal-etal-2009-streaming", "title": "Streaming for large scale NLP: Language Modeling", "abstract": "In this paper, we explore a streaming algorithm paradigm to handle large amounts of data for NLP problems. We present an efficient low-memory method for constructing high-order approximate n-gram frequency counts. The method is based on a deterministic streaming algorithm which efficiently computes approximate frequency counts over a stream of data while employing a small memory footprint. We show that this method easily scales to billion-word monolingual corpora using a conventional (8 GB RAM) desktop machine. Statistical machine translation experimental results corroborate that the resulting high-n approximate small language model is as effective as models obtained from other count pruning methods.", "phrases": ["language modeling", "n-gram frequency count", "streaming"], "overall_score": 1.3828177047959629, "scores": [0.8834045427329098, 0.8778606967815299, 0.5540305273750576], "rank_score": 0.7717652556298326} -{"id": "chen-etal-2020-modeling", "title": "Modeling Discourse Structure for Document-level Neural Machine Translation", "abstract": "Recently, document-level neural machine translation (NMT) has become a hot topic in the community of machine translation. Despite its success, most of existing studies ignored the discourse structure information of the input document to be translated, which has shown effective in other tasks. In this paper, we propose to improve document-level NMT with the aid of discourse structure information. Our encoder is based on a hierarchical attention network (HAN) (Miculicich et al., 2018). Specifically, we first parse the input document to obtain its discourse structure. Then, we introduce a Transformer-based path encoder to embed the discourse structure information of each word. 
Finally, we combine the discourse structure information with the word embedding before it is fed into the encoder. Experimental results on the English-to-German dataset show that our model can significantly outperform both Transformer and Transformer+HAN.", "phrases": ["discourse structure", "neural machine translation", "input document"], "overall_score": 1.5015993409143806, "scores": [0.8922899290187343, 0.8844620981076742, 0.5382562094718232], "rank_score": 0.7716694121994104} -{"id": "kreutzer-etal-2018-reliability", "title": "Reliability and Learnability of Human Bandit Feedback for Sequence-to-Sequence Reinforcement Learning", "abstract": "We present a study on reinforcement learning (RL) from human bandit feedback for sequence-to-sequence learning, exemplified by the task of bandit neural machine translation (NMT). We investigate the reliability of human bandit feedback, and analyze the influence of reliability on the learnability of a reward estimator, and the effect of the quality of reward estimates on the overall RL task. Our analysis of cardinal (5-point ratings) and ordinal (pairwise preferences) feedback shows that their intra- and inter-annotator \u03b1-agreement is comparable. Best reliability is obtained for standardized cardinal feedback, and cardinal feedback is also easiest to learn and generalize from. Finally, improvements of over 1 BLEU can be obtained by integrating a regression-based reward estimator trained on cardinal feedback for 800 translations into RL for NMT. This shows that RL is possible even from small amounts of fairly reliable human feedback, pointing to a great potential for applications at larger scale.", "phrases": ["learnability", "human bandit feedback", "reward estimator"], "overall_score": 1.2419310454057866, "scores": [0.887530341096516, 0.851444680149501, 0.5759904132282472], "rank_score": 0.7716551448247547} -{"id": "torisawa-2006-acquiring", "title": "Acquiring Inference Rules with Temporal Constraints by Using Japanese Coordinated Sentences and Noun-Verb Co-occurrences", "abstract": "This paper shows that inference rules with temporal constraints can be acquired by using verb-verb co-occurrences in Japanese coordinated sentences and verb-noun co-occurrences. For example, our unsupervised acquisition method could obtain the inference rule \"If someone enforces a law, usually someone enacts the law at the same time as or before the enforcing of the law\" since the verbs \"enact\" and \"enforce\" frequently co-occurred in coordinated sentences and the verbs also frequently co-occurred with the noun \"law\". We also show that the accuracy of the acquisition is improved by using the occurrence frequency of a single verb, which we assume indicates how generic the meaning of the verb is.", "phrases": ["inference rule", "temporal constraint", "coordinated sentence", "verb-noun co-occurrence"], "overall_score": 1.6951462299185718, "scores": [1.1186939121135342, 0.8366360843549525, 0.5823177322277507, 0.5483294579847221], "rank_score": 0.7714942966702398} -{"id": "ramisch-etal-2010-multiword", "title": "Multiword Expressions in the wild? The mwetoolkit comes in handy", "abstract": "The mwetoolkit is a tool for automatic extraction of Multiword Expressions (MWEs) from monolingual corpora. It both generates and validates MWE candidates. The generation is based on surface forms, while for the validation, a series of criteria for removing noise are provided, such as some (language independent) association measures. 
In this paper, we present the use of the mwetoolkit in a standard configuration, for extracting MWEs from a corpus of general-purpose English. The functionalities of the toolkit are discussed in terms of a set of selected examples, comparing it with related work on MWE extraction.", "phrases": ["mwetoolkit", "association measure", "multiword expressions"], "overall_score": 0.8475062449847214, "scores": [0.9631703702531484, 0.8242265574983937, 0.5269033592373811], "rank_score": 0.7714334289963077} -{"id": "sogaard-kuhn-2009-empirical", "title": "Empirical Lower Bounds on Aligment Error Rates in Syntax-Based Machine Translation", "abstract": "The empirical adequacy of synchronous context-free grammars of rank two (2-SCFGs) (Satta and Peserico, 2005), used in syntax-based machine translation systems such as Wu (1997), Zhang et al. (2006) and Chiang (2007), in terms of what alignments they induce, has been discussed in Wu (1997) and Wellington et al. (2006), but with a one-sided focus on so-called \"inside-out alignments\". Other alignment configurations that cannot be induced by 2-SCFGs are identified in this paper, and their frequencies across a wide collection of hand-aligned parallel corpora are examined. Empirical lower bounds on two measures of alignment error rate, i.e. the one introduced in Och and Ney (2000) and one where only complete translation units are considered, are derived for 2-SCFGs and related formalisms.", "phrases": ["syntax-based machine translation", "adequacy", "low bound"], "overall_score": 1.7762421742459344, "scores": [0.9253941700111907, 0.8585792349904742, 0.5302631193948657], "rank_score": 0.7714121747988436} -{"id": "deri-knight-2016-grapheme", "title": "Grapheme-to-Phoneme Models for (Almost) Any Language", "abstract": "Grapheme-to-phoneme (g2p) models are rarely available in low-resource languages, as the creation of training and evaluation data is expensive and time-consuming. We use Wiktionary to obtain more than 650k word-pronunciation pairs in more than 500 languages. We then develop phoneme and language distance metrics based on phonological and linguistic knowledge; applying those, we adapt g2p models for high-resource languages to create models for related low-resource languages. We provide results for models for 229 adapted languages.", "phrases": ["low-resource language", "phoneme", "g2p model"], "overall_score": 1.6945362823572008, "scores": [0.9139093051311515, 0.8784592972824483, 0.5212814903023018], "rank_score": 0.7712166975719672} -{"id": "tiedemann-ljubesic-2012-efficient", "title": "Efficient Discrimination Between Closely Related Languages", "abstract": "In this paper, we revisit the problem of language identification with the focus on proper discrimination between closely related languages. Strong similarities between certain languages make it very hard to classify them correctly using standard methods that have been proposed in the literature. Dedicated models that focus on specific discrimination tasks help to improve the accuracy of general-purpose language identification tools. We propose and compare methods based on simple document classification techniques trained on parallel corpora of closely related languages and methods that emphasize discriminating features in terms of blacklisted words. Our experiments demonstrate that these techniques are highly accurate for the difficult task of discriminating between Bosnian, Croatian and Serbian. 
The best setup yields an absolute improvement of over 9% in accuracy over the best-performing baseline using a state-of-the-art language identification tool.", "phrases": ["discrimination", "related language", "croatian", "main bottleneck", "political motive"], "overall_score": 1.9163101188258376, "scores": [0.9298570351010403, 0.9295272810928878, 0.9117565713885589, 0.5519701174094409, 0.5327886099681092], "rank_score": 0.7711799229920075} -{"id": "fornaciari-poesio-2014-identifying", "title": "Identifying fake Amazon reviews as learning from crowds", "abstract": "Customers who buy products such as books online often rely on other customers' reviews more than on reviews found in specialist magazines. Unfortunately the confidence in such reviews is often misplaced due to the explosion of so-called sock puppetry: authors writing glowing reviews of their own books. Identifying such deceptive reviews is not easy. The first contribution of our work is the creation of a collection including a number of genuinely deceptive Amazon book reviews in collaboration with crime writer Jeremy Duns, who has devoted a great deal of effort to unmasking sock puppeting among his colleagues. But there can be no certainty concerning the other reviews in the collection: all we have is a number of cues, also developed in collaboration with Duns, suggesting that a review may be genuine or deceptive. Thus this corpus is an example of a collection where it is not possible to acquire the actual label for all instances, and where clues of deception were treated as annotators that assign heuristic labels. A number of approaches have been proposed for such cases; we adopt here the 'learning from crowds' approach proposed by Raykar et al. (2010). Thanks to Duns' certainly fake reviews, the second contribution of this work consists in the evaluation of the effectiveness of different methods of annotation, according to the performance of models trained to detect deceptive reviews.", "phrases": ["crowd", "amazon book review", "fake review"], "overall_score": 1.381704060023438, "scores": [0.8210778746105514, 0.8748021709922567, 0.6175511103414415], "rank_score": 0.7711437186480832} -{"id": "palakurthi-etal-2015-classification", "title": "Classification of Attributes in a Natural Language Query into Different SQL Clauses", "abstract": "Attribute information in a natural language query is one of the key features for converting a natural language query into a Structured Query Language (SQL) query in Natural Language Interface to Database systems. In this paper, we explore the task of classifying the attributes present in a natural language query into different SQL clauses in a SQL query. In particular, we investigate the effectiveness of various features and Conditional Random Fields for this task. Our system uses a statistical classifier trained on manually prepared data. We report our results on three different domains and also show how our system can be used for generating a complete SQL query.", "phrases": ["attribute", "natural language query", "sql clause"], "overall_score": 1.2408349872040239, "scores": [0.8876032823608639, 0.8745157479767887, 0.5508033463834141], "rank_score": 0.7709741255736889} -{"id": "tang-etal-2021-multilingual", "title": "Multilingual Translation from Denoising Pre-Training", "abstract": "Recent work demonstrates the potential of training one model for multilingual machine translation. 
In parallel, denoising pretraining using unlabeled monolingual data as a starting point for finetuning bitext machine translation systems has demonstrated strong performance gains. However, little has been explored on the potential to combine denoising pretraining with multilingual machine translation in a single model. In this work, we fill this gap by studying how multilingual translation models can be created through multilingual finetuning. Finetuning a multilingual model from a denoising pretrained model incorporates the benefits of large quantities of unlabeled monolingual data, which is particularly important for low-resource languages where bitext is rare. Further, we create the ML50 benchmark to facilitate reproducible research by standardizing training and evaluation data. On ML50, we show that multilingual finetuning significantly improves over multilingual models trained from scratch and bilingual finetuning for translation into English. We also find that multilingual finetuning can significantly improve over multilingual models trained from scratch for zero-shot translation on non-English directions. Finally, we discuss that the pretraining and finetuning paradigm alone is not enough to address the challenges of multilingual models for to-Many translation directions.", "phrases": ["denoising", "pretraining", "multilingual model"], "overall_score": 1.9153988917304121, "scores": [1.1954463007158216, 0.5613920795495383, 0.5556012744321206], "rank_score": 0.7708132182324935} -{"id": "bos-nissim-2006-empirical", "title": "An Empirical Approach to the Interpretation of Superlatives", "abstract": "In this paper we introduce an empirical approach to the semantic interpretation of superlative adjectives. We present a corpus annotated for superlatives and propose an interpretation algorithm that uses a wide-coverage parser and produces semantic representations. We achieve F-scores between 0.84 and 0.91 for detecting attributive superlatives and an accuracy in the range of 0.69-0.84 for determining the correct comparison set. As far as we are aware, this is the first automated approach to superlatives for open-domain texts and questions.", "phrases": ["empirical approach", "interpretation", "superlative", "attributive superlative"], "overall_score": 1.2405533116579488, "scores": [0.8328359196623781, 0.7903625374907242, 0.8529077392587727, 0.6070902464549304], "rank_score": 0.7707991107167014} -{"id": "joshi-penstein-rose-2009-generalizing", "title": "Generalizing Dependency Features for Opinion Mining", "abstract": "We explore how features based on syntactic dependency relations can be utilized to improve performance on opinion mining. Using a transformation of dependency relation triples, we convert them into \"composite back-off features\" that generalize better than the regular lexicalized dependency relation features. 
Experiments comparing our approach with several other approaches that generalize dependency features or ngrams demonstrate the utility of composite back-off features.", "phrases": ["opinion mining", "syntactic feature", "dependency relation pair", "pos tag"], "overall_score": 1.6935660204257703, "scores": [0.972113204756518, 0.9433819489801183, 0.6165264621980865, 0.5510788334129649], "rank_score": 0.7707751123369219} -{"id": "baranes-sagot-2014-language", "title": "A Language-independent Approach to Extracting Derivational Relations from an Inflectional Lexicon", "abstract": "In this paper, we describe and evaluate an unsupervised method for acquiring pairs of lexical entries belonging to the same morphological family, i.e., derivationally related words, starting from a purely inflectional lexicon. Our approach relies on transformation rules that relate lexical entries to one another, and which are automatically extracted from the inflected lexicon based on surface form analogies and on part-of-speech information. It is generic enough to be applied to any language with a mainly concatenative derivational morphology. Results were obtained and evaluated on English, French, German and Spanish. Precision results are satisfying, and our French results favorably compare with another resource, although its construction relied on manually developed lexicographic information whereas our approach only requires an inflectional lexicon.", "phrases": ["language-independent approach", "inflectional lexicon", "spanish"], "overall_score": 1.2404011830114916, "scores": [0.9407742415167814, 0.844591671743278, 0.5267478503622716], "rank_score": 0.7707045878741102} -{"id": "sornil-chaiwanarom-2004-combining", "title": "Combining Prediction by Partial Matching and Logistic Regression for Thai Word Segmentation", "abstract": "Word segmentation is an important part of many applications, including information retrieval, information filtering, document analysis, and text summarization. In the Thai language, the process is complicated since words are written continuously, and their structures are not well-defined. A recognized effective approach to word segmentation is Longest Matching, a dictionary-based method. Nevertheless, this method suffers from character-level and syllable-level ambiguities in determining word boundaries. This paper proposes a technique for Thai word segmentation using a two-step approach. First, text is segmented, using an application of Prediction by Partial Matching, into syllables whose structures are more well-defined. This reduces the earlier type of ambiguity. Then, the syllables are combined into words by an application of a syllable-level longest matching method together with a logistic regression model which takes into account contextual information. The experimental results show the syllable segmentation accuracy of more than 96.65% and the overall word segmentation accuracy of 97%.", "phrases": ["partial matching", "thai word segmentation", "ambiguity"], "overall_score": 0.8466858777614003, "scores": [0.9747166563412735, 0.80943038794071, 0.5279130514262224], "rank_score": 0.7706866985694019} -{"id": "lin-etal-2020-triggerner", "title": "TriggerNER: Learning with Entity Triggers as Explanations for Named Entity Recognition", "abstract": "Training neural models for named entity recognition (NER) in a new domain often requires additional human annotations (e.g., tens of thousands of labeled instances) that are usually expensive and time-consuming to collect. 
Thus, a crucial research question is how to obtain supervision in a cost-effective way. In this paper, we introduce \u201centity triggers,\u201d an effective proxy of human explanations for facilitating label-efficient learning of NER models. An entity trigger is defined as a group of words in a sentence that helps to explain why humans would recognize an entity in the sentence. We crowd-sourced 14k entity triggers for two well-studied NER datasets. Our proposed model, Trigger Matching Network, jointly learns trigger representations and a soft matching module with self-attention such that it can generalize to unseen sentences easily for tagging. Our framework is significantly more cost-effective than the traditional neural NER frameworks. Experiments show that using only 20% of the trigger-annotated sentences results in performance comparable to using 70% of conventionally annotated sentences.", "phrases": ["explanation", "named entity recognition", "new domain"], "overall_score": 1.7745059363857545, "scores": [0.9282184284989808, 0.8381711052594168, 0.5455848750722903], "rank_score": 0.7706581362768959} -{"id": "zhou-etal-2016-cross", "title": "Cross-Lingual Sentiment Classification with Bilingual Document Representation Learning", "abstract": "Cross-lingual sentiment classification aims to adapt the sentiment resource in a resource-rich language to a resource-poor language. In this study, we propose a representation learning approach which simultaneously learns vector representations for the texts in both the source and the target languages. Different from previous research which only gets bilingual word embeddings, our Bilingual Document Representation Learning model BiDRL directly learns document representations. Both semantic and sentiment correlations are utilized to map the bilingual texts into the same embedding space. The experiments are based on the multilingual multi-domain Amazon review dataset. We use English as the source language and use Japanese, German and French as the target languages. The experimental results show that BiDRL outperforms the state-of-the-art methods for all the target languages.", "phrases": ["sentiment classification", "document representation", "source language"], "overall_score": 1.60247187432276, "scores": [0.9100501663185573, 0.8795018424966177, 0.5223262174343134], "rank_score": 0.7706260754164961} -{"id": "iandola-etal-2020-squeezebert", "title": "SqueezeBERT: What can computer vision teach NLP about efficient neural networks?", "abstract": "Humans read and write hundreds of billions of messages every day. Further, due to the availability of large datasets, large computing systems, and better neural network models, natural language processing (NLP) technology has made significant strides in understanding, proofreading, and organizing these messages. Thus, there is a significant opportunity to deploy NLP in myriad applications to help web users, social networks, and businesses. Toward this end, we consider smartphones and other mobile devices as crucial platforms for deploying NLP models at scale. However, today's highly-accurate NLP neural network models such as BERT and RoBERTa are extremely computationally expensive, with BERT-base taking 1.7 seconds to classify a text snippet on a Pixel 3 smartphone. To begin to address this problem, we draw inspiration from the computer vision community, where work such as MobileNet has demonstrated that grouped convolutions (e.g. depthwise convolutions) can enable speedups without sacrificing accuracy. 
We demonstrate how to replace several operations in self-attention layers with grouped convolutions, and we use this technique in a novel network architecture called SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. A PyTorch-based implementation of SqueezeBERT is available as part of the Hugging Face Transformers library: ", "phrases": ["convolution", "squeezebert", "feed-forward network"], "overall_score": 1.4995638036938281, "scores": [0.8521075346639153, 0.8788011153854547, 0.5809614089384397], "rank_score": 0.7706233529959365} -{"id": "lynn-etal-2017-human", "title": "Human Centered NLP with User-Factor Adaptation", "abstract": "We pose the general task of user-factor adaptation \u2013 adapting supervised learning models to real-valued user factors inferred from a background of their language, reflecting the idea that a piece of text should be understood within the context of the user that wrote it. We introduce a continuous adaptation technique, suited for real-valued user factors that are common in social science and bringing us closer to personalized NLP, adapting to each user uniquely. We apply this technique with known user factors including age, gender, and personality traits, as well as latent factors, evaluating over five tasks: POS tagging, PP-attachment, sentiment analysis, sarcasm detection, and stance detection. Adaptation provides statistically significant benefits for 3 of the 5 tasks: up to +1.2 points for PP-attachment, +3.4 points for sarcasm, and +3.0 points for stance.", "phrases": ["user-factor adaptation", "demographic", "domain adaptation problem"], "overall_score": 1.9765641888655987, "scores": [0.932645411692435, 0.84146750643848, 0.5377037067654369], "rank_score": 0.7706055416321173} -{"id": "chen-ng-2013-linguistically", "title": "Linguistically Aware Coreference Evaluation Metrics", "abstract": "Virtually all the commonly-used evaluation metrics for entity coreference resolution are linguistically agnostic, treating the mentions to be clustered as generic rather than linguistic objects. We argue that the performance of an entity coreference resolver cannot be accurately reflected when it is evaluated using linguistically agnostic metrics. Consequently, we propose a framework for incorporating linguistic awareness into commonly-used coreference evaluation metrics.", "phrases": ["evaluation metric", "mention", "agnostic metric"], "overall_score": 1.24004788039403, "scores": [1.1867378037096945, 0.5878518485457385, 0.5368655535907969], "rank_score": 0.77048506861541} -{"id": "pavlick-etal-2015-adding", "title": "Adding Semantics to Data-Driven Paraphrasing", "abstract": "We add an interpretable semantics to the paraphrase database (PPDB). To date, the relationship between phrase pairs in the database has been weakly defined as approximately equivalent. We show that these pairs represent a variety of relations, including directed entailment (little girl/girl) and exclusion (nobody/someone). We automatically assign semantic entailment relations to entries in PPDB using features derived from past work on discovering inference rules from text and semantic taxonomy induction. We demonstrate that our model assigns these relations with high accuracy. 
In a downstream RTE task, our labels rival relations from WordNet and improve the coverage of a proof-based RTE system by 17%.", "phrases": ["phrase pair", "entailment", "inference rule"], "overall_score": 1.914386921248461, "scores": [0.9022886683120499, 0.8776389993176283, 0.5312902464257093], "rank_score": 0.7704059713517958} -{"id": "zhang-clark-2007-chinese", "title": "Chinese Segmentation with a Word-Based Perceptron Algorithm", "abstract": "Standard approaches to Chinese word segmentation treat the problem as a tagging task, assigning labels to the characters in the sequence indicating whether the character marks a word boundary. Discriminatively trained models based on local character features are used to make the tagging decisions, with Viterbi decoding finding the highest scoring segmentation. In this paper we propose an alternative, word-based segmentor, which uses features based on complete words and word sequences. The generalized perceptron algorithm is used for discriminative training, and we use a beam-search decoder. Closed tests on the first and second SIGHAN bakeoffs show that our system is competitive with the best in the literature, achieving the highest reported F-scores for a number of corpora.", "phrases": ["word segmentation", "perceptron method", "cws approach", "newswire"], "overall_score": 2.267841836605167, "scores": [1.414807504637044, 0.6047463343902167, 0.5314231561098822, 0.5298704636166937], "rank_score": 0.7702118646884591} -{"id": "brunato-etal-2018-sentence", "title": "Is this Sentence Difficult? Do you Agree?", "abstract": "In this paper, we present a crowdsourcing-based approach to model the human perception of sentence complexity. We collect a large corpus of sentences rated with judgments of complexity for two typologically-different languages, Italian and English. We test our approach in two experimental scenarios aimed to investigate the contribution of a wide set of lexical, morpho-syntactic and syntactic phenomena in predicting i) the degree of agreement among annotators independently from the assigned judgment and ii) the perception of sentence complexity.", "phrases": ["perception", "complexity", "annotator"], "overall_score": 1.2394364641040434, "scores": [0.8892450335625977, 0.856021885527867, 0.5650486038541832], "rank_score": 0.7701051743148826} -{"id": "schlangen-etal-2009-incremental", "title": "Incremental Reference Resolution: The Task, Metrics for Evaluation, and a Bayesian Filtering Model that is Sensitive to Disfluencies", "abstract": "In this paper we do two things: a) we discuss in general terms the task of incremental reference resolution (IRR), in particular resolution of exophoric reference, and specify metrics for measuring the performance of dialogue system components tackling this task, and b) we present a simple Bayesian filtering model of IRR that performs reasonably well just using words directly (no structure information and no hand-coded semantics): it picks the right referent out of 12 for around 50% of real-world dialogue utterances in our test corpus. 
It is also able to learn to interpret not only words but also hesitations, just as humans have been shown to do in similar situations, namely as markers of references to hard-to-describe entities.", "phrases": ["bayesian filtering model", "incremental reference resolution", "system response"], "overall_score": 1.8464404488089396, "scores": [0.9332764125026225, 0.8410122821543157, 0.5357877333108144], "rank_score": 0.7700254759892508} -{"id": "yavuz-etal-2019-deepcopy", "title": "DeepCopy: Grounded Response Generation with Hierarchical Pointer Networks", "abstract": "Recent advances in neural sequence-to-sequence models have led to promising results for several language generation-based tasks, including dialogue response generation, summarization, and machine translation. However, these models are known to have several problems, especially in the context of chit-chat based dialogue systems: they tend to generate short and dull responses that are often too generic. Furthermore, these models do not ground conversational responses on knowledge and facts, resulting in turns that are not accurate, informative and engaging for the users. In this paper, we propose and experiment with a series of response generation models that aim to serve in the general scenario where in addition to the dialogue context, relevant unstructured external knowledge in the form of text is also assumed to be available for models to harness. Our proposed approach extends pointer-generator networks (See et al., 2017) by allowing the decoder to hierarchically attend and copy from external knowledge in addition to the dialogue context. We empirically show the effectiveness of the proposed model compared to several baselines including (Ghazvininejad et al., 2018; Zhang et al., 2018) through both automatic evaluation metrics and human evaluation on the ConvAI2 dataset.", "phrases": ["response generation", "dialogue system", "pointer-generator network", "deepcopy", "copy mechanism"], "overall_score": 1.8462164980229527, "scores": [1.3902095854201793, 0.8132499817273041, 0.5739603283195726, 0.5459360921146137, 0.5263044187053775], "rank_score": 0.7699320812574093} -{"id": "zhao-etal-2010-jointly", "title": "Jointly Modeling Aspects and Opinions with a MaxEnt-LDA Hybrid", "abstract": "Discovering and summarizing opinions from online reviews is an important and challenging task. A commonly-adopted framework generates structured review summaries with aspects and opinions. Recently topic models have been used to identify meaningful review aspects, but existing topic models do not identify aspect-specific opinion words. In this paper, we propose a MaxEnt-LDA hybrid model to jointly discover both aspects and aspect-specific opinion words. We show that with a relatively small amount of training data, our model can effectively identify aspect and opinion words simultaneously. We also demonstrate the domain adaptability of our model.", "phrases": ["maxent-lda", "topic model", "hybrid model", "sentiment word", "aspect-based opinion mining"], "overall_score": 2.225002088517486, "scores": [1.228998817346384, 0.9218712440825763, 0.585470670539115, 0.5700504203988156, 0.5425983114261933], "rank_score": 0.7697978927586167} -{"id": "calzolari-etal-2010-lrec", "title": "The LREC Map of Language Resources and Technologies", "abstract": "In this paper we present the LREC Map of Language Resources and Tools, an innovative feature introduced with this LREC. 
The purpose of the Map is to shed light on the vast amount of resources and tools that represent the background of the research presented at LREC, in the attempt to fill in a gap in the community knowledge about the resources and tools that are used or created worldwide. It also aims at a change of culture in the field, actively engaging each researcher in the documentation task about resources. The Map has been developed on the basis of the information provided by LREC authors during the submission of papers to the LREC 2010 conference and the LREC workshops, and contains information about almost 2000 resources. The paper illustrates the motivation behind this initiative, its main characteristics, its relevance and future impact in the field, the metadata used to describe the resources, and finally presents some of the most relevant findings.", "phrases": ["lrec map", "language resources", "clue-aligner tool"], "overall_score": 1.2389384902271228, "scores": [0.9391218151169737, 0.8172261792309816, 0.5530393028980053], "rank_score": 0.7697957657486536} -{"id": "miehle-etal-2018-causes", "title": "What Causes the Differences in Communication Styles? A Multicultural Study on Directness and Elaborateness", "abstract": "With the aim of designing a Spoken Dialogue System which adapts to the user\u2019s communication idiosyncrasies, we present a multicultural study to investigate the causes of differences in the communication styles elaborateness and directness in Human-Computer Interaction. By adapting the system\u2019s behaviour to the user, the conversation agent may appear more familiar and trustworthy. 339 persons from Germany, Russia, Poland, Spain and the United Kingdom participated in this web-based study. The participants had to imagine that they are talking to a digital agent. For every dialogue turn, they had to read four different variants of the system output and indicate their preference. With the results of this study, we could demonstrate the influence of the user\u2019s culture and gender, the frequency of use of speech based assistants as well as the system\u2019s role on the user\u2019s preference concerning the system\u2019s communication style in terms of its elaborateness and its directness.", "phrases": ["communication style", "multicultural study", "directness", "elaborateness"], "overall_score": 0.8455592278782423, "scores": [0.8593127929822486, 0.8197853826740681, 0.8040193483865243, 0.5955271865614667], "rank_score": 0.769661177651077} -{"id": "klie-etal-2018-inception", "title": "The INCEpTION Platform: Machine-Assisted and Knowledge-Oriented Interactive Annotation", "abstract": "We introduce INCEpTION, a new annotation platform for tasks including interactive and semantic annotation (e.g., concept linking, fact linking, knowledge base population, semantic frame annotation). These tasks are very time consuming and demanding for annotators, especially when knowledge bases are used. We address these issues by developing an annotation platform that incorporates machine learning capabilities which actively assist and guide annotators. The platform is both generic and modular. It targets a range of research domains in need of semantic annotation, such as digital humanities, bioinformatics, or linguistics. 
INCEpTION is publicly available as open-source software.", "phrases": ["annotation platform", "interactive task", "knowledge basis"], "overall_score": 2.030830283530204, "scores": [1.1961593940166688, 0.5749342832137887, 0.5374923402922541], "rank_score": 0.7695286725075706} -{"id": "mayhew-etal-2017-cheap", "title": "Cheap Translation for Cross-Lingual Named Entity Recognition", "abstract": "Recent work in NLP has attempted to deal with low-resource languages but still assumed a resource level that is not present for most languages, e.g., the availability of Wikipedia in the target language. We propose a simple method for cross-lingual named entity recognition (NER) that works well in settings with very minimal resources. Our approach makes use of a lexicon to \u201ctranslate\u201d annotated data available in one or several high resource language(s) into the target language, and learns a standard monolingual NER model there. Further, when Wikipedia is available in the target language, our method can enhance Wikipedia based methods to yield state-of-the-art NER results; we evaluate on 7 diverse languages, improving the state-of-the-art by an average of 5.5% F1 points. With the minimal resources required, this is an extremely portable cross-lingual NER approach, as illustrated using a truly low-resource language, Uyghur.", "phrases": ["entity recognition", "low-resource language", "annotated data", "cheap translation", "name tagging"], "overall_score": 2.378395632541127, "scores": [0.877785710435004, 0.8703240112059895, 0.9642213283619474, 0.5837787471986579, 0.5511289069402319], "rank_score": 0.7694477408283662} -{"id": "kunze-etal-2017-transfer", "title": "Transfer Learning for Speech Recognition on a Budget", "abstract": "End-to-end training of automated speech recognition (ASR) systems requires massive data and compute resources. We explore transfer learning based on model adaptation as an approach for training ASR models under constrained GPU memory, throughput and training data. We conduct several systematic experiments adapting a Wav2Letter convolutional neural network originally trained for English ASR to the German language. We show that this technique allows faster training on consumer-grade resources while requiring less training data in order to achieve the same accuracy, thereby lowering the cost of training ASR models in other languages. Model introspection revealed that small adaptations to the network's weights were sufficient for good performance, especially for inner layers.", "phrases": ["speech recognition", "asr", "transfer learning"], "overall_score": 1.0666475245476115, "scores": [0.9080299969598365, 0.8710553449872724, 0.5291852991148832], "rank_score": 0.7694235470206641} -{"id": "cahill-2009-correlating", "title": "Correlating Human and Automatic Evaluation of a German Surface Realiser", "abstract": "We examine correlations between native speaker judgements on automatically generated German text and automatic evaluation metrics. We look at a number of metrics from the MT and Summarisation communities and find that for a relative ranking task, most automatic metrics perform equally well and have fairly strong correlations to the human judgements. 
In contrast, on a naturalness judgement task, the General Text Matcher (GTM) tool correlates best overall, although in general, correlation between the human judgements and the automatic metrics was quite weak.", "phrases": ["german surface realiser", "automatic metric", "surface realizer"], "overall_score": 1.3785467902211155, "scores": [0.9196330495714473, 0.8573945274962997, 0.5311172613938062], "rank_score": 0.7693816128205176} -{"id": "chen-etal-2009-pairwise", "title": "A Pairwise Event Coreference Model, Feature Impact and Evaluation for Event Coreference Resolution", "abstract": "In past years, there has been substantial work on the problem of entity coreference resolution whereas much less attention has been paid to event coreference resolution. Starting with some motivating examples, we formally state the problem of event coreference resolution in the ACE program, present an agglomerative clustering algorithm for the task, explore the feature impact in the event coreference model and compare three evaluation metrics that were previously adopted in entity coreference resolution: MUC F-Measure, B-Cubed F-Measure and ECM F-Measure.", "phrases": ["feature impact", "event coreference resolution", "symbolic feature"], "overall_score": 2.179612608196466, "scores": [0.9835665945360129, 0.7963030674124449, 0.5280531911989034], "rank_score": 0.769307617715787} -{"id": "gronroos-etal-2014-morfessor", "title": "Morfessor FlatCat: An HMM-Based Method for Unsupervised and Semi-Supervised Learning of Morphology", "abstract": "Morfessor is a family of methods for learning morphological segmentations of words based on unannotated data. We introduce a new variant of Morfessor, FlatCat, that applies a hidden Markov model structure. It builds on previous work on Morfessor, sharing model components with the popular Morfessor Baseline and Categories-MAP variants. Our experiments show that while unsupervised FlatCat does not reach the accuracy of Categories-MAP, with semisupervised learning it provides state-of-the-art results in the Morpho Challenge 2010 tasks for English, Finnish, and Turkish.", "phrases": ["semi-supervised learning", "segmentation", "morfessor flatcat"], "overall_score": 1.8441992541795855, "scores": [0.8909932849457078, 0.8459954657279931, 0.5702837250262711], "rank_score": 0.7690908252333241} -{"id": "riedl-biemann-2013-scaling", "title": "Scaling to Large Data: An Efficient and Effective Method to Compute Distributional Thesauri", "abstract": "We introduce a new highly scalable approach for computing Distributional Thesauri (DTs). By employing pruning techniques and a distributed framework, we make the computation for very large corpora feasible on comparably small computational resources. We demonstrate this by releasing a DT for the whole vocabulary of Google Books syntactic n-grams. 
Evaluating against lexical resources using two measures, we show that our approach produces higher quality DTs than previous approaches, and is thus preferable in terms of speed and quality for large corpora.", "phrases": ["distributional thesauri", "scalable approach", "n-gram", "thesaurus"], "overall_score": 1.4961199374174659, "scores": [1.0128543174971139, 0.8922962512498933, 0.5951919996582354, 0.5750716548954409], "rank_score": 0.7688535558251709} -{"id": "benamara-etal-2018-introduction", "title": "Introduction to the Special Issue on Language in Social Media: Exploiting Discourse and Other Contextual Information", "abstract": "Social media content is changing the way people interact with each other and share information, personal messages, and opinions about situations, objects, and past experiences. Most social media texts are short online conversational posts or comments that do not contain enough information for natural language processing (NLP) tools, as they are often accompanied by non-linguistic contextual information, including meta-data (e.g., the user's profile, the social network of the user, and their interactions with other users). Exploiting such different types of context and their interactions makes the automatic processing of social media texts a challenging research task. Indeed, simply applying traditional text mining tools is clearly sub-optimal, as, typically, these tools take into account neither the interactive dimension nor the particular nature of this data, which shares properties with both spoken and written language. This special issue contributes to a deeper understanding of the role of these interactions to process social media data from a new perspective in discourse interpretation. This introduction first provides the necessary background to understand what context is from both the linguistic and computational linguistic perspectives, then presents the most recent context-based approaches to NLP for social media. We conclude with an overview of the papers accepted in this special issue, highlighting what we believe are the future directions in processing social media texts.", "phrases": ["special issue", "discourse", "contextual information", "social medium"], "overall_score": 0.8442579837729215, "scores": [0.8812974263955313, 0.8391218825556158, 0.782522374114409, 0.5709652538264318], "rank_score": 0.768476734222997} -{"id": "xu-etal-2020-deep", "title": "A Deep Generative Distance-Based Classifier for Out-of-Domain Detection with Mahalanobis Space", "abstract": "Detecting out-of-domain (OOD) input intents is critical in the task-oriented dialog system. Different from most existing methods that rely heavily on manually labeled OOD samples, we focus on the unsupervised OOD detection scenario where there are no labeled OOD samples except for labeled in-domain data. In this paper, we propose a simple but strong generative distance-based classifier to detect OOD samples. We estimate the class-conditional distribution on feature spaces of DNNs via Gaussian discriminant analysis (GDA) to avoid over-confidence problems. And we use two distance functions, Euclidean and Mahalanobis distances, to measure the confidence score of whether a test sample belongs to OOD. 
Experiments on four benchmark datasets show that our method can consistently outperform the baselines.", "phrases": ["distance-based classifier", "out-of-domain detection", "mahalanobis distance"], "overall_score": 1.6883305423884412, "scores": [0.897495592744501, 0.8655727895290447, 0.5421086485176394], "rank_score": 0.7683923435970618} -{"id": "zhang-etal-2018-neural", "title": "Neural Latent Extractive Document Summarization", "abstract": "Extractive summarization models need sentence level labels, which are usually created with rule-based methods since most summarization datasets only have document summary pairs. These labels might be suboptimal. We propose a latent variable extractive model, where sentences are viewed as latent variables and sentences with activated variables are used to infer gold summaries. During training, the loss can come directly from gold summaries. Experiments on CNN/Dailymail dataset show our latent extractive model outperforms a strong extractive baseline trained on rule-based labels and also performs competitively with several recent models.", "phrases": ["latent variable", "extractive model", "gold summary"], "overall_score": 2.176941971363306, "scores": [1.224044895151857, 0.5450343967906862, 0.5360157083301698], "rank_score": 0.7683650000909044} -{"id": "de-cao-etal-2019-question", "title": "Question Answering by Reasoning Across Documents with Graph Convolutional Networks", "abstract": "Most research in reading comprehension has focused on answering questions based on individual documents or even single paragraphs. We introduce a neural model which integrates and reasons relying on information spread within documents and across multiple documents. We frame it as an inference problem on a graph. Mentions of entities are nodes of this graph while edges encode relations between different mentions (e.g., within- and cross-document co-reference). Graph convolutional networks (GCNs) are applied to these graphs and trained to perform multi-step reasoning. Our Entity-GCN method is scalable and compact, and it achieves state-of-the-art results on a multi-document question answering dataset, WikiHop (Welbl et al., 2018).", "phrases": ["reasoning", "convolutional network", "mention"], "overall_score": 2.408822223365243, "scores": [0.8953771979446151, 0.8577982264627814, 0.5515542840332517], "rank_score": 0.7682432361468828} -{"id": "siddharthan-mandya-2014-hybrid", "title": "Hybrid text simplification using synchronous dependency grammars with hand-written and automatically harvested rules", "abstract": "We present an approach to text simplification based on synchronous dependency grammars. The higher level of abstraction afforded by dependency representations allows for a linguistically sound treatment of complex constructs requiring reordering and morphological change, such as conversion of passive voice to active. We present a synchronous grammar formalism in which it is easy to write rules by hand and also acquire them automatically from dependency parses of aligned English and Simple English sentences. The grammar formalism is optimised for monolingual translation in that it reuses ordering information from the source sentence where appropriate. 
We demonstrate the superiority of our approach over a leading contemporary system based on quasi-synchronous tree substitution grammars, both in terms of expressivity and performance.", "phrases": ["text simplification", "synchronous dependency grammar", "clause"], "overall_score": 2.1765903813111684, "scores": [0.8943677944837489, 0.8845530113063135, 0.5258019068966752], "rank_score": 0.7682409042289126} -{"id": "yazdani-henderson-2015-model", "title": "A Model of Zero-Shot Learning of Spoken Language Understanding", "abstract": "When building spoken dialogue systems for a new domain, a major bottleneck is developing a spoken language understanding (SLU) module that handles the new domain\u2019s terminology and semantic concepts. We propose a statistical SLU model that generalises to both previously unseen input words and previously unseen output classes by leveraging unlabelled data. After mapping the utterance into a vector space, the model exploits the structure of the output labels by mapping each label to a hyperplane that separates utterances with and without that label. Both these mappings are initialised with unsupervised word embeddings, so they can be computed even for words or concepts which were not in the SLU training data.", "phrases": ["zero-shot learning", "spoken language understanding", "text classification"], "overall_score": 1.6875917617804876, "scores": [0.9123019257408874, 0.8634626985266801, 0.5284037058897717], "rank_score": 0.7680561100524465} -{"id": "ng-etal-2006-examining", "title": "Examining the Role of Linguistic Knowledge Sources in the Automatic Identification and Classification of Reviews", "abstract": "This paper examines two problems in document-level sentiment analysis: (1) determining whether a given document is a review or not, and (2) classifying the polarity of a review as positive or negative. We first demonstrate that review identification can be performed with high accuracy using only unigrams as features. We then examine the role of four types of simple linguistic knowledge sources in a polarity classification system.", "phrases": ["review identification", "sentiment classification", "low precision"], "overall_score": 2.2198018984378085, "scores": [0.9031966472040588, 0.829691938272307, 0.571107665912644], "rank_score": 0.7679987504630033} -{"id": "zou-etal-2020-pre", "title": "Pre-training for Abstractive Document Summarization by Reinstating Source Text", "abstract": "Abstractive document summarization is usually modeled as a sequence-to-sequence (SEQ2SEQ) learning problem. Unfortunately, training large SEQ2SEQ based summarization models on limited supervised summarization data is challenging. This paper presents three sequence-to-sequence pre-training (in shorthand, STEP) objectives which allow us to pre-train a SEQ2SEQ based abstractive summarization model on unlabeled text. The main idea is that, given an input text artificially constructed from a document, a model is pre-trained to reinstate the original document. These objectives include sentence reordering, next sentence generation and masked document generation, which have close relations with the abstractive document summarization task. Experiments on two benchmark summarization datasets (i.e., CNN/DailyMail and New York Times) show that all three objectives can improve performance upon baselines. 
Compared to models pre-trained on large-scale data (larger than 160GB), our method, with only 19GB text for pre-training, achieves comparable results, which demonstrates its effectiveness.", "phrases": ["abstractive document summarization", "sequence-to-sequence", "objective", "next sentence generation"], "overall_score": 1.4944556234243633, "scores": [0.9464989223104444, 1.0439912329458716, 0.5531040844910371, 0.5283988307443777], "rank_score": 0.7679982676229327} -{"id": "tiedemann-2017-cross", "title": "Cross-lingual dependency parsing for closely related languages - Helsinki's submission to VarDial 2017", "abstract": "This paper describes the submission from the University of Helsinki to the shared task on cross-lingual dependency parsing at VarDial 2017. We present work on annotation projection and treebank translation that gave good results for all three target languages in the test set. In particular, Slovak seems to work well with information coming from the Czech treebank, which is in line with related work. The attachment scores for cross-lingual models even surpass the fully supervised models trained on the target language treebank. Croatian is the most difficult language in the test set and the improvements over the baseline are rather modest. Norwegian works best with information coming from Swedish whereas Danish contributes surprisingly little.", "phrases": ["helsinki", "annotation projection", "cross-lingual dependency"], "overall_score": 0.8436156943727253, "scores": [0.9856041780793401, 0.7890631046692732, 0.5290090088996622], "rank_score": 0.7678920972160919} -{"id": "yoshino-etal-2018-dialogue", "title": "Dialogue Scenario Collection of Persuasive Dialogue with Emotional Expressions via Crowdsourcing", "abstract": "Existing dialogue data collection methods such as the Wizard of Oz method (WoZ) or real dialogue recording are costly, and they prevent launching a new dialogue system. In this study, we requested crowd workers in crowdsourcing to create dialogue scenarios according to the instruction of the situation for persuasive dialogue systems that use emotional expressions. We collected 200 dialogues in 5 scenarios for a total of 1,000 via crowdsourcing. We also annotated emotional states and users\u2019 acceptance for system persuasion by using crowdsourcing. We constructed a persuasive dialogue system with the collected data and evaluated the system by interacting with crowd workers. The experiment showed that the collected labels have sufficient agreement even though we did not give workers any annotation training.", "phrases": ["persuasive dialogue", "emotional expression", "crowdsourcing"], "overall_score": 1.0644716390818396, "scores": [0.9185657645632906, 0.8379404447786156, 0.5470557229635682], "rank_score": 0.7678539774351583} -{"id": "lin-etal-2019-reasoning", "title": "Reasoning Over Paragraph Effects in Situations", "abstract": "A key component of successfully reading a passage of text is the ability to apply knowledge gained from the passage to a new situation. In order to facilitate progress on this kind of reading, we present ROPES, a challenging benchmark for reading comprehension targeting Reasoning Over Paragraph Effects in Situations. We target expository language describing causes and effects (e.g., \u201canimal pollinators increase efficiency of fertilization in flowers\u201d), as they have clear implications for new situations. 
A system is presented with a background passage containing at least one of these relations, a novel situation that uses this background, and questions that require reasoning about effects of the relationships in the background passage in the context of the situation. We collect background passages from science textbooks and Wikipedia that contain such phenomena, and ask crowd workers to author situations, questions, and answers, resulting in a 14,322 question dataset. We analyze the challenges of this task and evaluate the performance of state-of-the-art reading comprehension models. The best model performs only slightly better than randomly guessing an answer of the correct type, at 61.6% F1, well below the human performance of 89.0%.", "phrases": ["paragraph effects", "situation", "rope"], "overall_score": 1.2355738369090645, "scores": [0.9408370813146139, 0.834239961535842, 0.52803852444793], "rank_score": 0.7677051890994621} -{"id": "quan-etal-2012-ku", "title": "KU Leuven at HOO-2012: A Hybrid Approach to Detection and Correction of Determiner and Preposition Errors in Non-native English Text", "abstract": "In this paper we describe the technical implementation of our system that participated in the Helping Our Own 2012 Shared Task (HOO-2012). The system employs a number of preprocessing steps and machine learning classifiers for correction of determiner and preposition errors in non-native English texts. We use maximum entropy classifiers trained on the provided HOO-2012 development data and a large high-quality English text collection. The system proposes a number of highly-probable corrections, which are evaluated by a language model and compared with the original text. A number of deterministic rules are used to increase the precision and recall of the system. Our system is ranked among the three best performing HOO-2012 systems with a precision of 31.15%, recall of 22.08% and F1-score of 25.84% for correction of determiner and preposition errors combined.", "phrases": ["determiner", "preposition error", "non-native english text", "article correction"], "overall_score": 1.0642480658312232, "scores": [0.8790903489201807, 0.878827029096719, 0.7877980044395375, 0.5250554312443168], "rank_score": 0.7676927034251885} -{"id": "saleh-etal-2019-team", "title": "Team QCRI-MIT at SemEval-2019 Task 4: Propaganda Analysis Meets Hyperpartisan News Detection", "abstract": "We describe our submission to SemEval-2019 Task 4 on Hyperpartisan News Detection. We rely on a variety of engineered features originally used to detect propaganda. This is based on the assumption that biased messages are propagandistic and promote a particular political cause or viewpoint. In particular, we trained a logistic regression model with features ranging from simple bag of words to vocabulary richness and text readability. Our system achieved 72.9% accuracy on the manually annotated test set, and 60.8% on the test data that was obtained with distant supervision. Additional experiments showed that significant performance gains can be achieved with better feature pre-processing.", "phrases": ["semeval-2019 task", "propaganda", "hyperpartisan news detection"], "overall_score": 1.2355125741465383, "scores": [0.9003101215988989, 0.8347201220423568, 0.567971129573495], "rank_score": 0.7676671244049169} -{"id": "han-etal-2020-isobs", "title": "IsOBS: An Information System for Oracle Bone Script", "abstract": "Oracle bone script (OBS) is the earliest known ancient Chinese writing system and the ancestor of modern Chinese. 
As the Chinese writing system is the oldest continuously-used system in the world, the study of OBS plays an important role in both linguistic and historical research. In order to utilize advanced machine learning methods to automatically process OBS, we construct an information system for OBS (IsOBS) to symbolize, serialize, and store OBS data at the character-level, based on efficient databases and retrieval modules. Moreover, we also apply few-shot learning methods to build an effective OBS character recognition module, which can recognize a large number of OBS characters (especially those characters with a handful of examples) and make the system easy to use. The demo system of IsOBS can be found from . In the future, we will add more OBS data to the system, and hopefully our IsOBS can support further efforts in automatically processing OBS and advance the scientific progress in this field.", "phrases": ["information system", "oracle bone script", "isobs"], "overall_score": 0.8432410389519848, "scores": [0.9550397432353729, 0.8093720101403568, 0.5382414600912668], "rank_score": 0.7675510711556655} -{"id": "beltagy-etal-2013-montague", "title": "Montague Meets Markov: Deep Semantics with Probabilistic Logical Form", "abstract": "We combine logical and distributional representations of natural language meaning by transforming distributional similarity judgments into weighted inference rules using Markov Logic Networks (MLNs). We show that this framework supports both judging sentence similarity and recognizing textual entailment by appropriately adapting the MLN implementation of logical connectives. We also show that distributional phrase similarity, used as textual inference rules created on the fly, improves its performance.", "phrases": ["logic", "sentence similarity", "predicate-argument representation"], "overall_score": 1.9687254078359937, "scores": [0.8684395740614883, 0.8680079141122512, 0.5662007907588196], "rank_score": 0.7675494263108531} -{"id": "gilmanov-etal-2014-swift", "title": "SWIFT Aligner, A Multifunctional Tool for Parallel Corpora: Visualization, Word Alignment, and (Morpho)-Syntactic Cross-Language Transfer", "abstract": "It is well known that word aligned parallel corpora are valuable linguistic resources. Since many factors affect automatic alignment quality, manual post-editing may be required in some applications. While there are several state-of-the-art word-aligners, such as GIZA++ and Berkeley, there is no simple visual tool that would enable correcting and editing aligned corpora of different formats. We have developed SWIFT Aligner, a free, portable software that allows for visual representation and editing of aligned corpora from several most commonly used formats: TALP, GIZA, and NAACL. In addition, our tool has incorporated part-of-speech and syntactic dependency transfer from an annotated source language into an unannotated target language, by means of word-alignment.", "phrases": ["visualization", "word alignment", "editing"], "overall_score": 0.8430393607725045, "scores": [0.9288021445303261, 0.8226223454602366, 0.5506779973060809], "rank_score": 0.7673674957655479} -{"id": "hardmeier-etal-2012-document", "title": "Document-Wide Decoding for Phrase-Based Statistical Machine Translation", "abstract": "Independence between sentences is an assumption deeply entrenched in the models and algorithms used for statistical machine translation (SMT), particularly in the popular dynamic programming beam search decoding algorithm. 
This restriction is an obstacle to research on more sophisticated discourse-level models for SMT. We propose a stochastic local search decoding method for phrase-based SMT, which permits free document-wide dependencies in the models. We explore the stability and the search parameters of this method and demonstrate that it can be successfully used to optimise a document-level semantic language model.", "phrases": ["statistical machine translation", "local search", "phrase-based smt", "semantic language model", "document-level decoder"], "overall_score": 2.1274061499586976, "scores": [0.9442319797022475, 1.1545794442106474, 0.6119012299777428, 0.5660308630214856, 0.5597543612154955], "rank_score": 0.7672995756255239} -{"id": "hou-etal-2018-unrestricted", "title": "Unrestricted Bridging Resolution", "abstract": "In contrast to identity anaphors, which indicate coreference between a noun phrase and its antecedent, bridging anaphors link to their antecedent(s) via lexico-semantic, frame, or encyclopedic relations. Bridging resolution involves recognizing bridging anaphors and finding links to antecedents. In contrast to most prior work, we tackle both problems. Our work also follows a more wide-ranging definition of bridging than most previous work and does not impose any restrictions on the type of bridging anaphora or relations between anaphor and antecedent. We create a corpus (ISNotes) annotated for information status (IS), bridging being one of the IS subcategories. The annotations reach high reliability for all categories and marginal reliability for the bridging subcategory. We use a two-stage statistical global inference method for bridging resolution. Given all mentions in a document, the first stage, bridging anaphora recognition, recognizes bridging anaphors as a subtask of learning fine-grained IS. We use a cascading collective classification method where (i) collective classification allows us to investigate relations among several mentions and autocorrelation among IS classes and (ii) cascaded classification allows us to tackle class imbalance, important for minority classes such as bridging. We show that our method outperforms current methods both for IS recognition overall as well as for bridging, specifically. The second stage, bridging antecedent selection, finds the antecedents for all predicted bridging anaphors. We investigate the phenomenon of semantically or syntactically related bridging anaphors that share the same antecedent, a phenomenon we call sibling anaphors. We show that taking sibling anaphors into account in a joint inference model improves antecedent selection performance. In addition, we develop semantic and salience features for antecedent selection and suggest a novel method to build the candidate antecedent list for an anaphor, using the discourse scope of the anaphor. Our model outperforms previous work significantly.", "phrases": ["resolution", "coreference", "isnotes", "subtask", "wall street journal"], "overall_score": 1.685794167507073, "scores": [1.367603433669869, 0.8845327894383559, 0.5385378110905122, 0.5250128356943068, 0.5205030783161337], "rank_score": 0.7672379896418355} -{"id": "liang-etal-2020-alice", "title": "ALICE: Active Learning with Contrastive Natural Language Explanations", "abstract": "Training a supervised neural network classifier typically requires many annotated training samples. Collecting and annotating a large number of data points are costly and sometimes even infeasible. 
The traditional annotation process uses a low-bandwidth human-machine communication interface: classification labels, each of which only provides a few bits of information. We propose Active Learning with Contrastive Explanations (ALICE), an expert-in-the-loop training framework that utilizes contrastive natural language explanations to improve data efficiency in learning. ALICE learns to first use active learning to select the most informative pairs of label classes to elicit contrastive natural language explanations from experts. Then it extracts knowledge from these explanations using a semantic parser. Finally, it incorporates the extracted knowledge through dynamically changing the learning model's structure. We applied ALICE in two visual recognition tasks, bird species classification and social relationship classification. We found that by incorporating contrastive explanations, our models outperform baseline models that are trained with 40-100% more training data. We found that adding 1 explanation leads to a similar performance gain as adding 13-30 labeled training data points.", "phrases": ["active learning", "contrastive explanation", "alice"], "overall_score": 0.8426660594550696, "scores": [0.9149796200059223, 0.7984521389205657, 0.5876513478627136], "rank_score": 0.767027702263067} -{"id": "foster-andersen-2009-generrate", "title": "GenERRate: Generating Errors for Use in Grammatical Error Detection", "abstract": "This paper explores the issue of automatically generated ungrammatical data and its use in error detection, with a focus on the task of classifying a sentence as grammatical or ungrammatical. We present an error generation tool called GenERRate and show how GenERRate can be used to improve the performance of a classifier on learner data. We describe initial attempts to replicate Cambridge Learner Corpus errors using GenERRate.", "phrases": ["generrate", "artificial error", "error-corrected data"], "overall_score": 1.8390748860372907, "scores": [0.8452273204675065, 0.9279660180169278, 0.5276680547127796], "rank_score": 0.7669537977324046} -{"id": "hangya-fraser-2019-unsupervised", "title": "Unsupervised Parallel Sentence Extraction with Parallel Segment Detection Helps Machine Translation", "abstract": "Mining parallel sentences from comparable corpora is important. Most previous work relies on supervised systems, which are trained on parallel data, and thus their applicability is problematic in low-resource scenarios. Recent developments in building unsupervised bilingual word embeddings made it possible to mine parallel sentences based on cosine similarities of source and target language words. We show that relying only on this information is not enough, since sentences often have similar words but different meanings. We detect continuous parallel segments in sentence pair candidates and rely on them when mining parallel sentences. We show better mining accuracy on three language pairs in a standard shared task on artificial data. We also provide the first experiments showing that parallel sentences mined from real-life sources improve unsupervised MT. 
Our code is available; we hope it will be used to support low-resource MT research.", "phrases": ["parallel segment detection", "bilingual word embedding", "bitext mining"], "overall_score": 1.374087180578125, "scores": [0.8516108895556024, 0.913977734234148, 0.5350893480757322], "rank_score": 0.7668926572884942} -{"id": "camacho-collados-pilehvar-2018-role", "title": "On the Role of Text Preprocessing in Neural Network Architectures: An Evaluation Study on Text Categorization and Sentiment Analysis", "abstract": "Text preprocessing is often the first step in the pipeline of a Natural Language Processing (NLP) system, with potential impact on its final performance. Despite its importance, text preprocessing has not received much attention in the deep learning literature. In this paper we investigate the impact of simple text preprocessing decisions (particularly tokenizing, lemmatizing, lowercasing and multiword grouping) on the performance of a standard neural text classifier. We perform an extensive evaluation on standard benchmarks from text categorization and sentiment analysis. While our experiments show that a simple tokenization of input text is generally adequate, they also highlight significant degrees of variability across preprocessing techniques. This reveals the importance of paying attention to this usually-overlooked step in the pipeline, particularly when comparing different models. Finally, our evaluation provides insights into the best preprocessing practices for training word embeddings.", "phrases": ["text categorization", "sentiment analysis", "tokenizing"], "overall_score": 1.2342584606772018, "scores": [0.894727295141763, 0.8753861473461744, 0.5305502571956124], "rank_score": 0.7668878998945164} -{"id": "park-etal-2018-plusemo2vec", "title": "PlusEmo2Vec at SemEval-2018 Task 1: Exploiting emotion knowledge from emoji and #hashtags", "abstract": "This paper describes our system that has been submitted to SemEval-2018 Task 1: Affect in Tweets (AIT) to solve five subtasks. We focus on modeling both sentence and word level representations of emotion inside texts through large distantly labeled corpora with emojis and hashtags. We transfer the emotional knowledge by exploiting neural network models as feature extractors and use these representations for traditional machine learning models such as support vector regression (SVR) and logistic regression to solve the competition tasks. Our system placed among the top 3 for all subtasks in which we participated.", "phrases": ["semeval-2018 task", "emojis", "hashtag"], "overall_score": 0.8417944632508243, "scores": [0.8759777338452954, 0.8147283902254219, 0.6079968995538347], "rank_score": 0.7662343412081839} -{"id": "zhikov-etal-2010-efficient", "title": "An Efficient Algorithm for Unsupervised Word Segmentation with Branching Entropy and MDL", "abstract": "This paper proposes a fast and simple unsupervised word segmentation algorithm that utilizes the local predictability of adjacent character sequences, while searching for a least-effort representation of the data. The model uses branching entropy as a means of constraining the hypothesis space, in order to efficiently obtain a solution that minimizes the length of a two-part MDL code. 
An evaluation with corpora in Japanese, Thai, English, and the \"CHILDES\" corpus for research in language development reveals that the algorithm achieves accuracy comparable to that of state-of-the-art methods in unsupervised word segmentation, in significantly reduced computational time.", "phrases": ["unsupervised word segmentation", "mdl", "length"], "overall_score": 1.2331259861516943, "scores": [0.9550993604794704, 0.7977050022315436, 0.5457483990169457], "rank_score": 0.7661842539093199} -{"id": "majumder-etal-2020-like", "title": "Like hiking? You probably enjoy nature: Persona-grounded Dialog with Commonsense Expansions", "abstract": "Existing persona-grounded dialog models often fail to capture simple implications of given persona descriptions, something which humans are able to do seamlessly. For example, state-of-the-art models cannot infer that interest in hiking might imply love for nature or longing for a break. In this paper, we propose to expand available persona sentences using existing commonsense knowledge bases and paraphrasing resources to imbue dialog models with access to an expanded and richer set of persona descriptions. Additionally, we introduce fine-grained grounding on personas by encouraging the model to make a discrete choice among persona sentences while synthesizing a dialog response. Since such a choice is not observed in the data, we model it using a discrete latent random variable and use variational learning to sample from hundreds of persona expansions. Our model outperforms competitive baselines on the Persona-Chat dataset in terms of dialog quality and diversity while achieving persona-consistent and controllable dialog generation.", "phrases": ["nature", "dialog", "persona"], "overall_score": 1.8368132649890205, "scores": [0.859936109241222, 0.8538368685480525, 0.5842589077051725], "rank_score": 0.766010628498149} -{"id": "vaswani-etal-2016-supertagging", "title": "Supertagging With LSTMs", "abstract": "In this paper we present new state-of-the-art performance on CCG supertagging and parsing. Our model outperforms existing approaches by an absolute gain of 1.5%. We analyze the performance of several neural models and demonstrate that while feed-forward architectures can compete with bidirectional LSTMs on POS tagging, models that encode the complete sentence are necessary for the long range syntactic information encoded in supertags.", "phrases": ["bidirectional lstm", "supertag", "rnn"], "overall_score": 1.8365535237802448, "scores": [1.2153575513189647, 0.5561967445216401, 0.5261526281615008], "rank_score": 0.7659023080007019} -{"id": "strzalkowski-etal-2010-modeling", "title": "Modeling Socio-Cultural Phenomena in Discourse", "abstract": "In this paper, we describe a novel approach to computational modeling and understanding of social and cultural phenomena in multi-party dialogues. &#13;
We developed a two-tier approach in which we first detect and classify certain social language uses, including topic control, disagreement, and involvement, that serve as first-order models from whose presence higher-level social constructs, such as leadership, may be inferred.", "phrases": ["language use", "construct", "modeling"], "overall_score": 1.903149445106191, "scores": [0.9117268423811328, 0.8450589538862742, 0.5408652385534277], "rank_score": 0.7658836782736116} -{"id": "agirre-soroa-2008-using", "title": "Using the Multilingual Central Repository for Graph-Based Word Sense Disambiguation", "abstract": "This paper presents the results of a graph-based method for performing knowledge-based Word Sense Disambiguation (WSD). The technique exploits the structural properties of the graph underlying the chosen knowledge base. The method is general, in the sense that it is not tied to any particular knowledge base, but in this work we have applied it to the Multilingual Central Repository (MCR). The evaluation has been performed on the Senseval-3 all-words task. The main contributions of the paper are twofold: (1) We have evaluated the separate and combined performance of each type of relation in the MCR, and thus indirectly validated the contents of the MCR and their potential for WSD. (2) We obtain state-of-the-art results, and in fact yield the best results that can be obtained using publicly available data.", "phrases": ["multilingual central repository", "word sense disambiguation", "pagerank"], "overall_score": 1.2322797111273145, "scores": [0.9234465700836947, 0.8459838769289846, 0.527544854004603], "rank_score": 0.7656584336724274} -{"id": "le-etal-2020-dual", "title": "Dual-decoder Transformer for Joint Automatic Speech Recognition and Multilingual Speech Translation", "abstract": "We introduce dual-decoder Transformer, a new model architecture that jointly performs automatic speech recognition (ASR) and multilingual speech translation (ST). Our models are based on the original Transformer architecture (Vaswani et al., 2017) but consist of two decoders, each responsible for one task (ASR or ST). Our major contribution lies in how these decoders interact with each other: one decoder can attend to different information sources from the other via a dual-attention mechanism. We propose two variants of these architectures corresponding to two different levels of dependencies between the decoders, called the parallel and cross dual-decoder Transformers, respectively. Extensive experiments on the MuST-C dataset show that our models outperform the previously-reported highest translation performance in the multilingual settings, and also outperform bilingual one-to-one results. Furthermore, our parallel models demonstrate no trade-off between ASR and ST compared to the vanilla multi-task architecture. Our code and pre-trained models are available at .", "phrases": ["speech recognition", "multilingual speech translation", "dual-decoder transformer"], "overall_score": 1.2321991873877138, "scores": [0.8605560790020859, 0.8578759678217498, 0.5783931575560204], "rank_score": 0.7656084014599521} -{"id": "nie-etal-2020-named", "title": "Named Entity Recognition for Social Media Texts with Semantic Augmentation", "abstract": "Existing approaches for named entity recognition suffer from data sparsity problems when conducted on short and informal texts, especially user-generated social media content. Semantic augmentation is a potential way to alleviate this problem. &#13;
Given that rich semantic information is implicitly preserved in pre-trained word embeddings, they are potentially ideal resources for semantic augmentation. In this paper, we propose a neural-based approach to NER for social media texts where both local (from running text) and augmented semantics are taken into account. In particular, we obtain the augmented semantic information from a large-scale corpus, and propose an attentive semantic augmentation module and a gate module to encode and aggregate such information, respectively. Extensive experiments are performed on three benchmark datasets collected from English and Chinese social media platforms, where the results demonstrate the superiority of our approach to previous studies across all three datasets.", "phrases": ["entity recognition", "semantic augmentation", "word embedding"], "overall_score": 1.371675841057931, "scores": [0.8936260493750144, 0.8471169144277726, 0.555897625431366], "rank_score": 0.765546863078051} -{"id": "hitschler-etal-2016-multimodal", "title": "Multimodal Pivots for Image Caption Translation", "abstract": "We present an approach to improve statistical machine translation of image descriptions by multimodal pivots defined in visual space. The key idea is to perform image retrieval over a database of images that are captioned in the target language, and use the captions of the most similar images for crosslingual reranking of translation outputs. Our approach does not depend on the availability of large amounts of in-domain parallel data, but only relies on available large datasets of monolingually captioned images, and on state-of-the-art convolutional neural networks to compute image similarities. Our experimental evaluation shows improvements of 1 BLEU point over strong baselines.", "phrases": ["pivot", "image caption translation", "image feature", "translation quality", "useful complementary information"], "overall_score": 2.2537739550162668, "scores": [0.9940399561469451, 1.1131929302583088, 0.5940735256234422, 0.5808938179395876, 0.5449701936047351], "rank_score": 0.7654340847146038} -{"id": "peyrard-eckle-kohler-2016-general", "title": "A General Optimization Framework for Multi-Document Summarization Using Genetic Algorithms and Swarm Intelligence", "abstract": "Extracting summaries via integer linear programming or submodular optimization is a popular and successful approach in extractive multi-document summarization. However, many interesting optimization objectives are neither submodular nor factorizable into an integer linear program. We address this issue and present a general optimization framework where any function of input documents and a system summary can be plugged in. Our framework includes two kinds of summarizers \u2013 one based on genetic algorithms, the other using a swarm intelligence approach. In our experimental evaluation, we investigate the optimization of two information-theoretic summary evaluation metrics and find that our framework yields competitive results compared to several strong summarization baselines. &#13;
Our comparative analysis of the genetic and swarm summarizers reveals interesting complementary properties.", "phrases": ["general optimization framework", "multi-document summarization", "genetic algorithm"], "overall_score": 1.0606657701434101, "scores": [0.9512728877800958, 0.8013066398357149, 0.5427463423240458], "rank_score": 0.7651086233132856} -{"id": "marcinczuk-2015-automatic", "title": "Automatic construction of complex features in Conditional Random Fields for Named Entities Recognition", "abstract": "Conditional Random Fields (CRFs) have been proven to be very useful in many sequence labelling tasks from the field of natural language processing, including named entity recognition (NER). The advantage of CRFs over other statistical models (like Hidden Markov Models) is that they can utilize a large set of features describing a sequence of observations. On the other hand, the CRF potential function is defined as a linear combination of features, which means that it cannot model relationships between combinations of input features and output labels. This limitation can be overcome by defining the relationships between atomic features as complex features before training the CRFs. In this paper we present the experimental results of automatic generation of complex features for the named entity recognition task for Polish. A rule-induction algorithm called RIPPER is used to generate a set of rules which are later transformed into a set of complex features. The extended set of features is used to train a CRF model.", "phrases": ["complex feature", "conditional random fields", "crfs"], "overall_score": 1.0606617609310385, "scores": [0.9189632541720653, 0.8392587171449789, 0.5370952225166028], "rank_score": 0.7651057312778823} -{"id": "wuebker-etal-2018-compact", "title": "Compact Personalized Models for Neural Machine Translation", "abstract": "We propose and compare methods for gradient-based domain adaptation of self-attentive neural machine translation models. We demonstrate that a large proportion of model parameters can be frozen during adaptation with minimal or no reduction in translation quality by encouraging structured sparsity in the set of offset tensors during learning via group lasso regularization. We evaluate this technique for both batch and incremental adaptation across multiple data sets and language pairs. Our system architecture\u2013combining a state-of-the-art self-attentive model with compact domain adaptation\u2013provides high quality personalized machine translation that is both space and time efficient.", "phrases": ["neural machine translation", "domain adaptation", "model parameter", "translation quality", "offset tensor"], "overall_score": 1.9011120854536874, "scores": [0.944018237628555, 0.8909783525638578, 0.8741853923117553, 0.5793212943464914, 0.5368156453226959], "rank_score": 0.765063784434671} -{"id": "sim-etal-2013-measuring", "title": "Measuring Ideological Proportions in Political Speeches", "abstract": "We seek to measure political candidates\u2019 ideological positioning from their speeches. To accomplish this, we infer ideological cues from a corpus of political writings annotated with known ideologies. We then represent the speeches of U.S. Presidential candidates as sequences of cues and lags (filler distinguished only by its length in words). We apply a domain-informed Bayesian HMM to infer the proportions of ideologies each candidate uses in each campaign. &#13;
The results are validated against a set of preregistered, domain-expert-authored hypotheses.", "phrases": ["ideology", "proportion", "computational linguistic"], "overall_score": 1.6809993363141134, "scores": [1.2232522295626302, 0.542280022272675, 0.5296350519348728], "rank_score": 0.7650557679233927} -{"id": "belz-kow-2010-extracting", "title": "Extracting Parallel Fragments from Comparable Corpora for Data-to-text Generation", "abstract": "Building NLG systems, in particular statistical ones, requires parallel data (paired inputs and outputs) which do not generally occur naturally. In this paper, we investigate the idea of automatically extracting parallel resources for data-to-text generation from comparable corpora obtained from the Web. We describe our comparable corpus of data and texts relating to British hills and the techniques for extracting paired input/output fragments we have developed so far.", "phrases": ["comparable corpora", "data-to-text generation", "parallel resource"], "overall_score": 1.2312400173268194, "scores": [0.9409589186346716, 0.8151949788229372, 0.5388834093211952], "rank_score": 0.7650124355929346} -{"id": "raaijmakers-etal-2008-multimodal", "title": "Multimodal Subjectivity Analysis of Multiparty Conversation", "abstract": "We investigate the combination of several sources of information for the purpose of subjectivity recognition and polarity classification in meetings. We focus on features from two modalities, transcribed words and acoustics, and we compare the performance of three different textual representations: words, characters, and phonemes. Our experiments show that character-level features outperform word-level features for these tasks, and that a careful fusion of all features yields the best performance.", "phrases": ["subjectivity", "multiparty conversation", "polarity classification", "prosodic feature"], "overall_score": 1.488270393697477, "scores": [0.8980213647808034, 1.076723785416829, 0.5557293089708996, 0.5288042941079069], "rank_score": 0.7648196883191097} -{"id": "ding-etal-2020-hashtags", "title": "Hashtags, Emotions, and Comments: A Large-Scale Dataset to Understand Fine-Grained Social Emotions to Online Topics", "abstract": "This paper studies social emotions toward online discussion topics. While most prior work focuses on emotions from writers, we investigate readers' responses and explore public feelings toward an online topic. A large-scale dataset is collected from the Chinese microblog Sina Weibo with over 13 thousand trending topics, emotion votes in 24 fine-grained types from massive participants, and user comments to allow context understanding. In experiments, we examine baseline performance to predict a topic's possible social emotions in a multilabel classification setting. The results show that a seq2seq model with user comment modeling performs the best, even surpassing human prediction. &#13;
More analyses shed light on the effects of emotion types, topic description lengths, contexts from user comments, and the limited capacity of the existing models.", "phrases": ["emotion", "large-scale dataset", "online topic"], "overall_score": 0.8401344536353407, "scores": [0.8688048250086866, 0.8488423440641154, 0.5765228369459771], "rank_score": 0.764723335339593} -{"id": "getman-etal-2018-laying", "title": "Laying the Groundwork for Knowledge Base Population: Nine Years of Linguistic Resources for TAC KBP", "abstract": "Knowledge Base Population (KBP) is an evaluation series within the Text Analysis Conference (TAC) evaluation campaign conducted by the National Institute of Standards and Technology (NIST). Over the past nine years TAC KBP evaluations have targeted information extraction technologies for the population of knowledge bases comprised of entities, relations, and events. Linguistic Data Consortium (LDC) has supported TAC KBP since 2009, developing, maintaining, and distributing linguistic resources in three languages for seven distinct evaluation tracks. This paper describes LDC's resource creation efforts for the various KBP tracks, and highlights changes made over the years to support evolving evaluation requirements.", "phrases": ["knowledge base population", "tac kbp", "few language"], "overall_score": 1.680177569598549, "scores": [0.9055755265131376, 0.8426780325743738, 0.5457917382332023], "rank_score": 0.7646817657735712} -{"id": "mann-mccallum-2007-efficient", "title": "Efficient Computation of Entropy Gradient for Semi-Supervised Conditional Random Fields", "abstract": "Entropy regularization is a straightforward and successful method of semi-supervised learning that augments the traditional conditional likelihood objective function with an additional term that aims to minimize the predicted label entropy on unlabeled data. It has previously been demonstrated to provide positive results in linear-chain CRFs, but the published method for calculating the entropy gradient requires significantly more computation than supervised CRF training. This paper presents a new derivation and dynamic program for calculating the entropy gradient that is significantly more efficient---having the same asymptotic time complexity as supervised CRF training. We also present efficient generalizations of this method for calculating the label entropy of all sub-sequences, which is useful for active learning, among other applications.", "phrases": ["entropy gradient", "unlabeled data", "crf"], "overall_score": 1.0599296143669703, "scores": [0.8780477908147214, 0.8731982452039353, 0.5424867614892126], "rank_score": 0.7645775991692897} -{"id": "glavas-etal-2016-unsupervised", "title": "Unsupervised Text Segmentation Using Semantic Relatedness Graphs", "abstract": "Segmenting text into semantically coherent fragments improves readability of text and facilitates tasks like text summarization and passage retrieval. In this paper, we present a novel unsupervised algorithm for linear text segmentation (TS) that exploits word embeddings and a measure of semantic relatedness of short texts to construct a semantic relatedness graph of the document. Semantically coherent segments are then derived from maximal cliques of the relatedness graph. &#13;
The algorithm performs competitively on a standard synthetic dataset and outperforms the best-performing method on a real-world (i.e., non-artificial) dataset of political manifestos.", "phrases": ["text segmentation", "semantic relatedness graph", "fragment", "clique"], "overall_score": 1.679834136458048, "scores": [1.0163371296512513, 0.8733271800401077, 0.620227110700026, 0.5482104300704845], "rank_score": 0.7645254626154674} -{"id": "belz-reiter-2006-comparing", "title": "Comparing Automatic and Human Evaluation of NLG Systems", "abstract": "We consider the evaluation problem in Natural Language Generation (NLG) and present results for evaluating several NLG systems with similar functionality, including a knowledge-based generator and several statistical systems. We compare evaluation results for these systems by human domain experts, human non-experts, and several automatic evaluation metrics, including NIST, BLEU, and ROUGE. We find that NIST scores correlate best (>0.8) with human judgments, but that all automatic metrics we examined are biased in favour of generators that select on the basis of frequency alone. We conclude that automatic evaluation of NLG systems has considerable potential, in particular where high-quality reference texts and only a small number of human evaluators are available. However, in general it is probably best for automatic evaluations to be supported by human-based evaluations, or at least by studies that demonstrate that a particular metric correlates well with human judgments in a given domain.", "phrases": ["human evaluation", "natural language generation", "domain expert", "rouge"], "overall_score": 2.017151064956774, "scores": [1.4510577260405766, 0.5534706293209974, 0.531475130117557, 0.5213777124516455], "rank_score": 0.7643452994826941} -{"id": "swayamdipta-etal-2020-dataset", "title": "Dataset Cartography: Mapping and Diagnosing Datasets with Training Dynamics", "abstract": "Large datasets have become commonplace in NLP research. However, the increased emphasis on data quantity has made it challenging to assess the quality of data. We introduce Data Maps\u2014a model-based tool to characterize and diagnose datasets. We leverage a largely ignored source of information: the behavior of the model on individual instances during training (training dynamics) for building data maps. This yields two intuitive measures for each example\u2014the model's confidence in the true class, and the variability of this confidence across epochs\u2014obtained in a single run of training. Experiments on four datasets show that these model-dependent measures reveal three distinct regions in the data map, each with pronounced characteristics. First, our data maps show the presence of \u201cambiguous\u201d regions with respect to the model, which contribute the most towards out-of-distribution generalization. Second, the most populous regions in the data are \u201ceasy to learn\u201d for the model, and play an important role in model optimization. Finally, data maps uncover a region with instances that the model finds \u201chard to learn\u201d; these often correspond to labeling errors. &#13;
Our results indicate that a shift in focus from quantity to quality of data could lead to robust models and improved out-of-distribution generalization.", "phrases": ["data map", "region", "model optimization", "easy-to-learn"], "overall_score": 2.2503773655656234, "scores": [1.1465515670792685, 0.8464058612733886, 0.5372216044637228, 0.5269430627519879], "rank_score": 0.7642805238920919} -{"id": "chung-etal-2019-conan", "title": "CONAN - COunter NArratives through Nichesourcing: a Multilingual Dataset of Responses to Fight Online Hate Speech", "abstract": "Although there is an unprecedented effort to provide adequate responses in terms of laws and policies to hate content on social media platforms, dealing with hatred online is still a tough problem. Tackling hate speech in the standard way of content deletion or user suspension may be charged with censorship and overblocking. One alternate strategy, which has received little attention so far from the research community, is to actually oppose hate content with counter-narratives (i.e. informed textual responses). In this paper, we describe the creation of the first large-scale, multilingual, expert-based dataset of hate-speech/counter-narrative pairs. This dataset has been built with the effort of more than 100 operators from three different NGOs that applied their training and expertise to the task. Together with the collected data we also provide additional annotations about expert demographics, hate and response type, and data augmentation through translation and paraphrasing. Finally, we provide initial experiments to assess the quality of our data.", "phrases": ["hate speech", "counter-narratives", "textual response"], "overall_score": 2.1184532249880648, "scores": [0.9322889038192393, 0.8359259932612743, 0.5239965744536209], "rank_score": 0.7640704905113781} -{"id": "agirre-etal-2016-semeval-2016", "title": "SemEval-2016 Task 2: Interpretable Semantic Textual Similarity", "abstract": "Paper presented at the 10th International Workshop on Semantic Evaluation (SemEval-2016), held on 16-17 June 2016 in San Diego, California.", "phrases": ["semantic textual similarity", "relation type", "caption"], "overall_score": 1.831414838445863, "scores": [1.249254519872835, 0.5214082027677536, 0.5206152066601951], "rank_score": 0.763759309766928} -{"id": "antoine-etal-2014-weighted", "title": "Weighted Krippendorff's alpha is a more reliable metrics for multi-coders ordinal annotations: experimental studies on emotion, opinion and coreference annotation", "abstract": "The question of data reliability is of first importance to assess the quality of manually annotated corpora. Although Cohen's \u03ba is the prevailing reliability measure used in NLP, alternative statistics have been proposed. This paper presents an experimental study with four measures (Cohen's \u03ba, Scott's \u03c0, binary and weighted Krippendorff's \u03b1) on three tasks: emotion, opinion and coreference annotation. The reported studies investigate the factors of influence (annotator bias, category prevalence, number of coders, number of categories) that should affect reliability estimation. Results show that the use of a weighted measure restricts this influence on ordinal annotations. &#13;
They suggest that weighted \u03b1 is the most reliable metric for such an annotation scheme.", "phrases": ["krippendorff", "opinion", "coreference annotation"], "overall_score": 1.0587023729333338, "scores": [0.9224470826483475, 0.8417411370481103, 0.5268887751159905], "rank_score": 0.7636923316041494} -{"id": "kim-hovy-2006-identifying", "title": "Identifying and Analyzing Judgment Opinions", "abstract": "In this paper, we introduce a methodology for analyzing judgment opinions. We define a judgment opinion as consisting of a valence, a holder, and a topic. We decompose the task of opinion analysis into four parts: 1) recognizing the opinion; 2) identifying the valence; 3) identifying the holder; and 4) identifying the topic. In this paper, we address the first three parts and evaluate our methodology using both intrinsic and extrinsic measures.", "phrases": ["opinion", "polarity classification", "benefit"], "overall_score": 2.068073109111353, "scores": [1.2258729920450135, 0.5447793605829151, 0.5203758287583928], "rank_score": 0.7636760604621072} -{"id": "declerck-siegel-2019-ontolex", "title": "OntoLex as a possible Bridge between WordNets and full lexical Descriptions", "abstract": "In this paper we describe our current work on representing a recently created German lexical semantics resource in OntoLex-Lemon and in conformance with WordNet specifications. Besides presenting the representation effort, we show the utilization of OntoLex-Lemon to bridge from WordNet-like resources to full lexical descriptions and extend the coverage of WordNets to other types of lexical data, such as decomposition results, exemplified for German data, and inflectional phenomena, here outlined for English data.", "phrases": ["bridge", "wordnets", "german data"], "overall_score": 0.8389757345765502, "scores": [0.9494630520247709, 0.7874025599129408, 0.5541402594612135], "rank_score": 0.7636686237996417} -{"id": "hasan-etal-2009-learning", "title": "Learning-Based Named Entity Recognition for Morphologically-Rich, Resource-Scarce Languages", "abstract": "Named entity recognition for morphologically rich, case-insensitive languages, including the majority of Semitic languages, Iranian languages, and Indian languages, is inherently more difficult than its English counterpart. Worse still, progress on machine learning approaches to named entity recognition for many of these languages is currently hampered by the scarcity of annotated data and the lack of an accurate part-of-speech tagger. While it is possible to rely on manually-constructed gazetteers to combat data scarcity, this gazetteer-centric approach has the potential weakness of creating irreproducible results, since these name lists are not publicly available in general. Motivated in part by this concern, we present a learning-based named entity recognizer that does not rely on manually-constructed gazetteers, using Bengali as our representative resource-scarce, morphologically-rich language. Our recognizer achieves a relative improvement of 7.5% in F-measure over a baseline recognizer. &#13;
Improvements arise from (1) using induced affixes, (2) extracting information from online lexical databases, and (3) jointly modeling part-of-speech tagging and named entity recognition.", "phrases": ["entity recognition", "part-of-speech tagger", "rich language"], "overall_score": 1.2288551889540347, "scores": [0.8907447145028026, 0.8486872024401774, 0.5511600582930035], "rank_score": 0.7635306584119945} -{"id": "riezler-etal-2003-statistical", "title": "Statistical Sentence Condensation using Ambiguity Packing and Stochastic Disambiguation Methods for Lexical-Functional Grammar", "abstract": "We present an application of ambiguity packing and stochastic disambiguation techniques for Lexical-Functional Grammars (LFG) to the domain of sentence condensation. Our system incorporates a linguistic parser/generator for LFG, a transfer component for parse reduction operating on packed parse forests, and a maximum-entropy model for stochastic output selection. Furthermore, we propose the use of standard parser evaluation methods for automatically evaluating the summarization quality of sentence condensation systems. An experimental evaluation of summarization quality shows a close correlation between the automatic parse-based evaluation and a manual evaluation of generated strings. Overall summarization quality of the proposed system is state-of-the-art, with guaranteed grammaticality of the system output due to the use of a constraint-based parser/generator.", "phrases": ["ambiguity packing", "grammaticality", "statistical model"], "overall_score": 1.896847374240122, "scores": [0.8266382051637295, 0.9169288293391645, 0.5464755806616856], "rank_score": 0.7633475383881931} -{"id": "harrison-etal-2019-maximizing", "title": "Maximizing Stylistic Control and Semantic Accuracy in NLG: Personality Variation and Discourse Contrast", "abstract": "Neural generation methods for task-oriented dialogue typically generate from a meaning representation that is populated using a database of domain information, such as a table of data describing a restaurant. While earlier work focused solely on the semantic fidelity of outputs, recent work has started to explore methods for controlling the style of the generated text while simultaneously achieving semantic accuracy. Here we experiment with two stylistic benchmark tasks, generating language that exhibits variation in personality, and generating discourse contrast. We report a huge performance improvement in both stylistic control and semantic accuracy over the state of the art on both of these benchmarks. We test several different models and show that putting stylistic conditioning in the decoder and eliminating the semantic re-ranker used in earlier models results in more than 15 points higher BLEU for Personality, with a reduction of semantic error to near zero. We also report an improvement from .75 to .81 in controlling contrast and a reduction in semantic error from 16% to 2%.", "phrases": ["stylistic control", "semantic accuracy", "personality", "discourse contrast"], "overall_score": 0.8385787156480043, "scores": [0.8645009805512217, 0.860112482086026, 0.8021017429976386, 0.526513760753778], "rank_score": 0.763307241597166} -{"id": "li-etal-2010-adaptive", "title": "Adaptive Development Data Selection for Log-linear Model in Statistical Machine Translation", "abstract": "This paper addresses the problem of dynamic model parameter selection for log-linear model based statistical machine translation (SMT) systems. 
In this work, we propose a principled method for this task by transforming it into a test-data-dependent development set selection problem. We present two algorithms for automatic development set construction, and evaluate our method on several NIST data sets for the Chinese-English translation task. Experimental results show that our method can effectively adapt log-linear model parameters to different test data, and consistently achieves good translation performance compared with conventional methods that use a fixed model parameter setting across different data sets.", "phrases": ["log-linear model", "statistical machine translation", "test set"], "overall_score": 1.5870936680465415, "scores": [0.905846097082503, 0.8450269024140644, 0.5388191648204526], "rank_score": 0.7632307214390067} -{"id": "ji-etal-2022-vscript", "title": "VScript: Controllable Script Generation with Visual Presentation", "abstract": "In order to offer a customized script tool and inspire professional scriptwriters, we present VScript. It is a controllable pipeline that generates complete scripts, including dialogues and scene descriptions, and presents them visually using video retrieval. With an interactive interface, our system allows users to select genres and input starting words that control the theme and development of the generated script. We adopt a hierarchical structure, which first generates the plot, then the script and its visual presentation. A novel approach is also introduced to plot-guided dialogue generation by treating it as inverse dialogue summarization. The experimental results show that our approach outperforms the baselines on both automatic and human evaluations, especially in genre control.", "phrases": ["visual presentation", "interface", "vscript"], "overall_score": 0.8384120569251017, "scores": [0.8735373310850935, 0.8643513796912932, 0.551577916093975], "rank_score": 0.7631555422901205} -{"id": "xu-etal-2022-beyond", "title": "Beyond Goldfish Memory: Long-Term Open-Domain Conversation", "abstract": "Despite recent improvements in open-domain dialogue models, state of the art models are trained and evaluated on short conversations with little context. In contrast, the long-term conversation setting has hardly been studied. In this work we collect and release a human-human dataset consisting of multiple chat sessions whereby the speaking partners learn about each other's interests and discuss the things they have learnt from past sessions. We show how existing models trained on existing datasets perform poorly in this long-term conversation setting in both automatic and human evaluations, and we study long-context models that can perform much better. In particular, we find retrieval-augmented methods and methods with an ability to summarize and recall previous conversations outperform the standard encoder-decoder architectures currently considered state of the art.", "phrases": ["conversation", "partner", "long-context model"], "overall_score": 1.7562530585416012, "scores": [1.2010770936458595, 0.5517867623648778, 0.5353291804402407], "rank_score": 0.762731012150326} -{"id": "nadejde-etal-2017-predicting", "title": "Predicting Target Language CCG Supertags Improves Neural Machine Translation", "abstract": "Neural machine translation (NMT) models are able to partially learn syntactic information from sequential lexical information. Still, some complex syntactic phenomena such as prepositional phrase attachment are poorly modeled. &#13;
This work aims to answer two questions: 1) Does explicitly modeling target language syntax help NMT? 2) Is tight integration of words and syntax better than multitask training? We introduce syntactic information in the form of CCG supertags in the decoder, by interleaving the target supertags with the word sequence. Our results on WMT data show that explicitly modeling target-syntax improves machine translation quality for German->English, a high-resource pair, and for Romanian->English, a low-resource pair, and also improves several syntactic phenomena, including prepositional phrase attachment. Furthermore, a tight coupling of words and syntax improves translation quality more than multitask training. By combining target-syntax with source-side dependency labels in the embedding layer, we obtain a total improvement of 0.9 BLEU for German->English and 1.2 BLEU for Romanian->English.", "phrases": ["ccg supertag", "neural machine translation", "syntactic information"], "overall_score": 1.828928125790263, "scores": [0.899698951888619, 0.8493363949063086, 0.539131463329207], "rank_score": 0.7627222700413782} -{"id": "chen-etal-2020-shot", "title": "Few-Shot NLG with Pre-Trained Language Model", "abstract": "Neural-based end-to-end approaches to natural language generation (NLG) from structured data or knowledge are data-hungry, making their adoption for real-world applications difficult with limited data. In this work, we propose the new task of few-shot natural language generation. Motivated by how humans tend to summarize tabular data, we propose a simple yet effective approach and show that it not only demonstrates strong performance but also provides good generalization across domains. The design of the model architecture is based on two aspects: content selection from input data and language modeling to compose coherent sentences, which can be acquired from prior knowledge. With just 200 training examples, across multiple domains, we show that our approach achieves very reasonable performance and outperforms the strongest baseline by an average of over 8.0 BLEU points. Our code and data can be found at ", "phrases": ["pre-trained language model", "table-to-text generation", "few-shot scenario"], "overall_score": 2.245544549620216, "scores": [0.8636808406256646, 0.8344546038408619, 0.589782116918211], "rank_score": 0.7626391871282459} -{"id": "schick-etal-2020-automatically", "title": "Automatically Identifying Words That Can Serve as Labels for Few-Shot Text Classification", "abstract": "A recent approach for few-shot text classification is to convert textual inputs to cloze questions that contain some form of task description, process them with a pretrained language model and map the predicted words to labels. Manually defining this mapping between words and labels requires both domain expertise and an understanding of the language model's abilities. To mitigate this issue, we devise an approach that automatically finds such a mapping given small amounts of training data. &#13;
For a number of tasks, the mapping found by our approach performs almost as well as hand-crafted label-to-word mappings.", "phrases": ["text classification", "prompt", "well performance"], "overall_score": 2.245126499073555, "scores": [0.9120775244509945, 0.8315866713777645, 0.5438274264725549], "rank_score": 0.7624972074337713} -{"id": "gari-soler-apidianaki-2021-lets", "title": "Let's Play Mono-Poly: BERT Can Reveal Words' Polysemy Level and Partitionability into Senses", "abstract": "Pre-trained language models (LMs) encode rich information about linguistic structure but their knowledge about lexical polysemy remains unclear. We propose a novel experimental setup for analyzing this knowledge in LMs specifically trained for different languages (English, French, Spanish, and Greek) and in multilingual BERT. We perform our analysis on datasets carefully designed to reflect different sense distributions, and control for parameters that are highly correlated with polysemy such as frequency and grammatical category. We demonstrate that BERT-derived representations reflect words' polysemy level and their partitionability into senses. Polysemy-related information is more clearly present in English BERT embeddings, but models in other languages also manage to establish relevant distinctions between words at different polysemy levels. Our results contribute to a better understanding of the knowledge encoded in contextualized representations and open up new avenues for multilingual lexical semantics research.", "phrases": ["bert", "polysemy level", "partitionability", "language model"], "overall_score": 0.8376874187536275, "scores": [0.920226867069308, 0.8059213367842327, 0.7905747922319235, 0.5332607967198694], "rank_score": 0.7624959482013335} -{"id": "friedman-etal-2019-relating", "title": "Relating Word Embedding Gender Biases to Gender Gaps: A Cross-Cultural Analysis", "abstract": "Modern models for common NLP tasks often employ machine learning techniques and train on journalistic, social media, or other culturally-derived text. These have recently been scrutinized for racial and gender biases, stemming from inherent bias in their training text. These biases are often sub-optimal, and recent work proposes methods to rectify them; however, these biases may shed light on actual racial or gender gaps in the culture(s) that produced the training text, thereby helping us understand cultural context through big data. This paper presents an approach for quantifying gender bias in word embeddings, and then using them to characterize statistical gender gaps in education, politics, economics, and health. We validate these metrics on 2018 Twitter data spanning 51 U.S. regions and 99 countries. We correlate state and country word embedding biases with 18 international and 5 U.S.-based statistical gender gaps, characterizing regularities and predictive strength.", "phrases": ["word embedding", "cultural context", "gender bias"], "overall_score": 1.365934369647336, "scores": [0.9168626266376053, 0.828549401440649, 0.5416154325370187], "rank_score": 0.7623424868717578} -{"id": "grundkiewicz-junczys-dowmunt-2018-near", "title": "Near Human-Level Performance in Grammatical Error Correction with Hybrid Machine Translation", "abstract": "We combine two of the most popular approaches to automated Grammatical Error Correction (GEC): GEC based on Statistical Machine Translation (SMT) and GEC based on Neural Machine Translation (NMT). &#13;
The hybrid system achieves new state-of-the-art results on the CoNLL-2014 and JFLEG benchmarks. This GEC system preserves the accuracy of SMT output and, at the same time, generates more fluent sentences, as is typical for NMT. Our analysis shows that the created systems are closer to reaching human-level performance than any other GEC system reported so far.", "phrases": ["human-level performance", "grammatical error correction", "recall"], "overall_score": 1.6749608979610064, "scores": [0.9053866352865169, 0.8465535252738328, 0.5349825080249795], "rank_score": 0.7623075561951097} -{"id": "nguyen-etal-2015-tea", "title": "Tea Party in the House: A Hierarchical Ideal Point Topic Model and Its Application to Republican Legislators in the 112th Congress", "abstract": "We introduce the Hierarchical Ideal Point Topic Model, which provides a rich picture of policy issues, framing, and voting behavior using a joint model of votes, bill text, and the language that legislators use when debating bills. We use this model to look at the relationship between Tea Party Republicans and \u201cestablishment\u201d Republicans in the U.S. House of Representatives during the 112th Congress. 1. Capturing Political Polarization: Ideal-point models are one of the most widely used tools in contemporary political science research (Poole and Rosenthal, 2007). These models estimate political preferences for legislators, known as their ideal points, from binary data such as legislative votes. Popular formulations analyze legislators\u2019 votes and place them on a one-dimensional scale, most often interpreted as an ideological spectrum from liberal to conservative. Moving beyond a single dimension is attractive, however, since people may lean differently based on policy issues; for example, the conservative movement in the U.S. includes fiscal conservatives who are relatively liberal on social issues, and vice versa. In multi-dimensional ideal point models, therefore, the ideal point of each legislator is no longer characterized by a single number, but by a multi-dimensional vector. With that move comes a new challenge, though: the additional dimensions are often difficult to interpret. To mitigate this problem, recent research has introduced methods that estimate multi-dimensional ideal points using both voting data and the texts of the bills being voted on, e.g., using topic models and associating each dimension of the ideal point space with a topic. The words most strongly associated with the topic can sometimes provide a readable description of its corresponding dimension. In this paper, we develop this idea further by introducing HIPTM, the Hierarchical Ideal Point Topic Model, to estimate multi-dimensional ideal points for legislators in the U.S. Congress. HIPTM differs from previous models in three ways. First, HIPTM uses not only votes and associated bill text, but also the language of the legislators themselves; this allows predictions of ideal points from politicians\u2019 writing alone. Second, HIPTM improves the interpretability of ideal-point dimensions by incorporating data from the Congressional Bills Project (Adler and Wilkerson, 2015), in which bills are labeled with major topics from the Policy Agendas Project Topic Codebook (http://www.policyagendas.org/). And third, HIPTM discovers a hierarchy of topics, allowing us to analyze both agenda issues and issue-specific frames that legislators use on the congressional floor, following Nguyen et al. (2013) in modeling framing as second-level agenda setting (McCombs, 2005). &#13;
Using this new model, we focus on Republican legislators during the 112th U.S. Congress, from January 2011 until January 2013. This is a particularly interesting session of Congress for political scientists, because of the rise of the Tea Party, a decentralized political movement with populist, libertarian, and conservative elements. Although united with \u201cestablishment\u201d Republicans against Democrats in the 2010 midterm elections, leading to massive Democratic defeats, the Tea Party was\u2014and still is\u2014wrestling with establishment Republicans for control of the Republican party. The Tea Party is a new and complex phenomenon for political scientists; as Carmines and D\u2019Amico (2015) observe: \u201cConventional views of ideology as a single-dimensional, left-right spectrum experience great difficulty in understanding or explaining the Tea Party.\u201d Our model identifies legislators who have low (or high) levels of \u201cTea Partiness\u201d but are (or are not) members of the Tea Party Caucus, and provides insights into the nature of the movement.", "phrases": ["topic model", "112th congress", "framing", "tea party"], "overall_score": 1.6745038361279716, "scores": [0.8515949007313017, 0.7874543066892624, 0.8841979124692421, 0.5251510336717884], "rank_score": 0.7620995383903986} -{"id": "gimenez-marquez-2008-smorgasbord", "title": "A Smorgasbord of Features for Automatic MT Evaluation", "abstract": "This document describes the approach by the NLP Group at the Technical University of Catalonia (UPC-LSI) for the shared task on Automatic Evaluation of Machine Translation at the ACL 2008 Third SMT Workshop.", "phrases": ["translation quality", "improved correlation", "difficulty", "gim\u00e9nez", "human judgement"], "overall_score": 1.5844959647559815, "scores": [1.119235218511431, 1.0895543417979339, 0.5507844242089188, 0.528330578708862, 0.5220028878762353], "rank_score": 0.7619814902206762} -{"id": "avramidis-koehn-2008-enriching", "title": "Enriching Morphologically Poor Languages for Statistical Machine Translation", "abstract": "We address the problem of translating from morphologically poor to morphologically rich languages by adding per-word linguistic information to the source language. We use the syntax of the source sentence to extract information for noun cases and verb persons and annotate the corresponding words accordingly. In experiments, we show improved performance for translating from English into Greek and Czech. For English\u2010Greek, we reduce the verb conjugation error from 19% to 5.4% and the noun case agreement error from 9% to 6%.", "phrases": ["morphology", "poor language", "source sentence"], "overall_score": 2.1588208226457177, "scores": [1.0901754683729592, 0.598996389601567, 0.5967352310641765], "rank_score": 0.7619690296795675} -{"id": "peirsman-pado-2010-cross", "title": "Cross-lingual Induction of Selectional Preferences with Bilingual Vector Spaces", "abstract": "We describe a cross-lingual method for the induction of selectional preferences for resource-poor languages, where no accurate monolingual models are available. The method uses bilingual vector spaces to \"translate\" foreign language predicate-argument structures into a resource-rich language like English. The only prerequisite for constructing the bilingual vector space is a large unparsed corpus in the resource-poor language, although the model can profit from (even noisy) syntactic knowledge. &#13;
Our experiments show that the cross-lingual predictions correlate well with human ratings, clearly outperforming monolingual baseline models.", "phrases": ["induction", "bilingual vector space", "predicate-argument structure"], "overall_score": 1.226102906090279, "scores": [0.79546421188635, 0.9460357540108726, 0.5439617408596379], "rank_score": 0.7618205689189534} -{"id": "elsner-santhanam-2011-learning", "title": "Learning to Fuse Disparate Sentences", "abstract": "We present a system for fusing sentences which are drawn from the same source document but have different content. Unlike previous work, our approach is supervised, training on real-world examples of sentences fused by professional journalists in the process of editing news articles. Like Filippova and Strube (2008), our system merges dependency graphs using Integer Linear Programming. However, instead of aligning the inputs as a preprocess, we integrate the tasks of finding an alignment and selecting a merged sentence into a joint optimization problem, and learn parameters for this optimization using a structured online algorithm. Evaluation by human judges shows that our technique produces fused sentences that are both informative and readable.", "phrases": ["news article", "fusion", "similar sentence"], "overall_score": 1.8929125272132579, "scores": [1.151421011032017, 0.5938407994030672, 0.5400303079325228], "rank_score": 0.7617640394558691} -{"id": "kubler-2008-page", "title": "The PaGe 2008 Shared Task on Parsing German", "abstract": "The ACL 2008 Workshop on Parsing German features a shared task on parsing German. The goal of the shared task was to find reasons for the radically different behavior of parsers on the different treebanks and between constituent and dependency representations. In this paper, we describe the task and the data sets. In addition, we provide an overview of the test results and a first analysis.", "phrases": ["page", "shared task", "dependency parser"], "overall_score": 1.6737290421940683, "scores": [0.852409834993133, 0.8287211523480955, 0.6041097560831801], "rank_score": 0.7617469144748029} -{"id": "takamura-etal-2006-latent", "title": "Latent Variable Models for Semantic Orientations of Phrases", "abstract": "We propose models for semantic orientations of phrases as well as classification methods based on the models. Although each phrase consists of multiple words, the semantic orientation of the phrase is not a mere sum of the orientations of the component words. Some words can invert the orientation. In order to capture the property of such phrases, we introduce latent variables into the models. Through experiments, we show that the proposed latent variable models work well in the classification of semantic orientations of phrases and achieve nearly 82% classification accuracy.", "phrases": ["variable model", "semantic orientation", "latent", "sentiment classification"], "overall_score": 1.5838521574954993, "scores": [1.0088505928890654, 0.907974570170698, 0.5750729356539765, 0.5547894387796498], "rank_score": 0.7616718843733475} -{"id": "damonte-cohen-2018-cross", "title": "Cross-Lingual Abstract Meaning Representation Parsing", "abstract": "Abstract Meaning Representation (AMR) research has mostly focused on English. We show that it is possible to use AMR annotations for English as a semantic representation for sentences written in other languages. We exploit an AMR parser for English and parallel corpora to learn AMR parsers for Italian, Spanish, German and Chinese. &#13;
Qualitative analysis shows that the new parsers overcome structural differences between the languages. We further propose a method to evaluate the parsers that does not require gold standard data in the target languages. This method correlates highly with the gold standard evaluation, obtaining a Pearson correlation coefficient of 0.95.", "phrases": ["semantic representation", "other language", "amr parser", "parallel corpora"], "overall_score": 1.225708801121905, "scores": [0.9725020356132639, 0.9331769105807084, 0.5766270741648668, 0.563996770578038], "rank_score": 0.7615756977342192} -{"id": "chaudhary-etal-2018-adapting", "title": "Adapting Word Embeddings to New Languages with Morphological and Phonological Subword Representations", "abstract": "Much work in Natural Language Processing (NLP) has focused on resource-rich languages, making generalization to new, less-resourced languages challenging. We present two approaches for improving generalization to low-resourced languages by adapting continuous word representations using linguistically motivated subword units: phonemes, morphemes and graphemes. Our method requires neither parallel corpora nor bilingual dictionaries and provides a significant gain in performance over previous methods relying on these resources. We demonstrate the effectiveness of our approaches on Named Entity Recognition for four languages, namely Uyghur, Turkish, Bengali and Hindi, of which Uyghur and Bengali are low resource languages, and also perform experiments on Machine Translation. Exploiting subwords with transfer learning gives us a boost of +15.2 NER F1 for Uyghur and +9.7 F1 for Bengali. We also show improvements in the monolingual setting where we achieve (avg.) +3 F1 and (avg.) +1.35 BLEU.", "phrases": ["word embedding", "low-resource language", "phoneme"], "overall_score": 2.0097949050008337, "scores": [0.8355057927311992, 0.8320407917885593, 0.6171270553217489], "rank_score": 0.7615578799471692} -{"id": "lin-chen-2008-ranking", "title": "Ranking Reader Emotions Using Pairwise Loss Minimization and Emotional Distribution Regression", "abstract": "This paper presents two approaches to ranking reader emotions of documents. Past studies assign a document to a single emotion category, so their methods cannot be applied directly to the emotion ranking problem. Furthermore, whereas previous research analyzes emotions from the writer's perspective, this work examines readers' emotional states. The first approach proposed in this paper minimizes pairwise ranking errors. In the second approach, regression is used to model emotional distributions. Experiment results show that the regression method is more effective at identifying the most popular emotion, but the pairwise loss minimization method produces ranked lists of emotions that have better correlations with the correct lists.", "phrases": ["reader", "pairwise loss minimization", "emotional distribution regression"], "overall_score": 1.0556343899861576, "scores": [0.901410933560337, 0.8446146014525463, 0.5384122141244303], "rank_score": 0.7614792497124379} -{"id": "halder-etal-2020-task", "title": "Task-Aware Representation of Sentences for Generic Text Classification", "abstract": "State-of-the-art approaches for text classification leverage a transformer architecture with a linear layer on top that outputs a class distribution for a given prediction problem. While effective, this approach suffers from conceptual limitations that affect its utility in few-shot or zero-shot transfer learning scenarios. &#13;
First, the number of classes to predict needs to be pre-defined. In a transfer learning setting, in which new classes are added to an already trained classifier, all information contained in a linear layer is therefore discarded, and a new layer is trained from scratch. Second, this approach only learns the semantics of classes implicitly from training examples, as opposed to leveraging the explicit semantic information provided by the natural language names of the classes. For instance, a classifier trained to predict the topics of news articles might have classes like \u201cbusiness\u201d or \u201csports\u201d that themselves carry semantic information. Extending a classifier to predict a new class named \u201cpolitics\u201d with only a handful of training examples would benefit from both leveraging the semantic information in the name of a new class and using the information contained in the already trained linear layer. This paper presents a novel formulation of text classification that addresses these limitations. It imbues the notion of the task at hand into the transformer model itself by factorizing arbitrary classification problems into a generic binary classification problem. We present experiments in few-shot and zero-shot transfer learning that show that our approach significantly outperforms previous approaches on small training data and can even learn to predict new classes with no training examples at all. The implementation of our model is publicly available at: .", "phrases": ["sentences", "new class", "task-aware representation"], "overall_score": 1.4811465504897838, "scores": [0.8814154834121692, 0.8765682048450467, 0.5254925830529068], "rank_score": 0.7611587571033742} -{"id": "lai-etal-2021-lattice", "title": "Lattice-BERT: Leveraging Multi-Granularity Representations in Chinese Pre-trained Language Models", "abstract": "Chinese pre-trained language models usually process text as a sequence of characters, while ignoring more coarse granularity, e.g., words. In this work, we propose a novel pre-training paradigm for Chinese \u2014 Lattice-BERT, which explicitly incorporates word representations along with characters, thus can model a sentence in a multi-granularity manner. Specifically, we construct a lattice graph from the characters and words in a sentence and feed all these text units into transformers. We design a lattice position attention mechanism to exploit the lattice structures in self-attention layers. We further propose a masked segment prediction task to push the model to learn from rich but redundant information inherent in lattices, while avoiding learning unexpected tricks. Experiments on 11 Chinese natural language understanding tasks show that our model can bring an average increase of 1.5% under the 12-layer setting, which achieves new state-of-the-art among base-size models on the CLUE benchmarks. Further analysis shows that Lattice-BERT can harness the lattice structures, and the improvement comes from the exploration of redundant information and multi-granularity representations. 
Our code will be available at .", "phrases": ["multi-granularity representation", "chinese", "word representation"], "overall_score": 1.2249941729928797, "scores": [0.8437188554585912, 0.9122285684932466, 0.5274475989854724], "rank_score": 0.7611316743124368} -{"id": "dernoncourt-lee-2017-pubmed", "title": "PubMed 200k RCT: a Dataset for Sequential Sentence Classification in Medical Abstracts", "abstract": "We present PubMed 200k RCT, a new dataset based on PubMed for sequential sentence classification. The dataset consists of approximately 200,000 abstracts of randomized controlled trials, totaling 2.3 million sentences. Each sentence of each abstract is labeled with their role in the abstract using one of the following classes: background, objective, method, result, or conclusion. The purpose of releasing this dataset is twofold. First, the majority of datasets for sequential short-text classification (i.e., classification of short texts that appear in sequences) are small: we hope that releasing a new large dataset will help develop more accurate algorithms for this task. Second, from an application perspective, researchers need better tools to efficiently skim through the literature. Automatically classifying each sentence in an abstract would help researchers read abstracts more efficiently, especially in fields where abstracts may be long, such as the medical field.", "phrases": ["sequential sentence classification", "abstract", "pubmed"], "overall_score": 1.2246128559298854, "scores": [0.929089881395549, 0.7936889566114891, 0.5599054080931257], "rank_score": 0.7608947487000547} -{"id": "rehbein-etal-2014-kiezdeutsch", "title": "The KiezDeutsch Korpus (KiDKo) Release 1.0", "abstract": "This paper presents the first release of the KiezDeutsch Korpus (KiDKo), a new language resource with multiparty spoken dialogues of Kiezdeutsch, a newly emerging language variety spoken by adolescents from multiethnic urban areas in Germany. The first release of the corpus includes the transcriptions of the data as well as a normalisation layer and part-of-speech annotations. In the paper, we describe the main features of the new resource and then focus on automatic POS tagging of informal spoken language. Our tagger achieves an accuracy of nearly 97% on KiDKo. While we did not succeed in further improving the tagger using ensemble tagging, we present our approach to using the tagger ensembles for identifying error patterns in the automatically tagged data.", "phrases": ["kiezdeutsch korpus", "kidko", "kiezdeutsch corpus"], "overall_score": 1.3632885774264232, "scores": [0.90130663436926, 0.8608700301024994, 0.5204208618810998], "rank_score": 0.7608658421176197} -{"id": "he-etal-2018-decoupling", "title": "Decoupling Strategy and Generation in Negotiation Dialogues", "abstract": "We consider negotiation settings in which two agents use natural language to bargain on goods. Agents need to decide on both high-level strategy (e.g., proposing $50) and the execution of that strategy (e.g., generating \u201cThe bike is brand new. Selling for just $50!\u201d). Recent work on negotiation trains neural models, but their end-to-end nature makes it hard to control their strategy, and reinforcement learning tends to lead to degenerate solutions. In this paper, we propose a modular approach based on coarse dialogue acts (e.g., propose(price=50)) that decouples strategy and generation. 
We show that we can flexibly set the strategy using supervised learning, reinforcement learning, or domain-specific knowledge without degeneracy, while our retrieval-based generation can maintain context-awareness and produce diverse utterances. We test our approach on the recently proposed DEALORNODEAL game, and we also collect a richer dataset based on real items on Craigslist. Human evaluation shows that our systems achieve higher task success rate and more human-like negotiation behavior than previous approaches.", "phrases": ["negotiation", "dialogue system", "language generation"], "overall_score": 2.2791985369301098, "scores": [1.1684957533116977, 0.5654061420596022, 0.5485435925489336], "rank_score": 0.7608151626400779} -{"id": "buyko-etal-2010-genereg", "title": "The GeneReg Corpus for Gene Expression Regulation Events \u2014 An Overview of the Corpus and its In-Domain and Out-of-Domain Interoperability", "abstract": "Despite the large variety of corpora in the biomedical domain their annotations differ in many respects, e.g., the coverage of different, highly specialized knowledge domains, varying degrees of granularity of targeted relations, the specificity of linguistic anchoring of relations and named entities in documents, etc. We here present GeneReg (Gene Regulation Corpus), the result of an annotation campaign led by the Jena University Language & Information Engineering (JULIE) Lab. The GeneReg corpus consists of 314 abstracts dealing with the regulation of gene expression in the model organism E. coli. Our emphasis in this paper is on the compatibility of the GeneReg corpus with the alternative Genia event corpus and with several in-domain and out-of-domain lexical resources, e.g., the Specialist Lexicon, FrameNet, and WordNet. The links we established from the GeneReg corpus to these external resources will help improve the performance of the automatic relation extraction engine JREx trained and evaluated on GeneReg.", "phrases": ["genereg corpus", "gene expression", "in-domain"], "overall_score": 0.8358108622991821, "scores": [0.848867132939, 0.8463283183455061, 0.5871680474320466], "rank_score": 0.7607878329055175} -{"id": "chali-hasan-2015-towards", "title": "Towards Topic-to-Question Generation", "abstract": "This paper is concerned with automatic generation of all possible questions from a topic of interest. Specifically, we consider that each topic is associated with a body of texts containing useful information about the topic. Then, questions are generated by exploiting the named entity information and the predicate argument structures of the sentences present in the body of texts. The importance of the generated questions is measured using Latent Dirichlet Allocation by identifying the subtopics (which are closely related to the original topic) in the given body of texts and applying the Extended String Subsequence Kernel to calculate their similarity with the questions. We also propose the use of syntactic tree kernels for the automatic judgment of the syntactic correctness of the questions. The questions are ranked by considering both their importance (in the context of the given body of texts) and syntactic correctness. To the best of our knowledge, no previous study has accomplished this task in our setting. 
A series of experiments demonstrate that the proposed topic-to-question generation approach can significantly outperform the state-of-the-art results.", "phrases": ["automatic generation", "possible question", "declarative sentence"], "overall_score": 2.1088919411875207, "scores": [0.8842942370725988, 0.8653917614587437, 0.5321799604601092], "rank_score": 0.7606219863304838} -{"id": "sun-etal-2021-tita", "title": "TITA: A Two-stage Interaction and Topic-Aware Text Matching Model", "abstract": "In this paper, we focus on the problem of keyword and document matching by considering different relevance levels. In our recommendation system, different people follow different hot keywords with interest. We need to attach documents to each keyword and then distribute the documents to people who follow these keywords. The ideal documents should have the same topic with the keyword, which we call topic-aware relevance. In other words, topic-aware relevance documents are better than partially-relevance ones in this application. However, previous tasks never define topic-aware relevance clearly. To tackle this problem, we define a three-level relevance in keyword-document matching task: topic-aware relevance, partially-relevance and irrelevance. To capture the relevance between the short keyword and the document at above-mentioned three levels, we should not only combine the latent topic of the document with its deep neural representation, but also model complex interactions between the keyword and the document. To this end, we propose a Two-stage Interaction and Topic-Aware text matching model (TITA). In terms of \u201ctopic-aware\u201d, we introduce neural topic model to analyze the topic of the document and then use it to further encode the document. In terms of \u201ctwo-stage interaction\u201d, we propose two successive stages to model complex interactions between the keyword and the document. Extensive experiments reveal that TITA outperforms other well-designed baselines and shows excellent performance in our recommendation system.", "phrases": ["two-stage interaction", "text matching model", "latent topic"], "overall_score": 0.8354002815343176, "scores": [0.9155490824342088, 0.8087084119596959, 0.5569848241690132], "rank_score": 0.7604141061876394} -{"id": "chen-etal-2006-novel", "title": "Novel Association Measures Using Web Search with Double Checking", "abstract": "A web search with double checking model is proposed to explore the web as a live corpus. Five association measures including variants of Dice, Overlap Ratio, Jaccard, and Cosine, as well as Co-Occurrence Double Check (CODC), are presented. In the experiments on Rubenstein-Goodenough's benchmark data set, the CODC measure achieves correlation coefficient 0.8492, which competes with the performance (0.8914) of the model using WordNet. The experiments on link detection of named entities using the strategies of direct association, association matrix and scalar association matrix verify that the double-check frequencies are reliable. Further study on named entity clustering shows that the five measures are quite useful. In particular, CODC measure is very stable on word-word and name-name experiments. The application of CODC measure to expand community chains for personal name disambiguation achieves 9.65% and 14.22% increase compared to the system without community expansion. 
All the experiments illustrate that the novel model of web search with double checking is feasible for mining associations from the web.", "phrases": ["web search", "double checking", "semantic similarity"], "overall_score": 1.054130305327713, "scores": [0.8928872789312661, 0.8503226117278495, 0.5379729552614751], "rank_score": 0.7603942819735302} -{"id": "zhou-etal-2020-global", "title": "Global Context-enhanced Graph Convolutional Networks for Document-level Relation Extraction", "abstract": "Document-level Relation Extraction (RE) is particularly challenging due to complex semantic interactions among multiple entities in a document. Among exiting approaches, Graph Convolutional Networks (GCN) is one of the most effective approaches for document-level RE. However, traditional GCN simply takes word nodes and adjacency matrix to represent graphs, which is difficult to establish direct connections between distant entity pairs. In this paper, we propose Global Context-enhanced Graph Convolutional Networks (GCGCN), a novel model which is composed of entities as nodes and context of entity pairs as edges between nodes to capture rich global context information of entities in a document. Two hierarchical blocks, Context-aware Attention Guided Graph Convolution (CAGGC) for partially connected graphs and Multi-head Attention Guided Graph Convolution (MAGGC) for fully connected graphs, could take progressively more global context into account. Meantime, we leverage a large-scale distantly supervised dataset to pre-train a GCGCN model with curriculum learning, which is then fine-tuned on the human-annotated dataset for further improving document-level RE performance. The experimental results on DocRED show that our model could effectively capture rich global context information in the document, leading to a state-of-the-art result. Our code is available at .", "phrases": ["graph convolutional networks", "document-level relation extraction", "edge"], "overall_score": 0.8351204057801451, "scores": [0.9167359117914965, 0.8372374322868883, 0.5265047128144451], "rank_score": 0.7601593522976099} -{"id": "sun-etal-2020-clireval", "title": "CLIReval: Evaluating Machine Translation as a Cross-Lingual Information Retrieval Task", "abstract": "We present CLIReval, an easy-to-use toolkit for evaluating machine translation (MT) with the proxy task of cross-lingual information retrieval (CLIR). Contrary to what the project name might suggest, CLIReval does not actually require any annotated CLIR dataset. Instead, it automatically transforms translations and references used in MT evaluations into a synthetic CLIR dataset; it then sets up a standard search engine (Elasticsearch) and computes various information retrieval metrics (e.g., mean average precision) by treating the translations as documents to be retrieved. The idea is to gauge the quality of MT by its impact on the document translation approach to CLIR. As a case study, we run CLIReval on the \u201cmetrics shared task\u201d of WMT2019; while this extrinsic metric is not intended to replace popular intrinsic metrics such as BLEU, results suggest CLIReval is competitive in many language pairs in terms of correlation to human judgments of quality. 
CLIReval is publicly available at .", "phrases": ["machine translation", "search engine", "clireval"], "overall_score": 0.8349720142134279, "scores": [0.9074314076035606, 0.8030934839095843, 0.5695479499049046], "rank_score": 0.7600242804726832} -{"id": "barzilay-lapata-2006-aggregation", "title": "Aggregation via Set Partitioning for Natural Language Generation", "abstract": "The role of aggregation in natural language generation is to combine two or more linguistic structures into a single sentence. The task is crucial for generating concise and readable texts. We present an efficient algorithm for automatically learning aggregation rules from a text and its related database. The algorithm treats aggregation as a set partitioning problem and uses a global inference procedure to find an optimal solution. Our experiments show that this approach yields substantial improvements over a clustering-based model which relies exclusively on local information.", "phrases": ["natural language generation", "aggregation", "other work"], "overall_score": 1.361737852566104, "scores": [0.964503392000068, 0.7909365238886771, 0.5245611823939093], "rank_score": 0.7600003660942182} -{"id": "fei-li-2020-cross", "title": "Cross-Lingual Unsupervised Sentiment Classification with Multi-View Transfer Learning", "abstract": "Recent neural network models have achieved impressive performance on sentiment classification in English as well as other languages. Their success heavily depends on the availability of a large amount of labeled data or parallel corpus. In this paper, we investigate an extreme scenario of cross-lingual sentiment classification, in which the low-resource language does not have any labels or parallel corpus. We propose an unsupervised cross-lingual sentiment classification model named multi-view encoder-classifier (MVEC) that leverages an unsupervised machine translation (UMT) system and a language discriminator. Unlike previous language model (LM) based fine-tuning approaches that adjust parameters solely based on the classification error on training data, we employ the encoder-decoder framework of a UMT as a regularization component on the shared network parameters. In particular, the cross-lingual encoder of our model learns a shared representation, which is effective for both reconstructing input sentences of two languages and generating more representative views from the input for classification. Extensive experiments on five language pairs verify that our model significantly outperforms other models for 8/11 sentiment classification tasks.", "phrases": ["sentiment classification", "transfer learning", "input sentence"], "overall_score": 1.580219062071, "scores": [0.8026693462603087, 0.9476052360593125, 0.5294996220484199], "rank_score": 0.759924734789347} -{"id": "schick-schutze-2021-generating", "title": "Generating Datasets with Pretrained Language Models", "abstract": "To obtain high-quality sentence embeddings from pretrained language models (PLMs), they must either be augmented with additional pretraining objectives or finetuned on a large set of labeled text pairs. While the latter approach typically outperforms the former, it requires great human effort to generate suitable datasets of sufficient size. 
In this paper, we show how PLMs can be leveraged to obtain high-quality sentence embeddings without the need for labeled data, finetuning or modifications to the pretraining objective: We utilize the generative abilities of large and high-performing PLMs to generate entire datasets of labeled text pairs from scratch, which we then use for finetuning much smaller and more efficient models. Our fully unsupervised approach outperforms strong baselines on several semantic textual similarity datasets.", "phrases": ["language model", "plms", "similarity dataset", "dino"], "overall_score": 1.4786235125041314, "scores": [1.4294539013690533, 0.5453297149201448, 0.5381097072212552, 0.5265553647487926], "rank_score": 0.7598621720648114} -{"id": "dozat-etal-2017-stanfords", "title": "Stanford's Graph-based Neural Dependency Parser at the CoNLL 2017 Shared Task", "abstract": "This paper describes the neural dependency parser submitted by Stanford to the CoNLL 2017 Shared Task on parsing Universal Dependencies. Our system uses relatively simple LSTM networks to produce part of speech tags and labeled dependency parses from segmented and tokenized sequences of words. In order to address the rare word problem that abounds in languages with complex morphology, we include a character-based word representation that uses an LSTM to produce embeddings from sequences of characters. Our system was ranked first according to all five relevant metrics for the system: UPOS tagging (93.09%), XPOS tagging (82.27%), unlabeled attachment score (81.30%), labeled attachment score (76.30%), and content word labeled attachment score (72.57%).", "phrases": ["neural dependency parser", "stanford", "pos tagger"], "overall_score": 2.5318922871988376, "scores": [0.9216478273498806, 0.8047731160995779, 0.5530540522785616], "rank_score": 0.7598249985760068} -{"id": "rahman-ng-2010-inducing", "title": "Inducing Fine-Grained Semantic Classes via Hierarchical and Collective Classification", "abstract": "Research in named entity recognition and mention detection has typically involved a fairly small number of semantic classes, which may not be adequate if semantic class information is intended to support natural language applications. Motivated by this observation, we examine the under-studied problem of semantic subtype induction, where the goal is to automatically determine which of a set of 92 fine-grained semantic classes a noun phrase belongs to. We seek to improve the standard supervised approach to this problem using two techniques: hierarchical classification and collective classification. Experimental results demonstrate the effectiveness of these techniques, whether or not they are applied in isolation or in combination with the standard approach.", "phrases": ["semantic class", "collective classification", "noun phrase", "fine-grained typing"], "overall_score": 1.4783901038116842, "scores": [0.8905243414050784, 0.9576845482599153, 0.6279405409534936, 0.5628194642801853], "rank_score": 0.7597422237246682} -{"id": "rashkin-etal-2021-increasing", "title": "Increasing Faithfulness in Knowledge-Grounded Dialogue with Controllable Features", "abstract": "Knowledge-grounded dialogue systems are intended to convey information that is based on evidence provided in a given source text. We discuss the challenges of training a generative neural dialogue model for such systems that is controlled to stay faithful to the evidence. 
Existing datasets contain a mix of conversational responses that are faithful to selected evidence as well as more subjective or chit-chat style responses. We propose different evaluation measures to disentangle these different styles of responses by quantifying the informativeness and objectivity. At training time, additional inputs based on these evaluation measures are given to the dialogue model. At generation time, these additional inputs act as stylistic controls that encourage the model to generate responses that are faithful to the provided evidence. We also investigate the usage of additional controls at decoding time using resampling techniques. In addition to automatic metrics, we perform a human evaluation study where raters judge the output of these controlled generation models to be generally more objective and faithful to the evidence compared to baseline dialogue systems.", "phrases": ["faithfulness", "knowledge-grounded dialogue", "objective sentence"], "overall_score": 1.669259366562188, "scores": [0.9521911296780087, 0.8017577353864275, 0.5251891672243192], "rank_score": 0.7597126774295851} -{"id": "stern-etal-2017-minimal", "title": "A Minimal Span-Based Neural Constituency Parser", "abstract": "In this work, we present a minimal neural model for constituency parsing based on independent scoring of labels and spans. We show that this model is not only compatible with classical dynamic programming techniques, but also admits a novel greedy top-down inference algorithm based on recursive partitioning of the input. We demonstrate empirically that both prediction schemes are competitive with recent work, and when combined with basic extensions to the scoring model are capable of achieving state-of-the-art single-model performance on the Penn Treebank (91.79 F1) and strong performance on the French Treebank (82.23 F1).", "phrases": ["constituent", "penn treebank", "span-based parser"], "overall_score": 2.632824277307344, "scores": [1.1312591449037908, 0.6203698533045698, 0.5273885188336642], "rank_score": 0.7596725056806749} -{"id": "bjorne-salakoski-2018-biomedical", "title": "Biomedical Event Extraction Using Convolutional Neural Networks and Dependency Parsing", "abstract": "Event and relation extraction are central tasks in biomedical text mining. Where relation extraction concerns the detection of semantic connections between pairs of entities, event extraction expands this concept with the addition of trigger words, multiple arguments and nested events, in order to more accurately model the diversity of natural language. In this work we develop a convolutional neural network that can be used for both event and relation extraction. We use a linear representation of the input text, where information is encoded with various vector space embeddings. Most notably, we encode the parse graph into this linear space using dependency path embeddings. We integrate our neural network into the open source Turku Event Extraction System (TEES) framework. Using this system, our machine learning model can be easily applied to a large set of corpora from e.g. the BioNLP, DDI Extraction and BioCreative shared tasks. 
We evaluate our system on 12 different event, relation and NER corpora, showing good generalizability to many tasks and achieving improved performance on several corpora.", "phrases": ["event extraction", "biomedical domain", "deep neural network"], "overall_score": 1.8211328932537516, "scores": [0.8666763950880115, 0.8553446665965851, 0.5563931550403092], "rank_score": 0.7594714055749687} -{"id": "rubino-etal-2012-dcu", "title": "DCU-Symantec Submission for the WMT 2012 Quality Estimation Task", "abstract": "This paper describes the features and the machine learning methods used by Dublin City University (DCU) and SYMANTEC for the WMT 2012 quality estimation task. Two sets of features are proposed: one constrained, i.e. respecting the data limitation suggested by the workshop organisers, and one unconstrained, i.e. using data or tools trained on data that was not provided by the workshop organisers. In total, more than 300 features were extracted and used to train classifiers in order to predict the translation quality of unseen data. In this paper, we focus on a subset of our feature set that we consider to be relatively novel: features based on a topic model built using the Latent Dirichlet Allocation approach, and features based on source and target language syntax extracted using part-of-speech (POS) taggers and parsers. We evaluate nine feature combinations using four classification-based and four regression-based machine learning techniques.", "phrases": ["wmt", "quality estimation task", "topic model"], "overall_score": 1.5791959062770704, "scores": [0.9045609178641106, 0.8444726851438319, 0.5292644995701395], "rank_score": 0.7594327008593607} -{"id": "kulkarni-etal-2018-multi", "title": "Multi-view Models for Political Ideology Detection of News Articles", "abstract": "A news article's title, content and link structure often reveal its political ideology. However, most existing works on automatic political ideology detection only leverage textual cues. Drawing inspiration from recent advances in neural inference, we propose a novel attention based multi-view model to leverage cues from all of the above views to identify the ideology evinced by a news article. Our model draws on advances in representation learning in natural language processing and network science to capture cues from both textual content and the network structure of news articles. We empirically evaluate our model against a battery of baselines and show that our model outperforms state of the art by 10 percentage points F1 score.", "phrases": ["political ideology detection", "news article", "left-vs-right bias classification"], "overall_score": 1.6683315006359554, "scores": [0.927272980810882, 0.8263773503914689, 0.5242208311417437], "rank_score": 0.7592903874480315} -{"id": "yates-etzioni-2007-unsupervised", "title": "Unsupervised Resolution of Objects and Relations on the Web", "abstract": "The task of identifying synonymous relations and objects, or Synonym Resolution (SR), is critical for high-quality information extraction. The bulk of previous SR work assumed strong domain knowledge or hand-tagged training examples. This paper investigates SR in the context of unsupervised information extraction, where neither is available. The paper presents a scalable, fully-implemented system for SR that runs in O(KN log N) time in the number of extractions N and the maximum number of synonyms per word, K. 
The system, called RESOLVER, introduces a probabilistic relational model for predicting whether two strings are co-referential based on the similarity of the assertions containing them. Given two million assertions extracted from the Web, RESOLVER resolves objects with 78% precision and an estimated 68% recall and resolves relations with 90% precision and 35% recall.", "phrases": ["object", "web", "resolver"], "overall_score": 1.2219287722895456, "scores": [0.8548208009644096, 0.8832690446931607, 0.539591255643525], "rank_score": 0.7592270337670316} -{"id": "erkan-etal-2007-semi", "title": "Semi-Supervised Classification for Extracting Protein Interaction Sentences using Dependency Parsing", "abstract": "We introduce a relation extraction method to identify the sentences in biomedical text that indicate an interaction among the protein names mentioned. Our approach is based on the analysis of the paths between two protein names in the dependency parse trees of the sentences. Given two dependency trees, we define two separate similarity functions (kernels) based on cosine similarity and edit distance among the paths between the protein names. Using these similarity functions, we investigate the performances of two classes of learning algorithms, Support Vector Machines and k-nearest-neighbor, and the semisupervised counterparts of these algorithms, transductive SVMs and harmonic functions, respectively. Significant improvement over the previous results in the literature is reported as well as a new benchmark dataset is introduced. Semi-supervised algorithms perform better than their supervised version by a wide margin especially when the amount of labeled data is limited.", "phrases": ["dependency parsing", "semi-supervised classification", "text document"], "overall_score": 2.003636822891882, "scores": [0.9042032143995259, 0.8474693502564444, 0.5260007549880411], "rank_score": 0.7592244398813371} -{"id": "hong-etal-2018-self", "title": "Self-regulation: Employing a Generative Adversarial Network to Improve Event Detection", "abstract": "Due to the ability of encoding and mapping semantic information into a high-dimensional latent feature space, neural networks have been successfully used for detecting events to a certain extent. However, such a feature space can be easily contaminated by spurious features inherent in event detection. In this paper, we propose a self-regulated learning approach by utilizing a generative adversarial network to generate spurious features. On the basis, we employ a recurrent network to eliminate the fakes. Detailed experiments on the ACE 2005 and TAC-KBP 2015 corpora show that our proposed method is highly effective and adaptable.", "phrases": ["generative adversarial network", "event detection", "spurious feature"], "overall_score": 1.360044022783715, "scores": [0.8985919427084598, 0.8505823991366401, 0.5279907232341942], "rank_score": 0.759055021693098} -{"id": "appidi-etal-2020-creation", "title": "Creation of Corpus and analysis in Code-Mixed Kannada-English Twitter data for Emotion Prediction", "abstract": "Emotion prediction is a critical task in the field of Natural Language Processing (NLP). There has been a significant amount of work done in emotion prediction for resource-rich languages. There has been work done on code-mixed social media corpus but not on emotion prediction of Kannada-English code-mixed Twitter data. 
In this paper, we analyze the problem of emotion prediction on corpus obtained from code-mixed Kannada-English extracted from Twitter annotated with their respective `Emotion' for each tweet. We experimented with machine learning prediction models using features like Character N-Grams, Word N-Grams, Repetitive characters, and others on SVM and LSTM on our corpus, which resulted in an accuracy of 30% and 32% respectively.", "phrases": ["twitter data", "emotion prediction", "resource-rich language"], "overall_score": 1.0517333595283649, "scores": [0.864155299690956, 0.8622683705347399, 0.5495720829678958], "rank_score": 0.7586652510645306} -{"id": "reed-etal-2008-language", "title": "Language Resources for Studying Argument", "abstract": "This paper describes the development of a written corpus of argumentative reasoning. Arguments in the corpus have been analysed using state of the art techniques from argumentation theory and have been marked up using an open, reusable markup language. A number of the key challenges enountered during the process are explored, and preliminary observations about features such as inter-coder reliability and corpus statistics are discussed. In addition, several examples are offered of how this kind of language resource can be used in linguistic, computational and philosophical research, and in particular, how the corpus has been used to initiate a programme investigating the automatic detection of argumentative structure.", "phrases": ["argumentation theory", "araucaria", "newspaper article", "parliamentary record"], "overall_score": 1.9458728794473166, "scores": [1.2822423517533283, 0.6404952384719242, 0.5809237711634065, 0.5308981690546178], "rank_score": 0.7586398826108193} -{"id": "chandlee-etal-2015-output", "title": "Output Strictly Local Functions", "abstract": "This paper characterizes a subclass of subsequential string-to-string functions called Output Strictly Local (OSL) and presents a learning algorithm which provably learns any OSL function in polynomial time and data. This algorithm is more efficient than other existing ones capable of learning this class. The OSL class is motivated by the study of the nature of string-to-string transformations, a cornerstone of modern phonological grammars.", "phrases": ["function", "phonological process", "corollary", "important exception", "prefix function"], "overall_score": 1.359252665170036, "scores": [1.2327798336245275, 0.8614917609647109, 0.575845728828504, 0.5649308980132729, 0.5580185615664927], "rank_score": 0.7586133565995016} -{"id": "constant-etal-2017-survey", "title": "Survey: Multiword Expression Processing: A Survey", "abstract": "Multiword expressions (MWEs) are a class of linguistic forms spanning conventional word boundaries that are both idiosyncratic and pervasive across different languages. The structure of linguistic processing that depends on the clear distinction between words and phrases has to be re-thought to accommodate MWEs. The issue of MWE handling is crucial for NLP applications, where it raises a number of challenges. The emergence of solutions in the absence of guiding principles motivates this survey, whose aim is not only to provide a focused review of MWE processing, but also to clarify the nature of interactions between MWE processing and downstream applications. We propose a conceptual framework within which challenges and research contributions can be positioned. 
It offers a shared understanding of what is meant by \u201cMWE processing,\u201d distinguishing the subtasks of MWE discovery and identification. It also elucidates the interactions between MWE processing and two use cases: Parsing and machine translation. Many of the approaches in the literature can be differentiated according to how MWE processing is timed with respect to underlying use cases. We discuss how such orchestration choices affect the scope of MWE-aware systems. For each of the two MWE processing subtasks and for each of the two use cases, we conclude on open issues and research perspectives.", "phrases": ["multiword expression", "subtask", "mwe discovery", "survey", "automatic identification"], "overall_score": 1.8188652859527854, "scores": [0.8714617585910056, 0.8644731744187106, 0.8604659137613206, 0.6513675563667284, 0.5448602962594133], "rank_score": 0.7585257398794357} -{"id": "ding-etal-2016-knowledge", "title": "Knowledge-Driven Event Embedding for Stock Prediction", "abstract": "Representing structured events as vectors in continuous space offers a new way for defining dense features for natural language processing (NLP) applications. Prior work has proposed effective methods to learn event representations that can capture syntactic and semantic information over text corpus, demonstrating their effectiveness for downstream tasks such as event-driven stock prediction. On the other hand, events extracted from raw texts do not contain background knowledge on entities and relations that they are mentioned. To address this issue, this paper proposes to leverage extra information from knowledge graph, which provides ground truth such as attributes and properties of entities and encodes valuable relations between entities. Specifically, we propose a joint model to combine knowledge graph information into the objective function of an event embedding learning model. Experiments on event similarity and stock market prediction show that our model is more capable of obtaining better event embeddings and making more accurate prediction on stock market volatilities.", "phrases": ["event embedding", "stock prediction", "knowledge-driven event"], "overall_score": 1.3588312150464703, "scores": [0.8721411596794053, 0.8012095294183544, 0.6017837333231748], "rank_score": 0.7583781408069782} -{"id": "mrksic-etal-2017-semantic", "title": "Semantic Specialization of Distributional Word Vector Spaces using Monolingual and Cross-Lingual Constraints", "abstract": "We present Attract-Repel, an algorithm for improving the semantic quality of word vectors by injecting constraints extracted from lexical resources. Attract-Repel facilitates the use of constraints from mono- and cross-lingual resources, yielding semantically specialized cross-lingual vector spaces. Our evaluation shows that the method can make use of existing cross-lingual lexicons to construct high-quality vector spaces for a plethora of different languages, facilitating semantic transfer from high- to lower-resource ones. The effectiveness of our approach is demonstrated with state-of-the-art results on semantic similarity datasets in six languages. We next show that Attract-Repel-specialized vectors boost performance in the downstream task of dialogue state tracking (DST) across multiple languages. 
Finally, we show that cross-lingual vector spaces produced by our algorithm facilitate the training of multilingual DST models, which brings further performance improvements.", "phrases": ["vector space", "semantic specialization", "language understanding task"], "overall_score": 2.5792578750399966, "scores": [0.8874039798067918, 0.8673508637586946, 0.5202597341943499], "rank_score": 0.7583381925866122} -{"id": "vogel-jurafsky-2010-learning", "title": "Learning to Follow Navigational Directions", "abstract": "We present a system that learns to follow navigational natural language directions. Where traditional models learn from linguistic annotation or word distributions, our approach is grounded in the world, learning by apprenticeship from routes through a map paired with English descriptions. Lacking an explicit alignment between the text and the reference path makes it difficult to determine what portions of the language describe which aspects of the route. We learn this correspondence with a reinforcement learning algorithm, using the deviation of the route we follow from the intended path as a reward signal. We demonstrate that our system successfully grounds the meaning of spatial terms like above and south into geometric properties of paths.", "phrases": ["route", "reinforcement learning", "environment", "action", "language instruction"], "overall_score": 2.1916995836417206, "scores": [1.5480387569945844, 0.5839397509410333, 0.5602516260456807, 0.5579784862187626, 0.5411714637878355], "rank_score": 0.7582760167975793} -{"id": "curran-2005-supersense", "title": "Supersense Tagging of Unknown Nouns Using Semantic Similarity", "abstract": "The limited coverage of lexical-semantic resources is a significant problem for NLP systems which can be alleviated by automatically classifying the unknown words. Supersense tagging assigns unknown nouns one of 26 broad semantic categories used by lexicographers to organise their manual insertion into WORDNET. Ciaramita and Johnson (2003) present a tagger which uses synonym set glosses as annotated training examples. We describe an unsupervised approach, based on vector-space similarity, which does not require annotated examples but significantly outperforms their tagger. We also demonstrate the use of an extremely large shallow-parsed corpus for calculating vector-space semantic similarity.", "phrases": ["noun", "semantic similarity", "wsd"], "overall_score": 1.8179558197160075, "scores": [0.8739812378620895, 0.8302914296004723, 0.5701667215368158], "rank_score": 0.7581464629997926} -{"id": "xu-etal-2011-passage", "title": "Passage Retrieval for Information Extraction using Distant Supervision", "abstract": "In this paper, we propose a keyword-based passage retrieval algorithm for information extraction, trained by distant supervision. Our goal is to be able to extract attributes of people and organizations more quickly and accurately by first ranking all the potentially relevant passages according to their likelihood of containing the answer and then performing a traditional deeper, slower analysis of individual passages. 
Using Freebase as our source of known relation instances and Wikipedia as our text source, we collected a weighted set of", "phrases": ["information extraction", "distant supervision", "relation instance"], "overall_score": 0.8328192169022312, "scores": [0.8905009159183125, 0.8531037375733403, 0.530589506247513], "rank_score": 0.7580647199130551} -{"id": "rubino-etal-2016-information", "title": "Information Density and Quality Estimation Features as Translationese Indicators for Human Translation Classification", "abstract": "This paper introduces information density and machine translation quality estimation inspired features to automatically detect and classify human translated texts. We investigate two settings: discriminating between translations and comparable originally authored texts, and distinguishing two levels of translation professionalism. Our framework is based on delexicalised sentence-level dense feature vector representations combined with a supervised machine learning approach. The results show state-of-the-art performance for mixed-domain translationese detection with information density and quality estimation based features, while results on translation expertise classi\ufb01cation are mixed.", "phrases": ["translator", "expertise", "information density"], "overall_score": 1.357974905557527, "scores": [0.8773491783482861, 0.8255610468527569, 0.570790450943704], "rank_score": 0.7579002253815822} -{"id": "qazvinian-radev-2011-learning", "title": "Learning From Collective Human Behavior to Introduce Diversity in Lexical Choice", "abstract": "We analyze collective discourse, a collective human behavior in content generation, and show that it exhibits diversity, a property of general collective systems. Using extensive analysis, we propose a novel paradigm for designing summary generation systems that reflect the diversity of perspectives seen in reallife collective summarization. We analyze 50 sets of summaries written by human about the same story or artifact and investigate the diversity of perspectives across these summaries. We show how different summaries use various phrasal information units (i.e., nuggets) to express the same atomic semantic units, called factoids. Finally, we present a ranker that employs distributional similarities to build a network of words, and captures the diversity of perspectives by detecting communities in this network. Our experiments show how our system outperforms a wide range of other document ranking systems that leverage diversity.", "phrases": ["collective human behavior", "diversity", "factoid"], "overall_score": 0.8325689347353856, "scores": [0.8806280028292726, 0.8636810885144135, 0.5292016184575156], "rank_score": 0.7578369032670672} -{"id": "matuschek-gurevych-2014-high", "title": "High Performance Word Sense Alignment by Joint Modeling of Sense Distance and Gloss Similarity", "abstract": "In this paper, we present a machine learning approach for word sense alignment (WSA) which combines distances between senses in the graph representations of lexical-semantic resources with gloss similarities. In this way, we significantly outperform the state of the art on each of the four datasets we consider. Moreover, we present two novel datasets for WSA between Wiktionary and Wikipedia in English and German. 
The latter dataset in not only of unprecedented size, but also created by the large community of Wiktionary editors instead of expert annotators, making it an interesting subject of study in its own right as the first crowdsourced WSA dataset. We will make both datasets freely available along with our computed alignments.", "phrases": ["distance", "gloss similarity", "wikipedia"], "overall_score": 1.2196582103712608, "scores": [0.8831244284467138, 0.8471420798427565, 0.5431822546888916], "rank_score": 0.7578162543261207} -{"id": "zheng-etal-2013-dynamic", "title": "Dynamic Knowledge-Base Alignment for Coreference Resolution", "abstract": "Coreference resolution systems can benefit greatly from inclusion of global context, and a number of recent approaches have demonstrated improvements when precomputing an alignment to external knowledge sources. However, since alignment itself is a challenging task and is often noisy, existing systems either align conservatively, resulting in very few links, or combine the attributes of multiple candidates, leading to a conflation of entities. Our approach instead performs joint inference between within-document coreference and entity linking, maintaining ranked lists of candidate entities that are dynamically merged and reranked during inference. Further, we incorporate a large set of surface string variations for each entity by using anchor texts from the web that link to the entity. These forms of global context enables our system to improve classifier-based coreference by 1.09 B 3 F1 points, and improve over the previous state-of-art by 0.41 points, thus introducing a new state-of-art result on the ACE 2004 data.", "phrases": ["list", "knowledge base", "tight integration"], "overall_score": 1.6650137761153252, "scores": [1.1312676557508345, 0.5879014676102992, 0.5541721544802317], "rank_score": 0.7577804259471218} -{"id": "fujita-sato-2008-computing", "title": "Computing Paraphrasability of Syntactic Variants Using Web Snippets", "abstract": "In a broad range of natural language processing tasks, large-scale knowledge-base of paraphrases is anticipated to improve their performance. The key issue in creating such a resource is to establish a practical method of computing semantic equivalence and syntactic substitutability, i.e., paraphrasability, between given pair of expressions. This paper addresses the issues of computing paraphrasability, focusing on syntactic variants of predicate phrases. Our model estimates paraphrasability based on traditional distributional similarity measures, where the Web snippets are used to overcome the data sparseness problem in handling predicate phrases. Several feature sets are evaluated through empirical experiments.", "phrases": ["syntactic variant", "web snippet", "distributional similarity measure"], "overall_score": 0.832239872502482, "scores": [0.8879920338912417, 0.8614591996863192, 0.5231609001664695], "rank_score": 0.7575373779146769} -{"id": "klyueva-etal-2017-neural", "title": "Neural Networks for Multi-Word Expression Detection", "abstract": "In this paper we describe the MUMULS system that participated to the 2017 shared task on automatic identification of verbal multiword expressions (VMWEs). The MUMULS system was implemented using a supervised approach based on recurrent neural networks using the open source library TensorFlow. The model was trained on a data set containing annotated VMWEs as well as morphological and syntactic information. 
The MUMULS system performed the identification of VMWEs in 15 languages, it was one of few systems that could categorize VMWEs type in nearly all languages.", "phrases": ["supervised approach", "recurrent neural network", "syntactic information"], "overall_score": 1.5744276927290397, "scores": [1.1569445507633873, 0.5660911826170382, 0.5483832911580132], "rank_score": 0.7571396748461462} -{"id": "sun-duh-2020-clirmatrix", "title": "CLIRMatrix: A massively large collection of bilingual and multilingual datasets for Cross-Lingual Information Retrieval", "abstract": "We present CLIRMatrix, a massively large collection of bilingual and multilingual datasets for Cross-Lingual Information Retrieval extracted automatically from Wikipedia. CLIRMatrix comprises (1) BI-139, a bilingual dataset of queries in one language matched with relevant documents in another language for 139x138=19,182 language pairs, and (2) MULTI-8, a multilingual dataset of queries and documents jointly aligned in 8 different languages. In total, we mined 49 million unique queries and 34 billion (query, document, label) triplets, making it the largest and most comprehensive CLIR dataset to date. This collection is intended to support research in end-to-end neural information retrieval and is publicly available at [url]. We provide baseline neural model results on BI-139, and evaluate MULTI-8 in both single-language retrieval and mix-language retrieval settings.", "phrases": ["large collection", "cross-lingual information retrieval", "wikipedia", "query"], "overall_score": 0.8317680374750966, "scores": [0.9944611811034217, 0.8978001240724583, 0.5681884866914415, 0.5679817887896954], "rank_score": 0.7571078951642543} -{"id": "raux-eskenazi-2008-optimizing", "title": "Optimizing Endpointing Thresholds using Dialogue Features in a Spoken Dialogue System", "abstract": "This paper describes a novel algorithm to dynamically set endpointing thresholds based on a rich set of dialogue features to detect the end of user utterances in a dialogue system. By analyzing the relationship between silences in user's speech to a spoken dialogue system and a wide range of automatically extracted features from discourse, semantics, prosody, timing and speaker characteristics, we found that all features correlate with pause duration and with whether a silence indicates the end of the turn, with semantics and timing being the most informative. Based on these features, the proposed method reduces latency by up to 24% over a fixed threshold baseline. Offline evaluation results were confirmed by implementing the proposed algorithm in the Let's Go system.", "phrases": ["spoken dialogue system", "prosody", "turn-taking"], "overall_score": 1.4730919226548047, "scores": [0.9127227788642408, 0.8281498933474609, 0.5301858194200173], "rank_score": 0.7570194972105732} -{"id": "kawahara-etal-2014-inducing", "title": "Inducing Example-based Semantic Frames from a Massive Amount of Verb Uses", "abstract": "We present an unsupervised method for inducing semantic frames from verb uses in giga-word corpora. Our semantic frames are verb-specific example-based frames that are distinguished according to their senses. We use the Chinese Restaurant Process to automatically induce these frames from a massive amount of verb instances. In our experiments, we acquire broad-coverage semantic frames from two giga-word corpora, the larger comprising 20 billion words. 
Our experimental results indicate the effectiveness of our approach.", "phrases": ["massive amount", "verb use", "case frame", "different language"], "overall_score": 1.3563814589283572, "scores": [0.8589004300485432, 0.8110880819764306, 0.830047573160703, 0.5280075383543242], "rank_score": 0.7570109058850003} -{"id": "abhishek-etal-2017-fine", "title": "Fine-Grained Entity Type Classification by Jointly Learning Representations and Label Embeddings", "abstract": "Fine-grained entity type classification (FETC) is the task of classifying an entity mention to a broad set of types. Distant supervision paradigm is extensively used to generate training data for this task. However, generated training data assigns same set of labels to every mention of an entity without considering its local context. Existing FETC systems have two major drawbacks: assuming training data to be noise free and use of hand crafted features. Our work overcomes both drawbacks. We propose a neural network model that jointly learns entity mentions and their context representation to eliminate use of hand crafted features. Our model treats training data as noisy and uses non-parametric variant of hinge loss function. Experiments show that the proposed model outperforms previous state-of-the-art methods on two publicly available datasets, namely FIGER (GOLD) and BBN with an average relative improvement of 2.69% in micro-F1 score. Knowledge learnt by our model on one dataset can be transferred to other datasets while using same model or other FETC systems. These approaches of transferring knowledge further improve the performance of respective models.", "phrases": ["mention", "neural network model", "hinge loss function"], "overall_score": 1.8145177474157546, "scores": [0.9132588030171161, 0.824490540655832, 0.5323886827866375], "rank_score": 0.7567126754865287} -{"id": "chen-etal-2016-thorough", "title": "A Thorough Examination of the CNN/Daily Mail Reading Comprehension Task", "abstract": "Enabling a computer to understand a document so that it can answer comprehension questions is a central, yet unsolved goal of NLP. A key factor impeding its solution by machine learned systems is the limited availability of human-annotated data. Hermann et al. (2015) seek to solve this problem by creating over a million training examples by pairing CNN and Daily Mail news articles with their summarized bullet points, and show that a neural network can then be trained to give good performance on this task. In this paper, we conduct a thorough examination of this new reading comprehension task. Our primary aim is to understand what depth of language understanding is required to do well on this task. 
We approach this from one side by doing a careful hand-analysis of a small subset of the problems and from the other by showing that simple, carefully designed systems can obtain accuracies of 73.6% and 76.6% on these two datasets, exceeding current state-of-the-art results by 7-10% and approaching what we believe is the ceiling for performance on this task.", "phrases": ["daily mail", "comprehension task", "language understanding", "human-level performance", "reader"], "overall_score": 3.2663780016561983, "scores": [1.3158366454103123, 0.8749465166500959, 0.5419319451545037, 0.527529856735298, 0.5224846183448261], "rank_score": 0.7565459164590073} -{"id": "huang-riloff-2013-multi", "title": "Multi-faceted Event Recognition with Bootstrapped Dictionaries", "abstract": "Identifying documents that describe a specific type of event is challenging due to the high complexity and variety of event descriptions. We propose a multi-faceted event recognition approach, which identifies documents about an event using event phrases as well as defining characteristics of the event. Our research focuses on civil unrest events and learns civil unrest expressions as well as phrases corresponding to potential agents and reasons for civil unrest. We present a bootstrapping algorithm that automatically acquires event phrases, agent terms, and purpose (reason) phrases from unannotated texts. We use the bootstrapped dictionaries to identify civil unrest documents and show that multi-faceted event recognition can yield high accuracy.", "phrases": ["bootstrapped dictionary", "unrest event", "agent", "multi-faceted event recognition", "event expression"], "overall_score": 1.4721396549753483, "scores": [0.9666339957963996, 0.8869073966706659, 0.8321735246036539, 0.5682875182466055, 0.5286482068257168], "rank_score": 0.7565301284286082} -{"id": "sohrab-miwa-2018-deep", "title": "Deep Exhaustive Model for Nested Named Entity Recognition", "abstract": "We propose a simple deep neural model for nested named entity recognition (NER). Most NER models focused on flat entities and ignored nested entities, which failed to fully capture underlying semantic information in texts. The key idea of our model is to enumerate all possible regions or spans as potential entity mentions and classify them with deep neural networks. To reduce the computational costs and capture the information of the contexts around the regions, the model represents the regions using the outputs of shared underlying bidirectional long short-term memory. We evaluate our exhaustive model on the GENIA and JNLPBA corpora in biomedical domain, and the results show that our model outperforms state-of-the-art models on nested and flat NER, achieving 77.1% and 78.4% respectively in terms of F-score, without any external knowledge resources.", "phrases": ["entity mention", "neural exhaustive model", "subsequence", "genia data", "region classification model"], "overall_score": 2.3377281335994695, "scores": [0.9785364347792299, 0.8853839296113207, 0.8569943645791596, 0.5329301957584918, 0.527610957670962], "rank_score": 0.7562911764798328} -{"id": "cao-etal-2019-low", "title": "Low-Resource Name Tagging Learned with Weakly Labeled Data", "abstract": "Name tagging in low-resource languages or domains suffers from inadequate training data. Existing work heavily relies on additional information, while leaving those noisy annotations unexplored that extensively exist on the web. 
In this paper, we propose a novel neural model for name tagging solely based on weakly labeled (WL) data, so that it can be applied in any low-resource settings. To take the best advantage of all WL sentences, we split them into high-quality and noisy portions for two modules, respectively: (1) a classification module focusing on the large portion of noisy data can efficiently and robustly pretrain the tag classifier by capturing textual context semantics; and (2) a costly sequence labeling module focusing on high-quality data utilizes Partial-CRFs with non-entity sampling to achieve global optimum. Two modules are combined via shared parameters. Extensive experiments involving five low-resource languages and fine-grained food domain demonstrate our superior performance (6% and 7.8% F1 gains on average) as well as efficiency.", "phrases": ["high-quality", "ds-ner", "noisy label", "knowledge basis"], "overall_score": 2.096318277424624, "scores": [1.045966245237511, 0.8932106474166328, 0.552940849749922, 0.5322302405613335], "rank_score": 0.7560869957413499} -{"id": "reimers-gurevych-2017-reporting", "title": "Reporting Score Distributions Makes a Difference: Performance Study of LSTM-networks for Sequence Tagging", "abstract": "In this paper we show that reporting a single performance score is insufficient to compare non-deterministic approaches. We demonstrate for common sequence tagging tasks that the seed value for the random number generator can result in statistically significant (p < 10^-4) differences for state-of-the-art systems. For two recent systems for NER, we observe an absolute difference of one percentage point F\u2081-score depending on the selected seed value, making these systems perceived either as state-of-the-art or mediocre. Instead of publishing and reporting single performance scores, we propose to compare score distributions based on multiple executions. Based on the evaluation of 50.000 LSTM-networks for five sequence tagging tasks, we present network architectures that produce both superior performance as well as are more stable with respect to the remaining hyperparameters.", "phrases": ["score distribution", "lstm-network", "recent system"], "overall_score": 2.4028344345531045, "scores": [0.84136688700954, 0.8915199574546557, 0.5353262470900461], "rank_score": 0.7560710305180806} -{"id": "rioux-etal-2014-fear", "title": "Fear the REAPER: A System for Automatic Multi-Document Summarization with Reinforcement Learning", "abstract": "This paper explores alternate algorithms, reward functions and feature sets for performing multi-document summarization using reinforcement learning with a high focus on reproducibility. We show that ROUGE results can be improved using a unigram and bigram similarity metric when training a learner to select sentences for summarization. Learners are trained to summarize document clusters based on various algorithms and reward functions and then evaluated using ROUGE. Our experiments show a statistically significant improvement of 1.33%, 1.58%, and 2.25% for ROUGE-1, ROUGE-2 and ROUGEL scores, respectively, when compared with the performance of the state of the art in automatic summarization with reinforcement learning on the DUC2004 dataset. 
Furthermore, query-focused extensions of our approach show an improvement of 1.37% and 2.31% for ROUGE-2 and ROUGE-SU4, respectively, over query-focused extensions of the state of the art with reinforcement learning on the DUC2006 dataset.", "phrases": ["multi-document summarization", "reinforcement learning", "rouge-2"], "overall_score": 1.354687415180043, "scores": [0.9256796758776742, 0.8187002496759946, 0.5238164006480014], "rank_score": 0.7560654420672234} -{"id": "zeller-etal-2013-derivbase", "title": "DErivBase: Inducing and Evaluating a Derivational Morphology Resource for German", "abstract": "Derivational models are still an under-researched area in computational morphology. Even for German, a rather resource-rich language, there is a lack of large-coverage derivational knowledge. This paper describes a rule-based framework for inducing derivational families (i.e., clusters of lemmas in derivational relationships) and its application to create a high-coverage German resource, DERIVBASE, mapping over 280k lemmas into more than 17k non-singleton clusters. We focus on the rule component and a qualitative and quantitative evaluation. Our approach achieves up to 93% precision and 71% recall. We attribute the high precision to the fact that our rules are based on information from grammar books.", "phrases": ["morphology", "rule-based framework", "derivational family", "german resource", "lexeme"], "overall_score": 1.7408963805588187, "scores": [1.2041619864956654, 0.8622114779704854, 0.5822359702554231, 0.5807540086852385, 0.5509450148033795], "rank_score": 0.7560616916420384} -{"id": "kirchhoff-etal-2012-evaluating", "title": "Evaluating User Preferences in Machine Translation Using Conjoint Analysis", "abstract": "In spite of much ongoing research on machine translation evaluation there is little quantitative work that directly measures users\u2019 intuitive or emotional preferences regarding different types of machine translation errors. However, the elicitation and modeling of user preferences is an important prerequisite for future research on user adaptation and customization of machine translation engines. In this paper we explore the use of conjoint analysis as a formal quantitative framework to gain insight into users\u2019 relative preferences for different translation error types. Using English-Spanish as the translation direction we conduct a crowd-sourced conjoint analysis study and obtain utility values for individual error types. Our results indicate that word order errors are clearly the most dispreferred error type, followed by word sense, morphological, and function word errors.", "phrases": ["user preference", "machine translation", "conjoint analysis"], "overall_score": 1.0479524908927955, "scores": [0.8414581100691153, 0.8365674509709446, 0.5897882315073493], "rank_score": 0.7559379308491364} -{"id": "zhai-etal-2010-grouping", "title": "Grouping Product Features Using Semi-Supervised Learning with Soft-Constraints", "abstract": "In opinion mining of product reviews, one often wants to produce a summary of opinions based on product features/attributes. However, for the same feature, people can express it with different words and phrases. To produce a meaningful summary, these words and phrases, which are domain synonyms, need to be grouped under the same feature group. This paper proposes a constrained semi-supervised learning method to solve the problem. Experimental results using reviews from five different domains show that the proposed method is competent for the task. &#13;
It outperforms the original EM and the state-of-the-art existing methods by a large margin.", "phrases": ["product feature", "synonym", "semi-supervised learning method", "group expression", "such knowledge"], "overall_score": 1.9946707714067338, "scores": [0.8368548779752316, 1.2555071830441444, 0.5770151026863938, 0.5578648705422081, 0.5518929413789668], "rank_score": 0.7558269951253889} -{"id": "lu-etal-2010-mining", "title": "Mining Large-scale Parallel Corpora from Multilingual Patents: An English-Chinese example and its application to SMT", "abstract": "In this paper, we demonstrate how to mine large-scale parallel corpora with multilingual patents, which have not been thoroughly explored before. We show how a large-scale English-Chinese parallel corpus containing over 14 million sentence pairs with only 1-5% wrong can be mined from a large amount of English-Chinese bilingual patents. To our knowledge, this is the largest single parallel corpus in terms of sentence pairs. Moreover, we estimate the potential for mining multilingual parallel corpora involving English, Chinese, Japanese, Korean, German, etc., which would to some extent reduce the parallel data acquisition bottleneck in multilingual information processing.", "phrases": ["large-scale parallel corpora", "multilingual patent", "common verb"], "overall_score": 1.2163062991330913, "scores": [0.9186406889376294, 0.8270728161126438, 0.5214872792786355], "rank_score": 0.755733594776303} -{"id": "yuste-etal-2010-pangeamt", "title": "PangeaMT - putting open standards to work... well", "abstract": "PangeaMT is presented from our standpoint as an LSP keen to develop and implement a cost-effective translation automation strategy that is also in line with our full commitment to open standards. Moses lies at the very core of PangeaMT but we have built several pre-/post-processing modules around it, from word reordering to inline mark-up parser to TMX/XLIFF filters. These represent interesting breakthroughs in real-world, customized SMT applications.", "phrases": ["open standard", "moses", "pangeamt"], "overall_score": 1.0476336153230958, "scores": [0.9032143861803791, 0.8111470175264688, 0.5527623285359613], "rank_score": 0.7557079107476031} -{"id": "obeid-etal-2020-camel", "title": "CAMeL Tools: An Open Source Python Toolkit for Arabic Natural Language Processing", "abstract": "We present CAMeL Tools, a collection of open-source tools for Arabic natural language processing in Python. CAMeL Tools currently provides utilities for pre-processing, morphological modeling, Dialect Identification, Named Entity Recognition and Sentiment Analysis. In this paper, we describe the design of CAMeL Tools and the functionalities it provides.", "phrases": ["python", "sentiment analysis", "camel tools", "arabic nlp"], "overall_score": 1.5712054815172896, "scores": [1.316995863117587, 0.6204897488294283, 0.5475711249872106, 0.5373037382691738], "rank_score": 0.7555901188008499} -{"id": "hastie-etal-2013-demonstration", "title": "Demonstration of the PARLANCE system: a data-driven incremental, spoken dialogue system for interactive search", "abstract": "The Parlance system for interactive search processes dialogue at a microturn level, displaying dialogue phenomena that play a vital role in human spoken conversation. These dialogue phenomena include more natural turn-taking through rapid system responses, generation of backchannels, and user barge-ins. &#13;
The Parlance demonstration system differentiates itself from other incremental systems in that it is data-driven with an infrastructure that scales well.", "phrases": ["parlance system", "dialogue system", "turn-taking"], "overall_score": 1.0474602837082292, "scores": [0.878902945026053, 0.8466015537189217, 0.5412441365060389], "rank_score": 0.7555828784170044} -{"id": "stojanovski-fraser-2018-coreference", "title": "Coreference and Coherence in Neural Machine Translation: A Study Using Oracle Experiments", "abstract": "Cross-sentence context can provide valuable information in Machine Translation and is critical for translation of anaphoric pronouns and for providing consistent translations. In this paper, we devise simple oracle experiments targeting coreference and coherence. Oracles are an easy way to evaluate the effect of different discourse-level phenomena in NMT using BLEU and eliminate the necessity to manually define challenge sets for this purpose. We propose two context-aware NMT models and compare them against models working on a concatenation of consecutive sentences. Concatenation models perform better, but are computationally expensive. We show that NMT models taking advantage of context oracle signals can achieve considerable gains in BLEU, of up to 7.02 BLEU for coreference and 1.89 BLEU for coherence on subtitles translation. Access to strong signals allows us to make clear comparisons between context-aware models.", "phrases": ["oracle experiment", "cross-sentence context", "coreference"], "overall_score": 1.5710939180931125, "scores": [0.8968681863211717, 0.7987189911586984, 0.5710222269238749], "rank_score": 0.7555364681345816} -{"id": "eryani-etal-2020-spelling", "title": "A Spelling Correction Corpus for Multiple Arabic Dialects", "abstract": "Arabic dialects are the non-standard varieties of Arabic commonly spoken \u2013 and increasingly written on social media \u2013 across the Arab world. Arabic dialects do not have standard orthographies, a challenge for natural language processing applications. In this paper, we present the MADAR CODA Corpus, a collection of 10,000 sentences from five Arabic city dialects (Beirut, Cairo, Doha, Rabat, and Tunis) represented in the Conventional Orthography for Dialectal Arabic (CODA) in parallel with their raw original form. The sentences come from the Multi-Arabic Dialect Applications and Resources (MADAR) Project and are in parallel across the cities (2,000 sentences from each city). This publicly available resource is intended to support research on spelling correction and text normalization for Arabic dialects. We present results on a bootstrapping technique we use to speed up the CODA annotation, as well as on the degree of similarity across the dialects before and after CODA annotation.", "phrases": ["spelling correction", "dialect", "arabic city dialect"], "overall_score": 1.469542207269506, "scores": [1.1210890958344675, 0.5991257048988817, 0.5453711123412021], "rank_score": 0.7551953043581837} -{"id": "fulgoni-etal-2016-empirical", "title": "An Empirical Exploration of Moral Foundations Theory in Partisan News Sources", "abstract": "News sources frame issues in different ways in order to appeal or control the perception of their readers. We present a large scale study of news articles from partisan sources in the US across a variety of different issues. We first highlight that differences between sides exist by predicting the political leaning of articles of unseen political bias. &#13;
Framing can be driven by different types of morality that each group values. We emphasize differences in the framing of different news, building on the moral foundations theory quantified using hand-crafted lexicons. Our results show that partisan sources frame political issues differently both in terms of word usage and through the moral foundations they relate to.", "phrases": ["moral foundation theory", "framing", "news article"], "overall_score": 1.5703575689666018, "scores": [1.0911805746199468, 0.6302700341559827, 0.5440964683946358], "rank_score": 0.755182359056855} -{"id": "rei-2017-semi", "title": "Semi-supervised Multitask Learning for Sequence Labeling", "abstract": "We propose a sequence labeling framework with a secondary training objective, learning to predict surrounding words for every word in the dataset. This language modeling objective incentivises the system to learn general-purpose patterns of semantic and syntactic composition, which are also useful for improving accuracy on different sequence labeling tasks. The architecture was evaluated on a range of datasets, covering the tasks of error detection in learner texts, named entity recognition, chunking and POS-tagging. The novel language modeling objective provided consistent performance improvements on every benchmark, without requiring any additional annotated or unannotated data.", "phrases": ["secondary training objective", "objective", "language modeling", "sequence labeling task"], "overall_score": 1.9928940221877525, "scores": [1.253981477031016, 0.641476134251108, 0.5915892672120902, 0.5335680961388555], "rank_score": 0.7551537436582674} -{"id": "pretorius-etal-2009-setswana", "title": "Setswana Tokenisation and Computational Verb Morphology: Facing the Challenge of a Disjunctive Orthography", "abstract": "Setswana, a Bantu language in the Sotho group, is one of the eleven official languages of South Africa. The language is characterised by a disjunctive orthography, mainly affecting the important word category of verbs. In particular, verbal prefixal morphemes are usually written disjunctively, while suffixal morphemes follow a conjunctive writing style. Therefore, Setswana tokenisation cannot be based solely on whitespace, as is the case in many alphabetic, segmented languages, including the conjunctively written Nguni group of South African Bantu languages. This paper shows how two tokeniser transducers and a finite-state (rule-based) morphological analyser may be combined to effectively solve the Setswana tokenisation problem. The approach has the important advantage of bringing the processing of Setswana beyond the morphological analysis level in line with what is appropriate for the Nguni languages. This means that the challenge of the disjunctive orthography is met at the tokenisation/morphological analysis level and does not in principle propagate to subsequent levels of analysis such as POS tagging and shallow parsing, etc. &#13;
Indeed, the approach ensures that an aspect such as orthography does not obfuscate sound linguistics and, ultimately, proper semantic analysis, which remains the ultimate aim of linguistic analysis and therefore also computational linguistic analysis.", "phrases": ["disjunctive orthography", "nguni language", "setswana tokenisation"], "overall_score": 0.8295963753867027, "scores": [0.8726170441050025, 0.8612761958174836, 0.5315002495107733], "rank_score": 0.7551311631444197} -{"id": "miceli-barone-sennrich-2017-parallel", "title": "A Parallel Corpus of Python Functions and Documentation Strings for Automated Code Documentation and Code Generation", "abstract": "Automated documentation of programming source code and automated code generation from natural language are challenging tasks of both practical and scientific interest. Progress in these areas has been limited by the low availability of parallel corpora of code and natural language descriptions, which tend to be small and constrained to specific domains. In this work we introduce a large and diverse parallel corpus of a hundred thousand Python functions with their documentation strings (\u201cdocstrings\u201d) generated by scraping open source repositories on GitHub. We describe baseline results for the code documentation and code generation tasks obtained by neural machine translation. We also experiment with data augmentation techniques to further increase the amount of training data. We release our datasets and processing scripts in order to stimulate research in these areas.", "phrases": ["parallel corpus", "code generation", "python project"], "overall_score": 1.2152216335000472, "scores": [0.9130801724678895, 0.8000521988221796, 0.5520465910884599], "rank_score": 0.7550596541261764} -{"id": "utsuro-etal-2003-effect", "title": "Effect of Cross-Language IR in Bilingual Lexicon Acquisition from Comparable Corpora", "abstract": "Within the framework of translation knowledge acquisition from WWW news sites, this paper studies issues on the effect of cross-language retrieval of relevant texts in bilingual lexicon acquisition from comparable corpora. We experimentally show that it is quite effective to reduce the candidate bilingual term pairs against which bilingual term correspondences are estimated, in terms of both computational complexity and the performance of precise estimation of bilingual term correspondences.", "phrases": ["bilingual lexicon acquisition", "comparable corpora", "translation knowledge acquisition"], "overall_score": 1.215086485274829, "scores": [0.9487715663598649, 0.7929445466987044, 0.5232109323789449], "rank_score": 0.7549756818125047} -{"id": "jauhiainen-etal-2016-heli", "title": "HeLI, a Word-Based Backoff Method for Language Identification", "abstract": "In this paper we describe the Helsinki language identification method, HeLI, and the resources we created for and used in the 3rd edition of the Discriminating between Similar Languages (DSL) shared task, which was organized as part of the VarDial 2016 workshop. The shared task comprised a total of 8 tracks, of which we participated in 7. The shared task had a record number of participants, with 17 teams providing results for the closed track of the test set A. Our system reached the 2nd position in 4 tracks (A closed and open, B1 open and B2 open) and in this paper we are focusing on the methods and data used for those tracks. We describe our word-based backoff method in mathematical notation. &#13;
We also describe how we selected the corpus we used in the open tracks.", "phrases": ["word-based backoff method", "heli", "previous vardial workshop"], "overall_score": 1.2150483955290257, "scores": [0.9304369088567784, 0.8134018135859723, 0.5210173235256149], "rank_score": 0.7549520153227885} -{"id": "hidey-mckeown-2016-identifying", "title": "Identifying Causal Relations Using Parallel Wikipedia Articles", "abstract": "The automatic detection of causal relationships in text is important for natural language understanding. This task has proven to be difficult, however, due to the need for world knowledge and inference. We focus on a sub-task of this problem where an open class set of linguistic markers can provide clues towards understanding causality. Unlike the explicit markers, a closed class, these markers vary significantly in their linguistic forms. We leverage parallel Wikipedia corpora to identify new markers that are variations on known causal phrases, creating a training set via distant supervision. We also train a causal classifier using features from the open class markers and semantic features providing contextual information. The results show that our features provide an 11.05 point absolute increase over the baseline on the task of identifying causality in text.", "phrases": ["causality", "wikipedia", "linguistic marker"], "overall_score": 1.875490924653983, "scores": [1.1299457476828216, 0.6141316503102764, 0.5201818144976293], "rank_score": 0.7547530708302425} -{"id": "pan-etal-2020-semantic", "title": "Semantic Graphs for Generating Deep Questions", "abstract": "This paper proposes the problem of Deep Question Generation (DQG), which aims to generate complex questions that require reasoning over multiple pieces of information about the input passage. In order to capture the global structure of the document and facilitate reasoning, we propose a novel framework that first constructs a semantic-level graph for the input document and then encodes the semantic graph by introducing an attention-based GGNN (Att-GGNN). Afterward, we fuse the document-level and graph-level representations to perform joint training of content selection and question decoding. On the HotpotQA deep-question centric dataset, our model greatly improves performance over questions requiring reasoning over multiple facts, leading to state-of-the-art performance. The code is publicly available at .", "phrases": ["complex question", "complexity", "reasoning", "input passage", "semantic graph"], "overall_score": 2.043627597945039, "scores": [1.1629771713553045, 0.8978404492053028, 0.6072628183389187, 0.5707567380738608, 0.5344082324234862], "rank_score": 0.7546490818793747} -{"id": "kumar-byrne-2003-weighted", "title": "A Weighted Finite State Transducer Implementation of the Alignment Template Model for Statistical Machine Translation", "abstract": "We present a derivation of the alignment template model for statistical machine translation and an implementation of the model using weighted finite state transducers. The approach we describe allows us to implement each constituent distribution of the model as a weighted finite state transducer or acceptor. We show that bitext word alignment and translation under the model can be performed with standard FSM operations involving these transducers. &#13;
One of the benefits of using this framework is that it obviates the need to develop specialized search procedures, even for the generation of lattices or N-Best lists of bitext word alignments and translation hypotheses. We evaluate the implementation of the model on the French-to-English Hansards task and report alignment and translation performance.", "phrases": ["state transducer", "alignment template model", "statistical machine translation"], "overall_score": 1.658132207986682, "scores": [0.8669211803783927, 0.8258290343153089, 0.5711952532705695], "rank_score": 0.7546484893214238} -{"id": "mulcaire-etal-2019-polyglot", "title": "Polyglot Contextual Representations Improve Crosslingual Transfer", "abstract": "We introduce Rosita, a method to produce multilingual contextual word representations by training a single language model on text from multiple languages. Our method combines the advantages of contextual word representations with those of multilingual representation learning. We produce language models from dissimilar language pairs (English/Arabic and English/Chinese) and use them in dependency parsing, semantic role labeling, and named entity recognition, with comparisons to monolingual and non-contextual variants. Our results provide further evidence for the benefits of polyglot learning, in which representations are shared across multiple languages.", "phrases": ["multiple language", "dependency parsing", "elmo", "downstream task"], "overall_score": 1.990927046835305, "scores": [1.0572363419446298, 0.8786975222362294, 0.5420631196174347, 0.5396366605993552], "rank_score": 0.7544084110994123} -{"id": "karimi-etal-2021-aeda-easier", "title": "AEDA: An Easier Data Augmentation Technique for Text Classification", "abstract": "This paper proposes the AEDA (An Easier Data Augmentation) technique to help improve the performance on text classification tasks. AEDA includes only random insertion of punctuation marks into the original text. This is an easier technique to implement for data augmentation than the EDA method (Wei and Zou, 2019) with which we compare our results. In addition, it keeps the order of the words while changing their positions in the sentence leading to a better generalized performance. Furthermore, the deletion operation in EDA can cause loss of information which, in turn, misleads the network, whereas AEDA preserves all the input information. Following the baseline, we perform experiments on five different datasets for text classification. We show that using the AEDA-augmented data for training, the models show superior performance compared to using the EDA-augmented data in all five datasets. The source code will be made available for further study and reproduction of the results.", "phrases": ["text classification", "random insertion", "aeda"], "overall_score": 1.0455225168201001, "scores": [0.939652854789189, 0.7901018204991254, 0.5328005499428452], "rank_score": 0.7541850750770532} -{"id": "benton-etal-2017-multitask", "title": "Multitask Learning for Mental Health Conditions with Limited Social Media Data", "abstract": "Language contains information about the author's demographic attributes as well as their mental state, and has been successfully leveraged in NLP to predict either one alone. However, demographic attributes and mental states also interact with each other, and we are the first to demonstrate how to use them jointly to improve the prediction of mental health conditions across the board. &#13;
We model the different conditions as tasks in a multitask learning (MTL) framework, and establish for the first time the potential of deep learning in the prediction of mental health from online user-generated text. The framework we propose significantly improves over all baselines and single-task models for predicting mental health conditions, with particularly significant gains for conditions with limited data. In addition, our best MTL model can predict the presence of conditions (neuroatypicality) more generally, further reducing the error of the strong feed-forward baseline.", "phrases": ["mental health", "condition", "multi-task learning"], "overall_score": 1.9343570847523868, "scores": [1.1537203444367299, 0.5808808864098562, 0.5278493853325604], "rank_score": 0.7541502053930489} -{"id": "mubarak-etal-2020-overview", "title": "Overview of OSACT4 Arabic Offensive Language Detection Shared Task", "abstract": "This paper provides an overview of the offensive language detection shared task at the 4th workshop on Open-Source Arabic Corpora and Processing Tools (OSACT4). There were two subtasks, namely: Subtask A, involving the detection of offensive language, which contains unacceptable or vulgar content in addition to any kind of explicit or implicit insults or attacks against individuals or groups; and Subtask B, involving the detection of hate speech, which contains insults or threats targeting a group based on their nationality, ethnicity, race, gender, political or sport affiliation, religious belief, or other common characteristics. In total, 40 teams signed up to participate in Subtask A, and 14 of them submitted test runs. For Subtask B, 33 teams signed up to participate and 13 of them submitted runs. We present and analyze all submissions in this paper.", "phrases": ["offensive language", "language detection", "arabic tweet"], "overall_score": 1.7363230559987786, "scores": [1.139128621124657, 0.5869864276087255, 0.5361115173315988], "rank_score": 0.7540755220216604} -{"id": "yin-etal-2017-chinese", "title": "Chinese Zero Pronoun Resolution with Deep Memory Network", "abstract": "Existing approaches for Chinese zero pronoun resolution typically utilize only syntactical and lexical features while ignoring semantic information. The fundamental reason is that zero pronouns have no descriptive information, which brings difficulty in explicitly capturing their semantic similarities with antecedents. Meanwhile, representing zero pronouns is challenging since they are merely gaps that convey no actual content. In this paper, we address this issue by building a deep memory network that is capable of encoding zero pronouns into vector representations with information obtained from their contexts and potential antecedents. Consequently, our resolver takes advantage of semantic information by using these continuous distributed representations. 
Experiments on the OntoNotes 5.0 dataset show that the proposed memory network could substantially outperform the state-of-the-art systems in various experimental settings.", "phrases": ["pronoun", "deep memory network", "antecedent mention", "pro-drop language"], "overall_score": 1.6568686783061097, "scores": [0.7916764718039825, 1.140406735513707, 0.5477004815993168, 0.5365100398101602], "rank_score": 0.7540734321817917} -{"id": "nuhn-etal-2012-deciphering", "title": "Deciphering Foreign Language by Combining Language Models and Context Vectors", "abstract": "In this paper we show how to train statistical machine translation systems on real-life tasks using only non-parallel monolingual data from two languages. We present a modification of the method shown in (Ravi and Knight, 2011) that is scalable to vocabulary sizes of several thousand words. On the task shown in (Ravi and Knight, 2011) we obtain better results with only 5% of the computational effort when running our method with an n-gram language model. The efficiency improvement of our method allows us to run experiments with vocabulary sizes of around 5,000 words, such as a non-parallel version of the VERBMOBIL corpus. We also report results using data from the monolingual French and English GIGAWORD corpora.", "phrases": ["context vector", "monolingual data", "decipherment technique"], "overall_score": 1.933536180871702, "scores": [0.8605855777631986, 0.8726084452702614, 0.5282964526911043], "rank_score": 0.7538301585748547} -{"id": "oraby-etal-2018-controlling", "title": "Controlling Personality-Based Stylistic Variation with Neural Natural Language Generators", "abstract": "Natural language generators for task-oriented dialogue must effectively realize system dialogue actions and their associated semantics. In many applications, it is also desirable for generators to control the style of an utterance. To date, work on task-oriented neural generation has primarily focused on semantic fidelity rather than achieving stylistic goals, while work on style has been done in contexts where it is difficult to measure content preservation. Here we present three different sequence-to-sequence models and carefully test how well they disentangle content and style. We use a statistical generator, Personage, to synthesize a new corpus of over 88,000 restaurant domain utterances whose style varies according to models of personality, giving us total control over both the semantic content and the stylistic variation in the training data. We then vary the amount of explicit stylistic supervision given to the three models. We show that our most explicit model can simultaneously achieve high fidelity to both semantic and stylistic goals: this model adds a context vector of 36 stylistic parameters as input to the hidden state of the encoder at each time step, showing the benefits of explicit stylistic supervision, even when the amount of training data is large.", "phrases": ["stylistic variation", "generator", "personality type"], "overall_score": 1.8070801280340454, "scores": [0.8773242035568398, 0.8266949400082468, 0.5568136983027265], "rank_score": 0.7536109472892711} -{"id": "ziemski-etal-2016-united", "title": "The United Nations Parallel Corpus v1.0", "abstract": "This paper describes the creation process and statistics of the official United Nations Parallel Corpus, the first parallel corpus composed from United Nations documents published by the original data creator. 
The parallel corpus presented consists of manually translated UN documents from the last 25 years (1990 to 2014) for the six official UN languages, Arabic, Chinese, English, French, Russian, and Spanish. The corpus is freely available for download under a liberal license. Apart from the pairwise aligned documents, a fully aligned subcorpus for the six official UN languages is distributed. We provide baseline BLEU scores of our Moses-based SMT systems trained with the full data of language pairs involving English and for all possible translation directions of the six-way subcorpus.", "phrases": ["united nations", "parallel corpus", "spanish"], "overall_score": 1.8069051817963273, "scores": [1.0874221680395262, 0.6135175885645933, 0.5596742105200353], "rank_score": 0.7535379890413849} -{"id": "ohara-wiebe-2003-preposition", "title": "Preposition Semantic Classification via Treebank and FrameNet", "abstract": "This paper reports on experiments in classifying the semantic role annotations assigned to prepositional phrases in both the PENN TREEBANK and FRAMENET. In both cases, experiments are done to see how the prepositions can be classified given the dataset\u2019s role inventory, using standard word-sense disambiguation features. In addition to using traditional word collocations, the experiments incorporate class-based collocations in the form of WordNet hypernyms. For Treebank, the word collocations achieve slightly better performance: 78.5% versus 77.4% when separate classifiers are used per preposition. When using a single classifier for all of the prepositions together, the combined approach yields a significant gain at 85.8% accuracy versus 81.3% for word-only collocations. For FrameNet, the combined use of both collocation types achieves better performance for the individual classifiers: 70.3% versus 68.5%. However, classification using a single classifier is not effective due to confusion among the fine-grained roles.", "phrases": ["treebank", "framenet", "preposition"], "overall_score": 0.8278043671758045, "scores": [0.8479074316478425, 0.7827564430655068, 0.6298361462159192], "rank_score": 0.7535000069764228} -{"id": "dodge-etal-2019-show", "title": "Show Your Work: Improved Reporting of Experimental Results", "abstract": "Research in natural language processing proceeds, in part, by demonstrating that new models achieve superior performance (e.g., accuracy) on held-out test data, compared to previous results. In this paper, we demonstrate that test-set performance scores alone are insufficient for drawing accurate conclusions about which model performs best. We argue for reporting additional details, especially performance on validation data obtained during model development. We present a novel technique for doing so: expected validation performance of the best-found model as a function of computation budget (i.e., the number of hyperparameter search trials or the overall training time). Using our approach, we find multiple recent model comparisons where authors would have reached a different conclusion if they had used more (or less) computation. Our approach also allows us to estimate the amount of computation required to obtain a given accuracy; applying it to several recently published results yields massive variation across papers, from hours to weeks. &#13;
We conclude with a set of best practices for reporting experimental results which allow for robust future comparisons, and provide code to allow researchers to use our technique.", "phrases": ["computation", "hyperparameter", "model comparison", "practice", "area"], "overall_score": 2.0404669215081896, "scores": [1.144676867101329, 0.9021200511443792, 0.5878634580021428, 0.578301934496445, 0.5544473936210337], "rank_score": 0.753481940873066} -{"id": "camburu-etal-2020-make", "title": "Make Up Your Mind! Adversarial Generation of Inconsistent Natural Language Explanations", "abstract": "To increase trust in artificial intelligence systems, a promising research direction consists of designing neural models capable of generating natural language explanations for their predictions. In this work, we show that such models are nonetheless prone to generating mutually inconsistent explanations, such as \u201cBecause there is a dog in the image.\u201d and \u201cBecause there is no dog in the [same] image.\u201d, exposing flaws in either the decision-making process of the model or in the generation of the explanations. We introduce a simple yet effective adversarial framework for sanity checking models against the generation of inconsistent natural language explanations. Moreover, as part of the framework, we address the problem of adversarial attacks with full target sequences, a scenario that was not previously addressed in sequence-to-sequence attacks. Finally, we apply our framework on a state-of-the-art neural natural language inference model that provides natural language explanations for its predictions. Our framework shows that this model is capable of generating a significant number of inconsistent explanations.", "phrases": ["explanation", "such model", "dog", "adversarial framework"], "overall_score": 1.4660623440667295, "scores": [1.3535269569196835, 0.5892564275114915, 0.5413702292496185, 0.5294744200256206], "rank_score": 0.7534070084266035} -{"id": "eisenstein-2009-hierarchical", "title": "Hierarchical Text Segmentation from Multi-Scale Lexical Cohesion", "abstract": "This paper presents a novel unsupervised method for hierarchical topic segmentation. Lexical cohesion -- the workhorse of unsupervised linear segmentation -- is treated as a multi-scale phenomenon, and formalized in a Bayesian setting. Each word token is modeled as a draw from a pyramid of latent topic models, where the structure of the pyramid is constrained to induce a hierarchical segmentation. Inference takes the form of a coordinate-ascent algorithm, iterating between two steps: a novel dynamic program for obtaining the globally-optimal hierarchical segmentation, and collapsed variational Bayesian inference over the hidden variables. The resulting system is fast and accurate, and compares well against heuristic alternatives.", "phrases": ["segmentation", "lexical cohesion", "latent topic model", "lda"], "overall_score": 1.5665521812664431, "scores": [0.9526022531679699, 0.9353043681187492, 0.5754074615205442, 0.5500953348019183], "rank_score": 0.7533523544022954} -{"id": "zhang-clark-2009-transition", "title": "Transition-Based Parsing of the Chinese Treebank using a Global Discriminative Model", "abstract": "Transition-based approaches have shown competitive performance on constituent and dependency parsing of Chinese. State-of-the-art accuracies have been achieved by a deterministic shift-reduce parsing model on parsing the Chinese Treebank 2 data (Wang et al., 2006). &#13;
In this paper, we propose a global discriminative model based on the shift-reduce parsing process, combined with a beam-search decoder, obtaining competitive accuracies on CTB2. We also report the performance of the parser on CTB5 data, obtaining the highest scores in the literature for a dependency-based evaluation.", "phrases": ["chinese treebank", "constituent", "shift-reduce parser"], "overall_score": 2.087374475085269, "scores": [0.8682876671064065, 0.8343385690365464, 0.5559573666198379], "rank_score": 0.7528612009209302} -{"id": "lal-etal-2021-interpret", "title": "InterpreT: An Interactive Visualization Tool for Interpreting Transformers", "abstract": "With the increasingly widespread use of Transformer-based models for NLU/NLP tasks, there is growing interest in understanding the inner workings of these models, why they are so effective at a wide range of tasks, and how they can be further tuned and improved. To contribute towards this goal of enhanced explainability and comprehension, we present InterpreT, an interactive visualization tool for interpreting Transformer-based models. In addition to providing various mechanisms for investigating general model behaviours, novel contributions made in InterpreT include the ability to track and visualize token embeddings through each layer of a Transformer, highlight distances between certain token embeddings through illustrative plots, and identify task-related functions of attention heads by using new metrics. InterpreT is a task agnostic tool, and its functionalities are demonstrated through the analysis of model behaviours for two disparate tasks: Aspect Based Sentiment Analysis (ABSA) and the Winograd Schema Challenge (WSC).", "phrases": ["interactive visualization tool", "transformer", "interpret"], "overall_score": 0.8270776524604343, "scores": [0.8948660508689295, 0.8347744096783541, 0.5288751076604938], "rank_score": 0.7528385227359259} -{"id": "joshi-etal-2014-knowledge", "title": "Knowledge Graph and Corpus Driven Segmentation and Answer Inference for Telegraphic Entity-seeking Queries", "abstract": "Much recent work focuses on formal interpretation of natural question utterances, with the goal of executing the resulting structured queries on knowledge graphs (KGs) such as Freebase. Here we address two limitations of this approach when applied to open-domain, entity-oriented Web queries. First, Web queries are rarely well-formed questions. They are \u201ctelegraphic\u201d, with missing verbs, prepositions, clauses, case and phrase clues. Second, the KG is always incomplete, unable to directly answer many queries. We propose a novel technique to segment a telegraphic query and assign a coarse-grained purpose to each segment: a base entity e1, a relation type r, a target entity type t2, and contextual words s. The query seeks entity e2 \u2208 t2 where r(e1,e2) holds, further evidenced by schema-agnostic words s. Query segmentation is integrated with the KG and an unstructured corpus where mentions of entities have been linked to the KG. We do not trust the best or any specific query segmentation. Instead, evidence in favor of candidate e2s is aggregated across several segmentations. Extensive experiments on the ClueWeb corpus and parts of Freebase as our KG, using over a thousand telegraphic queries adapted from TREC, INEX, and WebQuestions, show the efficacy of our approach. For one benchmark, MAP improves from 0.2\u20100.29 (competitive baselines) to 0.42 (our system). &#13;
NDCG@10 improves from 0.29\u20100.36 to 0.54.", "phrases": ["query", "knowledge graph", "unstructured data"], "overall_score": 1.348822696887453, "scores": [0.8431309872907028, 0.8780311079001575, 0.5372147462083381], "rank_score": 0.7527922804663995} -{"id": "prasov-chai-2010-fusing", "title": "Fusing Eye Gaze with Speech Recognition Hypotheses to Resolve Exophoric References in Situated Dialogue", "abstract": "In situated dialogue humans often utter linguistic expressions that refer to extralinguistic entities in the environment. Correctly resolving these references is critical yet challenging for artificial agents partly due to their limited speech recognition and language understanding capabilities. Motivated by psycholinguistic studies demonstrating a tight link between language production and human eye gaze, we have developed approaches that integrate naturally occurring human eye gaze with speech recognition hypotheses to resolve exophoric references in situated dialogue in a virtual world. In addition to incorporating eye gaze with the best recognized spoken hypothesis, we developed an algorithm to also handle multiple hypotheses modeled as word confusion networks. Our empirical results demonstrate that incorporating eye gaze with recognition hypotheses consistently outperforms the results obtained from processing recognition hypotheses alone. Incorporating eye gaze with word confusion networks further improves performance.", "phrases": ["eye gaze", "situated dialogue", "speech recognition hypothesis"], "overall_score": 1.0435818289343861, "scores": [0.8964435528943985, 0.8184147438095112, 0.5434971973443005], "rank_score": 0.7527851646827367} -{"id": "he-etal-2017-generating", "title": "Generating Natural Answers by Incorporating Copying and Retrieving Mechanisms in Sequence-to-Sequence Learning", "abstract": "Generating an answer with a natural language sentence is very important in real-world question answering systems, which need to obtain a right answer as well as a coherent natural response. In this paper, we propose an end-to-end question answering system called COREQA in sequence-to-sequence learning, which incorporates copying and retrieving mechanisms to generate natural answers within an encoder-decoder framework. Specifically, in COREQA, the semantic units (words, phrases and entities) in a natural answer are dynamically predicted from the vocabulary, copied from the given question and/or retrieved from the corresponding knowledge base jointly. Our empirical study on both synthetic and real-world datasets demonstrates the efficiency of COREQA, which is able to generate correct, coherent and natural answers for knowledge inquired questions.", "phrases": ["copying", "sequence-to-sequence learning", "sentence generation"], "overall_score": 1.8704401591396456, "scores": [0.9031335744482553, 0.796075133707881, 0.5589527716313104], "rank_score": 0.7527204932624821} -{"id": "cholakov-etal-2008-towards", "title": "Towards Domain-Independent Deep Linguistic Processing: Ensuring Portability and Re-Usability of Lexicalised Grammars", "abstract": "In this paper we illustrate and underline the importance of making detailed linguistic information a central part of the process of automatic acquisition of large-scale lexicons as a means for enhancing robustness and at the same time ensuring maintainability and re-usability of deep lexicalised grammars. &#13;
Using the error mining techniques proposed in (van Noord, 2004), we show very convincingly that the main hindrance to portability of deep lexicalised grammars to domains other than the ones originally developed in, as well as to robustness of systems using such grammars, is low lexical coverage. To this effect, we develop linguistically-driven methods that use detailed morphosyntactic information to automatically enhance the performance of deep lexicalised grammars, maintaining at the same time their usually already achieved high linguistic quality.", "phrases": ["re-usability", "lexicalised grammar", "error mining technique"], "overall_score": 1.0434844241652432, "scores": [0.9137237536320811, 0.822819974765254, 0.5216009775847729], "rank_score": 0.752714901994036} -{"id": "tomuro-2003-interrogative", "title": "Interrogative Reformulation Patterns and Acquisition of Question Paraphrases", "abstract": "We describe a set of paraphrase patterns for questions which we derived from a corpus of questions, and report the result of using them in the automatic recognition of question paraphrases. The aim of our paraphrase patterns is to factor out different syntactic variations of interrogative words, since the interrogative part of a question adds a syntactic superstructure on the sentence part (i.e., the rest of the question), thereby making it difficult for an automatic system to analyze the question. The patterns we derived are rules which map surface syntactic structures to semantic case frames, which serve as the canonical representation of questions. We also describe the process in which we acquired question paraphrases, which we used as the test data. The results obtained by using the patterns in paraphrase recognition were quite promising.", "phrases": ["reformulation pattern", "question paraphrase", "interrogative part"], "overall_score": 1.4646102445943547, "scores": [0.8809409623746907, 0.8541534763452847, 0.5228878920244071], "rank_score": 0.752660776914794} -{"id": "moradi-etal-2019-interrogating", "title": "Interrogating the Explanatory Power of Attention in Neural Machine Translation", "abstract": "Attention models have become a crucial component in neural machine translation (NMT). They are often implicitly or explicitly used to justify the model's decision in generating a specific token but it has not yet been rigorously established to what extent attention is a reliable source of information in NMT. To evaluate the explanatory power of attention for NMT, we examine the possibility of yielding the same prediction but with counterfactual attention models that modify crucial aspects of the trained attention model. Using these counterfactual attention mechanisms we assess the extent to which they still preserve the generation of function and content words in the translation process. Compared to a state of the art attention model, our counterfactual attention models produce 68% of function words and 21% of content words in our German-English dataset. &#13;
Our experiments demonstrate that attention models by themselves cannot reliably explain the decisions made by an NMT model.", "phrases": ["neural machine translation", "attention weight", "nature"], "overall_score": 1.2113333916300122, "scores": [0.8694244229364272, 0.8672787676415662, 0.5212280702769255], "rank_score": 0.7526437536183063} -{"id": "meng-etal-2021-mixture", "title": "Mixture-of-Partitions: Infusing Large Biomedical Knowledge Graphs into BERT", "abstract": "Infusing factual knowledge into pre-trained models is fundamental for many knowledge-intensive tasks. In this paper, we propose Mixture-of-Partitions (MoP), an infusion approach that can handle a very large knowledge graph (KG) by partitioning it into smaller sub-graphs and infusing their specific knowledge into various BERT models using lightweight adapters. To leverage the overall factual knowledge for a target task, these sub-graph adapters are further fine-tuned along with the underlying BERT through a mixture layer. We evaluate our MoP with three biomedical BERTs (SciBERT, BioBERT, PubmedBERT) on six downstream tasks (inc. NLI, QA, Classification), and the results show that our MoP consistently enhances the underlying BERTs in task performance, and achieves new SOTA performances on five evaluated datasets.", "phrases": ["knowledge graph", "bert", "mixture-of-partition"], "overall_score": 1.0432692058268371, "scores": [0.9005314862516184, 0.7801836057412338, 0.5769638723449656], "rank_score": 0.7525596547792727} -{"id": "sakaguchi-etal-2012-naist", "title": "NAIST at the HOO 2012 Shared Task", "abstract": "This paper describes the Nara Institute of Science and Technology (NAIST) error correction system in the Helping Our Own (HOO) 2012 Shared Task. Our system targets preposition and determiner errors with spelling correction as a pre-processing step. The result shows that spelling correction improves the Detection, Correction, and Recognition F-scores for preposition errors. With regard to preposition error correction, F-scores were not improved when using the training set with correction of all but preposition errors. As for determiner error correction, there was an improvement when the constituent parser was trained with a concatenation of treebank and modified treebank where all the articles appearing as the first word of an NP were removed. Our system ranked third in preposition and fourth in determiner error corrections.", "phrases": ["shared task", "preposition", "naist", "large feature set", "article correction"], "overall_score": 1.0432601666098538, "scores": [0.9405063977236532, 0.8832752687503866, 0.8456222989322825, 0.5677712899271337, 0.525590416479118], "rank_score": 0.7525531343625149} -{"id": "yu-etal-2018-multilingual", "title": "Multilingual Seq2seq Training with Similarity Loss for Cross-Lingual Document Classification", "abstract": "In this paper we continue experiments where neural machine translation training is used to produce joint cross-lingual fixed-dimensional sentence embeddings. In this framework we introduce a simple method of adding a loss to the learning objective which penalizes distance between representations of bilingually aligned sentences. We evaluate cross-lingual transfer using two approaches, cross-lingual similarity search on an aligned corpus (Europarl) and cross-lingual document classification on a recently published benchmark Reuters corpus, and we find the similarity loss significantly improves performance on both. &#13;
Furthermore, we notice that while our Reuters results are very competitive, our English results are not as competitive, showing room for improvement in the current cross-lingual state-of-the-art. Our results are based on a set of 6 European languages.", "phrases": ["similarity loss", "cross-lingual document classification", "machine translation"], "overall_score": 1.4643393593316776, "scores": [0.9271831211394254, 0.7843856298951102, 0.54599595724746], "rank_score": 0.7525215694273318} -{"id": "zhao-he-2009-using", "title": "Using N-gram based Features for Machine Translation System Combination", "abstract": "Conventional confusion network based system combination for machine translation (MT) heavily relies on features that are based on the measure of agreement of words in different translation hypotheses. This paper presents two new features that consider agreement of n-grams in different hypotheses to improve the performance of system combination. The first one is based on a sentence specific online n-gram language model, and the second one is based on n-gram voting. Experiments on a large scale Chinese-to-English MT task show that both features yield significant improvements on the translation performance, and a combination of them produces even better translation results.", "phrases": ["n-gram", "system combination", "flexible word order"], "overall_score": 1.210953864192156, "scores": [0.8606978093942387, 0.8403145579059833, 0.556211452587405], "rank_score": 0.7524079399625423} -{"id": "sukhareva-etal-2016-crowdsourcing", "title": "Crowdsourcing a Large Dataset of Domain-Specific Context-Sensitive Semantic Verb Relations", "abstract": "We present a new large dataset of 12403 context-sensitive verb relations manually annotated via crowdsourcing. These relations capture fine-grained semantic information between verb-centric propositions, such as temporal or entailment relations. We propose a novel semantic verb relation scheme and design a multi-step annotation approach for scaling-up the annotations using crowdsourcing. We employ several quality measures and report on agreement scores. The resulting dataset is available under a permissive CreativeCommons license at www.ukp.tu-darmstadt.de/data/verb-relations/. It represents a valuable resource for various applications, such as automatic information consolidation or automatic summarization.", "phrases": ["large dataset", "verb relation", "proposition"], "overall_score": 1.3476987082779441, "scores": [0.8492397097285156, 0.8288440722191027, 0.5784111294903117], "rank_score": 0.75216497047931} -{"id": "tahmasebi-etal-2012-neer", "title": "NEER: An Unsupervised Method for Named Entity Evolution Recognition", "abstract": "High impact events, political changes and new technologies are reflected in our language and lead to constant evolution of terms, expressions and names. Not knowing about names used in the past for referring to a named entity can severely decrease the performance of many computational linguistic algorithms. We propose NEER, an unsupervised method for named entity evolution recognition independent of external knowledge sources. We find time periods with high likelihood of evolution. By analyzing only these time periods using a sliding window co-occurrence method we capture evolving terms in the same context. We thus avoid comparing terms from widely different periods in time and overcome a severe limitation of existing methods for named entity evolution, as shown by the high recall of 90% on the New York Times corpus. 
We compare several relatedness measures for filtering to improve precision. Furthermore, using machine learning with minimal supervision improves precision to 94%.", "phrases": ["unsupervised method", "entity evolution", "neer"], "overall_score": 1.0424683083811435, "scores": [0.816203700114179, 0.808857579014314, 0.6308845090495806], "rank_score": 0.7519819293926912} -{"id": "napoles-etal-2017-finding", "title": "Finding Good Conversations Online: The Yahoo News Annotated Comments Corpus", "abstract": "This work presents a dataset and annotation scheme for the new task of identifying \u201cgood\u201d conversations that occur online, which we call ERICs: Engaging, Respectful, and/or Informative Conversations. We develop a taxonomy to reflect features of entire threads and individual comments which we believe contribute to identifying ERICs; code a novel dataset of Yahoo News comment threads (2.4k threads and 10k comments) and 1k threads from the Internet Argument Corpus; and analyze the features characteristic of ERICs. This is one of the largest annotated corpora of online human dialogues, with the most detailed set of annotations. It will be valuable for identifying ERICs and other aspects of argumentation, dialogue, and discourse.", "phrases": ["conversation", "individual comment", "large annotated corpora"], "overall_score": 1.4632122634231683, "scores": [1.1602434542330304, 0.5525806673978486, 0.543002948493893], "rank_score": 0.7519423567082573} -{"id": "sil-yates-2011-extracting", "title": "Extracting STRIPS Representations of Actions and Events", "abstract": "Knowledge about how the world changes over time is a vital component of commonsense knowledge for Artificial Intelligence (AI) and natural language understanding. Actions and events are fundamental components to any knowledge about changes in the state of the world: the states before and after an event differ in regular and predictable ways. We describe a novel system that tackles the problem of extracting knowledge from text about how actions and events change the world over time. We leverage standard language-processing tools, like semantic role labelers and coreference resolvers, as well as large-corpus statistics like pointwise mutual information, to identify STRIPS representations of actions and events, a type of representation commonly used in AI planning systems. In experiments on Web text, our extractor\u2019s Area under the Curve (AUC) improves by more than 31% over the closest system from the literature for identifying the preconditions and add effects of actions. In addition, we also extract significant aspects of STRIPS representations that are missing from previous work, including delete effects and arguments.", "phrases": ["strips representation", "world", "web text", "precondition"], "overall_score": 1.3469329684118347, "scores": [0.9739205807407402, 0.8738132558597659, 0.5828077669694279, 0.5764088081215079], "rank_score": 0.7517376029228604} -{"id": "cui-etal-2017-superagent", "title": "SuperAgent: A Customer Service Chatbot for E-commerce Websites", "abstract": "Conventional customer service chatbots are usually based on human dialogue, yet they face significant issues in terms of data scale and privacy. In this paper, we present SuperAgent, a customer service chatbot that leverages large-scale and publicly available e-commerce data. &#13;
Distinct from existing counterparts, SuperAgent takes advantage of data from in-page product descriptions as well as user-generated content from e-commerce websites, which is more practical and cost-effective when answering repetitive questions, freeing up human support staff to answer much higher value questions. We demonstrate SuperAgent as an add-on extension to mainstream web browsers and show its usefulness to user\u2019s online shopping experience.", "phrases": ["customer service chatbot", "e-commerce websites", "powerful chatbot"], "overall_score": 1.8025386531988596, "scores": [0.8814806161809285, 0.806583323688026, 0.5670870756655271], "rank_score": 0.7517170051781606} -{"id": "peng-etal-2018-rational", "title": "Rational Recurrences", "abstract": "Despite the tremendous empirical success of neural models in natural language processing, many of them lack the strong intuitions that accompany classical machine learning approaches. Recently, connections have been shown between convolutional neural networks (CNNs) and weighted finite state automata (WFSAs), leading to new interpretations and insights. In this work, we show that some recurrent neural networks also share this connection to WFSAs. We characterize this connection formally, defining rational recurrences to be recurrent hidden state update functions that can be written as the Forward calculation of a finite set of WFSAs. We show that several recent neural models use rational recurrences. Our analysis provides a fresh view of these models and facilitates devising new neural architectures that draw inspiration from WFSAs. We present one such model, which performs better than two recent baselines on language modeling and text classification. Our results demonstrate that transferring intuitions from classical models like WFSAs can be an effective approach to designing and understanding neural models.", "phrases": ["finite state automata", "recurrent neural network", "rational recurrence", "rnn"], "overall_score": 1.7301896807586599, "scores": [1.0113614303613105, 0.8999391599718455, 0.5547126758970905, 0.5396340577674924], "rank_score": 0.7514118309994348} -{"id": "arun-etal-2010-unified", "title": "A Unified Approach to Minimum Risk Training and Decoding", "abstract": "We present a unified approach to performing minimum risk training and minimum Bayes risk (MBR) decoding with BLEU in a phrase-based model. Key to our approach is the use of a Gibbs sampler that allows us to explore the entire probability distribution and maintain a strict probabilistic formulation across the pipeline. We also describe a new sampling algorithm called corpus sampling which allows us at training time to use BLEU instead of an approximation thereof. Our approach is theoretically sound and gives better (up to +0.6%BLEU) and more stable results than the standard MERT optimization algorithm. By comparing our approach to lattice MBR, we are also able to gain crucial insights about both methods.", "phrases": ["unified approach", "minimum risk training", "approximation"], "overall_score": 0.8254865962060145, "scores": [0.8744426827680664, 0.8309011479776348, 0.5488270120184476], "rank_score": 0.7513902809213829} -{"id": "santus-etal-2016-nine", "title": "Nine Features in a Random Forest to Learn Taxonomical Semantic Relations", "abstract": "ROOT9 is a supervised system for the classification of hypernyms, co-hyponyms and random words that is derived from the already introduced ROOT13 (Santus et al., 2016). 
It relies on a Random Forest algorithm and nine unsupervised corpus-based features. We evaluate it with a 10-fold cross validation on 9,600 pairs, equally distributed among the three classes and involving several Parts-Of-Speech (i.e. adjectives, nouns and verbs). When all the classes are present, ROOT9 achieves an F1 score of 90.7%, against a baseline of 57.2% (vector cosine). When the classification is binary, ROOT9 achieves the following results against the baseline. hypernyms-co-hyponyms 95.7% vs. 69.8%, hypernyms-random 91.8% vs. 64.1% and co-hyponyms-random 97.8% vs. 79.4%. In order to compare the performance with the state-of-the-art, we have also evaluated ROOT9 in subsets of the Weeds et al. (2014) datasets, proving that it is in fact competitive. Finally, we investigated whether the system learns the semantic relation or it simply learns the prototypical hypernyms, as claimed by Levy et al. (2015). The second possibility seems to be the most likely, even though ROOT9 can be trained on negative examples (i.e., switched hypernyms) to drastically reduce this bias.", "phrases": ["semantic relation", "supervised system", "random forest algorithm", "co-hyponymy detection"], "overall_score": 1.461989223065323, "scores": [0.9873777368159559, 0.925394262427374, 0.5465887005607423, 0.5458946533787649], "rank_score": 0.7513138382957093} -{"id": "joanis-stevenson-2003-general", "title": "A General Feature Space for Automatic Verb Classification", "abstract": "Abstract Lexical semantic classes of verbs play an important role in structuring complex predicate information in a lexicon, thereby avoiding redundancy and enabling generalizations across semantically similar verbs with respect to their usage. Such classes, however, require many person-years of expert effort to create manually, and methods are needed for automatically assigning verbs to appropriate classes. In this work, we develop and evaluate a feature space to support the automatic assignment of verbs into a well-known lexical semantic classification that is frequently used in natural language processing. The feature space is general \u2013 applicable to any class distinctions within the target classification; broad \u2013 tapping into a variety of semantic features of the classes; and inexpensive \u2013 requiring no more than a POS tagger and chunker. We perform experiments using support vector machines (SVMs) with the proposed feature space, demonstrating a reduction in error rate ranging from 48% to 88% over a chance baseline accuracy, across classification tasks of varying difficulty. In particular, we attain performance comparable to or better than that of feature sets manually selected for the particular tasks. Our results show that the approach is generally applicable, and reduces the need for resource-intensive linguistic analysis for each new classification task. 
We also perform a wide range of experiments to determine the most informative features in the feature space, finding that simple, easily extractable features suffice for good verb classification performance.", "phrases": ["general feature space", "automatic verb classification", "difficulty", "recent research"], "overall_score": 2.1283860478255665, "scores": [1.0056436904526151, 0.9172421625888273, 0.55862741764845, 0.523394287423508], "rank_score": 0.75122688952835} -{"id": "stewart-eisenstein-2018-making", "title": "Making \u201cfetch\u201d happen: The influence of social and linguistic context on nonstandard word growth and decline", "abstract": "In an online community, new words come and go: today's \u201chaha\u201d may be replaced by tomorrow's \u201clol.\u201d Changes in online writing are usually studied as a social process, with innovations diffusing through a network of individuals in a speech community. But unlike other types of innovation, language change is shaped and constrained by the grammatical system in which it takes part. To investigate the role of social and structural factors in language change, we undertake a large-scale analysis of the frequencies of non-standard words in Reddit. Dissemination across many linguistic contexts is a predictor of success: words that appear in more linguistic contexts grow faster and survive longer. Furthermore, social dissemination plays a less important role in explaining word growth and decline than previously hypothesized.", "phrases": ["linguistic context", "word growth", "decline"], "overall_score": 1.3458228801626126, "scores": [0.901476314217845, 0.7964826008387966, 0.555395237567038], "rank_score": 0.75111805087456} -{"id": "kiyono-etal-2019-empirical", "title": "An Empirical Study of Incorporating Pseudo Data into Grammatical Error Correction", "abstract": "The incorporation of pseudo data in the training of grammatical error correction models has been one of the main factors in improving the performance of such models. However, consensus is lacking on experimental configurations, namely, choosing how the pseudo data should be generated or used. In this study, these choices are investigated through extensive experiments, and state-of-the-art performance is achieved on the CoNLL-2014 test set (F0.5=65.0) and the official test set of the BEA-2019 shared task (F0.5=70.2) without making any modifications to the model architecture.", "phrases": ["pseudo data", "error correction", "back-translation", "english gec"], "overall_score": 2.286517375409304, "scores": [1.0605613563624847, 0.8509248668493599, 0.5686851317997546, 0.5239350980439071], "rank_score": 0.7510266132638765} -{"id": "song-gildea-2019-sembleu", "title": "SemBleu: A Robust Metric for AMR Parsing Evaluation", "abstract": "Evaluating AMR parsing accuracy involves comparing pairs of AMR graphs. The major evaluation metric, SMATCH (Cai and Knight, 2013), searches for one-to-one mappings between the nodes of two AMRs with a greedy hill-climbing algorithm, which leads to search errors. We propose SEMBLEU, a robust metric that extends BLEU (Papineni et al., 2002) to AMRs. It does not suffer from search errors and considers non-local correspondences in addition to local ones. SEMBLEU is fully content-driven and punishes situations where a system's output does not preserve most information from the input. Preliminary experiments on both sentence and corpus levels show that SEMBLEU has slightly higher consistency with human judgments than SMATCH. 
Our code is available at .", "phrases": ["robust metric", "amr", "sembleu"], "overall_score": 1.040962333665438, "scores": [0.8612618182700505, 0.8322937249964646, 0.5591312515304796], "rank_score": 0.750895598265665} -{"id": "boyd-2007-discontinuity", "title": "Discontinuity Revisited: An Improved Conversion to Context-Free Representations", "abstract": "This paper introduces a new, reversible method for converting syntactic structures with discontinuous constituents into traditional syntax trees. The method is applied to the Tiger Corpus of German and results for PCFG parsing requiring such context-free trees are provided. A labeled dependency evaluation shows that the new conversion method leads to better results by preserving local relationships and introducing fewer inconsistencies into the training data.", "phrases": ["constituent", "related idea", "splitting conversion"], "overall_score": 1.5606708823182986, "scores": [1.1393291631224733, 0.5708238332952152, 0.5414191459627241], "rank_score": 0.7505240474601376} -{"id": "wang-tian-2016-recurrent", "title": "Recurrent Residual Learning for Sequence Classification", "abstract": "In this paper, we explore the possibility of leveraging Residual Networks (ResNet), a powerful structure in constructing extremely deep neural network for image understanding, to improve recurrent neural networks (RNN) for modeling sequential data. We show that for sequence classification tasks, incorporating residual connections into recurrent structures yields similar accuracy to Long Short Term Memory (LSTM) RNN with much fewer model parameters. In addition, we propose two novel models which combine the best of both residual learning and LSTM. Experiments show that the new models significantly outperform LSTM.", "phrases": ["residual learning", "rnn", "residual connection", "sequence prediction"], "overall_score": 1.4604283191480953, "scores": [0.8794340027351781, 0.8763599316919535, 0.6403999252479929, 0.6058529097650653], "rank_score": 0.7505116923600474} -{"id": "mazur-dale-2010-wikiwars", "title": "WikiWars: A New Corpus for Research on Temporal Expressions", "abstract": "The reliable extraction of knowledge from text requires an appropriate treatment of the time at which reported events take place. Unfortunately, there are very few annotated data sets that support the development of techniques for event time-stamping and tracking the progression of time through a narrative. In this paper, we present a new corpus of temporally-rich documents sourced from English Wikipedia, which we have annotated with TIMEX2 tags. The corpus contains around 120000 tokens, and 2600 TIMEX2 expressions, thus comparing favourably in size to other existing corpora used in these areas. We describe the preparation of the corpus, and compare the profile of the data with other existing temporally annotated corpora. We also report the results obtained when we use DANTE, our temporal expression tagger, to process this corpus, and point to where further work is required.
The corpus is publicly available for research purposes.", "phrases": ["new corpus", "narrative", "wikiwars"], "overall_score": 1.5605662848312463, "scores": [0.9136650659707918, 0.7900787048732955, 0.5476774692604656], "rank_score": 0.7504737467015176} -{"id": "hendricks-etal-2021-decoupling", "title": "Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers", "abstract": "Recently, multimodal transformer models have gained popularity because their performance on downstream tasks suggests they learn rich visual-linguistic representations. Focusing on zero-shot image retrieval tasks, we study three important factors that can impact the quality of learned representations: pretraining data, the attention mechanism, and loss functions. By pretraining models on six datasets, we observe that dataset noise and language similarity to our downstream task are important indicators of model performance. Through architectural analysis, we learn that models with a multimodal attention mechanism can outperform deeper models with modality-specific attention mechanisms. Finally, we show that successful contrastive losses used in the self-supervised learning literature do not yield similar performance gains when used in multimodal transformers.", "phrases": ["loss", "multimodal transformer", "pre-training task"], "overall_score": 1.2076651576649111, "scores": [0.9096392287451578, 0.8033400092703117, 0.5381144171074832], "rank_score": 0.7503645517076508} -{"id": "hasler-etal-2018-neural", "title": "Neural Machine Translation Decoding with Terminology Constraints", "abstract": "Despite the impressive quality improvements yielded by neural machine translation (NMT) systems, controlling their translation output to adhere to user-provided terminology constraints remains an open problem. We describe our approach to constrained neural decoding based on finite-state machines and multi-stack decoding which supports target-side constraints as well as constraints with corresponding aligned input text spans. We demonstrate the performance of our framework on multiple translation tasks and motivate the need for constrained decoding with attentions as a means of reducing misplacement and duplication when translating user constraints.", "phrases": ["decoding", "terminology constraint", "finite-state machine", "neural machine translation"], "overall_score": 2.1683038686843554, "scores": [0.9940577421646748, 0.9139811393186991, 0.5683667600166533, 0.5243209781962774], "rank_score": 0.7501816549240761} -{"id": "sai-etal-2020-improving", "title": "Improving Dialog Evaluation with a Multi-reference Adversarial Dataset and Large Scale Pretraining", "abstract": "There is an increasing focus on model-based dialog evaluation metrics such as ADEM, RUBER, and the more recent BERT-based metrics. These models aim to assign a high score to all relevant responses and a low score to all irrelevant responses. Ideally, such models should be trained using multiple relevant and irrelevant responses for any given context. However, no such data is publicly available, and hence existing models are usually trained using a single relevant response and multiple randomly selected responses from other contexts (random negatives). To allow for better training and robust evaluation of model-based metrics, we introduce the DailyDialog++ dataset, consisting of (i) five relevant responses for each context and (ii) five adversarially crafted irrelevant responses for each context. 
Using this dataset, we first show that even in the presence of multiple correct references, n-gram based metrics and embedding based metrics do not perform well at separating relevant responses from even random negatives. While model-based metrics perform better than n-gram and embedding based metrics on random negatives, their performance drops substantially when evaluated on adversarial examples. To check if large scale pretraining could help, we propose a new BERT-based evaluation metric called DEB, which is pretrained on 727M Reddit conversations and then finetuned on our dataset. DEB significantly outperforms existing models, showing better correlation with human judgments and better performance on random negatives (88.27% accuracy). However, its performance again drops substantially when evaluated on adversarial responses, thereby highlighting that even large-scale pretrained evaluation models are not robust to the adversarial examples in our dataset. The dataset and code are publicly available.", "phrases": ["dialog evaluation", "scale pretraining", "adversarial negative response"], "overall_score": 1.5597888412779266, "scores": [0.8897161621116532, 0.804606720776915, 0.5559767432570394], "rank_score": 0.7500998753818692} -{"id": "si-etal-2021-better", "title": "Better Robustness by More Coverage: Adversarial and Mixup Data Augmentation for Robust Finetuning", "abstract": "Pretrained language models (PLMs) perform poorly under adversarial attacks. To improve the adversarial robustness, adversarial data augmentation (ADA) has been widely adopted to cover more search space of adversarial attacks by adding textual adversarial examples during training. However, the number of adversarial examples for text augmentation is still extremely insufficient due to the exponentially large attack search space. In this work, we propose a simple and effective method to cover a much larger proportion of the attack search space, called Adversarial and Mixup Data Augmentation (AMDA). Specifically, AMDA linearly interpolates the representations of pairs of training samples to form new virtual samples, which are more abundant and diverse than the discrete text adversarial examples in conventional ADA. Moreover, to fairly evaluate the robustness of different models, we adopt a challenging evaluation setup, which generates a new set of adversarial examples targeting each model. In text classification experiments of BERT and RoBERTa, AMDA achieves significant robustness gains under two strong adversarial attacks and alleviates the performance degradation of ADA on the clean data. Our code is available at: https://github.com/thunlp/MixADA .", "phrases": ["robustness", "adversarial", "mixup data augmentation"], "overall_score": 1.7270435040171332, "scores": [0.8720427478461135, 0.8545902024557956, 0.5235034411025833], "rank_score": 0.7500454638014974} -{"id": "marsi-ozturk-2015-extraction", "title": "Extraction and generalisation of variables from scientific publications", "abstract": "Scientific theories and models in Earth science typically involve changing variables and their complex interactions, including correlations, causal relations and chains of positive/negative feedback loops. Variables tend to be complex rather than atomic entities and expressed as noun phrases containing multiple modifiers, e.g. oxygen depletion in the upper 500 m of the ocean or timing and magnitude of surface temperature evolution in the Southern Hemisphere in deglacial proxy records.
Text mining from Earth science literature is therefore significantly different from biomedical text mining and requires different approaches and methods. Our approach aims at automatically locating and extracting variables and their direction of variation: increasing, decreasing or just changing. Variables are initially extracted by matching tree patterns onto the syntax trees of the source texts. Next, variables are generalised in order to enhance their similarity, facilitating hierarchical search and inference. This generalisation is accomplished by progressive pruning of syntax trees using a set of tree transformation operations. Text mining results are presented as a browsable variable hierarchy which allows users to inspect all mentions of a particular variable type in the text as well as any generalisations or specialisations. The approach is demonstrated on a corpus of 10k abstracts of Nature publications in the field of Marine science. We discuss experiences with this early prototype and outline a number of possible improvements and directions for future research.", "phrases": ["generalisation", "variable", "syntax tree"], "overall_score": 0.8236926665300904, "scores": [0.8741630022871808, 0.8508597866465989, 0.5242493383478609], "rank_score": 0.7497573757605469} -{"id": "lu-etal-2019-debug", "title": "DEBUG: A Dense Bottom-Up Grounding Approach for Natural Language Video Localization", "abstract": "In this paper, we focus on natural language video localization: localizing (i.e., grounding) a natural language description in a long and untrimmed video sequence. All currently published models for addressing this problem can be categorized into two types: (i) top-down approach: it does classification and regression for a set of pre-cut video segment candidates; (ii) bottom-up approach: it directly predicts probabilities for each video frame as the temporal boundaries (i.e., start and end time point). However, both two approaches suffer several limitations: the former is computation-intensive for densely placed candidates, while the latter has trailed the performance of the top-down counterpart thus far. To this end, we propose a novel dense bottom-up framework: DEnse Bottom-Up Grounding (DEBUG). DEBUG regards all frames falling in the ground truth segment as foreground, and each foreground frame regresses the unique distances from its location to bi-directional ground truth boundaries. Extensive experiments on three challenging benchmarks (TACoS, Charades-STA, and ActivityNet Captions) show that DEBUG is able to match the speed of bottom-up models while surpassing the performance of the state-of-the-art top-down models.", "phrases": ["frame", "temporal boundary", "dense bottom-up framework", "ground truth segment", "distance"], "overall_score": 1.3430454823657902, "scores": [1.1325684932751956, 0.8762643273949441, 0.61947117396042, 0.5732290565104768, 0.5463067271089299], "rank_score": 0.7495679556499933} -{"id": "gong-etal-2018-document", "title": "Document Similarity for Texts of Varying Lengths via Hidden Topics", "abstract": "Measuring similarity between texts is an important task for several applications. Available approaches to measure document similarity are inadequate for document pairs that have non-comparable lengths, such as a long document and its summary. This is because of the lexical, contextual and the abstraction gaps between a long document of rich details and its concise summary of abstract information.
In this paper, we present a document matching approach to bridge this gap, by comparing the texts in a common space of hidden topics. We evaluate the matching algorithm on two matching tasks and find that it consistently and widely outperforms strong baselines. We also highlight the benefits of the incorporation of domain knowledge to text matching.", "phrases": ["hidden topic", "common space", "matching task", "document similarity"], "overall_score": 1.558415893660573, "scores": [0.9674049917774981, 0.9263961363340264, 0.568130687298169, 0.5358266931591746], "rank_score": 0.749439627142217} -{"id": "pecina-etal-2011-towards", "title": "Towards Using Web-Crawled Data for Domain Adaptation in Statistical Machine Translation", "abstract": "This paper reports on the ongoing work focused on domain adaptation of statistical machine translation using domain-specific data obtained by domain-focused web crawling. We present a strategy for crawling monolingual and parallel data and their exploitation for testing, language modelling, and system tuning in a phrase-based machine translation framework. The proposed approach is evaluated on the domains of Natural Environment and Labour Legislation and two language pairs: English\u2010French and English\u2010Greek.", "phrases": ["statistical machine translation", "domain-specific data", "domain-focused web-crawling"], "overall_score": 1.3426503882847562, "scores": [0.8710149391212106, 0.8244118308167873, 0.5526155783966444], "rank_score": 0.7493474494448807} -{"id": "al-rfou-skiena-2012-speedread", "title": "SpeedRead: A Fast Named Entity Recognition Pipeline", "abstract": "Online content analysis employs algorithmic methods to identify entities in unstructured text. Both machine learning and knowledge-base approaches lie at the foundation of contemporary named entities extraction systems. However, the progress in deploying these approaches on web-scale has been hampered by the computational cost of NLP over massive text corpora. We present SpeedRead (SR), a named entity recognition pipeline that runs at least 10 times faster than Stanford NLP pipeline. This pipeline consists of a high performance Penn Treebank-compliant tokenizer, close to state-of-art part-of-speech (POS) tagger and knowledge-based named entity recognizer.", "phrases": ["entity recognition pipeline", "tokenizer", "tagger", "speedread"], "overall_score": 1.2058140176292778, "scores": [0.9773335558906538, 0.872992533587317, 0.5775899566949677, 0.5689414487660613], "rank_score": 0.74921437373475} -{"id": "herbelot-ganesalingam-2013-measuring", "title": "Measuring semantic content in distributional vectors", "abstract": "Some words are more contentful than others: for instance, make is intuitively more general than produce and fifteen is more \u2018precise\u2019 than a group. In this paper, we propose to measure the \u2018semantic content\u2019 of lexical items, as modelled by distributional representations. We investigate the hypothesis that semantic content can be computed using the Kullback-Leibler (KL) divergence, an information-theoretic measure of the relative entropy of two distributions. In a task focusing on retrieving the correct ordering of hyponym-hypernym pairs, the KL divergence achieves close to 80% precision but does not outperform a simpler (linguistically unmotivated) frequency measure.
We suggest that this result illustrates the rather \u2018intensional\u2019 aspect of distributions.", "phrases": ["semantic content", "divergence", "hypernym", "distributional inclusion hypothesis"], "overall_score": 1.6457574359285698, "scores": [0.9447570700444906, 0.8845910687700945, 0.5866544219444377, 0.5800633906309535], "rank_score": 0.7490164878474941} -{"id": "hasan-ng-2009-weakly", "title": "Weakly Supervised Part-of-Speech Tagging for Morphologically-Rich, Resource-Scarce Languages", "abstract": "This paper examines unsupervised approaches to part-of-speech (POS) tagging for morphologically-rich, resource-scarce languages, with an emphasis on Goldwater and Griffiths's (2007) fully-Bayesian approach originally developed for English POS tagging. We argue that existing unsupervised POS taggers unrealistically assume as input a perfect POS lexicon, and consequently, we propose a weakly supervised fully-Bayesian approach to POS tagging, which relaxes the unrealistic assumption by automatically acquiring the lexicon from a small amount of POS-tagged data. Since such relaxation comes at the expense of a drop in tagging accuracy, we propose two extensions to the Bayesian framework and demonstrate that they are effective in improving a fully-Bayesian POS tagger for Bengali, our representative morphologically-rich, resource-scarce language.", "phrases": ["morphologically-rich", "resource-scarce language", "pos tagging"], "overall_score": 1.2053099989744316, "scores": [0.8755277537939526, 0.7829201735841196, 0.5882557006324002], "rank_score": 0.7489012093368242} -{"id": "althobaiti-etal-2014-aranlp", "title": "AraNLP: a Java-based Library for the Processing of Arabic Text.", "abstract": "We present a free, Java-based library named \u201cAraNLP\u201d that covers various Arabic text preprocessing tools. Although a good number of tools for processing Arabic text already exist, integration and compatibility problems continually occur. AraNLP is an attempt to gather most of the vital Arabic text preprocessing tools into one library that can be accessed easily by integrating or accurately adapting existing tools and by developing new ones when required. The library includes a sentence detector, tokenizer, light stemmer, root stemmer, part-of-speech tagger (POS-tagger), word segmenter, normalizer, and a punctuation and diacritic remover.", "phrases": ["java-based library", "arabic text", "diacritic remover"], "overall_score": 0.8225745932649363, "scores": [0.9165653023601031, 0.8049708693489698, 0.5246828131400098], "rank_score": 0.7487396616163609} -{"id": "papalampidi-etal-2019-movie", "title": "Movie Plot Analysis via Turning Point Identification", "abstract": "According to screenwriting theory, turning points (e.g., change of plans, major setback, climax) are crucial narrative moments within a screenplay: they define the plot structure, determine its progression and segment the screenplay into thematic units (e.g., setup, complications, aftermath). We propose the task of turning point identification in movies as a means of analyzing their narrative structure. We argue that turning points and the segmentation they provide can facilitate processing long, complex narratives, such as screenplays, for summarization and question answering. We introduce a dataset consisting of screenplays and plot synopses annotated with turning points and present an end-to-end neural network model that identifies turning points in plot synopses and projects them onto scenes in screenplays.
Our model outperforms strong baselines based on state-of-the-art sentence representations and the expected position of turning points.", "phrases": ["turning point", "point identification", "progression", "narrative structure", "movie"], "overall_score": 1.3411004917394824, "scores": [0.8191498578942141, 0.9029442780529119, 0.9010630343208681, 0.5881533225791268, 0.5311016857174204], "rank_score": 0.7484824357129083} -{"id": "hale-etal-2018-finding", "title": "Finding syntax in human encephalography with beam search", "abstract": "Recurrent neural network grammars (RNNGs) are generative models of (tree, string) pairs that rely on neural networks to evaluate derivational choices. Parsing with them using beam search yields a variety of incremental complexity metrics such as word surprisal and parser action count. When used as regressors against human electrophysiological responses to naturalistic text, they derive two amplitude effects: an early peak and a P600-like later peak. By contrast, a non-syntactic neural language model yields no reliable effects. Model comparisons attribute the early peak to syntactic composition within the RNNG. This pattern of results recommends the RNNG+beam search combination as a mechanistic model of the syntactic processing that occurs during normal human language comprehension.", "phrases": ["encephalography", "beam search", "rnng", "syntactic composition", "processing"], "overall_score": 1.9192931144369176, "scores": [0.878297290840435, 0.8570089769226608, 0.8578438676922441, 0.5787166110584729, 0.56951923612483], "rank_score": 0.7482771965277286} -{"id": "zhang-etal-2018-exploring", "title": "Exploring Recombination for Efficient Decoding of Neural Machine Translation", "abstract": "In Neural Machine Translation (NMT), the decoder can capture the features of the entire prediction history with neural connections and representations. This means that partial hypotheses with different prefixes will be regarded differently no matter how similar they are. However, this might be inefficient since some partial hypotheses can contain only local differences that will not influence future predictions. In this work, we introduce recombination in NMT decoding based on the concept of the \u201cequivalence\u201d of partial hypotheses. Heuristically, we use a simple n-gram suffix based equivalence function and adapt it into beam search decoding. Through experiments on large-scale Chinese-to-English and English-to-German translation tasks, we show that the proposed method can obtain similar translation quality with a smaller beam size, making NMT decoding more efficient.", "phrases": ["recombination", "neural machine translation", "similar translation quality"], "overall_score": 1.2040790917686135, "scores": [0.8817687248792748, 0.8234158993174799, 0.5392245868691902], "rank_score": 0.7481364036886483} -{"id": "zhang-etal-2010-entity", "title": "Entity Linking Leveraging Automatically Generated Annotation", "abstract": "Entity linking refers entity mentions in a document to their representations in a knowledge base (KB). In this paper, we propose to use additional information sources from Wikipedia to find more name variations for entity linking task. In addition, as manually creating a training corpus for entity linking is labor-intensive and costly, we present a novel method to automatically generate a large scale corpus annotation for ambiguous mentions leveraging on their unambiguous synonyms in the document collection.
Then, a binary classifier is trained to filter out KB entities that are not similar to current mentions. This classifier not only can effectively reduce the ambiguities to the existing entities in KB, but also be very useful to highlight the new entities to KB for the further population. Furthermore, we also leverage on the Wikipedia documents to provide additional information which is not available in our generated corpus through a domain adaptation approach which provides further performance improvements. The experiment results show that our proposed method outperforms the state-of-the-art approaches.", "phrases": ["entity mention", "knowledge base", "wikipedia", "binary classifier"], "overall_score": 1.72260480519074, "scores": [1.0004336167733128, 0.841704814638728, 0.578623074370575, 0.5717095397948421], "rank_score": 0.7481177613943645} -{"id": "silfverberg-etal-2018-sound", "title": "Sound Analogies with Phoneme Embeddings", "abstract": "Vector space models of words in NLP\u2014word embeddings\u2014have been recently shown to reliably encode semantic information, offering capabilities such as solving proportional analogy tasks such as man:woman::king:queen. We study how well these distributional properties carry over to similarly learned phoneme embeddings, and whether phoneme vector spaces align with articulatory distinctive features, using several methods of obtaining such continuous-space representations. We demonstrate a statistically significant correlation between distinctive feature spaces and vector spaces learned with word-context PPMI+SVD and word2vec, showing that many distinctive feature contrasts are implicitly present in phoneme distributions. Furthermore, these distributed representations allow us to solve proportional analogy tasks with phonemes, such as p is to b as t is to X, where the solution is that X = d. This effect is even stronger when a supervision signal is added where we extract phoneme representations from the embedding layer of a recurrent neural network that is trained to solve a word inflection task, i.e. a model that is made aware of word relatedness.", "phrases": ["analogy", "phoneme embedding", "feature space"], "overall_score": 1.4557333696622736, "scores": [1.0748906481191127, 0.6432022249241145, 0.5262040237620949], "rank_score": 0.7480989656017739} -{"id": "friedrich-etal-2015-annotating", "title": "Annotating genericity: a survey, a scheme, and a corpus", "abstract": "Generics are linguistic expressions that make statements about or refer to kinds, or that report regularities of events. Non-generic expressions make statements about particular individuals or specific episodes. Generics are treated extensively in semantic theory (Krifka et al., 1995). In practice, it is often hard to decide whether a referring expression is generic or non-generic, and to date there is no data set which is both large and satisfactorily annotated. Such a data set would be valuable for creating automatic systems for identifying generic expressions, in turn facilitating knowledge extraction from natural language text. In this paper we provide the next steps for such an annotation endeavor.
Our contributions are: (1) we survey the most important previous projects annotating genericity, focusing on resources for English; (2) with a new agreement study we identify problems in the annotation scheme of the largest currently available resource (ACE-2005); and (3) we introduce a linguistically-motivated annotation scheme for marking both clauses and their subjects with regard to their genericity. (4) We present a corpus of MASC (Ide et al., 2010) and Wikipedia texts annotated according to our scheme, achieving substantial agreement.", "phrases": ["genericity", "scheme", "linguistic expression"], "overall_score": 1.0370232252358584, "scores": [0.8662077565592139, 0.8195561475535728, 0.5583984923888907], "rank_score": 0.7480541321672257} -{"id": "palmer-etal-2004-different", "title": "Different Sense Granularities for Different Applications", "abstract": "This paper describes an hierarchical approach to WordNet sense distinctions that provides different types of automatic Word Sense Disambiguation (WSD) systems, which perform at varying levels of accuracy. For tasks where fine-grained sense distinctions may not be essential, an accurate coarse-grained WSD system may be sufficient. The paper discusses the criteria behind the three different levels of sense granularity, as well as the machine learning approach used by the WSD system.", "phrases": ["wsd", "sense inventory", "inter-annotator agreement", "trouble", "choice"], "overall_score": 1.555442464492795, "scores": [1.1682519960419993, 0.9558188893379319, 0.5542266073753949, 0.5374996943251366, 0.5242513627726432], "rank_score": 0.7480097099706211} -{"id": "gurrutxaga-alegria-2012-measuring", "title": "Measuring the compositionality of NV expressions in Basque by means of distributional similarity techniques", "abstract": "We present several experiments aiming at measuring the semantic compositionality of NV expressions in Basque. Our approach is based on the hypothesis that compositionality can be related to distributional similarity. The contexts of each NV expression are compared with the contexts of its corresponding components, by means of different techniques, as similarity measures usually used with the Vector Space Model (VSM), Latent Semantic Analysis (LSA) and some measures implemented in the Lemur Toolkit, as Indri index, tf-idf, Okapi index and Kullback-Leibler divergence. Using our previous work with cooccurrence techniques as a baseline, the results point to improvements using the Indri index or Kullback-Leibler divergence, and a slight further improvement when used in combination with cooccurrence measures such as t-score, via rank-aggregation. This work is part of a project for MWE extraction and characterization using different techniques aiming at measuring the properties related to idiomaticity, as institutionalization, non-compositionality and lexico-syntactic fixedness.", "phrases": ["compositionality", "basque", "latent semantic analysis"], "overall_score": 0.8217097675772189, "scores": [0.8550256810153308, 0.845027336219375, 0.5438043728189121], "rank_score": 0.747952463351206} -{"id": "asgari-mofrad-2016-comparing", "title": "Comparing Fifty Natural Languages and Twelve Genetic Languages Using Word Embedding Language Divergence (WELD) as a Quantitative Measure of Language Distance", "abstract": "We introduce a new measure of distance between languages based on word embedding, called word embedding language divergence (WELD).
WELD is defined as divergence between unified similarity distribution of words between languages. Using such a measure, we perform language comparison for fifty natural languages and twelve genetic languages. Our natural language dataset is a collection of sentence-aligned parallel corpora from bible translations for fifty languages spanning a variety of language families. Although we use parallel corpora, which guarantees having the same content in all languages, interestingly in many cases languages within the same family cluster together. In addition to natural languages, we perform language comparison for the coding regions in the genomes of 12 different organisms (4 plants, 6 animals, and two human subjects). Our result confirms a significant high-level difference in the genetic language model of humans/animals versus plants. The proposed method is a step toward defining a quantitative measure of similarity between languages, with applications in languages classification, genre identification, dialect identification, and evaluation of translations.", "phrases": ["genetic language", "language divergence", "co-occurrence"], "overall_score": 1.203666676348092, "scores": [0.8550181204084911, 0.8521099515792167, 0.5365123947532743], "rank_score": 0.7478801555803273} -{"id": "ravi-knight-2008-attacking", "title": "Attacking Decipherment Problems Optimally with Low-Order N-gram Models", "abstract": "We introduce a method for solving substitution ciphers using low-order letter n-gram models. This method enforces global constraints using integer programming, and it guarantees that no decipherment key is overlooked. We carry out extensive empirical experiments showing how decipherment accuracy varies as a function of cipher length and n-gram order. We also make an empirical investigation of Shannon's (1949) theory of uncertainty in decipherment.", "phrases": ["decipherment", "letter n-gram model", "function"], "overall_score": 1.7932493426454315, "scores": [1.1530926901246519, 0.569085389499558, 0.521351105725936], "rank_score": 0.747843061783382} -{"id": "edunov-etal-2018-classical", "title": "Classical Structured Prediction Losses for Sequence to Sequence Learning", "abstract": "There has been much recent work on training neural attention models at the sequence-level using either reinforcement learning-style methods or by optimizing the beam. In this paper, we survey a range of classical objective functions that have been widely used to train linear models for structured prediction and apply them to neural sequence to sequence models. Our experiments show that these losses can perform surprisingly well by slightly outperforming beam search optimization in a like for like setup. We also report new state of the art results on both IWSLT'14 German-English translation as well as Gigaword abstractive summarization. 
On the large WMT'14 English-French task, sequence-level training achieves 41.5 BLEU which is on par with the state of the art.", "phrases": ["loss", "beam search", "setup", "sentence-level bleu", "reinforcement learning"], "overall_score": 2.240337497668729, "scores": [1.5036625240456507, 0.5822241542250614, 0.5660737064602446, 0.5502184115657204, 0.5370363489387519], "rank_score": 0.7478430290470857} -{"id": "ilievski-etal-2016-semantic", "title": "Semantic overfitting: what `world' do we consider when evaluating disambiguation of text?", "abstract": "Semantic text processing faces the challenge of defining the relation between lexical expressions and the world to which they make reference within a period of time. It is unclear whether the current test sets used to evaluate disambiguation tasks are representative for the full complexity considering this time-anchored relation, resulting in semantic overfitting to a specific period and the frequent phenomena within. We conceptualize and formalize a set of metrics which evaluate this complexity of datasets. We provide evidence for their applicability on five different disambiguation tasks. To challenge semantic overfitting of disambiguation systems, we propose a time-based, metric-aware method for developing datasets in a systematic and semi-automated manner, as well as an event-based QA task.", "phrases": ["world", "semantic overfitting", "low ambiguity"], "overall_score": 1.3398603683669579, "scores": [0.9206059509384807, 0.7872644239076464, 0.5355005541952762], "rank_score": 0.7477903096804678} -{"id": "wagner-etal-2007-comparative", "title": "A Comparative Evaluation of Deep and Shallow Approaches to the Automatic Detection of Common Grammatical Errors", "abstract": "This paper compares a deep and a shallow processing approach to the problem of classifying a sentence as grammatically well-formed or ill-formed. The deep processing approach uses the XLE LFG parser and English grammar: two versions are presented, one which uses the XLE directly to perform the classification, and another one which uses a decision tree trained on features consisting of the XLE\u2019s output statistics. The shallow processing approach predicts grammaticality based on n-gram frequency statistics: we present two versions, one which uses frequency thresholds and one which uses a decision tree trained on the frequencies of the rarest n-grams in the input sentence. We find that the use of a decision tree improves on the basic approach only for the deep parser-based approach. We also show that combining both the shallow and deep decision tree features is effective. Our evaluation is carried out using a large test set of grammatical and ungrammatical sentences. The ungrammatical test set is generated automatically by inserting grammatical errors into well-formed BNC sentences.", "phrases": ["automatic detection", "common grammatical error", "bnc sentence"], "overall_score": 1.3397267758075853, "scores": [0.8828728833388223, 0.8108035056927554, 0.5494708617287837], "rank_score": 0.7477157502534538} -{"id": "f-astudillo-etal-2015-learning", "title": "Learning Word Representations from Scarce and Noisy Data with Embedding Subspaces", "abstract": "We investigate a technique to adapt unsupervised word embeddings to specific applications, when only small and noisy labeled datasets are available. Current methods use pre-trained embeddings to initialize model parameters, and then use the labeled data to tailor them for the intended task.
However, this approach is prone to overfitting when the training is performed with scarce and noisy data. To overcome this issue, we use the supervised data to find an embedding subspace that fits the task complexity. All the word representations are adapted through a projection into this task-specific subspace, even if they do not occur on the labeled dataset. This approach was recently used in the SemEval 2015 Twitter sentiment analysis challenge, attaining state-of-the-art results. Here we show results improving those of the challenge, as well as additional experiments in a Twitter Part-Of-Speech tagging task.", "phrases": ["noisy data", "subspace", "pre-trained embedding"], "overall_score": 0.8214495074089203, "scores": [0.8428083600840797, 0.8025289172840875, 0.5978094156425092], "rank_score": 0.7477155643368921} -{"id": "gurevych-etal-2007-electronic", "title": "What to be? - Electronic Career Guidance Based on Semantic Relatedness", "abstract": "We present a study aimed at investigating the use of semantic information in a novel NLP application, Electronic Career Guidance (ECG), in German. ECG is formulated as an information retrieval (IR) task, whereby textual descriptions of professions (documents) are ranked for their relevance to natural language descriptions of a person\u2019s professional interests (the topic). We compare the performance of two semantic IR models: (IR-1) utilizing semantic relatedness (SR) measures based on either wordnet or Wikipedia and a set of heuristics, and (IR-2) measuring the similarity between the topic and documents based on Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007). We evaluate the performance of SR measures intrinsically on the tasks of (T-1) computing SR, and (T-2) solving Reader\u2019s Digest Word Power (RDWP) questions.", "phrases": ["electronic career guidance", "semantic relatedness", "profession"], "overall_score": 0.8214341356887138, "scores": [0.9098398327151614, 0.8097849927959342, 0.5234798916714428], "rank_score": 0.7477015723941794} -{"id": "lu-etal-2020-multi-xscience", "title": "Multi-XScience: A Large-scale Dataset for Extreme Multi-document Summarization of Scientific Articles", "abstract": "Multi-document summarization is a challenging task for which there exists little large-scale datasets. We propose Multi-XScience, a large-scale multi-document summarization dataset created from scientific articles. Multi-XScience introduces a challenging multi-document summarization task: writing the related-work section of a paper based on its abstract and the articles it references. Our work is inspired by extreme summarization, a dataset construction protocol that favours abstractive modeling approaches. Descriptive statistics and empirical results\u2014using several state-of-the-art models trained on the Multi-XScience dataset\u2014reveal that Multi-XScience is well suited for abstractive models.", "phrases": ["large-scale dataset", "summarization dataset", "multi-xscience"], "overall_score": 1.3396820763392987, "scores": [0.8473361720261269, 0.8199671470991595, 0.5757690898903192], "rank_score": 0.7476908030052019} -{"id": "jawahar-etal-2020-automatic", "title": "Automatic Detection of Machine Generated Text: A Critical Survey", "abstract": "Text generative models (TGMs) excel in producing text that matches the style of human language reasonably well. Such TGMs can be misused by adversaries, e.g., by automatically generating fake news and fake product reviews that can look authentic and fool humans. 
Detectors that can distinguish text generated by TGM from human written text play a vital role in mitigating such misuse of TGMs. Recently, there has been a flurry of works from both natural language processing (NLP) and machine learning (ML) communities to build accurate detectors for English. Despite the importance of this problem, there is currently no work that surveys this fast-growing literature and introduces newcomers to important research challenges. In this work, we fill this void by providing a critical survey and review of this literature to facilitate a comprehensive understanding of this problem. We conduct an in-depth error analysis of the state-of-the-art detector and discuss research directions to guide future work in this exciting area.", "phrases": ["critical survey", "automatic detection", "synthetic text"], "overall_score": 1.6420485774233482, "scores": [0.8954166737523037, 0.7807733330747385, 0.5657955339692479], "rank_score": 0.7473285135987634} -{"id": "hoffmann-etal-2010-learning", "title": "Learning 5000 Relational Extractors", "abstract": "Many researchers are trying to use information extraction (IE) to create large-scale knowledge bases from natural language text on the Web. However, the primary approach (supervised learning of relation-specific extractors) requires manually-labeled training data for each relation and doesn't scale to the thousands of relations encoded in Web text. \n \nThis paper presents LUCHS, a self-supervised, relation-specific IE system which learns 5025 relations --- more than an order of magnitude greater than any previous approach --- with an average F1 score of 61%. Crucial to LUCHS's performance is an automated system for dynamic lexicon learning, which allows it to learn accurately from heuristically-generated training data, which is often noisy and sparse.", "phrases": ["luchs", "distant supervision", "relation extraction", "knowledge basis", "wikipedia"], "overall_score": 2.0718832001595535, "scores": [1.0884805703675158, 0.9620053386255801, 0.5881892664477094, 0.5754905868809906, 0.5222037603923835], "rank_score": 0.7472739045428359} -{"id": "hauch-etal-2012-linguistic", "title": "Linguistic Cues to Deception Assessed by Computer Programs: A Meta-Analysis", "abstract": "Research syntheses suggest that verbal cues are more diagnostic of deception than other cues. Recently, to avoid human judgmental biases, researchers have sought to find faster and more reliable methods to perform automatic content analyses of statements. However, diversity of methods and inconsistent findings do not present a clear picture of effectiveness. We integrate and statistically synthesize this literature. Our meta-analyses revealed small, but significant effect-sizes on some linguistic categories. Liars use fewer exclusive words, self- and other-references, fewer time-related, but more space-related, negative and positive emotion words, and more motion verbs or negations than truth-tellers.", "phrases": ["deception", "meta-analysis", "verbal cue"], "overall_score": 0.820710446902518, "scores": [0.8433868704516991, 0.8361314766339117, 0.5616101803337313], "rank_score": 0.747042842473114} -{"id": "siahbani-etal-2013-efficient", "title": "Efficient Left-to-Right Hierarchical Phrase-Based Translation with Improved Reordering", "abstract": "Left-to-right (LR) decoding (Watanabe et al., 2006b) is a promising decoding algorithm for hierarchical phrase-based translation (Hiero). 
It generates the target sentence by extending the hypotheses only on the right edge. LR decoding has complexity O(n^2 b) for an input of n words and beam size b, compared to O(n^3) for the CKY algorithm. It requires a single language model (LM) history for each target hypothesis rather than two LM histories per hypothesis as in CKY. In this paper we present an augmented LR decoding algorithm that builds on the original algorithm in (Watanabe et al., 2006b). Unlike that algorithm, using experiments over multiple language pairs we show two new results: our LR decoding algorithm provides demonstrably more efficient decoding than CKY Hiero, four times faster; and by introducing new distortion and reordering features for LR decoding, it maintains the same translation quality (as in BLEU scores) obtained by phrase-based and CKY Hiero with the same translation model.", "phrases": ["left-to-right", "hiero", "translation quality"], "overall_score": 1.3385002926157634, "scores": [0.8200317221182338, 0.8551542437913088, 0.5659077449428921], "rank_score": 0.7470312369508115} -{"id": "king-cook-2018-leveraging", "title": "Leveraging distributed representations and lexico-syntactic fixedness for token-level prediction of the idiomaticity of English verb-noun combinations", "abstract": "Verb-noun combinations (VNCs) - e.g., blow the whistle, hit the roof, and see stars - are a common type of English idiom that are ambiguous with literal usages. In this paper we propose and evaluate models for classifying VNC usages as idiomatic or literal, based on a variety of approaches to forming distributed representations. Our results show that a model based on averaging word embeddings performs on par with, or better than, a previously-proposed approach based on skip-thoughts. Idiomatic usages of VNCs are known to exhibit lexico-syntactic fixedness. We further incorporate this information into our models, demonstrating that this rich linguistic knowledge is complementary to the information carried by distributed representations.", "phrases": ["lexico-syntactic fixedness", "verb-noun combination", "usage"], "overall_score": 1.3383821891521046, "scores": [0.8840782423800861, 0.829737991995862, 0.527079732082185], "rank_score": 0.746965322152711} -{"id": "henrich-etal-2012-webcage", "title": "WebCAGe \u2013 A Web-Harvested Corpus Annotated with GermaNet Senses", "abstract": "This paper describes an automatic method for creating a domain-independent sense-annotated corpus harvested from the web. As a proof of concept, this method has been applied to German, a language for which sense-annotated corpora are still in short supply. The sense inventory is taken from the German wordnet GermaNet. The web-harvesting relies on an existing mapping of GermaNet to the German version of the web-based dictionary Wiktionary. The data obtained by this method constitute WebCAGe (short for: Web-Harvested Corpus Annotated with GermaNet Senses), a resource which currently represents the largest sense-annotated corpus available for German.
While the present paper focuses on one particular language, the method as such is language-independent.", "phrases": ["web-harvested corpus annotated", "germanet senses", "german", "webcage"], "overall_score": 1.2020702039982685, "scores": [0.9621478291641585, 0.8436446127783117, 0.6171103641459933, 0.5646500400608303], "rank_score": 0.7468882115373234} -{"id": "markert-etal-2012-collective", "title": "Collective Classification for Fine-grained Information Status", "abstract": "Previous work on classifying information status (Nissim, 2006; Rahman and Ng, 2011) is restricted to coarse-grained classification and focuses on conversational dialogue. We here introduce the task of classifying fine-grained information status and work on written text. We add a fine-grained information status layer to the Wall Street Journal portion of the OntoNotes corpus. We claim that the information status of a mention depends not only on the mention itself but also on other mentions in the vicinity and solve the task by collectively classifying the information status of all mentions. Our approach strongly outperforms reimplementations of previous work.", "phrases": ["fine-grained information status", "isnotes", "joint inference", "bridging recognition", "anaphora"], "overall_score": 2.341593901879759, "scores": [0.9773654095948805, 1.0709498780491242, 0.5675013803944318, 0.5591496366126856, 0.5590445730979664], "rank_score": 0.7468021755498178} -{"id": "itagaki-etal-2007-automatic", "title": "Automatic validation of terminology translation consistency with statistical method", "abstract": "This paper presents a novel method to automatically validate terminology consistency in localized materials. The goal of the paper is two-fold. First, we explore a way to extract phrase pair translations for compound nouns from a bilingual corpus using word alignment data. To validate the quality of the extracted phrase pair translations, we use a Gaussian mixture model (GMM) classifier. Second, we quantify consistency of translation as a measurement of quality. With this approach, a quality assurance process for terminology translation can be fully automated. It can also be used for maintaining bilingual training data quality for machine translation.", "phrases": ["terminology translation", "statistical method", "automatic validation"], "overall_score": 1.9708216488373602, "scores": [0.8219734966601321, 0.8001675290625802, 0.6182290034399525], "rank_score": 0.7467900097208883} -{"id": "sasano-etal-2009-effect", "title": "The Effect of Corpus Size on Case Frame Acquisition for Discourse Analysis", "abstract": "This paper reports the effect of corpus size on case frame acquisition for discourse analysis in Japanese. For this study, we collected a Japanese corpus consisting of up to 100 billion words, and constructed case frames from corpora of six different sizes. Then, we applied these case frames to syntactic and case structure analysis, and zero anaphora resolution.
We obtained better results by using case frames constructed from larger corpora; the performance was not saturated even with a corpus size of 100 billion words.", "phrases": ["corpus size", "case frame", "discourse analysis"], "overall_score": 1.0351767442951336, "scores": [0.9088636468222058, 0.8015197621875353, 0.5297831241475168], "rank_score": 0.7467221777190859} -{"id": "yu-etal-2016-online", "title": "Online Segment to Segment Neural Transduction", "abstract": "We introduce an online neural sequence to sequence model that learns to alternate between encoding and decoding segments of the input as it is read. By independently tracking the encoding and decoding representations our algorithm permits exact polynomial marginalization of the latent segmentation during training, and during decoding beam search is employed to find the best alignment path together with the predicted output sequence. Our model tackles the bottleneck of vanilla encoder-decoders that have to read and memorize the entire input sequence in their fixed-length hidden states before producing any output. It is different from previous attentive models in that, instead of treating the attention weights as output of a deterministic function, our model assigns attention weights to a sequential latent variable which can be marginalized out and permits online generation. Experiments on abstractive sentence summarization and morphological inflection show significant performance gains over the baseline encoder-decoders.", "phrases": ["output sequence", "inflection", "online segment", "neural transduction model", "alignment-based neural model"], "overall_score": 2.022106947642429, "scores": [0.8591634400110689, 1.1050642450357326, 0.6038815331750759, 0.5938920297444954, 0.5715095762041063], "rank_score": 0.7467021648340959} -{"id": "alex-etal-2007-recognising", "title": "Recognising Nested Named Entities in Biomedical Text", "abstract": "Although recent named entity (NE) annotation efforts involve the markup of nested entities, there has been limited focus on recognising such nested structures. This paper introduces and compares three techniques for modelling and recognising nested entities by means of a conventional sequence tagger. The methods are tested and evaluated on two biomedical data sets that contain entity nesting. All methods yield an improvement over the baseline tagger that is only trained on flat annotation.", "phrases": ["biomedical text", "conditional random field", "protein", "entity recognition", "eppi corpus"], "overall_score": 2.460727527489941, "scores": [0.9219102795627214, 0.8628905958912284, 0.83015633941915, 0.5805784208214726, 0.5375489002417825], "rank_score": 0.7466169071872711} -{"id": "green-etal-2013-parsing", "title": "Parsing Models for Identifying Multiword Expressions", "abstract": "Multiword expressions lie at the syntax/semantics interface and have motivated alternative theories of syntax like Construction Grammar. Until now, however, syntactic analysis and multiword expression identification have been modeled separately in natural language processing. We develop two structured prediction models for joint parsing and multiword expression identification. The first is based on context-free grammars and the second uses tree substitution grammars, a formalism that can store larger syntactic fragments. 
Our experiments show that both models can identify multiword expressions with much higher accuracy than a state-of-the-art system based on word co-occurrence statistics. We experiment with Arabic and French, which both have pervasive multiword expressions. Relative to English, they also have richer morphology, which induces lexical sparsity in finite corpora. To combat this sparsity, we develop a simple factored lexical representation for the context-free parsing model. Morphological analyses are automatically transformed into rich feature tags that are scored jointly with lexical items. This technique, which we call a factored lexicon, improves both standard parsing and multiword expression identification accuracy.", "phrases": ["multiword expressions", "arabic", "french", "constituency parsing model", "presence"], "overall_score": 2.1982808884203484, "scores": [0.9386353954195505, 0.8632866334174703, 0.8567372152131623, 0.5401387179453514, 0.5341387773534906], "rank_score": 0.746587347869805} -{"id": "savary-etal-2012-sejfek", "title": "SEJFEK - a Lexicon and a Shallow Grammar of Polish Economic Multi-Word Units", "abstract": "We present a large-coverage lexical and grammatical resource of Polish economic terminology. It consists of two alternative modules. One is a grammatical lexicon of about 11,000 terminological multi-word units, where inflectional and syntactic variation, as well as nesting of terms, are described via graph-based rules. The other one is a fully lexicalized shallow grammar, obtained by an automatic conversion of the lexicon, and partly manually validated. Both resources have a good coverage, evaluated on a manually annotated corpus, and are freely available under the Creative Commons BY-SA license.", "phrases": ["shallow grammar", "graph-based rule", "sejfek"], "overall_score": 1.034873218281371, "scores": [0.9032807583928826, 0.804028644214606, 0.5322002873375146], "rank_score": 0.7465032299816677} -{"id": "eirew-etal-2021-wec", "title": "WEC: Deriving a Large-scale Cross-document Event Coreference dataset from Wikipedia", "abstract": "Cross-document event coreference resolution is a foundational task for NLP applications involving multi-text processing. However, existing corpora for this task are scarce and relatively small, while annotating only modest-size clusters of documents belonging to the same topic. To complement these resources and enhance future research, we present Wikipedia Event Coreference (WEC), an efficient methodology for gathering a large-scale dataset for cross-document event coreference from Wikipedia, where coreference links are not restricted within predefined topics. We apply this methodology to the English Wikipedia and extract our large-scale WEC-Eng dataset. Notably, our dataset creation method is generic and can be applied with relatively little effort to other Wikipedia languages. To set baseline results, we develop an algorithm that adapts components of state-of-the-art models for within-document coreference resolution to the cross-document setting.
Our model is suitably efficient and outperforms previously published state-of-the-art results for the task.", "phrases": ["cross-document event coreference", "wikipedia", "coreference link", "wec-eng dataset"], "overall_score": 1.2009759522057482, "scores": [0.9638136415564039, 0.8774037138730322, 0.5790616721968667, 0.5645542310594013], "rank_score": 0.746208314671426} -{"id": "rozovskaya-etal-2014-correcting", "title": "Correcting Grammatical Verb Errors", "abstract": "Verb errors are some of the most common mistakes made by non-native writers of English but some of the least studied. The reason is that dealing with verb errors requires a new paradigm; essentially all research done on correcting grammatical errors assumes a closed set of triggers \u2010 e.g., correcting the use of prepositions or articles \u2010 but identifying mistakes in verbs necessitates identifying potentially ambiguous triggers first, and then determining the type of mistake made and correcting it. Moreover, once the verb is identified, modeling verb errors is challenging because verbs fulfill many grammatical functions, resulting in a variety of mistakes. Consequently, the little earlier work done on verb errors assumed that the error type is known in advance. We propose a linguistically-motivated approach to verb error correction that makes use of the notion of verb finiteness to identify triggers and types of mistakes, before using a statistical machine learning approach to correct these mistakes. We show that the linguistically-informed model significantly improves the accuracy of the verb correction approach.", "phrases": ["verb error", "mistake", "linguistically-motivated approach", "finiteness"], "overall_score": 1.5516499985303336, "scores": [1.021938559220076, 0.8323507854474926, 0.5675610389533208, 0.5628932938125499], "rank_score": 0.74618591935836} -{"id": "wu-cotterell-2019-exact", "title": "Exact Hard Monotonic Attention for Character-Level Transduction", "abstract": "Many common character-level, string-to-string transduction tasks, e.g., grapheme-to-phoneme conversion and morphological inflection, consist almost exclusively of monotonic transduction. Neural sequence-to-sequence models with soft attention, non-monotonic models, outperform popular monotonic models. In this work, we ask the following question: Is monotonicity really a helpful inductive bias in these tasks? We develop a hard attention sequence-to-sequence model that enforces strict monotonicity and learns alignment jointly. With the help of dynamic programming, we are able to compute the exact marginalization over all alignments. Our models achieve state-of-the-art performance on morphological inflection. Furthermore, we find strong performance on two other character-level transduction tasks. Code is available at .", "phrases": ["hard monotonic attention", "monotonicity", "transduction", "grapheme-to-phoneme conversion", "sequence-to-sequence model"], "overall_score": 1.551321021944046, "scores": [0.9362639057315277, 0.8246912869302928, 0.8512292945047002, 0.5658784940705283, 0.5520755940720748], "rank_score": 0.7460277150618247} -{"id": "hamidian-diab-2016-rumor", "title": "Rumor Identification and Belief Investigation on Twitter", "abstract": "Social media users spend several hours a day to read, post and search for news on microblogging platforms. Social media is becoming a key means for discovering news. However, verifying the trustworthiness of this information is becoming even more challenging. 
In this study, we attempt to address the problem of rumor detection and belief investigation on Twitter. Our definition of rumor is an unverifiable statement, which spreads misinformation or disinformation. We adopt a supervised rumors classification task using the standard dataset. By employing the Tweet Latent Vector (TLV) feature, which creates a 100-d vector representative of each tweet, we increased the rumor retrieval task precision up to 0.972. We also introduce the belief score and study the belief change among the rumor posters between 2010 and 2016.", "phrases": ["belief investigation", "twitter", "rumor"], "overall_score": 1.2006754621235283, "scores": [0.8683567660951473, 0.7955121163870644, 0.5741959465753508], "rank_score": 0.7460216096858542} -{"id": "etoori-etal-2018-automatic", "title": "Automatic Spelling Correction for Resource-Scarce Languages using Deep Learning", "abstract": "Spelling correction is a well-known task in Natural Language Processing (NLP). Automatic spelling correction is important for many NLP applications like web search engines, text summarization, sentiment analysis etc. Most approaches use parallel data of noisy and correct word mappings from different sources as training data for automatic spelling correction. Indic languages are resource-scarce and do not have such parallel data due to low volume of queries and non-existence of such prior implementations. In this paper, we show how to build an automatic spelling corrector for resource-scarce languages. We propose a sequence-to-sequence deep learning model which trains end-to-end. We perform experiments on synthetic datasets created for Indic languages, Hindi and Telugu, by incorporating the spelling mistakes committed at character level. A comparative evaluation shows that our model is competitive with the existing spell checking and correction techniques for Indic languages.", "phrases": ["spelling correction", "resource-scarce language", "deep learning model"], "overall_score": 1.5508361698217887, "scores": [0.8603805737679029, 0.8560329769462668, 0.5209701007189597], "rank_score": 0.7457945504777097} -{"id": "nguyen-etal-2015-semantic", "title": "Semantic Representations for Domain Adaptation: A Case Study on the Tree Kernel-based Method for Relation Extraction", "abstract": "We study the application of word embeddings to generate semantic representations for the domain adaptation problem of relation extraction (RE) in the tree kernelbased method. We systematically evaluate various techniques to generate the semantic representations and demonstrate that they are effective to improve the generalization performance of a tree kernel-based relation extractor across domains (up to 7% relative improvement). In addition, we compare the tree kernel-based and the feature-based method for RE in a compatible way, on the same resources and settings, to gain insights into which kind of system is more robust to domain changes. 
Our results and error analysis show that the tree kernel-based method outperforms the feature-based approach.", "phrases": ["case study", "relation extraction", "feature-based method"], "overall_score": 1.033817334928842, "scores": [0.9086706034590419, 0.7898331090953762, 0.5387210008759095], "rank_score": 0.7457415711434425} -{"id": "xu-etal-2020-position", "title": "Position-Aware Tagging for Aspect Sentiment Triplet Extraction", "abstract": "Aspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment. Existing research efforts mostly solve this problem using pipeline approaches, which break the triplet extraction process into several stages. Our observation is that the three elements within a triplet are highly related to each other, and this motivates us to build a joint model to extract such triplets using a sequence tagging approach. However, how to effectively design a tagging approach to extract the triplets that can capture the rich interactions among the elements is a challenging research question. In this work, we propose the first end-to-end model with a novel position-aware tagging scheme that is capable of jointly extracting the triplets. Our experimental results on several existing datasets show that jointly capturing elements in the triplet using our approach leads to improved performance over the existing approaches. We also conducted extensive experiments to investigate the model effectiveness and robustness.", "phrases": ["tagging", "opinion term", "aspect span"], "overall_score": 2.067318301188053, "scores": [0.830434745296681, 0.8778216297178745, 0.528626020782695], "rank_score": 0.7456274652657502} -{"id": "platanios-etal-2018-contextual", "title": "Contextual Parameter Generation for Universal Neural Machine Translation", "abstract": "We propose a simple modification to existing neural machine translation (NMT) models that enables using a single universal model to translate between multiple languages while allowing for language specific parameterization, and that can also be used for domain adaptation. Our approach requires no changes to the model architecture of a standard NMT system, but instead introduces a new component, the contextual parameter generator (CPG), that generates the parameters of the system (e.g., weights in a neural network). This parameter generator accepts source and target language embeddings as input, and generates the parameters for the encoder and the decoder, respectively. The rest of the model remains unchanged and is shared across all languages. We show how this simple modification enables the system to use monolingual data for training and also perform zero-shot translation.
We further show it is able to surpass state-of-the-art performance for both the IWSLT-15 and IWSLT-17 datasets and that the learned language embeddings are able to uncover interesting relationships between languages.", "phrases": ["universal model", "adapter", "language embedding", "zero-shot translation", "contextual parameter generation"], "overall_score": 2.3047388032539007, "scores": [0.9233588174666032, 1.1995952973348407, 0.546099698688852, 0.5297365623775253, 0.5293027157522533], "rank_score": 0.7456186183240149} -{"id": "le-etal-2012-continuous", "title": "Continuous Space Translation Models with Neural Networks", "abstract": "The use of conventional maximum likelihood estimates hinders the performance of existing phrase-based translation models. For lack of sufficient training data, most models only consider a small amount of context. As a partial remedy, we explore here several continuous space translation models, where translation probabilities are estimated using a continuous representation of translation units in lieu of standard discrete representations. In order to handle a large set of translation units, these representations and the associated estimates are jointly computed using a multi-layer neural network with a SOUL architecture. In small scale and large scale English to French experiments, we show that the resulting models can effectively be trained and used on top of a n-gram translation system, delivering significant improvements in performance.", "phrases": ["translation model", "continuous space", "neural network model", "factorization"], "overall_score": 2.1549878228038013, "scores": [0.9834852013149772, 0.8860793526484716, 0.5708634128654476, 0.5418705100595813], "rank_score": 0.7455746192221195} -{"id": "levitan-etal-2012-acoustic", "title": "Acoustic-Prosodic Entrainment and Social Behavior", "abstract": "In conversation, speakers have been shown to entrain, or become more similar to each other, in various ways. We measure entrainment on eight acoustic features extracted from the speech of subjects playing a cooperative computer game and associate the degree of entrainment with a number of manually-labeled social variables acquired using Amazon Mechanical Turk, as well as objective measures of dialogue success. We find that male-female pairs entrain on all features, while male-male pairs entrain only on particular acoustic features (intensity mean, intensity maximum and syllables per second). We further determine that entrainment is more important to the perception of female-male social behavior than it is for same-gender pairs, and it is more important to the smoothness and flow of male-male dialogue than it is for female-female or mixed-gender pairs. Finally, we find that entrainment is more pronounced when intensity or speaking rate is especially high or low.", "phrases": ["social behavior", "variable", "speech data"], "overall_score": 1.0332729012568844, "scores": [0.8501106048753989, 0.8302022425781936, 0.5557336883387958], "rank_score": 0.7453488452641294} -{"id": "wang-etal-2012-improved", "title": "Improved Domain Adaptation for Statistical Machine Translation", "abstract": "We present a simple and effective infrastructure for domain adaptation for statistical machine translation (MT). To build MT systems for different domains, it trains, tunes and deploys a single translation system that is capable of producing adapted domain translations and preserving the original generic accuracy at the same time. 
The approach unifies automatic domain detection and domain model parameterization into one system. Experiment results on 20 language pairs demonstrate its viability.", "phrases": ["statistical machine translation", "different domain", "feature weight", "single-domain decoder"], "overall_score": 1.8515002741495514, "scores": [0.9805970732413991, 0.9185956182268894, 0.5589074906198546, 0.5222939092673805], "rank_score": 0.7450985228388809} -{"id": "zhou-etal-2021-automatic", "title": "Automatic ICD Coding via Interactive Shared Representation Networks with Self-distillation Mechanism", "abstract": "The ICD coding task aims at assigning codes of the International Classification of Diseases in clinical notes. Since manual coding is very laborious and prone to errors, many methods have been proposed for the automatic ICD coding task. However, existing works either ignore the long-tail of code frequency or the noisy clinical notes. To address the above issues, we propose an Interactive Shared Representation Network with Self-Distillation Mechanism. Specifically, an interactive shared representation network targets building connections among codes while modeling the co-occurrence, consequently alleviating the long-tail problem. Moreover, to cope with the noisy text issue, we encourage the model to focus on the clinical note's noteworthy part and extract valuable information through a self-distillation learning mechanism. Experimental results on two MIMIC datasets demonstrate the effectiveness of our method.", "phrases": ["icd", "self-distillation mechanism", "multi-label classifier"], "overall_score": 1.4491570250578747, "scores": [0.7887427704335769, 0.8919477539425753, 0.5534676546560112], "rank_score": 0.7447193930107211} -{"id": "zhang-etal-2008-extracting", "title": "Extracting Synchronous Grammar Rules From Word-Level Alignments in Linear Time", "abstract": "We generalize Uno and Yagiura's algorithm for finding all common intervals of two permutations to the setting of two sequences with many-to-many alignment links across the two sides. We show how to maximally decompose a word-aligned sentence pair in linear time, which can be used to generate all possible phrase pairs or a Synchronous Context-Free Grammar (SCFG) with the simplest rules possible. We also use the algorithm to precisely analyze the maximum SCFG rule length needed to cover hand-aligned data from various language pairs.", "phrases": ["linear time", "phrase pair", "synchronous context-free grammar", "scfg"], "overall_score": 1.5482585055940603, "scores": [0.8619775350601563, 1.033636268818342, 0.5551157016879227, 0.527490318479857], "rank_score": 0.7445549560115695} -{"id": "damljanovic-etal-2010-identification", "title": "Identification of the Question Focus: Combining Syntactic Analysis and Ontology-based Lookup through the User Interaction", "abstract": "Most question-answering systems contain a classifier module which determines a question category, based on which each question is assigned an answer type. However, setting up syntactic patterns for this classification is a big challenge. In addition, in the case of ontology-based systems, the answer type should be aligned to the queried knowledge structure. In this paper, we present an approach for determining the answer type semi-automatically. We first identify the question focus using syntactic parsing, and then try to identify the answer type by combining the head of the focus with the ontology-based lookup. 
When this combination is not enough to make conclusions automatically, the user is engaged into a dialog in order to resolve the answer type. User selections are saved and used for training the system in order to improve its performance over time. Further on, the answer type is used to show the feedback and the concise answer to the user. Our approach is evaluated using 250 questions from the Mooney Geoquery dataset.", "phrases": ["question focus", "ontology-based lookup", "syntactic parsing"], "overall_score": 1.0320872261500753, "scores": [0.8758474683077768, 0.8277285438255289, 0.5299046722640344], "rank_score": 0.7444935614657799} -{"id": "sperber-etal-2017-neural", "title": "Neural Lattice-to-Sequence Models for Uncertain Inputs", "abstract": "The input to a neural sequence-to-sequence model is often determined by an up-stream system, e.g. a word segmenter, part of speech tagger, or speech recognizer. These up-stream models are potentially error-prone. Representing inputs through word lattices allows making this uncertainty explicit by capturing alternative sequences and their posterior probabilities in a compact form. In this work, we extend the TreeLSTM (Tai et al., 2015) into a LatticeLSTM that is able to consume word lattices, and can be used as encoder in an attentional encoder-decoder model. We integrate lattice posterior scores into this architecture by extending the TreeLSTM's child-sum and forget gates and introducing a bias term into the attention mechanism. We experiment with speech translation lattices and report consistent improvements over baselines that translate either the 1-best hypothesis or the lattice without posterior scores.", "phrases": ["sequence-to-sequence model", "lattice", "posterior score"], "overall_score": 1.8496718324015158, "scores": [1.1276224516990536, 0.5801045401632619, 0.5253611193864354], "rank_score": 0.7443627037495837} -{"id": "eisenstein-2013-phonological", "title": "Phonological Factors in Social Media Writing", "abstract": "Does phonological variation get transcribed into social media text? This paper investigates examples of the phonological variable of consonant cluster reduction in Twitter. Not only does this variable appear frequently, but it displays the same sensitivity to linguistic context as in spoken language. This suggests that when social media writing transcribes phonological properties of speech, it is not merely a case of inventing orthographic transcriptions. Rather, social media displays influence from structural properties of the phonological system.", "phrases": ["twitter", "spelling", "social medium"], "overall_score": 1.4484361415138896, "scores": [1.1194802978544567, 0.5762002097393539, 0.5373662888634659], "rank_score": 0.7443489321524255} -{"id": "he-etal-2021-model", "title": "Model Extraction and Adversarial Transferability, Your BERT is Vulnerable!", "abstract": "Natural language processing (NLP) tasks, ranging from text classification to text generation, have been revolutionised by the pretrained language models, such as BERT. This allows corporations to easily build powerful APIs by encapsulating fine-tuned BERT models for downstream tasks. However, when a fine-tuned BERT model is deployed as a service, it may suffer from different attacks launched by the malicious users. In this work, we first present how an adversary can steal a BERT-based API service (the victim/target model) on multiple benchmark datasets with limited prior knowledge and queries. 
We further show that the extracted model can lead to highly transferable adversarial attacks against the victim model. Our studies indicate that the potential vulnerabilities of BERT-based API services still hold, even when there is an architectural mismatch between the victim model and the attack model. Finally, we investigate two defence strategies to protect the victim model, and find that unless the performance of the victim model is sacrificed, both model extraction and adversarial transferability can effectively compromise the target models.", "phrases": ["adversarial transferability", "bert", "victim", "attack model", "model extraction"], "overall_score": 1.0311276803525395, "scores": [0.9672029004750013, 0.8657416637391961, 0.8053155586542402, 0.5499040060026007, 0.5308428485488337], "rank_score": 0.7438013954839745} -{"id": "vergyri-kirchhoff-2004-automatic", "title": "Automatic Diacritization of Arabic for Acoustic Modeling in Speech Recognition", "abstract": "Automatic recognition of Arabic dialectal speech is a challenging task because Arabic dialects are essentially spoken varieties. Only few dialectal resources are available to date; moreover, most available acoustic data collections are transcribed without diacritics. Such a transcription omits essential pronunciation information about a word, such as short vowels. In this paper we investigate various procedures that enable us to use such training data by automatically inserting the missing diacritics into the transcription. These procedures use acoustic information in combination with different levels of morphological and contextual constraints. We evaluate their performance against manually diacritized transcriptions. In addition, we demonstrate the effect of their accuracy on the recognition performance of acoustic models trained on automatically diacritized training data.", "phrases": ["arabic", "automatic diacritization", "acoustic feature"], "overall_score": 1.783194100161885, "scores": [0.8561220929514769, 0.8339570585012215, 0.5408699484396557], "rank_score": 0.7436496999641179} -{"id": "duh-2008-ranking", "title": "Ranking vs. Regression in Machine Translation Evaluation", "abstract": "Automatic evaluation of machine translation (MT) systems is an important research topic for the advancement of MT technology. Most automatic evaluation methods proposed to date are score-based: they compute scores that represent translation quality, and MT systems are compared on the basis of these scores. \n \nWe advocate an alternative perspective of automatic MT evaluation based on ranking. Instead of producing scores, we directly produce a ranking over the set of MT systems to be compared. This perspective is often simpler when the evaluation goal is system comparison. We argue that it is easier to elicit human judgments of ranking and develop a machine learning approach to train on rank data. We compare this ranking method to a score-based regression method on WMT07 data. 
Results indicate that ranking achieves higher correlation to human judgments, especially in cases where ranking-specific features are used.", "phrases": ["regression", "machine learning approach", "linguistic information", "pos match"], "overall_score": 1.7122124133128644, "scores": [1.3688825913824043, 0.5440449901644587, 0.5334325423080282, 0.5280574879372166], "rank_score": 0.7436044029480269} -{"id": "mukund-srihari-2010-vector", "title": "A Vector Space Model for Subjectivity Classification in Urdu aided by Co-Training", "abstract": "The goal of this work is to produce a classifier that can distinguish subjective sentences from objective sentences for the Urdu language. The amount of labeled data required for training automatic classifiers can be highly imbalanced especially in the multilingual paradigm as generating annotations is an expensive task. In this work, we propose a cotraining approach for subjectivity analysis in the Urdu language that augments the positive set (subjective set) and generates a negative set (objective set) devoid of all samples close to the positive ones. Using the data set thus generated for training, we conduct experiments based on SVM and VSM algorithms, and show that our modified VSM based approach works remarkably well as a sentence level subjectivity classifier.", "phrases": ["vector space model", "subjectivity classification", "urdu language"], "overall_score": 1.0304573954093181, "scores": [0.8874481040040864, 0.8217069542829354, 0.5207986030195497], "rank_score": 0.7433178871021905} -{"id": "filatova-etal-2006-automatic", "title": "Automatic Creation of Domain Templates", "abstract": "Recently, many Natural Language Processing (NLP) applications have improved the quality of their output by using various machine learning techniques to mine Information Extraction (IE) patterns for capturing information from the input text. Currently, to mine IE patterns one should know in advance the type of the information that should be captured by these patterns. In this work we propose a novel methodology for corpus analysis based on cross-examination of several document collections representing different instances of the same domain. We show that this methodology can be used for automatic domain template creation. As the problem of automatic domain template creation is rather new, there is no well-defined procedure for the evaluation of the domain template quality. Thus, we propose a methodology for identifying what information should be present in the template. Using this information we evaluate the automatically created domain templates through the text snippets retrieved according to the created templates.", "phrases": ["domain template", "event-specific document", "extractor", "subtree"], "overall_score": 1.7823293308485286, "scores": [1.2643996961314983, 0.5729843490005743, 0.5715273828792318, 0.56424482458605], "rank_score": 0.7432890631493386} -{"id": "abercrombie-hovy-2016-putting", "title": "Putting Sarcasm Detection into Context: The Effects of Class Imbalance and Manual Labelling on Supervised Machine Classification of Twitter Conversations", "abstract": "Sarcasm can radically alter or invert a phrase\u2019s meaning. Sarcasm detection can therefore help improve natural language processing (NLP) tasks. The majority of prior research has modeled sarcasm detection as classification, with two important limitations: 1. Balanced datasets, when sarcasm is actually rather rare. 2. 
Using Twitter users\u2019 self-declarations in the form of hashtags to label data, when sarcasm can take many forms. To address these issues, we create an unbalanced corpus of manually annotated Twitter conversations. We compare human and machine ability to recognize sarcasm on this data under varying amounts of context. Our results indicate that both class imbalance and labelling method affect performance, and should both be considered when designing automatic sarcasm detection systems. We conclude that for progress to be made in real-world sarcasm detection, we will require a new class labelling scheme that is able to access the \u2018common ground\u2019 held between conversational parties.", "phrases": ["sarcasm detection", "class imbalance", "twitter conversation"], "overall_score": 1.4459838567321748, "scores": [0.8749392659041921, 0.8002883128554554, 0.5540385424446037], "rank_score": 0.7430887070680837} -{"id": "sanayai-meetei-etal-2019-wat2019", "title": "WAT2019: English-Hindi Translation on Hindi Visual Genome Dataset", "abstract": "A multimodal translation is a task of translating a source language to a target language with the help of a parallel text corpus paired with images that represent the contextual details of the text. In this paper, we carried out an extensive comparison to evaluate the benefits of using a multimodal approach on translating text in English to a low resource language, Hindi as a part of WAT2019 shared task. We carried out the translation of English to Hindi in three separate tasks with both the evaluation and challenge dataset. First, by using only the parallel text corpora, then through an image caption generation approach and, finally with the multimodal approach. Our experiment shows a significant improvement in the result with the multimodal approach than the other approach.", "phrases": ["wat2019", "literature survey", "english-hindi language pair"], "overall_score": 1.445858820789385, "scores": [0.8566950877944345, 0.8485683518928677, 0.5238099142257399], "rank_score": 0.7430244513043474} -{"id": "wang-etal-2021-codet5", "title": "CodeT5: Identifier-aware Unified Pre-trained Encoder-Decoder Models for Code Understanding and Generation", "abstract": "Pre-trained models for Natural Languages (NL) like BERT and GPT have been recently shown to transfer well to Programming Languages (PL) and largely benefit a broad set of code-related tasks. Despite their success, most current methods either rely on an encoder-only (or decoder-only) pre-training that is suboptimal for generation (resp. understanding) tasks or process the code snippet in the same way as NL, neglecting the special characteristics of PL such as token types. We present CodeT5, a unified pre-trained encoder-decoder Transformer model that better leverages the code semantics conveyed from the developer-assigned identifiers. Our model employs a unified framework to seamlessly support both code understanding and generation tasks and allows for multi-task learning. Besides, we propose a novel identifier-aware pre-training task that enables the model to distinguish which code tokens are identifiers and to recover them when they are masked. Furthermore, we propose to exploit the user-written code comments with a bimodal dual generation task for better NL-PL alignment. 
Comprehensive experiments show that CodeT5 significantly outperforms prior methods on understanding tasks such as code defect detection and clone detection, and generation tasks across various directions including PL-NL, NL-PL, and PL-PL. Further analysis reveals that our model can better capture semantic information from code. Our code and pre-trained models are released at .", "phrases": ["code understanding", "pre-trained model", "codet5"], "overall_score": 1.3308962188016542, "scores": [0.8699492653899215, 0.8149019123496141, 0.5435107899106956], "rank_score": 0.7427873225500771} -{"id": "farzindar-lapalme-2004-legal", "title": "Legal Text Summarization by Exploration of the Thematic Structure and Argumentative Roles", "abstract": "In this paper we describe our method for the summarization of legal documents helping a legal expert determine the key ideas of a judgment. Our approach is based on the exploration of the document\u2019s architecture and its thematic structures in order to build a table style summary for improving coherency and readability of the text. We present the components of a system, called LetSum, built with this approach, its implementation and some preliminary evaluation results.", "phrases": ["thematic structure", "legal document", "text summarization method"], "overall_score": 1.0295729210270754, "scores": [0.8281612336366396, 0.844174941550913, 0.5557034459114366], "rank_score": 0.742679873699663} -{"id": "matsuyoshi-sato-2008-automatic", "title": "Automatic Paraphrasing of Japanese Functional Expressions Using a Hierarchically Organized Dictionary", "abstract": "Automatic paraphrasing is a transformation of expressions into semantically equivalent expressions within one language. For generating a wider variety of phrasal paraphrases in Japanese, it is necessary to paraphrase functional expressions as well as content expressions. We propose a method of paraphrasing of Japanese functional expressions using a dictionary with two hierarchies: a morphological hierarchy and a semantic hierarchy. Our system generates appropriate alternative expressions for 79% of source phrases in Japanese in an open test. It also accepts style and readability specifications.", "phrases": ["japanese functional expression", "functional expression", "automatic paraphrasing"], "overall_score": 1.0293220450961544, "scores": [0.8830206250331345, 0.8242365999272001, 0.5202394899465275], "rank_score": 0.742498904968954} -{"id": "wei-etal-2021-cognitive", "title": "A Cognitive Regularizer for Language Modeling", "abstract": "The uniform information density (UID) hypothesis, which posits that speakers behaving optimally tend to distribute information uniformly across a linguistic signal, has gained traction in psycholinguistics as an explanation for certain syntactic, morphological, and prosodic choices. In this work, we explore whether the UID hypothesis can be operationalized as an inductive bias for statistical language modeling. Specifically, we augment the canonical MLE objective for training language models with a regularizer that encodes UID. In experiments on ten languages spanning five language families, we find that using UID regularization consistently improves perplexity in language models, having a larger effect when training data is limited. Moreover, via an analysis of generated sequences, we find that UID-regularized language models have other desirable properties, e.g., they generate text that is more lexically diverse. 
Our results not only suggest that UID is a reasonable inductive bias for language modeling, but also provide an alternative validation of the UID hypothesis using modern-day NLP tools.", "phrases": ["regularizer", "language modeling", "information density"], "overall_score": 0.8156705273528807, "scores": [0.887108682334729, 0.8176604183467727, 0.5225968293184703], "rank_score": 0.7424553099999907} -{"id": "pustejovsky-yocum-2014-image", "title": "Image Annotation with ISO-Space: Distinguishing Content from Structure", "abstract": "Natural language descriptions of visual media present interesting problems for linguistic annotation of spatial information. This paper explores the use of ISO-Space, an annotation specification to capturing spatial information, for encoding spatial relations mentioned in descriptions of images. Especially, we focus on the distinction between references to representational content and structural components of images, and the utility of such a distinction within a compositional semantics. We also discuss how such a structure-content distinction within the linguistic annotation can be leveraged to compute further inferences about spatial configurations depicted by images with verbal captions. We construct a composition table to relate content-based relations to structure-based relations in the image, as expressed in the captions. While still preliminary, our initial results suggest that a weak composition table is both sound and informative for deriving new spatial relations.", "phrases": ["iso-space", "spatial relation", "configuration", "image"], "overall_score": 1.3300001359633469, "scores": [0.9150986685775556, 0.9521422492939671, 0.5692996480081439, 0.5326082709033236], "rank_score": 0.7422872091957475} -{"id": "welivita-pu-2020-taxonomy", "title": "A Taxonomy of Empathetic Response Intents in Human Social Conversations", "abstract": "Open-domain conversational agents or chatbots are becoming increasingly popular in the natural language processing community. One of the challenges is enabling them to converse in an empathetic manner. Current neural response generation methods rely solely on end-to-end learning from large scale conversation data to generate dialogues. This approach can produce socially unacceptable responses due to the lack of large-scale quality data used to train the neural models. However, recent work has shown the promise of combining dialogue act/intent modelling and neural response generation. This hybrid method improves the response quality of chatbots and makes them more controllable and interpretable. A key element in dialog intent modelling is the development of a taxonomy. Inspired by this idea, we have manually labeled 500 response intents using a subset of a sizeable empathetic dialogue dataset (25K dialogues). Our goal is to produce a large-scale taxonomy for empathetic response intents. Furthermore, using lexical and machine learning methods, we automatically analysed both speaker and listener utterances of the entire dataset with identified response intents and 32 emotion categories. Finally, we use information visualization methods to summarize emotional dialogue exchange patterns and their temporal progression. 
These results reveal novel and important empathy patterns in human-human open-domain conversations and can serve as heuristics for hybrid approaches.", "phrases": ["empathetic response intent", "response generation", "empatheticdialogues dataset"], "overall_score": 1.7089788425062238, "scores": [1.1715229707126056, 0.5284345267242115, 0.5266427455327609], "rank_score": 0.7422000809898593} -{"id": "el-ballouli-etal-2017-cat", "title": "CAT: Credibility Analysis of Arabic Content on Twitter", "abstract": "Data generated on Twitter has become a rich source for various data mining tasks. Those data analysis tasks that are dependent on the tweet semantics, such as sentiment analysis, emotion mining, and rumor detection among others, suffer considerably if the tweet is not credible, not real, or spam. In this paper, we perform an extensive analysis on credibility of Arabic content on Twitter. We also build a classification model (CAT) to automatically predict the credibility of a given Arabic tweet. Of particular originality is the inclusion of features extracted directly or indirectly from the author's profile and timeline. To train and test CAT, we annotated for credibility a data set of 9,000 Arabic tweets that are topic independent. CAT achieved consistent improvements in predicting the credibility of the tweets when compared to several baselines and when compared to the state-of-the-art approach with an improvement of 21% in weighted average F-measure. We also conducted experiments to highlight the importance of the user-based features as opposed to the content-based features. We conclude our work with a feature reduction experiment that highlights the best indicative features of credibility.", "phrases": ["arabic content", "twitter", "arabic tweet"], "overall_score": 1.0287445835182119, "scores": [0.8414840764155569, 0.7879629744018946, 0.5968000126572082], "rank_score": 0.7420823544915532} -{"id": "tackstrom-mcdonald-2011-semi", "title": "Semi-supervised latent variable models for sentence-level sentiment analysis", "abstract": "We derive two variants of a semi-supervised model for fine-grained sentiment analysis. Both models leverage abundant natural supervision in the form of review ratings, as well as a small amount of manually crafted sentence labels, to learn sentence-level sentiment classifiers. The proposed model is a fusion of a fully supervised structured conditional model and its partially supervised counterpart. This allows for highly efficient estimation and inference algorithms with rich feature definitions. We describe the two variants as well as their component models and verify experimentally that both variants give significantly improved results for sentence-level sentiment analysis compared to all baselines.", "phrases": ["sentence-level sentiment analysis", "structured conditional model", "polarity", "whole review"], "overall_score": 1.7080826766103254, "scores": [0.9936967541741678, 0.8970249314553332, 0.5423850444726027, 0.5341367942434998], "rank_score": 0.7418108810864008} -{"id": "chen-etal-2014-aspect", "title": "Aspect Extraction with Automated Prior Knowledge Learning", "abstract": "Aspect extraction is an important task in sentiment analysis. Topic modeling is a popular method for the task. However, unsupervised topic models often generate incoherent aspects. To address the issue, several knowledge-based models have been proposed to incorporate prior knowledge provided by the user to guide modeling. 
In this paper, we take a major step forward and show that in the big data era, without any user input, it is possible to learn prior knowledge automatically from a large amount of review data available on the Web. Such knowledge can then be used by a topic model to discover more coherent aspects. There are two key challenges: (1) learning quality knowledge from reviews of diverse domains, and (2) making the model fault-tolerant to handle possibly wrong knowledge. A novel approach is proposed to solve these problems. Experimental results using reviews from 36 domains show that the proposed approach achieves significant improvements over state-of-the-art baselines.", "phrases": ["extraction", "sentiment analysis", "topic modeling"], "overall_score": 1.7786802086963094, "scores": [0.8318292434533143, 0.8666401178410642, 0.5268324217404196], "rank_score": 0.7417672610115994} -{"id": "do-etal-2012-joint", "title": "Joint Inference for Event Timeline Construction", "abstract": "This paper addresses the task of constructing a timeline of events mentioned in a given text. To accomplish that, we present a novel representation of the temporal structure of a news article based on time intervals. We then present an algorithmic approach that jointly optimizes the temporal structure by coupling local classifiers that predict associations and temporal relations between pairs of temporal entities with global constraints. Moreover, we present ways to leverage knowledge provided by event coreference to further improve the system performance. Overall, our experiments show that the joint inference model significantly outperformed the local classifiers by 9.2% of relative improvement in F1. The experiments also suggest that good event coreference could make remarkable contribution to a robust event timeline construction system.", "phrases": ["event timeline construction", "dense annotation", "integer linear programming", "ilp"], "overall_score": 2.1436666576606713, "scores": [0.9922736248670427, 0.9048420888309444, 0.5420955104139505, 0.5274198354456018], "rank_score": 0.7416577648893848} -{"id": "laparra-rigau-2013-impar", "title": "ImpAr: A Deterministic Algorithm for Implicit Semantic Role Labelling", "abstract": "This paper presents a novel deterministic algorithm for implicit Semantic Role Labeling. The system exploits a very simple but relevant discursive property, the argument coherence over different instances of a predicate. The algorithm solves the implicit arguments sequentially, exploiting not only explicit but also the implicit arguments previously solved. In addition, we empirically demonstrate that the algorithm obtains very competitive and robust performances with respect to supervised approaches that require large amounts of costly training data.", "phrases": ["predicate", "corresponding role", "discourse coherence"], "overall_score": 1.9022571474815861, "scores": [1.131846641238556, 0.5479767122951142, 0.5450827350967112], "rank_score": 0.7416353628767939} -{"id": "jansen-2018-multi", "title": "Multi-hop Inference for Sentence-level TextGraphs: How Challenging is Meaningfully Combining Information for Science Question Answering?", "abstract": "Question Answering for complex questions is often modelled as a graph construction or traversal task, where a solver must build or traverse a graph of facts that answer and explain a given question. 
This \u201cmulti-hop\u201d inference has been shown to be extremely challenging, with few models able to aggregate more than two facts before being overwhelmed by \u201csemantic drift\u201d, or the tendency for long chains of facts to quickly drift off topic. This is a major barrier to current inference models, as even elementary science questions require an average of 4 to 6 facts to answer and explain. In this work we empirically characterize the difficulty of building or traversing a graph of sentences connected by lexical overlap, by evaluating chance sentence aggregation quality through 9,784 manually-annotated judgements across knowledge graphs built from three free-text corpora (including study guides and Simple Wikipedia). We demonstrate semantic drift tends to be high and aggregation quality low, at between 0.04 and 3, and highlight scenarios that maximize the likelihood of meaningfully combining information.", "phrases": ["science question", "semantic drift", "difficulty", "knowledge graph"], "overall_score": 2.007809956122288, "scores": [1.2865412617256329, 0.5967220929604876, 0.5463230051367708, 0.5361045351318396], "rank_score": 0.7414227237386827} -{"id": "zhu-etal-2015-ranking", "title": "A Re-ranking Model for Dependency Parser with Recursive Convolutional Neural Network", "abstract": "In this work, we address the problem to model all the nodes (words or phrases) in a dependency tree with the dense representations. We propose a recursive convolutional neural network (RCNN) architecture to capture syntactic and compositional-semantic representations of phrases and words in a dependency tree. Different with the original recursive neural network, we introduce the convolution and pooling layers, which can model a variety of compositions by the feature maps and choose the most informative compositions by the pooling layers. Based on RCNN, we use a discriminative model to re-rank a $k$-best list of candidate dependency parsing trees. The experiments show that RCNN is very effective to improve the state-of-the-art dependency parsing on both English and Chinese datasets.", "phrases": ["re-ranking model", "convolutional neural network", "dependency parsing"], "overall_score": 1.4426292717490037, "scores": [0.8549080578040035, 0.836436624979137, 0.5327496914345403], "rank_score": 0.7413647914058936} -{"id": "peters-martins-2019-ist", "title": "IT\u2013IST at the SIGMORPHON 2019 Shared Task: Sparse Two-headed Models for Inflection", "abstract": "This paper presents the Instituto de Telecomunica\u00e7\u00f5es\u2013Instituto Superior T\u00e9cnico submission to Task 1 of the SIGMORPHON 2019 Shared Task. Our models combine sparse sequence-to-sequence models with a two-headed attention mechanism that learns separate attention distributions for the lemma and inflectional tags. Among submissions to Task 1, our models rank second and third. 
Despite the low data setting of the task (only 100 in-language training examples), they learn plausible inflection patterns and often concentrate all probability mass into a small set of hypotheses, making beam search exact.", "phrases": ["shared task", "inflection", "two-headed attention mechanism"], "overall_score": 1.193088986596251, "scores": [0.9019967566460473, 0.783644566480435, 0.5382822791052435], "rank_score": 0.7413078674105753} -{"id": "andreevskaia-bergler-2008-specialists", "title": "When Specialists and Generalists Work Together: Overcoming Domain Dependence in Sentiment Tagging", "abstract": "This study presents a novel approach to the problem of system portability across different domains: a sentiment annotation system that integrates a corpus-based classifier trained on a small set of annotated in-domain data and a lexicon-based system trained on WordNet. The paper explores the challenges of system portability across domains and text genres (movie reviews, news, blogs, and product reviews), highlights the factors affecting system performance on out-of-domain and small-set in-domain data, and presents a new system consisting of the ensemble of two classifiers with precision-based vote weighting, that provides significant gains in accuracy and recall over the corpus-based classifier and the lexicon-based system taken individually.", "phrases": ["corpus-based classifier", "opinion", "subjectivity analysis"], "overall_score": 1.95447848161111, "scores": [1.113894951195041, 0.555914523181079, 0.5519821400137599], "rank_score": 0.7405972047966266} -{"id": "hu-etal-2014-minimum", "title": "Minimum Translation Modeling with Recurrent Neural Networks", "abstract": "We introduce recurrent neural network-based Minimum Translation Unit (MTU) models which make predictions based on an unbounded history of previous bilingual contexts. Traditional back-off n-gram models suffer under the sparse nature of MTUs which makes estimation of high-order sequence models challenging. We tackle the sparsity problem by modeling MTUs both as bags-of-words and as a sequence of individual source and target words. Our best results improve the output of a phrase-based statistical machine translation system trained on WMT 2012 French-English data by up to 1.5 BLEU, and we outperform the traditional n-gram based MTU approach by up to 0.8 BLEU.", "phrases": ["recurrent neural network", "minimum translation unit", "recent research", "phrase pair"], "overall_score": 1.6270155573110807, "scores": [0.9634885148771306, 0.8468163704089723, 0.596607237840189, 0.55503464206705], "rank_score": 0.7404866912983354} -{"id": "hassan-etal-2020-alt-semeval", "title": "ALT at SemEval-2020 Task 12: Arabic and English Offensive Language Identification in Social Media", "abstract": "This paper describes the systems submitted by the Arabic Language Technology group (ALT) at SemEval-2020 Task 12: Multilingual Offensive Language Identification in Social Media. We focus on sub-task A (Offensive Language Identification) for two languages: Arabic and English. Our efforts for both languages achieved more than 90% macro-averaged F1-score on the official test set. For Arabic, the best results were obtained by a system combination of Support Vector Machine, Deep Neural Network, and fine-tuned Bidirectional Encoder Representations from Transformers (BERT).
For English, the best results were obtained by fine-tuning BERT.", "phrases": ["semeval-2020 task", "offensive language identification", "social media"], "overall_score": 1.0257230787973153, "scores": [0.8465074799387347, 0.7972455404355575, 0.5759553782850769], "rank_score": 0.7399027995531231} -{"id": "poerner-etal-2018-evaluating", "title": "Evaluating neural network explanation methods using hybrid documents and morphosyntactic agreement", "abstract": "The behavior of deep neural networks (DNNs) is hard to understand. This makes it necessary to explore post hoc explanation methods. We conduct the first comprehensive evaluation of explanation methods for NLP. To this end, we design two novel evaluation paradigms that cover two important classes of NLP problems: small context and large context problems. Both paradigms require no manual annotation and are therefore broadly applicable. We also introduce LIMSSE, an explanation method inspired by LIME that is designed for NLP. We show empirically that LIMSSE, LRP and DeepLIFT are the most effective explanation methods and recommend them for explaining DNNs in NLP.", "phrases": ["explanation method", "hybrid document", "morphosyntactic agreement"], "overall_score": 1.7036242614391808, "scores": [0.8962907294807348, 0.7846702061787855, 0.5386629122790966], "rank_score": 0.739874615979539} -{"id": "weiss-etal-2015-structured", "title": "Structured Training for Neural Network Transition-Based Parsing", "abstract": "We present structured perceptron training for neural network transition-based dependency parsing. We learn the neural network representation using a gold corpus augmented by a large number of automatically parsed sentences. Given this fixed network representation, we learn a final layer using the structured perceptron with beam-search decoding. On the Penn Treebank, our parser reaches 94.26% unlabeled and 92.41% labeled attachment accuracy, which to our knowledge is the best accuracy on Stanford Dependencies to date. We also provide in-depth ablative analysis to determine which aspects of our model provide the largest gains in accuracy.", "phrases": ["dependency parser", "neural network architecture", "structured learning"], "overall_score": 2.516225758326325, "scores": [1.0968855689154724, 0.5736821145137844, 0.5488499004129244], "rank_score": 0.7398058612807271} -{"id": "kuhlmann-jonsson-2015-parsing", "title": "Parsing to Noncrossing Dependency Graphs", "abstract": "We study the generalization of maximum spanning tree dependency parsing to maximum acyclic subgraphs. Because the underlying optimization problem is intractable even under an arc-factored model, we consider the restriction to noncrossing dependency graphs. Our main contribution is a cubic-time exact inference algorithm for this class. We extend this algorithm into a practical parser and evaluate its performance on four linguistic data sets used in semantic dependency parsing. 
We also explore a generalization of our parsing framework to dependency graphs with pagenumber at most k and show that the resulting optimization problem is NP-hard for k \u2265 2.", "phrases": ["generalization", "dependency parsing", "acyclic subgraph", "pagenumber"], "overall_score": 1.6254146467846757, "scores": [1.0136892180360662, 0.8869593050753296, 0.5348445992510381, 0.523539219712], "rank_score": 0.7397580855186086} -{"id": "duann-huang-2015-embodiment", "title": "When Embodiment Meets Generative Lexicon: The Human Body Part Metaphors in Sinica Corpus", "abstract": "This research aims to integrate embodiment with generative lexicon. By analyzing the metaphorically used human body part terms in Sinica Corpus, the first balanced modern Chinese corpus, we reveal how these two theories complement each other. Embodiment strengthens generative lexicon by spelling out the cognitive reasons which underlie the production of meaning, and generative lexicon, specifically the qualia structure, complements embodiment by accounting for the reason underlying the selection of a particular body part for metaphorization. Discussing how the four body part terms\u2014\u8840 xie \u201cblood\u201d, \u8089 rou \u201cflesh\u201d, \u9aa8 gu \u201cbone\u201d, \u8108 mai \u201cmeridian\u201d\u2014behave metaphorically, this research argues that the visibility and the telic role of the qualia structure are the major reasons motivating the choice of a body part to represent a comparatively abstract notion. The finding accounts for what constrains the selection of body parts for metaphorical uses. It also facilitates the prediction of the behavior of the four body part terms in these uses, which can function as the starting point to examine whether the two factors\u2014visibility and telicity\u2014also motivate the metaphorization of the rest of the human body parts.", "phrases": ["embodiment", "generative lexicon", "telic role"], "overall_score": 0.812666521035252, "scores": [0.8471881824430658, 0.8328476943779483, 0.5391269600169364], "rank_score": 0.7397209456126502} -{"id": "ji-etal-2020-dilated", "title": "Dilated Convolutional Attention Network for Medical Code Assignment from Clinical Text", "abstract": "Medical code assignment, which predicts medical codes from clinical texts, is a fundamental task of intelligent medical information systems. The emergence of deep models in natural language processing has boosted the development of automatic assignment methods. However, recent advanced neural architectures with flat convolutions or multi-channel feature concatenation ignore the sequential causal constraint within a text sequence and may not learn meaningful clinical text representations, especially for lengthy clinical notes with long-term sequential dependency. This paper proposes a Dilated Convolutional Attention Network (DCAN), integrating dilated convolutions, residual connections, and label attention, for medical code assignment. It adopts dilated convolutions to capture complex medical patterns with a receptive field which increases exponentially with dilation size. 
Experiments on a real-world clinical dataset empirically show that our model improves the state of the art.", "phrases": ["medical code assignment", "clinical text", "dilated convolution"], "overall_score": 1.024755216747831, "scores": [0.8668216530457311, 0.8226627513262985, 0.5281294996187564], "rank_score": 0.7392046346635954} -{"id": "abu-farha-magdy-2020-multitask", "title": "Multitask Learning for Arabic Offensive Language and Hate-Speech Detection", "abstract": "Offensive language and hate-speech are phenomena that spread with the rising popularity of social media. Detecting such content is crucial for understanding and predicting conflicts, understanding polarisation among communities and providing means and tools to filter or block inappropriate content. This paper describes the SMASH team submission to OSACT4's shared task on hate-speech and offensive language detection, where we explore different approaches to perform these tasks. The experiments cover a variety of approaches that include deep learning, transfer learning and multitask learning. We also explore the utilisation of sentiment information to perform the previous task. Our best model is a multitask learning architecture, based on CNN-BiLSTM, that was trained to detect hate-speech and offensive language and predict sentiment.", "phrases": ["offensive language", "language detection", "multitask learning", "sarcasm detection"], "overall_score": 1.7723960128466367, "scores": [0.7848869848896513, 1.086235855117132, 0.564749567393492, 0.5207137837526529], "rank_score": 0.7391465477882321} -{"id": "siddharthan-2011-text", "title": "Text Simplification using Typed Dependencies: A Comparision of the Robustness of Different Generation Strategies", "abstract": "We present a framework for text simplification based on applying transformation rules to a typed dependency representation produced by the Stanford parser. We test two approaches to regeneration from typed dependencies: (a) gen-light, where the transformed dependency graphs are linearised using the word order and morphology of the original sentence, with any changes coded into the transformation rules, and (b) gen-heavy, where the Stanford dependencies are reduced to a DSyntS representation and sentences are generated formally using the RealPro surface realiser. The main contribution of this paper is to compare the robustness of these approaches in the presence of parsing errors, using both a single parse and an n-best parse setting in an overgenerate and rank approach. We find that the gen-light approach is robust to parser error, particularly in the n-best parse setting. On the other hand, parsing errors cause the realiser in the gen-heavy approach to order words and phrases in ways that are disliked by our evaluators.", "phrases": ["robustness", "change", "text simplification", "general purpose generator", "relative clause"], "overall_score": 2.001010672070018, "scores": [0.9446716723705131, 0.7843385930050166, 0.8673072524834834, 0.5560840377704982, 0.5421582262674086], "rank_score": 0.7389119563793839} -{"id": "bordes-etal-2014-question", "title": "Question Answering with Subgraph Embeddings", "abstract": "This paper presents a system which learns to answer questions on a broad range of topics from a knowledge base using few hand-crafted features. Our model learns low-dimensional embeddings of words and knowledge base constituents; these representations are used to score natural language questions against candidate answers. 
Training our system using pairs of questions and structured representations of their answers, and pairs of question paraphrases, yields competitive results on a recent benchmark of the literature.", "phrases": ["knowledge base constituent", "candidate answer", "other work"], "overall_score": 2.461920746590767, "scores": [1.1302646152434206, 0.5578123594422442, 0.5284023011868615], "rank_score": 0.7388264252908421} -{"id": "li-etal-2020-conditional", "title": "Conditional Augmentation for Aspect Term Extraction via Masked Sequence-to-Sequence Generation", "abstract": "Aspect term extraction aims to extract aspect terms from review texts as opinion targets for sentiment analysis. One of the big challenges with this task is the lack of sufficient annotated data. While data augmentation is potentially an effective technique to address the above issue, it is uncontrollable as it may change aspect words and aspect labels unexpectedly. In this paper, we formulate the data augmentation as a conditional generation task: generating a new sentence while preserving the original opinion targets and labels. We propose a masked sequence-to-sequence method for conditional augmentation of aspect term extraction. Unlike existing augmentation approaches, ours is controllable and allows us to generate more diversified sentences. Experimental results confirm that our method alleviates the data scarcity problem significantly. It also effectively boosts the performances of several current models for aspect term extraction.", "phrases": ["aspect term extraction", "generation task", "conditional augmentation"], "overall_score": 1.5362914203124836, "scores": [0.8835665380302807, 0.7993033888841029, 0.5335300865306991], "rank_score": 0.7388000044816941} -{"id": "troiano-etal-2019-crowdsourcing", "title": "Crowdsourcing and Validating Event-focused Emotion Corpora for German and English", "abstract": "Sentiment analysis has a range of corpora available across multiple languages. For emotion analysis, the situation is more limited, which hinders potential research on crosslingual modeling and the development of predictive models for other languages. In this paper, we fill this gap for German by constructing deISEAR, a corpus designed in analogy to the well-established English ISEAR emotion dataset. Motivated by Scherer's appraisal theory, we implement a crowdsourcing experiment which consists of two steps. In step 1, participants create descriptions of emotional events for a given emotion. In step 2, five annotators assess the emotion expressed by the texts. We show that transferring an emotion classification model from the original English ISEAR to the German crowdsourced deISEAR via machine translation does not, on average, cause a performance drop.", "phrases": ["emotion", "crowd-sourcing", "event description"], "overall_score": 1.7714763224096581, "scores": [1.1381507826400423, 0.5566419113997761, 0.5214963272179683], "rank_score": 0.7387630070859289} -{"id": "liu-etal-2012-broad", "title": "A Broad-Coverage Normalization System for Social Media Language", "abstract": "Social media language contains a huge amount and wide variety of nonstandard tokens, created both intentionally and unintentionally by the users. It is of crucial importance to normalize the noisy nonstandard tokens before applying other NLP techniques. A major challenge facing this task is the system coverage, i.e., for any user-created nonstandard term, the system should be able to restore the correct word within its top n output candidates. 
In this paper, we propose a cognitively-driven normalization system that integrates different human perspectives in normalizing the nonstandard tokens, including the enhanced letter transformation, visual priming, and string/phonetic similarity. The system was evaluated on both word- and message-level using four SMS and Twitter data sets. Results show that our system achieves over 90% word-coverage across all data sets (a 10% absolute increase compared to state-of-the-art); the broad word-coverage can also successfully translate into message-level performance gain, yielding 6% absolute increase compared to the best prior approach.", "phrases": ["normalization system", "coverage", "phonetic similarity", "social medium text"], "overall_score": 2.212527367776529, "scores": [0.807169890439983, 1.0420178732458911, 0.5762635039998926, 0.5287878508209001], "rank_score": 0.7385597796266667} -{"id": "correia-martins-2019-simple", "title": "A Simple and Effective Approach to Automatic Post-Editing with Transfer Learning", "abstract": "Automatic post-editing (APE) seeks to automatically refine the output of a black-box machine translation (MT) system through human post-edits. APE systems are usually trained by complementing human post-edited data with large, artificial data generated through back-translations, a time-consuming process often no easier than training an MT system from scratch. In this paper, we propose an alternative where we fine-tune pre-trained BERT models on both the encoder and decoder of an APE system, exploring several parameter sharing strategies. By only training on a dataset of 23K sentences for 3 hours on a single GPU we obtain results that are competitive with systems that were trained on 5M artificial sentences. When we add this artificial data our method obtains state-of-the-art results.", "phrases": ["effective approach", "automatic post-editing", "transfer learning", "scratch", "pre-trained bert model"], "overall_score": 1.7003664271309222, "scores": [0.9749999312090173, 0.8414664763143889, 0.7820868955548737, 0.5593804623526527, 0.534365017151604], "rank_score": 0.7384597565165072} -{"id": "dreyer-eisner-2011-discovering", "title": "Discovering Morphological Paradigms from Plain Text Using a Dirichlet Process Mixture Model", "abstract": "We present an inference algorithm that organizes observed words (tokens) into structured inflectional paradigms (types). It also naturally predicts the spelling of unobserved forms that are missing from these paradigms, and discovers inflectional principles (grammar) that generalize to wholly unobserved words. \n \nOur Bayesian generative model of the data explicitly represents tokens, types, inflections, paradigms, and locally conditioned string edits. It assumes that inflected word tokens are generated from an infinite mixture of inflectional paradigms (string tuples). Each paradigm is sampled all at once from a graphical model, whose potential functions are weighted finite-state transducers with language-specific parameters to be learned. These assumptions naturally lead to an elegant empirical Bayes inference procedure that exploits Monte Carlo EM, belief propagation, and dynamic programming. 
Given 50-100 seed paradigms, adding a 10-million-word corpus reduces prediction error for morphological inflections by up to 10%.", "phrases": ["morphological paradigms", "mixture", "inflection", "generative model", "finite-state transducer"], "overall_score": 2.345904220565766, "scores": [0.8705618604605072, 0.875973258148227, 0.8522986875009064, 0.5598039802800562, 0.5321496354031671], "rank_score": 0.7381574843585728} -{"id": "fortuna-etal-2019-hierarchically", "title": "A Hierarchically-Labeled Portuguese Hate Speech Dataset", "abstract": "Over the past years, the amount of online offensive speech has been growing steadily. To successfully cope with it, machine learning methods are applied. However, ML-based techniques require sufficiently large annotated datasets. In recent years, different datasets were published, mainly for English. In this paper, we present a new dataset for Portuguese, which has not been in focus so far. The dataset is composed of 5,668 tweets. For its annotation, we defined two different schemes used by annotators with different levels of expertise. Firstly, non-experts annotated the tweets with binary labels (`hate' vs. `no-hate'). Secondly, expert annotators classified the tweets following a fine-grained hierarchical multiple label scheme with 81 hate speech categories in total. The inter-annotator agreement varied from category to category, which reflects the insight that some types of hate speech are more subtle than others and that their detection depends on personal perception. This hierarchical annotation scheme is the main contribution of the presented work, as it facilitates the identification of different types of hate speech and their intersections. To demonstrate the usefulness of our dataset, we carried out a baseline classification experiment with pre-trained word embeddings and LSTM on the binary classified data, with a state-of-the-art outcome.", "phrases": ["portuguese", "hate speech category", "hierarchical annotation scheme"], "overall_score": 1.6214678109208756, "scores": [1.063719789541115, 0.6203056501186187, 0.5298599696596592], "rank_score": 0.7379618031064643} -{"id": "martins-almeida-2014-priberam", "title": "Priberam: A Turbo Semantic Parser with Second Order Features", "abstract": "This paper presents our contribution to the SemEval-2014 shared task on Broad-Coverage Semantic Dependency Parsing. We employ a feature-rich linear model, including scores for first and second-order dependencies (arcs, siblings, grandparents and co-parents). Decoding is performed in a global manner by solving a linear relaxation with alternating directions dual decomposition (AD3). Our system achieved the top score in the open challenge, and the second highest score in the closed track.", "phrases": ["dual decomposition", "open challenge", "track", "dependency parser", "hand-crafted feature"], "overall_score": 1.8927676248528675, "scores": [0.9559706775300332, 0.8333998789011832, 0.8321048990534718, 0.5434009751949568, 0.5248019236838303], "rank_score": 0.7379356708726952} -{"id": "pouget-abadie-etal-2014-overcoming", "title": "Overcoming the Curse of Sentence Length for Neural Machine Translation using Automatic Segmentation", "abstract": "The authors of (Cho et al., 2014a) have shown that the recently introduced neural network translation systems suffer from a significant drop in translation quality when translating long sentences, unlike existing phrase-based translation systems. 
In this paper, we propose a way to address this issue by automatically segmenting an input sentence into phrases that can be easily translated by the neural network translation model. Once each segment has been independently translated by the neural machine translation model, the translated clauses are concatenated to form a final translation. Empirical results show a significant improvement in translation quality for long sentences.", "phrases": ["curse", "sentence length", "segment"], "overall_score": 1.435622029658359, "scores": [0.8785309879584096, 0.782979171106345, 0.5517811848680287], "rank_score": 0.7377637813109278} -{"id": "garcia-duran-etal-2018-learning", "title": "Learning Sequence Encoders for Temporal Knowledge Graph Completion", "abstract": "Research on link prediction in knowledge graphs has mainly focused on static multi-relational data. In this work we consider temporal knowledge graphs where relations between entities may only hold for a time interval or a specific point in time. In line with previous work on static knowledge graphs, we propose to address this problem by learning latent entity and relation type representations. To incorporate temporal information, we utilize recurrent neural networks to learn time-aware representations of relation types which can be used in conjunction with existing latent factorization methods. The proposed approach is shown to be robust to common challenges in real-world KGs: the sparsity and heterogeneity of temporal expressions. Experiments show the benefits of our approach on four temporal KGs. The data sets are available under a permissive BSD-3 license.", "phrases": ["temporal knowledge graph", "link prediction", "recurrent neural network", "time-aware representation", "kge model"], "overall_score": 1.8331973057685709, "scores": [0.8648164331344116, 0.85366078680371, 0.8385456819641232, 0.6083572884798654, 0.5232841421894374], "rank_score": 0.7377328665143095} -{"id": "siddhant-lipton-2018-deep", "title": "Deep Bayesian Active Learning for Natural Language Processing: Results of a Large-Scale Empirical Study", "abstract": "Several recent papers investigate Active Learning (AL) for mitigating the data dependence of deep learning for natural language processing. However, the applicability of AL to real-world problems remains an open question. While in supervised learning, practitioners can try many different methods, evaluating each against a validation set before selecting a model, AL affords no such luxury. Over the course of one AL run, an agent annotates its dataset exhausting its labeling budget. Thus, given a new task, we have no opportunity to compare models and acquisition functions. This paper provides a large-scale empirical study of deep active learning, addressing multiple tasks and, for each, multiple datasets, multiple models, and a full suite of acquisition functions. We find that across all settings, Bayesian active learning by disagreement, using uncertainty estimates provided either by Dropout or Bayes-by-Backprop significantly improves over i.i.d. 
baselines and usually outperforms classic uncertainty sampling.", "phrases": ["bayesian active learning", "large-scale empirical study", "recent paper", "dropout", "text classification"], "overall_score": 1.997615430167164, "scores": [0.9020074158622476, 0.8055344444194712, 0.8947283983728552, 0.5589810309486789, 0.5270396980492463], "rank_score": 0.7376581975304999} -{"id": "chen-etal-2018-recurrent", "title": "Recurrent Neural Networks as Weighted Language Recognizers", "abstract": "We investigate the computational complexity of various problems for simple recurrent neural networks (RNNs) as formal models for recognizing weighted languages. We focus on the single-layer, ReLU-activation, rational-weight RNNs with softmax, which are commonly used in natural language processing applications. We show that most problems for such RNNs are undecidable, including consistency, equivalence, minimization, and the determination of the highest-weighted string. However, for consistent RNNs the last problem becomes decidable, although the solution length can surpass all computable bounds. If additionally the string is limited to polynomial length, the problem becomes NP-complete. In summary, this shows that approximations and heuristic algorithms are necessary in practical applications of those RNNs.", "phrases": ["weighted language", "rnn", "power"], "overall_score": 1.8326504117757105, "scores": [1.0956762023394355, 0.5939442103261274, 0.5229179278778089], "rank_score": 0.7375127801811239} -{"id": "dakwale-monz-2017-fine", "title": "Fine-Tuning for Neural Machine Translation with Limited Degradation across In- and Out-of-Domain Data", "abstract": "Neural machine translation is a recently proposed approach which has shown results competitive with traditional MT approaches. Similar to other neural network-based methods, NMT also suffers from low performance for the domains with less available training data. Domain adaptation deals with improving performance of a model trained on large general domain data over test instances from a new domain. Fine-tuning is a fast and simple domain adaptation method which has demonstrated substantial improvements for various neural network based tasks including NMT. However, it suffers from drastic performance degradation on the general or source domain test sentences, which is undesirable in real-time applications. To address this problem of drastic degradation, in this paper, we propose two simple modifications to the fine-tuning approach, namely multi-objective learning and multi-output learning which are based on the \u201cKnowledge distillation\u201d framework. Experiments on English-German translations demonstrate that our approaches achieve results comparable to simple fine-tuning on the target domain task with comparatively little loss on the general domain task.", "phrases": ["neural machine translation", "new domain", "fine-tuned model", "continued training"], "overall_score": 1.6978533519160521, "scores": [0.9352549985237244, 0.9615083927283167, 0.5266987683900001, 0.5260112076302842], "rank_score": 0.7373683418180813} -{"id": "yuan-etal-2016-semi", "title": "Semi-supervised Word Sense Disambiguation with Neural Models", "abstract": "Determining the intended sense of words in text \u2013 word sense disambiguation (WSD) \u2013 is a long-standing problem in natural language processing. Recently, researchers have shown promising results using word vectors extracted from a neural network language model as features in WSD algorithms. 
However, a simple average or concatenation of word vectors for each word in a text loses the sequential and syntactic information of the text. In this paper, we study WSD with a sequence learning neural net, LSTM, to better capture the sequential and syntactic patterns of the text. To alleviate the lack of training data in all-words WSD, we employ the same LSTM in a semi-supervised label propagation classifier. We demonstrate state-of-the-art results, especially on verbs.", "phrases": ["word sense disambiguation", "wsd", "semi-supervised learning", "lstm language model"], "overall_score": 2.0887399280282564, "scores": [1.2114163877977289, 0.6291405703866573, 0.565941705701484, 0.5424355311477844], "rank_score": 0.7372335487584136} -{"id": "wu-wang-2009-revisiting", "title": "Revisiting Pivot Language Approach for Machine Translation", "abstract": "This paper revisits the pivot language approach for machine translation. First, we investigate three different methods for pivot translation. Then we employ a hybrid method combining RBMT and SMT systems to fill up the data gap for pivot translation, where the source-pivot and pivot-target corpora are independent. Experimental results on spoken language translation show that this hybrid method significantly improves the translation quality, which outperforms the method using a source-target corpus of the same size. In addition, we propose a system combination approach to select better translations from those produced by various pivot translation methods. This method regards system combination as a translation evaluation problem and formalizes it with a regression learning model. Experimental results indicate that our method achieves consistent and significant improvement over individual translation outputs.", "phrases": ["pivot language approach", "machine translation", "bridging", "many researcher"], "overall_score": 1.1865296874508233, "scores": [0.9718263974741032, 0.9331977697970512, 0.5233792488394113, 0.5205259667106106], "rank_score": 0.7372323457052941} -{"id": "liu-etal-2019-hierarchical", "title": "Hierarchical Pointer Net Parsing", "abstract": "Transition-based top-down parsing with pointer networks has achieved state-of-the-art results in multiple parsing tasks, while having a linear time complexity. However, the decoder of these parsers has a sequential structure, which does not yield the most appropriate inductive bias for deriving tree structures. In this paper, we propose hierarchical pointer network parsers, and apply them to dependency and sentence-level discourse parsing tasks. Our results on standard benchmark datasets demonstrate the effectiveness of our approach, outperforming existing methods and setting a new state-of-the-art.", "phrases": ["pointer network", "tree structure", "sentence-level discourse parsing"], "overall_score": 1.4337295342875904, "scores": [1.0012068328959753, 0.6105269347541403, 0.5986399255807264], "rank_score": 0.7367912310769474} -{"id": "ferreira-freitas-2020-premise", "title": "Premise Selection in Natural Language Mathematical Texts", "abstract": "The discovery of supporting evidence for addressing complex mathematical problems is a semantically challenging task, which is still unexplored in the field of natural language processing for mathematical text. The natural language premise selection task consists in using conjectures written in both natural language and mathematical formulae to recommend premises that most likely will be useful to prove a particular statement. 
We propose an approach to solve this task as a link prediction problem, using Deep Convolutional Graph Neural Networks. This paper also analyses how different baselines perform in this task and shows that a graph structure can provide higher F1-score, especially when considering multi-hop premise selection.", "phrases": ["mathematical text", "link prediction problem", "premise selection", "natural language term"], "overall_score": 1.3195636168882139, "scores": [0.8875391411471001, 0.8649958548972924, 0.5979404661610659, 0.5953744457773862], "rank_score": 0.7364624769957111} -{"id": "lehman-etal-2019-inferring", "title": "Inferring Which Medical Treatments Work from Reports of Clinical Trials", "abstract": "How do we know if a particular medical treatment actually works? Ideally one would consult all available evidence from relevant clinical trials. Unfortunately, such results are primarily disseminated in natural language scientific articles, imposing substantial burden on those trying to make sense of them. In this paper, we present a new task and corpus for making this unstructured published scientific evidence actionable. The task entails inferring reported findings from a full-text article describing randomized controlled trials (RCT) with respect to a given intervention, comparator, and outcome of interest, e.g., inferring if a given article provides evidence supporting the use of aspirin to reduce risk of stroke, as compared to placebo. We present a new corpus for this task comprising 10,000+ prompts coupled with full-text articles describing RCTs. Results using a suite of baseline models \u2014 ranging from heuristic (rule-based) approaches to attentive neural architectures \u2014 demonstrate the difficulty of the task, which we believe largely owes to the lengthy, technical input texts. To facilitate further work on this important, challenging problem we make the corpus, documentation, a website and leaderboard, and all source code for baselines and evaluation publicly available.", "phrases": ["clinical trial", "full-text article", "intervention", "outcome", "evidence inference dataset"], "overall_score": 1.8888165482727546, "scores": [1.0635129676949944, 0.8638006702198839, 0.6037142082696048, 0.5839621848728038, 0.5669862675743298], "rank_score": 0.7363952597263234} -{"id": "s-etal-2015-solving", "title": "Solving Data Sparsity by Morphology Injection in Factored SMT", "abstract": "SMT approaches face the problem of data sparsity while translating into a morphologically rich language. It is very unlikely for a parallel corpus to contain all morphological forms of words. We propose a solution to generate these unseen morphological forms and inject them into original training corpora. We observe that morphology injection improves the quality of translation in terms of both adequacy and fluency. We verify this with the experiments on two morphologically rich languages: Hindi and Marathi, while translating from English.", "phrases": ["data sparsity", "morphology injection", "ongoing attempt"], "overall_score": 1.0206119916798173, "scores": [0.8468250873981936, 0.8383459907717278, 0.5234767104324993], "rank_score": 0.7362159295341403} -{"id": "fu-etal-2019-asking", "title": "Asking the Right Question: Inferring Advice-Seeking Intentions from Personal Narratives", "abstract": "People often share personal narratives in order to seek advice from others. To properly infer the narrator's intention, one needs to apply a certain degree of common sense and social intuition. 
To test the capabilities of NLP systems to recover such intuition, we introduce the new task of inferring the advice-seeking goal behind a personal narrative. We formulate this as a cloze test, where the goal is to identify which of two advice-seeking questions was removed from a given narrative. The main challenge in constructing this task is finding pairs of semantically plausible advice-seeking questions for given narratives. To address this challenge, we devise a method that exploits commonalities in experiences people share online to automatically extract pairs of questions that are appropriate candidates for the cloze task. This results in a dataset of over 20,000 personal narratives, each matched with a pair of related advice-seeking questions: one actually intended by the narrator, and the other one not. The dataset covers a very broad array of human experiences, from dating, to career options, to stolen iPads. We use human annotation to determine the degree to which the task relies on common sense and social intuition in addition to a semantic understanding of the narrative. By introducing several baselines for this new task we demonstrate its feasibility and identify avenues for better modeling the intention of the narrator.", "phrases": ["intention", "narrative", "advice"], "overall_score": 1.020356119906528, "scores": [0.802285862365844, 0.8616882736327419, 0.5441199351961932], "rank_score": 0.7360313570649263} -{"id": "gao-etal-2021-abcd", "title": "ABCD: A Graph Framework to Convert Complex Sentences to a Covering Set of Simple Sentences", "abstract": "Atomic clauses are fundamental text units for understanding complex sentences. Identifying the atomic sentences within complex sentences is important for applications such as summarization, argument mining, discourse analysis, discourse parsing, and question answering. Previous work mainly relies on rule-based methods dependent on parsing. We propose a new task to decompose each complex sentence into simple sentences derived from the tensed clauses in the source, and a novel problem formulation as a graph edit task. Our neural model learns to Accept, Break, Copy or Drop elements of a graph that combines word adjacency and grammatical dependencies. The full processing pipeline includes modules for graph construction, graph editing, and sentence generation from the output graph. We introduce DeSSE, a new dataset designed to train and evaluate complex sentence decomposition, and MinWiki, a subset of MinWikiSplit. ABCD achieves comparable performance to two parsing baselines on MinWiki. On DeSSE, which has a more even balance of complex sentence types, our model achieves higher accuracy on the number of atomic sentences than an encoder-decoder baseline. Results include a detailed error analysis.", "phrases": ["simple sentence", "subset", "abcd"], "overall_score": 0.808583029614267, "scores": [0.8920745756681743, 0.7856182566987471, 0.530319142252105], "rank_score": 0.7360039915396754} -{"id": "preotiuc-pietro-etal-2019-automatically", "title": "Automatically Identifying Complaints in Social Media", "abstract": "Complaining is a basic speech act regularly used in human and computer mediated communication to express a negative mismatch between reality and expectations in a particular situation. Automatically identifying complaints in social media is of utmost importance for organizations or brands to improve the customer experience or in developing dialogue systems for handling and responding to complaints. 
In this paper, we introduce the first systematic analysis of complaints in computational linguistics. We collect a new annotated data set of written complaints expressed on Twitter. We present an extensive linguistic analysis of complaining as a speech act in social media and train strong feature-based and neural models of complaints across nine domains achieving a predictive performance of up to 79 F1 using distant supervision.", "phrases": ["complaint", "computational linguistic", "social medium"], "overall_score": 1.3182947564171488, "scores": [1.10410243669021, 0.5611562547198028, 0.5420042460395837], "rank_score": 0.7357543124831988} -{"id": "deneefe-etal-2007-syntax", "title": "What Can Syntax-Based MT Learn from Phrase-Based MT?", "abstract": "We compare and contrast the strengths and weaknesses of a syntax-based machine translation model with a phrase-based machine translation model on several levels. We briefly describe each model, highlighting points where they differ. We include a quantitative comparison of the phrase pairs that each model has to work with, as well as the reasons why some phrase pairs are not learned by the syntax-based model. We then evaluate proposed improvements to the syntax-based extraction techniques in light of phrase pairs captured. We also compare the translation accuracy for all variations.", "phrases": ["translation rule", "string-to-tree model", "coverage"], "overall_score": 2.1252216537448185, "scores": [1.1146617537248045, 0.569837070815642, 0.5213298699231182], "rank_score": 0.7352762314878549} -{"id": "meng-etal-2021-gemnet", "title": "GEMNET: Effective Gated Gazetteer Representations for Recognizing Complex Entities in Low-context Input", "abstract": "Named Entity Recognition (NER) remains difficult in real-world settings; current challenges include short texts (low context), emerging entities, and complex entities (e.g. movie names). Gazetteer features can help, but results have been mixed due to challenges with adding extra features, and a lack of realistic evaluation data. It has been shown that including gazetteer features can cause models to overuse or underuse them, leading to poor generalization. We propose GEMNET, a novel approach for gazetteer knowledge integration, including (1) a flexible Contextual Gazetteer Representation (CGR) encoder that can be fused with any word-level model; and (2) a Mixture-of-Experts gating network that overcomes the feature overuse issue by learning to conditionally combine the context and gazetteer features, instead of assigning them fixed weights. To comprehensively evaluate our approaches, we create 3 large NER datasets (24M tokens) reflecting current challenges. In an uncased setting, our methods show large gains (up to +49% F1) in recognizing difficult entities compared to existing baselines. On standard benchmarks, we achieve a new uncased SOTA on CoNLL03 and WNUT17.", "phrases": ["complex entity", "gazetteer feature", "word-level model", "gemnet", "complex ner task"], "overall_score": 2.3365698960516523, "scores": [0.8082028428564244, 0.8629462559271586, 0.8523219337512067, 0.5856888126380995, 0.5669419781178693], "rank_score": 0.7352203646581517} -{"id": "lin-etal-2021-rockner", "title": "RockNER: A Simple Method to Create Adversarial Examples for Evaluating the Robustness of Named Entity Recognition Models", "abstract": "To audit the robustness of named entity recognition (NER) models, we propose RockNER, a simple yet effective method to create natural adversarial examples. 
Specifically, at the entity level, we replace target entities with other entities of the same semantic class in Wikidata; at the context level, we use pre-trained language models (e.g., BERT) to generate word substitutions. Together, the two levels of attack produce natural adversarial examples that result in a shifted distribution from the training data on which our target models have been trained. We apply the proposed method to the OntoNotes dataset and create a new benchmark named OntoRock for evaluating the robustness of existing NER models via a systematic evaluation protocol. Our experiments and analysis reveal that even the best model has a significant performance drop, and these models seem to memorize in-domain entity patterns instead of reasoning from the context. Our work also studies the effects of a few simple data augmentation methods to improve the robustness of NER models.", "phrases": ["robustness", "wikidata", "ner model"], "overall_score": 1.3169192210212606, "scores": [0.7992529435237127, 0.8761278620833224, 0.5295790290776337], "rank_score": 0.7349866115615562} -{"id": "paul-frank-2019-ranking", "title": "Ranking and Selecting Multi-Hop Knowledge Paths to Better Predict Human Needs", "abstract": "To make machines better understand sentiments, research needs to move from polarity identification to understanding the reasons that underlie the expression of sentiment. Categorizing the goals or needs of humans is one way to explain the expression of sentiment in text. Humans are good at understanding situations described in natural language and can easily connect them to the character's psychological needs using commonsense knowledge. We present a novel method to extract, rank, filter and select multi-hop relation paths from a commonsense knowledge resource to interpret the expression of sentiment in terms of their underlying human needs. We efficiently integrate the acquired knowledge paths in a neural model that interfaces context representations with knowledge using a gated attention mechanism. We assess the model's performance on a recently published dataset for categorizing human needs. Selectively integrating knowledge paths boosts performance and establishes a new state-of-the-art. Our model offers interpretability through the learned attention map over commonsense knowledge paths. Human evaluation highlights the relevance of the encoded knowledge.", "phrases": ["human need", "commonsense knowledge", "relevance"], "overall_score": 1.7621066361681605, "scores": [0.8571022140840736, 0.8264768836321911, 0.5209875355609619], "rank_score": 0.7348555444257423} -{"id": "vajjala-lucic-2018-onestopenglish", "title": "OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification", "abstract": "This paper describes the collection and compilation of the OneStopEnglish corpus of texts written at three reading levels, and demonstrates its usefulness through two applications - automatic readability assessment and automatic text simplification. The corpus consists of 189 texts, each in three versions (567 in total). 
The corpus is now freely available under a CC BY-SA 4.0 license and we hope that it would foster further research on the topics of readability assessment and text simplification.", "phrases": ["automatic readability assessment", "text simplification", "educational nlp", "agreement"], "overall_score": 1.5278163605987312, "scores": [0.9268788996724041, 0.8997026082476645, 0.5778057438508382, 0.5345101973288449], "rank_score": 0.734724362274938} -{"id": "lai-etal-2020-event", "title": "Event Detection: Gate Diversity and Syntactic Importance Scores for Graph Convolution Neural Networks", "abstract": "Recent studies on event detection (ED) have shown that the syntactic dependency graph can be employed in graph convolution neural networks (GCN) to achieve state-of-the-art performance. However, the computation of the hidden vectors in such graph-based models is agnostic to the trigger candidate words, potentially leaving irrelevant information for the trigger candidate for event prediction. In addition, the current models for ED fail to exploit the overall contextual importance scores of the words, which can be obtained via the dependency tree, to boost the performance. In this study, we propose a novel gating mechanism to filter noisy information in the hidden vectors of the GCN models for ED based on the information from the trigger candidate. We also introduce novel mechanisms to achieve the contextual diversity for the gates and the importance score consistency for the graphs and models in ED. The experiments show that the proposed model achieves state-of-the-art performance on two ED datasets.", "phrases": ["graph-based model", "dependency tree", "event detection"], "overall_score": 1.5274226412854404, "scores": [0.8097403728211419, 0.8646581621851843, 0.529206534917701], "rank_score": 0.734535023308009} -{"id": "spithourakis-etal-2016-numerically", "title": "Numerically Grounded Language Models for Semantic Error Correction", "abstract": "Semantic error detection and correction is an important task for applications such as fact checking, speech-to-text or grammatical error correction. Current approaches generally focus on relatively shallow semantics and do not account for numeric quantities. Our approach uses language models grounded in numbers within the text. Such groundings are easily achieved for recurrent neural language model architectures, which can be further conditioned on incomplete background knowledge bases. Our evaluation on clinical reports shows that numerical grounding improves perplexity by 33% and F1 for semantic error correction by 5 points when compared to ungrounded approaches. Conditioning on a knowledge base yields further improvements.", "phrases": ["language model", "semantic error correction", "knowledge base", "report"], "overall_score": 1.4292924128217108, "scores": [0.9604706758616535, 0.8551871159537051, 0.5915037042789468, 0.530882510748649], "rank_score": 0.7345110017107386} -{"id": "rozovskaya-roth-2011-algorithm", "title": "Algorithm Selection and Model Adaptation for ESL Correction Tasks", "abstract": "We consider the problem of correcting errors made by English as a Second Language (ESL) writers and address two issues that are essential to making progress in ESL error correction - algorithm selection and model adaptation to the first language of the ESL learner. \n \nA variety of learning algorithms have been applied to correct ESL mistakes, but often comparisons were made between incomparable data sets. 
We conduct an extensive, fair comparison of four popular learning methods for the task, reversing conclusions from earlier evaluations. Our results hold for different training sets, genres, and feature sets. \n \nA second key issue in ESL error correction is the adaptation of a model to the first language of the writer. Errors made by non-native speakers exhibit certain regularities and, as we show, models perform much better when they use knowledge about error patterns of the non-native writers. We propose a novel way to adapt a learned algorithm to the first language of the writer that is cheaper to implement and performs better than other adaptation methods.", "phrases": ["model adaptation", "correction", "determiner", "na\u0131\u0308ve bayes classifier"], "overall_score": 2.080674595622044, "scores": [0.9086046030796616, 0.9462154377033342, 0.547470647414345, 0.5352566729812033], "rank_score": 0.734386840294636} -{"id": "kuznetsova-etal-2012-collective", "title": "Collective Generation of Natural Image Descriptions", "abstract": "We present a holistic data-driven approach to image description generation, exploiting the vast amount of (noisy) parallel image data and associated natural language descriptions available on the web. More specifically, given a query image, we retrieve existing human-composed phrases used to describe visually similar images, then selectively combine those phrases to generate a novel description for the query image. We cast the generation process as constraint optimization problems, collectively incorporating multiple interconnected aspects of language composition for content planning, surface realization and discourse structure. Evaluation by human annotators indicates that our final system generates more semantically correct and linguistically appealing descriptions than two nontrivial baselines.", "phrases": ["image", "data-driven approach", "caption", "ilp"], "overall_score": 1.6909659083803035, "scores": [1.2414095846381046, 0.5801258999104542, 0.5641113778095879, 0.5518617900261952], "rank_score": 0.7343771630960855} -{"id": "ke-etal-2018-generating", "title": "Generating Informative Responses with Controlled Sentence Function", "abstract": "Sentence function is a significant factor to achieve the purpose of the speaker, which, however, has not been touched in large-scale conversation generation so far. In this paper, we present a model to generate informative responses with controlled sentence function. Our model utilizes a continuous latent variable to capture various word patterns that realize the expected sentence function, and introduces a type controller to deal with the compatibility of controlling sentence function and generating informative content. Conditioned on the latent variable, the type controller determines the type (i.e., function-related, topic, and ordinary word) of a word to be generated at each decoding position. 
Experiments show that our model outperforms state-of-the-art baselines, and it has the ability to generate responses with both controlled sentence function and informative content.", "phrases": ["sentence function", "compatibility", "informative content", "conditional variational autoencoder"], "overall_score": 1.61341301440305, "scores": [1.2557015278232397, 0.5624621326487237, 0.5597469245530301, 0.5592730438948195], "rank_score": 0.7342959072299533} -{"id": "ruokolainen-etal-2014-painless", "title": "Painless Semi-Supervised Morphological Segmentation using Conditional Random Fields", "abstract": "We discuss data-driven morphological segmentation, in which word forms are segmented into morphs, that is the surface forms of morphemes. We extend a recent segmentation approach based on conditional random fields from purely supervised to semi-supervised learning by exploiting available unsupervised segmentation techniques. We integrate the unsupervised techniques into the conditional random field model via feature set augmentation. Experiments on three diverse languages show that this straightforward semi-supervised extension greatly improves the segmentation accuracy of the purely supervised CRFs in a computationally efficient manner.", "phrases": ["segmentation", "random field", "augmentation", "crf-based model"], "overall_score": 1.8832039373513483, "scores": [0.9738062484787001, 0.8247751271454231, 0.5886710795458832, 0.5495758012991285], "rank_score": 0.7342070641172838} -{"id": "lawrence-riezler-2018-improving", "title": "Improving a Neural Semantic Parser by Counterfactual Learning from Human Bandit Feedback", "abstract": "Counterfactual learning from human bandit feedback describes a scenario where user feedback on the quality of outputs of a historic system is logged and used to improve a target system. We show how to apply this learning framework to neural semantic parsing. From a machine learning perspective, the key challenge lies in a proper reweighting of the estimator so as to avoid known degeneracies in counterfactual learning, while still being applicable to stochastic gradient optimization. To conduct experiments with human users, we devise an easy-to-use interface to collect human feedback on semantic parses. Our work is the first to show that semantic parsers can be improved significantly by counterfactual learning from logged human feedback data.", "phrases": ["counterfactual learning", "feedback", "parse"], "overall_score": 1.3154966373476509, "scores": [0.8167432299760424, 0.8313081747509281, 0.5545265527614985], "rank_score": 0.7341926524961564} -{"id": "tran-etal-2018-parsing", "title": "Parsing Speech: a Neural Approach to Integrating Lexical and Acoustic-Prosodic Information", "abstract": "In conversational speech, the acoustic signal provides cues that help listeners disambiguate difficult parses. For automatically parsing spoken utterances, we introduce a model that integrates transcribed text and acoustic-prosodic features using a convolutional neural network over energy and pitch trajectories coupled with an attention-based recurrent neural network that accepts text and prosodic features. We find that different types of acoustic-prosodic features are individually helpful, and together give statistically significant improvements in parse and disfluency detection F1 scores over a strong text-only baseline. 
For this study with known sentence boundaries, error analyses show that the main benefit of acoustic-prosodic features is in sentences with disfluencies, that attachment decisions are most improved, and that transcription errors obscure gains from prosody.", "phrases": ["energy", "disfluency", "prosodic cue"], "overall_score": 1.526139937082769, "scores": [1.0925090104250128, 0.5843819431541831, 0.52486356535271], "rank_score": 0.7339181729773019} -{"id": "singh-etal-2019-bert", "title": "BERT is Not an Interlingua and the Bias of Tokenization", "abstract": "Multilingual transfer learning can benefit both high- and low-resource languages, but the source of these improvements is not well understood. Canonical Correlation Analysis (CCA) of the internal representations of a pre-trained, multilingual BERT model reveals that the model partitions representations for each language rather than using a common, shared, interlingual space. This effect is magnified at deeper layers, suggesting that the model does not progressively abstract semantic content while disregarding languages. Hierarchical clustering based on the CCA similarity scores between languages reveals a tree structure that mirrors the phylogenetic trees hand-designed by linguists. The subword tokenization employed by BERT provides a stronger bias towards such structure than character- and word-level tokenizations. We release a subset of the XNLI dataset translated into an additional 14 languages at to assist further research into multilingual representations.", "phrases": ["tokenization", "multilinguality", "bert"], "overall_score": 1.6124311155155713, "scores": [0.8461185424917952, 0.7943420195203239, 0.5610865153517941], "rank_score": 0.7338490257879711} -{"id": "hermann-blunsom-2014-multilingual", "title": "Multilingual Models for Compositional Distributed Semantics", "abstract": "We present a novel technique for learning semantic representations, which extends the distributional hypothesis to multilingual data and joint-space embeddings. Our models leverage parallel data and learn to strongly align the embeddings of semantically equivalent sentences, while maintaining sufficient distance between those of dissimilar sentences. The models do not rely on word alignments or any syntactic information and are successfully applied to a number of diverse languages. We extend our approach to learn semantic representations at the document level, too. We evaluate these models on two cross-lingual document classification tasks, outperforming the prior state of the art. Through qualitative analysis and the study of pivoting effects we demonstrate that our representations are semantically plausible and can capture semantic relationships across languages without parallel data.", "phrases": ["parallel data", "document classification task", "word embedding", "different language", "objective"], "overall_score": 2.565841855236833, "scores": [1.4797203035455735, 0.5781363861270074, 0.5471397985642186, 0.5355571141448046, 0.5285959849292933], "rank_score": 0.7338299174621794} -{"id": "coope-etal-2020-span", "title": "Span-ConveRT: Few-shot Span Extraction for Dialog with Pretrained Conversational Representations", "abstract": "We introduce Span-ConveRT, a light-weight model for dialog slot-filling which frames the task as a turn-based span extraction task. This formulation allows for a simple integration of conversational knowledge coded in large pretrained conversational models such as ConveRT (Henderson et al., 2019). 
We show that leveraging such knowledge in Span-ConveRT is especially useful for few-shot learning scenarios: we report consistent gains over 1) a span extractor that trains representations from scratch in the target domain, and 2) a BERT-based span extractor. In order to inspire more work on span extraction for the slot-filling task, we also release RESTAURANTS-8K, a new challenging data set of 8,198 utterances, compiled from actual conversations in the restaurant booking domain.", "phrases": ["span extraction", "dialog", "few-shot setting"], "overall_score": 1.5259439764242604, "scores": [0.8357983141554627, 0.8389736094168332, 0.52669988388937], "rank_score": 0.7338239358205553} -{"id": "yang-mitchell-2017-joint", "title": "A Joint Sequential and Relational Model for Frame-Semantic Parsing", "abstract": "We introduce a new method for frame-semantic parsing that significantly improves the prior state of the art. Our model leverages the advantages of a deep bidirectional LSTM network which predicts semantic role labels word by word and a relational network which predicts semantic roles for individual text expressions in relation to a predicate. The two networks are integrated into a single model via knowledge distillation, and a unified graphical model is employed to jointly decode frames and semantic roles during inference. Experiments on the standard FrameNet data show that our model significantly outperforms existing neural and non-neural approaches, achieving a 5.7 F1 gain over the current state of the art, for full frame structure extraction.", "phrases": ["frame-semantic parsing", "relational network", "predicate", "frame", "full-text annotation"], "overall_score": 1.881952755500117, "scores": [0.9905972772029714, 1.0848474302348454, 0.5334155619287323, 0.5307365869051617, 0.5289994651828316], "rank_score": 0.7337192642909084} -{"id": "ishiwatari-etal-2019-learning", "title": "Learning to Describe Unknown Phrases with Local and Global Contexts", "abstract": "When reading a text, it is common to become stuck on unfamiliar words and phrases, such as polysemous words with novel senses, rarely used idioms, internet slang, or emerging entities. If we humans cannot figure out the meaning of those expressions from the immediate local context, we consult dictionaries for definitions or search documents or the web to find other global context to help in interpretation. Can machines help us do this work? Which type of context is more important for machines to solve the problem? To answer these questions, we undertake a task of describing a given phrase in natural language based on its local and global contexts. To solve this task, we propose a neural description model that consists of two context encoders and a description decoder. In contrast to the existing methods for non-standard English explanation [Ni+ 2017] and definition generation [Noraset+ 2017; Gadetsky+ 2018], our model appropriately takes important clues from both local and global contexts. 
Experimental results on three existing datasets (including WordNet, Oxford and Urban Dictionaries) and a dataset newly created from Wikipedia demonstrate the effectiveness of our method over previous work.", "phrases": ["local context", "definition", "wikipedia"], "overall_score": 1.6120022808329615, "scores": [1.1066515593241768, 0.5675699216334877, 0.5267400831814745], "rank_score": 0.7336538547130463} -{"id": "venugopal-etal-2008-wider", "title": "Wider Pipelines: N-Best Alignments and Parses in MT Training", "abstract": "State-of-the-art statistical machine translation systems use hypotheses from several maximum a posteriori inference steps, including word alignments and parse trees, to identify translational structure and estimate the parameters of translation models. While this approach leads to a modular pipeline of independently developed components, errors made in these \u201csingle-best\u201d hypotheses can propagate to downstream estimation steps that treat these inputs as clean, trustworthy training data. In this work we integrate N-best alignments and parses by using a probability distribution over these alternatives to generate posterior fractional counts for use in downstream estimation. Using these fractional counts in a DOP-inspired syntax-based translation system, we show significant improvements in translation quality over a single-best trained baseline.", "phrases": ["n-best alignment", "translation model", "posterior fractional count", "hypothesis"], "overall_score": 1.6892217798719904, "scores": [1.2533732740644947, 0.5821775511402784, 0.5565167688964004, 0.542411196735606], "rank_score": 0.7336196977091949} -{"id": "yangarber-2003-counter", "title": "Counter-Training in Discovery of Semantic Patterns", "abstract": "This paper presents a method for unsupervised discovery of semantic patterns. Semantic patterns are useful for a variety of text understanding tasks, in particular for locating events in text for information extraction. The method builds upon previously described approaches to iterative unsupervised pattern acquisition. One common characteristic of prior approaches is that the output of the algorithm is a continuous stream of patterns, with gradually degrading precision. Our method differs from the previous pattern acquisition algorithms in that it introduces competition among several scenarios simultaneously. This provides natural stopping criteria for the unsupervised learners, while maintaining good precision levels at termination. We discuss the results of experiments with several scenarios, and examine different aspects of the new procedure.", "phrases": ["discovery", "information extraction", "negative example"], "overall_score": 2.0784522462763273, "scores": [0.8488983669213546, 0.8273847315594094, 0.5245242469703312], "rank_score": 0.7336024484836984} -{"id": "kerrigan-etal-2020-differentially", "title": "Differentially Private Language Models Benefit from Public Pre-training", "abstract": "Language modeling is a keystone task in natural language processing. When training a language model on sensitive information, differential privacy (DP) allows us to quantify the degree to which our private data is protected. However, training algorithms which enforce differential privacy often lead to degradation in model quality. We study the feasibility of learning a language model which is simultaneously high-quality and privacy preserving by tuning a public base model on a private corpus.
We find that DP fine-tuning boosts the performance of language models in the private domain, making the training of such models possible.", "phrases": ["language model", "private data", "recent system"], "overall_score": 1.5251045579877982, "scores": [1.1260495149580425, 0.5529676630495888, 0.5212436046385198], "rank_score": 0.7334202608820504} -{"id": "morio-fujita-2018-end", "title": "End-to-End Argument Mining for Discussion Threads Based on Parallel Constrained Pointer Architecture", "abstract": "Argument Mining (AM) is a relatively recent discipline, which concentrates on extracting claims or premises from discourses, and inferring their structures. However, many existing works do not consider micro-level AM studies on discussion threads sufficiently. In this paper, we tackle AM for discussion threads. Our main contributions are as follows: (1) A novel combination scheme focusing on micro-level inner- and inter-post schemes for a discussion thread. (2) Annotation of large-scale civic discussion threads with the scheme. (3) Parallel constrained pointer architecture (PCPA), a novel end-to-end technique to discriminate sentence types, inner-post relations, and inter-post interactions simultaneously. The experimental results demonstrate that our proposed model shows better accuracy in terms of relation extraction, in comparison to existing state-of-the-art models.", "phrases": ["argument mining", "discussion thread", "constrained pointer architecture"], "overall_score": 1.4268669225599988, "scores": [0.794081550417474, 0.8491362807065409, 0.5565758077334173], "rank_score": 0.7332645462858108} -{"id": "levinboim-etal-2015-model", "title": "Model Invertibility Regularization: Sequence Alignment With or Without Parallel Data", "abstract": "We present Model Invertibility Regularization (MIR), a method that jointly trains two directional sequence alignment models, one in each direction, and takes into account the invertibility of the alignment task. By coupling the two models through their parameters (as opposed to through their inferences, as in Liang et al.\u2019s Alignment by Agreement (ABA), and Ganchev et al.\u2019s Posterior Regularization (PostCAT)), our method seamlessly extends to all IBM-style word alignment models as well as to alignment without parallel data. Our proposed algorithm is mathematically sound and inherits convergence guarantees from EM. We evaluate MIR on two tasks: (1) On word alignment, applying MIR on fertility based models we attain higher F-scores than ABA and PostCAT. (2) On Japanese-to-English back-transliteration without parallel data, applied to the decipherment model of Ravi and Knight, MIR learns sparser models that close the gap in whole-name error rate by 33% relative to a model trained on parallel data, and further, beats a previous approach by Mylonakis et al.", "phrases": ["parallel data", "sequence alignment model", "agreement", "model invertibility regularization"], "overall_score": 0.8054953714846091, "scores": [0.9200555759438553, 0.8697244922669053, 0.6198483780065807, 0.5231454897492496], "rank_score": 0.7331934839916476} -{"id": "chen-etal-2018-accurate", "title": "Accurate SHRG-Based Semantic Parsing", "abstract": "We demonstrate that an SHRG-based parser can produce semantic graphs much more accurately than previously shown, by relating synchronous production rules to the syntacto-semantic composition process.
Our parser achieves an accuracy of 90.35 for EDS (89.51 for DMRS) in terms of elementary dependency match, which is a 4.87 (5.45) point improvement over the best existing data-driven model, indicating, in our view, the importance of linguistically-informed derivation for data-driven semantic parsing. This accuracy is equivalent to that of English Resource Grammar guided models, suggesting that (recurrent) neural network models are able to effectively learn deep linguistic knowledge from annotations.", "phrases": ["semantic parsing", "composition process", "eds", "shrg"], "overall_score": 1.6107460372298277, "scores": [0.9605280621070114, 0.9045228621542862, 0.5374047529343285, 0.5298727772450162], "rank_score": 0.7330821136101606} -{"id": "abend-etal-2015-lexical", "title": "Lexical Event Ordering with an Edge-Factored Model", "abstract": "Extensive lexical knowledge is necessary for temporal analysis and planning tasks. We address in this paper a lexical setting that allows for the straightforward incorporation of rich features and structural constraints. We explore a lexical event ordering task, namely determining the likely temporal order of events based solely on the identity of their predicates and arguments. We propose an \u201cedge-factored\u201d model for the task that decomposes over the edges of the event graph. We learn it using the structured perceptron. As lexical tasks require large amounts of text, we do not attempt manual annotation and instead use the textual order of events in a domain where this order is aligned with their temporal order, namely cooking recipes.", "phrases": ["edge-factored model", "temporal order", "identity", "lexical event"], "overall_score": 0.8049124356851709, "scores": [0.937319106163178, 0.8917940275766678, 0.5710652769365913, 0.5304730811651384], "rank_score": 0.7326628729603939} -{"id": "scansani-dugast-2021-glossary", "title": "Glossary functionality in commercial machine translation: does it help? A first step to identify best practices for a language service provider", "abstract": "Recently, a number of commercial Machine Translation (MT) providers have started to offer glossary features allowing users to enforce terminology into the output of a generic model. However, to the best of our knowledge it is not clear how such features would impact terminology accuracy and the overall quality of the output. The present contribution aims at providing a first insight into the performance of the glossary-enhanced generic models offered by four providers. Our tests involve two different domains and language pairs, i.e. Sportswear En\u2013Fr and Industrial Equipment De\u2013En. The output of each generic model and of the glossary-enhanced one will be evaluated relying on Translation Error Rate (TER) to take into account the overall output quality and on accuracy to assess the compliance with the glossary. This is followed by a manual evaluation.
The present contribution mainly focuses on understanding how these glossary features can be fruitfully exploited by language service providers (LSPs), especially in a scenario in which a customer glossary is already available and is added to the generic model as is.", "phrases": ["commercial machine translation", "language service provider", "output quality", "glossary"], "overall_score": 0.8048629333750819, "scores": [0.9189040087610911, 0.9091938346356686, 0.5741909061707909, 0.528182506496218], "rank_score": 0.7326178140159422} -{"id": "haouari-etal-2021-arcov", "title": "ArCOV-19: The First Arabic COVID-19 Twitter Dataset with Propagation Networks", "abstract": "In this paper, we present ArCOV-19, an Arabic COVID-19 Twitter dataset that spans one year, covering the period from 27th of January 2020 till 31st of January 2021. ArCOV-19 is the first publicly-available Arabic Twitter dataset covering the COVID-19 pandemic that includes about 2.7M tweets alongside the propagation networks of the most-popular subset of them (i.e., most-retweeted and -liked). The propagation networks include both retweets and conversational threads (i.e., threads of replies). ArCOV-19 is designed to enable research under several domains including natural language processing, information retrieval, and social computing. Preliminary analysis shows that ArCOV-19 captures rising discussions associated with the first reported cases of the disease as they appeared in the Arab world. In addition to the source tweets and the propagation networks, we also release the search queries and the language-independent crawler used to collect the tweets to encourage the curation of similar datasets.", "phrases": ["twitter", "propagation networks", "arcov-19"], "overall_score": 1.425530637534927, "scores": [0.8177265426705274, 0.8138887910330803, 0.5661181611758709], "rank_score": 0.7325778316264929} -{"id": "zhan-etal-2021-scope", "title": "Out-of-Scope Intent Detection with Self-Supervision and Discriminative Training", "abstract": "Out-of-scope intent detection is of practical importance in task-oriented dialogue systems. Since the distribution of outlier utterances is arbitrary and unknown in the training stage, existing methods commonly rely on strong assumptions on data distribution such as mixture of Gaussians to make inference, resulting in either complex multi-step training procedures or hand-crafted rules such as confidence threshold selection for outlier detection. In this paper, we propose a simple yet effective method to train an out-of-scope intent classifier in a fully end-to-end manner by simulating the test scenario in training, which requires no assumption on data distribution and no additional post-processing or threshold setting. Specifically, we construct a set of pseudo outliers in the training stage, by generating synthetic outliers using inlier features via self-supervision and sampling out-of-scope sentences from easily available open-domain datasets. The pseudo outliers are used to train a discriminative classifier that can be directly applied to and generalize well on the test task. We evaluate our method extensively on four benchmark dialogue datasets and observe significant improvements over state-of-the-art approaches.
Our code has been released at .", "phrases": ["detection", "self-supervision", "out-of-scope"], "overall_score": 1.1766632990470987, "scores": [0.8002438787972248, 0.8533390717960337, 0.5397230911431196], "rank_score": 0.731102013912126} -{"id": "ziering-van-der-plas-2016-towards", "title": "Towards Unsupervised and Language-independent Compound Splitting using Inflectional Morphological Transformations", "abstract": "In this paper, we address the task of language-independent, knowledge-lean and unsupervised compound splitting, which is an essential component for many natural language processing tasks such as machine translation. Previous methods on statistical compound splitting either include language-specific knowledge (e.g., linking elements) or rely on parallel data, which results in limited applicability. We aim to overcome these limitations by learning compounding morphology from inflectional information derived from lemmatized monolingual corpora. In experiments for Germanic languages, we show that our approach significantly outperforms language-dependent state-of-the-art methods in finding the correct split point and that word inflection is a good approximation for compounding morphology.", "phrases": ["inflectional information", "monolingual corpora", "van der"], "overall_score": 1.422358590863019, "scores": [1.0478516870612338, 0.5962470554881156, 0.5487444237502904], "rank_score": 0.7309477220998799} -{"id": "huang-etal-2021-efficient", "title": "Efficient Attentions for Long Document Summarization", "abstract": "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.", "phrases": ["long document summarization", "memory complexity", "scalability", "encoder-decoder attention", "new dataset"], "overall_score": 1.682878508918372, "scores": [0.9562297331742083, 1.1267980563499747, 0.5247602370592326, 0.5245838642144287, 0.5219523598862621], "rank_score": 0.7308648501368212} -{"id": "chen-huang-2016-semi", "title": "Semi-supervised Convolutional Networks for Translation Adaptation with Tiny Amount of In-domain Data", "abstract": "In this paper, we propose a method which uses semi-supervised convolutional neural networks (CNNs) to select in-domain training data for statistical machine translation. This approach is particularly effective when only tiny amounts of in-domain data are available. The in-domain data and randomly sampled general-domain data are used to train a data selection model with semi-supervised CNN, then this model computes domain relevance scores for all the sentences in the general-domain data set. The sentence pairs with top scores are selected to train the system. We carry out experiments on 4 language directions with three test domains.
Compared with strong baseline systems trained with a large amount of data, this method can improve the performance by up to 3.1 BLEU. Its performance is significantly better than three state-of-the-art language model based data selection methods. We also show that the in-domain data used to train the selection model could be as few as 100 sentences, which makes fine-grained topic-dependent translation adaptation possible.", "phrases": ["translation adaptation", "in-domain data", "convolutional neural network", "cnns", "baseline system"], "overall_score": 1.605765762662152, "scores": [0.860529430961585, 0.8135547609440102, 0.8960679062493108, 0.5468408447331102, 0.537084521985611], "rank_score": 0.7308154929747254} -{"id": "liu-etal-2018-jointly", "title": "Jointly Multiple Events Extraction via Attention-based Graph Information Aggregation", "abstract": "Event extraction is of practical utility in natural language processing. In the real world, it is a common phenomenon that multiple events exist in the same sentence, and extracting them is more difficult than extracting a single event. Previous works on modeling the associations between events by sequential modeling methods suffer a lot from the low efficiency in capturing very long-range dependencies. In this paper, we propose a novel Jointly Multiple Events Extraction (JMEE) framework to jointly extract multiple event triggers and arguments by introducing syntactic shortcut arcs to enhance information flow and attention-based graph convolution networks to model graph information. The experiment results demonstrate that our proposed framework achieves competitive results compared with state-of-the-art methods.", "phrases": ["multiple events extraction", "trigger", "graph convolution network", "dependency tree", "attention-based gcn"], "overall_score": 2.3809644615460295, "scores": [0.8947413628282599, 0.8436073401492158, 0.8371031047976041, 0.5484237796536581, 0.5300438617965114], "rank_score": 0.7307838898450499} -{"id": "potash-rumshisky-2017-towards", "title": "Towards Debate Automation: a Recurrent Model for Predicting Debate Winners", "abstract": "In this paper we introduce a practical first step towards the creation of an automated debate agent: a state-of-the-art recurrent predictive model for predicting debate winners. By having an accurate predictive model, we are able to objectively rate the quality of a statement made at a specific turn in a debate. The model is based on a recurrent neural network architecture with attention, which allows the model to effectively account for the entire debate when making its prediction. Our model achieves state-of-the-art accuracy on a dataset of debate transcripts annotated with audience favorability of the debate teams. Finally, we discuss how future work can leverage our proposed model for the creation of an automated debate agent. We accomplish this by determining the model input that will maximize audience favorability toward a given side of a debate at an arbitrary turn.", "phrases": ["debate", "winner", "audience favorability"], "overall_score": 1.309383856342633, "scores": [1.1372116148002476, 0.5302308112279328, 0.5249007073502454], "rank_score": 0.7307810444594752} -{"id": "magdy-darwish-2006-arabic", "title": "Arabic OCR Error Correction Using Character Segment Correction, Language Modeling, and Shallow Morphology", "abstract": "This paper explores the use of a character segment based character correction model, language modeling, and shallow morphology for Arabic OCR error correction.
Experimentation shows that character segment based correction is superior to single character correction and that language modeling boosts correction, by improving the ranking of candidate corrections, while shallow morphology had a small adverse effect. Further, given a sufficiently large corpus to extract a dictionary and to train a language model, word based correction works well for a morphologically rich language such as Arabic.", "phrases": ["ocr error correction", "language modeling", "arabic"], "overall_score": 1.0127877369100315, "scores": [0.8365647861668946, 0.7994020486061436, 0.5557489334968498], "rank_score": 0.7305719227566293} -{"id": "lee-etal-2020-iterative", "title": "Iterative Refinement in the Continuous Space for Non-Autoregressive Neural Machine Translation", "abstract": "We propose an efficient inference procedure for non-autoregressive machine translation that iteratively refines translation purely in the continuous space. Given a continuous latent variable model for machine translation (Shu et al., 2020), we train an inference network to approximate the gradient of the marginal log probability of the target sentence, using the latent variable instead. This allows us to use gradient-based optimization to find the target sentence at inference time that approximately maximizes its marginal probability. As each refinement step only involves computation in the latent space of low dimensionality (we use 8 in our experiments), we avoid computational overhead incurred by existing non-autoregressive inference procedures that often refine in token space. We compare our approach to a recently proposed EM-like inference procedure (Shu et al., 2020) that optimizes in a hybrid space, consisting of both discrete and continuous variables. We evaluate our approach on WMT'14 En\u2192De, WMT'16 Ro\u2192En and IWSLT'16 De\u2192En, and observe two advantages over the EM-like inference: (1) it is computationally efficient, i.e. each refinement step is twice as fast, and (2) it is more effective, resulting in higher marginal probabilities and BLEU scores with the same number of refinement steps. On WMT'14 En\u2192De, for instance, our approach is able to decode 6.2 times faster than the autoregressive model with minimal degradation to translation quality (0.9 BLEU).", "phrases": ["latent variable", "inference network", "iterative refinement"], "overall_score": 1.1757926979168736, "scores": [0.8131853447360424, 0.8559306030689415, 0.5225672892425662], "rank_score": 0.7305610790158501} -{"id": "zugarini-etal-2020-vulgaris", "title": "Vulgaris: Analysis of a Corpus for Middle-Age Varieties of Italian Language", "abstract": "Italian is a Romance language that has its roots in Vulgar Latin. The birth of the modern Italian started in Tuscany around the 14th century, and it is mainly attributed to the works of Dante Alighieri, Francesco Petrarca and Giovanni Boccaccio, who are among the most acclaimed authors of the medieval age in Tuscany. However, Italy has been characterized by a high variety of dialects, which are often loosely related to each other, due to the past fragmentation of the territory. Italian has absorbed influences from many of these dialects, as also from other languages due to dominion of portions of the country by other nations, such as Spain and France. In this work we present Vulgaris, a project aimed at studying a corpus of Italian textual resources from authors of different regions, ranging in a time period between 1200 and 1600.
Each composition is associated with its author, and authors are also grouped in families, i.e. sharing similar stylistic/chronological characteristics. Hence, the dataset is not only a valuable resource for studying the diachronic evolution of Italian and the differences between its dialects, but it is also useful to investigate stylistic aspects between single authors. We provide a detailed statistical analysis of the data, and a corpus-driven study in dialectology and diachronic varieties.", "phrases": ["italian language", "diachronic evolution", "vulgaris"], "overall_score": 0.8025796163913548, "scores": [0.8365104985308973, 0.8341125467189352, 0.5209953027417591], "rank_score": 0.7305394493305305} -{"id": "freeman-etal-2006-cross", "title": "Cross Linguistic Name Matching in English and Arabic", "abstract": "This paper presents a solution to the problem of matching personal names in English to the same names represented in Arabic script. Standard string comparison measures perform poorly on this task due to varying transliteration conventions in both languages and the fact that Arabic script does not usually represent short vowels. Significant improvement is achieved by augmenting the classic Levenshtein edit-distance algorithm with character equivalency classes.", "phrases": ["name matching", "arabic", "in-vocabulary word"], "overall_score": 1.51890671658251, "scores": [0.8488836175407982, 0.818745799233595, 0.5236897708121325], "rank_score": 0.7304397291955086} -{"id": "fu-etal-2013-exploiting", "title": "Exploiting Multiple Sources for Open-Domain Hypernym Discovery", "abstract": "Hypernym discovery aims to extract noun pairs in which one noun is a hypernym of the other. Most previous methods are based on lexical patterns but perform badly on open-domain data. Other work extracts hypernym relations from encyclopedias but has limited coverage. This paper proposes a simple yet effective distant supervision framework for Chinese open-domain hypernym discovery. Given an entity name, we try to discover its hypernyms by leveraging knowledge from multiple sources, i.e., search engine results, encyclopedias, and morphology of the entity name. First, we extract candidate hypernyms from the above sources. Then, we apply a statistical ranking model to select correct hypernyms. A set of novel features is proposed for the ranking model. We also present a heuristic strategy to build large-scale noisy training data for the model without human annotation. Experimental results demonstrate that our approach outperforms the state-of-the-art methods on a manually labeled test dataset.", "phrases": ["open-domain hypernym discovery", "encyclopedia", "entity name", "search engine result"], "overall_score": 1.3081050101821723, "scores": [1.0208709385445423, 0.8247609829480573, 0.5522406879788063, 0.5223966178389857], "rank_score": 0.7300673068275979} -{"id": "guillaume-etal-2016-crowdsourcing", "title": "Crowdsourcing Complex Language Resources: Playing to Annotate Dependency Syntax", "abstract": "This article presents the results we obtained on a complex annotation task (that of dependency syntax) using a specifically designed Game with a Purpose, ZombiLingo. We show that with suitable mechanisms (decomposition of the task, training of the players and regular control of the annotation quality during the game), it is possible to obtain annotations whose quality is significantly higher than that obtainable with a parser, provided that enough players participate.
The source code of the game and the resulting annotated corpora (for French) are freely available.", "phrases": ["zombilingo", "french", "dependency syntax annotation"], "overall_score": 1.6809336416894847, "scores": [1.044197889532826, 0.6188972288772585, 0.5269654966837585], "rank_score": 0.7300202050312811} -{"id": "neubig-etal-2011-unsupervised", "title": "An Unsupervised Model for Joint Phrase Alignment and Extraction", "abstract": "We present an unsupervised model for joint phrase alignment and extraction using non-parametric Bayesian methods and inversion transduction grammars (ITGs). The key contribution is that phrases of many granularities are included directly in the model through the use of a novel formulation that memorizes phrases generated not only by terminal, but also non-terminal symbols. This allows for a completely probabilistic model that is able to create a phrase table that achieves competitive accuracy on phrase-based machine translation tasks directly from unaligned sentence pairs. Experiments on several language pairs demonstrate that the proposed model matches the accuracy of traditional two-step word alignment/phrase extraction approach while reducing the phrase table to a fraction of the original size.", "phrases": ["joint phrase alignment", "inversion transduction grammar", "granularity", "pitman-yor process"], "overall_score": 1.8714297931299855, "scores": [0.982645997694986, 0.8728052853502685, 0.5424294165586462, 0.5205859557878313], "rank_score": 0.7296166638479329} -{"id": "maslennikov-chua-2007-multi", "title": "A Multi-resolution Framework for Information Extraction from Free Text", "abstract": "Extraction of relations between entities is an important part of Information Extraction on free text. Previous methods are mostly based on statistical correlation and dependency relations between entities. This paper re-examines the problem at the multi-resolution layers of phrase, clause and sentence using dependency and discourse relations. Our multi-resolution framework ARE (Anchor and Relation) uses clausal relations in 2 ways: 1) to filter noisy dependency paths; and 2) to increase reliability of dependency path extraction. The resulting system outperforms the previous approaches by 3%, 7%, 4% on MUC4, MUC6 and ACE RDC domains respectively.", "phrases": ["multi-resolution framework", "information extraction", "discourse tree", "pattern-based framework"], "overall_score": 1.5168849996510423, "scores": [0.9330787212254178, 0.8863585090085336, 0.5714822258121502, 0.5269504994144533], "rank_score": 0.7294674888651387} -{"id": "su-etal-2015-bilingual", "title": "Bilingual Correspondence Recursive Autoencoder for Statistical Machine Translation", "abstract": "Learning semantic representations and tree structures of bilingual phrases is beneficial for statistical machine translation. In this paper, we propose a new neural network model called Bilingual Correspondence Recursive Autoencoder (BCorrRAE) to model bilingual phrases in translation. We incorporate word alignments into BCorrRAE to allow it to freely access bilingual constraints at different levels. BCorrRAE minimizes a joint objective on the combination of a recursive autoencoder reconstruction error, a structural alignment consistency error and a cross-lingual reconstruction error so as to not only generate alignment-consistent phrase structures, but also capture different levels of semantic relations within bilingual phrases.
In order to examine the effectiveness of BCorrRAE, we incorporate both semantic and structural similarity features built on bilingual phrase representations and tree structures learned by BCorrRAE into a state-of-the-art SMT system. Experiments on NIST Chinese-English test sets show that our model achieves a substantial improvement of up to 1.55 BLEU points over the baseline.", "phrases": ["correspondence recursive autoencoder", "statistical machine translation", "different level", "semantic relation"], "overall_score": 1.3063865213440484, "scores": [0.9487690255001893, 0.9112750465987928, 0.5287493041204545, 0.5276394235622879], "rank_score": 0.7291081999454312} -{"id": "keswani-etal-2020-iitk-semeval", "title": "IITK at SemEval-2020 Task 8: Unimodal and Bimodal Sentiment Analysis of Internet Memes", "abstract": "Social media is abundant in visual and textual information presented together or in isolation. Memes are the most popular form, belonging to the former class. In this paper, we present our approaches for the Memotion Analysis problem as posed in SemEval-2020 Task 8. The goal of this task is to classify memes based on their emotional content and sentiment. We leverage techniques from Natural Language Processing (NLP) and Computer Vision (CV) towards the sentiment classification of internet memes (Subtask A). We consider Bimodal (text and image) as well as Unimodal (text-only) techniques in our study ranging from the Na\u00efve Bayes classifier to Transformer-based approaches. Our results show that a text-only approach, a simple Feed Forward Neural Network (FFNN) with Word2vec embeddings as input, performs superior to all the others. We stand first in the Sentiment analysis task with a relative improvement of 63% over the baseline macro-F1 score. Our work is relevant to any task concerned with the combination of different modalities.", "phrases": ["semeval-2020 task", "internet meme", "text-only approach"], "overall_score": 1.0107521547857787, "scores": [0.8768564374877674, 0.7807857275121809, 0.5296685169159671], "rank_score": 0.7291035606386385} -{"id": "patry-etal-2007-mistral", "title": "MISTRAL: a lattice translation system for IWSLT 2007", "abstract": "This paper describes MISTRAL, the lattice translation system that we developed for the Italian-English track of the International Workshop on Spoken Language Translation 2007. MISTRAL is a discriminative phrase-based system that translates a source word lattice in two passes. The first pass extracts a list of top ranked sentence pairs from the lattice and the second pass rescores this list with more complex features. Our experiments show that our system, when translating pruned lattices, is at least as good as a fair baseline that translates the first ranked sentences returned by a speech recognition system.", "phrases": ["lattice translation system", "iwslt", "international workshop", "spoken language translation", "mistral"], "overall_score": 0.8007895008991551, "scores": [0.9160972884022819, 0.8196065309417757, 0.8061779016392884, 0.5779301013731759, 0.5247382575901682], "rank_score": 0.728910015989338} -{"id": "torres-martins-etal-2008-stacking", "title": "Stacking Dependency Parsers", "abstract": "We explore a stacked framework for learning to predict dependency structures for natural language sentences. A typical approach in graph-based dependency parsing has been to assume a factorized model, where local features are used but a global function is optimized (McDonald et al., 2005b).
Recently, Nivre and McDonald (2008) used the output of one dependency parser to provide features for another. We show that this is an example of stacked learning, in which a second predictor is trained to improve the performance of the first. Further, we argue that this technique is a novel way of approximating rich non-local features in the second parser, without sacrificing efficient, model-optimal prediction. Experiments on twelve languages show that stacking transition-based and graph-based parsers improves performance over existing state-of-the-art dependency parsers.", "phrases": ["dependency parsing", "stacked learning", "stacking"], "overall_score": 2.0651365327314473, "scores": [1.0280091046681992, 0.5902183598013886, 0.5684802930636248], "rank_score": 0.7289025858444041} -{"id": "burchardt-etal-2006-salsa", "title": "The SALSA Corpus: a German Corpus Resource for Lexical Semantics", "abstract": "This paper describes the SALSA corpus, a large German corpus manually annotated with role-semantic annotation, based on the syntactically annotated TIGER newspaper corpus. The first release, comprising about 20,000 annotated predicate instances (about half the TIGER corpus), is scheduled for mid-2006. In this paper we discuss the annotation framework (frame semantics) and its cross-lingual applicability, problems arising from exhaustive annotation, strategies for quality control, and possible applications.", "phrases": ["salsa corpus", "predicate", "framenet", "lexical unit"], "overall_score": 2.020785843796408, "scores": [0.9276876986877015, 0.8351656116880731, 0.5909305441768232, 0.561593860991099], "rank_score": 0.7288444288859243} -{"id": "zarriess-schlangen-2016-easy", "title": "Easy Things First: Installments Improve Referring Expression Generation for Objects in Photographs", "abstract": "Research on generating referring expressions has so far mostly focussed on \u201cone-shot reference\u201d, where the aim is to generate a single, discriminating expression. In interactive settings, however, it is not uncommon for reference to be established in \u201cinstallments\u201d, where referring information is offered piecewise until success has been confirmed. We show that this strategy can also be advantageous in technical systems that only have uncertain access to object attributes and categories. We train a recently introduced model of grounded word meaning on a data set of REs for objects in images and learn to predict semantically appropriate expressions. In a human evaluation, we observe that users are sensitive to inadequate object names - which unfortunately are not unlikely to be generated from low-level visual input. We propose a solution inspired by human task-oriented interaction and implement strategies for avoiding and repairing semantically inaccurate words. We enhance a word-based REG with context-aware, referential installments and find that they substantially improve the referential success of the system.", "phrases": ["installment", "object", "real-world image"], "overall_score": 1.1726911796655632, "scores": [1.0950348315165879, 0.5662334294440843, 0.5246337311677383], "rank_score": 0.7286339973761368} -{"id": "weng-etal-2017-neural", "title": "Neural Machine Translation with Word Predictions", "abstract": "In the encoder-decoder architecture for neural machine translation (NMT), the hidden states of the recurrent structures in the encoder and decoder carry the crucial information about the sentence.
These vectors are generated by parameters which are updated by back-propagation of translation errors through time. We argue that propagating errors through the end-to-end recurrent structures is not a direct way to control the hidden vectors. In this paper, we propose to use word predictions as a mechanism for direct supervision. More specifically, we require these vectors to be able to predict the vocabulary in the target sentence. Our simple mechanism ensures better representations in the encoder and decoder without using any extra data or annotation. It is also helpful in reducing the target side vocabulary and improving the decoding efficiency. Experiments on the Chinese-English machine translation task show an average BLEU improvement of 4.53.", "phrases": ["hidden state", "target sentence", "neural machine translation"], "overall_score": 1.5151070571503846, "scores": [1.010226161863783, 0.5904362953264157, 0.585174980576533], "rank_score": 0.7286124792555772} -{"id": "paula-etal-2018-similarity", "title": "Similarity Measures for the Detection of Clinical Conditions with Verbal Fluency Tasks", "abstract": "Semantic Verbal Fluency tests have been used in the detection of certain clinical conditions, like Dementia. In particular, given a sequence of semantically related words, a large number of switches from one semantic class to another has been linked to clinical conditions. In this work, we investigate three similarity measures for automatically identifying switches in semantic chains: semantic similarity from a manually constructed resource, and word association strength and semantic relatedness, both calculated from corpora. This information is used for building classifiers to distinguish healthy controls from clinical cases with early stages of Alzheimer's Disease and Mild Cognitive Deficits. The overall results indicate that for clinical conditions the classifiers that use these similarity measures outperform those that use a gold standard taxonomy.", "phrases": ["detection", "verbal fluency task", "word embedding"], "overall_score": 1.0098044056183588, "scores": [0.8620160817606018, 0.7854170949790585, 0.5378265356404901], "rank_score": 0.7284199041267169} -{"id": "mukherjee-bhattacharyya-2012-sentiment", "title": "Sentiment Analysis in Twitter with Lightweight Discourse Analysis", "abstract": "We propose a lightweight method for using discourse relations for polarity detection of tweets. This method is targeted towards the web-based applications that deal with noisy, unstructured text, like the tweets, and cannot afford to use heavy linguistic resources like parsing due to frequent failure of the parsers to handle noisy data. Most of the works in micro-blogs, like Twitter, use a bag-of-words model that ignores the discourse particles like but, since, although etc. In this work, we show how the discourse relations like the connectives and conditionals can be used to incorporate discourse information in any bag-of-words model, to improve sentiment classification accuracy. We also probe the influence of the semantic operators like modals and negations on the discourse relations that affect the sentiment of a sentence. Discourse relations and corresponding rules are identified with minimal processing - just a list look up. We first give a linguistic description of the various discourse relations which leads to conditions in rules and features in SVM.
We show that our discourse-based bag-of-words model performs well in a noisy medium (Twitter), where it performs better than an existing Twitter-based application. Furthermore, we show that our approach is beneficial to structured reviews as well, where we achieve a better accuracy than a state-of-the-art system in the travel review domain. Our system compares favorably with the state-of-the-art systems and has the additional attractiveness of being less resource intensive.", "phrases": ["twitter", "discourse information", "negation", "sentiment analysis"], "overall_score": 1.41740226375093, "scores": [0.9180095641830673, 0.8760811034535642, 0.5770015927495816, 0.5425104348647274], "rank_score": 0.7284006738127351} -{"id": "li-etal-2010-whitepaper", "title": "Whitepaper of NEWS 2010 Shared Task on Transliteration Generation", "abstract": "Transliteration is defined as phonetic translation of names across languages. Transliteration of Named Entities (NEs) is necessary in many applications, such as machine translation, corpus alignment, cross-language IR, information extraction and automatic lexicon acquisition. All such systems call for high-performance transliteration, which is the focus of the shared task in the NEWS 2010 workshop. The objective of the shared task is to promote machine transliteration research by providing a common benchmarking platform for the community to evaluate the state-of-the-art technologies.", "phrases": ["transliteration", "news", "direction", "quality metric"], "overall_score": 1.0097686073264263, "scores": [0.846303963275932, 0.9942585614224315, 0.5446927234651934, 0.528321076306823], "rank_score": 0.7283940811175951} -{"id": "su-etal-2015-reward", "title": "Reward Shaping with Recurrent Neural Networks for Speeding up On-Line Policy Learning in Spoken Dialogue Systems", "abstract": "Statistical spoken dialogue systems have the attractive property of being able to be optimised from data via interactions with real users. However in the reinforcement learning paradigm the dialogue manager (agent) often requires significant time to explore the state-action space to learn to behave in a desirable manner. This is a critical issue when the system is trained on-line with real users where learning costs are expensive. Reward shaping is one promising technique for addressing these concerns. Here we examine three recurrent neural network (RNN) approaches for providing reward shaping information in addition to the primary (task-orientated) environmental feedback. These RNNs are trained on returns from dialogues generated by a simulated user and attempt to diffuse the overall evaluation of the dialogue back down to the turn level to guide the agent towards good behaviour faster. In both simulated and real user scenarios these RNNs are shown to increase policy learning speed. Importantly, they do not require prior knowledge of the user's goal.", "phrases": ["policy learning", "reward", "dialogue policy learning"], "overall_score": 1.417298951050267, "scores": [0.8180365895231466, 0.838395792761483, 0.5286103624767263], "rank_score": 0.7283475815871187} -{"id": "han-eisenstein-2019-unsupervised", "title": "Unsupervised Domain Adaptation of Contextualized Embeddings for Sequence Labeling", "abstract": "Contextualized word embeddings such as ELMo and BERT provide a foundation for strong performance across a wide range of natural language processing tasks by pretraining on large corpora of unlabeled text.
However, the applicability of this approach is unknown when the target domain varies substantially from the pretraining corpus. We are specifically interested in the scenario in which labeled data is available in only a canonical source domain such as news text, and the target domain is distinct from both the labeled and pretraining texts. To address this scenario, we propose domain-adaptive fine-tuning, in which the contextualized embeddings are adapted by masked language modeling on text from the target domain. We test this approach on sequence labeling in two challenging domains: Early Modern English and Twitter. Both domains differ substantially from existing pretraining corpora, and domain-adaptive fine-tuning yields substantial improvements over strong BERT baselines, with particularly impressive results on out-of-vocabulary words. We conclude that domain-adaptive fine-tuning offers a simple and effective approach for the unsupervised adaptation of sequence labeling to difficult new domains.", "phrases": ["word embedding", "language modeling", "modern english", "unsupervised domain adaptation", "specific domain"], "overall_score": 2.3987788867937314, "scores": [0.8914006901044363, 1.1717910210796911, 0.5288584991143211, 0.5247888682097244, 0.5222653194316802], "rank_score": 0.7278208795879706} -{"id": "yang-zong-2014-multi", "title": "Multi-Predicate Semantic Role Labeling", "abstract": "The current approaches to Semantic Role Labeling (SRL) usually perform role classification for each predicate separately and the interaction among individual predicate\u2019s role labeling is ignored if there is more than one predicate in a sentence. In this paper, we prove that different predicates in a sentence could help each other during SRL. In multi-predicate role labeling, there are mainly two key points: argument identification and role labeling of the arguments shared by multiple predicates. To address these issues, in the stage of argument identification, we propose novel predicate-related features which help remove many argument identification errors; in the stage of argument classification, we adopt a discriminative reranking approach to perform role classification of the shared arguments, in which a large set of global features are proposed. We conducted experiments on two standard benchmarks: Chinese PropBank and English PropBank. The experimental results show that our approach can significantly improve SRL performance, especially in Chinese PropBank.", "phrases": ["semantic role labeling", "multi-predicate srl", "chinese proposition bank"], "overall_score": 1.5991822964400744, "scores": [0.9803762043661772, 0.6339945626073944, 0.5690869181468425], "rank_score": 0.7278192283734715} -{"id": "mihaylov-etal-2018-suit", "title": "Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering", "abstract": "We present a new kind of question answering dataset, OpenBookQA, modeled after open book exams for assessing human understanding of a subject. The open book that comes with our questions is a set of 1326 elementary level science facts. Roughly 6000 questions probe an understanding of these facts and their application to novel situations. This requires combining an open book fact (e.g., metals conduct electricity) with broad common knowledge (e.g., a suit of armor is made of metal) obtained from other sources.
While existing QA datasets over documents or knowledge bases, being generally self-contained, focus on linguistic understanding, OpenBookQA probes a deeper understanding of both the topic\u2014in the context of common knowledge\u2014and the language it is expressed in. Human performance on OpenBookQA is close to 92%, but many state-of-the-art pre-trained QA methods perform surprisingly poorly, worse than several simple neural baselines we develop. Our oracle experiments designed to circumvent the knowledge retrieval bottleneck demonstrate the value of both the open book and additional facts. We leave it as a challenge to solve the retrieval problem in this multi-hop setting and to close the large gap to human performance.", "phrases": ["suit", "openbookqa", "science question"], "overall_score": 2.4753623267068434, "scores": [0.7831625468083039, 0.8378759280258954, 0.5623358333311866], "rank_score": 0.7277914360551286} -{"id": "plaza-del-arco-etal-2019-sinai-semeval-2019", "title": "SINAI at SemEval-2019 Task 6: Incorporating lexicon knowledge into SVM learning to identify and categorize offensive language in social media", "abstract": "Offensive language has an impact across society. The use of social media has aggravated this issue among online users, causing suicides in the worst cases. For this reason, it is important to develop systems capable of identifying and detecting offensive language in text automatically. In this paper, we developed a system to classify offensive tweets as part of our participation in SemEval-2019 Task 6: OffensEval. Our main contribution is the integration of lexical features in the classification using the SVM algorithm.", "phrases": ["semeval-2019 task", "offensive language", "lexical feature"], "overall_score": 1.008773350745303, "scores": [0.8380330312263097, 0.8251046414189935, 0.5198907931064841], "rank_score": 0.7276761552505958} -{"id": "dhingra-etal-2016-tweet2vec", "title": "Tweet2Vec: Character-Based Distributed Representations for Social Media", "abstract": "Text from social media provides a set of challenges that can cause traditional NLP approaches to fail. Informal language, spelling errors, abbreviations, and special characters are all commonplace in these posts, leading to a prohibitively large vocabulary size for word-level approaches. We propose a character composition model, tweet2vec, which finds vector-space representations of whole tweets by learning complex, non-local dependencies in character sequences. The proposed model outperforms a word-level baseline at predicting user-annotated hashtags associated with the posts, doing significantly better when the input contains many out-of-vocabulary words or unusual character sequences. Our tweet2vec encoder is publicly available.", "phrases": ["hashtag", "tweet2vec", "vector representation", "social medium"], "overall_score": 1.7437308526442206, "scores": [0.9520116582238441, 0.8273868924784514, 0.5688417148594423, 0.5605287243520991], "rank_score": 0.7271922474784593} -{"id": "choi-etal-2020-f", "title": "F2-Softmax: Diversifying Neural Text Generation via Frequency Factorized Softmax", "abstract": "Despite recent advances in neural text generation, encoding the rich diversity in human language remains elusive. We argue that the sub-optimal text generation is mainly attributable to the imbalanced token distribution, which particularly misdirects the learning model when trained with the maximum-likelihood objective. 
As a simple yet effective remedy, we propose two novel methods, F2-Softmax and MefMax, for a balanced training even with the skewed frequency distribution. MefMax assigns tokens uniquely to frequency classes, trying to group tokens with similar frequencies and equalize frequency mass between the classes. F2-Softmax then decomposes a probability distribution of the target token into a product of two conditional probabilities of (1) frequency class, and (2) token from the target frequency class. Models learn more uniform probability distributions because they are confined to subsets of vocabularies. Significant performance gains on seven relevant metrics suggest the supremacy of our approach in improving not only the diversity but also the quality of generated texts.", "phrases": ["neural text generation", "token distribution", "balanced training", "f2-softmax"], "overall_score": 1.1691548537464558, "scores": [0.9562109556014833, 0.8796876368605252, 0.5379198657544306, 0.5319285599539878], "rank_score": 0.7264367545426067} -{"id": "yoneda-etal-2018-ucl", "title": "UCL Machine Reading Group: Four Factor Framework For Fact Finding (HexaF)", "abstract": "In this paper we describe our 2nd place FEVER shared-task system that achieved a FEVER score of 62.52% on the provisional test set (without additional human evaluation), and 65.41% on the development set. Our system is a four stage model consisting of document retrieval, sentence retrieval, natural language inference and aggregation. Retrieval is performed leveraging task-specific features, and then a natural language inference model takes each of the retrieved sentences paired with the claimed fact. The resulting predictions are aggregated across retrieved sentences with a Multi-Layer Perceptron, and re-ranked corresponding to the final prediction.", "phrases": ["sentence retrieval", "claim", "multi-layer perceptron"], "overall_score": 2.1762015601621494, "scores": [0.8289946817280902, 0.8278224813714012, 0.5224846183448261], "rank_score": 0.7264339271481058} -{"id": "jin-etal-2019-imat", "title": "IMaT: Unsupervised Text Attribute Transfer via Iterative Matching and Translation", "abstract": "Text attribute transfer aims to automatically rewrite sentences such that they possess certain linguistic attributes, while simultaneously preserving their semantic content. This task remains challenging due to a lack of supervised parallel data. Existing approaches try to explicitly disentangle content and attribute information, but this is difficult and often results in poor content-preservation and ungrammaticality. In contrast, we propose a simpler approach, Iterative Matching and Translation (IMaT), which: (1) constructs a pseudo-parallel corpus by aligning a subset of semantically similar sentences from the source and the target corpora; (2) applies a standard sequence-to-sequence model to learn the attribute transfer; (3) iteratively improves the learned transfer function by refining imperfections in the alignment. In sentiment modification and formality transfer tasks, our method outperforms complex state-of-the-art systems by a large margin. As an auxiliary contribution, we produce a publicly-available test set with human-generated transfer references.", "phrases": ["iterative matching", "similar sentence", "imat"], "overall_score": 1.5102440686856125, "scores": [0.8313128292172809, 0.8048334770099219, 0.5426753221975015], "rank_score": 0.7262738761415681} -{"id": "quirk-menezes-2006-need", "title": "Do we need phrases? 
Challenging the conventional wisdom in Statistical Machine Translation", "abstract": "We begin by exploring theoretical and practical issues with phrasal SMT, several of which are addressed by syntax-based SMT. Next, to address problems not handled by syntax, we propose the concept of a Minimal Translation Unit (MTU) and develop MTU sequence models. Finally we incorporate these models into a syntax-based SMT system and demonstrate that it improves on the state of the art translation quality within a theoretically more desirable framework.", "phrases": ["mtu", "smt system", "rule markov model", "reordering"], "overall_score": 1.6721266130473473, "scores": [0.8974429055839711, 0.8855475046345602, 0.5743853335794691, 0.5474057005621473], "rank_score": 0.7261953610900369} -{"id": "gravier-etal-2012-etape", "title": "The ETAPE corpus for the evaluation of speech-based TV content processing in the French language", "abstract": "The paper presents a comprehensive overview of existing data for the evaluation of spoken content processing in a multimedia framework for the French language. We focus on the ETAPE corpus which will be made publicly available by ELDA mid 2012, after completion of the evaluation campaign, and recall existing resources resulting from previous evaluation campaigns. The ETAPE corpus consists of 30 hours of TV and radio broadcasts, selected to cover a wide variety of topics and speaking styles, emphasizing spontaneous speech and multiple speaker areas.", "phrases": ["etape corpus", "french language", "spontaneous speech"], "overall_score": 1.0066190004774127, "scores": [0.8176282547816099, 0.8083318689501987, 0.552406236348244], "rank_score": 0.7261221200266842} -{"id": "johnson-martin-2003-unsupervised", "title": "Unsupervised Learning of Morphology for English and Inuktitut", "abstract": "We describe a simple unsupervised technique for learning morphology by identifying hubs in an automaton. For our purposes, a hub is a node in a graph with in-degree greater than one and out-degree greater than one. We create a word-trie, transform it into a minimal DFA, then identify hubs. Those hubs mark the boundary between root and suffix, achieving similar performance to more complex mixtures of techniques.", "phrases": ["morphology", "inuktitut", "automaton"], "overall_score": 0.7975242872973962, "scores": [0.850407141791206, 0.792756027305207, 0.5346505023606917], "rank_score": 0.7259378904857016} -{"id": "pustejovsky-etal-2009-glml", "title": "GLML: Annotating Argument Selection and Coercion", "abstract": "In this paper we introduce a methodology for annotating compositional operations in natural language text, and describe a mark-up language, GLML, based on Generative Lexicon, for identifying such relations. While most annotation systems capture surface relationships, GLML captures the \"compositional history\" of the argument selection relative to the predicate. We provide a brief overview of GL before moving on to our proposed methodology for annotating with GLML. There are three main tasks described in the paper: (i) Compositional mechanisms of argument selection; (ii) Qualia in modification constructions; (iii) Type selection in modification of dot objects. We explain what each task includes and provide a description of the annotation interface. 
We also include the XML format for GLML including examples of annotated sentences.", "phrases": ["coercion", "glml", "underspecification"], "overall_score": 1.1682955432075282, "scores": [0.832461421735059, 0.8157172184445687, 0.5295298644757792], "rank_score": 0.7259028348851356} -{"id": "lacoste-julien-etal-2006-word", "title": "Word Alignment via Quadratic Assignment", "abstract": "Recently, discriminative word alignment methods have achieved state-of-the-art accuracies by extending the range of information sources that can be easily incorporated into aligners. The chief advantage of a discriminative framework is the ability to score alignments based on arbitrary features of the matching word tokens, including orthographic form, predictions of other models, lexical context and so on. However, the proposed bipartite matching model of Taskar et al. (2005), despite being tractable and effective, has two important limitations. First, it is limited by the restriction that words have fertility of at most one. More importantly, first order correlations between consecutive words cannot be directly captured by the model. In this work, we address these limitations by enriching the model form. We give estimation and inference algorithms for these enhancements. Our best model achieves a relative AER reduction of 25% over the basic matching formulation, outperforming intersected IBM Model 4 without using any overly compute-intensive features. By including predictions of other models as features, we achieve AER of 3.8 on the standard Hansards dataset.", "phrases": ["limitation", "word alignment", "quadratic assignment problem"], "overall_score": 1.5949208501243368, "scores": [0.9924682175348868, 0.6335626990921132, 0.5516083650952915], "rank_score": 0.7258797605740971} -{"id": "bond-etal-2008-improving", "title": "Improving statistical machine translation by paraphrasing the training data.", "abstract": "Large amounts of training data are essential for training statistical machine translations systems. In this paper we show how training data can be expanded by paraphrasing one side. The new data is made by parsing then generating using a precise HPSG based grammar, which gives sentences with the same meaning, but minor variations in lexical choice and word order. In experiments with Japanese and English, we showed consistent gains on the Tanaka Corpus with less consistent improvement on the IWSLT 2005 evaluation data.", "phrases": ["paraphrasing", "word order", "source side"], "overall_score": 1.6710230393936747, "scores": [1.0932535855969634, 0.5554975329907285, 0.5283971368379272], "rank_score": 0.725716085141873} -{"id": "reisinger-mooney-2010-multi", "title": "Multi-Prototype Vector-Space Models of Word Meaning", "abstract": "Current vector-space models of lexical semantics create a single \"prototype\" vector to represent the meaning of a word. However, due to lexical ambiguity, encoding word meaning with a single vector is problematic. This paper presents a method that uses clustering to produce multiple \"sense-specific\" vectors for each word. This approach provides a context-dependent vector representation of word meaning that naturally accommodates homonymy and polysemy. 
Experimental comparisons to human judgements of semantic similarity for both isolated words as well as words in sentential contexts demonstrate the superiority of this approach over both prototype and exemplar based vector-space models.", "phrases": ["word meaning", "group", "multi-prototype vector-space model", "sense-specific vector"], "overall_score": 2.9464518875192427, "scores": [0.914680666174814, 0.8244871355712811, 0.6297352141803474, 0.5336885287560033], "rank_score": 0.7256478861706114} -{"id": "kang-hovy-2019-linguistic", "title": "Linguistic Versus Latent Relations for Modeling Coherent Flow in Paragraphs", "abstract": "Generating a long, coherent text such as a paragraph requires a high-level control of different levels of relations between sentences (e.g., tense, coreference). We call such a logical connection between sentences as a (paragraph) flow. In order to produce a coherent flow of text, we explore two forms of intersentential relations in a paragraph: one is a human-created linguistical relation that forms a structure (e.g., discourse tree) and the other is a relation from latent representation learned from the sentences themselves. Our two proposed models incorporate each form of relations into document-level language models: the former is a supervised model that jointly learns a language model as well as discourse relation prediction, and the latter is an unsupervised model that is hierarchically conditioned by a recurrent neural network (RNN) over the latent information. Our proposed models with both forms of relations outperform the baselines in partially conditioned paragraph generation task. Our codes and data are publicly available.", "phrases": ["coherent flow", "paragraph", "language model"], "overall_score": 1.0058597892946404, "scores": [0.8276427123462409, 0.7966020212435533, 0.5524786611776985], "rank_score": 0.7255744649224977} -{"id": "jiang-etal-2016-transitivity", "title": "Transitivity in Light Verb Variations in Mandarin Chinese \u2013 A Comparable Corpus-based Statistical Approach", "abstract": "This paper adopts a comparable corpus-based approach to light verb variations in two varieties of Mandarin Chinese and proposes a transitivity (Hopper and Thompson 1980) based theoretical account. Light verbs are highly grammaticalized and lack strong collocation restrictions; hence it has been a challenge to empirical accounts. It is even more challenging to consider their variations between different varieties (e.g. Taiwan and Mainland Mandarin). This current study follows the research paradigm set up in Lin et al. (2014) for differentiating different light verbs and Huang et al. (2014) for automatic discovery of light verb variations. In our study, a corpus-based statistical approach is adopted to show that both internal variety differences between light verbs and external differences between different variants can be detected effectively. The distributional differences between Mainland and Taiwan can also shed light on the re-classification of syntactic types of the taken complement. We further argue that the variations in selection of arguments of light verb in two Mandarin variants can in fact be accounted for in terms of their different degree of transitivity. 
Higher degree of transitivity in Taiwan Mandarin in fact show that light verbs are less grammaticalized and hence consistent with the generalization that varieties away from the main speaking community should be more conservative.", "phrases": ["light verb", "corpus-based statistical approach", "transitivity"], "overall_score": 0.7964648338451751, "scores": [0.8172857345028919, 0.7883014664884438, 0.5693334021927784], "rank_score": 0.7249735343947047} -{"id": "bauer-etal-2021-ernie", "title": "ERNIE-NLI: Analyzing the Impact of Domain-Specific External Knowledge on Enhanced Representations for NLI", "abstract": "We examine the effect of domain-specific external knowledge variations on deep large scale language model performance. Recent work in enhancing BERT with external knowledge has been very popular, resulting in models such as ERNIE (Zhang et al., 2019a). Using the ERNIE architecture, we provide a detailed analysis on the types of knowledge that result in a performance increase on the Natural Language Inference (NLI) task, specifically on the Multi-Genre Natural Language Inference Corpus (MNLI). While ERNIE uses general TransE embeddings, we instead train domain-specific knowledge embeddings and insert this knowledge via an information fusion layer in the ERNIE architecture, allowing us to directly control and analyze knowledge input. Using several different knowledge training objectives, sources of knowledge, and knowledge ablations, we find a strong correlation between knowledge and classification labels within the same polarity, illustrating that knowledge polarity is an important feature in predicting entailment. We also perform classification change analysis across different knowledge variations to illustrate the importance of selecting appropriate knowledge input regarding content and polarity, and show representative examples of these changes.", "phrases": ["external knowledge", "knowledge embedding", "ernie-nli"], "overall_score": 0.7964219034750121, "scores": [0.8335249264397957, 0.8212616841178207, 0.5200167619056894], "rank_score": 0.7249344574877686} -{"id": "thater-etal-2010-contextualizing", "title": "Contextualizing Semantic Representations Using Syntactically Enriched Vector Models", "abstract": "We present a syntactically enriched vector model that supports the computation of contextualized semantic representations in a quasi compositional fashion. It employs a systematic combination of first- and second-order context vectors. We apply our model to two different tasks and show that (i) it substantially outperforms previous work on a paraphrase ranking task, and (ii) achieves promising results on a wordsense similarity task; to our knowledge, it is the first time that an unsupervised method has been applied to this task.", "phrases": ["paraphrase", "vector space", "dependency relation", "distributional model", "direct denotation"], "overall_score": 2.3334024673187534, "scores": [1.1185930213927537, 0.8809256916981061, 0.5545796009537516, 0.540520177415034, 0.5299426818721907], "rank_score": 0.7249122346663672} -{"id": "xu-etal-2013-open", "title": "Open Information Extraction with Tree Kernels", "abstract": "Traditional relation extraction seeks to identify pre-specified semantic relations within natural language text, while open Information Extraction (Open IE) takes a more general approach, and looks for a variety of relations without restriction to a fixed relation set. With this generalization comes the question, what is a relation? 
For example, should the more general task be restricted to relations mediated by verbs, nouns, or both? To help answer this question, we propose two levels of subtasks for Open IE. One task is to determine if a sentence potentially contains a relation between two entities? The other task looks to confirm explicit relation words for two entities. We propose multiple SVM models with dependency tree kernels for both tasks. For explicit relation extraction, our system can extract both noun and verb relations. Our results on three datasets show that our system is superior when compared to state-of-the-art systems like REVERB and OLLIE for both tasks. For example, in some experiments our system achieves 33% improvement on nominal relation extraction over OLLIE. In addition we propose an unsupervised rule-based approach which can serve as a strong baseline for Open IE systems.", "phrases": ["dependency tree kernel", "state-of-the-art system", "open information extraction"], "overall_score": 1.4103788035903326, "scores": [0.9684460005782769, 0.6438910664423662, 0.5620369208148698], "rank_score": 0.7247913292785042} -{"id": "brody-elhadad-2010-unsupervised", "title": "An Unsupervised Aspect-Sentiment Model for Online Reviews", "abstract": "With the increase in popularity of online review sites comes a corresponding need for tools capable of extracting the information most important to the user from the plain text data. Due to the diversity in products and services being reviewed, supervised methods are often not practical. We present an unsuper-vised system for extracting aspects and determining sentiment in review text. The method is simple and flexible with regard to domain and language, and takes into account the influence of aspect on sentiment polarity, an issue largely ignored in previous literature. We demonstrate its effectiveness on both component tasks, where it achieves similar results to more complex semi-supervised methods that are restricted by their reliance on manual annotation and extensive knowledge sources.", "phrases": ["text data", "sentiment analysis", "topic model", "different aspect"], "overall_score": 2.2056546990920745, "scores": [1.286410729696533, 0.5547361426986479, 0.5303095572204829, 0.5264098127384286], "rank_score": 0.7244665605885231} -{"id": "rubino-etal-2012-statistical", "title": "Statistical Post-Editing of Machine Translation for Domain Adaptation", "abstract": "This paper presents a statistical approach to adapt out-of-domain machine translation systems to the medical domain through an unsupervised post-editing step. A statistical post-editing model is built on statistical machine translation (SMT) outputs aligned with their translation references. Evaluations carried out to translate medical texts from French to English show that an out-of-domain machine translation system can be adapted a posteri-ori to a specific domain. Two SMT systems are studied: a state-of-the-art phrase-based implementation and an online publicly available system. 
Our experiments also indicate that selecting sentences for post-editing leads to significant improvements of translation quality and that more gains are still possible with respect to an oracle measure.", "phrases": ["machine translation", "french", "specific domain", "statistical post-editing"], "overall_score": 0.7954210868565754, "scores": [0.9341991163980147, 0.8838740440232242, 0.5411169282630893, 0.5369038110877021], "rank_score": 0.7240234749430076} -{"id": "seeker-kuhn-2013-morphological", "title": "Morphological and Syntactic Case in Statistical Dependency Parsing", "abstract": "Most morphologically rich languages with free word order use case systems to mark the grammatical function of nominal elements, especially for the core argument functions of a verb. The standard pipeline approach in syntactic dependency parsing assumes a complete disambiguation of morphological (case) information prior to automatic syntactic analysis. Parsing experiments on Czech, German, and Hungarian show that this approach is susceptible to propagating morphological annotation errors when parsing languages displaying syncretism in their morphological case paradigms. We develop a different architecture where we use case as a possibly underspecified filtering device restricting the options for syntactic analysis. Carefully designed morpho-syntactic constraints can delimit the search space of a statistical dependency parser and exclude solutions that would violate the restrictions overtly marked in the morphology of the words in a given sentence. The constrained system outperforms a state-of-the-art data-driven pipeline architecture, as we show experimentally, and, in addition, the parser output comes with guarantees about local and global morpho-syntactic wellformedness, which can be useful for downstream applications.", "phrases": ["czech", "hungarian", "search space", "dependency parser"], "overall_score": 1.5053200595880707, "scores": [0.9998343218923331, 0.8349412690020771, 0.5330153868585118, 0.5278327354715963], "rank_score": 0.7239059283061295} -{"id": "fivez-etal-2017-unsupervised", "title": "Unsupervised Context-Sensitive Spelling Correction of Clinical Free-Text with Word and Character N-Gram Embeddings", "abstract": "We present an unsupervised context-sensitive spelling correction method for clinical free-text that uses word and character n-gram embeddings. Our method generates misspelling replacement candidates and ranks them according to their semantic fit, by calculating a weighted cosine similarity between the vectorized representation of a candidate and the misspelling context. We greatly outperform two baseline off-the-shelf spelling correction tools on a manually annotated MIMIC-III test set, and counter the frequency bias of an optimized noisy channel model, showing that neural embeddings can be successfully exploited to include context-awareness in a spelling correction model.", "phrases": ["spelling correction", "clinical free-text", "character n-gram embedding"], "overall_score": 1.1650748374333282, "scores": [0.8429777920439159, 0.7919554086186216, 0.5367718929585246], "rank_score": 0.7239016978736874} -{"id": "kim-mooney-2010-generative", "title": "Generative Alignment and Semantic Parsing for Learning from Ambiguous Supervision", "abstract": "We present a probabilistic generative model for learning semantic parsers from ambiguous supervision. 
Our approach learns from natural language sentences paired with world states consisting of multiple potential logical meaning representations. It disambiguates the meaning of each sentence while simultaneously learning a semantic parser that maps sentences into logical form. Compared to a previous generative model for semantic alignment, it also supports full semantic parsing. Experimental results on the Robocup sportscasting corpora in both English and Korean indicate that our approach produces more accurate semantic alignments than existing methods and also produces competitive semantic parsers and improved language generators.", "phrases": ["semantic parsing", "generative model", "generation process", "correspondence"], "overall_score": 2.050682336096027, "scores": [0.9431411452629505, 0.8367127642397457, 0.5795330326527973, 0.535816612350055], "rank_score": 0.7238008886263871} -{"id": "kilickaya-etal-2017-evaluating", "title": "Re-evaluating Automatic Metrics for Image Captioning", "abstract": "The task of generating natural language descriptions from images has received a lot of attention in recent years. Consequently, it is becoming increasingly important to evaluate such image captioning approaches in an automatic manner. In this paper, we provide an in-depth evaluation of the existing image captioning metrics through a series of carefully designed experiments. Moreover, we explore the utilization of the recently proposed Word Mover's Distance (WMD) document metric for the purpose of image captioning. Our findings outline the differences and/or similarities between metrics and their relative robustness by means of extensive correlation, accuracy and distraction based evaluations. Our results also demonstrate that WMD provides strong advantages over other metrics.", "phrases": ["image captioning", "word mover", "distance", "wmd", "human judgment"], "overall_score": 1.7979503057484048, "scores": [0.9551372874580438, 1.0466072029124438, 0.5499665565968928, 0.543299175548764, 0.5227319286865915], "rank_score": 0.7235484302405472} -{"id": "li-etal-2019-rumor", "title": "Rumor Detection on Social Media: Datasets, Methods and Opportunities", "abstract": "Social media platforms have been used for information and news gathering, and they are very valuable in many applications. However, they also lead to the spreading of rumors and fake news. Many efforts have been taken to detect and debunk rumors on social media by analyzing their content and social context using machine learning techniques. This paper gives an overview of the recent studies in the rumor detection field. It provides a comprehensive list of datasets used for rumor detection, and reviews the important studies based on what types of information they exploit and the approaches they take. And more importantly, we also present several new directions for future research.", "phrases": ["rumor detection", "social medium", "twitter", "misinformation"], "overall_score": 1.5894414652962023, "scores": [0.9377381001209151, 0.8707678661056442, 0.5425550961543111, 0.542482877898814], "rank_score": 0.7233859850699211} -{"id": "narayan-gardent-2012-error", "title": "Error Mining with Suspicion Trees: Seeing the Forest for the Trees", "abstract": "In recent years, error mining approaches have been proposed to identify the most likely sources of errors in symbolic parsers and generators. However the techniques used generate a flat list of suspicious forms ranked by decreasing order of suspicion. 
We introduce a novel algorithm that structures the output of error mining into a tree (called, suspicion tree) highlighting the relationships between suspicious forms. We illustrate the impact of our approach by applying it to detect and analyse the most likely sources of failure in surface realisation; and we show how the suspicion tree built by our algorithm helps presenting the errors identified by error mining in a linguistically meaningful way thus providing better support for error analysis. The right frontier of the tree highlights the relative importance of the main error cases while the subtrees of a node indicate how a given error case divides into smaller more specific cases", "phrases": ["suspicion tree", "likely source", "failure", "error mining"], "overall_score": 1.0028226367133144, "scores": [0.9390027045731532, 0.8365342338785993, 0.5612932545683317, 0.5567042967349024], "rank_score": 0.7233836224387467} -{"id": "ehara-etal-2014-formalizing", "title": "Formalizing Word Sampling for Vocabulary Prediction as Graph-based Active Learning", "abstract": "Predicting vocabulary of second language learners is essential to support their language learning; however, because of the large size of language vocabularies, we cannot collect information on the entire vocabulary. For practical measurements, we need to sample a small portion of words from the entire vocabulary and predict the rest of the words. In this study, we propose a novel framework for this sampling method. Current methods rely on simple heuristic techniques involving inflexible manual tuning by educational experts. We formalize these heuristic techniques as a graph-based non-interactive active learning method as applied to a special graph. We show that by extending the graph, we can support additional functionality such as incorporating domain specificity and sampling from multiple corpora. In our experiments, we show that our extended methods outperform other methods in terms of vocabulary prediction accuracy when the number of samples is small.", "phrases": ["vocabulary prediction", "graph-based active learning", "learner", "label propagation algorithm"], "overall_score": 1.5894140675271893, "scores": [0.9128626500907772, 0.9104399094039305, 0.5408639991096834, 0.5293275046271378], "rank_score": 0.7233735158078822} -{"id": "pezzelle-etal-2018-comparatives", "title": "Comparatives, Quantifiers, Proportions: a Multi-Task Model for the Learning of Quantities from Vision", "abstract": "The present work investigates whether different quantification mechanisms (set comparison, vague quantification, and proportional estimation) can be jointly learned from visual scenes by a multi-task computational model. The motivation is that, in humans, these processes underlie the same cognitive, non-symbolic ability, which allows an automatic estimation and comparison of set magnitudes. We show that when information about lower-complexity tasks is available, the higher-level proportional task becomes more accurate than when performed in isolation. Moreover, the multi-task model is able to generalize to unseen combinations of target/non-target objects. 
Consistently with behavioral evidence showing the interference of absolute number in the proportional task, the multi-task model no longer works when asked to provide the number of target objects in the scene.", "phrases": ["quantifier", "multi-task model", "scene", "comparative"], "overall_score": 1.002478245370592, "scores": [0.9007038928764408, 0.8607570548052128, 0.5656645660802742, 0.565415272628518], "rank_score": 0.7231351965976114} -{"id": "bartlett-etal-2008-automatic", "title": "Automatic Syllabification with Structured SVMs for Letter-to-Phoneme Conversion", "abstract": "We present the first English syllabification system to improve the accuracy of letter-tophoneme conversion. We propose a novel discriminative approach to automatic syllabification based on structured SVMs. In comparison with a state-of-the-art syllabification system, we reduce the syllabification word error rate for English by 33%. Our approach also performs well on other languages, comparing favorably with published results on German and Dutch.", "phrases": ["structured svms", "automatic syllabification", "orthographic form", "phoneme"], "overall_score": 1.588536653974106, "scores": [0.934065008584889, 0.8158174894434769, 0.5742036724413565, 0.5678105802938258], "rank_score": 0.7229741876908871} -{"id": "yuret-yatbaz-2010-noisy", "title": "The Noisy Channel Model for Unsupervised Word Sense Disambiguation", "abstract": "Abstract We introduce a generative probabilistic model, the noisy channel model, for unsupervised word sense disambiguation. In our model, each context C is modeled as a distinct channel through which the speaker intends to transmit a particular meaning S using a possibly ambiguous word W. To reconstruct the intended meaning the hearer uses the distribution of possible meanings in the given context P(S|C) and possible words that can express each meaning P(W|S). We assume P(W|S) is independent of the context and estimate it using WordNet sense frequencies. The main problem of unsupervised WSD is estimating context-dependent P(S|C) without access to any sense-tagged text. We show one way to solve this problem using a statistical language model based on large amounts of untagged text. Our model uses coarse-grained semantic classes for S internally and we explore the effect of using different levels of granularity on WSD performance. The system outputs fine-grained senses for evaluation, and its performance on noun disambiguation is better than most previously reported unsupervised systems and close to the best supervised systems.", "phrases": ["noisy channel model", "word sense disambiguation", "substitution", "large corpora"], "overall_score": 1.1633170017371424, "scores": [0.8535118244261282, 0.9642855665916503, 0.5389574041127256, 0.5344831774552207], "rank_score": 0.7228094931464312} -{"id": "cohn-lapata-2008-sentence", "title": "Sentence Compression Beyond Word Deletion", "abstract": "In this paper we generalise the sentence compression task. Rather than simply shorten a sentence by deleting words or constituents, as in previous work, we rewrite it using additional operations such as substitution, reordering, and insertion. We present a new corpus that is suited to our task and a discriminative tree-to-tree transduction model that can naturally account for structural and lexical mismatches. 
The model incorporates a novel grammar extraction method, uses a language model for coherent output, and can be easily tuned to a wide range of compression specific loss functions.", "phrases": ["compression", "grammar extraction method", "translation model", "statistical method", "paraphrasing"], "overall_score": 2.2662927176979966, "scores": [1.2455651916237596, 0.610184517762401, 0.5929151415000846, 0.5842873735964984, 0.5809800005946031], "rank_score": 0.7227864450154693} -{"id": "cotterell-etal-2016-morphological", "title": "Morphological Smoothing and Extrapolation of Word Embeddings", "abstract": "Languages with rich inflectional morphology exhibit lexical data sparsity, since the word used to express a given concept will vary with the syntactic context. For instance, each count noun in Czech has 12 forms (where English uses only singular and plural). Even in large corpora, we are unlikely to observe all inflections of a given lemma. This reduces the vocabulary coverage of methods that induce continuous representations for words from distributional corpus information. We solve this problem by exploiting existing morphological resources that can enumerate a word\u2019s component morphemes. We present a latentvariable Gaussian graphical model that allows us to extrapolate continuous representations for words not observed in the training corpus, as well as smoothing the representations provided for the observed words. The latent variables represent embeddings of morphemes, which combine to create embeddings of words. Over several languages and training sizes, our model improves the embeddings for words, when evaluated on an analogy task, skip-gram predictive accuracy, and word similarity.", "phrases": ["word embedding", "morphological resource", "graphical model", "morpheme-based post-processor"], "overall_score": 1.907123839330162, "scores": [0.9959671280783015, 0.8323192884153957, 0.5408740799188032, 0.5214532358904607], "rank_score": 0.7226534330757403} -{"id": "meurers-etal-2011-evaluating", "title": "Evaluating Answers to Reading Comprehension Questions in Context: Results for German and the Role of Information Structure", "abstract": "Reading comprehension activities are an authentic task including a rich, language-based context, which makes them an interesting real-life challenge for research into automatic content analysis. For textual entailment research, content assessment of reading comprehension exercises provides an interesting opportunity for extrinsic, real-purpose evaluation, which also supports the integration of context and task information into the analysis. \n \nIn this paper, we discuss the first results for content assessment of reading comprehension activities for German and present results which are competitive with the current state of the art for English. Diving deeper into the results, we provide an analysis in terms of the different question types and the ways in which the information asked for is encoded in the text. 
\n \nWe then turn to analyzing the role of the question and argue that the surface-based account of information that is given in the question should be replaced with a more sophisticated, linguistically informed analysis of the information structuring of the answer in the context of the question that it is a response to.", "phrases": ["comprehension question", "content assessment", "short answer", "language learner"], "overall_score": 1.9065077228302325, "scores": [0.9446295850560914, 0.8487804493709592, 0.5543587320785298, 0.5419111224996006], "rank_score": 0.7224199722512953} -{"id": "hu-etal-2013-unsupervised", "title": "Unsupervised Induction of Contingent Event Pairs from Film Scenes", "abstract": "Human engagement in narrative is partially driven by reasoning about discourse relations between narrative events, and the expectations about what is likely to happen next that results from such reasoning. Researchers in NLP have tackled modeling such expectations from a range of perspectives, including treating it as the inference of the CONTINGENT discourse relation, or as a type of common-sense causal reasoning. Our approach is to model likelihood between events by drawing on several of these lines of previous work. We implement and evaluate different unsupervised methods for learning event pairs that are likely to be CONTINGENT on one another. We refine event pairs that we learn from a corpus of film scene descriptions utilizing web search counts, and evaluate our results by collecting human judgments of contingency. Our results indicate that the use of web search counts increases the average accuracy of our best method to 85.64% over a baseline of 50%, as compared to an average accuracy of 75.15% without web search.", "phrases": ["contingency", "event pair", "film scene description", "causal potential"], "overall_score": 1.587206119866261, "scores": [0.9354691873074976, 0.8614423872387211, 0.5690843153149796, 0.5234786522276986], "rank_score": 0.7223686355222243} -{"id": "kaji-2003-word", "title": "Word Sense Acquisition from Bilingual Comparable Corpora", "abstract": "Manually constructing an inventory of word senses has suffered from problems including high cost, arbitrary assignment of meaning to words, and mismatch to domains. To overcome these problems, we propose a method to assign word meaning from a bilingual comparable corpus and a bilingual dictionary. It clusters second-language translation equivalents of a first-language target word on the basis of their translingually aligned distribution patterns. Thus it produces a hierarchy of corpus-relevant meanings of the target word, each of which is defined with a set of translation equivalents. The effectiveness of the method has been demonstrated through an experiment using a comparable corpus consisting of Wall Street Journal and Nihon Keizai Shimbun corpora together with the EDR bilingual dictionary.", "phrases": ["comparable corpora", "clustering", "word sense acquisition"], "overall_score": 1.294234968422807, "scores": [1.0010999516584547, 0.6083070496934326, 0.5575718660410721], "rank_score": 0.7223262891309865} -{"id": "suresh-ong-2021-negatives", "title": "Not All Negatives are Equal: Label-Aware Contrastive Loss for Fine-grained Text Classification", "abstract": "Fine-grained classification involves dealing with datasets with larger number of classes with subtle differences between them. 
Guiding the model to focus on differentiating dimensions between these commonly confusable classes is key to improving performance on fine-grained tasks. In this work, we analyse the contrastive fine-tuning of pre-trained language models on two fine-grained text classification tasks, emotion classification and sentiment analysis. We adaptively embed class relationships into a contrastive objective function to help differently weigh the positives and negatives, and in particular, weighting closely confusable negatives more than less similar negative examples. We find that Label-aware Contrastive Loss outperforms previous contrastive methods, in the presence of larger number and/or more confusable classes, and helps models to produce output distributions that are more differentiated.", "phrases": ["negative", "contrastive loss", "fine-grained classification"], "overall_score": 1.000649464625201, "scores": [0.8122633845068968, 0.7907846920300089, 0.5623999538875548], "rank_score": 0.7218160101414869} -{"id": "baldwin-etal-2010-panlex", "title": "PanLex and LEXTRACT: Translating all Words of all Languages of the World", "abstract": "PanLex is a lemmatic translation resource which combines a large number of translation dictionaries and other translingual lexical resources. It currently covers 1353 language varieties and 12M expressions, but aims to cover all languages and up to 350M expressions. This paper describes the resource and current applications of it, as well as lextract, a new effort to expand the coverage of PanLex via semi-automatic dictionary scraping.", "phrases": ["lextract", "world", "bilingual dictionary", "database"], "overall_score": 1.730739040251304, "scores": [0.8758249104316318, 0.8595946424896865, 0.610971812428736, 0.5407055981991709], "rank_score": 0.7217742408873062} -{"id": "choe-charniak-2013-naive", "title": "Naive Bayes Word Sense Induction", "abstract": "We introduce an extended naive Bayes model for word sense induction (WSI) and apply it to a WSI task. The extended model incorporates the idea the words closer to the target word are more relevant in predicting its sense. The proposed model is very simple yet effective when evaluated on SemEval-2010 WSI data.", "phrases": ["naive bayes model", "wsi", "target word"], "overall_score": 0.7928055392990273, "scores": [0.9819191243510884, 0.612507896375752, 0.5705010821442177], "rank_score": 0.7216427009570193} -{"id": "jiang-etal-2018-chengyu", "title": "Chengyu Cloze Test", "abstract": "We present a neural recommendation model for Chengyu, which is a special type of Chinese idiom. Given a query, which is a sentence with an empty slot where the Chengyu is taken out, our model will recommend the best Chengyu candidate that best fits the slot context. The main challenge lies in that the literal meaning of a Chengyu is usually very different from it's figurative meaning. We propose a new neural approach to leverage the definition of each Chengyu and incorporate it as background knowledge. Experiments on both Chengyu cloze test and coherence checking in college entrance exams show that our system achieves 89.5% accuracy on cloze test and outperforms human subjects who attended competitive universities in China. 
We will make all of our data sets and resources publicly available as a new benchmark for research purposes.", "phrases": ["cloze test", "idiom", "chengyu"], "overall_score": 1.404111876956128, "scores": [0.8916804548053202, 0.6607534093790773, 0.6122784340239035], "rank_score": 0.7215707660694336} -{"id": "ye-etal-2020-coreferential", "title": "Coreferential Reasoning Learning for Language Representation", "abstract": "Language representation models such as BERT could effectively capture contextual semantic information from plain text, and have been proved to achieve promising results in lots of downstream NLP tasks with appropriate fine-tuning. However, most existing language representation models cannot explicitly handle coreference, which is essential to the coherent understanding of the whole discourse. To address this issue, we present CorefBERT, a novel language representation model that can capture the coreferential relations in context. The experimental results show that, compared with existing baseline models, CorefBERT can achieve significant improvements consistently on various downstream NLP tasks that require coreferential reasoning, while maintaining comparable performance to previous models on other common NLP tasks. The source code and experiment details of this paper can be obtained from .", "phrases": ["language representation model", "corefbert", "coreferential relation", "pre-training"], "overall_score": 1.7918663005837674, "scores": [0.9415988633584971, 0.8670413284765277, 0.5481325930033469, 0.5276274009579688], "rank_score": 0.7211000464490851} -{"id": "liu-etal-2007-forest", "title": "Forest-to-String Statistical Translation Rules", "abstract": "In this paper, we propose forest-to-string rules to enhance the expressive power of tree-to-string translation models. A forestto-string rule is capable of capturing nonsyntactic phrase pairs by describing the correspondence between multiple parse trees and one string. To integrate these rules into tree-to-string translation models, auxiliary rules are introduced to provide a generalization level. Experimental results show that, on the NIST 2005 Chinese-English test set, the tree-to-string model augmented with forest-to-string rules achieves a relative improvement of 4.3% in terms of BLEU score over the original model which allows treeto-string rules only.", "phrases": ["expressive power", "translation model", "auxiliary rule", "tree-to-string model", "forest"], "overall_score": 1.8492200834117087, "scores": [1.287466983655366, 0.5857330607797685, 0.584592276757579, 0.5795430308323342, 0.5674533312919472], "rank_score": 0.7209577366633989} -{"id": "jin-etal-2020-unsupervised", "title": "Unsupervised Morphological Paradigm Completion", "abstract": "We propose the task of unsupervised morphological paradigm completion. Given only raw text and a lemma list, the task consists of generating the morphological paradigms, i.e., all inflected forms, of the lemmas. From a natural language processing (NLP) perspective, this is a challenging unsupervised task, and high-performing systems have the potential to improve tools for low-resource languages or to assist linguistic annotators. From a cognitive science perspective, this can shed light on how children acquire morphological knowledge. We further introduce a system for the task, which generates morphological paradigms via the following steps: (i) EDIT TREE retrieval, (ii) additional lemma retrieval, (iii) paradigm size discovery, and (iv) inflection generation. 
We perform an evaluation on 14 typologically diverse languages. Our system outperforms trivial baselines with ease and, for some languages, even obtains a higher accuracy than minimally supervised systems.", "phrases": ["morphological paradigm completion", "annotator", "following step", "paradigm size discovery", "baseline system"], "overall_score": 1.7272704397841676, "scores": [1.0582518214848584, 0.8413375238839843, 0.5764663183112401, 0.5649120171535691, 0.5606709298643537], "rank_score": 0.7203277221396011} -{"id": "zhang-etal-2013-beyond", "title": "Beyond Left-to-Right: Multiple Decomposition Structures for SMT", "abstract": "Standard phrase-based translation models do not explicitly model context dependence between translation units. As a result, they rely on large phrase pairs and target language models to recover contextual e ects in translation. In this work, we explore n-gram models over Minimal Translation Units (MTUs) to explicitly capture contextual dependencies across phrase boundaries in the channel model. As there is no single best direction in which contextual information should flow, we explore multiple decomposition structures as well as dynamic bidirectional decomposition. The resulting models are evaluated in an intrinsic task of lexical selection for MT as well as a full MT system, through n-best reranking. These experiments demonstrate that additional contextual modeling does indeed benefit a phrase-based system and that the direction of conditioning is important. Integrating multiple conditioning orders provides consistent benefit, and the most important directions di er by language pair.", "phrases": ["multiple decomposition structure", "language model", "markov chain ordering", "tuple"], "overall_score": 1.657857321276359, "scores": [0.9776260985209936, 0.837711436249123, 0.5430105917303157, 0.5216450191524845], "rank_score": 0.7199982864132292} -{"id": "hossain-etal-2020-covidlies", "title": "COVIDLies: Detecting COVID-19 Misinformation on Social Media", "abstract": "The ongoing pandemic has heightened the need for developing tools to flag COVID-19-related misinformation on the internet, specifically on social media such as Twitter. However, due to novel language and the rapid change of information, existing misinformation detection datasets are not effective for evaluating systems designed to detect misinformation on this topic. Misinformation detection can be divided into two sub-tasks: (i) retrieval of misconceptions relevant to posts being checked for veracity, and (ii) stance detection to identify whether the posts Agree, Disagree, or express No Stance towards the retrieved misconceptions. To facilitate research on this task, we release COVIDLies ( ), a dataset of 6761 expert-annotated tweets to evaluate the performance of misinformation detection systems on 86 different pieces of COVID-19 related misinformation. We evaluate existing NLP systems on this dataset, providing initial benchmarks and identifying key challenges for future models to improve upon.", "phrases": ["covid-19 misinformation", "expert-annotated tweet", "covidlie", "social medium"], "overall_score": 1.8460163558109492, "scores": [0.9128014835419994, 0.8345316766200264, 0.5694842011816598, 0.5620174202332939], "rank_score": 0.7197086953942449} -{"id": "evang-etal-2013-elephant", "title": "Elephant: Sequence Labeling for Word and Sentence Segmentation", "abstract": "Tokenization is widely regarded as a solved problem due to the high accuracy that rulebased tokenizers achieve. 
But rule-based tokenizers are hard to maintain and their rules language specific. We show that highaccuracy word and sentence segmentation can be achieved by using supervised sequence labeling on the character level combined with unsupervised feature learning. We evaluated our method on three languages and obtained error rates of 0.27 \u2030 (English), 0.35 \u2030 (Dutch) and 0.76 \u2030 (Italian) for our best models.", "phrases": ["sentence segmentation", "tokenization", "dutch", "elephant"], "overall_score": 1.2892022623209085, "scores": [0.9714092007097952, 0.8014052375855685, 0.56676209351579, 0.5384933976896774], "rank_score": 0.7195174823752077} -{"id": "heilman-etal-2008-analysis", "title": "An Analysis of Statistical Models and Features for Reading Difficulty Prediction", "abstract": "A reading difficulty measure can be described as a function or model that maps a text to a numerical value corresponding to a difficulty or grade level. We describe a measure of readability that uses a combination of lexical features and grammatical features that are derived from subtrees of syntactic parses. We also tested statistical models for nominal, ordinal, and interval scales of measurement. The results indicate that a model for ordinal regression, such as the proportional odds model, using a combination of grammatical and lexical features is most effective at predicting reading difficulty.", "phrases": ["statistical model", "difficulty", "grade level", "grammatical feature"], "overall_score": 1.2891637401156635, "scores": [0.8790964409878606, 0.8712035204809891, 0.5797075050171935, 0.5479764644063654], "rank_score": 0.7194959827231022} -{"id": "purver-etal-2009-cascaded", "title": "Cascaded Lexicalised Classifiers for Second-Person Reference Resolution", "abstract": "This paper examines the resolution of the second person English pronoun you in multi-party dialogue. Following previous work, we attempt to classify instances as generic or referential, and in the latter case identify the singular or plural addressee. We show that accuracy and robustness can be improved by use of simple lexical features, capturing the intuition that different uses and addressees are associated with different vocabularies; and we show that there is an advantage to treating referentiality and addressee identification as separate (but connected) problems.", "phrases": ["second-person reference resolution", "referentiality", "second-person pronoun"], "overall_score": 1.399660251070148, "scores": [0.93342248094788, 0.6271407692201334, 0.5972859985493209], "rank_score": 0.7192830829057781} -{"id": "yamada-baldwin-2004-automatic", "title": "Automatic Discovery of Telic and Agentive Roles from Corpus Data", "abstract": "We present two methods for automatically discovering the telic and agentive roles of nouns from corpus data. These relations form part of the qualia structure assumed in generative lexicon theory, where the telic role represents a typical purpose of the entity and the agentive role represents the origin of the entity. The first discovery method uses hand-generated templates for each role type, and the second employs a supervised machine-learning technique. To evaluate the effectiveness of the two methods, we took a sample of 30 nouns, selected 50 verbs for each, and then generated a ranked list of verbs for a given noun. 
Using a variant of Spearman\u2019s rank correlation, we demonstrate the ability of the methods to identify qualia structure.", "phrases": ["telic", "corpus data", "noun", "qualia structure", "regular expression"], "overall_score": 1.1573574778096358, "scores": [0.8597945027934437, 0.8283376064814435, 0.848935184270845, 0.5372249096470407, 0.5212409604918655], "rank_score": 0.7191066327369275} -{"id": "king-etal-2014-iucl", "title": "The IUCL+ System: Word-Level Language Identification via Extended Markov Models", "abstract": "We describe the IUCL+ system for the shared task of the First Workshop on Computational Approaches to Code Switching (Solorio et al., 2014), in which participants were challenged to label each word in Twitter texts as a named entity or one of two candidate languages. Our system combines character n-gram probabilities, lexical probabilities, word label transition probabilities and existing named entity recognitiontools within a Markovmodel framework that weights these components and assigns a label. Our approach is language-independent, and we submitted results for all data sets (five test sets and three \u201csurprise\u201d sets, covering four language pairs), earning the highest accuracy score on the tweet level on two language pairs (Mandarin-English, Arabicdialects 1 & 2) and one of the surprise sets (Arabic-dialects).", "phrases": ["iucl+ system", "lexical probability", "markov model framework", "entity recognition tool"], "overall_score": 0.9967123802725972, "scores": [0.8229512318577681, 0.9215037568806469, 0.5679492740488051, 0.5634997536366018], "rank_score": 0.7189760041059554} -{"id": "zens-etal-2012-systematic", "title": "A Systematic Comparison of Phrase Table Pruning Techniques", "abstract": "When trained on very large parallel corpora, the phrase table component of a machine translation system grows to consume vast computational resources. In this paper, we introduce a novel pruning criterion that places phrase table pruning on a sound theoretical foundation. Systematic experiments on four language pairs under various data conditions show that our principled approach is superior to existing ad hoc pruning methods.", "phrases": ["phrase table", "pruning technique", "translation quality"], "overall_score": 1.5797499339058976, "scores": [0.8984794264177809, 0.6429676395381232, 0.6154784711975484], "rank_score": 0.7189751790511508} -{"id": "palmer-etal-2007-sequencing", "title": "A Sequencing Model for Situation Entity Classification", "abstract": "Situation entities (SEs) are the events, states, generic statements, and embedded facts and propositions introduced to a discourse by clauses of text. We report on the first datadriven models for labeling clauses according to the type of SE they introduce. SE classification is important for discourse mode identification and for tracking the temporal progression of a discourse. We show that (a) linguistically-motivated cooccurrence features and grammatical relation information from deep syntactic analysis improve classification accuracy and (b) using a sequencing model provides improvements over assigning labels based on the utterance alone. 
We report on genre effects which support the analysis of discourse modes having characteristic distributions and sequences of SEs.", "phrases": ["situation entity", "clause", "maximum entropy model", "brown corpus"], "overall_score": 1.3985941150676138, "scores": [0.8792401093629555, 0.8429370623518264, 0.6295003395908157, 0.5232632782197429], "rank_score": 0.7187351973813351} -{"id": "tan-etal-2019-multilingual", "title": "Multilingual Neural Machine Translation with Language Clustering", "abstract": "Multilingual neural machine translation (NMT), which translates multiple languages using a single model, is of great practical importance due to its advantages in simplifying the training process, reducing online maintenance costs, and enhancing low-resource and zero-shot translation. Given there are thousands of languages in the world and some of them are very different, it is extremely burdensome to handle them all in a single model or use a separate model for each language pair. Therefore, given a fixed resource budget, e.g., the number of models, how to determine which languages should be supported by one model is critical to multilingual NMT, which, unfortunately, has been ignored by previous work. In this work, we develop a framework that clusters languages into different groups and trains one multilingual model for each cluster. We study two methods for language clustering: (1) using prior knowledge, where we cluster languages according to language family, and (2) using language embedding, in which we represent each language by an embedding vector and cluster them in the embedding space. In particular, we obtain the embedding vectors of all the languages by training a universal neural machine translation model. Our experiments on 23 languages show that the first clustering method is simple and easy to understand but leading to suboptimal translation accuracy, while the second method sufficiently captures the relationship among languages well and improves the translation accuracy for almost all the languages over baseline methods.", "phrases": ["neural machine translation", "language clustering", "zero-shot translation", "world", "multilingual setting"], "overall_score": 2.0773695511333408, "scores": [0.9032235580640864, 1.0408923730317774, 0.5837998177423097, 0.5373566212022609, 0.5283303308201133], "rank_score": 0.7187205401721094} -{"id": "hu-etal-2019-looking", "title": "Are You Looking? Grounding to Multiple Modalities in Vision-and-Language Navigation", "abstract": "Vision-and-Language Navigation (VLN) requires grounding instructions, such as \u201cturn right and stop at the door\u201d, to routes in a visual environment. The actual grounding can connect language to the environment through multiple modalities, e.g. \u201cstop at the door\u201d might ground into visual objects, while \u201cturn right\u201d might rely only on the geometric structure of a route. We investigate where the natural language empirically grounds under two recent state-of-the-art VLN models. Surprisingly, we discover that visual features may actually hurt these models: models which only use route structure, ablating visual features, outperform their visual counterparts in unseen new environments on the benchmark Room-to-Room dataset. 
To better use all the available modalities, we propose to decompose the grounding procedure into a set of expert models with access to different modalities (including object detections) and ensemble them at prediction time, improving the performance of state-of-the-art models on the VLN task.", "phrases": ["grounding", "modality", "vision-and-language navigation", "visual input", "agent"], "overall_score": 1.2875346400680006, "scores": [1.0046633524231163, 0.9043327012785907, 0.5683779976399344, 0.5641968994279397, 0.551362872604351], "rank_score": 0.7185867646747864} -{"id": "zielinski-mutschke-2018-towards", "title": "Towards a Gold Standard Corpus for Variable Detection and Linking in Social Science Publications", "abstract": "In this paper, we describe our effort to create a new corpus for the evaluation of detecting and linking so-called survey variables in social science publications ( e.g. , \u201dDo you believe in Heaven?\u201d ). The task is to recognize survey variable mentions in a given text, disambiguate them, and link them to the corresponding variable within a knowledge base. Since there are generally hundreds of candidates to link to and due to the wide variety of forms they can take, this is a challenging task within NLP. The contribution of our work is the \ufb01rst gold standard corpus for the variable detection and linking task. We describe the annotation guidelines and the annotation process. The produced corpus is multilingual \u2013 German and English \u2013 and includes manually curated word and phrase alignments. Moreover, it includes text samples that could not be assigned to any variables, denoted as negative examples. Based on the new dataset, we conduct an evaluation of several state-of-the-art text classi\ufb01cation and textual similarity methods. The annotated corpus is made available along with an open-source baseline system for variable mention identi\ufb01cation and linking.", "phrases": ["variable detection", "linking", "survey variable"], "overall_score": 0.7892470395963076, "scores": [0.8170358626440551, 0.8153219598345337, 0.5228530223404028], "rank_score": 0.7184036149396639} -{"id": "morin-daille-2012-revising", "title": "Revising the Compositional Method for Terminology Acquisition from Comparable Corpora", "abstract": "In this paper, we present a new method that improves the alignment of equivalent terms monolingually acquired from bilingual comparable corpora: the Compositional Method with Context-Based Projection (CMCBP). Our overall objective is to identify and to translate high specialized terminology made up of multi-word terms acquired from comparable corpora. Our evaluation in the medical domain and for two pairs of languages demonstrates that CMCBP outperforms the state-of-art compositional approach commonly used for translationally equivalent multi-word term discovery from comparable corpora.", "phrases": ["compositional method", "comparable corpora", "multi-word term", "coverage"], "overall_score": 1.2863550030419586, "scores": [0.8647271790345437, 0.8591585442082792, 0.60557337388595, 0.5422544897315438], "rank_score": 0.7179283967150791} -{"id": "croce-etal-2020-gan", "title": "GAN-BERT: Generative Adversarial Learning for Robust Text Classification with a Bunch of Labeled Examples", "abstract": "Recent Transformer-based architectures, e.g., BERT, provide impressive results in many Natural Language Processing tasks. However, most of the adopted benchmarks are made of (sometimes hundreds of) thousands of examples. 
In many real scenarios, obtaining high-quality annotated data is expensive and time consuming; in contrast, unlabeled examples characterizing the target task can be, in general, easily collected. One promising method to enable semi-supervised learning has been proposed in image processing, based on Semi-Supervised Generative Adversarial Networks. In this paper, we propose GAN-BERT that extends the fine-tuning of BERT-like architectures with unlabeled data in a generative adversarial setting. Experimental results show that the requirement for annotated examples can be drastically reduced (up to only 50-100 annotated examples), still obtaining good performances in several sentence classification tasks.", "phrases": ["fine-tuning", "unlabelled data", "adversarial setting", "gan-bert"], "overall_score": 1.3967315864741838, "scores": [1.0871959282414128, 0.6108133288886405, 0.603182404273747, 0.5699205266944203], "rank_score": 0.7177780470245552} -{"id": "candito-seddah-2010-parsing", "title": "Parsing Word Clusters", "abstract": "We present and discuss experiments in statistical parsing of French, where terminal forms used during training and parsing are replaced by more general symbols, particularly clusters of words obtained through unsupervised linear clustering. We build on the work of Candito and Crabbe (2009) who proposed to use clusters built over slightly coarsened French inflected forms. We investigate the alternative method of building clusters over lemma/part-of-speech pairs, using a raw corpus automatically tagged and lemmatized. We find that both methods lead to comparable improvement over the baseline (we obtain F1=86.20% and F1=86.21% respectively, compared to a baseline of F1=84.10%). Yet, when we replace gold lemma/POS pairs with their corresponding cluster, we obtain an upper bound (F1=87.80) that suggests room for improvement for this technique, should tagging/lemmatisation performance increase for French. \n \nWe also analyze the improvement in performance for both techniques with respect to word frequency. We find that replacing word forms with clusters improves attachment performance for words that are originally either unknown or low-frequency, since these words are replaced by cluster symbols that tend to have higher frequencies. Furthermore, clustering also helps significantly for medium to high frequency words, suggesting that training on word clusters leads to better probability estimates for these words.", "phrases": ["word cluster", "french", "terminal form"], "overall_score": 0.9950446907328566, "scores": [0.9935774577137856, 0.5804731920475874, 0.5792684114134042], "rank_score": 0.7177730203915923} -{"id": "xu-etal-2021-discriminative", "title": "Discriminative Reasoning for Document-level Relation Extraction", "abstract": "Document-level relation extraction (DocRE) models generally use graph networks to implicitly model the reasoning skill (i.e., pattern recognition, logical reasoning, coreference reasoning, etc.) related to the relation between one entity pair in a document. In this paper, we propose a novel discriminative reasoning framework to explicitly model the paths of these reasoning skills between each entity pair in this document. Thus, a discriminative reasoning network is designed to estimate the relation probability distribution of different reasoning paths based on the constructed graph and vectorized document contexts for each entity pair, thereby recognizing their relation. 
Experimental results show that our method outperforms the previous state-of-the-art performance on the large-scale DocRE dataset. The code is publicly available at https://github.com/xwjim/DRN.", "phrases": ["document-level relation extraction", "docre", "logical reasoning"], "overall_score": 0.7885039470392802, "scores": [1.0092910635309484, 0.5882374394945685, 0.5556531658102123], "rank_score": 0.717727222945243} -{"id": "reisinger-etal-2015-semantic", "title": "Semantic Proto-Roles", "abstract": "We present the first large-scale, corpus based verification of Dowty's seminal theory of proto-roles. Our results demonstrate both the need for and the feasibility of a property-based annotation scheme of semantic relationships, as opposed to the currently dominant notion of categorical roles.", "phrases": ["proto-role", "decompositional semantic", "protoroles"], "overall_score": 1.7209376939324867, "scores": [0.9602529675679795, 0.6145581015878745, 0.5782492168225237], "rank_score": 0.7176867619927926} -{"id": "liu-etal-2021-dexperts", "title": "DExperts: Decoding-Time Controlled Text Generation with Experts and Anti-Experts", "abstract": "Despite recent advances in natural language generation, it remains challenging to control attributes of generated text. We propose DExperts: Decoding-time Experts, a decoding-time method for controlled text generation that combines a pretrained language model with \u201cexpert\u201d LMs and/or \u201canti-expert\u201d LMs in a product of experts. Intuitively, under the ensemble, tokens only get high probability if they are considered likely by the experts, and unlikely by the anti-experts. We apply DExperts to language detoxification and sentiment-controlled generation, where we outperform existing controllable generation methods on both automatic and human evaluations. Moreover, because DExperts operates only on the output of the pretrained LM, it is effective with (anti-)experts of smaller size, including when operating on GPT-3. Our work highlights the promise of tuning small LMs on text with (un)desirable attributes for efficient decoding-time steering.", "phrases": ["expert", "language model", "dexpert"], "overall_score": 1.3963869434367946, "scores": [0.788648138903705, 0.831965617998856, 0.5321890497142335], "rank_score": 0.7176009355389316} -{"id": "rameshkumar-bailey-2020-storytelling", "title": "Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset", "abstract": "This paper describes the Critical Role Dungeons and Dragons Dataset (CRD3) and related analyses. Critical Role is an unscripted, live-streamed show where a fixed group of people play Dungeons and Dragons, an open-ended role-playing game. The dataset is collected from 159 Critical Role episodes transcribed to text dialogues, consisting of 398,682 turns. It also includes corresponding abstractive summaries collected from the Fandom wiki. The dataset is linguistically unique in that the narratives are generated entirely through player collaboration and spoken interaction. For each dialogue, there are a large number of turns, multiple abstractive summaries with varying levels of detail, and semantic ties to the previous dialogues. 
In addition, we provide a data augmentation method that produces 34,243 summary-dialogue chunk pairs to support current neural ML approaches, and we provide an abstractive summarization benchmark and evaluation.", "phrases": ["critical role dungeons", "dragons dataset", "conversation"], "overall_score": 1.2856612575473725, "scores": [0.8058067502100786, 0.7898971263647657, 0.5569197533724412], "rank_score": 0.7175412099824285} -{"id": "blacoe-lapata-2012-comparison", "title": "A Comparison of Vector-based Representations for Semantic Composition", "abstract": "In this paper we address the problem of modeling compositional meaning for phrases and sentences using distributional methods. We experiment with several possible combinations of representation and composition, exhibiting varying degrees of sophistication. Some are shallow while others operate over syntactic structure, rely on parameter learning, or require access to very large corpora. We find that shallow approaches are as good as more computationally intensive alternatives with regards to two particular tests: (1) phrase similarity and (2) paraphrase detection. The sizes of the involved training corpora and the generated vectors are not as important as the fit between the meaning representation and compositional method.", "phrases": ["operation", "paraphrase detection", "composition method", "semantic similarity"], "overall_score": 2.1494632496468307, "scores": [0.8298632402181814, 0.8285538542435235, 0.6090754635000638, 0.6025412813396482], "rank_score": 0.7175084598253543} -{"id": "jin-etal-2019-unsupervised", "title": "Unsupervised Learning of PCFGs with Normalizing Flow", "abstract": "Unsupervised PCFG inducers hypothesize sets of compact context-free rules as explanations for sentences. PCFG induction not only provides tools for low-resource languages, but also plays an important role in modeling language acquisition (Bannard et al., 2009; Abend et al. 2017). However, current PCFG induction models, using word tokens as input, are unable to incorporate semantics and morphology into induction, and may encounter issues of sparse vocabulary when facing morphologically rich languages. This paper describes a neural PCFG inducer which employs context embeddings (Peters et al., 2018) in a normalizing flow model (Dinh et al., 2015) to extend PCFG induction to use semantic and morphological information. Linguistically motivated sparsity and categorical distance constraints are imposed on the inducer as regularization. Experiments show that the PCFG induction model with normalizing flow produces grammars with state-of-the-art accuracy on a variety of different languages. Ablation further shows a positive effect of normalizing flow, context embeddings and proposed regularizers.", "phrases": ["pcfgs", "normalizing flow", "induction model", "context embedding"], "overall_score": 1.6517527334340836, "scores": [0.9072064485639828, 0.8541201434157963, 0.5600358388898102, 0.5480259595265516], "rank_score": 0.7173470975990353} -{"id": "ling-etal-2013-paraphrasing", "title": "Paraphrasing 4 Microblog Normalization", "abstract": "Compared to the edited genres that have played a central role in NLP research, microblog texts use a more informal register with nonstandard lexical items, abbreviations, and free orthographic variation. When confronted with such input, conventional text analysis tools often perform poorly. 
Normalization \u2014 replacing orthographically or lexically idiosyncratic forms with more standard variants \u2014 can improve performance. We propose a method for learning normalization rules from machine translations of a parallel corpus of microblog messages. To validate the utility of our approach, we evaluate extrinsically, showing that normalizing English tweets and then translating improves translation quality (compared to translating unnormalized text) using three standard web translation services as well as a phrase-based translation system trained on parallel microblog data.", "phrases": ["normalization", "parallel corpus", "microblog message", "twitter"], "overall_score": 1.6499296820167646, "scores": [1.0863554201236587, 0.6166672216926395, 0.5841466141019453, 0.5790521697948275], "rank_score": 0.7165553564282677} -{"id": "biran-etal-2012-detecting", "title": "Detecting Influencers in Written Online Conversations", "abstract": "It has long been established that there is a correlation between the dialog behavior of a participant and how influential he or she is perceived to be by other discourse participants. In this paper we explore the characteristics of communication that make someone an opinion leader and develop a machine learning based approach for the automatic identification of discourse participants that are likely to be influencers in online communication. Our approach relies on identification of three types of conversational behavior: persuasion, agreement/disagreement, and dialog patterns.", "phrases": ["influencer", "participant", "disagreement", "dialog pattern"], "overall_score": 1.3937550158743541, "scores": [0.9327806111248858, 0.8389631322335993, 0.5568758357491039, 0.5363739902018353], "rank_score": 0.7162483923273562} -{"id": "amir-etal-2019-mental", "title": "Mental Health Surveillance over Social Media with Digital Cohorts", "abstract": "The ability to track mental health conditions via social media opened the doors for large-scale, automated, mental health surveillance. However, inferring accurate population-level trends requires representative samples of the underlying population, which can be challenging given the biases inherent in social media data. While previous work has adjusted samples based on demographic estimates, the populations were selected based on specific outcomes, e.g. specific mental health conditions. We depart from these methods by conducting analyses over demographically representative digital cohorts of social media users. To validate this approach, we constructed a cohort of US-based Twitter users to measure the prevalence of depression and PTSD, and investigate how these illnesses manifest across demographic subpopulations. The analysis demonstrates that cohort-based studies can help control for sampling biases, contextualize outcomes, and provide deeper insights into the data.", "phrases": ["population", "depression", "mental health surveillance", "social medium data"], "overall_score": 1.393729149116766, "scores": [0.9320441575321089, 0.8424004996054597, 0.5637090956850022, 0.526786644951466], "rank_score": 0.7162350994435092} -{"id": "mostafazadeh-etal-2020-glucose", "title": "GLUCOSE: GeneraLized and COntextualized Story Explanations", "abstract": "When humans read or listen, they make implicit commonsense inferences that frame their understanding of what happened and why. 
As a step toward AI systems that can build similar mental models, we introduce GLUCOSE, a large-scale dataset of implicit commonsense causal knowledge, encoded as causal mini-theories about the world, each grounded in a narrative context. To construct GLUCOSE, we drew on cognitive psychology to identify ten dimensions of causal explanation, focusing on events, states, motivations, and emotions. Each GLUCOSE entry includes a story-specific causal statement paired with an inference rule generalized from the statement. This paper details two concrete contributions. First, we present our platform for effectively crowdsourcing GLUCOSE data at scale, which uses semi-structured templates to elicit causal explanations. Using this platform, we collected a total of ~670K specific statements and general rules that capture implicit commonsense knowledge about everyday situations. Second, we show that existing knowledge resources and pretrained language models do not include or readily predict GLUCOSE's rich inferential content. However, when state-of-the-art neural models are trained on this knowledge, they can start to make commonsense inferences on unseen stories that match humans' mental models.", "phrases": ["narrative context", "dimension", "causal explanation", "implicit commonsense knowledge", "language model"], "overall_score": 1.4893107550174147, "scores": [0.9953360087532488, 0.9051593510357463, 0.5956127081798186, 0.5507429028434871, 0.5341844301980697], "rank_score": 0.7162070802020741} -{"id": "khapra-etal-2011-takes", "title": "It Takes Two to Tango: A Bilingual Unsupervised Approach for Estimating Sense Distributions using Expectation Maximization", "abstract": "Several bilingual WSD algorithms which exploit translation correspondences between parallel corpora have been proposed. However, the availability of such parallel corpora itself is a tall task for some of the resource constrained languages of the world. We propose an unsupervised bilingual EM based algorithm which relies on the counts of translations to estimate sense distributions. No parallel or sense annotated corpora are needed. The algorithm relies on a synset-aligned bilingual dictionary and in-domain corpora from the two languages. A symmetric generalized Expectation Maximization formulation is used wherein the sense distributions of words in one language are estimated based on the raw counts of the words in the aligned synset in the target language. The overall performance of our algorithm when tested on 4 language-domain pairs is better than current state-of-the-art knowledge-based and bilingual unsupervised approaches.", "phrases": ["expectation maximization", "wsd", "parallel corpora", "other language"], "overall_score": 1.283244193335979, "scores": [0.8444201327290766, 0.8885397881661651, 0.5886498437430653, 0.543159118405666], "rank_score": 0.7161922207609932} -{"id": "lewis-xia-2008-automatically", "title": "Automatically Identifying Computationally Relevant Typological Features", "abstract": "In this paper we explore the potential for identifying computationally relevant typological features from a multilingual corpus of language data built from readily available language data collected off the Web. Our work builds on previous structural projection work, where we extend the work of projection to building individual CFGs for approximately 100 languages. We then use the CFGs to discover the values of typological parameters such as word order, the presence or absence of definite and indefinite determiners, etc. 
Our methods have the potential of being extended to many more languages and parameters, and can have significant effects on current research focused on tool and resource development for low-density languages and grammar induction from raw corpora.", "phrases": ["relevant typological feature", "web", "grammar induction"], "overall_score": 1.5728815403708185, "scores": [0.9697631574452703, 0.6090481131081078, 0.5687364447707657], "rank_score": 0.7158492384413813} -{"id": "loeff-etal-2006-discriminating", "title": "Discriminating Image Senses by Clustering with Multimodal Features", "abstract": "We discuss Image Sense Discrimination (ISD), and apply a method based on spectral clustering, using multimodal features from the image and text of the embedding web page. We evaluate our method on a new data set of annotated web images, retrieved with ambiguous query terms. Experiments investigate different levels of sense granularity, as well as the impact of text and image features, and global versus local text features.", "phrases": ["multimodal feature", "image sense discrimination", "sense disambiguation"], "overall_score": 1.488362632359629, "scores": [0.9347947310893046, 0.628912347478553, 0.5835463101818231], "rank_score": 0.7157511295832268} -{"id": "grunewald-friedrich-2020-robertnlp", "title": "RobertNLP at the IWPT 2020 Shared Task: Surprisingly Simple Enhanced UD Parsing for English", "abstract": "This paper presents our system at the IWPT 2020 Shared Task on Parsing into Enhanced Universal Dependencies. Using a biaffine classifier architecture (Dozat and Manning, 2017) which operates directly on finetuned RoBERTa embeddings, our parser generates enhanced UD graphs by predicting the best dependency label (or absence of a dependency) for each pair of tokens in the sentence. We address label sparsity issues by replacing lexical items in relations with placeholders at prediction time, later retrieving them from the parse in a rule-based fashion. In addition, we ensure structural graph constraints using a simple set of heuristics. On the English blind test data, our system achieves a very high parsing accuracy, ranking 1st out of 10 with an ELAS F1 score of 88.94%.", "phrases": ["iwpt", "shared task", "dependency label"], "overall_score": 0.7862519748993894, "scores": [0.7903637149622812, 0.7850065912109695, 0.5716618625254807], "rank_score": 0.7156773895662437} -{"id": "rello-etal-2012-elliphant", "title": "Elliphant: Improved Automatic Detection of Zero Subjects and Impersonal Constructions in Spanish", "abstract": "In pro-drop languages, the detection of explicit subjects, zero subjects and non-referential impersonal constructions is crucial for anaphora and co-reference resolution. While the identification of explicit and zero subjects has attracted the attention of researchers in the past, the automatic identification of impersonal constructions in Spanish has not been addressed yet and this work is the first such study. In this paper we present a corpus to underpin research on the automatic detection of these linguistic phenomena in Spanish and a novel machine learning-based methodology for their computational treatment. This study also provides an analysis of the features, discusses performance across two different genres and offers error analysis. 
The evaluation results show that our system performs better in detecting explicit subjects than alternative systems.", "phrases": ["automatic detection", "spanish", "pro-drop language"], "overall_score": 1.151722979622638, "scores": [0.8053665617643153, 0.793807013628127, 0.5476435911314567], "rank_score": 0.715605722174633} -{"id": "feng-lapata-2010-topic", "title": "Topic Models for Image Annotation and Text Illustration", "abstract": "Image annotation, the task of automatically generating description words for a picture, is a key component in various image search and retrieval applications. Creating image databases for model development is, however, costly and time consuming, since the keywords must be hand-coded and the process repeated for new collections. In this work we exploit the vast resource of images and documents available on the web for developing image annotation models without any human involvement. We describe a probabilistic model based on the assumption that images and their co-occurring textual data are generated by mixtures of latent topics. We show that this model outperforms previously proposed approaches when applied to image annotation and the related task of text illustration despite the noisy nature of our dataset.", "phrases": ["image annotation", "text illustration", "probabilistic model", "textual data", "latent topic"], "overall_score": 1.57224031874538, "scores": [0.9454434739900452, 0.8539302606341802, 0.6171941092283117, 0.5814042208734617, 0.5798149647898182], "rank_score": 0.7155574059031634} -{"id": "lukasik-etal-2020-text", "title": "Text Segmentation by Cross Segment Attention", "abstract": "Document and discourse segmentation are two fundamental NLP tasks pertaining to breaking up text into constituents, which are commonly used to help downstream tasks such as information retrieval or text summarization. In this work, we propose three transformer-based architectures and provide comprehensive comparisons with previously proposed approaches on three standard datasets. We establish a new state-of-the-art, reducing in particular the error rates by a large margin in all cases. We further analyze model sizes and find that we can build models with many fewer parameters while keeping good performance, thus facilitating real-world applications.", "phrases": ["segmentation", "cross segment attention", "fundamental nlp task"], "overall_score": 1.3923679806055396, "scores": [0.9640242230485486, 0.6000302096286294, 0.5825523589285335], "rank_score": 0.715535597201904} -{"id": "van-schijndel-schuler-2015-hierarchic", "title": "Hierarchic syntax improves reading time prediction", "abstract": "Previous work has debated whether humans make use of hierarchic syntax when processing language (Frank and Bod, 2011; Fossum and Levy, 2012). This paper uses an eye-tracking corpus to demonstrate that hierarchic syntax significantly improves reading time prediction over a strong n-gram baseline. 
This study shows that an interpolated 5-gram baseline can be made stronger by combining n-gram statistics over entire eye-tracking regions rather than simply using the last n-gram in each region, but basic hierarchic syntactic measures are still able to achieve significant improvements over this improved baseline.", "phrases": ["time prediction", "n-gram", "region", "hierarchic syntax"], "overall_score": 1.1514610918409913, "scores": [0.9149649325975532, 0.8437573401868494, 0.5661360917953708, 0.5369136440080731], "rank_score": 0.7154430021469615} -{"id": "bhattacharyya-etal-2021-energy", "title": "Energy-Based Reranking: Improving Neural Machine Translation Using Energy-Based Models", "abstract": "The discrepancy between maximum likelihood estimation (MLE) and task measures such as BLEU score has been studied before for autoregressive neural machine translation (NMT) and resulted in alternative training algorithms (Ranzato et al., 2016; Norouzi et al., 2016; Shen et al., 2016; Wu et al., 2018). However, MLE training remains the de facto approach for autoregressive NMT because of its computational efficiency and stability. Despite this mismatch between the training objective and task measure, we notice that the samples drawn from an MLE-based trained NMT support the desired distribution \u2013 there are samples with much higher BLEU score compared to the beam decoding output. To benefit from this observation, we train an energy-based model to mimic the behavior of the task measure (i.e., the energy-based model assigns lower energy to samples with higher BLEU score), which results in a re-ranking algorithm based on the samples drawn from NMT: energy-based re-ranking (EBR). We use both marginal energy models (over target sentence) and joint energy models (over both source and target sentences). Our EBR with the joint energy model consistently improves the performance of the Transformer-based NMT: +3.7 BLEU points on IWSLT'14 German-English, +3.37 BLEU points on Sinhala-English, +1.4 BLEU points on WMT'16 English-German tasks.", "phrases": ["reranker", "neural machine translation", "bleu score", "low energy"], "overall_score": 1.3916786421212621, "scores": [0.9133380387388766, 0.8470985995323357, 0.5669408213037079, 0.5333479296150887], "rank_score": 0.7151813472975022} -{"id": "ruokolainen-etal-2013-supervised", "title": "Supervised Morphological Segmentation in a Low-Resource Learning Setting using Conditional Random Fields", "abstract": "We discuss data-driven morphological segmentation, in which word forms are segmented into morphs, the surface forms of morphemes. Our focus is on a low-resource learning setting, in which only a small amount of annotated word forms are available for model training, while unannotated word forms are available in abundance. The current state-of-the-art methods 1) exploit both the annotated and unannotated data in a semi-supervised manner, and 2) learn morph lexicons and subsequently uncover segmentations by generating the most likely morph sequences. In contrast, we discuss 1) employing only the annotated data in a supervised manner, while entirely ignoring the unannotated data, and 2) directly learning to predict morph boundaries given their local sub-string contexts instead of learning the morph lexicons. Specifically, we employ conditional random fields, a popular discriminative log-linear model for segmentation. We present experiments on two data sets comprising five diverse languages. 
We show that the fully supervised boundary prediction approach outperforms the state-of-the-art semi-supervised morph lexicon approaches on all languages when using the same annotated data sets.", "phrases": ["small amount", "discriminative log-linear model", "supervised morphological segmentation", "crf"], "overall_score": 1.8339664824019146, "scores": [0.9117287449813682, 0.840372247896165, 0.5804670774584492, 0.5274751146365944], "rank_score": 0.7150107962431442} -{"id": "wan-etal-2005-searching", "title": "Searching for Grammaticality: Propagating Dependencies in the Viterbi Algorithm", "abstract": "In many text-to-text generation scenarios (for instance, summarisation), we encounter human-authored sentences that could be composed by recycling portions of related sentences to form new sentences. In this paper, we couch the generation of such sentences as a search problem. We investigate a statistical sentence generation method which recombines words to form new sentences. We propose an extension to the Viterbi algorithm designed to improve the grammaticality of generated sentences. Within a statistical framework, the extension favours those partially generated strings with a probable dependency tree structure. Our preliminary evaluations show that our approach generates less fragmented text than a bigram baseline.", "phrases": ["grammaticality", "viterbi algorithm", "fragmented text", "fluency"], "overall_score": 1.150015022454558, "scores": [0.9196829991543397, 0.8584508218303237, 0.5479478745706651, 0.5320963393221652], "rank_score": 0.7145445087193734} -{"id": "felice-specia-2012-linguistic", "title": "Linguistic Features for Quality Estimation", "abstract": "This paper describes a study on the contribution of linguistically-informed features to the task of quality estimation for machine translation at sentence level. A standard regression algorithm is used to build models using a combination of linguistic and non-linguistic features extracted from the input text and its machine translation. Experiments with English-Spanish translations show that linguistic features, although informative on their own, are not yet able to outperform shallower features based on statistics from the input text, its translation and additional corpora. However, further analysis suggests that linguistic information is actually useful but needs to be carefully combined with other features in order to produce better results.", "phrases": ["quality estimation", "linguistic feature", "other approach"], "overall_score": 1.7131717108331577, "scores": [0.9656295505860755, 0.6355995596265895, 0.5421151762546923], "rank_score": 0.7144480954891191} -{"id": "le-thanh-etal-2004-generating", "title": "Generating Discourse Structures for Written Text", "abstract": "This paper presents a system for automatically generating discourse structures from written text. The system is divided into two levels: sentence-level and text-level. The sentence-level discourse parser uses syntactic information and cue phrases to segment sentences into elementary discourse units and to generate discourse structures of sentences. At the text-level, constraints about textual adjacency and textual organization are integrated in a beam search in order to generate best discourse structures. The experiments were done with documents from the RST Discourse Treebank. 
The system shows promising results in a reasonable search space compared to the discourse trees generated by human analysts.", "phrases": ["syntactic information", "cue phrase", "textual adjacency", "organization"], "overall_score": 1.6447721168840905, "scores": [1.1619803280666117, 0.5760394952005187, 0.56674920330085, 0.5524927908363827], "rank_score": 0.7143154543510908} -{"id": "salway-touileb-2014-applying", "title": "Applying Grammar Induction to Text Mining", "abstract": "We report the first steps of a novel investigation into how a grammar induction algorithm can be modified and used to identify salient information structures in a corpus. The information structures are to be used as representations of semantic content for text mining purposes. We modify the learning regime of the ADIOS algorithm (Solan et al., 2005) so that text is presented as increasingly large snippets around key terms, and instances of selected structures are substituted with common identifiers in the input for subsequent iterations. The technique is applied to 1.4m blog posts about climate change which mention diverse topics and reflect multiple perspectives and different points of view. Observation of the resulting information structures suggests that they could be useful as representations of semantic content. Preliminary analysis shows that our modifications had a beneficial effect for inducing more useful structures.", "phrases": ["grammar induction", "salient information structure", "text mining purpose"], "overall_score": 0.9898448663815337, "scores": [0.9847223138553204, 0.6233895927282207, 0.533954513383515], "rank_score": 0.7140221399890186} -{"id": "bollegala-etal-2015-unsupervised", "title": "Unsupervised Cross-Domain Word Representation Learning", "abstract": "The meaning of a word varies from one domain to another. Despite this important domain dependence in word semantics, existing word representation learning methods are bound to a single domain. Given a pair of source-target domains, we propose an unsupervised method for learning domain-specific word representations that accurately capture the domain-specific aspects of word semantics. First, we select a subset of frequent words that occur in both domains as pivots. Next, we optimize an objective function that enforces two constraints: (a) for both source and target domain documents, pivots that appear in a document must accurately predict the co-occurring non-pivots, and (b) word representations learnt for pivots must be similar in the two domains. Moreover, we propose a method to perform domain adaptation using the learnt word representations. Our proposed method significantly outperforms competitive baselines including the state-of-the-art domain-insensitive word representations, and reports best sentiment classification accuracies for all domain-pairs in a benchmark dataset.", "phrases": ["word representation", "non-pivot", "source domain"], "overall_score": 1.7118280696813042, "scores": [0.9414765422442412, 0.6287666715238145, 0.5714200470509814], "rank_score": 0.7138877536063456} -{"id": "jacovi-etal-2018-understanding", "title": "Understanding Convolutional Neural Networks for Text Classification", "abstract": "We present an analysis into the inner workings of Convolutional Neural Networks (CNNs) for processing text. CNNs used for computer vision can be interpreted by projecting filters into image space, but for discrete sequence inputs CNNs remain a mystery. 
We aim to understand the method by which the networks process and classify text. We examine common hypotheses to this problem: that filters, accompanied by global max-pooling, serve as ngram detectors. We show that filters may capture several different semantic classes of ngrams by using different activation patterns, and that global max-pooling induces behavior which separates important ngrams from the rest. Finally, we show practical use cases derived from our findings in the form of model interpretability (explaining a trained model by deriving a concrete identity for each filter, bridging the gap between visualization tools in vision tasks and NLP) and prediction interpretability (explaining predictions).", "phrases": ["convolutional neural networks", "text classification", "cnn", "filter", "important ngram"], "overall_score": 1.5679202904408247, "scores": [0.9740765456195658, 0.9433794902949659, 0.5561555949893318, 0.548592054799333, 0.5457526957552591], "rank_score": 0.7135912762916912} -{"id": "kumar-etal-2019-topics", "title": "Topics to Avoid: Demoting Latent Confounds in Text Classification", "abstract": "Despite impressive performance on many text classification tasks, deep neural networks tend to learn frequent superficial patterns that are specific to the training data and do not always generalize well. In this work, we observe this limitation with respect to the task of native language identification. We find that standard text classifiers which perform well on the test set end up learning topical features which are confounds of the prediction task (e.g., if the input text mentions Sweden, the classifier predicts that the author's native language is Swedish). We propose a method that represents the latent topical confounds and a model which \u201cunlearns\u201d confounding features by predicting both the label of the input text and the confound; but we train the two predictors adversarially in an alternating fashion to learn a text representation that predicts the correct label but is less prone to using information about the confound. We show that this model generalizes better and learns features that are indicative of the writing style rather than the content.", "phrases": ["text classification task", "language identification", "topical confound"], "overall_score": 1.278465405536252, "scores": [0.9187854516906088, 0.6411675540735862, 0.5806223797596013], "rank_score": 0.713525128507932} -{"id": "she-etal-2014-back", "title": "Back to the Blocks World: Learning New Actions through Situated Human-Robot Dialogue", "abstract": "This paper describes an approach for a robotic arm to learn new actions through dialogue in a simplified blocks world. In particular, we have developed a three-tier action knowledge representation that on one hand, supports the connection between symbolic representations of language and continuous sensorimotor representations of the robot; and on the other hand, supports the application of existing planning algorithms to address novel situations. Our empirical studies have shown that, based on this representation the robot was able to learn and execute basic actions in the blocks world. 
When a human is engaged in a dialogue to teach the robot new actions, step-by-step instructions lead to better learning performance compared to one-shot instructions.", "phrases": ["action", "robot", "goal state"], "overall_score": 1.5676995307519697, "scores": [1.0529755474998812, 0.5639891686564068, 0.5235076965261051], "rank_score": 0.7134908042274644} -{"id": "faruqui-dyer-2015-non", "title": "Non-distributional Word Vector Representations", "abstract": "Data-driven representation learning for words is a technique of central importance in NLP. While indisputably useful as a source of features in downstream tasks, such vectors tend to consist of uninterpretable components whose relationship to the categories of traditional lexical semantic theories is tenuous at best. We present a method for constructing interpretable word vectors from hand-crafted linguistic resources like WordNet, FrameNet, etc. These vectors are binary (i.e., contain only 0 and 1) and are 99.9% sparse. We analyze their performance on state-of-the-art evaluation methods for distributional models of word vectors and find they are competitive with standard distributional approaches.", "phrases": ["word vector", "hand-crafted linguistic resource", "framenet"], "overall_score": 1.482688260012284, "scores": [0.8817102362751404, 0.6443214426251543, 0.6130353210037134], "rank_score": 0.713022333301336} -{"id": "logan-iv-etal-2022-cutting", "title": "Cutting Down on Prompts and Parameters: Simple Few-Shot Learning with Language Models", "abstract": "Prompting language models (LMs) with training examples and task descriptions has been seen as critical to recent successes in few-shot learning. In this work, we show that finetuning LMs in the few-shot setting can considerably reduce the need for prompt engineering. In fact, one can use null prompts, prompts that contain neither task-specific templates nor training examples, and achieve competitive accuracy to manually-tuned prompts across a wide range of tasks. While finetuning LMs does introduce new parameters for each downstream task, we show that this memory overhead can be substantially reduced: finetuning only the bias terms can achieve comparable or better accuracy than standard finetuning while only updating 0.1% of the parameters. All in all, we recommend finetuning LMs for few-shot learning as it is more accurate, robust to different prompts, and can be made nearly as efficient as using frozen LMs.", "phrases": ["prompts", "few-shot learning", "language model", "task description", "low performance"], "overall_score": 1.5666520221349796, "scores": [0.8424288424095964, 1.0026672867563873, 0.5919291640015495, 0.5704894726878134, 0.5575555466984398], "rank_score": 0.7130140625107573} -{"id": "yanaka-etal-2021-exploring", "title": "Exploring Transitivity in Neural NLI Models through Veridicality", "abstract": "Despite the recent success of deep neural networks in natural language processing, the extent to which they can demonstrate human-like generalization capacities for natural language understanding remains unclear. We explore this issue in the domain of natural language inference (NLI), focusing on the transitivity of inference relations, a fundamental property for systematically drawing inferences. A model capturing transitivity can compose basic inference patterns and draw new inferences. 
We introduce an analysis method using synthetic and naturalistic NLI datasets involving clause-embedding verbs to evaluate whether models can perform transitivity inferences composed of veridical inferences and arbitrary inference types. We find that current NLI models do not perform consistently well on transitivity inference tasks, suggesting that they lack the generalization capacity for drawing composite inferences from provided training examples. The data and code for our analysis are publicly available at .", "phrases": ["transitivity", "nli", "veridicality"], "overall_score": 1.2774124475269915, "scores": [0.8202520332437708, 0.7824116917880493, 0.5361486593291342], "rank_score": 0.7129374614536514} -{"id": "corro-2020-span", "title": "Span-based discontinuous constituency parsing: a family of exact chart-based algorithms with time complexities from O(n^6) down to O(n^3)", "abstract": "We introduce a novel chart-based algorithm for span-based parsing of discontinuous constituency trees of block degree two, including ill-nested structures. In particular, we show that we can build variants of our parser with smaller search spaces and time complexities ranging from O(n^6) down to O(n^3). The cubic time variant covers 98% of constituents observed in linguistic treebanks while having the same complexity as continuous constituency parsers. We evaluate our approach on German and English treebanks (Negra, Tiger, and DPTB) and report state-of-the-art results in the fully supervised setting. We also experiment with pre-trained word embeddings and Bert-based neural networks.", "phrases": ["chart-based algorithm", "time complexity", "state-of-the-art result"], "overall_score": 0.9881093833490405, "scores": [0.8063380377710428, 0.80008485816484, 0.5318878648843857], "rank_score": 0.7127702536067563} -{"id": "jindal-etal-2020-killed", "title": "Is Killed More Significant than Fled? A Contextual Model for Salient Event Detection", "abstract": "Identifying the key events in a document is critical to holistically understanding its important information. Although measuring the salience of events is highly contextual, most previous work has used a limited representation of events that omits essential information. In this work, we propose a highly contextual model of event salience that uses a rich representation of events, incorporates document-level information and allows for interactions between latent event encodings. Our experimental results on an event salience dataset demonstrate that our model improves over previous work by an absolute 2-4% on standard metrics, establishing a new state-of-the-art performance for the task. We also propose a new evaluation metric that addresses flaws in previous evaluation methodologies. Finally, we discuss the importance of salient event detection for the downstream task of summarization.", "phrases": ["contextual model", "salient event detection", "event salience", "summarization"], "overall_score": 0.7829645751849231, "scores": [0.9107393590125364, 0.8388953742113577, 0.5746611511273518, 0.5264443932188926], "rank_score": 0.7126850693925346} -{"id": "lin-xue-2019-parsing", "title": "Parsing Meaning Representations: Is Easier Always Better?", "abstract": "The parsing accuracy varies a great deal for different meaning representations. 
In this paper, we compare the parsing performance between Abstract Meaning Representation (AMR) and Minimal Recursion Semantics (MRS), and provide an in-depth analysis of what factors contributed to the discrepancy in their parsing accuracy. By crystallizing the trade-off between representation expressiveness and ease of automatic parsing, we hope our results can help inform the design of the next-generation meaning representations.", "phrases": ["meaning representation", "amr", "factor", "discrepancy"], "overall_score": 1.1470135322015091, "scores": [0.9610305326009223, 0.82892972319204, 0.5325898445063261, 0.5281682115783679], "rank_score": 0.712679577969414} -{"id": "turk-etal-2019-turkish", "title": "Turkish Treebanking: Unifying and Constructing Efforts", "abstract": "In this paper, we present the current version of two different treebanks, the re-annotation of the Turkish PUD Treebank and the first annotation of the Turkish National Corpus Universal Dependency (henceforth TNC-UD). The annotation of both treebanks, the Turkish PUD Treebank and TNC-UD, was carried out based on the decisions concerning linguistic adequacy of re-annotation of the Turkish IMST-UD Treebank (T\u00fcrk et al., forthcoming). Both of the treebanks were annotated with the same annotation process and morphological and syntactic analyses. The TNC-UD is planned to have 10,000 sentences. In this paper, we will present the first 500 sentences along with the annotation of the PUD Treebank. Moreover, this paper also offers the parsing results of a graph-based neural parser on the previous and re-annotated PUD, as well as the TNC-UD. In light of the comparisons, even though we observe a slight decrease in the attachment scores of the Turkish PUD treebank, we demonstrate that the annotation of the TNC-UD improves the parsing accuracy of Turkish. In addition to the treebanks, we have also constructed a custom annotation software with advanced filtering and morphological editing options. Both the treebanks, including a full edit-history and the annotation guidelines, and the custom software are publicly available under an open license online.", "phrases": ["treebank", "annotation guideline", "turkish"], "overall_score": 1.1463298016885806, "scores": [1.0034594927736613, 0.593186662309654, 0.5401181018644058], "rank_score": 0.7122547523159071} -{"id": "zhou-etal-2019-bert", "title": "BERT-based Lexical Substitution", "abstract": "Previous studies on lexical substitution tend to obtain substitute candidates by finding the target word's synonyms from lexical resources (e.g., WordNet) and then rank the candidates based on its contexts. These approaches have two limitations: (1) They are likely to overlook good substitute candidates that are not the synonyms of the target words in the lexical resources; (2) They fail to take into account the substitution's influence on the global context of the sentence. To address these issues, we propose an end-to-end BERT-based lexical substitution approach which can propose and validate substitute candidates without using any annotated data or manually curated resources. Our approach first applies dropout to the target word's embedding for partially masking the word, allowing BERT to take balanced consideration of the target word's semantics and contexts for proposing substitute candidates, and then validates the candidates based on their substitution's influence on the global contextualized representation of the sentence. 
Experiments show our approach performs well in both proposing and ranking substitute candidates, achieving the state-of-the-art results in both LS07 and LS14 benchmarks.", "phrases": ["influence", "state-of-the-art result", "bert-based lexical substitution"], "overall_score": 1.5644651453166405, "scores": [1.0397206468955436, 0.5521153389014731, 0.5442203301394758], "rank_score": 0.7120187719788308} -{"id": "gao-etal-2018-april", "title": "APRIL: Interactively Learning to Summarise by Combining Active Preference Learning and Reinforcement Learning", "abstract": "We propose a method to perform automatic document summarisation without using reference summaries. Instead, our method interactively learns from users' preferences. The merit of preference-based interactive summarisation is that preferences are easier for users to provide than reference summaries. Existing preference-based interactive learning methods suffer from high sample complexity, i.e. they need to interact with the oracle for many rounds in order to converge. In this work, we propose a new objective function, which enables us to leverage active learning, preference learning and reinforcement learning techniques in order to reduce the sample complexity. Both simulation and real-user experiments suggest that our method significantly advances the state of the art. Our source code is freely available at .", "phrases": ["preference", "reference summary", "active learning", "user feedback"], "overall_score": 1.564418856165094, "scores": [1.126589829800944, 0.6024063059159016, 0.5670008103809288, 0.5519938734145386], "rank_score": 0.7119977048780783} -{"id": "chen-etal-2009-global", "title": "Global Models of Document Structure using Latent Permutations", "abstract": "We present a novel Bayesian topic model for learning discourse-level document structure. Our model leverages insights from discourse theory to constrain latent topic assignments in a way that reflects the underlying organization of document topics. We propose a global model in which both topic selection and ordering are biased to be similar across a collection of related documents. We show that this space of orderings can be elegantly represented using a distribution over permutations called the generalized Mallows model. Our structure-aware approach substantially outperforms alternative approaches for cross-document comparison and single-document segmentation.", "phrases": ["document structure", "permutation", "latent topic assignment"], "overall_score": 1.6393012225308308, "scores": [0.9327756152577663, 0.6157965537771093, 0.5872462563323076], "rank_score": 0.7119394751223944} -{"id": "liu-etal-2019-incorporating-contextual", "title": "Incorporating Contextual and Syntactic Structures Improves Semantic Similarity Modeling", "abstract": "Semantic similarity modeling is central to many NLP problems such as natural language inference and question answering. Syntactic structures interact closely with semantics in learning compositional representations and alleviating long-range dependency issues. However, such structure priors have not been well exploited in previous work for semantic modeling. To examine their effectiveness, we start with the Pairwise Word Interaction Model, one of the best models according to a recent reproducibility study, then introduce components for modeling context and structure using multi-layer BiLSTMs and TreeLSTMs. In addition, we introduce residual connections to the deep convolutional neural network component of the model. 
Extensive evaluations on eight benchmark datasets show that incorporating structural information contributes to consistent improvements over strong baselines.", "phrases": ["syntactic structure", "semantic similarity modeling", "connection"], "overall_score": 0.7821161046443939, "scores": [0.9951999515471701, 0.596855911570073, 0.5436824115544799], "rank_score": 0.7119127582239076} -{"id": "ai-etal-2014-sprinter", "title": "Sprinter: Language Technologies for Interactive and Multimedia Language Learning", "abstract": "Modern language learning courses are no longer exclusively based on books or face-to-face lectures. More and more lessons make use of multimedia and personalized learning methods. Many of these are based on e-learning solutions. Learning via the Internet provides 7/24 services that require sizeable human resources. Therefore we witness a growing economic pressure to employ computer-assisted methods for improving language learning in quality, efficiency and scalability. In this paper, we will address three applications of language technologies for language learning: 1) Methods and strategies for pronunciation training in second language learning, e.g., multimodal feedback via visualization of sound features, speech verification and prosody transplantation; 2) Dialogue-based language learning games; 3) Application of parsing and generation technologies to the automatic generation of paraphrases for the semi-automatic production of learning material.", "phrases": ["language technology", "interactive", "sprinter"], "overall_score": 0.9866368282821754, "scores": [0.79308621511867, 0.7822918995501693, 0.5597459743128262], "rank_score": 0.7117080296605551} -{"id": "nivre-2006-constraints", "title": "Constraints on Non-Projective Dependency Parsing", "abstract": "We investigate a series of graph-theoretic constraints on non-projective dependency parsing and their effect on expressivity, i.e. whether they allow naturally occurring syntactic constructions to be adequately represented, and efficiency, i.e. whether they reduce the search space for the parser. In particular, we define a new measure for the degree of non-projectivity in an acyclic dependency graph obeying the single-head constraint. The constraints are evaluated experimentally using data from the Prague Dependency Treebank and the Danish Dependency Treebank. The results indicate that, whereas complete linguistic coverage in principle requires unrestricted non-projective dependency graphs, limiting the degree of non-projectivity to at most 2 can reduce average running time from quadratic to linear, while excluding less than 0.5% of the dependency graphs found in the two treebanks. This is a substantial improvement over the commonly used projective approximation (degree 0), which excludes 15\u201325% of the graphs.", "phrases": ["non-projective dependency parsing", "projectivity", "constituent"], "overall_score": 1.384686746350875, "scores": [1.0236085599146105, 0.5708710974166619, 0.5402850136219621], "rank_score": 0.7115882236510781} -{"id": "baldridge-lascarides-2005-probabilistic", "title": "Probabilistic Head-Driven Parsing for Discourse Structure", "abstract": "We describe a data-driven approach to building interpretable discourse structures for appointment scheduling dialogues. We represent discourse structures as headed trees and model them with probabilistic head-driven parsing techniques. 
We show that dialogue-based features regarding turn-taking and domain-specific goals have a large positive impact on performance. Our best model achieves an f-score of 43.2% for labelled discourse relations and 67.9% for unlabelled ones, significantly beating a right-branching baseline that uses the most frequent relations.", "phrases": ["discourse structure", "data-driven approach", "verbmobil corpus", "pcfg"], "overall_score": 1.4790275477883659, "scores": [0.9235475847488495, 0.8684627548293934, 0.5295852262963548, 0.5234520455019892], "rank_score": 0.7112619028441467} -{"id": "tien-nguyen-joty-2017-neural", "title": "A Neural Local Coherence Model", "abstract": "We propose a local coherence model based on a convolutional neural network that operates over the entity grid representation of a text. The model captures long range entity transitions along with entity-specific features without losing generalization, thanks to the power of distributed representation. We present a pairwise ranking method to train the model in an end-to-end fashion on a task and learn task-specific high level features. Our evaluation on three different coherence assessment tasks demonstrates that our model achieves state of the art results outperforming existing models by a good margin.", "phrases": ["local coherence model", "convolutional neural network", "entity-specific feature"], "overall_score": 1.4787382589472138, "scores": [0.9601705219117415, 0.6409830835296533, 0.5322147475145306], "rank_score": 0.7111227843186417} -{"id": "finley-etal-2018-dictations", "title": "From dictations to clinical reports using machine translation", "abstract": "A typical workflow to document clinical encounters entails dictating a summary, running speech recognition, and post-processing the resulting text into a formatted letter. Post-processing entails a host of transformations including punctuation restoration, truecasing, marking sections and headers, converting dates and numerical expressions, parsing lists, etc. In conventional implementations, most of these tasks are accomplished by individual modules. We introduce a novel holistic approach to post-processing that relies on machine translation. We show how this technique outperforms an alternative conventional system\u2014even learning to correct speech recognition errors during post-processing\u2014while being much simpler to maintain.", "phrases": ["dictation", "report", "conversation"], "overall_score": 1.144460560614721, "scores": [0.8199031918019573, 0.7859155992529879, 0.5274611915518675], "rank_score": 0.7110933275356043} -{"id": "rozovskaya-roth-2016-grammatical", "title": "Grammatical Error Correction: Machine Translation and Classifiers", "abstract": "We focus on two leading state-of-the-art approaches to grammatical error correction \u2013 machine learning classification and machine translation. Based on the comparative study of the two learning frameworks and through error analysis of the output of the state-of-the-art systems, we identify key strengths and weaknesses of each of these approaches and demonstrate their complementarity. In particular, the machine translation method learns from parallel data without requiring further linguistic input and is better at correcting complex mistakes. 
The classification approach possesses other desirable characteristics, such as the ability to easily generalize beyond what was seen in training, the ability to train without human-annotated data, and the flexibility to adjust knowledge sources for individual error types. Based on this analysis, we develop an algorithmic approach that combines the strengths of both methods. We present several systems based on resources used in previous work with a relative improvement of over 20% (and 7.4 F score points) over the previous state-of-the-art.", "phrases": ["weakness", "grammatical error correction", "machine translation approach"], "overall_score": 1.7664320054140827, "scores": [0.9690983817930525, 0.6145744622452983, 0.5489207552803028], "rank_score": 0.7108645331062178} -{"id": "pecina-2005-extensive", "title": "An Extensive Empirical Study of Collocation Extraction Methods", "abstract": "This paper presents a status quo of an ongoing research study of collocations -- an essential linguistic phenomenon having a wide spectrum of applications in the field of natural language processing. The core of the work is an empirical evaluation of a comprehensive list of automatic collocation extraction methods using precision-recall measures and a proposal of a new approach integrating multiple basic methods and statistical classification. We demonstrate that combining multiple independent techniques leads to a significant performance improvement in comparison with individual basic methods.", "phrases": ["collocation extraction method", "empirical evaluation", "list"], "overall_score": 1.766337249339196, "scores": [0.906231690739737, 0.6389384144995882, 0.58730909613014], "rank_score": 0.7108264004564884} -{"id": "wang-etal-2020-covost", "title": "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus", "abstract": "Spoken language translation has recently witnessed a resurgence in popularity, thanks to the development of end-to-end models and the creation of new corpora, such as Augmented LibriSpeech and MuST-C. Existing datasets involve language pairs with English as a source language, involve very specific domains or are low resource. We introduce CoVoST, a multilingual speech-to-text translation corpus from 11 languages into English, diversified with over 11,000 speakers and over 60 accents. We describe the dataset creation methodology and provide empirical evidence of the quality of the data. We also provide initial benchmarks, including, to our knowledge, the first end-to-end many-to-one multilingual models for spoken language translation. CoVoST is released under CC0 license and free to use. We also provide additional evaluation data derived from Tatoeba under CC licenses.", "phrases": ["speech-to-text translation corpus", "end-to-end model", "common voice"], "overall_score": 1.2734823735477094, "scores": [1.0040751124221048, 0.5822432416587225, 0.5459137821272175], "rank_score": 0.7107440454026817} -{"id": "kim-etal-2019-unsupervised", "title": "Unsupervised Recurrent Neural Network Grammars", "abstract": "Recurrent neural network grammars (RNNG) are generative models of language which jointly model syntax and surface structure by incrementally generating a syntax tree and sentence in a top-down, left-to-right order. Supervised RNNGs achieve strong language modeling and parsing performance, but require an annotated corpus of parse trees. In this work, we experiment with unsupervised learning of RNNGs. 
Since directly marginalizing over the space of latent trees is intractable, we instead apply amortized variational inference. To maximize the evidence lower bound, we develop an inference network parameterized as a neural CRF constituency parser. On language modeling, unsupervised RNNGs perform as well as their supervised counterparts on benchmarks in English and Chinese. On constituency grammar induction, they are competitive with recent neural language models that induce tree structures from words through attention mechanisms.", "phrases": ["neural network grammar", "language modeling", "variational inference", "chinese", "tree structure"], "overall_score": 1.8228400966713332, "scores": [1.06093411135753, 0.852764791190025, 0.5563583679858878, 0.5447944817965947, 0.5385129395860447], "rank_score": 0.7106729383832164} -{"id": "etchegoyhen-etal-2016-exploiting", "title": "Exploiting a Large Strongly Comparable Corpus", "abstract": "This article describes a large comparable corpus for Basque and Spanish and the methods employed to build a parallel resource from the original data. The EITB corpus, a strongly comparable corpus in the news domain, is to be shared with the research community, as an aid for the development and testing of methods in comparable corpora exploitation, and as a basis for the improvement of data-driven machine translation systems for this language pair. Competing approaches were explored for the alignment of comparable segments in the corpus, resulting in the design of a simple method which outperformed a state-of-the-art method on the corpus test sets. The method we present is highly portable, computationally efficient, and significantly reduces deployment work, a welcome result for the exploitation of comparable corpora.", "phrases": ["comparable corpus", "news domain", "basis"], "overall_score": 0.7804620258500922, "scores": [0.9934779923533111, 0.5703675114233812, 0.5673759486875157], "rank_score": 0.7104071508214026} -{"id": "mitchell-lapata-2009-language", "title": "Language Models Based on Semantic Composition", "abstract": "In this paper we propose a novel statistical language model to capture long-range semantic dependencies. Specifically, we apply the concept of semantic composition to the problem of constructing predictive history representations for upcoming words. We also examine the influence of the underlying semantic space on the composition task by comparing spatial semantic representations against topic-based ones. The composition models yield reductions in perplexity when combined with a standard n-gram language model over the n-gram model alone. We also obtain perplexity reductions when integrating our models with a structured language model.", "phrases": ["semantic composition", "language modeling", "dimension"], "overall_score": 1.7651642321971954, "scores": [0.9691249678613663, 0.6374326556095145, 0.524505407425419], "rank_score": 0.7103543436320999} -{"id": "gkatzia-etal-2014-comparing", "title": "Comparing Multi-label Classification with Reinforcement Learning for Summarisation of Time-series Data", "abstract": "We present a novel approach for automatic report generation from time-series data, in the context of student feedback generation. Our proposed methodology treats content selection as a multi-label (ML) classification problem, which takes as input time-series data and outputs a set of templates, while capturing the dependencies between selected templates.
We show that this method generates output closer to the feedback that lecturers actually generated, achieving 3.5% higher accuracy and 15% higher F-score than multiple simple classifiers that keep a history of selected templates. Furthermore, we compare an ML classifier with a Reinforcement Learning (RL) approach in simulation and using ratings from real student users. We show that the different methods have different benefits, with ML being more accurate for predicting what was seen in the training data, whereas RL is more exploratory and slightly preferred by the students.", "phrases": ["reinforcement learning", "time-series data", "student", "content selection"], "overall_score": 0.9846958206140459, "scores": [0.8970573254365413, 0.8869996112852366, 0.5334756749503274, 0.5236989426958398], "rank_score": 0.7103078885919862} -{"id": "kennington-schlangen-2015-simple", "title": "Simple Learning and Compositional Application of Perceptually Grounded Word Meanings for Incremental Reference Resolution", "abstract": "An elementary way of using language is to refer to objects. Often, these objects are physically present in the shared environment and reference is done via mention of perceivable properties of the objects. This is a type of language use that is modelled well neither by logical semantics nor by distributional semantics, the former focusing on inferential relations between expressed propositions, the latter on similarity relations between words or phrases. We present an account of word and phrase meaning that is perceptually grounded, trainable, compositional, and \u2018dialogue-plausible\u2019 in that it computes meanings word-by-word. We show that the approach performs well (with an accuracy of 65% on a 1-out-of-32 reference resolution task) on direct descriptions and target/landmark descriptions, even when trained with less than 800 training examples and automatically transcribed utterances.", "phrases": ["incremental reference resolution", "object", "environment", "individual word", "low-level visual feature"], "overall_score": 1.5605839856758377, "scores": [0.8917033002945901, 1.0495410488846195, 0.5653233472174876, 0.5235495070950771, 0.5211446970277303], "rank_score": 0.7102523801039009} -{"id": "zhou-etal-2019-early", "title": "Early Rumour Detection", "abstract": "Rumours can spread quickly through social media, and malicious ones can bring about significant economic and social impact. Motivated by this, our paper focuses on the task of rumour detection; particularly, we are interested in understanding how early we can detect them. Although there are numerous studies on rumour detection, few are concerned with the timing of the detection. A successfully-detected malicious rumour can still cause significant damage if it isn't detected in a timely manner, and so timing is crucial. To address this, we present a novel methodology for early rumour detection. Our model treats social media posts (e.g. tweets) as a data stream and integrates reinforcement learning to learn the minimum number of posts required before we classify an event as a rumour.
Experiments on Twitter and Weibo demonstrate that our model identifies rumours earlier than state-of-the-art systems while maintaining a comparable accuracy.", "phrases": ["post", "reinforcement learning", "early rumour detection"], "overall_score": 0.7802140812135339, "scores": [1.0397207708399179, 0.5552537757210299, 0.5355698391005788], "rank_score": 0.7101814618871755} -{"id": "cotterell-etal-2016-morphological-segmentation", "title": "Morphological Segmentation Inside-Out", "abstract": "Morphological segmentation has traditionally been modeled with non-hierarchical models, which yield flat segmentations as output. In many cases, however, proper morphological analysis requires hierarchical structure -- especially in the case of derivational morphology. In this work, we introduce a discriminative, joint model of morphological segmentation along with the orthographic changes that occur during word formation. To the best of our knowledge, this is the first attempt to approach discriminative segmentation with a context-free model. Additionally, we release an annotated treebank of 7454 English words with constituency parses, encouraging future research in this area.", "phrases": ["segmentation", "joint model", "change"], "overall_score": 1.1429805391739074, "scores": [1.0201990848736662, 0.5595406811139902, 0.5507814495439327], "rank_score": 0.7101737385105297} -{"id": "clarke-etal-2010-semantic", "title": "Semantic Composition with Quotient Algebras", "abstract": "We describe an algebraic approach for computing with vector based semantics. The tensor product has been proposed as a method of composition, but has the undesirable property that strings of different length are incomparable. We consider how a quotient algebra of the tensor algebra can allow such comparisons to be made, offering the possibility of data-driven models of semantic composition.", "phrases": ["quotient algebra", "length", "semantic composition"], "overall_score": 0.7797789828575328, "scores": [0.9621991421351697, 0.6293684214616382, 0.5377886912914995], "rank_score": 0.7097854182961024} -{"id": "mitkov-ha-2003-computer", "title": "Computer-Aided Generation of Multiple-Choice Tests", "abstract": "The paper describes a novel automatic procedure for the generation of multiple-choice tests from electronic documents. In addition to employing various NLP techniques including term extraction and shallow parsing, the system makes use of language resources such as corpora and ontologies. The system operates in a fully automatic mode and also a semiautomatic environment where the user is offered the option to post-edit the generated test items. The results from the conducted evaluation suggest that the new procedure is very effective, saving time and labour considerably, and that the test items produced with the help of the program are not of inferior quality to those produced manually.", "phrases": ["multiple-choice test", "nlp technique", "shallow parsing", "question generation", "activity"], "overall_score": 1.8202278626295296, "scores": [0.9902033200088678, 0.9062291369263238, 0.5789940811980147, 0.5459973619503701, 0.5268486171386775], "rank_score": 0.7096545034444507} -{"id": "daiber-etal-2015-splitting", "title": "Splitting Compounds by Semantic Analogy", "abstract": "Compounding is a highly productive word-formation process in some languages that is often problematic for natural language processing applications.
In this paper, we investigate whether distributional semantics in the form of word embeddings can enable a deeper, i.e., more knowledge-rich, processing of compounds than the standard string-based methods. We present an unsupervised approach that exploits regularities in the semantic vector space (based on analogies such as \"bookshop is to shop as bookshelf is to shelf\") to produce compound analyses of high quality. A subsequent compound splitting algorithm based on these analyses is highly effective, particularly for ambiguous compounds. German to English machine translation experiments show that this semantic analogy-based compound splitter leads to better translations than a commonly used frequency-based method.", "phrases": ["semantic analogy", "shelf", "compound split"], "overall_score": 1.2711664155046667, "scores": [0.9709633727949957, 0.6098565609476777, 0.5475345200819643], "rank_score": 0.7094514846082126} -{"id": "foster-etal-2011-news", "title": "From News to Comment: Resources and Benchmarks for Parsing the Language of Web 2.0", "abstract": "We investigate the problem of parsing the noisy language of social media. We evaluate four Wall-Street-Journal-trained statistical parsers (Berkeley, Brown, Malt and MST) on a new dataset containing 1,000 phrase structure trees for sentences from microblogs (tweets) and discussion forum posts. We compare the four parsers on their ability to produce Stanford dependencies for these Web 2.0 sentences. We find that the parsers have a particular problem with tweets and that a substantial part of this problem is related to POS tagging accuracy. We attempt three retraining experiments involving Malt, Brown and an in-house Berkeley-style parser and obtain a statistically significant improvement for all three parsers.", "phrases": ["web", "twitter", "bi-lexical dependency", "part-of-speech"], "overall_score": 1.921055159065124, "scores": [0.8493293834834107, 0.8306159603511427, 0.594268531439201, 0.5633334616009176], "rank_score": 0.709386834218668} -{"id": "kudo-matsumoto-2003-fast", "title": "Fast Methods for Kernel-Based Text Analysis", "abstract": "Kernel-based learning (e.g., Support Vector Machines) has been successfully applied to many hard problems in Natural Language Processing (NLP). In NLP, although feature combinations are crucial to improving performance, they are heuristically selected. Kernel methods change this situation. The merit of the kernel methods is that effective feature combination is implicitly expanded without loss of generality and without increasing the computational costs. Kernel-based text analysis shows an excellent performance in terms of accuracy; however, these methods are usually too slow to apply to large-scale text analysis. In this paper, we extend a Basket Mining algorithm to convert a kernel-based classifier into a simple and fast linear classifier. Experimental results on English BaseNP Chunking, Japanese Word Segmentation and Japanese Dependency Parsing show that our new classifiers are about 30 to 300 times faster than the standard kernel-based classifiers.", "phrases": ["kernel-based text analysis", "support vector", "pki"], "overall_score": 1.4749320859242443, "scores": [1.0242186141255207, 0.5815812134401377, 0.5220773784452635], "rank_score": 0.7092924020036406} -{"id": "cheung-etal-2013-probabilistic", "title": "Probabilistic Frame Induction", "abstract": "In natural-language discourse, related events tend to appear near each other to describe a larger scenario.
Such structures can be formalized by the notion of a frame (a.k.a. template), which comprises a set of related events and prototypical participants and event transitions. Identifying frames is a prerequisite for information extraction and natural language generation, and is usually done manually. Methods for inducing frames have been proposed recently, but they typically use ad hoc procedures and are difficult to diagnose or extend. In this paper, we propose the first probabilistic approach to frame induction, which incorporates frames, events, and participants as latent topics and learns those frame and event transitions that best explain the text. The number of frame components is inferred by a novel application of a split-merge method from syntactic parsing. In end-to-end evaluations from text to induced frames and extracted facts, our method produces state-of-the-art results while substantially reducing engineering effort.", "phrases": ["frame", "participant", "schema induction"], "overall_score": 1.819263468996321, "scores": [0.9942087293196752, 0.6056236953019658, 0.5280031176716364], "rank_score": 0.7092785140977592} -{"id": "shi-etal-2014-probabilistic", "title": "A Probabilistic Co-Bootstrapping Method for Entity Set Expansion", "abstract": "Entity Set Expansion (ESE) aims at automatically acquiring instances of a specific target category. Unfortunately, traditional ESE methods usually have the expansion boundary problem and the semantic drift problem. To resolve the above two problems, this paper proposes a probabilistic Co-Bootstrapping method, which can accurately determine the expansion boundary using both the positive and the discriminant negative instances, and resolve the semantic drift problem by effectively maintaining and refining the expansion boundary during bootstrapping iterations. Experimental results show that our method can achieve a competitive performance.", "phrases": ["probabilistic co-bootstrapping method", "entity set expansion", "ese", "target category", "expansion boundary"], "overall_score": 0.9829227951259901, "scores": [0.9603609850902891, 0.9589860081726274, 0.5522529171570827, 0.5398095216868839, 0.5337351731555777], "rank_score": 0.7090289210524923} -{"id": "chang-etal-2009-discriminative", "title": "Discriminative Reordering with Chinese Grammatical Relations Features", "abstract": "The prevalence in Chinese of grammatical structures that translate into English in different word orders is an important cause of translation difficulty. While previous work has used phrase-structure parses to deal with such ordering problems, we introduce a richer set of Chinese grammatical relations that describes more semantically abstract relations between words. Using these Chinese grammatical relations, we improve a phrase orientation classifier (introduced by Zens and Ney (2006)) that decides the ordering of two phrases when translated into English by adding path features designed over the Chinese typed dependencies. We then apply the log probability of the phrase orientation classifier as an extra feature in a phrase-based MT system, and get significant BLEU point gains on three test sets: MT02 (+0.59), MT03 (+1.00) and MT05 (+0.77). 
Our Chinese grammatical relations are also likely to be useful for other NLP tasks.", "phrases": ["grammatical relation", "extra feature", "chinese-english task"], "overall_score": 1.8707681400626661, "scores": [0.8903968034119805, 0.6405485758677176, 0.5956868682305151], "rank_score": 0.7088774158367377} -{"id": "luo-2005-coreference", "title": "On Coreference Resolution Performance Metrics", "abstract": "The paper proposes a Constrained Entity-Alignment F-Measure (CEAF) for evaluating coreference resolution. The metric is computed by aligning reference and system entities (or coreference chains) with the constraint that a system (reference) entity is aligned with at most one reference (system) entity. We show that the best alignment is a maximum bipartite matching problem which can be solved by the Kuhn-Munkres algorithm. Comparative experiments are conducted to show that the widely-known MUC F-measure has serious flaws in evaluating a coreference system. The proposed metric is also compared with the ACE-Value, the official evaluation metric in the Automatic Content Extraction (ACE) task, and we conclude that the proposed metric possesses some properties such as symmetry and better interpretability missing in the ACE-Value.", "phrases": ["coreference resolution", "ceaf", "mention", "good alignment"], "overall_score": 1.6318236465254323, "scores": [1.0751910066531312, 0.6542397606700244, 0.5607804966913437, 0.5445567564864514], "rank_score": 0.7086920051252377} -{"id": "boujelbane-etal-2013-mapping", "title": "Mapping Rules for Building a Tunisian Dialect Lexicon and Generating Corpora", "abstract": "Nowadays in Tunisia, the Arabic Tunisian Dialect (TD) has become progressively used in interviews, news and debate programs instead of Modern Standard Arabic (MSA). Thus, this gave birth to a new kind of language. Indeed, the majority of speech is no longer made in MSA but alternates between MSA and TD. This situation has important negative consequences on Automatic Speech Recognition (ASR): since the spoken dialects are not officially written and do not have a standard orthography, it is very costly to obtain adequate annotated corpora to use for training language models and building vocabulary. There are neither parallel corpora involving Tunisian dialect and MSA nor dictionaries. In this paper, we describe a method for building a bilingual dictionary using explicit knowledge about the relation between TD and MSA. We also present an automatic process for creating Tunisian Dialect", "phrases": ["tunisian dialect", "arabic", "dictionary"], "overall_score": 1.3789138719720206, "scores": [1.0486766608173934, 0.5571138089479961, 0.5200741894658387], "rank_score": 0.7086215530770761} -{"id": "nguyen-etal-2017-reinforcement", "title": "Reinforcement Learning for Bandit Neural Machine Translation with Simulated Human Feedback", "abstract": "Machine translation is a natural candidate problem for reinforcement learning from human feedback: users provide quick, dirty ratings on candidate translations to guide a system to improve. Yet, current neural machine translation training focuses on expensive human-generated reference translations. We describe a reinforcement learning algorithm that improves neural machine translation systems from simulated human feedback. Our algorithm combines the advantage actor-critic algorithm (Mnih et al., 2016) with the attention-based neural encoder-decoder architecture (Luong et al., 2015).
This algorithm (a) is well-designed for problems with a large action space and delayed rewards, (b) effectively optimizes traditional corpus-level machine translation metrics, and (c) is robust to skewed, high-variance, granular feedback modeled after actual human behaviors.", "phrases": ["machine translation", "simulated human feedback", "reinforcement learning", "bandit feedback"], "overall_score": 1.8172932045498027, "scores": [0.9013776544958043, 0.7880565524045839, 0.5770391478950319, 0.5675681037826629], "rank_score": 0.7085103646445208} -{"id": "ren-etal-2019-explicit", "title": "Explicit Cross-lingual Pre-training for Unsupervised Machine Translation", "abstract": "Pre-training has proven to be effective in unsupervised machine translation due to its ability to model deep context information in cross-lingual scenarios. However, the cross-lingual information obtained from shared BPE spaces is inexplicit and limited. In this paper, we propose a novel cross-lingual pre-training method for unsupervised machine translation by incorporating explicit cross-lingual training signals. Specifically, we first calculate cross-lingual n-gram embeddings and infer an n-gram translation table from them. With those n-gram translation pairs, we propose a new pre-training model called Cross-lingual Masked Language Model (CMLM), which randomly chooses source n-grams in the input text stream and predicts their translation candidates at each time step. Experiments show that our method can incorporate beneficial cross-lingual information into pre-trained models. Taking pre-trained CMLM models as the encoder and decoder, we significantly improve the performance of unsupervised machine translation.", "phrases": ["unsupervised machine translation", "cross-lingual information", "pre-trained model"], "overall_score": 1.6988318199030763, "scores": [0.93410721164438, 0.6197695080696561, 0.571526969731317], "rank_score": 0.7084678964817844} -{"id": "zhu-etal-2022-diagnosing", "title": "Diagnosing Vision-and-Language Navigation: What Really Matters", "abstract": "Vision-and-language navigation (VLN) is a multimodal task where an agent follows natural language instructions and navigates in visual environments. Multiple setups have been proposed, and researchers apply new model architectures or training techniques to boost navigation performance. However, there still exist non-negligible gaps between machines' performance and human benchmarks. Moreover, the agents' inner mechanisms for navigation decisions remain unclear. To the best of our knowledge, how the agents perceive the multimodal input is under-studied and needs investigation. In this work, we conduct a series of diagnostic experiments to unveil agents' focus during navigation. Results show that indoor navigation agents refer to both object and direction tokens when making decisions. In contrast, outdoor navigation agents heavily rely on direction tokens and poorly understand the object tokens. Transformer-based agents acquire a better cross-modal understanding of objects and display stronger numerical reasoning ability than non-Transformer-based agents. When it comes to vision-and-language alignments, many models claim that they can align object tokens with specific visual targets.
We find unbalanced attention on the vision and text input and doubt the reliability of such cross-modal alignments.", "phrases": ["vision-and-language navigation", "indoor navigation agent", "direction token"], "overall_score": 0.7782899458052313, "scores": [1.006251885497909, 0.5699185435844295, 0.5491196860012518], "rank_score": 0.7084300383611968} -{"id": "mills-etal-2018-automatic", "title": "Automatic Identification of Basic-Level Categories", "abstract": "Basic-level categories have been shown to be both psychologically significant and useful in a wide range of practical applications. We build a rule-based system to identify basic-level categories in WordNet, achieving 77% accuracy on a test set derived from prior psychological experiments. With additional annotations we found our system also has low precision, in part due to the existence of many categories that do not fit into the three classes (superordinate, basic-level, and subordinate) relied on in basic-level category research.", "phrases": ["basic-level category", "rule-based system", "wordnet"], "overall_score": 0.7780047292860659, "scores": [0.9737355126733408, 0.5847184934455332, 0.5660572631732378], "rank_score": 0.7081704230973705} -{"id": "flor-etal-2013-lexical", "title": "Lexical Tightness and Text Complexity", "abstract": "We present a computational notion of Lexical Tightness that measures global cohesion of content words in a text. Lexical tightness represents the degree to which a text tends to use words that are highly inter-associated in the language. We demonstrate the utility of this measure for estimating text complexity as measured by US school grade level designations of texts. Lexical tightness strongly correlates with grade level in a collection of expertly rated reading materials. Lexical tightness captures aspects of prose complexity that are not covered by classic readability indexes, especially for literary texts. We also present initial findings on the utility of this measure for automated estimation of complexity for poetry.", "phrases": ["literary text", "lexical tightness", "instruction"], "overall_score": 1.47257428244711, "scores": [0.9963983697034663, 0.5852406710949773, 0.5428365738286258], "rank_score": 0.7081585382090232} -{"id": "wambsganss-etal-2020-corpus", "title": "A Corpus for Argumentative Writing Support in German", "abstract": "In this paper, we present a novel annotation approach to capture claims and premises of arguments and their relations in student-written persuasive peer reviews on business models in the German language. We propose an annotation scheme based on annotation guidelines that allows us to model claims and premises as well as support and attack relations for capturing the structure of argumentative discourse in student-written peer reviews. We conduct an annotation study with three annotators on 50 persuasive essays to evaluate our annotation scheme. The obtained inter-rater agreement of \u03b1 = 0.57 for argument components and \u03b1 = 0.49 for argumentative relations indicates that the proposed annotation scheme successfully guides annotators to moderate agreement.
Finally, we present our freely available corpus of 1,000 persuasive student-written peer reviews on business models and our annotation guidelines to encourage future research on the design and development of argumentative writing support systems for students.", "phrases": ["argumentative writing support", "german language", "student-written text"], "overall_score": 1.139679216995514, "scores": [0.9862504240475818, 0.5850788410567723, 0.5530382700282185], "rank_score": 0.7081225117108575} -{"id": "max-etal-2010-contrastive", "title": "Contrastive Lexical Evaluation of Machine Translation", "abstract": "This paper advocates a complementary measure of translation performance that focuses on the contrastive ability of two or more systems or system versions to adequately translate source words. This is motivated by three main reasons: 1) existing automatic metrics sometimes do not show significant differences that can be revealed by fine-grained focussed human evaluation, 2) these metrics are based on direct comparisons of system hypotheses with the corresponding reference translations, thus ignoring the input words that were actually translated, and 3) as these metrics do not take input hypotheses from several systems at once, fine-grained contrastive evaluation can only be done indirectly. This proposal is illustrated on a multi-source Machine Translation scenario where multiple translations of a source text are available. Significant gains (up to +1.3 BLEU point) are achieved on these experiments, and contrastive lexical evaluation is shown to provide new information that can help to better analyse a system's performance.", "phrases": ["more system", "reference translation", "contrastive lexical evaluation"], "overall_score": 1.139638389930184, "scores": [0.9915383868379654, 0.5966733001917565, 0.536079746256955], "rank_score": 0.7080971444288923} -{"id": "sun-lu-2020-understanding", "title": "Understanding Attention for Text Classification", "abstract": "Attention has been proven successful in many natural language processing (NLP) tasks. Recently, many researchers started to investigate the interpretability of attention on NLP tasks. Many existing approaches focused on examining whether the local attention weights could reflect the importance of input representations. In this work, we present a study on understanding the internal mechanism of attention by looking into the gradient update process, checking its behavior when approaching a local minimum during training. We propose to analyze for each word token the following two quantities: its polarity score and its attention score, where the latter is a global assessment on the token's significance. We discuss conditions under which the attention mechanism may become more (or less) interpretable, and show how the interplay between the two quantities can contribute towards model performance.", "phrases": ["text classification", "attention weight", "mechanism"], "overall_score": 1.2686928434104614, "scores": [0.9640515527831088, 0.6023571413140472, 0.5578041791135323], "rank_score": 0.7080709577368961} -{"id": "reschke-anand-2011-extracting", "title": "Extracting Contextual Evaluativity", "abstract": "Recent work on evaluativity or sentiment in the language sciences has focused on the contributions that lexical items provide. In this paper, we discuss contextual evaluativity, stance that is inferred from lexical meaning and pragmatic environments.
Focusing on assessor-grounding claims like We liked him because he so clearly disliked Margaret Thatcher, we build a corpus and construct a system employing compositional principles of evaluativity calculation to derive that we dislike Margaret Thatcher. The resulting system has an F-score of 0.90 on our dataset, outperforming reasonable baselines, and indicating the viability of inferencing in the evaluative domain.", "phrases": ["contextual evaluativity", "predicate", "opinion inference"], "overall_score": 1.4723664906973348, "scores": [1.0110349608790803, 0.5639056301480456, 0.5492352434730056], "rank_score": 0.7080586115000438} -{"id": "goyal-etal-2022-flores", "title": "The Flores-101 Evaluation Benchmark for Low-Resource and Multilingual Machine Translation", "abstract": "One of the biggest challenges hindering progress in low-resource and multilingual machine translation is the lack of good evaluation benchmarks. Current evaluation benchmarks either lack good coverage of low-resource languages, consider only restricted domains, or are low quality because they are constructed using semi-automatic procedures. In this work, we introduce the Flores-101 evaluation benchmark, consisting of 3001 sentences extracted from English Wikipedia and covering a variety of different topics and domains. These sentences have been translated in 101 languages by professional translators through a carefully controlled process. The resulting dataset enables better assessment of model quality on the long tail of low-resource languages, including the evaluation of many-to-many multilingual translation systems, as all translations are fully aligned. By publicly releasing such a high-quality and high-coverage dataset, we hope to foster progress in the machine translation community and beyond.", "phrases": ["flores-101 evaluation benchmark", "multilingual machine translation", "english wikipedia", "test set"], "overall_score": 1.8681110542884398, "scores": [0.9381678152670397, 0.8260105508516514, 0.5352018482529167, 0.5321021233929716], "rank_score": 0.7078705844411448} -{"id": "li-etal-2012-mandarin", "title": "A Mandarin-English Code-Switching Corpus", "abstract": "Generally the existing monolingual corpora are not suitable for large vocabulary continuous speech recognition (LVCSR) of code-switching speech. The motivation of this paper is to study the rules and constraints code-switching follows and design a corpus for code-switching LVCSR task. This paper presents the development of a Mandarin-English code-switching corpus. This corpus consists of four parts: 1) conversational meeting speech and its data; 2) project meeting speech data; 3) student interviews speech; 4) text data of on-line news. The speech was transcribed by an annotator and verified by Mandarin-English bilingual speakers manually. We propose an approach for automatically downloading from the web text data that contains code-switching. The corpus includes both intra-sentential code-switching (switch in the middle of a sentence) and inter-sentential code-switching (switch at the end of the sentence).
The distributions of part-of-speech (POS) tags and code-switching reasons are reported.", "phrases": ["mandarin-english code-switching corpus", "other corpora", "standard"], "overall_score": 1.2678433561098998, "scores": [1.023057565198113, 0.5646733002884303, 0.5350596840554536], "rank_score": 0.7075968498473323} -{"id": "agirre-lopez-de-lacalle-2009-supervised", "title": "Supervised Domain Adaption for WSD", "abstract": "The lack of positive results on supervised domain adaptation for WSD has cast some doubts on the utility of hand-tagging general corpora and thus developing generic supervised WSD systems. In this paper we show for the first time that our WSD system trained on a general source corpus (Bnc) and the target corpus obtains up to 22% error reduction when compared to a system trained on the target corpus alone. In addition, we show that as little as 40% of the target corpus (when supplemented with the source corpus) is sufficient to obtain the same results as training on the full target data. The key for success is the use of unlabeled data with svd, a combination of kernels and svm.", "phrases": ["wsd", "target data", "supervised domain adaptation"], "overall_score": 1.3767987900819159, "scores": [0.9177647120713948, 0.6369411335353443, 0.5678980023925855], "rank_score": 0.707534615999775} -{"id": "moore-2005-discriminative", "title": "A Discriminative Framework for Bilingual Word Alignment", "abstract": "Bilingual word alignment forms the foundation of most approaches to statistical machine translation. Current word alignment methods are predominantly based on generative models. In this paper, we demonstrate a discriminative approach to training simple word alignment models that are comparable in accuracy to the more complex generative models normally used. These models have the advantages that they are easy to add features to and they allow fast optimization of model parameters using small amounts of annotated data.", "phrases": ["bilingual word alignment", "generative model", "annotated data", "llr", "feature function"], "overall_score": 2.3049796633094366, "scores": [1.0110340932684594, 0.8269578518250228, 0.5758934060978654, 0.5728018615666322, 0.55062304863342], "rank_score": 0.70746205227828} -{"id": "galley-mckeown-2007-lexicalized", "title": "Lexicalized Markov Grammars for Sentence Compression", "abstract": "We present a sentence compression system based on synchronous context-free grammars (SCFG), following the successful noisy-channel approach of (Knight and Marcu, 2000). We define a head-driven Markovization formulation of SCFG deletion rules, which allows us to lexicalize probabilities of constituent deletions. We also use a robust approach for tree-to-tree alignment between arbitrary document-abstract parallel corpora, which lets us train lexicalized models with much more data than previous approaches relying exclusively on scarcely available document-compression corpora.
Finally, we evaluate different Markovized models, and find that our selected best model is one that exploits head-modifier bilexicalization to accurately distinguish adjuncts from complements, and that produces sentences that were judged more grammatical than those generated by previous work.", "phrases": ["synchronous context-free grammar", "markovization formulation", "deletion rule"], "overall_score": 1.814085592750678, "scores": [0.9721320600803164, 0.5765853462254776, 0.5730620208085461], "rank_score": 0.7072598090381134} -{"id": "roark-etal-2012-opengrm", "title": "The OpenGrm open-source finite-state grammar software libraries", "abstract": "In this paper, we present a new collection of open-source software libraries that provides command line binary utilities and library classes and functions for compiling regular expression and context-sensitive rewrite rules into finite-state transducers, and for n-gram language modeling. The OpenGrm libraries use the OpenFst library to provide an efficient encoding of grammars and general algorithms for building, modifying and applying models.", "phrases": ["finite-state transducer", "opengrm library", "encoding"], "overall_score": 1.2671499606133378, "scores": [0.9300964877451483, 0.6348936550994589, 0.5566394325122878], "rank_score": 0.7072098584522983} -{"id": "gan-etal-2017-learning", "title": "Learning Generic Sentence Representations Using Convolutional Neural Networks", "abstract": "We propose a new encoder-decoder approach to learn distributed sentence representations that are applicable to multiple purposes. The model is learned by using a convolutional neural network as an encoder to map an input sentence into a continuous vector, and using a long short-term memory recurrent neural network as a decoder. Several tasks are considered, including sentence reconstruction and future sentence prediction. Further, a hierarchical encoder-decoder model is proposed to encode a sentence to predict multiple future sentences. By training our models on a large collection of novels, we obtain a highly generic convolutional sentence encoder that performs well in practice. Experimental results on several benchmark datasets, and across a broad range of applications, demonstrate the superiority of the proposed model over competing methods.", "phrases": ["sentence representation", "convolutional neural network", "encoder-decoder model"], "overall_score": 1.6958090892370496, "scores": [0.9707725800733213, 0.6227457017030932, 0.528103677874085], "rank_score": 0.7072073198834999} -{"id": "yu-etal-2020-wasserstein", "title": "Wasserstein Distance Regularized Sequence Representation for Text Matching in Asymmetrical Domains", "abstract": "One approach to matching texts from asymmetrical domains is projecting the input sequences into a common semantic space as feature vectors upon which the matching function can be readily defined and learned. In real-world matching practices, it is often observed that as the training goes on, the feature vectors projected from different domains tend to be indistinguishable. The phenomenon, however, is often overlooked in existing matching models. As a result, the feature vectors are constructed without any regularization, which inevitably increases the difficulty of learning the downstream matching functions. In this paper, we propose a novel matching method tailored for text matching in asymmetrical domains, called WD-Match.
In WD-Match, a Wasserstein distance-based regularizer is defined to regularize the feature vectors projected from different domains. As a result, the method forces the feature projection function to generate vectors such that those corresponding to different domains cannot be easily discriminated. The training process of WD-Match amounts to a game that minimizes the matching loss regularized by the Wasserstein distance. WD-Match can be used to improve different text matching methods, by using the method as its underlying matching model. Four popular text matching methods have been exploited in the paper. Experimental results based on four publicly available benchmarks showed that WD-Match consistently outperformed the underlying methods and the baselines.", "phrases": ["regularizer", "text matching", "asymmetrical domain", "wasserstein distance"], "overall_score": 0.7766648743244785, "scores": [0.9187814277747868, 0.8046248166555807, 0.5727685205299123, 0.5316285732530927], "rank_score": 0.7069508345533431} -{"id": "wang-lee-2018-learning", "title": "Learning to Encode Text as Human-Readable Summaries using Generative Adversarial Networks", "abstract": "Auto-encoders compress input data into a latent-space representation and reconstruct the original data from the representation. This latent representation is not easily interpreted by humans. In this paper, we propose training an auto-encoder that encodes input text into human-readable sentences, and unpaired abstractive summarization is thereby achieved. The auto-encoder is composed of a generator and a reconstructor. The generator encodes the input text into a shorter word sequence, and the reconstructor recovers the generator input from the generator output. To make the generator output human-readable, a discriminator restricts the output of the generator to resemble human-written sentences. By taking the generator output as the summary of the input text, abstractive summarization is achieved without document-summary pairs as training data. Promising results are shown on both English and Chinese corpora.", "phrases": ["generative adversarial network", "summarization", "discriminator", "gan"], "overall_score": 1.6948621103552877, "scores": [1.1642120704724725, 0.6043090586372523, 0.5308792055653311, 0.527849261388186], "rank_score": 0.7068123990158105} -{"id": "ellison-kirby-2006-measuring", "title": "Measuring Language Divergence by Intra-Lexical Comparison", "abstract": "This paper presents a method for building genetic language taxonomies based on a new approach to comparing lexical forms. Instead of comparing forms cross-linguistically, a matrix of language-internal similarities between forms is calculated. These matrices are then compared to give distances between languages. We argue that this coheres better with current thinking in linguistics and psycholinguistics.
An implementation of this approach, called PHILOLOGICON, is described, along with its application to Dyen et al.'s (1992) ninety-five wordlists from Indo-European languages.", "phrases": ["intra-lexical comparison", "distance", "philologicon"], "overall_score": 1.2660074207587215, "scores": [1.0102557845692701, 0.5702838489706455, 0.5391769509146203], "rank_score": 0.7065721948181786} -{"id": "michel-neubig-2018-extreme", "title": "Extreme Adaptation for Personalized Neural Machine Translation", "abstract": "Every person speaks or writes their own flavor of their native language, influenced by a number of factors: the content they tend to talk about, their gender, their social status, or their geographical origin. When attempting to perform Machine Translation (MT), these variations have a significant effect on how the system should perform translation, but this is not captured well by standard one-size-fits-all models. In this paper, we propose a simple and parameter-efficient adaptation technique that only requires adapting the bias of the output softmax to each particular user of the MT system, either directly or through a factored approximation. Experiments on TED talks in three languages demonstrate improvements in translation accuracy, and better reflection of speaker traits in the target text.", "phrases": ["adaptation", "output softmax", "ted talk", "output vocabulary", "speaker-specific data"], "overall_score": 2.0421225521242357, "scores": [0.9267026661661038, 0.9197930947881301, 0.570535166847184, 0.5589873934265659, 0.5566112558245022], "rank_score": 0.7065259154104971} -{"id": "gorman-etal-2020-sigmorphon", "title": "The SIGMORPHON 2020 Shared Task on Multilingual Grapheme-to-Phoneme Conversion", "abstract": "We describe the design and findings of the SIGMORPHON 2020 shared task on multilingual grapheme-to-phoneme conversion. Participants were asked to submit systems which take in a sequence of graphemes in a given language as input, then output a sequence of phonemes representing the pronunciation of that grapheme sequence. Nine teams submitted a total of 23 systems, at best achieving an 18% relative reduction in word error rate (macro-averaged over languages), versus strong neural sequence-to-sequence baselines. To facilitate error analysis, we publicly release the complete outputs for all systems\u2014a first for the SIGMORPHON workshop.", "phrases": ["multilingual grapheme-to-phoneme conversion", "grapheme", "phoneme"], "overall_score": 1.2658459573467795, "scores": [1.0035687996525866, 0.5591070410626756, 0.5567704004012614], "rank_score": 0.7064820803721745} -{"id": "mielke-etal-2019-kind", "title": "What Kind of Language Is Hard to Language-Model?", "abstract": "How language-agnostic are current state-of-the-art NLP tools? Are there some types of language that are easier to model with current methods? In prior work (Cotterell et al., 2018) we attempted to address this question for language modeling, and observed that recurrent neural network language models do not perform equally well over all the high-resource European languages found in the Europarl corpus. We speculated that inflectional morphology may be the primary culprit for the discrepancy. In this paper, we extend these earlier experiments to cover 69 languages from 13 language families using a multilingual Bible corpus. Methodologically, we introduce a new paired-sample multiplicative mixed-effects model to obtain language difficulty coefficients from at-least-pairwise parallel corpora.
In other words, the model is aware of inter-sentence variation and can handle missing data. Exploiting this model, we show that \u201ctranslationese\u201d is not any easier to model than natively written language in a fair comparison. Trying to answer the question of what features difficult languages have in common, we try and fail to reproduce our earlier (Cotterell et al., 2018) observation about morphological complexity and instead reveal far simpler statistics of the data that seem to drive complexity in a much larger sample.", "phrases": ["language modeling", "large scale analysis", "morphological typology effect"], "overall_score": 1.4690014742847834, "scores": [0.9955517077630769, 0.5767691970475385, 0.5470002371986182], "rank_score": 0.7064403806697445} -{"id": "bloom-etal-2007-extracting", "title": "Extracting Appraisal Expressions", "abstract": "Sentiment analysis seeks to characterize opinionated or evaluative aspects of natural language text. We suggest here that appraisal expression extraction should be viewed as a fundamental task in sentiment analysis. An appraisal expression is a textual unit expressing an evaluative stance towards some target. The task is to find and characterize the evaluative attributes of such elements. This paper describes a system for effectively extracting and disambiguating adjectival appraisal expressions in English, outputting a generic representation in terms of their evaluative function in the text. Data mining on appraisal expressions gives meaningful and non-obvious insights.", "phrases": ["appraisal expression", "opinion", "subjectivity analysis"], "overall_score": 1.468649621500684, "scores": [0.9747439860758337, 0.5746095076380089, 0.5694600320286473], "rank_score": 0.7062711752474966} -{"id": "ji-bilmes-2004-multi", "title": "Multi-Speaker Language Modeling", "abstract": "In conventional language modeling, the words from only one speaker at a time are represented, even for conversational tasks such as meetings and telephone calls. In a conversational or meeting setting, however, speakers can have significant influence on each other. To recover such un-modeled inter-speaker information, we introduce an approach for conversational language modeling that considers words from other speakers when predicting words from the current one. By augmenting a normal trigram context, our new multi-speaker language model (MSLM) improves on both Switchboard and ICSI Meeting Recorder corpora. Using an MSLM and a conditional mutual information based word clustering algorithm, we achieve an 8.9% perplexity reduction on Switchboard and a 12.2% reduction on the ICSI Meeting Recorder data.", "phrases": ["language modeling", "mslm", "multi-speaker language model"], "overall_score": 0.9790552931385162, "scores": [0.8886225389239316, 0.6881545772138117, 0.5419402081127985], "rank_score": 0.7062391080835139} -{"id": "xu-etal-2019-lexical", "title": "Lexical Micro-adaptation for Neural Machine Translation", "abstract": "This work is inspired by a typical machine translation industry scenario in which translators make use of in-domain data for facilitating translation of similar or repeating sentences. We introduce a generic framework applied at inference in which a subset of segment pairs are first extracted from training data according to their similarity to the input sentences. These segments are then used to dynamically update the parameters of a generic NMT network, thus performing a lexical micro-adaptation.
Our approach demonstrates strong adaptation performance to new and existing datasets including pseudo in-domain data. We evaluate our approach on a heterogeneous English-French training dataset showing accuracy gains on all evaluated domains when compared to strong adaptation baselines.", "phrases": ["input sentence", "lexical micro-adaptation", "adaptation time"], "overall_score": 0.9787232539881964, "scores": [0.957622248220851, 0.582240143049362, 0.5781363861270074], "rank_score": 0.7059995924657402} -{"id": "shen-etal-2020-blank", "title": "Blank Language Models", "abstract": "We propose Blank Language Model (BLM), a model that generates sequences by dynamically creating and filling in blanks. The blanks control which part of the sequence to expand, making BLM ideal for a variety of text editing and rewriting tasks. The model can start from a single blank or partially completed text with blanks at specified locations. It iteratively determines which word to place in a blank and whether to insert new blanks, and stops generating when no blanks are left to fill. BLM can be efficiently trained using a lower bound of the marginal data likelihood. On the task of filling missing text snippets, BLM significantly outperforms all other baselines in terms of both accuracy and fluency. Experiments on style transfer and damaged ancient text restoration demonstrate the potential of this framework for a wide range of applications.", "phrases": ["blm", "blank language models", "length"], "overall_score": 1.6248993038210096, "scores": [1.0397208947842924, 0.5504827436015732, 0.5268507655078342], "rank_score": 0.7056848012978999} -{"id": "etchegoyhen-azpeitia-2016-set", "title": "Set-Theoretic Alignment for Comparable Corpora", "abstract": "We describe and evaluate a simple method to extract parallel sentences from comparable corpora. The approach, termed STACC, is based on expanded lexical sets and the Jaccard similarity coefficient. We evaluate our system against state-of-the-art methods on a large range of datasets in different domains, for ten language pairs, showing that it either matches or outperforms current methods across the board and gives significantly better results on the noisiest datasets. STACC is a portable method, requiring no particular adaptation for new domains or language pairs, thus enabling the efficient mining of parallel sentences in comparable corpora.", "phrases": ["comparable corpora", "ibm alignment", "set expansion operation"], "overall_score": 0.978274136290881, "scores": [0.9263692404047766, 0.6195188925445728, 0.5711387346358326], "rank_score": 0.705675622528394} -{"id": "thater-etal-2011-word", "title": "Word Meaning in Context: A Simple and Effective Vector Model", "abstract": "We present a model that represents word meaning in context by vectors which are modified according to the words in the target\u2019s syntactic context. Contextualization of a vector is realized by reweighting its components, based on distributional information about the context words. Evaluation on a paraphrase ranking task derived from the SemEval 2007 Lexical Substitution Task shows that our model outperforms all previous models on this task. We show that our model supports a wider range of applications by evaluating it on a word sense disambiguation task.
Results show that our model achieves state-of-the-art performance.", "phrases": ["distributional information", "target word", "vector representation", "pado\u0301", "group"], "overall_score": 2.1809703766678727, "scores": [1.2515265028855918, 0.6212362244817867, 0.5819415610513768, 0.540640816606139, 0.5325429935327942], "rank_score": 0.7055776197115378} -{"id": "ayan-dorr-2006-going", "title": "Going Beyond AER: An Extensive Analysis of Word Alignments and Their Impact on MT", "abstract": "This paper presents an extensive evaluation of five different alignments and investigates their impact on the corresponding MT system output. We introduce new measures for intrinsic evaluations and examine the distribution of phrases and untranslated words during decoding to identify which characteristics of different alignments affect translation. We show that precision-oriented alignments yield better MT output (translating more words and using longer phrases) than recall-oriented alignments.", "phrases": ["aer", "recall-oriented alignment", "translation performance", "alignment error rate", "direct correlation"], "overall_score": 1.9105622777583793, "scores": [0.8525128947404659, 0.9304464256651609, 0.5860547377461877, 0.5849694394889483, 0.5735771749434396], "rank_score": 0.7055121345168405} -{"id": "rodriguez-luna-etal-2020-internal", "title": "Internal and external pressures on language emergence: least effort, object constancy and frequency", "abstract": "In previous work, artificial agents were shown to achieve almost perfect accuracy in referential games where they have to communicate to identify images. Nevertheless, the resulting communication protocols rarely display salient features of natural languages, such as compositionality. In this paper, we propose some realistic sources of pressure on communication that avert this outcome. More specifically, we formalise the principle of least effort through an auxiliary objective. Moreover, we explore several game variants, inspired by the principle of object constancy, in which we alter the frequency, position, and luminosity of the objects in the images. We perform an extensive analysis on their effect through compositionality metrics, diagnostic classifiers, and zero-shot evaluation. Our findings reveal that the proposed sources of pressure result in emerging languages with less redundancy, more focus on high-level conceptual information, and better abilities of generalisation. Overall, our contributions reduce the gap between emergent and natural languages.", "phrases": ["least effort", "object constancy", "compositionality"], "overall_score": 0.9778092012961853, "scores": [0.7843291732325605, 0.7816939299157655, 0.5499976253200816], "rank_score": 0.7053402428228024} -{"id": "le-etal-2020-flaubert-unsupervised", "title": "FlauBERT: Unsupervised Language Model Pre-training for French", "abstract": "Language models have become a key step to achieve state-of-the-art results in many different Natural Language Processing (NLP) tasks. Leveraging the huge amount of unlabeled texts nowadays available, they provide an efficient way to pre-train continuous word representations that can be fine-tuned for a downstream task, along with their contextualization at the sentence level. This has been widely demonstrated for English using contextualized representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018; Radford et al., 2018; Devlin et al., 2019; Yang et al., 2019b).
In this paper, we introduce and share FlauBERT, a model learned on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most of the time they outperform other pre-training approaches. Different versions of FlauBERT as well as a unified evaluation protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared to the research community for further reproducible experiments in French NLP.", "phrases": ["french language", "text classification", "flaubert", "pos tagging"], "overall_score": 1.4666172786401253, "scores": [0.8037761695239063, 0.8907650109202573, 0.5799948493918975, 0.5466392698655069], "rank_score": 0.7052938249253919} -{"id": "shavarani-sarkar-2021-better", "title": "Better Neural Machine Translation by Extracting Linguistic Information from BERT", "abstract": "Adding linguistic information (syntax or semantics) to neural machine translation (NMT) have mostly focused on using point estimates from pre-trained models. Directly using the capacity of massive pre-trained contextual word embedding models such as BERT(Devlin et al., 2019) has been marginally useful in NMT because effective fine-tuning is difficult to obtain for NMT without making training brittle and unreliable. We augment NMT by extracting dense fine-tuned vector-based linguistic information from BERT instead of using point estimates. Experimental results show that our method of incorporating linguistic information helps NMT to generalize better in a variety of training contexts and is no more difficult to train than conventional Transformer-based NMT.", "phrases": ["neural machine translation", "linguistic information", "bert", "pre-trained model"], "overall_score": 0.9775969302061598, "scores": [0.8434042846363056, 0.7798112148682799, 0.6461460690758271, 0.5513869178129891], "rank_score": 0.7051871215983504} -{"id": "hassan-radev-2010-identifying", "title": "Identifying Text Polarity Using Random Walks", "abstract": "Automatically identifying the polarity of words is a very important task in Natural Language Processing. It has applications in text classification, text filtering, analysis of product review, analysis of responses to surveys, and mining online discussions. We propose a method for identifying the polarity of words. We apply a Markov random walk model to a large word related-ness graph, producing a polarity estimate for any given word. A key advantage of the model is its ability to accurately and quickly assign a polarity sign and magnitude to any word. The method could be used both in a semi-supervised setting where a training set of labeled words is used, and in an unsupervised setting where a handful of seeds is used to define the two polarity classes. The method is experimentally tested using a manually labeled set of positive and negative words. It outperforms the state of the art methods in the semi-supervised setting. The results in the unsupervised setting is comparable to the best reported values. 
However, the proposed method is faster and does not need a large corpus.", "phrases": ["polarity", "random walk model", "word relatedness graph", "previous research"], "overall_score": 1.9096552606667385, "scores": [1.1269757925828972, 0.6005677563805021, 0.566413520620121, 0.5267517339526702], "rank_score": 0.7051772008840476} -{"id": "shapira-etal-2019-crowdsourcing", "title": "Crowdsourcing Lightweight Pyramids for Manual Summary Evaluation", "abstract": "Conducting a manual evaluation is considered an essential part of summary evaluation methodology. Traditionally, the Pyramid protocol, which exhaustively compares system summaries to references, has been perceived as very reliable, providing objective scores. Yet, due to the high cost of the Pyramid method and the required expertise, researchers resorted to cheaper and less thorough manual evaluation methods, such as Responsiveness and pairwise comparison, attainable via crowdsourcing. We revisit the Pyramid approach, proposing a lightweight sampling-based version that is crowdsourcable. We analyze the performance of our method in comparison to original expert-based Pyramid evaluations, showing higher correlation relative to the common Responsiveness method. We release our crowdsourced Summary-Content-Units, along with all crowdsourcing scripts, for future evaluations.", "phrases": ["manual evaluation", "pyramid method", "crowdsourcing"], "overall_score": 1.3718775824507992, "scores": [0.900510297422884, 0.6222180291863829, 0.5922885200577933], "rank_score": 0.7050056155556867} -{"id": "li-etal-2014-soft", "title": "Soft Cross-lingual Syntax Projection for Dependency Parsing", "abstract": "This paper proposes a simple yet effective framework of soft cross-lingual syntax projection to transfer syntactic structures from source language to target language using monolingual treebanks and large-scale bilingual parallel text. Here, soft means that we only project reliable dependencies to compose high-quality target structures. The projected instances are then used as additional training data to improve the performance of supervised parsers. The major issues for this idea are 1) errors from the source-language parser and unsupervised word aligner; 2) intrinsic syntactic non-isomorphism between languages; 3) incomplete parse trees after projection. To handle the first two issues, we propose to use a probabilistic dependency parser trained on the target-language treebank, and prune out unlikely projected dependencies that have low marginal probabilities. To make use of the incomplete projected syntactic structures, we adopt a new learning technique based on ambiguous labelings. For a word that has no head words after projection, we enrich the projected structure with all other words as its candidate heads as long as the newly-added dependency does not cross any projected dependencies. In this way, the syntactic structure of a sentence becomes a parse forest (ambiguous labels) instead of a single parse tree. During training, the objective is to maximize the mixed likelihood of manually labeled instances and projected instances with ambiguous labelings. 
Experimental results on benchmark data show that our method significantly outperforms a strong baseline supervised parser and previous syntax projection methods.", "phrases": ["cross-lingual syntax projection", "dependency parser", "ambiguous labeling"], "overall_score": 0.9773342528163045, "scores": [0.9854270615682896, 0.6058843090065859, 0.5236815491686291], "rank_score": 0.7049976399145015} -{"id": "ye-etal-2021-one2set", "title": "One2Set: Generating Diverse Keyphrases as a Set", "abstract": "Recently, the sequence-to-sequence models have made remarkable progress on the task of keyphrase generation (KG) by concatenating multiple keyphrases in a predefined order as a target sequence during training. However, the keyphrases are inherently an unordered set rather than an ordered sequence. Imposing a predefined order will introduce wrong bias during training, which can highly penalize shifts in the order between keyphrases. In this work, we propose a new training paradigm One2Set without predefining an order to concatenate the keyphrases. To fit this paradigm, we propose a novel model that utilizes a fixed set of learned control codes as conditions to generate a set of keyphrases in parallel. To solve the problem that there is no correspondence between each prediction and target during training, we propose a K-step label assignment mechanism via bipartite matching, which greatly increases the diversity and reduces the repetition rate of generated keyphrases. The experimental results on multiple benchmarks demonstrate that our approach significantly outperforms the state-of-the-art methods.", "phrases": ["keyphrase", "ordered sequence", "training paradigm one2set"], "overall_score": 1.4659316461869338, "scores": [0.9742015002161994, 0.585589739768144, 0.5551010762517408], "rank_score": 0.704964105412028} -{"id": "sachan-etal-2015-learning", "title": "Learning Answer-Entailing Structures for Machine Comprehension", "abstract": "Understanding open-domain text is one of the primary challenges in NLP. Machine comprehension evaluates the system\u2019s ability to understand text through a series of question-answering tasks on short pieces of text such that the correct answer can be found only in the given text. For this task, we posit that there is a hidden (latent) structure that explains the relation between the question, correct answer, and text. We call this the answer-entailing structure; given the structure, the correctness of the answer is evident. Since the structure is latent, it must be inferred. We present a unified max-margin framework that learns to find these hidden structures (given a corpus of question-answer pairs), and uses what it learns to answer machine comprehension questions on novel texts. We extend this framework to incorporate multi-task learning on the different subtasks that are required to perform machine comprehension. Evaluation on a publicly available dataset shows that our framework outperforms various IR and neural-network baselines, achieving an overall accuracy of 67.8% (vs. 
59.9%, the best previously-published result.)", "phrases": ["answer-entailing structure", "machine comprehension", "correct answer"], "overall_score": 1.5488885999643693, "scores": [0.9099608024245983, 0.6432659736473594, 0.5615619659720809], "rank_score": 0.7049295806813461} -{"id": "leusch-etal-2010-multi", "title": "Multi-pivot translation by system combination", "abstract": "This paper describes a technique to exploit multiple pivot languages when using machine translation (MT) on language pairs with scarce bilingual resources, or where no translation system for a language pair is available. The principal idea is to generate intermediate translations in several pivot languages, translate them separately into the target language, and generate a consensus translation out of these using MT system combination techniques. Our technique can also be applied when a translation system for a language pair is available, but is limited in its translation accuracy because of scarce resources. Using statistical MT systems for the 11 different languages of Europarl, we show experimentally that a direct translation system can be replaced by this pivot approach without a loss in translation quality if about six pivot languages are available. Furthermore, we can already improve an existing MT system by adding two pivot systems to it. The maximum improvement was found to be 1.4% abs. in BLEU in our experiments for 8 or more pivot languages.", "phrases": ["intermediate translation", "different language", "individual system"], "overall_score": 1.2627647639354531, "scores": [0.9400284645565243, 0.6125773052254289, 0.5616815309786075], "rank_score": 0.7047624335868535} -{"id": "schwenk-etal-2021-ccmatrix", "title": "CCMatrix: Mining Billions of High-Quality Parallel Sentences on the Web", "abstract": "We show that margin-based bitext mining in a multilingual sentence space can be successfully scaled to operate on monolingual corpora of billions of sentences. We use 32 snapshots of a curated common crawl corpus (Wenzel et al, 2019) totaling 71 billion unique sentences. Using one unified approach for 90 languages, we were able to mine 10.8 billion parallel sentences, out of which only 2.9 billions are aligned with English. We illustrate the capability of our scalable mining system to create high quality training sets from one language to any other by training hundreds of different machine translation models and evaluating them on the many-to-many TED benchmark. Further, we evaluate on competitive translation benchmarks such as WMT and WAT. Using only mined bitext, we set a new state of the art for a single system on the WMT'19 test set for English-German/Russian/Chinese. In particular, our English/German and English/Russian systems outperform the best single ones by over 4 BLEU points and are on par with best WMT'19 systems, which train on the WMT training data and augment it with backtranslation. We also achieve excellent results for distant languages pairs like Russian/Japanese, outperforming the best submission at the 2020 WAT workshop. 
All of the mined bitext will be freely available.", "phrases": ["web", "hundred", "sentence embedding", "train model"], "overall_score": 1.4654970165157688, "scores": [0.8599213598606656, 0.9055252666601906, 0.5320159407379561, 0.5215578036276822], "rank_score": 0.7047550927216237} -{"id": "fort-etal-2012-modeling", "title": "Modeling the Complexity of Manual Annotation Tasks: a Grid of Analysis", "abstract": "Manual corpus annotation is getting widely used in Natural Language Processing (NLP). While being recognized as a difficult task, no in-depth analysis of its complexity has been performed yet. We provide in this article a grid of analysis of the different complexity dimensions of an annotation task, which helps estimating beforehand the difficulties and cost of annotation campaigns. We observe the applicability of this grid on existing annotation campaigns and detail its application on a real-world example.", "phrases": ["complexity", "annotation task", "grid", "unit"], "overall_score": 1.1341226633781032, "scores": [0.8594143653970883, 0.8015449848677304, 0.6168293409343848, 0.5408914321312225], "rank_score": 0.7046700308326065} -{"id": "nn-2012-ttc", "title": "TTC - Terminology Extraction, Translation Tools and Comparable Corpora", "abstract": "TTC focuses on semi-/automatic acquisition of aligned bilingual terminologies from comparable corpora, i.e. from texts of the same domain (and possibly genre) in different languages. TTC develops techniques for the collection of comparable corpora, extraction of monolingual term candidates and their contexts for English, German, French, Spanish, Latvian, Russian and Chinese from the collected corpora. Furthermore, TTC defines and combines different symbolic and statistical strategies for the alignment of the extracted monolingual term candidates. TTC develops the software for project languages and tests it on several language pairs. The tools are provided as a standalone package and web service and include components for corpora crawling and management, monolingual term candidate extraction and alignment. An integration with EuroTermBank and selected computer-assisted translation tools and machine translation systems will be provided. TTC will evaluate the quality of machine translation which can be achieved by enhancing machine translation systems with automatically extracted terminology. The main target groups of the TTC tools are professionals from the translation, localization and/or documentation industry. TTC is at the beginning of its second year now and so far the project has made significant progress and achieved its first goals: (1) Requirements and definitions of the TTC tools were specified. (2) The first TTC workshop with end users was successfully held. (3) A crawler was developed and initial comparable corpora for project languages were collected and analyzed. (4) Term identification issues were researched for project languages and first experiments on term extraction were performed for English, German and French.", "phrases": ["comparable corpora", "ttc", "such simplified method", "englishlatvian"], "overall_score": 1.2622194369261108, "scores": [0.9153463094376528, 0.8119708138110713, 0.5489767368227506, 0.541538463080502], "rank_score": 0.7044580807879942} -{"id": "zhou-etal-2008-semi", "title": "Semi-Supervised Learning for Relation Extraction", "abstract": "This paper proposes a semi-supervised learning method for relation extraction. 
Given a small amount of labeled data and a large amount of unlabeled data, it first bootstraps a moderate number of weighted support vectors via SVM through a co-training procedure with random feature projection and then applies a label propagation (LP) algorithm via the bootstrapped support vectors. Evaluation on the ACE RDC 2003 corpus shows that our method outperforms the normal LP algorithm via all the available labeled data without SVM bootstrapping. Moreover, our method can largely reduce the computational burden. This suggests that our proposed method can integrate the advantages of both SVM bootstrapping and label propagation.", "phrases": ["relation extraction", "unlabeled data", "moderate number", "support vector", "svm"], "overall_score": 1.1332620766508188, "scores": [0.9468370425638705, 0.9295010563254861, 0.5669011177891011, 0.5470651014212329, 0.530372273073941], "rank_score": 0.7041353182347263} -{"id": "wu-wang-2005-boosting", "title": "Boosting Statistical Word Alignment", "abstract": "This paper proposes an approach to improve statistical word alignment with the boosting method. Applying boosting to word alignment must solve two problems. The first is how to build the reference set for the training data. We propose an approach to automatically build a pseudo reference set, which can avoid manual annotation of the training set. The second is how to calculate the error rate of each individual word aligner. We solve this by calculating the error rate of a manually annotated held-out data set instead of the entire training set. In addition, the final ensemble takes into account the weights of the alignment links produced by the individual word aligners. Experimental results indicate that the boosting method proposed in this paper performs much better than the original word aligner, achieving a large error rate reduction.", "phrases": ["statistical word alignment", "weight", "error rate reduction"], "overall_score": 0.9760317933571774, "scores": [0.9924076087357939, 0.5601986604830106, 0.5595680728207377], "rank_score": 0.7040581140131806} -{"id": "li-li-2014-query", "title": "Query-focused Multi-Document Summarization: Combining a Topic Model with Graph-based Semi-supervised Learning", "abstract": "Graph-based learning algorithms have been shown to be an effective approach for query-focused multi-document summarization (MDS). In this paper, we extend the standard graph ranking algorithm by proposing a two-layer (i.e. sentence layer and topic layer) graph-based semi-supervised learning approach based on topic modeling techniques. Experimental results on TAC datasets show that by considering topic information, we can effectively improve the summary performance.", "phrases": ["semi-supervised learning approach", "topic modeling technique", "query-focused multi-document summarization"], "overall_score": 1.1331179950770587, "scores": [1.0011172418986867, 0.5668787664869135, 0.5441413775729683], "rank_score": 0.7040457953195228} -{"id": "labutov-etal-2015-deep", "title": "Deep Questions without Deep Understanding", "abstract": "We develop an approach for generating deep (i.e, high-level) comprehension questions from novel text that bypasses the myriad challenges of creating a full semantic representation. 
We do this by decomposing the task into an ontology-crowd-relevance workflow, consisting of first representing the original text in a low-dimensional ontology, then crowdsourcing candidate question templates aligned with that space, and finally ranking potentially relevant templates for a novel region of text. If ontological labels are not available, we infer them from the text. We demonstrate the effectiveness of this method on a corpus of articles from Wikipedia alongside human judgments, and find that we can generate relevant deep questions with a precision of over 85% while maintaining a recall of 70%.", "phrases": ["novel text", "ontology", "question generation"], "overall_score": 1.805066622914764, "scores": [0.9845604877267525, 0.5743376976248992, 0.5523325307602538], "rank_score": 0.7037435720373019} -{"id": "pal-etal-2016-catalog-online", "title": "CATaLog Online: Porting a Post-editing Tool to the Web", "abstract": "This paper presents CATaLog online, a new web-based MT and TM post-editing tool. CATaLog online is a freeware software that can be used through a web browser and it requires only a simple registration. The tool features a number of editing and log functions similar to the desktop version of CATaLog enhanced with several new features that we describe in detail in this paper. CATaLog online is designed to allow users to post-edit both translation memory segments as well as machine translation output. The tool provides a complete set of log information currently not available in most commercial CAT tools. Log information can be used both for project management purposes as well as for the study of the translation process and translator's productivity.", "phrases": ["post-editing tool", "translator", "catalog online"], "overall_score": 0.9755778073305539, "scores": [0.9628416077999914, 0.6165491027038144, 0.5318011864518725], "rank_score": 0.7037306323185594} -{"id": "ng-2007-semantic", "title": "Semantic Class Induction and Coreference Resolution", "abstract": "This paper examines whether a learning-based coreference resolver can be improved using semantic class knowledge that is automatically acquired from a version of the Penn Treebank in which the noun phrases are labeled with their semantic classes. Experiments on the ACE test data show that a resolver that employs such induced semantic class knowledge yields a statistically significant improvement of 2% in F-measure over one that exploits heuristically computed semantic class knowledge. In addition, the induced knowledge improves the accuracy of common noun resolution by 2-6%.", "phrases": ["coreference resolution", "semantic class knowledge", "wordnet"], "overall_score": 1.1323175895487163, "scores": [0.9004357392223766, 0.6263814446676265, 0.5838282423188441], "rank_score": 0.7035484754029491} -{"id": "zhu-etal-2013-fast", "title": "Fast and Accurate Shift-Reduce Constituent Parsing", "abstract": "Shift-reduce dependency parsers give comparable accuracies to their chart-based counterparts, yet the best shift-reduce constituent parsers still lag behind the state-of-the-art. One important reason is the existence of unary nodes in phrase structure trees, which leads to different numbers of shift-reduce actions between different outputs for the same input. This turns out to have a large empirical impact on the framework of global training and beam search. We propose a simple yet effective extension to the shift-reduce process, which eliminates size differences between action sequences in beam-search. 
Our parser gives comparable accuracies to the state-of-the-art chart parsers. With linear run-time complexity, our parser is over an order of magnitude faster than the fastest chart parser.", "phrases": ["constituent", "beam search", "shift-reduce parser"], "overall_score": 2.0715085466927254, "scores": [0.9431763342591528, 0.6456987125137429, 0.5217224843864989], "rank_score": 0.7035325103864648} -{"id": "choi-cardie-2010-hierarchical", "title": "Hierarchical Sequential Learning for Extracting Opinions and Their Attributes", "abstract": "Automatic opinion recognition involves a number of related tasks, such as identifying the boundaries of opinion expression, determining their polarity, and determining their intensity. Although much progress has been made in this area, existing research typically treats each of the above tasks in isolation. In this paper, we apply a hierarchical parameter sharing technique using Conditional Random Fields for fine-grained opinion analysis, jointly detecting the boundaries of opinion expressions as well as determining two of their key attributes --- polarity and intensity. Our experimental results show that our proposed approach improves the performance over a baseline that does not exploit hierarchical structure among the classes. In addition, we find that the joint approach outperforms a baseline that is based on cascading two separate components.", "phrases": ["opinion expression", "isolation", "conditional random fields", "sequence labeler"], "overall_score": 1.747912554675602, "scores": [1.1368225947237254, 0.593190669844427, 0.5560691644455674, 0.5275646024749279], "rank_score": 0.7034117578721619} -{"id": "ling-rush-2017-coarse", "title": "Coarse-to-Fine Attention Models for Document Summarization", "abstract": "Sequence-to-sequence models with attention have been successful for a variety of NLP problems, but their speed does not scale well for tasks with long source sequences such as document summarization. We propose a novel coarse-to-fine attention model that hierarchically reads a document, using coarse attention to select top-level chunks of text and fine attention to read the words of the chosen chunks. While the computation for training standard attention models scales linearly with source sequence length, our method scales with the number of top-level chunks and can handle much longer sequences. Empirically, we find that while coarse-to-fine attention models lag behind state-of-the-art baselines, our method achieves the desired behavior of sparsely attending to subsets of the document for generation.", "phrases": ["document summarization", "chunk", "coarse-to-fine attention model"], "overall_score": 1.61919503770387, "scores": [0.8981963742374888, 0.6450542843963262, 0.5663717513659404], "rank_score": 0.7032074699999185} -{"id": "ailomaa-etal-2006-archivus", "title": "Archivus: A Multimodal System for Multimedia Meeting Browsing and Retrieval", "abstract": "This paper presents Archivus, a multi-modal language-enabled meeting browsing and retrieval system. The prototype is in an early stage of development, and we are currently exploring the role of natural language for interacting in this relatively unfamiliar and complex domain. 
We briefly describe the design and implementation status of the system, and then focus on how this system is used to elicit useful data for supporting hypotheses about multimodal interaction in the domain of meeting retrieval and for developing NLP modules for this specific domain.", "phrases": ["meeting", "retrieval", "archivus"], "overall_score": 0.7723649646665218, "scores": [0.7994766631195462, 0.7822363104982406, 0.5273976907173715], "rank_score": 0.7030368881117194} -{"id": "colin-etal-2016-webnlg", "title": "The WebNLG Challenge: Generating Text from DBPedia Data", "abstract": "With the emergence of the linked data initiative and the rapid development of RDF (Resource Description Format) datasets, several approaches have recently been proposed for generating text from RDF data (Sun and Mellish, 2006; Duma and Klein, 2013; Bontcheva and Wilks, 2004; Cimiano et al., 2013; Lebret et al., 2016). To support the evaluation and comparison of such systems, we propose a shared task on generating text from DBPedia data. The training data will consist of Data/Text pairs where the data is a set of triples extracted from DBPedia and the text is a verbalisation of these triples. In essence, the task consists in mapping data to text. Specific subtasks include sentence segmentation (how to chunk the input data into sentences), lexicalisation (of the DBPedia properties), aggregation (how to avoid repetitions) and surface realisation (how to build a syntactically correct and natural sounding text).", "phrases": ["webnlg challenge", "dbpedia data", "rdf", "data-to-text generation", "meaning representation"], "overall_score": 1.8549838016683111, "scores": [0.9043652096968967, 0.9021604871646601, 0.6227536341430562, 0.5432667021226651, 0.5419357874301107], "rank_score": 0.7028963641114778} -{"id": "kedia-chinthakindi-2021-keep", "title": "Keep Learning: Self-supervised Meta-learning for Learning from Inference", "abstract": "A common approach in many machine learning algorithms involves self-supervised learning on large unlabeled data before fine-tuning on downstream tasks to further improve performance. A new approach for language modelling, called dynamic evaluation, further fine-tunes a trained model during inference using trivially-present ground-truth labels, giving a large improvement in performance. However, this approach does not easily extend to classification tasks, where ground-truth labels are absent during inference. We propose to solve this issue by utilizing self-training and back-propagating the loss from the model's own class-balanced predictions (pseudo-labels), adapting the Reptile algorithm from meta-learning, combined with an inductive bias towards pre-trained weights to improve generalization. Our method improves the performance of standard backbones such as BERT, Electra, and ResNet-50 on a wide variety of tasks, such as question answering on SQuAD and NewsQA, benchmark task SuperGLUE, conversation response selection on Ubuntu Dialog corpus v2.0, as well as image classification on MNIST and ImageNet without any changes to the underlying models. 
Our proposed method outperforms previous approaches, enables self-supervised fine-tuning during inference of any classifier model to better adapt to target domains, can be easily adapted to any model, and is also effective in online and transfer-learning settings.", "phrases": ["pseudo-label", "generalization", "image classification"], "overall_score": 0.974285578028638, "scores": [0.9130793812542354, 0.6246080072435904, 0.5707080692495041], "rank_score": 0.7027984859157766} -{"id": "del-tredici-etal-2019-short", "title": "Short-Term Meaning Shift: A Distributional Exploration", "abstract": "We present the first exploration of meaning shift over short periods of time in online communities using distributional representations. We create a small annotated dataset and use it to assess the performance of a standard model for meaning shift detection on short-term meaning shift. We find that the model has problems distinguishing meaning shift from referential phenomena, and propose a measure of contextual variability to remedy this.", "phrases": ["period", "short-term meaning shift", "del"], "overall_score": 1.3669842912969208, "scores": [1.0180774802337962, 0.5551197092226956, 0.5342756945724365], "rank_score": 0.7024909613429761} -{"id": "takase-etal-2016-neural", "title": "Neural Headline Generation on Abstract Meaning Representation", "abstract": "Neural network-based encoder-decoder models are among recent attractive methodologies for tackling natural language generation tasks. This paper investigates the usefulness of structural syntactic and semantic information additionally incorporated in a baseline neural attention-based model. We encode results obtained from an abstract meaning representation (AMR) parser using a modified version of Tree-LSTM. Our proposed attention-based AMR encoder-decoder model improves headline generation benchmarks compared with the baseline neural attention-based model.", "phrases": ["abstract meaning representation", "encoder-decoder model", "neural attention-based model", "text generation system", "summarization"], "overall_score": 1.9900392892919114, "scores": [0.9394878848268321, 0.8495304192433216, 0.5916673521679766, 0.5729053964340668, 0.5583917167630889], "rank_score": 0.7023965538870571} -{"id": "neviarouskaya-aono-2013-extracting", "title": "Extracting Causes of Emotions from Text", "abstract": "This paper focuses on the novel task of automatic extraction of phrases related to causes of emotions. The analysis of emotional causes in sentences, where emotions are explicitly indicated through emotion keywords can provide the foundation for research on challenging task of recognition of implicit affect from text. We developed a corpus of emotion causes specific for 22 emotions. Based on the analysis of this corpus we introduce a method for the detection of the linguistic relations between an emotion and its cause and the extraction of the phrases describing the emotion causes. The method employs syntactic and dependency parser and rules for the analysis of eight types of the emotion-cause linguistic relations. 
The results of evaluation showed that our method performed with high level of accuracy (82%).", "phrases": ["cause", "emotion", "linguistic relation"], "overall_score": 1.4605314400083302, "scores": [0.9734948142070722, 0.5778681705007559, 0.5557384808546069], "rank_score": 0.7023671551874783} -{"id": "butnariu-etal-2009-semeval", "title": "SemEval-2010 Task 9: The Interpretation of Noun Compounds Using Paraphrasing Verbs and Prepositions", "abstract": "Previous research has shown that the meaning of many noun-noun compounds N1 N2 can be approximated reasonably well by paraphrasing clauses of the form 'N2 that ... N1', where '...' stands for a verb with or without a preposition. For example, malaria mosquito is a 'mosquito that carries malaria'. Evaluating the quality of such paraphrases is the theme of Task 9 at SemEval-2010. This paper describes some background, the task definition, the process of data collection and the task results. We also venture a few general conclusions before the participating teams present their systems at the SemEval-2010 workshop. There were 5 teams who submitted 7 systems.", "phrases": ["paraphrasing", "preposition", "semeval task"], "overall_score": 1.801465844936915, "scores": [0.9387767030896529, 0.5974959190048019, 0.5707465746351582], "rank_score": 0.7023397322432045} -{"id": "barbu-2015-spotting", "title": "Spotting false translation segments in translation memories", "abstract": "The problem of spotting false translations in the bi-segments of translation memories can be thought of as a classification task. We test the accuracy of various machine learning algorithms to find segments that are not true translations. We show that the Church-Gale scores in two large bisegment sets extracted from MyMemory can be used for finding positive and negative training examples for the machine learning algorithms. The performance of the winning classification algorithms, though high, is not yet sufficient for automatic cleaning of translations memories.", "phrases": ["false translation", "translation memory", "bi-segment"], "overall_score": 0.9734551584776306, "scores": [0.9494297109880511, 0.6111284368032153, 0.5460402467039205], "rank_score": 0.7021994648317289} -{"id": "sachan-etal-2021-syntax", "title": "Do Syntax Trees Help Pre-trained Transformers Extract Information?", "abstract": "Much recent work suggests that incorporating syntax information from dependency trees can improve task-specific transformer models. However, the effect of incorporating dependency tree information into pre-trained transformer models (e.g., BERT) remains unclear, especially given recent studies highlighting how these models implicitly encode syntax. In this work, we systematically study the utility of incorporating dependency trees into pre-trained transformers on three representative information extraction tasks: semantic role labeling (SRL), named entity recognition, and relation extraction. We propose and investigate two distinct strategies for incorporating dependency structure: a late fusion approach, which applies a graph neural network on the output of a transformer, and a joint fusion approach, which infuses syntax structure into the transformer attention layers. These strategies are representative of prior work, but we introduce additional model design elements that are necessary for obtaining improved performance. Our empirical analysis demonstrates that these syntax-infused transformers obtain state-of-the-art results on SRL and relation extraction tasks. 
However, our analysis also reveals a critical shortcoming of these models: we find that their performance gains are highly contingent on the availability of human-annotated dependency parses, which raises important questions regarding the viability of syntax-augmented transformers in real-world applications.", "phrases": ["pre-trained transformer model", "distinct strategy", "dependency structure", "state-of-the-art result"], "overall_score": 1.7448308817298752, "scores": [1.122347544382776, 0.6094248213767709, 0.5462920603579565, 0.5306219796736119], "rank_score": 0.7021716014477789} -{"id": "mcnamee-etal-2011-cross", "title": "Cross-Language Entity Linking", "abstract": "There has been substantial recent interest in aligning mentions of named entities in unstructured texts to knowledge base descriptors, a task commonly called entity linking. This technology is crucial for applications in knowledge discovery and text data mining. This paper presents experiments in the new problem of cross-language entity linking, where documents and named entities are in a different language than that used for the content of the reference knowledge base. We have created a new test collection to evaluate cross-language entity linking performance in twenty-one languages. We present experiments that examine issues such as: the importance of transliteration; the utility of cross-language information retrieval; and, the potential benefit of multilingual named entity recognition. Our best model achieves performance which is 94% of a strong monolingual baseline.", "phrases": ["multiple language", "cross-lingual entity", "xel"], "overall_score": 1.7448252883700615, "scores": [0.9073197462185787, 0.6689357625157787, 0.5302525428082483], "rank_score": 0.7021693505142018} -{"id": "allauzen-etal-2011-limsi", "title": "LIMSI @ WMT11", "abstract": "This paper describes LIMSI's submissions to the Sixth Workshop on Statistical Machine Translation. We report results for the French-English and German-English shared translation tasks in both directions. Our systems use n-code, an open source Statistical Machine Translation system based on bilingual n-grams. For the French-English task, we focussed on finding efficient ways to take advantage of the large and heterogeneous training parallel data. In particular, using a simple filtering strategy helped to improve both processing time and translation quality. To translate from English to French and German, we also investigated the use of the SOUL language model in Machine Translation and showed significant improvements with a 10-gram SOUL model. We also briefly report experiments with several alternatives to the standard n-best MERT procedure, leading to a significant speed-up.", "phrases": ["limsi", "impressive accuracy improvement", "translation model"], "overall_score": 1.1290765256531776, "scores": [0.951040306161491, 0.5791104649655978, 0.5744532964114444], "rank_score": 0.701534689179511} -{"id": "ling-etal-2015-two", "title": "Two/Too Simple Adaptations of Word2Vec for Syntax Problems", "abstract": "We present two simple modifications to the models in the popular Word2Vec tool, in order to generate embeddings more suited to tasks involving syntax. The main issue with the original models is the fact that they are insensitive to word order. While order independence is useful for inducing semantic representations, this leads to suboptimal results when they are used to solve syntax-based problems. 
We show improvements in part-of-speech tagging and dependency parsing using our proposed models.", "phrases": ["word2vec", "syntax-based problem", "skip-gram"], "overall_score": 2.0274561080350075, "scores": [0.9160168485032814, 0.6186897873092658, 0.5696483861629787], "rank_score": 0.7014516739918419} -{"id": "francis-landau-etal-2016-capturing", "title": "Capturing Semantic Similarity for Entity Linking with Convolutional Neural Networks", "abstract": "A key challenge in entity linking is making effective use of contextual information to disambiguate mentions that might refer to different entities in different contexts. We present a model that uses convolutional neural networks to capture semantic correspondence between a mention's context and a proposed target entity. These convolutional networks operate at multiple granularities to exploit various kinds of topic information, and their rich parameterization gives them the capacity to learn which n-grams characterize different topics. We combine these networks with a sparse linear model to achieve state-of-the-art performance on multiple entity linking datasets, outperforming the prior systems of Durrett and Klein (2014) and Nguyen et al. (2014).", "phrases": ["semantic similarity", "entity linking", "convolutional neural network", "wikipedia"], "overall_score": 1.74284999142554, "scores": [0.8710850296649468, 0.8259039586896476, 0.5663393605694246, 0.5421693812611067], "rank_score": 0.7013744325462814} -{"id": "toral-etal-2018-attaining", "title": "Attaining the Unattainable? Reassessing Claims of Human Parity in Neural Machine Translation", "abstract": "We reassess a recent study (Hassan et al., 2018) that claimed that machine translation (MT) has reached human parity for the translation of news from Chinese into English, using pairwise ranking and considering three variables that were not taken into account in that previous study: the language in which the source side of the test set was originally written, the translation proficiency of the evaluators, and the provision of inter-sentential context. If we consider only original source text (i.e. not translated from another language, or translationese), then we find evidence showing that human parity has not been achieved. We compare the judgments of professional translators against those of non-experts and discover that those of the experts result in higher inter-annotator agreement and better discrimination between human and machine translations. In addition, we analyse the human translations of the test set and identify important translation issues. Finally, based on these findings, we provide a set of recommendations for future human evaluations of MT.", "phrases": ["parity", "inter-annotator agreement", "human translation", "several recent study"], "overall_score": 2.3114865160851688, "scores": [0.8968777270308967, 0.8521727648549139, 0.5348958709072578, 0.5213945688865671], "rank_score": 0.701335232919909} -{"id": "denero-uszkoreit-2011-inducing", "title": "Inducing Sentence Structure from Parallel Corpora for Reordering", "abstract": "When translating among languages that differ substantially in word order, machine translation (MT) systems benefit from syntactic pre-ordering---an approach that uses features from a syntactic parse to permute source words into a target-language-like order. This paper presents a method for inducing parse trees automatically from a parallel corpus, instead of using a supervised parser trained on a tree-bank. 
These induced parses are used to pre-order source sentences. We demonstrate that our induced parser is effective: it not only improves a state-of-the-art phrase-based system with integrated reordering, but also approaches the performance of a recent pre-ordering method based on a supervised parser. These results show that the syntactic structure which is relevant to MT pre-ordering can be learned automatically from parallel text, thus establishing a new application for unsupervised grammar induction.", "phrases": ["sentence structure", "pre-ordering method", "induction"], "overall_score": 1.898747665055582, "scores": [0.9513455811556949, 0.5920330293873161, 0.5600694691300703], "rank_score": 0.7011493598910271} -{"id": "wu-etal-2019-self", "title": "Self-Supervised Dialogue Learning", "abstract": "The sequential order of utterances is often meaningful in coherent dialogues, and the order changes of utterances could lead to low-quality and incoherent conversations. We consider the order information as a crucial supervised signal for dialogue learning, which, however, has been neglected by many previous dialogue systems. Therefore, in this paper, we introduce a self-supervised learning task, inconsistent order detection, to explicitly capture the flow of conversation in dialogues. Given a sampled utterance pair triple, the task is to predict whether it is ordered or misordered. Then we propose a sampling-based self-supervised network SSN to perform the prediction with sampled triple references from previous dialogue history. Furthermore, we design a joint learning framework where SSN can guide the dialogue systems towards more coherent and relevant dialogue learning through adversarial training. We demonstrate that the proposed methods can be applied to both open-domain and task-oriented dialogue scenarios, and achieve the new state-of-the-art performance on the OpenSubtitles and Movie-Ticket Booking datasets.", "phrases": ["dialogue learning", "self-supervised learning task", "inconsistent order detection"], "overall_score": 0.9719898034682073, "scores": [0.8446877341878176, 0.635098080687674, 0.6236414890118398], "rank_score": 0.7011424346291104} -{"id": "cettolo-etal-2013-report", "title": "Report on the 10th IWSLT evaluation campaign", "abstract": "The paper overviews the tenth evaluation campaign organized by the IWSLT workshop. The 2013 evaluation offered multiple tracks on lecture transcription and translation based on the TED Talks corpus. In particular, this year IWSLT included two automatic speech recognition tracks, on English and German, three speech translation tracks, from English to French, English to German, and German to English, and three text translation tracks, also from English to French, English to German, and German to English. In addition to the official tracks, speech and text translation optional tracks were offered involving 12 other languages: Arabic, Spanish, Portuguese (B), Italian, Chinese, Polish, Persian, Slovenian, Turkish, Dutch, Romanian, Russian. Overall, 18 teams participated in the evaluation for a total of 217 primary runs submitted. All runs were evaluated with objective metrics on a current test set and two progress test sets, in order to compare the progresses against systems of the previous years. 
In addition, submissions of one of the official machine translation tracks were also evaluated with human post-editing.", "phrases": ["iwslt", "evaluation campaign", "spoken language translation"], "overall_score": 1.5403419004516534, "scores": [0.9366217429456231, 0.6276348941261665, 0.5388627932402494], "rank_score": 0.701039810104013} -{"id": "seginer-2007-fast", "title": "Fast Unsupervised Incremental Parsing", "abstract": "This paper describes an incremental parser and an unsupervised learning algorithm for inducing this parser from plain text. The parser uses a representation for syntactic structure similar to dependency links which is well-suited for incremental parsing. In contrast to previous unsupervised parsers, the parser does not use part-of-speech tags and both learning and parsing are local and fast, requiring no explicit clustering or global optimization. The parser is evaluated by converting its output into equivalent bracketing and improves on previously published results for unsupervised parsing from plain text.", "phrases": ["unsupervised parser", "optimization", "bracketing", "ccl", "punctuation"], "overall_score": 2.4057454708993844, "scores": [1.0092252831544681, 0.8558157469477398, 0.5820585645408323, 0.5296726483951145, 0.526073469021036], "rank_score": 0.7005691424118382} -{"id": "marcheggiani-artieres-2014-experimental", "title": "An Experimental Comparison of Active Learning Strategies for Partially Labeled Sequences", "abstract": "Active learning (AL) consists of asking human annotators to annotate automatically selected data that are assumed to bring the most benefit in the creation of a classifier. AL allows to learn accurate systems with much less annotated data than what is required by pure supervised learning algorithms, hence limiting the tedious effort of annotating a large collection of data. We experimentally investigate the behavior of several AL strategies for sequence labeling tasks (in a partially-labeled scenario) tailored on Partially-Labeled Conditional Random Fields, on four sequence labeling tasks: phrase chunking, part-of-speech tagging, named-entity recognition, and bioentity recognition.", "phrases": ["active learning", "human annotator", "sequence labeling task"], "overall_score": 1.4567645167117087, "scores": [0.9488345370646262, 0.6168599138800759, 0.5359724930582876], "rank_score": 0.7005556480009966} -{"id": "zollmann-etal-2008-systematic", "title": "A Systematic Comparison of Phrase-Based, Hierarchical and Syntax-Augmented Statistical MT", "abstract": "Probabilistic synchronous context-free grammar (PSCFG) translation models define weighted transduction rules that represent translation and reordering operations via nonterminal symbols. In this work, we investigate the source of the improvements in translation quality reported when using two PSCFG translation models (hierarchical and syntax-augmented), when extending a state-of-the-art phrase-based baseline that serves as the lexical support for both PSCFG models. We isolate the impact on translation quality for several important design decisions in each model. 
We perform this comparison on three NIST language translation tasks; Chinese-to-English, Arabic-to-English and Urdu-to-English, each representing unique challenges.", "phrases": ["translation quality", "phrase-based model", "syntax-based model", "hierarchical model", "samt"], "overall_score": 2.1321974573896747, "scores": [1.2351140367132447, 0.591436485113218, 0.5860506062670402, 0.5448645103681438, 0.5442287996717281], "rank_score": 0.700338887626675} -{"id": "liu-etal-2022-p", "title": "P-Tuning: Prompt Tuning Can Be Comparable to Fine-tuning Across Scales and Tasks", "abstract": "Prompt tuning, which only tunes continuous prompts with a frozen language model, substantially reduces per-task storage and memory usage at training. However, in the context of NLU, prior work reveals that prompt tuning does not perform well for normal-sized pretrained models. We also find that existing methods of prompt tuning cannot handle hard sequence labeling tasks, indicating a lack of universality. We present a novel empirical finding that properly optimized prompt tuning can be universally effective across a wide range of model scales and NLU tasks. It matches the performance of finetuning while having only 0.1%-3% tuned parameters. Our method P-Tuning v2 is an implementation of Deep Prompt Tuning (CITATION) optimized and adapted for NLU. Given the universality and simplicity of P-Tuning v2, we believe it can serve as an alternative to finetuning and a strong baseline for future research.", "phrases": ["prompt tuning", "language model", "comparable performance"], "overall_score": 1.4562829195681082, "scores": [0.957653048397895, 0.5779305145210907, 0.5653885832732256], "rank_score": 0.7003240487307371} -{"id": "dahlmeier-ng-2011-grammatical", "title": "Grammatical Error Correction with Alternating Structure Optimization", "abstract": "We present a novel approach to grammatical error correction based on Alternating Structure Optimization. As part of our work, we introduce the NUS Corpus of Learner English (NUCLE), a fully annotated one million words corpus of learner English available for research purposes. We conduct an extensive evaluation for article and preposition errors using various feature sets. Our experiments show that our approach outperforms two baselines trained on non-learner text and learner text, respectively. Our approach also outperforms two commercial grammar checking software packages.", "phrases": ["alternating structure optimization", "novel approach", "preposition error", "grammatical error correction", "reason"], "overall_score": 1.9840353879385693, "scores": [0.9667769036601094, 0.9020397033717849, 0.5509833549298677, 0.5438368875598025, 0.5377503511650114], "rank_score": 0.7002774401373151} -{"id": "dyer-resnik-2010-context", "title": "Context-free reordering, finite-state translation", "abstract": "We describe a class of translation model in which a set of input variants encoded as a context-free forest is translated using a finite-state translation model. The forest structure of the input is well-suited to representing word order alternatives, making it straightforward to model translation as a two step process: (1) tree-based source reordering and (2) phrase transduction. By treating the reordering process as a latent variable in a probabilistic translation model, we can learn a long-range source reordering model without example reordered sentences, which are problematic to construct. 
The resulting model has state-of-the-art translation performance, uses linguistically motivated features to effectively model long range reordering, and is significantly smaller than a comparable hierarchical phrase-based translation model.", "phrases": ["latent variable", "context-free reordering", "syntax-based model"], "overall_score": 1.4561845793160788, "scores": [0.9777383582175194, 0.5764533867815086, 0.5466385261992603], "rank_score": 0.7002767570660962} -{"id": "warstadt-etal-2019-neural", "title": "Neural Network Acceptability Judgments", "abstract": "This paper investigates the ability of artificial neural networks to judge the grammatical acceptability of a sentence, with the goal of testing their linguistic competence. We introduce the Corpus of Linguistic Acceptability (CoLA), a set of 10,657 English sentences labeled as grammatical or ungrammatical from published linguistics literature. As baselines, we train several recurrent neural network models on acceptability classification, and find that our models outperform unsupervised models by Lau et al. (2016) on CoLA. Error-analysis on specific grammatical phenomena reveals that both Lau et al.'s models and ours learn systematic generalizations like subject-verb-object order. However, all models we test perform far below human level on a wide range of grammatical constructions.", "phrases": ["acceptability", "grammaticality", "10k sentence", "human performance"], "overall_score": 2.0616781926011476, "scores": [1.1909771970924594, 0.5382505080305997, 0.5372430055257065, 0.5343048628152174], "rank_score": 0.7001938933659957} -{"id": "nivre-etal-2018-enhancing", "title": "Enhancing Universal Dependency Treebanks: A Case Study", "abstract": "We evaluate two cross-lingual techniques for adding enhanced dependencies to existing treebanks in Universal Dependencies. We apply a rule-based system developed for English and a data-driven system trained on Finnish to Swedish and Italian. We find that both systems are accurate enough to bootstrap enhanced dependencies in existing UD treebanks. In the case of Italian, results are even on par with those of a prototype language-specific system.", "phrases": ["universal dependencies", "treebank", "data-driven system"], "overall_score": 1.6787915578288881, "scores": [0.9306900856577446, 0.6120244720007106, 0.5576168165341961], "rank_score": 0.7001104580642171} -{"id": "yao-etal-2013-semi", "title": "Semi-Markov Phrase-Based Monolingual Alignment", "abstract": "We introduce a novel discriminative model for phrase-based monolingual alignment using a semi-Markov CRF. Our model achieves state-of-the-art alignment accuracy on two phrase-based alignment datasets (RTE and paraphrase), while doing significantly better than other strong baselines in both non-identical alignment and phrase-only alignment. 
Additional experiments highlight the potential benefit of our alignment model to RTE, paraphrase identification and question answering, where even a naive application of our model\u2019s alignment score approaches the state of the art.", "phrases": ["monolingual alignment", "semi-markov crf", "paraphrase identification", "non-homographic nature", "semantic unit"], "overall_score": 1.53823531578177, "scores": [0.9443681325975511, 0.8898942260287289, 0.5666533529846295, 0.5522457283833662, 0.5472438705239424], "rank_score": 0.7000810621036436} -{"id": "li-etal-2016-generative", "title": "Generative Topic Embedding: a Continuous Representation of Documents", "abstract": "Word embedding maps words into a low-dimensional continuous embedding space by exploiting the local word collocation patterns in a small context window. On the other hand, topic modeling maps documents onto a low-dimensional topic space, by utilizing the global word collocation patterns in the same document. These two types of patterns are complementary. In this paper, we propose a generative topic embedding model to combine the two types of patterns. In our model, topics are represented by embedding vectors, and are shared across documents. The probability of each word is influenced by both its local context and its topic. A variational inference method yields the topic embeddings as well as the topic mixing proportions for each document. Jointly they represent the document in a low-dimensional continuous space. In two document classification tasks, our method performs better than eight existing methods, with fewer features. In addition, we illustrate with an example that our method can generate coherent topics even based on only one document.", "phrases": ["word embedding", "variational inference method", "generative topic"], "overall_score": 1.4556023987238726, "scores": [0.957868401748455, 0.5906649726972262, 0.5514569876993296], "rank_score": 0.6999967873816703} -{"id": "aker-etal-2013-extracting", "title": "Extracting bilingual terminologies from comparable corpora", "abstract": "In this paper we present a method for extracting bilingual terminologies from comparable corpora. In our approach we treat bilingual term extraction as a classification problem. For classification we use an SVM binary classifier and training data taken from the EUROVOC thesaurus. We test our approach on a held-out test set from EUROVOC and perform precision, recall and f-measure evaluations for 20 European language pairs. The performance of our classifier reaches the 100% precision level for many language pairs. We also perform manual evaluation on bilingual terms extracted from English-German term-tagged comparable corpora. The results of this manual evaluation showed 60-83% of the term pairs generated are exact translations and over 90% exact or partial translations.", "phrases": ["bilingual terminology", "classification problem", "eurovoc thesaurus"], "overall_score": 1.126257441173106, "scores": [0.9578143620012066, 0.589817565009296, 0.5517173535152009], "rank_score": 0.6997830935085677} -{"id": "faralli-navigli-2012-new", "title": "A New Minimally-Supervised Framework for Domain Word Sense Disambiguation", "abstract": "We present a new minimally-supervised framework for performing domain-driven Word Sense Disambiguation (WSD). Glossaries for several domains are iteratively acquired from the Web by means of a bootstrapping technique. The acquired glosses are then used as the sense inventory for fully-unsupervised domain WSD. 
Our experiments, on new and gold-standard datasets, show that our wide-coverage framework enables high-performance results on dozens of domains at a coarse and fine-grained level.", "phrases": ["new minimally-supervised framework", "word sense disambiguation", "wsd"], "overall_score": 0.7687830512655016, "scores": [0.9138292922669022, 0.6369660876693948, 0.5485340901468945], "rank_score": 0.6997764900277305} -{"id": "xi-etal-2012-enhancing", "title": "Enhancing Statistical Machine Translation with Character Alignment", "abstract": "The dominant practice of statistical machine translation (SMT) uses the same Chinese word segmentation specification in both alignment and translation rule induction steps in building Chinese-English SMT system, which may suffer from a suboptimal problem that word segmentation better for alignment is not necessarily better for translation. To tackle this, we propose a framework that uses two different segmentation specifications for alignment and translation respectively: we use Chinese character as the basic unit for alignment, and then convert this alignment to conventional word alignment for translation rule induction. Experimentally, our approach outperformed two baselines: fully word-based system (using word for both alignment and translation) and fully character-based system, in terms of alignment quality and translation performance.", "phrases": ["statistical machine translation", "unit", "alignment quality"], "overall_score": 1.2526415148039611, "scores": [0.9894429832439698, 0.5738458863471887, 0.534048752422868], "rank_score": 0.6991125406713422} -{"id": "koehn-etal-2009-462", "title": "462 Machine Translation Systems for Europe", "abstract": "We built 462 machine translation systems for all language pairs of the Acquis Communautaire corpus. We report and analyse the performance of these systems, and compare them against pivot translation and a number of system combination methods (multi-pivot, multisource) that are possible due to the available systems.", "phrases": ["acquis communautaire corpus", "pivot language", "news", "europarl", "other domain"], "overall_score": 1.9380571381731428, "scores": [0.9325385227097872, 0.863641547999519, 0.5922058078452617, 0.5653172739431409, 0.5413286252546039], "rank_score": 0.6990063555504624} -{"id": "ji-eisenstein-2013-discriminative", "title": "Discriminative Improvements to Distributional Sentence Similarity", "abstract": "Matrix and tensor factorization have been applied to a number of semantic relatedness tasks, including paraphrase identification. The key idea is that similarity in the latent space implies semantic relatedness. We describe three ways in which labeled data can improve the accuracy of these approaches on paraphrase classification. First, we design a new discriminative term-weighting metric called TF-KLD, which outperforms TF-IDF. Next, we show that using the latent representation from matrix factorization as features in a classification algorithm substantially improves accuracy. 
Finally, we combine latent features with fine-grained n-gram overlap features, yielding performance that is 3% more accurate than the prior state-of-the-art.", "phrases": ["paraphrase identification", "tf-idf", "matrix factorization"], "overall_score": 1.7928414661157148, "scores": [0.9571857688555859, 0.5926681203618596, 0.5470781155805473], "rank_score": 0.6989773349326643} -{"id": "gouws-etal-2011-unsupervised", "title": "Unsupervised Mining of Lexical Variants from Noisy Text", "abstract": "The amount of data produced in user-generated content continues to grow at a staggering rate. However, the text found in these media can deviate wildly from the standard rules of orthography, syntax and even semantics and present significant problems to downstream applications which make use of this noisy data. In this paper we present a novel unsupervised method for extracting domain-specific lexical variants given a large volume of text. We demonstrate the utility of this method by applying it to normalize text messages found in the online social media service, Twitter, into their most likely standard English versions. Our method yields a 20% reduction in word error rate over an existing state-of-the-art approach.", "phrases": ["noisy text", "strongly-associated word pair", "exception dictionary"], "overall_score": 1.6092876064354924, "scores": [0.8919395382722404, 0.6192648065770057, 0.5855098369614327], "rank_score": 0.6989047272702263} -{"id": "el-haj-etal-2016-learning", "title": "Learning Tone and Attribution for Financial Text Mining", "abstract": "Attribution bias refers to the tendency of people to attribute successes to their own abilities but failures to external factors. In a business context an internal factor might be the restructuring of the firm and an external factor might be an unfavourable change in exchange or interest rates. In accounting research, the presence of an attribution bias has been demonstrated for the narrative sections of the annual financial reports. Previous studies have applied manual content analysis to this problem but in this paper we present novel work to automate the analysis of attribution bias through using machine learning algorithms. Previous studies have only applied manual content analysis on a small scale to reveal such a bias in the narrative section of annual financial reports. In our work a group of experts in accounting and finance labelled and annotated a list of 32,449 sentences from a random sample of UK Preliminary Earning Announcements (PEAs) to allow us to examine whether sentences in PEAs contain internal or external attribution and which kinds of attributions are linked to positive or negative performance. We wished to examine whether human annotators could agree on coding this difficult task and whether Machine Learning (ML) could be applied reliably to replicate the coding process on a much larger scale. 
Our best machine learning algorithm correctly classified performance sentences with 70% accuracy and detected tone and attribution in financial PEAs with accuracy of 79%.", "phrases": ["attribution", "narrative section", "annual financial report", "report"], "overall_score": 0.9686347554793687, "scores": [0.8352175108169164, 0.8460844081952932, 0.565655228937401, 0.5479319683759475], "rank_score": 0.6987222790813895} -{"id": "zhang-etal-2019-sp", "title": "SP-10K: A Large-scale Evaluation Set for Selectional Preference Acquisition", "abstract": "Selectional Preference (SP) is a commonly observed language phenomenon and proved to be useful in many natural language processing tasks. To provide a better evaluation method for SP models, we introduce SP-10K, a large-scale evaluation set that provides human ratings for the plausibility of 10,000 SP pairs over five SP relations, covering 2,500 most frequent verbs, nouns, and adjectives in American English. Three representative SP acquisition methods based on pseudo-disambiguation are evaluated with SP-10K. To demonstrate the importance of our dataset, we investigate the relationship between SP-10K and the commonsense knowledge in ConceptNet5 and show the potential of using SP to represent the commonsense knowledge. We also use the Winograd Schema Challenge to prove that the proposed new SP relations are essential for the hard pronoun coreference resolution problem.", "phrases": ["large-scale evaluation set", "selectional preference", "plausibility"], "overall_score": 1.124513059369583, "scores": [0.9369920169590757, 0.6290060494256168, 0.5300996780797933], "rank_score": 0.6986992481548286} -{"id": "malmasi-cahill-2015-measuring", "title": "Measuring Feature Diversity in Native Language Identification", "abstract": "The task of Native Language Identification (NLI) is typically solved with machine learning methods, and systems make use of a wide variety of features. Some preliminary studies have been conducted to examine the effectiveness of individual features, however, no systematic study of feature interaction has been carried out. We propose a function to measure feature independence and analyze its effectiveness on a standard NLI corpus.", "phrases": ["native language identification", "nli", "feature interaction"], "overall_score": 1.2518062281901225, "scores": [0.9716402330237197, 0.5692491613329622, 0.5550496806511467], "rank_score": 0.6986463583359429} -{"id": "cho-etal-2019-mixture", "title": "Mixture Content Selection for Diverse Sequence Generation", "abstract": "Generating diverse sequences is important in many NLP applications such as question generation or summarization that exhibit semantically one-to-many relationships between source and the target sequences. We present a method to explicitly separate diversification from generation using a general plug-and-play module (called SELECTOR) that wraps around and guides an existing encoder-decoder model. The diversification stage uses a mixture of experts to sample different binary masks on the source sequence for diverse content selection. The generation stage uses a standard encoder-decoder model given each selected content from the source sequence. Due to the non-differentiable nature of discrete sampling and the lack of ground truth labels for binary mask, we leverage a proxy for ground truth mask and adopt stochastic hard-EM for training. 
In question generation (SQuAD) and abstractive summarization (CNN-DM), our method demonstrates significant improvements in accuracy, diversity and training efficiency, including state-of-the-art top-1 accuracy in both datasets, 6% gain in top-5 accuracy, and 3.7 times faster training over a state-of-the-art model. Our code is publicly available at .", "phrases": ["diversity", "question generation", "selector", "different binary mask", "research problem"], "overall_score": 1.6749382077801227, "scores": [1.2402969773037007, 0.5840105644936203, 0.5656531631978272, 0.5542943636334128, 0.5482623627633678], "rank_score": 0.6985034862783858} -{"id": "kementchedjhieva-etal-2019-lost", "title": "Lost in Evaluation: Misleading Benchmarks for Bilingual Dictionary Induction", "abstract": "The task of bilingual dictionary induction (BDI) is commonly used for intrinsic evaluation of cross-lingual word embeddings. The largest dataset for BDI was generated automatically, so its quality is dubious. We study the composition and quality of the test sets for five diverse languages from this dataset, with concerning findings: (1) a quarter of the data consists of proper nouns, which can be hardly indicative of BDI performance, and (2) there are pervasive gaps in the gold-standard targets. These issues appear to affect the ranking between cross-lingual embedding systems on individual languages, and the overall degree to which the systems differ in performance. With proper nouns removed from the data, the margin between the top two systems included in the study grows from 3.4% to 17.2%. Manual verification of the predictions, on the other hand, reveals that gaps in the gold standard targets artificially inflate the margin between the two systems on English to Bulgarian BDI from 0.1% to 6.7%. We thus suggest that future research either avoids drawing conclusions from quantitative results on this BDI dataset, or accompanies such evaluation with rigorous error analysis.", "phrases": ["bilingual dictionary induction", "test set", "conclusion"], "overall_score": 0.9673333775584786, "scores": [0.980401055213249, 0.5661060559419689, 0.5468434888797645], "rank_score": 0.6977835333449942} -{"id": "li-khudanpur-2008-large", "title": "Large-scale Discriminative n-gram Language Models for Statistical Machine Translation", "abstract": "We extend discriminative n-gram language modeling techniques originally proposed for automatic speech recognition to a statistical machine translation task. In this context, we propose a novel data selection method that leads to good models using a fraction of the training data. We carry out systematic experiments on several benchmark tests for Chinese to English translation using a hierarchical phrase-based machine translation system, and show that a discriminative language model significantly improves upon a state-of-the-art baseline. The experiments also highlight the benefits of our data selection method.", "phrases": ["statistical machine translation", "speech recognition", "list"], "overall_score": 1.5324247916595515, "scores": [0.9520245484387841, 0.5913882707515675, 0.5488969166456221], "rank_score": 0.6974365786119913} -{"id": "mishra-etal-2019-modular", "title": "A Modular Architecture for Unsupervised Sarcasm Generation", "abstract": "In this paper, we propose a novel framework for sarcasm generation; the system takes a literal negative opinion as input and translates it into a sarcastic version. Our framework does not require any paired data for training. 
Sarcasm emanates from context-incongruity which becomes apparent as the sentence unfolds. Our framework introduces incongruity into the literal input version through modules that: (a) filter factual content from the input opinion, (b) retrieve incongruous phrases related to the filtered facts and (c) synthesize sarcastic text from the filtered and incongruous phrases. The framework employs reinforced neural sequence to sequence learning and information retrieval and is trained only using unlabeled non-sarcastic and sarcastic opinions. Since no labeled dataset exists for such a task, for evaluation, we manually prepare a benchmark dataset containing literal opinions and their sarcastic paraphrases. Qualitative and quantitative performance analyses on the data reveal our system's superiority over baselines built using known unsupervised statistical and neural machine translation and style transfer techniques.", "phrases": ["modular architecture", "unsupervised sarcasm generation", "information retrieval", "paraphrase generator", "natural part"], "overall_score": 1.4501485468583861, "scores": [1.0051161212228836, 0.8167641765753199, 0.5719395805537713, 0.5687953596634081, 0.5242549571595014], "rank_score": 0.6973740390349769} -{"id": "uchiumi-etal-2015-inducing", "title": "Inducing Word and Part-of-Speech with Pitman-Yor Hidden Semi-Markov Models", "abstract": "We propose a nonparametric Bayesian model for joint unsupervised word segmentation and part-of-speech tagging from raw strings. Extending a previous model for word segmentation, our model is called a Pitman-Yor Hidden Semi-Markov Model (PYHSMM) and considered as a method to build a class n-gram language model directly from strings, while integrating character and word level information. Experimental results on standard datasets on Japanese, Chinese and Thai revealed it outperforms previous results to yield the state-of-the-art accuracies. This model will also serve to analyze a structure of a language whose words are not identified a priori.", "phrases": ["semi-markov model", "part-of-speech tagging", "pyhsmm"], "overall_score": 0.7660866230836377, "scores": [0.9271063995716575, 0.6201565450361878, 0.5447033413666024], "rank_score": 0.6973220953248158} -{"id": "galley-manning-2010-accurate", "title": "Accurate Non-Hierarchical Phrase-Based Translation", "abstract": "A principal weakness of conventional (i.e., non-hierarchical) phrase-based statistical machine translation is that it can only exploit continuous phrases. In this paper, we extend phrase-based decoding to allow both source and target phrasal discontinuities, which provide better generalization on unseen data and yield significant improvements to a standard phrase-based system (Moses). More interestingly, our discontinuous phrase-based system also outperforms a state-of-the-art hierarchical system (Joshua) by a very significant margin (+1.03 BLEU on average on five Chinese-English NIST test sets), even though both Joshua and our system support discontinuous phrases. 
Since the key difference between these two systems is that ours is not hierarchical---i.e., our system uses a string-based decoder instead of CKY, and it imposes no hard hierarchical reordering constraints during training and decoding---this paper sets out to challenge the commonly held belief that the tree-based parameterization of systems such as Hiero and Joshua is crucial to their good performance against Moses.", "phrases": ["generalization", "phrase-based system", "hierarchical system", "joshua", "gap"], "overall_score": 1.788209293018326, "scores": [0.992400629423083, 0.8609850581905609, 0.5586263021490802, 0.5483533792489858, 0.5254915501831199], "rank_score": 0.6971713838389659} -{"id": "gardner-etal-2014-incorporating", "title": "Incorporating Vector Space Similarity in Random Walk Inference over Knowledge Bases", "abstract": "Much work in recent years has gone into the construction of large knowledge bases (KBs), such as Freebase, DBPedia, NELL, and YAGO. While these KBs are very large, they are still very incomplete, necessitating the use of inference to fill in gaps. Prior work has shown how to make use of a large text corpus to augment random walk inference over KBs. We present two improvements to the use of such large corpora to augment KB inference. First, we present a new technique for combining KB relations and surface text into a single graph representation that is much more compact than graphs used in prior work. Second, we describe how to incorporate vector space similarity into random walk inference over KBs, reducing the feature sparsity inherent in using surface text. This allows us to combine distributional similarity with symbolic logical inference in novel and effective ways. With experiments on many relations from two separate KBs, we show that our methods significantly outperform prior work on KB inference, both in the size of problem our methods can handle and in the quality of predictions made.", "phrases": ["vector space similarity", "random walk inference", "much work", "entity pair"], "overall_score": 1.9329502203857916, "scores": [0.8695910041756517, 0.8125437466818417, 0.5701618050766304, 0.5363611413016868], "rank_score": 0.6971644243089526} -{"id": "lee-etal-2022-deduplicating", "title": "Deduplicating Training Data Makes Language Models Better", "abstract": "We find that existing language modeling datasets contain many near-duplicate examples and long repetitive substrings. As a result, over 1% of the unprompted output of language models trained on these datasets is copied verbatim from the training data. We develop two tools that allow us to deduplicate training datasets\u2014for example removing from C4 a single 61 word English sentence that is repeated over 60,000 times. Deduplication allows us to train models that emit memorized text ten times less frequently and require fewer training steps to achieve the same or better accuracy. We can also reduce train-test overlap, which affects over 4% of the validation set of standard datasets, thus allowing for more accurate evaluation. Code for deduplication is released at .", "phrases": ["language model", "memorized text", "well accuracy"], "overall_score": 0.9663800376713946, "scores": [0.983072662225142, 0.5743817391926108, 0.5338331305261634], "rank_score": 0.6970958439813053} -{"id": "zhao-etal-2009-application", "title": "Application-driven Statistical Paraphrase Generation", "abstract": "Paraphrase generation (PG) is important in plenty of NLP applications. 
However, the research of PG is far from enough. In this paper, we propose a novel method for statistical paraphrase generation (SPG), which can (1) achieve various applications based on a uniform statistical model, and (2) naturally combine multiple resources to enhance the PG performance. In our experiments, we use the proposed method to generate paraphrases for three different applications. The results show that the method can be easily transformed from one application to another and generate valuable and interesting paraphrases.", "phrases": ["statistical paraphrase generation", "sample sentence", "spg use"], "overall_score": 1.9746645785710586, "scores": [1.003952407491426, 0.5573414121342283, 0.5296160471307947], "rank_score": 0.6969699555854829} -{"id": "tillmann-etal-2014-improved", "title": "Improved Sentence-Level Arabic Dialect Classification", "abstract": "The paper presents work on improved sentence-level dialect classification of Egyptian Arabic (ARZ) vs. Modern Standard Arabic (MSA). Our approach is based on binary feature functions that can be implemented with a minimal amount of task-specific knowledge. We train a feature-rich linear classifier based on a linear support-vector machine (linear SVM) approach. Our best system achieves an accuracy of 89.1 % on the Arabic Online Commentary (AOC) dataset (Zaidan and Callison-Burch, 2011) using 10-fold stratified cross validation: a 1.3 % absolute accuracy improvement over the results published by (Zaidan and Callison-Burch, 2014). We also evaluate the classifier on dialect data from an additional data source. Here, we find that features which measure the informalness of a sentence actually decrease classification accuracy significantly.", "phrases": ["dialect", "egyptian arabic", "linear classifier", "aoc dataset"], "overall_score": 1.4492593522376538, "scores": [1.1673868643085266, 0.5648658272167008, 0.52984402215015, 0.5256889935715757], "rank_score": 0.6969464268117382} -{"id": "baroni-etal-2014-frege", "title": "Frege in Space: A Program for Composition Distributional Semantics", "abstract": "The lexicon of any natural language encodes a huge number of distinct word meanings. Just to understand this article, you will need to know what thousands of words mean. The space of possible sentential meanings is infinite: In this article alone, you will encounter many sentences that express ideas you have never heard before, we hope. Statistical semantics has addressed the issue of the vastness of word meaning by proposing methods to harvest meaning automatically from large collections of text (corpora). Formal semantics in the Fregean tradition has developed methods to account for the infinity of sentential meaning based on the crucial insight of compositionality, the idea that meaning of sentences is built incrementally by combining the meanings of their constituents. This article sketches a new approach to semantics that brings together ideas from statistical and formal semantics to account, in parallel, for the richness of lexical meaning and the combinatorial power of sentential semantics. 
We adopt, in particular, the idea that word meaning can be approximated by the patterns of co-occurrence of words in corpora from statistical semantics, and the idea that compositionality can be captured in terms of a syntax-driven calculus of function application from formal semantics.", "phrases": ["distributional semantic", "function", "contextual information", "compositional vector", "dsm"], "overall_score": 2.346614551238855, "scores": [0.9558975336945814, 0.872347563407737, 0.5588142844502885, 0.5504183751564562, 0.5469421899165967], "rank_score": 0.696883989325132} -{"id": "sogaard-wu-2009-empirical", "title": "Empirical lower bounds on translation unit error rate for the full class of inversion transduction grammars", "abstract": "Empirical lower bounds studies in which the frequency of alignment configurations that cannot be induced by a particular formalism is estimated, have been important for the development of syntax-based machine translation formalisms. The formalism that has received most attention has been inversion transduction grammars (ITGs) (Wu, 1997). All previous work on the coverage of ITGs, however, concerns parse failure rates (PFRs) or sentence level coverage, which is not directly related to any of the evaluation measures used in machine translation. Sogaard and Kuhn (2009) induce lower bounds on translation unit error rates (TUERs) for a number of formalisms, incl. normal form ITGs, but not for the full class of ITGs. Many of the alignment configurations that cannot be induced by normal form ITGs can be induced by unrestricted ITGs, however. This paper estimates the difference and shows that the average reduction in lower bounds on TUER is 2.48 in absolute difference (16.01 in average parse failure rate).", "phrases": ["inversion transduction grammar", "low bound", "translation model", "adequacy"], "overall_score": 1.4486443533784632, "scores": [0.8844326613187488, 0.7974637445067293, 0.582426968536409, 0.5222793251459901], "rank_score": 0.6966506748769694} -{"id": "xu-etal-2020-megatron", "title": "MEGATRON-CNTRL: Controllable Story Generation with External Knowledge Using Large-Scale Language Models", "abstract": "Existing pre-trained large language models have shown unparalleled generative capabilities. However, they are not controllable. In this paper, we propose MEGATRON-CNTRL, a novel framework that uses large-scale language models and adds control to text generation by incorporating an external knowledge base. Our framework consists of a keyword predictor, a knowledge retriever, a contextual knowledge ranker, and a conditional text generator. As we do not have access to ground-truth supervision for the knowledge ranker, we make use of weak supervision from sentence embedding. The empirical results show that our model generates more fluent, consistent, and coherent stories with less repetition and higher diversity compared to prior work on the ROC story dataset. We showcase the controllability of our model by replacing the keywords used to generate stories and re-running the generation process. Human evaluation results show that 77.5% of these stories are successfully controlled by the new keywords. 
Furthermore, by scaling our model from 124 million to 8.3 billion parameters we demonstrate that larger models improve both the quality of generation (from 74.5% to 93.0% for consistency) and controllability (from 77.5% to 91.5%).", "phrases": ["story", "language model", "generation process", "megatron-cntrl"], "overall_score": 1.7310331654226871, "scores": [0.7902651172124276, 0.8881197186008805, 0.5713815416653273, 0.5367095902529813], "rank_score": 0.6966189919329042} -{"id": "yoshimoto-etal-2013-naist", "title": "NAIST at 2013 CoNLL Grammatical Error Correction Shared Task", "abstract": "This paper describes the Nara Institute of Science and Technology (NAIST) error correction system in the CoNLL 2013 Shared Task. We constructed three systems: a system based on the Treelet Language Model for verb form and subject-verb agreement errors; a classifier trained on both learner and native corpora for noun number errors; a statistical machine translation (SMT)-based model for preposition and determiner errors. As for subject-verb agreement errors, we show that the Treelet Language Model-based approach can correct errors in which the target verb is distant from its subject. Our system ranked fourth on the official run.", "phrases": ["learner", "machine translation", "subject-verb agreement error", "naist"], "overall_score": 1.3552982628741712, "scores": [0.7883564977906876, 0.8712055504352407, 0.584313030082004, 0.5420670445226248], "rank_score": 0.6964855307076392} -{"id": "deng-etal-2018-alibabas", "title": "Alibaba's Neural Machine Translation Systems for WMT18", "abstract": "This paper describes the submission systems of Alibaba for WMT18 shared news translation task. We participated in 5 translation directions including English \u2194 Russian, English \u2194 Turkish in both directions and English \u2192 Chinese. Our systems are based on Google's Transformer model architecture, into which we integrated the most recent features from the academic research. We also employed most techniques that have been proven effective during the past WMT years, such as BPE, back translation, data selection, model ensembling and reranking, at industrial scale. For some morphologically-rich languages, we also incorporated linguistic knowledge into our neural network. For the translation tasks in which we have participated, our resulting systems achieved the best case sensitive BLEU score in all 5 directions. Notably, our English \u2192 Russian system outperformed the second reranked system by 5 BLEU score.", "phrases": ["wmt18", "back translation", "reranking", "alibaba"], "overall_score": 1.1209375576426732, "scores": [0.8670370063962911, 0.7826966399048477, 0.5935057364442103, 0.542671273347937], "rank_score": 0.6964776640233216} -{"id": "barnes-etal-2018-bilingual", "title": "Bilingual Sentiment Embeddings: Joint Projection of Sentiment Across Languages", "abstract": "Sentiment analysis in low-resource languages suffers from a lack of annotated corpora to estimate high-performing models. Machine translation and bilingual word embeddings provide some relief through cross-lingual sentiment approaches. However, they either require large amounts of parallel data or do not sufficiently capture sentiment information. We introduce Bilingual Sentiment Embeddings (BLSE), which jointly represent sentiment information in a source and target language. This model only requires a small bilingual lexicon, a source-language corpus annotated for sentiment, and monolingual word embeddings for each language. 
We perform experiments on three language combinations (Spanish, Catalan, Basque) for sentence-level cross-lingual sentiment classification and find that our model significantly outperforms state-of-the-art methods on four out of six experimental setups, as well as capturing complementary information to machine translation. Our analysis of the resulting embedding space provides evidence that it represents sentiment information in the resource-poor target language without any annotated data in that language.", "phrases": ["bilingual sentiment embeddings", "cross-lingual approach", "source language"], "overall_score": 1.7858577984994801, "scores": [1.01117898424216, 0.5555509530161048, 0.522033873969841], "rank_score": 0.6962546037427019} -{"id": "li-etal-2016-role", "title": "The Role of Discourse Units in Near-Extractive Summarization", "abstract": "Although human-written summaries of documents tend to involve significant edits to the source text, most automated summarizers are extractive and select sentences verbatim. In this work we examine how elementary discourse units (EDUs) from Rhetorical Structure Theory can be used to extend extractive summarizers to produce a wider range of human-like summaries. Our analysis demonstrates that EDU segmentation is effective in preserving human-labeled summarization concepts within sentences and also aligns with near-extractive summaries constructed by news editors. Finally, we show that using EDUs as units of content selection instead of sentences leads to stronger summarization performance in near-extractive scenarios, especially under tight budgets.", "phrases": ["elementary discourse unit", "near-extractive summary", "news editor"], "overall_score": 1.7300159598487643, "scores": [0.9273809328157091, 0.612457822848485, 0.5487901592244525], "rank_score": 0.6962096382962155} -{"id": "silberer-etal-2013-models", "title": "Models of Semantic Representation with Visual Attributes", "abstract": "We consider the problem of grounding the meaning of words in the physical world and focus on the visual modality which we represent by visual attributes. We create a new large-scale taxonomy of visual attributes covering more than 500 concepts and their corresponding 688K images. We use this dataset to train attribute classifiers and integrate their predictions with text-based distributional models of word meaning. We show that these bimodal models give a better fit to human word association data compared to amodal models and word representations based on handcrafted norming data.", "phrases": ["visual attribute", "image", "distributional model"], "overall_score": 1.6025464384988046, "scores": [0.9882415805735918, 0.5508531307071406, 0.5488365144204866], "rank_score": 0.6959770752337398} -{"id": "aminian-etal-2019-cross", "title": "Cross-Lingual Transfer of Semantic Roles: From Raw Text to Semantic Roles", "abstract": "We describe a transfer method based on annotation projection to develop a dependency-based semantic role labeling system for languages for which no supervised linguistic information other than parallel data is available. Unlike previous work that presumes the availability of supervised features such as lemmas, part-of-speech tags, and dependency parse trees, we only make use of word and character features. Our deep model considers using character-based representations as well as unsupervised stem embeddings to alleviate the need for supervised features. 
Our experiments outperform a state-of-the-art method that uses supervised lexico-syntactic features on 6 out of 7 languages in the Universal Proposition Bank.", "phrases": ["part-of-speech tag", "lexico-syntactic feature", "cross-lingual transfer"], "overall_score": 1.3542251427355723, "scores": [0.9711942811645458, 0.5624639504995486, 0.5541439364776547], "rank_score": 0.6959340560472497} -{"id": "tsai-etal-2003-chinese", "title": "Chinese Word Auto-Confirmation Agent", "abstract": "In various Asian languages, including Chinese, there is no space between words in texts. Thus, most Chinese NLP systems must perform word-segmentation (sentence tokenization). However, successful word-segmentation depends on having a sufficiently large lexicon. On the average, about 3% of the words in text are not contained in a lexicon. Therefore, unknown word identification becomes a bottleneck for Chinese NLP systems. In this paper, we present a Chinese word auto-confirmation (CWAC) agent. CWAC agent uses a hybrid approach that takes advantage of statistical and linguistic approaches. The task of a CWAC agent is to auto-confirm whether an n-gram input (n \u2265 2) is a Chinese word. We design our CWAC agent to satisfy two criteria: (1) a greater than 98% precision rate and a greater than 75% recall rate and (2) domain-independent performance (F-measure). These criteria assure our CWAC agents can work automatically without human intervention. Furthermore, by combining several CWAC agents designed based on different principles, we can construct a multi-CWAC agent through a building-block approach. Three experiments are conducted in this study. The results demonstrate that, for n-gram frequency \u2265 4 in large corpus, our CWAC agent can satisfy the two criteria and achieve 97.82% precision, 77.11% recall, and 86.24% domain-independent F-measure. No existing systems can achieve such a high precision and domain-independent F-measure. The proposed method is our first attempt for constructing a CWAC agent. We will continue to develop other CWAC agents and integrate them into a multi-CWAC agent system.", "phrases": ["unknown word identification", "bottleneck", "chinese", "web corpus", "cws"], "overall_score": 0.9645610598410849, "scores": [0.929477548145605, 0.8650914591943017, 0.6017015995177238, 0.5491534815006778, 0.5334945558100311], "rank_score": 0.6957837288336679} -{"id": "xu-wan-2017-towards", "title": "Towards a Universal Sentiment Classifier in Multiple languages", "abstract": "Existing sentiment classifiers usually work for only one specific language, and different classification models are used in different languages. In this paper we aim to build a universal sentiment classifier with a single classification model in multiple different languages. In order to achieve this goal, we propose to learn multilingual sentiment-aware word embeddings simultaneously based only on the labeled reviews in English and unlabeled parallel data available in a few language pairs. It is not required that the parallel data exist between English and any other language, because the sentiment information can be transferred into any language via pivot languages. We present the evaluation results of our universal sentiment classifier in five languages, and the results are very promising even when the parallel data between English and the target languages are not used. 
Furthermore, the universal single classifier is compared with a few cross-language sentiment classifiers relying on direct parallel data between the source and target languages, and the results show that the performance of our universal sentiment classifier is very promising compared to that of different cross-language classifiers in multiple target languages.", "phrases": ["universal sentiment classifier", "unlabeled parallel data", "pivot language"], "overall_score": 1.4466671673415517, "scores": [0.9947938478043731, 0.5607010070325471, 0.5316046933036205], "rank_score": 0.6956998493801803} -{"id": "thakur-etal-2021-augmented", "title": "Augmented SBERT: Data Augmentation Method for Improving Bi-Encoders for Pairwise Sentence Scoring Tasks", "abstract": "There are two approaches for pairwise sentence scoring: Cross-encoders, which perform full-attention over the input pair, and Bi-encoders, which map each input independently to a dense vector space. While cross-encoders often achieve higher performance, they are too slow for many practical use cases. Bi-encoders, on the other hand, require substantial training data and fine-tuning over the target task to achieve competitive performance. We present a simple yet efficient data augmentation strategy called Augmented SBERT, where we use the cross-encoder to label a larger set of input pairs to augment the training data for the bi-encoder. We show that, in this process, selecting the sentence pairs is non-trivial and crucial for the success of the method. We evaluate our approach on multiple tasks (in-domain) as well as on a domain adaptation task. Augmented SBERT achieves an improvement of up to 6 points for in-domain and of up to 37 points for domain adaptation tasks compared to the original bi-encoder performance.", "phrases": ["data augmentation method", "bi-encoders", "sentence pair", "question detection"], "overall_score": 1.668058051013002, "scores": [0.8356670570629486, 0.8092084035661183, 0.5869051614138955, 0.5507563301507162], "rank_score": 0.6956342380484197} -{"id": "zeng-etal-2019-iterative", "title": "Iterative Dual Domain Adaptation for Neural Machine Translation", "abstract": "Previous studies on the domain adaptation for neural machine translation (NMT) mainly focus on the one-pass transferring out-of-domain translation knowledge to in-domain NMT model. In this paper, we argue that such a strategy fails to fully extract the domain-shared translation knowledge, and repeatedly utilizing corpora of different domains can lead to better distillation of domain-shared translation knowledge. To this end, we propose an iterative dual domain adaptation framework for NMT. Specifically, we first pretrain in-domain and out-of-domain NMT models using their own training corpora respectively, and then iteratively perform bidirectional translation knowledge transfer (from in-domain to out-of-domain and then vice versa) based on knowledge distillation until the in-domain NMT model convergences. Furthermore, we extend the proposed framework to the scenario of multiple out-of-domain training corpora, where the above-mentioned transfer is performed sequentially between the in-domain and each out-of-domain NMT models in the ascending order of their domain similarities. 
Empirical results on Chinese-English and English-German translation tasks demonstrate the effectiveness of our framework.", "phrases": ["neural machine translation", "domain adaptation framework", "translation knowledge transfer"], "overall_score": 0.9639467863224271, "scores": [0.9528472911962041, 0.5721159120837837, 0.5610586691823404], "rank_score": 0.6953406241541095} -{"id": "way-gough-2003-webmt", "title": "wEBMT: Developing and Validating an Example-Based Machine Translation System using the World Wide Web", "abstract": "We have developed an example-based machine translation (EBMT) system that uses the World Wide Web for two different purposes: First, we populate the system's memory with translations gathered from rule-based MT systems located on the Web. The source strings input to these systems were extracted automatically from an extremely small subset of the rule types in the Penn-II Treebank. In subsequent stages, the source, target translation pairs obtained are automatically transformed into a series of resources that render the translation process more successful. Despite the fact that the output from on-line MT systems is often faulty, we demonstrate in a number of experiments that when used to seed the memories of an EBMT system, they can in fact prove useful in generating translations of high quality in a robust fashion. In addition, we demonstrate the relative gain of EBMT in comparison to on-line systems. Second, despite the perception that the documents available on the Web are of questionable quality, we demonstrate in contrast that such resources are extremely useful in automatically postediting translation candidates proposed by our system.", "phrases": ["world", "wide web", "translation service", "double aim"], "overall_score": 0.9638339413127817, "scores": [0.8529503564099918, 0.8158063344497788, 0.5902887188912694, 0.5219914849937883], "rank_score": 0.6952592236862072} -{"id": "son-etal-2018-causal", "title": "Causal Explanation Analysis on Social Media", "abstract": "Understanding causal explanations - reasons given for happenings in one's life - has been found to be an important psychological factor linked to physical and mental health. Causal explanations are often studied through manual identification of phrases over limited samples of personal writing. Automatic identification of causal explanations in social media, while challenging in relying on contextual and sequential cues, offers a larger-scale alternative to expensive manual ratings and opens the door for new applications (e.g. studying prevailing beliefs about causes, such as climate change). Here, we explore automating causal explanation analysis, building on discourse parsing, and presenting two novel subtasks: causality detection (determining whether a causal explanation exists at all) and causal explanation identification (identifying the specific phrase that is the explanation). We achieve strong accuracies for both tasks but find different approaches best: an SVM for causality prediction (F1 = 0.791) and a hierarchy of Bidirectional LSTMs for causal explanation identification (F1 = 0.853). 
Finally, we explore applications of our complete pipeline (F1 = 0.868), showing demographic differences in mentions of causal explanation and that the association between a word and sentiment can change when it is used within a causal explanation.", "phrases": ["discourse parsing", "causal explanation analysis", "social medium"], "overall_score": 1.1183996939560477, "scores": [0.9411996186098012, 0.5821922178912518, 0.5613105654659595], "rank_score": 0.6949008006556708} -{"id": "vajjala-rama-2018-experiments", "title": "Experiments with Universal CEFR Classification", "abstract": "The Common European Framework of Reference (CEFR) guidelines describe language proficiency of learners on a scale of 6 levels. While the description of CEFR guidelines is generic across languages, the development of automated proficiency classification systems for different languages follow different approaches. In this paper, we explore universal CEFR classification using domain-specific and domain-agnostic, theory-guided as well as data-driven features. We report the results of our preliminary experiments in monolingual, cross-lingual, and multilingual classification with three languages: German, Czech, and Italian. Our results show that both monolingual and multilingual models achieve similar performance, and cross-lingual classification yields lower, but comparable results to monolingual classification.", "phrases": ["universal cefr classification", "learner", "scale"], "overall_score": 1.5266378080208451, "scores": [1.0059726388223338, 0.5478016615236374, 0.5306341262223053], "rank_score": 0.6948028088560921} -{"id": "gratch-etal-2014-distress", "title": "The Distress Analysis Interview Corpus of human and computer interviews", "abstract": "The Distress Analysis Interview Corpus (DAIC) contains clinical interviews designed to support the diagnosis of psychological distress conditions such as anxiety, depression, and post traumatic stress disorder. The interviews are conducted by humans, human controlled agents and autonomous agents, and the participants include both distressed and non-distressed individuals. Data collected include audio and video recordings and extensive questionnaire responses; parts of the corpus have been transcribed and annotated for a variety of verbal and non-verbal features. The corpus has been used to support the creation of an automated interviewer agent, and for research on the automatic identification of psychological distress.", "phrases": ["daic", "clinical interview", "agent", "pause"], "overall_score": 1.444304616129782, "scores": [0.83868221275814, 0.836535503395907, 0.5567592040927718, 0.5462778893844809], "rank_score": 0.6945637024078248} -{"id": "basile-etal-2012-developing", "title": "Developing a large semantically annotated corpus", "abstract": "What would be a good method to provide a large collection of semantically annotated texts with formal, deep semantics rather than shallow? We argue that a bootstrapping approach comprising state-of-the-art NLP tools for parsing and semantic interpretation, in combination with a wiki-like interface for collaborative annotation of experts, and a game with a purpose for crowdsourcing, are the starting ingredients for fulfilling this enterprise. The result is a semantic resource that anyone can edit and that integrates various phenomena, including predicate-argument structure, scope, tense, thematic roles, rhetorical relations and presuppositions, into a single semantic formalism: Discourse Representation Theory. 
Taking texts rather than sentences as the units of annotation results in deep semantic representations that incorporate discourse structure and dependencies. To manage the various (possibly conflicting) annotations provided by experts and non-experts, we introduce a method that stores \u201cBits of Wisdom\u201d in a database as stand-off annotations.", "phrases": ["semantic resource", "discourse representation theory", "groningen meaning bank", "large corpus", "logical form"], "overall_score": 1.665451582409257, "scores": [1.0750875544152794, 0.6241142954854721, 0.6058572065033787, 0.5839629285390504, 0.5837142961239579], "rank_score": 0.6945472562134277} -{"id": "salton-etal-2016-idiom", "title": "Idiom Token Classification using Sentential Distributed Semantics", "abstract": "Idiom token classification is the task of deciding for a set of potentially idiomatic phrases whether each occurrence of a phrase is a literal or idiomatic usage of the phrase. In this work we explore the use of Skip-Thought Vectors to create distributed representations that encode features that are predictive with respect to idiom token classification. We show that classifiers using these representations have competitive performance compared with the state of the art in idiom token classification. Importantly, however, our models use only the sentence containing the target phrase as input and are thus less dependent on a potentially inaccurate or incomplete model of discourse context. We further demonstrate the feasibility of using these representations to train a competitive general idiom token classifier.", "phrases": ["usage", "skip-thought vectors", "idiom token classification"], "overall_score": 1.7250377631842857, "scores": [0.9792439105336358, 0.5608273889796671, 0.5425474942326798], "rank_score": 0.6942062645819943} -{"id": "shen-etal-2021-taxoclass", "title": "TaxoClass: Hierarchical Multi-Label Text Classification Using Only Class Names", "abstract": "Hierarchical multi-label text classification (HMTC) aims to tag each document with a set of classes from a taxonomic class hierarchy. Most existing HMTC methods train classifiers using massive human-labeled documents, which are often too costly to obtain in real-world applications. In this paper, we explore to conduct HMTC based on only class surface names as supervision signals. We observe that to perform HMTC, human experts typically first pinpoint a few most essential classes for the document as its \u201ccore classes\u201d, and then check core classes' ancestor classes to ensure the coverage. To mimic human experts, we propose a novel HMTC framework, named TaxoClass. Specifically, TaxoClass (1) calculates document-class similarities using a textual entailment model, (2) identifies a document's core classes and utilizes confident core classes to train a taxonomy-enhanced classifier, and (3) generalizes the classifier via multi-label self-training. 
Our experiments on two challenging datasets show TaxoClass can achieve around 0.71 Example-F1 using only class names, outperforming the best previous method by 25%.", "phrases": ["multi-label text classification", "hmtc", "supervision signal", "self-training", "taxoclass"], "overall_score": 0.9622339763404697, "scores": [1.0146972692545317, 0.7857948774322999, 0.5606308132018322, 0.5559343955957782, 0.5534681091187175], "rank_score": 0.6941050929206319} -{"id": "basu-etal-2013-powergrading", "title": "Powergrading: a Clustering Approach to Amplify Human Effort for Short Answer Grading", "abstract": "We introduce a new approach to the machine-assisted grading of short answer questions. We follow past work in automated grading by first training a similarity metric between student responses, but then go on to use this metric to group responses into clusters and subclusters. The resulting groupings allow teachers to grade multiple responses with a single action, provide rich feedback to groups of similar answers, and discover modalities of misunderstanding among students; we refer to this amplification of grader effort as \u201cpowergrading.\u201d We develop the means to further reduce teacher effort by automatically performing actions when an answer key is available. We show results in terms of grading progress with a small \u201cbudget\u201d of human actions, both from our method and an LDA-based approach, on a test corpus of 10 questions answered by 698 respondents.", "phrases": ["cluster", "grading", "student", "short-answer question"], "overall_score": 1.5979023019771543, "scores": [1.0486068388198018, 0.5949079604668494, 0.578822707442979, 0.5535031027470962], "rank_score": 0.6939601523691816} -{"id": "galley-manning-2009-quadratic", "title": "Quadratic-Time Dependency Parsing for Machine Translation", "abstract": "Efficiency is a prime concern in syntactic MT decoding, yet significant developments in statistical parsing with respect to asymptotic efficiency haven't yet been explored in MT. Recently, McDonald et al. (2005b) formalized dependency parsing as a maximum spanning tree (MST) problem, which can be solved in quadratic time relative to the length of the sentence. They show that MST parsing is almost as accurate as cubic-time dependency parsing in the case of English, and that it is more accurate with free word order languages. This paper applies MST parsing to MT, and describes how it can be integrated into a phrase-based decoder to compute dependency language model scores. Our results show that augmenting a state-of-the-art phrase-based system with this dependency language model leads to significant improvements in TER (0.92%) and BLEU (0.45%) scores on five NIST Chinese-English evaluation test sets.", "phrases": ["maximum spanning tree", "mst", "dependency language model"], "overall_score": 1.3501467562789744, "scores": [0.9322790814599575, 0.5777506712338029, 0.5714847873292217], "rank_score": 0.6938381800076607} -{"id": "bansal-etal-2011-gappy", "title": "Gappy Phrasal Alignment By Agreement", "abstract": "We propose a principled and efficient phrase-to-phrase alignment model, useful in machine translation as well as other related natural language processing problems. In a hidden semi-Markov model, word-to-phrase and phrase-to-word translations are modeled directly by the system. Agreement between two directional models encourages the selection of parsimonious phrasal alignments, avoiding the overfitting commonly encountered in unsupervised training with multi-word units. 
Expanding the state space to include \"gappy phrases\" (such as French ne * pas) makes the alignment space more symmetric; thus, it allows agreement between discontinuous alignments. The resulting system shows substantial improvements in both alignment quality and translation quality over word-based Hidden Markov Models, while maintaining asymptotically equivalent runtime.", "phrases": ["phrasal alignment", "agreement", "hidden semi-markov model"], "overall_score": 0.9617953284148908, "scores": [0.8908344502298848, 0.6588238433580598, 0.5317077323935575], "rank_score": 0.6937886753271675} -{"id": "petukhova-bunt-2008-lirics", "title": "LIRICS Semantic Role Annotation: Design and Evaluation of a Set of Data Categories", "abstract": "Semantic roles have often proved to be useful labels for stating linguistic generalisations of various sorts. There is, however, a lack of agreement on their defining criteria, which causes serious problems for semantic roles to be a useful classificatory device for predicate-argument relations. These criteria should (a) support the design of a semantic role set which is complete but does not contain redundant relations; (b) be based on semantic rather than morphological, lexical or syntactic properties; and (c) enable formal interpretation. In this paper we report on the analyses of alternative approaches to annotation and representation of semantic role information (such as FrameNet, PropBank and VerbNet) with respect to their models of description, granularity of semantic role sets, definitions of semantic roles concepts, consistency and reliability of annotations. We present methodological principles for characterising well-defined concepts which were developed within the LIRICS (Linguistic InfRastructure for Interoperable ResourCes and Systems; see ) project, as well as the designed set of semantic roles and their definitions in ISO 12620 format. We discuss evaluation results of the defined concepts for semantic role annotation concerning the redundancy and completeness of the tagset and the reliability of annotations in terms of inter-annotator agreement.", "phrases": ["semantic role", "linguistic infrastructure", "interoperable resources", "project", "lirics"], "overall_score": 1.243024016948737, "scores": [0.9755679868394822, 0.9017274187026884, 0.545280963466205, 0.5233330589025431, 0.5228151366766207], "rank_score": 0.6937449129175078} -{"id": "dethlefs-cuayahuitl-2010-hierarchical", "title": "Hierarchical Reinforcement Learning for Adaptive Text Generation", "abstract": "We present a novel approach to natural language generation (NLG) that applies hierarchical reinforcement learning to text generation in the wayfinding domain. Our approach aims to optimise the integration of NLG tasks that are inherently different in nature, such as decisions of content selection, text structure, user modelling, referring expression generation (REG), and surface realisation. It also aims to capture existing interdependencies between these areas. We apply hierarchical reinforcement learning to learn a generation policy that captures these interdependencies, and that can be transferred to other NLG tasks. 
Our experimental results---in a simulated environment---show that the learnt wayfinding policy outperforms a baseline policy that takes reasonable actions but without optimization.", "phrases": ["nlg task", "generation policy", "hierarchical reinforcement learning"], "overall_score": 0.9615417552781292, "scores": [0.9492838904315424, 0.5659701715928098, 0.5655632208967876], "rank_score": 0.6936057609737132} -{"id": "schulz-etal-2010-multilingual", "title": "Multilingual Corpus Development for Opinion Mining", "abstract": "Opinion Mining is a discipline that has attracted some attention lately. Most of the research in this field has been done for English or Asian languages, due to the lack of resources in other languages. In this paper we describe an approach of building a manually annotated multilingual corpus for the domain of product reviews, which can be used as a basis for fine-grained opinion analysis also considering direct and indirect opinion targets. For each sentence in a review, the mentioned product features with their respective opinion polarity and strength on a scale from 0 to 3 are labelled manually by two annotators. The languages represented in the corpus are English, German and Spanish and the corpus consists of about 500 product reviews per language. After a short introduction and a description of related work, we illustrate the annotation process, including a description of the annotation methodology and the developed tool for the annotation process. Then first results on the inter-annotator agreement for opinions and product features are presented. We conclude the paper with an outlook on future work.", "phrases": ["opinion mining", "other language", "spanish"], "overall_score": 0.9613611688980191, "scores": [0.9564083370177519, 0.5726800242465743, 0.5513381250442579], "rank_score": 0.6934754954361947} -{"id": "zhu-bhat-2021-euphemistic-phrase", "title": "Euphemistic Phrase Detection by Masked Language Model", "abstract": "It is a well-known approach for fringe groups and organizations to use euphemisms\u2014ordinary-sounding and innocent-looking words with a secret meaning\u2014to conceal what they are discussing. For instance, drug dealers often use \u201cpot\u201d for marijuana and \u201cavocado\u201d for heroin. From a social media content moderation perspective, though recent advances in NLP have enabled the automatic detection of such single-word euphemisms, no existing work is capable of automatically detecting multi-word euphemisms, such as \u201cblue dream\u201d (marijuana) and \u201cblack tar\u201d (heroin). Our paper tackles the problem of euphemistic phrase detection without human effort for the first time, as far as we are aware. We first perform phrase mining on a raw text corpus (e.g., social media posts) to extract quality phrases. Then, we utilize word embedding similarities to select a set of euphemistic phrase candidates. Finally, we rank those candidates by a masked language model\u2014SpanBERT. 
Compared to strong baselines, we report 20-50% higher detection accuracies using our algorithm for detecting euphemistic phrases.", "phrases": ["euphemism", "candidate", "euphemistic phrase detection"], "overall_score": 1.1157116905098323, "scores": [1.0057442713124596, 0.5382274543769571, 0.5357202249415453], "rank_score": 0.6932306502103206} -{"id": "nivre-2009-non", "title": "Non-Projective Dependency Parsing in Expected Linear Time", "abstract": "We present a novel transition system for dependency parsing, which constructs arcs only between adjacent words but can parse arbitrary non-projective trees by swapping the order of words in the input. Adding the swapping operation changes the time complexity for deterministic parsing from linear to quadratic in the worst case, but empirical estimates based on treebank data show that the expected running time is in fact linear for the range of data attested in the corpora. Evaluation on data from five languages shows state-of-the-art accuracy, with especially good results for the labeled exact match score.", "phrases": ["linear time", "non-projective tree", "state-of-the-art accuracy"], "overall_score": 1.441288651440251, "scores": [0.8901960127572318, 0.620440336338825, 0.5687036408263351], "rank_score": 0.6931133299741307} -{"id": "raganato-etal-2017-neural", "title": "Neural Sequence Learning Models for Word Sense Disambiguation", "abstract": "Word Sense Disambiguation models exist in many flavors. Even though supervised ones tend to perform best in terms of accuracy, they often lose ground to more flexible knowledge-based solutions, which do not require training by a word expert for every disambiguation target. To bridge this gap we adopt a different perspective and rely on sequence learning to frame the disambiguation problem: we propose and study in depth a series of end-to-end neural architectures directly tailored to the task, from bidirectional Long Short-Term Memory to encoder-decoder models. Our extensive evaluation over standard benchmarks and in multiple languages shows that sequence learning enables more versatile all-words models that consistently lead to state-of-the-art results, even against word experts with engineered features.", "phrases": ["word sense disambiguation", "neural architecture", "sequence labeling task"], "overall_score": 2.3788602814295077, "scores": [0.9447521122695136, 0.5670547261838027, 0.5664131487869977], "rank_score": 0.6927399957467714} -{"id": "wang-etal-2018-metrics", "title": "No Metrics Are Perfect: Adversarial Reward Learning for Visual Storytelling", "abstract": "Though impressive results have been achieved in visual captioning, the task of generating abstract stories from photo streams is still a little-tapped problem. Different from captions, stories have more expressive language styles and contain many imaginary concepts that do not appear in the images. Thus it poses challenges to behavioral cloning algorithms. Furthermore, due to the limitations of automatic metrics on evaluating story quality, reinforcement learning methods with hand-crafted rewards also face difficulties in gaining an overall performance boost. Therefore, we propose an Adversarial REward Learning (AREL) framework to learn an implicit reward function from human demonstrations, and then optimize policy search with the learned reward function. 
Though automatic evaluation indicates a slight performance boost over state-of-the-art (SOTA) methods in cloning expert behaviors, human evaluation shows that our approach achieves significant improvement in generating more human-like stories than SOTA systems.", "phrases": ["adversarial reward learning", "caption", "human evaluation", "policy gradient"], "overall_score": 1.720724100928683, "scores": [0.882746212188162, 0.8362974748893116, 0.5304899376000599, 0.5203476520706073], "rank_score": 0.6924703191870353} -{"id": "lacroix-etal-2016-frustratingly", "title": "Frustratingly Easy Cross-Lingual Transfer for Transition-Based Dependency Parsing", "abstract": "In this paper, we present a straightforward strategy for transferring dependency parsers across languages. The proposed method learns a parser from partially annotated data obtained through the projection of annotations across unambiguous word alignments. It does not rely on any modeling of the reliability of dependency and/or alignment links and is therefore easy to implement and parameter free. Experiments on six languages show that our method is on par with recent algorithmically demanding methods, at a much cheaper computational cost. It can thus serve as a fair baseline for transferring dependencies across languages with the use of parallel corpora.", "phrases": ["cross-lingual transfer", "dependency parser", "alignment link", "parallel corpora", "annotation projection"], "overall_score": 1.7207161072010895, "scores": [0.9510217764775147, 0.9330735793610639, 0.5278299673805675, 0.5267195084153202, 0.5236906797375449], "rank_score": 0.6924671022744022} -{"id": "jagannatha-yu-2020-calibrating", "title": "Calibrating Structured Output Predictors for Natural Language Processing", "abstract": "We address the problem of calibrating prediction confidence for output entities of interest in natural language processing (NLP) applications. It is important that NLP applications such as named entity recognition and question answering produce calibrated confidence scores for their predictions, especially if the applications are to be deployed in a safety-critical domain such as healthcare. However, the output space of such structured prediction models is often too large to directly adapt binary or multi-class calibration methods. In this study, we propose a general calibration scheme for output entities of interest in neural network based structured prediction models. Our proposed method can be used with any binary class calibration scheme and a neural network model. Additionally, we show that our calibration method can also be used as an uncertainty-aware, entity-specific decoding step to improve the performance of the underlying model at no additional training cost or data requirements. We show that our method outperforms current calibration techniques for Named Entity Recognition, Part-of-speech tagging and Question Answering systems. We also observe an improvement in model performance from our decoding step across several tasks and benchmark datasets. 
Our method improves the calibration and model performance on out-of-domain test scenarios as well.", "phrases": ["output entity", "calibration", "forecaster", "decision tree"], "overall_score": 1.4397809298534405, "scores": [1.0584707452843, 0.575889191989135, 0.5681013537962222, 0.5670917855517552], "rank_score": 0.6923882691553531} -{"id": "nie-etal-2018-operation", "title": "Operation-guided Neural Networks for High Fidelity Data-To-Text Generation", "abstract": "Recent neural models for data-to-text generation are mostly based on data-driven end-to-end training over encoder-decoder networks. Even though the generated texts are mostly fluent and informative, they often generate descriptions that are not consistent with the input structured data. This is a critical issue especially in domains that require inference or calculations over raw data. In this paper, we attempt to improve the fidelity of neural data-to-text generation by utilizing pre-executed symbolic operations. We propose a framework called Operation-guided Attention-based sequence-to-sequence network (OpAtt), with a specifically designed gating mechanism as well as a quantization module for operation results to utilize information from pre-executed operations. Experiments on two sports datasets show our proposed method clearly improves the fidelity of the generated texts to the input structured data.", "phrases": ["pre-executed symbolic operation", "neural table-to-text generation", "text generation"], "overall_score": 1.7202383414767748, "scores": [0.8526485599385238, 0.6272971457058637, 0.5968787999645497], "rank_score": 0.692274835202979} -{"id": "tsai-etal-2019-small", "title": "Small and Practical BERT Models for Sequence Labeling", "abstract": "We propose a practical scheme to train a single multilingual sequence labeling model that yields state of the art results and is small and fast enough to run on a single CPU. Starting from a public multilingual BERT checkpoint, our final model is 6x smaller and 27x faster, and has higher accuracy than a state-of-the-art multilingual baseline. We show that our model especially outperforms on low-resource languages, and works on codemixed input text without being explicitly trained on codemixed examples. We showcase the effectiveness of our method by reporting on part-of-speech tagging and morphological prediction on 70 treebanks and 48 languages.", "phrases": ["sequence labeling", "single cpu", "m-bert model"], "overall_score": 1.5940206314421397, "scores": [0.9380801865943227, 0.5988593897530381, 0.5398835164784145], "rank_score": 0.6922743642752583} -{"id": "rozovskaya-etal-2017-adapting", "title": "Adapting to Learner Errors with Minimal Supervision", "abstract": "This article considers the problem of correcting errors made by English as a Second Language writers from a machine learning perspective, and addresses an important issue of developing an appropriate training paradigm for the task, one that accounts for error patterns of non-native writers using minimal supervision. Existing training approaches present a trade-off between large amounts of cheap data offered by the native-trained models and additional knowledge of learner error patterns provided by the more expensive method of training on annotated learner data. We propose a novel training approach that draws on the strengths offered by the two standard training paradigms\u2014of training either on native or on annotated learner data\u2014and that outperforms both of these standard methods. 
Using the key observation that parameters relating to error regularities exhibited by non-native writers are relatively simple, we develop models that can incorporate knowledge about error regularities based on a small annotated sample but that are otherwise trained on native English data. The key contribution of this article is the introduction and analysis of two methods for adapting the learned models to error patterns of non-native writers; one method that applies to generative classifiers and a second that applies to discriminative classifiers. Both methods demonstrated state-of-the-art performance in several text correction competitions. In particular, the Illinois system that implements these methods ranked at the top in two recent CoNLL shared tasks on error correction. We conduct further evaluation of the proposed approaches studying the effect of using error data from speakers of the same native language, languages that are closely related linguistically, and unrelated languages.", "phrases": ["minimal supervision", "learner error pattern", "error type"], "overall_score": 1.114067170998842, "scores": [0.906718665478441, 0.6379940823108593, 0.5319138105734315], "rank_score": 0.6922088527875773} -{"id": "narayan-chen-etal-2019-collaborative", "title": "Collaborative Dialogue in Minecraft", "abstract": "We wish to develop interactive agents that can communicate with humans to collaboratively solve tasks in grounded scenarios. Since computer games allow us to simulate such tasks without the need for physical robots, we define a Minecraft-based collaborative building task in which one player (A, the Architect) is shown a target structure and needs to instruct the other player (B, the Builder) to build this structure. Both players interact via a chat interface. A can observe B but cannot place blocks. We present the Minecraft Dialogue Corpus, a collection of 509 conversations and game logs. As a first step towards our goal of developing fully interactive agents for this task, we consider the subtask of Architect utterance generation, and show how challenging it is.", "phrases": ["collaborative building task", "minecraft dialogue corpus", "dialog"], "overall_score": 1.7199935307594398, "scores": [0.9184509676158662, 0.6364908849378569, 0.5215870958148375], "rank_score": 0.6921763161228536} -{"id": "hutchins-2003-machine", "title": "Has machine translation improved? some historical comparisons", "abstract": "The common assertion that MT systems have improved over the last decades is examined by informal comparisons of translations produced by operational systems in the 1960s, 1970s and 1980s and of translations of the same source texts produced by some currently available commercial and online systems. The scarcity of source and target texts for earlier systems means that the conclusions are consequently tentative and preliminary.", "phrases": ["decade", "translation tool", "old system"], "overall_score": 0.9595587692612538, "scores": [0.9084578302473622, 0.6159228117798549, 0.552145374754875], "rank_score": 0.692175338927364} -{"id": "zhang-etal-2021-sparsifying", "title": "On Sparsifying Encoder Outputs in Sequence-to-Sequence Models", "abstract": "Sequence-to-sequence models usually transfer all encoder outputs to the decoder for generation. In this work, by contrast, we hypothesize that these encoder outputs can be compressed to shorten the sequence delivered for decoding. 
We take Transformer as the testbed and introduce a layer of stochastic gates in-between the encoder and the decoder. The gates are regularized using the expected value of the sparsity-inducing L0 penalty, resulting in completely masking out a subset of encoder outputs. In other words, via joint training, the L0Drop layer forces Transformer to route information through a subset of its encoder states. We investigate the effects of this sparsification on two machine translation and two summarization tasks. Experiments show that, depending on the task, around 40-70% of source encodings can be pruned without significantly compromising quality. The decrease of the output length endows L0Drop with the potential of improving decoding efficiency, where it yields a speedup of up to 1.65x on document summarization tasks against the standard Transformer. We analyze the L0Drop behaviour and observe that it exhibits systematic preferences for pruning certain word types, e.g., function words and punctuation get pruned most. Inspired by these observations, we explore the feasibility of specifying rule-based patterns that mask out encoder outputs based on information such as part-of-speech tags, word frequency and word position.", "phrases": ["encoder output", "sequence-to-sequence model", "summarization task"], "overall_score": 0.7601871580926925, "scores": [0.9379555605258403, 0.5981910816861492, 0.5397098704098479], "rank_score": 0.6919521708739458} -{"id": "liu-etal-2016-agreement", "title": "Agreement on Target-bidirectional Neural Machine Translation", "abstract": "Neural machine translation (NMT) with recurrent neural networks has proven to be an effective technique for end-to-end machine translation. However, in spite of its promising advances over traditional translation methods, it typically suffers from an issue of unbalanced outputs that arise from both the nature of recurrent neural networks themselves, and the challenges inherent in machine translation. To overcome this issue, we propose an agreement model for neural machine translation and show its effectiveness on large-scale Japanese-to-English and Chinese-to-English translation tasks. Our results show the model can achieve improvements of up to 1.4 BLEU over the strongest baseline NMT system. With the help of an ensemble technique, this new end-to-end NMT approach finally outperformed phrase-based and hierarchical phrase-based Moses baselines by up to 5.6 BLEU points.", "phrases": ["neural machine translation", "bidirectional decoding", "suffix"], "overall_score": 1.8259854381302056, "scores": [0.963905670254413, 0.5727249747396984, 0.5390939908133399], "rank_score": 0.6919082119358171} -{"id": "pershina-etal-2015-personalized", "title": "Personalized Page Rank for Named Entity Disambiguation", "abstract": "The task of Named Entity Disambiguation is to map entity mentions in the document to their correct entries in some knowledge base. We present a novel graph-based disambiguation approach based on Personalized PageRank (PPR) that combines local and global evidence for disambiguation and effectively filters out noise introduced by incorrect candidates. 
Experiments show that our method outperforms state-of-the-art approaches by achieving 91.7% in micro- and 89.9% in macro-accuracy on a dataset of 27.8K named entity mentions.", "phrases": ["named entity disambiguation", "personalized pagerank", "candidate"], "overall_score": 1.6588584387697052, "scores": [0.8932611571367131, 0.6393447041589468, 0.5427872439676055], "rank_score": 0.6917977017544218} -{"id": "mozes-etal-2021-frequency", "title": "Frequency-Guided Word Substitutions for Detecting Textual Adversarial Examples", "abstract": "Recent efforts have shown that neural text processing models are vulnerable to adversarial examples, but the nature of these examples is poorly understood. In this work, we show that adversarial attacks against CNN, LSTM and Transformer-based classification models perform word substitutions that are identifiable through frequency differences between replaced words and their corresponding substitutions. Based on these findings, we propose frequency-guided word substitutions (FGWS), a simple algorithm exploiting the frequency properties of adversarial word substitutions for the detection of adversarial examples. FGWS achieves strong performance by accurately detecting adversarial examples on the SST-2 and IMDb sentiment datasets, with F1 detection scores of up to 91.4% against RoBERTa-based classification models. We compare our approach against a recently proposed perturbation discrimination framework and show that we outperform it by up to 13.0% F1.", "phrases": ["detection", "frequency-guided word substitution", "adversarial sample"], "overall_score": 1.5199405715319314, "scores": [0.8790407809519304, 0.6031942203041087, 0.5930292942689283], "rank_score": 0.6917547651749891} -{"id": "mazidi-nielsen-2014-linguistic", "title": "Linguistic Considerations in Automatic Question Generation", "abstract": "This paper describes an automatic question generator that uses semantic pattern recognition to create questions of varying depth and type for self-study or tutoring.", "phrases": ["automatic question generation", "tutoring", "heuristic rule"], "overall_score": 1.9598183169743262, "scores": [1.0011704760075013, 0.544588692820261, 0.5294304610874919], "rank_score": 0.691729876638418} -{"id": "henderson-etal-2020-convert", "title": "ConveRT: Efficient and Accurate Conversational Representations from Transformers", "abstract": "General-purpose pretrained sentence encoders such as BERT are not ideal for real-world conversational AI applications; they are computationally heavy, slow, and expensive to train. We propose ConveRT (Conversational Representations from Transformers), a pretraining framework for conversational tasks satisfying all the following requirements: it is effective, affordable, and quick to train. We pretrain using a retrieval-based response selection task, effectively leveraging quantization and subword-level parameterization in the dual encoder to build a lightweight memory- and energy-efficient model. We show that ConveRT achieves state-of-the-art performance across widely established response selection tasks. We also demonstrate that the use of extended dialog history as context yields further performance gains. Finally, we show that pretrained representations from the proposed encoder can be transferred to the intent classification task, yielding strong results across three diverse data sets. ConveRT trains substantially faster than standard sentence encoders or previous state-of-the-art dual encoders. 
With its reduced size and superior performance, we believe this model promises wider portability and scalability for Conversational AI applications.", "phrases": ["conversational representations", "language model", "generation task", "original pre-training task"], "overall_score": 2.1055270759855116, "scores": [1.094097646786784, 0.627643735491542, 0.5237673599905213, 0.5208063288855554], "rank_score": 0.6915787677886007} -{"id": "liu-etal-2012-towards", "title": "Towards Mediating Shared Perceptual Basis in Situated Dialogue", "abstract": "To enable effective referential grounding in situated human robot dialogue, we have conducted an empirical study to investigate how conversation partners collaborate and mediate shared basis when they have mismatched visual perceptual capabilities. In particular, we have developed a graph-based representation to capture linguistic discourse and visual discourse, and applied inexact graph matching to ground references. Our empirical results have shown that, even when computer vision algorithms produce many errors (e.g. 84.7% of the objects in the environment are mis-recognized), our approach can still achieve 66% accuracy in referential grounding. These results demonstrate that, due to its error-tolerance nature, inexact graph matching provides a potential solution to mediate shared perceptual basis for referential grounding in situated interaction.", "phrases": ["situated dialogue", "referential grounding", "perception"], "overall_score": 1.3457029915185565, "scores": [0.9596876572762361, 0.5683888634300921, 0.5465870892838748], "rank_score": 0.6915545366634009} -{"id": "brahman-etal-2020-cue", "title": "Cue Me In: Content-Inducing Approaches to Interactive Story Generation", "abstract": "Automatically generating stories is a challenging problem that requires producing causally related and logical sequences of events about a topic. Previous approaches in this domain have focused largely on one-shot generation, where a language model outputs a complete story based on limited initial input from a user. Here, we instead focus on the task of interactive story generation, where the user provides the model mid-level sentence abstractions in the form of cue phrases during the generation process. This provides an interface for human users to guide the story generation. We present two content-inducing approaches to effectively incorporate this additional information. Experimental results from both automatic and human evaluations show that these methods produce more topically coherent and personalized stories compared to baseline methods.", "phrases": ["interactive story generation", "cue phrase", "generation process"], "overall_score": 1.113011707134838, "scores": [0.9713569581559759, 0.5605833425064282, 0.5427188679877154], "rank_score": 0.6915530562167064} -{"id": "zweig-burges-2012-challenge", "title": "A Challenge Set for Advancing Language Modeling", "abstract": "In this paper, we describe a new, publicly available corpus intended to stimulate research into language modeling techniques which are sensitive to overall sentence coherence. The task uses the Scholastic Aptitude Test's sentence completion format. The test set consists of 1040 sentences, each of which is missing a content word. The goal is to select the correct replacement from amongst five alternates. In general, all of the options are syntactically valid, and reasonable with respect to local N-gram statistics. 
The set was generated by using an N-gram language model to generate a long list of likely words, given the immediate context. These options were then hand-groomed to identify four decoys which are globally incoherent, yet syntactically correct. To ensure the right to public distribution, all the data is derived from out-of-copyright materials from Project Gutenberg. The test sentences were derived from five of Conan Doyle's Sherlock Holmes novels, and we provide a large set of Nineteenth and early Twentieth Century texts as training material.", "phrases": ["language modeling", "immediate context", "sentence completion challenge"], "overall_score": 1.4378461555559747, "scores": [0.9915305163701895, 0.5501743699980087, 0.5326686318136676], "rank_score": 0.6914578393939553} -{"id": "yin-etal-2016-neural-generative", "title": "Neural Generative Question Answering", "abstract": "This paper presents an end-to-end neural network model, named Neural Generative Question Answering (GENQA), that can generate answers to simple factoid questions, based on the facts in a knowledge-base. More specifically, the model is built on the encoder-decoder framework for sequence-to-sequence learning, while equipped with the ability to enquire the knowledge-base, and is trained on a corpus of question-answer pairs, with their associated triples in the knowledge-base. Empirical study shows the proposed model can effectively deal with the variations of questions and answers, and generate right and natural answers by referring to the facts in the knowledge-base. The experiment on question answering demonstrates that the proposed model can outperform an embedding-based QA model as well as a neural dialogue model trained on the same data.", "phrases": ["neural network model", "factoid question", "encoder-decoder framework", "seq2seq learning"], "overall_score": 2.136574370909118, "scores": [1.1318165640703626, 0.5561538597680898, 0.5419898271773592, 0.5348990521462014], "rank_score": 0.6912148257905032} -{"id": "dohare-etal-2018-unsupervised", "title": "Unsupervised Semantic Abstractive Summarization", "abstract": "Automatic abstractive summary generation remains a significant open problem for natural language processing. In this work, we develop a novel pipeline for Semantic Abstractive Summarization (SAS). SAS, as introduced by Liu et al. (2015), first generates an AMR graph of an input story, through which it extracts a summary graph and finally, creates summary sentences from this summary graph. Compared to earlier approaches, we develop a more comprehensive method to generate the story AMR graph using state-of-the-art co-reference resolution and Meta Nodes, which we then use in a novel unsupervised algorithm based on how humans summarize a piece of text to extract the summary sub-graph. Our algorithm outperforms the state-of-the-art SAS method by 1.7% F1 score in node prediction.", "phrases": ["summarization", "pipeline", "amr"], "overall_score": 1.1124336415929559, "scores": [1.0053539639495381, 0.5406183826743686, 0.527609305079303], "rank_score": 0.69119388390107} -{"id": "prabhakaran-etal-2012-annotations", "title": "Annotations for Power Relations on Email Threads", "abstract": "Social relations like power and influence are difficult concepts to define, but are easily recognizable when expressed. In this paper, we describe a multi-layer annotation scheme for social power relations that are recognizable from online written interactions. 
We introduce a typology of four types of power relations between dialog participants: hierarchical power, situational power, influence and control of communication. We also present a corpus of Enron emails comprising 122 threaded conversations, manually annotated with instances of these power relations between participants. Our annotations also capture attempts at exercise of power or influence and whether those attempts were successful or not. In addition, we also capture utterance level annotations for overt display of power. We describe the annotation definitions using two example email threads from our corpus illustrating each type of power relation. We also present detailed instructions given to the annotators and provide various statistics on annotations in the corpus.", "phrases": ["email thread", "social relation", "overt display"], "overall_score": 0.9581627433626091, "scores": [0.9523342234582801, 0.5712505737763537, 0.5499201600860671], "rank_score": 0.6911683191069004} -{"id": "zeng-etal-2018-multi", "title": "Multi-Domain Neural Machine Translation with Word-Level Domain Context Discrimination", "abstract": "With great practical value, the study of Multi-domain Neural Machine Translation (NMT) mainly focuses on using mixed-domain parallel sentences to construct a unified model that allows translation to switch between different domains. Intuitively, words in a sentence are related to its domain to varying degrees, so that they will exert disparate impacts on the multi-domain NMT modeling. Based on this intuition, in this paper, we devote ourselves to distinguishing and exploiting word-level domain contexts for multi-domain NMT. To this end, we jointly model NMT with monolingual attention-based domain classification tasks and improve NMT as follows: 1) Based on the sentence representations produced by a domain classifier and an adversarial domain classifier, we generate two gating vectors and use them to construct domain-specific and domain-shared annotations, for later translation predictions via different attention models; 2) We utilize the attention weights derived from the target-side domain classifier to adjust the weights of target words in the training objective, enabling domain-related words to have greater impacts during model training. Experimental results on Chinese-English and English-French multi-domain translation tasks demonstrate the effectiveness of the proposed model. The source code of this paper is available on GitHub.", "phrases": ["machine translation", "adversarial domain classifier", "domain-agnostic representation"], "overall_score": 1.8238075646624266, "scores": [0.9539964487967884, 0.5671614836049724, 0.5520909631745033], "rank_score": 0.6910829651920881} -{"id": "huang-etal-2019-multi-modal", "title": "Multi-modal Discriminative Model for Vision-and-Language Navigation", "abstract": "Vision-and-Language Navigation (VLN) is a natural language grounding task where agents have to interpret natural language instructions in the context of visual scenes in a dynamic environment to achieve prescribed navigation goals. Successful agents must have the ability to parse natural language of varying linguistic styles, ground them in potentially unfamiliar scenes, plan and react with ambiguous environmental feedback. Generalization ability is limited by the amount of human annotated data. In particular, paired vision-language sequence data is expensive to collect. 
We develop a discriminator that evaluates how well an instruction explains a given path in the VLN task using multi-modal alignment. Our study reveals that only a small fraction of the high-quality augmented data from Fried et al., as scored by our discriminator, is useful for training VLN agents with similar performance. We also show that a VLN agent warm-started with pre-trained components from the discriminator outperforms the benchmark success rate of 35.5 by 10% relative measure.", "phrases": ["vision-and-language navigation", "instruction", "environment"], "overall_score": 0.9580224591125772, "scores": [0.9776362280529949, 0.569767042244093, 0.5257981059358595], "rank_score": 0.6910671254109825} -{"id": "batsuren-etal-2019-cognet", "title": "CogNet: A Large-Scale Cognate Database", "abstract": "This paper introduces CogNet, a new, large-scale lexical database that provides cognates (words of common origin and meaning) across languages. The database currently contains 3.1 million cognate pairs across 338 languages using 35 writing systems. The paper also describes the automated method by which cognates were computed from publicly available wordnets, with an accuracy evaluated at 94%. Finally, it presents statistics about the cognate data and some initial insights into it, hinting at a possible future exploitation of the resource by various fields of linguistics.", "phrases": ["large-scale lexical database", "common origin", "cognet"], "overall_score": 1.1118333741955584, "scores": [0.9135655386381301, 0.626981872532123, 0.5319153392207161], "rank_score": 0.6908209167969898} -{"id": "upadhyay-etal-2018-joint", "title": "Joint Multilingual Supervision for Cross-lingual Entity Linking", "abstract": "Cross-lingual Entity Linking (XEL) aims to ground entity mentions written in any language to an English Knowledge Base (KB), such as Wikipedia. XEL for most languages is challenging, owing to limited availability of resources as supervision. We address this challenge by developing the first XEL approach that combines supervision from multiple languages jointly. This enables our approach to: (a) augment the limited supervision in the target language with additional supervision from a high-resource language (like English), and (b) train a single entity linking model for multiple languages, improving upon individually trained models for each language. Extensive evaluation on three benchmark datasets across 8 languages shows that our approach significantly improves over the current state-of-the-art. We also provide analyses in two limited resource settings: (a) zero-shot setting, when no supervision in the target language is available, and in (b) low-resource setting, when some supervision in the target language is available. Our analysis provides insights into the limitations of zero-shot XEL approaches in realistic scenarios, and shows the value of joint supervision in low-resource settings.", "phrases": ["cross-lingual entity linking", "downstream disambiguation model", "english wikipedia"], "overall_score": 1.6563068954174378, "scores": [0.9730623707758495, 0.552669452884727, 0.5464690529246327], "rank_score": 0.6907336255284031} -{"id": "hill-etal-2014-multi", "title": "Multi-Modal Models for Concrete and Abstract Concept Meaning", "abstract": "Multi-modal models that learn semantic representations from both linguistic and perceptual input outperform language-only models on a range of evaluations, and better reflect human concept acquisition. 
Most perceptual input to such models corresponds to concrete noun concepts and the superiority of the multi-modal approach has only been established when evaluating on such concepts. We therefore investigate which concepts can be effectively learned by multi-modal models. We show that concreteness determines both which linguistic features are most informative and the impact of perceptual input in such models. We then introduce ridge regression as a means of propagating perceptual information from concrete nouns to more abstract concepts that is more robust than previous approaches. Finally, we present weighted gram matrix combination, a means of combining representations from distinct modalities that outperforms alternatives when both modalities are sufficiently rich.", "phrases": ["noun", "multi-modal model", "visual similarity"], "overall_score": 1.4361391708411428, "scores": [0.89110879349512, 0.6127766490942925, 0.5680254172094923], "rank_score": 0.6906369532663016} -{"id": "torroba-hennigen-etal-2020-intrinsic", "title": "Intrinsic Probing through Dimension Selection", "abstract": "Most modern NLP systems make use of pre-trained contextual representations that attain astonishingly high performance on a variety of tasks. Such high performance should not be possible unless some form of linguistic structure inheres in these representations, and a wealth of research has sprung up on probing for it. In this paper, we draw a distinction between intrinsic probing, which examines how linguistic information is structured within a representation, and the extrinsic probing popular in prior work, which only argues for the presence of such information by showing that it can be successfully extracted. To enable intrinsic probing, we propose a novel framework based on a decomposable multivariate Gaussian probe that allows us to determine whether the linguistic information in word embeddings is dispersed or focal. We then probe fastText and BERT for various morphosyntactic attributes across 36 languages. We find that most attributes are reliably encoded by only a few neurons, with fastText concentrating its linguistic structure more than BERT.", "phrases": ["presence", "neuron", "intrinsic probing"], "overall_score": 1.2372436028521459, "scores": [0.9447973519661781, 0.5823514864123853, 0.5444075687744375], "rank_score": 0.6905188023843336} -{"id": "shi-etal-2020-semantic", "title": "Semantic Role Labeling as Syntactic Dependency Parsing", "abstract": "We reduce the task of (span-based) PropBank-style semantic role labeling (SRL) to syntactic dependency parsing. Our approach is motivated by our empirical analysis that shows three common syntactic patterns account for over 98% of the SRL annotations for both English and Chinese data. Based on this observation, we present a conversion scheme that packs SRL annotations into dependency tree representations through joint labels that permit highly accurate recovery back to the original format. This representation allows us to train statistical dependency parsers to tackle SRL and achieve competitive performance with the current state of the art. 
Our findings show the promise of syntactic dependency trees in encoding semantic role relations within their syntactic domain of locality, and point to potential further integration of syntactic methods into semantic role labeling in the future.", "phrases": ["syntactic dependency parsing", "srl", "joint label"], "overall_score": 1.1111451766032823, "scores": [0.9952598166800164, 0.5464100140876158, 0.5295101160054545], "rank_score": 0.6903933155910288} -{"id": "zhou-etal-2020-temporal", "title": "Temporal Common Sense Acquisition with Minimal Supervision", "abstract": "Temporal common sense (e.g., duration and frequency of events) is crucial for understanding natural language. However, its acquisition is challenging, partly because such information is often not expressed explicitly in text, and human annotation on such concepts is costly. This work proposes a novel sequence modeling approach that exploits explicit and implicit mentions of temporal common sense, extracted from a large corpus, to build TacoLM, a temporal common sense language model. Our method is shown to give quality predictions of various dimensions of temporal common sense (on UDST and a newly collected dataset from RealNews). It also produces representations of events for relevant tasks such as duration comparison, parent-child relations, event coreference and temporal QA (on TimeBank, HiEVE and MCTACO) that are better than using the standard BERT. Thus, it will be an important component of temporal NLP.", "phrases": ["duration", "sense language model", "bert", "temporal data", "data-driven method"], "overall_score": 1.8692063928143197, "scores": [0.8874208249967834, 0.8454647482894025, 0.5862422655846897, 0.5781180423595927, 0.553957482823731], "rank_score": 0.6902406728108399} -{"id": "elahi-monachesi-2012-examination", "title": "An Examination of Cross-Cultural Similarities and Differences from Social Media Data with respect to Language Use", "abstract": "We present a methodology for analyzing cross-cultural similarities and differences using language as a medium, love as domain, social media as a data source and 'Terms' and 'Topics' as cultural features. We discuss the techniques necessary for the creation of the social data corpus from which emotion terms have been extracted using NLP techniques. Topics of love discussion were then extracted from the corpus by means of Latent Dirichlet Allocation (LDA). Finally, on the basis of these features, a cross-cultural comparison was carried out. For the purpose of cross-cultural analysis, the experimental focus was on comparing data from a culture from the East (India) with a culture from the West (United States of America). Similarities and differences between these cultures have been analyzed with respect to the usage of emotions, their intensities and the topics used during love discussion in social media.", "phrases": ["emotion term", "cross-cultural analysis", "social medium data"], "overall_score": 0.9567990512159051, "scores": [0.8562281550727846, 0.6473032550702318, 0.5670224593316613], "rank_score": 0.6901846231582258} -{"id": "chambers-jurafsky-2010-database", "title": "A Database of Narrative Schemas", "abstract": "This paper describes a new language resource of events and semantic roles that characterize real-world situations. Narrative schemas contain sets of related events (edit and publish), a temporal ordering of the events (edit before publish), and the semantic roles of the participants (authors publish books). 
This type of world knowledge was central to early research in natural language understanding; scripts, one of the main formalisms, represented common sequences of events that occur in the world. Unfortunately, most of this knowledge was hand-coded and time-consuming to create. Current machine learning techniques, as well as a new approach to learning through coreference chains, have allowed us to automatically extract rich event structure from open domain text in the form of narrative schemas. The narrative schema resource described in this paper contains approximately 5000 unique events combined into schemas of varying sizes. We describe the resource, how it is learned, and a new evaluation of the coverage of these schemas over unseen documents.", "phrases": ["database", "narrative schemas", "world knowledge", "chain", "various event mention"], "overall_score": 1.4350989315537386, "scores": [0.9499965086122876, 0.8451194888617585, 0.5713399376703127, 0.5490397831945404, 0.5351878012238155], "rank_score": 0.6901367039125429} -{"id": "vilar-etal-2010-jane", "title": "Jane: Open Source Hierarchical Translation, Extended with Reordering and Lexicon Models", "abstract": "We present Jane, RWTH's hierarchical phrase-based translation system, which has been open sourced for the scientific community. This system has been in development at RWTH for the last two years and has been successfully applied in different machine translation evaluations. It includes extensions to the hierarchical approach developed by RWTH as well as other research institutions. In this paper we give an overview of its main features. \n \nWe also introduce a novel reordering model for the hierarchical phrase-based approach which further enhances translation performance, and analyze the effect some recent extended lexicon models have on the performance of the system.", "phrases": ["translation system", "jane", "europarl task"], "overall_score": 1.342833376956103, "scores": [0.8753234314927163, 0.6205810958333781, 0.5743350121634534], "rank_score": 0.6900798464965159} -{"id": "vulic-etal-2017-morph", "title": "Morph-fitting: Fine-Tuning Word Vector Spaces with Simple Language-Specific Rules", "abstract": "Morphologically rich languages accentuate two properties of distributional vector space models: 1) the difficulty of inducing accurate representations for low-frequency word forms; and 2) insensitivity to distinct lexical relations that have similar distributional signatures. These effects are detrimental for language understanding systems, which may infer that `inexpensive' is a rephrasing for `expensive' or may not associate `acquire' with `acquires'. In this work, we propose a novel morph-fitting procedure which moves past the use of curated semantic lexicons for improving distributional vector spaces. Instead, our method injects morphological constraints generated using simple language-specific rules, pulling inflectional forms of the same word close together and pushing derivational antonyms far apart. In intrinsic evaluation over four languages, we show that our approach: 1) improves low-frequency word estimates; and 2) boosts the semantic quality of the entire word vector collection. 
Finally, we show that morph-fitted vectors yield large gains in the downstream task of dialogue state tracking, highlighting the importance of morphology for tackling long-tail phenomena in language understanding tasks.", "phrases": ["vector space", "simple language-specific rule", "downstream task"], "overall_score": 1.3426238143229956, "scores": [0.9425188825311597, 0.5852095610569972, 0.5421880142320616], "rank_score": 0.6899721526067394} -{"id": "xiong-etal-2013-lexical", "title": "Lexical Chain Based Cohesion Models for Document-Level Statistical Machine Translation", "abstract": "Lexical chains provide a representation of the lexical cohesion structure of a text. In this paper, we propose two lexical chain based cohesion models to incorporate lexical cohesion into document-level statistical machine translation: 1) a count cohesion model that rewards a hypothesis whenever a chain word occurs in the hypothesis, and 2) a probability cohesion model that further takes chain word translation probabilities into account. We compute lexical chains for each source document to be translated and generate target lexical chains based on the computed source chains via maximum entropy classifiers. We then use the generated target chains to provide constraints for word selection in document-level machine translation through the two proposed lexical chain based cohesion models. We verify the effectiveness of the two models using a hierarchical phrase-based translation system. Experiments on large-scale training data show that they can substantially improve translation quality in terms of BLEU and that the probability cohesion model outperforms previous models based on lexical cohesion devices.", "phrases": ["chain", "statistical machine translation", "lexical cohesion"], "overall_score": 1.1101079652302122, "scores": [0.8977743436425784, 0.6184481370939324, 0.5530240990547427], "rank_score": 0.6897488599304178} -{"id": "alshawi-2003-effective", "title": "Effective Utterance Classification with Unsupervised Phonotactic Models", "abstract": "This paper describes a method for utterance classification that does not require manual transcription of training data. The method combines domain independent acoustic models with off-the-shelf classifiers to give utterance classification performance that is surprisingly close to what can be achieved using conventional word-trigram recognition requiring manual transcription. In our method, unsupervised training is first used to train a phone n-gram model for a particular domain; the output of recognition with this model is then passed to a phone-string classifier. The classification accuracy of the method is evaluated on three different spoken language system domains.", "phrases": ["utterance classification", "transcription", "unsupervised training"], "overall_score": 0.7576513084072962, "scores": [0.9728864317363559, 0.5622058569972084, 0.5338395343188419], "rank_score": 0.6896439410174687} -{"id": "wang-etal-2012-implicit", "title": "Implicit Discourse Relation Recognition by Selecting Typical Training Examples", "abstract": "Implicit discourse relation recognition is a challenging task in the natural language processing field, but important to many applications such as question answering, summarization and so on. 
Previous research used either artificially created implicit discourse relations with connectives removed from explicit relations or annotated implicit relations as training data to detect the possible implicit relations, and did not further discern which examples are fit to be training data. This paper is the first to apply a different typical/atypical perspective to select the most suitable discourse relation examples as training data. To differentiate typical and atypical examples for each discourse relation, a novel single centroid clustering algorithm is proposed. With this typical/atypical distinction, we aim to recognize those easily identified discourse relations more precisely so as to promote the performance of the implicit relation recognition. The experimental results verify that the proposed new method outperforms the state-of-the-art methods.", "phrases": ["discourse relation", "implicit", "augment training data"], "overall_score": 1.5144713363722746, "scores": [0.9487477728909703, 0.580669561251465, 0.5383794928095826], "rank_score": 0.689265608984006} -{"id": "qazvinian-radev-2010-identifying", "title": "Identifying Non-Explicit Citing Sentences for Citation-Based Summarization.", "abstract": "Identifying background (context) information in scientific articles can help scholars understand major contributions in their research area more easily. In this paper, we propose a general framework based on probabilistic inference to extract such context information from scientific papers. We model the sentences in an article and their lexical similarities as a Markov Random Field tuned to detect the patterns that context data create, and employ a Belief Propagation mechanism to detect likely context sentences. We also address the problem of generating surveys of scientific papers. Our experiments show greater pyramid scores for surveys generated using such context information rather than citation sentences alone.", "phrases": ["citation-based summarization", "context sentence", "non-explicit citation"], "overall_score": 1.586875672431684, "scores": [0.929006342887188, 0.5961547169291705, 0.5423529841944187], "rank_score": 0.6891713480035925} -{"id": "ponvert-etal-2011-simple", "title": "Simple Unsupervised Grammar Induction from Raw Text with Cascaded Finite State Models", "abstract": "We consider a new subproblem of unsupervised parsing from raw text, unsupervised partial parsing---the unsupervised version of text chunking. We show that addressing this task directly, using probabilistic finite-state methods, produces better results than relying on the local predictions of a current best unsupervised parser, Seginer's (2007) CCL. These finite-state models are combined in a cascade to produce more general (full-sentence) constituent structures; doing so outperforms CCL by a wide margin in unlabeled PARSEVAL scores for English, German and Chinese. Finally, we address the use of phrasal punctuation as a heuristic indicator of phrasal boundaries, both in our system and in CCL.", "phrases": ["grammar induction", "raw text", "syntax-dependent model"], "overall_score": 1.8187173762987858, "scores": [0.8479898546568339, 0.6435962027756138, 0.5758764670333609], "rank_score": 0.6891541748219362} -{"id": "guinaudeau-strube-2013-graph", "title": "Graph-based Local Coherence Modeling", "abstract": "We propose a computationally efficient graph-based approach for local coherence modeling. 
We evaluate our system on three tasks: sentence ordering, summary coherence rating and readability assessment. The performance is comparable to entity grid based approaches though these rely on a computationally expensive training phase and face data sparsity problems.", "phrases": ["local coherence modeling", "sentence ordering", "entity grid model"], "overall_score": 1.5139432332625955, "scores": [0.99863916004866, 0.5450548062976746, 0.5233818103564828], "rank_score": 0.6890252589009392} -{"id": "jahan-etal-2018-new", "title": "A New Approach to Animacy Detection", "abstract": "Animacy is a necessary property for a referent to be an agent, and thus animacy detection is useful for a variety of natural language processing tasks, including word sense disambiguation, co-reference resolution, semantic role labeling, and others. Prior work treated animacy as a word-level property, and has developed statistical classifiers to classify words as either animate or inanimate. We discuss why this approach to the problem is ill-posed, and present a new approach based on classifying the animacy of co-reference chains. We show that simple voting approaches to inferring the animacy of a chain from its constituent words perform relatively poorly, and then present a hybrid system merging supervised machine learning (ML) and a small number of hand-built rules to compute the animacy of referring expressions and co-reference chains. This method achieves state of the art performance. The supervised ML component leverages features such as word embeddings over referring expressions, parts of speech, and grammatical and semantic roles. The rules take into consideration parts of speech and the hypernymy structure encoded in WordNet. The system achieves an F1 of 0.88 for classifying the animacy of referring expressions, which is comparable to state of the art results for classifying the animacy of words, and achieves an F1 of 0.75 for classifying the animacy of coreference chains themselves. We release our training and test dataset, which includes 142 texts (all narratives) comprising 156,154 words, 34,698 referring expressions, and 10,941 co-reference chains. We test the method on a subset of the OntoNotes dataset, showing using manual sampling that animacy classification is 90% +/- 2% accurate for coreference chains, and 92% +/- 1% for referring expressions. The data also contains 46 folktales, which present an interesting challenge because they often involve characters who are members of traditionally inanimate classes (e.g., stoves that walk, trees that talk). We show that our system is able to detect the animacy of these unusual referents with an F1 of 0.95.", "phrases": ["animacy detection", "hand-built rule", "word embedding"], "overall_score": 1.1086710276303782, "scores": [1.0090774454016305, 0.5332685225858751, 0.5242221532150708], "rank_score": 0.6888560404008587} -{"id": "gerz-etal-2018-language", "title": "Language Modeling for Morphologically Rich Languages: Character-Aware Modeling for Word-Level Prediction", "abstract": "Neural architectures are prominent in the construction of language models (LMs). However, word-level prediction is typically agnostic of subword-level information (characters and character sequences) and operates over a closed vocabulary, consisting of a limited word set. Indeed, while subword-aware models boost performance across a variety of NLP tasks, previous work did not evaluate the ability of these models to assist next-word prediction in language modeling tasks. 
Such subword-level informed models should be particularly effective for morphologically-rich languages (MRLs) that exhibit high type-to-token ratios. In this work, we present a large-scale LM study on 50 typologically diverse languages covering a wide variety of morphological systems, and offer new LM benchmarks to the community, while considering subword-level information. The main technical contribution of our work is a novel method for injecting subword-level information into semantic word vectors, integrated into the neural language modeling training, to facilitate word-level prediction. We conduct experiments in the LM setting where the number of infrequent words is large, and demonstrate strong perplexity gains across our 50 languages, especially for morphologically-rich languages. Our code and data sets are publicly available.", "phrases": ["word-level prediction", "morphologically-rich language", "character-aware nlms"], "overall_score": 1.7117130804893055, "scores": [0.9249221898333879, 0.6062492425596787, 0.5353606209965527], "rank_score": 0.6888440177965398} -{"id": "gao-etal-2003-improved", "title": "Improved Source-Channel Models for Chinese Word Segmentation", "abstract": "This paper presents a Chinese word segmentation system that uses improved source-channel models of Chinese sentence generation. Chinese words are defined as one of the following four types: lexicon words, morphologically derived words, factoids, and named entities. Our system provides a unified approach to the four fundamental features of word-level Chinese language processing: (1) word segmentation, (2) morphological analysis, (3) factoid detection, and (4) named entity recognition. The performance of the system is evaluated on a manually annotated test set, and is also compared with several state-of-the-art systems, taking into account the fact that the definition of Chinese words often varies from system to system.", "phrases": ["source-channel model", "chinese word segmentation", "word-based generative model"], "overall_score": 1.6517638596988833, "scores": [0.9461762951038213, 0.5721064923113275, 0.5482343100199567], "rank_score": 0.6888390324783685} -{"id": "tay-etal-2018-compare", "title": "Compare, Compress and Propagate: Enhancing Neural Architectures with Alignment Factorization for Natural Language Inference", "abstract": "This paper presents a new deep learning architecture for Natural Language Inference (NLI). Firstly, we introduce a new architecture where alignment pairs are compared, compressed and then propagated to upper layers for enhanced representation learning. Secondly, we adopt factorization layers for efficient and expressive compression of alignment vectors into scalar features, which are then used to augment the base word representations. The design of our approach is aimed to be conceptually simple, compact and yet powerful. We conduct experiments on three popular benchmarks, SNLI, MultiNLI and SciTail, achieving competitive performance on all. A lightweight parameterization of our model also enjoys a 3 times reduction in parameter size compared to the existing state-of-the-art models, e.g., ESIM and DIIN, while maintaining competitive performance. 
Additionally, visual analysis shows that our propagated features are highly interpretable.", "phrases": ["natural language inference", "alignment pair", "factorization layer"], "overall_score": 1.1082778177756056, "scores": [0.9186784299996412, 0.5916044297405613, 0.5555523164042234], "rank_score": 0.6886117253814753} -{"id": "shaikh-etal-2010-mpc", "title": "MPC: A Multi-Party Chat Corpus for Modeling Social Phenomena in Discourse", "abstract": "In this paper, we describe our experience with collecting and creating an annotated corpus of multi-party online conversations in a chat-room environment. This effort is part of a larger project to develop computational models of social phenomena such as agenda control, influence, and leadership in on-line interactions. Such models will help capture the dialogue dynamics that are essential for developing, among others, realistic human-machine dialogue systems, including autonomous virtual chat agents. In this paper we describe the data collection method used and the characteristics of the initial dataset of English chat. We have devised a multi-tiered collection process in which the subjects start from simple, free-flowing conversations and progress towards more complex and structured interactions. In this paper, we report on the first two stages of this process, which were recently completed. The third, large-scale collection effort is currently being conducted. All English dialogue has been annotated at four levels: communication links, dialogue acts, local topics and meso-topics. Some details of these annotations will be discussed later in this paper, although a full description is impossible within the scope of this article.", "phrases": ["discourse", "english chat", "meso-topic"], "overall_score": 1.339766687763998, "scores": [0.8780644613330814, 0.5952011715419427, 0.5922460071373662], "rank_score": 0.68850388000413} -{"id": "akiba-etal-2004-overview", "title": "Overview of the IWSLT evaluation campaign", "abstract": "This paper gives an overview of the evaluation campaign results of the IWSLT04 workshop, which is organized by the C-STAR consortium to investigate novel speech translation technologies and their evaluation. The objective of this workshop is to provide a framework for validating the applicability of existing machine translation evaluation methodologies to speech translation technologies. The workshop also strives to find new directions in how to improve current methods.", "phrases": ["iwslt evaluation campaign", "ted talk", "translation quality"], "overall_score": 2.2430127764658057, "scores": [0.9610254508815709, 0.5587630541088603, 0.5455395940608344], "rank_score": 0.6884426996837552} -{"id": "neculoiu-etal-2016-learning", "title": "Learning Text Similarity with Siamese Recurrent Networks", "abstract": "This paper presents a deep architecture for learning a similarity metric on variable-length character sequences. The model combines a stack of character-level bidirectional LSTMs with a Siamese architecture. It learns to project variable-length strings into a fixed-dimensional embedding space by using only information about the similarity between pairs of strings. This model is applied to the task of job title normalization based on a manually annotated taxonomy. A small data set is incrementally expanded and augmented with new sources of variance. 
The model learns a representation that is selective to differences in the input that reflect semantic differences (e.g., \u201cJava developer\u201d vs. \u201cHR manager\u201d) but also invariant to non-semantic string differences (e.g., \u201cJava developer\u201d vs. \u201cJava programmer\u201d).", "phrases": ["similarity metric", "siamese architecture", "job title normalization"], "overall_score": 1.5126394971905337, "scores": [0.9312103432877034, 0.5804348932358907, 0.5536504726082851], "rank_score": 0.6884319030439597} -{"id": "nakashole-flauger-2018-characterizing", "title": "Characterizing Departures from Linearity in Word Translation", "abstract": "We investigate the behavior of maps learned by machine translation methods. The maps translate words by projecting between word embedding spaces of different languages. We locally approximate these maps using linear maps, and find that they vary across the word embedding space. This demonstrates that the underlying maps are non-linear. Importantly, we show that the locally linear maps vary by an amount that is tightly correlated with the distance between the neighborhoods on which they are trained. Our results can be used to test non-linear methods, and to drive the design of more accurate maps for word translation.", "phrases": ["linearity", "word translation", "behavior", "assumption", "local region"], "overall_score": 1.5125861554686286, "scores": [0.9419994316579529, 0.8649864086936494, 0.5606703927720645, 0.5383200821394425, 0.5360618156374551], "rank_score": 0.6884076261801129} -{"id": "wachsmuth-etal-2017-argumentation", "title": "Argumentation Quality Assessment: Theory vs. Practice", "abstract": "Argumentation quality is viewed differently in argumentation theory and in practical assessment approaches. This paper studies to what extent the views match empirically. We find that most observations on quality phrased spontaneously are in fact adequately represented by theory. Even more, relative comparisons of arguments in practice correlate with absolute quality ratings based on theory. Our results clarify how the two views can learn from each other.", "phrases": ["argument quality", "quality dimension", "reasoning"], "overall_score": 1.5122848423395372, "scores": [1.0044423180433975, 0.5345359777587249, 0.5258331821938212], "rank_score": 0.6882704926653145} -{"id": "wang-etal-2018-cross-lingual", "title": "Cross-lingual Knowledge Graph Alignment via Graph Convolutional Networks", "abstract": "Multilingual knowledge graphs (KGs) such as DBpedia and YAGO contain structured knowledge of entities in several distinct languages, and they are useful resources for cross-lingual AI and NLP applications. Cross-lingual KG alignment is the task of matching entities with their counterparts in different languages, which is an important way to enrich the cross-lingual links in multilingual KGs. In this paper, we propose a novel approach for cross-lingual KG alignment via graph convolutional networks (GCNs). Given a set of pre-aligned entities, our approach trains GCNs to embed entities of each language into a unified vector space. Entity alignments are discovered based on the distances between entities in the embedding space. Embeddings can be learned from both the structural and attribute information of entities, and the results of structure embedding and attribute embedding are combined to get accurate alignments. 
In the experiments on aligning real multilingual KGs, our approach achieves the best performance compared with other embedding-based KG alignment approaches.", "phrases": ["pre-aligned entity", "graph encoder", "low-dimensional vector space"], "overall_score": 1.989301946482405, "scores": [0.9557342860833181, 0.5683424256044749, 0.5406770083634707], "rank_score": 0.6882512400170878} -{"id": "hovy-spruit-2016-social", "title": "The Social Impact of Natural Language Processing", "abstract": "Medical sciences have long since established an ethics code for experiments, to minimize the risk of harm to subjects. Natural language processing (NLP) used to involve mostly anonymous corpora, with the goal of enriching linguistic analysis, and was therefore unlikely to raise ethical concerns. As NLP becomes increasingly wide-spread and uses more data from social media, however, the situation has changed: the outcome of NLP experiments and applications can now have a direct effect on individual users\u2019 lives. Until now, the discourse on this topic in the field has not followed the technological development, while public discourse was often focused on exaggerated dangers. This position paper tries to take back the initiative and start a discussion. We identify a number of social implications of NLP and discuss their ethical significance, as well as ways to address them.", "phrases": ["implication", "nlp system", "million"], "overall_score": 1.5846816292733161, "scores": [0.9320551971507279, 0.5697160184766222, 0.5628842458732171], "rank_score": 0.6882184871668557} -{"id": "lal-etal-2019-de", "title": "De-Mixing Sentiment from Code-Mixed Text", "abstract": "Code-mixing is the phenomenon of mixing the vocabulary and syntax of multiple languages in the same sentence. It is an increasingly common occurrence in today's multilingual society and poses a big challenge when encountered in different downstream tasks. In this paper, we present a hybrid architecture for the task of Sentiment Analysis of English-Hindi code-mixed data. Our method consists of three components, each seeking to alleviate different issues. We first generate subword level representations for the sentences using a CNN architecture. The generated representations are used as inputs to a Dual Encoder Network which consists of two different BiLSTMs - the Collective and Specific Encoder. The Collective Encoder captures the overall sentiment of the sentence, while the Specific Encoder utilizes an attention mechanism in order to focus on individual sentiment-bearing sub-words. This, combined with a Feature Network consisting of orthographic features and specially trained word embeddings, achieves state-of-the-art results - 83.54% accuracy and 0.827 F1 score - on a benchmark dataset.", "phrases": ["code-mixed text", "sentiment analysis", "dual encoder network", "collective", "orthographic feature"], "overall_score": 1.7099436631118994, "scores": [0.883208586676947, 0.9607519400988284, 0.5385513623421159, 0.5292642103665992, 0.528883659822329], "rank_score": 0.6881319518613639} -{"id": "gao-etal-2016-physical", "title": "Physical Causality of Action Verbs in Grounded Language Understanding", "abstract": "Linguistics studies have shown that action verbs often denote some Change of State (CoS) as the result of an action. However, the causality of action verbs and its potential connection with the physical world has not been systematically explored. 
To address this limitation, this paper presents a study on physical causality of action verbs and their implied changes in the physical world. We first conducted a crowd-sourcing experiment and identified eighteen categories of physical causality for action verbs. For a subset of these categories, we then defined a set of detectors that detect the corresponding change from visual perception of the physical environment. We further incorporated physical causality modeling and state detection in grounded language understanding. Our empirical studies have demonstrated the effectiveness of causality modeling in grounding language to perception.", "phrases": ["action verb", "language understanding", "change", "perception", "physical causality"], "overall_score": 1.3389741066746614, "scores": [0.8847943310033151, 0.835675064722822, 0.613115595643548, 0.57861340670937, 0.5282844714015767], "rank_score": 0.6880965738961263} -{"id": "steingrimsson-etal-2017-malromur", "title": "M\u00e1lr\u00f3mur: A Manually Verified Corpus of Recorded Icelandic Speech", "abstract": "This paper describes the M\u00e1lr\u00f3mur corpus, an open, manually verified, Icelandic speech corpus. The recordings were collected in 2011\u20132012 by Reykjavik University and the Icelandic Center for Language Technology in cooperation with Google. 152 hours of speech were recorded from 563 participants. The recordings were subsequently manually inspected by evaluators listening to all the segments, determining whether any given segment contains the utterance the participant was supposed to read, and nothing else. Out of 127,286 recorded segments 108,568 were approved and 18,718 deemed unsatisfactory.", "phrases": ["speech corpus", "hour", "segment"], "overall_score": 0.9537959486667857, "scores": [0.8885262987927514, 0.6348886560096906, 0.5406400729398926], "rank_score": 0.6880183425807781} -{"id": "chakrabarty-etal-2019-imho", "title": "IMHO Fine-Tuning Improves Claim Detection", "abstract": "Claims are the central component of an argument. Detecting claims across different domains or data sets can often be challenging due to their varying conceptualization. We propose to alleviate this problem by fine-tuning a language model using a Reddit corpus of 5.5 million opinionated claims. These claims are self-labeled by their authors using the internet acronyms IMO/IMHO (in my (humble) opinion). Empirical results show that using this approach improves state-of-the-art performance across four benchmark argumentation data sets by an average of 4 absolute F1 points in claim detection. As these data sets include diverse domains such as social media and student essays this improvement demonstrates the robustness of fine-tuning on this novel corpus.", "phrases": ["claim detection", "language model", "reddit corpus", "opinion", "imho"], "overall_score": 1.430494992035033, "scores": [0.9301856116376358, 0.84728686141857, 0.5758110244036656, 0.5453694597495431, 0.5409604278329846], "rank_score": 0.6879226770084798} -{"id": "callison-burch-etal-2010-findings", "title": "Findings of the 2010 Joint Workshop on Statistical Machine Translation and Metrics for Machine Translation", "abstract": "This paper presents the results of the WMT10 and MetricsMATR10 shared tasks, which included a translation task, a system combination task, and an evaluation task. We conducted a large-scale manual evaluation of 104 machine translation systems and 41 system combination entries. 
We used the ranking of these systems to measure how strongly 26 automatic metrics correlate with human judgments of translation quality. This year we also investigated increasing the number of human judgments by hiring non-expert annotators through Amazon's Mechanical Turk.", "phrases": ["statistical machine translation", "wmt10", "evaluation task"], "overall_score": 1.1070994737540605, "scores": [0.9488269694952319, 0.5612127320397482, 0.5535990356928997], "rank_score": 0.6878795790759599} -{"id": "badene-etal-2019-weak", "title": "Weak Supervision for Learning Discourse Structure", "abstract": "This paper provides a detailed comparison of a data programming approach with (i) off-the-shelf, state-of-the-art deep learning architectures that optimize their representations (BERT) and (ii) handcrafted-feature approaches previously used in the discourse analysis literature. We compare these approaches on the task of learning discourse structure for multi-party dialogue. The data programming paradigm offered by the Snorkel framework allows a user to label training data using expert-composed heuristics, which are then transformed via the \u201cgenerative step\u201d into probability distributions of the class labels given the data. We show that on our task the generative model outperforms both deep learning architectures and more traditional ML approaches when learning discourse structure\u2014it even outperforms the combination of deep learning methods and hand-crafted features. We also implement several strategies for \u201cdecoding\u201d our generative model output in order to improve our results. We conclude that weak supervision methods hold great promise as a means for creating and improving data sets for discourse structure.", "phrases": ["discourse structure", "learning method", "weak supervision"], "overall_score": 0.9535195790575979, "scores": [0.9137711623552979, 0.5961171617837203, 0.5535686280063745], "rank_score": 0.6878189840484642} -{"id": "lawrence-reed-2015-combining", "title": "Combining Argument Mining Techniques", "abstract": "In this paper, we look at three different methods of extracting the argumentative structure from a piece of natural language text. These methods cover linguistic features, changes in the topic being discussed and a supervised machine learning approach to identify the components of argumentation schemes, patterns of human reasoning which have been detailed extensively in philosophy and psychology. For each of these approaches we achieve results comparable to those previously reported, whilst at the same time achieving a more detailed argument structure. Finally, we use the results from these individual techniques to apply them in combination, further improving the argument structure identification.", "phrases": ["argument structure", "discourse indicator", "connection"], "overall_score": 1.5109649121132411, "scores": [0.8934140357361612, 0.6352125226600579, 0.5343827411971466], "rank_score": 0.6876697665311219} -{"id": "erk-etal-2003-towards", "title": "Towards a Resource for Lexical Semantics: A Large German Corpus with Extensive Semantic Annotation", "abstract": "We describe the ongoing construction of a large, semantically annotated corpus resource as a reliable basis for the large-scale acquisition of word-semantic information, e.g. the construction of domain-independent lexica. The backbone of the annotation consists of semantic roles in the frame semantics paradigm. 
We report on our experiences and evaluate the annotated data from the first project stage. On this basis, we discuss the problems of vagueness and ambiguity in semantic annotation.", "phrases": ["german corpus", "semantic role", "salsa project"], "overall_score": 1.6489208071583292, "scores": [0.9292935220027261, 0.607477283421462, 0.5261893570111215], "rank_score": 0.6876533874784365} -{"id": "fleischman-hovy-2004-multi", "title": "Multi-Document Person Name Resolution", "abstract": "Multi-document person name resolution focuses on the problem of determining if two instances with the same name and from different documents refer to the same individual. We present a two-step approach in which a Maximum Entropy model is trained to give the probability that two names refer to the same individual. We then apply a modified agglomerative clustering technique to partition the instances according to their referents.", "phrases": ["person name resolution", "maximum entropy model", "mention"], "overall_score": 1.1066743248107205, "scores": [0.9766209377099087, 0.5633940290852189, 0.5228312907600872], "rank_score": 0.6876154191850716} -{"id": "song-etal-2021-improved-word", "title": "Improved Word Sense Disambiguation with Enhanced Sense Representations", "abstract": "Current state-of-the-art supervised word sense disambiguation (WSD) systems (such as GlossBERT and the bi-encoder model) yield surprisingly good results by purely leveraging pre-trained language models and short dictionary definitions (or glosses) of the different word senses. While concise and intuitive, the sense gloss is just one of many ways to provide information about word senses. In this paper, we focus on enhancing the sense representations via incorporating synonyms, example phrases or sentences showing usage of word senses, and sense gloss of hypernyms. We show that incorporating such additional information boosts the performance on WSD. With the proposed enhancements, our system achieves an F1 score of 82.0% on the standard benchmark test dataset of the English all-words WSD task, surpassing all previous published scores on this benchmark dataset.", "phrases": ["word sense disambiguation", "wsd", "hypernyms"], "overall_score": 0.7551982632737599, "scores": [0.9700830578756562, 0.5573894199219214, 0.5347607715391367], "rank_score": 0.687411083112238} -{"id": "yu-etal-2017-refining", "title": "Refining Word Embeddings for Sentiment Analysis", "abstract": "Word embeddings that can capture semantic and syntactic information from contexts have been extensively used for various natural language processing tasks. However, existing methods for learning context-based word embeddings typically fail to capture sufficient sentiment information. This may result in words with similar vector representations having an opposite sentiment polarity (e.g., good and bad), thus degrading sentiment analysis performance. Therefore, this study proposes a word vector refinement model that can be applied to any pre-trained word vectors (e.g., Word2vec and GloVe). The refinement model is based on adjusting the vector representations of words such that they can be closer to both semantically and sentimentally similar words and further away from sentimentally dissimilar words. 
Experimental results show that the proposed method can improve conventional word embeddings and outperform previously proposed sentiment embeddings for both binary and fine-grained classification on the Stanford Sentiment Treebank (SST).", "phrases": ["sentiment analysis", "syntactic information", "pre-trained word vector"], "overall_score": 1.7078952852992322, "scores": [0.9458564566456227, 0.5661226644881416, 0.549943750831999], "rank_score": 0.6873076239885877} -{"id": "moneglia-etal-2014-imagact", "title": "The IMAGACT Visual Ontology. An Extendable Multilingual Infrastructure for the representation of lexical encoding of Action", "abstract": "Action verbs have many meanings, covering actions in different ontological types. Moreover, each language categorizes action in its own way. One verb can refer to many different actions and one action can be identified by more than one verb. The range of variations within and across languages is largely unknown, causing trouble for natural language processing tasks. IMAGACT is a corpus-based ontology of action concepts, derived from English and Italian spontaneous speech corpora, which makes use of the universal language of images to identify the different action types extended by verbs referring to action in English, Italian, Chinese and Spanish. This paper presents the infrastructure and the various linguistic information the user can derive from it. IMAGACT makes explicit the variation of meaning of action verbs within one language and allows comparisons of verb variations within and across languages. Because the action concepts are represented with videos, extension into new languages beyond those presently implemented in IMAGACT is done using competence-based judgments by mother-tongue informants without intense lexicographic work involving underdetermined semantic description.", "phrases": ["imagact visual ontology", "lexical encoding", "corpus-based ontology", "action concept"], "overall_score": 1.106047290240583, "scores": [0.8904134111899699, 0.7950307784089926, 0.5350657160150089, 0.528393377191903], "rank_score": 0.6872258207014685} -{"id": "zaghouani-etal-2010-revised", "title": "The Revised Arabic PropBank", "abstract": "The revised Arabic PropBank (APB) reflects a number of changes to the data and the process of PropBanking. Several changes stem from Treebank revisions. An automatic process was put in place to map existing annotation to the new trees. We have revised the original 493 Frame Files from the Pilot APB and added 1462 new files for a total of 1955 Frame Files with 2446 framesets. In addition to a heightened attention to sense distinctions this cycle includes a greater attempt to address complicated predicates such as light verb constructions and multi-word expressions. New tools facilitate the data tagging and also simplify frame creation.", "phrases": ["arabic propbank", "apb", "semantic role labeling"], "overall_score": 1.3371870717879843, "scores": [0.8530372634365019, 0.6454452049532563, 0.5630521905005602], "rank_score": 0.6871782196301061} -{"id": "waibel-etal-2003-speechalator", "title": "Speechalator: Two-Way Speech-to-Speech Translation in Your Hand", "abstract": "This demonstration involves two-way automatic speech-to-speech translation on a consumer off-the-shelf PDA. This work was done as part of the DARPA-funded Babylon project, investigating better speech-to-speech translation systems for communication in the field. 
The development of the Speechalator software-based translation system required addressing a number of hard issues, including a new language for the team (Egyptian Arabic), close integration on a small device, computational efficiency on a limited platform, and scalable coverage for the domain.", "phrases": ["translation system", "device", "speechalator"], "overall_score": 1.1056443169100891, "scores": [0.936775548109146, 0.5912677141900453, 0.5328830555814195], "rank_score": 0.686975439293537} -{"id": "bender-etal-2013-towards", "title": "Towards Creating Precision Grammars from Interlinear Glossed Text: Inferring Large-Scale Typological Properties", "abstract": "We propose to bring together two kinds of linguistic resources\u2014interlinear glossed text (IGT) and a language-independent precision grammar resource\u2014to automatically create precision grammars in the context of language documentation. This paper takes the first steps in that direction by extracting major-constituent word order and case system properties from IGT for a diverse sample of languages.", "phrases": ["precision grammar", "large-scale typological property", "igt"], "overall_score": 1.230800192388295, "scores": [0.9098409482145312, 0.6133637735959343, 0.5375632777892155], "rank_score": 0.686922666533227} -{"id": "perez-ortiz-etal-2014-black", "title": "Black-box integration of heterogeneous bilingual resources into an interactive translation system", "abstract": "The objective of interactive translation prediction (ITP) is to assist human translators in the translation of texts by making context-based computer-generated suggestions as they type. Most of the ITP systems in the literature are strongly coupled with a statistical machine translation system that is conveniently adapted to provide the suggestions. In this paper, however, we propose a resource-agnostic approach in which the suggestions are obtained from any bilingual resource (a machine translation system, a translation memory, a bilingual dictionary, etc.) that provides target-language equivalents for source-language segments. These bilingual resources are considered to be black boxes and do not need to be adapted to the peculiarities of the ITP system. Our evaluation shows that savings of up to 85% can be theoretically achieved in the number of keystrokes when using our novel approach. Preliminary user trials indicate that these benefits can be partly transferred to real-world computer-assisted translation interfaces.", "phrases": ["bilingual resource", "translator", "interactive translation prediction"], "overall_score": 0.9519086488894497, "scores": [0.9125595441231257, 0.5821953991301952, 0.5652158874448628], "rank_score": 0.6866569435660613} -{"id": "bosco-lombardo-2004-dependency", "title": "Dependency and relational structure in treebank annotation", "abstract": "Among the variety of proposals currently making the dependency perspective on grammar more concrete, there are several treebanks whose annotation exploits some form of Relational Structure that we can consider a generalization of the fundamental idea of dependency at various degrees and with reference to different types of linguistic knowledge. The paper describes the Relational Structure as the common underlying representation of treebanks which is motivated by both theoretical and task-dependent considerations. 
Then it presents a system for the annotation of the Relational Structure in treebanks, called Augmented Relational Structure, which allows for a systematic annotation of various components of linguistic knowledge crucial in several tasks. Finally, it shows a dependency-based annotation for an Italian treebank, i.e. the Turin University Treebank, that implements the Augmented Relational Structure.", "phrases": ["relational structure", "non-projective dependency tree", "syntax"], "overall_score": 0.9518754392469714, "scores": [0.8807026793148625, 0.6160750567864378, 0.5631212275171138], "rank_score": 0.6866329878728047} -{"id": "allauzen-etal-2003-generalized", "title": "Generalized Algorithms for Constructing Statistical Language Models", "abstract": "Recent text and speech processing applications such as speech mining raise new and more general problems related to the construction of language models. We present and describe in detail several new and efficient algorithms to address these more general problems and report experimental results demonstrating their usefulness. We give an algorithm for efficiently computing the expected counts of any sequence in a word lattice output by a speech recognizer or any arbitrary weighted automaton; describe a new technique for creating exact representations of n-gram language models by weighted automata whose size is practical for offline use even for a vocabulary size of about 500,000 words and an n-gram order n = 6; and present a simple and more general technique for constructing class-based language models that allows each class to represent an arbitrary weighted automaton. An efficient implementation of our algorithms and techniques has been incorporated in a general software library for language modeling, the GRM Library, that includes many other text and grammar processing functionalities.", "phrases": ["n-gram language model", "transducer", "wfst"], "overall_score": 1.2300866743819658, "scores": [0.9662679806242966, 0.5495944342700835, 0.5437109187605972], "rank_score": 0.6865244445516591} -{"id": "durrett-etal-2013-decentralized", "title": "Decentralized Entity-Level Modeling for Coreference Resolution", "abstract": "Efficiently incorporating entity-level information is a challenge for coreference resolution systems due to the difficulty of exact inference over partitions. We describe an end-to-end discriminative probabilistic model for coreference that, along with standard pairwise features, enforces structural agreement constraints between specified properties of coreferent mentions. This model can be represented as a factor graph for each document that admits efficient inference via belief propagation. We show that our method can use entity-level information to outperform a basic pairwise system.", "phrases": ["coreference resolution", "probabilistic model", "factor graph"], "overall_score": 1.5070748279258612, "scores": [0.931698166810907, 0.566057552376778, 0.5599422195723294], "rank_score": 0.6858993129200049} -{"id": "kummerfeld-klein-2013-error", "title": "Error-Driven Analysis of Challenges in Coreference Resolution", "abstract": "Coreference resolution metrics quantify errors but do not analyze them. Here, we consider an automated method of categorizing errors in the output of a coreference system into intuitive underlying error types. 
Using this tool, we first compare the error distributions across a large set of systems, then analyze common errors across the top ten systems, empirically characterizing the major unsolved challenges of the coreference resolution task.", "phrases": ["coreference system", "state-of-the-art system", "reference"], "overall_score": 1.8100844239945546, "scores": [0.984336341487942, 0.5426908565590958, 0.5306216491552801], "rank_score": 0.6858829490674393} -{"id": "van-cranenburgh-koolen-2015-identifying", "title": "Identifying Literary Texts with Bigrams", "abstract": "We study perceptions of literariness in a set of contemporary Dutch novels. Experiments with machine learning models show that it is possible to automatically distinguish novels that are seen as highly literary from those that are seen as less literary, using surprisingly simple textual features. The most discriminating features of our classification model indicate that genre might be a confounding factor, but a regression model shows that we can also explain variation between highly literary novels and less literary ones within genre.", "phrases": ["novel", "textual feature", "literary one"], "overall_score": 1.1038490976325153, "scores": [0.8539346341804863, 0.615029007581099, 0.5886163787619711], "rank_score": 0.6858600068411854} -{"id": "liu-etal-2019-robust", "title": "Robust Neural Machine Translation with Joint Textual and Phonetic Embedding", "abstract": "Neural machine translation (NMT) is notoriously sensitive to noise, but noise is almost inevitable in practice. One special kind of noise is homophone noise, where words are replaced by other words with similar pronunciations. We propose to improve the robustness of NMT to homophone noise by 1) jointly embedding both textual and phonetic information of source sentences, and 2) augmenting the training dataset with homophone noise. Interestingly, to achieve better translation quality and more robustness, we found that most (though not all) weights should be put on the phonetic rather than textual information. Experiments show that our method not only significantly improves the robustness of NMT to homophone noise, but also surprisingly improves the translation quality on some clean test sets.", "phrases": ["neural machine translation", "phonetic information", "training corpus"], "overall_score": 1.809905510745264, "scores": [0.9569830670819515, 0.5664720223648486, 0.5339903746225148], "rank_score": 0.6858151546897716} -{"id": "bommasani-cardie-2020-intrinsic", "title": "Intrinsic Evaluation of Summarization Datasets", "abstract": "High quality data forms the bedrock for building meaningful statistical models in NLP. Consequently, data quality must be evaluated either during dataset construction or *post hoc*. Almost all popular summarization datasets are drawn from natural sources and do not come with inherent quality assurance guarantees. In spite of this, data quality has gone largely unquestioned for many of these recent datasets. We perform the first large-scale evaluation of summarization datasets by introducing 5 intrinsic metrics and applying them to 10 popular datasets. We find that data usage in recent summarization research is sometimes inconsistent with the underlying properties of the data. 
Further, we discover that our metrics can serve the additional purpose of being inexpensive heuristics for detecting generically low-quality examples.", "phrases": ["summarization dataset", "purpose", "intrinsic evaluation"], "overall_score": 1.1030854329367736, "scores": [0.8570445489581202, 0.6678121654468425, 0.5312998314573314], "rank_score": 0.6853855152874314} -{"id": "mckeown-etal-2010-time", "title": "Time-Efficient Creation of an Accurate Sentence Fusion Corpus", "abstract": "Sentence fusion enables summarization and question-answering systems to produce output by combining fully formed phrases from different sentences. Yet there is little data that can be used to develop and evaluate fusion techniques. In this paper, we present a methodology for collecting fusions of similar sentence pairs using Amazon's Mechanical Turk, selecting the input pairs in a semi-automated fashion. We evaluate the results using a novel technique for automatically selecting a representative sentence from multiple responses. Our approach allows for rapid construction of a high accuracy fusion corpus.", "phrases": ["fusion", "sentence pair", "amazon"], "overall_score": 1.5780384989869602, "scores": [0.9215517554763373, 0.6029881421242347, 0.5314603394222092], "rank_score": 0.685333412340927} -{"id": "chen-etal-2018-variational-knowledge", "title": "Variational Knowledge Graph Reasoning", "abstract": "Inferring missing links in knowledge graphs (KG) has attracted a lot of attention from the research community. In this paper, we tackle a practical query answering task involving predicting the relation of a given entity pair. We frame this prediction problem as an inference problem in a probabilistic graphical model and aim at resolving it from a variational inference perspective. In order to model the relation between the query entity pair, we assume that there exists an underlying latent variable (paths connecting two nodes) in the KG, which carries the equivalent semantics of their relations. However, due to the intractability of connections in large KGs, we propose to use variational inference to maximize the evidence lower bound. More specifically, our framework (Diva) is composed of three modules, i.e. a posterior approximator, a prior (path finder), and a likelihood (path reasoner). By using variational inference, we are able to incorporate them closely into a unified architecture and jointly optimize them to perform KG reasoning. With active interactions among these sub-modules, Diva is better at handling noise and coping with more complex reasoning scenarios. In order to evaluate our method, we conduct link prediction experiments on multiple datasets and achieve state-of-the-art performance on both datasets.", "phrases": ["knowledge graph", "latent variable", "path", "diva", "reinforcement learning"], "overall_score": 1.3335346005559032, "scores": [1.162717301316931, 0.6321716300631721, 0.5532453810778794, 0.5491004746232162, 0.5292713165107328], "rank_score": 0.6853012207183863} -{"id": "venhuizen-etal-2013-gamification", "title": "Gamification for Word Sense Labeling", "abstract": "Obtaining gold standard data for word sense disambiguation is important but costly. We show how it can be done using a \u201cGame with a Purpose\u201d (GWAP) called Wordrobe. This game consists of a large set of multiple-choice questions on word senses generated from the Groningen Meaning Bank. The players need to answer these questions, scoring points depending on the agreement with fellow players. 
The working assumption is that the right sense for a word can be determined by the answers given by the players. To evaluate our method, we gold-standard tagged a portion of the data that was also used in the GWAP. A comparison yielded promising results, ranging from a precision of 0.88 and recall of 0.83 for relative majority agreement, to a precision of 0.98 and recall of 0.35 for questions that were answered unanimously.", "phrases": ["word sense disambiguation", "multiple-choice question", "groningen meaning bank"], "overall_score": 1.424809591739877, "scores": [0.9453894427701307, 0.560925718183376, 0.5492505712606425], "rank_score": 0.6851885774047165} -{"id": "globerson-etal-2016-collective", "title": "Collective Entity Resolution with Multi-Focal Attention", "abstract": "Entity resolution is the task of linking each mention of an entity in text to the corresponding record in a knowledge base (KB). Coherence models for entity resolution encourage all referring expressions in a document to resolve to entities that are related in the KB. We explore attention-like mechanisms for coherence, where the evidence for each candidate is based on a small set of strong relations, rather than relations to all other entities in the document. The rationale is that document-wide support may simply not exist for non-salient entities, or entities not densely connected in the KB. Our proposed system outperforms state-of-the-art systems on the CoNLL 2003, TAC KBP 2010, 2011 and 2012 tasks.", "phrases": ["coherence", "other entity", "state-of-the-art system"], "overall_score": 1.7574478951398467, "scores": [0.8735003809754432, 0.5946432152830818, 0.5873916017687143], "rank_score": 0.6851783993424131} -{"id": "bergsma-cherry-2010-fast", "title": "Fast and Accurate Arc Filtering for Dependency Parsing", "abstract": "We propose a series of learned arc filters to speed up graph-based dependency parsing. A cascade of filters identify implausible head-modifier pairs, with time complexity that is first linear, and then quadratic in the length of the sentence. The linear filters reliably predict, in context, words that are roots or leaves of dependency trees, and words that are likely to have heads on their left or right. We use this information to quickly prune arcs from the dependency graph. More than 78% of total arcs are pruned while retaining 99.5% of the true dependencies. These filters improve the speed of two state-of-the-art dependency parsers, with low overhead and negligible loss in accuracy.", "phrases": ["arc", "filter", "dependency parsing"], "overall_score": 1.1027148087386627, "scores": [0.9693653579755597, 0.5582338116300739, 0.5278665309710223], "rank_score": 0.6851552335255519} -{"id": "singh-etal-2016-quantifying", "title": "Quantifying sentence complexity based on eye-tracking measures", "abstract": "Eye-tracking reading times have been attested to reflect cognitive processes underlying sentence comprehension. However, the use of reading times in NLP applications is an underexplored area of research. In this initial work we build an automatic system to assess sentence complexity using automatically predicted eye-tracking reading time measures and demonstrate the efficacy of these reading times for a well-known NLP task, namely, readability assessment. We use a machine learning model and a set of features known to be significant predictors of reading times in order to learn per-word reading times from a corpus of English text with reading times from human readers. 
Subsequently, we use the model to predict reading times for novel text in the context of the aforementioned task. A model based only on reading times gave competitive results compared to the systems that use extensive syntactic features to compute linguistic complexity. Our work, to the best of our knowledge, is the first study to show that automatically predicted reading times can successfully model the difficulty of a text and can be deployed in practical text processing applications.", "phrases": ["sentence complexity", "token-level", "gaze behaviour"], "overall_score": 1.2275989307166655, "scores": [0.934639862593463, 0.5756602254147842, 0.5451079371195106], "rank_score": 0.6851360083759194} -{"id": "xu-etal-2016-cached", "title": "Cached Long Short-Term Memory Neural Networks for Document-Level Sentiment Classification", "abstract": "Recently, neural networks have achieved great success on sentiment classification due to their ability to alleviate feature engineering. However, one of the remaining challenges is to model long texts in document-level sentiment classification under a recurrent architecture because of the deficiency of the memory unit. To address this problem, we present Cached Long Short-Term Memory neural networks (CLSTM) to capture the overall semantic information in long texts. CLSTM introduces a cache mechanism, which divides memory into several groups with different forgetting rates and thus enables the network to keep sentiment information better within a recurrent unit. The proposed CLSTM outperforms the state-of-the-art models on three publicly available document-level sentiment analysis datasets.", "phrases": ["short-term memory", "document-level sentiment classification", "semantic information"], "overall_score": 1.2273384326285055, "scores": [0.938606826241246, 0.5829352231011262, 0.5334298155317909], "rank_score": 0.684990621624721} -{"id": "saxena-etal-2020-keygames", "title": "KeyGames: A Game Theoretic Approach to Automatic Keyphrase Extraction", "abstract": "In this paper, we introduce two advancements in the automatic keyphrase extraction (AKE) space - KeyGames and pke+. KeyGames is an unsupervised AKE framework that employs the concepts of evolutionary game theory and the consistent labelling problem to ensure consistent classification of candidates into keyphrase and non-keyphrase. Pke+ is a Python-based pipeline built on top of the existing pke library to standardize various AKE steps, namely candidate extraction and evaluation, to ensure truly systematic and comparable performance analysis of AKE models. In the experiments section, we compare the performance of KeyGames across three publicly available datasets (Inspec 2001, SemEval 2010, DUC 2001) against the results quoted by the existing state-of-the-art models as well as their performance when reproduced using pke+. The results show that KeyGames outperforms most of the state-of-the-art systems while generalizing better on input documents with different domains and lengths. Further, pke+'s pre-processing brings out improvement in several other systems' quoted performance as well.", "phrases": ["automatic keyphrase extraction", "game theory", "candidate"], "overall_score": 0.7523450919606572, "scores": [0.9435439025076381, 0.5677820730877084, 0.5431160683929497], "rank_score": 0.6848140146627655} -{"id": "ravichandran-etal-2003-statistical", "title": "Statistical QA - Classifier vs. 
Re-ranker: What's the difference?", "abstract": "In this paper, we show that we can obtain a good baseline performance for Question Answering (QA) by using only 4 simple features. Using these features, we contrast two approaches used for a Maximum Entropy based QA system. We view the QA problem as a classification problem and as a re-ranking problem. Our results indicate that the QA system viewed as a re-ranker clearly outperforms the QA system used as a classifier. Both systems are trained using the same data.", "phrases": ["re-ranker", "question answering", "classification problem"], "overall_score": 1.102045417750585, "scores": [0.9484154121999604, 0.5672765452992284, 0.5385259950601506], "rank_score": 0.6847393175197798} -{"id": "downey-etal-2007-sparse", "title": "Sparse Information Extraction: Unsupervised Language Models to the Rescue", "abstract": "Even in a massive corpus such as the Web, a substantial fraction of extractions appear infrequently. This paper shows how to assess the correctness of sparse extractions by utilizing unsupervised language models. The REALM system, which combines HMM-based and n-gram-based language models, ranks candidate extractions by the likelihood that they are correct. Our experiments show that REALM reduces extraction error by 39%, on average, when compared with previous work. Because REALM pre-computes language models based on its corpus and does not require any hand-tagged seeds, it is far more scalable than approaches that learn models for each individual relation from hand-tagged data. Thus, REALM is ideally suited for open information extraction where the relations of interest are not specified in advance and their number is potentially vast.", "phrases": ["sparse information extraction", "candidate relation", "hmm"], "overall_score": 1.5043669541668945, "scores": [0.9326617103776715, 0.5849555577190129, 0.5363834512890828], "rank_score": 0.6846669064619224} -{"id": "lampert-etal-2006-classifying", "title": "Classifying Speech Acts using Verbal Response Modes", "abstract": "The driving vision for our work is to provide intelligent, automated assistance to users in understanding the status of their email conversations. Our approach is to create tools that enable the detection and connection of speech acts across email messages. We thus require a mechanism for tagging email utterances with some indication of their dialogic function. However, existing dialog act taxonomies as used in computational linguistics tend to be too task- or application-specific for the wide range of acts we find represented in email conversation. The Verbal Response Modes (VRM) taxonomy of speech acts, widely applied for discourse analysis in linguistics and psychology, is distinguished from other speech act taxonomies by its construction from crosscutting principles of classification, which ensure universal applicability across any domain of discourse. The taxonomy categorises on two dimensions, characterised as literal meaning and pragmatic meaning. In this paper, we describe a statistical classifier that automatically identifies the literal meaning category of utterances using the VRM classification. We achieve an accuracy of 60.8% using linguistic features derived from VRM\u2019s human annotation guidelines. 
Accuracy is improved to 79.8% using additional features.", "phrases": ["speech act", "response mode", "email message"], "overall_score": 0.9490375278154635, "scores": [0.9477926536906719, 0.5612726384873861, 0.5446923103172787], "rank_score": 0.6845858674984454} -{"id": "zhang-gildea-2005-stochastic", "title": "Stochastic Lexicalized Inversion Transduction Grammar for Alignment", "abstract": "We present a version of Inversion Transduction Grammar where rule probabilities are lexicalized throughout the synchronous parse tree, along with pruning techniques for efficient training. Alignment results improve over unlexicalized ITG on short sentences for which full EM is feasible, but pruning seems to have a negative impact on longer sentences.", "phrases": ["efficient training", "itg", "word alignment", "tic-tac-toe pruning"], "overall_score": 1.755819885242236, "scores": [1.0615005242022795, 0.6087230483287878, 0.5346999974808779, 0.5332511703734558], "rank_score": 0.6845436850963502} -{"id": "brooks-youssef-2020-metaphor", "title": "Metaphor Detection using Ensembles of Bidirectional Recurrent Neural Networks", "abstract": "In this paper we present our results from the Second Shared Task on Metaphor Detection, hosted by the Second Workshop on Figurative Language Processing. We use an ensemble of RNN models with bidirectional LSTMs and bidirectional attention mechanisms. Some of the models were trained on all parts of speech. Each of the other models was trained on one of four categories for parts of speech: \u201cnouns\u201d, \u201cverbs\u201d, \u201cadverbs/adjectives\u201d, or \u201cother\u201d. The models were combined into voting pools and the voting pools were combined using the logical \u201cOR\u201d operator.", "phrases": ["bidirectional attention mechanism", "metaphor detection", "bi-lstms"], "overall_score": 0.9489483610231698, "scores": [0.955534033400572, 0.5533358604712081, 0.5446947478899756], "rank_score": 0.6845215472539184} -{"id": "fang-etal-2020-video2commonsense", "title": "Video2Commonsense: Generating Commonsense Descriptions to Enrich Video Captioning", "abstract": "Captioning is a crucial and challenging task for video understanding. In videos that involve active agents such as humans, the agent's actions can bring about myriad changes in the scene. Observable changes such as movements, manipulations, and transformations of the objects in the scene are reflected in conventional video captioning. Unlike images, actions in videos are also inherently linked to social aspects such as intentions (why the action is taking place), effects (what changes due to the action), and attributes that describe the agent. Thus for video understanding, such as when captioning videos or when answering questions about videos, one must have an understanding of these commonsense aspects. We present the first work on generating commonsense captions directly from videos, to describe latent aspects such as intentions, effects, and attributes. We present a new dataset \u201cVideo-to-Commonsense (V2C)\u201d that contains ~9k videos of human agents performing various actions, annotated with 3 types of commonsense descriptions. Additionally we explore the use of open-ended video-based commonsense question answering (V2C-QA) as a way to enrich our captions. 
Both the generation task and the QA task can be used to enrich video captions.", "phrases": ["commonsense description", "video caption", "video2commonsense"], "overall_score": 0.7519839700081498, "scores": [0.8808671534997218, 0.6141687923078119, 0.5584199760804573], "rank_score": 0.6844853072959971} -{"id": "qu-etal-2010-bag", "title": "The Bag-of-Opinions Method for Review Rating Prediction from Sparse Text Patterns", "abstract": "The problem addressed in this paper is to predict a user's numeric rating in a product review from the text of the review. Unigram and n-gram representations of text are common choices in opinion mining. However, unigrams cannot capture important expressions like \"could have been better\", which are essential for prediction models of ratings. N-grams of words, on the other hand, capture such phrases, but typically occur too sparsely in the training set and thus fail to yield robust predictors. This paper overcomes the limitations of these two models, by introducing a novel kind of bag-of-opinions representation, where an opinion, within a review, consists of three components: a root word, a set of modifier words from the same sentence, and one or more negation words. Each opinion is assigned a numeric score which is learned, by ridge regression, from a large, domain-independent corpus of reviews. For the actual test case of a domain-dependent review, the review's rating is predicted by aggregating the scores of all opinions in the review and combining it with a domain-dependent unigram model. The paper presents a constrained ridge regression algorithm for learning opinion scores. Experiments show that the bag-of-opinions method outperforms prior state-of-the-art techniques for review rating prediction.", "phrases": ["review rating prediction", "bag-of-opinion representation", "root word"], "overall_score": 1.3311487862543878, "scores": [0.9156324969981956, 0.6137473401199821, 0.5228456269927289], "rank_score": 0.6840751547036356} -{"id": "liu-etal-2021-mulda", "title": "MulDA: A Multilingual Data Augmentation Framework for Low-Resource Cross-Lingual NER", "abstract": "Named Entity Recognition (NER) for low-resource languages is both a practical and challenging research problem. This paper addresses zero-shot transfer for cross-lingual NER, especially when the amount of source-language training data is also limited. The paper first proposes a simple but effective labeled sequence translation method that translates source-language training data to target languages while avoiding problems such as word order change and entity span determination. With the source-language data as well as the translated data, a generation-based multilingual data augmentation method is introduced to further increase diversity by generating synthetic labeled data in multiple languages. These augmented data enable the language model based NER models to generalize better with both the language-specific features from the target-language synthetic data and the language-independent features from multilingual synthetic data. 
An extensive set of experiments was conducted to demonstrate the encouraging cross-lingual transfer performance of the proposed approach on a wide variety of target languages.", "phrases": ["cross-lingual ner", "low-resource language", "data augmentation method"], "overall_score": 1.5029508949614587, "scores": [0.9266923633888975, 0.5702988875547422, 0.5550760394881072], "rank_score": 0.6840224301439156} -{"id": "goutte-etal-2004-aligning", "title": "Aligning words using matrix factorisation", "abstract": "Aligning words from sentences which are mutual translations is an important problem in different settings, such as bilingual terminology extraction, Machine Translation, or projection of linguistic features. Here, we view word alignment as matrix factorisation. In order to produce proper alignments, we show that factors must satisfy a number of constraints such as orthogonality. We then propose an algorithm for orthogonal non-negative matrix factorisation, based on a probabilistic model of the alignment data, and apply it to word alignment. This is illustrated on a French-English alignment task from the Hansard.", "phrases": ["matrix factorisation", "machine translation", "aer"], "overall_score": 1.225299320119242, "scores": [0.9489318264359938, 0.5772495228132192, 0.5253763645444894], "rank_score": 0.6838525712645674} -{"id": "bansal-klein-2011-web", "title": "Web-Scale Features for Full-Scale Parsing", "abstract": "Counts from large corpora (like the web) can be powerful syntactic cues. Past work has used web counts to help resolve isolated ambiguities, such as binary noun-verb PP attachments and noun compound bracketings. In this work, we first present a method for generating web count features that address the full range of syntactic attachments. These features encode both surface evidence of lexical affinities as well as paraphrase-based cues to syntactic structure. We then integrate our features into full-scale dependency and constituent parsers. We show relative error reductions of 7.0% over the second-order dependency parser of McDonald and Pereira (2006), 9.2% over the constituent parser of Petrov et al. (2006), and 3.4% over a non-local constituent reranker.", "phrases": ["web-scale feature", "conjunction", "unlabeled data"], "overall_score": 1.6396769855776414, "scores": [0.9561426002789889, 0.5655128168511889, 0.5297398262460518], "rank_score": 0.6837984144587432} -{"id": "onoe-durrett-2020-interpretable", "title": "Interpretable Entity Representations through Large-Scale Typing", "abstract": "In standard methodology for natural language processing, entities in text are typically embedded in dense vector spaces with pre-trained models. The embeddings produced this way are effective when fed into downstream models, but they require end-task fine-tuning and are fundamentally difficult to interpret. In this paper, we present an approach to creating entity representations that are human readable and achieve high performance on entity-related tasks out of the box. Our representations are vectors whose values correspond to posterior probabilities over fine-grained entity types, indicating the confidence of a typing model's decision that the entity belongs to the corresponding type. We obtain these representations using a fine-grained entity typing model, trained either on supervised ultra-fine entity typing data (Choi et al. 2018) or distantly-supervised examples from Wikipedia. 
On entity probing tasks involving recognizing entity identity, our embeddings used in parameter-free downstream models achieve competitive performance with ELMo- and BERT-based embeddings in trained models. We also show that it is possible to reduce the size of our type set in a learning-based way for particular domains. Finally, we show that these embeddings can be post-hoc modified through a small number of rules to incorporate domain knowledge and improve performance.", "phrases": ["entity representation", "probability", "ultra-fine entity"], "overall_score": 1.100306521958339, "scores": [0.9676858709278208, 0.5569738757492725, 0.5263168957724028], "rank_score": 0.6836588808164987}
-{"id": "peng-etal-2015-dual", "title": "Dual Decomposition Inference for Graphical Models over Strings", "abstract": "We investigate dual decomposition for joint MAP inference of many strings. Given an arbitrary graphical model, we decompose it into small acyclic sub-models, whose MAP configurations can be found by finite-state composition and dynamic programming. We force the solutions of these subproblems to agree on overlapping variables by tuning Lagrange multipliers for an adaptively expanding set of variable-length n-gram count features. This is the first inference method for arbitrary graphical models over strings that does not require approximations such as random sampling, message simplification, or a bound on string length. Provided that the inference method terminates, it gives a certificate of global optimality (though MAP inference in our setting is undecidable in general). On our global phonological inference problems, it always terminates, and achieves more accurate results than max-product and sum-product loopy belief propagation.", "phrases": ["string", "dual decomposition technique", "instance-level constraint"], "overall_score": 1.224739706958176, "scores": [0.8890362032735685, 0.634040256766765, 0.5275442755975225], "rank_score": 0.6835402452126186}
-{"id": "zhou-etal-2020-improving-grammatical", "title": "Improving Grammatical Error Correction with Machine Translation Pairs", "abstract": "We propose a novel data synthesis method to generate diverse error-corrected sentence pairs for improving grammatical error correction, which is based on a pair of machine translation models (e.g., Chinese to English) of different qualities (i.e., poor and good). The poor translation model can resemble the ESL (English as a second language) learner and tends to generate translations of low quality in terms of fluency and grammaticality, while the good translation model generally generates fluent and grammatically correct translations. With the pair of translation models, we can generate unlimited numbers of poor to good English sentence pairs from text in the source language (e.g., Chinese) of the translators. Our approach can generate various error-corrected patterns and nicely complement the other data synthesis approaches for GEC.
Experimental results demonstrate that the data generated by our approach can effectively help a GEC model improve its performance and achieve state-of-the-art single-model performance on the BEA-19 and CoNLL-14 benchmark datasets.", "phrases": ["grammatical error correction", "sentence pair", "different quality"], "overall_score": 0.947351472644066, "scores": [0.9515244328879873, 0.5724145353965602, 0.5261699390591286], "rank_score": 0.6833696357812253}
-{"id": "narayanan-harabagiu-2004-question", "title": "Question Answering Based on Semantic Structures", "abstract": "The ability to answer complex questions posed in Natural Language depends on (1) the depth of the available semantic representations and (2) the inferential mechanisms they support. In this paper we describe a QA architecture where questions are analyzed and candidate answers generated by (1) identifying predicate argument structures and semantic frames from the input and (2) performing structured probabilistic inference using the extracted relations in the context of a domain and scenario model. A novel aspect of our system is a scalable and expressive representation of actions and events based on Coordinated Probabilistic Relational Models (CPRM). In this paper we report on the ability of the implemented system to perform several forms of probabilistic and temporal inferences to extract answers to complex questions. The results indicate enhanced accuracy over current state-of-the-art Q/A systems.", "phrases": ["action", "question answering", "information extraction"], "overall_score": 1.420714733817578, "scores": [0.9745414609680265, 0.5547961730906601, 0.5203204669378172], "rank_score": 0.6832193669988346}
-{"id": "schwartz-etal-2017-story", "title": "Story Cloze Task: UW NLP System", "abstract": "This paper describes University of Washington NLP's submission for the Linking Models of Lexical, Sentential and Discourse-level Semantics (LSDSem 2017) shared task\u2014the Story Cloze Task. Our system is a linear classifier with a variety of features, including both the scores of a neural language model and style features. We report 75.2% accuracy on the task. A further discussion of our results can be found in Schwartz et al. (2017).", "phrases": ["language model", "story cloze task", "stylistic feature"], "overall_score": 1.4203227617151741, "scores": [0.9803864917492544, 0.5442705689259086, 0.5244355441130358], "rank_score": 0.683030868262733}
-{"id": "baldwin-2007-scalable", "title": "Scalable Deep Linguistic Processing: Mind the Lexical Gap", "abstract": "Coverage has been a constant thorn in the side of deployed deep linguistic processing applications, largely because of the difficulty in constructing, maintaining and domain-tuning the complex lexicons that they rely on. This paper reviews various strands of research on deep lexical acquisition (DLA), i.e. the (semi-)automatic creation of linguistically-rich language resources, particularly from the viewpoint of DLA for precision grammars.", "phrases": ["deep lexical acquisition", "dla", "precision grammar"], "overall_score": 0.9468448813256348, "scores": [0.863348383316155, 0.6043880938333427, 0.5812761450198912], "rank_score": 0.6830042073897963}
-{"id": "white-rajkumar-2012-minimal", "title": "Minimal Dependency Length in Realization Ranking", "abstract": "Comprehension and corpus studies have found that the tendency to minimize dependency length has a strong influence on constituent ordering choices.
In this paper, we investigate dependency length minimization in the context of discriminative realization ranking, focusing on its potential to eliminate egregious ordering errors as well as better match the distributional characteristics of sentence orderings in news text. We find that with a state-of-the-art, comprehensive realization ranking model, dependency length minimization yields statistically significant improvements in BLEU scores and significantly reduces the number of heavy/light ordering errors. Through distributional analyses, we also show that with simpler ranking models, dependency length minimization can go overboard, too often sacrificing canonical word order to shorten dependencies, while richer models manage to better counterbalance the dependency length minimization preference against (sometimes) competing canonical word order preferences.", "phrases": ["dependency length", "realization", "ranking model"], "overall_score": 1.099212148766813, "scores": [0.9200437613970014, 0.6054689714078944, 0.5234239927585781], "rank_score": 0.6829789085211581} -{"id": "iter-etal-2020-pretraining", "title": "Pretraining with Contrastive Sentence Objectives Improves Discourse Performance of Language Models", "abstract": "Recent models for unsupervised representation learning of text have employed a number of techniques to improve contextual word representations but have put little focus on discourse-level representations. We propose Conpono, an inter-sentence objective for pretraining language models that models discourse coherence and the distance between sentences. Given an anchor sentence, our model is trained to predict the text k sentences away using a sampled-softmax objective where the candidates consist of neighboring sentences and sentences randomly sampled from the corpus. On the discourse representation benchmark DiscoEval, our model improves over the previous state-of-the-art by up to 13% and on average 4% absolute across 7 tasks. Our model is the same size as BERT-Base, but outperforms the much larger BERT-Large model and other more recent approaches that incorporate discourse. We also show that Conpono yields gains of 2%-6% absolute even for tasks that do not explicitly evaluate discourse: textual entailment (RTE), common sense reasoning (COPA) and reading comprehension (ReCoRD).", "phrases": ["discourse-level representation", "inter-sentence objective", "contrastive learning"], "overall_score": 1.572468980301926, "scores": [0.910846693507089, 0.5856917459882941, 0.5522053638320957], "rank_score": 0.6829146011091596} -{"id": "tsai-etal-2021-style", "title": "Style Control for Schema-Guided Natural Language Generation", "abstract": "Natural Language Generation (NLG) for task-oriented dialogue systems focuses on communicating specific content accurately, fluently, and coherently. While these attributes are crucial for a successful dialogue, it is also desirable to simultaneously accomplish specific stylistic goals, such as response length, point-of-view, descriptiveness, sentiment, formality, and empathy. In this work, we focus on stylistic control and evaluation for schema-guided NLG, with joint goals of achieving both semantic and stylistic control. We experiment in detail with various controlled generation methods for large pretrained language models: specifically, conditional training, guided fine-tuning, and guided decoding. We discuss their advantages and limitations, and evaluate them with a broad range of automatic and human evaluation metrics. 
Our results show that while high style accuracy and semantic correctness are easier to achieve for more lexically-defined styles with conditional training, stylistic control is also achievable for more semantically complex styles using discriminator-based guided decoding methods. The results also suggest that methods that are more scalable (with less hyper-parameter tuning) and that disentangle context generation and stylistic variations are more effective at achieving semantic correctness and style accuracy.", "phrases": ["natural language generation", "nlg", "style accuracy"], "overall_score": 0.7501909859350926, "scores": [0.9481455233246542, 0.5662506163973376, 0.5341636488579581], "rank_score": 0.6828532628599833}
-{"id": "kuhn-etal-2010-phrase", "title": "Phrase Clustering for Smoothing TM Probabilities - or, How to Extract Paraphrases from Phrase Tables", "abstract": "This paper describes how to cluster together the phrases of a phrase-based statistical machine translation (SMT) system, using information in the phrase table itself. The clustering is symmetric and recursive: it is applied both to source-language and target-language phrases, and the clustering in one language helps determine the clustering in the other. The phrase clusters have many possible uses. This paper looks at one of these uses: smoothing the conditional translation model (TM) probabilities employed by the SMT system. We incorporated phrase-cluster-derived probability estimates into a baseline loglinear feature combination that included relative frequency and lexically-weighted conditional probability estimates. In Chinese-English (C-E) and French-English (F-E) learning curve experiments, we obtained a gain over the baseline in 29 of 30 tests, with a maximum gain of 0.55 BLEU points (though most gains were fairly small). The largest gains came with medium (200-400K sentence pairs) rather than with small (less than 100K sentence pairs) amounts of training data, contrary to what one would expect from the paraphrasing literature. We have only begun to explore the original smoothing approach described here.", "phrases": ["clustering", "paraphrase", "translation model"], "overall_score": 1.3282753384864052, "scores": [0.8961991329693154, 0.5883933615175926, 0.5632029894894416], "rank_score": 0.6825984946587832}
-{"id": "lapata-lascarides-2004-inferring", "title": "Inferring Sentence-internal Temporal Relations", "abstract": "In this paper we propose a data-intensive approach for inferring sentence-internal temporal relations, which relies on a simple probabilistic model and assumes no manual coding. We explore various combinations of features, and evaluate performance against a gold-standard corpus and human subjects performing the same task. The best model achieves 70.7% accuracy in inferring the temporal relation between two clauses and 97.4% accuracy in ordering them, assuming that the temporal relation is known.", "phrases": ["temporal relation", "clause", "rhetorical relation"], "overall_score": 1.419300035617603, "scores": [0.848236290666748, 0.6173262752462382, 0.5820545570060592], "rank_score": 0.6825390409730151}
-{"id": "kazama-torisawa-2005-speeding", "title": "Speeding up Training with Tree Kernels for Node Relation Labeling", "abstract": "We present a method for speeding up the calculation of tree kernels during training. The calculation of tree kernels is still heavy even with efficient dynamic programming (DP) procedures.
Our method maps trees into a small feature space where the inner product, which can be calculated much faster, yields the same value as the tree kernel for most tree pairs. The training is sped up by using the DP procedure only for the exceptional pairs. We describe an algorithm that detects such exceptional pairs and converts trees into vectors in a feature space. We propose tree kernels on marked labeled ordered trees and show that the training of SVMs for semantic role labeling using these kernels can be sped up by a factor of several tens.", "phrases": ["tree kernel", "node relation labeling", "calculation"], "overall_score": 0.9461570805498416, "scores": [0.9012799243565718, 0.6097133225656362, 0.5365309450946463], "rank_score": 0.682508064005618} -{"id": "lin-och-2004-orange", "title": "ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", "abstract": "Comparisons of automatic evaluation metrics for machine translation are usually conducted on corpus level using correlation statistics such as Pearson's product moment correlation coefficient or Spearman's rank order correlation coefficient between human scores and automatic scores. However, such comparisons rely on human judgments of translation qualities such as adequacy and fluency. Unfortunately, these judgments are often inconsistent and very expensive to acquire. In this paper, we introduce a new evaluation method, Orange, for evaluating automatic machine translation evaluation metrics automatically without extra human involvement other than using a set of reference translations. We also show the results of comparing several existing automatic metrics and three new automatic metrics using Orange.", "phrases": ["evaluation metric", "machine translation", "human judgment"], "overall_score": 1.8923016561753814, "scores": [0.9102484773176341, 0.6142359288439576, 0.5230262552610546], "rank_score": 0.6825035538075488} -{"id": "ma-etal-2020-multi", "title": "Multi-resolution Annotations for Emoji Prediction", "abstract": "Emojis are able to express various linguistic components, including emotions, sentiments, events, etc. Predicting the proper emojis associated with text provides a way to summarize the text accurately, and it has been proven to be a good auxiliary task to many Natural Language Understanding (NLU) tasks. Labels in existing emoji prediction datasets are all passage-based and are usually under the multi-class classification setting. However, in many cases, one single emoji cannot fully cover the theme of a piece of text. It is thus useful to infer the part of text related to each emoji. The lack of multi-label and aspect-level emoji prediction datasets is one of the bottlenecks for this task. This paper annotates an emoji prediction dataset with passage-level multi-class/multi-label, and aspect-level multi-class annotations. We also present a novel annotation method with which we generate the aspect-level annotations. The annotations are generated heuristically, taking advantage of the self-attention mechanism in Transformer networks. We validate the annotations both automatically and manually to ensure their quality. 
We also benchmark the dataset with a pre-trained BERT model.", "phrases": ["emoji prediction", "passage-level multi-class", "aspect-level multi-class annotation"], "overall_score": 0.7497649956259068, "scores": [0.9480503340450973, 0.5667731658799051, 0.532573029386196], "rank_score": 0.6824655097703994}
-{"id": "pitler-2014-crossing", "title": "A Crossing-Sensitive Third-Order Factorization for Dependency Parsing", "abstract": "Parsers that parametrize over wider scopes are generally more accurate than edge-factored models. For graph-based non-projective parsers, wider factorizations have so far implied large increases in the computational complexity of the parsing problem. This paper introduces a \u201ccrossing-sensitive\u201d generalization of a third-order factorization that trades off complexity in the model structure (i.e., scoring with features over multiple edges) with complexity in the output structure (i.e., producing crossing edges). Under this model, the optimal 1-Endpoint-Crossing tree can be found in O(n^4) time, matching the asymptotic run-time of both the third-order projective parser and the edge-factored 1-Endpoint-Crossing parser. The crossing-sensitive third-order parser is significantly more accurate than the third-order projective parser under many experimental settings and significantly less accurate on none.", "phrases": ["third-order factorization", "graph-based non-projective parser", "1-endpoint-crossing parser"], "overall_score": 1.3279918404192774, "scores": [0.8881660517929316, 0.5863184500601684, 0.5728739145629633], "rank_score": 0.6824528054720211}
-{"id": "wang-etal-2019-youmakeup", "title": "YouMakeup: A Large-Scale Domain-Specific Multimodal Dataset for Fine-Grained Semantic Comprehension", "abstract": "Multimodal semantic comprehension has recently attracted increasing research interest, in areas such as visual question answering and caption generation. However, due to data limitations, fine-grained semantic comprehension, which requires capturing the semantic details of multimodal content, has not been well investigated. In this work, we introduce \u201cYouMakeup\u201d, a large-scale multimodal instructional video dataset to support fine-grained semantic comprehension research in a specific domain. YouMakeup contains 2,800 videos from YouTube, spanning more than 420 hours in total. Each video is annotated with a sequence of natural language descriptions for instructional steps, grounded in temporal video range and spatial facial areas. The annotated steps in a video involve subtle differences in actions, products and regions, which requires fine-grained understanding and reasoning both temporally and spatially. In order to evaluate models' ability for fine-grained comprehension, we further propose two groups of tasks, including generation tasks and visual question answering, from different aspects. We also establish a baseline of step caption generation for future comparison.
The dataset will be publicly available at to support research investigation in fine-grained semantic comprehension.", "phrases": ["fine-grained semantic comprehension", "natural language description", "instructional step"], "overall_score": 0.749721535718138, "scores": [0.9645395837573995, 0.5550939701076071, 0.5276442987076818], "rank_score": 0.6824259508575627}
-{"id": "gehrmann-etal-2019-generating", "title": "Generating Abstractive Summaries with Finetuned Language Models", "abstract": "Neural abstractive document summarization is commonly approached by models that exhibit a mostly extractive behavior. This behavior is facilitated by a copy-attention which allows models to copy words from a source document. While models in the mostly extractive news summarization domain benefit from this inductive bias, they commonly fail to paraphrase or compress information from the source document. Recent advances in transfer-learning from large pretrained language models give rise to alternative approaches that do not rely on copy-attention and instead learn to generate concise and abstractive summaries. In this paper, as part of the TL;DR challenge, we compare the abstractiveness of summaries from different summarization approaches and show that transfer-learning can be efficiently utilized without any changes to the model architecture. We demonstrate that the approach leads to a higher level of abstraction for a similar performance on the TL;DR challenge tasks, enabling true natural language compression.", "phrases": ["language model", "advance", "abstractive summarization"], "overall_score": 1.3276194056074206, "scores": [0.8269585721313701, 0.6204987967687612, 0.5993268666185703], "rank_score": 0.6822614118395672}
-{"id": "li-etal-2018-guiding", "title": "Guiding Generation for Abstractive Text Summarization Based on Key Information Guide Network", "abstract": "Neural network models, based on the attentional encoder-decoder model, have good capabilities in abstractive text summarization. However, these models are hard to control in the process of generation, which leads to a lack of key information. We propose a guiding generation model that combines the extractive method and the abstractive method. Firstly, we obtain keywords from the text with an extractive model. Then, we introduce a Key Information Guide Network (KIGN), which encodes the keywords into the key information representation, to guide the process of generation. In addition, we use a prediction-guide mechanism, which can obtain the long-term value for future decoding, to further guide the summary generation. We evaluate our model on the CNN/Daily Mail dataset. The experimental results show that our model leads to significant improvements.", "phrases": ["abstractive text summarization", "input document", "generation process"], "overall_score": 1.3271832555946297, "scores": [0.9682408106664196, 0.5555781794636864, 0.5222928350828021], "rank_score": 0.6820372750709693}
-{"id": "samardzic-merlo-2014-likelihood", "title": "Likelihood of External Causation in the Structure of Events", "abstract": "This article addresses the causal structure of events described by verbs: whether an event happens spontaneously or is caused by an external causer. We automatically estimate the likelihood of external causation of events based on the distribution of causative and anticausative uses of verbs in the causative alternation. We train a Bayesian model and test it on a monolingual and on a bilingual input.
The performance is evaluated against an independent scale of likelihood of external causation based on typological data. The accuracy of a two-way classification is 85% in both monolingual and bilingual settings. On the task of a three-way classification, the score is 61% in the monolingual setting and 69% in the bilingual setting.", "phrases": ["external causation", "temporal relation", "multi-sieve approach"], "overall_score": 0.9450414790244632, "scores": [0.9459035555079035, 0.5548046839377038, 0.5444017433888396], "rank_score": 0.6817033276114822}
-{"id": "bethard-etal-2008-building", "title": "Building a Corpus of Temporal-Causal Structure", "abstract": "While recent corpus annotation efforts cover a wide variety of semantic structures, work on temporal and causal relations is still in its early stages. Annotation efforts have typically considered either temporal relations or causal relations, but not both, and no corpora currently exist that allow the relation between temporals and causals to be examined empirically. We have annotated a corpus of 1000 event pairs for both temporal and causal relations, focusing on a relatively frequent construction in which the events are conjoined by the word \u201cand\u201d. Temporal relations were annotated using an extension of the BEFORE and AFTER scheme used in the TempEval competition, and causal relations were annotated using a scheme based on connective phrases like \u201cand as a result\u201d. The annotators achieved 81.2% agreement on temporal relations and 77.8% agreement on causal relations. Analysis of the resulting corpus revealed some interesting findings, for example, that over 30% of CAUSAL relations do not have an underlying BEFORE relation. The corpus was also explored using machine learning methods, and while model performance exceeded all baselines, the results suggested that simple grammatical cues may be insufficient for identifying the more difficult temporal and causal relations.", "phrases": ["temporal relation", "event pair", "connective phrase"], "overall_score": 1.6346466697130333, "scores": [0.8973858049353463, 0.6259423923786287, 0.5217736320983442], "rank_score": 0.6817006098041064}
-{"id": "nicolai-etal-2013-cognate", "title": "Cognate and Misspelling Features for Natural Language Identification", "abstract": "We apply Support Vector Machines to differentiate between 11 native languages in the 2013 Native Language Identification Shared Task. We expand a set of common language identification features to include cognate interference and spelling mistakes. Our best results are obtained with a classifier which includes both the cognate and the misspelling features, as well as word unigrams, word bigrams, character bigrams, and syntax production rules.", "phrases": ["natural language identification", "cognate interference", "spelling error"], "overall_score": 1.4175356288675018, "scores": [0.8956133115023258, 0.5862506111725675, 0.5632076993756696], "rank_score": 0.6816905406835211}
-{"id": "li-etal-2016-discourse", "title": "Discourse Parsing with Attention-based Hierarchical Neural Networks", "abstract": "RST-style document-level discourse parsing remains a difficult task, and efficient deep learning models for this task have rarely been presented. In this paper, we propose an attention-based hierarchical neural network model for discourse parsing. We also incorporate a tensor-based transformation function to model complicated feature interactions.
Experimental results show that our approach obtains performance comparable to contemporary state-of-the-art systems with little manual feature engineering.", "phrases": ["neural network model", "discourse unit", "hierarchical attention"], "overall_score": 1.9689789976513488, "scores": [0.8734673123658612, 0.5959842520995474, 0.5742083823275845], "rank_score": 0.6812199822643311}
-{"id": "srikumar-etal-2012-amortizing", "title": "On Amortizing Inference Cost for Structured Prediction", "abstract": "This paper deals with the problem of predicting structures in the context of NLP. Typically, in structured prediction, an inference procedure is applied to each example independently of the others. In this paper, we seek to optimize the time complexity of inference over entire datasets, rather than individual examples. By considering the general inference representation provided by integer linear programs, we propose three exact inference theorems which allow us to re-use earlier solutions for certain instances, thereby completely avoiding possibly expensive calls to the inference procedure. We also identify several approximation schemes which can provide further speedup. We instantiate these ideas to the structured prediction task of semantic role labeling and show that we can achieve a speedup of over 2.5 using our approach while retaining the guarantees of exactness and a further speedup of over 3 using approximations that do not degrade performance.", "phrases": ["structured prediction", "integer linear program", "ilp"], "overall_score": 0.9440799935317988, "scores": [0.9718140030366608, 0.5426307848522921, 0.5285844994172634], "rank_score": 0.6810097624354055}
-{"id": "jie-etal-2019-better", "title": "Better Modeling of Incomplete Annotations for Named Entity Recognition", "abstract": "Supervised approaches to named entity recognition (NER) are largely developed based on the assumption that the training data is fully annotated with named entity information. However, in practice, annotated data can often be imperfect, with one typical issue being that the training data may contain incomplete annotations. We highlight several pitfalls associated with learning under such a setup in the context of NER and identify limitations of existing approaches, proposing a novel yet easy-to-implement approach for recognizing named entities with incomplete data annotations. We demonstrate the effectiveness of our approach through extensive experiments.", "phrases": ["incomplete annotation", "label noise", "many study"], "overall_score": 1.4159671039823412, "scores": [0.955075533187486, 0.5471985068829036, 0.5405346789068415], "rank_score": 0.680936239659077}
-{"id": "kim-etal-2019-probing", "title": "Probing What Different NLP Tasks Teach Machines about Function Word Comprehension", "abstract": "We introduce a set of nine challenge tasks that test for the understanding of function words. These tasks are created by structurally mutating sentences from existing datasets to target the comprehension of specific types of function words (e.g., prepositions, wh-words). Using these probing tasks, we explore the effects of various pretraining objectives for sentence encoders (e.g., language modeling, CCG supertagging and natural language inference (NLI)) on the learned representations. Our results show that pretraining on CCG\u2014our most syntactic objective\u2014performs the best on average across our probing tasks, suggesting that syntactic knowledge helps function word comprehension.
Language modeling also shows strong performance, supporting its widespread use for pretraining state-of-the-art NLP models. Overall, no pretraining objective dominates across the board, and our function word probing tasks highlight several intuitive differences between pretraining objectives, e.g., that NLI helps the comprehension of negation.", "phrases": ["function word comprehension", "preposition", "language model", "training objective", "nli dataset"], "overall_score": 1.968140743650886, "scores": [0.9214566431523331, 0.8604245288875467, 0.5456212320887878, 0.5397481692215446, 0.5373992580670625], "rank_score": 0.680929966283455}
-{"id": "niculae-etal-2017-argument", "title": "Argument Mining with Structured SVMs and RNNs", "abstract": "We propose a novel factor graph model for argument mining, designed for settings in which the argumentative relations in a document do not necessarily form a tree structure. (This is the case in over 20% of the web comments dataset we release.) Our model jointly learns elementary unit type classification and argumentative relation prediction. Moreover, our model supports SVM and RNN parametrizations, can enforce structure constraints (e.g., transitivity), and can express dependencies between adjacent relations and propositions. Our approaches outperform unstructured baselines in both web comments and argumentative essay datasets.", "phrases": ["factor graph model", "proposition", "argument mining"], "overall_score": 1.7464735301646601, "scores": [0.9911940693658182, 0.5269585971135822, 0.5245467635316847], "rank_score": 0.680899810003695}
-{"id": "gari-soler-etal-2019-comparison", "title": "A Comparison of Context-sensitive Models for Lexical Substitution", "abstract": "Word embedding representations provide good estimates of word meaning and give state-of-the-art performance in semantic tasks. Embedding approaches differ as to whether and how they account for the context surrounding a word. We present a comparison of different word and context representations on the task of proposing substitutes for a target word in context (lexical substitution). We also experiment with tuning contextualized word embeddings on a dataset of sense-specific instances for each target word. We show that powerful contextualized word representations, which give high performance in several semantics-related tasks, deal less well with the subtle in-context similarity relationships needed for substitution. This is better handled by models trained with this objective in mind, where the inter-dependence between word and context representations is explicitly modeled during training.", "phrases": ["lexical substitution", "substitute", "elmo embedding"], "overall_score": 1.2197690594611583, "scores": [0.9595229352026279, 0.5508170628941835, 0.5319582239742664], "rank_score": 0.6807660740236926}
-{"id": "tran-etal-2016-unsupervised", "title": "Unsupervised Neural Hidden Markov Models", "abstract": "In this work, we present the first results for neuralizing an Unsupervised Hidden Markov Model. We evaluate our approach on tag induction.
Our approach outperforms existing generative models and is competitive with the state of the art, though with a simpler model that is easily extended to include additional context.", "phrases": ["generative model", "hmm", "word embedding"], "overall_score": 1.567272110696978, "scores": [0.8533031219610624, 0.6033222135280962, 0.5853475524605214], "rank_score": 0.6806576293165599}
-{"id": "stadler-etal-2021-observing", "title": "Observing the Learning Curve of NMT Systems With Regard to Linguistic Phenomena", "abstract": "In this paper we present our observations and evaluations of the linguistic performance of various English-to-German Neural Machine Translation models at several steps of the training process. The linguistic performance is measured through a semi-automatic process using a test suite. Among several linguistic observations, we find that the translation quality of some linguistic categories decreased over the recorded iterations. Additionally, we notice some drops in the translation quality of certain categories when using a larger corpus.", "phrases": ["regard", "linguistic phenomena", "translation quality", "iteration"], "overall_score": 0.7477058981028044, "scores": [0.8597191446137945, 0.8097593363104286, 0.5300908367144178, 0.5227956360950449], "rank_score": 0.6805912384334215}
-{"id": "takeoka-etal-2021-low", "title": "Low-resource Taxonomy Enrichment with Pretrained Language Models", "abstract": "Taxonomies are symbolic representations of hierarchical relationships between terms or entities. While taxonomies are useful in broad applications, manually updating or maintaining them is labor-intensive and difficult to scale in practice. Conventional supervised methods for this enrichment task fail to find optimal parents of new terms in low-resource settings where only small taxonomies are available because of overfitting to hierarchical relationships in the taxonomies. To tackle the problem of low-resource taxonomy enrichment, we propose Musubu, an efficient framework for taxonomy enrichment in low-resource settings with pretrained language models (LMs) as knowledge bases to compensate for the shortage of information. Musubu leverages an LM-based classifier to determine whether or not inputted term pairs have hierarchical relationships. Musubu also utilizes Hearst patterns to generate queries to leverage implicit knowledge from the LM efficiently for more accurate prediction. We empirically demonstrate the effectiveness of our method in extensive experiments on taxonomies from both a SemEval task and real-world retailer datasets.", "phrases": ["language model", "low-resource taxonomy enrichment", "knowledge basis"], "overall_score": 1.0952653100103764, "scores": [0.9700164377744038, 0.545862262582249, 0.5257010988054778], "rank_score": 0.6805265997207103}
-{"id": "ture-etal-2012-combining", "title": "Combining Statistical Translation Techniques for Cross-Language Information Retrieval", "abstract": "Cross-language information retrieval today is dominated by techniques that rely principally on context-independent token-to-token mappings despite the fact that state-of-the-art statistical machine translation systems now have far richer translation models available in their internal representations. This paper explores combination-of-evidence techniques using three types of statistical translation models: context-independent token translation, token translation using phrase-dependent contexts, and token translation using sentence-dependent contexts.
Context-independent translation is performed using statistically-aligned tokens in parallel text, phrase-dependent translation is performed using aligned statistical phrases, and sentence-dependent translation is performed using those same aligned phrases together with an n-gram language model. Experiments on retrieval of Arabic, Chinese, and French documents using English queries show that no one technique is optimal for all queries, but that statistically significant improvements in mean average precision over strong baselines can be achieved by combining translation evidence from all three techniques. The optimal combination is, however, found to be resource-dependent, indicating a need for future work on robust tuning to the characteristics of individual collections.", "phrases": ["cross-language information retrieval", "internal representation", "paradigm"], "overall_score": 1.324191507822408, "scores": [0.9777787640835813, 0.5426558216159255, 0.5210648768506019], "rank_score": 0.6804998208500362}
-{"id": "cross-huang-2016-incremental", "title": "Incremental Parsing with Minimal Features Using Bi-Directional LSTM", "abstract": "Recently, neural network approaches for parsing have largely automated the combination of individual features, but still rely on (often a larger number of) atomic features created from human linguistic intuition, potentially omitting important global context. To further reduce feature engineering to the bare minimum, we use bi-directional LSTM sentence representations to model a parser state with only three sentence positions, which automatically identifies important aspects of the entire sentence. This model achieves state-of-the-art results among greedy dependency parsers for English. We also introduce a novel transition system for constituency parsing which does not require binarization, and together with the above architecture, achieves state-of-the-art results among greedy parsers for both English and Chinese.", "phrases": ["bi-directional lstm", "dependency parser", "binarization"], "overall_score": 1.7449552576676246, "scores": [0.9148085147970313, 0.5951413890386794, 0.5309737338082244], "rank_score": 0.6803078792146451}
-{"id": "tackstrom-etal-2015-efficient", "title": "Efficient Inference and Structured Learning for Semantic Role Labeling", "abstract": "We present a dynamic programming algorithm for efficient constrained inference in semantic role labeling. The algorithm tractably captures a majority of the structural constraints examined by prior work in this area, which has resorted to either approximate methods or off-the-shelf integer linear programming solvers. In addition, it allows training a globally-normalized log-linear model with respect to constrained conditional likelihood. We show that the dynamic program is several times faster than an off-the-shelf integer linear programming solver, while reaching the same solution. Furthermore, we show that our structured model results in significant improvements over its local counterpart, achieving state-of-the-art results on both PropBank- and FrameNet-annotated corpora.", "phrases": ["semantic role labeling", "program", "graphical model"], "overall_score": 1.566361376374186, "scores": [0.9533001839403459, 0.5629836905762957, 0.5245024327604327], "rank_score": 0.6802621024256914}
-{"id": "abad-etal-2017-self", "title": "Self-Crowdsourcing Training for Relation Extraction", "abstract": "In this paper we introduce a self-training strategy for crowdsourcing.
The training examples are automatically selected to train the crowd workers. Our experimental results show an improvement of 5% in terms of F1 on the relation extraction task, compared to the method based on distant supervision.", "phrases": ["relation extraction", "crowdsourcing", "distant supervision"], "overall_score": 0.9429855167373214, "scores": [0.9077870659859674, 0.5929320392497976, 0.5399416877048103], "rank_score": 0.6802202643135251}
-{"id": "koppel-etal-2011-unsupervised", "title": "Unsupervised Decomposition of a Document into Authorial Components", "abstract": "We propose a novel unsupervised method for separating out distinct authorial components of a document. In particular, we show that, given a book artificially \"munged\" from two thematically similar biblical books, we can separate out the two constituent books almost perfectly. This allows us to automatically recapitulate many conclusions reached by Bible scholars over centuries of research. One of the key elements of our method is exploitation of differences in synonym choice by different authors.", "phrases": ["authorial component", "unsupervised method", "book"], "overall_score": 1.2180212428913313, "scores": [0.8727658649483508, 0.6040813315066458, 0.5625246006134329], "rank_score": 0.6797905990228098}
-{"id": "vijay-etal-2018-corpus", "title": "Corpus Creation and Emotion Prediction for Hindi-English Code-Mixed Social Media Text", "abstract": "Emotion Prediction is a Natural Language Processing (NLP) task dealing with detection and classification of emotions in various monolingual and bilingual texts. While some work has been done on code-mixed social media text and in emotion prediction separately, our work is the first attempt that aims at identifying the emotion associated with Hindi-English code-mixed social media text. In this paper, we analyze the problem of emotion identification in code-mixed content and present a Hindi-English code-mixed corpus extracted from Twitter and annotated with the associated emotion. For every tweet in the dataset, we annotate the source language of all the words present, and also the causal language of the expressed emotion. Finally, we propose a supervised classification system which uses various machine learning techniques for detecting the emotion associated with the text using a variety of character level, word level, and lexicon based features.", "phrases": ["emotion prediction", "hindi-english code-mixed corpus", "social medium text"], "overall_score": 1.3226205786236787, "scores": [0.9007781355567201, 0.5857680544081473, 0.5525313788516197], "rank_score": 0.679692522938829}
-{"id": "beigman-klebanov-flor-2013-argumentation", "title": "Argumentation-Relevant Metaphors in Test-Taker Essays", "abstract": "This article discusses metaphor annotation in a corpus of argumentative essays written by test-takers during a standardized examination for graduate school admission. The quality of argumentation being the focus of the project, we developed a metaphor annotation protocol that targets metaphors that are relevant for the writer\u2019s arguments. The reliability of the protocol is 0.58, on a set of 116 essays (a total of about 30K content-word tokens). We found a moderate-to-strong correlation (r=0.51-0.57) between the percentage of metaphorically used words in an essay and the writing quality score.
We also describe encouraging findings regarding the potential of metaphor identification to contribute to automated scoring of essays.", "phrases": ["essay", "argumentation", "metaphor detection"], "overall_score": 1.493430952010951, "scores": [0.9110434681047009, 0.5712700330431381, 0.556755651020705], "rank_score": 0.6796897173895147} -{"id": "bjerva-etal-2019-probabilistic", "title": "A Probabilistic Generative Model of Linguistic Typology", "abstract": "In the principles-and-parameters framework, the structural features of languages depend on parameters that may be toggled on or off, with a single parameter often dictating the status of multiple features. The implied covariance between features inspires our probabilisation of this line of linguistic inquiry\u2014we develop a generative model of language based on exponential-family matrix factorisation. By modelling all languages and features within the same architecture, we show how structural similarities between languages can be exploited to predict typological features with near-perfect accuracy, outperforming several baselines on the task of predicting held-out features. Furthermore, we show that language embeddings pre-trained on monolingual text allow for generalisation to unobserved languages. This finding has clear practical and also theoretical implications: the results confirm what linguists have hypothesised, i.e. that there are significant correlations between typological features and languages.", "phrases": ["generative model", "typological feature", "language embedding"], "overall_score": 0.942248011349062, "scores": [0.9017348182133473, 0.5765295712569873, 0.5608004104208343], "rank_score": 0.6796882666303897} -{"id": "steingrimsson-etal-2021-combalign", "title": "CombAlign: a Tool for Obtaining High-Quality Word Alignments", "abstract": "Being able to generate accurate word alignments is useful for a variety of tasks. While statistical word aligners can work well, especially when parallel training data are plentiful, multilingual embedding models have recently been shown to give good results in unsupervised scenarios. We evaluate an ensemble method for word alignment on four language pairs and demonstrate that by combining multiple tools, taking advantage of their different approaches, substantial gains can be made. This holds for settings ranging from very low-resource to high-resource. Furthermore, we introduce a new gold alignment test set for Icelandic and a new easy-to-use tool for creating manual word alignments.", "phrases": ["word alignment", "ensemble method", "combalign"], "overall_score": 0.9419680740948415, "scores": [0.8721080665314344, 0.6260070913420774, 0.5403438458850215], "rank_score": 0.6794863345861778} -{"id": "tian-etal-2017-facebook", "title": "Facebook sentiment: Reactions and Emojis", "abstract": "Emojis are used frequently in social media. A widely assumed view is that emojis express the emotional state of the user, which has led to research focusing on the expressiveness of emojis independent from the linguistic context. We argue that emojis and the linguistic texts can modify the meaning of each other. The overall communicated meaning is not a simple sum of the two channels. In order to study the meaning interplay, we need data indicating the overall sentiment of the entire message as well as the sentiment of the emojis stand-alone. We propose that Facebook Reactions are a good data source for such a purpose. FB reactions (e.g. 
\u201cLove\u201d and \u201cAngry\u201d) indicate the readers' overall sentiment, against which we can investigate the types of emojis used in the comments under different reaction profiles. We present a data set of 21,000 FB posts (57 million reactions and 8 million comments) from public media pages across four countries.", "phrases": ["emojis", "comment", "social medium"], "overall_score": 0.9408322260469866, "scores": [0.9663300841903144, 0.5422427563307651, 0.5274281397186881], "rank_score": 0.6786669934132559}
-{"id": "schulder-etal-2018-introducing", "title": "Introducing a Lexicon of Verbal Polarity Shifters for English", "abstract": "The sentiment polarity of a phrase depends not only on the polarities of its words, but also on how these are affected by their context. Negation words (e.g. not, no, never) can change the polarity of a phrase. Similarly, verbs and other content words can also act as polarity shifters (e.g. fail, deny, alleviate). While individually more sparse, they are far more numerous. Among verbs alone, there are more than 1200 shifters. However, sentiment analysis systems barely consider polarity shifters other than negation words. A major reason for this is the scarcity of lexicons and corpora that provide information on them. We introduce a lexicon of verbal polarity shifters that covers the entirety of verbs found in WordNet. We provide a fine-grained annotation of individual word senses, as well as information for each verbal shifter on the syntactic scopes that it can affect.", "phrases": ["polarity shifter", "word sense", "verbal shifter"], "overall_score": 0.940790182019968, "scores": [0.9199221605890928, 0.5615209403841468, 0.5544668942026096], "rank_score": 0.6786366650586165}
-{"id": "xu-etal-2020-fact", "title": "Fact-based Content Weighting for Evaluating Abstractive Summarisation", "abstract": "Abstractive summarisation is notoriously hard to evaluate since standard word-overlap-based metrics are insufficient. We introduce a new evaluation metric which is based on fact-level content weighting, i.e. relating the facts of the document to the facts of the summary. We follow the assumption that a good summary will reflect all relevant facts, i.e. the ones present in the ground truth (human-generated reference summary). We confirm this hypothesis by showing that our weightings are highly correlated to human perception and compare favourably to the recent manual highlight-based metric of Hardy et al. (2019).", "phrases": ["abstractive summarisation", "evaluation metric", "contextual embedding"], "overall_score": 1.411150113367375, "scores": [0.9339559375353967, 0.56475035237453, 0.5371529805950839], "rank_score": 0.6786197568350035}
-{"id": "liu-etal-2021-mathematical", "title": "Mathematical Word Problem Generation from Commonsense Knowledge Graph and Equations", "abstract": "There is an increasing interest in the use of mathematical word problem (MWP) generation in educational assessment. Different from standard natural question generation, MWP generation needs to maintain the underlying mathematical operations between quantities and variables, while at the same time ensuring the relevance between the output and the given topic. To address the above problem, we develop an end-to-end neural model to generate diverse MWPs in real-world scenarios from a commonsense knowledge graph and equations.
The proposed model (1) learns both representations from edge-enhanced Levi graphs of symbolic equations and commonsense knowledge; (2) automatically fuses equation and commonsense knowledge information via a self-planning module when generating the MWPs. Experiments on an educational gold-standard set and a large-scale generated MWP set show that our approach is superior on the MWP generation task, and it outperforms the SOTA models in terms of both automatic evaluation metrics, i.e., BLEU-4, ROUGE-L, Self-BLEU, and human evaluation metrics, i.e., equation relevance, topic relevance, and language coherence. To encourage reproducible results, we make our code and MWP dataset publicly available at .", "phrases": ["commonsense knowledge graph", "equation", "mwp generation"], "overall_score": 0.9406656814559206, "scores": [0.9367554071483022, 0.5517566438818929, 0.5471285196261461], "rank_score": 0.678546856885447}
-{"id": "zhu-etal-2020-crosswoz", "title": "CrossWOZ: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset", "abstract": "To advance multi-domain (cross-domain) dialogue modeling as well as alleviate the shortage of Chinese task-oriented datasets, we propose CrossWOZ, the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of dialogue states and dialogue acts on both user and system sides. About 60% of the dialogues have cross-domain user goals that favor inter-domain dependency and encourage natural transition across domains in conversation. We also provide a user simulator and several benchmark models for pipelined task-oriented dialogue systems, which will help researchers compare and evaluate their models on this corpus. The large size and rich annotation of CrossWOZ make it suitable to investigate a variety of tasks in cross-domain dialogue modeling, such as dialogue state tracking, policy learning, user simulation, etc.", "phrases": ["large-scale chinese cross-domain", "conversation", "task-oriented dialogue system"], "overall_score": 1.5621065234758014, "scores": [0.8803594153698979, 0.6040722835673128, 0.5508110309346282], "rank_score": 0.678414243290613}
-{"id": "safi-samghabadi-etal-2020-aggression", "title": "Aggression and Misogyny Detection using BERT: A Multi-Task Approach", "abstract": "In recent times, the NLP community has increasingly focused on offensive language, aggression, and hate-speech detection. This paper presents our system for the TRAC-2 shared task on \u201cAggression Identification\u201d (sub-task A) and \u201cMisogynistic Aggression Identification\u201d (sub-task B). The data for this shared task is provided in three different languages - English, Hindi, and Bengali. Each data instance is annotated into one of the three aggression classes - Not Aggressive, Covertly Aggressive, Overtly Aggressive, as well as one of the two misogyny classes - Gendered and Non-Gendered. We propose an end-to-end neural model using attention on top of BERT that incorporates a multi-task learning paradigm to address both the sub-tasks simultaneously. Our team, \u201cna14\u201d, scored 0.8579 weighted F1-measure on the English sub-task B and secured 3rd rank out of 15 teams for the task. The code and the model weights are publicly available at .
", "phrases": ["misogyny", "detection", "bert", "aggression"], "overall_score": 1.4106439682037273, "scores": [0.8409773298407284, 0.7901478038620362, 0.5503131876973625, 0.5320670884498013], "rank_score": 0.6783763524624821}
-{"id": "li-etal-2017-nlp", "title": "An NLP Analysis of Exaggerated Claims in Science News", "abstract": "The discrepancy between science and media has been affecting the effectiveness of science communication. Original findings from science publications may be distorted with altered claim strength when reported to the public, causing misinformation to spread. This study conducts an NLP analysis of exaggerated claims in science news, and then constructs prediction models for identifying claim strength levels in science reporting. The results demonstrate the different writing styles that journal articles and news/press releases use for reporting scientific findings. Preliminary prediction models reached promising results with room for further improvement.", "phrases": ["exaggeration", "claim", "science news"], "overall_score": 1.0916929307117125, "scores": [0.915168511232543, 0.5751371801547192, 0.5446151756015961], "rank_score": 0.6783069556629527}
-{"id": "yoon-etal-2016-spoken", "title": "Spoken Text Difficulty Estimation Using Linguistic Features", "abstract": "We present an automated method for estimating the difficulty of spoken texts for use in generating items that assess non-native learners\u2019 listening proficiency. We collected information on the perceived difficulty of listening to various English monologue speech samples using a Likert-scale questionnaire distributed to 15 non-native English learners. We averaged the overall rating provided by three non-native learners at different proficiency levels into an overall score of listenability. We then trained a multiple linear regression model with the listenability score as the dependent variable and features from both natural language and speech processing as the independent variables. Our method demonstrated a correlation of 0.76 with the listenability score, comparable to the agreement between the non-native learners\u2019 ratings and the listenability score.", "phrases": ["linguistic feature", "listenability", "english learner"], "overall_score": 0.9403267884615939, "scores": [0.9174810653705276, 0.5718110502374946, 0.545615076184858], "rank_score": 0.6783023972642934}
-{"id": "liang-etal-2020-beyond", "title": "Beyond User Self-Reported Likert Scale Ratings: A Comparison Model for Automatic Dialog Evaluation", "abstract": "Open Domain dialog system evaluation is one of the most important challenges in dialog research. Existing automatic evaluation metrics, such as BLEU, are mostly reference-based. They calculate the difference between the generated response and a limited number of available references. Likert-score based self-reported user rating is widely adopted by social conversational systems, such as Amazon Alexa Prize chatbots. However, self-reported user rating suffers from bias and variance among different users. To alleviate this problem, we formulate dialog evaluation as a comparison task. We also propose an automatic evaluation model CMADE (Comparison Model for Automatic Dialog Evaluation) that automatically cleans self-reported user ratings as it trains on them.
Specifically, we first use a self-supervised method to learn better dialog feature representation, and then use KNN and Shapley to remove confusing samples. Our experiments show that CMADE achieves 89.2% accuracy in the dialog comparison task.", "phrases": ["automatic dialog evaluation", "user rating", "different user"], "overall_score": 1.091658498113455, "scores": [0.911002926724747, 0.5915198170476218, 0.532333940687934], "rank_score": 0.6782855614867677} -{"id": "shoemark-etal-2017-aye", "title": "Aye or naw, whit dae ye hink? Scottish independence and linguistic identity on social media", "abstract": "Political surveys have indicated a relationship between a sense of Scottish identity and voting decisions in the 2014 Scottish Independence Referendum. Identity is often reflected in language use, suggesting the intuitive hypothesis that individuals who support Scottish independence are more likely to use distinctively Scottish words than those who oppose it. In the first large-scale study of sociolinguistic variation on social media in the UK, we identify distinctively Scottish terms in a data-driven way, and find that these terms are indeed used at a higher rate by users of pro-independence hashtags than by users of anti-independence hashtags. However, we also find that in general people are less likely to use distinctively Scottish words in tweets with referendum-related hashtags than in their general Twitter activity. We attribute this difference to style shifting relative to audience, aligning with previous work showing that Twitter users tend to use fewer local variants when addressing a broader audience.", "phrases": ["scottish independence", "hashtag", "twitter user", "social medium"], "overall_score": 1.0914453058054947, "scores": [0.8571706003869084, 0.7801874480168409, 0.5476066970226701, 0.5276476452057912], "rank_score": 0.6781530976580525} -{"id": "lin-etal-2020-generating", "title": "Generating Informative Conversational Response using Recurrent Knowledge-Interaction and Knowledge-Copy", "abstract": "Knowledge-driven conversation approaches have attracted remarkable research attention recently. However, generating an informative response with multiple pieces of relevant knowledge without losing fluency and coherence is still one of the main challenges. To address this issue, this paper proposes a method that uses recurrent knowledge interaction among response decoding steps to incorporate appropriate knowledge. Furthermore, we introduce a knowledge copy mechanism using a knowledge-aware pointer network to copy words from external knowledge according to knowledge attention distribution. Our joint neural conversation model, which integrates recurrent Knowledge-Interaction and knowledge Copy (KIC), performs well on generating informative responses. 

Experiments demonstrate that our model with fewer parameters yields significant improvements over competitive baselines on two datasets, Wizard-of-Wikipedia (average Bleu +87%; abs.: 0.034) and DuConv (average Bleu +20%; abs.: 0.047), with different knowledge formats (textual & structured) and different languages (English & Chinese).", "phrases": ["recurrent knowledge-interaction", "conversation model", "response generation"], "overall_score": 1.3186069986287339, "scores": [0.8937385908669905, 0.6028012340076048, 0.5363500276227802], "rank_score": 0.6776299508324585} -{"id": "mei-zhai-2008-generating", "title": "Generating Impact-Based Summaries for Scientific Literature", "abstract": "In this paper, we present a study of a novel summarization problem, i.e., summarizing the impact of a scientific publication. Given a paper and its citation context, we study how to extract sentences that can represent the most influential content of the paper. We propose language modeling methods for solving this problem, and study how to incorporate features such as authority and proximity to accurately estimate the impact language model. Experimental results on a SIGIR publication collection show that the proposed methods are effective for generating impact-based summaries.", "phrases": ["scientific literature", "summarization", "influence"], "overall_score": 1.8787661351960663, "scores": [0.9205059898005086, 0.5730543775721233, 0.5393045723054846], "rank_score": 0.6776216465593722} -{"id": "schuster-etal-2021-get", "title": "Get Your Vitamin C! Robust Fact Verification with Contrastive Evidence", "abstract": "Typical fact verification models use retrieved written evidence to verify claims. Evidence sources, however, often change over time as more information is gathered and revised. In order to adapt, models must be sensitive to subtle differences in supporting evidence. We present VitaminC, a benchmark infused with challenging cases that require fact verification models to discern and adjust to slight factual changes. We collect over 100,000 Wikipedia revisions that modify an underlying fact, and leverage these revisions, together with additional synthetically constructed ones, to create a total of over 400,000 claim-evidence pairs. Unlike previous resources, the examples in VitaminC are contrastive, i.e., they contain evidence pairs that are nearly identical in language and content, with the exception that one supports a given claim while the other does not. We show that training using this design increases robustness\u2014improving accuracy by 10% on adversarial fact verification and 6% on adversarial natural language inference (NLI). Moreover, the structure of VitaminC leads us to define additional tasks for fact-checking resources: tagging relevant words in the evidence for verifying the claim, identifying factual revisions, and providing automatic edits via factually consistent text generation.", "phrases": ["fact verification", "revision", "wikipedia revision"], "overall_score": 1.624784027993873, "scores": [0.9439892966171272, 0.5589674796970753, 0.5298059299124107], "rank_score": 0.6775875687422044} -{"id": "eric-etal-2020-multiwoz", "title": "MultiWOZ 2.1: A Consolidated Multi-Domain Dialogue Dataset with State Corrections and State Tracking Baselines", "abstract": "MultiWOZ 2.0 (Budzianowski et al., 2018) is a recently released multi-domain dialogue dataset spanning 7 distinct domains and containing over 10,000 dialogues. 

Though immensely useful and one of the largest resources of its kind to date, MultiWOZ 2.0 has a few shortcomings. Firstly, there is substantial noise in the dialogue state annotations and dialogue utterances, which negatively impacts the performance of state-tracking models. Secondly, follow-up work (Lee et al., 2019) has augmented the original dataset with user dialogue acts. This leads to multiple co-existent versions of the same dataset with minor modifications. In this work we tackle the aforementioned issues by introducing MultiWOZ 2.1. To fix the noisy state annotations, we use crowdsourced workers to re-annotate state and utterances based on the original utterances in the dataset. This correction process results in changes to over 32% of state annotations across 40% of the dialogue turns. In addition, we fix 146 dialogue utterances by canonicalizing slot values in the utterances to the values in the dataset ontology. To address the second problem, we combined the contributions of the follow-up works into MultiWOZ 2.1. Hence, our dataset also includes user dialogue acts as well as multiple slot descriptions per dialogue state slot. We then benchmark a number of state-of-the-art dialogue state tracking models on the MultiWOZ 2.1 dataset and show the joint state tracking performance on the corrected state annotations. We are publicly releasing MultiWOZ 2.1 to the community, hoping that this dataset resource will allow for more effective models across various dialogue subproblems to be built in the future.", "phrases": ["multi-domain dialogue dataset", "state annotation", "dialog state tracking"], "overall_score": 1.9196689843515204, "scores": [0.9492894059562043, 0.5579448559785024, 0.5254425095256399], "rank_score": 0.6775589238201155} -{"id": "gebre-etal-2013-improving", "title": "Improving Native Language Identification with TF-IDF Weighting", "abstract": "This paper presents a Native Language Identification (NLI) system based on TF-IDF weighting schemes and using linear classifiers - support vector machines, logistic regressions and perceptrons. The system was one of the participants of the 2013 NLI Shared Task in the closed-training track, achieving 0.814 overall accuracy for a set of 11 native languages. This accuracy was only 2.2 percentage points lower than the winner\u2019s performance. Furthermore, with subsequent evaluations using 10-fold cross-validation (as given by the organizers) on the combined training and development data, the best average accuracy obtained is 0.8455 and the features that contributed to this accuracy are the TF-IDF of the combined unigrams and bigrams of words.", "phrases": ["native language identification", "tf-idf", "bigram"], "overall_score": 1.4882839590486872, "scores": [0.9067586995113797, 0.5799782821605163, 0.5453046781565113], "rank_score": 0.6773472199428023} -{"id": "tekiroglu-etal-2020-generating", "title": "Generating Counter Narratives against Online Hate Speech: Data and Strategies", "abstract": "Recently, research has started focusing on avoiding undesired effects that come with content moderation, such as censorship and overblocking, when dealing with hatred online. The core idea is to directly intervene in the discussion with textual responses that are meant to counter the hate content and prevent it from further spreading. Accordingly, automation strategies, such as natural language generation, are beginning to be investigated. 

Still, they suffer from a lack of sufficient quality data and tend to produce generic/repetitive responses. Being aware of the aforementioned limitations, we present a study on how to collect responses to hate effectively, employing large-scale unsupervised language models such as GPT-2 for the generation of silver data, and the best annotation strategies/neural architectures that can be used for data filtering before expert validation/post-editing.", "phrases": ["hate speech", "textual response", "counter-narratives"], "overall_score": 1.7372035243535628, "scores": [0.9289023507860633, 0.5814017006711817, 0.5215530524266626], "rank_score": 0.677285701294636} -{"id": "zhuang-zong-2010-joint", "title": "Joint Inference for Bilingual Semantic Role Labeling", "abstract": "We show that jointly performing semantic role labeling (SRL) on bitext can improve SRL results on both sides. In our approach, we first use monolingual SRL systems to produce argument candidates for predicates in bitext. Then, we simultaneously generate SRL results for two sides of bitext using our joint inference model. Our model prefers the bilingual SRL result that is not only reasonable on each side of bitext, but also has more consistent argument structures between two sides. To evaluate the consistency between two argument structures, we also formulate a log-linear model to compute the probability of aligning two arguments. We have experimented with our model on Chinese-English parallel Prop-Bank data. Using our joint inference model, F1 scores of SRL results on Chinese and English text achieve 79.53% and 77.87% respectively, which are 1.52 and 1.74 points higher than the results of baseline monolingual SRL combination systems respectively.", "phrases": ["semantic role", "consistency", "joint inference"], "overall_score": 1.0897608929100249, "scores": [0.8781764450753725, 0.6257080548813863, 0.5274350392888643], "rank_score": 0.6771065130818744} -{"id": "chinkina-meurers-2016-linguistically", "title": "Linguistically Aware Information Retrieval: Providing Input Enrichment for Second Language Learners", "abstract": "How can second language teachers retrieve texts that are rich in terms of the grammatical constructions to be taught, but also address the content of interest to the learners? We developed an Information Retrieval system that identifies the 87 grammatical constructions spelled out in the official English language curriculum of schools in Baden-W\u00fcrttemberg (Germany) and reranks the search results based on the selected (de)prioritization of grammatical forms. In combination with a visualization of the characteristics of the search results, the approach effectively supports teachers in prioritizing those texts that provide the targeted forms. 

The approach facilitates systematic input enrichment for language learners as a complement to the established notion of input enhancement: while input enrichment aims at richly representing the selected forms and categories in a text, input enhancement targets their presentation to make them more salient and support noticing.", "phrases": ["learner", "grammatical construction", "information retrieval system"], "overall_score": 1.3174500764989574, "scores": [0.9122216928417494, 0.5646779275450755, 0.5542066110163213], "rank_score": 0.6770354104677154} -{"id": "lardilleux-lepage-2009-sampling", "title": "Sampling-based Multilingual Alignment", "abstract": "We present a sub-sentential alignment method that extracts high quality multi-word alignments from sentence-aligned multilingual parallel corpora. Unlike other methods, it exploits low frequency terms, which makes it highly scalable. As it relies on alingual concepts, it can process any number of languages at once. Experiments have shown that it is competitive with state-of-the-art methods.", "phrases": ["aligner", "parallel corpora", "frequency term"], "overall_score": 1.4874039714411702, "scores": [0.8985694647111974, 0.5854227453810047, 0.5468479508772438], "rank_score": 0.6769467203231486} -{"id": "kim-mooney-2012-unsupervised", "title": "Unsupervised PCFG Induction for Grounded Language Learning with Highly Ambiguous Supervision", "abstract": "\"Grounded\" language learning employs training data in the form of sentences paired with relevant but ambiguous perceptual contexts. Borschinger et al. (2011) introduced an approach to grounded language learning based on unsupervised PCFG induction. Their approach works well when each sentence potentially refers to one of a small set of possible meanings, such as in the sportscasting task. However, it does not scale to problems with a large set of potential meanings for each sentence, such as the navigation instruction following task studied by Chen and Mooney (2011). This paper presents an enhancement of the PCFG approach that scales to such problems with highly-ambiguous supervision. Experimental results on the navigation task demonstrate the effectiveness of our approach.", "phrases": ["such problem", "highly-ambiguous supervision", "unsupervised pcfg induction"], "overall_score": 1.623203504967515, "scores": [0.9177235625390863, 0.5842116022689344, 0.5288501535264434], "rank_score": 0.6769284394448213} -{"id": "luyckx-daelemans-2008-personae", "title": "Personae: a Corpus for Author and Personality Prediction from Text", "abstract": "We present a new corpus for computational stylometry, more specifically authorship attribution and the prediction of author personality from text. Because of the large number of authors (145), the corpus will allow previously impossible studies of variation in features considered predictive for writing style. The innovative meta-information (personality profiles of the authors) associated with these texts allows the study of personality prediction, an aspect of style that is not yet well researched. In this paper, we describe the contents of the corpus and show its use in both authorship attribution and personality prediction. We focus on features that have been proven useful in the field of author recognition. Syntactic features like part-of-speech n-grams are generally accepted as not being under the author\u2019s conscious control and therefore providing good clues for predicting gender or authorship. 

We want to test whether these features are helpful for personality prediction and authorship attribution on a large set of authors. Both tasks are approached as text categorization tasks. First, a document representation is constructed based on feature selection from the linguistically analyzed corpus (using the Memory-Based Shallow Parser (MBSP)). These are associated with each of the 145 authors or each of the four components of the Myers-Briggs Type Indicator (Introverted-Extraverted, Sensing-iNtuitive, Thinking-Feeling, Judging-Perceiving). Authorship attribution on 145 authors achieves results around 50% accuracy. Preliminary results indicate that the first two personality dimensions can be predicted fairly accurately.", "phrases": ["personality prediction", "authorship attribution", "syntactic feature", "n-gram", "personae"], "overall_score": 1.3171677321119382, "scores": [0.8969814714793943, 0.8612145954633954, 0.5633782055200842, 0.5398664121547441, 0.5230108861586261], "rank_score": 0.6768903141552489} -{"id": "zhang-etal-2019-evidence", "title": "Evidence-based Trustworthiness", "abstract": "The information revolution brought with it information pollution. Information retrieval and extraction help us cope with abundant information from diverse sources. But some sources are of anonymous authorship, and some are of uncertain accuracy, so how can we determine what we should actually believe? Not all information sources are equally trustworthy, and simply accepting the majority view is often wrong. This paper develops a general framework for estimating the trustworthiness of information sources in an environment where multiple sources provide claims and supporting evidence, and each claim can potentially be produced by multiple sources. We consider two settings: one in which information sources directly assert claims, and a more realistic and challenging one, in which claims are inferred from evidence provided by sources, via (possibly noisy) NLP techniques. Our key contribution is to develop a family of probabilistic models that jointly estimate the trustworthiness of sources, and the credibility of claims they assert. This is done while accounting for the (possibly noisy) NLP needed to infer claims from evidence supplied by sources. We evaluate our framework on several datasets, showing strong results and significant improvement over baselines.", "phrases": ["trustworthiness", "information pollution", "claim"], "overall_score": 1.0892608721823818, "scores": [0.9306963244324458, 0.5765761743417703, 0.5231149994331415], "rank_score": 0.6767958327357859} -{"id": "onishi-etal-2016-large", "title": "Who did What: A Large-Scale Person-Centered Cloze Dataset", "abstract": "We have constructed a new \"Who-did-What\" dataset of over 200,000 fill-in-the-gap (cloze) multiple choice reading comprehension problems constructed from the LDC English Gigaword newswire corpus. The WDW dataset has a variety of novel features. First, in contrast with the CNN and Daily Mail datasets (Hermann et al., 2015) we avoid using article summaries for question formation. Instead, each problem is formed from two independent articles --- an article given as the passage to be read and a separate article on the same events used to form the question. Second, we avoid anonymization --- each choice is a person named entity. Third, the problems have been filtered to remove a fraction that are easily solved by simple baselines, while remaining 84% solvable by humans. 

We report performance benchmarks of standard systems and propose the WDW dataset as a challenge task for the community.", "phrases": ["cloze dataset", "who-did-what", "passage"], "overall_score": 2.091967850694986, "scores": [0.9345465944517096, 0.5552352666944493, 0.5405699204239691], "rank_score": 0.6767839271900428} -{"id": "garcia-etal-2021-harnessing", "title": "Harnessing Multilinguality in Unsupervised Machine Translation for Rare Languages", "abstract": "Unsupervised translation has reached impressive performance on resource-rich language pairs such as English-French and English-German. However, early studies have shown that in more realistic settings involving low-resource, rare languages, unsupervised translation performs poorly, achieving less than 3.0 BLEU. In this work, we show that multilinguality is critical to making unsupervised systems practical for low-resource settings. In particular, we present a single model for 5 low-resource languages (Gujarati, Kazakh, Nepali, Sinhala, and Turkish) to and from English directions, which leverages monolingual and auxiliary parallel data from other high-resource language pairs via a three-stage training scheme. We outperform all current state-of-the-art unsupervised baselines for these languages, achieving gains of up to 14.4 BLEU. Additionally, we outperform strong supervised baselines for various language pairs as well as match the performance of the current state-of-the-art supervised model for Nepali-English. We conduct a series of ablation studies to establish the robustness of our model under different degrees of data quality, as well as to analyze the factors which led to the superior performance of the proposed approach over traditional unsupervised models.", "phrases": ["unsupervised translation", "low-resource language", "auxiliary parallel data"], "overall_score": 1.3169519824814706, "scores": [0.9093197095331418, 0.5951067259286325, 0.5259118868715799], "rank_score": 0.6767794407777847} -{"id": "mishra-etal-2019-abusive", "title": "Abusive Language Detection with Graph Convolutional Networks", "abstract": "Abuse on the Internet represents a significant societal problem of our time. Previous research on automated abusive language detection in Twitter has shown that community-based profiling of users is a promising technique for this task. However, existing approaches only capture shallow properties of online communities by modeling follower\u2013following relationships. In contrast, working with graph convolutional networks (GCNs), we present the first approach that captures not only the structure of online communities but also the linguistic behavior of the users within them. We show that such a heterogeneous graph-structured modeling of communities significantly advances the current state of the art in abusive language detection.", "phrases": ["convolutional network", "art", "abusive language detection"], "overall_score": 1.5581195589748975, "scores": [0.9418832957791184, 0.5650100158389462, 0.5231548682069143], "rank_score": 0.6766827266083263} -{"id": "darwish-etal-2014-verifiably", "title": "Verifiably Effective Arabic Dialect Identification", "abstract": "Several recent papers on Arabic dialect identification have hinted that using a word unigram model is sufficient and effective for the task. However, most previous work was done on a standard fairly homogeneous dataset of dialectal user comments. 
In this paper, we show that training on the standard dataset does not generalize, because a unigram model may be tuned to topics in the comments and does not capture the distinguishing features of dialects. We show that effective dialect identification requires that we account for the distinguishing lexical, morphological, and phonological phenomena of dialects. We show that accounting for such phenomena can improve dialect detection accuracy by nearly 10% absolute.", "phrases": ["arabic dialect identification", "word unigram model", "egyptian"], "overall_score": 1.832221306155017, "scores": [0.9867650410901861, 0.5215727595821958, 0.5214118384694033], "rank_score": 0.6765832130472619} -{"id": "zhu-etal-2010-unified", "title": "A Unified Framework for Scope Learning via Simplified Shallow Semantic Parsing", "abstract": "This paper approaches the scope learning problem via simplified shallow semantic parsing. This is done by regarding the cue as the predicate and mapping its scope into several constituents as the arguments of the cue. Evaluation on the BioScope corpus shows that the structural information plays a critical role in capturing the relationship between a cue and its dominated arguments. It also shows that our parsing approach significantly outperforms the state-of-the-art chunking ones. Although our parsing approach is only evaluated on negation and speculation scope learning here, it is portable to other kinds of scope learning.", "phrases": ["scope learning", "shallow semantic parsing", "bioscope corpus"], "overall_score": 0.9379083170101076, "scores": [0.8435549390234167, 0.6375118560647708, 0.5486067215503064], "rank_score": 0.676557838879498} -{"id": "fang-etal-2017-learning", "title": "Learning how to Active Learn: A Deep Reinforcement Learning Approach", "abstract": "Active learning aims to select a small subset of data for annotation such that a classifier learned on the data is highly accurate. This is usually done using heuristic selection methods; however, the effectiveness of such methods is limited, and moreover, the performance of heuristics varies between datasets. To address these shortcomings, we introduce a novel formulation by reframing active learning as a reinforcement learning problem and explicitly learning a data selection policy, where the policy takes the role of the active learning heuristic. Importantly, our method allows the selection policy learned using simulation on one language to be transferred to other languages. We demonstrate our method using cross-lingual named entity recognition, observing uniform improvements over traditional active learning algorithms.", "phrases": ["reinforcement", "active learning algorithm", "imitation"], "overall_score": 1.6806030006933985, "scores": [0.9317943807705987, 0.5489655818290525, 0.5482132394763048], "rank_score": 0.6763244006919854} -{"id": "oncevay-2021-peru", "title": "Peru is Multilingual, Its Machine Translation Should Be Too?", "abstract": "Peru is a multilingual country with a long history of contact between the indigenous languages and Spanish. Taking advantage of this context for machine translation is possible with multilingual approaches for learning both unsupervised subword segmentation and neural machine translation models. The study proposes the first multilingual translation models for four languages spoken in Peru: Aymara, Ashaninka, Quechua and Shipibo-Konibo, providing both many-to-Spanish and Spanish-to-many models and outperforming pairwise baselines in most of them. 

The task exploited a large English-Spanish dataset for pre-training, monolingual texts with tagged back-translation, and parallel corpora aligned with English. Finally, by fine-tuning the best models, we also assessed the out-of-domain capabilities in two evaluation datasets for Quechua and a new one for Shipibo-Konibo.", "phrases": ["machine translation", "aymara", "quechua"], "overall_score": 0.9375799147881531, "scores": [0.9202372783967595, 0.5691823966299397, 0.5395431652262489], "rank_score": 0.6763209467509826} -{"id": "yang-cardie-2014-joint", "title": "Joint Modeling of Opinion Expression Extraction and Attribute Classification", "abstract": "In this paper, we study the problems of opinion expression extraction and expression-level polarity and intensity classification. Traditional fine-grained opinion analysis systems address these problems in isolation and thus cannot capture interactions among the textual spans of opinion expressions and their opinion-related properties. We present two types of joint approaches that can account for such interactions during 1) both learning and inference or 2) only during inference. Extensive experiments on a standard dataset demonstrate that our approaches provide substantial improvements over previously published results. By analyzing the results, we gain some insight into the advantages of different joint models.", "phrases": ["opinion expression extraction", "labeling", "joint model"], "overall_score": 1.211783289094067, "scores": [0.9563462408861659, 0.5364817804927918, 0.5360993707829053], "rank_score": 0.6763091307206209} -{"id": "roth-anthonio-2021-unimplicit", "title": "UnImplicit Shared Task Report: Detecting Clarification Requirements in Instructional Text", "abstract": "This paper describes the data, task setup, and results of the shared task at the First Workshop on Understanding Implicit and Underspecified Language (UnImplicit). The task requires computational models to predict whether a sentence contains aspects of meaning that are contextually unspecified and thus require clarification. Two teams participated and the best scoring system achieved an accuracy of 68%.", "phrases": ["clarification", "instructional text", "underspecified language"], "overall_score": 0.9374686495990969, "scores": [0.9237849382258695, 0.5660635017067503, 0.5388736177156157], "rank_score": 0.6762406858827452} -{"id": "druskat-etal-2016-corpus", "title": "corpus-tools.org: An Interoperable Generic Software Tool Set for Multi-layer Linguistic Corpora", "abstract": "This paper introduces an open source, interoperable generic software tool set catering for the entire workflow of creation, migration, annotation, query and analysis of multi-layer linguistic corpora. It consists of four components: Salt, a graph-based meta model and API for linguistic data, the common data model for the rest of the tool set; Pepper, a conversion tool and platform for linguistic data that can be used to convert many different linguistic formats into each other; Atomic, an extensible, platform-independent multi-layer desktop annotation software for linguistic corpora; ANNIS, a search and visualization architecture for multi-layer linguistic corpora with many different visualizations and a powerful native query language. 
The set was designed to solve the following issues in a multi-layer corpus workflow: lossless data transition between tools through a common data model generic enough to allow for a potentially unlimited number of different types of annotation; conversion capabilities for different linguistic formats to cater for the processing of data from different sources and/or with existing annotations; a high level of extensibility to enhance the sustainability of the whole tool set; and analysis capabilities encompassing corpus and annotation query alongside multi-faceted visualizations of all annotation layers.", "phrases": ["multi-layer linguistic corpora", "salt", "pepper"], "overall_score": 0.7429182937661768, "scores": [0.9831389244720663, 0.5247986185005122, 0.5207625765213841], "rank_score": 0.6762333731646543} -{"id": "hjalmarsson-etal-2007-dealing", "title": "Dealing with DEAL: A Dialogue System for Conversation Training", "abstract": "We present DEAL, a spoken dialogue system for conversation training under development at KTH. DEAL is a game with a spoken language interface designed for second language learners. The system is intended as a multidisciplinary research platform where challenges and potential benefits of combining elements from computer games, dialogue systems and language learning can be explored.", "phrases": ["conversation training", "spoken language interface", "second language learner"], "overall_score": 0.7426121605115927, "scores": [0.9477411548030992, 0.5568428252307159, 0.5232801759694559], "rank_score": 0.6759547186677569} -{"id": "bangalore-etal-2006-learning", "title": "Learning the Structure of Task-Driven Human-Human Dialogs", "abstract": "Data-driven techniques have been used for many computational linguistics tasks. Models derived from data are generally more robust than hand-crafted systems since they better reflect the distribution of the phenomena being modeled. With the availability of large corpora of spoken dialog, dialog management is now reaping the benefits of data-driven techniques. In this paper, we compare two approaches to modeling subtask structure in dialog: a chunk-based model of subdialog sequences, and a parse-based, or hierarchical, model. We evaluate these models using customer agent dialogs from a catalog service domain.", "phrases": ["dialog", "task structure", "natural language generation"], "overall_score": 1.8303667059889095, "scores": [0.9196491644834542, 0.5655726406692437, 0.5424732928671919], "rank_score": 0.67589836600663} -{"id": "wu-dredze-2020-explicit", "title": "Do Explicit Alignments Robustly Improve Multilingual Encoders?", "abstract": "Multilingual BERT (mBERT), XLM-RoBERTa (XLMR) and other unsupervised multilingual encoders can effectively learn cross-lingual representation. Explicit alignment objectives based on bitexts like Europarl or MultiUN have been shown to further improve these representations. However, word-level alignments are often suboptimal and such bitexts are unavailable for many languages. In this paper, we propose a new contrastive alignment objective that can better utilize such signal, and examine whether these previous alignment methods can be adapted to noisier sources of aligned data: a randomly sampled 1 million pair subset of the OPUS collection. Additionally, rather than report results on a single dataset with a single model run, we report the mean and standard deviation of multiple runs with different seeds, on four datasets and tasks. 

Our more extensive analysis finds that, while our new objective outperforms previous work, overall these methods do not improve performance under a more robust evaluation framework. Furthermore, the gains from using a better underlying model eclipse any benefits from alignment training. These negative results dictate more care in evaluating these methods and suggest limitations in applying explicit alignment objectives.", "phrases": ["multilingual encoder", "cross-lingual representation", "contrastive learning"], "overall_score": 1.4849845917843363, "scores": [0.9320084615522751, 0.5665803084333029, 0.5289480695822375], "rank_score": 0.6758456131892719} -{"id": "van-de-cruys-etal-2013-melodi-supervised", "title": "MELODI: A Supervised Distributional Approach for Free Paraphrasing of Noun Compounds", "abstract": "This paper describes the system submitted by the MELODI team for the SemEval-2013 Task 4: Free Paraphrases of Noun Compounds (Hendrickx et al., 2013). Our approach combines the strength of an unsupervised distributional word space model with a supervised maximum-entropy classification model; the distributional model yields a feature representation for a particular compound noun, which is subsequently used by the classifier to induce a number of appropriate paraphrases.", "phrases": ["paraphrase", "noun compounds", "distributional model"], "overall_score": 1.0876965882720946, "scores": [0.8488784118770725, 0.6056203074890648, 0.5729729461181273], "rank_score": 0.6758238884947549} -{"id": "lo-wu-2013-informal", "title": "Can Informal Genres be better Translated by Tuning on Automatic Semantic Metrics?", "abstract": "Even though the informal language of spoken text and web forum genres presents great difficulties for automatic semantic role labeling, we show that surprisingly, tuning statistical machine translation against the SRL-based objective function, MEANT, nevertheless leads more robustly to adequate translations of these informal genres than tuning against BLEU or TER. The accuracy of automatic semantic parsing has been shown to degrade significantly on informal genres such as speech or tweets, compared to formal genres like newswire. In spite of this, human evaluators preferred translations from MEANT-tuned systems over the BLEU- or TER-tuned ones by a significant margin. Error analysis indicates that one of the major sources of errors in automatic shallow semantic parsing of informal genres is failure to identify the semantic frame for copula or existential senses of \u201cbe\u201d. We show that MEANT\u2019s correlation with human adequacy judgment on informal text is improved by reconstructing the missing semantic frames for \u201cbe\u201d. Our tuning approach is independent of the translation model architecture, so any SMT model can potentially benefit from the semantic knowledge incorporated through our approach.", "phrases": ["informal genre", "meant", "adequate translation"], "overall_score": 0.7424000277468145, "scores": [0.9481105090388797, 0.5530343864378199, 0.5261399858353096], "rank_score": 0.675761627104003} -{"id": "sun-2010-improving", "title": "Improving Chinese Semantic Role Labeling with Rich Syntactic Features", "abstract": "Developing features has been shown crucial to advancing the state-of-the-art in Semantic Role Labeling (SRL). To improve Chinese SRL, we propose a set of additional features, some of which are designed to better capture structural information. 

Our system achieves 93.49 F-measure, a significant improvement over the best reported performance of 92.0. We are further concerned with the effect of parsing in Chinese SRL. We empirically analyze the two-fold effect, grouping words into constituents and providing syntactic information. We also give some preliminary linguistic explanations.", "phrases": ["semantic role labeling", "chinese srl", "statistical classifier"], "overall_score": 1.0874354457727886, "scores": [0.9453691073653915, 0.5479295721180419, 0.5336862151276807], "rank_score": 0.675661631537038} -{"id": "le-etal-2014-tuhoi", "title": "TUHOI: Trento Universal Human Object Interaction Dataset", "abstract": "This paper describes the Trento Universal Human Object Interaction dataset, TUHOI, which is dedicated to human object interactions in images. Recognizing human actions is an important yet challenging task. Most available datasets in this field are limited in numbers of actions and objects. A large dataset with various actions and human object interactions is needed for training and evaluating complicated and robust human action recognition systems, especially systems that combine knowledge learned from language and vision. We introduce an image collection with more than two thousand actions which have been annotated through crowdsourcing. We review publicly available datasets, describe the annotation process of our image collection and some statistics of this dataset. Finally, experimental results on the dataset including human action recognition based on objects and an analysis of the relation between human-object positions in images and prepositions in language are presented.", "phrases": ["object", "human action", "tuhoi"], "overall_score": 0.9363093285673803, "scores": [0.8630815076031421, 0.6020085271035867, 0.5611232028866233], "rank_score": 0.6754044125311173} -{"id": "lubetich-sagae-2014-data", "title": "Data-driven Measurement of Child Language Development with Simple Syntactic Templates", "abstract": "When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.", "phrases": ["child language development", "developmental level", "linguistic feature"], "overall_score": 0.936229478420979, "scores": [0.9432642839989398, 0.5606142872852424, 0.5221618671938285], "rank_score": 0.6753468128260036} -{"id": "oren-etal-2019-distributionally", "title": "Distributionally Robust Language Modeling", "abstract": "Language models are generally trained on data spanning a wide range of topics (e.g., news, reviews, fiction), but they might be applied to an a priori unknown target distribution (e.g., restaurant reviews). 

In this paper, we first show that training on text outside the test distribution can degrade test performance when using standard maximum likelihood (MLE) training. To remedy this without the knowledge of the test distribution, we propose an approach which trains a model that performs well over a wide range of potential test distributions. In particular, we derive a new distributionally robust optimization (DRO) procedure which minimizes the loss of the model over the worst-case mixture of topics with sufficient overlap with the training distribution. Our approach, called topic conditional value at risk (topic CVaR), obtains a 5.5 point perplexity reduction over MLE when the language models are trained on a mixture of Yelp reviews and news and tested only on reviews.", "phrases": ["robust optimization", "dro", "mixture"], "overall_score": 1.3141568552003688, "scores": [0.9083270619041239, 0.5597893961586657, 0.5579126304411525], "rank_score": 0.6753430295013141} -{"id": "peng-etal-2019-huaweis", "title": "Huawei's NMT Systems for the WMT 2019 Biomedical Translation Task", "abstract": "This paper describes Huawei's neural machine translation systems for the WMT 2019 biomedical translation shared task. We trained and fine-tuned our systems on a combination of out-of-domain and in-domain parallel corpora for six translation directions covering English\u2013Chinese, English\u2013French and English\u2013German language pairs. Our submitted systems achieve the best BLEU scores on English\u2013French and English\u2013German language pairs according to the official evaluation results. In the English\u2013Chinese translation task, our systems are in the second place. The enhanced performance is attributed to more in-domain training and more sophisticated models developed. Development of translation models and transfer learning (or domain adaptation) methods has significantly contributed to the progress of the task.", "phrases": ["wmt", "translation direction", "bleu score", "huawei"], "overall_score": 0.9359283483888952, "scores": [0.8286511237765466, 0.808095197167238, 0.5357885182918524, 0.5279835344604776], "rank_score": 0.6751295934240286} -{"id": "das-smith-2009-paraphrase", "title": "Paraphrase Identification as Probabilistic Quasi-Synchronous Recognition", "abstract": "We present a novel approach to deciding whether two sentences hold a paraphrase relationship. We employ a generative model that generates a paraphrase of a given sentence, and we use probabilistic inference to reason about whether two sentences share the paraphrase relationship. The model cleanly incorporates both syntax and lexical semantics using quasi-synchronous dependency grammars (Smith and Eisner, 2006). Furthermore, using a product of experts (Hinton, 2002), we combine the model with a complementary logistic regression model based on state-of-the-art lexical overlap features. 
We evaluate our models on the task of distinguishing true paraphrase pairs from false ones on a standard corpus, giving competitive state-of-the-art performance.", "phrases": ["probabilistic inference", "paraphrase identification", "inter alia"], "overall_score": 1.4832059354859202, "scores": [0.9366421839622667, 0.5619271474139225, 0.5265390040913688], "rank_score": 0.6750361118225193} -{"id": "wu-etal-2019-proactive", "title": "Proactive Human-Machine Conversation with Explicit Conversation Goal", "abstract": "Though great progress has been made for human-machine conversation, current dialogue system is still in its infancy: it usually converses passively and utters words more as a matter of response, rather than on its own initiatives. In this paper, we take a radical step towards building a human-like conversational agent: endowing it with the ability of proactively leading the conversation (introducing a new topic or maintaining the current topic). To facilitate the development of such conversation systems, we create a new dataset named Konv where one acts as a conversation leader and the other acts as the follower. The leader is provided with a knowledge graph and asked to sequentially change the discussion topics, following the given conversation goal, and meanwhile keep the dialogue as natural and engaging as possible. Konv enables a very challenging task as the model needs to both understand dialogue and plan over the given knowledge graph. We establish baseline results on this dataset (about 270K utterances and 30k dialogues) using several state-of-the-art models. Experimental results show that dialogue models that plan over the knowledge graph can make full use of related knowledge to generate more diverse multi-turn conversations. The baseline systems along with the dataset are publicly available.", "phrases": ["human-machine conversation", "response generation", "path"], "overall_score": 1.7811073701271856, "scores": [0.9505488667169038, 0.5531921263116689, 0.5209676218314713], "rank_score": 0.6749028716200147} -{"id": "abdulrahman-etal-2019-developing", "title": "Developing a Fine-grained Corpus for a Less-resourced Language: the case of Kurdish", "abstract": "Kurdish is a less-resourced language consisting of different dialects written in various scripts. Approximately 30 million people in different countries speak the language. The lack of corpora is one of the main obstacles in Kurdish language processing. In this paper, we present KTC-the Kurdish Textbooks Corpus, which is composed of 31 K-12 textbooks in Sorani dialect. The corpus is normalized and categorized into 12 educational subjects containing 693,800 tokens (110,297 types). Our resource is publicly available for non-commercial use under the CC BY-NC-SA 4.0 license.", "phrases": ["less-resourced language", "kurdish textbooks corpus", "sorani dialect"], "overall_score": 0.7414340755060855, "scores": [0.9226149033313162, 0.579075471337219, 0.5229567637817948], "rank_score": 0.6748823794834434} -{"id": "denkowski-lavie-2010-meteor", "title": "METEOR-NEXT and the METEOR Paraphrase Tables: Improved Evaluation Support for Five Target Languages", "abstract": "This paper describes our submission to the WMT10 Shared Evaluation Task and MetricsMATR10. We present a version of the Meteor-next metric with paraphrase tables for five target languages. 
We describe the creation of these paraphrase tables and conduct a tuning experiment that demonstrates consistent improvement across all languages over baseline versions of the metric without paraphrase resources.", "phrases": ["meteor", "paraphrase table", "machine translation evaluation"], "overall_score": 1.4031308322192655, "scores": [0.9367834844617965, 0.5579562588609494, 0.5295501500383931], "rank_score": 0.6747632977870462} -{"id": "gliozzo-etal-2005-domain", "title": "Domain Kernels for Word Sense Disambiguation", "abstract": "In this paper we present a supervised Word Sense Disambiguation methodology, that exploits kernel methods to model sense distinctions. In particular a combination of kernel functions is adopted to estimate independently both syntagmatic and domain similarity. We defined a kernel function, namely the Domain Kernel, that allowed us to plug \"external knowledge\" into the supervised learning process. External knowledge is acquired from unlabeled data in a totally unsupervised way, and it is represented by means of Domain Models. We evaluated our methodology on several lexical sample tasks in different languages, outperforming significantly the state-of-the-art for each of them, while reducing the amount of labeled training data required for learning.", "phrases": ["kernel method", "train", "wsd", "matrix", "term-to-document matrix"], "overall_score": 1.2088138001414046, "scores": [0.8811993426276968, 0.8339174517529383, 0.5891655762850395, 0.5479512210687746, 0.5210255451691183], "rank_score": 0.6746518273807135} -{"id": "nakov-ng-2011-translating", "title": "Translating from Morphologically Complex Languages: A Paraphrase-Based Approach", "abstract": "We propose a novel approach to translating from a morphologically complex language. Unlike previous research, which has targeted word inflections and concatenations, we focus on the pairwise relationship between morphologically related words, which we treat as potential paraphrases and handle using paraphrasing techniques at the word, phrase, and sentence level. An important advantage of this framework is that it can cope with derivational morphology, which has so far remained largely beyond the capabilities of statistical machine translation systems. Our experiments translating from Malay, whose morphology is mostly derivational, into English show significant improvements over rivaling approaches based on five automatic evaluation measures (for 320,000 sentence pairs; 9.5 million English word tokens).", "phrases": ["paraphrase", "morphological knowledge", "rich language"], "overall_score": 1.2085729518800958, "scores": [0.9033030560962781, 0.5654416727802701, 0.5548074933435241], "rank_score": 0.6745174074066907} -{"id": "oncevay-etal-2020-bridging", "title": "Bridging Linguistic Typology and Multilingual Machine Translation with Multi-View Language Representations", "abstract": "Sparse language vectors from linguistic typology databases and learned embeddings from tasks like multilingual machine translation have been investigated in isolation, without analysing how they could benefit from each other's language characterisation. We propose to fuse both views using singular vector canonical correlation analysis and study what kind of information is induced from each source. By inferring typological features and language phylogenies, we observe that our representations embed typology and strengthen correlations with language relationships. 
We then take advantage of our multi-view language vector space for multilingual machine translation, where we achieve competitive overall translation accuracy in tasks that require information about language similarities, such as language clustering and ranking candidates for multilingual transfer. With our method, we can easily project and assess new languages without expensive retraining of massive multilingual or ranking models, which are major disadvantages of related approaches.", "phrases": ["multilingual machine translation", "typological database", "mnmt model"], "overall_score": 1.4816104689397285, "scores": [0.9381376348118677, 0.545831854895724, 0.5389604614072948], "rank_score": 0.6743099837049621} -{"id": "kamath-etal-2019-specializing", "title": "Specializing Distributional Vectors of All Words for Lexical Entailment", "abstract": "Semantic specialization methods fine-tune distributional word vectors using lexical knowledge from external resources (e.g. WordNet) to accentuate a particular relation between words. However, such post-processing methods suffer from limited coverage as they affect only vectors of words seen in the external resources. We present the first post-processing method that specializes vectors of all vocabulary words \u2013 including those unseen in the resources \u2013 for the asymmetric relation of lexical entailment (LE) (i.e., hyponymy-hypernymy relation). Leveraging a partially LE-specialized distributional space, our POSTLE (i.e., post-specialization for LE) model learns an explicit global specialization function, allowing for specialization of vectors of unseen words, as well as word vectors from other languages via cross-lingual transfer. We capture the function as a deep feed-forward neural network: its objective re-scales vector norms to reflect the concept hierarchy while simultaneously attracting hyponymy-hypernymy pairs to better reflect semantic similarity. An extended model variant augments the basic architecture with an adversarial discriminator. We demonstrate the usefulness and versatility of POSTLE models with different input distributional spaces in different scenarios (monolingual LE and zero-shot cross-lingual LE transfer) and tasks (binary and graded LE). We report consistent gains over state-of-the-art LE-specialization methods, and successfully LE-specialize word vectors for languages without any external lexical knowledge.", "phrases": ["lexical entailment", "global specialization function", "unseen word"], "overall_score": 1.401946307712002, "scores": [0.963869044691771, 0.5313439143398344, 0.5273680266970929], "rank_score": 0.6741936619095661} -{"id": "huang-etal-2016-well", "title": "How well do Computers Solve Math Word Problems? Large-Scale Dataset Construction and Evaluation", "abstract": "Recently a few systems for automatically solving math word problems have reported promising results. However, the datasets used for evaluation have limitations in both scale and diversity. In this paper, we build a large-scale dataset which is more than 9 times the size of previous ones, and contains many more problem types. Problems in the dataset are semi-automatically obtained from community question-answering (CQA) web pages. A ranking SVM model is trained to automatically extract problem answers from the answer text provided by CQA users, which significantly reduces human annotation cost. 

Experiments conducted on the new dataset lead to interesting and surprising results.", "phrases": ["math word problem", "large-scale dataset", "web page"], "overall_score": 1.616186711214508, "scores": [0.912523600254543, 0.5860947304643348, 0.5233882967787442], "rank_score": 0.674002209165874} -{"id": "dyer-etal-2010-cdec", "title": "cdec: A Decoder, Alignment, and Learning Framework for Finite-State and Context-Free Translation Models", "abstract": "We present cdec, an open source framework for decoding, aligning with, and training a number of statistical machine translation models, including word-based models, phrase-based models, and models based on synchronous context-free grammars. Using a single unified internal representation for translation forests, the decoder strictly separates model-specific translation logic from general rescoring, pruning, and inference algorithms. From this unified representation, the decoder can extract not only the 1- or k-best translations, but also alignments to a reference, or the quantities necessary to drive discriminative training using gradient-based or gradient-free optimization techniques. Its efficient C++ implementation means that memory use and runtime performance are significantly better than comparable decoders.", "phrases": ["word alignment", "qe-clean system", "cdec"], "overall_score": 1.9478694063668271, "scores": [0.9561937105681251, 0.5410832567080378, 0.5244727274253627], "rank_score": 0.6739165649005084} -{"id": "krug-etal-2015-rule", "title": "Rule-based Coreference Resolution in German Historic Novels", "abstract": "Coreference resolution (CR) is a key task in the automated analysis of characters in stories. Standard CR systems usually trained on newspaper texts have difficulties with literary texts, even with novels; a comparison with newspaper texts showed that average sentence length is greater in novels and the number of pronouns, as well as the percentage of direct speech is higher. We report promising evaluation results for a rule-based system similar to [Lee et al. 2011], but tailored to the domain which recognizes coreference chains in novels much better than CR systems like CorZu. Rule-based systems performed best on the CoNLL 2011 challenge [Pradhan et al. 2011]. Recent work in machine learning showed results similar to rule-based systems [Durrett et al. 2013]. The latter has the advantage that its explanation component facilitates a fine grained error analysis for incremental refinement of the rules.", "phrases": ["pronoun", "rule-based coreference resolution", "literary character"], "overall_score": 1.3113093149094084, "scores": [0.9562521671059789, 0.5333267351270624, 0.5320601475648336], "rank_score": 0.6738796832659584} -{"id": "elming-etal-2013-stream", "title": "Down-stream effects of tree-to-dependency conversions", "abstract": "Dependency analysis relies on morphosyntactic evidence, as well as semantic evidence. In some cases, however, morphosyntactic evidence seems to be in conflict with semantic evidence. For this reason, dependency grammar theories, annotation guidelines and tree-to-dependency conversion schemes often differ in how they analyze various syntactic constructions. Most experiments for which constituent-based treebanks such as the Penn Treebank are converted into dependency treebanks rely blindly on one of four-five widely used tree-to-dependency conversion schemes. 

This paper evaluates the down-stream effect of the choice of conversion scheme, showing that it has a dramatic impact on end results.", "phrases": ["dependency analysis", "conversion scheme", "down-stream effect", "downstream task", "view"], "overall_score": 1.6158739888500717, "scores": [0.8903208247422758, 0.8391068660910895, 0.5985619232544229, 0.5211858052452473, 0.5201835497188713], "rank_score": 0.6738717938103813} -{"id": "nozza-etal-2017-multi", "title": "A Multi-View Sentiment Corpus", "abstract": "Sentiment Analysis is a broad task that involves the analysis of various aspects of natural language text. However, most of the approaches in the state of the art usually investigate each aspect independently, i.e. Subjectivity Classification, Sentiment Polarity Classification, Emotion Recognition, Irony Detection. In this paper we present a Multi-View Sentiment Corpus (MVSC), which comprises 3000 English microblog posts related to the movie domain. Three independent annotators manually labelled MVSC, following a broad annotation schema about different aspects that can be grasped from natural language text coming from social networks. The contribution is therefore a corpus that comprises five different views for each message, i.e. subjective/objective, sentiment polarity, implicit/explicit, irony, emotion. In order to allow a more detailed investigation on the human labelling behaviour, we provide the annotations of each human annotator involved.", "phrases": ["multi-view sentiment corpus", "emotion", "different view", "implicit"], "overall_score": 1.3111086158109428, "scores": [1.0333299510059184, 0.5720087415146993, 0.5515657695452815, 0.5382017152618686], "rank_score": 0.6737765443319419} -{"id": "bender-etal-2015-layers", "title": "Layers of Interpretation: On Grammar and Compositionality", "abstract": "With the recent resurgence of interest in semantic annotation of corpora for improved semantic parsing, we observe a tendency, which we view as ill-advised, to conflate sentence meaning and speaker meaning into a single mapping, whether done by annotators or by a parser. We argue instead for the more traditional hypothesis that sentence meaning, but not speaker meaning, is compositional, and accordingly that NLP systems would benefit from reusable, automatically derivable, task-independent semantic representations which target sentence meaning, in order to capture exactly the information in the linguistic signal itself. We further argue that compositional construction of such sentence meaning representations affords better consistency, more comprehensiveness, greater scalability, and less duplication of effort for each new NLP application. For concreteness, we describe one well-tested grammar-based method for producing sentence meaning representations which is efficient for annotators, and which exhibits many of the above benefits. We then report on a small inter-annotator agreement study to quantify the consistency of semantic representations produced via this grammar-based method.", "phrases": ["interpretation", "compositionality", "scalability", "duplication", "amr"], "overall_score": 1.4008767128531625, "scores": [0.9117853875604796, 0.8584993460529103, 0.5343678265574243, 0.5342956083019271, 0.5294483090774088], "rank_score": 0.67367929551003} -{"id": "hu-etal-2021-one", "title": "One-class Text Classification with Multi-modal Deep Support Vector Data Description", "abstract": "This work presents multi-modal deep SVDD (mSVDD) for one-class text classification. 

By extending the uni-modal SVDD to a multiple modal one, we build mSVDD with multiple hyperspheres that enable us to build a much better description for target one-class data. Additionally, the end-to-end architecture of mSVDD can jointly handle neural feature learning and one-class text learning. We also introduce a mechanism for incorporating negative supervision in the absence of real negative data, which can be beneficial to the mSVDD model. We conduct experiments on Reuters and 20 Newsgroup datasets, and the experimental results demonstrate that mSVDD outperforms uni-modal SVDD and mSVDD can get further improvements when negative supervision is incorporated.", "phrases": ["multiple modal one", "one-class text classification", "well description"], "overall_score": 0.7400027202286726, "scores": [0.9547657022235236, 0.5405242262645985, 0.5254485827999866], "rank_score": 0.6735795037627028} -{"id": "roark-hollingshead-2009-linear", "title": "Linear Complexity Context-Free Parsing Pipelines via Chart Constraints", "abstract": "In this paper, we extend methods from Roark and Hollingshead (2008) for reducing the worst-case complexity of a context-free parsing pipeline via hard constraints derived from finite-state tagging pre-processing. Methods from our previous paper achieved quadratic worst-case complexity. We prove here that alternate methods for choosing constraints can achieve either linear or O(N log^2 N) complexity. These worst-case bounds on processing are demonstrated to be achieved without reducing the parsing accuracy, in fact in some cases improving the accuracy. The new methods achieve observed performance comparable to the previously published quadratic complexity method. Finally, we demonstrate improved performance by combining complexity bounding methods with additional high precision constraints.", "phrases": ["complexity", "chart constraint", "worst-case"], "overall_score": 1.3103860869972375, "scores": [0.8803869310210198, 0.573151467332088, 0.5666773155636846], "rank_score": 0.6734052379722643} -{"id": "chen-etal-2020-exclusive", "title": "Exclusive Hierarchical Decoding for Deep Keyphrase Generation", "abstract": "Keyphrase generation (KG) aims to summarize the main ideas of a document into a set of keyphrases. A new setting is recently introduced into this problem, in which, given a document, the model needs to predict a set of keyphrases and simultaneously determine the appropriate number of keyphrases to produce. Previous work in this setting employs a sequential decoding process to generate keyphrases. However, such a decoding method ignores the intrinsic hierarchical compositionality existing in the keyphrase set of a document. Moreover, previous work tends to generate duplicated keyphrases, which wastes time and computing resources. To overcome these limitations, we propose an exclusive hierarchical decoding framework that includes a hierarchical decoding process and either a soft or a hard exclusion mechanism. The hierarchical decoding process is to explicitly model the hierarchical compositionality of a keyphrase set. Both the soft and the hard exclusion mechanisms keep track of previously-predicted keyphrases within a window size to enhance the diversity of the generated keyphrases. 
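As a rough illustration of the multi-hypersphere description in the mSVDD record above (a sketch of the general idea only, not the authors' code; the centers and radii are made up), scoring a point by its distance to the nearest hypersphere could look like:

```python
import numpy as np

def msvdd_score(x, centers, radii):
    """Anomaly score under a multi-hypersphere description:
    distance of x to the nearest hypersphere surface.
    Negative scores fall inside some hypersphere (target class)."""
    dists = np.linalg.norm(centers - x, axis=1)  # distance to each center
    return float(np.min(dists - radii))          # <= 0 -> inside a sphere

# Toy usage with two hypothetical modes of the target class.
centers = np.array([[0.0, 0.0], [5.0, 5.0]])
radii = np.array([1.0, 0.5])
print(msvdd_score(np.array([0.2, 0.1]), centers, radii))  # inside -> negative
print(msvdd_score(np.array([3.0, 2.0]), centers, radii))  # outside -> positive
```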
Extensive experiments on multiple KG benchmark datasets demonstrate the effectiveness of our method to generate less duplicated and more accurate keyphrases.", "phrases": ["hierarchical decoding", "keyphrase set", "many work"], "overall_score": 1.400089398046239, "scores": [0.8903356360950193, 0.5792193294411327, 0.5503470658263715], "rank_score": 0.6733006771208411} -{"id": "wang-etal-2021-selective", "title": "Selective Knowledge Distillation for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) models achieve state-of-the-art performance on many translation benchmarks. As an active research field in NMT, knowledge distillation is widely applied to enhance the model's performance by transferring the teacher model's knowledge on each training sample. However, previous work rarely discusses the different impacts and connections among these samples, which serve as the medium for transferring teacher knowledge. In this paper, we design a novel protocol that can effectively analyze the different impacts of samples by comparing various samples' partitions. Based on the above protocol, we conduct extensive experiments and find that the teacher's knowledge is not the more, the better. Knowledge over specific samples may even hurt the whole performance of knowledge distillation. Finally, to address these issues, we propose two simple yet effective strategies, i.e., batch-level and global-level selections, to pick suitable samples for distillation. We evaluate our approaches on two large-scale machine translation tasks, WMT'14 English-German and WMT'19 Chinese-English. Experimental results show that our approaches yield up to +1.28 and +0.89 BLEU points improvements over the Transformer baseline, respectively.", "phrases": ["knowledge distillation", "neural machine translation", "sample"], "overall_score": 0.9333194870784757, "scores": [0.9037914092155054, 0.5844999381986339, 0.5314517459455825], "rank_score": 0.6732476977865739} -{"id": "xie-etal-2020-exploring", "title": "Exploring Question-Specific Rewards for Generating Deep Questions", "abstract": "Recent question generation (QG) approaches often utilize the sequence-to-sequence framework (Seq2Seq) to optimize the log likelihood of ground-truth questions using teacher forcing. However, this training objective is inconsistent with actual question quality, which is often reflected by certain global properties such as whether the question can be answered by the document. As such, we directly optimize for QG-specific objectives via reinforcement learning to improve question quality. We design three different rewards that aim to improve the fluency, relevance, and answerability of generated questions. We conduct both automatic and human evaluations in addition to thorough analysis to explore the effect of each QG-specific reward. We find that optimizing on question-specific rewards generally leads to better performance in automatic evaluation metrics. However, only the rewards that correlate well with human judgement (e.g., relevance) lead to real improvement in question quality. Optimizing for the others, especially answerability, introduces incorrect bias to the model, resulting in poorer question quality. 
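The batch-level selection described in the selective-distillation record above is easy to sketch; the gap-based criterion below is only my guess at the flavor of such a strategy, not the paper's exact rule:

```python
import torch

def batch_level_select(teacher_ce, student_ce, keep_ratio=0.5):
    """Pick the samples within a batch whose teacher/student
    cross-entropy gap suggests distillation will help most.
    teacher_ce, student_ce: (batch,) per-sample losses."""
    gap = student_ce - teacher_ce          # how much worse the student is
    k = max(1, int(keep_ratio * gap.numel()))
    return torch.topk(gap, k).indices      # keep the k hardest samples

teacher_ce = torch.tensor([0.7, 2.1, 0.3, 1.5])
student_ce = torch.tensor([1.0, 2.0, 1.9, 1.6])
print(batch_level_select(teacher_ce, student_ce))  # tensor([2, 0])
```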
The code is publicly available at .", "phrases": ["question-specific reward", "fluency", "relevance"], "overall_score": 1.08346788397613, "scores": [0.9472467406935258, 0.5372634976622778, 0.5350791020074465], "rank_score": 0.6731964467877499} -{"id": "song-etal-2020-summarizing", "title": "Summarizing Medical Conversations via Identifying Important Utterances", "abstract": "Summarization is an important natural language processing (NLP) task in identifying key information from text. For conversations, the summarization systems need to extract salient contents from spontaneous utterances by multiple speakers. In a special task-oriented scenario, namely medical conversations between patients and doctors, the symptoms, diagnoses, and treatments could be highly important because the nature of such conversation is to find a medical solution to the problem proposed by the patients. Especially consider that current online medical platforms provide millions of publicly available conversations between real patients and doctors, where the patients propose their medical problems and the registered doctors offer diagnosis and treatment, a conversation in most cases could be too long and the key information is hard to locate. Therefore, summarizations to the patients' problems and the doctors' treatments in the conversations can be highly useful, in terms of helping other patients with similar problems have a precise reference for potential medical solutions. In this paper, we focus on medical conversation summarization, using a dataset of medical conversations and corresponding summaries which were crawled from a well-known online healthcare service provider in China. We propose a hierarchical encoder-tagger model (HET) to generate summaries by identifying important utterances (with respect to problem proposing and solving) in the conversations. For the particular dataset used in this study, we show that high-quality summaries can be generated by extracting two types of utterances, namely, problem statements and treatment recommendations. Experimental results demonstrate that HET outperforms strong baselines and models from previous studies, and adding conversation-related features can further improve system performance.", "phrases": ["important utterance", "doctor", "dialogue summarization dataset"], "overall_score": 1.309918912395836, "scores": [0.8922037876785103, 0.597439482999648, 0.529852202478862], "rank_score": 0.6731651577190068} -{"id": "khademi-2020-multimodal", "title": "Multimodal Neural Graph Memory Networks for Visual Question Answering", "abstract": "We introduce a new neural network architecture, Multimodal Neural Graph Memory Networks (MN-GMN), for visual question answering. The MN-GMN uses graph structure with different region features as node attributes and applies a recently proposed powerful graph neural network model, Graph Network (GN), to reason about objects and their interactions in an image. The input module of the MN-GMN generates a set of visual features plus a set of encoded region-grounded captions (RGCs) for the image. The RGCs capture object attributes and their relationships. Two GNs are constructed from the input module using the visual features and encoded RGCs. Each node of the GNs iteratively computes a question-guided contextualized representation of the visual/textual information assigned to it. Then, to combine the information from both GNs, the nodes write the updated representations to an external spatial memory. 
The final states of the memory cells are fed into an answer module to predict an answer. Experiments show MN-GMN rivals the state-of-the-art models on Visual7W, VQA-v2.0, and CLEVR datasets.", "phrases": ["visual question answering", "neural network architecture", "caption"], "overall_score": 1.0832870768179153, "scores": [0.9313496972022167, 0.5647985667361806, 0.5231040510134009], "rank_score": 0.6730841049839326} -{"id": "jarrar-etal-2014-building", "title": "Building a Corpus for Palestinian Arabic: a Preliminary Study", "abstract": "This paper presents preliminary results in building an annotated corpus of the Palestinian Arabic dialect. The corpus consists of about 43K words, stemming from diverse resources. The paper discusses some linguistic facts about the Palestinian dialect, compared with the Modern Standard Arabic, especially in terms of morphological, orthographic, and lexical variations, and suggests some directions to resolve the challenges these differences pose to the annotation goal. Furthermore, we present two pilot studies that investigate whether existing tools for processing Modern Standard Arabic and Egyptian Arabic can be used to speed up the annotation process of our Palestinian Arabic corpus.", "phrases": ["palestinian arabic", "dialectal arabic", "curras"], "overall_score": 1.478626010124567, "scores": [0.8708566001828851, 0.6076941447619107, 0.5403043489443721], "rank_score": 0.672951697963056} -{"id": "veale-2016-round", "title": "Round Up The Usual Suspects: Knowledge-Based Metaphor Generation", "abstract": "The elasticity of metaphor as a communication strategy has spurred philosophers to question its ability to mean anything at all. If a metaphor can elicit different responses from different people in varying contexts, how can one say it has a single meaning? Davidson has argued that metaphors have no special or secondary meaning, and must thus mean exactly what they seem to mean on the surface. It is this literally anomalous meaning that directs us to the pragmatic inferences that a speaker actually wishes us to explore. Conveniently, this laissez faire strategy assumes that metaphors are crafted from apt knowledge by speakers with real communicative intent, allowing useful inference to be extracted from their words. But this assumption is not valid in the case of many machine-generated metaphors that merely echo the linguistic form \u2013 but lack the actual substance \u2013 of real metaphors. We present here an open public resource with which a metaphor-generation system can give its figurative efforts real meaning. [1. The Dreamwork of Language] Metaphor is the rubber cement of language. Not only does it help us to plug the holes in our lexica, we also use it to fill the gaps in our understanding and to hide the cracks in our arguments. For unlike the brittle plaster of literal language, metaphors are elastic and can readily expand to fit our meanings in a shifting conversational context. This elasticity comes at a price, though one which a master orator is happy to pay: our metaphors are elastic because they are indeterminate, underspecified and vague. Like dreams, our metaphors paint vivid pictures with words, albeit with fuzzy and ill-defined edges. Like dreams, metaphors can be highly suggestive, yet leave us feeling confused and uncertain. If metaphorical images are crisp at their focal points but hazy and dreamlike at their edges, just what is the meaning of any metaphor? 
The philosopher Donald Davidson (1978) has controversially argued that, like our dreams, our metaphors do not have well-defined meanings, at least not of a kind that an AI researcher, semanticist or computational linguist could squeeze into a symbolic structure. Rather, metaphors can move us to think and feel in certain ways, and perhaps act in certain ways, but like dreams, two analysts (a Jungian and a Freudian, say) can hold conflicting views as to how they should be interpreted and as to what they actually \u201cmean\u201d, if anything. So, for Davidson, a metaphor means just what it purports to mean on the surface, that is, what the literal or dictionary senses of its words suggest that it means. This meaning is to be distinguished from the panoply of inferences and insights that might later emerge from a metaphor, for regardless of how salient these may seem, they cannot be considered its definitive meaning. Freud once joked that when it comes to dreams, a cigar is often just a cigar. For Davidson, a figurative cigar is always a cigar, even if the metaphor spurs us to further inference far beyond the realm of tobacco. If all metaphors mean simply what they seem to mean on the surface, and most \u2013 from the very best to the truly awful \u2013 are superficially anomalous, how can we tell the good from the bad by simply looking? Indeed, how can we tell real metaphors from fake metaphors based only on the words they use and their senses in the dictionary? Empirical results seem to bear out Davidson\u2019s intuitions regarding our folk grasp of metaphors. Veale (2015)", "phrases": ["knowledge-base", "metaphor generation", "bruce wayne"], "overall_score": 1.2057506128280435, "scores": [0.9337115812012218, 0.5523711600902823, 0.5327439486785254], "rank_score": 0.6729422299900097} -{"id": "qian-etal-2017-syntax", "title": "Syntax Aware LSTM model for Semantic Role Labeling", "abstract": "In the Semantic Role Labeling (SRL) task, the tree structured dependency relation is rich in syntax information, but it is not well handled by existing models. In this paper, we propose Syntax Aware Long Short Time Memory (SA-LSTM). The structure of SA-LSTM changes according to dependency structure of each sentence, so that SA-LSTM can model the whole tree structure of dependency relation in an architecture engineering way. Experiments demonstrate that on Chinese Proposition Bank (CPB) 1.0, SA-LSTM improves F1 by 2.06% over ordinary bi-LSTM with feature engineered dependency relation information, and gives state-of-the-art F1 of 79.92%. On English CoNLL 2005 dataset, SA-LSTM brings improvement (2.1%) to bi-LSTM model and also brings slight improvement (0.3%) when added to the state-of-the-art model.", "phrases": ["semantic role labeling", "srl", "tree structure"], "overall_score": 0.9324207743966583, "scores": [0.9512942062124966, 0.5402562585270959, 0.5262477761262662], "rank_score": 0.672599413621953} -{"id": "hall-2007-k", "title": "K-best Spanning Tree Parsing", "abstract": "This paper introduces a Maximum Entropy dependency parser based on an efficient k-best Maximum Spanning Tree (MST) algorithm. Although recent work suggests that the edge-factored constraints of the MST algorithm significantly inhibit parsing accuracy, we show that generating the 50-best parses according to an edge-factored model has an oracle performance well above the 1-best performance of the best dependency parsers. This motivates our parsing approach, which is based on reranking the k-best parses generated by an edge-factored model. 
Oracle parse accuracy results are presented for the edge-factored model and 1-best results for the reranker on eight languages (seven from CoNLL-X and English).", "phrases": ["parse", "maximum spanning tree", "edge-factored model", "oracle performance"], "overall_score": 1.397962259552001, "scores": [0.9412565213963291, 0.6423338293221154, 0.5771522677940886, 0.5283683404282696], "rank_score": 0.6722777397352007} -{"id": "hahn-2020-theoretical", "title": "Theoretical Limitations of Self-Attention in Neural Sequence Models", "abstract": "Transformers are emerging as the new workhorse of NLP, showing great success across tasks. Unlike LSTMs, transformers process input sequences entirely through self-attention. Previous work has suggested that the computational capabilities of self-attention to process hierarchical structures are limited. In this work, we mathematically investigate the computational power of self-attention to model formal languages. Across both soft and hard attention, we show strong theoretical limitations of the computational abilities of self-attention, finding that it cannot model periodic finite-state languages, nor hierarchical structure, unless the number of layers or heads increases with input length. These limitations seem surprising given the practical success of self-attention and the prominent role assigned to hierarchical structure in linguistics, suggesting that natural language can be approximated well with models that are too weak for the formal languages typically assumed in theoretical linguistics.", "phrases": ["limitation", "self-attention", "input length"], "overall_score": 1.204556410334568, "scores": [0.949667870103505, 0.5349338392006228, 0.5322254893603139], "rank_score": 0.6722757328881471} -{"id": "skachkova-etal-2018-closing", "title": "Closing Brackets with Recurrent Neural Networks", "abstract": "Many natural and formal languages contain words or symbols that require a matching counterpart for making an expression well-formed. The combination of opening and closing brackets is a typical example of such a construction. Due to their commonness, the ability to follow such rules is important for language modeling. Currently, recurrent neural networks (RNNs) are extensively used for this task. We investigate whether they are capable of learning the rules of opening and closing brackets by applying them to synthetic Dyck languages that consist of different types of brackets. We provide an analysis of the statistical properties of these languages as a baseline and show strengths and limits of Elman-RNNs, GRUs and LSTMs in experiments on random samples of these languages. In terms of perplexity and prediction accuracy, the RNNs get close to the theoretical baseline in most cases.", "phrases": ["capability", "dyck language", "string"], "overall_score": 1.4766644015016646, "scores": [0.8685814089295174, 0.607108590222345, 0.5404867950635227], "rank_score": 0.6720589314051284} -{"id": "deschacht-moens-2009-semi", "title": "Semi-supervised Semantic Role Labeling Using the Latent Words Language Model", "abstract": "Semantic Role Labeling (SRL) has proved to be a valuable tool for performing automatic analysis of natural language texts. Currently however, most systems rely on a large training set, which is manually annotated, an effort that needs to be repeated whenever different languages or a different set of semantic roles is used in a certain application. 
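The Dyck-language setup in the record above is concrete enough to sketch; the generator and stack-based well-formedness check below are a generic illustration (the bracket inventory is arbitrary, not the paper's exact configuration):

```python
import random

PAIRS = {"(": ")", "[": "]"}  # two bracket types, as in a Dyck-2 language

def gen_dyck(max_depth=4):
    """Recursively sample a balanced bracket string."""
    if max_depth == 0 or random.random() < 0.3:
        return ""
    opener = random.choice(list(PAIRS))
    return opener + gen_dyck(max_depth - 1) + PAIRS[opener] + gen_dyck(max_depth - 1)

def is_balanced(s):
    """Stack-based check that every bracket is properly closed."""
    stack = []
    for ch in s:
        if ch in PAIRS:
            stack.append(PAIRS[ch])
        elif not stack or stack.pop() != ch:
            return False
    return not stack

sample = gen_dyck()
print(sample, is_balanced(sample))  # always True for generated samples
```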
A possible solution for this problem is semi-supervised learning, where a small set of training examples is automatically expanded using unlabeled texts. We present the Latent Words Language Model, which is a language model that learns word similarities from unlabeled texts. We use these similarities for different semi-supervised SRL methods as additional features or to automatically expand a small training set. We evaluate the methods on the PropBank dataset and find that for small training sizes our best performing system achieves an error reduction of 33.27% F1-measure compared to a state-of-the-art supervised baseline.", "phrases": ["word similarity", "hidden markov model", "unlabeled data"], "overall_score": 1.6113839705641342, "scores": [0.8947275471017175, 0.5747463422273719, 0.546524042912085], "rank_score": 0.6719993107470582} -{"id": "jimeno-yepes-etal-2017-findings", "title": "Findings of the WMT 2017 Biomedical Translation Shared Task", "abstract": "Automatic translation of documents is an important task in many domains, including the biological and clinical domains. The second edition of the Biomedical Translation task in the Conference of Machine Translation focused on the automatic translation of biomedical-related documents between English and various European languages. This year, we addressed ten languages: Czech, German, English, French, Hungarian, Polish, Portuguese, Spanish, Romanian and Swedish. Test data included both scientific publications (from the Scielo and EDP Sciences databases) and health-related news (from the Cochrane and UK National Health Service web sites). Seven teams participated in the task, submitting a total of 82 runs. Herein we describe the datasets, participating systems and results of both the automatic and manual evaluation of the translations. We identify two main use cases of machine translation (MT) in the", "phrases": ["wmt", "edition", "biomedical translation task"], "overall_score": 1.2039728497078817, "scores": [0.8267872482739732, 0.6378590242575296, 0.5512038519719663], "rank_score": 0.6719500415011564} -{"id": "herbig-etal-2020-mmpe", "title": "MMPE: A Multi-Modal Interface for Post-Editing Machine Translation", "abstract": "Current advances in machine translation (MT) increase the need for translators to switch from traditional translation to post-editing (PE) of machine-translated text, a process that saves time and reduces errors. This affects the design of translation interfaces, as the task changes from mainly generating text to correcting errors within otherwise helpful translation proposals. Since this paradigm shift offers potential for modalities other than mouse and keyboard, we present MMPE, the first prototype to combine traditional input modes with pen, touch, and speech modalities for PE of MT. The results of an evaluation with professional translators suggest that pen and touch interaction are suitable for deletion and reordering tasks, while they are of limited use for longer insertions. On the other hand, speech and multi-modal combinations of select & speech are considered suitable for replacements and insertions but offer less potential for deletion and reordering. 
Overall, participants were enthusiastic about the new modalities and saw them as good extensions to mouse & keyboard, but not as a complete substitute.", "phrases": ["machine translation", "professional translator", "multi-modal combination"], "overall_score": 0.9315049557420552, "scores": [0.8982142841995929, 0.5706425853050173, 0.5469595008142245], "rank_score": 0.6719387901062782} -{"id": "li-etal-2018-named", "title": "Named-Entity Tagging and Domain adaptation for Better Customized Translation", "abstract": "Customized translation needs to pay special attention to the target domain terminology, especially the named-entities for the domain. Adding linguistic features to neural machine translation (NMT) has been shown to benefit translation in many studies. In this paper, we further demonstrate that adding a named-entity (NE) feature with named-entity recognition (NER) into the source language produces better translation with NMT. Our experiments show that by just including the different NE classes and boundary tags, we can increase the BLEU score by around 1 to 2 points using the standard test sets from WMT2017. We also show that adding NE tags using NER and applying in-domain adaptation can be combined to further improve customized machine translation.", "phrases": ["domain adaptation", "source language", "entity feature"], "overall_score": 1.2039467678210591, "scores": [0.8951729371400009, 0.5657905761942709, 0.554842941434609], "rank_score": 0.6719354849229603} -{"id": "och-etal-2003-efficient", "title": "Efficient Search for Interactive Statistical Machine Translation", "abstract": "The goal of interactive machine translation is to improve the productivity of human translators. An interactive machine translation system operates as follows: the automatic system proposes a translation. Now, the human user has two options: to accept the suggestion or to correct it. During the post-editing process, the human user is assisted by the interactive system in the following way: the system suggests an extension of the current translation prefix. Then, the user either accepts this extension (completely or partially) or ignores it. The two most important factors of such an interactive system are the quality of the proposed extensions and the response time. Here, we will use a fully fledged translation system to ensure the quality of the proposed extensions. To achieve fast response times, we will use word hypotheses graphs as an efficient search space representation. We will show results of our approach on the Verbmobil task and on the Canadian Hansards task.", "phrases": ["translator", "interactive system", "alignment template"], "overall_score": 1.4762361570044311, "scores": [0.8907685469695987, 0.5674149911654589, 0.557408548670374], "rank_score": 0.6718640289351439} -{"id": "dickinson-ledbetter-2012-annotating", "title": "Annotating Errors in a Hungarian Learner Corpus", "abstract": "We are developing and annotating a learner corpus of Hungarian, composed of student journals from three different proficiency levels written at Indiana University. Our annotation marks learner errors that are of different linguistic categories, including phonology, morphology, and syntax, but defining the annotation for an agglutinative language presents several issues. First, we must adapt an analysis that is centered on the morpheme rather than the word. Second, and more importantly, we see a need to distinguish errors from secondary corrections. 
We argue that although certain learner errors require a series of corrections to reach a target form, these secondary corrections, conditioned on those that come before, are our own adjustments that link the learner's productions to the target form and are not representative of the learner's internal grammar. In this paper, we report the annotation scheme and the principles that guide it, as well as examples illustrating its functionality and directions for expansion.", "phrases": ["hungarian learner corpus", "different linguistic category", "student essay"], "overall_score": 1.08102878820834, "scores": [0.9260144496329907, 0.5572553947383787, 0.5317730097640869], "rank_score": 0.6716809513784855} -{"id": "mirroshandel-nasr-2016-integrating", "title": "Integrating Selectional Constraints and Subcategorization Frames in a Dependency Parser", "abstract": "Statistical parsers are trained on treebanks that are composed of a few thousand sentences. In order to prevent data sparseness and computational complexity, such parsers make strong independence hypotheses on the decisions that are made to build a syntactic tree. These independence hypotheses yield a decomposition of the syntactic structures into small pieces, which in turn prevent the parser from adequately modeling many lexico-syntactic phenomena like selectional constraints and subcategorization frames. Additionally, treebanks are several orders of magnitude too small to observe many lexico-syntactic regularities, such as selectional constraints and subcategorization frames. In this article, we propose a solution to both problems: how to account for patterns that exceed the size of the pieces that are modeled in the parser and how to obtain subcategorization frames and selectional constraints from raw corpora and incorporate them in the parsing process. The method proposed was evaluated on French and on English. The experiments on French showed a decrease of 41.6% of selectional constraint violations and a decrease of 22% of erroneous subcategorization frame assignment. These figures are lower for English: 16.21% in the first case and 8.83% in the second.", "phrases": ["selectional constraint", "subcategorization frame", "dependency parser"], "overall_score": 0.9310853343929028, "scores": [0.8903047119736008, 0.5955280954868792, 0.5290754843991444], "rank_score": 0.6716360972865415} -{"id": "park-etal-2022-consistency", "title": "Consistency Training with Virtual Adversarial Discrete Perturbation", "abstract": "Consistency training regularizes a model by enforcing predictions of original and perturbed inputs to be similar. Previous studies have proposed various augmentation methods for the perturbation but are limited in that they are agnostic to the training model. Thus, the perturbed samples may not aid in regularization due to their ease of classification from the model. In this context, we propose an augmentation method of adding a discrete noise that would incur the highest divergence between predictions. This virtual adversarial discrete noise obtained by replacing a small portion of tokens while keeping original semantics as much as possible efficiently pushes a training model's decision boundary. 
Experimental results show that our proposed method outperforms other consistency training baselines with text editing, paraphrasing, or a continuous noise on semi-supervised text classification tasks and a robustness benchmark.", "phrases": ["sample", "consistency training", "training method"], "overall_score": 0.9309551304482624, "scores": [0.88485289472023, 0.5763135775271595, 0.5534600527343799], "rank_score": 0.6715421749939231} -{"id": "andrade-etal-2011-learning", "title": "Learning the Optimal Use of Dependency-parsing Information for Finding Translations with Comparable Corpora", "abstract": "Using comparable corpora to find new word translations is a promising approach for extending bilingual dictionaries (semi-) automatically. The basic idea is based on the assumption that similar words have similar contexts across languages. The context of a word is often summarized by using the bag-of-words in the sentence, or by using the words which are in a certain dependency position, e.g. the predecessors and successors. These different context positions are then combined into one context vector and compared across languages. However, previous research makes the (implicit) assumption that these different context positions should be weighted as equally important. Furthermore, only the same context positions are compared with each other, for example the successor position in Spanish is compared with the successor position in English. However, this is not necessarily always appropriate for languages like Japanese and English. To overcome these limitations, we suggest to perform a linear transformation of the context vectors, which is defined by a matrix. We define the optimal transformation matrix by using a Bayesian probabilistic model, and show that it is feasible to find an approximate solution using Markov chain Monte Carlo methods. Our experiments demonstrate that our proposed method constantly improves translation accuracy.", "phrases": ["comparable corpora", "new word translation", "context vector"], "overall_score": 0.9306672117255175, "scores": [0.8678097995708186, 0.6018190987846768, 0.5443745582560495], "rank_score": 0.6713344855371816} -{"id": "bella-etal-2022-language", "title": "Language Diversity: Visible to Humans, Exploitable by Machines", "abstract": "The Universal Knowledge Core (UKC) is a large multilingual lexical database with a focus on language diversity and covering over two thousand languages. The aim of the database, as well as its tools and data catalogue, is to make the abstract notion of linguistic diversity visually understandable for humans and formally exploitable by machines. The UKC website lets users explore millions of individual words and their meanings, but also phenomena of cross-lingual convergence and divergence, such as shared interlingual meanings, lexicon similarities, cognate clusters, or lexical gaps. 
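A toy rendering of the discrete adversarial noise idea in the consistency-training record above (my own simplification: greedily swap the token whose replacement most increases divergence from the clean prediction; `predict` and `vocab` are hypothetical stand-ins, and a real system would also filter replacements for semantic similarity):

```python
import math

def kl(p, q):
    """KL divergence between two discrete distributions.
    Assumes q has strictly positive entries."""
    return sum(pi * math.log(pi / qi) for pi, qi in zip(p, q) if pi > 0)

def adversarial_discrete_noise(tokens, predict, vocab, n_swaps=1):
    """Greedily replace up to n_swaps tokens so the model's prediction
    diverges most from the prediction on the clean input."""
    p_clean = predict(tokens)
    noisy = list(tokens)
    for _ in range(n_swaps):
        best = (0.0, None, None)
        for i in range(len(noisy)):
            for w in vocab:
                cand = noisy[:i] + [w] + noisy[i + 1:]
                d = kl(p_clean, predict(cand))
                if d > best[0]:
                    best = (d, i, w)
        _, i, w = best
        if i is not None:
            noisy[i] = w
    return noisy

# Dummy two-class "model": probability of class 1 rises with count of "bad".
def predict(tokens):
    p1 = min(0.9, 0.1 + 0.2 * tokens.count("bad"))
    return [1 - p1, p1]

print(adversarial_discrete_noise(["the", "movie", "was", "fine"], predict,
                                 vocab=["bad", "good"], n_swaps=1))
```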
The UKC LiveLanguage Catalogue, in turn, provides access to the underlying lexical data in a computer-processable form, ready to be reused in cross-lingual applications.", "phrases": ["universal knowledge core", "ukc", "language diversity"], "overall_score": 0.7375299611665648, "scores": [0.9302186428134195, 0.5524661014810903, 0.5313013601046159], "rank_score": 0.6713287014663752} -{"id": "fung-etal-2016-zara-supergirl", "title": "Zara The Supergirl: An Empathetic Personality Recognition System", "abstract": "Zara the Supergirl is an interactive system that, while having a conversation with a user, uses its built-in sentiment analysis, emotion recognition, facial and speech recognition modules, to exhibit the human-like response of sharing emotions. In addition, at the end of a 5-10 minute conversation with the user, it can give a comprehensive personality analysis based on the user\u2019s interaction with Zara. This is a first prototype that has incorporated a full empathy module, the recognition and response of human emotions, into a spoken language interactive system that enhances human-robot understanding. Zara was shown at the World Economic Forum in Dalian in September 2015.", "phrases": ["supergirl", "empathy", "zara", "agent"], "overall_score": 1.3062982187907446, "scores": [0.81587320243978, 0.7837921222581855, 0.5498835551808209, 0.5356690772297], "rank_score": 0.6713044892771216} -{"id": "adrian-bejan-harabagiu-2014-unsupervised", "title": "Unsupervised Event Coreference Resolution", "abstract": "The task of event coreference resolution plays a critical role in many natural language processing applications such as information extraction, question answering, and topic detection and tracking. In this article, we describe a new class of unsupervised, nonparametric Bayesian models with the purpose of probabilistically inferring coreference clusters of event mentions from a collection of unlabeled documents. In order to infer these clusters, we automatically extract various lexical, syntactic, and semantic features for each event mention from the document collection. Extracting a rich set of features for each event mention allows us to cast event coreference resolution as the task of grouping together the mentions that share the same features (they have the same participating entities, share the same location, happen at the same time, etc.). Some of the most important challenges posed by the resolution of event coreference in an unsupervised way stem from (a) the choice of representing event mentions through a rich set of features and (b) the ability of modeling events described both within the same document and across multiple documents. Our first unsupervised model that addresses these challenges is a generalization of the hierarchical Dirichlet process. This new extension presents the hierarchical Dirichlet process's ability to capture the uncertainty regarding the number of clustering components and, additionally, takes into account any finite number of features associated with each event mention. Furthermore, to overcome some of the limitations of this extension, we devised a new hybrid model, which combines an infinite latent class model with a discrete time series model. The main advantage of this hybrid model stands in its capability to automatically infer the number of features associated with each event mention from data and, at the same time, to perform an automatic selection of the most informative features for the task of event coreference. 
The evaluation performed for solving both within- and cross-document event coreference shows significant improvements of these models when compared against two baselines for this task.", "phrases": ["event coreference", "nonparametric bayesian model", "rich set", "multiple document"], "overall_score": 1.3061852378490693, "scores": [1.0474344572078216, 0.554422852634898, 0.5419585105654217, 0.5411698938257595], "rank_score": 0.6712464285584753} -{"id": "lucy-gauthier-2017-distributional", "title": "Are Distributional Representations Ready for the Real World? Evaluating Word Vectors for Grounded Perceptual Meaning", "abstract": "Distributional word representation methods exploit word co-occurrences to build compact vector encodings of words. While these representations enjoy widespread use in modern natural language processing, it is unclear whether they accurately encode all necessary facets of conceptual meaning. In this paper, we evaluate how well these representations can predict perceptual and conceptual features of concrete concepts, drawing on two semantic norm datasets sourced from human participants. We find that several standard word representations fail to encode many salient perceptual features of concepts, and show that these deficits correlate with word-word similarity prediction errors. Our analyses provide motivation for grounded and embodied language learning approaches, which may help to remedy these deficits.", "phrases": ["conceptual feature", "semantic norm dataset", "text corpora"], "overall_score": 1.5455955421359904, "scores": [0.9028840997317121, 0.56171607014428, 0.549130675735784], "rank_score": 0.6712436152039255} -{"id": "hahn-powell-etal-2017-swanson", "title": "Swanson linking revisited: Accelerating literature-based discovery across domains using a conceptual influence graph", "abstract": "We introduce a modular approach for literature-based discovery consisting of a machine reading and knowledge assembly component that together produce a graph of influence relations (e.g., \u201cA promotes B\u201d) from a collection of publications. A search engine is used to explore direct and indirect influence chains. Query results are substantiated with textual evidence, ranked according to their relevance, and presented in both a table-based view, as well as a network graph visualization. Our approach operates in both domain-specific settings, where there are knowledge bases and ontologies available to guide reading, and in multi-domain settings where such resources are absent. We demonstrate that this deep reading and search system reduces the effort needed to uncover \u201cundiscovered public knowledge\u201d, and that with the aid of this tool a domain expert was able to drastically reduce her model building time from months to two days.", "phrases": ["literature-based discovery", "influence relation", "publication"], "overall_score": 0.7373846787420743, "scores": [0.8896664604175095, 0.5759660375012774, 0.5479568811952066], "rank_score": 0.6711964597046646} -{"id": "zhang-etal-2014-triple", "title": "Triple based Background Knowledge Ranking for Document Enrichment", "abstract": "Document enrichment is the task of retrieving additional knowledge from external resources beyond what is available in the source document. This task is essential because of the phenomenon that text is generally replete with gaps and ellipses since authors assume a certain amount of background knowledge. The recovery of these gaps is intuitively useful for better understanding of the document. 
Conventional document enrichment techniques usually rely on Wikipedia which has great coverage but less accuracy, or Ontology which has great accuracy but less coverage. In this study, we propose a document enrichment framework which automatically extracts \u201cargument1,predicate,argument2\u201d triple from any text corpus as background knowledge, so as to ensure compatibility with any resource (e.g. news text, ontology, and on-line encyclopedia) and improve the enriching accuracy. We first incorporate source document and background knowledge together into a triple based document-level graph and then propose a global iterative ranking model to propagate relevance score and select the most relevant knowledge triple. We evaluate our model as a ranking problem and compute the MAP and P&N score to validate the ranking result. Our final result, a MAP score of 0.676 and P&20 score of 0.417 outperform a strong baseline based on search engine by 0.182 in MAP and 0.04 in P&20.", "phrases": ["background knowledge", "document enrichment", "search engine"], "overall_score": 0.737367839446218, "scores": [0.9246480248771642, 0.5511034157138922, 0.5377919551600261], "rank_score": 0.6711811319170274} -{"id": "rottmann-vogel-2007-word", "title": "Word reordering in statistical machine translation with a POS-based distortion model", "abstract": "In this paper we describe a word reordering strategy for statistical machine translation that reorders the source side based on Part of Speech (POS) information. Reordering rules are learned from the word aligned corpus. Reordering is integrated into the decoding process by constructing a lattice, which contains all word reorderings according to the reordering rules. Probabilities are assigned to the different reorderings. On this lattice monotone decoding is performed. This reordering strategy is compared with our previous reordering strategy, which looks at all permutations within a sliding window. We extend reordering rules by adding context information. Phrase translation pairs are learned from the original corpus and from a reordered source corpus to better capture the reordered word sequences at decoding time. Results are presented for English \u2192 Spanish and German \u2194 English translations, using the European Parliament Plenary Sessions corpus.", "phrases": ["statistical machine translation", "source side", "pos"], "overall_score": 1.3058542682580028, "scores": [0.9407332779010346, 0.5515478389257816, 0.520947914675938], "rank_score": 0.6710763438342514} -{"id": "lopez-resnik-2006-word", "title": "Word-Based Alignment, Phrase-Based Translation: What's the Link?", "abstract": "State-of-the-art statistical machine translation is based on alignments between phrases \u2013 sequences of words in the source and target sentences. The learning step in these systems often relies on alignments between words. It is often assumed that the quality of this word alignment is critical for translation. However, recent results suggest that the relationship between alignment quality and translation quality is weaker than previously thought. We investigate this question directly, comparing the impact of high-quality alignments with a carefully constructed set of degraded alignments. In order to tease apart various interactions, we report experiments investigating the impact of alignments on different aspects of the system. 
Our results confirm a weak correlation, but they also illustrate that more data and better feature engineering may be more beneficial than better alignment.", "phrases": ["translation quality", "feature engineering", "alignment performance", "aer", "large gain"], "overall_score": 1.544850534209649, "scores": [1.1811440288328179, 0.5837443319773596, 0.5346712837008033, 0.5318234964392686, 0.5232171709124576], "rank_score": 0.6709200623725413} -{"id": "toshniwal-etal-2020-learning", "title": "Learning to Ignore: Long Document Coreference with Bounded Memory Neural Networks", "abstract": "Long document coreference resolution remains a challenging task due to the large memory and runtime requirements of current models. Recent work doing incremental coreference resolution using just the global representation of entities shows practical benefits but requires keeping all entities in memory, which can be impractical for long documents. We argue that keeping all entities in memory is unnecessary, and we propose a memory-augmented neural network that tracks only a small bounded number of entities at a time, thus guaranteeing a linear runtime in length of document. We show that (a) the model remains competitive with models with high memory and computational requirements on OntoNotes and LitBank, and (b) the model learns an efficient memory management strategy easily outperforming a rule-based strategy.", "phrases": ["memory", "coreference resolution", "mention"], "overall_score": 1.3051354932197472, "scores": [0.8814358650392023, 0.588202817699313, 0.5424822168621504], "rank_score": 0.6707069665335551} -{"id": "salloum-habash-2012-elissa", "title": "Elissa: A Dialectal to Standard Arabic Machine Translation System", "abstract": "Modern Standard Arabic (MSA) has a wealth of natural language processing (NLP) tools and resources. In comparison, resources for dialectal Arabic (DA), the unstandardized spoken varieties of Arabic, are still lacking. We present Elissa, a machine translation (MT) system from DA to MSA. Elissa (version 1.0) employs a rule-based approach that relies on morphological analysis, morphological transfer rules and dictionaries in addition to language models to produce MSA paraphrases of dialectal sentences. Elissa can be employed as a general preprocessor for dialectal Arabic when using MSA NLP tools.", "phrases": ["dialectal", "arabic", "english machine translation"], "overall_score": 1.7200974576006758, "scores": [0.8454783078257131, 0.5936110065328869, 0.5727602988864089], "rank_score": 0.6706165377483364} -{"id": "specia-etal-2012-semeval", "title": "SemEval-2012 Task 1: English Lexical Simplification", "abstract": "We describe the English Lexical Simplification task at SemEval-2012. This is the first time such a shared task has been organized and its goal is to provide a framework for the evaluation of systems for lexical simplification and foster research on context-aware lexical simplification approaches. The task requires that annotators and systems rank a number of alternative substitutes -- all deemed adequate -- for a target word in context, according to how \"simple\" these substitutes are. The notion of simplicity is biased towards non-native speakers of English. Out of nine participating systems, the best scoring ones combine context-dependent and context-independent information, with the strongest individual contribution given by the frequency of the substitute regardless of its context.", "phrases": ["english lexical simplification", "non-native speaker", "semeval task", "difficulty"], "overall_score": 2.158491120588814, "scores": [0.9638196528585634, 0.6335713751983227, 0.5557116262401485, 0.5291892240200733], "rank_score": 0.670572969579277} -{"id": "liu-etal-2009-automated", "title": "Automated Suggestions for Miscollocations", "abstract": "One of the most common and persistent error types in second language writing is collocation errors, such as learn knowledge instead of gain or acquire knowledge, or make damage rather than cause damage. In this work-in-progress report, we propose a probabilistic model for suggesting corrections to lexical collocation errors. The probabilistic model incorporates three features: word association strength (MI), semantic similarity (via WordNet) and the notion of shared collocations (or intercollocability). The results suggest that the combination of all three features outperforms any single feature or any combination of two features.", "phrases": ["miscollocation", "wordnet", "learner"], "overall_score": 1.6077011437518423, "scores": [0.9562335134776282, 0.5288761818450722, 0.5262806627002797], "rank_score": 0.6704634526743267} -{"id": "jiang-etal-2020-multi-domain", "title": "Multi-Domain Neural Machine Translation with Word-Level Adaptive Layer-wise Domain Mixing", "abstract": "Many multi-domain neural machine translation (NMT) models achieve knowledge transfer by enforcing one encoder to learn shared embedding across domains. However, this design lacks adaptation to individual domains. To overcome this limitation, we propose a novel multi-domain NMT model using individual modules for each domain, on which we apply word-level, adaptive and layer-wise domain mixing. We first observe that words in a sentence are often related to multiple domains. Hence, we assume each word has a domain proportion, which indicates its domain preference. Then word representations are obtained by mixing their embedding in individual domains based on their domain proportions. 
We show this can be achieved by carefully designing multi-head dot-product attention modules for different domains, and eventually taking weighted averages of their parameters by word-level layer-wise domain proportions. Through this, we can achieve effective domain knowledge sharing and capture fine-grained domain-specific knowledge as well. Our experiments show that our proposed model outperforms existing ones in several NMT tasks.", "phrases": ["neural machine translation", "domain mixing", "head"], "overall_score": 1.3046545489817547, "scores": [0.9115014309986764, 0.5755974682465348, 0.524280531015424], "rank_score": 0.6704598100868785} -{"id": "rimell-clark-2008-adapting", "title": "Adapting a Lexicalized-Grammar Parser to Contrasting Domains", "abstract": "Most state-of-the-art wide-coverage parsers are trained on newspaper text and suffer a loss of accuracy in other domains, making parser adaptation a pressing issue. In this paper we demonstrate that a CCG parser can be adapted to two new domains, biomedical text and questions for a QA system, by using manually-annotated training data at the POS and lexical category levels only. This approach achieves parser accuracy comparable to that on newspaper data without the need for annotated parse trees in the new domain. We find that retraining at the lexical category level yields a larger performance increase for questions than for biomedical text and analyze the two datasets to investigate why different domains might behave differently for parser adaptation.", "phrases": ["ccg parser", "biomedical text", "lexical category"], "overall_score": 1.4728905623749384, "scores": [0.8997233954796221, 0.5859552930431089, 0.5253454610804666], "rank_score": 0.6703413832010657} -{"id": "chen-etal-2016-guided", "title": "Guided Alignment Training for Topic-Aware Neural Machine Translation", "abstract": "In this paper, we propose an effective way for biasing the attention mechanism of a sequence-to-sequence neural machine translation (NMT) model towards the well-studied statistical word alignment models. We show that our novel guided alignment training approach improves translation quality on real-life e-commerce texts consisting of product titles and descriptions, overcoming the problems posed by many unknown words and a large type/token ratio. We also show that meta-data associated with input texts such as topic or category information can significantly improve translation quality when used as an additional signal to the decoder part of the network. With both novel features, the BLEU score of the NMT system on a product title set improves from 18.6 to 21.3%. Even larger MT quality gains are obtained through domain adaptation of a general domain NMT system to e-commerce data. The developed NMT system also performs well on the IWSLT speech translation task, where an ensemble of four variant systems outperforms the phrase-based baseline by 2.1% BLEU absolute.", "phrases": ["neural machine translation", "translation quality", "topic vector"], "overall_score": 2.0407243893063205, "scores": [0.8997163660732255, 0.5691697543037486, 0.5419951567854594], "rank_score": 0.6702937590541445} -{"id": "hoory-etal-2021-learning-evaluating", "title": "Learning and Evaluating a Differentially Private Pre-trained Language Model", "abstract": "Contextual language models have led to significantly better results, especially when pre-trained on the same data as the downstream task. 
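The guided alignment training in the record above amounts to an extra loss tying attention weights to statistical word alignments; a minimal sketch of one plausible form of that loss (my formulation, not the authors' code):

```python
import torch

def guided_alignment_loss(attention, alignment, eps=1e-8):
    """Cross-entropy between the decoder's attention matrix and a
    reference word-alignment matrix (rows: target words, cols: source words).
    `alignment` rows are normalized into probability distributions."""
    ref = alignment / (alignment.sum(dim=-1, keepdim=True) + eps)
    return -(ref * torch.log(attention + eps)).sum(dim=-1).mean()

# Toy example: 2 target words attending over 3 source words.
attn = torch.tensor([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
align = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])  # from a word aligner
print(guided_alignment_loss(attn, align))  # small when attention follows alignment
```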
While this additional pre-training usually improves performance, it can lead to information leakage and therefore risks the privacy of individuals mentioned in the training data. One method to guarantee the privacy of such individuals is to train a differentially-private language model, but this usually comes at the expense of model performance. Also, in the absence of a differentially private vocabulary training, it is not possible to modify the vocabulary to fit the new data, which might further degrade results. In this work we bridge these gaps, and provide guidance to future researchers and practitioners on how to improve privacy while maintaining good model performance. We introduce a novel differentially private word-piece algorithm, which allows training a tailored domain-specific vocabulary while maintaining privacy. We then experiment with entity extraction tasks from clinical notes, and demonstrate how to train a differentially private pre-trained language model (i.e., BERT) with a privacy guarantee of \u03f5=1.1 and with only a small degradation in performance. Finally, as it is hard to tell given a privacy parameter \u03f5 what was the effect on the trained representation, we present experiments showing that the trained model does not memorize private information.", "phrases": ["pre-trained language model", "batch size", "bert model"], "overall_score": 1.0786039331514483, "scores": [0.944902518767876, 0.5374699889900665, 0.5281504049032425], "rank_score": 0.670174304220395} -{"id": "ashby-weir-2020-leveraging", "title": "Leveraging HTML in Free Text Web Named Entity Recognition", "abstract": "HTML tags are typically discarded in free text Named Entity Recognition from Web pages. We investigate whether these discarded tags might be used to improve NER performance. We compare Text+Tags sentences with their Text-Only equivalents, over five datasets, two free text segmentation granularities and two NER models. We find an increased F1 performance for Text+Tags of between 0.9% and 13.2% over all datasets, variants and models. This performance increase, over datasets of varying entity types, HTML density and construction quality, indicates our method is flexible and adaptable. These findings imply that a similar technique might be of use in other Web-aware NLP tasks, including the enrichment of deep language models.", "phrases": ["entity recognition", "text-only equivalent", "language model"], "overall_score": 0.7362484499018731, "scores": [0.9071337551883837, 0.556613321564076, 0.5467395821792066], "rank_score": 0.6701622196438888} -{"id": "vulic-etal-2020-probing", "title": "Probing Pretrained Language Models for Lexical Semantics", "abstract": "The success of large pretrained language models (LMs) such as BERT and RoBERTa has sparked interest in probing their representations, in order to unveil what types of knowledge they implicitly capture. While prior research focused on morphosyntactic, semantic, and world knowledge, it remains unclear to which extent LMs also derive lexical type-level knowledge from words in context. In this work, we present a systematic empirical analysis across six typologically diverse languages and five different lexical tasks, addressing the following questions: 1) How do different lexical knowledge extraction strategies (monolingual versus multilingual source LM, out-of-context versus in-context encoding, inclusion of special tokens, and layer-wise averaging) impact performance? How consistent are the observed effects across tasks and languages? 
2) Is lexical knowledge stored in few parameters, or is it scattered throughout the network? 3) How do these representations fare against traditional static word vectors in lexical tasks? 4) Does the lexical information emerging from independently trained monolingual LMs display latent similarities? Our main results indicate patterns and best practices that hold universally, but also point to prominent variations across languages and tasks. Moreover, we validate the claim that lower Transformer layers carry more type-level lexical knowledge, but also show that this knowledge is distributed across multiple layers.", "phrases": ["language model", "previous study", "different layer"], "overall_score": 1.718831782281554, "scores": [0.9662207023349536, 0.523616478372057, 0.5205320812997488], "rank_score": 0.6701230873355866} -{"id": "crego-habash-2008-using", "title": "Using Shallow Syntax Information to Improve Word Alignment and Reordering for SMT", "abstract": "We describe two methods to improve SMT accuracy using shallow syntax information. First, we use chunks to refine the set of word alignments typically used as a starting point in SMT systems. Second, we extend an N-gram-based SMT system with chunk tags to better account for long-distance reorderings. Experiments are reported on an Arabic-English task showing significant improvements. A human error analysis indicates that long-distance reorderings are captured effectively.", "phrases": ["shallow syntax information", "reordering", "smt system"], "overall_score": 1.0779664480051416, "scores": [0.9156504689324869, 0.5492585863301885, 0.5444255820235203], "rank_score": 0.6697782124287319} -{"id": "wan-etal-2006-using", "title": "Using Dependency-Based Features to Take the 'Para-farce' out of Paraphrase", "abstract": "As research in text-to-text paraphrase generation progresses, it has the potential to improve the quality of generated text. However, the use of paraphrase generation methods creates a secondary problem. We must ensure that generated novel sentences are not inconsistent with the text from which it was generated. We propose a machine learning approach be used to filter out inconsistent novel sentences, or False Paraphrases. To train such a filter, we use the Microsoft Research Paraphrase corpus and investigate whether features based on syntactic dependencies can aid us in this task. Like Finch et al. (2005), we obtain a classification accuracy of 75.6%, the best known performance for this corpus. We also examine the strengths and weaknesses of dependency based features and conclude that they may be useful in more accurately classifying cases of False Paraphrase.", "phrases": ["paraphrase", "dependency relation", "predicat-argument relation"], "overall_score": 1.8973559699745903, "scores": [0.8787499976680151, 0.565703567243426, 0.5645966613502454], "rank_score": 0.6696834087538955} -{"id": "de-gispert-etal-2010-hierarchical-phrase", "title": "Hierarchical Phrase-Based Translation with Weighted Finite-State Transducers and Shallow-n Grammars", "abstract": "In this article we describe HiFST, a lattice-based decoder for hierarchical phrase-based translation and alignment. The decoder is implemented with standard Weighted Finite-State Transducer (WFST) operations as an alternative to the well-known cube pruning procedure. We find that the use of WFSTs rather than k-best lists requires less pruning in translation search, resulting in fewer search errors, better parameter optimization, and improved translation performance. 
The direct generation of translation lattices in the target language can improve subsequent rescoring procedures, yielding further gains when applying long-span language models and Minimum Bayes Risk decoding. We also provide insights as to how to control the size of the search space defined by hierarchical rules. We show that shallow-n grammars, low-level rule catenation, and other search constraints can help to match the power of the translation system to specific language pairs.", "phrases": ["finite-state transducer", "translation performance", "hierarchical rule"], "overall_score": 1.5419798299880665, "scores": [0.8727509349622425, 0.6018476886203771, 0.534421370527175], "rank_score": 0.6696733313699316} -{"id": "liu-etal-2009-unsupervised", "title": "Unsupervised Approaches for Automatic Keyword Extraction Using Meeting Transcripts", "abstract": "This paper explores several unsupervised approaches to automatic keyword extraction using meeting transcripts. In the TFIDF (term frequency, inverse document frequency) weighting framework, we incorporated part-of-speech (POS) information, word clustering, and sentence salience score. We also evaluated a graph-based approach that measures the importance of a word based on its connection with other sentences or words. The system performance is evaluated in different ways, including comparison to human annotated keywords using F-measure and a weighted score relative to the oracle system performance, as well as a novel alternative human evaluation. Our results have shown that the simple unsupervised TFIDF approach performs reasonably well, and the additional information from POS and sentence score helps keyword extraction. However, the graph method is less effective for this domain. Experiments were also performed using speech recognition output and we observed degradation and different patterns compared to human transcripts.", "phrases": ["automatic keyword extraction", "transcript", "part-of-speech"], "overall_score": 1.471247251573262, "scores": [0.9172654641312188, 0.5513203596839239, 0.5401946168582162], "rank_score": 0.669593480224453} -{"id": "lyu-etal-2008-acoustic", "title": "Acoustic Model Optimization for Multilingual Speech Recognition", "abstract": "Due to abundant resources not always being available for resource-limited languages, training an acoustic model with unbalanced training data for multilingual speech recognition is an interesting research issue. In this paper, we propose a three-step data-driven phone clustering method to train a multilingual acoustic model. The first step is to obtain a clustering rule of context independent phone models driven from a well-trained acoustic model using a similarity measurement. For the second step, we further clustered the sub-phone units using hierarchical agglomerative clustering with delta Bayesian information criteria according to the clustering rules. Then, we chose a parametric modeling technique -- model complexity selection -- to adjust the number of Gaussian components in a Gaussian mixture for optimizing the acoustic model between the new phoneme set and the available training data. We used an unbalanced trilingual corpus where the percentages of the amounts of the training sets for Mandarin, Taiwanese, and Hakka are about 60%, 30%, and 10%, respectively. 
The experimental results show that the proposed sub-phone clustering approach reduced relative syllable error rate by 4.5% over the best result of the decision tree based approach and 13.5% over the best result of the knowledge-based approach.", "phrases": ["multilingual speech recognition", "phone clustering method", "taiwanese"], "overall_score": 0.7355250561437717, "scores": [0.9448663889827317, 0.5413412262660036, 0.5223036595581684], "rank_score": 0.6695037582689679} -{"id": "levy-manning-2004-deep", "title": "Deep Dependencies from Context-Free Statistical Parsers: Correcting the Surface Dependency Approximation", "abstract": "We present a linguistically-motivated algorithm for reconstructing nonlocal dependency in broad-coverage context-free parse trees derived from treebanks. We use an algorithm based on loglinear classifiers to augment and reshape context-free trees so as to reintroduce underlying nonlocal dependencies lost in the context-free approximation. We find that our algorithm compares favorably with prior work on English using an existing evaluation metric, and also introduce and argue for a new dependency-based evaluation metric. By this new evaluation metric our algorithm achieves 60% error reduction on gold-standard input trees and 5% error reduction on state-of-the-art machine-parsed input trees, when compared with the best previous work. We also present the first results on non-local dependency reconstruction for a language other than English, comparing performance on English and German. Our new evaluation metric quantitatively corroborates the intuition that in a language with freer word order, the surface dependencies in context-free parse trees are a poorer approximation to underlying dependency structure.", "phrases": ["surface dependency approximation", "long-distance dependency", "body"], "overall_score": 1.3920538339201096, "scores": [0.9070883915473449, 0.5803001243861015, 0.5209206469135649], "rank_score": 0.6694363876156704} -{"id": "li-etal-2019-multilingual", "title": "Multilingual Entity, Relation, Event and Human Value Extraction", "abstract": "This paper demonstrates a state-of-the-art end-to-end multilingual (English, Russian, and Ukrainian) knowledge extraction system that can perform entity discovery and linking, relation extraction, event extraction, and coreference. It extracts and aggregates knowledge elements across multiple languages and documents as well as provides visualizations of the results along three dimensions: temporal (as displayed in an event timeline), spatial (as displayed in an event heatmap), and relational (as displayed in entity-relation networks). For our system to further support users' analyses of causal sequences of events in complex situations, we also integrate a wide range of human moral value measures, independently derived from region-based survey, into the event heatmap. This system is publicly available as a docker container and a live demo.", "phrases": ["russian", "linking", "relation extraction"], "overall_score": 1.3023525493992048, "scores": [0.9550583767999404, 0.532637397831313, 0.5201346743205572], "rank_score": 0.6692768163172702} -{"id": "roesiger-etal-2018-bridging", "title": "Bridging resolution: Task definition, corpus resources and rule-based experiments", "abstract": "Recent work on bridging resolution has so far been based on the corpus ISNotes (Markert et al. 2012), as this was the only corpus available with unrestricted bridging annotation. Hou et al. 
2014's rule-based system currently achieves state-of-the-art performance on this corpus, as learning-based approaches suffer from the lack of available training data. Recently, a number of new corpora with bridging annotations have become available. To test the generalisability of the approach by Hou et al. 2014, we apply a slightly extended rule-based system to these corpora. Besides the expected out-of-domain effects, we also observe low performance on some of the in-domain corpora. Our analysis shows that this is the result of two very different phenomena being defined as bridging, namely referential and lexical bridging. We also report that filtering out gold or predicted coreferent anaphors before applying the bridging resolution system helps improve bridging resolution.", "phrases": ["definition", "anaphor", "bridging resolution"], "overall_score": 1.3916946800846905, "scores": [0.862584862494829, 0.5769763080971994, 0.5682298427977074], "rank_score": 0.6692636711299119} -{"id": "li-etal-2010-fast", "title": "Fast-Champollion: A Fast and Robust Sentence Alignment Algorithm", "abstract": "Sentence-level aligned parallel texts are important resources for a number of natural language processing (NLP) tasks and applications such as statistical machine translation and cross-language information retrieval. With the rapid growth of online parallel texts, efficient and robust sentence alignment algorithms become increasingly important. In this paper, we propose a fast and robust sentence alignment algorithm, i.e., Fast-Champollion, which employs a combination of both length-based and lexicon-based algorithm. By optimizing the process of splitting the input bilingual texts into small fragments for alignment, Fast-Champollion, as our extensive experiments show, is 4.0 to 5.1 times as fast as the current baseline methods such as Champollion (Ma, 2006) on short texts and achieves about 39.4 times as fast on long texts, and Fast-Champollion is as robust as Champollion.", "phrases": ["lexicon-based algorithm", "fast-champollion", "speed"], "overall_score": 1.076652778442134, "scores": [0.8840202364128562, 0.5982425186015348, 0.5246231958959122], "rank_score": 0.6689619836367676} -{"id": "al-twairesh-etal-2016-arasenti", "title": "AraSenTi: Large-Scale Twitter-Specific Arabic Sentiment Lexicons", "abstract": "Sentiment Analysis (SA) is an active research area nowadays due to the tremendous interest in aggregating and evaluating opinions being disseminated by users on the Web. SA of English has been thoroughly researched; however research on SA of Arabic has just flourished. Twitter is considered a powerful tool for disseminating information and a rich resource for opinionated text containing views on many different topics. In this paper we attempt to bridge a gap in Arabic SA of Twitter which is the lack of sentiment lexicons that are tailored for the informal language of Twitter. We generate two lexicons extracted from a large dataset of tweets using two approaches and evaluate their use in a simple lexicon based method. The evaluation is performed on internal and external datasets. The performance of these automatically generated lexicons was very promising, albeit the simple method used for classification. 
The best F-score obtained was 89.58% on the internal dataset and 63.1-64.7% on the external datasets.", "phrases": ["sentiment analysis", "twitter", "several nlp task"], "overall_score": 1.076625396961352, "scores": [0.9029214901793782, 0.5577048170400365, 0.5462086044791784], "rank_score": 0.6689449705661977} -{"id": "rubino-etal-2013-dcu", "title": "DCU-Symantec at the WMT 2013 Quality Estimation Shared Task", "abstract": "We describe the two systems submitted by the DCU-Symantec team to Task 1.1 of the WMT 2013 Shared Task on Quality Estimation for Machine Translation. Task 1.1 involves estimating postediting effort for English-Spanish translation pairs in the news domain. The two systems use a wide variety of features, of which the most effective are the word-alignment, n-gram frequency, language model, POS-tag-based and pseudoreferences ones. Both systems perform at a similarly high level in the two tasks of scoring and ranking translations, although there is some evidence that the systems are over-fitting to the training data.", "phrases": ["wmt", "quality estimation", "machine translation"], "overall_score": 0.927236786434203, "scores": [0.8443532647390753, 0.6118096763998356, 0.5504169291387546], "rank_score": 0.6688599567592218} -{"id": "glavas-etal-2014-hieve", "title": "HiEve: A Corpus for Extracting Event Hierarchies from News Stories", "abstract": "In news stories, event mentions denote real-world events of different spatial and temporal granularity. Narratives in news stories typically describe some real-world event of coarse spatial and temporal granularity along with its subevents. In this work, we present HiEve, a corpus for recognizing relations of spatiotemporal containment between events. In HiEve, the narratives are represented as hierarchies of events based on relations of spatiotemporal containment (i.e., superevent\u2015subevent relations). We describe the process of manual annotation of HiEve. Furthermore, we build a supervised classifier for recognizing spatiotemporal containment between events to serve as a baseline for future research. Preliminary experimental results are encouraging, with classifier performance reaching 58% F1-score, only 11% less than the inter-annotator agreement.", "phrases": ["event mention", "spatiotemporal containment", "hieve"], "overall_score": 1.603765992614197, "scores": [0.8256803630381885, 0.6033430361829993, 0.5774437023331486], "rank_score": 0.6688223671847787} -{"id": "elazar-etal-2021-amnesic", "title": "Amnesic Probing: Behavioral Explanation with Amnesic Counterfactuals", "abstract": "A growing body of work makes use of probing in order to investigate the working of neural models, often considered black boxes. Recently, an ongoing debate emerged surrounding the limitations of the probing paradigm. In this work, we point out the inability to infer behavioral conclusions from probing results, and offer an alternative method that focuses on how the information is being used, rather than on what information is encoded. Our method, Amnesic Probing, follows the intuition that the utility of a property for a given task can be assessed by measuring the influence of a causal intervention that removes it from the representation. Equipped with this new analysis tool, we can ask questions that were not possible before, for example, is part-of-speech information important for word prediction? We perform a series of analyses on BERT to answer these types of questions. 
Our findings demonstrate that conventional probing performance is not correlated to task importance, and we call for increased scrutiny of claims that draw behavioral or causal conclusions from probing results.", "phrases": ["bert", "amnesic probing", "change"], "overall_score": 1.854115664752545, "scores": [0.9318459085052193, 0.5391643912180121, 0.5351823063565494], "rank_score": 0.6687308686932601} -{"id": "lin-och-2004-automatic", "title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statistics", "abstract": "In this paper we describe two new objective automatic evaluation methods for machine translation. The first method is based on longest common subsequence between a candidate translation and a set of reference translations. Longest common subsequence takes into account sentence level structure similarity naturally and identifies longest co-occurring in-sequence n-grams automatically. The second method relaxes strict n-gram matching to skip-bigram matching. Skip-bigram is any pair of words in their sentence order. Skip-bigram cooccurrence statistics measure the overlap of skip-bigrams between a candidate translation and a set of reference translations. The empirical results show that both methods correlate with human judgments very well in both adequacy and fluency.", "phrases": ["subsequence", "sentence order", "smoothed bleu"], "overall_score": 1.8106270584320832, "scores": [0.9162243042490793, 0.5539974342270867, 0.5356056177099955], "rank_score": 0.6686091187287205} -{"id": "peng-etal-2019-distantly", "title": "Distantly Supervised Named Entity Recognition using Positive-Unlabeled Learning", "abstract": "In this work, we explore the way to perform named entity recognition (NER) using only unlabeled data and named entity dictionaries. To this end, we formulate the task as a positive-unlabeled (PU) learning problem and accordingly propose a novel PU learning algorithm to perform the task. We prove that the proposed algorithm can unbiasedly and consistently estimate the task loss as if there is fully labeled data. A key feature of the proposed method is that it does not require the dictionaries to label every entity within a sentence, and it even does not require the dictionaries to label all of the words constituting an entity. This greatly reduces the requirement on the quality of the dictionaries and makes our method generalize well with quite simple dictionaries. Empirical studies on four public NER datasets demonstrate the effectiveness of our proposed method. We have published the source code at .", "phrases": ["positive-unlabeled learning", "ner task loss", "entity type"], "overall_score": 1.8939208559589278, "scores": [0.9314324920443313, 0.5499701096689597, 0.5240102909643904], "rank_score": 0.6684709642258938} -{"id": "lee-etal-2022-direct", "title": "Direct Speech-to-Speech Translation With Discrete Units", "abstract": "We present a direct speech-to-speech translation (S2ST) model that translates speech from one language to speech in another language without relying on intermediate text generation. We tackle the problem by first applying a self-supervised discrete speech encoder on the target speech and then training a sequence-to-sequence speech-to-unit translation (S2UT) model to predict the discrete representations of the target speech. 
When target text transcripts are available, we design a joint speech and text training framework that enables the model to generate dual modality output (speech and text) simultaneously in the same inference pass. Experiments on the Fisher Spanish-English dataset show that the proposed framework yields improvement of 6.7 BLEU compared with a baseline direct S2ST model that predicts spectrogram features. When trained without any text transcripts, our model performance is comparable to models that predict spectrograms and are trained with text supervision, showing the potential of our system for translation between unwritten languages.", "phrases": ["text generation", "s2ut", "discrete representation", "direct speech-to-speech translation"], "overall_score": 1.0758477294446351, "scores": [1.021860014652438, 0.5672496493699787, 0.5430061710476279, 0.5417312792123128], "rank_score": 0.6684617785705894} -{"id": "zhang-etal-2007-tree", "title": "A tree-to-tree alignment-based model for statistical machine translation", "abstract": "This paper presents a novel statistical machine translation (SMT) model that uses tree-to-tree alignment between a source parse tree and a target parse tree. The model is formally a probabilistic synchronous tree-substitution grammar (STSG) that is a collection of aligned elementary tree pairs with mapping probabilities (which are automatically learned from word-aligned bi-parsed parallel texts). Unlike previous syntax-based SMT models, this new model supports multi-level global structure distortion of the tree typology and can fully utilize the source and target parse tree structure features, which gives our system more expressive power and flexibility. The experimental results on the HIT bi-parsed text show that our method performs significantly better than Pharaoh, a state-of-the-art phrase-based SMT system, and other syntax-based methods, such as the synchronous CFG-based method on the small dataset.", "phrases": ["statistical machine translation", "stsg", "smt system"], "overall_score": 1.3899657215680448, "scores": [0.9380400286170095, 0.5383370625187385, 0.5289195623761203], "rank_score": 0.6684322178372893} -{"id": "buechel-hahn-2018-representation", "title": "Representation Mapping: A Novel Approach to Generate High-Quality Multi-Lingual Emotion Lexicons", "abstract": "In the past years, sentiment analysis has increasingly shifted attention to representational frameworks more expressive than semantic polarity (being positive, negative or neutral). However, these richer formats (like Basic Emotions or Valence-Arousal-Dominance, and variants therefrom), rooted in psychological research, tend to proliferate the number of representation schemes for emotion encoding. Thus, a large amount of representationally incompatible emotion lexicons has been developed by various research groups adopting one or the other emotion representation format. As a consequence, the reusability of these resources decreases as does the comparability of systems using them. In this paper, we propose to solve this dilemma by methods and tools which map different representation formats onto each other for the sake of mutual compatibility and interoperability of language resources. We present the first large-scale investigation of such representation mappings for four typologically diverse languages and find evidence that our approach produces (near-)gold quality emotion lexicons, even in cross-lingual settings. 
Finally, we use our models to create new lexicons for eight typologically diverse languages.", "phrases": ["emotion lexicon", "cross-lingual setting", "representation mapping"], "overall_score": 1.1976648701199295, "scores": [0.8056083152666274, 0.6082918871649615, 0.5913882707515675], "rank_score": 0.6684294910610521} -{"id": "litvak-last-2008-graph", "title": "Graph-Based Keyword Extraction for Single-Document Summarization", "abstract": "In this paper, we introduce and compare between two novel approaches, supervised and unsupervised, for identifying the keywords to be used in extractive summarization of text documents. Both our approaches are based on the graph-based syntactic representation of text and web documents, which enhances the traditional vector-space model by taking into account some structural document features. In the supervised approach, we train classification algorithms on a summarized collection of documents with the purpose of inducing a keyword identification model. In the unsupervised approach, we run the HITS algorithm on document graphs under the assumption that the top-ranked nodes should represent the document keywords. Our experiments on a collection of benchmark summaries show that given a set of summarized training documents, the supervised classification provides the highest keyword identification accuracy, while the highest F-measure is reached with a simple degree-based ranking. In addition, it is sufficient to perform only the first iteration of HITS rather than running it to its convergence.", "phrases": ["summarization", "hits algorithm", "top-ranked node"], "overall_score": 1.0755886925757348, "scores": [0.8993812204847856, 0.5669207010002599, 0.5386005682587618], "rank_score": 0.6683008299146026} -{"id": "li-etal-2016-fast", "title": "Fast Coupled Sequence Labeling on Heterogeneous Annotations via Context-aware Pruning", "abstract": "The recently proposed coupled sequence labeling is shown to be able to effectively exploit multiple labeled data with heterogeneous annotations but suffer from severe inefficiency problem due to the large bundled tag space (Li et al., 2015). In their case study of part-of-speech (POS) tagging, Li et al. (2015) manually design context-free tag-to-tag mapping rules with a lot of effort to reduce the tag space. This paper proposes a context-aware pruning approach that performs token-wise constraints on the tag space based on contextual evidences, making the coupled approach efficient enough to be applied to the more complex task of joint word segmentation (WS) and POS tagging for the first time. Experiments show that using the large-scale People Daily as auxiliary heterogeneous data, the coupled approach can improve F-score by 95.55 \u2212 94.88 = 0.67% on WS, and by 90.58 \u2212 89.49 = 1.09% on joint WS&POS on Penn Chinese Treebank. All codes are released at http://hlt.suda.edu.cn/~zhli.", "phrases": ["coupled sequence labeling", "joint word segmentation", "heterogeneous data"], "overall_score": 0.9264361562693342, "scores": [0.8950641759514447, 0.5663174224151517, 0.5434656741584055], "rank_score": 0.6682824241750006} -{"id": "vasserman-2004-identifying", "title": "Identifying Chemical Names in Biomedical Text: an Investigation of Substring Co-occurrence Based Approaches", "abstract": "We investigate various strategies for finding chemicals in biomedical text using substring co-occurrence information. The goal is to build a system from readily available data with minimal human involvement. 
Our models are trained from a dictionary of chemical names and general biomedical text. We investigated several strategies including Naive Bayes classifiers and several types of N-gram models. We introduced a new way of interpolating N-grams that does not require tuning any parameters. We also found the task to be similar to Language Identification.", "phrases": ["chemical", "biomedical text", "n-gram"], "overall_score": 0.7341698408092122, "scores": [0.8938276449000134, 0.5692397002457146, 0.5417432191870489], "rank_score": 0.6682701881109256} -{"id": "zhao-etal-2015-improving", "title": "Improving Chinese Grammatical Error Correction with Corpus Augmentation and Hierarchical Phrase-based Statistical Machine Translation", "abstract": "In this study, we describe our system submitted to the 2nd Workshop on Natural Language Processing Techniques for Educational Applications (NLP-TEA-2) shared task on Chinese grammatical error diagnosis (CGED). We use a statistical machine translation method already applied to several similar tasks (Brockett et al., 2006; Chiu et al., 2013; Zhao et al., 2014). In this research, we examine corpus-augmentation and explore alternative translation models including syntax-based and hierarchical phrase-based models. Finally, we show variations using different combinations of these factors.", "phrases": ["grammatical error correction", "corpus-augmentation", "hierarchical phrase-based model"], "overall_score": 1.3001959431726542, "scores": [0.8877528832207925, 0.579702753816174, 0.5370499828199384], "rank_score": 0.6681685399523016} -{"id": "yaghoobzadeh-etal-2021-increasing", "title": "Increasing Robustness to Spurious Correlations using Forgettable Examples", "abstract": "Neural NLP models tend to rely on spurious correlations between labels and input features to perform their tasks. Minority examples, i.e., examples that contradict the spurious correlations present in the majority of data points, have been shown to increase the out-of-distribution generalization of pre-trained language models. In this paper, we first propose using example forgetting to find minority examples without prior knowledge of the spurious correlations present in the dataset. Forgettable examples are instances either learned and then forgotten during training or never learned. We show empirically how these examples are related to minorities in our training sets. Then, we introduce a new approach to robustify models by fine-tuning our models twice, first on the full training data and second on the minorities only. We obtain substantial improvements in out-of-distribution generalization when applying our approach to the MNLI, QQP and FEVER datasets.", "phrases": ["spurious correlation", "forgettable example", "minority", "prior knowledge"], "overall_score": 1.1968843659554589, "scores": [0.9341188624155757, 0.6320806135775542, 0.5563932789846836, 0.5493827785933604], "rank_score": 0.6679938833927935} -{"id": "lin-ji-2019-attentive", "title": "An Attentive Fine-Grained Entity Typing Model with Latent Type Representation", "abstract": "We propose a fine-grained entity typing model with a novel attention mechanism and a hybrid type classifier. We advance existing methods in two aspects: feature extraction and type prediction. To capture richer contextual information, we adopt contextualized word representations instead of fixed word embeddings used in previous work. 
In addition, we propose a two-step mention-aware attention mechanism to enable the model to focus on important words in mentions and contexts. We also present a hybrid classification method beyond binary relevance to exploit type inter-dependency with latent type representation. Instead of independently predicting each type, we predict a low-dimensional vector that encodes latent type features and reconstruct the type vector from this latent representation. Experiment results on multiple data sets show that our model significantly advances the state-of-the-art on fine-grained entity typing, obtaining up to 6.1% and 5.5% absolute gains in macro averaged F-score and micro averaged F-score respectively.", "phrases": ["fine-grained entity", "latent type representation", "mention"], "overall_score": 1.0749155336174576, "scores": [0.8411879733050603, 0.6106688923776461, 0.5517908525292338], "rank_score": 0.6678825727373133} -{"id": "chu-etal-2013-accurate", "title": "Accurate Parallel Fragment Extraction from Quasi\u2013Comparable Corpora using Alignment Model and Translation Lexicon", "abstract": "Although parallel sentences rarely exist in quasi\u2010comparable corpora, there could be parallel fragments that are also helpful for statistical machine translation (SMT). Previous studies cannot accurately extract parallel fragments from quasi\u2010comparable corpora. To solve this problem, we propose an accurate parallel fragment extraction system that uses an alignment model to locate the parallel fragment candidates, and uses an accurate lexicon filter to identify the truly parallel ones. Experimental results indicate that our system can accurately extract parallel fragments, and our proposed method significantly outperforms a state\u2010of\u2010the\u2010art approach. Furthermore, we investigate the factors that may affect the performance of our system in detail.", "phrases": ["alignment model", "parallel sentence", "quasi-comparable corpora"], "overall_score": 0.9258575696496469, "scores": [0.8445057782918028, 0.5916551229897002, 0.5674342851730775], "rank_score": 0.6678650621515269} -{"id": "kim-etal-2019-image", "title": "Image Captioning with Very Scarce Supervised Data: Adversarial Semi-Supervised Learning Approach", "abstract": "Constructing an organized dataset comprised of a large number of images and several captions for each image is a laborious task, which requires vast human effort. On the other hand, collecting a large number of images and sentences separately may be immensely easier. In this paper, we develop a novel data-efficient semi-supervised framework for training an image captioning model. We leverage massive unpaired image and caption data by learning to associate them. To this end, our proposed semi-supervised learning method assigns pseudo-labels to unpaired samples via Generative Adversarial Networks to learn the joint distribution of image and caption. To evaluate, we construct scarcely-paired COCO dataset, a modified version of MS COCO caption dataset. 
The empirical results show the effectiveness of our method compared to several strong baselines, especially when the amount of paired samples is scarce.", "phrases": ["caption", "generative adversarial networks", "image"], "overall_score": 0.9258162945659931, "scores": [0.8679861739570408, 0.5750771084479155, 0.5604425830118751], "rank_score": 0.667835288472277} -{"id": "schwitter-etal-2003-ecole", "title": "ECOLE: a look-ahead editor of controlled language", "abstract": "This paper presents ECOLE, a look-ahead text editor that supports authors writing seemingly informal specifications in PENG, a computer-processable controlled natural language. ECOLE communicates via a socket interface with the controlled language processor of the PENG system. After each word form entered, the lookahead editor displays appropriate lookahead categories. These syntactic hints tell the author what kind of word or syntactic structure can follow the current input string and reduce thereby the cognitive burden to learn and remember the controlled language. While the author types the text word by word and adds unknown content words on the fly to the lexicon, a discourse representation structure and a paraphrase are built up dynamically for the text in a completely compositional manner. The arising specification can be checked automatically for consistency and informativity with the help of third-party reasoning services.", "phrases": ["look-ahead editor", "syntactic structure", "input string", "ecole", "writing process"], "overall_score": 0.9256850971517173, "scores": [0.8996245852639652, 0.8146997590749301, 0.5558917587309766, 0.5377652658047336, 0.5307218788393968], "rank_score": 0.6677406495428004} -{"id": "kim-schubert-2016-high", "title": "High-Fidelity Lexical Axiom Construction from Verb Glosses", "abstract": "This paper presents a rule-based approach to constructing lexical axioms from WordNet verb entries in an expressive semantic representation, Episodic Logic (EL). EL differs from other representations in being syntactically close to natural language and covering phenomena such as generalized quantification, modification, and intensionality while still allowing highly effective inference. The presented approach uses a novel preprocessing technique to improve parsing precision of coordinators and incorporates frames, hand-tagged word senses, and examples from WordNet to achieve highly consistent semantic interpretations. EL allows the full content of glosses to be incorporated into the formal lexical axioms, without sacrificing interpretive accuracy, or verb-to-verb inference accuracy on a standard test set. Evaluation of semantic parser performance is based on EL-match, introduced here as a generalization of the smatch metric for semantic structure accuracy. On gloss parses, the approach achieves an EL-match F1 score of 0.83, and a whole-axiom F1 score of 0.45; verb entailment identification based on extracted axioms is competitive with the state-of-the-art.", "phrases": ["axiom", "wordnet", "great expressivity"], "overall_score": 0.9256734155762301, "scores": [0.8922165591749204, 0.5834845858833605, 0.5274955241435827], "rank_score": 0.6677322230672879} -{"id": "liu-hwa-2018-heuristically", "title": "Heuristically Informed Unsupervised Idiom Usage Recognition", "abstract": "Many idiomatic expressions can be interpreted figuratively or literally depending on their contexts. This paper proposes an unsupervised learning method for recognizing the intended usages of idioms. 
We treat the usages as a latent variable in probabilistic models and train them in a linguistically motivated feature space. Crucially, we show that distributional semantics is a helpful heuristic for distinguishing the literal usage of idioms, giving us a way to formulate a literal usage metric to estimate the likelihood that the idiom is intended literally. This information then serves as a form of distant supervision to guide the unsupervised training process for the probabilistic models. Experiments show that our overall model performs competitively against supervised methods.", "phrases": ["idiom", "latent variable", "feature space", "literal usage metric"], "overall_score": 1.1960811242495448, "scores": [0.9726711766400964, 0.5917520061757076, 0.5620665022055654, 0.5436926576227655], "rank_score": 0.6675455856610337} -{"id": "yang-etal-2019-simple", "title": "Simple and Effective Text Matching with Richer Alignment Features", "abstract": "In this paper, we present a fast and strong neural approach for general purpose text matching applications. We explore what is sufficient to build a fast and well-performed text matching model and propose to keep three key features available for inter-sequence alignment: original point-wise features, previous aligned features, and contextual features while simplifying all the remaining components. We conduct experiments on four well-studied benchmark datasets across tasks of natural language inference, paraphrase identification and answer selection. The performance of our model is on par with the state-of-the-art on all datasets with much fewer parameters and the inference speed is at least 6 times faster compared with similarly performed ones.", "phrases": ["text matching", "original point-wise feature", "contextual feature", "connection"], "overall_score": 1.3879080076369263, "scores": [0.9882415904226876, 0.5829453452250375, 0.5514544261822582, 0.547129304607184], "rank_score": 0.6674426666092917} -{"id": "aker-gaizauskas-2009-summary", "title": "Summary Generation for Toponym-referenced Images using Object Type Language Models", "abstract": "This paper presents a novel approach to automatic captioning of toponym-referenced images. The automatic captioning procedure works by summarizing multiple web-documents that contain information related to an image\u2019s location. Our summarizer can generate both query-based and language model-biased multidocument summaries. The models are created from large numbers of existing articles pertaining to places of the same \u201cobject type\u201d. Evaluation relative to human written captions shows that when language models are used to bias the summarizer the summaries score more highly than the non-biased ones.", "phrases": ["image", "language model", "captioning", "web-document"], "overall_score": 1.0740462518103346, "scores": [1.068559404214419, 0.5392975901057254, 0.5311886533534739, 0.5303241826566648], "rank_score": 0.6673424575825707} -{"id": "kazama-etal-2010-bayesian", "title": "A Bayesian Method for Robust Estimation of Distributional Similarities", "abstract": "Existing word similarity measures are not robust to data sparseness since they rely only on the point estimation of words' context profiles obtained from a limited amount of data. This paper proposes a Bayesian method for robust distributional word similarities. The method uses a distribution of context profiles obtained by Bayesian estimation and takes the expectation of a base similarity measure under that distribution. 
When the context profiles are multinomial distributions, the priors are Dirichlet, and the base measure is the Bhattacharyya coefficient, we can derive an analytical form that allows efficient calculation. For the task of word similarity estimation using a large amount of Web data in Japanese, we show that the proposed measure gives better accuracies than other well-known similarity measures.", "phrases": ["bayesian method", "similarity measure", "data sparseness", "expectation"], "overall_score": 1.0738560884496484, "scores": [0.9115271494563691, 0.5924409303235422, 0.5831819137210195, 0.5817472162722817], "rank_score": 0.6672243024433031} -{"id": "pavlick-etal-2014-language", "title": "The Language Demographics of Amazon Mechanical Turk", "abstract": "We present a large scale study of the languages spoken by bilingual workers on Mechanical Turk (MTurk). We establish a methodology for determining the language skills of anonymous crowd workers that is more robust than simple surveying. We validate workers' self-reported language skill claims by measuring their ability to correctly translate words, and by geolocating workers to see if they reside in countries where the languages are likely to be spoken. Rather than posting a one-off survey, we posted paid tasks consisting of 1,000 assignments to translate a total of 10,000 words in each of 100 languages. Our study ran for several months, and was highly visible on the MTurk crowdsourcing platform, increasing the chances that bilingual workers would complete it. Our study was useful both to create bilingual dictionaries and to act as census of the bilingual speakers on MTurk. We use this data to recommend languages with the largest speaker populations as good candidates for other researchers who want to develop crowdsourced, multilingual technologies. To further demonstrate the value of creating data via crowdsourcing, we hire workers to create bilingual parallel corpora in six Indian languages, and use them to train statistical machine translation systems.", "phrases": ["mechanical turk", "mturk", "world"], "overall_score": 1.466021968213218, "scores": [0.8968599813017943, 0.5714663609322241, 0.5333197116125118], "rank_score": 0.6672153512821768} -{"id": "pfeiffer-etal-2021-unks", "title": "UNKs Everywhere: Adapting Multilingual Language Models to New Scripts", "abstract": "Massively multilingual language models such as multilingual BERT offer state-of-the-art cross-lingual transfer performance on a range of NLP tasks. However, due to limited capacity and large differences in pretraining data sizes, there is a profound performance gap between resource-rich and resource-poor target languages. The ultimate challenge is dealing with under-resourced languages not covered at all by the models and written in scripts unseen during pretraining. In this work, we propose a series of novel data-efficient methods that enable quick and effective adaptation of pretrained multilingual models to such low-resource languages and unseen scripts. Relying on matrix factorization, our methods capitalize on the existing latent knowledge about multiple languages already available in the pretrained model's embedding matrix. Furthermore, we show that learning of the new dedicated embedding matrix in the target language can be improved by leveraging a small number of vocabulary items (i.e., the so-called lexically overlapping tokens) shared between mBERT's and target language vocabulary. 
Our adaptation techniques offer substantial performance gains for languages with unseen scripts. We also demonstrate that they can yield improvements for low-resource languages written in scripts covered by the pretrained model.", "phrases": ["new script", "multilingual model", "tokenizer"], "overall_score": 1.6577666292077815, "scores": [0.8232811717824822, 0.6208090708527335, 0.5573128636133194], "rank_score": 0.6671343687495117} -{"id": "li-etal-2019-hint", "title": "Hint-Based Training for Non-Autoregressive Machine Translation", "abstract": "Due to the unparallelizable nature of the autoregressive factorization, AutoRegressive Translation (ART) models have to generate tokens sequentially during decoding and thus suffer from high inference latency. Non-AutoRegressive Translation (NART) models were proposed to reduce the inference time, but could only achieve inferior translation accuracy. In this paper, we propose a novel approach to leveraging the hints from hidden states and word alignments to help the training of NART models. The results achieve significant improvement over previous NART models for the WMT14 En-De and De-En datasets and are even comparable to a strong LSTM-based ART baseline but one order of magnitude faster in inference.", "phrases": ["hidden state", "non-autoregressive model", "teacher"], "overall_score": 1.760394302459567, "scores": [0.9219685509218613, 0.5536985217107698, 0.5254955577178929], "rank_score": 0.6670542101168414} -{"id": "altantawy-etal-2011-fast", "title": "Fast Yet Rich Morphological Analysis", "abstract": "Implementations of models of morphologically rich languages such as Arabic typically achieve speed and small memory footprint at the cost of abandoning linguistically abstract and elegant representations. We present a solution to modeling rich morphologies that is both fast and based on linguistically rich representations. In our approach, we convert a linguistically complex and abstract implementation of Arabic verbs in finite-state machinery into a simple precompiled tabular representation.", "phrases": ["morphology", "rich representation", "abstract implementation", "finite-state machinery", "end"], "overall_score": 1.0734473936931637, "scores": [0.9642782980457818, 0.6414356457554632, 0.5879371222753416, 0.5870598853079668, 0.5541408791830856], "rank_score": 0.6669703661135278} -{"id": "stevenson-greenwood-2005-semantic", "title": "A Semantic Approach to IE Pattern Induction", "abstract": "This paper presents a novel algorithm for the acquisition of Information Extraction patterns. The approach makes the assumption that useful patterns will have similar meanings to those already identified as relevant. Patterns are compared using a variation of the standard vector space model in which information from an ontology is used to capture semantic similarity. Evaluation shows this algorithm performs well when compared with a previously reported document-centric approach.", "phrases": ["variation", "candidate pattern", "sentence filtering", "supervised training", "event extraction system"], "overall_score": 1.9273995596247249, "scores": [1.1768577844765211, 0.5580133559027669, 0.5366650115929805, 0.5345132959382054, 0.5281229718817035], "rank_score": 0.6668344839584355} -{"id": "yang-etal-2019-exploiting", "title": "Exploiting Noisy Data in Distant Supervision Relation Classification", "abstract": "Distant supervision has achieved great progress on the relation classification task. However, it still suffers from the noisy labeling problem. 
Different from previous works that underutilize noisy data which inherently characterize the property of classification, in this paper, we propose RCEND, a novel framework to enhance Relation Classification by Exploiting Noisy Data. First, an instance discriminator with reinforcement learning is designed to split the noisy data into correctly labeled data and incorrectly labeled data. Second, we learn a robust relation classifier in a semi-supervised learning way, whereby the correctly and incorrectly labeled data are treated as labeled and unlabeled data respectively. The experimental results show that our method outperforms the state-of-the-art models.", "phrases": ["noisy data", "distant supervision", "labeling problem"], "overall_score": 0.7325801310932752, "scores": [0.8233546707965149, 0.6173703168139498, 0.5597445282951247], "rank_score": 0.6668231719685299} -{"id": "tack-etal-2017-human", "title": "Human and Automated CEFR-based Grading of Short Answers", "abstract": "This paper is concerned with the task of automatically assessing the written proficiency level of non-native (L2) learners of English. Drawing on previous research on automated L2 writing assessment following the Common European Framework of Reference for Languages (CEFR), we investigate the possibilities and difficulties of deriving the CEFR level from short answers to open-ended questions, which has not yet been subjected to numerous studies to date. The object of our study is twofold: to examine the intricacy involved with both human and automated CEFR-based grading of short answers. On the one hand, we describe the compilation of a learner corpus of short answers graded with CEFR levels by three certified Cambridge examiners. We mainly observe that, although the shortness of the answers is reported as undermining a clear-cut evaluation, the length of the answer does not necessarily correlate with inter-examiner disagreement. On the other hand, we explore the development of a soft-voting system for the automated CEFR-based grading of short answers and draw tentative conclusions about its use in a computer-assisted testing (CAT) setting.", "phrases": ["grading", "short answer", "cefr"], "overall_score": 0.9243644148677869, "scores": [0.8775308188290031, 0.5847484053545605, 0.5380847117724132], "rank_score": 0.6667879786519922} -{"id": "zhang-etal-2010-cross", "title": "Cross-Lingual Latent Topic Extraction", "abstract": "Probabilistic latent topic models have recently enjoyed much success in extracting and analyzing latent topics in text in an unsupervised way. One common deficiency of existing topic models, though, is that they would not work well for extracting cross-lingual latent topics simply because words in different languages generally do not co-occur with each other. In this paper, we propose a way to incorporate a bilingual dictionary into a probabilistic topic model so that we can apply topic models to extract shared latent topics in text data of different languages. Specifically, we propose a new topic model called Probabilistic Cross-Lingual Latent Semantic Analysis (PCLSA) which extends the Probabilistic Latent Semantic Analysis (PLSA) model by regularizing its likelihood function with soft constraints defined based on a bilingual dictionary. 
Both qualitative and quantitative experimental results show that the PCLSA model can effectively extract cross-lingual latent topics from multilingual text data.", "phrases": ["topic model", "different language", "text data", "pclsa model"], "overall_score": 1.1946890634683056, "scores": [0.9701451932447956, 0.5846128928385247, 0.5776625054687966, 0.5346540554327585], "rank_score": 0.6667686617462188} -{"id": "kong-etal-2021-multilingual", "title": "Multilingual Neural Machine Translation with Deep Encoder and Multiple Shallow Decoders", "abstract": "Recent work in multilingual translation advances translation quality surpassing bilingual baselines using deep transformer models with increased capacity. However, the extra latency and memory costs introduced by this approach may make it unacceptable for efficiency-constrained applications. It has recently been shown for bilingual translation that using a deep encoder and shallow decoder (DESD) can reduce inference latency while maintaining translation quality, so we study similar speed-accuracy trade-offs for multilingual translation. We find that for many-to-one translation we can indeed increase decoder speed without sacrificing quality using this approach, but for one-to-many translation, shallow decoders cause a clear quality drop. To ameliorate this drop, we propose a deep encoder with multiple shallow decoders (DEMSD) where each shallow decoder is responsible for a disjoint subset of target languages. Specifically, the DEMSD model with 2-layer decoders is able to obtain a 1.8x speedup on average compared to a standard transformer model with no drop in translation quality.", "phrases": ["multiple shallow decoder", "translation quality", "one-to-many translation"], "overall_score": 0.9242936713609152, "scores": [0.9373230104109724, 0.5429788619704634, 0.5199089716147328], "rank_score": 0.6667369479987229} -{"id": "heinz-rogers-2010-estimating", "title": "Estimating Strictly Piecewise Distributions", "abstract": "Strictly Piecewise (SP) languages are a subclass of regular languages which encode certain kinds of long-distance dependencies that are found in natural languages. Like the classes in the Chomsky and Subregular hierarchies, there are many independently converging characterizations of the SP class (Rogers et al., to appear). Here we define SP distributions and show that they can be efficiently estimated from positive data.", "phrases": ["subclass", "characterization", "k-piecewise language"], "overall_score": 1.1945900355415142, "scores": [0.849670813498656, 0.5867495699092031, 0.5637197962159941], "rank_score": 0.6667133932079512} -{"id": "paranjape-etal-2020-information", "title": "An Information Bottleneck Approach for Controlling Conciseness in Rationale Extraction", "abstract": "Decisions of complex models for language understanding can be explained by limiting the inputs they are provided to a relevant subsequence of the original text \u2014 a rationale. Models that condition predictions on a concise rationale, while being more interpretable, tend to be less accurate than models that are able to use the entire context. In this paper, we show that it is possible to better manage the trade-off between concise explanations and high task accuracy by optimizing a bound on the Information Bottleneck (IB) objective. Our approach jointly learns an explainer that predicts sparse binary masks over input sentences without explicit supervision, and an end-task predictor that considers only the residual sentences. 
Using IB, we derive a learning objective that allows direct control of mask sparsity levels through a tunable sparse prior. Experiments on the ERASER benchmark demonstrate significant gains over previous work for both task performance and agreement with human rationales. Furthermore, we find that in the semi-supervised setting, a modest amount of gold rationales (25% of training examples with gold masks) can close the performance gap with a model that uses the full input.", "phrases": ["information bottleneck", "rationale extraction", "explainer"], "overall_score": 1.9628638799658062, "scores": [0.9160839024098442, 0.5580481016423968, 0.5257707555439035], "rank_score": 0.6666342531987148} -{"id": "putra-etal-2021-parsing", "title": "Parsing Argumentative Structure in English-as-Foreign-Language Essays", "abstract": "This paper presents a study on parsing the argumentative structure in English-as-foreign-language (EFL) essays, which are inherently noisy. The parsing process consists of two steps, linking related sentences and then labelling their relations. We experiment with several deep learning architectures to address each task independently. In the sentence linking task, a biaffine model performed the best. In the relation labelling task, a fine-tuned BERT model performed the best. Two sentence encoders are employed, and we observed that non-fine-tuning models generally performed better when using Sentence-BERT as opposed to BERT encoder. We trained our models using two types of parallel texts: original noisy EFL essays and those improved by annotators, then evaluate them on the original essays. The experiment shows that an end-to-end in-domain system achieved an accuracy of .341. On the other hand, the cross-domain system achieved 94% performance of the in-domain system. This signals that well-written texts can also be useful to train argument mining system for noisy texts.", "phrases": ["argumentative structure", "essay", "language learner"], "overall_score": 1.0727382563671561, "scores": [0.9094937800217724, 0.5464517420270049, 0.54364374090966], "rank_score": 0.6665297543194791} -{"id": "britz-etal-2017-massive", "title": "Massive Exploration of Neural Machine Translation Architectures", "abstract": "Neural Machine Translation (NMT) has shown remarkable progress over the past few years, with production systems now being deployed to end-users. As the field is moving rapidly, it has become unclear which elements of NMT architectures have a significant impact on translation quality. In this work, we present a large-scale analysis of the sensitivity of NMT architectures to common hyperparameters. We report empirical results and variance numbers for several hundred experimental runs, corresponding to over 250,000 GPU hours on a WMT English to German translation task. Our experiments provide practical insights into the relative importance of factors such as embedding size, network depth, RNN cell type, residual connections, attention mechanism, and decoding heuristics. 
As part of this contribution, we also release an open-source NMT framework in TensorFlow to make it easy for others to reproduce our results and perform their own experiments.", "phrases": ["exploration", "translation quality", "hyperparameter", "gpu hour", "sequence-to-sequence model"], "overall_score": 1.962341187157693, "scores": [1.1533495028684564, 0.5581343256122038, 0.5512879275726166, 0.5358285110099995, 0.5336834057218603], "rank_score": 0.6664567345570274} -{"id": "agarwal-etal-2013-sinnet", "title": "SINNET: Social Interaction Network Extractor from Text", "abstract": "In this paper we present a demo of our system: Social Interaction Network Extractor from Text (SINNET). SINNET is able to extract a social network from unstructured text. Nodes in the network are people and links are social events.", "phrases": ["unstructured text", "sinnet", "social network extraction"], "overall_score": 0.9238605618758554, "scores": [0.7891031387022118, 0.6402200406459138, 0.5699503972886562], "rank_score": 0.666424525545594} -{"id": "johannsen-etal-2016-joint", "title": "Joint part-of-speech and dependency projection from multiple sources", "abstract": "Most previous work on annotation projection has been limited to a subset of Indo-European languages, using only a single source language, and projecting annotation for one task at a time. In contrast, we present an Integer Linear Programming (ILP) algorithm that simultaneously projects annotation for multiple tasks from multiple source languages, relying on parallel corpora available for hundreds of languages. When training POS taggers and dependency parsers on jointly projected POS tags and syntactic dependencies using our algorithm, we obtain better performance than a standard approach on 20/23 languages using one parallel corpus; and 18/27 languages using another.", "phrases": ["multiple source", "dependency parser", "pos tag"], "overall_score": 1.38535229225479, "scores": [0.853716580532675, 0.5932390494652434, 0.5516852519222253], "rank_score": 0.6662136273067145} -{"id": "de-gispert-etal-2009-minimum", "title": "Minimum Bayes Risk Combination of Translation Hypotheses from Alternative Morphological Decompositions", "abstract": "We describe a simple strategy to achieve translation performance improvements by combining output from identical statistical machine translation systems trained on alternative morphological decompositions of the source language. Combination is done by means of Minimum Bayes Risk decoding over a shared N-best list. When translating into English from two highly inflected languages such as Arabic and Finnish we obtain significant improvements over simply selecting the best morphological decomposition.", "phrases": ["morphological decomposition", "source language", "minimum bayes risk"], "overall_score": 1.1932158012797238, "scores": [0.8695430833855999, 0.5723157930449365, 0.5559803789586891], "rank_score": 0.6659464184630752} -{"id": "gangula-etal-2019-detecting", "title": "Detecting Political Bias in News Articles Using Headline Attention", "abstract": "Language is a powerful tool which can be used to state the facts as well as express our views and perceptions. Most of the time, we find a subtle bias towards or against someone or something. When it comes to politics, media houses and journalists are known to create bias by shrewd means such as misinterpreting reality and distorting viewpoints towards some parties.
This misinterpretation on a large scale can lead to the production of biased news and conspiracy theories. Automating bias detection in newspaper articles could be a good challenge for research in NLP. We propose a headline attention network for this bias detection task. Our model has two distinctive characteristics: (i) it has a structure that mirrors a person's way of reading a news article; (ii) it has an attention mechanism applied to the article based on its headline, enabling it to attend to more critical content to predict bias. As the required datasets were not available, we created a dataset comprising 1329 news articles collected from various Telugu newspapers and marked them for bias towards a particular political party. The experiments conducted on it demonstrated that our model outperforms various baseline methods by a substantial margin.", "phrases": ["headline attention", "telugu newspaper", "political party"], "overall_score": 1.2958695065331811, "scores": [0.8705017474389121, 0.581348859052886, 0.5459849675129278], "rank_score": 0.6659451913349086} -{"id": "jain-2016-question", "title": "Question Answering over Knowledge Base using Factual Memory Networks", "abstract": "In the task of question answering, Memory Networks have recently been shown to be quite effective towards complex reasoning as well as scalability, in spite of the limited range of topics covered in training data. In this paper, we introduce the Factual Memory Network, which learns to answer questions by extracting and reasoning over relevant facts from a Knowledge Base. Our system generates distributed representations of questions and the KB in the same word vector space, extracts a subset of initial candidate facts, and then tries to find a path to the answer entity using multi-hop reasoning and refinement. Additionally, we improve the run-time efficiency of our model using various computational heuristics.", "phrases": ["knowledge base", "factual memory network", "reasoning"], "overall_score": 1.1929911292484474, "scores": [0.8855195915102514, 0.5715703089475735, 0.5403731793869683], "rank_score": 0.665821026614931} -{"id": "kontonatsios-etal-2014-using", "title": "Using a Random Forest Classifier to Compile Bilingual Dictionaries of Technical Terms from Comparable Corpora", "abstract": "We describe a machine learning approach, a Random Forest (RF) classifier, that is used to automatically compile bilingual dictionaries of technical terms from comparable corpora. We evaluate the RF classifier against a popular term alignment method, namely context vectors, and we report an improvement in translation accuracy. As an application, we use the automatically extracted dictionary in combination with a trained Statistical Machine Translation (SMT) system to more accurately translate unknown terms. The dictionary extraction method described in this paper is freely available.", "phrases": ["random forest", "comparable corpora", "context vector"], "overall_score": 0.7313956311179178, "scores": [0.8644232674284584, 0.6060799345442169, 0.5267317789083881], "rank_score": 0.6657449936270211} -{"id": "tu-etal-2014-enhancing", "title": "Enhancing Grammatical Cohesion: Generating Transitional Expressions for SMT", "abstract": "Transitional expressions provide glue that holds ideas together in a text and enhance the logical organization, which together help improve readability of a text. However, in most current statistical machine translation (SMT) systems, the outputs of compound-complex sentences still lack proper transitional expressions.
As a result, the translations are often hard to read and understand. To address this issue, we propose two novel models to encourage generating such transitional expressions by introducing the source compound-complex sentence structure (CSS). Our models include a CSS-based translation model, which generates new CSS-based translation rules, and a generative transfer model, which encourages producing transitional expressions during decoding. The two models are integrated into a hierarchical phrase-based translation system to evaluate their effectiveness. The experimental results show that significant improvements are achieved on various test data, while the translations are more cohesive and smooth.", "phrases": ["grammatical cohesion", "transitional expression", "translation rule"], "overall_score": 0.9229126886137669, "scores": [0.91094107848191, 0.5485050045336965, 0.5377762555392658], "rank_score": 0.6657407795182908} -{"id": "qadir-riloff-2011-classifying", "title": "Classifying Sentences as Speech Acts in Message Board Posts", "abstract": "This research studies the text genre of message board forums, which contain a mixture of expository sentences that present factual information and conversational sentences that include communicative acts between the writer and readers. Our goal is to create sentence classifiers that can identify whether a sentence contains a speech act, and can recognize sentences containing four different speech act classes: Commissives, Directives, Expressives, and Representatives. We conduct experiments using a wide variety of features, including lexical and syntactic features, speech act word lists from external resources, and domain-specific semantic class features. We evaluate our results on a collection of message board posts in the domain of veterinary medicine.", "phrases": ["speech act", "message board post", "sequence tagger"], "overall_score": 1.0713156924654041, "scores": [0.8929203101070498, 0.579546583904401, 0.5244707030005805], "rank_score": 0.6656458656706771} -{"id": "setiawan-etal-2020-variational", "title": "Variational Neural Machine Translation with Normalizing Flows", "abstract": "Variational Neural Machine Translation (VNMT) is an attractive framework for modeling the generation of target translations, conditioned not only on the source sentence but also on some latent random variables. The latent variable modeling may introduce useful statistical dependencies that can improve translation accuracy. Unfortunately, learning informative latent variables is non-trivial, as the latent space can be prohibitively large, and the latent codes are prone to be ignored by many translation models at training time. Previous works impose strong assumptions on the distribution of the latent code and limit the choice of the NMT architecture. In this paper, we propose to apply the VNMT framework to the state-of-the-art Transformer and introduce a more flexible approximate posterior based on normalizing flows.
We demonstrate the efficacy of our proposal under both in-domain and out-of-domain conditions, significantly outperforming strong baselines.", "phrases": ["normalizing flow", "translation model", "approximate posterior"], "overall_score": 0.9225570619306603, "scores": [0.840651665996932, 0.5900794181576604, 0.5657216631220917], "rank_score": 0.665484249092228} -{"id": "chang-etal-2013-constrained", "title": "A Constrained Latent Variable Model for Coreference Resolution", "abstract": "Coreference resolution is a well-known clustering task in Natural Language Processing. In this paper, we describe the Latent Left Linking model (L3M), a novel, principled, and linguistically motivated latent structured prediction approach to coreference resolution. We show that L3M admits efficient inference and can be augmented with knowledge-based constraints; we also present a fast stochastic gradient-based learning method. Experiments on ACE and Ontonotes data show that L3M and its constrained version, CL3M, are more accurate than several state-of-the-art approaches as well as some structured prediction models proposed in the literature.", "phrases": ["coreference resolution", "prediction model", "mention", "latent antecedent"], "overall_score": 1.5955183052351134, "scores": [0.9612985623106121, 0.5795911625644018, 0.5746115320627911, 0.5460300006356348], "rank_score": 0.6653828143933599} -{"id": "jayasinghe-etal-2016-csiro", "title": "CSIRO Data61 at the WNUT Geo Shared Task", "abstract": "In this paper, we describe CSIRO Data61's participation in the Geolocation shared task at the Workshop for Noisy User-generated Text. Our approach was to use ensemble methods to capitalise on four component methods: heuristics based on metadata, a label propagation method, timezone text classifiers, and an information retrieval approach. The ensembles we explored focused on examining the role of language technologies in geolocation prediction and also in examining the use of hard voting and cascading ensemble methods. Based on the accuracy of city-level predictions, our systems were the best performing submissions at this year's shared task. Furthermore, when estimating the latitude and longitude of a user, our median error distance was accurate to within 30 kilometers.", "phrases": ["cascade", "csiro data61", "ensemble approach"], "overall_score": 1.0706702391703538, "scores": [0.9262349466750893, 0.5446003435914568, 0.5248991787029609], "rank_score": 0.6652448229898357} -{"id": "vashishth-etal-2018-reside", "title": "RESIDE: Improving Distantly-Supervised Neural Relation Extraction using Side Information", "abstract": "Distantly-supervised Relation Extraction (RE) methods train an extractor by automatically aligning relation instances in a Knowledge Base (KB) with unstructured text. In addition to relation instances, KBs often contain other relevant side information, such as aliases of relations (e.g., founded and co-founded are aliases for the relation founderOfCompany). RE models usually ignore such readily available side information. In this paper, we propose RESIDE, a distantly-supervised neural relation extraction method which utilizes additional side information from KBs for improved relation extraction. It uses entity type and relation alias information for imposing soft constraints while predicting relations. RESIDE employs Graph Convolution Networks (GCN) to encode syntactic information from text and improves performance even when limited side information is available.
Through extensive experiments on benchmark datasets, we demonstrate RESIDE's effectiveness. We have made RESIDE's source code available to encourage reproducible research.", "phrases": ["neural relation extraction", "graph convolution network", "encode syntactic information"], "overall_score": 1.88465760325968, "scores": [0.9475316888103242, 0.5249695377928416, 0.5231031007731969], "rank_score": 0.6652014424587875} -{"id": "joty-etal-2016-joint", "title": "Joint Learning with Global Inference for Comment Classification in Community Question Answering", "abstract": "This paper addresses the problem of comment classification in community Question Answering. Following the state of the art, we approach the task with a global inference process to exploit the information of all comments in the answer-thread in the form of a fully connected graph. Our contribution comprises two novel joint learning models that are on-line and integrate inference within learning. The first one jointly learns two node- and edge-level MaxEnt classifiers with stochastic gradient descent and integrates the inference step with loopy belief propagation. The second model is an instance of fully connected pairwise CRFs (FCCRF). The FCCRF model significantly outperforms all other approaches and yields the best results on the task to date. Crucial elements for its success are the global normalization and an Ising-like edge potential.", "phrases": ["comment", "community question answering", "joint learning model"], "overall_score": 1.2939032891477962, "scores": [0.9158803857470416, 0.5549178038367606, 0.52400607685566], "rank_score": 0.6649347554798207} -{"id": "iso-etal-2016-forecasting", "title": "Forecasting Word Model: Twitter-based Influenza Surveillance and Prediction", "abstract": "Because of the increasing popularity of social media, much information has been shared on the internet, enabling social media users to understand various real-world events. Particularly, social media-based infectious disease surveillance has attracted increasing attention. In this work, we specifically examine influenza: a common topic of communication on social media. The fundamental theory of this work is that several words, such as symptom words (fever, headache, etc.), appear in advance of flu epidemic occurrence. Consequently, past word occurrence can contribute to estimation of the number of current patients. To employ such forecasting words, one can first estimate the optimal time lag for each word based on their cross correlation. Then one can build a linear model consisting of word frequencies at different time points for nowcasting and for forecasting influenza epidemics. In experiments using 7.7 million tweets from August 2012 \u2013 January 2016, the proposed model achieved the best nowcasting performance to date (correlation ratio 0.93) and practically sufficient forecasting performance (correlation ratio 0.91 in 1-week future prediction, and correlation ratio 0.77 in 3-week future prediction). This report is the first in the relevant literature to describe a model enabling prediction of future epidemics using Twitter.", "phrases": ["influenza", "infectious disease surveillance", "time lag", "nowcasting", "twitter"], "overall_score": 1.1913900715283925, "scores": [1.1611256076605905, 0.573119324424321, 0.5382548460837046, 0.5318669595998996, 0.5202705586697162], "rank_score": 0.6649274592876464} -{"id": "louvan-magnini-2020-simple", "title": "Simple is Better!
Lightweight Data Augmentation for Low Resource Slot Filling and Intent Classification", "abstract": "Neural-based models have achieved outstanding performance on slot filling and intent classification, when fairly large in-domain training data are available. However, as new domains are frequently added, creating sizeable data is expensive. We show that lightweight augmentation, a set of augmentation methods involving word span and sentence level operations, alleviates data scarcity problems. Our experiments on limited data settings show that lightweight augmentation yields significant performance improvement on slot filling on the ATIS and SNIPS datasets, and achieves competitive performance with respect to more complex, state-of-the-art, augmentation approaches. Furthermore, lightweight augmentation is also beneficial when combined with pre-trained LM-based models, as it improves BERT-based joint intent and slot filling models.", "phrases": ["slot filling", "intent classification", "augmentation method"], "overall_score": 0.7303752802354543, "scores": [0.8817019188614609, 0.5706627469232568, 0.5420840249019206], "rank_score": 0.6648162302288795} -{"id": "rangarajan-sridhar-etal-2013-segmentation", "title": "Segmentation Strategies for Streaming Speech Translation", "abstract": "The study presented in this work is a first effort at real-time speech translation of TED talks, a compendium of public talks with different speakers addressing a variety of topics. We address the goal of achieving a system that balances translation accuracy and latency. In order to improve ASR performance for our diverse data set, adaptation techniques such as constrained model adaptation and vocal tract length normalization are found to be useful. In order to improve machine translation (MT) performance, techniques that could be employed in real time, such as monotonic and partial translation retention, are found to be of use. We also experiment with inserting text segmenters of various types between ASR and MT in a series of real-time translation experiments. Among other results, our experiments demonstrate that a good segmentation is useful, and a novel conjunction-based segmentation strategy improves translation quality nearly as much as other strategies such as comma-based segmentation. It was also found to be important to synchronize various pipeline components in order to minimize latency.", "phrases": ["speech translation", "segmentation strategy", "policy"], "overall_score": 1.7050718689944508, "scores": [0.8347071078830424, 0.6332326352230246, 0.526335735317315], "rank_score": 0.664758492807794} -{"id": "bawden-etal-2020-parbleu", "title": "ParBLEU: Augmenting Metrics with Automatic Paraphrases for the WMT'20 Metrics Shared Task", "abstract": "We describe parBLEU, parCHRF++, and parESIM, which augment baseline metrics with automatically generated paraphrases produced by PRISM (Thompson and Post, 2020a), a multilingual neural machine translation system. We build on recent work studying how to improve BLEU by using diverse automatically paraphrased references (Bawden et al., 2020), extending experiments to the multilingual setting for the WMT2020 metrics shared task and for three base metrics. We compare their capacity to exploit up to 100 additional synthetic references. We find that gains are possible when using additional, automatically paraphrased references, although they are not systematic.
However, segment-level correlations, particularly into English, are improved for all three metrics and even with higher numbers of paraphrased references.", "phrases": ["parbleu", "sentence-level embedding", "hypothesis"], "overall_score": 0.9214657543231053, "scores": [0.8562717557835927, 0.5860093327903574, 0.5518100225924778], "rank_score": 0.664697037055476} -{"id": "beinborn-etal-2015-candidate", "title": "Candidate evaluation strategies for improved difficulty prediction of language tests", "abstract": "Language proficiency tests are a useful tool for evaluating learner progress, if the test difficulty fits the level of the learner. In this work, we describe a generalized framework for test difficulty prediction that is applicable to several languages and test types. In addition, we develop two ranking strategies for candidate evaluation inspired by automatic solving methods based on language model probability and semantic relatedness. These ranking strategies lead to significant improvements for the difficulty prediction of cloze tests.", "phrases": ["difficulty prediction", "language learning", "comprehension question"], "overall_score": 1.2933319629896722, "scores": [0.8869895098187212, 0.5814354961706077, 0.5254984497532961], "rank_score": 0.6646411519142084} -{"id": "al-khatib-etal-2020-exploiting", "title": "Exploiting Personal Characteristics of Debaters for Predicting Persuasiveness", "abstract": "Predicting the persuasiveness of arguments has applications as diverse as writing assistance, essay scoring, and advertising. While clearly relevant to the task, the personal characteristics of an argument's source and audience have not yet been fully exploited toward automated persuasiveness prediction. In this paper, we model debaters' prior beliefs, interests, and personality traits based on their previous activity, without dependence on explicit user profiles or questionnaires. Using a dataset of over 60,000 argumentative discussions, comprising more than three million individual posts collected from the subreddit r/ChangeMyView, we demonstrate that our modeling of debater's characteristics enhances the prediction of argument persuasiveness as well as of debaters' resistance to persuasion.", "phrases": ["debater", "persuasiveness", "belief"], "overall_score": 1.190829083948125, "scores": [0.8998919952517829, 0.5703001269984864, 0.5236509762229381], "rank_score": 0.6646143661577358} -{"id": "hu-etal-2021-word", "title": "Word Graph Guided Summarization for Radiology Findings", "abstract": "Radiology reports play a critical role in communicating medical findings to physicians. In each report, the impression section summarizes essential radiology findings. In clinical practice, writing impression is highly demanded yet time-consuming and prone to errors for radiologists. Therefore, automatic impression generation has emerged as an attractive research direction to facilitate such clinical practice. Existing studies mainly focused on introducing salient word information to the general text summarization framework to guide the selection of the key content in radiology findings. However, for this task, a model needs not only capture the important words in findings but also accurately describe their relations so as to generate high-quality impressions. 
In this paper, we propose a novel method for automatic impression generation, where a word graph is constructed from the findings to record the critical words and their relations, then a Word Graph guided Summarization model (WGSum) is designed to generate impressions with the help of the word graph. Experimental results on two datasets, OpenI and MIMIC-CXR, confirm the validity and effectiveness of our proposed approach, where the state-of-the-art results are achieved on both datasets. Further experiments are also conducted to analyze the impact of different graph designs on the performance of our method.", "phrases": ["radiology finding", "summarization model", "word graph"], "overall_score": 0.7301476320188338, "scores": [0.8590822564458218, 0.5866012084930188, 0.5481435827378791], "rank_score": 0.6646090158922399} -{"id": "lin-etal-2019-alpacatag", "title": "AlpacaTag: An Active Learning-based Crowd Annotation Framework for Sequence Tagging", "abstract": "We introduce an open-source web-based data annotation framework (AlpacaTag) for sequence tagging tasks such as named-entity recognition (NER). The distinctive advantages of AlpacaTag are three-fold. 1) Active intelligent recommendation: dynamically suggesting annotations and sampling the most informative unlabeled instances with a back-end active learned model; 2) Automatic crowd consolidation: enhancing real-time inter-annotator agreement by merging inconsistent labels from multiple annotators; 3) Real-time model deployment: users can deploy their models in downstream systems while new annotations are being made. AlpacaTag is a comprehensive solution for sequence labeling tasks, ranging from rapid tagging with recommendations powered by active learning and auto-consolidation of crowd annotations to real-time model deployment.", "phrases": ["annotator", "sequence tagging", "active learning"], "overall_score": 1.4602350783063869, "scores": [0.8716120411449935, 0.5681520470453612, 0.5539807843661225], "rank_score": 0.6645816241854924} -{"id": "huang-etal-2005-mining", "title": "Mining Key Phrase Translations from Web Corpora", "abstract": "Key phrases are usually among the most information-bearing linguistic structures. Translating them correctly will improve many natural language processing applications. We propose a new framework to mine key phrase translations from web corpora. We submit a source phrase to a search engine as a query, then expand queries by adding the translations of topic-relevant hint words from the returned snippets. We retrieve mixed-language web pages based on the expanded queries. Finally, we extract the key phrase translation from the second-round returned web page snippets with phonetic, semantic and frequency-distance features. We achieve 46% phrase translation accuracy when using the top 10 returned snippets, and 80% accuracy with 165 snippets. Both results are significantly better than several existing methods.", "phrases": ["phrase translation", "web corpora", "query", "topic-relevant hint word", "frequency-distance feature"], "overall_score": 1.5297012360443099, "scores": [0.931023661525297, 0.6455952602758911, 0.5978508130635666, 0.5793575274186143, 0.5678767665897676], "rank_score": 0.6643408057746274} -{"id": "pasupat-liang-2014-zero", "title": "Zero-shot Entity Extraction from Web Pages", "abstract": "In order to extract entities of a fine-grained category from semi-structured data in web pages, existing information extraction systems rely on seed examples or redundancy across multiple web pages.
In this paper, we consider a new zero-shot learning task of extracting entities specified by a natural language query (in place of seeds) given only a single web page. Our approach defines a log-linear model over latent extraction predicates, which select lists of entities from the web page. The main challenge is to define features on widely varying candidate entity lists. We tackle this by", "phrases": ["web page", "semi-structured data", "natural language query", "zero-shot entity extraction"], "overall_score": 0.9209059370220507, "scores": [1.0126168009576542, 0.558184523083845, 0.546797629461228, 0.5395739034311058], "rank_score": 0.6642932142334583} -{"id": "beigman-klebanov-beigman-2010-game", "title": "A Game-Theoretic Model of Metaphorical Bargaining", "abstract": "We present a game-theoretic model of bargaining over a metaphor in the context of political communication, find its equilibrium, and use it to rationalize observed linguistic behavior. We argue that game theory is well suited for modeling discourse as a dynamic resulting from a number of conflicting pressures, and suggest applications of interest to computational linguists.", "phrases": ["game-theoretic model", "metaphor", "political communication"], "overall_score": 0.7297855644634592, "scores": [0.890619158851512, 0.5751152420004463, 0.5271039425499889], "rank_score": 0.6642794478006491} -{"id": "parnell-etal-2021-rewardsofsum", "title": "RewardsOfSum: Exploring Reinforcement Learning Rewards for Summarisation", "abstract": "To date, most abstractive summarisation models have relied on variants of the negative log-likelihood (NLL) as their training objective. In some cases, reinforcement learning has been added to train the models with an objective that is closer to their evaluation measures (e.g. ROUGE). However, the reward function to be used within the reinforcement learning approach can play a key role for performance and is still partially unexplored. For this reason, in this paper, we propose two reward functions for the task of abstractive summarisation: the first function, referred to as RwB-Hinge, dynamically selects the samples for the gradient update. The second function, nicknamed RISK, leverages a small pool of strong candidates to inform the reward. In the experiments, we probe the proposed approach by fine-tuning an NLL pre-trained model over nine summarisation datasets of diverse size and nature. The experimental results show a consistent improvement over the negative log-likelihood baselines.", "phrases": ["reinforcement learning", "summarisation", "objective"], "overall_score": 0.7296769336060948, "scores": [0.8827007246027487, 0.5704511738761167, 0.5393898047202961], "rank_score": 0.6641805677330538} -{"id": "zhang-etal-2021-towards", "title": "Towards Navigation by Reasoning over Spatial Configurations", "abstract": "We deal with the navigation problem where the agent follows natural language instructions while observing the environment. Focusing on language understanding, we show the importance of spatial semantics in grounding navigation instructions into visual perceptions. We propose a neural agent that uses the elements of spatial configurations and investigate their influence on the navigation agent's reasoning ability. Moreover, we model the sequential execution order and align visual objects with spatial configurations in the instruction. Our neural agent improves strong baselines on the seen environments and shows competitive performance on the unseen environments. 
Additionally, the experimental results demonstrate that explicit modeling of spatial semantic elements in the instructions can improve the grounding and spatial reasoning of the model.", "phrases": ["reasoning", "spatial configuration", "semantic structure"], "overall_score": 0.9206779059091068, "scores": [0.8706908245820943, 0.5789075267098759, 0.542787822374686], "rank_score": 0.664128724555552} -{"id": "kwiatkowski-etal-2013-scaling", "title": "Scaling Semantic Parsers with On-the-Fly Ontology Matching", "abstract": "We consider the challenge of learning semantic parsers that scale to large, open-domain problems, such as question answering with Freebase. In such settings, the sentences cover a wide variety of topics and include many phrases whose meaning is difficult to represent in a fixed target ontology. For example, even simple phrases such as \u2018daughter\u2019 and \u2018number of people living in\u2019 cannot be directly represented in Freebase, whose ontology instead encodes facts about gender, parenthood, and population. In this paper, we introduce a new semantic parsing approach that learns to resolve such ontological mismatches. The parser is learned from question-answer pairs, uses a probabilistic CCG to build linguistically motivated logical-form meaning representations, and includes an ontology matching model that adapts the output logical forms for each target ontology. Experiments demonstrate state-of-the-art performance on two benchmark semantic parsing datasets, including a nine-point accuracy improvement on a recent Freebase QA corpus.", "phrases": ["ontology matching", "open-domain problem", "current semantic parser"], "overall_score": 2.5980187243361708, "scores": [0.8683846535793925, 0.6014911006551621, 0.5224581768782824], "rank_score": 0.6641113103709456} -{"id": "avramidis-etal-2011-evaluate", "title": "Evaluate with Confidence Estimation: Machine ranking of translation outputs using grammatical features", "abstract": "We present a pilot study on an evaluation method which is able to rank translation outputs with no reference translation, given only their source sentence. The system employs a statistical classifier trained upon existing human rankings, using several features derived from analysis of both the source and the target sentences. Development experiments on one language pair showed that the method has a considerably good correlation with human ranking when using features obtained from a PCFG parser.", "phrases": ["ranking", "translation output", "parsing-based feature"], "overall_score": 1.2922545913095176, "scores": [0.8781138531662889, 0.5921965533319714, 0.5219520706827218], "rank_score": 0.6640874923936607} -{"id": "federico-etal-2020-speech", "title": "From Speech-to-Speech Translation to Automatic Dubbing", "abstract": "We present enhancements to a speech-to-speech translation pipeline in order to perform automatic dubbing. Our architecture features neural machine translation generating output of preferred length, prosodic alignment of the translation with the original speech segments, neural text-to-speech with fine-tuning of the duration of each utterance, and, finally, audio rendering that enriches the text-to-speech output with background noise and reverberation extracted from the original audio.
We report and discuss results of a first subjective evaluation of automatic dubbing of excerpts of TED Talks from English into Italian, which measures the perceived naturalness of automatic dubbing and the relative importance of each proposed enhancement.", "phrases": ["automatic dubbing", "prosodic alignment", "duration"], "overall_score": 0.9204162761646274, "scores": [0.8807238738028887, 0.5705257057599363, 0.5405704162014668], "rank_score": 0.6639399985880973} -{"id": "mukherjee-joshi-2013-sentiment", "title": "Sentiment Aggregation using ConceptNet Ontology", "abstract": "Sentiment analysis of reviews traditionally ignored the association between the features of the given product domain. The hierarchical relationship between the features of a product and their associated sentiment that influence the polarity of a review is not dealt with very well. In this work, we analyze the influence of the hierarchical relationship between the product attributes and their sentiments on the overall review polarity. ConceptNet is used to automatically create a product specific ontology that depicts the hierarchical relationship between the product attributes. The ontology tree is annotated with feature-specific polarities which are aggregated bottom-up, exploiting the ontological information, to find the overall review polarity. We propose a weakly supervised system that achieves a reasonable performance improvement over the baseline without requiring any tagged training data.", "phrases": ["product", "ontology tree", "feature-specific polarity", "sentiment aggregation"], "overall_score": 0.9203624166919996, "scores": [0.967477375292161, 0.6084626825129165, 0.5519318599125357, 0.5277326710466455], "rank_score": 0.6639011471910647} -{"id": "moeller-etal-2020-igt2p", "title": "IGT2P: From Interlinear Glossed Texts to Paradigms", "abstract": "An intermediate step in the linguistic analysis of an under-documented language is to find and organize inflected forms that are attested in natural speech. From this data, linguists generate unseen inflected word forms in order to test hypotheses about the language's inflectional patterns and to complete inflectional paradigm tables. To get the data linguists spend many hours manually creating interlinear glossed texts (IGTs). We introduce a new task that speeds this process and automatically generates new morphological resources for natural language processing systems: IGT-to-paradigms (IGT2P). IGT2P generates entire morphological paradigms from IGT input. We show that existing morphological reinflection models can solve the task with 21% to 64% accuracy, depending on the language. We further find that (i) having a language expert spend only a few hours cleaning the noisy IGT data improves performance by as much as 21 percentage points, and (ii) POS tags, which are generally considered a necessary part of NLP morphological reinflection input, have no effect on the accuracy of the models considered here.", "phrases": ["interlinear glossed text", "paradigms", "igt"], "overall_score": 1.1894809687119294, "scores": [0.8219906009838025, 0.6145266197167711, 0.5550686854552248], "rank_score": 0.6638619687185995} -{"id": "huang-etal-2019-matters", "title": "What Matters for Neural Cross-Lingual Named Entity Recognition: An Empirical Analysis", "abstract": "Building named entity recognition (NER) models for languages that do not have much training data is a challenging task. 
While recent work has shown promising results on cross-lingual transfer from high-resource languages, it is unclear what knowledge is transferred. In this paper, we first propose a simple and efficient neural architecture for cross-lingual NER. Experiments show that our model achieves competitive performance with the state-of-the-art. We further explore how transfer learning works for cross-lingual NER on two transferable factors: sequential order and multilingual embedding. Our results shed light on future research for improving cross-lingual NER.", "phrases": ["entity recognition", "cross-lingual transfer", "multilingual embedding"], "overall_score": 0.9201711414653342, "scores": [0.8706654159853376, 0.5741660346663233, 0.5464580631901005], "rank_score": 0.6637631712805873} -{"id": "michael-etal-2018-crowdsourcing", "title": "Crowdsourcing Question-Answer Meaning Representations", "abstract": "We introduce Question-Answer Meaning Representations (QAMRs), which represent the predicate-argument structure of a sentence as a set of question-answer pairs. We develop a crowdsourcing scheme to show that QAMRs can be labeled with very little training, and gather a dataset with over 5,000 sentences and 100,000 questions. A qualitative analysis demonstrates that the crowd-generated question-answer pairs cover the vast majority of predicate-argument relationships in existing datasets (including PropBank, NomBank, and QA-SRL) along with many previously under-resourced ones, including implicit arguments and relations. We also report baseline models for question generation and answering, and summarize a recent approach for using QAMR labels to improve an Open IE system. These results suggest the freely available QAMR data and annotation scheme should support significant future work.", "phrases": ["qamr", "predicate-argument structure", "qa-driven meaning representation", "crowdsourcing"], "overall_score": 1.4583933500142439, "scores": [0.8923282066587568, 0.6338105052113763, 0.5765730344176183, 0.5522619237816241], "rank_score": 0.6637434175173439} -{"id": "wang-etal-2020-joint", "title": "Joint Constrained Learning for Event-Event Relation Extraction", "abstract": "Understanding natural language involves recognizing how multiple event mentions structurally and temporally interact with each other. In this process, one can induce event complexes that organize multi-granular events with temporal order and membership relations interweaving among them. Due to the lack of jointly labeled data for these relational phenomena and the restriction on the structures they articulate, we propose a joint constrained learning framework for modeling event-event relations. Specifically, the framework enforces logical constraints within and across multiple temporal and subevent relations of events by converting these constraints into differentiable learning objectives. We show that our joint constrained learning approach effectively compensates for the lack of jointly labeled data, and outperforms SOTA methods on benchmarks for both temporal relation extraction and event hierarchy construction, replacing a commonly used but more expensive global inference process. 
We also present a promising case study to show the effectiveness of our approach to inducing event complexes on an external corpus.", "phrases": ["event-event relation", "relation extraction", "learning framework"], "overall_score": 1.5278412096543736, "scores": [0.9010233595015161, 0.5696818098292815, 0.5198938504010532], "rank_score": 0.6635330065772836} -{"id": "van-schijndel-schuler-2013-analysis", "title": "An Analysis of Frequency- and Memory-Based Processing Costs", "abstract": "The frequency of words and syntactic constructions has been observed to have a substantial effect on language processing. This begs the question of what causes certain constructions to be more or less frequent. A theory of grounding (Phillips, 2010) would suggest that cognitive limitations might cause languages to develop frequent constructions in such a way as to avoid processing costs. This paper studies how current theories of working memory fit into theories of language processing and what influence memory limitations may have over reading times. Measures of such limitations are evaluated on eye-tracking data and the results are compared with predictions made by different theories of processing.", "phrases": ["processing cost", "reading time", "memory effect"], "overall_score": 1.067369851908715, "scores": [0.8918546363757607, 0.5511362609731143, 0.546591633910937], "rank_score": 0.663194177086604} -{"id": "visweswariah-etal-2011-word", "title": "A Word Reordering Model for Improved Machine Translation", "abstract": "Preordering of source side sentences has proved to be useful in improving statistical machine translation. Most work has used a parser in the source language along with rules to map the source language word order into the target language word order. The requirement to have a source language parser is a major drawback, which we seek to overcome in this paper. Instead of using a parser and then using rules to order the source side sentence, we learn a model that can directly reorder source side sentences to match target word order using a small parallel corpus with high-quality word alignments. Our model learns pairwise costs of a word immediately preceding another word. We use the Lin-Kernighan heuristic to find the best source reordering efficiently during training and testing and show that it suffices to provide good quality reordering. \n \nWe show gains in translation performance based on our reordering model for translating from Hindi to English, Urdu to English (with a public dataset), and English to Hindi. For English to Hindi we show that our technique achieves better performance than a method that uses rules applied to the source side English parse.", "phrases": ["parallel corpus", "structural syntactic information", "part-of-speech tag", "arbitrary permutation"], "overall_score": 1.7010285212271359, "scores": [0.9406742519381045, 0.5913897580840606, 0.5644446229176199, 0.5562197981752829], "rank_score": 0.663182107778767} -{"id": "reimers-etal-2016-temporal", "title": "Temporal Anchoring of Events for the TimeBank Corpus", "abstract": "Today\u2019s extraction of temporal information for events heavily depends on annotated temporal links. These so-called TLINKs capture the relation between pairs of event mentions and time expressions. One problem is that the number of possible TLINKs grows quadratically with the number of event mentions; therefore, most annotation studies concentrate on links for mentions in the same or in adjacent sentences.
However, as our annotation study shows, for 58% of the event mentions this restriction results in less precise information about when the event took place. This paper proposes a new annotation scheme to anchor events in time. Not only is the annotation effort much lower, as it scales linearly with the number of events; it also gives a more precise anchoring of when the events happened, as the complete document can be taken into account. Using this scheme, we annotated a subset of the TimeBank Corpus and compare our results to other annotation schemes. Additionally, we present some baseline experiments to automatically anchor events in time. Our annotation scheme, the automated system and the annotated corpus are publicly available.", "phrases": ["timebank corpus", "adjacent sentence", "annotation scheme"], "overall_score": 1.378843207101924, "scores": [0.9164797187695641, 0.5392503672990702, 0.5335201709807452], "rank_score": 0.66308341901646} -{"id": "chambers-2012-labeling", "title": "Labeling Documents with Timestamps: Learning from their Time Expressions", "abstract": "Temporal reasoners for document understanding typically assume that a document's creation date is known. Algorithms to ground relative time expressions and order events often rely on this timestamp to assist the learner. Unfortunately, the timestamp is not always known, particularly on the Web. This paper addresses the task of automatic document timestamping, presenting two new models that incorporate rich linguistic features about time. The first is a discriminative classifier with new features extracted from the text's time expressions (e.g., 'since 1999'). This model alone improves on previous generative models by 77%. The second model learns probabilistic constraints between time expressions and the unknown document time. Imposing these learned constraints on the discriminative model further improves its accuracy. Finally, we present a new experiment design that facilitates easier comparison by future work.", "phrases": ["time expression", "web", "discriminative model"], "overall_score": 1.3785336386323723, "scores": [0.8877994863055756, 0.567149956778151, 0.5338542010698153], "rank_score": 0.6629345480511807} -{"id": "ontanon-etal-2022-making", "title": "Making Transformers Solve Compositional Tasks", "abstract": "Several studies have reported the inability of Transformer models to generalize compositionally, a key type of generalization in many NLP tasks such as semantic parsing. In this paper we explore the design space of Transformer models, showing that the inductive biases given to the model by several design decisions significantly impact compositional generalization. We identified Transformer configurations that generalize compositionally significantly better than previously reported in the literature in many compositional tasks. We achieve state-of-the-art results in a semantic parsing compositional generalization benchmark (COGS), and a string edit operation composition benchmark (PCFG).", "phrases": ["transformer", "compositional generalization", "cogs"], "overall_score": 0.9189209544989381, "scores": [0.8010317312132447, 0.619628211482814, 0.5679241133407973], "rank_score": 0.6628613520122854} -{"id": "zhang-etal-2014-character", "title": "Character-Level Chinese Dependency Parsing", "abstract": "Recent work on Chinese analysis has led to large-scale annotations of the internal structures of words, enabling character-level analysis of Chinese syntactic structures.
In this paper, we investigate the problem of character-level Chinese dependency parsing, building dependency trees over characters. Character-level information can benefit downstream applications by offering flexible granularities for word segmentation while improving word-level dependency parsing accuracies. We present novel adaptations of two major shift-reduce dependency parsing algorithms to character-level parsing. Experimental results on the Chinese Treebank demonstrate improved performances over word-based parsing methods.", "phrases": ["chinese dependency parsing", "character", "joint model", "transition-based framework"], "overall_score": 1.5261334330117875, "scores": [0.9765084581901198, 0.5862625511473036, 0.5670973217338128, 0.5212969833491047], "rank_score": 0.6627913286050853} -{"id": "ovrelid-etal-2018-lia", "title": "The LIA Treebank of Spoken Norwegian Dialects", "abstract": "This article presents the LIA treebank of transcribed spoken Norwegian dialects. It consists of dialect recordings made in the period 1950\u20131990, which have been digitised, transcribed, and subsequently annotated with morphological and dependency-style syntactic analysis as part of the LIA (Language Infrastructure made Accessible) project at the University of Oslo. In this article, we describe the LIA material of dialect recordings and its transcription, transliteration and further morphosyntactic annotation. We focus in particular on the extension of the native NDT annotation scheme to spoken language phenomena, such as pauses and various types of disfluencies, and present the subsequent conversion of the treebank to the Universal Dependencies scheme. The treebank currently consists of 13,608 tokens, distributed over 1396 segments taken from three different dialects of spoken Norwegian. The LIA treebank annotation is an on-going effort and future releases will extend the current data set.", "phrases": ["lia treebank", "norwegian", "dialect", "dependency-style syntactic analysis"], "overall_score": 0.9186885275710011, "scores": [0.9502517100792246, 0.5721145073808735, 0.5707792959500059, 0.5576292522864299], "rank_score": 0.6626936914241335} -{"id": "sennhauser-berwick-2018-evaluating", "title": "Evaluating the Ability of LSTMs to Learn Context-Free Grammars", "abstract": "While long short-term memory (LSTM) neural net architectures are designed to capture sequence information, human language is generally composed of hierarchical structures. This raises the question as to whether LSTMs can learn hierarchical structures. We explore this question with a well-formed bracket prediction task using two types of brackets modeled by an LSTM. Demonstrating that such a system is learnable by an LSTM is the first step in demonstrating that the entire class of CFLs is also learnable. We observe that the model requires exponential memory in terms of the number of characters and embedded depth, where a sub-linear memory should suffice. Still, the model does more than memorize the training input. It learns how to distinguish between relevant and irrelevant information. On the other hand, we also observe that the model does not generalize well. We conclude that LSTMs do not learn the relevant underlying context-free rules, suggesting that the good overall performance is instead attained by an efficient way of evaluating nuisance variables.
LSTMs are a way to quickly reach good results for many natural language tasks, but to understand and generate natural language one has to investigate other concepts that can make more direct use of natural language's structural nature.", "phrases": ["context-free grammar", "dyck language", "position"], "overall_score": 1.5258495665998324, "scores": [0.9290191091577535, 0.5374126853742917, 0.5215723464342812], "rank_score": 0.6626680469887755} -{"id": "wang-etal-2019-hmeae", "title": "HMEAE: Hierarchical Modular Event Argument Extraction", "abstract": "Existing event extraction methods classify each argument role independently, ignoring the conceptual correlations between different argument roles. In this paper, we propose a Hierarchical Modular Event Argument Extraction (HMEAE) model, to provide effective inductive bias from the concept hierarchy of event argument roles. Specifically, we design a neural module network for each basic unit of the concept hierarchy, and then hierarchically compose relevant unit modules with logical operations into a role-oriented modular network to classify a specific argument role. As many argument roles share the same high-level unit module, their correlation can be utilized to extract specific event arguments better. Experiments on real-world datasets show that HMEAE can effectively leverage useful knowledge from the concept hierarchy and significantly outperform the state-of-the-art baselines. The source code can be obtained from .", "phrases": ["concept hierarchy", "event argument role", "hmeae"], "overall_score": 0.9182861529943543, "scores": [0.8149796254723772, 0.6427046295755975, 0.5295260635149636], "rank_score": 0.6624034395209795} -{"id": "niu-etal-2018-bi", "title": "Bi-Directional Neural Machine Translation with Synthetic Parallel Data", "abstract": "Despite impressive progress in high-resource settings, Neural Machine Translation (NMT) still struggles in low-resource and out-of-domain scenarios, often failing to match the quality of phrase-based translation. We propose a novel technique that combines back-translation and multilingual NMT to improve performance in these difficult cases. Our technique trains a single model for both directions of a language pair, allowing us to back-translate source or target monolingual data without requiring an auxiliary model. We then continue training on the augmented parallel data, enabling a cycle of improvement for a single model that can incorporate any source, target, or parallel data to improve both translation directions. As a byproduct, these models can reduce training and deployment costs significantly compared to uni-directional models. Extensive experiments show that our technique outperforms standard back-translation in low-resource scenarios, improves quality on cross-domain tasks, and effectively reduces costs across the board.", "phrases": ["parallel data", "back-translation", "direction"], "overall_score": 1.186825233300191, "scores": [0.9259016855078713, 0.5327619206128167, 0.5284757175713114], "rank_score": 0.6623797745639998} -{"id": "cui-etal-2018-deep", "title": "Deep Attentive Sentence Ordering Network", "abstract": "In this paper, we propose a novel deep attentive sentence ordering network (referred to as ATTOrderNet) which integrates a self-attention mechanism with LSTMs in the encoding of input sentences. It enables us to capture global dependencies among sentences regardless of their input order and obtains a reliable representation of the sentence set.
With this representation, a pointer network is exploited to generate an ordered sequence. The proposed model is evaluated on Sentence Ordering and Order Discrimination tasks. The extensive experimental results demonstrate its effectiveness and superiority to the state-of-the-art methods.", "phrases": ["attordernet", "self-attention mechanism", "global dependency"], "overall_score": 1.5248250605706324, "scores": [0.9109050383212228, 0.5384504716213355, 0.5373138190782936], "rank_score": 0.6622231096736173} -{"id": "kirschner-etal-2015-linking", "title": "Linking the Thoughts: Analysis of Argumentation Structures in Scientific Publications", "abstract": "This paper presents the results of an annotation study focused on the fine-grained analysis of argumentation structures in scientific publications. Our new annotation scheme specifies four types of binary argumentative relations between sentences, resulting in the representation of arguments as small graph structures. We developed an annotation tool that supports the annotation of such graphs and carried out an annotation study with four annotators on 24 scientific articles from the domain of educational research. For calculating the inter-annotator agreement, we adapted existing measures and developed a novel graph-based agreement measure which reflects the semantic similarity of different annotation graphs.", "phrases": ["argumentative relation", "scientific article", "inter-annotator agreement"], "overall_score": 1.5875111605829157, "scores": [0.8397705301665626, 0.5999296907409724, 0.5464305062241871], "rank_score": 0.662043575710574} -{"id": "reichart-rappoport-2007-self", "title": "Self-Training for Enhancement and Domain Adaptation of Statistical Parsers Trained on Small Datasets", "abstract": "Creating large amounts of annotated data to train statistical PCFG parsers is expensive, and the performance of such parsers declines when training and test data are taken from different domains. In this paper we use self-training in order to improve the quality of a parser and to adapt it to a different domain, using only small amounts of manually annotated seed data. We report significant improvement both when the seed and test data are in the same domain and in the out-of-domain adaptation scenario. In particular, we achieve 50% reduction in annotation cost for the in-domain case, yielding an improvement of 66% over previous work, and a 20-33% reduction for the domain adaptation case. This is the first time that self-training with small labeled datasets is applied successfully to these tasks. We were also able to formulate a characterization of when self-training is valuable.", "phrases": ["domain adaptation", "generative parser", "reranker"], "overall_score": 1.9829156584154977, "scores": [0.873647393689581, 0.5860214793390508, 0.5260716511702112], "rank_score": 0.6619135080662809} -{"id": "qi-etal-2018-pre", "title": "When and Why Are Pre-Trained Word Embeddings Useful for Neural Machine Translation?", "abstract": "The performance of Neural Machine Translation (NMT) systems often suffers in low-resource scenarios where sufficiently large-scale parallel corpora cannot be obtained. Pre-trained word embeddings have proven to be invaluable for improving performance in natural language analysis tasks, which often suffer from paucity of data. However, their utility for NMT has not been extensively explored. In this work, we perform five sets of experiments that analyze when we can expect pre-trained word embeddings to help in NMT tasks.
We show that such embeddings can be surprisingly effective in some cases \u2013 providing gains of up to 20 BLEU points in the most favorable setting.", "phrases": ["neural machine translation", "nmt task", "monolingual data"], "overall_score": 2.1034918942315626, "scores": [0.9161361449636635, 0.5420624172659796, 0.5274429717288274], "rank_score": 0.6618805113194902} -{"id": "koto-etal-2020-liputan6", "title": "Liputan6: A Large-scale Indonesian Dataset for Text Summarization", "abstract": "In this paper, we introduce a large-scale Indonesian summarization dataset. We harvest articles from Liputan6.com, an online news portal, and obtain 215,827 document\u2013summary pairs. We leverage pre-trained language models to develop benchmark extractive and abstractive summarization methods over the dataset with multilingual and monolingual BERT-based models. We include a thorough error analysis by examining machine-generated summaries that have low ROUGE scores, and expose issues both with ROUGE itself and with extractive and abstractive summarization models.", "phrases": ["indonesian summarization dataset", "news portal", "liputan6"], "overall_score": 0.9172293341589389, "scores": [0.7923126163057087, 0.639748680189983, 0.5528620211277889], "rank_score": 0.6616411058744935} -{"id": "pianta-etal-2008-textpro", "title": "The TextPro Tool Suite", "abstract": "We present TextPro, a suite of modular Natural Language Processing (NLP) tools for analysis of Italian and English texts. The suite has been designed so as to integrate and reuse state-of-the-art NLP components developed by researchers at FBK. The current version of the tool suite provides functions ranging from tokenization to chunking and Named Entity Recognition (NER). The system\u2019s architecture is organized as a pipeline of processors wherein each stage accepts data from an initial input or from an output of a previous stage, executes a specific task, and sends the resulting data to the next stage, or to the output of the pipeline. TextPro performed the best on the task of Italian NER and Italian PoS Tagging at EVALITA 2007. When tested on a number of other standard English benchmarks, TextPro confirms that it performs as a state-of-the-art system. Distributions for Linux, Solaris and Windows are available for both research and commercial purposes. A web-service version of the system is under development.", "phrases": ["textpro", "function", "tokenization", "pos tagging"], "overall_score": 1.1854499609210774, "scores": [0.9900919166150957, 0.5616652116359752, 0.5564657864437211, 0.538225967044464], "rank_score": 0.661612220434814} -{"id": "rama-singh-2009-bag", "title": "From Bag of Languages to Family Trees From Noisy Corpus", "abstract": "In this paper, we use corpus-based measures for constructing phylogenetic trees and try to address some questions about the validity of doing this and applicability to linguistic areas as against language families. We experiment with four corpus based distance measures for constructing phylogenetic trees. Three of these measures were earlier tried for estimating language distances. We use a fourth measure based on phonetic and orthographic feature n-grams.
We compare the trees obtained using these measures and present our observations.", "phrases": ["corpus-based measure", "phylogenetic tree", "linguistic area", "n-gram"], "overall_score": 0.9171632699259076, "scores": [0.9240066249904377, 0.5770723649873772, 0.5764504947461054, 0.5688443176913052], "rank_score": 0.6615934506038064} -{"id": "gao-etal-2019-structuring", "title": "Structuring Latent Spaces for Stylized Response Generation", "abstract": "Generating responses in a targeted style is a useful yet challenging task, especially in the absence of parallel data. With limited data, existing methods tend to generate responses that are either less stylized or less context-relevant. We propose StyleFusion, which bridges conversation modeling and non-parallel style transfer by sharing a structured latent space. This structure allows the system to generate stylized relevant responses by sampling in the neighborhood of the conversation model prediction, and continuously control the style level. We demonstrate this method using dialogues from Reddit data and two sets of sentences with distinct styles (arXiv and Sherlock Holmes novels). Automatic and human evaluation show that, without sacrificing appropriateness, the system generates responses of the targeted style and outperforms competitive baselines.", "phrases": ["conversation modeling", "sherlock holmes novel", "style intensity", "structured latent space"], "overall_score": 1.2869289162644355, "scores": [0.9217964631872443, 0.5983852612060786, 0.5815921205450869, 0.5436287023255633], "rank_score": 0.6613506368159932} -{"id": "popovic-ney-2009-syntax", "title": "Syntax-Oriented Evaluation Measures for Machine Translation Output", "abstract": "We explored novel automatic evaluation measures for machine translation output oriented to the syntactic structure of the sentence: the Bleu score on the detailed Part-of-Speech (pos) tags as well as the precision, recall and F-measure obtained on pos n-grams. We also introduced F-measure based on both word and pos n-grams. Correlations between the new metrics and human judgments were calculated on the data of the first, second and third shared task of the Statistical Machine Translation Workshop. Machine translation outputs in four different European languages were taken into account: English, Spanish, French and German. The results show that the new measures correlate very well with the human judgements and that they are competitive with the widely used BLEU, METEOR and TER metrics.", "phrases": ["machine translation output", "part-of-speech", "pos"], "overall_score": 0.9167224265290098, "scores": [0.9384994904129957, 0.5230145218602759, 0.5223123356643781], "rank_score": 0.6612754493125498} -{"id": "nicolae-nicolae-2006-bestcut", "title": "BESTCUT: A Graph Algorithm for Coreference Resolution", "abstract": "In this paper we describe a coreference resolution method that employs a classification and a clusterization phase. In a novel way, the clusterization is produced as a graph cutting algorithm, in which nodes of the graph correspond to the mentions of the text, whereas the edges of the graph constitute the confidences derived from the coreference classification. 
In experiments, the graph cutting algorithm for coreference resolution, called BestCut, achieves state-of-the-art performance.", "phrases": ["coreference resolution", "node", "mention", "graph-cut algorithm"], "overall_score": 1.452966682956899, "scores": [0.9732710931023778, 0.576939951080702, 0.5601910585613793, 0.5346924368740382], "rank_score": 0.6612736349046244} -{"id": "baldwin-li-2015-depth", "title": "An In-depth Analysis of the Effect of Text Normalization in Social Media", "abstract": "Recent years have seen increased interest in text normalization in social media, as the informal writing styles found in Twitter and other social media data often cause problems for NLP applications. Unfortunately, most current approaches narrowly regard the normalization task as a \u201cone size fits all\u201d task of replacing non-standard words with their standard counterparts. In this work we build a taxonomy of normalization edits and present a study of normalization to examine its effect on three different downstream applications (dependency parsing, named entity recognition, and text-to-speech synthesis). The results suggest that how the normalization task should be viewed is highly dependent on the targeted application. The results also show that normalization must be thought of as more than word replacement in order to produce results comparable to those seen on clean text.", "phrases": ["text normalization", "twitter", "social medium data"], "overall_score": 1.1845799428267088, "scores": [0.8795915179981602, 0.5524335867402, 0.5513548575348051], "rank_score": 0.6611266540910551} -{"id": "ladhak-etal-2020-exploring", "title": "Exploring Content Selection in Summarization of Novel Chapters", "abstract": "We present a new summarization task, generating summaries of novel chapters using summary/chapter pairs from online study guides. This is a harder task than the news summarization task, given the chapter length as well as the extreme paraphrasing and generalization found in the summaries. We focus on extractive summarization, which requires the creation of a gold-standard set of extractive summaries. We present a new metric for aligning reference summary sentences with chapter sentences to create gold extracts and also experiment with different alignment methods. Our experiments demonstrate significant improvement over prior alignment approaches for our task as shown through automatic metrics and a crowd-sourced pyramid analysis.", "phrases": ["summarization", "chapter", "gold-standard set"], "overall_score": 1.0637795304225457, "scores": [0.8686655115318351, 0.5897788943644761, 0.52444574886653], "rank_score": 0.6609633849209471} -{"id": "yu-etal-2015-predicting", "title": "Predicting Valence-Arousal Ratings of Words Using a Weighted Graph Method", "abstract": "Compared to the categorical approach that represents affective states as several discrete classes (e.g., positive and negative), the dimensional approach represents affective states as continuous numerical values on multiple dimensions, such as the valence-arousal (VA) space, thus allowing for more fine-grained sentiment analysis. In building dimensional sentiment applications, affective lexicons with valence-arousal ratings are useful resources but are still very rare. Therefore, this study proposes a weighted graph model that considers both the relations of multiple nodes and their similarities as weights to automatically determine the VA ratings of affective words. 
Experiments on both English and Chinese affective lexicons show that the proposed method yielded a smaller error rate on VA prediction than the linear regression, kernel method, and PageRank algorithm used in previous studies.", "phrases": ["valence-arousal rating", "dimension", "sentiment analysis", "weighted graph model"], "overall_score": 1.4521360540512644, "scores": [0.9487834650198095, 0.5915884409162606, 0.5690753913200212, 0.5341351003370494], "rank_score": 0.6608955993982851} -{"id": "bacchiani-etal-2004-language", "title": "Language Model Adaptation with MAP Estimation and the Perceptron Algorithm", "abstract": "In this paper, we contrast two language model adaptation approaches: MAP estimation and the perceptron algorithm. We show that, used in isolation, MAP estimation outperforms the latter approach, for reasons which argue for combining the two approaches. When combined, the resulting system provides a 0.7 percent absolute reduction in word error rate over MAP estimation alone. In addition, we demonstrate that, in a multi-pass recognition scenario, it is better to use the perceptron algorithm on early pass word lattices, since the improved error rate improves acoustic model adaptation.", "phrases": ["map estimation", "perceptron algorithm", "adaptation data", "error-driven learning approach", "background model"], "overall_score": 0.9161281428883136, "scores": [0.8705283335072258, 0.8148116188728469, 0.5473956610678191, 0.5423522405281721, 0.5291459674333996], "rank_score": 0.6608467642818928} -{"id": "wang-etal-2020-learning-decouple", "title": "Learning to Decouple Relations: Few-Shot Relation Classification with Entity-Guided Attention and Confusion-Aware Training", "abstract": "This paper aims to enhance few-shot relation classification, especially for sentences that jointly describe multiple relations. Because some relations usually have high co-occurrence in the same context, previous few-shot relation classifiers struggle to distinguish them with few annotated instances. To alleviate the above relation confusion problem, we propose CTEG, a model equipped with two novel mechanisms to learn to decouple these easily-confused relations. On the one hand, an Entity-Guided Attention (EGA) mechanism, which leverages the syntactic relations and relative positions between each word and the specified entity pair, is introduced to guide the attention to filter out information causing confusion. On the other hand, a Confusion-Aware Training (CAT) method is proposed to explicitly learn to distinguish relations by playing a pushing-away game between classifying a sentence into a true relation and its confusing relation. Extensive experiments are conducted on the FewRel dataset, and the results show that our proposed model achieves results comparable to and even much better than strong baselines in terms of accuracy. Furthermore, the ablation test and case study verify the effectiveness of our proposed EGA and CAT, especially in addressing the relation confusion problem.", "phrases": ["few-shot relation classification", "confusion-aware training", "relation confusion problem"], "overall_score": 1.1838752013616254, "scores": [0.8448044842341623, 0.6010544859388612, 0.5363410209982388], "rank_score": 0.6607333303904208} -{"id": "troles-schmid-2021-extending", "title": "Extending Challenge Sets to Uncover Gender Bias in Machine Translation: Impact of Stereotypical Verbs and Adjectives", "abstract": "Human gender bias is reflected in language and text production.
Because state-of-the-art machine translation (MT) systems are trained on large corpora of text, mostly generated by humans, gender bias can also be found in MT. For instance, when occupations are translated from a language like English, which mostly uses gender-neutral words, to a language like German, which mostly uses a feminine and a masculine version for an occupation, a decision must be made by the MT system. Recent research showed that MT systems are biased towards stereotypical translation of occupations. In 2019, the first and so far only challenge set explicitly designed to measure the extent of gender bias in MT systems was published. In this set, measurement of gender bias is based solely on the translation of occupations. With our paper we present an extension of this challenge set, called WiBeMT, which adds gender-biased adjectives and sentences with gender-biased verbs. The resulting challenge set consists of over 70,000 sentences and has been translated with three commercial MT systems: DeepL Translator, Microsoft Translator, and Google Translate. Results show a gender bias for all three MT systems. This gender bias is significantly influenced by adjectives to a great extent and by verbs to a lesser extent.", "phrases": ["gender bias", "machine translation", "adjective"], "overall_score": 0.9159621080399633, "scores": [0.8674502369406174, 0.5718076211098022, 0.5429231283167645], "rank_score": 0.660726995455728} -{"id": "ahmed-butt-2011-discovering", "title": "Discovering Semantic Classes for Urdu N-V Complex Predicates", "abstract": "This paper reports on an exploratory investigation as to whether classes of Urdu N-V complex predicates can be identified on the basis of syntactic patterns and lexical choices associated with the N-V complex predicates. Working with data from a POS annotated corpus, we show that choices with respect to the number of arguments, case marking on subjects and which light verbs are felicitous with which nouns depend heavily on the semantics of the noun in the N-V complex predicate. This initial work represents an important step towards identifying semantic criteria relevant for complex predicate formation. Identifying the semantic criteria and being able to systematically code them in turn represents a first step towards building up a lexical resource for nouns as part of developing natural language processing tools for the underresourced South Asian language Urdu.", "phrases": ["complex predicate", "language urdu", "semantic compatibility"], "overall_score": 1.1837020727286516, "scores": [0.9114577097368116, 0.5491199338900007, 0.5213324727549812], "rank_score": 0.6606367054605978} -{"id": "saunders-etal-2020-neural", "title": "Neural Machine Translation Doesn't Translate Gender Coreference Right Unless You Make It", "abstract": "Neural Machine Translation (NMT) has been shown to struggle with grammatical gender that is dependent on the gender of human referents, which can cause gender bias effects. Many existing approaches to this problem seek to control gender inflection in the target language by explicitly or implicitly adding a gender feature to the source sentence, usually at the sentence level. In this paper we propose schemes for incorporating explicit word-level gender inflection tags into NMT.
We explore the potential of this gender-inflection controlled translation when the gender feature can be determined from a human reference, or when a test sentence can be automatically gender-tagged, assessing on English-to-Spanish and English-to-German translation. We find that simple existing approaches can over-generalize a gender-feature to multiple entities in a sentence, and suggest effective alternatives in the form of tagged coreference adaptation data. We also propose an extension to assess translations of gender-neutral entities from English given a corresponding linguistic convention, such as a non-binary inflection, in the target language.", "phrases": ["source sentence", "gender inflection tag", "neural machine translation"], "overall_score": 0.9157621819830317, "scores": [0.8496058414105612, 0.5926169726500143, 0.5395255238102893], "rank_score": 0.6605827792902882} -{"id": "farajian-etal-2017-neural", "title": "Neural vs. Phrase-Based Machine Translation in a Multi-Domain Scenario", "abstract": "State-of-the-art neural machine translation (NMT) systems are generally trained on specific domains by carefully selecting the training sets and applying proper domain adaptation techniques. In this paper we consider the real world scenario in which the target domain is not predefined, hence the system should be able to translate text from multiple domains. We compare the performance of a generic NMT system and phrase-based statistical machine translation (PBMT) system by training them on a generic parallel corpus composed of data from different domains. Our results on multi-domain English-French data show that, in these realistic conditions, PBMT outperforms its neural counterpart. This raises the question: is NMT ready for deployment as a generic/multi-purpose MT backbone in real-world settings?", "phrases": ["machine translation", "neural counterpart", "multi-domain data"], "overall_score": 1.18339171177752, "scores": [0.9038037416807605, 0.5553557406263886, 0.522230986839965], "rank_score": 0.6604634897157047} -{"id": "vafa-etal-2020-text", "title": "Text-Based Ideal Points", "abstract": "Ideal point models analyze lawmakers' votes to quantify their political positions, or ideal points. But votes are not the only way to express a political position. Lawmakers also give speeches, release press statements, and post tweets. In this paper, we introduce the text-based ideal point model (TBIP), an unsupervised probabilistic topic model that analyzes texts to quantify the political positions of its authors. We demonstrate the TBIP with two types of politicized text data: U.S. Senate speeches and senator tweets. Though the model does not analyze their votes or political affiliations, the TBIP separates lawmakers by party, learns interpretable politicized topics, and infers ideal points close to the classical vote-based ideal points. One benefit of analyzing texts, as opposed to votes, is that the TBIP can estimate ideal points of anyone who authors political texts, including non-voting actors. To this end, we use it to study tweets from the 2020 Democratic presidential candidates. 
Using only the texts of their tweets, it identifies them along an interpretable progressive-to-moderate spectrum.", "phrases": ["point model", "only way", "tbip", "topic model", "text-based ideal point"], "overall_score": 1.062973393146595, "scores": [1.0311327890805215, 0.5802874820599102, 0.5719956447258019, 0.5693255936971897, 0.5495710087833174], "rank_score": 0.6604625036693481} -{"id": "ruff-etal-2019-self", "title": "Self-Attentive, Multi-Context One-Class Classification for Unsupervised Anomaly Detection on Text", "abstract": "There exist few text-specific methods for unsupervised anomaly detection, and for those that do exist, none utilize pre-trained models for distributed vector representations of words. In this paper we introduce a new anomaly detection method\u2014Context Vector Data Description (CVDD)\u2014which builds upon word embedding models to learn multiple sentence representations that capture multiple semantic contexts via the self-attention mechanism. Modeling multiple contexts enables us to perform contextual anomaly detection of sentences and phrases with respect to the multiple themes and concepts present in an unlabeled text corpus. These contexts in combination with the self-attention weights make our method highly interpretable. We demonstrate the effectiveness of CVDD quantitatively as well as qualitatively on the well-known Reuters, 20 Newsgroups, and IMDB Movie Reviews datasets.", "phrases": ["unsupervised anomaly detection", "multiple sentence representation", "self-attention mechanism"], "overall_score": 1.0626856000157185, "scores": [0.915927360664948, 0.5440484606069426, 0.5208752419577346], "rank_score": 0.6602836877432083} -{"id": "gu-etal-2018-multimodal", "title": "Multimodal Affective Analysis Using Hierarchical Attention Strategy with Word-Level Alignment", "abstract": "Multimodal affective computing, learning to recognize and interpret human affect and subjective information from multiple data sources, is still a challenge because: (i) it is hard to extract informative features to represent human affects from heterogeneous inputs; (ii) current fusion strategies only fuse different modalities at abstract levels, ignoring time-dependent interactions between modalities. Addressing such issues, we introduce a hierarchical multimodal architecture with attention and word-level fusion to classify utterance-level sentiment and emotion from text and audio data. Our introduced model outperforms state-of-the-art approaches on published datasets, and we demonstrate that our model is able to visualize and interpret synchronized attention over modalities.", "phrases": ["word-level alignment", "hierarchical multimodal architecture", "emotion"], "overall_score": 1.3730059513936161, "scores": [0.8369420728426382, 0.5781225869866549, 0.5657642173573103], "rank_score": 0.6602762923955344} -{"id": "yang-etal-2009-combining", "title": "Combining a Two-step Conditional Random Field Model and a Joint Source Channel Model for Machine Transliteration", "abstract": "This paper describes our system for \"NEWS 2009 Machine Transliteration Shared Task\" (NEWS 2009). We only participated in the standard run, which is a direct orthographical mapping (DOP) between two languages without using any intermediate phonemic mapping. We propose a new two-step conditional random field (CRF) model for DOP machine transliteration, in which the first CRF segments a source word into chunks and the second CRF maps the chunks to a word in the target language. 
The two-step CRF model obtains a slightly lower top-1 accuracy when compared to a state-of-the-art n-gram joint source-channel model. The combination of the CRF model with the joint source-channel model leads to improvements in all the tasks. The official result of our system in the NEWS 2009 shared task confirms the effectiveness of our system, where we achieved 0.627 top-1 accuracy for Japanese transliterated to Japanese Kanji (JJ), 0.713 for English-to-Chinese (E2C) and 0.510 for English-to-Japanese Katakana (E2J).", "phrases": ["machine transliteration", "orthographical mapping", "source word"], "overall_score": 1.1830247763962585, "scores": [0.9004627590960008, 0.5518612116191145, 0.5284521268253796], "rank_score": 0.660258699180165} -{"id": "berard-etal-2019-machine", "title": "Machine Translation of Restaurant Reviews: New Corpus for Domain Adaptation and Robustness", "abstract": "We share a French-English parallel corpus of Foursquare restaurant reviews, and define a new task to encourage research on Neural Machine Translation robustness and domain adaptation, in a real-world scenario where better-quality MT would be greatly beneficial. We discuss the challenges of such user-generated content, and train good baseline models that build upon the latest techniques for MT robustness. We also perform an extensive evaluation (automatic and human) that shows significant improvements over existing online systems. Finally, we propose task-specific metrics based on sentiment analysis or translation accuracy of domain-specific polysemous words.", "phrases": ["restaurant review", "domain adaptation", "online system"], "overall_score": 1.2847778682101676, "scores": [0.8678570843496609, 0.5618192331785915, 0.5510593328313891], "rank_score": 0.6602452167865471} -{"id": "suzuki-2011-automatic", "title": "Automatic Post-Editing based on SMT and its selective application by Sentence-Level Automatic Quality Evaluation", "abstract": "In the computer-assisted translation process with machine translation (MT), post-editing costs time and effort on the part of humans. To solve this problem, some have attempted to automate post-editing. Post-editing isn\u2019t always necessary, however, when MT outputs are of adequate quality for humans. This means that we need to be able to estimate the translation quality of each translated sentence to determine whether post-editing should be performed. While conventional automatic metrics such as BLEU, NIST and METEOR require gold standards (references), for wider applications we need to establish methods that can estimate the quality of translations without references. This paper presents a sentence-level automatic quality evaluator, composed of an SMT phrase-based automatic post-editing (APE) module and a confidence estimator characterized by PLS (Partial Least Squares) regression analysis. It is known that this model is a better model for predicting the output variable than a normal multiple regression analysis when multicollinearity exists between the input variables.
Experiments with Japanese to English patent translations show the validity of the proposed methods.", "phrases": ["automatic post-editing", "spe", "domain adaptation"], "overall_score": 1.182866730525778, "scores": [0.9305922731301177, 0.5267364061650333, 0.5231827970059509], "rank_score": 0.6601704921003674} -{"id": "bojar-etal-2011-grain", "title": "A Grain of Salt for the WMT Manual Evaluation", "abstract": "The Workshop on Statistical Machine Translation (WMT) has become one of ACL's flagship workshops, held annually since 2006. In addition to soliciting papers from the research community, WMT also features a shared translation task for evaluating MT systems. This shared task is notable for having manual evaluation as its cornerstone. The Workshop's overview paper, playing a descriptive and administrative role, reports the main results of the evaluation without delving deep into analyzing those results. The aim of this paper is to investigate and explain some interesting idiosyncrasies in the reported results, which only become apparent when performing a more thorough analysis of the collected annotations. Our analysis sheds some light on how the reported results should (and should not) be interpreted, and also gives rise to some helpful recommendation for the organizers of WMT.", "phrases": ["workshop", "statistical machine translation", "wmt evaluation", "discrepancy"], "overall_score": 1.6932500500519982, "scores": [0.9072123550904311, 0.6295539661901495, 0.5723103808072534, 0.5315213200544253], "rank_score": 0.6601495055355648} -{"id": "artetxe-etal-2019-effective", "title": "An Effective Approach to Unsupervised Machine Translation", "abstract": "While machine translation has traditionally relied on large amounts of parallel corpora, a recent research line has managed to train both Neural Machine Translation (NMT) and Statistical Machine Translation (SMT) systems using monolingual corpora only. In this paper, we identify and address several deficiencies of existing unsupervised SMT approaches by exploiting subword information, developing a theoretically well founded unsupervised tuning method, and incorporating a joint refinement procedure. Moreover, we use our improved SMT system to initialize a dual NMT model, which is further fine-tuned through on-the-fly back-translation. Together, we obtain large improvements over the previous state-of-the-art in unsupervised machine translation. For instance, we get 22.5 BLEU points in English-to-German WMT 2014, 5.5 points more than the previous best unsupervised system, and 0.5 points more than the (supervised) shared task winner back in 2014.", "phrases": ["unsupervised machine translation", "large amount", "unmt model", "bli"], "overall_score": 2.2667903309015522, "scores": [1.0092190518494089, 0.5587099646018157, 0.5419999079864789, 0.530488987359856], "rank_score": 0.6601044779493899} -{"id": "liu-etal-2018-parsing", "title": "Parsing Tweets into Universal Dependencies", "abstract": "We study the problem of analyzing tweets with universal dependencies (UD). We extend the UD guidelines to cover special constructions in tweets that affect tokenization, part-of-speech tagging, and labeled dependencies. Using the extended guidelines, we create a new tweet treebank for English (Tweebank v2) that is four times larger than the (unlabeled) Tweebank v1 introduced by Kong et al. (2014). 
We characterize the disagreements between our annotators and show that it is challenging to deliver consistent annotation due to ambiguity in understanding and explaining tweets. Nonetheless, using the new treebank, we build a pipeline system to parse raw tweets into UD. To overcome the annotation noise without sacrificing computational efficiency, we propose a new method to distill an ensemble of 20 transition-based parsers into a single one. Our parser achieves an improvement of 2.2 in LAS over the un-ensembled baseline and outperforms parsers that are state-of-the-art on other treebanks in both accuracy and speed.", "phrases": ["universal dependency", "guideline", "treebank"], "overall_score": 1.3726410844305672, "scores": [0.8914057098516005, 0.5584266690766762, 0.5304701065001521], "rank_score": 0.6601008284761428} -{"id": "nicholson-baldwin-2006-interpretation", "title": "Interpretation of Compound Nominalisations using Corpus and Web Statistics", "abstract": "We present two novel paraphrase tests for automatically predicting the inherent semantic relation of a given compound nominalisation as one of subject, direct object, or prepositional object. We compare these to the usual verb-argument paraphrase test using corpus statistics, and frequencies obtained by scraping the Google search engine interface. We also implemented a more robust statistical measure than maximum likelihood estimation --- the confidence interval. A significant reduction in data sparseness was achieved, but this alone is insufficient to provide a substantial performance improvement.", "phrases": ["distributional semantic", "non-literal usage", "few token-based approach"], "overall_score": 2.0978345017311617, "scores": [0.8903405540344934, 0.5613608042523922, 0.5285997445753173], "rank_score": 0.6601003676207343} -{"id": "poliak-etal-2018-evaluation", "title": "On the Evaluation of Semantic Phenomena in Neural Machine Translation Using Natural Language Inference", "abstract": "We propose a process for investigating the extent to which sentence representations arising from neural machine translation (NMT) systems encode distinct semantic phenomena. We use these representations as features to train a natural language inference (NLI) classifier based on datasets recast from existing semantic annotations. In applying this process to a representative NMT system, we find its encoder appears most suited to supporting inferences at the syntax-semantics interface, as compared to anaphora resolution requiring world knowledge. We conclude with a discussion on the merits and potential deficiencies of the existing process, and how it may be improved and extended as a broader framework for evaluating semantic coverage", "phrases": ["natural language inference", "nli", "semantic phenomenon"], "overall_score": 1.0623480231327043, "scores": [0.8970908523898227, 0.5562594603750982, 0.5268715055331543], "rank_score": 0.6600739394326918} -{"id": "furstenau-lapata-2012-semi", "title": "Semi-Supervised Semantic Role Labeling via Structural Alignment", "abstract": "Large-scale annotated corpora are a prerequisite to developing high-performance semantic role labeling systems. Unfortunately, such corpora are expensive to produce, limited in size, and may not be representative. Our work aims to reduce the annotation effort involved in creating resources for semantic role labeling via semi-supervised learning. 
The key idea of our approach is to find novel instances for classifier training based on their similarity to manually labeled seed instances. The underlying assumption is that sentences that are similar in their lexical material and syntactic structure are likely to share a frame semantic analysis. We formalize the detection of similar sentences and the projection of role annotations as a graph alignment problem, which we solve exactly using integer linear programming. Experimental results on semantic role labeling show that the automatic annotations produced by our method improve performance over using hand-labeled instances alone.", "phrases": ["novel instance", "classifier training", "seed instance", "fu\u0308rstenau", "annotation projection"], "overall_score": 1.2837254912957288, "scores": [1.074597147840479, 0.5862510656352737, 0.5519345453739815, 0.5431800650049434, 0.5425591863186671], "rank_score": 0.6597044020346688} -{"id": "zhang-etal-2018-sentence", "title": "Sentence-State LSTM for Text Representation", "abstract": "Bi-directional LSTMs are a powerful tool for text representation. On the other hand, they have been shown to suffer various limitations due to their sequential nature. We investigate an alternative LSTM structure for encoding text, which consists of a parallel state for each word. Recurrent steps are used to perform local and global information exchange between words simultaneously, rather than incremental reading of a sequence of words. Results on various classification and sequence labelling benchmarks show that the proposed model has strong representation power, giving highly competitive performances compared to stacked BiLSTM models with similar parameter numbers.", "phrases": ["text representation", "information exchange", "reading", "graph recurrent network"], "overall_score": 1.5186272424475966, "scores": [0.9310536560639073, 0.5909431865030145, 0.5757728908511348, 0.540355992433715], "rank_score": 0.6595314314629429} -{"id": "peng-etal-2015-solving", "title": "Solving Hard Coreference Problems", "abstract": "Coreference resolution is a key problem in natural language understanding that still escapes reliable solutions. One fundamental difficulty has been that of resolving instances involving pronouns since they often require deep language understanding and use of background knowledge. In this paper we propose an algorithmic solution that involves a new representation for the knowledge required to address hard coreference problems, along with a constrained optimization framework that uses this knowledge in coreference decision making. Our representation, Predicate Schemas, is instantiated with knowledge acquired in an unsupervised way, and is compiled automatically into constraints that impact the coreference decision.
We present a general coreference resolution system that significantly improves state-of-the-art performance on hard, Winograd-style pronoun resolution cases, while still performing at the state-of-the-art level on standard coreference resolution datasets.", "phrases": ["hard coreference problem", "predicate schemas", "knowledge basis", "antecedent"], "overall_score": 1.638315371229139, "scores": [1.0150424543372998, 0.5491460448382124, 0.5380041066142468, 0.5350338209959906], "rank_score": 0.6593066066964374} -{"id": "yin-etal-2016-neural", "title": "Neural Enquirer: Learning to Query Tables in Natural Language", "abstract": "We propose NEURAL ENQUIRER -- a neural network architecture for answering natural language (NL) questions based on a knowledge base (KB) table. Unlike existing work on end-to-end training of semantic parsers [Pasupat and Liang, 2015; Neelakantan et al., 2015], NEURAL ENQUIRER is fully \"neuralized\": it finds distributed representations of queries and KB tables, and executes queries through a series of neural network components called \"executors\". Executors model query operations and compute intermediate execution results in the form of table annotations at different levels. NEURAL ENQUIRER can be trained with gradient descent, with which the representations of queries and the KB table are jointly optimized with the query execution logic. The training can be done in an end-to-end fashion, and it can also be carried out with stronger guidance, e.g., step-by-step supervision for complex queries. NEURAL ENQUIRER is one step towards building neural network systems that can understand natural language in real-world tasks. As a proof-of-concept, we conduct experiments on a synthetic QA task, and demonstrate that the model can learn to execute reasonably complex NL queries on small-scale KB tables.", "phrases": ["query", "semantic parser", "neural enquirer"], "overall_score": 1.6382107401139017, "scores": [0.8929664793865223, 0.5599660582070101, 0.524860962520847], "rank_score": 0.6592645000381264} -{"id": "bollmann-2013-pos", "title": "POS Tagging for Historical Texts with Sparse Training Data", "abstract": "This paper presents a method for part-of-speech tagging of historical data and evaluates it on texts from different corpora of historical German (15th\u201318th century). Spelling normalization is used to preprocess the texts before applying a POS tagger trained on modern German corpora. Using only 250 manually normalized tokens as training data, the tagging accuracy of a manuscript from the 15th century can be raised from 28.65% to 74.89%.", "phrases": ["historical text", "spelling normalization", "pos tagging"], "overall_score": 1.1811396136427994, "scores": [0.9084822699821061, 0.5385023629994273, 0.5306350764625092], "rank_score": 0.6592065698146808} -{"id": "lacroix-2019-dependency", "title": "Dependency Parsing as Sequence Labeling with Head-Based Encoding and Multi-Task Learning", "abstract": "Dependency parsing as sequence labeling has recently proved to be a relevant alternative to the traditional transition- and graph-based approaches. It offers a good trade-off between parsing accuracy and speed. However, recent work on dependency parsing as sequence labeling ignores the pre-processing time of Part-of-Speech tagging \u2013 which is required for this task \u2013 in the evaluation of speed, while other studies showed that Part-of-Speech tags are not essential to achieve state-of-the-art parsing scores.
In this paper, we compare the accuracy and speed of shared and stacked multi-task learning strategies \u2013 as well as a strategy that combines both \u2013 to learn Part-of-Speech tagging and dependency parsing in a single sequence labeling pipeline. In addition, we propose an alternative encoding of the dependencies as labels which does not use Part-of-Speech tags and improves dependency parsing accuracy for most of the languages we evaluate.", "phrases": ["alternative encoding", "dependency parsing", "pos tag"], "overall_score": 0.9138107933185831, "scores": [0.9028028288851074, 0.5401212417885578, 0.5346013790736287], "rank_score": 0.6591751499157646} -{"id": "hsi-etal-2016-leveraging", "title": "Leveraging Multilingual Training for Limited Resource Event Extraction", "abstract": "Event extraction has become one of the most important topics in information extraction, but to date, there is very limited work on leveraging cross-lingual training to boost performance. We propose a new event extraction approach that trains on multiple languages using a combination of both language-dependent and language-independent features, with particular focus on the case where target domain training data is of very limited size. We show empirically that multilingual training can boost performance for the tasks of event trigger extraction and event argument extraction on the Chinese ACE 2005 dataset.", "phrases": ["multilingual training", "event extraction", "language-independent feature"], "overall_score": 0.9136814580366233, "scores": [0.8580872310079538, 0.5819104510133967, 0.5372478806711005], "rank_score": 0.659081854230817} -{"id": "morante-etal-2009-joint", "title": "Joint Memory-Based Learning of Syntactic and Semantic Dependencies in Multiple Languages", "abstract": "In this paper we present a system submitted to the CoNLL Shared Task 2009 performing the identification and labeling of syntactic and semantic dependencies in multiple languages. Dependencies are truly jointly learned, i.e. as if they were a single task. The system works in two phases: a classification phase in which three classifiers predict different types of information, and a ranking phase in which the output of the classifiers is combined.", "phrases": ["multiple language", "syntactic parsing", "joint learning"], "overall_score": 0.9135826214059191, "scores": [0.8462718616829564, 0.5723830122106652, 0.5583768021233666], "rank_score": 0.6590105586723294} -{"id": "pitler-etal-2010-using", "title": "Using Web-scale N-grams to Improve Base NP Parsing Performance", "abstract": "We use web-scale N-grams in a base NP parser that correctly analyzes 95.4% of the base NPs in natural text. Web-scale data improves performance. That is, there is no data like more data. Performance scales log-linearly with the number of parameters in the model (the number of unique N-grams). The web-scale N-grams are particularly helpful in harder cases, such as NPs that contain conjunctions.", "phrases": ["web-scale n-grams", "noun phrase", "supervised task"], "overall_score": 1.8268163649695572, "scores": [0.91168647994969, 0.5384836887136809, 0.5264840141039164], "rank_score": 0.6588847275890958} -{"id": "konkol-2016-uwb", "title": "UWB at SemEval-2016 Task 11: Exploring Features for Complex Word Identification", "abstract": "In this paper, we present our system developed for the SemEval 2016 Task 11: Complex Word Identification. Our team achieved the 3rd place among 21 participants. Our systems ranked 4th and 13th among 42 submitted systems. 
We proposed multiple features suitable for complex word identification, evaluated them, and discussed their properties. According to the results of our experiments, our final system used a maximum entropy classifier with a single feature \u2013 document frequency.", "phrases": ["complex word identification", "document frequency", "wikipedia"], "overall_score": 1.0601826039062634, "scores": [0.9225666889696656, 0.5271958266462279, 0.5264229508421174], "rank_score": 0.658728488819337} -{"id": "bicici-2013-feature", "title": "Feature Decay Algorithms for Fast Deployment of Accurate Statistical Machine Translation Systems", "abstract": "We use feature decay algorithms (FDA) for fast deployment of accurate statistical machine translation systems, taking only about half a day for each translation direction. We develop parallel FDA for solving computational scalability problems caused by the abundance of training data for SMT models and LM models and still achieve SMT performance that is on par with using all of the training data or better. Parallel FDA runs separate FDA models on randomized subsets of the training data and combines the instance selections later. Parallel FDA can also be used for selecting the LM corpus based on the training set selected by parallel FDA. The high quality of the selected training data allows us to obtain very accurate translation outputs close to the top performing SMT systems. The relevancy of the selected LM corpus can reach up to 86% reduction in the number of OOV tokens and up to 74% reduction in the perplexity. We perform SMT experiments in all language pairs in the WMT13 translation task and obtain SMT performance close to the top systems using significantly less resources for training and development.", "phrases": ["fast deployment", "fda", "feature decay algorithm"], "overall_score": 0.9128325168626763, "scores": [0.8286454843075103, 0.5997713724600426, 0.5469915610924085], "rank_score": 0.6584694726199871} -{"id": "van-der-wees-etal-2016-simple", "title": "A Simple but Effective Approach to Improve Arabizi-to-English Statistical Machine Translation", "abstract": "A major challenge for statistical machine translation (SMT) of Arabic-to-English user-generated text is the prevalence of text written in Arabizi, or Romanized Arabic. When facing such texts, a translation system trained on conventional Arabic-English data will suffer from extremely low model coverage. In addition, Arabizi is not regulated by any official standardization and is therefore highly ambiguous, which prevents rule-based approaches from achieving good translation results. In this paper, we improve Arabizi-to-English machine translation by presenting a simple but effective Arabizi-to-Arabic transliteration pipeline that does not require knowledge by experts or native Arabic speakers. We incorporate this pipeline into a phrase-based SMT system, and show that translation quality after automatically transliterating Arabizi to Arabic yields results that are comparable to those achieved after human transliteration.", "phrases": ["statistical machine translation", "arabizi", "arabizi-to-arabic transliteration pipeline"], "overall_score": 1.059756723533414, "scores": [0.821670204775919, 0.6238055913635758, 0.5299158272577325], "rank_score": 0.6584638744657424} -{"id": "wang-etal-2013-financial", "title": "Financial Sentiment Analysis for Risk Prediction", "abstract": "This paper attempts to identify the importance of sentiment words in financial reports on financial risk.
By using a finance-specific sentiment lexicon, we apply regression and ranking techniques to analyze the relations between sentiment words and financial risk. The experimental results show that, based on the bag-of-words model, models trained on sentiment words only result in comparable performance to those on original texts, which confirms the importance of financial sentiment words for risk prediction. Furthermore, the learned models suggest strong correlations between financial sentiment words and risk of companies. As a result, these findings are of great value for providing us with more insight and understanding into the impact of financial sentiment words in financial reports.", "phrases": ["risk prediction", "report", "volatility"], "overall_score": 1.369075004574936, "scores": [0.8751538755885057, 0.5658313538934561, 0.5341724902233336], "rank_score": 0.6583859065684318} -{"id": "faralli-etal-2020-multiple", "title": "Multiple Knowledge GraphDB (MKGDB)", "abstract": "We present MKGDB, a large-scale graph database created as a combination of multiple taxonomy backbones extracted from 5 existing knowledge graphs, namely: ConceptNet, DBpedia, WebIsAGraph, WordNet and the Wikipedia category hierarchy. MKGDB, thanks to the versatility of the Neo4j graph database manager technology, is intended to favour and help the development of open-domain natural language processing applications relying on knowledge bases, such as information extraction, hypernymy discovery, topic clustering, and others. Our resource consists of a large hypernymy graph which counts more than 37 million nodes and more than 81 million hypernymy relations.", "phrases": ["mkgdb", "dbpedia", "knowledge basis"], "overall_score": 0.7230777149287239, "scores": [0.7885732765015534, 0.6030524692545602, 0.5828953543273535], "rank_score": 0.6581737000278224} -{"id": "chan-etal-2019-rapid", "title": "Rapid Customization for Event Extraction", "abstract": "Extracting events in the form of who is involved in what, when, and where from text is one of the core information extraction tasks that has many applications such as web search and question answering. We present a system for rapidly customizing event extraction capability to find new event types (what happened) and their arguments (who, when, and where). To enable extracting events of new types, we develop a novel approach to allow a user to find, expand and filter event triggers by exploring an unannotated development corpus. The system will then generate mention level event annotation automatically and train a neural network model for finding the corresponding events. To enable extracting arguments for new event types, the system makes novel use of the ACE annotation dataset to train a generic argument attachment model for extracting Actor, Place, and Time. We demonstrate that with less than 10 minutes of human effort per event type, the system achieves good performance for 67 novel event types. Experiments also show that the generic argument attachment model performs well on the novel event types.
Our system (code, UI, documentation, demonstration video) is released as open source.", "phrases": ["event extraction", "new type", "leverage annotation", "keyword"], "overall_score": 0.9121499961851267, "scores": [0.9742017914099202, 0.5717695701868543, 0.552547243731546, 0.5333899467580182], "rank_score": 0.6579771380215846} -{"id": "huang-etal-2009-bilingually", "title": "Bilingually-Constrained (Monolingual) Shift-Reduce Parsing", "abstract": "Jointly parsing two languages has been shown to improve accuracies on either or both sides. However, its search space is much bigger than the monolingual case, forcing existing approaches to employ complicated modeling and crude approximations. Here we propose a much simpler alternative, bilingually-constrained monolingual parsing, where a source-language parser learns to exploit reorderings as an additional observation, without bothering to build the target-side tree as well. We show specifically how to enhance a shift-reduce dependency parser with alignment features to resolve shift-reduce conflicts. Experiments on the bilingual portion of Chinese Treebank show that, with just 3 bilingual features, we can improve parsing accuracies by 0.6% (absolute) for both English and Chinese over a state-of-the-art baseline, with negligible (~6%) efficiency overhead, thus much faster than biparsing.", "phrases": ["shift-reduce parsing", "source-language parser", "shift-reduce conflict", "bilingual constraint"], "overall_score": 1.6348110458982525, "scores": [0.9307643079218169, 0.5925873912593187, 0.5847134117261817, 0.5235203388522962], "rank_score": 0.6578963624399035} -{"id": "ebner-etal-2020-multi", "title": "Multi-Sentence Argument Linking", "abstract": "We present a novel document-level model for finding argument spans that fill an event's roles, connecting related ideas in sentence-level semantic role labeling and coreference resolution. Because existing datasets for cross-sentence linking are small, development of our neural model is supported through the creation of a new resource, Roles Across Multiple Sentences (RAMS), which contains 9,124 annotated events across 139 types. We demonstrate strong performance of our model on RAMS and other event-related datasets.", "phrases": ["argument linking", "multiple sentences", "trigger", "eae"], "overall_score": 1.8637679364976854, "scores": [0.9787494964240624, 0.5913928980082126, 0.5396272821416906, 0.5215435500246235], "rank_score": 0.6578283066496473} -{"id": "qin-etal-2020-feature", "title": "Feature Projection for Improved Text Classification", "abstract": "In classification, there are usually some good features that are indicative of class labels. For example, in sentiment classification, words like good and nice are indicative of the positive sentiment and words like bad and terrible are indicative of the negative sentiment. However, there are also many common features (e.g., words) that are not indicative of any specific class (e.g., voice and screen, which are common to both sentiment classes and are not discriminative for classification). Although deep learning has made significant progress in generating discriminative features through its powerful representation learning, we believe there is still room for improvement. In this paper, we propose a novel angle to further improve this representation learning, i.e., feature projection. This method projects existing features into the orthogonal space of the common features.
The resulting projection is thus perpendicular to the common features and more discriminative for classification. We apply this new method to improve CNN-, RNN-, Transformer-, and BERT-based text classification and obtain markedly better results.", "phrases": ["projection", "sentiment classification", "bert"], "overall_score": 0.911889263740559, "scores": [0.8281959380614781, 0.6132477616614744, 0.5319234782346365], "rank_score": 0.6577890593191963} -{"id": "brooks-2006-unsupervised", "title": "Unsupervised Grammar Induction by Distribution and Attachment", "abstract": "Distributional approaches to grammar induction are typically inefficient, enumerating large numbers of candidate constituents. In this paper, we describe a simplified model of distributional analysis which uses heuristics to reduce the number of candidate constituents under consideration. We apply this model to a large corpus of over 400,000 words of written English, and evaluate the results using EVALB. We show that the performance of this approach is limited, providing a detailed analysis of learned structure and a comparison with actual constituent-context distributions. This motivates a more structured approach, using a process of attachment to form constituents from their distributional components. Our findings suggest that distributional methods do not generalize enough to learn syntax effectively from raw text, but that attachment methods are more successful.", "phrases": ["attachment", "distributional approach", "constituent"], "overall_score": 0.722281880426185, "scores": [0.8210066685674453, 0.5851442423716762, 0.5661969897980039], "rank_score": 0.6574493002457085} -{"id": "kantor-etal-2019-learning", "title": "Learning to combine Grammatical Error Corrections", "abstract": "The field of Grammatical Error Correction (GEC) has produced various systems to deal with focused phenomena or general text editing. We propose an automatic way to combine black-box systems. Our method automatically detects the strength of a system or the combination of several systems per error type, improving precision and recall while optimizing F-score directly. We show consistent improvement over the best standalone system in all the configurations tested. This approach also outperforms average ensembling of different RNN models with random initializations. In addition, we analyze the use of BERT for GEC, reporting promising results on this end. We also present a spellchecker created for this task which outperforms standard spellcheckers tested on the task of spellchecking. This paper describes a system submission to Building Educational Applications 2019 Shared Task: Grammatical Error Correction. Combining the output of top BEA 2019 shared task systems using our approach currently holds the highest reported score in the open phase of the BEA 2019 shared task, improving F-0.5 score by 3.7 points over the best result reported.", "phrases": ["recall", "bert", "spellchecker"], "overall_score": 1.367110238681717, "scores": [0.8422923881709649, 0.5806911275726145, 0.5493396459510612], "rank_score": 0.6574410538982135} -{"id": "virpioja-etal-2011-empirical", "title": "Empirical Comparison of Evaluation Methods for Unsupervised Learning of Morphology", "abstract": "Unsupervised and semi-supervised learning of morphology provide practical solutions for processing morphologically rich languages with less human labor than the traditional rule-based analyzers.
Direct evaluation of the learning methods using linguistic reference analyses is important for their development, as evaluation through the final applications is often time consuming. However, even linguistic evaluation is not straightforward for full morphological analysis, because the morpheme labels generated by the learning method can be arbitrary. We review the previous evaluation methods for the learning tasks and propose new variations. In order to compare the methods, we perform an extensive meta-evaluation using the large collection of results from the Morpho Challenge competitions.", "phrases": ["evaluation method", "morphology", "segmentation algorithm"], "overall_score": 1.058092200533574, "scores": [0.8908431263360944, 0.5417120265194858, 0.5397337916741114], "rank_score": 0.6574296481765639} -{"id": "sumita-sugaya-2006-word", "title": "Word Pronunciation Disambiguation using the Web", "abstract": "This paper proposes an automatic method of reading proper names with multiple pronunciations. First, the method obtains Web pages that include both the proper name and its pronunciation. Second, the method feeds them to the learner for classification. The current accuracy is around 90% for open data.", "phrases": ["pronunciation disambiguation", "web", "proper name"], "overall_score": 1.0579543704001557, "scores": [0.8254443109770998, 0.6076279584659688, 0.5389597590558397], "rank_score": 0.6573440094996361} -{"id": "hough-purver-2014-strongly", "title": "Strongly Incremental Repair Detection", "abstract": "We present STIR (STrongly Incremental Repair detection), a system that detects speech repairs and edit terms on transcripts incrementally with minimal latency. STIR uses information-theoretic measures from n-gram models as its principal decision features in a pipeline of classifiers detecting the different stages of repairs. Results on the Switchboard disfluency tagged corpus show utterance-final accuracy on a par with state-of-the-art incremental repair detection methods, but with better incremental accuracy, faster time-to-detection and less computational overhead.
We evaluate its performance using incremental metrics and propose new repair processing evaluation standards.", "phrases": ["repair", "pipeline", "good decision", "assumption", "word-by-word data"], "overall_score": 1.057648733217515, "scores": [0.9561381814969854, 0.620613486629894, 0.5742759733264365, 0.573717356030912, 0.561025534719578], "rank_score": 0.6571541064407611} -{"id": "kobbe-etal-2020-unsupervised", "title": "Unsupervised stance detection for arguments from consequences", "abstract": "Social media platforms have become an essential venue for online deliberation where users discuss arguments, debate, and form opinions. In this paper, we propose an unsupervised method to detect the stance of argumentative claims with respect to a topic. Most related work focuses on topic-specific supervised models that need to be trained for every emergent debate topic. To address this limitation, we propose a topic independent approach that focuses on a frequently encountered class of arguments, specifically, on arguments from consequences. We do this by extracting the effects that claims refer to, and proposing a means for inferring if the effect is a good or bad consequence. Our experiments provide promising results that are comparable to, and in particular regards even outperform BERT. Furthermore, we publish a novel dataset of arguments relating to consequences, annotated with Amazon Mechanical Turk.", "phrases": ["consequence", "opinion", "unsupervised method"], "overall_score": 0.7219392090768192, "scores": [0.8290723487330232, 0.5773650802849729, 0.5649747330070272], "rank_score": 0.6571373873416745} -{"id": "buck-2012-black", "title": "Black Box Features for the WMT 2012 Quality Estimation Shared Task", "abstract": "In this paper we introduce a number of new features for quality estimation in machine translation that were developed for the WMT 2012 quality estimation shared task. We find that very simple features such as indicators of certain characters are able to outperform complex features that aim to model the connection between two languages.", "phrases": ["wmt", "non-linear learning method", "different type"], "overall_score": 0.9104281540204406, "scores": [0.8662583878361657, 0.5634517871637, 0.5404950993366091], "rank_score": 0.6567350914454916} -{"id": "sakaguchi-etal-2014-efficient", "title": "Efficient Elicitation of Annotations for Human Evaluation of Machine Translation", "abstract": "A main output of the annual Workshop on Statistical Machine Translation (WMT) is a ranking of the systems that participated in its shared translation tasks, produced by aggregating pairwise sentence-level comparisons collected from human judges. Over the past few years, there have been a number of tweaks to the aggregation formula in attempts to address issues arising from the inherent ambiguity and subjectivity of the task, as well as weaknesses in the proposed models and the manner of model selection. We continue this line of work by adapting the TrueSkill\u2122 algorithm \u2014 an online approach for modeling the relative skills of players in ongoing competitions, such as Microsoft\u2019s Xbox Live \u2014 to the human evaluation of machine translation output. 
Our experimental results show that TrueSkill outperforms other recently proposed models on accuracy, and also can significantly reduce the number of pairwise annotations that need to be collected by sampling non-uniformly from the space of system competitions.", "phrases": ["human evaluation", "workshop", "statistical machine translation"], "overall_score": 1.277619093430575, "scores": [0.8827617052349649, 0.5526473907860797, 0.5342899068607037], "rank_score": 0.6565663342939161} -{"id": "hossain-schwitter-2018-specifying", "title": "Specifying Conceptual Models Using Restricted Natural Language", "abstract": "The key activity to design an information system is conceptual modelling which brings out and describes the general knowledge that is required to build a system. In this paper we propose a novel approach to conceptual modelling where the domain experts will be able to specify and construct a model using a restricted form of natural language. A restricted natural language is a subset of a natural language that has well-defined computational properties and therefore can be translated unambiguously into a formal notation. We will argue that a restricted natural language is suitable for writing precise and consistent specifications that lead to executable conceptual models. Using a restricted natural language will allow the domain experts to describe a scenario in the terminology of the application domain without the need to formally encode this scenario. The resulting textual specification can then be automatically translated into the language of the desired conceptual modelling framework.", "phrases": ["conceptual modelling", "restricted natural language", "domain expert", "specification"], "overall_score": 0.7212421560321817, "scores": [0.9207491686631265, 0.5981829013574372, 0.5627701757339564, 0.5443093635151031], "rank_score": 0.6565029023174058} -{"id": "beigman-klebanov-etal-2017-continuous", "title": "Continuous fluency tracking and the challenges of varying text complexity", "abstract": "This paper is a preliminary report on using text complexity measurement in the service of a new educational application. We describe a reading intervention where a child takes turns reading a book aloud with a virtual reading partner. Our ultimate goal is to provide meaningful feedback to the parent or the teacher by continuously tracking the child's improvement in reading fluency. We show that this would not be a simple endeavor, due to an intricate relationship between text complexity from the point of view of comprehension and reading rate.", "phrases": ["fluency", "text complexity", "reading rate"], "overall_score": 0.7211196285950833, "scores": [0.8336567412819945, 0.5908863786647373, 0.5446309991667307], "rank_score": 0.6563913730378208} -{"id": "nie-etal-2020-learn", "title": "What Can We Learn from Collective Human Opinions on Natural Language Inference Data?", "abstract": "Despite the subjective nature of many NLP tasks, most NLU evaluations have focused on using the majority label with presumably high agreement as the ground truth. Less attention has been paid to the distribution of human opinions. We collect ChaosNLI, a dataset with a total of 464,500 annotations to study Collective HumAn OpinionS in oft-used NLI evaluation sets. This dataset is created by collecting 100 annotations per example for 3,113 examples in SNLI and MNLI and 1,532 examples in \u03b1NLI. 
Analysis reveals that: (1) high human disagreement exists in a noticeable amount of examples in these datasets; (2) the state-of-the-art models lack the ability to recover the distribution over human labels; (3) models achieve near-perfect accuracy on the subset of data with a high level of human agreement, whereas they can barely beat a random guess on the data with low levels of human agreement, which compose most of the common errors made by state-of-the-art models on the evaluation sets. This questions the validity of improving model performance on old metrics for the low-agreement part of evaluation datasets. Hence, we argue for a detailed examination of human agreement in future data collection efforts, and evaluating model outputs against the distribution over collective human opinions.", "phrases": ["collective human opinions", "natural language inference", "nli", "annotation artifact"], "overall_score": 1.364874524135958, "scores": [0.9127371564116739, 0.627251534176076, 0.5532512064634771, 0.5322237128242805], "rank_score": 0.6563659024688768} -{"id": "fournier-2013-evaluating", "title": "Evaluating Text Segmentation using Boundary Edit Distance", "abstract": "This work proposes a new segmentation evaluation metric, named boundary similarity (B), an inter-coder agreement coefficient adaptation, and a confusion-matrix for segmentation that are all based upon an adaptation of the boundary edit distance in Fournier and Inkpen (2012). Existing segmentation metrics such as Pk, WindowDiff, and Segmentation Similarity (S) are all able to award partial credit for near misses between boundaries, but are biased towards segmentations containing few or tightly clustered boundaries. Despite S\u2019s improvements, its normalization also produces cosmetically high values that overestimate agreement & performance, leading this work to propose a solution.", "phrases": ["text segmentation", "edit distance", "agreement", "near miss"], "overall_score": 1.056287068522294, "scores": [0.9816528933949216, 0.5881513807839275, 0.5289607945380116, 0.526467157668995], "rank_score": 0.6563080565964639} -{"id": "majumder-etal-2018-iarm", "title": "IARM: Inter-Aspect Relation Modeling with Memory Networks in Aspect-Based Sentiment Analysis", "abstract": "Sentiment analysis has immense implications in e-commerce through user feedback mining. Aspect-based sentiment analysis takes this one step further by enabling businesses to extract aspect specific sentimental information. In this paper, we present a novel approach of incorporating the neighboring aspects related information into the sentiment classification of the target aspect using memory networks. 
We show that our method outperforms the state of the art by 1.6% on average in two distinct domains: restaurant and laptop.", "phrases": ["sentiment analysis", "aspect-aware sentence representation", "aspect term"], "overall_score": 1.511044404186583, "scores": [0.8536686760319605, 0.569173885782896, 0.5458721781322029], "rank_score": 0.6562382466490199} -{"id": "pasca-van-durme-2008-weakly", "title": "Weakly-Supervised Acquisition of Open-Domain Classes and Class Attributes from Web Documents and Query Logs", "abstract": "A new approach to large-scale information extraction exploits both Web documents and query logs to acquire thousands of open-domain classes of instances, along with relevant sets of open-domain class attributes at precision levels previously obtained only on small-scale, manually-assembled classes.", "phrases": ["class attribute", "web document", "concept acquisition method"], "overall_score": 1.8591855566471058, "scores": [0.8504277899873061, 0.5736952939322647, 0.5445096989389621], "rank_score": 0.6562109276195108} -{"id": "watanabe-etal-2006-left", "title": "Left-to-Right Target Generation for Hierarchical Phrase-Based Translation", "abstract": "We present a hierarchical phrase-based statistical machine translation in which a target sentence is efficiently generated in left-to-right order. The model is a class of synchronous-CFG with a Greibach Normal Form-like structure for the projected production rule: The paired target-side of a production rule takes a phrase prefixed form. The decoder for the target-normalized form is based on an Earley-style top-down parser on the source side. The target-normalized form coupled with our top-down parser implies a left-to-right generation of translations which enables a straightforward integration with n-gram language models. Our model was experimented on a Japanese-to-English newswire translation task, and showed statistically significant performance improvements against a phrase-based translation system.", "phrases": ["target sentence", "left-to-right order", "manner"], "overall_score": 1.683134891049311, "scores": [0.8705433269724624, 0.5764043048092371, 0.5216700559161179], "rank_score": 0.6562058958992725} -{"id": "mou-etal-2015-discriminative", "title": "Discriminative Neural Sentence Modeling by Tree-Based Convolution", "abstract": "This paper proposes a tree-based convolutional neural network (TBCNN) for discriminative sentence modeling. Our models leverage either constituency trees or dependency trees of sentences. The tree-based convolution process extracts sentences' structural features, and these features are aggregated by max pooling. Such architecture allows short propagation paths between the output layer and underlying feature detectors, which enables effective structural feature learning and extraction. We evaluate our models on two tasks: sentiment analysis and question classification. In both experiments, TBCNN outperforms previous state-of-the-art results, including existing neural networks and dedicated feature/rule engineering. 
We also make efforts to visualize the tree-based convolution process, shedding light on how our models work.", "phrases": ["tree-based convolution", "dependency tree", "question classification"], "overall_score": 1.4407946786022612, "scores": [0.8861230146971298, 0.5431030955484267, 0.5379756407229209], "rank_score": 0.6557339169894925} -{"id": "wu-etal-2020-similarity", "title": "Similarity Analysis of Contextual Word Representation Models", "abstract": "This paper investigates contextual word representation models from the lens of similarity analysis. Given a collection of trained models, we measure the similarity of their internal representations and attention. Critically, these models come from vastly different architectures. We use existing and novel similarity measures that aim to gauge the level of localization of information in the deep models, and facilitate the investigation of which design factors affect model similarity, without requiring any external linguistic annotation. The analysis reveals that models within the same family are more similar to one another, as may be expected. Surprisingly, different architectures have rather similar representations, but different individual neurons. We also observed differences in information localization in lower and higher layers and found that higher layers are more affected by fine-tuning on downstream tasks.", "phrases": ["internal representation", "similarity analysis", "cka"], "overall_score": 1.5097810192921437, "scores": [0.9113031199995996, 0.5357398907822871, 0.5200256859006478], "rank_score": 0.6556895655608449} -{"id": "kim-etal-2015-new", "title": "New Transfer Learning Techniques for Disparate Label Sets", "abstract": "In natural language understanding (NLU), a user utterance can be labeled differently depending on the domain or application (e.g., weather vs. calendar). Standard domain adaptation techniques are not directly applicable to take advantage of the existing annotations because they assume that the label set is invariant. We propose a solution based on label embeddings induced from canonical correlation analysis (CCA) that reduces the problem to a standard domain adaptation task and allows use of a number of transfer learning techniques. We also introduce a new transfer learning technique based on pretraining of hidden-unit CRFs (HUCRFs). We perform extensive experiments on slot tagging on eight personal digital assistant domains and demonstrate that the proposed methods are superior to strong baselines.", "phrases": ["language understanding", "canonical correlation analysis", "label space"], "overall_score": 1.572241700441736, "scores": [0.8364870464234502, 0.6073815157348245, 0.523158586538147], "rank_score": 0.6556757162321406} -{"id": "huang-etal-2021-document", "title": "Document-level Entity-based Extraction as Template Generation", "abstract": "Document-level entity-based extraction (EE), aiming at extracting entity-centric information such as entity roles and entity relations, is key to automatic knowledge acquisition from text corpora for various domains. Most document-level EE systems build extractive models, which struggle to model long-term dependencies among entities at the document level. To address this issue, we propose a generative framework for two document-level EE tasks: role-filler entity extraction (REE) and relation extraction (RE). 
We first formulate them as a template generation problem, allowing models to efficiently capture cross-entity dependencies, exploit label semantics, and avoid the exponential computational complexity of identifying N-ary relations. A novel cross-attention guided copy mechanism, TopK Copy, is incorporated into a pre-trained sequence-to-sequence model to enhance the capabilities of identifying key information in the input document. Experiments done on the MUC-4 and SciREX datasets show new state-of-the-art results on REE (+3.26%), binary RE (+4.8%), and 4-ary RE (+2.7%) in F1 score.", "phrases": ["extraction", "template generation problem", "cross-attention"], "overall_score": 0.720301788022623, "scores": [0.8746598333120554, 0.5684928114454415, 0.5237881826454244], "rank_score": 0.6556469424676404} -{"id": "duma-klein-2013-generating", "title": "Generating Natural Language from Linked Data: Unsupervised template extraction", "abstract": "We propose an architecture for generating natural language from Linked Data that automatically learns sentence templates and statistical document planning from parallel RDF datasets and text. We have built a proof-of-concept system (LOD-DEF) trained on un-annotated text from the Simple English Wikipedia and RDF triples from DBpedia, focusing exclusively on factual, non-temporal information. The goal of the system is to generate short descriptions, equivalent to Wikipedia stubs, of entities found in Linked Datasets. We have evaluated the LOD-DEF system against a simple generate-from-triples baseline and human-generated output. In evaluation by humans, LOD-DEF significantly outperforms the baseline on two of three measures: non-redundancy and structure and coherence.", "phrases": ["linked data", "rdf", "natural-language description"], "overall_score": 1.628525868494408, "scores": [0.8332655108641283, 0.5895328234664549, 0.5433027286208307], "rank_score": 0.6553670209838046} -{"id": "stanovsky-etal-2015-open", "title": "Open IE as an Intermediate Structure for Semantic Tasks", "abstract": "Semantic applications typically extract information from intermediate structures derived from sentences, such as dependency parse or semantic role labeling. In this paper, we study Open Information Extraction\u2019s (Open IE) output as an additional intermediate structure and find that for tasks such as text comprehension, word similarity and word analogy it can be very effective. Specifically, for word analogy, Open IE-based embeddings surpass the state of the art. We suggest that semantic applications will likely benefit from adding Open IE format to their set of potential sentence-level structures.", "phrases": ["intermediate structure", "open information extraction", "different semantic task"], "overall_score": 1.174221757125137, "scores": [0.8115304394487464, 0.5863230773168135, 0.5681834049720902], "rank_score": 0.6553456405792167} -{"id": "adams-stymne-2017-learning", "title": "Learning with learner corpora: Using the TLE for native language identification", "abstract": "This study investigates the usefulness of the Treebank of Learner English (TLE) when applied to the task of Native Language Identification (NLI). 
The TLE is effectively a parallel corpus of Standar ...", "phrases": ["native language identification", "learner corpus", "example retrieval system"], "overall_score": 1.1737741332198361, "scores": [0.8552087468563537, 0.5818665333900594, 0.5282121705164966], "rank_score": 0.65509581692097} -{"id": "katiyar-cardie-2018-nested", "title": "Nested Named Entity Recognition Revisited", "abstract": "We propose a novel recurrent neural network-based approach to simultaneously handle nested named entity recognition and nested entity mention detection. The model learns a hypergraph representation for nested entities using features extracted from a recurrent neural network. In evaluations on three standard data sets, we show that our approach significantly outperforms existing state-of-the-art methods, which are feature-based. The approach is also efficient: it operates linearly in the number of tokens and the number of possible output labels at any token. Finally, we present an extension of our model that jointly learns the head of each entity mention.", "phrases": ["recurrent neural network", "extension", "hypergraph-based representation", "bilou tagging scheme"], "overall_score": 1.4384252755698899, "scores": [0.9053878845950359, 0.5845678597158177, 0.5699079669978121, 0.5587585094817981], "rank_score": 0.654655555197616} -{"id": "morante-daelemans-2009-learning", "title": "Learning the Scope of Hedge Cues in Biomedical Texts", "abstract": "Identifying hedged information in biomedical literature is an important subtask in information extraction because it would be misleading to extract speculative information as factual information. In this paper we present a machine learning system that finds the scope of hedge cues in biomedical texts. The system is based on a similar system that finds the scope of negation cues. We show that the same scope finding approach can be applied to both negation and hedging. To investigate the robustness of the approach, the system is tested on the three subcorpora of the BioScope corpus that represent different text types.", "phrases": ["biomedical text", "negation cue", "shallow syntactic feature"], "overall_score": 2.107162751743534, "scores": [0.895582616218395, 0.5357739341704619, 0.5325241953026734], "rank_score": 0.6546269152305101} -{"id": "he-etal-2010-maximum", "title": "Maximum Entropy Based Phrase Reordering for Hierarchical Phrase-Based Translation", "abstract": "Hierarchical phrase-based (HPB) translation provides a powerful mechanism to capture both short and long distance phrase reorderings. However, the phrase reorderings lack contextual information in conventional HPB systems. This paper proposes a context-dependent phrase reordering approach that uses the maximum entropy (MaxEnt) model to help the HPB decoder select appropriate reordering patterns. We classify translation rules into several reordering patterns, and build a MaxEnt model for each pattern based on various contextual features. We integrate the MaxEnt models into the HPB model. Experimental results show that our approach achieves significant improvements over a standard HPB system on large-scale translation tasks. 
On Chinese-to-English translation, the absolute improvements in BLEU (case-insensitive) range from 1.2 to 2.1.", "phrases": ["hierarchical phrase-based translation", "soft constraint modeling", "rule pattern", "different classifier"], "overall_score": 1.4380575289785316, "scores": [0.9854953549185967, 0.5656146578121731, 0.5381680850216084, 0.5286746482922604], "rank_score": 0.6544881865111596} -{"id": "wachsmuth-etal-2018-retrieval", "title": "Retrieval of the Best Counterargument without Prior Topic Knowledge", "abstract": "Given any argument on any controversial topic, how to counter it? This question implies the challenging retrieval task of finding the best counterargument. Since prior knowledge of a topic cannot be expected in general, we hypothesize the best counterargument to invoke the same aspects as the argument while having the opposite stance. To operationalize our hypothesis, we simultaneously model the similarity and dissimilarity of pairs of arguments, based on the words and embeddings of the arguments' premises and conclusions. A salient property of our model is its independence from the topic at hand, i.e., it applies to arbitrary arguments. We evaluate different model variations on millions of argument pairs derived from the web portal idebate.org. Systematic ranking experiments suggest that our hypothesis is true for many arguments: For 7.6 candidates with opposing stance on average, we rank the best counterargument highest with 60% accuracy. Even among all 2801 test set pairs as candidates, we still find the best one about every third time.", "phrases": ["counterargument", "stance", "retrieval"], "overall_score": 1.3607602560815855, "scores": [0.8330781689421879, 0.6001842311712458, 0.529899673174266], "rank_score": 0.6543873577625665} -{"id": "li-etal-2019-choosing", "title": "Choosing between Long and Short Word Forms in Mandarin", "abstract": "Between 80% and 90% of all Chinese words have long and short forms such as \u8001\u864e/\u864e (lao-hu/hu, tiger) (Duanmu, 2013). Consequently, the choice between long and short forms is a key problem for lexical choice across NLP and NLG. Following an earlier work on abbreviations in English (Mahowald et al., 2013), we bring a probabilistic perspective to these questions, using both a behavioral and a corpus-based approach. We hypothesized that there is a higher probability of choosing short form in supportive context than in neutral context in Mandarin. Consistent with our prediction, our findings revealed that predictability of contexts has an effect on speakers' long and short form choice.", "phrases": ["mandarin", "short form", "corpus-based approach"], "overall_score": 0.7186997854694144, "scores": [0.8538019782066524, 0.5663256027438636, 0.542438629757145], "rank_score": 0.6541887369025536} -{"id": "liu-etal-2021-cross", "title": "Cross Attention Augmented Transducer Networks for Simultaneous Translation", "abstract": "This paper proposes a novel architecture, Cross Attention Augmented Transducer (CAAT), for simultaneous translation. The framework aims to jointly optimize the policy and translation models. To effectively consider all possible READ-WRITE simultaneous translation action paths, we adapt the online automatic speech recognition (ASR) model, RNN-T, but remove the strong monotonic constraint, which is critical for the translation task to consider reordering. To make CAAT work, we introduce a novel latency loss whose expectation can be optimized by a forward-backward algorithm. 
We implement CAAT with Transformer, while the general CAAT architecture can also be implemented with other attention-based encoder-decoder frameworks. Experiments on both speech-to-text (S2T) and text-to-text (T2T) simultaneous translation tasks show that CAAT achieves significantly better latency-quality trade-offs compared to the state-of-the-art simultaneous translation approaches.", "phrases": ["transducer", "simultaneous translation", "cross-attention"], "overall_score": 1.0527848261100876, "scores": [0.8795331401978068, 0.5522147422897604, 0.5306480906218236], "rank_score": 0.6541319910364636} -{"id": "milne-etal-2016-clpsych", "title": "CLPsych 2016 Shared Task: Triaging content in online peer-support forums", "abstract": "This paper introduces a new shared task for the text mining community. It aims to directly support the moderators of a youth mental health forum by asking participants to automatically triage posts into one of four severity labels: green, amber, red or crisis. The task attracted 60 submissions from 15 different teams, the best of whom achieve scores well above baselines. Their approaches and results provide valuable insights to enable moderators of peer support forums to react quickly to the most urgent, concerning content.", "phrases": ["participant", "peer support forum", "social medium"], "overall_score": 1.726190208338473, "scores": [0.8294397000859506, 0.6026510134258041, 0.5301897443252074], "rank_score": 0.654093485945654} -{"id": "he-toutanova-2009-joint", "title": "Joint Optimization for Machine Translation System Combination", "abstract": "System combination has emerged as a powerful method for machine translation (MT). This paper pursues a joint optimization strategy for combining outputs from multiple MT systems, where word alignment, ordering, and lexical selection decisions are made jointly according to a set of feature functions combined in a single log-linear model. The decoding algorithm is described in detail and a set of new features that support this joint decoding approach is proposed. The approach is evaluated in comparison to state-of-the-art confusion-network-based system combination methods using equivalent features and shown to outperform them significantly.", "phrases": ["system combination", "log-linear model", "flexible word order", "skeleton"], "overall_score": 1.0525155079981974, "scores": [0.9672842019155907, 0.5852026614868209, 0.5390174758195294, 0.5243542779182058], "rank_score": 0.6539646542850366} -{"id": "albrecht-hwa-2007-examination", "title": "A Re-examination of Machine Learning Approaches for Sentence-Level MT Evaluation", "abstract": "Recent studies suggest that machine learning can be applied to develop good automatic evaluation metrics for machine translated sentences. This paper further analyzes aspects of learning that impact performance. We argue that previously proposed approaches of training a Human-Likeness classifier are not as well correlated with human judgments of translation quality, but that regression-based learning produces more reliable metrics. 
We demonstrate the feasibility of regression-based metrics through empirical analysis of learning curves and generalization studies and show that they can achieve higher correlations with human judgments than standard automatic metrics.", "phrases": ["evaluation metric", "regression algorithm", "state-of-the-art correlation"], "overall_score": 1.7705816161308496, "scores": [0.8324773629174373, 0.5693907471233448, 0.559596580026855], "rank_score": 0.653821563355879} -{"id": "das-bandyopadhyay-2010-identifying", "title": "Identifying Emotional Expressions, Intensities and Sentence Level Emotion Tags Using a Supervised Framework", "abstract": "The present work deals with the extraction of emotional expressions and tagging of English blog sentences with Ekman\u2019s six basic emotion tags and any of the three intensities: low, medium and high. Baseline system is developed based on WordNet Affect lists and dependency relations. Support Vector Machine (SVM) based supervised framework is employed by incorporating different word and context level features. The feature analysis is carried out on 358 development sentences followed by Information Gain Based Pruning. Application of admissible tag sequences and a class-splitting technique improves the system\u2019s performance and reduces the label bias problem of SVM. The supervised system outperforms the baseline system and achieves average F-Scores of 82.72%, 76.74% and 89.21% for emotional expressions, sentential emotion tags and intensities respectively on 565 gold standard test sentences. A comparative evaluation shows that sentential emotion tagging based on emotional expressions, intensities and context features bridges the gap of identifying sentential emotion depending only on words.", "phrases": ["intensity", "supervised framework", "basic emotion tag"], "overall_score": 1.1712929117548507, "scores": [0.7964899135568876, 0.6409916356914884, 0.5236515133152273], "rank_score": 0.6537110208545345} -{"id": "chinkina-etal-2016-online", "title": "Online Information Retrieval for Language Learning", "abstract": "The reading material used in a language learning classroom should ideally be rich in terms of the grammatical constructions and vocabulary to be taught and in line with the learner\u2019s interests. We developed an online Information Retrieval system that helps teachers search for texts appropriate in form, content, and reading level. It identifies the 87 grammatical constructions spelled out in the official English language curriculum of schools in Baden-W\u00fcrttemberg, Germany. The tool incorporates a classical efficient algorithm for reranking the results by assigning weights to selected constructions and prioritizing the documents containing them. 
Supplemented by an interactive visualization module, it allows for a multifaceted presentation and analysis of the retrieved documents.", "phrases": ["reading material", "grammatical construction", "learner", "information retrieval system", "language curriculum"], "overall_score": 1.1711819790254023, "scores": [0.9823974077657165, 0.5958155224911662, 0.579837935813878, 0.555544879741758, 0.5546497947844665], "rank_score": 0.653649108119397} -{"id": "zhang-bowman-2018-language", "title": "Language Modeling Teaches You More than Translation Does: Lessons Learned Through Auxiliary Syntactic Task Analysis", "abstract": "Recently, researchers have found that deep LSTMs trained on tasks like machine translation learn substantial syntactic and semantic information about their input sentences, including part-of-speech. These findings begin to shed light on why pretrained representations, like ELMo and CoVe, are so beneficial for neural language understanding models. We still, though, do not yet have a clear understanding of how the choice of pretraining objective affects the type of linguistic information that models learn. With this in mind, we compare four objectives\u2014language modeling, translation, skip-thought, and autoencoding\u2014on their ability to induce syntactic and part-of-speech information, holding constant the quantity and genre of the training data, as well as the LSTM architecture.", "phrases": ["objective", "part-of-speech information", "language model", "pre-trained model"], "overall_score": 1.8122289330995882, "scores": [0.9727785677682439, 0.5530989201421028, 0.5521150910127244, 0.5365011158152018], "rank_score": 0.6536234236845683} -{"id": "silberer-lapata-2012-grounded", "title": "Grounded Models of Semantic Representation", "abstract": "A popular tradition of studying semantic representation has been driven by the assumption that word meaning can be learned from the linguistic environment, despite ample evidence suggesting that language is grounded in perception and action. In this paper we present a comparative study of models that represent word meaning based on linguistic and perceptual data. Linguistic information is approximated by naturally occurring corpora and sensorimotor experience by feature norms (i.e., attributes native speakers consider important in describing the meaning of a word). The models differ in terms of the mechanisms by which they integrate the two modalities. Experimental results show that a closer correspondence to human data can be obtained by uncovering latent information shared among the textual and perceptual modalities rather than arriving at semantic knowledge by concatenating the two.", "phrases": ["semantic representation", "sensorimotor experience", "modality", "compositionality"], "overall_score": 1.5667102131001176, "scores": [0.9489064798114243, 0.5848713581739882, 0.5512405395067955, 0.5284572498595224], "rank_score": 0.6533689068379326} -{"id": "church-etal-2007-compressing", "title": "Compressing Trigram Language Models With Golomb Coding", "abstract": "Trigram language models are compressed using a Golomb coding method inspired by the original Unix spell program. Compression methods trade off space, time and accuracy (loss). The proposed HashTBO method optimizes space at the expense of time and accuracy. Trigram language models are normally considered memory hogs, but with HashTBO, it is possible to squeeze a trigram language model into a few megabytes or less. 
HashTBO made it possible to ship a trigram contextual speller in Microsoft Office 2007.", "phrases": ["language model", "golomb coding", "microsoft office"], "overall_score": 1.271033467092872, "scores": [0.8426948270371082, 0.5952996246900261, 0.5215515237793781], "rank_score": 0.6531819918355041} -{"id": "roller-etal-2013-un", "title": "The (Un)expected Effects of Applying Standard Cleansing Models to Human Ratings on Compositionality", "abstract": "Human ratings are an important source for evaluating computational models that predict compositionality, but like many data sets of human semantic judgements, are often fraught with uncertainty and noise. However, despite their importance, to our knowledge there has been no extensive look at the effects of cleansing methods on human rating data. This paper assesses two standard cleansing approaches on two sets of compositionality ratings for German noun-noun compounds, in their ability to produce compositionality ratings of higher consistency, while reducing data quantity. We find (i) that our ratings are highly robust against aggressive filtering; (ii) Z-score filtering fails to detect unreliable item ratings; and (iii) Minimum Subject Agreement is highly effective at detecting unreliable subjects.", "phrases": ["compositionality", "crowdsourcing", "judgment"], "overall_score": 1.2710150298688854, "scores": [0.88586143009491, 0.5450157225049398, 0.5286403983301281], "rank_score": 0.6531725169766593} -{"id": "xu-etal-2020-self", "title": "Self-Attention Guided Copy Mechanism for Abstractive Summarization", "abstract": "Copy module has been widely equipped in the recent abstractive summarization models, which facilitates the decoder to extract words from the source into the summary. Generally, the encoder-decoder attention is served as the copy distribution, while how to guarantee that important words in the source are copied remains a challenge. In this work, we propose a Transformer-based model to enhance the copy mechanism. Specifically, we identify the importance of each source word based on the degree centrality with a directed graph built by the self-attention layer in the Transformer. We use the centrality of each source word to guide the copy process explicitly. Experimental results show that the self-attention graph provides useful guidance for the copy distribution. Our proposed models significantly outperform the baseline methods on the CNN/Daily Mail dataset and the Gigaword dataset.", "phrases": ["copy mechanism", "abstractive summarization", "centrality"], "overall_score": 1.1703119416407286, "scores": [0.9044615764480106, 0.5340078094645169, 0.5210212071160135], "rank_score": 0.6531635310095137} -{"id": "jurgens-etal-2019-just", "title": "A Just and Comprehensive Strategy for Using NLP to Address Online Abuse", "abstract": "Online abusive behavior affects millions and the NLP community has attempted to mitigate this problem by developing technologies to detect abuse. However, current methods have largely focused on a narrow definition of abuse to the detriment of victims who seek both validation and solutions. 
In this position paper, we argue that the community needs to make three substantive changes: (1) expanding our scope of problems to tackle both more subtle and more serious forms of abuse, (2) developing proactive technologies that counter or inhibit abuse before it harms, and (3) reframing our effort within a framework of justice to promote healthy communities.", "phrases": ["online abuse", "abusive behavior", "victim"], "overall_score": 2.0478574983758127, "scores": [0.8856978235206717, 0.5377739419109432, 0.5358915986965809], "rank_score": 0.6531211213760653} -{"id": "gittens-etal-2017-skip", "title": "Skip-Gram \u2212 Zipf + Uniform = Vector Additivity", "abstract": "In recent years word-embedding models have gained great popularity due to their remarkable performance on several tasks, including word analogy questions and caption generation. An unexpected \u201cside-effect\u201d of such models is that their vectors often exhibit compositionality, i.e., adding two word-vectors results in a vector that is only a small angle away from the vector of a word representing the semantic composite of the original words, e.g., \u201cman\u201d + \u201croyal\u201d = \u201cking\u201d. This work provides a theoretical justification for the presence of additive compositionality in word vectors learned using the Skip-Gram model. In particular, it shows that additive compositionality holds in an even stricter sense (small distance rather than small angle) under certain assumptions on the process generating the corpus. As a corollary, it explains the success of vector calculus in solving word analogies. When these assumptions do not hold, this work describes the correct non-linear composition operator. Finally, this work establishes a connection between the Skip-Gram model and the Sufficient Dimensionality Reduction (SDR) framework of Globerson and Tishby: the parameters of SDR models can be obtained from those of Skip-Gram models simply by adding information on symbol frequencies. This shows that Skip-Gram embeddings are optimal in the sense of Globerson and Tishby and, further, implies that the heuristics commonly used to approximately fit Skip-Gram models can be used to fit SDR models.", "phrases": ["additive compositionality", "assumption", "skip-gram", "theoretical perspective"], "overall_score": 1.270640041508524, "scores": [0.878934372059711, 0.6526581065080124, 0.5561916214874973, 0.5241351442642259], "rank_score": 0.6529798110798617} -{"id": "duh-etal-2011-generalized", "title": "Generalized Minimum Bayes Risk System Combination", "abstract": "Minimum Bayes Risk (MBR) has been used as a decision rule for both single-system decoding and system combination in machine translation. For system combination, we argue that common MBR implementations are actually not correct, since probabilities in the hypothesis space cannot be reliably estimated. These implementations achieve the effect of consensus decoding (which may be beneficial in its own right), but do not reduce Bayes Risk in the true Bayesian sense. We introduce Generalized MBR, which parameterizes the loss function in MBR and allows it to be optimized in the given hypothesis space of multiple systems. 
This extension better approximates the true Bayes Risk decision rule and empirically improves over MBR, even in cases where the combined systems are of mixed quality.", "phrases": ["minimum bayes risk", "system combination", "mbr", "loss function", "optimal sentence"], "overall_score": 1.1698699265548693, "scores": [0.9476992687788368, 0.6643473830894301, 0.5660727975348321, 0.5505245541705452, 0.5359401848913546], "rank_score": 0.6529168376929998} -{"id": "kutuzov-giulianelli-2020-uio", "title": "UiO-UvA at SemEval-2020 Task 1: Contextualised Embeddings for Lexical Semantic Change Detection", "abstract": "We apply contextualised word embeddings to lexical semantic change detection in the SemEval-2020 Shared Task 1. This paper focuses on Subtask 2, ranking words by the degree of their semantic drift over time. We analyse the performance of two contextualising architectures (BERT and ELMo) and three change detection algorithms. We find that the most effective algorithms rely on the cosine similarity between averaged token embeddings and the pairwise distances between token embeddings. They outperform strong baselines by a large margin (in the post-evaluation phase, we have the best Subtask 2 submission for SemEval-2020 Task 1), but interestingly, the choice of a particular algorithm depends on the distribution of gold scores in the test set.", "phrases": ["semeval-2020 task", "semantic change detection", "word embedding"], "overall_score": 1.169310774720207, "scores": [0.8802820740802579, 0.5522185432505761, 0.5253136900058228], "rank_score": 0.652604769112219} -{"id": "chi-etal-2021-improving", "title": "Improving Pretrained Cross-Lingual Language Models via Self-Labeled Word Alignment", "abstract": "The cross-lingual language models are typically pretrained with masked language modeling on multilingual text or parallel sentences. In this paper, we introduce denoising word alignment as a new cross-lingual pre-training task. Specifically, the model first self-label word alignments for parallel sentences. Then we randomly mask tokens in a bitext pair. Given a masked token, the model uses a pointer network to predict the aligned token in the other language. We alternately perform the above two steps in an expectation-maximization manner. Experimental results show that our method improves cross-lingual transferability on various datasets, especially on the token-level tasks, such as question answering, and structured prediction. Moreover, the model can serve as a pretrained word aligner, which achieves reasonably low error rate on the alignment benchmarks. The code and pretrained parameters are available at github.com/CZWin32768/XLM-Align.", "phrases": ["cross-lingual language model", "pre-training task", "token-level alignment", "xlm-align"], "overall_score": 1.1689791487562848, "scores": [0.8838388070138998, 0.6418037192327081, 0.5627801325987017, 0.521256081705545], "rank_score": 0.6524196851377138} -{"id": "hacioglu-2004-semantic", "title": "Semantic Role Labeling Using Dependency Trees", "abstract": "In this paper, a novel semantic role labeler based on dependency trees is developed. This is accomplished by formulating the semantic role labeling as a classification problem of dependency relations into one of several semantic roles. A dependency tree is created from a constituency parse of an input sentence. The dependency tree is then linearized into a sequence of dependency relations. A number of features are extracted for each dependency relation using a predefined linguistic context. 
Finally, the features are input to a set of one-versus-all support vector machine (SVM) classifiers to determine the corresponding semantic role label. We report results on CoNLL-2004 shared task data using the representation and scoring scheme adopted for that task.", "phrases": ["classification problem", "srl", "semantic role", "proposition"], "overall_score": 1.356609475429888, "scores": [0.8422436692689271, 0.6512819108040021, 0.5658782874965709, 0.5501611492647369], "rank_score": 0.6523912542085593} -{"id": "macavaney-etal-2018-rsdd", "title": "RSDD-Time: Temporal Annotation of Self-Reported Mental Health Diagnoses", "abstract": "Self-reported diagnosis statements have been widely employed in studying language related to mental health in social media. However, existing research has largely ignored the temporality of mental health diagnoses. In this work, we introduce RSDD-Time: a new dataset of 598 manually annotated self-reported depression diagnosis posts from Reddit that include temporal information about the diagnosis. Annotations include whether a mental health condition is present and how recently the diagnosis happened. Furthermore, we include exact temporal spans that relate to the date of diagnosis. This information is valuable for various computational methods to examine mental health through social media because one's mental health state is not static. We also test several baseline classification and extraction approaches, which suggest that extracting temporal information from self-reported diagnosis statements is challenging.", "phrases": ["diagnosis", "mental health condition", "rsdd-time"], "overall_score": 1.0495723877057945, "scores": [0.8475043025700317, 0.5731030050816885, 0.5358006648405459], "rank_score": 0.6521359908307555} -{"id": "yu-etal-2017-improved", "title": "Improved Neural Relation Detection for Knowledge Base Question Answering", "abstract": "Relation detection is a core component of many NLP applications including Knowledge Base Question Answering (KBQA). In this paper, we propose a hierarchical recurrent neural network enhanced by residual learning which detects KB relations given an input question. Our method uses deep residual bidirectional LSTMs to compare questions and relation names via different levels of abstraction. Additionally, we propose a simple KBQA system that integrates entity linking and our proposed relation detector to make the two components enhance each other. Our experimental results show that our approach not only achieves outstanding relation detection performance, but more importantly, it helps our KBQA system achieve state-of-the-art accuracy for both single-relation (SimpleQuestions) and multi-relation (WebQSP) QA benchmarks.", "phrases": ["relation detection", "bidirectional lstm", "entity linking"], "overall_score": 2.015465733434635, "scores": [0.8740290858343844, 0.5576299959526765, 0.5244438483861221], "rank_score": 0.6520343100577276} -{"id": "berg-kirkpatrick-etal-2010-painless", "title": "Painless Unsupervised Learning with Features", "abstract": "We show how features can easily be added to standard generative models for unsupervised learning, without requiring complex new training methods. In particular, each component multinomial of a generative model can be turned into a miniature logistic regression model if feature locality permits. The intuitive EM algorithm still applies, but with a gradient-based M-step familiar from discriminative training of logistic regression models. 
We apply this technique to part-of-speech induction, grammar induction, word alignment, and word segmentation, incorporating a few linguistically-motivated features into the standard generative model for each task. These feature-enhanced models each outperform their basic counterparts by a substantial margin, and even compete with and surpass more complex state-of-the-art models.", "phrases": ["unsupervised learning", "generative model", "m-step", "induction", "linguistically-motivated feature"], "overall_score": 2.015379397235582, "scores": [0.8989884865581586, 0.6598340313243988, 0.5758343672608486, 0.5696864784007181, 0.5556885312717144], "rank_score": 0.6520063789631678} -{"id": "chami-etal-2020-low", "title": "Low-Dimensional Hyperbolic Knowledge Graph Embeddings", "abstract": "Knowledge graph (KG) embeddings learn low-dimensional representations of entities and relations to predict missing facts. KGs often exhibit hierarchical and logical patterns which must be preserved in the embedding space. For hierarchical data, hyperbolic embedding methods have shown promise for high-fidelity and parsimonious representations. However, existing hyperbolic embedding methods do not account for the rich logical patterns in KGs. In this work, we introduce a class of hyperbolic KG embedding models that simultaneously capture hierarchical and logical patterns. Our approach combines hyperbolic reflections and rotations with attention to model complex relational patterns. Experimental results on standard KG benchmarks show that our method improves over previous Euclidean- and hyperbolic-based efforts by up to 6.1% in mean reciprocal rank (MRR) in low dimensions. Furthermore, we observe that different geometric transformations capture different types of relations while attention-based transformations generalize to multiple relations. In high dimensions, our approach yields new state-of-the-art MRRs of 49.6% on WN18RR and 57.7% on YAGO3-10.", "phrases": ["knowledge graph", "logical pattern", "curvature"], "overall_score": 1.355589648593336, "scores": [0.8610073222049329, 0.5709422414875807, 0.5237528998135053], "rank_score": 0.6519008211686729} -{"id": "nisioi-etal-2017-exploring", "title": "Exploring Neural Text Simplification Models", "abstract": "We present the first attempt at using sequence to sequence neural networks to model text simplification (TS). Unlike the previously proposed automated TS systems, our neural text simplification (NTS) systems are able to simultaneously perform lexical simplification and content reduction. An extensive human evaluation of the output has shown that NTS systems achieve almost perfect grammaticality and meaning preservation of output sentences and higher level of simplification than the state-of-the-art automated TS systems", "phrases": ["lexical simplification", "content reduction", "neural machine translation", "seq2seq model"], "overall_score": 1.4322937315399262, "scores": [0.8517430693775331, 0.6118657818866577, 0.6114813064371976, 0.5323697192973508], "rank_score": 0.6518649692496847} -{"id": "wiegand-etal-2019-detection", "title": "Detection of Abusive Language: the Problem of Biased Datasets", "abstract": "We discuss the impact of data bias on abusive language detection. We show that classification scores on popular datasets reported in previous work are much lower under realistic settings in which this bias is reduced. Such biases are most notably observed on datasets that are created by focused sampling instead of random sampling. 
Datasets with a higher proportion of implicit abuse are more affected than datasets with a lower proportion.", "phrases": ["abusive language", "explicit abuse", "topic bias"], "overall_score": 1.805851639825217, "scores": [0.8623762641126751, 0.5713328315261791, 0.5202608083789283], "rank_score": 0.6513233013392608} -{"id": "agirre-martinez-2004-unsupervised", "title": "Unsupervised WSD based on Automatically Retrieved Examples: The Importance of Bias", "abstract": "This paper explores the large-scale acquisition of sense-tagged examples for Word Sense Disambiguation (WSD). We have applied the \u201cWordNet monosemous relatives\u201d method to construct automatically a web corpus that we have used to train disambiguation systems. The corpus-building process has highlighted important factors, such as the distribution of senses (bias). The corpus has been used to train WSD algorithms that include supervised methods (combining automatic and manually-tagged examples), minimally supervised (requiring sense bias information from hand-tagged corpora), and fully unsupervised. These methods were tested on the Senseval-2 lexical sample test set, and compared successfully to other systems with minimum or no supervision.", "phrases": ["monosemous relative", "web", "majority"], "overall_score": 1.4995974078705645, "scores": [0.8677541698308049, 0.5606244507239451, 0.5254220173890686], "rank_score": 0.6512668793146061} -{"id": "nagy-t-vincze-2014-vpctagger", "title": "VPCTagger: Detecting Verb-Particle Constructions With Syntax-Based Methods", "abstract": "Verb-particle combinations (VPCs) consist of a verbal and a preposition/particle component, which often have some additional meaning compared to the meaning of their parts. If a data-driven morphological parser or a syntactic parser is trained on a dataset annotated with extra information for VPCs, they will be able to identify VPCs in raw texts. In this paper, we examine how syntactic parsers perform on this task and we introduce VPCTagger, a machine learning-based tool that is able to identify English VPCs in context. Our method consists of two steps: it first selects VPC candidates on the basis of syntactic information and then selects genuine VPCs among them by exploiting new features like semantic and contextual ones. Based on our results, we see that VPCTagger outperforms state-of-the-art methods in the VPC detection task.", "phrases": ["vpc candidate", "syntactic information", "vpctagger"], "overall_score": 0.902648945615373, "scores": [0.8386769222514372, 0.5916542553790792, 0.5230395586239094], "rank_score": 0.6511235787514752} -{"id": "rosti-etal-2010-bbn", "title": "BBN System Description for WMT10 System Combination Task", "abstract": "BBN submitted system combination outputs for Czech-English, German-English, Spanish-English, French-English, and All-English language pairs. All combinations were based on confusion network decoding. An incremental hypothesis alignment algorithm with flexible matching was used to build the networks. The bi-gram decoding weights for the single source language translations were tuned directly to maximize the BLEU score of the decoding output. Approximate expected BLEU was used as the objective function in gradient based optimization of the combination weights for a 44 system multi-source language combination (All-English). The system combination gained around 0.4\u20132.0 BLEU points over the best individual systems on the single source conditions. 
On the multi-source condition, the system combination gained 6.6 BLEU points.", "phrases": ["hypothesis alignment algorithm", "bleu score", "objective function", "bbn submission"], "overall_score": 1.8046602719345646, "scores": [0.8774712334210953, 0.6091216121221407, 0.5592845294068494, 0.5576970498592393], "rank_score": 0.6508936062023312} -{"id": "kawano-etal-2019-neural", "title": "Neural Conversation Model Controllable by Given Dialogue Act Based on Adversarial Learning and Label-aware Objective", "abstract": "Building a controllable neural conversation model (NCM) is an important task. In this paper, we focus on controlling the responses of NCMs by using dialogue act labels of responses as conditions. We introduce an adversarial learning framework for the task of generating conditional responses with a new objective to a discriminator, which explicitly distinguishes sentences by using labels. This change strongly encourages the generation of label-conditioned sentences. We compared the proposed method with some existing methods for generating conditional responses. The experimental results show that our proposed method has higher controllability for dialogue acts even though it has higher or comparable naturalness to existing methods.", "phrases": ["dialogue act", "adversarial learning framework", "neural conversation model"], "overall_score": 0.7149725593846118, "scores": [0.8022715467905981, 0.6017105235126823, 0.5484061382376985], "rank_score": 0.6507960695136595} -{"id": "opitz-etal-2021-weisfeiler", "title": "Weisfeiler-Leman in the Bamboo: Novel AMR Graph Metrics and a Benchmark for AMR Graph Similarity", "abstract": "Several metrics have been proposed for assessing the similarity of (abstract) meaning representations (AMRs), but little is known about how they relate to human similarity ratings. Moreover, the current metrics have complementary strengths and weaknesses: Some emphasize speed, while others make the alignment of graph structures explicit, at the price of a costly alignment step. In this work we propose new Weisfeiler-Leman AMR similarity metrics that unify the strengths of previous metrics, while mitigating their weaknesses. Specifically, our new metrics are able to match contextualized substructures and induce n:m alignments between their nodes. Furthermore, we introduce a Benchmark for AMR Metrics based on Overt Objectives (Bamboo), the first benchmark to support empirical assessment of graph-based MR similarity metrics. Bamboo maximizes the interpretability of results by defining multiple overt objectives that range from sentence similarity objectives to stress tests that probe a metric's robustness against meaning-altering and meaning-preserving graph transformations. We show the benefits of Bamboo by profiling previous metrics and our own metrics. Results indicate that our novel metrics may serve as a strong baseline for future work.", "phrases": ["bamboo", "strength", "amr similarity metric"], "overall_score": 0.7148558568611745, "scores": [0.8174859046675851, 0.5954779806448208, 0.5391056415845356], "rank_score": 0.6506898422989805} -{"id": "rello-ilisei-2009-rule", "title": "A Rule-Based Approach to the Identification of Spanish Zero Pronouns", "abstract": "This paper presents a new rule-based method to identify Spanish zero pronouns. 
The paper describes the comparative evaluation of a baseline method for the identification of zero pronouns with an approach that supplements the baseline by adding a set of restrictions treating impersonal sentences and other zero subject expressions. The identification rules have been tested on a new corpus in which zero pronouns have been manually annotated (the Z-Corpus). The comparative evaluation shows that this rule-based method outperforms the baseline.", "phrases": ["pronoun", "rule-based method", "z-corpus"], "overall_score": 1.1657119345731453, "scores": [0.8882350330988487, 0.5346819842317951, 0.52887163721801], "rank_score": 0.6505962181828846}
-{"id": "merkx-frank-2021-human", "title": "Human Sentence Processing: Recurrence or Attention?", "abstract": "Recurrent neural networks (RNNs) have long been an architecture of interest for computational models of human sentence processing. The recently introduced Transformer architecture outperforms RNNs on many natural language processing tasks but little is known about its ability to model human language processing. We compare Transformer- and RNN-based language models' ability to account for measures of human reading effort. Our analysis shows Transformers to outperform RNNs in explaining self-paced reading times and neural activity during reading English sentences, challenging the widely held idea that human sentence processing involves recurrent and immediate processing and provides evidence for cue-based retrieval.", "phrases": ["recurrent neural network", "computational model", "reading time", "human sentence processing"], "overall_score": 1.046356973713271, "scores": [0.9591480034699983, 0.5725790095814195, 0.5405156741027634, 0.5283098799983335], "rank_score": 0.6501381417881287}
-{"id": "hira-etal-2019-exploring", "title": "Exploring Transfer Learning and Domain Data Selection for the Biomedical Translation", "abstract": "Transfer Learning and Selective data training are two of the many approaches being extensively investigated to improve the quality of Neural Machine Translation systems. This paper presents a series of experiments by applying transfer learning and selective data training for participation in the Bio-medical shared task of WMT19. We have used Information Retrieval to selectively choose related sentences from out-of-domain data and used them as additional training data using transfer learning. We also report the effect of tokenization on translation model performance.", "phrases": ["transfer learning", "data training", "out-of-domain data"], "overall_score": 0.9012481172669852, "scores": [0.83987874887803, 0.5652794709089418, 0.5451810643004201], "rank_score": 0.6501130946957973}
-{"id": "bergsma-etal-2013-broadly", "title": "Broadly Improving User Classification via Communication-Based Name and Location Clustering on Twitter", "abstract": "Hidden properties of social media users, such as their ethnicity, gender, and location, are often reflected in their observed attributes, such as their first and last names. Furthermore, users who communicate with each other often have similar hidden properties. We propose an algorithm that exploits these insights to cluster the observed attributes of hundreds of millions of Twitter users. Attributes such as user names are grouped together if users with those names communicate with other similar users. We separately cluster millions of unique first names, last names, and user-provided locations.
The efficacy of these clusters is then evaluated on a diverse set of classification tasks that predict hidden user properties such as ethnicity, geographic location, gender, language, and race, using only profile names and locations when appropriate. Our readily-replicable approach and publicly-released clusters are shown to be remarkably effective and versatile, substantially outperforming state-of-the-art approaches and human accuracy on each of the tasks studied.", "phrases": ["twitter", "ethnicity", "user name"], "overall_score": 1.0463011135550742, "scores": [0.8722320109058574, 0.5514312898990326, 0.5266470009562827], "rank_score": 0.6501034339203909}
-{"id": "bawden-etal-2019-findings", "title": "Findings of the WMT 2019 Biomedical Translation Shared Task: Evaluation for MEDLINE Abstracts and Biomedical Terminologies", "abstract": "In the fourth edition of the WMT Biomedical Translation task, we considered a total of six languages, namely Chinese (zh), English (en), French (fr), German (de), Portuguese (pt), and Spanish (es). We performed an evaluation of automatic translations for a total of 10 language directions, namely, zh/en, en/zh, fr/en, en/fr, de/en, en/de, pt/en, en/pt, es/en, and en/es. We provided training data based on MEDLINE abstracts for eight of the 10 language pairs and test sets for all of them. In addition to that, we offered a new sub-task for the translation of terms in biomedical terminologies for the en/es language direction. Higher BLEU scores (close to 0.5) were obtained for the es/en, en/es and en/pt test sets, as well as for the terminology sub-task. After manual validation of the primary runs, some submissions were judged to be better than the reference translations, for instance, for de/en, en/es and es/en.", "phrases": ["wmt", "abstract", "biomedical terminology"], "overall_score": 1.164689432038571, "scores": [0.8060434219930394, 0.6002678936239813, 0.543765330340969], "rank_score": 0.6500255486526632}
-{"id": "xu-etal-2020-novel", "title": "A Novel Joint Framework for Multiple Chinese Events Extraction", "abstract": "Event extraction is an essential yet challenging task in information extraction. Previous approaches have paid little attention to the problem of roles overlap which is a common phenomenon in practice. To solve this problem, this paper defines event relation triple to explicitly represent relations among triggers, arguments and roles which are incorporated into the model to learn their inter-dependencies. The task of argument extraction is converted to event relation triple extraction. A novel joint framework for multiple Chinese event extraction is proposed which jointly performs predictions for event triggers and arguments based on shared feature representations from pre-trained language model. Experimental comparison with state-of-the-art baselines on ACE 2005 dataset shows the superiority of the proposed method in both trigger classification and argument classification.", "phrases": ["novel joint framework", "trigger", "argument extraction"], "overall_score": 0.7140339237195056, "scores": [0.8382242773960443, 0.5692788666680323, 0.54232191547123], "rank_score": 0.6499416865117689}
-{"id": "wallace-etal-2020-imitation", "title": "Imitation Attacks and Defenses for Black-box Machine Translation Systems", "abstract": "Adversaries may look to steal or attack black-box NLP systems, either for financial gain or to exploit model errors.
One setting of particular interest is machine translation (MT), where models have high commercial value and errors can be costly. We investigate possible exploitations of black-box MT systems and explore a preliminary defense against such threats. We first show that MT systems can be stolen by querying them with monolingual sentences and training models to imitate their outputs. Using simulated experiments, we demonstrate that MT model stealing is possible even when imitation models have different input data or architectures than their target models. Applying these ideas, we train imitation models that reach within 0.6 BLEU of three production MT systems on both high-resource and low-resource language pairs. We then leverage the similarity of our imitation models to transfer adversarial examples to the production systems. We use gradient-based attacks that expose inputs which lead to semantically-incorrect translations, dropped content, and vulgar model outputs. To mitigate these vulnerabilities, we propose a defense that modifies translation outputs in order to misdirect the optimization of imitation models. This defense degrades the adversary's BLEU score and attack success rate at some cost in the defender's BLEU and inference speed.", "phrases": ["attack", "machine translation", "imitation model"], "overall_score": 1.558169537859174, "scores": [0.8318996117444823, 0.588899880861068, 0.528622013247922], "rank_score": 0.6498071686178241}
-{"id": "rushdi-saleh-etal-2011-bilingual", "title": "Bilingual Experiments with an Arabic-English Corpus for Opinion Mining", "abstract": "Recently, Opinion Mining (OM) is receiving more attention due to the abundance of forums, blogs, e-commerce web sites, news reports and additional web sources where people tend to express their opinions. There are a number of works about Sentiment Analysis (SA) studying the task of identifying the polarity, whether the opinion expressed in a text is positive or negative about a given topic. However, most research is focused on English texts and there are very few resources for other languages. In this work we present an Opinion Corpus for Arabic (OCA) composed of Arabic reviews extracted from specialized web pages related to movies and films using this language. Moreover, we have translated the OCA corpus into English, generating the EVOCA corpus (English Version of OCA). In the experiments carried out in this work we have used different machine learning algorithms to classify the polarity in these corpora showing that, although the experiments with EVOCA are worse than OCA, the results are comparable with other English experiments, since the loss of precision due to the translation is very slight.", "phrases": ["opinion mining", "arabic", "other english experiment"], "overall_score": 0.8995550555196812, "scores": [0.8669897835896359, 0.55240136120285, 0.5272842816147745], "rank_score": 0.6488918088024201}
-{"id": "mi-liu-2010-constituency", "title": "Constituency to Dependency Translation with Forests", "abstract": "Tree-to-string systems (and their forest-based extensions) have gained steady popularity thanks to their simplicity and efficiency, but there is a major limitation: they are unable to guarantee the grammaticality of the output, which is explicitly modeled in string-to-tree systems via target-side syntax.
We thus propose to combine the advantages of both, and present a novel constituency-to-dependency translation model, which uses constituency forests on the source side to direct the translation, and dependency trees on the target side (as a language model) to ensure grammaticality. Medium-scale experiments show an absolute and statistically significant improvement of +0.7 BLEU points over a state-of-the-art forest-based tree-to-string system even with fewer rules. This is also the first time that a tree-to-tree model can surpass tree-to-string counterparts.", "phrases": ["constituency-to-dependency translation model", "source side", "dependency tree", "constituency"], "overall_score": 1.0440712066269076, "scores": [0.8403458332530431, 0.6203358099163949, 0.5732787582046204, 0.56091125800636], "rank_score": 0.6487179148451045}
-{"id": "belz-2005-statistical", "title": "Statistical Generation: Three Methods Compared and Evaluated", "abstract": "Statistical NLG has largely meant n-gram modelling which has the considerable advantages of lending robustness to NLG systems, and of making automatic adaptation to new domains from raw corpora possible. On the downside, n-gram models are expensive to use as selection mechanisms and have a built-in bias towards shorter realisations. This paper looks at treebank-training of generators, an alternative method for building statistical models for NLG from raw corpora, and two different ways of using treebank-trained models during generation. Results show that the treebank-trained generators achieve improvements similar to a 2-gram generator over a baseline of random selection. However, the treebank-trained generators achieve this at a much lower cost than the 2-gram generator, and without its strong preference for shorter realisations.", "phrases": ["n-gram model", "realisation", "cost", "generation model"], "overall_score": 1.1622178808344588, "scores": [0.9071313533675539, 0.6156977701106942, 0.5417666446738149, 0.5299888304942676], "rank_score": 0.6486461496615826}
-{"id": "pekar-2006-acquisition", "title": "Acquisition of Verb Entailment from Text", "abstract": "The study addresses the problem of automatic acquisition of entailment relations between verbs. While this task has much in common with paraphrase acquisition which aims to discover semantic equivalence between verbs, the main challenge of entailment acquisition is to capture asymmetric, or directional, relations. Motivated by the intuition that it often underlies the local structure of coherent text, we develop a method that discovers verb entailment using evidence about discourse relations between clauses available in a parsed corpus. In comparison with earlier work, the proposed method covers a much wider range of verb entailment types and learns the mapping between verbs with highly varied argument structures.", "phrases": ["entailment relation", "coherent text", "acquisition"], "overall_score": 1.1621072635546728, "scores": [0.7949474877893804, 0.6045008005844847, 0.5463049505728965], "rank_score": 0.6485844129822539}
-{"id": "ma-etal-2011-improving", "title": "Improving Low-Resource Statistical Machine Translation with a Novel Semantic Word Clustering Algorithm", "abstract": "In this paper we present a non-language-specific strategy that uses large amounts of monolingual data to improve statistical machine translation (SMT) when only a small parallel training corpus is available.
This strategy uses word classes derived from monolingual text data to improve the word alignment quality, which generally deteriorates significantly because of insufficient training. We present a novel semantic word clustering algorithm to generate the word classes motivated by the word similarity metric presented in (Lin, 1998). Our clustering results showed this novel word clustering outperforms a state-of-the-art hierarchical clustering. We then designed a new procedure for using the derived word classes to improve word alignment quality. Our experiments showed that the use of the word classes can recover over 90% of the alignment quality that is lost due to the limited parallel training.", "phrases": ["novel semantic word", "monolingual data", "few study"], "overall_score": 1.1619395449314633, "scores": [0.8560302501700294, 0.5593631101402335, 0.5300790619988476], "rank_score": 0.6484908074363701}
-{"id": "goyal-etal-2010-toward", "title": "Toward Plot Units: Automatic Affect State Analysis", "abstract": "We present a system called AESOP that automatically produces affect states associated with characters in a story. This research represents a first step toward the automatic generation of plot unit structures from text. AESOP incorporates several existing sentiment analysis tools and lexicons to evaluate the effectiveness of current sentiment technology on this task. AESOP also includes two novel components: a method for acquiring patient polarity verbs, which impart negative affect on their patients, and affect projection rules to propagate affect tags from surrounding words onto the characters in the story. We evaluate AESOP on a small collection of fables.", "phrases": ["character", "story", "projection rule"], "overall_score": 0.8988116861816008, "scores": [0.8760215186734982, 0.5358647853969141, 0.5331804394504518], "rank_score": 0.6483555811736214}
-{"id": "weller-etal-2013-using", "title": "Using subcategorization knowledge to improve case prediction for translation to German", "abstract": "This paper demonstrates the need and impact of subcategorization information for SMT. We combine (i) features on source-side syntactic subcategorization and (ii) an external knowledge base with quantitative, dependency-based information about target-side subcategorization frames. A manual evaluation of an English-to-German translation task shows that the subcategorization information has a positive impact on translation quality through better prediction of case.", "phrases": ["case prediction", "subcategorization frame", "source-side syntactic feature"], "overall_score": 1.5544779228689218, "scores": [0.7936369619464186, 0.5850624803993485, 0.5661034944248975], "rank_score": 0.6482676455902215}
-{"id": "pratapa-etal-2018-word", "title": "Word Embeddings for Code-Mixed Language Processing", "abstract": "We compare three existing bilingual word embedding approaches, and a novel approach of training skip-grams on synthetic code-mixed text generated through linguistic models of code-mixing, on two tasks - sentiment analysis and POS tagging for code-mixed text. Our results show that while CVM and CCA based embeddings perform as well as the proposed embedding technique on semantic and syntactic tasks respectively, the proposed approach provides the best performance for both tasks overall.
Thus, this study demonstrates that existing bilingual embedding techniques are not ideal for code-mixed text processing and there is a need for learning multilingual word embedding from the code-mixed text.", "phrases": ["bilingual word", "code-mixed text", "sentiment analysis", "pos", "syntactic task"], "overall_score": 1.6622682604859793, "scores": [0.8901711510780802, 0.6155468471774385, 0.6004826892248564, 0.583124816679202, 0.5510274791271623], "rank_score": 0.6480705966573479} -{"id": "bastianelli-etal-2013-unitor", "title": "UNITOR-HMM-TK: Structured Kernel-based learning for Spatial Role Labeling", "abstract": "In this paper the UNITOR-HMM-TK system participating in the Spatial Role Labeling task at SemEval 2013 is presented. The spatial roles classification is addressed as a sequence-based word classification problem: the SVM learning algorithm is applied, based on a simple feature modeling and a robust lexical generalization achieved through a Distributional Model of Lexical Semantics. In the identification of spatial relations, roles are combined to generate candidate relations, later verified by a SVM classifier. The Smoothed Partial Tree Kernel is applied, i.e. a convolution kernel that enhances both syntactic and lexical properties of the examples, avoiding the need of a manual feature engineering phase. Finally, results on three of the five tasks of the challenge are reported.", "phrases": ["spatial relation", "pipeline", "element"], "overall_score": 1.0430201022130763, "scores": [0.8708961461178643, 0.5444502469540305, 0.5288480877868696], "rank_score": 0.6480648269529214} -{"id": "somasundaran-etal-2008-discourse-level", "title": "Discourse Level Opinion Relations: An Annotation Study", "abstract": "This work proposes opinion frames as a representation of discourse-level associations that arise from related opinion targets and which are common in task-oriented meeting dialogs. We define the opinion frames and explain their interpretation. Additionally we present an annotation scheme that realizes the opinion frames and via human annotation studies, we show that these can be reliably identified.", "phrases": ["annotation study", "opinion frame", "meeting dialog"], "overall_score": 0.8984054997838127, "scores": [0.8389465012658073, 0.5818054701282604, 0.5234357674741482], "rank_score": 0.6480625796227386} -{"id": "bernardy-etal-2018-influence", "title": "The Influence of Context on Sentence Acceptability Judgements", "abstract": "We investigate the influence that document context exerts on human acceptability judgements for English sentences, via two sets of experiments. The first compares ratings for sentences presented on their own with ratings for the same set of sentences given in their document contexts. The second assesses the accuracy with which two types of neural models \u2014 one that incorporates context during training and one that does not \u2014 predict these judgements. 
Our results indicate that: (1) context improves acceptability ratings for ill-formed sentences, but also reduces them for well-formed sentences; and (2) context helps unsupervised systems to model acceptability.", "phrases": ["influence", "acceptability", "document context"], "overall_score": 1.0428895268435103, "scores": [0.8267600424837874, 0.5885250730728129, 0.5286659721860507], "rank_score": 0.647983695914217}
-{"id": "wong-etal-2012-exploring", "title": "Exploring Adaptor Grammars for Native Language Identification", "abstract": "The task of inferring the native language of an author based on texts written in a second language has generally been tackled as a classification problem, typically using as features a mix of n-grams over characters and part of speech tags (for small and fixed n) and unigram function words. To capture arbitrarily long n-grams that syntax-based approaches have suggested are useful, adaptor grammars have some promise. In this work we investigate their extension to identifying n-gram collocations of arbitrary length over a mix of PoS tags and words, using both maxent and induced syntactic language model approaches to classification. After presenting a new, simple baseline, we show that learned collocations used as features in a maxent model perform better still, but that the story is more mixed for the syntactic language model.", "phrases": ["adaptor grammar", "native language identification", "n-gram", "collocation"], "overall_score": 1.3471329217525243, "scores": [0.941720556815501, 0.5977296367801723, 0.529559115348143, 0.5223266718970196], "rank_score": 0.647833995210209}
-{"id": "naseem-etal-2019-rewarding", "title": "Rewarding Smatch: Transition-Based AMR Parsing with Reinforcement Learning", "abstract": "Our work involves enriching the Stack-LSTM transition-based AMR parser (Ballesteros and Al-Onaizan, 2017) by augmenting training with Policy Learning and rewarding the Smatch score of sampled graphs. In addition, we also combined several AMR-to-text alignments with an attention mechanism and we supplemented the parser with pre-processed concept identification, named entities and contextualized embeddings. We achieve a highly competitive performance that is comparable to the best published results. We show an in-depth study ablating each of the new components of the parser.", "phrases": ["reinforcement learning", "amr parser", "transition-based model"], "overall_score": 1.4230436400266124, "scores": [0.8151083417052156, 0.5932528899203874, 0.5346039819054916], "rank_score": 0.6476550711770316}
-{"id": "banerjee-bhattacharyya-2018-meaningless", "title": "Meaningless yet meaningful: Morphology grounded subword-level NMT", "abstract": "We explore the use of two independent subsystems, Byte Pair Encoding (BPE) and Morfessor, as basic units for subword-level neural machine translation (NMT). We show that, for linguistically distant language-pairs, the Morfessor-based segmentation algorithm produces significantly better quality translation than BPE. However, for close language-pairs, BPE-based subword-NMT may translate better than Morfessor-based subword-NMT. We propose a combined approach of these two segmentation algorithms, Morfessor-BPE (M-BPE), which outperforms these two baseline systems in terms of BLEU score.
Our results are supported by experiments on three language-pairs: English-Hindi, Bengali-Hindi and English-Bengali.", "phrases": ["morphology", "morfessor", "hindi"], "overall_score": 1.3466841834985228, "scores": [0.8650158454825749, 0.5395612197901232, 0.5382775279042239], "rank_score": 0.6476181977256407} -{"id": "sokolov-etal-2012-computing", "title": "Computing Lattice BLEU Oracle Scores for Machine Translation", "abstract": "The search space of Phrase-Based Statistical Machine Translation (PBSMT) systems can be represented under the form of a directed acyclic graph (lattice). The quality of this search space can thus be evaluated by computing the best achievable hypothesis in the lattice, the so-called oracle hypothesis. For common SMT metrics, this problem is however NP-hard and can only be solved using heuristics. In this work, we present two new methods for efficiently computing BLEU oracles on lattices: the first one is based on a linear approximation of the corpus BLEU score and is solved using the FST formalism; the second one relies on integer linear programming formulation and is solved directly and using the Lagrangian relaxation framework. These new decoders are positively evaluated and compared with several alternatives from the literature for three language pairs, using lattices produced by two PBSMT systems.", "phrases": ["machine translation", "search space", "suboptimal result"], "overall_score": 1.0416863363831839, "scores": [0.8794600130168972, 0.5328184392475537, 0.5294298826804112], "rank_score": 0.6472361116482874} -{"id": "cai-etal-2018-full", "title": "A Full End-to-End Semantic Role Labeler, Syntactic-agnostic Over Syntactic-aware?", "abstract": "Semantic role labeling (SRL) is to recognize the predicate-argument structure of a sentence, including subtasks of predicate disambiguation and argument labeling. Previous studies usually formulate the entire SRL problem into two or more subtasks. For the first time, this paper introduces an end-to-end neural model which unifiedly tackles the predicate disambiguation and the argument labeling in one shot. Using a biaffine scorer, our model directly predicts all semantic role labels for all given word pairs in the sentence without relying on any syntactic parse information. Specifically, we augment the BiLSTM encoder with a non-linear transformation to further distinguish the predicate and the argument in a given sentence, and model the semantic role labeling process as a word pair classification task by employing the biaffine attentional mechanism. Though the proposed model is syntax-agnostic with local decoder, it outperforms the state-of-the-art syntax-aware SRL systems on the CoNLL-2008, 2009 benchmarks for both English and Chinese. To our best knowledge, we report the first syntax-agnostic SRL model that surpasses all known syntax-aware models.", "phrases": ["semantic role labeling", "end-to-end model", "dependency-based srl"], "overall_score": 1.999612529962726, "scores": [0.8256054439167105, 0.5852462485918263, 0.5298649687494276], "rank_score": 0.6469055537526548} -{"id": "sheng-etal-2021-casee", "title": "CasEE: A Joint Learning Framework with Cascade Decoding for Overlapping Event Extraction", "abstract": "Event extraction (EE) is a crucial information extraction task that aims to extract event information in texts. Most existing methods assume that events appear in sentences without overlaps, which are not applicable to the complicated overlapping event extraction. 
This work systematically studies the realistic event overlapping problem, where a word may serve as triggers with several types or arguments with different roles. To tackle the above problem, we propose a novel joint learning framework with cascade decoding for overlapping event extraction, termed as CasEE. Particularly, CasEE sequentially performs type detection, trigger extraction and argument extraction, where the overlapped targets are extracted separately conditioned on the specific former prediction. All the subtasks are jointly learned in a framework to capture dependencies among the subtasks. The evaluation on a public event extraction benchmark FewFC demonstrates that CasEE achieves significant improvements on overlapping event extraction over previous competitive methods.", "phrases": ["joint learning framework", "type detection", "trigger extraction", "argument extraction", "casee"], "overall_score": 0.7106633427249969, "scores": [0.8041796084626532, 0.7856983247646243, 0.570052196934849, 0.5510250828692569, 0.5234130443388374], "rank_score": 0.6468736514740442}
-{"id": "murahari-etal-2019-improving", "title": "Improving Generative Visual Dialog by Answering Diverse Questions", "abstract": "Prior work on training generative Visual Dialog models with reinforcement learning (Das et al., ICCV 2017) has explored a Q-Bot-A-Bot image-guessing game and shown that this `self-talk' approach can lead to improved performance at the downstream dialog-conditioned image-guessing task. However, this improvement saturates and starts degrading after a few rounds of interaction, and does not lead to a better Visual Dialog model. We find that this is due in part to repeated interactions between Q-Bot and A-Bot during self-talk, which are not informative with respect to the image. To improve this, we devise a simple auxiliary objective that incentivizes Q-Bot to ask diverse questions, thus reducing repetitions and in turn enabling A-Bot to explore a larger state space during RL i.e. be exposed to more visual concepts to talk about, and varied questions to answer. We evaluate our approach via a host of automatic metrics and human studies, and demonstrate that it leads to better dialog, i.e. dialog that is more diverse (i.e. less repetitive), consistent (i.e. has fewer conflicting exchanges), fluent (i.e., more human-like), and detailed, while still being comparably image-relevant as prior work and ablations.", "phrases": ["dialog", "a-bot", "repetition"], "overall_score": 1.0409060519924875, "scores": [0.8263897920465592, 0.5888840572959334, 0.5249800317498761], "rank_score": 0.6467512936974562}
-{"id": "mairesse-young-2014-stochastic", "title": "Stochastic Language Generation in Dialogue using Factored Language Models", "abstract": "Most previous work on trainable language generation has focused on two paradigms: (a) using a statistical model to rank a set of pre-generated utterances, or (b) using statistics to determine the generation decisions of an existing generator. Both approaches rely on the existence of a handcrafted generation component, which is likely to limit their scalability to new domains. The first contribution of this article is to present Bagel, a fully data-driven generation method that treats the language generation task as a search for the most likely sequence of semantic concepts and realization phrases, according to Factored Language Models (FLMs). As domain utterances are not readily available for most natural language generation tasks, a large creative effort is required to produce the data necessary to represent human linguistic variation for nontrivial domains.
This article is based on the assumption that learning to produce paraphrases can be facilitated by collecting data from a large sample of untrained annotators using crowdsourcing\u2014rather than a few domain experts\u2014by relying on a coarse meaning representation. A second contribution of this article is to use crowdsourced data to show how dialogue naturalness can be improved by learning to vary the output utterances generated for a given semantic input. Two data-driven methods for generating paraphrases in dialogue are presented: (a) by sampling from the n-best list of realizations produced by Bagel's FLM reranker; and (b) by learning a structured perceptron predicting whether candidate realizations are valid paraphrases. We train Bagel on a set of 1,956 utterances produced by 137 annotators, which covers 10 types of dialogue acts and 128 semantic concepts in a tourist information system for Cambridge. An automated evaluation shows that Bagel outperforms utterance class LM baselines on this domain. A human evaluation of 600 resynthesized dialogue extracts shows that Bagel's FLM output produces utterances comparable to a handcrafted baseline, whereas the perceptron classifier performs worse. Interestingly, human judges find the system sampling from the n-best list to be more natural than a system always returning the first-best utterance. The judges are also more willing to interact with the n-best system in the future. These results suggest that capturing the large variation found in human language using data-driven methods is beneficial for dialogue interaction.", "phrases": ["generator", "factored language models", "phrase-based nlg system"], "overall_score": 1.5505724922942288, "scores": [0.8850322422300201, 0.530040597927985, 0.5248440234563426], "rank_score": 0.6466389545381159}
-{"id": "chen-etal-2020-hierarchical", "title": "Hierarchical Entity Typing via Multi-level Learning to Rank", "abstract": "We propose a novel method for hierarchical entity classification that embraces ontological structure both at training and during prediction. At training, our novel multi-level learning-to-rank loss compares positive types against negative siblings according to the type tree. During prediction, we define a coarse-to-fine decoder that restricts viable candidates at each level of the ontology based on already predicted parent type(s). Our approach significantly outperforms prior work on strict accuracy, demonstrating the effectiveness of our method.", "phrases": ["multi-level learning", "ontology", "multi-level learning-to-rank loss"], "overall_score": 1.3446273716401882, "scores": [0.8292013748267976, 0.5742916316324054, 0.5363942344496577], "rank_score": 0.6466290803029536}
-{"id": "murray-carenini-2009-predicting", "title": "Predicting Subjectivity in Multimodal Conversations", "abstract": "In this research we aim to detect subjective sentences in multimodal conversations. We introduce a novel technique wherein subjective patterns are learned from both labeled and unlabeled data, using n-gram word sequences with varying levels of lexical instantiation. Applying this technique to meeting speech and email conversations, we gain significant improvement over state-of-the-art approaches.
Furthermore, we show that coupling the pattern-based approach with features that capture characteristics of general conversation structure yields additional improvement.", "phrases": ["subjective pattern", "n-gram word sequence", "lexical instantiation"], "overall_score": 0.8962209980425134, "scores": [0.8245091376560723, 0.5679724103320307, 0.5469788361366344], "rank_score": 0.6464867947082458} -{"id": "garg-etal-2018-code", "title": "Code-switched Language Models Using Dual RNNs and Same-Source Pretraining", "abstract": "This work focuses on building language models (LMs) for code-switched text. We propose two techniques that significantly improve these LMs: 1) A novel recurrent neural network unit with dual components that focus on each language in the code-switched text separately 2) Pretraining the LM using synthetic text from a generative model estimated using the training data. We demonstrate the effectiveness of our proposed techniques by reporting perplexities on a Mandarin-English task and derive significant reductions in perplexity.", "phrases": ["rnn", "code-switched text", "code-mixed machine translation"], "overall_score": 1.4883708762747472, "scores": [0.7816136759333266, 0.6059926363898316, 0.551567463451732], "rank_score": 0.6463912585916302} -{"id": "chahuneau-etal-2012-word", "title": "Word Salad: Relating Food Prices and Descriptions", "abstract": "We investigate the use of language in food writing, specifically on restaurant menus and in customer reviews. Our approach is to build predictive models of concrete external variables, such as restaurant menu prices. We make use of a dataset of menus and customer reviews for thousands of restaurants in several U.S. cities. By focusing on prediction tasks and doing our analysis at scale, our methodology allows quantitative, objective measurements of the words and phrases used to describe food in restaurants. We also explore interactions in language use between menu prices and sentiment as expressed in user reviews.", "phrases": ["food price", "restaurant menu", "review", "sentiment information"], "overall_score": 0.8958921480914462, "scores": [0.9081373594051949, 0.6316166071545058, 0.5249518137472992, 0.5202925381387805], "rank_score": 0.6462495796114451} -{"id": "uchimoto-den-2008-word", "title": "Word-level Dependency-structure Annotation to Corpus of Spontaneous Japanese and its Application", "abstract": "In Japanese, the syntactic structure of a sentence is generally represented by the relationship between phrasal units, bunsetsus in Japanese, based on a dependency grammar. In many cases, the syntactic structure of a bunsetsu is not considered in syntactic structure annotation. This paper gives the criteria and definitions of dependency relationships between words in a bunsetsu and their applications. The target corpus for the word-level dependency annotation is a large spontaneous Japanese-speech corpus, the Corpus of Spontaneous Japanese (CSJ). One application of word-level dependency relationships is to find basic units for constructing accent phrases.", "phrases": ["japanese", "definition", "dependency structure"], "overall_score": 0.8958639741497487, "scores": [0.7953110786117503, 0.6069585762145017, 0.5364181143991298], "rank_score": 0.6462292564084605} -{"id": "prabhakaran-rambow-2013-written", "title": "Written Dialog and Social Power: Manifestations of Different Types of Power in Dialog Behavior", "abstract": "Dialog behavior is affected by power relations among the discourse participants. 
We show that four different types of power relations (hierarchical power, situational power, influence, and power over communication) affect written dialog behavior in different ways. We also present a system that can identify power relations given a written dialog.", "phrases": ["dialog behavior", "power relation", "communication"], "overall_score": 1.0400582186248233, "scores": [0.8871268401855819, 0.5289301389627377, 0.5226165364740036], "rank_score": 0.646224505207441} -{"id": "lawrence-reed-2017-mining", "title": "Mining Argumentative Structure from Natural Language text using Automatically Generated Premise-Conclusion Topic Models", "abstract": "This paper presents a method of extracting argumentative structure from natural language text. The approach presented is based on the way in which we understand an argument being made, not just from the words said, but from existing contextual knowledge and understanding of the broader issues. We leverage high-precision, low-recall techniques in order to automatically build a large corpus of inferential statements related to the text's topic. These statements are then used to produce a matrix representing the inferential relationship between different aspects of the topic. From this matrix, we are able to determine connectedness and directionality of inference between statements in the original text. By following this approach, we obtain results that compare favourably to those of other similar techniques to classify premise-conclusion pairs (with results 22 points above baseline), but without the requirement of large volumes of annotated, domain specific data.", "phrases": ["natural language text", "contextual knowledge", "premise-conclusion pair"], "overall_score": 0.8958459995142827, "scores": [0.8502998059629556, 0.563548381146167, 0.524800684240086], "rank_score": 0.6462162904497362} -{"id": "dragut-etal-2012-polarity", "title": "Polarity Consistency Checking for Sentiment Dictionaries", "abstract": "Polarity classification of words is important for applications such as Opinion Mining and Sentiment Analysis. A number of sentiment word/sense dictionaries have been manually or (semi)automatically constructed. The dictionaries have substantial inaccuracies. Besides obvious instances, where the same word appears with different polarities in different dictionaries, the dictionaries exhibit complex cases, which cannot be detected by mere manual inspection. We introduce the concept of polarity consistency of words/senses in sentiment dictionaries in this paper. We show that the consistency problem is NP-complete. We reduce the polarity consistency problem to the satisfiability problem and utilize a fast SAT solver to detect inconsistencies in a sentiment dictionary. We perform experiments on four sentiment dictionaries and WordNet.", "phrases": ["sentiment dictionary", "inconsistency", "pcp", "polynomial time"], "overall_score": 1.2567819619210643, "scores": [0.9688083519569026, 0.5663979449437352, 0.5241789792579802, 0.5240473916471344], "rank_score": 0.645858166951438} -{"id": "cai-etal-2017-fast", "title": "Fast and Accurate Neural Word Segmentation for Chinese", "abstract": "Neural models with minimal feature engineering have achieved competitive performance against traditional methods for the task of Chinese word segmentation. However, both training and working procedures of the current neural models are computationally inefficient. 
In this paper, we propose a greedy neural word segmenter with balanced word and character embedding inputs to alleviate the existing drawbacks. Our segmenter is truly end-to-end, capable of performing segmentation much faster and even more accurately than state-of-the-art neural models on Chinese benchmark datasets.", "phrases": ["neural word segmenter", "segmentation", "chinese", "various nlp task"], "overall_score": 1.7489546496667208, "scores": [0.829594712299029, 0.647139441922036, 0.5534719100795331, 0.5531354837325576], "rank_score": 0.645835387008289}
-{"id": "li-etal-2020-empdg", "title": "EmpDG: Multi-resolution Interactive Empathetic Dialogue Generation", "abstract": "A humanized dialogue system is expected to generate empathetic replies, which should be sensitive to the users' expressed emotion. The task of empathetic dialogue generation is proposed to address this problem. The essential challenges lie in accurately capturing the nuances of human emotion and considering the potential of user feedback, which are overlooked by the majority of existing work. In response to this problem, we propose a multi-resolution adversarial model \u2013 EmpDG, to generate more empathetic responses. EmpDG exploits both the coarse-grained dialogue-level and fine-grained token-level emotions, the latter of which helps to better capture the nuances of user emotion. In addition, we introduce an interactive adversarial learning framework which exploits the user feedback, to identify whether the generated responses evoke emotion perceptivity in dialogues. Experimental results show that the proposed approach significantly outperforms the state-of-the-art baselines in both content quality and emotion perceptivity.", "phrases": ["dialogue-level", "fine-grained token-level emotion", "empdg"], "overall_score": 1.5486415968535157, "scores": [0.7982941718153634, 0.5890050270053703, 0.5502019269639221], "rank_score": 0.6458337085948852}
-{"id": "kurtyigit-etal-2021-lexical", "title": "Lexical Semantic Change Discovery", "abstract": "While there is a large amount of research in the field of Lexical Semantic Change Detection, only a few approaches go beyond a standard benchmark evaluation of existing models. In this paper, we propose a shift of focus from change detection to change discovery, i.e., discovering novel word senses over time from the full corpus vocabulary. By heavily fine-tuning a type-based and a token-based approach on recently published German data, we demonstrate that both models can successfully be applied to discover new words undergoing meaning change. Furthermore, we provide an almost fully automated framework for both evaluation and discovery.", "phrases": ["change", "change detection", "vocabulary", "intersection"], "overall_score": 1.1571352624433964, "scores": [0.929793631667293, 0.574089271783764, 0.5572033794159125, 0.5221516624403343], "rank_score": 0.6458094863268259}
-{"id": "lee-etal-2017-l1", "title": "L1-L2 Parallel Dependency Treebank as Learner Corpus", "abstract": "This opinion paper proposes the use of parallel treebank as learner corpus. We show how an L1-L2 parallel treebank \u2014 i.e., parse trees of non-native sentences, aligned to the parse trees of their target hypotheses \u2014 can facilitate retrieval of sentences with specific learner errors. We argue for its benefits, in terms of corpus re-use and interoperability, over a conventional learner corpus annotated with error tags.
As a proof of concept, we conduct a case study on word-order errors made by learners of Chinese as a foreign language. We report precision and recall in retrieving a range of word-order error categories from L1-L2 tree pairs annotated in the Universal Dependency framework.", "phrases": ["learner corpus", "l1-l2 parallel treebank", "word-order error", "target hypothesis"], "overall_score": 0.7094545921967529, "scores": [0.9117118885464467, 0.6240414161933113, 0.5266802593634196, 0.5206600332089448], "rank_score": 0.6457733993280306}
-{"id": "roy-subramaniam-2006-automatic", "title": "Automatic Generation of Domain Models for Call-Centers from Noisy Transcriptions", "abstract": "Call centers handle customer queries from various domains such as computer sales and support, mobile phones, car rental, etc. Each such domain generally has a domain model which is essential to handle customer complaints. These models contain common problem categories, typical customer issues and their solutions, greeting styles. Currently these models are manually created over time. Towards this, we propose an unsupervised technique to generate domain models automatically from call transcriptions. We use a state of the art Automatic Speech Recognition system to transcribe the calls between agents and customers, which still results in high word error rates (40%) and show that even from these noisy transcriptions of calls we can automatically build a domain model. The domain model is comprised of primarily a topic taxonomy where every node is characterized by topic(s), typical Questions-Answers (Q&As), typical actions and call statistics. We show how such a domain model can be used for topic identification of unseen calls. We also propose applications for aiding agents while handling calls and for agent monitoring based on the domain model.", "phrases": ["domain model", "noisy transcription", "call center", "unsupervised technique"], "overall_score": 0.894934014918, "scores": [0.9062866959517151, 0.5848050892484633, 0.5513787374842772, 0.5397632078056411], "rank_score": 0.6455584326225241}
-{"id": "shi-etal-2016-recurrent", "title": "Recurrent Support Vector Machines For Slot Tagging In Spoken Language Understanding", "abstract": "We propose recurrent support vector machine (RSVM) for slot tagging. This model is a combination of the recurrent neural network (RNN) and the structured support vector machine. RNN extracts features from the input sequence. The structured support vector machine uses a sequence-level discriminative objective function. The proposed model therefore combines the sequence representation capability of an RNN with the sequence-level discriminative objective. We have observed new state-of-the-art results on two benchmark datasets and one private dataset. RSVM obtained statistically significant 4% and 2% relative average F1 score improvement on ATIS dataset and Chunking dataset, respectively. Out of eight domains in Cortana live log dataset, RSVM achieved F1 score improvement on seven domains.
Experiments also show that RSVM significantly speeds up the model training by skipping the weight updating for non-support vector training samples, compared against training using RNN with CRF or minimum cross-entropy objectives.", "phrases": ["support vector machine", "slot tagging", "recurrent neural network"], "overall_score": 0.8948744718992016, "scores": [0.8785969263656026, 0.5291362584574032, 0.5288132594176568], "rank_score": 0.6455154814135541}
-{"id": "gao-etal-2011-soft", "title": "Soft Dependency Constraints for Reordering in Hierarchical Phrase-Based Translation", "abstract": "Long-distance reordering remains one of the biggest challenges facing machine translation. We derive soft constraints from the source dependency parsing to directly address the reordering problem for the hierarchical phrase-based model. Our approach significantly improves Chinese--English machine translation on a large-scale task by 0.84 BLEU points on average. Moreover, when we switch the tuning function from BLEU to the LRscore which promotes reordering, we observe total improvements of 1.21 BLEU, 1.30 LRscore and 3.36 TER over the baseline. On average our approach improves reordering precision and recall by 6.9 and 0.3 absolute points, respectively, and is found to be especially effective for long-distance reordering.", "phrases": ["soft dependency constraint", "english translation", "parent", "chinese"], "overall_score": 1.4863248699186633, "scores": [0.9117864410876622, 0.5894212322146827, 0.5443103550700984, 0.5364927289125325], "rank_score": 0.645502689321244}
-{"id": "karimi-mahabadi-etal-2020-end", "title": "End-to-End Bias Mitigation by Modelling Biases in Corpora", "abstract": "Several recent studies have shown that strong natural language understanding (NLU) models are prone to relying on unwanted dataset biases without learning the underlying task, resulting in models that fail to generalize to out-of-domain datasets and are likely to perform poorly in real-world scenarios. We propose two learning strategies to train neural models, which are more robust to such biases and transfer better to out-of-domain datasets. The biases are specified in terms of one or more bias-only models, which learn to leverage the dataset biases. During training, the bias-only models' predictions are used to adjust the loss of the base model to reduce its reliance on biases by down-weighting the biased examples and focusing the training on the hard examples. We experiment on large-scale natural language inference and fact verification benchmarks, evaluating on out-of-domain datasets that are specifically designed to assess the robustness of models against known biases in the training data. Results show that our debiasing methods greatly improve robustness in all settings and better transfer to other textual entailment datasets. Our code and data are publicly available in .", "phrases": ["loss", "dataset bias", "recent paper", "lexical artifact"], "overall_score": 1.7891137224258788, "scores": [0.9524320088927799, 0.5587291346650598, 0.5452379960830718, 0.5247463552892973], "rank_score": 0.6452863737325523}
-{"id": "collell-moens-2016-image", "title": "Is an Image Worth More than a Thousand Words? On the Fine-Grain Semantic Differences between Visual and Linguistic Representations", "abstract": "Human concept representations are often grounded with visual information, yet some aspects of meaning cannot be visually represented or are better described with language.
Thus, vision and language provide complementary information that, properly combined, can potentially yield more complete concept representations. Recently, state-of-the-art distributional semantic models and convolutional neural networks have achieved great success in representing linguistic and visual knowledge respectively. In this paper, we compare both, visual and linguistic representations in their ability to capture different types of fine-grain semantic knowledge\u2014or attributes\u2014of concepts. Humans often describe objects using attributes, that is, properties such as shape, color or functionality, which often transcend the linguistic and visual modalities. In our setting, we evaluate how well attributes can be predicted by using the unimodal representations as inputs. We are interested in first, finding out whether attributes are generally better captured by either the vision or by the language modality; and second, if none of them is clearly superior (as we hypothesize), what type of attributes or semantic knowledge are better encoded from each modality. Ultimately, our study sheds light on the potential of combining visual and textual representations.", "phrases": ["image", "linguistic representation", "attribute", "collell"], "overall_score": 1.54704502385799, "scores": [0.8714097900259068, 0.5876093720345755, 0.5802410442342931, 0.5414113374671355], "rank_score": 0.6451678859404778}
-{"id": "bethard-2013-cleartk", "title": "ClearTK-TimeML: A minimalist approach to TempEval 2013", "abstract": "The ClearTK-TimeML submission to TempEval 2013 competed in all English tasks: identifying events, identifying times, and identifying temporal relations. The system is a pipeline of machine-learning models, each with a small set of features from a simple morpho-syntactic annotation pipeline, and where temporal relations are only predicted for a small set of syntactic constructions and relation types. ClearTK-TimeML ranked 1st for temporal relation F1, time extent strict F1 and event tense accuracy.", "phrases": ["tempeval", "syntactic construction", "temporal relation extraction", "top system"], "overall_score": 1.7886471406456041, "scores": [0.8662091199473325, 0.5897166742885157, 0.5711185317028017, 0.5534280337709874], "rank_score": 0.6451180899274094}
-{"id": "hovy-yang-2021-importance", "title": "The Importance of Modeling Social Factors of Language: Theory and Practice", "abstract": "Natural language processing (NLP) applications are now more powerful and ubiquitous than ever before. With rapidly developing (neural) models and ever-more available data, current NLP models have access to more information than any human speaker during their life. Still, it would be hard to argue that NLP models have reached human-level capacity. In this position paper, we argue that the reason for the current limitations is a focus on information content while ignoring language's social factors. We show that current NLP systems systematically break down when faced with interpreting the social factors of language. This limits applications to a subset of information-related tasks and prevents NLP from reaching human-level performance. At the same time, systems that incorporate even a minimum of social factors already show remarkable improvements. We formalize a taxonomy of seven social factors based on linguistic theory and exemplify current failures and emerging successes for each of them.
We suggest that the NLP community address social factors to get closer to the goal of human-like language understanding.", "phrases": ["theory", "nlp community address", "language understanding"], "overall_score": 1.3414577742508784, "scores": [0.8267771468074577, 0.574823518257846, 0.5337138134083855], "rank_score": 0.6451048261578963}
-{"id": "takeno-etal-2017-controlling", "title": "Controlling Target Features in Neural Machine Translation via Prefix Constraints", "abstract": "We propose prefix constraints, a novel method to enforce constraints on target sentences in neural machine translation. It places a sequence of special tokens at the beginning of target sentence (target prefix), while side constraints places a special token at the end of source sentence (source suffix). Prefix constraints can be predicted from source sentence jointly with target sentence, while side constraints (Sennrich et al., 2016) must be provided by the user or predicted by some other methods. In both methods, special tokens are designed to encode arbitrary features on target-side or metatextual information. We show that prefix constraints are more flexible than side constraints and can be used to control the behavior of neural machine translation, in terms of output length, bidirectional decoding, domain adaptation, and unaligned target word generation.", "phrases": ["neural machine translation", "special token", "decoding", "target word generation"], "overall_score": 1.254881768824339, "scores": [0.9303790268339229, 0.563635390097012, 0.5531587026453662, 0.5323535238990929], "rank_score": 0.6448816608688486}
-{"id": "dey-fung-2014-hindi", "title": "A Hindi-English Code-Switching Corpus", "abstract": "The aim of this paper is to investigate the rules and constraints of code-switching (CS) in Hindi-English mixed language data. In this paper, we\u2019ll discuss how we collected the mixed language corpus. This corpus is primarily made up of student interview speech. The speech was manually transcribed and verified by bilingual speakers of Hindi and English. The code-switching cases in the corpus are discussed and the reasons for code-switching are explained.", "phrases": ["code-switching", "hindi", "context code mixing", "bilingual student"], "overall_score": 1.3402252656019094, "scores": [0.8849970596204086, 0.6020575677610667, 0.5519276871185967, 0.5390661446438861], "rank_score": 0.6445121147859896}
-{"id": "habernal-etal-2016-c4corpus", "title": "C4Corpus: Multilingual Web-size Corpus with Free License", "abstract": "Large Web corpora containing full documents with permissive licenses are crucial for many NLP tasks. In this article we present the construction of 12 million-pages Web corpus (over 10 billion tokens) licensed under CreativeCommons license family in 50+ languages that has been extracted from CommonCrawl, the largest publicly available general Web crawl to date with about 2 billion crawled URLs. Our highly-scalable Hadoop-based framework is able to process the full CommonCrawl corpus on 2000+ CPU cluster on the Amazon Elastic Map/Reduce infrastructure. The processing pipeline includes license identification, state-of-the-art boilerplate removal, exact duplicate and near-duplicate document removal, and language detection.
The construction of the corpus is highly configurable and fully reproducible, and we provide both the framework (DKPro C4CorpusTools) and the resulting data (C4Corpus) to the research community.", "phrases": ["license", "processing pipeline", "c4corpu"], "overall_score": 0.707896363491465, "scores": [0.8340700957706954, 0.5497147016280652, 0.549280317910504], "rank_score": 0.6443550384364216} -{"id": "surya-etal-2019-unsupervised", "title": "Unsupervised Neural Text Simplification", "abstract": "The paper presents a first attempt towards unsupervised neural text simplification that relies only on unlabeled text corpora. The core framework is composed of a shared encoder and a pair of attentional decoders, crucially assisted by discrimination-based losses and denoising. The framework is trained using unlabeled text collected from the en-Wikipedia dump. Our analysis (both quantitative and qualitative involving human evaluators) on public test data shows that the proposed model can perform text simplification at both lexical and syntactic levels, competitive with existing supervised methods. It also outperforms viable unsupervised baselines. Adding a few labeled pairs helps improve the performance further.", "phrases": ["neural text simplification", "loss", "denoising", "complex sentence", "unsupervised approach"], "overall_score": 1.4156709019132014, "scores": [0.8480493384284179, 0.6852827447436389, 0.5785268109064399, 0.566428848407758, 0.5432102248027197], "rank_score": 0.6442995934577949} -{"id": "park-etal-2017-rotated", "title": "Rotated Word Vector Representations and their Interpretability", "abstract": "Vector representation of words improves performance in various NLP tasks, but the high dimensional word vectors are very difficult to interpret. We apply several rotation algorithms to the vector representation of words to improve the interpretability. Unlike previous approaches that induce sparsity, the rotated vectors are interpretable while preserving the expressive performance of the original vectors. Furthermore, any prebuilt word vector representation can be rotated for improved interpretability. We apply rotation to skip-grams and GloVe and compare the expressive power and interpretability with the original vectors and the sparse overcomplete vectors. The results show that the rotated vectors outperform the original and the sparse overcomplete vectors for interpretability and expressiveness tasks.", "phrases": ["word vector", "interpretability", "rotation", "factor analysis"], "overall_score": 1.2537182238907207, "scores": [0.8106354371210377, 0.6115490626952155, 0.5822034968293243, 0.5727468715791798], "rank_score": 0.6442837170561894} -{"id": "ruckle-etal-2020-multicqa", "title": "MultiCQA: Zero-Shot Transfer of Self-Supervised Text Matching Models on a Massive Scale", "abstract": "We study the zero-shot transfer capabilities of text matching models on a massive scale, by self-supervised training on 140 source domains from community question answering forums in English. We investigate the model performances on nine benchmarks of answer selection and question similarity tasks, and show that all 140 models transfer surprisingly well, where the large majority of models substantially outperforms common IR baselines. We also demonstrate that considering a broad selection of source domains is crucial for obtaining the best zero-shot transfer performances, which contrasts the standard procedure that merely relies on the largest and most similar domains. 
In addition, we extensively study how to best combine multiple source domains. We propose to incorporate self-supervised with supervised multi-task learning on all available source domains. Our best zero-shot transfer model considerably outperforms in-domain BERT and the previous state of the art on six benchmarks. Fine-tuning of our model with in-domain data results in additional large gains and achieves the new state of the art on all nine benchmarks.", "phrases": ["text matching model", "massive scale", "multi-task learning"], "overall_score": 1.415237635405514, "scores": [0.7889099094224864, 0.5952495924775507, 0.5481477142170265], "rank_score": 0.6441024053723545} -{"id": "hu-etal-2019-diachronic", "title": "Diachronic Sense Modeling with Deep Contextualized Word Embeddings: An Ecological View", "abstract": "Diachronic word embeddings have been widely used in detecting temporal changes. However, existing methods face the meaning conflation deficiency by representing a word as a single vector at each time period. To address this issue, this paper proposes a sense representation and tracking framework based on deep contextualized embeddings, aiming at answering not only what and when, but also how the word meaning changes. The experiments show that our framework is effective in representing fine-grained word senses, and it brings a significant improvement in the word change detection task. Furthermore, we model the word change from an ecological viewpoint, and sketch two interesting sense behaviors in the process of language evolution, i.e. sense competition and sense cooperation.", "phrases": ["sense representation", "example sentence", "bert embedding"], "overall_score": 1.8243277675803504, "scores": [0.8710017692213412, 0.5402754699051315, 0.5204457333855674], "rank_score": 0.6439076575040134} -{"id": "chen-etal-2006-chinese", "title": "Chinese Named Entity Recognition with Conditional Random Fields", "abstract": "We present a Chinese Named Entity Recognition (NER) system submitted to the closed track of SIGHAN Bakeoff 2006. We define some additional features by collecting statistics from the training corpus. Our system incorporates basic features and additional features based on Conditional Random Fields (CRFs). In order to correct inconsistent results, we perform a postprocessing procedure according to the n-best results given by the CRF model. Our final system achieved an F-score of 85.14 at MSRA, 89.03 at CityU, and 76.27 at LDC.", "phrases": ["conditional random fields", "crfs", "chinese", "character sequence labeling"], "overall_score": 1.15357327388767, "scores": [0.9284602439734801, 0.5646668964957519, 0.556250123232225, 0.5259087469474277], "rank_score": 0.6438215026622212} -{"id": "barbieri-etal-2016-emoji", "title": "What does this Emoji Mean? A Vector Space Skip-Gram Model for Twitter Emojis", "abstract": "Emojis allow us to describe objects, situations and even feelings with small images, providing a visual and quick way to communicate. In this paper, we analyse emojis used in Twitter with distributional semantic models. We retrieve 10 million tweets posted by USA users, and we build several skip-gram word embedding models by mapping both words and emojis into the same vectorial space. We test our models with semantic similarity experiments, comparing the output of our models with human assessment. 
We also carry out an exhaustive qualitative evaluation, showing interesting results.", "phrases": ["skip-gram model", "twitter emojis", "word2vec", "vector space embedding"], "overall_score": 1.4822233520517674, "scores": [0.9450083672636332, 0.5564172415637387, 0.5469728454918708, 0.5264872366576515], "rank_score": 0.6437214227442235} -{"id": "klang-nugues-2016-langforia", "title": "Langforia: Language Pipelines for Annotating Large Collections of Documents", "abstract": "In this paper, we describe Langforia, a multilingual processing pipeline to annotate texts with multiple layers: formatting, parts of speech, named entities, dependencies, semantic roles, and entity links. Langforia works as a web service, where the server hosts the language processing components and the client, the input and result visualization. To annotate a text or a Wikipedia page, the user chooses an NLP pipeline and enters the text in the interface or selects the page URL. Once processed, the results are returned to the client, where the user can select the annotation layers s/he wants to visualize. We designed Langforia with a specific focus on Wikipedia, although it can process any type of text. Wikipedia has become an essential encyclopedic corpus used in many NLP projects. However, processing articles and visualizing the annotations are nontrivial tasks that require dealing with multiple markup variants, encoding issues, and tool incompatibilities across the language versions. This motivated the development of a new architecture. A demonstration of Langforia is available for six languages: English, French, German, Spanish, Russian, and Swedish at as well as a web API: . Langforia is also provided as a standalone library and is compatible with cluster computing.", "phrases": ["wikipedia", "language version", "swedish", "langforia"], "overall_score": 0.7071312005635212, "scores": [0.8980791228592846, 0.5842753509921793, 0.5674252785485361, 0.5248544760985856], "rank_score": 0.6436585571246465} -{"id": "fonseca-martins-2020-revisiting", "title": "Revisiting Higher-Order Dependency Parsers", "abstract": "Neural encoders have allowed dependency parsers to shift from higher-order structured models to simpler first-order ones, making decoding faster and still achieving better accuracy than non-neural parsers. This has led to a belief that neural encoders can implicitly encode structural constraints, such as siblings and grandparents in a tree. We tested this hypothesis and found that neural parsers may benefit from higher-order features, even when employing a powerful pre-trained encoder, such as BERT. While the gains of higher-order features are small in the presence of a powerful encoder, they are consistent for long-range dependencies and long sentences. In particular, higher-order models are more accurate on full sentence parses and on the exact match of modifier lists, indicating that they deal better with larger, more complex structures.", "phrases": ["dependency parser", "presence", "powerful encoder", "graph-based method"], "overall_score": 1.15310065381269, "scores": [0.9540252585307205, 0.5600396398506259, 0.534815761526589, 0.5253502535962776], "rank_score": 0.6435577283760533} -{"id": "lang-lapata-2010-unsupervised", "title": "Unsupervised Induction of Semantic Roles", "abstract": "Datasets annotated with semantic roles are an important prerequisite to developing high-performance role labeling systems. 
Unfortunately, the reliance on manual annotations, which are both difficult and highly expensive to produce, presents a major obstacle to the widespread application of these systems across different languages and text genres. In this paper we describe a method for inducing the semantic roles of verbal arguments directly from unannotated text. We formulate the role induction problem as one of detecting alternations and finding a canonical syntactic form for them. Both steps are implemented in a novel probabilistic model, a latent-variable variant of the logistic classifier. Our method increases the purity of the induced role clusters by a wide margin over a strong baseline.", "phrases": ["role induction problem", "alternation", "strong baseline", "non-standard linking", "canonical one"], "overall_score": 1.1527954875597786, "scores": [0.9638011933142676, 0.5777371199821993, 0.5638376260012788, 0.5587067420480807, 0.5528543778913662], "rank_score": 0.6433874118474385} -{"id": "sasaki-etal-2018-cross", "title": "Cross-Lingual Learning-to-Rank with Shared Representations", "abstract": "Cross-lingual information retrieval (CLIR) is a document retrieval task where the documents are written in a language different from that of the user's query. This is a challenging problem for data-driven approaches due to the general lack of labeled training data. We introduce a large-scale dataset derived from Wikipedia to support CLIR research in 25 languages. Further, we present a simple yet effective neural learning-to-rank model that shares representations across languages and reduces the data requirement. This model can exploit training data in, for example, Japanese-English CLIR to improve the results of Swahili-English CLIR.", "phrases": ["clir", "query", "wikipedia"], "overall_score": 1.3377123392998675, "scores": [0.8539401347016842, 0.5419452898321498, 0.5340255335100594], "rank_score": 0.6433036526812979} -{"id": "korhonen-2009-automatic", "title": "Automatic Lexical Classification \u2013 Balancing between Machine Learning and Linguistics", "abstract": "Verb classifications have been used to support a number of practical tasks and applications, such as parsing, information extraction, question-answering, and machine translation. However, large-scale exploitation of verb classes in real-world or domain-sensitive tasks has not been possible because existing manually built classifications are incomprehensive. This paper describes recent and on-going research on extending and acquiring lexical classifications automatically. The automatic approach is attractive since it is cost-effective and opens up the opportunity of learning and tuning lexical classifications for the application and domain in question. However, the development of an optimal approach is challenging, and requires not only expertise in machine learning but also a good understanding of the linguistic principles of lexical classification.", "phrases": ["machine learning", "information extraction", "argument structure information", "word-sense disambiguation"], "overall_score": 1.0352848597777586, "scores": [0.8689825612416091, 0.574311462732313, 0.568772884416846, 0.5609676940115139], "rank_score": 0.6432586506005705} -{"id": "liu-etal-2021-self", "title": "Self-Alignment Pretraining for Biomedical Entity Representations", "abstract": "Despite the widespread success of self-supervised learning via masked language models (MLM), accurately capturing fine-grained semantic relationships in the biomedical domain remains a challenge. 
This is of paramount importance for entity-level tasks such as entity linking, where the ability to model entity relations (especially synonymy) is pivotal. To address this challenge, we propose SapBERT, a pretraining scheme that self-aligns the representation space of biomedical entities. We design a scalable metric learning framework that can leverage UMLS, a massive collection of biomedical ontologies with 4M+ concepts. In contrast with previous pipeline-based hybrid systems, SapBERT offers an elegant one-model-for-all solution to the problem of medical entity linking (MEL), achieving a new state-of-the-art (SOTA) on six MEL benchmarking datasets. In the scientific domain, we achieve SOTA even without task-specific supervision. With substantial improvement over various domain-specific pretrained MLMs such as BioBERT, SciBERT and PubMedBERT, our pretraining scheme proves to be both effective and robust.", "phrases": ["biomedical domain", "scheme", "umls", "pre-training scheme"], "overall_score": 1.4127552000069572, "scores": [0.8395660286463157, 0.6065182431669681, 0.5800860724514728, 0.5457200570699944], "rank_score": 0.6429726003336877} -{"id": "guo-etal-2020-sequence", "title": "Sequence-Level Mixed Sample Data Augmentation", "abstract": "Despite their empirical success, neural networks still have difficulty capturing compositional aspects of natural language. This work proposes a simple data augmentation approach to encourage compositional behavior in neural models for sequence-to-sequence problems. Our approach, SeqMix, creates new synthetic examples by softly combining input/output sequences from the training set. We connect this approach to existing techniques such as SwitchOut and word dropout, and show that these techniques are all essentially approximating variants of a single objective. SeqMix consistently yields approximately 1.0 BLEU improvement on five different translation datasets over strong Transformer baselines. On tasks that require strong compositional generalization such as SCAN and semantic parsing, SeqMix also offers further improvements.", "phrases": ["data augmentation approach", "seqmix", "synthetic example", "switchout", "compositional behaviour"], "overall_score": 1.5416658889674266, "scores": [0.9422403289019933, 0.619503482127353, 0.5811373273205375, 0.542819345560581, 0.5289225783558978], "rank_score": 0.6429246124532725} -{"id": "gala-etal-2020-alector", "title": "Alector: A Parallel Corpus of Simplified French Texts with Alignments of Misreadings by Poor and Dyslexic Readers", "abstract": "In this paper, we present a new parallel corpus addressed to researchers, teachers, and speech therapists interested in text simplification as a means of alleviating difficulties in children learning to read. The corpus is composed of excerpts drawn from 79 authentic literary (tales, stories) and scientific (documentary) texts commonly used in French schools for children aged between 7 and 9 years old. The excerpts were manually simplified at the lexical, morpho-syntactic, and discourse levels in order to propose a parallel corpus for reading tests and for the development of automatic text simplification tools. A sample of 21 poor-reading and dyslexic children with an average reading delay of 2.5 years read a portion of the corpus. The transcripts of reading errors were integrated into the corpus with the goal of identifying lexical difficulty in the target population. 
By means of statistical testing, we provide evidence that the manual simplifications significantly reduced reading errors, highlighting that the words targeted for simplification were not only well-chosen but also substituted with substantially easier alternatives. The entire corpus is available for consultation through a web interface and available on demand for research purposes.", "phrases": ["parallel corpus", "french", "text simplification"], "overall_score": 1.1518294949478545, "scores": [0.8257645212684217, 0.5588134581544589, 0.5439668638937808], "rank_score": 0.6428482811055538} -{"id": "murray-etal-2012-using", "title": "Using the Omega Index for Evaluating Abstractive Community Detection", "abstract": "Numerous NLP tasks rely on clustering or community detection algorithms. For many of these tasks, the solutions are disjoint, and the relevant evaluation metrics assume nonoverlapping clusters. In contrast, the relatively recent task of abstractive community detection (ACD) results in overlapping clusters of sentences. ACD is a sub-task of an abstractive summarization system and represents a two-step process. In the first step, we classify sentence pairs according to whether the sentences should be realized by a common abstractive sentence. This results in an undirected graph with sentences as nodes and predicted abstractive links as edges. The second step is to identify communities within the graph, where each community corresponds to an abstractive sentence to be generated. In this paper, we describe how the Omega Index, a metric for comparing non-disjoint clustering solutions, can be used as a summarization evaluation metric for this task. We use the Omega Index to compare and contrast several community detection algorithms.", "phrases": ["omega index", "common abstractive sentence", "edge"], "overall_score": 0.8911343158606919, "scores": [0.8233141409860787, 0.577636146631836, 0.5275022997693846], "rank_score": 0.6428175291290997} -{"id": "sharoff-2004-stake", "title": "What is at Stake: a Case Study of Russian Expressions Starting with a Preposition", "abstract": "The paper describes an experiment in detecting a specific type of multiword expressions in Russian, namely expressions starting with a preposition. This covers not only prepositional phrases proper, but also fixed syntactic constructions like v techenie ('in the course of'). First, we collect lists of such constructions in a corpus of 50 million words using a simple mechanism that combines statistical methods with knowledge about the structure of Russian prepositional phrases. Then we analyse the results of this data collection and estimate the efficiency of the collected list for the resolution of morphosyntactic and semantic ambiguity in a corpus.", "phrases": ["russian", "preposition", "non-configurational language"], "overall_score": 1.3364810243850613, "scores": [0.8475680719506724, 0.5525976477771446, 0.5279688263947128], "rank_score": 0.6427115153741766} -{"id": "bosc-vincent-2020-sequence", "title": "Do sequence-to-sequence VAEs learn global features of sentences?", "abstract": "Autoregressive language models are powerful and relatively easy to train. However, these models are usually trained without explicit conditioning labels and do not offer easy ways to control global aspects such as sentiment or topic during generation. Bowman et al. 
(2016) adapted the Variational Autoencoder (VAE) for natural language with the sequence-to-sequence architecture and claimed that the latent vector was able to capture such global features in an unsupervised manner. We question this claim. We measure which words benefit most from the latent information by decomposing the reconstruction loss per position in the sentence. Using this method, we find that VAEs are prone to memorizing the first words and the sentence length, producing local features of limited usefulness. To alleviate this, we investigate alternative architectures based on bag-of-words assumptions and language model pretraining. These variants learn latent variables that are more global, i.e., more predictive of topic or sentiment labels. Moreover, using reconstructions, we observe that they decrease memorization: the first word and the sentence length are not recovered as accurately as with the baselines, consequently yielding more diverse reconstructions.", "phrases": ["vae", "global feature", "latent variable"], "overall_score": 0.8909101817976309, "scores": [0.8573661226375607, 0.5366479485841016, 0.5339534805137282], "rank_score": 0.6426558505784635} -{"id": "lambert-banchs-2006-tuning", "title": "Tuning machine translation parameters with SPSA", "abstract": "Most statistical machine translation systems are combinations of various models, and tuning of the scaling factors is an important step. However, this optimisation problem is hard because the objective function has many local minima and the available algorithms cannot achieve a global optimum. Consequently, optimisations starting from different initial settings can converge to fairly different solutions. We present tuning experiments with the Simultaneous Perturbation Stochastic Approximation (SPSA) algorithm, and compare them to tuning with the widely used downhill simplex method. With IWSLT 2006 Chinese-English data, both methods showed similar performance, but SPSA was more robust to the choice of initial settings.", "phrases": ["spsa", "translation hypothesis", "beam search"], "overall_score": 0.8903681840137065, "scores": [0.861093439837397, 0.5341120053686151, 0.5315892002568177], "rank_score": 0.6422648818209432} -{"id": "lagoudaki-2008-value", "title": "The Value of Machine Translation for the Professional Translator", "abstract": "More and more Translation Memory (TM) systems nowadays are fortified with machine translation (MT) techniques to enable them to propose a translation to the translator when no match is found in their TM resources. The system attempts this by assembling a combination of terms from its terminology database, translations from its memory, and even portions of them. This paper reviews the most popular commercial TM systems with integrated MT techniques and explores their usefulness based on the perceived practical benefits brought to their users. 
Feedback from translators reveals a variety of attitudes towards machine translation, with some supporting and others contradicting several points of conventional wisdom regarding the relationship between machine translation and human translators.", "phrases": ["machine translation", "freelance translator", "language service provider", "productivity"], "overall_score": 1.2496520723490367, "scores": [0.8907173847682422, 0.5828498667419403, 0.5710719699328101, 0.5241372926333826], "rank_score": 0.6421941285190937} -{"id": "hovy-purschke-2018-capturing", "title": "Capturing Regional Variation with Distributed Place Representations and Geographic Retrofitting", "abstract": "Dialects are one of the main drivers of language variation, a major challenge for natural language processing tools. In most languages, dialects exist along a continuum, and are commonly discretized by combining the extent of several preselected linguistic variables. However, the selection of these variables is theory-driven and itself insensitive to change. We use Doc2Vec on a corpus of 16.8M anonymous online posts in the German-speaking area to learn continuous document representations of cities. These representations capture continuous regional linguistic distinctions, and can serve as input to downstream NLP tasks sensitive to regional variation. By incorporating geographic information via retrofitting and agglomerative clustering with structure, we recover dialect areas at various levels of granularity. Evaluating these clusters against an existing dialect map, we achieve a match of up to 0.77 V-score (harmonic mean of cluster completeness and homogeneity). Our results show that representation learning with retrofitting offers a robust general method to automatically expose dialectal differences and regional variation at a finer granularity than was previously possible.", "phrases": ["regional variation", "area", "city", "dialect map"], "overall_score": 1.24957279295585, "scores": [0.8773779334431523, 0.586483213448568, 0.5780264061521027, 0.5267259948375816], "rank_score": 0.6421533869703511} -{"id": "langer-etal-2004-text", "title": "Text Type Structure and Logical Document Structure", "abstract": "Most research on automated categorization of documents has concentrated on the assignment of one or many categories to a whole text. However, new applications, e.g. in the area of the Semantic Web, require a richer and more fine-grained annotation of documents, such as detailed thematic information about the parts of a document. Hence we investigate the automatic categorization of text segments of scientific articles with XML markup into 16 topic types from a text type structure schema. A corpus of 47 linguistic articles was provided with XML markup on different annotation layers representing text type structure, logical document structure, and grammatical categories. Six different feature extraction strategies were applied to this corpus and combined in various parametrizations in different classifiers. The aim was to explore the contribution of each type of information, in particular the logical structure features, to the classification accuracy. 
The results suggest that some of the topic types of our hierarchy are successfully learnable, while the features from the logical structure layer had no particular impact on the results.", "phrases": ["new application", "area", "semantic web", "fine-grained annotation", "text type structure"], "overall_score": 0.8901284922974947, "scores": [0.9913871747011693, 0.5900755345672618, 0.5706342810319309, 0.5348646369249033, 0.5234982767536489], "rank_score": 0.6420919807957828} -{"id": "kauchak-2013-improving", "title": "Improving Text Simplification Language Modeling Using Unsimplified Text Data", "abstract": "In this paper we examine language modeling for text simplification. Unlike some text-to-text translation tasks, text simplification is a monolingual translation task allowing for text in both the input and output domain to be used for training the language model. We explore the relationship between normal English and simplified English and compare language models trained on varying amounts of text from each. We evaluate the models intrinsically with perplexity and extrinsically on the lexical simplification task from SemEval 2012. We find that a combined model using both simplified and normal English data achieves a 23% improvement in perplexity and a 24% improvement on the lexical simplification task over a model trained only on simple data. Post-hoc analysis shows that the additional unsimplified data provides better coverage for unseen and rare n-grams.", "phrases": ["simplification", "language modeling", "translation task", "readability"], "overall_score": 1.4782968804623637, "scores": [0.8859171527555316, 0.595542183830772, 0.5536427467422795, 0.5329626278697991], "rank_score": 0.6420161777995956} -{"id": "pitler-etal-2012-dynamic", "title": "Dynamic Programming for Higher Order Parsing of Gap-Minding Trees", "abstract": "We introduce gap inheritance, a new structural property on trees, which provides a way to quantify the degree to which intervals of descendants can be nested. Based on this property, two new classes of trees are derived that provide a closer approximation to the set of plausible natural language dependency trees than some alternative classes of trees: unlike projective trees, a word can have descendants in more than one interval; unlike spanning trees, these intervals cannot be nested in arbitrary ways. The 1-Inherit class of trees has exactly the same empirical coverage of natural language sentences as the class of mildly non-projective trees, yet the optimal scoring tree can be found in an order of magnitude less time. Gap-minding trees (the second class) have the property that all edges into an interval of descendants come from the same node, and thus an algorithm which uses only single intervals can produce trees in which a node has descendants in multiple intervals.", "phrases": ["gap-minding tree", "coverage", "non-projective tree"], "overall_score": 1.2490670958115984, "scores": [0.858784913891581, 0.5375420419863978, 0.5293535742605582], "rank_score": 0.641893510046179} -{"id": "wang-etal-2015-language", "title": "Language and Domain Independent Entity Linking with Quantified Collective Validation", "abstract": "Linking named mentions detected in a source document to an existing knowledge base provides disambiguated entity referents for the mentions. This allows better document analysis, knowledge extraction and knowledge base population. 
Most of the previous research extensively exploited the linguistic features of the source documents in a supervised or semi-supervised way. These systems therefore cannot be easily applied to a new language or domain. In this paper, we present a novel unsupervised algorithm named Quantified Collective Validation that avoids excessive linguistic analysis on the source documents and fully leverages the knowledge base structure for the entity linking task. We show our approach achieves state-of-the-art English entity linking performance and demonstrate successful deployment in a new language (Chinese) and two new domains (Biomedical and Earth Science). Experiment datasets and system demonstration are available at http://tw.rpi.edu/web/doc/hanwang_emnlp_2015 for research purposes.", "phrases": ["quantified collective validation", "mention", "chinese", "new domain"], "overall_score": 1.33442488550166, "scores": [0.8743431554354046, 0.5981590627227565, 0.5620779050880124, 0.532310763089917], "rank_score": 0.6417227215840227} -{"id": "tamkin-etal-2020-investigating", "title": "Investigating Transferability in Pretrained Language Models", "abstract": "How does language model pretraining help transfer learning? We consider a simple ablation technique for determining the impact of each pretrained layer on transfer task performance. This method, partial reinitialization, involves replacing different layers of a pretrained model with random weights, then finetuning the entire model on the transfer task and observing the change in performance. This technique reveals that in BERT, layers with high probing performance on downstream GLUE tasks are neither necessary nor sufficient for high accuracy on those tasks. Furthermore, the benefit of using pretrained parameters for a layer varies dramatically with finetuning dataset size: parameters that provide tremendous performance improvement when data is plentiful may provide negligible benefits in data-scarce settings. These results reveal the complexity of the transfer learning process, highlighting the limitations of methods that operate on frozen models or single data samples.", "phrases": ["transferability", "language model", "different layer"], "overall_score": 1.2486785741933388, "scores": [0.8183220334174428, 0.5780482203620011, 0.5287112945122981], "rank_score": 0.6416938494305806} -{"id": "uchimoto-etal-2006-dependency", "title": "Dependency-structure Annotation to Corpus of Spontaneous Japanese", "abstract": "In Japanese, the syntactic structure of a sentence is generally represented by the relationship between phrasal units, or bunsetsus in Japanese, based on a dependency grammar. In the same way, the syntactic structure of a sentence in a large, spontaneous, Japanese-speech corpus, the Corpus of Spontaneous Japanese (CSJ), is represented by dependency relationships between bunsetsus. This paper describes the criteria and definitions of dependency relationships between bunsetsus in the CSJ. The dependency structure of the CSJ is investigated, and the difference in the dependency structures of written text and spontaneous speech is discussed in terms of the dependency accuracies obtained by using a corpus-based model. 
It is shown that the accuracy of automatic dependency-structure analysis can be improved if characteristic phenomena of spontaneous speech such as self-corrections, basic utterance units in spontaneous speech, and bunsetsus that have no modifiee are detected and used for dependency-structure analysis.", "phrases": ["syntactic structure", "same way", "csj", "dependency relationship"], "overall_score": 0.8894858097321844, "scores": [0.9000014337296921, 0.5922863716886366, 0.5454455615954388, 0.5287801662696858], "rank_score": 0.6416283833208632} -{"id": "jakob-etal-2010-mapping", "title": "Mapping between Dependency Structures and Compositional Semantic Representations", "abstract": "This paper investigates the mapping between two semantic formalisms, namely the tectogrammatical layer of the Prague Dependency Treebank 2.0 (PDT) and (Robust) Minimal Recursion Semantics ((R)MRS). It is a first attempt to relate the dependency-based annotation scheme of PDT to a compositional semantics approach like (R)MRS. A mapping algorithm that converts PDT trees to (R)MRS structures is developed, associating (R)MRSs to each node on the dependency tree. Furthermore, composition rules are formulated and the relation between dependency in PDT and semantic heads in (R)MRS is analyzed. It turns out that structure and dependencies, morphological categories and some coreferences can be preserved in the target structures. Moreover, valency and free modifications are distinguished using the valency dictionary of PDT as an additional resource. The validation results show that systematically correct underspecified target representations can be obtained by a rule-based mapping approach, which is an indicator that (R)MRS is indeed robust in relation to the formal representation of Czech data. This finding is novel, for Czech, with its free word order and rich morphology, is typologically different from languages analyzed with (R)MRS to date.", "phrases": ["prague dependency treebank", "node", "mapping"], "overall_score": 0.7049004590992008, "scores": [0.7985812889587144, 0.6024132881156607, 0.5238895691437024], "rank_score": 0.6416280487393592} -{"id": "ivanova-etal-2012-contrastive", "title": "Who Did What to Whom? A Contrastive Study of Syntacto-Semantic Dependencies", "abstract": "We investigate aspects of interoperability between a broad range of common annotation schemes for syntacto-semantic dependencies. With the practical goal of making the LinGO Redwoods Treebank accessible to broader usage, we contrast seven distinct annotation schemes of functor-argument structure, both in terms of syntactic and semantic relations. Drawing examples from a multi-annotated gold standard, we show how abstractly similar information can take quite different forms across frameworks. We further seek to shed light on the representational 'distance' between pure bilexical dependencies, on the one hand, and full-blown logical-form propositional semantics, on the other hand. 
Furthermore, we propose a fully automated conversion procedure from (logical-form) meaning representation to bilexical semantic dependencies.", "phrases": ["syntacto-semantic dependency", "bilexical dependency", "structural comparison", "other parser", "broad hpsg construction"], "overall_score": 1.8175655008173228, "scores": [1.0071150341213908, 0.5753658162107381, 0.5570999684928521, 0.540037909854154, 0.5279856415148428], "rank_score": 0.6415208740387957} -{"id": "popovic-ney-2006-pos", "title": "POS-based Word Reorderings for Statistical Machine Translation", "abstract": "In this work we investigate new possibilities for improving the quality of statistical machine translation (SMT) by applying word reorderings of the source language sentences based on Part-of-Speech tags. Results are presented on the European Parliament corpus containing about 700k sentences and 15M running words. In order to investigate sparse training data scenarios, we also report results obtained on about 1% of the original corpus. The source languages are Spanish and English and target languages are Spanish, English and German. We propose two types of reorderings depending on the language pair and the translation direction: local reorderings of nouns and adjectives for translation from and into Spanish and long-range reorderings of verbs for translation into German. For our best translation system, we achieve up to 2% relative reduction of WER and up to 7% relative increase of BLEU score. Improvements can be seen both on the reordered sentences as well as on the rest of the test corpus. Local reorderings are especially important for the translation systems trained on the small corpus whereas long-range reorderings are more effective for the larger corpus.", "phrases": ["statistical machine translation", "pos information", "small set", "popovic\u0301"], "overall_score": 1.5939039158359332, "scores": [0.9524561847227123, 0.5643714957367103, 0.5252806381726434, 0.5236281704580442], "rank_score": 0.6414341222725276} -{"id": "mueller-etal-2014-dependency", "title": "Dependency parsing with latent refinements of part-of-speech tags", "abstract": "In this paper we propose a method to increase dependency parser performance without using additional labeled or unlabeled data by refining the layer of predicted part-of-speech (POS) tags. We perform experiments on English and German and show significant improvements for both languages. The refinement is based on generative split-merge training for Hidden Markov models (HMMs).", "phrases": ["refinement", "split-merge training", "hidden markov model"], "overall_score": 0.7046544661869601, "scores": [0.8121932319909734, 0.5912115260736402, 0.5208076509588826], "rank_score": 0.6414041363411654} -{"id": "chen-etal-2019-evaluating", "title": "Evaluating Question Answering Evaluation", "abstract": "As the complexity of question answering (QA) datasets evolves, moving away from restricted formats like span extraction and multiple-choice (MC) to free-form answer generation, it is imperative to understand how well current metrics perform in evaluating QA. This is especially important as existing metrics (BLEU, ROUGE, METEOR, and F1) are computed using n-gram similarity and have a number of well-known drawbacks. In this work, we study the suitability of existing metrics in QA. For generative QA, we show that while current metrics do well on existing datasets, converting multiple-choice datasets into free-response datasets is challenging for current metrics. 
We also look at span-based QA, where F1 is a reasonable metric. We show that F1 may not be suitable for all extractive QA tasks depending on the answer types. Our study suggests that while current metrics may be suitable for existing QA datasets, they limit the complexity of QA datasets that can be created. This is especially true in the context of free-form QA, where we would like our models to be able to generate more complex and abstractive answers, thus necessitating new metrics that go beyond n-gram based matching. As a step towards a better QA metric, we explore using BERTScore, a recently proposed metric for evaluating translation, for QA. We find that although it fails to provide stronger correlation with human judgements, future work focused on tailoring a BERT-based metric to QA evaluation may prove fruitful.", "phrases": ["rouge", "human judgment", "summarization"], "overall_score": 1.4092896704263378, "scores": [0.879162709171815, 0.5242314077283611, 0.5207919926529139], "rank_score": 0.6413953698510301} -{"id": "de-lhoneux-nivre-2016-investigating", "title": "Should Have, Would Have, Could Have. Investigating Verb Group Representations for Parsing with Universal Dependencies.", "abstract": "Treebanks have recently been released for a number of languages with the harmonized annotation created by the Universal Dependencies project. The representation of certain constructions in UD is known to be suboptimal for parsing and may be worth transforming for the purpose of parsing. In this paper, we focus on the representation of verb groups. Several studies have shown that parsing works better when auxiliaries are the head of auxiliary dependency relations, which is not the case in UD. We therefore transformed verb groups in UD treebanks, parsed the test set and transformed it back, and contrary to expectations, observed significant decreases in accuracy. We provide suggestive evidence that improvements in previous studies were obtained because the transformation helps disambiguate POS tags of main verbs and auxiliaries. The question of why parsing accuracy decreases with this approach in the case of UD is left open.", "phrases": ["verb group", "universal dependencies", "treebank"], "overall_score": 0.8889787532833464, "scores": [0.8280535259752659, 0.5728430110989405, 0.5228913211520995], "rank_score": 0.6412626194087686} -{"id": "hoshen-wolf-2018-non", "title": "Non-Adversarial Unsupervised Word Translation", "abstract": "Unsupervised word translation from non-parallel inter-lingual corpora has attracted much research interest. Very recently, neural network methods trained with adversarial loss functions achieved high accuracy on this task. Despite the impressive success of the recent techniques, they suffer from the typical drawbacks of generative adversarial models: sensitivity to hyper-parameters, long training time and lack of interpretability. In this paper, we make the observation that two sufficiently similar distributions can be aligned correctly with iterative matching methods. We present a novel method that first aligns the second moment of the word distributions of the two languages and then iteratively refines the alignment. Extensive experiments on word translation of European and Non-European languages show that our method achieves better performance than recent state-of-the-art deep adversarial approaches and is competitive with the supervised baseline. 
It is also efficient, easy to parallelize on CPU, and interpretable.", "phrases": ["unsupervised word translation", "adversarial model", "long training time", "interpretability", "second moment"], "overall_score": 1.4764125069098715, "scores": [0.9347921902296288, 0.6152225673791564, 0.563255376645031, 0.5496510768491948, 0.5430678127165076], "rank_score": 0.6411978047639038} -{"id": "nakamura-kawahara-2018-jfckb", "title": "JFCKB: Japanese Feature Change Knowledge Base", "abstract": "Commonsense knowledge plays an essential role in our language activities. Although many projects have aimed to develop language resources for commonsense knowledge, there is little work focusing on connotational meanings. This is because constructing commonsense knowledge including connotational meanings is challenging. In this paper, we present a Japanese knowledge base where arguments in event sentences are associated with various feature changes caused by the events. For example, \u201cmy child\u201d in \u201cmy wife hits my child\u201d is associated with some feature changes, such as increase in pain, increase in anger, increase in disgust, and decrease in joy. We constructed this knowledge base through crowdsourcing tasks by gathering feature changes of arguments in event sentences. After the construction of the knowledge base, we conducted an experiment in anaphora resolution using the knowledge base. We regarded anaphora resolution as an antecedent candidate ranking task and used Ranking SVM as the solver. Experimental results demonstrated the usefulness of our feature change knowledge base.", "phrases": ["knowledge base", "wife", "jfckb"], "overall_score": 0.7038076069537162, "scores": [0.8362965085684559, 0.5493558413493191, 0.5362475256251323], "rank_score": 0.6406332918476357} -{"id": "kiritchenko-mohammad-2016-effect", "title": "The Effect of Negators, Modals, and Degree Adverbs on Sentiment Composition", "abstract": "Negators, modals, and degree adverbs can significantly affect the sentiment of the words they modify. Often, their impact is modeled with simple heuristics, although recent work has shown that such heuristics do not capture the true sentiment of multi-word phrases. We created a dataset of phrases that include various negators, modals, and degree adverbs, as well as their combinations. Both the phrases and their constituent content words were annotated with real-valued scores of sentiment association. Using phrasal terms in the created dataset, we analyze the impact of individual modifiers and the average effect of the groups of modifiers on overall sentiment. We find that the effect of modifiers varies substantially among the members of the same group. Furthermore, each individual modifier can affect sentiment words in different ways. Therefore, solutions based on statistical learning seem more promising than fixed hand-crafted rules on the task of automatic sentiment prediction.", "phrases": ["negator", "modal", "degree adverb", "sentiment composition"], "overall_score": 1.2465058391306252, "scores": [0.9314762444085026, 0.5561995952422519, 0.5419729707424377, 0.5326603275405813], "rank_score": 0.6405772844834434} -{"id": "hope-etal-2021-extracting", "title": "Extracting a Knowledge Base of Mechanisms from COVID-19 Papers", "abstract": "The COVID-19 pandemic has spawned a diverse body of scientific literature that is challenging to navigate, stimulating interest in automated tools to help find useful knowledge. 
We pursue the construction of a knowledge base (KB) of mechanisms\u2014a fundamental concept across the sciences, which encompasses activities, functions and causal relations, ranging from cellular processes to economic impacts. We extract this information from the natural language of scientific papers by developing a broad, unified schema that strikes a balance between relevance and breadth. We annotate a dataset of mechanisms with our schema and train a model to extract mechanism relations from papers. Our experiments demonstrate the utility of our KB in supporting interdisciplinary scientific search over COVID-19 literature, outperforming the prominent PubMed search in a study with clinical experts. Our search engine, dataset and code are publicly available.", "phrases": ["knowledge base", "activity", "mechanism relation"], "overall_score": 1.030894049646257, "scores": [0.7994412770006485, 0.5910342443034239, 0.5311159393204791], "rank_score": 0.6405304868748504} -{"id": "leusch-etal-2003-novel", "title": "A novel string-to-string distance measure with applications to machine translation evaluation", "abstract": "We introduce a string-to-string distance measure which extends the edit distance by block transpositions as a constant-cost edit operation. An algorithm for the calculation of this distance measure in polynomial time is presented. We then demonstrate how this distance measure can be used as an evaluation criterion in machine translation. The correlation between this evaluation criterion and human judgment is systematically compared with that of other automatic evaluation measures on two translation tasks. In general, like other automatic evaluation measures, the criterion shows low correlation at sentence level, but good correlation at system level.", "phrases": ["edit distance", "per", "bag-of-word", "word error rate", "invwer"], "overall_score": 1.40660309485864, "scores": [0.8483562544147762, 0.6307465589608477, 0.591740809867218, 0.5680763583473801, 0.5619433014973889], "rank_score": 0.6401726566175222} -{"id": "jing-etal-2003-howtogetachinesename", "title": "HowtogetaChineseName(Entity): Segmentation and Combination Issues", "abstract": "When building a Chinese named entity recognition system, one must deal with certain language-specific issues such as whether the model should be based on characters or words. While there is no unique answer to this question, we discuss in detail advantages and disadvantages of each model, identify problems in segmentation and suggest possible solutions, presenting our observations, analysis, and experimental results. The second topic of this paper is classifier combination. We present and describe four classifiers for Chinese named entity recognition and describe various methods for combining their outputs. The results demonstrate that classifier combination is an effective technique for improving system performance: experiments over a large annotated corpus of fine-grained entity types exhibit a 10% relative reduction in F-measure error.", "phrases": ["segmentation", "entity recognition", "character"], "overall_score": 1.0300705597593145, "scores": [0.8129843689329156, 0.5663910040587674, 0.5406810985278265], "rank_score": 0.6400188238398364} -{"id": "bowman-etal-2016-fast", "title": "A Fast Unified Model for Parsing and Sentence Understanding", "abstract": "Tree-structured neural networks exploit valuable syntactic parse information as they interpret the meanings of sentences. 
However, they suffer from two key technical problems that make them slow and unwieldy for large-scale NLP tasks: they usually operate on parsed sentences and they do not directly support batched computation. We address these issues by introducing the Stack-augmented Parser-Interpreter Neural Network (SPINN), which combines parsing and interpretation within a single tree-sequence hybrid model by integrating tree-structured sentence interpretation into the linear sequential structure of a shift-reduce parser. Our model supports batched computation for a speedup of up to 25\u00d7 over other tree-structured models, and its integrated parser can operate on unparsed data with little loss in accuracy. We evaluate it on the Stanford NLI entailment task and show that it significantly outperforms other sentence-encoding models.", "phrases": ["hybrid model", "sentence interpretation", "sequential structure", "shift-reduce parser", "tree-lstm"], "overall_score": 1.6890235230950357, "scores": [0.834910033899561, 0.6160684051050105, 0.60302387941886, 0.5843811168583537, 0.5616674013199233], "rank_score": 0.6400101673203417} -{"id": "maletti-2010-synchronous", "title": "Why Synchronous Tree Substitution Grammars?", "abstract": "Synchronous tree substitution grammars are a translation model that is used in syntax-based machine translation. They are investigated in a formal setting and compared to a competitor that is at least as expressive. The competitor is the extended multi bottom-up tree transducer, which is the bottom-up analogue with one essential additional feature. This model has been investigated in theoretical computer science, but seems widely unknown in natural language processing. The two models are compared with respect to standard algorithms (binarization, regular restriction, composition, application). Particular attention is paid to the complexity of the algorithms.", "phrases": ["restriction", "stssg", "point"], "overall_score": 1.2453654388514002, "scores": [0.8619018117235281, 0.5338784941672022, 0.5241933981202047], "rank_score": 0.6399912346703117} -{"id": "toutanova-etal-2004-leaf", "title": "The Leaf Path Projection View of Parse Trees: Exploring String Kernels for HPSG Parse Selection", "abstract": "We present a novel representation of parse trees as lists of paths (leaf projection paths) from leaves to the top level of the tree. This representation allows us to achieve significantly higher accuracy in the task of HPSG parse selection than standard models, and makes the application of string kernels natural. We define tree kernels via string kernels on projection paths and explore their performance in the context of parse disambiguation. We apply SVM ranking models and achieve an exact sentence accuracy of 85.40% on the Redwoods corpus.", "phrases": ["string kernel", "hpsg parse selection", "semantic dependency", "ancestor node"], "overall_score": 1.0299380601955013, "scores": [0.9395931755729044, 0.5443515459171984, 0.5441365024275744, 0.5316647650104241], "rank_score": 0.6399364972320254} -{"id": "shi-etal-2021-learning", "title": "Learning Syntax from Naturally-Occurring Bracketings", "abstract": "Naturally-occurring bracketings, such as answer fragments to natural language questions and hyperlinks on webpages, can reflect human syntactic intuition regarding phrasal boundaries. Their availability and approximate correspondence to syntax make them appealing as distant information sources to incorporate into unsupervised constituency parsing. 
But they are noisy and incomplete; to address this challenge, we develop a partial-brackets-aware structured ramp loss in learning. Experiments demonstrate that our distantly-supervised models trained on naturally-occurring bracketing data are more accurate in inducing syntactic structures than competing unsupervised systems. On the English WSJ corpus, our models achieve an unlabeled F1 score of 68.9 for constituency parsing.", "phrases": ["bracketing", "answer fragment", "hyperlink"], "overall_score": 0.7029134146233604, "scores": [0.8552630964645381, 0.537073904084202, 0.5271210881884508], "rank_score": 0.639819362912397} -{"id": "johansson-nugues-2007-extended", "title": "Extended Constituent-to-Dependency Conversion for English", "abstract": "We describe a new method to convert English constituent trees using the Penn Treebank annotation style into dependency trees. The new format was inspired by annotation practices used in other dependency treebanks with the intention to produce a better interface to further semantic processing than existing methods. In particular, we used a richer set of edge labels and introduced links to handle long-distance phenomena such as wh-movement and topicalization. The resulting trees generally have a more complex dependency structure. For example, 6% of the trees contain at least one nonprojective link, which is difficult for many parsing algorithms. As can be expected, the more complex structure and the enriched set of edge labels make the trees more difficult to predict, and we observed a decrease in parsing accuracy when applying two dependency parsers to the new corpus. However, the richer information contained in the new trees resulted in a 23% error reduction in a baseline FrameNet semantic role labeler that relied on dependency arc labels only.", "phrases": ["dependency tree", "link", "wh-movement", "conversion scheme"], "overall_score": 2.059260927253873, "scores": [0.9269121443483868, 0.5738419201272071, 0.534517096899021, 0.5237103455782868], "rank_score": 0.6397453767382254} -{"id": "ge-mooney-2005-statistical", "title": "A Statistical Semantic Parser that Integrates Syntax and Semantics", "abstract": "We introduce a learning semantic parser, Scissor, that maps natural-language sentences to a detailed, formal, meaning-representation language. It first uses an integrated statistical parser to produce a semantically augmented parse tree, in which each non-terminal node has both a syntactic and a semantic label. A compositional-semantics procedure is then used to map the augmented parse tree into a final meaning representation. We evaluate the system in two domains, a natural-language database interface and an interpreter for coaching instructions in robotic soccer. We present experimental results demonstrating that Scissor produces more accurate semantic representations than several previous approaches.", "phrases": ["syntax", "natural language question", "scissor model"], "overall_score": 2.0316090658680395, "scores": [0.863821889323758, 0.5297814715558579, 0.5241826562744214], "rank_score": 0.6392620057180124} -{"id": "zhang-etal-2015-neural", "title": "Neural Networks for Open Domain Targeted Sentiment", "abstract": "Open domain targeted sentiment is the joint information extraction task that finds target mentions together with the sentiment towards each mention from a text corpus. The task is typically modeled as a sequence labeling problem, and solved using state-of-the-art labelers such as CRF. 
We empirically study the effect of word embeddings and automatic feature combinations on the task by extending a CRF baseline using neural networks, which have demonstrated large potentials for sentiment analysis. Results show that the neural model can give better results by significantly increasing the recall. In addition, we propose a novel integration of neural and discrete features, which combines their relative advantages, leading to significantly higher results compared to both baselines.", "phrases": ["word embedding", "sentiment-bearing iob label", "sequence labeling task", "unified tagging scheme", "fashion"], "overall_score": 2.057312649931109, "scores": [0.9723669454319417, 0.6116691234792399, 0.5531594876264041, 0.5355387703773901, 0.5229662248690424], "rank_score": 0.6391401103568036} -{"id": "uchimoto-etal-2004-multilingual", "title": "Multilingual Aligned Parallel Treebank Corpus Reflecting Contextual Information and Its Applications", "abstract": "This paper describes Japanese-English-Chinese aligned parallel treebank corpora of newspaper articles. They have been constructed by translating each sentence in the Penn Treebank and the Kyoto University text corpus into a corresponding natural sentence in a target language. Each sentence is translated so as to reflect its contextual information and is annotated with morphological and syntactic structures and phrasal alignment. This paper also describes the possible applications of the parallel corpus and proposes a new framework to aid in translation. In this framework, parallel translations whose source language sentence is similar to a given sentence can be semi-automatically generated. In this paper we show that the framework can be achieved by using our aligned parallel treebank corpus.", "phrases": ["contextual information", "parallel corpora", "national institute"], "overall_score": 0.885904078769807, "scores": [0.7826506565419368, 0.6052499203834975, 0.5292335547913252], "rank_score": 0.6390447105722531} -{"id": "mehdad-etal-2014-abstractive", "title": "Abstractive Summarization of Spoken and Written Conversations Based on Phrasal Queries", "abstract": "We propose a novel abstractive query-based summarization system for conversations, where queries are defined as phrases reflecting a user's information needs. We rank and extract the utterances in a conversation based on the overall content and the phrasal query information. We cluster the selected sentences based on their lexical similarity and aggregate the sentences in each cluster by means of a word graph model. We propose a ranking strategy to select the best path in the constructed graph as a query-based abstract sentence for each cluster. A resulting summary consists of abstractive sentences representing the phrasal query information and the overall content of the conversation. Automatic and manual evaluation results over meeting, chat and email conversations show that our approach significantly outperforms baselines and previous extractive models.", "phrases": ["conversation", "word graph", "meeting summarization"], "overall_score": 1.4040521448531633, "scores": [0.8395313229768533, 0.5421841719564545, 0.5353195127790357], "rank_score": 0.6390116692374478} -{"id": "rubinstein-etal-2013-toward", "title": "Toward Fine-grained Annotation of Modality in Text", "abstract": "We present a linguistically-informed schema for annotating modal expressions and describe its application to a subset of the MPQA corpus of English texts (Wiebe et al. 2005). 

The annotation is fine-grained in two respects: (i) in the range of expressions that are defined as modal targets and (ii) in the amount of information that is annotated for each target expression. We use inter-annotator reliability results to support a two-way distinction between priority and nonpriority modality types.", "phrases": ["modality", "scope annotation", "mpqa english corpus"], "overall_score": 0.8856514983903948, "scores": [0.8369550250297655, 0.5560959777452342, 0.5235365342505541], "rank_score": 0.6388625123418512} -{"id": "wang-pan-2018-recursive", "title": "Recursive Neural Structural Correspondence Network for Cross-domain Aspect and Opinion Co-Extraction", "abstract": "Fine-grained opinion analysis aims to extract aspect and opinion terms from each sentence for opinion summarization. Supervised learning methods have proven to be effective for this task. However, in many domains, the lack of labeled data hinders the learning of a precise extraction model. In this case, unsupervised domain adaptation methods are desired to transfer knowledge from the source domain to any unlabeled target domain. In this paper, we develop a novel recursive neural network that could reduce domain shift effectively in word level through syntactic relations. We treat these relations as invariant \u201cpivot information\u201d across domains to build structural correspondences and generate an auxiliary task to predict the relation between any two adjacent words in the dependency tree. In the end, we demonstrate state-of-the-art results on three benchmark datasets.", "phrases": ["cross-domain aspect", "opinion term", "dependency relation"], "overall_score": 1.6858710988910306, "scores": [0.8387096435662849, 0.5549106150630441, 0.5228266635034421], "rank_score": 0.6388156407109237} -{"id": "ross-etal-2022-tailor", "title": "Tailor: Generating and Perturbing Text with Semantic Controls", "abstract": "Controlled text perturbation is useful for evaluating and improving model generalizability. However, current techniques rely on training a model for every target perturbation, which is expensive and hard to generalize. We present Tailor, a semantically-controlled text generation system. Tailor builds on a pretrained seq2seq model and produces textual outputs conditioned on control codes derived from semantic representations. We craft a set of operations to modify the control codes, which in turn steer generation towards targeted attributes. These operations can be further composed into higher-level ones, allowing for flexible perturbation strategies. We demonstrate the effectiveness of these perturbations in multiple applications. First, we use Tailor to automatically create high-quality contrast sets for four distinct natural language processing (NLP) tasks. These contrast sets contain fewer spurious artifacts and are complementary to manually annotated ones in their lexical diversity. Second, we show that Tailor perturbations can improve model generalization through data augmentation. 
Perturbing just \u223c2% of training data leads to a 5.8-point gain on an NLI challenge set measuring reliance on syntactic heuristics.", "phrases": ["control code", "perturbation strategy", "tailor"], "overall_score": 1.1445347712046106, "scores": [0.8113629905989008, 0.5693131992597474, 0.5356548649414329], "rank_score": 0.6387770182666936} -{"id": "dasigi-etal-2019-quoref", "title": "Quoref: A Reading Comprehension Dataset with Questions Requiring Coreferential Reasoning", "abstract": "Machine comprehension of texts longer than a single sentence often requires coreference resolution. However, most current reading comprehension benchmarks do not contain complex coreferential phenomena and hence fail to evaluate the ability of models to resolve coreference. We present a new crowdsourced dataset containing more than 24K span-selection questions that require resolving coreference among entities in over 4.7K English paragraphs from Wikipedia. Obtaining questions focused on such phenomena is challenging, because it is hard to avoid lexical cues that shortcut complex reasoning. We deal with this issue by using a strong baseline model as an adversary in the crowdsourcing loop, which helps crowdworkers avoid writing questions with exploitable surface cues. We show that state-of-the-art reading comprehension models perform significantly worse than humans on this benchmark\u2014the best model performance is 70.5 F1, while the estimated human performance is 93.4 F1.", "phrases": ["comprehension", "reasoning", "coreference"], "overall_score": 1.4032102023820112, "scores": [0.7904659070989929, 0.5922221685026855, 0.5331973785149563], "rank_score": 0.6386284847055449} -{"id": "nishimura-etal-2018-multi", "title": "Multi-Source Neural Machine Translation with Missing Data", "abstract": "Multi-source translation is an approach to exploit multiple inputs (e.g. in two different languages) to increase translation accuracy. In this paper, we examine approaches for multi-source neural machine translation (NMT) using an incomplete multilingual corpus in which some translations are missing. In practice, many multilingual corpora are not complete due to the difficulty to provide translations in all of the relevant languages (for example, in TED talks, most English talks only have subtitles for a small portion of the languages that TED supports). Existing studies on multi-source translation did not explicitly handle such situations. This study focuses on the use of incomplete multilingual corpora in multi-encoder NMT and mixture of NMT experts and examines a very simple implementation where missing source translations are replaced by a special symbol NULL. These methods allow us to use incomplete corpora both at training time and test time. In experiments with real incomplete multilingual corpora of TED Talks, the multi-source NMT with the NULL tokens achieved higher translation accuracies measured by BLEU than those by any one-to-one NMT systems.", "phrases": ["neural machine translation", "situation", "multiple source", "direction"], "overall_score": 1.4029890145241783, "scores": [0.916022921777628, 0.5910804342402922, 0.5239672409516741, 0.5230406741232791], "rank_score": 0.6385278177732183} -{"id": "dasgupta-ng-2007-high", "title": "High-Performance, Language-Independent Morphological Segmentation", "abstract": "This paper introduces an unsupervised morphological segmentation algorithm that shows robust performance for four languages with different levels of morphological complexity. 
In particular, our algorithm outperforms Goldsmith's Linguistica and Creutz and Lagus's Morfessor for English and Bengali, and achieves performance that is comparable to the best results for all three PASCAL evaluation datasets. Improvements arise from (1) the use of relative corpus frequency and suffix level similarity for detecting incorrect morpheme attachments and (2) the induction of orthographic rules and allomorphs for segmenting words where roots exhibit spelling changes during morpheme attachments.", "phrases": ["morphological segmentation", "complexity", "affix", "transitional probability"], "overall_score": 1.4698845632461646, "scores": [0.9343337199886381, 0.571739493018661, 0.527186861336478, 0.5201909450665452], "rank_score": 0.6383627548525805} -{"id": "nissim-etal-2020-fair", "title": "Fair Is Better than Sensational: Man Is to Doctor as Woman Is to Doctor", "abstract": "Analogies such as man is to king as woman is to X are often used to illustrate the amazing power of word embeddings. Concurrently, they have also been used to expose how strongly human biases are encoded in vector spaces trained on natural language, with examples like man is to computer programmer as woman is to homemaker. Recent work has shown that analogies are in fact not an accurate diagnostic for bias, but this does not mean that they are not used anymore, or that their legacy is fading. Instead of focusing on the intrinsic problems of the analogy task as a bias detection tool, we discuss a series of issues involving implementation as well as subjective choices that might have yielded a distorted picture of bias in word embeddings. We stand by the truth that human biases are present in word embeddings, and, of course, the need to address them. But analogies are not an accurate tool to do so, and the way they have been most often used has exacerbated some possibly non-existing biases and perhaps hidden others. Because they are still widely popular, and some of them have become classics within and outside the NLP community, we deem it important to provide a series of clarifications that should put well-known, and potentially new analogies, into the right perspective.", "phrases": ["woman", "subjective factor", "judgement"], "overall_score": 1.1436541435623555, "scores": [0.8079471456119897, 0.557148637317209, 0.5497608089353506], "rank_score": 0.6382855306215164} -{"id": "liu-liu-2008-correlation", "title": "Correlation between ROUGE and Human Evaluation of Extractive Meeting Summaries", "abstract": "Automatic summarization evaluation is critical to the development of summarization systems. While ROUGE has been shown to correlate well with human evaluation for content match in text summarization, there are many characteristics in multiparty meeting domain, which may pose potential problems to ROUGE. In this paper, we carefully examine how well the ROUGE scores correlate with human evaluation for extractive meeting summarization. 

Our experiments show that generally the correlation is rather low, but a significantly better correlation can be obtained by accounting for several unique meeting characteristics, such as disfluencies and speaker information, especially when evaluating system-generated summaries.", "phrases": ["human evaluation", "summarization system", "rouge score"], "overall_score": 1.3269003814039873, "scores": [0.8238656934522611, 0.5610759387651767, 0.5293709677877688], "rank_score": 0.6381042000017355} -{"id": "you-etal-2020-hard", "title": "Hard-Coded Gaussian Attention for Neural Machine Translation", "abstract": "Recent work has questioned the importance of the Transformer's multi-headed attention for achieving high translation quality. We push further in this direction by developing a \u201chard-coded\u201d attention variant without any learned parameters. Surprisingly, replacing all learned self-attention heads in the encoder and decoder with fixed, input-agnostic Gaussian distributions minimally impacts BLEU scores across four different language pairs. However, additionally, hard-coding cross attention (which connects the decoder to the encoder) significantly lowers BLEU, suggesting that it is more important than self-attention. Much of this BLEU drop can be recovered by adding just a single learned cross attention head to an otherwise hard-coded Transformer. Taken as a whole, our results offer insight into which components of the Transformer are actually important, which we hope will guide future work into the development of simpler and more efficient attention-based models.", "phrases": ["gaussian attention", "translation quality", "bleu score", "position"], "overall_score": 1.4692109656714272, "scores": [0.9328995596321894, 0.5481237929527628, 0.5401416512955461, 0.5311158566908961], "rank_score": 0.6380702151428487} -{"id": "zhang-etal-2020-syntax", "title": "Syntax-Aware Opinion Role Labeling with Dependency Graph Convolutional Networks", "abstract": "Opinion role labeling (ORL) is a fine-grained opinion analysis task and aims to answer \u201cwho expressed what kind of sentiment towards what?\u201d. Due to the scarcity of labeled data, ORL remains challenging for data-driven methods. In this work, we try to enhance neural ORL models with syntactic knowledge by comparing and integrating different representations. We also propose dependency graph convolutional networks (DEPGCN) to encode parser information at different processing levels. In order to compensate for parser inaccuracy and reduce error propagation, we introduce multi-task learning (MTL) to train the parser and the ORL model simultaneously. We verify our methods on the benchmark MPQA corpus. The experimental results show that syntactic information is highly valuable for ORL, and our final MTL model effectively boosts the F1 score by 9.29 over the syntax-agnostic baseline. In addition, we find that the contributions from syntactic knowledge do not fully overlap with contextualized word representations (BERT). 
Our best model achieves 4.34 higher F1 score than the current state-of-the-art.", "phrases": ["opinion role labeling", "convolutional network", "syntactic knowledge"], "overall_score": 1.1432473821022564, "scores": [0.8532938682437052, 0.5334223788693255, 0.5274592910714597], "rank_score": 0.6380585127281635} -{"id": "kuncoro-etal-2018-lstms", "title": "LSTMs Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better", "abstract": "Language exhibits hierarchical structure, but recent work using a subject-verb agreement diagnostic argued that state-of-the-art language models, LSTMs, fail to learn long-range syntax sensitive dependencies. Using the same diagnostic, we show that, in fact, LSTMs do succeed in learning such dependencies\u2014provided they have enough capacity. We then explore whether models that have access to explicit syntactic information learn agreement more effectively, and how the way in which this structural information is incorporated into the model impacts performance. We find that the mere presence of syntactic information does not improve accuracy, but when model architecture is determined by syntax, number agreement is improved. Further, we find that the choice of how syntactic structure is built affects how well number agreement is learned: top-down construction outperforms left-corner and bottom-up variants in capturing non-local structural dependencies.", "phrases": ["syntax-sensitive dependency", "language model", "number agreement", "long-distance dependency"], "overall_score": 1.8065553028484231, "scores": [0.8752382817074877, 0.578802587139531, 0.5679561736189813, 0.5285419864968363], "rank_score": 0.6376347572407092} -{"id": "anzaroot-etal-2014-learning", "title": "Learning Soft Linear Constraints with Application to Citation Field Extraction", "abstract": "Accurately segmenting a citation string into fields for authors, titles, etc. is a challenging task because the output typically obeys various global constraints. Previous work has shown that modeling soft constraints, where the model is encouraged, but not required, to obey the constraints, can substantially improve segmentation performance. On the other hand, for imposing hard constraints, dual decomposition is a popular technique for efficient prediction given existing algorithms for unconstrained inference. We extend the technique to perform prediction subject to soft constraints. Moreover, with a technique for performing inference given soft constraints, it is easy to automatically generate large families of constraints and learn their costs with a simple convex optimization problem during training. This allows us to obtain substantial gains in accuracy on a new, challenging citation extraction dataset.", "phrases": ["citation field extraction", "global constraint", "large family", "cost"], "overall_score": 0.8839274105894936, "scores": [0.9287364540118819, 0.5569308670513478, 0.5410155004500199, 0.5237925620133207], "rank_score": 0.6376188458816425} -{"id": "arun-etal-2009-monte", "title": "Monte Carlo inference and maximization for phrase-based translation", "abstract": "Recent advances in statistical machine translation have used beam search for approximate NP-complete inference within probabilistic translation models. We present an alternative approach of sampling from the posterior distribution defined by a translation model. We define a novel Gibbs sampler for sampling translations given a source sentence and show that it effectively explores this posterior distribution. 

In doing so we overcome the limitations of heuristic beam search and obtain theoretically sound solutions to inference problems such as finding the maximum probability translation and minimum expected risk training and decoding.", "phrases": ["approximation", "sample", "posterior distribution", "gibbs"], "overall_score": 1.3248083830187851, "scores": [0.9217767529862855, 0.561010289561524, 0.541738096152906, 0.5238675070450551], "rank_score": 0.6370981614364426} -{"id": "van-deemter-2006-generating", "title": "Generating Referring Expressions that Involve Gradable Properties", "abstract": "This article examines the role of gradable properties in referring expressions from the perspective of natural language generation. First, we propose a simple semantic analysis of vague descriptions (i.e., referring expressions that contain gradable adjectives) that reflects the context-dependent meaning of the adjectives in them. Second, we show how this type of analysis can inform algorithms for the generation of vague descriptions from numerical data. Third, we ask when such descriptions should be used. The article concludes with a discussion of salience and pointing, which are analyzed as if they were gradable adjectives.", "phrases": ["gradable property", "natural language generation", "adjective", "modifier"], "overall_score": 1.3247902971362642, "scores": [0.851491531123033, 0.5919513087297797, 0.5802703777362399, 0.5246446382726875], "rank_score": 0.637089463965435} -{"id": "smit-etal-2020-combining", "title": "Combining Automatic Labelers and Expert Annotations for Accurate Radiology Report Labeling Using BERT", "abstract": "The extraction of labels from radiology text reports enables large-scale training of medical imaging models. Existing approaches to report labeling typically rely either on sophisticated feature engineering based on medical domain knowledge or manual annotations by experts. In this work, we introduce a BERT-based approach to medical image report labeling that exploits both the scale of available rule-based systems and the quality of expert annotations. We demonstrate superior performance of a biomedically pretrained BERT model first trained on annotations of a rule-based labeler and then finetuned on a small set of expert annotations augmented with automated backtranslation. We find that our final model, CheXbert, is able to outperform the previous best rules-based labeler with statistical significance, setting a new SOTA for report labeling on one of the largest datasets of chest x-rays.", "phrases": ["expert annotation", "radiology report", "image report"], "overall_score": 1.025166441487732, "scores": [0.8390288110759969, 0.5503503710096894, 0.5215359894177838], "rank_score": 0.6369717238344901} -{"id": "xu-cohen-2018-stock", "title": "Stock Movement Prediction from Tweets and Historical Prices", "abstract": "Stock movement prediction is a challenging problem: the market is highly stochastic, and we make temporally-dependent predictions from chaotic data. We treat these three complexities and present a novel deep generative model jointly exploiting text and price signals for this task. Unlike the case with discriminative or topic modeling, our model introduces recurrent, continuous latent variables for a better treatment of stochasticity, and uses neural variational inference to address the intractable posterior inference. We also provide a hybrid objective with temporal auxiliary to flexibly capture predictive dependencies. 
We demonstrate the state-of-the-art performance of our proposed model on a new stock movement prediction dataset which we collected.", "phrases": ["temporal auxiliary", "predictive dependency", "stock movement prediction", "news"], "overall_score": 1.6337528270068098, "scores": [0.9502656538213472, 0.5385996593333494, 0.5311976599780153, 0.5277500232590646], "rank_score": 0.636953249097944} -{"id": "gooding-etal-2021-word", "title": "Word Complexity is in the Eye of the Beholder", "abstract": "Lexical complexity is a highly subjective notion, yet this factor is often neglected in lexical simplification and readability systems which use a \u201cone-size-fits-all\u201d approach. In this paper, we investigate which aspects contribute to the notion of lexical complexity in various groups of readers, focusing on native and non-native speakers of English, and how the notion of complexity changes depending on the proficiency level of a non-native reader. To facilitate reproducibility of our approach and foster further research into these aspects, we release a dataset of complex words annotated by readers with different backgrounds.", "phrases": ["factor", "non-native speaker", "word complexity", "audience"], "overall_score": 1.0251204249280346, "scores": [0.9228425271749441, 0.5501216523240875, 0.5452905071830356, 0.5295178418714601], "rank_score": 0.6369431321383818} -{"id": "blasi-etal-2022-systematic", "title": "Systematic Inequalities in Language Technology Performance across the World's Languages", "abstract": "Natural language processing (NLP) systems have become a central technology in communication, education, medicine, artificial intelligence, and many other domains of research and development. While the performance of NLP methods has grown enormously over the last decade, this progress has been restricted to a minuscule subset of the world's \u22486,500 languages. We introduce a framework for estimating the global utility of language technologies as revealed in a comprehensive snapshot of recent publications in NLP. Our analyses involve the field at large, but also more in-depth studies on both user-facing technologies (machine translation, language understanding, question answering, text-to-speech synthesis) as well as foundational NLP tasks (dependency parsing, morphological inflection). In the process, we (1) quantify disparities in the current state of NLP research, (2) explore some of its associated societal and academic factors, and (3) produce tailored recommendations for evidence-based policy making aimed at promoting more global and equitable language technologies. Data and code to reproduce the findings discussed in this paper are available on GitHub ().", "phrases": ["technology", "machine translation", "systematic inequality"], "overall_score": 1.6807954831336565, "scores": [0.8450354457063931, 0.5348431119185451, 0.5307985590923732], "rank_score": 0.6368923722391038} -{"id": "duan-etal-2012-twitter", "title": "Twitter Topic Summarization by Ranking Tweets using Social Influence and Content Quality", "abstract": "In this paper, we propose a time-line based framework for topic summarization in Twitter. We summarize topics by sub-topics along time line to fully capture rapid topic evolution in Twitter. Specifically, we rank and select salient and diversified tweets as a summary of each sub-topic. We have observed that ranking tweets is significantly different from ranking sentences in traditional extractive document summarization. 

We model and formulate the tweet ranking in a unified mutual reinforcement graph, where the social influence of users and the content quality of tweets are taken into consideration simultaneously in a mutually reinforcing manner. Extensive experiments are conducted on 3.9 million tweets. The results show that the proposed approach outperforms previous approaches by 14% improvement on average ROUGE-1. Moreover, we show how the content quality of tweets and the social influence of users effectively improve the performance of measuring the salience of tweets.", "phrases": ["summarization", "social influence", "twitter"], "overall_score": 1.1409786774303254, "scores": [0.8037397918500132, 0.5706676633834422, 0.5359695183933014], "rank_score": 0.6367923245422523} -{"id": "zhang-etal-2017-position", "title": "Position-aware Attention and Supervised Data Improve Slot Filling", "abstract": "Organized relational knowledge in the form of \u201cknowledge graphs\u201d is important for many applications. However, the ability to populate knowledge bases with facts automatically extracted from documents has improved frustratingly slowly. This paper simultaneously addresses two issues that have held back prior work. We first propose an effective new model, which combines an LSTM sequence model with a form of entity position-aware attention that is better suited to relation extraction. Then we build TACRED, a large (119,474 examples) supervised relation extraction dataset obtained via crowdsourcing and targeted towards TAC KBP relations. The combination of better supervised data and a more appropriate high-capacity model enables much better relation extraction performance. When the model trained on this new dataset replaces the previous relation extraction component of the best TAC KBP 2015 slot filling system, its F1 score increases markedly from 22.2% to 26.7%.", "phrases": ["slot filling system", "position-aware attention", "few-shot scenario", "powerful encoder"], "overall_score": 2.550336841395711, "scores": [0.9288334404848678, 0.5544506988043517, 0.5343290732830214, 0.5280566616413872], "rank_score": 0.6364174685534071} -{"id": "pustejovsky-krishnaswamy-2014-generating", "title": "Generating Simulations of Motion Events from Verbal Descriptions", "abstract": "In this paper, we describe a computational model for motion events in natural language that maps from linguistic expressions, through a dynamic event interpretation, into three-dimensional temporal simulations in a model. Starting with the model from (Pustejovsky and Moszkowicz, 2011), we analyze motion events using temporally-traced Labelled Transition Systems. We model the distinction between path- and manner-motion in an operational semantics, and further distinguish different types of manner-of-motion verbs in terms of the mereo-topological relations that hold throughout the process of movement. From these representations, we generate minimal models, which are realized as three-dimensional simulations in software developed with the game engine, Unity. The generated simulations act as a conceptual \u201cdebugger\u201d for the semantics of different motion verbs: that is, by testing for consistency and informativeness in the model, simulations expose the presuppositions associated with linguistic expressions and their compositions. 

Because the model generation component is still incomplete, this paper focuses on an implementation which maps directly from linguistic interpretations into the Unity code snippets that create the simulations.", "phrases": ["motion event", "unity", "formal semantic"], "overall_score": 1.1402865821024193, "scores": [0.8323371055275127, 0.5464718623304529, 0.530409208497519], "rank_score": 0.6364060587851615} -{"id": "gillick-etal-2019-learning", "title": "Learning Dense Representations for Entity Retrieval", "abstract": "We show that it is feasible to perform entity linking by training a dual encoder (two-tower) model that encodes mentions and entities in the same dense vector space, where candidate entities are retrieved by approximate nearest neighbor search. Unlike prior work, this setup does not rely on an alias table followed by a re-ranker, and is thus the first fully learned entity retrieval model. We show that our dual encoder, trained using only anchor-text links in Wikipedia, outperforms discrete alias table and BM25 baselines, and is competitive with the best comparable results on the standard TACKBP-2010 dataset. In addition, it can retrieve candidates extremely fast, and generalizes well to a new dataset derived from Wikinews. On the modeling side, we demonstrate the dramatic value of an unsupervised negative mining algorithm for this task.", "phrases": ["entity retrieval", "dual encoder", "candidate", "wikipedia", "bm25 baseline"], "overall_score": 1.873707470800009, "scores": [0.9419708005074612, 0.5821665614057462, 0.560171516665012, 0.5545068042911738, 0.5429576261676456], "rank_score": 0.6363546618074077} -{"id": "chen-etal-2018-xl", "title": "XL-NBT: A Cross-lingual Neural Belief Tracking Framework", "abstract": "Task-oriented dialog systems are becoming pervasive, and many companies heavily rely on them to complement human agents for customer service in call centers. With globalization, the need for providing cross-lingual customer support becomes more urgent than ever. However, cross-lingual support poses great challenges\u2014it requires a large amount of additional annotated data from native speakers. In order to bypass the expensive human annotation and achieve the first step towards the ultimate goal of building a universal dialog system, we set out to build a cross-lingual state tracking framework. Specifically, we assume that there exists a source language with dialog belief tracking annotations while the target languages have no annotated dialog data of any form. Then, we pre-train a state tracker for the source language as a teacher, which is able to exploit easy-to-access parallel data. We then distill and transfer its own knowledge to the student state tracker in target languages. We specifically discuss two types of common parallel resources: bilingual corpus and bilingual dictionary, and design different transfer learning strategies accordingly. 
Experimentally, we successfully use English state tracker as the teacher to transfer its knowledge to both Italian and German trackers and achieve promising results.", "phrases": ["xl-nbt", "dialogue state tracking", "different language"], "overall_score": 1.238258340244989, "scores": [0.8026941971073805, 0.5570604715522027, 0.5492620567726724], "rank_score": 0.6363389084774186} -{"id": "ahmad-etal-2019-cross", "title": "Cross-Lingual Dependency Parsing with Unlabeled Auxiliary Languages", "abstract": "Cross-lingual transfer learning has become an important weapon to battle the unavailability of annotated resources for low-resource languages. One of the fundamental techniques to transfer across languages is learning language-agnostic representations, in the form of word embeddings or contextual encodings. In this work, we propose to leverage unannotated sentences from auxiliary languages to help learning language-agnostic representations. Specifically, we explore adversarial training for learning contextual encoders that produce invariant representations across languages to facilitate cross-lingual transfer. We conduct experiments on cross-lingual dependency parsing where we train a dependency parser on a source language and transfer it to a wide range of target languages. Experiments on 28 target languages demonstrate that adversarial training significantly improves the overall transfer performances under several different settings. We conduct a careful analysis to evaluate the language-agnostic representations resulted from adversarial training.", "phrases": ["dependency parsing", "auxiliary language", "cross-lingual transfer", "word embedding", "contextual encoder"], "overall_score": 1.3231824726758423, "scores": [0.8852843450875965, 0.6278544409280611, 0.5926479587436201, 0.5476146707774245, 0.5281799036643552], "rank_score": 0.6363162638402114} -{"id": "zhang-etal-2018-simplifying", "title": "Simplifying Neural Machine Translation with Addition-Subtraction Twin-Gated Recurrent Networks", "abstract": "In this paper, we propose an addition-subtraction twin-gated recurrent network (ATR) to simplify neural machine translation. The recurrent units of ATR are heavily simplified to have the smallest number of weight matrices among units of all existing gated RNNs. With the simple addition and subtraction operation, we introduce a twin-gated mechanism to build input and forget gates which are highly correlated. Despite this simplification, the essential non-linearities and capability of modeling long-distance dependencies are preserved. Additionally, the proposed ATR is more transparent than LSTM/GRU due to the simplification. Forward self-attention can be easily established in ATR, which makes the proposed network interpretable. Experiments on WMT14 translation tasks demonstrate that ATR-based neural machine translation can yield competitive performance on English-German and English-French language pairs in terms of both translation quality and speed. 

Further experiments on NIST Chinese-English translation, natural language inference and Chinese word segmentation verify the generality and applicability of ATR on different natural language processing tasks.", "phrases": ["neural machine translation", "recurrent unit", "rnn", "weight matrix"], "overall_score": 0.8818754599011592, "scores": [0.9259214913521735, 0.5704832341543008, 0.5246074549603605, 0.5235425248953179], "rank_score": 0.6361386763405382} -{"id": "rosiger-2018-bashi", "title": "BASHI: A Corpus of Wall Street Journal Articles Annotated with Bridging Links", "abstract": "This paper presents a corpus resource for the anaphoric phenomenon of bridging, named BASHI. The corpus consisting of 50 Wall Street Journal (WSJ) articles adds bridging anaphors and their antecedents to the other gold annotations that have been created as part of the OntoNotes project (Weischedel et al., 2011). Bridging anaphors are context-dependent expressions that do not refer to the same entity as their antecedent, but to a related entity. Bridging resolution is an under-researched area of NLP, where the lack of annotated training data makes the application of statistical models difficult. Thus, we believe that the corpus is a valuable resource for researchers interested in anaphoric phenomena going beyond coreference, as it can be combined with other corpora to create a larger corpus resource. The corpus contains 57,709 tokens and 459 bridging pairs and is available for download in an offset-based format and a CoNLL-12 style bridging column that can be merged with the other annotation layers in OntoNotes. The paper also reviews previous annotation efforts and different definitions of bridging and reports challenges with respect to the bridging annotation.", "phrases": ["ontonotes", "bridging annotation", "bashi", "anaphora resolution"], "overall_score": 1.3224385133733076, "scores": [0.8521091159667679, 0.593802459276579, 0.5530481442633808, 0.5448742606589316], "rank_score": 0.6359584950414148} -{"id": "fort-etal-2020-rigor", "title": "Rigor Mortis: Annotating MWEs with a Gamified Platform", "abstract": "We present here Rigor Mortis, a gamified crowdsourcing platform designed to evaluate the intuition of the speakers, then train them to annotate multi-word expressions (MWEs) in French corpora. We previously showed that the speakers' intuition is reasonably good (65% in recall on non-fixed MWE). We detail here the annotation results, after a training phase using some of the tests developed in the PARSEME-FR project.", "phrases": ["mwes", "french corpora", "rigor mortis"], "overall_score": 0.881125298083454, "scores": [0.8331059944542458, 0.5527158907103442, 0.5209707617556233], "rank_score": 0.6355975489734044} -{"id": "subramanian-etal-2020-obtaining", "title": "Obtaining Faithful Interpretations from Compositional Neural Networks", "abstract": "Neural module networks (NMNs) are a popular approach for modeling compositionality: they achieve high accuracy when applied to problems in language and vision, while reflecting the compositional structure of the problem in the network architecture. However, prior work implicitly assumed that the structure of the network modules, describing the abstract reasoning process, provides a faithful explanation of the model's reasoning; that is, that all modules perform their intended behaviour. 
In this work, we propose and conduct a systematic evaluation of the intermediate outputs of NMNs on NLVR2 and DROP, two datasets which require composing multiple reasoning steps. We find that the intermediate outputs differ from the expected output, illustrating that the network structure does not provide a faithful explanation of model behaviour. To remedy that, we train the model with auxiliary supervision and propose particular choices for module architecture that yield much better faithfulness, at a minimal cost to accuracy.", "phrases": ["faithfulness", "neural module network", "modeling compositionality", "explanation", "intermediate output"], "overall_score": 1.5234622182333875, "scores": [0.8521775450697643, 0.6016369831838579, 0.5864561109453609, 0.5844756864160384, 0.5519191349567616], "rank_score": 0.6353330921143566} -{"id": "liu-etal-2018-entity", "title": "Entity-Duet Neural Ranking: Understanding the Role of Knowledge Graph Semantics in Neural Information Retrieval", "abstract": "This paper presents the Entity-Duet Neural Ranking Model (EDRM), which introduces knowledge graphs to neural search systems. EDRM represents queries and documents by their words and entity annotations. The semantics from knowledge graphs are integrated in the distributed representations of their entities, while the ranking is conducted by interaction-based neural ranking networks. The two components are learned end-to-end, making EDRM a natural combination of entity-oriented search and neural information retrieval. Our experiments on a commercial search log demonstrate the effectiveness of EDRM. Our analyses reveal that knowledge graph semantics significantly improve the generalization ability of neural ranking models.", "phrases": ["knowledge graph", "neural information retrieval", "entity annotation"], "overall_score": 0.8805870708683079, "scores": [0.8195827955940738, 0.5543170454539322, 0.5317280592709629], "rank_score": 0.635209300106323} -{"id": "benajiba-etal-2008-arabic", "title": "Arabic Named Entity Recognition using Optimized Feature Sets", "abstract": "The Named Entity Recognition (NER) task has been garnering significant attention in NLP as it helps improve the performance of many natural language processing applications. In this paper, we investigate the impact of using different sets of features in two discriminative machine learning frameworks, namely, Support Vector Machines and Conditional Random Fields using Arabic data. We explore lexical, contextual and morphological features on eight standardized data-sets of different genres. We measure the impact of the different features in isolation, rank them according to their impact for each named entity class and incrementally combine them in order to infer the optimal machine learning approach and feature set. Our system yields an F\u03b2=1 measure of 83.5 on ACE 2003 Broadcast News data.", "phrases": ["entity recognition", "feature set", "arabic ner", "crf sequence labeling"], "overall_score": 1.6290347357022599, "scores": [0.8003830683297019, 0.6087819632214301, 0.5825444264885704, 0.5487457458236176], "rank_score": 0.6351138009658299} -{"id": "naert-etal-2020-lsf", "title": "LSF-ANIMAL: A Motion Capture Corpus in French Sign Language Designed for the Animation of Signing Avatars", "abstract": "Signing avatars allow deaf people to access information in their preferred language using an interactive visualization of the sign language spatio-temporal content. 

However, avatars are often procedurally animated, resulting in robotic and unnatural movements, which are therefore rejected by the community for which they are intended. To overcome this lack of authenticity, solutions in which the avatar is animated from motion capture data are promising. Yet, the initial data set drastically limits the range of signs that the avatar can produce. Therefore, it can be interesting to enrich the initial corpus with new content by editing the captured motions. For this purpose, we collected the LSF-ANIMAL corpus, a French Sign Language (LSF) corpus composed of captured isolated signs and full sentences that can be used both to study LSF features and to generate new signs and utterances. This paper presents the precise definition and content of this corpus, technical considerations relative to the motion capture process (including the marker set definition), the post-processing steps required to obtain data in a standard motion format and the annotation scheme used to label the data. The quality of the corpus with respect to intelligibility, accuracy and realism is perceptually evaluated by 41 participants including native LSF signers.", "phrases": ["sign language", "avatar", "lsf-animal corpus"], "overall_score": 0.6976479877019934, "scores": [0.8453529380909842, 0.5346545512102562, 0.5250722050496553], "rank_score": 0.6350265647836318} -{"id": "lazaridou-etal-2020-multi", "title": "Multi-agent Communication meets Natural Language: Synergies between Functional and Structural Language Learning", "abstract": "We present a method for combining multi-agent communication and traditional data-driven approaches to natural language learning, with an end goal of teaching agents to communicate with humans in natural language. Our starting point is a language model that has been trained on generic, not task-specific language data. We then place this model in a multi-agent self-play environment that generates task-specific rewards used to adapt or modulate the model, turning it into a task-conditional language model. We introduce a new way for combining the two types of learning based on the idea of reranking language model samples, and show that this method outperforms others in communicating with humans in a visual referential communication task. Finally, we present a taxonomy of different types of language drift that can occur alongside a set of measures to detect them.", "phrases": ["language data", "communication task", "multi-agent communication"], "overall_score": 1.4613890636522922, "scores": [0.820049632080338, 0.5580805337537041, 0.5258894529398093], "rank_score": 0.6346732062579504} -{"id": "banerjee-etal-2019-hierarchical", "title": "Hierarchical Transfer Learning for Multi-label Text Classification", "abstract": "Multi-Label Hierarchical Text Classification (MLHTC) is the task of categorizing documents into one or more topics organized in an hierarchical taxonomy. MLHTC can be formulated by combining multiple binary classification problems with an independent classifier for each category. We propose a novel transfer learning based strategy, HTrans, where binary classifiers at lower levels in the hierarchy are initialized using parameters of the parent classifier and fine-tuned on the child category classification task. In HTrans, we use a Gated Recurrent Unit (GRU)-based deep learning architecture coupled with attention. 
Compared to binary classifiers trained from scratch, our HTrans approach results in significant improvements of 1% on micro-F1 and 3% on macro-F1 on the RCV1 dataset. Our experiments also show that binary classifiers trained from scratch are significantly better than single multi-label models.", "phrases": ["multi-label text classification", "htrans", "low level", "transfer parameter", "parent model"], "overall_score": 1.4613408925745681, "scores": [1.0020424246815671, 0.5679282035051532, 0.5471687189182506, 0.5285687997965031, 0.5275532822220639], "rank_score": 0.6346522858247076} -{"id": "sugawara-etal-2018-makes", "title": "What Makes Reading Comprehension Questions Easier?", "abstract": "A challenge in creating a dataset for machine reading comprehension (MRC) is to collect questions that require a sophisticated understanding of language to answer beyond using superficial cues. In this work, we investigate what makes questions easier across recent 12 MRC datasets with three question styles (answer extraction, description, and multiple choice). We propose to employ simple heuristics to split each dataset into easy and hard subsets and examine the performance of two baseline models for each of the subsets. We then manually annotate questions sampled from each subset with both validity and requisite reasoning skills to investigate which skills explain the difference between easy and hard questions. From this study, we observed that (i) the baseline performances for the hard subsets remarkably degrade compared to those of entire datasets, (ii) hard questions require knowledge inference and multiple-sentence reasoning in comparison with easy questions, and (iii) multiple-choice questions tend to require a broader range of reasoning skills than answer extraction and description questions. These results suggest that one might overestimate recent advances in MRC.", "phrases": ["superficial cue", "reasoning skill", "easy question"], "overall_score": 1.8682172541968676, "scores": [0.8260018088549285, 0.5567801093772579, 0.5206882512115217], "rank_score": 0.634490056481236} -{"id": "tsvetkov-etal-2016-polyglot", "title": "Polyglot Neural Language Models: A Case Study in Cross-Lingual Phonetic Representation Learning", "abstract": "We introduce polyglot language models, recurrent neural network models trained to predict symbol sequences in many different languages using shared representations of symbols and conditioning on typological information about the language to be predicted. We apply these to the problem of modeling phone sequences---a domain in which universal symbol inventories and cross-linguistically shared feature representations are a natural fit. 
Intrinsic evaluation on held-out perplexity, qualitative analysis of the learned representations, and extrinsic evaluation in two downstream applications that make use of phonetic features show (i) that polyglot models better generalize to held-out data than comparable monolingual models and (ii) that polyglot phonetic feature representations are of higher quality than those learned monolingually.", "phrases": ["language model", "phonetic representation learning", "polyglot language model", "machine polyglotism"], "overall_score": 1.5765625404461712, "scores": [0.9283128741122911, 0.5678233878791829, 0.520968034979386, 0.5207174607690941], "rank_score": 0.6344554394349886} -{"id": "zhong-etal-2017-time", "title": "Time Expression Analysis and Recognition Using Syntactic Token Types and General Heuristic Rules", "abstract": "Extracting time expressions from free text is a fundamental task for many applications. We analyze the time expressions from four datasets and find that only a small group of words are used to express time information, and the words in time expressions demonstrate similar syntactic behaviour. Based on the findings, we propose a type-based approach, named SynTime, to recognize time expressions. Specifically, we define three main syntactic token types, namely time token, modifier, and numeral, to group time-related regular expressions over tokens. On the types we design general heuristic rules to recognize time expressions. In recognition, SynTime first identifies the time tokens from raw text, then searches their surroundings for modifiers and numerals to form time segments, and finally merges the time segments to time expressions. As a light-weight rule-based tagger, SynTime runs in real time, and can be easily expanded by simply adding keywords for the text of different types and of different domains. Experiment on benchmark datasets and tweets data shows that SynTime outperforms state-of-the-art methods.", "phrases": ["recognition", "token type", "syntime", "time expression"], "overall_score": 1.1367040979989411, "scores": [0.7971870386908299, 0.6144023861388077, 0.5852112136486561, 0.5408259068719441], "rank_score": 0.6344066363375594} -{"id": "el-kishky-etal-2020-ccaligned", "title": "CCAligned: A Massive Collection of Cross-Lingual Web-Document Pairs", "abstract": "Cross-lingual document alignment aims to identify pairs of documents in two distinct languages that are of comparable content or translations of each other. In this paper, we exploit the signals embedded in URLs to label web documents at scale with an average precision of 94.5% across different language pairs. We mine sixty-eight snapshots of the Common Crawl corpus and identify web document pairs that are translations of each other. We release a new web dataset consisting of over 392 million URL pairs from Common Crawl covering documents in 8144 language pairs of which 137 pairs include English. In addition to curating this massive dataset, we introduce baseline methods that leverage cross-lingual representations to identify aligned documents based on their textual content. Finally, we demonstrate the value of this parallel documents dataset through a downstream task of mining parallel sentences and measuring the quality of machine translations from models trained on this mined data. 
Our objective in releasing this dataset is to foster new research in cross-lingual NLP across a variety of low, medium, and high-resource languages.", "phrases": ["web", "average precision", "parallel sentence", "parallel data"], "overall_score": 1.4605056216543308, "scores": [0.9020896018306056, 0.5578914359531261, 0.5428880520588029, 0.5342890392500828], "rank_score": 0.6342895322731543} -{"id": "ivanova-etal-2013-survey", "title": "Survey on parsing three dependency representations for English", "abstract": "In this paper we focus on practical issues of data representation for dependency parsing. We carry out an experimental comparison of (a) three syntactic dependency schemes; (b) three data-driven dependency parsers; and (c) the influence of two different approaches to lexical category disambiguation (aka tagging) prior to parsing. Comparing parsing accuracies in various setups, we study the interactions of these three aspects and analyze which configurations are easier to learn for a dependency parser.", "phrases": ["dependency representation", "state-of-the-art parser", "accuracy level", "complement", "same data set"], "overall_score": 1.459919238529955, "scores": [0.9393237205029088, 0.5927979314366719, 0.5518044864104202, 0.5472768810423304, 0.5389713271974526], "rank_score": 0.6340348693179567} -{"id": "razmara-sarkar-2013-ensemble", "title": "Ensemble Triangulation for Statistical Machine Translation", "abstract": "State-of-the-art statistical machine translation systems rely heavily on training data and insufficient training data usually results in poor translation quality. One solution to alleviate this problem is triangulation. Triangulation uses a third language as a pivot through which another source-target translation system can be built. In this paper, we dynamically create multiple such triangulated systems and combine them using a novel approach called ensemble decoding. Experimental results of this approach show significant improvements in the BLEU score over the direct source-target system. Our approach also outperforms a strong linear mixture baseline.", "phrases": ["triangulation", "pivot", "phrase coverage", "different system", "increase"], "overall_score": 0.8785649959714498, "scores": [0.843059843219784, 0.6107732122261188, 0.5887622199758755, 0.5751753550220414, 0.5509827765227872], "rank_score": 0.6337506813933214} -{"id": "bowman-dahl-2021-will", "title": "What Will it Take to Fix Benchmarking in Natural Language Understanding?", "abstract": "Evaluation for many natural language understanding (NLU) tasks is broken: Unreliable and biased systems score so highly on standard benchmarks that there is little room for researchers who develop better systems to demonstrate their improvements. The recent trend to abandon IID benchmarks in favor of adversarially-constructed, out-of-distribution test sets ensures that current models will perform poorly, but ultimately only obscures the abilities that we want our benchmarks to measure. In this position paper, we lay out four criteria that we argue NLU benchmarks should meet. We argue most current benchmarks fail at these criteria, and that adversarial data collection does not meaningfully address the causes of these failures. 

Instead, restoring a healthy evaluation ecosystem will require significant progress in the design of benchmark datasets, the reliability with which they are annotated, their size, and the ways they handle social bias.", "phrases": ["natural language understanding", "biased system score", "little room", "benchmark dataset"], "overall_score": 1.4590161283317227, "scores": [0.9294923287793007, 0.5412649178461505, 0.5401845773638879, 0.5236287901799164], "rank_score": 0.6336426535423139} -{"id": "bhattacharja-2010-benglish", "title": "Benglish Verbs: A Case of Code-mixing in Bengali", "abstract": "In this article, we show how grammar can account for Benglish verbs, a particular type of complex predicate, which are constituted of an English word and a Bengali verb (e.g. /EksiDenT kOra/ \u2018to have an accident\u2019, /in kOra/ \u2018to get/come/put in\u2019 or /kOnfuz kOra/ \u2018to confuse\u2019). We analyze these verbs in the light of a couple of models (e.g. Kageyama, 1991; Lieber, 1992; Matsumoto, 1996) which claim that complex predicates are necessarily formed in syntax. However, Benglish verbs like /in kOra/ or /kOnfuz kOra/ are problematic for these approaches because it is unclear how preposition in or flexional verb confuse can appear as the arguments of the verb /kOra/ \u2018to do\u2019 in an underlying syntactic structure. We claim that all Benglish verbs can be satisfactorily handled in Morphology in the light of Whole Word Morphology (Ford et al., 1997 and Singh, 2006).", "phrases": ["bengali", "complex predicate", "english word"], "overall_score": 0.6959957698541341, "scores": [0.813608304913761, 0.5458434230373367, 0.5411162259116343], "rank_score": 0.6335226512875773} -{"id": "yanaka-etal-2019-help", "title": "HELP: A Dataset for Identifying Shortcomings of Neural Models in Monotonicity Reasoning", "abstract": "Large crowdsourced datasets are widely used for training and evaluating neural models on natural language inference (NLI). Despite these efforts, neural models have a hard time capturing logical inferences, including those licensed by phrase replacements, so-called monotonicity reasoning. Since no large dataset has been developed for monotonicity reasoning, it is still unclear whether the main obstacle is the size of datasets or the model architectures themselves. To investigate this issue, we introduce a new dataset, called HELP, for handling entailments with lexical and logical phenomena. We add it to training data for the state-of-the-art neural models and evaluate them on test sets for monotonicity phenomena. The results showed that our data augmentation improved the overall accuracy. We also find that the improvement is better on monotonicity inferences with lexical replacements than on downward inferences with disjunction and modification. This suggests that some types of inferences can be improved by our data augmentation while others are immune to it.", "phrases": ["monotonicity reasoning", "natural language inference", "entailment", "data augmentation"], "overall_score": 1.1349198174914972, "scores": [0.9346002623658348, 0.538006833390484, 0.537279197283038, 0.5237569486630698], "rank_score": 0.6334108104256067} -{"id": "diab-bhutada-2009-verb", "title": "Verb Noun Construction MWE Token Classification", "abstract": "We address the problem of classifying multi-word expression tokens in running text. We focus our study on Verb-Noun Constructions (VNC) that vary in their idiomaticity depending on context. 
VNC tokens are classified as either idiomatic or literal. We present a supervised learning approach to the problem. We experiment with different features. Our approach yields the best results to date on MWE classification combining different linguistically motivated features; the overall performance yields an F-measure of 84.58%, corresponding to an F-measure of 89.96% for idiomaticity identification and classification and 62.03% for literal identification and classification.", "phrases": ["mwe", "running text", "verb-noun constructions", "vnc", "idiomatic use"], "overall_score": 1.75571052451898, "scores": [0.8789608269473766, 0.6192596422280714, 0.5666812404688747, 0.5573585990874815, 0.5439332749683121], "rank_score": 0.6332387167400233} -{"id": "choi-etal-2010-propbank-instance", "title": "Propbank Instance Annotation Guidelines Using a Dedicated Editor, Jubilee", "abstract": "This paper gives guidelines of how to annotate Propbank instances using a dedicated editor, Jubilee. Propbank is a corpus in which the arguments of each verb predicate are annotated with their semantic roles in relation to the predicate. Propbank annotation also requires the choice of a sense ID for each predicate. Jubilee facilitates this annotation process by displaying several resources of syntactic and semantic information simultaneously: the syntactic structure of a sentence is displayed in the main frame, the available senses with their corresponding argument structures are displayed in another frame, all available Propbank arguments are displayed for the annotator's choice, and example annotations of each sense of the predicate are available to the annotator for viewing. Easy access to each of these resources allows the annotator to quickly absorb and apply the necessary syntactic and semantic information pertinent to each predicate for consistent and efficient annotation. Jubilee has been successfully adapted to many Propbank projects in several universities. The tool runs platform independently, is light enough to run as an X11 application and supports multiple languages such as Arabic, Chinese, English, Hindi and Korean.", "phrases": ["annotator", "dedicated editor", "predicate"], "overall_score": 0.8776622836230045, "scores": [0.7900627780211821, 0.5724209805040302, 0.536814777712075], "rank_score": 0.6330995120790958} -{"id": "tanvir-etal-2021-estbert", "title": "EstBERT: A Pretrained Language-Specific BERT for Estonian", "abstract": "This paper presents EstBERT, a large pretrained transformer-based language-specific BERT model for Estonian. Recent work has evaluated multilingual BERT models on Estonian tasks and found them to outperform the baselines. Still, based on existing studies on other languages, a language-specific BERT model is expected to improve over the multilingual ones. We first describe the EstBERT pretraining process and then present the models' results based on the finetuned EstBERT for multiple NLP tasks, including POS and morphological tagging, dependency parsing, named entity recognition and text classification.
The evaluation results show that the models based on EstBERT outperform multilingual BERT models on five tasks out of seven, providing further evidence towards a view that training language-specific BERT models is still useful, even when multilingual models are available.", "phrases": ["estonian", "bert model", "monolingual model"], "overall_score": 0.8774995764209228, "scores": [0.7846002395794236, 0.5751304871585003, 0.5392157041890232], "rank_score": 0.6329821436423156} -{"id": "yahya-etal-2012-natural", "title": "Natural Language Questions for the Web of Data", "abstract": "The Linked Data initiative comprises structured databases in the Semantic-Web data model RDF. Exploring this heterogeneous data by structured query languages is tedious and error-prone even for skilled users. To ease the task, this paper presents a methodology for translating natural language questions into structured SPARQL queries over linked-data sources. \n \nOur method is based on an integer linear program to solve several disambiguation tasks jointly: the segmentation of questions into phrases; the mapping of phrases to semantic entities, classes, and relations; and the construction of SPARQL triple patterns. Our solution harnesses the rich type system provided by knowledge bases in the web of linked data, to constrain our semantic-coherence objective function. We present experiments on both the question translation and the resulting query answering.", "phrases": ["web", "different schema", "knowledge basis"], "overall_score": 1.5712227175736555, "scores": [0.8394089377267796, 0.5340481327009958, 0.5234625394590237], "rank_score": 0.632306536628933} -{"id": "kolak-resnik-2005-ocr", "title": "OCR Post-Processing for Low Density Languages", "abstract": "We present a lexicon-free post-processing method for optical character recognition (OCR), implemented using weighted finite state machines. We evaluate the technique in a number of scenarios relevant for natural language processing, including creation of new OCR capabilities for low density languages, improvement of OCR performance for a native commercial system, acquisition of knowledge from a foreign-language dictionary, creation of a parallel text, and machine translation from OCR output.", "phrases": ["low density language", "finite state machine", "sequence-to-sequence model", "cebuano"], "overall_score": 1.1328560974499406, "scores": [0.925405944726761, 0.5398754187792855, 0.5380943381188267, 0.5256604037358755], "rank_score": 0.6322590263401872} -{"id": "bae-etal-2019-summary", "title": "Summary Level Training of Sentence Rewriting for Abstractive Summarization", "abstract": "As an attempt to combine extractive and abstractive summarization, Sentence Rewriting models adopt the strategy of extracting salient sentences from a document first and then paraphrasing the selected ones to generate a summary. However, the existing models in this framework mostly rely on sentence-level rewards or suboptimal labels, causing a mismatch between a training objective and evaluation metric. In this paper, we present a novel training signal that directly maximizes summary-level ROUGE scores through reinforcement learning. In addition, we incorporate BERT into our model, making good use of its ability on natural language understanding. In extensive experiments, we show that a combination of our proposed model and training procedure obtains new state-of-the-art performance on both CNN/Daily Mail and New York Times datasets.
We also demonstrate that it generalizes better on the DUC-2002 test set.", "phrases": ["abstractive summarization", "scoring", "extract-then-rewrite architecture", "language model"], "overall_score": 1.455765971873635, "scores": [0.908176933015715, 0.5507926871672136, 0.5413936960511759, 0.5285611978748719], "rank_score": 0.6322311285272442} -{"id": "jagannatha-yu-2016-bidirectional", "title": "Bidirectional RNN for Medical Event Detection in Electronic Health Records", "abstract": "Sequence labeling for extraction of medical events and their attributes from unstructured text in Electronic Health Record (EHR) notes is a key step towards semantic understanding of EHRs. It has important applications in health informatics including pharmacovigilance and drug surveillance. The state of the art supervised machine learning models in this domain are based on Conditional Random Fields (CRFs) with features calculated from fixed context windows. In this application, we explored recurrent neural network frameworks and show that they significantly out-performed the CRF models.", "phrases": ["rnn", "medical event", "unstructured text"], "overall_score": 1.314237799598386, "scores": [0.7885036197631277, 0.5739690457405737, 0.5335716905257137], "rank_score": 0.6320147853431384} -{"id": "yong-torrent-2020-semi", "title": "Semi-supervised Deep Embedded Clustering with Anomaly Detection for Semantic Frame Induction", "abstract": "Although FrameNet is recognized as one of the most fine-grained lexical databases, its coverage of lexical units is still limited. To tackle this issue, we propose a two-step frame induction process: for a set of lexical units not yet present in Berkeley FrameNet data release 1.7, first remove those that cannot fit into any existing semantic frame in FrameNet; then, assign the remaining lexical units to their correct frames. We also present the Semi-supervised Deep Embedded Clustering with Anomaly Detection (SDEC-AD) model\u2014an algorithm that maps high-dimensional contextualized vector representations of lexical units to a low-dimensional latent space for better frame prediction and uses reconstruction error to identify lexical units that cannot evoke frames in FrameNet. SDEC-AD outperforms the state-of-the-art methods in both steps of the frame induction process. Empirical results also show that definitions provide contextual information for representing and characterizing the frame membership of lexical units.", "phrases": ["anomaly detection", "low-dimensional latent space", "well frame prediction"], "overall_score": 0.8761576071284479, "scores": [0.7851214876460596, 0.5601730453122965, 0.5507478193036726], "rank_score": 0.6320141174206763} -{"id": "uchendu-etal-2021-turingbench-benchmark", "title": "TURINGBENCH: A Benchmark Environment for Turing Test in the Age of Neural Text Generation", "abstract": "Recent progress in generative language models has enabled machines to generate astonishingly realistic texts. While there are many legitimate applications of such models, there is also a rising need to distinguish machine-generated texts from human-written ones (e.g., fake news detection). However, to our best knowledge, there is currently no benchmark environment with datasets and tasks to systematically study the so-called \u201cTuring Test\u201d problem for neural text generation methods.
In this work, we present the TURINGBENCH benchmark environment, which is comprised of (1) a dataset with 200K human- or machine-generated samples across 20 labels (Human, GPT-1, GPT-2_small, GPT-2_medium, GPT-2_large, GPT-2_xl, GPT-2_PyTorch, GPT-3, GROVER_base, GROVER_large, GROVER_mega, CTRL, XLM, XLNET_base, XLNET_large, FAIR_wmt19, FAIR_wmt20, TRANSFORMER_XL, PPLM_distil, PPLM_gpt2), (2) two benchmark tasks\u2013i.e., Turing Test (TT) and Authorship Attribution (AA), and (3) a website with leaderboards. Our preliminary experimental results using TURINGBENCH show that GPT-3 and FAIR_wmt20 are the current winners, among all language models tested, in generating the most human-like indistinguishable texts with the lowest F1 score by five state-of-the-art TT detection models. The TURINGBENCH is available at: ", "phrases": ["language model", "machine-generated text", "turingbench"], "overall_score": 0.8760858995014583, "scores": [0.7850819493906187, 0.5774174261257708, 0.5333877983888615], "rank_score": 0.6319623913017504} -{"id": "agarwal-etal-2009-contextual", "title": "Contextual Phrase-Level Polarity Analysis Using Lexical Affect Scoring and Syntactic N-Grams", "abstract": "We present a classifier to predict contextual polarity of subjective phrases in a sentence. Our approach features lexical scoring derived from the Dictionary of Affect in Language (DAL) and extended through WordNet, allowing us to automatically score the vast majority of words in our input avoiding the need for manual labeling. We augment lexical scoring with n-gram analysis to capture the effect of context. We combine DAL scores with syntactic constituents and then extract n-grams of constituents from all sentences. We also use the polarity of all syntactic constituents within the sentence as features. Our results show significant improvement over a majority class baseline as well as a more difficult baseline consisting of lexical n-grams.", "phrases": ["polarity", "n-gram", "subjectivity analysis"], "overall_score": 1.3137442941926707, "scores": [0.8055107710439564, 0.5509431143229717, 0.5388784928610096], "rank_score": 0.6317774594093125} -{"id": "zhang-etal-2007-chunk", "title": "Chunk-Level Reordering of Source Language Sentences with Automatically Learned Rules for Statistical Machine Translation", "abstract": "In this paper, we describe a source-side reordering method based on syntactic chunks for phrase-based statistical machine translation. First, we shallow parse the source language sentences. Then, reordering rules are automatically learned from source-side chunks and word alignments. During translation, the rules are used to generate a reordering lattice for each sentence. Experimental results are reported for a Chinese-to-English task, showing an improvement of 0.5%\u20131.8% BLEU score absolute on various test sets and better computational efficiency than reordering during decoding.
The experiments also show that the reordering at the chunk-level performs better than at the POS-level.", "phrases": ["statistical machine translation", "chunk level", "source-reordering lattice", "smt system", "well computational efficiency"], "overall_score": 1.5146994756056673, "scores": [0.9022508426136145, 0.6124003126587527, 0.5941851168752144, 0.5265590830800253, 0.5229983677768094], "rank_score": 0.6316787446008832} -{"id": "goldberg-zhu-2006-seeing", "title": "Seeing stars when there aren't many stars: Graph-based semi-supervised learning for sentiment categorization", "abstract": "We present a graph-based semi-supervised learning algorithm to address the sentiment analysis task of rating inference. Given a set of documents (e.g., movie reviews) and accompanying ratings (e.g., \"4 stars\"), the task calls for inferring numerical ratings for unlabeled documents based on the perceived sentiment expressed by their text. In particular, we are interested in the situation where labeled data is scarce. We place this task in the semi-supervised setting and demonstrate that considering unlabeled reviews in the learning process can improve rating-inference performance. We do so by creating a graph on both labeled and unlabeled data to encode certain assumptions for this task. We then solve an optimization problem to obtain a smooth rating function over the whole graph. When only limited labeled data is available, this method achieves significantly better predictive accuracy over other methods that ignore the unlabeled examples during training.", "phrases": ["star", "semi-supervised learning algorithm", "rating inference", "sentiment classification", "graph-based method"], "overall_score": 1.7497570962110598, "scores": [0.8384142841220349, 0.6094660122238708, 0.6066837089068229, 0.5606081726961042, 0.5402851788811279], "rank_score": 0.6310914713659922} -{"id": "oya-etal-2014-template", "title": "A Template-based Abstractive Meeting Summarization: Leveraging Summary and Source Text Relationships", "abstract": "In this paper, we present an automatic abstractive summarization system of meeting conversations. Our system extends a novel multi-sentence fusion algorithm in order to generate abstract templates. It also leverages the relationship between summaries and their source meeting transcripts to select the best templates for generating abstractive summaries of meetings. Our manual and automatic evaluation results demonstrate the success of our system in achieving higher scores both in readability and informativeness.", "phrases": ["meeting", "abstractive summarization system", "multi-sentence fusion algorithm", "readability"], "overall_score": 1.7089646694921503, "scores": [0.8518552407205311, 0.6139538314477708, 0.5293877415931074, 0.5290764346393483], "rank_score": 0.6310683121001894} -{"id": "chou-etal-2006-semi", "title": "A Semi-Automatic Method for Annotating a Biomedical Proposition Bank", "abstract": "In this paper, we present a semiautomatic approach for annotating semantic information in biomedical texts. The information is used to construct a biomedical proposition bank called BioProp. Like PropBank in the newswire domain, BioProp contains annotations of predicate argument structures and semantic roles in a treebank schema. To construct BioProp, a semantic role labeling (SRL) system trained on PropBank is used to annotate BioProp. Incorrect tagging results are then corrected by human annotators. 
To suit the needs in the biomedical domain, we modify the PropBank annotation guidelines and characterize semantic roles as components of biological events. The method can substantially reduce annotation efforts, and we introduce a measure of an upper bound for the saving of annotation efforts. Thus far, the method has been applied experimentally to a 4,389-sentence treebank corpus for the construction of BioProp. Inter-annotator agreement measured by kappa statistic reaches .95 for combined decision of role identification and classification when all argument labels are considered. In addition, we show that, when trained on BioProp, our biomedical SRL system called BIOSMILE achieves an F-score of 87%.", "phrases": ["annotator", "biomedical proposition bank", "propbank", "predicate argument structure"], "overall_score": 1.2277883351712757, "scores": [0.8775632302829146, 0.5708515968350859, 0.5523806624923214, 0.5230380712914163], "rank_score": 0.6309583902254345} -{"id": "vexler-minkov-2016-multi", "title": "Multi-source named entity typing for social media", "abstract": "Typed lexicons that encode knowledge about the semantic types of an entity name, e.g., that \u2018Paris\u2019 denotes a geolocation, product, or person, have proven useful for many text processing tasks. While lexicons may be derived from large-scale knowledge bases (KBs), KBs are inherently imperfect, in particular they lack coverage with respect to long tail entity names. We infer the types of a given entity name using multi-source learning, considering information obtained by alignment to the Freebase knowledge base, Web-scale distributional patterns, and global semi-structured contexts retrieved by means of Web search. Evaluation in the challenging domain of social media shows that multi-source learning improves performance compared with rule-based KB lookups, boosting typing results for some semantic categories.", "phrases": ["multi-source learning", "web search", "social medium"], "overall_score": 0.6931049786765694, "scores": [0.8014033784199521, 0.5471416164150434, 0.5441290244503175], "rank_score": 0.6308913397617711} -{"id": "galley-etal-2015-deltableu", "title": "deltaBLEU: A Discriminative Metric for Generation Tasks with Intrinsically Diverse Targets", "abstract": "We introduce Discriminative BLEU (\u2206BLEU), a novel metric for intrinsic evaluation of generated text in tasks that admit a diverse range of possible outputs. Reference strings are scored for quality by human raters on a scale of [\u22121, +1] to weight multi-reference BLEU. In tasks involving generation of conversational responses, \u2206BLEU correlates reasonably with human judgments and outperforms sentence-level and IBM BLEU in terms of both Spearman\u2019s \u03c1 and Kendall\u2019s \u03c4.", "phrases": ["discriminative bleu", "human rater", "deltableu", "multiple reference"], "overall_score": 1.6176045090577464, "scores": [0.8487289349615184, 0.5908382882474611, 0.5495384527276357, 0.5335242611451012], "rank_score": 0.6306574842704291} -{"id": "cheng-etal-2015-open", "title": "Open-Domain Name Error Detection using a Multi-Task RNN", "abstract": "Out-of-vocabulary name errors in speech recognition create significant problems for downstream language processing, but the fact that they are rare poses challenges for automatic detection, particularly in an open-domain scenario.
To address this problem, a multi-task recurrent neural network language model for sentence-level name detection is proposed for use in combination with out-of-vocabulary word detection. The sentence-level model is also effective for leveraging external text data. Experiments show a 26% improvement in name-error detection F-score over a system using n-gram lexical features.", "phrases": ["name error", "multi-task rnn", "network language model", "next word"], "overall_score": 1.3853904525196323, "scores": [0.8909524452743355, 0.5587741264729753, 0.5431546150933952, 0.5291922813146424], "rank_score": 0.6305183670388371} -{"id": "djokic-etal-2019-modeling", "title": "Modeling Affirmative and Negated Action Processing in the Brain with Lexical and Compositional Semantic Models", "abstract": "Recent work shows that distributional semantic models can be used to decode patterns of brain activity associated with individual words and sentence meanings. However, it is yet unclear to what extent such models can be used to study and decode fMRI patterns associated with specific aspects of semantic composition such as the negation function. In this paper, we apply lexical and compositional semantic models to decode fMRI patterns associated with negated and affirmative sentences containing hand-action verbs. Our results show reduced decoding (correlation) of sentences where the verb is in the negated context, as compared to the affirmative one, within brain regions implicated in action-semantic processing. This supports behavioral and brain imaging studies, suggesting that negation involves reduced access to aspects of the affirmative mental representation. The results pave the way for testing alternate semantic models of negation against human semantic processing in the brain.", "phrases": ["negation", "brain", "affirmative mental representation"], "overall_score": 0.6925718856042522, "scores": [0.7951296860197822, 0.5706511374668525, 0.52543746912108], "rank_score": 0.6304060975359049} -{"id": "francois-2009-combining", "title": "Combining a Statistical Language Model with Logistic Regression to Predict the Lexical and Syntactic Difficulty of Texts for FFL", "abstract": "Reading is known to be an essential task in language learning, but finding the appropriate text for every learner is far from easy. In this context, automatic procedures can support the teacher's work. Some tools exist for English, but at present there are none for French as a foreign language (FFL). In this paper, we present an original approach to assessing the readability of FFL texts using NLP techniques and extracts from FFL textbooks as our corpus. Two logistic regression models based on lexical and grammatical features are explored and give quite good predictions on new texts. The results show a slight superiority for multinomial logistic regression over the proportional odds model.", "phrases": ["logistic regression", "foreign language", "readability"], "overall_score": 0.692268369778404, "scores": [0.8256807348713118, 0.5358747009468678, 0.5288340407577684], "rank_score": 0.6301298255253159} -{"id": "horacek-2003-best", "title": "A Best-First Search Algorithm for Generating Referring Expressions", "abstract": "Existing algorithms for generating referential descriptions to sets of objects have serious deficits: while incremental approaches may produce ambiguous and redundant expressions, exhaustive searches are computationally expensive.
Mediating between these extreme control regimes, we propose a best-first searching algorithm for uniquely identifying sets of objects. We incorporate linguistically motivated preferences and several techniques to cut down the search space. Preliminary results show the effectiveness of the new algorithm.", "phrases": ["object", "deficit", "incremental approach", "resp"], "overall_score": 1.1289064917758003, "scores": [0.889787358759743, 0.5500378246121861, 0.5450010144391749, 0.5353926399599452], "rank_score": 0.6300547094427623} -{"id": "nenkova-etal-2008-high", "title": "High Frequency Word Entrainment in Spoken Dialogue", "abstract": "Cognitive theories of dialogue hold that entrainment, the automatic alignment between dialogue partners at many levels of linguistic representation, is key to facilitating both production and comprehension in dialogue. In this paper we examine novel types of entrainment in two corpora---Switchboard and the Columbia Games corpus. We examine entrainment in use of high-frequency words (the most common words in the corpus), and its association with dialogue naturalness and flow, as well as with task success. Our results show that such entrainment is predictive of the perceived naturalness of dialogues and is significantly correlated with task success; in overall interaction flow, higher degrees of entrainment are associated with more overlaps and fewer interruptions.", "phrases": ["frequency word entrainment", "dialogue naturalness", "more overlap", "engagement", "social variable"], "overall_score": 1.5107387350633206, "scores": [0.9264965932494963, 0.63620486263648, 0.535024359908743, 0.5277162277596387, 0.5246928939491294], "rank_score": 0.6300269875006974} -{"id": "shaw-etal-2019-generating", "title": "Generating Logical Forms from Graph Representations of Text and Entities", "abstract": "Structured information about entities is critical for many semantic parsing tasks. We present an approach that uses a Graph Neural Network (GNN) architecture to incorporate information about relevant entities and their relations during parsing. Combined with a decoder copy mechanism, this approach provides a conceptually simple mechanism to generate logical forms with entities. We demonstrate that this approach is competitive with the state-of-the-art across several tasks without pre-training, and outperforms existing approaches when combined with BERT pre-training.", "phrases": ["semantic parsing", "graph neural network", "gnn", "relevant entity"], "overall_score": 1.1281042380434114, "scores": [0.8337276182892189, 0.5719183447509534, 0.5682028229240831, 0.5445790664738475], "rank_score": 0.6296069631095257} -{"id": "benamara-etal-2017-evaluative", "title": "Evaluative Language Beyond Bags of Words: Linguistic Insights and Computational Applications", "abstract": "The study of evaluation, affect, and subjectivity is a multidisciplinary enterprise, including sociology, psychology, economics, linguistics, and computer science. A number of excellent computational linguistics and linguistic surveys of the field exist. Most surveys, however, do not bring the two disciplines together to show how methods from linguistics can benefit computational sentiment analysis systems. In this survey, we show how incorporating linguistic insights, discourse information, and other contextual phenomena, in combination with the statistical exploitation of data, can result in an improvement over approaches that take advantage of only one of these perspectives. 
We first provide a comprehensive introduction to evaluative language from both a linguistic and computational perspective. We then argue that the standard computational definition of the concept of evaluative language neglects the dynamic nature of evaluation, in which the interpretation of a given evaluation depends on linguistic and extra-linguistic contextual factors. We thus propose a dynamic definition that incorporates update functions. The update functions allow for different contextual aspects to be incorporated into the calculation of sentiment for evaluative words or expressions, and can be applied at all levels of discourse. We explore each level and highlight which linguistic aspects contribute to accurate extraction of sentiment. We end the review by outlining what we believe the future directions of sentiment analysis are, and the role that discourse and contextual information need to play.", "phrases": ["linguistic insight", "subjectivity", "computer science", "sentiment analysis"], "overall_score": 1.3833228729388125, "scores": [0.8622963819633594, 0.563162087845882, 0.5591603371436775, 0.5336906771251598], "rank_score": 0.6295773710195197} -{"id": "kirov-etal-2017-rich", "title": "A Rich Morphological Tagger for English: Exploring the Cross-Linguistic Tradeoff Between Morphology and Syntax", "abstract": "A traditional claim in linguistics is that all human languages are equally expressive\u2014able to convey the same wide range of meanings. Morphologically rich languages, such as Czech, rely on overt inflectional and derivational morphology to convey many semantic distinctions. Languages with comparatively limited morphology, such as English, should be able to accomplish the same using a combination of syntactic and contextual cues. We capitalize on this idea by training a tagger for English that uses syntactic features obtained by automatic parsing to recover complex morphological tags projected from Czech. The high accuracy of the resulting model provides quantitative confirmation of the underlying linguistic hypothesis of equal expressivity, and bodes well for future improvements in downstream HLT tasks including machine translation.", "phrases": ["morphology", "tagger", "rich language"], "overall_score": 1.383032671201332, "scores": [0.8054490467454938, 0.5577510069769048, 0.5251358298285258], "rank_score": 0.6294452945169747} -{"id": "echihabi-marcu-2003-noisy", "title": "A Noisy-Channel Approach to Question Answering", "abstract": "We introduce a probabilistic noisy-channel model for question answering and we show how it can be exploited in the context of an end-to-end QA system. Our noisy-channel system outperforms a state-of-the-art rule-based QA system that uses similar resources. We also show that the model we propose is flexible enough to accommodate within one mathematical framework many QA-specific resources and techniques, which range from the exploitation of WordNet, structured, and semi-structured databases to reasoning, and paraphrasing.", "phrases": ["noisy-channel approach", "question generation", "machine translation", "ibm model", "intermediate step"], "overall_score": 1.8854678215824099, "scores": [0.9268598122387429, 0.5753699890046771, 0.5699392009801667, 0.5422981594661322, 0.5324559432671577], "rank_score": 0.6293846209913754} -{"id": "kennedy-szpakowicz-2008-evaluating", "title": "Evaluating Roget\u2019s Thesauri", "abstract": "Roget\u2019s Thesaurus has gone through many revisions since it was first published 150 years ago.
But how do these revisions affect Roget\u2019s usefulness for NLP? We examine the differences in content between the 1911 and 1987 versions of Roget\u2019s, and we test both versions with each other and WordNet on problems such as synonym identification and word relatedness. We also present a novel method for measuring sentence relatedness that can be implemented in either version of Roget\u2019s or in WordNet. Although the 1987 version of the Thesaurus is better, we show that the 1911 version performs surprisingly well and that often the differences between the versions of Roget\u2019s and WordNet are not statistically significant. We hope that this work will encourage others to use the 1911 Roget\u2019s Thesaurus in NLP tasks.", "phrases": ["wordnet", "pseudo-word-sense disambiguation", "cohesion", "alternative source"], "overall_score": 0.872404798224245, "scores": [0.9199820769807118, 0.5408887466697766, 0.5289735194937857, 0.5273838089474361], "rank_score": 0.6293070380229275} -{"id": "huang-etal-2021-shot", "title": "Few-Shot Named Entity Recognition: An Empirical Baseline Study", "abstract": "This paper presents an empirical study to efficiently build named entity recognition (NER) systems when a small amount of in-domain labeled data is available. Based upon recent Transformer-based self-supervised pre-trained language models (PLMs), we investigate three orthogonal schemes to improve model generalization ability in few-shot settings: (1) meta-learning to construct prototypes for different entity types, (2) task-specific supervised pre-training on noisy web data to extract entity-related representations and (3) self-training to leverage unlabeled in-domain data. On 10 public NER datasets, we perform extensive empirical comparisons over the proposed schemes and their combinations with various proportions of labeled data; our experiments show that (i) in the few-shot learning setting, the proposed NER schemes significantly improve or outperform the commonly used baseline, a PLM-based linear classifier fine-tuned using domain labels. (ii) We create new state-of-the-art results on both few-shot and training-free settings compared with existing methods.", "phrases": ["entity recognition", "scheme", "model generalization ability", "in-domain data"], "overall_score": 1.307342663090672, "scores": [0.8919728793089602, 0.5778906870621094, 0.5239079955407, 0.5210241404662082], "rank_score": 0.6286989255944945} -{"id": "ethayarajh-etal-2019-towards", "title": "Towards Understanding Linear Word Analogies", "abstract": "A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc.
Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.", "phrases": ["sgns", "explanation", "word pair", "co-occurrence"], "overall_score": 1.4457313414435444, "scores": [0.8246702898306888, 0.599014237591484, 0.5450838919108725, 0.5427241562810241], "rank_score": 0.6278731439035173} -{"id": "ive-etal-2016-limsis", "title": "LIMSI's Contribution to the WMT'16 Biomedical Translation Task", "abstract": "The article describes LIMSI\u2019s submission to the first WMT\u201916 shared biomedical translation task, focusing on the sole English-French translation direction. Our main submission is the output of a MOSES-based statistical machine translation (SMT) system, rescored with Structured OUtput Layer (SOUL) neural network models. We also present an attempt to circumvent syntactic complexity: our proposal combines the outputs of PBSMT systems trained either to translate entire source sentences or specific syntactic constructs extracted from those sentences. The approach is implemented using Confusion Network (CN) decoding. The quality of the combined output is comparable to the quality of our main system.", "phrases": ["limsi", "re-ranking model", "vocabulary", "moses"], "overall_score": 0.8702953940731268, "scores": [0.812584276492278, 0.5893971456912532, 0.5797417136645342, 0.5294185624275473], "rank_score": 0.6277854245689032} -{"id": "baumann-etal-2009-assessing", "title": "Assessing and Improving the Performance of Speech Recognition for Incremental Systems", "abstract": "In incremental spoken dialogue systems, partial hypotheses about what was said are required even while the utterance is still ongoing. We define measures for evaluating the quality of incremental ASR components with respect to the relative correctness of the partial hypotheses compared to hypotheses that can optimize over the complete input, the timing of hypothesis formation relative to the portion of the input they are about, and hypothesis stability, defined as the number of times they are revised. We show that simple incremental post-processing can improve stability dramatically, at the cost of timeliness (from 90 % of edits of hypotheses being spurious down to 10 % at a lag of 320 ms). The measures are not independent, and we show how system designers can find a desired operating point for their ASR. To our knowledge, we are the first to suggest and examine a variety of measures for assessing incremental ASR and improve performance on this basis.", "phrases": ["speech recognition", "spoken dialogue system", "asr", "incremental processing"], "overall_score": 1.3793580471128168, "scores": [0.8658957885687911, 0.5921888687807573, 0.5305196016203384, 0.5224873451210633], "rank_score": 0.6277729010227375} -{"id": "voita-etal-2021-analyzing", "title": "Analyzing the Source and Target Contributions to Predictions in Neural Machine Translation", "abstract": "In Neural Machine Translation (and, more generally, conditional language modeling), the generation of a target token is influenced by two types of context: the source and the prefix of the target sequence. While many attempts to understand the internal workings of NMT models have been made, none of them explicitly evaluates relative source and target contributions to a generation decision. We argue that this relative contribution can be evaluated by adopting a variant of Layerwise Relevance Propagation (LRP). 
Its underlying \u2018conservation principle\u2019 makes relevance propagation unique: differently from other methods, it evaluates not an abstract quantity reflecting token importance, but the proportion of each token's influence. We extend LRP to the Transformer and conduct an analysis of NMT models which explicitly evaluates the source and target relative contributions to the generation process. We analyze changes in these contributions when conditioning on different types of prefixes, when varying the training objective or the amount of training data, and during the training process. We find that models trained with more data tend to rely on source information more and to have more sharp token contributions; the training process is non-monotonic with several stages of different nature.", "phrases": ["neural machine translation", "influence", "prefix", "different type"], "overall_score": 1.4453938701366809, "scores": [0.9275107061210254, 0.5368298162961717, 0.5266890180992122, 0.5198767873921742], "rank_score": 0.6277265819771458} -{"id": "haghighi-etal-2009-better", "title": "Better Word Alignments with Supervised ITG Models", "abstract": "This work investigates supervised word alignment methods that exploit inversion transduction grammar (ITG) constraints. We consider maximum margin and conditional likelihood objectives, including the presentation of a new normal form grammar for canonicalizing derivations. Even for non-ITG sentence pairs, we show that it is possible to learn ITG alignment models by simple relaxations of structured discriminative learning objectives. For efficiency, we describe a set of pruning techniques that together allow us to align sentences two orders of magnitude faster than naive bitext CKY parsing. Finally, we introduce many-to-one block alignment features, which significantly improve our ITG models. Altogether, our method results in the best reported AER numbers for Chinese-English and a performance improvement of 1.1 BLEU over GIZA++ alignments.", "phrases": ["itg", "block", "word alignment community", "probability", "more attention"], "overall_score": 1.7778929246450603, "scores": [0.8573215801080336, 0.6390834707324546, 0.5513094112641832, 0.5452931100148986, 0.5445834045269523], "rank_score": 0.6275181953293044} -{"id": "mitchell-etal-2010-syntactic", "title": "Syntactic and Semantic Factors in Processing Difficulty: An Integrated Measure", "abstract": "The analysis of reading times can provide insights into the processes that underlie language comprehension, with longer reading times indicating greater cognitive load. There is evidence that the language processor is highly predictive, such that prior context allows upcoming linguistic material to be anticipated. Previous work has investigated the contributions of semantic and syntactic contexts in isolation, essentially treating them as independent factors.
In this paper we analyze reading times in terms of a single predictive measure which integrates a model of semantic composition with an incremental parser and a language model.", "phrases": ["processing difficulty", "incremental parser", "psycholinguistic", "semantic component", "coherence"], "overall_score": 1.5045732240926597, "scores": [0.9183065968763722, 0.5950577679007353, 0.5511533652967847, 0.5421944180247401, 0.5305667004826192], "rank_score": 0.6274557697162503} -{"id": "sogaard-2010-inversion", "title": "Can inversion transduction grammars generate hand alignments", "abstract": "The adequacy of inversion transduction grammars (ITGs) has been widely debated, and the discussion\u2019s crux seems to be whether the search space is inclusive enough (Zens and Ney, 2003; Wellington et al., 2006; S\u00f8gaard and Wu, 2009). Parse failure rate when parses are constrained by word alignments is one metric that has been used, but no one has studied parse failure rates of the full class of ITGs on representative hand aligned corpora. It has also been noted that ITGs in Chomsky normal form induce strictly less alignments than ITGs (S\u00f8gaard and Wu, 2009). This study is the first study that directly compares parse failure rates for this subclass and the full class of ITGs.", "phrases": ["adequacy", "itg", "translation model", "setup"], "overall_score": 1.1241488977157532, "scores": [0.8572302976491082, 0.5755478078671826, 0.5393360128617966, 0.5374836641860445], "rank_score": 0.627399445641033} -{"id": "cohen-etal-2011-unsupervised", "title": "Unsupervised Structure Prediction with Non-Parallel Multilingual Guidance", "abstract": "We describe a method for prediction of linguistic structure in a language for which only unlabeled data is available, using annotated data from a set of one or more helper languages. Our approach is based on a model that locally mixes between supervised models from the helper languages. Parallel data is not used, allowing the technique to be applied even in domains where human-translated texts are unavailable. We obtain state-of-the-art performance for two tasks of structure prediction: unsupervised part-of-speech tagging and unsupervised dependency parsing.", "phrases": ["non-parallel multilingual guidance", "unlabeled data", "helper language", "part-of-speech tagging", "source language"], "overall_score": 1.698872647875475, "scores": [0.9426136380054061, 0.5687221498529156, 0.558658775575179, 0.5414628983268955, 0.525250726263616], "rank_score": 0.6273416376048024} -{"id": "bruni-etal-2012-distributional", "title": "Distributional Semantics in Technicolor", "abstract": "Our research aims at building computational models of word meaning that are perceptually grounded. Using computer vision techniques, we build visual and multimodal distributional models and compare them to standard textual models. Our results show that, while visual models with state-of-the-art computer vision techniques perform worse than textual models in general tasks (accounting for semantic relatedness), they are as good or better models of the meaning of words with visual correlates such as color terms, even in a nontrivial task that involves nonliteral uses of such words. 
Moreover, we show that visual and textual information are tapping on different aspects of meaning, and indeed combining them in multimodal models often improves performance.", "phrases": ["relatedness", "color term", "men", "vector concatenation"], "overall_score": 1.90982839141433, "scores": [0.8673933618331872, 0.5758932408386995, 0.5414457940032251, 0.5244669020397649], "rank_score": 0.6272998246787191} -{"id": "kisselew-etal-2016-predicting", "title": "Predicting the Direction of Derivation in English Conversion", "abstract": "Conversion is a word formation operation that changes the grammatical category of a word in the absence of overt morphology. Conversion is extremely productive in English (e.g., tunnel, talk). This paper investigates whether distributional information can be used to predict the diachronic direction of conversion for homophonous noun\u2010verb pairs. We aim to predict, for example, that tunnel was used as a noun prior to its use as a verb. We test two hypotheses: (1) that derived forms are less frequent than their bases, and (2) that derived forms are more semantically specific than their bases, as approximated by information theoretic measures. We find that hypothesis (1) holds for N-to-V conversion, while hypothesis (2) holds for V-to-N conversion. We achieve the best overall account of the historical data by taking both frequency and semantic specificity into account. These results provide a new perspective on linguistic theories regarding the semantic specificity of derivational morphemes, and on the morphosyntactic status of conversion.", "phrases": ["direction", "conversion", "noun"], "overall_score": 1.0088123479160018, "scores": [0.8170305950081421, 0.5404763424212797, 0.5229241250965301], "rank_score": 0.6268103541753174} -{"id": "srivastava-singh-2021-hinge", "title": "HinGE: A Dataset for Generation and Evaluation of Code-Mixed Hinglish Text", "abstract": "Text generation is a highly active area of research in the computational linguistic community. The evaluation of the generated text is a challenging task and multiple theories and metrics have been proposed over the years. Unfortunately, text generation and evaluation are relatively understudied due to the scarcity of high-quality resources in code-mixed languages where the words and phrases from multiple languages are mixed in a single utterance of text and speech. To address this challenge, we present a corpus (HinGE) for a widely popular code-mixed language Hinglish (code-mixing of Hindi and English languages). HinGE has Hinglish sentences generated by humans as well as two rule-based algorithms corresponding to the parallel Hindi-English sentences. In addition, we demonstrate the inefficacy of widely-used evaluation metrics on the code-mixed data. The HinGE dataset will facilitate the progress of natural language generation research in code-mixed languages.", "phrases": ["efficacy", "evaluation metric", "hinge"], "overall_score": 1.0087860607827326, "scores": [0.8136166711590346, 0.5390804395617362, 0.5276849524624926], "rank_score": 0.6267940210610878} -{"id": "mcintosh-2010-unsupervised", "title": "Unsupervised Discovery of Negative Categories in Lexicon Bootstrapping", "abstract": "Multi-category bootstrapping algorithms were developed to reduce semantic drift. By extracting multiple semantic lexicons simultaneously, a category's search space may be restricted. The best results have been achieved through reliance on manually crafted negative categories.
Unfortunately, identifying these categories is non-trivial, and their use shifts the unsupervised bootstrapping paradigm towards a supervised framework. \n \nWe present NEG-FINDER, the first approach for discovering negative categories automatically. NEG-FINDER exploits unsupervised term clustering to generate multiple negative categories during bootstrapping. Our algorithm effectively removes the necessity of manual intervention and formulation of negative categories, with performance closely approaching that obtained using negative categories defined by a domain expert.", "phrases": ["negative category", "bootstrapping", "semantic drift", "iterative process", "limitation"], "overall_score": 1.3753706210529157, "scores": [0.9408335051232599, 0.5790617548264496, 0.5439232767887753, 0.5398802939246794, 0.5260918954180336], "rank_score": 0.6259581452162395} -{"id": "engonopoulos-etal-2013-predicting", "title": "Predicting the Resolution of Referring Expressions from User Behavior", "abstract": "We present a statistical model for predicting how the user of an interactive, situated NLP system resolved a referring expression. The model makes an initial prediction based on the meaning of the utterance, and revises it continuously based on the user\u2019s behavior. The combined model outperforms its components in predicting reference resolution and when to give feedback.", "phrases": ["resolution", "log-linear model", "semantic model"], "overall_score": 1.2178407225769075, "scores": [0.8193529407517063, 0.5367594158914993, 0.5214266291647511], "rank_score": 0.6258463286026522} -{"id": "zhou-etal-2021-challenges", "title": "Challenges in Automated Debiasing for Toxic Language Detection", "abstract": "Biased associations have been a challenge in the development of classifiers for detecting toxic language, hindering both fairness and accuracy. As potential solutions, we investigate recently introduced debiasing methods for text classification datasets and models, as applied to toxic language detection. Our focus is on lexical (e.g., swear words, slurs, identity mentions) and dialectal markers (specifically African American English). Our comprehensive experiments establish that existing methods are limited in their ability to prevent biased behavior in current toxicity detectors. We then propose an automatic, dialect-aware data correction method, as a proof-of-concept. Despite the use of synthetic labels, this method reduces dialectal associations with toxicity. Overall, our findings show that debiasing a model trained on biased toxic language data is not as effective as simply relabeling the data to remove existing biases.", "phrases": ["toxic language detection", "debiasing method", "offensive content", "hate speech dataset"], "overall_score": 1.7349087032753436, "scores": [0.8852377420028135, 0.5626951893874305, 0.5335006290843779, 0.5215106221358184], "rank_score": 0.6257360456526101} -{"id": "liu-etal-2012-tag", "title": "Tag Dispatch Model with Social Network Regularization for Microblog User Tag Suggestion", "abstract": "Microblog is a popular Web 2.0 service which reserves rich information about Web users. In a microblog service, it is a simple and effective way to annotate tags for users to represent their interests and attributes. The attributes and interests of a microblog user usually hide behind the text and network information of the user. In this paper, we propose a probabilistic model, Network-Regularized Tag Dispatch Model (NTDM), for microblog user tag suggestion. 
NTDM models the semantic relations between words in user descriptions and tags, and takes the social network structure as regularization. Experiments on a real-world dataset demonstrate the effectiveness and efficiency of NTDM compared to other baseline methods.", "phrases": ["microblog", "social network structure", "tag dispatch model"], "overall_score": 0.6873411585256938, "scores": [0.7988987105016118, 0.5581738638676447, 0.5198620793264094], "rank_score": 0.6256448845652219} -{"id": "ive-etal-2018-deepquest", "title": "deepQuest: A Framework for Neural-based Quality Estimation", "abstract": "Predicting Machine Translation (MT) quality can help in many practical tasks such as MT post-editing. The performance of Quality Estimation (QE) methods has drastically improved recently with the introduction of neural approaches to the problem. However, thus far neural approaches have only been designed for word and sentence-level prediction. We present a neural framework that is able to accommodate neural QE approaches at these fine-grained levels and generalize them to the level of documents. We test the framework with two sentence-level neural QE approaches: a state of the art approach that requires extensive pre-training, and a new light-weight approach that we propose, which employs basic encoders. Our approach is significantly faster and yields performance improvements for a range of document-level quality estimation tasks. To our knowledge, this is the first neural architecture for document-level QE. In addition, for the first time we apply QE models to the output of both statistical and neural MT systems for a series of European languages and highlight the new challenges resulting from the use of neural MT.", "phrases": ["quality estimation", "different level", "translation quality", "sentence level"], "overall_score": 1.3735614735005044, "scores": [0.8334621526590297, 0.6010067673547084, 0.5446009219985375, 0.5214692247147612], "rank_score": 0.6251347666817592} -{"id": "bamman-etal-2019-annotated", "title": "An annotated dataset of literary entities", "abstract": "We present a new dataset comprised of 210,532 tokens evenly drawn from 100 different English-language literary texts annotated for ACE entity categories (person, location, geo-political entity, facility, organization, and vehicle). These categories include non-named entities (such as \u201cthe boy\u201d, \u201cthe kitchen\u201d) and nested structure (such as [[the cook]'s sister]). In contrast to existing datasets built primarily on news (focused on geo-political entities and organizations), literary texts offer strikingly different distributions of entity categories, with much stronger emphasis on people and description of settings. 
We present empirical results demonstrating the performance of nested entity recognition models in this domain; training natively on in-domain literary data yields an improvement of over 20 absolute points in F-score (from 45.7 to 68.3), and mitigates a disparate impact in performance for male and female entities present in models trained on news data.", "phrases": ["literary entity", "ace entity category", "person", "geo-political entity", "organization"], "overall_score": 0.8664019620273526, "scores": [0.9288002233925224, 0.574074728977165, 0.5488179227643232, 0.5368487797854583, 0.5363428801638551], "rank_score": 0.6249769070166649} -{"id": "sha-2020-gradient", "title": "Gradient-guided Unsupervised Lexically Constrained Text Generation", "abstract": "Lexically constrained generation requires the target sentence to satisfy some lexical constraints, such as containing some specific words or being the paraphrase to a given sentence, which is very important in many real-world natural language generation applications. Previous works usually apply beam-search-based methods or stochastic searching methods to lexically-constrained generation. However, when the search space is too large, beam-search-based methods always fail to find the constrained optimal solution. At the same time, stochastic search methods always cost too many steps to find the correct optimization direction. In this paper, we propose a novel method G2LC to solve the lexically-constrained generation as an unsupervised gradient-guided optimization problem. We propose a differentiable objective function and use the gradient to help determine which position in the sequence should be changed (deleted or inserted/replaced by another word). The word updating process of the inserted/replaced word also benefits from the guidance of gradient. Besides, our method is free of parallel data training, which is flexible to be used in the inference stage of any pre-trained generation model. We apply G2LC to two generation tasks: keyword-to-sentence generation and unsupervised paraphrase generation. The experiment results show that our method achieves state-of-the-art compared to previous lexically-constrained methods.", "phrases": ["paraphrase", "search space", "g2lc", "gradient", "generation task"], "overall_score": 1.4387792221176896, "scores": [0.9061743281615313, 0.6064428849873189, 0.5582537666743561, 0.5332724474910652, 0.520125956899556], "rank_score": 0.6248538768427656} -{"id": "wang-manning-2014-cross", "title": "Cross-lingual Projected Expectation Regularization for Weakly Supervised Learning", "abstract": "We consider a multilingual weakly supervised learning scenario where knowledge from annotated corpora in a resource-rich language is transferred via bitext to guide the learning in other languages. Past approaches project labels across bitext and use them as features or gold labels for training. We propose a new method that projects model expectations rather than labels, which facilitates transfer of model uncertainty across language boundaries. We encode expectations as constraints and train a discriminative CRF model using Generalized Expectation Criteria (Mann and McCallum, 2010). Evaluated on standard Chinese-English and German-English NER datasets, our method demonstrates F1 scores of 64% and 60% when no labeled data is used. Attaining the same accuracy with supervised CRFs requires 12k and 1.5k labeled sentences.
Furthermore, when combined with labeled examples, our method yields significant improvements over state-of-the-art supervised methods, achieving the best reported numbers to date on Chinese OntoNotes and German CoNLL-03 datasets.", "phrases": ["model expectation", "language boundary", "parallel corpora", "annotation projection"], "overall_score": 1.691718399502129, "scores": [0.845518050959105, 0.5649359384178327, 0.5465110287527706, 0.5418341530430839], "rank_score": 0.6246997927931981} -{"id": "foster-2010-cba", "title": "\u201ccba to check the spelling\u201d: Investigating Parser Performance on Discussion Forum Posts", "abstract": "We evaluate the Berkeley parser on text from an online discussion forum. We evaluate the parser output with and without gold tokens and spellings (using Sparseval and Parseval), and we compile a list of problematic phenomena for this domain. The Parseval f-score for a small development set is 77.56. This increases to 80.27 when we apply a set of simple transformations to the input sentences and to the Wall Street Journal (WSJ) training sections.", "phrases": ["spelling", "berkeley parser", "online discussion forum"], "overall_score": 1.767873695793671, "scores": [0.7851245862554203, 0.5458297065265673, 0.5409912486674244], "rank_score": 0.623981847149804} -{"id": "volkova-etal-2014-inferring", "title": "Inferring User Political Preferences from Streaming Communications", "abstract": "Existing models for social media personal analytics assume access to thousands of messages per user, even though most users author content only sporadically over time. Given this sparsity, we: (i) leverage content from the local neighborhood of a user; (ii) evaluate batch models as a function of size and the amount of messages in various types of neighborhoods; and (iii) estimate the amount of time and tweets required for a dynamic model to predict user preferences. We show that even when limited or no self-authored data is available, language from friend, retweet and user mention communications provides sufficient evidence for prediction. When updating models over time based on Twitter, we find that political preference can often be predicted using roughly 100 tweets, depending on the context of user selection, where this could mean hours, or weeks, based on the author\u2019s tweeting frequency.", "phrases": ["political preference", "friend", "twitter user", "social medium"], "overall_score": 1.4366091061960742, "scores": [0.8534625715934034, 0.5718950018937704, 0.5359448121479998, 0.534343244256497], "rank_score": 0.6239114074729176} -{"id": "elhadad-sutaria-2007-mining", "title": "Mining a Lexicon of Technical Terms and Lay Equivalents", "abstract": "We present a corpus-driven method for building a lexicon of semantically equivalent pairs of technical and lay medical terms. Using a parallel corpus of abstracts of clinical studies and corresponding news stories written for a lay audience, we identify terms which are good semantic equivalents of technical terms for a lay audience. Our method relies on measures of association.
Results show that, despite the small size of our corpus, a promising number of pairs are identified.", "phrases": ["technical term", "audience", "comparable corpora", "medical text", "paraphrase"], "overall_score": 1.495310193590125, "scores": [0.8410423947287641, 0.5863852973927739, 0.5669206596854685, 0.5655084374832926, 0.5581071404794137], "rank_score": 0.6235927859539425} -{"id": "woodsend-lapata-2010-automatic", "title": "Automatic Generation of Story Highlights", "abstract": "In this paper we present a joint content selection and compression model for single-document summarization. The model operates over a phrase-based representation of the source document which we obtain by merging information from PCFG parse trees and dependency graphs. Using an integer linear programming formulation, the model learns to select and combine phrases subject to length, coverage and grammar constraints. We evaluate the approach on the task of generating \"story highlights\"---a small number of brief, self-contained sentences that allow readers to quickly gather information on news stories. Experimental results show that the model's output is comparable to human-written highlights in terms of both grammaticality and content.", "phrases": ["story highlight", "content selection", "summarization", "length", "self-contained sentence"], "overall_score": 1.766431061364265, "scores": [0.8793674071392475, 0.5922711265305827, 0.5717234215647775, 0.5466393111802984, 0.5273620360523291], "rank_score": 0.6234726604934471} -{"id": "park-zhang-2003-text", "title": "Text Chunking by Combining Hand-Crafted Rules and Memory-Based Learning", "abstract": "This paper proposes a hybrid of hand-crafted rules and a machine learning method for chunking Korean. In the partially free word-order languages such as Korean and Japanese, a small number of rules dominate the performance due to their well-developed postpositions and endings. Thus, the proposed method is primarily based on the rules, and then the residual errors are corrected by adopting a memory-based machine learning method. Since the memory-based learning is an efficient method to handle exceptions in natural language processing, it is good at checking whether the estimates are exceptional cases of the rules and revising them. An evaluation of the method yields the improvement in F-score over the rules or various machine learning methods alone.", "phrases": ["hand-crafted rule", "memory-based learning", "machine learning method"], "overall_score": 0.8618448335106879, "scores": [0.7863319284066748, 0.5575208835883928, 0.5212160889873979], "rank_score": 0.6216896336608219} -{"id": "cho-2017-strawman", "title": "Strawman: An Ensemble of Deep Bag-of-Ngrams for Sentiment Analysis", "abstract": "This paper describes a builder entry, named \u201cstrawman\u201d, to the sentence-level sentiment analysis task of the \u201cBuild It, Break It\u201d shared task of the First Workshop on Building Linguistically Generalizable NLP Systems. 
The goal of a builder is to provide an automated sentiment analyzer that would serve as a target for breakers whose goal is to find pairs of minimally-differing sentences that break the analyzer.", "phrases": ["strawman", "telugu sentiment analysis", "neutral polarity", "several source"], "overall_score": 0.9983287188908139, "scores": [0.8091795245268778, 0.6043925145160305, 0.5345488266588734, 0.5330651711822384], "rank_score": 0.620296509221005}
-{"id": "cross-huang-2016-span", "title": "Span-Based Constituency Parsing with a Structure-Label System and Provably Optimal Dynamic Oracles", "abstract": "Parsing accuracy using efficient greedy transition systems has improved dramatically in recent years thanks to neural networks. Despite striking results in dependency parsing, however, neural models have not surpassed state-of-the-art approaches in constituency parsing. To remedy this, we introduce a new shift-reduce system whose stack contains merely sentence spans, represented by a bare minimum of LSTM features. We also design the first provably optimal dynamic oracle for constituency parsing, which runs in amortized O(1) time, compared to O(n^3) oracles for standard dependency parsing. Training with this oracle, we achieve the best F1 scores on both English and French of any parser that does not use reranking or external data.", "phrases": ["dynamic oracle", "stack", "constituency parser", "transition-based parser"], "overall_score": 1.969009721858501, "scores": [0.8829623906363832, 0.5489405450654191, 0.5262255074536616, 0.520130047063912], "rank_score": 0.6195646225548439}
-{"id": "amidei-etal-2018-rethinking", "title": "Rethinking the Agreement in Human Evaluation Tasks", "abstract": "Human evaluations are broadly thought to be more valuable the higher the inter-annotator agreement. In this paper we examine this idea. We will describe our experiments and analysis within the area of Automatic Question Generation. Our experiments show how annotators diverge in language annotation tasks due to a range of ineliminable factors. For this reason, we believe that annotation schemes for natural language generation tasks that are aimed at evaluating language quality need to be treated with great care. In particular, an unchecked focus on reduction of disagreement among annotators runs the danger of creating generation goals that reward output that is more distant from, rather than closer to, natural human-like language. We conclude the paper by suggesting a new approach to the use of the agreement metrics in natural language generation evaluation tasks.", "phrases": ["agreement", "language annotation task", "ineliminable factor", "factor"], "overall_score": 1.1100147771103361, "scores": [0.8660360316284508, 0.5464343071850027, 0.5350337796811991, 0.5305400524421182], "rank_score": 0.6195110427341928}
-{"id": "adolphs-etal-2008-fine", "title": "Some Fine Points of Hybrid Natural Language Parsing", "abstract": "Large-scale grammar-based parsing systems nowadays increasingly rely on independently developed, more specialized components for pre-processing their input. However, different tools make conflicting assumptions about very basic properties such as tokenization. To make linguistic annotation gathered in pre-processing available to \u201cdeep\u201d parsing, a hybrid NLP system needs to establish a coherent mapping between the two universes. Our basic assumption is that tokens are best described by attribute value matrices (AVMs) that may be arbitrarily complex.
We propose a powerful resource-sensitive rewrite formalism, \u201cchart mapping\u201d, that allows us to mediate between the token descriptions delivered by shallow pre-processing components and the input expected by the grammar. We furthermore propose a novel way of unknown word treatment where all generic lexical entries are instantiated that are licensed by a particular token AVM. Again, chart mapping is used to give the grammar writer full control as to which items (e.g. native vs. generic lexical items) enter syntactic parsing. We discuss several further uses of the original idea and report on early experiences with the new machinery.", "phrases": ["tokenization", "syntactico-semantic analysis", "english wikipedia", "newspaper text", "entity recognition"], "overall_score": 1.2028022217122356, "scores": [0.8446867519518895, 0.5926515118156869, 0.5682904102820088, 0.5481793200325044, 0.5367823456007677], "rank_score": 0.6181180679365713}
-{"id": "marciniak-strube-2005-beyond", "title": "Beyond the Pipeline: Discrete Optimization in NLP", "abstract": "We present a discrete optimization model based on a linear programming formulation as an alternative to the cascade of classifiers implemented in many language processing systems. Since NLP tasks are correlated with one another, sequential processing does not guarantee optimal solutions. We apply our model in an NLG application and show that it performs better than a pipeline-based system.", "phrases": ["decision", "ilp approach", "co-dependent", "language processing application", "subtask"], "overall_score": 1.3579323606971652, "scores": [0.8717705980235245, 0.5654470850179533, 0.562328024835598, 0.5535793285373664, 0.5369832181169159], "rank_score": 0.6180216509062716}
-{"id": "wisniewski-etal-2010-assessing", "title": "Assessing Phrase-Based Translation Models with Oracle Decoding", "abstract": "Extant Statistical Machine Translation (SMT) systems are very complex pieces of software, which embed multiple layers of heuristics and carry very large numbers of numerical parameters. As a result, it is difficult to analyze output translations and there is a real need for tools that could help developers to better understand the various causes of errors. \n \nIn this study, we take a step in that direction and present an attempt to evaluate the quality of the phrase-based translation model. In order to identify those translation errors that stem from deficiencies in the phrase table (PT), we propose to compute the oracle BLEU-4 score, that is, the best score that a system based on this PT can achieve on a reference corpus. By casting the computation of the oracle BLEU-1 as an Integer Linear Programming (ILP) problem, we show that it is possible to efficiently compute accurate lower-bounds of this score, and report measures performed on several standard benchmarks. Various other applications of these oracle decoding techniques are also reported and discussed.", "phrases": ["statistical machine translation", "integer linear programming", "hypothesis", "bottleneck", "suboptimal result"], "overall_score": 1.2830346257007887, "scores": [0.8673226408463582, 0.6124070056549716, 0.543128380200809, 0.5420434124619015, 0.5201447138148854], "rank_score": 0.6170092305957852}
-{"id": "gupta-etal-2018-semantic", "title": "Semantic Parsing for Technical Support Questions", "abstract": "Technical support problems are very complex.
In contrast to regular web queries (that contain few keywords) or factoid questions (which are a few sentences), these problems usually include attributes like a detailed description of what is failing (symptom), steps taken in an effort to remediate the failure (activity), and sometimes a specific request or ask (intent). Automating support is the task of automatically providing answers to these problems given a corpus of solution documents. Traditional approaches to this task rely on information retrieval and are keyword based; looking for keyword overlap between the question and solution documents and ignoring these attributes. We present an approach for semantic parsing of technical questions that uses grammatical structure to extract these attributes as a baseline, and a CRF based model that can improve performance considerably in the presence of annotated data for training. We also demonstrate that combined with reasoning, these attributes help outperform retrieval baselines.", "phrases": ["attribute", "symptom", "request", "intent", "semantic parsing"], "overall_score": 0.8547797536498484, "scores": [0.9684865303887132, 0.538304093315142, 0.529588118331758, 0.5245622565784877, 0.5220252804932143], "rank_score": 0.6165932558214631} -{"id": "goutte-etal-2012-impact", "title": "The Impact of Sentence Alignment Errors on Phrase-Based Machine Translation Performance", "abstract": "When parallel or comparable corpora are harvested from the web, there is typically a tradeoff between the size and quality of the data. In order to improve quality, corpus collection efforts often attempt to fix or remove misaligned sentence pairs. But, at the same time, Statistical Machine Translation (SMT) systems are widely assumed to be relatively robust to sentence alignment errors. However, there is little empirical evidence to support and characterize this robustness. This contribution investigates the impact of sentence alignment errors on a typical phrase-based SMT system. We confirm that SMT systems are highly tolerant to noise, and that performance only degrades seriously at very high noise levels. Our findings suggest that when collecting larger, noisy parallel data for training phrase-based SMT, cleaning up by trying to detect and remove incorrect alignments can actually degrade performance. Although fixing errors, when applicable, is a preferable strategy to removal, its benefits only become apparent for fairly high misalignment rates. We provide several explanations to support these findings.", "phrases": ["sentence alignment error", "degrade", "noisy parallel data", "different type"], "overall_score": 1.3546334899857344, "scores": [0.8943588704887905, 0.526084004292862, 0.5249437573629616, 0.5206944484302429], "rank_score": 0.6165202701437142} -{"id": "goyal-etal-2012-distributed", "title": "A Distributed Platform for Sanskrit Processing", "abstract": "Sanskrit, the classical language of India, presents specific challenges for computational linguistics: exact phonetic transcription in writing that obscures word boundaries, rich morphology and an enormous corpus, among others. Recent international cooperation has developed innovative solutions to these problems and significant resources for linguistic research. Solutions include efficient segmenting and tagging algorithms and dependency parsers based on constraint programming. The integration of lexical resources, text archives and linguistic software is achieved by distributed interoperable Web services. 
Resources include a morphological tagger and tagged corpus.", "phrases": ["sanskrit", "india", "digitisation", "sanskrit heritage reader"], "overall_score": 1.199203495387767, "scores": [0.8906087044428863, 0.5322715553528078, 0.5219112516687452, 0.5202832423106988], "rank_score": 0.6162686884437845} -{"id": "marchisio-etal-2020-unsupervised", "title": "When Does Unsupervised Machine Translation Work?", "abstract": "Despite the reported success of unsupervised machine translation (MT), the field has yet to examine the conditions under which the methods succeed and fail. We conduct an extensive empirical evaluation using dissimilar language pairs, dissimilar domains, and diverse datasets. We find that performance rapidly deteriorates when source and target corpora are from different domains, and that stochasticity during embedding training can dramatically affect downstream results. We additionally find that unsupervised MT performance declines when source and target languages use different scripts, and observe very poor performance on authentic low-resource language pairs. We advocate for extensive empirical evaluation of unsupervised MT systems to highlight failure points and encourage continued research on the most promising paradigms. We release our preprocessed dataset to encourage evaluations that stress-test systems under multiple data conditions.", "phrases": ["condition", "low-resource language", "distant language pair", "bleu score", "unmt"], "overall_score": 1.6660192173725563, "scores": [0.9075515538976289, 0.5677048144276514, 0.540255390916475, 0.5371210442612743, 0.5234165560961127], "rank_score": 0.6152098719198285} -{"id": "mohiuddin-etal-2021-rethinking", "title": "Rethinking Coherence Modeling: Synthetic vs. Downstream Tasks", "abstract": "Although coherence modeling has come a long way in developing novel models, their evaluation on downstream applications for which they are purportedly developed has largely been neglected. With the advancements made by neural approaches in applications such as machine translation (MT), summarization and dialog systems, the need for coherence evaluation of these tasks is now more crucial than ever. However, coherence models are typically evaluated only on synthetic tasks, which may not be representative of their performance in downstream applications. To investigate how representative the synthetic tasks are of downstream use cases, we conduct experiments on benchmarking well-known traditional and neural coherence models on synthetic sentence ordering tasks, and contrast this with their performance on three downstream applications: coherence evaluation for MT and summarization, and next utterance prediction in retrieval-based dialog. Our results demonstrate a weak correlation between the model performances in the synthetic tasks and the downstream applications, motivating alternate training and evaluation methods for coherence models.", "phrases": ["coherence modeling", "machine translation", "summarization", "model performance"], "overall_score": 0.9891689107836396, "scores": [0.8510621878100316, 0.5448301777764285, 0.5391993435315994, 0.5233290926825616], "rank_score": 0.6146052004501552} -{"id": "zeng-etal-2020-counterfactual", "title": "Counterfactual Generator: A Weakly-Supervised Method for Named Entity Recognition", "abstract": "Past progress on neural models has proven that named entity recognition is no longer a problem if we have enough labeled data. 
However, collecting enough data and annotating them are labor-intensive, time-consuming, and expensive. In this paper, we decompose the sentence into two parts: entity and context, and rethink the relationship between them and model performance from a causal perspective. Based on this, we propose the Counterfactual Generator, which generates counterfactual examples by the interventions on the existing observational examples to enhance the original dataset. Experiments across three datasets show that our method improves the generalization ability of models under limited observational examples. Besides, we provide a theoretical foundation by using a structural causal model to explore the spurious correlations between input features and output labels. We investigate the causal effects of entity or context on model performance under both conditions: the non-augmented and the augmented. Interestingly, we find that the non-spurious correlations are more located in entity representation rather than context representation. As a result, our method eliminates part of the spurious correlations between context representation and output labels. The code is available at .", "phrases": ["generalization ability", "counterfactual generator", "various nlp task", "ner model", "causal theory"], "overall_score": 1.5757112196527763, "scores": [0.8795667291232755, 0.5669794919485279, 0.563335857858823, 0.5348973995545424, 0.526842998327037], "rank_score": 0.6143244953624412} -{"id": "lopes-etal-2020-document", "title": "Document-level Neural MT: A Systematic Comparison", "abstract": "In this paper we provide a systematic comparison of existing and new document-level neural machine translation solutions. As part of this comparison, we introduce and evaluate a document-level variant of the recently proposed Star Transformer architecture. In addition to using the traditional metric BLEU, we report the accuracy of the models in handling anaphoric pronoun translation as well as coherence and cohesion using contrastive test sets. Finally, we report the results of human evaluation in terms of Multidimensional Quality Metrics (MQM) and analyse the correlation of the results obtained by the automatic metrics with human judgments.", "phrases": ["systematic comparison", "test set", "sentence-level baseline", "contextual information"], "overall_score": 1.575402255155856, "scores": [0.7980063729779532, 0.6005929584033014, 0.529336924399594, 0.5288799001763049], "rank_score": 0.6142040389892884} -{"id": "venugopal-etal-2007-efficient", "title": "An Efficient Two-Pass Approach to Synchronous-CFG Driven Statistical MT", "abstract": "We present an efficient, novel two-pass approach to mitigate the computational impact resulting from online intersection of an n-gram language model (LM) and a probabilistic synchronous context-free grammar (PSCFG) for statistical machine translation. In first pass CYK-style decoding, we consider first-best chart item approximations, generating a hypergraph of sentence spanning target language derivations. In the second stage, we instantiate specific alternative derivations from this hypergraph, using the LM to drive this search process, recovering from search errors made in the first pass. 
Model search errors in our approach are comparable to those made by the state-of-the-art \u201cCube Pruning\u201d approach in (Chiang, 2007) under comparable pruning conditions evaluated on both hierarchical and syntax-based grammars.", "phrases": ["computational impact", "n-gram language model", "cube pruning", "smt decoder", "hypothesis recombination"], "overall_score": 1.525638343712118, "scores": [0.8528478883495489, 0.5793121224627841, 0.5568730676580751, 0.5458329290803022, 0.5349441678984913], "rank_score": 0.6139620350898404} -{"id": "chang-etal-2016-measuring", "title": "Measuring the Information Content of Financial News", "abstract": "Measuring the information content of news text is useful for decision makers in their investments since news information can influence the intrinsic values of companies. We propose a model to automatically measure the information content given news text, trained using news and corresponding cumulative abnormal returns of listed companies. Existing methods in finance literature exploit sentiment signal features, which are limited by not considering factors such as events. We address this issue by leveraging deep neural models to extract rich semantic features from news text. In particular, a novel tree-structured LSTM is used to find target-specific representations of news text given syntax structures. Empirical results show that the neural models can outperform sentiment-based models, demonstrating the effectiveness of recent NLP technology advances for computational finance.", "phrases": ["information content", "company", "abnormal return", "stock price prediction", "data extraction"], "overall_score": 1.099928615371413, "scores": [0.8820214474587234, 0.5750170367411118, 0.5421905344343415, 0.5351216149278736, 0.5350586098708753], "rank_score": 0.6138818486865851} -{"id": "bleicken-etal-2016-using", "title": "Using a Language Technology Infrastructure for German in order to Anonymize German Sign Language Corpus Data", "abstract": "For publishing sign language corpus data on the web, anonymization is crucial even if it is impossible to hide the visual appearance of the signers: In a small community, even vague references to third persons may be enough to identify those persons. In the case of the DGS Korpus (German Sign Language corpus) project, we want to publish data as a contribution to the cultural heritage of the sign language community while annotation of the data is still ongoing. This poses the question how well anonymization can be achieved given that no full linguistic analysis of the data is available. Basically, we combine analysis of all data that we have, including named entity recognition on translations into German. For this, we use the WebLicht language technology infrastructure. We report on the reliability of these methods in this special context and also illustrate how the anonymization of the video data is technically achieved in order to minimally disturb the viewer.", "phrases": ["german", "anonymization", "sign language"], "overall_score": 0.8510109393794766, "scores": [0.783140050904346, 0.5332476586161805, 0.5252361834570171], "rank_score": 0.6138746309925146} -{"id": "wan-etal-2020-improving", "title": "Improving Grammatical Error Correction with Data Augmentation by Editing Latent Representation", "abstract": "The incorporation of data augmentation method in grammatical error correction task has attracted much attention. 
However, existing data augmentation methods mainly apply noise to tokens, which leads to the lack of diversity of generated errors. In view of this, we propose a new data augmentation method that can apply noise to the latent representation of a sentence. By editing the latent representations of grammatical sentences, we can generate synthetic samples with various error types. Combining with some pre-defined rules, our method can greatly improve the performance and robustness of existing grammatical error correction models. We evaluate our method on public benchmarks of GEC task and it achieves the state-of-the-art performance on CoNLL-2014 and FCE benchmarks.", "phrases": ["data augmentation method", "noise", "error type", "seq2seq model"], "overall_score": 0.9879026941915164, "scores": [0.831894242188075, 0.569047503835776, 0.5317725966161722, 0.5225594807469776], "rank_score": 0.6138184558467501} -{"id": "misra-walker-2013-topic", "title": "Topic Independent Identification of Agreement and Disagreement in Social Media Dialogue", "abstract": "Research on the structure of dialogue has been hampered for years because large dialogue corpora have not been available. This has impacted the dialogue research community's ability to develop better theories, as well as good off the shelf tools for dialogue processing. Happily, an increasing amount of information and opinion exchange occur in natural dialogue in online forums, where people share their opinions about a vast range of topics. In particular we are interested in rejection in dialogue, also called disagreement and denial, where the size of available dialogue corpora, for the first time, offers an opportunity to empirically test theoretical accounts of the expression and inference of rejection in dialogue. In this paper, we test whether topic-independent features motivated by theoretical predictions can be used to recognize rejection in online forums in a topic independent way. Our results show that our theoretically motivated features achieve 66% accuracy, an improvement over a unigram baseline of an absolute 6%.", "phrases": ["disagreement", "natural dialogue", "online forum", "topic-independent feature"], "overall_score": 1.3486718232381958, "scores": [0.8354801489463187, 0.5436907158275662, 0.5402536970100246, 0.5358034329315746], "rank_score": 0.613806998678871} -{"id": "kann-etal-2018-sentence", "title": "Sentence-Level Fluency Evaluation: References Help, But Can Be Spared!", "abstract": "Motivated by recent findings on the probabilistic modeling of acceptability judgments, we propose syntactic log-odds ratio (SLOR), a normalized language model score, as a metric for referenceless fluency evaluation of natural language generation output at the sentence level. We further introduce WPSLOR, a novel WordPiece-based version, which harnesses a more compact language model. Even though word-overlap metrics like ROUGE are computed with the help of hand-written references, our referenceless methods obtain a significantly higher correlation with human fluency scores on a benchmark dataset of compressed sentences. Finally, we present ROUGE-LM, a reference-based metric which is a natural extension of WPSLOR to the case of available references. 
We show that ROUGE-LM yields a significantly higher correlation with human judgments than all baseline metrics, including WPSLOR on its own.", "phrases": ["log-odds ratio", "language model", "wpslor", "human judgment"], "overall_score": 0.9878132745834641, "scores": [0.8443385820565475, 0.5435283900118636, 0.536487192730475, 0.530697420482844], "rank_score": 0.6137628963204326} -{"id": "kawahara-kurohashi-2011-generative", "title": "Generative Modeling of Coordination by Factoring Parallelism and Selectional Preferences", "abstract": "We present a unified generative model of coordination that considers parallelism of conjuncts and selectional preferences. Parallelism of conjuncts, which frequently characterizes coordinate structures, is modeled as a synchronized generation process in the generative parser. Selectional preferences learned from a large web corpus provide an important clue for resolving the ambiguities of coordinate structures. Our experiments of Japanese dependency parsing indicate the effectiveness of our approach, particularly in the domains of newspapers and patents.", "phrases": ["coordination", "parallelism", "selectional preference", "dependency parsing"], "overall_score": 0.8499736129527823, "scores": [0.8271393122695218, 0.5591645099376163, 0.5386437009010611, 0.527557909478709], "rank_score": 0.6131263581467271} -{"id": "clement-etal-2020-pymt5", "title": "PyMT5: multi-mode translation of natural language and Python code with transformers", "abstract": "Simultaneously modeling source code and natural language has many exciting applications in automated software development and understanding. Pursuant to achieving such technology, we introduce PyMT5, the Python method text-to-text transfer transformer, which is trained to translate between all pairs of Python method feature combinations: a single model that can both predict whole methods from natural language documentation strings (docstrings) and summarize code into docstrings of any common style. We present an analysis and modeling effort of a large-scale parallel corpus of 26 million Python methods and 7.7 million method-docstring pairs, demonstrating that for docstring and method generation, PyMT5 outperforms similarly-sized auto-regressive language models (GPT2) which were English pre-trained or randomly initialized. On the CodeSearchNet test set, our best model predicts 92.1% syntactically correct method bodies, achieved a BLEU score of 8.59 for method generation and 16.3 for docstring generation (summarization), and achieved a ROUGE-L F-score of 24.8 for method generation and 36.7 for docstring generation.", "phrases": ["docstring", "parallel corpus", "method generation", "pymt5", "code summarization"], "overall_score": 1.1924321516514538, "scores": [0.928819450073725, 0.545788061216761, 0.5311289534797935, 0.5309295269813469, 0.5272785388587595], "rank_score": 0.6127889061220773} -{"id": "kolomiyets-moens-2010-kul", "title": "KUL: Recognition and Normalization of Temporal Expressions", "abstract": "In this paper we describe a system for the recognition and normalization of temporal expressions (Task 13: TempEval-2, Task A). The recognition task is approached as a classification problem of sentence constituents and the normalization is implemented in a rule-based manner. One of the system features is extending positive annotations in the corpus by semantically similar words automatically obtained from a large unannotated textual corpus. 
The best results obtained by the system are 0.85 and 0.84 for precision and recall respectively for recognition of temporal expressions; the accuracy values of 0.91 and 0.55 were obtained for the feature values type and val respectively.", "phrases": ["normalization", "temporal type", "grounding", "vein", "location"], "overall_score": 0.8478966691208955, "scores": [0.8861624290081964, 0.5736443941091683, 0.5371692999377162, 0.5322000394487657, 0.5289646368136187], "rank_score": 0.6116281598634931} -{"id": "daza-frank-2019-translate", "title": "Translate and Label! An Encoder-Decoder Approach for Cross-lingual Semantic Role Labeling", "abstract": "We propose a Cross-lingual Encoder-Decoder model that simultaneously translates and generates sentences with Semantic Role Labeling annotations in a resource-poor target language. Unlike annotation projection techniques, our model does not need parallel data during inference time. Our approach can be applied in monolingual, multilingual and cross-lingual settings and is able to produce dependency-based and span-based SRL annotations. We benchmark the labeling performance of our model in different monolingual and multilingual settings using well-known SRL datasets. We then train our model in a cross-lingual setting to generate new SRL labeled data. Finally, we measure the effectiveness of our method by using the generated data to augment the training basis for resource-poor languages and perform manual evaluation to show that it produces high-quality sentences and assigns accurate semantic role annotations. Our proposed architecture offers a flexible method for leveraging SRL data in multiple languages.", "phrases": ["cross-lingual encoder-decoder model", "resource-poor target language", "semantic role annotation"], "overall_score": 1.1835141086901824, "scores": [0.6418282602188439, 0.6052440536831081, 0.5775455019793413], "rank_score": 0.6082059386270977} -{"id": "deneefe-knight-2009-synchronous", "title": "Synchronous Tree Adjoining Machine Translation", "abstract": "Tree Adjoining Grammars have well-known advantages, but are typically considered too difficult for practical systems. We demonstrate that, when done right, adjoining improves translation quality without becoming computationally intractable. Using adjoining to model optionality allows general translation patterns to be learned without the clutter of endless variations of optional material. The appropriate modifiers can later be spliced in as needed. \n \nIn this paper, we describe a novel method for learning a type of Synchronous Tree Adjoining Grammar and associated probabilities from aligned tree/string training data. We introduce a method of converting these grammars to a weakly equivalent tree transducer for decoding. Finally, we show that adjoining results in an end-to-end improvement of +0.8 Bleu over a baseline statistical syntax-based MT model on a large-scale Arabic/English MT task.", "phrases": ["tree adjoining grammar", "machine translation decoder", "foundation"], "overall_score": 1.2632447706299021, "scores": [0.6531604943723403, 0.6196381270327678, 0.5496783446115678], "rank_score": 0.6074923220055587} -{"id": "lin-etal-2021-differentiable", "title": "Differentiable Open-Ended Commonsense Reasoning", "abstract": "Current commonsense reasoning research focuses on developing models that use commonsense knowledge to answer multiple-choice questions. 
However, systems designed to answer multiple-choice questions may not be useful in applications that do not provide a small list of candidate answers to choose from. As a step towards making commonsense reasoning research more realistic, we propose to study open-ended commonsense reasoning (OpenCSR) \u2014 the task of answering a commonsense question without any pre-defined choices \u2014 using as a resource only a corpus of commonsense facts written in natural language. OpenCSR is challenging due to a large decision space, and because many questions require implicit multi-hop reasoning. As an approach to OpenCSR, we propose DrFact, an efficient Differentiable model for multi-hop Reasoning over knowledge Facts. To evaluate OpenCSR methods, we adapt several popular commonsense reasoning benchmarks, and collect multiple new answers for each test question via crowd-sourcing. Experiments show that DrFact outperforms strong baseline methods by a large margin.", "phrases": ["open-ended commonsense reasoning", "opencsr", "choice"], "overall_score": 0.840421425833803, "scores": [0.6875008532683132, 0.5828500733158977, 0.5483568083766781], "rank_score": 0.6062359116536297} -{"id": "steinberger-etal-2012-jrc", "title": "JRC Eurovoc Indexer JEX - A freely available multi-label categorisation tool", "abstract": "EuroVoc (2012) is a highly multilingual thesaurus consisting of over 6,700 hierarchically organised subject domains used by European Institutions and many authorities in Member States of the European Union (EU) for the classification and retrieval of official documents. JEX is JRC-developed multi-label classification software that learns from manually labelled data to automatically assign EuroVoc descriptors to new documents in a profile-based category-ranking task. The JEX release consists of trained classifiers for 22 official EU languages, of parallel training data in the same languages, of an interface that allows viewing and amending the assignment results, and of a module that allows users to re-train the tool on their own document collections. JEX allows advanced users to change the document representation so as to possibly improve the categorisation result through linguistic pre-processing. JEX can be used as a tool for interactive EuroVoc descriptor assignment to increase speed and consistency of the human categorisation process, or it can be used fully automatically. The output of JEX is a language-independent EuroVoc feature vector lending itself also as input to various other Language Technology tasks, including cross-lingual clustering and classification, cross-lingual plagiarism detection, sentence selection and ranking, and more.", "phrases": ["jex", "multi-label classification software", "eurovoc descriptor"], "overall_score": 0.8361023586203186, "scores": [0.6376067974555788, 0.5918111689570988, 0.5799431232729716], "rank_score": 0.6031203632285497} -{"id": "suzgun-etal-2019-lstm", "title": "LSTM Networks Can Perform Dynamic Counting", "abstract": "In this paper, we systematically assess the ability of standard recurrent networks to perform dynamic counting and to encode hierarchical representations. All the neural models in our experiments are designed to be small-sized networks both to prevent them from memorizing the training sets and to visualize and interpret their behaviour at test time. 
Our results demonstrate that the Long Short-Term Memory (LSTM) networks can learn to recognize the well-balanced parenthesis language (Dyck-1) and the shuffles of multiple Dyck-1 languages, each defined over different parenthesis-pairs, by emulating simple real-time k-counter machines. To the best of our knowledge, this work is the first study to introduce the shuffle languages to analyze the computational power of neural networks. We also show that a single-layer LSTM with only one hidden unit is practically sufficient for recognizing the Dyck-1 language. However, none of our recurrent networks was able to yield a good performance on the Dyck-2 language learning task, which requires a model to have a stack-like mechanism for recognition.", "phrases": ["counting", "dyck-1", "shuffle", "k-counter machine", "formal language"], "overall_score": 1.252739617594548, "scores": [0.8739441165219498, 0.5436635720095676, 0.5328504582109462, 0.531917157071541, 0.5298267525673138], "rank_score": 0.6024404112762637}
-{"id": "bohnet-etal-2018-morphosyntactic", "title": "Morphosyntactic Tagging with a Meta-BiLSTM Model over Context Sensitive Token Encodings", "abstract": "The rise of neural networks, and particularly recurrent neural networks, has produced significant advances in part-of-speech tagging accuracy. One characteristic common among these models is the presence of rich initial word encodings. These encodings typically are composed of a recurrent character-based representation with dynamically and pre-trained word embeddings. However, these encodings do not consider a context wider than a single word and it is only through subsequent recurrent layers that word or sub-word information interacts. In this paper, we investigate models that use recurrent neural networks with sentence-level context for initial character and word-based representations. In particular we show that optimal results are obtained by integrating these context sensitive representations through synchronized training with a meta-model that learns to combine their states.", "phrases": ["meta-bilstm model", "part-of-speech", "word embedding", "morphological tagging"], "overall_score": 1.48803069323188, "scores": [0.783944883699662, 0.567471964262902, 0.5221210068650604, 0.5217725579137658], "rank_score": 0.5988276031853476}
-{"id": "lita-etal-2003-truecasing", "title": "tRuEcasIng", "abstract": "Truecasing is the process of restoring case information to badly-cased or non-cased text. This paper explores truecasing issues and proposes a statistical, language modeling based truecaser which achieves an accuracy of ~98% on news articles. Task based evaluation shows a 26% F-measure improvement in named entity recognition when using truecasing. In the context of automatic content extraction, mention detection on automatic speech recognition text is also improved by a factor of 8. Truecasing also enhances machine translation output legibility and yields a BLEU score improvement of 80.2%.
This paper argues for the use of truecasing as a valuable component in text processing applications.", "phrases": ["case information", "truecasing", "trigram language model"], "overall_score": 1.315409569121153, "scores": [0.6931471805599453, 0.5668817411518997, 0.5359771616297242], "rank_score": 0.5986686944471897}
-{"id": "nerbonne-wiersma-2006-measure", "title": "A Measure of Aggregate Syntactic Distance", "abstract": "We compare vectors containing counts of trigrams of part-of-speech (POS) tags in order to obtain an aggregate measure of syntax difference. Since lexical syntactic categories reflect more abstract syntax as well, we argue that this procedure reflects more than just the basic syntactic categories. We tag the material automatically and analyze the frequency vectors for POS trigrams using a permutation test. A test analysis of a 305,000 word corpus containing the English of Finnish emigrants to Australia is promising in that the procedure proposed works well in distinguishing two different groups (adult vs. child emigrants) and also in highlighting syntactic deviations between the two groups.", "phrases": ["syntactic distance", "part-of-speech", "permutation test"], "overall_score": 0.9622130736269, "scores": [0.6399531470929894, 0.57698585181403, 0.5766307924960995], "rank_score": 0.5978565971343729}
-{"id": "montazery-faili-2010-automatic", "title": "Automatic Persian WordNet Construction", "abstract": "In this paper, an automatic method for Persian WordNet construction based on Princeton WordNet 2.1 (PWN) is introduced. The proposed approach uses Persian and English corpora as well as a bilingual dictionary in order to make a mapping between PWN synsets and Persian words. Our method calculates a score for each candidate synset of a given Persian word and, for each of its translations, it selects the synset with maximum score as a link to the Persian word. The manual evaluation on selected links proposed by our method on 500 randomly selected Persian words shows about 76.4% quality with respect to the precision measure. By augmenting the Persian WordNet with the unambiguous words, the total accuracy of the automatically extracted Persian WordNet is about 82.6%, which outperforms the previously semi-automatically generated Persian WordNet by about 12.6%.", "phrases": ["persian wordnet construction", "automatic method", "pwn"], "overall_score": 0.4143739679058841, "scores": [0.6789580698907898, 0.5721649114264723, 0.5423228243966424], "rank_score": 0.5978152685713015}
-{"id": "kallmeyer-romero-2004-ltag", "title": "LTAG Semantics with Semantic Unification", "abstract": "This paper sets up a framework for LTAG (Lexicalized Tree Adjoining Grammar) semantics that brings together ideas from different recent approaches addressing some shortcomings of TAG semantics based on the derivation tree. Within this framework, several sample analyses are proposed, and it is shown that the framework allows one to analyze data that have been claimed to be problematic for derivation tree based LTAG semantics approaches.", "phrases": ["tag semantic", "ltag", "quantifier"], "overall_score": 1.1616824256178597, "scores": [0.6025637565862103, 0.5994540335467283, 0.5889422285223292], "rank_score": 0.5969866728850893}
-{"id": "cook-etal-2014-novel", "title": "Novel Word-sense Identification", "abstract": "Automatic lexical acquisition has been an active area of research in computational linguistics for over two decades, but the automatic identification of new word-senses has received attention only very recently.
Previous work on this topic has been limited by the availability of appropriate evaluation resources. In this paper we present the largest corpus-based dataset of diachronic sense differences to date, which we believe will encourage further work in this area. We then describe several extensions to a state-of-the-art topic modelling approach for identifying new word-senses. This adapted method shows superior performance on our dataset of two different corpus pairs to that of the original method for both: (a) types having taken on a novel sense over time; and (b) the token instances of such novel senses.", "phrases": ["novel sense", "reference corpus", "probability distribution"], "overall_score": 1.4293948883870446, "scores": [0.6144555582754352, 0.6046649855658037, 0.5691913619396897], "rank_score": 0.5961039685936429}
-{"id": "madotto-etal-2020-plug", "title": "Plug-and-Play Conversational Models", "abstract": "There has been considerable progress made towards conversational models that generate coherent and fluent responses; however, this often involves training large language models on large dialogue datasets, such as Reddit. These large conversational models provide little control over the generated responses, and this control is further limited in the absence of annotated conversational datasets for attribute-specific generation that can be used for fine-tuning the model. In this paper, we first propose and evaluate plug-and-play methods for controllable response generation, which do not require dialogue-specific datasets and do not rely on fine-tuning a large model. While effective, the decoding procedure induces considerable computational overhead, rendering the conversational model unsuitable for interactive usage. To overcome this, we introduce an approach that does not require further computation at decoding time, while also not requiring any fine-tuning of a large language model. We demonstrate, through extensive automatic and human evaluation, a high degree of control over the generated conversational responses with regard to multiple desired attributes, while being fluent.", "phrases": ["conversational model", "plug-and-play method", "response generation"], "overall_score": 1.3055247057682366, "scores": [0.6225878378848697, 0.620889510751734, 0.5390323491444601], "rank_score": 0.5941698992603546}
-{"id": "huang-chen-2011-chinese", "title": "Chinese Discourse Relation Recognition", "abstract": "The challenging issues of discourse relation recognition in Chinese are addressed. Due to the lack of Chinese discourse corpora, we construct a moderate corpus with human-annotated discourse relations. Based on the corpus, a statistical classifier is proposed, and various features are explored in the experiments. The experimental results show that our method achieves an accuracy of 88.28% and an F-Score of 63.69% in four-class classification and achieves an F-Score of 93.57% in the best case.", "phrases": ["discourse relation recognition", "statistical classifier", "comparison relation"], "overall_score": 1.4753535813210445, "scores": [0.6751288910725736, 0.5553464861130983, 0.5507024969774252], "rank_score": 0.5937259580543657}
-{"id": "moryossef-etal-2021-data", "title": "Data Augmentation for Sign Language Gloss Translation", "abstract": "Sign language translation (SLT) is often decomposed into video-to-gloss recognition and gloss-to-text translation, where a gloss is a sequence of transcribed spoken-language words in the order in which they are signed.
We focus here on gloss-to-text translation, which we treat as a low-resource neural machine translation (NMT) problem. However, gloss-to-text translation differs from traditional low-resource NMT because gloss-text pairs often have a higher lexical overlap and lower syntactic overlap than pairs of spoken languages. We exploit this lexical overlap and handle syntactic divergence by proposing two rule-based heuristics that generate pseudo-parallel gloss-text pairs from monolingual spoken language text. By pre-training on this synthetic data, we improve translation from American Sign Language (ASL) to English and German Sign Language (DGS) to German by up to 3.14 and 2.20 BLEU, respectively.", "phrases": ["gloss-to-text translation", "lexical overlap", "spoken language"], "overall_score": 1.2345715996788533, "scores": [0.6210750967950368, 0.5931798453690608, 0.566855382314939], "rank_score": 0.5937034414930121}
-{"id": "liu-zhang-2017-attention", "title": "Attention Modeling for Targeted Sentiment", "abstract": "Neural network models have been used for target-dependent sentiment analysis. Previous work focuses on learning a target-specific representation for a given input sentence which is used for classification. However, they do not explicitly model the contribution of each word in a sentence with respect to targeted sentiment polarities. We investigate an attention model to this end. In particular, a vanilla LSTM model is used to induce an attention value of the whole sentence. The model is further extended to differentiate left and right contexts given a certain target following previous work. Results show that by using attention to model the contribution of each word with respect to the target, our model gives significantly improved results over two standard benchmarks. We report the best accuracy for this task.", "phrases": ["sentiment classification", "attention model", "good accuracy"], "overall_score": 1.3028681820273615, "scores": [0.62615904714512, 0.6237327947009981, 0.5289907477618305], "rank_score": 0.5929608632026495}
-{"id": "puduppully-etal-2019-data", "title": "Data-to-text Generation with Entity Modeling", "abstract": "Recent approaches to data-to-text generation have shown great promise thanks to the use of large-scale datasets and the application of neural network architectures which are trained end-to-end. These models rely on representation learning to select content appropriately, structure it coherently, and verbalize it grammatically, treating entities as nothing more than vocabulary tokens. In this work we propose an entity-centric neural architecture for data-to-text generation. Our model creates entity-specific representations which are dynamically updated. Text is generated conditioned on the data input and entity memory representations using hierarchical attention at each time step. We present experiments on the RotoWire benchmark and a (five times larger) new dataset on the baseball domain which we create.
Our results show that the proposed model outperforms competitive baselines in automatic and human evaluation.", "phrases": ["large-scale dataset", "data-to-text generation", "entity representation"], "overall_score": 1.5182165800153211, "scores": [0.6530909615782889, 0.5751516816465266, 0.5474843226103231], "rank_score": 0.5919089886117129} -{"id": "kim-etal-2020-unsupervised", "title": "When and Why is Unsupervised Neural Machine Translation Useless?", "abstract": "This paper studies the practicality of the current state-of-the-art unsupervised methods in neural machine translation (NMT). In ten translation tasks with various data settings, we analyze the conditions under which the unsupervised methods fail to produce reasonable translations. We show that their performance is severely affected by linguistic dissimilarity and domain mismatch between source and target monolingual data. Such conditions are common for low-resource language pairs, where unsupervised learning works poorly. In all of our experiments, supervised and semi-supervised baselines with 50k-sentence bilingual data outperform the best unsupervised results. Our analyses pinpoint the limits of the current unsupervised NMT and also suggest immediate research directions.", "phrases": ["neural machine translation", "monolingual data", "good unsupervised system"], "overall_score": 1.3593169409097046, "scores": [0.6611180537286299, 0.5707310402735638, 0.5391824457818863], "rank_score": 0.5903438465946933} -{"id": "lin-etal-2003-word", "title": "Word-Transliteration Alignment", "abstract": "The named-entity phrases in free text represent a formidable challenge to text analysis. Translating a named-entity is important for the task of Cross Language Information Retrieval and Question Answering. However, both tasks are not easy to handle because named-entities found in free text are often not listed in a monolingual or bilingual dictionary. Although it is possible to identify and translate named-entities on the fly without a list of proper names and transliterations, an extensive list certainly will ensure the high accuracy rate of text analysis. We use a list of proper names and transliterations to train a Machine Transliteration Model. With the model it is possible to extract proper names and their transliterations in a bilingual corpus with high average precision and recall rates.", "phrases": ["proper name", "transliteration", "average precision"], "overall_score": 0.6482571074547919, "scores": [0.6623802206081709, 0.5608398660466923, 0.5469870577801379], "rank_score": 0.5900690481450004} -{"id": "joty-etal-2017-cross", "title": "Cross-language Learning with Adversarial Neural Networks", "abstract": "We address the problem of cross-language adaptation for question-question similarity reranking in community question answering, with the objective to port a system trained on one input language to another input language given labeled training data for the first language and only unlabeled data for the second language. In particular, we propose to use adversarial training of neural networks to learn high-level features that are discriminative for the main learning task, and at the same time are invariant across the input languages. 
The evaluation results show sizable improvements for our cross-language adversarial neural network (CLANN) model over a strong non-adversarial system.", "phrases": ["cross-language adaptation", "adversarial training", "clann"], "overall_score": 1.2261606730444308, "scores": [0.6201421261739632, 0.6133058502582873, 0.5355279459020237], "rank_score": 0.5896586407780914}
-{"id": "aghajanyan-etal-2020-conversational", "title": "Conversational Semantic Parsing", "abstract": "The structured representation for semantic parsing in task-oriented assistant systems is geared towards simple understanding of one-turn queries. Due to the limitations of the representation, the session-based properties such as co-reference resolution and context carryover are processed downstream in a pipelined system. In this paper, we propose a semantic representation for such task-oriented conversational systems that can represent concepts such as co-reference and context carryover, enabling comprehensive understanding of queries in a session. We release a new session-based, compositional task-oriented parsing dataset of 20k sessions consisting of 60k utterances. Unlike Dialog State Tracking Challenges, the queries in the dataset have compositional forms. We propose a new family of Seq2Seq models for the session-based parsing above, which also set state-of-the-art in ATIS, SNIPS, TOP and DSTC2. Notably, we improve the best known results on DSTC2 by up to 5 points for slot-carryover.", "phrases": ["semantic parsing", "query", "limitation"], "overall_score": 1.1466654942685421, "scores": [0.6493242094099906, 0.5698750804237985, 0.5486092004377948], "rank_score": 0.5892694967571946}
-{"id": "cotterell-etal-2014-stochastic", "title": "Stochastic Contextual Edit Distance and Probabilistic FSTs", "abstract": "String similarity is most often measured by weighted or unweighted edit distance d(x, y). Ristad and Yianilos (1998) defined stochastic edit distance\u2014a probability distribution p(y | x) whose parameters can be trained from data. We generalize this so that the probability of choosing each edit operation can depend on contextual features. We show how to construct and train a probabilistic finite-state transducer that computes our stochastic contextual edit distance. To illustrate the improvement from conditioning on context, we model typos found in social media text.", "phrases": ["edit distance", "finite-state transducer", "unigram model"], "overall_score": 1.2253201416707207, "scores": [0.622212203800785, 0.5871640812120652, 0.5583870068768608], "rank_score": 0.5892544306299037}
-{"id": "power-williams-2012-generating", "title": "Generating Numerical Approximations", "abstract": "We describe a computational model for planning phrases like \u201cmore than a quarter\u201d and \u201c25.9 per cent\u201d which describe proportions at different levels of precision. The model lays out the key choices in planning a numerical description, using formal definitions of mathematical form (e.g., the distinction between fractions and percentages) and roundness adapted from earlier studies. The task is modeled as a constraint satisfaction problem, with solutions subsequently ranked by preferences (e.g., for roundness). Detailed constraints are based on a corpus of numerical expressions collected in the NumGen project (NumGen: Generating intelligent descriptions of numerical quantities for people with different levels of numeracy, http://mcs.open.ac.uk/sw6629/numgen;
NumGen was funded by the Economic and Social Research Council under Grant Ref. RES-000-22-2760) and evaluated through empirical studies in which subjects were asked to produce (or complete) numerical expressions in specified contexts.", "phrases": ["fraction", "preference", "numerical expression"], "overall_score": 0.8167055600685693, "scores": [0.6193088068299258, 0.5799191193791251, 0.5681576658570017], "rank_score": 0.5891285306886842} -{"id": "huang-etal-2010-classical", "title": "Classical Chinese Sentence Segmentation", "abstract": "Sentence segmentation is a fundamental issue in Classical Chinese language processing. To facilitate reading and processing of the raw Classical Chinese data, we propose a statistical method to split unstructured Classical Chinese text into smaller pieces such as sentences and clauses. The segmenter based on the conditional random field (CRF) model is tested under different tagging schemes and various features including n-gram, jump, word class, and phonetic information. We evaluated our method on four datasets from several eras (i.e., from the 5th century BCE to the 19th century). Our CRF segmenter achieves an F-score of 83.34% and can be applied on a variety of data from different eras.", "phrases": ["chinese text", "clause", "n-gram", "jump", "phonetic information"], "overall_score": 0.8159932070326773, "scores": [0.6311112446251917, 0.6033559677127307, 0.5877662029830122, 0.5769717221553458, 0.5438682454865315], "rank_score": 0.5886146765925624} -{"id": "kuribayashi-etal-2021-lower", "title": "Lower Perplexity is Not Always Human-Like", "abstract": "In computational psycholinguistics, various language models have been evaluated against human reading behavior (e.g., eye movement) to build human-like computational models. However, most previous efforts have focused almost exclusively on English, despite the recent trend towards linguistic universal within the general community. In order to fill the gap, this paper investigates whether the established results in computational psycholinguistics can be generalized across languages. Specifically, we re-examine an established generalization \u2014the lower perplexity a language model has, the more human-like the language model is\u2014 in Japanese with typologically different structures from English. Our experiments demonstrate that this established generalization exhibits a surprising lack of universality; namely, lower perplexity is not always human-like. Moreover, this discrepancy between English and Japanese is further explored from the perspective of (non-)uniform information density. Overall, our results suggest that a cross-lingual evaluation will be necessary to construct human-like computational models.", "phrases": ["language model", "cross-lingual evaluation", "low perplexity"], "overall_score": 0.8148773783185977, "scores": [0.6552703581433519, 0.5633958882508352, 0.5447630825550742], "rank_score": 0.5878097763164204} -{"id": "hovy-2015-demographic", "title": "Demographic Factors Improve Classification Performance", "abstract": "Extra-linguistic factors influence language use, and are accounted for by speakers and listeners. Most natural language processing (NLP) tasks to date, however, treat language as uniform. This assumption can harm performance. We investigate the effect of including demographic information on performance in a variety of text-classification tasks. 
We find that by including age or gender information, we consistently and significantly improve performance over demographic-agnostic models. These results hold across three text-classification tasks in five languages.", "phrases": ["demographic information", "age", "gender"], "overall_score": 1.0530212647807788, "scores": [0.6166540009593677, 0.5743785166388758, 0.5720745559775179], "rank_score": 0.5877023578585873} -{"id": "widdows-2003-orthogonal", "title": "Orthogonal Negation in Vector Spaces for Modelling Word-Meanings and Document Retrieval", "abstract": "Standard IR systems can process queries such as \"web NOT internet\", enabling users who are interested in arachnids to avoid documents about computing. The documents retrieved for such a query should be irrelevant to the negated query term. Most systems implement this by reprocessing results after retrieval to remove documents containing the unwanted string of letters. This paper describes and evaluates a theoretically motivated method for removing unwanted meanings directly from the original query in vector models, with the same vector negation operator as used in quantum logic. Irrelevance in vector spaces is modelled using orthogonality, so query vectors are made orthogonal to the negated term or terms. As well as removing unwanted terms, this form of vector negation reduces the occurrence of synonyms and neighbours of the negated terms by as much as 76% compared with standard Boolean methods. By altering the query vector itself, vector negation removes not only unwanted strings but unwanted meanings.", "phrases": ["query", "unwanted string", "vector negation operator"], "overall_score": 0.8140982356228922, "scores": [0.6443084697806314, 0.5663044082558373, 0.5511303529579334], "rank_score": 0.5872477436648007} -{"id": "wu-etal-2020-neural", "title": "Neural Mixed Counting Models for Dispersed Topic Discovery", "abstract": "Mixed counting models that use the negative binomial distribution as the prior can well model over-dispersed and hierarchically dependent random variables; thus they have attracted much attention in mining dispersed document topics. However, the existing parameter inference method like Monte Carlo sampling is quite time-consuming. In this paper, we propose two efficient neural mixed counting models, i.e., the Negative Binomial-Neural Topic Model (NB-NTM) and the Gamma Negative Binomial-Neural Topic Model (GNB-NTM) for dispersed topic discovery. Neural variational inference algorithms are developed to infer model parameters by using the reparameterization of Gamma distribution and the Gaussian approximation of Poisson distribution. Experiments on real-world datasets indicate that our models outperform state-of-the-art baseline models in terms of perplexity and topic coherence. The results also validate that both NB-NTM and GNB-NTM can produce explainable intermediate variables by generating dispersed proportions of document topics.", "phrases": ["mixed counting model", "topic model", "gnb-ntm"], "overall_score": 0.8130532248724855, "scores": [0.622269342157394, 0.5993718584264858, 0.5378405826695913], "rank_score": 0.586493927751157} -{"id": "bicici-2018-robust", "title": "Robust parfda Statistical Machine Translation Results", "abstract": "We build parallel feature decay algorithms (parfda) Moses statistical machine translation (SMT) models for language pairs in the translation task. 
parfda obtains results close to the top constrained phrase-based SMT with an average of 2.252 BLEU points difference on WMT 2017 datasets, using significantly less computation for building SMT systems than would be spent using all available corpora. We obtain BLEU upper bounds based on target coverage to identify which systems used additional data. We use PRO for tuning to decrease fluctuations in the results and postprocess translation outputs to decrease translation errors due to the casing of words. F1 scores on the key phrases of the English to Turkish testsuite that we prepared reveal that parfda achieves 2nd best results. Truecasing translations before scoring obtained the best results overall.", "phrases": ["parfda", "key phrase", "good result"], "overall_score": 0.40632149349471575, "scores": [0.6121287092196005, 0.5845966974402668, 0.5618686043544033], "rank_score": 0.5861980036714236} -{"id": "nissim-markert-2003-syntactic", "title": "Syntactic Features and Word Similarity for Supervised Metonymy Resolution", "abstract": "We present a supervised machine learning algorithm for metonymy resolution, which exploits the similarity between examples of conventional metonymy. We show that syntactic head-modifier relations are a high precision feature for metonymy recognition but suffer from data sparseness. We partially overcome this problem by integrating a thesaurus and introducing simpler grammatical features, thereby preserving precision and increasing recall. Our algorithm generalises over two levels of contextual similarity. Resulting inferences exceed the complexity of inferences undertaken in word sense disambiguation. We also compare automatic and manual methods for syntactic feature extraction.", "phrases": ["metonymy resolution", "head-modifier relation", "grammatical role"], "overall_score": 1.28796896066151, "scores": [0.6546648485595039, 0.5828677560466486, 0.5210072014017036], "rank_score": 0.5861799353359521} -{"id": "mccrae-etal-2015-reconciling", "title": "Reconciling Heterogeneous Descriptions of Language Resources", "abstract": "Language resources are a cornerstone of linguistic research and for the development of natural language processing tools, but the discovery of relevant resources remains a challenging task. This is due to the fact that relevant metadata records are spread among different repositories and it is currently impossible to query all these repositories in an integrated fashion, as they use different data models and vocabularies. In this paper we present a first attempt to collect and harmonize the metadata of different repositories, thus making them queriable and browsable in an integrated way. We make use of RDF and linked data technologies for this and provide a first level of harmonization of the vocabularies used in the different resources by mapping them to standard RDF vocabularies including Dublin Core and DCAT. Further, we present an approach that relies on NLP and in particular word sense disambiguation techniques to harmonize resources by mapping values of attributes, such as the type, license or intended use of a resource, into normalized values. 
Finally, as there are duplicate entries within the same repository as well as across different repositories, we also report results of detection of these duplicates.", "phrases": ["language resource", "repository", "rdf"], "overall_score": 0.8122374074293234, "scores": [0.6470554076361772, 0.5728562731470038, 0.5378046388010087], "rank_score": 0.5859054398613966} -{"id": "kshirsagar-etal-2017-detecting", "title": "Detecting and Explaining Crisis", "abstract": "Individuals on social media may reveal themselves to be in various states of crisis (e.g. suicide, self-harm, abuse, or eating disorders). Detecting crisis from social media text automatically and accurately can have profound consequences. However, detecting a general state of crisis without explaining why has limited applications. An explanation in this context is a coherent, concise subset of the text that rationalizes the crisis detection. We explore several methods to detect and explain crisis using a combination of neural and non-neural techniques. We evaluate these techniques on a unique data set obtained from Koko, an anonymous emotional support network available through various messaging applications. We annotate a small subset of the samples labeled with crisis with corresponding explanations. Our best technique significantly outperforms the baseline for detection and explanation.", "phrases": ["crisis", "self-harm", "explanation"], "overall_score": 0.9428578972970098, "scores": [0.6369551805644456, 0.5932041384664476, 0.527332330717259], "rank_score": 0.5858305499160508} -{"id": "bellare-mccallum-2009-generalized", "title": "Generalized Expectation Criteria for Bootstrapping Extractors using Record-Text Alignment", "abstract": "Traditionally, machine learning approaches for information extraction require human annotated data that can be costly and time-consuming to produce. However, in many cases, there already exists a database (DB) with schema related to the desired output, and records related to the expected input text. We present a conditional random field (CRF) that aligns tokens of a given DB record and its realization in text. The CRF model is trained using only the available DB and unlabeled text with generalized expectation criteria. An annotation of the text induced from inferred alignments is used to train an information extractor. We evaluate our method on a citation extraction task in which alignments between DBLP database records and citation texts are used to train an extractor. Experimental results demonstrate an error reduction of 35% over a previous state-of-the-art method that uses heuristic alignments.", "phrases": ["database", "information extractor", "generalized expectation criterion"], "overall_score": 0.6432853091356953, "scores": [0.6024296074582931, 0.5947561699229726, 0.5594447894829783], "rank_score": 0.5855435222880813} -{"id": "menezes-quirk-2005-dependency", "title": "Dependency Treelet Translation: The Convergence of Statistical and Example-based Machine-translation?", "abstract": "We describe a novel approach to machine translation that combines the strengths of the two leading corpus-based approaches: Phrasal SMT and EBMT. We use a syntactically informed decoder and reordering model based on the source dependency tree, in combination with conventional SMT models to incorporate the power of phrasal SMT with the linguistic generality available in a parser. We show that this approach significantly outperforms a leading string-based Phrasal SMT decoder and an EBMT system. 
We present results from two radically different language pairs, and investigate the sensitivity of this approach to parse quality by using two distinct parsers and oracle experiments. We also validate our automated BLEU scores with a small human evaluation.", "phrases": ["dependency structure", "treelet approach", "source side"], "overall_score": 1.1388296472758075, "scores": [0.6077222388201133, 0.5856031257605817, 0.5624026393490007], "rank_score": 0.5852426679765653} -{"id": "liu-etal-2022-cross", "title": "Cross-Modal Discrete Representation Learning", "abstract": "In contrast to recent advances focusing on high-level representation learning across modalities, in this work we present a self-supervised learning framework that is able to learn a representation that captures finer levels of granularity across different modalities such as concepts or events represented by visual objects or spoken words. Our framework relies on a discretized embedding space created via vector quantization that is shared across different modalities. Beyond the shared embedding space, we propose a Cross-Modal Code Matching objective that forces the representations from different views (modalities) to have a similar distribution over the discrete embedding space such that cross-modal objects/actions localization can be performed without direct supervision. We show that the proposed discretized multi-modal fine-grained representation (e.g., pixel/word/frame) can complement high-level summary representations (e.g., video/sentence/waveform) for improved performance on cross-modal retrieval tasks. We also observe that the discretized representation uses individual clusters to represent the same semantic concept across modalities.", "phrases": ["different modality", "vector quantization", "video", "cross-modal retrieval task"], "overall_score": 0.9417840313314911, "scores": [0.6337410550469078, 0.5956047757398556, 0.5772187432935708, 0.5340887038262236], "rank_score": 0.5851633194766395} -{"id": "graham-van-genabith-2008-packed", "title": "Packed rules for automatic transfer-rule induction", "abstract": "We present a method of encoding transfer rules in a highly efficient packed structure using contextualized constraints (Maxwell and Kaplan, 1991), an existing method of encoding adopted from LFG parsing (Kaplan and Bresnan, 1982; Bresnan, 2001; Dalrymple, 2001). The packed representation allows us to encode O(2^n) transfer rules in a single packed representation only requiring O(n) storage space. Besides reducing space requirements, the representation also has a high impact on the amount of time taken to load large numbers of transfer rules to memory with very little trade-off in time needed to unpack the rules. We include an experimental evaluation which shows a considerable reduction in space and time requirements for a large set of automatically induced transfer rules by storing the rules in the packed representation.", "phrases": ["rule induction approach", "generator", "van"], "overall_score": 0.8108091159761898, "scores": [0.6384185504784666, 0.5893854122904745, 0.5268214733206789], "rank_score": 0.5848751453632067} -{"id": "beck-etal-2014-joint", "title": "Joint Emotion Analysis via Multi-task Gaussian Processes", "abstract": "We propose a model for jointly predicting multiple emotions in natural language sentences. Our model is based on a low-rank coregionalisation approach, which combines a vector-valued Gaussian Process with a rich parameterisation scheme. 
We show that our approach is able to learn correlations and anti-correlations between emotions on a news headlines dataset. The proposed model outperforms both single-task baselines and other multi-task approaches.", "phrases": ["multi-task", "gaussian process", "emotion classification"], "overall_score": 1.04652842951153, "scores": [0.6113868608238872, 0.5812760623903083, 0.5595729892809231], "rank_score": 0.5840786374983729} -{"id": "bojar-2007-english", "title": "English-to-Czech Factored Machine Translation", "abstract": "This paper describes experiments with English-to-Czech phrase-based machine translation. Additional annotation of input and output tokens (multiple factors) is used to explicitly model morphology. We vary the translation scenario (the setup of multiple factors) and the amount of information in the morphological tags. Experimental results demonstrate significant improvement of translation quality in terms of BLEU.", "phrases": ["output token", "multiple factor", "morphological tag", "english-to-czech"], "overall_score": 0.8096865556036235, "scores": [0.6187553951981271, 0.5902171203576444, 0.5705609059622725, 0.5567281353695831], "rank_score": 0.5840653892219068} -{"id": "husain-etal-2007-simple", "title": "Simple Preposition Correspondence: A Problem in English to Indian Language Machine Translation", "abstract": "The paper describes an approach to automatically select from Indian Language the appropriate lexical correspondence of English simple preposition. The paper describes this task from a Machine Translation (MT) perspective. We use the properties of the head and complement of the preposition to select the appropriate sense in the target language. We later show that the results obtained from this approach are promising.", "phrases": ["indian language", "lexical correspondence", "english simple preposition"], "overall_score": 0.6416364500692197, "scores": [0.6363589254938878, 0.5698912758220565, 0.5458777969438433], "rank_score": 0.5840426660865958} -{"id": "nahnsen-2009-domain", "title": "Domain-Independent Shallow Sentence Ordering", "abstract": "We present a shallow approach to the sentence ordering problem. The employed features are based on discourse entities, shallow syntactic analysis, and temporal precedence relations retrieved from VerbOcean. We show that these relatively simple features perform well in a machine learning algorithm on datasets containing sequences of events, and that the resulting models achieve optimal performance with small amounts of training data. The model does not yet perform well on datasets describing the consequences of events, such as the destructions after an earthquake.", "phrases": ["discourse entity", "shallow syntactic analysis", "precedence relation"], "overall_score": 0.641467346017303, "scores": [0.6153683672782693, 0.5807113305056455, 0.5555865250515643], "rank_score": 0.5838887409451597} -{"id": "morita-etal-2013-subtree", "title": "Subtree Extractive Summarization via Submodular Maximization", "abstract": "This study proposes a text summarization model that simultaneously performs sentence extraction and compression. We translate the text summarization task into a problem of extracting a set of dependency subtrees in the document cluster. We also encode obligatory case constraints as must-link dependency constraints in order to guarantee the readability of the generated summary. 
In order to handle the subtree extraction problem, we investigate a new class of submodular maximization problem, and a new algorithm that has the approximation ratio (1/2)(1 \u2212 e^{\u22121}). Our experiments with the NTCIR ACLIA test collections show that our approach outperforms a state-of-the-art algorithm.", "phrases": ["compression", "text summarization task", "submodular maximization problem"], "overall_score": 1.0460397827709953, "scores": [0.6123738711922092, 0.575821683619866, 0.5632222008674771], "rank_score": 0.5838059185598508} -{"id": "melamud-etal-2014-probabilistic", "title": "Probabilistic Modeling of Joint-context in Distributional Similarity", "abstract": "Most traditional distributional similarity models fail to capture syntagmatic patterns that group together multiple word features within the same joint context. In this work we introduce a novel generic distributional similarity scheme under which the power of probabilistic models can be leveraged to effectively model joint contexts. Based on this scheme, we implement a concrete model which utilizes probabilistic n-gram language models. Our evaluations suggest that this model is particularly well-suited for measuring similarity for verbs, which are known to exhibit richer syntagmatic patterns, while maintaining comparable or better performance with respect to competitive baselines for nouns. Following this, we propose our scheme as a framework for future semantic similarity models leveraging the substantial body of work that exists in probabilistic language modeling.", "phrases": ["joint context", "similarity scheme", "hypernymy"], "overall_score": 1.135979836650165, "scores": [0.6003104891739913, 0.5975761523298452, 0.5534478235561036], "rank_score": 0.58377815501998} -{"id": "lawrence-etal-2019-attending", "title": "Attending to Future Tokens for Bidirectional Sequence Generation", "abstract": "Neural sequence generation is typically performed token-by-token and left-to-right. Whenever a token is generated only previously produced tokens are taken into consideration. In contrast, for problems such as sequence classification, bidirectional attention, which takes both past and future tokens into consideration, has been shown to perform much better. We propose to make the sequence generation process bidirectional by employing special placeholder tokens. Treated as a node in a fully connected graph, a placeholder token can take past and future tokens into consideration when generating the actual output token. We verify the effectiveness of our approach experimentally on two conversational tasks where the proposed bidirectional model outperforms competitive baselines by a large margin.", "phrases": ["future token", "bidirectional attention", "placeholder token"], "overall_score": 1.2821607803889363, "scores": [0.6263752474489053, 0.5716922702120059, 0.5525420380678202], "rank_score": 0.5835365185762438} -{"id": "zhang-wang-2009-cross", "title": "Cross-Domain Dependency Parsing Using a Deep Linguistic Grammar", "abstract": "Pure statistical parsing systems achieve high in-domain accuracy but perform poorly out-domain. In this paper, we propose two different approaches to produce syntactic dependency structures using a large-scale hand-crafted HPSG grammar. 
The dependency backbone of an HPSG analysis is used to provide general linguistic insights which, when combined with state-of-the-art statistical dependency parsing models, achieve performance improvements on out-domain tests.", "phrases": ["bi-lexical syntactic dependency", "deepbank", "theory"], "overall_score": 1.0445817880248467, "scores": [0.6432558515234481, 0.5661944695957241, 0.5395262674765359], "rank_score": 0.5829921961985693} -{"id": "mason-charniak-2014-nonparametric", "title": "Nonparametric Method for Data-driven Image Captioning", "abstract": "We present a nonparametric density estimation technique for image caption generation. Data-driven matching methods have been shown to be effective for a variety of complex problems in Computer Vision. These methods reduce an inference problem for an unknown image to finding an existing labeled image which is semantically similar. However, related approaches for image caption generation (Ordonez et al., 2011; Kuznetsova et al., 2012) are hampered by noisy estimations of visual content and poor alignment between images and human-written captions. Our work addresses this challenge by estimating a word frequency representation of the visual content of a query image. This allows us to cast caption generation as an extractive summarization problem. Our model strongly outperforms two state-of-the-art caption extraction systems according to human judgments of caption relevance.", "phrases": ["image", "caption", "extractive summarization problem"], "overall_score": 0.8079122500313316, "scores": [0.6149480719046008, 0.5693599676036964, 0.5640484553821724], "rank_score": 0.5827854982968232} -{"id": "augustinus-etal-2012-example", "title": "Example-Based Treebank Querying", "abstract": "The recent construction of large linguistic treebanks for spoken and written Dutch (e.g. CGN, LASSY, Alpino) has created new and exciting opportunities for the empirical investigation of Dutch syntax and semantics. However, the exploitation of those treebanks requires knowledge of specific data structures and query languages such as XPath. Linguists who are unfamiliar with formal languages are often reluctant towards learning such a language. In order to make treebank querying more attractive for non-technical users we developed GrETEL (Greedy Extraction of Trees for Empirical Linguistics), a query engine in which linguists can use natural language examples as a starting point for searching the Lassy treebank without knowledge about tree representations nor formal query languages. By allowing linguists to search for similar constructions as the example they provide, we hope to bridge the gap between traditional and computational linguistics. Two case studies are conducted to provide a concrete demonstration of the tool. The architecture of the tool is optimised for searching the LASSY treebank, but the approach can be adapted to other treebank lay-outs.", "phrases": ["treebank", "dutch", "empirical linguistics", "query engine"], "overall_score": 1.1337439011686719, "scores": [0.6326280345645892, 0.6020983454602519, 0.5496112493902134, 0.5461788165145254], "rank_score": 0.5826291114823949} -{"id": "agirre-etal-2006-two", "title": "Two graph-based algorithms for state-of-the-art WSD", "abstract": "This paper explores the use of two graph algorithms for unsupervised induction and tagging of nominal word senses based on corpora. Our main contribution is the optimization of the free parameters of those algorithms and its evaluation against publicly available gold standards. 
We present a thorough evaluation comprising supervised and unsupervised modes, and both lexical-sample and all-words tasks. The results show that, in spite of the information loss inherent to mapping the induced senses to the gold-standard, the optimization of parameters based on a small sample of nouns carries over to all nouns, performing close to supervised systems in the lexical sample task and yielding the second-best WSD systems for the Senseval-3 all-words task.", "phrases": ["word sense", "graph-based method", "fixed-list"], "overall_score": 1.341288159574192, "scores": [0.6249526139192779, 0.5903032616978684, 0.5322862634185727], "rank_score": 0.5825140463452397} -{"id": "meng-etal-2022-fast", "title": "Fast Nearest Neighbor Machine Translation", "abstract": "Though nearest neighbor Machine Translation (kNN-MT) (CITATION) has proved to introduce significant performance boosts over standard neural MT systems, it is prohibitively slow since it uses the entire reference corpus as the datastore for the nearest neighbor search. This means each step for each beam in the beam search has to search over the entire reference corpus. kNN-MT is thus two orders of magnitude slower than vanilla MT models, making it hard to be applied to real-world applications, especially online services. In this work, we propose Fast kNN-MT to address this issue. Fast kNN-MT constructs a significantly smaller datastore for the nearest neighbor search: for each word in a source sentence, Fast kNN-MT first selects its nearest token-level neighbors, which is limited to tokens that are the same as the query token. Then at each decoding step, in contrast to using the entire corpus as the datastore, the search space is limited to target tokens corresponding to the previously selected reference source tokens. This strategy avoids search through the whole datastore for nearest neighbors and drastically improves decoding efficiency. Without loss of performance, Fast kNN-MT is two orders of magnitude faster than kNN-MT, and is only two times slower than the standard NMT model. Fast kNN-MT enables the practical use of kNN-MT systems in real-world MT applications. The code is available at .", "phrases": ["neighbor", "knn-mt", "two-order", "source sentence", "efficiency"], "overall_score": 0.6391158571187212, "scores": [0.6260293186998906, 0.5897850089536143, 0.5743560413923138, 0.5598964427833758, 0.5586748057142711], "rank_score": 0.5817483235086931} -{"id": "taskar-etal-2004-max", "title": "Max-Margin Parsing", "abstract": "We present a novel discriminative approach to parsing inspired by the large-margin criterion underlying support vector machines. Our formulation uses a factorization analogous to the standard dynamic programs for parsing. In particular, it allows one to efficiently learn a model which discriminates among the entire space of parse trees, as opposed to reranking the top few candidates. Our models can condition on arbitrary features of input sentences, thus incorporating an important kind of lexical information without the added algorithmic complexity of modeling headedness. 
We provide an efficient algorithm for learning such models and show experimental evidence of the model\u2019s improved performance over a natural baseline model and a lexicalized probabilistic context-free grammar.", "phrases": ["dynamic program", "discriminative parsing", "gain"], "overall_score": 1.7971601559943728, "scores": [0.6308449707941396, 0.5676819673479662, 0.5457003085996696], "rank_score": 0.5814090822472585} -{"id": "mylonakis-simaan-2011-learning", "title": "Learning Hierarchical Translation Structure with Linguistic Annotations", "abstract": "While it is generally accepted that many translation phenomena are correlated with linguistic structures, employing linguistic syntax for translation has proven a highly non-trivial task. The key assumption behind many approaches is that translation is guided by the source and/or target language parse, employing rules extracted from the parse tree or performing tree transformations. These approaches enforce strict constraints and might overlook important translation phenomena that cross linguistic constituents. We propose a novel flexible modelling approach to introduce linguistic information of varying granularity from the source side. Our method induces joint probability synchronous grammars and estimates their parameters, by selecting and weighing together linguistically motivated rules according to an objective function directly targeting generalisation over future data. We obtain statistically significant improvements across 4 different language pairs with English as source, mounting up to +1.92 BLEU for Chinese as target.", "phrases": ["probability", "synchronous grammar", "syntactic label"], "overall_score": 1.0416279958356602, "scores": [0.610997510229033, 0.5789415287832592, 0.5540919211551885], "rank_score": 0.5813436533891603} -{"id": "wang-etal-2005-strictly", "title": "Strictly Lexical Dependency Parsing", "abstract": "We present a strictly lexical parsing model where all the parameters are based on the words. This model does not rely on part-of-speech tags or grammatical categories. It maximizes the conditional probability of the parse tree given the sentence. This is in contrast with most previous models that compute the joint probability of the parse tree and the sentence. Although the maximization of joint and conditional probabilities are theoretically equivalent, the conditional model allows us to use distributional word similarity to generalize the observed frequency counts in the training corpus. Our experiments with the Chinese Treebank show that the accuracy of the conditional model is 13.6% higher than the joint model and that the strictly lexicalized conditional model outperforms the corresponding unlexicalized model based on part-of-speech tags.", "phrases": ["parsing model", "chinese", "treebank data", "central role", "tremendous progress"], "overall_score": 1.129149016672069, "scores": [0.6234062839039763, 0.5793100980380018, 0.5710367284156824, 0.5649473826150712, 0.5626385468083192], "rank_score": 0.5802678079562102} -{"id": "graca-etal-2008-building", "title": "Building a Golden Collection of Parallel Multi-Language Word Alignment", "abstract": "This paper reports an experience on producing manual word alignments over six different language pairs (all combinations between Portuguese, English, French and Spanish) (Gra\u00e7a et al., 2008). 
Word alignment of each language pair is made over the first 100 sentences of the common test set from the Europarl corpora (Koehn, 2005), corresponding to 600 new annotated sentences. This collection is publicly available at http://www.l2f.inesc-id.pt/resources/translation/. It contains, to our knowledge, the first word alignment gold set for the Portuguese language, with three other languages. Besides, it is, to our knowledge, the first multi-language manual word aligned parallel corpus, where the same sentences are annotated for each language pair. We started by using the guidelines presented in (Mari\u00f1o, 2005) and performed several refinements: some due to under-specifications on the original guidelines, others because of disagreement on some choices. This led to the development of an extensive new set of guidelines for multi-lingual word alignment annotation that, we believe, makes the alignment process less ambiguous. We evaluate the inter-annotator agreement obtaining an average of 91.6% agreement between the different language pairs.", "phrases": ["portuguese", "word alignment annotation", "average"], "overall_score": 1.1289354235760434, "scores": [0.6222507505012306, 0.5617679615223719, 0.556455416431061], "rank_score": 0.580158042818221} -{"id": "pavlopoulos-etal-2017-deeper", "title": "Deeper Attention to Abusive User Content Moderation", "abstract": "Experimenting with a new dataset of 1.6M user comments from a news portal and an existing dataset of 115K Wikipedia talk page comments, we show that an RNN operating on word embeddings outperforms the previous state of the art in moderation, which used logistic regression or an MLP classifier with character or word n-grams. We also compare against a CNN operating on word embeddings, and a word-list baseline. A novel, deep, classification-specific attention mechanism improves the performance of the RNN further, and can also highlight suspicious words for free, without including highlighted words in the training data. We consider both fully automatic and semi-automatic moderation.", "phrases": ["abuse", "user content moderation", "moderator"], "overall_score": 1.8672497705729179, "scores": [0.6031709187617172, 0.5775685143181926, 0.5595418379281515], "rank_score": 0.5800937570026871} -{"id": "bella-etal-2020-exploring", "title": "Exploring the Language of Data", "abstract": "We set out to uncover the unique grammatical properties of an important yet so far under-researched type of natural language text: that of short labels typically found within structured datasets. We show that such labels obey a specific type of abbreviated grammar that we call the Language of Data, with properties significantly different from the kinds of text typically addressed in computational linguistics and NLP, such as `standard' written language or social media messages. We analyse orthography, parts of speech, and syntax over a large, bilingual, hand-annotated corpus of data labels collected from a variety of domains. We perform experiments on tokenisation, part-of-speech tagging, and named entity recognition over real-world structured data, demonstrating that models adapted to the Language of Data outperform those trained on standard text. 
These observations point in a new direction to be explored as future research, in order to develop new NLP tools and models dedicated to the Language of Data.", "phrases": ["unique grammatical property", "entity recognition", "real-world structured data"], "overall_score": 0.40176434588656346, "scores": [0.6304873912739293, 0.5669974225680278, 0.5413854744076726], "rank_score": 0.5796234294165433} -{"id": "guta-etal-2015-extended", "title": "Extended Translation Models in Phrase-based Decoding", "abstract": "We propose a novel extended translation model (ETM) to counteract some problems in phrase-based translation: The lack of translation context when using single-word phrases and uncaptured dependencies beyond phrase boundaries. The ETM operates on word-level and augments the IBM models by an additional bilingual word pair and a reordering operation. Its implementation in a phrase-based decoder introduces translation and reordering dependencies for single-word phrases and dependencies across phrase boundaries. Moreover, the model incorporates an explicit treatment of multiple and empty alignments. Its integration significantly outperforms competitive systems that include lexical and phrase translation models as well as hierarchical reordering models on 4 language pairs, by +0.7% BLEU on average. Although simpler and using fewer dependencies, the ETM proves to be on par with 7-gram operation sequence models (Durrani et al., 2013b).", "phrases": ["translation model", "ibm model", "bilingual word pair"], "overall_score": 0.80344459377076, "scores": [0.6469635648547297, 0.5522738224415688, 0.5394509092968867], "rank_score": 0.5795627655310617} -{"id": "lang-lapata-2011-unsupervised", "title": "Unsupervised Semantic Role Induction with Graph Partitioning", "abstract": "In this paper we present a method for unsupervised semantic role induction which we formalize as a graph partitioning problem. Argument instances of a verb are represented as vertices in a graph whose edge weights quantify their role-semantic similarity. Graph partitioning is realized with an algorithm that iteratively assigns vertices to clusters based on the cluster assignments of neighboring vertices. Our method is algorithmically and conceptually simple, especially with respect to how problem-specific knowledge is incorporated into the model. Experimental results on the CoNLL 2008 benchmark dataset demonstrate that our model is competitive with other unsupervised approaches in terms of F1 whilst attaining significantly higher cluster purity.", "phrases": ["semantic role induction", "graph partitioning problem", "vertex"], "overall_score": 1.204784612256506, "scores": [0.6582246549373074, 0.547288697072692, 0.5326234334317947], "rank_score": 0.579378928480598} -{"id": "eryigit-etal-2011-multiword", "title": "Multiword Expressions in Statistical Dependency Parsing", "abstract": "In this paper, we investigated the impact of extracting different types of multiword expressions (MWEs) in improving the accuracy of a data-driven dependency parser for a morphologically rich language (Turkish). We showed that in the training stage, the unification of MWEs of a certain type, namely compound verb and noun formations, has a negative effect on parsing accuracy by increasing the lexical sparsity. Our results gave a statistically significant improvement by using a variant of the treebank excluding this MWE type in the training stage. 
Our extrinsic evaluation of an ideal MWE recognizer (for only extracting MWEs of type named entities, duplications, numbers, dates and some predefined list of compound prepositions) showed that the preprocessing of the test data would improve the labeled parsing accuracy by 1.5%.", "phrases": ["mwes", "dependency parser", "turkish"], "overall_score": 1.2039096589308624, "scores": [0.6418438772100211, 0.5565029284412565, 0.5385276889666011], "rank_score": 0.5789581648726262} -{"id": "chang-2004-chinese", "title": "Chinese-English Parallel Corpus Construction and its Application", "abstract": "Chinese-English parallel corpora are key resources for Chinese-English cross-language information processing, Chinese-English bilingual lexicography, Chinese-English language research and teaching. But so far a large-scale Chinese-English corpus is still unavailable, given the difficulties and the intensive labour required. In this paper, our work towards building a large-scale Chinese-English parallel corpus is presented. We elaborate on the collection, annotation and mark-up of the parallel Chinese-English texts and the workflow that we used to construct the corpus. In addition, we also present our work toward building tools for constructing and using the corpus easily for different purposes. Among these tools, a parallel concordance tool developed by us is examined in detail. Several applications of the corpus being conducted are also introduced briefly in the paper.", "phrases": ["chinese-english parallel corpora", "information processing", "language research"], "overall_score": 0.6358883647500516, "scores": [0.6537695157134635, 0.5560805673280144, 0.5265815170117959], "rank_score": 0.5788105333510912} -{"id": "sasano-korhonen-2020-investigating", "title": "Investigating Word-Class Distributions in Word Vector Spaces", "abstract": "This paper presents an investigation on the distribution of word vectors belonging to a certain word class in a pre-trained word vector space. To this end, we made several assumptions about the distribution, modeled the distribution accordingly, and validated each assumption by comparing the goodness of each model. Specifically, we considered two types of word classes \u2013 the semantic class of direct objects of a verb and the semantic class in a thesaurus \u2013 and tried to build models that properly estimate how likely it is that a word in the vector space is a member of a given word class. Our results on selectional preference and WordNet datasets show that the centroid-based model will fail to achieve good enough performance, the geometry of the distribution and the existence of subgroups will have limited impact, and also the negative instances need to be considered for adequate modeling of the distribution. We further investigated the relationship between the scores calculated by each model and the degree of membership and found that discriminative learning-based models are best in finding the boundaries of a class, while models based on the offset between positive and negative instances perform best in determining the degree of membership.", "phrases": ["word class", "direct object", "geometry"], "overall_score": 0.6357576446097134, "scores": [0.6363327732308847, 0.571766058429579, 0.5279758085944719], "rank_score": 0.5786915467516452} -{"id": "tinsley-etal-2007-robust", "title": "Robust language pair-independent sub-tree alignment", "abstract": "Data-driven approaches to machine translation (MT) achieve state-of-the-art results. 
Many syntax-aware approaches, such as Example-Based MT and Data-Oriented Translation, make use of tree pairs aligned at sub-sentential level. Obtaining sub-sentential alignments manually is time-consuming and error-prone, and requires expert knowledge of both source and target languages. We propose a novel, language pair-independent algorithm which automatically induces alignments between phrase-structure trees. We evaluate the alignments themselves against a manually aligned gold standard, and perform an extrinsic evaluation by using the aligned data to train and test a DOT system. Our results show that translation accuracy is comparable to that of the same translation system trained on manually aligned data, and coverage improves.", "phrases": ["machine translation", "sub-sentential level", "node alignment technique"], "overall_score": 1.4841008041408994, "scores": [0.6082000856983055, 0.5945054304535149, 0.5331191696146953], "rank_score": 0.5786082285888385} -{"id": "gong-etal-2017-multi", "title": "Multi-Grained Chinese Word Segmentation", "abstract": "Traditionally, word segmentation (WS) adopts the single-grained formalism, where a sentence corresponds to a single word sequence. However, Sproat et al. (1997) show that the inter-native-speaker consistency ratio over Chinese word boundaries is only 76%, indicating single-grained WS (SWS) imposes unnecessary challenges on both manual annotation and statistical modeling. Moreover, WS results of different granularities can be complementary and beneficial for high-level applications. This work proposes and addresses multi-grained WS (MWS). We build a large-scale pseudo MWS dataset for model training and tuning by leveraging the annotation heterogeneity of three SWS datasets. Then we manually annotate 1,500 test sentences with true MWS annotations. Finally, we propose three benchmark approaches by casting MWS as constituent parsing and sequence labeling. Experiments and analysis lead to many interesting findings.", "phrases": ["word segmentation", "mws", "model training"], "overall_score": 0.9311216453671671, "scores": [0.6402346247673042, 0.5562516518795095, 0.5391289431269272], "rank_score": 0.5785384065912469} -{"id": "tannier-moriceau-2013-building", "title": "Building Event Threads out of Multiple News Articles", "abstract": "We present an approach for building multidocument event threads from a large corpus of newswire articles. An event thread is basically a succession of events belonging to the same story. It helps the reader to contextualize the information contained in a single article, by navigating backward or forward in the thread from this article. A specific effort is also made on the detection of reactions to a particular event. In order to build these event threads, we use a cascade of classifiers and other modules, taking advantage of the redundancy of information in the newswire corpus. We also share interesting comments concerning our manual annotation procedure for building a training and testing set.", "phrases": ["event thread", "succession", "story"], "overall_score": 0.6354625106076787, "scores": [0.6087969191759438, 0.5875370711494955, 0.5389347222922063], "rank_score": 0.5784229042058818} -{"id": "roth-woodsend-2014-composition", "title": "Composition of Word Representations Improves Semantic Role Labelling", "abstract": "State-of-the-art semantic role labelling systems require large annotated corpora to achieve full performance. 
Unfortunately, such corpora are expensive to produce and often do not generalize well across domains. Even in-domain, errors are often made where syntactic information does not provide sufficient cues. In this paper, we mitigate both of these problems by employing distributional word representations gathered from unlabelled data. While straight-forward word representations of predicates and arguments improve performance, we show that further gains are achieved by composing representations that model the interaction between predicate and argument, and capture full argument spans.", "phrases": ["word representation", "full argument span", "srl", "feature-based system"], "overall_score": 1.4363611151147324, "scores": [0.6216725086797558, 0.5882949496843007, 0.551276731264127, 0.5508927515921646], "rank_score": 0.578034235305087} -{"id": "clark-wicentwoski-2013-swatcs", "title": "SwatCS: Combining simple classifiers with estimated accuracy", "abstract": "This paper is an overview of the SwatCS system submitted to SemEval-2013 Task 2A: Contextual Polarity Disambiguation. The sentiment of individual phrases within a tweet is labeled using a combination of classifiers trained on a range of lexical features. The classifiers are combined by estimating the accuracy of the classifiers on each tweet. Performance is measured when using only the provided training data, and separately when including external data.", "phrases": ["classifier ensemble", "expression-level", "exception"], "overall_score": 0.8010208912517032, "scores": [0.6231056361664176, 0.5672081280045469, 0.5431295370149704], "rank_score": 0.5778144337286449} -{"id": "huang-riloff-2012-bootstrapped", "title": "Bootstrapped Training of Event Extraction Classifiers", "abstract": "Most event extraction systems are trained with supervised learning and rely on a collection of annotated documents. Due to the domain-specificity of this task, event extraction systems must be retrained with new annotated data for each domain. In this paper, we propose a bootstrapping solution for event role filler extraction that requires minimal human supervision. We aim to rapidly train a state-of-the-art event extraction system using a small set of \"seed nouns\" for each event role, a collection of relevant (in-domain) and irrelevant (out-of-domain) texts, and a semantic dictionary. The experimental results show that the bootstrapped system outperforms previous weakly supervised event extraction systems on the MUC-4 data set, and achieves performance levels comparable to supervised training with 700 manually annotated documents.", "phrases": ["event extraction", "human supervision", "noun"], "overall_score": 1.5642065951892183, "scores": [0.6256335229975665, 0.5658308581159583, 0.5413763851535482], "rank_score": 0.577613588755691} -{"id": "tsuruoka-etal-2009-stochastic", "title": "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty", "abstract": "Stochastic gradient descent (SGD) uses approximate gradients estimated from subsets of the training data and updates the parameters in an online fashion. This learning framework is attractive because it often requires much less training time in practice than batch training algorithms. However, L1-regularization, which is becoming popular in natural language processing because of its ability to produce compact models, cannot be efficiently applied in SGD training, due to the large dimensions of feature vectors and the fluctuations of approximate gradients. 
We present a simple method to solve these problems by penalizing the weights according to cumulative values for the L1 penalty. We evaluate the effectiveness of our method in three applications: text chunking, named entity recognition, and part-of-speech tagging. Experimental results demonstrate that our method can produce compact and accurate models much more quickly than a state-of-the-art quasi-Newton method for L1-regularized log-linear models.", "phrases": ["sgd", "training time", "stochastic gradient descent"], "overall_score": 0.9292146756495666, "scores": [0.6254313284080911, 0.5570080430818218, 0.5496212475697503], "rank_score": 0.5773535396865545} -{"id": "owczarzak-etal-2007-labelled", "title": "Labelled Dependencies in Machine Translation Evaluation", "abstract": "We present a method for evaluating the quality of Machine Translation (MT) output, using labelled dependencies produced by a Lexical-Functional Grammar (LFG) parser. Our dependency-based method, in contrast to most popular string-based evaluation metrics, does not unfairly penalize perfectly valid syntactic variations in the translation, and the addition of WordNet provides a way to accommodate lexical variation. In comparison with other metrics on 16,800 sentences of Chinese-English newswire text, our method reaches high correlation with human scores.", "phrases": ["reference sentence", "dependency graph", "term-based encoding"], "overall_score": 1.0338352610415231, "scores": [0.5933770821835592, 0.5806458465611587, 0.5569604071272519], "rank_score": 0.5769944452906567} -{"id": "kollar-etal-2018-alexa", "title": "The Alexa Meaning Representation Language", "abstract": "This paper introduces a meaning representation for spoken language understanding. The Alexa meaning representation language (AMRL), unlike previous approaches, which factor spoken utterances into domains, provides a common representation for how people communicate in spoken language. AMRL is a rooted graph, links to a large-scale ontology, supports cross-domain queries, fine-grained types, complex utterances and composition. A spoken language dataset has been collected for Alexa, which contains \u223c20k examples across eight domains. A version of this meaning representation was released to developers at a trade show in 2016.", "phrases": ["meaning representation", "query", "conversation system"], "overall_score": 0.9285073389851337, "scores": [0.6299534802237063, 0.5506572572807608, 0.5501314026148754], "rank_score": 0.5769140467064474} -{"id": "stevenson-greenwood-2006-comparing", "title": "Comparing Information Extraction Pattern Models", "abstract": "Several recently reported techniques for the automatic acquisition of Information Extraction (IE) systems have used dependency trees as the basis of their extraction pattern representation. These approaches have used a variety of pattern models (schemes for representing IE patterns based on particular parts of the dependency analysis). An appropriate model should be expressive enough to represent the information which is to be extracted from text without being overly complicated. Four previously reported pattern models are evaluated using existing IE evaluation corpora and three dependency parsers. 
It was found that one model, linked chains, could represent around 95% of the information of interest without generating an unwieldy number of possible patterns.", "phrases": ["information extraction", "pattern model", "new domain", "coverage"], "overall_score": 1.4331491879801677, "scores": [0.6426192319016201, 0.5919602740395297, 0.538789335541008, 0.5335978014739255], "rank_score": 0.5767416607390208} -{"id": "saggion-etal-2010-multilingual", "title": "Multilingual Summarization Evaluation without Human Models", "abstract": "We study correlation of rankings of text summarization systems using evaluation methods with and without human models. We apply our comparison framework to various well-established content-based evaluation measures in text summarization such as coverage, Responsiveness, Pyramids and Rouge studying their associations in various text summarization tasks including generic and focus-based multi-document summarization in English and generic single-document summarization in French and Spanish. The research is carried out using a new content-based evaluation framework called Fresa to compute a variety of divergences among probability distributions.", "phrases": ["summarization", "spanish", "automatic evaluation"], "overall_score": 1.0322061918191774, "scores": [0.6196201964132679, 0.5838579063391226, 0.5247776305864433], "rank_score": 0.576085244446278} -{"id": "angeli-uszkoreit-2013-language", "title": "Language-Independent Discriminative Parsing of Temporal Expressions", "abstract": "Temporal resolution systems are traditionally tuned to a particular language, requiring significant human effort to translate them to new languages. We present a language independent semantic parser for learning the interpretation of temporal phrases given only a corpus of utterances and the times they reference. We make use of a latent parse that encodes a language-flexible representation of time, and extract rich features over both the parse and associated temporal semantics. The parameters of the model are learned using a weakly supervised bootstrapping approach, without the need for manually tuned parameters or any other language expertise. We achieve state-of-the-art accuracy on all languages in the TempEval2 temporal normalization task, reporting a 4% improvement in both English and Spanish accuracy, and to our knowledge the first results for four other languages.", "phrases": ["temporal expression", "semantic parser", "limitation"], "overall_score": 1.1201495235821683, "scores": [0.6116585468926224, 0.5859838002492262, 0.5292866029835783], "rank_score": 0.5756429833751423} -{"id": "riezler-etal-2014-response", "title": "Response-based Learning for Grounded Machine Translation", "abstract": "We propose a novel learning approach for statistical machine translation (SMT) that allows us to extract supervision signals for structured learning from an extrinsic response to a translation input. We show how to generate responses by grounding SMT in the task of executing a semantic parse of a translated query against a database. Experiments on the GEOQUERY database show an improvement of about 6 points in F1-score for response-based learning over learning from references only on returning the correct answer from a semantic parse of a translated query. 
In general, our approach alleviates the dependency on human reference translations and solves the reachability problem in structured learning for SMT.", "phrases": ["structured learning", "semantic parse", "reference"], "overall_score": 0.7976826110247297, "scores": [0.599583183584877, 0.5802147680269154, 0.5464211690813139], "rank_score": 0.5754063735643687} -{"id": "yao-etal-2017-weakly", "title": "A Weakly Supervised Approach to Train Temporal Relation Classifiers and Acquire Regular Event Pairs Simultaneously", "abstract": "Capabilities of detecting temporal and causal relations between two events can benefit many applications. Most of existing temporal relation classifiers were trained in a supervised manner. Instead, we explore the observation that regular event pairs show a consistent temporal relation despite of their various contexts and these rich contexts can be used to train a contextual temporal relation classifier, which can further recognize new temporal relation contexts and identify new regular event pairs. We focus on detecting after and before temporal relations and design a weakly supervised learning approach that extracts thousands of regular event pairs and learns a contextual temporal relation classifier simultaneously. Evaluation shows that the acquired regular event pairs are of high quality and contain rich commonsense knowledge and domain specific knowledge. In addition, the weakly supervised trained temporal relation classifier achieves comparable performance with the state-of-the-art supervised systems.", "phrases": ["temporal relation", "relation classifier", "event pair", "observation"], "overall_score": 0.631945015456372, "scores": [0.6058543144679756, 0.5817821272710775, 0.5812278893434492, 0.5320202374762694], "rank_score": 0.5752211421396929} -{"id": "charniak-etal-2006-multilevel", "title": "Multilevel Coarse-to-Fine PCFG Parsing", "abstract": "We present a PCFG parsing algorithm that uses a multilevel coarse-to-fine (mlctf) scheme to improve the efficiency of search for the best parse. Our approach requires the user to specify a sequence of nested partitions or equivalence classes of the PCFG nonterminals. We define a sequence of PCFGs corresponding to each partition, where the nonterminals of each PCFG are clusters of nonterminals of the original source PCFG. We use the results of parsing at a coarser level (i.e., grammar defined in terms of a coarser partition) to prune the next finer level. We present experiments showing that with our algorithm the work load (as measured by the total number of constituents processed) is decreased by a factor of ten with no decrease in parsing accuracy compared to standard CKY parsing with the original PCFG. We suggest that the search space over mlctf algorithms is almost totally unexplored so that future work should be able to improve significantly on these results.", "phrases": ["pcfg", "constituent", "search space", "coarse-to-fine approach"], "overall_score": 1.4743508521626907, "scores": [0.6325394969664596, 0.5781448143444681, 0.5462016222794193, 0.5423420770894695], "rank_score": 0.5748070026699541} -{"id": "libovicky-helcl-2017-attention", "title": "Attention Strategies for Multi-Source Sequence-to-Sequence Learning", "abstract": "Modeling attention in neural multi-source sequence-to-sequence learning remains a relatively unexplored area, despite its usefulness in tasks that incorporate multiple source languages or modalities. 
We propose two novel approaches to combine the outputs of attention mechanisms over each source sequence, flat and hierarchical. We compare the proposed methods with existing techniques and present results of systematic evaluation of those methods on the WMT16 Multimodal Translation and Automatic Post-editing tasks. We show that the proposed methods achieve competitive results on both tasks.", "phrases": ["modality", "sequence-to-sequence model", "attention strategy"], "overall_score": 1.377934228978562, "scores": [0.6112857635291495, 0.5923193408922331, 0.5203245157873817], "rank_score": 0.5746432067362548} -{"id": "bjorkelund-etal-2009-multilingual", "title": "Multilingual Semantic Role Labeling", "abstract": "This paper describes our contribution to the semantic role labeling task (SRL-only) of the CoNLL-2009 shared task in the closed challenge (Hajic et al., 2009). Our system consists of a pipeline of independent, local classifiers that identify the predicate sense, the arguments of the predicates, and the argument labels. Using these local models, we carried out a beam search to generate a pool of candidates. We then reranked the candidates using a joint learning approach that combines the local models and proposition features. \n \nTo address the multilingual nature of the data, we implemented a feature selection procedure that systematically explored the feature space, yielding significant gains over a standard set of features. Our system achieved the second best semantic score overall with an average labeled semantic F1 of 80.31. It obtained the best F1 score on the Chinese and German data and the second best one on English.", "phrases": ["semantic role", "feature selection procedure", "other type"], "overall_score": 1.555183306490464, "scores": [0.6500293289560831, 0.5424413152185907, 0.5303740496099744], "rank_score": 0.5742815645948828} -{"id": "xu-etal-2019-neural", "title": "Neural Response Generation with Meta-words", "abstract": "We present open domain dialogue generation with meta-words. A meta-word is a structured record that describes attributes of a response, and thus allows us to explicitly model the one-to-many relationship within open domain dialogues and perform response generation in an explainable and controllable manner. To incorporate meta-words into generation, we propose a novel goal-tracking memory network that formalizes meta-word expression as a goal in response generation and manages the generation process to achieve the goal with a state memory panel and a state controller. Experimental results from both automatic evaluation and human judgment on two large-scale data sets indicate that our model can significantly outperform state-of-the-art generation models in terms of response relevance, response diversity, and accuracy of meta-word expression.", "phrases": ["response generation", "meta-word", "domain dialogue generation", "memory network", "specificity"], "overall_score": 1.0287829898231993, "scores": [0.5985606424958871, 0.5962513935412205, 0.5704623288698147, 0.568211003252795, 0.5373882270177388], "rank_score": 0.5741747190354912} -{"id": "huang-etal-2005-robustness", "title": "The Robustness of Domain Lexico-Taxonomy: Expanding Domain Lexicon with CiLin", "abstract": "This paper deals with the robust expansion of Domain Lexico-Taxonomy (DLT). DLT is a domain taxonomy enriched with domain lexica. DLT was proposed as an infrastructure for crossing domain barriers (Huang et al. 2004).
The DLT proposal is based on the observation that domain lexica contain entries that are also part of a general lexicon. Hence, when entries of a general lexicon are marked with their associated domain attributes, this information can have two important applications. First, the DLT will serve as seeds for domain lexica. Second, the DLT offers the most reliable evidence for deciding the domain of a new text since these lexical clues belong to the general lexicon and do occur reliably in all texts. Hence general lexicon lemmas are extracted to populate domain lexica, which are situated in domain taxonomy. Based on this previous work, we show in this paper that the original DLT can be further expanded when a new language resource is introduced. We applied CiLin, a Chinese thesaurus, and added more than 1000 new entries for DLT and show with evaluation that the DLT approach is robust since the size and number of domain lexica increased effectively.", "phrases": ["domain lexica", "chinese thesaurus", "dlt approach"], "overall_score": 0.6304935264027145, "scores": [0.6238563259275063, 0.5571155854840295, 0.540727908186567], "rank_score": 0.5738999398660343} -{"id": "guzman-etal-2019-flores", "title": "The FLORES Evaluation Datasets for Low-Resource Machine Translation: Nepali\u2013English and Sinhala\u2013English", "abstract": "For machine translation, a vast majority of language pairs in the world are considered low-resource because they have little parallel data available. Besides the technical challenges of learning with limited supervision, it is difficult to evaluate methods trained on low-resource language pairs because of the lack of freely and publicly available benchmarks. In this work, we introduce the FLORES evaluation datasets for Nepali\u2013English and Sinhala\u2013English, based on sentences translated from Wikipedia. Compared to English, these are languages with very different morphology and syntax, for which little out-of-domain parallel data is available and for which relatively large amounts of monolingual data are freely available. We describe our process to collect and cross-check the quality of translations, and we report baseline performance using several learning settings: fully supervised, weakly supervised, semi-supervised, and fully unsupervised. Our experiments demonstrate that current state-of-the-art methods perform rather poorly on this benchmark, posing a challenge to the research community working on low-resource MT. Data and code to reproduce our experiments are available at .", "phrases": ["low-resource language pair", "large amount", "bleu score"], "overall_score": 1.513333792244784, "scores": [0.6206660390446493, 0.5592844467772664, 0.5403612807270237], "rank_score": 0.5734372555163132} -{"id": "lee-etal-2021-towards", "title": "Towards Few-shot Fact-Checking via Perplexity", "abstract": "Few-shot learning has drawn researchers' attention to overcome the problem of data scarcity. Recently, large pre-trained language models have shown great performance in few-shot learning for various downstream tasks, such as question answering and machine translation. Nevertheless, little exploration has been made to achieve few-shot learning for the fact-checking task. However, fact-checking is an important problem, especially when the amount of information online is growing exponentially every day. In this paper, we propose a new way of utilizing the powerful transfer learning ability of a language model via a perplexity score.
The most notable strength of our methodology lies in its capability in few-shot learning. With only two training samples, our methodology can already outperform the Major Class baseline by more than an absolute 10% on the F1-Macro metric across multiple datasets. Through experiments, we empirically verify the plausibility of the rather surprising usage of the perplexity score in the context of fact-checking and highlight the strength of our few-shot methodology by comparing it to strong fine-tuning-based baseline models. Moreover, we construct and publicly release two new fact-checking datasets related to COVID-19.", "phrases": ["pre-trained language model", "fact-checking task", "perplexity score"], "overall_score": 0.9226110862309052, "scores": [0.6020653349418639, 0.5685811011548222, 0.5491050605650699], "rank_score": 0.5732504988872519} -{"id": "peters-etal-2006-lois", "title": "The LOIS Project", "abstract": "The legal knowledge base resulting from the LOIS (Lexical Ontologies for legal Information Sharing) project consists of legal WordNets in six languages (Italian, Dutch, Portuguese, German, Czech, English). Its architecture is based on the EuroWordNet (EWN) framework (Vossen et al., 1997). Using the EWN framework assures compatibility of the LOIS WordNets with EWN, allowing them to function as an extension of EWN for the legal domain. For each legal system, the document-derived legal concepts are integrated into a taxonomy, which links into existing formal ontologies. These give the legal wordnets a first formal backbone, which can, in future, be further extended. The database consists of 33,000 synsets, and is aimed to be used in information retrieval, where it provides mono- and multi-lingual access to European legal databases for legal experts as well as for laymen. The LOIS knowledge base also provides a flexible, modular architecture that allows integration of multiple classification schemes, and enables the comparison of legal systems by exploring translation, equivalence and structure across the different legal wordnets.", "phrases": ["project", "information sharing", "wordnets"], "overall_score": 0.6294641809457797, "scores": [0.609944726712684, 0.5589867323899023, 0.5499575086575599], "rank_score": 0.5729629892533821} -{"id": "liang-etal-2009-learning", "title": "Learning Semantic Correspondences with Less Supervision", "abstract": "A central problem in grounded language acquisition is learning the correspondences between a rich world state and a stream of text which references that world state. To deal with the high degree of ambiguity present in this setting, we present a generative model that simultaneously segments the text into utterances and maps each utterance to a meaning representation grounded in the world state.
We show that our model generalizes across three domains of increasing difficulty---Robocup sportscasting, weather forecasts (a new domain), and NFL recaps.", "phrases": ["correspondence", "language acquisition", "semantic representation"], "overall_score": 1.6226135302662823, "scores": [0.5870582740310993, 0.5847254343305008, 0.5463504381583097], "rank_score": 0.5727113821733033} -{"id": "palshikar-etal-2019-extraction-message", "title": "Extraction of Message Sequence Charts from Narrative History Text", "abstract": "In this paper, we advocate the use of Message Sequence Chart (MSC) as a knowledge representation to capture and visualize multi-actor interactions and their temporal ordering. We propose algorithms to automatically extract an MSC from a history narrative. For a given narrative, we first identify verbs which indicate interactions and then use dependency parsing and Semantic Role Labelling based approaches to identify senders (initiating actors) and receivers (other actors involved) for these interaction verbs. As a final step in MSC extraction, we employ a state-of-the-art algorithm to temporally re-order these interactions. Our evaluation on multiple publicly available narratives shows improvements over four baselines.", "phrases": ["narrative", "msc", "semantic role labelling"], "overall_score": 0.62843232396763, "scores": [0.6053176353267239, 0.566498876979307, 0.544254745360774], "rank_score": 0.5720237525556017} -{"id": "reiter-thomson-2020-shared", "title": "Shared Task on Evaluating Accuracy", "abstract": "We propose a shared task on methodologies and algorithms for evaluating the accuracy of generated texts, specifically summaries of basketball games produced from basketball box score and other game data. We welcome submissions based on protocols for human evaluation, automatic metrics, as well as combinations of human evaluations and metrics.", "phrases": ["future work", "multimodal task", "english text-to-text task"], "overall_score": 0.7928911822279185, "scores": [0.5849345284901525, 0.5736016746147838, 0.5573140617422723], "rank_score": 0.5719500882824028} -{"id": "wang-etal-2016-automatic", "title": "Automatic Construction of Discourse Corpora for Dialogue Translation", "abstract": "In this paper, a novel approach is proposed to automatically construct parallel discourse corpus for dialogue machine translation. Firstly, the parallel subtitle data and its corresponding monolingual movie script data are crawled and collected from Internet. Then tags such as speaker and discourse boundary from the script data are projected to its subtitle data via an information retrieval approach in order to map monolingual discourse to bilingual texts. We not only evaluate the mapping results, but also integrate speaker information into the translation. Experiments show our proposed method can achieve 81.79% and 98.64% accuracy on speaker and dialogue boundary annotation, and speaker-based language model adaptation can obtain around 0.5 BLEU points improvement in translation qualities.
Finally, we publicly release around 100K parallel discourse data with manual speaker and dialogue boundary annotation.", "phrases": ["novel approach", "dialogue machine translation", "boundary annotation", "parallel discourse data"], "overall_score": 1.0247738472592063, "scores": [0.6137258564284155, 0.5817953893191407, 0.5629831121692151, 0.5292443379519001], "rank_score": 0.5719371739671678} -{"id": "chodorow-etal-2012-problems", "title": "Problems in Evaluating Grammatical Error Detection Systems", "abstract": "Many evaluation issues for grammatical error detection have previously been overlooked, making it hard to draw meaningful comparisons between different approaches, even when they are evaluated on the same corpus. To begin with, the three-way contingency between a writer\u2019s sentence, the annotator\u2019s correction, and the system\u2019s output makes evaluation more complex than in some other NLP tasks, which we address by presenting an intuitive evaluation scheme. Of particular importance to error detection is the skew of the data \u2010 the low frequency of errors as compared to non-errors \u2010 which distorts some traditional measures of performance and limits their usefulness, leading us to recommend the reporting of raw measurements (true positives, false negatives, false positives, true negatives). Other issues that are particularly vexing for error detection focus on defining these raw measurements: specifying the size or scope of an error, properly treating errors as graded rather than discrete phenomena, and counting non-errors. We discuss recommendations for best practices with regard to reporting the results of system evaluation for these cases, recommendations which depend upon making clear one\u2019s assumptions and applications for error detection. By highlighting the problems with current error detection evaluation, the field will be better able to move forward.", "phrases": ["grammatical error detection", "same corpus", "annotator", "evaluation scheme"], "overall_score": 1.2564385063296153, "scores": [0.6575082151383509, 0.5492618915135065, 0.5461473759582134, 0.5344017460012247], "rank_score": 0.5718298071528238} -{"id": "lin-etal-2019-unified", "title": "A Unified Linear-Time Framework for Sentence-Level Discourse Parsing", "abstract": "We propose an efficient neural framework for sentence-level discourse analysis in accordance with Rhetorical Structure Theory (RST). Our framework comprises a discourse segmenter to identify the elementary discourse units (EDU) in a text, and a discourse parser that constructs a discourse tree in a top-down fashion. Both the segmenter and the parser are based on Pointer Networks and operate in linear time. Our segmenter yields an F1 score of 95.4%, and our parser achieves an F1 score of 81.7% on the aggregated labeled (relation) metric, surpassing previous approaches by a good margin and approaching human agreement on both tasks (98.3 and 83.0 F1).", "phrases": ["sentence-level discourse analysis", "pointer network", "agreement"], "overall_score": 1.1888515727332183, "scores": [0.6498283324955605, 0.5363023090386273, 0.5290196268010711], "rank_score": 0.571716756111753} -{"id": "titov-henderson-2007-constituent", "title": "Constituent Parsing with Incremental Sigmoid Belief Networks", "abstract": "We introduce a framework for syntactic parsing with latent variables based on a form of dynamic Sigmoid Belief Networks called Incremental Sigmoid Belief Networks. 
We demonstrate that a previous feed-forward neural network parsing model can be viewed as a coarse approximation to inference with this class of graphical model. By constructing a more accurate but still tractable approximation, we significantly improve parsing accuracy, suggesting that ISBNs provide a good idealization for parsing. This generative model of parsing achieves state-of-the-art results on WSJ text and 8% error reduction over the baseline neural network parser.", "phrases": ["graphical model", "isbn", "constituent"], "overall_score": 1.188284899201721, "scores": [0.6145760735221659, 0.5692331311938702, 0.5305235265255285], "rank_score": 0.5714442437471883} -{"id": "mohamed-etal-2011-discovering", "title": "Discovering Relations between Noun Categories", "abstract": "Traditional approaches to Relation Extraction from text require manually defining the relations to be extracted. We propose here an approach to automatically discovering relevant relations, given a large text corpus plus an initial ontology defining hundreds of noun categories (e.g., Athlete, Musician, Instrument). Our approach discovers frequently stated relations between pairs of these categories, using a two step process. For each pair of categories (e.g., Musician and Instrument) it first co-clusters the text contexts that connect known instances of the two categories, generating a candidate relation for each resulting cluster. It then applies a trained classifier to determine which of these candidate relations is semantically valid. Our experiments apply this to a text corpus containing approximately 200 million web pages and an ontology containing 122 categories from the NELL system [Carlson et al., 2010b], producing a set of 781 proposed candidate relations, approximately half of which are semantically valid. We conclude this is a useful approach to semi-automatic extension of the ontology for large-scale information extraction systems such as NELL.", "phrases": ["text context", "candidate relation", "nell"], "overall_score": 1.111781462536555, "scores": [0.5889636708991044, 0.5643966151299267, 0.5606676659958273], "rank_score": 0.5713426506749527} -{"id": "cui-etal-2020-bert", "title": "BERT-enhanced Relational Sentence Ordering Network", "abstract": "In this paper, we introduce a novel BERT-enhanced Relational Sentence Ordering Network (referred to as BRSON) by leveraging BERT for capturing better dependency relationship among sentences to enhance the coherence modeling for the entire paragraph. In particular, we develop a new Relational Pointer Decoder (referred as RPD) by incorporating the relative ordering information into the pointer network with a Deep Relational Module (referred as DRM), which utilizes BERT to exploit the deep semantic connection and relative ordering between sentences. This enables us to strengthen both local and global dependencies among sentences. Extensive evaluations are conducted on six public datasets.
The experimental results demonstrate the effectiveness and promise of our BRSON, showing a significant improvement over the state-of-the-art by a wide margin.", "phrases": ["ordering", "bert", "semantic connection"], "overall_score": 0.6276244462669766, "scores": [0.5920150987678161, 0.5698041016120455, 0.5520459713665877], "rank_score": 0.5712883905821498} -{"id": "freitag-etal-2021-results", "title": "Results of the WMT21 Metrics Shared Task: Evaluating Metrics with Expert-based Human Evaluations on TED and News Domain", "abstract": "This paper presents the results of the WMT21 Metrics Shared Task. Participants were asked to score the outputs of the translation systems competing in the WMT21 News Translation Task with automatic metrics on two different domains: news and TED talks. All metrics were evaluated on how well they correlate at the system- and segment-level with human ratings. Contrary to previous years' editions, this year we acquired our own human ratings based on expert-based human evaluation via Multidimensional Quality Metrics (MQM). This setup had several advantages: (i) expert-based evaluation has been shown to be more reliable, (ii) we were able to evaluate all metrics on two different domains using translations of the same MT systems, (iii) we added 5 additional translations coming from the same system during system development. In addition, we designed three challenge sets that evaluate the robustness of all automatic metrics. We present an extensive analysis on how well metrics perform on three language pairs: English to German, English to Russian and Chinese to English. We further show the impact of different reference translations on reference-based metrics and compare our expert-based MQM annotation with the DA scores acquired by WMT.", "phrases": ["human evaluation", "different domain", "news"], "overall_score": 0.7915365457334981, "scores": [0.5948994496198057, 0.565876469645746, 0.552142854552595], "rank_score": 0.570972924606049} -{"id": "mille-etal-2019-second", "title": "The Second Multilingual Surface Realisation Shared Task (SR'19): Overview and Evaluation Results", "abstract": "We report results from the SR'19 Shared Task, the second edition of a multilingual surface realisation task organised as part of the EMNLP'19 Workshop on Multilingual Surface Realisation. As in SR'18, the shared task comprised two tracks with different levels of complexity: (a) a shallow track where the inputs were full UD structures with word order information removed and tokens lemmatised; and (b) a deep track where additionally, functional words and morphological information were removed. The shallow track was offered in eleven, and the deep track in three languages. Systems were evaluated (a) automatically, using a range of intrinsic metrics, and (b) by human judges in terms of readability and meaning similarity. This report presents the evaluation results, along with descriptions of the SR'19 tracks, data and evaluation methods. 
For full descriptions of the participating systems, please see the separate system reports elsewhere in this volume.", "phrases": ["multilingual surface realisation", "functional word", "shared-task", "surface realization task"], "overall_score": 1.7092746086291573, "scores": [0.6313215369137961, 0.556979494560913, 0.5472322197527466, 0.5467462751754254], "rank_score": 0.5705698816007203} -{"id": "mendes-etal-2019-jointly", "title": "Jointly Extracting and Compressing Documents with Summary State Representations", "abstract": "We present a new neural model for text summarization that first extracts sentences from a document and then compresses them. The proposed model offers a balance that sidesteps the difficulties in abstractive methods while generating more concise summaries than extractive methods. In addition, our model dynamically determines the length of the output summary based on the gold summaries it observes during training and does not require length constraints typical to extractive summarization. The model achieves state-of-the-art results on the CNN/DailyMail and Newsroom datasets, improving over current extractive and abstractive methods. Human evaluations demonstrate that our model generates concise and informative summaries. We also make available a new dataset of oracle compressive summaries derived automatically from the CNN/DailyMail reference summaries.", "phrases": ["summarization", "extractor", "content selection"], "overall_score": 1.186012935825186, "scores": [0.605684428045433, 0.5595791038700614, 0.545791449029662], "rank_score": 0.5703516603150521} -{"id": "finegan-dollak-etal-2018-improving", "title": "Improving Text-to-SQL Evaluation Methodology", "abstract": "To be informative, an evaluation must measure how well systems generalize to realistic unseen data. We identify limitations of and propose improvements to current evaluations of text-to-SQL systems. First, we compare human-generated and automatically generated questions, characterizing properties of queries necessary for real-world applications. To facilitate evaluation on multiple datasets, we release standardized and improved versions of seven existing datasets and one new text-to-SQL dataset. Second, we show that the current division of data into training and test sets measures robustness to variations in the way questions are asked, but only partially tests how well systems generalize to new queries; therefore, we propose a complementary dataset split for evaluation of future work. Finally, we demonstrate how the common practice of anonymizing variables during evaluation removes an important challenge of the task. Our observations highlight key difficulties, and our methodology enables effective measurement of future development.", "phrases": ["text-to-sql system", "query", "semantic parser"], "overall_score": 1.8118056379504552, "scores": [0.638192475939519, 0.5456161503694363, 0.5264886826753531], "rank_score": 0.5700991029947695} -{"id": "maamouri-etal-2006-developing", "title": "Developing and Using a Pilot Dialectal Arabic Treebank", "abstract": "In this paper, we describe the methodological procedures and issues that emerged from the development of a pilot Levantine Arabic Treebank (LATB) at the Linguistic Data Consortium (LDC) and its use at the Johns Hopkins University (JHU) Center for Language and Speech Processing workshop on Parsing Arabic Dialects (PAD).
This pilot, consisting of morphological and syntactic annotation of approximately 26,000 words of Levantine Arabic conversational telephone speech, was developed under severe time constraints; hence the LDC team drew on their experience in treebanking Modern Standard Arabic (MSA) text. The resulting Levantine dialect treebanked corpus was used by the PAD team to develop and evaluate parsers for Levantine dialect texts. The parsers were trained on MSA resources and adapted using dialect-MSA lexical resources (some developed especially for this task) and existing linguistic knowledge about syntactic differences between MSA and dialect. The use of the LATB for development and evaluation of syntactic parsers allowed the PAD team to provide feedback to the LDC treebank developers. In this paper, we describe the creation of resources for this corpus, as well as transformations on the corpus to eliminate speech effects and lessen the gap between our pre-existing MSA resources and the new dialectal corpus.", "phrases": ["dialectal arabic", "levantine arabic", "syntactic annotation", "egyptian arabic"], "overall_score": 1.1850031870521622, "scores": [0.6216040500702827, 0.5849112682625524, 0.5399393327616963, 0.5330096441024968], "rank_score": 0.5698660737992571} -{"id": "chen-yoon-2011-detecting", "title": "Detecting Structural Events for Assessing Non-Native Speech", "abstract": "Structural events, (i.e., the structure of clauses and disfluencies) in spontaneous speech, are important components of human speaking and have been used to measure language development. However, they have not been actively used in automated speech assessment research. Given the recent substantial progress on automated structural event detection on spontaneous speech, we investigated the detection of clause boundaries and interruption points of edit disfluencies on transcriptions of non-native speech data and extracted features from the detected events for speech assessment. Compared to features computed on human-annotated events, the features computed on machine-generated events show promising correlations to holistic scores that reflect speaking proficiency levels.", "phrases": ["structural event", "speech scoring", "syntactic competence"], "overall_score": 1.0205748300396693, "scores": [0.6001651024227931, 0.5809267045136013, 0.527689166571223], "rank_score": 0.5695936578358726} -{"id": "tsarfaty-etal-2012-cross", "title": "Cross-Framework Evaluation for Statistical Parsing", "abstract": "A serious bottleneck of comparative parser evaluation is the fact that different parsers subscribe to different formal frameworks and theoretical assumptions. Converting outputs from one framework to another is less than optimal as it easily introduces noise into the process. Here we present a principled protocol for evaluating parsing results across frameworks based on function trees, tree generalization and edit distance metrics. This extends a previously proposed framework for cross-theory evaluation and allows us to compare a wider class of parsers.
We demonstrate the usefulness and language independence of our procedure by evaluating constituency and dependency parsers on English and Swedish.", "phrases": ["different parser", "optimization", "maltoptimizer"], "overall_score": 1.0205629858526544, "scores": [0.613938255771385, 0.5669028943251345, 0.5279199923111901], "rank_score": 0.5695870474692366} -{"id": "acs-etal-2015-two", "title": "A Two-level Classifier for Discriminating Similar Languages", "abstract": "The BRUniBP team\u2019s submission is presented for the Discriminating between Similar Languages Shared Task 2015. Our method is a two phase classifier that utilizes both character and word-level features. The evaluation shows 100% accuracy on language group identification and 93.66% accuracy on language identification. The main contribution of the paper is a memory-efficient correlation based feature selection method.", "phrases": ["heli method", "variation", "previous vardial workshop"], "overall_score": 0.9166224517132149, "scores": [0.579865286205834, 0.5685255740750806, 0.5601977928723896], "rank_score": 0.5695295510511014} -{"id": "xu-etal-2021-stacked", "title": "Stacked Acoustic-and-Textual Encoding: Integrating the Pre-trained Models into Speech Translation Encoders", "abstract": "Encoder pre-training is promising in end-to-end Speech Translation (ST), given the fact that speech-to-translation data is scarce. But ST encoders are not simple instances of Automatic Speech Recognition (ASR) or Machine Translation (MT) encoders. For example, we find that ASR encoders lack the global context representation, which is necessary for translation, whereas MT encoders are not designed to deal with long but locally attentive acoustic sequences. In this work, we propose a Stacked Acoustic-and-Textual Encoding (SATE) method for speech translation. Our encoder begins with processing the acoustic sequence as usual, but later behaves more like an MT encoder for a global representation of the input sequence. In this way, it is straightforward to incorporate the pre-trained models into the system. Also, we develop an adaptor module to alleviate the representation inconsistency between the pre-trained ASR encoder and MT encoder, and develop a multi-teacher knowledge distillation method to preserve the pre-training knowledge. Experimental results on the LibriSpeech En-Fr and MuST-C En-De ST tasks show that our method achieves state-of-the-art BLEU scores of 18.3 and 25.2. To our knowledge, we are the first to develop an end-to-end ST system that achieves comparable or even better BLEU performance than the cascaded ST counterpart when large-scale ASR and MT data is available.", "phrases": ["end-to-end speech translation", "automatic speech recognition", "machine translation"], "overall_score": 0.9164249657457381, "scores": [0.6065902135337164, 0.5643538543207508, 0.5372764705068007], "rank_score": 0.5694068461204227} -{"id": "bannard-2007-measure", "title": "A Measure of Syntactic Flexibility for Automatically Identifying Multiword Expressions in Corpora", "abstract": "Natural languages contain many multi-word sequences that do not display the variety of syntactic processes we would expect given their phrase type, and consequently must be included in the lexicon as multiword units. This paper describes a method for identifying such items in corpora, focussing on English verb-noun combinations. 
In an evaluation using a set of dictionary-published MWEs we show that our method achieves greater accuracy than existing MWE extraction methods based on lexical association.", "phrases": ["multiword expression", "verb-noun combination", "mwe", "syntactic fixedness", "english vnic"], "overall_score": 1.5021374236681522, "scores": [0.6091920538416045, 0.6040798441741526, 0.5577365054850973, 0.5453878448317492, 0.5295772112268088], "rank_score": 0.5691946919118824} -{"id": "suzuki-etal-2003-hierarchical", "title": "Hierarchical Directed Acyclic Graph Kernel: Methods for Structured Natural Language Data", "abstract": "This paper proposes the \"Hierarchical Directed Acyclic Graph (HDAG) Kernel\" for structured natural language data. The HDAG Kernel directly accepts several levels of both chunks and their relations, and then efficiently computes the weighted sum of the number of common attribute sequences of the HDAGs. We applied the proposed method to question classification and sentence alignment tasks to evaluate its performance as a similarity measure and a kernel function. The results of the experiments demonstrate that the HDAG Kernel is superior to other kernel functions and baseline methods.", "phrases": ["kernel", "natural language data", "hdag"], "overall_score": 0.7890634153533453, "scores": [0.6119698125315901, 0.5622779099935397, 0.5333190918906396], "rank_score": 0.5691889381385898} -{"id": "weerkamp-de-rijke-2008-credibility", "title": "Credibility Improves Topical Blog Post Retrieval", "abstract": "Topical blog post retrieval is the task of ranking blog posts with respect to their relevance for a given topic. To improve topical blog post retrieval we incorporate textual credibility indicators in the retrieval process. We consider two groups of indicators: post level (determined using information about individual blog posts only) and blog level (determined using information from the underlying blogs). We describe how to estimate these indicators and how to integrate them into a retrieval approach based on language models. Experiments on the TREC Blog track test set show that both groups of credibility indicators significantly improve retrieval effectiveness; the best performance is achieved when combining them.", "phrases": ["blog post retrieval", "credibility indicator", "personal pronoun", "text length", "capital"], "overall_score": 0.7887994755388803, "scores": [0.6515569433708468, 0.5836885570088693, 0.5542774245689083, 0.5343094900718626, 0.5211603140189076], "rank_score": 0.5689985458078789} -{"id": "borchert-etal-2020-ggponc", "title": "GGPONC: A Corpus of German Medical Text with Rich Metadata Based on Clinical Practice Guidelines", "abstract": "The lack of publicly accessible text corpora is a major obstacle for progress in natural language processing. For medical applications, unfortunately, all language communities other than English are low-resourced. In this work, we present GGPONC (German Guideline Program in Oncology NLP Corpus), a freely distributable German language corpus based on clinical practice guidelines for oncology. This corpus is one of the largest ever built from German medical documents. Unlike clinical documents, clinical guidelines do not contain any patient-related information and can therefore be used without data protection restrictions. Moreover, GGPONC is the first corpus for the German language covering diverse conditions in a large medical subfield and provides a variety of metadata, such as literature references and evidence levels.
By applying and evaluating existing medical information extraction pipelines for German text, we are able to draw comparisons for the use of medical language to other corpora, medical and non-medical ones.", "phrases": ["oncology nlp corpus", "german language", "clinical guideline"], "overall_score": 0.6250384720648048, "scores": [0.5859977646487445, 0.5754495199782651, 0.5453563216458542], "rank_score": 0.568934535424288} -{"id": "zhu-etal-2010-imposing", "title": "Imposing Hierarchical Browsing Structures onto Spoken Documents", "abstract": "This paper studies the problem of imposing a known hierarchical structure onto an unstructured spoken document, aiming to help browse such archives. We formulate our solutions within a dynamic-programming-based alignment framework and use minimum error-rate training to combine a number of global and hierarchical constraints. This pragmatic approach is computationally efficient. Results show that it outperforms a baseline that ignores the hierarchical and global features and the improvement is consistent on transcripts with different WERs. Directly imposing such hierarchical structures onto raw speech without using transcripts yields competitive results.", "phrases": ["hierarchical structure", "transcript", "pre-order walk"], "overall_score": 0.788706118165084, "scores": [0.6101601833502226, 0.5508620960168906, 0.545771328726214], "rank_score": 0.5689312026977758} -{"id": "singh-husain-2005-comparison", "title": "Comparison, Selection and Use of Sentence Alignment Algorithms for New Language Pairs", "abstract": "Several algorithms are available for sentence alignment, but there is a lack of systematic evaluation and comparison of these algorithms under different conditions. In most cases, the factors which can significantly affect the performance of a sentence alignment algorithm have not been considered while evaluating. We have used a method for evaluation that can give a better estimate about a sentence alignment algorithm's performance, so that the best one can be selected. We have compared four approaches using this method. These have mostly been tried on European language pairs. We have evaluated manually-checked and validated English-Hindi aligned parallel corpora under different conditions. We also suggest some guidelines on actual alignment.", "phrases": ["sentence alignment algorithm", "systematic evaluation", "condition"], "overall_score": 0.9153610682778484, "scores": [0.6396580355374882, 0.5336836949254007, 0.5328956979076106], "rank_score": 0.5687458094568333} -{"id": "clark-curran-2006-partial", "title": "Partial Training for a Lexicalized-Grammar Parser", "abstract": "We propose a solution to the annotation bottleneck for statistical parsing, by exploiting the lexicalized nature of Combinatory Categorial Grammar (CCG). The parsing model uses predicate-argument dependencies for training, which are derived from sequences of CCG lexical categories rather than full derivations. A simple method is used for extracting dependencies from lexical category sequences, resulting in high precision, yet incomplete and noisy data. The dependency parsing model of Clark and Curran (2004b) is extended to exploit this partial training data. 
Remarkably, the accuracy of the parser trained on data derived from category sequences alone is only 1.3% worse in terms of F-score than the parser trained on complete dependency structures.", "phrases": ["lexical category", "partial training data", "complete dependency structure"], "overall_score": 0.9147364939706052, "scores": [0.5690996431026166, 0.5686631523306902, 0.5673104234282375], "rank_score": 0.5683577396205147} -{"id": "hu-etal-2021-explicit", "title": "Explicit Alignment Objectives for Multilingual Bidirectional Encoders", "abstract": "Pre-trained cross-lingual encoders such as mBERT (Devlin et al., 2019) and XLM-R (Conneau et al., 2020) have proven impressively effective at enabling transfer-learning of NLP systems from high-resource languages to low-resource languages. This success comes despite the fact that there is no explicit objective to align the contextual embeddings of words/sentences with similar meanings across languages together in the same space. In this paper, we present a new method for learning multilingual encoders, AMBER (Aligned Multilingual Bidirectional EncodeR). AMBER is trained on additional parallel data using two explicit alignment objectives that align the multilingual representations at different granularities. We conduct experiments on zero-shot cross-lingual transfer learning for different tasks including sequence tagging, sentence retrieval and sentence classification. Experimental results on the tasks in the XTREME benchmark (Hu et al., 2020) show that AMBER obtains gains of up to 1.1 average F1 score on sequence tagging and up to 27.3 average accuracy on retrieval over the XLM-R-large model which has 3.2x the parameters of AMBER. Our code and models are available at .", "phrases": ["multilingual encoder", "parallel data", "backward attention matrix"], "overall_score": 1.1814310933779733, "scores": [0.6328969525422956, 0.5506087124007785, 0.5209391146253539], "rank_score": 0.5681482598561427} -{"id": "zalmout-etal-2016-analysis", "title": "Analysis of Foreign Language Teaching Methods: An Automatic Readability Approach", "abstract": "Much research in education has been done on the study of different language teaching methods. However, there has been little investigation using computational analysis to compare such methods in terms of readability or complexity progression. In this paper, we make use of existing readability scoring techniques and our own classifiers to analyze the textbooks used in two very different teaching methods for English as a Second Language \u2013 the grammar-based and the communicative methods. Our analysis indicates that the grammar-based curriculum shows a more coherent readability progression compared to the communicative curriculum. This finding corroborates with the expectations about the differences between these two methods and validates our approach's value in comparing different teaching methods quantitatively.", "phrases": ["teaching method", "readability scoring technique", "second language"], "overall_score": 0.9143645953119861, "scores": [0.5874643158017092, 0.5670524125554801, 0.549863269618207], "rank_score": 0.5681266659917988} -{"id": "erk-pado-2010-exemplar", "title": "Exemplar-Based Models for Word Meaning in Context", "abstract": "This paper describes ongoing work on distributional models for word meaning in context. We abandon the usual one-vector-per-word paradigm in favor of an exemplar model that activates only relevant occurrences. 
On a paraphrasing task, we find that a simple exemplar model outperforms more complex state-of-the-art models.", "phrases": ["occurrence", "target word", "contextual information"], "overall_score": 1.6727716422420187, "scores": [0.5998637523337793, 0.5733885729203593, 0.5310842095606267], "rank_score": 0.5681121782715884} -{"id": "iglesias-etal-2009-rule", "title": "Rule Filtering by Pattern for Efficient Hierarchical Translation", "abstract": "We describe refinements to hierarchical translation search procedures intended to reduce both search errors and memory usage through modifications to hypothesis expansion in cube pruning and reductions in the size of the rule sets used in translation. Rules are put into syntactic classes based on the number of non-terminals and the pattern, and various filtering strategies are then applied to assess the impact on translation speed and quality. Results are reported on the 2008 NIST Arabic-to-English evaluation task.", "phrases": ["modification", "rule set", "syntactic class", "non-terminal", "maximum phrase jump"], "overall_score": 1.105007513890937, "scores": [0.5948120688358375, 0.5771756932808545, 0.5617555670849296, 0.5588941872569998, 0.5466701320147382], "rank_score": 0.5678615296946719} -{"id": "kolluru-etal-2020-imojie", "title": "IMoJIE: Iterative Memory-Based Joint Open Information Extraction", "abstract": "While traditional systems for Open Information Extraction were statistical and rule-based, recently neural models have been introduced for the task. Our work builds upon CopyAttention, a sequence generation OpenIE model (Cui et al., 2018). Our analysis reveals that CopyAttention produces a constant number of extractions per sentence, and its extracted tuples often express redundant information. We present IMoJIE, an extension to CopyAttention, which produces the next extraction conditioned on all previously extracted tuples. This approach overcomes both shortcomings of CopyAttention, resulting in a variable number of diverse extractions per sentence. We train IMoJIE on training data bootstrapped from extractions of several non-neural systems, which have been automatically filtered to reduce redundancy and noise. IMoJIE outperforms CopyAttention by about 18 F1 pts, and a BERT-based strong baseline by 2 F1 pts, establishing a new state of the art for the task.", "phrases": ["open information extraction", "art", "seq2seq architecture"], "overall_score": 1.3065669947042333, "scores": [0.625641662011487, 0.5391703405479844, 0.53749250555142], "rank_score": 0.5674348360369638} -{"id": "lewis-steedman-2013-combined", "title": "Combined Distributional and Logical Semantics", "abstract": "We introduce a new approach to semantics which combines the benefits of distributional and formal logical semantics. Distributional models have been successful in modelling the meanings of content words, but logical semantics is necessary to adequately represent many function words. We follow formal semantics in mapping language to logical representations, but differ in that the relational constants used are induced by offline distributional clustering at the level of predicate-argument structure. Our clustering algorithm is highly scalable, allowing us to run on corpora the size of Gigaword. Different senses of a word are disambiguated based on their induced types.
We outperform a variety of existing approaches on a wide-coverage question answering task, and demonstrate the ability to make complex multi-sentence inferences involving quantifiers on the FraCaS suite.", "phrases": ["content word", "relational constant", "distributional clustering"], "overall_score": 1.6704460400069159, "scores": [0.6014157837903044, 0.5583377596454234, 0.5422135054584013], "rank_score": 0.5673223496313763} -{"id": "chandu-etal-2017-tackling", "title": "Tackling Biomedical Text Summarization: OAQA at BioASQ 5B", "abstract": "In this paper, we describe our participation in phase B of task 5b of the fifth edition of the annual BioASQ challenge, which includes answering factoid, list, yes-no and summary questions from biomedical data. We describe our techniques with an emphasis on ideal answer generation, where the goal is to produce a relevant, precise, non-redundant, query-oriented summary from multiple relevant documents. We make use of extractive summarization techniques to address this task and experiment with different biomedical ontologies and various algorithms including agglomerative clustering, Maximum Marginal Relevance (MMR) and sentence compression. We propose a novel word embedding based tf-idf similarity metric and a soft positional constraint which improve our system performance. We evaluate our techniques on test batch 4 from the fourth edition of the challenge. Our best system achieves a ROUGE-2 score of 0.6534 and ROUGE-SU4 score of 0.6536.", "phrases": ["edition", "bioasq challenge", "extractive summarization technique"], "overall_score": 1.0156430586189888, "scores": [0.6179527314293636, 0.5545462186022403, 0.5280246013632031], "rank_score": 0.566841183798269} -{"id": "liu-etal-2014-iterative", "title": "An Iterative Link-based Method for Parallel Web Page Mining", "abstract": "Identifying parallel web pages from bilingual web sites is a crucial step of bilingual resource construction for crosslingual information processing. In this paper, we propose a link-based approach to distinguish parallel web pages from bilingual web sites. Compared with the existing methods, which only employ the internal translation similarity (such as content-based similarity and page structural similarity), we hypothesize that the external translation similarity is an effective feature to identify parallel web pages. Within a bilingual web site, web pages are interconnected by hyperlinks. The basic idea of our method is that the translation similarity of two pages can be inferred from their neighbor pages, which can be adopted as an important source of external similarity. Thus, the translation similarity of page pairs will influence each other. An iterative algorithm is developed to estimate the external translation similarity and the final translation similarity. Both internal and external similarity measures are combined in the iterative algorithm. 
Experiments on six bilingual websites demonstrate that our method is effective and obtains significant improvement (6.2% F-Score) over the baseline which only utilizes internal translation similarity.", "phrases": ["parallel web page", "link-based approach", "content-based similarity"], "overall_score": 0.6227200656498878, "scores": [0.6073722199067427, 0.5573106739293713, 0.535789799050388], "rank_score": 0.5668242309621673} -{"id": "gutierrez-etal-2016-finding", "title": "Finding Non-Arbitrary Form-Meaning Systematicity Using String-Metric Learning for Kernel Regression", "abstract": "Arbitrariness of the sign\u2014the notion that the forms of words are unrelated to their meanings\u2014is an underlying assumption of many linguistic theories. Two lines of research have recently challenged this assumption, but they produce differing characterizations of non-arbitrariness in language. Behavioral and corpus studies have confirmed the validity of localized form-meaning patterns manifested in limited subsets of the lexicon. Meanwhile, global (lexicon-wide) statistical analyses instead find diffuse form-meaning systematicity across the lexicon as a whole. We bridge the gap with an approach that can detect both local and global form-meaning systematicity in language. In the kernel regression formulation we introduce, form-meaning relationships can be used to predict words\u2019 distributional semantic vectors from their forms. Furthermore, we introduce a novel metric learning algorithm that can learn weighted edit distances that minimize kernel regression error. Our results suggest that the English lexicon exhibits far more global form-meaning systematicity than previously discovered, and that much of this systematicity is focused in localized form-meaning patterns.", "phrases": ["systematicity", "characterization", "form-meaning association"], "overall_score": 0.7857545551085847, "scores": [0.5791856578860811, 0.5747567535548235, 0.5464638885756984], "rank_score": 0.5668021000055344} -{"id": "ture-etal-2012-encouraging", "title": "Encouraging Consistent Translation Choices", "abstract": "It has long been observed that monolingual text exhibits a tendency toward \"one sense per discourse,\" and it has been argued that a related \"one translation per discourse\" constraint is operative in bilingual contexts as well. In this paper, we introduce a novel method using forced decoding to confirm the validity of this constraint, and we demonstrate that it can be exploited in order to improve machine translation quality. Three ways of incorporating such a preference into a hierarchical phrase-based MT model are proposed, and the approach where all three are combined yields the greatest improvements for both Arabic-English and Chinese-English translation experiments.", "phrases": ["discourse", "consistency constraint", "counting feature"], "overall_score": 0.9121306101101927, "scores": [0.5861619496300636, 0.5794961798588022, 0.5345577093390403], "rank_score": 0.5667386129426354} -{"id": "strubell-etal-2017-fast", "title": "Fast and Accurate Entity Recognition with Iterated Dilated Convolutions", "abstract": "Today when many practitioners run basic NLP on the entire web and large-volume traffic, faster methods are paramount to saving time and energy costs.
Recent advances in GPU hardware have led to the emergence of bi-directional LSTMs as a standard method for obtaining per-token vector representations serving as input to labeling tasks such as NER (often followed by prediction in a linear-chain CRF). Though expressive and accurate, these models fail to fully exploit GPU parallelism, limiting their computational efficiency. This paper proposes a faster alternative to Bi-LSTMs for NER: Iterated Dilated Convolutional Neural Networks (ID-CNNs), which have better capacity than traditional CNNs for large context and structured prediction. Unlike LSTMs whose sequential processing on sentences of length N requires O(N) time even in the face of parallelism, ID-CNNs permit fixed-depth convolutions to run in parallel across entire documents. We describe a distinct combination of network structure, parameter sharing and training procedures that enable dramatic 14-20x test-time speedups while retaining accuracy comparable to the Bi-LSTM-CRF. Moreover, ID-CNNs trained to aggregate context from the entire document are more accurate than Bi-LSTM-CRFs while attaining 8x faster test time speeds.", "phrases": ["entity recognition", "convolution", "bi-lstms"], "overall_score": 1.637595042141582, "scores": [0.6086540526270255, 0.5599970443006158, 0.5310559089284669], "rank_score": 0.5665690019520361} -{"id": "lewis-steedman-2013-unsupervised", "title": "Unsupervised Induction of Cross-Lingual Semantic Relations", "abstract": "Creating a language-independent meaning representation would benefit many crosslingual NLP tasks. We introduce the first unsupervised approach to this problem, learning clusters of semantically equivalent English and French relations between referring expressions, based on their named-entity arguments in large monolingual corpora. The clusters can be used as language-independent semantic relations, by mapping clustered expressions in different languages onto the same relation. Our approach needs no parallel text for training, but outperforms a baseline that uses machine translation on a cross-lingual question answering task. We also show how to use the semantics to improve the accuracy of machine translation, by using it in a simple reranker.", "phrases": ["semantic relation", "unsupervised approach", "equivalent english"], "overall_score": 1.1024688957359718, "scores": [0.6008524979233432, 0.5568111367856551, 0.5420071793897784], "rank_score": 0.5665569380329255} -{"id": "kaplan-etal-2004-speed", "title": "Speed and Accuracy in Shallow and Deep Stochastic Parsing", "abstract": "This paper reports some experiments that compare the accuracy and performance of two stochastic parsing systems. The currently popular Collins parser is a shallow parser whose output contains more detailed semantically relevant information than other such parsers. The XLE parser is a deep-parsing system that couples a Lexical Functional Grammar to a log-linear disambiguation component and provides much richer representations. We measured the accuracy of both systems against a gold standard of the PARC 700 dependency bank, and also measured their processing times.
We found the deep-parsing system to be more accurate than the Collins parser with only a slight reduction in parsing speed.", "phrases": ["such parser", "parc", "reduction"], "overall_score": 1.5334732102573962, "scores": [0.5997928561516094, 0.554419712710746, 0.5445815040465445], "rank_score": 0.5662646909696333} -{"id": "gemechu-reed-2019-decompositional", "title": "Decompositional Argument Mining: A General Purpose Approach for Argument Graph Construction", "abstract": "This work presents an approach that decomposes propositions into four functional components and identifies the patterns linking those components to determine argument structure. The entities addressed by a proposition are target concepts, and the features selected to make a point about the target concepts are aspects. A line of reasoning is followed by providing evidence for the points made about the target concepts via aspects. Opinions on target concepts and opinions on aspects are used to support or attack the ideas expressed by target concepts and aspects. The relations between aspects, target concepts, opinions on target concepts and aspects are used to infer the argument relations. Propositions are connected iteratively to form a graph structure. The approach is generic in that it is not tuned for a specific corpus; evaluated on three different corpora from the literature (AAEC, AMT, US2016G1tv), it achieves F scores of 0.79, 0.77 and 0.64, respectively.", "phrases": ["argument mining", "functional component", "opinion"], "overall_score": 1.1774654668722786, "scores": [0.6315243512251437, 0.5382531521772541, 0.5289460864722467], "rank_score": 0.5662411966248815} -{"id": "xiong-etal-2018-session", "title": "Session-level Language Modeling for Conversational Speech", "abstract": "We propose to generalize language models for conversational speech recognition to allow them to operate across utterance boundaries and speaker changes, thereby capturing conversation-level phenomena such as adjacency pairs, lexical entrainment, and topical coherence. The model consists of a long-short-term memory (LSTM) recurrent network that reads the entire word-level history of a conversation, as well as information about turn taking and speaker overlap, in order to predict each next word. The model is applied in a rescoring framework, where the word history prior to the current utterance is approximated with preliminary recognition results. In experiments in the conversational telephone speech domain (Switchboard) we find that such a model gives substantial perplexity reductions over a standard LSTM-LM with utterance scope, as well as improvements in word error rate.", "phrases": ["language model", "conversation", "history"], "overall_score": 0.9111623747562907, "scores": [0.5877818199741894, 0.5867336637144855, 0.5238955597884661], "rank_score": 0.5661370144923804} -{"id": "genzel-2010-automatically", "title": "Automatically Learning Source-side Reordering Rules for Large Scale Machine Translation", "abstract": "We describe an approach to automatically learn reordering rules to be applied as a preprocessing step in phrase-based machine translation.
We learn rules for 8 different language pairs, showing BLEU improvements for all of them, and demonstrate that many important order transformations (SVO to SOV or VSO, head-modifier, verb movement) can be captured by this approach.", "phrases": ["machine translation", "pre-processing step", "position"], "overall_score": 1.695493252388658, "scores": [0.6212746472378579, 0.5443964550955309, 0.5322375532794245], "rank_score": 0.5659695518709378} -{"id": "hopkins-may-2013-models", "title": "Models of Translation Competitions", "abstract": "What do we want to learn from a translation competition and how do we learn it with confidence? We argue that a disproportionate focus on ranking competition participants has led to lots of different rankings, but little insight about which rankings we should trust. In response, we provide the first framework that allows an empirical comparison of different analyses of competition results. We then use this framework to compare several analytical models on data from the Workshop on Machine Translation (WMT).", "phrases": ["ranking", "workshop", "wmt"], "overall_score": 0.6217579714688564, "scores": [0.5726251168887049, 0.570228569779653, 0.5549917986282911], "rank_score": 0.565948495098883} -{"id": "amancio-specia-2014-analysis", "title": "An Analysis of Crowdsourced Text Simplifications", "abstract": "We present a study on the text simplification operations undertaken collaboratively by Simple English Wikipedia contributors. The aim is to understand whether a complex-simple parallel corpus involving this version of Wikipedia is appropriate as a data source to induce simplification rules, and whether we can automatically categorise the different operations performed by humans. A subset of the corpus was first manually analysed to identify its transformation operations. We then built machine learning models to attempt to automatically classify segments based on such transformations. This classification could be used, e.g., to filter out potentially noisy transformations. Our results show that the most common transformation operations performed by humans are paraphrasing (39.80%) and drop of information (26.76%), which are some of the most difficult operations to generalise from data. They are also the most difficult operations to identify automatically, with the lowest overall classifier accuracy among all operations (73% and 59%, respectively).", "phrases": ["text simplification", "wikipedia", "reason"], "overall_score": 1.100197874592408, "scores": [0.6306331085434592, 0.5359964969521342, 0.5295399865996904], "rank_score": 0.5653898640317613} -{"id": "sassano-2004-linear", "title": "Linear-Time Dependency Analysis for Japanese", "abstract": "We present a novel algorithm for Japanese dependency analysis. The algorithm allows us to analyze dependency structures of a sentence in linear time while keeping state-of-the-art accuracy. In this paper, we show a formal description of the algorithm and discuss it theoretically with respect to time complexity. In addition, we evaluate its efficiency and performance empirically against the Kyoto University Corpus.
The proposed algorithm with improved models for dependency yields the best accuracy among previously published results on the Kyoto University Corpus.", "phrases": ["dependency analyzer", "coordinate structure", "sassano"], "overall_score": 1.241909809991181, "scores": [0.5703123148619714, 0.5695746392601971, 0.555765583357814], "rank_score": 0.5652175124933275} -{"id": "laddha-mukherjee-2016-extracting", "title": "Extracting Aspect Specific Opinion Expressions", "abstract": "Opinionated expression extraction is a central problem in fine-grained sentiment analysis. Most existing works focus on either generic subjective expression or aspect expression extraction. However, in opinion mining, it is often desirable to mine the aspect specific opinion expressions (or aspect-sentiment phrases) containing both the aspect and the opinion. This paper proposes a hybrid generative-discriminative framework for extracting such expressions. The hybrid model consists of (i) an unsupervised generative component for modeling the semantic coherence of terms (words/phrases) based on their collocations across different documents, and (ii) a supervised discriminative sequence modeling component for opinion phrase extraction. Experimental results using Amazon.com reviews demonstrate the effectiveness of the approach that significantly outperforms several state-of-the-art baselines.", "phrases": ["aspect term", "asc task", "position"], "overall_score": 0.62077056539028, "scores": [0.6089089235756308, 0.5436860472561297, 0.5425541872288987], "rank_score": 0.565049719353553} -{"id": "dehouck-etal-2020-efficient", "title": "Efficient EUD Parsing", "abstract": "We present the system submission from the FASTPARSE team for the EUD Shared Task at IWPT 2020. We engaged with the task by focusing on efficiency. For this we considered training costs and inference efficiency. Our models are a combination of distilled neural dependency parsers and a rule-based system that projects UD trees into EUD graphs. We obtained an average ELAS of 74.04 for our official submission, ranking 4th overall.", "phrases": ["dependency parser", "rule-based system", "eud graph"], "overall_score": 1.0995258538570587, "scores": [0.606326955682442, 0.5655538837539144, 0.5232527016331254], "rank_score": 0.5650445136898272} -{"id": "yao-etal-2021-adapt", "title": "Adapt-and-Distill: Developing Small, Fast and Effective Pretrained Language Models for Domains", "abstract": "Large pre-trained models have achieved great success in many natural language processing tasks. However, when they are applied in specific domains, these models suffer from domain shift and bring challenges in fine-tuning and online serving for latency and capacity constraints. In this paper, we present a general approach to developing small, fast and effective pre-trained models for specific domains. This is achieved by adapting the off-the-shelf general pre-trained models and performing task-agnostic knowledge distillation in target domains. Specifically, we propose domain-specific vocabulary expansion in the adaptation stage and employ corpus level occurrence probability to choose the size of incremental vocabulary automatically. Then we systematically explore different strategies to compress the large pre-trained models for specific domains. We conduct our experiments in the biomedical and computer science domain.
The experimental results demonstrate that our approach achieves better performance than the BERT BASE model in domain-specific tasks while being 3.3x smaller and 5.1x faster than BERT BASE. The code and pre-trained models are available at https://aka.ms/adalm.", "phrases": ["adapt language model", "domain-specific downstream task", "performance improvement"], "overall_score": 0.7824870852086887, "scores": [0.5866217419443815, 0.5832845396630417, 0.5234290744779294], "rank_score": 0.5644451186951175} -{"id": "prabhu-etal-2019-sampling", "title": "Sampling Bias in Deep Active Classification: An Empirical Study", "abstract": "The exploding cost and time needed for data labeling and model training are bottlenecks for training DNN models on large datasets. Identifying smaller representative data samples with strategies like active learning can help mitigate such bottlenecks. Previous works on active learning in NLP identify the problem of sampling bias in the samples acquired by uncertainty-based querying and develop costly approaches to address it. Using a large empirical study, we demonstrate that active set selection using the posterior entropy of deep models like FastText.zip (FTZ) is robust to sampling biases and to various algorithmic choices (query size and strategies) unlike that suggested by traditional literature. We also show that FTZ based query strategy produces sample sets similar to those from more sophisticated approaches (e.g., ensemble networks). Finally, we show the effectiveness of the selected samples by creating tiny high-quality datasets, and utilizing them for fast and cheap training of large models. Based on the above, we propose a simple baseline for deep active text classification that outperforms the state of the art. We expect the presented work to be useful and informative for dataset compression and for problems involving active, semi-supervised or online learning scenarios. Code and models are available at: .", "phrases": ["active learning", "text classification", "sample"], "overall_score": 0.9084344308259683, "scores": [0.5790631595293597, 0.5672915425685336, 0.5469714407889605], "rank_score": 0.5644420476289512} -{"id": "simon-etal-2019-unsupervised", "title": "Unsupervised Information Extraction: Regularizing Discriminative Approaches with Relation Distribution Losses", "abstract": "Unsupervised relation extraction aims at extracting relations between entities in text. Previous unsupervised approaches are either generative or discriminative. In a supervised setting, discriminative approaches, such as deep neural network classifiers, have demonstrated substantial improvement. However, these models are hard to train without supervision, and the currently proposed solutions are unstable. To overcome this limitation, we introduce a skewness loss which encourages the classifier to predict a relation with confidence given a sentence, and a distribution distance loss enforcing that all relations are predicted on average.
These losses improve the performance of discriminative-based models, and enable us to train deep neural networks satisfactorily, surpassing the current state of the art on three different datasets.", "phrases": ["unsupervised relation extraction", "substantial improvement", "skewness loss", "openre model", "instability"], "overall_score": 1.352976399448851, "scores": [0.6438283919036996, 0.5612068240245673, 0.5435492952963497, 0.5371046009742675, 0.5354858048147199], "rank_score": 0.5642349834027207} -{"id": "zhang-etal-2003-hhmm", "title": "HHMM-based Chinese Lexical Analyzer ICTCLAS", "abstract": "This document presents the results from the Inst. of Computing Tech., CAS in the ACL SIGHAN-sponsored First International Chinese Word Segmentation Bake-off. We introduce the unified HHMM-based framework of our Chinese lexical analyzer ICTCLAS and explain its operation in the six tracks. We then provide the evaluation results and give further analysis. The evaluation shows that ICTCLAS is competitive: it ranked top in both the CTB and PK closed tracks, and second in the PK open track. The ICTCLAS BIG5 version was adapted from the GB version in only two days, yet it also performed well in the two BIG5 closed tracks. Through the first bakeoff, we learned more about developments in Chinese word segmentation and became more confident in our HHMM-based approach; at the same time, the evaluation also exposed real problems in our system. The bakeoff was interesting and helpful.", "phrases": ["analyzer ictclas", "chinese word segmentation", "word-based generative model"], "overall_score": 1.2396291671723618, "scores": [0.607084379754541, 0.5549627130150931, 0.5304915488769274], "rank_score": 0.5641795472155205} -{"id": "xu-rosti-2010-combining", "title": "Combining Unsupervised and Supervised Alignments for MT: An Empirical Study", "abstract": "Word alignment plays a central role in statistical MT (SMT) since almost all SMT systems extract translation rules from word aligned parallel training data. While most SMT systems use unsupervised algorithms (e.g. GIZA++) for training word alignment, supervised methods, which exploit a small amount of human-aligned data, have become increasingly popular recently. This work empirically studies the performance of these two classes of alignment algorithms and explores strategies to combine them to improve overall system performance. We used two unsupervised aligners, GIZA++ and HMM, and one supervised aligner, ITG, in this study. To avoid language and genre specific conclusions, we ran experiments on test sets consisting of two language pairs (Chinese-to-English and Arabic-to-English) and two genres (newswire and weblog). Results show that the two classes of algorithms achieve the same level of MT performance. Modest improvements were achieved by taking the union of the translation grammars extracted from different alignments. Significant improvements (around 1.0 in BLEU) were achieved by combining outputs of different systems trained with different alignments.
The improvements are consistent across languages and genres.", "phrases": ["aligner", "giza++", "system performance", "union"], "overall_score": 0.6197889842253648, "scores": [0.6057882934311996, 0.5658312712638731, 0.5585523486723412, 0.5264530693251022], "rank_score": 0.564156245673129} -{"id": "lyu-etal-2019-semantic", "title": "Semantic Role Labeling with Iterative Structure Refinement", "abstract": "Modern state-of-the-art Semantic Role Labeling (SRL) methods rely on expressive sentence encoders (e.g., multi-layer LSTMs) but tend to model only local (if any) interactions between individual argument labeling decisions. This contrasts with earlier work and also with the intuition that the labels of individual arguments are strongly interdependent. We model interactions between argument labeling decisions through iterative refinement. Starting with an output produced by a factorized model, we iteratively refine it using a refinement network. Instead of modeling arbitrary interactions among roles and words, we encode prior knowledge about the SRL problem by designing a restricted network architecture capturing non-local interactions. This modeling choice prevents overfitting and results in an effective model, outperforming strong factorized baseline models on all 7 CoNLL-2009 languages, and achieving state-of-the-art results on 5 of them, including English.", "phrases": ["refinement", "srl", "argument labeling decision", "baseline model"], "overall_score": 1.010761045121466, "scores": [0.5896521405842328, 0.5867483717802503, 0.5404972890205573, 0.5395681193602994], "rank_score": 0.5641164801863349} -{"id": "li-etal-2019-learning", "title": "Learning to Rank for Plausible Plausibility", "abstract": "Researchers illustrate improvements in contextual encoding strategies via resultant performance on a battery of shared Natural Language Understanding (NLU) tasks. Many of these tasks are of a categorical prediction variety: given a conditioning context (e.g., an NLI premise), provide a label based on an associated prompt (e.g., an NLI hypothesis). The categorical nature of these tasks has led to common use of a cross entropy log-loss objective during training. We suggest this loss is intuitively wrong when applied to plausibility tasks, where the prompt by design is neither categorically entailed nor contradictory given the context. Log-loss naturally drives models to assign scores near 0.0 or 1.0, in contrast to our proposed use of a margin-based loss. Following a discussion of our intuition, we describe a confirmation study based on an extreme, synthetically curated task derived from MultiNLI. We find that a margin-based loss leads to a more plausible model of plausibility. Finally, we illustrate improvements on the Choice Of Plausible Alternative (COPA) task through this change in loss.", "phrases": ["objective", "plausibility task", "margin-based loss"], "overall_score": 1.0105775465865983, "scores": [0.59973290838918, 0.5719199147130294, 0.5203893800099963], "rank_score": 0.5640140677040686} -{"id": "branavan-etal-2008-learning", "title": "Learning Document-Level Semantic Properties from Free-Text Annotations", "abstract": "This paper presents a new method for inferring the semantic properties of documents by leveraging free-text keyphrase annotations. Such annotations are becoming increasingly abundant due to the recent dramatic growth in semi-structured, user-generated online content. 
One especially relevant domain is product reviews, which are often annotated by their authors with pros/cons keyphrases such as \"a real bargain\" or \"good value.\" These annotations are representative of the underlying semantic properties; however, unlike expert annotations, they are noisy: lay authors may use different labels to denote the same property, and some labels may be missing. To learn using such noisy annotations, we find a hidden paraphrase structure which clusters the keyphrases. The paraphrase structure is linked with a latent topic model of the review texts, enabling the system to predict the properties of unannotated documents and to effectively aggregate the semantic properties of multiple reviews. Our approach is implemented as a hierarchical Bayesian model with joint inference. We find that joint inference increases the robustness of the keyphrase clustering and encourages the latent topics to correlate with semantically meaningful properties. Multiple evaluations demonstrate that our model substantially outperforms alternative approaches for summarizing single and multiple documents into a set of semantically salient keyphrases.", "phrases": ["semantic property", "topic model", "user annotation", "category information"], "overall_score": 1.238477241324568, "scores": [0.5926452732821742, 0.5787278073669625, 0.5557241859367567, 0.5275238660905341], "rank_score": 0.5636552831691068} -{"id": "buys-blunsom-2017-robust", "title": "Robust Incremental Neural Semantic Graph Parsing", "abstract": "Parsing sentences to linguistically-expressive semantic representations is a key goal of Natural Language Processing. Yet statistical parsing has focussed almost exclusively on bilexical dependencies or domain-specific logical forms. We propose a neural encoder-decoder transition-based parser which is the first full-coverage semantic graph parser for Minimal Recursion Semantics (MRS). The model architecture uses stack-based embedding features, predicting graphs jointly with unlexicalized predicates and their token alignments. Our parser is more accurate than attention-based baselines on MRS, and on an additional Abstract Meaning Representation (AMR) benchmark, and GPU batch processing makes it an order of magnitude faster than a high-precision grammar-based parser. Further, the 86.69% Smatch score of our MRS parser is higher than the upper-bound on AMR parsing, making MRS an attractive choice as a semantic representation.", "phrases": ["semantic representation", "transition-based parser", "eds"], "overall_score": 1.171855089628358, "scores": [0.5893928489529399, 0.5640872499713668, 0.5371494275230171], "rank_score": 0.5635431754824413} -{"id": "cohen-smith-2009-shared", "title": "Shared Logistic Normal Distributions for Soft Parameter Tying in Unsupervised Grammar Induction", "abstract": "We present a family of priors over probabilistic grammar weights, called the shared logistic normal distribution. This family extends the partitioned logistic normal distribution, enabling factored covariance between the probabilities of different derivation events in the probabilistic grammar, providing a new way to encode prior knowledge about an unknown grammar. We describe a variational EM algorithm for learning a probabilistic grammar based on this family of priors. 
We then experiment with unsupervised dependency grammar induction and show significant improvements using our model for both monolingual learning and bilingual learning with a non-parallel, multilingual corpus.", "phrases": ["logistic normal distribution", "new way", "unsupervised parsing", "pos tag"], "overall_score": 1.7419116156194454, "scores": [0.6169013939307161, 0.5830731731898591, 0.5283594990628941, 0.5258072365047753], "rank_score": 0.5635353256720611} -{"id": "ke-etal-2021-jointgt", "title": "JointGT: Graph-Text Joint Representation Learning for Text Generation from Knowledge Graphs", "abstract": "Existing pre-trained models for knowledge-graph-to-text (KG-to-text) generation simply fine-tune text-to-text pre-trained models such as BART or T5 on KG-to-text datasets, which largely ignore the graph structure during encoding and lack elaborate pre-training tasks to explicitly model graph-text alignments. To tackle these problems, we propose a graph-text joint representation learning model called JointGT. During encoding, we devise a structure-aware semantic aggregation module which is plugged into each Transformer layer to preserve the graph structure. Furthermore, we propose three new pre-training tasks to explicitly enhance the graph-text alignment including respective text / graph reconstruction, and graph-text alignment in the embedding space via Optimal Transport. Experiments show that JointGT obtains new state-of-the-art performance on various KG-to-text datasets.", "phrases": ["pre-training task", "graph-to-text generation", "structure-aware graph encoding"], "overall_score": 1.0096579935172265, "scores": [0.6143618976431628, 0.5485261577069315, 0.5276145107430288], "rank_score": 0.5635008553643744} -{"id": "jha-etal-2010-corpus", "title": "Corpus Creation for New Genres: A Crowdsourced Approach to PP Attachment", "abstract": "This paper explores the task of building an accurate prepositional phrase attachment corpus for new genres while avoiding a large investment in terms of time and money by crowd-sourcing judgments. We develop and present a system to extract prepositional phrases and their potential attachments from ungrammatical and informal sentences and pose the subsequent disambiguation tasks as multiple choice questions to workers from Amazon's Mechanical Turk service. Our analysis shows that this two-step approach is capable of producing reliable annotations on informal and potentially noisy blog text, and this semi-automated strategy holds promise for similar annotation projects in new genres.", "phrases": ["phrase attachment corpus", "amazon", "crowdsourcing service"], "overall_score": 1.0094960736105765, "scores": [0.5919532092101876, 0.5551490427246424, 0.5431292064966385], "rank_score": 0.5634104861438228} -{"id": "oh-etal-2021-surprisal", "title": "Surprisal Estimators for Human Reading Times Need Character Models", "abstract": "While the use of character models has been popular in NLP applications, it has not been explored much in the context of psycholinguistic modeling. This paper presents a character model that can be applied to a structural parser-based processing model to calculate word generation probabilities. Experimental results show that surprisal estimates from a structural processing model using this character model deliver substantially better fits to self-paced reading, eye-tracking, and fMRI data than those from large-scale language models trained on much more data. 
This may suggest that the proposed processing model provides a more humanlike account of sentence processing, which assumes a larger role of morphology, phonotactics, and orthographic complexity than was previously thought.", "phrases": ["self-paced reading", "sentence processing", "morphology", "orthographic complexity", "large role"], "overall_score": 0.7807878664566712, "scores": [0.6016241755985009, 0.5605797894343614, 0.5597148229600546, 0.5508331756628585, 0.543344993652509], "rank_score": 0.5632193914616569} -{"id": "kim-baldwin-2006-automatic", "title": "Automatic Identification of English Verb Particle Constructions using Linguistic Features", "abstract": "This paper presents a method for identifying token instances of verb particle constructions (VPCs) automatically, based on the output of the RASP parser. The proposed method pools together instances of VPCs and verb-PPs from the parser output and uses the sentential context of each such instance to differentiate VPCs from verb-PPs. We show our technique to perform at an F-score of 97.4% at identifying VPCs in Wall Street Journal and Brown Corpus data taken from the Penn Treebank.", "phrases": ["sentential context", "verb-particle construction", "semantic information"], "overall_score": 0.9064598814546262, "scores": [0.6268742888151239, 0.5344646684286402, 0.528306616129807], "rank_score": 0.5632151911245237} -{"id": "ma-etal-2014-punctuation", "title": "Punctuation Processing for Projective Dependency Parsing", "abstract": "Modern statistical dependency parsers assign lexical heads to punctuations as well as words. Punctuation parsing errors lead to low parsing accuracy on words. In this work, we propose an alternative approach to addressing punctuation in dependency parsing. Rather than assigning lexical heads to punctuations, we treat punctuations as properties of their neighbouring words, used as features to guide the parser to build the dependency graph. Integrating our method with an arc-standard parser yields a 93.06% unlabelled attachment score, which is the best accuracy by a single-model transition-based parser reported so far.", "phrases": ["transition-based parser", "punctuation", "neighboring word"], "overall_score": 0.9063717552810361, "scores": [0.6081991767728931, 0.5411898901848331, 0.5400922388049428], "rank_score": 0.5631604352542231} -{"id": "rotman-reichart-2019-deep", "title": "Deep Contextualized Self-training for Low Resource Dependency Parsing", "abstract": "Neural dependency parsing has proven very effective, achieving state-of-the-art results on numerous domains and languages. Unfortunately, it requires large amounts of labeled data, which is costly and laborious to create. In this paper we propose a self-training algorithm that alleviates this annotation bottleneck by training a parser on its own output. Our Deep Contextualized Self-training (DCST) algorithm utilizes representation models trained on sequence labeling tasks that are derived from the parser's output when applied to unlabeled data, and integrates these models with the base parser through a gating mechanism.
We conduct experiments across multiple languages, both in low resource in-domain and in cross-domain setups, and demonstrate that DCST substantially outperforms traditional self-training as well as recent semi-supervised training methods.", "phrases": ["sequence labeling task", "base parser", "deep contextualized self-training"], "overall_score": 1.0089780384810108, "scores": [0.6005429675056174, 0.5506527952832816, 0.5381683329103573], "rank_score": 0.5631213652330854} -{"id": "dras-2015-squibs", "title": "Squibs: Evaluating Human Pairwise Preference Judgments", "abstract": "Human evaluation plays an important role in NLP, often in the form of preference judgments. Although there has been some use of classical non-parametric and bespoke approaches to evaluating these sorts of judgments, there is an entire body of work on this in the context of sensory discrimination testing and the human judgments that are central to it, backed by rigorous statistical theory and freely available software, that NLP can draw on. We investigate one approach, Log-Linear Bradley-Terry models, and apply it to sample NLP data.", "phrases": ["formulation", "irt", "bayesian model"], "overall_score": 0.9062752287503556, "scores": [0.5674240391047919, 0.5650393906556844, 0.5568379500853219], "rank_score": 0.5631004599485995} -{"id": "oflazer-durgar-el-kahlout-2007-exploring", "title": "Exploring Different Representational Units in English-to-Turkish Statistical Machine Translation", "abstract": "We investigate different representational granularities for sub-lexical representation in statistical machine translation work from English to Turkish. We find that (i) representing both Turkish and English at the morpheme-level but with some selective morpheme-grouping on the Turkish side of the training data, (ii) augmenting the training data with "sentences" comprising only the content words of the original training data to bias root word alignment, (iii) reranking the n-best morpheme-sequence outputs of the decoder with a word-based language model, and (iv) using model iteration all provide a non-trivial improvement over a fully word-based baseline. Despite our very limited training data, we improve from 20.22 BLEU points for our simplest model to 25.08 BLEU points for an improvement of 4.86 points or 24% relative.", "phrases": ["sub-lexical representation", "machine translation work", "spe"], "overall_score": 1.444047403706332, "scores": [0.6066318588435226, 0.5623787593995285, 0.5199670602115457], "rank_score": 0.5629925594848656} -{"id": "babko-malaya-etal-2004-proposition", "title": "Proposition Bank II: Delving Deeper", "abstract": "The PropBank project is creating a corpus of text annotated with information about basic semantic propositions. PropBank I (Kingsbury & Palmer, 2002) added a layer of predicate-argument information, or semantic roles, to the syntactic structures of the English Penn Treebank.
This paper presents an overview of the second phase of PropBank Annotation, PropBank II, which is being applied to English and Chinese, and includes (Neo-Davidsonian) eventuality variables, nominal references, sense tagging, and connections to the Penn Discourse Treebank (PDTB), a project for annotating discourse connectives and their arguments.", "phrases": ["propbank", "project", "discourse connective"], "overall_score": 0.7803536496653372, "scores": [0.5689805325587961, 0.5639957377082512, 0.555742240500631], "rank_score": 0.5629061702558927} -{"id": "wu-etal-2010-complexity", "title": "Complexity Metrics in an Incremental Right-Corner Parser", "abstract": "Hierarchical HMM (HHMM) parsers make promising cognitive models: while they use a bounded model of working memory and pursue incremental hypotheses in parallel, they still achieve parsing accuracies competitive with chart-based techniques. This paper aims to validate that a right-corner HHMM parser is also able to produce complexity metrics, which quantify a reader's incremental difficulty in understanding a sentence. Besides defining standard metrics in the HHMM framework, a new metric, embedding difference, is also proposed, which tests the hypothesis that HHMM store elements represent syntactic working memory. Results show that HHMM surprisal outperforms all other evaluated metrics in predicting reading times, and that embedding difference makes a significant, independent contribution.", "phrases": ["complexity metric", "syntactic structure", "latency"], "overall_score": 1.0942907809746891, "scores": [0.5768442247088559, 0.5679193208249862, 0.5422991097063361], "rank_score": 0.5623542184133927} -{"id": "zhuo-etal-2016-segment", "title": "Segment-Level Sequence Modeling using Gated Recursive Semi-Markov Conditional Random Fields", "abstract": "Most of the sequence tagging tasks in natural language processing require recognizing segments with a certain syntactic role or semantic meaning in a sentence. They are usually tackled with Conditional Random Fields (CRFs), which do indirect word-level modeling over word-level features and thus cannot make full use of segment-level information. Semi-Markov Conditional Random Fields (Semi-CRFs) model segments directly but extracting segment-level features for Semi-CRFs is still a very challenging problem. This paper presents Gated Recursive Semi-CRFs (grSemi-CRFs), which model segments directly and automatically learn segment-level features through a gated recursive convolutional neural network. Our experiments on text chunking and named entity recognition (NER) demonstrate that grSemi-CRFs generally outperform other neural models.", "phrases": ["modeling", "semi-crfs", "segment-level feature", "entity recognition"], "overall_score": 1.0938672607888036, "scores": [0.592873620134653, 0.557545383259737, 0.5536666266917517, 0.544460658281482], "rank_score": 0.5621365720919059} -{"id": "braud-etal-2017-syntax", "title": "Does syntax help discourse segmentation? Not so much", "abstract": "Discourse segmentation is the first step in building discourse parsers. Most work on discourse segmentation does not scale to real-world discourse parsing across languages, for two reasons: (i) models rely on constituent trees, and (ii) experiments have relied on gold standard identification of sentence and token boundaries.
We therefore investigate to what extent constituents can be replaced with universal dependencies, or left out completely, as well as how state-of-the-art segmenters fare in the absence of sentence boundaries. Our results show that dependency information is less useful than expected, but we provide a fully scalable, robust model that only relies on part-of-speech information, and show that it performs well across languages in the absence of any gold-standard annotation.", "phrases": ["discourse segmentation", "dependency information", "part-of-speech information", "pos tag"], "overall_score": 1.0070367330473315, "scores": [0.6480176291296146, 0.5520537385473849, 0.5259545650511728, 0.522125675436497], "rank_score": 0.5620379020411673} -{"id": "mackay-kondrak-2005-computing", "title": "Computing Word Similarity and Identifying Cognates with Pair Hidden Markov Models", "abstract": "We present a system for computing similarity between pairs of words. Our system is based on Pair Hidden Markov Models, a variation on Hidden Markov Models that has been used successfully for the alignment of biological sequences. The parameters of the model are automatically learned from training data that consists of word pairs known to be similar. Our tests focus on the identification of cognates --- words of common origin in related languages. The results show that our system outperforms previously proposed techniques.", "phrases": ["cognate", "identification", "language study"], "overall_score": 1.0061003451933952, "scores": [0.5787946546995679, 0.5669428870432817, 0.5388083403450863], "rank_score": 0.561515294029312} -{"id": "dehdari-etal-2016-bira", "title": "BIRA: Improved Predictive Exchange Word Clustering", "abstract": "Word clusters are useful for many NLP tasks including training neural network language models, but current increases in datasets are outpacing the ability of word clusterers to handle them. Little attention has been paid thus far to inducing high-quality word clusters at a large scale. The predictive exchange algorithm is quite scalable, but sometimes does not provide as good perplexity as other slower clustering algorithms. We introduce the bidirectional, interpolated, refining, and alternating (BIRA) predictive exchange algorithm. It improves upon the predictive exchange algorithm\u2019s perplexity by up to 18%, giving it perplexities comparable to the slower two-sided exchange algorithm, and better perplexities than the slower Brown clustering algorithm. Our BIRA implementation is fast, clustering a 2.5 billion token English News Crawl corpus in 3 hours. It also reduces machine translation training time while preserving translation quality. Our implementation is portable and freely available.", "phrases": ["cluster", "predictive exchange algorithm", "refining"], "overall_score": 0.7781923214587717, "scores": [0.5812833337936077, 0.5663382863848462, 0.5364196843612059], "rank_score": 0.56134710151322} -{"id": "zhang-chan-2009-dependency", "title": "Dependency Parsing with Energy-based Reinforcement Learning", "abstract": "We present a model which integrates dependency parsing with reinforcement learning based on a Markov decision process. At each time step, a transition is picked up to construct the dependency tree in terms of the long-run reward. The optimal policy for choosing transitions can be found with the SARSA algorithm. In SARSA, an approximation of the state-action function can be obtained by calculating the negative free energies for the Restricted Boltzmann Machine.
The experimental results on CoNLL-X multilingual data show that the proposed model achieves results comparable to the current state-of-the-art methods.", "phrases": ["reinforcement learning", "optimal policy", "restricted boltzmann machine"], "overall_score": 0.7777863982518887, "scores": [0.5875650412633237, 0.550041832146959, 0.5455559960330497], "rank_score": 0.561054289814444} -{"id": "reddy-knight-2011-unsupervised", "title": "Unsupervised Discovery of Rhyme Schemes", "abstract": "This paper describes an unsupervised, language-independent model for finding rhyme schemes in poetry, using no prior knowledge about rhyme or pronunciation.", "phrases": ["rhyme scheme", "poetry", "pronunciation"], "overall_score": 1.0913454880393587, "scores": [0.6184221087753036, 0.5335174855192995, 0.5305823174737965], "rank_score": 0.5608406372561331} -{"id": "liu-etal-2018-narrative", "title": "Narrative Modeling with Memory Chains and Semantic Supervision", "abstract": "Story comprehension requires a deep semantic understanding of the narrative, making it a challenging task. Inspired by previous studies on the ROC Story Cloze Test, we propose a novel method, tracking various semantic aspects with external neural memory chains while encouraging each to focus on a particular semantic aspect. Evaluated on the task of story ending prediction, our model demonstrates superior performance to a collection of competitive baselines, setting a new state of the art.", "phrases": ["story", "semantic aspect", "event sequence"], "overall_score": 1.0040412579562388, "scores": [0.5847969089197514, 0.561089903164695, 0.5352114745993303], "rank_score": 0.5603660955612589} -{"id": "furstenau-lapata-2009-graph", "title": "Graph Alignment for Semi-Supervised Semantic Role Labeling", "abstract": "Unknown lexical items present a major obstacle to the development of broad-coverage semantic role labeling systems. We address this problem with a semi-supervised learning approach which acquires training instances for unseen verbs from an unlabeled corpus. Our method relies on the hypothesis that unknown lexical items will be structurally and semantically similar to known items for which annotations are available. Accordingly, we represent known and unknown sentences as graphs, formalize the search for the most similar verb as a graph alignment problem and solve the optimization using integer linear programming. Experimental results show that role labeling performance for unknown lexical items improves with training data produced automatically by our method.", "phrases": ["semi-supervised learning approach", "unseen verb", "fu\u0308rstenau"], "overall_score": 1.1645353274153345, "scores": [0.59756507996573, 0.5608173494853388, 0.5216869123510395], "rank_score": 0.5600231139340361} -{"id": "hazarika-etal-2018-icon", "title": "ICON: Interactive Conversational Memory Network for Multimodal Emotion Detection", "abstract": "Emotion recognition in conversations is crucial for building empathetic machines. Present works in this domain do not explicitly consider the inter-personal influences that thrive in the emotional dynamics of dialogues. To this end, we propose Interactive COnversational memory Network (ICON), a multimodal emotion detection framework that extracts multimodal features from conversational videos and hierarchically models the self- and inter-speaker emotional influences into global memories. Such memories generate contextual summaries which aid in predicting the emotional orientation of utterance-videos.
Our model outperforms state-of-the-art networks on multiple classification and regression tasks in two benchmark datasets.", "phrases": ["emotion recognition", "conversational video", "inter-speaker emotional influence"], "overall_score": 1.229310839465199, "scores": [0.577544510424346, 0.5640185434731451, 0.5368873678006953], "rank_score": 0.5594834738993955} -{"id": "eskander-etal-2013-processing", "title": "Processing Spontaneous Orthography", "abstract": "In cases in which there is no standard orthography for a language or language variant, written texts will display a variety of orthographic choices. This is problematic for natural language processing (NLP) because it creates spurious data sparseness. We study the transformation of spontaneously spelled Egyptian Arabic into a conventionalized orthography which we have previously proposed for NLP purposes. We show that a two-stage process can reduce divergences from this standard by 69%, making subsequent processing of Egyptian Arabic easier.", "phrases": ["orthography", "script", "arabic script"], "overall_score": 1.0886631399321678, "scores": [0.6166394581527688, 0.532154427918978, 0.5295926629588201], "rank_score": 0.559462183010189} -{"id": "singh-etal-2010-constraint", "title": "Constraint-Driven Rank-Based Learning for Information Extraction", "abstract": "Most learning algorithms for undirected graphical models require complete inference over at least one instance before parameter updates can be made. SampleRank is a rank-based learning framework that alleviates this problem by updating the parameters during inference. Most semi-supervised learning algorithms also perform full inference on at least one instance before each parameter update. We extend SampleRank to semi-supervised learning in order to circumvent this computational bottleneck. Different approaches to incorporate unlabeled data and prior knowledge into this framework are explored. When evaluated on a standard information extraction dataset, our method significantly outperforms the supervised method, and matches results of a competing state-of-the-art semi-supervised learning approach.", "phrases": ["learning algorithm", "samplerank", "unlabeled data"], "overall_score": 0.7754791156235087, "scores": [0.5885604385343148, 0.5462556620266676, 0.54335371107351], "rank_score": 0.5593899372114975} -{"id": "pater-etal-2012-learning", "title": "Learning probabilities over underlying representations", "abstract": "We show that a class of cases that has been previously studied in terms of learning of abstract phonological underlying representations (URs) can be handled by a learner that chooses URs from a contextually conditioned distribution over observed surface representations. We implement such a learner in a Maximum Entropy version of Optimality Theory, in which UR learning is an instance of semi-supervised learning. Our objective function incorporates a term aimed to ensure generalization, independently required for phonotactic learning in Optimality Theory, and does not have a bias for single URs for morphemes. This learner is successful on a test language provided by Tesar (2006) as a challenge for UR learning. We also provide successful results on learning of a toy case modeled on French vowel alternations, which have also been previously analyzed in terms of abstract URs. 
This case includes lexically conditioned variation, an aspect of the data that cannot be handled by abstract URs, showing that in this respect our approach is more general.", "phrases": ["urs", "learner", "morpheme", "underlying form"], "overall_score": 1.002260305541969, "scores": [0.5851101989835014, 0.5724624605546704, 0.5583992360551372, 0.5215166127805823], "rank_score": 0.5593721270934728} -{"id": "chuang-yeh-2005-aligning", "title": "Aligning Parallel Bilingual Corpora Statistically with Punctuation Criteria", "abstract": "We present a new approach to aligning sentences in bilingual parallel corpora based on punctuation, especially for English and Chinese. Although the length-based approach produces high accuracy rates of sentence alignment for clean parallel corpora written in two Western languages, such as French-English or German-English, it does not work as well for parallel corpora that are noisy or written in two disparate languages such as Chinese-English. It is possible to use cognates on top of the length-based approach to increase the alignment accuracy. However, cognates do not exist between two disparate languages, which limits the applicability of the cognate-based approach. In this paper, we examine the feasibility of exploiting the statistically ordered matching of punctuation marks in two languages to achieve high accuracy sentence alignment. We have experimented with an implementation of the proposed method on parallel corpora, the Chinese-English Sinorama Magazine Corpus and Scientific American Magazine articles, with satisfactory results. Compared with the length-based method, the proposed method exhibits better precision rates based on our experimental results. Highly promising improvement was observed when both the punctuation-based and length-based methods were adopted within a common statistical framework. We also demonstrate that the method can be applied to other language pairs, such as English-Japanese, with minimal additional effort.", "phrases": ["chinese", "sentence alignment", "punctuation mark"], "overall_score": 0.614383645903792, "scores": [0.6016942041700499, 0.5549275541275485, 0.5210865258013344], "rank_score": 0.5592360946996443} -{"id": "kim-etal-2018-efficient", "title": "Efficient Large-Scale Neural Domain Classification with Personalized Attention", "abstract": "In this paper, we explore the task of mapping spoken language utterances to one of thousands of natural language understanding domains in intelligent personal digital assistants (IPDAs). This scenario is observed in mainstream IPDAs in industry that allow third parties to develop thousands of new domains to augment built-in first party domains to rapidly increase domain coverage and overall IPDA capabilities. We propose a scalable neural model architecture with a shared encoder, a novel attention mechanism that incorporates personalization information and domain-specific classifiers that solves the problem efficiently. Our architecture is designed to efficiently accommodate incremental domain additions achieving two orders of magnitude speed up compared to full model retraining. We consider the practical constraints of real-time production systems, and design to minimize memory footprint and runtime latency.
We demonstrate that incorporating personalization significantly improves domain classification accuracy in a setting with thousands of overlapping domains.", "phrases": ["personalization", "new domain", "domain classification accuracy"], "overall_score": 1.001992040874928, "scores": [0.58476071716242, 0.5468682364398577, 0.5460382635939297], "rank_score": 0.559222405732069} -{"id": "gupta-etal-2015-reval", "title": "ReVal: A Simple and Effective Machine Translation Evaluation Metric Based on Recurrent Neural Networks", "abstract": "Many state-of-the-art Machine Translation (MT) evaluation metrics are complex, involve extensive external resources (e.g. for paraphrasing) and require tuning to achieve best results. We present a simple alternative approach based on dense vector spaces and recurrent neural networks (RNNs), in particular Long Short Term Memory (LSTM) networks. For WMT-14, our new metric scores best for two out of five language pairs, and overall best and second best on all language pairs, using Spearman and Pearson correlation, respectively. We also show how training data is computed automatically from WMT ranks data.", "phrases": ["evaluation metric", "tree-lstm", "reference translation"], "overall_score": 1.1628321280881764, "scores": [0.5754677811160968, 0.5549005755687157, 0.5472437878943595], "rank_score": 0.5592040481930574} -{"id": "lo-etal-2018-accurate", "title": "Accurate semantic textual similarity for cleaning noisy parallel corpora using semantic machine translation evaluation metric: The NRC supervised submissions to the Parallel Corpus Filtering task", "abstract": "We present our semantic textual similarity approach in filtering a noisy web crawled parallel corpus using YiSi\u2014a novel semantic machine translation evaluation metric. The systems mainly based on this supervised approach perform well in the WMT18 Parallel Corpus Filtering shared task (4th place in 100-million-word evaluation, 8th place in 10-million-word evaluation, and 6th place overall, out of 48 submissions). In fact, our best performing system\u2014NRC-yisi-bicov is one of the only four submissions ranked top 10 in both evaluations. Our submitted systems also include some initial filtering steps for scaling down the size of the test corpus and a final redundancy removal step for better semantic and token coverage of the filtered corpus. In this paper, we also describe our unsuccessful attempt in automatically synthesizing a noisy parallel development corpus for tuning the weights to combine different parallelism and fluency features.", "phrases": ["parallel corpus", "textual similarity approach", "noisy web"], "overall_score": 1.0018318729209408, "scores": [0.583319491976629, 0.5622730761629371, 0.5318064747451812], "rank_score": 0.5591330142949158} -{"id": "renduchintala-etal-2016-creating", "title": "Creating Interactive Macaronic Interfaces for Language Learning", "abstract": "We present a prototype of a novel technology for second language instruction. Our learn-by-reading approach lets a human learner acquire new words and constructions by encountering them in context. To facilitate reading comprehension, our technology presents mixed native language (L1) and second language (L2) sentences to a learner and allows them to interact with the sentences to make the sentences easier (more L1-like) or harder (more L2-like) to read.
Eventually, our system should continuously track a learner\u2019s knowledge and learning style by modeling their interactions, including performance on a pop quiz feature. This will allow our system to generate personalized mixed-language texts for learners.", "phrases": ["language instruction", "learner", "user interface"], "overall_score": 0.8998843080026193, "scores": [0.5791939621591674, 0.5742604389648422, 0.5239342717480776], "rank_score": 0.559129557624029} -{"id": "johny-etal-2021-finite", "title": "Finite-state script normalization and processing utilities: The Nisaba Brahmic library", "abstract": "This paper presents an open-source library for efficient low-level processing of ten major South Asian Brahmic scripts. The library provides a flexible and extensible framework for supporting crucial operations on Brahmic scripts, such as NFC, visual normalization, reversible transliteration, and validity checks, implemented in Python within a finite-state transducer formalism. We survey some common Brahmic script issues that may adversely affect the performance of downstream NLP tasks, and provide the rationale for finite-state design and system implementation details.", "phrases": ["normalization", "open-source library", "brahmic script"], "overall_score": 0.6136947174789882, "scores": [0.5940768308067601, 0.542754811856298, 0.5389953724060906], "rank_score": 0.5586090050230497} -{"id": "castelli-etal-2020-techqa", "title": "The TechQA Dataset", "abstract": "We introduce TECHQA, a domain-adaptation question answering dataset for the technical support domain. The TECHQA corpus highlights two real-world issues from the automated customer support domain. First, it contains actual questions posed by users on a technical forum, rather than questions generated specifically for a competition or a task. Second, it has a real-world size \u2013 600 training, 310 dev, and 490 evaluation question/answer pairs \u2013 thus reflecting the cost of creating large labeled datasets with actual data. Hence, TECHQA is meant to stimulate research in domain adaptation rather than as a resource to build QA systems from scratch. TECHQA was obtained by crawling the IBM Developer and DeveloperWorks forums for questions with accepted answers provided in an IBM Technote\u2014a technical document that addresses a specific technical issue. We also release a collection of the 801,998 Technotes available on the web as of April 4, 2019 as a companion resource that can be used to learn representations of the IT domain language.", "phrases": ["support domain", "actual question", "real-world size"], "overall_score": 0.774323202422308, "scores": [0.5726303225524306, 0.5611657984366334, 0.5418722452808232], "rank_score": 0.5585561220899624} -{"id": "wang-etal-2020-learning-efficient", "title": "Learning Efficient Dialogue Policy from Demonstrations through Shaping", "abstract": "Training a task-oriented dialogue agent with reinforcement learning is prohibitively expensive since it requires a large volume of interactions with users. Human demonstrations can be used to accelerate learning progress. However, how to effectively leverage demonstrations to learn dialogue policy remains less explored. In this paper, we present S2Agent that efficiently learns dialogue policy from demonstrations through policy shaping and reward shaping. We use an imitation model to distill knowledge from demonstrations, based on which policy shaping estimates feedback on how the agent should act in policy space.
Reward shaping is then incorporated to explicitly bonus state-actions similar to demonstrations in value space, encouraging better exploration. The effectiveness of the proposed S2Agent is demonstrated in three dialogue domains and a challenging domain adaptation task with both user simulator evaluation and human evaluation.", "phrases": ["dialogue policy", "reward shaping", "imitation model"], "overall_score": 0.8989354222839147, "scores": [0.6156836817668014, 0.5320040420780114, 0.5279322214894665], "rank_score": 0.5585399817780932} -{"id": "feng-etal-2012-characterizing", "title": "Characterizing Stylistic Elements in Syntactic Structure", "abstract": "Many of the writing styles recognized in rhetorical and composition theories involve deep syntactic elements. However, most previous research for computational stylometric analysis has relied on shallow lexico-syntactic patterns. Some very recent work has shown that PCFG models can detect distributional difference in syntactic styles, but without offering much insight into exactly what constitutes salient stylistic elements in sentence structure characterizing each authorship. In this paper, we present a comprehensive exploration of syntactic elements in writing styles, with particular emphasis on interpretable characterization of stylistic elements. We present analytic insights with respect to the authorship attribution task in two different domains.", "phrases": ["style", "previous research", "stylometric analysis"], "overall_score": 1.086702175011756, "scores": [0.5710851080364989, 0.5687168615596069, 0.5355613695683265], "rank_score": 0.5584544463881441} -{"id": "dai-etal-2018-fine", "title": "Fine-grained Structure-based News Genre Categorization", "abstract": "Journalists usually organize and present the contents of a news article following a well-defined structure. In this work, we propose a new task to categorize news articles based on their content presentation structures, which is beneficial for various NLP applications. We first define a small set of news elements considering their functions (e.g., introducing the main story or event, catching the reader's attention and providing details) in a news story and their writing style (narrative or expository), and then formally define four commonly used news article structures based on their selections and organizations of news elements. We create an annotated dataset for structure-based news genre identification, and finally, we build a predictive model to assess the feasibility of this classification task using structure indicative features.", "phrases": ["small set", "news element", "indicative feature"], "overall_score": 0.3868791617453171, "scores": [0.6011770256103741, 0.5435311994176838, 0.5297377191916866], "rank_score": 0.5581486480732482} -{"id": "recski-2016-building", "title": "Building Concept Graphs from Monolingual Dictionary Entries", "abstract": "We present the dict_to_4lang tool for processing entries of three monolingual dictionaries of English and mapping definitions to concept graphs following the 4lang principles of semantic representation introduced by (Kornai, 2010). 4lang representations are domain- and language-independent, and make use of only a very limited set of primitives to encode the meaning of all utterances. Our pipeline relies on the Stanford Dependency Parser for syntactic analysis; the dep_to_4lang module then builds directed graphs of concepts based on dependency relations between words in each definition.
Several issues are handled by construction-specific rules that are applied to the output of dep_to_4lang. Manual evaluation suggests that ca. 75% of graphs built from the Longman Dictionary are either entirely correct or contain only minor errors. dict_to_4lang is available under an MIT license as part of the 4lang library and has been used successfully in measuring Semantic Textual Similarity (Recski and \u00c1cs, 2015). An interactive demo of core 4lang functionalities is available at .", "phrases": ["definition", "dependency relation", "2-edge"], "overall_score": 0.7729043421639916, "scores": [0.5727679834376231, 0.569227884215353, 0.5306020246293297], "rank_score": 0.5575326307607686} -{"id": "kato-etal-2018-construction", "title": "Construction of Large-scale English Verbal Multiword Expression Annotated Corpus", "abstract": "Multiword expressions (MWEs) consist of groups of tokens, which should be treated as a single syntactic or semantic unit. In this work, we focus on verbal MWEs (VMWEs), whose accurate recognition is challenging because they could be discontinuous (e.g., take .. off). Since previous English VMWE annotations are relatively small-scale in terms of VMWE occurrences and types, we conduct large-scale annotations of VMWEs on the Wall Street Journal portion of English Ontonotes by a combination of automatic annotations and crowdsourcing. Concretely, we first construct a VMWE dictionary based on the English-language Wiktionary. After that, we collect possible VMWE occurrences in Ontonotes and filter candidates with the help of gold dependency trees, then we formalize VMWE annotations as a multiword sense disambiguation problem to exploit crowdsourcing. As a result, we annotate 7,833 VMWE instances belonging to various categories, such as phrasal verbs, light verb constructions, and semi-fixed VMWEs. We hope this large-scale VMWE-annotated resource helps to develop models for MWE recognition and dependency parsing that are aware of English MWEs. Our resource is publicly available.", "phrases": ["multiword expression", "verbal mwe", "automatic annotation", "verb construction"], "overall_score": 0.7728134428192089, "scores": [0.5908080871348933, 0.5568500553192239, 0.547857973584417, 0.534352126936664], "rank_score": 0.5574670607437996} -{"id": "laubli-etal-2013-statistical", "title": "Statistical Machine Translation for Automobile Marketing Texts", "abstract": "We describe a project on introducing an in-house statistical machine translation system for marketing texts from the automobile industry with the final aim of replacing manual translation with post-editing, based on the translation system. The focus of the paper is the suitability of such texts for SMT; we present experiments in domain adaptation and decompounding that improve the baseline translation systems, the results of which are evaluated using automatic metrics as well as manual evaluation.", "phrases": ["automobile marketing", "solution text", "topical domain", "film subtitling"], "overall_score": 0.7728016872357906, "scores": [0.6055962209656354, 0.5573835945363236, 0.540975094583958, 0.525879413445481], "rank_score": 0.5574585808828494} -{"id": "krone-etal-2020-learning", "title": "Learning to Classify Intents and Slot Labels Given a Handful of Examples", "abstract": "Intent classification (IC) and slot filling (SF) are core components in most goal-oriented dialogue systems. Current IC/SF models perform poorly when the number of training examples per class is small. 
We propose a new few-shot learning task, few-shot IC/SF, to study and improve the performance of IC and SF models on classes not seen at training time in ultra low resource scenarios. We establish a few-shot IC/SF benchmark by defining few-shot splits for three public IC/SF datasets, ATIS, TOP, and Snips. We show that two popular few-shot learning algorithms, model agnostic meta learning (MAML) and prototypical networks, outperform a fine-tuning baseline on this benchmark. Prototypical networks achieves significant gains in IC performance on the ATIS and TOP datasets, while both prototypical networks and MAML outperform the baseline with respect to SF on all three datasets. In addition, we demonstrate that joint training as well as the use of pre-trained language models, ELMo and BERT in our case, are complementary to these few-shot learning methods and yield further gains.", "phrases": ["intent", "training example", "maml", "few-shot learning method"], "overall_score": 1.2248337124827264, "scores": [0.577749720993599, 0.5654921594554517, 0.5489462465066425, 0.5375952554378167], "rank_score": 0.5574458455983775} -{"id": "takamura-etal-2016-discriminative", "title": "Discriminative Analysis of Linguistic Features for Typological Study", "abstract": "We address the task of automatically estimating the missing values of linguistic features by making use of the fact that some linguistic features in typological databases are informative to each other. The questions to address in this work are (i) how much predictive power do features have on the value of another feature? (ii) to what extent can we attribute this predictive power to genealogical or areal factors, as opposed to being provided by tendencies or implicational universals? To address these questions, we conduct a discriminative or predictive analysis on the typological database. Specifically, we use a machine-learning classifier to estimate the value of each feature of each language using the values of the other features, under different choices of training data: all the other languages, or all the other languages except for the ones having the same origin or area with the target language.", "phrases": ["predictive power", "implicational universal", "other feature"], "overall_score": 0.772735611597609, "scores": [0.5646027346245922, 0.5596212449573652, 0.5480087725732983], "rank_score": 0.5574109173850853} -{"id": "mota-grishman-2008-ne", "title": "Is this NE tagger getting old?", "abstract": "This paper focuses on the influence of changing the text time frame on the performance of a named entity tagger. We followed a twofold approach to investigate this subject: on the one hand, we analyzed a corpus that spans 8 years, and, on the other hand, we assessed the performance of a name tagger trained and tested on that corpus. We created 8 samples from the corpus, each drawn from the articles for a particular year. In terms of corpus analysis, we calculated the corpus similarity and names shared between samples. To see the effect on tagger performance, we implemented a semi-supervised name tagger based on co-training; then, we trained and tested our tagger on those samples. We observed that corpus similarity, names shared between samples, and tagger performance all decay as the time gap between the samples increases. Furthermore, we observed that the corpus similarity and names shared correlate with the tagger F-measure. 
These results show that named entity recognition systems may become obsolete in a short period of time.", "phrases": ["name tagger", "decay", "time gap"], "overall_score": 0.38604716426651214, "scores": [0.6111295109877936, 0.5323914921924578, 0.5273239851293813], "rank_score": 0.5569483294365442} -{"id": "belinkov-etal-2020-linguistic", "title": "On the Linguistic Representational Power of Neural Machine Translation Models", "abstract": "Despite the recent success of deep neural networks in natural language processing and other spheres of artificial intelligence, their interpretability remains a challenge. We analyze the representations learned by neural machine translation (NMT) models at various levels of granularity and evaluate their quality through relevant extrinsic properties. In particular, we seek answers to the following questions: (i) How accurately is word structure captured within the learned representations, which is an important aspect in translating morphologically rich languages? (ii) Do the representations capture long-range dependencies, and effectively handle syntactically divergent languages? (iii) Do the representations capture lexical semantics? We conduct a thorough investigation along several parameters: (i) Which layers in the architecture capture each of these linguistic phenomena; (ii) How does the choice of translation unit (word, character, or subword unit) impact the linguistic properties captured by the underlying representations? (iii) Do the encoder and decoder learn differently and independently? (iv) Do the representations learned by multilingual NMT models capture the same amount of linguistic information as their bilingual counterparts? Our data-driven, quantitative evaluation illuminates important aspects in NMT models and their ability to capture various linguistic phenomena. We show that deep NMT models trained in an end-to-end fashion, without being provided any direct supervision during the training process, learn a non-trivial amount of linguistic information. Notable findings include the following observations: (i) Word morphology and part-of-speech information are captured at the lower layers of the model; (ii) In contrast, lexical semantics or non-local syntactic and semantic dependencies are better represented at the higher layers of the model; (iii) Representations learned using characters are more informed about word-morphology compared to those learned using subword units; and (iv) Representations learned by multilingual models are richer compared to bilingual models.", "phrases": ["relevant extrinsic property", "investigation", "linguistic information", "character-level representation"], "overall_score": 1.334824907412458, "scores": [0.5888352645272021, 0.5804913292410446, 0.5356825458517207, 0.5216517534634948], "rank_score": 0.5566652232708655} -{"id": "maharana-etal-2021-improving", "title": "Improving Generation and Evaluation of Visual Stories via Semantic Consistency", "abstract": "Story visualization is an underexplored task that falls at the intersection of many important research directions in both computer vision and natural language processing. In this task, given a series of natural language captions which compose a story, an agent must generate a sequence of images that correspond to the captions. Prior work has introduced recurrent generative models which outperform text-to-image synthesis models on this task. However, there is room for improvement of generated images in terms of visual quality, coherence and relevance. 
We present a number of improvements to prior modeling approaches, including (1) the addition of a dual learning framework that utilizes video captioning to reinforce the semantic alignment between the story and generated images, (2) a copy-transform mechanism for sequentially-consistent story visualization, and (3) MART-based transformers to model complex interactions between frames. We present ablation studies to demonstrate the effect of each of these techniques on the generative power of the model for both individual images as well as the entire narrative. Furthermore, due to the complexity and generative nature of the task, standard evaluation metrics do not accurately reflect performance. Therefore, we also provide an exploration of evaluation metrics for the model, focused on aspects of the generated frames such as the presence/quality of generated characters, the relevance to captions, and the diversity of the generated images. We also present correlation experiments of our proposed automated metrics with human evaluations.", "phrases": ["story visualization", "video captioning", "evaluation metric"], "overall_score": 0.611534990789982, "scores": [0.6001698949386042, 0.5410535513729677, 0.5287059649041979], "rank_score": 0.5566431370719233} -{"id": "kim-etal-2018-modeling", "title": "Modeling with Recurrent Neural Networks for Open Vocabulary Slots", "abstract": "Dealing with \u2018open-vocabulary\u2019 slots has been among the challenges in the natural language area. While recent studies on attention-based recurrent neural network (RNN) models have performed well in completing several language-related tasks such as spoken language understanding and dialogue systems, there has been a lack of attempts to address filling slots that take on values from a virtually unlimited set. In this paper, we propose a new RNN model that can capture the vital concept: Understanding the role of a word may vary according to how long a reader focuses on a particular part of a sentence. The proposed model utilizes a long-term aware attention structure, positional encoding primarily considering the relative distance between words, and multi-task learning of a character-based language model and an intent detection model. We show that the model outperforms the existing RNN models with respect to discovering \u2018open-vocabulary\u2019 slots without any external information, such as a named entity database or knowledge base. In particular, we confirm that it performs better with a greater number of slots in a dataset, including unknown words, by evaluating the models on a dataset of several domains. In addition, the proposed model also demonstrates superior performance with regard to intent detection.", "phrases": ["aware attention structure", "encoding", "multi-task learning"], "overall_score": 0.771126886496265, "scores": [0.5651958084562063, 0.5560549521573003, 0.5475006419529554], "rank_score": 0.5562504675221541} -{"id": "su-markert-2008-words", "title": "From Words to Senses: A Case Study of Subjectivity Recognition", "abstract": "We determine the subjectivity of word senses. To avoid costly annotation, we evaluate how useful existing resources established in opinion mining are for this task. We show that results achieved with existing resources that are not tailored towards word sense subjectivity classification can rival results achieved with supervision on a manually annotated training set.
However, results with different resources vary substantially and are dependent on the different definitions of subjectivity used in the establishment of the resources.", "phrases": ["subjectivity", "training set", "similar task", "agreement"], "overall_score": 1.3335412019971589, "scores": [0.6282853542031385, 0.541608574281634, 0.5327527487291094, 0.5218728289126741], "rank_score": 0.556129876531639} -{"id": "drabek-yarowsky-2005-induction", "title": "Induction of Fine-Grained Part-of-Speech Taggers via Classifier Combination and Crosslingual Projection", "abstract": "This paper presents an original approach to part-of-speech tagging of fine-grained features (such as case, aspect, and adjective person/number) in languages such as English where these properties are generally not morphologically marked. \n \nThe goals of such rich lexical tagging in English are to provide additional features for word alignment models in bilingual corpora (for statistical machine translation), and to provide an information source for part-of-speech tagger induction in new languages via tag projection across bilingual corpora. \n \nFirst, we present a classifier-combination approach to tagging English bitext with very fine-grained part-of-speech tags necessary for annotating morphologically richer languages such as Czech and French, combining the extracted features of three major English parsers, and achieve fine-grained-tag-level syntactic analysis accuracy higher than any individual parser. \n \nSecond, we present experimental results for the cross-language projection of part-of-speech taggers in Czech and French via word-aligned bitext, achieving successful fine-grained part-of-speech tagging of these languages without any Czech or French training data of any kind.", "phrases": ["tagger", "french", "rich language"], "overall_score": 0.8949589503459626, "scores": [0.5738176683446117, 0.5651702759150752, 0.5292198382805557], "rank_score": 0.5560692608467476} -{"id": "cui-etal-2020-unsupervised", "title": "Unsupervised Natural Language Inference via Decoupled Multimodal Contrastive Learning", "abstract": "We propose to solve the natural language inference problem without any supervision from the inference labels via task-agnostic multimodal pretraining. Although recent studies of multimodal self-supervised learning also represent the linguistic and visual context, their encoders for different modalities are coupled. Thus they cannot incorporate visual information when encoding plain text alone. In this paper, we propose Multimodal Aligned Contrastive Decoupled learning (MACD) network. MACD forces the decoupled text encoder to represent the visual information via contrastive learning. Therefore, it embeds visual knowledge even for plain text inference. We conducted comprehensive experiments over plain text inference datasets (i.e. SNLI and STS-B). The unsupervised MACD even outperforms the fully-supervised BiLSTM and BiLSTM+ELMO on STS-B.", "phrases": ["multimodal", "contrastive learning", "text encoder"], "overall_score": 0.8947934921251752, "scores": [0.5889193814426439, 0.5496476890362939, 0.5293322971429489], "rank_score": 0.5559664558739623} -{"id": "brown-2008-exploiting", "title": "Exploiting Document-Level Context for Data-Driven Machine Translation", "abstract": "This paper presents a method for exploiting document-level similarity between the documents in the training corpus for a corpus-driven (statistical or example-based) machine translation system and the input documents it must translate. 
The method is simple to implement, efficient (increases the translation time of an example-based system by only a few percent), and robust (still works even when the actual document boundaries in the input text are not known). Experiments on French-English and Arabic-English showed relative gains over the same system without using document-level similarity of up to 7.4% and 5.4%, respectively, on the BLEU metric.", "phrases": ["document-level similarity", "training corpus", "input document"], "overall_score": 0.7706873049969478, "scores": [0.5970303013048862, 0.5390853147071302, 0.531684513480749], "rank_score": 0.5559333764975883} -{"id": "talukdar-etal-2006-context", "title": "A Context Pattern Induction Method for Named Entity Extraction", "abstract": "We present a novel context pattern induction method for information extraction, specifically named entity extraction. Using this method, we extended several classes of seed entity lists into much larger high-precision lists. Using token membership in these extended lists as additional features, we improved the accuracy of a conditional random field-based named entity tagger. In contrast, features derived from the seed lists decreased extractor accuracy.", "phrases": ["seed entity list", "unlabeled data", "other approach"], "overall_score": 1.3812087110937292, "scores": [0.6104432723014048, 0.5312950802563118, 0.5257794729649046], "rank_score": 0.5558392751742071} -{"id": "kaeshammer-2013-synchronous", "title": "Synchronous Linear Context-Free Rewriting Systems for Machine Translation", "abstract": "We propose synchronous linear context-free rewriting systems as an extension to synchronous context-free grammars in which synchronized non-terminals span k \u2265 1 continuous blocks on each side of the bitext. Such discontinuous constituents are required for inducing certain alignment configurations that occur relatively frequently in manually annotated parallel corpora and that cannot be generated with less expressive grammar formalisms. As part of our investigations concerning the minimal k that is required for inducing manual alignments, we present a hierarchical aligner in the form of a deduction system. We find that by restricting k to 2 on both sides, 100% of the data can be covered.", "phrases": ["constituent", "alignment configuration", "translation model"], "overall_score": 0.8937199974155438, "scores": [0.5806829472439027, 0.5443289467262619, 0.5408864743562455], "rank_score": 0.5552994561088034} -{"id": "moon-etal-2018-multimodal", "title": "Multimodal Named Entity Recognition for Short Social Media Posts", "abstract": "We introduce a new task called Multimodal Named Entity Recognition (MNER) for noisy user-generated data such as tweets or Snapchat captions, which comprise short text with accompanying images. These social media posts often come in inconsistent or incomplete syntax and lexical notations with very limited surrounding textual contexts, bringing significant challenges for NER. To this end, we create a new dataset for MNER called SnapCaptions (Snapchat image-caption pairs submitted to public and crowd-sourced stories with fully annotated named entities). We then build upon the state-of-the-art Bi-LSTM word/character based NER models with 1) a deep image network which incorporates relevant visual context to augment textual information, and 2) a generic modality-attention module which learns to attenuate irrelevant modalities while amplifying the most informative ones to extract contexts from, adaptive to each sample and token.
The proposed MNER model with modality attention significantly outperforms the state-of-the-art text-only NER models by successfully leveraging provided visual contexts, opening up potential applications of MNER on myriads of social media platforms.", "phrases": ["entity recognition task", "social medium post", "multimodal ner network"], "overall_score": 1.4241420395373314, "scores": [0.5687368579186804, 0.5513134601137477, 0.5456457730749235], "rank_score": 0.5552320303691172} -{"id": "wang-etal-2017-crowd", "title": "CROWD-IN-THE-LOOP: A Hybrid Approach for Annotating Semantic Roles", "abstract": "Crowdsourcing has proven to be an effective method for generating labeled data for a range of NLP tasks. However, multiple recent attempts of using crowdsourcing to generate gold-labeled training data for semantic role labeling (SRL) reported only modest results, indicating that SRL is perhaps too difficult a task to be effectively crowdsourced. In this paper, we postulate that while producing SRL annotation does require expert involvement in general, a large subset of SRL labeling tasks is in fact appropriate for the crowd. We present a novel workflow in which we employ a classifier to identify difficult annotation tasks and route each task either to experts or crowd workers according to their difficulties. Our experimental evaluation shows that the proposed approach reduces the workload for experts by over two-thirds, and thus significantly reduces the cost of producing SRL annotation at little loss in quality.", "phrases": ["gold-labeled training data", "semantic role labeling", "expert involvement", "crowd", "worker"], "overall_score": 0.8936012354277845, "scores": [0.6095257120975511, 0.5563347359251645, 0.5522232531368042, 0.5378099270943174, 0.5202346974307165], "rank_score": 0.5552256651369107} -{"id": "horvat-etal-2015-hierarchical", "title": "Hierarchical Statistical Semantic Realization for Minimal Recursion Semantics", "abstract": "We introduce a robust statistical approach to realization from Minimal Recursion Semantics representations. The approach treats realization as a translation problem, transforming the Dependency MRS graph representation to a surface string. Translation is based on a Synchronous Context-Free Grammar that is automatically extracted from a large corpus of parsed sentences. We have evaluated the new approach on the Wikiwoods corpus, where it shows promising results.", "phrases": ["realization", "statistical approach", "translation problem"], "overall_score": 0.6099385256871627, "scores": [0.5717500696052784, 0.5474255316620551, 0.5463943144668556], "rank_score": 0.5551899719113963} -{"id": "durmus-cardie-2018-exploring", "title": "Exploring the Role of Prior Beliefs for Argument Persuasion", "abstract": "Public debate forums provide a common platform for exchanging opinions on a topic of interest. While recent studies in natural language processing (NLP) have provided empirical evidence that the language of the debaters and their patterns of interaction play a key role in changing the mind of a reader, research in psychology has shown that prior beliefs can affect our interpretation of an argument and could therefore constitute a competing alternative explanation for resistance to changing one's stance. To study the actual effect of language use vs. prior beliefs on persuasion, we provide a new dataset and propose a controlled setting that takes into consideration two reader-level factors: political and religious ideology.
We find that prior beliefs affected by these reader-level factors play a more important role than language use effects and argue that it is important to account for them in NLP studies of persuasion.", "phrases": ["belief", "persuasion", "debate forum"], "overall_score": 0.8924689869804887, "scores": [0.5778899847106543, 0.546828450295668, 0.5388480438596931], "rank_score": 0.554522159622005} -{"id": "sipos-etal-2012-large", "title": "Large-Margin Learning of Submodular Summarization Models", "abstract": "In this paper, we present a supervised learning approach to training submodular scoring functions for extractive multidocument summarization. By taking a structured prediction approach, we provide a large-margin method that directly optimizes a convex relaxation of the desired performance measure. The learning method applies to all submodular summarization methods, and we demonstrate its effectiveness for both pairwise and coverage-based scoring functions on multiple datasets. Compared to state-of-the-art functions that were tuned manually, our method significantly improves performance and enables high-fidelity models with a number of parameters well beyond what could reasonably be tuned by hand.", "phrases": ["structured output learning", "benchmark dataset", "greedy algorithm", "rouge score"], "overall_score": 1.2764399826679158, "scores": [0.55723217582557, 0.5564057973665003, 0.5555473586292465, 0.5482180319921158], "rank_score": 0.5543508409533582} -{"id": "arcan-etal-2014-identification", "title": "Identification of Bilingual Terms from Monolingual Documents for Statistical Machine Translation", "abstract": "This publication has emanated from research supported in part by a research grant from Science Foundation Ireland (SFI) under Grant Number SFI/12/RC/2289 and by the European Union supported projects EuroSentiment (Grant No. 296277), LIDER (Grant No. 610782) and MateCat (ICT-2011.4.2-287688).", "phrases": ["english-italian language pair", "translation equivalent", "disambiguation", "wiki machine"], "overall_score": 0.993222802212189, "scores": [0.5804920729072912, 0.5719811019192029, 0.5329820045070005, 0.5318576224570264], "rank_score": 0.5543282004476303} -{"id": "kachuee-etal-2021-self", "title": "Self-Supervised Contrastive Learning for Efficient User Satisfaction Prediction in Conversational Agents", "abstract": "Turn-level user satisfaction is one of the most important performance metrics for conversational agents. It can be used to monitor the agent's performance and provide insights about defective user experiences. While end-to-end deep learning has shown promising results, having access to a large number of reliable annotated samples required by these methods remains challenging. In a large-scale conversational system, there is a growing number of newly developed skills, making the traditional data collection, annotation, and modeling process impractical due to the required annotation costs and the turnaround times. In this paper, we suggest a self-supervised contrastive learning approach that leverages the pool of unlabeled data to learn user-agent interactions. We show that the pre-trained models using the self-supervised objective are transferable to the user satisfaction prediction. In addition, we propose a novel few-shot transfer learning approach that ensures better transferability for very small sample sizes. The suggested few-shot method does not require any inner loop optimization process and is scalable to very large datasets and complex models.
Based on our experiments using real data from a large-scale commercial system, the suggested approach is able to significantly reduce the required number of annotations, while improving the generalization on unseen skills.", "phrases": ["user satisfaction prediction", "conversational system", "contrastive learning approach"], "overall_score": 1.078223758605128, "scores": [0.5760042949981826, 0.5458211956795235, 0.5404667160748662], "rank_score": 0.5540974022508575} -{"id": "sauri-etal-2005-evita", "title": "Evita: A Robust Event Recognizer For QA Systems", "abstract": "We present Evita, an application for recognizing events in natural language texts. Although developed as part of a suite of tools aimed at providing question answering systems with information about both temporal and intensional relations among events, it can be used independently as an event extraction tool. It is unique in that it is not limited to any pre-established list of relation types (events), nor is it restricted to a specific domain. Evita performs the identification and tagging of event expressions based on fairly simple strategies, informed by both linguistic- and statistically-based data. It achieves a performance ratio of 80.12% F-measure.", "phrases": ["event extractor", "rule-based module", "most verb"], "overall_score": 1.3767654391387347, "scores": [0.5873971379507719, 0.5521466555134107, 0.5226097195334103], "rank_score": 0.5540511709991977} -{"id": "stamborg-etal-2012-using", "title": "Using Syntactic Dependencies to Solve Coreferences", "abstract": "This paper describes the structure of the LTH coreference solver used in the closed track of the CoNLL 2012 shared task (Pradhan et al., 2012). The solver core is a mention classifier that uses Soon et al. (2001)'s algorithm and features extracted from the dependency graphs of the sentences. \n \nThis system builds on Bjorkelund and Nugues (2011)'s solver that we extended so that it can be applied to the three languages of the task: English, Chinese, and Arabic. We designed a new mention detection module that removes pleonastic pronouns, prunes constituents, and recovers mentions when they do not match exactly a noun phrase. We carefully redesigned the features so that they reflect more complex linguistic phenomena as well as discourse properties. Finally, we introduced a minimal cluster model grounded in the first mention of an entity. \n \nWe optimized the feature sets for the three languages: We carried out an extensive evaluation of pairs of features and we complemented the single features with associations that improved the CoNLL score. We obtained the respective scores of 59.57, 56.62, and 48.25 on English, Chinese, and Arabic on the development set, 59.36, 56.85, and 49.43 on the test set, and the combined official score of 55.21.", "phrases": ["mention classifier", "dependency graph", "arabic"], "overall_score": 0.6084797706210748, "scores": [0.5977860314705348, 0.5404505206766083, 0.5233499153374646], "rank_score": 0.5538621558282025} -{"id": "gupta-etal-2012-mining", "title": "Mining Hindi-English Transliteration Pairs from Online Hindi Lyrics", "abstract": "This paper describes a method to mine Hindi-English transliteration pairs from online Hindi song lyrics. The technique is based on the observations that lyrics are transliterated word-by-word, maintaining the precise word order. The mining task is nevertheless challenging because the Hindi lyrics and its transliterations are usually available from different, often unrelated, websites.
Therefore, it is a non-trivial task to match the Hindi lyrics to their transliterated counterparts. Moreover, there are various types of noise in lyrics data that need to be appropriately handled before songs can be aligned at word level. The mined data of 30823 unique Hindi-English transliteration pairs with an accuracy of more than 92% is publicly available. Although the present work reports mining of Hindi-English word pairs, the same technique can be easily adapted for other languages for which song lyrics are available online in native and Roman scripts.", "phrases": ["transliteration pair", "hindi song lyric", "word order"], "overall_score": 0.7674933564153528, "scores": [0.6060687795505189, 0.5315613954021554, 0.5232581138708086], "rank_score": 0.5536294296078276} -{"id": "murphy-etal-2012-selecting", "title": "Selecting Corpus-Semantic Models for Neurolinguistic Decoding", "abstract": "Neurosemantics aims to learn the mapping between concepts and the neural activity which they elicit during neuroimaging experiments. Different approaches have been used to represent individual concepts, but current state-of-the-art techniques require extensive manual intervention to scale to arbitrary words and domains. To overcome this challenge, we initiate a systematic comparison of automatically-derived corpus representations, based on various types of textual co-occurrence. We find that dependency parse-based features are the most effective, achieving accuracies similar to the leading semi-manual approaches and higher than any published for a corpus-based model. We also find that simple word features enriched with directional information provide a close-to-optimal solution at much lower computational cost.", "phrases": ["corpus-based model", "brain", "various study"], "overall_score": 1.3266171529119852, "scores": [0.5807806980405309, 0.546446701622445, 0.5324995716869547], "rank_score": 0.5532423237833103} -{"id": "qian-etal-2018-hierarchical", "title": "Hierarchical CVAE for Fine-Grained Hate Speech Classification", "abstract": "Existing work on automated hate speech detection typically focuses on binary classification or on differentiating among a small set of categories. In this paper, we propose a novel method for a fine-grained hate speech classification task, which focuses on differentiating among 40 hate groups of 13 different hate group categories. We first explore the Conditional Variational Autoencoder (CVAE) as a discriminative model and then extend it to a hierarchical architecture to utilize the additional hate category information for more accurate prediction. Experimentally, we show that incorporating the hate category information for training can significantly improve the classification performance and our proposed model outperforms commonly-used discriminative models.", "phrases": ["hate speech detection", "binary classification", "hate group", "conditional variational autoencoder"], "overall_score": 0.9912646064743185, "scores": [0.6098825066367236, 0.5426481783795029, 0.5402383279075961, 0.5201722294660073], "rank_score": 0.5532353105974575} -{"id": "qiu-etal-2019-dynamically", "title": "Dynamically Fused Graph Network for Multi-hop Reasoning", "abstract": "Text-based question answering (TBQA) has been studied extensively in recent years. Most existing approaches focus on finding the answer to a question within a single paragraph. However, many difficult questions require multiple pieces of supporting evidence from scattered text among two or more documents.
In this paper, we propose Dynamically Fused Graph Network (DFGN), a novel method to answer those questions requiring multiple pieces of scattered evidence and reasoning over them. Inspired by humans' step-by-step reasoning behavior, DFGN includes a dynamic fusion layer that starts from the entities mentioned in the given query, explores along the entity graph dynamically built from the text, and gradually finds relevant supporting entities from the given documents. We evaluate DFGN on HotpotQA, a public TBQA dataset requiring multi-hop reasoning. DFGN achieves competitive results on the public board. Furthermore, our analysis shows DFGN produces interpretable reasoning chains.", "phrases": ["multi-hop reasoning", "reasoning chain", "dynamic entity graph"], "overall_score": 1.5669151266122616, "scores": [0.5990324160997327, 0.5345287063554254, 0.5255957460872182], "rank_score": 0.5530522895141253} -{"id": "jha-elhadad-2010-cancer", "title": "Cancer Stage Prediction Based on Patient Online Discourse", "abstract": "Forums and mailing lists dedicated to particular diseases are increasingly popular online. Automatically inferring the health status of a patient can be useful for both forum users and health researchers who study patients' online behaviors. In this paper, we focus on breast cancer forums and present a method to predict the stage of patients' cancers from their online discourse. We show that what the patients talk about (content-based features) and whom they interact with (social network-based features) provide complementary cues to predicting cancer stage and can be leveraged for better prediction. Our methods are extendable and can be applied to other tasks of acquiring contextual information about online health forum participants.", "phrases": ["patient", "discourse", "cancer stage"], "overall_score": 0.8900008778421681, "scores": [0.5775563264547076, 0.5562545026001213, 0.5251550825213529], "rank_score": 0.5529886371920606} -{"id": "scott-etal-2012-corpus", "title": "Corpus Annotation as a Scientific Task", "abstract": "Annotation studies in CL are generally unscientific: they are mostly not reproducible, make use of too few (and often non-independent) annotators and use guidelines that are often something of a moving target. Additionally, the notion of \u2018expert annotators\u2019 invariably means only that the annotators have linguistic training. While this can be acceptable in some special contexts, it is often far from ideal. This is particularly the case when subtle judgements are required or when, as increasingly, one is making use of corpora originating from technical texts that have been produced by, and intended to be consumed by, an audience of technical experts in the field. We outline a more rigorous approach to collecting human annotations, using as our example a study designed to capture judgements on the meaning of hedge words in medical records.", "phrases": ["ambiguity", "structured data", "medical language"], "overall_score": 0.7664401935997905, "scores": [0.5578232665471935, 0.5532226992573599, 0.547563233862039], "rank_score": 0.5528697332221975} -{"id": "le-zuidema-2015-compositional", "title": "Compositional Distributional Semantics with Long Short Term Memory", "abstract": "We propose an extension of the recursive neural network that makes use of a variant of the long short-term memory architecture. The extension allows information low in parse trees to be stored in a memory register (the \u2018memory cell\u2019) and used much later higher up in the parse tree.
This provides a solution to the vanishing gradient problem and allows the network to capture long range dependencies. Experimental results show that our composition outperformed the traditional neural-network composition on the Stanford Sentiment Treebank.", "phrases": ["recursive neural network", "memory cell", "tree-structured lstm", "composition function"], "overall_score": 1.3730949231513443, "scores": [0.5856407635356148, 0.561582623367818, 0.5372627539960312, 0.5258100459105955], "rank_score": 0.5525740467025149} -{"id": "wang-eisner-2018-surface", "title": "Surface Statistics of an Unknown Language Indicate How to Parse It", "abstract": "We introduce a novel framework for delexicalized dependency parsing in a new language. We show that useful features of the target language can be extracted automatically from an unparsed corpus, which consists only of gold part-of-speech (POS) sequences. Providing these features to our neural parser enables it to parse sequences like those in the corpus. Strikingly, our system has no supervision in the target language. Rather, it is a multilingual system that is trained end-to-end on a variety of other languages, so it learns a feature extractor that works well. We show experimentally across multiple languages: (1) Features computed from the unparsed corpus improve parsing accuracy. (2) Including thousands of synthetic languages in the training yields further improvement. (3) Despite being computed from unparsed corpora, our learned task-specific features beat previous work's interpretable typological features that require parsed corpora or expert categorization of the language. Our best method improved attachment scores on held-out test languages by an average of 5.6 percentage points over past work that does not inspect the unparsed data (McDonald et al., 2011), and by 20.7 points over past \u201cgrammar induction\u201d work that does not use training languages (Naseem et al., 2010).", "phrases": ["pos", "feature extractor", "word order"], "overall_score": 0.9898839999188482, "scores": [0.5706811320054629, 0.5485644978334197, 0.538148708384407], "rank_score": 0.5524647794077632} -{"id": "minard-etal-2016-meantime", "title": "MEANTIME, the NewsReader Multilingual Event and Time Corpus", "abstract": "In this paper, we present the NewsReader MEANTIME corpus, a semantically annotated corpus of Wikinews articles. The corpus consists of 480 news articles, i.e. 120 English news articles and their translations in Spanish, Italian, and Dutch. MEANTIME contains annotations at different levels. The document-level annotation includes markables (e.g. entity mentions, event mentions, time expressions, and numerical expressions), relations between markables (modeling, for example, temporal information and semantic role labeling), and entity and event intra-document coreference. The corpus-level annotation includes entity and event cross-document coreference. Semantic annotation on the English section was performed manually; for the annotation in Italian, Spanish, and (partially) Dutch, a procedure was devised to automatically project the annotations on the English texts onto the translated texts, based on the manual alignment of the annotated elements; this enabled us not only to speed up the annotation process but also provided cross-lingual coreference. The English section of the corpus was extended with timeline annotations for the SemEval 2015 TimeLine shared task. 
The \u201cFirst CLIN Dutch Shared Task\u201d at CLIN26 was based on the Dutch section, while the EVALITA 2016 FactA (Event Factuality Annotation) shared task, based on the Italian section, is currently being organized.", "phrases": ["wikinews article", "cross-document coreference", "temporal relation"], "overall_score": 1.3719593205067624, "scores": [0.5843757046206706, 0.5477665852656757, 0.524208849852216], "rank_score": 0.5521170465795208} -{"id": "magnini-etal-2006-cab", "title": "I-CAB: the Italian Content Annotation Bank", "abstract": "In this paper we present work in progress for the creation of the Italian Content Annotation Bank (I-CAB), a corpus of Italian news annotated with semantic information at different levels. The first level is represented by temporal expressions, the second level is represented by different types of entities (i.e. person, organizations, locations and geo-political entities), and the third level is represented by relations between entities (e.g. the affiliation relation connecting a person to an organization). So far I-CAB has been manually annotated with temporal expressions, person entities and organization entities. As we intend I-CAB to become a benchmark for various automatic Information Extraction tasks, we followed a policy of reusing already available markup languages. In particular, we adopted the annotation schemes developed for the ACE Entity Detection and Time Expressions Recognition and Normalization tasks. As the ACE guidelines have originally been developed for English, part of the effort consisted in adapting them to the specific morpho-syntactic features of Italian. Finally, we have extended them to include a wider range of entities, such as conjunctions.", "phrases": ["semantic information", "different type", "organization"], "overall_score": 0.9887994425191247, "scores": [0.5803159892660276, 0.5505173653968287, 0.5247450745307616], "rank_score": 0.5518594763978727} -{"id": "turchi-etal-2013-coping", "title": "Coping with the Subjectivity of Human Judgements in MT Quality Estimation", "abstract": "Supervised approaches to NLP tasks rely on high-quality data annotations, which typically result from expensive manual labelling procedures. For some tasks, however, the subjectivity of human judgements might reduce the usefulness of the annotation for real-world applications. In Machine Translation (MT) Quality Estimation (QE), for instance, using human-annotated data to train a binary classifier that discriminates between good (useful for a post-editor) and bad translations is not trivial. Focusing on this binary task, we show that subjective human judgements can be effectively replaced with an automatic annotation procedure. To this aim, we compare binary classifiers trained on different data: the human-annotated dataset from the 7th Workshop on Statistical Machine Translation (WMT-12), and an automatically labelled version of the same corpus. Our results show that human labels are less suitable for the task.", "phrases": ["judgement", "quality standard", "notion"], "overall_score": 1.2113059556911012, "scores": [0.5910013990442018, 0.5404068096272284, 0.5224590858036949], "rank_score": 0.551289098158375} -{"id": "zhang-etal-2017-earth", "title": "Earth Mover's Distance Minimization for Unsupervised Bilingual Lexicon Induction", "abstract": "Cross-lingual natural language processing hinges on the premise that there exists invariance across languages.
At the word level, researchers have identified such invariance in the word embedding semantic spaces of different languages. However, in order to connect the separate spaces, cross-lingual supervision encoded in parallel data is typically required. In this paper, we attempt to establish the cross-lingual connection without relying on any cross-lingual supervision. By viewing word embedding spaces as distributions, we propose to minimize their earth mover's distance, a measure of divergence between distributions. We demonstrate the success on the unsupervised bilingual lexicon induction task. In addition, we reveal an interesting finding that the earth mover's distance shows potential as a measure of language difference.", "phrases": ["semantic space", "different language", "recent method"], "overall_score": 1.651022812932202, "scores": [0.5682092680315531, 0.5622139133815459, 0.5229516820624435], "rank_score": 0.5511249544918475} -{"id": "agrawal-carpuat-2019-controlling", "title": "Controlling Text Complexity in Neural Machine Translation", "abstract": "This work introduces a machine translation task where the output is aimed at audiences of different levels of target language proficiency. We collect a high quality dataset of news articles available in English and Spanish, written for diverse grade levels and propose a method to align segments across comparable bilingual articles. The resulting dataset makes it possible to train multi-task sequence to sequence models that can translate and simplify text jointly. We show that these multi-task models outperform pipeline approaches that translate and simplify text independently.", "phrases": ["syntactic diversity", "translation quality", "inference time"], "overall_score": 0.8865345890691304, "scores": [0.5654940599358595, 0.5494840824620555, 0.5375265902543863], "rank_score": 0.5508349108841005} -{"id": "faruqui-etal-2016-morpho", "title": "Morpho-syntactic Lexicon Generation Using Graph-based Semi-supervised Learning", "abstract": "Morpho-syntactic lexicons provide information about the morphological and syntactic roles of words in a language. Such lexicons are not available for all languages and even when available, their coverage can be limited. We present a graph-based semi-supervised learning method that uses the morphological, syntactic and semantic relations between words to automatically construct wide coverage lexicons from small seed sets. Our method is language-independent, and we show that we can expand a 1000 word seed lexicon to more than 100 times its size with high quality for 11 languages. In addition, the automatically created lexicons provide features that improve performance in two downstream tasks: morphological tagging and dependency parsing.", "phrases": ["learning method", "tagging", "morpho-syntactic lexicon"], "overall_score": 1.1448722598876468, "scores": [0.6045030728980157, 0.5248751334943228, 0.5223233253989102], "rank_score": 0.5505671772637496} -{"id": "murty-etal-2021-dreca", "title": "DReCa: A General Task Augmentation Strategy for Few-Shot Natural Language Inference", "abstract": "Meta-learning promises few-shot learners that can adapt to new distributions by repurposing knowledge acquired from previous training. However, we believe meta-learning has not yet succeeded in NLP due to the lack of a well-defined task distribution, leading to attempts that treat datasets as tasks. Such an ad hoc task distribution causes problems of quantity and quality. 
Since there's only a handful of datasets for any NLP problem, meta-learners tend to overfit their adaptation mechanism and, since NLP datasets are highly heterogeneous, many learning episodes have poor transfer between their support and query sets, which discourages the meta-learner from adapting. To alleviate these issues, we propose DReCA (Decomposing datasets into Reasoning Categories), a simple method for discovering and using latent reasoning categories in a dataset, to form additional high quality tasks. DReCA works by splitting examples into label groups, embedding them with a finetuned BERT model and then clustering each group into reasoning categories. Across four few-shot NLI problems, we demonstrate that using DReCA improves the accuracy of meta-learners by 1.5-4%.", "phrases": ["meta-learner", "nlp dataset", "support", "query set", "clustering"], "overall_score": 0.7632305289056986, "scores": [0.567765051393621, 0.5622109387165597, 0.5458016950979476, 0.5391261750358983, 0.5378683875242536], "rank_score": 0.5505544495536561} -{"id": "ferreira-etal-2016-jointly", "title": "Jointly Learning to Embed and Predict with Multiple Languages", "abstract": "We propose a joint formulation for learning task-specific cross-lingual word embeddings, along with classifiers for that task. Unlike prior work, which first learns the embeddings from parallel data and then plugs them into a supervised learning problem, our approach is one-shot: a single optimization problem combines a co-regularizer for the multilingual embeddings with a task-specific loss. We present theoretical results showing the limitation of Euclidean co-regularizers to increase the embedding dimension, a limitation which does not exist for other co-regularizers (such as the \u21131-distance). Despite its simplicity, our method achieves state-of-the-art accuracies on the RCV1/RCV2 dataset when transferring from English to German, with training times below 1 minute. On the TED Corpus, we obtain the highest reported scores on 10 out of 11 languages.", "phrases": ["word embedding", "single optimization problem", "co-regularizer"], "overall_score": 0.7631563622951212, "scores": [0.5577280359528451, 0.5550093574146677, 0.5387654555915359], "rank_score": 0.5505009496530162} -{"id": "hossain-etal-2017-filling", "title": "Filling the Blanks (hint: plural noun) for Mad Libs Humor", "abstract": "Computerized generation of humor is a notoriously difficult AI problem. We develop an algorithm called Libitum that helps humans generate humor in a Mad Lib, which is a popular fill-in-the-blank game. The algorithm is based on a machine learned classifier that determines whether a potential fill-in word is funny in the context of the Mad Lib story. We use Amazon Mechanical Turk to create ground truth data and to judge humor for our classifier to mimic, and we make this data freely available. Our testing shows that Libitum successfully aids humans in filling in Mad Libs that are usually judged funnier than those filled in by humans with no computerized help.
We go on to analyze why some words are better than others at making a Mad Lib funny.", "phrases": ["humor", "fill-in-the-blank game", "mad lib story"], "overall_score": 0.9855425378439141, "scores": [0.5678158685871345, 0.5490972107546898, 0.5332122105250956], "rank_score": 0.5500417632889733} -{"id": "guillou-etal-2014-parcor", "title": "ParCor 1.0: A Parallel Pronoun-Coreference Corpus to Support Statistical MT", "abstract": "We present ParCor, a parallel corpus of texts in which pronoun coreference \u2015 reduced coreference in which pronouns are used as referring expressions \u2015 has been annotated. The corpus is intended to be used both as a resource from which to learn systematic differences in pronoun use between languages and ultimately for developing and testing informed Statistical Machine Translation systems aimed at addressing the problem of pronoun coreference in translation. At present, the corpus consists of a collection of parallel English-German documents from two different text genres: TED Talks (transcribed planned speech), and EU Bookshop publications (written text). All documents in the corpus have been manually annotated with respect to the type and location of each pronoun and, where relevant, its antecedent. We provide details of the texts that we selected, the guidelines and tools used to support annotation and some corpus statistics. The texts in the corpus have already been translated into many languages, and we plan to expand the corpus into these other languages, as well as other genres, in the future.", "phrases": ["pronoun", "detail", "annotation scheme"], "overall_score": 1.14293220202102, "scores": [0.5980942811297248, 0.5287471970660893, 0.5220611417322141], "rank_score": 0.5496342066426761} -{"id": "chen-etal-2017-automatically", "title": "Automatically Labeled Data Generation for Large Scale Event Extraction", "abstract": "Modern models of event extraction for tasks like ACE are based on supervised learning of events from small hand-labeled data. However, hand-labeled training data is expensive to produce, low in coverage of event types, and limited in size, which makes it hard for supervised methods to extract large numbers of events for knowledge base population. To solve the data labeling problem, we propose to automatically label training data for event extraction via world knowledge and linguistic knowledge, which can detect key arguments and trigger words for each event type and employ them to label events in texts automatically. The experimental results show that the quality of our large scale automatically labeled data is competitive with elaborately human-labeled data. Moreover, our automatically labeled data can be combined with human-labeled data to further improve the performance of models learned from these data.", "phrases": ["linguistic knowledge", "trigger word", "distance supervision"], "overall_score": 1.4097153776754063, "scores": [0.5576141310727504, 0.548803793105639, 0.5424045450541787], "rank_score": 0.5496074897441893} -{"id": "gormley-etal-2015-improved", "title": "Improved Relation Extraction with Feature-Rich Compositional Embedding Models", "abstract": "Compositional embedding models build a representation (or embedding) for a linguistic structure based on its component word embeddings. We propose a Feature-rich Compositional Embedding Model (FCM) for relation extraction that is expressive, generalizes to new domains, and is easy to implement. The key idea is to combine both (unlexicalized) hand-crafted features with learned word embeddings.
The model is able to directly tackle the difficulties met by traditional compositional embedding models, such as handling arbitrary types of sentence annotations and utilizing global information for composition. We test the proposed model on two relation extraction tasks, and demonstrate that our model outperforms both previous compositional models and traditional feature-rich models on the ACE 2005 relation extraction task, and the SemEval 2010 relation classification task. The combination of our model and a log-linear classifier with hand-crafted features gives state-of-the-art results.", "phrases": ["word embedding", "entity pair", "pipelined manner"], "overall_score": 1.2652933772995443, "scores": [0.5925174866321441, 0.5358408641326504, 0.5201714444849692], "rank_score": 0.5495099317499212} -{"id": "armendariz-etal-2020-cosimlex", "title": "CoSimLex: A Resource for Evaluating Graded Word Similarity in Context", "abstract": "State of the art natural language processing tools are built on context-dependent word embeddings, but no direct method for evaluating these representations currently exists. Standard tasks and datasets for intrinsic evaluation of embeddings are based on judgements of similarity, but ignore context; standard tasks for word sense disambiguation take account of context but do not provide continuous measures of meaning similarity. This paper describes an effort to build a new dataset, CoSimLex, intended to fill this gap. Building on the standard pairwise similarity task of SimLex-999, it provides context-dependent similarity measures; covers not only discrete differences in word sense but more subtle, graded changes in meaning; and covers not only a well-resourced language (English) but a number of less-resourced languages. We define the task and evaluation metrics, outline the dataset collection methodology, and describe the status of the dataset so far.", "phrases": ["intrinsic evaluation", "gap", "cosimlex dataset"], "overall_score": 1.2067468487224688, "scores": [0.5598304630613913, 0.5580934239686441, 0.529718590443234], "rank_score": 0.5492141591577565} -{"id": "marecek-etal-2011-two", "title": "Two-step translation with grammatical post-processing", "abstract": "This paper describes an experiment in which we try to automatically correct mistakes in grammatical agreement in English to Czech MT outputs. We perform several rule-based corrections on sentences parsed to dependency trees. We prove that it is possible to improve the MT quality of the majority of the systems participating in the WMT shared task. We performed both automatic (BLEU) and manual evaluations.", "phrases": ["correction", "dependency tree", "marecek"], "overall_score": 1.0686511679900894, "scores": [0.5753845731260675, 0.5441649683189003, 0.5279846499598474], "rank_score": 0.5491780638016049} -{"id": "wang-etal-2019-crossweigh", "title": "CrossWeigh: Training Named Entity Tagger from Imperfect Annotations", "abstract": "Everyone makes mistakes. So do human annotators when curating labels for named entity recognition (NER). Such label mistakes might hurt model training and interfere with model comparison. In this study, we dive deep into one of the widely-adopted NER benchmark datasets, CoNLL03 NER. We are able to identify label mistakes in about 5.38% of test sentences, which is a significant ratio considering that the state-of-the-art test F1 score is already around 93%. Therefore, we manually correct these label mistakes and form a cleaner test set.
Our re-evaluation of popular models on this corrected test set leads to more accurate assessments, compared to those on the original test set. More importantly, we propose a simple yet effective framework, CrossWeigh, to handle label mistakes during NER model training. Specifically, it partitions the training data into several folds and train independent NER models to identify potential mistakes in each fold. Then it adjusts the weights of training data accordingly to train the final NER model. Extensive experiments demonstrate significant improvements of plugging various NER models into our proposed framework on three datasets. All implementations and corrected test set are available at our Github repo .", "phrases": ["annotator", "fold", "search engine"], "overall_score": 1.357043012899376, "scores": [0.5751719672091405, 0.5432333197711539, 0.519937561450433], "rank_score": 0.5461142828102424} -{"id": "zhang-etal-2012-big", "title": "Big Data versus the Crowd: Looking for Relationships in All the Right Places", "abstract": "Classically, training relation extractors relies on high-quality, manually annotated training data, which can be expensive to obtain. To mitigate this cost, NLU researchers have considered two newly available sources of less expensive (but potentially lower quality) labeled data from distant supervision and crowd sourcing. There is, however, no study comparing the relative impact of these two sources on the precision and recall of post-learning answers. To fill this gap, we empirically study how state-of-the-art techniques are affected by scaling these two sources. We use corpus sizes of up to 100 million documents and tens of thousands of crowd-source labeled examples. Our experiments show that increasing the corpus size for distant supervision has a statistically significant, positive impact on quality (F1 score). In contrast, human feedback has a positive and statistically significant, but lower, impact on precision and recall.", "phrases": ["ten", "crowdsourcing", "annotated data"], "overall_score": 1.062015160865911, "scores": [0.5674085047431975, 0.5483731690341019, 0.521521818444308], "rank_score": 0.5457678307405358} -{"id": "he-golub-2016-character", "title": "Character-Level Question Answering with Attention", "abstract": "We show that a character-level encoder-decoder framework can be successfully applied to question answering with a structured knowledge base. We use our model for single-relation question answering and demonstrate the effectiveness of our approach on the SimpleQuestions dataset (Bordes et al., 2015), where we improve state-of-the-art accuracy from 63.9% to 70.9%, without use of ensembles. Importantly, our character-level model has 16x fewer parameters than an equivalent word-level model, can be learned with significantly less data compared to previous work, which relies on data augmentation, and is robust to new entities in testing.", "phrases": ["encoder-decoder framework", "character-level model", "entity name", "generative framework"], "overall_score": 1.3987079197336239, "scores": [0.5606788623043168, 0.5474040066556969, 0.5394693770086757, 0.5337117476688118], "rank_score": 0.5453159984093754} -{"id": "thater-etal-2009-ranking", "title": "Ranking Paraphrases in Context", "abstract": "We present a vector space model that supports the computation of appropriate vector representations for words in context, and apply it to a paraphrase ranking task. 
An evaluation on the SemEval 2007 lexical substitution task data shows promising results: the model significantly outperforms a current state of the art model, and our treatment of context is effective.", "phrases": ["computation", "vector representation", "preference"], "overall_score": 1.1981013454408433, "scores": [0.5658690329832806, 0.5410381822705392, 0.5289310478881502], "rank_score": 0.5452794210473233} -{"id": "zhao-etal-2020-reducing", "title": "Reducing Quantity Hallucinations in Abstractive Summarization", "abstract": "It is well-known that abstractive summaries are subject to hallucination\u2014including material that is not supported by the original text. While summaries can be made hallucination-free by limiting them to general phrases, such summaries would fail to be very informative. Alternatively, one can try to avoid hallucinations by verifying that any specific entities in the summary appear in the original text in a similar context. This is the approach taken by our system, Herman. The system learns to recognize and verify quantity entities (dates, numbers, sums of money, etc.) in a beam-worth of abstractive summaries produced by state-of-the-art models, in order to up-rank those summaries whose quantity terms are supported by the original text. Experimental results demonstrate that the ROUGE scores of such up-ranked summaries have a higher Precision than summaries that have not been up-ranked, without a comparable loss in Recall, resulting in higher F1. Preliminary human evaluation of up-ranked vs. original summaries shows people's preference for the former.", "phrases": ["hallucination", "inconsistent text", "text generation model", "core issue"], "overall_score": 0.9768226191161666, "scores": [0.5664281047415114, 0.55002728934036, 0.5384493974367572, 0.5257955444187881], "rank_score": 0.5451750839843541} -{"id": "chai-qu-2005-salience", "title": "A Salience Driven Approach to Robust Input Interpretation in Multimodal Conversational Systems", "abstract": "To improve the robustness in multimodal input interpretation, this paper presents a new salience driven approach. This approach is based on the observation that, during multimodal conversation, information from deictic gestures (e.g., point or circle) on a graphical display can signal a part of the physical world (i.e., representation of the domain and task) of the application which is salient during the communication. This salient part of the physical world will prime what users tend to communicate in speech and in turn can be used to constrain hypotheses for spoken language understanding, thus improving overall input interpretation. Our experimental results have indicated the potential of this approach in reducing word error rate and improving concept identification in multimodal conversation.", "phrases": ["salience", "gesture", "language understanding"], "overall_score": 0.598904822934211, "scores": [0.5547553127618919, 0.5496297171020025, 0.5310549586882629], "rank_score": 0.5451466628507191} -{"id": "sasano-kurohashi-2008-japanese", "title": "Japanese Named Entity Recognition Using Structural Natural Language Processing", "abstract": "This paper presents an approach that uses structural information for Japanese named entity recognition (NER). Our NER system is based on Support Vector Machine (SVM), and utilizes four types of structural information: cache features, coreference relations, syntactic features and caseframe features, which are obtained from structural analyses. 
We evaluated our approach on CRL NE data and obtained a higher F-measure than existing approaches that do not use structural information. We also conducted experiments on IREX NE data and an NE-annotated web corpus and confirmed that structural information improves the performance of NER.", "phrases": ["caseframe feature", "syntactic dependency feature", "japanese ner"], "overall_score": 0.8767683206173305, "scores": [0.5725581042969335, 0.5417847405524806, 0.5199575164947151], "rank_score": 0.5447667871147098} -{"id": "jolly-etal-2020-data", "title": "Data-Efficient Paraphrase Generation to Bootstrap Intent Classification and Slot Labeling for New Features in Task-Oriented Dialog Systems", "abstract": "Recent progress through advanced neural models pushed the performance of task-oriented dialog systems to almost perfect accuracy on existing benchmark datasets for intent classification and slot labeling. However, in evolving real-world dialog systems, where new functionality is regularly added, a major additional challenge is the lack of annotated training data for such new functionality, as the necessary data collection efforts are laborious and time-consuming. A potential solution to reduce the effort is to augment initial seed data by paraphrasing existing utterances automatically. In this paper, we propose a new, data-efficient approach following this idea. Using an interpretation-to-text model for paraphrase generation, we are able to rely on existing dialog system training data, and, in combination with shuffling-based sampling techniques, we can obtain diverse and novel paraphrases from small amounts of seed data. In experiments on a public dataset and with a real-world dialog system, we observe improvements for both intent classification and slot labeling, demonstrating the usefulness of our approach.", "phrases": ["intent classification", "interpretation-to-text model", "novel paraphrase"], "overall_score": 0.8765896745008473, "scores": [0.5534681091187175, 0.5455742571708814, 0.5349249978352473], "rank_score": 0.5446557880416154} -{"id": "agic-schluter-2018-baselines", "title": "Baselines and Test Data for Cross-Lingual Inference", "abstract": "The recent years have seen a revival of interest in textual entailment, sparked by i) the emergence of powerful deep neural network learners for natural language processing and ii) the timely development of large-scale evaluation datasets such as SNLI. Recast as natural language inference, the problem now amounts to detecting the relation between pairs of statements: they either contradict or entail one another, or they are mutually neutral. Current research in natural language inference is effectively exclusive to English. In this paper, we propose to advance the research in SNLI-style natural language inference toward multilingual evaluation. To that end, we provide test data for four major languages: Arabic, French, Spanish, and Russian. We experiment with a set of baselines. Our systems are based on cross-lingual word embeddings and machine translation. 
While our best system scores an average accuracy of just over 75%, we focus largely on enabling further research in multilingual inference.", "phrases": ["major language", "arabic", "french"], "overall_score": 0.7546441553195965, "scores": [0.5801898552076564, 0.529567047788106, 0.5233251677773715], "rank_score": 0.5443606902577113} -{"id": "liao-veeramachaneni-2009-simple", "title": "A Simple Semi-supervised Algorithm For Named Entity Recognition", "abstract": "We present a simple semi-supervised learning algorithm for named entity recognition (NER) using conditional random fields (CRFs). The algorithm is based on exploiting evidence that is independent from the features used for a classifier, which provides high-precision labels to unlabeled data. Such independent evidence is used to automatically extract high-accuracy and non-redundant data, leading to a much improved classifier at the next iteration. We show that our algorithm achieves an average improvement of 12 in recall and 4 in precision compared to the supervised algorithm. We also show that our algorithm achieves high accuracy when the training and test sets are from different domains.", "phrases": ["unlabeled data", "iteration", "self-training"], "overall_score": 0.8758970994137816, "scores": [0.564305970477432, 0.5389599243150056, 0.5294105060432098], "rank_score": 0.5442254669452158} -{"id": "wang-etal-2020-hat", "title": "HAT: Hardware-Aware Transformers for Efficient Natural Language Processing", "abstract": "Transformers are ubiquitous in Natural Language Processing (NLP) tasks, but they are difficult to be deployed on hardware due to the intensive computation. To enable low-latency inference on resource-constrained hardware platforms, we propose to design Hardware-Aware Transformers (HAT) with neural architecture search. We first construct a large design space with arbitrary encoder-decoder attention and heterogeneous layers. Then we train a SuperTransformer that covers all candidates in the design space, and efficiently produces many SubTransformers with weight sharing. Finally, we perform an evolutionary search with a hardware latency constraint to find a specialized SubTransformer dedicated to run fast on the target hardware. Extensive experiments on four machine translation tasks demonstrate that HAT can discover efficient models for different hardware (CPU, GPU, IoT device). When running WMT'14 translation task on Raspberry Pi-4, HAT can achieve 3x speedup, 3.7x smaller size over baseline Transformer; 2.7x speedup, 3.6x smaller size over Evolved Transformer with 12,041x less search cost and no performance loss. HAT is open-sourced at .", "phrases": ["hardware-aware transformers", "search", "translation task"], "overall_score": 1.131633325166394, "scores": [0.5689318637344393, 0.5359072156877581, 0.5277627069000472], "rank_score": 0.5442005954407482} -{"id": "kim-etal-2019-research", "title": "From Research to Production and Back: Ludicrously Fast Neural Machine Translation", "abstract": "This paper describes the submissions of the \u201cMarian\u201d team to the WNGT 2019 efficiency shared task. Taking our dominating submissions to the previous edition of the shared task as a starting point, we develop improved teacher-student training via multi-agent dual-learning and noisy backward-forward translation for Transformer-based student models.
For efficient CPU-based decoding, we propose pre-packed 8-bit matrix products, improved batched decoding, cache-friendly student architectures with parameter sharing and light-weight RNN-based decoder architectures. GPU-based decoding benefits from the same architecture changes, from pervasive 16-bit inference and concurrent streams. These modifications together with profiler-based C++ code optimization allow us to push the Pareto frontier established during the 2018 edition towards 24x (CPU) and 14x (GPU) faster models at comparable or higher BLEU values. Our fastest CPU model is more than 4x faster than last year's fastest submission at more than 3 points higher BLEU. Our fastest GPU model at 1.5 seconds translation time is slightly faster than last year's fastest RNN-based submissions, but outperforms them by more than 4 BLEU and 10 BLEU points respectively.", "phrases": ["decoding", "cpu", "small student model"], "overall_score": 1.3950708761017596, "scores": [0.5632643006399894, 0.5440356530215855, 0.524394105377187], "rank_score": 0.5438980196795873} -{"id": "passonneau-etal-2009-making", "title": "Making Sense of Word Sense Variation", "abstract": "We present a pilot study of word-sense annotation using multiple annotators, relatively polysemous words, and a heterogenous corpus. Annotators selected senses for words in context, using an annotation interface that presented WordNet senses. Interannotator agreement (IA) results show that annotators agree well or not, depending primarily on the individual words and their general usage properties. Our focus is on identifying systematic differences across words and annotators that can account for IA variation. We identify three lexical use factors: semantic specificity of the context, sense concreteness, and similarity of senses. We discuss systematic differences in sense selection across annotators, and present the use of association rules to mine the data for systematic differences across annotators.", "phrases": ["pilot study", "annotator", "specificity"], "overall_score": 0.7535945244292188, "scores": [0.5662763555124262, 0.5386634493713858, 0.5258708199688543], "rank_score": 0.5436035416175554} -{"id": "chowdhury-zamparelli-2018-rnn", "title": "RNN Simulations of Grammaticality Judgments on Long-distance Dependencies", "abstract": "The paper explores the ability of LSTM networks trained on a language modeling task to detect linguistic structures which are ungrammatical due to extraction violations (extra arguments and subject-relative clause island violations), and considers its implications for the debate on language innatism. The results show that the current RNN model can correctly classify (un)grammatical sentences, in certain conditions, but it is sensitive to linguistic processing factors and probably ultimately unable to induce a more abstract notion of grammaticality, at least in the domain we tested.", "phrases": ["grammaticality", "processing factor", "abstract notion"], "overall_score": 0.973757399036329, "scores": [0.5832157918500285, 0.5246533143788971, 0.5225239500263096], "rank_score": 0.5434643520850785} -{"id": "bohus-horvitz-2009-models", "title": "Models for Multiparty Engagement in Open-World Dialog", "abstract": "We present computational models that allow spoken dialog systems to handle multi-participant engagement in open, dynamic environments, where multiple people may enter and leave conversations, and interact with the system and with others in a natural manner. 
The models for managing the engagement process include components for (1) sensing the engagement state, actions and intentions of multiple agents in the scene, (2) making engagement decisions (i.e. whom to engage with, and when) and (3) rendering these decisions in a set of coordinated low-level behaviors in an embodied conversational agent. We review results from a study of interactions \"in the wild\" with a system that implements such a model.", "phrases": ["dialog system", "engagement state", "user disengagement"], "overall_score": 1.1299963351178304, "scores": [0.5609691400292155, 0.5455128220759591, 0.5237581467920225], "rank_score": 0.5434133696323991} -{"id": "ravi-knight-2009-minimized", "title": "Minimized Models for Unsupervised Part-of-Speech Tagging", "abstract": "We describe a novel method for the task of unsupervised POS tagging with a dictionary, one that uses integer programming to explicitly search for the smallest model that explains the data, and then uses EM to set parameter values. We evaluate our method on a standard test corpus using different standard tagsets (a 45-tagset as well as a smaller 17-tagset), and show that our approach performs better than existing state-of-the-art systems in both settings.", "phrases": ["dictionary", "parameter value", "minimal set"], "overall_score": 1.3916969921670466, "scores": [0.5629598519416151, 0.5391657132913393, 0.5256223528129277], "rank_score": 0.5425826393486274} -{"id": "petukhova-etal-2016-modelling", "title": "Modelling Multi-issue Bargaining Dialogues: Data Collection, Annotation Design and Corpus", "abstract": "The paper describes experimental dialogue data collection activities, as well semantically annotated corpus creation undertaken within EU-funded METALOGUE project (www.metalogue.eu). The project aims to develop a dialogue system with flexible dialogue management to enable system's adaptive, reactive, interactive and proactive dialogue behavior in setting goals, choosing appropriate strategies and monitoring numerous parallel interpretation and management processes. To achieve these goals negotiation (or more precisely multi-issue bargaining) scenario has been considered as the specific setting and application domain. The dialogue corpus forms the basis for the design of task and interaction models of participants negotiation behavior, and subsequently for dialogue system development which would be capable to replace one of the negotiators. The METALOGUE corpus will be released to the community for research purposes.", "phrases": ["dialogue system", "negotiator", "metalogue corpus"], "overall_score": 0.8713238696544656, "scores": [0.5511906312386945, 0.5467427634181501, 0.5262184839391109], "rank_score": 0.5413839595319851} -{"id": "ramadan-etal-2018-large", "title": "Large-Scale Multi-Domain Belief Tracking with Knowledge Sharing", "abstract": "Robust dialogue belief tracking is a key component in maintaining good quality dialogue systems. The tasks that dialogue systems are trying to solve are becoming increasingly complex, requiring scalability to multi-domain, semantically rich dialogues. However, most current approaches have difficulty scaling up with domains because of the dependency of the model parameters on the dialogue ontology. In this paper, a novel approach is introduced that fully utilizes semantic similarity between dialogue utterances and the ontology terms, allowing the information to be shared across domains.
The evaluation is performed on a recently collected multi-domain dialogues dataset, one order of magnitude larger than currently available corpora. Our model demonstrates great capability in handling multi-domain dialogues, simultaneously outperforming existing state-of-the-art models in single-domain dialogue tracking tasks.", "phrases": ["ontology", "multi-domain dst", "state tracker", "bi-lstm"], "overall_score": 1.6954983898093905, "scores": [0.580919804943425, 0.5312423212675991, 0.5261072232056705, 0.5247049991830315], "rank_score": 0.5407435871499315} -{"id": "landwehr-etal-2014-model", "title": "A Model of Individual Differences in Gaze Control During Reading", "abstract": "We develop a statistical model of saccadic eye movements during reading of isolated sentences. The model is focused on representing individual differences between readers and supports the inference of the most likely reader for a novel set of eye movement patterns. We empirically study the model for biometric reader identification using eye-tracking data collected from 20 individuals and observe that the model distinguishes between 20 readers with an accuracy of up to 98%.", "phrases": ["individual difference", "reading", "eye movement"], "overall_score": 0.5940665891288768, "scores": [0.5500155559395814, 0.5378240154382101, 0.5343885665827444], "rank_score": 0.5407427126535119} -{"id": "hu-etal-2019-multi", "title": "A Multi-Type Multi-Span Network for Reading Comprehension that Requires Discrete Reasoning", "abstract": "Rapid progress has been made in the field of reading comprehension and question answering, where several systems have achieved human parity in some simplified settings. However, the performance of these models degrades significantly when they are applied to more realistic scenarios, such as answers involve various types, multiple text strings are correct answers, or discrete reasoning abilities are required. In this paper, we introduce the Multi-Type Multi-Span Network (MTMSN), a neural reading comprehension model that combines a multi-type answer predictor designed to support various answer types (e.g., span, count, negation, and arithmetic expression) with a multi-span extraction method for dynamically producing one or multiple text spans. In addition, an arithmetic expression reranking mechanism is proposed to rank expression candidates for further confirming the prediction. Experiments show that our model achieves 79.9 F1 on the DROP hidden test set, creating new state-of-the-art results. Source code () is released to facilitate future work.", "phrases": ["comprehension", "reasoning ability", "span"], "overall_score": 1.2965345274576452, "scores": [0.5631341590468453, 0.535405075712179, 0.5235514488902764], "rank_score": 0.5406968945497669} -{"id": "hua-wang-2020-pair", "title": "PAIR: Planning and Iterative Refinement in Pre-trained Transformers for Long Text Generation", "abstract": "Pre-trained Transformers have enabled impressive breakthroughs in generating long and fluent text, yet their outputs are often \u201crambling\u201d without coherently arranged content. In this work, we present a novel content-controlled text generation framework, PAIR, with planning and iterative refinement, which is built upon a large model, BART. We first adapt the BERT model to automatically construct the content plans, consisting of keyphrase assignments and their corresponding sentence-level positions. The BART model is employed for generation without modifying its structure. 
We then propose a refinement algorithm to gradually enhance the generation quality within the sequence-to-sequence framework. Evaluation with automatic metrics shows that adding planning consistently improves the generation quality on three distinct domains, with an average of 20 BLEU points and 12 METEOR points improvements. In addition, human judges rate our system outputs to be more relevant and coherent than comparisons without planning.", "phrases": ["refinement", "content plan", "generation quality"], "overall_score": 1.187738022166856, "scores": [0.5621707807392466, 0.5345423402366118, 0.5249754871228139], "rank_score": 0.5405628693662242} -{"id": "reitter-moore-2007-predicting", "title": "Predicting Success in Dialogue", "abstract": "Task-solving in dialogue depends on the linguistic alignment of the interlocutors, which Pickering & Garrod (2004) have suggested to be based on mechanistic repetition effects. In this paper, we seek confirmation of this hypothesis by looking at repetition in corpora, and whether repetition is correlated with task success. We show that the relevant repetition tendency is based on slow adaptation rather than short-term priming and demonstrate that lexical and syntactic repetition is a reliable predictor of task success given the first five minutes of a task-oriented dialogue.", "phrases": ["task success", "adaptation", "syntactic repetition", "speech data", "social variable"], "overall_score": 1.1830762030583242, "scores": [0.5505665299986832, 0.5452917466267798, 0.5398223705870324, 0.5305755831627862, 0.5259496899057788], "rank_score": 0.5384411840562121} -{"id": "rijhwani-preotiuc-pietro-2020-temporally", "title": "Temporally-Informed Analysis of Named Entity Recognition", "abstract": "Natural language processing models often have to make predictions on text data that evolves over time as a result of changes in language use or the information described in the text. However, evaluation results on existing data sets are seldom reported by taking the timestamp of the document into account. We analyze and propose methods that make better use of temporally-diverse training data, with a focus on the task of named entity recognition. To support these experiments, we introduce a novel data set of English tweets annotated with named entities. We empirically demonstrate the effect of temporal drift on performance, and how the temporal information of documents can be used to obtain better models compared to those that disregard temporal information. Our analysis gives insights into why this information is useful, in the hope of informing potential avenues of improvement for named entity recognition as well as other NLP tasks under similar experimental setups.", "phrases": ["other nlp task", "temporal generalization ability", "tweet stream", "fine-tuning scenario", "late data"], "overall_score": 1.1190954985949055, "scores": [0.5470679934566361, 0.5468125441009503, 0.5418772443705916, 0.5326241357832499, 0.5224739591286257], "rank_score": 0.5381711753680107} -{"id": "vertanen-kristensson-2011-imagination", "title": "The Imagination of Crowds: Conversational AAC Language Modeling using Crowdsourcing and Large Data Sources", "abstract": "Augmented and alternative communication (AAC) devices enable users with certain communication disabilities to participate in everyday conversations. Such devices often rely on statistical language models to improve text entry by offering word predictions.
These predictions can be improved if the language model is trained on data that closely reflects the style of the users' intended communications. Unfortunately, there is no large dataset consisting of genuine AAC messages. In this paper we demonstrate how we can crowd-source the creation of a large set of fictional AAC messages. We show that these messages model conversational AAC better than the currently used datasets based on telephone conversations or newswire text. We leverage our crowdsourced messages to intelligently select sentences from much larger sets of Twitter, blog and Usenet data. Compared to a model trained only on telephone transcripts, our best performing model reduced perplexity on three test sets of AAC-like communications by 60--82% relative. This translated to a potential keystroke savings in a predictive keyboard interface of 5--11%.", "phrases": ["language model", "fictional aac message", "telephone transcript"], "overall_score": 0.7456599685904948, "scores": [0.56398309538206, 0.5262421159998343, 0.5234146969304964], "rank_score": 0.5378799694374635} -{"id": "blodgett-etal-2016-demographic", "title": "Demographic Dialectal Variation in Social Media: A Case Study of African-American English", "abstract": "Though dialectal language is increasingly abundant on social media, few resources exist for developing NLP tools to handle such language. We conduct a case study of dialectal language in online conversational text by investigating African-American English (AAE) on Twitter. We propose a distantly supervised model to identify AAE-like language from demographics associated with geo-located messages, and we verify that this language follows well-known AAE linguistic phenomena. In addition, we analyze the quality of existing language identification and dependency parsing tools on AAE-like text, demonstrating that they perform poorly on such text compared to text associated with white speakers. We also provide an ensemble classifier for language identification which eliminates this disparity and release a new corpus of tweets containing AAE-like language.", "phrases": ["twitter", "demographic", "social medium"], "overall_score": 1.5235396206828375, "scores": [0.549861699656131, 0.5406903117263253, 0.5226759058293522], "rank_score": 0.5377426390706028} -{"id": "li-etal-2017-noise", "title": "Noise Reduction Methods for Distantly Supervised Biomedical Relation Extraction", "abstract": "Distant supervision has been applied to automatically generate labeled data for biomedical relation extraction. Noise exists in both positively and negatively-labeled data and affects the performance of supervised machine learning methods. In this paper, we propose three novel heuristics based on the notion of proximity, trigger word and confidence of patterns to leverage lexical and syntactic information to reduce the level of noise in the distantly labeled data. Experiments on three different tasks, extraction of protein-protein-interaction, miRNA-gene regulation relation and protein-localization event, show that the proposed methods can improve the F-score over the baseline by 6, 10 and 14 points for the three tasks, respectively. 
We also show that when the models are configured to output high-confidence results, high precisions can be obtained using the proposed methods, making them promising for facilitating manual curation for databases.", "phrases": ["novel heuristic", "syntactic information", "noise"], "overall_score": 0.5903310644333133, "scores": [0.5513320930847027, 0.532013585794842, 0.5286817957511855], "rank_score": 0.5373424915435767} -{"id": "ran-etal-2020-learning", "title": "Learning to Recover from Multi-Modality Errors for Non-Autoregressive Neural Machine Translation", "abstract": "Non-autoregressive neural machine translation (NAT) predicts the entire target sequence simultaneously and significantly accelerates inference process. However, NAT discards the dependency information in a sentence, and thus inevitably suffers from the multi-modality problem: the target tokens may be provided by different possible translations, often causing token repetitions or missing. To alleviate this problem, we propose a novel semi-autoregressive model RecoverSAT in this work, which generates a translation as a sequence of segments. The segments are generated simultaneously while each segment is predicted token-by-token. By dynamically determining segment length and deleting repetitive segments, RecoverSAT is capable of recovering from repetitive and missing token errors. Experimental results on three widely-used benchmark datasets show that our proposed model achieves more than 4 times speedup while maintaining comparable performance compared with the corresponding autoregressive model.", "phrases": ["multi-modality problem", "token repetition", "semi-autoregressive model recoversat"], "overall_score": 0.7432116380525835, "scores": [0.5470207293351895, 0.536460544689974, 0.524860342798975], "rank_score": 0.5361138722747127} -{"id": "blunsom-cohn-2011-hierarchical", "title": "A Hierarchical Pitman-Yor Process HMM for Unsupervised Part of Speech Induction", "abstract": "In this work we address the problem of unsupervised part-of-speech induction by bringing together several strands of research into a single model. We develop a novel hidden Markov model incorporating sophisticated smoothing using a hierarchical Pitman-Yor processes prior, providing an elegant and principled means of incorporating lexical characteristics. Central to our approach is a new type-based sampling algorithm for hierarchical Pitman-Yor models in which we track fractional table counts. In an empirical evaluation we show that our model consistently out-performs the current state-of-the-art across 10 languages.", "phrases": ["hierarchical pitman-yor process", "sophisticated smoothing", "word type"], "overall_score": 1.1658095975879372, "scores": [0.540278651144075, 0.5307840576005657, 0.5206857310092419], "rank_score": 0.5305828132512942} -{"id": "candito-etal-2014-developing", "title": "Developing a French FrameNet: Methodology and First results", "abstract": "The Asfalda project aims to develop a French corpus with frame-based semantic annotations and automatic tools for shallow semantic analysis. We present the first part of the project: focusing on a set of notional domains, we delimited a subset of English frames, adapted them to French data when necessary, and developed the corresponding French lexicon. 
We believe that working domain by domain helped us to enforce the coherence of the resulting resource, and also has the advantage that, though the number of frames is limited (around a hundred), we obtain full coverage within a given domain.", "phrases": ["project", "notional domain", "frame"], "overall_score": 0.8513537384236043, "scores": [0.5400131622940609, 0.5239297271210155, 0.522984568636457], "rank_score": 0.5289758193505111} -{"id": "surdeanu-etal-2008-learning", "title": "Learning to Rank Answers on Large Online QA Collections", "abstract": "This work describes an answer ranking engine for non-factoid questions built using a large online community-generated question-answer collection (Yahoo! Answers). We show how such collections may be used to effectively set up large supervised learning experiments. Furthermore we investigate a wide range of feature types, some exploiting NLP processors, and demonstrate that using them in combination leads to considerable improvements in accuracy.", "phrases": ["candidate", "question-answer pair", "similar q&a"], "overall_score": 1.5769400413892718, "scores": [0.5323520365665998, 0.524309533999039, 0.5225249828960965], "rank_score": 0.5263955178205785} -{"id": "mellebeek-etal-2005-improving", "title": "Improving Online Machine Translation Systems", "abstract": "In (Mellebeek et al., 2005), we proposed the design, implementation and evaluation of a novel and modular approach to boost the translation performance of existing, wide-coverage, freely available machine translation systems, based on reliable and fast automatic decomposition of the translation input and corresponding composition of translation output. Despite showing some initial promise, our method did not improve on the baseline Logomedia and Systran MT systems. In this paper, we improve on the algorithm presented in (Mellebeek et al., 2005), and on the same test data, show increased scores for a range of automatic evaluation metrics. Our algorithm now outperforms Logomedia, obtains similar results to SDL and falls tantalisingly short of the performance achieved by Systran.", "phrases": ["input string", "poor quality", "stage"], "overall_score": 0.7294639533728596, "scores": [0.5281900671030578, 0.5253160036341454, 0.5250849713202209], "rank_score": 0.5261970140191413} -{"id": "mckinley-ray-2014-decision", "title": "A Decision-Theoretic Approach to Natural Language Generation", "abstract": "We study the problem of generating an English sentence given an underlying probabilistic grammar, a world and a communicative goal. We model the generation problem as a Markov decision process with a suitably defined reward function that reflects the communicative goal. We then use probabilistic planning to solve the MDP and generate a sentence that, with high probability, accomplishes the communicative goal. We show empirically that our approach can generate complex sentences with a speed that generally matches or surpasses the state of the art.
Further, we show that our approach is anytime and can handle complex communicative goals, including negated goals.", "phrases": ["communicative goal", "planning", "mdp"], "overall_score": 0.8467573024007702, "scores": [0.5335140150768156, 0.5225873682312228, 0.5222582959171296], "rank_score": 0.526119893075056} -{"id": "ogren-etal-2008-constructing", "title": "Constructing Evaluation Corpora for Automated Clinical Named Entity Recognition", "abstract": "We report on the construction of a gold-standard dataset consisting of annotated clinical notes suitable for evaluating our biomedical named entity recognition system. The dataset is the result of consensus between four human annotators and contains 1,556 annotations on 160 clinical notes using 658 unique concept codes from SNOMED-CT corresponding to human disorders. Inter-annotator agreement was calculated on annotations from 100 of the documents for span (90.9%), concept code (81.7%), context (84.8%), and status (86.0%) agreement. Complete agreement for span, concept code, context, and status was 74.6%. We found that creating a consensus set based on annotations from two independently-created annotation sets can reduce inter-annotator disagreement by 32.3%. We found little benefit to pre-annotating the corpus with a third-party named entity recognizer.", "phrases": ["annotator", "disorder", "mayo clinic"], "overall_score": 1.023165089306643, "scores": [0.5273515007805032, 0.526709386291409, 0.5233476430239336], "rank_score": 0.5258028433652818} \ No newline at end of file +{"id": "mintz-etal-2009-distant", "title": "Distant supervision for relation extraction without labeled data", "abstract": "Modern models of relation extraction for tasks like ACE are based on supervised learning of relations from small hand-labeled corpora. We investigate an alternative paradigm that does not require labeled corpora, avoiding the domain dependence of ACE-style algorithms, and allowing the use of corpora of any size. Our experiments use Freebase, a large semantic database of several thousand relations, to provide distant supervision. For each pair of entities that appears in some Freebase relation, we find all sentences containing those entities in a large unlabeled corpus and extract textual features to train a relation classifier. Our algorithm combines the advantages of supervised IE (combining 400,000 noisy pattern features in a probabilistic classifier) and unsupervised IE (extracting large numbers of relations from large corpora of any domain). Our model is able to extract 10,000 instances of 102 relations at a precision of 67.6%. We also analyze feature performance, showing that syntactic parse features are particularly helpful for relations that are ambiguous or lexically distant in their expression.", "keyphrases": ["relation extraction", "distant supervision", "knowledge base", "wikipedia", "large amount"]} +{"id": "thorne-etal-2018-fever", "title": "FEVER: a Large-scale Dataset for Fact Extraction and VERification", "abstract": "In this paper we introduce a new publicly available dataset for verification against textual sources, FEVER: Fact Extraction and VERification. It consists of 185,445 claims generated by altering sentences extracted from Wikipedia and subsequently verified without knowledge of the sentence they were derived from. The claims are classified as Supported, Refuted or NotEnoughInfo by annotators achieving 0.6841 in Fleiss kappa. 
For the first two classes, the annotators also recorded the sentence(s) forming the necessary evidence for their judgment. To characterize the challenge of the dataset presented, we develop a pipeline approach and compare it to suitably designed oracles. The best accuracy we achieve on labeling a claim accompanied by the correct evidence is 31.87%, while if we ignore the evidence we achieve 50.91%. Thus we believe that FEVER is a challenging testbed that will help stimulate progress on claim verification against textual sources.", "keyphrases": ["fact extraction", "verification", "wikipedia", "fever", "veracity"]} +{"id": "riloff-etal-2013-sarcasm", "title": "Sarcasm as Contrast between a Positive Sentiment and Negative Situation", "abstract": "A common form of sarcasm on Twitter consists of a positive sentiment contrasted with a negative situation. For example, many sarcastic tweets include a positive sentiment, such as \u201clove\u201d or \u201cenjoy\u201d, followed by an expression that describes an undesirable activity or state (e.g., \u201ctaking exams\u201d or \u201cbeing ignored\u201d). We have developed a sarcasm recognizer to identify this type of sarcasm in tweets. We present a novel bootstrapping algorithm that automatically learns lists of positive sentiment phrases and negative situation phrases from sarcastic tweets. We show that identifying contrasting contexts using the phrases learned through bootstrapping yields improved recall for sarcasm recognition.", "keyphrases": ["negative situation", "sentiment phrase", "sarcasm"]} +{"id": "habash-etal-2012-conventional", "title": "Conventional Orthography for Dialectal Arabic", "abstract": "Dialectal Arabic (DA) refers to the day-to-day vernaculars spoken in the Arab world. DA lives side-by-side with the official language, Modern Standard Arabic (MSA). DA differs from MSA on all levels of linguistic representation, from phonology and morphology to lexicon and syntax. Unlike MSA, DA has no standard orthography since there are no Arabic dialect academies, nor is there a large edited body of dialectal literature that follows the same spelling standard. In this paper, we present CODA, a conventional orthography for dialectal Arabic; it is designed primarily for the purpose of developing computational models of Arabic dialects. We explain the design principles of CODA and provide a detailed description of its guidelines as applied to Egyptian Arabic.", "keyphrases": ["dialectal arabic", "egyptian arabic", "conventional orthography"]} +{"id": "see-etal-2017-get", "title": "Get To The Point: Summarization with Pointer-Generator Networks", "abstract": "Neural sequence-to-sequence models have provided a viable new approach for abstractive text summarization (meaning they are not restricted to simply selecting and rearranging passages from the original text). However, these models have two shortcomings: they are liable to reproduce factual details inaccurately, and they tend to repeat themselves. In this work we propose a novel architecture that augments the standard sequence-to-sequence attentional model in two orthogonal ways. First, we use a hybrid pointer-generator network that can copy words from the source text via pointing, which aids accurate reproduction of information, while retaining the ability to produce novel words through the generator. Second, we use coverage to keep track of what has been summarized, which discourages repetition. 
We apply our model to the CNN / Daily Mail summarization task, outperforming the current abstractive state-of-the-art by at least 2 ROUGE points.", "keyphrases": ["summarization", "pointer-generator network", "sequence-to-sequence model", "copy mechanism", "coverage mechanism"]} +{"id": "chen-etal-2017-reading", "title": "Reading Wikipedia to Answer Open-Domain Questions", "abstract": "This paper proposes to tackle open-domain question answering using Wikipedia as the unique knowledge source: the answer to any factoid question is a text span in a Wikipedia article. This task of machine reading at scale combines the challenges of document retrieval (finding the relevant articles) with that of machine comprehension of text (identifying the answer spans from those articles). Our approach combines a search component based on bigram hashing and TF-IDF matching with a multi-layer recurrent neural network model trained to detect answers in Wikipedia paragraphs. Our experiments on multiple existing QA datasets indicate that (1) both modules are highly competitive with respect to existing counterparts and (2) multitask learning using distant supervision on their combination is an effective complete system on this challenging task.", "keyphrases": ["wikipedia", "open-domain question", "knowledge source", "passage", "drqa"]} +{"id": "hatori-etal-2011-incremental", "title": "Incremental Joint POS Tagging and Dependency Parsing in Chinese", "abstract": "We address the problem of joint part-of-speech (POS) tagging and dependency parsing in Chinese. In Chinese, some POS tags are often hard to disambiguate without considering long-range syntactic information. Also, the traditional pipeline approach to POS tagging and dependency parsing may suffer from the problem of error propagation. In this paper, we propose the first incremental approach to the task of joint POS tagging and dependency parsing, which is built upon a shift-reduce parsing framework with dynamic programming. Although the incremental approach encounters difficulties with underspecified POS tags of look-ahead words, we overcome this issue by introducing so-called delayed features. Our joint approach achieved substantial improvements over the pipeline and baseline systems in both POS tagging and dependency parsing task, achieving the new state-of-the-art performance on this joint task.", "keyphrases": ["pos tagging", "dependency parsing", "transition-based joint model"]} +{"id": "sennrich-etal-2016-neural", "title": "Neural Machine Translation of Rare Words with Subword Units", "abstract": "Neural machine translation (NMT) models typically operate with a fixed vocabulary, but translation is an open-vocabulary problem. Previous work addresses the translation of out-of-vocabulary words by backing off to a dictionary. In this paper, we introduce a simpler and more effective approach, making the NMT model capable of open-vocabulary translation by encoding rare and unknown words as sequences of subword units. This is based on the intuition that various word classes are translatable via smaller units than words, for instance names (via character copying or transliteration), compounds (via compositional translation), and cognates and loanwords (via phonological and morphological transformations).
We discuss the suitability of different word segmentation techniques, including simple character n-gram models and a segmentation based on the byte pair encoding compression algorithm, and empirically show that subword models improve over a back-off dictionary baseline for the WMT 15 translation tasks English-German and English-Russian by 1.1 and 1.3 BLEU, respectively.", "keyphrases": ["subword unit", "segmentation", "neural machine translation", "bpe", "back-translation"]} +{"id": "szarvas-etal-2008-bioscope", "title": "The BioScope corpus: annotation for negation, uncertainty and their scope in biomedical texts", "abstract": "This article reports on a corpus annotation project that has produced a freely available resource for research on handling negation and uncertainty in biomedical texts (we call this corpus the BioScope corpus). The corpus consists of three parts, namely medical free texts, biological full papers and biological scientific abstracts. The dataset contains annotations at the token level for negative and speculative keywords and at the sentence level for their linguistic scope. The annotation process was carried out by two independent linguist annotators and a chief annotator -- also responsible for setting up the annotation guidelines -- who resolved cases where the annotators disagreed. We will report our statistics on corpus size, ambiguity levels and the consistency of annotations.", "keyphrases": ["bioscope corpus", "negation", "scope", "token level"]} +{"id": "koehn-etal-2007-moses", "title": "Moses: Open Source Toolkit for Statistical Machine Translation", "abstract": "We describe an open-source toolkit for statistical machine translation whose novel contributions are (a) support for linguistically motivated factors, (b) confusion network decoding, and (c) efficient data formats for translation models and language models. In addition to the SMT decoder, the toolkit also includes a wide variety of tools for training, tuning and applying the system to many translation tasks.", "keyphrases": ["statistical machine translation", "factor", "moses", "smt system", "baseline system"]} +{"id": "sennrich-etal-2016-improving", "title": "Improving Neural Machine Translation Models with Monolingual Data", "abstract": "Neural Machine Translation (NMT) has obtained state-of-the art performance for several language pairs, while only using parallel data for training. Target-side monolingual data plays an important role in boosting fluency for phrase-based statistical machine translation, and we investigate the use of monolingual data for NMT. In contrast to previous work, which combines NMT models with separately trained language models, we note that encoder-decoder NMT architectures already have the capacity to learn the same information as a language model, and we explore strategies to train with monolingual data without changing the neural network architecture. By pairing monolingual training data with an automatic back-translation, we can treat it as additional parallel training data, and we obtain substantial improvements on the WMT 15 task English German (+2.8-3.7 BLEU), and for the low-resourced IWSLT 14 task Turkish->English (+2.1-3.4 BLEU), obtaining new state-of-the-art results. 
We also show that fine-tuning on in-domain monolingual and parallel data gives substantial improvements for the IWSLT 15 task English->German.", "keyphrases": ["neural machine translation", "monolingual data", "back-translation", "data augmentation", "synthetic parallel corpus"]} +{"id": "banarescu-etal-2013-abstract", "title": "Abstract Meaning Representation for Sembanking", "abstract": "We describe Abstract Meaning Representation (AMR), a semantic representation language in which we are writing down the meanings of thousands of English sentences. We hope that a sembank of simple, whole-sentence semantic structures will spur new work in statistical natural language understanding and generation, like the Penn Treebank encouraged work on statistical parsing. This paper gives an overview of AMR and tools associated with it.", "keyphrases": ["amr", "abstract meaning representation", "acyclic graph", "node", "propbank"]} +{"id": "nadeem-etal-2021-stereoset", "title": "StereoSet: Measuring stereotypical bias in pretrained language models", "abstract": "A stereotype is an over-generalized belief about a particular group of people, e.g., Asians are good at math or African Americans are athletic. Such beliefs (biases) are known to hurt target groups. Since pretrained language models are trained on large real-world data, they are known to capture stereotypical biases. It is important to quantify to what extent these biases are present in them. Although this is a rapidly growing area of research, existing literature lacks in two important aspects: 1) they mainly evaluate bias of pretrained language models on a small set of artificial sentences, even though these models are trained on natural data 2) current evaluations focus on measuring bias without considering the language modeling ability of a model, which could lead to misleading trust on a model even if it is a poor language model. We address both these problems. We present StereoSet, a large-scale natural English dataset to measure stereotypical biases in four domains: gender, profession, race, and religion. We contrast both stereotypical bias and language modeling ability of popular models like BERT, GPT-2, RoBERTa, and XLnet. We show that these models exhibit strong stereotypical biases. Our data and code are available at .", "keyphrases": ["stereotypical bias", "language model", "stereoset"]} +{"id": "li-etal-2011-joint", "title": "Joint Models for Chinese POS Tagging and Dependency Parsing", "abstract": "Part-of-speech (POS) is an indispensable feature in dependency parsing. Current research usually models POS tagging and dependency parsing independently. This may suffer from error propagation problem. Our experiments show that parsing accuracy drops by about 6% when using automatic POS tags instead of gold ones. To solve this issue, this paper proposes a solution by jointly optimizing POS tagging and dependency parsing in a unique model. We design several joint models and their corresponding decoding algorithms to incorporate different feature sets. We further present an effective pruning strategy to reduce the search space of candidate POS tags, leading to significant improvement of parsing speed. Experimental results on Chinese Penn Treebank 5 show that our joint models significantly improve the state-of-the-art parsing accuracy by about 1.5%. Detailed analysis shows that the joint method is able to choose such POS tags that are more helpful and discriminative from parsing viewpoint. 
This is the fundamental reason of parsing accuracy improvement.", "keyphrases": ["pos tagging", "dependency parsing", "first joint model"]} +{"id": "rajpurkar-etal-2016-squad", "title": "SQuAD: 100,000+ Questions for Machine Comprehension of Text", "abstract": "We present the Stanford Question Answering Dataset (SQuAD), a new reading comprehension dataset consisting of 100,000+ questions posed by crowdworkers on a set of Wikipedia articles, where the answer to each question is a segment of text from the corresponding reading passage. We analyze the dataset to understand the types of reasoning required to answer the questions, leaning heavily on dependency and constituency trees. We build a strong logistic regression model, which achieves an F1 score of 51.0%, a significant improvement over a simple baseline (20%). However, human performance (86.8%) is much higher, indicating that the dataset presents a good challenge problem for future research. \nThe dataset is freely available at this https URL", "keyphrases": ["machine comprehension", "wikipedia article", "squad", "large-scale dataset", "answer span"]} +{"id": "blatz-etal-2004-confidence", "title": "Confidence Estimation for Machine Translation", "abstract": "We present a detailed study of confidence estimation for machine translation. Various methods for determining whether MT output is correct are investigated, for both whole sentences and words. Since the notion of correctness is not intuitively clear in this context, different ways of defining it are proposed. We present results on data from the NIST 2003 Chinese-to-English MT evaluation.", "keyphrases": ["machine translation", "correctness", "confidence estimation", "access", "segment"]} +{"id": "maynard-greenwood-2014-cares", "title": "Who cares about Sarcastic Tweets? Investigating the Impact of Sarcasm on Sentiment Analysis.", "abstract": "Sarcasm is a common phenomenon in social media, and is inherently difficult to analyse, not just automatically but often for humans too. It has an important effect on sentiment, but is usually ignored in social media analysis, because it is considered too tricky to handle. While there exist a few systems which can detect sarcasm, almost no work has been carried out on studying the effect that sarcasm has on sentiment in tweets, and on incorporating this into automatic tools for sentiment analysis. We perform an analysis of the effect of sarcasm scope on the polarity of tweets, and have compiled a number of rules which enable us to improve the accuracy of sentiment analysis when sarcasm is known to be present. We consider in particular the effect of sentiment and sarcasm contained in hashtags, and have developed a hashtag tokeniser for GATE, so that sentiment and sarcasm found within hashtags can be detected more easily. According to our experiments, the hashtag tokenisation achieves 98% Precision, while the sarcasm detection achieved 91% Precision and polarity detection 80%.", "keyphrases": ["sarcasm", "sentiment analysis", "hashtag"]} +{"id": "blitzer-etal-2007-biographies", "title": "Biographies, Bollywood, Boom-boxes and Blenders: Domain Adaptation for Sentiment Classification", "abstract": "Automatic sentiment classification has been extensively studied and applied in recent years. However, sentiment is expressed differently in different domains, and annotating corpora for every possible domain of interest is impractical. We investigate domain adaptation for sentiment classifiers, focusing on online reviews for different types of products. 
First, we extend to sentiment classification the recently-proposed structural correspondence learning (SCL) algorithm, reducing the relative error due to adaptation between domains by an average of 30% over the original SCL algorithm and 46% over a supervised baseline. Second, we identify a measure of domain similarity that correlates well with the potential for adaptation of a classifier from one domain to another. This measure could for instance be used to select a small set of domains to annotate whose trained classifiers would transfer well to many other domains.", "keyphrases": ["domain adaptation", "sentiment classification", "product", "scl", "pivot feature"]} +{"id": "pennington-etal-2014-glove", "title": "GloVe: Global Vectors for Word Representation", "abstract": "Recent methods for learning vector space representations of words have succeeded in capturing fine-grained semantic and syntactic regularities using vector arithmetic, but the origin of these regularities has remained opaque. We analyze and make explicit the model properties needed for such regularities to emerge in word vectors. The result is a new global log-bilinear regression model that combines the advantages of the two major model families in the literature: global matrix factorization and local context window methods. Our model efficiently leverages statistical information by training only on the nonzero elements in a word-word cooccurrence matrix, rather than on the entire sparse matrix or on individual context windows in a large corpus. The model produces a vector space with meaningful substructure, as evidenced by its performance of 75% on a recent word analogy task. It also outperforms related models on similarity tasks and named entity recognition.", "keyphrases": ["word representation", "regularity", "glove", "co-occurrence information", "lsa"]} +{"id": "ratinov-etal-2011-local", "title": "Local and Global Algorithms for Disambiguation to Wikipedia", "abstract": "Disambiguating concepts and entities in a context sensitive way is a fundamental problem in natural language processing. The comprehensiveness of Wikipedia has made the online encyclopedia an increasingly popular target for disambiguation. Disambiguation to Wikipedia is similar to a traditional Word Sense Disambiguation task, but distinct in that the Wikipedia link structure provides additional information about which disambiguations are compatible. In this work we analyze approaches that utilize this information to arrive at coherent sets of disambiguations for a given document (which we call \"global\" approaches), and compare them to more traditional (local) approaches. We show that previous approaches for global disambiguation can be improved, but even then the local disambiguation provides a baseline which is very hard to beat.", "keyphrases": ["disambiguation", "wikipedia", "mention"]} +{"id": "ganitkevitch-etal-2013-ppdb", "title": "PPDB: The Paraphrase Database", "abstract": "We present the 1.0 release of our paraphrase database, PPDB. Its English portion, PPDB:Eng, contains over 220 million paraphrase pairs, consisting of 73 million phrasal and 8 million lexical paraphrases, as well as 140 million paraphrase patterns, which capture many meaning-preserving syntactic transformations. The paraphrases are extracted from bilingual parallel corpora totaling over 100 million sentence pairs and over 2 billion English words. We also release PPDB:Spa, a collection of 196 million Spanish paraphrases. 
Each paraphrase pair in PPDB contains a set of associated scores, including paraphrase probabilities derived from the bitext data and a variety of monolingual distributional similarity scores computed from the Google n-grams and the Annotated Gigaword corpus. Our release includes pruning tools that allow users to determine their own precision/recall tradeoff.", "keyphrases": ["paraphrase database", "parallel corpora", "ppdb", "pivoting", "coverage"]} +{"id": "wiegand-etal-2010-survey", "title": "A survey on the role of negation in sentiment analysis", "abstract": "This paper presents a survey on the role of negation in sentiment analysis. Negation is a very common linguistic construction that affects polarity and, therefore, needs to be taken into consideration in sentiment analysis. \n \nWe will present various computational approaches modeling negation in sentiment analysis. We will, in particular, focus on aspects, such as level of representation used for sentiment analysis, negation word detection and scope of negation. We will also discuss limits and challenges of negation modeling on that task.", "keyphrases": ["survey", "negation", "sentiment analysis", "biomedical domain"]} +{"id": "riedel-clarke-2006-incremental", "title": "Incremental Integer Linear Programming for Non-projective Dependency Parsing", "abstract": "Integer Linear Programming has recently been used for decoding in a number of probabilistic models in order to enforce global constraints. However, in certain applications, such as non-projective dependency parsing and machine translation, the complete formulation of the decoding problem as an integer linear program renders solving intractable. We present an approach which solves the problem incrementally, thus we avoid creating intractable integer linear programs. This approach is applied to Dutch dependency parsing and we show how the addition of linguistically motivated constraints can yield a significant improvement over state-of-the-art.", "keyphrases": ["integer linear programming", "dependency parsing", "ilp"]} +{"id": "pak-paroubek-2010-twitter", "title": "Twitter as a Corpus for Sentiment Analysis and Opinion Mining", "abstract": "Microblogging today has become a very popular communication tool among Internet users. Millions of users share opinions on different aspects of life everyday. Therefore microblogging web-sites are rich sources of data for opinion mining and sentiment analysis. Because microblogging has appeared relatively recently, there are a few research works that were devoted to this topic. In our paper, we focus on using Twitter, the most popular microblogging platform, for the task of sentiment analysis. We show how to automatically collect a corpus for sentiment analysis and opinion mining purposes. We perform linguistic analysis of the collected corpus and explain discovered phenomena. Using the corpus, we build a sentiment classifier that is able to determine positive, negative and neutral sentiments for a document. Experimental evaluations show that our proposed techniques are efficient and perform better than previously proposed methods. In our research, we worked with English, however, the proposed technique can be used with any other language.", "keyphrases": ["sentiment analysis", "twitter", "n-gram", "social medium"]} +{"id": "wang-etal-2018-glue", "title": "GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding", "abstract": "Human ability to understand language is general, flexible, and robust. 
In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.", "keyphrases": ["multi-task benchmark", "natural language understanding", "glue", "downstream task", "mrpc"]} +{"id": "luong-etal-2015-effective", "title": "Effective Approaches to Attention-based Neural Machine Translation", "abstract": "An attentional mechanism has lately been used to improve neural machine translation (NMT) by selectively focusing on parts of the source sentence during translation. However, there has been little work exploring useful architectures for attention-based NMT. This paper examines two simple and effective classes of attentional mechanism: a global approach which always attends to all source words and a local one that only looks at a subset of source words at a time. We demonstrate the effectiveness of both approaches on the WMT translation tasks between English and German in both directions. With local attention, we achieve a significant gain of 5.0 BLEU points over non-attentional systems that already incorporate known techniques such as dropout. Our ensemble model using different attention architectures yields a new state-of-the-art result in the WMT\u201915 English to German translation task with 25.9 BLEU points, an improvement of 1.0 BLEU points over the existing best system backed by NMT and an n-gram reranker.", "keyphrases": ["neural machine translation", "source sentence", "attention model", "context vector", "encoder-decoder architecture"]} +{"id": "devlin-etal-2019-bert", "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. 
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "keyphrases": ["deep bidirectional transformers", "language understanding", "encoder representation", "pre-trained model", "many nlp task"]} +{"id": "kim-2014-convolutional", "title": "Convolutional Neural Networks for Sentence Classification", "abstract": "We report on a series of experiments with convolutional neural networks (CNN) trained on top of pre-trained word vectors for sentence-level classification tasks. We show that a simple CNN with little hyperparameter tuning and static vectors achieves excellent results on multiple benchmarks. Learning task-specific vectors through fine-tuning offers further gains in performance. We additionally propose a simple modification to the architecture to allow for the use of both task-specific and static vectors. The CNN models discussed herein improve upon the state of the art on 4 out of 7 tasks, which include sentiment analysis and question classification.", "keyphrases": ["sentence classification", "cnn", "convolutional neural networks", "learning model", "filter"]} +{"id": "snover-etal-2006-study", "title": "A Study of Translation Edit Rate with Targeted Human Annotation", "abstract": "We examine a new, intuitive measure for evaluating machine-translation output that avoids the knowledge intensiveness of more meaning-based approaches, and the labor-intensiveness of human judgments. Translation Edit Rate (TER) measures the amount of editing that a human would have to perform to change a system output so it exactly matches a reference translation. We show that the single-reference variant of TER correlates as well with human judgments of MT quality as the four-reference variant of BLEU. We also define a human-targeted TER (or HTER) and show that it yields higher correlations with human judgments than BLEU\u2014even when BLEU is given human-targeted references. Our results indicate that HTER correlates with human judgments better than HMETEOR and that the four-reference variants of TER and HTER correlate with human judgments as well as\u2014or better than\u2014a second human judgment does.", "keyphrases": ["translation edit rate", "human judgment", "hter", "post-editing effort", "deletion"]} +{"id": "agirre-etal-2012-semeval", "title": "SemEval-2012 Task 6: A Pilot on Semantic Textual Similarity", "abstract": "Semantic Textual Similarity (STS) measures the degree of semantic equivalence between two texts. This paper presents the results of the STS pilot task in Semeval. The training data contained 2000 sentence pairs from previously existing paraphrase datasets and machine translation evaluation resources. The test data also comprised 2000 sentence pairs for those datasets, plus two surprise datasets with 400 pairs from a different machine translation evaluation corpus and 750 pairs from a lexical resource mapping exercise. 
The similarity of pairs of sentences was rated on a 0-5 scale (low to high similarity) by human judges using Amazon Mechanical Turk, with high Pearson correlation scores, around 90%. 35 teams participated in the task, submitting 88 runs. The best results scored a Pearson correlation >80%, well above a simple lexical baseline that only scored a 31% correlation. This pilot task opens an exciting way ahead, although there are still open issues, especially the evaluation metric.", "keyphrases": ["semantic textual similarity", "semeval", "long text segment"]} +{"id": "feng-etal-2020-codebert", "title": "CodeBERT: A Pre-Trained Model for Programming and Natural Languages", "abstract": "We present CodeBERT, a bimodal pre-trained model for programming language (PL) and natural language (NL). CodeBERT learns general-purpose representations that support downstream NL-PL applications such as natural language code search, code documentation generation, etc. We develop CodeBERT with Transformer-based neural architecture, and train it with a hybrid objective function that incorporates the pre-training task of replaced token detection, which is to detect plausible alternatives sampled from generators. This enables us to utilize both \u201cbimodal\u201d data of NL-PL pairs and \u201cunimodal\u201d data, where the former provides input tokens for model training while the latter helps to learn better generators. We evaluate CodeBERT on two NL-PL applications by fine-tuning model parameters. Results show that CodeBERT achieves state-of-the-art performance on both natural language code search and code documentation generation. Furthermore, to investigate what type of knowledge is learned in CodeBERT, we construct a dataset for NL-PL probing, and evaluate in a zero-shot setting where parameters of pre-trained models are fixed. Results show that CodeBERT performs better than previous pre-trained models on NL-PL probing.", "keyphrases": ["pre-trained model", "programming language", "codebert"]} +{"id": "zeng-etal-2015-distant", "title": "Distant Supervision for Relation Extraction via Piecewise Convolutional Neural Networks", "abstract": "Two problems arise when using distant supervision for relation extraction. First, in this method, an already existing knowledge base is heuristically aligned to texts, and the alignment results are treated as labeled data. However, the heuristic alignment can fail, resulting in a wrong label problem. In addition, in previous approaches, statistical models have typically been applied to ad hoc features. The noise that originates from the feature extraction process can cause poor performance. In this paper, we propose a novel model dubbed the Piecewise Convolutional Neural Networks (PCNNs) with multi-instance learning to address these two problems. To solve the first problem, distant supervised relation extraction is treated as a multi-instance problem in which the uncertainty of instance labels is taken into account. To address the latter problem, we avoid feature engineering and instead adopt convolutional architecture with piecewise max pooling to automatically learn relevant features. 
Experiments show that our method is effective and outperforms several competitive baseline methods.", "keyphrases": ["relation extraction", "convolutional neural network", "multi-instance", "distant supervision", "cnn"]} +{"id": "qian-liu-2012-joint", "title": "Joint Chinese Word Segmentation, POS Tagging and Parsing", "abstract": "In this paper, we propose a novel decoding algorithm for discriminative joint Chinese word segmentation, part-of-speech (POS) tagging, and parsing. Previous work often used a pipeline method -- Chinese word segmentation followed by POS tagging and parsing, which suffers from error propagation and is unable to leverage information in later modules for earlier components. In our approach, we train the three individual models separately during training, and incorporate them together in a unified framework during decoding. We extend the CYK parsing algorithm so that it can deal with word segmentation and POS tagging features. As far as we know, this is the first work on joint Chinese word segmentation, POS tagging and parsing. Our experimental results on Chinese Tree Bank 5 corpus show that our approach outperforms the state-of-the-art pipeline system.", "keyphrases": ["word segmentation", "pos tagging", "joint model"]} +{"id": "koehn-knowles-2017-six", "title": "Six Challenges for Neural Machine Translation", "abstract": "We explore six challenges for neural machine translation: domain mismatch, amount of training data, rare words, long sentences, word alignment, and beam search. We show both deficiencies and improvements over the quality of phrase-based statistical machine translation.", "keyphrases": ["neural machine translation", "domain mismatch", "nmt model", "translation quality", "low-resource language pair"]} +{"id": "ma-etal-2019-stacl", "title": "STACL: Simultaneous Translation with Implicit Anticipation and Controllable Latency using Prefix-to-Prefix Framework", "abstract": "Simultaneous translation, which translates sentences before they are finished, is useful in many scenarios but is notoriously difficult due to word-order differences. While the conventional seq-to-seq framework is only suitable for full-sentence translation, we propose a novel prefix-to-prefix framework for simultaneous translation that implicitly learns to anticipate in a single translation model. Within this framework, we present a very simple yet surprisingly effective \u201cwait-k\u201d policy trained to generate the target sentence concurrently with the source sentence, but always k words behind. Experiments show our strategy achieves low latency and reasonable quality (compared to full-sentence translation) on 4 directions: zh\u2194en and de\u2194en.", "keyphrases": ["simultaneous translation", "prefix-to-prefix framework", "wait-k"]} +{"id": "peters-etal-2018-deep", "title": "Deep Contextualized Word Representations", "abstract": "We introduce a new type of deep contextualized word representation that models both (1) complex characteristics of word use (e.g., syntax and semantics), and (2) how these uses vary across linguistic contexts (i.e., to model polysemy). Our word vectors are learned functions of the internal states of a deep bidirectional language model (biLM), which is pre-trained on a large text corpus. We show that these representations can be easily added to existing models and significantly improve the state of the art across six challenging NLP problems, including question answering, textual entailment and sentiment analysis. 
We also present an analysis showing that exposing the deep internals of the pre-trained network is crucial, allowing downstream models to mix different types of semi-supervision signals.", "keyphrases": ["language model", "pre-training", "deep", "elmo", "downstream task"]} +{"id": "pasha-etal-2014-madamira", "title": "MADAMIRA: A Fast, Comprehensive Tool for Morphological Analysis and Disambiguation of Arabic", "abstract": "In this paper, we present MADAMIRA, a system for morphological analysis and disambiguation of Arabic that combines some of the best aspects of two previously commonly used systems for Arabic processing, MADA (Habash and Rambow, 2005; Habash et al., 2009; Habash et al., 2013) and AMIRA (Diab et al., 2007). MADAMIRA improves upon the two systems with a more streamlined Java implementation that is more robust, portable, extensible, and is faster than its ancestors by more than an order of magnitude. We also discuss an online demo (see ) that highlights these aspects.", "keyphrases": ["disambiguation", "arabic", "morphological tagger"]} +{"id": "davidov-etal-2010-semi", "title": "Semi-Supervised Recognition of Sarcasm in Twitter and Amazon", "abstract": "Sarcasm is a form of speech act in which the speakers convey their message in an implicit way. The inherently ambiguous nature of sarcasm sometimes makes it hard even for humans to decide whether an utterance is sarcastic or not. Recognition of sarcasm can benefit many sentiment analysis NLP applications, such as review summarization, dialogue systems and review ranking systems. \n \nIn this paper we experiment with semi-supervised sarcasm identification on two very different data sets: a collection of 5.9 million tweets collected from Twitter, and a collection of 66000 product reviews from Amazon. Using the Mechanical Turk we created a gold standard sample in which each sentence was tagged by 3 annotators, obtaining F-scores of 0.78 on the product reviews dataset and 0.83 on the Twitter dataset. We discuss the differences between the datasets and how the algorithm uses them (e.g., for the Amazon dataset the algorithm makes use of structured information). We also discuss the utility of Twitter #sarcasm hashtags for the task.", "keyphrases": ["sarcasm", "twitter", "emoticon", "semi-supervised approach"]} +{"id": "callison-burch-etal-2009-findings", "title": "Findings of the 2009 Workshop on Statistical Machine Translation", "abstract": "This paper presents the results of the WMT09 shared tasks, which included a translation task, a system combination task, and an evaluation task. We conducted a large-scale manual evaluation of 87 machine translation systems and 22 system combination entries. We used the ranking of these systems to measure how strongly automatic metrics correlate with human judgments of translation quality, for more than 20 metrics. We present a new evaluation technique whereby system output is edited and judged for correctness.", "keyphrases": ["workshop", "statistical machine translation", "wmt", "quality estimation", "smt system"]} +{"id": "poliak-etal-2018-hypothesis", "title": "Hypothesis Only Baselines in Natural Language Inference", "abstract": "We propose a hypothesis only baseline for diagnosing Natural Language Inference (NLI). Especially when an NLI dataset assumes inference is occurring based purely on the relationship between a context and a hypothesis, it follows that assessing entailment relations while ignoring the provided context is a degenerate solution. 
Yet, through experiments on 10 distinct NLI datasets, we find that this approach, which we refer to as a hypothesis-only model, is able to significantly outperform a majority-class baseline across a number of NLI datasets. Our analysis suggests that statistical irregularities may allow a model to perform NLI in some datasets beyond what should be achievable without access to the context.", "keyphrases": ["natural language inference", "nli dataset", "irregularity", "hypothesis", "annotation artifact"]} +{"id": "rosen-etal-2016-mwes", "title": "MWEs in Treebanks: From Survey to Guidelines", "abstract": "By means of an online survey, we have investigated ways in which various types of multiword expressions are annotated in existing treebanks. The results indicate that there is considerable variation in treatments across treebanks and thereby also, to some extent, across languages and across theoretical frameworks. The comparison is focused on the annotation of light verb constructions and verbal idioms. The survey shows that the light verb constructions either get special annotations as such, or are treated as ordinary verbs, while VP idioms are handled through different strategies. Based on insights from our investigation, we propose some general guidelines for annotating multiword expressions in treebanks. The recommendations address the following application-based needs: distinguishing MWEs from similar but compositional constructions; searching distinct types of MWEs in treebanks; awareness of literal and nonliteral meanings; and normalization of the MWE representation. The cross-lingually and cross-theoretically focused survey is intended as an aid to accessing treebanks and an aid for further work on treebank annotation.", "keyphrases": ["treebank", "guideline", "mwes"]} +{"id": "yang-etal-2016-hierarchical", "title": "Hierarchical Attention Networks for Document Classification", "abstract": "We propose a hierarchical attention network for document classification. Our model has two distinctive characteristics: (i) it has a hierarchical structure that mirrors the hierarchical structure of documents; (ii) it has two levels of attention mechanisms applied at the word- and sentence-level, enabling it to attend differentially to more and less important content when constructing the document representation. Experiments conducted on six large scale text classification tasks demonstrate that the proposed architecture outperforms previous methods by a substantial margin. Visualization of the attention layers illustrates that the model selects qualitatively informative words and sentences.", "keyphrases": ["document classification", "hierarchical attention networks", "han", "sentence level", "deep learning model"]} +{"id": "zhang-zong-2016-exploiting", "title": "Exploiting Source-side Monolingual Data in Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) based on the encoder-decoder architecture has recently become a new paradigm. Researchers have proven that the target-side monolingual data can greatly enhance the decoder model of NMT. However, the source-side monolingual data is not fully explored although it should be useful to strengthen the encoder model of NMT, especially when the parallel corpus is far from sufficient. In this paper, we propose two approaches to make full use of the source-side monolingual data in NMT. The first approach employs the self-learning algorithm to generate the synthetic large-scale parallel data for NMT training. 
The second approach applies the multi-task learning framework using two NMTs to predict the translation and the reordered source-side monolingual sentences simultaneously. The extensive experiments demonstrate that the proposed methods obtain significant improvements over the strong attention-based NMT.", "keyphrases": ["source-side monolingual data", "neural machine translation", "multi-task learning framework"]} +{"id": "chiang-2005-hierarchical", "title": "A Hierarchical Phrase-Based Model for Statistical Machine Translation", "abstract": "We present a statistical phrase-based translation model that uses hierarchical phrases---phrases that contain subphrases. The model is formally a synchronous context-free grammar but is learned from a bitext without any syntactic information. Thus it can be seen as a shift to the formal machinery of syntax-based translation systems without any linguistic commitment. In our experiments using BLEU as a metric, the hierarchical phrase-based model achieves a relative improvement of 7.5% over Pharaoh, a state-of-the-art phrase-based system.", "keyphrases": ["phrase-based model", "context-free grammar", "hpb", "smt system", "syntax-based model"]} +{"id": "levy-andrew-2006-tregex", "title": "Tregex and Tsurgeon: tools for querying and manipulating tree data structures", "abstract": "With syntactically annotated corpora becoming increasingly available for a variety of languages and grammatical frameworks, tree query tools have proven invaluable to linguists and computer scientists for both data exploration and corpus-based research. We provide a combined engine for tree query (Tregex) and manipulation (Tsurgeon) that can operate on arbitrary tree data structures with no need for preprocessing. Tregex remedies several expressive and implementational limitations of existing query tools, while Tsurgeon is to our knowledge the most expressive tree manipulation utility available.", "keyphrases": ["tsurgeon", "tree data structure", "tregex"]} +{"id": "cho-etal-2014-learning", "title": "Learning Phrase Representations using RNN Encoder\u2013Decoder for Statistical Machine Translation", "abstract": "In this paper, we propose a novel neural network model called RNN Encoder-Decoder that consists of two recurrent neural networks (RNN). One RNN encodes a sequence of symbols into a fixed-length vector representation, and the other decodes the representation into another sequence of symbols. The encoder and decoder of the proposed model are jointly trained to maximize the conditional probability of a target sequence given a source sequence. The performance of a statistical machine translation system is empirically found to improve by using the conditional probabilities of phrase pairs computed by the RNN Encoder-Decoder as an additional feature in the existing log-linear model. Qualitatively, we show that the proposed model learns a semantically and syntactically meaningful representation of linguistic phrases.", "keyphrases": ["rnn", "statistical machine translation", "recurrent neural network", "encoder-decoder framework", "sequence-to-sequence"]} +{"id": "banea-etal-2008-multilingual", "title": "Multilingual Subjectivity Analysis Using Machine Translation", "abstract": "Although research in other languages is increasing, much of the work in subjectivity analysis has been applied to English data, mainly due to the large body of electronic resources and tools that are available for this language. 
In this paper, we propose and evaluate methods that can be employed to transfer a repository of subjectivity resources across languages. Specifically, we attempt to leverage the resources available for English and, by employing machine translation, generate resources for subjectivity analysis in other languages. Through comparative evaluations on two different languages (Romanian and Spanish), we show that automatic translation is a viable alternative for the construction of resources and tools for subjectivity analysis in a new target language.", "keyphrases": ["subjectivity analysis", "machine translation", "romanian", "sentiment analysis"]} +{"id": "riedel-etal-2013-relation", "title": "Relation Extraction with Matrix Factorization and Universal Schemas", "abstract": "Traditional relation extraction predicts relations within some fixed and finite target schema. Machine learning approaches to this task require either manual annotation or, in the case of distant supervision, existing structured sources of the same schema. The need for existing datasets can be avoided by using a universal schema: the union of all involved schemas (surface form predicates as in OpenIE, and relations in the schemas of preexisting databases). This schema has an almost unlimited set of relations (due to surface forms), and supports integration with existing structured data (through the relation types of existing databases). To populate a database of such schema we present matrix factorization models that learn latent feature vectors for entity tuples and relations. We show that such latent models achieve substantially higher accuracy than a traditional classification approach. More importantly, by operating simultaneously on relations observed in text and in pre-existing structured DBs such as Freebase, we are able to reason about unstructured and structured data in mutually-supporting ways. By doing so our approach outperforms state-of-the-art distant supervision.", "keyphrases": ["matrix factorization", "universal schemas", "openie", "relation extraction", "knowledge base"]} +{"id": "zheng-etal-2013-deep", "title": "Deep Learning for Chinese Word Segmentation and POS Tagging", "abstract": "This study explores the feasibility of performing Chinese word segmentation (CWS) and POS tagging by deep learning. We try to avoid task-specific feature engineering, and use deep layers of neural networks to discover relevant features to the tasks. We leverage large-scale unlabeled data to improve internal representation of Chinese characters, and use these improved representations to enhance supervised word segmentation and POS tagging models. Our networks achieved close to state-of-the-art performance with minimal computational cost. 
We also describe a perceptron-style algorithm for training the neural networks, as an alternative to maximum-likelihood method, to speed up the training process and make the learning algorithm easier to implement.", "keyphrases": ["chinese word segmentation", "pos tagging", "deep learning"]} +{"id": "ide-etal-2008-masc", "title": "MASC: the Manually Annotated Sub-Corpus of American English", "abstract": "To answer the critical need for sharable, reusable annotated resources with rich linguistic annotations, we are developing a Manually Annotated Sub-Corpus (MASC) including texts from diverse genres and manual annotations or manually-validated annotations for multiple levels, including WordNet senses and FrameNet frames and frame elements, both of which have become significant resources in the international computational linguistics community. To derive maximal benefit from the semantic information provided by these resources, the MASC will also include manually-validated shallow parses and named entities, which will enable linking WordNet senses and FrameNet frames within the same sentences into more complex semantic structures and, because named entities will often be the role fillers of FrameNet frames, enrich the semantic and pragmatic information derivable from the sub-corpus. All MASC annotations will be published with detailed inter-annotator agreement measures. The MASC and its annotations will be freely downloadable from the ANC website, thus providing maximum accessibility for researchers from around the globe.", "keyphrases": ["manually annotated sub-corpus", "masc", "project"]} +{"id": "abu-farha-etal-2021-overview", "title": "Overview of the WANLP 2021 Shared Task on Sarcasm and Sentiment Detection in Arabic", "abstract": "This paper provides an overview of the WANLP 2021 shared task on sarcasm and sentiment detection in Arabic. The shared task has two subtasks: sarcasm detection (subtask 1) and sentiment analysis (subtask 2). This shared task aims to promote and bring attention to Arabic sarcasm detection, which is crucial to improve the performance in other tasks such as sentiment analysis. The dataset used in this shared task, namely ArSarcasm-v2, consists of 15,548 tweets labelled for sarcasm, sentiment and dialect. We received 27 and 22 submissions for subtasks 1 and 2 respectively. Most of the approaches relied on using and fine-tuning pre-trained language models such as AraBERT and MARBERT. The top achieved results for the sarcasm detection and sentiment analysis tasks were 0.6225 F1-score and 0.748 F1-PN respectively.", "keyphrases": ["sarcasm", "sentiment detection", "arabic"]} +{"id": "nenkova-passonneau-2004-evaluating", "title": "Evaluating Content Selection in Summarization: The Pyramid Method", "abstract": "We present an empirically grounded method for evaluating content selection in summarization. It incorporates the idea that no single best model summary for a collection of documents exists. Our method quantifies the relative importance of facts to be conveyed. We argue that it is reliable, predictive and diagnostic, thus improves considerably over the shortcomings of the human evaluation method currently used in the Document Understanding Conference.", "keyphrases": ["content selection", "summarization", "pyramid method", "evaluation method"]} +{"id": "chiang-2007-hierarchical", "title": "Hierarchical Phrase-Based Translation", "abstract": "We present a statistical machine translation model that uses hierarchical phrases---phrases that contain subphrases. 
The model is formally a synchronous context-free grammar but is learned from a parallel text without any syntactic annotations. Thus it can be seen as combining fundamental ideas from both syntax-based translation and phrase-based translation. We describe our system's training and decoding methods in detail, and evaluate it for translation speed and translation accuracy. Using BLEU as a metric of translation accuracy, we find that our system performs significantly better than the Alignment Template System, a state-of-the-art phrase-based system.", "keyphrases": ["phrase-based translation", "context-free grammar", "cube pruning", "scfg", "smt system"]} +{"id": "clark-etal-2019-bert", "title": "What Does BERT Look at? An Analysis of BERT's Attention", "abstract": "Large pre-trained neural networks such as BERT have had great recent success in NLP, motivating a growing body of research investigating what aspects of language they are able to learn from unlabeled data. Most recent analysis has focused on model outputs (e.g., language model surprisal) or internal vector representations (e.g., probing classifiers). Complementary to these works, we propose methods for analyzing the attention mechanisms of pre-trained models and apply them to BERT. BERT's attention heads exhibit patterns such as attending to delimiter tokens, specific positional offsets, or broadly attending over the whole sentence, with heads in the same layer often exhibiting similar behaviors. We further show that certain attention heads correspond well to linguistic notions of syntax and coreference. For example, we find heads that attend to the direct objects of verbs, determiners of nouns, objects of prepositions, and coreferent mentions with remarkably high accuracy. Lastly, we propose an attention-based probing classifier and use it to further demonstrate that substantial syntactic information is captured in BERT's attention.", "keyphrases": ["bert", "attention head", "behavior", "coreference", "direct object"]} +{"id": "rajpurkar-etal-2018-know", "title": "Know What You Don't Know: Unanswerable Questions for SQuAD", "abstract": "Extractive reading comprehension systems can often locate the correct answer to a question in a context document, but they also tend to make unreliable guesses on questions for which the correct answer is not stated in the context. Existing datasets either focus exclusively on answerable questions, or use automatically generated unanswerable questions that are easy to identify. To address these weaknesses, we present SQuADRUn, a new dataset that combines the existing Stanford Question Answering Dataset (SQuAD) with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuADRUn, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. SQuADRUn is a challenging natural language understanding task for existing models: a strong neural system that gets 86% F1 on SQuAD achieves only 66% F1 on SQuADRUn. We release SQuADRUn to the community as the successor to SQuAD.", "keyphrases": ["unanswerable question", "squad", "paragraph", "crowdsourcing", "human performance"]} +{"id": "nakov-etal-2013-semeval", "title": "SemEval-2013 Task 2: Sentiment Analysis in Twitter", "abstract": "In recent years, sentiment analysis in social media has attracted a lot of research interest and has been used for a number of applications. 
Unfortunately, research has been hindered by the lack of suitable datasets, complicating the comparison between approaches. To address this issue, we have proposed SemEval-2013 Task 2: Sentiment Analysis in Twitter, which included two subtasks: A, an expression-level subtask, and B, a message-level subtask. We used crowdsourcing on Amazon Mechanical Turk to label a large Twitter training dataset along with additional test sets of Twitter and SMS messages for both subtasks. All datasets used in the evaluation are released to the research community. The task attracted significant interest and a total of 149 submissions from 44 teams. The best-performing team achieved an F1 of 88.9% and 69% for subtasks A and B, respectively.", "keyphrases": ["sentiment analysis", "twitter", "semeval-2013 task", "semantic evaluation", "arabic tweet"]} +{"id": "artetxe-etal-2016-learning", "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", "abstract": "Mapping word embeddings of different languages into a single space has multiple applications. In order to map from a source space into a target space, a common approach is to learn a linear mapping that minimizes the distances between equivalences listed in a bilingual dictionary. In this paper, we propose a framework that generalizes previous work, provides an efficient exact method to learn the optimal linear transformation and yields the best bilingual results in translation induction while preserving monolingual performance in an analogy task.", "keyphrases": ["principled bilingual mapping", "mapping", "word embedding"]} +{"id": "chklovski-pantel-2004-verbocean", "title": "VerbOcean: Mining the Web for Fine-Grained Semantic Verb Relations", "abstract": "Broad-coverage repositories of semantic relations between verbs could benefit many NLP tasks. We present a semi-automatic method for extracting fine-grained semantic relations between verbs. We detect similarity, strength, antonymy, enablement, and temporal happens-before relations between pairs of strongly associated verbs using lexicosyntactic patterns over the Web. On a set of 29,165 strongly associated verb pairs, our extraction algorithm yielded 65.5% accuracy. Analysis of error types shows that on the relation strength we achieved 75% accuracy. We provide the resource, called VERBOCEAN, for download at http://semantics.isi.edu/ocean/.", "keyphrases": ["web", "fine-grained semantic relation", "antonymy", "verbocean", "previous approach"]} +{"id": "nivre-etal-2020-universal", "title": "Universal Dependencies v2: An Evergrowing Multilingual Treebank Collection", "abstract": "Universal Dependencies is an open community effort to create cross-linguistically consistent treebank annotation for many languages within a dependency-based lexicalist framework. The annotation consists in a linguistically motivated word segmentation; a morphological layer comprising lemmas, universal part-of-speech tags, and standardized morphological features; and a syntactic layer focusing on syntactic relations between predicates, arguments and modifiers. 
In this paper, we describe version 2 of the universal guidelines (UD v2), discuss the major changes from UD v1 to UD v2, and give an overview of the currently available treebanks for 90 languages.", "keyphrases": ["treebank", "guideline", "universal dependencies", "project", "pos tag"]} +{"id": "zhao-etal-2018-gender", "title": "Gender Bias in Coreference Resolution: Evaluation and Debiasing Methods", "abstract": "In this paper, we introduce a new benchmark for co-reference resolution focused on gender bias, WinoBias. Our corpus contains Winograd-schema style sentences with entities corresponding to people referred by their occupation (e.g. the nurse, the doctor, the carpenter). We demonstrate that a rule-based, a feature-rich, and a neural coreference system all link gendered pronouns to pro-stereotypical entities with higher accuracy than anti-stereotypical entities, by an average difference of 21.1 in F1 score. Finally, we demonstrate a data-augmentation approach that, in combination with existing word-embedding debiasing techniques, removes the bias demonstrated by these systems in WinoBias without significantly affecting their performance on existing datasets.", "keyphrases": ["coreference resolution", "winobias", "pronoun", "gender bias", "stereotype"]} +{"id": "jiang-zhai-2007-instance", "title": "Instance Weighting for Domain Adaptation in NLP", "abstract": "Domain adaptation is an important problem in natural language processing (NLP) due to the lack of labeled data in novel domains. In this paper, we study the domain adaptation problem from the instance weighting perspective. We formally analyze and characterize the domain adaptation problem from a distributional view, and show that there are two distinct needs for adaptation, corresponding to the different distributions of instances and classification functions in the source and the target domains. We then propose a general instance weighting framework for domain adaptation. Our empirical results on three NLP tasks show that incorporating and exploiting more information from the target domain through instance weighting is effective.", "keyphrases": ["weight", "domain adaptation", "training instance"]} +{"id": "koo-etal-2010-dual", "title": "Dual Decomposition for Parsing with Non-Projective Head Automata", "abstract": "This paper introduces algorithms for non-projective parsing based on dual decomposition. We focus on parsing algorithms for non-projective head automata, a generalization of head-automata models to non-projective structures. The dual decomposition algorithms are simple and efficient, relying on standard dynamic programming and minimum spanning tree algorithms. They provably solve an LP relaxation of the non-projective parsing problem. Empirically the LP relaxation is very often tight: for many languages, exact solutions are achieved on over 98% of test sentences. The accuracy of our models is higher than previous work on a broad range of datasets.", "keyphrases": ["non-projective head automata", "dual decomposition", "dependency parser"]} +{"id": "strubell-etal-2018-linguistically", "title": "Linguistically-Informed Self-Attention for Semantic Role Labeling", "abstract": "Current state-of-the-art semantic role labeling (SRL) uses a deep neural network with no explicit linguistic features. However, prior work has shown that gold syntax trees can dramatically improve SRL decoding, suggesting the possibility of increased accuracy from explicit modeling of syntax. 
In this work, we present linguistically-informed self-attention (LISA): a neural network model that combines multi-head self-attention with multi-task learning across dependency parsing, part-of-speech tagging, predicate detection and SRL. Unlike previous models which require significant pre-processing to prepare linguistic features, LISA can incorporate syntax using merely raw tokens as input, encoding the sequence only once to simultaneously perform parsing, predicate detection and role labeling for all predicates. Syntax is incorporated by training one attention head to attend to syntactic parents for each token. Moreover, if a high-quality syntactic parse is already available, it can be beneficially injected at test time without re-training our SRL model. In experiments on CoNLL-2005 SRL, LISA achieves new state-of-the-art performance for a model using predicted predicates and standard word embeddings, attaining 2.5 F1 absolute higher than the previous state-of-the-art on newswire and more than 3.5 F1 on out-of-domain data, nearly 10% reduction in error. On CoNLL-2012 English SRL we also show an improvement of more than 2.5 F1. LISA also out-performs the state-of-the-art with contextually-encoded (ELMo) word representations, by nearly 1.0 F1 on news and more than 2.0 F1 on out-of-domain text.", "keyphrases": ["self-attention", "semantic role labeling", "head", "auxiliary task"]} +{"id": "wilson-etal-2005-recognizing", "title": "Recognizing Contextual Polarity in Phrase-Level Sentiment Analysis", "abstract": "This paper presents a new approach to phrase-level sentiment analysis that first determines whether an expression is neutral or polar and then disambiguates the polarity of the polar expressions. With this approach, the system is able to automatically identify the contextual polarity for a large subset of sentiment expressions, achieving results that are significantly better than baseline.", "keyphrases": ["contextual polarity", "sentiment analysis", "phrase level", "news article", "negator"]} +{"id": "wei-zou-2019-eda", "title": "EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks", "abstract": "We present EDA: easy data augmentation techniques for boosting performance on text classification tasks. EDA consists of four simple but powerful operations: synonym replacement, random insertion, random swap, and random deletion. On five text classification tasks, we show that EDA improves performance for both convolutional and recurrent neural networks. EDA demonstrates particularly strong results for smaller datasets; on average, across five datasets, training with EDA while using only 50% of the available training set achieved the same accuracy as normal training with all available data. We also performed extensive ablation studies and suggest parameters for practical use.", "keyphrases": ["data augmentation", "text classification task", "synonyms", "random insertion", "eda"]} +{"id": "eskander-rambow-2015-slsa", "title": "SLSA: A Sentiment Lexicon for Standard Arabic", "abstract": "Sentiment analysis has been a major area of interest, for which the existence of high-quality resources is crucial. In Arabic, there is a reasonable number of sentiment lexicons but with major deficiencies. The paper presents a large-scale Standard Arabic Sentiment Lexicon (SLSA) that is publicly available for free and avoids the deficiencies in the current resources. SLSA has the highest up-to-date reported coverage. 
The construction of SLSA is based on linking the lexicon of AraMorph with SentiWordNet along with a few heuristics and powerful back-off. SLSA shows a relative improvement of 37.8% over a state-of-the-art lexicon when tested for accuracy. It also outperforms it by an absolute 3.5% of F1-score when tested for sentiment analysis.", "keyphrases": ["sentiment lexicon", "standard arabic", "slsa"]} +{"id": "mihalcea-tarau-2004-textrank", "title": "TextRank: Bringing Order into Text", "abstract": "In this paper, the authors introduce TextRank, a graph-based ranking model for text processing, and show how this model can be successfully used in natural language applications.", "keyphrases": ["textrank", "node", "graph-based method", "unsupervised approach", "keyphrase extraction method"]} +{"id": "edunov-etal-2018-understanding", "title": "Understanding Back-Translation at Scale", "abstract": "An effective method to improve neural machine translation with monolingual data is to augment the parallel training corpus with back-translations of target language sentences. This work broadens the understanding of back-translation and investigates a number of methods to generate synthetic source sentences. We find that in all but resource poor settings back-translations obtained via sampling or noised beam outputs are most effective. Our analysis shows that sampling or noisy synthetic data gives a much stronger training signal than data generated by beam or greedy search. We also compare how synthetic data compares to genuine bitext and study various domain effects. Finally, we scale to hundreds of millions of monolingual sentences and achieve a new state of the art of 35 BLEU on the WMT'14 English-German test set.", "keyphrases": ["back-translation", "scale", "neural machine translation", "monolingual data", "beam search"]} +{"id": "garrette-baldridge-2013-learning", "title": "Learning a Part-of-Speech Tagger from Two Hours of Annotation", "abstract": "Most work on weakly-supervised learning for part-of-speech taggers has been based on unrealistic assumptions about the amount and quality of training data. For this paper, we attempt to create true low-resource scenarios by allowing a linguist just two hours to annotate data and evaluating on the languages Kinyarwanda and Malagasy. Given these severely limited amounts of either type supervision (tag dictionaries) or token supervision (labeled sentences), we are able to dramatically improve the learning of a hidden Markov model through our method of automatically generalizing the annotations, reducing noise, and inducing word-tag frequency information.", "keyphrases": ["part-of-speech tagger", "hour", "annotated corpora"]} +{"id": "blitzer-etal-2006-domain", "title": "Domain Adaptation with Structural Correspondence Learning", "abstract": "Discriminative learning methods are widely used in natural language processing. These methods work best when their training and test data are drawn from the same distribution. For many NLP tasks, however, we are confronted with new domains in which labeled data is scarce or non-existent. In such cases, we seek to adapt existing models from a resource-rich source domain to a resource-poor target domain. We introduce structural correspondence learning to automatically induce correspondences among features from different domains. 
We test our technique on part of speech tagging and show performance gains for varying amounts of source and target training data, as well as improvements in target domain parsing accuracy using our improved tagger.", "keyphrases": ["tagger", "domain adaptation", "scl", "unlabeled data", "pivot feature"]} +{"id": "clarke-etal-2010-driving", "title": "Driving Semantic Parsing from the World's Response", "abstract": "Current approaches to semantic parsing, the task of converting text to a formal meaning representation, rely on annotated training data mapping sentences to logical forms. Providing this supervision is a major bottleneck in scaling semantic parsers. This paper presents a new learning paradigm aimed at alleviating the supervision burden. We develop two novel learning algorithms capable of predicting complex structures which only rely on a binary feedback signal based on the context of an external world. In addition we reformulate the semantic parsing problem to reduce the dependency of the model on syntactic patterns, thus allowing our parser to scale better using less supervision. Our results surprisingly show that without using any annotated meaning representations learning with a weak feedback signal is capable of producing a parser that is competitive with fully supervised parsers.", "keyphrases": ["semantic parsing", "world", "full logical form"]} +{"id": "resnik-smith-2003-web", "title": "The Web as a Parallel Corpus", "abstract": "Parallel corpora have become an essential resource for work in multilingual natural language processing. In this article, we report on our work using the STRAND system for mining parallel text on the World Wide Web, first reviewing the original algorithm and results and then presenting a set of significant enhancements. These enhancements include the use of supervised learning based on structural features of documents to improve classification performance, a new content-based measure of translational equivalence, and adaptation of the system to take advantage of the Internet Archive for mining parallel text from the Web on a large scale. Finally, the value of these techniques is demonstrated in the construction of a significant parallel corpus for a low-density language pair.", "keyphrases": ["web", "parallel corpus", "low-density language pair", "document structure", "large number"]} +{"id": "berant-etal-2013-semantic", "title": "Semantic Parsing on Freebase from Question-Answer Pairs", "abstract": "In this paper, we train a semantic parser that scales up to Freebase. Instead of relying on annotated logical forms, which is especially expensive to obtain at large scale, we learn from question-answer pairs. The main challenge in this setting is narrowing down the huge number of possible logical predicates for a given question. We tackle this problem in two ways: First, we build a coarse mapping from phrases to predicates using a knowledge base and a large text corpus. Second, we use a bridging operation to generate additional predicates based on neighboring predicates. On the dataset of Cai and Yates (2013), despite not having annotated logical forms, our system outperforms their state-of-the-art parser. 
Additionally, we collected a more realistic and challenging dataset of question-answer pairs, on which our system improves over a natural baseline.", "keyphrases": ["freebase", "question-answer pair", "knowledge base", "semantic parsing", "webquestions dataset"]} +{"id": "dos-santos-gatti-2014-deep", "title": "Deep Convolutional Neural Networks for Sentiment Analysis of Short Texts", "abstract": "Sentiment analysis of short texts such as single sentences and Twitter messages is challenging because of the limited contextual information that they normally contain. Effectively solving this task requires strategies that combine the small text content with prior knowledge and use more than just bag-of-words. In this work we propose a new deep convolutional neural network that exploits from character- to sentence-level information to perform sentiment analysis of short texts. We apply our approach for two corpora of two different domains: the Stanford Sentiment Treebank (SSTb), which contains sentences from movie reviews; and the Stanford Twitter Sentiment corpus (STS), which contains Twitter messages. For the SSTb corpus, our approach achieves state-of-the-art results for single sentence sentiment prediction in both binary positive/negative classification, with 85.7% accuracy, and fine-grained classification, with 48.3% accuracy. For the STS corpus, our approach achieves a sentiment prediction accuracy of 86.4%.", "keyphrases": ["convolutional neural network", "sentiment analysis", "short text"]} +{"id": "hong-etal-2011-using", "title": "Using Cross-Entity Inference to Improve Event Extraction", "abstract": "Event extraction is the task of detecting certain specified types of events that are mentioned in the source language data. The state-of-the-art research on the task is transductive inference (e.g. cross-event inference). In this paper, we propose a new method of event extraction by effectively using cross-entity inference. In contrast to previous inference methods, we regard entity-type consistency as a key feature to predict event mentions. We adopt this inference method to improve the traditional sentence-level event extraction system. Experiments show that we can get 8.6% gain in trigger (event) identification, and more than 11.8% gain for argument (role) classification in ACE event extraction.", "keyphrases": ["cross-entity inference", "event extraction", "syntactic feature"]} +{"id": "chan-ng-2007-domain", "title": "Domain Adaptation with Active Learning for Word Sense Disambiguation", "abstract": "When a word sense disambiguation (WSD) system is trained on one domain but applied to a different domain, a drop in accuracy is frequently observed. This highlights the importance of domain adaptation for word sense disambiguation. In this paper, we first show that an active learning approach can be successfully used to perform domain adaptation of WSD systems. Then, by using the predominant sense predicted by expectation-maximization (EM) and adopting a count-merging technique, we improve the effectiveness of the original adaptation process achieved by the basic active learning approach.", "keyphrases": ["active learning", "word sense disambiguation", "domain adaptation"]} +{"id": "turian-etal-2010-word", "title": "Word Representations: A Simple and General Method for Semi-Supervised Learning", "abstract": "If we take an existing supervised NLP system, a simple and general way to improve accuracy is to use unsupervised word representations as extra word features. 
We evaluate Brown clusters, Collobert and Weston (2008) embeddings, and HLBL (Mnih & Hinton, 2009) embeddings of words on both NER and chunking. We use near state-of-the-art supervised baselines, and find that each of the three word representations improves the accuracy of these baselines. We find further improvements by combining different word representations. You can download our word features, for off-the-shelf use in existing NLP systems, as well as our code, here: http://metaoptimize.com/projects/wordreprs/", "keyphrases": ["semi-supervised learning", "brown cluster", "word representation", "inter alia", "pos tagging"]} +{"id": "baroni-zamparelli-2010-nouns", "title": "Nouns are Vectors, Adjectives are Matrices: Representing Adjective-Noun Constructions in Semantic Space", "abstract": "We propose an approach to adjective-noun composition (AN) for corpus-based distributional semantics that, building on insights from theoretical linguistics, represents nouns as vectors and adjectives as data-induced (linear) functions (encoded as matrices) over nominal vectors. Our model significantly outperforms the rivals on the task of reconstructing AN vectors not seen in training. A small post-hoc analysis further suggests that, when the model-generated AN vector is not similar to the corpus-observed AN vector, this is due to anomalies in the latter. We show moreover that our approach provides two novel ways to represent adjective meanings, alternative to its representation via corpus-based co-occurrence vectors, both outperforming the latter in an adjective clustering task.", "keyphrases": ["semantic space", "noun", "matrix", "compositionality"]} +{"id": "barzilay-lee-2003-learning", "title": "Learning to Paraphrase: An Unsupervised Approach Using Multiple-Sequence Alignment", "abstract": "We address the text-to-text generation problem of sentence-level paraphrasing --- a phenomenon distinct from and more difficult than word- or phrase-level paraphrasing. Our approach applies multiple-sequence alignment to sentences gathered from unannotated comparable corpora: it learns a set of paraphrasing patterns represented by word lattice pairs and automatically determines how to apply these patterns to rewrite new sentences. The results of our evaluation experiments show that the system derives accurate paraphrases, outperforming baseline systems.", "keyphrases": ["paraphrase", "multiple-sequence alignment", "same event", "news article", "barzilay"]} +{"id": "koehn-2005-europarl", "title": "Europarl: A Parallel Corpus for Statistical Machine Translation", "abstract": "We collected a corpus of parallel text in 11 languages from the proceedings of the European Parliament, which are published on the web. This corpus has found widespread use in the NLP community. Here, we focus on its acquisition and its application as training data for statistical machine translation (SMT). We trained SMT systems for 110 language pairs, which reveal interesting clues into the challenges ahead.", "keyphrases": ["statistical machine translation", "europarl", "sentence pair", "bleu score", "test set"]} +{"id": "callison-burch-etal-2006-improved", "title": "Improved Statistical Machine Translation Using Paraphrases", "abstract": "Parallel corpora are crucial for training SMT systems. However, for many language pairs they are available only in very limited quantities. For these language pairs a huge portion of phrases encountered at run-time will be unknown. 
We show how techniques from paraphrasing can be used to deal with these otherwise unknown source language phrases. Our results show that augmenting a state-of-the-art SMT system with paraphrases leads to significantly improved coverage and translation quality. For a training corpus with 10,000 sentence pairs we increase the coverage of unique test set unigrams from 48% to 90%, with more than half of the newly covered items accurately translated, as opposed to none in current approaches.", "keyphrases": ["machine translation", "paraphrase", "pivot language"]} +{"id": "howard-ruder-2018-universal", "title": "Universal Language Model Fine-tuning for Text Classification", "abstract": "Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100 times more data. We open-source our pretrained models and code.", "keyphrases": ["text classification", "transfer learning", "ulmfit", "downstream task", "pre-training"]} +{"id": "och-2003-minimum", "title": "Minimum Error Rate Training in Statistical Machine Translation", "abstract": "Often, the training procedure for statistical machine translation models is based on maximum likelihood or related criteria. A general problem of this approach is that there is only a loose relation to the final translation quality on unseen text. In this paper, we analyze various training criteria which directly optimize translation quality. These training criteria make use of recently proposed automatic evaluation metrics. We describe a new algorithm for efficiently training an unsmoothed error count. We show that significantly better results can often be obtained if the final evaluation criterion is taken directly into account as part of the training procedure.", "keyphrases": ["error rate training", "statistical machine translation", "evaluation metric", "mert", "weight"]} +{"id": "surdeanu-etal-2012-multi", "title": "Multi-instance Multi-label Learning for Relation Extraction", "abstract": "Distant supervision for relation extraction (RE) -- gathering training data by aligning a database of facts with text -- is an efficient approach to scale RE to thousands of different relations. However, this introduces a challenging learning scenario where the relation expressed by a pair of entities found in a sentence is unknown. For example, a sentence containing Balzac and France may express BornIn or Died, an unknown relation, or no relation at all. Because of this, traditional supervised learning, which assumes that each example is explicitly mapped to a label, is not appropriate. We propose a novel approach to multi-instance multi-label learning for RE, which jointly models all the instances of a pair of entities in text and all their labels using a graphical model with latent variables. 
Our model performs competitively on two difficult domains.", "keyphrases": ["relation extraction", "distant supervision", "multi-instance multi-label learning", "knowledge base", "learning problem"]} +{"id": "jiao-etal-2020-tinybert", "title": "TinyBERT: Distilling BERT for Natural Language Understanding", "abstract": "Language model pre-training, such as BERT, has significantly improved the performance of many natural language processing tasks. However, pre-trained language models are usually computationally expensive, so it is difficult to efficiently execute them on resource-restricted devices. To accelerate inference and reduce model size while maintaining accuracy, we first propose a novel Transformer distillation method that is specially designed for knowledge distillation (KD) of the Transformer-based models. By leveraging this new KD method, the abundant knowledge encoded in a large \u201cteacher\u201d BERT can be effectively transferred to a small \u201cstudent\u201d TinyBERT. Then, we introduce a new two-stage learning framework for TinyBERT, which performs Transformer distillation at both the pre-training and task-specific learning stages. This framework ensures that TinyBERT can capture the general-domain as well as the task-specific knowledge in BERT. TinyBERT4 with 4 layers is empirically effective and achieves more than 96.8% of the performance of its teacher BERT-Base on the GLUE benchmark, while being 7.5x smaller and 9.4x faster on inference. TinyBERT4 is also significantly better than 4-layer state-of-the-art baselines on BERT distillation, with only ~28% of their parameters and ~31% of their inference time. Moreover, TinyBERT6 with 6 layers performs on par with its teacher BERT-Base.", "keyphrases": ["natural language understanding", "knowledge distillation", "teacher", "tinybert", "data augmentation"]} +{"id": "francopoulo-etal-2006-lexical", "title": "Lexical Markup Framework (LMF)", "abstract": "Optimizing the production, maintenance and extension of lexical resources is one of the crucial aspects impacting Natural Language Processing (NLP). A second aspect involves optimizing the process leading to their integration in applications. In this respect, we believe that the production of a consensual specification on lexicons can be a useful aid for the various NLP actors. Within ISO, the purpose of LMF is to define a standard for lexicons. LMF is a model that provides a common standardized framework for the construction of NLP lexicons. The goals of LMF are to provide a common model for the creation and use of lexical resources, to manage the exchange of data between and among these resources, and to enable the merging of a large number of individual electronic resources to form extensive global electronic resources. In this paper, we describe the work in progress within the sub-group ISO-TC37/SC4/WG4. Various experts from many countries have been consulted in order to take into account best practices in many languages for (we hope) all kinds of NLP lexicons.", "keyphrases": ["lmf", "standardized framework", "lexical markup framework"]} +{"id": "denis-baldridge-2007-joint", "title": "Joint Determination of Anaphoricity and Coreference Resolution using Integer Programming", "abstract": "Standard pairwise coreference resolution systems are subject to errors resulting from their performing anaphora identification as an implicit part of coreference resolution. 
In this paper, we propose an integer linear programming (ILP) formulation for coreference resolution which models anaphoricity and coreference as a joint task, such that each local model informs the other for the final assignments. This joint ILP formulation provides F-score improvements of 3.7-5.3% over a base coreference classifier on the ACE datasets.", "keyphrases": ["anaphoricity", "coreference resolution", "integer programming"]} +{"id": "hendrickx-etal-2009-semeval", "title": "SemEval-2010 Task 8: Multi-Way Classification of Semantic Relations Between Pairs of Nominals", "abstract": "We present a brief overview of the main challenges in the extraction of semantic relations from English text, and discuss the shortcomings of previous data sets and shared tasks. This leads us to introduce a new task, which will be part of SemEval-2010: multi-way classification of mutually exclusive semantic relations between pairs of common nominals. The task is designed to compare different approaches to the problem and to provide a standard testbed for future research, which can benefit many applications in Natural Language Processing.", "keyphrases": ["multi-way classification", "semantic relations", "nominal"]} +{"id": "tan-bansal-2019-lxmert", "title": "LXMERT: Learning Cross-Modality Encoder Representations from Transformers", "abstract": "Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly, the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative pre-training tasks: masked language modeling, masked object prediction (feature regression and label classification), cross-modality matching, and image question answering. These tasks help in learning both intra-modality and cross-modality relationships. After fine-tuning from our pre-trained parameters, our model achieves state-of-the-art results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our pre-trained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR2, and improve the previous best result by 22% absolute (54% to 76%). Lastly, we demonstrate detailed ablation studies to prove that both our novel model components and pre-training strategies significantly contribute to our strong results. Code and pre-trained models publicly available at: ", "keyphrases": ["cross-modality encoder representations", "transformer", "vision", "visual question answering", "lxmert"]} +{"id": "kwiatkowksi-etal-2010-inducing", "title": "Inducing Probabilistic CCG Grammars from Logical Form with Higher-Order Unification", "abstract": "This paper addresses the problem of learning to map sentences to logical form, given training data consisting of natural language sentences paired with logical representations of their meaning. Previous approaches have been designed for particular natural languages or specific meaning representations; here we present a more general method. 
The approach induces a probabilistic CCG grammar that represents the meaning of individual words and defines how these meanings can be combined to analyze complete sentences. We use higher-order unification to define a hypothesis space containing all grammars consistent with the training data, and develop an online learning algorithm that efficiently searches this space while simultaneously estimating the parameters of a log-linear parsing model. Experiments demonstrate high accuracy on benchmark data sets in four languages with two different meaning representations.", "keyphrases": ["ccg", "logical form", "natural language sentence"]} +{"id": "mayhew-etal-2020-simultaneous", "title": "Simultaneous Translation and Paraphrase for Language Education", "abstract": "We present the task of Simultaneous Translation and Paraphrasing for Language Education (STAPLE). Given a prompt in one language, the goal is to generate a diverse set of correct translations that language learners are likely to produce. This is motivated by the need to create and maintain large, high-quality sets of acceptable translations for exercises in a language-learning application, and synthesizes work spanning machine translation, MT evaluation, automatic paraphrasing, and language education technology. We developed a novel corpus with unique properties for five languages (Hungarian, Japanese, Korean, Portuguese, and Vietnamese), and report on the results of a shared task challenge which attracted 20 teams to solve the task. In our meta-analysis, we focus on three aspects of the resulting systems: external training corpus selection, model architecture and training decisions, and decoding and filtering strategies. We find that strong systems start with a large amount of generic training data, and then fine-tune with in-domain data, sampled according to our provided learner response frequencies.", "keyphrases": ["paraphrase", "language education", "simultaneous translation"]} +{"id": "ghosal-etal-2019-dialoguegcn", "title": "DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation", "abstract": "Emotion recognition in conversation (ERC) has lately received much attention from researchers due to its potential widespread applications in diverse areas, such as health-care, education, and human resources. In this paper, we present Dialogue Graph Convolutional Network (DialogueGCN), a graph neural network-based approach to ERC. We leverage self and inter-speaker dependency of the interlocutors to model conversational context for emotion recognition. Through the graph network, DialogueGCN addresses context propagation issues present in the current RNN-based methods. We empirically show that this method alleviates such issues, while outperforming the current state of the art on a number of benchmark emotion classification datasets.", "keyphrases": ["emotion recognition", "conversation", "graph convolutional network"]} +{"id": "poria-etal-2019-meld", "title": "MELD: A Multimodal Multi-Party Dataset for Emotion Recognition in Conversations", "abstract": "Emotion recognition in conversations is a challenging task that has recently gained popularity due to its potential applications. Until now, however, a large-scale multimodal multi-party emotional conversational database containing more than two speakers per dialogue was missing. Thus, we propose the Multimodal EmotionLines Dataset (MELD), an extension and enhancement of EmotionLines. 
MELD contains about 13,000 utterances from 1,433 dialogues from the TV series Friends. Each utterance is annotated with emotion and sentiment labels, and encompasses audio, visual and textual modalities. We propose several strong multimodal baselines and show the importance of contextual and multimodal information for emotion recognition in conversations. The full dataset is available for use at .", "keyphrases": ["emotion recognition", "conversation", "modality", "meld"]} +{"id": "bojar-etal-2017-findings", "title": "Findings of the 2017 Conference on Machine Translation (WMT17)", "abstract": "This paper presents the results of the WMT17 shared tasks, which included three machine translation (MT) tasks (news, biomedical, and multimodal), two evaluation tasks (metrics and run-time estimation of MT quality), an automatic post-editing task, a neural MT training task, and a bandit learning task.", "keyphrases": ["conference", "machine translation", "human evaluation", "state-of-the-art result", "bleu score"]} +{"id": "bowman-etal-2015-large", "title": "A large annotated corpus for learning natural language inference", "abstract": "Understanding entailment and contradiction is fundamental to understanding natural language, and inference about entailment and contradiction is a valuable testing ground for the development of semantic representations. However, machine learning research in this area has been dramatically limited by the lack of large-scale resources. To address this, we introduce the Stanford Natural Language Inference corpus, a new, freely available collection of labeled sentence pairs, written by humans doing a novel grounded task based on image captioning. At 570K pairs, it is two orders of magnitude larger than all other resources of its type. This increase in scale allows lexicalized classifiers to outperform some sophisticated existing entailment models, and it allows a neural network-based model to perform competitively on natural language inference benchmarks for the first time.", "keyphrases": ["natural language inference", "entailment", "nli", "annotated dataset", "text pair"]} +{"id": "lin-2004-rouge", "title": "ROUGE: A Package for Automatic Evaluation of Summaries", "abstract": "ROUGE stands for Recall-Oriented Understudy for Gisting Evaluation. It includes measures to automatically determine the quality of a summary by comparing it to other (ideal) summaries created by humans. The measures count the number of overlapping units such as n-gram, word sequences, and word pairs between the computer-generated summary to be evaluated and the ideal summaries created by humans. This paper introduces four different ROUGE measures: ROUGE-N, ROUGE-L, ROUGE-W, and ROUGE-S included in the ROUGE summarization evaluation package and their evaluations. Three of them have been used in the Document Understanding Conference (DUC) 2004, a large-scale summarization evaluation sponsored by NIST.", "keyphrases": ["automatic evaluation", "summarization", "rouge"]} +{"id": "jean-etal-2015-using", "title": "On Using Very Large Target Vocabulary for Neural Machine Translation", "abstract": "Neural machine translation, a recently proposed approach to machine translation based purely on neural networks, has shown promising results compared to the existing approaches such as phrase-based statistical machine translation. 
Despite its recent success, neural machine translation has limitations in handling a larger vocabulary, as training complexity as well as decoding complexity increase proportionally to the number of target words. In this paper, we propose a method based on importance sampling that allows us to use a very large target vocabulary without increasing training complexity. We show that decoding can be efficiently done even with the model having a very large target vocabulary by selecting only a small subset of the whole target vocabulary. The models trained by the proposed approach are empirically found to outperform the baseline models with a small vocabulary as well as the LSTM-based neural machine translation models. Furthermore, when we use the ensemble of a few models with very large target vocabularies, we achieve the state-of-the-art translation performance (measured by BLEU) on the English→German translation and almost as high performance as the state-of-the-art English→French translation system.", "keyphrases": ["target vocabulary", "neural machine translation", "state-of-the-art translation performance"]} +{"id": "popescu-etzioni-2005-extracting", "title": "Extracting Product Features and Opinions from Reviews", "abstract": "Consumers are often forced to wade through many on-line reviews in order to make an informed product choice. This paper introduces Opine, an unsupervised information-extraction system which mines reviews in order to build a model of important product features, their evaluation by reviewers, and their relative quality across products. Compared to previous work, Opine achieves 22% higher precision (with only 3% lower recall) on the feature extraction task. Opine's novel use of relaxation labeling for finding the semantic orientation of words in context leads to strong performance on the tasks of finding opinion phrases and their polarity.", "keyphrases": ["product feature", "review", "sentiment analysis", "syntactic pattern", "information extraction system"]} +{"id": "agirre-etal-2013-sem", "title": "*SEM 2013 shared task: Semantic Textual Similarity", "abstract": "In Semantic Textual Similarity (STS), systems rate the degree of semantic equivalence, on a graded scale from 0 to 5, with 5 being the most similar. This year we set up two tasks: (i) a core task (CORE), and (ii) a typed-similarity task (TYPED). CORE is similar in set-up to the SemEval STS 2012 task with pairs of sentences from sources related to those of 2012, yet different in genre from the 2012 set, namely, this year we included newswire headlines, machine translation evaluation datasets and multiple lexical resource glossed sets. TYPED, on the other hand, is novel and tries to characterize why two items are deemed similar, using cultural heritage items which are described with metadata such as title, author or description. Several types of similarity have been defined, including similar author, similar time period or similar location. The annotation for both tasks leverages crowdsourcing, with relatively high inter-annotator correlation, ranging from 62% to 87%. 
The CORE task attracted 34 participants with 89 runs, and the TYPED task attracted 6 teams with 14 runs.", "keyphrases": ["semantic textual similarity", "sts", "semeval", "long text segment"]} +{"id": "koehn-etal-2003-statistical", "title": "Statistical Phrase-Based Translation", "abstract": "We propose a new phrase-based translation model and decoding algorithm that enables us to evaluate and compare several previously proposed phrase-based translation models. Within our framework, we carry out a large number of experiments to understand better and explain why phrase-based models out-perform word-based models. Our empirical results, which hold for all examined language pairs, suggest that the highest levels of performance can be obtained through relatively simple means: heuristic learning of phrase translations from word-based alignments and lexical weighting of phrase translations. Surprisingly, learning phrases longer than three words and learning phrases from high-accuracy word-level alignment models does not have a strong impact on performance. Learning only syntactically motivated phrases degrades the performance of our systems.", "keyphrases": ["phrase-based translation", "heuristic", "smt system", "parallel corpora", "distortion model"]} +{"id": "zeng-etal-2014-relation", "title": "Relation Classification via Convolutional Deep Neural Network", "abstract": "The state-of-the-art methods used for relation classification are primarily based on statistical machine learning, and their performance strongly depends on the quality of the extracted features. The extracted features are often derived from the output of pre-existing natural language processing (NLP) systems, which leads to the propagation of the errors in the existing tools and hinders the performance of these systems. In this paper, we exploit a convolutional deep neural network (DNN) to extract lexical and sentence level features. Our method takes all of the word tokens as input without complicated pre-processing. First, the word tokens are transformed to vectors by looking up word embeddings. Then, lexical level features are extracted according to the given nouns. Meanwhile, sentence level features are learned using a convolutional approach. These two levels of features are concatenated to form the final extracted feature vector. Finally, the features are fed into a softmax classifier to predict the relationship between two marked nouns. The experimental results demonstrate that our approach significantly outperforms the state-of-the-art methods.", "keyphrases": ["sentence level feature", "relation classification", "convolutional neural network", "cnns", "learning method"]} +{"id": "zhou-etal-2011-phrase", "title": "Phrase-Based Translation Model for Question Retrieval in Community Question Answer Archives", "abstract": "Community-based question answer (Q&A) has become an important issue due to the popularity of Q&A archives on the web. This paper is concerned with the problem of question retrieval. Question retrieval in Q&A archives aims to find historical questions that are semantically equivalent or relevant to the queried questions. In this paper, we propose a novel phrase-based translation model for question retrieval. Compared to the traditional word-based translation models, the phrase-based translation model is more effective because it captures contextual information in modeling the translation of phrases as a whole, rather than translating single words in isolation. 
Experiments conducted on real Q&A data demonstrate that our proposed phrase-based translation model significantly outperforms the state-of-the-art word-based translation model.", "keyphrases": ["translation model", "question retrieval", "web"]} +{"id": "nivre-etal-2007-conll", "title": "The CoNLL 2007 Shared Task on Dependency Parsing", "abstract": "The Conference on Computational Natural Language Learning features a shared task, in which participants train and test their learning systems on the same data sets. In 2007, as in 2006, the shared task has been devoted to dependency parsing, this year with both a multilingual track and a domain adaptation track. In this paper, we define the tasks of the different tracks and describe how the data sets were created from existing treebanks for ten languages. In addition, we characterize the different approaches of the participating systems, report the test results, and provide a first analysis of these results.", "keyphrases": ["conll", "dependency parsing", "availability", "such language", "arabic"]} +{"id": "shen-etal-2016-minimum", "title": "Minimum Risk Training for Neural Machine Translation", "abstract": "We propose minimum risk training for end-to-end neural machine translation. Unlike conventional maximum likelihood estimation, minimum risk training is capable of optimizing model parameters directly with respect to arbitrary evaluation metrics, which are not necessarily differentiable. Experiments show that our approach achieves significant improvements over maximum likelihood estimation on a state-of-the-art neural machine translation system across various language pairs. Transparent to architectures, our approach can be applied to more neural networks and potentially benefit more NLP tasks.", "keyphrases": ["neural machine translation", "evaluation metric", "minimum risk training", "mrt", "sentence-level bleu"]} +{"id": "snow-etal-2008-cheap", "title": "Cheap and Fast \u2013 But is it Good? Evaluating Non-Expert Annotations for Natural Language Tasks", "abstract": "Human linguistic annotation is crucial for many natural language processing tasks but can be expensive and time-consuming. We explore the use of Amazon's Mechanical Turk system, a significantly cheaper and faster method for collecting annotations from a broad base of paid non-expert contributors over the Web. We investigate five tasks: affect recognition, word similarity, recognizing textual entailment, event temporal ordering, and word sense disambiguation. For all five, we show high agreement between Mechanical Turk non-expert annotations and existing gold standard labels provided by expert labelers. For the task of affect recognition, we also show that using non-expert labels for training machine learning algorithms can be as effective as using gold standard annotations from experts. We propose a technique for bias correction that significantly improves annotation quality on two tasks. We conclude that many large labeling tasks can be effectively designed and carried out with this method at a fraction of the usual expense.", "keyphrases": ["annotator", "natural language task", "crowdsourcing", "mturk", "cost"]} +{"id": "baccianella-etal-2010-sentiwordnet", "title": "SentiWordNet 3.0: An Enhanced Lexical Resource for Sentiment Analysis and Opinion Mining", "abstract": "In this work we present SENTIWORDNET 3.0, a lexical resource explicitly devised for supporting sentiment classification and opinion mining applications. 
SENTIWORDNET 3.0 is an improved version of SENTIWORDNET 1.0, a lexical resource publicly available for research purposes, currently licensed to more than 300 research groups and used in a variety of research projects worldwide. Both SENTIWORDNET 1.0 and 3.0 are the result of automatically annotating all WORDNET synsets according to their degrees of positivity, negativity, and neutrality. SENTIWORDNET 1.0 and 3.0 differ (a) in the versions of WORDNET which they annotate (WORDNET 2.0 and 3.0, respectively), (b) in the algorithm used for automatically annotating WORDNET, which now includes (in addition to the previous semi-supervised learning step) a random-walk step for refining the scores. We here discuss SENTIWORDNET 3.0, especially focussing on the improvements concerning aspect (b) that it embodies with respect to version 1.0. We also report the results of evaluating SENTIWORDNET 3.0 against a fragment of WORDNET 3.0 manually annotated for positivity, negativity, and neutrality; these results indicate accuracy improvements of about 20% with respect to SENTIWORDNET 1.0.", "keyphrases": ["sentiwordnet", "sentiment polarity", "entry"]} +{"id": "joshi-etal-2020-spanbert", "title": "SpanBERT: Improving Pre-training by Representing and Predicting Spans", "abstract": "We present SpanBERT, a pre-training method that is designed to better represent and predict spans of text. Our approach extends BERT by (1) masking contiguous random spans, rather than random tokens, and (2) training the span boundary representations to predict the entire content of the masked span, without relying on the individual token representations within it. SpanBERT consistently outperforms BERT and our better-tuned baselines, with substantial gains on span selection tasks such as question answering and coreference resolution. In particular, with the same training data and model size as BERT-large, our single model obtains 94.6% and 88.7% F1 on SQuAD 1.1 and 2.0 respectively. We also achieve a new state of the art on the OntoNotes coreference resolution task (79.6% F1), strong performance on the TACRED relation extraction benchmark, and even gains on GLUE.", "keyphrases": ["coreference resolution", "spanbert", "language model", "pre-training objective", "downstream task"]} +{"id": "somasundaran-wiebe-2009-recognizing", "title": "Recognizing Stances in Online Debates", "abstract": "This paper presents an unsupervised opinion analysis method for debate-side classification, i.e., recognizing which stance a person is taking in an online debate. In order to handle the complexities of this genre, we mine the web to learn associations that are indicative of opinion stances in debates. We combine this knowledge with discourse information, and formulate the debate side classification task as an Integer Linear Programming problem. Our results show that our method is substantially better than challenging baseline methods.", "keyphrases": ["stance", "online debate", "argument trigger expression"]} +{"id": "pennacchiotti-etal-2008-automatic", "title": "Automatic induction of FrameNet lexical units", "abstract": "Most attempts to integrate FrameNet in NLP systems have so far failed because of its limited coverage. In this paper, we investigate the applicability of distributional and WordNet-based models on the task of lexical unit induction, i.e. the expansion of FrameNet with new lexical units. 
Experimental results show that our distributional and WordNet-based models achieve a good level of accuracy and coverage, especially when combined.", "keyphrases": ["framenet", "lexical unit", "automatic induction"]} +{"id": "reimers-gurevych-2019-sentence", "title": "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", "abstract": "BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that uses siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embedding methods.", "keyphrases": ["sentence embedding", "siamese architecture", "mnli dataset"]} +{"id": "zoph-etal-2016-transfer", "title": "Transfer Learning for Low-Resource Neural Machine Translation", "abstract": "The encoder-decoder framework for neural machine translation (NMT) has been shown effective in large data scenarios, but is much less effective for low-resource languages. We present a transfer learning method that significantly improves Bleu scores across a range of low-resource languages. Our key idea is to first train a high-resource language pair (the parent model), then transfer some of the learned parameters to the low-resource pair (the child model) to initialize and constrain training. Using our transfer learning method we improve baseline NMT models by an average of 5.6 Bleu on four low-resource language pairs. Ensembling and unknown word replacement add another 2 Bleu which brings the NMT performance on low-resource machine translation close to a strong syntax-based machine translation (SBMT) system, exceeding its performance on one language pair. Additionally, using the transfer learning model for re-scoring, we can improve the SBMT system by an average of 1.3 Bleu, improving the state-of-the-art on low-resource machine translation.", "keyphrases": ["neural machine translation", "nmt model", "low-resource language pair", "transfer learning", "translation quality"]} +{"id": "cho-etal-2014-properties", "title": "On the Properties of Neural Machine Translation: Encoder\u2013Decoder Approaches", "abstract": "Neural machine translation is a relatively new approach to statistical machine translation based purely on neural networks. The neural machine translation models often consist of an encoder and a decoder. The encoder extracts a fixed-length representation from a variable-length input sentence, and the decoder generates a correct translation from this representation. 
In this paper, we focus on analyzing the properties of the neural machine translation using two models: RNN Encoder-Decoder and a newly proposed gated recursive convolutional neural network. We show that the neural machine translation performs relatively well on short sentences without unknown words, but its performance degrades rapidly as the length of the sentence and the number of unknown words increase. Furthermore, we find that the proposed gated recursive convolutional network learns a grammatical structure of a sentence automatically.", "keyphrases": ["neural machine translation", "rnn", "source sentence", "encoder-decoder architecture", "recurrent unit"]} +{"id": "socher-etal-2013-recursive", "title": "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank", "abstract": "Semantic word spaces have been very useful but cannot express the meaning of longer phrases in a principled way. Further progress towards understanding compositionality in tasks such as sentiment detection requires richer supervised training and evaluation resources and more powerful models of composition. To remedy this, we introduce a Sentiment Treebank. It includes fine-grained sentiment labels for 215,154 phrases in the parse trees of 11,855 sentences and presents new challenges for sentiment compositionality. To address them, we introduce the Recursive Neural Tensor Network. When trained on the new treebank, this model outperforms all previous methods on several metrics. It pushes the state of the art in single sentence positive/negative classification from 80% up to 85.4%. The accuracy of predicting fine-grained sentiment labels for all phrases reaches 80.7%, an improvement of 9.7% over bag-of-features baselines. Lastly, it is the only model that can accurately capture the effects of negation and its scope at various tree levels for both positive and negative phrases.", "keyphrases": ["sentiment treebank", "composition", "recursive neural network", "movie review", "node"]} +{"id": "dyer-etal-2013-simple", "title": "A Simple, Fast, and Effective Reparameterization of IBM Model 2", "abstract": "We present a simple log-linear reparameterization of IBM Model 2 that overcomes problems arising from Model 1\u2019s strong assumptions and Model 2\u2019s overparameterization. Efficient inference, likelihood evaluation, and parameter estimation algorithms are provided. Training the model is consistently ten times faster than Model 4. On three large-scale translation tasks, systems built using our alignment model outperform IBM Model 4. An open-source implementation of the alignment model described in this paper is available from http://github.com/clab/fast_align.", "keyphrases": ["reparameterization", "ibm model", "fast_align", "variation"]} +{"id": "howcroft-etal-2020-twenty", "title": "Twenty Years of Confusion in Human Evaluation: NLG Needs Evaluation Sheets and Standardised Definitions", "abstract": "Human assessment remains the most trusted form of evaluation in NLG, but highly diverse approaches and a proliferation of different quality criteria used by researchers make it difficult to compare results and draw conclusions across papers, with adverse implications for meta-evaluation and reproducibility. 
In this paper, we present (i) our dataset of 165 NLG papers with human evaluations, (ii) the annotation scheme we developed to label the papers for different aspects of evaluations, (iii) quantitative analyses of the annotations, and (iv) a set of recommendations for improving standards in evaluation reporting. We use the annotations as a basis for examining information included in evaluation reports, and levels of consistency in approaches, experimental design and terminology, focusing in particular on the 200+ different terms that have been used for evaluated aspects of quality. We conclude that due to a pervasive lack of clarity in reports and extreme diversity in approaches, human evaluation in NLG presents as extremely confused in 2020, and that the field is in urgent need of standard methods and terminology.", "keyphrases": ["human evaluation", "nlg", "open-ended text generation"]} +{"id": "ghosal-etal-2020-cosmic", "title": "COSMIC: COmmonSense knowledge for eMotion Identification in Conversations", "abstract": "In this paper, we address the task of utterance-level emotion recognition in conversations using commonsense knowledge. We propose COSMIC, a new framework that incorporates different elements of commonsense such as mental states, events, and causal relations, and builds upon them to learn interactions between interlocutors participating in a conversation. Current state-of-the-art methods often encounter difficulties in context propagation, emotion shift detection, and differentiating between related emotion classes. By learning distinct commonsense representations, COSMIC addresses these challenges and achieves new state-of-the-art results for emotion recognition on four different benchmark conversational datasets. Our code is available at .", "keyphrases": ["commonsense knowledge", "conversation", "emotion recognition"]} +{"id": "bowman-etal-2016-generating", "title": "Generating Sentences from a Continuous Space", "abstract": "The standard recurrent neural network language model (RNNLM) generates sentences one word at a time and does not work from an explicit global sentence representation. In this work, we introduce and study an RNN-based variational autoencoder generative model that incorporates distributed latent representations of entire sentences. This factorization allows it to explicitly model holistic properties of sentences such as style, topic, and high-level syntactic features. Samples from the prior over these sentence representations remarkably produce diverse and well-formed sentences through simple deterministic decoding. By examining paths through this latent space, we are able to generate coherent novel sentences that interpolate between known sentences. We present techniques for solving the difficult learning problem presented by this model, demonstrate its effectiveness in imputing missing words, explore many interesting properties of the model's latent sentence space, and present negative results on the use of the model in language modeling.", "keyphrases": ["continuous space", "variational autoencoder", "vae", "latent variable", "text generation"]} +{"id": "artetxe-etal-2018-robust", "title": "A robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings", "abstract": "Recent work has managed to learn cross-lingual word embeddings without parallel data by mapping monolingual embeddings to a shared space through adversarial training. 
However, their evaluation has focused on favorable conditions, using comparable corpora or closely-related languages, and we show that they often fail in more realistic scenarios. This work proposes an alternative approach based on a fully unsupervised initialization that explicitly exploits the structural similarity of the embeddings, and a robust self-learning algorithm that iteratively improves this solution. Our method succeeds in all tested scenarios and obtains the best published results in standard datasets, even surpassing previous supervised systems. Our implementation is released as an open source project at .", "keyphrases": ["self-learning method", "mapping", "word embedding", "different language", "unsupervised learning"]} +{"id": "waseem-etal-2017-understanding", "title": "Understanding Abuse: A Typology of Abusive Language Detection Subtasks", "abstract": "As the body of research on abusive language detection and analysis grows, there is a need for critical consideration of the relationships between different subtasks that have been grouped under this label. Based on work on hate speech, cyberbullying, and online abuse we propose a typology that captures central similarities and differences between subtasks and discuss the implications of this for data annotation and feature construction. We emphasize the practical actions that can be taken by researchers to best approach their abusive language detection subtask of interest.", "keyphrases": ["abuse", "abusive language detection", "hate speech", "cyberbullying", "trolling"]} +{"id": "li-etal-2016-diversity", "title": "A Diversity-Promoting Objective Function for Neural Conversation Models", "abstract": "Sequence-to-sequence neural network models for generation of conversational responses tend to generate safe, commonplace responses (e.g., \"I don't know\") regardless of the input. We suggest that the traditional objective function, i.e., the likelihood of output (response) given input (message) is unsuited to response generation tasks. Instead we propose using Maximum Mutual Information (MMI) as the objective function in neural models. Experimental results demonstrate that the proposed MMI models produce more diverse, interesting, and appropriate responses, yielding substantive gains in BLEU scores on two conversational datasets and in human evaluations.", "keyphrases": ["objective function", "neural conversation model", "diversity", "dialogue generation", "language model"]} +{"id": "schmidt-wiegand-2017-survey", "title": "A Survey on Hate Speech Detection using Natural Language Processing", "abstract": "This paper presents a survey on hate speech detection. Given the steadily growing body of social media content, the amount of online hate speech is also increasing. Due to the massive scale of the web, methods that automatically detect hate speech are required. Our survey describes key areas that have been explored to automatically recognize these types of utterances using natural language processing. We also discuss limits of those approaches.", "keyphrases": ["hate speech detection", "language detection", "abusive language", "cyberbullying", "offensive content"]} +{"id": "sun-etal-2019-utilizing", "title": "Utilizing BERT for Aspect-Based Sentiment Analysis via Constructing Auxiliary Sentence", "abstract": "Aspect-based sentiment analysis (ABSA), which aims to identify fine-grained opinion polarity towards a specific aspect, is a challenging subtask of sentiment analysis (SA). 
In this paper, we construct an auxiliary sentence from the aspect and convert ABSA to a sentence-pair classification task, such as question answering (QA) and natural language inference (NLI). We fine-tune the pre-trained model from BERT and achieve new state-of-the-art results on SentiHood and SemEval-2014 Task 4 datasets. The source code is available at .", "keyphrases": ["sentiment analysis", "auxiliary sentence", "aspect category"]} +{"id": "langlais-patry-2007-translating", "title": "Translating Unknown Words by Analogical Learning", "abstract": "Unknown words are a well-known hindrance to natural language applications. In particular, they drastically impact machine translation quality. An easy way out that commercial translation systems usually offer their users is the possibility to add unknown words and their translations into a dedicated lexicon. Recently, Stroppa and Yvon (2005) have shown how analogical learning alone deals nicely with morphology in different languages. In this study we show that analogical learning also offers an elegant and effective solution to the problem of identifying potential translations of unknown words.", "keyphrases": ["unknown word", "analogical learning", "european language"]} +{"id": "och-ney-2004-alignment", "title": "The Alignment Template Approach to Statistical Machine Translation", "abstract": "A phrase-based statistical machine translation approach, the alignment template approach, is described. This translation approach allows for general many-to-many relations between words. Thereby, the context of words is taken into account in the translation model, and local changes in word order from source to target language can be learned explicitly. The model is described using a log-linear modeling approach, which is a generalization of the often-used source-channel approach. Thereby, the model is easier to extend than classical statistical machine translation systems. We describe in detail the process for learning phrasal translations, the feature functions used, and the search algorithm. The evaluation of this approach is performed on three different tasks. For the German-English speech Verbmobil task, we analyze the effect of various system components. On the French-English Canadian Hansards task, the alignment template system obtains significantly better results than a single-word-based translation model. In the Chinese-English 2002 National Institute of Standards and Technology (NIST) machine translation evaluation it yields statistically significantly better NIST scores than all competing research and commercial translation systems.", "keyphrases": ["alignment template approach", "machine translation", "log-linear model", "smt system", "unit"]} +{"id": "kobayashi-2018-contextual", "title": "Contextual Augmentation: Data Augmentation by Words with Paradigmatic Relations", "abstract": "We propose a novel data augmentation for labeled sentences called contextual augmentation. We assume an invariance that sentences are natural even if the words in the sentences are replaced with other words with paradigmatic relations. We stochastically replace words with other words that are predicted by a bi-directional language model at the word positions. Words predicted according to a context are numerous but appropriate for the augmentation of the original words. Furthermore, we retrofit a language model with a label-conditional architecture, which allows the model to augment sentences without breaking the label-compatibility. 
Through experiments on six different text classification tasks, we demonstrate that the proposed method improves classifiers based on convolutional or recurrent neural networks.", "keyphrases": ["paradigmatic relation", "language model", "contextual augmentation"]} +{"id": "ribeiro-etal-2020-beyond", "title": "Beyond Accuracy: Behavioral Testing of NLP Models with CheckList", "abstract": "Although measuring held-out accuracy has been the primary approach to evaluate generalization, it often overestimates the performance of NLP models, while alternative approaches for evaluating models either focus on individual tasks or on specific behaviors. Inspired by principles of behavioral testing in software engineering, we introduce CheckList, a task-agnostic methodology for testing NLP models. CheckList includes a matrix of general linguistic capabilities and test types that facilitate comprehensive test ideation, as well as a software tool to generate a large and diverse number of test cases quickly. We illustrate the utility of CheckList with tests for three tasks, identifying critical failures in both commercial and state-of-the-art models. In a user study, a team responsible for a commercial sentiment analysis model found new and actionable bugs in an extensively tested model. In another user study, NLP practitioners with CheckList created twice as many tests, and found almost three times as many bugs as users without it.", "keyphrases": ["behavioral testing", "checklist", "negation", "testing framework", "change"]} +{"id": "marton-etal-2009-improved", "title": "Improved Statistical Machine Translation Using Monolingually-Derived Paraphrases", "abstract": "Untranslated words still constitute a major problem for Statistical Machine Translation (SMT), and current SMT systems are limited by the quantity of parallel training texts. Augmenting the training data with paraphrases generated by pivoting through other languages alleviates this problem, especially for the so-called \"low density\" languages. But pivoting requires additional parallel texts. We address this problem by deriving paraphrases monolingually, using distributional semantic similarity measures, thus providing access to larger training resources, such as comparable and unrelated monolingual corpora. We present what is to our knowledge the first successful integration of a collocational approach to untranslated words with an end-to-end, state-of-the-art SMT system, demonstrating significant translation improvements in a low-resource setting.", "keyphrases": ["statistical machine translation", "paraphrase", "source side"]} +{"id": "chu-wang-2018-survey", "title": "A Survey of Domain Adaptation for Neural Machine Translation", "abstract": "Neural machine translation (NMT) is a deep learning based approach for machine translation, which yields the state-of-the-art translation performance in scenarios where large-scale parallel corpora are available. Although the high-quality and domain-specific translation is crucial in the real world, domain-specific corpora are usually scarce or nonexistent, and thus vanilla NMT performs poorly in such scenarios. Domain adaptation that leverages both out-of-domain parallel corpora as well as monolingual corpora for in-domain translation, is very important for domain-specific translation. 
In this paper, we give a comprehensive survey of the state-of-the-art domain adaptation techniques for NMT.", "keyphrases": ["domain adaptation", "neural machine translation", "out-of-domain parallel corpora", "distinction"]} +{"id": "yimam-etal-2013-webanno", "title": "WebAnno: A Flexible, Web-based and Visually Supported System for Distributed Annotations", "abstract": "We present WebAnno, a general purpose web-based annotation tool for a wide range of linguistic annotations. WebAnno offers annotation project management, freely configurable tagsets and the management of users in different roles. WebAnno uses modern web technology for visualizing and editing annotations in a web browser. It supports arbitrarily large documents, pluggable import/export filters, the curation of annotations across various users, and an interface to farming out annotations to a crowdsourcing platform. Currently WebAnno allows part-of-speech, named entity, dependency parsing and co-reference chain annotations. The architecture design allows adding additional modes of visualization and editing, when new kinds of annotations are to be supported.", "keyphrases": ["annotation tool", "management", "webanno"]} +{"id": "chambers-jurafsky-2008-unsupervised", "title": "Unsupervised Learning of Narrative Event Chains", "abstract": "Hand-coded scripts were used in the 1970-80s as knowledge backbones that enabled inference and other NLP tasks requiring deep semantic knowledge. We propose unsupervised induction of similar schemata called narrative event chains from raw newswire text. A narrative event chain is a partially ordered set of events related by a common protagonist. We describe a three step process to learning narrative event chains. The first uses unsupervised distributional methods to learn narrative relations between events sharing coreferring arguments. The second applies a temporal classifier to partially order the connected events. Finally, the third prunes and clusters self-contained chains from the space of events. We introduce two evaluations: the narrative cloze to evaluate event relatedness, and an order coherence task to evaluate narrative order. We show a 36% improvement over baseline for narrative prediction and 25% for temporal coherence.", "keyphrases": ["narrative event chain", "script", "newswire text", "protagonist", "co-occurrence"]} +{"id": "mccoy-etal-2019-right", "title": "Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural Language Inference", "abstract": "A machine learning system can score well on a given test set by relying on heuristics that are effective for frequent example types but break down in more challenging cases. We study this issue within natural language inference (NLI), the task of determining whether one sentence entails another. We hypothesize that statistical NLI models may adopt three fallible syntactic heuristics: the lexical overlap heuristic, the subsequence heuristic, and the constituent heuristic. To determine whether models have adopted these heuristics, we introduce a controlled evaluation set called HANS (Heuristic Analysis for NLI Systems), which contains many examples where the heuristics fail. We find that models trained on MNLI, including BERT, a state-of-the-art model, perform very poorly on HANS, suggesting that they have indeed adopted these heuristics.
We conclude that there is substantial room for improvement in NLI systems, and that the HANS dataset can motivate and measure progress in this area.", "keyphrases": ["reason", "heuristic", "natural language inference", "nli dataset", "annotation artifact"]} +{"id": "joshi-etal-2017-triviaqa", "title": "TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension", "abstract": "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions. We show that, in comparison to other recently introduced large-scale datasets, TriviaQA (1) has relatively complex, compositional questions, (2) has considerable syntactic and lexical variability between questions and corresponding answer-evidence sentences, and (3) requires more cross sentence reasoning to find answers. We also present two baseline algorithms: a feature-based classifier and a state-of-the-art neural network, that performs well on SQuAD reading comprehension. Neither approach comes close to human performance (23% and 40% vs. 80%), suggesting that TriviaQA is a challenging testbed that is worth significant future study.", "keyphrases": ["reading comprehension", "question-answer pair", "distant supervision", "triviaqa", "paragraph"]} +{"id": "domhan-hieber-2017-using", "title": "Using Target-side Monolingual Data for Neural Machine Translation through Multi-task Learning", "abstract": "The performance of Neural Machine Translation (NMT) models relies heavily on the availability of sufficient amounts of parallel data, and an efficient and effective way of leveraging the vastly available amounts of monolingual data has yet to be found. We propose to modify the decoder in a neural sequence-to-sequence model to enable multi-task learning for two strongly related tasks: target-side language modeling and translation. The decoder predicts the next target word through two channels, a target-side language model on the lowest layer, and an attentional recurrent model which is conditioned on the source representation. This architecture allows joint training on both large amounts of monolingual and moderate amounts of bilingual data to improve NMT performance. Initial results in the news domain for three language pairs show moderate but consistent improvements over a baseline trained on bilingual data only.", "keyphrases": ["monolingual data", "neural machine translation", "multi-task learning"]} +{"id": "pang-lee-2004-sentimental", "title": "A Sentimental Education: Sentiment Analysis Using Subjectivity Summarization Based on Minimum Cuts", "abstract": "Sentiment analysis seeks to identify the viewpoint(s) underlying a text span; an example application is classifying a movie review as \"thumbs up\" or \"thumbs down\". To determine this sentiment polarity, we propose a novel machine-learning method that applies text-categorization techniques to just the subjective portions of the document. 
Extracting these portions can be implemented using efficient techniques for finding minimum cuts in graphs; this greatly facilitates incorporation of cross-sentence contextual constraints.", "keyphrases": ["sentiment analysis", "movie review", "objective sentence", "subjectivity analysis", "text categorization"]} +{"id": "gururangan-etal-2020-dont", "title": "Don't Stop Pretraining: Adapt Language Models to Domains and Tasks", "abstract": "Language models pretrained on text from a wide variety of sources form the foundation of today's NLP. In light of the success of these broad-coverage models, we investigate whether it is still helpful to tailor a pretrained model to the domain of a target task. We present a study across four domains (biomedical and computer science publications, news, and reviews) and eight classification tasks, showing that a second phase of pretraining in-domain (domain-adaptive pretraining) leads to performance gains, under both high- and low-resource settings. Moreover, adapting to the task's unlabeled data (task-adaptive pretraining) improves performance even after domain-adaptive pretraining. Finally, we show that adapting to a task corpus augmented using simple data selection strategies is an effective alternative, especially when resources for domain-adaptive pretraining might be unavailable. Overall, we consistently find that multi-phase adaptive pretraining offers large gains in task performance.", "keyphrases": ["language model", "unlabeled data", "pre-training", "downstream task", "tapt"]} +{"id": "wu-etal-2020-mind", "title": "MIND: A Large-scale Dataset for News Recommendation", "abstract": "News recommendation is an important technique for personalized news service. Compared with product and movie recommendations which have been comprehensively studied, the research on news recommendation is much more limited, mainly due to the lack of a high-quality benchmark dataset. In this paper, we present a large-scale dataset named MIND for news recommendation. Constructed from the user click logs of Microsoft News, MIND contains 1 million users and more than 160k English news articles, each of which has rich textual content such as title, abstract and body. We demonstrate that MIND is a good testbed for news recommendation through a comparative study of several state-of-the-art news recommendation methods which are originally developed on different proprietary datasets. Our results show that the performance of news recommendation highly relies on the quality of news content understanding and user interest modeling. Many natural language processing techniques such as effective text representation methods and pre-trained language models can effectively improve the performance of news recommendation. The MIND dataset will be available at .", "keyphrases": ["large-scale dataset", "news recommendation", "mind"]} +{"id": "chan-etal-2007-word", "title": "Word Sense Disambiguation Improves Statistical Machine Translation", "abstract": "Recent research presents conflicting evidence on whether word sense disambiguation (WSD) systems can help to improve the performance of statistical machine translation (MT) systems. In this paper, we successfully integrate a state-of-the-art WSD system into a state-of-the-art hierarchical phrase-based MT system, Hiero. We show for the first time that integrating a WSD system improves the performance of a state-of-the-art statistical MT system on an actual translation task.
Furthermore, the improvement is statistically significant.", "keyphrases": ["wsd", "word sense disambiguation", "smt system", "translation quality", "soft constraint modeling"]} +{"id": "konstas-etal-2017-neural", "title": "Neural AMR: Sequence-to-Sequence Models for Parsing and Generation", "abstract": "Sequence-to-sequence models have shown strong performance across a broad range of applications. However, their application to parsing and generating text using Abstract Meaning Representation (AMR) has been limited, due to the relatively limited amount of labeled data and the non-sequential nature of the AMR graphs. We present a novel training procedure that can lift this limitation using millions of unlabeled sentences and careful preprocessing of the AMR graphs. For AMR parsing, our model achieves competitive results of 62.1 SMATCH, the current best score reported without significant use of external semantic resources. For AMR generation, our model establishes a new state-of-the-art performance of BLEU 33.8. We present extensive ablative and qualitative analysis including strong evidence that sequence-based AMR models are robust against ordering variations of graph-to-sequence conversions.", "keyphrases": ["amr", "sequence-to-sequence model", "neural amr", "input graph", "semantic parsing"]} +{"id": "socher-etal-2012-semantic", "title": "Semantic Compositionality through Recursive Matrix-Vector Spaces", "abstract": "Single-word vector space models have been very successful at learning lexical information. However, they cannot capture the compositional meaning of longer phrases, preventing them from a deeper understanding of language. We introduce a recursive neural network (RNN) model that learns compositional vector representations for phrases and sentences of arbitrary syntactic type and length. Our model assigns a vector and a matrix to every node in a parse tree: the vector captures the inherent meaning of the constituent, while the matrix captures how it changes the meaning of neighboring words or phrases. This matrix-vector RNN can learn the meaning of operators in propositional logic and natural language. The model obtains state of the art performance on three different experiments: predicting fine-grained sentiment distributions of adverb-adjective pairs; classifying sentiment labels of movie reviews and classifying semantic relationships such as cause-effect or topic-message between nouns using the syntactic path between them.", "keyphrases": ["recursive neural network", "matrix", "parse tree", "semantic compositionality", "network model"]} +{"id": "zhang-clark-2008-joint", "title": "Joint Word Segmentation and POS Tagging Using a Single Perceptron", "abstract": "For Chinese POS tagging, word segmentation is a preliminary step. To avoid error propagation and improve segmentation by utilizing POS information, segmentation and tagging can be performed simultaneously. A challenge for this joint approach is the large combined search space, which makes efficient decoding very hard. Recent research has explored the integration of segmentation and POS tagging, by decoding under restricted versions of the full combined search space. In this paper, we propose a joint segmentation and POS tagging model that does not impose any hard constraints on the interaction between word and POS information. Fast decoding is achieved by using a novel multiple-beam search algorithm. The system uses a discriminative statistical model, trained using the generalized perceptron algorithm. 
The joint model gives an error reduction in segmentation accuracy of 14.6% and an error reduction in tagging accuracy of 12.2%, compared to the traditional pipeline approach.", "keyphrases": ["word segmentation", "pos tagging", "generalized perceptron algorithm", "joint model", "part-of-speech"]} +{"id": "wang-sennrich-2020-exposure", "title": "On Exposure Bias, Hallucination and Domain Shift in Neural Machine Translation", "abstract": "The standard training algorithm in neural machine translation (NMT) suffers from exposure bias, and alternative algorithms have been proposed to mitigate this. However, the practical impact of exposure bias is under debate. In this paper, we link exposure bias to another well-known problem in NMT, namely the tendency to generate hallucinations under domain shift. In experiments on three datasets with multiple test domains, we show that exposure bias is partially to blame for hallucinations, and that training with Minimum Risk Training, which avoids exposure bias, can mitigate this. Our analysis explains why exposure bias is more problematic under domain shift, and also links exposure bias to the beam search problem, i.e. performance deterioration with increasing beam size. Our results provide a new justification for methods that reduce exposure bias: even if they do not increase performance on in-domain test sets, they can increase model robustness to domain shift.", "keyphrases": ["hallucination", "domain shift", "neural machine translation"]} +{"id": "pontiki-etal-2014-semeval", "title": "SemEval-2014 Task 4: Aspect Based Sentiment Analysis", "abstract": "Sentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint. The majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, irrespective of the entities mentioned (e.g., laptops) and their aspects (e.g., battery, screen). SemEval-2014 Task 4 aimed to foster research in the field of aspect-based sentiment analysis, where the goal is to identify the aspects of given target entities and the sentiment expressed for each aspect. The task provided datasets containing manually annotated reviews of restaurants and laptops, as well as a common evaluation procedure. It attracted 163 submissions from 32 teams.", "keyphrases": ["sentiment analysis", "semeval", "aspect term"]} +{"id": "liu-etal-2019-linguistic", "title": "Linguistic Knowledge and Transferability of Contextual Representations", "abstract": "Contextual word representations derived from large-scale neural language models are successful across a diverse set of NLP tasks, suggesting that they encode useful and transferable features of language. To shed light on the linguistic knowledge they capture, we study the representations produced by several recent pretrained contextualizers (variants of ELMo, the OpenAI transformer language model, and BERT) with a suite of sixteen diverse probing tasks. We find that linear models trained on top of frozen contextual representations are competitive with state-of-the-art task-specific models in many cases, but fail on tasks requiring fine-grained linguistic knowledge (e.g., conjunct identification). To investigate the transferability of contextual word representations, we quantify differences in the transferability of individual layers within contextualizers, especially between recurrent neural networks (RNNs) and transformers.
For instance, higher layers of RNNs are more task-specific, while transformer layers do not exhibit the same monotonic trend. In addition, to better understand what makes contextual word representations transferable, we compare language model pretraining with eleven supervised pretraining tasks. For any given task, pretraining on a closely related task yields better performance than language model pretraining (which is better on average) when the pretraining dataset is fixed. However, language model pretraining on more data gives the best results.", "keyphrases": ["transferability", "linguistic knowledge", "methodology", "different layer", "capability"]} +{"id": "esuli-sebastiani-2006-sentiwordnet", "title": "SENTIWORDNET: A Publicly Available Lexical Resource for Opinion Mining", "abstract": "Opinion mining (OM) is a recent subdiscipline at the crossroads of information retrieval and computational linguistics which is concerned not with the topic a document is about, but with the opinion it expresses. OM has a rich set of applications, ranging from tracking users\u2019 opinions about products or about political candidates as expressed in online forums, to customer relationship management. In order to aid the extraction of opinions from text, recent research has tried to automatically determine the \u201cPN-polarity\u201d of subjective terms, i.e. identify whether a term that is a marker of opinionated content has a positive or a negative connotation. Research on determining whether a term is indeed a marker of opinionated content (a subjective term) or not (an objective term) has been instead much scarcer. In this work we describe SENTIWORDNET, a lexical resource in which each WORDNET synset s is associated to three numerical scores Obj(s), Pos(s) and Neg(s), describing how objective, positive, and negative the terms contained in the synset are. The method used to develop SENTIWORDNET is based on the quantitative analysis of the glosses associated to synsets, and on the use of the resulting vectorial term representations for semi-supervised synset classification. The three scores are derived by combining the results produced by a committee of eight ternary classifiers, all characterized by similar accuracy levels but different classification behaviour. SENTIWORDNET is freely available for research purposes, and is endowed with a Web-based graphical user interface.", "keyphrases": ["information retrieval", "gloss", "sentiwordnet", "polarity", "opinion mining application"]} +{"id": "zadeh-etal-2017-tensor", "title": "Tensor Fusion Network for Multimodal Sentiment Analysis", "abstract": "Multimodal sentiment analysis is an increasingly popular research area, which extends the conventional language-based definition of sentiment analysis to a multimodal setup where other relevant modalities accompany language. In this paper, we pose the problem of multimodal sentiment analysis as modeling intra-modality and inter-modality dynamics. We introduce a novel model, termed Tensor Fusion Networks, which learns both such dynamics end-to-end. The proposed approach is tailored for the volatile nature of spoken language in online videos as well as accompanying gestures and voice.
In the experiments, our model outperforms state-of-the-art approaches for both multimodal and unimodal sentiment analysis.", "keyphrases": ["multimodal sentiment analysis", "modality", "tensor fusion network", "concatenation"]} +{"id": "stanovsky-etal-2019-evaluating", "title": "Evaluating Gender Bias in Machine Translation", "abstract": "We present the first challenge set and evaluation protocol for the analysis of gender bias in machine translation (MT). Our approach uses two recent coreference resolution datasets composed of English sentences which cast participants into non-stereotypical gender roles (e.g., \u201cThe doctor asked the nurse to help her in the operation\u201d). We devise an automatic gender bias evaluation method for eight target languages with grammatical gender, based on morphological analysis (e.g., the use of female inflection for the word \u201cdoctor\u201d). Our analyses show that four popular industrial MT systems and two recent state-of-the-art academic MT models are significantly prone to gender-biased translation errors for all tested target languages. Our data and code are publicly available at .", "keyphrases": ["gender bias", "machine translation", "evaluation protocol", "winomt", "adjective"]} +{"id": "rush-etal-2015-neural", "title": "A Neural Attention Model for Abstractive Sentence Summarization", "abstract": "Summarization based on text extraction is inherently limited, but generation-style abstractive methods have proven challenging to build. In this work, we propose a fully data-driven approach to abstractive sentence summarization. Our method utilizes a local attention-based model that generates each word of the summary conditioned on the input sentence. While the model is structurally simple, it can easily be trained end-to-end and scales to a large amount of training data. The model shows significant performance gains on the DUC-2004 shared task compared with several strong baselines.", "keyphrases": ["neural attention model", "abstractive sentence summarization", "gigaword", "input text", "language generation task"]} +{"id": "pfeiffer-etal-2020-mad", "title": "MAD-X: An Adapter-Based Framework for Multi-Task Cross-Lingual Transfer", "abstract": "The main goal behind state-of-the-art pre-trained multilingual models such as multilingual BERT and XLM-R is enabling and bootstrapping NLP applications in low-resource languages through zero-shot or few-shot cross-lingual transfer. However, due to limited model capacity, their transfer performance is the weakest exactly on such low-resource languages and languages unseen during pre-training. We propose MAD-X, an adapter-based framework that enables high portability and parameter-efficient transfer to arbitrary tasks and languages by learning modular language and task representations. In addition, we introduce a novel invertible adapter architecture and a strong baseline method for adapting a pre-trained multilingual model to a new language. MAD-X outperforms the state of the art in cross lingual transfer across a representative set of typologically diverse languages on named entity recognition and causal commonsense reasoning, and achieves competitive results on question answering. 
Our code and adapters are available at AdapterHub.ml.", "keyphrases": ["cross-lingual transfer", "task representation", "adapter", "new language", "mad-x"]} +{"id": "clark-etal-2018-semi", "title": "Semi-Supervised Sequence Modeling with Cross-View Training", "abstract": "Unsupervised representation learning algorithms such as word2vec and ELMo improve the accuracy of many supervised NLP models, mainly because they can take advantage of large amounts of unlabeled text. However, the supervised models only learn from task-specific labeled data during the main training phase. We therefore propose Cross-View Training (CVT), a semi-supervised learning algorithm that improves the representations of a Bi-LSTM sentence encoder using a mix of labeled and unlabeled data. On labeled examples, standard supervised learning is used. On unlabeled examples, CVT teaches auxiliary prediction modules that see restricted views of the input (e.g., only part of a sentence) to match the predictions of the full model seeing the whole input. Since the auxiliary modules and the full model share intermediate representations, this in turn improves the full model. Moreover, we show that CVT is particularly effective when combined with multi-task learning. We evaluate CVT on five sequence tagging tasks, machine translation, and dependency parsing, achieving state-of-the-art results.", "keyphrases": ["cross-view training", "unlabeled data", "view", "dependency parsing", "self-training"]} +{"id": "li-etal-2020-flat", "title": "FLAT: Chinese NER Using Flat-Lattice Transformer", "abstract": "Recently, the character-word lattice structure has been proved to be effective for Chinese named entity recognition (NER) by incorporating the word information. However, since the lattice structure is complex and dynamic, the lattice-based models are hard to fully utilize the parallel computation of GPUs and usually have a low inference speed. In this paper, we propose FLAT: Flat-LAttice Transformer for Chinese NER, which converts the lattice structure into a flat structure consisting of spans. Each span corresponds to a character or latent word and its position in the original lattice. With the power of Transformer and well-designed position encoding, FLAT can fully leverage the lattice information and has an excellent parallel ability. Experiments on four datasets show FLAT outperforms other lexicon-based models in performance and efficiency.", "keyphrases": ["chinese ner", "flat-lattice transformer", "flat"]} +{"id": "yih-etal-2015-semantic", "title": "Semantic Parsing via Staged Query Graph Generation: Question Answering with Knowledge Base", "abstract": "We propose a novel semantic parsing framework for question answering using a knowledge base. We define a query graph that resembles subgraphs of the knowledge base and can be directly mapped to a logical form. Semantic parsing is reduced to query graph generation, formulated as a staged search problem. Unlike traditional approaches, our method leverages the knowledge base in an early stage to prune the search space and thus simplifies the semantic matching problem. 
By applying an advanced entity linking system and a deep convolutional neural network model that matches questions and predicate sequences, our system outperforms previous methods substantially, and achieves an F1 measure of 52.5% on the WEBQUESTIONS dataset.", "keyphrases": ["query graph", "knowledge base", "semantic parsing", "natural language question", "relation path"]} +{"id": "dua-etal-2019-drop", "title": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs", "abstract": "Reading comprehension has recently seen rapid progress, with systems matching humans on the most popular datasets for the task. However, a large body of work has highlighted the brittleness of these systems, showing that there is much work left to be done. We introduce a new reading comprehension benchmark, DROP, which requires Discrete Reasoning Over the content of Paragraphs. In this crowdsourced, adversarially-created, 55k-question benchmark, a system must resolve references in a question, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or sorting). These operations require a much more comprehensive understanding of the content of paragraphs, as they remove the paraphrase-and-entity-typing shortcuts available in prior datasets. We apply state-of-the-art methods from both the reading comprehension and semantic parsing literatures on this dataset and show that the best systems only achieve 38.4% F1 on our generalized accuracy metric, while expert human performance is 96%. We additionally present a new model that combines reading comprehension methods with simple numerical reasoning to achieve 51% F1.", "keyphrases": ["reading comprehension benchmark", "discrete reasoning", "paragraphs", "drop", "answer type"]} +{"id": "malmasi-etal-2016-discriminating", "title": "Discriminating between Similar Languages and Arabic Dialect Identification: A Report on the Third DSL Shared Task", "abstract": "We present the results of the third edition of the Discriminating between Similar Languages (DSL) shared task, which was organized as part of the VarDial'2016 workshop at COLING'2016. The challenge offered two subtasks: subtask 1 focused on the identification of very similar languages and language varieties in newswire texts, whereas subtask 2 dealt with Arabic dialect identification in speech transcripts. A total of 37 teams registered to participate in the task, 24 teams submitted test results, and 20 teams also wrote system description papers. High-order character n-grams were the most successful feature, and the best classification approaches included traditional supervised learning methods such as SVM, logistic regression, and language models, while deep learning approaches did not perform very well.", "keyphrases": ["similar languages", "arabic dialect identification", "discriminating", "dsl task"]} +{"id": "morante-sporleder-2012-modality", "title": "Modality and Negation: An Introduction to the Special Issue", "abstract": "Traditionally, most research in NLP has focused on propositional aspects of meaning. To truly understand language, however, extra-propositional aspects are equally important. Modality and negation typically contribute significantly to these extra-propositional meaning aspects. Although modality and negation have often been neglected by mainstream computational linguistics, interest has grown in recent years, as evidenced by several annotation projects dedicated to these phenomena. 
Researchers have started to work on modeling factuality, belief and certainty, detecting speculative sentences and hedging, identifying contradictions, and determining the scope of expressions of modality and negation. In this article, we will provide an overview of how modality and negation have been modeled in computational linguistics.", "keyphrases": ["negation", "extra-propositional aspect", "factuality", "modality", "attitude"]} +{"id": "pavlick-etal-2015-ppdb", "title": "PPDB 2.0: Better paraphrase ranking, fine-grained entailment relations, word embeddings, and style classification", "abstract": "We present a new release of the Paraphrase Database. PPDB 2.0 includes a discriminatively re-ranked set of paraphrases that achieve a higher correlation with human judgments than PPDB 1.0\u2019s heuristic rankings. Each paraphrase pair in the database now also includes fine-grained entailment relations, word embedding similarities, and style annotations.", "keyphrases": ["ranking", "entailment relation", "paraphrase database", "style annotation", "ppdb"]} +{"id": "och-ney-2003-systematic", "title": "A Systematic Comparison of Various Statistical Alignment Models", "abstract": "We present and compare various methods for computing word alignments using statistical or heuristic models. We consider the five alignment models presented in Brown, Della Pietra, Della Pietra, and Mercer (1993), the hidden Markov alignment model, smoothing techniques, and refinements. These statistical models are compared with two heuristic models based on the Dice coefficient. We present different methods for combining word alignments to perform a symmetrization of directed statistical alignment models. As evaluation criterion, we use the quality of the resulting Viterbi alignment compared to a manually produced reference alignment. We evaluate the models on the German-English Verbmobil task and the French-English Hansards task. We perform a detailed analysis of various design decisions of our statistical alignment system and evaluate these on training corpora of various sizes. An important result is that refined alignment models with a first-order dependence and a fertility model yield significantly better results than simple heuristic models. In the Appendix, we present an efficient training algorithm for the alignment models presented.", "keyphrases": ["systematic comparison", "giza++", "ibm model", "parallel corpora", "smt system"]} +{"id": "zhang-etal-2019-ernie", "title": "ERNIE: Enhanced Language Representation with Informative Entities", "abstract": "Neural language representation models such as BERT pre-trained on large-scale corpora can well capture rich semantic patterns from plain text, and be fine-tuned to consistently improve the performance of various NLP tasks. However, the existing pre-trained language models rarely consider incorporating knowledge graphs (KGs), which can provide rich structured knowledge facts for better language understanding. We argue that informative entities in KGs can enhance language representation with external knowledge. In this paper, we utilize both large-scale textual corpora and KGs to train an enhanced language representation model (ERNIE), which can take full advantage of lexical, syntactic, and knowledge information simultaneously. The experimental results have demonstrated that ERNIE achieves significant improvements on various knowledge-driven tasks, and meanwhile is comparable with the state-of-the-art model BERT on other common NLP tasks.
The code and datasets will be available in the future.", "keyphrases": ["enhanced language representation", "knowledge graph", "ernie", "entity embedding", "downstream task"]} +{"id": "zhang-yang-2018-chinese", "title": "Chinese NER Using Lattice LSTM", "abstract": "We investigate a lattice-structured LSTM model for Chinese NER, which encodes a sequence of input characters as well as all potential words that match a lexicon. Compared with character-based methods, our model explicitly leverages word and word sequence information. Compared with word-based methods, lattice LSTM does not suffer from segmentation errors. Gated recurrent cells allow our model to choose the most relevant characters and words from a sentence for better NER results. Experiments on various datasets show that lattice LSTM outperforms both word-based and character-based LSTM baselines, achieving the best results.", "keyphrases": ["lattice lstm", "potential word", "segmentation", "chinese ner"]} +{"id": "morante-daelemans-2009-metalearning", "title": "A Metalearning Approach to Processing the Scope of Negation", "abstract": "Finding negation signals and their scope in text is an important subtask in information extraction. In this paper we present a machine learning system that finds the scope of negation in biomedical texts. The system combines several classifiers and works in two phases. To investigate the robustness of the approach, the system is tested on the three subcorpora of the BioScope corpus representing different text types. It achieves the best results to date for this task, with an error reduction of 32.07% compared to current state of the art results.", "keyphrases": ["scope", "negation", "machine learning system", "bioscope corpus"]} +{"id": "gamon-2004-sentiment", "title": "Sentiment classification on customer feedback data: noisy data, large feature vectors, and the role of linguistic analysis", "abstract": "We demonstrate that it is possible to perform automatic sentiment classification in the very noisy domain of customer feedback data. We show that by using large feature vectors in combination with feature reduction, we can train linear support vector machines that achieve high classification accuracy on data that present classification challenges even for a human annotator. We also show that, surprisingly, the addition of deep linguistic analysis features to a set of surface level word n-gram features contributes consistently to classification accuracy in this domain.", "keyphrases": ["customer feedback data", "n-gram", "sentiment classification"]} +{"id": "habash-rambow-2006-magead", "title": "MAGEAD: A Morphological Analyzer and Generator for the Arabic Dialects", "abstract": "We present MAGEAD, a morphological analyzer and generator for the Arabic language family. Our work is novel in that it explicitly addresses the need for processing the morphology of the dialects. MAGEAD performs an on-line analysis to or generation from a root+pattern+features representation, it has separate phonological and orthographic representations, and it allows for combining morphemes from different dialects. 
We present a detailed evaluation of MAGEAD.", "keyphrases": ["morphological analyzer", "generator", "arabic", "dialect", "disambiguation"]} +{"id": "stenetorp-etal-2012-brat", "title": "brat: a Web-based Tool for NLP-Assisted Text Annotation", "abstract": "We introduce the brat rapid annotation tool (BRAT), an intuitive web-based tool for text annotation supported by Natural Language Processing (NLP) technology. BRAT has been developed for rich structured annotation for a variety of NLP tasks and aims to support manual curation efforts and increase annotator productivity using NLP techniques. We discuss several case studies of real-world annotation projects using pre-release versions of BRAT and present an evaluation of annotation assisted by semantic class disambiguation on a multicategory entity mention annotation task, showing a 15% decrease in total annotation time. BRAT is available under an open-source license from: http://brat.nlplab.org", "keyphrases": ["web-based tool", "text annotation", "support", "brat", "visualization"]} +{"id": "sun-etal-2019-mitigating", "title": "Mitigating Gender Bias in Natural Language Processing: Literature Review", "abstract": "As Natural Language Processing (NLP) and Machine Learning (ML) tools rise in popularity, it becomes increasingly vital to recognize the role they play in shaping societal biases and stereotypes. Although NLP models have shown success in modeling various applications, they propagate and may even amplify gender bias found in text corpora. While the study of bias in artificial intelligence is not new, methods to mitigate gender bias in NLP are relatively nascent. In this paper, we review contemporary studies on recognizing and mitigating gender bias in NLP. We discuss gender bias based on four forms of representation bias and analyze methods recognizing gender bias. Furthermore, we discuss the advantages and drawbacks of existing gender debiasing methods. Finally, we discuss future studies for recognizing and mitigating gender bias in NLP.", "keyphrases": ["gender bias", "literature review", "language model"]} +{"id": "tiedemann-2012-parallel", "title": "Parallel Data, Tools and Interfaces in OPUS", "abstract": "This paper presents the current status of OPUS, a growing language resource of parallel corpora and related tools. The focus in OPUS is to provide freely available data sets in various formats together with basic annotation to be useful for applications in computational linguistics, translation studies and cross-linguistic corpus studies. In this paper, we report about new data sets and their features, additional annotation tools and models provided from the website and essential interfaces and on-line services included in the project.", "keyphrases": ["opus", "parallel data", "parallel sentence", "large number"]} +{"id": "taboada-etal-2011-lexicon", "title": "Lexicon-Based Methods for Sentiment Analysis", "abstract": "We present a lexicon-based approach to extracting sentiment from text. The Semantic Orientation CALculator (SO-CAL) uses dictionaries of words annotated with their semantic orientation (polarity and strength), and incorporates intensification and negation. SO-CAL is applied to the polarity classification task, the process of assigning a positive or negative label to a text that captures the text's opinion towards its main subject matter. We show that SO-CAL's performance is consistent across domains and in completely unseen data. 
Additionally, we describe the process of dictionary creation, and our use of Mechanical Turk to check dictionaries for consistency and reliability.", "keyphrases": ["sentiment analysis", "lexicon-based approach", "semantic orientation", "negation", "statistical model"]} +{"id": "lample-etal-2016-neural", "title": "Neural Architectures for Named Entity Recognition", "abstract": "Paper presented at the 2016 Conference of the North American Chapter of the Association for Computational Linguistics, held in San Diego (CA, USA), June 12-17, 2016.", "keyphrases": ["named entity recognition", "neural architecture", "character", "ner task", "conditional random field"]} +{"id": "gimpel-etal-2013-systematic", "title": "A Systematic Exploration of Diversity in Machine Translation", "abstract": "This paper addresses the problem of producing a diverse set of plausible translations. We present a simple procedure that can be used with any statistical machine translation (MT) system. We explore three ways of using diverse translations: (1) system combination, (2) discriminative reranking with rich features, and (3) a novel post-editing scenario in which multiple translations are presented to users. We find that diversity can improve performance on these tasks, especially for sentences that are difficult for MT.", "keyphrases": ["diversity", "machine translation", "high model score"]} +{"id": "lewis-etal-2020-bart", "title": "BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension", "abstract": "We present BART, a denoising autoencoder for pretraining sequence-to-sequence models. BART is trained by (1) corrupting text with an arbitrary noising function, and (2) learning a model to reconstruct the original text. It uses a standard Transformer-based neural machine translation architecture which, despite its simplicity, can be seen as generalizing BERT (due to the bidirectional encoder), GPT (with the left-to-right decoder), and other recent pretraining schemes. We evaluate a number of noising approaches, finding the best performance by both randomly shuffling the order of sentences and using a novel in-filling scheme, where spans of text are replaced with a single mask token. BART is particularly effective when fine-tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa on GLUE and SQuAD, and achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 3.5 ROUGE. BART also provides a 1.1 BLEU increase over a back-translation system for machine translation, with only target language pretraining. We also replicate other pretraining schemes within the BART framework, to understand their effect on end-task performance.", "keyphrases": ["sequence-to-sequence", "natural language generation", "comprehension", "summarization", "objective"]} +{"id": "read-2005-using", "title": "Using Emoticons to Reduce Dependency in Machine Learning Techniques for Sentiment Classification", "abstract": "Sentiment Classification seeks to identify a piece of text according to its author's general feeling toward their subject, be it positive or negative. Traditional machine learning techniques have been applied to this problem with reasonable success, but they have been shown to work well only when there is a good match between the training and test data with respect to topic.
This paper demonstrates that match with respect to domain and time is also important, and presents preliminary experiments with training data labeled with emoticons, which has the potential of being independent of domain, topic and time.", "keyphrases": ["emoticon", "positive tweet", "negative polarity"]} +{"id": "vadas-curran-2007-adding", "title": "Adding Noun Phrase Structure to the Penn Treebank", "abstract": "The Penn Treebank does not annotate within base noun phrases (NPs), committing only to flat structures that ignore the complexity of English NPs. This means that tools trained on Treebank data cannot learn the correct internal structure of NPs. This paper details the process of adding gold-standard bracketing within each noun phrase in the Penn Treebank. We then examine the consistency and reliability of our annotations. Finally, we use this resource to determine NP structure using several statistical approaches, thus demonstrating the utility of the corpus. This adds detail to the Penn Treebank that is necessary for many NLP applications.", "keyphrases": ["noun phrase", "penn treebank", "internal structure"]} +{"id": "mccarthy-etal-2019-sigmorphon", "title": "The SIGMORPHON 2019 Shared Task: Morphological Analysis in Context and Cross-Lingual Transfer for Inflection", "abstract": "The SIGMORPHON 2019 shared task on cross-lingual transfer and contextual analysis in morphology examined transfer learning of inflection between 100 language pairs, as well as contextual lemmatization and morphosyntactic description in 66 languages. The first task evolves past years' inflection tasks by examining transfer of morphological inflection knowledge from a high-resource language to a low-resource language. This year also presents a new second challenge on lemmatization and morphological feature analysis in context. All submissions featured a neural component and built on either this year's strong baselines or highly ranked systems from previous years' shared tasks. Every participating team improved in accuracy over the baselines for the inflection task (though not Levenshtein distance), and every team in the contextual analysis task improved on both state-of-the-art neural and non-neural baselines.", "keyphrases": ["cross-lingual transfer", "inflection", "high resource language"]} +{"id": "kumar-etal-2007-improving", "title": "Improving Word Alignment with Bridge Languages", "abstract": "We describe an approach to improve Statistical Machine Translation (SMT) performance using multi-lingual, parallel, sentence-aligned corpora in several bridge languages. Our approach consists of a simple method for utilizing a bridge language to create a word alignment system and a procedure for combining word alignment systems from multiple bridge languages. The final translation is obtained by consensus decoding that combines hypotheses obtained using all bridge language word alignments. We present experiments showing that multilingual, parallel text in Spanish, French, Russian, and Chinese can be utilized in this framework to improve translation performance on an Arabic-to-English task.", "keyphrases": ["word alignment", "bridge language", "high quality"]} +{"id": "hamilton-etal-2016-diachronic", "title": "Diachronic Word Embeddings Reveal Statistical Laws of Semantic Change", "abstract": "Understanding how words change their meanings over time is key to models of language and cultural evolution, but historical data on meaning is scarce, making theories hard to develop and test. 
Word embeddings show promise as a diachronic tool, but have not been carefully evaluated. We develop a robust methodology for quantifying semantic change by evaluating word embeddings (PPMI, SVD, word2vec) against known historical changes. We then use this methodology to reveal statistical laws of semantic evolution. Using six historical corpora spanning four languages and two centuries, we propose two quantitative laws of semantic change: (i) the law of conformity---the rate of semantic change scales with an inverse power-law of word frequency; (ii) the law of innovation---independent of frequency, words that are more polysemous have higher rates of semantic change.", "keyphrases": ["word embedding", "semantic change", "methodology", "historical corpora"]} +{"id": "takamatsu-etal-2012-reducing", "title": "Reducing Wrong Labels in Distant Supervision for Relation Extraction", "abstract": "In relation extraction, distant supervision seeks to extract relations between entities from text by using a knowledge base, such as Freebase, as a source of supervision. When a sentence and a knowledge base refer to the same entity pair, this approach heuristically labels the sentence with the corresponding relation in the knowledge base. However, this heuristic can fail with the result that some sentences are labeled wrongly. This noisy labeled data causes poor extraction performance. In this paper, we propose a method to reduce the number of wrong labels. We present a novel generative model that directly models the heuristic labeling process of distant supervision. The model predicts whether assigned labels are correct or wrong via its hidden variables. Our experimental results show that this model detected wrong labels with higher performance than baseline methods. In the experiment, we also found that our wrong label reduction boosted the performance of relation extraction.", "keyphrases": ["distant supervision", "relation extraction", "labeling process"]} +{"id": "koen-2004-pharaoh", "title": "Pharaoh: a beam search decoder for phrase-based statistical machine translation models", "abstract": "We describe Pharaoh, a freely available decoder for phrase-based statistical machine translation models. The decoder is the implementation of an efficient dynamic programming search algorithm with lattice generation and XML markup for external components.", "keyphrases": ["beam search decoder", "machine translation model", "pharaoh", "smt system", "hypothesis"]} +{"id": "jawahar-etal-2019-bert", "title": "What Does BERT Learn about the Structure of Language?", "abstract": "BERT is a recent language representation model that has surprisingly performed well in diverse language understanding benchmarks. This result indicates the possibility that BERT networks capture structural information about language. In this work, we provide novel support for this claim by performing a series of experiments to unpack the elements of English language structure learned by BERT. Our findings are fourfold. BERT's phrasal representation captures the phrase-level information in the lower layers. The intermediate layers of BERT compose a rich hierarchy of linguistic information, starting with surface features at the bottom, syntactic features in the middle followed by semantic features at the top. BERT requires deeper layers while tracking subject-verb agreement to handle the long-term dependency problem.
Finally, the compositional scheme underlying BERT mimics classical, tree-like structures.", "keyphrases": ["bert", "linguistic information", "tree-like structure", "low layer", "attention head"]} +{"id": "smith-eisner-2005-contrastive", "title": "Contrastive Estimation: Training Log-Linear Models on Unlabeled Data", "abstract": "Conditional random fields (Lafferty et al., 2001) are quite effective at sequence labeling tasks like shallow parsing (Sha and Pereira, 2003) and named-entity extraction (McCallum and Li, 2003). CRFs are log-linear, allowing the incorporation of arbitrary features into the model. To train on unlabeled data, we require unsupervised estimation methods for log-linear models; few exist. We describe a novel approach, contrastive estimation. We show that the new technique can be intuitively understood as exploiting implicit negative evidence and is computationally efficient. Applied to a sequence labeling problem---POS tagging given a tagging dictionary and unlabeled text---contrastive estimation outperforms EM (with the same feature set), is more robust to degradations of the dictionary, and can largely recover by modeling additional features.", "keyphrases": ["log-linear model", "unlabeled data", "tagging", "contrastive estimation", "neighborhood"]} +{"id": "kalchbrenner-etal-2014-convolutional", "title": "A Convolutional Neural Network for Modelling Sentences", "abstract": "The ability to accurately represent sentences is central to language understanding. We describe a convolutional architecture dubbed the Dynamic Convolutional Neural Network (DCNN) that we adopt for the semantic modelling of sentences. The network uses Dynamic k-Max Pooling, a global pooling operation over linear sequences. The network handles input sentences of varying length and induces a feature graph over the sentence that is capable of explicitly capturing short and long-range relations. The network does not rely on a parse tree and is easily applicable to any language. We test the DCNN in four experiments: small scale binary and multi-class sentiment prediction, six-way question classification and Twitter sentiment prediction by distant supervision. The network achieves excellent performance in the first three tasks and a greater than 25% error reduction in the last task with respect to the strongest baseline.", "keyphrases": ["convolutional neural network", "modeling", "k-max pooling", "cnn", "sentiment analysis"]} +{"id": "zhou-choi-2018-exist", "title": "They Exist! Introducing Plural Mentions to Coreference Resolution and Entity Linking", "abstract": "This paper analyzes arguably the most challenging yet under-explored aspect of resolution tasks such as coreference resolution and entity linking, that is the resolution of plural mentions. Unlike singular mentions each of which represents one entity, plural mentions stand for multiple entities. To tackle this aspect, we take the character identification corpus from the SemEval 2018 shared task that consists of entity annotation for singular mentions, and expand it by adding annotation for plural mentions. We then introduce a novel coreference resolution algorithm that selectively creates clusters to handle both singular and plural mentions, and also a deep learning-based entity linking model that jointly handles both types of mentions through multi-task learning. Adjusted evaluation metrics are proposed for these tasks as well to handle the uniqueness of plural mentions. 
Our experiments show that the new coreference resolution and entity linking models significantly outperform traditional models designed only for singular mentions. To the best of our knowledge, this is the first time that plural mentions are thoroughly analyzed for these two resolution tasks.", "keyphrases": ["plural mention", "coreference resolution", "entity linking"]} +{"id": "zellers-etal-2018-swag", "title": "SWAG: A Large-Scale Adversarial Dataset for Grounded Commonsense Inference", "abstract": "Given a partial description like \u201cshe opened the hood of the car,\u201d humans can reason about the situation and anticipate what might come next (\u201cthen, she examined the engine\u201d). In this paper, we introduce the task of grounded commonsense inference, unifying natural language inference and commonsense reasoning. We present SWAG, a new dataset with 113k multiple choice questions about a rich spectrum of grounded situations. To address the recurring challenges of the annotation artifacts and human biases found in many existing datasets, we propose Adversarial Filtering (AF), a novel procedure that constructs a de-biased dataset by iteratively training an ensemble of stylistic classifiers, and using them to filter the data. To account for the aggressive adversarial filtering, we use state-of-the-art language models to massively oversample a diverse set of potential counterfactuals. Empirical results demonstrate that while humans can solve the resulting inference problems with high accuracy (88%), various competitive models struggle on our task. We provide comprehensive analysis that indicates significant opportunities for future research.", "keyphrases": ["situation", "natural language inference", "multiple choice question", "adversarial filtering", "swag"]} +{"id": "gurevych-etal-2012-uby", "title": "UBY - A Large-Scale Unified Lexical-Semantic Resource Based on LMF", "abstract": "We present Uby, a large-scale lexical-semantic resource combining a wide range of information from expert-constructed and collaboratively constructed resources for English and German. It currently contains nine resources in two languages: English WordNet, Wiktionary, Wikipedia, FrameNet and VerbNet, German Wikipedia, Wiktionary and GermaNet, and multilingual OmegaWiki modeled according to the LMF standard. For FrameNet, VerbNet and all collaboratively constructed resources, this is done for the first time. Our LMF model captures lexical information at a fine-grained level by employing a large number of Data Categories from ISOCat and is designed to be directly extensible by new languages and resources. All resources in Uby can be accessed with an easy to use publicly available API.", "keyphrases": ["lexical-semantic resource", "wiktionary", "uby", "uniform representation"]} +{"id": "ide-suderman-2009-bridging", "title": "Bridging the Gaps: Interoperability for GrAF, GATE, and UIMA", "abstract": "This paper explores interoperability for data represented using the Graph Annotation Framework (GrAF) (Ide and Suderman, 2007) and the data formats utilized by two general-purpose annotation systems: the General Architecture for Text Engineering (GATE) (Cunningham, 2002) and the Unstructured Information Management Architecture (UIMA). GrAF is intended to serve as a \"pivot\" to enable interoperability among different formats, and both GATE and UIMA are at least implicitly designed with an eye toward interoperability with other formats and tools.
We describe the steps required to perform a round-trip rendering from GrAF to GATE and GrAF to UIMA CAS and back again, and outline the commonalities as well as the differences and gaps that came to light in the process.", "keyphrases": ["interoperability", "gate", "uima"]} +{"id": "jiang-zhai-2007-systematic", "title": "A Systematic Exploration of the Feature Space for Relation Extraction", "abstract": "Relation extraction is the task of finding semantic relations between entities from text. The state-of-the-art methods for relation extraction are mostly based on statistical learning, and thus all have to deal with feature selection, which can significantly affect the classification performance. In this paper, we systematically explore a large space of features for relation extraction and evaluate the effectiveness of different feature subspaces. We present a general definition of feature spaces based on a graphic representation of relation instances, and explore three different representations of relation instances and features of different complexities within this framework. Our experiments show that using only basic unit features is generally sufficient to achieve state-of-the-art performance, while overinclusion of complex features may hurt the performance. A combination of features of different levels of complexity and from different sentence representations, coupled with task-oriented feature pruning, gives the best performance.", "keyphrases": ["systematic exploration", "feature space", "relation extraction", "jiang"]} +{"id": "felbo-etal-2017-using", "title": "Using millions of emoji occurrences to learn any-domain representations for detecting sentiment, emotion and sarcasm", "abstract": "NLP tasks are often limited by scarcity of manually annotated data. In social media sentiment analysis and related tasks, researchers have therefore used binarized emoticons and specific hashtags as forms of distant supervision. Our paper shows that by extending the distant supervision to a more diverse set of noisy labels, the models can learn richer representations. Through emoji prediction on a dataset of 1246 million tweets containing one of 64 common emojis we obtain state-of-the-art performance on 8 benchmark datasets within emotion, sentiment and sarcasm detection using a single pretrained model. Our analyses confirm that the diversity of our emotional labels yields a performance improvement over previous distant supervision approaches.", "keyphrases": ["emojis", "emotion", "sarcasm", "deepmoji", "rich representation"]} +{"id": "cai-knight-2013-smatch", "title": "Smatch: an Evaluation Metric for Semantic Feature Structures", "abstract": "The evaluation of whole-sentence semantic structures plays an important role in semantic parsing and large-scale semantic structure annotation. However, there is no widely-used metric to evaluate whole-sentence semantic structures. In this paper, we present smatch, a metric that calculates the degree of overlap between two semantic feature structures. We give an efficient algorithm to compute the metric and show the results of an inter-annotator agreement study.", "keyphrases": ["smatch", "amr parser", "triple"]} +{"id": "shin-etal-2020-autoprompt", "title": "AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts", "abstract": "The remarkable success of pretrained language models has motivated the study of what kinds of knowledge these models learn during pretraining.
Reformulating tasks as fill-in-the-blanks problems (e.g., cloze tests) is a natural approach for gauging such knowledge; however, its usage is limited by the manual effort and guesswork required to write suitable prompts. To address this, we develop AutoPrompt, an automated method to create prompts for a diverse set of tasks, based on a gradient-guided search. Using AutoPrompt, we show that masked language models (MLMs) have an inherent capability to perform sentiment analysis and natural language inference without additional parameters or finetuning, sometimes achieving performance on par with recent state-of-the-art supervised models. We also show that our prompts elicit more accurate factual knowledge from MLMs than the manually created prompts on the LAMA benchmark, and that MLMs can be used as relation extractors more effectively than supervised relation extraction models. These results demonstrate that automatically generated prompts are a viable parameter-free alternative to existing probing methods, and as pretrained LMs become more sophisticated and capable, potentially a replacement for finetuning.", "keyphrases": ["language model", "prompt", "factual knowledge", "fine-tuning", "bert"]} +{"id": "palmer-etal-2005-proposition", "title": "The Proposition Bank: An Annotated Corpus of Semantic Roles", "abstract": "The Proposition Bank project takes a practical approach to semantic representation, adding a layer of predicate-argument information, or semantic role labels, to the syntactic structures of the Penn Treebank. The resulting resource can be thought of as shallow, in that it does not represent coreference, quantification, and many other higher-order phenomena, but also broad, in that it covers every instance of every verb in the corpus and allows representative statistics to be calculated. We discuss the criteria used to define the sets of semantic roles used in the annotation process and to analyze the frequency of syntactic/semantic alternations in the corpus. We describe an automatic system for semantic role tagging trained on the corpus and discuss the effect on its performance of various types of information, including a comparison of full syntactic parsing with a flat representation and the contribution of the empty trace categories of the treebank.", "keyphrases": ["proposition bank", "annotated corpus", "semantic role", "propbank frameset", "broad-coverage"]} +{"id": "dai-etal-2019-transformer", "title": "Transformer-XL: Attentive Language Models beyond a Fixed-Length Context", "abstract": "Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning).
When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.", "keyphrases": ["language modeling", "fixed-length context", "transformer-xl", "memory", "previous segment"]} +{"id": "mohammad-2018-obtaining", "title": "Obtaining Reliable Human Ratings of Valence, Arousal, and Dominance for 20,000 English Words", "abstract": "Words play a central role in language and thought. Factor analysis studies have shown that the primary dimensions of meaning are valence, arousal, and dominance (VAD). We present the NRC VAD Lexicon, which has human ratings of valence, arousal, and dominance for more than 20,000 English words. We use Best\u2013Worst Scaling to obtain fine-grained scores and address issues of annotation consistency that plague traditional rating scale methods of annotation. We show that the ratings obtained are vastly more reliable than those in existing lexicons. We also show that there exist statistically significant differences in the shared understanding of valence, arousal, and dominance across demographic variables such as age, gender, and personality.", "keyphrases": ["valence", "dominance", "english word"]} +{"id": "rashkin-etal-2017-truth", "title": "Truth of Varying Shades: Analyzing Language in Fake News and Political Fact-Checking", "abstract": "We present an analytic study on the language of news media in the context of political fact-checking and fake news detection. We compare the language of real news with that of satire, hoaxes, and propaganda to find linguistic characteristics of untrustworthy text. To probe the feasibility of automatic political fact-checking, we also present a case study based on PolitiFact.com using their factuality judgments on a 6-point scale. Experiments show that while media fact-checking remains an open research question, stylistic cues can help determine the truthfulness of text.", "keyphrases": ["political fact-checking", "fake news", "propaganda", "truth", "textual content"]} +{"id": "rahman-ng-2009-supervised", "title": "Supervised Models for Coreference Resolution", "abstract": "Traditional learning-based coreference resolvers operate by training a mention-pair classifier for determining whether two mentions are coreferent or not. Two independent lines of recent research have attempted to improve these mention-pair classifiers, one by learning a mention-ranking model to rank preceding mentions for a given anaphor, and the other by training an entity-mention classifier to determine whether a preceding cluster is coreferent with a given mention. We propose a cluster-ranking approach to coreference resolution that combines the strengths of mention rankers and entity-mention models. We additionally show how our cluster-ranking framework naturally allows discourse-new entity detection to be learned jointly with coreference resolution. Experimental results on the ACE data sets demonstrate its superior performance to competing approaches.", "keyphrases": ["coreference resolution", "mention", "cluster"]} +{"id": "hsu-etal-2018-unified", "title": "A Unified Model for Extractive and Abstractive Summarization using Inconsistency Loss", "abstract": "We propose a unified model combining the strengths of extractive and abstractive summarization. On the one hand, a simple extractive model can obtain sentence-level attention with high ROUGE scores but is less readable.
On the other hand, a more complicated abstractive model can obtain word-level dynamic attention to generate a more readable paragraph. In our model, sentence-level attention is used to modulate the word-level attention such that words in less attended sentences are less likely to be generated. Moreover, a novel inconsistency loss function is introduced to penalize the inconsistency between two levels of attentions. By end-to-end training our model with the inconsistency loss and original losses of extractive and abstractive models, we achieve state-of-the-art ROUGE scores while being the most informative and readable summarization on the CNN/Daily Mail dataset in a solid human evaluation.", "keyphrases": ["abstractive summarization", "inconsistency loss", "extractor"]} +{"id": "moro-etal-2014-entity", "title": "Entity Linking meets Word Sense Disambiguation: a Unified Approach", "abstract": "Entity Linking (EL) and Word Sense Disambiguation (WSD) both address the lexical ambiguity of language. But while the two tasks are pretty similar, they differ in a fundamental respect: in EL the textual mention can be linked to a named entity which may or may not contain the exact mention, while in WSD there is a perfect match between the word form (better, its lemma) and a suitable word sense. In this paper we present Babelfy, a unified graph-based approach to EL and WSD based on a loose identification of candidate meanings coupled with a densest subgraph heuristic which selects high-coherence semantic interpretations. Our experiments show state-of-the-art performances on both tasks on 6 different datasets, including a multilingual setting. Babelfy is online at ", "keyphrases": ["word sense disambiguation", "unified approach", "graph-based approach", "entity linking", "sense repository"]} +{"id": "anand-etal-2011-cats", "title": "Cats Rule and Dogs Drool!: Classifying Stance in Online Debate", "abstract": "A growing body of work has highlighted the challenges of identifying the stance a speaker holds towards a particular topic, a task that involves identifying a holistic subjective disposition. We examine stance classification on a corpus of 4873 posts across 14 topics on ConvinceMe.net, ranging from the playful to the ideological. We show that ideological debates feature a greater share of rebuttal posts, and that rebuttal posts are significantly harder to classify for stance, for both humans and trained classifiers. We also demonstrate that the number of subjective expressions varies across debates, a fact correlated with the performance of systems sensitive to sentiment-bearing terms. We present results for identifying rebuttals with 63% accuracy, and for identifying stance on a per topic basis that range from 54% to 69%, as compared to unigram baselines that vary between 49% and 60%. Our results suggest that methods that take into account the dialogic context of such posts might be fruitful.", "keyphrases": ["stance", "online debate", "dialogic structure"]} +{"id": "field-etal-2018-framing", "title": "Framing and Agenda-setting in Russian News: a Computational Analysis of Intricate Political Strategies", "abstract": "Amidst growing concern over media manipulation, NLP attention has focused on overt strategies like censorship and \u201cfake news\u201d. Here, we draw on two concepts from political science literature to explore subtler strategies for government media manipulation: agenda-setting (selecting what topics to cover) and framing (deciding how topics are covered).
We analyze 13 years (100K articles) of the Russian newspaper Izvestia and identify a strategy of distraction: articles mention the U.S. more frequently in the month directly following an economic downturn in Russia. We introduce embedding-based methods for cross-lingually projecting English frames to Russian, and discover that these articles emphasize U.S. moral failings and threats to the U.S. Our work offers new ways to identify subtle media manipulation strategies at the intersection of agenda-setting and framing.", "keyphrases": ["news", "russian newspaper izvestia", "media manipulation strategy", "framing"]} +{"id": "hoffmann-etal-2011-knowledge", "title": "Knowledge-Based Weak Supervision for Information Extraction of Overlapping Relations", "abstract": "Information extraction (IE) holds the promise of generating a large-scale knowledge base from the Web's natural language text. Knowledge-based weak supervision, using structured data to heuristically label a training corpus, works towards this goal by enabling the automated learning of a potentially unbounded number of relation extractors. Recently, researchers have developed multi-instance learning algorithms to combat the noisy training data that can come from heuristic labeling, but their models assume relations are disjoint --- for example they cannot extract the pair Founded(Jobs, Apple) and CEO-of(Jobs, Apple). \n \nThis paper presents a novel approach for multi-instance learning with overlapping relations that combines a sentence-level extraction model with a simple, corpus-level component for aggregating the individual facts. We apply our model to learn extractors for NY Times text using weak supervision from Freebase. Experiments show that the approach runs quickly and yields surprising gains in accuracy, at both the aggregate and sentence level.", "keyphrases": ["weak supervision", "information extraction", "knowledge base", "multi-instance", "entity pair"]} +{"id": "strotgen-gertz-2010-heideltime", "title": "HeidelTime: High Quality Rule-Based Extraction and Normalization of Temporal Expressions", "abstract": "In this paper, we describe HeidelTime, a system for the extraction and normalization of temporal expressions. HeidelTime is a rule-based system mainly using regular expression patterns for the extraction of temporal expressions and knowledge resources as well as linguistic clues for their normalization. In the TempEval-2 challenge, HeidelTime achieved the highest F-Score (86%) for the extraction and the best results in assigning the correct value attribute, i.e., in understanding the semantics of the temporal expressions.", "keyphrases": ["normalization", "temporal expression", "heideltime"]} +{"id": "tenney-etal-2019-bert", "title": "BERT Rediscovers the Classical NLP Pipeline", "abstract": "Pre-trained text encoders have rapidly advanced the state of the art on many NLP tasks. We focus on one such model, BERT, and aim to quantify where linguistic information is captured within the network. We find that the model represents the steps of the traditional NLP pipeline in an interpretable and localizable way, and that the regions responsible for each step appear in the expected sequence: POS tagging, parsing, NER, semantic roles, then coreference.
Qualitative analysis reveals that the model can and often does adjust this pipeline dynamically, revising lower-level decisions on the basis of disambiguating information from higher-level representations.", "keyphrases": ["classical nlp pipeline", "pos tagging", "bert", "language model", "high layer"]} +{"id": "chauhan-etal-2020-sentiment", "title": "Sentiment and Emotion help Sarcasm? A Multi-task Learning Framework for Multi-Modal Sarcasm, Sentiment and Emotion Analysis", "abstract": "In this paper, we hypothesize that sarcasm is closely related to sentiment and emotion, and thereby propose a multi-task deep learning framework to solve all these three problems simultaneously in a multi-modal conversational scenario. We, at first, manually annotate the recently released multi-modal MUStARD sarcasm dataset with sentiment and emotion classes, both implicit and explicit. For multi-tasking, we propose two attention mechanisms, viz. Inter-segment Inter-modal Attention (Ie-Attention) and Intra-segment Inter-modal Attention (Ia-Attention). The main motivation of Ie-Attention is to learn the relationship between the different segments of the sentence across the modalities. In contrast, Ia-Attention focuses within the same segment of the sentence across the modalities. Finally, representations from both the attentions are concatenated and shared across the five classes (i.e., sarcasm, implicit sentiment, explicit sentiment, implicit emotion, explicit emotion) for multi-tasking. Experimental results on the extended version of the MUStARD dataset show the efficacy of our proposed approach for sarcasm detection over the existing state-of-the-art systems. The evaluation also shows that the proposed multi-task framework yields better performance for the primary task, i.e., sarcasm detection, with the help of two secondary tasks, emotion and sentiment analysis.", "keyphrases": ["emotion", "sarcasm", "multi-task learning framework"]} +{"id": "zhang-etal-2015-randomized", "title": "Randomized Greedy Inference for Joint Segmentation, POS Tagging and Dependency Parsing", "abstract": "In this paper, we introduce a new approach for joint segmentation, POS tagging and dependency parsing. While joint modeling of these tasks addresses the issue of error propagation inherent in traditional pipeline architectures, it also complicates the inference task. Past research has addressed this challenge by placing constraints on the scoring function. In contrast, we propose an approach that can handle arbitrarily complex scoring functions. Specifically, we employ a randomized greedy algorithm that jointly predicts segmentations, POS tags and dependency trees. Moreover, this architecture readily handles different segmentation tasks, such as morphological segmentation for Arabic and word segmentation for Chinese. The joint model outperforms the state-of-the-art systems on three datasets, obtaining 2.1% TedEval absolute gain against the best published results in the 2013 SPMRL shared task.", "keyphrases": ["joint segmentation", "pos tagging", "dependency parsing", "complex scoring function", "greedy algorithm"]} +{"id": "li-etal-2018-multi-head", "title": "Multi-Head Attention with Disagreement Regularization", "abstract": "Multi-head attention is appealing for the ability to jointly attend to information from different representation subspaces at different positions. In this work, we introduce a disagreement regularization to explicitly encourage the diversity among multiple attention heads.
Specifically, we propose three types of disagreement regularization, which respectively encourage the subspace, the attended positions, and the output representation associated with each attention head to be different from other heads. Experimental results on widely-used WMT14 English-German and WMT17 Chinese-English translation tasks demonstrate the effectiveness and universality of the proposed approach.", "keyphrases": ["disagreement regularization", "diversity", "attention head"]} +{"id": "zhang-etal-2020-discriminative", "title": "Discriminative Nearest Neighbor Few-Shot Intent Detection by Transferring Natural Language Inference", "abstract": "Intent detection is one of the core components of goal-oriented dialog systems, and detecting out-of-scope (OOS) intents is also a practically important skill. Few-shot learning is attracting much attention to mitigate data scarcity, but OOS detection becomes even more challenging. In this paper, we present a simple yet effective approach, discriminative nearest neighbor classification with deep self-attention. Unlike softmax classifiers, we leverage BERT-style pairwise encoding to train a binary classifier that estimates the best matched training example for a user input. We propose to boost the discriminative ability by transferring a natural language inference (NLI) model. Our extensive experiments on a large-scale multi-domain intent detection task show that our method achieves more stable and accurate in-domain and OOS detection than RoBERTa-based classifiers and embedding-based nearest neighbor approaches. More notably, the NLI transfer enables our 10-shot model to perform competitively with 50-shot or even full-shot classifiers, while we can keep the inference time constant by leveraging a faster embedding retrieval model.", "keyphrases": ["intent detection", "natural language inference", "few-shot text classification"]} +{"id": "bykh-meurers-2012-native", "title": "Native Language Identification using Recurring n-grams \u2013 Investigating Abstraction and Domain Dependence", "abstract": "Native Language Identification tackles the problem of determining the native language of an author based on a text the author has written in a second language. In this paper, we discuss the systematic use of recurring n-grams of any length as features for training a native language classifier. Starting with surface n-grams, we investigate two degrees of abstraction incorporating parts-of-speech. The approach outperforms previous work employing a comparable data setup, reaching 89.71% accuracy for a task with seven native languages using data from the International Corpus of Learner English (ICLE). We then investigate the claim by Brooke and Hirst (2011) that a content bias in ICLE seems to result in an easy classification by topic instead of by native language characteristics. We show that training our model on ICLE and testing it on three other, independently compiled learner corpora dealing with other topics still results in high accuracy classification.", "keyphrases": ["n-gram", "abstraction", "native language identification"]} +{"id": "zhang-etal-2019-paws", "title": "PAWS: Paraphrase Adversaries from Word Scrambling", "abstract": "Existing paraphrase identification datasets lack sentence pairs that have high lexical overlap without being paraphrases. Models trained on such data fail to distinguish pairs like flights from New York to Florida and flights from Florida to New York.
This paper introduces PAWS (Paraphrase Adversaries from Word Scrambling), a new dataset with 108,463 well-formed paraphrase and non-paraphrase pairs with high lexical overlap. Challenging pairs are generated by controlled word swapping and back translation, followed by fluency and paraphrase judgments by human raters. State-of-the-art models trained on existing datasets have dismal performance on PAWS (40% accuracy); however, including PAWS training data for these models improves their accuracy to 85% while maintaining performance on existing tasks. In contrast, models that do not capture non-local contextual information fail even with PAWS training examples. As such, PAWS provides an effective instrument for driving further progress on models that better exploit structure, context, and pairwise comparisons.", "keyphrases": ["paraphrase adversaries", "word scrambling", "sentence pair"]} +{"id": "gerber-chai-2010-beyond", "title": "Beyond NomBank: A Study of Implicit Arguments for Nominal Predicates", "abstract": "Despite its substantial coverage, NomBank does not account for all within-sentence arguments and ignores extra-sentential arguments altogether. These arguments, which we call implicit, are important to semantic processing, and their recovery could potentially benefit many NLP applications. We present a study of implicit arguments for a select group of frequent nominal predicates. We show that implicit arguments are pervasive for these predicates, adding 65% to the coverage of NomBank. We demonstrate the feasibility of recovering implicit arguments with a supervised classification model. Our results and analyses provide a baseline for future work on this emerging task.", "keyphrases": ["nombank", "implicit argument", "semantic role"]} +{"id": "yatskar-etal-2010-sake", "title": "For the sake of simplicity: Unsupervised extraction of lexical simplifications from Wikipedia", "abstract": "We report on work in progress on extracting lexical simplifications (e.g., \"collaborate\" \u2192 \"work together\"), focusing on utilizing edit histories in Simple English Wikipedia for this task. We consider two main approaches: (1) deriving simplification probabilities via an edit model that accounts for a mixture of different operations, and (2) using metadata to focus on edits that are more likely to be simplification operations. We find our methods to outperform a reasonable baseline and yield many high-quality lexical simplifications not included in an independently-created manually prepared list.", "keyphrases": ["lexical simplification", "wikipedia", "relevant edit"]} +{"id": "tang-etal-2016-aspect", "title": "Aspect Level Sentiment Classification with Deep Memory Network", "abstract": "We introduce a deep memory network for aspect level sentiment classification. Unlike feature-based SVM and sequential neural models such as LSTM, this approach explicitly captures the importance of each context word when inferring the sentiment polarity of an aspect. Such importance degree and text representation are calculated with multiple computational layers, each of which is a neural attention model over an external memory. Experiments on laptop and restaurant datasets demonstrate that our approach performs comparably to the state-of-the-art feature-based SVM system, and substantially better than LSTM and attention-based LSTM architectures. On both datasets we show that multiple computational layers could improve the performance. Moreover, our approach is also fast.
The deep memory network with 9 layers is 15 times faster than LSTM with a CPU implementation.", "keyphrases": ["sentiment classification", "deep memory network", "question-answering perspective"]} +{"id": "zhao-etal-2011-topical", "title": "Topical Keyphrase Extraction from Twitter", "abstract": "Summarizing and analyzing Twitter content is an important and challenging task. In this paper, we propose to extract topical keyphrases as one way to summarize Twitter. We propose a context-sensitive topical PageRank method for keyword ranking and a probabilistic scoring function that considers both relevance and interestingness of keyphrases for keyphrase ranking. We evaluate our proposed methods on a large Twitter data set. Experiments show that these methods are very effective for topical keyphrase extraction.", "keyphrases": ["twitter", "pagerank", "topical keyphrase extraction"]} +{"id": "johnson-etal-2017-googles", "title": "Google's Multilingual Neural Machine Translation System: Enabling Zero-Shot Translation", "abstract": "We propose a simple solution to use a single Neural Machine Translation (NMT) model to translate between multiple languages. Our solution requires no changes to the model architecture from a standard NMT system but instead introduces an artificial token at the beginning of the input sentence to specify the required target language. Using a shared wordpiece vocabulary, our approach enables Multilingual NMT systems using a single model. On the WMT'14 benchmarks, a single multilingual model achieves comparable performance for English\u2192French and surpasses state-of-the-art results for English\u2192German. Similarly, a single multilingual model surpasses state-of-the-art results for French\u2192English and German\u2192English on WMT'14 and WMT'15 benchmarks, respectively. On production corpora, multilingual models of up to twelve language pairs allow for better translation of many individual pairs. Our models can also learn to perform implicit bridging between language pairs never seen explicitly during training, showing that transfer learning and zero-shot translation are possible for neural translation. Finally, we show analyses that hint at a universal interlingua representation in our models and also show some interesting examples when mixing languages.", "keyphrases": ["machine translation", "transfer learning", "google", "nmt model", "resource language"]} +{"id": "xia-ding-2019-emotion", "title": "Emotion-Cause Pair Extraction: A New Task to Emotion Analysis in Texts", "abstract": "Emotion cause extraction (ECE), the task aimed at extracting the potential causes behind certain emotions in text, has gained much attention in recent years due to its wide applications. However, it suffers from two shortcomings: 1) the emotion must be annotated before cause extraction in ECE, which greatly limits its applications in real-world scenarios; 2) the way to first annotate emotion and then extract the cause ignores the fact that they are mutually indicative. In this work, we propose a new task: emotion-cause pair extraction (ECPE), which aims to extract the potential pairs of emotions and corresponding causes in a document. We propose a 2-step approach to address this new ECPE task, which first performs individual emotion extraction and cause extraction via multi-task learning, and then conducts emotion-cause pairing and filtering.
The experimental results on a benchmark emotion cause corpus prove the feasibility of the ECPE task as well as the effectiveness of our approach.", "keyphrases": ["cause", "ecpe", "emotion-cause pair extraction", "two-step method", "sentiment analysis"]} +{"id": "keller-lapata-2003-using", "title": "Using the Web to Obtain Frequencies for Unseen Bigrams", "abstract": "This article shows that the Web can be employed to obtain frequencies for bigrams that are unseen in a given corpus. We describe a method for retrieving counts for adjective-noun, noun-noun, and verb-object bigrams from the Web by querying a search engine. We evaluate this method by demonstrating: (a) a high correlation between Web frequencies and corpus frequencies; (b) a reliable correlation between Web frequencies and plausibility judgments; (c) a reliable correlation between Web frequencies and frequencies recreated using class-based smoothing; (d) a good performance of Web frequencies in a pseudo disambiguation task.", "keyphrases": ["web", "frequency", "unseen bigram", "plausibility judgment", "english language"]} +{"id": "wu-etal-2020-corefqa", "title": "CorefQA: Coreference Resolution as Query-based Span Prediction", "abstract": "In this paper, we present CorefQA, an accurate and extensible approach for the coreference resolution task. We formulate the problem as a span prediction task, like in question answering: A query is generated for each candidate mention using its surrounding context, and a span prediction module is employed to extract the text spans of the coreferences within the document using the generated query. This formulation comes with the following key advantages: (1) The span prediction strategy provides the flexibility of retrieving mentions left out at the mention proposal stage; (2) In the question answering framework, encoding the mention and its context explicitly in a query makes it possible to have a deep and thorough examination of cues embedded in the context of coreferent mentions; and (3) A plethora of existing question answering datasets can be used for data augmentation to improve the model's generalization capability. Experiments demonstrate significant performance boost over previous models, with 83.1 (+3.5) F1 score on the CoNLL-2012 benchmark and 87.5 (+2.5) F1 score on the GAP benchmark.", "keyphrases": ["coreference resolution", "query-based span prediction", "mention"]} +{"id": "gururangan-etal-2018-annotation", "title": "Annotation Artifacts in Natural Language Inference Data", "abstract": "Large-scale datasets for natural language inference are created by presenting crowd workers with a sentence (premise), and asking them to generate three new sentences (hypotheses) that it entails, contradicts, or is logically neutral with respect to. We show that, in a significant portion of such data, this protocol leaves clues that make it possible to identify the label by looking only at the hypothesis, without observing the premise. Specifically, we show that a simple text categorization model can correctly classify the hypothesis alone in about 67% of SNLI (Bowman et al., 2015) and 53% of MultiNLI (Williams et al., 2017). Our analysis reveals that specific linguistic phenomena such as negation and vagueness are highly correlated with certain inference classes.
Our findings suggest that the success of natural language inference models to date has been overestimated, and that the task remains a hard open problem.", "keyphrases": ["natural language inference", "hypothesis", "annotation artifact", "dataset bias", "surface-level reasoning"]} +{"id": "zampieri-etal-2014-report", "title": "A Report on the DSL Shared Task 2014", "abstract": "This paper summarizes the methods, results and findings of the Discriminating between Similar Languages (DSL) shared task 2014. The shared task provided data from 13 different languages and varieties divided into 6 groups. Participants were required to train their systems to discriminate between languages on a training and development set containing 20,000 sentences from each language (closed submission) and/or any other dataset (open submission). One month later, a test set containing 1,000 unidentified instances per language was released for evaluation. The DSL shared task received 22 registrations and 8 final submissions. The best system obtained 95.7% average accuracy.", "keyphrases": ["dsl", "similar language", "group", "task report"]} +{"id": "liu-etal-2020-event", "title": "Event Extraction as Machine Reading Comprehension", "abstract": "Event extraction (EE) is a crucial information extraction task that aims to extract event information in texts. Previous methods for EE typically model it as a classification task, which are usually prone to the data scarcity problem. In this paper, we propose a new learning paradigm of EE, by explicitly casting it as a machine reading comprehension problem (MRC). Our approach includes an unsupervised question generation process, which can transfer event schema into a set of natural questions, followed by a BERT-based question-answering process to retrieve answers as EE results. This learning paradigm enables us to strengthen the reasoning process of EE, by introducing sophisticated models in MRC, and relieve the data scarcity problem, by introducing the large-scale datasets in MRC. The empirical results show that: i) our approach attains state-of-the-art performance by considerable margins over previous methods. ii) Our model excels in the data-scarce scenario, for example, obtaining 49.8% in F1 for event argument extraction with only 1% data, compared with 2.2% for the previous method. iii) Our model also fits with zero-shot scenarios, achieving 37.0% and 16% in F1 on two datasets without using any EE training data.", "keyphrases": ["machine reading comprehension", "event extraction", "eae"]} +{"id": "yu-etal-2018-spider", "title": "Spider: A Large-Scale Human-Labeled Dataset for Complex and Cross-Domain Semantic Parsing and Text-to-SQL Task", "abstract": "We present Spider, a large-scale complex and cross-domain semantic parsing and text-to-SQL dataset annotated by 11 college students. It consists of 10,181 questions and 5,693 unique complex SQL queries on 200 databases with multiple tables covering 138 different domains. We define a new complex and cross-domain semantic parsing and text-to-SQL task so that different complicated SQL queries and databases appear in train and test sets. In this way, the task requires the model to generalize well to both new SQL queries and new database schemas. Therefore, Spider is distinct from most of the previous semantic parsing tasks because they all use a single database and have the exact same program in the train set and the test set.
We experiment with various state-of-the-art models and the best model achieves only 9.7% exact matching accuracy on a database split setting. This shows that Spider presents a strong challenge for future research. Our dataset and task with the most recent updates are publicly available at .", "keyphrases": ["semantic parsing", "text-to-sql task", "complex sql query", "database", "natural language question"]} +{"id": "shardlow-2013-comparison", "title": "A Comparison of Techniques to Automatically Identify Complex Words.", "abstract": "Identifying complex words (CWs) is an important, yet often overlooked, task within lexical simplification (The process of automatically replacing CWs with simpler alternatives). If too many words are identified then substitutions may be made erroneously, leading to a loss of meaning. If too few words are identified then those which impede a user\u2019s understanding may be missed, resulting in a complex final text. This paper addresses the task of evaluating different methods for CW identification. A corpus of sentences with annotated CWs is mined from Simple Wikipedia edit histories, which is then used as the basis for several experiments. Firstly, the corpus design is explained and the results of the validation experiments using human judges are reported. Experiments are carried out into the CW identification techniques of: simplifying everything, frequency thresholding and training a support vector machine. These are based upon previous approaches to the task and show that thresholding does not perform significantly differently to the more naive technique of simplifying everything. The support vector machine achieves a slight increase in precision over the other two methods, but at the cost of a dramatic trade off in recall.", "keyphrases": ["complex word", "simplification", "frequency", "support vector machine", "cwi"]} +{"id": "laubli-etal-2018-machine", "title": "Has Machine Translation Achieved Human Parity? A Case for Document-level Evaluation", "abstract": "Recent research suggests that neural machine translation achieves parity with professional human translation on the WMT Chinese\u2013English news translation task. We empirically test this claim with alternative evaluation protocols, contrasting the evaluation of single sentences and entire documents. In a pairwise ranking experiment, human raters assessing adequacy and fluency show a stronger preference for human over machine translation when evaluating documents as compared to isolated sentences. Our findings emphasise the need to shift towards document-level evaluation as machine translation improves to the degree that errors which are hard or impossible to spot at the sentence-level become decisive in discriminating quality of different translation outputs.", "keyphrases": ["machine translation", "parity", "document-level evaluation", "claim", "fluency"]} +{"id": "sellam-etal-2020-bleurt", "title": "BLEURT: Learning Robust Metrics for Text Generation", "abstract": "Text generation has made significant advances in the last few years. Yet, evaluation metrics have lagged behind, as the most popular choices (e.g., BLEU and ROUGE) may correlate poorly with human judgment. We propose BLEURT, a learned evaluation metric for English based on BERT. BLEURT can model human judgment with a few thousand possibly biased training examples. A key aspect of our approach is a novel pre-training scheme that uses millions of synthetic examples to help the model generalize. 
BLEURT provides state-of-the-art results on the last three years of the WMT Metrics shared task and the WebNLG data set. In contrast to a vanilla BERT-based approach, it yields superior results even when the training data is scarce and out-of-distribution.", "keyphrases": ["text generation", "evaluation metric", "human judgment", "bleurt", "synthetic data"]} +{"id": "wang-etal-2016-attention", "title": "Attention-based LSTM for Aspect-level Sentiment Classification", "abstract": "Aspect-level sentiment classification is a fine-grained task in sentiment analysis. Since it provides more complete and in-depth results, aspect-level sentiment analysis has received much attention these years. In this paper, we reveal that the sentiment polarity of a sentence is not only determined by the content but is also highly related to the concerned aspect. For instance, \u201cThe appetizers are ok, but the service is slow.\u201d, for aspect taste, the polarity is positive while for service, the polarity is negative. Therefore, it is worthwhile to explore the connection between an aspect and the content of a sentence. To this end, we propose an Attention-based Long Short-Term Memory Network for aspect-level sentiment classification. The attention mechanism can concentrate on different parts of a sentence when different aspects are taken as input. We experiment on the SemEval 2014 dataset and results show that our model achieves state-of-the-art performance on aspect-level sentiment classification.", "keyphrases": ["sentiment classification", "short-term memory network", "different aspect", "attention-based lstm", "hidden state"]} +{"id": "zhou-etal-2005-exploring", "title": "Exploring Various Knowledge in Relation Extraction", "abstract": "Extracting semantic relationships between entities is challenging. This paper investigates the incorporation of diverse lexical, syntactic and semantic knowledge in feature-based relation extraction using SVM. Our study illustrates that the base phrase chunking information is very effective for relation extraction and contributes to most of the performance improvement from the syntactic aspect while additional information from full parsing gives limited further enhancement. This suggests that most of the useful information in full parse trees for relation extraction is shallow and can be captured by chunking. We also demonstrate how semantic information such as WordNet and Name List can be used in feature-based relation extraction to further improve the performance. Evaluation on the ACE corpus shows that effective incorporation of diverse features enables our system to outperform previously best-reported systems on the 24 ACE relation subtypes and significantly outperforms tree kernel-based systems by over 20 in F-measure on the 5 ACE relation types.", "keyphrases": ["relation extraction", "svm", "feature-based method", "syntactic feature", "large amount"]} +{"id": "kim-etal-2009-overview", "title": "Overview of BioNLP'09 Shared Task on Event Extraction", "abstract": "The paper presents the design and implementation of the BioNLP'09 Shared Task, and reports the final results with analysis. The shared task consists of three sub-tasks, each of which addresses bio-molecular event extraction at a different level of specificity. The data was developed based on the GENIA event corpus. The shared task was run over 12 weeks, drawing initial interest from 42 teams. Of these teams, 24 submitted final results.
The evaluation results are encouraging, indicating that state-of-the-art performance is approaching a practically applicable level and revealing some remaining challenges.", "keyphrases": ["shared task", "event extraction", "bionlp", "negation", "trigger"]} +{"id": "sun-etal-2019-aspect", "title": "Aspect-Level Sentiment Analysis Via Convolution over Dependency Tree", "abstract": "We propose a method based on neural networks to identify the sentiment polarity of opinion words expressed on a specific aspect of a sentence. Although a large majority of works typically focus on leveraging the expressive power of neural networks in handling this task, we explore the possibility of integrating dependency trees with neural networks for representation learning. To this end, we present a convolution over a dependency tree (CDT) model which exploits a Bi-directional Long Short Term Memory (Bi-LSTM) to learn representations for features of a sentence, and further enhance the embeddings with a graph convolutional network (GCN) which operates directly on the dependency tree of the sentence. Our approach propagates both contextual and dependency information from opinion words to aspect words, offering discriminative properties for supervision. Experimental results rank our approach as the new state-of-the-art in aspect-based sentiment classification.", "keyphrases": ["sentiment analysis", "dependency tree", "convolutional network"]} +{"id": "antoun-etal-2020-arabert", "title": "AraBERT: Transformer-based Model for Arabic Language Understanding", "abstract": "The Arabic language is a morphologically rich language with relatively few resources and a less explored syntax compared to English. Given these limitations, Arabic Natural Language Processing (NLP) tasks like Sentiment Analysis (SA), Named Entity Recognition (NER), and Question Answering (QA), have proven to be very challenging to tackle. Recently, with the surge of transformer-based models, language-specific BERT-based models have proven to be very efficient at language understanding, provided they are pre-trained on a very large corpus. Such models were able to set new standards and achieve state-of-the-art results for most NLP tasks. In this paper, we pre-trained BERT specifically for the Arabic language in the pursuit of achieving the same success that BERT did for the English language. The performance of AraBERT is compared to multilingual BERT from Google and other state-of-the-art approaches. The results showed that the newly developed AraBERT achieved state-of-the-art performance on most tested Arabic NLP tasks. The pretrained araBERT models are publicly available on hoping to encourage research and applications for Arabic NLP.", "keyphrases": ["arabic language understanding", "question answering", "arabert"]} +{"id": "liu-etal-2016-evaluate", "title": "How NOT To Evaluate Your Dialogue System: An Empirical Study of Unsupervised Evaluation Metrics for Dialogue Response Generation", "abstract": "We investigate evaluation metrics for dialogue response generation systems where supervised labels, such as task completion, are not available. Recent works in response generation have adopted metrics from machine translation to compare a model's generated response to a single target response. We show that these metrics correlate very weakly with human judgements in the non-technical Twitter domain, and not at all in the technical Ubuntu domain.
We provide quantitative and qualitative results highlighting specific weaknesses in existing metrics, and provide recommendations for future development of better automatic evaluation metrics for dialogue systems.", "keyphrases": ["dialogue system", "evaluation metric", "machine translation", "human judgment", "response quality"]} +{"id": "li-etal-2013-joint", "title": "Joint Event Extraction via Structured Prediction with Global Features", "abstract": "Traditional approaches to the task of ACE event extraction usually rely on sequential pipelines with multiple stages, which suffer from error propagation since event triggers and arguments are predicted in isolation by independent local classifiers. By contrast, we propose a joint framework based on structured prediction which extracts triggers and arguments together so that the local predictions can be mutually improved. In addition, we propose to incorporate global features which explicitly capture the dependencies of multiple triggers and arguments. Experimental results show that our joint approach with local features outperforms the pipelined baseline, and adding global features further improves the performance significantly. Our approach advances state-of-the-art sentence-level event extraction, and even outperforms previous argument labeling methods which use external knowledge from other sentences and documents.", "keyphrases": ["event extraction", "structured prediction", "global feature", "error propagation", "trigger"]} +{"id": "meyer-gurevych-2011-psycholinguists", "title": "What Psycholinguists Know About Chemistry: Aligning Wiktionary and WordNet for Increased Domain Coverage", "abstract": "To date, no lexical resource can claim to be fully comprehensive or perform best for every NLP task. This has caused a steep increase in resource alignment research. An important challenge is thereby the alignment of differently represented word senses, which we address in this paper. In particular, we propose a new automatically aligned resource of Wiktionary and WordNet that has (i) a very high domain coverage of word senses and (ii) an enriched sense representation, including pronunciations, etymologies, translations, etc. We evaluate our alignment both quantitatively and qualitatively, and explore how it can contribute to practical tasks.", "keyphrases": ["wiktionary", "wordnet", "domain coverage"]} +{"id": "dahlmeier-ng-2012-beam", "title": "A Beam-Search Decoder for Grammatical Error Correction", "abstract": "We present a novel beam-search decoder for grammatical error correction. The decoder iteratively generates new hypothesis corrections from current hypotheses and scores them based on features of grammatical correctness and fluency. These features include scores from discriminative classifiers for specific error categories, such as articles and prepositions. Unlike all previous approaches, our method is able to perform correction of whole sentences with multiple and interacting errors while still taking advantage of powerful existing classifier approaches.
Our decoder achieves an F1 correction score significantly higher than all previously published scores on the Helping Our Own (HOO) shared task data set.", "keyphrases": ["beam-search decoder", "grammatical error correction", "noun number"]} +{"id": "qian-etal-2019-reducing", "title": "Reducing Gender Bias in Word-Level Language Models with a Gender-Equalizing Loss Function", "abstract": "Gender bias exists in natural language datasets, which neural language models tend to learn, resulting in biased text generation. In this research, we propose a debiasing approach based on the loss function modification. We introduce a new term to the loss function which attempts to equalize the probabilities of male and female words in the output. Using an array of bias evaluation metrics, we provide empirical evidence that our approach successfully mitigates gender bias in language models without increasing perplexity. In comparison to existing debiasing strategies, data augmentation, and word embedding debiasing, our method performs better in several aspects, especially in reducing gender bias in occupation words. Finally, we introduce a combination of data augmentation and our approach and show that it outperforms existing strategies in all bias evaluation metrics.", "keyphrases": ["gender bias", "language model", "loss function"]} +{"id": "yu-ettinger-2020-assessing", "title": "Assessing Phrasal Representation and Composition in Transformers", "abstract": "Deep transformer models have pushed performance on NLP tasks to new limits, suggesting sophisticated treatment of complex linguistic inputs, such as phrases. However, we have limited understanding of how these models handle representation of phrases, and whether this reflects sophisticated composition of phrase meaning like that done by humans. In this paper, we present systematic analysis of phrasal representations in state-of-the-art pre-trained transformers. We use tests leveraging human judgments of phrase similarity and meaning shift, and compare results before and after control of word overlap, to tease apart lexical effects versus composition effects. We find that phrase representation in these models relies heavily on word content, with little evidence of nuanced composition. We also identify variations in phrase representation quality across models, layers, and representation types, and make corresponding recommendations for usage of representations from these models.", "keyphrases": ["phrasal representation", "composition", "pre-trained transformer"]} +{"id": "yeh-etal-2009-wikiwalk", "title": "WikiWalk: Random walks on Wikipedia for Semantic Relatedness", "abstract": "Computing semantic relatedness of natural language texts is a key component of tasks such as information retrieval and summarization, and often depends on knowledge of a broad range of real-world concepts and relationships. We address this knowledge integration issue by computing semantic relatedness using personalized PageRank (random walks) on a graph derived from Wikipedia. This paper evaluates methods for building the graph, including link selection strategies, and two methods for representing input texts as distributions over the graph nodes: one based on a dictionary lookup, the other based on Explicit Semantic Analysis.
We evaluate our techniques on standard word relatedness and text similarity datasets, finding that they capture similarity information complementary to existing Wikipedia-based relatedness measures, resulting in small improvements on a state-of-the-art measure.", "keyphrases": ["random walk", "wikipedia", "semantic relatedness"]} +{"id": "fader-etal-2011-identifying", "title": "Identifying Relations for Open Information Extraction", "abstract": "Open Information Extraction (IE) is the task of extracting assertions from massive corpora without requiring a pre-specified vocabulary. This paper shows that the output of state-of-the-art Open IE systems is rife with uninformative and incoherent extractions. To overcome these problems, we introduce two simple syntactic and lexical constraints on binary relations expressed by verbs. We implemented the constraints in the ReVerb Open IE system, which more than doubles the area under the precision-recall curve relative to previous extractors such as TextRunner and WOEpos. More than 30% of ReVerb's extractions are at precision 0.8 or higher---compared to virtually none for earlier systems. The paper concludes with a detailed analysis of ReVerb's errors, suggesting directions for future work.", "keyphrases": ["open information extraction", "lexical constraint", "textrunner", "triple", "oie"]} +{"id": "wu-etal-2017-adversarial", "title": "Adversarial Training for Relation Extraction", "abstract": "Adversarial training is a means of regularizing classification algorithms by generating adversarial noise to the training data. We apply adversarial training in relation extraction within the multi-instance multi-label learning framework. We evaluate various neural network architectures on two different datasets. Experimental results demonstrate that adversarial training is generally effective for both CNN and RNN models and significantly improves the precision of predicted relations.", "keyphrases": ["relation extraction", "rnn", "adversarial training"]} +{"id": "hokamp-liu-2017-lexically", "title": "Lexically Constrained Decoding for Sequence Generation Using Grid Beam Search", "abstract": "We present Grid Beam Search (GBS), an algorithm which extends beam search to allow the inclusion of pre-specified lexical constraints. The algorithm can be used with any model which generates sequences token by token. Lexical constraints take the form of phrases or words that must be present in the output sequence. This is a very general way to incorporate auxiliary knowledge into a model's output without requiring any modification of the parameters or training data. We demonstrate the feasibility and flexibility of Lexically Constrained Decoding by conducting experiments on Neural Interactive-Predictive Translation, as well as Domain Adaptation for Neural Machine Translation. Experiments show that GBS can provide large improvements in translation quality in interactive scenarios, and that, even without any user input, GBS can be used to achieve significant gains in performance in domain adaptation scenarios.", "keyphrases": ["decoding", "grid beam search", "lexical constraint", "translation quality", "generation model"]} +{"id": "heilman-etal-2014-predicting", "title": "Predicting Grammaticality on an Ordinal Scale", "abstract": "Automated methods for identifying whether sentences are grammatical have various potential applications (e.g., machine translation, automated essay scoring, computer-assisted language learning).
In this work, we construct a statistical model of grammaticality using various linguistic features (e.g., misspelling counts, parser outputs, n-gram language model scores). We also present a new publicly available dataset of learner sentences judged for grammaticality on an ordinal scale. In evaluations, we compare our system to the one from Post (2011) and find that our approach yields state-of-the-art performance.", "keyphrases": ["grammaticality", "ordinal scale", "gug"]}
+{"id": "chen-etal-2017-adversarial", "title": "Adversarial Multi-Criteria Learning for Chinese Word Segmentation", "abstract": "Different linguistic perspectives cause many diverse segmentation criteria for Chinese word segmentation (CWS). Most existing methods focus on improving the performance for each single criterion. However, it is interesting to exploit these different criteria and mine their common underlying knowledge. In this paper, we propose adversarial multi-criteria learning for CWS by integrating shared knowledge from multiple heterogeneous segmentation criteria. Experiments on eight corpora with heterogeneous segmentation criteria show that the performance of each corpus obtains a significant improvement, compared to single-criterion learning. The source code of this paper is available on GitHub.", "keyphrases": ["chinese word segmentation", "cws", "adversarial multi-criteria learning"]}
+{"id": "liu-etal-2018-modeling", "title": "Modeling Sentiment Association in Discourse for Humor Recognition", "abstract": "Humor is one of the most attractive parts of human communication. However, automatically recognizing humor in text is challenging due to the complex characteristics of humor. This paper proposes to model sentiment association between discourse units to indicate how the punchline breaks the expectation of the setup. We found that discourse relation, sentiment conflict and sentiment transition are effective indicators for humor recognition. From the perspective of using sentiment-related features, sentiment association in discourse is more useful than counting the number of emotional words.", "keyphrases": ["sentiment association", "humor recognition", "discourse relation"]}
+{"id": "barzilay-mckeown-2005-sentence", "title": "Sentence Fusion for Multidocument News Summarization", "abstract": "A system that can produce informative summaries, highlighting common information found in many online documents, will help Web users to pinpoint information that they need without extensive reading. In this article, we introduce sentence fusion, a novel text-to-text generation technique for synthesizing common information across documents. Sentence fusion involves bottom-up local multisequence alignment to identify phrases conveying similar information and statistical generation to combine common phrases into a sentence. Sentence fusion moves the summarization field from the use of purely extractive methods to the generation of abstracts that contain sentences not found in any of the input documents and can synthesize information across sources.", "keyphrases": ["summarization", "common information", "sentence fusion", "linearization", "text-to-text generation process"]}
+{"id": "zhou-etal-2019-gear", "title": "GEAR: Graph-based Evidence Aggregating and Reasoning for Fact Verification", "abstract": "Fact verification (FV) is a challenging task which requires retrieving relevant evidence from plain text and using the evidence to verify given claims.
Many claims require simultaneously integrating and reasoning over several pieces of evidence for verification. However, previous work employs simple models to extract information from evidence without letting evidence communicate with each other, e.g., merely concatenating the evidence for processing. Therefore, these methods are unable to grasp sufficient relational and logical information among the evidence. To alleviate this issue, we propose a graph-based evidence aggregating and reasoning (GEAR) framework which enables information to transfer on a fully-connected evidence graph and then utilizes different aggregators to collect multi-evidence information. We further employ BERT, an effective pre-trained language representation model, to improve the performance. Experimental results on the large-scale benchmark dataset FEVER demonstrate that GEAR can leverage multi-evidence information for FV and thus achieves a promising result with a test FEVER score of 67.10%. Our code is available at .", "keyphrases": ["reasoning", "fact verification", "piece", "veracity"]}
+{"id": "owoputi-etal-2013-improved", "title": "Improved Part-of-Speech Tagging for Online Conversational Text with Word Clusters", "abstract": "We consider the problem of part-of-speech tagging for informal, online conversational text. We systematically evaluate the use of large-scale unsupervised word clustering and new lexical features to improve tagging accuracy. With these features, our system achieves state-of-the-art tagging results on both Twitter and IRC POS tagging tasks; Twitter tagging is improved from 90% to 93% accuracy (more than 3% absolute). Qualitative analysis of these word clusters yields insights about NLP and linguistic phenomena in this genre. Additionally, we contribute the first POS annotation guidelines for such text and release a new dataset of English language tweets annotated using these guidelines. Tagging software, annotation guidelines, and large-scale word clusters are available at: http://www.ark.cs.cmu.edu/TweetNLP This paper describes release 0.3 of the \u201cCMU Twitter Part-of-Speech Tagger\u201d and annotated data. [This paper is forthcoming in Proceedings of NAACL 2013; Atlanta, GA, USA.]", "keyphrases": ["part-of-speech tagging", "online conversational text", "word cluster"]}
+{"id": "chung-etal-2016-character", "title": "A Character-level Decoder without Explicit Segmentation for Neural Machine Translation", "abstract": "The existing machine translation systems, whether phrase-based or neural, have relied almost exclusively on word-level modelling with explicit segmentation. In this paper, we ask a fundamental question: can neural machine translation generate a character sequence without any explicit segmentation? To answer this question, we evaluate an attention-based encoder-decoder with a subword-level encoder and a character-level decoder on four language pairs--En-Cs, En-De, En-Ru and En-Fi--using the parallel corpora from WMT'15. Our experiments show that the models with a character-level decoder outperform the ones with a subword-level decoder on all of the four language pairs.
Furthermore, the ensembles of neural models with a character-level decoder outperform the state-of-the-art non-neural machine translation systems on En-Cs, En-De and En-Fi and perform comparably on En-Ru.", "keyphrases": ["character-level decoder", "explicit segmentation", "neural machine translation", "word-level modelling", "character"]}
+{"id": "huang-etal-2016-visual", "title": "Visual Storytelling", "abstract": "We introduce the first dataset for sequential vision-to-language, and explore how this data may be used for the task of visual storytelling. The first release of this dataset, SIND v.1, includes 81,743 unique photos in 20,211 sequences, aligned to both descriptive (caption) and story language. We establish several strong baselines for the storytelling task, and motivate an automatic metric to benchmark progress. Modelling concrete description as well as figurative and social language, as provided in this dataset and the storytelling task, has the potential to move artificial intelligence from basic understandings of typical visual scenes towards more and more human-like understanding of grounded event structure and subjective expression.", "keyphrases": ["human-like understanding", "visual storytelling", "image", "temporal sequence"]}
+{"id": "dorr-etal-2003-hedge", "title": "Hedge Trimmer: A Parse-and-Trim Approach to Headline Generation", "abstract": "This paper presents Hedge Trimmer, a HEaDline GEneration system that creates a headline for a newspaper story using linguistically-motivated heuristics to guide the choice of a potential headline. We present feasibility tests used to establish the validity of an approach that constructs a headline by selecting words in order from a story. In addition, we describe experimental results that demonstrate the effectiveness of our linguistically-motivated approach over an HMM-based model, using both human evaluation and automatic metrics for comparing the two approaches.", "keyphrases": ["headline generation", "linguistically-motivated heuristic", "hedge trimmer"]}
+{"id": "li-etal-2016-deep", "title": "Deep Reinforcement Learning for Dialogue Generation", "abstract": "Recent neural models of dialogue generation offer great promise for generating responses for conversational agents, but tend to be shortsighted, predicting utterances one at a time while ignoring their influence on future outcomes. Modeling the future direction of a dialogue is crucial to generating coherent, interesting dialogues, a need which led traditional NLP models of dialogue to draw on reinforcement learning. In this paper, we show how to integrate these goals, applying deep reinforcement learning to model future reward in chatbot dialogue. The model simulates dialogues between two virtual agents, using policy gradient methods to reward sequences that display three useful conversational properties: informativity (non-repetitive turns), coherence, and ease of answering (related to forward-looking function). We evaluate our model on diversity and length, as well as with human judges, showing that the proposed algorithm generates more interactive responses and manages to foster a more sustained conversation in dialogue simulation.
This work marks a first step towards learning a neural conversational model based on the long-term success of dialogues.", "keyphrases": ["dialogue generation", "reward", "deep reinforcement learning", "encoder-decoder model"]} +{"id": "velldal-etal-2012-speculation", "title": "Speculation and Negation: Rules, Rankers, and the Role of Syntax", "abstract": "This article explores a combination of deep and shallow approaches to the problem of resolving the scope of speculation and negation within a sentence, specifically in the domain of biomedical research literature. The first part of the article focuses on speculation. After first showing how speculation cues can be accurately identified using a very simple classifier informed only by local lexical context, we go on to explore two different syntactic approaches to resolving the in-sentence scopes of these cues. Whereas one uses manually crafted rules operating over dependency structures, the other automatically learns a discriminative ranking function over nodes in constituent trees. We provide an in-depth error analysis and discussion of various linguistic properties characterizing the problem, and show that although both approaches perform well in isolation, even better results can be obtained by combining them, yielding the best published results to date on the CoNLL-2010 Shared Task data. The last part of the article describes how our speculation system is ported to also resolve the scope of negation. With only modest modifications to the initial design, the system obtains state-of-the-art results on this task also.", "keyphrases": ["negation", "scope", "syntactic approach", "speculation"]} +{"id": "zhang-etal-2019-joint", "title": "Joint Slot Filling and Intent Detection via Capsule Neural Networks", "abstract": "Being able to recognize words as slots and detect the intent of an utterance has been a keen issue in natural language understanding. The existing works either treat slot filling and intent detection separately in a pipeline manner, or adopt joint models which sequentially label slots while summarizing the utterance-level intent without explicitly preserving the hierarchical relationship among words, slots, and intents. To exploit the semantic hierarchy for effective modeling, we propose a capsule-based neural network model which accomplishes slot filling and intent detection via a dynamic routing-by-agreement schema. A re-routing schema is proposed to further synergize the slot filling performance using the inferred intent representation. Experiments on two real-world datasets show the effectiveness of our model when compared with other alternative model architectures, as well as existing natural language understanding services.", "keyphrases": ["slot filling", "intent detection", "hierarchical capsule"]} +{"id": "erk-2009-representing", "title": "Representing words as regions in vector space", "abstract": "Vector space models of word meaning typically represent the meaning of a word as a vector computed by summing over all its corpus occurrences. Words close to this point in space can be assumed to be similar to it in meaning. But how far around this point does the region of similar meaning extend? In this paper we discuss two models that represent word meaning as regions in vector space. Both representations can be computed from traditional point representations in vector space. 
We find that both models perform at over 95% F-score on a token classification task.", "keyphrases": ["region", "vector space", "diversity"]}
+{"id": "wachsmuth-etal-2017-pagerank", "title": "\u201cPageRank\u201d for Argument Relevance", "abstract": "Future search engines are expected to deliver pro and con arguments in response to queries on controversial topics. While argument mining is now in the focus of research, the question of how to retrieve the relevant arguments remains open. This paper proposes a radical model to assess relevance objectively at web scale: the relevance of an argument's conclusion is decided by what other arguments reuse it as a premise. We build an argument graph for this model that we analyze with a recursive weighting scheme, adapting key ideas of PageRank. In experiments on a large ground-truth argument graph, the resulting relevance scores correlate with human average judgments. We outline what natural language challenges must be faced at web scale in order to stepwise bring argument relevance to web search engines.", "keyphrases": ["pagerank", "argument relevance", "premise"]}
+{"id": "tan-lee-2014-corpus", "title": "A Corpus of Sentence-level Revisions in Academic Writing: A Step towards Understanding Statement Strength in Communication", "abstract": "The strength with which a statement is made can have a significant impact on the audience. For example, international relations can be strained by how the media in one country describes an event in another; and papers can be rejected because they overstate or understate their findings. It is thus important to understand the effects of statement strength. A first step is to be able to distinguish between strong and weak statements. However, even this problem is understudied, partly due to a lack of data. Since strength is inherently relative, revisions of texts that make claims are a natural source of data on strength differences. In this paper, we introduce a corpus of sentence-level revisions from academic writing. We also describe insights gained from our annotation efforts for this task.", "keyphrases": ["revision", "academic writing", "statement strength"]}
+{"id": "michel-neubig-2018-mtnt", "title": "MTNT: A Testbed for Machine Translation of Noisy Text", "abstract": "Noisy or non-standard input text can cause disastrous mistranslations in most modern Machine Translation (MT) systems, and there has been growing research interest in creating noise-robust MT systems. However, as of yet there are no publicly available parallel corpora with naturally occurring noisy inputs and translations, and thus previous work has resorted to evaluating on synthetically created datasets. In this paper, we propose a benchmark dataset for Machine Translation of Noisy Text (MTNT), consisting of noisy comments on Reddit () and professionally sourced translations. We commissioned translations of English comments into French and Japanese, as well as French and Japanese comments into English, on the order of 7k-37k sentences per language pair. We qualitatively and quantitatively examine the types of noise included in this dataset, then demonstrate that existing MT models fail badly on a number of noise-related phenomena, even after performing adaptation on a small training set of in-domain data.
This indicates that this dataset can provide an attractive testbed for methods tailored to handling noisy text in MT.", "keyphrases": ["noisy text", "french", "mtnt", "robustness", "new dataset"]} +{"id": "wu-weld-2010-open", "title": "Open Information Extraction Using Wikipedia", "abstract": "Information-extraction (IE) systems seek to distill semantic relations from natural-language text, but most systems use supervised learning of relation-specific examples and are thus limited by the availability of training data. Open IE systems such as TextRunner, on the other hand, aim to handle the unbounded number of relations found on the Web. But how well can these open systems perform? \n \nThis paper presents WOE, an open IE system which improves dramatically on TextRunner's precision and recall. The key to WOE's performance is a novel form of self-supervised learning for open extractors -- using heuristic matches between Wikipedia infobox attribute values and corresponding sentences to construct training data. Like TextRunner, WOE's extractor eschews lexicalized features and handles an unbounded set of semantic relations. WOE can operate in two modes: when restricted to POS tag features, it runs as quickly as TextRunner, but when set to use dependency-parse features its precision and recall rise even higher.", "keyphrases": ["information extraction", "wikipedia", "web"]} +{"id": "xue-2003-chinese", "title": "Chinese Word Segmentation as Character Tagging", "abstract": "In this paper we report results of a supervised machine-learning approach to Chinese word segmentation. A maximum entropy tagger is trained on manually annotated data to automatically assign to Chinese characters, or hanzi, tags that indicate the position of a hanzi within a word. The tagged output is then converted into segmented text for evaluation. Preliminary results show that this approach is competitive against other supervised machine-learning segmenters reported in previous studies, achieving precision and recall rates of 95.01% and 94.94% respectively, trained on a 237K-word training set.", "keyphrases": ["word segmentation", "chinese character", "cws task", "character-based tagging approach", "end"]} +{"id": "moldovan-etal-2003-cogex", "title": "COGEX: A Logic Prover for Question Answering", "abstract": "Recent TREC results have demonstrated the need for deeper text understanding methods. This paper introduces the idea of automated reasoning applied to question answering and shows the feasibility of integrating a logic prover into a Question Answering system. The approach is to transform questions and answer passages into logic representations. World knowledge axioms as well as linguistic axioms are supplied to the prover which renders a deep understanding of the relationship between question text and answer text. Moreover, the trace of the proofs provide answer justifications. The results show that the prover boosts the performance of the QA system on TREC questions by 30%.", "keyphrases": ["logic prover", "question answering", "cogex"]} +{"id": "wong-kit-2012-extending", "title": "Extending Machine Translation Evaluation Metrics with Lexical Cohesion to Document Level", "abstract": "This paper proposes the utilization of lexical cohesion to facilitate evaluation of machine translation at the document level. As a linguistic means to achieve text coherence, lexical cohesion ties sentences together into a meaningfully interwoven structure through words with the same or related meaning. 
A comparison between machine and human translation is conducted to illustrate one of their critical distinctions: human translators tend to use more cohesion devices than machines. Various ways to apply this feature to evaluate machine-translated documents are presented, including one without reliance on reference translation. Experimental results show that incorporating this feature into sentence-level evaluation metrics can enhance their correlation with human judgements.", "keyphrases": ["machine translation", "lexical cohesion", "document level"]}
+{"id": "bunescu-mooney-2005-shortest", "title": "A Shortest Path Dependency Kernel for Relation Extraction", "abstract": "We present a novel approach to relation extraction, based on the observation that the information required to assert a relationship between two named entities in the same sentence is typically captured by the shortest path between the two entities in the dependency graph. Experiments on extracting top-level relations from the ACE (Automated Content Extraction) newspaper corpus show that the new shortest path dependency kernel outperforms a recent approach based on dependency tree kernels.", "keyphrases": ["path", "relation extraction", "kernel-based method", "predicate-argument sequence", "svm"]}
+{"id": "jain-wallace-2019-attention", "title": "Attention is not Explanation", "abstract": "Attention mechanisms have seen wide adoption in neural NLP models. In addition to improving predictive performance, these are often touted as affording transparency: models equipped with attention provide a distribution over attended-to input units, and this is often presented (at least implicitly) as communicating the relative importance of inputs. However, it is unclear what relationship exists between attention weights and model outputs. In this work we perform extensive experiments across a variety of NLP tasks that aim to assess the degree to which attention weights provide meaningful \u201cexplanations\u201d for predictions. We find that they largely do not. For example, learned attention weights are frequently uncorrelated with gradient-based measures of feature importance, and one can identify very different attention distributions that nonetheless yield equivalent predictions. Our findings show that standard attention modules do not provide meaningful explanations and should not be treated as though they do.", "keyphrases": ["explanation", "transparency", "attention weight", "feature importance"]}
+{"id": "liu-etal-2019-multi", "title": "Multi-Task Deep Neural Networks for Natural Language Understanding", "abstract": "In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for learning representations across multiple natural language understanding (NLU) tasks. MT-DNN not only leverages large amounts of cross-task data, but also benefits from a regularization effect that leads to more general representations to help adapt to new tasks and domains. MT-DNN extends the model proposed in Liu et al. (2015) by incorporating a pre-trained bidirectional transformer language model, known as BERT (Devlin et al., 2018). MT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI, SciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7% (2.2% absolute improvement) as of February 25, 2019 on the latest GLUE test set.
We also demonstrate using the SNLI and SciTail datasets that the representations learned by MT-DNN allow domain adaptation with substantially fewer in-domain labels than the pre-trained BERT representations. Our code and pre-trained models will be made publicly available.", "keyphrases": ["deep neural network", "natural language understanding", "mt-dnn", "bert", "multi-task learning"]} +{"id": "mostafazadeh-etal-2016-generating", "title": "Generating Natural Questions About an Image", "abstract": "There has been an explosion of work in the vision & language community during the past few years from image captioning to video transcription, and answering questions about images. These tasks have focused on literal descriptions of the image. To move beyond the literal, we choose to explore how questions about an image are often directed at commonsense inference and the abstract events evoked by objects in the image. In this paper, we introduce the novel task of Visual Question Generation (VQG), where the system is tasked with asking a natural and engaging question when shown an image. We provide three datasets which cover a variety of images from object-centric to event-centric, with considerably more abstract training data than provided to state-of-the-art captioning systems thus far. We train and test several generative and retrieval models to tackle the task of VQG. Evaluation results show that while such models ask reasonable questions for a variety of images, there is still a wide gap with human performance which motivates further work on connecting images with commonsense knowledge and pragmatics. Our proposed task offers a new challenge to the community which we hope furthers interest in exploring deeper connections between vision & language.", "keyphrases": ["image", "question generation", "engaging question", "conversation"]} +{"id": "conneau-etal-2020-unsupervised", "title": "Unsupervised Cross-lingual Representation Learning at Scale", "abstract": "This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +14.6% average accuracy on XNLI, +13% average F1 score on MLQA, and +2.4% F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 15.7% in XNLI accuracy for Swahili and 11.4% for Urdu over previous XLM models. We also present a detailed empirical analysis of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-R is very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make our code and models publicly available.", "keyphrases": ["cross-lingual representation learning", "scale", "multilinguality", "language model", "pre-trained model"]} +{"id": "wu-etal-2019-hierarchical", "title": "Hierarchical User and Item Representation with Three-Tier Attention for Recommendation", "abstract": "Utilizing reviews to learn user and item representations is useful for recommender systems. 
Existing methods usually merge all reviews from the same user or for the same item into a long document. However, different reviews, sentences and even words usually have different informativeness for modeling users and items. In this paper, we propose a hierarchical user and item representation model with three-tier attention to learn user and item representations from reviews for recommendation. Our model contains three major components, i.e., a sentence encoder to learn sentence representations from words, a review encoder to learn review representations from sentences, and a user/item encoder to learn user/item representations from reviews. In addition, we incorporate a three-tier attention network in our model to select important words, sentences and reviews. Besides, we combine the user and item representations learned from the reviews with user and item embeddings based on IDs as the final representations to capture the latent factors of individual users and items. Extensive experiments on four benchmark datasets validate the effectiveness of our approach.", "keyphrases": ["item representation", "three-tier attention", "hierarchical user"]} +{"id": "gatt-reiter-2009-simplenlg", "title": "SimpleNLG: A Realisation Engine for Practical Applications", "abstract": "This paper describes SimpleNLG, a realisation engine for English which aims to provide simple and robust interfaces to generate syntactic structures and linearise them. The library is also flexible in allowing the use of mixed (canned and non-canned) representations.", "keyphrases": ["realisation engine", "simplenlg", "project", "other domain-dependent decision", "lexical choice"]} +{"id": "reddy-etal-2019-coqa", "title": "CoQA: A Conversational Question Answering Challenge", "abstract": "Humans gather information through conversations involving a series of interconnected questions and answers. For machines to assist in information gathering, it is therefore essential to enable them to answer conversational questions. We introduce CoQA, a novel dataset for building Conversational Question Answering systems. Our dataset contains 127k questions with answers, obtained from 8k conversations about text passages from seven diverse domains. The questions are conversational, and the answers are free-form text with their corresponding evidence highlighted in the passage. We analyze CoQA in depth and show that conversational questions have challenging phenomena not present in existing reading comprehension datasets (e.g., coreference and pragmatic reasoning). We evaluate strong dialogue and reading comprehension models on CoQA. The best system obtains an F1 score of 65.4%, which is 23.4 points behind human performance (88.8%), indicating that there is ample room for improvement. We present CoQA as a challenge to the community at .", "keyphrases": ["conversation", "coqa", "annotator", "sequential question", "text snippet"]} +{"id": "may-etal-2019-measuring", "title": "On Measuring Social Biases in Sentence Encoders", "abstract": "The Word Embedding Association Test shows that GloVe and word2vec word embeddings exhibit human-like implicit biases based on gender, race, and other social constructs (Caliskan et al., 2017). Meanwhile, research on learning reusable text representations has begun to explore sentence-level texts, with some sentence encoders seeing enthusiastic adoption. Accordingly, we extend the Word Embedding Association Test to measure bias in sentence encoders. 
We then test several sentence encoders, including state-of-the-art methods such as ELMo and BERT, for the social biases studied in prior work and two important biases that are difficult or impossible to test at the word level. We observe mixed results including suspicious patterns of sensitivity that suggest the test's assumptions may not hold in general. We conclude by proposing directions for future work on measuring bias in sentence encoders.", "keyphrases": ["sentence encoder", "association", "social bias"]}
+{"id": "conneau-kiela-2018-senteval", "title": "SentEval: An Evaluation Toolkit for Universal Sentence Representations", "abstract": "We introduce SentEval, a toolkit for evaluating the quality of universal sentence representations. SentEval encompasses a variety of tasks, including binary and multi-class classification, natural language inference and sentence similarity. The set of tasks was selected based on what appears to be the community consensus regarding the appropriate evaluations for universal sentence representations. The toolkit comes with scripts to download and preprocess datasets, and an easy interface to evaluate sentence encoders. The aim is to provide a fairer, less cumbersome and more centralized way for evaluating sentence representations.", "keyphrases": ["evaluation toolkit", "sentence representation", "senteval", "caption", "similarity task"]}
+{"id": "yuan-briscoe-2016-grammatical", "title": "Grammatical error correction using neural machine translation", "abstract": "This paper presents the first study using neural machine translation (NMT) for grammatical error correction (GEC). We propose a two-step approach to handle the rare word problem in NMT, which has been proved to be useful and effective for the GEC task. Our best NMT-based system trained on the CLC outperforms our SMT-based system when testing on the publicly available FCE test set. The same system achieves an F0.5 score of 39.90% on the CoNLL-2014 shared task test set, outperforming the state-of-the-art and demonstrating that the NMT-based GEC system generalises effectively.", "keyphrases": ["error correction", "neural machine translation", "rare word problem"]}
+{"id": "li-etal-2018-paraphrase", "title": "Paraphrase Generation with Deep Reinforcement Learning", "abstract": "Automatic generation of paraphrases from a given sentence is an important yet challenging task in natural language processing (NLP). In this paper, we present a deep reinforcement learning approach to paraphrase generation. Specifically, we propose a new framework for the task, which consists of a generator and an evaluator, both of which are learned from data. The generator, built as a sequence-to-sequence learning model, can produce paraphrases given a sentence. The evaluator, constructed as a deep matching model, can judge whether two sentences are paraphrases of each other. The generator is first trained by deep learning and then further fine-tuned by reinforcement learning in which the reward is given by the evaluator. For the learning of the evaluator, we propose two methods based on supervised learning and inverse reinforcement learning respectively, depending on the type of available training data.
Experimental results on two datasets demonstrate that the proposed models (the generators) can produce more accurate paraphrases and outperform the state-of-the-art methods in paraphrase generation in both automatic evaluation and human evaluation.", "keyphrases": ["reinforcement learning", "deep learning", "paraphrase generation"]}
+{"id": "abdul-mageed-etal-2011-subjectivity", "title": "Subjectivity and Sentiment Analysis of Modern Standard Arabic", "abstract": "Although Subjectivity and Sentiment Analysis (SSA) has been witnessing a flurry of novel research, there are few attempts to build SSA systems for Morphologically-Rich Languages (MRL). In the current study, we report efforts to partially fill this gap. We present a newly developed manually annotated corpus of Modern Standard Arabic (MSA) together with a new polarity lexicon. The corpus is a collection of newswire documents annotated on the sentence level. We also describe an automatic SSA tagging system that exploits the annotated data. We investigate the impact of different levels of preprocessing settings on the SSA classification task. We show that by explicitly accounting for the rich morphology the system is able to achieve significantly higher levels of performance.", "keyphrases": ["sentiment analysis", "modern standard arabic", "subjectivity", "specific language"]}
+{"id": "luo-etal-2017-learning", "title": "Learning to Predict Charges for Criminal Cases with Legal Basis", "abstract": "The charge prediction task is to determine appropriate charges for a given case, which is helpful for legal assistant systems where the user input is fact description. We argue that relevant law articles play an important role in this task, and therefore propose an attention-based neural network method to jointly model the charge prediction task and the relevant article extraction task in a unified framework. The experimental results show that, besides providing legal basis, the relevant articles can also clearly improve the charge prediction results, and our full model can effectively predict appropriate charges for cases with different expression styles.", "keyphrases": ["charge", "criminal case", "legal judgment prediction"]}
+{"id": "cohen-etal-2004-learning", "title": "Learning to Classify Email into \u201cSpeech Acts\u201d", "abstract": "It is often useful to classify email according to the intent of the sender (e.g., \"propose a meeting\", \"deliver information\"). We present experimental results in learning to classify email in this fashion, where each class corresponds to a verb-noun pair taken from a predefined ontology describing typical \u201cemail speech acts\u201d. We demonstrate that, although this categorization problem is quite different from \u201ctopical\u201d text classification, certain categories of messages can nonetheless be detected with high precision (above 80%) and reasonable recall (above 50%) using existing text-classification learning methods. This result suggests that useful task-tracking tools could be constructed based on automatic classification into this taxonomy.", "keyphrases": ["email", "speech act", "textual feature"]}
+{"id": "richardson-etal-2013-mctest", "title": "MCTest: A Challenge Dataset for the Open-Domain Machine Comprehension of Text", "abstract": "We present MCTest, a freely available set of stories and associated questions intended for research on the machine comprehension of text.
Previous work on machine comprehension (e.g., semantic modeling) has made great strides, but primarily focuses either on limited-domain datasets, or on solving a more restricted goal (e.g., open-domain relation extraction). In contrast, MCTest requires machines to answer multiple-choice reading comprehension questions about fictional stories, directly tackling the high-level goal of open-domain machine comprehension. Reading comprehension can test advanced abilities such as causal reasoning and understanding the world, yet, by being multiple-choice, still provide a clear metric. By being fictional, the answer typically can be found only in the story itself. The stories and questions are also carefully limited to those a young child would understand, reducing the world knowledge that is required for the task. We present the scalable crowd-sourcing methods that allow us to cheaply construct a dataset of 500 stories and 2000 questions. By screening workers (with grammar tests) and stories (with grading), we have ensured that the data is the same quality as another set that we manually edited, but at one tenth the editing cost. By being open-domain, yet carefully restricted, we hope MCTest will serve to encourage research and provide a clear metric for advancement on the machine comprehension of text.
", "keyphrases": ["open-domain machine comprehension", "story", "reading comprehension dataset", "mctest", "multiple-choice question"]}
+{"id": "rush-etal-2010-dual", "title": "On Dual Decomposition and Linear Programming Relaxations for Natural Language Processing", "abstract": "This paper introduces dual decomposition as a framework for deriving inference algorithms for NLP problems. The approach relies on standard dynamic-programming algorithms as oracle solvers for sub-problems, together with a simple method for forcing agreement between the different oracles. The approach provably solves a linear programming (LP) relaxation of the global inference problem. It leads to algorithms that are simple, in that they use existing decoding algorithms; efficient, in that they avoid exact algorithms for the full model; and often exact, in that empirically they often recover the correct solution in spite of using an LP relaxation. We give experimental results on two problems: 1) the combination of two lexicalized parsing models; and 2) the combination of a lexicalized parsing model and a trigram part-of-speech tagger.", "keyphrases": ["dual decomposition", "nlp problem", "parsing model"]}
+{"id": "habash-etal-2013-morphological", "title": "Morphological Analysis and Disambiguation for Dialectal Arabic", "abstract": "The many differences between Dialectal Arabic and Modern Standard Arabic (MSA) pose a challenge to the majority of Arabic natural language processing tools, which are designed for MSA. In this paper, we retarget an existing state-of-the-art MSA morphological tagger to Egyptian Arabic (ARZ). Our evaluation demonstrates that our ARZ morphology tagger outperforms its MSA variant on ARZ input in terms of accuracy in part-of-speech tagging, diacritization, lemmatization and tokenization; and in terms of utility for ARZ-to-English statistical machine translation.", "keyphrases": ["disambiguation", "dialectal arabic", "arabic pos tagging"]}
+{"id": "chen-etal-2018-sequence", "title": "Sequence-to-Action: End-to-End Semantic Graph Generation for Semantic Parsing", "abstract": "This paper proposes a neural semantic parsing approach \u2013 Sequence-to-Action, which models semantic parsing as an end-to-end semantic graph generation process. Our method simultaneously leverages the advantages from two recent promising directions of semantic parsing. Firstly, our model uses a semantic graph to represent the meaning of a sentence, which has a tight-coupling with knowledge bases. Secondly, by leveraging the powerful representation learning and prediction ability of neural network models, we propose an RNN model which can effectively map sentences to action sequences for semantic graph generation. Experiments show that our method achieves state-of-the-art performance on the Overnight dataset and gets competitive performance on the Geo and Atis datasets.", "keyphrases": ["semantic parsing", "graph generation process", "sequence-to-action"]}
+{"id": "tamura-etal-2012-bilingual", "title": "Bilingual Lexicon Extraction from Comparable Corpora Using Label Propagation", "abstract": "This paper proposes a novel method for lexicon extraction that extracts translation pairs from comparable corpora by using graph-based label propagation. In previous work, it was established that performance drastically decreases when the coverage of a seed lexicon is small.
We resolve this problem by utilizing indirect relations with the bilingual seeds together with direct relations, in which each word is represented by a distribution of translated seeds. The seed distributions are propagated over a graph representing relations among words, and translation pairs are extracted by identifying word pairs with a high similarity in the seed distributions. We propose two types of graphs: a co-occurrence graph, representing co-occurrence relations between words, and a similarity graph, representing context similarities between words. Evaluations using English and Japanese patent comparable corpora show that our proposed graph propagation method outperforms conventional methods. Further, the similarity graph achieved improved performance by clustering synonyms into the same translation.", "keyphrases": ["lexicon extraction", "comparable corpora", "label propagation"]}
+{"id": "kordjamshidi-etal-2010-spatial", "title": "Spatial Role Labeling: Task Definition and Annotation Scheme", "abstract": "One of the essential functions of natural language is to talk about spatial relationships between objects. Linguistic constructs can express highly complex, relational structures of objects, spatial relations between them, and patterns of motion through spaces relative to some reference point. Learning how to map this information onto a formal representation from a text is a challenging problem. At present no well-defined framework for automatic spatial information extraction exists that can handle all of these issues. In this paper we introduce the task of spatial role labeling and propose an annotation scheme that is language-independent and facilitates the application of machine learning techniques. Our framework consists of a set of spatial roles based on the theory of holistic spatial semantics with the intent of covering all aspects of spatial concepts, including both static and dynamic spatial relations. We illustrate our annotation scheme with many examples throughout the paper, and in addition we highlight how to connect to spatial calculi such as region connection calculus and also how our approach fits into related work.", "keyphrases": ["annotation scheme", "object", "spatial role labeling", "sprl"]}
+{"id": "majumder-etal-2020-mime", "title": "MIME: MIMicking Emotions for Empathetic Response Generation", "abstract": "Current approaches to empathetic response generation view the set of emotions expressed in the input text as a flat structure, where all the emotions are treated uniformly. We argue that empathetic responses often mimic the emotion of the user to a varying degree, depending on its positivity or negativity and content. We show that the consideration of these polarity-based emotion clusters and emotional mimicry results in improved empathy and contextual relevance of the response as compared to the state-of-the-art. Also, we introduce stochasticity into the emotion mixture that yields emotionally more varied empathetic responses than the previous work. We demonstrate the importance of these factors to empathetic response generation using both automatic- and human-based evaluations. The implementation of MIME is publicly available at .", "keyphrases": ["emotion", "empathetic response generation", "mime"]}
+{"id": "waseem-hovy-2016-hateful", "title": "Hateful Symbols or Hateful People? Predictive Features for Hate Speech Detection on Twitter", "abstract": "Hate speech in the form of racist and sexist remarks is a common occurrence on social media.
For that reason, many social media services address the problem of identifying hate speech, but the definition of hate speech varies markedly, and its identification is largely a manual effort (BBC, 2015; Lomas, 2015). We provide a list of criteria founded in critical race theory, and use them to annotate a publicly available corpus of more than 16k tweets. We analyze the impact of various extra-linguistic features in conjunction with character n-grams for hate speech detection. We also present a dictionary based on the most indicative words in our data.", "keyphrases": ["hate speech detection", "twitter", "abusive language", "sexist tweet", "language detection dataset"]}
+{"id": "martins-smith-2009-summarization", "title": "Summarization with a Joint Model for Sentence Extraction and Compression", "abstract": "Text summarization is one of the oldest problems in natural language processing. Popular approaches rely on extracting relevant sentences from the original documents. As a side effect, sentences that are too long but partly relevant are doomed to either not appear in the final summary, or prevent inclusion of other relevant sentences. Sentence compression is a recent framework that aims to select the shortest subsequence of words that yields an informative and grammatical sentence. This work proposes a one-step approach for document summarization that jointly performs sentence extraction and compression by solving an integer linear program. We report favorable experimental results on newswire data.", "keyphrases": ["joint model", "sentence extraction", "compression"]}
+{"id": "kantor-globerson-2019-coreference", "title": "Coreference Resolution with Entity Equalization", "abstract": "A key challenge in coreference resolution is to capture properties of entity clusters, and use those in the resolution process. Here we provide a simple and effective approach for achieving this, via an \u201cEntity Equalization\u201d mechanism. The Equalization approach represents each mention in a cluster via an approximation of the sum of all mentions in the cluster. We show how this can be done in a fully differentiable end-to-end manner, thus enabling high-order inferences in the resolution process. Our approach, which also employs BERT embeddings, results in new state-of-the-art results on the CoNLL-2012 coreference resolution task, improving average F1 by 3.6%.", "keyphrases": ["entity equalization", "mention", "coreference resolution"]}
+{"id": "savary-etal-2017-parseme", "title": "The PARSEME Shared Task on Automatic Identification of Verbal Multiword Expressions", "abstract": "Multiword expressions (MWEs) are known as a \u201cpain in the neck\u201d for NLP due to their idiosyncratic behaviour. While some categories of MWEs have been addressed by many studies, verbal MWEs (VMWEs), such as to take a decision, to break one's heart or to turn off, have been rarely modelled. This is notably due to their syntactic variability, which hinders treating them as \u201cwords with spaces\u201d. We describe an initiative meant to bring about substantial progress in understanding, modelling and processing VMWEs. It is a joint effort, carried out within a European research network, to elaborate universal terminologies and annotation guidelines for 18 languages. Its main outcome is a multilingual 5-million-word annotated corpus which underlies a shared task on automatic identification of VMWEs.
This paper presents the corpus annotation methodology and outcome, the shared task organisation and the results of the participating systems.", "keyphrases": ["parseme", "automatic identification", "verbal multiword expressions", "edition"]}
+{"id": "bhatia-etal-2015-better", "title": "Better Document-level Sentiment Analysis from RST Discourse Parsing", "abstract": "Discourse structure is the hidden link between surface features and document-level properties, such as sentiment polarity. We show that the discourse analyses produced by Rhetorical Structure Theory (RST) parsers can improve document-level sentiment analysis, via composition of local information up the discourse tree. First, we show that reweighting discourse units according to their position in a dependency representation of the rhetorical structure can yield substantial improvements on lexicon-based sentiment analysis. Next, we present a recursive neural network over the RST structure, which offers significant improvements over classification-based methods.", "keyphrases": ["sentiment analysis", "discourse structure", "text categorization"]}
+{"id": "paetzold-specia-2016-semeval", "title": "SemEval 2016 Task 11: Complex Word Identification", "abstract": "We report the findings of the Complex Word Identification task of SemEval 2016. To create a dataset, we conduct a user study with 400 non-native English speakers, and find that complex words tend to be rarer, less ambiguous and shorter. A total of 42 systems were submitted from 21 distinct teams, and nine baselines were provided. The results highlight the effectiveness of Decision Trees and Ensemble methods for the task, but ultimately reveal that word frequencies remain the most reliable predictor of word complexity.", "keyphrases": ["complex word identification", "semeval", "cwi", "reader", "non-native speaker"]}
+{"id": "smit-etal-2014-morfessor", "title": "Morfessor 2.0: Toolkit for statistical morphological segmentation", "abstract": "Morfessor is a family of probabilistic machine learning methods for finding the morphological segmentation from raw text data. Recent developments include the development of semi-supervised methods for utilizing annotated data. Morfessor 2.0 is a rewrite of the original, widely-used Morfessor 1.0 software, with well documented command-line tools and library interface. It includes new features such as semi-supervised learning, online training, and integrated evaluation code.", "keyphrases": ["segmentation", "morfessor", "bpe"]}
+{"id": "rudinger-etal-2018-gender", "title": "Gender Bias in Coreference Resolution", "abstract": "We present an empirical study of gender bias in coreference resolution systems. We first introduce a novel, Winograd schema-style set of minimal pair sentences that differ only by pronoun gender. With these \u201cWinogender schemas,\u201d we evaluate and confirm systematic gender bias in three publicly-available coreference resolution systems, and correlate this bias with real-world and textual gender statistics.", "keyphrases": ["coreference resolution", "pronoun", "gender bias", "occupation", "template"]}
+{"id": "chiu-nichols-2016-named", "title": "Named Entity Recognition with Bidirectional LSTM-CNNs", "abstract": "Named entity recognition is a challenging task that has traditionally required large amounts of knowledge in the form of feature engineering and lexicons to achieve high performance.
In this paper, we present a novel neural network architecture that automatically detects word- and character-level features using a hybrid bidirectional LSTM and CNN architecture, eliminating the need for most feature engineering. We also propose a novel method of encoding partial lexicon matches in neural networks and compare it to existing approaches. Extensive evaluation shows that, given only tokenized text and publicly available word embeddings, our system is competitive on the CoNLL-2003 dataset and surpasses the previously reported state-of-the-art performance on the OntoNotes 5.0 dataset by 2.13 F1 points. By using two lexicons constructed from publicly-available sources, we establish new state-of-the-art performance with an F1 score of 91.62 on CoNLL-2003 and 86.28 on OntoNotes, surpassing systems that employ heavy feature engineering, proprietary lexicons, and rich entity linking information.", "keyphrases": ["entity recognition", "cnn", "word embedding", "convolutional neural network", "tagging"]}
+{"id": "liao-grishman-2010-using", "title": "Using Document Level Cross-Event Inference to Improve Event Extraction", "abstract": "Event extraction is a particularly challenging type of information extraction (IE). Most current event extraction systems rely on local information at the phrase or sentence level. However, this local context may be insufficient to resolve ambiguities in identifying particular types of events; information from a wider scope can serve to resolve some of these ambiguities. In this paper, we use document level information to improve the performance of ACE event extraction. In contrast to previous work, we do not limit ourselves to information about events of the same type, but rather use information about other types of events to make predictions or resolve ambiguities regarding a given event. We learn such relationships from the training corpus and use them to help predict the occurrence of events and event arguments in a text. Experiments show that we can get 9.0% (absolute) gain in trigger (event) classification, and more than 8% gain for argument (role) classification in ACE event extraction.", "keyphrases": ["document level", "event extraction", "cross-entity inference"]}
+{"id": "lee-etal-2018-higher", "title": "Higher-Order Coreference Resolution with Coarse-to-Fine Inference", "abstract": "We introduce a fully-differentiable approximation to higher-order inference for coreference resolution. Our approach uses the antecedent distribution from a span-ranking architecture as an attention mechanism to iteratively refine span representations. This enables the model to softly consider multiple hops in the predicted clusters. To alleviate the computational cost of this iterative process, we introduce a coarse-to-fine approach that incorporates a less accurate but more efficient bilinear factor, enabling more aggressive pruning without hurting accuracy. Compared to the existing state-of-the-art span-ranking approach, our model significantly improves accuracy on the English OntoNotes benchmark, while being far more computationally efficient.", "keyphrases": ["coreference resolution", "coarse-to-fine inference", "span representation", "mention", "probability distribution"]}
+{"id": "chen-yang-2020-multi", "title": "Multi-View Sequence-to-Sequence Models with Conversational Structure for Abstractive Dialogue Summarization", "abstract": "Text summarization is one of the most challenging and interesting problems in NLP.
Although much attention has been paid to summarizing structured text like news reports or encyclopedia articles, summarizing conversations\u2014an essential part of human-human/machine interaction where most important pieces of information are scattered across various utterances of different speakers\u2014remains relatively under-investigated. This work proposes a multi-view sequence-to-sequence model by first extracting conversational structures of unstructured daily chats from different views to represent conversations and then utilizing a multi-view decoder to incorporate different views to generate dialogue summaries. Experiments on a large-scale dialogue summarization corpus demonstrated that our methods significantly outperformed previous state-of-the-art models via both automatic evaluations and human judgment. We also discussed specific challenges that current approaches faced with this task. We have publicly released our code at .", "keyphrases": ["sequence-to-sequence model", "conversational structure", "summarization model"]} +{"id": "mikolov-etal-2013-linguistic", "title": "Linguistic Regularities in Continuous Space Word Representations", "abstract": "Continuous space language models have recently demonstrated outstanding results across a variety of tasks. In this paper, we examine the vector-space word representations that are implicitly learned by the input-layer weights. We find that these representations are surprisingly good at capturing syntactic and semantic regularities in language, and that each relationship is characterized by a relation-specific vector offset. This allows vector-oriented reasoning based on the offsets between words. For example, the male/female relationship is automatically learned, and with the induced vector representations, \u201cKing - Man + Woman\u201d results in a vector very close to \u201cQueen.\u201d We demonstrate that the word vectors capture syntactic regularities by means of syntactic analogy questions (provided with this paper), and are able to correctly answer almost 40% of the questions. We demonstrate that the word vectors capture semantic regularities by using the vector offset method to answer SemEval-2012 Task 2 questions. Remarkably, this method outperforms the best previous systems.", "keyphrases": ["regularity", "word embedding", "analogy task", "semantic property", "cbow"]} +{"id": "choi-etal-2018-quac", "title": "QuAC: Question Answering in Context", "abstract": "We present QuAC, a dataset for Question Answering in Context that contains 14K information-seeking QA dialogs (100K questions in total). The dialogs involve two crowd workers: (1) a student who poses a sequence of freeform questions to learn as much as possible about a hidden Wikipedia text, and (2) a teacher who answers the questions by providing short excerpts from the text. QuAC introduces challenges not found in existing machine comprehension datasets: its questions are often more open-ended, unanswerable, or only meaningful within the dialog context, as we show in a detailed qualitative evaluation. We also report results for a number of reference models, including a recently state-of-the-art reading comprehension architecture extended to model dialog context. Our best model underperforms humans by 20 F1, suggesting that there is significant room for future work on this data. 
Dataset, baseline, and leaderboard available at .", "keyphrases": ["quac", "conversation", "machine reading comprehension", "factoid question"]} +{"id": "hassan-etal-2010-whats", "title": "What's with the Attitude? Identifying Sentences with Attitude in Online Discussions", "abstract": "Mining sentiment from user generated content is a very important task in Natural Language Processing. An example of such content is threaded discussions which act as a very important tool for communication and collaboration in the Web. Threaded discussions include e-mails, e-mail lists, bulletin boards, newsgroups, and Internet forums. Most of the work on sentiment analysis has been centered around finding the sentiment toward products or topics. In this work, we present a method to identify the attitude of participants in an online discussion toward one another. This would enable us to build a signed network representation of participant interaction where every edge has a sign that indicates whether the interaction is positive or negative. This is different from most of the research on social networks that has focused almost exclusively on positive links. The method is experimentally tested using a manually labeled set of discussion posts. The results show that the proposed method is capable of identifying attitudinal sentences, and their signs, with high accuracy and that it outperforms several other baselines.", "keyphrases": ["attitude", "online discussion", "sentiment analysis"]} +{"id": "jones-etal-2012-semantics", "title": "Semantics-Based Machine Translation with Hyperedge Replacement Grammars", "abstract": "We present an approach to semantics-based statistical machine translation that uses synchronous hyperedge replacement grammars to translate into and from graph-shaped intermediate meaning representations, to our knowledge the first work in NLP to make use of synchronous context free graph grammars. We present algorithms for each step of the semantics-based translation pipeline, including a novel graph-to-word alignment algorithm and two algorithms for synchronous grammar rule extraction. We investigate the influence of syntactic annotations on semantics-based translation by presenting two alternative rule extraction algorithms, one that requires only semantic annotations and another that additionally relies on syntactic annotations, and explore the effect of syntax and language bias in meaning representation structures by running experiments with two different meaning representations, one biased toward an English syntax-like structure and another that is language neutral. While preliminary work, these experiments show promise for semantically-informed machine translation.", "keyphrases": ["machine translation", "hyperedge replacement", "meaning representation"]} +{"id": "pires-etal-2019-multilingual", "title": "How Multilingual is Multilingual BERT?", "abstract": "In this paper, we show that Multilingual BERT (M-BERT), released by Devlin et al. (2018) as a single language model pre-trained from monolingual corpora in 104 languages, is surprisingly good at zero-shot cross-lingual model transfer, in which task-specific annotations in one language are used to fine-tune the model for evaluation in another language. 
To understand why, we present a large number of probing experiments, showing that transfer is possible even to languages in different scripts, that transfer works best between typologically similar languages, that monolingual corpora can train models for code-switching, and that the model can find translation pairs. From these results, we can conclude that M-BERT does create multilingual representations, but that these representations exhibit systematic deficiencies affecting certain language pairs.", "keyphrases": ["multilingual bert", "m-bert", "similar language", "zero-shot cross-lingual transfer", "crosslingual transfer"]} +{"id": "ebrahimi-etal-2018-hotflip", "title": "HotFlip: White-Box Adversarial Examples for Text Classification", "abstract": "We propose an efficient method to generate white-box adversarial examples to trick a character-level neural classifier. We find that only a few manipulations are needed to greatly decrease the accuracy. Our method relies on an atomic flip operation, which swaps one token for another, based on the gradients of the one-hot input vectors. Due to the efficiency of our method, we can perform adversarial training which makes the model more robust to attacks at test time. With the use of a few semantics-preserving constraints, we demonstrate that HotFlip can be adapted to attack a word-level classifier as well.", "keyphrases": ["adversarial example", "text classification", "gradient", "hotflip", "nlp model"]} +{"id": "tai-etal-2020-exbert", "title": "exBERT: Extending Pre-trained Models with Domain-specific Vocabulary Under Constrained Training Resources", "abstract": "We introduce exBERT, a training method to extend BERT pre-trained models from a general domain to a new pre-trained model for a specific domain with a new additive vocabulary under constrained training resources (i.e., constrained computation and data). exBERT uses a small extension module to learn to adapt an augmenting embedding for the new domain in the context of the original BERT's embedding of a general vocabulary. The exBERT training method is novel in learning the new vocabulary and the extension module while keeping the weights of the original BERT model fixed, resulting in a substantial reduction in required training resources. We pre-train exBERT with biomedical articles from ClinicalKey and PubMed Central, and study its performance on biomedical downstream benchmark tasks using the MTL-Bioinformatics-2016 datasets. We demonstrate that exBERT consistently outperforms prior approaches when using limited corpus and pre-training computation resources.", "keyphrases": ["pre-trained model", "vocabulary", "exbert"]} +{"id": "da-san-martino-etal-2019-fine", "title": "Fine-Grained Analysis of Propaganda in News Article", "abstract": "Propaganda aims at influencing people's mindset with the purpose of advancing a specific agenda. Previous work has addressed propaganda detection at document level, typically labelling all articles from a propagandistic news outlet as propaganda. Such noisy gold labels inevitably affect the quality of any learning system trained on them. A further issue with most existing systems is the lack of explainability. To overcome these limitations, we propose a novel task: performing fine-grained analysis of texts by detecting all fragments that contain propaganda techniques as well as their type. In particular, we create a corpus of news articles manually annotated at fragment level with eighteen propaganda techniques and propose a suitable evaluation measure. 
We further design a novel multi-granularity neural network, and we show that it outperforms several strong BERT-based baselines.", "keyphrases": ["document level", "propaganda technique", "news article", "fine-grained analysis", "fallacy"]} +{"id": "tang-etal-2015-document", "title": "Document Modeling with Gated Recurrent Neural Network for Sentiment Classification", "abstract": "Document level sentiment classification remains a challenge: encoding the intrinsic relations between sentences in the semantic meaning of a document. To address this, we introduce a neural network model to learn vector-based document representation in a unified, bottom-up fashion. The model first learns sentence representation with convolutional neural network or long short-term memory. Afterwards, semantics of sentences and their relations are adaptively encoded in document representation with gated recurrent neural network. We conduct document level sentiment classification on four large-scale review datasets from IMDB and Yelp Dataset Challenge. Experimental results show that: (1) our neural model shows superior performances over several state-of-the-art algorithms; (2) gated recurrent neural network dramatically outperforms standard recurrent neural network in document modeling for sentiment classification.", "keyphrases": ["recurrent neural network", "sentiment classification", "document modeling", "language processing task"]} +{"id": "schmidt-2014-research", "title": "The Research and Teaching Corpus of Spoken German \u2014 FOLK", "abstract": "FOLK is the \u201cForschungs- und Lehrkorpus Gesprochenes Deutsch (FOLK)\u201d (eng.: research and teaching corpus of spoken German). The project has set itself the aim of building a corpus of German conversations which a) covers a broad range of interaction types in private, institutional and public settings, b) is sufficiently large and diverse and of sufficient quality to support different qualitative and quantitative research approaches, c) is transcribed, annotated and made accessible according to current technological standards, and d) is available to the scientific community on a sound legal basis and without unnecessary restrictions of usage. This paper gives an overview of the corpus design, the strategies for acquisition of a diverse range of interaction data, and the corpus construction workflow from recording via transcription and annotation to dissemination.", "keyphrases": ["teaching corpus", "spoken german", "folk"]} +{"id": "resnik-elkiss-2005-linguists", "title": "The Linguist's Search Engine: An Overview", "abstract": "The Linguist's Search Engine (LSE) was designed to provide an intuitive, easy-to-use interface that enables language researchers to seek linguistically interesting examples on the Web, based on syntactic and lexical criteria. We briefly describe its user interface and architecture, as well as recent developments that include LSE search capabilities for Chinese.", "keyphrases": ["linguist", "search engine", "lse"]} +{"id": "trischler-etal-2017-newsqa", "title": "NewsQA: A Machine Comprehension Dataset", "abstract": "We present NewsQA, a challenging machine comprehension dataset of over 100,000 human-generated question-answer pairs. Crowdworkers supply questions and answers based on a set of over 10,000 news articles from CNN, with answers consisting of spans of text in the articles. We collect this dataset through a four-stage process designed to solicit exploratory questions that require reasoning. 
Analysis confirms that NewsQA demands abilities beyond simple word matching and recognizing textual entailment. We measure human performance on the dataset and compare it to several strong neural models. The performance gap between humans and machines (13.3% F1) indicates that significant progress can be made on NewsQA through future research. The dataset is freely available online.", "keyphrases": ["machine comprehension dataset", "crowdworker", "cnn", "newsqa", "unanswerable question"]} +{"id": "zhang-etal-2020-optimizing", "title": "Optimizing the Factual Correctness of a Summary: A Study of Summarizing Radiology Reports", "abstract": "Neural abstractive summarization models are able to generate summaries which have high overlap with human references. However, existing models are not optimized for factual correctness, a critical metric in real-world applications. In this work, we develop a general framework where we evaluate the factual correctness of a generated summary by fact-checking it automatically against its reference using an information extraction module. We further propose a training strategy which optimizes a neural summarization model with a factual correctness reward via reinforcement learning. We apply the proposed method to the summarization of radiology reports, where factual correctness is a key requirement. On two separate datasets collected from hospitals, we show via both automatic and human evaluation that the proposed approach substantially improves the factual correctness and overall quality of outputs over a competitive neural summarization system, producing radiology summaries that approach the quality of human-authored ones.", "keyphrases": ["factual correctness", "radiology report", "summarization model"]} +{"id": "hieber-etal-2020-sockeye", "title": "Sockeye 2: A Toolkit for Neural Machine Translation", "abstract": "We present Sockeye 2, a modernized and streamlined version of the Sockeye neural machine translation (NMT) toolkit. New features include a simplified code base through the use of MXNet's Gluon API, a focus on state of the art model architectures, and distributed mixed precision training. These improvements result in faster training and inference, higher automatic metric scores, and a shorter path from research to production.", "keyphrases": ["neural machine translation", "sockeye", "versatile toolkit"]} +{"id": "tsvetkov-etal-2016-learning", "title": "Learning the Curriculum with Bayesian Optimization for Task-Specific Word Representation Learning", "abstract": "We use Bayesian optimization to learn curricula for word representation learning, optimizing performance on downstream tasks that depend on the learned representations as features. The curricula are modeled by a linear ranking function which is the scalar product of a learned weight vector and an engineered feature vector that characterizes the different aspects of the complexity of each instance in the training corpus. We show that learning the curriculum improves performance on a variety of downstream tasks over random orders and in comparison to the natural corpus order.", "keyphrases": ["curriculum", "bayesian optimization", "word representation learning"]} +{"id": "kiritchenko-mohammad-2018-examining", "title": "Examining Gender and Race Bias in Two Hundred Sentiment Analysis Systems", "abstract": "Automatic machine learning systems can inadvertently accentuate and perpetuate inappropriate human biases. 
Past work on examining inappropriate biases has largely focused on just individual systems. Further, there is no benchmark dataset for examining inappropriate biases in systems. Here for the first time, we present the Equity Evaluation Corpus (EEC), which consists of 8,640 English sentences carefully chosen to tease out biases towards certain races and genders. We use the dataset to examine 219 automatic sentiment analysis systems that took part in a recent shared task, SemEval-2018 Task 1 `Affect in Tweets'. We find that several of the systems show statistically significant bias; that is, they consistently provide slightly higher sentiment intensity predictions for one race or one gender. We make the EEC freely available.", "keyphrases": ["gender", "sentiment analysis system", "equity evaluation corpus", "english sentence"]} +{"id": "smith-eisner-2009-parser", "title": "Parser Adaptation and Projection with Quasi-Synchronous Grammar Features", "abstract": "We connect two scenarios in structured learning: adapting a parser trained on one corpus to another annotation style, and projecting syntactic annotations from one language to another. We propose quasi-synchronous grammar (QG) features for these structured learning tasks. That is, we score an aligned pair of source and target trees based on local features of the trees and the alignment. Our quasi-synchronous model assigns positive probability to any alignment of any trees, in contrast to a synchronous grammar, which would insist on some form of structural parallelism. In monolingual dependency parser adaptation, we achieve high accuracy in translating among multiple annotation styles for the same sentence. On the more difficult problem of cross-lingual parser projection, we learn a dependency parser for a target language by using bilingual text, an English parser, and automatic word alignments. Our experiments show that unsupervised QG projection improves on parses trained using only high-precision projected annotations and far outperforms, by more than 35% absolute dependency accuracy, learning an unsupervised parser from raw target-language text alone. When a few target-language parse trees are available, projection gives a boost equivalent to doubling the number of target-language trees.", "keyphrases": ["projection", "quasi-synchronous grammar feature", "parser adaptation"]} +{"id": "jamshid-lou-johnson-2017-disfluency", "title": "Disfluency Detection using a Noisy Channel Model and a Deep Neural Language Model", "abstract": "This paper presents a model for disfluency detection in spontaneous speech transcripts called LSTM Noisy Channel Model. The model uses a Noisy Channel Model (NCM) to generate n-best candidate disfluency analyses and a Long Short-Term Memory (LSTM) language model to score the underlying fluent sentences of each analysis. The LSTM language model scores, along with other features, are used in a MaxEnt reranker to identify the most plausible analysis. 
We show that using an LSTM language model in the reranking process of the noisy channel disfluency model improves the state-of-the-art in disfluency detection.", "keyphrases": ["noisy channel model", "language model", "disfluency detection"]} +{"id": "lo-wu-2011-meant", "title": "MEANT: An inexpensive, high-accuracy, semi-automatic metric for evaluating translation utility based on semantic roles", "abstract": "We introduce a novel semi-automated metric, MEANT, that assesses translation utility by matching semantic role fillers, producing scores that correlate with human judgment as well as HTER but at much lower labor cost. As machine translation systems improve in lexical choice and fluency, the shortcomings of widespread n-gram based, fluency-oriented MT evaluation metrics such as BLEU, which fail to properly evaluate adequacy, become more apparent. But more accurate, non-automatic adequacy-oriented MT evaluation metrics like HTER are highly labor-intensive, which bottlenecks the evaluation cycle. We first show that when using untrained monolingual readers to annotate semantic roles in MT output, the non-automatic version of the metric HMEANT achieves a 0.43 correlation coefficient with human adequacy judgments at the sentence level, far superior to BLEU at only 0.20, and equal to the far more expensive HTER. We then replace the human semantic role annotators with automatic shallow semantic parsing to further automate the evaluation metric, and show that even the semi-automated evaluation metric achieves a 0.34 correlation coefficient with human adequacy judgment, which is still about 80% as closely correlated as HTER despite an even lower labor cost for the evaluation procedure. The results show that our proposed metric is significantly better correlated with human judgment on adequacy than current widespread automatic evaluation metrics, while being much more cost effective than HTER.", "keyphrases": ["translation utility", "semantic role", "evaluation metric", "meant", "n-gram matching"]} +{"id": "mi-etal-2016-coverage", "title": "Coverage Embedding Models for Neural Machine Translation", "abstract": "In this paper, we enhance the attention-based neural machine translation (NMT) by adding explicit coverage embedding models to alleviate issues of repeating and dropping translations in NMT. For each source word, our model starts with a full coverage embedding vector to track the coverage status, and then keeps updating it with neural networks as the translation goes. Experiments on the large-scale Chinese-to-English task show that our enhanced model improves the translation quality significantly on various test sets over the strong large vocabulary NMT system.", "keyphrases": ["neural machine translation", "source word", "coverage"]} +{"id": "bunt-etal-2016-dialogbank", "title": "The DialogBank", "abstract": "This paper presents the DialogBank, a new language resource consisting of dialogues with gold standard annotations according to the ISO 24617-2 standard. Some of these dialogues have been taken from existing corpora and have been re-annotated according to the ISO standard; others have been annotated directly according to the standard. The ISO 24617-2 annotations have been designed according to the ISO principles for semantic annotation, as formulated in ISO 24617-6. 
The DialogBank makes use of three alternative representation formats, which are shown to be interoperable.", "keyphrases": ["dialogbank", "language resource", "standard"]} +{"id": "ganapathibhotla-liu-2008-mining", "title": "Mining Opinions in Comparative Sentences", "abstract": "This paper studies sentiment analysis from the user-generated content on the Web. In particular, it focuses on mining opinions from comparative sentences, i.e., to determine which entities in a comparison are preferred by its author. A typical comparative sentence compares two or more entities. For example, the sentence, \"the picture quality of Camera X is better than that of Camera Y\", compares two entities \"Camera X\" and \"Camera Y\" with regard to their picture quality. Clearly, \"Camera X\" is the preferred entity. Existing research has studied the problem of extracting some key elements in a comparative sentence. However, there is still no study of mining opinions from comparative sentences, i.e., identifying preferred entities of the author. This paper studies this problem, and proposes a technique to solve the problem. Our experiments using comparative sentences from product reviews and forum posts show that the approach is effective.", "keyphrases": ["comparative sentence", "web", "opinion mining"]} +{"id": "alm-etal-2005-emotions", "title": "Emotions from Text: Machine Learning for Text-based Emotion Prediction", "abstract": "In addition to information, text contains attitudinal, and more specifically, emotional content. This paper explores the text-based emotion prediction problem empirically, using supervised machine learning with the SNoW learning architecture. The goal is to classify the emotional affinity of sentences in the narrative domain of children's fairy tales, for subsequent usage in appropriate expressive rendering of text-to-speech synthesis. Initial experiments on a preliminary data set of 22 fairy tales show encouraging results over a naive baseline and BOW approach for classification of emotional versus non-emotional contents, with some dependency on parameter tuning. We also discuss results for a tripartite model which covers emotional valence, as well as feature set alternations. In addition, we present plans for a more cognitively sound sequential model, taking into consideration a larger set of basic emotions.", "keyphrases": ["machine learning", "tale", "emotion", "text instance", "decade"]} +{"id": "tu-etal-2018-learning", "title": "Learning to Remember Translation History with a Continuous Cache", "abstract": "Existing neural machine translation (NMT) models generally translate sentences in isolation, missing the opportunity to take advantage of document-level information. In this work, we propose to augment NMT models with a very light-weight cache-like memory network, which stores recent hidden representations as translation history. The probability distribution over generated words is updated online depending on the translation history retrieved from the memory, endowing NMT models with the capability to dynamically adapt over time. 
Experiments on multiple domains with different topics and styles show the effectiveness of the proposed approach with negligible impact on the computational cost.", "keyphrases": ["translation history", "cache", "neural machine translation", "memory network", "hidden representation"]} +{"id": "goldberg-nivre-2012-dynamic", "title": "A Dynamic Oracle for Arc-Eager Dependency Parsing", "abstract": "The standard training regime for transition-based dependency parsers makes use of an oracle, which predicts an optimal transition sequence for a sentence and its gold tree. We present an improved oracle for the arc-eager transition system, which provides a set of optimal transitions for every valid parser configuration, including configurations from which the gold tree is not reachable. In such cases, the oracle provides transitions that will lead to the best reachable tree from the given configuration. The oracle is efficient to implement and provably correct. We use the oracle to train a deterministic left-to-right dependency parser that is less sensitive to error propagation, using an online training procedure that also explores parser configurations resulting from non-optimal sequences of transitions. This new parser outperforms greedy parsers trained using conventional oracles on a range of data sets, with an average improvement of over 1.2 LAS points and up to almost 3 LAS points on some data sets.", "keyphrases": ["oracle", "dependency parsing", "transition sequence"]} +{"id": "kudo-2018-subword", "title": "Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates", "abstract": "Subword units are an effective way to alleviate the open vocabulary problems in neural machine translation (NMT). While sentences are usually converted into unique subword sequences, subword segmentation is potentially ambiguous and multiple segmentations are possible even with the same vocabulary. The question addressed in this paper is whether it is possible to harness the segmentation ambiguity as a noise to improve the robustness of NMT. We present a simple regularization method, subword regularization, which trains the model with multiple subword segmentations probabilistically sampled during training. In addition, for better subword sampling, we propose a new subword segmentation algorithm based on a unigram language model. We experiment with multiple corpora and report consistent improvements especially on low resource and out-of-domain settings.", "keyphrases": ["consistent improvement", "subword regularization", "tokenization", "translation performance"]} +{"id": "bott-etal-2012-spanish", "title": "Can Spanish Be Simpler? LexSiS: Lexical Simplification for Spanish", "abstract": "Lexical simplification is the task of replacing a word in a given context by an easier-to-understand synonym. Although a number of lexical simplification approaches have been developed in recent years, most of them have been applied to English, with recent work taking advantage of parallel monolingual datasets for training. Here we present LexSiS, a lexical simplification system for Spanish that does not require a parallel corpus, but instead relies on freely available resources, such as an on-line dictionary and the Web as a corpus. LexSiS uses three techniques for finding a suitable word substitute: a word vector model, word frequency, and word length. 
In experiments with human informants, we have verified that LexSiS performs better than a hard-to-beat baseline based on synonym frequency.", "keyphrases": ["spanish", "lexical simplification", "frequency"]} +{"id": "daume-iii-marcu-2006-bayesian", "title": "Bayesian Query-Focused Summarization", "abstract": "We present BAYESUM (for \"Bayesian summarization\"), a model for sentence extraction in query-focused summarization. BAYESUM leverages the common case in which multiple documents are relevant to a single query. Using these documents as reinforcement for query terms, BAYESUM is not afflicted by the paucity of information in short queries. We show that approximate inference in BAYESUM is possible on large data sets and results in a state-of-the-art summarization system. Furthermore, we show how BAYESUM can be understood as a justified query expansion technique in the language modeling for IR framework.", "keyphrases": ["summarization", "bayesum", "query"]} +{"id": "sennrich-haddow-2016-linguistic", "title": "Linguistic Input Features Improve Neural Machine Translation", "abstract": "Neural machine translation has recently achieved impressive results, while using little in the way of external linguistic information. In this paper we show that the strong learning capability of neural MT models does not make linguistic features redundant; they can be easily incorporated to provide further improvements in performance. We generalize the embedding layer of the encoder in the attentional encoder--decoder architecture to support the inclusion of arbitrary features, in addition to the baseline word feature. We add morphological features, part-of-speech tags, and syntactic dependency labels as input features to English<->German, and English->Romanian neural machine translation systems. In experiments on WMT16 training and test sets, we find that linguistic input features improve model quality according to three metrics: perplexity, BLEU and CHRF3. An open-source implementation of our neural MT system is available, as are sample files and configurations.", "keyphrases": ["input feature", "neural machine translation", "part-of-speech tag", "dependency label", "translation quality"]} +{"id": "sirts-goldwater-2013-minimally", "title": "Minimally-Supervised Morphological Segmentation using Adaptor Grammars", "abstract": "This paper explores the use of Adaptor Grammars, a nonparametric Bayesian modelling framework, for minimally supervised morphological segmentation. We compare three training methods: unsupervised training, semi-supervised training, and a novel model selection method. In the model selection method, we train unsupervised Adaptor Grammars using an over-articulated metagrammar, then use a small labelled data set to select which potential morph boundaries identified by the metagrammar should be returned in the final output. We evaluate on five languages and show that semi-supervised training provides a boost over unsupervised training, while the model selection method yields the best average results over all languages and is competitive with state-of-the-art semi-supervised systems. 
Moreover, this method provides the potential to tune performance according to different evaluation metrics or downstream tasks.", "keyphrases": ["morphological segmentation", "adaptor grammars", "nonparametric bayesian model"]} +{"id": "haghighi-klein-2010-coreference", "title": "Coreference Resolution in a Modular, Entity-Centered Model", "abstract": "Coreference resolution is governed by syntactic, semantic, and discourse constraints. We present a generative, model-based approach in which each of these factors is modularly encapsulated and learned in a primarily unsupervised manner. Our semantic representation first hypothesizes an underlying set of latent entity types, which generate specific entities that in turn render individual mentions. By sharing lexical statistics at the level of abstract entity types, our model is able to substantially reduce semantic compatibility errors, resulting in the best results to date on the complete end-to-end coreference task.", "keyphrases": ["entity-centered model", "mention", "coreference resolution"]} +{"id": "nedoluzhko-etal-2016-coreference", "title": "Coreference in Prague Czech-English Dependency Treebank", "abstract": "We present coreference annotation on parallel Czech-English texts of the Prague Czech-English Dependency Treebank (PCEDT). The paper describes innovations made to PCEDT 2.0 concerning coreference, as well as coreference information already present there. We characterize the coreference annotation scheme, give the statistics and compare our annotation with the coreference annotation in Ontonotes and Prague Dependency Treebank for Czech. We also present the experiments made using this corpus to improve the alignment of coreferential expressions, which helps us to collect better statistics of correspondences between types of coreferential relations in Czech and English. The corpus released as PCEDT 2.0 Coref is publicly available.", "keyphrases": ["prague", "czech-english dependency treebank", "coreference"]} +{"id": "yang-etal-2008-entity", "title": "An Entity-Mention Model for Coreference Resolution with Inductive Logic Programming", "abstract": "The traditional mention-pair model for coreference resolution cannot capture information beyond mention pairs for both learning and testing. To deal with this problem, we present an expressive entity-mention model that performs coreference resolution at an entity level. The model adopts the Inductive Logic Programming (ILP) algorithm, which provides a relational way to organize different knowledge of entities and mentions. The solution can explicitly express relations between an entity and the contained mentions, and automatically learn first-order rules important for coreference decision. The evaluation on the ACE data set shows that the ILP based entity-mention model is effective for the coreference resolution task.", "keyphrases": ["entity-mention model", "coreference resolution", "inductive logic programming"]} +{"id": "bohnet-nivre-2012-transition", "title": "A Transition-Based System for Joint Part-of-Speech Tagging and Labeled Non-Projective Dependency Parsing", "abstract": "Most current dependency parsers presuppose that input words have been morphologically disambiguated using a part-of-speech tagger before parsing begins. We present a transition-based system for joint part-of-speech tagging and labeled dependency parsing with non-projective trees. 
Experimental evaluation on Chinese, Czech, English and German shows consistent improvements in both tagging and parsing accuracy when compared to a pipeline system, which lead to improved state-of-the-art results for all languages.", "keyphrases": ["transition-based system", "joint part-of-speech tagging", "dependency parsing"]} +{"id": "bosselut-etal-2019-comet", "title": "COMET: Commonsense Transformers for Automatic Knowledge Graph Construction", "abstract": "We present the first comprehensive study on automatic knowledge base construction for two prevalent commonsense knowledge graphs: ATOMIC (Sap et al., 2019) and ConceptNet (Speer et al., 2017). Contrary to many conventional KBs that store knowledge with canonical templates, commonsense KBs only store loosely structured open-text descriptions of knowledge. We posit that an important step toward automatic commonsense completion is the development of generative models of commonsense knowledge, and propose COMmonsEnse Transformers (COMET) that learn to generate rich and diverse commonsense descriptions in natural language. Despite the challenges of commonsense modeling, our investigation reveals promising results when implicit knowledge from deep pre-trained language models is transferred to generate explicit knowledge in commonsense knowledge graphs. Empirical results demonstrate that COMET is able to generate novel knowledge that humans rate as high quality, with up to 77.5% (ATOMIC) and 91.7% (ConceptNet) precision at top 1, which approaches human performance for these resources. Our findings suggest that using generative commonsense models for automatic commonsense KB completion could soon be a plausible alternative to extractive methods.", "keyphrases": ["commonsense transformer", "language model", "comet", "knowledge model", "reasoning"]} +{"id": "yates-etal-2017-depression", "title": "Depression and Self-Harm Risk Assessment in Online Forums", "abstract": "Users suffering from mental health conditions often turn to online resources for support, including specialized online support communities or general communities such as Twitter and Reddit. In this work, we present a framework for supporting and studying users in both types of communities. We propose methods for identifying posts in support communities that may indicate a risk of self-harm, and demonstrate that our approach outperforms strong previously proposed methods for identifying such posts. Self-harm is closely related to depression, which makes identifying depressed users on general forums a crucial related task. We introduce a large-scale general forum dataset consisting of users with self-reported depression diagnoses matched with control users. We show how our method can be applied to effectively identify depressed users from their use of language alone. We demonstrate that our method outperforms strong baselines on this general forum dataset.", "keyphrases": ["self-harm", "mental health condition", "depression"]} +{"id": "ding-etal-2014-using", "title": "Using Structured Events to Predict Stock Price Movement: An Empirical Investigation", "abstract": "It has been shown that news events influence the trends of stock price movements. However, previous work on news-driven stock market prediction relies on shallow features (such as bags-of-words, named entities and noun phrases), which do not capture structured entity-relation information, and hence cannot represent complete and exact events. 
Recent advances in Open Information Extraction (Open IE) techniques enable the extraction of structured events from web-scale data. We propose to adapt Open IE technology for event-based stock price movement prediction, extracting structured events from large-scale public news without manual efforts. Both linear and nonlinear models are employed to empirically investigate the hidden and complex relationships between events and the stock market. Large-scale experiments show that the accuracy of S&P 500 index prediction is 60%, and that of individual stock prediction can be over 70%. Our event-based system outperforms bags-of-words-based baselines, and previously reported systems trained on S&P 500 stock historical data.", "keyphrases": ["structured event", "stock price movement", "news"]} +{"id": "lin-etal-2016-neural", "title": "Neural Relation Extraction with Selective Attention over Instances", "abstract": "Distant supervised relation extraction has been widely used to find novel relational facts from text. However, distant supervision is inevitably accompanied by the wrong labelling problem, and these noisy data will substantially hurt the performance of relation extraction. To alleviate this issue, we propose a sentence-level attention-based model for relation extraction. In this model, we employ convolutional neural networks to embed the semantics of sentences. Afterwards, we build sentence-level attention over multiple instances, which is expected to dynamically reduce the weights of those noisy instances. Experimental results on real-world datasets show that our model can make full use of all informative sentences and effectively reduce the influence of wrong labelled instances. Our model achieves significant and consistent improvements on relation extraction as compared with baselines. The source code of this paper can be obtained from https://github.com/thunlp/NRE.", "keyphrases": ["selective attention", "nre", "neural relation extraction", "bag", "entity pair"]} +{"id": "liu-etal-2017-exploiting", "title": "Exploiting Argument Information to Improve Event Detection via Supervised Attention Mechanisms", "abstract": "This paper tackles the task of event detection (ED), which involves identifying and categorizing events. We argue that arguments provide significant clues to this task, but they are either completely ignored or exploited in an indirect manner in existing detection approaches. In this work, we propose to exploit argument information explicitly for ED via supervised attention mechanisms. Specifically, we systematically investigate the proposed model under the supervision of different attention strategies. Experimental results show that our approach advances state-of-the-arts and achieves the best F1 score on ACE 2005 dataset.", "keyphrases": ["argument information", "event detection", "supervised attention mechanism"]} +{"id": "schnabel-etal-2015-evaluation", "title": "Evaluation methods for unsupervised word embeddings", "abstract": "We present a comprehensive study of evaluation methods for unsupervised embedding techniques that obtain meaningful representations of words from text. Different evaluations result in different orderings of embedding methods, calling into question the common assumption that there is one single optimal vector representation. We present new evaluation techniques that directly compare embeddings with respect to specific queries. 
These methods reduce bias, provide greater insight, and allow us to solicit data-driven relevance judgments rapidly and accurately through crowdsourcing.", "keyphrases": ["word embedding", "evaluation method", "extrinsic task", "analogy"]} +{"id": "stab-gurevych-2017-parsing", "title": "Parsing Argumentation Structures in Persuasive Essays", "abstract": "In this article, we present a novel approach for parsing argumentation structures. We identify argument components using sequence labeling at the token level and apply a new joint model for detecting argumentation structures. The proposed model globally optimizes argument component types and argumentative relations using Integer Linear Programming. We show that our model significantly outperforms challenging heuristic baselines on two different types of discourse. Moreover, we introduce a novel corpus of persuasive essays annotated with argumentation structures. We show that our annotation scheme and annotation guidelines successfully guide human annotators to substantial agreement.", "keyphrases": ["persuasive essay", "discourse", "writing support system"]} +{"id": "huang-2008-forest", "title": "Forest Reranking: Discriminative Parsing with Non-Local Features", "abstract": "Conventional n-best reranking techniques often suffer from the limited scope of the n-best list, which rules out many potentially good alternatives. We instead propose forest reranking, a method that reranks a packed forest of exponentially many parses. Since exact inference is intractable with non-local features, we present an approximate algorithm inspired by forest rescoring that makes discriminative training practical over the whole Treebank. Our final result, an F-score of 91.7, outperforms both 50-best and 100-best reranking baselines, and is better than any previously reported systems trained on the Treebank.", "keyphrases": ["non-local feature", "list", "packed forest", "forest reranking"]} +{"id": "talmor-etal-2020-olmpics", "title": "oLMpics-On What Language Model Pre-training Captures", "abstract": "Recent success of pre-trained language models (LMs) has spurred widespread interest in the language capabilities that they possess. However, efforts to understand whether LM representations are useful for symbolic reasoning tasks have been limited and scattered. In this work, we propose eight reasoning tasks, which conceptually require operations such as comparison, conjunction, and composition. A fundamental challenge is to understand whether the performance of a LM on a task should be attributed to the pre-trained representations or to the process of fine-tuning on the task data. To address this, we propose an evaluation protocol that includes both zero-shot evaluation (no fine-tuning), as well as comparing the learning curve of a fine-tuned LM to the learning curve of multiple controls, which paints a rich picture of the LM capabilities. Our main findings are that: (a) different LMs exhibit qualitatively different reasoning abilities, e.g., RoBERTa succeeds in reasoning tasks where BERT fails completely; (b) LMs do not reason in an abstract manner and are context-dependent, e.g., while RoBERTa can compare ages, it can do so only when the ages are in the typical range of human ages; (c) On half of our reasoning tasks all models fail completely. 
Our findings and infrastructure can help future work on designing new datasets, models, and objective functions for pre-training.", "keyphrases": ["language model", "pre-trained representation", "high performance"]} +{"id": "song-etal-2018-graph", "title": "A Graph-to-Sequence Model for AMR-to-Text Generation", "abstract": "The problem of AMR-to-text generation is to recover a text representing the same meaning as an input AMR graph. The current state-of-the-art method uses a sequence-to-sequence model, leveraging LSTM for encoding a linearized AMR structure. Although being able to model non-local semantic information, a sequence LSTM can lose information from the AMR graph structure, and thus faces challenges with large graphs, which result in long sequences. We introduce a neural graph-to-sequence model, using a novel LSTM structure for directly encoding graph-level semantics. On a standard benchmark, our model shows superior results to existing methods in the literature.", "keyphrases": ["graph-to-sequence model", "amr-to-text generation", "amr structure", "sequential encoder"]} +{"id": "mccoy-etal-2020-berts", "title": "BERTs of a feather do not generalize together: Large variability in generalization across models with similar test set performance", "abstract": "If the same neural network architecture is trained multiple times on the same dataset, will it make similar linguistic generalizations across runs? To study this question, we fine-tuned 100 instances of BERT on the Multi-genre Natural Language Inference (MNLI) dataset and evaluated them on the HANS dataset, which evaluates syntactic generalization in natural language inference. On the MNLI development set, the behavior of all instances was remarkably consistent, with accuracy ranging between 83.6% and 84.8%. In stark contrast, the same models varied widely in their generalization performance. For example, on the simple case of subject-object swap (e.g., determining that \u201cthe doctor visited the lawyer\u201d does not entail \u201cthe lawyer visited the doctor\u201d), accuracy ranged from 0.0% to 66.2%. Such variation is likely due to the presence of many local minima in the loss surface that are equally attractive to a low-bias learner such as a neural network; decreasing the variability may therefore require models with stronger inductive biases.", "keyphrases": ["generalization", "variability", "bert"]} +{"id": "khayrallah-koehn-2018-impact", "title": "On the Impact of Various Types of Noise on Neural Machine Translation", "abstract": "We examine how various types of noise in the parallel training data impact the quality of neural machine translation systems. We create five types of artificial noise and analyze how they degrade performance in neural and statistical machine translation. We find that neural models are generally more harmed by noise than statistical models. For one especially egregious type of noise they learn to just copy the input sentence.", "keyphrases": ["various type", "noise", "neural machine translation", "wrong language", "translation model"]} +{"id": "sennrich-etal-2017-nematus", "title": "Nematus: a Toolkit for Neural Machine Translation", "abstract": "We present Nematus, a toolkit for Neural Machine Translation. The toolkit prioritizes high translation accuracy, usability, and extensibility. 
Nematus has been used to build top-performing submissions to shared translation tasks at WMT and IWSLT, and has been used to train systems for production environments.", "keyphrases": ["toolkit", "neural machine translation", "nematus"]} +{"id": "zeng-etal-2018-extracting", "title": "Extracting Relational Facts by an End-to-End Neural Model with Copy Mechanism", "abstract": "The relational facts in sentences are often complicated. Different relational triplets may have overlaps in a sentence. We divided the sentences into three types according to triplet overlap degree, including Normal, EntityPairOverlap and SingleEntityOverlap. Existing methods mainly focus on Normal class and fail to extract relational triplets precisely. In this paper, we propose an end-to-end model based on sequence-to-sequence learning with copy mechanism, which can jointly extract relational facts from sentences of any of these classes. We adopt two different strategies in decoding process: employing only one united decoder or applying multiple separated decoders. We test our models on two public datasets, and our model outperforms the baseline method significantly.", "keyphrases": ["relational fact", "copy mechanism", "sequence-to-sequence model", "extraction"]} +{"id": "krahmer-van-deemter-2012-computational", "title": "Computational Generation of Referring Expressions: A Survey", "abstract": "This article offers a survey of computational research on referring expression generation (REG). It introduces the REG problem and describes early work in this area, discussing what basic assumptions lie behind it, and showing how its remit has widened in recent years. We discuss computational frameworks underlying REG, and demonstrate a recent trend that seeks to link REG algorithms with well-established Knowledge Representation techniques. Considerable attention is given to recent efforts at evaluating REG algorithms and the lessons that they allow us to learn. The article concludes with a discussion of the way forward in REG, focusing on references in larger and more realistic settings.", "keyphrases": ["referring expression", "survey", "expression generation", "reg", "object"]} +{"id": "lison-tiedemann-2016-opensubtitles2016", "title": "OpenSubtitles2016: Extracting Large Parallel Corpora from Movie and TV Subtitles", "abstract": "We present a new major release of the OpenSubtitles collection of parallel corpora. The release is compiled from a large database of movie and TV subtitles and includes a total of 1689 bitexts spanning 2.6 billion sentences across 60 languages. The release also incorporates a number of enhancements in the preprocessing and alignment of the subtitles, such as the automatic correction of OCR errors and the use of meta-data to estimate the quality of each subtitle and score subtitle pairs.", "keyphrases": ["parallel corpora", "movie", "opensubtitles"]} +{"id": "du-etal-2021-self", "title": "Self-training Improves Pre-training for Natural Language Understanding", "abstract": "Unsupervised pre-training has led to much recent progress in natural language understanding. In this paper, we study self-training as another way to leverage unlabeled data through semi-supervised learning. To obtain additional data for a specific task, we introduce SentAugment, a data augmentation method which computes task-specific query embeddings from labeled data to retrieve sentences from a bank of billions of unlabeled sentences crawled from the web. 
Unlike previous semi-supervised methods, our approach does not require in-domain unlabeled data and is therefore more generally applicable. Experiments show that self-training is complementary to strong RoBERTa baselines on a variety of tasks. Our augmentation approach leads to scalable and effective self-training with improvements of up to 2.6% on standard text classification benchmarks. Finally, we also show strong gains on knowledge-distillation and few-shot learning.", "keyphrases": ["natural language understanding", "semi-supervised learning", "self-training"]} +{"id": "yu-hatzivassiloglou-2003-towards", "title": "Towards Answering Opinion Questions: Separating Facts from Opinions and Identifying the Polarity of Opinion Sentences", "abstract": "Opinion question answering is a challenging task for natural language processing. In this paper, we discuss a necessary component for an opinion question answering system: separating opinions from fact, at both the document and sentence level. We present a Bayesian classifier for discriminating between documents with a preponderance of opinions such as editorials from regular news stories, and describe three unsupervised, statistical techniques for the significantly harder task of detecting opinions at the sentence level. We also present a first model for classifying opinion sentences as positive or negative in terms of the main perspective being expressed in the opinion. Results from a large collection of news stories and a human evaluation of 400 sentences are reported, indicating that we achieve very high performance in document classification (upwards of 97% precision and recall), and respectable performance in detecting opinions and classifying them at the sentence level as positive, negative, or neutral (up to 91% accuracy).", "keyphrases": ["opinion question", "polarity", "sentence level", "subjectivity classification"]} +{"id": "wang-etal-2021-k", "title": "K-Adapter: Infusing Knowledge into Pre-Trained Models with Adapters", "abstract": "We study the problem of injecting knowledge into large pre-trained models like BERT and RoBERTa. Existing methods typically update the original parameters of pre-trained models when injecting knowledge. However, when multiple kinds of knowledge are injected, they may suffer from catastrophic forgetting. To address this, we propose K-Adapter, which keeps the original parameters of the pre-trained model fixed and supports continual knowledge infusion. Taking RoBERTa as the pre-trained model, K-Adapter has a neural adapter for each kind of infused knowledge, like a plug-in connected to RoBERTa. There is no information flow between different adapters, thus different adapters are efficiently trained in a distributed way. We inject two kinds of knowledge, including factual knowledge obtained from automatically aligned text-triplets on Wikipedia and Wikidata, and linguistic knowledge obtained from dependency parsing. Results on three knowledge-driven tasks (total six datasets) including relation classification, entity typing and question answering demonstrate that each adapter improves the performance, and the combination of both adapters brings further improvements. 
Probing experiments further indicate that K-Adapter captures richer factual and commonsense knowledge than RoBERTa.", "keyphrases": ["neural adapter", "linguistic knowledge", "k-adapter", "plm", "limitation"]} +{"id": "mallinson-etal-2017-paraphrasing", "title": "Paraphrasing Revisited with Neural Machine Translation", "abstract": "Recognizing and generating paraphrases is an important component in many natural language processing applications. A well-established technique for automatically extracting paraphrases leverages bilingual corpora to find meaning-equivalent phrases in a single language by \u201cpivoting\u201d over a shared translation in another language. In this paper we revisit bilingual pivoting in the context of neural machine translation and present a paraphrasing model based purely on neural networks. Our model represents paraphrases in a continuous space, estimates the degree of semantic relatedness between text segments of arbitrary length, and generates candidate paraphrases for any source input. Experimental results across tasks and datasets show that neural paraphrases outperform those obtained with conventional phrase-based pivoting approaches.", "keyphrases": ["neural machine translation", "bilingual pivoting", "paraphrasing", "back-translation", "sentence similarity score"]} +{"id": "resnik-etal-2013-using", "title": "Using Topic Modeling to Improve Prediction of Neuroticism and Depression in College Students", "abstract": "We investigate the value-add of topic modeling in text analysis for depression, and for neuroticism as a strongly associated personality measure. Using Pennebaker\u2019s Linguistic Inquiry and Word Count (LIWC) lexicon to provide baseline features, we show that straightforward topic modeling using Latent Dirichlet Allocation (LDA) yields interpretable, psychologically relevant \u201cthemes\u201d that add value in prediction of clinical assessments.", "keyphrases": ["topic modeling", "neuroticism", "depression"]} +{"id": "zhu-etal-2021-mediasum", "title": "MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization", "abstract": "This paper introduces MediaSum, a large-scale media interview dataset consisting of 463.6K transcripts with abstractive summaries. To create this dataset, we collect interview transcripts from NPR and CNN and employ the overview and topic descriptions as summaries. Compared with existing public corpora for dialogue summarization, our dataset is an order of magnitude larger and contains complex multi-party conversations from multiple domains. We conduct statistical analysis to demonstrate the unique positional bias exhibited in the transcripts of televised and radioed interviews. We also show that MediaSum can be used in transfer learning to improve a model's performance on other dialogue summarization tasks.", "keyphrases": ["dialogue summarization", "abstractive summary", "mediasum", "medium interview dataset"]} +{"id": "ettinger-2020-bert", "title": "What BERT Is Not: Lessons from a New Suite of Psycholinguistic Diagnostics for Language Models", "abstract": "Pre-training by language modeling has become a popular and successful approach to NLP tasks, but we have yet to understand exactly what linguistic capacities these pre-training processes confer upon models. In this paper we introduce a suite of diagnostics drawn from human language experiments, which allow us to ask targeted questions about information used by language models for generating predictions in context. 
As a case study, we apply these diagnostics to the popular BERT model, finding that it can generally distinguish good from bad completions involving shared category or role reversal, albeit with less sensitivity than humans, and it robustly retrieves noun hypernyms, but it struggles with challenging inference and role-based event prediction\u2014 and, in particular, it shows clear insensitivity to the contextual impacts of negation.", "keyphrases": ["bert", "psycholinguistic diagnostic", "language model", "contextual impact", "negation"]} +{"id": "pitenis-etal-2020-offensive", "title": "Offensive Language Identification in Greek", "abstract": "As offensive language has become a rising issue for online communities and social media platforms, researchers have been investigating ways of coping with abusive content and developing systems to detect its different types: cyberbullying, hate speech, aggression, etc. With a few notable exceptions, most research on this topic so far has dealt with English. This is mostly due to the availability of language resources for English. To address this shortcoming, this paper presents the first Greek annotated dataset for offensive language identification: the Offensive Greek Tweet Dataset (OGTD). OGTD is a manually annotated dataset containing 4,779 posts from Twitter annotated as offensive and not offensive. Along with a detailed description of the dataset, we evaluate several computational models trained and tested on this data.", "keyphrases": ["greek", "twitter", "offensive language identification"]} +{"id": "trnka-etal-2007-effects", "title": "The Effects of Word Prediction on Communication Rate for AAC", "abstract": "Individuals using an Augmentative and Alternative Communication (AAC) device communicate at less than 10% of the speed of \"traditional\" speech, creating a large communication gap. In this user study, we compare the communication rate of pseudo-impaired individuals using two different word prediction algorithms and a system without word prediction. Our results show that word prediction can increase AAC communication rate and that more accurate predictions significantly improve communication rate.", "keyphrases": ["word prediction", "communication rate", "aac"]} +{"id": "xu-etal-2014-extracting", "title": "Extracting Lexically Divergent Paraphrases from Twitter", "abstract": "We present MultiP (Multi-instance Learning Paraphrase Model), a new model suited to identify paraphrases within the short messages on Twitter. We jointly model paraphrase relations between word and sentence pairs and assume only sentence-level annotations during learning. Using this principled latent variable model alone, we achieve the performance competitive with a state-of-the-art method which combines a latent space model with a feature-based supervised classifier. Our model also captures lexically divergent paraphrases that differ from yet complement previous methods; combining our model with previous work significantly outperforms the state-of-the-art. In addition, we present a novel annotation methodology that has allowed us to crowdsource a paraphrase corpus from Twitter. 
We make this new dataset available to the research community.", "keyphrases": ["paraphrase", "twitter", "sentence pair"]} +{"id": "ott-etal-2019-fairseq", "title": "fairseq: A Fast, Extensible Toolkit for Sequence Modeling", "abstract": "fairseq is an open-source sequence modeling toolkit that allows researchers and developers to train custom models for translation, summarization, language modeling, and other text generation tasks. The toolkit is based on PyTorch and supports distributed training across multiple GPUs and machines. We also support fast mixed-precision training and inference on modern GPUs. A demo video can be found at ", "keyphrases": ["extension", "fairseq", "neural machine translation"]} +{"id": "klein-manning-2004-corpus", "title": "Corpus-Based Induction of Syntactic Structure: Models of Dependency and Constituency", "abstract": "We present a generative model for the unsupervised learning of dependency structures. We also describe the multiplicative combination of this dependency model with a model of linear constituency. The product model outperforms both components on their respective evaluation metrics, giving the best published figures for unsupervised dependency parsing and unsupervised constituency parsing. We also demonstrate that the combined model works and is robust cross-linguistically, being able to exploit either attachment or distributional regularities that are salient in the data.", "keyphrases": ["induction", "generative model", "dependency model", "valence", "pcfg"]} +{"id": "miculicich-etal-2018-document", "title": "Document-Level Neural Machine Translation with Hierarchical Attention Networks", "abstract": "Neural Machine Translation (NMT) can be improved by including document-level contextual information. For this purpose, we propose a hierarchical attention model to capture the context in a structured and dynamic manner. The model is integrated in the original NMT architecture as another level of abstraction, conditioning on the NMT model's own previous hidden states. Experiments show that hierarchical attention significantly improves the BLEU score over a strong NMT baseline with the state-of-the-art in context-aware methods, and that both the encoder and decoder benefit from context in complementary ways.", "keyphrases": ["neural machine translation", "hierarchical attention networks", "contextual information", "nmt model", "document-level translation"]} +{"id": "heilman-smith-2010-good", "title": "Good Question! Statistical Ranking for Question Generation", "abstract": "We address the challenge of automatically generating questions from reading materials for educational practice and assessment. Our approach is to overgenerate questions, then rank them. We use manually written rules to perform a sequence of general purpose syntactic transformations (e.g., subject-auxiliary inversion) to turn declarative sentences into questions. These questions are then ranked by a logistic regression model trained on a small, tailored dataset consisting of labeled output from our system. 
Experimental results show that ranking nearly doubles the percentage of questions rated as acceptable by annotators, from 27% of all questions to 52% of the top ranked 20% of questions.", "keyphrases": ["question generation", "declarative sentence", "regression model", "template", "rule-based approach"]} +{"id": "genzel-etal-2010-poetic", "title": "\u201cPoetic\u201d Statistical Machine Translation: Rhyme and Meter", "abstract": "As a prerequisite to translation of poetry, we implement the ability to produce translations with meter and rhyme for phrase-based MT, examine whether the hypothesis space of such a system is flexible enough to accommodate such constraints, and investigate the impact of such constraints on translation quality.", "keyphrases": ["statistical machine translation", "rhyme", "poem"]} +{"id": "ghosh-etal-2015-semeval", "title": "SemEval-2015 Task 11: Sentiment Analysis of Figurative Language in Twitter", "abstract": "This report summarizes the objectives and evaluation of the SemEval 2015 task on the sentiment analysis of figurative language on Twitter (Task 11). This is the first sentiment analysis task wholly dedicated to analyzing figurative language on Twitter. Specifically, three broad classes of figurative language are considered: irony, sarcasm and metaphor. Gold standard sets of 8000 training tweets and 4000 test tweets were annotated using workers on the crowdsourcing platform CrowdFlower. Participating systems were required to provide a fine-grained sentiment score on an 11-point scale (-5 to +5, including 0 for neutral intent) for each tweet, and systems were evaluated against the gold standard using both a Cosine-similarity and a Mean-Squared-Error measure.", "keyphrases": ["sentiment analysis", "figurative language", "twitter", "semeval"]} +{"id": "mcclosky-etal-2006-effective", "title": "Effective Self-Training for Parsing", "abstract": "We present a simple, but surprisingly effective, method of self-training a two-phase parser-reranker system using readily available unlabeled data. We show that this type of bootstrapping is possible for parsing when the bootstrapped parses are processed by a discriminative reranker. Our improved model achieves an f-score of 92.1%, an absolute 1.1% improvement (12% error reduction) over the previous best result for Wall Street Journal parsing. Finally, we provide some analysis to better understand the phenomenon.", "keyphrases": ["self-training", "unlabeled data", "reranker", "domain adaptation", "good result"]} +{"id": "wong-dras-2011-exploiting", "title": "Exploiting Parse Structures for Native Language Identification", "abstract": "Attempts to profile authors according to their characteristics extracted from textual data, including native language, have drawn attention in recent years, via various machine learning approaches utilising mostly lexical features. Drawing on the idea of contrastive analysis, which postulates that syntactic errors in a text are to some extent influenced by the native language of an author, this paper explores the usefulness of syntactic features for native language identification. 
We take two types of parse substructure as features---horizontal slices of trees, and the more general feature schemas from discriminative parse reranking---and show that using this kind of syntactic feature results in an accuracy score in classification of seven native languages of around 80%, an error reduction of more than 30%.", "keyphrases": ["native language identification", "syntactic feature", "context-free grammar"]} +{"id": "kazama-torisawa-2007-exploiting", "title": "Exploiting Wikipedia as External Knowledge for Named Entity Recognition", "abstract": "We explore the use of Wikipedia as external knowledge to improve named entity recognition (NER). Our method retrieves the corresponding Wikipedia entry for each candidate word sequence and extracts a category label from the first sentence of the entry, which can be thought of as a definition part. These category labels are used as features in a CRF-based NE tagger. We demonstrate using the CoNLL 2003 dataset that the Wikipedia category labels extracted by such a simple method actually improve the accuracy of NER.", "keyphrases": ["wikipedia", "entity recognition", "candidate word sequence"]} +{"id": "koehn-senellart-2010-convergence", "title": "Convergence of Translation Memory and Statistical Machine Translation", "abstract": "We present two methods that merge ideas from statistical machine translation (SMT) and translation memories (TM). We use a TM to retrieve matches for source segments, and replace the mismatched parts with instructions to an SMT system to fill in the gap. We show that for fuzzy matches of over 70%, one method outperforms both SMT and TM baselines.", "keyphrases": ["translation memory", "statistical machine translation", "segment", "mismatched part", "smt system"]} +{"id": "calixto-liu-2017-incorporating", "title": "Incorporating Global Visual Features into Attention-based Neural Machine Translation.", "abstract": "We introduce multi-modal, attention-based neural machine translation (NMT) models which incorporate visual features into different parts of both the encoder and the decoder. Global image features are extracted using a pre-trained convolutional neural network and are incorporated (i) as words in the source sentence, (ii) to initialise the encoder hidden state, and (iii) as additional data to initialise the decoder hidden state. In our experiments, we evaluate translations into English and German, how different strategies to incorporate global image features compare and which ones perform best. We also study the impact that adding synthetic multi-modal, multilingual data brings and find that the additional data have a positive impact on multi-modal NMT models. We report new state-of-the-art results and our best models also significantly improve on a comparable phrase-based Statistical MT (PBSMT) model trained on the Multi30k data set according to all metrics evaluated. To the best of our knowledge, it is the first time a purely neural model significantly improves over a PBSMT model on all metrics evaluated on this data set.", "keyphrases": ["global visual feature", "neural machine translation", "source sentence"]} +{"id": "hill-etal-2015-simlex", "title": "SimLex-999: Evaluating Semantic Models With (Genuine) Similarity Estimation", "abstract": "We present SimLex-999, a gold standard resource for evaluating distributional semantic models that improves on existing resources in several important ways. 
First, in contrast to gold standards such as WordSim-353 and MEN, it explicitly quantifies similarity rather than association or relatedness so that pairs of entities that are associated but not actually similar (Freud, psychology) have a low rating. We show that, via this focus on similarity, SimLex-999 incentivizes the development of models with a different, and arguably wider, range of applications than those which reflect conceptual association. Second, SimLex-999 contains a range of concrete and abstract adjective, noun, and verb pairs, together with an independent rating of concreteness and (free) association strength for each pair. This diversity enables fine-grained analyses of the performance of models on concepts of different types, and consequently greater insight into how architectures can be improved. Further, unlike existing gold standard evaluations, for which automatic approaches have reached or surpassed the inter-annotator agreement ceiling, state-of-the-art models perform well below this ceiling on SimLex-999. There is therefore plenty of scope for SimLex-999 to quantify future improvements to distributional semantic models, guiding the development of the next generation of representation-learning architectures.", "keyphrases": ["relatedness", "conceptual association", "inter-annotator agreement", "simlex-999", "other type"]} +{"id": "lu-etal-2008-generative", "title": "A Generative Model for Parsing Natural Language to Meaning Representations", "abstract": "In this paper, we present an algorithm for learning a generative model of natural language sentences together with their formal meaning representations with hierarchical structures. The model is applied to the task of mapping sentences to hierarchical representations of their underlying meaning. We introduce dynamic programming techniques for efficient training and decoding. In experiments, we demonstrate that the model, when coupled with a discriminative reranking technique, achieves state-of-the-art performance when tested on two publicly available corpora. The generative model degrades robustly when presented with instances that are different from those seen in training. This allows a notable improvement in recall compared to previous models.", "keyphrases": ["generative model", "meaning representation", "hybrid tree", "parsing model", "derivation"]} +{"id": "clark-curran-2007-wide", "title": "Wide-Coverage Efficient Statistical Parsing with CCG and Log-Linear Models", "abstract": "This article describes a number of log-linear parsing models for an automatically extracted lexicalized grammar. The models are full parsing models in the sense that probabilities are defined for complete parses, rather than for independent events derived by decomposing the parse tree. Discriminative training is used to estimate the models, which requires incorrect parses for each sentence in the training data as well as the correct parse. The lexicalized grammar formalism used is Combinatory Categorial Grammar (CCG), and the grammar is automatically extracted from CCGbank, a CCG version of the Penn Treebank. The combination of discriminative training and an automatically extracted grammar leads to a significant memory requirement (up to 25 GB), which is satisfied using a parallel implementation of the BFGS optimization algorithm running on a Beowulf cluster. 
Dynamic programming over a packed chart, in combination with the parallel implementation, allows us to solve one of the largest-scale estimation problems in the statistical parsing literature in under three hours. A key component of the parsing system, for both training and testing, is a Maximum Entropy supertagger which assigns CCG lexical categories to words in a sentence. The supertagger makes the discriminative training feasible, and also leads to a highly efficient parser. Surprisingly, given CCG's spurious ambiguity, the parsing speeds are significantly higher than those reported for comparable parsers in the literature. We also extend the existing parsing techniques for CCG by developing a new model and efficient parsing algorithm which exploits all derivations, including CCG's nonstandard derivations. This model and parsing algorithm, when combined with normal-form constraints, give state-of-the-art accuracy for the recovery of predicate-argument dependencies from CCGbank. The parser is also evaluated on DepBank and compared against the RASP parser, outperforming RASP overall and on the majority of relation types. The evaluation on DepBank raises a number of issues regarding parser evaluation. This article provides a comprehensive blueprint for building a wide-coverage CCG parser. We demonstrate that both accurate and highly efficient parsing is possible with CCG.", "keyphrases": ["ccg", "derivation", "grammar formalism", "supertagger", "c&c parser"]} +{"id": "specia-etal-2009-estimating", "title": "Estimating the Sentence-Level Quality of Machine Translation Systems", "abstract": "We investigate the problem of predicting the quality of sentences produced by machine translation systems when reference translations are not available. The problem is addressed as a regression task and a method that takes into account the contribution of different features is proposed. We experiment with this method for translations produced by various MT systems and different language pairs, annotated with quality scores both automatically and manually. Results show that our method allows obtaining good estimates and that identifying a reduced set of relevant features plays an important role. The experiments also highlight a number of outstanding features that were consistently selected as the most relevant and could be used in different ways to improve MT performance or to enhance MT evaluation.", "keyphrases": ["machine translation", "quality estimation", "run-time", "nist"]} +{"id": "yang-eisenstein-2013-log", "title": "A Log-Linear Model for Unsupervised Text Normalization", "abstract": "We present a unified unsupervised statistical model for text normalization. The relationship between standard and non-standard tokens is characterized by a log-linear model, permitting arbitrary features. The weights of these features are trained in a maximumlikelihood framework, employing a novel sequential Monte Carlo training algorithm to overcome the large label space, which would be impractical for traditional dynamic programming solutions. This model is implemented in a normalization system called UNLOL, which achieves the best known results on two normalization datasets, outperforming more complex systems. 
We use the output of UNLOL to automatically normalize a large corpus of social media text, revealing a set of coherent orthographic styles that underlie online language variation.", "keyphrases": ["log-linear model", "text normalization", "social medium text"]} +{"id": "rashkin-etal-2018-event2mind", "title": "Event2Mind: Commonsense Inference on Events, Intents, and Reactions", "abstract": "We investigate a new commonsense inference task: given an event described in a short free-form text (\u201cX drinks coffee in the morning\u201d), a system reasons about the likely intents (\u201cX wants to stay awake\u201d) and reactions (\u201cX feels alert\u201d) of the event's participants. To support this study, we construct a new crowdsourced corpus of 25,000 event phrases covering a diverse range of everyday events and situations. We report baseline performance on this task, demonstrating that neural encoder-decoder models can successfully compose embedding representations of previously unseen events and reason about the likely intents and reactions of the event participants. In addition, we demonstrate how commonsense inference on people's intents and reactions can help unveil the implicit gender inequality prevalent in modern movie scripts.", "keyphrases": ["commonsense inference", "intent", "free-form text", "diverse range", "event2mind"]} +{"id": "dusmanu-etal-2017-argument", "title": "Argument Mining on Twitter: Arguments, Facts and Sources", "abstract": "Social media collect and spread on the Web personal opinions, facts, fake news and all kind of information users may be interested in. Applying argument mining methods to such heterogeneous data sources is a challenging open research issue, in particular considering the peculiarities of the language used to write textual messages on social media. In addition, new issues emerge when dealing with arguments posted on such platforms, such as the need to make a distinction between personal opinions and actual facts, and to detect the source disseminating information about such facts to allow for provenance verification. In this paper, we apply supervised classification to identify arguments on Twitter, and we present two new tasks for argument mining, namely facts recognition and source identification. We study the feasibility of the approaches proposed to address these tasks on a set of tweets related to the Grexit and Brexit news topics.", "keyphrases": ["twitter", "opinion", "argument mining"]} +{"id": "belinkov-etal-2017-neural", "title": "What do Neural Machine Translation Models Learn about Morphology?", "abstract": "Neural machine translation (MT) models obtain state-of-the-art performance while maintaining a simple, end-to-end architecture. However, little is known about what these models learn about source and target languages during the training process. In this work, we analyze the representations learned by neural MT models at various levels of granularity and empirically evaluate the quality of the representations for learning morphology through extrinsic part-of-speech and morphological tagging tasks. We conduct a thorough investigation along several parameters: word-based vs. character-based representations, depth of the encoding layer, the identity of the target language, and encoder vs. decoder representations. 
Our data-driven, quantitative evaluation sheds light on important aspects in the neural MT system and its ability to capture word structure.", "keyphrases": ["morphology", "linguistic knowledge", "internal representation"]} +{"id": "baldridge-osborne-2004-active", "title": "Active Learning and the Total Cost of Annotation", "abstract": "Active learning (AL) promises to reduce the cost of annotating labeled datasets for trainable human language technologies. Contrary to expectations, when creating labeled training material for HPSG parse selection and later reusing it with other models, gains from AL may be negligible or even negative. This has serious implications for using AL, showing that additional cost-saving strategies may need to be adopted. We explore one such strategy: using a model during annotation to automate some of the decisions. Our best results show an 80% reduction in annotation cost compared with labeling randomly selected data with a single model.", "keyphrases": ["cost", "hpsg parse selection", "active learning"]} +{"id": "nguyen-chiang-2017-transfer", "title": "Transfer Learning across Low-Resource, Related Languages for Neural Machine Translation", "abstract": "We present a simple method to improve neural translation of a low-resource language pair using parallel data from a related, also low-resource, language pair. The method is based on the transfer method of Zoph et al., but whereas their method ignores any source vocabulary overlap, ours exploits it. First, we split words using Byte Pair Encoding (BPE) to increase vocabulary overlap. Then, we train a model on the first language pair and transfer its parameters, including its source word embeddings, to another model and continue training on the second language pair. Our experiments show that transfer learning helps word-based translation only slightly, but when used on top of a much stronger BPE baseline, it yields larger improvements of up to 4.3 BLEU.", "keyphrases": ["neural machine translation", "low-resource language", "vocabulary", "transfer learning", "parent"]} +{"id": "coppersmith-etal-2015-clpsych", "title": "CLPsych 2015 Shared Task: Depression and PTSD on Twitter", "abstract": "This paper presents a summary of the Computational Linguistics and Clinical Psychology (CLPsych) 2015 shared and unshared tasks. These tasks aimed to provide apples-to-apples comparisons of various approaches to modeling language relevant to mental health from social media. The data used for these tasks is from Twitter users who state a diagnosis of depression or post traumatic stress disorder (PTSD) and demographically-matched community controls. The unshared task was a hackathon held at Johns Hopkins University in November 2014 to explore the data, and the shared task was conducted remotely, with each participating team submitted scores for a held-back test set of users. The shared task consisted of three binary classification experiments: (1) depression versus control, (2) PTSD versus control, and (3) depression versus PTSD. 
Classifiers were compared primarily via their average precision, though a number of other metrics are used along with this to allow a more nuanced interpretation of the performance measures.", "keyphrases": ["depression", "ptsd", "twitter", "social medium platform"]} +{"id": "smith-etal-2010-extracting", "title": "Extracting Parallel Sentences from Comparable Corpora using Document Level Alignment", "abstract": "The quality of a statistical machine translation (SMT) system is heavily dependent upon the amount of parallel sentences used in training. In recent years, there have been several approaches developed for obtaining parallel sentences from non-parallel, or comparable data, such as news articles published within the same time period (Munteanu and Marcu, 2005), or web pages with a similar structure (Resnik and Smith, 2003). One resource not yet thoroughly explored is Wikipedia, an online encyclopedia containing linked articles in many languages. We advance the state of the art in parallel sentence extraction by modeling the document level alignment, motivated by the observation that parallel sentence pairs are often found in close proximity. We also include features which make use of the additional annotation given by Wikipedia, and features using an automatically induced lexicon model. Results for both accuracy in sentence extraction and downstream improvement in an SMT system are presented.", "keyphrases": ["comparable corpora", "document level alignment", "wikipedia", "sentence extraction", "parallel data"]} +{"id": "nimishakavi-etal-2016-relation", "title": "Relation Schema Induction using Tensor Factorization with Side Information", "abstract": "Given a set of documents from a specific domain (e.g., medical research journals), how do we automatically build a Knowledge Graph (KG) for that domain? Automatic identification of relations and their schemas, i.e., type signature of arguments of relations (e.g., undergo(Patient, Surgery)), is an important first step towards this goal. We refer to this problem as Relation Schema Induction (RSI). In this paper, we propose Schema Induction using Coupled Tensor Factorization (SICTF), a novel tensor factorization method for relation schema induction. SICTF factorizes Open Information Extraction (OpenIE) triples extracted from a domain corpus along with additional side information in a principled way to induce relation schemas. To the best of our knowledge, this is the first application of tensor factorization for the RSI problem. Through extensive experiments on multiple real-world datasets, we find that SICTF is not only more accurate than state-of-the-art baselines, but also significantly faster (about 14x faster).", "keyphrases": ["tensor factorization", "side information", "relation schema induction"]} +{"id": "zhao-ng-2014-domain", "title": "Domain Adaptation with Active Learning for Coreference Resolution", "abstract": "In the literature, most prior work on coreference resolution centered on the newswire domain. Although a coreference resolution system trained on the newswire domain performs well on newswire texts, there is a huge performance drop when it is applied to the biomedical domain. In this paper, we present an approach integrating domain adaptation with active learning to adapt coreference resolution from the newswire domain to the biomedical domain. We explore the effect of domain adaptation, active learning, and target domain instance weighting for coreference resolution. 
Experimental results show that domain adaptation with active learning and target domain instance weighting achieves performance on MEDLINE abstracts similar to a system trained on coreference annotation of only target domain training instances, but with a greatly reduced number of target domain training instances that we need to annotate.", "keyphrases": ["active learning", "coreference resolution", "domain adaptation"]} +{"id": "zhang-etal-2018-graph", "title": "Graph Convolution over Pruned Dependency Trees Improves Relation Extraction", "abstract": "Dependency trees help relation extraction models capture long-range relations between words. However, existing dependency-based models either neglect crucial information (e.g., negation) by pruning the dependency trees too aggressively, or are computationally inefficient because it is difficult to parallelize over different tree structures. We propose an extension of graph convolutional networks that is tailored for relation extraction, which pools information over arbitrary dependency structures efficiently in parallel. To incorporate relevant information while maximally removing irrelevant content, we further apply a novel pruning strategy to the input trees by keeping words immediately around the shortest path between the two entities among which a relation might hold. The resulting model achieves state-of-the-art performance on the large-scale TACRED dataset, outperforming existing sequence and dependency-based neural models. We also show through detailed analysis that this model has complementary strengths to sequence models, and combining them further improves the state of the art.", "keyphrases": ["relation extraction", "tacred dataset", "graph convolution", "input sentence", "many study"]} +{"id": "schulte-im-walde-etal-2013-exploring", "title": "Exploring Vector Space Models to Predict the Compositionality of German Noun-Noun Compounds", "abstract": "This paper explores two hypotheses regarding vector space models that predict the compositionality of German noun-noun compounds: (1) Against our intuition, we demonstrate that window-based rather than syntax-based distributional features perform better predictions, and that not adjectives or verbs but nouns represent the most salient part-of-speech. Our overall best result is state-of-the-art, reaching Spearman\u2019s \u03c1 = 0.65 with a wordspace model of nominal features from a 20-word window of a 1.5 billion word web corpus. (2) While there are no significant differences in predicting compound\u2010modifier vs. compound\u2010head ratings on compositionality, we show that the modifier (rather than the head) properties predominantly influence the degree of compositionality of the compound.", "keyphrases": ["compositionality", "german noun-noun compound", "component word"]} +{"id": "ponti-etal-2020-xcopa", "title": "XCOPA: A Multilingual Dataset for Causal Commonsense Reasoning", "abstract": "In order to simulate human language capacity, natural language processing systems must be able to reason about the dynamics of everyday situations, including their possible causes and effects. Moreover, they should be able to generalise the acquired world knowledge to new languages, modulo cultural differences. Advances in machine reasoning and cross-lingual transfer depend on the availability of challenging evaluation benchmarks. 
Motivated by both demands, we introduce Cross-lingual Choice of Plausible Alternatives (XCOPA), a typologically diverse multilingual dataset for causal commonsense reasoning in 11 languages, which includes resource-poor languages like Eastern Apur\u00edmac Quechua and Haitian Creole. We evaluate a range of state-of-the-art models on this novel dataset, revealing that the performance of current methods based on multilingual pretraining and zero-shot fine-tuning falls short compared to translation-based transfer. Finally, we propose strategies to adapt multilingual models to out-of-sample resource-lean languages where only a small corpus or a bilingual dictionary is available, and report substantial improvements over the random baseline. The XCOPA dataset is freely available at github.com/cambridgeltl/xcopa.", "keyphrases": ["multilingual dataset", "causal commonsense reasoning", "cultural difference", "xcopa"]} +{"id": "das-etal-2016-human", "title": "Human Attention in Visual Question Answering: Do Humans and Deep Networks look at the same regions?", "abstract": "We conduct large-scale studies on `human attention' in Visual Question Answering (VQA) to understand where humans choose to look to answer questions about images. We design and test multiple game-inspired novel attention-annotation interfaces that require the subject to sharpen regions of a blurred image to answer a question. Thus, we introduce the VQA-HAT (Human ATtention) dataset. We evaluate attention maps generated by state-of-the-art VQA models against human attention both qualitatively (via visualizations) and quantitatively (via rank-order correlation). Overall, our experiments show that current attention models in VQA do not seem to be looking at the same regions as humans.", "keyphrases": ["visual question answering", "same region", "human attention"]} +{"id": "talmor-etal-2019-commonsenseqa", "title": "CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge", "abstract": "When answering a question, people often draw upon their rich world knowledge in addition to the particular context. Recent work has focused primarily on answering questions given some relevant document or context, and required very little general background. To investigate question answering with prior knowledge, we present CommonsenseQA: a challenging new dataset for commonsense question answering. To capture common sense beyond associations, we extract from ConceptNet (Speer et al., 2017) multiple target concepts that have the same semantic relation to a single source concept. Crowd-workers are asked to author multiple-choice questions that mention the source concept and discriminate in turn between each of the target concepts. This encourages workers to create questions with complex semantics that often require prior knowledge. We create 12,247 questions through this procedure and demonstrate the difficulty of our task with a large number of strong baselines. 
Our best baseline is based on BERT-large (Devlin et al., 2018) and obtains 56% accuracy, well below human performance, which is 89%.", "keyphrases": ["multiple-choice question", "commonsenseqa", "reasoning", "language model", "challenging task"]} +{"id": "vadapalli-etal-2017-ssas", "title": "SSAS: Semantic Similarity for Abstractive Summarization", "abstract": "Ideally a metric evaluating an abstract system summary should represent the extent to which the system-generated summary approximates the semantic inference conceived by the reader using a human-written reference summary. Most of the previous approaches relied upon word or syntactic sub-sequence overlap to evaluate system-generated summaries. Such metrics cannot evaluate the summary at semantic inference level. Through this work we introduce the metric of Semantic Similarity for Abstractive Summarization (SSAS), which leverages natural language inference and paraphrasing techniques to frame a novel approach to evaluate system summaries at semantic inference level. SSAS is based upon a weighted composition of quantities representing the level of agreement, contradiction, independence, paraphrasing, and optionally ROUGE score between a system-generated and a human-written summary.", "keyphrases": ["semantic similarity", "abstractive summarization", "ssas"]} +{"id": "wang-2017-liar", "title": "\u201cLiar, Liar Pants on Fire\u201d: A New Benchmark Dataset for Fake News Detection", "abstract": "Automatic fake news detection is a challenging problem in deception detection, and it has tremendous real-world political and social impacts. However, statistical approaches to combating fake news have been dramatically limited by the lack of labeled benchmark datasets. In this paper, we present LIAR: a new, publicly available dataset for fake news detection. We collected a decade-long, 12.8K manually labeled short statements in various contexts from PolitiFact.com, which provides detailed analysis report and links to source documents for each case. This dataset can be used for fact-checking research as well. Notably, this new dataset is an order of magnitude larger than the previously largest public fake news datasets of similar type. Empirically, we investigate automatic fake news detection based on surface-level linguistic patterns. We have designed a novel, hybrid convolutional neural network to integrate meta-data with text. We show that this hybrid approach can improve a text-only deep learning model.", "keyphrases": ["fake news detection", "liar dataset", "politifact", "news article", "fact verification"]} +{"id": "bollegala-etal-2006-bottom", "title": "A Bottom-Up Approach to Sentence Ordering for Multi-Document Summarization", "abstract": "Ordering information is a difficult but important task for applications generating natural language texts such as multi-document summarization, question answering, and concept-to-text generation. In multi-document summarization, information is selected from a set of source documents. However, improper ordering of information in a summary can confuse the reader and deteriorate the readability of the summary. Therefore, it is vital to properly order the information in multi-document summarization. We present a bottom-up approach to arrange sentences extracted for multi-document summarization. To capture the association and order of two textual segments (e.g. sentences), we define four criteria: chronology, topical-closeness, precedence, and succession. 
These criteria are integrated into a criterion by a supervised learning approach. We repeatedly concatenate two textual segments into one segment based on the criterion, until we obtain the overall segment with all sentences arranged. We evaluate the sentence orderings produced by the proposed method and numerous baselines using subjective gradings as well as automatic evaluation measures. We introduce the average continuity, an automatic evaluation measure of sentence ordering in a summary, and investigate its appropriateness for this task.", "keyphrases": ["bottom-up approach", "sentence ordering", "multi-document summarization"]} +{"id": "wu-etal-2008-domain", "title": "Domain Adaptation for Statistical Machine Translation with Domain Dictionary and Monolingual Corpora", "abstract": "Statistical machine translation systems are usually trained on large amounts of bilingual text and monolingual text. In this paper, we propose a method to perform domain adaptation for statistical machine translation, where in-domain bilingual corpora do not exist. This method first uses out-of-domain corpora to train a baseline system and then uses in-domain translation dictionaries and in-domain monolingual corpora to improve the in-domain performance. We propose an algorithm to combine these different resources in a unified framework. Experimental results indicate that our method achieves absolute improvements of 8.16 and 3.36 BLEU scores on Chinese to English translation and English to French translation respectively, as compared with the baselines using only out-of-domain corpora.", "keyphrases": ["baseline system", "domain adaptation", "bilingual data"]} +{"id": "roberts-etal-2020-much", "title": "How Much Knowledge Can You Pack Into the Parameters of a Language Model?", "abstract": "It has recently been observed that neural language models trained on unstructured text can implicitly store and retrieve knowledge using natural language queries. In this short paper, we measure the practical utility of this approach by fine-tuning pre-trained models to answer questions without access to any external context or knowledge. We show that this approach scales with model size and performs competitively with open-domain systems that explicitly retrieve answers from an external knowledge source when answering questions. To facilitate reproducibility and future work, we release our code and trained models.", "keyphrases": ["much knowledge", "language model", "pre-trained model", "access", "plm"]} +{"id": "roberts-etal-2012-empatweet", "title": "EmpaTweet: Annotating and Detecting Emotions on Twitter", "abstract": "The rise of micro-blogging in recent years has resulted in significant access to emotion-laden text. Unlike emotion expressed in other textual sources (e.g., blogs, quotes in newswire, email, product reviews, or even clinical text), micro-blogs differ by (1) placing a strict limit on length, resulting radically in new forms of emotional expression, and (2) encouraging users to express their daily thoughts in real-time, often resulting in far more emotion statements than might normally occur. In this paper, we introduce a corpus collected from Twitter with annotated micro-blog posts (or \u201ctweets\u201d) annotated at the tweet-level with seven emotions: ANGER, DISGUST, FEAR, JOY, LOVE, SADNESS, and SURPRISE. We analyze how emotions are distributed in the data we annotated and compare it to the distributions in other emotion-annotated corpora. 
We also used the annotated corpus to train a classifier that automatically discovers the emotions in tweets. In addition, we present an analysis of the linguistic style used for expressing emotions in our corpus. We hope that these observations will lead to the design of novel emotion detection techniques that account for linguistic style and psycholinguistic theories.", "keyphrases": ["emotion", "twitter", "social medium"]} +{"id": "amir-etal-2016-modelling", "title": "Modelling Context with User Embeddings for Sarcasm Detection in Social Media", "abstract": "We introduce a deep neural network for automated sarcasm detection. Recent work has emphasized the need for models to capitalize on contextual features, beyond lexical and syntactic cues present in utterances. For example, different speakers will tend to employ sarcasm regarding different subjects and, thus, sarcasm detection models ought to encode such speaker information. Current methods have achieved this by way of laborious feature engineering. By contrast, we propose to automatically learn and then exploit user embeddings, to be used in concert with lexical signals to recognize sarcasm. Our approach does not require elaborate feature engineering (and concomitant data scraping); fitting user embeddings requires only the text from their previous posts. The experimental results show that our model outperforms a state-of-the-art approach leveraging an extensive set of carefully crafted features.", "keyphrases": ["user embedding", "sarcasm detection", "historical tweet"]} +{"id": "tu-etal-2016-modeling", "title": "Modeling Coverage for Neural Machine Translation", "abstract": "Attention mechanism has enhanced state-of-the-art Neural Machine Translation (NMT) by jointly learning to align and translate. It tends to ignore past alignment information, however, which often leads to over-translation and under-translation. To address this problem, we propose coverage-based NMT in this paper. We maintain a coverage vector to keep track of the attention history. The coverage vector is fed to the attention model to help adjust future attention, which lets NMT system to consider more about untranslated source words. Experiments show that the proposed approach significantly improves both translation quality and alignment quality over standard attention-based NMT.", "keyphrases": ["coverage", "neural machine translation", "under-translation", "attention model", "untranslated source word"]} +{"id": "callison-burch-etal-2008-parametric", "title": "ParaMetric: An Automatic Evaluation Metric for Paraphrasing", "abstract": "We present ParaMetric, an automatic evaluation metric for data-driven approaches to paraphrasing. ParaMetric provides an objective measure of quality using a collection of multiple translations whose paraphrases have been manually annotated. ParaMetric calculates precision and recall scores by comparing the paraphrases discovered by automatic paraphrasing techniques against gold standard alignments of words and phrases within equivalent sentences. We report scores for several established paraphrasing techniques.", "keyphrases": ["automatic evaluation metric", "paraphrasing", "parametric"]} +{"id": "banon-etal-2020-paracrawl", "title": "ParaCrawl: Web-Scale Acquisition of Parallel Corpora", "abstract": "We report on methods to create the largest publicly available parallel corpora by crawling the web, using open source software. 
We empirically compare alternative methods and publish benchmark data sets for sentence alignment and sentence pair filtering. We also describe the parallel corpora released and evaluate their quality and their usefulness to create machine translation systems.", "keyphrases": ["parallel corpora", "web", "paracrawl"]} +{"id": "surdeanu-etal-2008-conll", "title": "The CoNLL 2008 Shared Task on Joint Parsing of Syntactic and Semantic Dependencies", "abstract": "The Conference on Computational Natural Language Learning is accompanied every year by a shared task whose purpose is to promote natural language processing applications and evaluate them in a standard setting. In 2008 the shared task was dedicated to the joint parsing of syntactic and semantic dependencies. This shared task not only unifies the shared tasks of the previous four years under a unique dependency-based formalism, but also extends them significantly: this year's syntactic dependencies include more information such as named-entity boundaries; the semantic dependencies model roles of both verbal and nominal predicates. In this paper, we define the shared task and describe how the data sets were created. Furthermore, we report and analyze the results and describe the approaches of the participating systems.", "keyphrases": ["conll", "joint parsing", "srl", "semantic role"]} +{"id": "xu-etal-2020-matinf", "title": "MATINF: A Jointly Labeled Large-Scale Dataset for Classification, Question Answering and Summarization", "abstract": "Recently, large-scale datasets have vastly facilitated the development in nearly all domains of Natural Language Processing. However, there is currently no cross-task dataset in NLP, which hinders the development of multi-task learning. We propose MATINF, the first jointly labeled large-scale dataset for classification, question answering and summarization. MATINF contains 1.07 million question-answer pairs with human-labeled categories and user-generated question descriptions. Based on such rich information, MATINF is applicable for three major NLP tasks, including classification, question answering, and summarization. We benchmark existing methods and a novel multi-task baseline over MATINF to inspire further research. Our comprehensive comparison and experiments over MATINF and other datasets demonstrate the merits held by MATINF.", "keyphrases": ["question answering", "summarization", "matinf"]} +{"id": "park-levy-2011-automated", "title": "Automated Whole Sentence Grammar Correction Using a Noisy Channel Model", "abstract": "Automated grammar correction techniques have seen improvement over the years, but there is still much room for increased performance. Current correction techniques mainly focus on identifying and correcting a specific type of error, such as verb form misuse or preposition misuse, which restricts the corrections to a limited scope. We introduce a novel technique, based on a noisy channel model, which can utilize the whole sentence context to determine proper corrections. We show how to use the EM algorithm to learn the parameters of the noise model, using only a data set of erroneous sentences, given the proper language model. This frees us from the burden of acquiring a large corpus of corrected sentences. 
We also present a cheap and efficient way to provide automated evaluation results for grammar corrections by using BLEU and METEOR, in contrast to the commonly used manual evaluations.", "keyphrases": ["correction", "noisy channel model", "language model"]} +{"id": "wang-cho-2019-bert", "title": "BERT has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model", "abstract": "We show that BERT (Devlin et al., 2018) is a Markov random field language model. This formulation gives way to a natural procedure to sample sentences from BERT. We generate from BERT and find that it can produce high quality, fluent generations. Compared to the generations of a traditional left-to-right language model, BERT generates sentences that are more diverse but of slightly worse quality.", "keyphrases": ["markov", "language model", "procedure", "bert"]} +{"id": "mihalcea-etal-2004-pagerank", "title": "PageRank on Semantic Networks, with Application to Word Sense Disambiguation", "abstract": "This paper presents a new open text word sense disambiguation method that combines the use of logical inferences with PageRank-style algorithms applied on graphs extracted from natural language documents. We evaluate the accuracy of the proposed algorithm on several sense-annotated texts, and show that it consistently outperforms the accuracy of other previously proposed knowledge-based word sense disambiguation methods. We also explore and evaluate methods that combine several open-text word sense disambiguation algorithms.", "keyphrases": ["word sense disambiguation", "pagerank", "wsd"]} +{"id": "hatori-etal-2012-incremental", "title": "Incremental Joint Approach to Word Segmentation, POS Tagging, and Dependency Parsing in Chinese", "abstract": "We propose the first joint model for word segmentation, POS tagging, and dependency parsing for Chinese. Based on an extension of the incremental joint model for POS tagging and dependency parsing (Hatori et al., 2011), we propose an efficient character-based decoding method that can combine features from state-of-the-art segmentation, POS tagging, and dependency parsing models. We also describe our method to align comparable states in the beam, and how we can combine features of different characteristics in our incremental framework. In experiments using the Chinese Treebank (CTB), we show that the accuracies of the three tasks can be improved significantly over the baseline models, particularly by 0.6% for POS tagging and 2.4% for dependency parsing. We also perform comparison experiments with the partially joint models.", "keyphrases": ["word segmentation", "pos tagging", "joint modeling"]} +{"id": "wan-etal-2010-cross", "title": "Cross-Language Document Summarization Based on Machine Translation Quality Prediction", "abstract": "Cross-language document summarization is a task of producing a summary in one language for a document set in a different language. Existing methods simply use machine translation for document translation or summary translation. However, current machine translation services are far from satisfactory, which results in that the quality of the cross-language summary is usually very poor, both in readability and content. In this paper, we propose to consider the translation quality of each sentence in the English-to-Chinese cross-language summarization process. 
First, the translation quality of each English sentence in the document set is predicted with the SVM regression method, and then the quality score of each sentence is incorporated into the summarization process. Finally, the English sentences with high translation quality and high informativeness are selected and translated to form the Chinese summary. Experimental results demonstrate the effectiveness and usefulness of the proposed approach.", "keyphrases": ["machine translation", "quality score", "cross-language document summarization"]} +{"id": "shinyama-sekine-2006-preemptive", "title": "Preemptive Information Extraction using Unrestricted Relation Discovery", "abstract": "We are trying to extend the boundary of Information Extraction (IE) systems. Existing IE systems require a lot of time and human effort to tune for a new scenario. Preemptive Information Extraction is an attempt to automatically create all feasible IE systems in advance without human intervention. We propose a technique called Unrestricted Relation Discovery that discovers all possible relations from texts and presents them as tables. We present a preliminary system that obtains reasonably good results.", "keyphrases": ["unrestricted relation discovery", "preemptive information extraction", "same relation"]} +{"id": "chersoni-etal-2017-logical", "title": "Logical Metonymy in a Distributional Model of Sentence Comprehension", "abstract": "In theoretical linguistics, logical metonymy is defined as the combination of an event-subcategorizing verb with an entity-denoting direct object (e.g., The author began the book), so that the interpretation of the VP requires the retrieval of a covert event (e.g., writing). Psycholinguistic studies have revealed extra processing costs for logical metonymy, a phenomenon generally explained with the introduction of new semantic structure. In this paper, we present a general distributional model for sentence comprehension inspired by the Memory, Unification and Control model by Hagoort (2013,2016). We show that our distributional framework can account for the extra processing costs of logical metonymy and can identify the covert event in a classification task.", "keyphrases": ["distributional model", "sentence comprehension", "logical metonymy"]} +{"id": "warner-hirschberg-2012-detecting", "title": "Detecting Hate Speech on the World Wide Web", "abstract": "We present an approach to detecting hate speech in online text, where hate speech is defined as abusive speech targeting specific group characteristics, such as ethnic origin, religion, gender, or sexual orientation. While hate speech against any group may exhibit some common characteristics, we have observed that hatred against each different group is typically characterized by the use of a small set of high frequency stereotypical words; however, such words may be used in either a positive or a negative sense, making our task similar to that of word sense disambiguation. In this paper we describe our definition of hate speech, the collection and annotation of our hate speech corpus, and a mechanism for detecting some commonly used methods of evading common \"dirty word\" filters. We describe pilot classification experiments in which we classify anti-semitic speech reaching an accuracy of 94%, precision of 68% and recall at 60%, for an F1 measure of 
.6375.", "keyphrases": ["hate speech", "group", "anti-semitic speech", "social medium", "n-gram"]} +{"id": "fader-etal-2007-mavenrank", "title": "MavenRank: Identifying Influential Members of the US Senate Using Lexical Centrality", "abstract": "We introduce a technique for identifying the most salient participants in a discussion. Our method, MavenRank is based on lexical centrality: a random walk is performed on a graph in which each node is a participant in the discussion and an edge links two participants who use similar rhetoric. As a test, we used MavenRank to identify the most influential members of the US Senate using data from the US Congressional Record and used committee ranking to evaluate the output. Our results show that MavenRank scores are largely driven by committee status in most topics, but can capture speaker centrality in topics where speeches are used to indicate ideological position instead of influence legislation.", "keyphrases": ["influential member", "lexical centrality", "mavenrank"]} +{"id": "carpuat-2009-one", "title": "One Translation Per Discourse", "abstract": "We revisit the one sense per discourse hypothesis of Gale et al. in the context of machine translation. Since a given sense can be lexicalized differently in translation, do we observe one translation per discourse? Analysis of manual translations reveals that the hypothesis still holds when using translations in parallel text as sense annotation, thus confirming that translational differences represent useful sense distinctions. Analysis of Statistical Machine Translation (SMT) output showed that despite ignoring document structure, the one translation per discourse hypothesis is strongly supported in part because of the low variability in SMT lexical choice. More interestingly, cases where the hypothesis does not hold can reveal lexical choice errors. A preliminary study showed that enforcing the one translation per discourse constraint in SMT can potentially improve translation quality, and that SMT systems might benefit from translating sentences within their entire document context.", "keyphrases": ["discourse", "translation quality", "smt system"]} +{"id": "jiang-etal-2009-automatic", "title": "Automatic Adaptation of Annotation Standards: Chinese Word Segmentation and POS Tagging \u2013 A Case Study", "abstract": "Manually annotated corpora are valuable but scarce resources, yet for many annotation tasks such as treebanking and sequence labeling there exist multiple corpora with different and incompatible annotation guidelines or standards. This seems to be a great waste of human efforts, and it would be nice to automatically adapt one annotation standard to another. We present a simple yet effective strategy that transfers knowledge from a differently annotated corpus to the corpus with desired annotation. We test the efficacy of this method in the context of Chinese word segmentation and part-of-speech tagging, where no segmentation and POS tagging standards are widely accepted due to the lack of morphology in Chinese. 
Experiments show that adaptation from the much larger People's Daily corpus to the smaller but more popular Penn Chinese Treebank results in significant improvements in both segmentation and tagging accuracies (with error reductions of 30.2% and 14%, respectively), which in turn helps improve Chinese parsing accuracy.", "keyphrases": ["chinese word segmentation", "pos tagging", "annotation guideline"]} +{"id": "poon-domingos-2008-joint", "title": "Joint Unsupervised Coreference Resolution with Markov Logic", "abstract": "Machine learning approaches to coreference resolution are typically supervised, and require expensive labeled data. Some unsupervised approaches have been proposed (e.g., Haghighi and Klein (2007)), but they are less accurate. In this paper, we present the first unsupervised approach that is competitive with supervised ones. This is made possible by performing joint inference across mentions, in contrast to the pairwise classification typically used in supervised methods, and by using Markov logic as a representation language, which enables us to easily express relations like apposition and predicate nominals. On MUC and ACE datasets, our model outperforms Haghighi and Klein's one using only a fraction of the training data, and often matches or exceeds the accuracy of state-of-the-art supervised models.", "keyphrases": ["coreference resolution", "markov logic", "joint inference", "mention", "entity-level information"]} +{"id": "xu-etal-2013-filling", "title": "Filling Knowledge Base Gaps for Distant Supervision of Relation Extraction", "abstract": "Distant supervision has attracted recent interest for training information extraction systems because it does not require any human annotation but rather employs existing knowledge bases to heuristically label a training corpus. However, previous work has failed to address the problem of false negative training examples mislabeled due to the incompleteness of knowledge bases. To tackle this problem, we propose a simple yet novel framework that combines a passage retrieval model using coarse features into a state-of-the-art relation extractor using multi-instance learning with fine features. We adapt the information retrieval technique of pseudo-relevance feedback to expand knowledge bases, assuming entity pairs in top-ranked passages are more likely to express a relation. Our proposed technique significantly improves the quality of distantly supervised relation extraction, boosting recall from 47.7% to 61.2% with a consistently high level of precision of around 93% in the experiments.", "keyphrases": ["knowledge base", "distant supervision", "relation extraction", "pseudo-relevance feedback"]} +{"id": "cai-yates-2013-large", "title": "Large-scale Semantic Parsing via Schema Matching and Lexicon Extension", "abstract": "Supervised training procedures for semantic parsers produce high-quality semantic parsers, but they have difficulty scaling to large databases because of the sheer number of logical constants for which they must see labeled training data. We present a technique for developing semantic parsers for large databases based on a reduction to standard supervised training algorithms, schema matching, and pattern learning.
Leveraging techniques from each of these areas, we develop a semantic parser for Freebase that is capable of parsing questions with an F1 that improves by 0.42 over a purely-supervised learning algorithm.", "keyphrases": ["semantic parsing", "freebase", "knowledge basis"]} +{"id": "diab-etal-2007-arabic", "title": "Arabic diacritization in the context of statistical machine translation", "abstract": "Diacritics in Arabic are optional orthographic symbols typically representing short vowels. Most Arabic text is underspecified for diacritics. However, we do observe partial diacritization depending on genre and domain. In this paper, we investigate the impact of Arabic diacritization on statistical machine translation (SMT). We define several diacritization schemes ranging from full to partial diacritization. We explore the impact of the defined schemes on SMT in two different modes which tease apart the effect of diacritization on the alignment and its consequences on decoding. Our results show that none of the partial diacritization schemes significantly varies in performance from the no-diacritization baseline despite the increase in the number of types in the data. However, a full diacritization scheme performs significantly worse than no diacritization. Crucially, our research suggests that the SMT performance is positively correlated with the increase in the number of tokens correctly affected by a diacritization scheme and the high F-score of the automatic assignment of the particular diacritic.", "keyphrases": ["diacritization", "machine translation", "arabic"]} +{"id": "geva-etal-2019-discofuse", "title": "DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion", "abstract": "Sentence fusion is the task of joining several independent sentences into a single coherent text. Current datasets for sentence fusion are small and insufficient for training modern neural models. In this paper, we propose a method for automatically-generating fusion examples from raw text and present DiscoFuse, a large scale dataset for discourse-based sentence fusion. We author a set of rules for identifying a diverse set of discourse phenomena in raw text, and decomposing the text into two independent sentences. We apply our approach on two document collections: Wikipedia and Sports articles, yielding 60 million fusion examples annotated with discourse information required to reconstruct the fused text. We develop a sequence-to-sequence model on DiscoFuse and thoroughly analyze its strengths and weaknesses with respect to the various discourse phenomena, using both automatic as well as human evaluation. Finally, we conduct transfer learning experiments with WebSplit, a recent dataset for text simplification. We show that pretraining on DiscoFuse substantially improves performance on WebSplit when viewed as a sentence fusion task.", "keyphrases": ["large-scale dataset", "sentence fusion", "discofuse"]} +{"id": "maruf-haffari-2018-document", "title": "Document Context Neural Machine Translation with Memory Networks", "abstract": "We present a document-level neural machine translation model which takes both source and target document context into account using memory networks. We model the problem as a structured prediction problem with interdependencies among the observed and hidden variables, i.e., the source sentences and their unobserved target translations in the document. 
The resulting structured prediction problem is tackled with a neural translation model equipped with two memory components, one each for the source and target side, to capture the documental interdependencies. We train the model end-to-end, and propose an iterative decoding algorithm based on block coordinate descent. Experimental results of English translations from French, German, and Estonian documents show that our model is effective in exploiting both source and target document context, and statistically significantly outperforms the previous work in terms of BLEU and METEOR.", "keyphrases": ["machine translation", "memory network", "document context"]} +{"id": "qiu-etal-2011-opinion", "title": "Opinion Word Expansion and Target Extraction through Double Propagation", "abstract": "Analysis of opinions, known as opinion mining or sentiment analysis, has attracted a great deal of attention recently due to many practical applications and challenging research problems. In this article, we study two important problems, namely, opinion lexicon expansion and opinion target extraction. Opinion targets (targets, for short) are entities and their attributes on which opinions have been expressed. To perform the tasks, we found that there are several syntactic relations that link opinion words and targets. These relations can be identified using a dependency parser and then utilized to expand the initial opinion lexicon and to extract targets. This proposed method is based on bootstrapping. We call it double propagation as it propagates information between opinion words and targets. A key advantage of the proposed method is that it only needs an initial opinion lexicon to start the bootstrapping process. Thus, the method is semi-supervised due to the use of opinion word seeds. In evaluation, we compare the proposed method with several state-of-the-art methods using a standard product review test collection. The results show that our approach outperforms these existing methods significantly.", "keyphrases": ["double propagation", "syntactic relation", "opinion word", "absa", "unsupervised method"]} +{"id": "kiela-etal-2015-specializing", "title": "Specializing Word Embeddings for Similarity or Relatedness", "abstract": "We demonstrate the advantage of specializing semantic word embeddings for either similarity or relatedness. We compare two variants of retrofitting and a joint-learning approach, and find that all three yield specialized semantic spaces that capture human intuitions regarding similarity and relatedness better than unspecialized spaces. We also show that using specialized spaces in NLP tasks and applications leads to clear improvements, for document classification and synonym selection, which rely on either similarity or relatedness but not both.", "keyphrases": ["relatedness", "human intuition", "document classification"]} +{"id": "mccarthy-etal-2003-detecting", "title": "Detecting a Continuum of Compositionality in Phrasal Verbs", "abstract": "We investigate the use of an automatically acquired thesaurus for measures designed to indicate the compositionality of candidate multiword verbs, specifically English phrasal verbs identified automatically using a robust parser. We examine various measures using the nearest neighbours of the phrasal verb, and in some cases the neighbours of the simplex counterpart and show that some of these correlate significantly with human rankings of compositionality on the test set. 
We also show that whilst the compositionality judgements correlate with some statistics commonly used for extracting multiwords, the relationship is not as strong as that using the automatically constructed thesaurus.", "keyphrases": ["compositionality", "phrasal verb", "thesaurus", "various measure", "vpc"]} +{"id": "collins-etal-2005-clause", "title": "Clause Restructuring for Statistical Machine Translation", "abstract": "We describe a method for incorporating syntactic information in statistical machine translation systems. The first step of the method is to parse the source language string that is being translated. The second step is to apply a series of transformations to the parse tree, effectively reordering the surface string on the source language side of the translation system. The goal of this step is to recover an underlying word order that is closer to the target language word-order than the original string. The reordering approach is applied as a pre-processing step in both the training and decoding phases of a phrase-based statistical MT system. We describe experiments on translation from German to English, showing an improvement from 25.2% Bleu score for a baseline system to 26.8% Bleu score for the system with reordering, a statistically significant improvement.", "keyphrases": ["statistical machine translation", "transformation", "pre-processing step", "clause restructuring", "negation"]} +{"id": "grissom-ii-etal-2014-dont", "title": "Don't Until the Final Verb Wait: Reinforcement Learning for Simultaneous Machine Translation", "abstract": "We introduce a reinforcement learning-based approach to simultaneous machine translation\u2014producing a translation while receiving input words\u2014between languages with drastically different word orders: from verb-final languages (e.g., German) to verb-medial languages (English). In traditional machine translation, a translator must \u201cwait\u201d for source material to appear before translation begins. We remove this bottleneck by predicting the final verb in advance. We use reinforcement learning to learn when to trust predictions about unseen, future portions of the sentence. We also introduce an evaluation metric to measure expeditiousness and quality. We show that our new translation model outperforms batch and monotone translation strategies.", "keyphrases": ["final verb", "reinforcement learning", "simultaneous machine translation"]} +{"id": "socher-etal-2011-semi", "title": "Semi-Supervised Recursive Autoencoders for Predicting Sentiment Distributions", "abstract": "We introduce a novel machine learning framework based on recursive autoencoders for sentence-level prediction of sentiment label distributions. Our method learns vector space representations for multi-word phrases. In sentiment prediction tasks these representations outperform other state-of-the-art approaches on commonly used datasets, such as movie reviews, without using any pre-defined sentiment lexica or polarity shifting rules. We also evaluate the model's ability to predict sentiment distributions on a new dataset based on confessions from the experience project. The dataset consists of personal user stories annotated with multiple labels which, when aggregated, form a multinomial distribution that captures emotional reactions.
Our algorithm can more accurately predict distributions over such labels compared to several competitive baselines.", "keyphrases": ["sentiment distribution", "multi-word phrase", "recursive neural network", "network model", "deep learning"]} +{"id": "sanches-duran-etal-2015-normalizer", "title": "A Normalizer for UGC in Brazilian Portuguese", "abstract": "User-generated contents (UGC) represent an important source of information for governments, companies, political candidates and consumers. However, most of the Natural Language Processing tools and techniques are developed from and for texts of standard language, and UGC is a type of text especially full of creativity and idiosyncrasies, which represents noise for NLP purposes. This paper presents UGCNormal, a lexicon-based tool for UGC normalization. It encompasses a tokenizer, a sentence segmentation tool, a phonetic-based speller and some lexicons, which were originated from a deep analysis of a corpus of product reviews in Brazilian Portuguese. The normalizer was evaluated in two different data sets and carried out from 31% to 89% of the appropriate corrections, depending on the type of text noise. The use of UGCNormal was also validated in a task of POS tagging, which improved from 91.35% to 93.15% in accuracy and in a task of opinion classification, which improved the average of F1-score measures (F1-score positive and F1-score negative) from 0.736 to 0.758.", "keyphrases": ["normalizer", "ugc", "brazilian portuguese"]} +{"id": "burger-etal-2011-discriminating", "title": "Discriminating Gender on Twitter", "abstract": "Accurate prediction of demographic attributes from social media and other informal online content is valuable for marketing, personalization, and legal investigation. This paper describes the construction of a large, multilingual dataset labeled with gender, and investigates statistical models for determining the gender of uncharacterized Twitter users. We explore several different classifier types on this dataset. We show the degree to which classifier accuracy varies based on tweet volumes as well as when various kinds of profile metadata are included in the models. We also perform a large-scale human assessment using Amazon Mechanical Turk. Our methods significantly out-perform both baseline models and almost all humans on the same task.", "keyphrases": ["gender", "twitter", "multilingual dataset", "age", "social medium"]} +{"id": "mekala-shang-2020-contextualized", "title": "Contextualized Weak Supervision for Text Classification", "abstract": "Weakly supervised text classification based on a few user-provided seed words has recently attracted much attention from researchers. Existing methods mainly generate pseudo-labels in a context-free manner (e.g., string matching), therefore, the ambiguous, context-dependent nature of human language has been long overlooked. In this paper, we propose a novel framework ConWea, providing contextualized weak supervision for text classification. Specifically, we leverage contextualized representations of word occurrences and seed word information to automatically differentiate multiple interpretations of the same word, and thus create a contextualized corpus. This contextualized corpus is further utilized to train the classifier and expand seed words in an iterative manner. This process not only adds new contextualized, highly label-indicative keywords but also disambiguates initial seed words, making our weak supervision fully contextualized. 
Extensive experiments and case studies on real-world datasets demonstrate the necessity and significant advantages of using contextualized weak supervision, especially when the class labels are fine-grained.", "keyphrases": ["weak supervision", "text classification", "conwea"]} +{"id": "bick-2011-framenet", "title": "A FrameNet for Danish", "abstract": "This paper presents work on a comprehensive FrameNet for Danish (cf. www.framenet.dk), with over 12,000 frames, and an almost complete coverage of Danish verb lemmas. We discuss design principles and frame roles as well as the distinctional use of valency, syntactic function and semantic noun classes. By converting frame distinctors into Constraint Grammar rules, we were able to build a robust frame tagger for running Danish text, using DanGram parses as input. The combined context-informed coverage of the parser-frametagger was 94.3%, with an overall F-score for frame senses of 85.12.", "keyphrases": ["framenet", "danish", "coverage"]} +{"id": "lai-etal-2017-race", "title": "RACE: Large-scale ReAding Comprehension Dataset From Examinations", "abstract": "We present RACE, a new dataset for benchmark evaluation of methods in the reading comprehension task. Collected from the English exams for middle and high school Chinese students in the age range between 12 and 18, RACE consists of near 28,000 passages and near 100,000 questions generated by human experts (English instructors), and covers a variety of topics which are carefully designed for evaluating the students' ability in understanding and reasoning. In particular, the proportion of questions that requires reasoning is much larger in RACE than that in other benchmark datasets for reading comprehension, and there is a significant gap between the performance of the state-of-the-art models (43%) and the ceiling human performance (95%). We hope this new dataset can serve as a valuable resource for research and evaluation in machine comprehension. The dataset is freely available at and the code is available at .", "keyphrases": ["comprehension", "english exam", "high school", "race", "multiple-choice question"]} +{"id": "wang-etal-2017-exploiting-cross", "title": "Exploiting Cross-Sentence Context for Neural Machine Translation", "abstract": "In translation, considering the document as a whole can help to resolve ambiguities and inconsistencies. In this paper, we propose a cross-sentence context-aware approach and investigate the influence of historical contextual information on the performance of neural machine translation (NMT). First, this history is summarized in a hierarchical way. We then integrate the historical representation into NMT in two strategies: 1) a warm-start of encoder and decoder states, and 2) an auxiliary context source for updating decoder states. Experimental results on a large Chinese-English translation task show that our approach significantly improves upon a strong attention-based NMT system by up to +2.1 BLEU points.", "keyphrases": ["cross-sentence context", "neural machine translation", "contextual information", "rnn", "translation quality"]} +{"id": "yates-etal-2007-textrunner", "title": "TextRunner: Open Information Extraction on the Web", "abstract": "Traditional information extraction systems have focused on satisfying precise, narrow, pre-specified requests from small, homogeneous corpora.
In contrast, the TextRunner system demonstrates a new kind of information extraction, called Open Information Extraction (OIE), in which the system makes a single, data-driven pass over the entire corpus and extracts a large set of relational tuples, without requiring any human input. (Banko et al., 2007) TextRunner is a fully-implemented, highly scalable example of OIE. TextRunner's extractions are indexed, allowing a fast query mechanism.", "keyphrases": ["open information extraction", "oie", "textrunner", "relation phrase", "entity pair"]} +{"id": "danescu-niculescu-mizil-etal-2013-computational", "title": "A computational approach to politeness with application to social factors", "abstract": "We propose a computational framework for identifying linguistic aspects of politeness. Our starting point is a new corpus of requests annotated for politeness, which we use to evaluate aspects of politeness theory and to uncover new interactions between politeness markers and context. These findings guide our construction of a classifier with domain-independent lexical and syntactic features operationalizing key components of politeness theory, such as indirection, deference, impersonalization and modality. Our classifier achieves close to human performance and is effective across domains. We use our framework to study the relationship between politeness and social power, showing that polite Wikipedia editors are more likely to achieve high status through elections, but, once elevated, they become less polite. We see a similar negative correlation between politeness and power on Stack Exchange, where users at the top of the reputation scale are less polite than those at the bottom. Finally, we apply our classifier to a preliminary analysis of politeness variation by gender and community.", "keyphrases": ["politeness", "computational framework", "linguistic cue"]} +{"id": "chen-choi-2016-character", "title": "Character Identification on Multiparty Conversation: Identifying Mentions of Characters in TV Shows", "abstract": "This paper introduces a subtask of entity linking, called character identification, that maps mentions in multiparty conversation to their referent characters. Transcripts of TV shows are collected as the sources of our corpus and automatically annotated with mentions by linguistically-motivated rules. These mentions are manually linked to their referents through crowdsourcing. Our corpus comprises 543 scenes from two TV shows, and shows the inter-annotator agreement of \u03ba = 79.96. For statistical modeling, this task is reformulated as coreference resolution, and experimented with a state-of-the-art system on our corpus. Our best model gives a purity score of 69.21 on average, which is promising given the challenging nature of this task and our corpus.", "keyphrases": ["multiparty conversation", "mention", "character identification"]} +{"id": "lazaridou-etal-2013-compositional", "title": "Compositional-ly Derived Representations of Morphologically Complex Words in Distributional Semantics", "abstract": "Speakers of a language can construct an unlimited number of new words through morphological derivation. This is a major cause of data sparseness for corpus-based approaches to lexical semantics, such as distributional semantic models of word meaning. We adapt compositional methods originally developed for phrases to the task of deriving the distributional meaning of morphologically complex words from their parts.
Semantic representations constructed in this way beat a strong baseline and can be of higher quality than representations directly constructed from corpus data. Our results constitute a novel evaluation of the proposed composition methods, in which the full additive model achieves the best performance, and demonstrate the usefulness of a compositional morphology component in distributional semantics.", "keyphrases": ["complex word", "distributional semantic model", "compositional method"]} +{"id": "conneau-etal-2017-deep", "title": "Very Deep Convolutional Networks for Text Classification", "abstract": "The dominant approach for many NLP tasks is recurrent neural networks, in particular LSTMs, and convolutional neural networks. However, these architectures are rather shallow in comparison to the deep convolutional networks which have pushed the state-of-the-art in computer vision. We present a new architecture (VDCNN) for text processing which operates directly at the character level and uses only small convolutions and pooling operations. We are able to show that the performance of this model increases with the depth: using up to 29 convolutional layers, we report improvements over the state-of-the-art on several public text classification tasks. To the best of our knowledge, this is the first time that very deep convolutional nets have been applied to text processing.", "keyphrases": ["text classification", "convolutional neural network", "cnn", "learning model"]} +{"id": "bannard-callison-burch-2005-paraphrasing", "title": "Paraphrasing with Bilingual Parallel Corpora", "abstract": "Previous work has used monolingual parallel corpora to extract and generate paraphrases. We show that this task can be done using bilingual parallel corpora, a much more commonly available resource. Using alignment techniques from phrase-based statistical machine translation, we show how paraphrases in one language can be identified using a phrase in another language as a pivot. We define a paraphrase probability that allows paraphrases extracted from a bilingual parallel corpus to be ranked using translation probabilities, and show how it can be refined to take contextual information into account. We evaluate our paraphrase extraction and ranking methods using a set of manual word alignments, and contrast the quality with paraphrases extracted from automatic alignments.", "keyphrases": ["bilingual parallel corpora", "paraphrase", "pivot language", "statistical machine translation"]} +{"id": "peters-etal-2019-knowledge", "title": "Knowledge Enhanced Contextual Word Representations", "abstract": "Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update contextual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text.
After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs.", "keyphrases": ["entity linker", "entity embedding", "language model", "knowbert", "knowledge basis"]} +{"id": "culotta-sorensen-2004-dependency", "title": "Dependency Tree Kernels for Relation Extraction", "abstract": "We extend previous work on tree kernels to estimate the similarity between the dependency trees of sentences. Using this kernel within a Support Vector Machine, we detect and classify relations between entities in the Automatic Content Extraction (ACE) corpus of news articles. We examine the utility of different features such as Wordnet hypernyms, parts of speech, and entity types, and find that the dependency tree kernel achieves a 20% F1 improvement over a \"bag-of-words\" kernel.", "keyphrases": ["kernel", "relation extraction", "low recall", "svm", "feature engineering"]} +{"id": "zaidan-eisner-2008-modeling", "title": "Modeling Annotators: A Generative Approach to Learning from Annotator Rationales", "abstract": "A human annotator can provide hints to a machine learner by highlighting contextual \"rationales\" for each of his or her annotations (Zaidan et al., 2007). How can one exploit this side information to better learn the desired parameters \u03b8? We present a generative model of how a given annotator, knowing the true \u03b8, stochastically chooses rationales. Thus, observing the rationales helps us infer the true \u03b8. We collect substring rationales for a sentiment classification task (Pang and Lee, 2004) and use them to obtain significant accuracy improvements for each annotator. Our new generative approach exploits the rationales more effectively than our previous \"masking SVM\" approach. It is also more principled, and could be adapted to help learn other kinds of probabilistic classifiers for quite different tasks.", "keyphrases": ["annotator", "generative approach", "rationale"]} +{"id": "baldwin-bond-2003-learning", "title": "Learning the Countability of English Nouns from Corpus Data", "abstract": "This paper describes a method for learning the countability preferences of English nouns from raw text corpora. The method maps the corpus-attested lexico-syntactic properties of each noun onto a feature vector, and uses a suite of memory-based classifiers to predict membership in 4 countability classes. We were able to assign countability to English nouns with a precision of 94.6%.", "keyphrases": ["countability", "english noun", "acquisition"]} +{"id": "buchholz-marsi-2006-conll", "title": "CoNLL-X Shared Task on Multilingual Dependency Parsing", "abstract": "Each year the Conference on Computational Natural Language Learning (CoNLL) features a shared task, in which participants train and test their systems on exactly the same data sets, in order to better compare systems. The tenth CoNLL (CoNLL-X) saw a shared task on Multilingual Dependency Parsing. In this paper, we describe how treebanks for 13 languages were converted into the same dependency format and how parsing performance was measured. We also give an overview of the parsing approaches that participants took and the results that they achieved. 
Finally, we try to draw general conclusions about multi-lingual parsing: What makes a particular language, treebank or annotation scheme easier or harder to parse and which phenomena are challenging for any dependency parser?", "keyphrases": ["multilingual dependency parsing", "data set", "conll-x shared task", "unlabeled attachment score", "advance"]} +{"id": "rahman-ng-2011-coreference", "title": "Coreference Resolution with World Knowledge", "abstract": "While world knowledge has been shown to improve learning-based coreference resolvers, the improvements were typically obtained by incorporating world knowledge into a fairly weak baseline resolver. Hence, it is not clear whether these benefits can carry over to a stronger baseline. Moreover, since there has been no attempt to apply different sources of world knowledge in combination to coreference resolution, it is not clear whether they offer complementary benefits to a resolver. We systematically compare commonly-used and under-investigated sources of world knowledge for coreference resolution by applying them to two learning-based coreference models and evaluating them on documents annotated with two different annotation schemes.", "keyphrases": ["world knowledge", "different source", "coreference resolution"]} +{"id": "butnaru-ionescu-2019-moroco", "title": "MOROCO: The Moldavian and Romanian Dialectal Corpus", "abstract": "In this work, we introduce the MOldavian and ROmanian Dialectal COrpus (MOROCO), which is freely available for download at . The corpus contains 33564 samples of text (with over 10 million tokens) collected from the news domain. The samples belong to one of the following six topics: culture, finance, politics, science, sports and tech. The data set is divided into 21719 samples for training, 5921 samples for validation and another 5924 samples for testing. For each sample, we provide corresponding dialectal and category labels. This allows us to perform empirical studies on several classification tasks such as (i) binary discrimination of Moldavian versus Romanian text samples, (ii) intra-dialect multi-class categorization by topic and (iii) cross-dialect multi-class categorization by topic. We perform experiments using a shallow approach based on string kernels, as well as a novel deep approach based on character-level convolutional neural networks containing Squeeze-and-Excitation blocks. We also present and analyze the most discriminative features of our best performing model, before and after named entity removal.", "keyphrases": ["romanian dialectal corpus", "sample", "news domain", "empirical study", "moroco"]} +{"id": "clinchant-etal-2019-use", "title": "On the use of BERT for Neural Machine Translation", "abstract": "Exploiting large pretrained models for various NMT tasks has gained a lot of visibility recently. In this work we study how BERT pretrained models could be exploited for supervised Neural Machine Translation. We compare various ways to integrate pretrained BERT model with NMT model and study the impact of the monolingual data used for BERT training on the final translation quality. We use WMT-14 English-German, IWSLT15 English-German and IWSLT14 English-Russian datasets for these experiments.
In addition to standard task test set evaluation, we perform evaluation on out-of-domain test sets and noise injected test sets, in order to assess how BERT pretrained representations affect model robustness.", "keyphrases": ["bert", "neural machine translation", "translation quality"]} +{"id": "coster-kauchak-2011-simple", "title": "Simple English Wikipedia: A New Text Simplification Task", "abstract": "In this paper we examine the task of sentence simplification which aims to reduce the reading complexity of a sentence by incorporating more accessible vocabulary and sentence structure. We introduce a new data set that pairs English Wikipedia with Simple English Wikipedia and is orders of magnitude larger than any previously examined for sentence simplification. The data contains the full range of simplification operations including rewording, reordering, insertion and deletion. We provide an analysis of this corpus as well as preliminary results using a phrase-based translation approach for simplification.", "keyphrases": ["english wikipedia", "text simplification", "deletion", "sentence pair"]} +{"id": "bhattacharyya-etal-2016-statistical", "title": "Statistical Machine Translation between Related Languages", "abstract": "Language-independent Statistical Machine Translation (SMT) has proven to be very challenging. The diversity of languages makes high accuracy difficult and requires substantial parallel corpus as well as linguistic resources (parsers, morph analyzers, etc.). An interesting observation is that a large chunk of machine translation (MT) requirements involve related languages. They are either: (i) between related languages, or (ii) between a lingua franca (like English) and a set of related languages. For instance, India, the European Union and South-East Asia have such translation requirements due to government, business and socio-cultural communication needs. Related languages share a lot of linguistic features and the divergences among them are at a lower level of the NLP pipeline. The objective of the tutorial is to discuss how the relatedness among languages can be leveraged to bridge this language divergence thereby achieving some/all of these goals: (i) improving translation quality, (ii) achieving better generalization, (iii) sharing linguistic resources, and (iv) reducing resource requirements. We will look at the existing research in SMT from the perspective of related languages, with the goal to build a toolbox of methods that are useful for translation between related languages. This tutorial would be relevant to Machine Translation researchers and developers, especially those interested in translation between low-resource languages which have resource-rich related languages. It will also be relevant for researchers interested in multilingual computation. We start with a motivation for looking at the SMT problem from the perspective of related languages. We introduce notions of language relatedness useful for MT. We explore how lexical, morphological and syntactic similarity among related languages can help MT. Lexical similarity will receive special attention since related languages share a significant vocabulary in terms of cognates, loanwords, etc. Then, we look beyond bilingual MT and present how pivot-based and multi-source methods incorporate knowledge from multiple languages, and handle language pairs lacking parallel corpora.
We present some studies concerning the implications of language relatedness to pivot-based SMT, and ways of handling language divergence in the pivot-based SMT scenario. Recent advances in deep learning have made it possible to train multi-language neural MT systems, which we think would be relevant to training between related languages.", "keyphrases": ["related language", "structural similarity", "long period"]} +{"id": "koller-stone-2007-sentence", "title": "Sentence generation as a planning problem", "abstract": "We translate sentence generation from TAG grammars with semantic and pragmatic information into a planning problem by encoding the contribution of each word declaratively and explicitly. This allows us to exploit the performance of off-the-shelf planners. It also opens up new perspectives on referring expression generation and the relationship between language and action.", "keyphrases": ["planning problem", "action", "sentence generation"]} +{"id": "lin-hovy-2003-automatic", "title": "Automatic Evaluation of Summaries Using N-gram Co-occurrence Statistics", "abstract": "Following the recent adoption by the machine translation community of automatic evaluation using the BLEU/NIST scoring process, we conduct an in-depth study of a similar idea for evaluating summaries. The results show that automatic evaluation using unigram co-occurrences between summary pairs correlates surprisingly well with human evaluations, based on various statistical metrics; while direct application of the BLEU evaluation procedure does not always give good results.", "keyphrases": ["n-gram co-occurrence statistic", "automatic evaluation", "rouge", "summarization", "document understanding conference"]} +{"id": "farkas-etal-2010-conll", "title": "The CoNLL-2010 Shared Task: Learning to Detect Hedges and their Scope in Natural Language Text", "abstract": "The CoNLL-2010 Shared Task was dedicated to the detection of uncertainty cues and their linguistic scope in natural language texts. The motivation behind this task was that distinguishing factual and uncertain information in texts is of essential importance in information extraction. This paper provides a general overview of the shared task, including the annotation protocols of the training and evaluation datasets, the exact task definitions, the evaluation metrics employed and the overall results. The paper concludes with an analysis of the prominent approaches and an overview of the systems submitted to the shared task.", "keyphrases": ["scope", "natural language text", "conll\u20192010"]} +{"id": "kummerfeld-2019-slate", "title": "SLATE: A Super-Lightweight Annotation Tool for Experts", "abstract": "Many annotation tools have been developed, covering a wide variety of tasks and providing features like user management, pre-processing, and automatic labeling. However, all of these tools use Graphical User Interfaces, and often require substantial effort to install and configure. This paper presents a new annotation tool that is designed to fill the niche of a lightweight interface for users with a terminal-based workflow. SLATE supports annotation at different scales (spans of characters, tokens, and lines, or a document) and of different types (free text, labels, and links), with easily customisable keybindings, and unicode support. In a user study comparing with other tools it was consistently the easiest to install and use.
SLATE fills a need not met by existing systems, and has already been used to annotate two corpora, one of which involved over 250 hours of annotation effort.", "keyphrases": ["annotation tool", "workflow", "support", "slate"]} +{"id": "lukin-walker-2013-really", "title": "Really? Well. Apparently Bootstrapping Improves the Performance of Sarcasm and Nastiness Classifiers for Online Dialogue", "abstract": "More and more of the information on the web is dialogic, from Facebook newsfeeds, to forum conversations, to comment threads on news articles. In contrast to traditional, monologic Natural Language Processing resources such as news, highly social dialogue is frequent in social media, making it a challenging context for NLP. This paper tests a bootstrapping method, originally proposed in a monologic domain, to train classifiers to identify two different types of subjective language in dialogue: sarcasm and nastiness. We explore two methods of developing linguistic indicators to be used in a first level classifier aimed at maximizing precision at the expense of recall. The best performing classifier for the first phase achieves 54% precision and 38% recall for sarcastic utterances. We then use general syntactic patterns from previous work to create more general sarcasm indicators, improving precision to 62% and recall to 52%. To further test the generality of the method, we then apply it to bootstrapping a classifier for nastiness dialogic acts. Our first phase, using crowdsourced nasty indicators, achieves 58% precision and 49% recall, which increases to 75% precision and 62% recall when we bootstrap over the first level with generalized syntactic patterns.", "keyphrases": ["sarcasm", "nastiness classifier", "online dialogue", "syntactic pattern"]} +{"id": "kocmi-bojar-2018-trivial", "title": "Trivial Transfer Learning for Low-Resource Neural Machine Translation", "abstract": "Transfer learning has been proven as an effective technique for neural machine translation under low-resource conditions. Existing methods require a common target language, language relatedness, or specific training tricks and regimes. We present a simple transfer learning method, where we first train a \u201cparent\u201d model for a high-resource language pair and then continue the training on a low-resource pair only by replacing the training corpus. This \u201cchild\u201d model performs significantly better than the baseline trained for low-resource pair only. We are the first to show this for targeting different languages, and we observe the improvements even for unrelated languages with different alphabets.", "keyphrases": ["transfer learning", "neural machine translation", "parent", "high-resource language pair", "vocabulary"]} +{"id": "carpuat-wu-2007-improving", "title": "Improving Statistical Machine Translation Using Word Sense Disambiguation", "abstract": "We show for the first time that incorporating the predictions of a word sense disambiguation system within a typical phrase-based statistical machine translation (SMT) model consistently improves translation quality across all three different IWSLT Chinese-English test sets, as well as producing statistically significant improvements on the larger NIST Chinese-English MT task\u2014and moreover never hurts performance on any test set, according not only to BLEU but to all eight most commonly used automatic evaluation metrics. Recent work has challenged the assumption that word sense disambiguation (WSD) systems are useful for SMT.
Yet SMT translation quality still obviously suffers from inaccurate lexical choice. In this paper, we address this problem by investigating a new strategy for integrating WSD into an SMT system, that performs fully phrasal multi-word disambiguation. Instead of directly incorporating a Senseval-style WSD system, we redefine the WSD task to match the exact same phrasal translation disambiguation task faced by phrase-based SMT systems. Our results provide the first known empirical evidence that lexical semantics are indeed useful for SMT, despite claims to the contrary.", "keyphrases": ["word sense disambiguation", "translation quality", "phrase-based smt system", "phrase pair", "context-dependent probability distribution"]} +{"id": "voita-etal-2019-good", "title": "When a Good Translation is Wrong in Context: Context-Aware Machine Translation Improves on Deixis, Ellipsis, and Lexical Cohesion", "abstract": "Though machine translation errors caused by the lack of context beyond one sentence have long been acknowledged, the development of context-aware NMT systems is hampered by several problems. Firstly, standard metrics are not sensitive to improvements in consistency in document-level translations. Secondly, previous work on context-aware NMT assumed that the sentence-aligned parallel data consisted of complete documents while in most practical scenarios such document-level data constitutes only a fraction of the available parallel data. To address the first issue, we perform a human study on an English-Russian subtitles dataset and identify deixis, ellipsis and lexical cohesion as three main sources of inconsistency. We then create test sets targeting these phenomena. To address the second shortcoming, we consider a set-up in which a much larger amount of sentence-level data is available compared to that aligned at the document level. We introduce a model that is suitable for this scenario and demonstrate major gains over a context-agnostic baseline on our new benchmarks without sacrificing performance as measured with BLEU.", "keyphrases": ["machine translation", "deixis", "lexical cohesion", "context-aware nmt", "document level"]} +{"id": "lowe-etal-2015-ubuntu", "title": "The Ubuntu Dialogue Corpus: A Large Dataset for Research in Unstructured Multi-Turn Dialogue Systems", "abstract": "This paper introduces the Ubuntu Dialogue Corpus, a dataset containing almost 1 million multi-turn dialogues, with a total of over 7 million utterances and 100 million words. This provides a unique resource for research into building dialogue managers based on neural language models that can make use of large amounts of unlabeled data. The dataset has both the multi-turn property of conversations in the Dialog State Tracking Challenge datasets, and the unstructured nature of interactions from microblog services such as Twitter. We also describe two neural learning architectures suitable for analyzing this dataset, and provide benchmark performance on the task of selecting the best next response.", "keyphrases": ["ubuntu dialogue corpus", "large dataset", "dialog", "support", "multi-turn response selection"]} +{"id": "gehrmann-etal-2018-bottom", "title": "Bottom-Up Abstractive Summarization", "abstract": "Neural summarization produces outputs that are fluent and readable, but which can be poor at content selection, for instance often copying full sentences from the source document. 
This work explores the use of data-efficient content selectors to over-determine phrases in a source document that should be part of the summary. We use this selector as a bottom-up attention step to constrain the model to likely phrases. We show that this approach improves the ability to compress text, while still generating fluent summaries. This two-step process is both simpler and higher performing than other end-to-end content selection models, leading to significant improvements on ROUGE for both the CNN-DM and NYT corpus. Furthermore, the content selector can be trained with as little as 1,000 sentences making it easy to transfer a trained summarizer to a new domain.", "keyphrases": ["summarization", "source document", "content selector", "over-determine phrase", "copy mechanism"]} +{"id": "wu-dredze-2019-beto", "title": "Beto, Bentz, Becas: The Surprising Cross-Lingual Effectiveness of BERT", "abstract": "Pretrained contextual representation models (Peters et al., 2018; Devlin et al., 2018) have pushed forward the state-of-the-art on many NLP tasks. A new release of BERT (Devlin, 2018) includes a model simultaneously pretrained on 104 languages with impressive performance for zero-shot cross-lingual transfer on a natural language inference task. This paper explores the broader cross-lingual potential of mBERT (multilingual) as a zero shot language transfer model on 5 NLP tasks covering a total of 39 languages from various language families: NLI, document classification, NER, POS tagging, and dependency parsing. We compare mBERT with the best-published methods for zero-shot cross-lingual transfer and find mBERT competitive on each task. Additionally, we investigate the most effective strategy for utilizing mBERT in this manner, determine to what extent mBERT generalizes away from language specific features, and measure factors that influence cross-lingual transfer.", "keyphrases": ["bert", "cross-lingual transfer", "language model", "various nlp task", "wikipedia data"]} +{"id": "bouamor-etal-2014-multidialectal", "title": "A Multidialectal Parallel Corpus of Arabic", "abstract": "The daily spoken variety of Arabic is often termed the colloquial or dialect form of Arabic. There are many Arabic dialects across the Arab World and within other Arabic speaking communities. These dialects vary widely from region to region and to a lesser extent from city to city in each region. The dialects are not standardized, they are not taught, and they do not have official status. However they are the primary vehicles of communication (face-to-face and recently, online) and have a large presence in the arts as well. In this paper, we present the first multidialectal Arabic parallel corpus, a collection of 2,000 sentences in Standard Arabic, Egyptian, Tunisian, Jordanian, Palestinian and Syrian Arabic, in addition to English. Such parallel data does not exist naturally, which makes this corpus a very valuable resource that has many potential applications such as Arabic dialect identification and machine translation.", "keyphrases": ["multidialectal parallel corpus", "arabic", "multi-dialectal data set", "topic bias"]} +{"id": "dinu-etal-2019-training", "title": "Training Neural Machine Translation to Apply Terminology Constraints", "abstract": "This paper proposes a novel method to inject custom terminology into neural machine translation at run time. 
Previous works have mainly proposed modifications to the decoding algorithm in order to constrain the output to include run-time-provided target terms. While being effective, these constrained decoding methods add, however, significant computational overhead to the inference step, and, as we show in this paper, can be brittle when tested in realistic conditions. In this paper we approach the problem by training a neural MT system to learn how to use custom terminology when provided with the input. Comparative experiments show that our method is not only more effective than a state-of-the-art implementation of constrained decoding, but is also as fast as constraint-free decoding.", "keyphrases": ["neural machine translation", "terminology constraint", "decoding"]} +{"id": "poria-etal-2017-context", "title": "Context-Dependent Sentiment Analysis in User-Generated Videos", "abstract": "Multimodal sentiment analysis is a developing area of research, which involves the identification of sentiments in videos. Current research considers utterances as independent entities, i.e., ignores the interdependencies and relations among the utterances of a video. In this paper, we propose an LSTM-based model that enables utterances to capture contextual information from their surroundings in the same video, thus aiding the classification process. Our method shows 5-10% performance improvement over the state of the art and high robustness to generalizability.", "keyphrases": ["sentiment analysis", "video", "emotion recognition"]} +{"id": "wilson-etal-2005-opinionfinder", "title": "OpinionFinder: A System for Subjectivity Analysis", "abstract": "OpinionFinder is a system that performs subjectivity analysis, automatically identifying when opinions, sentiments, speculations, and other private states are present in text. Specifically, OpinionFinder aims to identify subjective sentences and to mark various aspects of the subjectivity in these sentences, including the source (holder) of the subjectivity and words that are included in phrases expressing positive or negative sentiments.", "keyphrases": ["subjectivity analysis", "speculation", "opinionfinder"]} +{"id": "banerjee-lavie-2005-meteor", "title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments", "abstract": "We describe METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. We evaluate METEOR by measuring the correlation between the metric scores and human judgments of translation quality. We compute the Pearson R correlation value between its scores and human quality assessments of the LDC TIDES 2003 Arabic-to-English and Chinese-to-English datasets. We perform segment-by-segment correlation, and show that METEOR gets an R correlation value of 0.347 on the Arabic data and 0.331 on the Chinese data.
This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. We also perform experiments to show the relative contributions of the various mapping modules.", "keyphrases": ["automatic metric", "matching", "meteor", "synonyms", "human judgement"]} +{"id": "higgins-etal-2004-evaluating", "title": "Evaluating Multiple Aspects of Coherence in Student Essays", "abstract": "Criterion Online Essay Evaluation Service includes a capability that labels sentences in student writing with essay-based discourse elements (e.g., thesis statements). We describe a new system that enhances Criterion\u2019s capability, by evaluating multiple aspects of coherence in essays. This system identifies features of sentences based on semantic similarity measures and discourse structure. A support vector machine uses these features to capture breakdowns in coherence due to relatedness to the essay question and relatedness between discourse elements. Intra-sentential quality is evaluated with rule-based heuristics. Results indicate that the system yields higher performance than a baseline on all three aspects.", "keyphrases": ["coherence", "student essay", "discourse element"]} +{"id": "gao-etal-2013-modeling", "title": "Modeling User Leniency and Product Popularity for Sentiment Classification", "abstract": "Classical approaches to sentiment classification exploit only textual features in a given review and are not aware of the personality of the user or the public sentiment toward the target product. In this paper, we propose a model that can accurately estimate the sentiment polarity by referring to the user leniency and product popularity computed during testing. For decoding with this model, we adopt an approximate strategy called \u201ctwo-stage decoding.\u201d Preliminary experimental results on two real-world datasets show that our method significantly improves classification accuracy over existing state-of-the-art methods.", "keyphrases": ["user leniency", "product popularity", "sentiment classification", "user-specific feature"]} +{"id": "stab-gurevych-2014-identifying", "title": "Identifying Argumentative Discourse Structures in Persuasive Essays", "abstract": "In this paper, we present a novel approach for identifying argumentative discourse structures in persuasive essays. The structure of argumentation consists of several components (i.e. claims and premises) that are connected with argumentative relations. We consider this task in two consecutive steps. First, we identify the components of arguments using multiclass classification. Second, we classify a pair of argument components as either support or non-support for identifying the structure of argumentative discourse. For both tasks, we evaluate several classifiers and propose novel feature sets including structural, lexical, syntactic and contextual features. In our experiments, we obtain a macro F1-score of 0.726 for identifying argument components and 0.722 for argumentative relations.", "keyphrases": ["discourse structure", "persuasive essay", "gold argument component"]} +{"id": "zhou-jurgens-2020-condolence", "title": "Condolence and Empathy in Online Communities", "abstract": "Offering condolence is a natural reaction to hearing someone's distress. Individuals frequently express distress in social media, where some communities can provide support. However, not all condolence is equal\u2014trite responses offer little actual support despite their good intentions.
Here, we develop computational tools to create a massive dataset of 11.4M expressions of distress and 2.8M corresponding offerings of condolence in order to examine the dynamics of condolence online. Our study reveals widespread disparity in what types of distress receive supportive condolence rather than just engagement. Building on studies from social psychology, we analyze the language of condolence and develop a new dataset for quantifying the empathy in a condolence using appraisal theory. Finally, we demonstrate that the features of condolence individuals find most helpful online differ substantially from those seen in interpersonal settings.", "keyphrases": ["empathy", "condolence", "online support group"]} +{"id": "huang-etal-2019-glossbert", "title": "GlossBERT: BERT for Word Sense Disambiguation with Gloss Knowledge", "abstract": "Word Sense Disambiguation (WSD) aims to find the exact sense of an ambiguous word in a particular context. Traditional supervised methods rarely take into consideration the lexical resources like WordNet, which are widely utilized in knowledge-based methods. Recent studies have shown the effectiveness of incorporating gloss (sense definition) into neural networks for WSD. However, compared with traditional word expert supervised methods, they have not achieved much improvement. In this paper, we focus on how to better leverage gloss knowledge in a supervised neural WSD system. We construct context-gloss pairs and propose three BERT based models for WSD. We fine-tune the pre-trained BERT model and achieve new state-of-the-art results on the WSD task.", "keyphrases": ["bert", "word sense disambiguation", "gloss knowledge", "wsd system", "classification task"]} +{"id": "axelrod-etal-2011-domain", "title": "Domain Adaptation via Pseudo In-Domain Data Selection", "abstract": "We explore efficient domain adaptation for the task of statistical machine translation based on extracting sentences from a large general-domain parallel corpus that are most relevant to the target domain. These sentences may be selected with simple cross-entropy based methods, of which we present three. As these sentences are not themselves identical to the in-domain data, we call them pseudo in-domain subcorpora. These subcorpora -- 1% the size of the original -- can then be used to train small domain-adapted Statistical Machine Translation (SMT) systems which outperform systems trained on the entire corpus. Performance is further improved when we use these domain-adapted models in combination with a true in-domain model. The results show that more training data is not always better, and that best results are attained via proper domain-relevant data selection, as well as combining in- and general-domain systems during decoding.", "keyphrases": ["domain adaptation", "cross-entropy difference", "rich literature", "side"]} +{"id": "bunescu-pasca-2006-using", "title": "Using Encyclopedic Knowledge for Named entity Disambiguation", "abstract": "We present a new method for detecting and disambiguating named entities in open domain text. A disambiguation SVM kernel is trained to exploit the high coverage and rich structure of the knowledge encoded in an online encyclopedia.
The resulting model significantly outperforms a less informed baseline.", "keyphrases": ["entity disambiguation", "wikipedia", "knowledge base", "textual context"]} +{"id": "he-etal-2011-automatically", "title": "Automatically Extracting Polarity-Bearing Topics for Cross-Domain Sentiment Classification", "abstract": "Joint sentiment-topic (JST) model was previously proposed to detect sentiment and topic simultaneously from text. The only supervision required by JST model learning is domain-independent polarity word priors. In this paper, we modify the JST model by incorporating word polarity priors through modifying the topic-word Dirichlet priors. We study the polarity-bearing topics extracted by JST and show that by augmenting the original feature space with polarity-bearing topics, the in-domain supervised classifiers learned from augmented feature representation achieve the state-of-the-art performance of 95% on the movie review data and an average of 90% on the multi-domain sentiment dataset. Furthermore, using feature augmentation and selection according to the information gain criteria for cross-domain sentiment classification, our proposed approach performs either better than or comparably to previous approaches. Nevertheless, our approach is much simpler and does not require difficult parameter tuning.", "keyphrases": ["polarity-bearing topic", "cross-domain sentiment classification", "topic-word dirichlet prior"]} +{"id": "pradhan-etal-2005-semantic-role", "title": "Semantic Role Labeling Using Different Syntactic Views", "abstract": "Semantic role labeling is the process of annotating the predicate-argument structure in text with semantic labels. In this paper we present a state-of-the-art baseline semantic role labeling system based on Support Vector Machine classifiers. We show improvements on this system by: i) adding new features including features extracted from dependency parses, ii) performing feature selection and calibration and iii) combining parses obtained from semantic parsers trained using different syntactic views. Error analysis of the baseline system showed that approximately half of the argument identification errors resulted from parse errors in which there was no syntactic constituent that aligned with the correct argument. In order to address this problem, we combined semantic parses from a Minipar syntactic parse and from a chunked syntactic representation with our original baseline system which was based on Charniak parses. All of the reported techniques resulted in performance improvements.", "keyphrases": ["view", "semantic role labeling", "rule-based dependency parser"]} +{"id": "hovy-etal-2013-learning", "title": "Learning Whom to Trust with MACE", "abstract": "Non-expert annotation services like Amazon\u2019s Mechanical Turk (AMT) are cheap and fast ways to evaluate systems and provide categorical annotations for training data. Unfortunately, some annotators choose bad labels in order to maximize their pay. Manual identification is tedious, so we experiment with an item-response model. It learns in an unsupervised fashion to a) identify which annotators are trustworthy and b) predict the correct underlying labels. We match performance of more complex state-of-the-art systems and perform well even under adversarial conditions. We show considerable improvements over standard baselines, both for predicted label accuracy and trustworthiness estimates.
The latter can be further improved by introducing a prior on model parameters and using Variational Bayes inference. Additionally, we can achieve even higher accuracy by focusing on the instances our model is most confident in (trading in some recall), and by incorporating annotated control instances. Our system, MACE (Multi-Annotator Competence Estimation), is available for download.", "keyphrases": ["mace", "annotator", "competence estimation", "spammer", "majority voting"]} +{"id": "chung-gildea-2010-effects", "title": "Effects of Empty Categories on Machine Translation", "abstract": "We examine effects that empty categories have on machine translation. Empty categories are elements in parse trees that lack corresponding overt surface forms (words) such as dropped pronouns and markers for control constructions. We start by training machine translation systems with manually inserted empty elements. We find that inclusion of some empty categories in training data improves the translation result. We expand the experiment by automatically inserting these elements into a larger data set using various methods and training on the modified corpus. We show that even when automatic prediction of null elements is not highly accurate, it nevertheless improves the end translation result.", "keyphrases": ["empty category", "machine translation", "pronoun", "pro"]} +{"id": "hamilton-etal-2016-cultural", "title": "Cultural Shift or Linguistic Drift? Comparing Two Computational Measures of Semantic Change", "abstract": "Words shift in meaning for many reasons, including cultural factors like new technologies and regular linguistic processes like subjectification. Understanding the evolution of language and culture requires disentangling these underlying causes. Here we show how two different distributional measures can be used to detect two different types of semantic change. The first measure, which has been used in many previous works, analyzes global shifts in a word's distributional semantics; it is sensitive to changes due to regular processes of linguistic drift, such as the semantic generalization of promise (\"I promise.\" \"It promised to be exciting.\"). The second measure, which we develop here, focuses on local changes to a word's nearest semantic neighbors; it is more sensitive to cultural shifts, such as the change in the meaning of cell (\"prison cell\" \"cell phone\"). Comparing measurements made by these two methods allows researchers to determine whether changes are more cultural or linguistic in nature, a distinction that is essential for work in the digital humanities and historical linguistics.", "keyphrases": ["drift", "semantic change", "cultural shift", "target word"]} +{"id": "huang-etal-2012-improving", "title": "Improving Word Representations via Global Context and Multiple Word Prototypes", "abstract": "Unsupervised word representations are very useful in NLP tasks both as inputs to learning algorithms and as extra word features in NLP systems. However, most of these models are built with only local context and one representation per word. This is problematic because words are often polysemous and global context can also provide useful information for learning word meanings. We present a new neural network architecture which 1) learns word embeddings that better capture the semantics of words by incorporating both local and global document context, and 2) accounts for homonymy and polysemy by learning multiple embeddings per word.
We introduce a new dataset with human judgments on pairs of words in sentential context, and evaluate our model on it, showing that our model outperforms competitive baselines and other neural language models.", "keyphrases": ["global context", "polysemy", "neural language model", "multi-prototype embedding", "vector representation"]} +{"id": "gao-etal-2020-machine", "title": "From Machine Reading Comprehension to Dialogue State Tracking: Bridging the Gap", "abstract": "Dialogue state tracking (DST) is at the heart of task-oriented dialogue systems. However, the scarcity of labeled data is an obstacle to building accurate and robust state tracking systems that work across a variety of domains. Existing approaches generally require some dialogue data with state information and their ability to generalize to unknown domains is limited. In this paper, we propose using machine reading comprehension (RC) in state tracking from two perspectives: model architectures and datasets. We divide the slot types in dialogue state into categorical or extractive to borrow the advantages from both multiple-choice and span-based reading comprehension models. Our method achieves near the current state-of-the-art in joint goal accuracy on MultiWOZ 2.1 given full training data. More importantly, by leveraging machine reading comprehension datasets, our method outperforms the existing approaches by a large margin in few-shot scenarios when the availability of in-domain data is limited. Lastly, even without any state tracking data, i.e., zero-shot scenario, our proposed approach achieves greater than 90% average slot accuracy in 12 out of 30 slots in MultiWOZ 2.1.", "keyphrases": ["machine reading comprehension", "dialogue state tracking", "cross-task transfer"]} +{"id": "kazemzadeh-etal-2014-referitgame", "title": "ReferItGame: Referring to Objects in Photographs of Natural Scenes", "abstract": "In this paper we introduce a new game to crowd-source natural language referring expressions. By designing a two player game, we can both collect and verify referring expressions directly within the game. To date, the game has produced a dataset containing 130,525 expressions, referring to 96,654 distinct objects, in 19,894 photographs of natural scenes. This dataset is larger and more varied than previous REG datasets and allows us to study referring expressions in real-world scenes. We provide an in-depth analysis of the resulting dataset. Based on our findings, we design a new optimization based model for generating referring expressions and perform experimental evaluations on 3 test sets.", "keyphrases": ["object", "game", "reg", "real-world image", "reasoning"]} +{"id": "popovic-2015-chrf", "title": "chrF: character n-gram F-score for automatic MT evaluation", "abstract": "We propose the use of character n-gram F-score for automatic evaluation of machine translation output. Character n-grams have already been used as a part of more complex metrics, but their individual potential has not been investigated yet. We report system-level correlations with human rankings for 6-gram F1-score (CHRF) on the WMT12, WMT13 and WMT14 data as well as segment-level correlation for 6-gram F1 (CHRF) and F3-scores (CHRF3) on WMT14 data for all available target languages.
The results are very promising, especially for the CHRF3 score \u2013 for translation from English, this variant showed the highest segment-level correlations outperforming even the best metrics on the WMT14 shared evaluation task.", "keyphrases": ["character n-gram f-score", "variant", "chrf", "machine translation evaluation"]} +{"id": "thompson-koehn-2019-vecalign", "title": "Vecalign: Improved Sentence Alignment in Linear Time and Space", "abstract": "We introduce Vecalign, a novel bilingual sentence alignment method which is linear in time and space with respect to the number of sentences being aligned and which requires only bilingual sentence embeddings. On a standard German\u2013French test set, Vecalign outperforms the previous state-of-the-art method (which has quadratic time complexity and requires a machine translation system) by 5 F1 points. It substantially outperforms the popular Hunalign toolkit at recovering Bible verse alignments in medium- to low-resource language pairs, and it improves downstream MT quality by 1.7 and 1.6 BLEU in Sinhala-English and Nepali-English, respectively, compared to the Hunalign-based Paracrawl pipeline.", "keyphrases": ["sentence alignment", "linear time", "vecalign"]} +{"id": "nema-etal-2017-diversity", "title": "Diversity driven attention model for query-based abstractive summarization", "abstract": "Abstractive summarization aims to generate a shorter version of the document covering all the salient points in a compact and coherent fashion. On the other hand, query-based summarization highlights those points that are relevant in the context of a given query. The encode-attend-decode paradigm has achieved notable success in machine translation, extractive summarization, dialog systems, etc. But it suffers from the drawback of generation of repeated phrases. In this work we propose a model for the query-based summarization task based on the encode-attend-decode paradigm with two key additions (i) a query attention model (in addition to document attention model) which learns to focus on different portions of the query at different time steps (instead of using a static representation for the query) and (ii) a new diversity based attention model which aims to alleviate the problem of repeating phrases in the summary. In order to enable the testing of this model we introduce a new query-based summarization dataset building on debatepedia. Our experiments show that with these two additions the proposed model clearly outperforms vanilla encode-attend-decode models with a gain of 28% (absolute) in ROUGE-L scores.", "keyphrases": ["attention model", "summarization", "query"]} +{"id": "ding-palmer-2005-machine", "title": "Machine Translation Using Probabilistic Synchronous Dependency Insertion Grammars", "abstract": "Syntax-based statistical machine translation (MT) aims at applying statistical models to structured data. In this paper, we present a syntax-based statistical machine translation system based on a probabilistic synchronous dependency insertion grammar. Synchronous dependency insertion grammars are a version of synchronous grammars defined on dependency trees. We first introduce our approach to inducing such a grammar from parallel corpora. Second, we describe the graphical model for the machine translation task, which can also be viewed as a stochastic tree-to-tree transducer. We introduce a polynomial time decoding algorithm for the model. We evaluate the outputs of our MT system using the NIST and Bleu automatic MT evaluation software. 
The result shows that our system outperforms the baseline system based on the IBM models in both translation speed and quality.", "keyphrases": ["synchronous grammar", "machine translation", "sdig"]} +{"id": "sha-pereira-2003-shallow", "title": "Shallow Parsing with Conditional Random Fields", "abstract": "Conditional random fields for sequence labeling offer advantages over both generative models like HMMs and classifiers applied at each sequence position. Among sequence labeling tasks in language processing, shallow parsing has received much attention, with the development of standard evaluation datasets and extensive comparison among methods. We show here how to train a conditional random field to achieve performance as good as any reported base noun-phrase chunking method on the CoNLL task, and better than any reported single model. Improved training methods based on modern optimization algorithms were critical in achieving these results. We present extensive comparisons between models and training methods that confirm and strengthen previous results on shallow parsing and training methods for maximum-entropy models.", "keyphrases": ["conditional random fields", "training method", "shallow parsing", "crf"]} +{"id": "indurthi-etal-2019-fermi", "title": "FERMI at SemEval-2019 Task 5: Using Sentence embeddings to Identify Hate Speech Against Immigrants and Women in Twitter", "abstract": "This paper describes our system (Fermi) for Task 5 of SemEval-2019: HatEval: Multilingual Detection of Hate Speech Against Immigrants and Women on Twitter. We participated in the subtask A for English and ranked first in the evaluation on the test set. We evaluate the quality of multiple sentence embeddings and explore multiple training models to evaluate the performance of simple yet effective embedding-ML combination algorithms. Our team - Fermi's model achieved an accuracy of 65.00% for English language in task A. Our models, which use pretrained Universal Encoder sentence embeddings for transforming the input and SVM (with RBF kernel) for classification, scored first position (among 68) in the leaderboard on the test set for Subtask A in English language. In this paper we provide a detailed description of the approach, as well as the results obtained in the task.", "keyphrases": ["sentence embedding", "hate speech", "twitter"]} +{"id": "lester-etal-2021-power", "title": "The Power of Scale for Parameter-Efficient Prompt Tuning", "abstract": "In this work, we explore \u201cprompt tuning,\u201d a simple yet effective mechanism for learning \u201csoft prompts\u201d to condition frozen language models to perform specific downstream tasks. Unlike the discrete text prompts used by GPT-3, soft prompts are learned through backpropagation and can be tuned to incorporate signals from any number of labeled examples. Our end-to-end learned approach outperforms GPT-3's few-shot learning by a large margin. More remarkably, through ablations on model size using T5, we show that prompt tuning becomes more competitive with scale: as models exceed billions of parameters, our method \u201ccloses the gap\u201d and matches the strong performance of model tuning (where all model weights are tuned). This finding is especially relevant because large models are costly to share and serve and the ability to reuse one frozen model for multiple downstream tasks can ease this burden. 
Our method can be seen as a simplification of the recently proposed \u201cprefix tuning\u201d of Li and Liang (2021) and we provide a comparison to this and other similar approaches. Finally, we show that conditioning a frozen model with soft prompts confers benefits in robustness to domain transfer and enables efficient \u201cprompt ensembling.\u201d We release code and model checkpoints to reproduce our experiments.", "keyphrases": ["prompt tuning", "language model", "downstream task", "model size", "design"]} +{"id": "refaee-rieser-2014-arabic", "title": "An Arabic Twitter Corpus for Subjectivity and Sentiment Analysis", "abstract": "We present a newly collected data set of 8,868 gold-standard annotated Arabic feeds. The corpus is manually labelled for subjectivity and sentiment analysis (SSA) (\u03ba = 0.816). In addition, the corpus is annotated with a variety of motivated feature-sets that have previously shown positive impact on performance. The paper highlights issues posed by twitter as a genre, such as mixture of language varieties and topic-shifts. Our next step is to extend the current corpus, using online semi-supervised learning. A first sub-corpus will be released via the ELRA repository as part of this submission.", "keyphrases": ["arabic twitter corpus", "subjectivity", "sentiment analysis"]} +{"id": "bond-foster-2013-linking", "title": "Linking and Extending an Open Multilingual Wordnet", "abstract": "We create an open multilingual wordnet with large wordnets for over 26 languages and smaller ones for 57 languages. It is made by combining wordnets with open licences, data from Wiktionary and the Unicode Common Locale Data Repository. Overall there are over 2 million senses for over 100 thousand concepts, linking over 1.4 million words in hundreds of languages.", "keyphrases": ["open multilingual wordnet", "wiktionary", "format"]} +{"id": "molina-etal-2016-overview", "title": "Overview for the Second Shared Task on Language Identification in Code-Switched Data", "abstract": "We present an overview of the second shared task on language identification on code-switched data. The shared task included code-switched data from four language pairs: Modern Standard Arabic-Dialectal Arabic (MSA-DA), Mandarin-English (MAN-EN), Nepali-English (NEP-EN), and Spanish-English (SPA-EN). A total of seven teams participated in the task and submitted 42 system runs. The evaluation showed that language identification at the token level is more difficult when the languages present are closely related, as in the case of MSA-DA, where the prediction performance was the lowest among all language pairs. In contrast, the language pairs with the highest F-measure were SPA-EN and NEP-EN. The task made evident that language identification in code-switched data is still far from solved and warrants further research.", "keyphrases": ["language identification", "code-switched data", "code-switched text"]} +{"id": "dabre-etal-2017-empirical", "title": "An Empirical Study of Language Relatedness for Transfer Learning in Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) is known to outperform Phrase Based Statistical Machine Translation (PBSMT) for resource rich language pairs but not for resource poor ones. Transfer Learning (Zoph et al., 2016) is a simple approach in which we can simply initialize an NMT model (child model) for a resource poor language pair using a previously trained model (parent model) for a resource rich language pair where the target languages are the same.
This paper explores how different choices of parent models affect the performance of child models. We empirically show that using a parent model with the source language falling in the same or linguistically similar language family as the source language of the child model is the best.", "keyphrases": ["transfer learning", "neural machine translation", "source language"]} +{"id": "kovaleva-etal-2019-revealing", "title": "Revealing the Dark Secrets of BERT", "abstract": "BERT-based architectures currently give state-of-the-art performance on many NLP tasks, but little is known about the exact mechanisms that contribute to its success. In the current work, we focus on the interpretation of self-attention, which is one of the fundamental underlying components of BERT. Using a subset of GLUE tasks and a set of handcrafted features-of-interest, we propose the methodology and carry out a qualitative and quantitative analysis of the information encoded by the individual BERT's heads. Our findings suggest that there is a limited set of attention patterns that are repeated across different heads, indicating the overall model overparametrization. While different heads consistently use the same attention patterns, they have varying impact on performance across different tasks. We show that manually disabling attention in certain heads leads to a performance improvement over the regular fine-tuned BERT models.", "keyphrases": ["bert", "explicit mechanism", "over-parametrization"]} +{"id": "barzilay-lapata-2005-modeling", "title": "Modeling Local Coherence: An Entity-Based Approach", "abstract": "This paper considers the problem of automatic assessment of local coherence. We present a novel entity-based representation of discourse which is inspired by Centering Theory and can be computed automatically from raw text. We view coherence assessment as a ranking learning problem and show that the proposed discourse representation supports the effective learning of a ranking function. Our experiments demonstrate that the induced model achieves significantly higher accuracy than a state-of-the-art coherence model.", "keyphrases": ["coherence", "entity-based approach", "discourse entity", "local model", "entity-grid model"]} +{"id": "costa-jussa-fonollosa-2016-character", "title": "Character-based Neural Machine Translation", "abstract": "Neural Machine Translation (MT) has reached state-of-the-art results. However, one of the main challenges that neural MT still faces is dealing with very large vocabularies and morphologically rich languages. In this paper, we propose a neural MT system using character-based embeddings in combination with convolutional and highway layers to replace the standard lookup-based word representations. The resulting unlimited-vocabulary and affix-aware source word embeddings are tested in a state-of-the-art neural MT based on an attention-based bidirectional recurrent neural network. The proposed MT scheme provides improved results even when the source language is not morphologically rich. Improvements up to 3 BLEU points are obtained in the German-English WMT task.", "keyphrases": ["neural machine translation", "lookup-based word representation", "character"]} +{"id": "ng-etal-2003-exploiting", "title": "Exploiting Parallel Texts for Word Sense Disambiguation: An Empirical Study", "abstract": "A central problem of word sense disambiguation (WSD) is the lack of manually sense-tagged data required for supervised learning. 
In this paper, we evaluate an approach to automatically acquire sense-tagged training data from English-Chinese parallel corpora, which are then used for disambiguating the nouns in the SENSEVAL-2 English lexical sample task. Our investigation reveals that this method of acquiring sense-tagged data is promising. On a subset of the most difficult SENSEVAL-2 nouns, the accuracy difference between the two approaches is only 14.0%, and the difference could narrow further to 6.5% if we disregard the advantage that manually sense-tagged data have in their sense coverage. Our analysis also highlights the importance of the issue of domain dependence in evaluating WSD programs.", "keyphrases": ["word sense disambiguation", "parallel corpora", "cross-lingual evidence idea"]} +{"id": "zbib-etal-2012-machine", "title": "Machine Translation of Arabic Dialects", "abstract": "Arabic Dialects present many challenges for machine translation, not least of which is the lack of data resources. We use crowdsourcing to cheaply and quickly build Levantine-English and Egyptian-English parallel corpora, consisting of 1.1M words and 380k words, respectively. The dialectal sentences are selected from a large corpus of Arabic web text, and translated using Amazon's Mechanical Turk. We use this data to build Dialectal Arabic MT systems, and find that small amounts of dialectal data have a dramatic impact on translation quality. When translating Egyptian and Levantine test sets, our Dialectal Arabic MT system performs 6.3 and 7.0 BLEU points higher than a Modern Standard Arabic MT system trained on a 150M-word Arabic-English parallel corpus.", "keyphrases": ["dialect", "egyptian-english parallel corpora", "da-english data"]} +{"id": "tsur-etal-2015-frame", "title": "A Frame of Mind: Using Statistical Models for Detection of Framing and Agenda Setting Campaigns", "abstract": "Framing is a sophisticated form of discourse in which the speaker tries to induce a cognitive bias through consistent linkage between a topic and a specific context (frame). We build on political science and communication theory and use probabilistic topic models combined with time series regression analysis (autoregressive distributed-lag models) to gain insights about the language dynamics in the political processes. Processing four years of public statements issued by members of the U.S. Congress, our results provide a glimpse into the complex dynamic processes of framing, attention shifts and agenda setting, commonly known as \u2018spin\u2019. We further provide new evidence for the divergence in party discipline in U.S. politics.", "keyphrases": ["framing", "news article", "computational analysis"]} +{"id": "saha-mausam-2018-open", "title": "Open Information Extraction from Conjunctive Sentences", "abstract": "We develop CALM, a coordination analyzer that improves upon the conjuncts identified from dependency parses. It uses a language model based scoring and several linguistic constraints to search over hierarchical conjunct boundaries (for nested coordination). By splitting a conjunctive sentence around these conjuncts, CALM outputs several simple sentences. We demonstrate the value of our coordination analyzer in the end task of Open Information Extraction (Open IE). State-of-the-art Open IE systems lose substantial yield due to ineffective processing of conjunctive sentences. 
Our Open IE system, CALMIE, performs extraction over the simple sentences identified by CALM to obtain up to 1.8x yield with a moderate increase in precision compared to extractions from original sentences.", "keyphrases": ["conjunctive sentence", "calmie", "open information extraction"]} +{"id": "reschke-etal-2014-event", "title": "Event Extraction Using Distant Supervision", "abstract": "Distant supervision is a successful paradigm that gathers training data for information extraction systems by automatically aligning vast databases of facts with text. Previous work has demonstrated its usefulness for the extraction of binary relations such as a person's employer or a film's director. Here, we extend the distant supervision approach to template-based event extraction, focusing on the extraction of passenger counts, aircraft types, and other facts concerning airplane crash events. We present a new publicly available dataset and event extraction task in the plane crash domain based on Wikipedia infoboxes and newswire text. Using this dataset, we conduct a preliminary evaluation of four distantly supervised extraction models which assign named entity mentions in text to entries in the event template. Our results indicate that joint inference over sequences of candidate entity mentions is beneficial. Furthermore, we demonstrate that the Searn algorithm outperforms a linear-chain CRF and strong baselines with local inference.", "keyphrases": ["distant supervision", "newswire text", "event extraction"]} +{"id": "hewitt-liang-2019-designing", "title": "Designing and Interpreting Probes with Control Tasks", "abstract": "Probes, supervised models trained to predict properties (like parts-of-speech) from representations (like ELMo), have achieved high accuracy on a range of linguistic tasks. But does this mean that the representations encode linguistic structure or just that the probe has learned the linguistic task? In this paper, we propose control tasks, which associate word types with random outputs, to complement linguistic tasks. By construction, these tasks can only be learned by the probe itself. So a good probe, (one that reflects the representation), should be selective, achieving high linguistic task accuracy and low control task accuracy. The selectivity of a probe puts linguistic task accuracy in context with the probe's capacity to memorize from word types. We construct control tasks for English part-of-speech tagging and dependency edge prediction, and show that popular probes on ELMo representations are not selective. We also find that dropout, commonly used to control probe complexity, is ineffective for improving selectivity of MLPs, but that other forms of regularization are effective. Finally, we find that while probes on the first layer of ELMo yield slightly better part-of-speech tagging accuracy than the second, probes on the second layer are substantially more selective, which raises the question of which layer better represents parts-of-speech.", "keyphrases": ["probe", "control task", "selectivity", "capacity", "linguistic knowledge"]} +{"id": "turney-2008-uniform", "title": "A Uniform Approach to Analogies, Synonyms, Antonyms, and Associations", "abstract": "Recognizing analogies, synonyms, antonyms, and associations appear to be four distinct tasks, requiring distinct NLP algorithms. In the past, the four tasks have been treated independently, using a wide variety of algorithms. 
These four semantic classes, however, are a tiny sample of the full range of semantic phenomena, and we cannot afford to create ad hoc algorithms for each semantic phenomenon; we need to seek a unified approach. We propose to subsume a broad range of phenomena under analogies. To limit the scope of this paper, we restrict our attention to the subsumption of synonyms, antonyms, and associations. We introduce a supervised corpus-based machine learning algorithm for classifying analogous word pairs, and we show that it can solve multiple-choice SAT analogy questions, TOEFL synonym questions, ESL synonym-antonym questions, and similar-associated-both questions from cognitive psychology.", "keyphrases": ["uniform approach", "antonyms", "association", "nlp problem", "analogy task"]} +{"id": "navigli-ponzetto-2010-babelnet", "title": "BabelNet: Building a Very Large Multilingual Semantic Network", "abstract": "In this paper we present BabelNet -- a very large, wide-coverage multilingual semantic network. The resource is automatically constructed by means of a methodology that integrates lexicographic and encyclopedic knowledge from WordNet and Wikipedia. In addition Machine Translation is also applied to enrich the resource with lexical information for all languages. We conduct experiments on new and existing gold-standard datasets to show the high quality and coverage of the resource.", "keyphrases": ["multilingual semantic network", "encyclopedic knowledge", "different language", "knowledge base", "link"]} +{"id": "jeong-etal-2009-semi", "title": "Semi-supervised Speech Act Recognition in Emails and Forums", "abstract": "In this paper, we present a semi-supervised method for automatic speech act recognition in email and forums. The major challenge of this task is due to lack of labeled data in these two genres. Our method leverages labeled data in the Switchboard-DAMSL and the Meeting Recorder Dialog Act database and applies simple domain adaptation techniques over a large amount of unlabeled email and forum data to address this problem. Our method uses automatically extracted features such as phrases and dependency trees, called subtree features, for semi-supervised learning. Empirical results demonstrate that our model is effective in email and forum speech act recognition.", "keyphrases": ["speech act recognition", "forum", "semi-supervised learning"]} +{"id": "ebrahimi-dou-2015-chain", "title": "Chain Based RNN for Relation Classification", "abstract": "We present a novel approach for relation classification, using a recursive neural network (RNN), based on the shortest path between two entities in a dependency graph. Previous works on RNN are based on constituency-based parsing because phrasal nodes in a parse tree can capture compositionality in a sentence. Compared with constituency-based parse trees, dependency graphs can represent relations more compactly. This is particularly important in sentences with distant entities, where the parse tree spans words that are not relevant to the relation. In such cases RNN cannot be trained effectively in a timely manner. However, due to the lack of phrasal nodes in dependency graphs, application of RNN is not straightforward. In order to tackle this problem, we utilize dependency constituent units called chains.
Our experiments on two relation classification datasets show that Chain based RNN provides a shallower network, which performs considerably faster and achieves better classification results.", "keyphrases": ["rnn", "relation classification", "recursive neural network"]} +{"id": "pavlick-kwiatkowski-2019-inherent", "title": "Inherent Disagreements in Human Textual Inferences", "abstract": "We analyze humans' disagreements about the validity of natural language inferences. We show that, very often, disagreements are not dismissible as annotation \u201cnoise\u201d, but rather persist as we collect more ratings and as we vary the amount of context provided to raters. We further show that the type of uncertainty captured by current state-of-the-art models for natural language inference is not reflective of the type of uncertainty present in human disagreements. We discuss implications of our results in relation to the recognizing textual entailment (RTE)/natural language inference (NLI) task. We argue for a refined evaluation objective that requires models to explicitly capture the full distribution of plausible human judgments.", "keyphrases": ["disagreement", "natural language inference", "rating", "nli", "annotation artifact"]} +{"id": "kogan-etal-2009-predicting", "title": "Predicting Risk from Financial Reports with Regression", "abstract": "We address a text regression problem: given a piece of text, predict a real-world continuous quantity associated with the text's meaning. In this work, the text is an SEC-mandated financial report published annually by a publicly-traded company, and the quantity to be predicted is volatility of stock returns, an empirical measure of financial risk. We apply well-known regression techniques to a large corpus of freely available financial reports, constructing regression models of volatility for the period following a report. Our models rival past volatility (a strong baseline) in predicting the target variable, and a single model that uses both can significantly outperform past volatility. Interestingly, our approach is more accurate for reports after the passage of the Sarbanes-Oxley Act of 2002, giving some evidence for the success of that legislation in making financial reports more informative.", "keyphrases": ["risk", "report", "text regression problem", "company", "volatility"]} +{"id": "luong-etal-2015-addressing", "title": "Addressing the Rare Word Problem in Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) is a new approach to machine translation that has shown promising results that are comparable to traditional approaches. A significant weakness in conventional NMT systems is their inability to correctly translate very rare words: end-to-end NMTs tend to have relatively small vocabularies with a single unk symbol that represents every possible out-of-vocabulary (OOV) word. In this paper, we propose and implement an effective technique to address this problem. We train an NMT system on data that is augmented by the output of a word alignment algorithm, allowing the NMT system to emit, for each OOV word in the target sentence, the position of its corresponding word in the source sentence. This information is later utilized in a post-processing step that translates every OOV word using a dictionary. Our experiments on the WMT\u201914 English to French translation task show that this method provides a substantial improvement of up to 2.8 BLEU points over an equivalent NMT system that does not use this technique.
With 37.5 BLEU points, our NMT system is the first to surpass the best result achieved on a WMT\u201914 contest task.", "keyphrases": ["rare word problem", "neural machine translation", "nmt system", "post-processing step", "translation quality"]} +{"id": "parikh-etal-2016-decomposable", "title": "A Decomposable Attention Model for Natural Language Inference", "abstract": "We propose a simple neural architecture for natural language inference. Our approach uses attention to decompose the problem into subproblems that can be solved separately, thus making it trivially parallelizable. On the Stanford Natural Language Inference (SNLI) dataset, we obtain state-of-the-art results with almost an order of magnitude fewer parameters than previous work and without relying on any word-order information. Adding intra-sentence attention that takes a minimum amount of order into account yields further improvements.", "keyphrases": ["decomposable attention model", "natural language inference", "nli", "text sequence", "entailment"]} +{"id": "narayan-gardent-2014-hybrid", "title": "Hybrid Simplification using Deep Semantics and Machine Translation", "abstract": "We present a hybrid approach to sentence simplification which combines deep semantics and monolingual machine translation to derive simple sentences from complex ones. The approach differs from previous work in two main ways. First, it is semantic based in that it takes as input a deep semantic representation rather than e.g., a sentence or a parse tree. Second, it combines a simplification model for splitting and deletion with a monolingual translation model for phrase substitution and reordering. When compared against current state of the art methods, our model yields significantly simpler output that is both grammatical and meaning preserving.", "keyphrases": ["simplification", "machine translation", "hybrid approach"]} +{"id": "huber-carenini-2019-predicting", "title": "Predicting Discourse Structure using Distant Supervision from Sentiment", "abstract": "Discourse parsing could not yet take full advantage of the neural NLP revolution, mostly due to the lack of annotated datasets. We propose a novel approach that uses distant supervision on an auxiliary task (sentiment classification), to generate abundant data for RST-style discourse structure prediction. Our approach combines a neural variant of multiple-instance learning, using document-level supervision, with an optimal CKY-style tree generation algorithm. In a series of experiments, we train a discourse parser (for only structure prediction) on our automatically generated dataset and compare it with parsers trained on human-annotated corpora (news domain RST-DT and Instructional domain). Results indicate that while our parser does not yet match the performance of a parser trained and tested on the same dataset (intra-domain), it does perform remarkably well on the much more difficult and arguably more useful task of inter-domain discourse structure prediction, where the parser is trained on one domain and tested/applied on another one.", "keyphrases": ["discourse structure", "distant supervision", "sentiment classification"]} +{"id": "chen-etal-2018-adversarial", "title": "Adversarial Deep Averaging Networks for Cross-Lingual Sentiment Classification", "abstract": "In recent years great success has been achieved in sentiment classification for English, thanks in part to the availability of copious annotated resources. 
Unfortunately, most languages do not enjoy such an abundance of labeled data. To tackle the sentiment classification problem in low-resource languages without adequate annotated data, we propose an Adversarial Deep Averaging Network (ADAN) to transfer the knowledge learned from labeled data on a resource-rich source language to low-resource languages where only unlabeled data exist. ADAN has two discriminative branches: a sentiment classifier and an adversarial language discriminator. Both branches take input from a shared feature extractor to learn hidden representations that are simultaneously indicative for the classification task and invariant across languages. Experiments on Chinese and Arabic sentiment classification demonstrate that ADAN significantly outperforms state-of-the-art systems.", "keyphrases": ["deep averaging network", "sentiment classification", "adversarial training", "different language"]} +{"id": "zhao-etal-2020-gender", "title": "Gender Bias in Multilingual Embeddings and Cross-Lingual Transfer", "abstract": "Multilingual representations embed words from many languages into a single semantic space such that words with similar meanings are close to each other regardless of the language. These embeddings have been widely used in various settings, such as cross-lingual transfer, where a natural language processing (NLP) model trained on one language is deployed to another language. While the cross-lingual transfer techniques are powerful, they carry gender bias from the source to target languages. In this paper, we study gender bias in multilingual embeddings and how it affects transfer learning for NLP applications. We create a multilingual dataset for bias analysis and propose several ways for quantifying bias in multilingual representations from both the intrinsic and extrinsic perspectives. Experimental results show that the magnitude of bias in the multilingual representations changes differently when we align the embeddings to different target spaces and that the alignment direction can also have an influence on the bias in transfer learning. We further provide recommendations for using the multilingual word representations for downstream tasks.", "keyphrases": ["cross-lingual transfer", "gender bias", "word embedding"]} +{"id": "baker-etal-2012-modality", "title": "Modality and Negation in SIMT Use of Modality and Negation in Semantically-Informed Syntactic MT", "abstract": "This article describes the resource- and system-building efforts of an 8-week Johns Hopkins University Human Language Technology Center of Excellence Summer Camp for Applied Language Exploration (SCALE-2009) on Semantically Informed Machine Translation (SIMT). We describe a new modality/negation (MN) annotation scheme, the creation of a (publicly available) MN lexicon, and two automated MN taggers that we built using the annotation scheme and lexicon. Our annotation scheme isolates three components of modality and negation: a trigger (a word that conveys modality or negation), a target (an action associated with modality or negation), and a holder (an experiencer of modality). We describe how our MN lexicon was semi-automatically produced and we demonstrate that a structure-based MN tagger results in precision around 86% (depending on genre) for tagging of a standard LDC data set. We apply our MN annotation scheme to statistical machine translation using a syntactic framework that supports the inclusion of semantic annotations.
Syntactic tags enriched with semantic annotations are assigned to parse trees in the target-language training texts through a process of tree grafting. Although the focus of our work is modality and negation, the tree grafting procedure is general and supports other types of semantic information. We exploit this capability by including named entities, produced by a pre-existing tagger, in addition to the MN elements produced by the taggers described here. The resulting system significantly outperformed a linguistically naive baseline model (Hiero), and reached the highest scores yet reported on the NIST 2009 Urdu\u2013English test set. This finding supports the hypothesis that both syntactic and semantic information can improve translation quality.", "keyphrases": ["negation", "semantic information", "machine translation", "modality"]} +{"id": "yang-etal-2003-coreference", "title": "Coreference Resolution Using Competition Learning Approach", "abstract": "In this paper we propose a competition learning approach to coreference resolution. Traditionally, supervised machine learning approaches adopt the single-candidate model. Nevertheless the preference relationship between the antecedent candidates cannot be determined accurately in this model. By contrast, our approach adopts a twin-candidate learning model. Such a model can present the competition criterion for antecedent candidates reliably, and ensure that the most preferred candidate is selected. Furthermore, our approach applies a candidate filter to reduce the computational cost and data noises during training and resolution. The experimental results on MUC-6 and MUC-7 data set show that our approach can outperform those based on the single-candidate model.", "keyphrases": ["candidate", "coreference resolution", "training instance"]} +{"id": "bansal-etal-2014-tailoring", "title": "Tailoring Continuous Word Representations for Dependency Parsing", "abstract": "Word representations have proven useful for many NLP tasks, e.g., Brown clusters as features in dependency parsing (Koo et al., 2008). In this paper, we investigate the use of continuous word representations as features for dependency parsing. We compare several popular embeddings to Brown clusters, via multiple types of features, in both news and web domains. We find that all embeddings yield significant parsing gains, including some recent ones that can be trained in a fraction of the time of others. Explicitly tailoring the representations for the task leads to further improvements. Moreover, an ensemble of all representations achieves the best results, suggesting their complementarity.", "keyphrases": ["dependency parsing", "complementarity", "word embedding", "slight improvement", "entity recognition"]} +{"id": "engelbrecht-etal-2009-modeling", "title": "Modeling User Satisfaction with Hidden Markov Models", "abstract": "Models for predicting judgments about the quality of Spoken Dialog Systems have been used as overall evaluation metric or as optimization functions in adaptive systems. We describe a new approach to such models, using Hidden Markov Models (HMMs). The user's opinion is regarded as a continuous process evolving over time. 
We present the data collection method and results achieved with the HMM model.", "keyphrases": ["user satisfaction", "hidden markov models", "hmm", "sds"]} +{"id": "jia-etal-2019-certified", "title": "Certified Robustness to Adversarial Word Substitutions", "abstract": "State-of-the-art NLP models can often be fooled by adversaries that apply seemingly innocuous label-preserving transformations (e.g., paraphrasing) to input text. The number of possible transformations scales exponentially with text length, so data augmentation cannot cover all transformations of an input. This paper considers one exponentially large family of label-preserving transformations, in which every word in the input can be replaced with a similar word. We train the first models that are provably robust to all word substitutions in this family. Our training procedure uses Interval Bound Propagation (IBP) to minimize an upper bound on the worst-case loss that any combination of word substitutions can induce. To evaluate models' robustness to these transformations, we measure accuracy on adversarially chosen word substitutions applied to test examples. Our IBP-trained models attain 75% adversarial accuracy on both sentiment analysis on IMDB and natural language inference on SNLI; in comparison, on IMDB, models trained normally and ones trained with data augmentation achieve adversarial accuracy of only 12% and 41%, respectively.", "keyphrases": ["robustness", "propagation", "ibp", "loss", "attack"]} +{"id": "hazarika-etal-2018-conversational", "title": "Conversational Memory Network for Emotion Recognition in Dyadic Dialogue Videos", "abstract": "Emotion recognition in conversations is crucial for the development of empathetic machines. Present methods mostly ignore the role of inter-speaker dependency relations while classifying emotions in conversations. In this paper, we address recognizing utterance-level emotions in dyadic conversational videos. We propose a deep neural framework, termed Conversational Memory Network (CMN), which leverages contextual information from the conversation history. In particular, CMN uses multimodal approach comprising audio, visual and textual features with gated recurrent units to model past utterances of each speaker into memories. These memories are then merged using attention-based hops to capture inter-speaker dependencies. Experiments show a significant improvement of 3 \u2212 4% in accuracy over the state of the art.", "keyphrases": ["emotion recognition", "past utterance", "conversational memory network"]} +{"id": "nakov-etal-2016-semeval-2016", "title": "SemEval-2016 Task 3: Community Question Answering", "abstract": "This paper describes the SemEval\u20132016 Task 3 on Community Question Answering, which we offered in English and Arabic. For English, we had three subtasks: Question\u2013Comment Similarity (subtask A), Question\u2013Question Similarity (B), and Question\u2013External Comment Similarity (C). For Arabic, we had another subtask: Rerank the correct answers for a new question (D). Eighteen teams participated in the task, submitting a total of 95 runs (38 primary and 57 contrastive) for the four subtasks. A variety of approaches and features were used by the participating systems to address the different subtasks, which are summarized in this paper. The best systems achieved an official score (MAP) of 79.19, 76.70, 55.41, and 45.83 in subtasks A, B, C, and D, respectively. These scores are significantly better than those for the baselines that we provided. 
For subtask A, the best system improved over the 2015 winner by 3 points absolute in terms of Accuracy.", "keyphrases": ["community question answering", "comment similarity", "semeval task"]} +{"id": "yimam-etal-2014-automatic", "title": "Automatic Annotation Suggestions and Custom Annotation Layers in WebAnno", "abstract": "In this paper, we present a flexible approach to the efficient and exhaustive manual annotation of text documents. For this purpose, we extend WebAnno (Yimam et al., 2013), an open-source web-based annotation tool. While it was previously limited to specific annotation layers, our extension allows adding and configuring an arbitrary number of layers through a web-based UI. These layers can be annotated separately or simultaneously, and support most types of linguistic annotations such as spans, semantic classes, dependency relations, lexical chains, and morphology. Further, we tightly integrate a generic machine learning component for automatic annotation suggestions of span annotations. In two case studies, we show that automatic annotation suggestions, combined with our split-pane UI concept, significantly reduces annotation time.", "keyphrases": ["suggestion", "webanno", "annotation tool"]} +{"id": "iyyer-etal-2014-neural", "title": "A Neural Network for Factoid Question Answering over Paragraphs", "abstract": "Text classification methods for tasks like factoid question answering typically use manually defined string matching rules or bag of words representations. These methods are ineffective when question text contains very few individual words (e.g., named entities) that are indicative of the answer. We introduce a recursive neural network (RNN) model that can reason over such input by modeling textual compositionality. We apply our model, QANTA, to a dataset of questions from a trivia competition called quiz bowl. Unlike previous RNN models, QANTA learns word and phrase-level representations that combine across sentences to reason about entities. The model outperforms multiple baselines and, when combined with information retrieval methods, rivals the best human players.", "keyphrases": ["factoid question", "paragraph", "recursive neural network"]} +{"id": "xu-choi-2020-revealing", "title": "Revealing the Myth of Higher-Order Inference in Coreference Resolution", "abstract": "This paper analyzes the impact of higher-order inference (HOI) on the task of coreference resolution. HOI has been adapted by almost all recent coreference resolution models without taking much investigation on its true effectiveness over representation learning. To make a comprehensive analysis, we implement an end-to-end coreference system as well as four HOI approaches, attended antecedent, entity equalization, span clustering, and cluster merging, where the latter two are our original methods. We find that given a high-performing encoder such as SpanBERT, the impact of HOI is negative to marginal, providing a new perspective of HOI to this task. Our best model using cluster merging shows the Avg-F1 of 80.2 on the CoNLL 2012 shared task dataset in English.", "keyphrases": ["higher-order inference", "coreference resolution", "mention"]} +{"id": "morante-etal-2008-learning", "title": "Learning the Scope of Negation in Biomedical Texts", "abstract": "In this paper we present a machine learning system that finds the scope of negation in biomedical texts.
The system consists of two memory-based engines, one that decides if the tokens in a sentence are negation signals, and another that finds the full scope of these negation signals. Our approach to negation detection differs in two main aspects from existing research on negation. First, we focus on finding the scope of negation signals, instead of determining whether a term is negated or not. Second, we apply supervised machine learning techniques, whereas most existing systems apply rule-based algorithms. As far as we know, this way of approaching the negation scope finding task is novel.", "keyphrases": ["scope", "negation", "biomedical text", "bioscope corpus"]} +{"id": "sogaard-goldberg-2016-deep", "title": "Deep multi-task learning with low level tasks supervised at lower layers", "abstract": "In all previous work on deep multi-task learning we are aware of, all task supervisions are on the same (outermost) layer. We present a multi-task learning architecture with deep bi-directional RNNs, where supervision for different tasks can happen at different layers. We present experiments in syntactic chunking and CCG supertagging, coupled with the additional task of POS-tagging. We show that it is consistently better to have POS supervision at the innermost rather than the outermost layer. We argue that this is because \u201clow-level\u201d tasks are better kept at the lower layers, enabling the higher-level tasks to make use of the shared representation of the lower-level tasks. Finally, we also show how this architecture can be used for domain adaptation.", "keyphrases": ["pos-tagging", "low layer", "mtl", "auxiliary task", "parameter sharing"]} +{"id": "artzi-etal-2015-broad", "title": "Broad-coverage CCG Semantic Parsing with AMR", "abstract": "We propose a grammar induction technique for AMR semantic parsing. While previous grammar induction techniques were designed to re-learn a new parser for each target application, the recently annotated AMR Bank provides a unique opportunity to induce a single model for understanding broad-coverage newswire text and support a wide range of applications. We present a new model that combines CCG parsing to recover compositional aspects of meaning and a factor graph to model non-compositional phenomena, such as anaphoric dependencies. Our approach achieves 66.2 Smatch F1 score on the AMR bank, significantly outperforming the previous state of the art.", "keyphrases": ["ccg", "semantic parsing", "amr", "non-compositional phenomenon"]} +{"id": "blanco-moldovan-2011-semantic", "title": "Semantic Representation of Negation Using Focus Detection", "abstract": "Negation is present in all human languages and it is used to reverse the polarity of part of statements that are otherwise affirmative by default. A negated statement often carries positive implicit meaning, but to pinpoint the positive part from the negative part is rather difficult. This paper aims at thoroughly representing the semantics of negation by revealing implicit positive meaning. The proposed representation relies on focus of negation detection. For this, new annotation over PropBank and a learning algorithm are proposed.", "keyphrases": ["negation", "focus detection", "propbank"]} +{"id": "xu-etal-2014-shift", "title": "Shift-Reduce CCG Parsing with a Dependency Model", "abstract": "This paper presents the first dependency model for a shift-reduce CCG parser.
Modelling dependencies is desirable for a number of reasons, including handling the \u201cspurious\u201d ambiguity of CCG; fitting well with the theory of CCG; and optimizing for structures which are evaluated at test time. We develop a novel training technique using a dependency oracle, in which all derivations are hidden. A challenge arises from the fact that the oracle needs to keep track of exponentially many gold-standard derivations, which is solved by integrating a packed parse forest with the beam-search decoder. Standard CCGBank tests show the model achieves up to 1.05 labeled F-score improvements over three existing, competitive CCG parsing models.", "keyphrases": ["ccg", "dependency model", "shift-reduce ccg"]} +{"id": "marcheggiani-titov-2017-encoding", "title": "Encoding Sentences with Graph Convolutional Networks for Semantic Role Labeling", "abstract": "Semantic role labeling (SRL) is the task of identifying the predicate-argument structure of a sentence. It is typically regarded as an important step in the standard NLP pipeline. As the semantic representations are closely related to syntactic ones, we exploit syntactic information in our model. We propose a version of graph convolutional networks (GCNs), a recent class of neural networks operating on graphs, suited to model syntactic dependency graphs. GCNs over syntactic dependency trees are used as sentence encoders, producing latent feature representations of words in a sentence. We observe that GCN layers are complementary to LSTM ones: when we stack both GCN and LSTM layers, we obtain a substantial improvement over an already state-of-the-art LSTM SRL model, resulting in the best reported scores on the standard benchmark (CoNLL-2009) both for Chinese and English.", "keyphrases": ["graph convolutional networks", "semantic role labeling", "gcn", "sentence encoder", "many nlp task"]} +{"id": "artetxe-schwenk-2019-massively", "title": "Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond", "abstract": "We introduce an architecture to learn joint multilingual sentence representations for 93 languages, belonging to more than 30 different families and written in 28 different scripts. Our system uses a single BiLSTM encoder with a shared byte-pair encoding vocabulary for all languages, which is coupled with an auxiliary decoder and trained on publicly available parallel corpora. This enables us to learn a classifier on top of the resulting embeddings using English annotated data only, and transfer it to any of the 93 languages without any modification. Our experiments in cross-lingual natural language inference (XNLI data set), cross-lingual document classification (MLDoc data set), and parallel corpus mining (BUCC data set) show the effectiveness of our approach. We also introduce a new test set of aligned sentences in 112 languages, and show that our sentence embeddings obtain strong results in multilingual similarity search even for low-resource languages.
Our implementation, the pre-trained encoder, and the multilingual test set are available at .", "keyphrases": ["sentence embedding", "cross-lingual transfer", "low resource language"]} +{"id": "specia-etal-2016-shared", "title": "A Shared Task on Multimodal Machine Translation and Crosslingual Image Description", "abstract": "This paper introduces and summarises the findings of a new shared task at the intersection of Natural Language Processing and Computer Vision: the generation of image descriptions in a target language, given an image and/or one or more descriptions in a different (source) language. This challenge was organised along with the Conference on Machine Translation (WMT16), and called for system submissions for two task variants: (i) a translation task, in which a source language image description needs to be translated to a target language, (optionally) with additional cues from the corresponding image, and (ii) a description generation task, in which a target language description needs to be generated for an image, (optionally) with additional cues from source language descriptions of the same image. In this first edition of the shared task, 16 systems were submitted for the translation task and seven for the image description task, from a total of 10 teams.", "keyphrases": ["multimodal machine translation", "image", "wmt16", "source language", "ambiguity"]} +{"id": "liu-etal-2019-tigs", "title": "TIGS: An Inference Algorithm for Text Infilling with Gradient Search", "abstract": "Text infilling aims at filling in the missing part of a sentence or paragraph, which has been applied to a variety of real-world natural language generation scenarios. Given a well-trained sequential generative model, it is challenging for its unidirectional decoder to generate missing symbols conditioned on the past and future information around the missing part. In this paper, we propose an iterative inference algorithm based on gradient search, which could be the first inference algorithm that can be broadly applied to any neural sequence generative models for text infilling tasks. Extensive experimental comparisons show the effectiveness and efficiency of the proposed method on three different text infilling tasks with various mask ratios and different mask strategies, comparing with five state-of-the-art methods.", "keyphrases": ["text infilling", "gradient search", "span"]} +{"id": "wang-etal-2020-cord", "title": "CORD-19: The COVID-19 Open Research Dataset", "abstract": "The COVID-19 Open Research Dataset (CORD-19) is a growing resource of scientific papers on COVID-19 and related historical coronavirus research. CORD-19 is designed to facilitate the development of text mining and information retrieval systems over its rich collection of metadata and structured full text papers. Since its release, CORD-19 has been downloaded over 200K times and has served as the basis of many COVID-19 text mining and discovery systems. In this article, we describe the mechanics of dataset construction, highlighting challenges and key design decisions, provide an overview of how CORD-19 has been used, and describe several shared tasks built around the dataset. 
We hope this resource will continue to bring together the computing community, biomedical experts, and policy makers in the search for effective treatments and management policies for COVID-19.", "keyphrases": ["scientific paper", "treatment", "cord-19", "allen institute", "white house"]} +{"id": "linzen-etal-2016-assessing", "title": "Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies", "abstract": "The success of long short-term memory (LSTM) neural networks in language processing is typically attributed to their ability to capture long-distance statistical regularities. Linguistic regularities are often sensitive to syntactic structure; can such dependencies be captured by LSTMs, which do not have explicit structural representations? We begin addressing this question using number agreement in English subject-verb dependencies. We probe the architecture's grammatical competence both using training objectives with an explicit grammatical target (number prediction, grammaticality judgments) and using language models. In the strongly supervised settings, the LSTM achieved very high overall accuracy (less than 1% errors), but errors increased when sequential and structural information conflicted. The frequency of such errors rose sharply in the language-modeling setting. We conclude that LSTMs can capture a non-trivial amount of grammatical structure given targeted supervision, but stronger architectures may be required to further reduce errors; furthermore, the language modeling signal is insufficient for capturing syntax-sensitive dependencies, and should be supplemented with more direct supervision if such dependencies need to be captured.", "keyphrases": ["syntax-sensitive dependency", "grammaticality judgment", "language model", "rnn", "long-distance dependency"]} +{"id": "kilgarriff-grefenstette-2003-introduction", "title": "Introduction to the Special Issue on the Web as Corpus", "abstract": "The Web, teeming as it is with language data, of all manner of varieties and languages, in vast quantity and freely available, is a fabulous linguists' playground. This special issue of Computational Linguistics explores ways in which this dream is being realized.", "keyphrases": ["special issue", "web", "linguistic data", "such resource", "wac"]} +{"id": "kedzie-etal-2018-content", "title": "Content Selection in Deep Learning Models of Summarization", "abstract": "We carry out experiments with deep learning models of summarization across the domains of news, personal stories, meetings, and medical articles in order to understand how content selection is performed. We find that many sophisticated features of state of the art extractive summarizers do not improve performance over simpler models. These results suggest that it is easier to create a summarizer for a new domain than previous work suggests and bring into question the benefit of deep learning models for summarization for those domains that do have massive datasets (i.e., news). At the same time, they suggest important questions for new research in summarization; namely, new forms of sentence representations or external knowledge sources are needed that are better suited to the summarization task.", "keyphrases": ["deep learning model", "summarization", "sentence representation", "content selection"]} +{"id": "wen-etal-2015-semantically", "title": "Semantically Conditioned LSTM-based Natural Language Generation for Spoken Dialogue Systems", "abstract": "
Natural language generation (NLG) is a critical component of spoken dialogue and it has a significant impact both on usability and perceived quality. Most NLG systems in common use employ rules and heuristics and tend to generate rigid and stylised responses without the natural variation of human language. They are also not easily scaled to systems covering multiple domains and languages. This paper presents a statistical language generator based on a semantically controlled Long Short-term Memory (LSTM) structure. The LSTM generator can learn from unaligned data by jointly optimising sentence planning and surface realisation using a simple cross entropy training criterion, and language variation can be easily achieved by sampling from output candidates. With fewer heuristics, an objective evaluation in two differing test domains showed the proposed method improved performance compared to previous methods. Human judges scored the LSTM system higher on informativeness and naturalness and overall preferred it to the other systems.", "keyphrases": ["natural language generation", "dialogue act", "sc-lstm", "response generation"]} +{"id": "liu-etal-2017-adversarial", "title": "Adversarial Multi-task Learning for Text Classification", "abstract": "Neural network models have shown their promising opportunities for multi-task learning, which focus on learning the shared layers to extract the common and task-invariant features. However, in most existing approaches, the extracted shared features are prone to be contaminated by task-specific features or the noise brought by other tasks. In this paper, we propose an adversarial multi-task learning framework, preventing the shared and private latent feature spaces from interfering with each other. We conduct extensive experiments on 16 different text classification tasks, which demonstrates the benefits of our approach. Besides, we show that the shared knowledge learned by our proposed model can be regarded as off-the-shelf knowledge and easily transferred to new tasks. The datasets of all 16 tasks are publicly available at .", "keyphrases": ["multi-task learning", "text classification", "adversarial training", "loss", "subspace"]} +{"id": "hulden-etal-2014-semi", "title": "Semi-supervised learning of morphological paradigms and lexicons", "abstract": "We present a semi-supervised approach to the problem of paradigm induction from inflection tables. Our system extracts generalizations from inflection tables, representing the resulting paradigms in an abstract form. The process is intended to be language-independent, and to provide human-readable generalizations of paradigms. The tools we provide can be used by linguists for the rapid creation of lexical resources. We evaluate the system through an inflection table reconstruction task using Wiktionary data for German, Spanish, and Finnish. With no additional corpus information available, the evaluation yields per word form accuracy scores on inflecting unseen base forms in different languages ranging from 87.81% (German nouns) to 99.52% (Spanish verbs); with additional unlabeled text corpora available for training, the scores range from 91.81% (German nouns) to 99.58% (Spanish verbs).
We separately evaluate the system in a simulated task of Swedish lexicon creation, and show that on the basis of a small number of inflection tables, the system can accurately collect from a list of noun forms a lexicon with inflection information ranging from 100.0% correct (collect 100 words) to 96.4% correct (collect 1000 words).", "keyphrases": ["paradigm", "inflection table", "semi-supervised learning"]} +{"id": "kim-rush-2016-sequence", "title": "Sequence-Level Knowledge Distillation", "abstract": "Neural machine translation (NMT) offers a novel alternative formulation of translation that is potentially simpler than statistical approaches. However to reach competitive performance, NMT models need to be exceedingly large. In this paper we consider applying knowledge distillation approaches (Bucila et al., 2006; Hinton et al., 2015) that have proven successful for reducing the size of neural models in other domains to the problem of NMT. We demonstrate that standard knowledge distillation applied to word-level prediction can be effective for NMT, and also introduce two novel sequence-level versions of knowledge distillation that further improve performance, and somewhat surprisingly, seem to eliminate the need for beam search (even when applied on the original teacher model). Our best student model runs 10 times faster than its state-of-the-art teacher with little loss in performance. It is also significantly better than a baseline model trained without knowledge distillation: by 4.2/1.7 BLEU with greedy decoding/beam search. Applying weight pruning on top of knowledge distillation results in a student model that has 13 times fewer parameters than the original teacher model, with a decrease of 0.4 BLEU.", "keyphrases": ["knowledge distillation", "student", "sequence-level distillation", "specific task", "seqkd"]} +{"id": "kuchaiev-etal-2018-openseq2seq", "title": "OpenSeq2Seq: Extensible Toolkit for Distributed and Mixed Precision Training of Sequence-to-Sequence Models", "abstract": "We present OpenSeq2Seq \u2013 an open-source toolkit for training sequence-to-sequence models. The main goal of our toolkit is to allow researchers to most effectively explore different sequence-to-sequence architectures. The efficiency is achieved by fully supporting distributed and mixed-precision training. OpenSeq2Seq provides building blocks for training encoder-decoder models for neural machine translation and automatic speech recognition. We plan to extend it with other modalities in the future.", "keyphrases": ["mixed precision training", "sequence-to-sequence model", "openseq2seq"]} +{"id": "jeretic-etal-2020-natural", "title": "Are Natural Language Inference Models IMPPRESsive? Learning IMPlicature and PRESupposition", "abstract": "Natural language inference (NLI) is an increasingly important task for natural language understanding, which requires one to infer whether a sentence entails another. However, the ability of NLI models to make pragmatic inferences remains understudied. We create an IMPlicature and PRESupposition diagnostic dataset (IMPPRES), consisting of 32K semi-automatically generated sentence pairs illustrating well-studied pragmatic inference types. We use IMPPRES to evaluate whether BERT, InferSent, and BOW NLI models trained on MultiNLI (Williams et al., 2018) learn to make pragmatic inferences. Although MultiNLI appears to contain very few pairs illustrating these inference types, we find that BERT learns to draw pragmatic inferences.
It reliably treats scalar implicatures triggered by \u201csome\u201d as entailments. For some presupposition triggers like \u201conly\u201d, BERT reliably recognizes the presupposition as an entailment, even when the trigger is embedded under an entailment canceling operator like negation. BOW and InferSent show weaker evidence of pragmatic reasoning. We conclude that NLI training encourages models to learn some, but not all, pragmatic inferences.", "keyphrases": ["implicature", "presupposition", "entailment", "nli model"]} +{"id": "hosseini-etal-2014-learning", "title": "Learning to Solve Arithmetic Word Problems with Verb Categorization", "abstract": "This paper presents a novel approach to learning to solve simple arithmetic word problems. Our system, ARIS, analyzes each of the sentences in the problem statement to identify the relevant variables and their values. ARIS then maps this information into an equation that represents the problem, and enables its (trivial) solution as shown in Figure 1. The paper analyzes the arithmetic-word problems \u201cgenre\u201d, identifying seven categories of verbs used in such problems. ARIS learns to categorize verbs with 81.2% accuracy, and is able to solve 77.7% of the problems in a corpus of standard primary school test questions. We report the first learning results on this task without reliance on predefined templates and make our data publicly available.", "keyphrases": ["arithmetic word problem", "verb categorization", "subtraction problem", "solver", "hand-crafted rule"]} +{"id": "gonzalez-ibanez-etal-2011-identifying", "title": "Identifying Sarcasm in Twitter: A Closer Look", "abstract": "Sarcasm transforms the polarity of an apparently positive or negative utterance into its opposite. We report on a method for constructing a corpus of sarcastic Twitter messages in which determination of the sarcasm of each message has been made by its author. We use this reliable corpus to compare sarcastic utterances in Twitter to utterances that express positive or negative attitudes without sarcasm. We investigate the impact of lexical and pragmatic factors on machine learning effectiveness for identifying sarcastic utterances and we compare the performance of machine learning techniques and human judges on this task. Perhaps unsurprisingly, neither the human judges nor the machine learning techniques perform very well.", "keyphrases": ["sarcasm", "twitter", "emoticon", "social medium", "computational linguistic"]} +{"id": "cheng-etal-2020-ape", "title": "APE: Argument Pair Extraction from Peer Review and Rebuttal via Multi-task Learning", "abstract": "Peer review and rebuttal, with rich interactions and argumentative discussions in between, are naturally a good resource to mine arguments. However, few works study both of them simultaneously. In this paper, we introduce a new argument pair extraction (APE) task on peer review and rebuttal in order to study the contents, the structure and the connections between them. We prepare a challenging dataset that contains 4,764 fully annotated review-rebuttal passage pairs from an open review platform to facilitate the study of this task. To automatically detect argumentative propositions and extract argument pairs from this corpus, we cast it as the combination of a sequence labeling task and a text relation classification task. Thus, we propose a multitask learning framework based on hierarchical LSTM networks.
Extensive experiments and analysis demonstrate the effectiveness of our multi-task framework, and also show the challenges of the new task as well as motivate future research directions.", "keyphrases": ["argument pair extraction", "peer review", "rebuttal", "discussion", "new task"]} +{"id": "louis-nenkova-2012-coherence", "title": "A Coherence Model Based on Syntactic Patterns", "abstract": "We introduce a model of coherence which captures the intentional discourse structure in text. Our work is based on the hypothesis that syntax provides a proxy for the communicative goal of a sentence and therefore the sequence of sentences in a coherent discourse should exhibit detectable structural patterns. Results show that our method has high discriminating power for separating out coherent and incoherent news articles reaching accuracies of up to 90%. We also show that our syntactic patterns are correlated with manual annotations of intentional structure for academic conference articles and can successfully predict the coherence of abstract, introduction and related work sections of these articles.", "keyphrases": ["coherence", "syntactic pattern", "adjacent sentence"]} +{"id": "mohiuddin-joty-2019-revisiting", "title": "Revisiting Adversarial Autoencoder for Unsupervised Word Translation with Cycle Consistency and Improved Training", "abstract": "Adversarial training has shown impressive success in learning bilingual dictionary without any parallel data by mapping monolingual embeddings to a shared space. However, recent work has shown superior performance for non-adversarial methods in more challenging language pairs. In this work, we revisit adversarial autoencoder for unsupervised word translation and propose two novel extensions to it that yield more stable training and improved results. Our method includes regularization terms to enforce cycle consistency and input reconstruction, and puts the target encoders as an adversary against the corresponding discriminator. Extensive experimentations with European, non-European and low-resource languages show that our method is more robust and achieves better performance than recently proposed adversarial and non-adversarial approaches.", "keyphrases": ["adversarial autoencoder", "unsupervised word translation", "novel extension"]} +{"id": "shi-etal-2020-learn", "title": "Learn to Combine Linguistic and Symbolic Information for Table-based Fact Verification", "abstract": "Table-based fact verification is expected to perform both linguistic reasoning and symbolic reasoning. Existing methods pay little attention to combining linguistic information with symbolic information. In this work, we propose HeterTFV, a graph-based reasoning approach, that learns to combine linguistic information and symbolic information effectively. We first construct a program graph to encode programs, a kind of LISP-like logical form, to learn the semantic compositionality of the programs. Then we construct a heterogeneous graph to incorporate both linguistic information and symbolic information by introducing program nodes into the heterogeneous graph. Finally, we propose a graph-based reasoning approach to reason over the multiple types of nodes to make an effective combination of both types of information.
Experimental results on a large-scale benchmark dataset TABFACT illustrate the effect of our approach.", "keyphrases": ["symbolic information", "table-based fact verification", "reasoning"]} +{"id": "liao-etal-2018-abstract", "title": "Abstract Meaning Representation for Multi-Document Summarization", "abstract": "Generating an abstract from a collection of documents is a desirable capability for many real-world applications. However, abstractive approaches to multi-document summarization have not been thoroughly investigated. This paper studies the feasibility of using Abstract Meaning Representation (AMR), a semantic representation of natural language grounded in linguistic theory, as a form of content representation. Our approach condenses source documents to a set of summary graphs following the AMR formalism. The summary graphs are then transformed to a set of summary sentences in a surface realization step. The framework is fully data-driven and flexible. Each component can be optimized independently using small-scale, in-domain training data. We perform experiments on benchmark summarization datasets and report promising results. We also describe opportunities and challenges for advancing this line of research.", "keyphrases": ["summarization", "amr", "abstract meaning representation"]} +{"id": "bhattacharyya-2010-indowordnet", "title": "IndoWordNet", "abstract": "India is a multilingual country where machine translation and cross lingual search are highly relevant problems. These problems require large resources, like wordnets and lexicons, of high quality and coverage. Wordnets are lexical structures composed of synsets and semantic relations. Synsets are sets of synonyms. They are linked by semantic relations like hypernymy (is-a), meronymy (part-of), troponymy (manner-of) etc. IndoWordnet is a linked structure of wordnets of major Indian languages from Indo-Aryan, Dravidian and Sino-Tibetan families. These wordnets have been created by following the expansion approach from Hindi wordnet which was made available free for research in 2006. Since then a number of Indian languages have been creating their wordnets. In this paper we discuss the methodology, coverage, important considerations and multifarious benefits of IndoWordnet. Case studies are provided for Marathi, Sanskrit, Bodo and Telugu, to bring out the basic methodology of and challenges involved in the expansion approach. The guidelines the lexicographers follow for wordnet construction are enumerated. The difference between IndoWordnet and EuroWordnet is also discussed.", "keyphrases": ["indian language", "dravidian", "expansion approach", "lexicographer", "indowordnet"]} +{"id": "sapena-etal-2010-relaxcor", "title": "RelaxCor: A Global Relaxation Labeling Approach to Coreference Resolution", "abstract": "This paper describes the participation of RelaxCor in the Semeval-2010 task number 1: \"Coreference Resolution in Multiple Languages\". RelaxCor is a constraint-based graph partitioning approach to coreference resolution solved by relaxation labeling.
The approach combines the strengths of groupwise classifiers and chain formation methods in one global method.", "keyphrases": ["coreference resolution", "constraint-based graph", "relaxcor"]} +{"id": "kwiatkowski-etal-2011-lexical", "title": "Lexical Generalization in CCG Grammar Induction for Semantic Parsing", "abstract": "We consider the problem of learning factored probabilistic CCG grammars for semantic parsing from data containing sentences paired with logical-form meaning representations. Traditional CCG lexicons list lexical items that pair words and phrases with syntactic and semantic content. Such lexicons can be inefficient when words appear repeatedly with closely related lexical content. In this paper, we introduce factored lexicons, which include both lexemes to model word meaning and templates to model systematic variation in word usage. We also present an algorithm for learning factored CCG lexicons, along with a probabilistic parse-selection model. Evaluations on benchmark datasets demonstrate that the approach learns highly accurate parsers, whose generalization performance benefits greatly from the lexical factoring.", "keyphrases": ["semantic parsing", "lexeme", "template"]} +{"id": "kumar-etal-2020-data", "title": "Data Augmentation using Pre-trained Transformer Models", "abstract": "Language model based pre-trained models such as BERT have provided significant gains across different NLP tasks. In this paper, we study different types of transformer based pre-trained models such as auto-regressive models (GPT-2), auto-encoder models (BERT), and seq2seq models (BART) for conditional data augmentation. We show that prepending the class labels to text sequences provides a simple yet effective way to condition the pre-trained models for data augmentation. Additionally, on three classification benchmarks, pre-trained Seq2Seq model outperforms other data augmentation methods in a low-resource setting. Further, we explore how different pre-trained model based data augmentation differs in terms of data diversity, and how well such methods preserve the class-label information.", "keyphrases": ["transformer", "different type", "data augmentation", "pre-trained language model"]} +{"id": "visweswariah-etal-2010-urdu", "title": "Urdu and Hindi: Translation and sharing of linguistic resources", "abstract": "Hindi and Urdu share a common phonology, morphology and grammar but are written in different scripts. In addition, the vocabularies have also diverged significantly especially in the written form. In this paper we show that we can get reasonable quality translations (we estimated the Translation Error rate at 18%) between the two languages even in absence of a parallel corpus. Linguistic resources such as treebanks, part of speech tagged data and parallel corpora with English are limited for both these languages. We use the translation system to share linguistic resources between the two languages.
We demonstrate improvements on three tasks and show: statistical machine translation from Urdu to English is improved (0.8 in BLEU score) by using a Hindi-English parallel corpus, Hindi part of speech tagging is improved (up to 6% absolute) by using an Urdu part of speech corpus and a Hindi-English word aligner is improved by using a manually word aligned Urdu-English corpus (up to 9% absolute in F-Measure).", "keyphrases": ["hindi", "linguistic resource", "urdu"]} +{"id": "mani-etal-2008-spatialml", "title": "SpatialML: Annotation Scheme, Corpora, and Tools", "abstract": "SpatialML is an annotation scheme for marking up references to places in natural language. It covers both named and nominal references to places, grounding them where possible with geo-coordinates, including both relative and absolute locations, and characterizes relationships among places in terms of a region calculus. A freely available annotation editor has been developed for SpatialML, along with a corpus of annotated documents released by the Linguistic Data Consortium. Inter-annotator agreement on SpatialML is 77.0 F-measure for extents on that corpus. An automatic tagger for SpatialML extents scores 78.5 F-measure. A disambiguator scores 93.0 F-measure and 93.4 Predictive Accuracy. In adapting the extent tagger to new domains, merging the training data from the above corpus with annotated data in the new domain provides the best performance.", "keyphrases": ["annotation scheme", "nominal reference", "geo-coordinate", "region", "spatialml"]} +{"id": "morishita-etal-2020-jparacrawl", "title": "JParaCrawl: A Large Scale Web-Based English-Japanese Parallel Corpus", "abstract": "Recent machine translation algorithms mainly rely on parallel corpora. However, since the availability of parallel corpora remains limited, only some resource-rich language pairs can benefit from them. We constructed a parallel corpus for English-Japanese, for which the amount of publicly available parallel corpora is still limited. We constructed the parallel corpus by broadly crawling the web and automatically aligning parallel sentences. Our collected corpus, called JParaCrawl, amassed over 8.7 million sentence pairs. We show how it includes a broader range of domains and how a neural machine translation model trained with it works as a good pre-trained model for fine-tuning specific domains. The pre-training and fine-tuning approaches achieved or surpassed performance comparable to model training from the initial state and reduced the training time. Additionally, we trained the model with an in-domain dataset and JParaCrawl to show how we achieved the best performance with them. JParaCrawl and the pre-trained models are freely available online for research purposes.", "keyphrases": ["english-japanese", "parallel corpus", "web", "jparacrawl"]} +{"id": "mourad-darwish-2013-subjectivity", "title": "Subjectivity and Sentiment Analysis of Modern Standard Arabic and Arabic Microblogs", "abstract": "Though much research has been conducted on Subjectivity and Sentiment Analysis (SSA) during the last decade, little work has focused on Arabic. In this work, we focus on SSA for both Modern Standard Arabic (MSA) news articles and dialectal Arabic microblogs from Twitter. We showcase some of the challenges associated with SSA on microblogs. We adopted a random graph walk approach to extend the Arabic SSA lexicon using Arabic-English phrase tables, leading to improvements for SSA on Arabic microblogs.
We used different features for both subjectivity and sentiment classification, including stemming and part-of-speech tagging, as well as tweet-specific features. Our classification features yield results that surpass Arabic SSA results in the literature.", "keyphrases": ["sentiment analysis", "modern standard arabic", "subjectivity", "arabic tweet"]} +{"id": "nekoto-etal-2020-participatory", "title": "Participatory Research for Low-resourced Machine Translation: A Case Study in African Languages", "abstract": "Research in NLP lacks geographic diversity, and the question of how NLP can be scaled to low-resourced languages has not yet been adequately solved. 'Low-resourced'-ness is a complex problem going beyond data availability and reflects systemic problems in society. In this paper, we focus on the task of Machine Translation (MT), which plays a crucial role for information accessibility and communication worldwide. Despite immense improvements in MT over the past decade, MT is centered around a few high-resourced languages. As MT researchers cannot solve the problem of low-resourcedness alone, we propose participatory research as a means to involve all necessary agents required in the MT development process. We demonstrate the feasibility and scalability of participatory research with a case study on MT for African languages. Its implementation leads to a collection of novel translation datasets, MT benchmarks for over 30 languages, with human evaluations for a third of them, and enables participants without formal training to make a unique scientific contribution. Benchmarks, models, data, code, and evaluation results are released at .", "keyphrases": ["machine translation", "african languages", "participatory research"]} +{"id": "durrett-klein-2014-joint", "title": "A Joint Model for Entity Analysis: Coreference, Typing, and Linking", "abstract": "We present a joint model of three core tasks in the entity analysis stack: coreference resolution (within-document clustering), named entity recognition (coarse semantic typing), and entity linking (matching to Wikipedia entities). Our model is formally a structured conditional random field. Unary factors encode local features from strong baselines for each task. We then add binary and ternary factors to capture cross-task interactions, such as the constraint that coreferent mentions have the same semantic type. On the ACE 2005 and OntoNotes datasets, we achieve state-of-the-art results for all three tasks. Moreover, joint modeling improves performance on each task over strong independent baselines.", "keyphrases": ["joint model", "coreference", "entity linking", "crf model", "exception"]} +{"id": "wang-etal-2011-detection", "title": "Detection of Agreement and Disagreement in Broadcast Conversations", "abstract": "We present Conditional Random Fields based approaches for detecting agreement/disagreement between speakers in English broadcast conversation shows. We develop annotation approaches for a variety of linguistic phenomena. Various lexical, structural, durational, and prosodic features are explored. We compare the performance when using features extracted from automatically generated annotations against that when using human annotations. We investigate the efficacy of adding prosodic features on top of lexical, structural, and durational features. Since the training data is highly imbalanced, we explore two sampling approaches, random downsampling and ensemble downsampling.
Overall, our approach achieves 79.2% (precision), 50.5% (recall), 61.7% (F1) for agreement detection and 69.2% (precision), 46.9% (recall), and 55.9% (F1) for disagreement detection, on the English broadcast conversation data.", "keyphrases": ["agreement", "broadcast conversation", "detection"]} +{"id": "pitler-nenkova-2008-revisiting", "title": "Revisiting Readability: A Unified Framework for Predicting Text Quality", "abstract": "We combine lexical, syntactic, and discourse features to produce a highly predictive model of human readers' judgments of text readability. This is the first study to take into account such a variety of linguistic factors and the first to empirically demonstrate that discourse relations are strongly associated with the perceived quality of text. We show that various surface metrics generally expected to be related to readability are not very good predictors of readability judgments in our Wall Street Journal corpus. We also establish that readability predictors behave differently depending on the task: predicting text readability or ranking the readability. Our experiments indicate that discourse relations are the one class of features that exhibits robustness across these two tasks.", "keyphrases": ["readability", "factor", "discourse relation", "essay scoring", "assessment"]} +{"id": "nguyen-etal-2016-joint-event", "title": "Joint Event Extraction via Recurrent Neural Networks", "abstract": "Event extraction is a particularly challenging problem in information extraction. The state-of-the-art models for this problem have either applied convolutional neural networks in a pipelined framework (Chen et al., 2015) or followed the joint architecture via structured prediction with rich local and global features (Li et al., 2013). The former is able to learn hidden feature representations automatically from data based on the continuous and generalized representations of words. The latter, on the other hand, is capable of mitigating the error propagation problem of the pipelined approach and exploiting the inter-dependencies between event triggers and argument roles via discrete structures. In this work, we propose to do event extraction in a joint framework with bidirectional recurrent neural networks, thereby benefiting from the advantages of the two models as well as addressing issues inherent in the existing approaches. We systematically investigate different memory features for the joint model and demonstrate that the proposed model achieves the state-of-the-art performance on the ACE 2005 dataset.", "keyphrases": ["event extraction", "recurrent neural networks", "trigger", "argument role", "joint model"]} +{"id": "greene-resnik-2009-words", "title": "More than Words: Syntactic Packaging and Implicit Sentiment", "abstract": "Work on sentiment analysis often focuses on the words and phrases that people use in overtly opinionated text. In this paper, we introduce a new approach to the problem that focuses not on lexical indicators, but on the syntactic \"packaging\" of ideas, which is well suited to investigating the identification of implicit sentiment, or perspective.
We establish a strong predictive connection between linguistically well-motivated features and implicit sentiment, and then show how computational approximations of these features can be used to improve on existing state-of-the-art sentiment classification results.", "keyphrases": ["packaging", "implicit sentiment", "perspective", "syntactic representation", "text classification task"]} +{"id": "popovic-ney-2011-towards", "title": "Towards Automatic Error Analysis of Machine Translation Output", "abstract": "Evaluation and error analysis of machine translation output are important but difficult tasks. In this article, we propose a framework for automatic error analysis and classification based on the identification of actual erroneous words using the algorithms for computation of Word Error Rate (WER) and Position-independent word Error Rate (PER), which is just a very first step towards development of automatic evaluation measures that provide more specific information of certain translation problems. The proposed approach enables the use of various types of linguistic knowledge in order to classify translation errors in many different ways. This work focuses on one possible set-up, namely, on five error categories: inflectional errors, errors due to wrong word order, missing words, extra words, and incorrect lexical choices. For each of the categories, we analyze the contribution of various POS classes. We compared the results of automatic error analysis with the results of human error analysis in order to investigate two possible applications: estimating the contribution of each error type in a given translation output in order to identify the main sources of errors for a given translation system, and comparing different translation outputs using the introduced error categories in order to obtain more information about advantages and disadvantages of different systems and possibilities for improvements, as well as about advantages and disadvantages of applied methods for improvements. We used Arabic\u2013English Newswire and Broadcast News and Chinese\u2013English Newswire outputs created in the framework of the GALE project, several Spanish and English European Parliament outputs generated during the TC-Star project, and three German\u2013English outputs generated in the framework of the fourth Machine Translation Workshop. We show that our results correlate very well with the results of a human error analysis, and that all our metrics except the extra words reflect well the differences between different versions of the same translation system as well as the differences between different translation systems.", "keyphrases": ["automatic error analysis", "machine translation output", "inflectional error"]} +{"id": "li-etal-2004-joint", "title": "A Joint Source-Channel Model for Machine Transliteration", "abstract": "Most foreign names are transliterated into Chinese, Japanese or Korean with approximate phonetic equivalents. The transliteration is usually achieved through intermediate phonemic mapping. This paper presents a new framework that allows direct orthographical mapping (DOM) between two different languages, through a joint source-channel model, also called n-gram transliteration model (TM). With the n-gram TM model, we automate the orthographic alignment process to derive the aligned transliteration units from a bilingual dictionary.
The n-gram TM under the DOM framework greatly reduces system development effort and provides a quantum leap in improvement in transliteration accuracy over that of other state-of-the-art machine learning algorithms. The modeling framework is validated through several experiments for the English-Chinese language pair.", "keyphrases": ["joint source-channel model", "machine transliteration", "orthographic mapping", "source language", "phoneme-based method"]} +{"id": "lee-etal-2011-modeling", "title": "Modeling Syntactic Context Improves Morphological Segmentation", "abstract": "The connection between part-of-speech (POS) categories and morphological properties is well-documented in linguistics but underutilized in text processing systems. This paper proposes a novel model for morphological segmentation that is driven by this connection. Our model learns that words with common affixes are likely to be in the same syntactic category and uses learned syntactic categories to refine the segmentation boundaries of words. Our results demonstrate that incorporating POS categorization yields substantial performance gains on morphological segmentation of Arabic.", "keyphrases": ["syntactic context", "morphological segmentation", "parametric bayesian model"]} +{"id": "klein-etal-2017-opennmt", "title": "OpenNMT: Open-Source Toolkit for Neural Machine Translation", "abstract": "We describe an open-source toolkit for neural machine translation (NMT). The toolkit prioritizes efficiency, modularity, and extensibility with the goal of supporting NMT research into model architectures, feature representations, and source modalities, while maintaining competitive performance and reasonable training requirements. The toolkit consists of modeling and translation support, as well as detailed pedagogical documentation about the underlying techniques.", "keyphrases": ["open-source toolkit", "neural machine translation", "extensibility", "end"]} +{"id": "chen-etal-2015-lifelong", "title": "Lifelong Learning for Sentiment Classification", "abstract": "This paper proposes a novel lifelong learning (LL) approach to sentiment classification. LL mimics the human continuous learning process, i.e., retaining the knowledge learned from past tasks and using it to help future learning. In this paper, we first discuss LL in general and then LL for sentiment classification in particular. The proposed LL approach adopts a Bayesian optimization framework based on stochastic gradient descent. Our experimental results show that the proposed method outperforms baseline methods significantly, which demonstrates that lifelong learning is a promising research direction.", "keyphrases": ["sentiment classification", "lifelong learning", "task learning"]} +{"id": "elgohary-etal-2020-speak", "title": "Speak to your Parser: Interactive Text-to-SQL with Natural Language Feedback", "abstract": "We study the task of semantic parse correction with natural language feedback. Given a natural language utterance, most semantic parsing systems pose the problem as one-shot translation where the utterance is mapped to a corresponding logical form. In this paper, we investigate a more interactive scenario where humans can further interact with the system by providing free-form natural language feedback to correct the system when it generates an inaccurate interpretation of an initial utterance.
We focus on natural language to SQL systems and construct SPLASH, a dataset of utterances, incorrect SQL interpretations and the corresponding natural language feedback. We compare various reference models for the correction task and show that incorporating such a rich form of feedback can significantly improve the overall semantic parsing accuracy while retaining the flexibility of natural language interaction. While the estimated human correction accuracy is 81.5%, our best model achieves only 25.1%, which leaves a large gap for improvement in future research. SPLASH is publicly available at .", "keyphrases": ["text-to-sql", "natural language feedback", "semantic parsing"]} +{"id": "shang-etal-2018-learning", "title": "Learning Named Entity Tagger using Domain-Specific Dictionary", "abstract": "Recent advances in deep neural models allow us to build reliable named entity recognition (NER) systems without handcrafting features. However, such methods require large amounts of manually-labeled training data. There have been efforts on replacing human annotations with distant supervision (in conjunction with external dictionaries), but the generated noisy labels pose significant challenges on learning effective neural models. Here we propose two neural models to suit noisy distant supervision from the dictionary. First, under the traditional sequence labeling framework, we propose a revised fuzzy CRF layer to handle tokens with multiple possible labels. After identifying the nature of noisy labels in distant supervision, we go beyond the traditional framework and propose a novel, more effective neural model AutoNER with a new Tie or Break scheme. In addition, we discuss how to refine distant supervision for better NER performance. Extensive experiments on three benchmark datasets demonstrate that AutoNER achieves the best performance when only using dictionaries with no additional human effort, and delivers competitive results with state-of-the-art supervised benchmarks.", "keyphrases": ["dictionary", "entity recognition", "knowledge basis"]} +{"id": "currey-etal-2017-copied", "title": "Copied Monolingual Data Improves Low-Resource Neural Machine Translation", "abstract": "We train a neural machine translation (NMT) system to both translate source-language text and copy target-language text, thereby exploiting monolingual corpora in the target language. Specifically, we create a bitext from the monolingual text in the target language so that each source sentence is identical to the target sentence. This copied data is then mixed with the parallel corpus and the NMT system is trained like normal, with no metadata to distinguish the two input languages. Our proposed method proves to be an effective way of incorporating monolingual data into low-resource NMT. We see gains of up to 1.2 BLEU over a strong baseline with back-translation. Further analysis shows that the linguistic phenomena behind these gains are different from and largely orthogonal to back-translation, with our copied corpus method improving accuracy on named entities and other words that should remain identical between the source and target languages.", "keyphrases": ["monolingual data", "neural machine translation", "target sentence", "back-translation"]} +{"id": "park-etal-2018-reducing", "title": "Reducing Gender Bias in Abusive Language Detection", "abstract": "Abusive language detection models tend to have a problem of being biased toward identity words of a certain group of people because of imbalanced training datasets.
For example, \u201cYou are a good woman\u201d was considered \u201csexist\u201d when trained on an existing dataset. Such model bias is an obstacle for models to be robust enough for practical use. In this work, we measure such biases on models trained with different datasets, while analyzing the effect of different pre-trained word embeddings and model architectures. We also experiment with three mitigation methods: (1) debiased word embeddings, (2) gender swap data augmentation, and (3) fine-tuning with a larger corpus. These methods can effectively reduce model bias by 90-98% and can be extended to correct model bias in other scenarios.", "keyphrases": ["gender bias", "abusive language detection", "hate speech"]} +{"id": "lei-etal-2015-high", "title": "High-Order Low-Rank Tensors for Semantic Role Labeling", "abstract": "This paper introduces a tensor-based approach to semantic role labeling (SRL). The motivation behind the approach is to automatically induce a compact feature representation for words and their relations, tailoring them to the task. In this sense, our dimensionality reduction method provides a clear alternative to the traditional feature engineering approach used in SRL. To capture meaningful interactions between the argument, predicate, their syntactic path and the corresponding role label, we compress each feature representation first to a lower dimensional space prior to assessing their interactions. This corresponds to using an overall cross-product feature representation and maintaining associated parameters as a four-way low-rank tensor. The tensor parameters are optimized for the SRL performance using standard online algorithms. Our tensor-based approach rivals the best performing system on the CoNLL-2009 shared task. In addition, we demonstrate that adding the representation tensor to a competitive tensor-free model yields a 2% absolute increase in F-score.", "keyphrases": ["tensor", "semantic role labeling", "feature representation"]} +{"id": "peng-etal-2017-cross", "title": "Cross-Sentence N-ary Relation Extraction with Graph LSTMs", "abstract": "Past work in relation extraction has focused on binary relations in single sentences. Recent NLP inroads in high-value domains have sparked interest in the more general setting of extracting n-ary relations that span multiple sentences. In this paper, we explore a general relation extraction framework based on graph long short-term memory networks (graph LSTMs) that can be easily extended to cross-sentence n-ary relation extraction. The graph formulation provides a unified way of exploring different LSTM approaches and incorporating various intra-sentential and inter-sentential dependencies, such as sequential, syntactic, and discourse relations. A robust contextual representation is learned for the entities, which serves as input to the relation classifier. This simplifies handling of relations with arbitrary arity, and enables multi-task learning with related relations. We evaluate this framework in two important precision medicine settings, demonstrating its effectiveness with both conventional supervised learning and distant supervision. Cross-sentence extraction produced larger knowledge bases, and multi-task learning significantly improved extraction accuracy.
A thorough analysis of various LSTM approaches yielded useful insight into the impact of linguistic analysis on extraction accuracy.", "keyphrases": ["n-ary relation extraction", "graph lstm", "inter-sentential dependency", "sentence boundary"]} +{"id": "eljundi-etal-2019-hulmona", "title": "hULMonA: The Universal Language Model in Arabic", "abstract": "Arabic is a complex language with limited resources which makes it challenging to produce accurate text classification tasks such as sentiment analysis. The utilization of transfer learning (TL) has recently shown promising results for advancing accuracy of text classification in English. TL models are pre-trained on large corpora, and then fine-tuned on task-specific datasets. In particular, universal language models (ULMs), such as recently developed BERT, have achieved state-of-the-art results in various NLP tasks in English. In this paper, we hypothesize that similar success can be achieved for Arabic. The work aims at supporting the hypothesis by developing the first Universal Language Model in Arabic (hULMonA - \u062d\u0644\u0645\u0646\u0627 meaning our dream), demonstrating its use for Arabic classification tasks, and demonstrating how a pre-trained multi-lingual BERT can also be used for Arabic. We then conduct a benchmark study to evaluate both ULM successes with Arabic sentiment analysis. Experiment results show that the developed hULMonA and multi-lingual ULM are able to generalize well to multiple Arabic data sets and achieve new state of the art results in Arabic Sentiment Analysis for some of the tested sets.", "keyphrases": ["universal language model", "arabic", "hulmona"]} +{"id": "warstadt-etal-2020-blimp-benchmark", "title": "BLiMP: The Benchmark of Linguistic Minimal Pairs for English", "abstract": "We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP), a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs\u2014that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate a specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands.", "keyphrases": ["linguistic minimal pairs", "language model", "phenomena", "negative polarity item", "blimp"]} +{"id": "rastogi-etal-2018-multi", "title": "Multi-task Learning for Joint Language Understanding and Dialogue State Tracking", "abstract": "This paper presents a novel approach for multi-task learning of language understanding (LU) and dialogue state tracking (DST) in task-oriented dialogue systems. Multi-task training enables the sharing of the neural network layers responsible for encoding the user utterance for both LU and DST and improves performance while reducing the number of network parameters. In our proposed framework, DST operates on a set of candidate values for each slot that has been mentioned so far.
These candidate sets are generated using LU slot annotations for the current user utterance, dialogue acts corresponding to the preceding system utterance and the dialogue state estimated for the previous turn, enabling DST to handle slots with a large or unbounded set of possible values and deal with slot values not seen during training. Furthermore, to bridge the gap between training and inference, we investigate the use of scheduled sampling on LU output for the current user utterance as well as the DST output for the preceding turn.", "keyphrases": ["language understanding", "dialogue state tracking", "multi-task learning"]} +{"id": "sinha-2009-mining", "title": "Mining Complex Predicates In Hindi Using A Parallel Hindi-English Corpus", "abstract": "A complex predicate is a noun, a verb, an adjective or an adverb followed by a light verb that behaves as a single unit of verb. Complex predicates (CPs) are abundantly used in Hindi and other languages of the Indo Aryan family. Detecting and interpreting CPs constitute an important and somewhat difficult task. The linguistic and statistical methods have yielded limited success in mining this data. In this paper, we present a simple method for detecting CPs of all kinds using a Hindi-English parallel corpus. A CP is hypothesized by detecting absence of the conventional meaning of the light verb in the aligned English sentence. This simple strategy exploits the fact that a CP is a multiword expression with a meaning that is distinct from the meaning of the light verb. Although there are several shortcomings in the methodology, this empirical method surprisingly yields mining of CPs with an average precision of 89% and a recall of 90%.", "keyphrases": ["complex predicate", "hindi", "noun", "hindi-english parallel corpus"]} +{"id": "simard-etal-2007-statistical", "title": "Statistical Phrase-Based Post-Editing", "abstract": "We propose to use a statistical phrase-based machine translation system in a post-editing task: the system takes as input raw machine translation output (from a commercial rule-based MT system), and produces post-edited target-language text. We report on experiments that were performed on data collected in precisely such a setting: pairs of raw MT output and their manually post-edited versions. In our evaluation, the output of our automatic post-editing (APE) system is not only better quality than the rule-based MT (both in terms of the BLEU and TER metrics), it is also better than the output of a state-of-the-art phrase-based MT system used in standalone translation mode. These results indicate that automatic post-editing constitutes a simple and efficient way of combining rule-based and statistical MT technologies.", "keyphrases": ["post-editing", "machine translation", "ape system", "phrase-based smt", "repetitive nature"]} +{"id": "xiao-etal-2019-label", "title": "Label-Specific Document Representation for Multi-Label Text Classification", "abstract": "Multi-label text classification (MLTC) aims to tag the most relevant labels for a given document. In this paper, we propose a Label-Specific Attention Network (LSAN) to learn a label-specific document representation. LSAN takes advantage of label semantic information to determine the semantic connection between labels and document for constructing label-specific document representation. Meanwhile, the self-attention mechanism is adopted to identify the label-specific document representation from document content information. 
In order to seamlessly integrate the above two parts, an adaptive fusion strategy is proposed, which can effectively output the comprehensive label-specific document representation to build a multi-label text classifier. Extensive experimental results demonstrate that LSAN consistently outperforms the state-of-the-art methods on four different datasets, especially on the prediction of low-frequency labels. The code and hyper-parameter settings are released to facilitate other researchers.", "keyphrases": ["document representation", "multi-label text classification", "label-specific attention network"]} +{"id": "roth-etal-2008-arabic", "title": "Arabic Morphological Tagging, Diacritization, and Lemmatization Using Lexeme Models and Feature Ranking", "abstract": "We investigate the tasks of general morphological tagging, diacritization, and lemmatization for Arabic. We show that for all tasks we consider, both modeling the lexeme explicitly, and retuning the weights of individual classifiers for the specific task, improve the performance.", "keyphrases": ["diacritization", "lemmatization", "arabic word"]} +{"id": "dahlmeier-ng-2011-correcting", "title": "Correcting Semantic Collocation Errors with L1-induced Paraphrases", "abstract": "We present a novel approach for automatic collocation error correction in learner English which is based on paraphrases extracted from parallel corpora. Our key assumption is that collocation errors are often caused by semantic similarity in the first language (L1-language) of the writer. An analysis of a large corpus of annotated learner English confirms this assumption. We evaluate our approach on real-world learner data and show that L1-induced paraphrases outperform traditional approaches based on edit distance, homophones, and WordNet synonyms.", "keyphrases": ["collocation error", "paraphrase", "semantic error correction"]} +{"id": "li-etal-2009-non", "title": "A Non-negative Matrix Tri-factorization Approach to Sentiment Classification with Lexical Prior Knowledge", "abstract": "Sentiment classification refers to the task of automatically identifying whether a given piece of text expresses positive or negative opinion towards a subject at hand. The proliferation of user-generated web content such as blogs, discussion forums and online review sites has made it possible to perform large-scale mining of public opinion. Sentiment modeling is thus becoming a critical component of market intelligence and social media technologies that aim to tap into the collective wisdom of crowds. In this paper, we consider the problem of learning high-quality sentiment models with minimal manual supervision. We propose a novel approach to learn from lexical prior knowledge in the form of domain-independent sentiment-laden terms, in conjunction with domain-dependent unlabeled data and a few labeled documents. Our model is based on a constrained non-negative tri-factorization of the term-document matrix which can be implemented using simple update rules. Extensive experimental studies demonstrate the effectiveness of our approach on a variety of real-world sentiment prediction tasks.", "keyphrases": ["non-negative matrix tri-factorization", "sentiment classification", "lexical prior knowledge"]} +{"id": "schroeder-etal-2009-word", "title": "Word Lattices for Multi-Source Translation", "abstract": "Multi-source statistical machine translation is the process of generating a single translation from multiple inputs. 
Previous work has focused primarily on selecting from potential outputs of separate translation systems, and solely on multi-parallel corpora and test sets. We demonstrate how multi-source translation can be adapted for multiple monolingual inputs. We also examine different approaches to dealing with multiple sources, including consensus decoding, and we present a novel method of input combination to generate lattices for multi-source translation within a single translation model.", "keyphrases": ["lattice", "multi-source translation", "paraphrase"]} +{"id": "niessen-ney-2004-statistical", "title": "Statistical Machine Translation with Scarce Resources Using Morpho-syntactic Information", "abstract": "In statistical machine translation, correspondences between the words in the source and the target language are learned from parallel corpora, and often little or no linguistic knowledge is used to structure the underlying models. In particular, existing statistical systems for machine translation often treat different inflected forms of the same lemma as if they were independent of one another. The bilingual training data can be better exploited by explicitly taking into account the interdependencies of related inflected forms. We propose the construction of hierarchical lexicon models on the basis of equivalence classes of words. In addition, we introduce sentence-level restructuring transformations which aim at the assimilation of word order in related sentences. We have systematically investigated the amount of bilingual training data required to maintain an acceptable quality of machine translation. The combination of the suggested methods for improving translation quality in frameworks with scarce resources has been successfully tested: We were able to reduce the amount of bilingual training data to less than 10% of the original corpus, while losing only 1.6% in translation quality. The improvement of the translation results is demonstrated on two German-English corpora taken from the Verbmobil task and the Nespole! task.", "keyphrases": ["machine translation", "scarce resource", "morpho-syntactic information", "idiomatic multi-word expression", "morphological analysis"]} +{"id": "toutanova-cherry-2009-global", "title": "A global model for joint lemmatization and part-of-speech prediction", "abstract": "We present a global joint model for lemmatization and part-of-speech prediction. Using only morphological lexicons and unlabeled data, we learn a partially-supervised part-of-speech tagger and a lemmatizer which are combined using features on a dynamically linked dependency structure of words. We evaluate our model on English, Bulgarian, Czech, and Slovene, and demonstrate substantial improvements over both a direct transduction approach to lemmatization and a pipelined approach, which predicts part-of-speech tags before lemmatization.", "keyphrases": ["lemmatization", "part-of-speech prediction", "joint model"]} +{"id": "chen-etal-2020-mixtext", "title": "MixText: Linguistically-Informed Interpolation of Hidden Space for Semi-Supervised Text Classification", "abstract": "This paper presents MixText, a semi-supervised learning method for text classification, which uses our newly designed data augmentation method called TMix. TMix creates a large number of augmented training samples by interpolating text in hidden space. Moreover, we leverage recent advances in data augmentation to guess low-entropy labels for unlabeled data, hence making them as easy to use as labeled data. 
By mixing labeled, unlabeled and augmented data, MixText significantly outperformed current pre-trained and fine-tuned models and other state-of-the-art semi-supervised learning methods on several text classification benchmarks. The improvement is especially prominent when supervision is extremely limited. We have publicly released our code at .", "keyphrases": ["hidden space", "semi-supervised text classification", "tmix", "unlabeled data", "mixtext"]} +{"id": "naik-etal-2018-stress", "title": "Stress Test Evaluation for Natural Language Inference", "abstract": "Natural language inference (NLI) is the task of determining if a natural language hypothesis can be inferred from a given premise in a justifiable manner. NLI was proposed as a benchmark task for natural language understanding. Existing models perform well on standard datasets for NLI, achieving impressive results across different genres of text. However, the extent to which these models understand the semantic content of sentences is unclear. In this work, we propose an evaluation methodology consisting of automatically constructed \u201cstress tests\u201d that allow us to examine whether systems have the ability to make real inferential decisions. Our evaluation of six sentence-encoder models on these stress tests reveals strengths and weaknesses of these models with respect to challenging linguistic phenomena, and suggests important directions for future work in this area.", "keyphrases": ["natural language inference", "nli", "weakness", "stress test evaluation", "reasoning"]} +{"id": "guo-etal-2018-soft", "title": "Soft Layer-Specific Multi-Task Summarization with Entailment and Question Generation", "abstract": "An accurate abstractive summary of a document should contain all its salient information and should be logically entailed by the input document. We improve these important aspects of abstractive summarization via multi-task learning with the auxiliary tasks of question generation and entailment generation, where the former teaches the summarization model how to look for salient questioning-worthy details, and the latter teaches the model how to rewrite a summary which is a directed-logical subset of the input document. We also propose novel multi-task architectures with high-level (semantic) layer-specific sharing across multiple encoder and decoder layers of the three tasks, as well as soft-sharing mechanisms (and show performance ablations and analysis examples of each contribution). Overall, we achieve statistically significant improvements over the state-of-the-art on both the CNN/DailyMail and Gigaword datasets, as well as on the DUC-2002 transfer setup. We also present several quantitative and qualitative analysis studies of our model's learned saliency and entailment skills.", "keyphrases": ["summarization", "question generation", "auxiliary task", "entailment generation"]} +{"id": "dale-kilgarriff-2011-helping", "title": "Helping Our Own: The HOO 2011 Pilot Shared Task", "abstract": "The aim of the Helping Our Own (HOO) Shared Task is to promote the development of automated tools and techniques that can assist authors in the writing task, with a specific focus on writing within the natural language processing community. This paper reports on the results of a pilot run of the shared task, in which six teams participated. 
We describe the nature of the task and the data used, report on the results achieved, and discuss some of the things we learned that will guide future versions of the task.", "keyphrases": ["hoo", "shared task", "series", "learner", "non-native speaker"]} +{"id": "lu-etal-2018-neural", "title": "A neural interlingua for multilingual machine translation", "abstract": "We incorporate an explicit neural interlingua into a multilingual encoder-decoder neural machine translation (NMT) architecture. We demonstrate that our model learns a language-independent representation by performing direct zero-shot translation (without using pivot translation), and by using the source sentence embeddings to create an English Yelp review classifier that, through the mediation of the neural interlingua, can also classify French and German reviews. Furthermore, we show that, despite using a smaller number of parameters than a pairwise collection of bilingual NMT models, our approach produces comparable BLEU scores for each language pair in WMT15.", "keyphrases": ["interlingua", "machine translation", "recurrent layer"]} +{"id": "van-der-wees-etal-2017-dynamic", "title": "Dynamic Data Selection for Neural Machine Translation", "abstract": "Intelligent selection of training data has proven a successful technique to simultaneously increase training efficiency and translation performance for phrase-based machine translation (PBMT). With the recent increase in popularity of neural machine translation (NMT), we explore in this paper to what extent and how NMT can also benefit from data selection. While state-of-the-art data selection (Axelrod et al., 2011) consistently performs well for PBMT, we show that gains are substantially lower for NMT. Next, we introduce `dynamic data selection' for NMT, a method in which we vary the selected subset of training data between different training epochs. Our experiments show that the best results are achieved when applying a technique we call `gradual fine-tuning', with improvements up to +2.6 BLEU over the original data selection approach and up to +3.1 BLEU over a general baseline.", "keyphrases": ["data selection", "neural machine translation", "gradual fine-tuning", "language model", "in-domain sentence"]} +{"id": "han-etal-2012-geolocation", "title": "Geolocation Prediction in Social Media Data by Finding Location Indicative Words", "abstract": "Geolocation prediction is vital to geospatial applications like localised search and local event detection. Predominantly, social media geolocation models are based on full text data, including common words with no geospatial dimension (e.g. today) and noisy strings (tmrw), potentially hampering prediction and leading to slower/more memory-intensive models. In this paper, we focus on finding location indicative words (LIWs) via feature selection, and establishing whether the reduced feature set boosts geolocation accuracy. Our results show that an information gain ratio-based approach surpasses other methods at LIW selection, outperforming state-of-the-art geolocation prediction methods by 10.6% in accuracy and reducing the mean and median of prediction error distance by 45km and 209km, respectively, on a public dataset. We further formulate notions of prediction confidence, and demonstrate that performance is even higher in cases where our model is more confident, striking a trade-off between accuracy and coverage. 
Finally, the identified LIWs reveal regional language differences, which could be potentially useful for lexicographers.", "keyphrases": ["location", "indicative word", "geolocation prediction"]} +{"id": "socher-etal-2013-parsing", "title": "Parsing with Compositional Vector Grammars", "abstract": "Natural language parsing has typically been done with small sets of discrete categories such as NP and VP, but this representation does not capture the full syntactic nor semantic richness of linguistic phrases, and attempts to improve on this by lexicalizing phrases or splitting categories only partly address the problem at the cost of huge feature spaces and sparseness. Instead, we introduce a Compositional Vector Grammar (CVG), which combines PCFGs with a syntactically untied recursive neural network that learns syntactico-semantic, compositional vector representations. The CVG improves the PCFG of the Stanford Parser by 3.8% to obtain an F1 score of 90.4%. It is fast to train and, implemented approximately as an efficient reranker, it is about 20% faster than the current Stanford factored parser. The CVG learns a soft notion of head words and improves performance on the types of ambiguities that require semantic information such as PP attachments.", "keyphrases": ["compositional vector grammar", "recursive neural network", "rnn", "node", "sentiment analysis"]} +{"id": "geiger-etal-2020-neural", "title": "Neural Natural Language Inference Models Partially Embed Theories of Lexical Entailment and Negation", "abstract": "We address whether neural models for Natural Language Inference (NLI) can learn the compositional interactions between lexical entailment and negation, using four methods: the behavioral evaluation methods of (1) challenge test sets and (2) systematic generalization tasks, and the structural evaluation methods of (3) probes and (4) interventions. To facilitate this holistic evaluation, we present Monotonicity NLI (MoNLI), a new naturalistic dataset focused on lexical entailment and negation. In our behavioral evaluations, we find that models trained on general-purpose NLI datasets fail systematically on MoNLI examples containing negation, but that MoNLI fine-tuning addresses this failure. In our structural evaluations, we look for evidence that our top-performing BERT-based model has learned to implement the monotonicity algorithm behind MoNLI. Probes yield evidence consistent with this conclusion, and our intervention experiments bolster this, showing that the causal dynamics of the model mirror the causal dynamics of this algorithm on subsets of MoNLI. This suggests that the BERT model at least partially embeds a theory of lexical entailment and negation at an algorithmic level.", "keyphrases": ["lexical entailment", "negation", "nli"]} +{"id": "sennrich-etal-2016-controlling", "title": "Controlling Politeness in Neural Machine Translation via Side Constraints", "abstract": "Many languages use honorifics to express politeness, social distance, or the relative social status between the speaker and their addressee(s). In machine translation from a language without honorifics such as English, it is difficult to predict the appropriate honorific, but users may want to control the level of politeness in the output. In this paper, we perform a pilot study to control honorifics in neural machine translation (NMT) via side constraints, focusing on English \u2192 German. 
We show that by marking up the (English) source side of the training data with a feature that encodes the use of honorifics on the (German) target side, we can control the honorifics produced at test time. Experiments show that the choice of honorifics has a big impact on translation quality as measured by BLEU, and oracle experiments show that substantial improvements are possible by constraining the translation to the desired level of politeness.", "keyphrases": ["politeness", "neural machine translation", "pronoun", "voice", "source text"]} +{"id": "pavlick-callison-burch-2016-simple", "title": "Simple PPDB: A Paraphrase Database for Simplification", "abstract": "We release the Simple Paraphrase Database, a subset of the Paraphrase Database (PPDB) adapted for the task of text simplification. We train a supervised model to associate simplification scores with each phrase pair, producing rankings competitive with state-of-the-art lexical simplification models. Our new simplification database contains 4.5 million paraphrase rules, making it the largest available resource for lexical simplification.", "keyphrases": ["ppdb", "paraphrase database", "simplification"]} +{"id": "galley-etal-2004-whats", "title": "What's in a translation rule?", "abstract": "We propose a theory that gives formal semantics to word-level alignments defined over parallel corpora. We use our theory to introduce a linear algorithm that can be used to derive from word-aligned, parallel corpora the minimal set of syntactically motivated transformation rules that explain human translation data.", "keyphrases": ["translation rule", "parallel corpora", "syntax-based model"]} +{"id": "dahlmeier-etal-2013-building", "title": "Building a Large Annotated Corpus of Learner English: The NUS Corpus of Learner English", "abstract": "We describe the NUS Corpus of Learner English (NUCLE), a large, fully annotated corpus of learner English that is freely available for research purposes. The goal of the corpus is to provide a large data resource for the development and evaluation of grammatical error correction systems. Although NUCLE has been available for almost two years, there has been no reference paper that describes the corpus in detail. In this paper, we address this need. We describe the annotation schema and the data collection and annotation process of NUCLE. Most importantly, we report on an unpublished study of annotator agreement for grammatical error correction. Finally, we present statistics on the distribution of grammatical errors in the NUCLE corpus.", "keyphrases": ["learner english", "nus corpus", "grammatical error", "national university"]} +{"id": "cucerzan-brill-2004-spelling", "title": "Spelling Correction as an Iterative Process that Exploits the Collective Knowledge of Web Users", "abstract": "Logs of user queries to an internet search engine provide a large amount of implicit and explicit information about language. In this paper, we investigate their use in spelling correction of search queries, a task which poses many additional challenges beyond the traditional spelling correction problem. 
We present an approach that uses an iterative transformation of the input query strings into other strings that correspond to more and more likely queries according to statistics extracted from internet search query logs.", "keyphrases": ["iterative process", "search query", "spelling correction", "chinese", "trust dictionary"]} +{"id": "ponzetto-strube-2006-exploiting", "title": "Exploiting Semantic Role Labeling, WordNet and Wikipedia for Coreference Resolution", "abstract": "In this paper we present an extension of a machine learning based coreference resolution system which uses features induced from different semantic knowledge sources. These features represent knowledge mined from WordNet and Wikipedia, as well as information about semantic role labels. We show that semantic features indeed improve the performance on different referring expression types such as pronouns and common nouns.", "keyphrases": ["wikipedia", "coreference resolution", "knowledge source", "mention", "semantic similarity"]} +{"id": "bali-etal-2014-borrowing", "title": "\u201cI am borrowing ya mixing ?\u201d An Analysis of English-Hindi Code Mixing in Facebook", "abstract": "Code-Mixing is a frequently observed phenomenon in social media content generated by multi-lingual users. The processing of such data for linguistic analysis as well as computational modelling is challenging due to the linguistic complexity resulting from the nature of the mixing as well as the presence of non-standard variations in spellings and grammar, and transliteration. Our analysis shows the extent of Code-Mixing in English-Hindi data. The classification of Code-Mixed words based on frequency and linguistic typology underline the fact that while there are easily identifiable cases of borrowing and mixing at the two ends, a large majority of the words form a continuum in the middle, emphasizing the need to handle these at different levels for automatic processing of the data.", "keyphrases": ["mixing", "facebook", "spelling", "bilingual user", "code-mixed data"]} +{"id": "riloff-etal-2003-learning", "title": "Learning subjective nouns using extraction pattern bootstrapping", "abstract": "We explore the idea of creating a subjectivity classifier that uses lists of subjective nouns learned by bootstrapping algorithms. The goal of our research is to develop a system that can distinguish subjective sentences from objective sentences. First, we use two bootstrapping algorithms that exploit extraction patterns to learn sets of subjective nouns. Then we train a Naive Bayes classifier using the subjective nouns, discourse features, and subjectivity clues identified in prior research. The bootstrapping algorithms learned over 1000 subjective nouns, and the subjectivity classifier performed well, achieving 77% recall with 81% precision.", "keyphrases": ["noun", "extraction pattern", "self-training", "newswire text", "previous research"]} +{"id": "tsai-etal-2019-multimodal", "title": "Multimodal Transformer for Unaligned Multimodal Language Sequences", "abstract": "Human language is often multimodal, which comprehends a mixture of natural language, facial gestures, and acoustic behaviors. However, two major challenges in modeling such multimodal human language time-series data exist: 1) inherent data non-alignment due to variable sampling rates for the sequences from each modality; and 2) long-range dependencies between elements across modalities. 
In this paper, we introduce the Multimodal Transformer (MulT) to generically address the above issues in an end-to-end manner without explicitly aligning the data. At the heart of our model is the directional pairwise crossmodal attention, which attends to interactions between multimodal sequences across distinct time steps and latently adapts streams from one modality to another. Comprehensive experiments on both aligned and non-aligned multimodal time-series show that our model outperforms state-of-the-art methods by a large margin. In addition, empirical analysis suggests that correlated crossmodal signals can be captured by the proposed crossmodal attention mechanism in MulT.", "keyphrases": ["mult", "multimodal transformer", "cross-modal attention", "emotion"]} +{"id": "liu-etal-2020-multilingual-denoising", "title": "Multilingual Denoising Pre-training for Neural Machine Translation", "abstract": "This paper demonstrates that multilingual denoising pre-training produces significant performance gains across a wide variety of machine translation (MT) tasks. We present mBART\u2014a sequence-to-sequence denoising auto-encoder pre-trained on large-scale monolingual corpora in many languages using the BART objective (Lewis et al., 2019). mBART is the first method for pre-training a complete sequence-to-sequence model by denoising full texts in multiple languages, whereas previous approaches have focused only on the encoder, decoder, or reconstructing parts of the text. Pre-training a complete model allows it to be directly fine-tuned for supervised (both sentence-level and document-level) and unsupervised machine translation, with no task-specific modifications. We demonstrate that adding mBART initialization produces performance gains in all but the highest-resource settings, including up to 12 BLEU points for low resource MT and over 5 BLEU points for many document-level and unsupervised models. We also show that it enables transfer to language pairs with no bi-text or that were not in the pre-training corpus, and present extensive analysis of which factors contribute the most to effective pre-training.", "keyphrases": ["neural machine translation", "multilingual denoising", "language model", "pre-trained model", "encoder-decoder model"]} +{"id": "liu-etal-2019-inoculation", "title": "Inoculation by Fine-Tuning: A Method for Analyzing Challenge Datasets", "abstract": "Several datasets have recently been constructed to expose brittleness in models trained on existing benchmarks. While model performance on these challenge datasets is significantly lower compared to the original benchmark, it is unclear what particular weaknesses they reveal. For example, a challenge dataset may be difficult because it targets phenomena that current models cannot capture, or because it simply exploits blind spots in a model's specific training set. We introduce inoculation by fine-tuning, a new analysis method for studying challenge datasets by exposing models (the metaphorical patient) to a small amount of data from the challenge dataset (a metaphorical pathogen) and assessing how well they can adapt. We apply our method to analyze the NLI \u201cstress tests\u201d (Naik et al., 2018) and the Adversarial SQuAD dataset (Jia and Liang, 2017). We show that after slight exposure, some of these datasets are no longer challenging, while others remain difficult. 
Our results indicate that failures on challenge datasets may lead to very different conclusions about models, training datasets, and the challenge datasets themselves.", "keyphrases": ["fine-tuning", "failure", "inoculation"]} +{"id": "solorio-etal-2014-overview", "title": "Overview for the First Shared Task on Language Identification in Code-Switched Data", "abstract": "We present an overview of the first shared task on language identification on code-switched data. The shared task included code-switched data from four language pairs: Modern Standard Arabic-Dialectal Arabic (MSA-DA), Mandarin-English (MAN-EN), Nepali-English (NEP-EN), and Spanish-English (SPA-EN). A total of seven teams participated in the task and submitted 42 system runs. The evaluation showed that language identification at the token level is more difficult when the languages present are closely related, as in the case of MSA-DA, where the prediction performance was the lowest among all language pairs. In contrast, the language pairs with the highest F-measure were SPA-EN and NEP-EN. The task made evident that language identification in code-switched data is still far from solved and warrants further research.", "keyphrases": ["language identification", "code-switched data", "codeswitched data", "code-switched text"]} +{"id": "clark-curran-2004-parsing", "title": "Parsing the WSJ Using CCG and Log-Linear Models", "abstract": "This paper describes and evaluates log-linear parsing models for Combinatory Categorial Grammar (CCG). A parallel implementation of the L-BFGS optimisation algorithm is described, which runs on a Beowulf cluster allowing the complete Penn Treebank to be used for estimation. We also develop a new efficient parsing algorithm for CCG which maximises expected recall of dependencies. We compare models which use all CCG derivations, including non-standard derivations, with normal-form models. The performances of the two models are comparable and the results are competitive with existing wide-coverage CCG parsers.", "keyphrases": ["ccg", "combinatory categorial grammar", "derivation"]} +{"id": "vyas-etal-2014-pos", "title": "POS Tagging of English-Hindi Code-Mixed Social Media Content", "abstract": "Code-mixing is frequently observed in user generated content on social media, especially from multilingual users. The linguistic complexity of such content is compounded by the presence of spelling variations, transliteration and non-adherence to formal grammar. We describe our initial efforts to create a multi-level annotated corpus of Hindi-English code-mixed text collated from Facebook forums, and explore language identification, back-transliteration, normalization and POS tagging of this data. Our results show that language identification and transliteration for Hindi are two major challenges that impact POS tagging accuracy.", "keyphrases": ["facebook forum", "language identification", "pos tagging", "social medium text", "code-mixed data"]} +{"id": "suhr-etal-2019-corpus", "title": "A Corpus for Reasoning about Natural Language Grounded in Photographs", "abstract": "We introduce a new dataset for joint reasoning about natural language and images, with a focus on semantic diversity, compositionality, and visual reasoning challenges. The data contains 107,292 examples of English sentences paired with web photographs. The task is to determine whether a natural language caption is true about a pair of photographs. 
We crowdsource the data using sets of visually rich images and a compare-and-contrast task to elicit linguistically diverse language. Qualitative analysis shows the data requires compositional joint reasoning, including about quantities, comparisons, and relations. Evaluation using state-of-the-art visual reasoning methods shows the data presents a strong challenge.", "keyphrases": ["reasoning", "image", "nlvr2"]} +{"id": "surdeanu-etal-2003-using", "title": "Using Predicate-Argument Structures for Information Extraction", "abstract": "In this paper we present a novel, customizable IE paradigm that takes advantage of predicate-argument structures. We also introduce a new way of automatically identifying predicate argument structures, which is central to our IE paradigm. It is based on: (1) an extended set of features; and (2) inductive decision tree learning. The experimental results prove our claim that accurate predicate-argument structures enable high quality IE results.", "keyphrases": ["predicate-argument structure", "information extraction", "machine translation"]} +{"id": "jiang-zhou-2008-generating", "title": "Generating Chinese Couplets using a Statistical MT Approach", "abstract": "Part of the unique cultural heritage of China is the game of Chinese couplets (duilian). One person challenges the other person with a sentence (first sentence). The other person then replies with a sentence (second sentence) equal in length and word segmentation, in a way that corresponding words in the two sentences match each other by obeying certain constraints on semantic, syntactic, and lexical relatedness. This task is viewed as a difficult problem in AI and has not been explored in the research community. \n \nIn this paper, we regard this task as a kind of machine translation process. We present a phrase-based SMT approach to generate the second sentence. First, the system takes as input the first sentence, and generates as output an N-best list of proposed second sentences, using a phrase-based SMT decoder. Then, a set of filters is used to remove candidates violating linguistic constraints. Finally, a Ranking SVM is applied to rerank the candidates. A comprehensive evaluation, using both human judgments and BLEU scores, has been conducted, and the results demonstrate that this approach is very successful.", "keyphrases": ["chinese couplet", "first line", "smt system", "poetry generation"]} +{"id": "punyakanok-etal-2008-importance", "title": "The Importance of Syntactic Parsing and Inference in Semantic Role Labeling", "abstract": "We present a general framework for semantic role labeling. The framework combines a machine-learning technique with an integer linear programming-based inference procedure, which incorporates linguistic and structural constraints into a global decision process. Within this framework, we study the role of syntactic parsing information in semantic role labeling. We show that full syntactic parsing information is, by far, most relevant in identifying the argument, especially, in the very first stagethe pruning stage. Surprisingly, the quality of the pruning stage cannot be solely determined based on its recall and precision. Instead, it depends on the characteristics of the output candidates that determine the difficulty of the downstream problems. Motivated by this observation, we propose an effective and simple approach of combining different semantic role labeling systems through joint inference, which significantly improves its performance. 
Our system has been evaluated in the CoNLL-2005 shared task on semantic role labeling, and achieves the highest F1 score among 19 participants.", "keyphrases": ["semantic role labeling", "pruning stage", "srl", "essential role"]} +{"id": "jiang-etal-2011-target", "title": "Target-dependent Twitter Sentiment Classification", "abstract": "Sentiment analysis on Twitter data has attracted much attention recently. In this paper, we focus on target-dependent Twitter sentiment classification; namely, given a query, we classify the sentiments of the tweets as positive, negative or neutral according to whether they contain positive, negative or neutral sentiments about that query. Here the query serves as the target of the sentiments. The state-of-the-art approaches for solving this problem always adopt the target-independent strategy, which may assign irrelevant sentiments to the given target. Moreover, the state-of-the-art approaches only take the tweet to be classified into consideration when classifying the sentiment; they ignore its context (i.e., related tweets). However, because tweets are usually short and more ambiguous, sometimes it is not enough to consider only the current tweet for sentiment classification. In this paper, we propose to improve target-dependent Twitter sentiment classification by 1) incorporating target-dependent features; and 2) taking related tweets into consideration. According to the experimental results, our approach greatly improves the performance of target-dependent sentiment classification.", "keyphrases": ["twitter sentiment classification", "query", "target-dependent feature", "syntactic structure", "polarity"]} +{"id": "augenstein-etal-2016-stance", "title": "Stance Detection with Bidirectional Conditional Encoding", "abstract": "Stance detection is the task of classifying the attitude expressed in a text towards a target such as Hillary Clinton to be \"positive\", \"negative\" or \"neutral\". Previous work has assumed that either the target is mentioned in the text or that training data for every target is given. This paper considers the more challenging version of this task, where targets are not always mentioned and no training data is available for the test targets. We experiment with conditional LSTM encoding, which builds a representation of the tweet that is dependent on the target, and demonstrate that it outperforms encoding the tweet and the target independently. Performance is improved further when the conditional model is augmented with bidirectional encoding. We evaluate our approach on the SemEval 2016 Task 6 Twitter Stance Detection corpus achieving performance second best only to a system trained on semi-automatically labelled tweets for the test target. When such weak supervision is added, our approach achieves state-of-the-art results.", "keyphrases": ["attitude", "stance detection", "tweet representation"]} +{"id": "liu-etal-2005-log", "title": "Log-Linear Models for Word Alignment", "abstract": "We present a framework for word alignment based on log-linear models. All knowledge sources are treated as feature functions, which depend on the source language sentence, the target language sentence and possible additional variables. Log-linear models allow statistical alignment models to be easily extended by incorporating syntactic information. In this paper, we use IBM Model 3 alignment probabilities, POS correspondence, and bilingual dictionary coverage as features. 
Our experiments show that log-linear models significantly outperform IBM translation models.", "keyphrases": ["word alignment", "knowledge source", "coverage", "log-linear model"]} +{"id": "hockenmaier-steedman-2007-ccgbank", "title": "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", "abstract": "This article presents an algorithm for translating the Penn Treebank into a corpus of Combinatory Categorial Grammar (CCG) derivations augmented with local and long-range word-word dependencies. The resulting corpus, CCGbank, includes 99.4% of the sentences in the Penn Treebank. It is available from the Linguistic Data Consortium, and has been used to train wide-coverage statistical parsers that obtain state-of-the-art rates of dependency recovery. In order to obtain linguistically adequate CCG analyses, and to eliminate noise and inconsistencies in the original annotation, an extensive analysis of the constructions and annotations in the Penn Treebank was called for, and a substantial number of changes to the Treebank were necessary. We discuss the implications of our findings for the extraction of other linguistically expressive grammars from the Treebank, and for the design of future treebanks.", "keyphrases": ["derivation", "wide-coverage statistical parser", "ccgbank", "ptb", "english verb join"]} +{"id": "lee-etal-2018-deterministic", "title": "Deterministic Non-Autoregressive Neural Sequence Modeling by Iterative Refinement", "abstract": "We propose a conditional non-autoregressive neural sequence model based on iterative refinement. The proposed model is designed based on the principles of latent variable models and denoising autoencoders, and is generally applicable to any sequence generation task. We extensively evaluate the proposed model on machine translation (En-De and En-Ro) and image caption generation, and observe that it significantly speeds up decoding while maintaining the generation quality comparable to the autoregressive counterpart.", "keyphrases": ["iterative refinement", "latent variable model", "machine translation", "nat", "non-autoregressive prediction"]} +{"id": "nakazawa-etal-2016-aspec", "title": "ASPEC: Asian Scientific Paper Excerpt Corpus", "abstract": "In this paper, we describe the details of the ASPEC (Asian Scientific Paper Excerpt Corpus), which is the first large-size parallel corpus of scientific paper domain. ASPEC was constructed in the Japanese-Chinese machine translation project conducted between 2006 and 2010 using the Special Coordination Funds for Promoting Science and Technology. It consists of a Japanese-English scientific paper abstract corpus of approximately 3 million parallel sentences (ASPEC-JE) and a Chinese-Japanese scientific paper excerpt corpus of approximately 0.68 million parallel sentences (ASPEC-JC). ASPEC is used as the official dataset for the machine translation evaluation workshop WAT (Workshop on Asian Translation).", "keyphrases": ["scientific paper", "aspec", "translation task"]} +{"id": "lee-etal-2014-sentence", "title": "A Sentence Judgment System for Grammatical Error Detection", "abstract": "This study develops a sentence judgment system using both rule-based and n-gram statistical methods to detect grammatical errors in Chinese sentences. The rule-based method provides 142 rules developed by linguistic experts to identify potential rule violations in input sentences. 
The n-gram statistical method relies on the n-gram scores of both correct and incorrect training sentences to determine the correctness of the input sentences, providing learners with improved understanding of linguistic rules and n-gram frequencies.", "keyphrases": ["sentence judgment system", "grammatical error detection", "n-gram", "rule-based linguistic analysis"]} +{"id": "maynez-etal-2020-faithfulness", "title": "On Faithfulness and Factuality in Abstractive Summarization", "abstract": "It is well known that the standard likelihood training and approximate decoding objectives in neural text generation models lead to less human-like responses for open-ended tasks such as language modeling and story generation. In this paper we have analyzed limitations of these models for abstractive document summarization and found that these models are highly prone to hallucinate content that is unfaithful to the input document. We conducted a large scale human evaluation of several neural abstractive summarization systems to better understand the types of hallucinations they produce. Our human annotators found substantial amounts of hallucinated content in all model generated summaries. However, our analysis does show that pretrained models are better summarizers not only in terms of raw metrics, i.e., ROUGE, but also in generating faithful and factual summaries as evaluated by humans. Furthermore, we show that textual entailment measures better correlate with faithfulness than standard metrics, potentially leading the way to automatic evaluation metrics as well as training and decoding criteria.", "keyphrases": ["faithfulness", "abstractive summarization", "text generation model", "consistency", "original document"]} +{"id": "huang-etal-2021-dagn", "title": "DAGN: Discourse-Aware Graph Network for Logical Reasoning", "abstract": "Recent QA with logical reasoning questions requires passage-level relations among the sentences. However, current approaches still focus on sentence-level relations interacting among tokens. In this work, we explore aggregating passage-level clues for solving logical reasoning QA by using discourse-based information. We propose a discourse-aware graph network (DAGN) that reasons relying on the discourse structure of the texts. The model encodes discourse information as a graph with elementary discourse units (EDUs) and discourse relations, and learns the discourse-aware features via a graph network for downstream QA tasks. Experiments are conducted on two logical reasoning QA datasets, ReClor and LogiQA, and our proposed DAGN achieves competitive results. The source code is available at .", "keyphrases": ["discourse-aware graph network", "logical reasoning", "dagn"]} +{"id": "lavie-agarwal-2007-meteor", "title": "METEOR: An Automatic Metric for MT Evaluation with High Levels of Correlation with Human Judgments", "abstract": "Meteor is an automatic metric for Machine Translation evaluation which has been demonstrated to have high levels of correlation with human judgments of translation quality, significantly outperforming the more commonly used Bleu metric. It is one of several automatic metrics used in this year's shared task within the ACL WMT-07 workshop. This paper recaps the technical details underlying the metric and describes recent improvements in the metric. 
The latest release includes improved metric parameters and extends the metric to support evaluation of MT output in Spanish, French and German, in addition to English.", "keyphrases": ["automatic metric", "judgment", "translation quality", "meteor"]} +{"id": "davidson-etal-2019-racial", "title": "Racial Bias in Hate Speech and Abusive Language Detection Datasets", "abstract": "Technologies for abusive language detection are being developed and applied with little consideration of their potential biases. We examine racial bias in five different sets of Twitter data annotated for hate speech and abusive language. We train classifiers on these datasets and compare the predictions of these classifiers on tweets written in African-American English with those written in Standard American English. The results show evidence of systematic racial bias in all datasets, as classifiers trained on them tend to predict that tweets written in African-American English are abusive at substantially higher rates. If these abusive language detection systems are used in the field they will therefore have a disproportionate negative impact on African-American social media users. Consequently, these systems may discriminate against the groups who are often the targets of the abuse we are trying to detect.", "keyphrases": ["hate speech", "language detection", "racial bias"]} +{"id": "thorne-etal-2018-fact", "title": "The Fact Extraction and VERification (FEVER) Shared Task", "abstract": "We present the results of the first Fact Extraction and VERification (FEVER) Shared Task. The task challenged participants to classify whether human-written factoid claims could be SUPPORTED or REFUTED using evidence retrieved from Wikipedia. We received entries from 23 competing teams, 19 of which scored higher than the previously published baseline. The best performing system achieved a FEVER score of 64.21%. In this paper, we present the results of the shared task and a summary of the systems, highlighting commonalities and innovations among participating systems.", "keyphrases": ["fact extraction", "verification", "complex entity", "human-generated claim", "entailment"]} +{"id": "hoffart-etal-2011-robust", "title": "Robust Disambiguation of Named Entities in Text", "abstract": "Disambiguating named entities in natural-language text maps mentions of ambiguous names onto canonical entities like people or places, registered in a knowledge base such as DBpedia or YAGO. This paper presents a robust method for collective disambiguation, by harnessing context from knowledge bases and using a new form of coherence graph. It unifies prior approaches into a comprehensive framework that combines three measures: the prior probability of an entity being mentioned, the similarity between the contexts of a mention and a candidate entity, as well as the coherence among candidate entities for all mentions together. The method builds a weighted graph of mentions and candidate entities, and computes a dense subgraph that approximates the best joint mention-entity mapping. Experiments show that the new method significantly outperforms prior methods in terms of accuracy, with robust behavior across a variety of inputs.", "keyphrases": ["disambiguation", "mention", "knowledge base", "global coherence", "wikipedia"]} +{"id": "wan-2009-co", "title": "Co-Training for Cross-Lingual Sentiment Classification", "abstract": "The lack of Chinese sentiment corpora limits the research progress on Chinese sentiment classification. 
However, there are many freely available English sentiment corpora on the Web. This paper focuses on the problem of cross-lingual sentiment classification, which leverages an available English corpus for Chinese sentiment classification by using the English corpus as training data. Machine translation services are used for eliminating the language gap between the training set and test set, and English features and Chinese features are considered as two independent views of the classification problem. We propose a co-training approach to making use of unlabeled Chinese data. Experimental results show the effectiveness of the proposed approach, which can outperform the standard inductive classifiers and the transductive classifiers.", "keyphrases": ["sentiment classification", "chinese feature", "co-training"]} +{"id": "vu-etal-2016-combining", "title": "Combining Recurrent and Convolutional Neural Networks for Relation Classification", "abstract": "This paper investigates two different neural architectures for the task of relation classification: convolutional neural networks and recurrent neural networks. For both models, we demonstrate the effect of different architectural choices. We present a new context representation for convolutional neural networks for relation classification (extended middle context). Furthermore, we propose connectionist bi-directional recurrent neural networks and introduce ranking loss for their optimization. Finally, we show that combining convolutional and recurrent neural networks using a simple voting scheme is accurate enough to improve results. Our neural models achieve state-of-the-art results on the SemEval 2010 relation classification task.", "keyphrases": ["convolutional neural networks", "relation classification", "cnn"]} +{"id": "tai-etal-2015-improved", "title": "Improved Semantic Representations From Tree-Structured Long Short-Term Memory Networks", "abstract": "Because of their superior ability to preserve sequence information over time, Long Short-Term Memory (LSTM) networks, a type of recurrent neural network with a more complex computational unit, have obtained strong results on a variety of sequence modeling tasks. The only underlying LSTM structure that has been explored so far is a linear chain. However, natural language exhibits syntactic properties that would naturally combine words to phrases. We introduce the Tree-LSTM, a generalization of LSTMs to tree-structured network topologies. Tree-LSTMs outperform all existing systems and strong LSTM baselines on two tasks: predicting the semantic relatedness of two sentences (SemEval 2014, Task 1) and sentiment classification (Stanford Sentiment Treebank).", "keyphrases": ["short-term memory network", "tree-lstm", "tree-structured network topology", "sentiment classification", "constituent"]} +{"id": "young-etal-2014-image", "title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", "abstract": "We propose to use the visual denotations of linguistic expressions (i.e. the set of images they describe) to define novel denotational similarity metrics, which we show to be at least as beneficial as distributional similarities for two tasks that require semantic inference. To compute these denotational similarities, we construct a denotation graph, i.e. 
a subsumption hierarchy over constituents and their denotations, based on a large corpus of 30K images and 150K descriptive captions.", "keyphrases": ["denotation", "similarity metric", "image caption", "flickr30k", "entailment task"]} +{"id": "gasco-sanchez-etal-2022-socialdisner", "title": "The SocialDisNER shared task on detection of disease mentions in health-relevant content from social media: methods, evaluation, guidelines and corpora", "abstract": "There is a pressing need to exploit health-related content from social media, a global source of data where key health information is posted directly by citizens, patients and other healthcare stakeholders. Use cases of disease related social media mining include disease outbreak/surveillance, mental health and pharmacovigilance. Current efforts address the exploitation of social media beyond English. The SocialDisNER task, organized as part of the SMM4H 2022 initiative, has applied the LINKAGE methodology to select and annotate a Gold Standard corpus of 9,500 tweets in Spanish enriched with disease mentions generated by patients and medical professionals. As a complementary resource for teams participating in the SocialDisNER track, we have also created a large-scale corpus of 85,000 tweets, where in addition to disease mentions, other medical entities of relevance (e.g., medications, symptoms and procedures, among others) have been automatically labelled. Using these large-scale datasets, co-mention networks or knowledge graphs were released for each entity pair type. Out of the 47 teams registered for the task, 17 teams uploaded a total of 32 runs. The top-performing team achieved a very competitive 0.891 f-score, with a system trained following a continued pre-training strategy. We anticipate that the corpus and systems resulting from the SocialDisNER track might further foster health related text mining of social media content in Spanish and inspire disease detection strategies in other languages.", "keyphrases": ["disease mention", "social media mining", "spanish"]} +{"id": "liu-etal-2006-tree", "title": "Tree-to-String Alignment Template for Statistical Machine Translation", "abstract": "We present a novel translation model based on tree-to-string alignment template (TAT) which describes the alignment between a source parse tree and a target string. A TAT is capable of generating both terminals and non-terminals and performing reordering at both low and high levels. The model is linguistically syntax-based because TATs are extracted automatically from word-aligned, source side parsed parallel texts. To translate a source sentence, we first employ a parser to produce a source parse tree and then apply TATs to transform the tree into a target string. Our experiments show that the TAT-based model significantly outperforms Pharaoh, a state-of-the-art decoder for phrase-based models.", "keyphrases": ["alignment template", "statistical machine translation", "source side", "smt system", "syntax-based approach"]} +{"id": "han-etal-2018-fewrel", "title": "FewRel: A Large-Scale Supervised Few-Shot Relation Classification Dataset with State-of-the-Art Evaluation", "abstract": "We present a Few-Shot Relation Classification Dataset (FewRel), consisting of 70,000 sentences on 100 relations derived from Wikipedia and annotated by crowdworkers. The relation of each sentence is first recognized by distant supervision methods, and then filtered by crowdworkers. 
We adapt the most recent state-of-the-art few-shot learning methods for relation classification and conduct thorough evaluation of these methods. Empirical results show that even the most competitive few-shot learning models struggle on this task, especially as compared with humans. We also show that a range of different reasoning skills are needed to solve our task. These results indicate that few-shot relation classification remains an open problem and still requires further research. Our detailed analysis points out multiple directions for future research.", "keyphrases": ["relation classification dataset", "few-shot learning model", "fewrel", "baseline model"]} +{"id": "du-etal-2019-extracting", "title": "Extracting Symptoms and their Status from Clinical Conversations", "abstract": "This paper describes novel models tailored for a new application, that of extracting the symptoms mentioned in clinical conversations along with their status. Lack of any publicly available corpus in this privacy-sensitive domain led us to develop our own corpus, consisting of about 3K conversations annotated by professional medical scribes. We propose two novel deep learning approaches to infer the symptom names and their status: (1) a new hierarchical span-attribute tagging (SA-T) model, trained using curriculum learning, and (2) a variant of sequence-to-sequence model which decodes the symptoms and their status from a few speaker turns within a sliding window over the conversation. This task stems from a realistic application of assisting medical providers in capturing symptoms mentioned by patients from their clinical conversations. To reflect this application, we define multiple metrics. From inter-rater agreement, we find that the task is inherently difficult. We conduct comprehensive evaluations on several contrasting conditions and observe that the performance of the models ranges from an F-score of 0.5 to 0.8 depending on the condition. Our analysis not only reveals the inherent challenges of the task, but also provides useful directions to improve the models.", "keyphrases": ["symptom", "status", "clinical conversation"]} +{"id": "mairesse-walker-2007-personage", "title": "PERSONAGE: Personality Generation for Dialogue", "abstract": "Over the last fifty years, the \u201cBig Five\u201d model of personality traits has become a standard in psychology, and research has systematically documented correlations between a wide range of linguistic variables and the Big Five traits. A distinct line of research has explored methods for automatically generating language that varies along personality dimensions. We present PERSONAGE (PERSONAlity GEnerator), the first highly parametrizable language generator for extraversion, an important aspect of personality. We evaluate two personality generation methods: (1) direct generation with particular parameter settings suggested by the psychology literature; and (2) overgeneration and selection using statistical models trained from judge\u2019s ratings. 
Results show that both methods reliably generate utterances that vary along the extraversion dimension, according to human judges.", "keyphrases": ["personality trait", "language generator", "personage"]} +{"id": "zeldes-etal-2019-introduction", "title": "Introduction to Discourse Relation Parsing and Treebanking (DISRPT): 7th Workshop on Rhetorical Structure Theory and Related Formalisms", "abstract": "This overview summarizes the main contributions of the accepted papers at the 2019 workshop on Discourse Relation Parsing and Treebanking (DISRPT 2019). Co-located with NAACL 2019 in Minneapolis, the workshop's aim was to bring together researchers working on corpus-based and computational approaches to discourse relations. In addition to an invited talk, eighteen papers outlined below were presented, four of which were submitted as part of a shared task on elementary discourse unit segmentation and connective detection.", "keyphrases": ["treebanking", "disrpt", "discourse unit segmentation"]} +{"id": "kiritchenko-mohammad-2017-best", "title": "Best-Worst Scaling More Reliable than Rating Scales: A Case Study on Sentiment Intensity Annotation", "abstract": "Rating scales are a widely used method for data annotation; however, they present several challenges, such as difficulty in maintaining inter- and intra-annotator consistency. Best\u2013worst scaling (BWS) is an alternative method of annotation that is claimed to produce high-quality annotations while keeping the required number of annotations similar to that of rating scales. However, the veracity of this claim has never been systematically established. Here for the first time, we set up an experiment that directly compares the rating scale method with BWS. We show that with the same total number of annotations, BWS produces significantly more reliable results than the rating scale.", "keyphrases": ["rating scale", "best-worst scaling", "empirical experiment", "further detail"]} +{"id": "versley-etal-2008-bart-modular", "title": "BART: A modular toolkit for coreference resolution", "abstract": "Developing a full coreference system able to run all the way from raw text to semantic interpretation is a considerable engineering effort. Accordingly, there is very limited availability of off-the-shelf tools for researchers whose interests are not primarily in coreference or others who want to concentrate on a specific aspect of the problem. We present BART, a highly modular toolkit for developing coreference applications. In the Johns Hopkins workshop on using lexical and encyclopedic knowledge for entity disambiguation, the toolkit was used to extend a reimplementation of Soon et al.\u2019s proposal with a variety of additional syntactic and knowledge-based features, and experiment with alternative resolution processes, preprocessing tools, and classifiers. BART has been released as open source software and is available from ", "keyphrases": ["modular toolkit", "coreference resolution", "bart"]} +{"id": "munteanu-marcu-2005-improving", "title": "Improving Machine Translation Performance by Exploiting Non-Parallel Corpora", "abstract": "We present a novel method for discovering parallel sentences in comparable, non-parallel corpora. We train a maximum entropy classifier that, given a pair of sentences, can reliably determine whether or not they are translations of each other. Using this approach, we extract parallel data from large Chinese, Arabic, and English non-parallel newspaper corpora. 
We evaluate the quality of the extracted data by showing that it improves the performance of a state-of-the-art statistical machine translation system. We also show that a good-quality MT system can be built from scratch by starting with a very small parallel corpus (100,000 words) and exploiting a large non-parallel corpus. Thus, our method can be applied with great benefit to language pairs for which only scarce resources are available.", "keyphrases": ["non-parallel corpora", "maximum entropy classifier", "parallel data", "sentence extraction", "proper smt translation"]} +{"id": "wu-etal-2020-generating", "title": "Generating Diverse Translation from Model Distribution with Dropout", "abstract": "Despite the improvement of translation quality, neural machine translation (NMT) often suffers from the lack of diversity in its generation. In this paper, we propose to generate diverse translations by deriving a large number of possible models with Bayesian modelling and sampling models from them for inference. The possible models are obtained by applying concrete dropout to the NMT model and each of them has specific confidence for its prediction, which corresponds to a posterior model distribution under specific training data in the principle of Bayesian modeling. With variational inference, the posterior model distribution can be approximated with a variational distribution, from which the final models for inference are sampled. We conducted experiments on Chinese-English and English-German translation tasks and the results show that our method makes a better trade-off between diversity and accuracy.", "keyphrases": ["diverse translation", "dropout", "posterior model distribution"]} +{"id": "brown-etal-2005-automatic", "title": "Automatic Question Generation for Vocabulary Assessment", "abstract": "In the REAP system, users are automatically provided with texts to read targeted to their individual reading levels. To find appropriate texts, the user's vocabulary knowledge must be assessed. We describe an approach to automatically generating questions for vocabulary assessment. Traditionally, these assessments have been hand-written. Using data from WordNet, we generate 6 types of vocabulary questions. They can have several forms, including wordbank and multiple-choice. We present experimental results that suggest that these automatically-generated questions give a measure of vocabulary skill that correlates well with subject performance on independently developed human-written questions. In addition, strong correlations with standardized vocabulary tests point to the validity of our approach to automatic assessment of word knowledge.", "keyphrases": ["question generation", "vocabulary assessment", "distractor"]} +{"id": "dubossarsky-etal-2017-outta", "title": "Outta Control: Laws of Semantic Change and Inherent Biases in Word Representation Models", "abstract": "This article evaluates three proposed laws of semantic change. Our claim is that in order to validate a putative law of semantic change, the effect should be observed in the genuine condition but absent or reduced in a suitably matched control condition, in which no change can possibly have taken place. 
Our analysis shows that the effects reported in recent literature must be substantially revised: (i) the proposed negative correlation between meaning change and word frequency is shown to be largely an artefact of the models of word representation used; (ii) the proposed negative correlation between meaning change and prototypicality is shown to be much weaker than what has been claimed in prior art; and (iii) the proposed positive correlation between meaning change and polysemy is largely an artefact of word frequency. These empirical observations are corroborated by analytical proofs that show that count representations introduce an inherent dependence on word frequency, and thus word frequency cannot be evaluated as an independent factor with these representations.", "keyphrases": ["law", "semantic change", "control condition", "polysemy", "previous literature"]} +{"id": "zhu-etal-2010-monolingual", "title": "A Monolingual Tree-based Translation Model for Sentence Simplification", "abstract": "In this paper, we consider sentence simplification as a special form of translation with the complex sentence as the source and the simple sentence as the target. We propose a Tree-based Simplification Model (TSM), which, to our knowledge, is the first statistical simplification model covering splitting, dropping, reordering and substitution integrally. We also describe an efficient method to train our model with a large-scale parallel dataset obtained from the Wikipedia and Simple Wikipedia. The evaluation shows that our model achieves better readability scores than a set of baseline systems.", "keyphrases": ["sentence simplification", "simple english wikipedia", "pwkp", "paraphrasing"]} +{"id": "wu-etal-2017-sequential", "title": "Sequential Matching Network: A New Architecture for Multi-turn Response Selection in Retrieval-Based Chatbots", "abstract": "We study response selection for multi-turn conversation in retrieval based chatbots. Existing work either concatenates utterances in context or matches a response with a highly abstract context vector finally, which may lose relationships among the utterances or important information in the context. We propose a sequential matching network (SMN) to address both problems. SMN first matches a response with each utterance in the context on multiple levels of granularity, and distills important matching information from each pair as a vector with convolution and pooling operations. The vectors are then accumulated in a chronological order through a recurrent neural network (RNN) which models relationships among the utterances. The final matching score is calculated with the hidden states of the RNN. Empirical study on two public data sets shows that SMN can significantly outperform state-of-the-art methods for response selection in multi-turn conversation.", "keyphrases": ["multi-turn response selection", "chatbot", "conversation", "sequential matching network"]} +{"id": "caglayan-etal-2020-simultaneous", "title": "Simultaneous Machine Translation with Visual Context", "abstract": "Simultaneous machine translation (SiMT) aims to translate a continuous input text stream into another language with the lowest latency and highest quality possible. The translation thus has to start with an incomplete source text, which is read progressively, creating the need for anticipation. In this paper, we seek to understand whether the addition of visual information can compensate for the missing source context. 
To this end, we analyse the impact of different multimodal approaches and visual features on state-of-the-art SiMT frameworks. Our results show that visual context is helpful and that visually-grounded models based on explicit object region information are much better than commonly used global features, reaching up to 3 BLEU points improvement under low latency scenarios. Our qualitative analysis illustrates cases where only the multimodal systems are able to translate correctly from English into gender-marked languages, as well as deal with differences in word order, such as adjective-noun placement between English and French.", "keyphrases": ["visual context", "simultaneous machine translation", "mmt"]} +{"id": "xiong-etal-2010-error", "title": "Error Detection for Statistical Machine Translation Using Linguistic Features", "abstract": "Automatic error detection is desired in the post-processing to improve machine translation quality. The previous work is largely based on confidence estimation using system-based features, such as word posterior probabilities calculated from N-best lists or word lattices. We propose to incorporate two groups of linguistic features, which convey information from outside machine translation systems, into error detection: lexical and syntactic features. We use a maximum entropy classifier to predict translation errors by integrating word posterior probability feature and linguistic features. The experimental results show that 1) linguistic features alone outperform word posterior probability based confidence estimation in error detection; and 2) linguistic features can further provide complementary information when combined with word confidence scores, which collectively reduce the classification error rate by 18.52% and improve the F measure by 16.37%.", "keyphrases": ["linguistic feature", "error detection", "pos", "dependency link"]} +{"id": "zhu-etal-2019-ncls", "title": "NCLS: Neural Cross-Lingual Summarization", "abstract": "Cross-lingual summarization (CLS) is the task to produce a summary in one particular language for a source document in a different language. Existing methods simply divide this task into two steps: summarization and translation, leading to the problem of error propagation. To handle that, we present an end-to-end CLS framework, which we refer to as Neural Cross-Lingual Summarization (NCLS), for the first time. Moreover, we propose to further improve NCLS by incorporating two related tasks, monolingual summarization and machine translation, into the training process of CLS under multi-task learning. Due to the lack of supervised CLS data, we propose a round-trip translation strategy to acquire two high-quality large-scale CLS datasets based on existing monolingual summarization datasets. Experimental results have shown that our NCLS achieves remarkable improvement over traditional pipeline methods on both English-to-Chinese and Chinese-to-English CLS human-corrected test sets. In addition, NCLS with multi-task learning can further significantly improve the quality of generated summaries. We make our dataset and code publicly available here: .", "keyphrases": ["summarization", "machine translation", "round-trip translation strategy", "large-scale cls dataset"]} +{"id": "qazvinian-etal-2011-rumor", "title": "Rumor has it: Identifying Misinformation in Microblogs", "abstract": "A rumor is commonly defined as a statement whose true value is unverifiable. 
Rumors may spread misinformation (false information) or disinformation (deliberately false information) on a network of people. Identifying rumors is crucial in online social media where large amounts of information are easily spread across a large network by sources with unverified authority. In this paper, we address the problem of rumor detection in microblogs and explore the effectiveness of 3 categories of features: content-based, network-based, and microblog-specific memes for correctly identifying rumors. Moreover, we show how these features are also effective in identifying disinformers, users who endorse a rumor and further help it to spread. We perform our experiments on more than 10,000 manually annotated tweets collected from Twitter and show how our retrieval model achieves more than 0.95 in Mean Average Precision (MAP). Finally, we believe that our dataset is the first large-scale dataset on rumor detection. It can open new dimensions in analyzing online misinformation and other aspects of microblog conversations.", "keyphrases": ["twitter", "rumor", "social medium platform", "textual characteristic"]} +{"id": "blevins-zettlemoyer-2020-moving", "title": "Moving Down the Long Tail of Word Sense Disambiguation with Gloss Informed Bi-encoders", "abstract": "A major obstacle in Word Sense Disambiguation (WSD) is that word senses are not uniformly distributed, causing existing models to generally perform poorly on senses that are either rare or unseen during training. We propose a bi-encoder model that independently embeds (1) the target word with its surrounding context and (2) the dictionary definition, or gloss, of each sense. The encoders are jointly optimized in the same representation space, so that sense disambiguation can be performed by finding the nearest sense embedding for each target word embedding. Our system outperforms previous state-of-the-art models on English all-words WSD; these gains predominantly come from improved performance on rare senses, leading to a 31.1% error reduction on less frequent senses over prior work. This demonstrates that rare senses can be more effectively disambiguated by modeling their definitions.", "keyphrases": ["word sense disambiguation", "gloss", "bi-encoder"]} +{"id": "he-etal-2015-question", "title": "Question-Answer Driven Semantic Role Labeling: Using Natural Language to Annotate Natural Language", "abstract": "This paper introduces the task of question-answer driven semantic role labeling (QA-SRL), where question-answer pairs are used to represent predicate-argument structure. For example, the verb \u201cintroduce\u201d in the previous sentence would be labeled with the questions \u201cWhat is introduced?\u201d, and \u201cWhat introduces something?\u201d, each paired with the phrase from the sentence that gives the correct answer. Posing the problem this way allows the questions themselves to define the set of possible roles, without the need for predefined frame or thematic role ontologies. It also allows for scalable data collection by annotators with very little training and no linguistic expertise. We gather data in two domains, newswire text and Wikipedia articles, and introduce simple classifier-based models for predicting which questions to ask and what their answers should be. 
Our results show that non-expert annotators can produce high quality QA-SRL data, and also establish baseline performance levels for future work on this task.", "keyphrases": ["annotator", "question-answer pair", "predicate-argument structure"]} +{"id": "rasooli-tetreault-2013-joint", "title": "Joint Parsing and Disfluency Detection in Linear Time", "abstract": "We introduce a novel method to jointly parse and detect disfluencies in spoken utterances. Our model can use arbitrary features for parsing sentences and adapt itself with out-of-domain data. We show that our method, based on transition-based parsing, performs at a high level of accuracy for both the parsing and disfluency detection tasks. Additionally, our method is the fastest for the joint task, running in linear time.", "keyphrases": ["disfluency detection", "linear time", "dependency parsing"]} +{"id": "niehues-etal-2016-pre", "title": "Pre-Translation for Neural Machine Translation", "abstract": "Recently, the development of neural machine translation (NMT) has significantly improved the translation quality of automatic machine translation. While most sentences are more accurate and fluent than translations by statistical machine translation (SMT)-based systems, in some cases, the NMT system produces translations that have a completely different meaning. This is especially the case when rare words occur. When using statistical machine translation, it has already been shown that significant gains can be achieved by simplifying the input in a preprocessing step. A commonly used example is the pre-reordering approach. In this work, we used phrase-based machine translation to pre-translate the input into the target language. Then a neural machine translation system generates the final hypothesis using the pre-translation. Thereby, we use either only the output of the phrase-based machine translation (PBMT) system or a combination of the PBMT output and the source sentence. We evaluate the technique on the English to German translation task. Using this approach we are able to outperform the PBMT system as well as the baseline neural MT system by up to 2 BLEU points. We analyzed the influence of the quality of the initial system on the final result.", "keyphrases": ["neural machine translation", "pbmt", "pre-translation"]} +{"id": "miwa-sasaki-2014-modeling", "title": "Modeling Joint Entity and Relation Extraction with Table Representation", "abstract": "This paper proposes a history-based structured learning approach that jointly extracts entities and relations in a sentence. We introduce a novel simple and flexible table representation of entities and relations. We investigate several feature settings, search orders, and learning methods with inexact search on the table. The experimental results demonstrate that a joint learning approach significantly outperforms a pipeline approach by incorporating global features and by selecting appropriate learning methods and search orders.", "keyphrases": ["joint entity", "relation extraction", "table representation", "feature-based model"]} +{"id": "cotterell-etal-2015-modeling", "title": "Modeling Word Forms Using Latent Underlying Morphs and Phonology", "abstract": "The observed pronunciations or spellings of words are often explained as arising from the \u201cunderlying forms\u201d of their morphemes. These forms are latent strings that linguists try to reconstruct by hand. We propose to reconstruct them automatically at scale, enabling generalization to new words. 
Given some surface word types of a concatenative language along with the abstract morpheme sequences that they express, we show how to recover consistent underlying forms for these morphemes, together with the (stochastic) phonology that maps each concatenation of underlying forms to a surface form. Our technique involves loopy belief propagation in a natural directed graphical model whose variables are unknown strings and whose conditional distributions are encoded as finite-state machines with trainable weights. We define training and evaluation paradigms for the task of surface word prediction, and report results on subsets of 7 languages.", "keyphrases": ["word form", "phonology", "graphical model"]} +{"id": "smith-eisner-2008-dependency", "title": "Dependency Parsing by Belief Propagation", "abstract": "We formulate dependency parsing as a graphical model with the novel ingredient of global constraints. We show how to apply loopy belief propagation (BP), a simple and effective tool for approximate learning and inference. As a parsing algorithm, BP is both asymptotically and empirically efficient. Even with second-order features or latent variables, which would make exact parsing considerably slower or NP-hard, BP needs only O(n^3) time with a small constant factor. Furthermore, such features significantly improve parse accuracy over exact first-order methods. Incorporating additional features would increase the runtime additively rather than multiplicatively.", "keyphrases": ["belief propagation", "global constraint", "approximate learning", "dependency parsing", "decomposition"]} +{"id": "kumar-etal-2020-evaluating", "title": "Evaluating Aggression Identification in Social Media", "abstract": "In this paper, we present the report and findings of the Shared Task on Aggression and Gendered Aggression Identification organised as part of the Second Workshop on Trolling, Aggression and Cyberbullying (TRAC-2) at LREC 2020. The task consisted of two sub-tasks - aggression identification (sub-task A) and gendered identification (sub-task B) - in three languages - Bangla, Hindi and English. For this task, the participants were provided with a dataset of approximately 5,000 instances from YouTube comments in each language. For testing, approximately 1,000 instances were provided in each language for each sub-task. A total of 70 teams registered to participate in the task and 19 teams submitted their test runs. The best system obtained a weighted F-score of approximately 0.80 in sub-task A and approximately 0.87 in sub-task B for all three languages.", "keyphrases": ["aggression identification", "cyberbullying", "hate speech", "social medium", "abusive language workshop"]} +{"id": "sporleder-li-2009-unsupervised", "title": "Unsupervised Recognition of Literal and Non-Literal Use of Idiomatic Expressions", "abstract": "We propose an unsupervised method for distinguishing literal and non-literal usages of idiomatic expressions. Our method determines how well a literal interpretation is linked to the overall cohesive structure of the discourse. If strong links can be found, the expression is classified as literal, otherwise as idiomatic. 
We show that this method can help to tell apart literal and non-literal usages, even for idioms which occur in canonical form.", "keyphrases": ["idiomatic expression", "unsupervised method", "component word"]} +{"id": "wong-mooney-2007-generation", "title": "Generation by Inverting a Semantic Parser that Uses Statistical Machine Translation", "abstract": "This paper explores the use of statistical machine translation (SMT) methods for tactical natural language generation. We present results on using phrase-based SMT for learning to map meaning representations to natural language. Improved results are obtained by inverting a semantic parser that uses SMT methods to map sentences into meaning representations. Finally, we show that hybridizing these two approaches results in still more accurate generation systems. Automatic and human evaluation of generated sentences are presented across two domains and four languages.", "keyphrases": ["semantic parser", "smt method", "synchronous grammar"]} +{"id": "marvin-linzen-2018-targeted", "title": "Targeted Syntactic Evaluation of Language Models", "abstract": "We present a data set for evaluating the grammaticality of the predictions of a language model. We automatically construct a large number of minimally different pairs of English sentences, each consisting of a grammatical and an ungrammatical sentence. The sentence pairs represent different variations of structure-sensitive phenomena: subject-verb agreement, reflexive anaphora and negative polarity items. We expect a language model to assign a higher probability to the grammatical sentence than the ungrammatical one. In an experiment using this data set, an LSTM language model performed poorly on many of the constructions. Multi-task training with a syntactic objective (CCG supertagging) improved the LSTM's accuracy, but a large gap remained between its performance and the accuracy of human participants recruited online. This suggests that there is considerable room for improvement over LSTMs in capturing syntax in a language model.", "keyphrases": ["agreement", "negative polarity item", "targeted syntactic evaluation", "neural language model", "high probability"]} +{"id": "chen-etal-2020-distilling", "title": "Distilling Knowledge Learned in BERT for Text Generation", "abstract": "Large-scale pre-trained language model such as BERT has achieved great success in language understanding tasks. However, it remains an open question how to utilize BERT for language generation. In this paper, we present a novel approach, Conditional Masked Language Modeling (C-MLM), to enable the finetuning of BERT on target generation tasks. The finetuned BERT (teacher) is exploited as extra supervision to improve conventional Seq2Seq models (student) for better text generation performance. By leveraging BERT's idiosyncratic bidirectional nature, distilling knowledge learned in BERT can encourage auto-regressive Seq2Seq models to plan ahead, imposing global sequence-level supervision for coherent text generation. Experiments show that the proposed approach significantly outperforms strong Transformer baselines on multiple language generation tasks such as machine translation and text summarization. 
Our proposed model also achieves new state of the art on IWSLT German-English and English-Vietnamese MT datasets.", "keyphrases": ["bert", "text generation", "pre-trained language model", "teacher"]} +{"id": "strapparava-etal-2012-parallel", "title": "A Parallel Corpus of Music and Lyrics Annotated with Emotions", "abstract": "In this paper, we introduce a novel parallel corpus of music and lyrics, annotated with emotions at line level. We first describe the corpus, consisting of 100 popular songs, each of them including a music component, provided in the MIDI format, as well as a lyrics component, made available as raw text. We then describe our work on enhancing this corpus with emotion annotations using crowdsourcing. We also present some initial experiments on emotion classification using the music and the lyrics representations of the songs, which lead to encouraging results, thus demonstrating the promise of using joint music-lyric models for song processing.", "keyphrases": ["parallel corpus", "music", "lyric", "emotion"]} +{"id": "prasad-etal-2006-annotating", "title": "Annotating Attribution in the Penn Discourse TreeBank", "abstract": "An emerging task in text understanding and generation is to categorize information as fact or opinion and to further attribute it to the appropriate source. Corpus annotation schemes aim to encode such distinctions for NLP applications concerned with such tasks, such as information extraction, question answering, summarization, and generation. We describe an annotation scheme for marking the attribution of abstract objects such as propositions, facts and eventualities associated with discourse relations and their arguments annotated in the Penn Discourse TreeBank. The scheme aims to capture the source and degrees of factuality of the abstract objects. Key aspects of the scheme are annotation of the text spans signalling the attribution, and annotation of features recording the source, type, scopal polarity, and determinacy of attribution.", "keyphrases": ["attribution", "penn discourse treebank", "annotation scheme"]} +{"id": "hovy-etal-2006-ontonotes", "title": "OntoNotes: The 90% Solution", "abstract": "We describe the OntoNotes methodology and its result, a large multilingual richly-annotated corpus constructed at 90% interannotator agreement. An initial portion (300K words of English newswire and 250K words of Chinese newswire) will be made available to the community during 2007.", "keyphrases": ["ontonotes", "sense inventory", "wsd", "project", "large-scale semantic annotation"]} +{"id": "post-etal-2012-constructing", "title": "Constructing Parallel Corpora for Six Indian Languages via Crowdsourcing", "abstract": "Recent work has established the efficacy of Amazon's Mechanical Turk for constructing parallel corpora for machine translation research. We apply this to building a collection of parallel corpora between English and six languages from the Indian subcontinent: Bengali, Hindi, Malayalam, Tamil, Telugu, and Urdu. These languages are low-resource, under-studied, and exhibit linguistic phenomena that are difficult for machine translation. 
We conduct a variety of baseline experiments and analysis, and release the data to the community.", "keyphrases": ["parallel corpora", "indian language", "crowdsourcing", "bengali"]} +{"id": "merchant-etal-2020-happens", "title": "What Happens To BERT Embeddings During Fine-tuning?", "abstract": "While much recent work has examined how linguistic information is encoded in pre-trained sentence representations, comparatively little is understood about how these models change when adapted to solve downstream tasks. Using a suite of analysis techniques\u2014supervised probing, unsupervised similarity analysis, and layer-based ablations\u2014we investigate how fine-tuning affects the representations of the BERT model. We find that while fine-tuning necessarily makes some significant changes, there is no catastrophic forgetting of linguistic phenomena. We instead find that fine-tuning is a conservative process that primarily affects the top layers of BERT, albeit with noteworthy variation across tasks. In particular, dependency parsing reconfigures most of the model, whereas SQuAD and MNLI involve much shallower processing. Finally, we also find that fine-tuning has a weaker effect on representations of out-of-domain sentences, suggesting room for improvement in model generalization.", "keyphrases": ["bert", "fine-tuning", "downstream task", "weight"]} +{"id": "bingel-sogaard-2017-identifying", "title": "Identifying beneficial task relations for multi-task learning in deep neural networks", "abstract": "Multi-task learning (MTL) in deep neural networks for NLP has recently received increasing interest due to some compelling benefits, including its potential to efficiently regularize models and to reduce the need for labeled data. While it has brought significant improvements in a number of NLP tasks, mixed results have been reported, and little is known about the conditions under which MTL leads to gains in NLP. This paper sheds light on the specific task relations that can lead to gains from MTL models over single-task setups.", "keyphrases": ["task relation", "multi-task learning", "deep neural network", "predictor"]} +{"id": "matuschek-gurevych-2013-dijkstra", "title": "Dijkstra-WSA: A Graph-Based Approach to Word Sense Alignment", "abstract": "In this paper, we present Dijkstra-WSA, a novel graph-based algorithm for word sense alignment. We evaluate it on four different pairs of lexical-semantic resources with different characteristics (WordNet-OmegaWiki, WordNet-Wiktionary, GermaNet-Wiktionary and WordNet-Wikipedia) and show that it achieves competitive performance on 3 out of 4 datasets. Dijkstra-WSA outperforms the state of the art on every dataset if it is combined with a back-off based on gloss similarity. We also demonstrate that Dijkstra-WSA is not only flexibly applicable to different resources but also highly parameterizable to optimize for precision or recall.", "keyphrases": ["graph-based approach", "word sense alignment", "dijkstra-wsa", "wiktionary", "research area"]} +{"id": "prasad-etal-2010-realization", "title": "Realization of Discourse Relations by Other Means: Alternative Lexicalizations", "abstract": "Studies of discourse relations have not, in the past, attempted to characterize what serves as evidence for them, beyond lists of frozen expressions, or markers, drawn from a few well-defined syntactic classes. 
In this paper, we describe how the lexicalized discourse relation annotations of the Penn Discourse Treebank (PDTB) led to the discovery of a wide range of additional expressions, annotated as AltLex (alternative lexicalizations) in the PDTB 2.0. Further analysis of AltLex annotation suggests that the set of markers is open-ended, and drawn from a wider variety of syntactic types than currently assumed. As a first attempt towards automatically identifying discourse relation markers, we propose the use of syntactic paraphrase methods.", "keyphrases": ["other mean", "lexicalization", "altlex relation"]} +{"id": "marton-etal-2013-dependency", "title": "Dependency Parsing of Modern Standard Arabic with Lexical and Inflectional Features", "abstract": "We explore the contribution of lexical and inflectional morphology features to dependency parsing of Arabic, a morphologically rich language with complex agreement patterns. Using controlled experiments, we contrast the contribution of different part-of-speech (POS) tag sets and morphological features in two input conditions: machine-predicted condition (in which POS tags and morphological feature values are automatically assigned), and gold condition (in which their true values are known). We find that more informative (fine-grained) tag sets are useful in the gold condition, but may be detrimental in the predicted condition, where they are outperformed by simpler but more accurately predicted tag sets. We identify a set of features (definiteness, person, number, gender, and undiacritized lemma) that improve parsing quality in the predicted condition, whereas other features are more useful in gold. We are the first to show that functional features for gender and number (e.g., \u201cbroken plurals\u201d), and optionally the related rationality (\u201chumanness\u201d) feature, are more helpful for parsing than form-based gender and number. We finally show that parsing quality in the predicted condition can dramatically improve by training in a combined gold+predicted condition. We experimented with two transition-based parsers, MaltParser and Easy-First Parser. Our findings are robust across parsers, models, and input conditions. This suggests that the contribution of the linguistic knowledge in the tag sets and features we identified goes beyond particular experimental settings, and may be informative for other parsers and morphologically rich languages.", "keyphrases": ["arabic", "inflectional morphology", "dependency parsing"]} +{"id": "hillard-etal-2003-detection", "title": "Detection Of Agreement vs. Disagreement In Meetings: Training With Unlabeled Data", "abstract": "To support summarization of automatically transcribed meetings, we introduce a classifier to recognize agreement or disagreement utterances, utilizing both word-based and prosodic cues. We show that hand-labeling efforts can be minimized by using unsupervised training on a large unlabeled data set combined with supervised training on a small amount of data. For ASR transcripts with over 45% WER, the system recovers nearly 80% of agree/disagree utterances with a confusion rate of only 3%.", "keyphrases": ["agreement", "unlabeled data", "detection"]} +{"id": "bos-2008-wide", "title": "Wide-Coverage Semantic Analysis with Boxer", "abstract": "Boxer is an open-domain software component for semantic analysis of text, based on Combinatory Categorial Grammar (CCG) and Discourse Representation Theory (DRT). 
Used together with the C&C tools, an analysis of Boxer's output showed that (b) discourse structure triggered by conditionals, negation or discourse adverbs was overall correctly computed; (c) some measure and time expressions are correctly analysed, others aren't; (d) several shallow analyses are given for lexical phrases that require deep analysis; (e) bridging references and pronouns are not resolved in most cases. Boxer is distributed with the C&C tools and freely available for research purposes.", "keyphrases": ["boxer", "wide-coverage semantic analysis", "meaning representation", "parsing system"]} +{"id": "ng-2010-supervised", "title": "Supervised Noun Phrase Coreference Research: The First Fifteen Years", "abstract": "The research focus of computational coreference resolution has exhibited a shift from heuristic approaches to machine learning approaches in the past decade. This paper surveys the major milestones in supervised coreference research since its inception fifteen years ago.", "keyphrases": ["noun phrase", "coreference resolution", "survey", "mention-pair model", "same discourse entity"]} +{"id": "narayan-etal-2017-split", "title": "Split and Rephrase", "abstract": "We propose a new sentence simplification task (Split-and-Rephrase) where the aim is to split a complex sentence into a meaning preserving sequence of shorter sentences. Like sentence simplification, splitting-and-rephrasing has the potential of benefiting both natural language processing and societal applications. Because shorter sentences are generally better processed by NLP systems, it could be used as a preprocessing step which facilitates and improves the performance of parsers, semantic role labellers and machine translation systems. It should also be of use for people with reading disabilities because it allows the conversion of longer sentences into shorter ones. This paper makes two contributions towards this new task. First, we create and make available a benchmark consisting of 1,066,115 tuples mapping a single complex sentence to a sequence of sentences expressing the same meaning. Second, we propose five models (vanilla sequence-to-sequence to semantically-motivated models) to understand the difficulty of the proposed task.", "keyphrases": ["rephrase", "complex sentence", "split"]} +{"id": "chen-etal-2013-vector", "title": "Vector Space Model for Adaptation in Statistical Machine Translation", "abstract": "This paper proposes a new approach to domain adaptation in statistical machine translation (SMT) based on a vector space model (VSM). The general idea is first to create a vector profile for the in-domain development (\u201cdev\u201d) set. This profile might, for instance, be a vector with a dimensionality equal to the number of training subcorpora; each entry in the vector reflects the contribution of a particular subcorpus to all the phrase pairs that can be extracted from the dev set. Then, for each phrase pair extracted from the training data, we create a vector with features defined in the same way, and calculate its similarity score with the vector representing the dev set. Thus, we obtain a decoding feature whose value represents the phrase pair\u2019s closeness to the dev. This is a simple, computationally cheap form of instance weighting for phrase pairs. 
Experiments on large scale NIST evaluation data show improvements over strong baselines: +1.8 BLEU on Arabic to English and +1.4 BLEU on Chinese to English over a non-adapted baseline, and significant improvements in most circumstances over baselines with linear mixture model adaptation. An informal analysis suggests that VSM adaptation may help in making a good choice among words with the same meaning, on the basis of style and genre.", "keyphrases": ["vsm", "phrase pair", "vector space model"]} +{"id": "zhou-etal-2010-predicting", "title": "Predicting Discourse Connectives for Implicit Discourse Relation Recognition", "abstract": "Existing works indicate that the absence of explicit discourse connectives makes it difficult to recognize implicit discourse relations. In this paper we attempt to overcome this difficulty for implicit relation recognition by automatically inserting discourse connectives between arguments with the use of a language model. Then we propose two algorithms to leverage the information of these predicted connectives. One is to use these predicted implicit connectives as additional features in a supervised model. The other is to perform implicit relation recognition based only on these predicted connectives. Results on Penn Discourse Treebank 2.0 show that predicted discourse connectives help implicit relation recognition and the first algorithm can achieve an absolute average f-score improvement of 3% over a state of the art baseline system.", "keyphrases": ["discourse relation", "language model", "implicit connective"]} +{"id": "shi-etal-2015-automatically", "title": "Automatically Solving Number Word Problems by Semantic Parsing and Reasoning", "abstract": "This paper presents a semantic parsing and reasoning approach to automatically solving math word problems. A new meaning representation language is designed to bridge natural language text and math expressions. A CFG parser is implemented based on 9,600 semi-automatically created grammar rules. We conduct experiments on a test set of over 1,500 number word problems (i.e., verbally expressed number problems) and yield 95.4% precision and 60.2% recall.", "keyphrases": ["number word problem", "rule-based approach", "dolphin language"]} +{"id": "dong-etal-2014-adaptive", "title": "Adaptive Recursive Neural Network for Target-dependent Twitter Sentiment Classification", "abstract": "We propose Adaptive Recursive Neural Network (AdaRNN) for target-dependent Twitter sentiment classification. AdaRNN adaptively propagates the sentiments of words to target depending on the context and syntactic relationships between them. It consists of more than one composition functions, and we model the adaptive sentiment propagations as distributions over these composition functions. The experimental studies illustrate that AdaRNN improves the baseline methods. Furthermore, we introduce a manually annotated dataset for target-dependent Twitter sentiment analysis.", "keyphrases": ["recursive neural network", "twitter sentiment classification", "dependency tree", "input sentence", "deep learning"]} +{"id": "huang-etal-2019-unicoder", "title": "Unicoder: A Universal Language Encoder by Pre-training with Multiple Cross-lingual Tasks", "abstract": "We present Unicoder, a universal language encoder that is insensitive to different languages. Given an arbitrary NLP task, a model can be trained with Unicoder using training data in one language and directly applied to inputs of the same task in other languages. 
Compared to similar efforts such as Multilingual BERT and XLM, three new cross-lingual pre-training tasks are proposed, including cross-lingual word recovery, cross-lingual paraphrase classification and cross-lingual masked language model. These tasks help Unicoder learn the mappings among different languages from more perspectives. We also find that doing fine-tuning on multiple languages together can bring further improvement. Experiments are performed on two tasks: cross-lingual natural language inference (XNLI) and cross-lingual question answering (XQA), where XLM is our baseline. On XNLI, 1.8% averaged accuracy improvement (on 15 languages) is obtained. On XQA, which is a new cross-lingual dataset built by us, 5.5% averaged accuracy improvement (on French and German) is obtained.", "keyphrases": ["universal language encoder", "cross-lingual task", "xlm", "pre-training task", "unicoder"]} +{"id": "shen-etal-2007-guided", "title": "Guided Learning for Bidirectional Sequence Classification", "abstract": "In this paper, we propose guided learning, a new learning framework for bidirectional sequence classification. The tasks of learning the order of inference and training the local classifier are dynamically incorporated into a single Perceptron-like learning algorithm. We apply this novel learning algorithm to POS tagging. It obtains an error rate of 2.67% on the standard PTB test set, which represents 3.3% relative error reduction over the previous best result on the same data set, while using fewer features.", "keyphrases": ["bidirectional sequence classification", "guided learning", "tagging accuracy"]} +{"id": "hajic-etal-2012-announcing", "title": "Announcing Prague Czech-English Dependency Treebank 2.0", "abstract": "We introduce a substantial update of the Prague Czech-English Dependency Treebank, a parallel corpus manually annotated at the deep syntactic layer of linguistic representation. The English part consists of the Wall Street Journal (WSJ) section of the Penn Treebank. The Czech part was translated from the English source sentence by sentence. This paper gives a high level overview of the underlying linguistic theory (the so-called tectogrammatical annotation) with some details of the most important features like valency annotation, ellipsis reconstruction or coreference.", "keyphrases": ["prague", "czech-english dependency treebank", "detail"]} +{"id": "wang-etal-2020-tplinker", "title": "TPLinker: Single-stage Joint Extraction of Entities and Relations Through Token Pair Linking", "abstract": "Extracting entities and relations from unstructured text has attracted increasing attention in recent years but remains challenging, due to the intrinsic difficulty in identifying overlapping relations with shared entities. Prior works show that joint learning can result in a noticeable performance gain. However, they usually involve sequential interrelated steps and suffer from the problem of exposure bias. At training time, they predict with the ground truth conditions while at inference it has to make extraction from scratch. This discrepancy leads to error accumulation. To mitigate the issue, we propose in this paper a one-stage joint extraction model, namely, TPLinker, which is capable of discovering overlapping relations sharing one or both entities while being immune from the exposure bias. 
TPLinker formulates joint extraction as a token pair linking problem and introduces a novel handshaking tagging scheme that aligns the boundary tokens of entity pairs under each relation type. Experiment results show that TPLinker performs significantly better on overlapping and multiple relation extraction, and achieves state-of-the-art performance on two public datasets.", "keyphrases": ["extraction", "token pair", "tagging scheme", "entity pair", "tplinker"]} +{"id": "agirre-etal-2009-study", "title": "A Study on Similarity and Relatedness Using Distributional and WordNet-based Approaches", "abstract": "This paper presents and compares WordNet-based and distributional similarity approaches. The strengths and weaknesses of each approach regarding similarity and relatedness tasks are discussed, and a combination is presented. Each of our methods independently provide the best results in their class on the RG and WordSim353 datasets, and a supervised combination of them yields the best published results on all datasets. Finally, we pioneer cross-lingual similarity, showing that our methods are easily adapted for a cross-lingual task with minor losses.", "keyphrases": ["relatedness", "wordsim353 dataset", "terawords"]} +{"id": "chi-etal-2020-finding", "title": "Finding Universal Grammatical Relations in Multilingual BERT", "abstract": "Recent work has found evidence that Multilingual BERT (mBERT), a transformer-based multilingual masked language model, is capable of zero-shot cross-lingual transfer, suggesting that some aspects of its representations are shared cross-lingually. To better understand this overlap, we extend recent work on finding syntactic trees in neural networks' internal representations to the multilingual setting. We show that subspaces of mBERT representations recover syntactic tree distances in languages other than English, and that these subspaces are approximately shared across languages. Motivated by these results, we present an unsupervised analysis method that provides evidence mBERT learns representations of syntactic dependency labels, in the form of clusters which largely agree with the Universal Dependencies taxonomy. This evidence suggests that even without explicit supervision, multilingual masked language models learn certain linguistic universals.", "keyphrases": ["multilingual bert", "tree distance", "syntactic dependency label"]} +{"id": "alsarsour-etal-2018-dart", "title": "DART: A Large Dataset of Dialectal Arabic Tweets", "abstract": "In this paper, we present a new large manually-annotated multi-dialect dataset of Arabic tweets that is publicly available. The Dialectal ARabic Tweets (DART) dataset has about 25K tweets that are annotated via crowdsourcing and it is well-balanced over five main groups of Arabic dialects: Egyptian, Maghrebi, Levantine, Gulf, and Iraqi. The paper outlines the pipeline of constructing the dataset from crawling tweets that match a list of dialect phrases to annotating the tweets by the crowd. We also touch some challenges that we face during the process. We evaluate the quality of the dataset from two perspectives: the inter-annotator agreement and the accuracy of the final labels. 
Results show that both measures were substantially high for the Egyptian, Gulf, and Levantine dialect groups, but lower for the Iraqi and Maghrebi dialects, which indicates the difficulty of identifying those two dialects manually and hence automatically.", "keyphrases": ["dialectal arabic tweets", "dialect", "dart"]} +{"id": "lin-etal-2020-joint", "title": "A Joint Neural Model for Information Extraction with Global Features", "abstract": "Most existing joint neural models for Information Extraction (IE) use local task-specific classifiers to predict labels for individual instances (e.g., trigger, relation) regardless of their interactions. For example, a victim of a die event is likely to be a victim of an attack event in the same sentence. In order to capture such cross-subtask and cross-instance inter-dependencies, we propose a joint neural framework, OneIE, that aims to extract the globally optimal IE result as a graph from an input sentence. OneIE performs end-to-end IE in four stages: (1) Encoding a given sentence as contextualized word representations; (2) Identifying entity mentions and event triggers as nodes; (3) Computing label scores for all nodes and their pairwise links using local classifiers; (4) Searching for the globally optimal graph with a beam decoder. At the decoding stage, we incorporate global features to capture the cross-subtask and cross-instance interactions. Experiments show that adding global features improves the performance of our model and achieves new state-of-the-art on all subtasks. In addition, as OneIE does not use any language-specific feature, we prove it can be easily applied to new languages or trained in a multilingual manner.", "keyphrases": ["joint neural model", "information extraction", "global feature", "input sentence", "error propagation"]} +{"id": "wu-etal-2018-study", "title": "A Study of Reinforcement Learning for Neural Machine Translation", "abstract": "Recent studies have shown that reinforcement learning (RL) is an effective approach for improving the performance of neural machine translation (NMT) system. However, due to its instability, successful RL training is challenging, especially in real-world systems where deep models and large datasets are leveraged. In this paper, taking several large-scale translation tasks as testbeds, we conduct a systematic study on how to train better NMT models using reinforcement learning. We provide a comprehensive comparison of several important factors (e.g., baseline reward, reward shaping) in RL training. Furthermore, to fill in the gap that it remains unclear whether RL is still beneficial when monolingual data is used, we propose a new method to leverage RL to further boost the performance of NMT systems trained with source/target monolingual data. By integrating all our findings, we obtain competitive results on WMT14 English-German, WMT17 English-Chinese, and WMT17 Chinese-English translation tasks, especially setting a state-of-the-art performance on WMT17 Chinese-English translation task.", "keyphrases": ["reinforcement learning", "neural machine translation", "gradient estimation"]} +{"id": "quirk-poon-2017-distant", "title": "Distant Supervision for Relation Extraction beyond the Sentence Boundary", "abstract": "The growing demand for structured knowledge has led to great interest in relation extraction, especially in cases with limited supervision. However, existing distant supervision approaches only extract relations expressed in single sentences. 
In general, cross-sentence relation extraction is under-explored, even in the supervised-learning setting. In this paper, we propose the first approach for applying distant supervision to cross-sentence relation extraction. At the core of our approach is a graph representation that can incorporate both standard dependencies and discourse relations, thus providing a unifying way to model relations within and across sentences. We extract features from multiple paths in this graph, increasing accuracy and robustness when confronted with linguistic variation and analysis error. Experiments on an important extraction task for precision medicine show that our approach can learn an accurate cross-sentence extractor, using only a small existing knowledge base and unlabeled text from biomedical research articles. Compared to the existing distant supervision paradigm, our approach extracted twice as many relations at similar precision, thus demonstrating the prevalence of cross-sentence relations and the promise of our approach.", "keyphrases": ["relation extraction", "sentence boundary", "distant supervision", "biomedical domain", "document graph"]} +{"id": "zhou-etal-2018-neural-document", "title": "Neural Document Summarization by Jointly Learning to Score and Select Sentences", "abstract": "Sentence scoring and sentence selection are two main steps in extractive document summarization systems. However, previous works treat them as two separated subtasks. In this paper, we present a novel end-to-end neural network framework for extractive document summarization by jointly learning to score and select sentences. It first reads the document sentences with a hierarchical encoder to obtain the representation of sentences. Then it builds the output summary by extracting sentences one by one. Different from previous methods, our approach integrates the selection strategy into the scoring model, which directly predicts the relative importance given previously selected sentences. Experiments on the CNN/Daily Mail dataset show that the proposed framework significantly outperforms the state-of-the-art extractive summarization models.", "keyphrases": ["document summarization", "select sentence", "neural sequence model"]} +{"id": "ramesh-etal-2022-samanantar", "title": "Samanantar: The Largest Publicly Available Parallel Corpora Collection for 11 Indic Languages", "abstract": "We present Samanantar, the largest publicly available parallel corpora collection for Indic languages. The collection contains a total of 49.7 million sentence pairs between English and 11 Indic languages (from two language families). Specifically, we compile 12.4 million sentence pairs from existing, publicly available parallel corpora, and additionally mine 37.4 million sentence pairs from the Web, resulting in a 4× increase. We mine the parallel sentences from the Web by combining many corpora, tools, and methods: (a) Web-crawled monolingual corpora, (b) document OCR for extracting sentences from scanned documents, (c) multilingual representation models for aligning sentences, and (d) approximate nearest neighbor search for searching in a large collection of sentences. Human evaluation of samples from the newly mined corpora validate the high quality of the parallel sentences across 11 languages. Further, we extract 83.4 million sentence pairs between all 55 Indic language pairs from the English-centric parallel corpus using English as the pivot language.
We trained multilingual NMT models spanning all these languages on Samanantar which outperform existing models and baselines on publicly available benchmarks, such as FLORES, establishing the utility of Samanantar. Our data and models are available publicly at Samanantar and we hope they will help advance research in NMT and multilingual NLP for Indic languages.", "keyphrases": ["parallel corpora collection", "indic language", "samanantar"]} +{"id": "strube-muller-2003-machine", "title": "A Machine Learning Approach to Pronoun Resolution in Spoken Dialogue", "abstract": "We apply a decision tree based approach to pronoun resolution in spoken dialogue. Our system deals with pronouns with NP- and non-NP-antecedents. We present a set of features designed for pronoun resolution in spoken dialogue and determine the most promising features. We evaluate the system on twenty Switchboard dialogues and show that it compares well to Byron's (2002) manually tuned system.", "keyphrases": ["machine learning approach", "pronoun resolution", "spoken dialogue"]} +{"id": "castro-ferreira-etal-2016-individual", "title": "Individual Variation in the Choice of Referential Form", "abstract": "This study aims to measure the variation between writers in their choices of referential form by collecting and analysing a new and publicly available corpus of referring expressions. The corpus is composed of referring expressions produced by different participants in identical situations. Results, measured in terms of normalized entropy, reveal substantial individual variation. We discuss the problems and prospects of this finding for automatic text generation applications.", "keyphrases": ["choice", "referential form", "writer", "individual variation", "position"]} +{"id": "johnson-goldwater-2009-improving", "title": "Improving nonparameteric Bayesian inference: experiments on unsupervised word segmentation with adaptor grammars", "abstract": "One of the reasons nonparametric Bayesian inference is attracting attention in computational linguistics is because it provides a principled way of learning the units of generalization together with their probabilities. Adaptor grammars are a framework for defining a variety of hierarchical nonparametric Bayesian models. This paper investigates some of the choices that arise in formulating adaptor grammars and associated inference procedures, and shows that they can have a dramatic impact on performance in an unsupervised word segmentation task. With appropriate adaptor grammars and inference procedures we achieve an 87% word token f-score on the standard Brent version of the Bernstein-Ratner corpus, which is an error reduction of over 35% over the best previously reported results for this corpus.", "keyphrases": ["bayesian inference", "word segmentation", "adaptor grammar"]} +{"id": "henrich-hinrichs-2010-standardizing", "title": "Standardizing Wordnets in the ISO Standard LMF: Wordnet-LMF for GermaNet", "abstract": "It has been recognized for quite some time that sustainable data formats play an important role in the development and curation of linguistic resources. The purpose of this paper is to show how GermaNet, the German version of the Princeton WordNet, can be converted to the Lexical Markup Framework (LMF), a published ISO standard (ISO-24613) for encoding lexical resources. The conversion builds on Wordnet-LMF, which has been proposed in the context of the EU KYOTO project as an LMF format for wordnets.
The present paper proposes a number of crucial modifications and a set of extensions to Wordnet-LMF that are needed for conversion of wordnets in general and for conversion of GermaNet in particular.", "keyphrases": ["wordnet-lmf", "germanet", "conversion"]} +{"id": "cohn-lapata-2007-machine", "title": "Machine Translation by Triangulation: Making Effective Use of Multi-Parallel Corpora", "abstract": "Current phrase-based SMT systems perform poorly when using small training sets. This is a consequence of unreliable translation estimates and low coverage over source and target phrases. This paper presents a method which alleviates this problem by exploiting multiple translations of the same source phrase. Central to our approach is triangulation, the process of translating from a source to a target language via an intermediate third language. This allows the use of a much wider range of parallel corpora for training, and can be combined with a standard phrase-table using conventional smoothing methods. Experimental results demonstrate BLEU improvements for triangulated models over a standard phrase-based system.", "keyphrases": ["triangulation", "machine translation", "pivot language", "second strategy", "source-target model"]} +{"id": "das-bandyopadhyay-2010-sentiwordnet", "title": "SentiWordNet for Indian Languages", "abstract": "The discipline where sentiment/ opinion/ emotion has been identified and classified in human written text is well known as sentiment analysis. A typical computational approach to sentiment analysis starts with prior polarity lexicons where entries are tagged with their prior out of context polarity as human beings perceive using their cognitive knowledge. Till date, all research efforts found in sentiment lexicon literature deal mostly with English texts. In this article, we propose multiple computational techniques like, WordNet based, dictionary based, corpus based or generative approaches for generating SentiWordNet(s) for Indian languages. Currently, SentiWordNet(s) are being developed for three Indian languages: Bengali, Hindi and Telugu. An online intuitive game has been developed to create and validate the developed SentiWordNet(s) by involving Internet population. A number of automatic, semi-automatic and manual validations and evaluation methodologies have been adopted to measure the coverage and credibility of the developed SentiWordNet(s).", "keyphrases": ["indian language", "wordnet", "bengali"]} +{"id": "prabhumoye-etal-2018-style", "title": "Style Transfer Through Back-Translation", "abstract": "Style transfer is the task of rephrasing the text to contain specific stylistic properties without changing the intent or affect within the context. This paper introduces a new method for automatic style transfer. We first learn a latent representation of the input sentence which is grounded in a language translation model in order to better preserve the meaning of the sentence while reducing stylistic properties. Then adversarial generation techniques are used to make the output match the desired style. We evaluate this technique on three different style transformations: sentiment, gender and political slant.
Compared to two state-of-the-art style transfer modeling techniques we show improvements both in automatic evaluation of style transfer and in manual evaluation of meaning preservation and fluency.", "keyphrases": ["stylistic property", "latent representation", "fluency", "style transfer", "sentiment transfer"]} +{"id": "hassan-mihalcea-2009-cross", "title": "Cross-lingual Semantic Relatedness Using Encyclopedic Knowledge", "abstract": "In this paper, we address the task of cross-lingual semantic relatedness. We introduce a method that relies on the information extracted from Wikipedia, by exploiting the interlanguage links available between Wikipedia versions in multiple languages. Through experiments performed on several language pairs, we show that the method performs well, with a performance comparable to monolingual measures of relatedness.", "keyphrases": ["semantic relatedness", "wikipedia", "different language"]} +{"id": "pitler-nenkova-2009-using", "title": "Using Syntax to Disambiguate Explicit Discourse Connectives in Text", "abstract": "Discourse connectives are words or phrases such as once, since, and on the contrary that explicitly signal the presence of a discourse relation. There are two types of ambiguity that need to be resolved during discourse processing. First, a word can be ambiguous between discourse or non-discourse usage. For example, once can be either a temporal discourse connective or simply a word meaning \"formerly\". Secondly, some connectives are ambiguous in terms of the relation they mark. For example since can serve as either a temporal or causal connective. We demonstrate that syntactic features improve performance in both disambiguation tasks. We report state-of-the-art results for identifying discourse vs. non-discourse usage and human-level performance on sense disambiguation.", "keyphrases": ["disambiguation", "discourse connective", "syntactic feature"]} +{"id": "gao-etal-2019-jointly", "title": "Jointly Optimizing Diversity and Relevance in Neural Response Generation", "abstract": "Although recent neural conversation models have shown great potential, they often generate bland and generic responses. While various approaches have been explored to diversify the output of the conversation model, the improvement often comes at the cost of decreased relevance. In this paper, we propose a SpaceFusion model to jointly optimize diversity and relevance that essentially fuses the latent space of a sequence-to-sequence model and that of an autoencoder model by leveraging novel regularization terms. As a result, our approach induces a latent space in which the distance and direction from the predicted response vector roughly match the relevance and diversity, respectively. This property also lends itself well to an intuitive visualization of the latent space. Both automatic and human evaluation results demonstrate that the proposed approach brings significant improvement compared to strong baselines in both diversity and relevance.", "keyphrases": ["relevance", "latent space", "direction"]} +{"id": "stoyanov-etal-2010-coreference", "title": "Coreference Resolution with Reconcile", "abstract": "Despite the existence of several noun phrase coreference resolution data sets as well as several formal evaluations on the task, it remains frustratingly difficult to compare results across different coreference resolution systems.
This is due to the high cost of implementing a complete end-to-end coreference resolution system, which often forces researchers to substitute available gold-standard information in lieu of implementing a module that would compute that information. Unfortunately, this leads to inconsistent and often unrealistic evaluation scenarios. With the aim to facilitate consistent and realistic experimental evaluations in coreference resolution, we present Reconcile, an infrastructure for the development of learning-based noun phrase (NP) coreference resolution systems. Reconcile is designed to facilitate the rapid creation of coreference resolution systems, easy implementation of new feature sets and approaches to coreference resolution, and empirical evaluation of coreference resolvers across a variety of benchmark data sets and standard scoring metrics. We describe Reconcile and present experimental results showing that Reconcile can be used to create a coreference resolver that achieves performance comparable to state-of-the-art systems on six benchmark data sets.", "keyphrases": ["reconcile", "coreference resolution", "mention-pair model"]} +{"id": "pilehvar-etal-2013-align", "title": "Align, Disambiguate and Walk: A Unified Approach for Measuring Semantic Similarity", "abstract": "Semantic similarity is an essential component of many Natural Language Processing applications. However, prior methods for computing semantic similarity often operate at different levels, e.g., single words or entire documents, which requires adapting the method for each data type. We present a unified approach to semantic similarity that operates at multiple levels, all the way from comparing word senses to comparing text documents. Our method leverages a common probabilistic representation over word senses in order to compare different types of linguistic data. This unified representation shows state-of-the-art performance on three tasks: semantic textual similarity, word similarity, and word sense coarsening.", "keyphrases": ["disambiguate", "walk", "semantic similarity", "text document"]} +{"id": "minkov-etal-2005-extracting", "title": "Extracting Personal Names from Email: Applying Named Entity Recognition to Informal Text", "abstract": "There has been little prior work on Named Entity Recognition for \"informal\" documents like email. We present two methods for improving performance of person name recognizers for email: email-specific structural features and a recall-enhancing method which exploits name repetition across multiple documents.", "keyphrases": ["personal name", "email", "entity recognition", "informal text"]} +{"id": "rothe-etal-2020-leveraging", "title": "Leveraging Pre-trained Checkpoints for Sequence Generation Tasks", "abstract": "Unsupervised pre-training of large neural models has recently revolutionized Natural Language Processing. By warm-starting from the publicly released checkpoints, NLP practitioners have pushed the state-of-the-art on multiple benchmarks while saving significant amounts of compute time. So far the focus has been mainly on the Natural Language Understanding tasks. In this paper, we demonstrate the efficacy of pre-trained checkpoints for Sequence Generation. We developed a Transformer-based sequence-to-sequence model that is compatible with publicly available pre-trained BERT, GPT-2, and RoBERTa checkpoints and conducted an extensive empirical study on the utility of initializing our model, both encoder and decoder, with these checkpoints.
Our models result in new state-of-the-art results on Machine Translation, Text Summarization, Sentence Splitting, and Sentence Fusion.", "keyphrases": ["pre-trained checkpoint", "checkpoint", "state-of-the-art result"]} +{"id": "hough-schlangen-2017-joint", "title": "Joint, Incremental Disfluency Detection and Utterance Segmentation from Speech", "abstract": "We present the joint task of incremental disfluency detection and utterance segmentation and a simple deep learning system which performs it on transcripts and ASR results. We show how the constraints of the two tasks interact. Our joint-task system outperforms the equivalent individual task systems, provides competitive results and is suitable for future use in conversation agents in the psychiatric domain.", "keyphrases": ["incremental disfluency detection", "utterance segmentation", "joint task"]} +{"id": "castilho-etal-2020-context", "title": "On Context Span Needed for Machine Translation Evaluation", "abstract": "Despite increasing efforts to improve evaluation of machine translation (MT) by going beyond the sentence level to the document level, the definition of what exactly constitutes a \u201cdocument level\u201d is still not clear. This work deals with the context span necessary for a more reliable MT evaluation. We report results from a series of surveys involving three domains and 18 target languages designed to identify the necessary context span as well as issues related to it. Our findings indicate that, despite the fact that some issues and spans are strongly dependent on domain and on the target language, a number of common patterns can be observed so that general guidelines for context-aware MT evaluation can be drawn.", "keyphrases": ["context span", "native speaker", "document-level evaluation"]} +{"id": "kryscinski-etal-2020-evaluating", "title": "Evaluating the Factual Consistency of Abstractive Text Summarization", "abstract": "The most common metrics for assessing summarization algorithms do not account for whether summaries are factually consistent with source documents. We propose a weakly-supervised, model-based approach for verifying factual consistency and identifying conflicts between source documents and generated summaries. Training data is generated by applying a series of rule-based transformations to the sentences of source documents. The factual consistency model is then trained jointly for three tasks: 1) predict whether each summary sentence is factually consistent or not, 2) in either case, extract a span in the source document to support this consistency prediction, 3) for each summary sentence that is deemed inconsistent, extract the inconsistent span from it. Transferring this model to summaries generated by several neural models reveals that this highly scalable approach outperforms previous models, including those trained with strong supervision using datasets from related domains, such as natural language inference and fact checking. Additionally, human evaluation shows that the auxiliary span extraction tasks provide useful assistance in the process of verifying factual consistency. 
We also release a manually annotated dataset for factual consistency verification, code for training data generation, and trained model weights at .", "keyphrases": ["factual consistency", "series", "natural language inference", "input document"]} +{"id": "chatterjee-etal-2018-findings", "title": "Findings of the WMT 2018 Shared Task on Automatic Post-Editing", "abstract": "We present the results from the fourth round of the WMT shared task on MT Automatic Post-Editing. The task consists in automatically correcting the output of a \u201cblack-box\u201d machine translation system by learning from human corrections. Keeping the same general evaluation setting of the three previous rounds, this year we focused on one language pair (English-German) and on domain-specific data (Information Technology), with MT outputs produced by two different paradigms: phrase-based (PBSMT) and neural (NMT). Five teams submitted respectively 11 runs for the PBSMT subtask and 10 runs for the NMT subtask. In the former subtask, characterized by original translations of lower quality, top results achieved impressive improvements, up to -6.24 TER and +9.53 BLEU points over the baseline \u201cdo-nothing\u201d system. The NMT subtask proved to be more challenging due to the higher quality of the original translations and the availability of less training data. In this case, top results show smaller improvements up to -0.38 TER and +0.8 BLEU points.", "keyphrases": ["automatic post-editing", "machine translation", "ape system"]} +{"id": "wang-etal-2019-tree", "title": "Tree Transformer: Integrating Tree Structures into Self-Attention", "abstract": "Pre-training Transformer from large-scale raw texts and fine-tuning on the desired task have achieved state-of-the-art results on diverse NLP tasks. However, it is unclear what the learned attention captures. The attention computed by attention heads seems not to match human intuitions about hierarchical structures. This paper proposes Tree Transformer, which adds an extra constraint to attention heads of the bidirectional Transformer encoder in order to encourage the attention heads to follow tree structures. The tree structures can be automatically induced from raw texts by our proposed \u201cConstituent Attention\u201d module, which is simply implemented by self-attention between two adjacent words. With the same training procedure identical to BERT, the experiments demonstrate the effectiveness of Tree Transformer in terms of inducing tree structures, better language modeling, and further learning more explainable attention scores.", "keyphrases": ["self-attention", "human intuition", "tree transformer", "locality constraint", "low layer"]} +{"id": "chen-manning-2014-fast", "title": "A Fast and Accurate Dependency Parser using Neural Networks", "abstract": "Almost all current dependency parsers classify based on millions of sparse indicator features. Not only do these features generalize poorly, but the cost of feature computation restricts parsing speed significantly. In this work, we propose a novel way of learning a neural network classifier for use in a greedy, transition-based dependency parser. Because this classifier learns and uses just a small number of dense features, it can work very fast, while achieving an about 2% improvement in unlabeled and labeled attachment scores on both English and Chinese datasets. 
Concretely, our parser is able to parse more than 1000 sentences per second at 92.2% unlabeled attachment score on the English Penn Treebank.", "keyphrases": ["dependency parser", "word embedding", "feed-forward neural network", "deep learning"]} +{"id": "zhou-etal-2014-simple", "title": "A Simple Bayesian Modelling Approach to Event Extraction from Twitter", "abstract": "With the proliferation of social media sites, social streams have proven to contain the most up-to-date information on current events. Therefore, it is crucial to extract events from the social streams such as tweets. However, it is not straightforward to adapt the existing event extraction systems since texts in social media are fragmented and noisy. In this paper we propose a simple and yet effective Bayesian model, called Latent Event Model (LEM), to extract structured representation of events from social media. LEM is fully unsupervised and does not require annotated data for training. We evaluate LEM on a Twitter corpus. Experimental results show that the proposed model achieves 83% in F-measure, and outperforms the state-of-the-art baseline by over 7%.", "keyphrases": ["bayesian model", "event extraction", "twitter"]} +{"id": "ismail-manandhar-2010-bilingual", "title": "Bilingual lexicon extraction from comparable corpora using in-domain terms", "abstract": "Many existing methods for bilingual lexicon learning from comparable corpora are based on similarity of context vectors. These methods suffer from noisy vectors that greatly affect their accuracy. We introduce a method for filtering this noise allowing highly accurate learning of bilingual lexicons. Our method is based on the notion of in-domain terms which can be thought of as the most important contextually relevant words. We provide a method for identifying such terms. Our evaluation shows that the proposed method can learn highly accurate bilingual lexicons without using orthographic features or a large initial seed dictionary. In addition, we also introduce a method for measuring the similarity between two words in different languages without requiring any initial dictionary.", "keyphrases": ["comparable corpora", "in-domain term", "context vector"]} +{"id": "zhang-etal-2017-dependency", "title": "Dependency Parsing as Head Selection", "abstract": "Conventional graph-based dependency parsers guarantee a tree structure both during training and inference. Instead, we formalize dependency parsing as the problem of independently selecting the head of each word in a sentence. Our model which we call DeNSe (as shorthand for Dependency Neural Selection) produces a distribution over possible heads for each word using features obtained from a bidirectional recurrent neural network. Without enforcing structural constraints during training, DeNSe generates (at inference time) trees for the overwhelming majority of sentences, while non-tree outputs can be adjusted with a maximum spanning tree algorithm. We evaluate DeNSe on four languages (English, Chinese, Czech, and German) with varying degrees of non-projectivity. Despite the simplicity of the approach, our parsers are on par with the state of the art.", "keyphrases": ["head selection", "czech", "simplicity", "dependency parsing"]} +{"id": "icard-iii-moss-2014-recent", "title": "Recent Progress on Monotonicity", "abstract": "This paper serves two purposes.
It is a summary of much work concerning formal treatments of monotonicity and polarity in natural language, and it also discusses connections to related work on exclusion relations, and connections to psycholinguistics and computational linguistics. The second part of the paper presents a summary of some new work on a formal Monotonicity Calculus.", "keyphrases": ["monotonicity", "precise semantic", "pervasive feature"]} +{"id": "potthast-etal-2010-evaluation", "title": "An Evaluation Framework for Plagiarism Detection", "abstract": "We present an evaluation framework for plagiarism detection. The framework provides performance measures that address the specifics of plagiarism detection, and the PAN-PC-10 corpus, which contains 64 558 artificial and 4 000 simulated plagiarism cases, the latter generated via Amazon's Mechanical Turk. We discuss the construction principles behind the measures and the corpus, and we compare the quality of our corpus to existing corpora. Our analysis gives empirical evidence that the construction of tailored training corpora for plagiarism detection can be automated, and hence be done on a large scale.", "keyphrases": ["evaluation framework", "plagiarism detection", "source text"]} +{"id": "melamud-etal-2015-simple", "title": "A Simple Word Embedding Model for Lexical Substitution", "abstract": "The lexical substitution task requires identifying meaning-preserving substitutes for a target word instance in a given sentential context. Since its introduction in SemEval-2007, various models addressed this challenge, mostly in an unsupervised setting. In this work we propose a simple model for lexical substitution, which is based on the popular skip-gram word embedding model. The novelty of our approach is in leveraging explicitly the context embeddings generated within the skip-gram model, which were so far considered only as an internal component of the learning process. Our model is efficient, very simple to implement, and at the same time achieves state-of-the-art results on lexical substitution tasks in an unsupervised setting.", "keyphrases": ["lexical substitution", "substitute", "simple model"]} +{"id": "hardmeier-federico-2010-modelling", "title": "Modelling pronominal anaphora in statistical machine translation", "abstract": "Current Statistical Machine Translation (SMT) systems translate texts sentence by sentence without considering any cross-sentential context. Assuming independence between sentences makes it difficult to take certain translation decisions when the necessary information cannot be determined locally. We argue for the necessity to include cross-sentence dependencies in SMT. As a case in point, we study the problem of pronominal anaphora translation by manually evaluating German-English SMT output. We then present a word dependency model for SMT, which can represent links between word pairs in the same or in different sentences. We use this model to integrate the output of a coreference resolution system into English-German SMT with a view to improving the translation of anaphoric pronouns.", "keyphrases": ["machine translation", "pronoun", "anaphora resolution"]} +{"id": "carreras-marquez-2004-introduction", "title": "Introduction to the CoNLL-2004 Shared Task: Semantic Role Labeling", "abstract": "In this paper we describe the CoNLL-2004 shared task: semantic role labeling.
We introduce the specification and goal of the task, describe the data sets and evaluation methods, and present a general overview of the systems that have contributed to the task, providing comparative description.", "keyphrases": ["conll-2004", "semantic role labeling", "srl", "propbank", "much attention"]} +{"id": "card-etal-2015-media", "title": "The Media Frames Corpus: Annotations of Frames Across Issues", "abstract": "We describe the first version of the Media Frames Corpus: several thousand news articles on three policy issues, annotated in terms of media framing. We motivate framing as a phenomenon of study for computational linguistics and describe our annotation process.", "keyphrases": ["media frames corpus", "framing", "frame", "congressional speech", "several previous work"]} +{"id": "liu-avci-2019-incorporating", "title": "Incorporating Priors with Feature Attribution on Text Classification", "abstract": "Feature attribution methods, proposed recently, help users interpret the predictions of complex models. Our approach integrates feature attributions into the objective function to allow machine learning practitioners to incorporate priors in model building. To demonstrate the effectiveness of our technique, we apply it to two tasks: (1) mitigating unintended bias in text classifiers by neutralizing identity terms; (2) improving classifier performance in scarce data setting by forcing model to focus on toxic terms. Our approach adds an L2 distance loss between feature attributions and task-specific prior values to the objective. Our experiments show that i) a classifier trained with our technique reduces undesired model biases without a tradeoff on the original task; ii) incorporating prior helps model performance in scarce data settings.", "keyphrases": ["prior", "feature attribution", "objective function"]} +{"id": "xu-etal-2020-bert", "title": "BERT-of-Theseus: Compressing BERT by Progressive Module Replacing", "abstract": "In this paper, we propose a novel model compression approach to effectively compress BERT by progressive module replacing. Our approach first divides the original BERT into several modules and builds their compact substitutes. Then, we randomly replace the original modules with their substitutes to train the compact modules to mimic the behavior of the original modules. We progressively increase the probability of replacement through the training. In this way, our approach brings a deeper level of interaction between the original and compact models. Compared to the previous knowledge distillation approaches for BERT compression, our approach does not introduce any additional loss function. Our approach outperforms existing knowledge distillation approaches on GLUE benchmark, showing a new perspective of model compression.", "keyphrases": ["bert", "progressive module", "model compression"]} +{"id": "riezler-etal-2007-statistical", "title": "Statistical Machine Translation for Query Expansion in Answer Retrieval", "abstract": "We present an approach to query expansion in answer retrieval that uses Statistical Machine Translation (SMT) techniques to bridge the lexical gap between questions and answers. SMT-based query expansion is done by i) using a full-sentence paraphraser to introduce synonyms in context of the entire query, and ii) by translating query terms into answer terms using a full-sentence SMT model trained on question-answer pairs.
We evaluate these global, context-aware query expansion techniques on tfidf retrieval from 10 million question-answer pairs extracted from FAQ pages. Experimental results show that SMT-based expansion improves retrieval performance over local expansion and over retrieval without expansion.", "keyphrases": ["query expansion", "lexical gap", "question-answer pair", "statistical machine translation", "paraphrase generation"]} +{"id": "quirk-etal-2004-monolingual", "title": "Monolingual Machine Translation for Paraphrase Generation", "abstract": "We apply statistical machine translation (SMT) tools to generate novel paraphrases of input sentences in the same language. The system is trained on large volumes of sentence pairs automatically extracted from clustered news articles available on the World Wide Web. Alignment Error Rate (AER) is measured to gauge the quality of the resulting corpus. A monotone phrasal decoder generates contextual replacements. Human evaluation shows that this system outperforms baseline paraphrase generation techniques and, in a departure from previous work, offers better coverage and scalability than the current best-of-breed paraphrasing approaches.", "keyphrases": ["paraphrase generation", "news article", "monolingual machine translation", "smt technique"]} +{"id": "nikolaus-etal-2019-compositional", "title": "Compositional Generalization in Image Captioning", "abstract": "Image captioning models are usually evaluated on their ability to describe a held-out set of images, not on their ability to generalize to unseen concepts. We study the problem of compositional generalization, which measures how well a model composes unseen combinations of concepts when describing images. State-of-the-art image captioning models show poor generalization performance on this task. We propose a multi-task model to address the poor performance, that combines caption generation and image\u2013sentence ranking, and uses a decoding mechanism that re-ranks the captions according to their similarity to the image. This model is substantially better at generalizing to unseen combinations of concepts compared to state-of-the-art captioning models.", "keyphrases": ["image captioning", "unseen combination", "compositional generalization"]} +{"id": "hasan-etal-2019-ur", "title": "UR-FUNNY: A Multimodal Language Dataset for Understanding Humor", "abstract": "Humor is a unique and creative communicative behavior often displayed during social interactions. It is produced in a multimodal manner, through the usage of words (text), gestures (visual) and prosodic cues (acoustic). Understanding humor from these three modalities falls within boundaries of multimodal language; a recent research trend in natural language processing that models natural language as it happens in face-to-face communication. Although humor detection is an established research area in NLP, in a multimodal context it has been understudied. This paper presents a diverse multimodal dataset, called UR-FUNNY, to open the door to understanding multimodal language used in expressing humor. The dataset and accompanying studies, present a framework in multimodal humor detection for the natural language processing community.
UR-FUNNY is publicly available for research.", "keyphrases": ["multimodal language", "humor", "ted talk", "ur-funny dataset"]} +{"id": "tseng-etal-2005-morphological", "title": "Morphological features help POS tagging of unknown words across language varieties", "abstract": "Part-of-speech tagging, like any supervised statistical NLP task, is more difficult when test sets are very different from training sets, for example when tagging across genres or language varieties. We examined the problem of POS tagging of different varieties of Mandarin Chinese (PRC-Mainland, PRC-Hong Kong, and Taiwan). An analytic study first showed that unknown words were a major source of difficulty in cross-variety tagging. Unknown words in English tend to be proper nouns. By contrast, we found that Mandarin unknown words were mostly common nouns and verbs. We showed these results are caused by the high frequency of morphological compounding in Mandarin; in this sense Mandarin is more like German than English. Based on this analysis, we propose a variety of new morphological unknown-word features for POS tagging, extending earlier work by others on unknown-word tagging in English and German. Our features were implemented in a maximum entropy Markov model. Our system achieves state-of-the-art performance in Mandarin tagging, including improving unknown-word tagging performance on unseen varieties in Chinese Treebank 5.0 from 61% to 80% correct.", "keyphrases": ["pos tagging", "unknown word", "morphological feature"]} +{"id": "nguyen-etal-2009-convolution", "title": "Convolution Kernels on Constituent, Dependency and Sequential Structures for Relation Extraction", "abstract": "This paper explores the use of innovative kernels based on syntactic and semantic structures for a target relation extraction task. Syntax is derived from constituent and dependency parse trees whereas semantics concerns to entity types and lexical sequences. We investigate the effectiveness of such representations in the automated relation extraction from texts. We process the above data by means of Support Vector Machines along with the syntactic tree, the partial tree and the word sequence kernels. Our study on the ACE 2004 corpus illustrates that the combination of the above kernels achieves high effectiveness and significantly improves the current state-of-the-art.", "keyphrases": ["constituent", "relation extraction", "tree kernel"]} +{"id": "wang-etal-2019-persuasion", "title": "Persuasion for Good: Towards a Personalized Persuasive Dialogue System for Social Good", "abstract": "Developing intelligent persuasive conversational agents to change people's opinions and actions for social good is the frontier in advancing the ethical development of automated dialogue systems. To do so, the first step is to understand the intricate organization of strategic disclosures and appeals employed in human persuasion conversations. We designed an online persuasion task where one participant was asked to persuade the other to donate to a specific charity. We collected a large dataset with 1,017 dialogues and annotated emerging persuasion strategies from a subset. Based on the annotation, we built a baseline classifier with context information and sentence-level features to predict the 10 persuasion strategies used in the corpus.
Furthermore, to develop an understanding of personalized persuasion processes, we analyzed the relationships between individuals' demographic and psychological backgrounds including personality, morality, value systems, and their willingness for donation. Then, we analyzed which types of persuasion strategies led to a greater amount of donation depending on the individuals' personal backgrounds. This work lays the ground for developing a personalized persuasive dialogue system.", "keyphrases": ["social good", "persuasion", "negotiation"]} +{"id": "maharjan-etal-2018-letting", "title": "Letting Emotions Flow: Success Prediction by Modeling the Flow of Emotions in Books", "abstract": "Books have the power to make us feel happiness, sadness, pain, surprise, or sorrow. An author's dexterity in the use of these emotions captivates readers and makes it difficult for them to put the book down. In this paper, we model the flow of emotions over a book using recurrent neural networks and quantify its usefulness in predicting success in books. We obtained the best weighted F1-score of 69% for predicting books' success in a multitask setting (simultaneously predicting success and genre of books).", "keyphrases": ["emotion", "success prediction", "book"]} +{"id": "lebanoff-etal-2018-adapting", "title": "Adapting the Neural Encoder-Decoder Framework from Single to Multi-Document Summarization", "abstract": "Generating a text abstract from a set of documents remains a challenging task. The neural encoder-decoder framework has recently been exploited to summarize single documents, but its success can in part be attributed to the availability of large parallel data automatically acquired from the Web. In contrast, parallel data for multi-document summarization are scarce and costly to obtain. There is a pressing need to adapt an encoder-decoder model trained on single-document summarization data to work with multiple-document input. In this paper, we present an initial investigation into a novel adaptation method. It exploits the maximal marginal relevance method to select representative sentences from multi-document input, and leverages an abstractive encoder-decoder model to fuse disparate sentences to an abstractive summary. The adaptation method is robust and itself requires no training data. Our system compares favorably to state-of-the-art extractive and abstractive approaches judged by automatic metrics and human assessors.", "keyphrases": ["neural encoder-decoder framework", "summarization", "multi-document input"]} +{"id": "beisswenger-etal-2016-empirist", "title": "EmpiriST 2015: A Shared Task on the Automatic Linguistic Annotation of Computer-Mediated Communication and Web Corpora", "abstract": "This paper describes the goals, design and results of a shared task on the automatic linguistic annotation of German language data from genres of computer-mediated communication (CMC), social media interactions and Web corpora. The two subtasks of tokenization and part-of-speech tagging were performed on two data sets: (i) a genuine CMC data set with samples from several CMC genres, and (ii) a Web corpora data set of CC-licensed Web pages which represents the type of data found in large corpora crawled from the Web. The teams participating in the shared task achieved a substantial improvement over current off-the-shelf tools for German. The best tokenizer reached an F1-score of 99.57% (vs. 98.95% off-the-shelf baseline), while the best tagger reached an accuracy of 90.44% (vs. 84.86% baseline).
The gold standard (more than 20,000 tokens of training and test data) is freely available online together with detailed annotation guidelines.", "keyphrases": ["automatic linguistic annotation", "computer-mediated communication", "german", "tokenization", "empirist"]} +{"id": "uzzaman-etal-2013-semeval", "title": "SemEval-2013 Task 1: TempEval-3: Evaluating Time Expressions, Events, and Temporal Relations", "abstract": "Within the SemEval-2013 evaluation exercise, the TempEval-3 shared task aims to advance research on temporal information processing. It follows on from TempEval-1 and -2, with: a three-part structure covering temporal expression, event, and temporal relation extraction; a larger dataset; and new single measures to rank systems \u2010 in each task and in general. In this paper, we describe the participants\u2019 approaches, results, and the observations from the results, which may guide future research in this area.", "keyphrases": ["tempeval-3", "temporal information extraction", "news article", "relation identification"]} +{"id": "moghe-etal-2018-towards", "title": "Towards Exploiting Background Knowledge for Building Conversation Systems", "abstract": "Existing dialog datasets contain a sequence of utterances and responses without any explicit background knowledge associated with them. This has resulted in the development of models which treat conversation as a sequence-to-sequence generation task (i.e., given a sequence of utterances generate the response sequence).
This is not only an overly simplistic view of conversation but it is also emphatically different from the way humans converse by heavily relying on their background knowledge about the topic (as opposed to simply relying on the previous sequence of utterances). For example, it is common for humans to (involuntarily) produce utterances which are copied or suitably modified from background articles they have read about the topic. To facilitate the development of such natural conversation models which mimic the human process of conversing, we create a new dataset containing movie chats wherein each response is explicitly generated by copying and/or modifying sentences from unstructured background knowledge such as plots, comments and reviews about the movie. We establish baseline results on this dataset (90K utterances from 9K conversations) using three different models: (i) pure generation based models which ignore the background knowledge (ii) generation based models which learn to copy information from the background knowledge when required and (iii) span prediction based models which predict the appropriate response span in the background knowledge.", "keyphrases": ["background knowledge", "conversation", "dialog dataset"]} +{"id": "liu-etal-2021-plome", "title": "PLOME: Pre-training with Misspelled Knowledge for Chinese Spelling Correction", "abstract": "Chinese spelling correction (CSC) is a task to detect and correct spelling errors in texts. CSC is essentially a linguistic problem, thus the ability of language understanding is crucial to this task. In this paper, we propose a Pre-trained masked Language model with Misspelled knowledgE (PLOME) for CSC, which jointly learns how to understand language and correct spelling errors. To this end, PLOME masks the chosen tokens with similar characters according to a confusion set rather than the fixed token \u201c[MASK]\u201d as in BERT. Besides character prediction, PLOME also introduces pronunciation prediction to learn the misspelled knowledge on phonic level. Moreover, phonological and visual similarity knowledge is important to this task. PLOME utilizes GRU networks to model such knowledge based on characters' phonics and strokes. Experiments are conducted on widely used benchmarks. Our method achieves superior performance against state-of-the-art approaches by a remarkable margin. We release the source code and pre-trained model for further use by the community ().", "keyphrases": ["misspelled knowledge", "chinese spelling correction", "plome"]} +{"id": "barman-etal-2014-code", "title": "Code Mixing: A Challenge for Language Identification in the Language of Social Media", "abstract": "In social media communication, multilingual speakers often switch between languages, and, in such an environment, automatic language identification becomes both a necessary and challenging task. In this paper, we describe our work in progress on the problem of automatic language identification for the language of social media. We describe a new dataset that we are in the process of creating, which contains Facebook posts and comments that exhibit code mixing between Bengali, English and Hindi. We also present some preliminary word-level language identification experiments using this dataset. Different techniques are employed, including a simple unsupervised dictionary-based approach, supervised word-level classification with and without contextual clues, and sequence labelling using Conditional Random Fields. 
We find that the dictionary-based approach is surpassed by supervised classification and sequence labelling, and that it is important to take contextual clues into consideration.", "keyphrases": ["language identification", "code mixing", "social medium", "bengali-hindi-english facebook comment", "code-mixed text"]} +{"id": "shirani-etal-2019-learning", "title": "Learning Emphasis Selection for Written Text in Visual Media from Crowd-Sourced Label Distributions", "abstract": "In visual communication, text emphasis is used to increase the comprehension of written text to convey the author's intent. We study the problem of emphasis selection, i.e. choosing candidates for emphasis in short written text, to enable automated design assistance in authoring. Without knowing the author's intent and only considering the input text, multiple emphasis selections are valid. We propose a model that employs end-to-end label distribution learning (LDL) on crowd-sourced data and predicts a selection distribution, capturing the inter-subjectivity (common-sense) in the audience as well as the ambiguity of the input. We compare the model with several baselines in which the problem is transformed to single-label learning by mapping label distributions to absolute labels via majority voting.", "keyphrases": ["emphasis selection", "candidate", "label distribution learning", "visual medium"]} +{"id": "gonzalez-rubio-etal-2012-active", "title": "Active learning for interactive machine translation", "abstract": "Translation needs have greatly increased during the last years. In many situations, text to be translated constitutes an unbounded stream of data that grows continually with time. An effective approach to translate text documents is to follow an interactive-predictive paradigm in which both the system is guided by the user and the user is assisted by the system to generate error-free translations. Unfortunately, when processing such unbounded data streams even this approach requires an overwhelming amount of manpower. It is in this scenario where the use of active learning techniques is compelling. In this work, we propose different active learning techniques for interactive machine translation. Results show that for a given translation quality the use of active learning allows us to greatly reduce the human effort required to translate the sentences in the stream.", "keyphrases": ["machine translation", "human effort", "active learning"]} +{"id": "kim-hovy-2004-determining", "title": "Determining the Sentiment of Opinions", "abstract": "Identifying sentiments (the affective parts of opinions) is a challenging problem. We present a system that, given a topic, automatically finds the people who hold opinions about that topic and the sentiment of each opinion. The system contains a module for determining word sentiment and another for combining sentiments within a sentence. We experiment with various models of classifying and combining sentiment at word and sentence levels, with promising results.", "keyphrases": ["opinion", "sentence level", "synonyms", "orientation", "subjectivity"]} +{"id": "florescu-caragea-2017-positionrank", "title": "PositionRank: An Unsupervised Approach to Keyphrase Extraction from Scholarly Documents", "abstract": "The large and growing amounts of online scholarly data present both challenges and opportunities to enhance knowledge discovery.
One such challenge is to automatically extract a small set of keyphrases from a document that can accurately describe the document's content and can facilitate fast information processing. In this paper, we propose PositionRank, an unsupervised model for keyphrase extraction from scholarly documents that incorporates information from all positions of a word's occurrences into a biased PageRank. Our model obtains remarkable improvements in performance over PageRank models that do not take into account word positions as well as over strong baselines for this task. Specifically, on several datasets of research papers, PositionRank achieves improvements as high as 29.09%.", "keyphrases": ["keyphrase extraction", "scholarly document", "positionrank"]} +{"id": "oda-etal-2015-syntax", "title": "Syntax-based Simultaneous Translation through Prediction of Unseen Syntactic Constituents", "abstract": "Simultaneous translation is a method to reduce the latency of communication through machine translation (MT) by dividing the input into short segments before performing translation. However, short segments pose problems for syntax-based translation methods, as it is difficult to generate accurate parse trees for sub-sentential segments. In this paper, we perform the first experiments applying syntax-based SMT to simultaneous translation, and propose two methods to prevent degradations in accuracy: a method to predict unseen syntactic constituents that help generate complete parse trees, and a method that waits for more input when the current utterance is not enough to generate a fluent translation. Experiments on English-Japanese translation show that the proposed methods allow for improvements in accuracy, particularly with regards to word order of the target sentences.", "keyphrases": ["simultaneous translation", "syntactic constituent", "complete parse tree"]} +{"id": "goo-etal-2018-slot", "title": "Slot-Gated Modeling for Joint Slot Filling and Intent Prediction", "abstract": "Attention-based recurrent neural network models for joint intent detection and slot filling have achieved the state-of-the-art performance, while they have independent attention weights. Considering that slot and intent have the strong relationship, this paper proposes a slot gate that focuses on learning the relationship between intent and slot attention vectors in order to obtain better semantic frame results by the global optimization. The experiments show that our proposed model significantly improves sentence-level semantic frame accuracy with 4.2% and 1.9% relative improvement compared to the attentional model on benchmark ATIS and Snips datasets respectively.", "keyphrases": ["slot filling", "intent detection", "joint modeling"]} +{"id": "cucerzan-2007-large", "title": "Large-Scale Named Entity Disambiguation Based on Wikipedia Data", "abstract": "This paper presents a large-scale system for the recognition and semantic disambiguation of named entities based on information extracted from a large encyclopedic collection and Web search results. It describes in detail the disambiguation paradigm employed and the information extraction process from Wikipedia.
Through a process of maximizing the agreement between the contextual information extracted from Wikipedia and the context of a document, as well as the agreement among the category tags associated with the candidate entities, the implemented system shows high disambiguation accuracy on both news stories and Wikipedia articles.", "keyphrases": ["entity disambiguation", "wikipedia", "knowledge base", "overlap", "noun phrase"]} +{"id": "nakanishi-etal-2005-probabilistic", "title": "Probabilistic Models for Disambiguation of an HPSG-Based Chart Generator", "abstract": "We describe probabilistic models for a chart generator based on HPSG. Within the research field of parsing with lexicalized grammars such as HPSG, recent developments have achieved efficient estimation of probabilistic models and high-speed parsing guided by probabilistic models. The focus of this paper is to show that two essential techniques -- model estimation on packed parse forests and beam search during parsing -- are successfully exported to the task of natural language generation. Additionally, we report empirical evaluation of the performance of several disambiguation models and how the performance changes according to the feature set used in the models and the size of training data.", "keyphrases": ["chart generator", "hpsg grammar", "probabilistic model"]} +{"id": "lin-2004-path", "title": "A Path-based Transfer Model for Machine Translation", "abstract": "We propose a path-based transfer model for machine translation. The model is trained with a word-aligned parallel corpus where the source language sentences are parsed. The training algorithm extracts a set of transfer rules and their probabilities from the training corpus. A rule translates a path in the source language dependency tree into a fragment in the target dependency tree. The problem of finding the most probable translation becomes a graph-theoretic problem of finding the minimum path covering of the source language dependency tree.", "keyphrases": ["path-based transfer model", "machine translation", "syntax-based model"]} +{"id": "kim-etal-2011-overview", "title": "Overview of BioNLP Shared Task 2011", "abstract": "The BioNLP Shared Task 2011, an information extraction task held over 6 months up to March 2011, met with community-wide participation, receiving 46 final submissions from 24 teams. Five main tasks and three supporting tasks were arranged, and their results show advances in the state of the art in fine-grained biomedical domain information extraction and demonstrate that extraction methods successfully generalize in various aspects.", "keyphrases": ["bionlp shared task", "information extraction", "series", "protein", "biomedical event"]} +{"id": "bonial-etal-2014-propbank", "title": "PropBank: Semantics of New Predicate Types", "abstract": "This research focuses on expanding PropBank, a corpus annotated with predicate argument structures, with new predicate types; namely, noun, adjective and complex predicates, such as Light Verb Constructions. This effort is in part inspired by a sister project to PropBank, the Abstract Meaning Representation project, which also attempts to capture \u201cwho is doing what to whom\u201d in a sentence, but does so in a way that abstracts away from syntactic structures. For example, alternate realizations of a `destroying' event in the form of either the verb `destroy' or the noun `destruction' would receive the same Abstract Meaning Representation. 
In order for PropBank to reach the same level of coverage and continue to serve as the bedrock for Abstract Meaning Representation, predicate types other than verbs, which have previously gone without annotation, must be annotated. This research describes the challenges therein, including the development of new annotation practices that walk the line between abstracting away from language-particular syntactic facts to explore deeper semantics, and maintaining the connection between semantics and syntactic structures that has proven to be very valuable for PropBank as a corpus of training data for Natural Language Processing applications.", "keyphrases": ["predicate", "connection", "propbank"]} +{"id": "hua-etal-2019-argument", "title": "Argument Mining for Understanding Peer Reviews", "abstract": "Peer-review plays a critical role in the scientific writing and publication ecosystem. To assess the efficiency and efficacy of the reviewing process, one essential element is to understand and evaluate the reviews themselves. In this work, we study the content and structure of peer reviews under the argument mining framework, through automatically detecting (1) the argumentative propositions put forward by reviewers, and (2) their types (e.g., evaluating the work or making suggestions for improvement). We first collect 14.2K reviews from major machine learning and natural language processing venues. 400 reviews are annotated with 10,386 propositions and corresponding types of Evaluation, Request, Fact, Reference, or Quote. We then train state-of-the-art proposition segmentation and classification models on the data to evaluate their utilities and identify new challenges for this new domain, motivating future directions for argument mining. Further experiments show that proposition usage varies across venues in amount, type, and topic.", "keyphrases": ["peer review", "quote", "argument mining"]} +{"id": "graca-etal-2019-generalizing", "title": "Generalizing Back-Translation in Neural Machine Translation", "abstract": "Back-translation \u2014 data augmentation by translating target monolingual data \u2014 is a crucial component in modern neural machine translation (NMT). In this work, we reformulate back-translation in the scope of cross-entropy optimization of an NMT model, clarifying its underlying mathematical assumptions and approximations beyond its heuristic usage. Our formulation covers broader synthetic data generation schemes, including sampling from a target-to-source NMT model. With this formulation, we point out fundamental problems of the sampling-based approaches and propose to remedy them by (i) disabling label smoothing for the target-to-source model and (ii) sampling from a restricted search space. Our statements are investigated on the WMT 2018 German - English news translation task.", "keyphrases": ["back-translation", "neural machine translation", "formulation"]} +{"id": "levine-etal-2020-sensebert", "title": "SenseBERT: Driving Some Sense into BERT", "abstract": "The ability to learn from large unlabeled corpora has allowed neural language models to advance the frontier in natural language understanding. However, existing self-supervision techniques operate at the word form level, which serves as a surrogate for the underlying semantic content. This paper proposes a method to employ weak-supervision directly at the word sense level. Our model, named SenseBERT, is pre-trained to predict not only the masked words but also their WordNet supersenses. 
Accordingly, we attain a lexical-semantic level language model, without the use of human annotation. SenseBERT achieves significantly improved lexical understanding, as we demonstrate by experimenting on SemEval Word Sense Disambiguation, and by attaining a state of the art result on the `Word in Context' task.", "keyphrases": ["language model", "pre-training", "supersense", "sensebert", "word-in-context task"]} +{"id": "falke-etal-2019-ranking", "title": "Ranking Generated Summaries by Correctness: An Interesting but Challenging Application for Natural Language Inference", "abstract": "While recent progress on abstractive summarization has led to remarkably fluent summaries, factual errors in generated summaries still severely limit their use in practice. In this paper, we evaluate summaries produced by state-of-the-art models via crowdsourcing and show that such errors occur frequently, in particular with more abstractive models. We study whether textual entailment predictions can be used to detect such errors and if they can be reduced by reranking alternative predicted summaries. That leads to an interesting downstream application for entailment models. In our experiments, we find that out-of-the-box entailment models trained on NLI datasets do not yet offer the desired performance for the downstream task and we therefore release our annotations as additional test data for future extrinsic evaluations of NLI.", "keyphrases": ["summaries", "correctness", "natural language inference", "entailment model", "beam search"]} +{"id": "levy-manning-2003-harder", "title": "Is it Harder to Parse Chinese, or the Chinese Treebank?", "abstract": "We present a detailed investigation of the challenges posed when applying parsing models developed against English corpora to Chinese. We develop a factored-model statistical parser for the Penn Chinese Treebank, showing the implications of gross statistical differences between WSJ and Chinese Treebanks for the most general methods of parser adaptation. We then provide a detailed analysis of the major sources of statistical parse errors for this corpus, showing their causes and relative frequencies, and show that while some types of errors are due to difficult ambiguities inherent in Chinese grammar, others arise due to treebank annotation practices. We show how each type of error can be addressed with simple, targeted changes to the independence assumptions of the maximum likelihood-estimated PCFG factor of the parsing model, which raises our F1 from 80.7% to 82.6% on our development set, and achieves parse accuracy close to the best published figures for Chinese parsing.", "keyphrases": ["chinese", "investigation", "function word"]} +{"id": "fernandes-etal-2014-latent", "title": "Latent Trees for Coreference Resolution", "abstract": "We describe a structure learning system for unrestricted coreference resolution that explores two key modeling techniques: latent coreference trees and automatic entropy-guided feature induction. The latent tree modeling makes the learning problem computationally feasible because it incorporates a meaningful hidden structure. Additionally, using an automatic feature induction method, we can efficiently build enhanced nonlinear models using linear model learning algorithms. We present empirical results that highlight the contribution of each modeling technique used in the proposed system. 
Empirical evaluation is performed on the multilingual unrestricted coreference CoNLL-2012 Shared Task datasets, which comprise three languages: Arabic, Chinese and English. We apply the same system to all languages, except for minor adaptations to some language-dependent features such as nested mentions and specific static pronoun lists. A previous version of this system was submitted to the CoNLL-2012 Shared Task closed track, achieving an official score of 58.69, the best among the competitors. The unique enhancement added to the current system version is the inclusion of candidate arcs linking nested mentions for the Chinese language. By including such arcs, the score increases by almost 4.5 points for that language. The current system shows a score of 60.15, which corresponds to a 3.5% error reduction, and is the best performing system for each of the three languages.", "keyphrases": ["coreference resolution", "mention", "perceptron"]} +{"id": "macherey-etal-2011-language", "title": "Language-independent compound splitting with morphological operations", "abstract": "Translating compounds is an important problem in machine translation. Since many compounds have not been observed during training, they pose a challenge for translation systems. Previous decompounding methods have often been restricted to a small set of languages as they cannot deal with more complex compound forming processes. We present a novel and unsupervised method to learn the compound parts and morphological operations needed to split compounds into their compound parts. The method uses a bilingual corpus to learn the morphological operations required to split a compound into its parts. Furthermore, monolingual corpora are used to learn and filter the set of compound part candidates. We evaluate our method within a machine translation task and show significant improvements for various languages to show the versatility of the approach.", "keyphrases": ["compound splitting", "operation", "machine translation", "transitional element"]} +{"id": "jiang-etal-2020-cross", "title": "Cross-lingual Information Retrieval with BERT", "abstract": "Multiple neural language models have been developed recently, e.g., BERT and XLNet, and achieved impressive results in various NLP tasks including sentence classification, question answering and document ranking. In this paper, we explore the use of the popular bidirectional language model, BERT, to model and learn the relevance between English queries and foreign-language documents in the task of cross-lingual information retrieval. A deep relevance matching model based on BERT is introduced and trained by finetuning a pretrained multilingual BERT model with weak supervision, using home-made CLIR training data derived from parallel corpora. Experimental results of the retrieval of Lithuanian documents against short English queries show that our model is effective and outperforms the competitive baseline approaches.", "keyphrases": ["bert", "language model", "query", "cross-lingual information retrieval"]} +{"id": "turner-charniak-2005-supervised", "title": "Supervised and Unsupervised Learning for Sentence Compression", "abstract": "In Statistics-Based Summarization - Step One: Sentence Compression, Knight and Marcu (Knight and Marcu, 2000) (K&M) use a corpus of 1035 training sentences. More data is not easily available, so in addition to improving the original K&M noisy-channel model, we create unsupervised and semi-supervised models of the task. 
Finally, we point out problems with modeling the task in this way. These problems suggest areas for future research.", "keyphrases": ["sentence compression", "marcu", "noisy-channel model", "unsupervised variant"]} +{"id": "lawrence-etal-2017-counterfactual", "title": "Counterfactual Learning from Bandit Feedback under Deterministic Logging: A Case Study in Statistical Machine Translation", "abstract": "The goal of counterfactual learning for statistical machine translation (SMT) is to optimize a target SMT system from logged data that consist of user feedback to translations that were predicted by another, historic SMT system. A challenge arises by the fact that risk-averse commercial SMT systems deterministically log the most probable translation. The lack of sufficient exploration of the SMT output space seemingly contradicts the theoretical requirements for counterfactual learning. We show that counterfactual learning from deterministic bandit logs is possible nevertheless by smoothing out deterministic components in learning. This can be achieved by additive and multiplicative control variates that avoid degenerate behavior in empirical risk minimization. Our simulation experiments show improvements of up to 2 BLEU points by counterfactual learning from deterministic bandit feedback.", "keyphrases": ["bandit feedback", "deterministic logging", "statistical machine translation"]} +{"id": "kalchbrenner-blunsom-2013-recurrent", "title": "Recurrent Continuous Translation Models", "abstract": "We introduce a class of probabilistic continuous translation models called Recurrent Continuous Translation Models that are purely based on continuous representations for words, phrases and sentences and do not rely on alignments or phrasal translation units. The models have a generation and a conditioning aspect. The generation of the translation is modelled with a target Recurrent Language Model, whereas the conditioning on the source sentence is modelled with a Convolutional Sentence Model. Through various experiments, we show first that our models obtain a perplexity with respect to gold translations that is > 43% lower than that of state-of-the-art alignment-based translation models. Secondly, we show that they are remarkably sensitive to the word order, syntax, and meaning of the source sentence despite lacking alignments. Finally we show that they match a state-of-the-art system when rescoring n-best lists of translations.", "keyphrases": ["machine translation", "recurrent neural network", "nmt system", "target sequence", "encoder-decoder architecture"]} +{"id": "brants-etal-2007-large", "title": "Large Language Models in Machine Translation", "abstract": "Systems, methods, and computer program products for machine translation are provided. In some implementations a system is provided. 
The system includes a language model including a collection of n-grams from a corpus, each n-gram having a corresponding relative frequency in the corpus and an order n corresponding to a number of tokens in the n-gram, each n-gram corresponding to a backoff n-gram having an order of n-1 and a collection of backoff scores, each backoff score associated with an n-gram, the backoff score determined as a function of a backoff factor and a relative frequency of a corresponding backoff n-gram in the corpus.", "keyphrases": ["machine translation", "relative frequency", "billion", "more data", "cluster"]} +{"id": "ruppenhofer-etal-2009-semeval", "title": "SemEval-2010 Task 10: Linking Events and Their Participants in Discourse", "abstract": "In this paper, we describe the SemEval-2010 shared task on \"Linking Events and Their Participants in Discourse\". This task is a variant of the classical semantic role labelling task. The novel aspect is that we focus on linking local semantic argument structures across sentence boundaries. Specifically, the task aims at linking locally uninstantiated roles to their co-referents in the wider discourse context (if such co-referents exist). This task is potentially beneficial for a number of NLP applications and we hope that it will not only attract researchers from the semantic role labelling community but also from co-reference resolution and information extraction.", "keyphrases": ["discourse", "semeval task", "f-score annotator agreement"]} +{"id": "shwartz-etal-2016-improving", "title": "Improving Hypernymy Detection with an Integrated Path-based and Distributional Method", "abstract": "Detecting hypernymy relations is a key task in NLP, which is addressed in the literature using two complementary approaches. Distributional methods, whose supervised variants are the current best performers, and path-based methods, which received less research attention. We suggest an improved path-based algorithm, in which the dependency paths are encoded using a recurrent neural network, that achieves results comparable to distributional methods. We then extend the approach to integrate both path-based and distributional signals, significantly improving upon the state-of-the-art on this task.", "keyphrases": ["hypernymy detection", "semantic relation", "dbpedia"]} +{"id": "chen-etal-2010-emotion", "title": "Emotion Cause Detection with Linguistic Constructions", "abstract": "This paper proposes a multi-label approach to detect emotion causes. The multi-label model not only detects multi-clause causes, but also captures the long-distance information to facilitate emotion cause detection. In addition, based on the linguistic analysis, we create two sets of linguistic patterns during feature extraction. Both manually generalized patterns and automatically generalized patterns are designed to extract general cause expressions or specific constructions for emotion causes. Experiments show that our system achieves a performance much higher than a baseline model.", "keyphrases": ["cause", "emotion", "linguistic cue"]} +{"id": "wang-poon-2018-deep", "title": "Deep Probabilistic Logic: A Unifying Framework for Indirect Supervision", "abstract": "Deep learning has emerged as a versatile tool for a wide range of NLP tasks, due to its superior capacity in representation learning. But its applicability is limited by the reliance on annotated examples, which are difficult to produce at scale. 
Indirect supervision has emerged as a promising direction to address this bottleneck, either by introducing labeling functions to automatically generate noisy examples from unlabeled text, or by imposing constraints over interdependent label decisions. A plethora of methods have been proposed, each with respective strengths and limitations. Probabilistic logic offers a unifying language to represent indirect supervision, but end-to-end modeling with probabilistic logic is often infeasible due to intractable inference and learning. In this paper, we propose deep probabilistic logic (DPL) as a general framework for indirect supervision, by composing probabilistic logic with deep learning. DPL models label decisions as latent variables, represents prior knowledge on their relations using weighted first-order logical formulas, and alternates between learning a deep neural network for the end task and refining uncertain formula weights for indirect supervision, using variational EM. This framework subsumes prior indirect supervision methods as special cases, and enables novel combination via infusion of rich domain and linguistic knowledge. Experiments on biomedical machine reading demonstrate the promise of this approach.", "keyphrases": ["probabilistic logic", "indirect supervision", "deep learning", "dpl"]} +{"id": "galley-etal-2004-identifying", "title": "Identifying Agreement and Disagreement in Conversational Speech: Use of Bayesian Networks to Model Pragmatic Dependencies", "abstract": "We describe a statistical approach for modeling agreements and disagreements in conversational interaction. Our approach first identifies adjacency pairs using maximum entropy ranking based on a set of lexical, durational, and structural features that look both forward and backward in the discourse. We then classify utterances as agreement or disagreement using these adjacency pairs and features that represent various pragmatic influences of previous agreement or disagreement on the current utterance. Our approach achieves 86.9% accuracy, a 4.9% increase over previous work.", "keyphrases": ["disagreement", "conversational speech", "bayesian networks", "pragmatic influence"]} +{"id": "woodsend-lapata-2011-learning", "title": "Learning to Simplify Sentences with Quasi-Synchronous Grammar and Integer Programming", "abstract": "Text simplification aims to rewrite text into simpler versions, and thus make information accessible to a broader audience. Most previous work simplifies sentences using handcrafted rules aimed at splitting long sentences, or substitutes difficult words using a predefined dictionary. This paper presents a data-driven model based on quasi-synchronous grammar, a formalism that can naturally capture structural mismatches and complex rewrite operations. We describe how such a grammar can be induced from Wikipedia and propose an integer linear programming model for selecting the most appropriate simplification from the space of possible rewrites generated by the grammar. 
We show experimentally that our method creates simplifications that significantly reduce the reading difficulty of the input, while maintaining grammaticality and preserving its meaning.", "keyphrases": ["quasi-synchronous grammar", "integer programming", "simplification", "complex rewrite operation", "sentence splitting"]} +{"id": "sridhar-etal-2015-joint", "title": "Joint Models of Disagreement and Stance in Online Debate", "abstract": "Online debate forums present a valuable opportunity for the understanding and modeling of dialogue. To understand these debates, a key challenge is inferring the stances of the participants, all of which are interrelated and dependent. While collectively modeling users\u2019 stances has been shown to be effective (Walker et al., 2012c; Hasan and Ng, 2013), there are many modeling decisions whose ramifications are not well understood. To investigate these choices and their effects, we introduce a scalable unified probabilistic modeling framework for stance classification models that 1) are collective, 2) reason about disagreement, and 3) can model stance at either the author level or at the post level. We comprehensively evaluate the possible modeling choices on eight topics across two online debate corpora, finding accuracy improvements of up to 11.5 percentage points over a local classifier. Our results highlight the importance of making the correct modeling choices for online dialogues, and having a unified probabilistic modeling framework that makes this possible.", "keyphrases": ["stance", "online debate", "social interaction"]} +{"id": "shenoy-sardana-2020-multilogue", "title": "Multilogue-Net: A Context-Aware RNN for Multi-modal Emotion Detection and Sentiment Analysis in Conversation", "abstract": "Sentiment Analysis and Emotion Detection in conversation is key in several real-world applications, with an increase in modalities available aiding a better understanding of the underlying emotions. Multi-modal Emotion Detection and Sentiment Analysis can be particularly useful, as applications will be able to use specific subsets of available modalities, as per the available data. Current systems dealing with Multi-modal functionality fail to leverage and capture - the context of the conversation through all modalities, the dependency between the listener(s) and speaker emotional states, and the relevance and relationship between the available modalities. In this paper, we propose an end to end RNN architecture that attempts to take into account all the mentioned drawbacks. Our proposed model, at the time of writing, out-performs the state of the art on a benchmark dataset on a variety of accuracy and regression metrics.", "keyphrases": ["context-aware rnn", "conversation", "multilogue-net"]} +{"id": "zhao-etal-2018-paragraph", "title": "Paragraph-level Neural Question Generation with Maxout Pointer and Gated Self-attention Networks", "abstract": "Question generation, the task of automatically creating questions that can be answered by a certain span of text within a given passage, is important for question-answering and conversational systems in digital assistants such as Alexa, Cortana, Google Assistant and Siri. Recent sequence to sequence neural models have outperformed previous rule-based systems. Existing models mainly focused on using one or two sentences as the input. 
Long text has posed challenges for sequence to sequence neural models in question generation \u2013 worse performances were reported if using the whole paragraph (with multiple sentences) as the input. In reality, however, it often requires the whole paragraph as context in order to generate high quality questions. In this paper, we propose a maxout pointer mechanism with gated self-attention encoder to address the challenges of processing long text inputs for question generation. With sentence-level inputs, our model outperforms previous approaches with either sentence-level or paragraph-level inputs. Furthermore, our model can effectively utilize paragraphs as inputs, pushing the state-of-the-art result from 13.9 to 16.3 (BLEU_4).", "keyphrases": ["question generation", "paragraph", "self-attention encoder"]} +{"id": "tran-etal-2018-importance", "title": "The Importance of Being Recurrent for Modeling Hierarchical Structure", "abstract": "Recent work has shown that recurrent neural networks (RNNs) can implicitly capture and exploit hierarchical information when trained to solve common natural language processing tasks (Blevins et al., 2018) such as language modeling (Linzen et al., 2016; Gulordava et al., 2018) and neural machine translation (Shi et al., 2016). In contrast, the ability to model structured data with non-recurrent neural networks has received little attention despite their success in many NLP tasks (Gehring et al., 2017; Vaswani et al., 2017). In this work, we compare the two architectures\u2014recurrent versus non-recurrent\u2014with respect to their ability to model hierarchical structure and find that recurrency is indeed important for this purpose. The code and data used in our experiments are available at ", "keyphrases": ["recurrent", "hierarchical structure", "language understanding"]} +{"id": "zhang-lapata-2017-sentence", "title": "Sentence Simplification with Deep Reinforcement Learning", "abstract": "Sentence simplification aims to make sentences easier to read and understand. Most recent approaches draw on insights from machine translation to learn simplification rewrites from monolingual corpora of complex and simple sentences. We address the simplification problem with an encoder-decoder model coupled with a deep reinforcement learning framework. Our model, which we call DRESS (as shorthand for Deep REinforcement Sentence Simplification), explores the space of possible simplifications while learning to optimize a reward function that encourages outputs which are simple, fluent, and preserve the meaning of the input. Experiments on three datasets demonstrate that our model outperforms competitive simplification systems.", "keyphrases": ["deep reinforcement learning", "reward", "sentence simplification", "fluency", "seq2seq model"]} +{"id": "phandi-etal-2015-flexible", "title": "Flexible Domain Adaptation for Automated Essay Scoring Using Correlated Linear Regression", "abstract": "Most of the current automated essay scoring (AES) systems are trained using manually graded essays from a specific prompt. These systems experience a drop in accuracy when used to grade an essay from a different prompt. Obtaining a large number of manually graded essays each time a new prompt is introduced is costly and not viable. We propose domain adaptation as a solution to adapt an AES system from an initial prompt to a new prompt. We also propose a novel domain adaptation technique that uses Bayesian linear ridge regression. 
We evaluate our domain adaptation technique on the publicly available Automated Student Assessment Prize (ASAP) dataset and show that our proposed technique is a competitive default domain adaptation algorithm for the AES task.", "keyphrases": ["domain adaptation", "essay", "cblrr"]} +{"id": "raganato-etal-2017-word", "title": "Word Sense Disambiguation: A Unified Evaluation Framework and Empirical Comparison", "abstract": "Word Sense Disambiguation is a long-standing task in Natural Language Processing, lying at the core of human language understanding. However, the evaluation of automatic systems has been problematic, mainly due to the lack of a reliable evaluation framework. In this paper we develop a unified evaluation framework and analyze the performance of various Word Sense Disambiguation systems in a fair setup. The results show that supervised systems clearly outperform knowledge-based models. Among the supervised systems, a linear classifier trained on conventional local features still proves to be a hard baseline to beat. Nonetheless, recent approaches exploiting neural networks on unlabeled corpora achieve promising results, surpassing this hard baseline in most test sets.", "keyphrases": ["unified evaluation framework", "word sense disambiguation", "wsd", "supervised model"]} +{"id": "ma-hovy-2016-end", "title": "End-to-end Sequence Labeling via Bi-directional LSTM-CNNs-CRF", "abstract": "State-of-the-art sequence labeling systems traditionally require large amounts of task-specific knowledge in the form of hand-crafted features and data pre-processing. In this paper, we introduce a novel neural network architecture that benefits from both word- and character-level representations automatically, by using a combination of bidirectional LSTM, CNN and CRF. Our system is truly end-to-end, requiring no feature engineering or data pre-processing, thus making it applicable to a wide range of sequence labeling tasks. We evaluate our system on two data sets for two sequence labeling tasks --- Penn Treebank WSJ corpus for part-of-speech (POS) tagging and CoNLL 2003 corpus for named entity recognition (NER). We obtain state-of-the-art performance on both datasets --- 97.55\\% accuracy for POS tagging and 91.21\\% F1 for NER.", "keyphrases": ["sequence labeling", "bi-directional lstm-cnns-crf", "hand-crafted feature", "convolutional neural network", "crf layer"]} +{"id": "quirk-etal-2007-generative", "title": "Generative models of noisy translations with applications to parallel fragment extraction", "abstract": "The development of broad domain statistical machine translation systems is gated by the availability of parallel data. A promising strategy for mitigating data scarcity is to mine parallel data from comparable corpora. Although comparable corpora seldom contain parallel sentences, they often contain parallel words or phrases. Recent fragment extraction approaches have shown that including parallel fragments in SMT training data can significantly improve translation quality. 
We describe efficient and effective generative models for extracting fragments, and demonstrate that these algorithms produce competitive improvements on cross-domain test data without suffering in-domain degradation even at very large scale.", "keyphrases": ["noisy translation", "fragment", "parallel data", "cross-domain test data", "generative model"]} +{"id": "recasens-etal-2010-typology", "title": "A Typology of Near-Identity Relations for Coreference (NIDENT)", "abstract": "The task of coreference resolution requires people or systems to decide when two referring expressions refer to the 'same' entity or event. In real text, this is often a difficult decision because identity is never adequately defined, leading to contradictory treatment of cases in previous work. This paper introduces the concept of 'near-identity', a middle ground category between identity and non-identity, to handle such cases systematically. We present a typology of Near-Identity Relations (NIDENT) that includes fifteen types\u2015grouped under four main families\u2015that capture a wide range of ways in which (near-)coreference relations hold between discourse entities. We validate the theoretical model by annotating a small sample of real data and showing that inter-annotator agreement is high enough for stability (K=0.58, and up to K=0.65 and K=0.84 when leaving out one and two outliers, respectively). This work enables subsequent creation of the first internally consistent language resource of this type through larger annotation efforts.", "keyphrases": ["typology", "near-identity relations", "coreference"]} +{"id": "xin-etal-2020-deebert", "title": "DeeBERT: Dynamic Early Exiting for Accelerating BERT Inference", "abstract": "Large-scale pre-trained language models such as BERT have brought significant improvements to NLP applications. However, they are also notorious for being slow in inference, which makes them difficult to deploy in real-time applications. We propose a simple but effective method, DeeBERT, to accelerate BERT inference. Our approach allows samples to exit earlier without passing through the entire model. Experiments show that DeeBERT is able to save up to ~40% inference time with minimal degradation in model quality. Further analyses show different behaviors in the BERT transformer layers and also reveal their redundancy. Our work provides new ideas to efficiently apply deep transformer-based models to downstream tasks. Code is available at .", "keyphrases": ["exiting", "deebert", "current layer"]} +{"id": "chakravarthi-etal-2018-improving", "title": "Improving Wordnets for Under-Resourced Languages Using Machine Translation", "abstract": "Wordnets are extensively used in natural language processing, but the current approaches for manually building a wordnet from scratch involve large research groups for a long period of time, which are typically not available for under-resourced languages. Even if wordnet-like resources are available for under-resourced languages, they are often not easily accessible, which can alter the results of applications using these resources. Our proposed method presents an expand approach for improving and generating wordnets with the help of machine translation. We apply our methods to improve and extend wordnets for the Dravidian languages, i.e., Tamil, Telugu, Kannada, which are severely under-resourced languages. We report evaluation results of the generated wordnet senses in terms of precision for these languages. 
In addition to that, we carried out a manual evaluation of the translations for the Tamil language, where we demonstrate that our approach can aid in improving wordnet resources for under-resourced Dravidian languages.", "keyphrases": ["machine translation", "dravidian language", "india"]} +{"id": "yang-mitchell-2017-leveraging", "title": "Leveraging Knowledge Bases in LSTMs for Improving Machine Reading", "abstract": "This paper focuses on how to take advantage of external knowledge bases (KBs) to improve recurrent neural networks for machine reading. Traditional methods that exploit knowledge from KBs encode knowledge as discrete indicator features. Not only do these features generalize poorly, but they require task-specific feature engineering to achieve good performance. We propose KBLSTM, a novel neural model that leverages continuous representations of KBs to enhance the learning of recurrent neural networks for machine reading. To effectively integrate background knowledge with information from the currently processed text, our model employs an attention mechanism with a sentinel to adaptively decide whether to attend to background knowledge and which information from KBs is useful. Experimental results show that our model achieves accuracies that surpass the previous state-of-the-art results for both entity extraction and event extraction on the widely used ACE2005 dataset.", "keyphrases": ["machine reading", "entity extraction", "knowledge basis"]} +{"id": "bertoldi-etal-2008-phrase", "title": "Phrase-based statistical machine translation with pivot languages.", "abstract": "Translation with pivot languages has recently gained attention as a means to circumvent the data bottleneck of statistical machine translation (SMT). This paper tries to give a mathematically sound formulation of the various approaches presented in the literature and introduces new methods for training alignment models through pivot languages. We present experimental results on Chinese-Spanish translation via English, on a popular traveling domain task. In contrast to previous literature, we report experimental results by using parallel corpora that are either disjoint or overlapped on the pivot language side. Finally, our original method for generating training data through random sampling is shown to perform as well as the best methods based on the coupling of translation systems.", "keyphrases": ["statistical machine translation", "pivot language", "source-target corpus", "bridging", "many researcher"]} +{"id": "ultes-etal-2017-pydial", "title": "PyDial: A Multi-domain Statistical Dialogue System Toolkit", "abstract": "Statistical Spoken Dialogue Systems have been around for many years. However, access to these systems has always been difficult as there is still no publicly available end-to-end system implementation. To alleviate this, we present PyDial, an open-source end-to-end statistical spoken dialogue system toolkit which provides implementations of statistical approaches for all dialogue system modules. Moreover, it has been extended to provide multi-domain conversational functionality. It offers easy configuration, easy extensibility, and domain-independent implementations of the respective dialogue system modules. 
The toolkit is available for download under the Apache 2.0 license.", "keyphrases": ["dialogue system toolkit", "statistical approach", "pydial"]} +{"id": "zhao-etal-2017-learning", "title": "Learning Discourse-level Diversity for Neural Dialog Models using Conditional Variational Autoencoders", "abstract": "While recent neural encoder-decoder models have shown great promise in modeling open-domain conversations, they often generate dull and generic responses. Unlike past work that has focused on diversifying the output of the decoder from word-level to alleviate this problem, we present a novel framework based on conditional variational autoencoders that capture the discourse-level diversity in the encoder. Our model uses latent variables to learn a distribution over potential conversational intents and generates diverse responses using only greedy decoders. We have further developed a novel variant that is integrated with linguistic prior knowledge for better performance. Finally, the training procedure is improved through introducing a bag-of-word loss. Our proposed models have been validated to generate significantly more diverse responses than baseline approaches and exhibit competence of discourse-level decision-making.", "keyphrases": ["discourse-level diversity", "conditional variational autoencoders", "dialogue generation", "neural dialogue model"]} +{"id": "saunders-byrne-2020-reducing", "title": "Reducing Gender Bias in Neural Machine Translation as a Domain Adaptation Problem", "abstract": "Training data for NLP tasks often exhibits gender bias in that fewer sentences refer to women than to men. In Neural Machine Translation (NMT) gender bias has been shown to reduce translation quality, particularly when the target language has grammatical gender. The recent WinoMT challenge set allows us to measure this effect directly (Stanovsky et al., 2019). Ideally we would reduce system bias by simply debiasing all data prior to training, but achieving this effectively is itself a challenge. Rather than attempt to create a `balanced' dataset, we use transfer learning on a small set of trusted, gender-balanced examples. This approach gives strong and consistent improvements in gender debiasing with much less computational cost than training from scratch. A known pitfall of transfer learning on new domains is `catastrophic forgetting', which we address at adaptation and inference time. During adaptation we show that Elastic Weight Consolidation allows a performance trade-off between general translation quality and bias reduction. At inference time we propose a lattice-rescoring scheme which outperforms all systems evaluated in Stanovsky et al., 2019 on WinoMT with no degradation of general test set BLEU. We demonstrate our approach translating from English into three languages with varied linguistic properties and data availability.", "keyphrases": ["gender bias", "neural machine translation", "domain adaptation problem"]} +{"id": "specia-etal-2013-quest", "title": "QuEst - A translation quality estimation framework", "abstract": "We describe QUEST, an open source framework for machine translation quality estimation. The framework allows the extraction of several quality indicators from source segments, their translations, external resources (corpora, language models, topic models, etc.), as well as language tools (parsers, part-of-speech tags, etc.). It also provides machine learning algorithms to build quality estimation models. 
We benchmark the framework on a number of datasets and discuss the efficacy of features and algorithms.", "keyphrases": ["quality estimation", "machine translation", "quest", "access"]} +{"id": "nguyen-etal-2016-j", "title": "J-NERD: Joint Named Entity Recognition and Disambiguation with Rich Linguistic Features", "abstract": "Methods for Named Entity Recognition and Disambiguation (NERD) perform NER and NED in two separate stages. Therefore, NED may be penalized with respect to precision by NER false positives, and suffers in recall from NER false negatives. Conversely, NED does not fully exploit information computed by NER such as types of mentions. This paper presents J-NERD, a new approach to perform NER and NED jointly, by means of a probabilistic graphical model that captures mention spans, mention types, and the mapping of mentions to entities in a knowledge base. We present experiments with different kinds of texts from the CoNLL'03, ACE'05, and ClueWeb'09-FACC1 corpora. J-NERD consistently outperforms state-of-the-art competitors in end-to-end NERD precision, recall, and F1.", "keyphrases": ["disambiguation", "graphical model", "j-nerd"]} +{"id": "yu-li-2014-chinese", "title": "Chinese Spelling Error Detection and Correction Based on Language Model, Pronunciation, and Shape", "abstract": "Spelling check is an important preprocessing task when dealing with user generated texts such as tweets and product comments. Compared with some western languages such as English, Chinese spelling check is more complex because there is no word delimiter in Chinese written texts and misspelled characters can only be determined in word level. Our system works as follows. First, we use character-level n-gram language models to detect potential misspelled characters with low probabilities below some predefined threshold. Second, for each potential incorrect character, we generate a candidate set based on pronunciation and shape similarities. Third, we filter some candidate corrections if the candidate cannot form a legal word with its neighbors according to a word dictionary. Finally, we find the best candidate with highest language model probability. If the probability is higher than a predefined threshold, then we replace the original character; or we consider the original character as correct and take no action. Our preliminary experiments show that our simple method can achieve relatively high precision but low recall.", "keyphrases": ["spelling error", "detection", "correction", "language model", "pronunciation"]} +{"id": "zhang-etal-2016-rationale", "title": "Rationale-Augmented Convolutional Neural Networks for Text Classification", "abstract": "We present a new Convolutional Neural Network (CNN) model for text classification that jointly exploits labels on documents and their constituent sentences. Specifically, we consider scenarios in which annotators explicitly mark sentences (or snippets) that support their overall document categorization, i.e., they provide rationales. Our model exploits such supervision via a hierarchical approach in which each document is represented by a linear combination of the vector representations of its component sentences. We propose a sentence-level convolutional model that estimates the probability that a given sentence is a rationale, and we then scale the contribution of each sentence to the aggregate document representation in proportion to these estimates. 
Experiments on five classification datasets that have document labels and associated rationales demonstrate that our approach consistently outperforms strong baselines. Moreover, our model naturally provides explanations for its predictions.", "keyphrases": ["convolutional neural networks", "text classification", "rationale", "cnn model"]} +{"id": "faruqui-etal-2016-morphological", "title": "Morphological Inflection Generation Using Character Sequence to Sequence Learning", "abstract": "Morphological inflection generation is the task of generating the inflected form of a given lemma corresponding to a particular linguistic transformation. We model the problem of inflection generation as a character sequence to sequence learning problem and present a variant of the neural encoder-decoder model for solving it. Our model is language independent and can be trained in both supervised and semi-supervised settings. We evaluate our system on seven datasets of morphologically rich languages and achieve either better or comparable results to existing state-of-the-art models of inflection generation.", "keyphrases": ["character sequence", "rich language", "morphological inflection generation", "sequence-to-sequence model", "reinflection"]} +{"id": "wang-etal-2007-jeopardy", "title": "What is the Jeopardy Model? A Quasi-Synchronous Grammar for QA", "abstract": "This paper presents a syntax-driven approach to question answering, specifically the answer-sentence selection problem for short-answer questions. Rather than using syntactic features to augment existing statistical classifiers (as in previous work), we build on the idea that questions and their (correct) answers relate to each other via loose but predictable syntactic transformations. We propose a probabilistic quasi-synchronous grammar, inspired by one proposed for machine translation (D. Smith and Eisner, 2006), and parameterized by mixtures of a robust nonlexical syntax/alignment model with a(n optional) lexical-semantics-driven log-linear model. Our model learns soft alignments as a hidden variable in discriminative training. Experimental results using the TREC dataset are shown to significantly outperform strong state-of-the-art baselines.", "keyphrases": ["quasi-synchronous grammar", "syntax-driven approach", "dependency tree"]} +{"id": "wang-etal-2021-kepler", "title": "KEPLER: A Unified Model for Knowledge Embedding and Pre-trained Language Representation", "abstract": "Pre-trained language representation models (PLMs) cannot well capture factual knowledge from text. In contrast, knowledge embedding (KE) methods can effectively represent the relational facts in knowledge graphs (KGs) with informative entity embeddings, but conventional KE models cannot take full advantage of the abundant textual information. In this paper, we propose a unified model for Knowledge Embedding and Pre-trained LanguagE Representation (KEPLER), which can not only better integrate factual knowledge into PLMs but also produce effective text-enhanced KE with the strong PLMs. In KEPLER, we encode textual entity descriptions with a PLM as their embeddings, and then jointly optimize the KE and language modeling objectives. Experimental results show that KEPLER achieves state-of-the-art performances on various NLP tasks, and also works remarkably well as an inductive KE model on KG link prediction. 
Furthermore, for pre-training and evaluating KEPLER, we construct Wikidata5M, a large-scale KG dataset with aligned entity descriptions, and benchmark state-of-the-art KE methods on it. It shall serve as a new KE benchmark and facilitate the research on large KG, inductive KE, and KG with text. The source code can be obtained from .", "keyphrases": ["unified model", "knowledge embedding", "language modeling objective"]} +{"id": "irsoy-cardie-2014-opinion", "title": "Opinion Mining with Deep Recurrent Neural Networks", "abstract": "Recurrent neural networks (RNNs) are connectionist models of sequential data that are naturally applicable to the analysis of natural language. Recently, \u201cdepth in space\u201d \u2014 as an orthogonal notion to \u201cdepth in time\u201d \u2014 in RNNs has been investigated by stacking multiple layers of RNNs and shown empirically to bring a temporal hierarchy to the architecture. In this work we apply these deep RNNs to the task of opinion expression extraction formulated as a token-level sequence-labeling task. Experimental results show that deep, narrow RNNs outperform traditional shallow, wide RNNs with the same number of parameters. Furthermore, our approach outperforms previous CRF-based baselines, including the state-of-the-art semi-Markov CRF model, and does so without access to the powerful opinion lexicons and syntactic features relied upon by the semi-CRF, as well as without the standard layer-by-layer pre-training typically required of RNN architectures.", "keyphrases": ["recurrent neural network", "rnn", "opinion mining"]} +{"id": "barzilay-lee-2004-catching", "title": "Catching the Drift: Probabilistic Content Models, with Applications to Generation and Summarization", "abstract": "We consider the problem of modeling the content structure of texts within a specific domain, in terms of the topics the texts address and the order in which these topics appear. We first present an effective knowledge-lean method for learning content models from unannotated documents, utilizing a novel adaptation of algorithms for Hidden Markov Models. We then apply our method to two complementary tasks: information ordering and extractive summarization. Our experiments show that incorporating content models in these applications yields substantial improvement over previously-proposed methods.", "keyphrases": ["content model", "summarization", "hidden markov models", "hmm", "re-occurrence"]} +{"id": "straka-strakova-2017-tokenizing", "title": "Tokenizing, POS Tagging, Lemmatizing and Parsing UD 2.0 with UDPipe", "abstract": "Many natural language processing tasks, including the most advanced ones, routinely start by several basic processing steps \u2013 tokenization and segmentation, most likely also POS tagging and lemmatization, and commonly parsing as well. A multilingual pipeline performing these steps can be trained using the Universal Dependencies project, which contains annotations of the described tasks for 50 languages in the latest release UD 2.0. We present an update to UDPipe, a simple-to-use pipeline processing CoNLL-U version 2.0 files, which performs these tasks for multiple languages without requiring additional external data. We provide models for all 50 languages of UD 2.0, and furthermore, the pipeline can be trained easily using data in CoNLL-U format. UDPipe is a standalone application in C++, with bindings available for Python, Java, C# and Perl. 
In the CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies, UDPipe was the eighth best system, while achieving low running times and moderately sized models.", "keyphrases": ["pos tagging", "udpipe", "tokenization"]} +{"id": "gong-etal-2018-information", "title": "Information Aggregation via Dynamic Routing for Sequence Encoding", "abstract": "While much progress has been made in how to encode a text sequence into a sequence of vectors, less attention has been paid to how to aggregate these preceding vectors (outputs of RNN/CNN) into fixed-size encoding vector. Usually, a simple max or average pooling is used, which is a bottom-up and passive way of aggregation and lack of guidance by task information. In this paper, we propose an aggregation mechanism to obtain a fixed-size encoding with a dynamic routing policy. The dynamic routing policy dynamically decides what and how much information needs to be transferred from each word to the final encoding of the text sequence. Following the work of Capsule Network, we design two dynamic routing policies to aggregate the outputs of RNN/CNN encoding layer into a final encoding vector. Compared to the other aggregation methods, dynamic routing can refine the messages according to the state of final encoding vector. Experimental results on five text classification tasks show that our method outperforms other aggregating models by a significant margin. Related source code is released on our github page.", "keyphrases": ["dynamic routing", "sequence encoding", "aggregation mechanism"]} +{"id": "mostafazadeh-etal-2016-corpus", "title": "A Corpus and Cloze Evaluation for Deeper Understanding of Commonsense Stories", "abstract": "Representation and learning of commonsense knowledge is one of the foundational problems in the quest to enable deep language understanding. This issue is particularly challenging for understanding causal and correlational relationships between events. While this topic has received a lot of interest in the NLP community, research has been hindered by the lack of a proper evaluation framework. This paper attempts to address this problem with a new framework for evaluating story understanding and script learning: the `Story Cloze Test\u2019. This test requires a system to choose the correct ending to a four-sentence story. We created a new corpus of 50k five-sentence commonsense stories, ROCStories, to enable this evaluation. This corpus is unique in two ways: (1) it captures a rich set of causal and temporal commonsense relations between daily events, and (2) it is a high quality collection of everyday life stories that can also be used for story generation. Experimental evaluation shows that a host of baselines and state-of-the-art models based on shallow language understanding struggle to achieve a high score on the Story Cloze Test. We discuss these implications for script and story learning, and offer suggestions for deeper language understanding.", "keyphrases": ["story", "commonsense knowledge", "language understanding", "rocstories corpus", "causal relation"]} +{"id": "gao-etal-2005-chinese", "title": "Chinese Word Segmentation and Named Entity Recognition: A Pragmatic Approach", "abstract": "This article presents a pragmatic approach to Chinese word segmentation. It differs from most previous approaches mainly in three respects. 
First, while theoretical linguists have defined Chinese words using various linguistic criteria, Chinese words in this study are defined pragmatically as segmentation units whose definition depends on how they are used and processed in realistic computer applications. Second, we propose a pragmatic mathematical framework in which segmenting known words and detecting unknown words of different types (i.e., morphologically derived words, factoids, named entities, and other unlisted words) can be performed simultaneously in a unified way. These tasks are usually conducted separately in other systems. Finally, we do not assume the existence of a universal word segmentation standard that is application-independent. Instead, we argue for the necessity of multiple segmentation standards due to the pragmatic fact that different natural language processing applications might require different granularities of Chinese words. These pragmatic approaches have been implemented in an adaptive Chinese word segmenter, called MSRSeg, which will be described in detail. It consists of two components: (1) a generic segmenter that is based on the framework of linear mixture models and provides a unified approach to the five fundamental features of word-level Chinese language processing: lexicon word processing, morphological analysis, factoid detection, named entity recognition, and new word identification; and (2) a set of output adaptors for adapting the output of (1) to different application-specific standards. Evaluation on five test sets with different standards shows that the adaptive system achieves state-of-the-art performance on all the test sets.", "keyphrases": ["word segmentation", "entity recognition", "pragmatic approach", "different type"]} +{"id": "yadav-bethard-2018-survey", "title": "A Survey on Recent Advances in Named Entity Recognition from Deep Learning models", "abstract": "Named Entity Recognition (NER) is a key component in NLP systems for question answering, information retrieval, relation extraction, etc. NER systems have been studied and developed widely for decades, but accurate systems using deep neural networks (NN) have only been introduced in the last few years. We present a comprehensive survey of deep neural network architectures for NER, and contrast them with previous approaches to NER based on feature engineering and other supervised or semi-supervised learning algorithms. Our results highlight the improvements achieved by neural networks, and show how incorporating some of the lessons learned from past work on feature-based NER systems can yield further improvements.", "keyphrases": ["named entity recognition", "deep learning model", "question answering"]} +{"id": "lu-nguyen-2018-similar", "title": "Similar but not the Same: Word Sense Disambiguation Improves Event Detection via Neural Representation Matching", "abstract": "Event detection (ED) and word sense disambiguation (WSD) are two similar tasks in that they both involve identifying the classes (i.e. event types or word senses) of some word in a given sentence. It is thus possible to extract the knowledge hidden in the data for WSD, and utilize it to improve the performance on ED. In this work, we propose a method to transfer the knowledge learned on WSD to ED by matching the neural representations learned for the two tasks. 
Our experiments on two widely used datasets for ED demonstrate the effectiveness of the proposed method.", "keyphrases": ["word sense disambiguation", "event detection", "neural representation matching"]} +{"id": "mayer-cysouw-2014-creating", "title": "Creating a massively parallel Bible corpus", "abstract": "We present our ongoing effort to create a massively parallel Bible corpus. While an ever-increasing number of Bible translations is available in electronic form on the internet, there is no large-scale parallel Bible corpus that allows language researchers to easily get access to the texts and their parallel structure for a large variety of different languages. We report on the current status of the corpus, with over 900 translations in more than 830 language varieties. All translations are tokenized (e.g., separating punctuation marks) and Unicode normalized. Mainly due to copyright restrictions, only portions of the texts are made publicly available. However, we provide co-occurrence information for each translation in a (sparse) matrix format. All word forms in the translation are given together with their frequency and the verses in which they occur.", "keyphrases": ["parallel bible corpus", "word form", "pbc"]} +{"id": "nn-2012-aida", "title": "AIDA: Automatic Identification and Glossing of Dialectal Arabic", "abstract": "AIDA is a system for dialect identification, classification and glossing on the token and sentence level for written Arabic. Automatic dialect identification in Arabic is quite challenging because of the diglossic nature of the language and informality associated with the typical genres where dialectal Arabic (DA) is used. Moreover, DA lacks a standard orthography. Additionally, the abundance of faux amis between the different varieties of Arabic, namely between Modern Standard Arabic (MSA) and DA, exacerbates the challenge of identifying dialectal variants. Hence, identifying whether a (sequence of) token(s) is MSA or DA and providing an MSA-Gloss for the dialectal tokens in an utterance can aid Arabic MT in handling such informal genres more accurately.", "keyphrases": ["glossing", "dialectal arabic", "sentence level"]} +{"id": "ling-etal-2015-finding", "title": "Finding Function in Form: Compositional Character Models for Open Vocabulary Word Representation", "abstract": "We introduce a model for constructing vector representations of words by composing characters using bidirectional LSTMs. Relative to traditional word representation models that have independent vectors for each word type, our model requires only a single vector per character type and a fixed set of parameters for the compositional model. Despite the compactness of this model and, more importantly, the arbitrary nature of the form\u2010function relationship in language, our \u201ccomposed\u201d word representations yield state-of-the-art results in language modeling and part-of-speech tagging. Benefits over traditional baselines are particularly pronounced in morphologically rich languages (e.g., Turkish).", "keyphrases": ["character", "bidirectional lstm", "language modeling", "pos tagging", "bilstm"]} +{"id": "hulden-2009-foma", "title": "Foma: a Finite-State Compiler and Library", "abstract": "Foma is a compiler, programming language, and C library for constructing finite-state automata and transducers for various uses. It has specific support for many natural language processing applications such as producing morphological and phonological analyzers. 
Foma is largely compatible with the Xerox/PARC finite-state toolkit. It also embraces Unicode fully and supports various different formats for specifying regular expressions: the Xerox/PARC format, a Perl-like format, and a mathematical format that takes advantage of the 'Mathematical Operators' Unicode block.", "keyphrases": ["automata", "transducer", "foma"]} +{"id": "maccartney-manning-2009-extended", "title": "An extended model of natural logic", "abstract": "We propose a model of natural language inference which identifies valid inferences by their lexical and syntactic features, without full semantic interpretation. We extend past work in natural logic, which has focused on semantic containment and monotonicity, by incorporating both semantic exclusion and implicativity. Our model decomposes an inference problem into a sequence of atomic edits linking premise to hypothesis; predicts a lexical semantic relation for each edit; propagates these relations upward through a semantic composition tree according to properties of intermediate nodes; and joins the resulting semantic relations across the edit sequence. A computational implementation of the model achieves 70% accuracy and 89% precision on the FraCaS test suite. Moreover, including this model as a component in an existing system yields significant performance gains on the Recognizing Textual Entailment challenge.", "keyphrases": ["natural logic", "containment", "exclusion", "table", "deduction"]} +{"id": "fomicheva-etal-2021-eval4nlp", "title": "The Eval4NLP Shared Task on Explainable Quality Estimation: Overview and Results", "abstract": "In this paper, we introduce the Eval4NLP-2021 shared task on explainable quality estimation. Given a source-translation pair, this shared task requires not only to provide a sentence-level score indicating the overall quality of the translation, but also to explain this score by identifying the words that negatively impact translation quality. We present the data, annotation guidelines and evaluation setup of the shared task, describe the six participating systems, and analyze the results. To the best of our knowledge, this is the first shared task on explainable NLP evaluation metrics. Datasets and results are available at .", "keyphrases": ["eval4nlp", "explainable quality estimation", "sentence-level score"]} +{"id": "mirkin-etal-2015-motivating", "title": "Motivating Personality-aware Machine Translation", "abstract": "Language use is known to be influenced by personality traits as well as by sociodemographic characteristics such as age or mother tongue. As a result, it is possible to automatically identify these traits of the author from her texts. It has recently been shown that knowledge of such dimensions can improve performance in NLP tasks such as topic and sentiment modeling. We posit that machine translation is another application that should be personalized. In order to motivate this, we explore whether translation preserves demographic and psychometric traits. We show that, largely, both translation of the source training data into the target language and translation of the target test data into the source language have a detrimental effect on the accuracy of predicting author traits. 
We argue that this supports the need for personal and personality-aware machine translation models.", "keyphrases": ["machine translation", "personalization", "trait"]} +{"id": "kudo-richardson-2018-sentencepiece", "title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing", "abstract": "This paper describes SentencePiece, a language-independent subword tokenizer and detokenizer designed for Neural-based text processing, including Neural Machine Translation. It provides open-source C++ and Python implementations for subword units. While existing subword segmentation tools assume that the input is pre-tokenized into word sequences, SentencePiece can train subword models directly from raw sentences, which allows us to make a purely end-to-end and language independent system. We perform a validation experiment of NMT on English-Japanese machine translation, and find that it is possible to achieve comparable accuracy to direct subword training from raw sentences. We also compare the performance of subword training and segmentation with various configurations. SentencePiece is available under the Apache 2 license at .", "keyphrases": ["tokenization", "neural text processing", "sentencepiece", "vocabulary size"]} +{"id": "kumar-byrne-2004-minimum", "title": "Minimum Bayes-Risk Decoding for Statistical Machine Translation", "abstract": "We present Minimum Bayes-Risk (MBR) decoding for statistical machine translation. This statistical approach aims to minimize the expected loss of translation errors under loss functions that measure translation performance. We describe a hierarchy of loss functions that incorporate different levels of linguistic information from word strings, word-to-word alignments from an MT system, and syntactic structure from parse-trees of source and target language sentences. We report the performance of the MBR decoders on a Chinese-to-English translation task. Our results show that MBR decoding can be used to tune statistical MT performance for specific loss functions.", "keyphrases": ["statistical machine translation", "mbr", "minimum bayes-risk", "hypothesis", "system combination"]} +{"id": "habernal-gurevych-2016-argument", "title": "Which argument is more convincing? Analyzing and predicting convincingness of Web arguments using bidirectional LSTM", "abstract": "We propose a new task in the field of computational argumentation in which we investigate qualitative properties of Web arguments, namely their convincingness. We cast the problem as relation classification, where a pair of arguments having the same stance to the same prompt is judged. We annotate a large dataset of 16k pairs of arguments over 32 topics and investigate whether the relation \u201cA is more convincing than B\u201d exhibits properties of total ordering; these findings are used as global constraints for cleaning the crowdsourced data. We propose two tasks: (1) predicting which argument from an argument pair is more convincing and (2) ranking all arguments to the topic based on their convincingness. We experiment with feature-rich SVM and bidirectional LSTM and obtain 0.76-0.78 accuracy and 0.35-0.40 Spearman\u2019s correlation in a cross-topic evaluation. 
We release the newly created corpus UKPConvArg1 and the experimental software under open licenses.", "keyphrases": ["convincingness", "bidirectional lstm", "annotator", "argument pair", "social medium argument"]} +{"id": "koehn-knight-2003-empirical", "title": "Empirical Methods for Compound Splitting", "abstract": "Compounded words are a challenge for NLP applications such as machine translation (MT). We introduce methods to learn splitting rules from monolingual and parallel corpora. We evaluate them against a gold standard and measure their impact on performance of statistical MT systems. Results show accuracy of 99.1% and performance gains for MT of 0.039 BLEU on a German-English noun phrase translation task.", "keyphrases": ["compound splitting", "parallel corpora", "german", "segmentation", "empirical method"]} +{"id": "agirre-soroa-2009-personalizing", "title": "Personalizing PageRank for Word Sense Disambiguation", "abstract": "In this paper we propose a new graph-based method that uses the knowledge in a LKB (based on WordNet) in order to perform unsupervised Word Sense Disambiguation. Our algorithm uses the full graph of the LKB efficiently, performing better than previous approaches on English all-words datasets. We also show that the algorithm can be easily ported to other languages with good results, with the only requirement of having a wordnet. In addition, we make an analysis of the performance of the algorithm, showing that it is efficient and that it could be tuned to be faster.", "keyphrases": ["pagerank", "word sense disambiguation", "wsd", "knowledge base", "node"]} +{"id": "johnson-2010-pcfgs", "title": "PCFGs, Topic Models, Adaptor Grammars and Learning Topical Collocations and the Structure of Proper Names", "abstract": "This paper establishes a connection between two apparently very different kinds of probabilistic models. Latent Dirichlet Allocation (LDA) models are used as \"topic models\" to produce a low-dimensional representation of documents, while Probabilistic Context-Free Grammars (PCFGs) define distributions over trees. The paper begins by showing that LDA topic models can be viewed as a special kind of PCFG, so Bayesian inference for PCFGs can be used to infer Topic Models as well. Adaptor Grammars (AGs) are a hierarchical, non-parametric Bayesian extension of PCFGs. Exploiting the close relationship between LDA and PCFGs just described, we propose two novel probabilistic models that combine insights from LDA and AG models. The first replaces the unigram component of LDA topic models with multi-word sequences or collocations generated by an AG. The second extension builds on the first one to learn aspects of the internal structure of proper names.", "keyphrases": ["topic models", "adaptor grammars", "collocation", "proper name"]} +{"id": "xu-etal-2012-paraphrasing", "title": "Paraphrasing for Style", "abstract": "We present an initial investigation into the task of paraphrasing language while targeting a particular writing style. The plays of William Shakespeare and their modern translations are used as a testbed for evaluating paraphrase systems targeting a specific style of writing. We show that even with a relatively small amount of parallel training data, it is possible to learn paraphrase models which capture stylistic phenomena, and these models outperform baselines based on dictionaries and out-of-domain parallel text. In addition we present an initial investigation into automatic evaluation metrics for paraphrasing writing style. 
To the best of our knowledge, this is the first work to investigate the task of paraphrasing text with the goal of targeting a specific style of writing.", "keyphrases": ["style", "paraphrasing", "parallel corpus", "shakespearean english", "fluency"]} +{"id": "yasunaga-etal-2021-qa", "title": "QA-GNN: Reasoning with Language Models and Knowledge Graphs for Question Answering", "abstract": "The problem of answering questions using knowledge from pre-trained language models (LMs) and knowledge graphs (KGs) presents two challenges: given a QA context (question and answer choice), methods need to (i) identify relevant knowledge from large KGs, and (ii) perform joint reasoning over the QA context and KG. Here we propose a new model, QA-GNN, which addresses the above challenges through two key innovations: (i) relevance scoring, where we use LMs to estimate the importance of KG nodes relative to the given QA context, and (ii) joint reasoning, where we connect the QA context and KG to form a joint graph, and mutually update their representations through graph-based message passing. We evaluate QA-GNN on the CommonsenseQA and OpenBookQA datasets, and show its improvement over existing LM and LM+KG models, as well as its capability to perform interpretable and structured reasoning, e.g., correctly handling negation in questions.", "keyphrases": ["reasoning", "knowledge graph", "relevance scoring", "qa-gnn"]} +{"id": "di-fabio-etal-2019-verbatlas", "title": "VerbAtlas: a Novel Large-Scale Verbal Semantic Resource and Its Application to Semantic Role Labeling", "abstract": "We present VerbAtlas, a new, hand-crafted lexical-semantic resource whose goal is to bring together all verbal synsets from WordNet into semantically-coherent frames. The frames define a common, prototypical argument structure while at the same time providing new concept-specific information. In contrast to PropBank, which defines enumerative semantic roles, VerbAtlas comes with an explicit, cross-frame set of semantic roles linked to selectional preferences expressed in terms of WordNet synsets, and is the first resource enriched with semantic information about implicit, shadow, and default arguments. We demonstrate the effectiveness of VerbAtlas in the task of dependency-based Semantic Role Labeling and show how its integration into a high-performance system leads to improvements on both the in-domain and out-of-domain test sets of CoNLL-2009. VerbAtlas is available at .", "keyphrases": ["prototypical argument structure", "concept-specific information", "verbatlas"]} +{"id": "ferguson-etal-2015-disfluency", "title": "Disfluency Detection with a Semi-Markov Model and Prosodic Features", "abstract": "We present a discriminative model for detecting disfluencies in spoken language transcripts. Structurally, our model is a semi-Markov conditional random field with features targeting characteristics unique to speech repairs. This gives a significant performance improvement over standard chain-structured CRFs that have been employed in past work. We then incorporate prosodic features over silences and relative word duration into our semi-CRF model, resulting in further performance gains; moreover, these features are not easily replaced by discrete prosodic indicators such as ToBI breaks. 
Our final system, the semi-CRF with prosodic information, achieves an F-score of 85.4, which is 1.3 F1 better than the best prior reported F-score on this dataset.", "keyphrases": ["prosodic feature", "semi-crf", "disfluency detection"]} +{"id": "maas-etal-2011-learning", "title": "Learning Word Vectors for Sentiment Analysis", "abstract": "Unsupervised vector-based approaches to semantics can model rich lexical meanings, but they largely fail to capture sentiment information that is central to many word meanings and important for a wide range of NLP tasks. We present a model that uses a mix of unsupervised and supervised techniques to learn word vectors capturing semantic term-document information as well as rich sentiment content. The proposed model can leverage both continuous and multi-dimensional sentiment information as well as non-sentiment annotations. We instantiate the model to utilize the document-level sentiment polarity annotations present in many online documents (e.g. star ratings). We evaluate the model using small, widely used sentiment and subjectivity corpora and find it out-performs several previously introduced methods for sentiment classification. We also introduce a large dataset of movie reviews to serve as a more robust benchmark for work in this area.", "keyphrases": ["word vector", "movie review", "probabilistic model", "document model", "product"]} +{"id": "li-etal-2017-dailydialog", "title": "DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset", "abstract": "We develop a high-quality multi-turn dialog dataset, DailyDialog, which is intriguing in several aspects. The language is human-written and less noisy. The dialogues in the dataset reflect our daily way of communicating and cover various topics about our daily life. We also manually label the developed dataset with communication intention and emotion information. Then, we evaluate existing approaches on the DailyDialog dataset and hope it benefits the research field of dialog systems. The dataset is available on ", "keyphrases": ["multi-turn dialog dataset", "dialog system", "dailydialog", "conversation", "learner"]} +{"id": "pagnoni-etal-2021-understanding", "title": "Understanding Factuality in Abstractive Summarization with FRANK: A Benchmark for Factuality Metrics", "abstract": "Modern summarization models generate highly fluent but often factually unreliable outputs. This motivated a surge of metrics attempting to measure the factuality of automatically generated summaries. Due to the lack of common benchmarks, these metrics cannot be compared. Moreover, all these methods treat factuality as a binary concept and fail to provide deeper insights on the kinds of inconsistencies made by different systems. To address these limitations, we devise a typology of factual errors and use it to collect human annotations of generated summaries from state-of-the-art summarization systems for the CNN/DM and XSum datasets. Through these annotations we identify the proportion of different categories of factual errors and benchmark factuality metrics, showing their correlation with human judgement as well as their specific strengths and weaknesses.", "keyphrases": ["summarization", "factuality metric", "consistency"]} +{"id": "wu-etal-2021-applying", "title": "Applying the Transformer to Character-level Transduction", "abstract": "The transformer has been shown to outperform recurrent neural network-based sequence-to-sequence models in various word-level NLP tasks. Yet for character-level transduction tasks, e.g. 
morphological inflection generation and historical text normalization, there are few works that outperform recurrent models using the transformer. In an empirical study, we uncover that, in contrast to recurrent sequence-to-sequence models, the batch size plays a crucial role in the performance of the transformer on character-level tasks, and we show that with a large enough batch size, the transformer does indeed outperform recurrent models. We also introduce a simple technique to handle feature-guided character-level transduction that further improves performance. With these insights, we achieve state-of-the-art performance on morphological inflection and historical text normalization. We also show that the transformer outperforms a strong baseline on two other character-level transduction tasks: grapheme-to-phoneme conversion and transliteration.", "keyphrases": ["transformer", "character-level transduction", "character-level task"]} +{"id": "malmasi-zampieri-2017-detecting", "title": "Detecting Hate Speech in Social Media", "abstract": "In this paper we examine methods to detect hate speech in social media, while distinguishing this from general profanity. We aim to establish lexical baselines for this task by applying supervised classification methods using a recently released dataset annotated for this purpose. As features, our system uses character n-grams, word n-grams and word skip-grams. We obtain results of 78% accuracy in identifying posts across three classes. Results demonstrate that the main challenge lies in discriminating profanity and hate speech from each other. A number of directions for future work are discussed.", "keyphrases": ["hate speech", "social media", "abusive language workshop", "cyberbullying", "english tweet"]} +{"id": "gliwa-etal-2019-samsum", "title": "SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization", "abstract": "This paper introduces the SAMSum Corpus, a new dataset with abstractive dialogue summaries. We investigate the challenges it poses for automated summarization by testing several models and comparing their results with those obtained on a corpus of news articles. We show that model-generated summaries of dialogues achieve higher ROUGE scores than the model-generated summaries of news \u2013 in contrast with human evaluators' judgement. This suggests that a challenging task of abstractive dialogue summarization requires dedicated models and non-standard quality measures. To our knowledge, our study is the first attempt to introduce a high-quality chat-dialogues corpus, manually annotated with abstractive summarizations, which can be used by the research community for further studies.", "keyphrases": ["abstractive summarization", "samsum corpus", "conversation", "dialogue summarization dataset"]} +{"id": "xu-etal-2020-improving", "title": "Improving AMR Parsing with Sequence-to-Sequence Pre-training", "abstract": "In the literature, research on abstract meaning representation (AMR) parsing is much restricted by the size of the human-curated datasets that are critical to building an AMR parser with good performance. To alleviate this data size restriction, pre-trained models have been drawing more and more attention in AMR parsing. However, previous pre-trained models, like BERT, are implemented for general purposes and may not work as expected for the specific task of AMR parsing. 
In this paper, we focus on sequence-to-sequence (seq2seq) AMR parsing and propose a seq2seq pre-training approach to build pre-trained models in both single and joint ways on three relevant tasks, i.e., machine translation, syntactic parsing, and AMR parsing itself. Moreover, we extend the vanilla fine-tuning method to a multi-task learning fine-tuning method that optimizes for the performance of AMR parsing while endeavoring to preserve the response of the pre-trained models. Extensive experimental results on two English benchmark datasets show that both the single and joint pre-trained models significantly improve the performance (e.g., from 71.5 to 80.2 on AMR 2.0), which reaches the state of the art. The result is very encouraging since we achieve this with seq2seq models rather than complex models. We make our code and model available at .", "keyphrases": ["amr", "sequence-to-sequence", "machine translation"]} +{"id": "potthast-etal-2018-stylometric", "title": "A Stylometric Inquiry into Hyperpartisan and Fake News", "abstract": "We report on a comparative style analysis of hyperpartisan (extremely one-sided) news and fake news. A corpus of 1,627 articles from 9 political publishers, three each from the mainstream, the hyperpartisan left, and the hyperpartisan right, has been fact-checked by professional journalists at BuzzFeed: 97% of the 299 fake news articles identified are also hyperpartisan. We show how a style analysis can distinguish hyperpartisan news from the mainstream (F1 = 0.78), and satire from both (F1 = 0.81). But stylometry is no silver bullet as style-based fake news detection does not work (F1 = 0.46). We further reveal that left-wing and right-wing news share significantly more stylistic similarities than either does with the mainstream. This result is robust: it has been confirmed by three different modeling approaches, one of which employs Unmasking in a novel way. Applications of our results include partisanship detection and pre-screening for semi-automatic fake news detection.", "keyphrases": ["fake news", "professional journalist", "hyperpartisan news", "propaganda detection", "story"]} +{"id": "yang-etal-2007-building", "title": "Building Emotion Lexicon from Weblog Corpora", "abstract": "An emotion lexicon is an indispensable resource for emotion analysis. This paper aims to mine the relationships between words and emotions using weblog corpora. A collocation model is proposed to learn emotion lexicons from weblog articles. Emotion classification at the sentence level is evaluated using the mined lexicons to demonstrate their usefulness.", "keyphrases": ["emotion lexicon", "weblog corpora", "collocation model"]} +{"id": "bapna-firat-2019-simple", "title": "Simple, Scalable Adaptation for Neural Machine Translation", "abstract": "Fine-tuning pre-trained Neural Machine Translation (NMT) models is the dominant approach for adapting to new languages and domains. However, fine-tuning requires adapting and maintaining a separate model for each target task. We propose a simple yet efficient approach for adaptation in NMT. Our proposed approach consists of injecting tiny task specific adapter layers into a pre-trained model. These lightweight adapters, with just a small fraction of the original model size, adapt the model to multiple individual tasks simultaneously. We evaluate our approach on two tasks: (i) Domain Adaptation and (ii) Massively Multilingual NMT. 
Experiments on domain adaptation demonstrate that our proposed approach is on par with full fine-tuning on various domains, dataset sizes and model capacities. On a massively multilingual dataset of 103 languages, our adaptation approach bridges the gap between individual bilingual models and one massively multilingual model for most language pairs, paving the way towards universal machine translation.", "keyphrases": ["adapter", "neural machine translation", "pre-trained model", "multilingual nmt"]} +{"id": "yang-etal-2018-hotpotqa", "title": "HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering", "abstract": "Existing question answering (QA) datasets fail to train QA systems to perform complex reasoning and provide explanations for answers. We introduce HotpotQA, a new dataset with 113k Wikipedia-based question-answer pairs with four key features: (1) the questions require finding and reasoning over multiple supporting documents to answer; (2) the questions are diverse and not constrained to any pre-existing knowledge bases or knowledge schemas; (3) we provide sentence-level supporting facts required for reasoning, allowing QA systems to reason with strong supervision and explain the predictions; (4) we offer a new type of factoid comparison questions to test QA systems' ability to extract relevant facts and perform necessary comparison. We show that HotpotQA is challenging for the latest QA systems, and the supporting facts enable models to improve performance and make explainable predictions.", "keyphrases": ["multi-hop question", "wikipedia-based question-answer pair", "hotpotqa", "complex question", "wikipedia page"]} +{"id": "ku-etal-2020-room", "title": "Room-Across-Room: Multilingual Vision-and-Language Navigation with Dense Spatiotemporal Grounding", "abstract": "We introduce Room-Across-Room (RxR), a new Vision-and-Language Navigation (VLN) dataset. RxR is multilingual (English, Hindi, and Telugu) and larger (more paths and instructions) than other VLN datasets. It emphasizes the role of language in VLN by addressing known biases in paths and eliciting more references to visible entities. Furthermore, each word in an instruction is time-aligned to the virtual poses of instruction creators and validators. We establish baseline scores for monolingual and multilingual settings and multitask learning when including Room-to-Room annotations (Anderson et al., 2018). We also provide results for a model that learns from synchronized pose traces by focusing only on portions of the panorama attended to in human demonstrations. The size, scope and detail of RxR dramatically expands the frontier for research on embodied language agents in photorealistic simulated environments.", "keyphrases": ["vision-and-language navigation", "environment", "room-across-room"]} +{"id": "strzalkowski-etal-2013-robust", "title": "Robust Extraction of Metaphor from Novel Data", "abstract": "This article describes our novel approach to the automated detection and analysis of metaphors in text. We employ robust, quantitative language processing to implement a system prototype combined with sound social science methods for validation. We show results in 4 different languages and discuss how our methods are a significant step forward from previously established techniques of metaphor identification. 
We use Topical Structure and Tracking, an Imageability score, and innovative methods to build an effective metaphor identification system that is fully automated and performs well over baseline.", "keyphrases": ["metaphor", "novel data", "robust extraction"]} +{"id": "yang-etal-2018-sgm", "title": "SGM: Sequence Generation Model for Multi-label Classification", "abstract": "Multi-label classification is an important yet challenging task in natural language processing. It is more complex than single-label classification in that the labels tend to be correlated. Existing methods tend to ignore the correlations between labels. Besides, different parts of the text can contribute differently to predicting different labels, which is not considered by existing models. In this paper, we propose to view the multi-label classification task as a sequence generation problem, and apply a sequence generation model with a novel decoder structure to solve it. Extensive experimental results show that our proposed methods outperform previous work by a substantial margin. Further analysis of experimental results demonstrates that the proposed methods not only capture the correlations between labels, but also select the most informative words automatically when predicting different labels.", "keyphrases": ["sequence generation model", "multi-label classification", "sgm"]} +{"id": "arthur-etal-2016-incorporating", "title": "Incorporating Discrete Translation Lexicons into Neural Machine Translation", "abstract": "Neural machine translation (NMT) often makes mistakes in translating low-frequency content words that are essential to understanding the meaning of the sentence. We propose a method to alleviate this problem by augmenting NMT systems with discrete translation lexicons that efficiently encode translations of these low-frequency words. We describe a method to calculate the lexicon probability of the next word in the translation candidate by using the attention vector of the NMT model to select which source word lexical probabilities the model should focus on. We test two methods to combine this probability with the standard NMT probability: (1) using it as a bias, and (2) linear interpolation. Experiments on two corpora show an improvement of 2.0-2.3 BLEU and 0.13-0.44 NIST score, and faster convergence time.", "keyphrases": ["neural machine translation", "probability", "source sentence"]} +{"id": "swanson-etal-2020-rationalizing", "title": "Rationalizing Text Matching: Learning Sparse Alignments via Optimal Transport", "abstract": "Selecting input features of top relevance has become a popular method for building self-explaining models. In this work, we extend this selective rationalization approach to text matching, where the goal is to jointly select and align text pieces, such as tokens or sentences, as a justification for the downstream prediction. Our approach employs optimal transport (OT) to find a minimal cost alignment between the inputs. However, directly applying OT often produces dense and therefore uninterpretable alignments. To overcome this limitation, we introduce novel constrained variants of the OT problem that result in highly sparse alignments with controllable sparsity. Our model is end-to-end differentiable using the Sinkhorn algorithm for OT and can be trained without any alignment annotations. We evaluate our model on the StackExchange, MultiNews, e-SNLI, and MultiRC datasets. 
Our model achieves very sparse rationale selections with high fidelity while preserving prediction accuracy compared to strong attention baseline models.", "keyphrases": ["sparse alignment", "optimal transport", "rationale"]} +{"id": "yang-etal-2015-wikiqa", "title": "WikiQA: A Challenge Dataset for Open-Domain Question Answering", "abstract": "We describe the WIKIQA dataset, a new publicly available set of question and sentence pairs, collected and annotated for research on open-domain question answering. Most previous work on answer sentence selection focuses on a dataset created using the TREC-QA data, which includes editor-generated questions and candidate answer sentences selected by matching content words in the question. WIKIQA is constructed using a more natural process and is more than an order of magnitude larger than the previous dataset. In addition, the WIKIQA dataset also includes questions for which there are no correct sentences, enabling researchers to work on answer triggering, a critical component in any QA system. We compare several systems on the task of answer sentence selection on both datasets and also describe the performance of a system on the problem of answer triggering using the WIKIQA dataset.", "keyphrases": ["open-domain question answering", "wikiqa", "semantic feature", "paragraph", "state-of-the-art result"]} +{"id": "gulcehre-etal-2016-pointing", "title": "Pointing the Unknown Words", "abstract": "The problem of rare and unknown words is an important issue that can potentially influence the performance of many NLP systems, including both the traditional count-based and the deep learning models. We propose a novel way to deal with the rare and unseen words for the neural network models using attention. Our model uses two softmax layers in order to predict the next word in conditional language models: one predicts the location of a word in the source sentence, and the other predicts a word in the shortlist vocabulary. At each time-step, the decision of which softmax layer to use is made adaptively by an MLP which is conditioned on the context. We motivate our work with psychological evidence that humans naturally have a tendency to point towards objects in the context or the environment when the name of an object is not known. We observe improvements on two tasks, neural machine translation on the Europarl English to French parallel corpora and text summarization on the Gigaword dataset using our proposed model.", "keyphrases": ["unknown word", "location", "neural machine translation", "copy mechanism", "pointer network"]} +{"id": "tang-etal-2010-cascade", "title": "A Cascade Method for Detecting Hedges and their Scope in Natural Language Text", "abstract": "Detecting hedges and their scope in natural language text is very important for information inference. In this paper, we present a system based on a cascade method for the CoNLL-2010 shared task. The system consists of two components: one for detecting hedges and another one for detecting their scope. For detecting hedges, we build a cascade subsystem. Firstly, a conditional random field (CRF) model and a large margin-based model are trained separately. Then, we train another CRF model using the result of the first phase. For detecting the scope of hedges, a CRF model is trained according to the result of the first subtask. 
The experiments show that our system achieves 86.36% F-measure on the biological corpus and 55.05% F-measure on the Wikipedia corpus for hedge detection, and 49.95% F-measure on the biological corpus for hedge scope detection. Among them, 86.36% is the best result on the biological corpus for hedge detection.", "keyphrases": ["cascade method", "scope", "natural language text"]} +{"id": "burlot-yvon-2018-using", "title": "Using Monolingual Data in Neural Machine Translation: a Systematic Study", "abstract": "Neural Machine Translation (MT) has radically changed the way systems are developed. A major difference with the previous generation (Phrase-Based MT) is the way monolingual target data, which often abounds, is used in these two paradigms. While Phrase-Based MT can seamlessly integrate very large language models trained on billions of sentences, the best option for Neural MT developers seems to be the generation of artificial parallel data through back-translation - a technique that fails to fully take advantage of existing datasets. In this paper, we conduct a systematic study of back-translation, comparing alternative uses of monolingual data, as well as multiple data generation procedures. Our findings confirm that back-translation is very effective and give new explanations as to why this is the case. We also introduce new data simulation techniques that are almost as effective, yet much cheaper to implement.", "keyphrases": ["monolingual data", "neural machine translation", "backtranslation"]} +{"id": "iyyer-etal-2018-adversarial", "title": "Adversarial Example Generation with Syntactically Controlled Paraphrase Networks", "abstract": "We propose syntactically controlled paraphrase networks (SCPNs) and use them to generate adversarial examples. Given a sentence and a target syntactic form (e.g., a constituency parse), SCPNs are trained to produce a paraphrase of the sentence with the desired syntax. We show it is possible to create training data for this task by first doing backtranslation at a very large scale, and then using a parser to label the syntactic transformations that naturally occur during this process. Such data allows us to train a neural encoder-decoder model with extra inputs to specify the target syntax. A combination of automated and human evaluations show that SCPNs generate paraphrases that follow their target specifications without decreasing paraphrase quality when compared to baseline (uncontrolled) paraphrase systems. Furthermore, they are more capable of generating syntactically adversarial examples that both (1) \u201cfool\u201d pretrained models and (2) improve the robustness of these models to syntactic variation when used to augment their training data.", "keyphrases": ["paraphrase", "syntax", "adversarial example generation", "parallel corpus"]} +{"id": "nguyen-grishman-2015-relation", "title": "Relation Extraction: Perspective from Convolutional Neural Networks", "abstract": "Up to now, relation extraction systems have made extensive use of features generated by linguistic analysis modules. Errors in these features lead to errors of relation detection and classification. In this work, we depart from these traditional approaches with complicated feature engineering by introducing a convolutional neural network for relation extraction that automatically learns features from sentences and minimizes the dependence on external toolkits and resources. 
Our model takes advantage of multiple window sizes for filters and pre-trained word embeddings as an initializer on a non-static architecture to improve the performance. We emphasize the relation extraction problem with an unbalanced corpus. The experimental results show that our system significantly outperforms not only the best baseline systems for relation extraction but also the state-of-the-art systems for relation classification.", "keyphrases": ["convolutional neural networks", "multiple window size", "relation extraction", "cnn", "research effort"]} +{"id": "xu-etal-2015-semeval", "title": "SemEval-2015 Task 1: Paraphrase and Semantic Similarity in Twitter (PIT)", "abstract": "In this shared task, we present evaluations of systems for two related tasks, Paraphrase Identification (PI) and Semantic Textual Similarity (SS), on Twitter data. Given a pair of sentences, participants are asked to produce a binary yes/no judgement or a graded score to measure their semantic equivalence. The task features a newly constructed Twitter Paraphrase Corpus that contains 18,762 sentence pairs. A total of 19 teams participated, submitting 36 runs to the PI task and 26 runs to the SS task. The evaluation shows encouraging results and open challenges for future research. The best systems scored an F1-measure of 0.674 for the PI task and a Pearson correlation of 0.619 for the SS task, respectively, compared to a strong baseline using a logistic regression model of 0.589 F1 and 0.511 Pearson; while the best SS systems can often reach >0.80 Pearson on well-formed text. This shared task also provides insights into the relation between the PI and SS tasks and suggests the importance of bringing these two research areas together. We make all the data, baseline systems and evaluation scripts publicly available.", "keyphrases": ["paraphrase", "semantic similarity", "twitter"]} +{"id": "zheng-etal-2021-comae", "title": "CoMAE: A Multi-factor Hierarchical Framework for Empathetic Response Generation", "abstract": "The capacity of empathy is crucial to the success of open-domain dialog systems. Due to its nature of multi-dimensionality, there are various factors that relate to empathy expression, such as communication mechanism, dialog act and emotion. However, existing methods for empathetic response generation usually either consider only one empathy factor or ignore the hierarchical relationships between different factors, leading to a weak ability of empathy modeling. In this paper, we propose a multi-factor hierarchical framework, CoMAE, for empathetic response generation, which models the above three key factors of empathy expression in a hierarchical way. We show experimentally that our CoMAE-based model can generate more empathetic responses than previous methods. We also highlight the importance of hierarchical modeling of different factors through both the empirical analysis on a real-life corpus and the extensive experiments. Our codes and used data are available at https://github.com/chujiezheng/CoMAE.", "keyphrases": ["multi-factor hierarchical framework", "empathetic response generation", "emotion", "comae"]} +{"id": "aharoni-goldberg-2017-towards", "title": "Towards String-To-Tree Neural Machine Translation", "abstract": "We present a simple method to incorporate syntactic information about the target language in a neural machine translation system by translating into linearized, lexicalized constituency trees. 
An experiment on the WMT16 German-English news translation task resulted in an improved BLEU score when compared to a syntax-agnostic NMT baseline trained on the same dataset. An analysis of the translations from the syntax-aware system shows that it performs more reordering during translation in comparison to the baseline. A small-scale human evaluation also showed an advantage to the syntax-aware system.", "keyphrases": ["neural machine translation", "parse tree", "target sentence"]} +{"id": "yu-etal-2015-domain", "title": "Domain Adaptation for Dependency Parsing via Self-Training", "abstract": "This paper presents a successful approach for domain adaptation of a dependency parser via self-training. We improve parsing accuracy for out-of-domain texts with a self-training approach that uses confidence-based methods to select additional training samples. We compare two confidence-based methods: The first method uses the parse score of the employed parser to measure the confidence in a parse tree. The second method calculates the score differences between the best tree and alternative trees. With these methods, we were able to improve the labeled accuracy score by 1.6 percentage points on texts from a chemical domain and by 0.6 on average on texts of three web domains. Our improvement of 1.5% UAS on the chemical texts is substantially higher than the 0.5% UAS improvement reported in previous work. For the three web domains, no positive results for self-training have been reported before.", "keyphrases": ["self-training", "out-of-domain text", "domain adaptation"]} +{"id": "chan-roth-2010-exploiting", "title": "Exploiting Background Knowledge for Relation Extraction", "abstract": "Relation extraction is the task of recognizing semantic relations among entities. Given a particular sentence, supervised approaches to Relation Extraction employ feature or kernel functions which usually have a single sentence in their scope. The overall aim of this paper is to propose methods for using knowledge and resources that are external to the target sentence, as a way to improve relation extraction. We demonstrate this by exploiting background knowledge such as relationships among the target relations, as well as by considering how target relations relate to some existing knowledge resources. Our methods are general and we suggest that some of them could be applied to other NLP tasks.", "keyphrases": ["background knowledge", "relation extraction", "target sentence", "cluster"]} +{"id": "carpuat-wu-2007-phrase", "title": "How phrase sense disambiguation outperforms word sense disambiguation for statistical machine translation", "abstract": "We present comparative empirical evidence arguing that a generalized phrase sense disambiguation approach better improves statistical machine translation than ordinary word sense disambiguation, along with a data analysis suggesting the reasons for this. Standalone word sense disambiguation, as exemplified by the Senseval series of evaluations, typically defines the target of disambiguation as a single word. But in order to be useful in statistical machine translation, our studies indicate that word sense disambiguation should be redefined to move beyond the particular case of single word targets, and instead to generalize to multi-word phrase targets. 
We investigate how and why the phrase sense disambiguation approach\u2014in contrast to recent efforts to apply traditional word sense disambiguation to SMT\u2014is able to yield statistically significant improvements in translation quality even under large data conditions, and consistently improve SMT across both IWSLT and NIST Chinese-English text translation tasks. We discuss architectural issues raised by this change of perspective, and consider the new model architecture necessitated by the phrase sense disambiguation approach.", "keyphrases": ["sense disambiguation", "machine translation", "wsd", "smt system"]} +{"id": "dhingra-etal-2017-gated", "title": "Gated-Attention Readers for Text Comprehension", "abstract": "In this paper we study the problem of answering cloze-style questions over documents. Our model, the Gated-Attention (GA) Reader, integrates a multi-hop architecture with a novel attention mechanism, which is based on multiplicative interactions between the query embedding and the intermediate states of a recurrent neural network document reader. This enables the reader to build query-specific representations of tokens in the document for accurate answer selection. The GA Reader obtains state-of-the-art results on three benchmarks for this task\u2013the CNN & Daily Mail news stories and the Who Did What dataset. The effectiveness of multiplicative interaction is demonstrated by an ablation study, and by comparing to alternative compositional operators for implementing the gated-attention.", "keyphrases": ["reader", "multiplicative interaction", "cnn", "gated-attention", "reading comprehension"]} +{"id": "alberti-etal-2019-synthetic", "title": "Synthetic QA Corpora Generation with Roundtrip Consistency", "abstract": "We introduce a novel method of generating synthetic question answering corpora by combining models of question generation and answer extraction, and by filtering the results to ensure roundtrip consistency. By pretraining on the resulting corpora we obtain significant improvements on SQuAD2 and NQ, establishing a new state-of-the-art on the latter. Our synthetic data generation models, for both question generation and answer extraction, can be fully reproduced by finetuning a publicly available BERT model on the extractive subsets of SQuAD2 and NQ. We also describe a more powerful variant that does full sequence-to-sequence pretraining for question generation, obtaining exact match and F1 at less than 0.1% and 0.4% from human performance on SQuAD2.", "keyphrases": ["roundtrip consistency", "question generation", "bert", "data augmentation", "language model"]} +{"id": "yan-etal-2009-unsupervised", "title": "Unsupervised Relation Extraction by Mining Wikipedia Texts Using Information from the Web", "abstract": "This paper presents an unsupervised relation extraction method for discovering and enhancing relations in which a specified concept in Wikipedia participates. Using respective characteristics of Wikipedia articles and Web corpus, we develop a clustering approach based on combinations of patterns: dependency patterns from dependency analysis of texts in Wikipedia, and surface patterns generated from highly redundant information related to the Web. Evaluations of the proposed approach on two different domains demonstrate the superiority of the pattern combination over existing approaches. 
Fundamentally, our method demonstrates how deep linguistic patterns contribute complementarily with Web surface patterns to the generation of various relations.", "keyphrases": ["wikipedia", "web", "unsupervised relation extraction"]} +{"id": "nivre-etal-2017-universal", "title": "Universal Dependencies", "abstract": "Universal Dependencies (UD) is a project that seeks to develop cross-linguistically consistent treebank annotation for many languages. This tutorial gives an introduction to the UD framework and resources, from basic design principles to annotation guidelines and existing treebanks. We also discuss tools for developing and exploiting UD treebanks and survey applications of UD in NLP and linguistics.", "keyphrases": ["project", "treebank", "universal dependencies"]} +{"id": "zou-etal-2013-bilingual", "title": "Bilingual Word Embeddings for Phrase-Based Machine Translation", "abstract": "We introduce bilingual word embeddings: semantic embeddings associated across two languages in the context of neural language models. We propose a method to learn bilingual embeddings from a large unlabeled corpus, while utilizing MT word alignments to constrain translational equivalence. The new embeddings significantly out-perform baselines in word semantic similarity. A single semantic similarity feature induced with bilingual embeddings adds nearly half a BLEU point to the results of the NIST08 Chinese-English machine translation task.", "keyphrases": ["machine translation", "semantic similarity", "sentiment analysis"]} +{"id": "gedigian-etal-2006-catching", "title": "Catching Metaphors", "abstract": "Metaphors are ubiquitous in language and developing methods to identify and deal with metaphors is an open problem in Natural Language Processing (NLP). In this paper we describe results from using a maximum entropy (ME) classifier to identify metaphors. Using the Wall Street Journal (WSJ) corpus, we annotated all the verbal targets associated with a set of frames which includes frames of spatial motion, manipulation, and health. One surprising finding was that over 90% of annotated targets from these frames are used metaphorically, underscoring the importance of processing figurative language. We then used this labeled data and each verbal target's PropBank annotation to train a maximum entropy classifier to make this literal vs. metaphoric distinction. Using the classifier, we reduce the final error in the test set by 5% over the verb-specific majority class baseline and 31% over the corpus-wide majority class baseline.", "keyphrases": ["metaphor", "wall street journal", "propbank annotation", "entropy classifier", "framenet"]} +{"id": "he-etal-2018-dureader", "title": "DuReader: a Chinese Machine Reading Comprehension Dataset from Real-world Applications", "abstract": "This paper introduces DuReader, a new large-scale, open-domain Chinese machine reading comprehension (MRC) dataset, designed to address real-world MRC. DuReader has three advantages over previous MRC datasets: (1) data sources: questions and documents are based on Baidu Search and Baidu Zhidao; answers are manually generated. (2) question types: it provides rich annotations for more question types, especially yes-no and opinion questions, leaving more opportunity for the research community. (3) scale: it contains 200K questions, 420K answers and 1M documents; it is the largest Chinese MRC dataset so far. 
Experiments show that human performance is well above current state-of-the-art baseline systems, leaving plenty of room for the community to make improvements. To help the community make these improvements, both DuReader and baseline systems have been posted online. We also organize a shared competition to encourage the exploration of more models. Since the release of the task, there are significant improvements over the baselines.", "keyphrases": ["chinese", "reading comprehension", "dureader", "new language"]} +{"id": "ghosal-etal-2018-contextual", "title": "Contextual Inter-modal Attention for Multi-modal Sentiment Analysis", "abstract": "Multi-modal sentiment analysis offers various challenges, one being the effective combination of different input modalities, namely text, visual and acoustic. In this paper, we propose a recurrent neural network based multi-modal attention framework that leverages the contextual information for utterance-level sentiment prediction. The proposed approach applies attention on multi-modal multi-utterance representations and tries to learn the contributing features amongst them. We evaluate our proposed approach on two multi-modal sentiment analysis benchmark datasets, viz. CMU Multi-modal Opinion-level Sentiment Intensity (CMU-MOSI) corpus and the recently released CMU Multi-modal Opinion Sentiment and Emotion Intensity (CMU-MOSEI) corpus. Evaluation results show the effectiveness of our proposed approach with the accuracies of 82.31% and 79.80% for the MOSI and MOSEI datasets, respectively. These are approximately 2 and 1 points performance improvement over the state-of-the-art models for the datasets.", "keyphrases": ["multi-modal sentiment analysis", "contextual information", "contextual inter-modal attention"]} +{"id": "ohta-etal-2011-overview", "title": "Overview of the Epigenetics and Post-translational Modifications (EPI) task of BioNLP Shared Task 2011", "abstract": "This paper presents the preparation, resources, results and analysis of the Epigenetics and Post-translational Modifications (EPI) task, a main task of the BioNLP Shared Task 2011. The task concerns the extraction of detailed representations of 14 protein and DNA modification events, the catalysis of these reactions, and the identification of instances of negated or speculatively stated event instances. Seven teams submitted final results to the EPI task in the shared task, with the highest-performing system achieving 53% F-score in the full task and 69% F-score in the extraction of a simplified set of core event arguments.", "keyphrases": ["epigenetics", "post-translational modifications", "protein"]} +{"id": "rust-etal-2021-good", "title": "How Good is Your Tokenizer? On the Monolingual Performance of Multilingual Language Models", "abstract": "In this work, we provide a systematic and comprehensive empirical comparison of pretrained multilingual language models versus their monolingual counterparts with regard to their monolingual task performance. We study a set of nine typologically diverse languages with readily available pretrained monolingual models on a set of five diverse monolingual downstream tasks. We first aim to establish, via fair and controlled comparisons, if a gap between the multilingual and the corresponding monolingual representation of that language exists, and subsequently investigate the reason for any performance difference. 
To disentangle conflating factors, we train new monolingual models on the same data, with monolingually and multilingually trained tokenizers. We find that while the pretraining data size is an important factor, a designated monolingual tokenizer plays an equally important role in the downstream performance. Our results show that languages that are adequately represented in the multilingual model's vocabulary exhibit negligible performance decreases over their monolingual counterparts. We further find that replacing the original multilingual tokenizer with the specialized monolingual tokenizer improves the downstream performance of the multilingual model for almost every task and language.", "keyphrases": ["tokenizer", "counterpart", "multilingual model"]} +{"id": "mager-etal-2021-findings", "title": "Findings of the AmericasNLP 2021 Shared Task on Open Machine Translation for Indigenous Languages of the Americas", "abstract": "This paper presents the results of the 2021 Shared Task on Open Machine Translation for Indigenous Languages of the Americas. The shared task featured two independent tracks, and participants submitted machine translation systems for up to 10 indigenous languages. Overall, 8 teams participated with a total of 214 submissions. We provided training sets consisting of data collected from various sources, as well as manually translated sentences for the development and test sets. An official baseline trained on this data was also provided. Team submissions featured a variety of architectures, including both statistical and neural models, and for the majority of languages, many teams were able to considerably improve over the baseline. The best performing systems achieved 12.97 ChrF higher than baseline, when averaged across languages.", "keyphrases": ["shared task", "machine translation", "indigenous languages"]} +{"id": "xu-etal-2016-optimizing", "title": "Optimizing Statistical Machine Translation for Text Simplification", "abstract": "Most recent sentence simplification systems use basic machine translation models to learn lexical and syntactic paraphrases from a manually simplified parallel corpus. These methods are limited by the quality and quantity of manually simplified corpora, which are expensive to build. In this paper, we conduct an in-depth adaptation of statistical machine translation to perform text simplification, taking advantage of large-scale paraphrases learned from bilingual texts and a small amount of manual simplifications with multiple references. Our work is the first to design automatic metrics that are effective for tuning and evaluating simplification systems, which will facilitate iterative development for this task.", "keyphrases": ["text simplification", "automatic metric", "sari", "complex sentence", "fkgl"]} +{"id": "meng-etal-2012-cross", "title": "Cross-Lingual Mixture Model for Sentiment Classification", "abstract": "The amount of labeled sentiment data in English is much larger than that in other languages. Such a disproportion arouses interest in cross-lingual sentiment classification, which aims to conduct sentiment classification in the target language (e.g. Chinese) using labeled data in the source language (e.g. English). Most existing work relies on machine translation engines to directly adapt labeled data from the source language to the target language. This approach suffers from the limited coverage of vocabulary in the machine translation results.
In this paper, we propose a generative cross-lingual mixture model (CLMM) to leverage unlabeled bilingual parallel data. By fitting parameters to maximize the likelihood of the bilingual parallel data, the proposed model learns previously unseen sentiment words from the large bilingual parallel data and improves vocabulary coverage significantly. Experiments on multiple data sets show that CLMM is consistently effective in two settings: (1) labeled data in the target language are unavailable; and (2) labeled data in the target language are also available.", "keyphrases": ["sentiment classification", "source language", "vocabulary", "bilingual parallel data", "cross-lingual mixture model"]} +{"id": "specia-etal-2018-findings", "title": "Findings of the WMT 2018 Shared Task on Quality Estimation", "abstract": "We report the results of the WMT18 shared task on Quality Estimation, i.e. the task of predicting the quality of the output of machine translation systems at various granularity levels: word, phrase, sentence and document. This year we include four language pairs, three text domains, and translations produced by both statistical and neural machine translation systems. Participating teams from ten institutions submitted a variety of systems to different task variants and language pairs.", "keyphrases": ["wmt", "quality estimation", "team", "translation quality", "sentence-level"]} +{"id": "lawrence-reed-2019-argument", "title": "Argument Mining: A Survey", "abstract": "Argument mining is the automatic identification and extraction of the structure of inference and reasoning expressed as arguments presented in natural language. Understanding argumentative structure makes it possible to determine not only what positions people are adopting, but also why they hold the opinions they do, providing valuable insights in domains as diverse as financial market prediction and public relations. This survey explores the techniques that establish the foundations for argument mining, provides a review of recent advances in argument mining techniques, and discusses the challenges faced in automatically extracting a deeper understanding of reasoning expressed in language in general.", "keyphrases": ["survey", "argument mining", "claim", "unstructured text", "student feedback"]} +{"id": "alzantot-etal-2018-generating", "title": "Generating Natural Language Adversarial Examples", "abstract": "Deep neural networks (DNNs) are vulnerable to adversarial examples, perturbations to correctly classified examples which can cause the model to misclassify. In the image domain, these perturbations can often be made virtually indistinguishable to human perception, causing humans and state-of-the-art models to disagree. However, in the natural language domain, small perturbations are clearly perceptible, and the replacement of a single word can drastically alter the semantics of the document. Given these challenges, we use a black-box population-based optimization algorithm to generate semantically and syntactically similar adversarial examples that fool well-trained sentiment analysis and textual entailment models with success rates of 97% and 70%, respectively. We additionally demonstrate that 92.3% of the successful sentiment analysis adversarial examples are classified to their original label by 20 human annotators, and that the examples are perceptibly quite similar. 
Finally, we discuss an attempt to use adversarial training as a defense, but fail to yield improvement, demonstrating the strength and diversity of our adversarial examples. We hope our findings encourage researchers to pursue improving the robustness of DNNs in the natural language domain.", "keyphrases": ["adversarial example", "perturbation", "sentiment analysis", "sample", "input sentence"]} +{"id": "daume-iii-2007-frustratingly", "title": "Frustratingly Easy Domain Adaptation", "abstract": "We describe an approach to domain adaptation that is appropriate exactly in the case when one has enough \u201ctarget\u201d data to do slightly better than just using only \u201csource\u201d data. Our approach is incredibly simple, easy to implement as a preprocessing step (10 lines of Perl!) and outperforms state-of-the-art approaches on a range of datasets. Moreover, it is trivially extended to a multi-domain adaptation problem, where one has data from a variety of different domains.", "keyphrases": ["domain adaptation", "feature augmentation", "access", "model parameter", "small amount"]} +{"id": "zhu-hovy-2007-active", "title": "Active Learning for Word Sense Disambiguation with Methods for Addressing the Class Imbalance Problem", "abstract": "In this paper, we analyze the effect of resampling techniques, including under-sampling and over-sampling used in active learning for word sense disambiguation (WSD). Experimental results show that under-sampling causes negative effects on active learning, but over-sampling is a relatively good choice. To alleviate the within-class imbalance problem of over-sampling, we propose a bootstrap-based oversampling (BootOS) method that works better than ordinary over-sampling in active learning for WSD. Finally, we investigate when to stop active learning, and adopt two strategies, max-confidence and min-error, as stopping conditions for active learning. According to experimental results, we suggest a prediction solution by considering max-confidence as the upper bound and min-error as the lower bound for stopping conditions.", "keyphrases": ["word sense disambiguation", "class imbalance problem", "wsd", "active learning"]} +{"id": "arivazhagan-etal-2019-monotonic", "title": "Monotonic Infinite Lookback Attention for Simultaneous Machine Translation", "abstract": "Simultaneous machine translation begins to translate each source sentence before the source speaker is finished speaking, with applications to live and streaming scenarios. Simultaneous systems must carefully schedule their reading of the source sentence to balance quality against latency. We present the first simultaneous translation system to learn an adaptive schedule jointly with a neural machine translation (NMT) model that attends over all source tokens read thus far. We do so by introducing Monotonic Infinite Lookback (MILk) attention, which maintains both a hard, monotonic attention head to schedule the reading of the source sentence, and a soft attention head that extends from the monotonic head back to the beginning of the source.
We show that MILk's adaptive schedule allows it to arrive at latency-quality trade-offs that are favorable to those of a recently proposed wait-k strategy for many latency values.", "keyphrases": ["simultaneous machine translation", "latency", "policy", "bernoulli variable", "live broadcast"]} +{"id": "zhang-etal-2018-speeding", "title": "Speeding Up Neural Machine Translation Decoding by Cube Pruning", "abstract": "Although neural machine translation has achieved promising results, it suffers from slow translation speed. The direct consequence is that a trade-off has to be made between translation quality and speed, thus its performance can not come into full play. We apply cube pruning, a popular technique to speed up dynamic programming, into neural machine translation to speed up the translation. To construct the equivalence class, similar target hidden states are combined, leading to less RNN expansion operations on the target side and less softmax operations over the large target vocabulary. The experiments show that, at the same or even better translation quality, our method can translate faster compared with naive beam search by 3.3x on GPUs and 3.5x on CPUs.", "keyphrases": ["neural machine translation", "cube pruning", "translation quality"]} +{"id": "gu-etal-2022-ppt", "title": "PPT: Pre-trained Prompt Tuning for Few-shot Learning", "abstract": "Prompts for pre-trained language models (PLMs) have shown remarkable performance by bridging the gap between pre-training tasks and various downstream tasks. Among these methods, prompt tuning, which freezes PLMs and only tunes soft prompts, provides an efficient and effective solution for adapting large-scale PLMs to downstream tasks. However, prompt tuning is yet to be fully explored. In our pilot experiments, we find that prompt tuning performs comparably with conventional full-model tuning when downstream data are sufficient, whereas it is much worse under few-shot learning settings, which may hinder the application of prompt tuning. We attribute this low performance to the manner of initializing soft prompts. Therefore, in this work, we propose to pre-train prompts by adding soft prompts into the pre-training stage to obtain a better initialization. We name this Pre-trained Prompt Tuning framework \u201cPPT\u201d. To ensure the generalization of PPT, we formulate similar classification tasks into a unified task form and pre-train soft prompts for this unified task. Extensive experiments show that tuning pre-trained prompts for downstream tasks can reach or even outperform full-model fine-tuning under both full-data and few-shot settings. Our approach is effective and efficient for using large-scale PLMs in practice.", "keyphrases": ["prompt", "few-shot learning", "low performance", "ppt"]} +{"id": "takamura-etal-2005-extracting", "title": "Extracting Semantic Orientations of Words using Spin Model", "abstract": "We propose a method for extracting semantic orientations of words: desirable or undesirable. Regarding semantic orientations as spins of electrons, we use the mean field approximation to compute the approximate probability function of the system instead of the intractable actual probability function. We also propose a criterion for parameter selection on the basis of magnetization. Given only a small number of seed words, the proposed method extracts semantic orientations with high accuracy in the experiments on English lexicon. 
The result is comparable to the best value ever reported.", "keyphrases": ["semantic orientation", "spin model", "electron", "seed word", "sentiment analysis"]} +{"id": "gao-etal-2021-simcse", "title": "SimCSE: Simple Contrastive Learning of Sentence Embeddings", "abstract": "This paper presents SimCSE, a simple contrastive learning framework that greatly advances the state-of-the-art sentence embeddings. We first describe an unsupervised approach, which takes an input sentence and predicts itself in a contrastive objective, with only standard dropout used as noise. This simple method works surprisingly well, performing on par with previous supervised counterparts. We find that dropout acts as minimal data augmentation and removing it leads to a representation collapse. Then, we propose a supervised approach, which incorporates annotated pairs from natural language inference datasets into our contrastive learning framework, by using \u201centailment\u201d pairs as positives and \u201ccontradiction\u201d pairs as hard negatives. We evaluate SimCSE on standard semantic textual similarity (STS) tasks, and our unsupervised and supervised models using BERT base achieve an average of 76.3% and 81.6% Spearman's correlation respectively, a 4.2% and 2.2% improvement compared to previous best results. We also show\u2014both theoretically and empirically\u2014that contrastive learning objective regularizes pre-trained embeddings' anisotropic space to be more uniform, and it better aligns positive pairs when supervised signals are available.", "keyphrases": ["simple contrastive learning", "sentence embeddings", "language inference dataset", "textual similarity", "simcse"]} +{"id": "sun-etal-2019-dream", "title": "DREAM: A Challenge Data Set and Models for Dialogue-Based Reading Comprehension", "abstract": "We present DREAM, the first dialogue-based multiple-choice reading comprehension data set. Collected from English as a Foreign Language examinations designed by human experts to evaluate the comprehension level of Chinese learners of English, our data set contains 10,197 multiple-choice questions for 6,444 dialogues. In contrast to existing reading comprehension data sets, DREAM is the first to focus on in-depth multi-turn multi-party dialogue understanding. DREAM is likely to present significant challenges for existing reading comprehension systems: 84% of answers are non-extractive, 85% of questions require reasoning beyond a single sentence, and 34% of questions also involve commonsense knowledge. We apply several popular neural reading comprehension models that primarily exploit surface information within the text and find them to, at best, just barely outperform a rule-based approach. We next investigate the effects of incorporating dialogue structure and different kinds of general world knowledge into both rule-based and (neural and non-neural) machine learning-based reading comprehension models. Experimental results on the DREAM data set show the effectiveness of dialogue structure and general world knowledge. DREAM is available at .", "keyphrases": ["reading comprehension", "dialogue understanding", "dream", "passage"]} +{"id": "mishra-etal-2018-author", "title": "Author Profiling for Abuse Detection", "abstract": "The rapid growth of social media in recent years has fed into some highly undesirable phenomena such as proliferation of hateful and offensive language on the Internet. 
Previous research suggests that such abusive content tends to come from users who share a set of common stereotypes and form communities around them. The current state-of-the-art approaches to abuse detection are oblivious to user and community information and rely entirely on textual (i.e., lexical and semantic) cues. In this paper, we propose a novel approach to this problem that incorporates community-based profiling features of Twitter users. Experimenting with a dataset of 16k tweets, we show that our methods significantly outperform the current state of the art in abuse detection. Further, we conduct a qualitative analysis of model characteristics. We release our code, pre-trained models and all the resources used in the public domain.", "keyphrases": ["abuse detection", "community-based profiling feature", "author profiling"]} +{"id": "kamocki-witt-2020-privacy", "title": "Privacy by Design and Language Resources", "abstract": "Privacy by Design (also referred to as Data Protection by Design) is an approach in which solutions and mechanisms addressing privacy and data protection are embedded through the entire project lifecycle, from the early design stage, rather than just added as an additional layer to the final product. Formulated in the 1990s by the Privacy Commissioner of Ontario, the principle of Privacy by Design has been discussed by institutions and policymakers on both sides of the Atlantic, and mentioned already in the 1995 EU Data Protection Directive (95/46/EC). More recently, Privacy by Design was introduced as one of the requirements of the General Data Protection Regulation (GDPR), obliging data controllers to define and adopt, already at the conception phase, appropriate measures and safeguards to implement data protection principles and protect the rights of the data subject. Failing to meet this obligation may result in a hefty fine, as was the case in the Uniontrad decision by the French Data Protection Authority (CNIL). The ambition of the proposed paper is to analyse the practical meaning of Privacy by Design in the context of Language Resources, and propose measures and safeguards that can be implemented by the community to ensure respect of this principle.", "keyphrases": ["design", "language resources", "privacy"]} +{"id": "rogers-etal-2020-primer", "title": "A Primer in BERTology: What We Know About How BERT Works", "abstract": "Transformer-based models have pushed state of the art in many areas of NLP, but our understanding of what is behind their success is still limited. This paper is the first survey of over 150 studies of the popular BERT model. We review the current state of knowledge about how BERT works, what kind of information it learns and how it is represented, common modifications to its training objectives and architecture, the overparameterization issue, and approaches to compression. We then outline directions for future research.", "keyphrases": ["bertology", "many study", "interpretation work"]} +{"id": "nallapati-etal-2016-abstractive", "title": "Abstractive Text Summarization using Sequence-to-sequence RNNs and Beyond", "abstract": "In this work, we model abstractive text summarization using Attentional Encoder-Decoder Recurrent Neural Networks, and show that they achieve state-of-the-art performance on two different corpora.
We propose several novel models that address critical problems in summarization that are not adequately modeled by the basic architecture, such as modeling key-words, capturing the hierarchy of sentence-to-word structure, and emitting words that are rare or unseen at training time. Our work shows that many of our proposed models contribute to further improvement in performance. We also propose a new dataset consisting of multi-sentence summaries, and establish performance benchmarks for further research.", "keyphrases": ["sequence-to-sequence", "rnn", "abstractive text summarization", "language model", "hierarchical attention"]} +{"id": "wittenburg-etal-2006-elan", "title": "ELAN: a Professional Framework for Multimodality Research", "abstract": "Utilization of computer tools in linguistic research has gained importance with the maturation of media frameworks for the handling of digital audio and video. The increased use of these tools in gesture, sign language and multimodal interaction studies has led to stronger requirements on the flexibility, the efficiency and in particular the time accuracy of annotation tools. This paper describes the efforts made to make ELAN a tool that meets these requirements, with special attention to the developments in the area of time accuracy. In subsequent sections an overview will be given of other enhancements in the latest versions of ELAN that makes it a useful tool in multimodality research.", "keyphrases": ["multimodality research", "audio", "elan"]} +{"id": "liu-etal-2021-visually", "title": "Visually Grounded Reasoning across Languages and Cultures", "abstract": "The design of widespread vision-and-language datasets and pre-trained encoders directly adopts, or draws inspiration from, the concepts and images of ImageNet. While one can hardly overestimate how much this benchmark contributed to progress in computer vision, it is mostly derived from lexical databases and image queries in English, resulting in source material with a North American or Western European bias. Therefore, we devise a new protocol to construct an ImageNet-style hierarchy representative of more languages and cultures. In particular, we let the selection of both concepts and images be entirely driven by native speakers, rather than scraping them automatically. Specifically, we focus on a typologically diverse set of languages, namely, Indonesian, Mandarin Chinese, Swahili, Tamil, and Turkish. On top of the concepts and images obtained through this new protocol, we create a multilingual dataset for Multicultural Reasoning over Vision and Language (MaRVL) by eliciting statements from native speaker annotators about pairs of images. The task consists of discriminating whether each grounded statement is true or false. We establish a series of baselines using state-of-the-art models and find that their cross-lingual transfer performance lags dramatically behind supervised performance in English. 
These results invite us to reassess the robustness and accuracy of current state-of-the-art models beyond a narrow domain, but also open up new exciting challenges for the development of truly multilingual and multicultural systems.", "keyphrases": ["reasoning", "culture", "visual question"]} +{"id": "eisenschlos-etal-2020-understanding", "title": "Understanding tables with intermediate pre-training", "abstract": "Table entailment, the binary classification task of finding if a sentence is supported or refuted by the content of a table, requires parsing language and table structure as well as numerical and discrete reasoning. While there is extensive work on textual entailment, table entailment is less well studied. We adapt TAPAS (Herzig et al., 2020), a table-based BERT model, to recognize entailment. Motivated by the benefits of data augmentation, we create a balanced dataset of millions of automatically created training examples which are learned in an intermediate step prior to fine-tuning. This new data is not only useful for table entailment, but also for SQA (Iyyer et al., 2017), a sequential table QA task. To be able to use long examples as input of BERT models, we evaluate table pruning techniques as a pre-processing step to drastically improve the training and prediction efficiency at a moderate drop in accuracy. The different methods set the new state-of-the-art on the TabFact (Chen et al., 2020) and SQA datasets.", "keyphrases": ["table", "pre-training", "table-based fact verification"]} +{"id": "kong-etal-2010-dependency", "title": "Dependency-driven Anaphoricity Determination for Coreference Resolution", "abstract": "This paper proposes a dependency-driven scheme to dynamically determine the syntactic parse tree structure for tree kernel-based anaphoricity determination in coreference resolution. Given a full syntactic parse tree, it keeps the nodes and the paths related with current mention based on constituent dependencies from both syntactic and semantic perspectives, while removing the noisy information, eventually leading to a dependency-driven dynamic syntactic parse tree (D-DSPT). Evaluation on the ACE 2003 corpus shows that the D-DSPT outperforms all previous parse tree structures on anaphoricity determination, and that applying our anaphoricity determination module in coreference resolution achieves the so far best performance.", "keyphrases": ["anaphoricity determination", "coreference resolution", "dependency-driven scheme", "mention"]} +{"id": "habernal-etal-2017-argotario", "title": "Argotario: Computational Argumentation Meets Serious Games", "abstract": "An important skill in critical thinking and argumentation is the ability to spot and recognize fallacies. Fallacious arguments, omnipresent in argumentative discourse, can be deceptive, manipulative, or simply leading to `wrong moves' in a discussion. Despite their importance, argumentation scholars and NLP researchers with focus on argumentation quality have not yet investigated fallacies empirically. The nonexistence of resources dealing with fallacious argumentation calls for scalable approaches to data acquisition and annotation, for which the serious games methodology offers an appealing, yet unexplored, alternative. We present Argotario, a serious game that deals with fallacies in everyday argumentation. 
Argotario is a multilingual, open-source, platform-independent application with strong educational aspects, accessible at .", "keyphrases": ["game", "fallacy", "argotario", "irrelevant authority", "propaganda technique"]} +{"id": "eskander-etal-2014-foreign", "title": "Foreign Words and the Automatic Processing of Arabic Social Media Text Written in Roman Script", "abstract": "Arabic on social media has all the properties of any language on social media that make it tough for natural language processing, plus some specific problems. These include diglossia, the use of an alternative alphabet (Roman), and code switching with foreign languages. In this paper, we present a system which can process Arabic written in Roman alphabet (\u201cArabizi\u201d). It identifies whether each word is a foreign word or one of another four categories (Arabic, name, punctuation, sound), and transliterates Arabic words and names into the Arabic alphabet. We obtain an overall system performance of 83.8% on an unseen test set.", "keyphrases": ["roman script", "arabizi", "foreign word", "social medium text"]} +{"id": "liu-lapata-2019-text", "title": "Text Summarization with Pretrained Encoders", "abstract": "Bidirectional Encoder Representations from Transformers (BERT) represents the latest incarnation of pretrained language models which have recently advanced a wide range of natural language processing tasks. In this paper, we showcase how BERT can be usefully applied in text summarization and propose a general framework for both extractive and abstractive models. We introduce a novel document-level encoder based on BERT which is able to express the semantics of a document and obtain representations for its sentences. Our extractive model is built on top of this encoder by stacking several inter-sentence Transformer layers. For abstractive summarization, we propose a new fine-tuning schedule which adopts different optimizers for the encoder and the decoder as a means of alleviating the mismatch between the two (the former is pretrained while the latter is not). We also demonstrate that a two-staged fine-tuning approach can further boost the quality of the generated summaries. Experiments on three datasets show that our model achieves state-of-the-art results across the board in both extractive and abstractive settings.", "keyphrases": ["language model", "advance", "inter-sentence transformer layer", "text summarization"]} +{"id": "daxenberger-etal-2017-essence", "title": "What is the Essence of a Claim? Cross-Domain Claim Identification", "abstract": "Argument mining has become a popular research area in NLP. It typically includes the identification of argumentative components, e.g. claims, as the central component of an argument. We perform a qualitative analysis across six different datasets and show that these appear to conceptualize claims quite differently. To learn about the consequences of such different conceptualizations of claim for practical applications, we carried out extensive experiments using state-of-the-art feature-rich and deep learning systems, to identify claims in a cross-domain fashion. 
While the divergent conceptualization of claims in different datasets is indeed harmful to cross-domain classification, we show that there are shared properties on the lexical level as well as system configurations that can help to overcome these gaps.", "keyphrases": ["claim", "cross-domain claim identification", "argument mining", "cross-domain classification"]} +{"id": "talbot-osborne-2007-smoothed", "title": "Smoothed Bloom Filter Language Models: Tera-Scale LMs on the Cheap", "abstract": "A Bloom filter (BF) is a randomised data structure for set membership queries. Its space requirements fall significantly below lossless information-theoretic lower bounds but it produces false positives with some quantifiable probability. Here we present a general framework for deriving smoothed language model probabilities from BFs. We investigate how a BF containing n-gram statistics can be used as a direct replacement for a conventional n-gram model. Recent work has demonstrated that corpus statistics can be stored efficiently within a BF, here we consider how smoothed language model probabilities can be derived efficiently from this randomised representation. Our proposal takes advantage of the one-sided error guarantees of the BF and simple inequalities that hold between related n-gram statistics in order to further reduce the BF storage requirements and the error rate of the derived probabilities. We use these models as replacements for a conventional language model in machine translation experiments.", "keyphrases": ["bloom filter", "language model", "data structure"]} +{"id": "shekhar-etal-2017-foil", "title": "FOIL it! Find One mismatch between Image and Language caption", "abstract": "In this paper, we aim to understand whether current language and vision (LaVi) models truly grasp the interaction between the two modalities. To this end, we propose an extension of the MS-COCO dataset, FOIL-COCO, which associates images with both correct and `foil' captions, that is, descriptions of the image that are highly similar to the original ones, but contain one single mistake (`foil word'). We show that current LaVi models fall into the traps of this data and perform badly on three tasks: a) caption classification (correct vs. foil); b) foil word detection; c) foil word correction. Humans, in contrast, have near-perfect performance on those tasks. We demonstrate that merely utilising language cues is not enough to model FOIL-COCO and that it challenges the state-of-the-art by requiring a fine-grained understanding of the relation between text and image.", "keyphrases": ["image", "language caption", "foil"]} +{"id": "herzig-etal-2020-tapas", "title": "TaPas: Weakly Supervised Table Parsing via Pre-training", "abstract": "Answering natural language questions over tables is usually seen as a semantic parsing task. To alleviate the collection cost of full logical forms, one popular approach focuses on weak supervision consisting of denotations instead of logical forms. However, training semantic parsers from weak supervision poses difficulties, and in addition, the generated logical forms are only used as an intermediate step prior to retrieving the denotation. In this paper, we present TaPas, an approach to question answering over tables without generating logical forms. TaPas trains from weak supervision, and predicts the denotation by selecting table cells and optionally applying a corresponding aggregation operator to such selection. 
TaPas extends BERT's architecture to encode tables as input, initializes from an effective joint pre-training of text segments and tables crawled from Wikipedia, and is trained end-to-end. We experiment with three different semantic parsing datasets, and find that TaPas outperforms or rivals semantic parsing models by improving state-of-the-art accuracy on SQA from 55.1 to 67.2 and performing on par with the state-of-the-art on WikiSQL and WikiTQ, but with a simpler model architecture. We additionally find that transfer learning, which is trivial in our setting, from WikiSQL to WikiTQ, yields 48.7 accuracy, 4.2 points above the state-of-the-art.", "keyphrases": ["table", "language model", "pre-training model"]} +{"id": "ono-etal-2015-word", "title": "Word Embedding-based Antonym Detection using Thesauri and Distributional Information", "abstract": "This paper proposes a novel approach to train word embeddings to capture antonyms. Word embeddings have shown to capture synonyms and analogies. Such word embeddings, however, cannot capture antonyms since they depend on the distributional hypothesis. Our approach utilizes supervised synonym and antonym information from thesauri, as well as distributional information from large-scale unlabelled text data. The evaluation results on the GRE antonym question task show that our model outperforms the state-of-the-art systems and it can answer the antonym questions in the F-score of 89%.", "keyphrases": ["antonym detection", "thesauri", "distributional information"]} +{"id": "chen-etal-2018-joint", "title": "Joint Learning for Emotion Classification and Emotion Cause Detection", "abstract": "We present a neural network-based joint approach for emotion classification and emotion cause detection, which attempts to capture mutual benefits across the two sub-tasks of emotion analysis. Considering that emotion classification and emotion cause detection need different kinds of features (affective and event-based separately), we propose a joint encoder which uses a unified framework to extract features for both sub-tasks and a joint model trainer which simultaneously learns two models for the two sub-tasks separately. Our experiments on Chinese microblogs show that the joint approach is very promising.", "keyphrases": ["emotion classification", "detection", "joint approach"]} +{"id": "shao-etal-2017-character", "title": "Character-based Joint Segmentation and POS Tagging for Chinese using Bidirectional RNN-CRF", "abstract": "We present a character-based model for joint segmentation and POS tagging for Chinese. The bidirectional RNN-CRF architecture for general sequence tagging is adapted and applied with novel vector representations of Chinese characters that capture rich contextual information and lower-than-character level features. The proposed model is extensively evaluated and compared with a state-of-the-art tagger respectively on CTB5, CTB9 and UD Chinese. The experimental results indicate that our model is accurate and robust across datasets in different sizes, genres and annotation schemes. 
We obtain state-of-the-art performance on CTB5, achieving 94.38 F1-score for joint segmentation and POS tagging.", "keyphrases": ["joint segmentation", "pos tagging", "word boundary"]} +{"id": "kulick-2010-simultaneous", "title": "Simultaneous Tokenization and Part-Of-Speech Tagging for Arabic without a Morphological Analyzer", "abstract": "We describe an approach to simultaneous tokenization and part-of-speech tagging that is based on separating the closed and open-class items, and focusing on the likelihood of the possible stems of the open-class words. By encoding some basic linguistic information, the machine learning task is simplified, while achieving state-of-the-art tokenization results and competitive POS results, although with a reduced tag set and some evaluation difficulties.", "keyphrases": ["part-of-speech tagging", "arabic", "simultaneous tokenization"]} +{"id": "mcdonald-etal-2005-online", "title": "Online Large-Margin Training of Dependency Parsers", "abstract": "We present an effective training algorithm for linearly-scored dependency parsers that implements online large-margin multi-class training (Crammer and Singer, 2003; Crammer et al., 2003) on top of efficient parsing techniques for dependency trees (Eisner, 1996). The trained parsers achieve a competitive dependency accuracy for both English and Czech with no language specific enhancements.", "keyphrases": ["dependency parser", "edge", "mst", "feature representation", "learning method"]} +{"id": "martinez-alonso-plank-2017-multitask", "title": "When is multitask learning effective? Semantic sequence prediction under varying data conditions", "abstract": "Multitask learning has been applied successfully to a range of tasks, mostly morphosyntactic. However, little is known on when MTL works and whether there are data characteristics that help to determine the success of MTL. In this paper we evaluate a range of semantic sequence labeling tasks in a MTL setup. We examine different auxiliary task configurations, amongst which a novel setup, and correlate their impact to data-dependent conditions. Our results show that MTL is not always effective, because significant improvements are obtained only for 1 out of 5 tasks. When successful, auxiliary tasks with compact and more uniform label distributions are preferable.", "keyphrases": ["mtl", "auxiliary task", "uniform label distribution"]} +{"id": "kipper-etal-2006-extending", "title": "Extending VerbNet with Novel Verb Classes", "abstract": "Lexical classifications have proved useful in supporting various natural language processing (NLP) tasks. The largest verb classification for English is Levin's (1993) work which defined groupings of verbs based on syntactic properties. VerbNet - the largest computational verb lexicon currently available for English - provides detailed syntactic-semantic descriptions of Levin classes. While the classes included are extensive enough for some NLP use, they are not comprehensive. Korhonen and Briscoe (2004) have proposed a significant extension of Levin's classification which incorporates 57 novel classes for verbs not covered (comprehensively) by Levin. This paper describes the integration of these classes into VerbNet.
The result is the most extensive Levin-style classification for English verbs which can be highly useful for practical applications.", "keyphrases": ["verbnet", "levin class", "extension"]} +{"id": "yang-etal-2019-reducing", "title": "Reducing Word Omission Errors in Neural Machine Translation: A Contrastive Learning Approach", "abstract": "While neural machine translation (NMT) has achieved remarkable success, NMT systems are prone to make word omission errors. In this work, we propose a contrastive learning approach to reducing word omission errors in NMT. The basic idea is to enable the NMT model to assign a higher probability to a ground-truth translation and a lower probability to an erroneous translation, which is automatically constructed from the ground-truth translation by omitting words. We design different types of negative examples depending on the number of omitted words, word frequency, and part of speech. Experiments on Chinese-to-English, German-to-English, and Russian-to-English translation tasks show that our approach is effective in reducing word omission errors and achieves better translation performance than three baseline methods.", "keyphrases": ["word omission error", "neural machine translation", "contrastive learning approach", "ground-truth translation"]} +{"id": "clouatre-etal-2021-mlmlm", "title": "MLMLM: Link Prediction with Mean Likelihood Masked Language Model", "abstract": "Knowledge Bases (KBs) are easy to query, verifiable, and interpretable. They however scale with man-hours and high-quality data. Masked Language Models (MLMs), such as BERT, scale with computing power as well as unstructured raw text data. The knowledge contained within those models is however not directly interpretable. We propose to perform link prediction with MLMs to address both the KBs scalability issues and the MLMs interpretability issues. To do that we introduce MLMLM, Mean Likelihood Masked Language Model, an approach comparing the mean likelihood of generating the different entities to perform link prediction in a tractable manner. We obtain State of the Art (SotA) results on the WN18RR dataset and the best non-entity-embedding based results on the FB15k-237 dataset. We also obtain convincing results on link prediction on previously unseen entities, making MLMLM a suitable approach to introducing new entities to a KB.", "keyphrases": ["link prediction", "query", "mlmlm"]} +{"id": "liang-etal-2006-alignment", "title": "Alignment by Agreement", "abstract": "We present an unsupervised approach to symmetric word alignment in which two simple asymmetric models are trained jointly to maximize a combination of data likelihood and agreement between the models. Compared to the standard practice of intersecting predictions of independently-trained models, joint training provides a 32% reduction in AER. Moreover, a simple and efficient pair of HMM aligners provides a 29% reduction in AER over symmetrized IBM model 4 predictions.", "keyphrases": ["agreement", "word alignment", "asymmetric model", "hmm aligner", "direction"]} +{"id": "jiang-etal-2020-know", "title": "How Can We Know What Language Models Know?", "abstract": "Recent work has presented intriguing results examining the knowledge contained in language models (LMs) by having the LM fill in the blanks of prompts such as \u201cObama is a __ by profession\u201d. 
These prompts are usually manually created, and quite possibly sub-optimal; another prompt such as \u201cObama worked as a __ \u201d may result in more accurately predicting the correct profession. Because of this, given an inappropriate prompt, we might fail to retrieve facts that the LM does know, and thus any given prompt only provides a lower bound estimate of the knowledge contained in an LM. In this paper, we attempt to more accurately estimate the knowledge contained in LMs by automatically discovering better prompts to use in this querying process. Specifically, we propose mining-based and paraphrasing-based methods to automatically generate high-quality and diverse prompts, as well as ensemble methods to combine answers from different prompts. Extensive experiments on the LAMA benchmark for extracting relational knowledge from LMs demonstrate that our methods can improve accuracy from 31.1% to 39.6%, providing a tighter lower bound on what LMs know. We have released the code and the resulting LM Prompt And Query Archive (LPAQA) at .", "keyphrases": ["language model", "prompt", "paraphrasing-based method", "relational knowledge", "mlm"]} +{"id": "modi-etal-2016-inscript", "title": "InScript: Narrative texts annotated with script information", "abstract": "This paper presents the InScript corpus (Narrative Texts Instantiating Script structure). InScript is a corpus of 1,000 stories centered around 10 different scenarios. Verbs and noun phrases are annotated with event and participant types, respectively. Additionally, the text is annotated with coreference information. The corpus shows rich lexical variation and will serve as a unique resource for the study of the role of script knowledge in natural language processing.", "keyphrases": ["story", "different scenario", "participant", "script knowledge", "inscript"]} +{"id": "zhu-etal-2021-ontogum", "title": "OntoGUM: Evaluating Contextualized SOTA Coreference Resolution on 12 More Genres", "abstract": "SOTA coreference resolution produces increasingly impressive scores on the OntoNotes benchmark. However lack of comparable data following the same scheme for more genres makes it difficult to evaluate generalizability to open domain data. This paper provides a dataset and comprehensive evaluation showing that the latest neural LM based end-to-end systems degrade very substantially out of domain. We make an OntoNotes-like coreference dataset called OntoGUM publicly available, converted from GUM, an English corpus covering 12 genres, using deterministic rules, which we evaluate. Thanks to the rich syntactic and discourse annotations in GUM, we are able to create the largest human-annotated coreference corpus following the OntoNotes guidelines, and the first to be evaluated for consistency with the OntoNotes scheme. Out-of-domain evaluation across 12 genres shows nearly 15-20% degradation for both deterministic and deep learning systems, indicating a lack of generalizability or covert overfitting in existing coreference resolution models.", "keyphrases": ["sota coreference resolution", "genre", "ontogum", "ontonotes schema"]} +{"id": "nguyen-etal-2007-subtree", "title": "Subtree Mining for Relation Extraction from Wikipedia", "abstract": "In this study, we address the problem of extracting relations between entities from Wikipedia's English articles. Our proposed method first anchors the appearance of entities in Wikipedia's articles using neither Named Entity Recognizer (NER) nor coreference resolution tool. 
It then classifies the relationships between entity pairs using SVM with features extracted from the web structure and subtrees mined from the syntactic structure of text. We evaluate our method on manually annotated data from actual Wikipedia articles.", "keyphrases": ["relation extraction", "wikipedia", "subtree"]} +{"id": "bekoulis-etal-2018-adversarial", "title": "Adversarial training for multi-context joint entity and relation extraction", "abstract": "Adversarial training (AT) is a regularization method that can be used to improve the robustness of neural network methods by adding small perturbations in the training data. We show how to use AT for the tasks of entity recognition and relation extraction. In particular, we demonstrate that applying AT to a general purpose baseline model for jointly extracting entities and relations, allows improving the state-of-the-art effectiveness on several datasets in different contexts (i.e., news, biomedical, and real estate data) and for different languages (English and Dutch).", "keyphrases": ["joint entity", "relation extraction", "adversarial training"]} +{"id": "ikeda-etal-2016-japanese", "title": "Japanese Text Normalization with Encoder-Decoder Model", "abstract": "Text normalization is the task of transforming lexical variants to their canonical forms. We model the problem of text normalization as a character-level sequence to sequence learning problem and present a neural encoder-decoder model for solving it. To train the encoder-decoder model, many sentence pairs are generally required. However, Japanese non-standard canonical pairs are scarce in the form of parallel corpora. To address this issue, we propose a method of data augmentation to increase data size by converting existing resources into synthesized non-standard forms using handcrafted rules. We conducted an experiment to demonstrate that the synthesized corpus contributes to stably train an encoder-decoder model and improve the performance of Japanese text normalization.", "keyphrases": ["text normalization", "encoder-decoder model", "sentence pair", "data augmentation", "character level"]} +{"id": "utiyama-isahara-2007-comparison", "title": "A Comparison of Pivot Methods for Phrase-Based Statistical Machine Translation", "abstract": "We compare two pivot strategies for phrase-based statistical machine translation (SMT), namely phrase translation and sentence translation. The phrase translation strategy means that we directly construct a phrase translation table (phrase-table) of the source and target language pair from two phrase-tables; one constructed from the source language and English and one constructed from English and the target language. We then use that phrase-table in a phrase-based SMT system. The sentence translation strategy means that we first translate a source language sentence into n English sentences and then translate these n sentences into target language sentences separately. Then, we select the highest scoring sentence from these target sentences. We conducted controlled experiments using the Europarl corpus to evaluate the performance of these pivot strategies as compared to directly trained SMT systems. The phrase translation strategy significantly outperformed the sentence translation strategy.
Its relative performance was 0.92 to 0.97 compared to directly trained SMT systems.", "keyphrases": ["statistical machine translation", "pivot language", "many researcher", "bridging", "transfer method"]} +{"id": "kim-cho-2021-length", "title": "Length-Adaptive Transformer: Train Once with Length Drop, Use Anytime with Search", "abstract": "Despite transformers' impressive accuracy, their computational cost is often prohibitive to use with limited computational resources. Most previous approaches to improve inference efficiency require a separate model for each possible computational budget. In this paper, we extend PoWER-BERT (Goyal et al., 2020) and propose Length-Adaptive Transformer that can be used for various inference scenarios after one-shot training. We train a transformer with LengthDrop, a structural variant of dropout, which stochastically determines a sequence length at each layer. We then conduct a multi-objective evolutionary search to find a length configuration that maximizes the accuracy and minimizes the efficiency metric under any given computational budget. Additionally, we significantly extend the applicability of PoWER-BERT beyond sequence-level classification into token-level classification with Drop-and-Restore process that drops word-vectors temporarily in intermediate layers and restores at the last layer if necessary. We empirically verify the utility of the proposed approach by demonstrating the superior accuracy-efficiency trade-off under various setups, including span-based question answering and text classification. Code is available at .", "keyphrases": ["search", "one-shot training", "length-adaptive transformer"]} +{"id": "duan-etal-2019-contrastive", "title": "Contrastive Attention Mechanism for Abstractive Sentence Summarization", "abstract": "We propose a contrastive attention mechanism to extend the sequence-to-sequence framework for abstractive sentence summarization task, which aims to generate a brief summary of a given source sentence. The proposed contrastive attention mechanism accommodates two categories of attention: one is the conventional attention that attends to relevant parts of the source sentence, the other is the opponent attention that attends to irrelevant or less relevant parts of the source sentence. Both attentions are trained in an opposite way so that the contribution from the conventional attention is encouraged and the contribution from the opponent attention is discouraged through a novel softmax and softmin functionality. Experiments on benchmark datasets show that, the proposed contrastive attention mechanism is more focused on the relevant parts for the summary than the conventional attention mechanism, and greatly advances the state-of-the-art performance on the abstractive sentence summarization task. We release the code at Abstractive-Text-Summarization.", "keyphrases": ["abstractive sentence summarization", "relevant part", "contrastive attention mechanism"]} +{"id": "pasunuru-etal-2017-towards", "title": "Towards Improving Abstractive Summarization via Entailment Generation", "abstract": "Abstractive summarization, the task of rewriting and compressing a document into a short summary, has achieved considerable success with neural sequence-to-sequence models. However, these models can still benefit from stronger natural language inference skills, since a correct summary is logically entailed by the input document, i.e., it should not contain any contradictory or unrelated information. 
We incorporate such knowledge into an abstractive summarization model via multi-task learning, where we share its decoder parameters with those of an entailment generation model. We achieve promising initial improvements based on multiple metrics and datasets (including a test-only setting). The domain mismatch between the entailment (captions) and summarization (news) datasets suggests that the model is learning some domain-agnostic inference skills.", "keyphrases": ["abstractive summarization", "entailment generation", "auxiliary task", "question generation"]} +{"id": "hildebrand-etal-2005-adaptation", "title": "Adaptation of the translation model for statistical machine translation based on information retrieval", "abstract": "In this paper we present experiments concerning translation model adaptation for statistical machine translation. We develop a method to adapt translation models using information retrieval. The approach selects sentences similar to the test set to form an adapted training corpus. The method allows a better use of additionally available out-of-domain training data or finds in-domain data in a mixed corpus. The adapted translation models significantly improve the translation performance compared to competitive baseline systems.", "keyphrases": ["translation model", "information retrieval", "out-of-domain training data", "sentence pair", "similar approach"]} +{"id": "zhou-xu-2015-end", "title": "End-to-end learning of semantic role labeling using recurrent neural networks", "abstract": "Semantic role labeling (SRL) is one of the basic natural language processing (NLP) problems. To this date, most of the successful SRL systems were built on top of some form of parsing results (Koomen et al., 2005; Palmer et al., 2010; Pradhan et al., 2013), where pre-defined feature templates over the syntactic structure are used. The attempts of building an end-to-end SRL learning system without using parsing were less successful (Collobert et al., 2011). In this work, we propose to use deep bi-directional recurrent network as an end-to-end system for SRL. We take only original text information as input feature, without using any syntactic knowledge. The proposed algorithm for semantic role labeling was mainly evaluated on CoNLL-2005 shared task and achieved F1 score of 81.07. This result outperforms the previous state-of-the-art system from the combination of different parsing trees or models. We also obtained the same conclusion with F1 = 81.27 on CoNLL-2012 shared task. As a result of simplicity, our model is also computationally efficient that the parsing speed is 6.7k tokens per second. Our analysis shows that our model is better at handling longer sentences than traditional models. And the latent variables of our model implicitly capture the syntactic structure of a sentence.", "keyphrases": ["semantic role labeling", "recurrent neural network", "state-of-the-art result"]} +{"id": "sap-etal-2020-social", "title": "Social Bias Frames: Reasoning about Social and Power Implications of Language", "abstract": "Warning: this paper contains content that may be offensive or upsetting. Language has the power to reinforce stereotypes and project social biases onto others. At the core of the challenge is that it is rarely what is stated explicitly, but rather the implied meanings, that frame people's judgments about others.
For example, given a statement that \u201cwe shouldn't lower our standards to hire more women,\u201d most listeners will infer the implicature intended by the speaker - that \u201cwomen (candidates) are less qualified.\u201d Most semantic formalisms, to date, do not capture such pragmatic implications in which people express social biases and power differentials in language. We introduce Social Bias Frames, a new conceptual formalism that aims to model the pragmatic frames in which people project social biases and stereotypes onto others. In addition, we introduce the Social Bias Inference Corpus to support large-scale modelling and evaluation with 150k structured annotations of social media posts, covering over 34k implications about a thousand demographic groups. We then establish baseline approaches that learn to recover Social Bias Frames from unstructured text. We find that while state-of-the-art neural models are effective at high-level categorization of whether a given statement projects unwanted social bias (80% F1), they are not effective at spelling out more detailed explanations in terms of Social Bias Frames. Our study motivates future work that combines structured pragmatic inference with commonsense reasoning on social implications.", "keyphrases": ["implication", "stereotype", "social bias frames", "hate speech"]} +{"id": "diab-etal-2004-automatic", "title": "Automatic Tagging of Arabic Text: From Raw Text to Base Phrase Chunks", "abstract": "To date, there are no fully automated systems addressing the community's need for fundamental language processing tools for Arabic text. In this paper, we present a Support Vector Machine (SVM) based approach to automatically tokenize (segmenting off clitics), part-of-speech (POS) tag and annotate base phrases (BPs) in Arabic text. We adapt highly accurate tools that have been developed for English text and apply them to Arabic text. Using standard evaluation metrics, we report that the SVM-TOK tokenizer achieves an F\u03b2=1 score of 99.12, the SVM-POS tagger achieves an accuracy of 95.49%, and the SVM-BP chunker yields an F\u03b2=1 score of 92.08.", "keyphrases": ["arabic text", "pos tagging", "disambiguation"]} +{"id": "lin-etal-2006-side", "title": "Which Side are You on? Identifying Perspectives at the Document and Sentence Levels", "abstract": "In this paper we investigate a new problem of identifying the perspective from which a document is written. By perspective we mean a point of view, for example, from the perspective of Democrats or Republicans. Can computers learn to identify the perspective of a document? Not every sentence is written strongly from a perspective. Can computers learn to identify which sentences strongly convey a particular perspective? We develop statistical models to capture how perspectives are expressed at the document and sentence levels, and evaluate the proposed models on articles about the Israeli-Palestinian conflict. The results show that the proposed models successfully learn how perspectives are reflected in word usage and can identify the perspective of a document with high accuracy.", "keyphrases": ["perspective", "sentence level", "conflict", "document collection"]} +{"id": "duan-etal-2017-question", "title": "Question Generation for Question Answering", "abstract": "This paper presents how to generate questions from given passages using neural networks, where large scale QA pairs are automatically crawled and processed from Community-QA website, and used as training data. 
The contribution of the paper is 2-fold: First, two types of question generation approaches are proposed, one is a retrieval-based method using convolution neural network (CNN), the other is a generation-based method using recurrent neural network (RNN); Second, we show how to leverage the generated questions to improve existing question answering systems. We evaluate our question generation method for the answer sentence selection task on three benchmark datasets, including SQuAD, MS MARCO, and WikiQA. Experimental results show that, by using generated questions as an extra signal, significant QA improvement can be achieved.", "keyphrases": ["question generation", "single sentence", "synthetic data"]} +{"id": "misra-etal-2020-exploring", "title": "Exploring BERT's Sensitivity to Lexical Cues using Tests from Semantic Priming", "abstract": "Models trained to estimate word probabilities in context have become ubiquitous in natural language processing. How do these models use lexical cues in context to inform their word probabilities? To answer this question, we present a case study analyzing the pre-trained BERT model with tests informed by semantic priming. Using English lexical stimuli that show priming in humans, we find that BERT too shows \u201cpriming\u201d, predicting a word with greater probability when the context includes a related word versus an unrelated one. This effect decreases as the amount of information provided by the context increases. Follow-up analysis shows BERT to be increasingly distracted by related prime words as context becomes more informative, assigning lower probabilities to related words. Our findings highlight the importance of considering contextual constraint effects when studying word prediction in these models, and highlight possible parallels with human processing.", "keyphrases": ["bert", "semantic priming", "related word"]} +{"id": "voita-titov-2020-information", "title": "Information-Theoretic Probing with Minimum Description Length", "abstract": "To measure how well pretrained representations encode some linguistic property, it is common to use accuracy of a probe, i.e. a classifier trained to predict the property from the representations. Despite widespread adoption of probes, differences in their accuracy fail to adequately reflect differences in representations. For example, they do not substantially favour pretrained representations over randomly initialized ones. Analogously, their accuracy can be similar when probing for genuine linguistic labels and probing for random synthetic tasks. To see reasonable differences in accuracy with respect to these random baselines, previous work had to constrain either the amount of probe training data or its model size. Instead, we propose an alternative to the standard probes, information-theoretic probing with minimum description length (MDL). With MDL probing, training a probe to predict labels is recast as teaching it to effectively transmit the data. Therefore, the measure of interest changes from probe accuracy to the description length of labels given representations. In addition to probe quality, the description length evaluates \u201cthe amount of effort\u201d needed to achieve the quality. This amount of effort characterizes either (i) size of a probing model, or (ii) the amount of data needed to achieve the high quality. We consider two methods for estimating MDL which can be easily implemented on top of the standard probing pipelines: variational coding and online coding. 
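A small sketch of the priming probe described in misra-etal-2020-exploring: compare the model's probability for a target word when the context contains a related versus an unrelated prime. This uses the Hugging Face fill-mask pipeline (which downloads bert-base-uncased on first run); the prime/target pair here is an illustrative assumption, whereas the paper uses curated human priming stimuli.

# priming_probe_sketch.py -- related vs. unrelated prime, same target word
from transformers import pipeline

fill = pipeline("fill-mask", model="bert-base-uncased")

def target_prob(context, target):
    # Restrict the fill-mask candidates to the target and read off its score.
    for cand in fill(context, targets=[target]):
        return cand["score"]

related = "He saw a doctor and then a [MASK]."
unrelated = "He saw a table and then a [MASK]."
# A priming effect shows up as a higher probability in the related context.
print(target_prob(related, "nurse"), target_prob(unrelated, "nurse"))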
We show that these methods agree in results and are more informative and stable than the standard probes.", "keyphrases": ["probing", "minimum description length", "intermediate representation", "significant uncertainty", "diagnostic classifier"]} +{"id": "poon-etal-2009-unsupervised", "title": "Unsupervised Morphological Segmentation with Log-Linear Models", "abstract": "Morphological segmentation breaks words into morphemes (the basic semantic units). It is a key component for natural language processing systems. Unsupervised morphological segmentation is attractive, because in every language there are virtually unlimited supplies of text, but very few labeled resources. However, most existing model-based systems for unsupervised morphological segmentation use directed generative models, making it difficult to leverage arbitrary overlapping features that are potentially helpful to learning. In this paper, we present the first log-linear model for unsupervised morphological segmentation. Our model uses overlapping features such as morphemes and their contexts, and incorporates exponential priors inspired by the minimum description length (MDL) principle. We present efficient algorithms for learning and inference by combining contrastive estimation with sampling. Our system, based on monolingual features only, outperforms a state-of-the-art system by a large margin, even when the latter uses bilingual information such as phrasal alignment and phonetic correspondence. On the Arabic Penn Treebank, our system reduces F1 error by 11% compared to Morfessor.", "keyphrases": ["log-linear model", "unit", "generative model", "contrastive estimation", "unsupervised morphological segmentation"]} +{"id": "bender-friedman-2018-data", "title": "Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science", "abstract": "In this paper, we propose data statements as a design solution and professional practice for natural language processing technologists, in both research and development. Through the adoption and widespread use of data statements, the field can begin to address critical scientific and ethical issues that result from the use of data from certain populations in the development of technology for other populations. We present a form that data statements can take and explore the implications of adopting them as part of regular practice. We argue that data statements will help alleviate issues related to exclusion and bias in language technology, lead to better precision in claims about how natural language processing research can generalize and thus better engineering results, protect companies from public embarrassment, and ultimately lead to language technology that meets its users in their own preferred linguistic style and furthermore does not misrepresent them to others.", "keyphrases": ["practice", "language processing technologist", "data statement", "template"]} +{"id": "zeman-resnik-2008-cross", "title": "Cross-Language Parser Adaptation between Related Languages", "abstract": "The present paper describes an approach to adapting a parser to a new language. Presumably the target language is much poorer in linguistic resources than the source language. The technique has been tested on two European languages due to test data availability; however, it is easily applicable to any pair of sufficiently related languages, including some of the Indic language group. 
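A minimal sketch of the online-coding estimate of MDL from voita-titov-2020-information: the description length of the labels is the cost of transmitting each successive block under a probe trained on all preceding blocks, plus a uniform code for the first block. The representations X, labels y, probe choice, and block fractions below are placeholder assumptions.

# mdl_online_coding_sketch.py -- description length of labels given representations
import numpy as np
from sklearn.linear_model import LogisticRegression

def online_codelength(X, y, fractions=(0.1, 0.2, 0.4, 0.8, 1.0)):
    n, num_classes = len(y), len(set(y))
    ends = [max(int(f * n), num_classes) for f in fractions]
    total = ends[0] * np.log2(num_classes)       # first block: uniform code
    for a, b in zip(ends[:-1], ends[1:]):
        probe = LogisticRegression(max_iter=1000).fit(X[:a], y[:a])
        probs = probe.predict_proba(X[a:b])
        classes = list(probe.classes_)
        idx = [classes.index(label) for label in y[a:b]]
        # Codelength of the next block under the current probe, in bits.
        total += -np.log2(probs[np.arange(b - a), idx] + 1e-12).sum()
    return total  # lower means the property is easier to extract

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 16))
y = (X[:, 0] > 0).astype(int)    # a property trivially encoded in X
print(round(online_codelength(X, y), 1))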
Our adaptation technique using existing annotations in the source language achieves performance equivalent to that obtained by training on 1546 trees in the target language.", "keyphrases": ["related language", "treebank", "part-of-speech tag"]} +{"id": "baroni-lenci-2010-distributional", "title": "Distributional Memory: A General Framework for Corpus-Based Semantics", "abstract": "Research into corpus-based semantics has focused on the development of ad hoc models that treat single tasks, or sets of closely related tasks, as unrelated challenges to be tackled by extracting different kinds of distributional information from the corpus. As an alternative to this \u201cone task, one model\u201d approach, the Distributional Memory framework extracts distributional information once and for all from the corpus, in the form of a set of weighted word-link-word tuples arranged into a third-order tensor. Different matrices are then generated from the tensor, and their rows and columns constitute natural spaces to deal with different semantic problems. In this way, the same distributional information can be shared across tasks such as modeling word similarity judgments, discovering synonyms, concept categorization, predicting selectional preferences of verbs, solving analogy problems, classifying relations between word pairs, harvesting qualia structures with patterns or example pairs, predicting the typical properties of concepts, and classifying verbs into alternation classes. Extensive empirical testing in all these domains shows that a Distributional Memory implementation performs competitively against task-specific algorithms recently reported in the literature for the same tasks, and against our implementations of several state-of-the-art methods. The Distributional Memory approach is thus shown to be tenable despite the constraints imposed by its multi-purpose nature.", "keyphrases": ["general framework", "corpus-based semantic", "distributional memory", "syntactic relation", "co-occurrence"]} +{"id": "jia-etal-2013-graph", "title": "Graph Model for Chinese Spell Checking", "abstract": "This paper describes our system in the Bake-Off 2013 task of SIGHAN 7. We illustrate that Chinese spell checking and correction can be efficiently tackled by utilizing a word segmenter. A graph model is used to represent the sentence and a single source shortest path (SSSP) algorithm is performed on the graph to correct spell errors. Our system achieves 4 first ranks out of 10 metrics on the standard test set.", "keyphrases": ["chinese spell checking", "single source", "graph model"]} +{"id": "huang-sagae-2010-dynamic", "title": "Dynamic Programming for Linear-Time Incremental Parsing", "abstract": "Incremental parsing techniques such as shift-reduce have gained popularity thanks to their efficiency, but there remains a major problem: the search is greedy and only explores a tiny fraction of the whole space (even with beam search) as opposed to dynamic programming. We show that, surprisingly, dynamic programming is in fact possible for many shift-reduce parsers, by merging \"equivalent\" stacks based on feature values. Empirically, our algorithm yields up to a five-fold speedup over a state-of-the-art shift-reduce dependency parser with no loss in accuracy. 
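A hedged sketch of the Distributional Memory view in baroni-lenci-2010-distributional: distributional information is extracted once as weighted word-link-word tuples (a third-order tensor), and different tasks read off different matricizations of the same tensor. The tuple scores below are made-up toy values.

# distributional_memory_sketch.py -- one tensor, several derived matrices
from collections import defaultdict

tuples = {("marine", "use", "gun"): 40.0,
          ("soldier", "use", "gun"): 51.9,
          ("teacher", "use", "book"): 28.5}

# word-by-link-word matrix: rows are words, columns are <link, word> pairs
w_by_lw = defaultdict(dict)
# word-word-by-link matrix: rows are word pairs, columns are links
ww_by_l = defaultdict(dict)
for (w1, link, w2), score in tuples.items():
    w_by_lw[w1][(link, w2)] = score
    ww_by_l[(w1, w2)][link] = score

print(w_by_lw["marine"])           # a space suited to word similarity tasks
print(ww_by_l[("marine", "gun")])  # a space suited to relation classification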
Better search also leads to better learning, and our final parser outperforms all previously reported dependency parsers for English and Chinese, yet is much faster.", "keyphrases": ["dynamic programming", "shift-reduce parsing", "decoding", "beam-search", "transition-based dependency parsing"]} +{"id": "aguilar-etal-2018-named", "title": "Named Entity Recognition on Code-Switched Data: Overview of the CALCS 2018 Shared Task", "abstract": "In the third shared task of the Computational Approaches to Linguistic Code-Switching (CALCS) workshop, we focus on Named Entity Recognition (NER) on code-switched social-media data. We divide the shared task into two competitions based on the English-Spanish (ENG-SPA) and Modern Standard Arabic-Egyptian (MSA-EGY) language pairs. We use Twitter data and 9 entity types to establish a new dataset for code-switched NER benchmarks. In addition to the CS phenomenon, the diversity of the entities and the social media challenges make the task considerably hard to process. As a result, the best scores of the competitions are 63.76% and 71.61% for ENG-SPA and MSA-EGY, respectively. We present the scores of 9 participants and discuss the most common challenges among submissions.", "keyphrases": ["entity recognition", "linguistic code-switching", "eng-spa"]} +{"id": "wang-etal-2020-response", "title": "Response Selection for Multi-Party Conversations with Dynamic Topic Tracking", "abstract": "While participants in a multi-party multi-turn conversation simultaneously engage in multiple conversation topics, existing response selection methods are developed mainly focusing on a two-party single-conversation scenario. Hence, the prolongation and transition of conversation topics are ignored by current methods. In this work, we frame response selection as a dynamic topic tracking task to match the topic between the response and relevant conversation context. With this new formulation, we propose a novel multi-task learning framework that supports efficient encoding through large pretrained models with only two utterances at once to perform dynamic topic disentanglement and response selection. We also propose Topic-BERT an essential pretraining step to embed topic information into BERT with self-supervised learning. Experimental results on the DSTC-8 Ubuntu IRC dataset show state-of-the-art results in response selection and topic disentanglement tasks outperforming existing methods by a good margin.", "keyphrases": ["conversation", "dynamic topic", "response selection"]} +{"id": "filatova-2012-irony", "title": "Irony and Sarcasm: Corpus Generation and Analysis Using Crowdsourcing", "abstract": "The ability to reliably identify sarcasm and irony in text can improve the performance of many Natural Language Processing (NLP) systems including summarization, sentiment analysis, etc. The existing sarcasm detection systems have focused on identifying sarcasm on a sentence level or for a specific phrase. However, often it is impossible to identify a sentence containing sarcasm without knowing the context. In this paper we describe a corpus generation experiment where we collect regular and sarcastic Amazon product reviews. We perform qualitative and quantitative analysis of the corpus. 
The resulting corpus can be used for identifying sarcasm on two levels: a document and a text utterance (where a text utterance can be as short as a sentence and as long as a whole document).", "keyphrases": ["sarcasm", "crowdsourcing", "corpus generation experiment", "irony", "annotator"]} +{"id": "wang-etal-2020-relational", "title": "Relational Graph Attention Network for Aspect-based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis aims to determine the sentiment polarity towards a specific aspect in online reviews. Most recent efforts adopt attention-based neural network models to implicitly connect aspects with opinion words. However, due to the complexity of language and the existence of multiple aspects in a single sentence, these models often confuse the connections. In this paper, we address this problem by means of effective encoding of syntax information. Firstly, we define a unified aspect-oriented dependency tree structure rooted at a target aspect by reshaping and pruning an ordinary dependency parse tree. Then, we propose a relational graph attention network (R-GAT) to encode the new tree structure for sentiment prediction. Extensive experiments are conducted on the SemEval 2014 and Twitter datasets, and the experimental results confirm that the connections between aspects and opinion words can be better established with our approach, and the performance of the graph attention network (GAT) is significantly improved as a consequence.", "keyphrases": ["graph attention network", "sentiment analysis", "aspect term", "dependency graph"]} +{"id": "khandelwal-sawant-2020-negbert", "title": "NegBERT: A Transfer Learning Approach for Negation Detection and Scope Resolution", "abstract": "Negation is an important characteristic of language, and a major component of information extraction from text. This subtask is of considerable importance to the biomedical domain. Over the years, multiple approaches have been explored to address this problem: Rule-based systems, Machine Learning classifiers, Conditional Random Field models, CNNs and more recently BiLSTMs. In this paper, we look at applying Transfer Learning to this problem. First, we extensively review previous literature addressing Negation Detection and Scope Resolution across the 3 datasets that have gained popularity over the years: the BioScope Corpus, the Sherlock dataset, and the SFU Review Corpus. We then explore the decision choices involved with using BERT, a popular transfer learning model, for this task, and report state-of-the-art results for scope resolution across all 3 datasets. Our model, referred to as NegBERT, achieves a token level F1 score on scope resolution of 92.36 on the Sherlock dataset, 95.68 on the BioScope Abstracts subcorpus, 91.24 on the BioScope Full Papers subcorpus, 90.95 on the SFU Review Corpus, outperforming the previous state-of-the-art systems by a significant margin. We also analyze the model's generalizability to datasets on which it is not trained.", "keyphrases": ["negation detection", "scope resolution", "bioscope corpus", "transfer learning model", "negbert"]} +{"id": "wang-potts-2019-talkdown", "title": "TalkDown: A Corpus for Condescension Detection in Context", "abstract": "Condescending language use is caustic; it can bring dialogues to an end and bifurcate communities. Thus, systems for condescension detection could have a large positive impact. 
A challenge here is that condescension is often impossible to detect from isolated utterances, as it depends on the discourse and social context. To address this, we present TalkDown, a new labeled dataset of condescending linguistic acts in context. We show that extending a language-only model with representations of the discourse improves performance, and we motivate techniques for dealing with the low rates of condescension overall. We also use our model to estimate condescension rates in various online communities and relate these differences to differing community norms.", "keyphrases": ["condescension detection", "community", "linguistic act", "talkdown", "social medium message"]} +{"id": "ando-2006-applying", "title": "Applying Alternating Structure Optimization to Word Sense Disambiguation", "abstract": "This paper presents a new application of the recently proposed machine learning method Alternating Structure Optimization (ASO), to word sense disambiguation (WSD). Given a set of WSD problems and their respective labeled examples, we seek to improve overall performance on that set by using all the labeled examples (irrespective of target words) for the entire set in learning a disambiguator for each individual problem. Thus, in effect, on each individual problem (e.g., disambiguation of \"art\") we benefit from training examples for other problems (e.g., disambiguation of \"bar\", \"canal\", and so forth). We empirically study the effective use of ASO for this purpose in the multitask and semi-supervised learning configurations. Our performance results rival or exceed those of the previous best systems on several Senseval lexical sample task data sets.", "keyphrases": ["alternating structure optimization", "word sense disambiguation", "wsd"]} +{"id": "gardner-etal-2020-evaluating", "title": "Evaluating Models' Local Decision Boundaries via Contrast Sets", "abstract": "Standard test sets for supervised learning evaluate in-distribution generalization. Unfortunately, when a dataset has systematic gaps (e.g., annotation artifacts), these evaluations are misleading: a model can learn simple decision rules that perform well on the test set but do not capture the abilities a dataset is intended to test. We propose a more rigorous annotation paradigm for NLP that helps to close systematic gaps in the test data. In particular, after a dataset is constructed, we recommend that the dataset authors manually perturb the test instances in small but meaningful ways that (typically) change the gold label, creating contrast sets. Contrast sets provide a local view of a model's decision boundary, which can be used to more accurately evaluate a model's true linguistic capabilities. We demonstrate the efficacy of contrast sets by creating them for 10 diverse NLP datasets (e.g., DROP reading comprehension, UD parsing, and IMDb sentiment analysis). Although our contrast sets are not explicitly adversarial, model performance is significantly lower on them than on the original test sets\u2014up to 25% in some cases. 
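A small sketch of contrast-set evaluation as proposed in gardner-etal-2020-evaluating: besides accuracy over original and perturbed examples, report consistency, the fraction of contrast sets on which the model gets every example right. The toy `model` and two-example sets below are stand-ins; real contrast sets are hand-built by dataset authors.

# contrast_set_eval_sketch.py -- accuracy vs. per-set consistency
def evaluate_contrast_sets(model, contrast_sets):
    correct, total, consistent = 0, 0, 0
    for cset in contrast_sets:                 # each set: [(text, gold), ...]
        preds = [model(x) == gold for x, gold in cset]
        correct += sum(preds)
        total += len(preds)
        consistent += all(preds)               # whole set must be right
    return correct / total, consistent / len(contrast_sets)

toy_model = lambda text: "positive" if "good" in text else "negative"
sets = [[("a good film", "positive"), ("not a good film", "negative")],
        [("good acting", "positive"), ("barely good acting", "positive")]]
acc, cons = evaluate_contrast_sets(toy_model, sets)
print(acc, cons)   # consistency is strictly harder than plain accuracy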
We release our contrast sets as new evaluation benchmarks and encourage future dataset construction efforts to follow similar annotation processes.", "keyphrases": ["decision boundary", "contrast set", "gold label", "model evaluation"]} +{"id": "ran-etal-2019-numnet", "title": "NumNet: Machine Reading Comprehension with Numerical Reasoning", "abstract": "Numerical reasoning, such as addition, subtraction, sorting and counting is a critical skill in human's reading comprehension, which has not been well considered in existing machine reading comprehension (MRC) systems. To address this issue, we propose a numerical MRC model named as NumNet, which utilizes a numerically-aware graph neural network to consider the comparing information and performs numerical reasoning over numbers in the question and passage. Our system achieves an EM-score of 64.56% on the DROP dataset, outperforming all existing machine reading comprehension models by considering the numerical relations among numbers.", "keyphrases": ["machine reading comprehension", "numerical reasoning", "drop", "mathematical problem"]} +{"id": "zhao-etal-2019-moverscore", "title": "MoverScore: Text Generation Evaluating with Contextualized Embeddings and Earth Mover Distance", "abstract": "A robust evaluation metric has a profound impact on the development of text generation systems. A desirable metric compares system output against references based on their semantics rather than surface forms. In this paper we investigate strategies to encode system and reference texts to devise a metric that shows a high correlation with human judgment of text quality. We validate our new metric, namely MoverScore, on a number of text generation tasks including summarization, machine translation, image captioning, and data-to-text generation, where the outputs are produced by a variety of neural and non-neural systems. Our findings suggest that metrics combining contextualized representations with a distance measure perform the best. Such metrics also demonstrate strong generalization capability across tasks. For ease-of-use we make our metrics available as web service.", "keyphrases": ["contextual embedding", "text generation task", "summarization", "moverscore"]} +{"id": "rei-yannakoudakis-2016-compositional", "title": "Compositional Sequence Labeling Models for Error Detection in Learner Writing", "abstract": "In this paper, we present the first experiments using neural network models for the task of error detection in learner writing. We perform a systematic comparison of alternative compositional architectures and propose a framework for error detection based on bidirectional LSTMs. Experiments on the CoNLL-14 shared task dataset show the model is able to outperform other participants on detecting errors in learner writing. Finally, the model is integrated with a publicly deployed self-assessment system, leading to performance comparable to human annotators.", "keyphrases": ["error detection", "learner writing", "compositional architecture", "bi-lstm"]} +{"id": "mrksic-etal-2017-neural", "title": "Neural Belief Tracker: Data-Driven Dialogue State Tracking", "abstract": "One of the core components of modern spoken dialogue systems is the belief tracker, which estimates the user's goal at every step of the dialogue. However, most current approaches have difficulty scaling to larger, more complex dialogue domains. 
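A hedged sketch of the idea behind MoverScore (zhao-etal-2019-moverscore): score a system text against a reference by the cost of moving one set of contextualized token embeddings onto the other. For brevity this uses a one-to-one assignment via scipy rather than full earth mover's distance, and random vectors stand in for real BERT embeddings.

# mover_distance_sketch.py -- assignment-based approximation of a transport distance
import numpy as np
from scipy.optimize import linear_sum_assignment

def mover_distance(emb_sys, emb_ref):
    # Pairwise Euclidean costs between system and reference tokens.
    costs = np.linalg.norm(emb_sys[:, None, :] - emb_ref[None, :, :], axis=-1)
    rows, cols = linear_sum_assignment(costs)   # cheapest one-to-one matching
    return costs[rows, cols].mean()

rng = np.random.default_rng(0)
ref = rng.normal(size=(7, 32))                  # 7 reference tokens
good = ref + 0.1 * rng.normal(size=ref.shape)   # near-paraphrase
bad = rng.normal(size=(7, 32))                  # unrelated text
print(mover_distance(good, ref) < mover_distance(bad, ref))  # True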
This is due to their dependency on either: a) Spoken Language Understanding models that require large amounts of annotated training data; or b) hand-crafted lexicons for capturing some of the linguistic variation in users' language. We propose a novel Neural Belief Tracking (NBT) framework which overcomes these problems by building on recent advances in representation learning. NBT models reason over pre-trained word vectors, learning to compose them into distributed representations of user utterances and dialogue context. Our evaluation on two datasets shows that this approach surpasses past limitations, matching the performance of state-of-the-art models which rely on hand-crafted semantic lexicons and outperforming them when such lexicons are not provided.", "keyphrases": ["dialogue state tracking", "pre-trained word vector", "user utterance", "neural belief tracker", "ontology"]} +{"id": "zampieri-etal-2017-findings", "title": "Findings of the VarDial Evaluation Campaign 2017", "abstract": "We present the results of the VarDial Evaluation Campaign on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects, which we organized as part of the fourth edition of the VarDial workshop at EACL'2017. This year, we included four shared tasks: Discriminating between Similar Languages (DSL), Arabic Dialect Identification (ADI), German Dialect Identification (GDI), and Cross-lingual Dependency Parsing (CLP). A total of 19 teams submitted runs across the four tasks, and 15 of them wrote system description papers.", "keyphrases": ["vardial evaluation campaign", "identification", "team", "acoustic feature", "ili"]} +{"id": "vajjala-meurers-2012-improving", "title": "On Improving the Accuracy of Readability Classification using Insights from Second Language Acquisition", "abstract": "We investigate the problem of readability assessment using a range of lexical and syntactic features and study their impact on predicting the grade level of texts. As empirical basis, we combined two web-based text sources, Weekly Reader and BBC Bitesize, targeting different age groups, to cover a broad range of school grades. On the conceptual side, we explore the use of lexical and syntactic measures originally designed to measure language development in the production of second language learners. We show that the developmental measures from Second Language Acquisition (SLA) research when combined with traditional readability features such as word length and sentence length provide a good indication of text readability across different grades. The resulting classifiers significantly outperform the previous approaches on readability classification, reaching a classification accuracy of 93.3%.", "keyphrases": ["readability classification", "second language acquisition", "syntactic feature"]} +{"id": "spiegler-monson-2010-emma", "title": "EMMA: A novel Evaluation Metric for Morphological Analysis", "abstract": "We present a novel Evaluation Metric for Morphological Analysis (EMMA) that is both linguistically appealing and empirically sound. EMMA uses a graphbased assignment algorithm, optimized via integer linear programming, to match morphemes of predicted word analyses to the analyses of a morphologically rich answer key. This is necessary especially for unsupervised morphology analysis systems which do not have access to linguistically motivated morpheme labels. 
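A deliberately minimal sketch of the traditional readability features that vajjala-meurers-2012-improving combine with SLA-inspired developmental measures: average sentence length and average word length, which can feed any standard classifier. The feature set and example texts are illustrative assumptions.

# readability_features_sketch.py -- two classic surface features
def readability_features(text):
    sentences = [s.split() for s in text.split(".") if s.strip()]
    words = [w for s in sentences for w in s]
    return {"avg_sentence_len": len(words) / len(sentences),
            "avg_word_len": sum(map(len, words)) / len(words)}

easy = "The cat sat. The dog ran. They played."
hard = ("Notwithstanding considerable methodological disagreement, "
        "researchers routinely operationalize readability heuristically.")
print(readability_features(easy), readability_features(hard))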
Across 3 languages, EMMA scores of 14 systems have a substantially greater positive correlation with mean average precision in an information retrieval (IR) task than do scores from the metric currently used by the Morpho Challenge (MC) competition series. We compute EMMA and MC metric scores for 93 separate system-language pairs from the 2007, 2008, and 2009 MC competitions, demonstrating that EMMA is not susceptible to two types of gaming that have plagued recent MC competitions: Ambiguity Hijacking and Shared Morpheme Padding. The EMMA evaluation script is publicly available from http://www.cs.bris.ac.uk/Research/MachineLearning/Morphology/Resources/.", "keyphrases": ["evaluation metric", "morphological analysis", "emma"]} +{"id": "yokoi-etal-2020-word", "title": "Word Rotator's Distance", "abstract": "One key principle for assessing textual similarity is measuring the degree of semantic overlap between texts by considering the word alignment. Such alignment-based approaches are both intuitive and interpretable; however, they are empirically inferior to the simple cosine similarity between general-purpose sentence vectors. We focus on the fact that the norm of word vectors is a good proxy for word importance, and the angle between them is a good proxy for word similarity. However, alignment-based approaches do not distinguish the norm and direction, whereas sentence-vector approaches automatically use the norm as the word importance. Accordingly, we propose decoupling word vectors into their norm and direction, then computing the alignment-based similarity with the help of earth mover's distance (optimal transport), which we refer to as word rotator's distance. Furthermore, we demonstrate how to grow the norm and direction of word vectors (vector converter); this is a new systematic approach derived from the sentence-vector estimation methods, which can significantly improve the performance of the proposed method. On several STS benchmarks, the proposed methods outperform not only alignment-based approaches but also strong baselines. The source code is available at ", "keyphrases": ["distance", "word vector", "word rotator", "wmd"]} +{"id": "newman-etal-2010-automatic", "title": "Automatic Evaluation of Topic Coherence", "abstract": "This paper introduces the novel task of topic coherence evaluation, whereby a set of words, as generated by a topic model, is rated for coherence or interpretability. We apply a range of topic scoring models to the evaluation task, drawing on WordNet, Wikipedia and the Google search engine, and existing research on lexical similarity/relatedness. In comparison with human scores for a set of learned topics over two distinct datasets, we show a simple co-occurrence measure based on pointwise mutual information over Wikipedia data is able to achieve results for the task at or nearing the level of inter-annotator correlation, and that other Wikipedia-based lexical relatedness methods also achieve strong results. Google produces strong, if less consistent, results, while our results over WordNet are patchy at best.", "keyphrases": ["topic coherence", "wikipedia", "mutual information", "automatic evaluation", "pmi"]} +{"id": "joshi-etal-2019-bert", "title": "BERT for Coreference Resolution: Baselines and Analysis", "abstract": "We apply BERT to coreference resolution, achieving a new state of the art on the GAP (+11.5 F1) and OntoNotes (+3.9 F1) benchmarks. 
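A minimal sketch of PMI-based topic coherence as in newman-etal-2010-automatic: score a topic's top words by the average pointwise mutual information of word pairs, with co-occurrence counted over a reference corpus (Wikipedia in the paper; a tiny toy corpus here).

# pmi_coherence_sketch.py -- average pairwise PMI over a topic's top words
import math
from itertools import combinations

docs = [["cell", "biology", "protein", "gene"],
        ["gene", "protein", "expression", "cell"],
        ["market", "stock", "price", "trade"]]

def pmi_coherence(topic_words, docs):
    n = len(docs)
    df = lambda *ws: sum(all(w in d for w in ws) for d in docs)  # document frequency
    scores = []
    for w1, w2 in combinations(topic_words, 2):
        joint = df(w1, w2)
        if joint:
            scores.append(math.log((joint / n) / ((df(w1) / n) * (df(w2) / n))))
        else:
            scores.append(0.0)   # unseen pair contributes no evidence
    return sum(scores) / len(scores)

print(pmi_coherence(["cell", "gene", "protein"], docs))  # coherent topic
print(pmi_coherence(["cell", "stock", "trade"], docs))   # mixed topic scores lower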
A qualitative analysis of model predictions indicates that, compared to ELMo and BERT-base, BERT-large is particularly better at distinguishing between related but distinct entities (e.g., President and CEO), but that there is still room for improvement in modeling document-level context, conversations, and mention paraphrasing. We will release all code and trained models upon publication.", "keyphrases": ["coreference resolution", "bert", "mention detection", "co-reference resolution", "span representation"]} +{"id": "prasad-etal-2006-attribution", "title": "Attribution and its annotation in the Penn Discourse TreeBank", "abstract": "In this paper, we describe an annotation scheme for the attribution of abstract objects (propositions, facts, and eventualities) associated with discourse relations and their arguments annotated in the Penn Discourse TreeBank. The scheme aims to capture both the source and degrees of factuality of the abstract objects through the annotation of text spans signalling the attribution, and of features recording the source, type, scopal polarity, and determinacy of attribution.", "keyphrases": ["penn discourse treebank", "annotation scheme", "attribution"]} +{"id": "li-etal-2009-report", "title": "Report of NEWS 2009 Machine Transliteration Shared Task", "abstract": "This report documents the details of the Machine Transliteration Shared Task conducted as a part of the Named Entities Workshop (NEWS), an ACL-IJCNLP 2009 workshop. The shared task features machine transliteration of proper names from English to a set of languages. This shared task has witnessed enthusiastic participation of 31 teams from all over the world, with diversity of participation for a given system and wide coverage for a given language pair (more than a dozen participants per language pair). Diverse transliteration methodologies are represented adequately in the shared task for a given language pair, thus underscoring the fact that the workshop may truly indicate the state of the art in machine transliteration in these language pairs. We measure and report 6 performance metrics on the submitted results. We believe that the shared task has successfully achieved the following objectives: (i) bringing together the community of researchers in the area of Machine Transliteration to focus on various research avenues, (ii) Calibrating systems on common corpora, using common metrics, thus creating a reasonable baseline for the state-of-the-art of transliteration systems, and (iii) providing a quantitative basis for meaningful comparison and analysis between various algorithmic approaches used in machine transliteration. 
We believe that the results of this shared task would uncover a host of interesting research problems, giving impetus to research in this significant research area.", "keyphrases": ["transliteration", "named entities workshop", "news"]} +{"id": "dong-etal-2018-banditsum", "title": "BanditSum: Extractive Summarization as a Contextual Bandit", "abstract": "In this work, we propose a novel method for training neural networks to perform single-document extractive summarization without heuristically-generated extractive labels. We call our approach BanditSum as it treats extractive summarization as a contextual bandit (CB) problem, where the model receives a document to summarize (the context), and chooses a sequence of sentences to include in the summary (the action). A policy gradient reinforcement learning algorithm is used to train the model to select sequences of sentences that maximize ROUGE score. We perform a series of experiments demonstrating that BanditSum is able to achieve ROUGE scores that are better than or comparable to the state-of-the-art for extractive summarization, and converges using significantly fewer update steps than competing approaches. In addition, we show empirically that BanditSum performs significantly better than competing approaches when good summary sentences appear late in the source document.", "keyphrases": ["extractive summarization", "contextual bandit", "action", "rouge score", "source document"]} +{"id": "spitkovsky-etal-2011-punctuation", "title": "Punctuation: Making a Point in Unsupervised Dependency Parsing", "abstract": "We show how punctuation can be used to improve unsupervised dependency parsing. Our linguistic analysis confirms the strong connection between English punctuation and phrase boundaries in the Penn Treebank. However, approaches that naively include punctuation marks in the grammar (as if they were words) do not perform well with Klein and Manning's Dependency Model with Valence (DMV). Instead, we split a sentence at punctuation and impose parsing restrictions over its fragments. Our grammar inducer is trained on the Wall Street Journal (WSJ) and achieves 59.5% accuracy out-of-domain (Brown sentences with 100 or fewer words), more than 6% higher than the previous best results. Further evaluation, using the 2006/7 CoNLL sets, reveals that punctuation aids grammar induction in 17 of 18 languages, for an overall average net gain of 1.3%. Some of this improvement is from training, but more than half is from parsing with induced constraints, in inference. Punctuation-aware decoding works with existing (even already-trained) parsing models and always increased accuracy in our experiments.", "keyphrases": ["unsupervised dependency parsing", "fragment", "punctuation"]} +{"id": "berant-liang-2014-semantic", "title": "Semantic Parsing via Paraphrasing", "abstract": "A central challenge in semantic parsing is handling the myriad ways in which knowledge base predicates can be expressed. Traditionally, semantic parsers are trained primarily from text paired with knowledge base information. Our goal is to exploit the much larger amounts of raw text not tied to any knowledge base. In this paper, we turn semantic parsing on its head. Given an input utterance, we first use a simple method to deterministically generate a set of candidate logical forms with a canonical realization in natural language for each. Then, we use a paraphrase model to choose the realization that best paraphrases the input, and output the corresponding logical form. 
We present two simple paraphrase models, an association model and a vector space model, and train them jointly from question-answer pairs. Our system PARASEMPRE improves state-of-the-art accuracies on two recently released question-answering datasets.", "keyphrases": ["knowledge base", "semantic parsing", "freebase", "natural language question", "language utterance"]} +{"id": "das-etal-2014-frame", "title": "Frame-Semantic Parsing", "abstract": "Frame semantics is a linguistic theory that has been instantiated for English in the FrameNet lexicon. We solve the problem of frame-semantic parsing using a two-stage statistical model that takes lexical targets (i.e., content words and phrases) in their sentential contexts and predicts frame-semantic structures. Given a target in context, the first stage disambiguates it to a semantic frame. This model uses latent variables and semi-supervised learning to improve frame disambiguation for targets unseen at training time. The second stage finds the target's locally expressed semantic arguments. At inference time, a fast exact dual decomposition algorithm collectively predicts all the arguments of a frame at once in order to respect declaratively stated linguistic constraints, resulting in qualitatively better structures than na\u00efve local predictors. Both components are feature-based and discriminatively trained on a small set of annotated frame-semantic parses. On the SemEval 2007 benchmark data set, the approach, along with a heuristic identifier of frame-evoking targets, outperforms the prior state of the art by significant margins. Additionally, we present experiments on the much larger FrameNet 1.5 data set. We have released our frame-semantic parser as open-source software.", "keyphrases": ["frame", "frame-semantic parsing", "semafor", "role labeling", "argument identification"]} +{"id": "nangia-etal-2020-crows", "title": "CrowS-Pairs: A Challenge Dataset for Measuring Social Biases in Masked Language Models", "abstract": "Pretrained language models, especially masked language models (MLMs) have seen success across many NLP tasks. However, there is ample evidence that they use the cultural biases that are undoubtedly present in the corpora they are trained on, implicitly creating harm with biased representations. To measure some forms of social bias in language models against protected demographic groups in the US, we introduce the Crowdsourced Stereotype Pairs benchmark (CrowS-Pairs). CrowS-Pairs has 1508 examples that cover stereotypes dealing with nine types of bias, like race, religion, and age. In CrowS-Pairs a model is presented with two sentences: one that is more stereotyping and another that is less stereotyping. The data focuses on stereotypes about historically disadvantaged groups and contrasts them with advantaged groups. We find that all three of the widely-used MLMs we evaluate substantially favor sentences that express stereotypes in every category in CrowS-Pairs. As work on building less biased models advances, this dataset can be used as a benchmark to evaluate progress.", "keyphrases": ["language model", "social bias", "crows-pair", "stereotypical sentence"]} +{"id": "gusev-etal-2011-using", "title": "Using Query Patterns to Learn the Duration of Events", "abstract": "We present the first approach to learning the durations of events without annotated training data, employing web query patterns to infer duration distributions. 
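A hedged sketch of the query-pattern idea in gusev-etal-2011-using: instantiate patterns such as "<event> ... for <duration bucket>" and compare hit counts per bucket. The paper issues web queries; here a toy corpus stands in, and the pattern set is deliberately tiny.

# duration_patterns_sketch.py -- pattern hit counts as a duration distribution
import re
from collections import Counter

corpus = ("the war lasted for years . they talked for hours . "
          "she looked for seconds . the war dragged on for decades . "
          "he looked for a few seconds .")
buckets = ["seconds", "minutes", "hours", "days", "years", "decades"]

def duration_distribution(event, corpus):
    counts = Counter()
    for b in buckets:
        # Match the event stem, then "for", then the bucket, within one clause.
        pattern = rf"\b{event}\w*\b[^.]*\bfor\b[^.]*\b{b}\b"
        counts[b] = len(re.findall(pattern, corpus))
    total = sum(counts.values()) or 1
    return {b: c / total for b, c in counts.items() if c}

print(duration_distribution("war", corpus))   # mass on years/decades
print(duration_distribution("look", corpus))  # mass on seconds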
For example, we learn that \"war\" lasts years or decades, while \"look\" lasts seconds or minutes. Learning aspectual information is an important goal for computational semantics and duration information may help enable rich document understanding. We first describe and improve a supervised baseline that relies on event duration annotations. We then show how web queries for linguistic patterns can help learn the duration of events without labeled data, producing fine-grained duration judgments that surpass the supervised system. We evaluate on the TimeBank duration corpus, and also investigate how an event's participants (arguments) effect its duration using a corpus collected through Amazon's Mechanical Turk. We make available a new database of events and their duration distributions for use in research involving the temporal and aspectual properties of events.", "keyphrases": ["query pattern", "duration", "full-length weblog"]} +{"id": "schmidt-2014-database", "title": "The Database for Spoken German \u2014 DGD2", "abstract": "The Database for Spoken German (Datenbank f\u00fcr Gesprochenes Deutsch, DGD2, ) is the central platform for publishing and disseminating spoken language corpora from the Archive of Spoken German (Archiv f\u00fcr Gesprochenes Deutsch, AGD, ) at the Institute for the German Language in Mannheim. The corpora contained in the DGD2 come from a variety of sources, some of them in-house projects, some of them external projects. Most of the corpora were originally intended either for research into the (dialectal) variation of German or for studies in conversation analysis and related fields. The AGD has taken over the task of permanently archiving these resources and making them available for reuse to the research community. To date, the DGD2 offers access to 19 different corpora, totalling around 9000 speech events, 2500 hours of audio recordings or 8 million transcribed words. This paper gives an overview of the data made available via the DGD2, of the technical basis for its implementation, and of the most important functionalities it offers. The paper concludes with information about the users of the database and future plans for its development.", "keyphrases": ["database", "spoken german", "dgd2"]} +{"id": "ott-etal-2011-finding", "title": "Finding Deceptive Opinion Spam by Any Stretch of the Imagination", "abstract": "Consumers increasingly rate, review and research products online (Jansen, 2010; Litvin et al., 2008). Consequently, websites containing consumer reviews are becoming targets of opinion spam. While recent work has focused primarily on manually identifiable instances of opinion spam, in this work we study deceptive opinion spam---fictitious opinions that have been deliberately written to sound authentic. Integrating work from psychology and computational linguistics, we develop and compare three approaches to detecting deceptive opinion spam, and ultimately develop a classifier that is nearly 90% accurate on our gold-standard opinion spam dataset. 
Based on feature analysis of our learned models, we additionally make several theoretical contributions, including revealing a relationship between deceptive opinions and imaginative writing.", "keyphrases": ["deceptive opinion spam", "fake hotel review", "news", "reputation"]} +{"id": "mubarak-darwish-2014-using", "title": "Using Twitter to Collect a Multi-Dialectal Corpus of Arabic", "abstract": "This paper describes the collection and classification of a multi-dialectal corpus of Arabic based on the geographical information of tweets. We mapped information of user locations to one of the Arab countries, and extracted tweets that have dialectal word(s). Manual evaluation of the extracted corpus shows that the accuracy of assignment of tweets to some countries (like Saudi Arabia and Egypt) is above 93% while the accuracy for other countries, such as Algeria and Syria, is below 70%.", "keyphrases": ["twitter", "multi-dialectal corpus", "arab country"]} +{"id": "goldwater-griffiths-2007-fully", "title": "A fully Bayesian approach to unsupervised part-of-speech tagging", "abstract": "Unsupervised learning of linguistic structure is a difficult problem. A common approach is to define a generative model and maximize the probability of the hidden structure given the observed data. Typically, this is done using maximum-likelihood estimation (MLE) of the model parameters. We show using part-of-speech tagging that a fully Bayesian approach can greatly improve performance. Rather than estimating a single set of parameters, the Bayesian approach integrates over all possible parameter values. This difference ensures that the learned structure will have high probability over a range of possible parameters, and permits the use of priors favoring the sparse distributions that are typical of natural language. Our model has the structure of a standard trigram HMM, yet its accuracy is closer to that of a state-of-the-art discriminative model (Smith and Eisner, 2005), up to 14 percentage points better than MLE. We find improvements both when training from data alone, and using a tagging dictionary.", "keyphrases": ["bayesian approach", "tagging", "unsupervised learning", "bhmm", "markov model"]} +{"id": "filippova-etal-2015-sentence", "title": "Sentence Compression by Deletion with LSTMs", "abstract": "We present an LSTM approach to deletion-based sentence compression where the task is to translate a sentence into a sequence of zeros and ones, corresponding to token deletion decisions. We demonstrate that even the most basic version of the system, which is given no syntactic information (no PoS or NE tags, or dependencies) or desired compression length, performs surprisingly well: around 30% of the compressions from a large test set could be regenerated. We compare the LSTM system with a competitive baseline which is trained on the same amount of data but is additionally provided with all kinds of linguistic features. In an experiment with human raters the LSTM-based model outperforms the baseline, achieving 4.5 in readability and 3.8 in informativeness.", "keyphrases": ["deletion", "sentence compression", "language generation task", "seq2seq", "sequence-to-sequence"]} +{"id": "denero-klein-2010-discriminative", "title": "Discriminative Modeling of Extraction Sets for Machine Translation", "abstract": "We present a discriminative model that directly predicts which set of phrasal translation rules should be extracted from a sentence pair. 
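A hedged sketch in the spirit of ott-etal-2011-finding: a simple n-gram classifier separating deceptive from truthful reviews. The four-review "corpus" is a placeholder; the paper trains on 800 gold-standard reviews and also draws on psycholinguistic feature analysis.

# deception_classifier_sketch.py -- n-gram SVM over review text
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

reviews = ["my husband and i loved every luxurious minute of our stay",
           "the room was clean and the staff answered quickly",
           "this hotel is the best experience of my entire life truly",
           "check-in took ten minutes and the wifi dropped twice"]
labels = ["deceptive", "truthful", "deceptive", "truthful"]

clf = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), LinearSVC())
clf.fit(reviews, labels)
print(clf.predict(["an absolutely magical once in a lifetime stay"]))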
Our model scores extraction sets: nested collections of all the overlapping phrase pairs consistent with an underlying word alignment. Extraction set models provide two principal advantages over word-factored alignment models. First, we can incorporate features on phrase pairs, in addition to word links. Second, we can optimize for an extraction-based loss function that relates directly to the end task of generating translations. Our model gives improvements in alignment quality relative to state-of-the-art unsupervised and supervised baselines, as well as providing up to a 1.4 improvement in BLEU score in Chinese-to-English translation experiments.", "keyphrases": ["extraction set", "word alignment", "loss function"]} +{"id": "hu-etal-2016-harnessing", "title": "Harnessing Deep Neural Networks with Logic Rules", "abstract": "Combining deep neural networks with structured logic rules is desirable to harness flexibility and reduce uninterpretability of the neural models. We propose a general framework capable of enhancing various types of neural networks (e.g., CNNs and RNNs) with declarative first-order logic rules. Specifically, we develop an iterative distillation method that transfers the structured information of logic rules into the weights of neural networks. We deploy the framework on a CNN for sentiment analysis, and an RNN for named entity recognition. With a few highly intuitive rules, we obtain substantial improvements and achieve state-of-the-art or comparable results to previous best-performing systems.", "keyphrases": ["deep neural network", "logic rule", "various type", "weight", "entity recognition"]} +{"id": "cer-etal-2018-universal", "title": "Universal Sentence Encoder for English", "abstract": "We present easy-to-use TensorFlow Hub sentence embedding models having good task transfer performance. Model variants allow for trade-offs between accuracy and compute resources. We report the relationship between model complexity, resources, and transfer performance. Comparisons are made with baselines without transfer learning and to baselines that incorporate word-level transfer. Transfer learning using sentence-level embeddings is shown to outperform models without transfer learning and often those that use only word-level transfer. We show good transfer task performance with minimal training data and obtain encouraging results on word embedding association tests (WEAT) of model bias.", "keyphrases": ["transfer learning", "universal sentence encoder", "other nlp task", "dozen", "semantic similarity"]} +{"id": "liu-etal-2016-neural", "title": "Neural Machine Translation with Supervised Attention", "abstract": "The attention mechanism is appealing for neural machine translation, since it is able to dynamically encode a source sentence by generating an alignment between a target word and source words. Unfortunately, it has been proved to be worse than conventional alignment models in alignment accuracy. In this paper, we analyze and explain this issue from the point of view of reordering, and propose a supervised attention which is learned with guidance from conventional alignment models. 
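A hedged sketch of the distillation step in hu-etal-2016-harnessing: project the student's predictive distribution into a rule-regularized "teacher" distribution q(y|x) proportional to p(y|x) * exp(-C * rule_violation), then train the student toward a mix of gold labels and q. The shapes, the single toy rule, and the constants C and pi below are illustrative assumptions.

# rule_distillation_sketch.py -- posterior projection with a logic-rule penalty
import numpy as np

def teacher_distribution(p, violations, C=2.0):
    # p: (n, classes) student probabilities; violations: (n, classes) degree
    # to which predicting each class violates the declared logic rules.
    q = p * np.exp(-C * violations)
    return q / q.sum(axis=1, keepdims=True)

def distillation_targets(p, violations, onehot_gold, pi=0.7):
    q = teacher_distribution(p, violations)
    return pi * q + (1 - pi) * onehot_gold     # soft training targets for the student

p = np.array([[0.6, 0.4]])          # student leans toward class 0
viol = np.array([[1.0, 0.0]])       # but a rule (e.g., an "A-but-B" clause)
                                    # says class 0 violates it
gold = np.array([[0.0, 1.0]])
print(distillation_targets(p, viol, gold).round(3))  # mass shifts to class 1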
Experiments on two Chinese-to-English translation tasks show that the supervised attention mechanism yields better alignments leading to substantial gains over the standard attention-based NMT.", "keyphrases": ["alignment model", "neural machine translation", "attention model"]} +{"id": "jamshid-lou-etal-2018-disfluency", "title": "Disfluency Detection using Auto-Correlational Neural Networks", "abstract": "In recent years, the natural language processing community has moved away from task-specific feature engineering, i.e., researchers discovering ad-hoc feature representations for various tasks, in favor of general-purpose methods that learn the input representation by themselves. However, state-of-the-art approaches to disfluency detection in spontaneous speech transcripts currently still depend on an array of hand-crafted features, and other representations derived from the output of pre-existing systems such as language models or dependency parsers. As an alternative, this paper proposes a simple yet effective model for automatic disfluency detection, called an auto-correlational neural network (ACNN). The model uses a convolutional neural network (CNN) and augments it with a new auto-correlation operator at the lowest layer that can capture the kinds of \u201crough copy\u201d dependencies that are characteristic of repair disfluencies in speech. In experiments, the ACNN model outperforms the baseline CNN on a disfluency detection task with a 5% increase in f-score, which is close to the previous best result on this task.", "keyphrases": ["auto-correlation operator", "disfluency detection", "human-annotated corpora"]} +{"id": "rubin-etal-2016-fake", "title": "Fake News or Truth? Using Satirical Cues to Detect Potentially Misleading News", "abstract": "Satire is an attractive subject in deception detection research: it is a type of deception that intentionally incorporates cues revealing its own deceptiveness. Whereas other types of fabrications aim to instill a false sense of truth in the reader, a successful satirical hoax must eventually be exposed as a jest. This paper provides a conceptual overview of satire and humor, elaborating and illustrating the unique features of satirical news, which mimics the format and style of journalistic reporting. Satirical news stories were carefully matched and examined in contrast with their legitimate news counterparts in 12 contemporary news topics in 4 domains (civics, science, business, and \u201csoft\u201d news). Building on previous work in satire detection, we proposed an SVM-based algorithm, enriched with 5 predictive features (Absurdity, Humor, Grammar, Negative Affect, and Punctuation) and tested their combinations on 360 news articles. Our best predicting feature combination (Absurdity, Grammar and Punctuation) detects satirical news with a 90% precision and 84% recall (F-score=87%). Our work in algorithmically identifying satirical news pieces can aid in minimizing the potential deceptive impact of satire.", "keyphrases": ["satire", "news", "journalistic reporting", "deceptive content", "social medium"]} +{"id": "carlos-yalamanchi-2012-intention", "title": "Intention Analysis for Sales, Marketing and Customer Service", "abstract": "In recent years, social media has become a customer touch-point for the business functions of marketing, sales and customer service. 
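A toy sketch of the intuition behind the auto-correlation operator in jamshid-lou-etal-2018-disfluency: disfluent repairs are "rough copies", so a useful low-level signal is the similarity between each token and nearby tokens. Here, exact-match correlation within a fixed offset over word identities; the ACNN learns this kind of signal over embeddings inside a CNN.

# rough_copy_signal_sketch.py -- flag repeated nearby material in an utterance
import numpy as np

def rough_copy_signal(tokens, max_offset=3):
    n = len(tokens)
    signal = np.zeros(n)
    for i in range(n):
        for d in range(1, max_offset + 1):
            if i + d < n and tokens[i] == tokens[i + d]:
                signal[i] = signal[i + d] = 1.0   # repeated material nearby
    return signal

utt = "i want a flight to boston uh to denver".split()
print(list(zip(utt, rough_copy_signal(utt))))   # the "to ... to" repair lights up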
We aim to show that intention analysis might be useful to these business functions and that it can be performed effectively on short texts (at the granularity level of a single sentence). We demonstrate a scheme of categorization of intentions that is amenable to automation using simple machine learning techniques that are language-independent. We discuss the grounding that this scheme of categorization has in speech act theory. In the demonstration we go over a number of usage scenarios in an attempt to show that the use of automatic intention detection tools would benefit the business functions of sales, marketing and service. We also show that social media can be used not just to convey pleasure or displeasure (that is, to express sentiment) but also to discuss personal needs and to report problems (to express intentions). We evaluate methods for automatically discovering intentions in text, and establish that it is possible to perform intention analysis on social media with an accuracy of 66.97%\u00b10.10%.", "keyphrases": ["marketing", "customer service", "intention analysis"]}
+{"id": "lapata-keller-2004-web", "title": "The Web as a Baseline: Evaluating the Performance of Unsupervised Web-based Models for a Range of NLP Tasks", "abstract": "Previous work demonstrated that web counts can be used to approximate bigram frequencies, and thus should be useful for a wide variety of NLP tasks. So far, only two generation tasks (candidate selection for machine translation and confusion-set disambiguation) have been tested using web-scale data sets. The present paper investigates if these results generalize to tasks covering both syntax and semantics, both generation and analysis, and a larger range of n-grams. For the majority of tasks, we find that simple, unsupervised models perform better when n-gram frequencies are obtained from the web rather than from a large corpus. However, in most cases, web-based models fail to outperform more sophisticated state-of-the-art models trained on small corpora. We argue that web-based models should therefore be used as a baseline for, rather than an alternative to, standard models.", "keyphrases": ["web", "unsupervised model", "n-gram frequency", "noun compound"]}
+{"id": "wang-etal-2018-switchout", "title": "SwitchOut: an Efficient Data Augmentation Algorithm for Neural Machine Translation", "abstract": "In this work, we examine methods for data augmentation for text-based tasks such as neural machine translation (NMT). We formulate the design of a data augmentation policy with desirable properties as an optimization problem, and derive a generic analytic solution. This solution not only subsumes some existing augmentation schemes, but also leads to an extremely simple data augmentation strategy for NMT: randomly replacing words in both the source sentence and the target sentence with other random words from their corresponding vocabularies. We name this method SwitchOut. Experiments on three translation datasets of different scales show that SwitchOut yields consistent improvements of about 0.5 BLEU, achieving better or comparable performances to strong alternatives such as word dropout (Sennrich et al., 2016a). 
Code to implement this method is included in the appendix.", "keyphrases": ["data augmentation", "neural machine translation", "other random word"]}
+{"id": "battisti-etal-2020-corpus", "title": "A Corpus for Automatic Readability Assessment and Text Simplification of German", "abstract": "In this paper, we present a corpus for use in automatic readability assessment and automatic text simplification for German, the first of its kind for this language. The corpus is compiled from web sources and consists of parallel as well as monolingual-only (simplified German) data amounting to approximately 6,200 documents (nearly 211,000 sentences). As a unique feature, the corpus contains information on text structure (e.g., paragraphs, lines), typography (e.g., font type, font style), and images (content, position, and dimensions). While the importance of considering such information in machine learning tasks involving simplified language, such as readability assessment, has repeatedly been stressed in the literature, we provide empirical evidence for its benefit. We also demonstrate the added value of leveraging monolingual-only data for automatic text simplification via machine translation through applying back-translation, a data augmentation technique.", "keyphrases": ["automatic readability assessment", "text simplification", "german", "monolingual-only data"]}
+{"id": "ruder-plank-2017-learning", "title": "Learning to select data for transfer learning with Bayesian Optimization", "abstract": "Domain similarity measures can be used to gauge adaptability and select suitable data for transfer learning, but existing approaches define ad hoc measures that are deemed suitable for respective tasks. Inspired by work on curriculum learning, we propose to learn data selection measures using Bayesian Optimization and evaluate them across models, domains and tasks. Our learned measures outperform existing domain similarity measures significantly on three tasks: sentiment analysis, part-of-speech tagging, and parsing. We show the importance of complementing similarity with diversity, and that learned measures are\u2013to some degree\u2013transferable across models, domains, and even tasks.", "keyphrases": ["transfer learning", "bayesian optimization", "target data"]}
+{"id": "hirao-etal-2013-single", "title": "Single-Document Summarization as a Tree Knapsack Problem", "abstract": "Recent studies on extractive text summarization formulate it as a combinatorial optimization problem such as a Knapsack Problem, a Maximum Coverage Problem or a Budgeted Median Problem. These methods successfully improved summarization quality, but they did not consider the rhetorical relations between the textual units of a source document. Thus, summaries generated by these methods may lack logical coherence. This paper proposes a single document summarization method based on the trimming of a discourse tree. This is a two-fold process. First, we propose rules for transforming a rhetorical structure theory-based discourse tree into a dependency-based discourse tree, which allows us to take a tree-trimming approach to summarization. Second, we formulate the problem of trimming a dependency-based discourse tree as a Tree Knapsack Problem, then solve it with integer linear programming (ILP). 
Evaluation results showed that our method improved ROUGE scores.", "keyphrases": ["summarization", "tree knapsack problem", "discourse tree", "rst tree"]}
+{"id": "kocmi-bojar-2017-curriculum", "title": "Curriculum Learning and Minibatch Bucketing in Neural Machine Translation", "abstract": "We examine the effects of particular orderings of sentence pairs on the on-line training of neural machine translation (NMT). We focus on two types of such orderings: (1) ensuring that each minibatch contains sentences similar in some aspect and (2) gradual inclusion of some sentence types as the training progresses (so called \u201ccurriculum learning\u201d). In our English-to-Czech experiments, the internal homogeneity of minibatches has no effect on the training but some of our \u201ccurricula\u201d achieve a small improvement over the baseline.", "keyphrases": ["neural machine translation", "curriculum", "training example", "sentence length"]}
+{"id": "fisch-etal-2019-mrqa", "title": "MRQA 2019 Shared Task: Evaluating Generalization in Reading Comprehension", "abstract": "We present the results of the Machine Reading for Question Answering (MRQA) 2019 shared task on evaluating the generalization capabilities of reading comprehension systems. In this task, we adapted and unified 18 distinct question answering datasets into the same format. Among them, six datasets were made available for training, six datasets were made available for development, and the rest were hidden for final evaluation. Ten teams submitted systems, which explored various ideas including data sampling, multi-task learning, adversarial training and ensembling. The best system achieved an average F1 score of 72.5 on the 12 held-out datasets, 10.7 absolute points higher than our initial baseline based on BERT.", "keyphrases": ["generalization", "question answering", "mrqa"]}
+{"id": "ng-low-2004-chinese", "title": "Chinese Part-of-Speech Tagging: One-at-a-Time or All-at-Once? Word-Based or Character-Based?", "abstract": "Chinese part-of-speech (POS) tagging assigns one POS tag to each word in a Chinese sentence. However, since words are not demarcated in a Chinese sentence, Chinese POS tagging requires word segmentation as a prerequisite. We could perform Chinese POS tagging strictly after word segmentation (one-at-a-time approach), or perform both word segmentation and POS tagging in a combined, single step simultaneously (all-at-once approach). Also, we could choose to assign POS tags on a word-by-word basis, making use of word features in the surrounding context (word-based), or on a character-by-character basis with character features (character-based). This paper presents an in-depth study on such issues of processing architecture and feature representation for Chinese POS tagging, within a maximum entropy framework. We found that while the all-at-once, character-based approach is the best, the one-at-a-time, character-based approach is a worthwhile compromise, performing only slightly worse in terms of accuracy, but taking shorter time to train and run. 
As part of our investigation, we also built a state-of-the-art Chinese word segmenter, which outperforms the best SIGHAN 2003 word segmenters in the closed track on 3 out of 4 test corpora.", "keyphrases": ["all-at-once", "pos tagging", "word segmentation"]} +{"id": "karpukhin-etal-2020-dense", "title": "Dense Passage Retrieval for Open-Domain Question Answering", "abstract": "Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system greatly by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks.", "keyphrases": ["open-domain question", "dense passage retrieval", "knowledge source", "cross-encoder", "in-batch negative"]} +{"id": "ye-ling-2019-multi", "title": "Multi-Level Matching and Aggregation Network for Few-Shot Relation Classification", "abstract": "This paper presents a multi-level matching and aggregation network (MLMAN) for few-shot relation classification. Previous studies on this topic adopt prototypical networks, which calculate the embedding vector of a query instance and the prototype vector of the support set for each relation candidate independently. On the contrary, our proposed MLMAN model encodes the query instance and each support set in an interactive way by considering their matching information at both local and instance levels. The final class prototype for each support set is obtained by attentive aggregation over the representations of support instances, where the weights are calculated using the query instance. Experimental results demonstrate the effectiveness of our proposed methods, which achieve a new state-of-the-art performance on the FewRel dataset.", "keyphrases": ["aggregation network", "relation classification", "prototypical network", "multi-level matching"]} +{"id": "casacuberta-vidal-2004-machine", "title": "Machine Translation with Inferred Stochastic Finite-State Transducers", "abstract": "Finite-state transducers are models that are being used in different areas of pattern recognition and computational linguistics. One of these areas is machine translation, in which the approaches that are based on building models automatically from training examples are becoming more and more attractive. Finite-state transducers are very adequate for use in constrained tasks in which training samples of pairs of sentences are available. A technique for inferring finite-state transducers is proposed in this article. This technique is based on formal relations between finite-state transducers and rational grammars. Given a training corpus of source-target pairs of sentences, the proposed approach uses statistical alignment methods to produce a set of conventional strings from which a stochastic rational grammar (e.g., an n-gram) is inferred. This grammar is finally converted into a finite-state transducer. 
The proposed methods are assessed through a series of machine translation experiments within the framework of the EuTrans project.", "keyphrases": ["finite-state transducer", "transducer", "machine translation"]}
+{"id": "guo-etal-2018-multi", "title": "Multi-Source Domain Adaptation with Mixture of Experts", "abstract": "We propose a mixture-of-experts approach for unsupervised domain adaptation from multiple sources. The key idea is to explicitly capture the relationship between a target example and different source domains. This relationship, expressed by a point-to-set metric, determines how to combine predictors trained on various domains. The metric is learned in an unsupervised fashion using meta-training. Experimental results on sentiment analysis and part-of-speech tagging demonstrate that our approach consistently outperforms multiple baselines and can robustly handle negative transfer.", "keyphrases": ["domain adaptation", "mixture", "meta-training", "adversarial learning"]}
+{"id": "lin-etal-2012-entity", "title": "Entity Linking at Web Scale", "abstract": "This paper investigates entity linking over millions of high-precision extractions from a corpus of 500 million Web documents, toward the goal of creating a useful knowledge base of general facts. This paper is the first to report on entity linking over this many extractions, and describes new opportunities (such as corpus-level features) and challenges we found when entity linking at Web scale. We present several techniques that we developed and also lessons that we learned. We envision a future where information extraction and entity linking are paired to automatically generate knowledge bases with billions of assertions over millions of linked entities.", "keyphrases": ["web scale", "entity linking", "wikipedia"]}
+{"id": "zhang-etal-2018-personalizing", "title": "Personalizing Dialogue Agents: I have a dog, do you have pets too?", "abstract": "Chit-chat models are known to have several problems: they lack specificity, do not display a consistent personality and are often not very captivating. In this work we present the task of making chit-chat more engaging by conditioning on profile information. We collect data and train models to (i) condition on their given profile information; and (ii) information about the person they are talking to, resulting in improved dialogues, as measured by next utterance prediction. Since (ii) is initially unknown our model is trained to engage its partner with personal topics, and we show the resulting dialogue can be used to predict profile information about the interlocutors.", "keyphrases": ["persona", "conversation", "dialog system", "persona-based dialogue generation"]}
+{"id": "gao-etal-2019-soft", "title": "Soft Contextual Data Augmentation for Neural Machine Translation", "abstract": "While data augmentation is an important trick to boost the accuracy of deep learning methods in computer vision tasks, its study in natural language tasks is still very limited. In this paper, we present a novel data augmentation method for neural machine translation. Different from previous augmentation methods that randomly drop, swap or replace words with other words in a sentence, we softly augment a randomly chosen word in a sentence by its contextual mixture of multiple related words. 
More accurately, we replace the one-hot representation of a word by a distribution (provided by a language model) over the vocabulary, i.e., replacing the embedding of this word by a weighted combination of multiple semantically similar words. Since the weights of those words depend on the contextual information of the word to be replaced, the newly generated sentences capture much richer information than previous augmentation methods. Experimental results on both small scale and large scale machine translation data sets demonstrate the superiority of our method over strong baselines.", "keyphrases": ["data augmentation", "neural machine translation", "language model", "contextual information"]}
+{"id": "riaz-girju-2014-recognizing", "title": "Recognizing Causality in Verb-Noun Pairs via Noun and Verb Semantics", "abstract": "Several supervised approaches have been proposed for causality identification by relying on shallow linguistic features. However, such features do not lead to improved performance. Therefore, novel sources of knowledge are required to achieve progress on this problem. In this paper, we propose a model for the recognition of causality in verb-noun pairs by employing additional types of knowledge along with linguistic features. In particular, we focus on identifying and employing semantic classes of nouns and verbs with high tendency to encode cause or non-cause relations. Our model incorporates the information about these classes to minimize errors in predictions made by a basic supervised classifier relying merely on shallow linguistic features. As compared with this basic classifier our model achieves 14.74% (29.57%) improvement in F-score (accuracy), respectively.", "keyphrases": ["causality", "noun", "semantic class"]}
+{"id": "birch-etal-2016-hume", "title": "HUME: Human UCCA-Based Evaluation of Machine Translation", "abstract": "Human evaluation of machine translation normally uses sentence-level measures such as relative ranking or adequacy scales. However, these provide no insight into possible errors, and do not scale well with sentence length. We argue for a semantics-based evaluation, which captures what meaning components are retained in the MT output, thus providing a more fine-grained analysis of translation quality, and enabling the construction and tuning of semantics-based MT. We present a novel human semantic evaluation measure, Human UCCA-based MT Evaluation (HUME), building on the UCCA semantic representation scheme. HUME covers a wider range of semantic phenomena than previous methods and does not rely on semantic annotation of the potentially garbled MT output. We experiment with four language pairs, demonstrating HUME's broad applicability, and report good inter-annotator agreement rates and correlation with human adequacy scores.", "keyphrases": ["machine translation", "evaluation measure", "hume"]}
+{"id": "blodgett-etal-2020-language", "title": "Language (Technology) is Power: A Critical Survey of \u201cBias\u201d in NLP", "abstract": "We survey 146 papers analyzing \u201cbias\u201d in NLP systems, finding that their motivations are often vague, inconsistent, and lacking in normative reasoning, despite the fact that analyzing \u201cbias\u201d is an inherently normative process. We further find that these papers' proposed quantitative techniques for measuring or mitigating \u201cbias\u201d are poorly matched to their motivations and do not engage with the relevant literature outside of NLP. 
Based on these findings, we describe the beginnings of a path forward by proposing three recommendations that should guide work analyzing \u201cbias\u201d in NLP systems. These recommendations rest on a greater recognition of the relationships between language and social hierarchies, encouraging researchers and practitioners to articulate their conceptualizations of \u201cbias\u201d\u2014i.e., what kinds of system behaviors are harmful, in what ways, to whom, and why, as well as the normative reasoning underlying these statements\u2014and to center work around the lived experiences of members of communities affected by NLP systems, while interrogating and reimagining the power relations between technologists and such communities.", "keyphrases": ["technology", "critical survey", "harm", "gender", "language model"]} +{"id": "oepen-etal-2014-semeval", "title": "SemEval 2014 Task 8: Broad-Coverage Semantic Dependency Parsing", "abstract": "Task 18 at SemEval 2015 defines Broad-Coverage Semantic Dependency Parsing (SDP) as the problem of recovering sentence-internal predicate\u2013argument relationships for all content words, i.e. the sema ...", "keyphrases": ["semantic dependency parsing", "acyclic graph", "reentrancie", "empty node"]} +{"id": "niu-etal-2020-self", "title": "A Self-Training Method for Machine Reading Comprehension with Soft Evidence Extraction", "abstract": "Neural models have achieved great success on machine reading comprehension (MRC), many of which typically consist of two components: an evidence extractor and an answer predictor. The former seeks the most relevant information from a reference text, while the latter is to locate or generate answers from the extracted evidence. Despite the importance of evidence labels for training the evidence extractor, they are not cheaply accessible, particularly in many non-extractive MRC tasks such as YES/NO question answering and multi-choice MRC. To address this problem, we present a Self-Training method (STM), which supervises the evidence extractor with auto-generated evidence labels in an iterative process. At each iteration, a base MRC model is trained with golden answers and noisy evidence labels. The trained model will predict pseudo evidence labels as extra supervision in the next iteration. We evaluate STM on seven datasets over three MRC tasks. Experimental results demonstrate the improvement on existing MRC models, and we also analyze how and why such a self-training method works in MRC.", "keyphrases": ["self-training method", "machine reading comprehension", "soft evidence extraction"]} +{"id": "proisl-2018-someweta", "title": "SoMeWeTa: A Part-of-Speech Tagger for German Social Media and Web Texts", "abstract": "Off-the-shelf part-of-speech taggers typically perform relatively poorly on web and social media texts since those domains are quite different from the newspaper articles on which most tagger models are trained. In this paper, we describe SoMeWeTa, a part-of-speech tagger based on the averaged structured perceptron that is capable of domain adaptation and that can use various external resources. We train the tagger on the German web and social media data of the EmpiriST 2015 shared task. Using the TIGER corpus as background data and adding external information about word classes and Brown clusters, we substantially improve on the state of the art for both the web and the social media data sets. 
The tagger is available as free software.", "keyphrases": ["part-of-speech tagger", "social media", "someweta"]}
+{"id": "neale-etal-2016-word", "title": "Word Sense-Aware Machine Translation: Including Senses as Contextual Features for Improved Translation Models", "abstract": "Although it is commonly assumed that word sense disambiguation (WSD) should help to improve lexical choice and improve the quality of machine translation systems, how to successfully integrate word senses into such systems remains an unanswered question. Some successful approaches have involved reformulating either WSD or the word senses it produces, but work on using traditional word senses to improve machine translation has met with limited success. In this paper, we build upon previous work that experimented on including word senses as contextual features in maxent-based translation models. Training on a large, open-domain corpus (Europarl), we demonstrate that this approach yields significant improvements in machine translation from English to Portuguese.", "keyphrases": ["machine translation", "contextual feature", "wsd"]}
+{"id": "cucchiarini-etal-2006-jasmin", "title": "JASMIN-CGN: Extension of the Spoken Dutch Corpus with Speech of Elderly People, Children and Non-natives in the Human-Machine Interaction Modality", "abstract": "Large speech corpora (LSC) constitute an indispensable resource for conducting research in speech processing and for developing real-life speech applications. In 2004 the Spoken Dutch Corpus (CGN) became available, a corpus of standard Dutch as spoken by adult natives in the Netherlands and Flanders. Owing to budget constraints, CGN does not include speech of children, non-natives, elderly people and recordings of speech produced in human-machine interactions. Since such recordings would be extremely useful for conducting research and for developing HLT applications for these specific groups of speakers of Dutch, a new project, JASMIN-CGN, was started which aims at extending CGN in different ways: by collecting a corpus of contemporary Dutch as spoken by children of different age groups, non-natives with different mother tongues and elderly people in the Netherlands and Flanders and, in addition, by collecting speech material in a communication setting that was not envisaged in CGN: human-machine interaction. We expect that the knowledge gathered from these data can be generalized to developing appropriate systems also for other speaker groups (i.e. adult natives). One third of the data will be collected in Flanders and two thirds in the Netherlands.", "keyphrases": ["spoken dutch corpus", "non-native", "jasmin-cgn"]}
+{"id": "de-marneffe-etal-2008-finding", "title": "Finding Contradictions in Text", "abstract": "Detecting conflicting statements is a foundational text understanding task with applications in information analysis. We propose an appropriate definition of contradiction for NLP tasks and develop available corpora, from which we construct a typology of contradictions. We demonstrate that a system for contradiction needs to make more fine-grained distinctions than the common systems for entailment. In particular, we argue for the centrality of event coreference and therefore incorporate such a component based on topicality. We present the first detailed breakdown of performance on this task. 
Detecting some types of contradiction requires deeper inferential paths than our system is capable of, but we achieve good performance on types arising from negation and antonymy.", "keyphrases": ["contradiction", "typology", "negation", "google news"]}
+{"id": "tian-etal-2021-hypogen-hyperbole", "title": "HypoGen: Hyperbole Generation with Commonsense and Counterfactual Knowledge", "abstract": "A hyperbole is an intentional and creative exaggeration not to be taken literally. Despite its ubiquity in daily life, the computational explorations of hyperboles are scarce. In this paper, we tackle the under-explored and challenging task: sentence-level hyperbole generation. We start with a representative syntactic pattern for intensification and systematically study the semantic (commonsense and counterfactual) relationships between each component in such hyperboles. We then leverage commonsense and counterfactual inference to generate hyperbole candidates based on our findings from the pattern, and train neural classifiers to rank and select high-quality hyperboles. Automatic and human evaluations show that our generation method is able to generate hyperboles with high success rate, intensity, funniness, and creativity.", "keyphrases": ["hyperbole generation", "commonsense", "counterfactual knowledge"]}
+{"id": "bojanowski-etal-2017-enriching", "title": "Enriching Word Vectors with Subword Information", "abstract": "Continuous word representations, trained on large unlabeled corpora are useful for many natural language processing tasks. Popular models that learn such representations ignore the morphology of words, by assigning a distinct vector to each word. This is a limitation, especially for languages with large vocabularies and many rare words. In this paper, we propose a new approach based on the skipgram model, where each word is represented as a bag of character n-grams. A vector representation is associated to each character n-gram; words being represented as the sum of these representations. Our method is fast, allowing us to train models on large corpora quickly and to compute word representations for words that did not appear in the training data. We evaluate our word representations on nine different languages, both on word similarity and analogy tasks. By comparing to recently proposed morphological word representations, we show that our vectors achieve state-of-the-art performance on these tasks.", "keyphrases": ["subword information", "fasttext model", "unit", "facebook", "cbow"]}
+{"id": "gordon-etal-2020-compressing", "title": "Compressing BERT: Studying the Effects of Weight Pruning on Transfer Learning", "abstract": "Pre-trained universal feature extractors, such as BERT for natural language processing and VGG for computer vision, have become effective methods for improving deep learning models without requiring more labeled data. While effective, feature extractors like BERT may be prohibitively large for some deployment scenarios. We explore weight pruning for BERT and ask: how does compression during pre-training affect transfer learning? We find that pruning affects transfer learning in three broad regimes. Low levels of pruning (30-40%) do not affect pre-training loss or transfer to downstream tasks at all. Medium levels of pruning increase the pre-training loss and prevent useful pre-training information from being transferred to downstream tasks. High levels of pruning additionally prevent models from fitting downstream datasets, leading to further degradation. 
Finally, we observe that fine-tuning BERT on a specific task does not improve its prunability. We conclude that BERT can be pruned once during pre-training rather than separately for each task without affecting performance.", "keyphrases": ["bert", "weight pruning", "transfer learning", "pre-training loss"]}
+{"id": "abbes-etal-2020-daict", "title": "DAICT: A Dialectal Arabic Irony Corpus Extracted from Twitter", "abstract": "Identifying irony in user-generated social media content has a wide range of applications; however to date Arabic content has received limited attention. To bridge this gap, this study builds a new open domain Arabic corpus annotated for irony detection. We query Twitter using irony-related hashtags to collect ironic messages, which are then manually annotated by two linguists according to our working definition of irony. Challenges which we have encountered during the annotation process reflect the inherent limitations of Twitter messages interpretation, as well as the complexity of Arabic and its dialects. Once published, our corpus will be a valuable free resource for developing open domain systems for automatic irony recognition in Arabic language and its dialects in social media text.", "keyphrases": ["arabic", "twitter", "daict"]}
+{"id": "wang-2008-examination", "title": "A Re-examination of Dependency Path Kernels for Relation Extraction", "abstract": "Extracting semantic relations between entities from natural language text is an important step towards automatic knowledge extraction from large text collections and the Web. The state-of-the-art approach to relation extraction employs Support Vector Machines (SVM) and kernel methods for classification. Despite the diversity of kernels and the near exhaustive trial-and-error on kernel combination, there lacks a clear understanding of how these kernels relate to each other and why some are superior to others. In this paper, we provide an analysis of the relative strength and weakness of several kernels through systematic experimentation. We show that relation extraction can benefit from increasing the feature space through convolution kernel and introducing bias towards more syntactically meaningful feature space. Based on our analysis, we propose a new convolution dependency path kernel that combines the above two benefits. Our experimental results on the standard ACE 2003 datasets demonstrate that our new kernel gives consistent and significantly better performance than baseline methods, obtaining very competitive results to the state-of-the-art performance.", "keyphrases": ["relation extraction", "several kernel", "syntactic feature"]}
+{"id": "jones-etal-2020-robust", "title": "Robust Encodings: A Framework for Combating Adversarial Typos", "abstract": "Despite excellent performance on many tasks, NLP systems are easily fooled by small adversarial perturbations of inputs. Existing procedures to defend against such perturbations are either (i) heuristic in nature and susceptible to stronger attacks or (ii) provide guaranteed robustness to worst-case attacks, but are incompatible with state-of-the-art models like BERT. In this work, we introduce robust encodings (RobEn): a simple framework that confers guaranteed robustness, without making compromises on model architecture. The core component of RobEn is an encoding function, which maps sentences to a smaller, discrete space of encodings. 
Systems using these encodings as a bottleneck confer guaranteed robustness with standard training, and the same encodings can be used across multiple tasks. We identify two desiderata to construct robust encoding functions: perturbations of a sentence should map to a small set of encodings (stability), and models using encodings should still perform well (fidelity). We instantiate RobEn to defend against a large family of adversarial typos. Across six tasks from GLUE, our instantiation of RobEn paired with BERT achieves an average robust accuracy of 71.3% against all adversarial typos in the family considered, while previous work using a typo-corrector achieves only 35.3% accuracy against a simple greedy attack.", "keyphrases": ["encoding", "adversarial typo", "robustness"]} +{"id": "aguilar-etal-2020-lince", "title": "LinCE: A Centralized Benchmark for Linguistic Code-switching Evaluation", "abstract": "Recent trends in NLP research have raised an interest in linguistic code-switching (CS); modern approaches have been proposed to solve a wide range of NLP tasks on multiple language pairs. Unfortunately, these proposed methods are hardly generalizable to different code-switched languages. In addition, it is unclear whether a model architecture is applicable for a different task while still being compatible with the code-switching setting. This is mainly because of the lack of a centralized benchmark and the sparse corpora that researchers employ based on their specific needs and interests. To facilitate research in this direction, we propose a centralized benchmark for Linguistic Code-switching Evaluation (LinCE) that combines eleven corpora covering four different code-switched language pairs (i.e., Spanish-English, Nepali-English, Hindi-English, and Modern Standard Arabic-Egyptian Arabic) and four tasks (i.e., language identification, named entity recognition, part-of-speech tagging, and sentiment analysis). As part of the benchmark centralization effort, we provide an online platform where researchers can submit their results while comparing with others in real-time. In addition, we provide the scores of different popular models, including LSTM, ELMo, and multilingual BERT so that the NLP community can compare against state-of-the-art systems. LinCE is a continuous effort, and we will expand it with more low-resource languages and tasks.", "keyphrases": ["centralized benchmark", "linguistic code-switching evaluation", "code-switched language pair", "lince"]} +{"id": "eikema-aziz-2020-map", "title": "Is MAP Decoding All You Need? The Inadequacy of the Mode in Neural Machine Translation", "abstract": "Recent studies have revealed a number of pathologies of neural machine translation (NMT) systems. Hypotheses explaining these mostly suggest there is something fundamentally wrong with NMT as a model or its training algorithm, maximum likelihood estimation (MLE). Most of this evidence was gathered using maximum a posteriori (MAP) decoding, a decision rule aimed at identifying the highest-scoring translation, i.e. the mode. We argue that the evidence corroborates the inadequacy of MAP decoding more than casts doubt on the model and its training algorithm. In this work, we show that translation distributions do reproduce various statistics of the data well, but that beam search strays from such statistics. We show that some of the known pathologies and biases of NMT are due to MAP decoding and not to NMT's statistical assumptions nor MLE. 
In particular, we show that the most likely translations under the model accumulate so little probability mass that the mode can be considered essentially arbitrary. We therefore advocate for the use of decision rules that take into account the translation distribution holistically. We show that an approximation to minimum Bayes risk decoding gives competitive results confirming that NMT models do capture important aspects of translation well in expectation.", "keyphrases": ["map", "mode", "neural machine translation", "highest-scoring translation"]} +{"id": "zhang-etal-2008-tree", "title": "A Tree Sequence Alignment-based Tree-to-Tree Translation Model", "abstract": "This paper presents a translation model that is based on tree sequence alignment, where a tree sequence refers to a single sequence of subtrees that covers a phrase. The model leverages on the strengths of both phrase-based and linguistically syntax-based method. It automatically learns aligned tree sequence pairs with mapping probabilities from word-aligned biparsed parallel texts. Compared with previous models, it not only captures non-syntactic phrases and discontinuous phrases with linguistically structured features, but also supports multi-level structure reordering of tree typology with larger span. This gives our model stronger expressive power than other reported models. Experimental results on the NIST MT-2005 Chinese-English translation task show that our method statistically significantly outperforms the baseline systems.", "keyphrases": ["tree sequence", "translation model", "previous model", "non-syntactic phrase", "synchronous grammar"]} +{"id": "riloff-wiebe-2003-learning", "title": "Learning Extraction Patterns for Subjective Expressions", "abstract": "This paper presents a bootstrapping process that learns linguistically rich extraction patterns for subjective (opinionated) expressions. High-precision classifiers label unannotated data to automatically create a large training set, which is then given to an extraction pattern learning algorithm. The learned patterns are then used to identify more subjective sentences. The bootstrapping process learns many subjective patterns and increases recall while maintaining high precision.", "keyphrases": ["extraction pattern", "subjective expression", "subjectivity analysis", "sentiment analysis", "clause"]} +{"id": "thomas-etal-2006-get", "title": "Get out the vote: Determining support or opposition from Congressional floor-debate transcripts", "abstract": "We investigate whether one can determine from the transcripts of U.S. Congressional floor debates whether the speeches represent support of or opposition to proposed legislation. To address this problem, we exploit the fact that these speeches occur as part of a discussion; this allows us to use sources of information regarding relationships between discourse segments, such as whether a given utterance indicates agreement with the opinion expressed by another. 
We find that the incorporation of such information yields substantial improvements over classifying speeches in isolation.", "keyphrases": ["support", "floor-debate transcript", "legislation", "participant", "political debate"]}
+{"id": "faruqui-dyer-2014-improving", "title": "Improving Vector Space Word Representations Using Multilingual Correlation", "abstract": "The distributional hypothesis of Harris (1954), according to which the meaning of words is evidenced by the contexts they occur in, has motivated several effective techniques for obtaining vector space semantic representations of words using unannotated text corpora. This paper argues that lexico-semantic content should additionally be invariant across languages and proposes a simple technique based on canonical correlation analysis (CCA) for incorporating multilingual evidence into vectors generated monolingually. We evaluate the resulting word representations on standard lexical semantic evaluation tasks and show that our method produces substantially better semantic representations than monolingual techniques.", "keyphrases": ["canonical correlation analysis", "monolingual embedding", "mapping", "different language", "semantic task"]}
+{"id": "wong-dras-2009-contrastive", "title": "Contrastive Analysis and Native Language Identification", "abstract": "Attempts to profile authors based on their characteristics, including native language, have drawn attention in recent years, via several approaches using machine learning with simple features. In this paper we investigate the potential usefulness to this task of contrastive analysis from second language acquisition research, which postulates that the (syntactic) errors in a text are influenced by an author\u2019s native language. We explore this, first, by conducting an analysis of three syntactic error types, through hypothesis testing and machine learning; and second, through adding in these errors as features to the replication of a previous machine learning approach. This preliminary study provides some support for the use of this kind of syntactic errors as a clue to identifying the native language of an author.", "keyphrases": ["native language identification", "syntactic error", "contrastive analysis", "disagreement", "learner"]}
+{"id": "yu-etal-2017-joint", "title": "Joint Embeddings of Chinese Words, Characters, and Fine-grained Subcharacter Components", "abstract": "Word embeddings have attracted much attention recently. Different from alphabetic writing systems, Chinese characters are often composed of subcharacter components which are also semantically informative. In this work, we propose an approach to jointly embed Chinese words as well as their characters and fine-grained subcharacter components. We use three likelihoods to evaluate whether the context words, characters, and components can predict the current target word, and collected 13,253 subcharacter components to demonstrate the existing approaches of decomposing Chinese characters are not enough. Evaluation on both word similarity and word analogy tasks demonstrates the superior performance of our model.", "keyphrases": ["chinese word", "character", "semantic information"]}
+{"id": "see-etal-2019-makes", "title": "What makes a good conversation? How controllable attributes affect human judgments", "abstract": "A good conversation requires balance \u2013 between simplicity and detail; staying on topic and changing it; asking questions and answering them. 
Although dialogue agents are commonly evaluated via human judgments of overall quality, the relationship between quality and these individual factors is less well-studied. In this work, we examine two controllable neural text generation methods, conditional training and weighted decoding, in order to control four important attributes for chit-chat dialogue: repetition, specificity, response-relatedness and question-asking. We conduct a large-scale human evaluation to measure the effect of these control parameters on multi-turn interactive conversations on the PersonaChat task. We provide a detailed analysis of their relationship to high-level aspects of conversation, and show that by controlling combinations of these variables our models obtain clear improvements in human quality judgments.", "keyphrases": ["conversation", "attribute", "human judgment", "engagingness", "response generation"]} +{"id": "narasimhan-etal-2016-improving", "title": "Improving Information Extraction by Acquiring External Evidence with Reinforcement Learning", "abstract": "Most successful information extraction systems operate with access to a large collection of documents. In this work, we explore the task of acquiring and incorporating external evidence to improve extraction accuracy in domains where the amount of training data is scarce. This process entails issuing search queries, extraction from new sources and reconciliation of extracted values, which are repeated until sufficient evidence is collected. We approach the problem using a reinforcement learning framework where our model learns to select optimal actions based on contextual information. We employ a deep Q-network, trained to optimize a reward function that reflects extraction accuracy while penalizing extra effort. Our experiments on two databases -- of shooting incidents, and food adulteration cases -- demonstrate that our system significantly outperforms traditional extractors and a competitive meta-classifier baseline.", "keyphrases": ["information extraction", "external evidence", "reinforcement learning", "action"]} +{"id": "vanmassenhove-etal-2019-lost", "title": "Lost in Translation: Loss and Decay of Linguistic Richness in Machine Translation", "abstract": "This work presents an empirical approach to quantifying the loss of lexical richness in Machine Translation (MT) systems compared to Human Translation (HT). Our experiments show how current MT systems indeed fail to render the lexical diversity of human generated or translated text. The inability of MT systems to generate diverse outputs and its tendency to exacerbate already frequent patterns while ignoring less frequent ones, might be the underlying cause for, among others, the currently heavily debated issues related to gender biased output. Can we indeed, aside from biased data, talk about an algorithm that exacerbates seen biases?", "keyphrases": ["loss", "linguistic richness", "machine translation"]} +{"id": "vulic-mrksic-2018-specialising", "title": "Specialising Word Vectors for Lexical Entailment", "abstract": "We present LEAR (Lexical Entailment Attract-Repel), a novel post-processing method that transforms any input word vector space to emphasise the asymmetric relation of lexical entailment (LE), also known as the IS-A or hyponymy-hypernymy relation. By injecting external linguistic constraints (e.g., WordNet links) into the initial vector space, the LE specialisation procedure brings true hyponymy-hypernymy pairs closer together in the transformed Euclidean space. 
The proposed asymmetric distance measure adjusts the norms of word vectors to reflect the actual WordNet-style hierarchy of concepts. Simultaneously, a joint objective enforces semantic similarity using the symmetric cosine distance, yielding a vector space specialised for both lexical relations at once. LEAR specialisation achieves state-of-the-art performance in the tasks of hypernymy directionality, hypernymy detection, and graded lexical entailment, demonstrating the effectiveness and robustness of the proposed asymmetric specialisation model.", "keyphrases": ["word vector", "lexical entailment", "hypernymy detection"]}
+{"id": "yu-jiang-2016-learning", "title": "Learning Sentence Embeddings with Auxiliary Tasks for Cross-Domain Sentiment Classification", "abstract": "In this paper, we study cross-domain sentiment classification with neural network architectures. We borrow the idea from Structural Correspondence Learning and use two auxiliary tasks to help induce a sentence embedding that supposedly works well across domains for sentiment classification. We also propose to jointly learn this sentence embedding together with the sentiment classifier itself. Experiment results demonstrate that our proposed joint model outperforms several state-of-the-art methods on five benchmark datasets.", "keyphrases": ["auxiliary task", "sentiment classification", "pivot word"]}
+{"id": "lee-etal-2017-end", "title": "End-to-end Neural Coreference Resolution", "abstract": "We introduce the first end-to-end coreference resolution model and show that it significantly outperforms all previous work without using a syntactic parser or hand-engineered mention detector. The key idea is to directly consider all spans in a document as potential mentions and learn distributions over possible antecedents for each. The model computes span embeddings that combine context-dependent boundary representations with a head-finding attention mechanism. It is trained to maximize the marginal likelihood of gold antecedent spans from coreference clusters and is factored to enable aggressive pruning of potential mentions. Experiments demonstrate state-of-the-art performance, with a gain of 1.5 F1 on the OntoNotes benchmark and by 3.1 F1 using a 5-model ensemble, despite the fact that this is the first approach to be successfully trained with no external resources.", "keyphrases": ["neural coreference resolution", "end-to-end", "wsc", "input sentence", "approximation"]}
+{"id": "hammerton-2003-named", "title": "Named Entity Recognition with Long Short-Term Memory", "abstract": "In this approach to named entity recognition, a recurrent neural network, known as Long Short-Term Memory, is applied. The network is trained to perform 2 passes on each sentence, outputting its decisions on the second pass. The first pass is used to acquire information for disambiguation during the second pass. SARDNET, a self-organising map for sequences is used to generate representations for the lexical items presented to the LSTM network, whilst orthogonal representations are used to represent the part of speech and chunk tags.", "keyphrases": ["entity recognition", "long short-term memory", "lstm network"]}
+{"id": "zopf-etal-2016-next", "title": "The Next Step for Multi-Document Summarization: A Heterogeneous Multi-Genre Corpus Built with a Novel Construction Approach", "abstract": "Research in multi-document summarization has focused on newswire corpora since the early beginnings. 
However, the newswire genre provides genre-specific features such as sentence position which are easy to exploit in summarization systems. Such easy to exploit genre-specific features are available in other genres as well. We therefore present the new hMDS corpus for multi-document summarization, which contains heterogeneous source documents from multiple text genres, as well as summaries with different lengths. For the construction of the corpus, we developed a novel construction approach which is suited to build large and heterogeneous summarization corpora with little effort. The method reverses the usual process of writing summaries for given source documents: it combines already available summaries with appropriate source documents. In a detailed analysis, we show that our new corpus is significantly different from the homogeneous corpora commonly used, and that it is heterogeneous along several dimensions. Our experimental evaluation using well-known state-of-the-art summarization systems shows that our corpus poses new challenges in the field of multi-document summarization. Last but not least, we make our corpus publicly available to the research community at the corpus web page .", "keyphrases": ["summarization", "novel construction approach", "wikipedia article"]} +{"id": "weir-etal-2016-aligning", "title": "Aligning Packed Dependency Trees: A Theory of Composition for Distributional Semantics", "abstract": "We present a new framework for compositional distributional semantics in which the distributional contexts of lexemes are expressed in terms of anchored packed dependency trees. We show that these structures have the potential to capture the full sentential contexts of a lexeme and provide a uniform basis for the composition of distributional knowledge in a way that captures both mutual disambiguation and generalization.", "keyphrases": ["composition", "distributional semantic", "anchored packed tree"]} +{"id": "lin-etal-2019-open", "title": "Open Sesame: Getting inside BERT's Linguistic Knowledge", "abstract": "How and to what extent does BERT encode syntactically-sensitive hierarchical information or positionally-sensitive linear information? Recent work has shown that contextual representations like BERT perform well on tasks that require sensitivity to linguistic structure. We present here two studies which aim to provide a better understanding of the nature of BERT's representations. The first of these focuses on the identification of structurally-defined elements using diagnostic classifiers, while the second explores BERT's representation of subject-verb agreement and anaphor-antecedent dependencies through a quantitative assessment of self-attention vectors. In both cases, we find that BERT encodes positional information about word tokens well on its lower layers, but switches to a hierarchically-oriented encoding on higher layers. 
We conclude then that BERT's representations do indeed model linguistically relevant aspects of hierarchical structure, though they do not appear to show the sharp sensitivity to hierarchical structure that is found in human processing of reflexive anaphora.", "keyphrases": ["bert", "linguistic knowledge", "subject-verb agreement", "agreement", "relevant aspect"]} +{"id": "dannells-etal-2013-multilingual", "title": "Multilingual access to cultural heritage content on the Semantic Web", "abstract": "As the amount of cultural data available on the Semantic Web is expanding, the demand of accessing this data in multiple languages is increasing. Previous work on multilingual access to cultural heritage information has shown that mapping from ontologies to natural language requires at least two different steps: (1) mapping multilingual metadata to interoperable knowledge sources; (2) assigning multilingual knowledge to cultural data. This paper presents our work on making cultural heritage content available on the Semantic Web and accessible in 15 languages. The objective of our work is both to form queries and to retrieve semantic content in multiple languages. We describe our experiences with processing museum data extracted from two different sources, harmonizing this data and making its content accessible in natural language.", "keyphrases": ["cultural heritage content", "semantic web", "multilingual access"]} +{"id": "martins-etal-2009-concise", "title": "Concise Integer Linear Programming Formulations for Dependency Parsing", "abstract": "We formulate the problem of non-projective dependency parsing as a polynomial-sized integer linear program. Our formulation is able to handle non-local output features in an efficient manner; not only is it compatible with prior knowledge encoded as hard constraints, it can also learn soft constraints from data. In particular, our model is able to learn correlations among neighboring arcs (siblings and grandparents), word valency, and tendencies toward nearly-projective parses. The model parameters are learned in a max-margin framework by employing a linear programming relaxation. We evaluate the performance of our parser on data in several natural languages, achieving improvements over existing state-of-the-art methods.", "keyphrases": ["integer linear programming", "dependency parsing", "similar line"]} +{"id": "fung-cheung-2004-multi", "title": "Multi-level Bootstrapping For Extracting Parallel Sentences From a Quasi-Comparable Corpus", "abstract": "We propose a completely unsupervised method for mining parallel sentences from quasi-comparable bilingual texts which have very different sizes, and which include both in-topic and off-topic documents. We discuss and analyze different bilingual corpora with various levels of comparability. We propose that while better document matching leads to better parallel sentence extraction, better sentence matching also leads to better document matching. Based on this, we use multi-level bootstrapping to improve the alignments between documents, sentences, and bilingual word pairs, iteratively. Our method is the first method that does not rely on any supervised training data, such as a sentence-aligned corpus, or temporal information, such as the publishing date of a news article. 
It is validated by experimental results that show a 23% improvement over a method without multi-level bootstrapping.", "keyphrases": ["parallel sentence", "in-topic", "document matching", "multi-level bootstrapping", "comparable corpora"]}
+{"id": "rabinovich-etal-2017-abstract", "title": "Abstract Syntax Networks for Code Generation and Semantic Parsing", "abstract": "Tasks like code generation and semantic parsing require mapping unstructured (or partially structured) inputs to well-formed, executable outputs. We introduce abstract syntax networks, a modeling framework for these problems. The outputs are represented as abstract syntax trees (ASTs) and constructed by a decoder with a dynamically-determined modular structure paralleling the structure of the output tree. On the benchmark Hearthstone dataset for code generation, our model obtains 79.2 BLEU and 22.7% exact match accuracy, compared to previous state-of-the-art values of 67.1 and 6.1%. Furthermore, we perform competitively on the Atis, Jobs, and Geo semantic parsing datasets with no task-specific engineering.", "keyphrases": ["code generation", "ast", "modular structure", "valid program", "programming language"]}
+{"id": "chrysostomou-aletras-2022-empirical", "title": "An Empirical Study on Explanations in Out-of-Domain Settings", "abstract": "Recent work in Natural Language Processing has focused on developing approaches that extract faithful explanations, either via identifying the most important tokens in the input (i.e. post-hoc explanations) or by designing inherently faithful models that first select the most important tokens and then use them to predict the correct label (i.e. select-then-predict models). Currently, these approaches are largely evaluated on in-domain settings. Yet, little is known about how post-hoc explanations and inherently faithful models perform in out-of-domain settings. In this paper, we conduct an extensive empirical study that examines: (1) the out-of-domain faithfulness of post-hoc explanations, generated by five feature attribution methods; and (2) the out-of-domain performance of two inherently faithful models over six datasets. Contrary to our expectations, results show that in many cases out-of-domain post-hoc explanation faithfulness measured by sufficiency and comprehensiveness is higher compared to in-domain. We find this misleading and suggest using a random baseline as a yardstick for evaluating post-hoc explanation faithfulness. Our findings also show that select-then-predict models demonstrate comparable predictive performance in out-of-domain settings to full-text trained models.", "keyphrases": ["empirical study", "explanation", "out-of-domain setting"]}
+{"id": "clark-etal-2019-boolq", "title": "BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions", "abstract": "In this paper we study yes/no questions that are naturally occurring \u2014 meaning that they are generated in unprompted and unconstrained settings. We build a reading comprehension dataset, BoolQ, of such questions, and show that they are unexpectedly challenging. They often query for complex, non-factoid information, and require difficult entailment-like inference to solve. We also explore the effectiveness of a range of transfer learning baselines. We find that transferring from entailment data is more effective than transferring from paraphrase or extractive QA data, and that it, surprisingly, continues to be very beneficial even when starting from massive pre-trained language models such as BERT. 
Our best method trains BERT on MultiNLI and then re-trains it on our train set. It achieves 80.4% accuracy compared to 90% accuracy of human annotators (and 62% majority-baseline), leaving a significant gap for future work.", "keyphrases": ["difficulty", "boolq", "question answering"]} +{"id": "gimpel-etal-2011-part", "title": "Part-of-Speech Tagging for Twitter: Annotation, Features, and Experiments", "abstract": "We address the problem of part-of-speech tagging for English data from the popular micro-blogging service Twitter. We develop a tagset, annotate data, develop features, and report tagging results nearing 90% accuracy. The data and tools have been made available to the research community with the goal of enabling richer text analysis of Twitter and related social media data sets.", "keyphrases": ["twitter", "tagset", "part-of-speech tagging", "annotated tweet", "pos"]} +{"id": "clark-manning-2016-improving", "title": "Improving Coreference Resolution by Learning Entity-Level Distributed Representations", "abstract": "A long-standing challenge in coreference resolution has been the incorporation of entity-level information - features defined over clusters of mentions instead of mention pairs. We present a neural network based coreference system that produces high-dimensional vector representations for pairs of coreference clusters. Using these representations, our system learns when combining clusters is desirable. We train the system with a learning-to-search algorithm that teaches it which local decisions (cluster merges) will lead to a high-scoring final coreference partition. The system substantially outperforms the current state-of-the-art on the English and Chinese portions of the CoNLL 2012 Shared Task dataset despite using few hand-engineered features.", "keyphrases": ["coreference resolution", "entity-level information", "cluster", "mention"]} +{"id": "de-jong-etal-2018-clarin", "title": "CLARIN: Towards FAIR and Responsible Data Science Using Language Resources", "abstract": "CLARIN is a European Research Infrastructure providing access to language resources and technologies for researchers in the humanities and social sciences. It supports the study of language data in general and aims to increase the potential for comparative research of cultural and societal phenomena across the boundaries of languages. This paper outlines the CLARIN vision and strategy, and it explains how the design and implementation of CLARIN are compliant with the FAIR principles: findability, accessibility, interoperability and reusability of data. The paper also explains the approach of CLARIN towards the enabling of responsible data science. Attention is paid to (i) the development of measures for increasing the transparency and explainability of the results from applying CLARIN technologies, in particular in the context of multidisciplinary research, and (ii) stimulating the uptake of its resources, tools and services by the various communities of use, all in accordance with the principles for Open Science.", "keyphrases": ["responsible data science", "language resources", "clarin"]} +{"id": "mausam-etal-2012-open", "title": "Open Language Learning for Information Extraction", "abstract": "Open Information Extraction (IE) systems extract relational tuples from text, without requiring a pre-specified vocabulary, by identifying relation phrases and associated arguments in arbitrary sentences. 
However, state-of-the-art Open IE systems such as ReVerb and WOE share two important weaknesses -- (1) they extract only relations that are mediated by verbs, and (2) they ignore context, thus extracting tuples that are not asserted as factual. This paper presents OLLIE, a substantially improved Open IE system that addresses both these limitations. First, OLLIE achieves high yield by extracting relations mediated by nouns, adjectives, and more. Second, a context-analysis step increases precision by including contextual information from the sentence in the extractions. OLLIE obtains 2.7 times the area under precision-yield curve (AUC) compared to ReVerb and 1.9 times the AUC of WOEparse.", "keyphrases": ["information extraction", "relation phrase", "open language learning", "openie", "recall"]}
+{"id": "grusky-etal-2018-newsroom", "title": "Newsroom: A Dataset of 1.3 Million Summaries with Diverse Extractive Strategies", "abstract": "We present NEWSROOM, a summarization dataset of 1.3 million articles and summaries written by authors and editors in newsrooms of 38 major news publications. Extracted from search and social media metadata between 1998 and 2017, these high-quality summaries demonstrate high diversity of summarization styles. In particular, the summaries combine abstractive and extractive strategies, borrowing words and phrases from articles at varying rates. We analyze the extraction strategies used in NEWSROOM summaries against other datasets to quantify the diversity and difficulty of our new data, and train existing methods on the data to evaluate its utility and challenges. The dataset is available online at summari.es.", "keyphrases": ["summarization dataset", "abstractiveness", "newsroom", "large-scale dataset", "extractive method"]}
+{"id": "fujiki-etal-2003-automatic", "title": "Automatic Acquisition of Script Knowledge from a Text Collection", "abstract": "In this paper, we describe a method for automatic acquisition of script knowledge from a Japanese text collection. Script knowledge represents a typical sequence of actions that occur in a particular situation. We extracted sequences (pairs) of actions occurring in time order from a Japanese text collection and then chose those that were typical of certain situations by ranking these sequences (pairs) in terms of the frequency of their occurrence. To extract sequences of actions occurring in time order, we constructed a text collection in which texts describing facts relating to a similar situation were clustered together and arranged in time order. We also describe a preliminary experiment with our acquisition system and discuss the results.", "keyphrases": ["script knowledge", "text collection", "automatic acquisition"]}
+{"id": "xu-etal-2015-classifying", "title": "Classifying Relations via Long Short Term Memory Networks along Shortest Dependency Paths", "abstract": "Relation classification is an important research arena in the field of natural language processing (NLP). In this paper, we present SDP-LSTM, a novel neural network to classify the relation of two entities in a sentence. Our neural architecture leverages the shortest dependency path (SDP) between two entities; multichannel recurrent neural networks, with long short term memory (LSTM) units, pick up heterogeneous information along the SDP. Our proposed model has several distinct features: (1) The shortest dependency paths retain most relevant information (to relation classification), while eliminating irrelevant words in the sentence.
(2) The multichannel LSTM networks allow effective information integration from heterogeneous sources over the dependency paths. (3) A customized dropout strategy regularizes the neural network to alleviate overfitting. We test our model on the SemEval 2010 relation classification task, and achieve an $F_1$-score of 83.7\\%, higher than competing methods in the literature.", "keyphrases": ["short term memory", "recurrent neural network", "short dependency path", "cnn", "input sentence"]}
+{"id": "somasundaran-etal-2014-lexical", "title": "Lexical Chaining for Measuring Discourse Coherence Quality in Test-taker Essays", "abstract": "This paper presents an investigation of lexical chaining (Morris and Hirst, 1991) for measuring discourse coherence quality in test-taker essays. We hypothesize that attributes of lexical chains, as well as interactions between lexical chains and explicit discourse elements, can be harnessed for representing coherence. Our experiments reveal that performance achieved by our new lexical chain features is better than that of previous discourse features used for this task, and that the best system performance is achieved when combining lexical chaining features with complementary discourse features, such as those provided by a discourse parser based on rhetorical structure theory, and features that reflect errors in grammar, word usage, and mechanics.", "keyphrases": ["discourse coherence quality", "test-taker essay", "lexical chaining"]}
+{"id": "liu-li-2016-recognizing", "title": "Recognizing Implicit Discourse Relations via Repeated Reading: Neural Networks with Multi-Level Attention", "abstract": "Recognizing implicit discourse relations is a challenging but important task in the field of Natural Language Processing. For such a complex text processing task, different from previous studies, we argue that it is necessary to repeatedly read the arguments and dynamically exploit the efficient features useful for recognizing discourse relations. To mimic the repeated reading strategy, we propose the neural networks with multi-level attention (NNMA), combining the attention mechanism and external memories to gradually fix the attention on some specific words helpful to judging the discourse relations. Experiments on the PDTB dataset show that our proposed method achieves state-of-the-art results. The visualization of the attention weights also illustrates the progress that our model observes the arguments on each level and progressively locates the important words.", "keyphrases": ["discourse relation", "multi-level attention", "memory"]}
+{"id": "fonseca-etal-2019-findings", "title": "Findings of the WMT 2019 Shared Tasks on Quality Estimation", "abstract": "We report the results of the WMT19 shared task on Quality Estimation, i.e. the task of predicting the quality of the output of machine translation systems given just the source text and the hypothesis translations. The task includes estimation at three granularity levels: word, sentence and document. A novel addition is evaluating sentence-level QE against human judgments: in other words, designing MT metrics that do not need a reference translation. This year we include three language pairs, produced solely by neural machine translation systems.
Participating teams from eleven institutions submitted a variety of systems to different task variants and language pairs.", "keyphrases": ["wmt", "shared tasks", "quality estimation", "granularity level", "post-editing"]} +{"id": "rios-etal-2011-tine", "title": "TINE: A Metric to Assess MT Adequacy", "abstract": "We describe TINE, a new automatic evaluation metric for Machine Translation that aims at assessing segment-level adequacy. Lexical similarity and shallow-semantics are used as indicators of adequacy between machine and reference translations. The metric is based on the combination of a lexical matching component and an adequacy component. Lexical matching is performed comparing bags-of-words without any linguistic annotation. The adequacy component consists in: i) using ontologies to align predicates (verbs), ii) using semantic roles to align predicate arguments (core arguments and modifiers), and iii) matching predicate arguments using distributional semantics. TINE's performance is comparable to that of previous metrics at segment level for several language pairs, with average Kendall's tau correlation from 0.26 to 0.29. We show that the addition of the shallow-semantic component improves the performance of simple lexical matching strategies and metrics such as BLEU.", "keyphrases": ["evaluation metric", "tine", "adequacy judgment", "meteor"]} +{"id": "roller-etal-2021-recipes", "title": "Recipes for Building an Open-Domain Chatbot", "abstract": "Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we highlight other ingredients. Good conversation requires blended skills: providing engaging talking points, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models outperform existing approaches in multi-turn dialogue on engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.", "keyphrases": ["open-domain chatbot", "dialogue system", "interlocutor", "text generation task"]} +{"id": "turney-2013-distributional", "title": "Distributional Semantics Beyond Words: Supervised Learning of Analogy and Paraphrase", "abstract": "There have been several efforts to extend distributional semantics beyond individual words, to measure the similarity of word pairs, phrases, and sentences (briefly, tuples; ordered sets of words, contiguous or noncontiguous). One way to extend beyond words is to compare two tuples using a function that combines pairwise similarities between the component words in the tuples. A strength of this approach is that it works with both relational similarity (analogy) and compositional similarity (paraphrase). However, past work required hand-coding the combination function for different tasks. The main contribution of this paper is that combination functions are generated by supervised learning. 
We achieve state-of-the-art results in measuring relational similarity between word pairs (SAT analogies and SemEval 2012 Task 2) and measuring compositional similarity between noun-modifier phrases and unigrams (multiple-choice paraphrase questions).", "keyphrases": ["supervised learning", "analogy", "paraphrase"]}
+{"id": "martins-etal-2011-dual", "title": "Dual Decomposition with Many Overlapping Components", "abstract": "Dual decomposition has been recently proposed as a way of combining complementary models, with a boost in predictive power. However, in cases where lightweight decompositions are not readily available (e.g., due to the presence of rich features or logical constraints), the original subgradient algorithm is inefficient. We sidestep that difficulty by adopting an augmented Lagrangian method that accelerates model consensus by regularizing towards the averaged votes. We show how first-order logical constraints can be handled efficiently, even though the corresponding subproblems are no longer combinatorial, and report experiments in dependency parsing, with state-of-the-art results.", "keyphrases": ["consensus", "dependency parsing", "state-of-the-art result", "dual decomposition"]}
+{"id": "li-etal-2007-probabilistic", "title": "A Probabilistic Approach to Syntax-based Reordering for Statistical Machine Translation", "abstract": "Inspired by previous preprocessing approaches to SMT, this paper proposes a novel, probabilistic approach to reordering which combines the merits of syntax and phrase-based SMT. Given a source sentence and its parse tree, our method generates, by tree operations, an n-best list of reordered inputs, which are then fed to a standard phrase-based decoder to produce the optimal translation. Experiments show that, for the NIST MT-05 task of Chinese-to-English translation, the proposal leads to a BLEU improvement of 1.56%.", "keyphrases": ["probabilistic approach", "reordering", "phrase-based machine translation"]}
+{"id": "balazevic-etal-2019-tucker", "title": "TuckER: Tensor Factorization for Knowledge Graph Completion", "abstract": "Knowledge graphs are structured representations of real world facts. However, they typically contain only a small subset of all possible facts. Link prediction is a task of inferring missing facts based on existing ones. We propose TuckER, a relatively straightforward but powerful linear model based on Tucker decomposition of the binary tensor representation of knowledge graph triples. TuckER outperforms previous state-of-the-art models across standard link prediction datasets, acting as a strong baseline for more elaborate models. We show that TuckER is a fully expressive model, derive sufficient bounds on its embedding dimensionalities and demonstrate that several previously introduced linear models can be viewed as special cases of TuckER.", "keyphrases": ["tensor factorization", "knowledge graph", "tucker"]}
+{"id": "kim-etal-2019-compound", "title": "Compound Probabilistic Context-Free Grammars for Grammar Induction", "abstract": "We study a formalization of the grammar induction problem that models sentences as being generated by a compound probabilistic context-free grammar. In contrast to traditional formulations which learn a single stochastic grammar, our context-free rule probabilities are modulated by a per-sentence continuous latent variable, which induces marginal dependencies beyond the traditional context-free assumptions.
Inference in this context-dependent grammar is performed by collapsed variational inference, in which an amortized variational posterior is placed on the continuous variable, and the latent trees are marginalized with dynamic programming. Experiments on English and Chinese show the effectiveness of our approach compared to recent state-of-the-art methods for grammar induction from words with neural language models.", "keyphrases": ["grammar induction", "chinese", "pcfg", "syntax-dependent model", "mixture"]}
+{"id": "wei-gulla-2010-sentiment", "title": "Sentiment Learning on Product Reviews via Sentiment Ontology Tree", "abstract": "Existing works on sentiment analysis on product reviews suffer from the following limitations: (1) The knowledge of hierarchical relationships of product attributes is not fully utilized. (2) Reviews or sentences mentioning several attributes associated with complicated sentiments are not dealt with very well. In this paper, we propose a novel HL-SOT approach to labeling a product's attributes and their associated sentiments in product reviews by a Hierarchical Learning (HL) process with a defined Sentiment Ontology Tree (SOT). The empirical analysis against a human-labeled data set demonstrates promising and reasonable performance of the proposed HL-SOT approach. While this paper is mainly on sentiment analysis on reviews of one product, our proposed HL-SOT approach is easily generalized to labeling a mix of reviews of more than one product.", "keyphrases": ["review", "sentiment ontology tree", "hl-sot approach", "hierarchical learning"]}
+{"id": "meyers-etal-2004-nombank", "title": "The NomBank Project: An Interim Report", "abstract": "This paper describes NomBank, a project that will provide argument structure for instances of common nouns in the Penn Treebank II corpus. NomBank is part of a larger effort to add additional layers of annotation to the Penn Treebank II corpus. The University of Pennsylvania\u2019s PropBank, NomBank and other annotation projects taken together should lead to the creation of better tools for the automatic analysis of text. This paper describes the NomBank project in detail including its specifications and the process involved in creating the resource.", "keyphrases": ["nombank project", "argument structure", "recent release"]}
+{"id": "cotterell-etal-2015-labeled", "title": "Labeled Morphological Segmentation with Semi-Markov Models", "abstract": "We present labeled morphological segmentation\u2014an alternative view of morphological processing that unifies several tasks. We introduce a new hierarchy of morphotactic tagsets and CHIPMUNK, a discriminative morphological segmentation system that, contrary to previous work, explicitly models morphotactics. We show improved performance on three tasks for all six languages: (i) morphological segmentation, (ii) stemming and (iii) morphological tag classification. For morphological segmentation our method shows absolute improvements of 2-6 points F1 over a strong baseline.", "keyphrases": ["morphological segmentation", "semi-markov crf", "manner"]}
+{"id": "dubey-keller-2003-probabilistic", "title": "Probabilistic Parsing for German Using Sister-Head Dependencies", "abstract": "We present a probabilistic parsing model for German trained on the Negra treebank. We observe that existing lexicalized parsing models using head-head dependencies, while successful for English, fail to outperform an unlexicalized baseline model for German.
Learning curves show that this effect is not due to lack of training data. We propose an alternative model that uses sister-head dependencies instead of head-head dependencies. This model outperforms the baseline, achieving a labeled precision and recall of up to 74%. This indicates that sister-head dependencies are more appropriate for treebanks with very flat structures such as Negra.", "keyphrases": ["german", "sister-head dependency", "lexicalization"]}
+{"id": "weber-etal-2019-nlprolog", "title": "NLProlog: Reasoning with Weak Unification for Question Answering in Natural Language", "abstract": "Rule-based models are attractive for various tasks because they inherently lead to interpretable and explainable decisions and can easily incorporate prior knowledge. However, such systems are difficult to apply to problems involving natural language, due to its large linguistic variability. In contrast, neural models can cope very well with ambiguity by learning distributed representations of words and their composition from data, but lead to models that are difficult to interpret. In this paper, we describe a model combining neural networks with logic programming in a novel manner for solving multi-hop reasoning tasks over natural language. Specifically, we propose to use a Prolog prover which we extend to utilize a similarity function over pretrained sentence encoders. We fine-tune the representations for the similarity function via backpropagation. This leads to a system that can apply rule-based reasoning to natural language, and induce domain-specific natural language rules from training data. We evaluate the proposed system on two different question answering tasks, showing that it outperforms two baselines \u2013 BiDAF (Seo et al., 2016a) and FastQA (Weissenborn et al., 2017) on a subset of the WikiHop corpus and achieves competitive results on the MedHop data set (Welbl et al., 2017).", "keyphrases": ["reasoning", "unification", "prolog prover", "nlprolog"]}
+{"id": "sabou-etal-2014-corpus", "title": "Corpus Annotation through Crowdsourcing: Towards Best Practice Guidelines", "abstract": "Crowdsourcing is an emerging collaborative approach that can be used for the acquisition of annotated corpora and a wide range of other linguistic resources. Although the use of this approach is intensifying in all its key genres (paid-for crowdsourcing, games with a purpose, volunteering-based approaches), the community still lacks a set of best-practice guidelines similar to the annotation best practices for traditional, expert-based corpus acquisition. In this paper we focus on the use of crowdsourcing methods for corpus acquisition and propose a set of best practice guidelines based on our own experiences in this area and an overview of related literature. We also introduce GATE Crowd, a plugin of the GATE platform that relies on these guidelines and offers tool support for using crowdsourcing in a more principled and efficient manner.", "keyphrases": ["crowdsourcing", "corpus annotation", "good practice"]}
+{"id": "jia-liang-2017-adversarial", "title": "Adversarial Examples for Evaluating Reading Comprehension Systems", "abstract": "Standard accuracy metrics indicate that reading comprehension systems are making rapid progress, but the extent to which these systems truly understand language remains unclear. To reward systems with real language understanding abilities, we propose an adversarial evaluation scheme for the Stanford Question Answering Dataset (SQuAD).
Our method tests whether systems can answer questions about paragraphs that contain adversarially inserted sentences, which are automatically generated to distract computer systems without changing the correct answer or misleading humans. In this adversarial setting, the accuracy of sixteen published models drops from an average of 75% F1 score to 36%; when the adversary is allowed to add ungrammatical sequences of words, average accuracy on four models decreases further to 7%. We hope our insights will motivate the development of new models that understand language more precisely.", "keyphrases": ["reading comprehension system", "paragraph", "adversarial examples", "distractor sentence", "incorrect answer"]}
+{"id": "zheng-etal-2017-joint", "title": "Joint Extraction of Entities and Relations Based on a Novel Tagging Scheme", "abstract": "Joint extraction of entities and relations is an important task in information extraction. To tackle this problem, we firstly propose a novel tagging scheme that can convert the joint extraction task to a tagging problem. Then, based on our tagging scheme, we study different end-to-end models to extract entities and their relations directly, without identifying entities and relations separately. We conduct experiments on a public dataset produced by distant supervision method and the experimental results show that the tagging based methods are better than most of the existing pipelined and joint learning methods. What's more, the end-to-end model proposed in this paper achieves the best results on the public dataset.", "keyphrases": ["novel tagging scheme", "joint extraction", "relational triple", "fact extraction", "schema"]}
+{"id": "shan-etal-2020-contextual", "title": "A Contextual Hierarchical Attention Network with Adaptive Objective for Dialogue State Tracking", "abstract": "Recent studies in dialogue state tracking (DST) leverage historical information to determine states which are generally represented as slot-value pairs. However, most of them have limitations to efficiently exploit relevant context due to the lack of a powerful mechanism for modeling interactions between the slot and the dialogue history. Besides, existing methods usually ignore the slot imbalance problem and treat all slots indiscriminately, which limits the learning of hard slots and eventually hurts overall performance. In this paper, we propose to enhance the DST through employing a contextual hierarchical attention network to not only discern relevant information at both word level and turn level but also learn contextual representations. We further propose an adaptive objective to alleviate the slot imbalance problem by dynamically adjusting weights of different slots during training. Experimental results show that our approach reaches 52.68% and 58.55% joint accuracy on MultiWOZ 2.0 and MultiWOZ 2.1 datasets respectively and achieves new state-of-the-art performance with considerable improvements (+1.24% and +5.98%).", "keyphrases": ["hierarchical attention network", "adaptive objective", "dialogue state tracking"]}
+{"id": "chang-etal-2008-optimizing", "title": "Optimizing Chinese Word Segmentation for Machine Translation Performance", "abstract": "Previous work has shown that Chinese word segmentation is useful for machine translation to English, yet the way different segmentation strategies affect MT is still poorly understood. In this paper, we demonstrate that optimizing segmentation for an existing segmentation standard does not always yield better MT performance.
We find that other factors such as segmentation consistency and granularity of Chinese \"words\" can be more important for machine translation. Based on these findings, we implement methods inside a conditional random field segmenter that directly optimize segmentation granularity with respect to the MT task, providing an improvement of 0.73 BLEU. We also show that improving segmentation consistency using external lexicon and proper noun features yields a 0.32 BLEU increase.", "keyphrases": ["chinese", "word segmentation", "translation performance", "speech recognition", "tokenization"]}
+{"id": "xie-etal-2020-worldtree", "title": "WorldTree V2: A Corpus of Science-Domain Structured Explanations and Inference Patterns supporting Multi-Hop Inference", "abstract": "Explainable question answering for complex questions often requires combining large numbers of facts to answer a question while providing a human-readable explanation for the answer, a process known as multi-hop inference. Standardized science questions require combining an average of 6 facts, and as many as 16 facts, in order to answer and explain, but most existing datasets for multi-hop reasoning focus on combining only two facts, significantly limiting the ability of multi-hop inference algorithms to learn to generate large inferences. In this work we present the second iteration of the WorldTree project, a corpus of 5,114 standardized science exam questions paired with large detailed multi-fact explanations that combine core scientific knowledge and world knowledge. Each explanation is represented as a lexically-connected \u201cexplanation graph\u201d that combines an average of 6 facts drawn from a semi-structured knowledge base of 9,216 facts across 66 tables. We use this explanation corpus to author a set of 344 high-level science domain inference patterns similar to semantic frames supporting multi-hop inference. Together, these resources provide training data and instrumentation for developing many-fact multi-hop inference models for question answering.", "keyphrases": ["explanation", "multi-hop inference", "worldtree corpus"]}
+{"id": "venugopal-etal-2009-preference", "title": "Preference Grammars: Softening Syntactic Constraints to Improve Statistical Machine Translation", "abstract": "We propose a novel probabilistic synchronous context-free grammar formalism for statistical machine translation, in which syntactic nonterminal labels are represented as \"soft\" preferences rather than as \"hard\" matching constraints. This formalism allows us to efficiently score unlabeled synchronous derivations without forgoing traditional syntactic constraints. Using this score as a feature in a log-linear model, we are able to approximate the selection of the most likely unlabeled derivation. This helps reduce fragmentation of probability across differently labeled derivations of the same translation. It also allows the importance of syntactic preferences to be learned alongside other features (e.g., the language model) and for particular labeling procedures.
We show improvements in translation quality on small and medium sized Chinese-to-English translation tasks.", "keyphrases": ["syntactic constraint", "derivation", "log-linear model", "preference grammar"]} +{"id": "acosta-etal-2011-identification", "title": "Identification and Treatment of Multiword Expressions Applied to Information Retrieval", "abstract": "The extensive use of Multiword Expressions (MWE) in natural language texts prompts more detailed studies that aim for a more adequate treatment of these expressions. A MWE typically expresses concepts and ideas that usually cannot be expressed by a single word. Intuitively, with the appropriate treatment of MWEs, the results of an Information Retrieval (IR) system could be improved. The aim of this paper is to apply techniques for the automatic extraction of MWEs from corpora to index them as a single unit. Experimental results show improvements on the retrieval of relevant documents when identifying MWEs and treating them as a single indexing unit.", "keyphrases": ["treatment", "multiword expressions", "information retrieval", "compositionality"]} +{"id": "chakravarthi-muralidaran-2021-findings", "title": "Findings of the Shared Task on Hope Speech Detection for Equality, Diversity, and Inclusion", "abstract": "Hope is considered significant for the well-being, recuperation and restoration of human life by health professionals. Hope speech reflects the belief that one can discover pathways to their desired objectives and become roused to utilise those pathways. To encourage research in natural language processing towards positive reinforcement approach, we created a hope speech detection dataset. This paper reports on the shared task of hope speech detection for Tamil, English, and Malayalam languages. The shared task was conducted as a part of the EACL 2021 workshop on Language Technology for Equality, Diversity, and Inclusion (LT-EDI-2021). We summarize here the datasets for this challenge which are openly available at , and present an overview of the methods and the results of the competing systems. To the best of our knowledge, this is the first shared task to conduct hope speech detection.", "keyphrases": ["hope speech detection", "inclusion", "dravidian language", "emotion", "religion"]} +{"id": "chen-etal-2017-cost", "title": "Cost Weighting for Neural Machine Translation Domain Adaptation", "abstract": "In this paper, we propose a new domain adaptation technique for neural machine translation called cost weighting, which is appropriate for adaptation scenarios in which a small in-domain data set and a large general-domain data set are available. Cost weighting incorporates a domain classifier into the neural machine translation training algorithm, using features derived from the encoder representation in order to distinguish in-domain from out-of-domain data. Classifier probabilities are used to weight sentences according to their domain similarity when updating the parameters of the neural translation model. We compare cost weighting to two traditional domain adaptation techniques developed for statistical machine translation: data selection and sub-corpus weighting. 
Experiments on two large-data tasks show that both the traditional techniques and our novel proposal lead to significant gains, with cost weighting outperforming the traditional methods.", "keyphrases": ["domain classifier", "cost weighting", "nmt system"]}
+{"id": "lin-bilmes-2011-class", "title": "A Class of Submodular Functions for Document Summarization", "abstract": "We design a class of submodular functions meant for document summarization tasks. These functions each combine two terms, one which encourages the summary to be representative of the corpus, and the other which positively rewards diversity. Critically, our functions are monotone nondecreasing and submodular, which means that an efficient scalable greedy optimization scheme has a constant factor guarantee of optimality. When evaluated on DUC 2004-2007 corpora, we obtain better than existing state-of-the-art results in both generic and query-focused document summarization. Lastly, we show that several well-established methods for document summarization correspond, in fact, to submodular function optimization, adding further evidence that submodular functions are a natural fit for document summarization.", "keyphrases": ["submodularity", "function", "document summarization"]}
+{"id": "straka-etal-2016-udpipe", "title": "UDPipe: Trainable Pipeline for Processing CoNLL-U Files Performing Tokenization, Morphological Analysis, POS Tagging and Parsing", "abstract": "Automatic natural language processing of large texts often presents recurring challenges in multiple languages: even for most advanced tasks, the texts are first processed by basic processing steps \u2013 from tokenization to parsing. We present an extremely simple-to-use tool consisting of one binary and one model (per language), which performs these tasks for multiple languages without the need for any other external data. UDPipe, a pipeline processing CoNLL-U-formatted files, performs tokenization, morphological analysis, part-of-speech tagging, lemmatization and dependency parsing for nearly all treebanks of Universal Dependencies 1.2 (namely, the whole pipeline is currently available for 32 out of 37 treebanks). In addition, the pipeline is easily trainable with training data in CoNLL-U format (and in some cases also with additional raw corpora) and requires minimal linguistic knowledge on the users' part. The training code is also released.", "keyphrases": ["tokenization", "morphological analysis", "pos tagging", "sentence segmentation", "baseline udpipe system"]}
+{"id": "airola-etal-2008-graph", "title": "A Graph Kernel for Protein-Protein Interaction Extraction", "abstract": "In this paper, we propose a graph kernel based approach for the automated extraction of protein-protein interactions (PPI) from scientific literature. In contrast to earlier approaches to PPI extraction, the introduced all-dependency-paths kernel has the capability to consider full, general dependency graphs. We evaluate the proposed method across five publicly available PPI corpora providing the most comprehensive evaluation done for a machine learning based PPI-extraction system. Our method is shown to achieve state-of-the-art performance with respect to comparable evaluations, achieving 56.4 F-score and 84.8 AUC on the AImed corpus. Further, we identify several pitfalls that can make evaluations of PPI-extraction systems incomparable, or even invalid.
These include incorrect cross-validation strategies and problems related to comparing F-score results achieved on different evaluation resources.", "keyphrases": ["graph kernel", "protein-protein interaction extraction", "relation extraction"]} +{"id": "jansen-ustalov-2019-textgraphs", "title": "TextGraphs 2019 Shared Task on Multi-Hop Inference for Explanation Regeneration", "abstract": "While automated question answering systems are increasingly able to retrieve answers to natural language questions, their ability to generate detailed human-readable explanations for their answers is still quite limited. The Shared Task on Multi-Hop Inference for Explanation Regeneration tasks participants with regenerating detailed gold explanations for standardized elementary science exam questions by selecting facts from a knowledge base of semi-structured tables. Each explanation contains between 1 and 16 interconnected facts that form an \u201cexplanation graph\u201d spanning core scientific knowledge and detailed world knowledge. It is expected that successfully combining these facts to generate detailed explanations will require advancing methods in multi-hop inference and information combination, and will make use of the supervised training data provided by the WorldTree explanation corpus. The top-performing system achieved a mean average precision (MAP) of 0.56, substantially advancing the state-of-the-art over a baseline information retrieval model. Detailed extended analyses of all submitted systems showed large relative improvements in accessing the most challenging multi-hop inference problems, while absolute performance remains low, highlighting the difficulty of generating detailed explanations through multi-hop reasoning.", "keyphrases": ["shared task", "multi-hop inference", "explanation regeneration", "knowledge base", "worldtree corpus"]} +{"id": "zhang-etal-2019-aspect", "title": "Aspect-based Sentiment Classification with Aspect-specific Graph Convolutional Networks", "abstract": "Due to their inherent capability in semantic alignment of aspects and their context words, attention mechanism and Convolutional Neural Networks (CNNs) are widely applied for aspect-based sentiment classification. However, these models lack a mechanism to account for relevant syntactical constraints and long-range word dependencies, and hence may mistakenly recognize syntactically irrelevant contextual words as clues for judging aspect sentiment. To tackle this problem, we propose to build a Graph Convolutional Network (GCN) over the dependency tree of a sentence to exploit syntactical information and word dependencies. Based on it, a novel aspect-specific sentiment classification framework is raised. Experiments on three benchmarking collections illustrate that our proposed model has comparable effectiveness to a range of state-of-the-art models, and further demonstrate that both syntactical information and long-range word dependencies are properly captured by the graph convolution structure.", "keyphrases": ["sentiment classification", "convolutional network", "gcn", "tree-structured syntactic information"]} +{"id": "kobus-etal-2017-domain", "title": "Domain Control for Neural Machine Translation", "abstract": "Machine translation systems are very sensitive to the domains they were trained on. Several domain adaptation techniques have already been deeply studied. 
We propose a new technique for neural machine translation (NMT) that we call domain control which is performed at runtime using a unique neural network covering multiple domains. The presented approach shows quality improvements when compared to dedicated domains translating on any of the covered domains and even on out-of-domain data. In addition, model parameters do not need to be re-estimated for each domain, making this effective for real use cases. Evaluation is carried out on English-to-French translation for two different testing scenarios. We first consider the case where an end-user performs translations on a known domain. Secondly, we consider the scenario where the domain is not known and predicted at the sentence level before translating. Results show consistent accuracy improvements for both conditions.", "keyphrases": ["neural machine translation", "domain control", "special token", "source sentence"]}
+{"id": "popescu-2011-studying", "title": "Studying Translationese at the Character Level", "abstract": "This paper presents a set of preliminary experiments which show that identifying translationese is possible with machine learning methods that work at character level, more precisely methods that use string kernels. But caution is necessary because string kernels very easily can introduce confounding factors.", "keyphrases": ["character level", "text analysis task", "recent result"]}
+{"id": "velikovich-etal-2010-viability", "title": "The viability of web-derived polarity lexicons", "abstract": "We examine the viability of building large polarity lexicons semi-automatically from the web. We begin by describing a graph propagation framework inspired by previous work on constructing polarity lexicons from lexical graphs (Kim and Hovy, 2004; Hu and Liu, 2004; Esuli and Sebastiani, 2009; Blair-Goldensohn et al., 2008; Rao and Ravichandran, 2009). We then apply this technique to build an English lexicon that is significantly larger than those previously studied. Crucially, this web-derived lexicon does not require WordNet, part-of-speech taggers, or other language-dependent resources typical of sentiment analysis systems. As a result, the lexicon is not limited to specific word classes -- e.g., adjectives that occur in WordNet -- and in fact contains slang, misspellings, multiword expressions, etc. We evaluate a lexicon derived from English documents, both qualitatively and quantitatively, and show that it provides superior performance to previously studied lexicons, including one derived from WordNet.", "keyphrases": ["polarity lexicon", "web", "word co-occurrence"]}
+{"id": "bean-riloff-2004-unsupervised", "title": "Unsupervised Learning of Contextual Role Knowledge for Coreference Resolution", "abstract": "We present a coreference resolver called BABAR that uses contextual role knowledge to evaluate possible antecedents for an anaphor. BABAR uses information extraction patterns to identify contextual roles and creates four contextual role knowledge sources using unsupervised learning. These knowledge sources determine whether the contexts surrounding an anaphor and antecedent are compatible. BABAR applies a Dempster-Shafer probabilistic model to make resolutions based on evidence from the contextual role knowledge sources as well as general knowledge sources.
Experiments in two domains showed that the contextual role knowledge improved coreference performance, especially on pronouns.", "keyphrases": ["contextual role knowledge", "coreference resolution", "information extraction pattern", "unsupervised learning"]}
+{"id": "qi-etal-2018-cross", "title": "Cross-lingual Lexical Sememe Prediction", "abstract": "Sememes are defined as the minimum semantic units of human languages. As important knowledge sources, sememe-based linguistic knowledge bases have been widely used in many NLP tasks. However, most languages still do not have sememe-based linguistic knowledge bases. Thus we present a task of cross-lingual lexical sememe prediction, aiming to automatically predict sememes for words in other languages. We propose a novel framework to model correlations between sememes and multi-lingual words in low-dimensional semantic space for sememe prediction. Experimental results on real-world datasets show that our proposed model achieves consistent and significant improvements as compared to baseline methods in cross-lingual sememe prediction. The codes and data of this paper are available at .", "keyphrases": ["lexical sememe prediction", "new language", "hownet"]}
+{"id": "liu-etal-2015-dependency", "title": "A Dependency-Based Neural Network for Relation Classification", "abstract": "Previous research on relation classification has verified the effectiveness of using dependency shortest paths or subtrees. In this paper, we further explore how to make full use of the combination of these dependency information. We first propose a new structure, termed augmented dependency path (ADP), which is composed of the shortest dependency path between two entities and the subtrees attached to the shortest path. To exploit the semantic representation behind the ADP structure, we develop dependency-based neural networks (DepNN): a recursive neural network designed to model the subtrees, and a convolutional neural network to capture the most important features on the shortest path. Experiments on the SemEval-2010 dataset show that our proposed method achieves state-of-the-art results.", "keyphrases": ["dependency-based neural network", "relation classification", "augmented dependency path", "convolutional neural network"]}
+{"id": "xu-etal-2009-using", "title": "Using a Dependency Parser to Improve SMT for Subject-Object-Verb Languages", "abstract": "We introduce a novel precedence reordering approach based on a dependency parser to statistical machine translation systems. Similar to other preprocessing reordering approaches, our method can efficiently incorporate linguistic knowledge into SMT systems without increasing the complexity of decoding. For a set of five subject-object-verb (SOV) order languages, we show significant improvements in BLEU scores when translating from English, compared to other reordering approaches, in state-of-the-art phrase-based SMT systems.", "keyphrases": ["dependency parser", "smt system", "sov language"]}
+{"id": "cortis-etal-2017-semeval", "title": "SemEval-2017 Task 5: Fine-Grained Sentiment Analysis on Financial Microblogs and News", "abstract": "This paper discusses the \u201cFine-Grained Sentiment Analysis on Financial Microblogs and News\u201d task as part of SemEval-2017, specifically under the \u201cDetecting sentiment, humour, and truth\u201d theme. This task contains two tracks, where the first one concerns Microblog messages and the second one covers News Statements and Headlines.
The main goal behind both tracks was to predict the sentiment score for each of the mentioned companies/stocks. The sentiment scores for each text instance adopted floating point values in the range of -1 (very negative/bearish) to 1 (very positive/bullish), with 0 designating neutral sentiment. This task attracted a total of 32 participants, with 25 participating in Track 1 and 29 in Track 2.", "keyphrases": ["fine-grained sentiment analysis", "financial microblogs", "news"]} +{"id": "heilman-etal-2007-combining", "title": "Combining Lexical and Grammatical Features to Improve Readability Measures for First and Second Language Texts", "abstract": "This work evaluates a system that uses interpolated predictions of reading difficulty that are based on both vocabulary and grammatical features. The combined approach is compared to individual grammar- and language modeling-based approaches. While the vocabulary-based language modeling approach outperformed the grammar-based approach, grammar-based predictions can be combined using confidence scores with the vocabulary-based predictions to produce more accurate predictions of reading difficulty for both first and second language texts. The results also indicate that grammatical features may play a more important role in second language readability than in first language readability.", "keyphrases": ["grammatical feature", "readability", "second language learner"]} +{"id": "haber-etal-2019-photobook", "title": "The PhotoBook Dataset: Building Common Ground through Visually-Grounded Dialogue", "abstract": "This paper introduces the PhotoBook dataset, a large-scale collection of visually-grounded, task-oriented dialogues in English designed to investigate shared dialogue history accumulating during conversation. Taking inspiration from seminal work on dialogue analysis, we propose a data-collection task formulated as a collaborative game prompting two online participants to refer to images utilising both their visual context as well as previously established referring expressions. We provide a detailed description of the task setup and a thorough analysis of the 2,500 dialogues collected. To further illustrate the novel features of the dataset, we propose a baseline model for reference resolution which uses a simple method to take into account shared information accumulated in a reference chain. Our results show that this information is particularly important to resolve later descriptions and underline the need to develop more sophisticated models of common ground in dialogue interaction.", "keyphrases": ["photobook dataset", "common ground", "visually-grounded dialogue", "dialogue history", "image"]} +{"id": "miller-etal-2016-key", "title": "Key-Value Memory Networks for Directly Reading Documents", "abstract": "Directly reading documents and being able to answer questions from them is an unsolved challenge. To avoid its inherent difficulty, question answering (QA) has been directed towards using Knowledge Bases (KBs) instead, which has proven effective. Unfortunately KBs often suffer from being too restrictive, as the schema cannot support certain types of answers, and too sparse, e.g. Wikipedia contains much more information than Freebase. In this work we introduce a new method, Key-Value Memory Networks, that makes reading documents more viable by utilizing different encodings in the addressing and output stages of the memory read operation. 
To compare using KBs, information extraction or Wikipedia documents directly in a single framework we construct an analysis tool, WikiMovies, a QA dataset that contains raw text alongside a preprocessed KB, in the domain of movies. Our method reduces the gap between all three settings. It also achieves state-of-the-art results on the existing WikiQA benchmark.", "keyphrases": ["memory network", "key-value memory networks", "key"]} +{"id": "collins-roark-2004-incremental", "title": "Incremental Parsing with the Perceptron Algorithm", "abstract": "This paper describes an incremental parsing approach where parameters are estimated using a variant of the perceptron algorithm. A beam-search algorithm is used during both training and decoding phases of the method. The perceptron approach was implemented with the same feature set as that of an existing generative model (Roark, 2001a), and experimental results show that it gives competitive performance to the generative model on parsing the Penn treebank. We demonstrate that training a perceptron model to combine with the generative model during search provides a 2.1 percent F-measure improvement over the generative model alone, to 88.8 percent.", "keyphrases": ["perceptron algorithm", "generative model", "update", "dependency parsing", "competitive accuracy"]} +{"id": "pandia-etal-2021-pragmatic", "title": "Pragmatic competence of pre-trained language models through the lens of discourse connectives", "abstract": "As pre-trained language models (LMs) continue to dominate NLP, it is increasingly important that we understand the depth of language capabilities in these models. In this paper, we target pre-trained LMs' competence in pragmatics, with a focus on pragmatics relating to discourse connectives. We formulate cloze-style tests using a combination of naturally-occurring data and controlled inputs drawn from psycholinguistics. We focus on testing models' ability to use pragmatic cues to predict discourse connectives, models' ability to understand implicatures relating to connectives, and the extent to which models show humanlike preferences regarding temporal dynamics of connectives. We find that although models predict connectives reasonably well in the context of naturally-occurring data, when we control contexts to isolate high-level pragmatic cues, model sensitivity is much lower. Models also do not show substantial humanlike temporal preferences. Overall, the findings suggest that at present, dominant pre-training paradigms do not result in substantial pragmatic competence in our models.", "keyphrases": ["pre-trained language model", "discourse connective", "pragmatic competence"]} +{"id": "balahur-etal-2010-sentiment", "title": "Sentiment Analysis in the News", "abstract": "Recent years have brought a significant growth in the volume of research in sentiment analysis, mostly on highly subjective text types (movie or product reviews). The main difference these texts have with news articles is that their target is clearly defined and unique across the text. Following different annotation efforts and the analysis of the issues encountered, we realised that news opinion mining is different from that of other text types. We identified three subtasks that need to be addressed: definition of the target; separation of the good and bad news content from the good and bad sentiment expressed on the target; and analysis of clearly marked opinion that is expressed explicitly, not needing interpretation or the use of world knowledge. 
Furthermore, we distinguish three different possible views on newspaper articles \u2015 author, reader and text, which have to be addressed differently at the time of analysing sentiment. Given these definitions, we present work on mining opinions about entities in English language news, in which we apply these concepts. Results showed that this idea is more appropriate in the context of news opinion mining and that the approaches taking this into consideration produce a better performance.", "keyphrases": ["product review", "news article", "sentiment analysis", "idiom"]}
+{"id": "li-etal-2017-adversarial", "title": "Adversarial Learning for Neural Dialogue Generation", "abstract": "We apply adversarial training to open-domain dialogue generation, training a system to produce sequences that are indistinguishable from human-generated dialogue utterances. We cast the task as a reinforcement learning problem where we jointly train two systems: a generative model to produce response sequences, and a discriminator\u2014analogous to the human evaluator in the Turing test\u2014to distinguish between the human-generated dialogues and the machine-generated ones. In this generative adversarial network approach, the outputs from the discriminator are used to encourage the system towards more human-like dialogue. Further, we investigate models for adversarial evaluation that use success in fooling an adversary as a dialogue evaluation metric, while avoiding a number of potential pitfalls. Experimental results on several metrics, including adversarial evaluation, demonstrate that the adversarially-trained system generates higher-quality responses than previous baselines.", "keyphrases": ["neural dialogue generation", "adversarial learning", "text generation", "dialogue model"]}
+{"id": "shaar-etal-2020-known", "title": "That is a Known Lie: Detecting Previously Fact-Checked Claims", "abstract": "The recent proliferation of \u201cfake news\u201d has triggered a number of responses, most notably the emergence of several manual fact-checking initiatives. As a result and over time, a large number of fact-checked claims have been accumulated, which increases the likelihood that a new claim in social media or a new statement by a politician might have already been fact-checked by some trusted fact-checking organization, as viral claims often come back after a while in social media, and politicians like to repeat their favorite statements, true or false, over and over again. As manual fact-checking is very time-consuming (and fully automatic fact-checking has credibility issues), it is important to try to save this effort and to avoid wasting time on claims that have already been fact-checked. Interestingly, despite the importance of the task, it has been largely ignored by the research community so far. Here, we aim to bridge this gap. In particular, we formulate the task and we discuss how it relates to, but also differs from, previous work. We further create a specialized dataset, which we release to the research community.
Finally, we present learning-to-rank experiments that demonstrate sizable improvements over state-of-the-art retrieval and textual similarity approaches.", "keyphrases": ["fact-checking", "claim", "misinformation"]} +{"id": "nothman-etal-2009-analysing", "title": "Analysing Wikipedia and Gold-Standard Corpora for NER Training", "abstract": "Named entity recognition (NER) for English typically involves one of three gold standards: MUC, CoNLL, or BBN, all created by costly manual annotation. Recent work has used Wikipedia to automatically create a massive corpus of named entity annotated text. \n \nWe present the first comprehensive cross-corpus evaluation of NER. We identify the causes of poor cross-corpus performance and demonstrate ways of making them more compatible. Using our process, we develop a Wikipedia corpus which outperforms gold standard corpora on cross-corpus evaluation by up to 11%.", "keyphrases": ["wikipedia", "gold-standard corpora", "ner training"]} +{"id": "khalifa-etal-2016-large", "title": "A Large Scale Corpus of Gulf Arabic", "abstract": "Most Arabic natural language processing tools and resources are developed to serve Modern Standard Arabic (MSA), which is the official written language in the Arab World. Some Dialectal Arabic varieties, notably Egyptian Arabic, have received some attention lately and have a growing collection of resources that include annotated corpora and morphological analyzers and taggers. Gulf Arabic, however, lags behind in that respect. In this paper, we present the Gumar Corpus, a large-scale corpus of Gulf Arabic consisting of 110 million words from 1,200 forum novels. We annotate the corpus for sub-dialect information at the document level. We also present results of a preliminary study in the morphological annotation of Gulf Arabic which includes developing guidelines for a conventional orthography. The text of the corpus is publicly browsable through a web interface we developed for it.", "keyphrases": ["gulf arabic", "large-scale corpus", "gulf dialect"]} +{"id": "speriosu-etal-2011-twitter", "title": "Twitter Polarity Classification with Label Propagation over Lexical Links and the Follower Graph", "abstract": "There is high demand for automated tools that assign polarity to microblog content such as tweets (Twitter posts), but this is challenging due to the terseness and informality of tweets in addition to the wide variety and rapid evolution of language in Twitter. It is thus impractical to use standard supervised machine learning techniques dependent on annotated training examples. We do without such annotations by using label propagation to incorporate labels from a maximum entropy classifier trained on noisy labels and knowledge about word types encoded in a lexicon, in combination with the Twitter follower graph. Results on polarity classification for several datasets show that our label propagation approach rivals a model supervised with in-domain annotated tweets, and it outperforms the noisily supervised classifier it exploits as well as a lexicon-based polarity ratio classifier.", "keyphrases": ["polarity classification", "label propagation", "follower graph", "twitter", "n-gram"]} +{"id": "ji-grishman-2008-refining", "title": "Refining Event Extraction through Cross-Document Inference", "abstract": "We apply the hypothesis of \u201cOne Sense Per Discourse\u201d (Yarowsky, 1995) to information extraction (IE), and extend the scope of \u201cdiscourse\u201d from one single document to a cluster of topically-related documents. 
We employ a similar approach to propagate consistent event arguments across sentences and documents. Combining global evidence from related documents with local decisions, we design a simple scheme to conduct cross-document inference for improving the ACE event extraction task. Without using any additional labeled data, this new approach obtained 7.6% higher F-Measure in trigger labeling and 6% higher F-Measure in argument labeling over a state-of-the-art IE system which extracts events independently for each sentence.", "keyphrases": ["event extraction", "cross-document inference", "trigger", "topic-related document", "rule-based approach"]} +{"id": "chen-ji-2009-one", "title": "Can One Language Bootstrap the Other: A Case Study on Event Extraction", "abstract": "This paper proposes a new bootstrapping framework using cross-lingual information projection. We demonstrate that this framework is particularly effective for a challenging NLP task which is situated at the end of a pipeline and thus suffers from the errors propagated from upstream processing and has a low-performance baseline. Using Chinese event extraction as a case study and bitexts as a new source of information, we present three bootstrapping techniques. We first conclude that the standard mono-lingual bootstrapping approach is not so effective. Then we exploit a second approach that potentially benefits from the extra information captured by an English event extraction system and projected into Chinese. Such a cross-lingual scheme produces a significant performance gain. Finally we show that the combination of mono-lingual and cross-lingual information in bootstrapping can further enhance the performance. Ultimately, this new framework obtained 10.1% relative improvement in trigger labeling (F-measure) and 9.5% relative improvement in argument labeling.", "keyphrases": ["bootstrapping", "case study", "event extraction", "extra information", "cross-lingual bootstrapping"]} +{"id": "budzianowski-etal-2018-multiwoz", "title": "MultiWOZ - A Large-Scale Multi-Domain Wizard-of-Oz Dataset for Task-Oriented Dialogue Modelling", "abstract": "Even though machine learning has become the major scene in the dialogue research community, the real breakthrough has been blocked by the scale of data available. To address this fundamental obstacle, we introduce the Multi-Domain Wizard-of-Oz dataset (MultiWOZ), a fully-labeled collection of human-human written conversations spanning over multiple domains and topics. At a size of 10k dialogues, it is at least one order of magnitude larger than all previous annotated task-oriented corpora. The contribution of this work, apart from the open-sourced dataset, is two-fold: firstly, a detailed description of the data collection procedure along with a summary of data structure and analysis is provided. 
The proposed data-collection pipeline is entirely based on crowd-sourcing without the need to hire professional annotators; secondly, a set of benchmark results of belief tracking, dialogue act and response generation is reported, which shows the usability of the data and sets a baseline for future studies.", "keyphrases": ["wizard-of-oz", "task-oriented dialogue", "annotated task-oriented corpora", "multiwoz dataset", "dialogue dataset"]} +{"id": "moore-lewis-2010-intelligent", "title": "Intelligent Selection of Language Model Training Data", "abstract": "We address the problem of selecting non-domain-specific language model training data to build auxiliary language models for use in tasks such as machine translation. Our approach is based on comparing the cross-entropy, according to domain-specific and non-domain-specific language models, for each sentence of the text source used to produce the latter language model. We show that this produces better language models, trained on less data, than both random data selection and two other previously proposed methods.", "keyphrases": ["language model", "intelligent selection", "cross-entropy difference", "in-domain data", "smt system"]} +{"id": "yih-etal-2012-polarity", "title": "Polarity Inducing Latent Semantic Analysis", "abstract": "Existing vector space models typically map synonyms and antonyms to similar word vectors, and thus fail to represent antonymy. We introduce a new vector space representation where antonyms lie on opposite sides of a sphere: in the word vector space, synonyms have cosine similarities close to one, while antonyms are close to minus one. \n \nWe derive this representation with the aid of a thesaurus and latent semantic analysis (LSA). Each entry in the thesaurus -- a word sense along with its synonyms and antonyms -- is treated as a \"document,\" and the resulting document collection is subjected to LSA. The key contribution of this work is to show how to assign signs to the entries in the co-occurrence matrix on which LSA operates, so as to induce a subspace with the desired property. \n \nWe evaluate this procedure with the Graduate Record Examination questions of (Mohammad et al., 2008) and find that the method improves on the results of that study. Further improvements result from refining the subspace representation with discriminative training, and augmenting the training data with general newspaper text. Altogether, we improve on the best previous results by 11 points absolute in F measure.", "keyphrases": ["latent semantic analysis", "thesaurus", "polarity", "pilsa", "negative similarity"]} +{"id": "jiang-etal-2013-discriminative", "title": "Discriminative Learning with Natural Annotations: Word Segmentation as a Case Study", "abstract": "Structural information in web text provides natural annotations for NLP problems such as word segmentation and parsing. In this paper we propose a discriminative learning algorithm to take advantage of the linguistic knowledge in large amounts of natural annotations on the Internet. It utilizes the Internet as an external corpus with massive (although slight and sparse) natural annotations, and enables a classifier to evolve on the large-scale and real-time updated web text. 
With Chinese word segmentation as a case study, experiments show that the segmenter enhanced with the Chinese Wikipedia achieves significant improvement on a series of testing sets from different domains, even with a single classifier and local features.", "keyphrases": ["natural annotation", "case study", "web text", "segmentation accuracy"]} +{"id": "goldwater-mcclosky-2005-improving", "title": "Improving Statistical MT through Morphological Analysis", "abstract": "In statistical machine translation, estimating word-to-word alignment probabilities for the translation model can be difficult due to the problem of sparse data: most words in a given corpus occur at most a handful of times. With a highly inflected language such as Czech, this problem can be particularly severe. In addition, much of the morphological variation seen in Czech words is not reflected in either the morphology or syntax of a language like English. In this work, we show that using morphological analysis to modify the Czech input can improve a Czech-English machine translation system. We investigate several different methods of incorporating morphological information, and show that a system that combines these methods yields the best results. Our final system achieves a BLEU score of .333, as compared to .270 for the baseline word-to-word system.", "keyphrases": ["morphological analysis", "machine translation", "czech", "analyzer", "arabic"]} +{"id": "upadhyay-etal-2018-robust", "title": "Robust Cross-Lingual Hypernymy Detection Using Dependency Context", "abstract": "Cross-lingual Hypernymy Detection involves determining if a word in one language (\u201cfruit\u201d) is a hypernym of a word in another language (\u201cpomme\u201d, i.e., apple in French). The ability to detect hypernymy cross-lingually can aid in solving cross-lingual versions of tasks such as textual entailment and event coreference. We propose BiSparse-Dep, a family of unsupervised approaches for cross-lingual hypernymy detection, which learns sparse, bilingual word embeddings based on dependency contexts. We show that BiSparse-Dep can significantly improve performance on this task, compared to approaches based only on lexical context. Our approach is also robust, showing promise for low-resource settings: our dependency-based embeddings can be learned using a parser trained on related languages, with negligible loss in performance. We also crowd-source a challenging dataset for this task on four languages \u2013 Russian, French, Arabic, and Chinese. Our embeddings and datasets are publicly available.", "keyphrases": ["cross-lingual hypernymy detection", "dependency context", "bilingual word embedding"]} +{"id": "bak-etal-2012-self", "title": "Self-Disclosure and Relationship Strength in Twitter Conversations", "abstract": "In social psychology, it is generally accepted that one discloses more of his/her personal information to someone in a strong relationship. We present a computational framework for automatically analyzing such self-disclosure behavior in Twitter conversations. Our framework uses text mining techniques to discover topics, emotions, sentiments, lexical patterns, as well as personally identifiable information (PII) and personally embarrassing information (PEI). 
Our preliminary results illustrate that in relationships with high relationship strength, Twitter users show significantly more frequent behaviors of self-disclosure.", "keyphrases": ["relationship strength", "twitter conversation", "self-disclosure"]} +{"id": "mostafazadeh-etal-2017-image", "title": "Image-Grounded Conversations: Multimodal Context for Natural Question and Response Generation", "abstract": "The popularity of image sharing on social media and the engagement it creates between users reflect the important role that visual context plays in everyday conversations. We present a novel task, Image Grounded Conversations (IGC), in which natural-sounding conversations are generated about a shared image. To benchmark progress, we introduce a new multiple reference dataset of crowd-sourced, event-centric conversations on images. IGC falls on the continuum between chit-chat and goal-directed conversation models, where visual grounding constrains the topic of conversation to event-driven utterances. Experiments with models trained on social media data show that the combination of visual and textual context enhances the quality of generated conversational turns. In human evaluation, the gap between human performance and that of both neural and retrieval architectures suggests that multi-modal IGC presents an interesting challenge for dialog research.", "keyphrases": ["conversation", "response generation", "agent", "visual dialog"]} +{"id": "novikova-etal-2017-need", "title": "Why We Need New Evaluation Metrics for NLG", "abstract": "The majority of NLG evaluation relies on automatic metrics, such as BLEU. In this paper, we motivate the need for novel, system- and data-independent automatic evaluation methods: We investigate a wide range of metrics, including state-of-the-art word-based and novel grammar-based ones, and demonstrate that they only weakly reflect human judgements of system outputs as generated by data-driven, end-to-end NLG. We also show that metric performance is data- and system-specific. Nevertheless, our results also suggest that automatic metrics perform reliably at system-level and can support system development by finding cases where a system performs poorly.", "keyphrases": ["nlg", "judgement", "human evaluation", "natural language generation", "open research problem"]} +{"id": "li-2011-parsing", "title": "Parsing the Internal Structure of Words: A New Paradigm for Chinese Word Segmentation", "abstract": "Lots of Chinese characters are very productive in that they can form many structured words either as prefixes or as suffixes. Previous research in Chinese word segmentation mainly focused on identifying only the word boundaries without considering the rich internal structures of many words. In this paper we argue that this is unsatisfying in many ways, both practically and theoretically. Instead, we propose that word structures should be recovered in morphological analysis. An elegant approach for doing this is given and the result is shown to be promising enough for encouraging further effort in this direction. 
Our probability model is trained with the Penn Chinese Treebank and is able to parse both word and phrase structures in a unified way.", "keyphrases": ["internal structure", "chinese word segmentation", "cws"]} +{"id": "sun-etal-2021-chinesebert", "title": "ChineseBERT: Chinese Pretraining Enhanced by Glyph and Pinyin Information", "abstract": "Recent pretraining models in Chinese neglect two important aspects specific to the Chinese language: glyph and pinyin, which carry significant syntax and semantic information for language understanding. In this work, we propose ChineseBERT, which incorporates both the glyph and pinyin information of Chinese characters into language model pretraining. The glyph embedding is obtained based on different fonts of a Chinese character, being able to capture character semantics from the visual features, and the pinyin embedding characterizes the pronunciation of Chinese characters, which handles the highly prevalent heteronym phenomenon in Chinese (the same character has different pronunciations with different meanings). Pretrained on a large-scale unlabeled Chinese corpus, the proposed ChineseBERT model yields a significant performance boost over baseline models with fewer training steps. The proposed model achieves new SOTA performances on a wide range of Chinese NLP tasks, including machine reading comprehension, natural language inference, text classification, sentence pair matching, and competitive performances in named entity recognition and word segmentation.", "keyphrases": ["glyph", "pinyin information", "chinese character"]} +{"id": "wang-etal-2014-knowledge", "title": "Knowledge Graph and Text Jointly Embedding", "abstract": "We examine the embedding approach to reason new relational facts from a large-scale knowledge graph and a text corpus. We propose a novel method of jointly embedding entities and words into the same continuous vector space. The embedding process attempts to preserve the relations between entities in the knowledge graph and the co-occurrences of words in the text corpus. Entity names and Wikipedia anchors are utilized to align the embeddings of entities and words in the same space. Large scale experiments on Freebase and a Wikipedia/NY Times corpus show that jointly embedding brings promising improvement in the accuracy of predicting facts, compared to separately embedding knowledge graphs and text. Particularly, jointly embedding enables the prediction of facts containing entities out of the knowledge graph, which cannot be handled by previous embedding methods. At the same time, concerning the quality of the word embeddings, experiments on the analogical reasoning task show that jointly embedding is comparable to or slightly better than word2vec (Skip-Gram).", "keyphrases": ["vector space", "freebase", "knowledge graph", "entity embedding", "link prediction task"]} +{"id": "chiang-2010-learning", "title": "Learning to Translate with Source and Target Syntax", "abstract": "Statistical translation models that try to capture the recursive structure of language have been widely adopted over the last few years. These models make use of varying amounts of information from linguistic theory: some use none at all, some use information about the grammar of the target language, some use information about the grammar of the source language. But progress has been slower on translation models that are able to learn the relationship between the grammars of both the source and target language. 
We discuss the reasons why this has been a challenge, review existing attempts to meet this challenge, and show how some old and new ideas can be combined into a simple approach that uses both source and target syntax for significant improvements in translation accuracy.", "keyphrases": ["target syntax", "translation quality", "syntactic label"]} +{"id": "tsur-rappoport-2007-using", "title": "Using Classifier Features for Studying the Effect of Native Language on the Choice of Written Second Language Words", "abstract": "We apply machine learning techniques to study language transfer, a major topic in the theory of Second Language Acquisition (SLA). Using an SVM for the problem of native language classification, we show that a careful analysis of the effects of various features can lead to scientific insights. In particular, we demonstrate that character bigrams alone allow classification levels of about 66% for a 5-class task, even when content and function word differences are accounted for. This may show that native language has a strong effect on the word choice of people writing in a second language.", "keyphrases": ["native language", "character n-gram", "language writing"]} +{"id": "liu-etal-2020-coach", "title": "Coach: A Coarse-to-Fine Approach for Cross-domain Slot Filling", "abstract": "As an essential task in task-oriented dialog systems, slot filling requires extensive training data in a certain domain. However, such data are not always available. Hence, cross-domain slot filling has naturally arisen to cope with this data scarcity problem. In this paper, we propose a Coarse-to-fine approach (Coach) for cross-domain slot filling. Our model first learns the general pattern of slot entities by detecting whether the tokens are slot entities or not. It then predicts the specific types for the slot entities. In addition, we propose a template regularization approach to improve the adaptation robustness by regularizing the representation of utterances based on utterance templates. Experimental results show that our model significantly outperforms state-of-the-art approaches in slot filling. Furthermore, our model can also be applied to the cross-domain named entity recognition task, and it achieves better adaptation performance than other existing baselines. The code is available at .", "keyphrases": ["coarse-to-fine approach", "cross-domain slot", "coach"]} +{"id": "tetreault-etal-2013-report", "title": "A Report on the First Native Language Identification Shared Task", "abstract": "Native Language Identification (NLI) is the task of automatically identifying the native language (L1) of an individual based on their language production in a learned language. It is typically framed as a classification task where the set of L1s is known a priori. Two previous shared tasks on NLI have been organized where the aim was to identify the L1 of learners of English based on essays (2013) and spoken responses (2016) they provided during a standardized assessment of academic English proficiency. The 2017 shared task combines the inputs from the two prior tasks for the first time. There are three tracks: NLI on the essay only, NLI on the spoken response only (based on a transcription of the response and i-vector acoustic features), and NLI using both responses. We believe this makes for a more interesting shared task while building on the methods and results from the previous two shared tasks. In this paper, we report the results of the shared task. 
A total of 19 teams competed across the three different sub-tasks. The fusion track showed that combining the written and spoken responses provides a large boost in prediction accuracy. Multiple classifier systems (e.g. ensembles and meta-classifiers) were the most effective in all tasks, with most based on traditional classifiers (e.g. SVMs) with lexical/syntactic features.", "keyphrases": ["report", "native language identification", "learner", "nli shared task", "text classification"]} +{"id": "hoang-etal-2018-iterative", "title": "Iterative Back-Translation for Neural Machine Translation", "abstract": "We present iterative back-translation, a method for generating increasingly better synthetic parallel data from monolingual data to train neural machine translation systems. Our proposed method is very simple yet effective and highly applicable in practice. We demonstrate improvements in neural machine translation quality in both high- and low-resource scenarios, including the best reported BLEU scores for the WMT 2017 German\u2194English tasks.", "keyphrases": ["neural machine translation", "monolingual data", "bleu score", "iterative back-translation", "iteration"]} +{"id": "zhang-etal-2021-ambert", "title": "AMBERT: A Pre-trained Language Model with Multi-Grained Tokenization", "abstract": "Pre-trained language models such as BERT have exhibited remarkable performances in many tasks in natural language understanding (NLU). The tokens in the models are usually fine-grained in the sense that for languages like English they are words or sub-words and for languages like Chinese they are characters. In English, for example, there are multi-word expressions which form natural lexical units and thus the use of coarse-grained tokenization also appears to be reasonable. In fact, both fine-grained and coarse-grained tokenizations have advantages and disadvantages for the learning of pre-trained language models. In this paper, we propose a novel pre-trained language model, referred to as AMBERT (A Multi-grained BERT), on the basis of both fine-grained and coarse-grained tokenizations. For English, AMBERT takes both the sequence of words (fine-grained tokens) and the sequence of phrases (coarse-grained tokens) as input after tokenization, employs one encoder for processing the sequence of words and the other encoder for processing the sequence of the phrases, utilizes shared parameters between the two encoders, and finally creates a sequence of contextualized representations of the words and a sequence of contextualized representations of the phrases. Experiments have been conducted on benchmark datasets for Chinese and English, including CLUE, GLUE, SQuAD and RACE. The results show that AMBERT outperforms the existing best-performing models in almost all cases; in particular, the improvements are significant for Chinese.", "keyphrases": ["language model", "tokenization", "chinese", "ambert"]} +{"id": "le-roux-etal-2014-syntactic", "title": "Syntactic Parsing and Compound Recognition via Dual Decomposition: Application to French", "abstract": "In this paper we show how the task of syntactic parsing of non-segmented texts, including compound recognition, can be represented as constraints between phrase-structure parsers and CRF sequence labellers. In order to build a joint system we use dual decomposition, a way to combine several elementary systems which has proven successful in various NLP tasks. We evaluate this proposition on the French SPMRL corpus. 
This method compares favorably with pipeline architectures and improves state-of-the-art results.", "keyphrases": ["compound recognition", "dual decomposition", "syntactic parsing"]} +{"id": "dolan-etal-2004-unsupervised", "title": "Unsupervised Construction of Large Paraphrase Corpora: Exploiting Massively Parallel News Sources", "abstract": "We investigate unsupervised techniques for acquiring monolingual sentence-level paraphrases from a corpus of temporally and topically clustered news articles collected from thousands of web-based news sources. Two techniques are employed: (1) simple string edit distance, and (2) a heuristic strategy that pairs initial (presumably summary) sentences from different news stories in the same cluster. We evaluate both datasets using a word alignment algorithm and a metric borrowed from machine translation. Results show that edit distance data is cleaner and more easily aligned than the heuristic data, with an overall alignment error rate (AER) of 11.58% on a similarly-extracted test set. On test data extracted by the heuristic strategy, however, performance of the two training sets is similar, with AERs of 13.2% and 14.7% respectively. Analysis of 100 pairs of sentences from each set reveals that the edit distance data lacks many of the complex lexical and syntactic alternations that characterize monolingual paraphrase. The summary sentences, while less readily alignable, retain more of the non-trivial alternations that are of greatest interest for learning paraphrase relationships.", "keyphrases": ["paraphrase", "news article", "sentence pair", "mrpc corpus"]} +{"id": "denkowski-lavie-2011-meteor", "title": "Meteor 1.3: Automatic Metric for Reliable Optimization and Evaluation of Machine Translation Systems", "abstract": "This paper describes Meteor 1.3, our submission to the 2011 EMNLP Workshop on Statistical Machine Translation automatic evaluation metric tasks. New metric features include improved text normalization, higher-precision paraphrase matching, and discrimination between content and function words. We include Ranking and Adequacy versions of the metric shown to have high correlation with human judgments of translation quality as well as a more balanced Tuning version shown to outperform BLEU in minimum error rate training for a phrase-based Urdu-English system.", "keyphrases": ["human judgment", "meteor", "monolingual alignment"]} +{"id": "martelli-etal-2021-semeval", "title": "SemEval-2021 Task 2: Multilingual and Cross-lingual Word-in-Context Disambiguation (MCL-WiC)", "abstract": "In this paper, we introduce the first SemEval task on Multilingual and Cross-Lingual Word-in-Context disambiguation (MCL-WiC). This task evaluates the largely under-investigated inherent ability of systems to discriminate between word senses within and across languages, dropping the requirement of a fixed sense inventory. Framed as a binary classification, our task is divided into two parts. In the multilingual sub-task, participating systems are required to determine whether two target words, each occurring in a different context within the same language, express the same meaning or not. Instead, in the cross-lingual part, systems are asked to perform the task in a cross-lingual scenario, in which the two target words and their corresponding contexts are provided in two different languages. 
We illustrate our task, as well as the construction of our manually-created dataset including five languages, namely Arabic, Chinese, English, French and Russian, and the results of the participating systems. Datasets and results are available at: .", "keyphrases": ["word-in-context disambiguation", "semeval task", "different language"]} +{"id": "ji-smith-2017-neural", "title": "Neural Discourse Structure for Text Categorization", "abstract": "We show that discourse structure, as defined by Rhetorical Structure Theory and provided by an existing discourse parser, benefits text categorization. Our approach uses a recursive neural network and a newly proposed attention mechanism to compute a representation of the text that focuses on salient content, from the perspective of both RST and the task. Experiments consider variants of the approach and illustrate its strengths and weaknesses.", "keyphrases": ["discourse structure", "text categorization", "recursive neural network", "sentiment analysis", "downstream task"]} +{"id": "carpuat-wu-2005-word", "title": "Word Sense Disambiguation vs. Statistical Machine Translation", "abstract": "We directly investigate a subject of much recent debate: do word sense disambiguation models help statistical machine translation quality? We present empirical results casting doubt on this common, but unproved, assumption. Using a state-of-the-art Chinese word sense disambiguation model to choose translation candidates for a typical IBM statistical MT system, we find that word sense disambiguation does not yield significantly better translation quality than the statistical machine translation system alone. Error analysis suggests several key factors behind this surprising finding, including inherent limitations of current statistical MT architectures.", "keyphrases": ["translation quality", "word sense disambiguation", "wsd", "smt system", "parallel sentence"]} +{"id": "ma-etal-2018-stack", "title": "Stack-Pointer Networks for Dependency Parsing", "abstract": "We introduce a novel architecture for dependency parsing: stack-pointer networks (StackPtr). Combining pointer networks (Vinyals et al., 2015) with an internal stack, the proposed model first reads and encodes the whole sentence, then builds the dependency tree top-down (from root-to-leaf) in a depth-first fashion. The stack tracks the status of the depth-first search and the pointer networks select one child for the word at the top of the stack at each step. The StackPtr parser benefits from the information of the whole sentence and all previously derived subtree structures, and removes the left-to-right restriction in classical transition-based parsers. Yet the number of steps for building any (non-projective) parse tree is linear in the length of the sentence just as other transition-based parsers, yielding an efficient decoding algorithm with O(n^2) time complexity. We evaluate our model on 29 treebanks spanning 20 languages and different dependency annotation schemas, and achieve state-of-the-art performances on 21 of them.", "keyphrases": ["dependency parsing", "pointer network", "manner", "error propagation", "various neural architecture"]} +{"id": "zhu-etal-2008-active", "title": "Active Learning with Sampling by Uncertainty and Density for Word Sense Disambiguation and Text Classification", "abstract": "This paper addresses two issues of active learning. 
Firstly, to address the problem that uncertainty sampling often fails by selecting outliers, this paper presents a new selective sampling technique, sampling by uncertainty and density (SUD), in which a k-Nearest-Neighbor-based density measure is adopted to determine whether an unlabeled example is an outlier. Secondly, a technique of sampling by clustering (SBC) is applied to build a representative initial training data set for active learning. Finally, we implement a new algorithm of active learning with SUD and SBC techniques. The experimental results from three real-world data sets show that our method outperforms competing methods, particularly at the early stages of active learning.", "keyphrases": ["density", "text classification", "active learning"]} +{"id": "ruder-etal-2018-discriminative", "title": "A Discriminative Latent-Variable Model for Bilingual Lexicon Induction", "abstract": "We introduce a novel discriminative latent-variable model for the task of bilingual lexicon induction. Our model combines the bipartite matching dictionary prior of Haghighi et al. (2008) with a state-of-the-art embedding-based approach. To train the model, we derive an efficient Viterbi EM algorithm. We provide empirical improvements on six language pairs under two metrics and show that the prior theoretically and empirically helps to mitigate the hubness problem. We also demonstrate how previous work may be viewed as a similarly fashioned latent-variable model, albeit with a different prior.", "keyphrases": ["latent-variable model", "bilingual lexicon induction", "hubness problem"]} +{"id": "schmidt-2019-generalization", "title": "Generalization in Generation: A closer look at Exposure Bias", "abstract": "Exposure bias refers to the train-test discrepancy that seemingly arises when an autoregressive generative model uses only ground-truth contexts at training time but generated ones at test time. We separate the contribution of the learning framework and the model to clarify the debate on consequences and review proposed counter-measures. In this light, we argue that generalization is the underlying property to address and propose unconditional generation as its fundamental benchmark. Finally, we combine latent variable modeling with a recent formulation of exploration in reinforcement learning to obtain a rigorous handling of true and generated contexts. Results on language modeling and variational sentence auto-encoding confirm the model's generalization capability.", "keyphrases": ["exposure bias", "train-test discrepancy", "generalization", "mle"]} +{"id": "croce-etal-2011-structured", "title": "Structured Lexical Similarity via Convolution Kernels on Dependency Trees", "abstract": "A central topic in natural language processing is the design of lexical and syntactic features suitable for the target application. In this paper, we study convolution dependency tree kernels for automatic engineering of syntactic and semantic patterns exploiting lexical similarities. We define efficient and powerful kernels for measuring the similarity between dependency structures, whose surface forms of the lexical nodes are in part or completely different. The experiments with such kernels for question classification show unprecedented results, e.g., a 41% error reduction over the former state-of-the-art. 
Additionally, semantic role classification confirms the benefit of semantic smoothing for dependency kernels.", "keyphrases": ["lexical similarity", "convolution kernel", "node", "sptk"]} +{"id": "koehn-hoang-2007-factored", "title": "Factored Translation Models", "abstract": "We present an extension of phrase-based statistical machine translation models that enables the straightforward integration of additional annotation at the word level \u2014 be it linguistic markup or automatically generated word classes. In a number of experiments we show that factored translation models lead to better translation performance, both in terms of automatic scores and in terms of grammatical coherence.", "keyphrases": ["integration", "factored translation models", "linguistic information", "phrase-based model", "feature function"]} +{"id": "peinelt-etal-2020-tbert", "title": "tBERT: Topic Models and BERT Joining Forces for Semantic Similarity Detection", "abstract": "Semantic similarity detection is a fundamental task in natural language understanding. Adding topic information has been useful for previous feature-engineered semantic similarity models as well as neural models for other tasks. There is currently no standard way of combining topics with pretrained contextual representations such as BERT. We propose a novel topic-informed BERT-based architecture for pairwise semantic similarity detection and show that our model improves performance over strong neural baselines across a variety of English language datasets. We find that the addition of topics to BERT helps particularly with resolving domain-specific cases.", "keyphrases": ["topic model", "bert", "semantic similarity detection"]} +{"id": "elsherief-etal-2021-latent", "title": "Latent Hatred: A Benchmark for Understanding Implicit Hate Speech", "abstract": "Hate speech has grown significantly on social media, causing serious consequences for victims of all demographics. Despite much attention being paid to characterize and detect discriminatory speech, most work has focused on explicit or overt hate speech, failing to address a more pervasive form based on coded or indirect language. To fill this gap, this work introduces a theoretically-justified taxonomy of implicit hate speech and a benchmark corpus with fine-grained labels for each message and its implication. We present systematic analyses of our dataset using contemporary baselines to detect and explain implicit hate speech, and we discuss key features that challenge existing models. This dataset will continue to serve as a useful benchmark for understanding this multifaceted issue.", "keyphrases": ["implicit hate speech", "taxonomy", "stereotype"]} +{"id": "snyder-barzilay-2008-unsupervised", "title": "Unsupervised Multilingual Learning for Morphological Segmentation", "abstract": "For centuries, the deep connection between languages has brought about major discoveries about human communication. In this paper we investigate how this powerful source of information can be exploited for unsupervised language learning. In particular, we study the task of morphological segmentation of multiple languages. We present a nonparametric Bayesian model that jointly induces morpheme segmentations of each language under consideration and at the same time identifies cross-lingual morpheme patterns, or abstract morphemes. We apply our model to three Semitic languages: Arabic, Hebrew, and Aramaic, as well as to English. 
Our results demonstrate that learning morphological models in tandem reduces error by up to 24% relative to monolingual models. Furthermore, we provide evidence that our joint model achieves better performance when applied to languages from the same family.", "keyphrases": ["morphological segmentation", "abstract morpheme", "arabic", "hebrew", "unsupervised multilingual learning"]} +{"id": "d-zamora-reina-etal-2022-black", "title": "LSCDiscovery: A shared task on semantic change discovery and detection in Spanish", "abstract": "We present the first shared task on semantic change discovery and detection in Spanish. We create the first dataset of Spanish words manually annotated for semantic change using the DURel framework (Schlechtweg et al., 2018). The task is divided into two phases: 1) graded change discovery, and 2) binary change detection. In addition to introducing a new language for this task, the main novelty with respect to the previous tasks consists in predicting and evaluating changes for all vocabulary words in the corpus. Six teams participated in phase 1 and seven teams in phase 2 of the shared task, and the best system obtained a Spearman rank correlation of 0.735 for phase 1 and an F1 score of 0.735 for phase 2. We describe the systems developed by the competing teams, highlighting the techniques that were particularly useful.", "keyphrases": ["semantic change discovery", "spanish", "change detection"]} +{"id": "kong-etal-2014-constituent", "title": "A Constituent-Based Approach to Argument Labeling with Joint Inference in Discourse Parsing", "abstract": "Discourse parsing is a challenging task and plays a critical role in discourse analysis. In this paper, we focus on labeling full argument spans of discourse connectives in the Penn Discourse Treebank (PDTB). Previous studies cast this task as a linear tagging or subtree extraction problem. In this paper, we propose a novel constituent-based approach to argument labeling, which integrates the advantages of both linear tagging and subtree extraction. In particular, the proposed approach unifies intra- and inter-sentence cases by treating the immediately preceding sentence as a special constituent. Besides, a joint inference mechanism is introduced to incorporate global information across arguments into our constituent-based approach via integer linear programming. Evaluation on PDTB shows significant performance improvements of our constituent-based approach over the best state-of-the-art system. It also shows the effectiveness of our joint inference mechanism in modeling global information across arguments.", "keyphrases": ["constituent-based approach", "discourse parsing", "linear tagging"]} +{"id": "he-etal-2019-towards", "title": "Towards Understanding Neural Machine Translation with Word Importance", "abstract": "Although neural machine translation (NMT) has advanced the state-of-the-art on various language pairs, the interpretability of NMT remains unsatisfactory. In this work, we propose to address this gap by focusing on understanding the input-output behavior of NMT models. Specifically, we measure the word importance by attributing the NMT output to every input word through a gradient-based method. We validate the approach on a couple of perturbation operations, language pairs, and model architectures, demonstrating its superiority in identifying input words with higher influence on translation performance. 
Encouragingly, the calculated importance can serve as an indicator of input words that are under-translated by NMT models. Furthermore, our analysis reveals that words of certain syntactic categories have higher importance, while the categories vary across language pairs, which can inspire better design principles of NMT architectures for multi-lingual translation.", "keyphrases": ["neural machine translation", "word importance", "certain syntactic category"]} +{"id": "yin-neubig-2017-syntactic", "title": "A Syntactic Neural Model for General-Purpose Code Generation", "abstract": "We consider the problem of parsing natural language descriptions into source code written in a general-purpose programming language like Python. Existing data-driven methods treat this problem as a language generation task without considering the underlying syntax of the target programming language. Informed by previous work in semantic parsing, in this paper we propose a novel neural architecture powered by a grammar model to explicitly capture the target syntax as prior knowledge. Experiments find this an effective way to scale up to generation of complex programs from natural language descriptions, achieving state-of-the-art results that well outperform previous code generation and semantic parsing approaches.", "keyphrases": ["code generation", "natural language description", "programming language", "grammar-aware", "top-down decoding"]} +{"id": "talmor-berant-2018-web", "title": "The Web as a Knowledge-Base for Answering Complex Questions", "abstract": "Answering complex questions is a time-consuming activity for humans that requires reasoning and integration of information. Recent work on reading comprehension has made headway in answering simple questions, but tackling complex questions is still an ongoing research challenge. Conversely, semantic parsers have been successful at handling compositionality, but only when the information resides in a target knowledge-base. In this paper, we present a novel framework for answering broad and complex questions, assuming answering simple questions is possible using a search engine and a reading comprehension model. We propose to decompose complex questions into a sequence of simple questions, and compute the final answer from the sequence of answers. To illustrate the viability of our approach, we create a new dataset of complex questions, ComplexWebQuestions, and present a model that decomposes questions and interacts with the web to compute an answer. We empirically demonstrate that question decomposition improves performance from 20.8 precision@1 to 27.5 precision@1 on this new dataset.", "keyphrases": ["web", "complex question", "reasoning"]} +{"id": "koponen-etal-2012-post", "title": "Post-editing time as a measure of cognitive effort", "abstract": "Post-editing machine translations has been attracting increasing attention both as a common practice within the translation industry and as a way to evaluate Machine Translation (MT) quality via edit distance metrics between the MT and its post-edited version. Commonly used metrics such as HTER are limited in that they cannot fully capture the effort required for post-editing. Particularly, the cognitive effort required may vary for different types of errors and may also depend on the context. We suggest post-editing time as a way to assess some of the cognitive effort involved in post-editing. This paper presents two experiments investigating the connection between post-editing time and cognitive effort. 
First, we examine whether sentences with long and short post-editing times involve edits of different levels of difficulty. Second, we study the variability in post-editing time and other statistics among editors.", "keyphrases": ["cognitive effort", "variability", "post-editing time"]} +{"id": "zhang-etal-2019-bridging", "title": "Bridging the Gap between Training and Inference for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) generates target words sequentially in the way of predicting the next word conditioned on the context words. At training time, it predicts with the ground truth words as context while at inference it has to generate the entire sequence from scratch. This discrepancy of the fed context leads to error accumulation along the way. Furthermore, word-level training requires strict matching between the generated sequence and the ground truth sequence which leads to overcorrection over different but reasonable translations. In this paper, we address these issues by sampling context words not only from the ground truth sequence but also from the predicted sequence by the model during training, where the predicted sequence is selected with a sentence-level optimum. Experiment results on Chinese-English and WMT'14 English-German translation tasks demonstrate that our approach can achieve significant improvements on multiple datasets.", "keyphrases": ["neural machine translation", "ground truth word", "training step"]} +{"id": "tang-etal-2014-building", "title": "Building Large-Scale Twitter-Specific Sentiment Lexicon : A Representation Learning Approach", "abstract": "In this paper, we propose to build a large-scale sentiment lexicon from Twitter with a representation learning approach. We cast sentiment lexicon learning as a phrase-level sentiment classification task. The challenges are developing effective feature representation of phrases and obtaining training data with minor manual annotations for building the sentiment classifier. Specifically, we develop a dedicated neural architecture and integrate the sentiment information of text (e.g. sentences or tweets) into its hybrid loss function for learning sentiment-specific phrase embedding (SSPE). The neural network is trained from massive tweets collected with positive and negative emoticons, without any manual annotation. Furthermore, we introduce the Urban Dictionary to expand a small number of sentiment seeds to obtain more training data for building the phrase-level sentiment classifier. We evaluate our sentiment lexicon (TS-Lex) by applying it in a supervised learning framework for Twitter sentiment classification. Experiment results on the benchmark dataset of SemEval 2013 show that TS-Lex yields better performance than previously introduced sentiment lexicons.", "keyphrases": ["sentiment lexicon", "representation learning approach", "twitter", "negative score"]} +{"id": "wu-etal-2018-beyond", "title": "Beyond Error Propagation in Neural Machine Translation: Characteristics of Language Also Matter", "abstract": "Neural machine translation usually adopts autoregressive models and suffers from exposure bias as well as the consequent error propagation problem. Many previous works have discussed the relationship between error propagation and the accuracy drop (i.e., the left part of the translated sentence is often better than its right part in left-to-right decoding models) problem. 
In this paper, we conduct a series of analyses to deeply understand this problem and obtain several interesting findings. (1) The role of error propagation on accuracy drop is overstated in the literature, although it indeed contributes to the accuracy drop problem. (2) Characteristics of a language play a more important role in causing the accuracy drop: the left part of the translation result in a right-branching language (e.g., English) is more likely to be more accurate than its right part, while the right part is more accurate for a left-branching language (e.g., Japanese). Our discoveries are confirmed on different model structures including Transformer and RNN, and in other sequence generation tasks such as text summarization.", "keyphrases": ["error propagation", "neural machine translation", "accuracy drop"]} +{"id": "clark-2003-combining", "title": "Combining Distributional and Morphological Information for Part of Speech Induction", "abstract": "In this paper we discuss algorithms for clustering words into classes from unlabelled text using unsupervised algorithms, based on distributional and morphological information. We show how the use of morphological information can improve the performance on rare words, and that this is robust across a wide range of languages.", "keyphrases": ["morphological information", "pos induction", "suffix"]} +{"id": "reichart-etal-2008-multi", "title": "Multi-Task Active Learning for Linguistic Annotations", "abstract": "We extend the classical single-task active learning (AL) approach. In the multi-task active learning (MTAL) paradigm, we select examples for several annotation tasks rather than for a single one as usually done in the context of AL. We introduce two MTAL metaprotocols, alternating selection and rank combination, and propose a method to implement them in practice. We experiment with a two-task annotation scenario that includes named entity and syntactic parse tree annotations on three different corpora. MTAL outperforms random selection and a stronger baseline, one-sided example selection, in which one task is pursued using AL and the selected examples are provided also to the other task.", "keyphrases": ["active learning", "mtal", "annotation task"]} +{"id": "dong-lapata-2016-language", "title": "Language to Logical Form with Neural Attention", "abstract": "Semantic parsing aims at mapping natural language to machine interpretable meaning representations. Traditional approaches rely on high-quality lexicons, manually-built templates, and linguistic features which are either domain- or representation-specific. In this paper we present a general method based on an attention-enhanced encoder-decoder model. We encode input utterances into vector representations, and generate their logical forms by conditioning the output sequences or trees on the encoding vectors. Experimental results on four datasets show that our approach performs competitively without using hand-engineered features and is easy to adapt across domains and meaning representations.", "keyphrases": ["logical form", "encoder-decoder model", "sequence-to-sequence model", "program", "neural semantic parser"]} +{"id": "pedersen-etal-2004-wordnet", "title": "WordNet::Similarity - Measuring the Relatedness of Concepts", "abstract": "WordNet::Similarity is a freely available software package that makes it possible to measure the semantic similarity and relatedness between a pair of concepts (or synsets). 
It provides six measures of similarity, and three measures of relatedness, all of which are based on the lexical database WordNet. These measures are implemented as Perl modules which take as input two concepts, and return a numeric value that represents the degree to which they are similar or related.", "keyphrases": ["value", "wordnet::similarity", "lcs"]} +{"id": "monsalve-etal-2019-assessing", "title": "Assessing Back-Translation as a Corpus Generation Strategy for non-English Tasks: A Study in Reading Comprehension and Word Sense Disambiguation", "abstract": "Corpora curated by experts have sustained Natural Language Processing mainly in English, but the expense of corpus creation is a barrier to development in other languages. Thus, we propose a corpus generation strategy that only requires a machine translation system between English and the target language in both directions, where we filter the best translations by computing automatic translation metrics and the task performance score. By studying Reading Comprehension in Spanish and Word Sense Disambiguation in Portuguese, we identified that a more quality-oriented metric has high potential in corpus selection without degrading the task performance. We conclude that it is possible to systematise the building of quality corpora using machine translation and automatic metrics, beyond some prior effort to clean and process the data.", "keyphrases": ["corpus generation strategy", "reading comprehension", "word sense disambiguation"]} +{"id": "ribeiro-etal-2021-investigating", "title": "Investigating Pretrained Language Models for Graph-to-Text Generation", "abstract": "Graph-to-text generation aims to generate fluent texts from graph-based data. In this paper, we investigate two recent pretrained language models (PLMs) and analyze the impact of different task-adaptive pretraining strategies for PLMs in graph-to-text generation. We present a study across three graph domains: meaning representations, Wikipedia knowledge graphs (KGs) and scientific KGs. We show that approaches based on PLMs BART and T5 achieve new state-of-the-art results and that task-adaptive pretraining strategies improve their performance even further. We report new state-of-the-art BLEU scores of 49.72 on AMR-LDC2017T10, 59.70 on WebNLG, and 25.66 on AGENDA datasets - a relative improvement of 31.8%, 4.5%, and 42.4%, respectively, with our models generating significantly more fluent texts than human references. In an extensive analysis, we identify possible reasons for the PLMs' success on graph-to-text tasks. Our findings suggest that the PLMs benefit from similar facts seen during pretraining or fine-tuning, such that they perform well even when the input graph is reduced to a simple bag of node and edge labels.", "keyphrases": ["pretrained language models", "graph-to-text generation", "pre-trained model"]} +{"id": "haruechaiyasak-etal-2006-collaborative", "title": "A Collaborative Framework for Collecting Thai Unknown Words from the Web", "abstract": "We propose a collaborative framework for collecting Thai unknown words found on Web pages over the Internet. Our main goal is to design and construct a Web-based system which allows a group of interested users to participate in constructing a Thai unknown-word open dictionary. The proposed framework provides supporting algorithms and tools for automatically identifying and extracting unknown words from Web pages of given URLs. 
The system yields the result of unknown-word candidates which are presented to the users for verification. The approved unknown words could be combined with the set of existing words in the lexicon to improve the performance of many NLP tasks such as word segmentation, information retrieval and machine translation. Our framework includes word segmentation and morphological analysis modules for handling the non-segmenting characteristic of Thai written language. To take advantage of large available text resource on the Web, our unknown-word boundary identification approach is based on the statistical string pattern-matching algorithm.", "keyphrases": ["collaborative framework", "unknown word", "web"]} +{"id": "ljubesic-klubicka-2014-bs", "title": "bs,hr,srWaC - Web Corpora of Bosnian, Croatian and Serbian", "abstract": "In this paper we present the construction process of top-level-domain web corpora of Bosnian, Croatian and Serbian. For constructing the corpora we use the SpiderLing crawler with its associated tools adapted for simultaneous crawling and processing of text written in two scripts, Latin and Cyrillic. In addition to the modified collection process we focus on two sources of noise in the resulting corpora: 1. they contain documents written in the other, closely related languages that can not be identified with standard language identification methods and 2. as most web corpora, they partially contain low-quality data not suitable for the specific research and application objectives. We approach both problems by using language modeling on the crawled data only, omitting the need for manually validated language samples for training. On the task of discriminating between closely related languages we outperform the state-of-the-art Blacklist classifier reducing its error to a fourth.", "keyphrases": ["web corpora", "bosnian", "serbian"]} +{"id": "lin-etal-2015-hierarchical", "title": "Hierarchical Recurrent Neural Network for Document Modeling", "abstract": "This paper proposes a novel hierarchical recurrent neural network language model (HRNNLM) for document modeling. After establishing a RNN to capture the coherence between sentences in a document, HRNNLM integrates it as the sentence history information into the word level RNN to predict the word sequence with cross-sentence contextual information. A two-step training approach is designed, in which sentence-level and word-level language models are approximated for the convergence in a pipeline style. Examined by the standard sentence reordering scenario, HRNNLM is proved for its better accuracy in modeling the sentence coherence. And at the word level, experimental results also indicate a significantly lower model perplexity, followed by a practically better translation result when applied to a Chinese-English document translation reranking task.", "keyphrases": ["document modeling", "network language model", "hierarchical recurrent", "rnnlm"]} +{"id": "lin-etal-2015-unsupervised", "title": "Unsupervised POS Induction with Word Embeddings", "abstract": "Unsupervised word embeddings have been shown to be valuable as features in supervised learning problems; however, their role in unsupervised problems has been less thoroughly explored. In this paper, we show that embeddings can likewise add value to the problem of unsupervised POS induction. &#13;
In two representative models of POS induction, we replace multinomial distributions over the vocabulary with multivariate Gaussian distributions over word embeddings and observe consistent improvements in eight languages. We also analyze the effect of various choices while inducing word embeddings on \"downstream\" POS induction results.", "keyphrases": ["pos induction", "word embedding", "autoencoder"]} +{"id": "garcia-salido-etal-2018-lexical", "title": "A Lexical Tool for Academic Writing in Spanish based on Expert and Novice Corpora", "abstract": "The object of this article is to describe the extraction of data from a corpus of academic texts in Spanish and the use of those data for developing a lexical tool oriented to the production of academic texts. The corpus provides the lexical combinations that will be included in the afore-mentioned tool, namely collocations, idioms and formulas. They have been retrieved from the corpus controlling for their keyness (i.e., their specificity with regard to academic texts) and their even distribution across the corpus. For the extraction of collocations containing academic vocabulary other methods have been used, taking advantage of the morphological and syntactic information with which the corpus has been enriched. In the case of collocations and other multiword units, several association measures are being tested in order to restrict the list of candidates the lexicographers will have to deal with manually.", "keyphrases": ["lexical tool", "academic writing", "spanish"]} +{"id": "wang-etal-2021-automated", "title": "Automated Concatenation of Embeddings for Structured Prediction", "abstract": "Pretrained contextualized embeddings are powerful word representations for structured prediction tasks. Recent work found that better word representations can be obtained by concatenating different types of embeddings. However, the selection of embeddings to form the best concatenated representation usually varies depending on the task and the collection of candidate embeddings, and the ever-increasing number of embedding types makes it a more difficult problem. In this paper, we propose Automated Concatenation of Embeddings (ACE) to automate the process of finding better concatenations of embeddings for structured prediction tasks, based on a formulation inspired by recent progress on neural architecture search. Specifically, a controller alternately samples a concatenation of embeddings, according to its current belief of the effectiveness of individual embedding types in consideration for a task, and updates the belief based on a reward. We follow strategies in reinforcement learning to optimize the parameters of the controller and compute the reward based on the accuracy of a task model, which is fed with the sampled concatenation as input and trained on a task dataset. Empirical results on 6 tasks and 21 datasets show that our approach outperforms strong baselines and achieves state-of-the-art performance with fine-tuned embeddings in all the evaluations.", "keyphrases": ["embeddings", "automated concatenation", "ner task"]} +{"id": "nivre-etal-2016-universal", "title": "Universal Dependencies v1: A Multilingual Treebank Collection", "abstract": "Cross-linguistically consistent annotation is necessary for sound comparative evaluation and cross-lingual learning experiments. It is also useful for multilingual system development and comparative linguistic studies. 
Universal Dependencies is an open community effort to create cross-linguistically consistent treebank annotation for many languages within a dependency-based lexicalist framework. In this paper, we describe v1 of the universal guidelines, the underlying design principles, and the currently available treebanks for 33 languages.", "keyphrases": ["treebank", "universal dependencies", "project", "annotation guideline", "morphological analysis"]} +{"id": "cybulska-vossen-2015-translating", "title": "Translating Granularity of Event Slots into Features for Event Coreference Resolution.", "abstract": "Using clues from event semantics to solve coreference, we present an \u201cevent template\u201d approach to cross-document event coreference resolution on news articles. The approach uses a pairwise model, in which event information is compared along five semantically motivated slots of an event template. The templates, filled in on the sentence level for every event mention from the data set, are used for supervised classification. In this study, we determine granularity of events and we use the grain size as a clue for solving event coreference. We experiment with a newly-created granularity ontology employing granularity levels of locations, times and human participants as well as event durations as features in event coreference resolution. The granularity ontology is available for research. Results show that determining granularity along semantic event slots, even on the sentence level exclusively, improves precision and solves event coreference with scores comparable to those achieved in related work.", "keyphrases": ["granularity", "event slot", "event coreference resolution"]} +{"id": "lin-etal-2020-birds", "title": "Birds have four legs?! NumerSense: Probing Numerical Commonsense Knowledge of Pre-Trained Language Models", "abstract": "Recent works show that pre-trained language models (PTLMs), such as BERT, possess certain commonsense and factual knowledge. They suggest that it is promising to use PTLMs as \u201cneural knowledge bases\u201d via predicting masked words. Surprisingly, we find that this may not work for numerical commonsense knowledge (e.g., a bird usually has two legs). In this paper, we investigate whether and to what extent we can induce numerical commonsense knowledge from PTLMs as well as the robustness of this process. To study this, we introduce a novel probing task with a diagnostic dataset, NumerSense, containing 13.6k masked-word-prediction probes (10.5k for fine-tuning and 3.1k for testing). Our analysis reveals that: (1) BERT and its stronger variant RoBERTa perform poorly on the diagnostic dataset prior to any fine-tuning; (2) fine-tuning with distant supervision brings some improvement; (3) the best supervised model still performs poorly as compared to human performance (54.06% vs. 96.3% in accuracy).", "keyphrases": ["numerical commonsense knowledge", "pre-trained language model", "bird"]} +{"id": "kenter-etal-2016-siamese", "title": "Siamese CBOW: Optimizing Word Embeddings for Sentence Representations", "abstract": "We present the Siamese Continuous Bag of Words (Siamese CBOW) model, a neural network for efficient estimation of high-quality sentence embeddings. Averaging the embeddings of words in a sentence has proven to be a surprisingly successful and efficient way of obtaining sentence embeddings. &#13;
However, word embeddings trained with the methods currently available are not optimized for the task of sentence representation, and, thus, likely to be suboptimal. Siamese CBOW handles this problem by training word embeddings directly for the purpose of being averaged. The underlying neural network learns word embeddings by predicting, from a sentence representation, its surrounding sentences. We show the robustness of the Siamese CBOW model by evaluating it on 20 datasets stemming from a wide variety of sources.", "keyphrases": ["sentence embedding", "siamese cbow", "sum"]} +{"id": "kajiwara-etal-2013-selecting", "title": "Selecting Proper Lexical Paraphrase for Children", "abstract": "We propose a method for acquiring plain lexical paraphrase using a Japanese dictionary in order to achieve lexical simplification for children. The proposed method extracts plain words that are the most similar to the headword from the dictionary definition. The definition statements describe the headword using plain words; therefore, paraphrasing by replacing the headword with the most similar word in the dictionary definition is expected to be an accurate means of lexical simplification. However, it is difficult to determine which word is the most appropriate for the paraphrase. The method proposed in this paper measures the similarity of each word in the definition statements against the headword and selects the one with the closest semantic match for the paraphrase. This method compares favorably with the method that acquires the target word from the end of the definition statements.", "keyphrases": ["lexical paraphrase", "japanese dictionary", "child"]} +{"id": "liebrecht-etal-2013-perfect", "title": "The perfect solution for detecting sarcasm in tweets #not", "abstract": "To avoid a sarcastic message being understood in its unintended literal meaning, in microtexts such as messages on Twitter.com sarcasm is often explicitly marked with the hashtag \u2018#sarcasm\u2019. We collected a training corpus of about 78 thousand Dutch tweets with this hashtag. Assuming that the human labeling is correct (annotation of a sample indicates that about 85% of these tweets are indeed sarcastic), we train a machine learning classifier on the harvested examples, and apply it to a test set of a day\u2019s stream of 3.3 million Dutch tweets. Of the 135 explicitly marked tweets on this day, we detect 101 (75%) when we remove the hashtag. We annotate the top of the ranked list of tweets most likely to be sarcastic that do not have the explicit hashtag. 30% of the top-250 ranked tweets are indeed sarcastic. Analysis shows that sarcasm is often signalled by hyperbole, using intensifiers and exclamations; in contrast, non-hyperbolic sarcastic messages often receive an explicit marker. We hypothesize that explicit markers such as hashtags are the digital extralinguistic equivalent of nonverbal expressions that people employ in live interaction when conveying sarcasm.", "keyphrases": ["sarcasm", "hashtag", "n-gram", "emoticon"]} +{"id": "bernhard-gurevych-2008-answering", "title": "Answering Learners' Questions by Retrieving Question Paraphrases from Social Q&A Sites", "abstract": "Information overload is a well-known problem which can be particularly detrimental to learners. In this paper, we propose a method to support learners in the information seeking process which consists in answering their questions by retrieving question paraphrases and their corresponding answers from social Q&A sites. 
Given the novelty of this kind of data, it is crucial to get a better understanding of how questions in social Q&A sites can be automatically analysed and retrieved. We discuss and evaluate several pre-processing strategies and question similarity metrics, using a new question paraphrase corpus collected from the WikiAnswers Q&A site. The results show that viable performance levels of more than 80% accuracy can be obtained for the task of question paraphrase retrieval.", "keyphrases": ["question paraphrase", "social q&a site", "wikianswers repository"]} +{"id": "xu-etal-2004-need", "title": "Do We Need Chinese Word Segmentation for Statistical Machine Translation?", "abstract": "In Chinese texts, words are not separated by white spaces. This is problematic for many natural language processing tasks. The standard approach is to segment the Chinese character sequence into words. Here, we investigate Chinese word segmentation for statistical machine translation. We pursue two goals: the first one is the maximization of the final translation quality; the second is the minimization of the manual effort for building a translation system. The commonly used method for getting the word boundaries is based on a word segmentation tool and a predefined monolingual dictionary. To avoid the dependence of the translation system on an external dictionary, we have developed a system that learns a domain-specific dictionary from the parallel training corpus. This method produces results that are comparable with the predefined dictionary. Furthermore, our translation system is able to work without word segmentation with only a minor loss in translation quality.", "keyphrases": ["word segmentation", "chinese character", "parallel training corpus"]} +{"id": "zhao-etal-2017-men", "title": "Men Also Like Shopping: Reducing Gender Bias Amplification using Corpus-level Constraints", "abstract": "Language is increasingly being used to define rich visual recognition problems with supporting image collections sourced from the web. Structured prediction models are used in these tasks to take advantage of correlations between co-occurring labels and visual input but risk inadvertently encoding social biases found in web corpora. In this work, we study data and models associated with multilabel object classification and visual semantic role labeling. We find that (a) datasets for these tasks contain significant gender bias and (b) models trained on these datasets further amplify existing bias. For example, the activity cooking is over 33% more likely to involve females than males in a training set, and a trained model further amplifies the disparity to 68% at test time. We propose to inject corpus-level constraints for calibrating existing structured prediction models and design an algorithm based on Lagrangian relaxation for collective inference. Our method results in almost no performance loss for the underlying recognition task but decreases the magnitude of bias amplification by 47.5% and 40.5% for multilabel classification and visual semantic role labeling, respectively.", "keyphrases": ["gender bias", "corpus-level constraint", "semantic role labeling", "man", "social group"]} +{"id": "han-baldwin-2011-lexical", "title": "Lexical Normalisation of Short Text Messages: Makn Sens a #twitter", "abstract": "Twitter provides access to large volumes of data in real time, but is notoriously noisy, hampering its utility for NLP. &#13;
In this paper, we target out-of-vocabulary words in short text messages and propose a method for identifying and normalising ill-formed words. Our method uses a classifier to detect ill-formed words, and generates correction candidates based on morphophonemic similarity. Both word similarity and context are then exploited to select the most probable correction candidate for the word. The proposed method doesn't require any annotations, and achieves state-of-the-art performance over an SMS corpus and a novel dataset based on Twitter.", "keyphrases": ["short text message", "twitter", "ill-formed word", "lexical normalisation", "normalization"]} +{"id": "ling-etal-2016-latent", "title": "Latent Predictor Networks for Code Generation", "abstract": "Many language generation tasks require the production of text conditioned on both structured and unstructured inputs. We present a novel neural network architecture which generates an output sequence conditioned on an arbitrary number of input functions. Crucially, our approach allows both the choice of conditioning context and the granularity of generation, for example characters or tokens, to be marginalised, thus permitting scalable and effective training. Using this framework, we address the problem of generating programming code from a mixed natural language and structured specification. We create two new data sets for this paradigm derived from the collectible trading card games Magic the Gathering and Hearthstone. On these, and a third preexisting corpus, we demonstrate that marginalising multiple predictors allows our model to outperform strong benchmarks.", "keyphrases": ["code generation", "sequence-to-sequence model", "java", "python", "programming language"]} +{"id": "fadaee-etal-2017-data", "title": "Data Augmentation for Low-Resource Neural Machine Translation", "abstract": "The quality of a Neural Machine Translation system depends substantially on the availability of sizable parallel corpora. For low-resource language pairs this is not the case, resulting in poor translation quality. Inspired by work in computer vision, we propose a novel data augmentation approach that targets low-frequency words by generating new sentence pairs containing rare words in new, synthetically created contexts. Experimental results on simulated low-resource settings show that our method improves translation quality by up to 2.9 BLEU points over the baseline and up to 3.2 BLEU over back-translation.", "keyphrases": ["rare word", "data augmentation", "parallel training data", "synthetic sentence pair"]} +{"id": "vilar-etal-2006-error", "title": "Error Analysis of Statistical Machine Translation Output", "abstract": "Evaluation of automatic translation output is a difficult task. Several performance measures like Word Error Rate, Position Independent Word Error Rate and the BLEU and NIST scores are widely used and provide a useful tool for comparing different systems and to evaluate improvements within a system. However the interpretation of all of these measures is not at all clear, and the identification of the most prominent source of errors in a given system using these measures alone is not possible. Therefore some analysis of the generated translations is needed in order to identify the main problems and to focus the research efforts. This area is however mostly unexplored and few works have dealt with it until now. &#13;
In this paper we will present a framework for classification of the errors of a machine translation system and we will carry out an error analysis of the system used by the RWTH in the first TC-STAR evaluation.", "keyphrases": ["rwth", "error analysis", "typology", "missing word", "human evaluation"]} +{"id": "mishra-etal-2016-leveraging", "title": "Leveraging Cognitive Features for Sentiment Analysis", "abstract": "Sentiments expressed in user-generated short text and sentences are nuanced by subtleties at lexical, syntactic, semantic and pragmatic levels. To address this, we propose to augment traditional features used for sentiment analysis and sarcasm detection, with cognitive features derived from the eye-movement patterns of readers. Statistical classification using our enhanced feature set improves the performance (F-score) of polarity detection by a maximum of 3.7% and 9.3% on two datasets, over the systems that use only traditional features. We perform feature significance analysis, and experiment on a held-out dataset, showing that cognitive features indeed empower sentiment analyzers to handle complex constructs.", "keyphrases": ["cognitive feature", "sentiment analysis", "sarcasm detection"]} +{"id": "romanov-shivade-2018-lessons", "title": "Lessons from Natural Language Inference in the Clinical Domain", "abstract": "State of the art models using deep neural networks have become very good in learning an accurate mapping from inputs to outputs. However, they still lack generalization capabilities in conditions that differ from the ones encountered during training. This is even more challenging in specialized, and knowledge intensive domains, where training data is limited. To address this gap, we introduce MedNLI - a dataset annotated by doctors, performing a natural language inference task (NLI), grounded in the medical history of patients. We present strategies to: 1) leverage transfer learning using datasets from the open domain, (e.g. SNLI) and 2) incorporate domain knowledge from external data and lexical sources (e.g. medical terminologies). Our results demonstrate performance gains using both strategies.", "keyphrases": ["natural language inference", "clinical domain", "mednli", "history", "open domain model"]} +{"id": "schoenmackers-etal-2010-learning", "title": "Learning First-Order Horn Clauses from Web Text", "abstract": "Even the entire Web corpus does not explicitly answer all questions, yet inference can uncover many implicit answers. But where do inference rules come from? \n \nThis paper investigates the problem of learning inference rules from Web text in an unsupervised, domain-independent manner. The Sherlock system, described herein, is a first-order learner that acquires over 30,000 Horn clauses from Web text. Sherlock embodies several innovations, including a novel rule scoring function based on Statistical Relevance (Salmon et al., 1971) which is effective on ambiguous, noisy and incomplete Web extractions. Our experiments show that inference over the learned rules discovers three times as many facts (at precision 0.8) as the TextRunner system which merely extracts facts explicitly stated in Web text.", "keyphrases": ["horn clause", "web text", "inference rule", "entailment rule"]} +{"id": "tandon-etal-2019-wiqa", "title": "WIQA: A dataset for \u201cWhat if...\u201d reasoning over procedural text", "abstract": "We introduce WIQA, the first large-scale dataset of \u201cWhat if...\u201d questions over procedural text. 
WIQA contains a collection of paragraphs, each annotated with multiple influence graphs describing how one change affects another, and a large (40k) collection of \u201cWhat if...?\u201d multiple-choice questions derived from these. For example, given a paragraph about beach erosion, would stormy weather hasten or decelerate erosion? WIQA contains three kinds of questions: perturbations to steps mentioned in the paragraph; external (out-of-paragraph) perturbations requiring commonsense knowledge; and irrelevant (no effect) perturbations. We find that state-of-the-art models achieve 73.8% accuracy, well below the human performance of 96.3%. We analyze the challenges, in particular tracking chains of influences, and present the dataset as an open challenge to the community.", "keyphrases": ["reasoning", "procedural text", "large-scale dataset", "wiqa"]} +{"id": "ebrahimi-etal-2018-adversarial", "title": "On Adversarial Examples for Character-Level Neural Machine Translation", "abstract": "Evaluating on adversarial examples has become a standard procedure to measure robustness of deep learning models. Due to the difficulty of creating white-box adversarial examples for discrete text input, most analyses of the robustness of NLP models have been done through black-box adversarial examples. We investigate adversarial examples for character-level neural machine translation (NMT), and contrast black-box adversaries with a novel white-box adversary, which employs differentiable string-edit operations to rank adversarial changes. We propose two novel types of attacks which aim to remove or change a word in a translation, rather than simply break the NMT. We demonstrate that white-box adversarial examples are significantly stronger than their black-box counterparts in different attack scenarios, which show more serious vulnerabilities than previously known. In addition, after performing adversarial training, which takes only 3 times longer than regular training, we can improve the model's robustness significantly.", "keyphrases": ["adversarial example", "neural machine translation", "perturbation", "character-level nmt", "token level"]} +{"id": "miura-etal-2016-selecting", "title": "Selecting Syntactic, Non-redundant Segments in Active Learning for Machine Translation", "abstract": "Active learning is a framework that makes it possible to efficiently train statistical models by selecting informative examples from a pool of unlabeled data. Previous work has found this framework effective for machine translation (MT), making it possible to train better translation models with less effort, particularly when annotators translate short phrases instead of full sentences. However, previous methods for phrase-based active learning in MT fail to consider whether the selected units are coherent and easy for human translators to translate, and also have problems with selecting redundant phrases with similar content. In this paper, we tackle these problems by proposing two new methods for selecting more syntactically coherent and less redundant segments in active learning for MT. 
Experiments using both simulation and extensive manual translation by professional translators find the proposed method effective, achieving both greater gain of BLEU score for the same number of translated words, and allowing translators to be more confident in their translations.", "keyphrases": ["segment", "active learning", "machine translation"]} +{"id": "alansary-nagi-2014-international", "title": "The International Corpus of Arabic: Compilation, Analysis and Evaluation", "abstract": "This paper focuses on a project for building the first International Corpus of Arabic (ICA). It is planned to contain 100 million analyzed tokens with an interface which allows users to interact with the corpus data in a number of ways [ICA website]. ICA is a representative corpus of Arabic that has been initiated in 2006, it is intended to cover the Modern Standard Arabic (MSA) language as being used all over the Arab world. ICA has been analyzed by Bibliotheca Alexandrina Morphological Analysis Enhancer (BAMAE). BAMAE is based on Buckwalter Arabic Morphological Analyzer (BAMA). Precision and Recall are the evaluation measures used to evaluate the BAMAE system. At this point, Precision measurement ranges from 95%-92% while recall measurement was 92%-89%. This depends on the number of qualifiers retrieved for every word. The percentages are expected to rise by implementing the improvements while working on larger amounts of data.", "keyphrases": ["international corpus", "arabic", "ica"]} +{"id": "liu-etal-2013-additive", "title": "Additive Neural Networks for Statistical Machine Translation", "abstract": "Most statistical machine translation (SMT) systems are modeled using a log-linear framework. Although the log-linear model achieves success in SMT, it still suffers from some limitations: (1) the features are required to be linear with respect to the model itself; (2) features cannot be further interpreted to reach their potential. A neural network is a reasonable method to address these pitfalls. However, modeling SMT with a neural network is not trivial, especially when taking the decoding efficiency into consideration. In this paper, we propose a variant of a neural network, i.e. additive neural networks, for SMT to go beyond the log-linear translation model. In addition, word embedding is employed as the input to the neural network, which encodes each word as a feature vector. Our model outperforms the log-linear translation models with/without embedding features on Chinese-to-English and Japanese-to-English translation tasks.", "keyphrases": ["statistical machine translation", "log-linear model", "additive neural network"]} +{"id": "vlachos-riedel-2014-fact", "title": "Fact Checking: Task definition and dataset construction", "abstract": "In this paper we introduce the task of fact checking, i.e. the assessment of the truthfulness of a claim. The task is commonly performed manually by journalists verifying the claims made by public figures. Furthermore, ordinary citizens need to assess the truthfulness of the increasing volume of statements they consume. Thus, developing fact checking systems is likely to be of use to various members of society. We first define the task and detail the construction of a publicly available dataset using statements fact-checked by journalists available online. Then, we discuss baseline approaches for the task and the challenges that need to be addressed. &#13;
Finally, we discuss how fact checking relates to mainstream natural language processing tasks and can stimulate further research.", "keyphrases": ["fact checking", "website", "verdict", "news detection", "assignment"]} +{"id": "elming-habash-2009-syntactic", "title": "Syntactic Reordering for English-Arabic Phrase-Based Machine Translation", "abstract": "We investigate syntactic reordering within an English to Arabic translation task. We extend a pre-translation syntactic reordering approach developed on a close language pair (English-Danish) to the distant language pair, English-Arabic. We achieve significant improvements in translation quality over related approaches, measured by manual as well as automatic evaluations. These results prove the viability of this approach for distant languages.", "keyphrases": ["arabic", "syntactic reordering", "rich language"]} +{"id": "ning-etal-2018-multi", "title": "A Multi-Axis Annotation Scheme for Event Temporal Relations", "abstract": "Existing temporal relation (TempRel) annotation schemes often have low inter-annotator agreements (IAA) even between experts, suggesting that the current annotation task needs a better definition. This paper proposes a new multi-axis modeling to better capture the temporal structure of events. In addition, we identify that event end-points are a major source of confusion in annotation, so we also propose to annotate TempRels based on start-points only. A pilot expert annotation effort using the proposed scheme shows significant improvement in IAA from the conventional 60's to 80's (Cohen's Kappa). This better-defined annotation scheme further enables the use of crowdsourcing to alleviate the labor intensity for each annotator. We hope that this work can foster more interesting studies towards event understanding.", "keyphrases": ["annotation scheme", "temporal relation", "start-point"]} +{"id": "elliott-keller-2013-image", "title": "Image Description using Visual Dependency Representations", "abstract": "Describing the main event of an image involves identifying the objects depicted and predicting the relationships between them. Previous approaches have represented images as unstructured bags of regions, which makes it difficult to accurately predict meaningful relationships between regions. In this paper, we introduce visual dependency representations to capture the relationships between the objects in an image, and hypothesize that this representation can improve image description. We test this hypothesis using a new data set of region-annotated images, associated with visual dependency representations and gold-standard descriptions. We describe two template-based description generation models that operate over visual dependency representations. In an image description task, we find that these models outperform approaches that rely on object proximity or corpus information to generate descriptions on both automatic measures and on human judgements.", "keyphrases": ["visual dependency representation", "image description", "spatial relation", "caption", "preposition"]} +{"id": "wachsmuth-etal-2017-computational", "title": "Computational Argumentation Quality Assessment in Natural Language", "abstract": "Research on computational argumentation faces the problem of how to automatically assess the quality of an argument or argumentation. While different quality dimensions have been approached in natural language processing, a common understanding of argumentation quality is still missing. 
This paper presents the first holistic work on computational argumentation quality in natural language. We comprehensively survey the diverse existing theories and approaches to assess logical, rhetorical, and dialectical quality dimensions, and we derive a systematic taxonomy from these. In addition, we provide a corpus with 320 arguments, annotated for all 15 dimensions in the taxonomy. Our results establish a common ground for research on computational argumentation quality assessment.", "keyphrases": ["argumentation", "dialectical quality dimension", "taxonomy", "reasonableness"]} +{"id": "ghosh-etal-2018-sarcasm", "title": "Sarcasm Analysis Using Conversation Context", "abstract": "Computational models for sarcasm detection have often relied on the content of utterances in isolation. However, the speaker's sarcastic intent is not always apparent without additional context. Focusing on social media discussions, we investigate three issues: (1) does modeling conversation context help in sarcasm detection? (2) can we identify what part of conversation context triggered the sarcastic reply? and (3) given a sarcastic post that contains multiple sentences, can we identify the specific sentence that is sarcastic? To address the first issue, we investigate several types of Long Short-Term Memory (LSTM) networks that can model both the conversation context and the current turn. We show that LSTM networks with sentence-level attention on context and current turn, as well as the conditional LSTM network, outperform the LSTM model that reads only the current turn. As conversation context, we consider the prior turn, the succeeding turn, or both. Our computational models are tested on two types of social media platforms: Twitter and discussion forums. We discuss several differences between these data sets, ranging from their size to the nature of the gold-label annotations. To address the latter two issues, we present a qualitative analysis of the attention weights produced by the LSTM models (with attention) and discuss the results compared with human performance on the two tasks.", "keyphrases": ["conversation context", "sentence-level attention", "sarcasm analysis"]} +{"id": "dalvi-etal-2018-tracking", "title": "Tracking State Changes in Procedural Text: a Challenge Dataset and Models for Process Paragraph Comprehension", "abstract": "We present a new dataset and models for comprehending paragraphs about processes (e.g., photosynthesis), an important genre of text describing a dynamic world. The new dataset, ProPara, is the first to contain natural (rather than machine-generated) text about a changing world along with a full annotation of entity states (location and existence) during those changes (81k datapoints). The end-task, tracking the location and existence of entities through the text, is challenging because the causal effects of actions are often implicit and need to be inferred. We find that previous models that have worked well on synthetic data achieve only mediocre performance on ProPara, and introduce two new neural models that exploit alternative mechanisms for state prediction, in particular using LSTM input encoding and span prediction. The new models improve accuracy by up to 19%. 
We are releasing the ProPara dataset and our models to the community.", "keyphrases": ["state change", "procedural text", "paragraph", "scientific process"]} +{"id": "muller-etal-2019-enhancing", "title": "Enhancing BERT for Lexical Normalization", "abstract": "Language model-based pre-trained representations have become ubiquitous in natural language processing. They have been shown to significantly improve the performance of neural models on a great variety of tasks. However, it remains unclear how useful those general models can be in handling non-canonical text. In this article, focusing on User Generated Content (UGC), we study the ability of BERT to perform lexical normalisation. Our contribution is simple: by framing lexical normalisation as a token prediction task, by enhancing its architecture and by carefully fine-tuning it, we show that BERT can be a competitive lexical normalisation model without the need of any UGC resources aside from 3,000 training sentences. To the best of our knowledge, it is the first work done in adapting and analysing the ability of this model to handle noisy UGC data.", "keyphrases": ["bert", "lexical normalization", "ugc"]} +{"id": "jia-etal-2018-modeling", "title": "Modeling discourse cohesion for discourse parsing via memory network", "abstract": "Identifying long-span dependencies between discourse units is crucial to improve discourse parsing performance. Most existing approaches design sophisticated features or exploit various off-the-shelf tools, but achieve little success. In this paper, we propose a new transition-based discourse parser that makes use of memory networks to take discourse cohesion into account. The automatically captured discourse cohesion benefits discourse parsing, especially for long span scenarios. Experiments on the RST discourse treebank show that our method outperforms traditional feature-based methods, and the memory based discourse cohesion can improve the overall parsing performance significantly.", "keyphrases": ["discourse cohesion", "memory network", "transition-based discourse parser"]} +{"id": "kim-etal-2014-composite", "title": "A Composite Kernel Approach for Dialog Topic Tracking with Structured Domain Knowledge from Wikipedia", "abstract": "Dialog topic tracking aims at analyzing and maintaining topic transitions in ongoing dialogs. This paper proposes a composite kernel approach for dialog topic tracking to utilize various types of domain knowledge obtained from Wikipedia. Two kernels are defined based on history sequences and context trees constructed based on the extracted features. The experimental results show that our composite kernel approach can significantly improve the performances of topic tracking in mixed-initiative human-human dialogs.", "keyphrases": ["dialog topic tracking", "domain knowledge", "wikipedia"]} +{"id": "geng-etal-2019-induction", "title": "Induction Networks for Few-Shot Text Classification", "abstract": "Text classification tends to struggle when data is deficient or when it needs to adapt to unseen classes. In such challenging scenarios, recent studies have used meta-learning to simulate the few-shot task, in which new queries are compared to a small support set at the sample-wise level. However, this sample-wise comparison may be severely disturbed by the various expressions in the same class. Therefore, we should be able to learn a general representation of each class in the support set and then compare it to new queries. &#13;
In this paper, we propose a novel Induction Network to learn such a generalized class-wise representation, by innovatively leveraging the dynamic routing algorithm in meta-learning. In this way, we find the model is able to induce and generalize better. We evaluate the proposed model on a well-studied sentiment classification dataset (English) and a real-world dialogue intent classification dataset (Chinese). Experiment results show that on both datasets, the proposed model significantly outperforms the existing state-of-the-art approaches, proving the effectiveness of class-wise generalization in few-shot text classification.", "keyphrases": ["text classification", "induction networks", "few-shot learning", "new class", "nlp community"]} +{"id": "galley-etal-2006-scalable", "title": "Scalable Inference and Training of Context-Rich Syntactic Translation Models", "abstract": "Statistical MT has made great progress in the last few years, but current translation models are weak on re-ordering and target language fluency. Syntactic approaches seek to remedy these problems. In this paper, we take the framework for acquiring multi-level syntactic translation rules of (Galley et al., 2004) from aligned tree-string pairs, and present two main extensions of their approach: first, instead of merely computing a single derivation that minimally explains a sentence pair, we construct a large number of derivations that include contextually richer rules, and account for multiple interpretations of unaligned words. Second, we propose probability estimates and a training procedure for weighting these rules. We contrast different approaches on real examples, show that our estimates based on multiple derivations favor phrasal re-orderings that are linguistically better motivated, and establish that our larger rules provide a 3.63 BLEU point increase over minimal rules.", "keyphrases": ["translation model", "smt system", "syntax-based model", "string-to-tree model", "artificial constituent node"]} +{"id": "hu-etal-2018-shot", "title": "Few-Shot Charge Prediction with Discriminative Legal Attributes", "abstract": "Automatic charge prediction aims to predict the final charges according to the fact descriptions in criminal cases and plays a crucial role in legal assistant systems. Existing works on charge prediction perform adequately on those high-frequency charges but are not yet capable of predicting few-shot charges with limited cases. Moreover, there exist many confusing charge pairs, whose fact descriptions are fairly similar to each other. To address these issues, we introduce several discriminative attributes of charges as the internal mapping between fact descriptions and charges. These attributes provide additional information for few-shot charges, as well as effective signals for distinguishing confusing charges. More specifically, we propose an attribute-attentive charge prediction model to infer the attributes and charges simultaneously. Experimental results on real-world datasets demonstrate that our proposed model achieves significant and consistent improvements over other state-of-the-art baselines. Specifically, our model outperforms other baselines by more than 50% in the few-shot scenario. &#13;
Our codes and datasets can be obtained from .", "keyphrases": ["charge prediction", "legal attribute", "discriminative attribute"]} +{"id": "zhang-etal-2018-learning-summarize", "title": "Learning to Summarize Radiology Findings", "abstract": "The Impression section of a radiology report summarizes crucial radiology findings in natural language and plays a central role in communicating these findings to physicians. However, the process of generating impressions by summarizing findings is time-consuming for radiologists and prone to errors. We propose to automate the generation of radiology impressions with neural sequence-to-sequence learning. We further propose a customized neural model for this task which learns to encode the study background information and use this information to guide the decoding process. On a large dataset of radiology reports collected from actual hospital studies, our model outperforms existing non-neural and neural baselines under the ROUGE metrics. In a blind experiment, a board-certified radiologist indicated that 67% of sampled system summaries are at least as good as the corresponding human-written summaries, suggesting significant clinical validity. To our knowledge our work represents the first attempt in this direction.", "keyphrases": ["summarization", "radiology finding", "reference", "medical knowledge"]} +{"id": "rei-etal-2020-comet", "title": "COMET: A Neural Framework for MT Evaluation", "abstract": "We present COMET, a neural framework for training multilingual machine translation evaluation models which obtains new state-of-the-art levels of correlation with human judgements. Our framework leverages recent breakthroughs in cross-lingual pretrained language modeling resulting in highly multilingual and adaptable MT evaluation models that exploit information from both the source input and a target-language reference translation in order to more accurately predict MT quality. To showcase our framework, we train three models with different types of human judgements: Direct Assessments, Human-mediated Translation Edit Rate and Multidimensional Quality Metric. Our models achieve new state-of-the-art performance on the WMT 2019 Metrics shared task and demonstrate robustness to high-performing systems.", "keyphrases": ["neural framework", "reference", "comet", "evaluation metric", "bertscore"]} +{"id": "takase-kiyono-2021-rethinking", "title": "Rethinking Perturbations in Encoder-Decoders for Fast Training", "abstract": "We often use perturbations to regularize neural models. For neural encoder-decoders, previous studies applied the scheduled sampling (Bengio et al., 2015) and adversarial perturbations (Sato et al., 2019) as perturbations but these methods require considerable computational time. Thus, this study addresses the question of whether these approaches are efficient enough for training time. We compare several perturbations in sequence-to-sequence problems with respect to computational time. Experimental results show that the simple techniques such as word dropout (Gal and Ghahramani, 2016) and random replacement of input tokens achieve comparable (or better) scores to the recently proposed perturbations, even though these simple methods are faster.", "keyphrases": ["perturbation", "encoder-decoder", "word dropout"]} +{"id": "dyer-etal-2015-transition", "title": "Transition-Based Dependency Parsing with Stack Long Short-Term Memory", "abstract": "This work was sponsored in part by the U. S. Army Research Laboratory and the U. S. 
Army Research Office under contract/grant number W911NF-10-1-0533, and in part by NSF CAREER grant IIS-1054319. Miguel Ballesteros is supported by the European Commission under the contract numbers FP7-ICT-610411 (project MULTISENSOR) and H2020-RIA-645012 (project KRISTINA).", "keyphrases": ["dependency parsing", "stack lstm", "pos tag", "short-term memory network", "transition-based method"]} +{"id": "nothman-etal-2012-event", "title": "Event Linking: Grounding Event Reference in a News Archive", "abstract": "Interpreting news requires identifying its constituent events. Events are complex linguistically and ontologically, so disambiguating their reference is challenging. We introduce event linking, which canonically labels an event reference with the article where it was first reported. This implicitly relaxes coreference to co-reporting, and will practically enable augmenting news archives with semantic hyperlinks. We annotate and analyse a corpus of 150 documents, extracting 501 links to a news archive with reasonable inter-annotator agreement.", "keyphrases": ["event reference", "coreference", "news archive", "inter-annotator agreement"]} +{"id": "johnson-etal-2007-improving", "title": "Improving Translation Quality by Discarding Most of the Phrasetable", "abstract": "It is possible to reduce the bulk of phrasetables for Statistical Machine Translation using a technique based on the significance testing of phrase pair co-occurrence in the parallel corpus. The savings can be quite substantial (up to 90%) and cause no reduction in BLEU score. In some cases, an improvement in BLEU is obtained at the same time although the effect is less pronounced if state-of-the-art phrasetable smoothing is employed.", "keyphrases": ["translation quality", "significance testing", "bleu score", "phrase table"]} +{"id": "boyd-graber-etal-2007-topic", "title": "A Topic Model for Word Sense Disambiguation", "abstract": "We develop latent Dirichlet allocation with WORDNET (LDAWN), an unsupervised probabilistic topic model that includes word sense as a hidden variable. We develop a probabilistic posterior inference algorithm for simultaneously disambiguating a corpus and learning the domains in which to consider each word. Using the WORDNET hierarchy, we embed the construction of Abney and Light (1999) in the topic model and show that automatically learned domains improve WSD accuracy compared to alternative contexts.", "keyphrases": ["topic model", "word sense disambiguation", "ldawn"]} +{"id": "tsvetkov-etal-2014-metaphor", "title": "Metaphor Detection with Cross-Lingual Model Transfer", "abstract": "We show that it is possible to reliably discriminate whether a syntactic construction is meant literally or metaphorically using lexical semantic features of the words that participate in the construction. Our model is constructed using English resources, and we obtain state-of-the-art performance relative to previous work in this language. Using a model transfer approach by pivoting through a bilingual dictionary, we show our model can identify metaphoric expressions in other languages. We provide results on three new test sets in Spanish, Farsi, and Russian. &#13;
The results support the hypothesis that metaphors are conceptual, rather than lexical, in nature.", "keyphrases": ["cross-lingual model transfer", "metaphor detection", "abstractness", "sentiment analysis", "language processing application"]} +{"id": "bojar-etal-2016-findings", "title": "Findings of the 2016 Conference on Machine Translation", "abstract": "This paper presents the results of the WMT16 shared tasks, which included five machine translation (MT) tasks (standard news, IT-domain, biomedical, multimodal, pronoun), three evaluation tasks (metrics, tuning, run-time estimation of MT quality), and an automatic post-editing task and bilingual document alignment task. This year, 102 MT systems from 24 institutions (plus 36 anonymized online systems) were submitted to the 12 translation directions in the news translation task. The IT-domain task received 31 submissions from 12 institutions in 7 directions and the Biomedical task received 15 submissions systems from 5 institutions. Evaluation was both automatic and manual (relative ranking and 100-point scale assessments). The quality estimation task had three subtasks, with a total of 14 teams, submitting 39 entries. The automatic post-editing task had a total of 6 teams, submitting 11 entries.", "keyphrases": ["conference", "machine translation", "automatic post-editing task", "online system", "state-of-the-art result"]} +{"id": "kikuchi-etal-2016-controlling", "title": "Controlling Output Length in Neural Encoder-Decoders", "abstract": "Neural encoder-decoder models have shown great success in many sequence generation tasks. However, previous work has not investigated situations in which we would like to control the length of encoder-decoder outputs. This capability is crucial for applications such as text summarization, in which we have to generate concise summaries with a desired length. In this paper, we propose methods for controlling the output sequence length for neural encoder-decoder models: two decoding-based methods and two learning-based methods. Results show that our learning-based methods have the capability to control length without degrading summary quality in a summarization task.", "keyphrases": ["output length", "encoder-decoder model", "sentence compression"]} +{"id": "eger-mehler-2016-linearity", "title": "On the Linearity of Semantic Change: Investigating Meaning Variation via Dynamic Graph Models", "abstract": "We consider two graph models of semantic change. The first is a time-series model that relates embedding vectors from one time period to embedding vectors of previous time periods. In the second, we construct one graph for each word: nodes in this graph correspond to time points and edge weights to the similarity of the word\u2019s meaning across two time points. We apply our two models to corpora across three different languages. We find that semantic change is linear in two senses. Firstly, today\u2019s embedding vectors (= meaning) of words can be derived as linear combinations of embedding vectors of their neighbors in previous time periods. Secondly, self-similarity of words decays linearly in time. We consider both findings as new laws/hypotheses of semantic change.", "keyphrases": ["semantic change", "time period", "self-similarity"]} +{"id": "begum-etal-2008-dependency", "title": "Dependency Annotation Scheme for Indian Languages", "abstract": "The paper introduces a dependency annotation effort which aims to fully annotate a million word Hindi corpus. 
It is the first attempt of its kind to develop a large scale tree-bank for an Indian language. In this paper we provide the motivation for following the Paninian framework as the annotation scheme and argue that the Paninian framework is better suited to model the various linguistic phenomena manifest in Indian languages. We present the basic annotation scheme. We also show how the scheme handles some phenomena such as complex verbs, ellipses, etc. Empirical results of some experiments done on the currently annotated sentences are also reported.", "keyphrases": ["indian language", "paninian framework", "dependency annotation scheme"]} +{"id": "petrovic-etal-2012-using", "title": "Using paraphrases for improving first story detection in news and Twitter", "abstract": "First story detection (FSD) involves identifying first stories about events from a continuous stream of documents. A major problem in this task is the high degree of lexical variation in documents which makes it very difficult to detect stories that talk about the same event but expressed using different words. We suggest using paraphrases to alleviate this problem, making this the first work to use paraphrases for FSD. We show a novel way of integrating paraphrases with locality sensitive hashing (LSH) in order to obtain an efficient FSD system that can scale to very large datasets. Our system achieves state-of-the-art results on the first story detection task, beating both the best supervised and unsupervised systems. To test our approach on large data, we construct a corpus of events for Twitter, consisting of 50 million documents, and show that paraphrasing is also beneficial in this domain.", "keyphrases": ["paraphrase", "story detection", "twitter"]} +{"id": "beltagy-etal-2019-scibert", "title": "SciBERT: A Pretrained Language Model for Scientific Text", "abstract": "Obtaining large-scale annotated data for NLP tasks in the scientific domain is challenging and expensive. We release SciBERT, a pretrained language model based on BERT (Devlin et al., 2018) to address the lack of high-quality, large-scale labeled scientific data. SciBERT leverages unsupervised pretraining on a large multi-domain corpus of scientific publications to improve performance on downstream scientific NLP tasks. We evaluate on a suite of tasks including sequence tagging, sentence classification and dependency parsing, with datasets from a variety of scientific domains. We demonstrate statistically significant improvements over BERT and achieve new state-of-the-art results on several of these tasks. The code and pretrained models are available at .", "keyphrases": ["language model", "scientific text", "downstream task", "biomedical domain", "pre-trained model"]} +{"id": "lindemann-etal-2019-compositional", "title": "Compositional Semantic Parsing across Graphbanks", "abstract": "Most semantic parsers that map sentences to graph-based meaning representations are hand-designed for specific graphbanks. We present a compositional neural semantic parser which achieves, for the first time, competitive accuracies across a diverse range of graphbanks. Incorporating BERT embeddings and multi-task learning improves the accuracy further, setting new states of the art on DM, PAS, PSD, AMR 2015 and EDS.", "keyphrases": ["graphbank", "pas", "compositional structure"]} +{"id": "yang-etal-2006-kernel", "title": "Kernel-Based Pronoun Resolution with Structured Syntactic Knowledge", "abstract": "Syntactic knowledge is important for pronoun resolution. &#13;
Traditionally, the syntactic information for pronoun resolution is represented in terms of features that have to be selected and defined heuristically. In the paper, we propose a kernel-based method that can automatically mine the syntactic information from the parse trees for pronoun resolution. Specifically, we utilize the parse trees directly as a structured feature and apply kernel functions to this feature, as well as other normal features, to learn the resolution classifier. In this way, our approach avoids the efforts of decoding the parse trees into the set of flat syntactic features. The experimental results show that our approach can bring significant performance improvement and is reliably effective for the pronoun resolution task.", "keyphrases": ["pronoun resolution", "syntactic knowledge", "convolution tree kernel"]} +{"id": "karpukhin-etal-2019-training", "title": "Training on Synthetic Noise Improves Robustness to Natural Noise in Machine Translation", "abstract": "Contemporary machine translation systems achieve greater coverage by applying subword models such as BPE and character-level CNNs, but these methods are highly sensitive to orthographical variations such as spelling mistakes. We show how training on a mild amount of random synthetic noise can dramatically improve robustness to these variations, without diminishing performance on clean text. We focus on translation performance on natural typos, and show that robustness to such noise can be achieved using a balanced diet of simple synthetic noises at training time, without access to the natural noise data or distribution.", "keyphrases": ["synthetic noise", "robustness", "machine translation", "training sample", "adversarial example"]} +{"id": "ruder-plank-2018-strong", "title": "Strong Baselines for Neural Semi-Supervised Learning under Domain Shift", "abstract": "Novel neural models have been proposed in recent years for learning under domain shift. Most models, however, only evaluate on a single task, on proprietary datasets, or compare to weak baselines, which makes comparison of models difficult. In this paper, we re-evaluate classic general-purpose bootstrapping approaches in the context of neural networks under domain shifts vs. recent neural approaches and propose a novel multi-task tri-training method that reduces the time and space complexity of classic tri-training. Extensive experiments on two benchmarks for part-of-speech tagging and sentiment analysis are negative: while our novel method establishes a new state-of-the-art for sentiment analysis, it does not fare consistently the best. More importantly, we arrive at the somewhat surprising conclusion that classic tri-training, with some additions, outperforms the state-of-the-art for NLP. Hence classic approaches constitute an important and strong baseline.", "keyphrases": ["domain shift", "tri-training", "strong baseline"]} +{"id": "krishnamurthy-mitchell-2012-weakly", "title": "Weakly Supervised Training of Semantic Parsers", "abstract": "We present a method for training a semantic parser using only a knowledge base and an unlabeled text corpus, without any individually annotated sentences. Our key observation is that multiple forms of weak supervision can be combined to train an accurate semantic parser: semantic supervision from a knowledge base, and syntactic supervision from dependency-parsed sentences. We apply our approach to train a semantic parser that uses 77 relations from Freebase in its knowledge representation. 
This semantic parser extracts instances of binary relations with state-of-the-art accuracy, while simultaneously recovering much richer semantic structures, such as conjunctions of multiple relations with partially shared arguments. We demonstrate recovery of this richer structure by extracting logical forms from natural language queries against Freebase. On this task, the trained semantic parser achieves 80% precision and 56% recall, despite never having seen an annotated logical form.", "keyphrases": ["semantic parser", "knowledge base", "freebase", "distant supervision"]} +{"id": "kuang-etal-2018-modeling", "title": "Modeling Coherence for Neural Machine Translation with Dynamic and Topic Caches", "abstract": "Sentences in a well-formed text are connected to each other via various links to form the cohesive structure of the text. Current neural machine translation (NMT) systems translate a text in a conventional sentence-by-sentence fashion, ignoring such cross-sentence links and dependencies. This may lead to generating an incoherent target text for a coherent source text. In order to handle this issue, we propose a cache-based approach to modeling coherence for neural machine translation by capturing contextual information either from recently translated sentences or the entire document. Particularly, we explore two types of caches: a dynamic cache, which stores words from the best translation hypotheses of preceding sentences, and a topic cache, which maintains a set of target-side topical words that are semantically related to the document to be translated. On this basis, we build a new layer to score target words in these two caches with a cache-based neural model. Here the estimated probabilities from the cache-based neural model are combined with NMT probabilities into the final word prediction probabilities via a gating mechanism. Finally, the proposed cache-based neural model is trained jointly with the NMT system in an end-to-end manner. Experiments and analysis presented in this paper demonstrate that the proposed cache-based model achieves substantial improvements over several state-of-the-art SMT and NMT baselines.", "keyphrases": ["coherence", "neural machine translation", "topic cache", "translation quality"]} +{"id": "sokolov-etal-2017-shared", "title": "A Shared Task on Bandit Learning for Machine Translation", "abstract": "We introduce and describe the results of a novel shared task on bandit learning for machine translation. The task was organized jointly by Amazon and Heidelberg University for the first time at the Second Conference on Machine Translation (WMT 2017). The goal of the task is to encourage research on learning machine translation from weak user feedback instead of human references or post-edits. On each of a sequence of rounds, a machine translation system is required to propose a translation for an input, and receives a real-valued estimate of the quality of the proposed translation for learning. 
This paper describes the shared task's learning and evaluation setup, using services hosted on Amazon Web Services (AWS), the data and evaluation metrics, and the results of various machine translation architectures and learning protocols.", "keyphrases": ["bandit learning", "machine translation", "feedback", "post-edit"]} +{"id": "liu-etal-2022-makes", "title": "What Makes Good In-Context Examples for GPT-3?", "abstract": "GPT-3 has attracted lots of attention due to its superior performance across a wide range of NLP tasks, especially with its in-context learning abilities. Despite its success, we found that the empirical results of GPT-3 depend heavily on the choice of in-context examples. In this work, we investigate whether there are more effective strategies for judiciously selecting in-context examples (relative to random sampling) that better leverage GPT-3's in-context learning capabilities. Inspired by the recent success of leveraging a retrieval module to augment neural networks, we propose to retrieve examples that are semantically-similar to a test query sample to formulate its corresponding prompt. Intuitively, the examples selected with such a strategy may serve as more informative inputs to unleash GPT-3's power of text generation. We evaluate the proposed approach on several natural language understanding and generation benchmarks, where the retrieval-based prompt selection approach consistently outperforms the random selection baseline. Moreover, it is observed that the sentence encoders fine-tuned on task-related datasets yield even more helpful retrieval results. Notably, significant gains are observed on tasks such as table-to-text generation (44.3% on the ToTTo dataset) and open-domain question answering (45.5% on the NQ dataset).", "keyphrases": ["in-context example", "gpt-3", "few-shot learning"]} +{"id": "prasad-etal-2014-reflections", "title": "Reflections on the Penn Discourse TreeBank, Comparable Corpora, and Complementary Annotation", "abstract": "The Penn Discourse Treebank (PDTB) was released to the public in 2008. It remains the largest manually annotated corpus of discourse relations to date. Its focus on discourse relations that are either lexically-grounded in explicit discourse connectives or associated with sentential adjacency has not only facilitated its use in language technology and psycholinguistics but also has spawned the annotation of comparable corpora in other languages and genres.Given this situation, this paper has four aims: (1) to provide a comprehensive introduction to the PDTB for those who are unfamiliar with it; (2) to correct some wrong (or perhaps inadvertent) assumptions about the PDTB and its annotation that may have weakened previous results or the performance of decision procedures induced from the data; (3) to explain variations seen in the annotation of comparable resources in other languages and genres, which should allow developers of future comparable resources to recognize whether the variations are relevant to them; and (4) to enumerate and explain relationships between PDTB annotation and complementary annotation of other linguistic phenomena. 
The paper draws on work done by ourselves and others since the corpus was released.", "keyphrases": ["penn discourse treebank", "comparable corpora", "annotator", "pdtb annotation"]} +{"id": "el-kholy-habash-2010-orthographic", "title": "Orthographic and Morphological Processing for English-Arabic Statistical Machine Translation", "abstract": "Much of the work on Statistical Machine Translation (SMT) from morphologically rich languages has shown that morphological tokenization and orthographic normalization help improve SMT quality because of the sparsity reduction they contribute. In this paper, we study the effect of these processes on SMT when translating into a morphologically rich language, namely Arabic. We explore a space of tokenization schemes and normalization options. We only evaluate on detokenized and orthographically correct (enriched) output. Our results show that the best performing tokenization scheme is that of the Penn Arabic Treebank. Additionally, training on orthographically normalized (reduced) text then jointly enriching and detokenizing the output outperforms training on enriched text.", "keyphrases": ["statistical machine translation", "rich language", "tokenization", "arabic"]} +{"id": "collins-koo-2005-discriminative", "title": "Discriminative Reranking for Natural Language Parsing", "abstract": "This article considers approaches which rerank the output of an existing probabilistic parser. The base parser produces a set of candidate parses for each input sentence, with associated probabilities that define an initial ranking of these parses. A second model then attempts to improve upon this initial ranking, using additional features of the tree as evidence. The strength of our approach is that it allows a tree to be represented as an arbitrary set of features, without concerns about how these features interact or overlap and without the need to define a derivation or a generative model which takes these features into account. We introduce a new method for the reranking task, based on the boosting approach to ranking problems described in Freund et al. (1998). We apply the boosting method to parsing the Wall Street Journal treebank. The method combined the log-likelihood under a baseline model (that of Collins [1999]) with evidence from an additional 500,000 features over parse trees that were not included in the original model. The new model achieved 89.75 F-measure, a 13% relative decrease in F-measure error over the baseline model's score of 88.2. The article also introduces a new algorithm for the boosting approach which takes advantage of the sparsity of the feature space in the parsing data. Experiments show significant efficiency gains for the new algorithm over the obvious implementation of the boosting approach. We argue that the method is an appealing alternative, in terms of both simplicity and efficiency, to work on feature selection methods within log-linear (maximum-entropy) models. 
Although the experiments in this article are on natural language parsing (NLP), the approach should be applicable to many other NLP problems which are naturally framed as ranking tasks, for example, speech recognition, machine translation, or natural language generation.", "keyphrases": ["generative model", "collins", "feature space", "discriminative reranking", "syntactic parsing"]} +{"id": "valenzuela-escarcega-etal-2016-odins", "title": "Odin's Runes: A Rule Language for Information Extraction", "abstract": "Odin is an information extraction framework that applies cascades of finite state automata over both surface text and syntactic dependency graphs. Support for syntactic patterns allows us to concisely define relations that are otherwise difficult to express in languages such as Common Pattern Specification Language (CPSL), which are currently limited to shallow linguistic features. The interaction of lexical and syntactic automata provides robustness and flexibility when writing extraction rules. This paper describes Odin's declarative language for writing these cascaded automata.", "keyphrases": ["rule language", "information extraction framework", "odin"]} +{"id": "mccrae-doyle-2019-adapting", "title": "Adapting Term Recognition to an Under-Resourced Language: the Case of Irish", "abstract": "Automatic Term Recognition (ATR) is an important method for the summarization and analysis of large corpora, and normally requires a significant amount of linguistic input, in particular the use of part-of-speech taggers. For an under-resourced language such as Irish, the resources necessary for this may be scarce or entirely absent. We evaluate two methods for the automatic extraction of terms, based on the small part-of-speech-tagged corpora that are available for Irish and on a large terminology list, and show that both methods can produce viable term extractors. We evaluate this with a newly constructed corpus that is the first available corpus for term extraction in Irish. Our results shine some light on the challenge of adapting natural language processing systems to under-resourced scenarios.", "keyphrases": ["term recognition", "under-resourced language", "irish"]} +{"id": "zhao-xing-2006-bitam", "title": "BiTAM: Bilingual Topic AdMixture Models for Word Alignment", "abstract": "We propose a novel bilingual topical admixture (BiTAM) formalism for word alignment in statistical machine translation. Under this formalism, the parallel sentence-pairs within a document-pair are assumed to constitute a mixture of hidden topics; each word-pair follows a topic-specific bilingual translation model. Three BiTAM models are proposed to capture topic sharing at different levels of linguistic granularity (i.e., at the sentence or word levels). These models enable the word-alignment process to leverage topical contents of document-pairs. Efficient variational approximation algorithms are designed for inference and parameter estimation. With the inferred latent topics, BiTAM models facilitate coherent pairing of bilingual linguistic entities that share common topical aspects. 
Our preliminary experiments show that the proposed models improve word alignment accuracy, and lead to better translation quality.", "keyphrases": ["word alignment", "mixture", "hidden topic", "different level", "topical content"]} +{"id": "sagae-lavie-2006-parser", "title": "Parser Combination by Reparsing", "abstract": "We present a novel parser combination scheme that works by reparsing input sentences once they have already been parsed by several different parsers. We apply this idea to dependency and constituent parsing, generating results that surpass state-of-the-art accuracy levels for individual parsers.", "keyphrases": ["reparsing", "parser combination", "dependency graph"]} +{"id": "tomanek-olsson-2009-web", "title": "A Web Survey on the Use of Active Learning to Support Annotation of Text Data", "abstract": "As supervised machine learning methods for addressing tasks in natural language processing (NLP) prove increasingly viable, the focus of attention is naturally shifted towards the creation of training data. The manual annotation of corpora is a tedious and time-consuming process. Obtaining high-quality annotated data constitutes a bottleneck in machine learning for NLP today. Active learning is one way of easing the burden of annotation. This paper presents a first probe into the NLP research community concerning the nature of the annotation projects undertaken in general, and the use of active learning as annotation support in particular.", "keyphrases": ["web survey", "active learning", "annotation project"]} +{"id": "ng-etal-2013-conll", "title": "The CoNLL-2013 Shared Task on Grammatical Error Correction", "abstract": "The CoNLL-2013 shared task was devoted to grammatical error correction. In this paper, we give the task definition, present the data sets, and describe the evaluation metric and scorer used in the shared task. We also give an overview of the various approaches adopted by the participating teams, and present the evaluation results.", "keyphrases": ["conll-2013", "grammatical error correction", "learner", "content word", "punctuation"]} +{"id": "yang-etal-2018-modeling", "title": "Modeling Localness for Self-Attention Networks", "abstract": "Self-attention networks have proven to be of profound value for their strength of capturing global dependencies. In this work, we propose to model localness for self-attention networks, which enhances the ability of capturing useful local context. We cast localness modeling as a learnable Gaussian bias, which indicates the center and scope of the local region to be paid more attention. The bias is then incorporated into the original attention distribution to form a revised distribution. To maintain the strength of capturing long-distance dependencies while enhancing the ability of capturing short-range dependencies, we only apply localness modeling to lower layers of self-attention networks. Quantitative and qualitative analyses on Chinese-English and English-German translation tasks demonstrate the effectiveness and universality of the proposed approach.", "keyphrases": ["localness", "self-attention network", "gaussian bias", "scope", "translation task"]} +{"id": "kushman-etal-2014-learning", "title": "Learning to Automatically Solve Algebra Word Problems", "abstract": "We present an approach for automatically learning to solve algebra word problems. 
Our algorithm reasons across sentence boundaries to construct and solve a system of linear equations, while simultaneously recovering an alignment of the variables and numbers in these equations to the problem text. The learning algorithm uses varied supervision, including either full equations or just the final answers. We evaluate performance on a newly gathered corpus of algebra word problems, demonstrating that the system can correctly answer almost 70% of the questions in the dataset. This is, to our knowledge, the first learning result for this task.", "keyphrases": ["algebra word problem", "operation", "hand-crafted feature", "learning method", "mwp solver"]} +{"id": "grave-etal-2018-learning", "title": "Learning Word Vectors for 157 Languages", "abstract": "Distributed word representations, or word vectors, have recently been applied to many tasks in natural language processing, leading to state-of-the-art performance. A key ingredient to the successful application of these representations is to train them on very large corpora, and use these pre-trained models in downstream tasks. In this paper, we describe how we trained such high-quality word representations for 157 languages. We used two sources of data to train these models: the free online encyclopedia Wikipedia and data from the common crawl project. We also introduce three new word analogy datasets to evaluate these word vectors, for French, Hindi and Polish. Finally, we evaluate our pre-trained word vectors on 10 languages for which evaluation datasets exist, showing very strong performance compared to previous models.", "keyphrases": ["word vector", "pre-trained model", "wikipedia", "hindi", "fasttext"]} +{"id": "liu-etal-2010-tesla", "title": "TESLA: Translation Evaluation of Sentences with Linear-Programming-Based Analysis", "abstract": "We present TESLA-M and TESLA, two novel automatic machine translation evaluation metrics with state-of-the-art performances. TESLA-M builds on the success of METEOR and MaxSim, but employs a more expressive linear programming framework. TESLA further exploits parallel texts to build a shallow semantic representation. We evaluate both on the WMT 2009 shared evaluation task and show that they outperform all participating systems in most tasks.", "keyphrases": ["translation evaluation", "sentences", "tesla"]} +{"id": "agirre-etal-2016-semeval", "title": "SemEval-2016 Task 1: Semantic Textual Similarity, Monolingual and Cross-Lingual Evaluation", "abstract": "Paper presented at the 10th International Workshop on Semantic Evaluation (SemEval-2016), held on June 16 and 17, 2016, in San Diego, California.", "keyphrases": ["semantic textual similarity", "semeval", "cross-lingual sub-task"]} +{"id": "hamoui-etal-2020-flodusta", "title": "FloDusTA: Saudi Tweets Dataset for Flood, Dust Storm, and Traffic Accident Events", "abstract": "The rise of social media platforms makes them a valuable source of information on recent events and users' perspectives towards them. Twitter has been one of the most important communication platforms in recent years. Event detection, one of the information extraction aspects, involves identifying specified types of events in the text. Detecting events from tweets can help to predict real-world events precisely. A serious challenge that faces Arabic event detection is the lack of Arabic datasets that can be exploited in detecting events. 
This paper describes FloDusTA, a dataset of tweets that we have built for the purpose of developing an event detection system. The dataset contains tweets written in both Modern Standard Arabic and Saudi dialect. The process of building the dataset, from tweet collection to annotation by human annotators, is presented. The tweets are labeled with four labels: flood, dust storm, traffic accident, and non-event. The dataset was tested for classification and the result was strongly encouraging.", "keyphrases": ["flood", "dust storm", "traffic accident event"]} +{"id": "zeman-etal-2017-conll", "title": "CoNLL 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", "abstract": "The Conference on Computational Natural Language Learning (CoNLL) features a shared task, in which participants train and test their learning systems on the same data sets. In 2017, the task was devoted to learning dependency parsers for a large number of languages, in a real-world setting without any gold-standard annotation on input. All test sets followed a unified annotation scheme, namely that of Universal Dependencies. In this paper, we define the task and evaluation methodology, describe how the data sets were prepared, report and analyze the main results, and provide a brief categorization of the different approaches of the participating systems.", "keyphrases": ["shared task", "raw text", "universal dependency"]} +{"id": "brooke-etal-2009-cross", "title": "Cross-Linguistic Sentiment Analysis: From English to Spanish", "abstract": "We explore the adaptation of English resources and techniques for text sentiment analysis to a new language, Spanish. Our main focus is the modification of an existing English semantic orientation calculator and the building of dictionaries; however, we also compare alternate approaches, including machine translation and Support Vector Machine classification. The results indicate that, although language-independent methods provide a decent baseline performance, there is also a significant cost to automation, and thus the best path to long-term improvement is through the inclusion of language-specific knowledge and resources.", "keyphrases": ["sentiment analysis", "spanish", "machine translation"]} +{"id": "srivastava-singh-2020-phinc", "title": "PHINC: A Parallel Hinglish Social Media Code-Mixed Corpus for Machine Translation", "abstract": "Code-mixing is the phenomenon of using more than one language in a sentence. In multilingual communities, it is a very frequently observed pattern of communication on social media platforms. Flexibility to use multiple languages in one text message might help to communicate efficiently with the target audience. But the noisy user-generated code-mixed text adds to the challenge of processing and understanding natural language to a much larger extent. Machine translation from a monolingual source to the target language is a well-studied research problem. Here, we demonstrate that widely popular and sophisticated translation systems such as Google Translate fail at times to translate code-mixed text effectively. To address this challenge, we present a parallel corpus of 13,738 code-mixed Hindi-English sentences and their corresponding human translation in English. In addition, we also propose a translation pipeline built on top of Google Translate. The evaluation of the proposed pipeline on PHINC demonstrates an increase in the performance of the underlying system. 
With minimal effort, we can extend the dataset and the proposed approach to other code-mixing language pairs.", "keyphrases": ["machine translation", "code-mixed language", "phinc"]} +{"id": "petrov-etal-2006-learning", "title": "Learning Accurate, Compact, and Interpretable Tree Annotation", "abstract": "We present an automatic approach to tree annotation in which basic nonterminal symbols are alternately split and merged to maximize the likelihood of a training treebank. Starting with a simple X-bar grammar, we learn a new grammar whose nonterminals are subsymbols of the original nonterminals. In contrast with previous work, we are able to split various terminals to different degrees, as appropriate to the actual complexity in the data. Our grammars automatically learn the kinds of linguistic distinctions exhibited in previous work on manual tree annotation. On the other hand, our grammars are much more compact and substantially more accurate than previous work on automatic annotation. Despite its simplicity, our best grammar achieves an F1 of 90.2% on the Penn Treebank, higher than fully lexicalized systems.", "keyphrases": ["berkeley parser", "latent annotation", "pcfg", "variable approach", "subcategorie"]} +{"id": "zribi-etal-2014-conventional", "title": "A Conventional Orthography for Tunisian Arabic", "abstract": "Tunisian Arabic is a dialect of the Arabic language spoken in Tunisia. Tunisian Arabic is an under-resourced language. It has neither a standard orthography nor large collections of written text and dictionaries. Actually, there is no strict separation between Modern Standard Arabic, the official language of the government, media and education, and Tunisian Arabic; the two exist on a continuum dominated by mixed forms. In this paper, we present a conventional orthography for Tunisian Arabic, following a previous effort on developing a conventional orthography for Dialectal Arabic (or CODA) demonstrated for Egyptian Arabic. We explain the design principles of CODA and provide a detailed description of its guidelines as applied to Tunisian Arabic.", "keyphrases": ["conventional orthography", "tunisian arabic", "dialect"]} +{"id": "guo-etal-2018-implicit", "title": "Implicit Discourse Relation Recognition using Neural Tensor Network with Interactive Attention and Sparse Learning", "abstract": "Implicit discourse relation recognition aims to understand and annotate the latent relations between two discourse arguments, such as temporal, comparison, etc. Most previous methods encode two discourse arguments separately; the ones considering pair-specific clues ignore the bidirectional interactions between two arguments and the sparsity of pair patterns. In this paper, we propose a novel neural Tensor network framework with Interactive Attention and Sparse Learning (TIASL) for implicit discourse relation recognition. (1) We mine the most correlated word pairs from two discourse arguments to model pair-specific clues, and integrate them as interactive attention into argument representations produced by the bidirectional long short-term memory network. Meanwhile, (2) the neural tensor network with sparse constraint is proposed to explore the deeper and the more important pair patterns so as to fully recognize discourse relations. 
The experimental results on PDTB show that our proposed TIASL framework is effective.", "keyphrases": ["neural tensor network", "discourse argument", "word pair"]} +{"id": "nguyen-etal-2020-bertweet", "title": "BERTweet: A pre-trained language model for English Tweets", "abstract": "We present BERTweet, the first public large-scale pre-trained language model for English Tweets. Our BERTweet, having the same architecture as BERT-base (Devlin et al., 2019), is trained using the RoBERTa pre-training procedure (Liu et al., 2019). Experiments show that BERTweet outperforms strong baselines RoBERTa-base and XLM-R-base (Conneau et al., 2020), producing better performance results than the previous state-of-the-art models on three Tweet NLP tasks: Part-of-speech tagging, Named-entity recognition and text classification. We release BERTweet under the MIT License to facilitate future research and applications on Tweet data. Our BERTweet is available at ", "keyphrases": ["language model", "english tweets", "pre-training procedure", "strong baseline", "part-of-speech tagging"]} +{"id": "murray-etal-2010-generating", "title": "Generating and Validating Abstracts of Meeting Conversations: a User Study", "abstract": "In this paper we present a complete system for automatically generating natural language abstracts of meeting conversations. This system is comprised of components relating to interpretation of the meeting documents according to a meeting ontology, transformation or content selection from that source representation to a summary representation, and generation of new summary text. In a formative user study, we compare this approach to gold-standard human abstracts and extracts to gauge the usefulness of the different summary types for browsing meeting conversations. We find that our automatically generated summaries are ranked significantly higher than human-selected extracts on coherence and usability criteria. More generally, users demonstrate a strong preference for abstract-style summaries over extracts.", "keyphrases": ["abstract", "meeting conversation", "user study"]} +{"id": "jin-etal-2019-pubmedqa", "title": "PubMedQA: A Dataset for Biomedical Research Question Answering", "abstract": "We introduce PubMedQA, a novel biomedical question answering (QA) dataset collected from PubMed abstracts. The task of PubMedQA is to answer research questions with yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA instances. Each PubMedQA instance is composed of (1) a question which is either an existing research article title or derived from one, (2) a context which is the corresponding abstract without its conclusion, (3) a long answer, which is the conclusion of the abstract and, presumably, answers the research question, and (4) a yes/no/maybe answer which summarizes the conclusion. PubMedQA is the first QA dataset where reasoning over biomedical research texts, especially their quantitative contents, is required to answer the questions. Our best performing model, multi-phase fine-tuning of BioBERT with long answer bag-of-word statistics as additional supervision, achieves 68.1% accuracy, compared to single human performance of 78.0% accuracy and majority-baseline of 55.2% accuracy, leaving much room for improvement. 
PubMedQA is publicly available at .", "keyphrases": ["biomedical question", "abstract", "pubmedqa"]} +{"id": "schubert-tong-2003-extracting", "title": "Extracting and evaluating general world knowledge from the Brown Corpus", "abstract": "We have been developing techniques for extracting general world knowledge from miscellaneous texts by a process of approximate interpretation and abstraction, focusing initially on the Brown corpus. We apply interpretive rules to clausal patterns and patterns of modification, and concurrently abstract general \"possibilistic\" propositions from the resulting formulas. Two examples are \"A person may believe a proposition\", and \"Children may live with relatives\". Our methods currently yield over 117,000 such propositions (of variable quality) for the Brown corpus (more than 2 per sentence). We report here on our efforts to evaluate these results with a judging scheme aimed at determining how many of these propositions pass muster as \"reasonable general claims\" about the world in the opinion of human judges. We find that nearly 60% of the extracted propositions are favorably judged according to our scheme by any given judge. The percentage unanimously judged to be reasonable claims by multiple judges is lower, but still sufficiently high to suggest that our techniques may be of some use in tackling the long-standing \"knowledge acquisition bottleneck\" in AI.", "keyphrases": ["world knowledge", "brown corpus", "proposition"]} +{"id": "ding-etal-2019-event-representation", "title": "Event Representation Learning Enhanced with External Commonsense Knowledge", "abstract": "Prior work has proposed effective methods to learn event representations that can capture syntactic and semantic information over text corpora, demonstrating their effectiveness for downstream tasks such as script event prediction. On the other hand, events extracted from raw texts lack commonsense knowledge, such as the intents and emotions of the event participants, which are useful for distinguishing event pairs when there are only subtle differences in their surface realizations. To address this issue, this paper proposes to leverage external commonsense knowledge about the intent and sentiment of the event. Experiments on three event-related tasks, i.e., event similarity, script event prediction and stock market prediction, show that our model obtains much better event embeddings for the tasks, achieving 78% improvements on the hard similarity task, yielding more precise inferences on subsequent events under given contexts, and better accuracies in predicting the volatilities of the stock market.", "keyphrases": ["external commonsense knowledge", "event pair", "event representation learning"]} +{"id": "navigli-velardi-2010-learning", "title": "Learning Word-Class Lattices for Definition and Hypernym Extraction", "abstract": "Definition extraction is the task of automatically identifying definitional sentences within texts. The task has proven useful in many research areas including ontology learning, relation extraction and question answering. However, current approaches -- mostly focused on lexico-syntactic patterns -- suffer from both low recall and precision, as definitional sentences occur in highly variable syntactic structures. In this paper, we propose Word-Class Lattices (WCLs), a generalization of word lattices that we use to model textual definitions. Lattices are learned from a dataset of definitions from Wikipedia. 
Our method is applied to the task of definition and hypernym extraction and compares favorably to other pattern generalization methods proposed in the literature.", "keyphrases": ["definition", "hypernym extraction", "wikipedia", "supervised approach"]} +{"id": "asher-etal-2016-discourse", "title": "Discourse Structure and Dialogue Acts in Multiparty Dialogue: the STAC Corpus", "abstract": "This paper describes the STAC resource, a corpus of multi-party chats annotated for discourse structure in the style of SDRT (Asher and Lascarides, 2003; Lascarides and Asher, 2009). The main goal of the STAC project is to study the discourse structure of multi-party dialogues in order to understand the linguistic strategies adopted by interlocutors to achieve their conversational goals, especially when these goals are opposed. The STAC corpus is not only a rich source of data on strategic conversation, but also the first corpus that we are aware of that provides full discourse structures for multi-party dialogues. It has other remarkable features that make it an interesting resource for other topics: interleaved threads, creative language, and interactions between linguistic and extra-linguistic contexts.", "keyphrases": ["multiparty dialogue", "stac corpus", "discourse structure"]} +{"id": "diab-etal-2009-committed", "title": "Committed Belief Annotation and Tagging", "abstract": "We present a preliminary pilot study of belief annotation and automatic tagging. Our objective is to explore semantic meaning beyond surface propositions. We aim to model people's cognitive states, namely their beliefs as expressed through linguistic means. We model the strength of their beliefs and their (the human) degree of commitment to their utterance. We explore only the perspective of the author of a text. We classify predicates into one of three possibilities: committed belief, non-committed belief, or not applicable. We proceed to manually annotate data to that end, then we build a supervised framework to test the feasibility of automatically predicting these belief states. Even though the data is relatively small, we show that automatic prediction of a belief class is a feasible task. Using syntactic features, we are able to obtain significant improvements of 23% absolute F-measure points over a simple baseline. The best performing automatic tagging condition is where we use POS tag, word type feature AlphaNumeric, and shallow syntactic chunk information CHUNK. Our best overall performance is 53.97% F-measure.", "keyphrases": ["belief", "tagging", "writer", "surface word"]} +{"id": "gonen-goldberg-2019-lipstick", "title": "Lipstick on a Pig: Debiasing Methods Cover up Systematic Gender Biases in Word Embeddings But do not Remove Them", "abstract": "Word embeddings are widely used in NLP for a vast range of tasks. It was shown that word embeddings derived from text corpora reflect gender biases in society. This phenomenon is pervasive and consistent across different word embedding models, causing serious concern. Several recent works tackle this problem, and propose methods for significantly reducing this gender bias in word embeddings, demonstrating convincing results. However, we argue that this removal is superficial. While the bias is indeed substantially reduced according to the provided bias definition, the actual effect is mostly hiding the bias, not removing it. 
The gender bias information is still reflected in the distances between \u201cgender-neutralized\u201d words in the debiased embeddings, and can be recovered from them. We present a series of experiments to support this claim, for two debiasing methods. We conclude that existing bias removal techniques are insufficient, and should not be trusted for providing gender-neutral modeling.", "keyphrases": ["debiasing method", "word embedding", "gender bias", "bias removal technique", "implicit bias"]} +{"id": "dubossarsky-etal-2019-time", "title": "Time-Out: Temporal Referencing for Robust Modeling of Lexical Semantic Change", "abstract": "State-of-the-art models of lexical semantic change detection suffer from noise stemming from vector space alignment. We have empirically tested the Temporal Referencing method for lexical semantic change and show that, by avoiding alignment, it is less affected by this noise. We show that, trained on a diachronic corpus, the skip-gram with negative sampling architecture with temporal referencing outperforms alignment models on a synthetic task as well as a manual testset. We introduce a principled way to simulate lexical semantic change and systematically control for possible biases.", "keyphrases": ["temporal referencing", "lexical semantic change", "change detection"]} +{"id": "lin-etal-2019-moel", "title": "MoEL: Mixture of Empathetic Listeners", "abstract": "Previous research on empathetic dialogue systems has mostly focused on generating responses given certain emotions. However, being empathetic not only requires the ability of generating emotional responses, but more importantly, requires the understanding of user emotions and replying appropriately. In this paper, we propose a novel end-to-end approach for modeling empathy in dialogue systems: Mixture of Empathetic Listeners (MoEL). Our model first captures the user emotions and outputs an emotion distribution. Based on this, MoEL will softly combine the output states of the appropriate Listener(s), which are each optimized to react to certain emotions, and generate an empathetic response. Human evaluations on the EMPATHETIC-DIALOGUES dataset confirm that MoEL outperforms the multitask training baseline in terms of empathy, relevance, and fluency. Furthermore, the case study on generated responses of different Listeners shows high interpretability of our model.", "keyphrases": ["mixture", "empathetic listeners", "emotion"]} +{"id": "gu-etal-2017-learning", "title": "Learning to Translate in Real-time with Neural Machine Translation", "abstract": "Translating in real-time, a.k.a. simultaneous translation, outputs translation words before the input sentence ends, which is a challenging problem for conventional machine translation methods. We propose a neural machine translation (NMT) framework for simultaneous translation in which an agent learns to make decisions on when to translate from the interaction with a pre-trained NMT environment. To trade off quality and delay, we extensively explore various targets for delay and design a method for beam-search applicable in the simultaneous MT setting. 
Experiments against state-of-the-art baselines on two language pairs demonstrate the efficacy of the proposed framework both quantitatively and qualitatively.", "keyphrases": ["real-time", "neural machine translation", "read", "latency", "source sentence"]} +{"id": "patwa-etal-2020-semeval", "title": "SemEval-2020 Task 9: Overview of Sentiment Analysis of Code-Mixed Tweets", "abstract": "In this paper, we present the results of the SemEval-2020 Task 9 on Sentiment Analysis of Code-Mixed Tweets (SentiMix 2020). We also release and describe our Hinglish (Hindi-English) and Spanglish (Spanish-English) corpora annotated with word-level language identification and sentence-level sentiment labels. These corpora are comprised of 20K and 19K examples, respectively. The sentiment labels are Positive, Negative, and Neutral. SentiMix attracted 89 submissions in total including 61 teams that participated in the Hinglish contest and 28 submitted systems to the Spanglish competition. The best performance achieved was 75.0% F1 score for Hinglish and 80.6% F1 for Spanglish. We observe that BERT-like models and ensemble methods are the most common and successful approaches among the participants.", "keyphrases": ["sentiment analysis", "code-mixed tweets", "semeval-2020 task", "code-mixed text", "twitter"]} +{"id": "futrell-etal-2015-quantifying", "title": "Quantifying Word Order Freedom in Dependency Corpora", "abstract": "Using recently available dependency corpora, we present novel measures of a key quantitative property of language, word order freedom: the extent to which word order in a sentence is free to vary while conveying the same meaning. We discuss two topics. First, we discuss linguistic and statistical issues associated with our measures and with the annotation styles of available corpora. We find that we can measure reliable upper bounds on word order freedom in head direction and the ordering of certain sisters, but that more general measures of word order freedom are not currently feasible. Second, we present results of our measures in 34 languages and demonstrate a correlation between quantitative word order freedom of subjects and objects and the presence of nominative-accusative case marking. To our knowledge this is the first large-scale quantitative test of the hypothesis that languages with more word order freedom have more case marking (Sapir, 1921; Kiparsky, 1997).", "keyphrases": ["word order freedom", "dependency corpora", "presence"]} +{"id": "ye-etal-2018-interpretable", "title": "Interpretable Charge Predictions for Criminal Cases: Learning to Generate Court Views from Fact Descriptions", "abstract": "In this paper, we propose to study the problem of court view generation from the fact description in a criminal case. The task aims to improve the interpretability of charge prediction systems and help automatic legal document generation. We formulate this task as a text-to-text natural language generation (NLG) problem. Sequence-to-sequence models have achieved cutting-edge performances in many NLG tasks. However, due to the non-distinctions of fact descriptions, it is hard for Seq2Seq models to generate charge-discriminative court views. In this work, we explore charge labels to tackle this issue. We propose a label-conditioned Seq2Seq model with attention for this problem, to decode court views conditioned on encoded charge labels. 
Experimental results show the effectiveness of our method.", "keyphrases": ["charge prediction", "criminal case", "fact description"]} +{"id": "kobayashi-etal-2015-effects", "title": "Effects of Game on User Engagement with Spoken Dialogue System", "abstract": "In this study, we examine the effects of using a game for encouraging the use of a spoken dialogue system. As a case study, we developed a word-chain game, called Shiritori in Japanese, and released the game as a module in a Japanese Android/iOS app, Onsei-Assist, which is a Siri-like personal assistant based on a spoken dialogue technology. We analyzed the log after the release and confirmed that the game can increase the number of user utterances. Furthermore, we discovered a positive side effect, in which users who have played the game tend to begin using non-game modules. This suggests that just adding a game module to the system can improve user engagement with an assistant agent.", "keyphrases": ["game", "user engagement", "spoken dialogue system"]} +{"id": "malmasi-etal-2017-report", "title": "A Report on the 2017 Native Language Identification Shared Task", "abstract": "Native Language Identification (NLI) is the task of automatically identifying the native language (L1) of an individual based on their language production in a learned language. It is typically framed as a classification task where the set of L1s is known a priori. Two previous shared tasks on NLI have been organized where the aim was to identify the L1 of learners of English based on essays (2013) and spoken responses (2016) they provided during a standardized assessment of academic English proficiency. The 2017 shared task combines the inputs from the two prior tasks for the first time. There are three tracks: NLI on the essay only, NLI on the spoken response only (based on a transcription of the response and i-vector acoustic features), and NLI using both responses. We believe this makes for a more interesting shared task while building on the methods and results from the previous two shared tasks. In this paper, we report the results of the shared task. A total of 19 teams competed across the three different sub-tasks. The fusion track showed that combining the written and spoken responses provides a large boost in prediction accuracy. Multiple classifier systems (e.g. ensembles and meta-classifiers) were the most effective in all tasks, with most based on traditional classifiers (e.g. SVMs) with lexical/syntactic features.", "keyphrases": ["native language identification", "nli", "learned language"]} +{"id": "barzilay-elhadad-2003-sentence", "title": "Sentence Alignment for Monolingual Comparable Corpora", "abstract": "We address the problem of sentence alignment for monolingual corpora, a phenomenon distinct from alignment in parallel corpora. Aligning large comparable corpora automatically would provide a valuable resource for learning of text-to-text rewriting rules. We incorporate context into the search for an optimal alignment in two complementary ways: learning rules for matching paragraphs using topic structure and further refining the matching through local alignment to find good sentence pairs. 
Evaluation shows that our alignment method outperforms state-of-the-art systems developed for the same task.", "keyphrases": ["sentence alignment", "programming", "britannica elementary", "alignment technique"]} +{"id": "savoldi-etal-2021-gender", "title": "Gender Bias in Machine Translation", "abstract": "Machine translation (MT) technology has facilitated our daily tasks by providing accessible shortcuts for gathering, processing, and communicating information. However, it can suffer from biases that harm users and society at large. As a relatively new field of inquiry, studies of gender bias in MT still lack cohesion. This advocates for a unified framework to ease future research. To this end, we: i) critically review current conceptualizations of bias in light of theoretical insights from related disciplines, ii) summarize previous analyses aimed at assessing gender bias in MT, iii) discuss the mitigating strategies proposed so far, and iv) point toward potential directions for future work.", "keyphrases": ["society", "gender bias", "million"]} +{"id": "bergsma-etal-2012-language", "title": "Language Identification for Creating Language-Specific Twitter Collections", "abstract": "Social media services such as Twitter offer an immense volume of real-world linguistic data. We explore the use of Twitter to obtain authentic user-generated text in low-resource languages such as Nepali, Urdu, and Ukrainian. Automatic language identification (LID) can be used to extract language-specific data from Twitter, but it is unclear how well LID performs on short, informal texts in low-resource languages. We address this question by annotating and releasing a large collection of tweets in nine languages, focusing on confusable languages using the Cyrillic, Arabic, and Devanagari scripts. This is the first publicly-available collection of LID-annotated tweets in non-Latin scripts, and should become a standard evaluation set for LID systems. We also advance the state-of-the-art by evaluating new, highly-accurate LID systems, trained both on our new corpus and on standard materials only. Both types of systems achieve a huge performance improvement over the existing state-of-the-art, correctly classifying around 98% of our gold standard tweets. We provide a detailed analysis showing how the accuracy of our systems varies along certain dimensions, such as the tweet-length and the amount of in- and out-of-domain training data.", "keyphrases": ["language-specific twitter collection", "devanagari script", "language identification", "social medium platform"]} +{"id": "chen-etal-2009-improving", "title": "Improving Dependency Parsing with Subtrees from Auto-Parsed Data", "abstract": "This paper presents a simple and effective approach to improve dependency parsing by using subtrees from auto-parsed data. First, we use a baseline parser to parse large-scale unannotated data. Then we extract subtrees from dependency parse trees in the auto-parsed data. Finally, we construct new subtree-based features for parsing algorithms. To demonstrate the effectiveness of our proposed approach, we present the experimental results on the English Penn Treebank and the Chinese Penn Treebank. These results show that our approach significantly outperforms baseline systems. 
It achieves the best accuracy for the Chinese data and an accuracy which is competitive with the best known systems for the English data.", "keyphrases": ["subtree", "auto-parsed data", "large amount"]} +{"id": "white-etal-2016-universal", "title": "Universal Decompositional Semantics on Universal Dependencies", "abstract": "We present a framework for augmenting data sets from the Universal Dependencies project with Universal Decompositional Semantics. Where the Universal Dependencies project aims to provide a syntactic annotation standard that can be used consistently across many languages as well as a collection of corpora that use that standard, our extension has similar aims for semantic annotation. We describe results from annotating the English Universal Dependencies treebank, dealing with word senses, semantic roles, and event properties.", "keyphrases": ["universal decompositional semantics", "predpatt", "lightweight tool"]} +{"id": "barr-etal-2008-linguistic", "title": "The Linguistic Structure of English Web-Search Queries", "abstract": "Web-search queries are known to be short, but little else is known about their structure. In this paper we investigate the applicability of part-of-speech tagging to typical English-language web search-engine queries and the potential value of these tags for improving search results. We begin by identifying a set of part-of-speech tags suitable for search queries and quantifying their occurrence. We find that proper-nouns constitute 40% of query terms, and proper nouns and nouns together constitute over 70% of query terms. We also show that the majority of queries are noun-phrases, not unstructured collections of terms. We then use a set of queries manually labeled with these tags to train a Brill tagger and evaluate its performance. In addition, we investigate classification of search queries into grammatical classes based on the syntax of part-of-speech tag sequences. We also conduct preliminary investigative experiments into the practical applicability of leveraging query-trained part-of-speech taggers for information-retrieval tasks. In particular, we show that part-of-speech information can be a significant feature in machine-learned search-result relevance. These experiments also include the potential use of the tagger in selecting words for omission or substitution in query reformulation, actions which can improve recall. We conclude that training a part-of-speech tagger on labeled corpora of queries significantly outperforms taggers based on traditional corpora, and leveraging the unique linguistic structure of web-search queries can improve search experience.", "keyphrases": ["linguistic structure", "query", "occurrence", "pos tagger"]} +{"id": "lin-2003-improving", "title": "Improving Summarization Performance by Sentence Compression \u2014 A Pilot Study", "abstract": "In this paper we study the effectiveness of applying sentence compression on an extraction-based multi-document summarization system. Our results show that pure syntactic-based compression does not improve system performance. Topic signature-based reranking of compressed sentences does not help much either. 
However, reranking using an oracle showed that a significant improvement remains possible.", "keyphrases": ["summarization performance", "sentence compression", "conciseness", "pipeline approach"]} +{"id": "petroni-etal-2019-language", "title": "Language Models as Knowledge Bases?", "abstract": "Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as \u201cfill-in-the-blank\u201d cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at .", "keyphrases": ["knowledge bases", "query", "open-domain question", "language models", "dante"]} +{"id": "jain-etal-2020-scirex", "title": "SciREX: A Challenge Dataset for Document-Level Information Extraction", "abstract": "Extracting information from full documents is an important problem in many domains, but most previous work focus on identifying relationships within a sentence or a paragraph. It is challenging to create a large-scale information extraction (IE) dataset at the document level since it requires an understanding of the whole document to annotate entities and their document-level relationships that usually span beyond sentences or even sections. In this paper, we introduce SciREX, a document level IE dataset that encompasses multiple IE tasks, including salient entity identification and document level N-ary relation identification from scientific articles. We annotate our dataset by integrating automatic and human annotations, leveraging existing scientific knowledge resources. We develop a neural model as a strong baseline that extends previous state-of-the-art IE models to document-level IE. Analyzing the model performance shows a significant gap between human performance and current baselines, inviting the community to use our dataset as a challenge to develop document-level IE models. Our data and code are publicly available at .", "keyphrases": ["information extraction", "document level", "n-ary relation identification", "scirex"]} +{"id": "shao-ng-2004-mining", "title": "Mining New Word Translations from Comparable Corpora", "abstract": "New words such as names, technical terms, etc. appear frequently. As such, the bilingual lexicon of a machine translation system has to be constantly updated with these new word translations. Comparable corpora such as news documents of the same period from different news agencies are readily available. 
In this paper, we present a new approach to mining new word translations from comparable corpora, by using context information to complement transliteration information. We evaluated our approach on six months of Chinese and English Gigaword corpora, with encouraging results.", "keyphrases": ["new word translation", "comparable corpora", "news document", "context information"]} +{"id": "hasan-ney-2005-clustered", "title": "Clustered language models based on regular expressions for SMT", "abstract": "In this paper, we present a language model based on clusters obtained by applying regular expressions to the training data and, thus, discriminating several different sentence types as, e.g. interrogatives, imperatives or enumerations. The main motivation lies in the observation that different sentence types also underlie a different syntactic structure, and thus yield a varying distribution of n-grams reflecting their word order. We show that this assumption is valid by applying the models to English-Spanish bilingual corpora and obtaining good perplexity reductions of approximately 25%. In addition, we perform an n-best rescoring experiment and show a relative improvement of 4-5% in word error rate. The models can be easily adapted to other translation tasks and do not need complicated training methods, thus being a valuable alternative for on-demand rescoring of sentence hypotheses such as they occur in the CAT framework.", "keyphrases": ["language model", "perplexity reduction", "specific class"]} +{"id": "choi-cardie-2008-learning", "title": "Learning with Compositional Semantics as Structural Inference for Subsentential Sentiment Analysis", "abstract": "Determining the polarity of a sentiment-bearing expression requires more than a simple bag-of-words approach. In particular, words or constituents within the expression can interact with each other to yield a particular overall polarity. In this paper, we view such subsentential interactions in light of compositional semantics, and present a novel learning-based approach that incorporates structural inference motivated by compositional semantics into the learning procedure. Our experiments show that (1) simple heuristics based on compositional semantics can perform better than learning-based methods that do not incorporate compositional semantics (accuracy of 89.7% vs. 89.1%), but (2) a method that integrates compositional semantics into learning performs better than all other alternatives (90.7%). We also find that \"content-word negators\", not widely employed in previous work, play an important role in determining expression-level polarity. Finally, in contrast to conventional wisdom, we find that expression-level classification accuracy uniformly decreases as additional, potentially disambiguating, context is considered.", "keyphrases": ["compositional semantic", "structural inference", "subsentential sentiment analysis", "polarity"]} +{"id": "dale-etal-2012-hoo", "title": "HOO 2012: A Report on the Preposition and Determiner Error Correction Shared Task", "abstract": "Incorrect usage of prepositions and determiners constitute the most common types of errors made by non-native speakers of English. It is not surprising, then, that there has been a significant amount of work directed towards the automated detection and correction of such errors. However, to date, the use of different data sets and different task definitions has made it difficult to compare work on the topic. 
This paper reports on the HOO 2012 shared task on error detection and correction in the use of prepositions and determiners, where systems developed by 14 teams from around the world were evaluated on the same previously unseen errorful text.", "keyphrases": ["preposition", "determiner error correction", "non-native speaker", "textual error", "helping"]} +{"id": "zhang-etal-2003-chinese-lexical", "title": "Chinese Lexical Analysis Using Hierarchical Hidden Markov Model", "abstract": "This paper presents a unified approach for Chinese lexical analysis using hierarchical hidden Markov model (HHMM), which aims to incorporate Chinese word segmentation, Part-Of-Speech tagging, disambiguation and unknown words recognition into a whole theoretical frame. A class-based HMM is applied in word segmentation, and in this level unknown words are treated in the same way as common words listed in the lexicon. Unknown words are recognized with reliability in role-based HMM. As for disambiguation, the authors bring forth an n-shortest-path strategy that, in the early stage, reserves top N segmentation results as candidates and covers more ambiguity. Various experiments show that each level in HHMM contributes to lexical analysis. An HHMM-based system ICTCLAS was accomplished. The recent official evaluation indicates that ICTCLAS is one of the best Chinese lexical analyzers. In a word, HHMM is effective for Chinese lexical analysis.", "keyphrases": ["hidden markov model", "unified approach", "chinese lexical analysis"]} +{"id": "pruthi-etal-2020-learning", "title": "Learning to Deceive with Attention-Based Explanations", "abstract": "Attention mechanisms are ubiquitous components in neural architectures applied to natural language processing. In addition to yielding gains in predictive accuracy, attention weights are often claimed to confer interpretability, purportedly useful both for providing insights to practitioners and for explaining why a model makes its decisions to stakeholders. We call the latter use of attention mechanisms into question by demonstrating a simple method for training models to produce deceptive attention masks. Our method diminishes the total weight assigned to designated impermissible tokens, even when the models can be shown to nevertheless rely on these features to drive predictions. Across multiple models and tasks, our approach manipulates attention weights while paying surprisingly little cost in accuracy. Through a human study, we show that our manipulated attention-based explanations deceive people into thinking that predictions from a model biased against gender minorities do not rely on the gender. Consequently, our results cast doubt on attention's reliability as a tool for auditing algorithms in the context of fairness and accountability.", "keyphrases": ["explanation", "attention weight", "practitioner", "deceptive attention mask"]} +{"id": "medlock-briscoe-2007-weakly", "title": "Weakly Supervised Learning for Hedge Classification in Scientific Literature", "abstract": "We investigate automatic classification of speculative language (\u2018hedging\u2019), in biomedical text using weakly supervised machine learning. Our contributions include a precise description of the task with annotation guidelines, analysis and discussion, a probabilistic weakly supervised learning model, and experimental evaluation of the methods presented. 
We show that hedge classification is feasible using weakly supervised ML, and point toward avenues for future research.", "keyphrases": ["hedge classification", "annotation guideline", "weakly supervised learning", "single word"]} +{"id": "walker-etal-2012-stance", "title": "Stance Classification using Dialogic Properties of Persuasion", "abstract": "Public debate functions as a forum for both expressing and forming opinions, an important aspect of public life. We present results for automatically classifying posts in online debate as to the position, or stance that the speaker takes on an issue, such as Pro or Con. We show that representing the dialogic structure of the debates in terms of agreement relations between speakers, greatly improves performance for stance classification, over models that operate on post content and parent-post context alone.", "keyphrases": ["online debate", "dialogic structure", "stance classification", "collective classification"]} +{"id": "guan-etal-2020-knowledge", "title": "A Knowledge-Enhanced Pretraining Model for Commonsense Story Generation", "abstract": "Story generation, namely, generating a reasonable story from a leading context, is an important but challenging task. In spite of the success in modeling fluency and local coherence, existing neural language generation models (e.g., GPT-2) still suffer from repetition, logic conflicts, and lack of long-range coherence in generated stories. We conjecture that this is because of the difficulty of associating relevant commonsense knowledge, understanding the causal relationships, and planning entities and events with proper temporal order. In this paper, we devise a knowledge-enhanced pretraining model for commonsense story generation. We propose to utilize commonsense knowledge from external knowledge bases to generate reasonable stories. To further capture the causal and temporal dependencies between the sentences in a reasonable story, we use multi-task learning, which combines a discriminative objective to distinguish true and fake stories during fine-tuning. Automatic and manual evaluation shows that our model can generate more reasonable stories than state-of-the-art baselines, particularly in terms of logic and global coherence.", "keyphrases": ["knowledge-enhanced pretraining model", "commonsense story generation", "knowledge graph"]} +{"id": "napoles-etal-2017-jfleg", "title": "JFLEG: A Fluency Corpus and Benchmark for Grammatical Error Correction", "abstract": "We present a new parallel corpus, JHU FLuency-Extended GUG corpus (JFLEG) for developing and evaluating grammatical error correction (GEC). Unlike other corpora, it represents a broad range of language proficiency levels and uses holistic fluency edits to not only correct grammatical errors but also make the original text more native sounding. We describe the types of corrections made and benchmark four leading GEC systems on this corpus, identifying specific areas in which they do well and how they can improve. JFLEG fulfills the need for a new gold standard to properly assess the current state of GEC.", "keyphrases": ["grammatical error correction", "parallel corpus", "native sounding", "jfleg", "language learner"]} +{"id": "stab-gurevych-2014-annotating", "title": "Annotating Argument Components and Relations in Persuasive Essays", "abstract": "In this paper, we present a novel approach to model arguments, their components and relations in persuasive essays in English. 
We propose an annotation scheme that includes the annotation of claims and premises as well as support and attack relations for capturing the structure of argumentative discourse. We further conduct a manual annotation study with three annotators on 90 persuasive essays. The obtained inter-rater agreement of \u03b1U = 0.72 for argument components and \u03b1 = 0.81 for argumentative relations indicates that the proposed annotation scheme successfully guides annotators to substantial agreement. The final corpus and the annotation guidelines are freely available to encourage future research in argument recognition.", "keyphrases": ["persuasive essay", "annotator", "discourse structure", "student-written text", "writing support system"]} +{"id": "moon-etal-2019-opendialkg", "title": "OpenDialKG: Explainable Conversational Reasoning with Attention-based Walks over Knowledge Graphs", "abstract": "We study a conversational reasoning model that strategically traverses through a large-scale common fact knowledge graph (KG) to introduce engaging and contextually diverse entities and attributes. For this study, we collect a new Open-ended Dialog - KG parallel corpus called OpenDialKG, where each utterance from 15K human-to-human role-playing dialogs is manually annotated with ground-truth reference to corresponding entities and paths from a large-scale KG with 1M+ facts. We then propose the DialKG Walker model that learns the symbolic transitions of dialog contexts as structured traversals over KG, and predicts natural entities to introduce given previous dialog contexts via a novel domain-agnostic, attention-based graph path decoder. Automatic and human evaluations show that our model can retrieve more natural and human-like responses than the state-of-the-art baselines or rule-based models, in both in-domain and cross-domain tasks. The proposed model also generates a KG walk path for each entity retrieved, providing a natural way to explain conversational reasoning.", "keyphrases": ["conversational reasoning", "knowledge graph", "opendialkg", "recommendation", "parallel dialog\u2194kg corpus"]} +{"id": "nguyen-etal-2018-novel", "title": "A Novel Embedding Model for Knowledge Base Completion Based on Convolutional Neural Network", "abstract": "In this paper, we propose a novel embedding model, named ConvKB, for knowledge base completion. Our model ConvKB advances state-of-the-art models by employing a convolutional neural network, so that it can capture global relationships and transitional characteristics between entities and relations in knowledge bases. In ConvKB, each triple (head entity, relation, tail entity) is represented as a 3-column matrix where each column vector represents a triple element. This 3-column matrix is then fed to a convolution layer where multiple filters are operated on the matrix to generate different feature maps. These feature maps are then concatenated into a single feature vector representing the input triple. The feature vector is multiplied with a weight vector via a dot product to return a score. This score is then used to predict whether the triple is valid or not. 
Experiments show that ConvKB achieves better link prediction performance than previous state-of-the-art embedding models on two benchmark datasets WN18RR and FB15k-237.", "keyphrases": ["knowledge base completion", "convolutional neural network", "relation embedding"]} +{"id": "cheng-etal-2020-spellgcn", "title": "SpellGCN: Incorporating Phonological and Visual Similarities into Language Models for Chinese Spelling Check", "abstract": "Chinese Spelling Check (CSC) is a task to detect and correct spelling errors in Chinese natural language. Existing methods have made attempts to incorporate the similarity knowledge between Chinese characters. However, they take the similarity knowledge as either an external input resource or just heuristic rules. This paper proposes to incorporate phonological and visual similarity knowledge into language models for CSC via a specialized graph convolutional network (SpellGCN). The model builds a graph over the characters, and SpellGCN is learned to map this graph into a set of inter-dependent character classifiers. These classifiers are applied to the representations extracted by another network, such as BERT, enabling the whole network to be end-to-end trainable. Experiments are conducted on three human-annotated datasets. Our method achieves superior performance against previous models by a large margin.", "keyphrases": ["language model", "spellgcn", "confusion set"]} +{"id": "lacruz-etal-2012-average", "title": "Average Pause Ratio as an Indicator of Cognitive Effort in Post-Editing: A Case Study", "abstract": "Pauses are known to be good indicators of cognitive demand in monolingual language production and in translation. However, a previous effort by O'Brien (2006) to establish an analogous relationship in post-editing did not produce the expected result. In this case study, we introduce a metric for pause activity, the average pause ratio, which is sensitive to both the number and duration of pauses. We measured cognitive effort in a segment by counting the number of complete editing events. We found that the average pause ratio was higher for less cognitively demanding segments than for more cognitively demanding segments. Moreover, this effect became more pronounced as the minimum threshold for pause length was shortened.", "keyphrases": ["pause ratio", "cognitive effort", "post-editing"]} +{"id": "hasan-ng-2014-taking", "title": "Why are You Taking this Stance? Identifying and Classifying Reasons in Ideological Debates", "abstract": "Recent years have seen a surge of interest in stance classification in online debates. Oftentimes, however, it is important to determine not only the stance expressed by an author in her debate posts, but also the reasons behind her supporting or opposing the issue under debate. We therefore examine the new task of reason classification in this paper. Given the close interplay between stance classification and reason classification, we design computational models for examining how automatically computed stance information can be profitably exploited for reason classification. 
Experiments on our reason-annotated corpus of ideological debate posts from four domains demonstrate that sophisticated models of stances and reasons can indeed yield more accurate reason and stance classification results than their simpler counterparts.", "keyphrases": ["stance", "online debate", "reason classification", "counterargument"]} +{"id": "matsuzaki-etal-2005-probabilistic", "title": "Probabilistic CFG with Latent Annotations", "abstract": "This paper defines a generative probabilistic model of parse trees, which we call PCFG-LA. This model is an extension of PCFG in which non-terminal symbols are augmented with latent variables. Fine-grained CFG rules are automatically induced from a parsed corpus by training a PCFG-LA model using an EM-algorithm. Because exact parsing with a PCFG-LA is NP-hard, several approximations are described and empirically compared. In experiments using the Penn WSJ corpus, our automatically trained model gave a performance of 86.6% (F1, sentences \u2264 40 words), which is comparable to that of an unlexicalized PCFG parser created using extensive manual feature selection.", "keyphrases": ["latent annotation", "variable", "probabilistic cfg", "treebank", "tsg"]} +{"id": "cotterell-etal-2016-sigmorphon", "title": "The SIGMORPHON 2016 Shared Task\u2014Morphological Reinflection", "abstract": "The 2016 SIGMORPHON Shared Task was devoted to the problem of morphological reinflection. It introduced morphological datasets for 10 languages with diverse typological characteristics. The shared task drew submissions from 9 teams representing 11 institutions reflecting a variety of approaches to addressing supervised learning of reinflection. For the simplest task, inflection generation from lemmas, the best system averaged 95.56% exact-match accuracy across all languages, ranging from Maltese (88.99%) to Hungarian (99.30%). With the relatively large training datasets provided, recurrent neural network architectures consistently performed best\u2014in fact, there was a significant margin between neural and non-neural approaches. The best neural approach, averaged over all tasks and languages, outperformed the best non-neural one by 13.76% absolute; on individual tasks and languages the gap in accuracy sometimes exceeded 60%. Overall, the results show a strong state of the art, and serve as encouragement for future shared tasks that explore morphological analysis and generation with varying degrees of supervision.", "keyphrases": ["sigmorphon", "shared task", "art", "morphological inflection", "reinflection task"]} +{"id": "tatman-2017-gender", "title": "Gender and Dialect Bias in YouTube's Automatic Captions", "abstract": "This project evaluates the accuracy of YouTube's automatically-generated captions across two genders and five dialect groups. Speakers' dialect and gender was controlled for by using videos uploaded as part of the \u201caccent tag challenge\u201d, where speakers explicitly identify their language background. The results show robust differences in accuracy across both gender and dialect, with lower accuracy for 1) women and 2) speakers from Scotland. 
This finding builds on earlier research finding that speaker's sociolinguistic identity may negatively impact their ability to use automatic speech recognition, and demonstrates the need for sociolinguistically-stratified validation of systems.", "keyphrases": ["dialect", "youtube", "caption", "gender"]} +{"id": "nakashole-etal-2012-patty", "title": "PATTY: A Taxonomy of Relational Patterns with Semantic Types", "abstract": "This paper presents PATTY: a large resource for textual patterns that denote binary relations between entities. The patterns are semantically typed and organized into a subsumption taxonomy. The PATTY system is based on efficient algorithms for frequent itemset mining and can process Web-scale corpora. It harnesses the rich type system and entity population of large knowledge bases. The PATTY taxonomy comprises 350,569 pattern synsets. Random-sampling-based evaluation shows a pattern accuracy of 84.7%. PATTY has 8,162 subsumptions, with a random-sampling-based precision of 75%. The PATTY resource is freely available for interactive access and download.", "keyphrases": ["taxonomy", "patty", "entity pair", "semantic type signature", "paraphrase"]} +{"id": "sheng-etal-2020-towards", "title": "Towards Controllable Biases in Language Generation", "abstract": "We present a general approach towards controllable societal biases in natural language generation (NLG). Building upon the idea of adversarial triggers, we develop a method to induce societal biases in generated text when input prompts contain mentions of specific demographic groups. We then analyze two scenarios: 1) inducing negative biases for one demographic and positive biases for another demographic, and 2) equalizing biases between demographics. The former scenario enables us to detect the types of biases present in the model. Specifically, we show the effectiveness of our approach at facilitating bias analysis by finding topics that correspond to demographic inequalities in generated text and comparing the relative effectiveness of inducing biases for different demographics. The second scenario is useful for mitigating biases in downstream applications such as dialogue generation. In our experiments, the mitigation technique proves to be effective at equalizing the amount of biases across demographics while simultaneously generating less negatively biased text overall.", "keyphrases": ["language generation", "trigger", "group"]} +{"id": "barba-etal-2021-esc", "title": "ESC: Redesigning WSD with Extractive Sense Comprehension", "abstract": "Word Sense Disambiguation (WSD) is a historical NLP task aimed at linking words in contexts to discrete sense inventories and it is usually cast as a multi-label classification task. Recently, several neural approaches have employed sense definitions to better represent word meanings. Yet, these approaches do not observe the input sentence and the sense definition candidates all at once, thus potentially reducing the model performance and generalization power. We cope with this issue by reframing WSD as a span extraction problem \u2014 which we called Extractive Sense Comprehension (ESC) \u2014 and propose ESCHER, a transformer-based neural architecture for this new formulation. By means of an extensive array of experiments, we show that ESC unleashes the full potential of our model, leading it to outdo all of its competitors and to set a new state of the art on the English WSD task. 
In the few-shot scenario, ESCHER proves to exploit training data efficiently, attaining the same performance as its closest competitor while relying on almost three times fewer annotations. Furthermore, ESCHER can nimbly combine data annotated with senses from different lexical resources, achieving performances that were previously out of everyone's reach. The model along with data is available at .", "keyphrases": ["wsd", "extractive sense comprehension", "span extraction problem", "gloss"]} +{"id": "del-tredici-fernandez-2017-semantic", "title": "Semantic Variation in Online Communities of Practice", "abstract": "We introduce a framework for quantifying semantic variation of common words in Communities of Practice and in sets of topic-related communities. We show that while some meaning shifts are shared across related communities, others are community-specific, and therefore independent from the discussed topic. We propose such findings as evidence in favour of sociolinguistic theories of socially-driven semantic variation. Results are evaluated using an independent language modelling task. Furthermore, we investigate extralinguistic features and show that factors such as prominence and dissemination of words are related to semantic variation.", "keyphrases": ["online community", "practice", "semantic variation"]} +{"id": "freitag-etal-2014-jane", "title": "Jane: Open Source Machine Translation System Combination", "abstract": "Different machine translation engines can be remarkably dissimilar not only with respect to their technical paradigm, but also with respect to the translation output they yield. System combination is a method for combining the output of multiple machine translation engines in order to take benefit of the strengths of each of the individual engines. In this work we introduce a novel system combination implementation which is integrated into Jane, RWTH\u2019s open source statistical machine translation toolkit. On the most recent Workshop on Statistical Machine Translation system combination shared task, we achieve improvements of up to 0.7 points in BLEU over the best system combination hypotheses which were submitted for the official evaluation. Moreover, we enhance our system combination pipeline with additional n-gram language models and lexical translation models.", "keyphrases": ["individual engine", "machine translation toolkit", "jane"]} +{"id": "ahn-2006-stages", "title": "The stages of event extraction", "abstract": "Event detection and recognition is a complex task consisting of multiple sub-tasks of varying difficulty. In this paper, we present a simple, modular approach to event extraction that allows us to experiment with a variety of machine learning methods for these sub-tasks, as well as to evaluate the impact on performance these sub-tasks have on the overall task.", "keyphrases": ["stage", "event extraction", "modular approach", "trigger", "classification problem"]} +{"id": "yin-etal-2016-multi", "title": "Multi-Granularity Chinese Word Embedding", "abstract": "This paper considers the problem of learning Chinese word embeddings. In contrast to English, a Chinese word is usually composed of characters, and most of the characters themselves can be further divided into components such as radicals. While characters and radicals contain rich information and are capable of indicating semantic meanings of words, they have not been fully exploited by existing word embedding methods. 
In this work, we propose multi-granularity embedding (MGE) for Chinese words. The key idea is to make full use of such word-character-radical composition, and enrich word embeddings by further incorporating finer-grained semantics from characters and radicals. Quantitative evaluation demonstrates the superiority of MGE in word similarity computation and analogical reasoning. Qualitative analysis further shows its capability to identify finer-grained semantic meanings of words.", "keyphrases": ["chinese word", "character", "multi-granularity", "cwe model"]} +{"id": "zhao-etal-2008-pivot", "title": "Pivot Approach for Extracting Paraphrase Patterns from Bilingual Corpora", "abstract": "Paraphrase patterns are useful in paraphrase recognition and generation. In this paper, we present a pivot approach for extracting paraphrase patterns from bilingual parallel corpora, whereby the English paraphrase patterns are extracted using the sentences in a foreign language as pivots. We propose a log-linear model to compute the paraphrase likelihood of two patterns and exploit feature functions based on maximum likelihood estimation (MLE) and lexical weighting (LW). Using the presented method, we extract over 1,000,000 pairs of paraphrase patterns from 2M bilingual sentence pairs, the precision of which exceeds 67%. The evaluation results show that: (1) The pivot approach is effective in extracting paraphrase patterns, which significantly outperforms the conventional method DIRT. Especially, the log-linear model with the proposed feature functions achieves high performance. (2) The coverage of the extracted paraphrase patterns is high, which is above 84%. (3) The extracted paraphrase patterns can be classified into 5 types, which are useful in various applications.", "keyphrases": ["paraphrase pattern", "bilingual corpora", "pivot approach"]} +{"id": "li-etal-2018-delete", "title": "Delete, Retrieve, Generate: a Simple Approach to Sentiment and Style Transfer", "abstract": "We consider the task of text attribute transfer: transforming a sentence to alter a specific attribute (e.g., sentiment) while preserving its attribute-independent content (e.g., \u201cscreen is just the right size\u201d to \u201cscreen is too small\u201d). Our training data includes only sentences labeled with their attribute (e.g., positive and negative), but not pairs of sentences that only differ in the attributes, so we must learn to disentangle attributes from attribute-independent content in an unsupervised way. Previous work using adversarial methods has struggled to produce high-quality outputs. In this paper, we propose simpler methods motivated by the observation that text attributes are often marked by distinctive phrases (e.g., \u201ctoo small\u201d). Our strongest method extracts content words by deleting phrases associated with the sentence's original attribute value, retrieves new phrases associated with the target attribute, and uses a neural model to fluently combine these into a final output. 
Based on human evaluation, our best method generates grammatical and appropriate responses on 22% more inputs than the best previous system, averaged over three attribute transfer datasets: altering sentiment of reviews on Yelp, altering sentiment of reviews on Amazon, and altering image captions to be more romantic or humorous.", "keyphrases": ["retrieve", "style transfer", "text attribute", "input sentence", "sentiment polarity"]} +{"id": "lao-etal-2011-random", "title": "Random Walk Inference and Learning in A Large Scale Knowledge Base", "abstract": "We consider the problem of performing learning and inference in a large scale knowledge base containing imperfect knowledge with incomplete coverage. We show that a soft inference procedure based on a combination of constrained, weighted, random walks through the knowledge base graph can be used to reliably infer new beliefs for the knowledge base. More specifically, we show that the system can learn to infer different target relations by tuning the weights associated with random walks that follow different paths through the graph, using a version of the Path Ranking Algorithm (Lao and Cohen, 2010b). We apply this approach to a knowledge base of approximately 500,000 beliefs extracted imperfectly from the web by NELL, a never-ending language learner (Carlson et al., 2010). This new system improves significantly over NELL's earlier Horn-clause learning and inference method: it obtains nearly double the precision at rank 100, and the new learning method is also applicable to many more inference tasks.", "keyphrases": ["scale knowledge base", "path ranking algorithm", "random walk", "reasoning", "entity pair"]} +{"id": "hamon-etal-2009-end", "title": "End-to-End Evaluation in Simultaneous Translation", "abstract": "This paper presents the end-to-end evaluation of an automatic simultaneous translation system, built with state-of-the-art components. It shows whether, and for which situations, such a system might be advantageous when compared to a human interpreter. Using speeches in English translated into Spanish, we present the evaluation procedure and we discuss the results both for the recognition and translation components as well as for the overall system. Even if the translation process remains the Achilles' heel of the system, the results show that the system can keep at least half of the information, becoming potentially useful for final users.", "keyphrases": ["simultaneous translation", "human interpreter", "end-to-end evaluation"]} +{"id": "dyer-etal-2016-recurrent", "title": "Recurrent Neural Network Grammars", "abstract": "We introduce recurrent neural network grammars, probabilistic models of sentences with explicit phrase structure. We explain efficient inference procedures that allow application to both parsing and language modeling. Experiments show that they provide better parsing in English than any single previously published supervised generative model and better language modeling than state-of-the-art sequential RNNs in English and Chinese.", "keyphrases": ["neural network grammar", "generative model", "rnng", "transition-based parser", "discriminative parser"]} +{"id": "tseng-2003-semantic", "title": "Semantic Classification of Chinese Unknown Words", "abstract": "This paper describes a classifier that assigns semantic thesaurus categories to unknown Chinese words (words not already in the CiLin thesaurus and the Chinese Electronic Dictionary, but in the Sinica Corpus). 
The focus of the paper differs in two ways from previous research in this particular area. Prior research in Chinese unknown words mostly focused on proper nouns (Lee 1993, Lee, Lee and Chen 1994, Huang, Hong and Chen 1994, Chen and Chen 2000). This paper does not address proper nouns, focusing rather on common nouns, adjectives, and verbs. My analysis of the Sinica Corpus shows that contrary to expectation, most of unknown words in Chinese are common nouns, adjectives, and verbs rather than proper nouns. Other previous research has focused on features related to unknown word contexts (Caraballo 1999; Roark and Charniak 1998). While context is clearly an important feature, this paper focuses on non-contextual features, which may play a key role for unknown words that occur only once and hence have limited context. The feature I focus on, following Ciaramita (2002), is morphological similarity to words whose semantic category is known. My nearest neighbor approach to lexical acquisition computes the distance between an unknown word and examples from the CiLin thesaurus based upon its morphological structure. The classifier improves on baseline semantic categorization performance for adjectives and verbs, but not for nouns.", "keyphrases": ["chinese", "unknown word", "morphological similarity"]} +{"id": "holtzman-etal-2018-learning", "title": "Learning to Write with Cooperative Discriminators", "abstract": "Despite their local fluency, long-form text generated from RNNs is often generic, repetitive, and even self-contradictory. We propose a unified learning framework that collectively addresses all the above issues by composing a committee of discriminators that can guide a base RNN generator towards more globally coherent generations. More concretely, discriminators each specialize in a different principle of communication, such as Grice's maxims, and are collectively combined with the base RNN generator through a composite decoding objective. Human evaluation demonstrates that text generated by our model is preferred over that of baselines by a large margin, significantly enhancing the overall coherence, style, and information of the generations.", "keyphrases": ["discriminator", "fluency", "text generation", "beam search"]} +{"id": "settles-craven-2008-analysis", "title": "An Analysis of Active Learning Strategies for Sequence Labeling Tasks", "abstract": "Active learning is well-suited to many problems in natural language processing, where unlabeled data may be abundant but annotation is slow and expensive. This paper aims to shed light on the best active learning approaches for sequence labeling tasks such as information extraction and document segmentation. We survey previously used query selection strategies for sequence models, and propose several novel algorithms to address their shortcomings. We also conduct a large-scale empirical comparison using multiple corpora, which demonstrates that our proposed methods advance the state of the art.", "keyphrases": ["active learning", "sequence labeling task", "annotation effort"]} +{"id": "van-aken-etal-2018-challenges", "title": "Challenges for Toxic Comment Classification: An In-Depth Error Analysis", "abstract": "Toxic comment classification has become an active research field with many recently proposed approaches. However, while these approaches address some of the task's challenges, others still remain unsolved and directions for further research are needed. 
To this end, we compare different deep learning and shallow approaches on a new, large comment dataset and propose an ensemble that outperforms all individual models. Further, we validate our findings on a second dataset. The results of the ensemble enable us to perform an extensive error analysis, which reveals open challenges for state-of-the-art methods and directions towards pending future research. These challenges include missing paradigmatic context and inconsistent dataset labels.", "keyphrases": ["comment classification", "in-depth error analysis", "explicit abuse"]} +{"id": "li-etal-2019-word-segmentation", "title": "Is Word Segmentation Necessary for Deep Learning of Chinese Representations?", "abstract": "Segmenting a chunk of text into words is usually the first step of processing Chinese text, but its necessity has rarely been explored. In this paper, we ask the fundamental question of whether Chinese word segmentation (CWS) is necessary for deep learning-based Chinese Natural Language Processing. We benchmark neural word-based models which rely on word segmentation against neural char-based models which do not involve word segmentation in four end-to-end NLP benchmark tasks: language modeling, machine translation, sentence matching/paraphrase and text classification. Through direct comparisons between these two types of models, we find that char-based models consistently outperform word-based models. Based on these observations, we conduct comprehensive experiments to study why word-based models underperform char-based models in these deep learning-based NLP tasks. We show that it is because word-based models are more vulnerable to data sparsity and the presence of out-of-vocabulary (OOV) words, and thus more prone to overfitting. We hope this paper could encourage researchers in the community to rethink the necessity of word segmentation in deep learning-based Chinese Natural Language Processing.", "keyphrases": ["word segmentation", "deep learning", "chinese"]} +{"id": "zhang-etal-2017-adversarial", "title": "Adversarial Training for Unsupervised Bilingual Lexicon Induction", "abstract": "Word embeddings are well known to capture linguistic regularities of the language on which they are trained. Researchers also observe that these regularities can transfer across languages. However, previous endeavors to connect separate monolingual word embeddings typically require cross-lingual signals as supervision, either in the form of parallel corpus or seed lexicon. In this work, we show that such cross-lingual connection can actually be established without any form of supervision. We achieve this end by formulating the problem as a natural adversarial game, and investigating techniques that are crucial to successful training. We carry out evaluation on the unsupervised bilingual lexicon induction task. Even though this task appears intrinsically cross-lingual, we are able to demonstrate encouraging performance without any cross-lingual clues.", "keyphrases": ["induction", "word embedding", "cross-lingual signal", "adversarial training", "unsupervised bwe"]} +{"id": "kumar-etal-2018-benchmarking", "title": "Benchmarking Aggression Identification in Social Media", "abstract": "In this paper, we present the report and findings of the Shared Task on Aggression Identification organised as part of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC - 1) at COLING 2018. 
The task was to develop a classifier that could discriminate between Overtly Aggressive, Covertly Aggressive, and Non-aggressive texts. For this task, the participants were provided with a dataset of 15,000 aggression-annotated Facebook Posts and Comments each in Hindi (in both Roman and Devanagari script) and English for training and validation. For testing, two different sets - one from Facebook and another from a different social media - were provided. A total of 130 teams registered to participate in the task, 30 teams submitted their test runs, and finally 20 teams also sent their system description paper which are included in the TRAC workshop proceedings. The best system obtained a weighted F-score of 0.64 for both Hindi and English on the Facebook test sets, while the best scores on the surprise set were 0.60 and 0.50 for English and Hindi respectively. The results presented in this report depict how challenging the task is. The positive response from the community and the great levels of participation in the first edition of this shared task also highlights the interest in this topic.", "keyphrases": ["aggression identification", "aggressive", "cyberbullying", "social medium", "hate speech"]} +{"id": "taira-etal-2008-japanese", "title": "A Japanese Predicate Argument Structure Analysis using Decision Lists", "abstract": "This paper describes a new automatic method for Japanese predicate argument structure analysis. The method learns relevant features to assign case roles to the argument of the target predicate using the features of the words located closest to the target predicate under various constraints such as dependency types, words, semantic categories, parts of speech, functional words and predicate voices. We constructed decision lists in which these features were sorted by their learned weights. Using our method, we integrated the tasks of semantic role labeling and zero-pronoun identification, and achieved a 17% improvement compared with a baseline method in a sentence level performance analysis.", "keyphrases": ["predicate", "decision list", "zero-pronoun identification"]} +{"id": "lei-etal-2014-low", "title": "Low-Rank Tensors for Scoring Dependency Structures", "abstract": "Accurate scoring of syntactic structures such as head-modifier arcs in dependency parsing typically requires rich, high-dimensional feature representations. A small subset of such features is often selected manually. This is problematic when features lack clear linguistic meaning as in embeddings or when the information is blended across features. In this paper, we use tensors to map high-dimensional feature vectors into low dimensional representations. We explicitly maintain the parameters as a low-rank tensor to obtain low dimensional representations of words in their syntactic roles, and to leverage modularity in the tensor for easy training with online algorithms. Our parser consistently outperforms the Turbo and MST parsers across 14 different languages. We also obtain the best published UAS results on 5 languages.", "keyphrases": ["tensor", "scoring", "dependency parsing", "rbgparser"]} +{"id": "he-etal-2017-unsupervised", "title": "An Unsupervised Neural Attention Model for Aspect Extraction", "abstract": "Aspect extraction is an important and challenging task in aspect-based sentiment analysis. Existing works tend to apply variants of topic models on this task. While fairly successful, these methods usually do not produce highly coherent aspects. 
In this paper, we present a novel neural approach with the aim of discovering coherent aspects. The model improves coherence by exploiting the distribution of word co-occurrences through the use of neural word embeddings. Unlike topic models which typically assume independently generated words, word embedding models encourage words that appear in similar contexts to be located close to each other in the embedding space. In addition, we use an attention mechanism to de-emphasize irrelevant words during training, further improving the coherence of aspects. Experimental results on real-life datasets demonstrate that our approach discovers more meaningful and coherent aspects, and substantially outperforms baseline methods on several evaluation tasks.", "keyphrases": ["aspect extraction", "topic model", "neural approach"]} +{"id": "peters-etal-2019-tune", "title": "To Tune or Not to Tune? Adapting Pretrained Representations to Diverse Tasks", "abstract": "While most previous work has focused on different pretraining objectives and architectures for transfer learning, we ask how to best adapt the pretrained model to a given target task. We focus on the two most common forms of adaptation, feature extraction (where the pretrained weights are frozen), and directly fine-tuning the pretrained model. Our empirical results across diverse NLP tasks with two state-of-the-art models show that the relative performance of fine-tuning vs. feature extraction depends on the similarity of the pretraining and target tasks. We explore possible explanations for this finding and provide a set of adaptation guidelines for the NLP practitioner.", "keyphrases": ["weight", "fine-tuning", "downstream task"]} +{"id": "marsi-krahmer-2005-explorations", "title": "Explorations in Sentence Fusion", "abstract": "The invention provides methods and compositions for expressing a recombinant gene in eukaryotic cells, especially fungi, preferably yeast. The invention provides transcriptional regulating elements having a novel nucleotide sequence which are capable of trancriptionally regulating the expression of a cis joined gene, typically in response to the availability of certain nutrients to the host cell. Preferred regulatory elements are responsive to nutrient depletion, particulary glucose, ethanol, phosphate or a nitrogen source. Nucleic acid constructs comprising such regulatory elements operably linked to recombinant genes, cells comprising such regulatory elements, and methods of producing recombinant protein in such cells are also provided. The invention discloses regulatory elements which are induced through the ras gene product. Accordingly, the disclosed expression systems also provide a convenient marker for ras gene function. Finally, the invention also provides methods and compositions for the diagnosis and treatment of fungal infection. In particular, the invention provides gp37-derived peptides encoded by YGP1 and gp37- selective binding agents, such as antibodies.", "keyphrases": ["sentence fusion", "union fusion", "variant"]} +{"id": "bing-etal-2015-abstractive", "title": "Abstractive Multi-Document Summarization via Phrase Selection and Merging", "abstract": "We propose an abstraction-based multi-document summarization framework that can construct new sentences by exploring more fine-grained syntactic units than sentences, namely, noun/verb phrases. Different from existing abstraction-based approaches, our method first constructs a pool of concepts and facts represented by phrases from the input documents. 
Then new sentences are generated by selecting and merging informative phrases to maximize the salience of phrases and meanwhile satisfy the sentence construction constraints. We employ integer linear optimization for conducting phrase selection and merging simultaneously in order to achieve the global optimal solution for a summary. Experimental results on the benchmark data set TAC 2011 show that our framework outperforms the state-of-the-art models under automated pyramid evaluation metric, and achieves reasonably well results on manual linguistic quality evaluation.", "keyphrases": ["summarization", "phrase selection", "fine-grained syntactic unit", "input document"]} +{"id": "ji-eisenstein-2014-representation", "title": "Representation Learning for Text-level Discourse Parsing", "abstract": "Text-level discourse parsing is notoriously difficult, as distinctions between discourse relations require subtle semantic judgments that are not easily captured using standard features. In this paper, we present a representation learning approach, in which we transform surface features into a latent space that facilitates RST discourse parsing. By combining the machinery of large-margin transition-based structured prediction with representation learning, our method jointly learns to parse discourse while at the same time learning a discourse-driven projection of surface features. The resulting shift-reduce discourse parser obtains substantial improvements over the previous state-of-the-art in predicting relations and nuclearity on the RST Treebank.", "keyphrases": ["rst", "discourse parser", "representation learning", "shift-reduce parser", "dplp"]} +{"id": "wellington-etal-2006-empirical", "title": "Empirical Lower Bounds on the Complexity of Translational Equivalence", "abstract": "This paper describes a study of the patterns of translational equivalence exhibited by a variety of bitexts. The study found that the complexity of these patterns in every bitext was higher than suggested in the literature. These findings shed new light on why \"syntactic\" constraints have not helped to improve statistical translation models, including finite-state phrase-based models, tree-to-string models, and tree-to-tree models. The paper also presents evidence that inversion transduction grammars cannot generate some translational equivalence relations, even in relatively simple real bitexts in syntactically similar languages with rigid word order. Instructions for replicating our experiments are at http://nip.cs.nyu.edu/GenPar/ACL06", "keyphrases": ["complexity", "translational equivalence", "bitext", "itg", "inside-out alignment"]} +{"id": "chen-etal-2018-temporally", "title": "Temporally Grounding Natural Sentence in Video", "abstract": "We introduce an effective and efficient method that grounds (i.e., localizes) natural sentences in long, untrimmed video sequences. Specifically, a novel Temporal GroundNet (TGN) is proposed to temporally capture the evolving fine-grained frame-by-word interactions between video and sentence. TGN sequentially scores a set of temporal candidates ended at each frame based on the exploited frame-by-word interactions, and finally grounds the segment corresponding to the sentence. Unlike traditional methods treating the overlapping segments separately in a sliding window fashion, TGN aggregates the historical information and generates the final grounding result in one single pass. 
We extensively evaluate our proposed TGN on three public datasets with significant improvements over the state-of-the-arts. We further show the consistent effectiveness and efficiency of TGN through an ablation study and a runtime test.", "keyphrases": ["natural sentence", "video", "frame-by-word interaction"]} +{"id": "hulth-2003-improved", "title": "Improved Automatic Keyword Extraction Given More Linguistic Knowledge", "abstract": "In this paper, experiments on automatic extraction of keywords from abstracts using a supervised machine learning algorithm are discussed. The main point of this paper is that by adding linguistic knowledge to the representation (such as syntactic features), rather than relying only on statistics (such as term frequency and n-grams), a better result is obtained as measured by keywords previously assigned by professional indexers. In more detail, extracting NP-chunks gives a better precision than n-grams, and by adding the PoS tag(s) assigned to the term as a feature, a dramatic improvement of the results is obtained, independent of the term selection approach applied.", "keyphrases": ["keyword", "linguistic knowledge", "binary classification problem", "adjective"]} +{"id": "coppersmith-etal-2015-adhd", "title": "From ADHD to SAD: Analyzing the Language of Mental Health on Twitter through Self-Reported Diagnoses", "abstract": "Many significant challenges exist for the mental health field, but one in particular is a lack of data available to guide research. Language provides a natural lens for studying mental health \u2010 much existing work and therapy have strong linguistic components, so the creation of a large, varied, language-centric dataset could provide significant grist for the field of mental health research. We examine a broad range of mental health conditions in Twitter data by identifying self-reported statements of diagnosis. We systematically explore language differences between ten conditions with respect to the general population, and to each other. Our aim is to provide guidance and a roadmap for where deeper exploration is likely to be fruitful.", "keyphrases": ["mental health", "twitter", "self-reported diagnosis"]} +{"id": "kudo-etal-2004-applying", "title": "Applying Conditional Random Fields to Japanese Morphological Analysis", "abstract": "This paper presents Japanese morphological analysis based on conditional random fields (CRFs). Previous work in CRFs assumed that observation sequence (word) boundaries were fixed. However, word boundaries are not clear in Japanese, and hence a straightforward application of CRFs is not possible. We show how CRFs can be applied to situations where word boundary ambiguity exists. CRFs offer a solution to the long-standing problems in corpus-based or statistical Japanese morphological analysis. First, flexible feature designs for hierarchical tagsets become possible. Second, influences of label and length bias are minimized. We experiment CRFs on the standard testbed corpus used for Japanese morphological analysis, and evaluate our results using the same experimental dataset as the HMMs and MEMMs previously reported in this task. 
Our results confirm that CRFs not only solve the long-standing problems but also improve the performance over HMMs and MEMMs.", "keyphrases": ["conditional random fields", "japanese morphological analysis", "word boundary", "mecab", "dictionary-based approach"]} +{"id": "voorhees-2008-contradictions", "title": "Contradictions and Justifications: Extensions to the Textual Entailment Task", "abstract": "The third PASCAL Recognizing Textual Entailment Challenge (RTE-3) contained an optional task that extended the main entailment task by requiring a system to make three-way entailment decisions (entails, contradicts, neither) and to justify its response. Contradiction was rare in the RTE-3 test set, occurring in only about 10% of the cases, and systems found accurately detecting it difficult. Subsequent analysis of the results shows a test set must contain many more entailment pairs for the three-way decision task than the traditional two-way task to have equal confidence in system comparisons. Each of six human judges representing eventual end users rated the quality of a justification by assigning \u201cunderstandability\u201d and \u201ccorrectness\u201d scores. Ratings of the same justification across judges differed significantly, signaling the need for a better characterization of the justification task.", "keyphrases": ["justification", "optional task", "contradiction"]} +{"id": "xu-koehn-2017-zipporah", "title": "Zipporah: a Fast and Scalable Data Cleaning System for Noisy Web-Crawled Parallel Corpora", "abstract": "We introduce Zipporah, a fast and scalable data cleaning system. We propose a novel type of bag-of-words translation feature, and train logistic regression models to classify good data and synthetic noisy data in the proposed feature space. The trained model is used to score parallel sentences in the data pool for selection. As shown in experiments, Zipporah selects a high-quality parallel corpus from a large, mixed quality data pool. In particular, for one noisy dataset, Zipporah achieves a 2.1 BLEU score improvement with using 1/5 of the data over using the entire corpus.", "keyphrases": ["regression model", "noisy data", "zipporah", "sentence pair", "adequacy"]} +{"id": "wang-etal-2020-semeval", "title": "SemEval-2020 Task 4: Commonsense Validation and Explanation", "abstract": "In this paper, we present SemEval-2020 Task 4, Commonsense Validation and Explanation (ComVE), which includes three subtasks, aiming to evaluate whether a system can distinguish a natural language statement that makes sense to humans from one that does not, and provide the reasons. Specifically, in our first subtask, the participating systems are required to choose from two natural language statements of similar wording the one that makes sense and the one does not. The second subtask additionally asks a system to select the key reason from three options why a given statement does not make sense. In the third subtask, a participating system needs to generate the reason automatically. 39 teams submitted their valid systems to at least one subtask. For Subtask A and Subtask B, top-performing teams have achieved results closed to human performance. However, for Subtask C, there is still a considerable gap between system and human performance. 
The dataset used in our task can be found at .", "keyphrases": ["validation", "explanation", "natural language statement", "semeval-2020 task"]} +{"id": "narayan-etal-2018-ranking", "title": "Ranking Sentences for Extractive Summarization with Reinforcement Learning", "abstract": "Single document summarization is the task of producing a shorter version of a document while preserving its principal information content. In this paper we conceptualize extractive summarization as a sentence ranking task and propose a novel training algorithm which globally optimizes the ROUGE evaluation metric through a reinforcement learning objective. We use our algorithm to train a neural summarization model on the CNN and DailyMail datasets and demonstrate experimentally that it outperforms state-of-the-art extractive and abstractive systems when evaluated automatically and by humans.", "keyphrases": ["extractive summarization", "reinforcement learning", "human evaluation", "document encoder"]} +{"id": "li-etal-2011-automatic", "title": "Automatic Evaluation of Chinese Translation Output: Word-Level or Character-Level?", "abstract": "Word is usually adopted as the smallest unit in most tasks of Chinese language processing. However, for automatic evaluation of the quality of Chinese translation output when translating from other languages, either a word-level approach or a character-level approach is possible. So far, there has been no detailed study to compare the correlations of these two approaches with human assessment. In this paper, we compare word-level metrics with character-level metrics on the submitted output of English-to-Chinese translation systems in the IWSLT'08 CT-EC and NIST'08 EC tasks. Our experimental results reveal that character-level metrics correlate with human assessment better than word-level metrics. Our analysis suggests several key reasons behind this finding.", "keyphrases": ["chinese translation output", "word-level metric", "automatic evaluation"]} +{"id": "schick-schutze-2021-exploiting", "title": "Exploiting Cloze-Questions for Few-Shot Text Classification and Natural Language Inference", "abstract": "Some NLP tasks can be solved in a fully unsupervised fashion by providing a pretrained language model with \u201ctask descriptions\u201d in natural language (e.g., Radford et al., 2019). While this approach underperforms its supervised counterpart, we show in this work that the two ideas can be combined: We introduce Pattern-Exploiting Training (PET), a semi-supervised training procedure that reformulates input examples as cloze-style phrases to help language models understand a given task. These phrases are then used to assign soft labels to a large set of unlabeled examples. Finally, standard supervised training is performed on the resulting training set. For several tasks and languages, PET outperforms supervised training and strong semi-supervised approaches in low-resource settings by a large margin.", "keyphrases": ["text classification", "cloze-style phrase", "few-shot learning", "prompt-based finetuning"]} +{"id": "green-etal-2014-human", "title": "Human Effort and Machine Learnability in Computer Aided Translation", "abstract": "Analyses of computer aided translation typically focus on either frontend interfaces and human effort, or backend translation and machine learnability of corrections. However, this distinction is artificial in practice since the frontend and backend must work in concert. 
We present the first holistic, quantitative evaluation of these issues by contrasting two assistive modes: post-editing and interactive machine translation (MT). We describe a new translator interface, extensive modifications to a phrase-based MT system, and a novel objective function for re-tuning to human corrections. Evaluation with professional bilingual translators shows that post-edit is faster than interactive at the cost of translation quality for French-English and English-German. However, re-tuning the MT system to interactive output leads to larger, statistically significant reductions in HTER versus re-tuning to post-edit. Analysis shows that tuning directly to HTER results in fine-grained corrections to subsequent machine output.", "keyphrases": ["machine learnability", "translator", "human effort"]} +{"id": "chiang-etal-2006-parsing", "title": "Parsing Arabic Dialects", "abstract": "The Arabic language is a collection of spoken dialects with important phonological, morphological, lexical, and syntactic differences, along with a standard written language, Modern Standard Arabic (MSA). Since the spoken dialects are not officially written, it is very costly to obtain adequate corpora to use for training dialect NLP tools such as parsers. In this paper, we address the problem of parsing transcribed spoken Levantine Arabic (LA). We do not assume the existence of any annotated LA corpus (except for development and testing), nor of a parallel corpus LA-MSA. Instead, we use explicit knowledge about the relation between LA and MSA.", "keyphrases": ["arabic dialect", "levantine arabic", "msa treebank", "danlp"]} +{"id": "colmenares-etal-2015-heads", "title": "HEADS: Headline Generation as Sequence Prediction Using an Abstract Feature-Rich Space", "abstract": "Automatic headline generation is a sub-task of document summarization with many reported applications. In this study we present a sequence-prediction technique for learning how editors title their news stories. The introduced technique models the problem as a discrete optimization task in a feature-rich space. In this space the global optimum can be found in polynomial time by means of dynamic programming. We train and test our model on an extensive corpus of financial news, and compare it against a number of baselines by using standard metrics from the document summarization domain, as well as some new ones proposed in this work. We also assess the readability and informativeness of the generated titles through human evaluation. The obtained results are very appealing and substantiate the soundness of the approach.", "keyphrases": ["headline generation", "feature-rich space", "optimization task"]} +{"id": "lee-2004-morphological", "title": "Morphological Analysis for Statistical Machine Translation", "abstract": "We present a novel morphological analysis technique which induces a morphological and syntactic symmetry between two languages with highly asymmetrical morphological structures to improve statistical machine translation qualities. The technique pre-supposes fine-grained segmentation of a word in the morphologically rich language into the sequence of prefix(es)-stem-suffix(es) and part-of-speech tagging of the parallel corpus. The algorithm identifies morphemes to be merged or deleted in the morphologically rich language to induce the desired morphological and syntactic symmetry. 
The technique improves Arabic-to-English translation qualities significantly when applied to IBM Model 1 and Phrase Translation Models trained on the training corpus size ranging from 3,500 to 3.3 million sentence pairs.", "keyphrases": ["machine translation", "symmetry", "morphological analysis", "arabic", "pos"]} +{"id": "khayrallah-etal-2018-regularized", "title": "Regularized Training Objective for Continued Training for Domain Adaptation in Neural Machine Translation", "abstract": "Supervised domain adaptation\u2014where a large generic corpus and a smaller in-domain corpus are both available for training\u2014is a challenge for neural machine translation (NMT). Standard practice is to train a generic model and use it to initialize a second model, then continue training the second model on in-domain data to produce an in-domain model. We add an auxiliary term to the training objective during continued training that minimizes the cross entropy between the in-domain model's output word distribution and that of the out-of-domain model to prevent the model's output from differing too much from the original out-of-domain model. We perform experiments on EMEA (descriptions of medicines) and TED (rehearsed presentations), initialized from a general domain (WMT) model. Our method shows improvements over standard continued training by up to 1.5 BLEU.", "keyphrases": ["continued training", "domain adaptation", "neural machine translation", "model parameter"]} +{"id": "vaswani-etal-2018-tensor2tensor", "title": "Tensor2Tensor for Neural Machine Translation", "abstract": "Tensor2Tensor is a library for deep learning models that is well-suited for neural machine translation and includes the reference implementation of the state-of-the-art Transformer model.", "keyphrases": ["neural machine translation", "implementation", "tensor2tensor"]} +{"id": "nairn-etal-2006-computing", "title": "Computing relative polarity for textual inference", "abstract": "Semantic relations between main and complement sentences are of great significance in any system of automatic data processing that depends on natural language. In this paper we present a strategy for detecting author commitment to the truth/falsity of complement clauses based on their syntactic type and on the meaning of their embedding predicate. We show that the implications of a predicate at an arbitrary depth of embedding about its complement clause depend on a globally determined notion of relative polarity. We, moreover, observe that different classes of complement-taking verbs have a different effect on the polarity of their complement clauses and that this effect depends recursively on their own embedding. A polarity propagation algorithm is presented as part of a general strategy of canonicalization of linguistically-based representations, with a view to minimizing the demands on the entailment and contradiction detection process.", "keyphrases": ["polarity", "author commitment", "notion"]} +{"id": "xiao-guo-2014-distributed", "title": "Distributed Word Representation Learning for Cross-Lingual Dependency Parsing", "abstract": "This paper proposes to learn language-independent word representations to address cross-lingual dependency parsing, which aims to predict the dependency parsing trees for sentences in the target language by training a dependency parser with labeled sentences from a source language. 
We first combine all sentences from both languages to induce real-valued distributed representation of words under a deep neural network architecture, which is expected to capture semantic similarities of words not only within the same language but also across different languages. We then use the induced interlingual word representation as augmenting features to train a delexicalized dependency parser on labeled sentences in the source language and apply it to the target sentences. To investigate the effectiveness of the proposed technique, extensive experiments are conducted on cross-lingual dependency parsing tasks with nine different languages. The experimental results demonstrate the superior cross-lingual generalizability of the word representation induced by the proposed approach, compared to alternative methods.", "keyphrases": ["dependency parsing", "pseudo-cross-lingual method", "word pair"]} +{"id": "lee-etal-2011-discriminative", "title": "A Discriminative Model for Joint Morphological Disambiguation and Dependency Parsing", "abstract": "Most previous studies of morphological disambiguation and dependency parsing have been pursued independently. Morphological taggers operate on n-grams and do not take into account syntactic relations; parsers use the \"pipeline\" approach, assuming that morphological information has been separately obtained. \n \nHowever, in morphologically-rich languages, there is often considerable interaction between morphology and syntax, such that neither can be disambiguated without the other. In this paper, we propose a discriminative model that jointly infers morphological properties and syntactic structures. In evaluations on various highly-inflected languages, this joint model outperforms both a baseline tagger in morphological disambiguation, and a pipeline parser in head selection.", "keyphrases": ["dependency parsing", "morphological property", "highly-inflected language", "joint tagging"]} +{"id": "liu-seneff-2009-review", "title": "Review Sentiment Scoring via a Parse-and-Paraphrase Paradigm", "abstract": "This paper presents a parse-and-paraphrase paradigm to assess the degrees of sentiment for product reviews. Sentiment identification has been well studied; however, most previous work provides binary polarities only (positive and negative), and the polarity of sentiment is simply reversed when a negation is detected. The extraction of lexical features such as unigram/bigram also complicates the sentiment classification task, as linguistic structure such as implicit long-distance dependency is often disregarded. In this paper, we propose an approach to extracting adverb-adjective-noun phrases based on clause structure obtained by parsing sentences into a hierarchical representation. We also propose a robust general solution for modeling the contribution of adverbials and negation to the score for degree of sentiment. In an application involving extracting aspect-based pros and cons from restaurant reviews, we obtained a 45% relative improvement in recall through the use of parsing methods, while also improving precision.", "keyphrases": ["parse-and-paraphrase paradigm", "product review", "polarity"]} +{"id": "qin-etal-2019-stack", "title": "A Stack-Propagation Framework with Token-Level Intent Detection for Spoken Language Understanding", "abstract": "Intent detection and slot filling are two main tasks for building a spoken language understanding (SLU) system. 
The two tasks are closely tied and the slots often highly depend on the intent. In this paper, we propose a novel framework for SLU to better incorporate the intent information, which further guides the slot filling. In our framework, we adopt a joint model with Stack-Propagation which can directly use the intent information as input for slot filling, thus capturing the intent semantic knowledge. In addition, to further alleviate the error propagation, we perform the token-level intent detection for the Stack-Propagation framework. Experiments on two public datasets show that our model achieves the state-of-the-art performance and outperforms other previous methods by a large margin. Finally, we use the Bidirectional Encoder Representation from Transformer (BERT) model in our framework, which further boosts our performance in the SLU task.", "keyphrases": ["stack-propagation framework", "intent detection", "spoken language understanding", "slu task"]} +{"id": "petroni-etal-2021-kilt", "title": "KILT: a Benchmark for Knowledge Intensive Language Tasks", "abstract": "Challenging problems such as open-domain question answering, fact checking, slot filling and entity linking require access to large, external knowledge sources. While some models do well on individual tasks, developing general models is difficult as each task might require computationally expensive indexing of custom knowledge sources, in addition to dedicated infrastructure. To catalyze research on models that condition on specific information in large textual resources, we present a benchmark for knowledge-intensive language tasks (KILT). All tasks in KILT are grounded in the same snapshot of Wikipedia, reducing engineering turnaround through the re-use of components, as well as accelerating research into task-agnostic memory architectures. We test both task-specific and general baselines, evaluating downstream performance in addition to the ability of the models to provide provenance. We find that a shared dense vector index coupled with a seq2seq model is a strong baseline, outperforming more tailor-made approaches for fact checking, open-domain question answering and dialogue, and yielding competitive results on entity linking and slot filling, by generating disambiguated text. KILT data and code are available at .", "keyphrases": ["question answering", "knowledge source", "kilt", "knowledge-intensive task", "genre"]} +{"id": "yu-chen-2012-detecting", "title": "Detecting Word Ordering Errors in Chinese Sentences for Learning Chinese as a Foreign Language", "abstract": "Automatic detection of sentence errors is an important NLP task and is valuable to assist foreign language learners. In this paper, we investigate the problem of word ordering errors in Chinese sentences and propose classifiers to detect this type of error. Word n-gram features in Google Chinese Web 5-gram corpus and ClueWeb09 corpus, and POS features in the Chinese POS-tagged ClueWeb09 corpus are adopted in the classifiers. The experimental results show that integrating syntactic features, web corpus features and perturbation features are useful for word ordering error detection, and the proposed classifier achieves 71.64% accuracy in the experimental datasets.", "keyphrases": ["chinese", "syntactic feature", "word-ordering error"]} +{"id": "beinborn-etal-2014-predicting", "title": "Predicting the Difficulty of Language Proficiency Tests", "abstract": "Language proficiency tests are used to evaluate and compare the progress of language learners. 
We present an approach for automatic difficulty prediction of C-tests that performs on par with human experts. On the basis of detailed analysis of newly collected data, we develop a model for C-test difficulty introducing four dimensions: solution difficulty, candidate ambiguity, inter-gap dependency, and paragraph difficulty. We show that cues from all four dimensions contribute to C-test difficulty.", "keyphrases": ["difficulty", "language proficiency test", "c-tests"]} +{"id": "muis-lu-2017-labeling", "title": "Labeling Gaps Between Words: Recognizing Overlapping Mentions with Mention Separators", "abstract": "In this paper, we propose a new model that is capable of recognizing overlapping mentions. We introduce a novel notion of mention separators that can be effectively used to capture how mentions overlap with one another. On top of a novel multigraph representation that we introduce, we show that efficient and exact inference can still be performed. We present some theoretical analysis on the differences between our model and a recently proposed model for recognizing overlapping mentions, and discuss the possible implications of the differences. Through extensive empirical analysis on standard datasets, we demonstrate the effectiveness of our approach.", "keyphrases": ["gap", "mention separator", "multigraph representation", "ambiguity issue", "spurious structure issue"]} +{"id": "sun-etal-2018-stance", "title": "Stance Detection with Hierarchical Attention Network", "abstract": "Stance detection aims to assign a stance label (for or against) to a post toward a specific target. Recently, there is a growing interest in using neural models to detect stance of documents. Most of these works model the sequence of words to learn document representation. However, much linguistic information, such as polarity and arguments of the document, is correlated with the stance of the document, and can inspire us to explore the stance. Hence, we present a neural model to fully employ various linguistic information to construct the document representation. In addition, since the influences of different linguistic information are different, we propose a hierarchical attention network to weigh the importance of various linguistic information, and learn the mutual attention between the document and the linguistic information. The experimental results on two datasets demonstrate the effectiveness of the proposed hierarchical attention neural model.", "keyphrases": ["hierarchical attention network", "document representation", "linguistic information", "stance detection"]} +{"id": "baly-etal-2018-predicting", "title": "Predicting Factuality of Reporting and Bias of News Media Sources", "abstract": "We present a study on predicting the factuality of reporting and bias of news media. While previous work has focused on studying the veracity of claims or documents, here we are interested in characterizing entire news media. This is an under-studied, but arguably important research problem, both in its own right and as a prior for fact-checking systems. We experiment with a large list of news websites and with a rich set of features derived from (i) a sample of articles from the target news media, (ii) its Wikipedia page, (iii) its Twitter account, (iv) the structure of its URL, and (v) information about the Web traffic it attracts. 
The experimental results show sizable performance gains over the baseline, and reveal the importance of each feature type.", "keyphrases": ["factuality", "wikipedia page", "news medium", "political bias", "article level"]} +{"id": "lin-etal-2008-mining", "title": "Mining Parenthetical Translations from the Web by Word Alignment", "abstract": "Documents in languages such as Chinese, Japanese and Korean sometimes annotate terms with their translations in English inside a pair of parentheses. We present a method to extract such translations from a large collection of web documents by building a partially parallel corpus and use a word alignment algorithm to identify the terms being translated. The method is able to generalize across the translations for different terms and can reliably extract translations that occurred only once in the entire web. Our experiment on Chinese web pages produced more than 26 million pairs of translations, which is over two orders of magnitude more than previous results. We show that the addition of the extracted translation pairs as training data provides significant increase in the BLEU score for a statistical machine translation system.", "keyphrases": ["web", "word alignment", "different method"]} +{"id": "quirk-corston-oliver-2006-impact", "title": "The impact of parse quality on syntactically-informed statistical machine translation", "abstract": "We investigate the impact of parse quality on a syntactically-informed statistical machine translation system applied to technical text. We vary parse quality by varying the amount of data used to train the parser. As the amount of data increases, parse quality improves, leading to improvements in machine translation output and results that significantly outperform a state-of-the-art phrasal baseline.", "keyphrases": ["parse quality", "syntax-based model", "translation mistake"]} +{"id": "kaji-kitsuregawa-2007-building", "title": "Building Lexicon for Sentiment Analysis from Massive Collection of HTML Documents", "abstract": "Recognizing polarity requires a list of polar words and phrases. For the purpose of building such a lexicon automatically, a lot of studies have investigated (semi-) unsupervised methods of learning the polarity of words and phrases. In this paper, we explore the use of structural clues that can extract polar sentences from Japanese HTML documents, and build a lexicon from the extracted polar sentences. The key idea is to develop the structural clues so that they achieve extremely high precision at the cost of recall. In order to compensate for the low recall, we used a massive collection of HTML documents. Thus, we could prepare a sufficiently large polar sentence corpus.", "keyphrases": ["sentiment analysis", "massive collection", "html document"]} +{"id": "zhang-etal-2020-spelling", "title": "Spelling Error Correction with Soft-Masked BERT", "abstract": "Spelling error correction is an important yet challenging task because a satisfactory solution of it essentially needs human-level language understanding ability. Without loss of generality we consider Chinese spelling error correction (CSC) in this paper. A state-of-the-art method for the task selects a character from a list of candidates for correction (including non-correction) at each position of the sentence on the basis of BERT, the language representation model. 
The accuracy of the method can be sub-optimal, however, because BERT does not have sufficient capability to detect whether there is an error at each position, apparently due to the way of pre-training it using mask language modeling. In this work, we propose a novel neural architecture to address the aforementioned issue, which consists of a network for error detection and a network for error correction based on BERT, with the former being connected to the latter with what we call soft-masking technique. Our method of using `Soft-Masked BERT' is general, and it may be employed in other language detection-correction problems. Experimental results on two datasets, including one large dataset which we create and plan to release, demonstrate that the performance of our proposed method is significantly better than the baselines including the one solely based on BERT.", "keyphrases": ["soft-masked bert", "detection", "spelling error correction", "correction network"]} +{"id": "silberer-lapata-2014-learning", "title": "Learning Grounded Meaning Representations with Autoencoders", "abstract": "In this paper we address the problem of grounding distributional representations of lexical meaning. We introduce a new model which uses stacked autoencoders to learn higher-level embeddings from textual and visual input. The two modalities are encoded as vectors of attributes and are obtained automatically from text and images, respectively. We evaluate our model on its ability to simulate similarity judgments and concept categorization. On both tasks, our approach outperforms baselines and related models.", "keyphrases": ["autoencoder", "attribute", "visual representation"]} +{"id": "haffari-etal-2009-active", "title": "Active Learning for Statistical Phrase-based Machine Translation", "abstract": "Statistical machine translation (SMT) models need large bilingual corpora for training, which are unavailable for some language pairs. This paper provides the first serious experimental study of active learning for SMT. We use active learning to improve the quality of a phrase-based SMT system, and show significant improvements in translation compared to a random sentence selection baseline, when test and training data are taken from the same or different domains. Experimental results are shown in a simulated setting using three language pairs, and in a realistic situation for Bangla-English, a language pair with limited translation resources.", "keyphrases": ["machine translation", "active learning", "seed corpus", "query selection strategy"]} +{"id": "huang-etal-2018-zero", "title": "Zero-Shot Transfer Learning for Event Extraction", "abstract": "Most previous supervised event extraction methods have relied on features derived from manual annotations, and thus cannot be applied to new event types without extra annotation effort. We take a fresh look at event extraction and model it as a generic grounding problem: mapping each event mention to a specific type in a target event ontology. We design a transferable architecture of structural and compositional neural networks to jointly represent and map event mentions and types into a shared semantic space. Based on this new framework, we can select, for each event mention, the event type which is semantically closest in this space as its type. By leveraging manual annotations available for a small set of existing event types, our framework can be applied to new unseen event types without additional manual annotations. 
When tested on 23 unseen event types, our zero-shot framework, without manual annotations, achieved performance comparable to a supervised model trained from 3,000 sentences annotated with 500 event mentions.", "keyphrases": ["event extraction", "supervised model", "zero-shot learning"]} +{"id": "peng-etal-2019-palm", "title": "PaLM: A Hybrid Parser and Language Model", "abstract": "We present PaLM, a hybrid parser and neural language model. Building on an RNN language model, PaLM adds an attention layer over text spans in the left context. An unsupervised constituency parser can be derived from its attention weights, using a greedy decoding algorithm. We evaluate PaLM on language modeling, and empirically show that it outperforms strong baselines. If syntactic annotations are available, the attention component can be trained in a supervised manner, providing syntactically-informed representations of the context, and further improving language modeling performance.", "keyphrases": ["language model", "attention component", "palm"]} +{"id": "ramanathan-etal-2008-simple", "title": "Simple Syntactic and Morphological Processing Can Help English-Hindi Statistical Machine Translation", "abstract": "In this paper, we report our work on incorporating syntactic and morphological information for English to Hindi statistical machine translation. Two simple and computationally inexpensive ideas have proven to be surprisingly effective: (i) reordering the English source sentence as per Hindi syntax, and (ii) using the suffixes of Hindi words. The former is done by applying simple transformation rules on the English parse tree. The latter, by using a simple suffix separation program. With only a small amount of bilingual training data and limited tools for Hindi, we achieve reasonable performance and substantial improvements over the baseline phrase-based system. Our approach eschews the use of parsing or other sophisticated linguistic tools for the target language (Hindi) making it a useful framework for statistical machine translation from English to Indian languages in general, since such tools are not widely available for Indian languages currently.", "keyphrases": ["statistical machine translation", "hindi", "indian language"]} +{"id": "boratko-etal-2018-systematic", "title": "A Systematic Classification of Knowledge, Reasoning, and Context within the ARC Dataset", "abstract": "The recent work of Clark et al. (2018) introduces the AI2 Reasoning Challenge (ARC) and the associated ARC dataset that partitions open domain, complex science questions into easy and challenge sets. That paper includes an analysis of 100 questions with respect to the types of knowledge and reasoning required to answer them; however, it does not include clear definitions of these types, nor does it offer information about the quality of the labels. We propose a comprehensive set of definitions of knowledge and reasoning types necessary for answering the questions in the ARC dataset. Using ten annotators and a sophisticated annotation interface, we analyze the distribution of labels across the challenge set and statistics related to them. Additionally, we demonstrate that although naive information retrieval methods return sentences that are irrelevant to answering the query, sufficient supporting text is often present in the (ARC) corpus. 
Evaluating with human-selected relevant sentences improves the performance of a neural machine comprehension model by 42 points.", "keyphrases": ["reasoning", "arc dataset", "definition"]} +{"id": "voita-etal-2019-analyzing", "title": "Analyzing Multi-Head Self-Attention: Specialized Heads Do the Heavy Lifting, the Rest Can Be Pruned", "abstract": "Multi-head self-attention is a key component of the Transformer, a state-of-the-art architecture for neural machine translation. In this work we evaluate the contribution made by individual attention heads to the overall performance of the model and analyze the roles played by them in the encoder. We find that the most important and confident heads play consistent and often linguistically-interpretable roles. When pruning heads using a method based on stochastic gates and a differentiable relaxation of the L0 penalty, we observe that specialized heads are last to be pruned. Our novel pruning method removes the vast majority of heads without seriously affecting performance. For example, on the English-Russian WMT dataset, pruning 38 out of 48 encoder heads results in a drop of only 0.15 BLEU.", "keyphrases": ["self-attention", "head", "heavy lifting", "encoder-decoder attention", "downstream task"]} +{"id": "li-etal-2010-topic", "title": "Topic Models for Word Sense Disambiguation and Token-Based Idiom Detection", "abstract": "This paper presents a probabilistic model for sense disambiguation which chooses the best sense based on the conditional probability of sense paraphrases given a context. We use a topic model to decompose this conditional probability into two conditional probabilities with latent variables. We propose three different instantiations of the model for solving sense disambiguation problems with different degrees of resource availability. The proposed models are tested on three different tasks: coarse-grained word sense disambiguation, fine-grained word sense disambiguation, and detection of literal vs. non-literal usages of potentially idiomatic expressions. In all three cases, we outperform state-of-the-art systems either quantitatively or statistically significantly.", "keyphrases": ["word sense disambiguation", "probability", "topic model"]} +{"id": "galley-etal-2003-discourse", "title": "Discourse Segmentation of Multi-Party Conversation", "abstract": "We present a domain-independent topic segmentation algorithm for multi-party speech. Our feature-based algorithm combines knowledge about content using a text-based algorithm as a feature and about form using linguistic and acoustic cues about topic shifts extracted from speech. This segmentation algorithm uses automatically induced decision rules to combine the different features. The embedded text-based algorithm builds on lexical cohesion and has performance comparable to state-of-the-art algorithms based on lexical information. A significant error reduction is obtained by combining the two knowledge sources.", "keyphrases": ["conversation", "topic segmentation", "lexical cohesion", "annotator"]} +{"id": "niu-etal-2017-study", "title": "A Study of Style in Machine Translation: Controlling the Formality of Machine Translation Output", "abstract": "Stylistic variations of language, such as formality, carry speakers' intention beyond literal meaning and should be conveyed adequately in translation. We propose to use lexical formality models to control the formality level of machine translation output. 
We demonstrate the effectiveness of our approach in empirical evaluations, as measured by automatic metrics and human assessments.", "keyphrases": ["style", "formality", "linguistic nuance", "fluency"]} +{"id": "riesa-yarowsky-2006-minimally", "title": "Minimally Supervised Morphological Segmentation with Applications to Machine Translation", "abstract": "Inflected languages in a low-resource setting present a data sparsity problem for statistical machine translation. In this paper, we present a minimally supervised algorithm for morpheme segmentation on Arabic dialects which reduces unknown words at translation time by over 50%, total vocabulary size by over 40%, and yields a significant increase in BLEU score over a previous state-of-the-art phrase-based statistical MT system.", "keyphrases": ["machine translation", "da-to-english smt", "oov"]} +{"id": "gan-etal-2019-multi", "title": "Multi-step Reasoning via Recurrent Dual Attention for Visual Dialog", "abstract": "This paper presents a new model for visual dialog, Recurrent Dual Attention Network (ReDAN), using multi-step reasoning to answer a series of questions about an image. In each question-answering turn of a dialog, ReDAN infers the answer progressively through multiple reasoning steps. In each step of the reasoning process, the semantic representation of the question is updated based on the image and the previous dialog history, and the recurrently-refined representation is used for further reasoning in the subsequent step. On the VisDial v1.0 dataset, the proposed ReDAN model achieves a new state-of-the-art of 64.47% NDCG score. Visualization on the reasoning process further demonstrates that ReDAN can locate context-relevant visual and textual clues via iterative refinement, which can lead to the correct answer step-by-step.", "keyphrases": ["visual dialog", "attention network", "multi-step reasoning"]} +{"id": "jiao-etal-2019-higru", "title": "HiGRU: Hierarchical Gated Recurrent Units for Utterance-Level Emotion Recognition", "abstract": "In this paper, we address three challenges in utterance-level emotion recognition in dialogue systems: (1) the same word can deliver different emotions in different contexts; (2) some emotions are rarely seen in general dialogues; (3) long-range contextual information is hard to be effectively captured. We therefore propose a hierarchical Gated Recurrent Unit (HiGRU) framework with a lower-level GRU to model the word-level inputs and an upper-level GRU to capture the contexts of utterance-level embeddings. Moreover, we promote the framework to two variants, Hi-GRU with individual features fusion (HiGRU-f) and HiGRU with self-attention and features fusion (HiGRU-sf), so that the word/utterance-level individual inputs and the long-range contextual information can be sufficiently utilized. Experiments on three dialogue emotion datasets, IEMOCAP, Friends, and EmotionPush demonstrate that our proposed Hi-GRU models attain at least 8.7%, 7.5%, 6.0% improvement over the state-of-the-art methods on each dataset, respectively. 
Particularly, by utilizing only the textual feature in IEMOCAP, our HiGRU models gain at least 3.8% improvement over the state-of-the-art conversational memory network (CMN) with the trimodal features of text, video, and audio.", "keyphrases": ["gated recurrent unit", "emotion recognition", "higru"]} +{"id": "sanderson-guenter-2006-short", "title": "Short Text Authorship Attribution via Sequence Kernels, Markov Chains and Author Unmasking: An Investigation", "abstract": "We present an investigation of recently proposed character and word sequence kernels for the task of authorship attribution based on relatively short texts. Performance is compared with two corresponding probabilistic approaches based on Markov chains. Several configurations of the sequence kernels are studied on a relatively large dataset (50 authors), where each author covered several topics. Utilising Moffat smoothing, the two probabilistic approaches obtain similar performance, which in turn is comparable to that of character sequence kernels and is better than that of word sequence kernels. The results further suggest that when using a realistic setup that takes into account the case of texts which are not written by any hypothesised authors, the amount of training material has more influence on discrimination performance than the amount of test material. Moreover, we show that the recently proposed author unmasking approach is less useful when dealing with short texts.", "keyphrases": ["authorship identification", "string kernel", "text analysis task"]} +{"id": "aly-etal-2021-fact", "title": "The Fact Extraction and VERification Over Unstructured and Structured information (FEVEROUS) Shared Task", "abstract": "The Fact Extraction and VERification Over Unstructured and Structured information (FEVEROUS) shared task, asks participating systems to determine whether human-authored claims are Supported or Refuted based on evidence retrieved from Wikipedia (or NotEnoughInfo if the claim cannot be verified). Compared to the FEVER 2018 shared task, the main challenge is the addition of structured data (tables and lists) as a source of evidence. The claims in the FEVEROUS dataset can be verified using only structured evidence, only unstructured evidence, or a mixture of both. Submissions are evaluated using the FEVEROUS score that combines label accuracy and evidence retrieval. Unlike FEVER 2018, FEVEROUS requires partial evidence to be returned for NotEnoughInfo claims, and the claims are longer and thus more complex. The shared task received 13 entries, six of which were able to beat the baseline system. The winning team was \u201cBust a move!\u201d, achieving a FEVEROUS score of 27% (+9% compared to the baseline). In this paper we describe the shared task, present the full results and highlight commonalities and innovations among the participating systems.", "keyphrases": ["fact extraction", "structured information", "wikipedia"]} +{"id": "li-etal-2020-sentence", "title": "On the Sentence Embeddings from Pre-trained Language Models", "abstract": "Pre-trained contextual representations like BERT have achieved great success in natural language processing. However, the sentence embeddings from the pre-trained language models without fine-tuning have been found to poorly capture semantic meaning of sentences. In this paper, we argue that the semantic information in the BERT embeddings is not fully exploited. 
We first reveal the theoretical connection between the masked language model pre-training objective and the semantic similarity task, and then analyze the BERT sentence embeddings empirically. We find that BERT always induces a non-smooth anisotropic semantic space of sentences, which harms its performance on semantic similarity. To address this issue, we propose to transform the anisotropic sentence embedding distribution to a smooth and isotropic Gaussian distribution through normalizing flows that are learned with an unsupervised objective. Experimental results show that our proposed BERT-flow method obtains significant performance gains over the state-of-the-art sentence embeddings on a variety of semantic textual similarity tasks. The code is available at .", "keyphrases": ["sentence embedding", "pre-trained language model", "anisotropic", "semantic space"]} +{"id": "bouamor-etal-2018-madar", "title": "The MADAR Arabic Dialect Corpus and Lexicon", "abstract": "In this paper, we present two resources that were created as part of the Multi Arabic Dialect Applications and Resources (MADAR) project. The first is a large parallel corpus of 25 Arabic city dialects in the travel domain. The second is a lexicon of 1,045 concepts with an average of 45 words from 25 cities per concept. These resources are the first of their kind in terms of the breadth of their coverage and the fine location granularity. The focus on cities, as opposed to regions in studying Arabic dialects, opens new avenues to many areas of research from dialectology to dialect identification and machine translation.", "keyphrases": ["dialect", "arabic city dialect", "madar corpus"]} +{"id": "jain-etal-2019-entity", "title": "Entity Projection via Machine Translation for Cross-Lingual NER", "abstract": "Although over 100 languages are supported by strong off-the-shelf machine translation systems, only a subset of them possess large annotated corpora for named entity recognition. Motivated by this fact, we leverage machine translation to improve annotation-projection approaches to cross-lingual named entity recognition. We propose a system that improves over prior entity-projection methods by: (a) leveraging machine translation systems twice: first for translating sentences and subsequently for translating entities; (b) matching entities based on orthographic and phonetic similarity; and (c) identifying matches based on distributional statistics derived from the dataset. Our approach improves upon current state-of-the-art methods for cross-lingual named entity recognition on 5 diverse languages by an average of 4.1 points. Further, our method achieves state-of-the-art F_1 scores for Armenian, outperforming even a monolingual model trained on Armenian source data.", "keyphrases": ["machine translation", "cross-lingual ner", "entity recognition"]} +{"id": "suzuki-gao-2012-unified", "title": "A Unified Approach to Transliteration-based Text Input with Online Spelling Correction", "abstract": "This paper presents an integrated, end-to-end approach to online spelling correction for text input. Online spelling correction refers to the spelling correction as you type, as opposed to post-editing. 
The online scenario is particularly important for languages that routinely use transliteration-based text input methods, such as Chinese and Japanese, because the desired target characters cannot be input at all unless they are in the list of candidates provided by an input method, and spelling errors prevent them from appearing in the list. For example, a user might type suesheng by mistake to mean xuesheng 'student' in Chinese; existing input methods fail to convert this misspelled input to the desired target Chinese characters. In this paper, we propose a unified approach to the problem of spelling correction and transliteration-based character conversion using an approach inspired by the phrase-based statistical machine translation framework. At the phrase (substring) level, k most probable pinyin (Romanized Chinese) corrections are generated using a monotone decoder; at the sentence level, input pinyin strings are directly transliterated into target Chinese characters by a decoder using a log-linear model that refer to the features of both levels. A new method of automatically deriving parallel training data from user keystroke logs is also presented. Experiments on Chinese pinyin conversion show that our integrated method reduces the character error rate by 20% (from 8.9% to 7.12%) over the previous state-of-the art based on a noisy channel model.", "keyphrases": ["text input", "online spelling correction", "machine translation framework"]} +{"id": "jakob-gurevych-2010-extracting", "title": "Extracting Opinion Targets in a Single and Cross-Domain Setting with Conditional Random Fields", "abstract": "In this paper, we focus on the opinion target extraction as part of the opinion mining task. We model the problem as an information extraction task, which we address based on Conditional Random Fields (CRF). As a baseline we employ the supervised algorithm by Zhuang et al. (2006), which represents the state-of-the-art on the employed data. We evaluate the algorithms comprehensively on datasets from four different domains annotated with individual opinion target instances on a sentence level. Furthermore, we investigate the performance of our CRF-based approach and the baseline in a single- and cross-domain opinion target extraction setting. Our CRF-based approach improves the performance by 0.077, 0.126, 0.071 and 0.178 regarding F-Measure in the single-domain extraction in the four domains. In the cross-domain setting our approach improves the performance by 0.409, 0.242, 0.294 and 0.343 regarding F-Measure over the baseline.", "keyphrases": ["opinion target", "conditional random fields", "target extraction", "crf", "sequence labeling problem"]} +{"id": "ganjigunte-ashok-etal-2013-success", "title": "Success with Style: Using Writing Style to Predict the Success of Novels", "abstract": "Predicting the success of literary works is a curious question among publishers and aspiring writers alike. We examine the quantitative connection, if any, between writing style and successful literature. Based on novels over several different genres, we probe the predictive power of statistical stylometry in discriminating successful literary works, and identify characteristic stylistic elements that are more prominent in successful writings. Our study reports for the first time that statistical stylometry can be surprisingly effective in discriminating highly successful literature from less successful counterpart, achieving accuracy up to 84%. 
Closer analyses lead to several new insights into characteristics of the writing style in successful literature, including findings that are contrary to the conventional wisdom with respect to good writing style and readability.", "keyphrases": ["writing style", "novel", "successful literature"]} +{"id": "durrett-klein-2013-easy", "title": "Easy Victories and Uphill Battles in Coreference Resolution", "abstract": "Classical coreference systems encode various syntactic, discourse, and semantic phenomena explicitly, using heterogeneous features computed from hand-crafted heuristics. In contrast, we present a state-of-the-art coreference system that captures such phenomena implicitly, with a small number of homogeneous feature templates examining shallow properties of mentions. Surprisingly, our features are actually more effective than the corresponding hand-engineered ones at modeling these key linguistic phenomena, allowing us to win \u201ceasy victories\u201d without crafted heuristics. These features are successful on syntax and discourse; however, they do not model semantic compatibility well, nor do we see gains from experiments with shallow semantic features from the literature, suggesting that this approach to semantics is an \u201cuphill battle.\u201d Nonetheless, our final system outperforms the Stanford system (Lee et al. (2011), the winner of the CoNLL 2011 shared task) by 3.5% absolute on the CoNLL metric and outperforms the IMS system (Bj\u00f6rkelund and Farkas (2012), the best publicly available English coreference system) by 1.9% absolute.", "keyphrases": ["uphill battle", "coreference resolution", "mention", "semantic compatibility"]} +{"id": "rijhwani-etal-2017-estimating", "title": "Estimating Code-Switching on Twitter with a Novel Generalized Word-Level Language Detection Technique", "abstract": "Word-level language detection is necessary for analyzing code-switched text, where multiple languages could be mixed within a sentence. Existing models are restricted to code-switching between two specific languages and fail in real-world scenarios as text input rarely has a priori information on the languages used. We present a novel unsupervised word-level language detection technique for code-switched text for an arbitrarily large number of languages, which does not require any manually annotated training data. Our experiments with tweets in seven languages show a 74% relative error reduction in word-level labeling with respect to competitive baselines. We then use this system to conduct a large-scale quantitative analysis of code-switching patterns on Twitter, both global as well as region-specific, with 58M tweets.", "keyphrases": ["twitter", "language identification", "code-mixed text"]} +{"id": "scholak-etal-2021-picard", "title": "PICARD: Parsing Incrementally for Constrained Auto-Regressive Decoding from Language Models", "abstract": "Large pre-trained language models for textual data have an unconstrained output space; at each decoding step, they can produce any of 10,000s of sub-word tokens. When fine-tuned to target constrained formal languages like SQL, these models often generate invalid code, rendering it unusable. We propose PICARD (code available at ), a method for constraining auto-regressive decoders of language models through incremental parsing. PICARD helps to find valid output sequences by rejecting inadmissible tokens at each decoding step. 
On the challenging Spider and CoSQL text-to-SQL translation tasks, we show that PICARD transforms fine-tuned T5 models with passable performance into state-of-the-art solutions.", "keyphrases": ["auto-regressive decoder", "language model", "picard"]} +{"id": "bos-etal-2004-wide", "title": "Wide-Coverage Semantic Representations from a CCG Parser", "abstract": "This paper shows how to construct semantic representations from the derivations produced by a wide-coverage CCG parser. Unlike the dependency structures returned by the parser itself, these can be used directly for semantic interpretation. We demonstrate that well-formed semantic representations can be produced for over 97% of the sentences in unseen WSJ text. We believe this is a major step towards wide-coverage semantic interpretation, one of the key objectives of the field of NLP.", "keyphrases": ["ccg parser", "boxer", "logical form"]} +{"id": "lee-etal-2010-emotion", "title": "Emotion Cause Events: Corpus Construction and Analysis", "abstract": "Emotion processing has always been a great challenge. Given the fact that an emotion is triggered by cause events and that cause events are an integral part of emotion, this paper constructs a Chinese emotion cause corpus as a first step towards automatic inference of cause-emotion correlation. The corpus focuses on five primary emotions, namely happiness, sadness, fear, anger, and surprise. It is annotated with emotion cause events based on our proposed annotation scheme. Corpus data shows that most emotions are expressed with causes, and that causes mostly occur before the corresponding emotion verbs. We also examine the correlations between emotions and cause events in terms of linguistic cues: causative verbs, perception verbs, epistemic markers, conjunctions, prepositions, and others. Results show that each group of linguistic cues serves as an indicator marking the cause events in different structures of emotional constructions. We believe that the emotion cause corpus will be a useful resource for automatic emotion cause detection as well as emotion detection and classification.", "keyphrases": ["cause", "linguistic cue", "emotion"]} +{"id": "wu-ng-2013-grammatical", "title": "Grammatical Error Correction Using Integer Linear Programming", "abstract": "We propose a joint inference algorithm for grammatical error correction. Different from most previous work where different error types are corrected independently, our proposed inference process considers all possible errors in a unified framework. We use integer linear programming (ILP) to model the inference process, which can easily incorporate both the power of existing error classifiers and prior knowledge on grammatical error correction. Experimental results on the Helping Our Own shared task show that our method is competitive with state-of-the-art systems.", "keyphrases": ["integer linear programming", "inference process", "grammatical error correction"]} +{"id": "rastogi-etal-2019-scaling", "title": "Scaling Multi-Domain Dialogue State Tracking via Query Reformulation", "abstract": "We present a novel approach to dialogue state tracking and referring expression resolution tasks. Successful contextual understanding of multi-turn spoken dialogues requires resolving referring expressions across turns and tracking the entities relevant to the conversation across turns. 
Tracking conversational state is particularly challenging in a multi-domain scenario when there exist multiple spoken language understanding (SLU) sub-systems, and each SLU sub-system operates on its domain-specific meaning representation. While previous approaches have addressed the disparate schema issue by learning candidate transformations of the meaning representation, in this paper, we instead model the reference resolution as a dialogue context-aware user query reformulation task \u2013 the dialog state is serialized to a sequence of natural language tokens representing the conversation. We develop our model for query reformulation using a pointer-generator network and a novel multi-task learning setup. In our experiments, we show a significant improvement in absolute F1 on an internal as well as a soon-to-be-released public benchmark, respectively.", "keyphrases": ["dialogue state tracking", "query reformulation", "conversation"]} +{"id": "gao-etal-2020-supert", "title": "SUPERT: Towards New Frontiers in Unsupervised Evaluation Metrics for Multi-Document Summarization", "abstract": "We study unsupervised multi-document summarization evaluation metrics, which require neither human-written reference summaries nor human annotations (e.g. preferences, ratings, etc.). We propose SUPERT, which rates the quality of a summary by measuring its semantic similarity with a pseudo reference summary, i.e. selected salient sentences from the source documents, using contextualized embeddings and soft token alignment techniques. Compared to the state-of-the-art unsupervised evaluation metrics, SUPERT correlates better with human ratings by 18-39%. Furthermore, we use SUPERT as rewards to guide a neural-based reinforcement learning summarizer, yielding favorable performance compared to the state-of-the-art unsupervised summarizers. All source code is available at .", "keyphrases": ["evaluation metric", "summarization", "reference"]} +{"id": "tsarfaty-2006-integrated", "title": "Integrated Morphological and Syntactic Disambiguation for Modern Hebrew", "abstract": "Current parsing models are not immediately applicable for languages that exhibit strong interaction between morphology and syntax, e.g., Modern Hebrew (MH), Arabic and other Semitic languages. This work represents a first attempt at modeling morphological-syntactic interaction in a generative probabilistic framework to allow for MH parsing. We show that morphological information selected in tandem with syntactic categories is instrumental for parsing Semitic languages. We further show that redundant morphological information helps syntactic disambiguation.", "keyphrases": ["syntactic disambiguation", "modern hebrew", "morphological analysis"]} +{"id": "kirov-etal-2018-unimorph", "title": "UniMorph 2.0: Universal Morphology", "abstract": "The Universal Morphology UniMorph project is a collaborative effort to improve how NLP handles complex morphology across the world's languages. The project releases annotated morphological data using a universal tagset, the UniMorph schema. Each inflected form is associated with a lemma, which typically carries its underlying lexical meaning, and a bundle of morphological features from our schema. Additional supporting data and tools are also released on a per-language basis when available. UniMorph is based at the Center for Language and Speech Processing (CLSP) at Johns Hopkins University in Baltimore, Maryland and is sponsored by the DARPA LORELEI program. 
This paper details advances made to the collection, annotation, and dissemination of project resources since the initial UniMorph release described at LREC 2016.", "keyphrases": ["morphological data", "unimorph", "database", "wiktionary", "inflection table"]} +{"id": "seddah-etal-2012-french", "title": "The French Social Media Bank: a Treebank of Noisy User Generated Content", "abstract": "In recent years, statistical parsers have reached high performance levels on well-edited texts. Domain adaptation techniques have improved parsing results on text genres differing from the journalistic data most parsers are trained on. However, such corpora usually comply with standard linguistic, spelling and typographic conventions. In the meantime, the emergence of Web 2.0 communication media has caused the appearance of new types of online textual data. Although valuable, e.g., in terms of data mining and sentiment analysis, such user-generated content rarely complies with standard conventions: they are noisy. This prevents most NLP tools, especially treebank based parsers, from performing well on such data. For this reason, we have developed the French Social Media Bank, the first user-generated content treebank for French, a morphologically rich language (MRL). The first release of this resource contains 1,700 sentences from various Web 2.0 sources, including data specifically chosen for their high noisiness. We describe here how we created this treebank and expose the methodology we used for fully annotating it. We also provide baseline POS tagging and statistical constituency parsing results, which are lower by far than usual results on edited texts. This highlights the high difficulty of automatically processing such noisy data in a MRL.", "keyphrases": ["social media bank", "treebank", "user-generated content", "ugc", "online forum"]} +{"id": "kapustin-kapustin-2019-modeling", "title": "Modeling language constructs with fuzzy sets: some approaches, examples and interpretations", "abstract": "We present and discuss a couple of approaches, including different types of projections, and some examples, discussing the use of fuzzy sets for modeling meaning of certain types of language constructs. We are mostly focusing on words other than adjectives and linguistic hedges, as these categories have been studied the most in prior work. We discuss logical and linguistic interpretations of membership functions. We argue that using fuzzy sets for modeling meaning of words and other natural language constructs, along with situations described with natural language, is interesting both from a purely linguistic perspective, and also as a knowledge representation for problems of computational linguistics and natural language processing.", "keyphrases": ["fuzzy set", "interpretation", "natural language construct"]} +{"id": "park-etal-2019-thisiscompetition", "title": "ThisIsCompetition at SemEval-2019 Task 9: BERT is unstable for out-of-domain samples", "abstract": "This paper describes our system, Joint Encoders for Stable Suggestion Inference (JESSI), for the SemEval 2019 Task 9: Suggestion Mining from Online Reviews and Forums. JESSI is a combination of two sentence encoders: (a) one using multiple pre-trained word embeddings learned from log-bilinear regression (GloVe) and translation (CoVe) models, and (b) one on top of word encodings from a pre-trained deep bidirectional transformer (BERT). We include a domain adversarial training module when training for out-of-domain samples.
Our experiments show that while BERT performs exceptionally well for in-domain samples, several runs of the model show that it is unstable for out-of-domain samples. The problem is mitigated tremendously by (1) combining BERT with a non-BERT encoder, and (2) using an RNN-based classifier on top of BERT. Our final models obtained second place with 77.78% F-Score on Subtask A (i.e. in-domain) and achieved an F-Score of 79.59% on Subtask B (i.e. out-of-domain), even without using any additional external data.", "keyphrases": ["bert", "out-of-domain sample", "stable suggestion inference"]} +{"id": "miller-etal-2017-parlai", "title": "ParlAI: A Dialog Research Software Platform", "abstract": "We introduce ParlAI (pronounced \u201cpar-lay\u201d), an open-source software platform for dialog research implemented in Python, available at . Its goal is to provide a unified framework for sharing, training and testing dialog models; integration of Amazon Mechanical Turk for data collection, human evaluation, and online/reinforcement learning; and a repository of machine learning models for comparing with others' models, and improving upon existing architectures. Over 20 tasks are supported in the first release, including popular datasets such as SQuAD, bAbI tasks, MCTest, WikiQA, QACNN, QADailyMail, CBT, bAbI Dialog, Ubuntu, OpenSubtitles and VQA. Several models are integrated, including neural models such as memory networks, seq2seq and attentive LSTMs.", "keyphrases": ["dialog research", "software platform", "parlai", "chatbot", "evaluation system"]} +{"id": "duong-etal-2016-attentional", "title": "An Attentional Model for Speech Translation Without Transcription", "abstract": "For many low-resource languages, spoken language resources are more likely to be annotated with translations than transcriptions. This bilingual speech data can be used for word-spotting, spoken document retrieval, and even for documentation of endangered languages. We experiment with the neural, attentional model applied to this data. On phone-to-word alignment and translation reranking tasks, we achieve large improvements relative to several baselines. On the more challenging speech-to-word alignment task, our model nearly matches GIZA++\u2019s performance on gold transcriptions, but without recourse to transcriptions or to a lexicon.", "keyphrases": ["attentional model", "transcription", "sequence-to-sequence model"]} +{"id": "wang-etal-2020-structure", "title": "Structure-Level Knowledge Distillation For Multilingual Sequence Labeling", "abstract": "Multilingual sequence labeling is a task of predicting label sequences using a single unified model for multiple languages. Compared with relying on multiple monolingual models, using a multilingual model has the benefits of a smaller model size, easier online serving, and generalizability to low-resource languages. However, current multilingual models still underperform individual monolingual models significantly due to model capacity limitations. In this paper, we propose to reduce the gap between monolingual models and the unified multilingual model by distilling the structural knowledge of several monolingual models (teachers) to the unified multilingual model (student).
We propose two novel KD methods based on structure-level information: (1) one that approximately minimizes the distance between the student's and the teachers' structure-level probability distributions, and (2) one that aggregates the structure-level knowledge to local distributions and minimizes the distance between two local probability distributions. Our experiments on 4 multilingual tasks with 25 datasets show that our approaches outperform several strong baselines and have stronger zero-shot generalizability than both the baseline model and teacher models.", "keyphrases": ["knowledge distillation", "multilingual sequence labeling", "structural knowledge"]} +{"id": "ma-2006-champollion", "title": "Champollion: A Robust Parallel Text Sentence Aligner", "abstract": "This paper describes Champollion, a lexicon-based sentence aligner designed for robust alignment of potentially noisy parallel text. Champollion increases the robustness of the alignment by assigning greater weights to less frequent translated words. Experiments on a manually aligned Chinese\u2013English parallel corpus show that Champollion achieves high precision and recall on noisy data. Champollion can be easily ported to new language pairs. It\u2019s freely available to the public.", "keyphrases": ["lexicon-based sentence aligner", "robust alignment", "champollion"]} +{"id": "jiang-de-marneffe-2019-evaluating", "title": "Evaluating BERT for natural language inference: A case study on the CommitmentBank", "abstract": "Natural language inference (NLI) datasets (e.g., MultiNLI) were collected by soliciting hypotheses for a given premise from annotators. Such data collection led to annotation artifacts: systems can identify the premise-hypothesis relationship without observing the premise (e.g., negation in hypothesis being indicative of contradiction). We address this problem by recasting the CommitmentBank for NLI, which contains items involving reasoning over the extent to which a speaker is committed to complements of clause-embedding verbs under entailment-canceling environments (conditional, negation, modal and question). Instead of being constructed to stand in certain relationships with the premise, hypotheses in the recast CommitmentBank are the complements of the clause-embedding verb in each premise, leading to no annotation artifacts in the hypothesis. A state-of-the-art BERT-based model performs well on the CommitmentBank with 85% F1. However, analysis of model behavior shows that the BERT models still do not capture the full complexity of pragmatic reasoning, nor encode some of the linguistic generalizations, highlighting room for improvement.", "keyphrases": ["bert", "natural language inference", "reasoning"]} +{"id": "xing-etal-2018-adaptive", "title": "Adaptive Multi-Task Transfer Learning for Chinese Word Segmentation in Medical Text", "abstract": "Chinese word segmentation (CWS) models trained on open-source corpora face a dramatic performance drop when dealing with domain text, especially for a domain with lots of special terms and diverse writing styles, such as the biomedical domain. However, building domain-specific CWS requires extremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant knowledge from high resource to low resource domains.
Extensive experiments show that our model achieves consistently higher accuracy than the single-task CWS and other transfer learning baselines, especially when there is a large disparity between source and target domains.", "keyphrases": ["chinese word segmentation", "medical text", "domain-invariant knowledge"]} +{"id": "luan-etal-2019-general", "title": "A general framework for information extraction using dynamic span graphs", "abstract": "We introduce a general framework for several information extraction tasks that share span representations using dynamically constructed span graphs. The graphs are dynamically constructed by selecting the most confident entity spans and linking these nodes with confidence-weighted relation types and coreferences. The dynamic span graph allows coreference and relation type confidences to propagate through the graph to iteratively refine the span representations. This is unlike previous multi-task frameworks for information extraction in which the only interaction between tasks is in the shared first-layer LSTM. Our framework significantly outperforms state-of-the-art on multiple information extraction tasks across multiple datasets reflecting different domains. We further observe that the span enumeration approach is good at detecting nested span entities, with significant F1 score improvement on the ACE dataset.", "keyphrases": ["information extraction", "span graph", "confidence-weighted relation type", "coreference", "entity recognition"]} +{"id": "kothur-etal-2018-document", "title": "Document-Level Adaptation for Neural Machine Translation", "abstract": "It is common practice to adapt machine translation systems to novel domains, but even a well-adapted system may be able to perform better on a particular document if it were to learn from a translator's corrections within the document itself. We focus on adaptation within a single document \u2013 appropriate for an interactive translation scenario where a model adapts to a human translator's input over the course of a document. We propose two methods: single-sentence adaptation (which performs online adaptation one sentence at a time) and dictionary adaptation (which specifically addresses the issue of translating novel words). Combining the two models results in improvements over both approaches individually, and over baseline systems, even on short documents. On WMT news test data, we observe an improvement of +1.8 BLEU points and +23.3% novel word translation accuracy and on EMEA data (descriptions of medications) we observe an improvement of +2.7 BLEU points and +49.2% novel word translation accuracy.", "keyphrases": ["adaptation", "neural machine translation", "novel word"]} +{"id": "mohammad-etal-2018-semeval", "title": "SemEval-2018 Task 1: Affect in Tweets", "abstract": "We present the SemEval-2018 Task 1: Affect in Tweets, which includes an array of subtasks on inferring the affectual state of a person from their tweet. For each task, we created labeled data from English, Arabic, and Spanish tweets. The individual tasks are: 1. emotion intensity regression, 2. emotion intensity ordinal classification, 3. valence (sentiment) regression, 4. valence ordinal classification, and 5. emotion classification. Seventy-five teams (about 200 team members) participated in the shared task. We summarize the methods, resources, and tools used by the participating teams, with a focus on the techniques and resources that are particularly useful.
We also analyze systems for consistent bias towards a particular race or gender. The data is made freely available to further improve our understanding of how people convey emotions through language.", "keyphrases": ["tweets", "semeval-2018 task", "emotion dataset"]} +{"id": "felice-yuan-2014-generating", "title": "Generating artificial errors for grammatical error correction", "abstract": "This paper explores the generation of artificial errors for correcting grammatical mistakes made by learners of English as a second language. Artificial errors are injected into a set of error-free sentences in a probabilistic manner using statistics from a corpus. Unlike previous approaches, we use linguistic information to derive error generation probabilities and build corpora to correct several error types, including open-class errors. In addition, we also analyse the variables involved in the selection of candidate sentences. Experiments using the NUCLE corpus from the CoNLL 2013 shared task reveal that: 1) training on artificially created errors improves precision at the expense of recall and 2) different types of linguistic information are better suited for correcting different error types.", "keyphrases": ["artificial error", "learner", "linguistic information", "different type"]} +{"id": "stahlberg-byrne-2019-nmt", "title": "On NMT Search Errors and Model Errors: Cat Got Your Tongue?", "abstract": "We report on search errors and model errors in neural machine translation (NMT). We present an exact inference procedure for neural sequence models based on a combination of beam search and depth-first search. We use our exact search to find the global best model scores under a Transformer base model for the entire WMT15 English-German test set. Surprisingly, beam search fails to find these global best model scores in most cases, even with a very large beam size of 100. For more than 50% of the sentences, the model in fact assigns its global best score to the empty translation, revealing a massive failure of neural models in properly accounting for adequacy. We show by constraining search with a minimum translation length that at the root of the problem of empty translations lies an inherent bias towards shorter translations. We conclude that vanilla NMT in its current form requires just the right amount of beam search errors, which, from a modelling perspective, is a highly unsatisfactory conclusion indeed, as the model often prefers an empty translation.", "keyphrases": ["model error", "neural machine translation", "beam search", "likely output"]} +{"id": "hajic-etal-2014-comparing", "title": "Comparing Czech and English AMRs", "abstract": "This paper describes in detail the differences between Czech and English annotation using the Abstract Meaning Representation scheme, which stresses the use of ontologies (and semantically-oriented verbal lexicons) and relations based on meaning or ontological content rather than semantics or syntax. The basic \u201cslogan\u201d of the AMR specification clearly states that AMR is not an interlingua, yet it is expected that many relations as well as structures constructed from these relations will be similar or even identical across languages. 
In our study, we have investigated 100 sentences in English and their translations into Czech, annotated manually by AMRs, with the goal of describing the differences and, if possible, classifying them into two main categories: those which are merely convention differences and thus can be unified by changing such conventions in the AMR annotation guidelines, and those which are so deeply rooted in the language structure that the level of abstraction which is inherent in the current AMR scheme does not allow for such unification.", "keyphrases": ["amr", "ontology", "interlingua"]} +{"id": "baly-etal-2018-integrating", "title": "Integrating Stance Detection and Fact Checking in a Unified Corpus", "abstract": "A reasonable approach for fact checking a claim involves retrieving potentially relevant documents from different sources (e.g., news websites, social media, etc.), determining the stance of each document with respect to the claim, and finally making a prediction about the claim's factuality by aggregating the strength of the stances, while taking the reliability of the source into account. Moreover, a fact checking system should be able to explain its decision by providing relevant extracts (rationales) from the documents. Yet, this setup is not directly supported by existing datasets, which treat fact checking, document retrieval, source credibility, stance detection and rationale extraction as independent tasks. In this paper, we capture the interdependencies between these tasks as annotations in the same corpus. We implement this setup on an Arabic fact checking corpus, the first of its kind.", "keyphrases": ["stance detection", "fact checking", "other language"]} +{"id": "geffet-dagan-2005-distributional", "title": "The Distributional Inclusion Hypotheses and Lexical Entailment", "abstract": "This paper suggests refinements for the Distributional Similarity Hypothesis. Our proposed hypotheses relate the distributional behavior of pairs of words to lexical entailment -- a tighter notion of semantic similarity that is required by many NLP applications. To automatically explore the validity of the defined hypotheses we developed an inclusion testing algorithm for characteristic features of two words, which incorporates corpus and web-based feature sampling to overcome data sparseness. The degree of hypotheses validity was then empirically tested and manually analyzed with respect to the word sense level. In addition, the above testing algorithm was exploited to improve lexical entailment acquisition.", "keyphrases": ["distributional inclusion hypotheses", "lexical entailment", "hypothesis", "hypernym", "directional similarity measure"]} +{"id": "vatanen-etal-2010-language", "title": "Language Identification of Short Text Segments with N-gram Models", "abstract": "There are many accurate methods for language identification of long text samples, but identification of very short strings still presents a challenge. This paper studies a language identification task, in which the test samples have only 5-21 characters. We compare two distinct methods that are well suited for this task: a naive Bayes classifier based on character n-gram models, and the ranking method by Cavnar and Trenkle (1994). For the n-gram models, we test several standard smoothing techniques, including the current state-of-the-art, the modified Kneser-Ney interpolation. Experiments are conducted with 281 languages using the Universal Declaration of Human Rights.
Advanced language model smoothing techniques improve the identification accuracy and the respective classifiers outperform the ranking method. The higher accuracy is obtained at the cost of larger models and slower classification speed. However, there are several methods to reduce the size of an n-gram model, and our experiments with model pruning show that it provides an easy way to balance the size and the identification accuracy. We also compare the results to the language identifier in Google AJAX Language API, using a subset of 50 languages.", "keyphrases": ["character", "naive bayes classifier", "universal declaration", "language identification"]} +{"id": "titov-mcdonald-2008-joint", "title": "A Joint Model of Text and Aspect Ratings for Sentiment Summarization", "abstract": "Online reviews are often accompanied with numerical ratings provided by users for a set of service or product aspects. We propose a statistical model which is able to discover corresponding topics in text and extract textual evidence from reviews supporting each of these aspect ratings \u2010 a fundamental problem in aspect-based sentiment summarization (Hu and Liu, 2004a). Our model achieves high accuracy, without any explicitly labeled data except the user provided opinion ratings. The proposed approach is general and can be used for segmentation in other applications where sequential data is accompanied with correlated signals.", "keyphrases": ["joint model", "aspect rating", "sentiment summarization", "much attention"]} +{"id": "habash-rambow-2005-arabic", "title": "Arabic Tokenization, Part-of-Speech Tagging and Morphological Disambiguation in One Fell Swoop", "abstract": "We present an approach to using a morphological analyzer for tokenizing and morphologically tagging (including part-of-speech tagging) Arabic words in one process. We learn classifiers for individual morphological features, as well as ways of using these classifiers to choose among entries from the output of the analyzer. We obtain accuracy rates on all tasks in the high nineties.", "keyphrases": ["part-of-speech tagging", "morphological disambiguation", "arabic pos tagging"]} +{"id": "dodge-etal-2012-detecting", "title": "Detecting Visual Text", "abstract": "When people describe a scene, they often include information that is not visually apparent; sometimes based on background knowledge, sometimes to tell a story. We aim to separate visual text---descriptions of what is being seen---from non-visual text in natural images and their descriptions. To do so, we first concretely define what it means to be visual, annotate visual text and then develop algorithms to automatically classify noun phrases as visual or non-visual. We find that using text alone, we are able to achieve high accuracies at this task, and that incorporating features derived from computer vision algorithms improves performance. Finally, we show that we can reliably mine visual nouns and adjectives from large corpora and that we can use these effectively in the classification task.", "keyphrases": ["visual text", "noun phrase", "object"]} +{"id": "rajani-etal-2019-explain", "title": "Explain Yourself! Leveraging Language Models for Commonsense Reasoning", "abstract": "Deep learning models perform poorly on tasks that require commonsense reasoning, which often necessitates some form of world-knowledge or reasoning over information not immediately present in the input. 
We collect human explanations for commonsense reasoning in the form of natural language sequences and highlighted annotations in a new dataset called Common Sense Explanations (CoS-E). We use CoS-E to train language models to automatically generate explanations that can be used during training and inference in a novel Commonsense Auto-Generated Explanation (CAGE) framework. CAGE improves the state-of-the-art by 10% on the challenging CommonsenseQA task. We further study commonsense reasoning in DNNs using both human and auto-generated explanations including transfer to out-of-domain tasks. Empirical results indicate that we can effectively leverage language models for commonsense reasoning.", "keyphrases": ["language model", "commonsense reasoning", "highlighted annotation", "cage", "correct answer"]} +{"id": "du-etal-2013-topic", "title": "Topic Segmentation with a Structured Topic Model", "abstract": "We present a new hierarchical Bayesian model for unsupervised topic segmentation. This new model integrates a point-wise boundary sampling algorithm used in Bayesian segmentation into a structured topic model that can capture a simple hierarchical topic structure latent in documents. We develop an MCMC inference algorithm to split/merge segment(s). Experimental results show that our model outperforms previous unsupervised segmentation methods using only lexical information on Choi\u2019s datasets and two meeting transcripts and has performance comparable to those previous methods on two written datasets.", "keyphrases": ["hierarchical bayesian model", "point-wise boundary", "topic segmentation"]} +{"id": "liu-etal-2015-toward", "title": "Toward Abstractive Summarization Using Semantic Representations", "abstract": "We present a novel abstractive summarization framework that draws on the recent development of a treebank for the Abstract Meaning Representation (AMR). In this framework, the source text is parsed to a set of AMR graphs, the graphs are transformed into a summary graph, and then text is generated from the summary graph. We focus on the graph-to-graph transformation that reduces the source semantic graph into a summary graph, making use of an existing AMR parser and assuming the eventual availability of an AMR-to-text generator. The framework is data-driven, trainable, and not specifically designed for a particular domain. Experiments on gold-standard AMR annotations and system parses show promising results. Code is available at: https://github.com/summarization", "keyphrases": ["abstractive summarization", "amr", "graph-based representation", "predicate"]} +{"id": "liu-etal-2015-representation", "title": "Representation Learning Using Multi-Task Deep Neural Networks for Semantic Classification and Information Retrieval", "abstract": "Methods of deep neural networks (DNNs) have recently demonstrated superior performance on a number of natural language processing tasks. However, in most previous work, the models are learned based on either unsupervised objectives, which do not directly optimize the desired task, or single-task supervised objectives, which often suffer from insufficient training data. We develop a multi-task DNN for learning representations across multiple tasks, not only leveraging large amounts of cross-task data, but also benefiting from a regularization effect that leads to more general representations to help tasks in new domains.
Our multi-task DNN approach combines tasks of multiple-domain classification (for query classification) and information retrieval (ranking for web search), and demonstrates significant gains over strong baselines in a comprehensive set of domain adaptation experiments.", "keyphrases": ["deep neural network", "information retrieval", "more general representation", "multi-task learning"]} +{"id": "yao-etal-2019-docred", "title": "DocRED: A Large-Scale Document-Level Relation Extraction Dataset", "abstract": "Multiple entities in a document generally exhibit complex inter-sentence relations, and cannot be well handled by existing relation extraction (RE) methods that typically focus on extracting intra-sentence relations for single entity pairs. In order to accelerate the research on document-level RE, we introduce DocRED, a new dataset constructed from Wikipedia and Wikidata with three features: (1) DocRED annotates both named entities and relations, and is the largest human-annotated dataset for document-level RE from plain text; (2) DocRED requires reading multiple sentences in a document to extract entities and infer their relations by synthesizing all information of the document; (3) along with the human-annotated data, we also offer large-scale distantly supervised data, which enables DocRED to be adopted for both supervised and weakly supervised scenarios. In order to verify the challenges of document-level RE, we implement recent state-of-the-art methods for RE and conduct a thorough evaluation of these methods on DocRED. Empirical results show that DocRED is challenging for existing RE methods, which indicates that document-level RE remains an open problem and requires further efforts. Based on the detailed analysis on the experiments, we discuss multiple promising directions for future research. We make DocRED and the code for our baselines publicly available at .", "keyphrases": ["relation extraction dataset", "wikipedia", "multiple sentence", "state-of-the-art method", "document level"]} +{"id": "zhang-etal-2014-type", "title": "Type-Supervised Domain Adaptation for Joint Segmentation and POS-Tagging", "abstract": "We report an empirical investigation on type-supervised domain adaptation for joint Chinese word segmentation and POS-tagging, making use of domain-specific tag dictionaries and only unlabeled target domain data to improve target-domain accuracies, given a set of annotated source domain sentences. Previous work on POS-tagging of other languages showed that type-supervision can be a competitive alternative to token-supervision, while semi-supervised techniques such as label propagation are important to the effectiveness of type-supervision. We report similar findings using a novel approach for joint Chinese segmentation and POS-tagging, under a cross-domain setting. With the help of unlabeled sentences and a lexicon of 3,000 words, we obtain 33% error reduction in target-domain tagging. In addition, combined type- and token-supervision can lead to improved cost-effectiveness.", "keyphrases": ["domain adaptation", "pos-tagging", "cws"]} +{"id": "cao-etal-2020-unsupervised", "title": "Unsupervised Parsing via Constituency Tests", "abstract": "We propose a method for unsupervised parsing based on the linguistic notion of a constituency test. One type of constituency test involves modifying the sentence via some transformation (e.g. replacing the span with a pronoun) and then judging the result (e.g. checking if it is grammatical).
Motivated by this idea, we design an unsupervised parser by specifying a set of transformations and using an unsupervised neural acceptability model to make grammaticality decisions. To produce a tree given a sentence, we score each span by aggregating its constituency test judgments, and we choose the binary tree with the highest total score. While this approach already achieves performance in the range of current methods, we further improve accuracy by fine-tuning the grammaticality model through a refinement procedure, where we alternate between improving the estimated trees and improving the grammaticality model. The refined model achieves 62.8 F1 on the Penn Treebank test set, an absolute improvement of 7.6 points over the previously best published result.", "keyphrases": ["constituency test", "neural acceptability model", "grammaticality decision", "unsupervised parsing"]} +{"id": "chan-roth-2011-exploiting", "title": "Exploiting Syntactico-Semantic Structures for Relation Extraction", "abstract": "In this paper, we observe that there exists a second dimension to the relation extraction (RE) problem that is orthogonal to the relation type dimension. We show that most of these second dimensional structures are relatively constrained and not difficult to identify. We propose a novel algorithmic approach to RE that starts by first identifying these structures and then, within these, identifying the semantic type of the relation. In the real RE problem where relation arguments need to be identified, exploiting these structures also allows reducing pipelined propagated errors. We show that this RE framework provides significant improvement in RE performance.", "keyphrases": ["relation extraction", "pipeline", "entity recognition", "traditional approach", "error propagation problem"]} +{"id": "choi-etal-2005-identifying", "title": "Identifying Sources of Opinions with Conditional Random Fields and Extraction Patterns", "abstract": "Recent systems have been developed for sentiment classification, opinion recognition, and opinion analysis (e.g., detecting polarity and strength). We pursue another aspect of opinion analysis: identifying the sources of opinions, emotions, and sentiments. We view this problem as an information extraction task and adopt a hybrid approach that combines Conditional Random Fields (Lafferty et al., 2001) and a variation of AutoSlog (Riloff, 1996a). While CRFs model source identification as a sequence tagging task, AutoSlog learns extraction patterns. Our results show that the combination of these two methods performs better than either one alone. The resulting system identifies opinion sources with 79.3% precision and 59.5% recall using a head noun matching measure, and 81.2% precision and 60.6% recall using an overlap measure.", "keyphrases": ["opinion", "conditional random fields", "information extraction task", "hybrid approach", "crf"]} +{"id": "carl-2012-translog", "title": "Translog-II: a Program for Recording User Activity Data for Empirical Reading and Writing Research", "abstract": "This paper presents a novel implementation of Translog-II. Translog-II is a Windows-oriented program to record and study reading and writing processes on a computer. In our research, it is an instrument to acquire objective, digital data of human translation processes. 
Like its predecessors, Translog 2000 and Translog 2006, Translog-II consists of two main components: Translog-II Supervisor and Translog-II User, which are used to create a project file, to run text production experiments (a user reads, writes or translates a text) and to replay the session. Translog produces a log file which contains all user activity data of the reading, writing, or translation session, and which can be evaluated by external tools. While there is a large body of translation process research based on Translog, this paper gives an overview of the Translog-II functions and its data visualization options.", "keyphrases": ["user activity data", "reading", "translation process research", "translog-ii"]} +{"id": "bouchacourt-baroni-2018-agents", "title": "How agents see things: On visual representations in an emergent language game", "abstract": "There is growing interest in the language developed by agents interacting in emergent-communication settings. Earlier studies have focused on the agents' symbol usage, rather than on their representation of visual input. In this paper, we consider the referential games of Lazaridou et al. (2017), and investigate the representations the agents develop during their evolving interaction. We find that the agents establish successful communication by inducing visual representations that almost perfectly align with each other, but, surprisingly, do not capture the conceptual properties of the objects depicted in the input images. We conclude that, if we care about developing language-like communication systems, we must pay more attention to the visual semantics agents associate to the symbols they use.", "keyphrases": ["agent", "visual representation", "emergent language"]} +{"id": "rama-2016-siamese", "title": "Siamese Convolutional Networks for Cognate Identification", "abstract": "In this paper, we present phoneme level Siamese convolutional networks for the task of pair-wise cognate identification. We represent a word as a two-dimensional matrix and employ a siamese convolutional network for learning deep representations. We present siamese architectures that jointly learn phoneme level feature representations and language relatedness from raw words for cognate identification. Compared to previous works, we train and test on larger and realistic datasets, and show that siamese architectures consistently perform better than the traditional linear classifier approach.", "keyphrases": ["cognate identification", "siamese architecture", "language relatedness", "convolutional neural network", "phonetic feature"]} +{"id": "li-etal-2018-transformation", "title": "Transformation Networks for Target-Oriented Sentiment Classification", "abstract": "Target-oriented sentiment classification aims at classifying sentiment polarities over individual opinion targets in a sentence. RNN with attention seems a good fit for the characteristics of this task, and indeed it achieves the state-of-the-art performance. After re-examining the drawbacks of attention mechanism and the obstacles that block CNN to perform well in this classification task, we propose a new model that achieves new state-of-the-art results on a few benchmarks. Instead of attention, our model employs a CNN layer to extract salient features from the transformed word representations originating from a bi-directional RNN layer.
Between the two layers, we propose a component which first generates target-specific representations of words in the sentence, and then incorporates a mechanism for preserving the original contextual information from the RNN layer.", "keyphrases": ["sentiment classification", "cnn", "contextual information", "absa", "deep learning"]} +{"id": "murawaki-2015-continuous", "title": "Continuous Space Representations of Linguistic Typology and their Application to Phylogenetic Inference", "abstract": "For phylogenetic inference, linguistic typology is a promising alternative to lexical evidence because it allows us to compare an arbitrary pair of languages. A challenging problem with typology-based phylogenetic inference is that the changes of typological features over time are less intuitive than those of lexical features. In this paper, we work on reconstructing typologically natural ancestors. To do this, we leverage dependencies among typological features. We first represent each language by continuous latent components that capture feature dependencies. We then combine them with a typology evaluator that distinguishes typologically natural languages from other possible combinations of features. We perform phylogenetic inference in the continuous space and use the evaluator to ensure the typological naturalness of inferred ancestors. We show that the proposed method reconstructs known language families more accurately than baseline methods. Lastly, assuming the monogenesis hypothesis, we attempt to reconstruct a common ancestor of the world\u2019s languages.", "keyphrases": ["linguistic typology", "phylogenetic inference", "continuous latent component"]} +{"id": "nguyen-moschitti-2011-end", "title": "End-to-End Relation Extraction Using Distant Supervision from External Semantic Repositories", "abstract": "In this paper, we extend distant supervision (DS) based on Wikipedia for Relation Extraction (RE) by considering (i) relations defined in external repositories, e.g. YAGO, and (ii) any subset of Wikipedia documents. We show that training data constituted by sentences containing pairs of named entities in target relations is enough to produce reliable supervision. Our experiments with state-of-the-art relation extraction models, trained on the above data, show a meaningful F1 of 74.29% on a manually annotated test set: this highly improves the state-of-the-art in RE using DS. Additionally, our end-to-end experiments demonstrated that our extractors can be applied to any general text document.", "keyphrases": ["relation extraction", "distant supervision", "wikipedia"]} +{"id": "le-nagard-koehn-2010-aiding", "title": "Aiding Pronoun Translation with Co-Reference Resolution", "abstract": "We propose a method to improve the translation of pronouns by resolving their co-reference to prior mentions. We report results using two different co-reference resolution methods and point to remaining challenges.", "keyphrases": ["pronoun translation", "co-reference resolution", "antecedent", "annotated corpus", "coreference link"]} +{"id": "melamud-etal-2016-context2vec", "title": "context2vec: Learning Generic Context Embedding with Bidirectional LSTM", "abstract": "Context representations are central to various NLP tasks, such as word sense disambiguation, named entity recognition, co-reference resolution, and many more. In this work we present a neural model for efficiently learning a generic context embedding function from large corpora, using bidirectional LSTM.
With a very simple application of our context representations, we manage to surpass or nearly reach state-of-the-art results on sentence completion, lexical substitution and word sense disambiguation tasks, while substantially outperforming the popular context representation of averaged word embeddings. We release our code and pre-trained models, suggesting they could be useful in a wide variety of NLP tasks.", "keyphrases": ["generic context", "bidirectional lstm", "state-of-the-art result", "word sense disambiguation", "supervised approach"]} +{"id": "miller-etal-2004-name", "title": "Name Tagging with Word Clusters and Discriminative Training", "abstract": "We present a technique for augmenting annotated training data with hierarchical word clusters that are automatically derived from a large unannotated corpus. Cluster membership is encoded in features that are incorporated in a discriminatively trained tagging model. Active learning is used to select training examples. We evaluate the technique for named-entity tagging. Compared with a state-of-the-art HMM-based name finder, the presented technique requires only 13% as much annotated data to achieve the same level of performance. Given a large annotated training set of 1,000,000 words, the technique achieves a 25% reduction in error over the state-of-the-art HMM trained on the same material.", "keyphrases": ["word cluster", "active learning", "name tagging", "unlabeled data"]} +{"id": "baroni-etal-2014-dont", "title": "Don't count, predict! A systematic comparison of context-counting vs. context-predicting semantic vectors", "abstract": "Context-predicting models (more commonly known as embeddings or neural language models) are the new kids on the distributional semantics block. Despite the buzz surrounding these models, the literature is still lacking a systematic comparison of the predictive models with classic, count-vector-based distributional semantic approaches. In this paper, we perform such an extensive evaluation, on a wide range of lexical semantics tasks and across many parameter settings. The results, to our own surprise, show that the buzz is fully justified, as the context-predicting models obtain a thorough and resounding victory against their count-based counterparts.", "keyphrases": ["systematic comparison", "word embedding", "count-based method", "semantic model", "preference"]} +{"id": "koo-etal-2008-simple", "title": "Simple Semi-supervised Dependency Parsing", "abstract": "We present a simple and effective semi-supervised method for training dependency parsers. We focus on the problem of lexical representation, introducing features that incorporate word clusters derived from a large unannotated corpus. We demonstrate the effectiveness of the approach in a series of dependency parsing experiments on the Penn Treebank and Prague Dependency Treebank, and we show that the cluster-based features yield substantial gains in performance across a wide range of conditions. For example, in the case of English unlabeled second-order parsing, we improve from a baseline accuracy of 92.02% to 93.16%, and in the case of Czech unlabeled second-order parsing, we improve from a baseline accuracy of 86.13% to 87.13%.
In addition, we demonstrate that our method also improves performance when small amounts of training data are available, and can roughly halve the amount of supervised data required to reach a desired level of performance.", "keyphrases": ["word cluster", "czech", "unlabeled data", "brown cluster", "error reduction"]} +{"id": "soni-etal-2013-exploring", "title": "Exploring Verb Frames for Sentence Simplification in Hindi", "abstract": "Systems processing natural language text encounter fatal problems due to long and complex sentences. Their performance degrades as the complexity of the sentence increases. This paper addresses the task of simplifying complex sentences in Hindi into multiple simple sentences, using a rule based approach. Our approach utilizes two linguistic resources, viz. verb demand frames and a conjunct list. We performed automatic as well as human evaluation of our system.", "keyphrases": ["sentence simplification", "hindi", "conjunct"]} +{"id": "islamaj-dogan-etal-2017-biocreative", "title": "BioCreative VI Precision Medicine Track: creating a training corpus for mining protein-protein interactions affected by mutations", "abstract": "The Precision Medicine Track in BioCreative VI aims to bring together the BioNLP community for a novel challenge focused on mining the biomedical literature in search of mutations and protein-protein interactions (PPI). In order to support this track with an effective training dataset with limited curator time, the track organizers carefully reviewed PubMed articles from two different sources: curated public PPI databases, and the results of state-of-the-art public text mining tools. We detail here the data collection, manual review and annotation process and describe this training corpus's characteristics. We also describe a corpus performance baseline. This analysis will provide useful information to developers and researchers for comparing and developing innovative text mining approaches for the BioCreative VI challenge and other Precision Medicine related applications.", "keyphrases": ["precision medicine track", "protein-protein interaction", "mutation"]} +{"id": "yu-etal-2018-diverse", "title": "Diverse Few-Shot Text Classification with Multiple Metrics", "abstract": "We study few-shot learning in natural language domains. Compared to many existing works that apply either metric-based or optimization-based meta-learning to image domain with low inter-task variance, we consider a more realistic setting, where tasks are diverse. However, it imposes tremendous difficulties to existing state-of-the-art metric-based algorithms since a single metric is insufficient to capture complex task variations in natural language domain. To alleviate the problem, we propose an adaptive metric learning approach that automatically determines the best weighted combination from a set of metrics obtained from meta-training tasks for a newly seen few-shot task. Extensive quantitative evaluations on real-world sentiment analysis and dialog intent classification datasets demonstrate that the proposed method performs favorably against state-of-the-art few shot learning algorithms in terms of predictive accuracy.
We make our code and data available for further study.", "keyphrases": ["text classification", "multiple metric", "few-shot task", "new class", "training task"]} +{"id": "venugopal-etal-2014-relieving", "title": "Relieving the Computational Bottleneck: Joint Inference for Event Extraction with High-Dimensional Features", "abstract": "Several state-of-the-art event extraction systems employ models based on Support Vector Machines (SVMs) in a pipeline architecture, which fails to exploit the joint dependencies that typically exist among events and arguments. While there have been attempts to overcome this limitation using Markov Logic Networks (MLNs), it remains challenging to perform joint inference in MLNs when the model encodes many high-dimensional sophisticated features such as those essential for event extraction. In this paper, we propose a new model for event extraction that combines the power of MLNs and SVMs, dwarfing their limitations. The key idea is to reliably learn and process high-dimensional features using SVMs; encode the output of SVMs as low-dimensional, soft formulas in MLNs; and use the superior joint inferencing power of MLNs to enforce joint consistency constraints over the soft formulas. We evaluate our approach for the task of extracting biomedical events on the BioNLP 2013, 2011 and 2009 Genia shared task datasets. Our approach yields the best F1 score to date on the BioNLP\u201913 (53.61) and BioNLP\u201911 (58.07) datasets and the second-best F1 score to date on the BioNLP\u201909 dataset (58.16).", "keyphrases": ["joint inference", "event extraction", "svms", "hand-crafted feature", "argument extraction"]} +{"id": "luo-etal-2018-incorporating", "title": "Incorporating Glosses into Neural Word Sense Disambiguation", "abstract": "Word Sense Disambiguation (WSD) aims to identify the correct meaning of polysemous words in a particular context. Lexical resources like WordNet have proved to be of great help for WSD in knowledge-based methods. However, previous neural networks for WSD always rely on massive labeled data (context), ignoring lexical resources like glosses (sense definitions). In this paper, we integrate the context and glosses of the target word into a unified framework in order to make full use of both labeled data and lexical knowledge. Therefore, we propose GAS: a gloss-augmented WSD neural network which jointly encodes the context and glosses of the target word. GAS models the semantic relationship between the context and the gloss in an improved memory network framework, which breaks the barriers of the previous supervised methods and knowledge-based methods. We further extend the original gloss of word sense via its semantic relations in WordNet to enrich the gloss information. The experimental results show that our model outperforms the state-of-the-art systems on several English all-words WSD datasets.", "keyphrases": ["gloss", "target word", "semantic relationship"]} +{"id": "pitler-etal-2008-easily", "title": "Easily Identifiable Discourse Relations", "abstract": "We present a corpus study of local discourse relations based on the Penn Discourse Tree Bank, a large manually annotated corpus of explicitly or implicitly realized relations. We show that while there is a large degree of ambiguity in temporal explicit discourse connectives, overall connectives are mostly unambiguous and allow high-accuracy prediction of discourse relation type. We achieve 93.09% accuracy in classifying the explicit relations and 74.74% accuracy overall.
In addition, we show that some pairs of relations occur together in text more often than expected by chance. This finding suggests that global sequence classification of the relations in text can lead to better results, especially for implicit relations.", "keyphrases": ["discourse relation", "explicit relation", "cue", "high accuracy", "previous study"]} +{"id": "flanigan-etal-2014-discriminative", "title": "A Discriminative Graph-Based Parser for the Abstract Meaning Representation", "abstract": "Abstract Meaning Representation (AMR) is a semantic formalism for which a growing set of annotated examples is available. We introduce the first approach to parse sentences into this representation, providing a strong baseline for future improvement. The method is based on a novel algorithm for finding a maximum spanning, connected subgraph, embedded within a Lagrangian relaxation of an optimization problem that imposes linguistically inspired constraints. Our approach is described in the general framework of structured prediction, allowing future incorporation of additional features and constraints, and may extend to other formalisms as well. Our open-source system, JAMR, is available at: http://github.com/jflanigan/jamr", "keyphrases": ["abstract meaning representation", "amr graph", "aligner", "jamr parser"]} +{"id": "kim-etal-2006-automatically", "title": "Automatically Assessing Review Helpfulness", "abstract": "User-supplied reviews are widely and increasingly used to enhance e-commerce and other websites. Because reviews can be numerous and varying in quality, it is important to assess how helpful each review is. While review helpfulness is currently assessed manually, in this paper we consider the task of automatically assessing it. Experiments using SVM regression on a variety of features over Amazon.com product reviews show promising results, with rank correlations of up to 0.66. We found that the most useful features include the length of the review, its unigrams, and its product rating.", "keyphrases": ["review helpfulness", "ranking", "percentage", "semantic feature"]} +{"id": "van-gael-etal-2009-infinite", "title": "The infinite HMM for unsupervised PoS tagging", "abstract": "We extend previous work on fully unsupervised part-of-speech tagging. Using a non-parametric version of the HMM, called the infinite HMM (iHMM), we address the problem of choosing the number of hidden states in unsupervised Markov models for PoS tagging. We experiment with two non-parametric priors, the Dirichlet and Pitman-Yor processes, on the Wall Street Journal dataset using a parallelized implementation of an iHMM inference algorithm. We evaluate the results with a variety of clustering evaluation metrics and achieve equivalent or better performances than previously reported. Building on this promising result we evaluate the output of the unsupervised PoS tagger as a direct replacement for the output of a fully supervised PoS tagger for the task of shallow parsing and compare the two evaluations.", "keyphrases": ["hmm", "pos tagging", "non-parametric prior"]} +{"id": "laubli-etal-2013-assessing", "title": "Assessing post-editing efficiency in a realistic translation environment", "abstract": "In many experimental studies on assessing post-editing efficiency, idiosyncratic user interfaces isolate translators from translation aids that are available to them in their daily work.
In contrast, our experimental design allows translators to use a well-known translator workbench for both conventional translation and post-editing. We find that post-editing reduces translation time significantly, although considerably less than reported in isolated experiments, and argue that overall assessments of post-editing efficiency should be based on a realistic translation environment.", "keyphrases": ["post-editing efficiency", "realistic translation environment", "translator"]} +{"id": "lei-etal-2016-rationalizing", "title": "Rationalizing Neural Predictions", "abstract": "Prediction without justification has limited applicability. As a remedy, we learn to extract pieces of input text as justifications -- rationales -- that are tailored to be short and coherent, yet sufficient for making the same prediction. Our approach combines two modular components, generator and encoder, which are trained to operate well together. The generator specifies a distribution over text fragments as candidate rationales and these are passed through the encoder for prediction. Rationales are never given during training. Instead, the model is regularized by desiderata for rationales. We evaluate the approach on multi-aspect sentiment analysis against manually annotated test cases. Our approach outperforms an attention-based baseline by a significant margin. We also successfully illustrate the method on the question retrieval task.", "keyphrases": ["neural prediction", "rationale", "subset", "hard attention", "reinforcement learning"]} +{"id": "bunt-2006-dimensions", "title": "Dimensions in Dialogue Act Annotation", "abstract": "This paper is concerned with the fundamentals of multidimensional dialogue act annotation, i.e. with what it means to annotate dialogues with information about the communicative acts that are performed with the utterances, taking various 'dimensions' into account. Two ideas seem to be prevalent in the literature concerning the notion of dimension: (1) dimensions correspond to different types of information; and (2) a dimension is formed by a set of mutually exclusive tags. In DAMSL, for instance, the terms \u201cdimension\u201d and \u201clayer\u201d are used sometimes in the sense of (1) and sometimes in that of (2). We argue that being mutually exclusive is not a good criterion for a set of dialogue act types to constitute a dimension, even though the description of an object in a multidimensional space should never assign more than one value per dimension. We define a dimension of dialogue act annotation as an aspect of participating in a dialogue that can be addressed independently by means of dialogue acts. We show that DAMSL dimensions such as Info-request, Statement, and Answer do not qualify as proper dimensions, and that the communicative functions in these categories do not fall in any specific dimension, but should be considered as \u201cgeneral-purpose\u201d in the sense that they can be used in any dimension.
We argue that using the notion of dimension that we propose, a multidimensional taxonomy of dialogue acts emerges that optimally supports multidimensional dialogue act annotation.", "keyphrases": ["dialogue act annotation", "communicative function", "dimension"]} +{"id": "mizumoto-nagata-2017-analyzing", "title": "Analyzing the Impact of Spelling Errors on POS-Tagging and Chunking in Learner English", "abstract": "Part-of-speech (POS) tagging and chunking have been used in tasks targeting learner English; however, to the best of our knowledge, few studies have evaluated their performance and no studies have revealed the causes of POS-tagging/chunking errors in detail. Therefore, we investigate performance and analyze the causes of failure. We focus on spelling errors that occur frequently in learner English. We demonstrate that spelling errors reduced POS-tagging performance by 0.23%, and that a spell checker is not necessary for POS-tagging/chunking of learner English.", "keyphrases": ["spelling error", "pos-tagging", "learner english"]} +{"id": "gangi-reddy-etal-2019-multi", "title": "Multi-Level Memory for Task Oriented Dialogs", "abstract": "Recent end-to-end task oriented dialog systems use memory architectures to incorporate external knowledge in their dialogs. Current work makes simplifying assumptions about the structure of the knowledge base, such as the use of triples to represent knowledge, and combines dialog utterances (context) as well as knowledge base (KB) results as part of the same memory. This causes an explosion in the memory size, and makes the reasoning over memory harder. In addition, such a memory design forces hierarchical properties of the data to be fit into a triple structure of memory. This requires the memory reader to infer relationships across otherwise connected attributes. In this paper we relax the strong assumptions made by existing architectures and separate memories used for modeling dialog context and KB results. Instead of using triples to store KB results, we introduce a novel multi-level memory architecture consisting of cells for each query and their corresponding results. The multi-level memory first addresses queries, followed by results and finally each key-value pair within a result. We conduct detailed experiments on three publicly available task oriented dialog data sets and we find that our method conclusively outperforms current state-of-the-art models. We report a 15-25% increase in both entity F1 and BLEU scores.", "keyphrases": ["memory", "dialog", "multi-level memory architecture", "task-oriented dialogue generation"]} +{"id": "barnes-etal-2021-structured", "title": "Structured Sentiment Analysis as Dependency Graph Parsing", "abstract": "Structured sentiment analysis attempts to extract full opinion tuples from a text, but over time this task has been subdivided into smaller and smaller sub-tasks, e.g., target extraction or targeted polarity classification. We argue that this division has become counterproductive and propose a new unified framework to remedy the situation. We cast the structured sentiment problem as dependency graph parsing, where the nodes are spans of sentiment holders, targets and expressions, and the arcs are the relations between them. We perform experiments on five datasets in four languages (English, Norwegian, Basque, and Catalan) and show that this approach leads to strong improvements over state-of-the-art baselines. 
Our analysis shows that refining the sentiment graphs with syntactic dependency information further improves results.", "keyphrases": ["sentiment analysis", "dependency graph parsing", "target extraction", "polarity", "strong improvement"]} +{"id": "feng-etal-2020-genaug", "title": "GenAug: Data Augmentation for Finetuning Text Generators", "abstract": "In this paper, we investigate data augmentation for text generation, which we call GenAug. Text generation and language modeling are important tasks within natural language processing, and are especially challenging for low-data regimes. We propose and evaluate various augmentation methods, including some that incorporate external knowledge, for finetuning GPT-2 on a subset of Yelp Reviews. We also examine the relationship between the amount of augmentation and the quality of the generated text. We utilize several metrics that evaluate important aspects of the generated text including its diversity and fluency. Our experiments demonstrate that insertion of character-level synthetic noise and keyword replacement with hypernyms are effective augmentation methods, and that the quality of generations improves to a peak at approximately three times the amount of original data.", "keyphrases": ["data augmentation", "text generator", "character-level synthetic noise", "genaug"]} +{"id": "koehn-etal-2020-findings", "title": "Findings of the WMT 2020 Shared Task on Parallel Corpus Filtering and Alignment", "abstract": "Following the two preceding WMT Shared Tasks on Parallel Corpus Filtering (Koehn et al., 2018, 2019), we again posed the challenge of assigning sentence-level quality scores for very noisy corpora of sentence pairs crawled from the web, with the goal of sub-selecting the highest-quality data to be used to train machine translation systems. This year, the task tackled the low-resource condition of Pashto\u2013English and Khmer\u2013English and also included the challenge of sentence alignment from document pairs.", "keyphrases": ["wmt", "shared task", "parallel corpus filtering"]} +{"id": "dubey-2005-lexicalization", "title": "What to Do When Lexicalization Fails: Parsing German with Suffix Analysis and Smoothing", "abstract": "In this paper, we present an unlexicalized parser for German which employs smoothing and suffix analysis to achieve a labelled bracket F-score of 76.2, higher than previously reported results on the NEGRA corpus. In addition to the high accuracy of the model, the use of smoothing in an unlexicalized parser allows us to better examine the interplay between smoothing and parsing results.", "keyphrases": ["german", "suffix analysis", "smoothing"]} +{"id": "rosenberg-hirschberg-2007-v", "title": "V-Measure: A Conditional Entropy-Based External Cluster Evaluation Measure", "abstract": "We present V-measure, an external entropy-based cluster evaluation measure. V-measure provides an elegant solution to many problems that affect previously defined cluster evaluation measures including 1) dependence on clustering algorithm or data set, 2) the \u201cproblem of matching\u201d, where the clustering of only a portion of data points is evaluated and 3) accurate evaluation and combination of two desirable aspects of clustering, homogeneity and completeness. We compare V-measure to a number of popular cluster evaluation measures and demonstrate that it satisfies several desirable properties of clustering solutions, using simulated clustering results. 
Finally, we use V-measure to evaluate two clustering tasks: document clustering and pitch accent type clustering.", "keyphrases": ["evaluation measure", "clustering solution", "v-measure"]} +{"id": "mcdonald-etal-2005-non", "title": "Non-Projective Dependency Parsing using Spanning Tree Algorithms", "abstract": "We formalize weighted dependency parsing as searching for maximum spanning trees (MSTs) in directed graphs. Using this representation, the parsing algorithm of Eisner (1996) is sufficient for searching over all projective trees in O(n^3) time. More surprisingly, the representation is extended naturally to non-projective parsing using the Chu-Liu-Edmonds (Chu and Liu, 1965; Edmonds, 1967) MST algorithm, yielding an O(n^2) parsing algorithm. We evaluate these methods on the Prague Dependency Treebank using online large-margin learning techniques (Crammer et al., 2003; McDonald et al., 2005) and show that MST parsing increases efficiency and accuracy for languages with non-projective dependencies.", "keyphrases": ["maximum spanning tree", "non-projective dependency parsing", "mstparser", "graph-based parser", "graph-based approach"]} +{"id": "heafield-2011-kenlm", "title": "KenLM: Faster and Smaller Language Model Queries", "abstract": "We present KenLM, a library that implements two data structures for efficient language model queries, reducing both time and memory costs. The Probing data structure uses linear probing hash tables and is designed for speed. Compared with the widely-used SRILM, our Probing model is 2.4 times as fast while using 57% of the memory. The Trie data structure is a trie with bit-level packing, sorted records, interpolation search, and optional quantization aimed at lower memory consumption. Trie simultaneously uses less memory than the smallest lossless baseline and less CPU than the fastest baseline. Our code is open-source, thread-safe, and integrated into the Moses, cdec, and Joshua translation systems. This paper describes the several performance techniques used and presents benchmarks against alternative implementations.", "keyphrases": ["language model", "kenlm", "storage", "sentence pair", "qe-clean system"]} +{"id": "mikolov-etal-2018-advances", "title": "Advances in Pre-Training Distributed Word Representations", "abstract": "Many Natural Language Processing applications nowadays rely on pre-trained word representations estimated from large text corpora such as news collections, Wikipedia and Web Crawl. In this paper, we show how to train high-quality word vector representations by using a combination of known tricks that are however rarely used together. The main result of our work is the new set of publicly available pre-trained models that outperform the current state of the art by a large margin on a number of tasks.", "keyphrases": ["word representation", "pre-trained model", "fasttext"]} +{"id": "chen-etal-2018-neural-natural", "title": "Neural Natural Language Inference Models Enhanced with External Knowledge", "abstract": "Modeling natural language inference is a very challenging task. With the availability of large annotated data, it has recently become feasible to train complex models such as neural-network-based inference models, which have shown to achieve the state-of-the-art performance. Although there exist relatively large annotated data, can machines learn all knowledge needed to perform natural language inference (NLI) from these data? 
If not, how can neural-network-based NLI models benefit from external knowledge, and how can we build NLI models to leverage it? In this paper, we enrich the state-of-the-art neural natural language inference models with external knowledge. We demonstrate that the proposed models improve neural NLI models to achieve the state-of-the-art performance on the SNLI and MultiNLI datasets.", "keyphrases": ["external knowledge", "nli model", "kim", "co-attention", "local inference collection"]} +{"id": "chen-cardie-2018-unsupervised", "title": "Unsupervised Multilingual Word Embeddings", "abstract": "Multilingual Word Embeddings (MWEs) represent words from multiple languages in a single distributional vector space. Unsupervised MWE (UMWE) methods acquire multilingual embeddings without cross-lingual supervision, which is a significant advantage over traditional supervised approaches and opens many new possibilities for low-resource languages. Prior art for learning UMWEs, however, merely relies on a number of independently trained Unsupervised Bilingual Word Embeddings (UBWEs) to obtain multilingual embeddings. These methods fail to leverage the interdependencies that exist among many languages. To address this shortcoming, we propose a fully unsupervised framework for learning MWEs that directly exploits the relations between all language pairs. Our model substantially outperforms previous approaches in the experiments on multilingual word translation and cross-lingual word similarity. In addition, our model even beats supervised approaches trained with cross-lingual resources.", "keyphrases": ["multilingual word embedding", "multiple language", "unsupervised approach"]} +{"id": "seroussi-etal-2014-authorship", "title": "Authorship Attribution with Topic Models", "abstract": "Authorship attribution deals with identifying the authors of anonymous texts. Traditionally, research in this field has focused on formal texts, such as essays and novels, but recently more attention has been given to texts generated by on-line users, such as e-mails and blogs. Authorship attribution of such on-line texts is a more challenging task than traditional authorship attribution, because such texts tend to be short, and the number of candidate authors is often larger than in traditional settings. We address this challenge by using topic models to obtain author representations. In addition to exploring novel ways of applying two popular topic models to this task, we test our new model that projects authors and documents to two disjoint topic spaces. Utilizing our model in authorship attribution yields state-of-the-art performance on several data sets, containing either formal texts written by a few authors or informal texts generated by tens to thousands of on-line users. We also present experimental results that demonstrate the applicability of topical author representations to two other problems: inferring the sentiment polarity of texts, and predicting the ratings that users would give to items such as movies.", "keyphrases": ["blog", "authorship attribution", "author-topic model"]} +{"id": "vickrey-koller-2008-sentence", "title": "Sentence Simplification for Semantic Role Labeling", "abstract": "Parse-tree paths are commonly used to incorporate information from syntactic parses into NLP systems. These systems typically treat the paths as atomic (or nearly atomic) features; these features are quite sparse due to the immense variety of syntactic expression. 
In this paper, we propose a general method for learning how to iteratively simplify a sentence, thus decomposing complicated syntax into small, easy-to-process pieces. Our method applies a series of hand-written transformation rules corresponding to basic syntactic patterns \u2014 for example, one rule \u201cdepassivizes\u201d a sentence. The model is parameterized by learned weights specifying preferences for some rules over others. After applying all possible transformations to a sentence, we are left with a set of candidate simplified sentences. We apply our simplification system to semantic role labeling (SRL). As we do not have labeled examples of correct simplifications, we use labeled training data for the SRL task to jointly learn both the weights of the simplification model and of an SRL model, treating the simplification as a hidden variable. By extracting and labeling simplified sentences, this combined simplification/SRL system better generalizes across syntactic variation. It achieves a statistically significant 1.2% F1 measure increase over a strong baseline on the CoNLL-2005 SRL task, attaining near-state-of-the-art performance.", "keyphrases": ["semantic role labeling", "path", "text simplification"]} +{"id": "katiyar-cardie-2016-investigating", "title": "Investigating LSTMs for Joint Extraction of Opinion Entities and Relations", "abstract": "We investigate the use of deep bi-directional LSTMs for joint extraction of opinion entities and the IS-FROM and IS-ABOUT relations that connect them \u2014 the first such attempt using a deep learning approach. Perhaps surprisingly, we find that standard LSTMs are not competitive with a state-of-the-art CRF+ILP joint inference approach (Yang and Cardie, 2013) to opinion entities extraction, performing below even the standalone sequence-tagging CRF. Incorporating sentence-level and a novel relation-level optimization, however, allows the LSTM to identify opinion relations and to perform within 1\u20133% of the state-of-the-art joint model for opinion entities and the IS-FROM relation; and to perform as well as the state-of-the-art for the IS-ABOUT relation \u2014 all without access to opinion lexicons, parsers and other preprocessing components required for the feature-rich CRF+ILP approach.", "keyphrases": ["joint extraction", "opinion entity", "bidirectional lstm"]} +{"id": "lin-etal-2020-commongen", "title": "CommonGen: A Constrained Text Generation Challenge for Generative Commonsense Reasoning", "abstract": "Recently, large-scale pre-trained language models have demonstrated impressive performance on several commonsense-reasoning benchmark datasets. However, building machines with commonsense to compose realistically plausible sentences remains challenging. In this paper, we present a constrained text generation task, CommonGen associated with a benchmark dataset, to explicitly test machines for the ability of generative commonsense reasoning. Given a set of common concepts (e.g., dog, frisbee, catch, throw), the task is to generate a coherent sentence describing an everyday scenario using these concepts (e.g., \u201ca man throws a frisbee and his dog catches it\u201d). The CommonGen task is challenging because it inherently requires 1) relational reasoning with background commonsense knowledge and 2) compositional generalization ability to work on unseen concept combinations. 
Our dataset, constructed through a combination of crowdsourced and existing caption corpora, consists of 77k commonsense descriptions over 35k unique concept-sets. Experiments show that there is a large gap between state-of-the-art text generation models (e.g., T5) and human performance (31.6% vs. 63.5% in the SPICE metric). Furthermore, we demonstrate that the learned generative commonsense reasoning capability can be transferred to improve downstream tasks such as CommonsenseQA (76.9% to 78.4% in dev accuracy) by generating additional context.", "keyphrases": ["language model", "commongen", "bart"]} +{"id": "wiegand-klakow-2010-convolution", "title": "Convolution Kernels for Opinion Holder Extraction", "abstract": "Opinion holder extraction is one of the important subtasks in sentiment analysis. The effective detection of an opinion holder depends on the consideration of various cues on various levels of representation, though they are hard to formulate explicitly as features. In this work, we propose to use convolution kernels for that task which identify meaningful fragments of sequences or trees by themselves. We not only investigate how different levels of information can be effectively combined in different kernels but also examine how the scope of these kernels should be chosen. In general relation extraction, the two candidate entities thought to be involved in a relation are commonly chosen to be the boundaries of sequences and trees. The definition of boundaries in opinion holder extraction, however, is less straightforward since there might be several expressions beside the candidate opinion holder to be eligible for being a boundary.", "keyphrases": ["opinion holder extraction", "sentiment analysis", "convolution kernel"]} +{"id": "lewis-etal-2021-paq", "title": "PAQ: 65 Million Probably-Asked Questions and What You Can Do With Them", "abstract": "Open-domain Question Answering models that directly leverage question-answer (QA) pairs, such as closed-book QA (CBQA) models and QA-pair retrievers, show promise in terms of speed and memory compared with conventional models which retrieve and read from text corpora. QA-pair retrievers also offer interpretable answers, a high degree of control, and are trivial to update at test time with new knowledge. However, these models fall short of the accuracy of retrieve-and-read systems, as substantially less knowledge is covered by the available QA-pairs relative to text corpora like Wikipedia. To facilitate improved QA-pair models, we introduce Probably Asked Questions (PAQ), a very large resource of 65M automatically generated QA-pairs. We introduce a new QA-pair retriever, RePAQ, to complement PAQ. We find that PAQ preempts and caches test questions, enabling RePAQ to match the accuracy of recent retrieve-and-read models, whilst being significantly faster. Using PAQ, we train CBQA models which outperform comparable baselines by 5%, but trail RePAQ by over 15%, indicating the effectiveness of explicit retrieval. RePAQ can be configured for size (under 500MB) or speed (over 1K questions per second) while retaining high accuracy. Lastly, we demonstrate RePAQ's strength at selective QA, abstaining from answering when it is likely to be incorrect. 
This enables RePAQ to \u201cback-off\u201d to a more expensive state-of-the-art model, leading to a combined system which is both more accurate and 2x faster than the state-of-the-art model alone.", "keyphrases": ["memory", "asked questions", "paq"]} +{"id": "angeli-etal-2010-simple", "title": "A Simple Domain-Independent Probabilistic Approach to Generation", "abstract": "We present a simple, robust generation system which performs content selection and surface realization in a unified, domain-independent framework. In our approach, we break up the end-to-end generation process into a sequence of local decisions, arranged hierarchically and each trained discriminatively. We deployed our system in three different domains---Robocup sportscasting, technical weather forecasts, and common weather forecasts, obtaining results comparable to state-of-the-art domain-specific systems both in terms of BLEU scores and human evaluation.", "keyphrases": ["probabilistic approach", "content selection", "surface realization", "decision", "database record"]} +{"id": "weeds-weir-2005-co", "title": "Co-occurrence Retrieval: A Flexible Framework for Lexical Distributional Similarity", "abstract": "Techniques that exploit knowledge of distributional similarity between words have been proposed in many areas of Natural Language Processing. For example, in language modeling, the sparse data problem can be alleviated by estimating the probabilities of unseen co-occurrences of events from the probabilities of seen co-occurrences of similar events. In other applications, distributional similarity is taken to be an approximation to semantic similarity. However, due to the wide range of potential applications and the lack of a strict definition of the concept of distributional similarity, many methods of calculating distributional similarity have been proposed or adopted. In this work, a flexible, parameterized framework for calculating distributional similarity is proposed. Within this framework, the problem of finding distributionally similar words is cast as one of co-occurrence retrieval (CR) for which precision and recall can be measured by analogy with the way they are measured in document retrieval. As will be shown, a number of popular existing measures of distributional similarity are simulated with parameter settings within the CR framework. In this article, the CR framework is then used to systematically investigate three fundamental questions concerning distributional similarity. First, is the relationship of lexical similarity necessarily symmetric, or are there advantages to be gained from considering it as an asymmetric relationship? Second, are some co-occurrences inherently more salient than others in the calculation of distributional similarity? Third, is it necessary to consider the difference in the extent to which each word occurs in each co-occurrence type? Two application-based tasks are used for evaluation: automatic thesaurus generation and pseudo-disambiguation. It is possible to achieve significantly better results on both these tasks by varying the parameters within the CR framework rather than using other existing distributional similarity measures; it will also be shown that any single unparameterized measure is unlikely to be able to do better on both tasks. 
This is due to an inherent asymmetry in lexical substitutability and therefore also in lexical distributional similarity.", "keyphrases": ["flexible framework", "lexical distributional similarity", "co-occurrence retrieval", "well result"]} +{"id": "cui-etal-2017-attention", "title": "Attention-over-Attention Neural Networks for Reading Comprehension", "abstract": "Cloze-style reading comprehension is a representative problem in mining relationship between document and query. In this paper, we present a simple but novel model called attention-over-attention reader for better solving cloze-style reading comprehension task. The proposed model aims to place another attention mechanism over the document-level attention and induces \u201cattended attention\u201d for final answer predictions. One advantage of our model is that it is simpler than related works while giving excellent performance. In addition to the primary model, we also propose an N-best re-ranking strategy to double check the validity of the candidates and further improve the performance. Experimental results show that the proposed methods significantly outperform various state-of-the-art systems by a large margin in public datasets, such as CNN and Children's Book Test.", "keyphrases": ["reading comprehension", "reader", "two-way attention mechanism", "neural network model"]} +{"id": "dutta-weikum-2015-cross", "title": "Cross-Document Co-Reference Resolution using Sample-Based Clustering with Knowledge Enrichment", "abstract": "Identifying and linking named entities across information sources is the basis of knowledge acquisition and at the heart of Web search, recommendations, and analytics. An important problem in this context is cross-document co-reference resolution (CCR): computing equivalence classes of textual mentions denoting the same entity, within and across documents. Prior methods employ ranking, clustering, or probabilistic graphical models using syntactic features and distant features from knowledge bases. However, these methods exhibit limitations regarding run-time and robustness. This paper presents the CROCS framework for unsupervised CCR, improving the state of the art in two ways. First, we extend the way knowledge bases are harnessed, by constructing a notion of semantic summaries for intra-document co-reference chains using co-occurring entity mentions belonging to different chains. Second, we reduce the computational cost by a new algorithm that embeds sample-based bisection, using spectral clustering or graph partitioning, in a hierarchical clustering process. This allows scaling up CCR to large corpora. Experiments with three datasets show significant gains in output quality, compared to the best prior methods, and the run-time efficiency of CROCS.", "keyphrases": ["co-reference resolution", "clustering", "mention"]} +{"id": "du-etal-2017-learning", "title": "Learning to Ask: Neural Question Generation for Reading Comprehension", "abstract": "We study automatic question generation for sentences from text passages in reading comprehension. We introduce an attention-based sequence learning model for the task and investigate the effect of encoding sentence- vs. paragraph-level information. In contrast to all previous work, our model does not rely on hand-crafted rules or a sophisticated NLP pipeline; it is instead trainable end-to-end via sequence-to-sequence learning. Automatic evaluation results show that our system significantly outperforms the state-of-the-art rule-based system. 
In human evaluations, questions generated by our system are also rated as being more natural (i.e., grammaticality, fluency) and as more difficult to answer (in terms of syntactic and lexical divergence from the original text and reasoning needed to answer).", "keyphrases": ["question generation", "comprehension", "input text"]} +{"id": "saha-etal-2021-explagraphs", "title": "ExplaGraphs: An Explanation Graph Generation Task for Structured Commonsense Reasoning", "abstract": "Recent commonsense-reasoning tasks are typically discriminative in nature, where a model answers a multiple-choice question for a certain context. Discriminative tasks are limiting because they fail to adequately evaluate the model's ability to reason and explain predictions with underlying commonsense knowledge. They also allow such models to use reasoning shortcuts and not be \u201cright for the right reasons\u201d. In this work, we present ExplaGraphs, a new generative and structured commonsense-reasoning task (and an associated dataset) of explanation graph generation for stance prediction. Specifically, given a belief and an argument, a model has to predict if the argument supports or counters the belief and also generate a commonsense-augmented graph that serves as a non-trivial, complete, and unambiguous explanation for the predicted stance. We collect explanation graphs through a novel Create-Verify-And-Refine graph collection framework that improves the graph quality (up to 90%) via multiple rounds of verification and refinement. A significant 79% of our graphs contain external commonsense nodes with diverse structures and reasoning depths. Next, we propose a multi-level evaluation framework, consisting of automatic metrics and human evaluation, that checks for the structural and semantic correctness of the generated graphs and their degree of match with ground-truth graphs. Finally, we present several structured, commonsense-augmented, and text generation models as strong starting points for this explanation graph generation task, and observe that there is a large gap with human performance, thereby encouraging future work for this new challenging task.", "keyphrases": ["explanation graph", "semantic correctness", "explagraphs"]} +{"id": "iter-etal-2018-automatic", "title": "Automatic Detection of Incoherent Speech for Diagnosing Schizophrenia", "abstract": "Schizophrenia is a mental disorder which afflicts an estimated 0.7% of adults worldwide. It affects many areas of mental function, often evident from incoherent speech. Diagnosing schizophrenia relies on subjective judgments resulting in disagreements even among trained clinicians. Recent studies have proposed the use of natural language processing for diagnosis by drawing on automatically-extracted linguistic features like discourse coherence and lexicon. Here, we present the first benchmark comparison of previously proposed coherence models for detecting symptoms of schizophrenia and evaluate their performance on a new dataset of recorded interviews between subjects and clinicians. We also present two alternative coherence metrics based on modern sentence embedding techniques that outperform the previous methods on our dataset. Lastly, we propose a novel computational model for reference incoherence based on ambiguous pronoun usage and show that it is a highly predictive feature on our data. 
While the number of subjects is limited in this pilot study, our results suggest new directions for diagnosing common symptoms of schizophrenia.", "keyphrases": ["incoherent speech", "schizophrenia", "linguistic feature"]} +{"id": "finkel-etal-2005-incorporating", "title": "Incorporating Non-local Information into Information Extraction Systems by Gibbs Sampling", "abstract": "Most current statistical natural language processing models use only local features so as to permit dynamic programming in inference, but this makes them unable to fully account for the long distance structure that is prevalent in language use. We show how to solve this dilemma with Gibbs sampling, a simple Monte Carlo method used to perform approximate inference in factored probabilistic models. By using simulated annealing in place of Viterbi decoding in sequence models such as HMMs, CMMs, and CRFs, it is possible to incorporate non-local structure while preserving tractable inference. We use this technique to augment an existing CRF-based information extraction system with long-distance dependency models, enforcing label consistency and extraction template consistency constraints. This technique results in an error reduction of up to 9% over state-of-the-art systems on two established information extraction tasks.", "keyphrases": ["non-local information", "information extraction", "gibbs sampling", "stanford ner", "coarse-grained type"]} +{"id": "espinosa-etal-2008-hypertagging", "title": "Hypertagging: Supertagging for Surface Realization with CCG", "abstract": "In lexicalized grammatical formalisms, it is possible to separate lexical category assignment from the combinatory processes that make use of such categories, such as parsing and realization. We adapt techniques from supertagging \u2014 a relatively recent technique that performs complex lexical tagging before full parsing (Bangalore and Joshi, 1999; Clark, 2002) \u2014 for chart realization in OpenCCG, an open-source NLP toolkit for CCG. We call this approach hypertagging, as it operates at a level \u201cabove\u201d the syntax, tagging semantic representations with syntactic lexical categories. Our results demonstrate that a hypertagger-informed chart realizer can achieve substantial improvements in realization speed (being approximately twice as fast) with superior realization quality.", "keyphrases": ["supertag", "surface realization", "ccg", "lexical category"]} +{"id": "pantel-pennacchiotti-2006-espresso", "title": "Espresso: Leveraging Generic Patterns for Automatically Harvesting Semantic Relations", "abstract": "In this paper, we present Espresso, a weakly-supervised, general-purpose, and accurate algorithm for harvesting semantic relations. The main contributions are: i) a method for exploiting generic patterns by filtering incorrect instances using the Web; and ii) a principled measure of pattern and instance reliability enabling the filtering algorithm. We present an empirical comparison of Espresso with various state of the art systems, on different size and genre corpora, on extracting various general and specific relations. 
Experimental results show that our exploitation of generic patterns substantially increases system recall with small effect on overall precision.", "keyphrases": ["semantic relation", "recall", "espresso", "pattern-based approach", "broad coverage"]} +{"id": "feng-etal-2022-language", "title": "Language-agnostic BERT Sentence Embedding", "abstract": "While BERT is an effective method for learning monolingual sentence embeddings for semantic similarity and embedding-based transfer learning, BERT-based cross-lingual sentence embeddings have yet to be explored. We systematically investigate methods for learning multilingual sentence embeddings by combining the best methods for learning monolingual and cross-lingual representations including: masked language modeling (MLM), translation language modeling (TLM), dual encoder translation ranking, and additive margin softmax. We show that introducing a pre-trained multilingual language model dramatically reduces the amount of parallel training data required to achieve good performance by 80%. Composing the best of these methods produces a model that achieves 83.7% bi-text retrieval accuracy over 112 languages on Tatoeba, well above the 65.5% achieved by LASER, while still performing competitively on monolingual transfer learning benchmarks. Parallel data mined from CommonCrawl using our best model is shown to train competitive NMT models for en-zh and en-de. We publicly release our best multilingual sentence embedding model for 109+ languages at .", "keyphrases": ["sentence embedding", "language model", "language-agnostic bert sentence"]} +{"id": "zhou-kong-2009-global", "title": "Global Learning of Noun Phrase Anaphoricity in Coreference Resolution via Label Propagation", "abstract": "Knowledge of noun phrase anaphoricity might be profitably exploited in coreference resolution to bypass the resolution of non-anaphoric noun phrases. However, it is surprising to notice that recent attempts to incorporate automatically acquired anaphoricity information into coreference resolution have been somewhat disappointing. This paper employs a global learning method in determining the anaphoricity of noun phrases via a label propagation algorithm to improve learning-based coreference resolution. In particular, two kinds of kernels, i.e. the feature-based RBF kernel and the convolution tree kernel, are employed to compute the anaphoricity similarity between two noun phrases. Experiments on the ACE 2003 corpus demonstrate the effectiveness of our method in anaphoricity determination of noun phrases and its application in learning-based coreference resolution.", "keyphrases": ["noun phrase anaphoricity", "coreference resolution", "label propagation algorithm"]} +{"id": "zhao-bethard-2020-berts", "title": "How does BERT's attention change when you fine-tune? An analysis methodology and a case study in negation scope", "abstract": "Large pretrained language models like BERT, after fine-tuning to a downstream task, have achieved high performance on a variety of NLP problems. Yet explaining their decisions is difficult despite recent work probing their internal representations. We propose a procedure and analysis methods that take a hypothesis of how a transformer-based model might encode a linguistic phenomenon, and test the validity of that hypothesis based on a comparison between knowledge-related downstream tasks with downstream control tasks, and measurement of cross-dataset consistency. 
We apply this methodology to test BERT and RoBERTa on a hypothesis that some attention heads will consistently attend from a word in negation scope to the negation cue. We find that after fine-tuning BERT and RoBERTa on a negation scope task, the average attention head improves its sensitivity to negation and its attention consistency across negation datasets compared to the pre-trained models. However, only the base models (not the large models) improve compared to a control task, indicating there is evidence for a shallow encoding of negation only in the base models.", "keyphrases": ["bert", "methodology", "negation scope", "attention head"]} +{"id": "sun-etal-2009-chinese", "title": "Chinese Semantic Role Labeling with Shallow Parsing", "abstract": "Most existing systems for Chinese Semantic Role Labeling (SRL) make use of full syntactic parses. In this paper, we evaluate SRL methods that take partial parses as inputs. We first extend the study on Chinese shallow parsing presented in (Chen et al., 2006) by raising a set of additional features. On the basis of our shallow parser, we implement SRL systems which cast SRL as the classification of syntactic chunks with IOB2 representation for semantic roles (i.e. semantic chunks). Two labeling strategies are presented: 1) directly tagging semantic chunks in one-stage, and 2) identifying argument boundaries as a chunking task and labeling their semantic types as a classification task. For both methods, we present encouraging results, achieving significant improvements over the best reported SRL performance in the literature. Additionally, we put forward a rule-based algorithm to automatically acquire Chinese verb formation, which is empirically shown to enhance SRL.", "keyphrases": ["semantic role", "shallow parsing", "srl", "basis"]} +{"id": "tonelli-pianta-2009-novel", "title": "A novel approach to mapping FrameNet lexical units to WordNet synsets (short paper)", "abstract": "In this paper we present a novel approach to mapping FrameNet lexical units to WordNet synsets in order to automatically enrich the lexical unit set of a given frame. While the mapping approaches proposed in the past mainly rely on the semantic similarity between lexical units in a frame and lemmas in a synset, we exploit the definition of the lexical entries in FrameNet and the WordNet glosses to find the best candidate synset(s) for the mapping. Evaluation results are also reported and discussed.", "keyphrases": ["novel approach", "mapping framenet", "wordnet synset"]} +{"id": "fazly-stevenson-2007-distinguishing", "title": "Distinguishing Subtypes of Multiword Expressions Using Linguistically-Motivated Statistical Measures", "abstract": "We identify several classes of multiword expressions that each require a different encoding in a (computational) lexicon, as well as a different treatment within a computational system. We examine linguistic properties pertaining to the degree of semantic idiosyncrasy of these classes of expressions. Accordingly, we propose statistical measures to quantify each property, and use the measures to automatically distinguish the classes.", "keyphrases": ["statistical measure", "idiom", "syntactic behavior"]} +{"id": "passarotti-etal-2017-lemlat", "title": "The Lemlat 3.0 Package for Morphological Analysis of Latin", "abstract": "This paper introduces the main components of the downloadable package of the 3.0 version of the morphological analyser for Latin Lemlat. 
The processes of word form analysis and treatment of spelling variation performed by the tool are detailed, as well as the different output formats and the connection of the results with a recently built resource for derivational morphology of Latin. A light evaluation of the tool\u2019s lexical coverage against a diachronic vocabulary of the entire Latin world is also provided.", "keyphrases": ["latin", "morphological analyzer", "lemma"]} +{"id": "conneau-etal-2018-xnli", "title": "XNLI: Evaluating Cross-lingual Sentence Representations", "abstract": "State-of-the-art natural language processing systems rely on supervision in the form of annotated data to learn competent models. These models are generally trained on data in a single language (usually English), and cannot be directly used beyond that language. Since collecting data in every language is not realistic, there has been a growing interest in cross-lingual language understanding (XLU) and low-resource cross-language transfer. In this work, we construct an evaluation set for XLU by extending the development and test sets of the Multi-Genre Natural Language Inference Corpus (MultiNLI) to 14 languages, including low-resource languages such as Swahili and Urdu. We hope that our dataset, dubbed XNLI, will catalyze research in cross-lingual sentence understanding by providing an informative standard evaluation task. In addition, we provide several baselines for multilingual sentence understanding, including two based on machine translation systems, and two that use parallel data to train aligned multilingual bag-of-words and LSTM encoders. We find that XNLI represents a practical and challenging evaluation suite, and that directly translating the test data yields the best performance among available baselines.", "keyphrases": ["natural language inference", "cross-lingual transfer", "nli", "representation learning", "zero-shot"]} +{"id": "nangia-bowman-2019-human", "title": "Human vs. Muppet: A Conservative Estimate of Human Performance on the GLUE Benchmark", "abstract": "The GLUE benchmark (Wang et al., 2019b) is a suite of language understanding tasks which has seen dramatic progress in the past year, with average performance moving from 70.0 at launch to 83.9, state of the art at the time of writing (May 24, 2019). Here, we measure human performance on the benchmark, in order to learn whether significant headroom remains for further progress. We provide a conservative estimate of human performance on the benchmark through crowdsourcing: Our annotators are non-experts who must learn each task from a brief set of instructions and 20 examples. In spite of limited training, these annotators robustly outperform the state of the art on six of the nine GLUE tasks and achieve an average score of 87.1. Given the fast pace of progress however, the headroom we observe is quite limited. To reproduce the data-poor setting that our annotators must learn in, we also train the BERT model (Devlin et al., 2019) in limited-data regimes, and conclude that low-resource sentence classification remains a challenge for modern neural network approaches to text understanding.", "keyphrases": ["human performance", "glue benchmark", "annotator"]} +{"id": "mizukami-etal-2016-analyzing", "title": "Analyzing the Effect of Entrainment on Dialogue Acts", "abstract": "Entrainment is a factor in dialogue that affects not only human-human but also human-machine interaction. 
While entrainment on the lexical level is well documented, less is known about how entrainment affects dialogue on a more abstract, structural level. In this paper, we investigate the effect of entrainment on dialogue acts and on lexical choice given dialogue acts, as well as how entrainment changes during a dialogue. We also define a novel measure of entrainment to measure these various types of entrainment. These results may serve as guidelines for dialogue systems that would like to entrain with users in a similar manner.", "keyphrases": ["entrainment", "dialogue act", "lexical choice"]} +{"id": "li-etal-2019-semi-supervised-domain", "title": "Semi-supervised Domain Adaptation for Dependency Parsing", "abstract": "During the past decades, due to the lack of sufficient labeled data, most studies on cross-domain parsing focus on unsupervised domain adaptation, assuming there is no target-domain training data. However, unsupervised approaches make limited progress so far due to the intrinsic difficulty of both domain adaptation and parsing. This paper tackles the semi-supervised domain adaptation problem for Chinese dependency parsing, based on two newly-annotated large-scale domain-aware datasets. We propose a simple domain embedding approach to merge the source- and target-domain training data, which is shown to be more effective than both direct corpus concatenation and multi-task learning. In order to utilize unlabeled target-domain data, we employ the recent contextualized word representations and show that a simple fine-tuning procedure can further boost cross-domain parsing accuracy by large margin.", "keyphrases": ["dependency parsing", "semi-supervised domain adaptation", "extra domain"]} +{"id": "eisenstein-barzilay-2008-bayesian", "title": "Bayesian Unsupervised Topic Segmentation", "abstract": "This paper describes a novel Bayesian approach to unsupervised topic segmentation. Unsupervised systems for this task are driven by lexical cohesion: the tendency of well-formed segments to induce a compact and consistent lexical distribution. We show that lexical cohesion can be placed in a Bayesian context by modeling the words in each topic segment as draws from a multinomial language model associated with the segment; maximizing the observation likelihood in such a model yields a lexically-cohesive segmentation. This contrasts with previous approaches, which relied on hand-crafted cohesion metrics. The Bayesian framework provides a principled way to incorporate additional features such as cue phrases, a powerful indicator of discourse structure that has not been previously used in unsupervised segmentation systems. Our model yields consistent improvements over an array of state-of-the-art systems on both text and speech datasets. We also show that both an entropy-based analysis and a well-known previous technique can be derived as special cases of the Bayesian framework.", "keyphrases": ["topic segmentation", "bayesian approach", "lexical cohesion"]} +{"id": "wang-etal-2017-gated", "title": "Gated Self-Matching Networks for Reading Comprehension and Question Answering", "abstract": "In this paper, we present the gated self-matching networks for reading comprehension style question answering, which aims to answer questions from a given passage. We first match the question and passage with gated attention-based recurrent networks to obtain the question-aware passage representation. 
Then we propose a self-matching attention mechanism to refine the representation by matching the passage against itself, which effectively encodes information from the whole passage. We finally employ the pointer networks to locate the positions of answers from the passages. We conduct extensive experiments on the SQuAD dataset. The single model achieves 71.3% on the evaluation metrics of exact match on the hidden test set, while the ensemble model further boosts the results to 75.9%. At the time of submission of the paper, our model holds the first place on the SQuAD leaderboard for both single and ensemble model.", "keyphrases": ["reading comprehension", "passage", "self-match attention mechanism"]} +{"id": "ghazvininejad-etal-2019-mask", "title": "Mask-Predict: Parallel Decoding of Conditional Masked Language Models", "abstract": "Most machine translation systems generate text autoregressively from left to right. We, instead, use a masked language modeling objective to train a model to predict any subset of the target words, conditioned on both the input text and a partially masked target translation. This approach allows for efficient iterative decoding, where we first predict all of the target words non-autoregressively, and then repeatedly mask out and regenerate the subset of words that the model is least confident about. By applying this strategy for a constant number of iterations, our model improves state-of-the-art performance levels for non-autoregressive and parallel decoding translation models by over 4 BLEU on average. It is also able to reach within about 1 BLEU point of a typical left-to-right transformer model, while decoding significantly faster.", "keyphrases": ["parallel decoding", "language model", "mask-predict", "nat", "neural machine translation"]} +{"id": "ma-etal-2017-detect", "title": "Detect Rumors in Microblog Posts Using Propagation Structure via Kernel Learning", "abstract": "How does fake news go viral via social media? How does its propagation pattern differ from that of real stories? In this paper, we attempt to address the problem of identifying rumors, i.e., fake information, out of microblog posts based on their propagation structure. We firstly model microblog posts diffusion with propagation trees, which provide valuable clues on how an original message is transmitted and developed over time. We then propose a kernel-based method called Propagation Tree Kernel, which captures high-order patterns differentiating different types of rumors by evaluating the similarities between their propagation tree structures. Experimental results on two real-world datasets demonstrate that the proposed kernel-based approach can detect rumors more quickly and accurately than state-of-the-art rumor detection models.", "keyphrases": ["rumor", "propagation tree", "twitter", "social medium"]} +{"id": "li-etal-2017-modeling", "title": "Modeling Source Syntax for Neural Machine Translation", "abstract": "Even though a linguistics-free sequence to sequence model in neural machine translation (NMT) has certain capability of implicitly learning syntactic information of source sentences, this paper shows that source syntax can be explicitly incorporated into NMT effectively to provide further improvements. Specifically, we linearize parse trees of source sentences to obtain structural label sequences. 
On this basis, we propose three different sorts of encoders to incorporate source syntax into NMT: 1) Parallel RNN encoder that learns word and label annotation vectors parallelly; 2) Hierarchical RNN encoder that learns word and label annotation vectors in a two-level hierarchy; and 3) Mixed RNN encoder that stitchingly learns word and label annotation vectors over sequences where words and labels are mixed. Experimentation on Chinese-to-English translation demonstrates that all the three proposed syntactic encoders are able to improve translation accuracy. It is interesting to note that the simplest RNN encoder, i.e., Mixed RNN encoder yields the best performance with a significant improvement of 1.4 BLEU points. Moreover, an in-depth analysis from several perspectives is provided to reveal how source syntax benefits NMT.", "keyphrases": ["source syntax", "neural machine translation", "label sequence", "linguistic feature", "simple rnn encoder"]} +{"id": "branavan-etal-2009-reinforcement", "title": "Reinforcement Learning for Mapping Instructions to Actions", "abstract": "In this paper, we present a reinforcement learning approach for mapping natural language instructions to sequences of executable actions. We assume access to a reward function that defines the quality of the executed actions. During training, the learner repeatedly constructs action sequences for a set of documents, executes those actions, and observes the resulting reward. We use a policy gradient algorithm to estimate the parameters of a log-linear model for action selection. We apply our method to interpret instructions in two domains --- Windows troubleshooting guides and game tutorials. Our results demonstrate that this method can rival supervised learning techniques while requiring few or no annotated training examples.", "keyphrases": ["mapping instruction", "action", "reinforcement learning", "environment", "supervision signal"]} +{"id": "williams-etal-2014-finding", "title": "Finding Good Enough: A Task-Based Evaluation of Query Biased Summarization for Cross-Language Information Retrieval", "abstract": "In this paper we present our task-based evaluation of query biased summarization for cross-language information retrieval (CLIR) using relevance prediction. We describe our 13 summarization methods, each from one of four summarization strategies. We show how well our methods perform using Farsi text from the CLEF 2008 shared-task, which we translated to English automatically. We report precision/recall/F1, accuracy and time-on-task. We found that different summarization methods perform optimally for different evaluation metrics, but overall query biased word clouds are the best summarization strategy. In our analysis, we demonstrate that using the ROUGE metric on our sentence-based summaries cannot make the same kinds of distinctions as our evaluation framework does. Finally, we present our recommendations for creating much-needed evaluation standards and datasets.", "keyphrases": ["task-based evaluation", "cross-language information retrieval", "summarization method"]} +{"id": "ma-etal-2019-domain", "title": "Domain Adaptation with BERT-based Domain Classification and Data Selection", "abstract": "The performance of deep neural models can deteriorate substantially when there is a domain shift between training and test data. For example, the pre-trained BERT model can be easily fine-tuned with just one additional output layer to create a state-of-the-art model for a wide range of tasks. 
However, the fine-tuned BERT model suffers considerably at zero-shot when applied to a different domain. In this paper, we present a novel two-step domain adaptation framework based on curriculum learning and domain-discriminative data selection. The domain adaptation is conducted in a mostly unsupervised manner using a small target domain validation set for hyper-parameter tuning. We tested the framework on four large public datasets with different domain similarities and task types. Our framework outperforms a popular discrepancy-based domain adaptation method on most transfer tasks while consuming only a fraction of the training budget.", "keyphrases": ["data selection", "bert", "target domain", "adversarial learning"]} +{"id": "maekawa-etal-2010-design", "title": "Design, Compilation, and Preliminary Analyses of Balanced Corpus of Contemporary Written Japanese", "abstract": "Compilation of a 100 million words balanced corpus called the Balanced Corpus of Contemporary Written Japanese (or BCCWJ) is underway at the National Institute for Japanese Language and Linguistics. The corpus covers a wide range of text genres including books, magazines, newspapers, governmental white papers, textbooks, minutes of the National Diet, internet text (bulletin board and blogs) and so forth, and when possible, samples are drawn from the rigidly defined statistical populations by means of random sampling. All texts are dually POS-analyzed based upon two different, but mutually related, definitions of \u2018word\u2019. Currently, more than 90 million words have been sampled and XML annotated with respect to text-structure and lexical and character information. A preliminary linear discriminant analysis of text genres using the data of POS frequencies and sentence length revealed it was possible to classify the text genres with a correct identification rate of 88% as far as the samples of books, newspapers, whitepapers, and internet bulletin boards are concerned. When the samples of blogs were included in this data set, however, the identification rate went down to 68%, suggesting the considerable variance of the blog texts in terms of the textual register and style.", "keyphrases": ["balanced corpus", "contemporary written japanese", "national institute", "linguistics"]} +{"id": "singh-etal-2015-detection", "title": "Detection of Multiword Expressions for Hindi Language using Word Embeddings and WordNet-based Features", "abstract": "Detection of Multiword Expressions (MWEs) is a challenging problem faced by several natural language processing applications. The difficulty emanates from the task of detecting MWEs with respect to a given context. In this paper, we propose approaches that use Word Embeddings and WordNet-based features for the detection of MWEs for Hindi language. These approaches are restricted to two types of MWEs viz., noun compounds and noun+verb compounds. The results obtained indicate that using linguistic information from a rich lexical resource such as WordNet helps in improving the accuracy of MWEs detection. It also demonstrates that the linguistic information which word embeddings capture from a corpus can be comparable to that provided by WordNet. 
Thus, for the detection of the above-mentioned MWEs, word embeddings can be a reasonable alternative to WordNet, especially for languages whose WordNets do not have good coverage.", "keyphrases": ["hindi language", "word embeddings", "noun+verb compound"]}
+{"id": "yimam-etal-2018-report", "title": "A Report on the Complex Word Identification Shared Task 2018", "abstract": "We report the findings of the second Complex Word Identification (CWI) shared task organized as part of the BEA workshop co-located with NAACL-HLT'2018. The second CWI shared task featured multilingual and multi-genre datasets divided into four tracks: English monolingual, German monolingual, Spanish monolingual, and a multilingual track with a French test set, and two tasks: binary classification and probabilistic classification. A total of 12 teams submitted their results in different task/track combinations and 11 of them wrote system description papers that are referred to in this report and appear in the BEA workshop proceedings.", "keyphrases": ["report", "complex word identification", "complexity", "cwi", "non-native speaker"]}
+{"id": "devlin-etal-2014-fast", "title": "Fast and Robust Neural Network Joint Models for Statistical Machine Translation", "abstract": "Recent work has shown success in using neural network language models (NNLMs) as features in MT systems. Here, we present a novel formulation for a neural network joint model (NNJM), which augments the NNLM with a source context window. Our model is purely lexicalized and can be integrated into any MT decoder. We also present several variations of the NNJM which provide significant additive improvements.", "keyphrases": ["joint model", "statistical machine translation", "network language model", "neural network model", "objective function"]}
+{"id": "cai-etal-2007-improving", "title": "Improving Word Sense Disambiguation Using Topic Features", "abstract": "This paper presents a novel approach for exploiting the global context for the task of word sense disambiguation (WSD). This is done by using topic features constructed using the latent Dirichlet allocation (LDA) algorithm on unlabeled data. The features are", "keyphrases": ["word sense disambiguation", "topic feature", "global context", "latent dirichlet allocation"]}
+{"id": "kong-etal-2014-dependency", "title": "A Dependency Parser for Tweets", "abstract": "We describe a new dependency parser for English tweets, TWEEBOPARSER. The parser builds on several contributions: new syntactic annotations for a corpus of tweets (TWEEBANK), with conventions informed by the domain; adaptations to a statistical parsing algorithm; and a new approach to exploiting out-of-domain Penn Treebank data. Our experiments show that the parser achieves over 80% unlabeled attachment accuracy on our new, high-quality test set and measure the benefit of our contributions. Our dataset and parser can be found at http://www.ark.cs.cmu.edu/TweetNLP.", "keyphrases": ["dependency parser", "english tweet", "tweeboparser", "social medium text"]}
+{"id": "welleck-etal-2019-dialogue", "title": "Dialogue Natural Language Inference", "abstract": "Consistency is a long-standing issue faced by dialogue models. In this paper, we frame the consistency of dialogue agents as natural language inference (NLI) and create a new natural language inference dataset called Dialogue NLI. 
We propose a method which demonstrates that a model trained on Dialogue NLI can be used to improve the consistency of a dialogue model, and evaluate the method with human judgments and automatic metrics on a suite of evaluation sets designed to measure a dialogue model's consistency.", "keyphrases": ["natural language inference", "nli", "dialoguenli"]}
+{"id": "liu-etal-2019-tree", "title": "Tree-structured Decoding for Solving Math Word Problems", "abstract": "Automatically solving math word problems is an interesting research topic that needs to bridge natural language descriptions and formal math equations. Previous studies introduced end-to-end neural network methods, but these approaches did not efficiently consider an important characteristic of the equation, i.e., an abstract syntax tree. To address this problem, we propose a tree-structured decoding method that generates the abstract syntax tree of the equation in a top-down manner. In addition, our approach can automatically stop during decoding without a redundant stop token. The experimental results show that our method achieves single model state-of-the-art performance on Math23K, which is the largest dataset on this task.", "keyphrases": ["math word problem", "syntax tree", "top-down manner", "mwp"]}
+{"id": "daudert-ahmadi-2019-cofif", "title": "CoFiF: A Corpus of Financial Reports in French Language", "abstract": "In an era when machine learning and artificial intelligence have huge momentum, the demand for data to train and test models is steadily growing. We introduce CoFiF, the first corpus comprising company reports in the French language. It contains over 188 million tokens in 2655 reports, covering reference documents, annual, semestrial and trimestrial reports. Our main focus is on the 60 largest French companies listed in France\u2019s main stock indices CAC40 and CAC Next 20. The corpus spans over 20 years, ranging from 1995 to 2018. To evaluate this novel collection of organizational writing, we use CoFiF to generate two character-level language models, a forward and a backward one, which we use to demonstrate the corpus's potential for business, economics, and management research in the French language. The corpus is accessible on GitHub.", "keyphrases": ["french language", "trimestrial report", "cofif"]}
+{"id": "yin-etal-2019-benchmarking", "title": "Benchmarking Zero-shot Text Classification: Datasets, Evaluation and Entailment Approach", "abstract": "Zero-shot text classification (0Shot-TC) is a challenging NLU problem to which little attention has been paid by the research community. 0Shot-TC aims to associate an appropriate label with a piece of text, irrespective of the text domain and the aspect (e.g., topic, emotion, event, etc.) described by the label. Only a few articles have studied 0Shot-TC, all focusing only on topical categorization which, we argue, is just the tip of the iceberg in 0Shot-TC. In addition, the chaotic experiments in the literature allow no uniform comparison, which obscures progress. This work benchmarks the 0Shot-TC problem by providing unified datasets, standardized evaluations, and state-of-the-art baselines. 
Our contributions include: i) The datasets we provide facilitate studying 0Shot-TC relative to conceptually different and diverse aspects: the \u201ctopic\u201d aspect includes \u201csports\u201d and \u201cpolitics\u201d as labels; the \u201cemotion\u201d aspect includes \u201cjoy\u201d and \u201canger\u201d; the \u201csituation\u201d aspect includes \u201cmedical assistance\u201d and \u201cwater shortage\u201d. ii) We extend the existing evaluation setup (label-partially-unseen) \u2013 given a dataset, train on some labels, test on all labels \u2013 to include a more challenging yet realistic evaluation, label-fully-unseen 0Shot-TC (Chang et al., 2008), aiming at classifying text snippets without seeing task-specific training data at all. iii) We unify the 0Shot-TC of diverse aspects within a textual entailment formulation and study it under this formulation.", "keyphrases": ["zero-shot text classification", "entailment approach", "natural language inference", "pre-trained nli model"]}
+{"id": "miwa-bansal-2016-end", "title": "End-to-End Relation Extraction using LSTMs on Sequences and Tree Structures", "abstract": "We present a novel end-to-end neural model to extract entities and relations between them. Our recurrent neural network based model captures both word sequence and dependency tree substructure information by stacking bidirectional tree-structured LSTM-RNNs on bidirectional sequential LSTM-RNNs. This allows our model to jointly represent both entities and relations with shared parameters in a single model. We further encourage detection of entities during training and use of entity information in relation extraction via entity pretraining and scheduled sampling. Our model improves over the state-of-the-art feature-based model on end-to-end relation extraction, achieving 12.1% and 5.7% relative error reductions in F1-score on ACE2005 and ACE2004, respectively. We also show that our LSTM-RNN based model compares favorably to the state-of-the-art CNN based model (in F1-score) on nominal relation classification (SemEval-2010 Task 8). Finally, we present an extensive ablation analysis of several model components.", "keyphrases": ["dependency tree", "end-to-end relation extraction", "lstm model", "bottom-up", "entity pair"]}
+{"id": "kamath-etal-2020-selective", "title": "Selective Question Answering under Domain Shift", "abstract": "To avoid giving wrong answers, question answering (QA) models need to know when to abstain from answering. Moreover, users often ask questions that diverge from the model's training data, making errors more likely and thus abstention more critical. In this work, we propose the setting of selective question answering under domain shift, in which a QA model is tested on a mixture of in-domain and out-of-domain data, and must answer (i.e., not abstain on) as many questions as possible while maintaining high accuracy. Abstention policies based solely on the model's softmax probabilities fare poorly, since models are overconfident on out-of-domain inputs. Instead, we train a calibrator to identify inputs on which the QA model errs, and abstain when it predicts an error is likely. Crucially, the calibrator benefits from observing the model's behavior on out-of-domain data, even if from a different domain than the test data. We combine this method with a SQuAD-trained QA model and evaluate on mixtures of SQuAD and five other QA datasets. 
Our method answers 56% of questions while maintaining 80% accuracy; in contrast, directly using the model's probabilities answers only 48% of questions at 80% accuracy.", "keyphrases": ["domain shift", "selective question", "out-of-domain setting", "identification", "confidence"]}
+{"id": "mrksic-etal-2016-counter", "title": "Counter-fitting Word Vectors to Linguistic Constraints", "abstract": "In this work, we present a novel counter-fitting method which injects antonymy and synonymy constraints into vector space representations in order to improve the vectors' capability for judging semantic similarity. Applying this method to publicly available pre-trained word vectors leads to new state-of-the-art performance on the SimLex-999 dataset. We also show how the method can be used to tailor the word vector space for the downstream task of dialogue state tracking, resulting in robust improvements across different dialogue domains.", "keyphrases": ["word vector", "downstream task", "ppdb", "post-processor", "distance"]}
+{"id": "sutton-mccallum-2005-joint", "title": "Joint Parsing and Semantic Role Labeling", "abstract": "A striking feature of human syntactic processing is that it is context-dependent, that is, it seems to take into account semantic information from the discourse context and world knowledge. In this paper, we attempt to use this insight to bridge the gap between SRL results from gold parses and from automatically-generated parses. To do this, we jointly perform parsing and semantic role labeling, using a probabilistic SRL system to rerank the results of a probabilistic parser. Our current results are negative, because a locally-trained SRL model can return inaccurate probability estimates.", "keyphrases": ["semantic role labeling", "probabilistic srl system", "joint parsing"]}
+{"id": "yang-etal-2019-assessing", "title": "Assessing the Ability of Self-Attention Networks to Learn Word Order", "abstract": "Self-attention networks (SAN) have attracted a lot of interest due to their high parallelization and strong performance on a variety of NLP tasks, e.g. machine translation. Due to the lack of a recurrence structure such as that of recurrent neural networks (RNN), SAN is assumed to be weak at learning positional information of words for sequence modeling. However, this speculation has neither been empirically confirmed, nor have explanations been explored for SAN's strong performance on machine translation tasks despite \u201clacking positional information\u201d. To this end, we propose a novel word reordering detection task to quantify how well word order information is learned by SAN and RNN. Specifically, we randomly move one word to another position, and examine whether a trained model can detect both the original and inserted positions. Experimental results reveal that: 1) SAN trained on word reordering detection indeed has difficulty learning positional information even with the position embedding; and 2) SAN trained on machine translation learns better positional information than its RNN counterpart, in which position embedding plays a critical role. 
Although the recurrence structure makes a model more universally effective at learning word order, learning objectives matter more in downstream tasks such as machine translation.", "keyphrases": ["self-attention networks", "word order", "downstream task"]}
+{"id": "bengtson-roth-2008-understanding", "title": "Understanding the Value of Features for Coreference Resolution", "abstract": "In recent years there has been substantial work on the important problem of coreference resolution, most of which has concentrated on the development of new models and algorithmic techniques. These works often show that complex models improve over a weak pairwise baseline. However, less attention has been given to the importance of selecting strong features to support learning a coreference model. \n \nThis paper describes a rather simple pairwise classification model for coreference resolution, developed with a well-designed set of features. We show that this produces a state-of-the-art system that outperforms systems built with complex models. We suggest that our system can be used as a baseline for the development of more complex models -- which may have less impact when a more robust set of features is used. The paper also presents an ablation study and discusses the relative contributions of various features.", "keyphrases": ["coreference resolution", "complex model", "state-of-the-art system", "mention"]}
+{"id": "riaz-girju-2013-toward", "title": "Toward a Better Understanding of Causality between Verbal Events: Extraction and Analysis of the Causal Power of Verb-Verb Associations", "abstract": "The identification of causal relations between verbal events is important for achieving natural language understanding. However, the problem has proven notoriously difficult since it is not clear which types of knowledge are necessary to solve this challenging problem close to human-level performance. Instead of employing a large set of features proved useful in other NLP tasks, we split the problem in smaller sub problems. Since verbs play a very important role in causal relations, in this paper we harness, explore, and evaluate the predictive power of causal associations of verb-verb pairs. More specifically, we propose a set of knowledge-rich metrics to learn the likelihood of causal relations between verbs. Employing these metrics, we automatically generate a knowledge base (KBc) which identifies three categories of verb pairs: Strongly Causal, Ambiguous, and Strongly Non-causal. The knowledge base is evaluated empirically. The results show that our metrics perform significantly better than the state-of-the-art on the task of detecting causal verbal events.", "keyphrases": ["causality", "verbal event", "verb-verb pair"]}
+{"id": "artetxe-etal-2019-bilingual", "title": "Bilingual Lexicon Induction through Unsupervised Machine Translation", "abstract": "A recent research line has obtained strong results on bilingual lexicon induction by aligning independently trained word embeddings in two languages and using the resulting cross-lingual embeddings to induce word translation pairs through nearest neighbor or related retrieval methods. In this paper, we propose an alternative approach to this problem that builds on the recent work on unsupervised machine translation. 
This way, instead of directly inducing a bilingual lexicon from cross-lingual embeddings, we use them to build a phrase table, combine it with a language model, and use the resulting machine translation system to generate a synthetic parallel corpus, from which we extract the bilingual lexicon using statistical word alignment techniques. As such, our method can work with any word embedding and cross-lingual mapping technique, and it does not require any additional resource besides the monolingual corpus used to train the embeddings. When evaluated on the exact same cross-lingual embeddings, our proposed method obtains an average improvement of 6 accuracy points over nearest neighbor and 4 points over CSLS retrieval, establishing a new state of the art on the standard MUSE dataset.", "keyphrases": ["induction", "unsupervised machine translation", "cross-lingual embedding"]}
+{"id": "yen-etal-2015-writeahead", "title": "WriteAhead: Mining Grammar Patterns in Corpora for Assisted Writing", "abstract": "This paper describes WriteAhead, a resource-rich, Interactive Writing Environment that provides L2 learners with writing prompts, as well as \u201cget it right\u201d advice, to help them write fluently and accurately. The method involves automatically analyzing reference and learner corpora, extracting grammar patterns with example phrases, and computing dubious, overused patterns. At run-time, as the user types (or mouses over) a word, the system automatically retrieves and displays grammar patterns and examples most relevant to the word. The user can opt for patterns from a general corpus, academic corpus, learner corpus, or commonly overused dubious patterns found in a learner corpus. WriteAhead proactively engages the user with steady, timely, and spot-on information for effective assisted writing. Preliminary experiments show that WriteAhead fulfills the design goal of fostering learner independence and encouraging self-editing, and is likely to induce better writing and improve writing skills in the long run.", "keyphrases": ["grammar pattern", "interactive writing environment", "writeahead", "english sentence", "esl learner"]}
+{"id": "chi-etal-2016-geolocation", "title": "Geolocation Prediction in Twitter Using Location Indicative Words and Textual Features", "abstract": "Knowing the location of a social media user and their posts is important for various purposes, such as the recommendation of location-based items/services, and locality detection of crisis/disasters. This paper describes our submission to the shared task \u201cGeolocation Prediction in Twitter\u201d of the 2nd Workshop on Noisy User-generated Text. In this shared task, we propose an algorithm to predict the location of Twitter users and tweets using a multinomial Naive Bayes classifier trained on Location Indicative Words and various textual features (such as city/country names, #hashtags and @mentions). 
We compared our approach against various baselines based on Location Indicative Words, city/country names, #hashtags and @mentions as individual feature sets, and experimental results show that our approach outperforms these baselines in terms of classification accuracy and mean and median error distance.", "keyphrases": ["location indicative words", "textual feature", "hashtag", "geolocation prediction"]}
+{"id": "hartung-frank-2011-exploring", "title": "Exploring Supervised LDA Models for Assigning Attributes to Adjective-Noun Phrases", "abstract": "This paper introduces an attribute selection task as a way to characterize the inherent meaning of property-denoting adjectives in adjective-noun phrases, such as hot in hot summer denoting the attribute temperature rather than taste. We formulate this task in a vector space model that represents adjectives and nouns as vectors in a semantic space defined over possible attributes. The vectors incorporate latent semantic information obtained from two variants of LDA topic models. Our LDA models outperform previous approaches on a small set of 10 attributes with considerable gains on sparse representations, which highlights the strong smoothing power of LDA models. For the first time, we extend the attribute selection task to a new data set with more than 200 classes. We observe that large-scale attribute selection is a hard problem, but a subset of attributes performs robustly on the large scale as well. Again, the LDA models outperform the VSM baseline.", "keyphrases": ["lda", "attribute", "adjective-noun phrase"]}
+{"id": "sahin-steedman-2018-data", "title": "Data Augmentation via Dependency Tree Morphing for Low-Resource Languages", "abstract": "Neural NLP systems achieve high scores in the presence of sizable training datasets. Lack of such datasets leads to poor system performance in the case of low-resource languages. We present two simple text augmentation techniques using dependency trees, inspired by image processing. We \u201ccrop\u201d sentences by removing dependency links, and we \u201crotate\u201d sentences by moving the tree fragments around the root. We apply these techniques to augment the training sets of low-resource languages in the Universal Dependencies project. We implement a character-level sequence tagging model and evaluate the augmented datasets on the part-of-speech tagging task. We show that crop and rotate provide improvements over the models trained with non-augmented data for the majority of the languages, especially for languages with rich case-marking systems.", "keyphrases": ["dependency tree morphing", "low-resource language", "data augmentation"]}
+{"id": "chen-etal-2010-semafor", "title": "SEMAFOR: Frame Argument Resolution with Log-Linear Models", "abstract": "This paper describes the SEMAFOR system's performance in the SemEval 2010 task on linking events and their participants in discourse. Our entry is based upon SEMAFOR 1.0 (Das et al., 2010a), a frame-semantic probabilistic parser built from log-linear models. The extended system models null instantiations, including non-local argument reference. Performance is evaluated on the task data with and without gold-standard overt arguments. 
In both settings, it fares best among the submitted systems with respect to recall and F1.", "keyphrases": ["semafor", "framenet-style parser", "extension"]}
+{"id": "davidov-etal-2010-enhanced", "title": "Enhanced Sentiment Learning Using Twitter Hashtags and Smileys", "abstract": "Automated identification of diverse sentiment types can be beneficial for many NLP systems such as review summarization and public media analysis. In some of these systems there is an option of assigning a sentiment value to a single sentence or a very short text. \n \nIn this paper we propose a supervised sentiment classification framework which is based on data from Twitter, a popular microblogging service. By utilizing 50 Twitter tags and 15 smileys as sentiment labels, this framework avoids the need for labor-intensive manual annotation, allowing identification and classification of diverse sentiment types of short texts. We evaluate the contribution of different feature types for sentiment classification and show that our framework successfully identifies sentiment types of untagged sentences. The quality of the sentiment identification was also confirmed by human judges. We also explore dependencies and overlap between different sentiment types represented by smileys and Twitter hashtags.", "keyphrases": ["hashtag", "smileys", "emoticon", "tweet label"]}
+{"id": "dasgupta-ng-2009-mine", "title": "Mine the Easy, Classify the Hard: A Semi-Supervised Approach to Automatic Sentiment Classification", "abstract": "Supervised polarity classification systems are typically domain-specific. Building these systems involves the expensive process of annotating a large amount of data for each domain. A potential solution to this corpus annotation bottleneck is to build unsupervised polarity classification systems. However, unsupervised learning of polarity is difficult, owing in part to the prevalence of sentimentally ambiguous reviews, where reviewers discuss both the positive and negative aspects of a product. To address this problem, we propose a semi-supervised approach to sentiment classification where we first mine the unambiguous reviews using spectral techniques and then exploit them to classify the ambiguous reviews via a novel combination of active learning, transductive learning, and ensemble learning.", "keyphrases": ["semi-supervised approach", "sentiment classification", "review", "active learning"]}
+{"id": "kim-linzen-2020-cogs", "title": "COGS: A Compositional Generalization Challenge Based on Semantic Interpretation", "abstract": "Natural language is characterized by compositionality: the meaning of a complex expression is constructed from the meanings of its constituent parts. To facilitate the evaluation of the compositional abilities of language processing architectures, we introduce COGS, a semantic parsing dataset based on a fragment of English. The evaluation portion of COGS contains multiple systematic gaps that can only be addressed by compositional generalization; these include new combinations of familiar syntactic structures, or new combinations of familiar words and familiar structures. In experiments with Transformers and LSTMs, we found that in-distribution accuracy on the COGS test set was near-perfect (96\u201399%), but generalization accuracy was substantially lower (16\u201335%) and showed high sensitivity to random seed (\u00b16\u20138%). 
These findings indicate that contemporary standard NLP models are limited in their compositional generalization capacity, and position COGS as a good way to measure progress.", "keyphrases": ["generalization", "fragment", "systematic gap", "cogs", "semantic parser"]} +{"id": "guu-etal-2017-language", "title": "From Language to Programs: Bridging Reinforcement Learning and Maximum Marginal Likelihood", "abstract": "Our goal is to learn a semantic parser that maps natural language utterances into executable programs when only indirect supervision is available: examples are labeled with the correct execution result, but not the program itself. Consequently, we must search the space of programs for those that output the correct result, while not being misled by spurious programs: incorrect programs that coincidentally output the correct result. We connect two common learning paradigms, reinforcement learning (RL) and maximum marginal likelihood (MML), and then present a new learning algorithm that combines the strengths of both. The new algorithm guards against spurious programs by combining the systematic search traditionally employed in MML with the randomized exploration of RL, and by updating parameters such that probability is spread more evenly across consistent programs. We apply our learning algorithm to a new neural semantic parser and show significant gains over existing state-of-the-art results on a recent context-dependent semantic parsing task.", "keyphrases": ["program", "reinforcement learning", "maximum marginal likelihood", "strength", "exploration"]} +{"id": "miceli-barone-etal-2017-regularization", "title": "Regularization techniques for fine-tuning in neural machine translation", "abstract": "We investigate techniques for supervised domain adaptation for neural machine translation where an existing model trained on a large out-of-domain dataset is adapted to a small in-domain dataset. In this scenario, overfitting is a major challenge. We investigate a number of techniques to reduce overfitting and improve transfer learning, including regularization techniques such as dropout and L2-regularization towards an out-of-domain prior. In addition, we introduce tuneout, a novel regularization technique inspired by dropout. We apply these techniques, alone and in combination, to neural machine translation, obtaining improvements on IWSLT datasets for English\u2192German and English\u2192Russian. We also investigate the amounts of in-domain training data needed for domain adaptation in NMT, and find a logarithmic relationship between the amount of training data and gain in BLEU score.", "keyphrases": ["machine translation", "dropout", "regularization technique", "continued training", "miceli"]} +{"id": "gao-etal-2018-neural-approaches", "title": "Neural Approaches to Conversational AI", "abstract": "This tutorial surveys neural approaches to conversational AI that were developed in the last few years. We group conversational systems into three categories: (1) question answering agents, (2) task-oriented dialogue agents, and (3) social bots. 
For each category, we present a review of state-of-the-art neural approaches, draw the connection between neural approaches and traditional symbolic approaches, and discuss the progress we have made and challenges we are facing, using specific systems and models as case studies.", "keyphrases": ["agent", "dialogue system", "neural approach", "language understanding"]}
+{"id": "zhou-rush-2019-simple", "title": "Simple Unsupervised Summarization by Contextual Matching", "abstract": "We propose an unsupervised method for sentence summarization using only language modeling. The approach employs two language models, one that is generic (i.e. pretrained), and the other that is specific to the target domain. We show that, by using a product-of-experts criterion, these are enough to maintain continuous contextual matching while preserving output fluency. Experiments on both abstractive and extractive sentence summarization data sets show promising results for our method without exposure to any paired data.", "keyphrases": ["summarization", "contextual matching", "language model", "fluency", "beam search"]}
+{"id": "zoph-etal-2016-simple", "title": "Simple, Fast Noise-Contrastive Estimation for Large RNN Vocabularies", "abstract": "We present a simple algorithm to efficiently train language models with noise-contrastive estimation (NCE) on graphics processing units (GPUs). Our NCE-trained language models achieve significantly lower perplexity on the One Billion Word Benchmark language modeling challenge, and contain one sixth of the parameters in the best single model in Chelba et al. (2013). When incorporated into a strong Arabic-English machine translation system they give a strong boost in translation quality. We release a toolkit so that others may also train large-scale, large vocabulary LSTM language models with NCE, parallelizing computation across multiple GPUs.", "keyphrases": ["noise-contrastive estimation", "estimation", "nce"]}
+{"id": "belinkov-glass-2019-analysis", "title": "Analysis Methods in Neural Language Processing: A Survey", "abstract": "The field of natural language processing has seen impressive progress in recent years, with neural network models replacing many of the traditional systems. A plethora of new models have been proposed, many of which are thought to be opaque compared to their feature-rich counterparts. This has led researchers to analyze, interpret, and evaluate neural networks in novel and more fine-grained ways. In this survey paper, we review analysis methods in neural language processing, categorize them according to prominent research trends, highlight existing limitations, and point to potential directions for future work.", "keyphrases": ["neural language processing", "survey", "interpretability", "area", "input token"]}
+{"id": "agirre-etal-2009-use", "title": "Use of Rich Linguistic Information to Translate Prepositions and Grammar Cases to Basque", "abstract": "This paper presents three successful techniques to translate prepositions heading verbal complements by means of rich linguistic information, in the context of a rule-based Machine Translation system for an agglutinative language with scarce resources. This information comes in the form of lexicalized syntactic dependency triples, verb subcategorization and manually coded selection rules based on lexical, syntactic and semantic information. The first two resources have been automatically extracted from monolingual corpora. 
The results obtained using a new evaluation methodology show that all proposed techniques improve precision over the baselines, including a translation dictionary compiled from an aligned corpus, and a state-of-the-art statistical Machine Translation system. The results also show that the linguistic information in the three techniques is complementary, and that a combination of them obtains the best F-score results overall.", "keyphrases": ["rich linguistic information", "preposition", "grammatical case"]}
+{"id": "da-san-martino-etal-2020-prta", "title": "Prta: A System to Support the Analysis of Propaganda Techniques in the News", "abstract": "Recent events, such as the 2016 US Presidential Campaign, Brexit and the COVID-19 \u201cinfodemic\u201d, have brought into the spotlight the dangers of online disinformation. There has been a lot of research focusing on fact-checking and disinformation detection. However, little attention has been paid to the specific rhetorical and psychological techniques used to convey propaganda messages. Revealing the use of such techniques can help promote media literacy and critical thinking, and eventually contribute to limiting the impact of \u201cfake news\u201d and disinformation campaigns. Prta (Propaganda Persuasion Techniques Analyzer) allows users to explore the articles crawled on a regular basis by highlighting the spans in which propaganda techniques occur and to compare them on the basis of their use of propaganda techniques. The system further reports statistics about the use of such techniques, overall and over time, or according to filtering criteria specified by the user based on time interval, keywords, and/or political orientation of the media. Moreover, it allows users to analyze any text or URL through a dedicated interface or via an API. The system is available online.", "keyphrases": ["propaganda technique", "news", "prta"]}
+{"id": "bohra-etal-2018-dataset", "title": "A Dataset of Hindi-English Code-Mixed Social Media Text for Hate Speech Detection", "abstract": "Hate speech detection in social media texts is an important Natural Language Processing task, which has several crucial applications like sentiment analysis, investigating cyberbullying and examining socio-political controversies. While relevant research has been done independently on code-mixed social media texts and hate speech detection, our work is the first attempt in detecting hate speech in Hindi-English code-mixed social media text. In this paper, we analyze the problem of hate speech detection in code-mixed texts and present a Hindi-English code-mixed dataset consisting of tweets posted online on Twitter. The tweets are annotated with the language at word level and the class they belong to (Hate Speech or Normal Speech). We also propose a supervised classification system for detecting hate speech in the text using various character level, word level, and lexicon based features.", "keyphrases": ["hate speech detection", "code-mixed text", "twitter", "social medium text"]}
+{"id": "mihalcea-strapparava-2009-lie", "title": "The Lie Detector: Explorations in the Automatic Recognition of Deceptive Language", "abstract": "In this paper, we present initial experiments in the recognition of deceptive language. We introduce three data sets of true and lying texts collected for this purpose, and we show that automatic classification is a viable technique to distinguish between truth and falsehood as expressed in language. 
We also introduce a method for class-based feature analysis, which sheds some light on the features that are characteristic of deceptive text.", "keyphrases": ["lie detector", "deceptive language", "death penalty", "fake news"]}
+{"id": "gauthier-etal-2020-syntaxgym", "title": "SyntaxGym: An Online Platform for Targeted Evaluation of Language Models", "abstract": "Targeted syntactic evaluations have yielded insights into the generalizations learned by neural network language models. However, this line of research requires an uncommon confluence of skills: both the theoretical knowledge needed to design controlled psycholinguistic experiments, and the technical proficiency needed to train and deploy large-scale language models. We present SyntaxGym, an online platform designed to make targeted evaluations accessible to both experts in NLP and linguistics, reproducible across computing environments, and standardized following the norms of psycholinguistic experimental design. This paper releases two tools of independent value for the computational linguistics community: 1. A website, syntaxgym.org, which centralizes the process of targeted syntactic evaluation and provides easy tools for analysis and visualization; 2. Two command-line tools, `syntaxgym` and `lm-zoo`, which allow any user to reproduce targeted syntactic evaluations and general language model inference on their own machine.", "keyphrases": ["online platform", "targeted evaluation", "syntaxgym"]}
+{"id": "snow-etal-2006-semantic", "title": "Semantic Taxonomy Induction from Heterogenous Evidence", "abstract": "We propose a novel algorithm for inducing semantic taxonomies. Previous algorithms for taxonomy induction have typically focused on independent classifiers for discovering new single relationships based on hand-constructed or automatically discovered textual patterns. By contrast, our algorithm flexibly incorporates evidence from multiple classifiers over heterogeneous relationships to optimize the entire structure of the taxonomy, using knowledge of a word's coordinate terms to help in determining its hypernyms, and vice versa. We apply our algorithm on the problem of sense-disambiguated noun hyponym acquisition, where we combine the predictions of hypernym and coordinate term classifiers with the knowledge in a preexisting semantic taxonomy (WordNet 2.1). We add 10,000 novel synsets to WordNet 2.1 at 84% precision, a relative error reduction of 70% over a non-joint algorithm using the same component classifiers. Finally, we show that a taxonomy built using our algorithm shows a 23% relative F-score improvement over WordNet 2.1 on an independent testset of hypernym pairs.", "keyphrases": ["taxonomy induction", "hypernym", "dependency path", "high precision"]}
+{"id": "liu-gildea-2010-semantic", "title": "Semantic Role Features for Machine Translation", "abstract": "We propose semantic role features for a Tree-to-String transducer to model the reordering/deletion of source-side semantic roles. These semantic features, as well as the Tree-to-String templates, are trained based on a conditional log-linear model and are shown to significantly outperform systems trained based on Max-Likelihood and EM. 
We also show significant improvement in sentence fluency by using the semantic role features in the log-linear model, based on manual evaluation.", "keyphrases": ["machine translation", "deletion", "semantic role", "srl", "smt system"]}
+{"id": "naderi-hirst-2017-classifying", "title": "Classifying Frames at the Sentence Level in News Articles", "abstract": "Previous approaches to generic frame classification analyze frames at the document level. Here, we propose a supervised approach based on deep neural networks and distributional representations for classifying frames at the sentence level in news articles. We conduct our experiments on the publicly available Media Frames Corpus compiled from U.S. newspapers. Using (B)LSTMs and GRU networks to represent the meaning of frames, we demonstrate that our approach yields at least a 14-point improvement over several baseline methods.", "keyphrases": ["frame", "sentence level", "recurrent neural network"]}
+{"id": "allaway-mckeown-2020-zero", "title": "Zero-Shot Stance Detection: A Dataset and Model using Generalized Topic Representations", "abstract": "Stance detection is an important component of understanding hidden influences in everyday life. Since there are thousands of potential topics to take a stance on, most with little to no training data, we focus on zero-shot stance detection: classifying stance from no training examples. In this paper, we present a new dataset for zero-shot stance detection that captures a wider range of topics and lexical variation than in previous datasets. Additionally, we propose a new model for stance detection that implicitly captures relationships between topics using generalized topic representations and show that this model improves performance on a number of challenging linguistic phenomena.", "keyphrases": ["generalized topic representation", "zero-shot stance detection", "linguistic phenomenon"]}
+{"id": "kauchak-barzilay-2006-paraphrasing", "title": "Paraphrasing for Automatic Evaluation", "abstract": "This paper studies the impact of paraphrases on the accuracy of automatic evaluation. Given a reference sentence and a machine-generated sentence, we seek to find a paraphrase of the reference sentence that is closer in wording to the machine output than the original reference. We apply our paraphrasing method in the context of machine translation evaluation. Our experiments show that the use of a paraphrased synthetic reference refines the accuracy of automatic evaluation. We also found a strong connection between the quality of automatic paraphrases as judged by humans and their contribution to automatic evaluation.", "keyphrases": ["automatic evaluation", "reference", "paraphrasing"]}
+{"id": "coster-kauchak-2011-learning", "title": "Learning to Simplify Sentences Using Wikipedia", "abstract": "In this paper we examine the sentence simplification problem as an English-to-English translation problem, utilizing a corpus of 137K aligned sentence pairs extracted by aligning English Wikipedia and Simple English Wikipedia. This data set contains the full range of transformation operations including rewording, reordering, insertion and deletion. We introduce a new translation model for text simplification that extends a phrase-based machine translation approach to include phrasal deletion. 
Evaluated based on three metrics that compare against a human reference (BLEU, word-F1 and SSA), our new approach performs significantly better than two text compression techniques (including T3) and the phrase-based translation system without deletion.", "keyphrases": ["wikipedia", "simplification", "machine translation", "change", "giza++"]}
+{"id": "hajic-etal-2009-conll", "title": "The CoNLL-2009 Shared Task: Syntactic and Semantic Dependencies in Multiple Languages", "abstract": "For the 11th straight year, the Conference on Computational Natural Language Learning has been accompanied by a shared task whose purpose is to promote natural language processing applications and evaluate them in a standard setting. In 2009, the shared task was dedicated to the joint parsing of syntactic and semantic dependencies in multiple languages. This shared task combines the shared tasks of the previous five years under a unique dependency-based formalism similar to the 2008 task. In this paper, we define the shared task, describe how the data sets were created and show their quantitative properties, report the results and summarize the approaches of the participating systems.", "keyphrases": ["semantic dependency", "multiple language", "conll", "joint learning"]}
+{"id": "johannsen-etal-2015-cross", "title": "Cross-lingual syntactic variation over age and gender", "abstract": "Most computational sociolinguistics studies have focused on phonological and lexical variation. We present the first large-scale study of syntactic variation among demographic groups (age and gender) across several languages. We harvest data from online user-review sites and parse it with universal dependencies. We show that several age and gender-specific variations hold across languages, for example that women are more likely to use VP conjunctions.", "keyphrases": ["syntactic variation", "gender", "large-scale study"]}
+{"id": "de-saeger-etal-2008-looking", "title": "Looking for Trouble", "abstract": "This paper presents a method for mining potential troubles or obstacles related to the use of a given object. Some example instances of this relation are (medicine, side effect) and (amusement park, height restriction). Our acquisition method consists of three steps. First, we use an unsupervised method to collect training samples from Web documents. Second, a set of expressions generally referring to troubles is acquired by a supervised learning method. Finally, the acquired troubles are associated with objects so that each of the resulting pairs consists of an object and a trouble or obstacle in using that object. To show the effectiveness of our method we conducted experiments using a large collection of Japanese Web documents for acquisition. Experimental results show an 85.5% precision for the top 10,000 acquired troubles, and a 74% precision for the top 10% of over 60,000 acquired object-trouble pairs.", "keyphrases": ["trouble", "obstacle", "web"]}
+{"id": "ferraro-etal-2015-survey", "title": "A Survey of Current Datasets for Vision and Language Research", "abstract": "Integrating vision and language has long been a dream in work on artificial intelligence (AI). In the past two years, we have witnessed an explosion of work that brings together vision and language from images to videos and beyond. The available corpora have played a crucial role in advancing this area of research. In this paper, we propose a set of quality metrics for evaluating and analyzing the vision & language datasets and categorize them accordingly. 
Our analyses show that the most recent datasets have been using more complex language and more abstract concepts; however, each has different strengths and weaknesses.", "keyphrases": ["survey", "vision", "language research"]}
+{"id": "abdul-mageed-diab-2012-awatif", "title": "AWATIF: A Multi-Genre Corpus for Modern Standard Arabic Subjectivity and Sentiment Analysis", "abstract": "We present AWATIF, a multi-genre corpus of Modern Standard Arabic (MSA) labeled for subjectivity and sentiment analysis (SSA) at the sentence level. The corpus is labeled using both regular and crowdsourcing methods under three different conditions with two types of annotation guidelines. We describe the sub-corpora constituting the corpus and provide examples from the various SSA categories. In the process, we present our linguistically-motivated and genre-nuanced annotation guidelines and provide evidence showing their impact on the labeling task.", "keyphrases": ["multi-genre corpus", "modern standard arabic", "subjectivity", "sentiment analysis"]}
+{"id": "koller-kuhlmann-2009-dependency", "title": "Dependency Trees and the Strong Generative Capacity of CCG", "abstract": "We propose a novel algorithm for extracting dependencies from the derivations of a large fragment of CCG. Unlike earlier proposals, our dependency structures are always tree-shaped. We then use these dependency trees to compare the strong generative capacities of CCG and TAG and obtain surprising results: Both formalisms generate the same languages of derivation trees --- but the mechanisms they use to bring the words in these trees into a linear order are incomparable.", "keyphrases": ["strong generative capacity", "ccg", "derivation"]}
+{"id": "scarlini-etal-2019-just", "title": "Just \u201cOneSeC\u201d for Producing Multilingual Sense-Annotated Data", "abstract": "The well-known problem of knowledge acquisition is one of the biggest issues in Word Sense Disambiguation (WSD), where annotated data are still scarce in English and almost absent in other languages. In this paper we formulate the assumption of One Sense per Wikipedia Category and present OneSeC, a language-independent method for the automatic extraction of hundreds of thousands of sentences in which a target word is tagged with its meaning. Our automatically-generated data consistently lead a supervised WSD model to state-of-the-art performance when compared with other automatic and semi-automatic methods. Moreover, our approach outperforms its competitors in multilingual and domain-specific settings, where it beats the existing state of the art on all languages and most domains. All the training data are available for research purposes.", "keyphrases": ["onesec", "multilingual sense-annotated data", "wikipedia category"]}
+{"id": "cheng-lapata-2016-neural", "title": "Neural Summarization by Extracting Sentences and Words", "abstract": "Traditional approaches to extractive summarization rely heavily on human-engineered features. In this work we propose a data-driven approach based on neural networks and continuous sentence features. We develop a general framework for single-document summarization composed of a hierarchical document encoder and an attention-based extractor. This architecture allows us to develop different classes of summarization models which can extract sentences or words. We train our models on large-scale corpora containing hundreds of thousands of document-summary pairs. 
Experimental results on two summarization datasets demonstrate that our models obtain results comparable to the state of the art without any access to linguistic annotation.", "keyphrases": ["summarization", "hierarchical document encoder", "extractor", "cnn", "recurrent neural network"]}
+{"id": "narayan-etal-2020-stepwise", "title": "Stepwise Extractive Summarization and Planning with Structured Transformers", "abstract": "We propose encoder-centric stepwise models for extractive summarization using structured transformers \u2013 HiBERT and Extended Transformers. We enable stepwise summarization by injecting the previously generated summary into the structured transformer as an auxiliary sub-structure. Our models are not only efficient in modeling the structure of long inputs, but they also do not rely on task-specific redundancy-aware modeling, making them a general purpose extractive content planner for different tasks. When evaluated on CNN/DailyMail extractive summarization, stepwise models achieve state-of-the-art performance in terms of Rouge without any redundancy-aware modeling or sentence filtering. This also holds true for Rotowire table-to-text generation, where our models surpass previously reported metrics for content selection, planning and ordering, highlighting the strength of stepwise modeling. Of the two structured transformers we test, stepwise Extended Transformers provides the best performance across both datasets and sets a new standard for these challenges.", "keyphrases": ["extractive summarization", "planning", "structured transformer"]}
+{"id": "wallace-etal-2019-universal", "title": "Universal Adversarial Triggers for Attacking and Analyzing NLP", "abstract": "Adversarial examples highlight model vulnerabilities and are useful for evaluation and interpretation. We define universal adversarial triggers: input-agnostic sequences of tokens that trigger a model to produce a specific prediction when concatenated to any input from a dataset. We propose a gradient-guided search over tokens which finds short trigger sequences (e.g., one word for classification and four words for language modeling) that successfully trigger the target prediction. For example, triggers cause SNLI entailment accuracy to drop from 89.94% to 0.55%, 72% of \u201cwhy\u201d questions in SQuAD to be answered \u201cto kill american people\u201d, and the GPT-2 language model to spew racist output even when conditioned on non-racial contexts. Furthermore, although the triggers are optimized using white-box access to a specific model, they transfer to other models for all tasks we consider. Finally, since triggers are input-agnostic, they provide an analysis of global model behavior. For instance, they confirm that SNLI models exploit dataset biases and help to diagnose heuristics learned by reading comprehension models.", "keyphrases": ["trigger", "input-agnostic sequence", "universal adversarial trigger", "attack method"]}
+{"id": "feng-etal-2021-survey", "title": "A Survey of Data Augmentation Approaches for NLP", "abstract": "Data augmentation has recently seen increased interest in NLP due to more work in low-resource domains, new tasks, and the popularity of large-scale neural networks that require large amounts of training data. Despite this recent upsurge, this area is still relatively underexplored, perhaps due to the challenges posed by the discrete nature of language data. 
In this paper, we present a comprehensive and unifying survey of data augmentation for NLP by summarizing the literature in a structured manner. We first introduce and motivate data augmentation for NLP, and then discuss major methodologically representative approaches. Next, we highlight techniques that are used for popular NLP applications and tasks. We conclude by outlining current challenges and directions for future research. Overall, our paper aims to clarify the landscape of existing literature in data augmentation for NLP and motivate additional work in this area. We also present a GitHub repository with a paper list that will be continuously updated at https://github.com/styfeng/DataAug4NLP", "keyphrases": ["survey", "data augmentation approaches", "popular nlp application"]}
+{"id": "warstadt-etal-2020-blimp", "title": "BLiMP: A Benchmark of Linguistic Minimal Pairs for English", "abstract": "We introduce the Benchmark of Linguistic Minimal Pairs (shortened to BLiMP), a challenge set for evaluating what language models (LMs) know about major grammatical phenomena in English. BLiMP consists of 67 sub-datasets, each containing 1000 minimal pairs isolating specific contrasts in syntax, morphology, or semantics. The data is automatically generated according to expert-crafted grammars, and aggregate human agreement with the labels is 96.4%. We use it to evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs. We find that state-of-the-art models identify morphological contrasts reliably, but they struggle with semantic restrictions on the distribution of quantifiers and negative polarity items and subtle syntactic phenomena such as extraction islands.", "keyphrases": ["linguistic minimal pairs", "language model", "blimp"]}
+{"id": "yang-choi-2019-friendsqa", "title": "FriendsQA: Open-Domain Question Answering on TV Show Transcripts", "abstract": "This paper presents FriendsQA, a challenging question answering dataset that contains 1,222 dialogues and 10,610 open-domain questions, to tackle machine comprehension on everyday conversations. Each dialogue, involving multiple speakers, is annotated with several types of questions regarding the dialogue contexts, and the answers are annotated with certain spans in the dialogue. A series of crowdsourcing tasks are conducted to ensure good annotation quality, resulting in a high inter-annotator agreement of 81.82%. A comprehensive annotation analysis is provided for a deeper understanding of this dataset. Three state-of-the-art QA systems, R-Net, QANet, and BERT, are evaluated on this dataset. BERT in particular shows promising results, with an accuracy of 74.2% for answer utterance selection and an F1-score of 64.2% for answer span selection, suggesting that the FriendsQA task is hard yet has great potential for elevating QA research on multiparty dialogue to another level.", "keyphrases": ["open-domain question", "conversation", "friendsqa"]}
+{"id": "raghunathan-etal-2010-multi", "title": "A Multi-Pass Sieve for Coreference Resolution", "abstract": "Most coreference resolution models determine if two mentions are coreferent using a single function over a set of constraints or features. This approach can lead to incorrect decisions as lower precision features often overwhelm the smaller number of high precision ones. To overcome this problem, we propose a simple coreference architecture based on a sieve that applies tiers of deterministic coreference models one at a time from highest to lowest precision. 
Each tier builds on the previous tier's entity cluster output. Further, our model propagates global information by sharing attributes (e.g., gender and number) across mentions in the same cluster. This cautious sieve guarantees that stronger features are given precedence over weaker ones and that each decision is made using all of the information available at the time. The framework is highly modular: new coreference modules can be plugged in without any change to the other modules. In spite of its simplicity, our approach outperforms many state-of-the-art supervised and unsupervised models on several standard corpora. This suggests that sieve-based approaches could be applied to other NLP tasks.", "keyphrases": ["coreference resolution", "mention", "decision", "cluster", "multi-pass sieve system"]}
+{"id": "sheng-etal-2019-woman", "title": "The Woman Worked as a Babysitter: On Biases in Language Generation", "abstract": "We present a systematic study of biases in natural language generation (NLG) by analyzing text generated from prompts that contain mentions of different demographic groups. In this work, we introduce the notion of the regard towards a demographic, use the varying levels of regard towards different demographics as a defining metric for bias in NLG, and analyze the extent to which sentiment scores are a relevant proxy metric for regard. To this end, we collect strategically-generated text from language models and manually annotate the text with both sentiment and regard scores. Additionally, we build an automatic regard classifier through transfer learning, so that we can analyze biases in unseen text. Together, these methods reveal the extent of the biased nature of language model generations. Our analysis provides a study of biases in NLG, bias metrics and correlated human judgments, and empirical evidence on the usefulness of our annotated dataset.", "keyphrases": ["woman", "language generation", "demographic", "gender bias"]}
+{"id": "marcu-etal-2006-spmt", "title": "SPMT: Statistical Machine Translation with Syntactified Target Language Phrases", "abstract": "We introduce SPMT, a new class of statistical Translation Models that use Syntactified target language Phrases. The SPMT models outperform a state-of-the-art phrase-based baseline model by 2.64 BLEU points on the NIST 2003 Chinese-English test corpus and 0.28 points on a human-based quality metric that ranks translations on a scale from 1 to 5.", "keyphrases": ["machine translation", "spmt model", "syntax-based model", "synchronous grammar", "span"]}
+{"id": "demszky-etal-2020-goemotions", "title": "GoEmotions: A Dataset of Fine-Grained Emotions", "abstract": "Understanding emotion expressed in language has a wide range of applications, from building empathetic chatbots to detecting harmful online behavior. Progress in this area can be accelerated using large-scale datasets with a fine-grained typology, adaptable to multiple downstream tasks. We introduce GoEmotions, the largest manually annotated dataset of 58k English Reddit comments, labeled for 27 emotion categories or Neutral. We demonstrate the high quality of the annotations via Principal Preserved Component Analysis. We conduct transfer learning experiments with existing emotion benchmarks to show that our dataset generalizes well to other domains and different emotion taxonomies. 
Our BERT-based model achieves an average F1-score of .46 across our proposed taxonomy, leaving much room for improvement.", "keyphrases": ["emotion", "reddit comment", "taxonomy"]} +{"id": "cheng-roth-2013-relational", "title": "Relational Inference for Wikification", "abstract": "Wikification, commonly referred to as Disambiguation to Wikipedia (D2W), is the task of identifying concepts and entities in text and disambiguating them into the most specific corresponding Wikipedia pages. Previous approaches to D2W focused on the use of local and global statistics over the given text, Wikipedia articles and its link structures, to evaluate context compatibility among a list of probable candidates. However, these methods fail (often, embarrassingly) when some level of text understanding is needed to support Wikification. In this paper we introduce a novel approach to Wikification by incorporating, along with statistical methods, richer relational analysis of the text. We provide an extensible, efficient and modular Integer Linear Programming (ILP) formulation of Wikification that incorporates the entity-relation inference problem, and show that the ability to identify relations in text helps both candidate generation and ranking Wikipedia titles considerably. Our results show significant improvements in both Wikification and the TAC Entity Linking task.", "keyphrases": ["wikification", "ilp", "mention", "global coherence"]} +{"id": "zwarts-johnson-2011-impact", "title": "The impact of language models and loss functions on repair disfluency detection", "abstract": "Unrehearsed spoken language often contains disfluencies. In order to correctly interpret a spoken utterance, any such disfluencies must be identified and removed or otherwise dealt with. Operating on transcripts of speech which contain disfluencies, we study the effect of language model and loss function on the performance of a linear reranker that rescores the 25-best output of a noisy-channel model. We show that language models trained on large amounts of non-speech data improve performance more than a language model trained on a more modest amount of speech data, and that optimising f-score rather than log loss improves disfluency detection performance. \n \nOur approach uses a log-linear reranker, operating on the top n analyses of a noisy channel model. We use large language models, introduce new features into this reranker and examine different optimisation strategies. We obtain a disfluency detection f-score of 0.838, which improves upon the current state-of-the-art.", "keyphrases": ["language model", "disfluency", "n-b reranking"]} +{"id": "matusov-etal-2006-computing", "title": "Computing Consensus Translation for Multiple Machine Translation Systems Using Enhanced Hypothesis Alignment", "abstract": "This paper describes a novel method for computing a consensus translation from the outputs of multiple machine translation (MT) systems. The outputs are combined and a possibly new translation hypothesis can be generated. Similarly to the well-established ROVER approach of (Fiscus, 1997) for combining speech recognition hypotheses, the consensus translation is computed by voting on a confusion network. To create the confusion network, we produce pairwise word alignments of the original machine translation hypotheses with an enhanced statistical alignment algorithm that explicitly models word reordering. The context of a whole document of translations rather than a single sentence is taken into account to produce the alignment. 
The proposed alignment and voting approach was evaluated on several machine translation tasks, including a large vocabulary task. The method was also tested in the framework of multi-source and speech translation. On all tasks and conditions, we achieved significant improvements in translation quality, increasing, e.g., the BLEU score by as much as 15% relative.", "keyphrases": ["consensus translation", "hypothesis", "system combination", "translation output"]} +{"id": "habernal-etal-2018-argument", "title": "The Argument Reasoning Comprehension Task: Identification and Reconstruction of Implicit Warrants", "abstract": "Reasoning is a crucial part of natural language argumentation. To comprehend an argument, one must analyze its warrant, which explains why its claim follows from its premises. As arguments are highly contextualized, warrants are usually presupposed and left implicit. Thus, the comprehension does not only require language understanding and logic skills, but also depends on common sense. In this paper we develop a methodology for reconstructing warrants systematically. We operationalize it in a scalable crowdsourcing process, resulting in a freely licensed dataset with warrants for 2k authentic arguments from news comments. On this basis, we present a new challenging task, the argument reasoning comprehension task. Given an argument with a claim and a premise, the goal is to choose the correct implicit warrant from two options. Both warrants are plausible and lexically close, but lead to contradicting claims. A solution to this task will define a substantial step towards automatic warrant reconstruction. However, experiments with several neural attention and language models reveal that current approaches do not suffice.", "keyphrases": ["reasoning", "implicit warrant", "natural language argumentation"]} +{"id": "verhagen-etal-2010-semeval", "title": "SemEval-2010 Task 13: TempEval-2", "abstract": "Tempeval-2 comprises evaluation tasks for time expressions, events and temporal relations, the latter of which was split up into four subtasks, motivated by the notion that smaller subtasks would make both data preparation and temporal relation extraction easier. Manually annotated data were provided for six languages: Chinese, English, French, Italian, Korean and Spanish.", "keyphrases": ["tempeval-2", "information extraction", "nlp community", "same sentence", "international evaluation"]} +{"id": "katz-giesbrecht-2006-automatic", "title": "Automatic Identification of Non-Compositional Multi-Word Expressions using Latent Semantic Analysis", "abstract": "Making use of latent semantic analysis, we explore the hypothesis that local linguistic context can serve to identify multi-word expressions that have non-compositional meanings. We propose that vector-similarity between distribution vectors associated with an MWE as a whole and those associated with its constituent parts can serve as a good measure of the degree to which the MWE is compositional. We present experiments that show that low (cosine) similarity does, in fact, correlate with non-compositionality.", "keyphrases": ["multi-word expression", "latent semantic analysis", "local linguistic context", "automatic identification"]} +{"id": "birch-etal-2008-predicting", "title": "Predicting Success in Machine Translation", "abstract": "The performance of machine translation systems varies greatly depending on the source and target languages involved. 
Determining the contribution of different characteristics of language pairs on system performance is key to knowing what aspects of machine translation to improve and which are irrelevant. This paper investigates the effect of different explanatory variables on the performance of a phrase-based system for 110 European language pairs. We show that three factors are strong predictors of performance in isolation: the amount of reordering, the morphological complexity of the target language and the historical relatedness of the two languages. Together, these factors contribute 75% to the variability of the performance of the system.", "keyphrases": ["machine translation", "explanatory variable", "european language pair", "complexity", "smt performance"]} +{"id": "mairesse-etal-2010-phrase", "title": "Phrase-Based Statistical Language Generation Using Graphical Models and Active Learning", "abstract": "Most previous work on trainable language generation has focused on two paradigms: (a) using a statistical model to rank a set of generated utterances, or (b) using statistics to inform the generation decision process. Both approaches rely on the existence of a handcrafted generator, which limits their scalability to new domains. This paper presents Bagel, a statistical language generator which uses dynamic Bayesian networks to learn from semantically-aligned data produced by 42 untrained annotators. A human evaluation shows that Bagel can generate natural and informative utterances from unseen inputs in the information presentation domain. Additionally, generation performance on sparse datasets is improved significantly by using certainty-based active learning, yielding ratings close to the human gold standard with a fraction of the data.", "keyphrases": ["language generation", "active learning", "nlg system"]} +{"id": "li-liang-2021-prefix", "title": "Prefix-Tuning: Optimizing Continuous Prompts for Generation", "abstract": "Fine-tuning is the de facto way of leveraging large pretrained language models for downstream tasks. However, fine-tuning modifies all the language model parameters and therefore necessitates storing a full copy for each task. In this paper, we propose prefix-tuning, a lightweight alternative to fine-tuning for natural language generation tasks, which keeps language model parameters frozen and instead optimizes a sequence of continuous task-specific vectors, which we call the prefix. Prefix-tuning draws inspiration from prompting for language models, allowing subsequent tokens to attend to this prefix as if it were \u201cvirtual tokens\u201d. We apply prefix-tuning to GPT-2 for table-to-text generation and to BART for summarization. We show that by learning only 0.1% of the parameters, prefix-tuning obtains comparable performance in the full data setting, outperforms fine-tuning in low-data settings, and extrapolates better to examples with topics that are unseen during training.", "keyphrases": ["prompt", "fine-tuning", "language model", "prefix-tuning", "text generation task"]} +{"id": "wiebe-etal-2004-learning", "title": "Learning Subjective Language", "abstract": "Subjectivity in natural language refers to aspects of language used to express opinions, evaluations, and speculations. There are numerous natural language processing applications for which subjectivity analysis is relevant, including information extraction and text categorization. The goal of this work is learning subjective language from corpora. 
Clues of subjectivity are generated and tested, including low-frequency words, collocations, and adjectives and verbs identified using distributional similarity. The features are also examined working together in concert. The features, generated from different data sets using different procedures, exhibit consistency in performance in that they all do better and worse on the same data sets. In addition, this article shows that the density of subjectivity clues in the surrounding context strongly affects how likely it is that a word is subjective, and it provides the results of an annotation study assessing the subjectivity of sentences with high-density features. Finally, the clues are used to perform opinion piece recognition (a type of text categorization and genre detection) to demonstrate the utility of the knowledge acquired in this article.", "keyphrases": ["subjective language", "collocation", "history", "opinion mining"]} +{"id": "kumano-etal-2007-extracting", "title": "Extracting phrasal alignments from comparable corpora by using joint probability SMT model", "abstract": "We propose a method of extracting phrasal alignments from comparable corpora by using an extended phrase-based joint probability model for statistical machine translation (SMT). Our method does not require preexisting dictionaries or splitting documents into sentences in advance. By checking each alignment for its reliability by using log-likelihood ratio statistics while searching for optimal alignments, our method aims to produce phrasal alignments for only parallel parts of the comparable corpora. Experimental results show that our method achieves about 0.8 in precision of phrasal alignment extraction when using 2,000 Japanese-English document pairs as training data.", "keyphrases": ["phrasal alignment", "comparable corpora", "probability smt model"]} +{"id": "kurita-etal-2019-measuring", "title": "Measuring Bias in Contextualized Word Representations", "abstract": "Contextual word embeddings such as BERT have achieved state of the art performance in numerous NLP tasks. Since they are optimized to capture the statistical properties of training data, they tend to pick up on and amplify social stereotypes present in the data as well. In this study, we (1) propose a template-based method to quantify bias in BERT; (2) show that this method obtains more consistent results in capturing social biases than the traditional cosine based method; and (3) conduct a case study, evaluating gender bias in a downstream task of Gender Pronoun Resolution. Although our case study focuses on gender bias, the proposed technique is generalizable to unveiling other biases, including in multiclass settings, such as racial and religious biases.", "keyphrases": ["word embedding", "stereotype", "gender bias", "association"]} +{"id": "nguyen-grishman-2015-event", "title": "Event Detection and Domain Adaptation with Convolutional Neural Networks", "abstract": "We study the event detection problem using convolutional neural networks (CNNs) that overcome the two fundamental limitations of the traditional feature-based approaches to this task: complicated feature engineering for rich feature sets and error propagation from the preceding stages which generate these features. 
The experimental results show that the CNNs outperform the best reported feature-based systems in the general setting as well as the domain adaptation setting without resorting to extensive external resources.", "keyphrases": ["domain adaptation", "convolutional neural networks", "cnn", "event detection", "learning method"]} +{"id": "mitchell-lapata-2008-vector", "title": "Vector-based Models of Semantic Composition", "abstract": "This paper proposes a framework for representing the meaning of phrases and sentences in vector space. Central to our approach is vector composition which we operationalize in terms of additive and multiplicative functions. Under this framework, we introduce a wide range of composition models which we evaluate empirically on a sentence similarity task. Experimental results demonstrate that the multiplicative models are superior to the additive alternatives when compared against human judgments.", "keyphrases": ["semantic composition", "operation", "general framework", "word vector", "co-occurrence"]} +{"id": "mai-etal-2019-divide", "title": "Divide, Conquer and Combine: Hierarchical Feature Fusion Network with Local and Global Perspectives for Multimodal Affective Computing", "abstract": "We propose a general strategy named `divide, conquer and combine' for multimodal fusion. Instead of directly fusing features at holistic level, we conduct fusion hierarchically so that both local and global interactions are considered for a comprehensive interpretation of multimodal embeddings. In the `divide' and `conquer' stages, we conduct local fusion by exploring the interaction of a portion of the aligned feature vectors across various modalities lying within a sliding window, which ensures that each part of multimodal embeddings are explored sufficiently. On its basis, global fusion is conducted in the `combine' stage to explore the interconnection across local interactions, via an Attentive Bi-directional Skip-connected LSTM that directly connects distant local interactions and integrates two levels of attention mechanism. In this way, local interactions can exchange information sufficiently and thus obtain an overall view of multimodal information. Our method achieves state-of-the-art performance on multimodal affective computing with higher efficiency.", "keyphrases": ["combine", "multimodal affective computing", "holistic level"]} +{"id": "li-etal-2017-end", "title": "End-to-End Task-Completion Neural Dialogue Systems", "abstract": "One of the major drawbacks of modularized task-completion dialogue systems is that each module is trained individually, which presents several challenges. For example, downstream modules are affected by earlier modules, and the performance of the entire system is not robust to the accumulated errors. This paper presents a novel end-to-end learning framework for task-completion dialogue systems to tackle such issues. Our neural dialogue system can directly interact with a structured database to assist users in accessing information and accomplishing certain tasks. The reinforcement learning based dialogue manager offers robust capabilities to handle noises caused by other components of the dialogue system. 
Our experiments in a movie-ticket booking domain show that our end-to-end system not only outperforms modularized dialogue system baselines for both objective and subjective evaluation, but also is robust to noises as demonstrated by several systematic experiments with different error granularity and rates specific to the language understanding module.", "keyphrases": ["dialogue system", "end-to-end", "deep learning"]} +{"id": "reddy-etal-2011-empirical", "title": "An Empirical Study on Compositionality in Compound Nouns", "abstract": "A multiword is compositional if its meaning can be expressed in terms of the meaning of its constituents. In this paper, we collect and analyse the compositionality judgments for a range of compound nouns using Mechanical Turk. Unlike existing compositionality datasets, our dataset has judgments on the contribution of constituent words as well as judgments for the phrase as a whole. We use this dataset to study the relation between the judgments at constituent level to that for the whole phrase. We then evaluate two different types of distributional models for compositionality detection \u2013 constituent based models and composition function based models. Both the models show competitive performance though the composition function based models perform slightly better. In both types, additive models perform better than their multiplicative counterparts.", "keyphrases": ["compositionality", "judgment", "noun compound", "co-occurrence", "standard distributional model"]} +{"id": "chang-etal-2013-multi", "title": "Multi-Relational Latent Semantic Analysis", "abstract": "We present Multi-Relational Latent Semantic Analysis (MRLSA) which generalizes Latent Semantic Analysis (LSA). MRLSA provides an elegant approach to combining multiple relations between words by constructing a 3-way tensor. Similar to LSA, a low-rank approximation of the tensor is derived using a tensor decomposition. Each word in the vocabulary is thus represented by a vector in the latent semantic space and each relation is captured by a latent square matrix. The degree of two words having a specific relation can then be measured through simple linear algebraic operations. We demonstrate that by integrating multiple relations from both homogeneous and heterogeneous information sources, MRLSA achieves state-of-the-art performance on existing benchmark datasets for two relations, antonymy and is-a.", "keyphrases": ["latent semantic analysis", "multiple relation", "knowledge graph"]} +{"id": "zampieri-etal-2017-complex", "title": "Complex Word Identification: Challenges in Data Annotation and System Performance", "abstract": "This paper revisits the problem of complex word identification (CWI) following up the SemEval CWI shared task. We use ensemble classifiers to investigate how well computational methods can discriminate between complex and non-complex words. Furthermore, we analyze the classification performance to understand what makes lexical complexity challenging. 
Our findings show that most systems performed poorly on the SemEval CWI dataset, and one of the reasons for that is the way in which human annotation was performed.", "keyphrases": ["most system", "complex word identification", "complexity"]} +{"id": "post-vilar-2018-fast", "title": "Fast Lexically Constrained Decoding with Dynamic Beam Allocation for Neural Machine Translation", "abstract": "The end-to-end nature of neural machine translation (NMT) removes many ways of manually guiding the translation process that were available in older paradigms. Recent work, however, has introduced a new capability: lexically constrained or guided decoding, a modification to beam search that forces the inclusion of pre-specified words and phrases in the output. However, while theoretically sound, existing approaches have computational complexities that are either linear (Hokamp and Liu, 2017) or exponential (Anderson et al., 2017) in the number of constraints. We present an algorithm for lexically constrained decoding with a complexity of O(1) in the number of constraints. We demonstrate the algorithm's remarkable ability to properly place these constraints, and use it to explore the shaky relationship between model and BLEU scores. Our implementation is available as part of Sockeye.", "keyphrases": ["decoding", "neural machine translation", "translation process", "lexical constraint", "unconstrained generation"]} +{"id": "schuster-etal-2020-limitations", "title": "The Limitations of Stylometry for Detecting Machine-Generated Fake News", "abstract": "Recent developments in neural language models (LMs) have raised concerns about their potential misuse for automatically spreading misinformation. In light of these concerns, several studies have proposed to detect machine-generated fake news by capturing their stylistic differences from human-written text. These approaches, broadly termed stylometry, have found success in source attribution and misinformation detection in human-written texts. However, in this work, we show that stylometry is limited against machine-generated misinformation. Whereas humans speak differently when trying to deceive, LMs generate stylistically consistent text, regardless of underlying motive. Thus, though stylometry can successfully prevent impersonation by identifying text provenance, it fails to distinguish legitimate LM applications from those that introduce false information. We create two benchmarks demonstrating the stylistic similarity between malicious and legitimate uses of LMs, utilized in auto-completion and editing-assistance settings. Our findings highlight the need for non-stylometry approaches in detecting machine-generated misinformation, and open up the discussion on the desired evaluation benchmarks.", "keyphrases": ["stylometry", "fake news", "human-written text"]} +{"id": "zollmann-venugopal-2006-syntax", "title": "Syntax Augmented Machine Translation via Chart Parsing", "abstract": "We present translation results on the shared task \"Exploiting Parallel Texts for Statistical Machine Translation\" generated by a chart parsing decoder operating on phrase tables augmented and generalized with target language syntactic categories. We use a target language parser to generate parse trees for each sentence on the target side of the bilingual training corpus, matching them with phrase table lattices built for the corresponding source sentence. 
Considering phrases that correspond to syntactic categories in the parse trees we develop techniques to augment (declare a syntactically motivated category for a phrase pair) and generalize (form mixed terminal and nonterminal phrases) the phrase table into a synchronous bilingual grammar. We present results on the French-to-English task for this workshop, representing significant improvements over the workshop's baseline system. Our translation system is available open-source under the GNU General Public License.", "keyphrases": ["chart", "syntactic category", "target side", "non-terminal", "synchronous grammar"]} +{"id": "van-der-lee-etal-2019-best", "title": "Best practices for the human evaluation of automatically generated text", "abstract": "Currently, there is little agreement as to how Natural Language Generation (NLG) systems should be evaluated. While there is some agreement regarding automatic metrics, there is a high degree of variation in the way that human evaluation is carried out. This paper provides an overview of how human evaluation is currently conducted, and presents a set of best practices, grounded in the literature. With this paper, we hope to contribute to the quality and consistency of human evaluations in NLG.", "keyphrases": ["practice", "human evaluation", "van der", "nlg system", "fluency"]} +{"id": "de-lhoneux-etal-2018-parameter", "title": "Parameter sharing between dependency parsers for related languages", "abstract": "Previous work has suggested that parameter sharing between transition-based neural dependency parsers for related languages can lead to better performance, but there is no consensus on what parameters to share. We present an evaluation of 27 different parameter sharing strategies across 10 languages, representing five pairs of related languages, each pair from a different language family. We find that sharing transition classifier parameters always helps, whereas the usefulness of sharing word and/or character LSTM parameters varies. Based on this result, we propose an architecture where the transition classifier is shared, and the sharing of word and character parameters is controlled by a parameter that can be tuned on validation data. This model is linguistically motivated and obtains significant improvements over a monolingually trained baseline. We also find that sharing transition classifier parameters helps when training a parser on unrelated language pairs, but we find that, in the case of unrelated languages, sharing too many parameters does not help.", "keyphrases": ["dependency parser", "character lstm parameter", "unrelated language", "parameter sharing"]} +{"id": "li-etal-2017-multi", "title": "Multi-modal Summarization for Asynchronous Collection of Text, Image, Audio and Video", "abstract": "The rapid increase of the multimedia data over the Internet necessitates multi-modal summarization from collections of text, image, audio and video. In this work, we propose an extractive Multi-modal Summarization (MMS) method which can automatically generate a textual summary given a set of documents, images, audios and videos related to a specific topic. The key idea is to bridge the semantic gaps between multi-modal contents. For audio information, we design an approach to selectively use its transcription. For vision information, we learn joint representations of texts and images using a neural network. 
Finally, all the multi-modal aspects are considered to generate the textual summary by maximizing the salience, non-redundancy, readability and coverage through budgeted optimization of submodular functions. We further introduce an MMS corpus in English and Chinese. The experimental results on this dataset demonstrate that our method outperforms other competitive baseline methods.", "keyphrases": ["asynchronous collection", "video", "multimedia data", "multi-modal summarization", "multimodal corpus"]} +{"id": "pfeiffer-etal-2021-adapterfusion", "title": "AdapterFusion: Non-Destructive Task Composition for Transfer Learning", "abstract": "Sequential fine-tuning and multi-task learning are methods aiming to incorporate knowledge from multiple tasks; however, they suffer from catastrophic forgetting and difficulties in dataset balancing. To address these shortcomings, we propose AdapterFusion, a new two-stage learning algorithm that leverages knowledge from multiple tasks. First, in the knowledge extraction stage we learn task specific parameters called adapters, that encapsulate the task-specific information. We then combine the adapters in a separate knowledge composition step. We show that by separating the two stages, i.e., knowledge extraction and knowledge composition, the classifier can effectively exploit the representations learned from multiple tasks in a non-destructive manner. We empirically evaluate AdapterFusion on 16 diverse NLU tasks, and find that it effectively combines various types of knowledge at different layers of the model. We show that our approach outperforms traditional strategies such as full fine-tuning as well as multi-task learning. Our code and adapters are available at AdapterHub.ml.", "keyphrases": ["transfer learning", "adapter", "target task"]} +{"id": "jang-etal-2016-metaphor", "title": "Metaphor Detection with Topic Transition, Emotion and Cognition in Context", "abstract": "Metaphor is a common linguistic tool in communication, making its detection in discourse a crucial task for natural language understanding. One popular approach to this challenge is to capture semantic incohesion between a metaphor and the dominant topic of the surrounding text. While these methods are effective, they tend to overclassify target words as metaphorical when they deviate in meaning from its context. We present a new approach that (1) distinguishes literal and non-literal use of target words by examining sentence-level topic transitions and (2) captures the motivation of speakers to express emotions and abstract concepts metaphorically. Experiments on an online breast cancer discussion forum dataset demonstrate a significant improvement in metaphor detection over the state-of-the-art. These experimental results also reveal a tendency toward metaphor usage in personal topics and certain emotional contexts.", "keyphrases": ["topic transition", "emotion", "metaphor detection"]} +{"id": "peng-etal-2016-event", "title": "Event Detection and Co-reference with Minimal Supervision", "abstract": "An important aspect of natural language understanding involves recognizing and categorizing events and the relations among them. However, these tasks are quite subtle and annotating training data for machine learning based approaches is an expensive task, resulting in supervised systems that attempt to learn complex models from small amounts of data, which they overfit. 
This paper addresses this challenge by developing an event detection and co-reference system with minimal supervision, in the form of a few event examples. We view these tasks as semantic similarity problems between event mentions or event mentions and an ontology of types, thus facilitating the use of large amounts of out of domain text data. Notably, our semantic relatedness function exploits the structure of the text by making use of a semantic-role-labeling based representation of an event. We show that our approach to event detection is competitive with the top supervised methods. More significantly, we outperform state-of-the-art supervised methods for event co-reference on benchmark data sets, and support significantly better transfer across domains.", "keyphrases": ["minimal supervision", "co-reference system", "event detection", "trigger example", "ace annotation guideline"]} +{"id": "yasunaga-etal-2018-robust", "title": "Robust Multilingual Part-of-Speech Tagging via Adversarial Training", "abstract": "Adversarial training (AT) is a powerful regularization method for neural networks, aiming to achieve robustness to input perturbations. Yet, the specific effects of the robustness obtained from AT are still unclear in the context of natural language processing. In this paper, we propose and analyze a neural POS tagging model that exploits AT. In our experiments on the Penn Treebank WSJ corpus and the Universal Dependencies (UD) dataset (27 languages), we find that AT not only improves the overall tagging accuracy, but also 1) prevents over-fitting well in low resource languages and 2) boosts tagging accuracy for rare / unseen words. We also demonstrate that 3) the improved tagging performance by AT contributes to the downstream task of dependency parsing, and that 4) AT helps the model to learn cleaner word representations. 5) The proposed AT model is generally effective in different sequence labeling tasks. These positive results motivate further use of AT for natural language tasks.", "keyphrases": ["adversarial training", "overall tagging accuracy", "word representation"]} +{"id": "solorio-liu-2008-part", "title": "Part-of-Speech Tagging for English-Spanish Code-Switched Text", "abstract": "Code-switching is an interesting linguistic phenomenon commonly observed in highly bilingual communities. It consists of mixing languages in the same conversational event. This paper presents results on Part-of-Speech tagging Spanish-English code-switched discourse. We explore different approaches to exploit existing resources for both languages that range from simple heuristics, to language identification, to machine learning. The best results are achieved by training a machine learning algorithm with features that combine the output of an English and a Spanish Part-of-Speech tagger.", "keyphrases": ["code-switched text", "language identification", "part-of-speech tagging"]} +{"id": "mihalcea-nastase-2012-word", "title": "Word Epoch Disambiguation: Finding How Words Change Over Time", "abstract": "In this paper we introduce the novel task of \"word epoch disambiguation,\" defined as the problem of identifying changes in word usage over time. 
Through experiments run using word usage examples collected from three major periods of time (1800, 1900, 2000), we show that the task is feasible, and significant differences can be observed between occurrences of words in different periods of time.", "keyphrases": ["change", "word epoch disambiguation", "probability distribution", "supervised learning approach"]} +{"id": "baldwin-etal-2003-empirical", "title": "An Empirical Model of Multiword Expression Decomposability", "abstract": "This paper presents a construction-inspecific model of multiword expression decomposability based on latent semantic analysis. We use latent semantic analysis to determine the similarity between a multiword expression and its constituent words, and claim that higher similarities indicate greater decomposability. We test the model over English noun-noun compounds and verb-particles, and evaluate its correlation with similarities and hyponymy values in WordNet. Based on mean hyponymy over partitions of data ranked on similarity, we furnish evidence for the calculated similarities being correlated with the semantic relational content of WordNet.", "keyphrases": ["multiword expression decomposability", "compositionality", "mwes", "idiom", "wordnet-based gold standard"]} +{"id": "shaw-etal-2021-compositional", "title": "Compositional Generalization and Natural Language Variation: Can a Semantic Parsing Approach Handle Both?", "abstract": "Sequence-to-sequence models excel at handling natural language variation, but have been shown to struggle with out-of-distribution compositional generalization. This has motivated new specialized architectures with stronger compositional biases, but most of these approaches have only been evaluated on synthetically-generated datasets, which are not representative of natural language variation. In this work we ask: can we develop a semantic parsing approach that handles both natural language variation and compositional generalization? To better assess this capability, we propose new train and test splits of non-synthetic datasets. We demonstrate that strong existing approaches do not perform well across a broad set of evaluations. We also propose NQG-T5, a hybrid model that combines a high-precision grammar-based approach with a pre-trained sequence-to-sequence model. It outperforms existing approaches across several compositional generalization challenges on non-synthetic data, while also being competitive with the state-of-the-art on standard evaluations. While still far from solving this problem, our study highlights the importance of diverse evaluations and the open challenge of handling both compositional generalization and natural language variation in semantic parsing.", "keyphrases": ["language variation", "specialized architecture", "compositional generalization"]} +{"id": "koper-schulte-im-walde-2016-automatically", "title": "Automatically Generated Affective Norms of Abstractness, Arousal, Imageability and Valence for 350 000 German Lemmas", "abstract": "This paper presents a collection of 350,000 German lemmatised words, rated on four psycholinguistic affective attributes. All ratings were obtained via a supervised learning algorithm that can automatically calculate a numerical rating of a word. We applied this algorithm to abstractness, arousal, imageability and valence. Comparison with human ratings reveals high correlation across all rating types. 
The full resource is publicly available at: ", "keyphrases": ["abstractness", "valence", "rating"]} +{"id": "luong-manning-2015-stanford", "title": "Stanford neural machine translation systems for spoken language domains", "abstract": "Neural Machine Translation (NMT), though recently developed, has shown promising results for various language pairs. Despite that, NMT has only been applied to mostly formal texts such as those in the WMT shared tasks. This work further explores the effectiveness of NMT in spoken language domains by participating in the MT track of the IWSLT 2015. We consider two scenarios: (a) how to adapt existing NMT systems to a new domain and (b) the generalization of NMT to low-resource language pairs. Our results demonstrate that using an existing NMT framework, we can achieve competitive results in the aforementioned scenarios when translating from English to German and Vietnamese. Notably, we have advanced state-of-the-art results in the IWSLT English-German MT track by up to 5.2 BLEU points.", "keyphrases": ["neural machine translation", "spoken language domain", "fine-tuning", "target domain", "continued training"]} +{"id": "auli-etal-2013-joint", "title": "Joint Language and Translation Modeling with Recurrent Neural Networks", "abstract": "We present a joint language and translation model based on a recurrent neural network which predicts target words based on an unbounded history of both source and target words. The weaker independence assumptions of this model result in a vastly larger search space compared to related feedforward-based language or translation models. We tackle this issue with a new lattice rescoring algorithm and demonstrate its effectiveness empirically. Our joint model builds on a well known recurrent neural network language model (Mikolov, 2012) augmented by a layer of additional inputs from the source language. We show competitive accuracy compared to the traditional channel model features. Our best results improve the output of a system trained on WMT 2012 French-English data by up to 1.5 BLEU, and by 1.1 BLEU on average across several test sets.", "keyphrases": ["recurrent neural networks", "joint language", "extended context"]} +{"id": "bod-2006-subtrees", "title": "An All-Subtrees Approach to Unsupervised Parsing", "abstract": "We investigate generalizations of the all-subtrees \"DOP\" approach to unsupervised parsing. Unsupervised DOP models assign all possible binary trees to a set of sentences and next use (a large random subset of) all subtrees from these binary trees to compute the most probable parse trees. We will test both a relative frequency estimator for unsupervised DOP and a maximum likelihood estimator which is known to be statistically consistent. We report state-of-the-art results on English (WSJ), German (NEGRA) and Chinese (CTB) data. 
To the best of our knowledge this is the first paper which tests a maximum likelihood estimator for DOP on the Wall Street Journal, leading to the surprising result that an unsupervised parsing model beats a widely used supervised model (a treebank PCFG).", "keyphrases": ["unsupervised parsing", "dop", "chinese"]} +{"id": "caines-etal-2016-crowdsourcing", "title": "Crowdsourcing a Multi-lingual Speech Corpus: Recording, Transcription and Annotation of the CrowdIS Corpora", "abstract": "We announce the release of the CROWDED CORPUS: a pair of speech corpora collected via crowdsourcing, containing a native speaker corpus of English (CROWDED_ENGLISH), and a corpus of German/English bilinguals (CROWDED_BILINGUAL). Release 1 of the CROWDED CORPUS contains 1000 recordings amounting to 33,400 tokens collected from 80 speakers and is freely available to other researchers. We recruited participants via the Crowdee application for Android. Recruits were prompted to respond to business-topic questions of the type found in language learning oral tests. We then used the CrowdFlower web application to pass these recordings to crowdworkers for transcription and annotation of errors and sentence boundaries. Finally, the sentences were tagged and parsed using standard natural language processing tools. We propose that crowdsourcing is a valid and economical method for corpus collection, and discuss the advantages and disadvantages of this approach.", "keyphrases": ["recording", "transcription", "crowded corpus"]} +{"id": "pan-etal-2017-cross", "title": "Cross-lingual Name Tagging and Linking for 282 Languages", "abstract": "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating \u201csilver-standard\u201d annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and non-Wikipedia data.", "keyphrases": ["name tagging", "link", "wikipedia", "cross-lingual link", "language-independent framework"]} +{"id": "chen-etal-2017-enhanced", "title": "Enhanced LSTM for Natural Language Inference", "abstract": "Reasoning and inference are central to human and artificial intelligence. Modeling inference in human language is very challenging. With the availability of large annotated data (Bowman et al., 2015), it has recently become feasible to train neural network based inference models, which have shown to be very effective. In this paper, we present a new state-of-the-art result, achieving the accuracy of 88.6% on the Stanford Natural Language Inference Dataset. Unlike the previous top models that use very complicated network architectures, we first demonstrate that carefully designing sequential inference models based on chain LSTMs can outperform all previous models. 
Based on this, we further show that by explicitly considering recursive architectures in both local inference modeling and inference composition, we achieve additional improvement. Particularly, incorporating syntactic parsing information contributes to our best result\u2014it further improves the performance even when added to the already very strong model.", "keyphrases": ["natural language inference", "sequential inference model", "esim", "snli", "bidirectional lstm"]} +{"id": "zhang-etal-2018-improving", "title": "Improving the Transformer Translation Model with Document-Level Context", "abstract": "Although the Transformer translation model (Vaswani et al., 2017) has achieved state-of-the-art performance in a variety of translation tasks, how to use document-level context to deal with discourse phenomena problematic for Transformer still remains a challenge. In this work, we extend the Transformer model with a new context encoder to represent document-level context, which is then incorporated into the original encoder and decoder. As large-scale document-level parallel corpora are usually not available, we introduce a two-step training method to take full advantage of abundant sentence-level parallel corpora and limited document-level parallel corpora. Experiments on the NIST Chinese-English datasets and the IWSLT French-English datasets show that our approach improves over Transformer significantly.", "keyphrases": ["transformer translation model", "document-level context", "new context encoder"]} +{"id": "hinrichs-etal-2010-weblicht-web", "title": "WebLicht: Web-Based LRT Services for German", "abstract": "This software demonstration presents WebLicht (short for: Web-Based Linguistic Chaining Tool), a web-based service environment for the integration and use of language resources and tools (LRT). WebLicht is being developed as part of the D-SPIN project. WebLicht is implemented as a web application so that there is no need for users to install any software on their own computers or to concern themselves with the technical details involved in building tool chains. The integrated web services are part of a prototypical infrastructure that was developed to facilitate chaining of LRT services. WebLicht allows the integration and use of distributed web services with standardized APIs. The nature of these open and standardized APIs makes it possible to access the web services from nearly any programming language, shell script or workflow engine (UIMA, Gate etc.) Additionally, an application for integration of additional services is available, allowing anyone to contribute his own web service.", "keyphrases": ["api", "workflow engine", "weblicht"]} +{"id": "nguyen-etal-2014-gender", "title": "Why Gender and Age Prediction from Tweets is Hard: Lessons from a Crowdsourcing Experiment", "abstract": "There is a growing interest in automatically predicting the gender and age of authors from texts. However, most research so far ignores that language use is related to the social identity of speakers, which may be different from their biological identity. In this paper, we combine insights from sociolinguistics with data collected through an online game, to underline the importance of approaching age and gender as social variables rather than static biological variables. In our game, thousands of players guessed the gender and age of Twitter users based on tweets alone. We show that more than 10% of the Twitter users do not employ language that the crowd associates with their biological sex. 
It is also shown that older Twitter users are often perceived to be younger. Our findings highlight the limitations of current approaches to gender and age prediction from texts.", "keyphrases": ["gender", "age prediction", "language use", "twitter user", "crowd"]} +{"id": "wei-etal-2020-iterative", "title": "Iterative Domain-Repaired Back-Translation", "abstract": "In this paper, we focus on the domain-specific translation with low resources, where in-domain parallel corpora are scarce or nonexistent. One common and effective strategy for this case is exploiting in-domain monolingual data with the back-translation method. However, the synthetic parallel data is very noisy because they are generated by imperfect out-of-domain systems, resulting in the poor performance of domain adaptation. To address this issue, we propose a novel iterative domain-repaired back-translation framework, which introduces the Domain-Repair (DR) model to refine translations in synthetic bilingual data. To this end, we construct corresponding data for the DR model training by round-trip translating the monolingual sentences, and then design the unified training framework to optimize paired DR and NMT models jointly. Experiments on adapting NMT models between specific domains and from the general domain to specific domains demonstrate the effectiveness of our proposed approach, achieving 15.79 and 4.47 BLEU improvements on average over unadapted models and back-translation.", "keyphrases": ["back-translation", "domain adaptation", "iterative"]} +{"id": "prettenhofer-stein-2010-cross", "title": "Cross-Language Text Classification Using Structural Correspondence Learning", "abstract": "We present a new approach to cross-language text classification that builds on structural correspondence learning, a recently proposed theory for domain adaptation. The approach uses unlabeled documents, along with a simple word translation oracle, in order to induce task-specific, cross-lingual word correspondences. We report on analyses that reveal quantitative insights about the use of unlabeled data and the complexity of inter-language correspondence modeling. \n \nWe conduct experiments in the field of cross-language sentiment classification, employing English as source language, and German, French, and Japanese as target languages. The results are convincing; they demonstrate both the robustness and the competitiveness of the presented ideas.", "keyphrases": ["structural correspondence learning", "theory", "cross-language text classification", "sentiment analysis"]} +{"id": "ma-etal-2020-simple", "title": "A Simple and Effective Unified Encoder for Document-Level Machine Translation", "abstract": "Most of the existing models for document-level machine translation adopt dual-encoder structures. The representation of the source sentences and the document-level contexts are modeled with two separate encoders. Although these models can make use of the document-level contexts, they do not fully model the interaction between the contexts and the source sentences, and can not directly adapt to the recent pre-training models (e.g., BERT) which encodes multiple sentences with a single encoder. In this work, we propose a simple and effective unified encoder that can outperform the baseline models of dual-encoder models in terms of BLEU and METEOR scores. 
Moreover, the pre-training models can further boost the performance of our proposed model.", "keyphrases": ["machine translation", "bert", "contextual information"]} +{"id": "tsakalidis-etal-2022-identifying", "title": "Identifying Moments of Change from Longitudinal User Text", "abstract": "Identifying changes in individuals' behaviour and mood, as observed via content shared on online platforms, is increasingly gaining importance. Most research to-date on this topic focuses on either: (a) identifying individuals at risk or with a certain mental health condition given a batch of posts or (b) providing equivalent labels at the post level. A disadvantage of such work is the lack of a strong temporal component and the inability to make longitudinal assessments following an individual's trajectory and allowing timely interventions. Here we define a new task, that of identifying moments of change in individuals on the basis of their shared content online. The changes we consider are sudden shifts in mood (switches) or gradual mood progression (escalations). We have created detailed guidelines for capturing moments of change and a corpus of 500 manually annotated user timelines (18.7K posts). We have developed a variety of baseline models drawing inspiration from related tasks and show that the best performance is obtained through context aware sequential modelling. We also introduce new metrics for capturing rare events in temporal windows.", "keyphrases": ["change", "baseline mood", "sequential classification task"]} +{"id": "zeng-etal-2021-sire", "title": "SIRE: Separate Intra- and Inter-sentential Reasoning for Document-level Relation Extraction", "abstract": "Document-level relation extraction has attracted much attention in recent years. It is usually formulated as a classification problem that predicts relations for all entity pairs in the document. However, previous works indiscriminately represent intra- and inter-sentential relations in the same way, confounding the different patterns for predicting them. Besides, they create a document graph and use paths between entities on the graph as clues for logical reasoning. However, not all entity pairs can be connected with a path and have the correct logical reasoning paths in their graph. Thus many cases of logical reasoning cannot be covered. This paper proposes an effective architecture, SIRE, to represent intra- and inter-sentential relations in different ways. We design a new and straightforward form of logical reasoning module that can cover more logical reasoning chains. Experiments on the public datasets show SIRE outperforms the previous state-of-the-art methods. Further analysis shows that our predictions are reliable and explainable. Our code is available at https://github.com/DreamInvoker/SIRE.", "keyphrases": ["reasoning", "document-level relation extraction", "entity pair", "sire"]} +{"id": "kurata-etal-2016-improved", "title": "Improved Neural Network-based Multi-label Classification with Better Initialization Leveraging Label Co-occurrence", "abstract": "In a multi-label text classification task, in which multiple labels can be assigned to one text, label co-occurrence itself is informative. We propose a novel neural network initialization method to treat some of the neurons in the final hidden layer as dedicated neurons for each pattern of label co-occurrence. These dedicated neurons are initialized to connect to the corresponding co-occurring labels with stronger weights than to others. 
In experiments with a natural language query classification task, which requires multi-label classification, our initialization method improved classification accuracy without any computational overhead in training and evaluation.", "keyphrases": ["multi-label classification", "initialization method", "neuron"]} +{"id": "lan-etal-2017-continuously", "title": "A Continuously Growing Dataset of Sentential Paraphrases", "abstract": "A major challenge in paraphrase research is the lack of parallel corpora. In this paper, we present a new method to collect large-scale sentential paraphrases from Twitter by linking tweets through shared URLs. The main advantage of our method is its simplicity, as it gets rid of the classifier or human in the loop needed to select data before annotation and subsequent application of paraphrase identification algorithms in the previous work. We present the largest human-labeled paraphrase corpus to date of 51,524 sentence pairs and the first cross-domain benchmarking for automatic paraphrase identification. In addition, we show that more than 30,000 new sentential paraphrases can be easily and continuously captured every month at ~70% precision, and demonstrate their utility for downstream NLP tasks through phrasal paraphrase extraction. We make our code and data freely available.", "keyphrases": ["sentential paraphrase", "twitter", "url", "sentence pair"]} +{"id": "klementiev-roth-2006-named", "title": "Named Entity Transliteration and Discovery from Multilingual Comparable Corpora", "abstract": "Named Entity recognition (NER) is an important part of many natural language processing tasks. Most current approaches employ machine learning techniques and require supervised data. However, many languages lack such resources. This paper presents an algorithm to automatically discover Named Entities (NEs) in a resource free language, given a bilingual corpus in which it is weakly temporally aligned with a resource rich language. We observe that NEs have similar time distributions across such corpora, and that they are often transliterated, and develop an algorithm that exploits both iteratively. The algorithm makes use of a new, frequency based, metric for time distributions and a resource free discriminative approach to transliteration. We evaluate the algorithm on an English-Russian corpus, and show high level of NEs discovery in Russian.", "keyphrases": ["transliteration", "discovery", "comparable corpora", "lexicon induction"]} +{"id": "simard-foster-2013-pepr", "title": "PEPr: Post-Edit Propagation Using Phrase-based Statistical Machine Translation", "abstract": "Translators who work by post-editing machine translation output often find themselves repeatedly correcting the same errors. We propose a method for Post-edit Propagation (PEPr), which learns post-editor corrections and applies them on-the-fly to further MT output. Our proposal is based on a phrase-based SMT system, used in an automatic post-editing (APE) setting with online learning. 
Simulated experiments on a variety of data sets show that for documents with high levels of internal repetition, the proposed mechanism could substantially reduce the post-editing effort.", "keyphrases": ["post-edit propagation", "translator", "correction", "pepr"]} +{"id": "kirchhoff-etal-2007-semi", "title": "Semi-automatic error analysis for large-scale statistical machine translation", "abstract": "This paper presents a general framework for semi-automatic error analysis in large-scale statistical machine translation (SMT) systems. The main objective is to relate characteristics of input documents (which can be either in text or audio form) to the system's overall translation performance and thus identify particularly problematic input characteristics (e.g. source, genre, dialect, etc.). Various measurements of these factors are extracted from the input, either automatically or by human annotation, and are related to translation performance scores by means of mutual information. We apply this analysis to a state-of-the-art large-scale SMT system operating on Chinese and Arabic text and audio documents, and demonstrate how the proposed error analysis can help identify system weaknesses.", "keyphrases": ["error analysis", "statistical machine translation", "characteristic", "input document"]} +{"id": "matsoukas-etal-2009-discriminative", "title": "Discriminative Corpus Weight Estimation for Machine Translation", "abstract": "Current statistical machine translation (SMT) systems are trained on sentence-aligned and word-aligned parallel text collected from various sources. Translation model parameters are estimated from the word alignments, and the quality of the translations on a given test set depends on the parameter estimates. There are at least two factors affecting the parameter estimation: domain match and training data quality. This paper describes a novel approach for automatically detecting and down-weighing certain parts of the training corpus by assigning a weight to each sentence in the training bitext so as to optimize a discriminative objective function on a designated tuning set. This way, the proposed method can limit the negative effects of low quality training data, and can adapt the translation model to the domain of interest. It is shown that such discriminative corpus weights can provide significant improvements in Arabic-English translation on various conditions, using a state-of-the-art SMT system.", "keyphrases": ["weight", "machine translation", "meta-information", "development set"]} +{"id": "levy-etal-2015-supervised", "title": "Do Supervised Distributional Methods Really Learn Lexical Inference Relations?", "abstract": "Distributional representations of words have been recently used in supervised settings for recognizing lexical inference relations between word pairs, such as hypernymy and entailment. We investigate a collection of these state-of-the-art methods, and show that they do not actually learn a relation between two words. Instead, they learn an independent property of a single word in the pair: whether that word is a \u201cprototypical hypernym\u201d.", "keyphrases": ["distributional method", "entailment", "hypernym", "lexical memorization", "word embedding"]} +{"id": "lapata-2003-probabilistic", "title": "Probabilistic Text Structuring: Experiments with Sentence Ordering", "abstract": "Ordering information is a critical task for natural language generation applications. 
In this paper we propose an approach to information ordering that is particularly suited for text-to-text generation. We describe a model that learns constraints on sentence order from a corpus of domain-specific texts and an algorithm that yields the most likely order among several alternatives. We evaluate the automatically generated orderings against authored texts from our corpus and against human subjects that are asked to mimic the model's task. We also assess the appropriateness of such a model for multidocument summarization.", "keyphrases": ["sentence ordering", "text-to-text generation", "probabilistic model", "source document"]} +{"id": "kepler-etal-2019-openkiwi", "title": "OpenKiwi: An Open Source Framework for Quality Estimation", "abstract": "We introduce OpenKiwi, a PyTorch-based open source framework for translation quality estimation. OpenKiwi supports training and testing of word-level and sentence-level quality estimation systems, implementing the winning systems of the WMT 2015\u201318 quality estimation campaigns. We benchmark OpenKiwi on two datasets from WMT 2018 (English-German SMT and NMT), yielding state-of-the-art performance on the word-level tasks and near state-of-the-art in the sentence-level tasks.", "keyphrases": ["translator", "openkiwi", "machine-translated sentence"]} +{"id": "devlin-etal-2015-language", "title": "Language Models for Image Captioning: The Quirks and What Works", "abstract": "Two recent approaches have achieved state-of-the-art results in image captioning. The first uses a pipelined process where a set of candidate words is generated by a convolutional neural network (CNN) trained on images, and then a maximum entropy (ME) language model is used to arrange these words into a coherent sentence. The second uses the penultimate activation layer of the CNN as input to a recurrent neural network (RNN) that then generates the caption sequence. In this paper, we compare the merits of these different language modeling approaches for the first time by using the same state-of-the-art CNN as input. We examine issues in the different approaches, including linguistic irregularities, caption repetition, and data set overlap. By combining key aspects of the ME and RNN methods, we achieve a new record performance over previously published results on the benchmark COCO dataset. However, the gains we see in BLEU do not translate to human judgments.", "keyphrases": ["image captioning", "cnn", "language model", "dense vector", "output string"]} +{"id": "niu-etal-2020-evaluating", "title": "Evaluating Robustness to Input Perturbations for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) models are sensitive to small perturbations in the input. Robustness to such perturbations is typically measured using translation quality metrics such as BLEU on the noisy input. This paper proposes additional metrics which measure the relative degradation and changes in translation when small perturbations are added to the input. We focus on a class of models employing subword regularization to address robustness and perform extensive evaluations of these models using the robustness measures proposed. 
Results show that our proposed metrics reveal a clear trend of improved robustness to perturbations when subword regularization methods are used.", "keyphrases": ["robustness", "neural machine translation", "small perturbation", "nlp model"]} +{"id": "xu-etal-2016-improved", "title": "Improved relation classification by deep recurrent neural networks with data augmentation", "abstract": "Nowadays, neural networks play an important role in the task of relation classification. By designing different neural architectures, researchers have improved the performance to a large extent in comparison with traditional methods. However, existing neural networks for relation classification are usually of shallow architectures (e.g., one-layer convolutional neural networks or recurrent networks). They may fail to explore the potential representation space in different abstraction levels. In this paper, we propose deep recurrent neural networks (DRNNs) for relation classification to tackle this challenge. Further, we propose a data augmentation method by leveraging the directionality of relations. We evaluated our DRNNs on the SemEval-2010 Task 8, and achieve an F1-score of 86.1%, outperforming previous state-of-the-art recorded results.", "keyphrases": ["relation classification", "recurrent neural network", "data augmentation"]} +{"id": "hou-etal-2014-rule", "title": "A Rule-Based System for Unrestricted Bridging Resolution: Recognizing Bridging Anaphora and Finding Links to Antecedents", "abstract": "Bridging resolution plays an important role in establishing (local) entity coherence. This paper proposes a rule-based approach for the challenging task of unrestricted bridging resolution, where bridging anaphors are not limited to definite NPs and semantic relations between anaphors and their antecedents are not restricted to meronymic relations. The system consists of eight rules which target different relations based on linguistic insights. Our rule-based system significantly outperforms a reimplementation of a previous rule-based system (Vieira and Poesio, 2000). Furthermore, it performs better than a learning-based approach which has access to the same knowledge resources as the rule-based system. Additionally, incorporating the rules and more features into the learning-based system yields a minor improvement over the rule-based system.", "keyphrases": ["rule-based system", "unrestricted bridging resolution", "anaphor"]} +{"id": "rambow-etal-2004-summarizing", "title": "Summarizing Email Threads", "abstract": "Summarizing threads of email is different from summarizing other types of written communication as it has an inherent dialog structure. We present initial research which shows that sentence extraction techniques can work for email threads as well, but profit from email-specific features. In addition, the presentation of the summary should take into account the dialogic structure of email communication.", "keyphrases": ["email thread", "sentence extraction technique", "summarization", "discussion issue", "recipient"]} +{"id": "gehrmann-etal-2019-gltr", "title": "GLTR: Statistical Detection and Visualization of Generated Text", "abstract": "The rapid improvement of language models has raised the specter of abuse of text generation systems. This progress motivates the development of simple methods for detecting generated text that can be used by non-experts. In this work, we introduce GLTR, a tool to support humans in detecting whether a text was generated by a model. 
GLTR applies a suite of baseline statistical methods that can detect generation artifacts across multiple sampling schemes. In a human-subjects study, we show that the annotation scheme provided by GLTR improves the human detection-rate of fake text from 54% to 72% without any prior training. GLTR is open-source and publicly deployed, and has already been widely used to detect generated outputs.", "keyphrases": ["statistical method", "generation artifact", "gltr", "synthetic text"]} +{"id": "wang-etal-2018-tree", "title": "A Tree-based Decoder for Neural Machine Translation", "abstract": "Recent advances in Neural Machine Translation (NMT) show that adding syntactic information to NMT systems can improve the quality of their translations. Most existing work utilizes some specific types of linguistically-inspired tree structures, like constituency and dependency parse trees. This is often done via a standard RNN decoder that operates on a linearized target tree structure. However, it is an open question of what specific linguistic formalism, if any, is the best structural representation for NMT. In this paper, we (1) propose an NMT model that can naturally generate the topology of an arbitrary tree structure on the target side, and (2) experiment with various target tree structures. Our experiments show the surprising result that our model delivers the best improvements with balanced binary trees constructed without any linguistic knowledge; this model outperforms standard seq2seq models by up to 2.1 BLEU points, and other methods for incorporating target-side syntax by up to 0.7 BLEU.", "keyphrases": ["neural machine translation", "tree structure", "target sentence"]} +{"id": "stahlberg-etal-2016-syntactically", "title": "Syntactically Guided Neural Machine Translation", "abstract": "We investigate the use of hierarchical phrase-based SMT lattices in end-to-end neural machine translation (NMT). Weight pushing transforms the Hiero scores for complete translation hypotheses, with the full translation grammar score and full n-gram language model score, into posteriors compatible with NMT predictive probabilities. With a slightly modified NMT beam-search decoder we find gains over both Hiero and NMT decoding alone, with practical advantages in extending NMT to very large input and output vocabularies.", "keyphrases": ["neural machine translation", "lattice", "hiero", "research track nagoya"]} +{"id": "pruksachatkun-etal-2020-intermediate", "title": "Intermediate-Task Transfer Learning with Pretrained Language Models: When and Why Does It Work?", "abstract": "While pretrained models such as BERT have shown large gains across natural language understanding tasks, their performance can be improved by further training the model on a data-rich intermediate task, before fine-tuning it on a target task. However, it is still poorly understood when and why intermediate-task training is beneficial for a given target task. To investigate this, we perform a large-scale study on the pretrained RoBERTa model with 110 intermediate-target task combinations. We further evaluate all trained models with 25 probing tasks meant to reveal the specific skills that drive transfer. We observe that intermediate tasks requiring high-level inference and reasoning abilities tend to work best. We also observe that target task performance is strongly correlated with higher-level abilities such as coreference resolution. 
However, we fail to observe more granular correlations between probing and target task performance, highlighting the need for further work on broad-coverage probing benchmarks. We also observe evidence that the forgetting of knowledge learned during pretraining may limit our analysis, highlighting the need for further work on transfer learning methods in these settings.", "keyphrases": ["language model", "intermediate task", "pre-training"]} +{"id": "kochkina-etal-2018-one", "title": "All-in-one: Multi-task Learning for Rumour Verification", "abstract": "Automatic resolution of rumours is a challenging task that can be broken down into smaller components that make up a pipeline, including rumour detection, rumour tracking and stance classification, leading to the final outcome of determining the veracity of a rumour. In previous work, these steps in the process of rumour verification have been developed as separate components where the output of one feeds into the next. We propose a multi-task learning approach that allows joint training of the main and auxiliary tasks, improving the performance of rumour verification. We examine the connection between the dataset properties and the outcomes of the multi-task learning models used.", "keyphrases": ["multi-task learning", "rumour verification", "stance classification", "target claim", "story"]} +{"id": "yin-etal-2017-document", "title": "Document-Level Multi-Aspect Sentiment Classification as Machine Comprehension", "abstract": "Document-level multi-aspect sentiment classification is an important task for customer relation management. In this paper, we model the task as a machine comprehension problem where pseudo question-answer pairs are constructed by a small number of aspect-related keywords and aspect ratings. A hierarchical iterative attention model is introduced to build aspect-specific representations by frequent and repeated interactions between documents and aspect questions. We adopt a hierarchical architecture to represent both word level and sentence level information, and use the attention operations for aspect questions and documents alternatively with the multiple hop mechanism. Experimental results on the TripAdvisor and BeerAdvocate datasets show that our model outperforms classical baselines. We will release our code and data for the method replicability.", "keyphrases": ["multi-aspect sentiment classification", "machine comprehension problem", "pseudo question-answer pair", "rating"]} +{"id": "graehl-knight-2004-training", "title": "Training Tree Transducers", "abstract": "Many probabilistic models for natural language are now written in terms of hierarchical tree structure. Tree-based modeling still lacks many of the standard tools taken for granted in (finite-state) string-based modeling. The theory of tree transducer automata provides a possible framework to draw on, as it has been worked out in an extensive literature. We motivate the use of tree transducers for natural language and address the training problem for probabilistic tree-to-tree and tree-to-string transducers.", "keyphrases": ["tree transducer", "training problem", "machine translation", "mapping"]} +{"id": "song-etal-2017-amr", "title": "AMR-to-text Generation with Synchronous Node Replacement Grammar", "abstract": "This paper addresses the task of AMR-to-text generation by leveraging synchronous node replacement grammar. During training, graph-to-string rules are learned using a heuristic extraction algorithm. 
At test time, a graph transducer is applied to collapse input AMRs and generate output sentences. Evaluated on a standard benchmark, our method gives the state-of-the-art result.", "keyphrases": ["node replacement grammar", "input amr", "output sentence", "amr-to-text generation"]} +{"id": "lin-etal-2015-modeling", "title": "Modeling Relation Paths for Representation Learning of Knowledge Bases", "abstract": "Representation learning of knowledge bases aims to embed both entities and relations into a low-dimensional space. Most existing methods only consider direct relations in representation learning. We argue that multiple-step relation paths also contain rich inference patterns between entities, and propose a path-based representation learning model. This model considers relation paths as translations between entities for representation learning, and addresses two key challenges: (1) Since not all relation paths are reliable, we design a path-constraint resource allocation algorithm to measure the reliability of relation paths. (2) We represent relation paths via semantic composition of relation embeddings. Experimental results on real-world datasets show that, as compared with baselines, our model achieves significant and consistent improvements on knowledge base completion and relation extraction from text. The source code of this paper can be obtained from https://github.com/mrlyk423/relation_extraction.", "keyphrases": ["relation path", "representation learning", "knowledge graph"]} +{"id": "dolan-brockett-2005-automatically", "title": "Automatically Constructing a Corpus of Sentential Paraphrases", "abstract": "An obstacle to research in automatic paraphrase identification and generation is the lack of large-scale, publicly available labeled corpora of sentential paraphrases. This paper describes the creation of the recently-released Microsoft Research Paraphrase Corpus, which contains 5801 sentence pairs, each hand-labeled with a binary judgment as to whether the pair constitutes a paraphrase. The corpus was created using heuristic extraction techniques in conjunction with an SVM-based classifier to select likely sentence-level paraphrases from a large corpus of topic-clustered news data. These pairs were then submitted to human judges, who confirmed that 67% were in fact semantically equivalent. In addition to describing the corpus itself, we explore a number of issues that arose in defining guidelines for the human raters.", "keyphrases": ["sentential paraphrase", "sentence pair", "heuristic extraction technique"]} +{"id": "keller-2010-cognitively", "title": "Cognitively Plausible Models of Human Language Processing", "abstract": "We pose the development of cognitively plausible models of human language processing as a challenge for computational linguistics. Existing models can only deal with isolated phenomena (e.g., garden paths) on small, specifically selected data sets. The challenge is to build models that integrate multiple aspects of human language processing at the syntactic, semantic, and discourse level. Like human language processing, these models should be incremental, predictive, broad coverage, and robust to noise. 
This challenge can only be met if standardized data sets and evaluation measures are developed.", "keyphrases": ["plausible model", "human language processing", "incrementality", "coverage"]} +{"id": "mcdonald-etal-2011-multi", "title": "Multi-Source Transfer of Delexicalized Dependency Parsers", "abstract": "We present a simple method for transferring dependency parsers from source languages with labeled training data to target languages without labeled training data. We first demonstrate that delexicalized parsers can be directly transferred between languages, producing significantly higher accuracies than unsupervised parsers. We then use a constraint driven learning algorithm where constraints are drawn from parallel corpora to project the final parser. Unlike previous work on projecting syntactic resources, we show that simple methods for introducing multiple source languages can significantly improve the overall quality of the resulting parsers. The projected parsers from our system result in state-of-the-art performance when compared to previously studied unsupervised and projected parsing systems across eight different languages.", "keyphrases": ["source language", "unsupervised parser", "multi-source transfer", "part-of-speech tag", "cross-lingual dependency"]} +{"id": "lau-etal-2014-learning", "title": "Learning Word Sense Distributions, Detecting Unattested Senses and Identifying Novel Senses Using Topic Models", "abstract": "Unsupervised word sense disambiguation (WSD) methods are an attractive approach to all-words WSD due to their non-reliance on expensive annotated data. Unsupervised estimates of sense frequency have been shown to be very useful for WSD due to the skewed nature of word sense distributions. This paper presents a fully unsupervised topic modelling-based approach to sense frequency estimation, which is highly portable to different corpora and sense inventories, in being applicable to any part of speech, and not requiring a hierarchical sense inventory, parsing or parallel text. We demonstrate the effectiveness of the method over the tasks of predominant sense learning and sense distribution acquisition, and also the novel tasks of detecting senses which aren\u2019t attested in the corpus, and identifying novel senses in the corpus which aren\u2019t captured in the sense inventory.", "keyphrases": ["novel sense", "estimation", "unsupervised topic"]} +{"id": "boltuzic-snajder-2014-back", "title": "Back up your Stance: Recognizing Arguments in Online Discussions", "abstract": "In online discussions, users often back up their stance with arguments. Their arguments are often vague, implicit, and poorly worded, yet they provide valuable insights into reasons underpinning users\u2019 opinions. In this paper, we make a first step towards argument-based opinion mining from online discussions and introduce a new task of argument recognition. We match user-created comments to a set of predefined topic-based arguments, which can be either attacked or supported in the comment. We present a manually-annotated corpus for argument recognition in online discussions. We describe a supervised model based on comment-argument similarity and entailment features. 
Depending on problem formulation, model performance ranges from 70.5% to 81.8% F1-score, and decreases only marginally when applied to an unseen topic.", "keyphrases": ["stance", "online discussion", "argument mining", "textual entailment"]} +{"id": "kaji-kitsuregawa-2014-accurate", "title": "Accurate Word Segmentation and POS Tagging for Japanese Microblogs: Corpus Annotation and Joint Modeling with Lexical Normalization", "abstract": "Microblogs have recently received widespread interest from NLP researchers. However, current tools for Japanese word segmentation and POS tagging still perform poorly on microblog texts. We developed an annotated corpus and proposed a joint model for overcoming this situation. Our annotated corpus of microblog texts enables not only training of accurate statistical models but also quantitative evaluation of their performance. Our joint model with lexical normalization handles the orthographic diversity of microblog texts. We conducted an experiment to demonstrate that the corpus and model substantially contribute to boosting accuracy.", "keyphrases": ["pos tagging", "microblog", "lexical normalization"]} +{"id": "li-etal-2014-constructing", "title": "Constructing Information Networks Using One Single Model", "abstract": "In this paper, we propose a new framework that unifies the output of three information extraction (IE) tasks - entity mentions, relations and events as an information network representation, and extracts all of them using one single joint model based on structured prediction. This novel formulation allows different parts of the information network to fully interact with each other. For example, many relations can now be considered as the resultant states of events. Our approach achieves substantial improvements over traditional pipelined approaches, and significantly advances state-of-the-art end-to-end event argument extraction.", "keyphrases": ["information network", "entity mention", "joint model"]} +{"id": "xue-etal-2015-conll", "title": "The CoNLL-2015 Shared Task on Shallow Discourse Parsing", "abstract": "The CoNLL-2015 Shared Task is on Shallow Discourse Parsing, a task focusing on identifying individual discourse relations that are present in a natural language text. A discourse relation can be expressed explicitly or implicitly, and takes two arguments realized as sentences, clauses, or in some rare cases, phrases. Sixteen teams from three continents participated in this task. For the first time in the history of the CoNLL shared tasks, participating teams, instead of running their systems on the test set and submitting the output, were asked to deploy their systems on a remote virtual machine and use a web-based evaluation platform to run their systems on the test set. This meant they were unable to actually see the data set, thus preserving its integrity and ensuring its replicability. In this paper, we present the task definition, the training and test sets, and the evaluation protocol and metric used during this shared task. We also summarize the different approaches adopted by the participating teams, and present the evaluation results. 
The evaluation data sets and the scorer will serve as a benchmark for future research on shallow discourse parsing.", "keyphrases": ["conll-2015 shared task", "shallow discourse parsing", "individual discourse relation", "clause"]} +{"id": "shirani-etal-2020-semeval", "title": "SemEval-2020 Task 10: Emphasis Selection for Written Text in Visual Media", "abstract": "In this paper, we present the main findings and compare the results of SemEval-2020 Task 10, Emphasis Selection for Written Text in Visual Media. The goal of this shared task is to design automatic methods for emphasis selection, i.e. choosing candidates for emphasis in textual content to enable automated design assistance in authoring. The main focus is on short text instances for social media, with a variety of examples, from social media posts to inspirational quotes. Participants were asked to model emphasis using plain text with no additional context from the user or other design considerations. SemEval-2020 Emphasis Selection shared task attracted 197 participants in the early phase and a total of 31 teams made submissions to this task. The highest-ranked submission achieved 0.823 Match_m score. The analysis of systems submitted to the task indicates that BERT and RoBERTa were the most common choice of pre-trained models used, and part of speech tag (POS) was the most useful feature. Full results can be found on the task's website.", "keyphrases": ["emphasis selection", "visual media", "semeval-2020 task"]} +{"id": "fitzgerald-etal-2018-large", "title": "Large-Scale QA-SRL Parsing", "abstract": "We present a new large-scale corpus of Question-Answer driven Semantic Role Labeling (QA-SRL) annotations, and the first high-quality QA-SRL parser. Our corpus, QA-SRL Bank 2.0, consists of over 250,000 question-answer pairs for over 64,000 sentences across 3 domains and was gathered with a new crowd-sourcing scheme that we show has high precision and good recall at modest cost. We also present neural models for two QA-SRL subtasks: detecting argument spans for a predicate and generating questions to label the semantic relationship. The best models achieve question accuracy of 82.6% and span-level accuracy of 77.6% (under human evaluation) on the full pipelined QA-SRL prediction task. They can also, as we show, be used to gather additional annotations at low cost.", "keyphrases": ["qa-srl", "semantic role labeling", "scheme"]} +{"id": "alabau-etal-2014-casmacat", "title": "CASMACAT: A Computer-assisted Translation Workbench", "abstract": "CASMACAT is a modular, web-based translation workbench that offers advanced functionalities for computer-aided translation and the scientific study of human translation: automatic interaction with machine translation (MT) engines and translation memories (TM) to obtain raw translations or close TM matches for conventional post-editing; interactive translation prediction based on an MT engine\u2019s search graph, detailed recording and replay of edit actions and translator\u2019s gaze (the latter via eye-tracking), and the support of e-pen as an alternative input device. 
The system is open source software and interfaces with multiple MT systems.", "keyphrases": ["translation workbench", "e-pen", "interface", "casmacat"]} +{"id": "lamproudis-etal-2021-developing", "title": "Developing a Clinical Language Model for Swedish: Continued Pretraining of Generic BERT with In-Domain Data", "abstract": "The use of pretrained language models, fine-tuned to perform a specific downstream task, has become widespread in NLP. Using a generic language model in specialized domains may, however, be sub-optimal due to differences in language use and vocabulary. In this paper, it is investigated whether an existing, generic language model for Swedish can be improved for the clinical domain through continued pretraining with clinical text. The generic and domain-specific language models are fine-tuned and evaluated on three representative clinical NLP tasks: (i) identifying protected health information, (ii) assigning ICD-10 diagnosis codes to discharge summaries, and (iii) sentence-level uncertainty prediction. The results show that continued pretraining on in-domain data leads to improved performance on all three downstream tasks, indicating that there is a potential added value of domain-specific language models for clinical NLP.", "keyphrases": ["swedish", "in-domain data", "clinical domain"]} +{"id": "lin-xu-2019-deep", "title": "Deep Unknown Intent Detection with Margin Loss", "abstract": "Identifying the unknown (novel) user intents that have never appeared in the training set is a challenging task in the dialogue system. In this paper, we present a two-stage method for detecting unknown intents. We use bidirectional long short-term memory (BiLSTM) network with the margin loss as the feature extractor. With margin loss, we can learn discriminative deep features by forcing the network to maximize inter-class variance and to minimize intra-class variance. Then, we feed the feature vectors to the density-based novelty detection algorithm, local outlier factor (LOF), to detect unknown intents. Experiments on two benchmark datasets show that our method can yield consistent improvements compared with the baseline methods.", "keyphrases": ["intent detection", "margin loss", "discriminative deep feature", "ood detection", "unknown class"]} +{"id": "kumar-talukdar-2020-nile", "title": "NILE : Natural Language Inference with Faithful Natural Language Explanations", "abstract": "The recent growth in the popularity and success of deep learning models on NLP classification tasks has accompanied the need for generating some form of natural language explanation of the predicted labels. Such generated natural language (NL) explanations are expected to be faithful, i.e., they should correlate well with the model's internal decision making. In this work, we focus on the task of natural language inference (NLI) and address the following question: can we build NLI systems which produce labels with high accuracy, while also generating faithful explanations of its decisions? We propose Natural-language Inference over Label-specific Explanations (NILE), a novel NLI method which utilizes auto-generated label-specific NL explanations to produce labels along with its faithful explanation. We demonstrate NILE's effectiveness over previously reported methods through automated and human evaluation of the produced labels and explanations. Our evaluation of NILE also supports the claim that accurate systems capable of providing testable explanations of their decisions can be designed. 
We discuss the faithfulness of NILE's explanations in terms of sensitivity of the decisions to the corresponding explanations. We argue that explicit evaluation of faithfulness, in addition to label and explanation accuracy, is an important step in evaluating model's explanations. Further, we demonstrate that task-specific probes are necessary to establish such sensitivity.", "keyphrases": ["natural language inference", "decision making", "faithful explanation", "nile"]} +{"id": "zampieri-etal-2019-predicting", "title": "Predicting the Type and Target of Offensive Posts in Social Media", "abstract": "As offensive content has become pervasive in social media, there has been much research in identifying potentially offensive messages. However, previous work on this topic did not consider the problem as a whole, but rather focused on detecting very specific types of offensive content, e.g., hate speech, cyberbullying, or cyber-aggression. In contrast, here we target several different kinds of offensive content. In particular, we model the task hierarchically, identifying the type and the target of offensive messages in social media. For this purpose, we compiled the Offensive Language Identification Dataset (OLID), a new dataset with tweets annotated for offensive content using a fine-grained three-layer annotation scheme, which we make publicly available. We discuss the main similarities and differences between OLID and pre-existing datasets for hate speech identification, aggression detection, and similar tasks. We further experiment with and we compare the performance of different machine learning models on OLID.", "keyphrases": ["offensive language", "social medium", "twitter post"]} +{"id": "mimno-etal-2011-optimizing", "title": "Optimizing Semantic Coherence in Topic Models", "abstract": "Latent variable models have the potential to add value to large document collections by discovering interpretable, low-dimensional subspaces. In order for people to use such models, however, they must trust them. Unfortunately, typical dimensionality reduction methods for text, such as latent Dirichlet allocation, often produce low-dimensional subspaces (topics) that are obviously flawed to human domain experts. The contributions of this paper are threefold: (1) An analysis of the ways in which topics can be flawed; (2) an automated evaluation metric for identifying such topics that does not rely on human annotators or reference collections outside the training data; (3) a novel statistical topic model based on this metric that significantly improves topic quality in a large-scale document collection from the National Institutes of Health (NIH).", "keyphrases": ["semantic coherence", "topic model", "co-document frequency", "pmi"]} +{"id": "dziri-etal-2019-evaluating", "title": "Evaluating Coherence in Dialogue Systems using Entailment", "abstract": "Evaluating open-domain dialogue systems is difficult due to the diversity of possible correct answers. Automatic metrics such as BLEU correlate weakly with human annotations, resulting in a significant bias across different models and datasets. Some researchers resort to human judgment experimentation for assessing response quality, which is expensive, time consuming, and not scalable. Moreover, judges tend to evaluate a small number of dialogues, meaning that minor differences in evaluation configuration may lead to dissimilar results. 
In this paper, we present interpretable metrics for evaluating topic coherence by making use of distributed sentence representations. Furthermore, we introduce calculable approximations of human judgment based on conversational coherence by adopting state-of-the-art entailment techniques. Results show that our metrics can be used as a surrogate for human judgment, making it easy to evaluate dialogue systems on large-scale datasets and allowing an unbiased estimate for the quality of the responses.", "keyphrases": ["coherence", "dialogue system", "entailment"]} +{"id": "hu-etal-2019-texar", "title": "Texar: A Modularized, Versatile, and Extensible Toolkit for Text Generation", "abstract": "We introduce Texar, an open-source toolkit aiming to support the broad set of text generation tasks that transform any inputs into natural language, such as machine translation, summarization, dialog, content manipulation, and so forth. With the design goals of modularity, versatility, and extensibility in mind, Texar extracts common patterns underlying the diverse tasks and methodologies, creates a library of highly reusable modules and functionalities, and allows arbitrary model architectures and algorithmic paradigms. In Texar, model architecture, inference, and learning processes are properly decomposed. Modules at a high concept level can be freely assembled or plugged in/swapped out. Texar is thus particularly suitable for researchers and practitioners to do fast prototyping and experimentation. The versatile toolkit also fosters technique sharing across different text generation tasks. Texar supports both TensorFlow and PyTorch, and is released under Apache License 2.0 at .", "keyphrases": ["versatility", "text generation task", "texar"]} +{"id": "kurita-sogaard-2019-multi", "title": "Multi-Task Semantic Dependency Parsing with Policy Gradient for Learning Easy-First Strategies", "abstract": "In Semantic Dependency Parsing (SDP), semantic relations form directed acyclic graphs, rather than trees. We propose a new iterative predicate selection (IPS) algorithm for SDP. Our IPS algorithm combines the graph-based and transition-based parsing approaches in order to handle multiple semantic head words. We train the IPS model using a combination of multi-task learning and task-specific policy gradient training. Trained this way, IPS achieves a new state of the art on the SemEval 2015 Task 18 datasets. Furthermore, we observe that policy gradient training learns an easy-first strategy.", "keyphrases": ["semantic dependency parsing", "policy gradient", "easy-first strategy"]} +{"id": "cao-etal-2019-multi", "title": "Multi-Channel Graph Neural Network for Entity Alignment", "abstract": "Entity alignment typically suffers from the issues of structural heterogeneity and limited seed alignments. In this paper, we propose a novel Multi-channel Graph Neural Network model (MuGNN) to learn alignment-oriented knowledge graph (KG) embeddings by robustly encoding two KGs via multiple channels. Each channel encodes KGs via different relation weighting schemes with respect to self-attention towards KG completion and cross-KG attention for pruning exclusive entities respectively, which are further combined via pooling techniques. Moreover, we also infer and transfer rule knowledge for completing two KGs consistently. MuGNN is expected to reconcile the structural differences of two KGs, and thus make better use of seed alignments. 
Extensive experiments on five publicly available datasets demonstrate our superior performance (5% Hits@1 up on average). Source code and data used in the experiments can be accessed at .", "keyphrases": ["graph neural network", "entity alignment", "mugnn"]} +{"id": "yang-eisenstein-2017-overcoming", "title": "Overcoming Language Variation in Sentiment Analysis with Social Attention", "abstract": "Variation in language is ubiquitous, particularly in newer forms of writing such as social media. Fortunately, variation is not random; it is often linked to social properties of the author. In this paper, we show how to exploit social networks to make sentiment analysis more robust to social language variation. The key idea is linguistic homophily: the tendency of socially linked individuals to use language in similar ways. We formalize this idea in a novel attention-based neural network architecture, in which attention is divided among several basis models, depending on the author's position in the social network. This has the effect of smoothing the classification function across the social network, and makes it possible to induce personalized classifiers even for authors for whom there is no labeled data or demographic metadata. This model significantly improves the accuracies of sentiment analysis on Twitter and on review data.", "keyphrases": ["sentiment analysis", "text classification", "social medium"]} +{"id": "schwenk-etal-2021-wikimatrix", "title": "WikiMatrix: Mining 135M Parallel Sentences in 1620 Language Pairs from Wikipedia", "abstract": "We present an approach based on multilingual sentence embeddings to automatically extract parallel sentences from the content of Wikipedia articles in 96 languages, including several dialects or low-resource languages. We do not limit the extraction process to alignments with English, but we systematically consider all possible language pairs. In total, we are able to extract 135M parallel sentences for 1620 different language pairs, out of which only 34M are aligned with English. This corpus is freely available. To get an indication on the quality of the extracted bitexts, we train neural MT baseline systems on the mined data only for 1886 language pairs, and evaluate them on the TED corpus, achieving strong BLEU scores for many language pairs. The WikiMatrix bitexts seem to be particularly interesting to train MT systems between distant languages without the need to pivot through English.", "keyphrases": ["mining", "parallel sentence", "wikipedia", "low-resource language", "parallel data"]} +{"id": "li-etal-2019-deep", "title": "Deep Reinforcement Learning with Distributional Semantic Rewards for Abstractive Summarization", "abstract": "Deep reinforcement learning (RL) has been a commonly-used strategy for the abstractive summarization task to address both the exposure bias and non-differentiable task issues. However, the conventional reward Rouge-L simply looks for exact n-grams matches between candidates and annotated references, which inevitably makes the generated sentences repetitive and incoherent. In this paper, instead of Rouge-L, we explore the practicability of utilizing the distributional semantics to measure the matching degrees. With distributional semantics, sentence-level evaluation can be obtained, and semantically-correct phrases can also be generated without being limited to the surface form of the reference sentences. 
Human judgments on Gigaword and CNN/Daily Mail datasets show that our proposed distributional semantics reward (DSR) has distinct superiority in capturing the lexical and compositional diversity of natural language.", "keyphrases": ["reward", "abstractive summarization", "deep reinforcement learning"]} +{"id": "rai-etal-2016-supervised", "title": "Supervised Metaphor Detection using Conditional Random Fields", "abstract": "In this paper, we propose a novel approach for supervised classification of linguistic metaphors in an open domain text using Conditional Random Fields (CRF). We analyze CRF based classification model for metaphor detection using syntactic, conceptual, affective, and word embeddings based features which are extracted from MRC Psycholinguistic Database (MRCPD) and WordNet-Affect. We use word embeddings given by Huang et al. to capture information such as coherence and analogy between words. To tackle the bottleneck of limited coverage of psychological features in MRCPD, we employ synonymy relations from WordNet\u00ae. A comparison of our approach with previous approaches shows the efficacy of CRF classifier in detecting metaphors. The experiments conducted on VU Amsterdam metaphor corpus provide an accuracy of more than 92% and F-measure of approximately 78%. Results show that inclusion of conceptual features improves the recall by 5% whereas affective features do not have any major impact on metaphor detection in open text.", "keyphrases": ["metaphor detection", "conditional random fields", "open domain text"]} +{"id": "hao-etal-2019-multi", "title": "Multi-Granularity Self-Attention for Neural Machine Translation", "abstract": "Current state-of-the-art neural machine translation (NMT) uses a deep multi-head self-attention network with no explicit phrase information. However, prior work on statistical machine translation has shown that extending the basic translation unit from words to phrases has produced substantial improvements, suggesting the possibility of improving NMT performance from explicit modeling of phrases. In this work, we present multi-granularity self-attention (Mg-Sa): a neural network that combines multi-head self-attention and phrase modeling. Specifically, we train several attention heads to attend to phrases in either n-gram or syntactic formalisms. Moreover, we exploit interactions among phrases to enhance the strength of structure modeling \u2013 a commonly-cited weakness of self-attention. Experimental results on WMT14 English-to-German and NIST Chinese-to-English translation tasks show the proposed approach consistently improves performance. Targeted linguistic analysis reveal that Mg-Sa indeed captures useful phrase information at various levels of granularities.", "keyphrases": ["neural machine translation", "head", "multi-granularity self-attention"]} +{"id": "xu-etal-2020-clue", "title": "CLUE: A Chinese Language Understanding Evaluation Benchmark", "abstract": "The advent of natural language understanding (NLU) benchmarks for English, such as GLUE and SuperGLUE allows new NLU models to be evaluated across a diverse set of tasks. These comprehensive benchmarks have facilitated a broad range of research and applications in natural language processing (NLP). The problem, however, is that most such benchmarks are limited to English, which has made it difficult to replicate many of the successes in English NLU for other languages. 
To help remedy this issue, we introduce the first large-scale Chinese Language Understanding Evaluation (CLUE) benchmark. CLUE is an open-ended, community-driven project that brings together 9 tasks spanning several well-established single-sentence/sentence-pair classification tasks, as well as machine reading comprehension, all on original Chinese text. To establish results on these tasks, we report scores using an exhaustive set of current state-of-the-art pre-trained Chinese models (9 in total). We also introduce a number of supplementary datasets and additional tools to help facilitate further progress on Chinese NLU. Our benchmark is released at ", "keyphrases": ["other language", "clue", "pretraining"]} +{"id": "ji-lin-2009-gender", "title": "Gender and Animacy Knowledge Discovery from Web-Scale N-Grams for Unsupervised Person Mention Detection", "abstract": "In this paper we present a simple approach to discover gender and animacy knowledge for person mention detection. We learn noun-gender and noun-animacy pair counts from web-scale n-grams using specific lexical patterns, and then apply confidence estimation metrics to filter noise. The selected informative pairs are then used to detect person mentions from raw texts in an unsupervised learning framework. Experiments showed that this approach can achieve high performance comparable to state-of-the-art supervised learning methods which require manually annotated corpora and gazetteers.", "keyphrases": ["web-scale n-gram", "person mention detection", "gender"]} +{"id": "inui-etal-2003-text", "title": "Text Simplification for Reading Assistance: A Project Note", "abstract": "This paper describes our ongoing research project on text simplification for congenitally deaf people. Text simplification we are aiming at is the task of offering a deaf reader a syntactic and lexical paraphrase of a given text for assisting her/him to understand what it means. In this paper, we discuss the issues we should address to realize text simplification and report on the present results in three different aspects of this task: readability assessment, paraphrase representation and post-transfer error detection.", "keyphrases": ["aim", "paraphrase", "text simplification"]} +{"id": "peters-etal-2017-semi", "title": "Semi-supervised sequence tagging with bidirectional language models", "abstract": "Pre-trained word embeddings learned from unlabeled text have become a standard component of neural network architectures for NLP tasks. However, in most cases, the recurrent network that operates on word-level representations to produce context sensitive representations is trained on relatively little labeled data. In this paper, we demonstrate a general semi-supervised approach for adding pretrained context embeddings from bidirectional language models to NLP systems and apply it to sequence labeling tasks. We evaluate our model on two standard datasets for named entity recognition (NER) and chunking, and in both cases achieve state of the art results, surpassing previous systems that use other forms of transfer or joint learning with additional labeled data and task specific gazetteers.", "keyphrases": ["language model", "entity recognition", "semi-supervised sequence", "advance", "many nlp task"]} +{"id": "louis-nenkova-2013-makes", "title": "What Makes Writing Great? First Experiments on Article Quality Prediction in the Science Journalism Domain", "abstract": "Great writing is rare and highly admired. 
Readers seek out articles that are beautifully written, informative and entertaining. Yet information-access technologies lack capabilities for predicting article quality at this level. In this paper we present first experiments on article quality prediction in the science journalism domain. We introduce a corpus of great pieces of science journalism, along with typical articles from the genre. We implement features to capture aspects of great writing, including surprising, visual and emotional content, as well as general features related to discourse organization and sentence structure. We show that the distinction between great and typical articles can be detected fairly accurately, and that the entire spectrum of our features contribute to the distinction.", "keyphrases": ["article quality", "science journalism domain", "great writing"]} +{"id": "van-hee-etal-2015-detection", "title": "Detection and Fine-Grained Classification of Cyberbullying Events", "abstract": "In the current era of online interactions, both positive and negative experiences are abundant on the Web. As in real life, negative experiences can have a serious impact on youngsters. Recent studies have reported cybervictimization rates among teenagers that vary between 20% and 40%. In this paper, we focus on cyberbullying as a particular form of cybervictimization and explore its automatic detection and fine-grained classification. Data containing cyberbullying was collected from the social networking site Ask.fm. We developed and applied a new scheme for cyberbullying annotation, which describes the presence and severity of cyberbullying, a post author's role (harasser, victim or bystander) and a number of fine-grained categories related to cyberbullying, such as insults and threats. We present experimental results on the automatic detection of cyberbullying and explore the feasibility of detecting the more fine-grained cyberbullying categories in online posts. For the first task, an F-score of 55.39% is obtained. We observe that the detection of the fine-grained categories (e.g. threats) is more challenging, presumably due to data sparsity, and because they are often expressed in a subtle and implicit way.", "keyphrases": ["fine-grained classification", "cyberbullying", "victim"]} +{"id": "shu-etal-2019-generating", "title": "Generating Diverse Translations with Sentence Codes", "abstract": "Users of machine translation systems may desire to obtain multiple candidates translated in different ways. In this work, we attempt to obtain diverse translations by using sentence codes to condition the sentence generation. We describe two methods to extract the codes, either with or without the help of syntax information. For diverse generation, we sample multiple candidates, each of which conditioned on a unique code. Experiments show that the sampled translations have much higher diversity scores when using reasonable sentence codes, where the translation quality is still on par with the baselines even under strong constraint imposed by the codes. In qualitative analysis, we show that our method is able to generate paraphrase translations with drastically different structures. 
The proposed approach can be easily adopted to existing translation systems as no modification to the model is required.", "keyphrases": ["diversity", "sentence code", "inference time", "bleu score"]} +{"id": "yan-etal-2021-consert", "title": "ConSERT: A Contrastive Framework for Self-Supervised Sentence Representation Transfer", "abstract": "Learning high-quality sentence representations benefits a wide range of natural language processing tasks. Though BERT-based pre-trained language models achieve high performance on many downstream tasks, the native derived sentence representations are proved to be collapsed and thus produce a poor performance on the semantic textual similarity (STS) tasks. In this paper, we present ConSERT, a Contrastive Framework for Self-Supervised SEntence Representation Transfer, that adopts contrastive learning to fine-tune BERT in an unsupervised and effective way. By making use of unlabeled texts, ConSERT solves the collapse issue of BERT-derived sentence representations and makes them more applicable for downstream tasks. Experiments on STS datasets demonstrate that ConSERT achieves an 8% relative improvement over the previous state-of-the-art, even comparable to the supervised SBERT-NLI. And when further incorporating NLI supervision, we achieve new state-of-the-art performance on STS tasks. Moreover, ConSERT obtains comparable results with only 1000 samples available, showing its robustness in data scarcity scenarios.", "keyphrases": ["contrastive framework", "sentence representation", "pre-trained language model", "consert", "data augmentation strategy"]} +{"id": "li-etal-2012-active", "title": "Active Learning for Chinese Word Segmentation", "abstract": "Currently, the best performing models for Chinese word segmentation (CWS) are extremely resource intensive in terms of annotation data quantity. One promising solution to minimize the cost of data acquisition is active learning, which aims to actively select the most useful instances to annotate for learning. Active learning on CWS, however, remains challenging due to its inherent nature. In this paper, we propose a Word Boundary Annotation (WBA) model to make effective active learning on CWS possible. This is achieved by annotating only those uncertain boundaries. In this way, the manual annotation cost is largely reduced, compared to annotating the whole character sequence. To further minimize the annotation effort, a diversity measurement among the instances is considered to avoid duplicate annotation. Experimental results show that employing the WBA model and the diversity measurement into active learning on CWS can save much annotation cost with little loss in the performance.", "keyphrases": ["chinese word segmentation", "annotation cost", "active learning"]} +{"id": "poesio-artstein-2008-anaphoric", "title": "Anaphoric Annotation in the ARRAU Corpus", "abstract": "Arrau is a new corpus annotated for anaphoric relations, with information about agreement and explicit representation of multiple antecedents for ambiguous anaphoric expressions and discourse antecedents for expressions which refer to abstract entities such as events, actions and plans. 
The corpus contains texts from different genres: task-oriented dialogues from the Trains-91 and Trains-93 corpus, narratives from the English Pear Stories corpus, newspaper articles from the Wall Street Journal portion of the Penn Treebank, and mixed text from the Gnome corpus.", "keyphrases": ["arrau corpus", "antecedent", "task-oriented dialogue", "anaphora", "discourse deixis"]} +{"id": "yin-roth-2018-twowingos", "title": "TwoWingOS: A Two-Wing Optimization Strategy for Evidential Claim Verification", "abstract": "Determining whether a given claim is supported by evidence is a fundamental NLP problem that is best modeled as Textual Entailment. However, given a large collection of text, finding evidence that could support or refute a given claim is a challenge in itself, amplified by the fact that different evidence might be needed to support or refute a claim. Nevertheless, most prior work decouples evidence finding from determining the truth value of the claim given the evidence. We propose to consider these two aspects jointly. We develop TwoWingOS (two-wing optimization strategy), a system that, while identifying appropriate evidence for a claim, also determines whether or not the claim is supported by the evidence. Given the claim, TwoWingOS attempts to identify a subset of the evidence candidates; given the predicted evidence, it then attempts to determine the truth value of the corresponding claim entailment problem. We treat this problem as coupled optimization problems, training a joint model for it. TwoWingOS offers two advantages: (i) Unlike pipeline systems it facilitates flexible-size evidence set, and (ii) Joint training improves both the claim entailment and the evidence identification. Experiments on a benchmark dataset show state-of-the-art performance.", "keyphrases": ["optimization strategy", "claim", "evidence identification", "twowingos"]} +{"id": "li-hovy-2014-model", "title": "A Model of Coherence Based on Distributed Sentence Representation", "abstract": "Coherence is what makes a multi-sentence text meaningful, both logically and syntactically. To solve the challenge of ordering a set of sentences into coherent order, existing approaches focus mostly on defining and using sophisticated features to capture the cross-sentence argumentation logic and syntactic relationships. But both argumentation semantics and cross-sentence syntax (such as coreference and tense rules) are very hard to formalize. In this paper, we introduce a neural network model for the coherence task based on distributed sentence representation. The proposed approach learns a syntactico-semantic representation for sentences automatically, using either recurrent or recursive neural networks. The architecture obviated the need for feature engineering, and learns sentence representations, which are to some extent able to capture the \u2018rules\u2019 governing coherent sentence structure. The proposed approach outperforms existing baselines and generates the state-of-the-art performance in standard coherence evaluation tasks.", "keyphrases": ["coherence", "sentence representation", "network model", "recursive neural network", "readability assessment"]} +{"id": "xue-palmer-2004-calibrating", "title": "Calibrating Features for Semantic Role Labeling", "abstract": "This paper takes a critical look at the features used in the semantic role tagging literature and shows that the information in the input, generally a syntactic parse tree, has yet to be fully exploited. 
We propose an additional set of features and our experiments show that these features lead to fairly significant improvements in the tasks we performed. We further show that different features are needed for different subtasks. Finally, we show that by using a Maximum Entropy classifier and fewer features, we achieved results comparable with the best previously reported results obtained with SVM models. We believe this is a clear indication that developing features that capture the right kind of information is crucial to advancing the state-of-the-art in semantic analysis.", "keyphrases": ["semantic role labeling", "maximum entropy classifier", "srl", "candidate", "argument identification"]} +{"id": "mairesse-walker-2011-controlling", "title": "Controlling User Perceptions of Linguistic Style: Trainable Generation of Personality Traits", "abstract": "Recent work in natural language generation has begun to take linguistic variation into account, developing algorithms that are capable of modifying the system's linguistic style based either on the user's linguistic style or other factors, such as personality or politeness. While stylistic control has traditionally relied on handcrafted rules, statistical methods are likely to be needed for generation systems to scale to the production of the large range of variation observed in human dialogues. Previous work on statistical natural language generation (SNLG) has shown that the grammaticality and naturalness of generated utterances can be optimized from data; however, these data-driven methods have not been shown to produce stylistic variation that is perceived by humans in the way that the system intended. This paper describes Personage, a highly parameterizable language generator whose parameters are based on psychological findings about the linguistic reflexes of personality. We present a novel SNLG method which uses parameter estimation models trained on personality-annotated data to predict the generation decisions required to convey any combination of scalar values along the five main dimensions of personality. A human evaluation shows that parameter estimation models produce recognizable stylistic variation along multiple dimensions, on a continuous scale, and without the computational cost incurred by overgeneration techniques.", "keyphrases": ["linguistic style", "personality trait", "parameterizable language generator"]} +{"id": "romanov-etal-2019-adversarial", "title": "Adversarial Decomposition of Text Representation", "abstract": "In this paper, we present a method for adversarial decomposition of text representation. This method can be used to decompose a representation of an input sentence into several independent vectors, each of them responsible for a specific aspect of the input sentence. We evaluate the proposed method on two case studies: the conversion between different social registers and diachronic language change. We show that the proposed method is capable of fine-grained controlled change of these aspects of the input sentence. It also learns a continuous (rather than categorical) representation of the style of the sentence, which is more linguistically realistic. The model uses adversarial-motivational training and includes a special motivational loss, which acts opposite to the discriminator and encourages a better decomposition.
Furthermore, we evaluate the obtained meaning embeddings on a downstream task of paraphrase detection and show that they significantly outperform the embeddings of a regular autoencoder.", "keyphrases": ["decomposition", "text representation", "adversarial-motivational training", "special motivational loss", "discriminator"]} +{"id": "zhang-etal-2021-bstc", "title": "BSTC: A Large-Scale Chinese-English Speech Translation Dataset", "abstract": "This paper presents BSTC (Baidu Speech Translation Corpus), a large-scale Chinese-English speech translation dataset. This dataset is constructed based on a collection of licensed videos of talks or lectures, including about 68 hours of Mandarin data, their manual transcripts and translations into English, as well as automated transcripts by an automatic speech recognition (ASR) model. We have further asked three experienced interpreters to simultaneously interpret the testing talks in a mock conference setting. This corpus is expected to promote the research of automatic simultaneous translation as well as the development of practical systems. We have organized simultaneous translation tasks and used this corpus to evaluate automatic simultaneous translation systems.", "keyphrases": ["speech translation dataset", "hour", "bstc"]} +{"id": "lee-etal-2015-overview", "title": "Overview of the NLP-TEA 2015 Shared Task for Chinese Grammatical Error Diagnosis", "abstract": "This paper introduces the NLP-TEA 2015 shared task for Chinese grammatical error diagnosis. We describe the task, data preparation, performance metrics, and evaluation results. The hope is that such an evaluation campaign may produce more advanced Chinese grammatical error diagnosis techniques. All data sets with gold standards and evaluation tools are publicly available for research purposes.", "keyphrases": ["nlp-tea", "shared task", "grammatical error diagnosis"]} +{"id": "ren-etal-2019-generating", "title": "Generating Natural Language Adversarial Examples through Probability Weighted Word Saliency", "abstract": "We address the problem of adversarial attacks on text classification, which is rarely studied compared to attacks on image classification. The challenge of this task is to generate adversarial examples that maintain lexical correctness, grammatical correctness and semantic similarity. Based on the synonym substitution strategy, we introduce a new word replacement order determined by both the word saliency and the classification probability, and propose a greedy algorithm called probability weighted word saliency (PWWS) for text adversarial attack. Experiments on three popular datasets using convolutional as well as LSTM models show that PWWS reduces the classification accuracy to the greatest extent, and keeps a very low word substitution rate. A human evaluation study shows that our generated adversarial examples maintain the semantic similarity well and are hard for humans to perceive. Performing adversarial training using our perturbed datasets improves the robustness of the models.
Finally, our method also exhibits good transferability on the generated adversarial examples.", "keyphrases": ["adversarial example", "word saliency", "grammatical correctness", "greedy algorithm", "perturbation"]} +{"id": "schulte-im-walde-etal-2016-role", "title": "The Role of Modifier and Head Properties in Predicting the Compositionality of English and German Noun-Noun Compounds: A Vector-Space Perspective", "abstract": "In this paper, we explore the role of constituent properties in English and German noun-noun compounds (corpus frequencies of the compounds and their constituents; productivity and ambiguity of the constituents; and semantic relations between the constituents), when predicting the degrees of compositionality of the compounds within a vector space model. The results demonstrate that the empirical and semantic properties of the compounds and the head nouns play a significant role.", "keyphrases": ["modifier", "compositionality", "noun-noun compound"]} +{"id": "pampari-etal-2018-emrqa", "title": "emrQA: A Large Corpus for Question Answering on Electronic Medical Records", "abstract": "We propose a novel methodology to generate domain-specific large-scale question answering (QA) datasets by re-purposing existing annotations for other NLP tasks. We demonstrate an instance of this methodology in generating a large-scale QA dataset for electronic medical records by leveraging existing expert annotations on clinical notes for various NLP tasks from the community shared i2b2 datasets. The resulting corpus (emrQA) has 1 million question-logical form pairs and 400,000+ question-answer evidence pairs. We characterize the dataset and explore its learning potential by training baseline models for question to logical form and question to answer mapping.", "keyphrases": ["record", "emrqa", "domain knowledge", "question-logical form pair", "clinical question"]} +{"id": "li-etal-2020-unified", "title": "A Unified MRC Framework for Named Entity Recognition", "abstract": "The task of named entity recognition (NER) is normally divided into nested NER and flat NER depending on whether named entities are nested or not. Models are usually separately developed for the two tasks, since sequence labeling models, the most widely used backbone for flat NER, are only able to assign a single label to a particular token, which is unsuitable for nested NER where a token may be assigned several labels. In this paper, we propose a unified framework that is capable of handling both flat and nested NER tasks. Instead of treating the task of NER as a sequence labeling problem, we propose to formulate it as a machine reading comprehension (MRC) task. For example, extracting entities with the per label is formalized as extracting answer spans to the question \u201cwhich person is mentioned in the text\u201d. This formulation naturally tackles the entity overlapping issue in nested NER: the extraction of two overlapping entities with different categories requires answering two independent questions. Additionally, since the query encodes informative prior knowledge, this strategy facilitates the process of entity extraction, leading to better performances for not only nested NER, but also flat NER. We conduct experiments on both nested and flat NER datasets. Experiment results demonstrate the effectiveness of the proposed formulation.
We are able to achieve substantial performance gains over current SOTA models on nested NER datasets, i.e., +1.28, +2.55, +5.44, +6.37, respectively on ACE04, ACE05, GENIA and KBP17, along with SOTA results on flat NER datasets, i.e., +0.24, +1.95, +0.21, +1.49 respectively on English CoNLL 2003, English OntoNotes 5.0, Chinese MSRA and Chinese OntoNotes 4.0.", "keyphrases": ["mrc", "entity recognition", "nested ner task", "machine reading comprehension", "information extraction"]} +{"id": "song-etal-2019-semantic", "title": "Semantic Neural Machine Translation Using AMR", "abstract": "It is intuitive that semantic representations can be useful for machine translation, mainly because they can help in enforcing meaning preservation and handling data sparsity (many sentences correspond to one meaning) of machine translation models. On the other hand, little work has been done on leveraging semantics for neural machine translation (NMT). In this work, we study the usefulness of AMR (abstract meaning representation) on NMT. Experiments on a standard English-to-German dataset show that incorporating AMR as additional knowledge can significantly improve a strong attention-based sequence-to-sequence neural translation model.", "keyphrases": ["usefulness", "amr", "neural translation model"]} +{"id": "martindale-carpuat-2018-fluency", "title": "Fluency Over Adequacy: A Pilot Study in Measuring User Trust in Imperfect MT", "abstract": "Although measuring intrinsic quality has been a key factor in the advancement of Machine Translation (MT), successfully deploying MT requires considering not just intrinsic quality but also the user experience, including aspects such as trust. This work introduces a method of studying how users modulate their trust in an MT system after seeing errorful (disfluent or inadequate) output amidst good (fluent and adequate) output. We conduct a survey to determine how users respond to good translations compared to translations that are either adequate but not fluent, or fluent but not adequate. In this pilot study, users responded strongly to disfluent translations, but were, surprisingly, much less concerned with adequacy.", "keyphrases": ["adequacy", "trust", "fluency"]} +{"id": "liu-etal-2010-semantic", "title": "Semantic Role Labeling for News Tweets", "abstract": "News tweets that report what is happening have become an important real-time information source. We raise the problem of Semantic Role Labeling (SRL) for news tweets, which is meaningful for fine grained information extraction and retrieval. We present a self-supervised learning approach to train a domain specific SRL system to resolve the problem. A large volume of training data is automatically labeled, by leveraging the existing SRL system on news domain and content similarity between news and news tweets. On a human annotated test set, our system achieves state-of-the-art performance, outperforming the SRL system trained on news.", "keyphrases": ["news tweet", "information extraction", "semantic role labeling"]} +{"id": "tratz-hovy-2010-isi", "title": "ISI: Automatic Classification of Relations Between Nominals Using a Maximum Entropy Classifier", "abstract": "The automatic interpretation of semantic relations between nominals is an important subproblem within natural language understanding applications and is an area of increasing interest. In this paper, we present the system we used to participate in the SemEval 2010 Task 8 Multi-Way Classification of Semantic Relations between Pairs of Nominals.
Our system, based upon a Maximum Entropy classifier trained using a large number of boolean features, received the third highest score.", "keyphrases": ["nominals", "maximum entropy classifier", "semantic relation"]} +{"id": "tiedemann-scherrer-2017-neural", "title": "Neural Machine Translation with Extended Context", "abstract": "We investigate the use of extended context in attention-based neural machine translation. We base our experiments on translated movie subtitles and discuss the effect of increasing the segments beyond single translation units. We study the use of extended source language context as well as bilingual context extensions. The models learn to distinguish between information from different segments and are surprisingly robust with respect to translation quality. In this pilot study, we observe interesting cross-sentential attention patterns that improve textual coherence in translation at least in some selected cases.", "keyphrases": ["extended context", "translation quality", "coherence", "neural machine translation", "consecutive sentence"]} +{"id": "smith-etal-2005-context", "title": "Context-Based Morphological Disambiguation with Random Fields", "abstract": "Finite-state approaches have been highly successful at describing the morphological processes of many languages. Such approaches have largely focused on modeling the phone- or character-level processes that generate candidate lexical types, rather than tokens in context. For the full analysis of words in context, disambiguation is also required (Hakkani-Tur et al., 2000; Hajic et al., 2001). In this paper, we apply a novel source-channel model to the problem of morphological disambiguation (segmentation into morphemes, lemmatization, and POS tagging) for concatenative, templatic, and inflectional languages. The channel model exploits an existing morphological dictionary, constraining each word's analysis to be linguistically valid. The source model is a factored, conditionally-estimated random field (Lafferty et al., 2001) that learns to disambiguate the full sentence by modeling local contexts. Compared with baseline state-of-the-art methods, our method achieves statistically significant error rate reductions on Korean, Arabic, and Czech, for various training set sizes and accuracy measures.", "keyphrases": ["morphological disambiguation", "random field", "pos tagging"]} +{"id": "klein-manning-2003-accurate", "title": "Accurate Unlexicalized Parsing", "abstract": "We demonstrate that an unlexicalized PCFG can parse much more accurately than previously shown, by making use of simple, linguistically motivated state splits, which break down false independence assumptions latent in a vanilla treebank grammar. Indeed, its performance of 86.36% (LP/LR F1) is better than that of early lexicalized PCFG models, and surprisingly close to the current state-of-the-art. 
This result has potential uses beyond establishing a strong lower bound on the maximum possible accuracy of unlexicalized models: an unlexicalized PCFG is much more compact, easier to replicate, and easier to interpret than more complex lexical models, and the parsing algorithms are simpler, more widely understood, of lower asymptotic complexity, and easier to optimize.", "keyphrases": ["unlexicalized pcfg", "pcfg", "stanford parser", "symbol", "refinement"]} +{"id": "wang-manning-2012-baselines", "title": "Baselines and Bigrams: Simple, Good Sentiment and Topic Classification", "abstract": "Variants of Naive Bayes (NB) and Support Vector Machines (SVM) are often used as baseline methods for text classification, but their performance varies greatly depending on the model variant, features used and task/dataset. We show that: (i) the inclusion of word bigram features gives consistent gains on sentiment analysis tasks; (ii) for short snippet sentiment tasks, NB actually does better than SVMs (while for longer documents the opposite result holds); (iii) a simple but novel SVM variant using NB log-count ratios as feature values consistently performs well across tasks and datasets. Based on these observations, we identify simple NB and SVM variants which outperform most published results on sentiment analysis datasets, sometimes providing a new state-of-the-art performance level.", "keyphrases": ["naive bayes", "svm", "bag-of-word", "bow", "document representation"]} +{"id": "gandrabur-foster-2003-confidence", "title": "Confidence estimation for translation prediction", "abstract": "The purpose of this work is to investigate the use of machine learning approaches for confidence estimation within a statistical machine translation application. Specifically, we attempt to learn probabilities of correctness for various model predictions, based on the native probabilities (i.e. the probabilities given by the original model) and on features of the current context. Our experiments were conducted using three original translation models and two types of neural nets (single-layer and multilayer perceptrons) for the confidence estimation task.", "keyphrases": ["machine translation", "confidence estimation", "quality indicator"]} +{"id": "luo-etal-2004-mention", "title": "A Mention-Synchronous Coreference Resolution Algorithm Based On the Bell Tree", "abstract": "This paper proposes a new approach for coreference resolution which uses the Bell tree to represent the search space and casts the coreference resolution problem as finding the best path from the root of the Bell tree to the leaf nodes. A Maximum Entropy model is used to rank these paths. The coreference performance on the 2002 and 2003 Automatic Content Extraction (ACE) data will be reported. We also train a coreference system using the MUC6 data and competitive results are obtained.", "keyphrases": ["coreference resolution", "mention", "entity-level feature"]} +{"id": "choubey-etal-2018-identifying", "title": "Identifying the Most Dominant Event in a News Article by Mining Event Coreference Relations", "abstract": "Identifying the most dominant and central event of a document, which governs and connects other foreground and background events in the document, is useful for many applications, such as text summarization, storyline generation and text segmentation. We observed that the central event of a document usually has many coreferential event mentions that are scattered throughout the document for enabling a smooth transition of subtopics.
Our empirical experiments, using gold event coreference relations, have shown that the central event of a document can be well identified by mining properties of event coreference chains. But the performance drops when switching to system predicted event coreference relations. In addition, we found that the central event can be more accurately identified by further considering the number of sub-events as well as the realis status of an event.", "keyphrases": ["dominant event", "event coreference relation", "news article"]} +{"id": "bostan-etal-2020-goodnewseveryone", "title": "GoodNewsEveryone: A Corpus of News Headlines Annotated with Emotions, Semantic Roles, and Reader Perception", "abstract": "Most research on emotion analysis from text focuses on the task of emotion classification or emotion intensity regression. Fewer works address emotions as a phenomenon to be tackled with structured learning, which can be explained by the lack of relevant datasets. We fill this gap by releasing a dataset of 5000 English news headlines annotated via crowdsourcing with their associated emotions, the corresponding emotion experiencers and textual cues, related emotion causes and targets, as well as the reader's perception of the emotion of the headline. This annotation task is comparably challenging, given the large number of classes and roles to be identified. We therefore propose a multiphase annotation procedure in which we first find relevant instances with emotional content and then annotate the more fine-grained aspects. Finally, we develop a baseline for the task of automatic prediction of semantic role structures and discuss the results. The corpus we release enables further research on emotion classification, emotion intensity prediction, emotion cause detection, and supports further qualitative studies.", "keyphrases": ["emotion", "reader", "experiencer", "cue", "goodnewseveryone"]} +{"id": "huang-etal-2012-structured", "title": "Structured Perceptron with Inexact Search", "abstract": "Most existing theory of structured prediction assumes exact inference, which is often intractable in many practical problems. This leads to the routine use of approximate inference such as beam search, but there is not much theory behind it. Based on the structured perceptron, we propose a general framework of \"violation-fixing\" perceptrons for inexact search with a theoretical guarantee for convergence under new separability conditions. This framework subsumes and justifies the popular heuristic \"early-update\" for perceptron with beam search (Collins and Roark, 2004). We also propose several new update methods within this framework, among which the \"max-violation\" method dramatically reduces training time (by 3-fold as compared to early-update) on state-of-the-art part-of-speech tagging and incremental parsing systems.", "keyphrases": ["inexact search", "collins", "tagging"]} +{"id": "jia-etal-2019-cross", "title": "Cross-Domain NER using Cross-Domain Language Modeling", "abstract": "Due to the limitation of labeled resources, cross-domain named entity recognition (NER) has been a challenging task. Most existing work considers a supervised setting, making use of labeled data for both the source and target domains. A disadvantage of such methods is that they cannot train for domains without NER data.
To address this issue, we consider using a cross-domain LM as a bridge across domains for NER domain adaptation, performing cross-domain and cross-task knowledge transfer by designing a novel parameter generation network. Results show that our method can effectively extract domain differences from cross-domain LM contrast, allowing unsupervised domain adaptation while also giving state-of-the-art results among supervised domain adaptation methods.", "keyphrases": ["domain adaptation", "cross-task knowledge transfer", "cross-domain ner", "language modeling task"]} +{"id": "feng-etal-2021-multidoc2dial", "title": "MultiDoc2Dial: Modeling Dialogues Grounded in Multiple Documents", "abstract": "We propose MultiDoc2Dial, a new task and dataset on modeling goal-oriented dialogues grounded in multiple documents. Most previous works treat document-grounded dialogue modeling as a machine reading comprehension task based on a single given document or passage. In this work, we aim to address more realistic scenarios where a goal-oriented information-seeking conversation involves multiple topics, and hence is grounded on different documents. To facilitate such a task, we introduce a new dataset that contains dialogues grounded in multiple documents from four different domains. We also explore modeling the dialogue-based and document-based contexts in the dataset. We present strong baseline approaches and various experimental results, aiming to support further research efforts on such a task.", "keyphrases": ["multiple document", "conversation", "dialdoc"]} +{"id": "borin-etal-2012-korp", "title": "Korp \u2014 the corpus infrastructure of Spr\u00e5kbanken", "abstract": "We present Korp, the corpus infrastructure of Spr\u00e5kbanken (the Swedish Language Bank). The infrastructure consists of three main components: the Korp corpus pipeline, the Korp backend, and the Korp frontend. The Korp corpus pipeline is used for importing corpora, annotating them, and then exporting the annotated corpora into different formats. An essential feature of the pipeline is the ability to leave existing annotations untouched, both structural and word level annotations, and to use the existing annotations as the foundation of other annotations. The Korp backend consists of a set of REST-based web services for searching in and retrieving information about the corpora. Finally, the Korp frontend is a graphical search interface that interacts with the Korp backend. The interface has been inspired by corpus search interfaces such as SketchEngine, Glossa, and DeepDict, and it uses State Chart XML (SCXML) in order to enable users to bookmark interaction states. We give a functional and technical overview of the three components, followed by a discussion of planned future work.", "keyphrases": ["corpus infrastructure", "spr\u00e5kbanken", "korp"]} +{"id": "cherry-2008-cohesive", "title": "Cohesive Phrase-Based Decoding for Statistical Machine Translation", "abstract": "Phrase-based decoding produces state-of-the-art translations with no regard for syntax. We add syntax to this process with a cohesion constraint based on a dependency tree for the source sentence. The constraint allows the decoder to employ arbitrary, non-syntactic phrases, but ensures that those phrases are translated in an order that respects the source tree\u2019s structure. In this way, we target the phrasal decoder\u2019s weakness in order modeling, without affecting its strengths.
To further increase flexibility, we incorporate cohesion as a decoder feature, creating a soft constraint. The resulting cohesive, phrase-based decoder is shown to produce translations that are preferred over non-cohesive output in both automatic and human evaluations.", "keyphrases": ["decoding", "cohesion", "syntactic constraint"]} +{"id": "xu-etal-2018-unsupervised-cross", "title": "Unsupervised Cross-lingual Transfer of Word Embedding Spaces", "abstract": "Cross-lingual transfer of word embeddings aims to establish the semantic mappings among words in different languages by learning the transformation functions over the corresponding word embedding spaces. Successfully solving this problem would benefit many downstream tasks such as translating text classification models from resource-rich languages (e.g. English) to low-resource languages. Supervised methods for this problem rely on the availability of cross-lingual supervision, either using parallel corpora or bilingual lexicons as the labeled data for training, which may not be available for many low resource languages. This paper proposes an unsupervised learning approach that does not require any cross-lingual labeled data. Given two monolingual word embedding spaces for any language pair, our algorithm optimizes the transformation functions in both directions simultaneously based on distributional matching as well as minimizing the back-translation losses. We use a neural network implementation to calculate the Sinkhorn distance, a well-defined distributional similarity measure, and optimize our objective through back-propagation. Our evaluation on benchmark datasets for bilingual lexicon induction and cross-lingual word similarity prediction shows stronger or competitive performance of the proposed method compared to other state-of-the-art supervised and unsupervised baseline methods over many language pairs.", "keyphrases": ["cross-lingual transfer", "lexicon induction", "many language pair"]} +{"id": "chen-cardie-2018-multinomial", "title": "Multinomial Adversarial Networks for Multi-Domain Text Classification", "abstract": "Many text classification tasks are known to be highly domain-dependent. Unfortunately, the availability of training data can vary drastically across domains. Worse still, for some domains there may not be any annotated data at all. In this work, we propose a multinomial adversarial network (MAN) to tackle this real-world problem of multi-domain text classification (MDTC) in which labeled data may exist for multiple domains, but in insufficient amounts to train effective classifiers for one or more of the domains. We provide theoretical justifications for the MAN framework, proving that different instances of MANs are essentially minimizers of various f-divergence metrics (Ali and Silvey, 1966) among multiple probability distributions. MANs are thus a theoretically sound generalization of traditional adversarial networks that discriminate over two distributions. More specifically, for the MDTC task, MAN learns features that are invariant across multiple domains by resorting to its ability to reduce the divergence among the feature distributions of each domain. We present experimental results showing that MANs significantly outperform the prior art on the MDTC task.
We also show that MANs achieve state-of-the-art performance for domains with no labeled data.", "keyphrases": ["text classification", "man", "multinomial adversarial network"]} +{"id": "etchegoyhen-etal-2014-machine", "title": "Machine Translation for Subtitling: A Large-Scale Evaluation", "abstract": "This article describes a large-scale evaluation of the use of Statistical Machine Translation for professional subtitling. The work was carried out within the FP7 EU-funded project SUMAT and involved two rounds of evaluation: a quality evaluation and a measure of productivity gain/loss. We present the SMT systems built for the project and the corpora they were trained on, which combine professionally created and crowd-sourced data. Evaluation goals, methodology and results are presented for the eleven translation pairs that were evaluated by professional subtitlers. Overall, a majority of the machine translated subtitles received good quality ratings. The results were also positive in terms of productivity, with a global gain approaching 40%. We also evaluated the impact of applying quality estimation and filtering of poor MT output, which resulted in higher productivity gains for filtered files as opposed to fully machine-translated files. Finally, we present and discuss feedback from the subtitlers who participated in the evaluation, a key aspect for any eventual adoption of machine translation technology in professional subtitling.", "keyphrases": ["subtitler", "machine translation", "sumat project"]} +{"id": "turney-2006-similarity", "title": "Similarity of Semantic Relations", "abstract": "There are at least two kinds of similarity. Relational similarity is correspondence between relations, in contrast with attributional similarity, which is correspondence between attributes. When two words have a high degree of attributional similarity, we call them synonyms. When two pairs of words have a high degree of relational similarity, we say that their relations are analogous. For example, the word pair mason:stone is analogous to the pair carpenter:wood. This article introduces Latent Relational Analysis (LRA), a method for measuring relational similarity. LRA has potential applications in many areas, including information extraction, word sense disambiguation, and information retrieval. Recently the Vector Space Model (VSM) of information retrieval has been adapted to measuring relational similarity, achieving a score of 47% on a collection of 374 college-level multiple-choice word analogy questions. In the VSM approach, the relation between a pair of words is characterized by a vector of frequencies of predefined patterns in a large corpus. LRA extends the VSM approach in three ways: (1) The patterns are derived automatically from the corpus, (2) the Singular Value Decomposition (SVD) is used to smooth the frequency data, and (3) automatically generated synonyms are used to explore variations of the word pairs. LRA achieves 56% on the 374 analogy questions, statistically equivalent to the average human score of 57%. 
On the related problem of classifying semantic relations, LRA achieves similar gains over the VSM.", "keyphrases": ["relational similarity", "attributional similarity", "wood", "lra", "word sense disambiguation"]} +{"id": "williams-etal-2018-broad", "title": "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference", "abstract": "This paper introduces the Multi-Genre Natural Language Inference (MultiNLI) corpus, a dataset designed for use in the development and evaluation of machine learning models for sentence understanding. At 433k examples, this resource is one of the largest corpora available for natural language inference (a.k.a. recognizing textual entailment), improving upon available resources in both its coverage and difficulty. MultiNLI accomplishes this by offering data from ten distinct genres of written and spoken English, making it possible to evaluate systems on nearly the full complexity of the language, while supplying an explicit setting for evaluating cross-genre domain adaptation. In addition, an evaluation using existing machine learning models designed for the Stanford NLI corpus shows that it represents a substantially more difficult task than does that corpus, despite the two showing similar levels of inter-annotator agreement.", "keyphrases": ["sentence understanding", "entailment", "nli", "contradiction", "large-scale annotated dataset"]} +{"id": "ye-etal-2020-safer", "title": "SAFER: A Structure-free Approach for Certified Robustness to Adversarial Word Substitutions", "abstract": "State-of-the-art NLP models can often be fooled by human-unaware transformations such as synonymous word substitution. For security reasons, it is of critical importance to develop models with certified robustness that can provably guarantee that the prediction cannot be altered by any possible synonymous word substitution. In this work, we propose a certified robust method based on a new randomized smoothing technique, which constructs a stochastic ensemble by applying random word substitutions on the input sentences, and leverages the statistical properties of the ensemble to provably certify the robustness. Our method is simple and structure-free in that it only requires the black-box queries of the model outputs, and hence can be applied to any pre-trained models (such as BERT) and any types of models (word-level or subword-level). Our method significantly outperforms recent state-of-the-art methods for certified robustness on both IMDB and Amazon text classification tasks. To the best of our knowledge, we are the first work to achieve certified robustness on large systems such as BERT with practically meaningful certified accuracy.", "keyphrases": ["robustness", "safer", "word substitution attack"]} +{"id": "mason-2004-cormet", "title": "CorMet: A Computational, Corpus-Based Conventional Metaphor Extraction System", "abstract": "CorMet is a corpus-based system for discovering metaphorical mappings between concepts. It does this by finding systematic variations in domain-specific selectional preferences, which are inferred from large, dynamically mined Internet corpora. Metaphors transfer structure from a source domain to a target domain, making some concepts in the target domain metaphorically equivalent to concepts in the source domain. The verbs that select for a concept in the source domain tend to select for its metaphorical equivalent in the target domain.
This regularity, detectable with a shallow linguistic analysis, is used to find the metaphorical interconcept mappings, which can then be used to infer the existence of higher-level conventional metaphors. Most other computational metaphor systems use small, hand-coded semantic knowledge bases and work on a few examples. Although CorMet's only knowledge base is WordNet (Fellbaum 1998), it can find the mappings constituting many conventional metaphors and in some cases recognize sentences instantiating those mappings. CorMet is tested on its ability to find a subset of the Master Metaphor List (Lakoff, Espenson, and Schwartz 1991).", "keyphrases": ["metaphor", "corpus-based system", "domain-specific selectional preference"]} +{"id": "birch-etal-2007-ccg", "title": "CCG Supertags in Factored Statistical Machine Translation", "abstract": "Combinatorial Categorial Grammar (CCG) supertags present phrase-based machine translation with an opportunity to access rich syntactic information at a word level. The challenge is incorporating this information into the translation process. Factored translation models allow the inclusion of supertags as a factor in the source or target language. We show that this results in an improvement in the quality of translation and that the value of syntactic supertags in flat structured phrase-based models is largely due to better local reorderings.", "keyphrases": ["supertag", "factor", "combinatorial categorial grammar", "translation model", "ccg"]} +{"id": "wan-2008-exploration", "title": "An Exploration of Document Impact on Graph-Based Multi-Document Summarization", "abstract": "The graph-based ranking algorithm has been recently exploited for multi-document summarization by making only use of the sentence-to-sentence relationships in the documents, under the assumption that all the sentences are indistinguishable. However, given a document set to be summarized, different documents are usually not equally important, and moreover, different sentences in a specific document are usually differently important. This paper aims to explore document impact on summarization performance. We propose a document-based graph model to incorporate the document-level information and the sentence-to-document relationship into the graph-based ranking process. Various methods are employed to evaluate the two factors. Experimental results on the DUC2001 and DUC2002 datasets demonstrate the good effectiveness of the proposed model. Moreover, the results show the robustness of the proposed model.", "keyphrases": ["document impact", "summarization", "document-level information", "graph-based ranking process"]} +{"id": "baroni-etal-2012-entailment", "title": "Entailment above the word level in distributional semantics", "abstract": "We introduce two ways to detect entailment using distributional semantic representations of phrases. Our first experiment shows that the entailment relation between adjective-noun constructions and their head nouns (big cat|= cat), once represented as semantic vector pairs, generalizes to lexical entailment among nouns (dog|= animal). Our second experiment shows that a classifier fed semantic vector pairs can similarly generalize the entailment relation among quantifier phrases (many dogs|= some dogs) to entailment involving unseen quantifiers (all cats|= several cats).
Moreover, nominal and quantifier phrase entailment appears to be cued by different distributional correlates, as predicted by the type-based view of entailment in formal semantics.", "keyphrases": ["quantifier phrase", "entailment", "hypernym"]} +{"id": "naseem-etal-2012-selective", "title": "Selective Sharing for Multilingual Dependency Parsing", "abstract": "We present a novel algorithm for multilingual dependency parsing that uses annotations from a diverse set of source languages to parse a new unannotated language. Our motivation is to broaden the advantages of multilingual learning to languages that exhibit significant differences from existing resource-rich languages. The algorithm learns which aspects of the source languages are relevant for the target language and ties model parameters accordingly. The model factorizes the process of generating a dependency tree into two steps: selection of syntactic dependents and their ordering. Being largely language-universal, the selection component is learned in a supervised fashion from all the training languages. In contrast, the ordering decisions are only influenced by languages with similar properties. We systematically model this cross-lingual sharing using typological features. In our experiments, the model consistently outperforms a state-of-the-art multi-lingual parser. The largest improvement is achieved on the non-Indo-European languages, yielding a gain of 14.4%.", "keyphrases": ["multilingual dependency parsing", "significant difference", "typological feature", "selective sharing", "cross-lingual transfer"]} +{"id": "chodorow-etal-2007-detection", "title": "Detection of Grammatical Errors Involving Prepositions", "abstract": "This paper presents ongoing work on the detection of preposition errors of non-native speakers of English. Since prepositions account for a substantial proportion of all grammatical errors by ESL (English as a Second Language) learners, developing an NLP application that can reliably detect these types of errors will provide an invaluable learning resource to ESL students. To address this problem, we use a maximum entropy classifier combined with rule-based filters to detect preposition errors in a corpus of student essays. Although our work is preliminary, we achieve a precision of 0.8 with a recall of 0.3.", "keyphrases": ["grammatical error", "preposition", "detection"]} +{"id": "futrell-etal-2019-neural", "title": "Neural language models as psycholinguistic subjects: Representations of syntactic state", "abstract": "We investigate the extent to which the behavior of neural network language models reflects incremental representations of syntactic state. To do so, we employ experimental methodologies which were originally developed in the field of psycholinguistics to study syntactic representation in the human mind. We examine neural network model behavior on sets of artificial sentences containing a variety of syntactically complex structures. These sentences not only test whether the networks have a representation of syntactic state, they also reveal the specific lexical cues that networks use to update these states. We test four models: two publicly available LSTM sequence models of English (Jozefowicz et al., 2016; Gulordava et al., 2018) trained on large datasets; an RNN Grammar (Dyer et al., 2016) trained on a small, parsed dataset; and an LSTM trained on the same small corpus as the RNNG.
We find evidence for basic syntactic state representations in all models, but only the models trained on large datasets are sensitive to subtle lexical cues signaling changes in syntactic state.", "keyphrases": ["syntactic state", "neural language model", "language modeling performance", "human-like representation"]} +{"id": "gerani-etal-2014-abstractive", "title": "Abstractive Summarization of Product Reviews Using Discourse Structure", "abstract": "We propose a novel abstractive summarization system for product reviews by taking advantage of their discourse structure. First, we apply a discourse parser to each review and obtain a discourse tree representation for every review. We then modify the discourse trees such that every leaf node only contains the aspect words. Second, we aggregate the aspect discourse trees and generate a graph. We then select a subgraph representing the most important aspects and the rhetorical relations between them using a PageRank algorithm, and transform the selected subgraph into an aspect tree. Finally, we generate a natural language summary by applying a template-based NLG framework. Quantitative and qualitative analysis of the results, based on two user studies, show that our approach significantly outperforms extractive and abstractive baselines.", "keyphrases": ["review", "discourse structure", "summarization system", "sentiment analysis"]} +{"id": "angelidis-lapata-2018-summarizing", "title": "Summarizing Opinions: Aspect Extraction Meets Sentiment Prediction and They Are Both Weakly Supervised", "abstract": "We present a neural framework for opinion summarization from online product reviews which is knowledge-lean and only requires light supervision (e.g., in the form of product domain labels and user-provided ratings). Our method combines two weakly supervised components to identify salient opinions and form extractive summaries from multiple reviews: an aspect extractor trained under a multi-task objective, and a sentiment predictor based on multiple instance learning. We introduce an opinion summarization dataset that includes a training set of product reviews from six diverse domains and human-annotated development and test sets with gold standard aspect annotations, salience labels, and opinion summaries. Automatic evaluation shows significant improvements over baselines, and a large-scale study indicates that our opinion summaries are preferred by human judges according to multiple criteria.", "keyphrases": ["opinion", "aspect annotation", "extractive summarization", "weakly-supervised method"]} +{"id": "clark-etal-2019-sentence", "title": "Sentence Mover's Similarity: Automatic Evaluation for Multi-Sentence Texts", "abstract": "For evaluating machine-generated texts, automatic methods hold the promise of avoiding collection of human judgments, which can be expensive and time-consuming. The most common automatic metrics, like BLEU and ROUGE, depend on exact word matching, an inflexible approach for measuring semantic similarity. We introduce methods based on sentence mover's similarity; our automatic metrics evaluate text in a continuous space using word and sentence embeddings. We find that sentence-based metrics correlate with human judgments significantly better than ROUGE, both on machine-generated summaries (average length of 3.4 sentences) and human-authored essays (average length of 7.5). 
We also show that sentence mover's similarity can be used as a reward when learning a generation model via reinforcement learning; we present both automatic and human evaluations of summaries learned in this way, finding that our approach outperforms ROUGE.", "keyphrases": ["human judgment", "word matching", "sentence mover"]} +{"id": "cohn-specia-2013-modelling", "title": "Modelling Annotator Bias with Multi-task Gaussian Processes: An Application to Machine Translation Quality Estimation", "abstract": "Annotating linguistic data is often a complex, time consuming and expensive endeavour. Even with strict annotation guidelines, human subjects often deviate in their analyses, each bringing different biases, interpretations of the task and levels of consistency. We present novel techniques for learning from the outputs of multiple annotators while accounting for annotator specific behaviour. These techniques use multi-task Gaussian Processes to learn jointly a series of annotator and metadata specific models, while explicitly representing correlations between models which can be learned directly from data. Our experiments on two machine translation quality estimation datasets show uniform significant accuracy gains from multi-task learning, and consistently outperform strong baselines.", "keyphrases": ["annotator bias", "multi-task gaussian processes", "regression"]} +{"id": "piskorski-etal-2019-second", "title": "The Second Cross-Lingual Challenge on Recognition, Normalization, Classification, and Linking of Named Entities across Slavic Languages", "abstract": "We describe the Second Multilingual Named Entity Challenge in Slavic languages. The task is recognizing mentions of named entities in Web documents, their normalization, and cross-lingual linking. The Challenge was organized as part of the 7th Balto-Slavic Natural Language Processing Workshop, co-located with the ACL-2019 conference. Eight teams participated in the competition, which covered four languages and five entity types. Performance for the named entity recognition task reached 90% F-measure, much higher than reported in the first edition of the Challenge. Seven teams covered all four languages, and five teams participated in the cross-lingual entity linking task. Detailed evaluation information is available on the shared task web page.", "keyphrases": ["linking", "slavic language", "cross-lingual linking", "edition"]} +{"id": "deyoung-etal-2020-eraser", "title": "ERASER: A Benchmark to Evaluate Rationalized NLP Models", "abstract": "State-of-the-art models in NLP are now predominantly based on deep neural networks that are opaque in terms of how they come to make predictions. This limitation has increased interest in designing more interpretable deep models for NLP that reveal the `reasoning' behind model outputs. But work in this direction has been conducted on different datasets and tasks with correspondingly unique aims and metrics; this makes it difficult to track progress. We propose the Evaluating Rationales And Simple English Reasoning (ERASER) benchmark to advance research on interpretable models in NLP. This benchmark comprises multiple datasets and tasks for which human annotations of \u201crationales\u201d (supporting evidence) have been collected. We propose several metrics that aim to capture how well the rationales provided by models align with human rationales, and also how faithful these rationales are (i.e., the degree to which provided rationales influenced the corresponding predictions).
Our hope is that releasing this benchmark facilitates progress on designing more interpretable NLP systems. The benchmark, code, and documentation are available at ", "keyphrases": ["rationale", "eraser", "agreement", "e-snli"]} +{"id": "jacovi-goldberg-2020-towards", "title": "Towards Faithfully Interpretable NLP Systems: How Should We Define and Evaluate Faithfulness?", "abstract": "With the growing popularity of deep-learning based NLP models comes a need for interpretable systems. But what is interpretability, and what constitutes a high-quality interpretation? In this opinion piece we reflect on the current state of interpretability evaluation research. We call for more clearly differentiating between different desired criteria an interpretation should satisfy, and focus on the faithfulness criteria. We survey the literature with respect to faithfulness evaluation, and arrange the current approaches around three assumptions, providing an explicit form to how faithfulness is \u201cdefined\u201d by the community. We provide concrete guidelines on how evaluation of interpretation methods should and should not be conducted. Finally, we claim that the current binary definition for faithfulness sets a potentially unrealistic bar for being considered faithful. We call for discarding the binary notion of faithfulness in favor of a more graded one, which we believe will be of greater practical utility.", "keyphrases": ["faithfulness", "nlp model", "interpretation method", "plausibility", "trustworthiness"]} +{"id": "akhtar-etal-2019-multi", "title": "Multi-task Learning for Multi-modal Emotion Recognition and Sentiment Analysis", "abstract": "Related tasks often have inter-dependence on each other and perform better when solved in a joint framework. In this paper, we present a deep multi-task learning framework that jointly performs both sentiment and emotion analysis. The multi-modal inputs (i.e. text, acoustic and visual frames) of a video convey diverse and distinctive information, and usually do not have equal contribution in the decision making. We propose a context-level inter-modal attention framework for simultaneously predicting the sentiment and expressed emotions of an utterance. We evaluate our proposed approach on the CMU-MOSEI dataset for multi-modal sentiment and emotion analysis. Evaluation results suggest that the multi-task learning framework offers improvement over the single-task framework. The proposed approach reports new state-of-the-art performance for both sentiment analysis and emotion analysis.", "keyphrases": ["emotion recognition", "sentiment analysis", "multi-task learning framework"]} +{"id": "sinha-etal-2009-semeval", "title": "SemEval-2010 Task 2: Cross-Lingual Lexical Substitution", "abstract": "In this paper we describe the SemEval-2010 Cross-Lingual Lexical Substitution task, where given an English target word in context, participating systems had to find an alternative substitute word or phrase in Spanish. The task is based on the English Lexical Substitution task run at SemEval-2007.
In this paper we provide background and motivation for the task, we describe the data annotation process and the scoring system, and present the results of the participating systems.", "keyphrases": ["cross-lingual lexical substitution", "semeval task", "identification"]} +{"id": "ustalov-etal-2017-negative", "title": "Negative Sampling Improves Hypernymy Extraction Based on Projection Learning", "abstract": "We present a new approach to extraction of hypernyms based on projection learning and word embeddings. In contrast to classification-based approaches, projection-based methods require no candidate hyponym-hypernym pairs. While it is natural to use both positive and negative training examples in supervised relation extraction, the impact of negative examples on hypernym prediction has not been studied so far. In this paper, we show that explicit negative examples used for regularization of the model significantly improve performance compared to the state-of-the-art approach of Fu et al. (2014) on three datasets from different languages.", "keyphrases": ["hypernymy extraction", "projection learning", "negative example"]} +{"id": "wang-etal-2019-dynamically", "title": "Dynamically Composing Domain-Data Selection with Clean-Data Selection by \u201cCo-Curricular Learning\u201d for Neural Machine Translation", "abstract": "Noise and domain are important aspects of data quality for neural machine translation. Existing research focuses separately on domain-data selection, clean-data selection, or their static combination, leaving the dynamic interaction across them not explicitly examined. This paper introduces a \u201cco-curricular learning\u201d method to compose dynamic domain-data selection with dynamic clean-data selection, for transfer learning across both capabilities. We apply an EM-style optimization procedure to further refine the \u201cco-curriculum\u201d. Experiment results and analysis with two domains demonstrate the effectiveness of the method and the properties of data scheduled by the co-curriculum.", "keyphrases": ["domain-data selection", "co-curricular learning", "machine translation", "curriculum"]} +{"id": "sugawara-etal-2017-evaluation", "title": "Evaluation Metrics for Machine Reading Comprehension: Prerequisite Skills and Readability", "abstract": "Knowing the quality of reading comprehension (RC) datasets is important for the development of natural-language understanding systems. In this study, two classes of metrics were adopted for evaluating RC datasets: prerequisite skills and readability. We applied these classes to six existing datasets, including MCTest and SQuAD, and highlighted the characteristics of the datasets according to each metric and the correlation between the two classes. Our dataset analysis suggests that the readability of RC datasets does not directly affect the question difficulty and that it is possible to create an RC dataset that is easy to read but difficult to answer.", "keyphrases": ["prerequisite skill", "readability", "comprehension dataset"]} +{"id": "xu-etal-2019-bert", "title": "BERT Post-Training for Review Reading Comprehension and Aspect-based Sentiment Analysis", "abstract": "Question-answering plays an important role in e-commerce as it allows potential customers to actively seek crucial information about products or services to help their purchase decision making.
Inspired by the recent success of machine reading comprehension (MRC) on formal documents, this paper explores the potential of turning customer reviews into a large source of knowledge that can be exploited to answer user questions. We call this problem Review Reading Comprehension (RRC). To the best of our knowledge, no existing work has been done on RRC. In this work, we first build an RRC dataset called ReviewRC based on a popular benchmark for aspect-based sentiment analysis. Since ReviewRC has limited training examples for RRC (and also for aspect-based sentiment analysis), we then explore a novel post-training approach on the popular language model BERT to enhance the performance of fine-tuning of BERT for RRC. To show the generality of the approach, the proposed post-training is also applied to some other review-based tasks such as aspect extraction and aspect sentiment classification in aspect-based sentiment analysis. Experimental results demonstrate that the proposed post-training is highly effective.", "keyphrases": ["review reading comprehension", "sentiment analysis", "post-training approach", "aspect term"]} +{"id": "gaman-etal-2020-report", "title": "A Report on the VarDial Evaluation Campaign 2020", "abstract": "This paper presents the results of the VarDial Evaluation Campaign 2020 organized as part of the seventh workshop on Natural Language Processing (NLP) for Similar Languages, Varieties and Dialects (VarDial), co-located with COLING 2020. The campaign included three shared tasks each focusing on a different challenge of language and dialect identification: Romanian Dialect Identification (RDI), Social Media Variety Geolocation (SMG), and Uralic Language Identification (ULI). The campaign attracted 30 teams who enrolled to participate in one or multiple shared tasks and 14 of them submitted runs across the three shared tasks. Finally, 11 papers describing participating systems are published in the VarDial proceedings and referred to in this report.", "keyphrases": ["vardial evaluation campaign", "similar languages", "smg", "uli", "participant"]} +{"id": "malmasi-etal-2022-multiconer", "title": "MultiCoNER: A Large-scale Multilingual Dataset for Complex Named Entity Recognition", "abstract": "We present AnonData, a large multilingual dataset for Named Entity Recognition that covers 3 domains (Wiki sentences, questions, and search queries) across 11 languages, as well as multilingual and code-mixing subsets. This dataset is designed to represent contemporary challenges in NER, including low-context scenarios (short and uncased text), syntactically complex entities like movie titles, and long-tail entity distributions. The 26M token dataset is compiled from public resources using techniques such as heuristic-based sentence sampling, template extraction and slotting, and machine translation. We tested the performance of two NER models on our dataset: a baseline XLM-RoBERTa model, and a state-of-the-art NER GEMNET model that leverages gazetteers. The baseline achieves moderate performance (macro-F1=54%). GEMNET, which uses gazetteers, improves significantly (average improvement of macro-F1=+30%) and demonstrates the difficulty of our dataset.
AnonData poses challenges even for large pre-trained language models, and we believe that it can help further research in building robust NER systems.", "keyphrases": ["multilingual dataset", "complex entity", "multiconer"]} +{"id": "shi-etal-2022-selective", "title": "Selective Differential Privacy for Language Modeling", "abstract": "With the increasing applications of language models, it has become crucial to protect these models from leaking private information. Previous work has attempted to tackle this challenge by training RNN-based language models with differential privacy guarantees. However, applying classical differential privacy to language models leads to poor model performance as the underlying privacy notion is over-pessimistic and provides undifferentiated protection for all tokens in the data. Given that the private information in natural language is sparse (for example, the bulk of an email might not carry personally identifiable information), we propose a new privacy notion, selective differential privacy, to provide rigorous privacy guarantees on the sensitive portion of the data to improve model utility. To realize such a new notion, we develop a corresponding privacy mechanism, Selective-DPSGD, for RNN-based language models. Besides language modeling, we also apply the method to a more concrete application \u2013 dialog systems. Experiments on both language modeling and dialog system building show that the proposed privacy-preserving mechanism achieves better utilities while remaining safe under various privacy attacks compared to the baselines. The data and code are released at to facilitate future research.", "keyphrases": ["language modeling", "notion", "selective differential privacy"]} +{"id": "pang-etal-2003-syntax", "title": "Syntax-based Alignment of Multiple Translations: Extracting Paraphrases and Generating New Sentences", "abstract": "We describe a syntax-based algorithm that automatically builds Finite State Automata (word lattices) from semantically equivalent translation sets. These FSAs are good representations of paraphrases. They can be used to extract lexical and syntactic paraphrase pairs and to generate new, unseen sentences that express the same meaning as the sentences in the input sets. Our FSAs can also predict the correctness of alternative semantic renderings, which may be used to evaluate the quality of translations.", "keyphrases": ["multiple translation", "paraphrase", "syntax-based algorithm", "lattice", "entailment"]} +{"id": "kiperwasser-goldberg-2016-simple", "title": "Simple and Accurate Dependency Parsing Using Bidirectional LSTM Feature Representations", "abstract": "We present a simple and effective scheme for dependency parsing which is based on bidirectional-LSTMs (BiLSTMs). Each sentence token is associated with a BiLSTM vector representing the token in its sentential context, and feature vectors are constructed by concatenating a few BiLSTM vectors. The BiLSTM is trained jointly with the parser objective, resulting in very effective feature extractors for parsing. We demonstrate the effectiveness of the approach by applying it to a greedy transition-based parser as well as to a globally optimized graph-based parser. 
The resulting parsers have very simple architectures, and match or surpass the state-of-the-art accuracies on English and Chinese.", "keyphrases": ["dependency parsing", "bidirectional lstm", "bilstms", "improved performance", "hand-crafted feature"]} +{"id": "hedderich-etal-2021-survey", "title": "A Survey on Recent Approaches for Natural Language Processing in Low-Resource Scenarios", "abstract": "Deep neural networks and huge language models are becoming omnipresent in natural language applications. As they are known for requiring large amounts of training data, there is a growing body of work to improve the performance in low-resource settings. Motivated by the recent fundamental changes towards neural models and the popular pre-train and fine-tune paradigm, we survey promising approaches for low-resource natural language processing. After a discussion about the different dimensions of data availability, we give a structured overview of methods that enable learning when training data is sparse. This includes mechanisms to create additional labeled data like data augmentation and distant supervision as well as transfer learning settings that reduce the need for target supervision. A goal of our survey is to explain how these methods differ in their requirements as understanding them is essential for choosing a technique suited for a specific low-resource setting. Further key aspects of this work are to highlight open issues and to outline promising directions for future research.", "keyphrases": ["survey", "low-resource scenario", "data augmentation", "other language"]} +{"id": "zou-etal-2015-negation", "title": "Negation and Speculation Identification in Chinese Language", "abstract": "Identifying negative or speculative narrative fragments from fact is crucial for natural language processing (NLP) applications. Previous studies on negation and speculation identification in the Chinese language suffer from two problems: corpus scarcity and the bottleneck in fundamental Chinese information processing. To resolve these problems, this paper constructs a Chinese corpus which consists of three sub-corpora from different resources. In order to detect the negative and speculative cues, a sequence labeling model is proposed. Moreover, a bilingual cue expansion method is proposed to increase the coverage in cue detection. In addition, this paper presents a new syntactic structure-based framework to identify the linguistic scope of a cue, instead of the traditional chunking-based framework. Experimental results justify the usefulness of our Chinese corpus and the appropriateness of our syntactic structure-based framework which obtained significant improvement over the state-of-the-art on negation and speculation identification in the Chinese language.", "keyphrases": ["speculation identification", "chinese language", "negation"]} +{"id": "callison-burch-2009-fast", "title": "Fast, Cheap, and Creative: Evaluating Translation Quality Using Amazon's Mechanical Turk", "abstract": "Manual evaluation of translation quality is generally thought to be excessively time consuming and expensive. We explore a fast and inexpensive way of doing it using Amazon's Mechanical Turk to pay small sums to a large number of non-expert annotators. For $10 we redundantly recreate judgments from a WMT08 translation task. 
We find that, when combined, non-expert judgments have a high level of agreement with the existing gold-standard judgments of machine translation quality, and correlate more strongly with expert judgments than Bleu does. We go on to show that Mechanical Turk can be used to calculate human-mediated translation edit rate (HTER), to conduct reading comprehension experiments with machine translation, and to create high quality reference translations.", "keyphrases": ["translation quality", "mechanical turk", "expert", "mturk"]} +{"id": "fadaee-monz-2018-back", "title": "Back-Translation Sampling by Targeting Difficult Words in Neural Machine Translation", "abstract": "Neural Machine Translation has achieved state-of-the-art performance for several language pairs using a combination of parallel and synthetic data. Synthetic data is often generated by back-translating sentences randomly sampled from monolingual data using a reverse translation model. While back-translation has been shown to be very effective in many cases, it is not entirely clear why. In this work, we explore different aspects of back-translation, and show that words with high prediction loss during training benefit most from the addition of synthetic data. We introduce several variations of sampling strategies targeting difficult-to-predict words using prediction losses and frequencies of words. In addition, we also target the contexts of difficult words and sample sentences that are similar in context. Experimental results for the WMT news translation task show that our method improves translation quality by up to 1.7 and 1.2 Bleu points over back-translation using random sampling for German-English and English-German, respectively.", "keyphrases": ["difficult word", "neural machine translation", "synthetic data", "prediction loss", "back-translation"]} +{"id": "berzak-etal-2016-universal", "title": "Universal Dependencies for Learner English", "abstract": "We introduce the Treebank of Learner English (TLE), the first publicly available syntactic treebank for English as a Second Language (ESL). The TLE provides manually annotated POS tags and Universal Dependency (UD) trees for 5,124 sentences from the Cambridge First Certificate in English (FCE) corpus. The UD annotations are tied to a pre-existing error annotation of the FCE, whereby full syntactic analyses are provided for both the original and error corrected versions of each sentence. Further on, we delineate ESL annotation guidelines that allow for consistent syntactic treatment of ungrammatical English. Finally, we benchmark POS tagging and dependency parsing performance on the TLE dataset and measure the effect of grammatical errors on parsing accuracy. We envision the treebank to support a wide range of linguistic and computational research on second language acquisition as well as automatic processing of ungrammatical language. The treebank is available at universaldependencies.org. The annotation manual used in this project and a graphical query engine are available at esltreebank.org.", "keyphrases": ["learner english", "treebank", "esl", "pos tagging", "universal dependency"]} +{"id": "bikel-2004-intricacies", "title": "Intricacies of Collins' Parsing Model", "abstract": "This article documents a large set of heretofore unpublished details Collins used in his parser, such that, along with Collins' (1999) thesis, this article contains all information necessary to duplicate Collins' benchmark results. 
Indeed, these as-yet-unpublished details account for an 11% relative increase in error from an implementation including all details to a clean-room implementation of Collins' model. We also show a cleaner and equally well-performing method for the handling of punctuation and conjunction and reveal certain other probabilistic oddities about Collins' parser. We not only analyze the effect of the unpublished details, but also reanalyze the effect of certain well-known details, revealing that bilexical dependencies are barely used by the model and that head choice is not nearly as important to overall parsing performance as once thought. Finally, we perform experiments that show that the true discriminative power of lexicalization appears to lie in the fact that unlexicalized syntactic structures are generated conditioning on the headword and its part of speech.", "keyphrases": ["collins", "simple likelihood", "part-of-speech category"]} +{"id": "teufel-etal-2009-towards", "title": "Towards Domain-Independent Argumentative Zoning: Evidence from Chemistry and Computational Linguistics", "abstract": "Argumentative Zoning (AZ) is an analysis of the argumentative and rhetorical structure of a scientific paper. It has been shown to be reliably used by independent human coders, and has proven useful for various information access tasks. Annotation experiments have however so far been restricted to one discipline, computational linguistics (CL). Here, we present a more informative AZ scheme with 15 categories in place of the original 7, and show that it can be applied to the life sciences as well as to CL. We use a domain expert to encode basic knowledge about the subject (such as terminology and domain specific rules for individual categories) as part of the annotation guidelines. Our results show that non-expert human coders can then use these guidelines to reliably annotate this scheme in two domains, chemistry and computational linguistics.", "keyphrases": ["argumentative zoning", "computational linguistics", "scientific article", "annotation scheme"]} +{"id": "eisenstein-2013-bad", "title": "What to do about bad language on the internet", "abstract": "The rise of social media has brought computational linguistics in ever-closer contact with bad language: text that defies our expectations about vocabulary, spelling, and syntax. This paper surveys the landscape of bad language, and offers a critical review of the NLP community\u2019s response, which has largely followed two paths: normalization and domain adaptation. Each approach is evaluated in the context of theoretical and empirical work on computer-mediated communication. In addition, the paper presents a quantitative analysis of the lexical diversity of social media text, and its relationship to other corpora.", "keyphrases": ["bad language", "internet", "social medium", "hashtag", "unique language"]} +{"id": "eryigit-etal-2008-dependency", "title": "Dependency Parsing of Turkish", "abstract": "The suitability of different parsing methods for different languages is an important topic in syntactic parsing. Especially lesser-studied languages, typologically different from the languages for which methods have originally been developed, pose interesting challenges in this respect. This article presents an investigation of data-driven dependency parsing of Turkish, an agglutinative, free constituent order language that can be seen as the representative of a wider class of languages of similar type. 
Our investigations show that morphological structure plays an essential role in finding syntactic relations in such a language. In particular, we show that employing sublexical units called inflectional groups, rather than word forms, as the basic parsing units improves parsing accuracy. We test our claim on two different parsing methods, one based on a probabilistic model with beam search and the other based on discriminative classifiers and a deterministic parsing strategy, and show that the usefulness of sublexical units holds regardless of the parsing method. We examine the impact of morphological and lexical information in detail and show that, properly used, this kind of information can improve parsing accuracy substantially. Applying the techniques presented in this article, we achieve the highest reported accuracy for parsing the Turkish Treebank.", "keyphrases": ["turkish", "inflectional group", "dependency parsing", "previous study", "hindi"]} +{"id": "moryossef-etal-2019-step", "title": "Step-by-Step: Separating Planning from Realization in Neural Data-to-Text Generation", "abstract": "Data-to-text generation can be conceptually divided into two parts: ordering and structuring the information (planning), and generating fluent language describing the information (realization). Modern neural generation systems conflate these two steps into a single end-to-end differentiable system. We propose to split the generation process into a symbolic text-planning stage that is faithful to the input, followed by a neural generation stage that focuses only on realization. For training a plan-to-text generator, we present a method for matching reference texts to their corresponding text plans. For inference time, we describe a method for selecting high-quality text plans for new inputs. We implement and evaluate our approach on the WebNLG benchmark. Our results demonstrate that decoupling text planning from neural realization indeed improves the system's reliability and adequacy while maintaining fluent output. We observe improvements both in BLEU scores and in manual evaluations. Another benefit of our approach is the ability to output diverse realizations of the same input, paving the way to explicit control over the generated text structure.", "keyphrases": ["realization", "data-to-text generation", "text planning"]} +{"id": "xiong-litman-2011-automatically", "title": "Automatically Predicting Peer-Review Helpfulness", "abstract": "Identifying peer-review helpfulness is an important task for improving the quality of feedback that students receive from their peers. As a first step towards enhancing existing peer-review systems with new functionality based on helpfulness detection, we examine whether standard product review analysis techniques also apply to our new context of peer reviews. In addition, we investigate the utility of incorporating additional specialized features tailored to peer review. 
Our preliminary results show that the structural features, review unigrams and meta-data combined are useful in modeling the helpfulness of both peer reviews and product reviews, while peer-review specific auxiliary features can further improve helpfulness prediction.", "keyphrases": ["peer-review helpfulness", "feedback", "utility", "specialized feature"]} +{"id": "nan-etal-2020-reasoning", "title": "Reasoning with Latent Structure Refinement for Document-Level Relation Extraction", "abstract": "Document-level relation extraction requires integrating information within and across multiple sentences of a document and capturing complex interactions between inter-sentence entities. However, effective aggregation of relevant information in the document remains a challenging research question. Existing approaches construct static document-level graphs based on syntactic trees, co-references or heuristics from the unstructured text to model the dependencies. Unlike previous methods that may not be able to capture rich non-local interactions for inference, we propose a novel model that empowers the relational reasoning across sentences by automatically inducing the latent document-level graph. We further develop a refinement strategy, which enables the model to incrementally aggregate relevant information for multi-hop reasoning. Specifically, our model achieves an F1 score of 59.05 on a large-scale document-level dataset (DocRED), significantly improving over the previous results, and also yields new state-of-the-art results on the CDR and GDA dataset. Furthermore, extensive analyses show that the model is able to discover more accurate inter-sentence relations.", "keyphrases": ["latent structure", "relation extraction", "document-level graph", "reasoning", "mention"]} +{"id": "madnani-dorr-2010-generating", "title": "Generating Phrasal and Sentential Paraphrases: A Survey of Data-Driven Methods", "abstract": "The task of paraphrasing is inherently familiar to speakers of all languages. Moreover, the task of automatically generating or extracting semantic equivalences for the various units of language\u2014words, phrases, and sentences\u2014is an important part of natural language processing (NLP) and is being increasingly employed to improve the performance of several NLP applications. In this article, we attempt to conduct a comprehensive and application-independent survey of data-driven phrasal and sentential paraphrase generation methods, while also conveying an appreciation for the importance and potential use of paraphrases in the field of NLP research. Recent work done in manual and automatic construction of paraphrase corpora is also examined. We also discuss the strategies used for evaluating paraphrase generation techniques and briefly explore some future trends in paraphrase generation.", "keyphrases": ["sentential paraphrase", "survey", "several nlp application", "same semantic content", "data-driven approach"]} +{"id": "hu-etal-2015-improved", "title": "Improved beam search with constrained softmax for NMT", "abstract": "We propose an improved beam search decoding algorithm with constrained softmax operations for neural machine translation (NMT). NMT is a newly emerging approach to predict the best translation by building a neural network instead of a log-linear model. It has achieved comparable translation quality to the existing phrase-based statistical machine translation systems. 
However, how to perform efficient decoding for NMT is still challenging, especially for commercial systems which provide real-time translation service. Unlike the standard beam search algorithm, we use a priority queue to choose the best hypothesis for the next search, which drastically reduces search space. Another time consuming factor is the softmax operation in the output layer because of the large target vocabulary size. To solve this problem, we introduce a limited word set of translation candidates to greatly reduce the computation complexity. Our experiments show that, under the GPU environment, our method achieves a speed about 3.5 times faster than the well optimized baseline system without sacrificing the translation quality. Our method translates about 117 words per second, beating the real-time translation requirements for practical MT systems.", "keyphrases": ["beam search", "priority queue", "hypothesis"]} +{"id": "zhang-etal-2019-amr", "title": "AMR Parsing as Sequence-to-Graph Transduction", "abstract": "We propose an attention-based model that treats AMR parsing as sequence-to-graph transduction. Unlike most AMR parsers that rely on pre-trained aligners, external semantic resources, or data augmentation, our proposed parser is aligner-free, and it can be effectively trained with limited amounts of labeled AMR data. Our experimental results outperform all previously reported SMATCH scores, on both AMR 2.0 (76.3% on LDC2017T10) and AMR 1.0 (70.2% on LDC2014T12).", "keyphrases": ["sequence-to-graph transduction", "smatch score", "amr", "semantic parser", "two-stage"]} +{"id": "ritter-etal-2010-unsupervised", "title": "Unsupervised Modeling of Twitter Conversations", "abstract": "We propose the first unsupervised approach to the problem of modeling dialogue acts in an open domain. Trained on a corpus of noisy Twitter conversations, our method discovers dialogue acts by clustering raw utterances. Because it accounts for the sequential behaviour of these acts, the learned model can provide insight into the shape of communication in a new medium. We address the challenge of evaluating the emergent model with a qualitative visualization and an intrinsic conversation ordering task. This work is inspired by a corpus of 1.3 million Twitter conversations, which will be made publicly available. This huge amount of data, available only because Twitter blurs the line between chatting and publishing, highlights the need to be able to adapt quickly to a new medium.", "keyphrases": ["twitter", "conversation", "hidden markov model", "additional word source"]} +{"id": "rubin-2007-stating", "title": "Stating with Certainty or Stating with Doubt: Intercoder Reliability Results for Manual Annotation of Epistemically Modalized Statements", "abstract": "Texts exhibit subtle yet identifiable modality about writers' estimation of how true each statement is (e.g., definitely true or somewhat true). This study is an analysis of such explicit certainty and doubt markers in epistemically modalized statements for a written news discourse. The study systematically accounts for five levels of writer's certainty (ABSOLUTE, HIGH, MODERATE, LOW CERTAINTY and UNCERTAINTY) in three news pragmatic contexts: perspective, focus, and time. 
The study concludes that independent coders' perceptions of the boundaries between shades of certainty in epistemically modalized statements are highly subjective and present difficulties for manual annotation and consequent automation for opinion extraction and sentiment analysis. While stricter annotation instructions and longer coder training can improve inter-coder agreement results, it is not entirely clear that a five-level distinction of certainty is preferable to a simplistic distinction between statements with certainty and statements with doubt.", "keyphrases": ["certainty", "manual annotation", "modalized statement"]} +{"id": "lau-etal-2018-deep", "title": "Deep-speare: A joint neural model of poetic language, meter and rhyme", "abstract": "In this paper, we propose a joint architecture that captures language, rhyme and meter for sonnet modelling. We assess the quality of generated poems using crowd and expert judgements. The stress and rhyme models perform very well, as generated poems are largely indistinguishable from human-written poems. Expert evaluation, however, reveals that a vanilla language model captures meter implicitly, and that machine-generated poems still underperform in terms of readability and emotion. Our research shows the importance of expert evaluation for poetry generation, and that future research should look beyond rhyme/meter and focus on poetic language.", "keyphrases": ["poetic language", "rhyme", "poem", "deep-speare"]} +{"id": "liang-etal-2006-end", "title": "An End-to-End Discriminative Approach to Machine Translation", "abstract": "We present a perceptron-style discriminative approach to machine translation in which large feature sets can be exploited. Unlike discriminative reranking approaches, our system can take advantage of learned features in all stages of decoding. We first discuss several challenges to error-driven discriminative approaches. In particular, we explore different ways of updating parameters given a training example. We find that making frequent but smaller updates is preferable to making fewer but larger updates. Then, we discuss an array of features and show both how they quantitatively increase BLEU score and how they qualitatively interact on specific examples. One particular feature we investigate is a novel way to introduce learning into the initial phrase extraction process, which has previously been entirely heuristic.", "keyphrases": ["machine translation", "training example", "bleu score", "model parameter"]} +{"id": "mohammad-yang-2011-tracking", "title": "Tracking Sentiment in Mail: How Genders Differ on Emotional Axes", "abstract": "With the widespread use of email, we now have access to unprecedented amounts of text that we ourselves have written. In this paper, we show how sentiment analysis can be used in tandem with effective visualizations to quantify and track emotions in many types of mail. We create a large word\u2013emotion association lexicon by crowdsourcing, and use it to compare emotions in love letters, hate mail, and suicide notes. We show that there are marked differences across genders in how they use emotion words in work-place email. For example, women use many words from the joy\u2013sadness axis, whereas men prefer terms from the fear\u2013trust axis. 
Finally, we show visualizations that can help people track emotions in their emails.", "keyphrases": ["mail", "gender", "emotion word", "workplace email"]} +{"id": "sproat-emerson-2003-first", "title": "The First International Chinese Word Segmentation Bakeoff", "abstract": "This paper presents the results from the ACL-SIGHAN-sponsored First International Chinese Word Segmentation Bakeoff held in 2003 and reported in conjunction with the Second SIGHAN Workshop on Chinese Language Processing, Sapporo, Japan. We give the motivation for having an international segmentation contest (given that there have been two within-China contests to date) and we report on the results of this first international contest, analyze these results, and make some recommendations for the future.", "keyphrases": ["chinese", "word segmentation", "sighan"]} +{"id": "soboroff-harman-2005-novelty", "title": "Novelty Detection: The TREC Experience", "abstract": "A challenge for search systems is to detect not only when an item is relevant to the user's information need, but also when it contains something new which the user has not seen before. In the TREC novelty track, the task was to highlight sentences containing relevant and new information in a short, topical document stream. This is analogous to highlighting key parts of a document for another person to read, and this kind of output can be useful as input to a summarization system. Search topics involved both news events and reported opinions on hot-button subjects. When people performed this task, they tended to select small blocks of consecutive sentences, whereas current systems identified many relevant and novel passages. We also found that opinions are much harder to track than events.", "keyphrases": ["trec", "new information", "novelty detection"]} +{"id": "perez-rosas-etal-2018-automatic", "title": "Automatic Detection of Fake News", "abstract": "The proliferation of misleading information in everyday access media outlets such as social media feeds, news blogs, and online newspapers has made it challenging to identify trustworthy news sources, thus increasing the need for computational tools able to provide insights into the reliability of online content. In this paper, we focus on the automatic identification of fake content in online news. Our contribution is twofold. First, we introduce two novel datasets for the task of fake news detection, covering seven different news domains. We describe the collection, annotation, and validation process in detail and present several exploratory analyses on the identification of linguistic differences in fake and legitimate news content. Second, we conduct a set of learning experiments to build accurate fake news detectors, and show that we can achieve accuracies of up to 76%. 
In addition, we provide comparative analyses of the automatic and manual identification of fake news.", "keyphrases": ["fake news", "linguistic feature", "recent research"]} +{"id": "castilho-etal-2017-comparative", "title": "A Comparative Quality Evaluation of PBSMT and NMT using Professional Translators", "abstract": "This paper reports on a comparative evaluation of phrase-based statistical machine translation (PBSMT) and neural machine translation (NMT) for four language pairs, using the PET interface to compare educational domain output from both systems using a variety of metrics, including automatic evaluation as well as human rankings of adequacy and fluency, error-type markup, and post-editing (technical and temporal) effort, performed by professional translators. Our results show a preference for NMT in side-by-side ranking for all language pairs, texts, and segment lengths. In addition, perceived fluency is improved and annotated errors are fewer in the NMT output. Results are mixed for perceived adequacy and for errors of omission, addition, and mistranslation. Despite far fewer segments requiring post-editing, document-level post-editing performance was not found to have significantly improved in NMT compared to PBSMT. This evaluation was conducted as part of the TraMOOC project, which aims to create a replicable semi-automated methodology for high-quality machine translation of educational data.", "keyphrases": ["pbsmt", "professional translator", "comparative evaluation", "semantic faithfulness"]} +{"id": "lee-etal-2017-fully", "title": "Fully Character-Level Neural Machine Translation without Explicit Segmentation", "abstract": "Most existing machine translation systems operate at the level of words, relying on explicit segmentation to extract tokens. We introduce a neural machine translation (NMT) model that maps a source character sequence to a target character sequence without any segmentation. We employ a character-level convolutional network with max-pooling at the encoder to reduce the length of source representation, allowing the model to be trained at a speed comparable to subword-level models while capturing local regularities. Our character-to-character model outperforms a recently proposed baseline with a subword-level encoder on WMT'15 DE-EN and CS-EN, and gives comparable performance on FI-EN and RU-EN. We then demonstrate that it is possible to share a single character-level encoder across multiple languages by training a model on a many-to-one translation task. In this multilingual setting, the character-level encoder significantly outperforms the subword-level encoder on all the language pairs. We observe that on CS-EN, FI-EN and RU-EN, the quality of the multilingual character-level translation even surpasses the models specifically trained on that language pair alone, both in terms of the BLEU score and human judgment.", "keyphrases": ["neural machine translation", "explicit segmentation", "character-level encoder", "many-to-one translation task", "multilingual setting"]} +{"id": "madnani-etal-2012-examining", "title": "Re-examining Machine Translation Metrics for Paraphrase Identification", "abstract": "We propose to re-examine the hypothesis that automated metrics developed for MT evaluation can prove useful for paraphrase identification in light of the significant work on the development of new MT metrics over the last 4 years. 
We show that a meta-classifier trained using nothing but recent MT metrics outperforms all previous paraphrase identification approaches on the Microsoft Research Paraphrase corpus. In addition, we apply our system to a second corpus developed for the task of plagiarism detection and obtain extremely positive results. Finally, we conduct extensive error analysis and uncover the top systematic sources of error for a paraphrase identification approach relying solely on MT metrics. We release both the new dataset and the error analysis annotations for use by the community.", "keyphrases": ["machine translation metric", "paraphrase identification", "error analysis", "nist"]} +{"id": "sagae-tsujii-2007-dependency", "title": "Dependency Parsing and Domain Adaptation with LR Models and Parser Ensembles", "abstract": "We present a data-driven variant of the LR algorithm for dependency parsing, and extend it with a best-first search for probabilistic generalized LR dependency parsing. Parser actions are determined by a classifier, based on features that represent the current state of the parser. We apply this parsing framework to both tracks of the CoNLL 2007 shared task, in each case taking advantage of multiple models trained with different learners. In the multilingual track, we train three LR models for each of the ten languages, and combine the analyses obtained with each individual model with a maximum spanning tree voting scheme. In the domain adaptation track, we use two models to parse unlabeled data in the target domain to supplement the labeled out-of-domain training set, in a scheme similar to one iteration of co-training.", "keyphrases": ["variant", "training set", "dependency parsing"]} +{"id": "min-etal-2013-distant", "title": "Distant Supervision for Relation Extraction with an Incomplete Knowledge Base", "abstract": "Distant supervision, heuristically labeling a corpus using a knowledge base, has emerged as a popular choice for training relation extractors. In this paper, we show that a significant number of \u201cnegative\u201d examples generated by the labeling process are false negatives because the knowledge base is incomplete. Therefore the heuristic for generating negative examples has a serious flaw. Building on a state-of-the-art distantly-supervised extraction algorithm, we proposed an algorithm that learns from only positive and unlabeled labels at the pair-of-entity level. Experimental results demonstrate its advantage over existing algorithms.", "keyphrases": ["relation extraction", "knowledge base", "distant supervision", "downstream task", "schema"]} +{"id": "kermes-etal-2016-royal", "title": "The Royal Society Corpus: From Uncharted Data to Corpus", "abstract": "We present the Royal Society Corpus (RSC) built from the Philosophical Transactions and Proceedings of the Royal Society of London. At present, the corpus contains articles from the first two centuries of the journal (1665\u20131869) and amounts to around 35 million tokens. The motivation for building the RSC is to investigate the diachronic linguistic development of scientific English. Specifically, we assume that due to specialization, linguistic encodings become more compact over time (Halliday, 1988; Halliday and Martin, 1993), thus creating a specific discourse type characterized by high information density that is functional for expert communication. When building corpora from uncharted material, typically not all relevant meta-data (e.g. author, time, genre) or linguistic data (e.g. 
sentence/word boundaries, words, parts of speech) is readily available. We present an approach to obtain good quality meta-data and base text data adopting the concept of Agile Software Development.", "keyphrases": ["royal society corpus", "rsc", "proceedings"]} +{"id": "wang-etal-2019-confusionset", "title": "Confusionset-guided Pointer Networks for Chinese Spelling Check", "abstract": "This paper proposes Confusionset-guided Pointer Networks for Chinese Spell Check (CSC) task. More concretely, our approach utilizes the off-the-shelf confusionset for guiding the character generation. To this end, our novel Seq2Seq model jointly learns to copy a correct character from an input sentence through a pointer network, or generate a character from the confusionset rather than the entire vocabulary. We conduct experiments on three human-annotated datasets, and results demonstrate that our proposed generative model outperforms all competitor models by a large margin of up to 20% F1 score, achieving state-of-the-art performance on three datasets.", "keyphrases": ["pointer networks", "confusion", "spelling error"]} +{"id": "sogaard-etal-2018-limitations", "title": "On the Limitations of Unsupervised Bilingual Dictionary Induction", "abstract": "Unsupervised machine translation - i.e., not assuming any cross-lingual supervision signal, whether a dictionary, translations, or comparable corpora - seems impossible, but nevertheless, Lample et al. (2017) recently proposed a fully unsupervised machine translation (MT) model. The model relies heavily on an adversarial, unsupervised cross-lingual word embedding technique for bilingual dictionary induction (Conneau et al., 2017), which we examine here. Our results identify the limitations of current unsupervised MT: unsupervised bilingual dictionary induction performs much worse on morphologically rich languages that are not dependent marking, when monolingual corpora from different domains or different embedding algorithms are used. We show that a simple trick, exploiting a weak supervision signal from identical words, enables more robust induction and establish a near-perfect correlation between unsupervised bilingual dictionary induction performance and a previously unexplored graph similarity metric.", "keyphrases": ["assumption", "isomorphism", "unsupervised method", "distant language pair", "spelling"]} +{"id": "shutova-etal-2010-metaphor", "title": "Metaphor Identification Using Verb and Noun Clustering", "abstract": "We present a novel approach to automatic metaphor identification in unrestricted text. Starting from a small seed set of manually annotated metaphorical expressions, the system is capable of harvesting a large number of metaphors of similar syntactic structure from a corpus. Our method is distinguished from previous work in that it does not employ any hand-crafted knowledge, other than the initial seed set, but, in contrast, captures metaphoricity by means of verb and noun clustering. Being the first to employ unsupervised methods for metaphor identification, our system operates with the precision of 0.79.", "keyphrases": ["noun", "clustering", "metaphor identification", "dependency feature", "target concept"]} +{"id": "clark-gardner-2018-simple", "title": "Simple and Effective Multi-Paragraph Reading Comprehension", "abstract": "We introduce a method of adapting neural paragraph-level question answering models to the case where entire documents are given as input. 
Most current question answering models cannot scale to document or multi-document input, and naively applying these models to each paragraph independently often results in them being distracted by irrelevant text. We show that it is possible to significantly improve performance by using a modified training scheme that teaches the model to ignore non-answer containing paragraphs. Our method involves sampling multiple paragraphs from each document, and using an objective function that requires the model to produce globally correct output. We additionally identify and improve upon a number of other design decisions that arise when working with document-level data. Experiments on TriviaQA and SQuAD show our method advances the state of the art, including a 10 point gain on TriviaQA.", "keyphrases": ["reading comprehension", "paragraph", "open-domain question"]} +{"id": "xu-etal-2016-hybrid", "title": "Hybrid Question Answering over Knowledge Base and Free Text", "abstract": "A recent trend in question answering (QA) systems focuses on using structured knowledge bases (KBs) to find answers. While these systems are able to provide more precise answers than information retrieval (IR) based QA systems, the natural incompleteness of KB inevitably limits the question scope that the system can answer. In this paper, we present a hybrid question answering (hybrid-QA) system which exploits both structured knowledge base and free text to answer a question. The main challenge is to recognize the meaning of a question using these two resources, i.e., structured KB and free text. To address this, we map relational phrases to KB predicates and textual relations simultaneously, and further develop an integer linear program (ILP) model to infer on these candidates and provide a globally optimal solution. Experiments on benchmark datasets show that our system can benefit from both structured KB and free text, outperforming the state-of-the-art systems.", "keyphrases": ["knowledge base", "free text", "hybrid question"]} +{"id": "zhang-etal-2016-variational-neural", "title": "Variational Neural Machine Translation", "abstract": "Models of neural machine translation are often from a discriminative family of encoder-decoders that learn a conditional distribution of a target sentence given a source sentence. In this paper, we propose a variational model to learn this conditional distribution for neural machine translation: a variational encoder-decoder model that can be trained end-to-end. Different from the vanilla encoder-decoder model that generates target translations from hidden representations of source sentences alone, the variational model introduces a continuous latent variable to explicitly model underlying semantics of source sentences and to guide the generation of target translations. In order to perform efficient posterior inference and large-scale training, we build a neural posterior approximator conditioned on both the source and the target sides, and equip it with a reparameterization technique to estimate the variational lower bound. 
Experiments on both Chinese-English and English-German translation tasks show that the proposed variational neural machine translation achieves significant improvements over the vanilla neural machine translation baselines.", "keyphrases": ["machine translation", "end-to-end", "latent variable", "vae"]} +{"id": "niu-etal-2017-improved", "title": "Improved Word Representation Learning with Sememes", "abstract": "Sememes are minimum semantic units of word meanings, and the meaning of each word sense is typically composed of several sememes. Since sememes are not explicit for each word, people manually annotate word sememes and form linguistic common-sense knowledge bases. In this paper, we show that word sememe information can improve word representation learning (WRL), which maps words into a low-dimensional semantic space and serves as a fundamental step for many NLP tasks. The key idea is to utilize word sememes to capture exact meanings of a word within specific contexts accurately. More specifically, we follow the framework of Skip-gram and present three sememe-encoded models to learn representations of sememes, senses and words, where we apply the attention scheme to detect word senses in various contexts. We conduct experiments on two tasks including word similarity and word analogy, and our models significantly outperform baselines. The results indicate that WRL can benefit from sememes via the attention scheme, and also confirm our models being capable of correctly modeling sememe information.", "keyphrases": ["word representation learning", "sememe", "hownet"]} +{"id": "qi-etal-2020-stanza", "title": "Stanza: A Python Natural Language Processing Toolkit for Many Human Languages", "abstract": "We introduce Stanza, an open-source Python natural language processing toolkit supporting 66 human languages. Compared to existing widely used toolkits, Stanza features a language-agnostic fully neural pipeline for text analysis, including tokenization, multi-word token expansion, lemmatization, part-of-speech and morphological feature tagging, dependency parsing, and named entity recognition. We have trained Stanza on a total of 112 datasets, including the Universal Dependencies treebanks and other multilingual corpora, and show that the same neural architecture generalizes well and achieves competitive performance on all languages tested. Additionally, Stanza includes a native Python interface to the widely used Java Stanford CoreNLP software, which further extends its functionality to cover other tasks such as coreference resolution and relation extraction. Source code, documentation, and pretrained models for 66 languages are available at .", "keyphrases": ["pipeline", "tokenization", "morphological feature tagging", "stanza", "opinion extraction"]} +{"id": "ziai-meurers-2014-focus", "title": "Focus Annotation in Reading Comprehension Data", "abstract": "When characterizing the information structure of sentences, the so-called focus identifies the part of a sentence addressing the current question under discussion in the discourse. While this notion is precisely defined in formal semantics and potentially very useful in theoretical and practical terms, it has turned out to be difficult to reliably annotate focus in corpus data. We present a new focus annotation effort designed to overcome this problem. On the one hand, it is based on a task-based corpus providing more explicit context. 
The annotation study is based on the CREG corpus (Ott et al., 2012), which consists of answers to explicitly given reading comprehension questions. On the other hand, we operationalize focus annotation as an incremental process including several substeps which provide guidance, such as explicit answer typing. We evaluate the focus annotation both intrinsically by calculating agreement between annotators and extrinsically by showing that the focus information substantially improves the automatic meaning assessment of answers in the CoMiC system (Meurers et al., 2011).", "keyphrases": ["information structure", "focus annotation", "explicit question", "ill-formed learner language"]} +{"id": "kirov-etal-2016-large", "title": "Very-large Scale Parsing and Normalization of Wiktionary Morphological Paradigms", "abstract": "Wiktionary is a large-scale resource for cross-lingual lexical information with great potential utility for machine translation (MT) and many other NLP tasks, especially automatic morphological analysis and generation. However, it is designed primarily for human viewing rather than machine readability, and presents numerous challenges for generalized parsing and extraction due to a lack of standardized formatting and grammatical descriptor definitions. This paper describes a large-scale effort to automatically extract and standardize the data in Wiktionary and make it available for use by the NLP research community. The methodological innovations include a multidimensional table parsing algorithm, a cross-lexeme, token-frequency-based method of separating inflectional form data from grammatical descriptors, the normalization of grammatical descriptors to a unified annotation scheme that accounts for cross-linguistic diversity, and a verification and correction process that exploits within-language, cross-lexeme table format consistency to minimize human effort. The effort described here resulted in the extraction of a uniquely large normalized resource of nearly 1,000,000 inflectional paradigms across 350 languages. Evaluation shows that even though the data is extracted using a language-independent approach, it is comparable in quantity and quality to data extracted using hand-tuned, language-specific approaches.", "keyphrases": ["normalization", "wiktionary", "inflectional paradigm"]} +{"id": "morales-zhai-2017-identifying", "title": "Identifying Humor in Reviews using Background Text Sources", "abstract": "We study the problem of automatically identifying humorous text from a new kind of text data, i.e., online reviews. We propose a generative language model, based on the theory of incongruity, to model humorous text, which allows us to leverage background text sources, such as Wikipedia entry descriptions, and enables construction of multiple features for identifying humorous reviews. Evaluation of these features using supervised learning for classifying reviews into humorous and non-humorous reviews shows that the features constructed based on the proposed generative model are much more effective than the major features proposed in the existing literature, allowing us to achieve almost 86% accuracy. 
These humorous review predictions can also supply good indicators for identifying helpful reviews.", "keyphrases": ["humor", "background text source", "generative language model", "wikipedia entry description", "yelp review"]} +{"id": "cohen-etal-2012-spectral", "title": "Spectral Learning of Latent-Variable PCFGs", "abstract": "We introduce a spectral learning algorithm for latent-variable PCFGs (Petrov et al., 2006). Under a separability (singular value) condition, we prove that the method provides consistent parameter estimates.", "keyphrases": ["pcfg", "spectral learning algorithm", "definition"]} +{"id": "villavicencio-etal-2004-lexical", "title": "Lexical Encoding of MWEs", "abstract": "Multiword Expressions present a challenge for language technology, given their flexible nature. Each type of multiword expression has its own characteristics, and providing a uniform lexical encoding for them is a difficult task to undertake. Nonetheless, in this paper we present an architecture for the lexical encoding of these expressions in a database, that takes into account their flexibility. This encoding extends in a straightforward manner the one required for simplex (single) words, and maximises the information contained for them in the description of multiwords.", "keyphrases": ["mwes", "lexical encoding", "idiom"]} +{"id": "dong-lapata-2018-coarse", "title": "Coarse-to-Fine Decoding for Neural Semantic Parsing", "abstract": "Semantic parsing aims at mapping natural language utterances into structured meaning representations. In this work, we propose a structure-aware neural architecture which decomposes the semantic parsing process into two stages. Given an input utterance, we first generate a rough sketch of its meaning, where low-level information (such as variable names and arguments) is glossed over. Then, we fill in missing details by taking into account the natural language input and the sketch itself. Experimental results on four datasets characteristic of different domains and meaning representations show that our approach consistently improves performance, achieving competitive results despite the use of relatively simple decoders.", "keyphrases": ["neural semantic parsing", "natural language utterance", "sketch", "coarse-to-fine"]} +{"id": "gao-etal-2021-making", "title": "Making Pre-trained Language Models Better Few-shot Learners", "abstract": "The recent GPT-3 model (Brown et al., 2020) achieves remarkable few-shot performance solely by leveraging a natural-language prompt and a few task demonstrations as input context. Inspired by their findings, we study few-shot learning in a more practical scenario, where we use smaller language models for which fine-tuning is computationally efficient. We present LM-BFF\u2014better few-shot fine-tuning of language models\u2014a suite of simple and complementary techniques for fine-tuning language models on a small number of annotated examples. Our approach includes (1) prompt-based fine-tuning together with a novel pipeline for automating prompt generation; and (2) a refined strategy for dynamically and selectively incorporating demonstrations into each context. Finally, we present a systematic evaluation for analyzing few-shot performance on a range of NLP tasks, including classification and regression. Our experiments demonstrate that our methods combine to dramatically outperform standard fine-tuning procedures in this low resource setting, achieving up to 30% absolute improvement, and 11% on average across all tasks. 
Our approach makes minimal assumptions on task resources and domain expertise, and hence constitutes a strong task-agnostic method for few-shot learning.", "keyphrases": ["language model", "learner", "fine-tuning", "in-context learning", "text classification task"]} +{"id": "ling-etal-2017-program", "title": "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems", "abstract": "Solving algebraic word problems requires executing a series of arithmetic operations\u2014a program\u2014to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.", "keyphrases": ["algebraic word problem", "explanation", "program", "solver", "correct answer"]} +{"id": "power-etal-2003-document", "title": "Document Structure", "abstract": "We argue the case for abstract document structure as a separate descriptive level in the analysis and generation of written texts. The purpose of this representation is to mediate between the message of a text (i.e., its discourse structure) and its physical presentation (i.e., its organization into graphical constituents like sections, paragraphs, sentences, bulleted lists, figures, and footnotes). Abstract document structure can be seen as an extension of Nunberg's text-grammar; it is also closely related to logical markup in languages like HTML and LaTEX. We show that by using this intermediate representation, several subtasks in language generation and language understanding can be defined more cleanly.", "keyphrases": ["language generation", "document structure", "constraint satisfaction"]} +{"id": "mitchell-etal-2015-quantifying", "title": "Quantifying the Language of Schizophrenia in Social Media", "abstract": "Analyzing symptoms of schizophrenia has traditionally been challenging given the low prevalence of the condition, affecting around 1% of the U.S. population. We explore potential linguistic markers of schizophrenia using the tweets of self-identified schizophrenia sufferers, and describe several natural language processing (NLP) methods to analyze the language of schizophrenia. We examine how these signals compare with the widely-used LIWC categories for understanding mental health (Pennebaker et al., 2007), and provide preliminary evidence of additional linguistic signals that may aid in identifying and getting help to people suffering from schizophrenia.", "keyphrases": ["schizophrenia", "linguistic marker", "social medium", "twitter"]} +{"id": "choubey-etal-2020-discourse", "title": "Discourse as a Function of Event: Profiling Discourse Structure in News Articles around the Main Event", "abstract": "Understanding discourse structures of news articles is vital to effectively contextualize the occurrence of a news event. 
To enable computational modeling of news structures, we apply an existing theory of functional discourse structure for news articles that revolves around the main event and create a human-annotated corpus of 802 documents spanning over four domains and three media sources. Next, we propose several document-level neural-network models to automatically construct news content structures. Finally, we demonstrate that incorporating system predicted news structures yields new state-of-the-art performance for event coreference resolution. The news documents we annotated are openly available and the annotations are publicly released for future research.", "keyphrases": ["main event", "news article", "discourse role"]} +{"id": "su-etal-2019-dual", "title": "Dual Supervised Learning for Natural Language Understanding and Generation", "abstract": "Natural language understanding (NLU) and natural language generation (NLG) are both critical research topics in the NLP and dialogue fields. Natural language understanding aims to extract the core semantic meaning from the given utterances, while natural language generation is the opposite, with the goal of constructing corresponding sentences based on the given semantics. However, such a dual relationship has not been investigated in the literature. This paper proposes a novel learning framework for natural language understanding and generation on top of dual supervised learning, providing a way to exploit the duality. The preliminary experiments show that the proposed approach boosts the performance for both tasks, demonstrating the effectiveness of the dual relationship.", "keyphrases": ["natural language understanding", "nlu", "dual supervised learning"]} +{"id": "he-etal-2015-multi", "title": "Multi-Perspective Sentence Similarity Modeling with Convolutional Neural Networks", "abstract": "Modeling sentence similarity is complicated by the ambiguity and variability of linguistic expression. To cope with these challenges, we propose a model for comparing sentences that uses a multiplicity of perspectives. We first model each sentence using a convolutional neural network that extracts features at multiple levels of granularity and uses multiple types of pooling. We then compare our sentence representations at several granularities using multiple similarity metrics. We apply our model to three tasks, including the Microsoft Research paraphrase identification task and two SemEval semantic textual similarity tasks. We obtain strong performance on all tasks, rivaling or exceeding the state of the art without using external resources such as WordNet or parsers.", "keyphrases": ["sentence similarity", "convolutional neural network", "well-studied task"]} +{"id": "wang-etal-2016-recursive", "title": "Recursive Neural Conditional Random Fields for Aspect-based Sentiment Analysis", "abstract": "In aspect-based sentiment analysis, extracting aspect terms along with the opinions being expressed from user-generated content is one of the most important subtasks. Previous studies have shown that exploiting connections between aspect and opinion terms is promising for this task. In this paper, we propose a novel joint model that integrates recursive neural networks and conditional random fields into a unified framework for explicit aspect and opinion terms co-extraction. The proposed model learns high-level discriminative features and double-propagates information between aspect and opinion terms simultaneously. 
Moreover, it is flexible to incorporate hand-crafted features into the proposed model to further boost its information extraction performance. Experimental results on the SemEval Challenge 2014 dataset show the superiority of our proposed model over several baseline methods as well as the winning systems of the challenge.", "keyphrases": ["conditional random field", "sentiment analysis", "aspect term", "recursive neural network"]} +{"id": "baldwin-lui-2010-language", "title": "Language Identification: The Long and the Short of the Matter", "abstract": "Language identification is the task of identifying the language a given document is written in. This paper describes a detailed examination of what models perform best under different conditions, based on experiments across three separate datasets and a range of tokenisation strategies. We demonstrate that the task becomes increasingly difficult as we increase the number of languages, reduce the amount of training data and reduce the length of documents. We also show that it is possible to perform language identification without having to perform explicit character encoding detection.", "keyphrases": ["condition", "length", "language identification", "baldwin"]} +{"id": "haouari-etal-2021-arcov19", "title": "ArCOV19-Rumors: Arabic COVID-19 Twitter Dataset for Misinformation Detection", "abstract": "In this paper we introduce ArCOV19-Rumors, an Arabic COVID-19 Twitter dataset for misinformation detection composed of tweets containing claims from 27th January till the end of April 2020. We collected 138 verified claims, mostly from popular fact-checking websites, and identified 9.4K relevant tweets to those claims. Tweets were manually-annotated by veracity to support research on misinformation detection, which is one of the major problems faced during a pandemic. ArCOV19-Rumors supports two levels of misinformation detection over Twitter: verifying free-text claims (called claim-level verification) and verifying claims expressed in tweets (called tweet-level verification). Our dataset covers, in addition to health, claims related to other topical categories that were influenced by COVID-19, namely, social, politics, sports, entertainment, and religious. Moreover, we present benchmarking results for tweet-level verification on the dataset. We experimented with SOTA models of versatile approaches that either exploit content, user profiles features, temporal features and propagation structure of the conversational threads for tweet verification.", "keyphrases": ["covid-19 twitter dataset", "misinformation detection", "arcov19-rumor"]} +{"id": "yu-etal-2018-syntaxsqlnet", "title": "SyntaxSQLNet: Syntax Tree Networks for Complex and Cross-Domain Text-to-SQL Task", "abstract": "Most existing studies in text-to-SQL tasks do not require generating complex SQL queries with multiple clauses or sub-queries, and generalizing to new, unseen databases. In this paper we propose SyntaxSQLNet, a syntax tree network to address the complex and cross-domain text-to-SQL generation task. SyntaxSQLNet employs a SQL specific syntax tree-based decoder with SQL generation path history and table-aware column attention encoders. We evaluate SyntaxSQLNet on a new large-scale text-to-SQL corpus containing databases with multiple tables and complex SQL queries containing multiple SQL clauses and nested queries. We use a database split setting where databases in the test set are unseen during training. 
Experimental results show that SyntaxSQLNet can handle a significantly greater number of complex SQL examples than prior work, outperforming the previous state-of-the-art model by 9.5% in exact matching accuracy. To our knowledge, we are the first to study this complex text-to-SQL task. Our task and models with the latest updates are available at .", "keyphrases": ["text-to-sql generation", "syntaxsqlnet", "natural language question"]}
+{"id": "fahmi-bouma-2006-learning", "title": "Learning to Identify Definitions using Syntactic Features", "abstract": "This paper describes an approach to learning concept definitions which operates on fully parsed text. A subcorpus of the Dutch version of Wikipedia was searched for sentences which have the syntactic properties of definitions. Next, we experimented with various text classification techniques to distinguish actual definitions from other sentences. A maximum entropy classifier which incorporates features referring to the position of the sentence in the document as well as various syntactic features, gives the best results.", "keyphrases": ["definition", "syntactic feature", "position", "machine learning", "symbolic method"]}
+{"id": "hardmeier-etal-2013-docent", "title": "Docent: A Document-Level Decoder for Phrase-Based Statistical Machine Translation", "abstract": "We describe Docent, an open-source decoder for statistical machine translation that breaks with the usual sentence-by-sentence paradigm and translates complete documents as units. By taking translation to the document level, our decoder can handle feature models with arbitrary discourse-wide dependencies and constitutes an essential infrastructure component in the quest for discourse-aware SMT models.", "keyphrases": ["document-level decoder", "statistical machine translation", "unit", "docent", "document-level smt paradigm"]}
+{"id": "vaswani-etal-2013-decoding", "title": "Decoding with Large-Scale Neural Language Models Improves Translation", "abstract": "We explore the application of neural language models to machine translation. We develop a new model that combines the neural probabilistic language model of Bengio et al., rectified linear units, and noise-contrastive estimation, and we incorporate it into a machine translation system both by reranking k-best lists and by direct integration into the decoder. Our large-scale, large-vocabulary experiments across four language pairs show that our neural language model improves translation quality by up to 1.1 Bleu.", "keyphrases": ["language model", "unit", "noise-contrastive estimation", "estimation", "feedforward"]}
+{"id": "verhagen-etal-2005-automating", "title": "Automating Temporal Annotation with TARSQI", "abstract": "We present an overview of TARSQI, a modular system for automatic temporal annotation that adds time expressions, events and temporal relations to news texts.", "keyphrases": ["tarsqi", "time expression", "toolkit"]}
+{"id": "wang-etal-2019-learning-deep", "title": "Learning Deep Transformer Models for Machine Translation", "abstract": "Transformer is the state-of-the-art model in recent machine translation evaluations. Two strands of research are promising to improve models of this kind: the first uses wide networks (a.k.a. Transformer-Big) and has been the de facto standard for development of the Transformer system, and the other uses deeper language representation but faces the difficulty arising from learning deep networks. Here, we continue the line of research on the latter. 
We claim that a truly deep Transformer model can surpass the Transformer-Big counterpart by 1) proper use of layer normalization and 2) a novel way of passing the combination of previous layers to the next. On WMT'16 English-German and NIST OpenMT'12 Chinese-English tasks, our deep system (30/25-layer encoder) outperforms the shallow Transformer-Big/Base baseline (6-layer encoder) by 0.4-2.4 BLEU points. As another bonus, the deep model is 1.6X smaller in size and 3X faster in training than Transformer-Big.", "keyphrases": ["deep transformer", "machine translation", "previous layer"]} +{"id": "li-etal-2004-applying", "title": "Applying Machine Learning to Chinese Temporal Relation Resolution", "abstract": "Temporal relation resolution involves extraction of temporal information explicitly or implicitly embedded in a language. This information is often inferred from a variety of interactive grammatical and lexical cues, especially in Chinese. For this purpose, inter-clause relations (temporal or otherwise) in a multiple-clause sentence play an important role. In this paper, a computational model based on machine learning and heterogeneous collaborative bootstrapping is proposed for analyzing temporal relations in a Chinese multiple-clause sentence. The model makes use of the fact that events are represented in different temporal structures. It takes into account the effects of linguistic features such as tense/aspect, temporal connectives, and discourse structures. A set of experiments has been conducted to investigate how linguistic features could affect temporal relation resolution.", "keyphrases": ["machine learning", "chinese", "temporal relation"]} +{"id": "liu-lapata-2019-hierarchical", "title": "Hierarchical Transformers for Multi-Document Summarization", "abstract": "In this paper, we develop a neural summarization model which can effectively process multiple input documents and distill Transformer architecture with the ability to encode documents in a hierarchical manner. We represent cross-document relationships via an attention mechanism which allows to share information as opposed to simply concatenating text spans and processing them as a flat sequence. Our model learns latent dependencies among textual units, but can also take advantage of explicit graph representations focusing on similarity or discourse relations. Empirical results on the WikiSum dataset demonstrate that the proposed architecture brings substantial improvements over several strong baselines.", "keyphrases": ["summarization", "cross-document relationship", "wikisum", "hierarchical transformer", "large-scale dataset"]} +{"id": "wible-tsao-2010-stringnet", "title": "StringNet as a Computational Resource for Discovering and Investigating Linguistic Constructions", "abstract": "We describe and motivate the design of a lexico-grammatical knowledgebase called StringNet and illustrate its significance for research into constructional phenomena in English. StringNet consists of a massive archive of what we call hybrid n-grams. Unlike traditional n-grams, hybrid n-grams can consist of any co-occurring combination of POS tags, lexemes, and specific word forms. Further, we detect and represent superordinate and subordinate relations among hybrid n-grams by cross-indexing, allowing the navigation of StringNet through these hierarchies, from specific fixed expressions (\"It's the thought that counts\") up to their hosting proto-constructions (e.g. the It Cleft construction: \"it's the [noun] that [verb]\"). 
StringNet supports discovery of grammatical dependencies (e.g., subject-verb agreement) in non-canonical configurations as well as lexical dependencies (e.g., adjective/noun collocations specific to families of constructions).", "keyphrases": ["pos tag", "stringnet", "frequency"]} +{"id": "nalisnick-baird-2013-character", "title": "Character-to-Character Sentiment Analysis in Shakespeare's Plays", "abstract": "We present an automatic method for analyzing sentiment dynamics between characters in plays. This literary format\u2019s structured dialogue allows us to make assumptions about who is participating in a conversation. Once we have an idea of who a character is speaking to, the sentiment in his or her speech can be attributed accordingly, allowing us to generate lists of a character\u2019s enemies and allies as well as pinpoint scenes critical to a character\u2019s emotional development. Results of experiments on Shakespeare\u2019s plays are presented along with discussion of how this work can be extended to unstructured texts (i.e. novels).", "keyphrases": ["sentiment analysis", "play", "character"]} +{"id": "bawden-etal-2018-evaluating", "title": "Evaluating Discourse Phenomena in Neural Machine Translation", "abstract": "For machine translation to tackle discourse phenomena, models must have access to extra-sentential linguistic context. There has been recent interest in modelling context in neural machine translation (NMT), but models have been principally evaluated with standard automatic metrics, poorly adapted to evaluating discourse phenomena. In this article, we present hand-crafted, discourse test sets, designed to test the models' ability to exploit previous source and target sentences. We investigate the performance of recently proposed multi-encoder NMT models trained on subtitles for English to French. We also explore a novel way of exploiting context from the previous sentence. Despite gains using BLEU, multi-encoder models give limited improvement in the handling of discourse phenomena: 50% accuracy on our coreference test set and 53.5% for coherence/cohesion (compared to a non-contextual baseline of 50%). A simple strategy of decoding the concatenation of the previous and current sentence leads to good performance, and our novel strategy of multi-encoding and decoding of two sentences leads to the best performance (72.5% for coreference and 57% for coherence/cohesion), highlighting the importance of target-side context.", "keyphrases": ["discourse phenomena", "neural machine translation", "previous sentence", "lexical consistency"]} +{"id": "yao-van-durme-2011-nonparametric", "title": "Nonparametric Bayesian Word Sense Induction", "abstract": "We propose the use of a nonparametric Bayesian model, the Hierarchical Dirichlet Process (HDP), for the task of Word Sense Induction. Results are shown through comparison against Latent Dirichlet Allocation (LDA), a parametric Bayesian model employed by Brody and Lapata (2009) for this task. We find that the two models achieve similar levels of induction quality, while the HDP confers the advantage of automatically inducing a variable number of senses per word, as compared to manually fixing the number of senses a priori, as in LDA. This flexibility allows for the model to adapt to terms with greater or lesser polysemy, when evidenced by corpus distributional statistics. 
When trained on out-of-domain data, experimental results confirm the model's ability to make use of a restricted set of topically coherent induced senses, when then applied in a restricted domain.", "keyphrases": ["word sense induction", "bayesian model", "hdp", "van", "yao"]} +{"id": "wieting-etal-2016-charagram", "title": "Charagram: Embedding Words and Sentences via Character n-grams", "abstract": "We present Charagram embeddings, a simple approach for learning character-based compositional models to embed textual sequences. A word or sentence is represented using a character n-gram count vector, followed by a single nonlinear transformation to yield a low-dimensional embedding. We use three tasks for evaluation: word similarity, sentence similarity, and part-of-speech tagging. We demonstrate that Charagram embeddings outperform more complex architectures based on character-level recurrent and convolutional neural networks, achieving new state-of-the-art performance on several similarity tasks.", "keyphrases": ["charagram", "subword information", "well embedding"]} +{"id": "gomez-rodriguez-vilares-2018-constituent", "title": "Constituent Parsing as Sequence Labeling", "abstract": "We introduce a method to reduce constituent parsing to sequence labeling. For each word w_t, it generates a label that encodes: (1) the number of ancestors in the tree that the words w_t and w_t+1 have in common, and (2) the nonterminal symbol at the lowest common ancestor. We first prove that the proposed encoding function is injective for any tree without unary branches. In practice, the approach is made extensible to all constituency trees by collapsing unary branches. We then use the PTB and CTB treebanks as testbeds and propose a set of fast baselines. We achieve 90% F-score on the PTB test set, outperforming the Vinyals et al. (2015) sequence-to-sequence parser. In addition, sacrificing some accuracy, our approach achieves the fastest constituent parsing speeds reported to date on PTB by a wide margin.", "keyphrases": ["sequence labeling", "common ancestor", "constituent"]} +{"id": "davison-etal-2019-commonsense", "title": "Commonsense Knowledge Mining from Pretrained Models", "abstract": "Inferring commonsense knowledge is a key challenge in machine learning. Due to the sparsity of training data, previous work has shown that supervised methods for commonsense knowledge mining underperform when evaluated on novel data. In this work, we develop a method for generating commonsense knowledge using a large, pre-trained bidirectional language model. By transforming relational triples into masked sentences, we can use this model to rank a triple's validity by the estimated pointwise mutual information between the two entities. Since we do not update the weights of the bidirectional model, our approach is not biased by the coverage of any one commonsense knowledge base. Though we do worse on a held-out test set than models explicitly trained on a corresponding training set, our approach outperforms these methods when mining commonsense knowledge from new sources, suggesting that our unsupervised technique generalizes better than current supervised approaches.", "keyphrases": ["language model", "commonsense knowledge", "fine-tuning"]} +{"id": "qian-etal-2014-bilingual", "title": "Bilingual Active Learning for Relation Classification via Pseudo Parallel Corpora", "abstract": "Active learning (AL) has been proven effective to reduce human annotation efforts in NLP. 
However, previous studies on AL are limited to applications in a single language. This paper proposes a bilingual active learning paradigm for relation classification, where the unlabeled instances are first jointly chosen in terms of their prediction uncertainty scores in two languages and then manually labeled by an oracle. Instead of using a parallel corpus, labeled and unlabeled instances in one language are translated into ones in the other language and all instances in both languages are then fed into a bilingual active learning engine as pseudo parallel corpora. Experimental results on the ACE RDC 2005 Chinese and English corpora show that bilingual active learning for relation classification significantly outperforms monolingual active learning.", "keyphrases": ["pseudo parallel corpora", "learning paradigm", "bilingual active learning"]} +{"id": "miura-etal-2017-unifying", "title": "Unifying Text, Metadata, and User Network Representations with a Neural Network for Geolocation Prediction", "abstract": "We propose a novel geolocation prediction model using a complex neural network. Geolocation prediction in social media has attracted many researchers to use information of various types. Our model unifies text, metadata, and user network representations with an attention mechanism to overcome previous ensemble approaches. In an evaluation using two open datasets, the proposed model exhibited a maximum 3.8% increase in accuracy and a maximum of 6.6% increase in accuracy@161 against previous models. We further analyzed several intermediate layers of our model, which revealed that their states capture some statistical characteristics of the datasets.", "keyphrases": ["metadata", "user network representation", "geolocation prediction"]} +{"id": "ferreira-freitas-2020-natural", "title": "Natural Language Premise Selection: Finding Supporting Statements for Mathematical Text", "abstract": "Mathematical text is written using a combination of words and mathematical expressions. This combination, along with a specific way of structuring sentences makes it challenging for state-of-art NLP tools to understand and reason on top of mathematical discourse. In this work, we propose a new NLP task, the natural premise selection, which is used to retrieve supporting definitions and supporting propositions that are useful for generating an informal mathematical proof for a particular statement. We also make available a dataset, NL-PS, which can be used to evaluate different approaches for the natural premise selection task. Using different baselines, we demonstrate the underlying interpretation challenges associated with the task.", "keyphrases": ["premise selection", "mathematical text", "adaptation"]} +{"id": "xu-lapata-2020-coarse", "title": "Coarse-to-Fine Query Focused Multi-Document Summarization", "abstract": "We consider the problem of better modeling query-cluster interactions to facilitate query focused multi-document summarization. Due to the lack of training data, existing work relies heavily on retrieval-style methods for assembling query relevant summaries. We propose a coarse-to-fine modeling framework which employs progressively more accurate modules for estimating whether text segments are relevant, likely to contain an answer, and central. The modules can be independently developed and leverage training data if available. 
We present an instantiation of this framework with a trained evidence estimator which relies on distant supervision from question answering (where various resources exist) to identify segments which are likely to answer the query and should be included in the summary. Our framework is robust across domains and query types (i.e., long vs short) and outperforms strong comparison systems on benchmark datasets.", "keyphrases": ["query", "summarization", "coarse-to-fine framework"]} +{"id": "zhou-wang-2018-mojitalk", "title": "MojiTalk: Generating Emotional Responses at Scale", "abstract": "Generating emotional language is a key step towards building empathetic natural language processing agents. However, a major challenge for this line of research is the lack of large-scale labeled training data, and previous studies are limited to only small sets of human annotated sentiment labels. Additionally, explicitly controlling the emotion and sentiment of generated text is also difficult. In this paper, we take a more radical approach: we exploit the idea of leveraging Twitter data that are naturally labeled with emojis. We collect a large corpus of Twitter conversations that include emojis in the response and assume the emojis convey the underlying emotions of the sentence. We investigate several conditional variational autoencoders training on these conversations, which allow us to use emojis to control the emotion of the generated text. Experimentally, we show in our quantitative and qualitative analyses that the proposed models can successfully generate high-quality abstractive conversation responses in accordance with designated emotions.", "keyphrases": ["emotion", "emojis", "response generation", "signal", "vae"]} +{"id": "wang-etal-2017-two", "title": "A Two-Stage Parsing Method for Text-Level Discourse Analysis", "abstract": "Previous work introduced transition-based algorithms to form a unified architecture of parsing rhetorical structures (including span, nuclearity and relation), but did not achieve satisfactory performance. In this paper, we propose that transition-based model is more appropriate for parsing the naked discourse tree (i.e., identifying span and nuclearity) due to data sparsity. At the same time, we argue that relation labeling can benefit from naked tree structure and should be treated elaborately with consideration of three kinds of relations including within-sentence, across-sentence and across-paragraph relations. Thus, we design a pipelined two-stage parsing method for generating an RST tree from text. Experimental results show that our method achieves state-of-the-art performance, especially on span and nuclearity identification.", "keyphrases": ["two-stage", "discourse tree", "same parser"]} +{"id": "chakravarthi-2020-hopeedi", "title": "HopeEDI: A Multilingual Hope Speech Detection Dataset for Equality, Diversity, and Inclusion", "abstract": "Over the past few years, systems have been developed to control online content and eliminate abusive, offensive or hate speech content. However, people in power sometimes misuse this form of censorship to obstruct the democratic right of freedom of speech. Therefore, it is imperative that research should take a positive reinforcement approach towards online content that is encouraging, positive and supportive contents. Until now, most studies have focused on solving this problem of negativity in the English language, though the problem is much more than just harmful content. Furthermore, it is multilingual as well. 
Thus, we have constructed a Hope Speech dataset for Equality, Diversity and Inclusion (HopeEDI) containing user-generated comments from the social media platform YouTube with 28,451, 20,198 and 10,705 comments in English, Tamil and Malayalam, respectively, manually labelled as containing hope speech or not. To our knowledge, this is the first research of its kind to annotate hope speech for equality, diversity and inclusion in a multilingual setting. We determined the inter-annotator agreement of our dataset using Krippendorff's alpha. Further, we created several baselines to benchmark the resulting dataset and the results have been expressed using precision, recall and F1-score. The dataset is publicly available for the research community. We hope that this resource will spur further research on encouraging inclusive and responsive speech that reinforces positiveness.", "keyphrases": ["hope speech detection", "user-generated comment", "hopeedi", "language identification", "emotion"]}
+{"id": "luo-2007-coreference", "title": "Coreference or Not: A Twin Model for Coreference Resolution", "abstract": "A twin-model is proposed for coreference resolution: a link component, modeling the coreferential relationship between an anaphor and a candidate antecedent, and a creation component modeling the possibility that a phrase is not coreferential with any candidate antecedent. The creation model depends on all candidate antecedents and is often expensive to compute; therefore, constraints are imposed on feature forms so that features in the creation model can be efficiently computed from feature values in the link model. The proposed twin-model is tested on the data from the 2005 Automatic Content Extraction (ACE) task and the proposed model performs better than a thresholding baseline without tuning free parameter.", "keyphrases": ["coreference resolution", "twin-model", "coreferential relationship", "anaphor", "candidate"]}
+{"id": "liu-etal-2017-soft", "title": "A Soft-label Method for Noise-tolerant Distantly Supervised Relation Extraction", "abstract": "Distant-supervised relation extraction inevitably suffers from wrong labeling problems because it heuristically labels relational facts with knowledge bases. Previous sentence level denoise models don't achieve satisfying performances because they use hard labels which are determined by distant supervision and immutable during training. To this end, we introduce an entity-pair level denoise method which exploits semantic information from correctly labeled entity pairs to correct wrong labels dynamically during training. We propose a joint score function which combines the relational scores based on the entity-pair representation and the confidence of the hard label to obtain a new label, namely a soft label, for certain entity pair. During training, soft labels instead of hard labels serve as gold labels. Experiments on the benchmark dataset show that our method dramatically reduces noisy instances and outperforms other state-of-the-art systems.", "keyphrases": ["soft-label method", "relation extraction", "noisy instance"]}
+{"id": "mei-etal-2016-talk", "title": "What to talk about and how? Selective Generation using LSTMs with Coarse-to-Fine Alignment", "abstract": "We propose an end-to-end, domain-independent neural encoder-aligner-decoder model for selective generation, i.e., the joint task of content selection and surface realization. 
Our model first encodes a full set of over-determined database event records via an LSTM-based recurrent neural network, then utilizes a novel coarse-to-fine aligner to identify the small subset of salient records to talk about, and finally employs a decoder to generate free-form descriptions of the aligned, selected records. Our model achieves the best selection and generation results reported to-date (with 59% relative improvement in generation) on the benchmark WeatherGov dataset, despite using no specialized features or linguistic resources. Using an improved k-nearest neighbor beam filter helps further. We also perform a series of ablations and visualizations to elucidate the contributions of our key model components. Lastly, we evaluate the generalizability of our model on the RoboCup dataset, and get results that are competitive with or better than the state-of-the-art, despite being severely data-starved.", "keyphrases": ["selective generation", "end-to-end", "encoder-aligner-decoder model", "recurrent neural network", "sentence planning"]} +{"id": "mirkin-etal-2009-source", "title": "Source-Language Entailment Modeling for Translating Unknown Terms", "abstract": "This paper addresses the task of handling unknown terms in SMT. We propose using source-language monolingual models and resources to paraphrase the source text prior to translation. We further present a conceptual extension to prior work by allowing translations of entailed texts rather than paraphrases only. A method for performing this process efficiently is presented and applied to some 2500 sentences with unknown terms. Our experiments show that the proposed approach substantially increases the number of properly translated texts.", "keyphrases": ["entailment", "unknown term", "paraphrase", "synonym"]} +{"id": "habernal-gurevych-2017-argumentation", "title": "Argumentation Mining in User-Generated Web Discourse", "abstract": "The goal of argumentation mining, an evolving research field in computational linguistics, is to design methods capable of analyzing people's argumentation. In this article, we go beyond the state of the art in several ways. (i) We deal with actual Web data and take up the challenges given by the variety of registers, multiple domains, and unrestricted noisy user-generated Web discourse. (ii) We bridge the gap between normative argumentation theories and argumentation phenomena encountered in actual data by adapting an argumentation model tested in an extensive annotation study. (iii) We create a new gold standard corpus (90k tokens in 340 documents) and experiment with several machine learning methods to identify argument components. We offer the data, source codes, and annotation guidelines to the community under free licenses. Our findings show that argumentation mining in user-generated Web discourse is a feasible but challenging task.", "keyphrases": ["user-generated web discourse", "annotation study", "argumentation mining", "premise", "forum post"]} +{"id": "smith-fellbaum-2004-medical", "title": "Medical WordNet: A New Methodology for the Construction and Validation of Information Resources for Consumer Health", "abstract": "A consumer health information system must be able to comprehend both expert and nonexpert medical vocabulary and to map between the two. 
We describe an ongoing project to create a new lexical database called Medical WordNet (MWN), consisting of medically relevant terms used by and intelligible to non-expert subjects and supplemented by a corpus of natural-language sentences that is designed to provide medically validated contexts for MWN terms. The corpus derives primarily from online health information sources targeted to consumers, and involves two sub-corpora, called Medical FactNet (MFN) and Medical BeliefNet (MBN), respectively. The former consists of statements accredited as true on the basis of a rigorous process of validation, the latter of statements which non-experts believe to be true. We summarize the MWN / MFN / MBN project, and describe some of its applications.", "keyphrases": ["validation", "mwn", "medical wordnet"]} +{"id": "gao-etal-2018-action", "title": "What Action Causes This? Towards Naive Physical Action-Effect Prediction", "abstract": "Despite recent advances in knowledge representation, automated reasoning, and machine learning, artificial agents still lack the ability to understand basic action-effect relations regarding the physical world, for example, the action of cutting a cucumber most likely leads to the state where the cucumber is broken apart into smaller pieces. If artificial agents (e.g., robots) ever become our partners in joint tasks, it is critical to empower them with such action-effect understanding so that they can reason about the state of the world and plan for actions. Towards this goal, this paper introduces a new task on naive physical action-effect prediction, which addresses the relations between concrete actions (expressed in the form of verb-noun pairs) and their effects on the state of the physical world as depicted by images. We collected a dataset for this task and developed an approach that harnesses web image data through distant supervision to facilitate learning for action-effect prediction. Our empirical results have shown that web data can be used to complement a small number of seed examples (e.g., three examples for each action) for model learning. This opens up possibilities for agents to learn physical action-effect relations for tasks at hand through communication with humans with a few examples.", "keyphrases": ["action", "world", "causal relation"]} +{"id": "zhang-weiss-2016-stack", "title": "Stack-propagation: Improved Representation Learning for Syntax", "abstract": "Traditional syntax models typically leverage part-of-speech (POS) information by constructing features from hand-tuned templates. We demonstrate that a better approach is to utilize POS tags as a regularizer of learned representations. We propose a simple method for learning a stacked pipeline of models which we call \u201cstack-propagation\u201d. We apply this to dependency parsing and tagging, where we use the hidden layer of the tagger network as a representation of the input tokens for the parser. At test time, our parser does not require predicted POS tags. 
On 19 languages from the Universal Dependencies, our method is 1.3% (absolute) more accurate than a state-of-the-art graph-based approach and 2.7% more accurate than the most comparable greedy model.", "keyphrases": ["pos tagging", "dependency parsing", "stack-propagation"]}
+{"id": "aroyehun-gelbukh-2018-aggression", "title": "Aggression Detection in Social Media: Using Deep Neural Networks, Data Augmentation, and Pseudo Labeling", "abstract": "With the advent of the read-write web which facilitates social interactions in online spaces, the rise of anti-social behaviour in online spaces has attracted the attention of researchers. In this paper, we address the challenge of automatically identifying aggression in social media posts. Our team, saroyehun, participated in the English track of the Aggression Detection in Social Media Shared Task. On this task, we investigate the efficacy of deep neural network models of varying complexity. Our results reveal that deep neural network models require more data points to do better than an NBSVM linear baseline based on character n-grams. Our improved deep neural network models were trained on augmented data and pseudo labeled examples. Our LSTM classifier receives a weighted macro-F1 score of 0.6425 to rank first overall on the Facebook subtask of the shared task. On the social media sub-task, our CNN-LSTM model records a weighted macro-F1 score of 0.5920 to place third overall.", "keyphrases": ["data augmentation", "aggression detection", "recurrent neural network"]}
+{"id": "huang-huang-2013-optimized", "title": "Optimized Event Storyline Generation based on Mixture-Event-Aspect Model", "abstract": "Recently, much research focuses on event storyline generation, which aims to produce a concise, global and temporal event summary from a collection of articles. Generally, each event contains multiple sub-events and the storyline should be composed by the component summaries of all the sub-events. However, different sub-events have different part-whole relationship with the major event, which is important to correspond to users\u2019 interests but seldom considered in previous work. To distinguish different types of sub-events, we propose a mixture-event-aspect model which models different sub-events into local and global aspects. Combining these local/global aspects with summarization requirements together, we utilize an optimization method to generate the component summaries along the timeline. We develop experimental systems on 6 distinctively different datasets. Evaluation and comparison results indicate the effectiveness of our proposed method.", "keyphrases": ["event storyline generation", "mixture-event-aspect model", "sub-event"]}
+{"id": "komachi-etal-2006-phrase", "title": "Phrase reordering for statistical machine translation based on predicate-argument structure", "abstract": "In this paper, we describe a novel phrase reordering model based on predicate-argument structure. Our phrase reordering method utilizes a general predicate-argument structure analyzer to reorder source language chunks based on predicate-argument structure. We explicitly model long-distance phrase alignments by reordering arguments and predicates. The reordering approach is applied as a preprocessing step in training phase of a phrase-based statistical MT system. 
We report experimental results in the evaluation campaign of IWSLT 2006.", "keyphrases": ["predicate-argument structure", "input sentence", "heuristic rule"]}
+{"id": "henderson-etal-2008-hybrid", "title": "Hybrid Reinforcement/Supervised Learning of Dialogue Policies from Fixed Data Sets", "abstract": "We propose a method for learning dialogue management policies from a fixed data set. The method addresses the challenges posed by Information State Update (ISU)-based dialogue systems, which represent the state of a dialogue as a large set of features, resulting in a very large state space and a huge policy space. To address the problem that any fixed data set will only provide information about small portions of these state and policy spaces, we propose a hybrid model that combines reinforcement learning with supervised learning. The reinforcement learning is used to optimize a measure of dialogue reward, while the supervised learning is used to restrict the learned policy to the portions of these spaces for which we have data. We also use linear function approximation to address the need to generalize from a fixed amount of data to large state spaces. To demonstrate the effectiveness of this method on this challenging task, we trained this model on the COMMUNICATOR corpus, to which we have added annotations for user actions and Information States. When tested with a user simulation trained on a different part of the same data set, our hybrid model outperforms a pure supervised learning model and a pure reinforcement learning model. It also outperforms the hand-crafted systems on the COMMUNICATOR data, according to automatic evaluation measures, improving over the average COMMUNICATOR system policy by 10%. The proposed method will improve techniques for bootstrapping and automatic optimization of dialogue management policies from limited initial data sets.", "keyphrases": ["supervised learning", "dialogue policy", "reinforcement learning approach"]}
+{"id": "rohrbach-etal-2018-object", "title": "Object Hallucination in Image Captioning", "abstract": "Despite continuously improving performance, contemporary image captioning models are prone to \u201challucinating\u201d objects that are not actually in a scene. One problem is that standard metrics only measure similarity to ground truth captions and may not fully capture image relevance. In this work, we propose a new image relevance metric to evaluate current models with veridical visual labels and assess their rate of object hallucination. We analyze how captioning model architectures and learning objectives contribute to object hallucination, explore when hallucination is likely due to image misclassification or language priors, and assess how well current sentence metrics capture object hallucination. We investigate these questions on the standard image captioning benchmark, MSCOCO, using a diverse set of models. Our analysis yields several interesting findings, including that models which score best on standard sentence metrics do not always have lower hallucination and that models which hallucinate more tend to make errors driven by language priors.", "keyphrases": ["image captioning", "object hallucination", "text generation model"]}
+{"id": "li-eisner-2019-specializing", "title": "Specializing Word Embeddings (for Parsing) by Information Bottleneck", "abstract": "Pre-trained word embeddings like ELMo and BERT contain rich syntactic and semantic information, resulting in state-of-the-art performance on various tasks. 
We propose a very fast variational information bottleneck (VIB) method to nonlinearly compress these embeddings, keeping only the information that helps a discriminative parser. We compress each word embedding to either a discrete tag or a continuous vector. In the discrete version, our automatically compressed tags form an alternative tag set: we show experimentally that our tags capture most of the information in traditional POS tag annotations, but our tag sequences can be parsed more accurately at the same level of tag granularity. In the continuous version, we show experimentally that moderately compressing the word embeddings by our method yields a more accurate parser in 8 of 9 languages, unlike simple dimensionality reduction.", "keyphrases": ["information bottleneck", "discriminative parser", "dimensionality reduction"]}
+{"id": "hu-etal-2019-open", "title": "Open-Domain Targeted Sentiment Analysis via Span-Based Extraction and Classification", "abstract": "Open-domain targeted sentiment analysis aims to detect opinion targets along with their sentiment polarities from a sentence. Prior work typically formulates this task as a sequence tagging problem. However, such formulation suffers from problems such as huge search space and sentiment inconsistency. To address these problems, we propose a span-based extract-then-classify framework, where multiple opinion targets are directly extracted from the sentence under the supervision of target span boundaries, and corresponding polarities are then classified using their span representations. We further investigate three approaches under this framework, namely the pipeline, joint, and collapsed models. Experiments on three benchmark datasets show that our approach consistently outperforms the sequence tagging baseline. Moreover, we find that the pipeline model achieves the best performance compared with the other two models.", "keyphrases": ["sentiment analysis", "span representation", "aspect term"]}
+{"id": "heilman-madnani-2013-ets", "title": "ETS: Domain Adaptation and Stacking for Short Answer Scoring", "abstract": "Automatic scoring of short text responses to educational assessment items is a challenging task, particularly because large amounts of labeled data (i.e., human-scored responses) may or may not be available due to the variety of possible questions and topics. As such, it seems desirable to integrate various approaches, making use of model answers from experts (e.g., to give higher scores to responses that are similar), prescored student responses (e.g., to learn direct associations between particular phrases and scores), etc. Here, we describe a system that uses stacking (Wolpert, 1992) and domain adaptation (Daume III, 2007) to achieve this aim, allowing us to integrate item-specific n-gram features and more general text similarity measures (Heilman and Madnani, 2012). We report encouraging results from the Joint Student Response Analysis and 8th Recognizing Textual Entailment Challenge.", "keyphrases": ["domain adaptation", "short answer scoring", "n-gram feature"]}
+{"id": "roth-yih-2004-linear", "title": "A Linear Programming Formulation for Global Inference in Natural Language Tasks", "abstract": "Given a collection of discrete random variables representing outcomes of learned local predictors in natural language, e.g., named entities and relations, we seek an optimal global assignment to the variables in the presence of general (non-sequential) constraints. 
Examples of these constraints include the type of arguments a relation can take, and the mutual activity of different relations, etc. We develop a linear programming formulation for this problem and evaluate it in the context of simultaneously learning named entities and relations. Our approach allows us to efficiently incorporate domain and task specific constraints at decision time, resulting in significant improvements in the accuracy and the \"human-like\" quality of the inferences.", "keyphrases": ["global inference", "integer linear programming", "relation extraction", "mention", "nlp community"]}
+{"id": "zhang-etal-2015-fixed", "title": "The Fixed-Size Ordinally-Forgetting Encoding Method for Neural Network Language Models", "abstract": "In this paper, we propose the new fixed-size ordinally-forgetting encoding (FOFE) method, which can almost uniquely encode any variable-length sequence of words into a fixed-size representation. FOFE can model the word order in a sequence using a simple ordinally-forgetting mechanism according to the positions of words. In this work, we have applied FOFE to feedforward neural network language models (FNN-LMs). Experimental results have shown that without using any recurrent feedbacks, FOFE based FNN-LMs can significantly outperform not only the standard fixed-input FNN-LMs but also the popular recurrent neural network (RNN) LMs.", "keyphrases": ["fixed-size", "ordinally-forgetting encoding", "encoding method"]}
+{"id": "hasegawa-etal-2004-discovering", "title": "Discovering Relations among Named Entities from Large Corpora", "abstract": "Discovering the significant relations embedded in documents would be very useful not only for information retrieval but also for question answering and summarization. Prior methods for relation discovery, however, needed large annotated corpora which cost a great deal of time and effort. We propose an unsupervised method for relation discovery from large corpora. The key idea is clustering pairs of named entities according to the similarity of context words intervening between the named entities. Our experiments using one year of newspapers reveals not only that the relations among named entities could be detected with high recall and precision, but also that appropriate labels could be automatically provided for the relations.", "keyphrases": ["large corpora", "relation extraction", "clustering method", "relationship type", "difficulty"]}
+{"id": "zhang-etal-2006-synchronous", "title": "Synchronous Binarization for Machine Translation", "abstract": "Systems based on synchronous grammars and tree transducers promise to improve the quality of statistical machine translation output, but are often very computationally intensive. The complexity is exponential in the size of individual grammar rules due to arbitrary re-orderings between the two languages, and rules extracted from parallel corpora can be quite large. 
We devise a linear-time algorithm for factoring syntactic re-orderings by binarizing synchronous rules when possible and show that the resulting rule set significantly improves the speed and accuracy of a state-of-the-art syntax-based machine translation system.", "keyphrases": ["binarization", "machine translation", "synchronous grammar", "scfg", "span"]} +{"id": "kim-hovy-2006-extracting", "title": "Extracting Opinions, Opinion Holders, and Topics Expressed in Online News Media Text", "abstract": "This paper presents a method for identifying an opinion with its holder and topic, given a sentence from online news media texts. We introduce an approach of exploiting the semantic structure of a sentence, anchored to an opinion bearing verb or adjective. This method uses semantic role labeling as an intermediate step to label an opinion holder and topic using data from FrameNet. We decompose our task into three phases: identifying an opinion-bearing word, labeling semantic roles related to the word in the sentence, and then finding the holder and the topic of the opinion word among the labeled semantic roles. For a broader coverage, we also employ a clustering technique to predict the most probable frame for a word which is not defined in FrameNet. Our experimental results show that our system performs significantly better than the baseline.", "keyphrases": ["opinion holder", "role labeling", "opinion word", "subjectivity analysis system", "well-trained srl model"]} +{"id": "yang-etal-2011-corpus", "title": "Corpus-Guided Sentence Generation of Natural Images", "abstract": "We propose a sentence generation strategy that describes images by predicting the most likely nouns, verbs, scenes and prepositions that make up the core sentence structure. The input are initial noisy estimates of the objects and scenes detected in the image using state of the art trained detectors. As predicting actions from still images directly is unreliable, we use a language model trained from the English Gigaword corpus to obtain their estimates; together with probabilities of co-located nouns, scenes and prepositions. We use these estimates as parameters on a HMM that models the sentence generation process, with hidden nodes as sentence components and image detections as the emissions. Experimental results show that our strategy of combining vision and language produces readable and descriptive sentences compared to naive strategies that use vision alone.", "keyphrases": ["sentence generation", "image", "preposition", "caption"]} +{"id": "pinter-etal-2017-mimicking", "title": "Mimicking Word Embeddings using Subword RNNs", "abstract": "Word embeddings improve generalization over lexical features by placing each word in a lower-dimensional space, using distributional information obtained from unlabeled data. However, the effectiveness of word embeddings for downstream NLP tasks is limited by out-of-vocabulary (OOV) words, for which embeddings do not exist. In this paper, we present MIMICK, an approach to generating OOV word embeddings compositionally, by learning a function from spellings to distributional embeddings. Unlike prior work, MIMICK does not require re-training on the original word embedding corpus; instead, learning is performed at the type level. Intrinsic and extrinsic evaluations demonstrate the power of this simple approach. On 23 languages, MIMICK improves performance over a word-based baseline for tagging part-of-speech and morphosyntactic attributes. 
It is competitive with (and complementary to) a supervised character-based model in low resource settings.", "keyphrases": ["word embedding", "spelling", "mimick"]} +{"id": "mcclosky-etal-2006-reranking", "title": "Reranking and Self-Training for Parser Adaptation", "abstract": "Statistical parsers trained and tested on the Penn Wall Street Journal (WSJ) treebank have shown vast improvements over the last 10 years. Much of this improvement, however, is based upon an ever-increasing number of features to be trained on (typically) the WSJ treebank data. This has led to concern that such parsers may be too finely tuned to this corpus at the expense of portability to other genres. Such worries have merit. The standard \"Charniak parser\" checks in at a labeled precision-recall f-measure of 89.7% on the Penn WSJ test set, but only 82.9% on the test set from the Brown treebank corpus.This paper should allay these fears. In particular, we show that the reranking parser described in Charniak and Johnson (2005) improves performance of the parser on Brown to 85.2%. Furthermore, use of the self-training techniques described in (McClosky et al., 2006) raise this to 87.8% (an error reduction of 28%) again without any use of labeled Brown data. This is remarkable since training the parser and reranker on labeled Brown data achieves only 88.4%.", "keyphrases": ["self-training", "wsj", "reranking", "target domain", "parser degrade"]} +{"id": "kawahara-kurohashi-2008-coordination", "title": "Coordination Disambiguation without Any Similarities", "abstract": "The use of similarities has been one of the main approaches to resolve the ambiguities of coordinate structures. In this paper, we present an alternative method for coordination disambiguation, which does not use similarities. Our hypothesis is that coordinate structures are supported by surrounding dependency relations, and that such dependency relations rather yield similarity between conjuncts, which humans feel. Based on this hypothesis, we built a Japanese fully-lexicalized generative parser that includes coordination disambiguation. Experimental results on web sentences indicated the effectiveness of our approach, and endorsed our hypothesis.", "keyphrases": ["coordinate structure", "dependency relation", "conjunct", "generative parser", "coordination disambiguation"]} +{"id": "xu-etal-2015-problems", "title": "Problems in Current Text Simplification Research: New Data Can Help", "abstract": "Simple Wikipedia has dominated simplification research in the past 5 years. In this opinion paper, we argue that focusing on Wikipedia limits simplification research. We back up our arguments with corpus analysis and by highlighting statements that other researchers have made in the simplification literature. We introduce a new simplification dataset that is a significant improvement over Simple Wikipedia, and present a novel quantitative-comparative approach to study the quality of simplification data resources.", "keyphrases": ["simplification", "newsela", "parallel corpus", "complexity level", "simple english wikipedia"]} +{"id": "yang-etal-2018-distantly", "title": "Distantly Supervised NER with Partial Annotation Learning and Reinforcement Learning", "abstract": "A bottleneck problem with Chinese named entity recognition (NER) in new domains is the lack of annotated data. One solution is to utilize the method of distant supervision, which has been widely used in relation extraction, to automatically populate annotated training data without humancost. 
The distant supervision assumption here is that if a string in text is included in a predefined dictionary of entities, the string might be an entity. However, this kind of auto-generated data suffers from two main problems: incomplete and noisy annotations, which affect the performance of NER models. In this paper, we propose a novel approach which can partially solve the above problems of distant supervision for NER. In our approach, to handle the incomplete problem, we apply partial annotation learning to reduce the effect of unknown labels of characters. As for noisy annotation, we design an instance selector based on reinforcement learning to distinguish positive sentences from auto-generated annotations. In experiments, we create two datasets for Chinese named entity recognition in two domains with the help of distant supervision. The experimental results show that the proposed approach obtains better performance than the comparison systems on both two datasets.", "keyphrases": ["reinforcement learning", "annotated data", "distant supervision", "ner model", "instance selector"]} +{"id": "gkatzia-etal-2013-generating", "title": "Generating Student Feedback from Time-Series Data Using Reinforcement Learning", "abstract": "We describe a statistical Natural Language Generation (NLG) method for summarisation of time-series data in the context of feedback generation for students. In this paper, we initially present a method for collecting time-series data from students (e.g. marks, lectures attended) and use example feedback from lecturers in a datadriven approach to content selection. We show a novel way of constructing a reward function for our Reinforcement Learning agent that is informed by the lecturers\u2019 method of providing feedback. We evaluate our system with undergraduate students by comparing it to three baseline systems: a rule-based system, lecturerconstructed summaries and a Brute Force system. Our evaluation shows that the feedback generated by our learning agent is viewed by students to be as good as the feedback from the lecturers. Our findings suggest that the learning agent needs to take into account both the student and lecturers\u2019 preferences.", "keyphrases": ["student", "feedback", "time-series data", "reinforcement learning"]} +{"id": "wang-etal-2016-relation", "title": "Relation Classification via Multi-Level Attention CNNs", "abstract": "Relation classification is a crucial ingredient \nin numerous information extraction systems \nseeking to mine structured facts from \ntext. We propose a novel convolutional \nneural network architecture for this task, \nrelying on two levels of attention in order \nto better discern patterns in heterogeneous \ncontexts. This architecture enables endto-end \nlearning from task-specific labeled \ndata, forgoing the need for external knowledge \nsuch as explicit dependency structures. 
Experiments show that our model outperforms previous state-of-the-art methods, including those relying on much richer forms of prior knowledge.", "keyphrases": ["multi-level attention cnn", "relation classification", "rnn", "deep learning model", "lexicon-level feature"]} +{"id": "zhong-etal-2020-logicalfactchecker", "title": "LogicalFactChecker: Leveraging Logical Operations for Fact Checking with Graph Module Network", "abstract": "Verifying the correctness of a textual statement requires not only semantic reasoning about the meaning of words, but also symbolic reasoning about logical operations like count, superlative, aggregation, etc. In this work, we propose LogicalFactChecker, a neural network approach capable of leveraging logical operations for fact checking. It achieves the state-of-the-art performance on TABFACT, a large-scale, benchmark dataset built for verifying a textual statement with semi-structured tables. This is achieved by a graph module network built upon the Transformer-based architecture. With a textual statement and a table as the input, LogicalFactChecker automatically derives a program (a.k.a. logical form) of the statement in a semantic parsing manner. A heterogeneous graph is then constructed to capture not only the structures of the table and the program, but also the connections between inputs with different modalities. Such a graph reveals the related contexts of each word in the statement, the table and the program. The graph is used to obtain graph-enhanced contextual representations of words in Transformer-based architecture. After that, a program-driven module network is further introduced to exploit the hierarchical structure of the program, where semantic compositionality is dynamically modeled along the program structure with a set of function-specific modules. Ablation experiments suggest that both the heterogeneous graph and the module network are important to obtain strong results.", "keyphrases": ["graph module network", "symbolic reasoning", "graph-enhanced contextual representation", "logicalfactchecker"]} +{"id": "lalor-etal-2016-building", "title": "Building an Evaluation Scale using Item Response Theory", "abstract": "Evaluation of NLP methods requires testing against a previously vetted gold-standard test set and reporting standard metrics (accuracy/precision/recall/F1). The current assumption is that all items in a given test set are equal with regard to difficulty and discriminating power. We propose Item Response Theory (IRT) from psychometrics as an alternative means for gold-standard test-set generation and NLP system evaluation. IRT is able to describe characteristics of individual items - their difficulty and discriminating power - and can account for these characteristics in its estimation of human intelligence or ability for an NLP task. In this paper, we demonstrate IRT by generating a gold-standard test set for Recognizing Textual Entailment. By collecting a large number of human responses and fitting our IRT model, we show that our IRT model compares NLP systems with the performance in a human population and is able to provide more insight into system performance than standard evaluation metrics.
We show that a high accuracy score does not always imply a high IRT score, which depends on the item characteristics and the response pattern.", "keyphrases": ["item response theory", "difficulty", "human response", "model performance"]} +{"id": "gamback-sikdar-2017-using", "title": "Using Convolutional Neural Networks to Classify Hate-Speech", "abstract": "The paper introduces a deep learning-based Twitter hate-speech text classification system. The classifier assigns each tweet to one of four predefined categories: racism, sexism, both (racism and sexism) and non-hate-speech. Four Convolutional Neural Network models were trained, respectively, on character 4-grams, word vectors based on semantic information built using word2vec, randomly generated word vectors, and word vectors combined with character n-grams. The feature set was down-sized in the networks by max-pooling, and a softmax function was used to classify tweets. Tested by 10-fold cross-validation, the model based on word2vec embeddings performed best, with higher precision than recall, and a 78.3% F-score.", "keyphrases": ["convolutional neural networks", "hate-speech", "offensive language detection"]} +{"id": "simard-etal-2005-translating", "title": "Translating with Non-contiguous Phrases", "abstract": "This paper presents a phrase-based statistical machine translation method, based on non-contiguous phrases, i.e. phrases with gaps. A method for producing such phrases from a word-aligned corpus is proposed. A statistical translation model is also presented that deals with such phrases, as well as a training method based on the maximization of translation accuracy, as measured with the NIST evaluation metric. Translations are produced by means of a beam-search decoder. Experimental results are presented that demonstrate how the proposed method allows better generalization from the training data.", "keyphrases": ["non-contiguous phrase", "gap", "translation model", "additional linguistic phenomenon", "matrax"]} +{"id": "velldal-etal-2018-norec", "title": "NoReC: The Norwegian Review Corpus", "abstract": "This paper presents the Norwegian Review Corpus (NoReC), created for training and evaluating models for document-level sentiment analysis. The full-text reviews have been collected from major Norwegian news sources and cover a range of different domains, including literature, movies, video games, restaurants, music and theater, in addition to product reviews across a range of categories. Each review is labeled with a manually assigned score of 1-6, as provided by the rating of the original author. This first release of the corpus comprises more than 35,000 reviews. It is distributed using the CoNLL-U format, pre-processed using UDPipe, along with a rich set of metadata. The work reported in this paper forms part of the SANT initiative (Sentiment Analysis for Norwegian Text), a project seeking to provide resources and tools for sentiment analysis and opinion mining for Norwegian. As resources for sentiment analysis have so far been unavailable for Norwegian, NoReC represents a highly valuable and sought-after addition to Norwegian language technology.", "keyphrases": ["norwegian review corpus", "full-text review", "news source", "norec"]} +{"id": "lai-etal-2019-gated", "title": "A Gated Self-attention Memory Network for Answer Selection", "abstract": "Answer selection is an important research problem, with applications in many areas.
Previous deep learning based approaches for the task mainly adopt the Compare-Aggregate architecture that performs word-level comparison followed by aggregation. In this work, we take a departure from the popular Compare-Aggregate architecture, and instead, propose a new gated self-attention memory network for the task. Combined with a simple transfer learning technique from a large-scale online corpus, our model outperforms previous methods by a large margin, achieving new state-of-the-art results on two standard answer selection datasets: TrecQA and WikiQA.", "keyphrases": ["self-attention memory network", "answer selection", "many nlp task"]} +{"id": "su-etal-2020-deepmet", "title": "DeepMet: A Reading Comprehension Paradigm for Token-level Metaphor Detection", "abstract": "Machine metaphor understanding is one of the major topics in NLP. Most of the recent attempts consider it as a classification or sequence tagging task. However, few types of research introduce the rich linguistic information into the field of computational metaphor by leveraging powerful pre-training language models. We focus on a novel reading comprehension paradigm for solving the token-level metaphor detection task, which provides an innovative type of solution for this task. We propose an end-to-end deep metaphor detection model named DeepMet based on this paradigm. The proposed approach encodes the global text context (whole sentence), local text context (sentence fragments), and question (query word) information as well as incorporating two types of part-of-speech (POS) features by making use of the advanced pre-training language model. Experimental results on several metaphor datasets show that our model achieves competitive results in the second shared task on metaphor detection.", "keyphrases": ["reading comprehension paradigm", "metaphor detection", "language model", "part-of-speech"]} +{"id": "rinott-etal-2015-show", "title": "Show Me Your Evidence - an Automatic Method for Context Dependent Evidence Detection", "abstract": "Engaging in a debate with oneself or others to take decisions is an integral part of our day-to-day life. A debate on a topic (say, use of performance enhancing drugs) typically proceeds by one party making an assertion/claim (say, PEDs are bad for health) and then providing evidence to support the claim (say, a 2006 study shows that PEDs have psychiatric side effects). In this work, we propose the task of automatically detecting such evidence from unstructured text that supports a given claim. This task has many practical applications in decision support and persuasion enhancement in a wide range of domains. We first introduce an extensive benchmark data set tailored for this task, which allows training statistical models and assessing their performance. Then, we suggest a system architecture based on supervised learning to address the evidence detection task. Finally, promising experimental results are reported.", "keyphrases": ["evidence detection", "claim", "wikipedia article", "lexical feature", "technology"]} +{"id": "jovanoski-etal-2015-sentiment", "title": "Sentiment Analysis in Twitter for Macedonian", "abstract": "We present work on sentiment analysis in Twitter for Macedonian. As this is pioneering work for this combination of language and genre, we created suitable resources for training and evaluating a system for sentiment analysis of Macedonian tweets.
In particular, we developed a corpus of tweets annotated with tweet-level sentiment polarity (positive, negative, and neutral), as well as with phrase-level sentiment, which we made freely available for research purposes. We further bootstrapped several large-scale sentiment lexicons for Macedonian, motivated by previous work for English. The impact of several different pre-processing steps as well as of various features is shown in experiments that represent the first attempt to build a system for sentiment analysis in Twitter for the morphologically rich Macedonian language. Overall, our experimental results show an F1-score of 92.16, which is very strong and is on par with the best results for English, which were achieved in recent SemEval competitions.", "keyphrases": ["twitter", "macedonian tweet", "sentiment analysis"]} +{"id": "zhang-etal-2020-improving", "title": "Improving Massively Multilingual Neural Machine Translation and Zero-Shot Translation", "abstract": "Massively multilingual models for neural machine translation (NMT) are theoretically attractive, but often underperform bilingual models and deliver poor zero-shot translations. In this paper, we explore ways to improve them. We argue that multilingual NMT requires stronger modeling capacity to support language pairs with varying typological characteristics, and overcome this bottleneck via language-specific components and deepening NMT architectures. We identify the off-target translation issue (i.e. translating into a wrong target language) as the major source of the inferior zero-shot performance, and propose random online backtranslation to enforce the translation of unseen training language pairs. Experiments on OPUS-100 (a novel multilingual dataset with 100 languages) show that our approach substantially narrows the performance gap with bilingual models in both one-to-many and many-to-many settings, and improves zero-shot performance by ~10 BLEU, approaching conventional pivot-based methods.", "keyphrases": ["neural machine translation", "zero-shot translation", "multilingual nmt", "training language pair", "language-aware layer normalization"]} +{"id": "calixto-etal-2019-latent", "title": "Latent Variable Model for Multi-modal Translation", "abstract": "In this work, we propose to model the interaction between visual and textual features for multi-modal neural machine translation (MMT) through a latent variable model. This latent variable can be seen as a multi-modal stochastic embedding of an image and its description in a foreign language. It is used in a target-language decoder and also to predict image features. Importantly, our model formulation utilises visual and textual inputs during training but does not require that images be available at test time. We show that our latent variable MMT formulation improves considerably over strong baselines, including a multi-task learning approach (Elliott and Kadar, 2017) and a conditional variational auto-encoder approach (Toyama et al., 2016). Finally, we show improvements due to (i) predicting image features in addition to only conditioning on them, (ii) imposing a constraint on the KL term to promote models with non-negligible mutual information between inputs and latent variable, and (iii) training on additional target-language image descriptions (i.e.
synthetic data).", "keyphrases": ["textual feature", "image", "latent variable model"]} +{"id": "rudinger-etal-2015-script", "title": "Script Induction as Language Modeling", "abstract": "The narrative cloze is an evaluation metric commonly used for work on automatic script induction. While prior work in this area has focused on count-based methods from distributional semantics, such as pointwise mutual information, we argue that the narrative cloze can be productively reframed as a language modeling task. By training a discriminative language model for this task, we attain improvements of up to 27 percent over prior methods on standard narrative cloze metrics.", "keyphrases": ["language modeling", "script induction", "event prediction", "cloze task", "other system"]} +{"id": "ghosh-veale-2017-magnets", "title": "Magnets for Sarcasm: Making Sarcasm Detection Timely, Contextual and Very Personal", "abstract": "Sarcasm is a pervasive phenomenon in social media, permitting the concise communication of meaning, affect and attitude. Concision requires wit to produce and wit to understand, which demands from each party knowledge of norms, context and a speaker's mindset. Insight into a speaker's psychological profile at the time of production is a valuable source of context for sarcasm detection. Using a neural architecture, we show significant gains in detection accuracy when knowledge of the speaker's mood at the time of production can be inferred. Our focus is on sarcasm detection on Twitter, and we show that the mood exhibited by a speaker over tweets leading up to a new post is as useful a cue for sarcasm as the topical context of the post itself. The work opens the door to an empirical exploration not just of sarcasm in text but of the sarcastic state of mind.", "keyphrases": ["sarcasm", "mood", "twitter"]} +{"id": "jurgens-etal-2018-measuring", "title": "Measuring the Evolution of a Scientific Field through Citation Frames", "abstract": "Citations have long been used to characterize the state of a scientific field and to identify influential works. However, writers use citations for different purposes, and this varied purpose influences uptake by future scholars. Unfortunately, our understanding of how scholars use and frame citations has been limited to small-scale manual citation analysis of individual papers. We perform the largest behavioral study of citations to date, analyzing how scientific works frame their contributions through different types of citations and how this framing affects the field as a whole. We introduce a new dataset of nearly 2,000 citations annotated for their function, and use it to develop a state-of-the-art classifier and label the papers of an entire field: Natural Language Processing. We then show how differences in framing affect scientific uptake and reveal the evolution of the publication venues and the field as a whole. We demonstrate that authors are sensitive to discourse structure and publication venue when citing, and that how a paper frames its work through citations is predictive of the citation count it will receive. Finally, we use changes in citation framing to show that the field of NLP is undergoing a significant increase in consensus.", "keyphrases": ["evolution", "scientific field", "framing", "citation context"]} +{"id": "koehn-knight-2003-feature", "title": "Feature-Rich Statistical Translation of Noun Phrases", "abstract": "We define noun phrase translation as a subtask of machine translation.
This enables us to build a dedicated noun phrase translation subsystem that improves over the currently best general statistical machine translation methods by incorporating special modeling and special features. We achieved 65.5% translation accuracy in a German-English translation task vs. 53.2% with IBM Model 4.", "keyphrases": ["noun phrase", "overall translation performance", "isolated translation"]} +{"id": "shi-etal-2016-string", "title": "Does String-Based Neural MT Learn Source Syntax?", "abstract": "We investigate whether a neural, encoder-decoder translation system learns syntactic information on the source side as a by-product of training. We propose two methods to detect whether the encoder has learned local and global source syntax. A fine-grained analysis of the syntactic structure learned by the encoder reveals which kinds of syntax are learned and which are missing.", "keyphrases": ["syntactic information", "source side", "nmt encoder", "part-of-speech", "sentence vector"]} +{"id": "wu-etal-2020-attentive", "title": "Attentive Pooling with Learnable Norms for Text Representation", "abstract": "Pooling is an important technique for learning text representations in many neural NLP models. In conventional pooling methods such as average, max and attentive pooling, text representations are weighted summations of the L1 or L\u221e norm of input features. However, their pooling norms are always fixed and may not be optimal for learning accurate text representations in different tasks. In addition, in many popular pooling methods such as max and attentive pooling, some features may be over-emphasized, while other useful ones are not fully exploited. In this paper, we propose an Attentive Pooling with Learnable Norms (APLN) approach for text representation. Different from existing pooling methods that use a fixed pooling norm, we propose to learn the norm in an end-to-end manner to automatically find the optimal ones for text representation in different tasks. In addition, we propose two methods to ensure the numerical stability of the model training. The first one is scale limiting, which re-scales the input to ensure non-negativity and alleviate the risk of exponential explosion. The second one is re-formulation, which decomposes the exponent operation to avoid computing the real-valued powers of the input and further accelerate the pooling operation. Experimental results on four benchmark datasets show that our approach can effectively improve the performance of attentive pooling.", "keyphrases": ["learnable norms", "text representation", "attentive pooling"]} +{"id": "ardila-etal-2020-common", "title": "Common Voice: A Massively-Multilingual Speech Corpus", "abstract": "The Common Voice corpus is a massively-multilingual collection of transcribed speech intended for speech technology research and development. Common Voice is designed for Automatic Speech Recognition purposes but can be useful in other domains (e.g. language identification). To achieve scale and sustainability, the Common Voice project employs crowdsourcing for both data collection and data validation. The most recent release includes 29 languages, and as of November 2019 there are a total of 38 languages collecting data. Over 50,000 individuals have participated so far, resulting in 2,500 hours of collected audio. To our knowledge this is the largest audio corpus in the public domain for speech recognition, both in terms of number of hours and number of languages.
As an example use case for Common Voice, we present speech recognition experiments using Mozilla's DeepSpeech Speech-to-Text toolkit. By applying transfer learning from a source English model, we find an average Character Error Rate improvement of 5.99 \u00b1 5.48 for twelve target languages (German, French, Italian, Turkish, Catalan, Slovenian, Welsh, Irish, Breton, Tatar, Chuvash, and Kabyle). For most of these languages, these are the first ever published results on end-to-end Automatic Speech Recognition.", "keyphrases": ["speech recognition", "project", "data collection", "mozilla", "common voice"]} +{"id": "yu-etal-2019-sparc", "title": "SParC: Cross-Domain Semantic Parsing in Context", "abstract": "We present SParC, a dataset for cross-domain Semantic Parsing in Context that consists of 4,298 coherent question sequences (12k+ individual questions annotated with SQL queries). It is obtained from controlled user interactions with 200 complex databases over 138 domains. We provide an in-depth analysis of SParC and show that it introduces new challenges compared to existing datasets. SParC (1) demonstrates complex contextual dependencies, (2) has greater semantic diversity, and (3) requires generalization to unseen domains due to its cross-domain nature and the unseen databases at test time. We experiment with two state-of-the-art text-to-SQL models adapted to the context-dependent, cross-domain setup. The best model obtains an exact match accuracy of 20.2% over all questions and less than 10% over all interaction sequences, indicating that the cross-domain setting and the contextual phenomena of the dataset present significant challenges for future research. The dataset, baselines, and leaderboard are released at .", "keyphrases": ["semantic parsing", "coherent question sequence", "database", "sparc"]} +{"id": "kozareva-hovy-2010-semi", "title": "A Semi-Supervised Method to Learn and Construct Taxonomies Using the Web", "abstract": "Although many algorithms have been developed to harvest lexical resources, few organize the mined terms into taxonomies. We propose (1) a semi-supervised algorithm that uses a root concept, a basic level concept, and recursive surface patterns to learn automatically from the Web hyponym-hypernym pairs subordinated to the root; (2) a Web-based concept positioning procedure to validate the learned pairs' is-a relations; and (3) a graph algorithm that derives from scratch the integrated taxonomy structure of all the terms. Comparing results with WordNet, we find that the algorithm misses some concepts and links, but also that it discovers many additional ones lacking in WordNet. We evaluate the taxonomization power of our method on reconstructing parts of the WordNet taxonomy. Experiments show that starting from scratch, the algorithm can reconstruct 62% of the WordNet taxonomy for the regions tested.", "keyphrases": ["web", "root", "long path", "lexico-syntactic pattern", "dap"]} +{"id": "ataman-federico-2018-compositional", "title": "Compositional Representation of Morphologically-Rich Input for Neural Machine Translation", "abstract": "Neural machine translation (NMT) models are typically trained with fixed-size input and output vocabularies, which creates an important bottleneck on their accuracy and generalization capability. As a solution, various studies proposed segmenting words into sub-word units and performing translation at the sub-lexical level.
However, statistical word segmentation methods have recently been shown to be prone to morphological errors, which can lead to inaccurate translations. In this paper, we propose to overcome this problem by replacing the source-language embedding layer of NMT with a bi-directional recurrent neural network that generates compositional representations of the input at any desired level of granularity. We test our approach in a low-resource setting with five languages from different morphological typologies, and under different composition assumptions. By training NMT to compose word representations from character n-grams, our approach consistently outperforms (from 1.71 to 2.48 BLEU points) NMT learning embeddings of statistically generated sub-word units.", "keyphrases": ["neural machine translation", "compositional representation", "morphologically-rich language"]} +{"id": "cook-stevenson-2010-automatically", "title": "Automatically Identifying Changes in the Semantic Orientation of Words", "abstract": "The meanings of words are not fixed but in fact undergo change, with new word senses arising and established senses taking on new aspects of meaning or falling out of usage. Two types of semantic change are amelioration and pejoration; in these processes a word sense changes to become more positive or negative, respectively. In this first computational study of amelioration and pejoration, we adapt a web-based method for determining semantic orientation to the task of identifying ameliorations and pejorations in corpora from differing time periods. We evaluate our proposed method on a small dataset of known historical ameliorations and pejorations, and find it to perform better than a random baseline. Since this test dataset is small, we conduct a further evaluation on artificial examples of amelioration and pejoration, and again find evidence that our proposed method is able to identify changes in semantic orientation. Finally, we conduct a preliminary evaluation in which we apply our methods to the task of finding words which have recently undergone amelioration or pejoration.", "keyphrases": ["semantic orientation", "pejoration", "time period"]} +{"id": "he-wang-2008-chinese", "title": "Chinese Named Entity Recognition and Word Segmentation Based on Character", "abstract": "Chinese word segmentation and named entity recognition (NER) are both important tasks in Chinese information processing. This paper presents a character-based Conditional Random Fields (CRFs) model for these two tasks. In the SIGHAN Bakeoff 2007, this model participated in all closed tracks for both Chinese NER and word segmentation tasks, and turned out to perform well. Our system ranks 2nd in the closed track on NER of MSRA, and 4th in the closed track on word segmentation of SXU.", "keyphrases": ["word segmentation", "chinese ner", "character-based approach"]} +{"id": "billami-etal-2018-resyf", "title": "ReSyf: a French lexicon with ranked synonyms", "abstract": "In this article, we present ReSyf, a lexical resource of monolingual synonyms ranked according to their difficulty to be read and understood by native learners of French. The synonyms come from an existing lexical network and they have been semantically disambiguated and refined. A ranking algorithm, based on a wide range of linguistic features and validated through an evaluation campaign with human annotators, automatically sorts the synonyms corresponding to a given word sense by reading difficulty.
ReSyf is freely available and will be integrated into a web platform for reading assistance. It can also be applied to perform lexical simplification of French texts.", "keyphrases": ["synonym", "difficulty", "resyf"]} +{"id": "gao-etal-2019-dialog", "title": "Dialog State Tracking: A Neural Reading Comprehension Approach", "abstract": "Dialog state tracking is used to estimate the current belief state of a dialog given all the preceding conversation. Machine reading comprehension, on the other hand, focuses on building systems that read passages of text and answer questions that require some understanding of passages. We formulate dialog state tracking as a reading comprehension task to answer the question \u201cwhat is the state of the current dialog?\u201d after reading conversational context. In contrast to traditional state tracking methods where the dialog state is often predicted as a distribution over a closed set of all the possible slot values within an ontology, our method uses a simple attention-based neural network to point to the slot values within the conversation. Experiments on the MultiWOZ-2.0 cross-domain dialog dataset show that our simple system can obtain similar accuracies compared to the previous more complex methods. By exploiting recent advances in contextual word embeddings, adding a model that explicitly tracks whether a slot value should be carried over to the next turn, and combining our method with a traditional joint state tracking method that relies on closed set vocabulary, we can obtain a joint-goal accuracy of 47.33% on the standard test split, exceeding current state-of-the-art by 11.75%.", "keyphrases": ["reading comprehension task", "dialog state tracking", "dialogue context"]} +{"id": "dong-etal-2017-learning-generate", "title": "Learning to Generate Product Reviews from Attributes", "abstract": "Automatically generating product reviews is a meaningful, yet not well-studied task in sentiment analysis. Traditional natural language generation methods rely extensively on hand-crafted rules and predefined templates. This paper presents an attention-enhanced attribute-to-sequence model to generate product reviews for given attribute information, such as user, product, and rating. The attribute encoder learns to represent input attributes as vectors. Then, the sequence decoder generates reviews by conditioning its output on these vectors. We also introduce an attention mechanism to jointly generate reviews and align words with input attributes. The proposed model is trained end-to-end to maximize the likelihood of target product reviews given the attributes. We build a publicly available dataset for the review generation task by leveraging the Amazon book reviews and their metadata. Experiments on the dataset show that our approach outperforms baseline methods and the attention mechanism significantly improves the performance of our model.", "keyphrases": ["review", "attribute", "review generation task"]} +{"id": "guu-etal-2018-generating", "title": "Generating Sentences by Editing Prototypes", "abstract": "We propose a new generative language model for sentences that first samples a prototype sentence from the training corpus and then edits it into a new sentence. Compared to traditional language models that generate from scratch either left-to-right or by first sampling a latent sentence vector, our prototype-then-edit model improves perplexity on language modeling and generates higher quality outputs according to human evaluation.
Furthermore, the model gives rise to a latent edit vector that captures interpretable semantics such as sentence similarity and sentence-level analogies.", "keyphrases": ["prototype", "unconditional text generation", "neural editor model", "generative model", "human-written sentence"]} +{"id": "bel-etal-2007-automatic", "title": "Automatic Acquisition of Grammatical Types for Nouns", "abstract": "The work we present here is concerned with the acquisition of deep grammatical information for nouns in Spanish. The aim is to build a learner that can handle noise, but, more interestingly, that is able to overcome the problem of sparse data, especially important in the case of nouns. We have based our work on two main points. Firstly, we have used distributional evidence as features. Secondly, we made the learner deal with all occurrences of a word as a single complex unit. The obtained results show that the grammatical features of nouns are a level of generalization that can be successfully approached with a Decision Tree learner.", "keyphrases": ["acquisition", "noun", "decision tree learner"]} +{"id": "del-corro-etal-2015-finet", "title": "FINET: Context-Aware Fine-Grained Named Entity Typing", "abstract": "We propose FINET, a system for detecting the types of named entities in short inputs\u2014such as sentences or tweets\u2014with respect to WordNet\u2019s super fine-grained type system. FINET generates candidate types using a sequence of multiple extractors, ranging from explicitly mentioned types to implicit types, and subsequently selects the most appropriate using ideas from word-sense disambiguation. FINET combats data scarcity and noise from existing systems: It does not rely on supervision in its extractors and generates training data for type selection from WordNet and other resources. FINET supports the most fine-grained type system so far, including types with no annotated training data. Our experiments indicate that FINET outperforms state-of-the-art methods in terms of recall, precision, and granularity of extracted types.", "keyphrases": ["entity typing", "type system", "finet"]} +{"id": "caselli-vossen-2017-event", "title": "The Event StoryLine Corpus: A New Benchmark for Causal and Temporal Relation Extraction", "abstract": "This paper reports on the Event StoryLine Corpus (ESC) v1.0, a new benchmark dataset for temporal and causal relation detection. By developing this dataset, we also introduce a new task, the StoryLine Extraction from news data, which aims at extracting and classifying events relevant for stories, from across news documents spread in time and clustered around a single seminal event or topic. In addition to describing the dataset, we also report on three baseline systems whose results show the complexity of the task and suggest directions for the development of more robust systems.", "keyphrases": ["event storyline corpus", "causal relation", "dataset eventstoryline", "eci"]} +{"id": "ortiz-martinez-etal-2010-online", "title": "Online Learning for Interactive Statistical Machine Translation", "abstract": "State-of-the-art Machine Translation (MT) systems are still far from being perfect. An alternative is the so-called Interactive Machine Translation (IMT) framework. In this framework, the knowledge of a human translator is combined with an MT system. The vast majority of the existing work on IMT makes use of the well-known batch learning paradigm.
In the batch learning paradigm, the training of the IMT system and the interactive translation process are carried out in separate stages. This paradigm is not able to take advantage of the new knowledge produced by the user of the IMT system. In this paper, we present an application of the online learning paradigm to the IMT framework. In the online learning paradigm, the training and prediction stages are no longer separated. This feature is particularly useful in IMT since it allows the user feedback to be taken into account. The online learning techniques proposed here incrementally update the statistical models involved in the translation process. Empirical results show the great potential of online learning in the IMT framework.", "keyphrases": ["interactive machine translation", "imt system", "online learning"]} +{"id": "bethard-etal-2015-semeval", "title": "SemEval-2015 Task 6: Clinical TempEval", "abstract": "Clinical TempEval 2015 brought the temporal information extraction tasks of past TempEval campaigns to the clinical domain. Nine sub-tasks were included, covering problems in time expression identification, event expression identification and temporal relation identification. Participant systems were trained and evaluated on a corpus of clinical notes and pathology reports from the Mayo Clinic, annotated with an extension of TimeML for the clinical domain. Three teams submitted a total of 13 system runs, with the best systems achieving near-human performance on identifying events and times, but with a large performance gap still remaining for temporal relations.", "keyphrases": ["clinical tempeval", "clinical note", "semeval"]} +{"id": "zhong-ng-2012-word", "title": "Word Sense Disambiguation Improves Information Retrieval", "abstract": "Previous research has conflicting conclusions on whether word sense disambiguation (WSD) systems can improve information retrieval (IR) performance. In this paper, we propose a method to estimate sense distributions for short queries. Together with the senses predicted for words in documents, we propose a novel approach to incorporate word senses into the language modeling approach to IR and also exploit the integration of synonym relations. Our experimental results on standard TREC collections show that using the word senses tagged by a supervised WSD system, we obtain significant improvements over a state-of-the-art IR system.", "keyphrases": ["wsd", "language modeling approach", "word sense disambiguation"]} +{"id": "vulic-etal-2011-identifying", "title": "Identifying Word Translations from Comparable Corpora Using Latent Topic Models", "abstract": "A topic model outputs a set of multinomial distributions over words for each topic. In this paper, we investigate the value of bilingual topic models, i.e., a bilingual Latent Dirichlet Allocation model for finding translations of terms in comparable corpora without using any linguistic resources. Experiments on a document-aligned English-Italian Wikipedia corpus confirm that the developed methods which only use knowledge from word-topic distributions outperform methods based on similarity measures in the original word-document space. 
The best results, obtained by combining knowledge from word-topic distributions with similarity measures in the original space, are also reported.", "keyphrases": ["comparable corpora", "topic model", "cross-lingual signal", "statistical method"]} +{"id": "zhang-etal-2018-guiding", "title": "Guiding Neural Machine Translation with Retrieved Translation Pieces", "abstract": "One of the difficulties of neural machine translation (NMT) is the recall and appropriate translation of low-frequency words or phrases. In this paper, we propose a simple, fast, and effective method for recalling previously seen translation examples and incorporating them into the NMT decoding process. Specifically, for an input sentence, we use a search engine to retrieve sentence pairs whose source sides are similar to the input sentence, and then collect n-grams that are both in the retrieved target sentences and aligned with words that match in the source sentences, which we call \u201ctranslation pieces\u201d. We compute pseudo-probabilities for each retrieved sentence based on similarities between the input sentence and the retrieved source sentences, and use these to weight the retrieved translation pieces. Finally, an existing NMT model is used to translate the input sentence, with an additional bonus given to outputs that contain the collected translation pieces. We show our method improves NMT translation results up to 6 BLEU points on three narrow domain translation tasks where repetitiveness of the target sentences is particularly salient. It also causes little increase in the translation time, and compares favorably to another alternative retrieval-based method with respect to accuracy, speed, and simplicity of implementation.", "keyphrases": ["neural machine translation", "translation piece", "search engine", "sentence pair", "n-gram"]} +{"id": "shing-etal-2018-expert", "title": "Expert, Crowdsourced, and Machine Assessment of Suicide Risk via Online Postings", "abstract": "We report on the creation of a dataset for studying assessment of suicide risk via online postings in Reddit. Evaluation of risk-level annotations by experts yields what is, to our knowledge, the first demonstration of reliability in risk assessment by clinicians based on social media postings. We also introduce and demonstrate the value of a new, detailed rubric for assessing suicide risk, compare crowdsourced with expert performance, and present baseline predictive modeling experiments using the new dataset, which will be made available to researchers through the American Association of Suicidology.", "keyphrases": ["suicide risk", "online posting", "post", "suicidology", "expert"]} +{"id": "malmasi-dras-2014-language", "title": "Language Transfer Hypotheses with Linear SVM Weights", "abstract": "Language transfer, the characteristic second language usage patterns caused by native language interference, is investigated by Second Language Acquisition (SLA) researchers seeking to find overused and underused linguistic features. In this paper we develop and present a methodology for deriving ranked lists of such features. Using very large learner data, we show our method\u2019s ability to find relevant candidates using sophisticated linguistic features. To illustrate its applicability to SLA research, we formulate plausible language transfer hypotheses supported by current evidence.
This is the first work to extend Native Language Identification to a broader linguistic interpretation of learner data and address the automatic extraction of underused features on a per-native language basis.", "keyphrases": ["linear svm weight", "sla", "linguistic feature", "language transfer hypothesis", "learner feedback"]} +{"id": "chambers-2013-event", "title": "Event Schema Induction with a Probabilistic Entity-Driven Model", "abstract": "Event schema induction is the task of learning high-level representations of complex events (e.g., a bombing) and their entity roles (e.g., perpetrator and victim) from unlabeled text. Event schemas have important connections to early NLP research on frames and scripts, as well as modern applications like template extraction. Recent research suggests event schemas can be learned from raw text. Inspired by a pipelined learner based on named entity coreference, this paper presents the first generative model for schema induction that integrates coreference chains into learning. Our generative model is conceptually simpler than the pipelined approach and requires far less training data. It also provides an interesting contrast with a recent HMM-based model. We evaluate on a common dataset for template schema extraction. Our generative model matches the pipeline\u2019s performance, and outperforms the HMM by 7 F1 points (20%).", "keyphrases": ["high-level representation", "entity role", "generative model", "event schema induction"]} +{"id": "bulte-tezcan-2019-neural", "title": "Neural Fuzzy Repair: Integrating Fuzzy Matches into Neural Machine Translation", "abstract": "We present a simple yet powerful data augmentation method for boosting Neural Machine Translation (NMT) performance by leveraging information retrieved from a Translation Memory (TM). We propose and test two methods for augmenting NMT training data with fuzzy TM matches. Tests on the DGT-TM data set for two language pairs show consistent and substantial improvements over a range of baseline systems. The results suggest that this method is promising for any translation environment in which a sizeable TM is available and a certain amount of repetition across translations is to be expected, especially considering its ease of implementation.", "keyphrases": ["fuzzy match", "neural machine translation", "translation memory", "source sentence"]} +{"id": "dugast-etal-2007-statistical", "title": "Statistical Post-Editing on SYSTRAN's Rule-Based Translation System", "abstract": "This article describes the combination of a SYSTRAN system with a \"statistical post-editing\" (SPE) system. We document qualitative analysis on two experiments performed in the shared task of the ACL 2007 Workshop on Statistical Machine Translation. Comparative results and more integrated \"hybrid\" techniques are discussed.", "keyphrases": ["post-editing", "systran", "rule-based machine translation", "smt system"]} +{"id": "coppersmith-etal-2014-quantifying", "title": "Quantifying Mental Health Signals in Twitter", "abstract": "The ubiquity of social media provides a rich opportunity to enhance the data available to mental health clinicians and researchers, enabling a better-informed and better-equipped mental health field.
We present analysis of mental health phenomena in publicly available Twitter data, demonstrating how rigorous application of simple natural language processing methods can yield insight into specific disorders as well as mental health writ large, along with evidence that as-of-yet undiscovered linguistic signals relevant to mental health exist in social media. We present a novel method for gathering data for a range of mental illnesses quickly and cheaply, then focus on analysis of four in particular: post-traumatic stress disorder (PTSD), depression, bipolar disorder, and seasonal affective disorder (SAD). We intend for these proof-of-concept results to inform the necessary ethical discussion regarding the balance between the utility of such data and the privacy of mental health related information.", "keyphrases": ["mental health", "twitter", "social medium data", "facebook", "self-report"]} +{"id": "nooralahzadeh-etal-2020-zero", "title": "Zero-Shot Cross-Lingual Transfer with Meta Learning", "abstract": "Learning what to share between tasks has become a topic of great importance, as strategic sharing of knowledge has been shown to improve downstream task performance. This is particularly important for multilingual applications, as most languages in the world are under-resourced. Here, we consider the setting of training models on multiple different languages at the same time, when little or no data is available for languages other than English. We show that this challenging setup can be approached using meta-learning: in addition to training a source language model, another model learns to select which training instances are the most beneficial to the first. We experiment using standard supervised, zero-shot cross-lingual, as well as few-shot cross-lingual settings for different natural language understanding tasks (natural language inference, question answering). Our extensive experimental setup demonstrates the consistent effectiveness of meta-learning for a total of 15 languages. We improve upon the state-of-the-art for zero-shot and few-shot NLI (on MultiNLI and XNLI) and QA (on the MLQA dataset). A comprehensive error analysis indicates that the correlation of typological features between languages can partly explain when parameter sharing learned via meta-learning is beneficial.", "keyphrases": ["cross-lingual transfer", "natural language inference", "few-shot nli"]} +{"id": "tsvetkov-etal-2015-evaluation", "title": "Evaluation of Word Vector Representations by Subspace Alignment", "abstract": "Unsupervisedly learned word vectors have proven to provide exceptionally effective features in many NLP tasks. Most common intrinsic evaluations of vector quality measure correlation with similarity judgments. However, these often correlate poorly with how well the learned representations perform as features in downstream evaluation tasks. 
We present QVEC\u2014a computationally inexpensive intrinsic evaluation measure of the quality of word embeddings based on alignment to a matrix of features extracted from manually crafted lexical resources\u2014that obtains strong correlation with performance of the vectors in a battery of downstream semantic evaluation tasks.", "keyphrases": ["intrinsic evaluation", "word embedding", "qvec"]} +{"id": "gaussier-etal-2004-geometric", "title": "A Geometric View on Bilingual Lexicon Extraction from Comparable Corpora", "abstract": "We present a geometric view on bilingual lexicon extraction from comparable corpora, which allows us to re-interpret the methods proposed so far and to identify unresolved problems. This motivates three new methods that aim at solving these problems. Empirical evaluation shows the strengths and weaknesses of these methods, as well as a significant gain in the accuracy of extracted lexicons.", "keyphrases": ["lexicon extraction", "comparable corpora", "cross-lingual signal", "statistical method"]} +{"id": "peng-etal-2020-shot", "title": "Few-shot Natural Language Generation for Task-Oriented Dialog", "abstract": "As a crucial component in task-oriented dialog systems, the Natural Language Generation (NLG) module converts a dialog act represented in a semantic form into a response in natural language. The success of traditional template-based or statistical models typically relies on heavily annotated data, which is infeasible for new domains. Therefore, it is pivotal for an NLG system to generalize well with limited labelled data in real applications. To this end, we present FewshotWOZ, the first NLG benchmark to simulate the few-shot learning setting in task-oriented dialog systems. Further, we develop the SC-GPT model. It is pre-trained on a large set of annotated NLG corpus to acquire the controllable generation ability, and fine-tuned with only a few domain-specific labels to adapt to new domains. Experiments on FewshotWOZ and the large Multi-Domain-WOZ datasets show that the proposed SC-GPT significantly outperforms existing methods, measured by various automatic metrics and human evaluations.", "keyphrases": ["natural language generation", "few-shot learning", "task-oriented dialogue system"]} +{"id": "sogaard-2016-evaluating", "title": "Evaluating word embeddings with fMRI and eye-tracking", "abstract": "The workshop CfP assumes that down-stream evaluation of word embeddings is impractical, and that a valid evaluation metric for pairs of word embeddings can be found. I argue below that if so, the only meaningful evaluation procedure is comparison with measures of human word processing in the wild. Such evaluation is non-trivial, but I present a practical procedure here, evaluating word embeddings as features in a multi-dimensional regression model predicting brain imaging or eye-tracking word-level aggregate statistics.", "keyphrases": ["word embedding", "fmri", "text stimulus"]} +{"id": "li-etal-2019-incremental", "title": "Incremental Transformer with Deliberation Decoder for Document Grounded Conversations", "abstract": "Document Grounded Conversations is a task to generate dialogue responses when chatting about the content of a given document. Obviously, document knowledge plays a critical role in Document Grounded Conversations, while existing dialogue models do not exploit this kind of knowledge effectively enough. In this paper, we propose a novel Transformer-based architecture for multi-turn document grounded conversations.
In particular, we devise an Incremental Transformer to encode multi-turn utterances along with knowledge in related documents. Motivated by the human cognitive process, we design a two-pass decoder (Deliberation Decoder) to improve context coherence and knowledge correctness. Our empirical study on a real-world Document Grounded Dataset proves that responses generated by our model significantly outperform competitive baselines on both context coherence and knowledge relevance.", "keyphrases": ["deliberation decoder", "multi-turn utterance", "incremental transformer", "dialogue generation"]} +{"id": "forascu-tufis-2012-romanian", "title": "Romanian TimeBank: An Annotated Parallel Corpus for Temporal Information", "abstract": "The paper describes the main steps for the construction, annotation and validation of the Romanian version of the TimeBank corpus. Starting from the English TimeBank corpus \u2015 the reference annotated corpus in the temporal domain, we have translated all the 183 English news texts into Romanian and mapped the English annotations onto Romanian, with a success rate of 96.53%. Based on ISO-Time - the emerging standard for representing temporal information, which includes many of the previous annotation schemes - we have evaluated the automatic transfer onto Romanian and, when necessary, corrected the Romanian annotations so that in the end we obtained a 99.18% transfer rate for the TimeML annotations. In very few cases, due to language peculiarities, some original annotations could not be transferred. For the portability of the temporal annotation standard to Romanian, we suggested some additions for the ISO-Time standard, concerning especially the EVENT tag, based on linguistic evidence, the Romanian grammar, and also on the localisations of TimeML to other Romance languages. Future improvements to the Ro-TimeBank will take into consideration all temporal expressions, signals and events in texts, even those whose temporal anchoring is not very clear.", "keyphrases": ["annotated parallel corpus", "temporal information", "romanian timebank"]} +{"id": "chen-etal-2018-keyphrase", "title": "Keyphrase Generation with Correlation Constraints", "abstract": "In this paper, we study automatic keyphrase generation. Although conventional approaches to this task show promising results, they neglect correlation among keyphrases, resulting in duplication and coverage issues. To solve these problems, we propose a new sequence-to-sequence architecture for keyphrase generation named CorrRNN, which captures correlation among multiple keyphrases in two ways. First, we employ a coverage vector to indicate whether the word in the source document has been summarized by previous phrases to improve the coverage for keyphrases. Second, preceding phrases are taken into account to eliminate duplicate phrases and improve result coherence. Experiment results show that our model significantly outperforms the state-of-the-art method on benchmark datasets in terms of both accuracy and diversity.", "keyphrases": ["correlation constraint", "sequence-to-sequence architecture", "keyphrase generation", "review mechanism", "generative model"]} +{"id": "lu-etal-2009-natural", "title": "Natural Language Generation with Tree Conditional Random Fields", "abstract": "This paper presents an effective method for generating natural language sentences from their underlying meaning representations.
The method is built on top of a hybrid tree representation that jointly encodes both the meaning representation as well as the natural language in a tree structure. By using a tree conditional random field on top of the hybrid tree representation, we are able to explicitly model phrase-level dependencies amongst neighboring natural language phrases and meaning representation components in a simple and natural way. We show that the additional dependencies captured by the tree conditional random field allow it to perform better than directly inverting a previously developed hybrid tree semantic parser. Furthermore, we demonstrate that the model performs better than a previous state-of-the-art natural language generation model. Experiments are performed on two benchmark corpora with standard automatic evaluation metrics.", "keyphrases": ["conditional random field", "meaning representation", "language generation model"]} +{"id": "du-cardie-2020-event", "title": "Event Extraction by Answering (Almost) Natural Questions", "abstract": "The problem of event extraction requires detecting the event trigger and extracting its corresponding arguments. Existing work in event argument extraction typically relies heavily on entity recognition as a preprocessing/concurrent step, causing the well-known problem of error propagation. To avoid this issue, we introduce a new paradigm for event extraction by formulating it as a question answering (QA) task that extracts the event arguments in an end-to-end manner. Empirical results demonstrate that our framework outperforms prior methods substantially; in addition, it is capable of extracting event arguments for roles not seen at training time (i.e., in a zero-shot learning setting).", "keyphrases": ["trigger", "entity recognition", "event extraction", "machine reading comprehension", "eae"]} +{"id": "snover-etal-2009-fluency", "title": "Fluency, Adequacy, or HTER? Exploring Different Human Judgments with a Tunable MT Metric", "abstract": "Automatic Machine Translation (MT) evaluation metrics have traditionally been evaluated by the correlation of the scores they assign to MT output with human judgments of translation performance. Different types of human judgments, such as Fluency, Adequacy, and HTER, measure varying aspects of MT performance that can be captured by automatic MT metrics. We explore these differences through the use of a new tunable MT metric: TER-Plus, which extends the Translation Edit Rate evaluation metric with tunable parameters and the incorporation of morphology, synonymy and paraphrases. TER-Plus was shown to be one of the top metrics in NIST's Metrics MATR 2008 Challenge, having the highest average rank in terms of Pearson and Spearman correlation. Optimizing TER-Plus to different types of human judgments yields significantly improved correlations and meaningful changes in the weight of different types of edits, demonstrating significant differences between the types of human judgments.", "keyphrases": ["hter", "human judgment", "paraphrase", "fluency", "translation error rate"]} +{"id": "vaibhav-etal-2019-improving", "title": "Improving Robustness of Machine Translation with Synthetic Noise", "abstract": "Modern Machine Translation (MT) systems perform remarkably well on clean, in-domain text. However, most human-generated text, particularly in the realm of social media, is full of typos, slang, dialect, idiolect and other noise, which can have a disastrous impact on the accuracy of MT.
In this paper we propose methods to enhance the robustness of MT systems by emulating naturally occurring noise in otherwise clean data. Synthesizing noise in this manner, we are ultimately able to make a vanilla MT system more resilient to naturally occurring noise, partially mitigating loss in accuracy resulting therefrom.", "keyphrases": ["robustness", "machine translation", "synthetic noise", "clean data", "medium"]} +{"id": "gehman-etal-2020-realtoxicityprompts", "title": "RealToxicityPrompts: Evaluating Neural Toxic Degeneration in Language Models", "abstract": "Pretrained neural language models (LMs) are prone to generating racist, sexist, or otherwise toxic language which hinders their safe deployment. We investigate the extent to which pretrained LMs can be prompted to generate toxic language, and the effectiveness of controllable text generation algorithms at preventing such toxic degeneration. We create and release RealToxicityPrompts, a dataset of 100K naturally occurring, sentence-level prompts derived from a large corpus of English web text, paired with toxicity scores from a widely-used toxicity classifier. Using RealToxicityPrompts, we find that pretrained LMs can degenerate into toxic text even from seemingly innocuous prompts. We empirically assess several controllable generation methods, and find that while data- or compute-intensive methods (e.g., adaptive pretraining on non-toxic data) are more effective at steering away from toxicity than simpler solutions (e.g., banning \u201cbad\u201d words), no current method is failsafe against neural toxic degeneration. To pinpoint the potential cause of such persistent toxic degeneration, we analyze two web text corpora used to pretrain several LMs (including GPT-2; Radford et al., 2019), and find a significant amount of offensive, factually unreliable, and otherwise toxic content. Our work provides a test bed for evaluating toxic generations by LMs and stresses the need for better data selection processes for pretraining.", "keyphrases": ["neural toxic degeneration", "language model", "toxic language", "realtoxicityprompt"]} +{"id": "edunov-etal-2020-evaluation", "title": "On The Evaluation of Machine Translation Systems Trained With Back-Translation", "abstract": "Back-translation is a widely used data augmentation technique which leverages target monolingual data. However, its effectiveness has been challenged since automatic metrics such as BLEU only show significant improvements for test examples where the source itself is a translation, or translationese. This is believed to be due to translationese inputs better matching the back-translated training data. In this work, we show that this conjecture is not empirically supported and that back-translation improves translation quality of both naturally occurring text as well as translationese according to professional human translators. We provide empirical evidence to support the view that back-translation is preferred by humans because it produces more fluent outputs. BLEU cannot capture human preferences because references are translationese when source sentences are natural text.
We recommend complementing BLEU with a language model score to measure fluency.", "keyphrases": ["back-translation", "monolingual data", "professional human translator", "fluent output", "bleu score"]} +{"id": "verga-etal-2017-generalizing", "title": "Generalizing to Unseen Entities and Entity Pairs with Row-less Universal Schema", "abstract": "Universal schema predicts the types of entities and relations in a knowledge base (KB) by jointly embedding the union of all available schema types\u2014not only types from multiple structured databases (such as Freebase or Wikipedia infoboxes), but also types expressed as textual patterns from raw text. This prediction is typically modeled as a matrix completion problem, with one type per column, and either one or two entities per row (in the case of entity types or binary relation types, respectively). Factorizing this sparsely observed matrix yields a learned vector embedding for each row and each column. In this paper we explore the problem of making predictions for entities or entity-pairs unseen at training time (and hence without a pre-learned row embedding). We propose an approach having no per-row parameters at all; rather we produce a row vector on the fly using a learned aggregation function of the vectors of the observed columns for that row. We experiment with various aggregation functions, including neural network attention models. Our approach can be understood as a natural language database, in that questions about KB entities are answered by attending to textual or database evidence. In experiments predicting both relations and entity types, we demonstrate that despite having an order of magnitude fewer parameters than traditional universal schema, we can match the accuracy of the traditional model, and more importantly, we can now make predictions about unseen rows with nearly the same accuracy as rows available at training time.", "keyphrases": ["entity pair", "universal schema", "matrix"]} +{"id": "fan-etal-2014-distant", "title": "Distant Supervision for Relation Extraction with Matrix Completion", "abstract": "The essence of distantly supervised relation extraction is that it is an incomplete multi-label classification problem with sparse and noisy features. To tackle the sparsity and noise challenges, we propose solving the classification problem using matrix completion on factorized matrix of minimized rank. We formulate relation classification as completing the unknown labels of testing items (entity pairs) in a sparse matrix that concatenates training and testing textual features with training labels. Our algorithmic framework is based on the assumption that the rank of item-by-feature and item-by-label joint matrix is low. We apply two optimization models to recover the underlying low-rank matrix leveraging the sparsity of feature-label matrix. The matrix completion problem is then solved by the fixed point continuation (FPC) algorithm, which can find the global optimum. Experiments on two widely used datasets with different dimensions of textual features demonstrate that our low-rank matrix completion approach significantly outperforms the baseline and the state-of-the-art methods.", "keyphrases": ["relation extraction", "matrix completion", "distant supervision"]} +{"id": "li-caragea-2019-multi", "title": "Multi-Task Stance Detection with Sentiment and Stance Lexicons", "abstract": "Stance detection aims to detect whether the opinion holder is in support of or against a given target.
Recent works show improvements in stance detection by using either the attention mechanism or sentiment information. In this paper, we propose a multi-task framework that incorporates target-specific attention mechanism and at the same time takes sentiment classification as an auxiliary task. Moreover, we used a sentiment lexicon and constructed a stance lexicon to provide guidance for the attention layer. Experimental results show that the proposed model significantly outperforms state-of-the-art deep learning methods on the SemEval-2016 dataset.", "keyphrases": ["stance detection", "sentiment information", "auxiliary task"]} +{"id": "sordoni-etal-2015-neural", "title": "A Neural Network Approach to Context-Sensitive Generation of Conversational Responses", "abstract": "We present a novel response generation system that can be trained end to end on large quantities of unstructured Twitter conversations. A neural network architecture is used to address sparsity issues that arise when integrating contextual information into classic statistical models, allowing the system to take into account previous dialog utterances. Our dynamic-context generative models show consistent gains over both context-sensitive and non-context-sensitive Machine Translation and Information Retrieval baselines.", "keyphrases": ["conversation", "recurrent neural network", "non-task-oriented dialogue system"]} +{"id": "medelyan-etal-2009-human", "title": "Human-competitive tagging using automatic keyphrase extraction", "abstract": "This paper connects two research areas: automatic tagging on the web and statistical keyphrase extraction. First, we analyze the quality of tags in a collaboratively created folksonomy using traditional evaluation techniques. Next, we demonstrate how documents can be tagged automatically with a state-of-the-art keyphrase extraction algorithm, and further improve performance in this new domain using a new algorithm, \"Maui\", that utilizes semantic information extracted from Wikipedia. Maui outperforms existing approaches and extracts tags that are competitive with those assigned by the best performing human taggers.", "keyphrases": ["tagging", "keyphrase extraction", "wikipedia", "hand-crafted rule", "large set"]} +{"id": "bunescu-mooney-2007-learning", "title": "Learning to Extract Relations from the Web using Minimal Supervision", "abstract": "We present a new approach to relation extraction that requires only a handful of training examples. Given a few pairs of named entities known to exhibit or not exhibit a particular relation, bags of sentences containing the pairs are extracted from the web. We extend an existing relation extraction method to handle this weaker form of supervision, and present experimental results demonstrating that our approach can reliably extract relations from web documents.", "keyphrases": ["web", "relation extraction", "distant supervision", "knowledge base", "multi-instance learning"]} +{"id": "herbelot-baroni-2017-high", "title": "High-risk learning: acquiring new word vectors from tiny data", "abstract": "Distributional semantics models are known to struggle with small data. It is generally accepted that in order to learn `a good vector' for a word, a model must have sufficient examples of its usage. This contradicts the fact that humans can guess the meaning of a word from a few occurrences only.
In this paper, we show that a neural language model such as Word2Vec only necessitates minor modifications to its standard architecture to learn new terms from tiny data, using background knowledge from a previously learnt semantic space. We test our model on word definitions and on a nonce task involving 2-6 sentences' worth of context, showing a large increase in performance over state-of-the-art models on the definitional task.", "keyphrases": ["word vector", "tiny data", "usage", "learning rate"]} +{"id": "wang-etal-2021-voxpopuli", "title": "VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation Learning, Semi-Supervised Learning and Interpretation", "abstract": "We introduce VoxPopuli, a large-scale multilingual corpus providing 400K hours of unlabeled speech data in 23 languages. It is the largest open data to date for unsupervised representation learning as well as semi-supervised learning. VoxPopuli also contains 1.8K hours of transcribed speeches in 15 languages and their aligned oral interpretations into 15 target languages totaling 17.3K hours. We provide speech recognition (ASR) baselines and validate the versatility of VoxPopuli unlabeled data in semi-supervised ASR and speech-to-text translation under challenging out-of-domain settings. The corpus is available at .", "keyphrases": ["semi-supervised learning", "interpretation", "unlabeled speech data", "voxpopuli"]} +{"id": "luong-etal-2013-better", "title": "Better Word Representations with Recursive Neural Networks for Morphology", "abstract": "Vector-space word representations have been very successful in recent years at improving performance across a variety of NLP tasks. However, common to most existing work, words are regarded as independent entities without any explicit relationship among morphologically related words being modeled. As a result, rare and complex words are often poorly estimated, and all unknown words are represented in a rather crude way using only one or a few vectors. This paper addresses this shortcoming by proposing a novel model that is capable of building representations for morphologically complex words from their morphemes. We combine recursive neural networks (RNNs), where each morpheme is a basic unit, with neural language models (NLMs) to consider contextual information in learning morphologically-aware word representations. Our learned models outperform existing word representations by a good margin on word similarity tasks across many datasets, including a new dataset we introduce focused on rare words to complement existing ones in an interesting way.", "keyphrases": ["recursive neural networks", "morphology", "word embedding", "rich language", "trend"]} +{"id": "wang-etal-2014-beam", "title": "A Beam-Search Decoder for Disfluency Detection", "abstract": "In this paper, we present a novel beam-search decoder for disfluency detection. We first propose node-weighted max-margin Markov networks (M3N) to boost the performance on words belonging to specific part-of-speech (POS) classes. Next, we show the importance of measuring the quality of cleaned-up sentences and performing multiple passes of disfluency detection. Finally, we propose using the beam-search decoder to combine multiple discriminative models such as M3N and multiple generative models such as language models (LM) and perform multiple passes of disfluency detection.
The decoder iteratively generates new hypotheses from current hypotheses by making incremental corrections to the current sentence based on certain patterns as well as information provided by existing models. It then rescores each hypothesis based on features of lexical correctness and fluency. Our decoder achieves an edit-word F1 score higher than all previously published scores on the same data set, both with and without using external sources of information.", "keyphrases": ["beam-search decoder", "disfluency detection", "language model"]} +{"id": "filippova-2010-multi", "title": "Multi-Sentence Compression: Finding Shortest Paths in Word Graphs", "abstract": "We consider the task of summarizing a cluster of related sentences with a short sentence which we call multi-sentence compression and present a simple approach based on shortest paths in word graphs. The advantage and the novelty of the proposed method is that it is syntax-lean and requires little more than a tokenizer and a tagger. Despite its simplicity, it is capable of generating grammatical and informative summaries as our experiments with English and Spanish data demonstrate.", "keyphrases": ["cluster", "tagger", "multi-sentence compression", "short path", "graph-based approach"]} +{"id": "feng-etal-2012-syntactic", "title": "Syntactic Stylometry for Deception Detection", "abstract": "Most previous studies in computerized deception detection have relied only on shallow lexico-syntactic patterns. This paper investigates syntactic stylometry for deception detection, adding a somewhat unconventional angle to prior literature. Over four different datasets spanning from the product review to the essay domain, we demonstrate that features driven from Context Free Grammar (CFG) parse trees consistently improve the detection performance over several baselines that are based only on shallow lexico-syntactic features. Our results improve the best published result on the hotel review data (Ott et al., 2011) reaching 91.2% accuracy with 14% error reduction.", "keyphrases": ["deception detection", "parse tree", "syntactic stylometry", "production rule"]} +{"id": "grabar-etal-2018-cas", "title": "CAS: French Corpus with Clinical Cases", "abstract": "Textual corpora are extremely important for various NLP applications as they provide information necessary for creating, setting and testing these applications and the corresponding tools. They are also crucial for designing reliable methods and reproducible results. Yet, in some areas, such as the medical area, due to confidentiality or to ethical reasons, it is complicated and even impossible to access textual data representative of those produced in these areas. We propose the CAS corpus built with clinical cases, such as they are reported in the published scientific literature in French. We describe this corpus, currently containing over 397,000 word occurrences, and the existing linguistic and semantic annotations.", "keyphrases": ["french corpus", "clinical case", "scientific literature"]} +{"id": "schneider-smith-2015-corpus", "title": "A Corpus and Model Integrating Multiword Expressions and Supersenses", "abstract": "This paper introduces a task of identifying and semantically classifying lexical expressions in running text. We investigate the online reviews genre, adding semantic supersense annotations to a 55,000 word English corpus that was previously annotated for multiword expressions. The noun and verb supersenses apply to full lexical expressions, whether single- or multiword.
We then present a sequence tagging model that jointly infers lexical expressions and their supersenses. Results show that even with our relatively small training corpus in a noisy domain, the joint task can be performed to attain 70% class labeling F1.", "keyphrases": ["multiword expression", "supersense", "semantic class", "mwe"]} +{"id": "banea-etal-2010-multilingual", "title": "Multilingual Subjectivity: Are More Languages Better?", "abstract": "While subjectivity related research in other languages has increased, most of the work focuses on single languages. This paper explores the integration of features originating from multiple languages into a machine learning approach to subjectivity analysis, and aims to show that this enriched feature set provides for more effective modeling for the source as well as the target languages. We show not only that we are able to achieve over 75% macro accuracy in all of the six languages we experiment with, but also that by using features drawn from multiple languages we can construct high-precision meta-classifiers with a precision of over 83%.", "keyphrases": ["subjectivity", "other language", "romanian", "disambiguation task", "sentiment classification"]} +{"id": "mcdonald-nivre-2007-characterizing", "title": "Characterizing the Errors of Data-Driven Dependency Parsing Models", "abstract": "We present a comparative error analysis of the two dominant approaches in data-driven dependency parsing: global, exhaustive, graph-based models, and local, greedy, transition-based models. We show that, in spite of similar performance overall, the two models produce different types of errors, in a way that can be explained by theoretical properties of the two models. This analysis leads to new directions for parser development.", "keyphrases": ["graph-based model", "different type", "transition-based parser", "error propagation", "long dependency"]} +{"id": "negri-etal-2012-semeval", "title": "Semeval-2012 Task 8: Cross-lingual Textual Entailment for Content Synchronization", "abstract": "This paper presents the first round of the task on Cross-lingual Textual Entailment for Content Synchronization, organized within SemEval-2012. The task was designed to promote research on semantic inference over texts written in different languages, targeting at the same time a real application scenario. Participants were presented with datasets for different language pairs, where multi-directional entailment relations (\"forward\", \"backward\", \"bidirectional\", \"no_entailment\") had to be identified. We report on the training and test data used for evaluation, the process of their creation, the participating systems (10 teams, 92 runs), the approaches adopted and the results achieved.", "keyphrases": ["textual entailment", "content synchronization", "different language"]} +{"id": "karan-etal-2012-evaluation", "title": "Evaluation of Classification Algorithms and Features for Collocation Extraction in Croatian", "abstract": "Collocations can be defined as words that occur together significantly more often than it would be expected by chance. Many natural language processing applications such as natural language generation, word sense disambiguation and machine translation can benefit from having access to information about collocated words. We approach collocation extraction as a classification problem where the task is to classify a given n-gram as either a collocation (positive) or a non-collocation (negative).
Among the features used are word frequencies, classical association measures (Dice, PMI, chi2), and POS tags. In addition, semantic word relatedness modeled by latent semantic analysis is also included. We apply wrapper feature subset selection to determine the best set of features. Performance of various classification algorithms is tested. Experiments are conducted on a manually annotated set of bigrams and trigrams sampled from a Croatian newspaper corpus. Best results obtained are 79.8 F1 measure for bigrams and 67.5 F1 measure for trigrams. The best classifier for bigrams was SVM, while for trigrams the decision tree gave the best performance. Features which contributed the most to overall performance were PMI, semantic relatedness, and POS information.", "keyphrases": ["collocation extraction", "croatian", "classification problem"]} +{"id": "watanabe-etal-2007-graph", "title": "A Graph-Based Approach to Named Entity Categorization in Wikipedia Using Conditional Random Fields", "abstract": "This paper presents a method for categorizing named entities in Wikipedia. In Wikipedia, an anchor text is glossed in a linked HTML text. We formalize named entity categorization as a task of categorizing anchor texts with linked HTML texts which glosses a named entity. Using this representation, we introduce a graph structure in which anchor texts are regarded as nodes. In order to incorporate HTML structure on the graph, three types of cliques are defined based on the HTML tree structure. We propose a method with Conditional Random Fields (CRFs) to categorize the nodes on the graph. Since the defined graph may include cycles, the exact inference of CRFs is computationally expensive. We introduce an approximate inference method using Tree-based Reparameterization (TRP) to reduce computational cost. In experiments, our proposed model obtained significant improvements compared to baseline models that use Support Vector Machines.", "keyphrases": ["graph-based approach", "entity categorization", "wikipedia", "html structure"]} +{"id": "rashkin-etal-2019-towards", "title": "Towards Empathetic Open-domain Conversation Models: A New Benchmark and Dataset", "abstract": "One challenge for dialogue agents is recognizing feelings in the conversation partner and replying accordingly, a key communicative skill. While it is straightforward for humans to recognize and acknowledge others' feelings in a conversation, this is a significant challenge for AI systems due to the paucity of suitable publicly-available datasets for training and evaluation. This work proposes a new benchmark for empathetic dialogue generation and EmpatheticDialogues, a novel dataset of 25k conversations grounded in emotional situations. Our experiments indicate that dialogue models that use our dataset are perceived to be more empathetic by human evaluators, compared to models merely trained on large-scale Internet conversation data. We also present empirical comparisons of dialogue model adaptations for empathetic responding, leveraging existing models or datasets without requiring lengthy re-training of the full model.", "keyphrases": ["conversation", "empathetic dialogue generation", "emotion class"]} +{"id": "blloshmi-etal-2020-xl", "title": "XL-AMR: Enabling Cross-Lingual AMR Parsing with Transfer Learning Techniques", "abstract": "Abstract Meaning Representation (AMR) is a popular formalism of natural language that represents the meaning of a sentence as a semantic graph.
It is agnostic about how to derive meanings from strings and for this reason it lends itself well to the encoding of semantics across languages. However, cross-lingual AMR parsing is a hard task, because training data are scarce in languages other than English and the existing English AMR parsers are not directly suited to being used in a cross-lingual setting. In this work we tackle these two problems so as to enable cross-lingual AMR parsing: we explore different transfer learning techniques for producing automatic AMR annotations across languages and develop a cross-lingual AMR parser, XL-AMR. This can be trained on the produced data and does not rely on AMR aligners or source-copy mechanisms as is commonly the case in English AMR parsing. The results of XL-AMR significantly surpass those previously reported in Chinese, German, Italian and Spanish. Finally we provide a qualitative analysis which sheds light on the suitability of AMR across languages. We release XL-AMR at github.com/SapienzaNLP/xl-amr.", "keyphrases": ["cross-lingual amr", "xl-amr", "english sentence", "alignment-based parser", "seq2seq problem"]} +{"id": "chang-lee-2003-topic", "title": "Topic Segmentation for Short Texts", "abstract": "Topic segmentation, which aims to find the boundaries between topic blocks in a text, is an important task for semantic analysis of texts. Although different solutions have been proposed for the task, many limitations and difficulties exist in the approaches. In particular, most of the methods do not work well for such cases as short texts, internet news and student's writings. In this paper, we focus on the short texts and present a method for topic segmentation. It can overcome the limitations in previous works. In preliminary experiments, the method shows that the accuracy of topic segmentation is increased effectively.", "keyphrases": ["boundary", "writing", "topic segmentation"]} +{"id": "kwiatkowski-etal-2019-natural", "title": "Natural Questions: A Benchmark for Question Answering Research", "abstract": "We present the Natural Questions corpus, a question answering data set. Questions consist of real anonymized, aggregated queries issued to the Google search engine. An annotator is presented with a question along with a Wikipedia page from the top 5 search results, and annotates a long answer (typically a paragraph) and a short answer (one or more entities) if present on the page, or marks null if no long/short answer is present. The public release consists of 307,373 training examples with single annotations; 7,830 examples with 5-way annotations for development data; and a further 7,842 examples with 5-way annotated sequestered as test data. We present experiments validating quality of the data. We also describe analysis of 25-way annotations on 302 examples, giving insights into human variability on the annotation task. We introduce robust metrics for the purposes of evaluating question answering systems; demonstrate high human upper bounds on these metrics; and establish baseline results using competitive methods drawn from related literature.", "keyphrases": ["annotator", "natural question", "passage", "wikipedia article", "source domain"]} +{"id": "schick-schutze-2021-just", "title": "It's Not Just Size That Matters: Small Language Models Are Also Few-Shot Learners", "abstract": "When scaled to hundreds of billions of parameters, pretrained language models such as GPT-3 (Brown et al., 2020) achieve remarkable few-shot performance.
However, enormous amounts of compute are required for training and applying such big models, resulting in a large carbon footprint and making it difficult for researchers and practitioners to use them. We show that performance similar to GPT-3 can be obtained with language models that are much \u201cgreener\u201d in that their parameter count is several orders of magnitude smaller. This is achieved by converting textual inputs into cloze questions that contain a task description, combined with gradient-based optimization; exploiting unlabeled data gives further improvements. We identify key factors required for successful natural language understanding with small language models.", "keyphrases": ["language model", "learner", "few-shot learning", "prompt", "small model"]} +{"id": "yessenalina-cardie-2011-compositional", "title": "Compositional Matrix-Space Models for Sentiment Analysis", "abstract": "We present a general learning-based approach for phrase-level sentiment analysis that adopts an ordinal sentiment scale and is explicitly compositional in nature. Thus, we can model the compositional effects required for accurate assignment of phrase-level sentiment. For example, combining an adverb (e.g., \"very\") with a positive polar adjective (e.g., \"good\") produces a phrase (\"very good\") with increased polarity over the adjective alone. Inspired by recent work on distributional approaches to compositionality, we model each word as a matrix and combine words using iterated matrix multiplication, which allows for the modeling of both additive and multiplicative semantic effects. Although the multiplication-based matrix-space framework has been shown to be a theoretically elegant way to model composition (Rudolph and Giesbrecht, 2010), training such models has to be done carefully: the optimization is non-convex and requires a good initial starting point. This paper presents the first such algorithm for learning a matrix-space model for semantic composition. In the context of the phrase-level sentiment analysis task, our experimental results show statistically significant improvements in performance over a bag-of-words model.", "keyphrases": ["matrix-space model", "sentiment analysis", "semantic effect", "composition"]} +{"id": "shi-etal-2017-fast", "title": "Fast(er) Exact Decoding and Global Training for Transition-Based Dependency Parsing via a Minimal Feature Set", "abstract": "We first present a minimal feature set for transition-based dependency parsing, continuing a recent trend started by Kiperwasser and Goldberg (2016a) and Cross and Huang (2016a) of using bi-directional LSTM features. We plug our minimal feature set into the dynamic-programming framework of Huang and Sagae (2010) and Kuhlmann et al. (2011) to produce the first implementation of worst-case O(n^3) exact decoders for arc-hybrid and arc-eager transition systems. With our minimal features, we also present O(n^3) global training methods. Finally, using ensembles including our new parsers, we achieve the best unlabeled attachment score reported (to our knowledge) on the Chinese Treebank and the \u201csecond-best-in-class\u201d result on the English Penn Treebank.", "keyphrases": ["exact decoding", "dependency parsing", "implementation"]} +{"id": "goldberg-nivre-2013-training", "title": "Training Deterministic Parsers with Non-Deterministic Oracles", "abstract": "Greedy transition-based parsers are very fast but tend to suffer from error propagation.
This problem is aggravated by the fact that they are normally trained using oracles that are deterministic and incomplete in the sense that they assume a unique canonical path through the transition system and are only valid as long as the parser does not stray from this path. In this paper, we give a general characterization of oracles that are nondeterministic and complete, present a method for deriving such oracles for transition systems that satisfy a property we call arc decomposition, and instantiate this method for three well-known transition systems from the literature. We say that these oracles are dynamic, because they allow us to dynamically explore alternative and nonoptimal paths during training \u2014 in contrast to oracles that statically assume a unique optimal path. Experimental evaluation on a wide range of data sets clearly shows that using dynamic oracles to train greedy parsers gives substantial improvements in accuracy. Moreover, this improvement comes at no cost in terms of efficiency, unlike other techniques like beam search.", "keyphrases": ["oracle", "dependency parsing", "imitation"]} +{"id": "chen-qian-2020-relation", "title": "Relation-Aware Collaborative Learning for Unified Aspect-Based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis (ABSA) involves three subtasks, i.e., aspect term extraction, opinion term extraction, and aspect-level sentiment classification. Most existing studies focused on one of these subtasks only. Several recent researches made successful attempts to solve the complete ABSA problem with a unified framework. However, the interactive relations among three subtasks are still under-exploited. We argue that such relations encode collaborative signals between different subtasks. For example, when the opinion term is \u201cdelicious\u201d, the aspect term must be \u201cfood\u201d rather than \u201cplace\u201d. In order to fully exploit these relations, we propose a Relation-Aware Collaborative Learning (RACL) framework which allows the subtasks to work coordinately via the multi-task learning and relation propagation mechanisms in a stacked multi-layer network. Extensive experiments on three real-world datasets demonstrate that RACL significantly outperforms the state-of-the-art methods for the complete ABSA task.", "keyphrases": ["sentiment analysis", "aspect term", "relation-aware collaborative learning"]} +{"id": "tan-bansal-2020-vokenization", "title": "Vokenization: Improving Language Understanding with Contextualized, Visual-Grounded Supervision", "abstract": "Humans learn language by listening, speaking, writing, reading, and also, via interaction with the multimodal real world. Existing language pre-training frameworks show the effectiveness of text-only self-supervision while we explore the idea of a visually-supervised language model in this paper. We find that the main reason hindering this exploration is the large divergence in magnitude and distributions between the visually-grounded language datasets and pure-language corpora. Therefore, we develop a technique named \u201cvokenization\u201d that extrapolates multimodal alignments to language-only data by contextually mapping language tokens to their related images (which we call \u201cvokens\u201d). The \u201cvokenizer\u201d is trained on relatively small image captioning datasets and we then apply it to generate vokens for large language corpora.
Trained with these contextually generated vokens, our visually-supervised language models show consistent improvements over self-supervised alternatives on multiple pure-language tasks such as GLUE, SQuAD, and SWAG.", "keyphrases": ["language understanding", "visual-grounded supervision", "vokenization", "visual information"]} +{"id": "hassan-menezes-2013-social", "title": "Social Text Normalization using Contextual Graph Random Walks", "abstract": "We introduce a social media text normalization system that can be deployed as a preprocessing step for Machine Translation and various NLP applications to handle social media text. The proposed system is based on unsupervised learning of the normalization equivalences from unlabeled text. The proposed approach uses Random Walks on a contextual similarity bipartite graph constructed from n-gram sequences on large unlabeled text corpus. We show that the proposed approach has a very high precision of (92.43) and a reasonable recall of (56.4). When used as a preprocessing step for a state-of-the-art machine translation system, the translation quality on social media text improved by 6%. The proposed approach is domain and language independent and can be deployed as a preprocessing step for any NLP application to handle social media text.", "keyphrases": ["contextual similarity", "bipartite graph", "n-gram sequence", "unlabeled text corpus", "social medium text"]} +{"id": "mehdad-etal-2013-towards", "title": "Towards Topic Labeling with Phrase Entailment and Aggregation", "abstract": "We propose a novel framework for topic labeling that assigns the most representative phrases for a given set of sentences covering the same topic. We build an entailment graph over phrases that are extracted from the sentences, and use the entailment relations to identify and select the most relevant phrases. We then aggregate those selected phrases by means of phrase generalization and merging. We motivate our approach by applying over conversational data, and show that our framework improves performance significantly over baseline algorithms.", "keyphrases": ["topic labeling", "entailment graph", "noun phrase"]} +{"id": "etxeberria-etal-2016-evaluating", "title": "Evaluating the Noisy Channel Model for the Normalization of Historical Texts: Basque, Spanish and Slovene", "abstract": "This paper presents a method for the normalization of historical texts using a combination of weighted finite-state transducers and language models. We have extended our previous work on the normalization of dialectal texts and tested the method against a 17th century literary work in Basque. This preprocessed corpus is made available in the LREC repository. The performance of this method for learning relations between historical and contemporary word forms is evaluated against resources in three languages. The method we present learns to map phonological changes using a noisy channel model. The model is based on techniques commonly used for phonological inference and producing Grapheme-to-Grapheme conversion systems encoded as weighted transducers and produces F-scores above 80% in the task for Basque. A wider evaluation shows that the approach performs equally well with all the languages in our evaluation suite: Basque, Spanish and Slovene.
A comparison against other methods that address the same task is also provided.", "keyphrases": ["noisy channel model", "normalization", "historical text", "language model"]} +{"id": "ritter-etal-2011-data", "title": "Data-Driven Response Generation in Social Media", "abstract": "We present a data-driven approach to generating responses to Twitter status posts, based on phrase-based Statistical Machine Translation. We find that mapping conversational stimuli onto responses is more difficult than translating between languages, due to the wider range of possible responses, the larger fraction of unaligned words/phrases, and the presence of large phrase pairs whose alignment cannot be further decomposed. After addressing these challenges, we compare approaches based on SMT and Information Retrieval in a human evaluation. We show that SMT outperforms IR on this task, and its output is preferred over actual human responses in 15% of cases. As far as we are aware, this is the first work to investigate the use of phrase-based SMT to directly translate a linguistic stimulus into an appropriate response.", "keyphrases": ["conversation", "data-driven response generation", "social medium"]} +{"id": "yamada-matsumoto-2003-statistical", "title": "Statistical Dependency Analysis with Support Vector Machines", "abstract": "In this paper, we propose a method for analyzing word-word dependencies using a deterministic bottom-up manner using Support Vector machines. We experimented with dependency trees converted from Penn treebank data, and achieved over 90% accuracy of word-word dependency. Though the result is little worse than the most up-to-date phrase structure based parsers, it looks satisfactorily accurate considering that our parser uses no information from phrase structures.", "keyphrases": ["support vector machines", "dependency parsing", "decision", "series", "yamada"]} +{"id": "webster-etal-2018-mind", "title": "Mind the GAP: A Balanced Corpus of Gendered Ambiguous Pronouns", "abstract": "Coreference resolution is an important task for natural language understanding, and the resolution of ambiguous pronouns a longstanding challenge. Nonetheless, existing corpora do not capture ambiguous pronouns in sufficient volume or diversity to accurately indicate the practical utility of models. Furthermore, we find gender bias in existing corpora and systems favoring masculine entities. To address this, we present and release GAP, a gender-balanced labeled corpus of 8,908 ambiguous pronoun\u2013name pairs sampled to provide diverse coverage of challenges posed by real-world text. We explore a range of baselines that demonstrate the complexity of the challenge, the best achieving just 66.9% F1. We show that syntactic structure and continuous neural models provide promising, complementary cues for approaching the challenge.", "keyphrases": ["balanced corpus", "gendered ambiguous pronouns", "masculine", "coreference resolution dataset"]} +{"id": "liang-etal-2011-semi", "title": "Semi-Automatic Identification of Bilingual Synonymous Technical Terms from Phrase Tables and Parallel Patent Sentences", "abstract": "In the research field of machine translation of patent documents, the issue of acquiring technical term translation equivalent pairs automatically from parallel patent documents is one of the most important. We take an approach of utilizing the phrase table of a state-of-the-art phrase-based statistical machine translation model.
In this task, we consider situations where a technical term is observed in many parallel patent sentences and is translated into many translation equivalents. We apply SVM to the task of identifying synonymous translation equivalent pairs and achieve almost 98% precision and over 40% F-measure. Then, in order to improve recall, we introduce a semi-automatic framework, where we employ the strategy of selecting more than one seed for each set of candidate bilingual synonymous term pairs. By manually judging whether each pair of two seeds is synonymous or not, we achieve over 95% precision and 50% recall.", "keyphrases": ["parallel patent sentence", "technical term translation", "equivalent pair"]} +{"id": "wu-etal-2021-bass", "title": "BASS: Boosting Abstractive Summarization with Unified Semantic Graph", "abstract": "Abstractive summarization for long-document or multi-document remains challenging for the Seq2Seq architecture, as Seq2Seq is not good at analyzing long-distance relations in text. In this paper, we present BASS, a novel framework for Boosting Abstractive Summarization based on a unified Semantic graph, which aggregates co-referent phrases distributing across a long range of context and conveys rich relations between phrases. Further, a graph-based encoder-decoder model is proposed to improve both the document representation and summary generation process by leveraging the graph structure. Specifically, several graph augmentation methods are designed to encode both the explicit and implicit relations in the text while the graph-propagation attention mechanism is developed in the decoder to select salient content into the summary. Empirical results show that the proposed architecture brings substantial improvements for both long-document and multi-document summarization tasks.", "keyphrases": ["abstractive summarization", "unified semantic graph", "co-referent phrase", "bass"]} +{"id": "akbik-etal-2018-contextual", "title": "Contextual String Embeddings for Sequence Labeling", "abstract": "Recent advances in language modeling using recurrent neural networks have made it viable to model language as distributions over characters. By learning to predict the next character on the basis of previous characters, such models have been shown to automatically internalize linguistic concepts such as words, sentences, subclauses and even sentiment. In this paper, we propose to leverage the internal states of a trained character language model to produce a novel type of word embedding which we refer to as contextual string embeddings. Our proposed embeddings have the distinct properties that they (a) are trained without any explicit notion of words and thus fundamentally model words as sequences of characters, and (b) are contextualized by their surrounding text, meaning that the same word will have different embeddings depending on its contextual use. We conduct a comparative evaluation against previous embeddings and find that our embeddings are highly useful for downstream tasks: across four classic sequence labeling tasks we consistently outperform the previous state-of-the-art. In particular, we significantly outperform previous work on English and German named entity recognition (NER), allowing us to report new state-of-the-art F1-scores on the CoNLL03 shared task.
We release all code and pre-trained language models in a simple-to-use framework to the research community, to enable reproduction of these experiments and application of our proposed embeddings to other tasks: ", "keyphrases": ["language model", "downstream task", "sequence labeling task", "contextual string embeddings", "well result"]} +{"id": "cocarascu-toni-2017-identifying", "title": "Identifying attack and support argumentative relations using deep learning", "abstract": "We propose a deep learning architecture to capture argumentative relations of attack and support from one piece of text to another, of the kind that naturally occur in a debate. The architecture uses two (unidirectional or bidirectional) Long Short-Term Memory networks and (trained or non-trained) word embeddings, and allows to considerably improve upon existing techniques that use syntactic features and supervised classifiers for the same form of (relation-based) argument mining.", "keyphrases": ["attack", "argumentative relation", "deep learning"]} +{"id": "johnson-zhang-2015-effective", "title": "Effective Use of Word Order for Text Categorization with Convolutional Neural Networks", "abstract": "Convolutional neural network (CNN) is a neural network that can make use of the internal structure of data such as the 2D structure of image data. This paper studies CNN on text categorization to exploit the 1D structure (namely, word order) of text data for accurate prediction. Instead of using low-dimensional word vectors as input as is often done, we directly apply CNN to high-dimensional text data, which leads to directly learning embedding of small text regions for use in classification. In addition to a straightforward adaptation of CNN from image to text, a simple but new variation which employs bag-of-word conversion in the convolution layer is proposed. An extension to combine multiple convolution layers is also explored for higher accuracy. The experiments demonstrate the effectiveness of our approach in comparison with state-of-the-art methods.", "keyphrases": ["text categorization", "convolutional neural networks", "cnn", "one-hot vector"]} +{"id": "tan-etal-2019-hierarchical", "title": "Hierarchical Modeling of Global Context for Document-Level Neural Machine Translation", "abstract": "Document-level machine translation (MT) remains challenging due to the difficulty in efficiently using document context for translation. In this paper, we propose a hierarchical model to learn the global context for document-level neural machine translation (NMT). This is done through a sentence encoder to capture intra-sentence dependencies and a document encoder to model document-level inter-sentence consistency and coherence. With this hierarchical architecture, we feed back the extracted global document context to each word in a top-down fashion to distinguish different translations of a word according to its specific surrounding context. In addition, since large-scale in-domain document-level parallel corpora are usually unavailable, we use a two-step training strategy to take advantage of a large-scale corpus with out-of-domain parallel sentence pairs and a small-scale corpus with in-domain parallel document pairs to achieve domain adaptability.
Experimental results on several benchmark corpora show that our proposed model can significantly improve document-level translation performance over several strong NMT baselines.", "keyphrases": ["global context", "neural machine translation", "document-level nmt"]} +{"id": "min-etal-2020-ambigqa", "title": "AmbigQA: Answering Ambiguous Open-domain Questions", "abstract": "Ambiguity is inherent to open-domain question answering; especially when exploring new topics, it can be difficult to ask questions that have a single, unambiguous answer. In this paper, we introduce AmbigQA, a new open-domain question answering task which involves finding every plausible answer, and then rewriting the question for each one to resolve the ambiguity. To study this task, we construct AmbigNQ, a dataset covering 14,042 questions from NQ-open, an existing open-domain QA benchmark. We find that over half of the questions in NQ-open are ambiguous, with diverse sources of ambiguity such as event and entity references. We also present strong baseline models for AmbigQA which we show benefit from weakly supervised learning that incorporates NQ-open, strongly suggesting our new task and data will support significant future research effort. Our data and baselines are available at .", "keyphrases": ["ambiguity", "open-domain question", "ambigqa"]} +{"id": "fiszman-etal-2007-interpreting", "title": "Interpreting comparative constructions in biomedical text", "abstract": "We propose a methodology using underspecified semantic interpretation to process comparative constructions in MEDLINE citations, concentrating on two structures that are prevalent in the research literature reporting on clinical trials for drug therapies. The method exploits an existing semantic processor, SemRep, which constructs predications based on the Unified Medical Language System. Results of a preliminary evaluation were recall of 70%, precision of 96%, and F-score of 81%. We discuss the generalization of the methodology to other entities such as therapeutic and diagnostic procedures. The available structures in computable format are potentially useful for interpreting outcome statements in MEDLINE citations.", "keyphrases": ["comparative construction", "biomedical text", "interpretation"]} +{"id": "dyer-etal-2008-generalizing", "title": "Generalizing Word Lattice Translation", "abstract": "Word lattice decoding has proven useful in spoken language translation; we argue that it provides a compelling model for translation of text genres, as well. We show that prior work in translating lattices using finite state techniques can be naturally extended to more expressive synchronous context-free grammar-based models. Additionally, we resolve a significant complication that non-linear word lattice inputs introduce in reordering models. Our experiments evaluating the approach demonstrate substantial gains for Chinese-English and Arabic-English translation.", "keyphrases": ["lattice", "segmentation", "source sentence", "value"]} +{"id": "tang-etal-2015-learning", "title": "Learning Semantic Representations of Users and Products for Document Level Sentiment Classification", "abstract": "Neural network methods have achieved promising results for sentiment classification of text. However, these models only use semantics of texts, while ignoring users who express the sentiment and products which are evaluated, both of which have great influences on interpreting the sentiment of text.
In this paper, we address this issue by incorporating user- and product-level information into a neural network approach for document level sentiment classification. Users and products are modeled using vector space models, the representations of which capture important global clues such as individual preferences of users or overall qualities of products. Such global evidence in turn facilitates the embedding learning procedure at document level, yielding better text representations. By combining evidence at user-, product- and document-level in a unified neural framework, the proposed model achieves state-of-the-art performances on IMDB and Yelp datasets.", "keyphrases": ["product", "sentiment classification", "attention model"]} +{"id": "zmigrod-etal-2019-counterfactual", "title": "Counterfactual Data Augmentation for Mitigating Gender Stereotypes in Languages with Rich Morphology", "abstract": "Gender stereotypes are manifest in most of the world's languages and are consequently propagated or amplified by NLP systems. Although research has focused on mitigating gender stereotypes in English, the approaches that are commonly employed produce ungrammatical sentences in morphologically rich languages. We present a novel approach for converting between masculine-inflected and feminine-inflected sentences in such languages. For Spanish and Hebrew, our approach achieves F1 scores of 82% and 73% at the level of tags and accuracies of 90% and 87% at the level of forms. By evaluating our approach using four different languages, we show that, on average, it reduces gender stereotyping by a factor of 2.5 without any sacrifice to grammaticality.", "keyphrases": ["gender", "rich language", "feminine-inflected sentence", "counterfactual data augmentation"]} +{"id": "samardzic-etal-2016-archimob", "title": "ArchiMob - A Corpus of Spoken Swiss German", "abstract": "Swiss dialects of German are, unlike most dialects of well standardised languages, widely used in everyday communication. Despite this fact, automatic processing of Swiss German is still a considerable challenge due to the fact that it is mostly a spoken variety rarely recorded and that it is subject to considerable regional variation. This paper presents a freely available general-purpose corpus of spoken Swiss German suitable for linguistic research, but also for training automatic tools. The corpus is a result of a long design process, intensive manual work and specially adapted computational processing. We first describe how the documents were transcribed, segmented and aligned with the sound source, and how inconsistent transcriptions were unified through an additional normalisation layer. We then present a bootstrapping approach to automatic normalisation using different machine-translation-inspired methods. Furthermore, we evaluate the performance of part-of-speech taggers on our data and show how the same bootstrapping approach improves part-of-speech tagging by 10% over four rounds. Finally, we present the modalities of access of the corpus as well as the data format.", "keyphrases": ["dialect", "archimob", "normalization"]} +{"id": "wang-etal-2020-pyramid", "title": "Pyramid: A Layered Model for Nested Named Entity Recognition", "abstract": "This paper presents Pyramid, a novel layered model for Nested Named Entity Recognition (nested NER). In our approach, token or text region embeddings are recursively inputted into L flat NER layers, from bottom to top, stacked in a pyramid shape.
Each time an embedding passes through a layer of the pyramid, its length is reduced by one. Its hidden state at layer l represents an l-gram in the input text, which is labeled only if its corresponding text region represents a complete entity mention. We also design an inverse pyramid to allow bidirectional interaction between layers. The proposed method achieves state-of-the-art F1 scores in nested NER on ACE-2004, ACE-2005, GENIA, and NNE, which are 80.27, 79.42, 77.78, and 93.70 with conventional embeddings, and 87.74, 86.34, 79.31, and 94.68 with pre-trained contextualized embeddings. In addition, our model can be used for the more general task of Overlapping Named Entity Recognition. A preliminary experiment confirms the effectiveness of our method in overlapping NER.", "keyphrases": ["flat ner layer", "entity mention", "pyramid"]} +{"id": "xu-etal-2019-scaling", "title": "Scaling up Open Tagging from Tens to Thousands: Comprehension Empowered Attribute Value Extraction from Product Title", "abstract": "Supplementing product information by extracting attribute values from title is a crucial task in the e-Commerce domain. Previous studies treat each attribute only as an entity type and build one set of NER tags (e.g., BIO) for each of them, leading to a scalability issue that makes them unfit for the large-sized attribute systems of real-world e-Commerce. In this work, we propose a novel approach to support value extraction scaling up to thousands of attributes without losing performance: (1) We propose to regard each attribute as a query and adopt only one global set of BIO tags for any attributes to reduce the burden of attribute tag or model explosion; (2) We explicitly model the semantic representations for attribute and title, and develop an attention mechanism to capture the interactive semantic relations in-between to enforce our framework to be attribute comprehensive. We conduct extensive experiments in real-life datasets. The results show that our model not only outperforms existing state-of-the-art NER tagging models, but also is robust and generates promising results for up to 8,906 attributes.", "keyphrases": ["attribute", "product title", "e-commerce domain", "bio tag"]} +{"id": "kumar-etal-2019-reinforcement", "title": "Reinforcement Learning based Curriculum Optimization for Neural Machine Translation", "abstract": "We consider the problem of making efficient use of heterogeneous training data in neural machine translation (NMT). Specifically, given a training dataset with a sentence-level feature such as noise, we seek an optimal curriculum, or order for presenting examples to the system during training. Our curriculum framework allows examples to appear an arbitrary number of times, and thus generalizes data weighting, filtering, and fine-tuning schemes. Rather than relying on prior knowledge to design a curriculum, we use reinforcement learning to learn one automatically, jointly with the NMT system, in the course of a single training run. We show that this approach can beat uniform baselines on Paracrawl and WMT English-to-French datasets by +3.4 and +1.3 BLEU respectively.
Additionally, we match the performance of strong filtering baselines and hand-designed, state-of-the-art curricula.", "keyphrases": ["curriculum", "neural machine translation", "reinforcement learning", "model training"]}
+{"id": "mohammad-kiritchenko-2018-understanding", "title": "Understanding Emotions: A Dataset of Tweets to Study Interactions between Affect Categories", "abstract": "Human emotions are complex and nuanced. Yet, an overwhelming majority of the work in automatically detecting emotions from text has focused only on classifying text into positive, negative, and neutral classes, and a much smaller amount on classifying text into basic emotion categories such as joy, sadness, and fear. Our goal is to create a single textual dataset that is annotated for many emotion (or affect) dimensions (from both the basic emotion model and the VAD model). For each emotion dimension, we annotate the data for not just coarse classes (such as anger or no anger) but also for fine-grained real-valued scores indicating the intensity of emotion (anger, sadness, valence, etc.). We use Best\u2013Worst Scaling (BWS) to address the limitations of traditional rating scale methods such as inter- and intra-annotator inconsistency by employing comparative annotations. We show that the fine-grained intensity scores thus obtained are reliable (repeat annotations lead to similar scores). We choose Twitter as the source of the textual data we annotate because tweets are self-contained, widely used, public posts, and tend to be rich in emotions. The new dataset is useful for training and testing supervised machine learning algorithms for multi-label emotion classification, emotion intensity regression, detecting valence, detecting ordinal class of intensity of emotion (slightly sad, very angry, etc.), and detecting ordinal class of valence (or sentiment). We make the data available for the recent SemEval-2018 Task 1: Affect in Tweets, which explores these five tasks. The dataset also sheds light on crucial research questions such as: which emotions often present together in tweets?; how do the intensities of the three negative emotions relate to each other?; and how do the intensities of the basic emotions relate to valence?", "keyphrases": ["emotion", "tweets", "dimension"]}
+{"id": "bjerva-augenstein-2018-phonology", "title": "From Phonology to Syntax: Unsupervised Linguistic Typology at Different Levels with Language Embeddings", "abstract": "A core part of linguistic typology is the classification of languages according to linguistic properties, such as those detailed in the World Atlas of Language Structure (WALS). Doing this manually is prohibitively time-consuming, which is in part evidenced by the fact that only 100 out of over 7,000 languages spoken in the world are fully covered in WALS. We learn distributed language representations, which can be used to predict typological properties on a massively multilingual scale. Additionally, quantitative and qualitative analyses of these language embeddings can tell us how language similarities are encoded in NLP models for tasks at different typological levels. The representations are learned in an unsupervised manner alongside tasks at three typological levels: phonology (grapheme-to-phoneme prediction, and phoneme reconstruction), morphology (morphological inflection), and syntax (part-of-speech tagging). We consider more than 800 languages and find significant differences in the language representations encoded, depending on the target task. 
For instance, although Norwegian Bokm\u00e5l and Danish are typologically close to one another, they are phonologically distant, which is reflected in their language embeddings growing relatively distant in a phonological task. We are also able to predict typological features in WALS with high accuracies, even for unseen language families.", "keyphrases": ["phonology", "syntax", "language embedding"]} +{"id": "xu-etal-2018-unpaired", "title": "Unpaired Sentiment-to-Sentiment Translation: A Cycled Reinforcement Learning Approach", "abstract": "The goal of sentiment-to-sentiment \u201ctranslation\u201d is to change the underlying sentiment of a sentence while keeping its content. The main challenge is the lack of parallel data. To solve this problem, we propose a cycled reinforcement learning method that enables training on unpaired data by collaboration between a neutralization module and an emotionalization module. We evaluate our approach on two review datasets, Yelp and Amazon. Experimental results show that our approach significantly outperforms the state-of-the-art systems. Especially, the proposed method substantially improves the content preservation performance. The BLEU score is improved from 1.64 to 22.46 and from 0.56 to 14.06 on the two datasets, respectively.", "keyphrases": ["reinforcement learning", "style", "sentiment transfer"]} +{"id": "xu-etal-2016-question", "title": "Question Answering on Freebase via Relation Extraction and Textual Evidence", "abstract": "Existing knowledge-based question answering systems often rely on small annotated training data. While shallow methods like relation extraction are robust to data scarcity, they are less expressive than the deep meaning representation methods like semantic parsing, thereby failing at answering questions involving multiple constraints. Here we alleviate this problem by empowering a relation extraction method with additional evidence from Wikipedia. We first present a neural network based relation extractor to retrieve the candidate answers from Freebase, and then infer over Wikipedia to validate these answers. Experiments on the WebQuestions question answering dataset show that our method achieves an F_1 of 53.3%, a substantial improvement over the state-of-the-art.", "keyphrases": ["relation extraction", "wikipedia", "candidate answer"]} +{"id": "ghanimifard-dobnik-2019-goes", "title": "What goes into a word: generating image descriptions with top-down spatial knowledge", "abstract": "Generating grounded image descriptions requires associating linguistic units with their corresponding visual clues. A common method is to train a decoder language model with attention mechanism over convolutional visual features. Attention weights align the stratified visual features arranged by their location with tokens, most commonly words, in the target description. However, words such as spatial relations (e.g. next to and under) are not directly referring to geometric arrangements of pixels but to complex geometric and conceptual representations. The aim of this paper is to evaluate what representations facilitate generating image descriptions with spatial relations and lead to better grounded language generation. 
In particular, we investigate the contribution of three different representational modalities in generating relational referring expressions: (i) pre-trained convolutional visual features, (ii) different top-down geometric relational knowledge between objects, and (iii) world knowledge captured by contextual embeddings in language models.", "keyphrases": ["image description", "spatial knowledge", "language model"]}
+{"id": "jiang-etal-2008-cascaded", "title": "A Cascaded Linear Model for Joint Chinese Word Segmentation and Part-of-Speech Tagging", "abstract": "We propose a cascaded linear model for joint Chinese word segmentation and part-of-speech tagging. With a character-based perceptron as the core, combined with real-valued features such as language models, the cascaded model is able to efficiently utilize knowledge sources that are inconvenient to incorporate into the perceptron directly. Experiments show that the cascaded model achieves improved accuracies on both segmentation only and joint segmentation and part-of-speech tagging. On the Penn Chinese Treebank 5.0, we obtain an error reduction of 18.5% on segmentation and 12% on joint segmentation and part-of-speech tagging over the perceptron-only baseline.", "keyphrases": ["linear model", "part-of-speech tagging", "joint segmentation"]}
+{"id": "cui-etal-2021-template", "title": "Template-Based Named Entity Recognition Using BART", "abstract": "There is a recent interest in investigating few-shot NER, where the low-resource target domain has different label sets compared with a resource-rich source domain. Existing methods use a similarity-based metric. However, they cannot make full use of knowledge transfer in NER model parameters. To address the issue, we propose a template-based method for NER, treating NER as a language model ranking problem in a sequence-to-sequence framework, where original sentences and statement templates filled by candidate named entity span are regarded as the source sequence and the target sequence, respectively. For inference, the model is required to classify each candidate span based on the corresponding template scores. Our experiments demonstrate that the proposed method achieves 92.55% F1 score on the CoNLL03 (rich-resource task), and is significantly better than fine-tuning BERT by 10.88%, 15.34%, and 11.73% F1 score on the MIT Movie, the MIT Restaurant, and the ATIS (low-resource task), respectively.", "keyphrases": ["entity recognition", "bart", "template-based method", "language model"]}
+{"id": "dredze-etal-2010-entity", "title": "Entity Disambiguation for Knowledge Base Population", "abstract": "The integration of facts derived from information extraction systems into existing knowledge bases requires a system to disambiguate entity mentions in the text. This is challenging due to issues such as non-uniform variations in entity names, mention ambiguity, and entities absent from a knowledge base. We present a state of the art system for entity disambiguation that not only addresses these challenges but also scales to knowledge bases with several million entries using very little resources. 
Further, our approach achieves performance of up to 95% on entities mentioned from newswire and 80% on a public test set that was designed to include challenging queries.", "keyphrases": ["knowledge base", "entity disambiguation", "wikipedia entry", "ranking problem", "large number"]} +{"id": "beilharz-etal-2020-librivoxdeen", "title": "LibriVoxDeEn: A Corpus for German-to-English Speech Translation and German Speech Recognition", "abstract": "We present a corpus of sentence-aligned triples of German audio, German text, and English translation, based on German audio books. The speech translation data consist of 110 hours of audio material aligned to over 50k parallel sentences. An even larger dataset comprising 547 hours of German speech aligned to German text is available for speech recognition. The audio data is read speech and thus low in disfluencies. The quality of audio and sentence alignments has been checked by a manual evaluation, showing that speech alignment quality is in general very high. The sentence alignment quality is comparable to well-used parallel translation data and can be adjusted by cutoffs on the automatic alignment score. To our knowledge, this corpus is to date the largest resource for German speech recognition and for end-to-end German-to-English speech translation.", "keyphrases": ["german-to-english speech translation", "speech recognition", "librivoxdeen"]} +{"id": "gur-etal-2018-dialsql", "title": "DialSQL: Dialogue Based Structured Query Generation", "abstract": "The recent advance in deep learning and semantic parsing has significantly improved the translation accuracy of natural language questions to structured queries. However, further improvement of the existing approaches turns out to be quite challenging. Rather than solely relying on algorithmic innovations, in this work, we introduce DialSQL, a dialogue-based structured query generation framework that leverages human intelligence to boost the performance of existing algorithms via user interaction. DialSQL is capable of identifying potential errors in a generated SQL query and asking users for validation via simple multi-choice questions. User feedback is then leveraged to revise the query. We design a generic simulator to bootstrap synthetic training dialogues and evaluate the performance of DialSQL on the WikiSQL dataset. Using SQLNet as a black box query generation tool, DialSQL improves its performance from 61.3% to 69.0% using only 2.4 validation questions per dialogue.", "keyphrases": ["query", "semantic parsing", "dialsql"]} +{"id": "hu-etal-2019-domain-adaptation", "title": "Domain Adaptation of Neural Machine Translation by Lexicon Induction", "abstract": "It has been previously noted that neural machine translation (NMT) is very sensitive to domain shift. In this paper, we argue that this is a dual effect of the highly lexicalized nature of NMT, resulting in failure for sentences with large numbers of unknown words, and lack of supervision for domain-specific words. To remedy this problem, we propose an unsupervised adaptation method which fine-tunes a pre-trained out-of-domain NMT model using a pseudo-in-domain corpus. Specifically, we perform lexicon induction to extract an in-domain lexicon, and construct a pseudo-parallel in-domain corpus by performing word-for-word back-translation of monolingual in-domain target sentences. 
In five domains over twenty pairwise adaptation settings and two model architectures, our method achieves consistent improvements without using any in-domain parallel sentences, improving up to 14 BLEU over unadapted models, and up to 2 BLEU over strong back-translation baselines.", "keyphrases": ["neural machine translation", "lexicon induction", "adaptation method", "pseudo-parallel in-domain corpus", "back-translation"]}
+{"id": "culotta-etal-2007-first", "title": "First-Order Probabilistic Models for Coreference Resolution", "abstract": "Traditional noun phrase coreference resolution systems represent features only of pairs of noun phrases. In this paper, we propose a machine learning method that enables features over sets of noun phrases, resulting in a first-order probabilistic model for coreference. We outline a set of approximations that make this approach practical, and apply our method to the ACE coreference dataset, achieving a 45% error reduction over a comparable method that only considers features of pairs of noun phrases. This result demonstrates an example of how a first-order logic representation can be incorporated into a probabilistic model and scaled efficiently.", "keyphrases": ["coreference resolution", "mention", "first-order probabilistic model", "cluster"]}
+{"id": "ritter-etal-2013-modeling", "title": "Modeling Missing Data in Distant Supervision for Information Extraction", "abstract": "Distant supervision algorithms learn information extraction models given only large readily available databases and text collections. Most previous work has used heuristics for generating labeled data, for example assuming that facts not contained in the database are not mentioned in the text, and facts in the database must be mentioned at least once. In this paper, we propose a new latent-variable approach that models missing data. This provides a natural way to incorporate side information, for instance modeling the intuition that text will often mention rare entities which are likely to be missing in the database. Despite the added complexity introduced by reasoning about missing data, we demonstrate that a carefully designed local search approach to inference is very accurate and scales to large datasets. Experiments demonstrate improved performance for binary and unary relation extraction when compared to learning with heuristic labels, including on average a 27% increase in area under the precision recall curve in the binary case.", "keyphrases": ["distant supervision", "relation extraction", "knowledge base", "graphical model"]}
+{"id": "dong-zhang-2016-automatic", "title": "Automatic Features for Essay Scoring \u2013 An Empirical Study", "abstract": "Essay scoring is a complicated process requiring analyzing, summarizing and judging expertise. Traditional work on essay scoring focused on automatic handcrafted features, which are expensive yet sparse. Neural models offer a way to learn syntactic and semantic features automatically, which can potentially improve upon discrete features. In this paper, we employ a convolutional neural network (CNN) for the effect of automatically learning features, and compare the result with the state-of-the-art discrete baselines. 
For in-domain and domain-adaptation essay scoring tasks, our neural model empirically outperforms discrete models.", "keyphrases": ["essay scoring", "automatic feature", "sentence representation"]} +{"id": "guu-etal-2015-traversing", "title": "Traversing Knowledge Graphs in Vector Space", "abstract": "Path queries on a knowledge graph can be used to answer compositional questions such as \"What languages are spoken by people living in Lisbon?\". However, knowledge graphs often have missing facts (edges) which disrupts path queries. Recent models for knowledge base completion impute missing facts by embedding knowledge graphs in vector spaces. We show that these models can be recursively applied to answer path queries, but that they suffer from cascading errors. This motivates a new \"compositional\" training objective, which dramatically improves all models' ability to answer path queries, in some cases more than doubling accuracy. On a standard knowledge base completion task, we also demonstrate that compositional training acts as a novel form of structural regularization, reliably improving performance across all base models (reducing errors by up to 43%) and achieving new state-of-the-art results.", "keyphrases": ["knowledge graph", "vector space", "path", "reasoning", "entity pair"]} +{"id": "yamangil-nelken-2008-mining", "title": "Mining Wikipedia Revision Histories for Improving Sentence Compression", "abstract": "A well-recognized limitation of research on supervised sentence compression is the dearth of available training data. We propose a new and bountiful resource for such training data, which we obtain by mining the revision history of Wikipedia for sentence compressions and expansions. Using only a fraction of the available Wikipedia data, we have collected a training corpus of over 380,000 sentence pairs, two orders of magnitude larger than the standardly used Ziff-Davis corpus. Using this new-found data, we propose a novel lexicalized noisy channel model for sentence compression, achieving improved results in grammaticality and compression rate criteria with a slight decrease in importance.", "keyphrases": ["wikipedia", "revision history", "sentence compression"]} +{"id": "belinkov-etal-2019-adversarial", "title": "On Adversarial Removal of Hypothesis-only Bias in Natural Language Inference", "abstract": "Popular Natural Language Inference (NLI) datasets have been shown to be tainted by hypothesis-only biases. Adversarial learning may help models ignore sensitive biases and spurious correlations in data. We evaluate whether adversarial learning can be used in NLI to encourage models to learn representations free of hypothesis-only biases. Our analyses indicate that the representations learned via adversarial learning may be less biased, with only small drops in NLI accuracy.", "keyphrases": ["hypothesis-only bias", "natural language inference", "nli", "adversarial training"]} +{"id": "zhou-etal-2016-deep", "title": "Deep Recurrent Models with Fast-Forward Connections for Neural Machine Translation", "abstract": "Neural machine translation (NMT) aims at solving machine translation (MT) problems using neural networks and has exhibited promising results in recent years. However, most of the existing NMT models are shallow and there is still a performance gap between a single NMT model and the best conventional MT system. 
In this work, we introduce a new type of linear connections, named fast-forward connections, based on deep Long Short-Term Memory (LSTM) networks, and an interleaved bi-directional architecture for stacking the LSTM layers. Fast-forward connections play an essential role in propagating the gradients and building a deep topology of depth 16. On the WMT'14 English-to-French task, we achieve BLEU=37.7 with a single attention model, which outperforms the corresponding single shallow model by 6.2 BLEU points. This is the first time that a single NMT model achieves state-of-the-art performance and outperforms the best conventional model by 0.7 BLEU points. We can still achieve BLEU=36.3 even without using an attention mechanism. After special handling of unknown words and model ensembling, we obtain the best score reported to date on this task with BLEU=40.4. Our models are also validated on the more difficult WMT'14 English-to-German task.", "keyphrases": ["fast-forward connection", "connection", "neural machine translation", "attention model"]}
+{"id": "lakew-etal-2018-transfer", "title": "Transfer Learning in Multilingual Neural Machine Translation with Dynamic Vocabulary", "abstract": "We propose a method to transfer knowledge across neural machine translation (NMT) models by means of a shared dynamic vocabulary. Our approach allows us to extend an initial model for a given language pair to cover new languages by adapting its vocabulary as long as new data become available (i.e., introducing new vocabulary items if they are not included in the initial model). The parameter transfer mechanism is evaluated in two scenarios: i) to adapt a trained single language NMT system to work with a new language pair and ii) to continuously add new language pairs to grow to a multilingual NMT system. In both scenarios our goal is to improve the translation performance, while minimizing the training convergence time. Preliminary experiments spanning five languages with different training data sizes (i.e., 5k and 50k parallel sentences) show a significant performance gain ranging from +3.85 up to +13.63 BLEU in different language directions. Moreover, when compared with training an NMT model from scratch, our transfer-learning approach allows us to reach higher performance after training up to 4% of the total training steps.", "keyphrases": ["neural machine translation", "dynamic vocabulary", "transfer learning", "low-resource language pair"]}
+{"id": "graham-etal-2013-continuous", "title": "Continuous Measurement Scales in Human Evaluation of Machine Translation", "abstract": "We explore the use of continuous rating scales for human evaluation in the context of machine translation evaluation, comparing two assessor-intrinsic quality-control techniques that do not rely on agreement with expert judgments. 
Experiments employing Amazon\u2019s Mechanical Turk service show that quality-control techniques made possible by the use of the continuous scale show dramatic improvements to intra-annotator agreement of up to +0.101 in the kappa coefficient, with inter-annotator agreement increasing by up to +0.144 when additional standardization of scores is applied.", "keyphrases": ["human evaluation", "continuous scale", "inter-annotator agreement", "direct assessment"]}
+{"id": "piad-morffis-etal-2019-general", "title": "A General-Purpose Annotation Model for Knowledge Discovery: Case Study in Spanish Clinical Text", "abstract": "Knowledge discovery from text in natural language is a task usually aided by the manual construction of annotated corpora. Specifically in the clinical domain, several annotation models are used depending on the characteristics of the task to solve (e.g., named entity recognition, relation extraction, etc.). However, few general-purpose annotation models exist, that can support a broad range of knowledge extraction tasks. This paper presents an annotation model designed to capture a large portion of the semantics of natural language text. The structure of the annotation model is presented, with examples of annotated sentences and a brief description of each semantic role and relation defined. This research focuses on an application to clinical texts in the Spanish language. Nevertheless, the presented annotation model is extensible to other domains and languages. An example of annotated sentences, guidelines, and suitable configuration files for an annotation tool are also provided for the research community.", "keyphrases": ["annotation model", "knowledge discovery", "domain independence"]}
+{"id": "jauhar-etal-2016-tables", "title": "Tables as Semi-structured Knowledge for Question Answering", "abstract": "Question answering requires access to a knowledge base to check facts and reason about information. Knowledge in the form of natural language text is easy to acquire, but difficult for automated reasoning. Highly-structured knowledge bases can facilitate reasoning, but are difficult to acquire. In this paper we explore tables as a semi-structured formalism that provides a balanced compromise to this trade-off. We first use the structure of tables to guide the construction of a dataset of over 9000 multiple-choice questions with rich alignment annotations, easily and efficiently via crowd-sourcing. We then use this annotated data to train a semi-structured feature-driven model for question answering that uses tables as a knowledge base. In benchmark evaluations, we significantly outperform both a strong unstructured retrieval baseline and a highly structured Markov Logic Network model.", "keyphrases": ["question answering", "table", "highly-structured knowledge basis"]}
+{"id": "dorow-widdows-2003-discovering", "title": "Discovering Corpus-Specific Word Senses", "abstract": "This paper presents an unsupervised algorithm which automatically discovers word senses from text. The algorithm is based on a graph model representing words and relationships between them. Sense clusters are iteratively computed by clustering the local graph of similar words around an ambiguous word. Discrimination against previously extracted sense clusters enables us to discover new senses. 
We use the same data for both recognising and resolving ambiguity.", "keyphrases": ["word sense", "co-occurrence graph", "subgraph"]}
+{"id": "gupta-etal-2015-distributional", "title": "Distributional vectors encode referential attributes", "abstract": "Distributional methods have proven to excel at capturing fuzzy, graded aspects of meaning (Italy is more similar to Spain than to Germany). In contrast, it is difficult to extract the values of more specific attributes of word referents from distributional representations, attributes of the kind typically found in structured knowledge bases (Italy has 60 million inhabitants). In this paper, we pursue the hypothesis that distributional vectors also implicitly encode referential attributes. We show that a standard supervised regression model is in fact sufficient to retrieve such attributes to a reasonable degree of accuracy: When evaluated on the prediction of both categorical and numeric attributes of countries and cities, the model consistently reduces baseline error by 30%, and is not far from the upper bound. Further analysis suggests that our model is able to \u201cobjectify\u201d distributional representations for entities, anchoring them more firmly in the external world in measurable ways.", "keyphrases": ["attribute", "regression model", "country", "word embedding", "knowledge basis"]}
+{"id": "gong-etal-2011-cache", "title": "Cache-based Document-level Statistical Machine Translation", "abstract": "Statistical machine translation systems are usually trained on a large amount of bilingual sentence pairs and translate one sentence at a time, ignoring document-level information. In this paper, we propose a cache-based approach to document-level translation. Since caches mainly depend on relevant data to supervise subsequent decisions, it is critical to fill the caches with highly-relevant data of a reasonable size. In this paper, we present three kinds of caches to store relevant document-level information: 1) a dynamic cache, which stores bilingual phrase pairs from the best translation hypotheses of previous sentences in the test document; 2) a static cache, which stores relevant bilingual phrase pairs extracted from similar bilingual document pairs (i.e. source documents similar to the test document and their corresponding target documents) in the training parallel corpus; 3) a topic cache, which stores the target-side topic words related with the test document in the source-side. In particular, three new features are designed to explore various kinds of document-level information in above three kinds of caches. Evaluation shows the effectiveness of our cache-based approach to document-level translation with the performance improvement of 0.81 in BLEU score over Moses. Especially, detailed analysis and discussion are presented to give new insights to document-level translation.", "keyphrases": ["statistical machine translation", "document-level information", "cache", "topic cache", "consistency"]}
+{"id": "brody-lapata-2009-bayesian", "title": "Bayesian Word Sense Induction", "abstract": "Sense induction seeks to automatically identify word senses directly from a corpus. A key assumption underlying previous work is that the context surrounding an ambiguous word is indicative of its meaning. Sense induction is thus typically viewed as an unsupervised clustering problem where the aim is to partition a word's contexts into different classes, each representing a word sense. 
Our work places sense induction in a Bayesian context by modeling the contexts of the ambiguous word as samples from a multinomial distribution over senses which are in turn characterized as distributions over words. The Bayesian framework provides a principled way to incorporate a wide range of features beyond lexical co-occurrences and to systematically assess their utility on the sense induction task. The proposed approach yields improvements over state-of-the-art systems on a benchmark dataset.", "keyphrases": ["ambiguous word", "wsi", "latent dirichlet allocation", "lda model", "bayesian approach"]}
+{"id": "sennrich-etal-2016-edinburgh", "title": "Edinburgh Neural Machine Translation Systems for WMT 16", "abstract": "We participated in the WMT 2016 shared news translation task by building neural translation systems for four language pairs, each trained in both directions: English\u2194Czech, English\u2194German, English\u2194Romanian and English\u2194Russian. Our systems are based on an attentional encoder-decoder, using BPE subword segmentation for open-vocabulary translation with a fixed vocabulary. We experimented with using automatic back-translations of the monolingual News corpus as additional training data, pervasive dropout, and target-bidirectional models. All reported methods give substantial improvements, and we see improvements of 4.3--11.2 BLEU over our baseline systems. In the human evaluation, our systems were the (tied) best constrained system for 7 out of 8 translation directions in which we participated.", "keyphrases": ["machine translation", "wmt", "direction"]}
+{"id": "deng-etal-2021-htcinfomax", "title": "HTCInfoMax: A Global Model for Hierarchical Text Classification via Information Maximization", "abstract": "The current state-of-the-art model HiAGM for hierarchical text classification has two limitations. First, it correlates each text sample with all labels in the dataset which contains irrelevant information. Second, it does not consider any statistical constraint on the label representations learned by the structure encoder, while constraints for representation learning are proved to be helpful in previous work. In this paper, we propose HTCInfoMax to address these issues by introducing information maximization which includes two modules: text-label mutual information maximization and label prior matching. The first module can model the interaction between each text sample and its ground truth labels explicitly which filters out irrelevant information. The second one encourages the structure encoder to learn better representations with desired characteristics for all labels which can better handle label imbalance in hierarchical text classification. Experimental results on two benchmark datasets demonstrate the effectiveness of the proposed HTCInfoMax.", "keyphrases": ["hierarchical text classification", "information maximization", "htcinfomax"]}
+{"id": "card-etal-2018-neural", "title": "Neural Models for Documents with Metadata", "abstract": "Most real-world document collections involve various types of metadata, such as author, source, and date, and yet the most commonly-used approaches to modeling text corpora ignore this information. While specialized models have been developed for particular applications, few are widely used in practice, as customization typically requires derivation of a custom inference algorithm. 
In this paper, we build on recent advances in variational inference methods and propose a general neural framework, based on topic models, to enable flexible incorporation of metadata and allow for rapid exploration of alternative models. Our approach achieves strong performance, with a manageable tradeoff between perplexity, coherence, and sparsity. Finally, we demonstrate the potential of our framework through an exploration of a corpus of articles about US immigration.", "keyphrases": ["metadata", "neural framework", "topic model", "scholar"]}
+{"id": "maier-etal-2012-annotating", "title": "Annotating Coordination in the Penn Treebank", "abstract": "Finding coordinations provides useful information for many NLP endeavors. However, the task has not received much attention in the literature. A major reason for that is that the annotation of major treebanks does not reliably annotate coordination. This makes it virtually impossible to detect coordinations in which two conjuncts are separated by punctuation rather than by a coordinating conjunction. In this paper, we present an annotation scheme for the Penn Treebank which distinguishes coordinating from non-coordinating punctuation. We discuss the general annotation guidelines as well as problematic cases. Eventually, we show that this additional annotation allows the retrieval of a considerable number of coordinate structures beyond the ones having a coordinating conjunction.", "keyphrases": ["coordination", "penn treebank", "punctuation"]}
+{"id": "sun-etal-2019-hierarchical", "title": "Hierarchical Attention Prototypical Networks for Few-Shot Text Classification", "abstract": "Most of the current effective methods for text classification tasks are based on large-scale labeled data and a great number of parameters, but when the supervised training data are few and difficult to be collected, these models are not available. In this work, we propose a hierarchical attention prototypical networks (HAPN) for few-shot text classification. We design the feature level, word level, and instance level multi cross attention for our model to enhance the expressive ability of semantic space, so it can highlight or weaken the importance of the features, words, and instances separately. We verify the effectiveness of our model on two standard benchmark few-shot text classification datasets\u2014FewRel and CSID, and achieve the state-of-the-art performance. The visualization of hierarchical attention layers illustrates that our model can capture more important features, words, and instances. In addition, our attention mechanism increases support set augmentability and accelerates convergence speed in the training stage.", "keyphrases": ["prototypical network", "text classification", "hapn", "semantic space", "hierarchical attention"]}
+{"id": "yang-etal-2021-document", "title": "Document-level Event Extraction via Parallel Prediction Networks", "abstract": "Document-level event extraction (DEE) is indispensable when events are described throughout a document. We argue that sentence-level extractors are ill-suited to the DEE task where event arguments always scatter across sentences and multiple events may co-exist in a document. It is a challenging task because it requires a holistic understanding of the document and an aggregated ability to assemble arguments across multiple sentences. In this paper, we propose an end-to-end model, which can extract structured events from a document in a parallel manner. 
Specifically, we first introduce a document-level encoder to obtain the document-aware representations. Then, a multi-granularity non-autoregressive decoder is used to generate events in parallel. Finally, to train the entire model, a matching loss function is proposed, which can bootstrap a global optimization. The empirical results on the widely used DEE dataset show that our approach significantly outperforms current state-of-the-art methods in the challenging DEE task. Code will be available at .", "keyphrases": ["parallel prediction network", "dee", "document-aware representation", "document-level event extraction"]}
+{"id": "firat-etal-2016-zero", "title": "Zero-Resource Translation with Multi-Lingual Neural Machine Translation", "abstract": "In this paper, we propose a novel finetuning algorithm for the recently introduced multi-way, multilingual neural machine translation that enables zero-resource machine translation. When used together with novel many-to-one translation strategies, we empirically show that this finetuning algorithm allows the multi-way, multilingual model to translate a zero-resource language pair (1) as well as a single-pair neural translation model trained with up to 1M direct parallel sentences of the same language pair and (2) better than pivot-based translation strategy, while keeping only one additional copy of attention-related parameters.", "keyphrases": ["neural machine translation", "multilingual model", "zero-resource translation", "zero-shot language pair", "fine-tuning"]}
+{"id": "wang-etal-2020-rat", "title": "RAT-SQL: Relation-Aware Schema Encoding and Linking for Text-to-SQL Parsers", "abstract": "When translating natural language questions into SQL queries to answer questions from a database, contemporary semantic parsing models struggle to generalize to unseen database schemas. The generalization challenge lies in (a) encoding the database relations in an accessible way for the semantic parser, and (b) modeling alignment between database columns and their mentions in a given query. We present a unified framework, based on the relation-aware self-attention mechanism, to address schema encoding, schema linking, and feature representation within a text-to-SQL encoder. On the challenging Spider dataset this framework boosts the exact match accuracy to 57.2%, surpassing its best counterparts by 8.7% absolute improvement. Further augmented with BERT, it achieves the new state-of-the-art performance of 65.6% on the Spider leaderboard. In addition, we observe qualitative improvements in the model's understanding of schema linking and alignment. Our implementation will be open-sourced at .", "keyphrases": ["schema encoding", "text-to-sql parser", "relation-aware transformer"]}
+{"id": "litkowski-2004-senseval", "title": "Senseval-3 task: Automatic labeling of semantic roles", "abstract": "The SENSEVAL-3 task to perform automatic labeling of semantic roles was designed to encourage research into and use of the FrameNet dataset. The task was based on the considerable expansion of the FrameNet data since the baseline study of automatic labeling of semantic roles by Gildea and Jurafsky. The FrameNet data provide an extensive body of \u201cgold standard\u201d data that can be used in lexical semantics research, as the basis for its further exploitation in NLP applications. Eight teams participated in the task, with a total of 20 runs. Discussions among participants during development of the task and the scoring of their runs contributed to a successful task. 
Participants used a wide variety of techniques, investigating many aspects of the FrameNet data. They achieved results showing considerable improvements from Gildea and Jurafsky\u2019s baseline study. Importantly, their efforts have contributed considerably to making the complex FrameNet dataset more accessible. They have amply demonstrated that FrameNet is a substantial lexical resource that will permit extensive further research and exploitation in NLP applications in the future.", "keyphrases": ["labeling", "semantic role", "senseval-3 task"]}
+{"id": "hosseini-etal-2019-duality", "title": "Duality of Link Prediction and Entailment Graph Induction", "abstract": "Link prediction and entailment graph induction are often treated as different problems. In this paper, we show that these two problems are actually complementary. We train a link prediction model on a knowledge graph of assertions extracted from raw text. We propose an entailment score that exploits the new facts discovered by the link prediction model, and then form entailment graphs between relations. We further use the learned entailments to predict improved link prediction scores. Our results show that the two tasks can benefit from each other. The new entailment score outperforms prior state-of-the-art results on a standard entailment dataset and the new link prediction scores show improvements over the raw link prediction scores.", "keyphrases": ["link prediction", "entailment graph induction", "bipartite graph", "triple"]}
+{"id": "bertoldi-federico-2009-domain", "title": "Domain Adaptation for Statistical Machine Translation with Monolingual Resources", "abstract": "Domain adaptation has recently gained interest in statistical machine translation to cope with the performance drop observed when testing conditions deviate from training conditions. The basic idea is that in-domain training data can be exploited to adapt all components of an already developed system. Previous work showed small performance gains by adapting from limited in-domain bilingual data. Here, we aim instead at significant performance gains by exploiting large but cheap monolingual in-domain data, either in the source or in the target language. We propose to synthesize a bilingual corpus by translating the monolingual adaptation data into the counterpart language. Investigations were conducted on a state-of-the-art phrase-based system trained on the Spanish--English part of the UN corpus, and adapted on the corresponding Europarl data. Translation, re-ordering, and language models were estimated after translating in-domain texts with the baseline. By optimizing the interpolation of these models on a development set the BLEU score was improved from 22.60% to 28.10% on a test set.", "keyphrases": ["bilingual corpus", "in-domain text", "domain adaptation", "pseudo"]}
+{"id": "ding-riloff-2018-human", "title": "Human Needs Categorization of Affective Events Using Labeled and Unlabeled Data", "abstract": "We often talk about events that impact us positively or negatively. For example \u201cI got a job\u201d is good news, but \u201cI lost my job\u201d is bad news. When we discuss an event, we not only understand its affective polarity but also the reason why the event is beneficial or detrimental. For example, getting or losing a job has affective polarity primarily because it impacts us financially. 
Our work aims to categorize affective events based upon human need categories that often explain people's motivations and desires: PHYSIOLOGICAL, HEALTH, LEISURE, SOCIAL, FINANCIAL, COGNITION, and FREEDOM. We create classification models based on event expressions as well as models that use contexts surrounding event mentions. We also design a co-training model that learns from unlabeled data by simultaneously training event expression and event context classifiers in an iterative learning process. Our results show that co-training performs well, producing substantially better results than the individual classifiers.", "keyphrases": ["affective event", "unlabeled data", "human need category"]} +{"id": "springorum-etal-2012-automatic", "title": "Automatic classification of German an particle verbs", "abstract": "The current study works at the interface of theoretical and computational linguistics to explore the semantic properties of an particle verbs, i.e., German particle verbs with the particle an. Based on a thorough analysis of the particle verbs from a theoretical point of view, we identified empirical features and performed an automatic semantic classification. A focus of the study was on the mutual profit of theoretical and empirical perspectives with respect to salient semantic properties of the an particle verbs: (a) how can we transform the theoretical insights into empirical, corpus-based features, (b) to what extent can we replicate the theoretical classification by a machine learning approach, and (c) can the computational analysis in turn deepen our insights to the semantic properties of the particle verbs? The best classification result of 70% correct class assignments was reached through a GermaNet-based generalization of direct object nouns plus a prepositional phrase feature. These particle verb features in combination with a detailed analysis of the results at the same time confirmed and enlarged our knowledge about salient properties.", "keyphrases": ["german", "particle verb", "automatic classification"]} +{"id": "shimizu-nakagawa-2007-structural", "title": "Structural Correspondence Learning for Dependency Parsing", "abstract": "Following (Blitzer et al., 2006), we present an application of structural correspondence learning to non-projective dependency parsing (McDonald et al., 2005). To induce the correspondences among dependency edges from different domains, we looked at every two tokens in a sentence and examined whether or not there is a preposition, a determiner or a helping verb between them. Three binary linear classifiers were trained to predict the existence of a preposition, etc, on unlabeled data and we used singular value decomposition to induce new features. During the training, the parser was trained with these additional features in addition to these described in (McDonald et al., 2005). We discriminatively trained our parser in an on-line fashion using a variant of the voted perceptron (Collins, 2002; Collins and Roark, 2004; Crammer and Singer, 2003).", "keyphrases": ["dependency parsing", "preposition", "determiner"]} +{"id": "klein-nabi-2020-contrastive", "title": "Contrastive Self-Supervised Learning for Commonsense Reasoning", "abstract": "We propose a self-supervised method to solve Pronoun Disambiguation and Winograd Schema Challenge problems. Our approach exploits the characteristic structure of training corpora related to so-called \u201ctrigger\u201d words, which are responsible for flipping the answer in pronoun disambiguation. 
We achieve such commonsense reasoning by constructing pair-wise contrastive auxiliary predictions. To this end, we leverage a mutual exclusive loss regularized by a contrastive margin. Our architecture is based on the recently introduced transformer networks, BERT, that exhibits strong performance on many NLP benchmarks. Empirical results show that our method alleviates the limitation of current supervised approaches for commonsense reasoning. This study opens up avenues for exploiting inexpensive self-supervision to achieve performance gain in commonsense reasoning tasks.", "keyphrases": ["commonsense reasoning", "loss", "contrastive margin", "contrastive self-supervised learning"]}
+{"id": "choi-etal-2006-joint", "title": "Joint Extraction of Entities and Relations for Opinion Recognition", "abstract": "We present an approach for the joint extraction of entities and relations in the context of opinion recognition and analysis. We identify two types of opinion-related entities --- expressions of opinions and sources of opinions --- along with the linking relation that exists between them. Inspired by Roth and Yih (2004), we employ an integer linear programming approach to solve the joint opinion recognition task, and show that global, constraint-based inference can significantly boost the performance of both relation extraction and the extraction of opinion-related entities. Performance further improves when a semantic role labeling system is incorporated. The resulting system achieves F-measures of 79 and 69 for entity and relation extraction, respectively, improving substantially over prior results in the area.", "keyphrases": ["opinion recognition", "integer linear programming", "joint extraction", "opinion holder extraction", "named-entity tagger"]}
+{"id": "lee-etal-2011-stanfords", "title": "Stanford's Multi-Pass Sieve Coreference Resolution System at the CoNLL-2011 Shared Task", "abstract": "This paper details the coreference resolution system submitted by Stanford at the CoNLL-2011 shared task. Our system is a collection of deterministic coreference resolution models that incorporate lexical, syntactic, semantic, and discourse information. All these models use global document-level information by sharing mention attributes, such as gender and number, across mentions in the same cluster. We participated in both the open and closed tracks and submitted results using both predicted and gold mentions. Our system was ranked first in both tracks, with a score of 57.8 in the closed track and 58.3 in the open track.", "keyphrases": ["coreference resolution system", "stanford", "pronoun", "candidate mention", "rule-based approach"]}
+{"id": "goldberg-tsarfaty-2008-single", "title": "A Single Generative Model for Joint Morphological Segmentation and Syntactic Parsing", "abstract": "Morphological processes in Semitic languages deliver space-delimited words which introduce multiple, distinct, syntactic units into the structure of the input sentence. These words are in turn highly ambiguous, breaking the assumption underlying most parsers that the yield of a tree for a given sentence is known in advance. Here we propose a single joint model for performing both morphological segmentation and syntactic disambiguation which bypasses the associated circularity. 
Using a treebank grammar, a data-driven lexicon, and a linguistically motivated unknown-tokens handling technique our model outperforms previous pipelined, integrated or factorized systems for Hebrew morphological and syntactic processing, yielding an error reduction of 12% over the best published results so far.", "keyphrases": ["single generative model", "joint morphological segmentation", "syntactic parsing", "semitic language"]}
+{"id": "zlatkova-etal-2019-fact", "title": "Fact-Checking Meets Fauxtography: Verifying Claims About Images", "abstract": "The recent explosion of false claims in social media and on the Web in general has given rise to a lot of manual fact-checking initiatives. Unfortunately, the number of claims that need to be fact-checked is several orders of magnitude larger than what humans can handle manually. Thus, there has been a lot of research aiming at automating the process. Interestingly, previous work has largely ignored the growing number of claims about images. This is despite the fact that visual imagery is more influential than text and naturally appears alongside fake news. Here we aim at bridging this gap. In particular, we create a new dataset for this problem, and we explore a variety of features modeling the claim, the image, and the relationship between the claim and the image. The evaluation results show sizable improvements over the baseline. We release our dataset, hoping to enable further research on fact-checking claims about images.", "keyphrases": ["claim", "image", "fake news"]}
+{"id": "moneglia-etal-2012-imagact", "title": "The IMAGACT Cross-linguistic Ontology of Action. A new infrastructure for natural language disambiguation", "abstract": "Action verbs, which are highly frequent in speech, cause disambiguation problems that are relevant to Language Technologies. This is a consequence of the peculiar way each natural language categorizes Action i.e. it is a consequence of semantic factors. Action verbs are frequently \u201cgeneral\u201d, since they extend productively to actions belonging to different ontological types. Moreover, each language categorizes action in its own way and therefore the cross-linguistic reference to everyday activities is puzzling. This paper briefly sketches the IMAGACT project, which aims at setting up a cross-linguistic Ontology of Action for grounding disambiguation tasks in this crucial area of the lexicon. The project derives information on the actual variation of action verbs in English and Italian from spontaneous speech corpora, where references to action are high in frequency. Crucially it makes use of the universal language of images to identify action types, avoiding the underdeterminacy of semantic definitions. Action concept entries are implemented as prototypic scenes; this will make it easier to extend the Ontology to other languages.", "keyphrases": ["cross-linguistic ontology", "action verb", "disambiguation task"]}
+{"id": "shao-etal-2017-generating", "title": "Generating High-Quality and Informative Conversation Responses with Sequence-to-Sequence Models", "abstract": "Sequence-to-sequence models have been applied to the conversation response generation problem where the source sequence is the conversation history and the target sequence is the response. Unlike translation, conversation responding is inherently creative. The generation of long, informative, coherent, and diverse responses remains a hard task. In this work, we focus on the single turn setting. 
We add self-attention to the decoder to maintain coherence in longer responses, and we propose a practical approach, called the glimpse-model, for scaling to large datasets. We introduce a stochastic beam-search algorithm with segment-by-segment reranking which lets us inject diversity earlier in the generation process. We trained on a combined data set of over 2.3B conversation messages mined from the web. In human evaluation studies, our method produces longer responses overall, with a higher proportion rated as acceptable and excellent as length increases, compared to baseline sequence-to-sequence models with explicit length-promotion. A back-off strategy produces better responses overall, in the full spectrum of lengths.", "keyphrases": ["conversation", "sequence-to-sequence model", "diverse response"]} +{"id": "cohn-etal-2016-incorporating", "title": "Incorporating Structural Alignment Biases into an Attentional Neural Translation Model", "abstract": "Neural encoder-decoder models of machine translation have achieved impressive results, rivalling traditional translation models. However their modelling formulation is overly simplistic, and omits several key inductive biases built into traditional models. In this paper we extend the attentional neural translation model to include structural biases from word based alignment models, including positional bias, Markov conditioning, fertility and agreement over translation directions. We show improvements over a baseline attentional model and standard phrase-based model over several language pairs, evaluating on difficult languages in a low resource setting.", "keyphrases": ["translation direction", "attention model", "structural bias", "machine translation model"]} +{"id": "fazly-etal-2009-unsupervised", "title": "Unsupervised Type and Token Identification of Idiomatic Expressions", "abstract": "Idiomatic expressions are plentiful in everyday language, yet they remain mysterious, as it is not clear exactly how people learn and understand them. They are of special interest to linguists, psycholinguists, and lexicographers, mainly because of their syntactic and semantic idiosyncrasies as well as their unclear lexical status. Despite a great deal of research on the properties of idioms in the linguistics literature, there is not much agreement on which properties are characteristic of these expressions. Because of their peculiarities, idiomatic expressions have mostly been overlooked by researchers in computational linguistics. In this article, we look into the usefulness of some of the identified linguistic properties of idioms for their automatic recognition. Specifically, we develop statistical measures that each model a specific property of idiomatic expressions by looking at their actual usage patterns in text. We use these statistical measures in a type-based classification task where we automatically separate idiomatic expressions (expressions with a possible idiomatic interpretation) from similar-on-the-surface literal phrases (for which no idiomatic interpretation is possible). 
In addition, we use some of the measures in a token identification task where we distinguish idiomatic and literal usages of potentially idiomatic expressions in context.", "keyphrases": ["idiomatic expression", "token identification task", "noun", "lexico-syntactic fixedness", "unsupervised method"]} +{"id": "wang-etal-2017-statistical", "title": "A Statistical Framework for Product Description Generation", "abstract": "We present in this paper a statistical framework that generates accurate and fluent product description from product attributes. Specifically, after extracting templates and learning writing knowledge from attribute-description parallel data, we use the learned knowledge to decide what to say and how to say for product description generation. To evaluate accuracy and fluency for the generated descriptions, in addition to BLEU and Recall, we propose to measure what to say (in terms of attribute coverage) and to measure how to say (by attribute-specified generation) separately. Experimental results show that our framework is effective.", "keyphrases": ["statistical framework", "product description generation", "template"]} +{"id": "nelken-shieber-2006-towards", "title": "Towards Robust Context-Sensitive Sentence Alignment for Monolingual Corpora", "abstract": "Aligning sentences belonging to comparable monolingual corpora has been suggested as a first step towards training text rewriting algorithms, for tasks such as summarization or paraphrasing. We present here a new monolingual sentence alignment algorithm, combining a sentence-based TF*IDF score, turned into a probability distribution using logistic regression, with a global alignment dynamic programming algorithm. Our approach provides a simpler and more robust solution achieving a substantial improvement in accuracy over existing systems.", "keyphrases": ["monolingual corpora", "tf*idf score", "logistic regression"]} +{"id": "belinkov-etal-2017-evaluating", "title": "Evaluating Layers of Representation in Neural Machine Translation on Part-of-Speech and Semantic Tagging Tasks", "abstract": "While neural machine translation (NMT) models provide improved translation quality in an elegant framework, it is less clear what they learn about language. Recent work has started evaluating the quality of vector representations learned by NMT models on morphological and syntactic tasks. In this paper, we investigate the representations learned at different layers of NMT encoders. We train NMT systems on parallel data and use the models to extract features for training a classifier on two tasks: part-of-speech and semantic tagging. We then measure the performance of the classifier as a proxy to the quality of the original NMT model for the given task. Our quantitative analysis yields interesting insights regarding representation learning in NMT models. For instance, we find that higher layers are better at learning semantics while lower layers tend to be better for part-of-speech tagging. We also observe little effect of the target language on source-side representations, especially in higher quality models.", "keyphrases": ["neural machine translation", "part-of-speech", "semantic tagging"]} +{"id": "rosti-etal-2007-combining", "title": "Combining Outputs from Multiple Machine Translation Systems", "abstract": "Currently there are several approaches to machine translation (MT) based on different paradigms; e.g., phrasal, hierarchical and syntax-based. 
These three approaches yield similar translation accuracy despite using fairly different levels of linguistic knowledge. The availability of such a variety of systems has led to a growing interest toward finding better translations by combining outputs from multiple systems. This paper describes three different approaches to MT system combination. These combination methods operate on sentence, phrase and word level exploiting information from n-best lists, system scores and target-to-source phrase alignments. The word-level combination provides the most robust gains but the best results on the development test sets (NIST MT05 and the newsgroup portion of GALE 2006 dry-run) were achieved by combining all three methods.", "keyphrases": ["system combination", "source sentence", "confusion network", "hypothesis", "confidence score"]}
+{"id": "cettolo-etal-2012-wit3", "title": "WIT3: Web Inventory of Transcribed and Translated Talks", "abstract": "We describe here a Web inventory named WIT3 that offers access to a collection of transcribed and translated talks. The core of WIT3 is the TED Talks corpus, that basically redistributes the original content published by the TED Conference website (http://www.ted.com). Since 2007, the TED Conference, based in California, has been posting all video recordings of its talks together with subtitles in English and their translations in more than 80 languages. Aside from its cultural and social relevance, this content, which is published under the Creative Commons BY-NC-ND license, also represents a precious language resource for the machine translation research community, thanks to its size, variety of topics, and covered languages. This effort repurposes the original content in a way which is more convenient for machine translation researchers.", "keyphrases": ["web inventory", "transcribed", "parallel corpus"]}
+{"id": "ghaddar-langlais-2016-wikicoref", "title": "WikiCoref: An English Coreference-annotated Corpus of Wikipedia Articles", "abstract": "This paper presents WikiCoref, an English corpus annotated for anaphoric relations, where all documents are from the English version of Wikipedia. Our annotation scheme follows the one of OntoNotes with a few disparities. We annotated each markable with coreference type, mention type and the equivalent Freebase topic. Since most similar annotation efforts concentrate on very specific types of written text, mainly newswire, there is a lack of resources for otherwise over-used Wikipedia texts. The corpus described in this paper addresses this issue. We present a freely available resource we initially devised for improving coreference resolution algorithms dedicated to Wikipedia texts. Our corpus has no restriction on the topics of the documents being annotated, and documents of various sizes have been considered for annotation.", "keyphrases": ["anaphoric relation", "wikicoref", "out-of-domain evaluation", "ontonotes guideline", "small dataset"]}
+{"id": "aharoni-goldberg-2017-morphological", "title": "Morphological Inflection Generation with Hard Monotonic Attention", "abstract": "We present a neural model for morphological inflection generation which employs a hard attention mechanism, inspired by the nearly-monotonic alignment commonly found between the characters in a word and the characters in its inflection. 
We evaluate the model on three previously studied morphological inflection generation datasets and show that it provides state of the art results in various setups compared to previous neural and non-neural approaches. Finally we present an analysis of the continuous representations learned by both the hard and soft (Bahdanau, 2014) attention models for the task, shedding some light on the features such models extract.", "keyphrases": ["hard monotonic attention", "morphological inflection generation", "input character", "monotonic alignment"]} +{"id": "spitkovsky-etal-2013-breaking", "title": "Breaking Out of Local Optima with Count Transforms and Model Recombination: A Study in Grammar Induction", "abstract": "Many statistical learning problems in NLP call for local model search methods. But accuracy tends to suffer with current techniques, which often explore either too narrowly or too broadly: hill-climbers can get stuck in local optima, whereas samplers may be inefficient. We propose to arrange individual local optimizers into organized networks. Our building blocks are operators of two types: (i) transform, which suggests new places to search, via non-random restarts from already-found local optima; and (ii) join, which merges candidate solutions to find better optima. Experiments on grammar induction show that pursuing different transforms (e.g., discarding parts of a learned model or ignoring portions of training data) results in improvements. Groups of locally-optimal solutions can be further perturbed jointly, by constructing mixtures. Using these tools, we designed several modular dependency grammar induction networks of increasing complexity. Our complete system achieves 48.6% accuracy (directed dependency macro-average over all 19 languages in the 2006/7 CoNLL data) \u2014 more than 5% higher than the previous state-of-the-art.", "keyphrases": ["local optima", "model recombination", "grammar induction", "punctuation"]} +{"id": "foster-etal-2006-phrasetable", "title": "Phrasetable Smoothing for Statistical Machine Translation", "abstract": "We discuss different strategies for smoothing the phrasetable in Statistical MT, and give results over a range of translation settings. We show that any type of smoothing is a better idea than the relative-frequency estimates that are often used. The best smoothing techniques yield consistent gains of approximately 1% (absolute) according to the BLEU metric.", "keyphrases": ["smoothing", "translation model probability", "phrase pair"]} +{"id": "platanios-etal-2019-competence", "title": "Competence-based Curriculum Learning for Neural Machine Translation", "abstract": "Current state-of-the-art NMT systems use large neural networks that are not only slow to train, but also often require many heuristics and optimization tricks, such as specialized learning rate schedules and large batch sizes. This is undesirable as it requires extensive hyperparameter tuning. In this paper, we propose a curriculum learning framework for NMT that reduces training time, reduces the need for specialized heuristics or large batch sizes, and results in overall better performance. Our framework consists of a principled way of deciding which training samples are shown to the model at different times during training, based on the estimated difficulty of a sample and the current competence of the model. 
Filtering training samples in this manner prevents the model from getting stuck in bad local optima, making it converge faster and reach a better solution than the common approach of uniformly sampling training examples. Furthermore, the proposed method can be easily applied to existing NMT models by simply modifying their input data pipelines. We show that our framework can help improve the training time and the performance of both recurrent neural network models and Transformers, achieving up to a 70% decrease in training time, while at the same time obtaining accuracy improvements of up to 2.2 BLEU.", "keyphrases": ["curriculum learning", "neural machine translation", "competence", "training example"]} +{"id": "konstantinova-etal-2012-review", "title": "A review corpus annotated for negation, speculation and their scope", "abstract": "This paper presents a freely available resource for research on handling negation and speculation in review texts. The SFU Review Corpus, consisting of 400 documents of movie, book, and consumer product reviews, was annotated at the token level with negative and speculative keywords and at the sentence level with their linguistic scope. We report statistics on corpus size and the consistency of annotations. The annotated corpus will be useful in many applications, such as document mining and sentiment analysis.", "keyphrases": ["negation", "sfu review corpus", "token level", "speculative keyword"]} +{"id": "bjerva-etal-2016-semantic", "title": "Semantic Tagging with Deep Residual Networks", "abstract": "We propose a novel semantic tagging task, semtagging, tailored for the purpose of multilingual semantic parsing, and present the first tagger using deep residual networks (ResNets). Our tagger uses both word and character representations, and includes a novel residual bypass architecture. We evaluate the tagset both intrinsically on the new task of semantic tagging, as well as on Part-of-Speech (POS) tagging. Our system, consisting of a ResNet and an auxiliary loss function predicting our semantic tags, significantly outperforms prior results on English Universal Dependencies POS tagging (95.71% accuracy on UD v1.2 and 95.67% accuracy on UD v1.3).", "keyphrases": ["deep residual network", "resnets", "semantic tagging"]} +{"id": "belinkov-etal-2019-dont", "title": "Don't Take the Premise for Granted: Mitigating Artifacts in Natural Language Inference", "abstract": "Natural Language Inference (NLI) datasets often contain hypothesis-only biases\u2014artifacts that allow models to achieve non-trivial performance without learning whether a premise entails a hypothesis. We propose two probabilistic methods to build models that are more robust to such biases and better transfer across datasets. In contrast to standard approaches to NLI, our methods predict the probability of a premise given a hypothesis and NLI label, discouraging models from ignoring the premise. We evaluate our methods on synthetic and existing NLI datasets by training on datasets containing biases and testing on datasets containing no (or different) hypothesis-only biases. Our results indicate that these methods can make NLI models more robust to dataset-specific artifacts, transferring better than a baseline architecture in 9 out of 12 NLI datasets. 
Additionally, we provide an extensive analysis of the interplay of our methods with known biases in NLI datasets, as well as the effects of encouraging models to ignore biases and fine-tuning on target datasets.", "keyphrases": ["premise", "artifact", "natural language inference", "nli dataset"]}
+{"id": "wang-wang-2019-riemannian", "title": "Riemannian Normalizing Flow on Variational Wasserstein Autoencoder for Text Modeling", "abstract": "Recurrent Variational Autoencoder has been widely used for language modeling and text generation tasks. These models often face a difficult optimization problem, also known as KL vanishing, where the posterior easily collapses to the prior and the model will ignore latent codes in generative tasks. To address this problem, we introduce an improved Variational Wasserstein Autoencoder (WAE) with Riemannian Normalizing Flow (RNF) for text modeling. The RNF transforms a latent variable into a space that respects the geometric characteristics of the input space, which makes it impossible for the posterior to collapse to the non-informative prior. The Wasserstein objective minimizes the distance between the marginal distribution and the prior directly and therefore does not force the posterior to match the prior. Empirical experiments show that our model avoids KL vanishing over a range of datasets and has better performance in tasks such as language modeling, likelihood approximation, and text generation. Through a series of experiments and analysis over latent space, we show that our model learns latent distributions that respect latent space geometry and is able to generate sentences that are more diverse.", "keyphrases": ["variational wasserstein autoencoder", "text modeling", "riemannian normalizing flow"]}
+{"id": "ghosh-etal-2017-affect", "title": "Affect-LM: A Neural Language Model for Customizable Affective Text Generation", "abstract": "Human verbal communication includes affective messages which are conveyed through use of emotionally colored words. There has been a lot of research effort in this direction but the problem of integrating state-of-the-art neural language models with affective information remains an area ripe for exploration. In this paper, we propose an extension to an LSTM (Long Short-Term Memory) language model for generation of conversational text, conditioned on affect categories. Our proposed model, Affect-LM enables us to customize the degree of emotional content in generated sentences through an additional design parameter. Perception studies conducted using Amazon Mechanical Turk show that Affect-LM can generate naturally looking emotional sentences without sacrificing grammatical correctness. Affect-LM also learns affect-discriminative word representations, and perplexity experiments show that additional affective information in conversational text can improve language model prediction.", "keyphrases": ["neural language model", "text generation", "affect-lm"]}
+{"id": "chen-etal-2017-recurrent", "title": "Recurrent Attention Network on Memory for Aspect Sentiment Analysis", "abstract": "We propose a novel framework based on neural networks to identify the sentiment of opinion targets in a comment/review. Our framework adopts multiple-attention mechanism to capture sentiment features separated by a long distance, so that it is more robust against irrelevant information. The results of multiple attentions are non-linearly combined with a recurrent neural network, which strengthens the expressive power of our model for handling more complications. 
The weighted-memory mechanism not only helps us avoid the labor-intensive feature engineering work, but also provides a tailor-made memory for different opinion targets of a sentence. We examine the merit of our model on four datasets: two are from SemEval2014, i.e. reviews of restaurants and laptops; a twitter dataset, for testing its performance on social media data; and a Chinese news comment dataset, for testing its language sensitivity. The experimental results show that our model consistently outperforms the state-of-the-art methods on different types of data.", "keyphrases": ["attention network", "memory", "aspect sentiment analysis", "sentence representation", "deep learning"]} +{"id": "ford-etal-2018-importance", "title": "The Importance of Generation Order in Language Modeling", "abstract": "Neural language models are a critical component of state-of-the-art systems for machine translation, summarization, audio transcription, and other tasks. These language models are almost universally autoregressive in nature, generating sentences one token at a time from left to right. This paper studies the influence of token generation order on model quality via a novel two-pass language model that produces partially-filled sentence \u201ctemplates\u201d and then fills in missing tokens. We compare various strategies for structuring these two passes and observe a surprisingly large variation in model quality. We find the most effective strategy generates function words in the first pass followed by content words in the second. We believe these experimental results justify a more extensive investigation of the generation order for neural language models.", "keyphrases": ["generation order", "language modeling", "partially-filled sentence", "template"]} +{"id": "kate-wong-2010-semantic", "title": "Semantic Parsing: The Task, the State of the Art and the Future", "abstract": "Semantic parsing is the task of mapping natural language sentences into complete formal meaning representations which a computer can execute for some domain-specific application. This is a challenging task and is critical for developing computing systems that can understand and process natural language input, for example, a computing system that answers natural language queries about a database, or a robot that takes commands in natural language. While the importance of semantic parsing was realized a long time ago, it is only in the past few years that the state-of-the-art in semantic parsing has been significantly advanced with more accurate and robust semantic parser learners that use a variety of statistical learning methods. Semantic parsers have also been extended to work beyond a single sentence, for example, to use discourse contexts and to learn domain-specific language from perceptual contexts. Some of the future research directions of semantic parsing with potentially large impacts include mapping entire natural language documents into machine processable form to enable automated reasoning about them and to convert natural language web pages into machine processable representations for the Semantic Web to support automated high-end web applications. This tutorial will introduce the semantic parsing task and will bring the audience up-to-date with the current research and state-of-the-art in semantic parsing. It will also provide insights about semantic parsing and how it relates to and differs from other natural language processing tasks. 
It will point out research challenges and some promising future directions for semantic parsing.", "keyphrases": ["mapping", "domain-specific application", "semantic parsing"]} +{"id": "zhang-etal-2018-learning-control", "title": "Learning to Control the Specificity in Neural Response Generation", "abstract": "In conversation, a general response (e.g., \u201cI don't know\u201d) could correspond to a large variety of input utterances. Previous generative conversational models usually employ a single model to learn the relationship between different utterance-response pairs, thus tend to favor general and trivial responses which appear frequently. To address this problem, we propose a novel controlled response generation mechanism to handle different utterance-response relationships in terms of specificity. Specifically, we introduce an explicit specificity control variable into a sequence-to-sequence model, which interacts with the usage representation of words through a Gaussian Kernel layer, to guide the model to generate responses at different specificity levels. We describe two ways to acquire distant labels for the specificity control variable in learning. Empirical studies show that our model can significantly outperform the state-of-the-art response generation models under both automatic and human evaluations.", "keyphrases": ["control", "specificity", "neural response generation", "conversation", "sequence-to-sequence model"]} +{"id": "doan-etal-2021-phomt", "title": "PhoMT: A High-Quality and Large-Scale Benchmark Dataset for Vietnamese-English Machine Translation", "abstract": "We introduce a high-quality and large-scale Vietnamese-English parallel dataset of 3.02M sentence pairs, which is 2.9M pairs larger than the benchmark Vietnamese-English machine translation corpus IWSLT15. We conduct experiments comparing strong neural baselines and well-known automatic translation engines on our dataset and find that in both automatic and human evaluations: the best performance is obtained by fine-tuning the pre-trained sequence-to-sequence denoising auto-encoder mBART. To our best knowledge, this is the first large-scale Vietnamese-English machine translation study. We hope our publicly available dataset and study can serve as a starting point for future research and applications on Vietnamese-English machine translation. We release our dataset at: ", "keyphrases": ["high-quality", "vietnamese-english machine translation", "phomt"]} +{"id": "kozareva-etal-2008-semantic", "title": "Semantic Class Learning from the Web with Hyponym Pattern Linkage Graphs", "abstract": "We present a novel approach to weakly supervised semantic class learning from the web, using a single powerful hyponym pattern combined with graph structures, which capture two properties associated with pattern-based extractions: popularity and productivity. Intuitively, a candidate is popular if it was discovered many times by other instances in the hyponym pattern. A candidate is productive if it frequently leads to the discovery of other instances. Together, these two measures capture not only frequency of occurrence, but also cross-checking that the candidate occurs both near the class name and near other class members. We developed two algorithms that begin with just a class name and one seed instance and then automatically generate a ranked list of new class instances. 
We conducted experiments on four semantic classes and consistently achieved high accuracies.", "keyphrases": ["web", "hyponym pattern", "class member", "semantic class learning", "concept pair"]} +{"id": "clark-etal-2018-neural", "title": "Neural Text Generation in Stories Using Entity Representations as Context", "abstract": "We introduce an approach to neural text generation that explicitly represents entities mentioned in the text. Entity representations are vectors that are updated as the text proceeds; they are designed specifically for narrative text like fiction or news stories. Our experiments demonstrate that modeling entities offers a benefit in two automatic evaluations: mention generation (in which a model chooses which entity to mention next and which words to use in the mention) and selection between a correct next sentence and a distractor from later in the same story. We also conduct a human evaluation on automatically generated text in story contexts; this study supports our emphasis on entities and suggests directions for further research.", "keyphrases": ["story", "neural text generation", "entity context", "language model"]} +{"id": "nivre-2008-algorithms", "title": "Algorithms for Deterministic Incremental Dependency Parsing", "abstract": "Abstract Parsing algorithms that process the input from left to right and construct a single derivation have often been considered inadequate for natural language parsing because of the massive ambiguity typically found in natural language grammars. Nevertheless, it has been shown that such algorithms, combined with treebank-induced classifiers, can be used to build highly accurate disambiguating parsers, in particular for dependency-based syntactic representations. In this article, we first present a general framework for describing and analyzing algorithms for deterministic incremental dependency parsing, formalized as transition systems. We then describe and analyze two families of such algorithms: stack-based and list-based algorithms. In the former family, which is restricted to projective dependency structures, we describe an arc-eager and an arc-standard variant; in the latter family, we present a projective and a non-projective variant. For each of the four algorithms, we give proofs of correctness and complexity. In addition, we perform an experimental evaluation of all algorithms in combination with SVM classifiers for predicting the next parsing action, using data from thirteen languages. We show that all four algorithms give competitive accuracy, although the non-projective list-based algorithm generally outperforms the projective algorithms for languages with a non-negligible proportion of non-projective constructions. However, the projective algorithms often produce comparable results when combined with the technique known as pseudo-projective parsing. The linear time complexity of the stack-based algorithms gives them an advantage with respect to efficiency both in learning and in parsing, but the projective list-based algorithm turns out to be equally efficient in practice. Moreover, when the projective algorithms are used to implement pseudo-projective parsing, they sometimes become less efficient in parsing (but not in learning) than the non-projective list-based algorithm. 
Although most of the algorithms have been partially described in the literature before, this is the first comprehensive analysis and evaluation of the algorithms within a unified framework.", "keyphrases": ["dependency parsing", "derivation", "action", "generative model", "local inference"]}
+{"id": "ahmad-kondrak-2005-learning", "title": "Learning a Spelling Error Model from Search Query Logs", "abstract": "Applying the noisy channel model to search query spelling correction requires an error model and a language model. Typically, the error model relies on a weighted string edit distance measure. The weights can be learned from pairs of misspelled words and their corrections. This paper investigates using the Expectation Maximization algorithm to learn edit distance weights directly from search query logs, without relying on a corpus of paired words.", "keyphrases": ["spelling error model", "query log", "character-based error probability"]}
+{"id": "zhao-etal-2019-improving", "title": "Improving Grammatical Error Correction via Pre-Training a Copy-Augmented Architecture with Unlabeled Data", "abstract": "Neural machine translation systems have become state-of-the-art approaches for the Grammatical Error Correction (GEC) task. In this paper, we propose a copy-augmented architecture for the GEC task by copying the unchanged words from the source sentence to the target sentence. Since GEC suffers from not having enough labeled training data to achieve high accuracy, we pre-train the copy-augmented architecture with a denoising auto-encoder using the unlabeled One Billion Benchmark and make comparisons between the fully pre-trained model and a partially pre-trained model. It is the first time copying words from the source context and fully pre-training a sequence to sequence model are experimented on the GEC task. Moreover, we add token-level and sentence-level multi-task learning for the GEC task. The evaluation results on the CoNLL-2014 test set show that our approach outperforms all recently published state-of-the-art results by a large margin.", "keyphrases": ["grammatical error correction", "neural machine translation", "pre-trained model"]}
+{"id": "gu-feng-2020-investigating", "title": "Investigating Catastrophic Forgetting During Continual Training for Neural Machine Translation", "abstract": "Neural machine translation (NMT) models usually suffer from catastrophic forgetting during continual training where the models tend to gradually forget previously learned knowledge and swing to fit the newly added data which may have a different distribution, e.g. a different domain. Although many methods have been proposed to solve this problem, we cannot get to know what causes this phenomenon yet. Under the background of domain adaptation, we investigate the cause of catastrophic forgetting from the perspectives of modules and parameters (neurons). The investigation on the modules of the NMT model shows that some modules have tight relation with the general-domain knowledge while some other modules are more essential in the domain adaptation. And the investigation on the parameters shows that some parameters are important for both the general-domain and in-domain translation and the great change of them during continual training brings about the performance decline in general-domain. 
We conducted experiments across different language pairs and domains to ensure the validity and reliability of our findings.", "keyphrases": ["catastrophic forgetting", "neural machine translation", "domain adaptation"]} +{"id": "baziotis-etal-2017-datastories-semeval", "title": "DataStories at SemEval-2017 Task 4: Deep LSTM with Attention for Message-level and Topic-based Sentiment Analysis", "abstract": "In this paper we present two deep-learning systems that competed at SemEval-2017 Task 4 \u201cSentiment Analysis in Twitter\u201d. We participated in all subtasks for English tweets, involving message-level and topic-based sentiment polarity classification and quantification. We use Long Short-Term Memory (LSTM) networks augmented with two kinds of attention mechanisms, on top of word embeddings pre-trained on a big collection of Twitter messages. Also, we present a text processing tool suitable for social network messages, which performs tokenization, word normalization, segmentation and spell correction. Moreover, our approach uses no hand-crafted features or sentiment lexicons. We ranked 1st (tie) in Subtask A, and achieved very competitive results in the rest of the Subtasks. Both the word embeddings and our text processing tool are available to the research community.", "keyphrases": ["semeval-2017 task", "deep lstm", "sentiment analysis", "tweet preprocessor"]} +{"id": "wallace-etal-2019-allennlp", "title": "AllenNLP Interpret: A Framework for Explaining Predictions of NLP Models", "abstract": "Neural NLP models are increasingly accurate but are imperfect and opaque\u2014they break in counterintuitive ways and leave end users puzzled at their behavior. Model interpretation methods ameliorate this opacity by providing explanations for specific model predictions. Unfortunately, existing interpretation codebases make it difficult to apply these methods to new models and tasks, which hinders adoption for practitioners and burdens interpretability researchers. We introduce AllenNLP Interpret, a flexible framework for interpreting NLP models. The toolkit provides interpretation primitives (e.g., input gradients) for any AllenNLP model and task, a suite of built-in interpretation methods, and a library of front-end visualization components. We demonstrate the toolkit's flexibility and utility by implementing live demos for five interpretation methods (e.g., saliency maps and adversarial attacks) on a variety of models and tasks (e.g., masked language modeling using BERT and reading comprehension using BiDAF). These demos, alongside our code and tutorials, are available at .", "keyphrases": ["interpretation method", "explanation", "allennlp interpret"]} +{"id": "merlo-van-der-plas-2009-abstraction", "title": "Abstraction and Generalisation in Semantic Role Labels: PropBank, VerbNet or both?", "abstract": "Semantic role labels are the representation of the grammatically relevant aspects of a sentence meaning. Capturing the nature and the number of semantic roles in a sentence is therefore fundamental to correctly describing the interface between grammar and meaning. In this paper, we compare two annotation schemes, Prop-Bank and VerbNet, in a task-independent, general way, analysing how well they fare in capturing the linguistic generalisations that are known to hold for semantic role labels, and consequently how well they grammaticalise aspects of meaning. 
We show that VerbNet is more verb-specific and better able to generalise to new semantic role instances, while PropBank better captures some of the structural constraints among roles. We conclude that these two resources should be used together, as they are complementary.", "keyphrases": ["generalisation", "verbnet", "propbank role"]}
+{"id": "patry-langlais-2011-identifying", "title": "Identifying Parallel Documents from a Large Bilingual Collection of Texts: Application to Parallel Article Extraction in Wikipedia.", "abstract": "While several recent works on dealing with large bilingual collections of texts, e.g. (Smith et al., 2010), seek to extract parallel sentences from comparable corpora, we present Paradocs, a system designed to recognize pairs of parallel documents in a (large) bilingual collection of texts. We show that this system outperforms a fair baseline (Enright and Kondrak, 2007) in a number of controlled tasks. We applied it on the French-English cross-language linked article pairs of Wikipedia in order to see whether parallel articles in this resource are available, and if our system is able to locate them. According to some manual evaluation we conducted, a fourth of the article pairs in Wikipedia are indeed in translation relation, and Paradocs identifies parallel or noisy parallel article pairs with a precision of 80%.", "keyphrases": ["parallel document", "large bilingual collection", "wikipedia"]}
+{"id": "tanev-magnini-2006-weakly", "title": "Weakly Supervised Approaches for Ontology Population", "abstract": "We present a weakly supervised approach to automatic ontology population from text and compare it with two other unsupervised approaches. In our experiments we populate a part of our ontology of Named Entities. We considered two high level categories, geographical locations and person names, and ten sub-classes for each category. For each sub-class we automatically learn a syntactic model from a list of training examples and a parsed corpus. A novel syntactic indexing method allowed us to use large quantities of syntactically annotated data. The syntactic model for each named entity sub-class is a set of weighted syntactic features, i.e. words which typically co-occur with the members of the class in the corpus. The method is weakly supervised, since no manually annotated corpus is used in the learning process. The syntactic models are used to classify the unknown Named Entities in the test set. The method achieved promising results, i.e. 65% accuracy, and outperforms significantly the other two approaches.", "keyphrases": ["ontology population", "person name", "entity classification"]}
+{"id": "gkatzia-etal-2015-virtual", "title": "From the Virtual to the RealWorld: Referring to Objects in Real-World Spatial Scenes", "abstract": "Predicting the success of referring expressions (RE) is vital for real-world applications such as navigation systems. Traditionally, research has focused on studying Referring Expression Generation (REG) in virtual, controlled environments. In this paper, we describe a novel study of spatial references from real scenes rather than virtual. First, we investigate how humans describe objects in open, uncontrolled scenarios and compare our findings to those reported in virtual environments. We show that REs in real-world scenarios differ significantly from those in virtual worlds. Second, we propose a novel approach to quantifying image complexity when complete annotations are not present (e.g. 
due to poor object recognition capabilities), and third, we present a model for success prediction of REs for objects in real scenes. Finally, we discuss implications for Natural Language Generation (NLG) systems and future directions.", "keyphrases": ["object", "real-world image", "setup"]}
+{"id": "guan-etal-2020-neuinfer", "title": "NeuInfer: Knowledge Inference on N-ary Facts", "abstract": "Knowledge inference on knowledge graph has attracted extensive attention, which aims to find out connotative valid facts in knowledge graph and is very helpful for improving the performance of many downstream applications. However, researchers have mainly poured attention to knowledge inference on binary facts. The studies on n-ary facts are relatively scarcer, although they are also ubiquitous in the real world. Therefore, this paper addresses knowledge inference on n-ary facts. We represent each n-ary fact as a primary triple coupled with a set of its auxiliary descriptive attribute-value pair(s). We further propose a neural network model, NeuInfer, for knowledge inference on n-ary facts. Besides handling the common task to infer an unknown element in a whole fact, NeuInfer can cope with a new type of task, flexible knowledge inference. It aims to infer an unknown element in a partial fact consisting of the primary triple coupled with any number of its auxiliary description(s). Experimental results demonstrate the remarkable superiority of NeuInfer.", "keyphrases": ["knowledge inference", "n-ary fact", "neuinfer"]}
+{"id": "hu-etal-2020-monalog", "title": "MonaLog: a Lightweight System for Natural Language Inference Based on Monotonicity", "abstract": "We present a new logic-based inference engine for natural language inference (NLI) called MonaLog, which is based on natural logic and the monotonicity calculus. In contrast to existing logic-based approaches, our system is intentionally designed to be as lightweight as possible, and operates using a small set of well-known (surface-level) monotonicity facts about quantifiers, lexical items and tokenlevel polarity information. Despite its simplicity, we find our approach to be competitive with other logic-based NLI models on the SICK benchmark. We also use MonaLog in combination with the current state-of-the-art model BERT in a variety of settings, including for compositional data augmentation. We show that MonaLog is capable of generating large amounts of high-quality training data for BERT, improving its accuracy on SICK.", "keyphrases": ["natural language inference", "monotonicity", "monalog"]}
+{"id": "mihalcea-strapparava-2012-lyrics", "title": "Lyrics, Music, and Emotions", "abstract": "In this paper, we explore the classification of emotions in songs, using the music and the lyrics representation of the songs. We introduce a novel corpus of music and lyrics, consisting of 100 songs annotated for emotions. We show that textual and musical features can both be successfully used for emotion recognition in songs. 
Moreover, through comparative experiments, we show that the joint use of lyrics and music brings significant improvements over each of the individual textual and musical classifiers, with error rate reductions of up to 31%.", "keyphrases": ["music", "emotion", "lyric"]}
+{"id": "mallinson-etal-2020-felix", "title": "FELIX: Flexible Text Editing Through Tagging and Insertion", "abstract": "We present FELIX \u2013 a flexible text-editing approach for generation, designed to derive maximum benefit from the ideas of decoding with bi-directional contexts and self-supervised pretraining. In contrast to conventional sequence-to-sequence (seq2seq) models, FELIX is efficient in low-resource settings and fast at inference time, while being capable of modeling flexible input-output transformations. We achieve this by decomposing the text-editing task into two sub-tasks: tagging to decide on the subset of input tokens and their order in the output text and insertion to in-fill the missing tokens in the output not present in the input. The tagging model employs a novel Pointer mechanism, while the insertion model is based on a Masked Language Model (MLM). Both of these models are chosen to be non-autoregressive to guarantee faster inference. FELIX performs favourably when compared to recent text-editing methods and strong seq2seq baselines when evaluated on four NLG tasks: Sentence Fusion, Machine Translation Automatic Post-Editing, Summarization, and Text Simplification", "keyphrases": ["insertion", "output text", "text-editing method", "felix", "edit-based model"]}
+{"id": "christensen-etal-2013-towards", "title": "Towards Coherent Multi-Document Summarization", "abstract": "This paper presents G-FLOW, a novel system for coherent extractive multi-document summarization (MDS). Where previous work on MDS considered sentence selection and ordering separately, G-FLOW introduces a joint model for selection and ordering that balances coherence and salience. G-FLOW\u2019s core representation is a graph that approximates the discourse relations across sentences based on indicators including discourse cues, deverbal nouns, co-reference, and more. This graph enables G-FLOW to estimate the coherence of a candidate summary. We evaluate G-FLOW on Mechanical Turk, and find that it generates dramatically better summaries than an extractive summarizer based on a pipeline of state-of-the-art sentence selection and reordering components, underscoring the value of our joint model.", "keyphrases": ["summarization", "discourse relation", "multi-document graph", "passage"]}
+{"id": "michaelov-bergen-2020-well", "title": "How well does surprisal explain N400 amplitude under different experimental conditions?", "abstract": "We investigate the extent to which word surprisal can be used to predict a neural measure of human language processing difficulty\u2014the N400. To do this, we use recurrent neural networks to calculate the surprisal of stimuli from previously published neurolinguistic studies of the N400. We find that surprisal can predict N400 amplitude in a wide range of cases, and the cases where it cannot do so provide valuable insight into the neurocognitive processes underlying the response.", "keyphrases": ["surprisal", "n400 amplitude", "stimulus"]}
+{"id": "sennrich-etal-2017-university", "title": "The University of Edinburgh's Neural MT Systems for WMT17", "abstract": "This paper describes the University of Edinburgh's submissions to the WMT17 shared news translation and biomedical translation tasks. 
We participated in 12 translation directions for news, translating between English and Czech, German, Latvian, Russian, Turkish and Chinese. For the biomedical task we submitted systems for English to Czech, German, Polish and Romanian. Our systems are neural machine translation systems trained with Nematus, an attentional encoder-decoder. We follow our setup from last year and build BPE-based models with parallel and back-translated monolingual training data. Novelties this year include the use of deep architectures, layer normalization, and more compact models due to weight tying and improvements in BPE segmentations. We perform extensive ablative experiments, reporting on the effectiveness of layer normalization, deep architectures, and different ensembling techniques.", "keyphrases": ["edinburgh", "wmt17", "neural machine translation"]}
+{"id": "wieting-etal-2017-learning", "title": "Learning Paraphrastic Sentence Embeddings from Back-Translated Bitext", "abstract": "We consider the problem of learning general-purpose, paraphrastic sentence embeddings in the setting of Wieting et al. (2016b). We use neural machine translation to generate sentential paraphrases via back-translation of bilingual sentence pairs. We evaluate the paraphrase pairs by their ability to serve as training data for learning paraphrastic sentence embeddings. We find that the data quality is stronger than prior work based on bitext and on par with manually-written English paraphrase pairs, with the advantage that our approach can scale up to generate large training sets for many languages and domains. We experiment with several language pairs and data sources, and develop a variety of data filtering techniques. In the process, we explore how neural machine translation output differs from human-written sentences, finding clear differences in length, the amount of repetition, and the use of rare words.", "keyphrases": ["back-translation", "neural machine translation", "paraphrase", "sentence pair"]}
+{"id": "chen-etal-2019-meta", "title": "Meta Relational Learning for Few-Shot Link Prediction in Knowledge Graphs", "abstract": "Link prediction is an important way to complete knowledge graphs (KGs), while embedding-based methods, effective for link prediction in KGs, perform poorly on relations that only have a few associative triples. In this work, we propose a Meta Relational Learning (MetaR) framework to do the common but challenging few-shot link prediction in KGs, namely predicting new triples about a relation by only observing a few associative triples. We solve few-shot link prediction by focusing on transferring relation-specific meta information to make model learn the most important knowledge and learn faster, corresponding to relation meta and gradient meta respectively in MetaR. Empirically, our model achieves state-of-the-art results on few-shot link prediction KG benchmarks.", "keyphrases": ["link prediction", "knowledge graph", "meta relational learning"]}
+{"id": "li-fung-2014-language", "title": "Language Modeling with Functional Head Constraint for Code Switching Speech Recognition", "abstract": "In this paper, we propose novel structured language modeling methods for code mixing speech recognition by incorporating a well-known syntactic constraint for switching code, namely the Functional Head Constraint (FHC). Code mixing data is not abundantly available for training language models. 
Our proposed methods successfully alleviate this core problem for code mixing speech recognition by using bilingual data to train a structured language model with syntactic constraint. Linguists and bilingual speakers found that code switching does not happen between the functional head and its complements. We propose to learn the code mixing language model from bilingual data with this constraint in a weighted finite state transducer (WFST) framework. The constrained code switch language model is obtained by first expanding the search network with a translation model, and then using parsing to restrict paths to those permissible under the constraint. We im", "keyphrases": ["functional head constraint", "speech recognition", "language model"]}
+{"id": "mimno-etal-2009-polylingual", "title": "Polylingual Topic Models", "abstract": "Topic models are a useful tool for analyzing large text collections, but have previously been applied in only monolingual, or at most bilingual, contexts. Meanwhile, massive collections of interlinked documents in dozens of languages, such as Wikipedia, are now widely available, calling for tools that can characterize content in many languages. We introduce a polylingual topic model that discovers topics aligned across multiple languages. We explore the model's characteristics using two large corpora, each with over ten different languages, and demonstrate its usefulness in supporting machine translation and tracking topic trends across languages.", "keyphrases": ["polylingual topic models", "lda", "tuple", "string similarity", "induction"]}
+{"id": "hahn-etal-2006-agreement", "title": "Agreement/Disagreement Classification: Exploiting Unlabeled Data using Contrast Classifiers", "abstract": "Several semi-supervised learning methods have been proposed to leverage unlabeled data, but imbalanced class distributions in the data set can hurt the performance of most algorithms. In this paper, we adapt the new approach of contrast classifiers for semi-supervised learning. This enables us to exploit large amounts of unlabeled data with a skewed distribution. In experiments on a speech act (agreement/disagreement) classification problem, we achieve better results than other semi-supervised methods. We also obtain performance comparable to the best results reported so far on this task and outperform systems with equivalent feature sets.", "keyphrases": ["unlabeled data", "contrast classifier", "agreement"]}
+{"id": "angeli-etal-2014-combining", "title": "Combining Distant and Partial Supervision for Relation Extraction", "abstract": "Broad-coverage relation extraction either requires expensive supervised training data, or suffers from drawbacks inherent to distant supervision. We present an approach for providing partial supervision to a distantly supervised relation extractor using a small number of carefully selected examples. We compare against established active learning criteria and propose a novel criterion to sample examples which are both uncertain and representative. In this way, we combine the benefits of fine-grained supervision for difficult examples with the coverage of a large distantly supervised corpus. 
Our approach gives a substantial increase of 3.9% end-to-end F1 on the 2013 KBP Slot Filling evaluation, yielding a net F1 of 37.7%.", "keyphrases": ["partial supervision", "relation extraction", "active learning method", "crowdsourcing"]}
+{"id": "petrovic-etal-2010-streaming", "title": "Streaming First Story Detection with application to Twitter", "abstract": "With the recent rise in popularity and size of social media, there is a growing need for systems that can extract useful information from this amount of data. We address the problem of detecting new events from a stream of Twitter posts. To make event detection feasible on web-scale corpora, we present an algorithm based on locality-sensitive hashing which is able to overcome the limitations of traditional approaches, while maintaining competitive results. In particular, a comparison with a state-of-the-art system on the first story detection task shows that we achieve over an order of magnitude speedup in processing time, while retaining comparable performance. Event detection experiments on a collection of 160 million Twitter posts show that celebrity deaths are the fastest spreading news on Twitter.", "keyphrases": ["story detection", "twitter", "event detection", "stream", "incoming tweet"]}
+{"id": "baldwin-2006-compositionality", "title": "Compositionality and Multiword Expressions: Six of One, Half a Dozen of the Other?", "abstract": "In this talk, I will investigate the relationship between compositionality and multiword expressions, as part of which I will outline different approaches for formalising the notion of compositionality. I will then briefly review computational methods that have been proposed for modelling compositionality, and applications thereof. Finally, I will discuss possible future directions for modelling compositionality, and present some preliminary results.", "keyphrases": ["compositionality", "chance", "multi-word expression", "mwes"]}
+{"id": "guo-etal-2019-attention", "title": "Attention Guided Graph Convolutional Networks for Relation Extraction", "abstract": "Dependency trees convey rich structural information that is proven useful for extracting relations among entities in text. However, how to effectively make use of relevant information while ignoring irrelevant information from the dependency trees remains a challenging research question. Existing approaches employing rule based hard-pruning strategies for selecting relevant partial dependency structures may not always yield optimal results. In this work, we propose Attention Guided Graph Convolutional Networks (AGGCNs), a novel model which directly takes full dependency trees as inputs. Our model can be understood as a soft-pruning approach that automatically learns how to selectively attend to the relevant sub-structures useful for the relation extraction task. 
Extensive results on various tasks including cross-sentence n-ary relation extraction and large-scale sentence-level relation extraction show that our model is able to better leverage the structural information of the full dependency trees, giving significantly better results than previous approaches.", "keyphrases": ["convolutional network", "relation extraction", "relevant sub-structure", "input sentence", "previous study"]} +{"id": "arnold-etal-2008-exploiting", "title": "Exploiting Feature Hierarchy for Transfer Learning in Named Entity Recognition", "abstract": "We present a novel hierarchical prior structure for supervised transfer learning in named entity recognition, motivated by the common structure of feature spaces for this task across natural language data sets. The problem of transfer learning, where information gained in one learning task is used to improve performance in another related task, is an important new area of research. In the subproblem of domain adaptation, a model trained over a source domain is generalized to perform well on a related target domain, where the two domains\u2019 data are distributed similarly, but not identically. We introduce the concept of groups of closely-related domains, called genres, and show how inter-genre adaptation is related to domain adaptation. We also examine multitask learning, where two domains may be related, but where the concept to be learned in each case is distinct. We show that our prior conveys useful information across domains, genres and tasks, while remaining robust to spurious signals not related to the target domain and concept. We further show that our model generalizes a class of similar hierarchical priors, smoothed to varying degrees, and lay the groundwork for future exploration in this area.", "keyphrases": ["transfer learning", "entity recognition", "domain adaptation"]} +{"id": "ogren-2006-knowtator", "title": "Knowtator: A Prot\u00e9g\u00e9 plug-in for annotated corpus construction", "abstract": "A general-purpose text annotation tool called Knowtator is introduced. Knowtator facilitates the manual creation of annotated corpora that can be used for evaluating or training a variety of natural language processing systems. Building on the strengths of the widely used Protege knowledge representation system, Knowtator has been developed as a Protege plug-in that leverages Protege's knowledge representation capabilities to specify annotation schemas. Knowtator's unique advantage over other annotation tools is the ease with which complex annotation schemas (e.g. schemas which have constrained relationships between annotation types) can be defined and incorporated into use. Knowtator is available under the Mozilla Public License 1.1 at http://bionlp.sourceforge.net/Knowtator.", "keyphrases": ["annotation tool", "protege", "knowtator"]} +{"id": "florian-etal-2004-statistical", "title": "A Statistical Model for Multilingual Entity Detection and Tracking", "abstract": "Abstract : Entity detection and tracking is a relatively new addition to the repertoire of natural language tasks. In this paper, we present a statistical language-independent framework for identifying and tracking named, nominal and pronominal references to entities within unrestricted text documents, and chaining them into clusters corresponding to each logical entity present in the text. 
Both the mention detection model and the novel entity tracking model can use arbitrary feature types, being able to integrate a wide array of lexical, syntactic and semantic features. In addition, the mention detection model crucially uses feature streams derived from different named entity classifiers. The proposed framework is evaluated with several experiments run in Arabic, Chinese and English texts; a system based on the approach described here and submitted to the latest Automatic Content Extraction (ACE) evaluation achieved top-tier results in all three evaluation languages.", "keyphrases": ["tracking", "language-independent framework", "mention", "automatic content extraction"]} +{"id": "bejan-harabagiu-2010-unsupervised", "title": "Unsupervised Event Coreference Resolution with Rich Linguistic Features", "abstract": "This paper examines how a new class of nonparametric Bayesian models can be effectively applied to an open-domain event coreference task. Designed with the purpose of clustering complex linguistic objects, these models consider a potentially infinite number of features and categorical outcomes. The evaluation performed for solving both within- and cross-document event coreference shows significant improvements of the models when compared against two baselines for this task.", "keyphrases": ["event coreference", "nonparametric bayesian model", "ecb", "information extraction", "multiple document"]} +{"id": "yan-etal-2020-unknown", "title": "Unknown Intent Detection Using Gaussian Mixture Model with an Application to Zero-shot Intent Classification", "abstract": "User intent classification plays a vital role in dialogue systems. Since user intent may frequently change over time in many realistic scenarios, unknown (new) intent detection has become an essential problem, where the study has just begun. This paper proposes a semantic-enhanced Gaussian mixture model (SEG) for unknown intent detection. In particular, we model utterance embeddings with a Gaussian mixture distribution and inject dynamic class semantic information into Gaussian means, which enables learning more class-concentrated embeddings that help to facilitate downstream outlier detection. Coupled with a density-based outlier detection algorithm, SEG achieves competitive results on three real task-oriented dialogue datasets in two languages for unknown intent detection. On top of that, we propose to integrate SEG as an unknown intent identifier into existing generalized zero-shot intent classification models to improve their performance. A case study on a state-of-the-art method, ReCapsNet, shows that SEG can push the classification performance to a significantly higher level.", "keyphrases": ["intent detection", "gaussian mixture model", "downstream outlier detection"]} +{"id": "smith-etal-2013-dirt", "title": "Dirt Cheap Web-Scale Parallel Text from the Common Crawl", "abstract": "Parallel text is the fuel that drives modern machine translation systems. The Web is a comprehensive source of preexisting parallel text, but crawling the entire web is impossible for all but the largest companies. We bring web-scale parallel text to the masses by mining the Common Crawl, a public Web crawl hosted on Amazon\u2019s Elastic Cloud. Starting from nothing more than a set of common two-letter language codes, our open-source extension of the STRAND algorithm mined 32 terabytes of the crawl in just under a day, at a cost of about $500. 
Our large-scale experiment uncovers large amounts of parallel text in dozens of language pairs across a variety of domains and genres, some previously unavailable in curated datasets. Even with minimal cleaning and filtering, the resulting data boosts translation performance across the board for five different language pairs in the news domain, and on open domain test sets we see improvements of up to 5 BLEU. We make our code and data available for other researchers seeking to mine this rich new data resource.", "keyphrases": ["common crawl", "strand algorithm", "different language", "parallel document"]} +{"id": "hassan-etal-2008-language", "title": "Language Independent Text Correction using Finite State Automata", "abstract": "Many natural language applications, like machine translation and information extraction, are required to operate on text with spelling errors. Those spelling mistakes have to be corrected automatically to avoid deteriorating the performance of such applications. In this work, we introduce a novel approach for automatic correction of spelling mistakes by deploying finite state automata to propose candidate corrections within a specified edit distance from the misspelled word. After choosing candidate corrections, a language model is used to assign scores to the candidate corrections and choose the best correction in the given context. The proposed approach is language independent and requires only a dictionary and text data for building a language model. The approach has been tested on both Arabic and English text and achieved an accuracy of 89%.", "keyphrases": ["finite state automata", "candidate correction", "edit-distance measure", "morphological analyzer"]} +{"id": "mrini-etal-2020-rethinking", "title": "Rethinking Self-Attention: Towards Interpretability in Neural Parsing", "abstract": "Attention mechanisms have improved the performance of NLP tasks while allowing models to remain explainable. Self-attention is currently widely used; however, interpretability is difficult due to the numerous attention distributions. Recent work has shown that model representations can benefit from label-specific information, while facilitating interpretation of predictions. We introduce the Label Attention Layer: a new form of self-attention where attention heads represent labels. We test our novel layer by running constituency and dependency parsing experiments and show our new model obtains new state-of-the-art results for both tasks on both the Penn Treebank (PTB) and Chinese Treebank. Additionally, our model requires fewer self-attention layers compared to existing work. Finally, we find that the Label Attention heads learn relations between syntactic categories and show pathways to analyze errors.", "keyphrases": ["self-attention", "interpretability", "label attention layer"]} +{"id": "narayan-etal-2018-dont", "title": "Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization", "abstract": "We introduce \u201cextreme summarization\u201d, a new single-document summarization task which does not favor extractive strategies and calls for an abstractive modeling approach. The idea is to create a short, one-sentence news summary answering the question \u201cWhat is the article about?\u201d. We collect a real-world, large-scale dataset for this task by harvesting online articles from the British Broadcasting Corporation (BBC). 
We propose a novel abstractive model which is conditioned on the article's topics and based entirely on convolutional neural networks. We demonstrate experimentally that this architecture captures long-range dependencies in a document and recognizes pertinent content, outperforming an oracle extractive system and state-of-the-art abstractive approaches when evaluated automatically and by humans.", "keyphrases": ["convolutional neural network", "extreme summarization", "large-scale dataset", "topical information"]} +{"id": "fischer-etal-2020-royal", "title": "The Royal Society Corpus 6.0: Providing 300+ Years of Scientific Writing for Humanistic Study", "abstract": "We present a new, extended version of the Royal Society Corpus (RSC), a diachronic corpus of scientific English now covering 300+ years of scientific writing (1665\u20131996). The corpus comprises 47 837 texts, primarily scientific articles, and is based on publications of the Royal Society of London, mainly its Philosophical Transactions and Proceedings. The corpus has been built on the basis of the FAIR principles and is freely available under a Creative Commons license, excluding copyrighted parts. We provide information on how the corpus can be found, the file formats available for download as well as accessibility via a web-based corpus query platform. We show a number of analytic tools that we have implemented for better usability and provide an example of use of the corpus for linguistic analysis as well as examples of subsequent, external uses of earlier releases. We place the RSC against the background of existing English diachronic/scientific corpora, elaborating on its value for linguistic and humanistic study.", "keyphrases": ["royal society corpus", "scientific writing", "humanistic study"]} +{"id": "vickrey-etal-2005-word", "title": "Word-Sense Disambiguation for Machine Translation", "abstract": "In word sense disambiguation, a system attempts to determine the sense of a word from contextual features. Major barriers to building a high-performing word sense disambiguation system include the difficulty of labeling data for this task and of predicting fine-grained sense distinctions. These issues stem partly from the fact that the task is being treated in isolation from possible uses of automatically disambiguated data. In this paper, we consider the related task of word translation, where we wish to determine the correct translation of a word from context. We can use parallel language corpora as a large supply of partially labeled data for this task. We present algorithms for solving the word translation problem and demonstrate a significant improvement over a baseline system. We then show that the word-translation system can be used to improve performance on a simplified machine-translation task and can effectively and accurately prune the set of candidate translations for a word.", "keyphrases": ["machine translation", "word-sense disambiguation", "wsd", "smt system", "blank filling task"]} +{"id": "judge-etal-2006-questionbank", "title": "QuestionBank: Creating a Corpus of Parse-Annotated Questions", "abstract": "This paper describes the development of QuestionBank, a corpus of 4000 parse-annotated questions for (i) use in training parsers employed in QA, and (ii) evaluation of question parsing. 
We present a series of experiments to investigate the effectiveness of QuestionBank as both an exclusive and supplementary training resource for a state-of-the-art parser in parsing both question and non-question test sets. We introduce a new method for recovering empty nodes and their antecedents (capturing long distance dependencies) from parser output in CFG trees using LFG f-structure reentrancies. Our main findings are (i) using QuestionBank training data improves parser performance to 89.75% labelled bracketing f-score, an increase of almost 11% over the baseline; (ii) back-testing experiments on non-question data (Penn-II WSJ Section 23) show that the retrained parser does not suffer a performance drop on non-question material; (iii) ablation experiments show that the size of training material provided by QuestionBank is sufficient to achieve optimal results; (iv) our method for recovering empty nodes captures long distance dependencies in questions from the ATIS corpus with high precision (96.82%) and low recall (39.38%). In summary, QuestionBank provides a useful new resource in parser-based QA research.", "keyphrases": ["test set", "questionbank", "sentence construction"]} +{"id": "pichotta-mooney-2014-statistical", "title": "Statistical Script Learning with Multi-Argument Events", "abstract": "Scripts represent knowledge of stereotypical event sequences that can aid text understanding. Initial statistical methods have been developed to learn probabilistic scripts from raw text corpora; however, they utilize a very impoverished representation of events, consisting of a verb and one dependent argument. We present a script learning approach that employs events with multiple arguments. Unlike previous work, we model the interactions between multiple entities in a script. Experiments on a large corpus using the task of inferring held-out events (the \u201cnarrative cloze evaluation\u201d) demonstrate that modeling multi-argument events improves predictive accuracy.", "keyphrases": ["script learning", "multi-argument event", "event sequence", "narrative chain", "co-occurrence"]} +{"id": "abney-bird-2010-human", "title": "The Human Language Project: Building a Universal Corpus of the World's Languages", "abstract": "We present a grand challenge to build a corpus that will include all of the world's languages, in a consistent structure that permits large-scale cross-linguistic processing, enabling the study of universal linguistics. The focal data types, bilingual texts and lexicons, relate each language to one of a set of reference languages. We propose that the ability to train systems to translate into and out of a given language be the yardstick for determining when we have successfully captured a language. We call on the computational linguistics community to begin work on this Universal Corpus, pursuing the many strands of activity described here, as their contribution to the global effort to document the world's linguistic heritage before more languages fall silent.", "keyphrases": ["universal corpus", "processing", "low-resource language"]} +{"id": "wang-etal-2021-link", "title": "Link Prediction on N-ary Relational Facts: A Graph-based Approach", "abstract": "Link prediction on knowledge graphs (KGs) is a key research topic. Previous work mainly focused on binary relations, paying less attention to higher-arity relations although they are ubiquitous in real-world KGs. 
This paper considers link prediction upon n-ary relational facts and proposes a graph-based approach to this task. The key to our approach is to represent the n-ary structure of a fact as a small heterogeneous graph, and model this graph with edge-biased fully-connected attention. The fully-connected attention captures universal inter-vertex interactions, while with edge-aware attentive biases to particularly encode the graph structure and its heterogeneity. In this fashion, our approach fully models global and local dependencies in each n-ary fact, and hence can more effectively capture associations therein. Extensive evaluation verifies the effectiveness and superiority of our approach. It performs substantially and consistently better than current state-of-the-art across a variety of n-ary relational benchmarks. Our code is publicly available.", "keyphrases": ["n-ary relational fact", "graph-based approach", "link prediction"]} +{"id": "ostling-2016-bayesian", "title": "A Bayesian model for joint word alignment and part-of-speech transfer", "abstract": "Current methods for word alignment require considerable amounts of parallel text to deliver accurate results, a requirement which is met only for a small minority of the world's approximately 7,000 languages. We show that by jointly performing word alignment and annotation transfer in a novel Bayesian model, alignment accuracy can be improved for language pairs where annotations are available for only one of the languages\u2014a finding which could facilitate the study and processing of a vast number of low-resource languages. We also present an evaluation where our method is used to perform single-source and multi-source part-of-speech transfer with 22 translations of the same text in four different languages. This allows us to quantify the considerable variation in accuracy depending on the specific source text(s) used, even with different translations into the same language.", "keyphrases": ["bayesian model", "word alignment", "part-of-speech transfer"]} +{"id": "calixto-etal-2017-doubly", "title": "Doubly-Attentive Decoder for Multi-modal Neural Machine Translation", "abstract": "We introduce a Multi-modal Neural Machine Translation model in which a doubly-attentive decoder naturally incorporates spatial visual features obtained using pre-trained convolutional neural networks, bridging the gap between image description and translation. Our decoder learns to attend to source-language words and parts of an image independently by means of two separate attention mechanisms as it generates words in the target language. We find that our model can efficiently exploit not just back-translated in-domain multi-modal data but also large general-domain text-only MT corpora. We also report state-of-the-art results on the Multi30k data set.", "keyphrases": ["machine translation", "visual feature", "doubly-attentive decoder"]} +{"id": "hu-etal-2015-lcsts", "title": "LCSTS: A Large Scale Chinese Short Text Summarization Dataset", "abstract": "Automatic text summarization is widely regarded as a highly difficult problem, partially because of the lack of large text summarization data sets. Due to the great challenge of constructing large-scale summaries for full text, in this paper, we introduce a large Chinese short text summarization dataset constructed from the Chinese microblogging website Sina Weibo, which is released to the public {this http URL}. 
This corpus consists of over 2 million real Chinese short texts with short summaries given by the author of each text. We also manually tagged the relevance of 10,666 short summaries with their corresponding short texts. Based on the corpus, we introduce a recurrent neural network for summary generation and achieve promising results, which not only shows the usefulness of the proposed corpus for short text summarization research, but also provides a baseline for further research on this topic.", "keyphrases": ["text summarization", "large corpus", "lcsts"]} +{"id": "blevins-etal-2018-deep", "title": "Deep RNNs Encode Soft Hierarchical Syntax", "abstract": "We present a set of experiments to demonstrate that deep recurrent neural networks (RNNs) learn internal representations that capture soft hierarchical notions of syntax from highly varied supervision. We consider four syntax tasks at different depths of the parse tree; for each word, we predict its part of speech as well as the first (parent), second (grandparent) and third level (great-grandparent) constituent labels that appear above it. These predictions are made from representations produced at different depths in networks that are pretrained with one of four objectives: dependency parsing, semantic role labeling, machine translation, or language modeling. In every case, we find a correspondence between network depth and syntactic depth, suggesting that a soft syntactic hierarchy emerges. This effect is robust across all conditions, indicating that the models encode significant amounts of syntax even in the absence of an explicit syntactic training supervision.", "keyphrases": ["rnns", "syntax", "internal representation", "deep layer"]} +{"id": "shindo-etal-2012-bayesian", "title": "Bayesian Symbol-Refined Tree Substitution Grammars for Syntactic Parsing", "abstract": "We propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. An SR-TSG is an extension of the conventional TSG model where each nonterminal symbol can be refined (subcategorized) to fit the training data. We aim to provide a unified model where TSG rules and symbol refinement are learned from training data in a fully automatic and consistent fashion. We present a novel probabilistic SR-TSG model based on the hierarchical Pitman-Yor Process to encode backoff smoothing from a fine-grained SR-TSG to simpler CFG rules, and develop an efficient training method based on Markov Chain Monte Carlo (MCMC) sampling. Our SR-TSG parser achieves an F1 score of 92.4% in the Wall Street Journal (WSJ) English Penn Treebank parsing task, which is a 7.7 point improvement over a conventional Bayesian TSG parser, and better than state-of-the-art discriminative reranking parsers.", "keyphrases": ["syntactic parsing", "refined latent variable", "pcfg"]} +{"id": "raganato-tiedemann-2018-analysis", "title": "An Analysis of Encoder Representations in Transformer-Based Machine Translation", "abstract": "The attention mechanism is a successful technique in modern NLP, especially in tasks like machine translation. The recently proposed network architecture of the Transformer is based entirely on attention mechanisms and achieves new state of the art results in neural machine translation, outperforming other sequence-to-sequence models. However, so far not much is known about the internal properties of the model and the representations it learns to achieve that performance. 
To study this question, we investigate the information that is learned by the attention mechanism in Transformer models with different translation quality. We assess the representations of the encoder by extracting dependency relations based on self-attention weights, we perform four probing tasks to study the amount of syntactic and semantic information captured, and we also test attention in a transfer learning scenario. Our analysis sheds light on the relative strengths and weaknesses of the various encoder representations. We observe that specific attention heads mark syntactic dependency relations and we can also confirm that lower layers tend to learn more about syntax while higher layers tend to encode more semantics.", "keyphrases": ["machine translation", "transformer", "syntactic dependency relation"]} +{"id": "razmara-etal-2013-graph", "title": "Graph Propagation for Paraphrasing Out-of-Vocabulary Words in Statistical Machine Translation", "abstract": "Out-of-vocabulary (oov) words or phrases still remain a challenge in statistical machine translation especially when a limited amount of parallel text is available for training or when there is a domain shift from training data to test data. In this paper, we propose a novel approach to finding translations for oov words. We induce a lexicon by constructing a graph on source language monolingual text and employ a graph propagation technique in order to find translations for all the source language phrases. Our method differs from previous approaches by adopting a graph propagation approach that takes into account not only one-step (from oov directly to a source language phrase that has a translation) but multi-step paraphrases from oov source language words to other source language phrases and eventually to target language translations. Experimental results show that our graph propagation method significantly improves performance over two strong baselines under intrinsic and extrinsic evaluation metrics.", "keyphrases": ["paraphrasing", "machine translation", "graph propagation", "oov problem"]} +{"id": "schlangen-etal-2016-resolving", "title": "Resolving References to Objects in Photographs using the Words-As-Classifiers Model", "abstract": "A common use of language is to refer to visually present objects. Modelling it in computers requires modelling the link between language and perception. The \"words as classifiers\" model of grounded semantics views words as classifiers of perceptual contexts, and composes the meaning of a phrase through composition of the denotations of its component words. It was recently shown to perform well in a game-playing scenario with a small number of object types. We apply it to two large sets of real-world photographs that contain a much larger variety of types and for which referring expressions are available. 
Using a pre-trained convolutional neural network to extract image features, and augmenting these with in-picture positional information, we show that the model achieves performance competitive with the state of the art in a reference resolution task (given expression, find bounding box of its referent), while, as we argue, being conceptually simpler and more flexible.", "keyphrases": ["object", "word use", "train image classifier"]} +{"id": "misra-etal-2015-using", "title": "Using Summarization to Discover Argument Facets in Online Idealogical Dialog", "abstract": "More and more of the information available on the web is dialogic, and a significant portion of it takes place in online forum conversations about current social and political topics. We aim to develop tools to summarize what these conversations are about. What are the CENTRAL PROPOSITIONS associated with different stances on an issue, what are the abstract objects under discussion that are central to a speaker's argument? How can we recognize that two CENTRAL PROPOSITIONS realize the same FACET of the argument? We hypothesize that the CENTRAL PROPOSITIONS are exactly those arguments that people find most salient, and use human summarization as a probe for discovering them. We describe our corpus of human summaries of opinionated dialogs, then show how we can identify similar repeated arguments, and group them into FACETS across many discussions of a topic. We define a new task, ARGUMENT FACET SIMILARITY (AFS), and show that we can predict AFS with a .54 correlation score, versus an ngram system baseline of .39 and a semantic textual similarity system baseline of .45.", "keyphrases": ["summarization", "argument facet", "dialog"]} +{"id": "mitchell-etal-2013-open", "title": "Open Domain Targeted Sentiment", "abstract": "We propose a novel approach to sentiment analysis for a low resource setting. The intuition behind this work is that sentiment expressed towards an entity, targeted sentiment, may be viewed as a span of sentiment expressed across the entity. This representation allows us to model sentiment detection as a sequence tagging problem, jointly discovering people and organizations along with whether there is sentiment directed towards them. We compare performance in both Spanish and English on microblog data, using only a sentiment lexicon as an external resource. By leveraging linguistically informed features within conditional random fields (CRFs) trained to minimize empirical risk, our best models in Spanish significantly outperform a strong baseline, and reach around 90% accuracy on the combined task of named entity recognition and sentiment prediction. Our models in English, trained on a much smaller dataset, are not yet statistically significant against their baselines.", "keyphrases": ["sentiment analysis", "conditional random field", "polarity", "hand-crafted linguistic feature", "open domain"]} +{"id": "gao-callan-2021-condenser", "title": "Condenser: a Pre-training Architecture for Dense Retrieval", "abstract": "Pre-trained Transformer language models (LM) have become go-to text representation encoders. Prior research fine-tunes deep LMs to encode text sequences such as sentences and passages into single dense vector representations for efficient text comparison and retrieval. However, dense encoders require a lot of data and sophisticated techniques to effectively train and suffer in low data situations. 
This paper finds a key reason is that standard LMs' internal attention structure is not ready-to-use for dense encoders, which need to aggregate text information into the dense representation. We propose to pre-train towards dense encoder with a novel Transformer architecture, Condenser, where LM prediction CONditions on DENSE Representation. Our experiments show Condenser improves over standard LM by large margins on various text retrieval and similarity tasks.", "keyphrases": ["pre-training architecture", "retrieval", "condenser"]} +{"id": "kambhatla-2004-combining", "title": "Combining Lexical, Syntactic, and Semantic Features with Maximum Entropy Models for Information Extraction", "abstract": "Extracting semantic relationships between entities is challenging because of a paucity of annotated data and the errors induced by entity detection modules. We employ Maximum Entropy models to combine diverse lexical, syntactic and semantic features derived from the text. Our system obtained competitive results in the Automatic Content Extraction (ACE) evaluation. Here we present our general approach and describe our ACE results.", "keyphrases": ["semantic feature", "maximum entropy model", "ace", "hand-crafted feature", "traditional supervised approach"]} +{"id": "menezes-quirk-2008-syntactic", "title": "Syntactic Models for Structural Word Insertion and Deletion during Translation", "abstract": "An important problem in translation neglected by most recent statistical machine translation systems is insertion and deletion of words, such as function words, motivated by linguistic structure rather than adjacent lexical context. Phrasal and hierarchical systems can only insert or delete words in the context of a larger phrase or rule. While this may suffice when translating in-domain, it performs poorly when trying to translate broad domains such as web text. Various syntactic approaches have been proposed that begin to address this problem by learning lexicalized and unlexicalized rules. Among these, the treelet approach uses unlexicalized order templates to model ordering separately from lexical choice. We introduce an extension to the latter that allows for structural word insertion and deletion, without requiring a lexical anchor, and show that it produces gains of more than 1.0% BLEU over both phrasal and baseline treelet systems on broad domain text.", "keyphrases": ["deletion", "lexical anchor", "treelet system"]} +{"id": "isabelle-etal-2017-challenge", "title": "A Challenge Set Approach to Evaluating Machine Translation", "abstract": "Neural machine translation represents an exciting leap forward in translation quality. But what longstanding weaknesses does it resolve, and which remain? We address these questions with a challenge set approach to translation evaluation and error analysis. A challenge set consists of a small set of sentences, each hand-designed to probe a system's capacity to bridge a particular structural divergence between languages. To exemplify this approach, we present an English-French challenge set, and use it to analyze phrase-based and neural systems. 
The resulting analysis provides not only a more fine-grained picture of the strengths of neural systems, but also insight into which linguistic phenomena remain out of reach.", "keyphrases": ["challenge set", "machine translation", "strength", "nmt system", "agreement"]} +{"id": "shwartz-etal-2020-unsupervised", "title": "Unsupervised Commonsense Question Answering with Self-Talk", "abstract": "Natural language understanding involves reading between the lines with implicit background knowledge. Current systems either rely on pre-trained language models as the sole implicit source of world knowledge, or resort to external knowledge bases (KBs) to incorporate additional relevant knowledge. We propose an unsupervised framework based on self-talk as a novel alternative to multiple-choice commonsense tasks. Inspired by inquiry-based discovery learning (Bruner, 1961), our approach inquires language models with a number of information seeking questions such as \u201cwhat is the definition of...\u201d to discover additional background knowledge. Empirical results demonstrate that the self-talk procedure substantially improves the performance of zero-shot language model baselines on four out of six commonsense benchmarks, and competes with models that obtain knowledge from external KBs. While our approach improves performance on several benchmarks, the self-talk induced knowledge even when leading to correct answers is not always seen as helpful by human judges, raising interesting questions about the inner-workings of pre-trained language models for commonsense reasoning.", "keyphrases": ["self-talk", "commonsense task", "zero-shot setting", "prompt", "knowledge generation"]} +{"id": "xu-etal-2007-domain", "title": "Domain dependent statistical machine translation", "abstract": "While statistical machine translation (SMT) has advanced significantly with better modeling techniques and much more training data, domain specific SMT has received much less attention and leaves much room for further improvements. In this work, we address domain issues and propose to use the combination of feature weights and language model adaptation, to distinguish multiple domains, which share a general translation engine with phrase-based log-linear models. The proposed method requires much less parallel data than what is typically used to build a domain independent system, which makes it easy, cheap and efficient to capture as many domains as required. Domain adaptation during decoding is approached with source text classification methods. Our results on the GALE tasks show significant improvements with the proposed domain dependent translation over domain independent translation.", "keyphrases": ["statistical machine translation", "feature weight", "smt model"]} +{"id": "ma-collins-2018-noise", "title": "Noise Contrastive Estimation and Negative Sampling for Conditional Models: Consistency and Statistical Efficiency", "abstract": "Noise Contrastive Estimation (NCE) is a powerful parameter estimation method for log-linear models, which avoids calculation of the partition function or its derivatives at each training step, a computationally demanding step in many cases. It is closely related to negative sampling methods, now widely used in NLP. This paper considers NCE-based estimation of conditional models. 
Conditional models are frequently encountered in practice; however, there has not been a rigorous theoretical analysis of NCE in this setting, and we will argue there are subtle but important questions when generalizing NCE to the conditional case. In particular, we analyze two variants of NCE for conditional models: one based on a classification objective, the other based on a ranking objective. We show that the ranking-based variant of NCE gives consistent parameter estimates under weaker assumptions than the classification-based method; we analyze the statistical efficiency of the ranking-based and classification-based variants of NCE; finally we describe experiments on synthetic data and language modeling showing the effectiveness and tradeoffs of both methods.", "keyphrases": ["negative sampling", "statistical efficiency", "noise contrastive estimation"]} +{"id": "meng-etal-2017-deep", "title": "Deep Keyphrase Generation", "abstract": "Keyphrase provides highly-summative information that can be effectively used for understanding, organizing and retrieving text content. Though previous studies have provided many workable solutions for automated keyphrase extraction, they commonly divided the to-be-summarized content into multiple text chunks, then ranked and selected the most meaningful ones. These approaches could neither identify keyphrases that do not appear in the text, nor capture the real semantic meaning behind the text. We propose a generative model for keyphrase prediction with an encoder-decoder framework, which can effectively overcome the above drawbacks. We name it deep keyphrase generation since it attempts to capture the deep semantic meaning of the content with a deep learning method. Empirical analysis on six datasets demonstrates that our proposed model not only achieves a significant performance boost on extracting keyphrases that appear in the source text, but also can generate absent keyphrases based on the semantic meaning of the text. Code and dataset are available at .", "keyphrases": ["generative model", "encoder-decoder framework", "source text", "deep keyphrase generation", "sequence-to-sequence"]} +{"id": "yuan-felice-2013-constrained", "title": "Constrained Grammatical Error Correction using Statistical Machine Translation", "abstract": "This paper describes our use of phrase-based statistical machine translation (PBSMT) for the automatic correction of errors in learner text in our submission to the CoNLL 2013 Shared Task on Grammatical Error Correction. Since the limited training data provided for the task was insufficient for training an effective SMT system, we also explored alternative ways of generating pairs of incorrect and correct sentences automatically from other existing learner corpora. Our approach does not yield particularly high performance but reveals many problems that require careful attention when building SMT systems for error correction.", "keyphrases": ["statistical machine translation", "learner corpora", "error type"]} +{"id": "liu-etal-2019-xqa", "title": "XQA: A Cross-lingual Open-domain Question Answering Dataset", "abstract": "Open-domain question answering (OpenQA) aims to answer questions through text retrieval and reading comprehension. Recently, lots of neural network-based models have been proposed and achieved promising results in OpenQA. 
However, the success of these models relies on a massive volume of training data (usually in English), which is not available in many other languages, especially for those low-resource languages. Therefore, it is essential to investigate cross-lingual OpenQA. In this paper, we construct a novel dataset XQA for cross-lingual OpenQA research. It consists of a training set in English as well as development and test sets in eight other languages. Besides, we provide several baseline systems for cross-lingual OpenQA, including two machine translation-based methods and one zero-shot cross-lingual method (multilingual BERT). Experimental results show that the multilingual BERT model achieves the best results in almost all target languages, while the performance of cross-lingual OpenQA is still much lower than that of English. Our analysis indicates that the performance of cross-lingual OpenQA is related to not only how similar the target language and English are, but also how difficult the question set of the target language is. The XQA dataset is publicly available at .", "keyphrases": ["open-domain question", "other language", "cross-lingual openqa research", "xqa", "wikipedia"]} +{"id": "marton-resnik-2008-soft", "title": "Soft Syntactic Constraints for Hierarchical Phrased-Based Translation", "abstract": "In adding syntax to statistical MT, there is a tradeoff between taking advantage of linguistic analysis, versus allowing the model to exploit linguistically unmotivated mappings learned from parallel training data. A number of previous efforts have tackled this tradeoff by starting with a commitment to linguistically motivated analyses and then finding appropriate ways to soften that commitment. We present an approach that explores the tradeoff from the other direction, starting with a context-free translation model learned directly from aligned parallel text, and then adding soft constituent-level constraints based on parses of the source language. We obtain substantial improvements in performance for translation from Chinese and Arabic to English.", "keyphrases": ["syntactic constraint", "parallel text", "soft constraint"]} +{"id": "rosenberg-binkowski-2004-augmenting", "title": "Augmenting the kappa statistic to determine interannotator reliability for multiply labeled data points", "abstract": "This paper describes a method for evaluating interannotator reliability in an email corpus annotated for type (e.g., question, answer, social chat) when annotators are allowed to assign multiple labels to a message. An augmentation is proposed to Cohen's kappa statistic which permits all data to be included in the reliability measure and which further permits the identification of more or less reliably annotated data points.", "keyphrases": ["interannotator reliability", "multiply", "data point"]} +{"id": "shen-etal-2017-deep", "title": "Deep Active Learning for Named Entity Recognition", "abstract": "Deep neural networks have advanced the state of the art in named entity recognition. However, under typical training procedures, advantages over classical methods emerge only with large datasets. As a result, deep learning is employed only when large public datasets or a large budget for manually labeling data is available. 
In this work, we show otherwise: by combining deep learning with active learning, we can outperform classical methods even with a significantly smaller amount of training data.", "keyphrases": ["active learning", "entity recognition", "deep learning"]} +{"id": "zhao-grishman-2005-extracting", "title": "Extracting Relations with Integrated Information Using Kernel Methods", "abstract": "Entity relation detection is a form of information extraction that finds predefined relations between pairs of entities in text. This paper describes a relation detection approach that combines clues from different levels of syntactic processing using kernel methods. Information from three different levels of processing is considered: tokenization, sentence parsing and deep dependency analysis. Each source of information is represented by kernel functions. Then composite kernels are developed to integrate and extend individual kernels so that processing errors occurring at one level can be overcome by information from other levels. We present an evaluation of these methods on the 2004 ACE relation detection task, using Support Vector Machines, and show that each level of syntactic processing contributes useful information for this task. When evaluated on the official test data, our approach produced very competitive ACE value scores. We also compare the SVM with KNN on different kernels.", "keyphrases": ["kernel", "different level", "svm", "relation extraction", "feature-based method"]} +{"id": "schwenk-2008-investigations", "title": "Investigations on large-scale lightly-supervised training for statistical machine translation.", "abstract": "Sentence-aligned bilingual texts are a crucial resource to build statistical machine translation (SMT) systems. In this paper we propose to apply lightly-supervised training to produce additional parallel data. The idea is to translate large amounts of monolingual data (up to 275M words) with an SMT system, and to use those as additional training data. Results are reported for the translation from French into English. We consider two setups: first the initial SMT system is only trained with a very limited amount of human-produced translations, and then the case where we have more than 100 million words. In both conditions, lightly-supervised training achieves significant improvements of the BLEU score.", "keyphrases": ["lightly-supervised training", "statistical machine translation", "phrase-based smt", "similar work", "monolingual source data"]} +{"id": "denkowski-etal-2014-learning", "title": "Learning from Post-Editing: Online Model Adaptation for Statistical Machine Translation", "abstract": "Using machine translation output as a starting point for human translation has become an increasingly common application of MT. We propose and evaluate three computationally efficient online methods for updating statistical MT systems in a scenario where post-edited MT output is constantly being returned to the system: (1) adding new rules to the translation model from the post-edited content, (2) updating a Bayesian language model of the target language that is used by the MT system, and (3) updating the MT system\u2019s discriminative parameters with a MIRA step. Individually, these techniques can substantially improve MT quality, even over strong baselines. 
Moreover, we see super-additive improvements when all three techniques are used in tandem.", "keyphrases": ["online model adaptation", "statistical machine translation", "online learning"]} +{"id": "li-etal-2017-deep", "title": "Deep Recurrent Generative Decoder for Abstractive Text Summarization", "abstract": "We propose a new framework for abstractive text summarization based on a sequence-to-sequence oriented encoder-decoder model equipped with a deep recurrent generative decoder (DRGN). Latent structure information implied in the target summaries is learned based on a recurrent latent random model for improving the summarization quality. Neural variational inference is employed to address the intractable posterior inference for the recurrent latent variables. Abstractive summaries are generated based on both the generative latent variables and the discriminative deterministic states. Extensive experiments on some benchmark datasets in different languages show that DRGN achieves improvements over the state-of-the-art methods.", "keyphrases": ["abstractive text summarization", "encoder-decoder model", "latent structure information", "deep recurrent"]} +{"id": "zhou-etal-2008-diagnostic", "title": "Diagnostic Evaluation of Machine Translation Systems Using Automatically Constructed Linguistic Check-Points", "abstract": "We present a diagnostic evaluation platform which provides multi-factored evaluation based on automatically constructed check-points. A check-point is a linguistically motivated unit (e.g. an ambiguous word, a noun phrase, a verb-obj collocation, a prepositional phrase etc.), which is pre-defined in a linguistic taxonomy. We present a method that automatically extracts check-points from parallel sentences. By means of checkpoints, our method can monitor an MT system in translating important linguistic phenomena to provide diagnostic evaluation. The effectiveness of our approach for diagnostic evaluation is verified through experiments on various types of MT systems.", "keyphrases": ["check-point", "diagnostic evaluation", "linguistic checkpoint"]} +{"id": "madaan-etal-2020-politeness", "title": "Politeness Transfer: A Tag and Generate Approach", "abstract": "This paper introduces a new task of politeness transfer which involves converting non-polite sentences to polite sentences while preserving the meaning. We also provide a dataset of more than 1.39 million instances automatically labeled for politeness to encourage benchmark evaluations on this new task. We design a tag and generate pipeline that identifies stylistic attributes and subsequently generates a sentence in the target style while preserving most of the source content. For politeness as well as five other transfer tasks, our model outperforms the state-of-the-art methods on automatic metrics for content preservation, with a comparable or better performance on style transfer accuracy. Additionally, our model surpasses existing methods on human evaluations for grammaticality, meaning preservation and transfer accuracy across all the six style transfer tasks. The data and code are located at .", "keyphrases": ["style transfer task", "politeness transfer", "source text"]} +{"id": "huang-etal-2019-achieving", "title": "Achieving Verified Robustness to Symbol Substitutions via Interval Bound Propagation", "abstract": "Neural networks are part of many contemporary NLP systems, yet their empirical successes come at the price of vulnerability to adversarial attacks. 
Previous work has used adversarial training and data augmentation to partially mitigate such brittleness, but these are unlikely to find worst-case adversaries due to the complexity of the search space arising from discrete text perturbations. In this work, we approach the problem from the opposite direction: to formally verify a system's robustness against a predefined class of adversarial attacks. We study text classification under synonym replacements or character flip perturbations. We propose modeling these input perturbations as a simplex and then using Interval Bound Propagation \u2013 a formal model verification method. We modify the conventional log-likelihood training objective to train models that can be efficiently verified, which would otherwise come with exponential search complexity. The resulting models show only little difference in terms of nominal accuracy, but have much improved verified accuracy under perturbations and come with an efficiently computable formal guarantee on worst case adversaries.", "keyphrases": ["robustness", "interval bound propagation", "ibp", "edit distance", "loss"]} +{"id": "mccarthy-etal-2004-finding", "title": "Finding Predominant Word Senses in Untagged Text", "abstract": "In word sense disambiguation (WSD), the heuristic of choosing the most common sense is extremely powerful because the distribution of the senses of a word is often skewed. The problem with using the predominant, or first sense heuristic, aside from the fact that it does not take surrounding context into account, is that it assumes some quantity of hand-tagged data. Whilst there are a few hand-tagged corpora available for some languages, one would expect the frequency distribution of the senses of words, particularly topical words, to depend on the genre and domain of the text under consideration. We present work on the use of a thesaurus acquired from raw textual corpora and the WordNet similarity package to find predominant noun senses automatically. The acquired predominant senses give a precision of 64% on the nouns of the SENSEVAL-2 English all-words task. This is a very promising result given that our method does not require any hand-tagged text, such as SemCor. Furthermore, we demonstrate that our method discovers appropriate predominant senses for words from two domain-specific corpora.", "keyphrases": ["disambiguation", "predominant sense", "ranking"]} +{"id": "edunov-etal-2019-pre", "title": "Pre-trained language model representations for language generation", "abstract": "Pre-trained language model representations have been successful in a wide range of language understanding tasks. In this paper, we examine different strategies to integrate pre-trained representations into sequence to sequence models and apply it to neural machine translation and abstractive summarization. We find that pre-trained representations are most effective when added to the encoder network which slows inference by only 14%. Our experiments in machine translation show gains of up to 5.3 BLEU in a simulated resource-poor setup. While returns diminish with more labeled data, we still observe improvements when millions of sentence-pairs are available. 
Finally, on abstractive summarization we achieve a new state of the art on the full text version of CNN/DailyMail.", "keyphrases": ["language model representation", "neural machine translation", "elmo"]} +{"id": "bhat-etal-2018-universal", "title": "Universal Dependency Parsing for Hindi-English Code-Switching", "abstract": "Code-switching is a phenomenon of mixing grammatical structures of two or more languages under varied social constraints. The code-switching data differ so radically from the benchmark corpora used in the NLP community that the application of standard technologies to these data degrades their performance sharply. Unlike standard corpora, these data often need to go through additional processes such as language identification, normalization and/or back-transliteration for their efficient processing. In this paper, we investigate these indispensable processes and other problems associated with syntactic parsing of code-switching data and propose methods to mitigate their effects. In particular, we study dependency parsing of code-switching data of Hindi and English multilingual speakers from Twitter. We present a treebank of Hindi-English code-switching tweets under the Universal Dependencies scheme and propose a neural stacking model for parsing that efficiently leverages the part-of-speech tag and syntactic tree annotations in the code-switching treebank and the preexisting Hindi and English treebanks. We also present normalization and back-transliteration models with a decoding process tailored for code-switching data. Results show that our neural stacking parser is 1.5% LAS points better than the augmented parsing model and 3.8% LAS points better than the one which uses first-best normalization and/or back-transliteration.", "keyphrases": ["dependency parsing", "code-switching", "language identification"]} +{"id": "jeong-etal-2009-efficient", "title": "Efficient Inference of CRFs for Large-Scale Natural Language Data", "abstract": "This paper presents an efficient inference algorithm of conditional random fields (CRFs) for large-scale data. Our key idea is to decompose the output label state into an active set and an inactive set in which most unsupported transitions become a constant. Our method unifies two previous methods for efficient inference of CRFs, and also derives a simple but robust special case that performs faster than exact inference when the active sets are sufficiently small. We demonstrate that our method achieves dramatic speedup on six standard natural language processing problems.", "keyphrases": ["crfs", "efficient inference", "similar idea"]} +{"id": "ren-etal-2020-simulspeech", "title": "SimulSpeech: End-to-End Simultaneous Speech to Text Translation", "abstract": "In this work, we develop SimulSpeech, an end-to-end simultaneous speech to text translation system which translates speech in the source language to text in the target language concurrently. SimulSpeech consists of a speech encoder, a speech segmenter and a text decoder, where 1) the segmenter builds upon the encoder and leverages a connectionist temporal classification (CTC) loss to split the input streaming speech in real time, 2) the encoder-decoder attention adopts a wait-k strategy for simultaneous translation. SimulSpeech is more challenging than previous cascaded systems (with simultaneous automatic speech recognition (ASR) and simultaneous neural machine translation (NMT)). 
We introduce two novel knowledge distillation methods to ensure the performance: 1) Attention-level knowledge distillation transfers the knowledge from the multiplication of the attention matrices of simultaneous NMT and ASR models to help the training of the attention mechanism in SimulSpeech; 2) Data-level knowledge distillation transfers the knowledge from the full-sentence NMT model and also reduces the complexity of data distribution to help on the optimization of SimulSpeech. Experiments on MuST-C English-Spanish and English-German spoken language translation datasets show that SimulSpeech achieves reasonable BLEU scores and lower delay compared to full-sentence end-to-end speech to text translation (without simultaneous translation), and better performance than the two-stage cascaded simultaneous translation model in terms of BLEU scores and translation delay.", "keyphrases": ["end-to-end", "connectionist temporal classification", "simultaneous translation", "simulspeech", "low latency"]} +{"id": "oh-etal-2010-co", "title": "Co-STAR: A Co-training Style Algorithm for Hyponymy Relation Acquisition from Structured and Unstructured Text", "abstract": "This paper proposes a co-training style algorithm called Co-STAR that acquires hyponymy relations simultaneously from structured and unstructured text. In Co-STAR, two independent processes for hyponymy relation acquisition -- one handling structured text and the other handling unstructured text -- collaborate by repeatedly exchanging the knowledge they acquired about hyponymy relations. Unlike conventional co-training, the two processes in Co-STAR are applied to different source texts and training data. We show the effectiveness of this algorithm through experiments on large-scale hyponymy-relation acquisition from Japanese Wikipedia and Web texts. We also show that Co-STAR is robust against noisy training data.", "keyphrases": ["co-training style algorithm", "hyponymy relation acquisition", "unstructured text"]} +{"id": "habernal-etal-2018-adapting", "title": "Adapting Serious Game for Fallacious Argumentation to German: Pitfalls, Insights, and Best Practices", "abstract": "As argumentation about controversies is culture- and language-dependent, porting a serious game that deals with daily argumentation to another language requires substantial adaptation. This article presents a study of deploying Argotario (serious game for learning argumentation fallacies) into the German context. We examine all steps that are necessary to end up with a successful serious game platform, such as topic selection, initial data creation, or effective campaigns. Moreover, we analyze users\u2019 behavior and in-game created data in order to assess the dissemination strategies and qualitative aspects of the resulting corpus. We also report on classification experiments based on neural networks and feature-based models.", "keyphrases": ["serious game", "fallacy", "propaganda technique"]} +{"id": "van-de-cruys-2009-non", "title": "A Non-negative Tensor Factorization Model for Selectional Preference Induction", "abstract": "Distributional similarity methods have proven to be a valuable tool for the induction of semantic similarity. Up till now, most algorithms use two-way cooccurrence data to compute the meaning of words. Co-occurrence frequencies, however, need not be pairwise. One can easily imagine situations where it is desirable to investigate co-occurrence frequencies of three modes and beyond. 
This paper will investigate a tensor factorization method called non-negative tensor factorization to build a model of three-way cooccurrences. The approach is applied to the problem of selectional preference induction, and automatically evaluated in a pseudo-disambiguation task. The results show that non-negative tensor factorization is a promising tool for NLP.", "keyphrases": ["tensor factorization", "selectional preference induction", "co-occurrence"]} +{"id": "stefanescu-etal-2012-hybrid", "title": "Hybrid Parallel Sentence Mining from Comparable Corpora", "abstract": "Mining for parallel sentences in comparable corpora is much more difficult than aligning sentences in parallel corpora. Sentence alignment in parallel corpora usually exploits simple empirical evidence (turned into assumptions) such as (i) the length of a sentence is proportional to the length of its translation and (ii) the discourse flow is necessarily the same in both parts of the bi-text (Gale and Church, 1993). Thus, the extraction tools search for parallel sentences around the same (relative) text positions, making sentence alignment a much easier task when compared to the kind of work undertaken here. For comparable corpora, the second assumption does not hold. Parallel sentences, should they exist at all, are scattered all around the source and target documents, and so, any two sentences have to be processed in order to determine if they are parallel or not. Also, we aim at finding pairs of quasi-parallel sentences that are not entirely parallel but contain spans of contiguous text that is parallel. Thus, finding parallel sentences in comparable corpora is confronted", "keyphrases": ["parallel sentence", "mining", "comparable corpora"]} +{"id": "prabhakaran-etal-2015-new", "title": "A New Dataset and Evaluation for Belief/Factuality", "abstract": "The terms \u201cbelief\u201d and \u201cfactuality\u201d both refer to the intention of the writer to present the propositional content of an utterance as firmly believed by the writer, not firmly believed, or having some other status. This paper presents an ongoing annotation effort and an associated evaluation.", "keyphrases": ["belief", "factuality", "annotation effort", "discussion"]} +{"id": "van-den-beukel-aroyo-2018-homonym", "title": "Homonym Detection For Humor Recognition In Short Text", "abstract": "In this paper, automatic homophone- and homograph detection are suggested as new useful features for humor recognition systems. The system combines style-features from previous studies on humor recognition in short text with ambiguity-based features. The performance of two potentially useful homograph detection methods is evaluated using crowdsourced annotations as ground truth. Adding homophones and homographs as features to the classifier results in a small but significant improvement over the style-features alone. For the task of humor recognition, recall appears to be a more important quality measure than precision. Although the system was designed for humor recognition in oneliners, it also performs well at the classification of longer humorous texts.", "keyphrases": ["humor recognition", "short text", "homonym detection"]} +{"id": "garg-ramakrishnan-2020-bae", "title": "BAE: BERT-based Adversarial Examples for Text Classification", "abstract": "Modern text classification models are susceptible to adversarial examples, perturbed versions of the original text indiscernible by humans which get misclassified by the model.
Recent works in NLP use rule-based synonym replacement strategies to generate adversarial examples. These strategies can lead to out-of-context and unnaturally complex token replacements, which are easily identifiable by humans. We present BAE, a black box attack for generating adversarial examples using contextual perturbations from a BERT masked language model. BAE replaces and inserts tokens in the original text by masking a portion of the text and leveraging the BERT-MLM to generate alternatives for the masked tokens. Through automatic and human evaluations, we show that BAE performs a stronger attack, in addition to generating adversarial examples with improved grammaticality and semantic coherence as compared to prior work.", "keyphrases": ["attack", "bert", "language model"]} +{"id": "lauscher-etal-2020-specializing", "title": "Specializing Unsupervised Pretraining Models for Word-Level Semantic Similarity", "abstract": "Unsupervised pretraining models have been shown to facilitate a wide range of downstream NLP applications. These models, however, retain some of the limitations of traditional static word embeddings. In particular, they encode only the distributional knowledge available in raw text corpora, incorporated through language modeling objectives. In this work, we complement such distributional knowledge with external lexical knowledge, that is, we integrate the discrete knowledge on word-level semantic similarity into pretraining. To this end, we generalize the standard BERT model to a multi-task learning setting where we couple BERT's masked language modeling and next sentence prediction objectives with an auxiliary task of binary word relation classification. Our experiments suggest that our \u201cLexically Informed\u201d BERT (LIBERT), specialized for the word-level semantic similarity, yields better performance than the lexically blind \u201cvanilla\u201d BERT on several language understanding tasks. Concretely, LIBERT outperforms BERT in 9 out of 10 tasks of the GLUE benchmark and is on a par with BERT in the remaining one. Moreover, we show consistent gains on 3 benchmarks for lexical simplification, a task where knowledge about word-level semantic similarity is paramount, as well as large gains on lexical reasoning probes.", "keyphrases": ["word-level semantic similarity", "distributional knowledge", "bert"]} +{"id": "hulpus-etal-2019-spreading", "title": "A Spreading Activation Framework for Tracking Conceptual Complexity of Texts", "abstract": "We propose an unsupervised approach for assessing conceptual complexity of texts, based on spreading activation. Using DBpedia knowledge graph as a proxy to long-term memory, mentioned concepts become activated and trigger further activation as the text is sequentially traversed. Drawing inspiration from psycholinguistic theories of reading comprehension, we model memory processes such as semantic priming, sentence wrap-up, and forgetting. We show that our models capture various aspects of conceptual text complexity and significantly outperform current state of the art.", "keyphrases": ["activation framework", "conceptual complexity", "priming"]} +{"id": "skantze-hjalmarsson-2010-towards", "title": "Towards Incremental Speech Generation in Dialogue Systems", "abstract": "We present a first step towards a model of speech generation for incremental dialogue systems. The model allows a dialogue system to incrementally interpret spoken input, while simultaneously planning, realising and self-monitoring the system response. 
The model has been implemented in a general dialogue system framework. Using this framework, we have implemented a specific application and tested it in a Wizard-of-Oz setting, comparing it with a non-incremental version of the same system. The results show that the incremental version, while producing longer utterances, has a shorter response time and is perceived as more efficient by the users.", "keyphrases": ["incremental speech generation", "dialogue system", "pause"]} +{"id": "kiela-bottou-2014-learning", "title": "Learning Image Embeddings using Convolutional Neural Networks for Improved Multi-Modal Semantics", "abstract": "We construct multi-modal concept representations by concatenating a skip-gram linguistic representation vector with a visual concept representation vector computed using the feature extraction layers of a deep convolutional neural network (CNN) trained on a large labeled object recognition dataset. This transfer learning approach brings a clear performance gain over features based on the traditional bag-of-visual-word approach. Experimental results are reported on the WordSim353 and MEN semantic relatedness evaluation tasks. We use visual features computed using either ImageNet or ESP Game images.", "keyphrases": ["convolutional neural networks", "multi-modal semantic", "cnn", "visual feature"]} +{"id": "bentivogli-etal-2004-revising", "title": "Revising the Wordnet Domains Hierarchy: semantics, coverage and balancing", "abstract": "The continuous expansion of the multilingual information society has led in recent years to a pressing demand for multilingual linguistic resources suitable to be used for different applications. \n \nIn this paper we present the WordNet Domains Hierarchy (WDH), a language-independent resource composed of 164, hierarchically organized, domain labels (e.g. Architecture, Sport, Medicine). Although WDH has been successfully applied to various Natural Language Processing tasks, the first available version presented some problems, mostly related to the lack of a clear semantics of the domain labels. Other correlated issues were the coverage and the balancing of the domains. We illustrate a new version of WDH addressing these problems by an explicit and systematic reference to the Dewey Decimal Classification. The new version of WDH has a better defined semantics and is applicable to a wider range of tasks.", "keyphrases": ["wordnet domains hierarchy", "coverage", "balancing"]} +{"id": "liu-etal-2018-knowledge", "title": "Knowledge Diffusion for Neural Dialogue Generation", "abstract": "End-to-end neural dialogue generation has shown promising results recently, but it does not employ knowledge to guide the generation and hence tends to generate short, general, and meaningless responses. In this paper, we propose a neural knowledge diffusion (NKD) model to introduce knowledge into dialogue generation. This method can not only match the relevant facts for the input utterance but diffuse them to similar entities. With the help of facts matching and entity diffusion, the neural dialogue generation is augmented with the ability of convergent and divergent thinking over the knowledge base. Our empirical study on a real-world dataset proves that our model is capable of generating meaningful, diverse and natural responses for both factoid-questions and knowledge grounded chit-chats.
The experiment results also show that our model outperforms competitive baseline models significantly.", "keyphrases": ["neural dialogue generation", "knowledge diffusion", "external knowledge graph"]} +{"id": "gao-etal-2014-modeling", "title": "Modeling Interestingness with Deep Neural Networks", "abstract": "This paper presents a deep semantic similarity model (DSSM), a special type of deep neural networks designed for text analysis, for recommending target documents to be of interest to a user based on a source document that she is reading. We observe, identify, and detect naturally occurring signals of interestingness in click transitions on the Web between source and target documents, which we collect from commercial Web browser logs. The DSSM is trained on millions of Web transitions, and maps source-target document pairs to feature vectors in a latent space in such a way that the distance between source documents and their corresponding interesting targets in that space is minimized. The effectiveness of the DSSM is demonstrated using two interestingness tasks: automatic highlighting and contextual entity search. The results on large-scale, real-world datasets show that the semantics of documents are important for modeling interestingness and that the DSSM leads to significant quality improvement on both tasks, outperforming not only the classic document models that do not use semantics but also state-of-the-art topic models.", "keyphrases": ["interestingness", "source document", "convolutional-pooling structure"]} +{"id": "fang-cohn-2016-learning", "title": "Learning when to trust distant supervision: An application to low-resource POS tagging using cross-lingual projection", "abstract": "Cross lingual projection of linguistic annotation suffers from many sources of bias and noise, leading to unreliable annotations that cannot be used directly. In this paper, we introduce a novel approach to sequence tagging that learns to correct the errors from cross-lingual projection using an explicit debiasing layer. This is framed as joint learning over two corpora, one tagged with gold standard and the other with projected tags. We evaluated with only 1,000 tokens tagged with gold standard tags, along with more plentiful parallel data. Our system equals or exceeds the state-of-the-art on eight simulated low-resource settings, as well as two real low-resource languages, Malagasy and Kinyarwanda.", "keyphrases": ["distant supervision", "low-resource pos tagging", "cross-lingual projection"]} +{"id": "conneau-etal-2017-supervised", "title": "Supervised Learning of Universal Sentence Representations from Natural Language Inference Data", "abstract": "Many modern NLP systems rely on word embeddings, previously trained in an unsupervised manner on large corpora, as base features. Efforts to obtain embeddings for larger chunks of text, such as sentences, have however not been so successful. Several attempts at learning unsupervised representations of sentences have not reached satisfactory enough performance to be widely adopted. In this paper, we show how universal sentence representations trained using the supervised data of the Stanford Natural Language Inference datasets can consistently outperform unsupervised methods like SkipThought vectors on a wide range of transfer tasks. 
Much like how computer vision uses ImageNet to obtain features, which can then be transferred to other tasks, our work tends to indicate the suitability of natural language inference for transfer learning to other NLP tasks. Our encoder is publicly available.", "keyphrases": ["universal sentence representations", "supervised learning", "infersent", "textual similarity", "entailment"]} +{"id": "ferreira-vlachos-2016-emergent", "title": "Emergent: a novel data-set for stance classification", "abstract": "We present Emergent, a novel data-set derived from a digital journalism project for rumour debunking. The data-set contains 300 rumoured claims and 2,595 associated news articles, collected and labelled by journalists with an estimation of their veracity (true, false or unverified). Each associated article is summarized into a headline and labelled to indicate whether its stance is for, against, or observing the claim, where observing indicates that the article merely repeats the claim. Thus, Emergent provides a real-world data source for a variety of natural language processing tasks in the context of fact-checking. Further to presenting the dataset, we address the task of determining the article headline stance with respect to the claim. For this purpose we use a logistic regression classifier and develop features that examine the headline and its agreement with the claim. The accuracy achieved was 73% which is 26% higher than the one achieved by the Excitement Open Platform (Magnini et al., 2014).", "keyphrases": ["novel data-set", "news article", "emergent"]} +{"id": "malmasi-etal-2022-semeval", "title": "SemEval-2022 Task 11: Multilingual Complex Named Entity Recognition (MultiCoNER)", "abstract": "We present the findings of SemEval-2022 Task 11 on Multilingual Complex Named Entity Recognition MULTICONER. Divided into 13 tracks, the task focused on methods to identify complex named entities (like names of movies, products and groups) in 11 languages in both monolingual and multi-lingual scenarios. Eleven tracks required building monolingual NER models for individual languages, one track focused on multilingual models able to work on all languages, and the last track featured code-mixed texts within any of these languages. The task is based on the MULTICONER dataset comprising 2.3 million instances in Bangla, Chinese, Dutch, English, Farsi, German, Hindi, Korean, Russian, Spanish, and Turkish. Results showed that methods fusing external knowledge into transformer models achieved the best results. However, identifying entities like creative works is still challenging even with external knowledge. MULTICONER was one of the most popular tasks in SemEval-2022 and it attracted 377 participants during the practice phase. 236 participants signed up for the final test phase and 55 teams submitted their systems.", "keyphrases": ["entity recognition", "multiconer", "semeval-2022 task"]} +{"id": "hoang-etal-2017-towards", "title": "Towards Decoding as Continuous Optimisation in Neural Machine Translation", "abstract": "We propose a novel decoding approach for neural machine translation (NMT) based on continuous optimisation. We reformulate decoding, a discrete optimization problem, into a continuous problem, such that optimization can make use of efficient gradient-based techniques.
Our powerful decoding framework allows for more accurate decoding for standard neural machine translation models, as well as enabling decoding in intractable models such as the intersection of several different NMT models. Our empirical results show that our decoding framework is effective, and can lead to substantial improvements in translations, especially in situations where greedy search and beam search are not feasible. Finally, we show how the technique is highly competitive with, and complementary to, reranking.", "keyphrases": ["decoding", "continuous optimisation", "neural machine translation"]} +{"id": "zaidan-callison-burch-2011-arabic", "title": "The Arabic Online Commentary Dataset: an Annotated Dataset of Informal Arabic with High Dialectal Content", "abstract": "The written form of Arabic, Modern Standard Arabic (MSA), differs quite a bit from the spoken dialects of Arabic, which are the true \"native\" languages of Arabic speakers used in daily life. However, due to MSA's prevalence in written form, almost all Arabic datasets have predominantly MSA content. We present the Arabic Online Commentary Dataset, a 52M-word monolingual dataset rich in dialectal content, and we describe our long-term annotation effort to identify the dialect level (and dialect itself) in each sentence of the dataset. So far, we have labeled 108K sentences, 41% of which as having dialectal content. We also present experimental results on the task of automatic dialect identification, using the collected labels for training and evaluation.", "keyphrases": ["arabic", "dialect", "n-gram"]} +{"id": "lee-etal-2021-dialogue", "title": "Dialogue State Tracking with a Language Model using Schema-Driven Prompting", "abstract": "Task-oriented conversational systems often use dialogue state tracking to represent the user's intentions, which involves filling in values of pre-defined slots. Many approaches have been proposed, often using task-specific architectures with special-purpose classifiers. Recently, good results have been obtained using more general architectures based on pretrained language models. Here, we introduce a new variation of the language modeling approach that uses schema-driven prompting to provide task-aware history encoding that is used for both categorical and non-categorical slots. We further improve performance by augmenting the prompting with schema descriptions, a naturally occurring source of in-domain knowledge. Our purely generative system achieves state-of-the-art performance on MultiWOZ 2.2 and achieves competitive performance on two other benchmarks: MultiWOZ 2.1 and M2M. The data and code will be available at .", "keyphrases": ["language model", "prompting", "dialogue state tracking"]} +{"id": "rudzewitz-2016-exploring", "title": "Exploring the Intersection of Short Answer Assessment, Authorship Attribution, and Plagiarism Detection", "abstract": "In spite of methodological and conceptual parallels, the computational linguistic applications short answer scoring (Burrows et al., 2015), authorship attribution (Stamatatos, 2009), and plagiarism detection (Zesch and Gurevych, 2012) have not been linked in practice. This work explores the practical usefulness of the combination of features from each of these fields for two tasks: short answer assessment, and plagiarism detection. The experiments show that incorporating features from the other domain yields significant improvements.
A feature analysis reveals that robust lexical and semantic features are most informative for these tasks.", "keyphrases": ["short answer assessment", "authorship attribution", "plagiarism detection"]} +{"id": "honnibal-johnson-2014-joint", "title": "Joint Incremental Disfluency Detection and Dependency Parsing", "abstract": "We present an incremental dependency parsing model that jointly performs disfluency detection. The model handles speech repairs using a novel non-monotonic transition system, and includes several novel classes of features. For comparison, we evaluated two pipeline systems, using state-of-the-art disfluency detectors. The joint model performed better on both tasks, with a parse accuracy of 90.5% and 84.0% accuracy at disfluency detection. The model runs in expected linear time, and processes over 550 tokens a second.", "keyphrases": ["disfluency detection", "dependency parsing", "new joint model"]} +{"id": "schlechtweg-etal-2019-wind", "title": "A Wind of Change: Detecting and Evaluating Lexical Semantic Change across Times and Domains", "abstract": "We perform an interdisciplinary large-scale evaluation for detecting lexical semantic divergences in a diachronic and in a synchronic task: semantic sense changes across time, and semantic sense changes across domains. Our work addresses the superficialness and lack of comparison in assessing models of diachronic lexical change, by bringing together and extending benchmark models on a common state-of-the-art evaluation task. In addition, we demonstrate that the same evaluation task and modelling approaches can successfully be utilised for the synchronic detection of domain-specific sense divergences in the field of term extraction.", "keyphrases": ["change", "evaluation task", "semantic change detection", "word sense"]} +{"id": "caglayan-etal-2019-probing", "title": "Probing the Need for Visual Context in Multimodal Machine Translation", "abstract": "Current work on multimodal machine translation (MMT) has suggested that the visual modality is either unnecessary or only marginally beneficial. We posit that this is a consequence of the very simple, short and repetitive sentences used in the only available dataset for the task (Multi30K), rendering the source text sufficient as context. In the general case, however, we believe that it is possible to combine visual and textual information in order to ground translations. In this paper we probe the contribution of the visual modality to state-of-the-art MMT models by conducting a systematic analysis where we partially deprive the models from source-side textual context. Our results show that under limited textual context, models are capable of leveraging the visual input to generate better translations. This contradicts the current belief that MMT models disregard the visual modality because of either the quality of the image features or the way they are integrated into the model.", "keyphrases": ["multimodal machine translation", "visual modality", "textual context"]} +{"id": "gillick-favre-2009-scalable", "title": "A Scalable Global Model for Summarization", "abstract": "We present an Integer Linear Program for exact inference under a maximum coverage model for automatic summarization. We compare our model, which operates at the sub-sentence or \"concept\" level, to a sentence-level model, previously solved with an ILP.
Our model scales more efficiently to larger problems because it does not require a quadratic number of variables to address redundancy in pairs of selected sentences. We also show how to include sentence compression in the ILP formulation, which has the desirable property of performing compression and sentence selection simultaneously. The resulting system performs at least as well as the best systems participating in the recent Text Analysis Conference, as judged by a variety of automatic and manual content-based metrics.", "keyphrases": ["summarization", "maximum coverage model", "thread", "approximation", "relevant concept"]} +{"id": "tan-etal-2014-effect", "title": "The effect of wording on message propagation: Topic- and author-controlled natural experiments on Twitter", "abstract": "Consider a person trying to spread an important message on a social network. He/she can spend hours trying to craft the message. Does it actually matter? While there has been extensive prior work looking into predicting popularity of social-media content, the effect of wording per se has rarely been studied since it is often confounded with the popularity of the author and the topic. To control for these confounding factors, we take advantage of the surprising fact that there are many pairs of tweets containing the same url and written by the same user but employing different wording. Given such pairs, we ask: which version attracts more retweets? This turns out to be a more difficult task than predicting popular topics. Still, humans can answer this question better than chance (but far from perfectly), and the computational methods we develop can do better than both an average human and a strong competing method trained on non-controlled data.", "keyphrases": ["wording", "message propagation", "twitter", "popularity", "retweet"]} +{"id": "de-cao-etal-2021-editing", "title": "Editing Factual Knowledge in Language Models", "abstract": "The factual knowledge acquired during pre-training and stored in the parameters of Language Models (LMs) can be useful in downstream tasks (e.g., question answering or textual inference). However, some facts can be incorrectly induced or become obsolete over time. We present KnowledgeEditor, a method which can be used to edit this knowledge and, thus, fix \u2018bugs\u2019 or unexpected predictions without the need for expensive re-training or fine-tuning. Besides being computationally efficient, KnowledgeEditor does not require any modifications in LM pre-training (e.g., the use of meta-learning). In our approach, we train a hyper-network with constrained optimization to modify a fact without affecting the rest of the knowledge; the trained hyper-network is then used to predict the weight update at test time. We show KnowledgeEditor's efficacy with two popular architectures and knowledge-intensive tasks: i) a BERT model fine-tuned for fact-checking, and ii) a sequence-to-sequence BART model for question answering. With our method, changing a prediction on the specific wording of a query tends to result in a consistent change in predictions also for its paraphrases. We show that this can be further encouraged by exploiting (e.g., automatically-generated) paraphrases during training. Interestingly, our hyper-network can be regarded as a \u2018probe\u2019 revealing which components need to be changed to manipulate factual knowledge; our analysis shows that the updates tend to be concentrated on a small subset of components.
Source code available at ", "keyphrases": ["factual knowledge", "language models", "update"]} +{"id": "weiss-2014-muck", "title": "MUCK: A toolkit for extracting and visualizing semantic dimensions of large text collections", "abstract": "Users with large text collections are often faced with one of two problems; either they wish to retrieve a semantically relevant subset of data from the collection for further scrutiny (needle-in-a-haystack) or they wish to glean a high-level understanding of how a subset compares to the parent corpus in the context of aforementioned semantic dimensions (forest-for-the-trees). In this paper, I describe MUCK, an open-source toolkit that addresses both of these problems through a distributed text processing engine with an interactive visualization interface.", "keyphrases": ["semantic dimension", "text collection", "muck"]} +{"id": "ajjour-etal-2019-modeling", "title": "Modeling Frames in Argumentation", "abstract": "In argumentation, framing is used to emphasize a specific aspect of a controversial topic while concealing others. When talking about legalizing drugs, for instance, its economical aspect may be emphasized. In general, we call a set of arguments that focus on the same aspect a frame. An argumentative text has to serve the \u201cright\u201d frame(s) to convince the audience to adopt the author's stance (e.g., being pro or con legalizing drugs). More specifically, an author has to choose frames that fit the audience's cultural background and interests. This paper introduces frame identification, which is the task of splitting a set of arguments into non-overlapping frames. We present a fully unsupervised approach to this task, which first removes topical information and then identifies frames using clustering. For evaluation purposes, we provide a corpus with 12,326 debate-portal arguments, organized along the frames of the debates' topics. On this corpus, our approach outperforms different strong baselines, achieving an F1-score of 0.28.", "keyphrases": ["frame", "argumentation", "same aspect", "clustering"]} +{"id": "cao-etal-2018-retrieve", "title": "Retrieve, Rerank and Rewrite: Soft Template Based Neural Summarization", "abstract": "Most previous seq2seq summarization systems purely depend on the source text to generate summaries, which tends to work unstably. Inspired by the traditional template-based summarization approaches, this paper proposes to use existing summaries as soft templates to guide the seq2seq model. To this end, we use a popular IR platform to Retrieve proper summaries as candidate templates. Then, we extend the seq2seq framework to jointly conduct template Reranking and template-aware summary generation (Rewriting). Experiments show that, in terms of informativeness, our model significantly outperforms the state-of-the-art methods, and even soft templates themselves demonstrate high competitiveness. In addition, the import of high-quality external summaries improves the stability and readability of generated summaries.", "keyphrases": ["template", "summarization", "language generation task"]} +{"id": "mcdonald-pereira-2006-online", "title": "Online Learning of Approximate Dependency Parsing Algorithms", "abstract": "In this paper we extend the maximum spanning tree (MST) dependency parsing framework of McDonald et al. (2005c) to incorporate higher-order feature representations and allow dependency structures with multiple parents per word.
We show that those extensions can make the MST framework computationally intractable, but that the intractability can be circumvented with new approximate parsing algorithms. We conclude with experiments showing that discriminative online learning using those approximate algorithms achieves the best reported parsing accuracy for Czech and Danish.", "keyphrases": ["dependency parsing", "maximum spanning tree", "approximate algorithm", "online learning", "head"]} +{"id": "omelianchuk-etal-2020-gector", "title": "GECToR \u2013 Grammatical Error Correction: Tag, Not Rewrite", "abstract": "In this paper, we present a simple and efficient GEC sequence tagger using a Transformer encoder. Our system is pre-trained on synthetic data and then fine-tuned in two stages: first on errorful corpora, and second on a combination of errorful and error-free parallel corpora. We design custom token-level transformations to map input tokens to target corrections. Our best single-model/ensemble GEC tagger achieves an F_0.5 of 65.3/66.5 on CONLL-2014 (test) and F_0.5 of 72.4/73.6 on BEA-2019 (test). Its inference speed is up to 10 times as fast as a Transformer-based seq2seq GEC system.", "keyphrases": ["grammatical error correction", "tagging", "token-level transformation"]} +{"id": "ziser-reichart-2017-neural", "title": "Neural Structural Correspondence Learning for Domain Adaptation", "abstract": "We introduce a neural network model that marries together ideas from two prominent strands of research on domain adaptation through representation learning: structural correspondence learning (SCL, (Blitzer et al., 2006)) and autoencoder neural networks (NNs). Our model is a three-layer NN that learns to encode the non-pivot features of an input example into a low dimensional representation, so that the existence of pivot features (features that are prominent in both domains and convey useful information for the NLP task) in the example can be decoded from that representation. The low-dimensional representation is then employed in a learning algorithm for the task. Moreover, we show how to inject pre-trained word embeddings into our model in order to improve generalization across examples with similar pivot features. We experiment with the task of cross-domain sentiment classification on 16 domain pairs and show substantial improvements over strong baselines.", "keyphrases": ["domain adaptation", "neural network model", "scl", "pivot"]} +{"id": "macherey-och-2007-empirical", "title": "An Empirical Study on Computing Consensus Translations from Multiple Machine Translation Systems", "abstract": "This paper presents an empirical study on how different selections of input translation systems affect translation quality in system combination. We give empirical evidence that the systems to be combined should be of similar quality and need to be almost uncorrelated in order to be beneficial for system combination. Experimental results are presented for composite translations computed from large numbers of different research systems as well as a set of translation systems derived from one of the best-ranked machine translation engines in the 2006 NIST machine translation evaluation.", "keyphrases": ["empirical study", "system combination", "similar quality", "translation output"]} +{"id": "gui-etal-2016-event", "title": "Event-Driven Emotion Cause Extraction with Corpus Construction", "abstract": "In this paper, we present our work in emotion cause extraction.
Since there is no open dataset available, the lack of annotated resources has limited the research in this area. Thus, we first present a dataset we built using SINA city news. The annotation is based on the scheme of the W3C Emotion Markup Language. Second, we propose a 7-tuple definition to describe emotion cause events. Based on this general definition, we propose a new event-driven emotion cause extraction method using multi-kernel SVMs where a syntactical tree based approach is used to represent events in text. A convolution kernel based multi-kernel SVM is used to extract emotion causes. Because traditional convolution kernels do not use lexical information at the terminal nodes of syntactic trees, we modify the kernel function with a synonym based improvement. Even with very limited training data, we can still extract sufficient features for the task. Evaluations show that our approach achieves 11.6% higher F-measure compared to referenced methods. The contributions of our work include resource construction, concept definition and algorithm development.", "keyphrases": ["emotion cause extraction", "sina city news", "syntactical tree", "ece", "public corpus"]} +{"id": "cotterell-etal-2016-joint", "title": "A Joint Model of Orthography and Morphological Segmentation", "abstract": "We present a model of morphological segmentation that jointly learns to segment and restore orthographic changes, e.g., funniest \u2192 fun-y-est. We term this form of analysis canonical segmentation and contrast it with the traditional surface segmentation, which segments a surface form into a sequence of substrings, e.g., funniest \u2192 funn-i-est. We derive an importance sampling algorithm for approximate inference in the model and report experimental results on English, German and Indonesian.", "keyphrases": ["morphological segmentation", "change", "surface form"]} +{"id": "turney-etal-2011-literal", "title": "Literal and Metaphorical Sense Identification through Concrete and Abstract Context", "abstract": "Metaphor is ubiquitous in text, even in highly technical text. Correct inference about textual entailment requires computers to distinguish the literal and metaphorical senses of a word. Past work has treated this problem as a classical word sense disambiguation task. In this paper, we take a new approach, based on research in cognitive linguistics that views metaphor as a method for transferring knowledge from a familiar, well-understood, or concrete domain to an unfamiliar, less understood, or more abstract domain. This view leads to the hypothesis that metaphorical word usage is correlated with the degree of abstractness of the word's context. We introduce an algorithm that uses this hypothesis to classify a word sense in a given context as either literal (denotative) or metaphorical (connotative). We evaluate this algorithm with a set of adjective-noun phrases (e.g., in dark comedy, the adjective dark is used metaphorically; in dark hair, it is used literally) and with the TroFi (Trope Finder) Example Base of literal and nonliteral usage for fifty verbs. We achieve state-of-the-art performance on both datasets.", "keyphrases": ["metaphor", "concreteness level", "physical experience"]} +{"id": "cheng-etal-2019-robust", "title": "Robust Neural Machine Translation with Doubly Adversarial Inputs", "abstract": "Neural machine translation (NMT) often suffers from the vulnerability to noisy perturbations in the input.
We propose an approach to improving the robustness of NMT models, which consists of two parts: (1) attack the translation model with adversarial source examples; (2) defend the translation model with adversarial target inputs to improve its robustness against the adversarial source inputs. For the generation of adversarial inputs, we propose a gradient-based method to craft adversarial examples informed by the translation loss over the clean inputs. Experimental results on Chinese-English and English-German translation tasks demonstrate that our approach achieves significant improvements (2.8 and 1.6 BLEU points) over Transformer on standard clean benchmarks as well as exhibiting higher robustness on noisy data.", "keyphrases": ["neural machine translation", "translation model", "gradient-based method", "adversarial example", "noise"]} +{"id": "may-knight-2007-syntactic", "title": "Syntactic Re-Alignment Models for Machine Translation", "abstract": "We present a method for improving word alignment for statistical syntax-based machine translation that employs a syntactically informed alignment model closer to the translation model than commonly-used word alignment models. This leads to extraction of more useful linguistic patterns and improved BLEU scores on translation experiments in Chinese and Arabic.", "keyphrases": ["machine translation", "link", "ssmt"]} +{"id": "lowe-etal-2017-towards", "title": "Towards an Automatic Turing Test: Learning to Evaluate Dialogue Responses", "abstract": "Automatically evaluating the quality of dialogue responses for unstructured domains is a challenging problem. Unfortunately, existing automatic evaluation metrics are biased and correlate very poorly with human judgements of response quality (Liu et al., 2016). Yet having an accurate automatic evaluation procedure is crucial for dialogue research, as it allows rapid prototyping and testing of new models with fewer expensive human evaluations. In response to this challenge, we formulate automatic dialogue evaluation as a learning problem. We present an evaluation model (ADEM) that learns to predict human-like scores to input responses, using a new dataset of human response scores. We show that the ADEM model's predictions correlate significantly, and at a level much higher than word-overlap metrics such as BLEU, with human judgements at both the utterance and system-level. We also show that ADEM can generalize to evaluating dialogue models unseen during training, an important step for automatic dialogue evaluation.", "keyphrases": ["dialogue response", "human response score", "dialog evaluation metric", "annotator"]} +{"id": "yoshikawa-etal-2009-jointly", "title": "Jointly Identifying Temporal Relations with Markov Logic", "abstract": "Recent work on temporal relation identification has focused on three types of relations between events: temporal relations between an event and a time expression, between a pair of events and between an event and the document creation time. These types of relations have mostly been identified in isolation by event pairwise comparison. However, this approach neglects logical constraints between temporal relations of different types that we believe to be helpful. We therefore propose a Markov Logic model that jointly identifies relations of all three relation types simultaneously.
By evaluating our model on the TempEval data we show that this approach leads to about 2% higher accuracy for all three types of relations, and to the best results for the task when compared to those of other machine learning based systems.", "keyphrases": ["markov logic", "global information", "transitivity constraint"]} +{"id": "ghosal-etal-2021-cider", "title": "CIDER: Commonsense Inference for Dialogue Explanation and Reasoning", "abstract": "Commonsense inference to understand and explain human language is a fundamental research problem in natural language processing. Explaining human conversations poses a great challenge as it requires contextual understanding, planning, inference, and several aspects of reasoning including causal, temporal, and commonsense reasoning. In this work, we introduce CIDER \u2013 a manually curated dataset that contains dyadic dialogue explanations in the form of implicit and explicit knowledge triplets inferred using contextual commonsense inference. Extracting such rich explanations from conversations can be conducive to improving several downstream applications. The annotated triplets are categorized by the type of commonsense knowledge present (e.g., causal, conditional, temporal). We set up three different tasks conditioned on the annotated dataset: Dialogue-level Natural Language Inference, Span Extraction, and Multi-choice Span Selection. Baseline results obtained with transformer-based models reveal that the tasks are difficult, paving the way for promising future research. The dataset and the baseline implementations are publicly available at .", "keyphrases": ["commonsense inference", "dialogue explanation", "cider"]} +{"id": "mohammad-turney-2010-emotions", "title": "Emotions Evoked by Common Words and Phrases: Using Mechanical Turk to Create an Emotion Lexicon", "abstract": "Even though considerable attention has been given to semantic orientation of words and the creation of large polarity lexicons, research in emotion analysis has had to rely on limited and small emotion lexicons. In this paper, we show how we create a high-quality, moderate-sized emotion lexicon using Mechanical Turk. In addition to questions about emotions evoked by terms, we show how the inclusion of a word choice question can discourage malicious data entry, help identify instances where the annotator may not be familiar with the target term (allowing us to reject such annotations), and help obtain annotations at sense level (rather than at word level). We perform an extensive analysis of the annotations to better understand the distribution of emotions evoked by terms of different parts of speech. We identify which emotions tend to be evoked simultaneously by the same term and show that certain emotions indeed go hand in hand.", "keyphrases": ["mechanical turk", "annotator", "different part", "emotion"]} +{"id": "agic-etal-2016-multilingual", "title": "Multilingual Projection for Parsing Truly Low-Resource Languages", "abstract": "We propose a novel approach to cross-lingual part-of-speech tagging and dependency parsing for truly low-resource languages. Our annotation projection-based approach yields tagging and parsing models for over 100 languages. All that is needed are freely available parallel texts, and taggers and parsers for resource-rich languages.
The empirical evaluation across 30 test languages shows that our method consistently provides top-level accuracies, close to established upper bounds, and outperforms several competitive baselines.", "keyphrases": ["low-resource language", "annotation projection", "parallel corpora"]} +{"id": "nayeem-etal-2018-abstractive", "title": "Abstractive Unsupervised Multi-Document Summarization using Paraphrastic Sentence Fusion", "abstract": "In this work, we aim at developing an unsupervised abstractive summarization system in the multi-document setting. We design a paraphrastic sentence fusion model which jointly performs sentence fusion and paraphrasing using a skip-gram word embedding model at the sentence level. Our model improves the information coverage and at the same time abstractiveness of the generated sentences. We conduct our experiments on the human-generated multi-sentence compression datasets and evaluate our system on several newly proposed Machine Translation (MT) evaluation metrics. Furthermore, we apply our sentence level model to implement an abstractive multi-document summarization system where documents usually contain a related set of sentences. We also propose an optimal solution for the classical summary length limit problem which was not addressed in the past research. For the document level summary, we conduct experiments on the datasets of two different domains (e.g., news article and user reviews) which are well suited for multi-document abstractive summarization. Our experiments demonstrate that the methods bring significant improvements over the state-of-the-art methods.", "keyphrases": ["summarization", "sentence fusion", "paraphrasing"]} +{"id": "girju-2003-automatic", "title": "Automatic Detection of Causal Relations for Question Answering", "abstract": "Causation relations are a pervasive feature of human language. Despite this, the automatic acquisition of causal information in text has proved to be a difficult task in NLP. This paper provides a method for the automatic detection and extraction of causal relations. We also present an inductive learning approach to the automatic discovery of lexical and semantic constraints necessary in the disambiguation of causal relations that are then used in question answering. We devised a classification of causal questions and tested the procedure on a QA system.", "keyphrases": ["causal relation", "question answering", "automatic detection", "lexico-syntactic pattern", "reasoning"]} +{"id": "min-etal-2019-compositional", "title": "Compositional Questions Do Not Necessitate Multi-hop Reasoning", "abstract": "Multi-hop reading comprehension (RC) questions are challenging because they require reading and reasoning over multiple paragraphs. We argue that it can be difficult to construct large multi-hop RC datasets. For example, even highly compositional questions can be answered with a single hop if they target specific entity types, or the facts needed to answer them are redundant. Our analysis is centered on HotpotQA, where we show that single-hop reasoning can solve much more of the dataset than previously thought. We introduce a single-hop BERT-based RC model that achieves 67 F1\u2014comparable to state-of-the-art multi-hop models. We also design an evaluation setting where humans are not shown all of the necessary paragraphs for the intended multi-hop reasoning but can still answer over 80% of questions.
Together with detailed error analysis, these results suggest there should be an increasing focus on the role of evidence in multi-hop reasoning and possibly even a shift towards information retrieval style evaluations with large and diverse evidence collections.", "keyphrases": ["multi-hop", "reasoning", "correct answer", "passage", "annotation artifact"]} +{"id": "duong-etal-2017-multilingual-semantic", "title": "Multilingual Semantic Parsing And Code-Switching", "abstract": "Extending semantic parsing systems to new domains and languages is a highly expensive, time-consuming process, so making effective use of existing resources is critical. In this paper, we describe a transfer learning method using crosslingual word embeddings in a sequence-to-sequence model. On the NLmaps corpus, our approach achieves state-of-the-art accuracy of 85.7% for English. Most importantly, we observed a consistent improvement for German compared with several baseline domain adaptation techniques. As a by-product of this approach, our models that are trained on a combination of English and German utterances perform reasonably well on code-switching utterances which contain a mixture of English and German, even though the training data does not contain any such. As far as we know, this is the first study of code-switching in semantic parsing. We manually constructed the set of code-switching test utterances for the NLmaps corpus and achieve 78.3% accuracy on this dataset.", "keyphrases": ["semantic parsing", "code-switching", "word embedding", "sequence-to-sequence model", "multiple language"]} +{"id": "bostrom-durrett-2020-byte", "title": "Byte Pair Encoding is Suboptimal for Language Model Pretraining", "abstract": "The success of pretrained transformer language models (LMs) in natural language processing has led to a wide range of pretraining setups. In particular, these models employ a variety of subword tokenization methods, most notably byte-pair encoding (BPE) (Sennrich et al., 2016; Gage, 1994), the WordPiece method (Schuster and Nakajima, 2012), and unigram language modeling (Kudo, 2018), to segment text. However, to the best of our knowledge, the literature does not contain a direct evaluation of the impact of tokenization on language model pretraining. We analyze differences between BPE and unigram LM tokenization, finding that the latter method recovers subword units that align more closely with morphology and avoids problems stemming from BPE's greedy construction procedure. We then compare the fine-tuned task performance of identical transformer masked language models pretrained with these tokenizations. Across downstream tasks and two languages (English and Japanese), we find that the unigram LM tokenization method matches or outperforms BPE. We hope that developers of future pretrained LMs will consider adopting the unigram LM method over the more prevalent BPE.", "keyphrases": ["language model pretraining", "unigram", "byte pair encoding"]} +{"id": "calzolari-etal-2012-lre", "title": "The LRE Map. Harmonising Community Descriptions of Resources", "abstract": "Accurate and reliable documentation of Language Resources is an undisputable need: documentation is the gateway to discovery of Language Resources, a necessary step towards promoting the data economy. Language resources that are not documented virtually do not exist: for this reason every initiative able to collect and harmonise metadata about resources represents a valuable opportunity for the NLP community. 
In this paper we describe the LRE Map, reporting statistics on resources associated with LREC2012 papers and providing comparisons with LREC2010 data. The LRE Map, jointly launched by FLaReNet and ELRA in conjunction with the LREC 2010 Conference, is an instrument for enhancing availability of information about resources, either new or already existing ones. It aims to reinforce and facilitate the use of standards in the community. The LRE Map web interface provides the possibility of searching according to a fixed set of metadata and to view the details of extracted resources. The LRE Map is continuing to collect bottom-up input about resources from authors of other conferences through a standard submission process. This will help broaden the notion of \u201clanguage resources\u201d and attract to the field neighboring disciplines that so far have been only marginally involved by the standard notion of language resources.", "keyphrases": ["lre map", "language resource", "conference", "availability"]} +{"id": "qiu-etal-2013-mining", "title": "Mining User Relations from Online Discussions using Sentiment Analysis and Probabilistic Matrix Factorization", "abstract": "Advances in sentiment analysis have enabled extraction of user relations implied in online textual exchanges such as forum posts. However, recent studies in this direction only consider direct relation extraction from text. As user interactions can be sparse in online discussions, we propose to apply collaborative filtering through probabilistic matrix factorization to generalize and improve the opinion matrices extracted from forum posts. Experiments with two tasks show that the learned latent factor representation can give good performance on a relation polarity prediction task and improve the performance of a subgroup detection task.", "keyphrases": ["online discussion", "sentiment analysis", "probabilistic matrix factorization"]} +{"id": "marcus-etal-2016-cocogen", "title": "CoCoGen - Complexity Contour Generator: Automatic Assessment of Linguistic Complexity Using a Sliding-Window Technique", "abstract": "We present a novel approach to the automatic assessment of text complexity based on a sliding-window technique that tracks the distribution of complexity within a text. Such distribution is captured by what we term \u201ccomplexity contours\u201d derived from a series of measurements for a given linguistic complexity measure. This approach is implemented in an automatic computational tool, CoCoGen \u2013 Complexity Contour Generator, which in its current version supports 32 indices of linguistic complexity. The goal of the paper is twofold: (1) to introduce the design of our computational tool based on a sliding-window technique and (2) to showcase this approach in the area of second language (L2) learning, i.e. more specifically, in the area of L2 writing.", "keyphrases": ["complexity contour generator", "automatic assessment", "sliding-window technique"]} +{"id": "yang-etal-2019-enhancing-topic", "title": "Enhancing Topic-to-Essay Generation with External Commonsense Knowledge", "abstract": "Automatic topic-to-essay generation is a challenging task since it requires generating novel, diverse, and topic-consistent paragraph-level text with a set of topics as input. Previous work tends to perform essay generation based solely on the given topics while ignoring massive commonsense knowledge.
However, this commonsense knowledge provides additional background information, which can help to generate essays that are more novel and diverse. Towards filling this gap, we propose to integrate commonsense from the external knowledge base into the generator through a dynamic memory mechanism. Besides, the adversarial training based on a multi-label discriminator is employed to further improve topic-consistency. We also develop a series of automatic evaluation metrics to comprehensively assess the quality of the generated essay. Experiments show that with external commonsense knowledge and adversarial training, the generated essays are more novel, diverse, and topic-consistent than existing methods in terms of both automatic and human evaluation.", "keyphrases": ["external commonsense knowledge", "topic-consistency", "essay generation"]} +{"id": "santus-etal-2015-evalution", "title": "EVALution 1.0: an Evolving Semantic Dataset for Training and Evaluation of Distributional Semantic Models", "abstract": "In this paper, we introduce EVALution 1.0, a dataset designed for the training and the evaluation of Distributional Semantic Models (DSMs). This version consists of almost 7.5K tuples, instantiating several semantic relations between word pairs (including hypernymy, synonymy, antonymy, meronymy). The dataset is enriched with a large amount of additional information (i.e. relation domain, word frequency, word POS, word semantic field, etc.) that can be used for either filtering the pairs or performing an in-depth analysis of the results. The tuples were extracted from a combination of ConceptNet 5.0 and WordNet 4.0, and subsequently filtered through automatic methods and crowdsourcing in order to ensure their quality. The dataset is freely downloadable. An extension in RDF format, including also scripts for data processing, is under development.", "keyphrases": ["distributional semantic models", "hypernymy", "evalution"]} +{"id": "zhang-etal-2019-curriculum", "title": "Curriculum Learning for Domain Adaptation in Neural Machine Translation", "abstract": "We introduce a curriculum learning approach to adapt generic neural machine translation models to a specific domain. Samples are grouped by their similarities to the domain of interest and each group is fed to the training algorithm with a particular schedule. This approach is simple to implement on top of any neural framework or architecture, and consistently outperforms both unadapted and adapted baselines in experiments with two distinct domains and two language pairs.", "keyphrases": ["domain adaptation", "neural machine translation", "curriculum"]} +{"id": "castilho-2020-page", "title": "On the Same Page? Comparing Inter-Annotator Agreement in Sentence and Document Level Human Machine Translation Evaluation", "abstract": "Document-level evaluation of machine translation has raised interest in the community especially since responses to the claims of \u201chuman parity\u201d (Toral et al., 2018; L\u00e4ubli et al., 2018) with document-level human evaluations have been published. Yet, little is known about best practices regarding human evaluation of machine translation at the document-level. This paper presents a comparison of the differences in inter-annotator agreement between quality assessments using sentence and document-level set-ups.
We report results of the agreement between professional translators for fluency and adequacy scales, error annotation, and pair-wise ranking, along with the effort needed to perform the different tasks. To the best of our knowledge, this is the first study of its kind.", "keyphrases": ["inter-annotator agreement", "machine translation", "error annotation"]} +{"id": "zhang-etal-2008-improved", "title": "Improved Statistical Machine Translation by Multiple Chinese Word Segmentation", "abstract": "Chinese word segmentation (CWS) is a necessary step in Chinese-English statistical machine translation (SMT) and its performance has an impact on the results of SMT. However, there are many settings involved in creating a CWS system, such as various specifications and CWS methods. This paper investigates the effect of these settings on SMT. We tested dictionary-based and CRF-based approaches and found there was no significant difference between the two in the quality of the resulting translations. We also found the correlation between the CWS F-score and SMT BLEU score was very weak. This paper also proposes two methods of combining advantages of different specifications: a simple concatenation of training data and a feature interpolation approach in which the same types of features of translation models from various CWS schemes are linearly interpolated. We found these approaches were very effective in improving the quality of translations.", "keyphrases": ["chinese", "word segmentation", "different specification", "translation performance", "tokenization"]} +{"id": "tetreault-etal-2010-using", "title": "Using Parse Features for Preposition Selection and Error Detection", "abstract": "We evaluate the effect of adding parse features to a leading model of preposition usage. Results show a significant improvement in the preposition selection task on native speaker text and a modest increment in precision and recall in an ESL error detection task. Analysis of the parser output indicates that it is robust enough in the face of noisy non-native writing to extract useful information.", "keyphrases": ["parse feature", "preposition", "error detection"]} +{"id": "higashinaka-isozaki-2008-corpus", "title": "Corpus-based Question Answering for why-Questions", "abstract": "This paper proposes a corpus-based approach for answering why-questions. Conventional systems use hand-crafted patterns to extract and evaluate answer candidates. However, such hand-crafted patterns are likely to have low coverage of causal expressions, and it is also difficult to assign suitable weights to the patterns by hand. In our approach, causal expressions are automatically collected from corpora tagged with semantic relations. From the collected expressions, features are created to train an answer candidate ranker that maximizes the QA performance with regard to the corpus of why-questions and answers.
NAZEQA, a Japanese why-QA system based on our approach, clearly outperforms a baseline that uses hand-crafted patterns with a Mean Reciprocal Rank (top-5) of 0.305, making it presumably the best-performing fully implemented why-QA system.", "keyphrases": ["why-question", "nazeqa", "candidate answer paragraph", "negative example"]} +{"id": "zaghouani-charfi-2018-arap", "title": "Arap-Tweet: A Large Multi-Dialect Twitter Corpus for Gender, Age and Language Variety Identification", "abstract": "In this paper, we present Arap-Tweet, which is a large-scale and multi-dialectal corpus of Tweets from 11 regions and 16 countries in the Arab world representing the major Arabic dialectal varieties. To build this corpus, we collected data from Twitter and we provided a team of experienced annotators with annotation guidelines that they used to annotate the corpus for age categories, gender, and dialectal variety. During the data collection effort, we based our search on distinctive keywords that are specific to the different Arabic dialects and we also validated the location using the Twitter API. In this paper, we report on the corpus data collection and annotation efforts. We also present some issues that we encountered during these phases. Then, we present the results of the evaluation performed to ensure the consistency of the annotation. The provided corpus will enrich the limited set of available language resources for Arabic and will be an invaluable enabler for developing author profiling tools and NLP tools for Arabic.", "keyphrases": ["twitter", "gender", "country", "arab world", "arap-tweet"]} +{"id": "uzzaman-allen-2010-trips", "title": "TRIPS and TRIOS System for TempEval-2: Extracting Temporal Information from Text", "abstract": "Extracting temporal information from raw text is fundamental for deep language understanding, and key to many applications like question answering, information extraction, and document summarization. In this paper, we describe two systems we submitted to the TempEval 2 challenge, for extracting temporal information from raw text. The systems use a combination of deep semantic parsing, Markov Logic Networks and Conditional Random Field classifiers. Our two submitted systems, TRIPS and TRIOS, approached all tasks and outperformed all teams in two tasks. Furthermore, TRIOS mostly had second-best performances in other tasks. TRIOS also outperformed the other teams that attempted all the tasks. Our systems are notable in that, for tasks C -- F, they operated on raw text while all other systems used tagged events and temporal expressions in the corpus as input.", "keyphrases": ["trios", "temporal information", "trips"]} +{"id": "artetxe-etal-2018-unsupervised", "title": "Unsupervised Statistical Machine Translation", "abstract": "While modern machine translation has relied on large parallel corpora, a recent line of work has managed to train Neural Machine Translation (NMT) systems from monolingual corpora only (Artetxe et al., 2018c; Lample et al., 2018). Despite the potential of this approach for low-resource settings, existing systems are far behind their supervised counterparts, limiting their practical interest. In this paper, we propose an alternative approach based on phrase-based Statistical Machine Translation (SMT) that significantly closes the gap with supervised systems.
Our method profits from the modular architecture of SMT: we first induce a phrase table from monolingual corpora through cross-lingual embedding mappings, combine it with an n-gram language model, and fine-tune hyperparameters through an unsupervised MERT variant. In addition, iterative backtranslation improves results further, yielding, for instance, 14.08 and 26.22 BLEU points in WMT 2014 English-German and English-French, respectively, an improvement of more than 7-10 BLEU points over previous unsupervised systems, and closing the gap with supervised SMT (Moses trained on Europarl) down to 2-5 BLEU points. Our implementation is available at .", "keyphrases": ["machine translation", "alternative approach", "mapping", "smt model", "downstream task"]} +{"id": "saers-etal-2010-word", "title": "Word Alignment with Stochastic Bracketing Linear Inversion Transduction Grammar", "abstract": "The class of Linear Inversion Transduction Grammars (LITGs) is introduced, and used to induce a word alignment over a parallel corpus. We show that alignment via Stochastic Bracketing LITGs is considerably faster than Stochastic Bracketing ITGs, while still yielding alignments superior to the widely-used heuristic of intersecting bidirectional IBM alignments. Performance is measured as the translation quality of a phrase-based machine translation system built upon the word alignments, and an improvement of 2.85 BLEU points over baseline is noted for French--English.", "keyphrases": ["transduction grammar", "word alignment", "restriction"]} +{"id": "das-etal-2017-question", "title": "Question Answering on Knowledge Bases and Text using Universal Schema and Memory Networks", "abstract": "Existing question answering methods infer answers either from a knowledge base or from raw text. While knowledge base (KB) methods are good at answering compositional questions, their performance is often affected by the incompleteness of the KB. On the contrary, web text contains millions of facts that are absent in the KB, albeit in an unstructured form. Universal schema can support reasoning on the union of both structured KBs and unstructured text by aligning them in a common embedded space. In this paper we extend universal schema to natural language question answering, employing Memory networks to attend to the large body of facts in the combination of text and KB. Our models can be trained in an end-to-end fashion on question-answer pairs. Evaluation results on the Spades fill-in-the-blank question answering dataset show that exploiting universal schema for question answering is better than using either a KB or text alone. This model also outperforms the current state-of-the-art by 8.5 F1 points.", "keyphrases": ["universal schema", "memory network", "unstructured text", "knowledge basis"]} +{"id": "sanchez-cartagena-etal-2018-prompsits", "title": "Prompsit's submission to WMT 2018 Parallel Corpus Filtering shared task", "abstract": "This paper describes Prompsit Language Engineering's submissions to the WMT 2018 parallel corpus filtering shared task. Our four submissions were based on an automatic classifier for identifying pairs of sentences that are mutual translations. A set of hand-crafted hard rules for discarding sentences with evident flaws was applied before the classifier. We explored different strategies for achieving a training corpus with diverse vocabulary and fluent sentences: language model scoring, an active-learning-inspired data selection algorithm and n-gram saturation.
Our submissions were very competitive in comparison with other participants on the 100 million word training corpus.", "keyphrases": ["submission", "wmt", "sentence pair"]} +{"id": "kolachina-ranta-2016-abstract", "title": "From Abstract Syntax to Universal Dependencies", "abstract": "Abstract syntax is a semantic tree representation that lies between parse trees and logical forms. It abstracts away from word order and lexical items, but contains enough information to generate both surface strings and logical forms. Abstract syntax is commonly used in compilers as an intermediate between source and target languages. Grammatical Framework (GF) is a grammar formalism that generalizes the idea to natural languages, to capture cross-lingual generalizations and perform interlingual translation. As one of the main results, the GF Resource Grammar Library (GF-RGL) has implemented a shared abstract syntax for over 30 languages. Each language has its own set of concrete syntax rules (morphology and syntax), by which it can be generated from the abstract syntax and parsed into it. This paper presents a conversion method from abstract syntax trees to dependency trees. The method is applied for converting GF-RGL trees to Universal Dependencies (UD), which uses a common set of labels for different languages. The correspondence between GF-RGL and UD turns out to be good, and the relatively few discrepancies give rise to interesting questions about universality. The conversion also has potential for practical applications: (1) it makes the GF parser usable as a rule-based dependency parser; (2) it enables bootstrapping UD treebanks from GF treebanks; (3) it defines formal criteria to assess the informal annotation schemes of UD; (4) it gives a method to check the consistency of manually annotated UD trees with respect to the annotation schemes; (5) it makes information from UD treebanks available.", "keyphrases": ["universal dependencies", "conversion", "syntax tree"]} +{"id": "eisele-etal-2008-using", "title": "Using Moses to Integrate Multiple Rule-Based Machine Translation Engines into a Hybrid System", "abstract": "Based on an architecture that allows us to combine statistical machine translation (SMT) with rule-based machine translation (RBMT) in a multi-engine setup, we present new results that show that this type of system combination can actually increase the lexical coverage of the resulting hybrid system, at least as far as this can be measured via BLEU score.", "keyphrases": ["hybrid system", "rbmt system", "smt system", "phrase table", "high quality"]} +{"id": "agirre-etal-2006-methodology", "title": "A methodology for the joint development of the Basque WordNet and Semcor", "abstract": "This paper describes the methodology adopted to jointly develop the Basque WordNet and a hand-annotated corpus (the Basque Semcor). This joint development allows for better motivated sense distinctions, and a tighter coupling between both resources. The methodology involves edition, tagging and refereeing tasks. We are currently halfway through the nominal part of the 300,000-word corpus (roughly equivalent to a 500,000-word corpus for English). We present a detailed description of the task, including the main criteria for difficult cases in the edition of the senses and the tagging of the corpus, with special mention of multiword entries.
Finally we give a detailed picture of the current figures, as well as an analysis of the agreement rates.", "keyphrases": ["methodology", "joint development", "basque wordnet"]} +{"id": "hendricks-etal-2018-localizing", "title": "Localizing Moments in Video with Temporal Language", "abstract": "Localizing moments in a longer video via natural language queries is a new, challenging task at the intersection of language and video understanding. Though moment localization with natural language is similar to other language and vision tasks like natural language object retrieval in images, moment localization offers an interesting opportunity to model temporal dependencies and reasoning in text. We propose a new model that explicitly reasons about different temporal segments in a video, and shows that temporal context is important for localizing phrases which include temporal language. To benchmark whether our model, and other recent video localization models, can effectively reason about temporal language, we collect the novel TEMPOral reasoning in video and language (TEMPO) dataset. Our dataset consists of two parts: a dataset with real videos and template sentences (TEMPO - Template Language) which allows for controlled studies on temporal language, and a human language dataset which consists of temporal sentences annotated by humans (TEMPO - Human Language).", "keyphrases": ["moment", "video", "temporal language"]} +{"id": "feng-hirst-2012-text", "title": "Text-level Discourse Parsing with Rich Linguistic Features", "abstract": "In this paper, we develop an RST-style text-level discourse parser, based on the HILDA discourse parser (Hernault et al., 2010b). We significantly improve its tree-building step by incorporating our own rich linguistic features. We also analyze the difficulty of extending traditional sentence-level discourse parsing to text-level parsing by comparing discourse-parsing performance under different discourse conditions.", "keyphrases": ["discourse", "feature engineering", "pdtb", "well performance"]} +{"id": "zampieri-etal-2019-semeval", "title": "SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval)", "abstract": "We present the results and the main findings of SemEval-2019 Task 6 on Identifying and Categorizing Offensive Language in Social Media (OffensEval). The task was based on a new dataset, the Offensive Language Identification Dataset (OLID), which contains over 14,000 English tweets, and it featured three sub-tasks. In sub-task A, systems were asked to discriminate between offensive and non-offensive posts. In sub-task B, systems had to identify the type of offensive content in the post. Finally, in sub-task C, systems had to detect the target of the offensive posts. OffensEval attracted a large number of participants and it was one of the most popular tasks in SemEval-2019. In total, nearly 800 teams signed up to participate in the task and 115 of them submitted results, which are presented and analyzed in this report.", "keyphrases": ["categorizing offensive language", "social media", "english tweet", "semeval-2019 task", "offenseval task"]} +{"id": "dugan-etal-2020-roft", "title": "RoFT: A Tool for Evaluating Human Detection of Machine-Generated Text", "abstract": "In recent years, large neural networks for natural language generation (NLG) have made leaps and bounds in their ability to generate fluent text. 
However, the tasks of evaluating quality differences between NLG systems and understanding how humans perceive the generated text remain both crucial and difficult. In this system demonstration, we present Real or Fake Text (RoFT), a website that tackles both of these challenges by inviting users to try their hand at detecting machine-generated text in a variety of domains. We introduce a novel evaluation task based on detecting the boundary at which a text passage that starts off human-written transitions to being machine-generated. We show preliminary results of using RoFT to evaluate detection of machine-generated news articles.", "keyphrases": ["human detection", "machine-generated text", "boundary", "roft", "annotator"]} +{"id": "provilkov-etal-2020-bpe", "title": "BPE-Dropout: Simple and Effective Subword Regularization", "abstract": "Subword segmentation is widely used to address the open vocabulary problem in machine translation. The dominant approach to subword segmentation is Byte Pair Encoding (BPE), which keeps the most frequent words intact while splitting the rare ones into multiple tokens. While multiple segmentations are possible even with the same vocabulary, BPE splits words into unique sequences; this may prevent a model from better learning the compositionality of words and being robust to segmentation errors. So far, the only way to overcome this BPE imperfection, its deterministic nature, was to create another subword segmentation algorithm (Kudo, 2018). In contrast, we show that BPE itself incorporates the ability to produce multiple segmentations of the same word. We introduce BPE-dropout, a simple and effective subword regularization method based on and compatible with conventional BPE. It stochastically corrupts the segmentation procedure of BPE, which leads to producing multiple segmentations within the same fixed BPE framework. Using BPE-dropout during training and the standard BPE during inference improves translation quality up to 2.3 BLEU compared to BPE and up to 0.9 BLEU compared to the previous subword regularization.", "keyphrases": ["segmentation", "bpe", "tokenization", "same word", "downstream task"]} +{"id": "li-jurafsky-2015-multi", "title": "Do Multi-Sense Embeddings Improve Natural Language Understanding?", "abstract": "Learning a distinct representation for each sense of an ambiguous word could lead to more powerful and fine-grained models of vector-space representations. Yet while \u2018multi-sense\u2019 methods have been proposed and tested on artificial word-similarity tasks, we don\u2019t know if they improve real natural language understanding tasks. In this paper we introduce a multi-sense embedding model based on Chinese Restaurant Processes that achieves state-of-the-art performance on matching human word similarity judgments, and propose a pipelined architecture for incorporating multi-sense embeddings into language understanding. We then test the performance of our model on part-of-speech tagging, named entity recognition, sentiment analysis, semantic relation identification and semantic relatedness, controlling for embedding dimensionality. We find that multi-sense embeddings do improve performance on some tasks (part-of-speech tagging, semantic relation identification, semantic relatedness) but not on others (named entity recognition, various forms of sentiment analysis). We discuss how these differences may be caused by the different role of word sense information in each of the tasks.
The results highlight the importance of testing embedding models in real applications.", "keyphrases": ["multi-sense embedding", "tagging", "entity recognition", "sentiment analysis", "semantic relation identification"]} +{"id": "malmi-etal-2019-encode", "title": "Encode, Tag, Realize: High-Precision Text Editing", "abstract": "We propose LaserTagger - a sequence tagging approach that casts text generation as a text editing task. Target texts are reconstructed from the inputs using three main edit operations: keeping a token, deleting it, and adding a phrase before the token. To predict the edit operations, we propose a novel model, which combines a BERT encoder with an autoregressive Transformer decoder. This approach is evaluated on English text on four tasks: sentence fusion, sentence splitting, abstractive summarization, and grammar correction. LaserTagger achieves new state-of-the-art results on three of these tasks, performs comparably to a set of strong seq2seq baselines with a large number of training examples, and outperforms them when the number of examples is limited. Furthermore, we show that at inference time tagging can be more than two orders of magnitude faster than comparable seq2seq models, making it more attractive for running in a live environment.", "keyphrases": ["lasertagger", "text generation", "edit operation", "sequence tagging model"]} +{"id": "rahman-ng-2012-resolving", "title": "Resolving Complex Cases of Definite Pronouns: The Winograd Schema Challenge", "abstract": "We examine the task of resolving complex cases of definite pronouns, specifically those for which traditional linguistic constraints on coreference (e.g., Binding Constraints, gender and number agreement) as well as commonly-used resolution heuristics (e.g., string-matching facilities, syntactic salience) are not useful. Being able to solve this task has broader implications in artificial intelligence: a restricted version of it, sometimes referred to as the Winograd Schema Challenge, has been suggested as a conceptually and practically appealing alternative to the Turing Test. We employ a knowledge-rich approach to this task, which yields a pronoun resolver that outperforms state-of-the-art resolvers by nearly 18 points in accuracy on our dataset.", "keyphrases": ["pronoun", "winograd schema challenge", "coreference resolution problem"]} +{"id": "corley-mihalcea-2005-measuring", "title": "Measuring the Semantic Similarity of Texts", "abstract": "This paper presents a knowledge-based method for measuring the semantic similarity of texts. While there is a large body of previous work focused on finding the semantic similarity of concepts and words, the application of these word-oriented methods to text similarity has not yet been explored. In this paper, we introduce a method that combines word-to-word similarity metrics into a text-to-text metric, and we show that this method outperforms the traditional text similarity metrics based on lexical matching.", "keyphrases": ["semantic similarity", "paraphrase", "implication", "recognition", "sts"]} +{"id": "morin-etal-2007-bilingual", "title": "Bilingual Terminology Mining - Using Brain, not brawn comparable corpora", "abstract": "Current research in text mining favours the quantity of texts over their quality. But for bilingual terminology mining, and for many language pairs, large comparable corpora are not available.
More importantly, as terms are defined vis-a-vis a specific domain with a restricted register, it is expected that the quality rather than the quantity of the corpus matters more in terminology mining. Our hypothesis, therefore, is that the quality of the corpus is more important than the quantity and ensures the quality of the acquired terminological resources. We show how important the type of discourse is as a characteristic of the comparable corpus.", "keyphrases": ["comparable corpora", "quantity", "specific domain", "lexicon extraction"]} +{"id": "roark-etal-2004-discriminative", "title": "Discriminative Language Modeling with Conditional Random Fields and the Perceptron Algorithm", "abstract": "This paper describes discriminative language modeling for a large vocabulary speech recognition task. We contrast two parameter estimation methods: the perceptron algorithm, and a method based on conditional random fields (CRFs). The models are encoded as deterministic weighted finite state automata, and are applied by intersecting the automata with word-lattices that are the output from a baseline recognizer. The perceptron algorithm has the benefit of automatically selecting a relatively small feature set in just a couple of passes over the training data. However, using the feature set output from the perceptron algorithm (initialized with their weights), CRF training provides an additional 0.5% reduction in word error rate, for a total 1.8% absolute reduction from the baseline of 39.2%.", "keyphrases": ["conditional random field", "perceptron algorithm", "candidate"]} +{"id": "duan-etal-2010-mixture", "title": "Mixture Model-based Minimum Bayes Risk Decoding using Multiple Machine Translation Systems", "abstract": "We present Mixture Model-based Minimum Bayes Risk (MMMBR) decoding, an approach that makes use of multiple SMT systems to improve translation accuracy. Unlike existing MBR decoding methods defined on the basis of single SMT systems, an MMMBR decoder reranks translation outputs in the combined search space of multiple systems using the MBR decision rule and a mixture distribution of component SMT models for translation hypotheses. MMMBR decoding is a general method that is independent of specific SMT models and can be applied to various commonly used search spaces. Experimental results on the NIST Chinese-to-English MT evaluation tasks show that our approach brings significant improvements to single system-based MBR decoding and outperforms a state-of-the-art system combination method.", "keyphrases": ["minimum bayes risk", "translation accuracy", "mixture"]} +{"id": "terra-clarke-2003-frequency", "title": "Frequency Estimates for Statistical Word Similarity Measures", "abstract": "Statistical measures of word similarity have application in many areas of natural language processing, such as language modeling and information retrieval. We report a comparative study of two methods for estimating word co-occurrence frequencies required by word similarity measures. Our frequency estimates are generated from a terabyte-sized corpus of Web data, and we study the impact of corpus size on the effectiveness of the measures. We base the evaluation on one TOEFL question set and two practice question sets, each consisting of a number of multiple choice questions seeking the best synonym for a given target word. For two question sets, a context for the target word is provided, and we examine a number of word similarity measures that exploit this context.
Our best combination of similarity measure and frequency estimation method answers 6-8% more questions than the best results previously reported for the same question sets.", "keyphrases": ["word similarity measure", "web data", "frequency estimate"]} +{"id": "wieting-gimpel-2018-paranmt", "title": "ParaNMT-50M: Pushing the Limits of Paraphrastic Sentence Embeddings with Millions of Machine Translations", "abstract": "We describe ParaNMT-50M, a dataset of more than 50 million English-English sentential paraphrase pairs. We generated the pairs automatically by using neural machine translation to translate the non-English side of a large parallel corpus, following Wieting et al. (2017). Our hope is that ParaNMT-50M can be a valuable resource for paraphrase generation and can provide a rich source of semantic knowledge to improve downstream natural language understanding tasks. To show its utility, we use ParaNMT-50M to train paraphrastic sentence embeddings that outperform all supervised systems on every SemEval semantic textual similarity competition, in addition to showing how it can be used for paraphrase generation.", "keyphrases": ["paraphrase", "parallel corpus", "back-translation", "pivot language"]} +{"id": "gu-etal-2018-meta", "title": "Meta-Learning for Low-Resource Neural Machine Translation", "abstract": "In this paper, we propose to extend the recently introduced model-agnostic meta-learning algorithm (MAML, Finn, et al., 2017) for low-resource neural machine translation (NMT). We frame low-resource translation as a meta-learning problem where we learn to adapt to low-resource languages based on multilingual high-resource language tasks. We use the universal lexical representation (Gu et al., 2018b) to overcome the input-output mismatch across different languages. We evaluate the proposed meta-learning strategy using eighteen European languages (Bg, Cs, Da, De, El, Es, Et, Fr, Hu, It, Lt, Nl, Pl, Pt, Sk, Sl, Sv and Ru) as source tasks and five diverse languages (Ro, Lv, Fi, Tr and Ko) as target tasks. We show that the proposed approach significantly outperforms the multilingual, transfer learning based approach (Zoph et al., 2016) and enables us to train a competitive NMT system with only a fraction of training examples. For instance, the proposed approach can achieve as high as 22.04 BLEU on Romanian-English WMT'16 by seeing only 16,000 translated words (~600 parallel sentences).", "keyphrases": ["machine translation", "meta-learning algorithm", "cross-lingual transfer"]} +{"id": "sharma-etal-2016-shallow", "title": "Shallow Parsing Pipeline - Hindi-English Code-Mixed Social Media Text", "abstract": "In this study, the problem of shallow parsing of Hindi-English code-mixed social media text (CSMT) has been addressed. We have annotated the data, developed a language identifier, a normalizer, a part-of-speech tagger and a shallow parser. To the best of our knowledge, we are the first to attempt shallow parsing on CSMT. The pipeline developed has been made available to the research community with the goal of enabling better text analysis of Hindi-English CSMT. The pipeline is accessible online.", "keyphrases": ["language identifier", "shallow parsing", "medium text", "pos tag", "code-mixed data"]} +{"id": "huang-carley-2019-syntax", "title": "Syntax-Aware Aspect Level Sentiment Classification with Graph Attention Networks", "abstract": "Aspect level sentiment classification aims to identify the sentiment expressed towards an aspect given a context sentence.
Previous neural network-based methods largely ignore the syntactic structure of the sentence. In this paper, we propose a novel target-dependent graph attention network (TD-GAT) for aspect level sentiment classification, which explicitly utilizes the dependency relationship among words. Using the dependency graph, it propagates sentiment features directly from the syntactic context of an aspect target. In our experiments, we show our method outperforms multiple baselines with GloVe embeddings. We also demonstrate that using BERT representations further substantially boosts the performance.", "keyphrases": ["graph attention network", "dependency tree", "recent effort"]} +{"id": "tseng-etal-2005-conditional", "title": "A Conditional Random Field Word Segmenter for Sighan Bakeoff 2005", "abstract": "We present a Chinese word segmentation system submitted to the closed track of Sighan bakeoff 2005. Our segmenter was built using a conditional random field sequence model that provides a framework to use a large number of linguistic features such as character identity, morphological and character reduplication features. Because our morphological features were extracted from the training corpora automatically, our system was not biased toward any particular variety of Mandarin. Thus, our system does not overfit the variety of Mandarin most familiar to the system's designers. Our final system achieved an F-score of 0.947 (AS), 0.943 (HK), 0.950 (PK) and 0.964 (MSR).", "keyphrases": ["random field", "word segmentation", "sighan bakeoff", "crf", "end"]} +{"id": "rashkin-etal-2018-modeling", "title": "Modeling Naive Psychology of Characters in Simple Commonsense Stories", "abstract": "Understanding a narrative requires reading between the lines and reasoning about the unspoken but obvious implications about events and people's mental states \u2014 a capability that is trivial for humans but remarkably hard for machines. To facilitate research addressing this challenge, we introduce a new annotation framework to explain naive psychology of story characters as fully-specified chains of mental states with respect to motivations and emotional reactions. Our work presents a new large-scale dataset with rich low-level annotations and establishes baseline performance on several new tasks, suggesting avenues for future research.", "keyphrases": ["naive psychology", "story", "mental state", "chain", "emotional reaction"]} +{"id": "marjou-2021-oteann", "title": "OTEANN: Estimating the Transparency of Orthographies with an Artificial Neural Network", "abstract": "To transcribe spoken language to a written medium, most alphabets enable an unambiguous sound-to-letter rule. However, some writing systems have distanced themselves from this simple concept and little work exists in Natural Language Processing (NLP) on measuring such distance. In this study, we use an Artificial Neural Network (ANN) model to evaluate the transparency between written words and their pronunciation, hence its name Orthographic Transparency Estimation with an ANN (OTEANN). Based on datasets derived from Wikimedia dictionaries, we trained and tested this model to score the percentage of false predictions in phoneme-to-grapheme and grapheme-to-phoneme translation tasks. The scores obtained on 17 orthographies were in line with the estimations of other studies.
Interestingly, the model also provided insight into typical mistakes made by learners who only consider the phonemic rule in reading and writing.", "keyphrases": ["transparency", "orthography", "artificial neural network", "oteann", "purpose"]} +{"id": "prakash-etal-2016-neural", "title": "Neural Paraphrase Generation with Stacked Residual LSTM Networks", "abstract": "In this paper, we propose a novel neural approach for paraphrase generation. Conventional paraphrase generation methods either leverage hand-written rules and thesauri-based alignments, or use statistical machine learning principles. To the best of our knowledge, this work is the first to explore deep learning models for paraphrase generation. Our primary contribution is a stacked residual LSTM network, where we add residual connections between LSTM layers. This allows for efficient training of deep LSTMs. We evaluate our model and other state-of-the-art deep learning models on three different datasets: PPDB, WikiAnswers, and MSCOCO. Evaluation results demonstrate that our model outperforms sequence to sequence, attention-based, and bi-directional LSTM models on BLEU, METEOR, TER, and an embedding-based sentence similarity metric.", "keyphrases": ["lstm network", "neural paraphrase generation", "syntax"]} +{"id": "cui-etal-2013-bilingual", "title": "Bilingual Data Cleaning for SMT using Graph-based Random Walk", "abstract": "The quality of bilingual data is a key factor in Statistical Machine Translation (SMT). Low-quality bilingual data tends to produce incorrect translation knowledge and also degrades translation modeling performance. Previous work often used supervised learning methods to filter low-quality data, but a fair amount of human-labeled examples, which are not easy to obtain, are needed. To reduce the reliance on labeled examples, we propose an unsupervised method to clean bilingual data. The method leverages the mutual reinforcement between the sentence pairs and the extracted phrase pairs, based on the observation that better sentence pairs often lead to better phrase extraction and vice versa. End-to-end experiments show that the proposed method substantially improves the performance in large-scale Chinese-to-English translation tasks.", "keyphrases": ["graph-based random walk", "sentence pair", "bilingual data cleaning"]} +{"id": "reddy-etal-2014-large", "title": "Large-scale Semantic Parsing without Question-Answer Pairs", "abstract": "In this paper we introduce a novel semantic parsing approach to query Freebase in natural language without requiring manual annotations or question-answer pairs. Our key insight is to represent natural language via semantic graphs whose topology shares many commonalities with Freebase. Given this representation, we conceptualize semantic parsing as a graph matching problem. Our model converts sentences to semantic graphs using CCG and subsequently grounds them to Freebase guided by denotations as a form of weak supervision. Evaluation experiments on a subset of the Free917 and WebQuestions benchmark datasets show our semantic parser improves over the state of the art.", "keyphrases": ["semantic parsing", "question-answer pair", "freebase", "distant supervision"]} +{"id": "eck-etal-2005-low", "title": "Low Cost Portability for Statistical Machine Translation based on N-gram Frequency and TF-IDF", "abstract": "Statistical machine translation relies heavily on the available training data.
In some cases it is necessary to limit the amount of training data that can be created for or actually used by the systems. We introduce weighting schemes which allow us to sort sentences based on the frequency of unseen n-grams. A second approach uses TF-IDF to rank the sentences. After sorting, we can select smaller training corpora and we are able to show that systems trained on much less training data achieve a very competitive performance compared to baseline systems using all available training data.", "keyphrases": ["statistical machine translation", "tf-idf", "weighting scheme"]} +{"id": "och-etal-2004-smorgasbord", "title": "A Smorgasbord of Features for Statistical Machine Translation", "abstract": "We describe a methodology for rapid experimentation in statistical machine translation which we use to add a large number of features to a baseline system exploiting features from a wide range of levels of syntactic representation. Feature values were combined in a log-linear model to select the highest scoring candidate translation from an n-best list. Feature weights were optimized directly against the BLEU evaluation metric on held-out data. We present results for a small selection of features at each level of syntactic representation.", "keyphrases": ["statistical machine translation", "list", "distortion model", "och"]} +{"id": "goswami-etal-2020-unsupervised-relation", "title": "Unsupervised Relation Extraction from Language Models using Constrained Cloze Completion", "abstract": "We show that state-of-the-art self-supervised language models can be readily used to extract relations from a corpus without the need to train a fine-tuned extractive head. We introduce RE-Flex, a simple framework that performs constrained cloze completion over pretrained language models to perform unsupervised relation extraction. RE-Flex uses contextual matching to ensure that language model predictions match supporting evidence from the input corpus that is relevant to a target relation. We perform an extensive experimental study over multiple relation extraction benchmarks and demonstrate that RE-Flex outperforms competing unsupervised relation extraction methods based on pretrained language models by up to 27.8 F1 points compared to the next-best method. Our results show that constrained inference queries against a language model can enable accurate unsupervised relation extraction.", "keyphrases": ["fine-tuned extractive head", "unsupervised relation extraction", "template"]} +{"id": "guo-diab-2012-modeling", "title": "Modeling Sentences in the Latent Space", "abstract": "Sentence Similarity is the process of computing a similarity score between two sentences. Previous sentence similarity work finds that latent semantics approaches to the problem do not perform well due to insufficient information in single sentences. In this paper, we show that by carefully handling words that are not in the sentences (missing words), we can train a reliable latent variable model on sentences. In the process, we propose a new evaluation framework for sentence similarity: Concept Definition Retrieval. The new framework allows for large scale tuning and testing of Sentence Similarity models. Experiments on the new task and previous data sets show significant improvement of our model over baselines and other traditional latent variable models.
Our results indicate comparable and even better performance than current state-of-the-art systems addressing the problem of sentence similarity.", "keyphrases": ["latent space", "missing word", "wtmf", "paraphrase", "cross-lingual setting"]} +{"id": "ding-etal-2020-self", "title": "Self-Attention with Cross-Lingual Position Representation", "abstract": "Position encoding (PE), an essential part of self-attention networks (SANs), is used to preserve the word order information for natural language processing tasks, generating fixed position indices for input sequences. However, in cross-lingual scenarios such as machine translation, the PEs of source and target sentences are modeled independently. Due to word order divergences in different languages, modeling the cross-lingual positional relationships might help SANs tackle this problem. In this paper, we augment SANs with cross-lingual position representations to model the bilingually aware latent structure for the input sentence. Specifically, we utilize bracketing transduction grammar (BTG)-based reordering information to encourage SANs to learn bilingual diagonal alignments. Experimental results on WMT'14 English\u21d2German, WAT'17 Japanese\u21d2English, and WMT'17 Chinese\u21d4English translation tasks demonstrate that our approach significantly and consistently improves translation quality over strong baselines. Extensive analyses confirm that the performance gains come from the cross-lingual information.", "keyphrases": ["cross-lingual position representation", "position representation", "self-attention"]} +{"id": "cowan-collins-2005-morphology", "title": "Morphology and Reranking for the Statistical Parsing of Spanish", "abstract": "We present two methods for incorporating detailed features in a Spanish parser, building on a baseline model that is a lexicalized PCFG. The first method exploits Spanish morphology, and achieves an F1 constituency score of 83.6%. This is an improvement over 81.2% accuracy for the baseline, which makes little or no use of morphological information. The second model uses a reranking approach to add arbitrary global features of parse trees to the morphological model. The reranking model reaches 85.1% F1 accuracy on the Spanish parsing task. The resulting model for Spanish parsing combines an approach that specifically targets morphological information with an approach that makes use of general structural features.", "keyphrases": ["spanish", "morphology", "pronoun"]} +{"id": "tromble-eisner-2009-learning", "title": "Learning Linear Ordering Problems for Better Translation", "abstract": "We apply machine learning to the Linear Ordering Problem in order to learn sentence-specific reordering models for machine translation. We demonstrate that even when these models are used as a mere preprocessing step for German-English translation, they significantly outperform Moses' integrated lexicalized reordering model. Our models are trained on automatically aligned bitext. Their form is simple but novel. They assess, based on features of the input sentence, how strongly each pair of input word tokens w_i, w_j would like to reverse their relative order. Combining all these pairwise preferences to find the best global reordering is NP-hard. However, we present a non-trivial O(n^3) algorithm, based on chart parsing, that at least finds the best reordering within a certain exponentially large neighborhood.
We show how to iterate this reordering process within a local search algorithm, which we use in training.", "keyphrases": ["linear ordering problem", "machine translation", "learning objective", "minimal syntactic information", "interpolation"]} +{"id": "cao-rei-2016-joint", "title": "A Joint Model for Word Embedding and Word Morphology", "abstract": "This paper presents a joint model for performing unsupervised morphological analysis on words, and learning a character-level composition function from morphemes to word embeddings. Our model splits individual words into segments, and weights each segment according to its ability to predict context words. Our morphological analysis is comparable to dedicated morphological analyzers at the task of morpheme boundary recovery, and also performs better than word-based embedding models at the task of syntactic analogy answering. Finally, we show that incorporating morphology explicitly into character-level models helps them produce embeddings for unseen words which correlate better with human judgments.", "keyphrases": ["joint model", "word embedding", "segmentation"]} +{"id": "martin-2017-community2vec", "title": "community2vec: Vector representations of online communities encode semantic relationships", "abstract": "Vector embeddings of words have been shown to encode meaningful semantic relationships that enable solving of complex analogies. This vector embedding concept has been extended successfully to many different domains and in this paper we both create and visualize vector representations of an unstructured collection of online communities based on user participation. Further, we quantitatively and qualitatively show that these representations allow solving of semantically meaningful community analogies and also other more general types of relationships. These results could help improve community recommendation engines and also serve as a tool for sociological studies of community relatedness.", "keyphrases": ["vector representation", "online community", "semantic relationship"]} +{"id": "costa-jussa-fonollosa-2006-statistical", "title": "Statistical Machine Reordering", "abstract": "Reordering is currently one of the most important problems in statistical machine translation systems. This paper presents a novel strategy for dealing with it: statistical machine reordering (SMR). It consists in using the powerful techniques developed for statistical machine translation (SMT) to translate the source language (S) into a reordered source language (S'), which allows for an improved translation into the target language (T). The SMT task changes from S2T to S'2T which leads to a monotonized word alignment and shorter translation units. In addition, the use of classes in SMR helps to infer new word reorderings. Experiments are reported in the EsEn WMT06 tasks and the ZhEn IWSLT05 task and show significant improvement in translation quality.", "keyphrases": ["statistical machine", "source sentence", "smt technique", "example costa-jussa\u0300"]} +{"id": "jin-chen-2008-fourth", "title": "The Fourth International Chinese Language Processing Bakeoff: Chinese Word Segmentation, Named Entity Recognition and Chinese POS Tagging", "abstract": "The Fourth International Chinese Language Processing Bakeoff was held in 2007 to assess the state of the art in three important tasks: Chinese word segmentation, named entity recognition and Chinese POS tagging. 
Twenty-eight groups submitted result sets in the three tasks across two tracks and a total of seven corpora. Strong results have been found in all the tasks, along with continuing challenges.", "keyphrases": ["chinese word segmentation", "entity recognition", "pos tagging", "cws"]} +{"id": "misra-etal-2017-mapping", "title": "Mapping Instructions and Visual Observations to Actions with Reinforcement Learning", "abstract": "We propose to directly map raw visual observations and text input to actions for instruction execution. While existing approaches assume access to structured environment representations or use a pipeline of separately trained models, we learn a single model to jointly reason about linguistic and visual input. We use reinforcement learning in a contextual bandit setting to train a neural network agent. To guide the agent's exploration, we use reward shaping with different forms of supervision. Our approach does not require intermediate representations, planning procedures, or training different models. We evaluate in a simulated environment, and show significant improvements over supervised learning and common reinforcement learning variants.", "keyphrases": ["visual observation", "action", "reinforcement learning", "environment", "mapping instruction"]} +{"id": "kim-etal-2020-efficient", "title": "Efficient Dialogue State Tracking by Selectively Overwriting Memory", "abstract": "Recent works in dialogue state tracking (DST) focus on an open vocabulary-based setting to resolve scalability and generalization issues of the predefined ontology-based approaches. However, they are inefficient in that they predict the dialogue state at every turn from scratch. Here, we consider dialogue state as an explicit fixed-sized memory and propose a selectively overwriting mechanism for more efficient DST. This mechanism consists of two steps: (1) predicting state operation on each of the memory slots, and (2) overwriting the memory with new values, of which only a few are generated according to the predicted state operations. Our method decomposes DST into two sub-tasks and guides the decoder to focus only on one of the tasks, thus reducing the burden of the decoder. This enhances the effectiveness of training and DST performance. Our SOM-DST (Selectively Overwriting Memory for Dialogue State Tracking) model achieves state-of-the-art joint goal accuracy with 51.72% in MultiWOZ 2.0 and 53.01% in MultiWOZ 2.1 in an open vocabulary-based DST setting. In addition, we analyze the accuracy gaps between the current and the ground truth-given situations and suggest that it is a promising direction to improve state operation prediction to boost the DST performance.", "keyphrases": ["dialogue state tracking", "memory", "state operation", "previous turn"]} +{"id": "gonzalez-agirre-etal-2019-pharmaconer", "title": "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track", "abstract": "One of the biomedical entity types of relevance for medicine or biosciences is chemical compounds and drugs. The correct detection of these entities is critical for other text mining applications building on them, such as adverse drug-reaction detection, medication-related fake news or drug-target extraction. Although a significant effort was made to detect mentions of drugs/chemicals in English texts, so far only very limited attempts were made to recognize them in medical documents in other languages.
Taking into account the growing number of medical publications and clinical records written in Spanish, we have organized the first shared task on detecting drug and chemical entities in Spanish medical documents. Additionally, we included a clinical concept-indexing sub-track asking teams to return SNOMED-CT identifiers related to drugs/chemicals for a collection of documents. For this task, named PharmaCoNER, we generated annotation guidelines together with a corpus of 1,000 manually annotated clinical case studies. A total of 22 teams participated in sub-track 1 (77 system runs), and 7 teams in sub-track 2 (19 system runs). Top-scoring teams used sophisticated deep learning approaches yielding very competitive results with F-measures above 0.91. These results indicate that there is a real interest in promoting biomedical text mining efforts beyond English. We foresee that the PharmaCoNER annotation guidelines, corpus and participant systems will foster the development of new resources for clinical and biomedical text mining systems of Spanish medical data.", "keyphrases": ["compound", "protein", "entity recognition track", "pharmaconer"]} +{"id": "ionescu-etal-2014-characters", "title": "Can characters reveal your native language? A language-independent approach to native language identification", "abstract": "A common approach in text mining tasks such as text categorization, authorship identification or plagiarism detection is to rely on features like words, part-of-speech tags, stems, or some other high-level linguistic features. In this work, an approach that uses character n-grams as features is proposed for the task of native language identification. Instead of doing standard feature selection, the proposed approach combines several string kernels using multiple kernel learning. Kernel Ridge Regression and Kernel Discriminant Analysis are independently used in the learning stage. The empirical results obtained in all the experiments conducted in this work indicate that the proposed approach achieves state-of-the-art performance in native language identification, reaching an accuracy that is 1.7% above the top-scoring system of the 2013 NLI Shared Task. Furthermore, the proposed approach has an important advantage in that it is language independent and linguistic theory neutral. In the cross-corpus experiment, the proposed approach shows that it can also be topic independent, improving over the state-of-the-art system by 32.3%.", "keyphrases": ["character", "native language identification", "kernel discriminant analysis", "text analysis task"]} +{"id": "rothe-etal-2016-ultradense", "title": "Ultradense Word Embeddings by Orthogonal Transformation", "abstract": "Embeddings are generic representations that are useful for many NLP tasks. In this paper, we introduce DENSIFIER, a method that learns an orthogonal transformation of the embedding space that focuses the information relevant for a task in an ultradense subspace of a dimensionality that is smaller by a factor of 100 than the original space. We show that ultradense embeddings generated by DENSIFIER reach state of the art on a lexicon creation task in which words are annotated with three types of lexical information - sentiment, concreteness and frequency.
On the SemEval2015 10B sentiment analysis task we show that no information is lost when the ultradense subspace is used, but training is an order of magnitude more efficient due to the compactness of the ultradense space.", "keyphrases": ["orthogonal transformation", "ultradense subspace", "sentiment analysis task", "ultradense word embeddings", "downstream task"]} +{"id": "ozgur-radev-2009-detecting", "title": "Detecting Speculations and their Scopes in Scientific Text", "abstract": "Distinguishing speculative statements from factual ones is important for most biomedical text mining applications. We introduce an approach which is based on solving two sub-problems to identify speculative sentence fragments. The first sub-problem is identifying the speculation keywords in the sentences and the second one is resolving their linguistic scopes. We formulate the first sub-problem as a supervised classification task, where we classify the potential keywords as real speculation keywords or not by using a diverse set of linguistic features that represent the contexts of the keywords. After detecting the actual speculation keywords, we use the syntactic structures of the sentences to determine their scopes.", "keyphrases": ["scope", "syntactic structure", "speculation cue", "heuristic rule"]} +{"id": "mohammad-etal-2016-semeval", "title": "SemEval-2016 Task 6: Detecting Stance in Tweets", "abstract": "Here for the first time we present a shared task on detecting stance from tweets: given a tweet and a target entity (person, organization, etc.), automatic natural language systems must determine whether the tweeter is in favor of the given target, against the given target, or whether neither inference is likely. The target of interest may or may not be referred to in the tweet, and it may or may not be the target of opinion. Two tasks are proposed. Task A is a traditional supervised classification task where 70% of the annotated data for a target is used as training and the rest for testing. For Task B, we use as test data all of the instances for a new target (not used in task A) and no training data is provided. Our shared task received submissions from 19 teams for Task A and from 9 teams for Task B. The highest classification F-score obtained was 67.82 for Task A and 56.28 for Task B. However, systems found it markedly more difficult to infer stance towards the target of interest from tweets that express opinion towards another entity.", "keyphrases": ["tweets", "stance detection", "semeval"]} +{"id": "pereira-2009-zac", "title": "ZAC.PB: An Annotated Corpus for Zero Anaphora Resolution in Portuguese", "abstract": "This paper describes the methodology adopted in the construction of an annotated corpus for the study of zero anaphora in Portuguese, the ZAC corpus. To our knowledge, no such corpus exists at this time for the Portuguese language. The purpose of this linguistic resource is to promote the use of automatic discovery of linguistic parameters for anaphora resolution systems. Because of the complexity of the linguistic phenomena involved, a detailed description of the different situations is provided. This paper will only focus on the annotation of subject zero anaphors. The main issues regarding zero anaphora in Portuguese are: indefinite subjects, either without verbal agreement marks or with first person plural or third person plural verbal agreement; position of the anaphor relative to its antecedent, i.e.
anaphoric and cataphoric relations; coreference chains inside the same sentence and spanning several sentences; and determining the head of the antecedent noun phrase for a given anaphor. Finally, preliminary observations taken from the ZAC corpus are presented.", "keyphrases": ["annotated corpus", "portuguese", "anaphor"]} +{"id": "ghosh-etal-2011-shallow", "title": "Shallow Discourse Parsing with Conditional Random Fields", "abstract": "Parsing discourse is a challenging natural language processing task. In this paper we take a data driven approach to identify arguments of explicit discourse connectives. In contrast to previous work we do not make any assumptions on the span of arguments and consider parsing as a token-level sequence labeling task. We design the argument segmentation task as a cascade of decisions based on conditional random fields (CRFs). We train the CRFs on lexical, syntactic and semantic features extracted from the Penn Discourse Treebank and evaluate feature combinations on the commonly used test split. We show that the best combination of features includes syntactic and semantic features. The comparative error analysis investigates the performance variability over connective types and argument positions.", "keyphrases": ["discourse", "conditional random fields", "sequence labeling task", "crfs", "linear tagging approach"]} +{"id": "sasano-etal-2008-fully", "title": "A Fully-Lexicalized Probabilistic Model for Japanese Zero Anaphora Resolution", "abstract": "This paper presents a probabilistic model for Japanese zero anaphora resolution. First, this model recognizes discourse entities and links all mentions to them. Zero pronouns are then detected by case structure analysis based on automatically constructed case frames. Their appropriate antecedents are selected from the entities with high salience scores, based on the case frames and several preferences on the relation between a zero pronoun and an antecedent. Case structure and zero anaphora relation are simultaneously determined based on probabilistic evaluation metrics.", "keyphrases": ["probabilistic model", "anaphora resolution", "predicate"]} +{"id": "levy-goldberg-2014-linguistic", "title": "Linguistic Regularities in Sparse and Explicit Word Representations", "abstract": "Recent work has shown that neuralembedded word representations capture many relational similarities, which can be recovered by means of vector arithmetic in the embedded space. We show that Mikolov et al.\u2019s method of first adding and subtracting word vectors, and then searching for a word similar to the result, is equivalent to searching for a word that maximizes a linear combination of three pairwise word similarities. Based on this observation, we suggest an improved method of recovering relational similarities, improving the state-of-the-art results on two recent word-analogy datasets. Moreover, we demonstrate that analogy recovery is not restricted to neural word embeddings, and that a similar amount of relational similarities can be recovered from traditional distributional word representations.", "keyphrases": ["regularity", "word embedding", "closely-related vector"]} +{"id": "akbik-etal-2014-exploratory", "title": "Exploratory Relation Extraction in Large Text Corpora", "abstract": "In this paper, we propose and demonstrate Exploratory Relation Extraction (ERE), a novel approach to identifying and extracting relations from large text corpora based on user-driven and data-guided incremental exploration. 
We draw upon ideas from the information seeking paradigm of Exploratory Search (ES) to enable an exploration process in which users begin with a vaguely defined information need and progressively sharpen their definition of extraction tasks as they identify relations of interest in the underlying data. This process extends the application of Relation Extraction to use cases characterized by imprecise information needs and uncertainty regarding the information content of available data. We present an interactive workflow that allows users to build extractors based on entity types and human-readable extraction patterns derived from subtrees in dependency trees. In order to evaluate the viability of our approach on large text corpora, we conduct experiments on a dataset of over 160 million sentences with mentions of over 6 million FREEBASE entities extracted from the CLUEWEB09 corpus. Our experiments indicate that even non-expert users can intuitively use our approach to identify relations and create high precision extractors with minimal effort.", "keyphrases": ["large text corpora", "paradigm", "exploratory relation extraction"]} +{"id": "ferres-saggion-2022-alexsis", "title": "ALEXSIS: A Dataset for Lexical Simplification in Spanish", "abstract": "Lexical Simplification is the process of reducing the lexical complexity of a text by replacing difficult words with easier to read (or understand) expressions while preserving the original information and meaning. In this paper we introduce ALEXSIS, a new dataset for this task, and we use ALEXSIS to benchmark Lexical Simplification systems in Spanish. The paper describes the evaluation of three kinds of approaches to Lexical Simplification: a thesaurus-based approach, a single transformers-based approach, and a combination of transformers. We also report state of the art results on a previous Lexical Simplification dataset for Spanish.", "keyphrases": ["lexical simplification", "spanish", "alexsis"]} +{"id": "guo-etal-2019-autosem", "title": "AutoSeM: Automatic Task Selection and Mixing in Multi-Task Learning", "abstract": "Multi-task learning (MTL) has achieved success over a wide range of problems, where the goal is to improve the performance of a primary task using a set of relevant auxiliary tasks. However, when the usefulness of the auxiliary tasks w.r.t. the primary task is not known a priori, the success of MTL models depends on the correct choice of these auxiliary tasks and also a balanced mixing ratio of these tasks during alternate training. These two problems could be resolved via manual intuition or hyper-parameter tuning over all combinatorial task choices, but this introduces inductive bias or is not scalable when the number of candidate auxiliary tasks is very large. To address these issues, we present AutoSeM, a two-stage MTL pipeline, where the first stage automatically selects the most useful auxiliary tasks via a Beta-Bernoulli multi-armed bandit with Thompson Sampling, and the second stage learns the training mixing ratio of these selected auxiliary tasks via a Gaussian Process based Bayesian optimization framework. We conduct several MTL experiments on the GLUE language understanding tasks, and show that our AutoSeM framework can successfully find relevant auxiliary tasks and automatically learn their mixing ratio, achieving significant performance boosts on several primary tasks. 
Finally, we present ablations for each stage of AutoSeM and analyze the learned auxiliary task choices.", "keyphrases": ["multi-task learning", "ratio", "useful auxiliary task", "thompson sampling", "autosem"]} +{"id": "harashima-etal-2016-large", "title": "A Large-scale Recipe and Meal Data Collection as Infrastructure for Food Research", "abstract": "Everyday meals are an important part of our daily lives and, currently, there are many Internet sites that help us plan these meals. Allied to the growth in the amount of food data such as recipes available on the Internet is an increase in the number of studies on these data, such as recipe analysis and recipe search. However, there are few publicly available resources for food research; those that do exist do not include a wide range of food data or any meal data (that is, likely combinations of recipes). In this study, we construct a large-scale recipe and meal data collection as the underlying infrastructure to promote food research. Our corpus consists of approximately 1.7 million recipes and 36,000 meals in cookpad, one of the largest recipe sites in the world. We made the corpus available to researchers in February 2015 and as of February 2016, 82 research groups at 56 universities have made use of it to enhance their studies.", "keyphrases": ["recipe", "meal data collection", "food research"]} +{"id": "wang-etal-2018-joint-embedding", "title": "Joint Embedding of Words and Labels for Text Classification", "abstract": "Word embeddings are effective intermediate representations for capturing semantic regularities between words, when learning the representations of text sequences. We propose to view text classification as a label-word joint embedding problem: each label is embedded in the same space with the word vectors. We introduce an attention framework that measures the compatibility of embeddings between text sequences and labels. The attention is learned on a training set of labeled samples to ensure that, given a text sequence, the relevant words are weighted higher than the irrelevant ones. Our method maintains the interpretability of word embeddings, and enjoys a built-in ability to leverage alternative sources of information, in addition to input text sequences. Extensive results on several large text datasets show that the proposed framework outperforms the state-of-the-art methods by a large margin, in terms of both accuracy and speed.", "keyphrases": ["text classification", "same space", "state-of-the-art method", "previous label", "deep learning"]} +{"id": "toutanova-etal-2015-representing", "title": "Representing Text for Joint Embedding of Text and Knowledge Bases", "abstract": "Models that learn to represent textual and knowledge base relations in the same continuous latent space are able to perform joint inferences among the two kinds of relations and obtain high accuracy on knowledge base completion (Riedel et al., 2013). In this paper we propose a model that captures the compositional structure of textual relations, and jointly optimizes entity, knowledge base, and textual relation representations. 
The proposed model significantly improves performance over a model that does not share parameters among textual relations with common sub-structure.", "keyphrases": ["knowledge base completion", "textual relation", "schema"]} +{"id": "morey-etal-2018-dependency", "title": "A Dependency Perspective on RST Discourse Parsing and Evaluation", "abstract": "Computational text-level discourse analysis mostly happens within Rhetorical Structure Theory (RST), whose structures have classically been presented as constituency trees, and relies on data from the RST Discourse Treebank (RST-DT); as a result, the RST discourse parsing community has largely borrowed from the syntactic constituency parsing community. The standard evaluation procedure for RST discourse parsers is thus a simplified variant of PARSEVAL, and most RST discourse parsers use techniques that originated in syntactic constituency parsing. In this article, we isolate a number of conceptual and computational problems with the constituency hypothesis. We then examine the consequences, for the implementation and evaluation of RST discourse parsers, of adopting a dependency perspective on RST structures, a view advocated so far only by a few approaches to discourse parsing. While doing that, we show the importance of the notion of headedness of RST structures. We analyze RST discourse parsing as dependency parsing by adapting to RST a recent proposal in syntactic parsing that relies on head-ordered dependency trees, a representation isomorphic to headed constituency trees. We show how to convert the original trees from the RST corpus, RST-DT, and their binarized versions used by all existing RST parsers to head-ordered dependency trees. We also propose a way to convert existing simple dependency parser output to constituent trees. This allows us to evaluate and to compare approaches from both constituent-based and dependency-based perspectives in a unified framework, using constituency and dependency metrics. We thus propose an evaluation framework to compare extant approaches easily and uniformly, something the RST parsing community has lacked up to now. We can also compare parsers' predictions to each other across frameworks. This allows us to characterize families of parsing strategies across the different frameworks, in particular with respect to the notion of headedness. Our experiments provide evidence for the conceptual similarities between dependency parsers and shift-reduce constituency parsers, and confirm that dependency parsing constitutes a viable approach to RST discourse parsing.", "keyphrases": ["dependency perspective", "rst", "edu"]} +{"id": "mukherjee-liu-2012-aspect", "title": "Aspect Extraction through Semi-Supervised Modeling", "abstract": "Aspect extraction is a central problem in sentiment analysis. Current methods either extract aspects without categorizing them, or extract and categorize them using unsupervised topic modeling. By categorizing, we mean the synonymous aspects should be clustered into the same category. In this paper, we solve the problem in a different setting where the user provides some seed words for a few aspect categories and the model extracts and clusters aspect terms into categories simultaneously. This setting is important because categorizing aspects is a subjective task. For different application purposes, different categorizations may be needed. Some form of user guidance is desired. 
In this paper, we propose two statistical models to solve this seeded problem, which aim to discover exactly what the user wants. Our experimental results show that the two proposed models are indeed able to perform the task effectively.", "keyphrases": ["extraction", "sentiment analysis", "topic model", "aspect category"]} +{"id": "somasundaran-etal-2007-detecting", "title": "Detecting Arguing and Sentiment in Meetings", "abstract": "This paper analyzes opinion categories like Sentiment and Arguing in meetings. We first annotate the categories manually. We then develop genre-specific lexicons using interesting function word combinations for detecting the opinions. We analyze relations between dialog structure information and opinion expression in context of multiparty discourse. Finally we show that classifiers using lexical and discourse knowledge have significant improvement over baseline.", "keyphrases": ["arguing", "meeting", "dialog structure information"]} +{"id": "mullen-collier-2004-sentiment", "title": "Sentiment Analysis using Support Vector Machines with Diverse Information Sources", "abstract": "This paper introduces an approach to sentiment analysis which uses support vector machines (SVMs) to bring together diverse sources of potentially pertinent information, including several favorability measures for phrases and adjectives and, where available, knowledge of the topic of the text. Models using the features introduced are further combined with unigram models which have been shown to be effective in the past (Pang et al., 2002) and lemmatized versions of the unigram models. Experiments on movie review data from Epinions.com demonstrate that hybrid SVMs which combine unigram-style feature-based SVMs with those based on real-valued favorability measures obtain superior performance, producing the best results yet published using this data. Further experiments using a feature set enriched with topic information on a smaller dataset of music reviews hand-annotated for topic are also reported, the results of which suggest that incorporating topic information into such models may also yield improvement.", "keyphrases": ["support vector machines", "svm", "diverse source", "favorability measure", "sentiment analysis"]} +{"id": "shekhar-etal-2019-beyond", "title": "Beyond task success: A closer look at jointly learning to see, ask, and GuessWhat", "abstract": "We propose a grounded dialogue state encoder which addresses a foundational issue on how to integrate visual grounding with dialogue system components. As a test-bed, we focus on the GuessWhat?! game, a two-player game where the goal is to identify an object in a complex visual scene by asking a sequence of yes/no questions. Our visually-grounded encoder leverages synergies between guessing and asking questions, as it is trained jointly using multi-task learning. We further enrich our model via a cooperative learning regime. We show that the introduction of both the joint architecture and cooperative learning lead to accuracy improvements over the baseline system. We compare our approach to an alternative system which extends the baseline with reinforcement learning. Our in-depth analysis shows that the linguistic skills of the two models differ dramatically, despite approaching comparable performance levels. 
This points at the importance of analyzing the linguistic output of competing systems beyond numeric comparison solely based on task success.", "keyphrases": ["task success", "guesswhat", "reinforcement learning", "gdse"]} +{"id": "qiu-etal-2006-paraphrase", "title": "Paraphrase Recognition via Dissimilarity Significance Classification", "abstract": "We propose a supervised, two-phase framework to address the problem of paraphrase recognition (PR). Unlike most PR systems that focus on sentence similarity, our framework detects dissimilarities between sentences and makes its paraphrase judgment based on the significance of such dissimilarities. The ability to differentiate significant dissimilarities not only reveals what makes two sentences a non-paraphrase, but also helps to recall additional paraphrases that contain extra but insignificant information. Experimental results show that while being accurate at discerning non-paraphrasing dissimilarities, our implemented system is able to achieve higher paraphrase recall (93%), at an overall performance comparable to the alternatives.", "keyphrases": ["dissimilarity", "significance", "paraphrase recognition"]} +{"id": "maegaard-etal-2006-kunsti", "title": "KUNSTI - Knowledge Generation for Norwegian Language Technology", "abstract": "KUNSTI is the Norwegian national language technology programme, running 2001-2006 inclusive. The goal of the programme is to boost Norwegian language technology research. In this paper we describe the background, the objectives, the methodology applied in the management of the programme, the projects selected, and our first conclusions. We also describe national programmes from Sweden, France and Germany and compare objectives and methods.", "keyphrases": ["knowledge generation", "norwegian language technology", "kunsti"]} +{"id": "oostdijk-etal-2008-coi", "title": "From D-Coi to SoNaR: a reference corpus for Dutch", "abstract": "The computational linguistics community in The Netherlands and Belgium has long recognized the dire need for a major reference corpus of written Dutch. In part to answer this need, the STEVIN programme was established. To pave the way for the effective building of a 500-million-word reference corpus of written Dutch, a pilot project was established. The Dutch Corpus Initiative project or D-Coi was highly successful in that it not only realized about 10% of the projected large reference corpus, but also established the best practices and developed all the protocols and the necessary tools for building the larger corpus within the confines of a necessarily limited budget. We outline the steps involved in an endeavour of this kind, including the major highlights and possible pitfalls. Once converted to a suitable XML format, further linguistic annotation based on the state-of-the-art tools developed either before or during the pilot by the consortium partners proved easily and fruitfully applicable. Linguistic enrichment of the corpus includes PoS tagging, syntactic parsing and semantic annotation, involving both semantic role labeling and spatiotemporal annotation. D-Coi is expected to be followed by SoNaR, during which the 500-million-word reference corpus of Dutch should be built.", "keyphrases": ["sonar", "reference corpus", "dutch"]} +{"id": "ravi-knight-2011-bayesian", "title": "Bayesian Inference for Zodiac and Other Homophonic Ciphers", "abstract": "We introduce a novel Bayesian approach for deciphering complex substitution ciphers. 
Our method uses a decipherment model which combines information from letter n-gram language models as well as word dictionaries. Bayesian inference is performed on our model using an efficient sampling technique. We evaluate the quality of the Bayesian decipherment output on simple and homophonic letter substitution ciphers and show that unlike a previous approach, our method consistently produces almost 100% accurate decipherments. The new method can be applied to more complex substitution ciphers and we demonstrate its utility by cracking the famous Zodiac-408 cipher in a fully automated fashion, which has never been done before.", "keyphrases": ["decipherment", "zodiac-408 cipher", "bayesian inference"]} +{"id": "fraser-etal-2013-automatic", "title": "Automatic speech recognition in the diagnosis of primary progressive aphasia", "abstract": "Narrative speech can provide a valuable source of information about an individual\u2019s linguistic abilities across lexical, syntactic, and pragmatic levels. However, analysis of narrative speech is typically done by hand, and is therefore extremely time-consuming. Use of automatic speech recognition (ASR) software could make this type of analysis more efficient and widely available. In this paper, we present the results of an initial attempt to use ASR technology to generate transcripts of spoken narratives from participants with semantic dementia (SD), progressive nonfluent aphasia (PNFA), and healthy controls. We extract text features from the transcripts and use these features, alone and in combination with acoustic features from the speech signals, to classify transcripts as patient versus control, and SD versus PNFA. Additionally, we generate artificially noisy transcripts by applying insertions, substitutions, and deletions to manually-transcribed data, allowing experiments to be conducted across a wider range of noise levels than are produced by a tuned ASR system. We find that reasonably good classification accuracies can be achieved by selecting appropriate features from the noisy transcripts. We also find that the choice of using ASR data or manually transcribed data as the training set can have a strong effect on the accuracy of the classifiers.", "keyphrases": ["diagnosis", "aphasia", "automatic speech recognition"]} +{"id": "vilar-etal-2007-translate", "title": "Can We Translate Letters?", "abstract": "Current statistical machine translation systems handle the translation process as the transformation of a string of symbols into another string of symbols. Normally the symbols dealt with are the words in different languages, sometimes with some additional information included, like morphological data. In this work we try to push the approach to the limit, working not on the level of words, but treating both the source and target sentences as a string of letters. We try to find out if a nearly unmodified state-of-the-art translation system is able to cope with the problem and whether it is capable of further generalizing translation rules, for example at the level of word suffixes and translation of unseen words. Experiments are carried out for the translation of Catalan to Spanish.", "keyphrases": ["letter", "translation system", "csmt", "address", "spelling-variant oov"]} +{"id": "hsieh-etal-2019-robustness", "title": "On the Robustness of Self-Attentive Models", "abstract": "This work examines the robustness of self-attentive neural networks against adversarial input perturbations. 
Specifically, we investigate the attention and feature extraction mechanisms of state-of-the-art recurrent neural networks and self-attentive architectures for sentiment analysis, entailment and machine translation under adversarial attacks. We also propose a novel attack algorithm for generating more natural adversarial examples that could mislead neural models but not humans. Experimental results show that, compared to recurrent neural models, self-attentive models are more robust against adversarial perturbation. In addition, we provide theoretical explanations for their superior robustness to support our claims.", "keyphrases": ["robustness", "self-attentive model", "machine translation", "attack"]} +{"id": "jiao-etal-2006-semi", "title": "Semi-Supervised Conditional Random Fields for Improved Sequence Segmentation and Labeling", "abstract": "We present a new semi-supervised training procedure for conditional random fields (CRFs) that can be used to train sequence segmentors and labelers from a combination of labeled and unlabeled training data. Our approach is based on extending the minimum entropy regularization framework to the structured prediction case, yielding a training objective that combines unlabeled conditional entropy with labeled conditional likelihood. Although the training objective is no longer concave, it can still be used to improve an initial model (e.g. obtained from supervised training) by iterative ascent. We apply our new training algorithm to the problem of identifying gene and protein mentions in biological texts, and show that incorporating unlabeled data improves the performance of the supervised CRF in this case.", "keyphrases": ["crf", "conditional entropy", "unlabeled data"]} +{"id": "ostermann-etal-2018-mcscript", "title": "MCScript: A Novel Dataset for Assessing Machine Comprehension Using Script Knowledge", "abstract": "We introduce a large dataset of narrative texts and questions about these texts, intended to be used in a machine comprehension task that requires reasoning using commonsense knowledge. Our dataset complements similar datasets in that we focus on stories about everyday activities, such as going to the movies or working in the garden, and that the questions require commonsense knowledge, or more specifically, script knowledge, to be answered. We show that our mode of data collection via crowdsourcing results in a substantial amount of such inference questions. The dataset forms the basis of a shared task on commonsense and script knowledge organized at SemEval 2018 and provides challenging test cases for the broader natural language understanding community.", "keyphrases": ["machine comprehension", "script knowledge", "story", "mcscript"]} +{"id": "li-etal-2014-text", "title": "Text-level Discourse Dependency Parsing", "abstract": "Previous research on text-level discourse parsing mainly made use of constituency structure to parse the whole document into one discourse tree. In this paper, we present the limitations of constituency-based discourse parsing and first propose to use dependency structure to directly represent the relations between elementary discourse units (EDUs). The state-of-the-art dependency parsing techniques, the Eisner algorithm and maximum spanning tree (MST) algorithm, are adopted to parse an optimal discourse dependency tree based on the arc-factored model and the large-margin learning techniques. 
Experiments show that our discourse dependency parsers achieve a competitive performance on text-level discourse parsing.", "keyphrases": ["dependency structure", "discourse unit", "edu", "rst tree"]} +{"id": "mi-etal-2008-forest", "title": "Forest-Based Translation", "abstract": "Among syntax-based translation models, the tree-based approach, which takes as input a parse tree of the source sentence, is a promising direction being faster and simpler than its string-based counterpart. However, current tree-based systems suffer from a major drawback: they only use the 1-best parse to direct the translation, which potentially introduces translation mistakes due to parsing errors. We propose a forest-based approach that translates a packed forest of exponentially many parses, which encodes many more alternatives than standard n-best lists. Large-scale experiments show an absolute improvement of 1.7 BLEU points over the 1-best baseline. This result is also 0.8 points higher than decoding with 30-best parses, and takes even less time.", "keyphrases": ["parse tree", "forest", "alternative"]} +{"id": "neelakantan-etal-2015-compositional", "title": "Compositional Vector Space Models for Knowledge Base Completion", "abstract": "Knowledge base (KB) completion adds new facts to a KB by making inferences from existing facts, for example by inferring with high likelihood nationality(X,Y) from bornIn(X,Y). Most previous methods infer simple one-hop relational synonyms like this, or use as evidence a multi-hop relational path treated as an atomic feature, like bornIn(X,Z)\u2192 containedIn(Z,Y). This paper presents an approach that reasons about conjunctions of multi-hop relations non-atomically, composing the implications of a path using a recurrent neural network (RNN) that takes as inputs vector embeddings of the binary relation in the path. Not only does this allow us to generalize to paths unseen at training time, but also, with a single high-capacity RNN, to predict new relation types not seen when the compositional model was trained (zero-shot learning). We assemble a new dataset of over 52M relational triples, and show that our method improves over a traditional classifier by 11%, and a method leveraging pre-trained embeddings by 7%.", "keyphrases": ["knowledge base completion", "path", "reasoning", "recurrent neural network", "compositional model"]} +{"id": "garcia-etal-2021-probing", "title": "Probing for idiomaticity in vector space models", "abstract": "Contextualised word representation models have been successfully used for capturing different word usages and they may be an attractive alternative for representing idiomaticity in language. In this paper, we propose probing measures to assess if some of the expected linguistic properties of noun compounds, especially those related to idiomatic meanings, and their dependence on context and sensitivity to lexical choice, are readily available in some standard and widely used representations. For that, we constructed the Noun Compound Senses Dataset, which contains noun compounds and their paraphrases, in context neutral and context informative naturalistic sentences, in two languages: English and Portuguese. 
Results obtained using four types of probing measures with models like ELMo, BERT and some of its variants indicate that idiomaticity is not yet accurately represented by contextualised models.", "keyphrases": ["idiomaticity", "vector space model", "noun compound", "paraphrase", "bert"]} +{"id": "bansal-etal-2020-self", "title": "Self-Supervised Meta-Learning for Few-Shot Natural Language Classification Tasks", "abstract": "Self-supervised pre-training of transformer models has revolutionized NLP applications. Such pre-training with language modeling objectives provides a useful initial point for parameters that generalize well to new tasks with fine-tuning. However, fine-tuning is still data inefficient \u2014 when there are few labeled examples, accuracy can be low. Data efficiency can be improved by optimizing pre-training directly for future fine-tuning with few examples; this can be treated as a meta-learning problem. However, standard meta-learning techniques require many training tasks in order to generalize; unfortunately, finding a diverse set of such supervised tasks is usually difficult. This paper proposes a self-supervised approach to generate a large, rich, meta-learning task distribution from unlabeled text. This is achieved using a cloze-style objective, but creating separate multi-class classification tasks by gathering tokens-to-be blanked from among only a handful of vocabulary terms. This yields as many unique meta-training tasks as the number of subsets of vocabulary terms. We meta-train a transformer model on this distribution of tasks using a recent meta-learning framework. On 17 NLP tasks, we show that this meta-training leads to better few-shot generalization than language-model pre-training followed by finetuning. Furthermore, we show how the self-supervised tasks can be combined with supervised tasks for meta-learning, providing substantial accuracy gains over previous supervised meta-learning.", "keyphrases": ["meta-learning", "generalization", "unlabeled text"]} +{"id": "bel-etal-2012-automatic", "title": "Automatic lexical semantic classification of nouns", "abstract": "The work we present here addresses cue-based noun classification in English and Spanish. Its main objective is to automatically acquire lexical semantic information by classifying nouns into previously known noun lexical classes. This is achieved by using particular aspects of linguistic contexts as cues that identify a specific lexical class. Here we concentrate on the task of identifying such cues and the theoretical background that allows for an assessment of the complexity of the task. The results show that, despite the a priori complexity of the task, cue-based classification is a useful tool in the automatic acquisition of lexical semantic classes.", "keyphrases": ["noun", "semantic class", "most approach"]} +{"id": "pool-nissim-2016-distant", "title": "Distant supervision for emotion detection using Facebook reactions", "abstract": "We exploit the Facebook reaction feature in a distant supervised fashion to train a support vector machine classifier for emotion detection, using several feature combinations and combining different Facebook pages. We test our models on existing benchmarks for emotion detection and show that employing only information that is derived completely automatically, thus without relying on any handcrafted lexicon, as is usually done, we can achieve competitive results. 
The results also show that there is large room for improvement, especially by gearing the collection of Facebook pages towards the target domain.", "keyphrases": ["emotion detection", "facebook reaction", "distant supervision"]} +{"id": "sirts-etal-2017-idea", "title": "Idea density for predicting Alzheimer's disease from transcribed speech", "abstract": "Idea Density (ID) measures the rate at which ideas or elementary predications are expressed in an utterance or in a text. Lower ID is found to be associated with an increased risk of developing Alzheimer's disease (AD) (Snowdon et al., 1996; Engelman et al., 2010). ID has been used in two different versions: propositional idea density (PID) counts the expressed ideas and can be applied to any text while semantic idea density (SID) counts pre-defined information content units and is naturally more applicable to normative domains, such as picture description tasks. In this paper, we develop DEPID, a novel dependency-based method for computing PID, and its version DEPID-R that enables to exclude repeating ideas\u2014a feature characteristic to AD speech. We conduct the first comparison of automatically extracted PID and SID in the diagnostic classification task on two different AD datasets covering both closed-topic and free-recall domains. While SID performs better on the normative dataset, adding PID leads to a small but significant improvement (+1.7 F-score). On the free-topic dataset, PID performs better than SID as expected (77.6 vs 72.3 in F-score) but adding the features derived from the word embedding clustering underlying the automatic SID increases the results considerably, leading to an F-score of 84.8.", "keyphrases": ["alzheimer", "disease", "idea density"]} +{"id": "barrett-etal-2018-sequence", "title": "Sequence Classification with Human Attention", "abstract": "Learning attention functions requires large volumes of data, but many NLP tasks simulate human behavior, and in this paper, we show that human attention really does provide a good inductive bias on many attention functions in NLP. Specifically, we use estimated human attention derived from eye-tracking corpora to regularize attention functions in recurrent neural networks. We show substantial improvements across a range of tasks, including sentiment analysis, grammatical error detection, and detection of abusive language.", "keyphrases": ["human attention", "sentiment analysis", "rationale"]} +{"id": "tamburini-melandri-2012-anita", "title": "AnIta: a powerful morphological analyser for Italian", "abstract": "In this paper we present AnIta, a powerful morphological analyser for Italian implemented within the framework of finite-state-automata models. It is provided with a large lexicon containing more than 110,000 lemmas that enable it to cover relevant portions of Italian texts. We describe our design choices for the management of inflectional phenomena as well as some interesting new features to explicitly handle derivational and compositional processes in Italian, namely the wordform segmentation structure and Derivation Graph. Two different evaluation experiments, for testing coverage (Recall) and Precision, are described in detail, comparing the AnIta performances with some other freely available tools to handle Italian morphology. The experimental results show that the AnIta Morphological Analyser obtains the best performances among the tested systems, with Recall = 97.21% and Precision = 98.71%. 
This tool was a fundamental building block for designing a performant PoS-tagger and Lemmatiser for the Italian language that participated in two EVALITA evaluation campaigns, ranking in both cases among the best performing systems.", "keyphrases": ["powerful morphological analyser", "italian", "anita"]} +{"id": "daume-iii-jagarlamudi-2011-domain", "title": "Domain Adaptation for Machine Translation by Mining Unseen Words", "abstract": "We show that unseen words account for a large part of the translation error when moving to new domains. Using an extension of a recent approach to mining translations from comparable corpora (Haghighi et al., 2008), we are able to find translations for otherwise OOV terms. We show several approaches to integrating such translations into a phrase-based translation system, yielding consistent improvements in translation quality (between 0.5 and 1.5 Bleu points) on four domains and two language pairs.", "keyphrases": ["machine translation", "new domain", "induction", "dictionary mining technique"]} +{"id": "wang-lu-2018-neural", "title": "Neural Segmental Hypergraphs for Overlapping Mention Recognition", "abstract": "In this work, we propose a novel segmental hypergraph representation to model overlapping entity mentions that are prevalent in many practical datasets. We show that our model built on top of such a new representation is able to capture features and interactions that cannot be captured by previous models while maintaining a low time complexity for inference. We also present a theoretical analysis to formally assess how our representation is better than alternative representations reported in the literature in terms of representational power. Coupled with neural networks for feature learning, our model achieves the state-of-the-art performance in three benchmark datasets annotated with overlapping mentions.", "keyphrases": ["hypergraph", "ambiguity", "neural segmental hypergraph"]} +{"id": "liu-etal-2020-norm", "title": "Norm-Based Curriculum Learning for Neural Machine Translation", "abstract": "A neural machine translation (NMT) system is expensive to train, especially with high-resource settings. As the NMT architectures become deeper and wider, this issue gets worse and worse. In this paper, we aim to improve the efficiency of training an NMT by introducing a novel norm-based curriculum learning method. We use the norm (aka length or module) of a word embedding as a measure of 1) the difficulty of the sentence, 2) the competence of the model, and 3) the weight of the sentence. The norm-based sentence difficulty takes the advantages of both linguistically motivated and model-based sentence difficulties. It is easy to determine and contains learning-dependent features. The norm-based model competence makes NMT learn the curriculum in a fully automated way, while the norm-based sentence weight further enhances the learning of the vector representation of the NMT. Experimental results for the WMT'14 English-German and WMT'17 Chinese-English translation tasks demonstrate that the proposed method outperforms strong baselines in terms of BLEU score (+1.17/+1.56) and training speedup (2.22x/3.33x).", "keyphrases": ["curriculum", "neural machine translation", "norm", "training example"]} +{"id": "chen-etal-2017-neural", "title": "Neural Machine Translation with Source Dependency Representation", "abstract": "Source dependency information has been successfully introduced into statistical machine translation. 
However, there are only a few preliminary attempts for Neural Machine Translation (NMT), such as concatenating representations of source word and its dependency label together. In this paper, we propose a novel NMT with source dependency representation to improve translation performance of NMT, especially long sentences. Empirical results on NIST Chinese-to-English translation task show that our method achieves 1.6 BLEU improvements on average over a strong NMT system.", "keyphrases": ["source dependency representation", "neural machine translation", "cnn"]} +{"id": "gardent-etal-2017-webnlg", "title": "The WebNLG Challenge: Generating Text from RDF Data", "abstract": "The WebNLG challenge consists in mapping sets of RDF triples to text. It provides a common benchmark on which to train, evaluate and compare \u201cmicroplanners\u201d, i.e. generation systems that verbalise a given content by making a range of complex interacting choices including referring expression generation, aggregation, lexicalisation, surface realisation and sentence segmentation. In this paper, we introduce the microplanning task, describe data preparation, introduce our evaluation methodology, analyse participant results and provide a brief description of the participating systems.", "keyphrases": ["webnlg challenge", "rdf data", "generation task", "table"]} +{"id": "cherry-bergsma-2005-expectation", "title": "An Expectation Maximization Approach to Pronoun Resolution", "abstract": "We propose an unsupervised Expectation Maximization approach to pronoun resolution. The system learns from a fixed list of potential antecedents for each pronoun. We show that unsupervised learning is possible in this context, as the performance of our system is comparable to supervised methods. Our results indicate that a probabilistic gender/number model, determined automatically from unlabeled text, is a powerful feature for this task.", "keyphrases": ["expectation maximization approach", "pronoun resolution", "coreference resolution"]} +{"id": "li-etal-2018-one", "title": "One Sentence One Model for Neural Machine Translation", "abstract": "Neural machine translation (NMT) becomes a new state-of-the-art and achieves promising translation results using a simple encoder-decoder neural network. This neural network is trained once on the parallel corpus and the fixed network is used to translate all the test sentences. We argue that the general fixed network cannot best fit the specific test sentences. In this paper, we propose the dynamic NMT which learns a general network as usual, and then fine-tunes the network for each test sentence. The fine-tune work is done on a small set of the bilingual training data that is obtained through similarity search according to the test sentence. Extensive experiments demonstrate that this method can significantly improve the translation performance, especially when highly similar sentences are available.", "keyphrases": ["neural machine translation", "similar sentence", "nmt model", "training corpus"]} +{"id": "janssen-2012-neotag", "title": "NeoTag: a POS Tagger for Grammatical Neologism Detection", "abstract": "POS Taggers typically fail to correctly tag grammatical neologisms: for known words, a tagger will only take known tags into account, and hence discard any possibility that the word is used in a novel or deviant grammatical category in the text at hand. Grammatical neologisms are relatively rare, and therefore do not pose a significant problem for the overall performance of a tagger. 
But for studies on neologisms and grammaticalization processes, this makes traditional taggers rather unfit. This article describes a modified POS tagger that explicitly considers new tags for known words, hence making it better fit for neologism research. This tagger, called NeoTag, has an overall accuracy that is comparable to other taggers, but scores much better for grammatical neologisms. To achieve this, the tagger applies a system of lexical smoothing, which adds new categories to known words based on known homographs. NeoTag also lemmatizes words as part of the tagging system, achieving a high accuracy on lemmatization for both known and unknown words, without the need for an external lexicon. The use of NeoTag is not restricted to grammatical neologism detection, and it can be used for other purposes as well.", "keyphrases": ["pos tagger", "grammatical neologism detection", "neotag"]} +{"id": "yin-etal-2018-structvae", "title": "StructVAE: Tree-structured Latent Variable Models for Semi-supervised Semantic Parsing", "abstract": "Semantic parsing is the task of transducing natural language (NL) utterances into formal meaning representations (MRs), commonly represented as tree structures. Annotating NL utterances with their corresponding MRs is expensive and time-consuming, and thus the limited availability of labeled data often becomes the bottleneck of data-driven, supervised models. We introduce StructVAE, a variational auto-encoding model for semi-supervised semantic parsing, which learns both from limited amounts of parallel data, and readily-available unlabeled NL utterances. StructVAE models latent MRs not observed in the unlabeled data as tree-structured latent variables. Experiments on semantic parsing on the ATIS domain and Python code generation show that with extra unlabeled data, StructVAE outperforms strong supervised models.", "keyphrases": ["latent variable", "semi-supervised semantic parsing", "limited amount", "unlabeled data", "logical form"]} +{"id": "miculicich-werlen-popescu-belis-2017-validation", "title": "Validation of an Automatic Metric for the Accuracy of Pronoun Translation (APT)", "abstract": "In this paper, we define and assess a reference-based metric to evaluate the accuracy of pronoun translation (APT). The metric automatically aligns a candidate and a reference translation using GIZA++ augmented with specific heuristics, and then counts the number of identical or different pronouns, with provision for legitimate variations and omitted pronouns. All counts are then combined into one score. The metric is applied to the results of seven systems (including the baseline) that participated in the DiscoMT 2015 shared task on pronoun translation from English to French. The APT metric reaches around 0.993-0.999 Pearson correlation with human judges (depending on the parameters of APT), while other automatic metrics such as BLEU, METEOR, or those specific to pronouns used at DiscoMT 2015 reach only 0.972-0.986 Pearson correlation.", "keyphrases": ["automatic metric", "pronoun translation", "apt"]} +{"id": "mcclosky-etal-2008-self", "title": "When is Self-Training Effective for Parsing?", "abstract": "Self-training has been shown capable of improving on state-of-the-art parser performance (McClosky et al., 2006) despite the conventional wisdom on the matter and several studies to the contrary (Charniak, 1997; Steedman et al., 2003). However, it has remained unclear when and why self-training is helpful. 
In this paper, we test four hypotheses (namely, presence of a phase transition, impact of search errors, value of non-generative reranker features, and effects of unknown words). From these experiments, we gain a better understanding of why self-training works for parsing. Since improvements from self-training are correlated with unknown bigrams and biheads but not unknown words, the benefit of self-training appears most influenced by seeing known words in new combinations.", "keyphrases": ["self-training", "known word", "factor", "new context"]} +{"id": "pantel-etal-2007-isp", "title": "ISP: Learning Inferential Selectional Preferences", "abstract": "Semantic inference is a key component for advanced natural language understanding. However, existing collections of automatically acquired inference rules have shown disappointing results when used in applications such as textual entailment and question answering. This paper presents ISP, a collection of methods for automatically learning admissible argument values to which an inference rule can be applied, which we call inferential selectional preferences, and methods for filtering out incorrect inferences. We evaluate ISP and present empirical evidence of its effectiveness.", "keyphrases": ["inferential selectional preferences", "inference rule", "isp", "semantic class", "argument type"]} +{"id": "ljubesic-etal-2018-predicting", "title": "Predicting Concreteness and Imageability of Words Within and Across Languages via Word Embeddings", "abstract": "The notions of concreteness and imageability, traditionally important in psycholinguistics, are gaining significance in semantic-oriented natural language processing tasks. In this paper we investigate the predictability of these two concepts via supervised learning, using word embeddings as explanatory variables. We perform predictions both within and across languages by exploiting collections of cross-lingual embeddings aligned to a single vector space. We show that the notions of concreteness and imageability are highly predictable both within and across languages, with a moderate loss of up to 20% in correlation when predicting across languages. We further show that the cross-lingual transfer via word embeddings is more efficient than the simple transfer via bilingual dictionaries.", "keyphrases": ["concreteness", "imageability", "word embedding"]} +{"id": "boudin-2016-pke", "title": "pke: an open source python-based keyphrase extraction toolkit", "abstract": "We describe pke, an open source python-based keyphrase extraction toolkit. It provides an end-to-end keyphrase extraction pipeline in which each component can be easily modified or extended to develop new approaches. pke also allows for easy benchmarking of state-of-the-art keyphrase extraction approaches, and ships with supervised models trained on the SemEval-2010 dataset.", "keyphrases": ["open source", "keyphrase extraction toolkit", "pke"]} +{"id": "shain-etal-2016-memory-access", "title": "Memory access during incremental sentence processing causes reading time latency", "abstract": "Studies on the role of memory as a predictor of reading time latencies (1) differ in their predictions about when memory effects should occur in processing and (2) have had mixed results, with strong positive effects emerging from isolated constructed stimuli and weak or even negative effects emerging from naturally-occurring stimuli. 
Our study addresses these concerns by comparing several implementations of prominent sentence processing theories on an exploratory corpus and evaluating the most successful of these on a confirmatory corpus, using a new self-paced reading corpus of seemingly natural narratives constructed to contain an unusually high proportion of memory-intensive constructions. We show highly significant and complementary broad-coverage latency effects both for predictors based on the Dependency Locality Theory and for predictors based on a left-corner parsing model of sentence processing. Our results indicate that memory access during sentence processing does take time, but suggest that stimuli requiring many memory access events may be necessary in order to observe the effect.", "keyphrases": ["sentence processing", "time latency", "memory access"]} +{"id": "wadden-etal-2020-fact", "title": "Fact or Fiction: Verifying Scientific Claims", "abstract": "We introduce scientific claim verification, a new task to select abstracts from the research literature containing evidence that SUPPORTS or REFUTES a given scientific claim, and to identify rationales justifying each decision. To study this task, we construct SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts annotated with labels and rationales. We develop baseline models for SciFact, and demonstrate that simple domain adaptation techniques substantially improve performance compared to models trained on Wikipedia or political news. We show that our system is able to verify claims related to COVID-19 by identifying evidence from the CORD-19 corpus. Our experiments indicate that SciFact will provide a challenging testbed for the development of new systems designed to retrieve and reason over corpora containing specialized domain knowledge. Data and code for this new task are publicly available at . A leaderboard and COVID-19 fact-checking demo are available at .", "keyphrases": ["claim", "scifact", "wikipedia"]} +{"id": "guo-etal-2013-improved", "title": "Improved Information Structure Analysis of Scientific Documents Through Discourse and Lexical Constraints", "abstract": "Inferring the information structure of scientific documents is useful for many downstream applications. Existing feature-based machine learning approaches to this task require substantial training data and suffer from limited performance. Our idea is to guide feature-based models with declarative domain knowledge encoded as posterior distribution constraints. We explore a rich set of discourse and lexical constraints which we incorporate through the Generalized Expectation (GE) criterion. Our constrained model improves the performance of existing fully and lightly supervised models. Even a fully unsupervised version of this model outperforms lightly supervised feature-based models, showing that our approach can be useful even when no labeled data is available.", "keyphrases": ["information structure analysis", "scientific document", "discourse"]} +{"id": "deng-byrne-2005-hmm", "title": "HMM Word and Phrase Alignment for Statistical Machine Translation", "abstract": "Estimation and alignment procedures for word and phrase alignment hidden Markov models (HMMs) are developed for the alignment of parallel text. The development of these models is motivated by an analysis of the desirable features of IBM Model 4, one of the original and most effective models for word alignment. 
These models are formulated to capture the desirable aspects of Model 4 in an HMM alignment formalism. Alignment behavior is analyzed and compared to human-generated reference alignments, and the ability of these models to capture different types of alignment phenomena is evaluated. In analyzing alignment performance, Chinese-English word alignments are shown to be comparable to those of IBM Model 4 even when models are trained over large parallel texts. In translation performance, phrase-based statistical machine translation systems based on these HMM alignments can equal and exceed systems based on Model 4 alignments, and this is shown in Arabic-English and Chinese-English translation. These alignment models can also be used to generate posterior statistics over collections of parallel text, and this is used to refine and extend phrase translation tables with a resulting improvement in translation quality.", "keyphrases": ["phrase alignment", "markov model", "hmm model"]} +{"id": "elfardy-diab-2013-sentence", "title": "Sentence Level Dialect Identification in Arabic", "abstract": "This paper introduces a supervised approach for performing sentence level dialect identification between Modern Standard Arabic and Egyptian Dialectal Arabic. We use token level labels to derive sentence-level features. These features are then used with other core and meta features to train a generative classifier that predicts the correct label for each sentence in the given input text. The system achieves an accuracy of 85.5% on an Arabic online-commentary dataset outperforming a previously proposed approach achieving 80.9% and reflecting a significant gain over a majority baseline of 51.9% and two strong baseline systems of 78.5% and 80.4%, respectively.", "keyphrases": ["dialect", "arabic", "egyptian dialectal arabic", "sentence level"]} +{"id": "schuster-manning-2016-enhanced", "title": "Enhanced English Universal Dependencies: An Improved Representation for Natural Language Understanding Tasks", "abstract": "Many shallow natural language understanding tasks use dependency trees to extract relations between content words. However, strict surface-structure dependency trees tend to follow the linguistic structure of sentences too closely and frequently fail to provide direct relations between content words. To mitigate this problem, the original Stanford Dependencies representation also defines two dependency graph representations which contain additional and augmented relations that explicitly capture otherwise implicit relations between content words. In this paper, we revisit and extend these dependency graph representations in light of the recent Universal Dependencies (UD) initiative and provide a detailed account of an enhanced and an enhanced++ English UD representation. We further present a converter from constituency to basic, i.e., strict surface structure, UD trees, and a converter from basic UD trees to enhanced and enhanced++ English UD graphs. We release both converters as part of Stanford CoreNLP and the Stanford Parser.", "keyphrases": ["content word", "implicit relation", "enhanced universal dependencies"]} +{"id": "das-etal-2010-probabilistic", "title": "Probabilistic Frame-Semantic Parsing", "abstract": "This paper contributes a formalization of frame-semantic parsing as a structure prediction problem and describes an implemented parser that transforms an English sentence into a frame-semantic representation. 
It finds words that evoke FrameNet frames, selects frames for them, and locates the arguments for each frame. The system uses two feature-based, discriminative probabilistic (log-linear) models, one with latent variables to permit disambiguation of new predicate words. The parser is demonstrated to significantly outperform previously published results.", "keyphrases": ["frame-semantic parsing", "frame", "disambiguation", "predicate", "srl"]} +{"id": "wang-etal-2018-joint", "title": "Joint Training of Candidate Extraction and Answer Selection for Reading Comprehension", "abstract": "While sophisticated neural-based techniques have been developed in reading comprehension, most approaches model the answer in an independent manner, ignoring its relations with other answer candidates. This problem can be even worse in open-domain scenarios, where candidates from multiple passages should be combined to answer a single question. In this paper, we formulate reading comprehension as an extract-then-select two-stage procedure. We first extract answer candidates from passages, then select the final answer by combining information from all the candidates. Furthermore, we regard candidate extraction as a latent variable and train the two-stage process jointly with reinforcement learning. As a result, our approach has improved the state-of-the-art performance significantly on two challenging open-domain reading comprehension datasets. Further analysis demonstrates the effectiveness of our model components, especially the information fusion of all the candidates and the joint training of the extract-then-select procedure.", "keyphrases": ["candidate extraction", "answer selection", "reading comprehension"]} +{"id": "gupta-etal-2010-capturing", "title": "Capturing the Stars: Predicting Ratings for Service and Product Reviews", "abstract": "Bloggers, professional reviewers, and consumers continuously create opinion-rich web reviews about products and services, with the result that textual reviews are now abundant on the web and often convey a useful overall rating (number of stars). However, an overall rating cannot express the multiple or conflicting opinions that might be contained in the text, or explicitly rate the different aspects of the evaluated entity. This work addresses the task of automatically predicting ratings, for given aspects of a textual review, by assigning a numerical score to each evaluated aspect in the reviews. We handle this task as both a regression and a classification modeling problem and explore several combinations of syntactic and semantic features. Our results suggest that classification techniques perform better than ranking modeling when handling evaluative text.", "keyphrases": ["star", "rating", "service"]} +{"id": "li-2010-understanding", "title": "Understanding the Semantic Structure of Noun Phrase Queries", "abstract": "Determining the semantic intent of web queries not only involves identifying their semantic class, which is a primary focus of previous works, but also understanding their semantic structure. In this work, we formally define the semantic structure of noun phrase queries as comprised of intent heads and intent modifiers. We present methods that automatically identify these constituents as well as their semantic roles based on Markov and semi-Markov conditional random fields.
We show that the use of semantic features and syntactic features significantly contribute to improving the understanding performance.", "keyphrases": ["semantic structure", "query", "intent modifier", "conditional random field", "wonderland"]} +{"id": "abdul-mageed-etal-2018-tweet", "title": "You Tweet What You Speak: A City-Level Dataset of Arabic Dialects", "abstract": "Arabic has a wide range of varieties or dialects. Although a number of pioneering works have targeted some Arabic dialects, other dialects remain largely without investigation. A serious bottleneck for studying these dialects is lack of any data that can be exploited in computational models. In this work, we aim to bridge this gap: We present a considerably large dataset of > 1/4 billion tweets representing a wide range of dialects. Our dataset is more nuanced than previously reported work in that it is labeled at the fine-grained level of city. More specifically, the data represent 29 major Arab cities from 10 Arab countries with varying dialects (e.g., Egyptian, Gulf, KSA, Levantine, Yemeni).", "keyphrases": ["arabic dialect", "large dataset", "country"]} +{"id": "stanojevic-simaan-2015-beer", "title": "BEER 1.1: ILLC UvA submission to metrics and tuning task", "abstract": "We describe the submissions of ILLC UvA to the metrics and tuning tasks on WMT15. Both submissions are based on the BEER evaluation metric originally presented on WMT14 (Stanojevic and Sima\u2019an, 2014a). The main changes introduced this year are: (i) extending the learning-to-rank trained sentence level metric to the corpus level (but still decomposable to sentence level), (ii) incorporating syntactic ingredients based on dependency trees, and (iii) a technique for finding parameters of BEER that avoid \u201cgaming of the metric\u201d during tuning.", "keyphrases": ["illc", "submission", "beer"]} +{"id": "koponen-2012-comparing", "title": "Comparing human perceptions of post-editing effort with post-editing operations", "abstract": "Post-editing performed by translators is an increasingly common use of machine translated texts. While high quality MT may increase productivity, post-editing poor translations can be a frustrating task which requires more effort than translating from scratch. For this reason, estimating whether machine translations are of sufficient quality to be used for post-editing and finding means to reduce post-editing effort are an important field of study. Post-editing effort consists of different aspects, of which temporal effort, or the time spent on post-editing, is the most visible and involves not only the technical effort needed to perform the editing, but also the cognitive effort required to detect and plan necessary corrections. Cognitive effort is difficult to examine directly, but ways to reduce the cognitive effort in particular may prove valuable in reducing the frustration associated with post-editing work. In this paper, we describe an experiment aimed at studying the relationship between technical post-editing effort and cognitive post-editing effort by comparing cases where the edit distance and a manual score reflecting perceived effort differ. 
We present results of an error analysis performed on such sentences and discuss the clues they may provide about edits requiring great cognitive effort compared to the technical effort, on one hand, or little cognitive effort, on the other.", "keyphrases": ["post-editing effort", "cognitive effort", "edit distance", "error analysis"]} +{"id": "shu-etal-2017-doc", "title": "DOC: Deep Open Classification of Text Documents", "abstract": "Traditional supervised learning makes the closed-world assumption that the classes appeared in the test data must have appeared in training. This also applies to text learning or text classification. As learning is used increasingly in dynamic open environments where some new/test documents may not belong to any of the training classes, identifying these novel documents during classification presents an important problem. This problem is called open-world classification or open classification. This paper proposes a novel deep learning based approach. It outperforms existing state-of-the-art techniques dramatically.", "keyphrases": ["doc", "multi-class classifier", "unknown intent detection", "final layer", "space risk"]} +{"id": "dou-etal-2014-beyond", "title": "Beyond Parallel Data: Joint Word Alignment and Decipherment Improves Machine Translation", "abstract": "Inspired by previous work, where decipherment is used to improve machine translation, we propose a new idea to combine word alignment and decipherment into a single learning process. We use EM to estimate the model parameters, not only to maximize the probability of parallel corpus, but also the monolingual corpus. We apply our approach to improve Malagasy-English machine translation, where only a small amount of parallel data is available. In our experiments, we observe gains of 0.9 to 2.1 Bleu over a strong baseline.", "keyphrases": ["parallel data", "decipherment", "machine translation"]} +{"id": "duong-etal-2013-simpler", "title": "Simpler unsupervised POS tagging with bilingual projections", "abstract": "We present an unsupervised approach to part-of-speech tagging based on projections of tags in a word-aligned bilingual parallel corpus. In contrast to the existing state-of-the-art approach of Das and Petrov, we have developed a substantially simpler method by automatically identifying \u201cgood\u201d training sentences from the parallel corpus and applying self-training. In experimental results on eight languages, our method achieves state-of-the-art results.", "keyphrases": ["projection", "training sentence", "pos tagger", "european language"]} +{"id": "lievers-huang-2016-lexicon", "title": "A lexicon of perception for the identification of synaesthetic metaphors in corpora", "abstract": "Synaesthesia is a type of metaphor associating linguistic expressions that refer to two different sensory modalities. Previous studies, based on the analysis of poetic texts, have shown that synaesthetic transfers tend to go from the lower toward the higher senses (e.g., sweet music vs. musical sweetness). In non-literary language synaesthesia is rare, and finding a sufficient number of examples manually would be too time-consuming. In order to verify whether the directionality also holds for conventional synaesthesia found in non-literary texts, an automatic procedure for the identification of instances of synaesthesia is therefore highly desirable. In this paper, we first focus on the preliminary step of this procedure, that is, the creation of a controlled lexicon of perception. 
Next, we present the results of a small pilot study that applies the extraction procedure to English and Italian corpus data.", "keyphrases": ["perception", "identification", "synaesthetic aspect"]} +{"id": "tromble-etal-2008-lattice", "title": "Lattice Minimum Bayes-Risk Decoding for Statistical Machine Translation", "abstract": "We present Minimum Bayes-Risk (MBR) decoding over translation lattices that compactly encode a huge number of translation hypotheses. We describe conditions on the loss function that will enable efficient implementation of MBR decoders on lattices. We introduce an approximation to the BLEU score (Papineni et al., 2001) that satisfies these conditions. The MBR decoding under this approximate BLEU is realized using Weighted Finite State Automata. Our experiments show that the Lattice MBR decoder yields moderate, consistent gains in translation performance over N-best MBR decoding on Arabic-to-English, Chinese-to-English and English-to-Chinese translation tasks. We conduct a range of experiments to understand why Lattice MBR improves upon N-best MBR and study the impact of various parameters on MBR performance.", "keyphrases": ["mbr", "approximation", "translation task", "lattice minimum bayes-risk", "compact representation"]} +{"id": "yu-etal-2016-building", "title": "Building Chinese Affective Resources in Valence-Arousal Dimensions", "abstract": "An increasing amount of research has recently focused on representing affective states as continuous numerical values on multiple dimensions, such as the valence-arousal (VA) space. Compared to the categorical approach that represents affective states as several classes (e.g., positive and negative), the dimensional approach can provide more fine-grained sentiment analysis. However, affective resources with valence-arousal ratings are still very rare, especially for the Chinese language. Therefore, this study builds 1) an affective lexicon called Chinese valence-arousal words (CVAW) containing 1,653 words, and 2) an affective corpus called Chinese valence-arousal text (CVAT) containing 2,009 sentences extracted from web texts. To improve the annotation quality, a corpus cleanup procedure is used to remove outlier ratings and improper texts. Experiments using CVAW words to predict the VA ratings of the CVAT corpus show results comparable to those obtained using English affective resources.", "keyphrases": ["dimension", "affective state", "chinese sentence"]} +{"id": "pado-lapata-2007-dependency", "title": "Dependency-Based Construction of Semantic Space Models", "abstract": "Traditionally, vector-based semantic space models use word co-occurrence counts from large corpora to represent lexical meaning. In this article we present a novel framework for constructing semantic spaces that takes syntactic relations into account. We introduce a formalization for this class of models, which allows linguistic knowledge to guide the construction process. We evaluate our framework on a range of tasks relevant for cognitive science and natural language processing: semantic priming, synonymy detection, and word sense disambiguation. 
In all cases, our framework obtains results that are comparable or superior to the state of the art.", "keyphrases": ["semantic space", "co-occurrence", "pado\u0301", "target word", "statistic"]} +{"id": "purver-battersby-2012-experimenting", "title": "Experimenting with Distant Supervision for Emotion Classification", "abstract": "We describe a set of experiments using automatically labelled data to train supervised classifiers for multi-class emotion detection in Twitter messages with no manual intervention. By cross-validating between models trained on different labellings for the same six basic emotion classes, and testing on manually labelled data, we conclude that the method is suitable for some emotions (happiness, sadness and anger) but less able to distinguish others; and that different labelling conventions are more suitable for some emotions than others.", "keyphrases": ["distant supervision", "emotion classification", "twitter message", "hashtag", "community"]} +{"id": "toutanova-etal-2008-applying", "title": "Applying Morphology Generation Models to Machine Translation", "abstract": "We improve the quality of statistical machine translation (SMT) by applying models that predict word forms from their stems using extensive morphological and syntactic information from both the source and target languages. Our inflection generation models are trained independently of the SMT system. We investigate different ways of combining the inflection prediction component with the SMT system by training the base MT system on fully inflected forms or on word stems. We applied our inflection generation models in translating English into two morphologically complex languages, Russian and Arabic, and show that our model improves the quality of SMT over both phrasal and syntax-based SMT systems according to BLEU and human judgements.", "keyphrases": ["morphological generation", "machine translation", "inflection", "morpho-syntactic information"]} +{"id": "kong-zhou-2011-combining", "title": "Combining Dependency and Constituent-based Syntactic Information for Anaphoricity Determination in Coreference Resolution", "abstract": "This paper systematically explores the effectiveness of dependency and constituent-based syntactic information for anaphoricity determination. In particular, this paper proposes two ways to combine dependency and constituent-based syntactic information to explore their complementary advantage. One is a dependency-driven constituent-based structured representation, and the other uses a composite kernel. Evaluation on the Automatic Content Extraction (ACE) 2003 corpus shows that dependency and constituent-based syntactic information are quite complementary and proper combination can much improve the performance of anaphoricity determination, and further improve the performance of coreference resolution.", "keyphrases": ["constituent-based syntactic information", "anaphoricity determination", "coreference resolution"]} +{"id": "chen-bansal-2018-fast", "title": "Fast Abstractive Summarization with Reinforce-Selected Sentence Rewriting", "abstract": "Inspired by how humans summarize long documents, we propose an accurate and fast summarization model that first selects salient sentences and then rewrites them abstractively (i.e., compresses and paraphrases) to generate a concise overall summary. 
We use a novel sentence-level policy gradient method to bridge the non-differentiable computation between these two neural networks in a hierarchical way, while maintaining language fluency. Empirically, we achieve the new state-of-the-art on all metrics (including human evaluation) on the CNN/Daily Mail dataset, as well as significantly higher abstractiveness scores. Moreover, by first operating at the sentence-level and then the word-level, we enable parallel decoding of our neural generative model that results in substantially faster (10-20x) inference speed as well as 4x faster training convergence than previous long-paragraph encoder-decoder models. We also demonstrate the generalization of our model on the test-only DUC-2002 dataset, where we achieve higher scores than a state-of-the-art model.", "keyphrases": ["summarization", "extractor", "content selection", "extract-then-rewrite architecture", "pipeline"]} +{"id": "kumar-etal-2009-efficient", "title": "Efficient Minimum Error Rate Training and Minimum Bayes-Risk Decoding for Translation Hypergraphs and Lattices", "abstract": "Minimum Error Rate Training (MERT) and Minimum Bayes-Risk (MBR) decoding are used in most current state-of-the-art Statistical Machine Translation (SMT) systems. The algorithms were originally developed to work with N-best lists of translations, and recently extended to lattices that encode many more hypotheses than typical N-best lists. We here extend lattice-based MERT and MBR algorithms to work with hypergraphs that encode a vast number of translations produced by MT systems based on Synchronous Context Free Grammars. These algorithms are more efficient than the lattice-based versions presented earlier. We show how MERT can be employed to optimize parameters for MBR decoding. Our experiments show speedups from MERT and MBR as well as performance improvements from MBR decoding on several language pairs.", "keyphrases": ["minimum bayes-risk", "lattice", "mbr", "context free grammars", "loss function"]} +{"id": "kodaira-etal-2016-controlled", "title": "Controlled and Balanced Dataset for Japanese Lexical Simplification", "abstract": "We propose a new dataset for evaluating a Japanese lexical simplification method. Previous datasets have several deficiencies. All of them substitute only a single target word, and some of them extract sentences only from newswire corpus. In addition, most of these datasets do not allow ties and integrate simplification ranking from all the annotators without considering the quality. In contrast, our dataset has the following advantages: (1) it is the first controlled and balanced dataset for Japanese lexical simplification with high correlation with human judgment and (2) the consistency of the simplification ranking is improved by allowing candidates to have ties and by considering the reliability of annotators.", "keyphrases": ["balanced dataset", "japanese lexical simplification", "annotator"]} +{"id": "ribeiro-etal-2018-semantically", "title": "Semantically Equivalent Adversarial Rules for Debugging NLP models", "abstract": "Complex machine learning models for NLP are often brittle, making different predictions for input instances that are extremely similar semantically. To automatically detect this behavior for individual instances, we present semantically equivalent adversaries (SEAs) \u2013 semantic-preserving perturbations that induce changes in the model's predictions. 
We generalize these adversaries into semantically equivalent adversarial rules (SEARs) \u2013 simple, universal replacement rules that induce adversaries on many instances. We demonstrate the usefulness and flexibility of SEAs and SEARs by detecting bugs in black-box state-of-the-art models for three domains: machine comprehension, visual question-answering, and sentiment analysis. Via user studies, we demonstrate that we generate high-quality local adversaries for more instances than humans, and that SEARs induce four times as many mistakes as the bugs discovered by human experts. SEARs are also actionable: retraining models using data augmentation significantly reduces bugs, while maintaining accuracy.", "keyphrases": ["equivalent adversarial rules", "nlp model", "semantic-preserving perturbation", "attack", "paraphrase"]} +{"id": "tomokiyo-boitet-2016-corpus", "title": "Corpus and dictionary development for classifiers/quantifiers towards a French-Japanese machine translation", "abstract": "Although quantifiers/classifiers expressions occur frequently in everyday communications or written documents, there is no description for them in classical bilingual paper dictionaries, nor in machine-readable dictionaries. The paper describes a corpus and dictionary development for quantifiers/classifiers, and their usage in the framework of French-Japanese machine translation (MT). They often cause problems of lexical ambiguity and of set phrase recognition during analysis, in particular for a long-distance language pair like French and Japanese. For the development of a dictionary aiming at ambiguity resolution for expressions including quantifiers and classifiers which may be ambiguous with common nouns, we have annotated our corpus with UWs (interlingual lexemes) of UNL (Universal Networking Language) found on the UNL-jp dictionary. The extraction of potential classifiers/quantifiers from corpus is made by UNLexplorer web service. Keywords : classifiers, quantifiers, phraseology study, corpus annotation, UNL (Universal Networking Language), UWs dictionary, Tori Bank, French-Japanese machine translation (MT).", "keyphrases": ["dictionary development", "quantifier", "french-japanese machine translation"]} +{"id": "zhang-etal-2017-corpus", "title": "A Corpus of Annotated Revisions for Studying Argumentative Writing", "abstract": "This paper presents ArgRewrite, a corpus of between-draft revisions of argumentative essays. Drafts are manually aligned at the sentence level, and the writer's purpose for each revision is annotated with categories analogous to those used in argument mining and discourse analysis. The corpus should enable advanced research in writing comparison and revision analysis, as demonstrated via our own studies of student revision behavior and of automatic revision purpose prediction.", "keyphrases": ["revision", "argumentative writing", "argrewrite corpus"]} +{"id": "kreuz-caucci-2007-lexical", "title": "Lexical Influences on the Perception of Sarcasm", "abstract": "Speakers and listeners make use of a variety of pragmatic factors to produce and identify sarcastic statements. It is also possible that lexical factors play a role, although this possibility has not been investigated previously. College students were asked to read excerpts from published works that originally contained the phrase said sarcastically, although the word sarcastically was deleted. 
The participants rated the characters' statements in these excerpts as more likely to be sarcastic than those from similar excerpts that did not originally contain the word sarcastically. The use of interjections, such as gee or gosh, predicted a significant amount of the variance in the participants' ratings of sarcastic intent. This outcome suggests that sarcastic statements may be more formulaic than previously realized. It also suggests that computer software could be written to recognize such lexical factors, greatly increasing the likelihood that non-literal intent could be correctly interpreted by such programs, even if they are unable to identify the pragmatic components of nonliteral language.", "keyphrases": ["sarcasm", "factor", "lexical feature"]} +{"id": "xiao-etal-2016-transg", "title": "TransG : A Generative Model for Knowledge Graph Embedding", "abstract": "Recently, knowledge graph embedding, which projects symbolic entities and relations into continuous vector space, has become a new, hot topic in artificial intelligence. This paper addresses a new issue of multiple relation semantics that a relation may have multiple meanings revealed by the entity pairs associated with the corresponding triples, and proposes a novel Gaussian mixture model for embedding, TransG. The new model can discover latent semantics for a relation and leverage a mixture of relation component vectors for embedding a fact triple. To the best of our knowledge, this is the first generative model for knowledge graph embedding, which is able to deal with multiple relation semantics. Extensive experiments show that the proposed model achieves substantial improvements against the state-of-the-art baselines.", "keyphrases": ["generative model", "knowledge graph", "relation semantic"]} +{"id": "zhang-wallace-2017-sensitivity", "title": "A Sensitivity Analysis of (and Practitioners' Guide to) Convolutional Neural Networks for Sentence Classification", "abstract": "Convolutional Neural Networks (CNNs) have recently achieved remarkably strong performance on the practically important task of sentence classification (Kim, 2014; Kalchbrenner et al., 2014; Johnson and Zhang, 2014; Zhang et al., 2016). However, these models require practitioners to specify an exact model architecture and set accompanying hyperparameters, including the filter region size, regularization parameters, and so on. It is currently unknown how sensitive model performance is to changes in these configurations for the task of sentence classification. We thus conduct a sensitivity analysis of one-layer CNNs to explore the effect of architecture components on model performance; our aim is to distinguish between important and comparatively inconsequential design decisions for sentence classification. We focus on one-layer CNNs (to the exclusion of more complex models) due to their comparative simplicity and strong empirical performance, which makes it a modern standard baseline method akin to Support Vector Machines (SVMs) and logistic regression. We derive practical advice from our extensive empirical results for those interested in getting the most out of CNNs for sentence classification in real world settings.", "keyphrases": ["convolutional neural networks", "sentence classification", "cnn", "model performance", "architecture component"]} +{"id": "gui-etal-2017-part", "title": "Part-of-Speech Tagging for Twitter with Adversarial Neural Networks", "abstract": "In this work, we study the problem of part-of-speech tagging for Tweets.
In contrast to newswire articles, Tweets are usually informal and contain numerous out-of-vocabulary words. Moreover, there is a lack of large scale labeled datasets for this domain. To tackle these challenges, we propose a novel neural network to make use of out-of-domain labeled data, unlabeled in-domain data, and labeled in-domain data. Inspired by adversarial neural networks, the proposed method tries to learn common features through adversarial discriminator. In addition, we hypothesize that domain-specific features of target domain should be preserved in some degree. Hence, the proposed method adopts a sequence-to-sequence autoencoder to perform this task. Experimental results on three different datasets show that our method achieves better performance than state-of-the-art methods.", "keyphrases": ["twitter", "adversarial discriminator", "part-of-speech tagging"]} +{"id": "zhou-etal-2006-paraeval", "title": "ParaEval: Using Paraphrases to Evaluate Summaries Automatically", "abstract": "ParaEval is an automated evaluation method for comparing reference and peer summaries. It facilitates a tiered-comparison strategy where recall-oriented global optimal and local greedy searches for paraphrase matching are enabled in the top tiers. We utilize a domain-independent paraphrase table extracted from a large bilingual parallel corpus using methods from Machine Translation (MT). We show that the quality of ParaEval's evaluations, measured by correlating with human judgments, closely resembles that of ROUGE's.", "keyphrases": ["paraphrase", "machine translation", "summarization"]} +{"id": "hashimoto-etal-2012-excitatory", "title": "Excitatory or Inhibitory: A New Semantic Orientation Extracts Contradiction and Causality from the Web", "abstract": "We propose a new semantic orientation, Excitation, and its automatic acquisition method. Excitation is a semantic property of predicates that classifies them into excitatory, inhibitory and neutral. We show that Excitation is useful for extracting contradiction pairs (e.g., destroy cancer \u21d4 develop cancer) and causality pairs (e.g., increase in crime \u21d2 heighten anxiety). Our experiments show that with automatically acquired Excitation knowledge we can extract one million contradiction pairs and 500,000 causality pairs with about 70% precision from a 600 million page Web corpus. Furthermore, by combining these extracted causality and contradiction pairs, we can generate one million plausible causality hypotheses that are not written in any single sentence in our corpus with reasonable precision.", "keyphrases": ["causality", "predicate", "excitatory"]} +{"id": "koper-schulte-im-walde-2014-rank", "title": "A Rank-based Distance Measure to Detect Polysemy and to Determine Salient Vector-Space Features for German Prepositions", "abstract": "This paper addresses vector space models of prepositions, a notoriously ambiguous word class. We propose a rank-based distance measure to explore the vector-spatial properties of the ambiguous objects, focusing on two research tasks: (i) to distinguish polysemous from monosemous prepositions in vector space; and (ii) to determine salient vector-space features for a classification of preposition senses. The rank-based measure predicts the polysemy vs.
monosemy of prepositions with a precision of up to 88%, and suggests preposition-subcategorised nouns as more salient preposition features than preposition-subcategorising verbs.", "keyphrases": ["rank-based distance measure", "polysemy", "preposition"]} +{"id": "yasunaga-etal-2017-graph", "title": "Graph-based Neural Multi-Document Summarization", "abstract": "We propose a neural multi-document summarization system that incorporates sentence relation graphs. We employ a Graph Convolutional Network (GCN) on the relation graphs, with sentence embeddings obtained from Recurrent Neural Networks as input node features. Through multiple layer-wise propagation, the GCN generates high-level hidden sentence features for salience estimation. We then use a greedy heuristic to extract salient sentences that avoid redundancy. In our experiments on DUC 2004, we consider three types of sentence relation graphs and demonstrate the advantage of combining sentence relations in graphs with the representation power of deep neural networks. Our model improves upon other traditional graph-based extractive approaches and the vanilla GRU sequence model with no graph, and it achieves competitive results against other state-of-the-art multi-document summarization systems.", "keyphrases": ["summarization", "sentence relation graph", "graph convolutional network"]} +{"id": "liu-etal-2018-negpar", "title": "NegPar: A parallel corpus annotated for negation", "abstract": "Although the existence of English corpora annotated for negation has allowed for extensive work on monolingual negation detection, little is understood on how negation-related phenomena translate across languages. The current study fills this gap by presenting NegPar, the first English-Chinese parallel corpus annotated for negation in the narrative domain (a collection of stories from Conan Doyle\u2019s Sherlock Holmes). While we followed the annotation guidelines in the CONANDOYLE-NEG corpus (Morante and Daelemans, 2012), we reannotated certain scope-related phenomena to ensure more consistent and interpretable semantic representation. To both ease the annotation process and analyze how similar negation is signaled in the two languages, we experimented with first projecting the annotations from English and then manually correcting the projection output in Chinese. Results show that projecting negation via word-alignment offers limited help to the annotation process, as negation can be rendered in different ways across languages.", "keyphrases": ["parallel corpus", "negation", "annotation process"]} +{"id": "mohammad-etal-2013-nrc", "title": "NRC-Canada: Building the State-of-the-Art in Sentiment Analysis of Tweets", "abstract": "In this paper, we describe how we created two state-of-the-art SVM classifiers, one to detect the sentiment of messages such as tweets and SMS (message-level task) and one to detect the sentiment of a term within a message (term-level task). Among submissions from 44 teams in a competition, our submissions stood first in both tasks on tweets, obtaining an F-score of 69.02 in the message-level task and 88.93 in the term-level task. We implemented a variety of surface-form, semantic, and sentiment features. We also generated two large word\u2010sentiment association lexicons, one from tweets with sentiment-word hashtags, and one from tweets with emoticons. In the message-level task, the lexicon-based features provided a gain of 5 F-score points over all others. 
Both of our systems can be replicated using freely available resources.", "keyphrases": ["sentiment analysis", "hashtag", "lexicon-based feature", "n-gram", "twitter data"]} +{"id": "rahimi-etal-2017-continuous", "title": "Continuous Representation of Location for Geolocation and Lexical Dialectology using Mixture Density Networks", "abstract": "We propose a method for embedding two-dimensional locations in a continuous vector space using a neural network-based model incorporating mixtures of Gaussian distributions, presenting two model variants for text-based geolocation and lexical dialectology. Evaluated over Twitter data, the proposed model outperforms conventional regression-based geolocation and provides a better estimate of uncertainty. We also show the effectiveness of the representation for predicting words from location in lexical dialectology, and evaluate it using the DARE dataset.", "keyphrases": ["location", "lexical dialectology", "mixture density networks"]} +{"id": "bruni-etal-2013-vsem", "title": "VSEM: An open library for visual semantics representation", "abstract": "VSEM is an open library for visual semantics. Starting from a collection of tagged images, it is possible to automatically construct an image-based representation of concepts by using off-the-shelf VSEM functionalities. VSEM is entirely written in MATLAB and its object-oriented design allows a large flexibility and reusability. The software is accompanied by a website with supporting documentation and examples.", "keyphrases": ["open library", "visual semantic representation", "vsem"]} +{"id": "dredze-crammer-2008-online", "title": "Online Methods for Multi-Domain Learning and Adaptation", "abstract": "NLP tasks are often domain specific, yet systems can learn behaviors across multiple domains. We develop a new multi-domain online learning framework based on parameter combination from multiple classifiers. Our algorithms draw from multi-task learning and domain adaptation to adapt multiple source domain classifiers to a new target domain, learn across multiple similar domains, and learn across a large number of disparate domains. We evaluate our algorithms on two popular NLP domain adaptation tasks: sentiment classification and spam filtering.", "keyphrases": ["multi-domain learning", "adaptation", "learning framework"]} +{"id": "zhu-etal-2020-attend", "title": "Attend, Translate and Summarize: An Efficient Method for Neural Cross-Lingual Summarization", "abstract": "Cross-lingual summarization aims at summarizing a document in one language (e.g., Chinese) into another language (e.g., English). In this paper, we propose a novel method inspired by the translation pattern in the process of obtaining a cross-lingual summary. We first attend to some words in the source text, then translate them into the target language, and summarize to get the final summary. Specifically, we first employ the encoder-decoder attention distribution to attend to the source words. Second, we present three strategies to acquire the translation probability, which helps obtain the translation candidates for each source word. Finally, each summary word is generated either from the neural distribution or from the translation candidates of source words.
Experimental results on Chinese-to-English and English-to-Chinese summarization tasks have shown that our proposed method can significantly outperform the baselines, achieving comparable performance with the state-of-the-art.", "keyphrases": ["summarization", "translation pattern", "probabilistic bilingual lexicon"]} +{"id": "shah-etal-2016-shef", "title": "SHEF-Multimodal: Grounding Machine Translation on Images", "abstract": "This paper describes the University of Sheffield\u2019s submission for the WMT16 Multimodal Machine Translation shared task, where we participated in Task 1 to develop German-to-English and English-to-German statistical machine translation (SMT) systems in the domain of image descriptions. Our proposed systems are standard phrase-based SMT systems based on the Moses decoder, trained only on the provided data. We investigate how image features can be used to re-rank the n-best list produced by the SMT model, with the aim of improving performance by grounding the translations on images. Our submissions are able to outperform the strong, text-only baseline system for both directions.", "keyphrases": ["image", "multimodal machine translation", "phrase-based smt system", "textual feature", "input bitext"]} +{"id": "jung-etal-2019-earlier", "title": "Earlier Isn't Always Better: Sub-aspect Analysis on Corpus and System Biases in Summarization", "abstract": "Despite the recent developments on neural summarization systems, the underlying logic behind the improvements from the systems and its corpus-dependency remains largely unexplored. Position of sentences in the original text, for example, is a well known bias for news summarization. Following in the spirit of the claim that summarization is a combination of sub-functions, we define three sub-aspects of summarization: position, importance, and diversity and conduct an extensive analysis of the biases of each sub-aspect with respect to the domain of nine different summarization corpora (e.g., news, academic papers, meeting minutes, movie script, books, posts). We find that while position exhibits substantial bias in news articles, this is not the case, for example, with academic papers and meeting minutes. Furthermore, our empirical study shows that different types of summarization systems (e.g., neural-based) are composed of different degrees of the sub-aspects. Our study provides useful lessons regarding consideration of underlying sub-aspects when collecting a new summarization dataset or developing a new system.", "keyphrases": ["sub-aspect", "summarization", "dataset bias"]} +{"id": "zheng-etal-2019-simultaneous", "title": "Simultaneous Translation with Flexible Policy via Restricted Imitation Learning", "abstract": "Simultaneous translation is widely useful but remains one of the most difficult tasks in NLP. Previous work either uses fixed-latency policies, or train a complicated two-staged model using reinforcement learning. We propose a much simpler single model that adds a \u201cdelay\u201d token to the target vocabulary, and design a restricted dynamic oracle to greatly simplify training.
Experiments on Chinese-English simultaneous translation show that our work leads to flexible policies that achieve better BLEU scores and lower latencies compared to both fixed and RL-learned policies.", "keyphrases": ["policy", "imitation learning", "target vocabulary", "simultaneous translation"]} +{"id": "wubben-etal-2012-sentence", "title": "Sentence Simplification by Monolingual Machine Translation", "abstract": "In this paper we describe a method for simplifying sentences using Phrase Based Machine Translation, augmented with a re-ranking heuristic based on dissimilarity, and trained on a monolingual parallel corpus. We compare our system to a word-substitution baseline and two state-of-the-art systems, all trained and tested on paired sentences from the English part of Wikipedia and Simple Wikipedia. Human test subjects judge the output of the different systems. Analysing the judgements shows that by relatively careful phrase-based paraphrasing our model achieves similar simplification results to state-of-the-art systems, while generating better formed output. We also argue that text readability metrics such as the Flesch-Kincaid grade level should be used with caution when evaluating the output of simplification systems.", "keyphrases": ["dissimilarity", "sentence simplification", "pbmt", "fluency", "adequacy"]} +{"id": "teufel-etal-2006-automatic", "title": "Automatic classification of citation function", "abstract": "Citation function is defined as the author's reason for citing a given paper (e.g. acknowledgement of the use of the cited method). The automatic recognition of the rhetorical function of citations in scientific text has many applications, from improvement of impact factor calculations to text summarisation and more informative citation indexers. We show that our annotation scheme for citation function is reliable, and present a supervised machine learning framework to automatically classify citation function, using both shallow and linguistically-inspired features. We find, amongst other things, a strong relationship between citation function and sentiment classification.", "keyphrases": ["citation function", "automatic classification", "table", "explicit sentiment", "neutral"]} +{"id": "tian-etal-2020-improving", "title": "Improving Constituency Parsing with Span Attention", "abstract": "Constituency parsing is a fundamental and important task for natural language understanding, where a good representation of contextual information can help this task. N-grams, which is a conventional type of feature for contextual information, have been demonstrated to be useful in many tasks, and thus could also be beneficial for constituency parsing if they are appropriately modeled. In this paper, we propose span attention for neural chart-based constituency parsing to leverage n-gram information. Considering that current chart-based parsers with Transformer-based encoder represent spans by subtraction of the hidden states at the span boundaries, which may cause information loss especially for long spans, we incorporate n-grams into span representations by weighting them according to their contributions to the parsing process. Moreover, we propose categorical span attention to further enhance the model by weighting n-grams within different length categories, and thus benefit long-sentence parsing.
Experimental results on three widely used benchmark datasets demonstrate the effectiveness of our approach in parsing Arabic, Chinese, and English, where state-of-the-art performance is obtained by our approach on all of them.", "keyphrases": ["constituency", "span attention", "hidden state"]} +{"id": "hou-etal-2021-learning", "title": "Learning to Bridge Metric Spaces: Few-shot Joint Learning of Intent Detection and Slot Filling", "abstract": "In this paper, we investigate few-shot joint learning for dialogue language understanding. Most existing few-shot models learn a single task each time with only a few examples. However, dialogue language understanding contains two closely related tasks, i.e., intent detection and slot filling, and often benefits from jointly learning the two tasks. This calls for new few-shot learning techniques that are able to capture task relations from only a few examples and jointly learn multiple tasks. To achieve this, we propose a similarity-based few-shot learning scheme, named Contrastive Prototype Merging network (ConProm), that learns to bridge metric spaces of intent and slot on data-rich domains, and then adapt the bridged metric space to the specific few-shot domain. Experiments on two public datasets, Snips and FewJoint, show that our model significantly outperforms the strong baselines in one and five shots settings.", "keyphrases": ["few-shot joint learning", "intent detection", "slot filling"]} +{"id": "qi-etal-2020-prophetnet", "title": "ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training", "abstract": "This paper presents a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of optimizing one-step-ahead prediction in the traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction that predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large-scale dataset (160GB), respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pre-training corpus.", "keyphrases": ["future n-gram prediction", "future token", "prophetnet", "summarization task", "generation model"]} +{"id": "al-khatib-etal-2016-news", "title": "A News Editorial Corpus for Mining Argumentation Strategies", "abstract": "Many argumentative texts, and news editorials in particular, follow a specific strategy to persuade their readers of some opinion or attitude. This includes decisions such as when to tell an anecdote or where to support an assumption with statistics, which is reflected by the composition of different types of argumentative discourse units in a text. While several argument mining corpora have recently been published, they do not allow the study of argumentation strategies due to incomplete or coarse-grained unit annotations. This paper presents a novel corpus with 300 editorials from three diverse news portals that provides the basis for mining argumentation strategies.
Each unit in all editorials has been assigned one of six types by three annotators with a high Fleiss' Kappa agreement of 0.56. We investigate various challenges of the annotation process and we conduct a first corpus analysis. Our results reveal different strategies across the news portals, exemplifying the benefit of studying editorials\u2014a so far underresourced text genre in argument mining.", "keyphrases": ["mining argumentation strategy", "news editorial", "decision", "discourse unit"]} +{"id": "shaw-etal-2018-self", "title": "Self-Attention with Relative Position Representations", "abstract": "Relying entirely on an attention mechanism, the Transformer introduced by Vaswani et al. (2017) achieves state-of-the-art results for machine translation. In contrast to recurrent and convolutional neural networks, it does not explicitly model relative or absolute position information in its structure. Instead, it requires adding representations of absolute positions to its inputs. In this work we present an alternative approach, extending the self-attention mechanism to efficiently consider representations of the relative positions, or distances between sequence elements. On the WMT 2014 English-to-German and English-to-French translation tasks, this approach yields improvements of 1.3 BLEU and 0.3 BLEU over absolute position representations, respectively. Notably, we observe that combining relative and absolute position representations yields no further improvement in translation quality. We describe an efficient implementation of our method and cast it as an instance of relation-aware self-attention mechanisms that can generalize to arbitrary graph-labeled inputs.", "keyphrases": ["relative position representations", "sequence element", "self-attention", "san"]} +{"id": "wu-dredze-2020-languages", "title": "Are All Languages Created Equal in Multilingual BERT?", "abstract": "Multilingual BERT (mBERT) trained on 104 languages has shown surprisingly good cross-lingual performance on several NLP tasks, even without explicit cross-lingual signals. However, these evaluations have focused on cross-lingual transfer with high-resource languages, covering only a third of the languages covered by mBERT. We explore how mBERT performs on a much wider set of languages, focusing on the quality of representation for low-resource languages, measured by within-language performance. We consider three tasks: Named Entity Recognition (99 languages), Part-of-speech Tagging and Dependency Parsing (54 languages each). mBERT does better than or comparable to baselines on high resource languages but does much worse for low resource languages. Furthermore, monolingual BERT models for these languages do even worse. Paired with similar languages, the performance gap between monolingual BERT and mBERT can be narrowed. We find that better models for low resource languages require more efficient pretraining techniques or more data.", "keyphrases": ["multilingual bert", "low-resource language", "monolingual ability"]} +{"id": "freedman-etal-2011-extreme", "title": "Extreme Extraction \u2013 Machine Reading in a Week", "abstract": "We report on empirical results in extreme extraction. It is extreme in that (1) from receipt of the ontology specifying the target concepts and relations, development is limited to one week and that (2) relatively little training data is assumed. 
We are able to surpass human recall and achieve an F1 of 0.51 on a question-answering task with less than 50 hours of effort using a hybrid approach that mixes active learning, bootstrapping, and limited (5 hours) manual rule writing. We compare the performance of three systems: extraction with handwritten rules, bootstrapped extraction, and a combination. We show that while the recall of the handwritten rules surpasses that of the learned system, the learned system is able to improve the overall recall and F1.", "keyphrases": ["week", "ontology", "active learning", "extreme extraction"]} +{"id": "jiang-etal-2020-smart", "title": "SMART: Robust and Efficient Fine-Tuning for Pre-trained Natural Language Models through Principled Regularized Optimization", "abstract": "Transfer learning has fundamentally changed the landscape of natural language processing (NLP). Many state-of-the-art models are first pre-trained on a large text corpus and then fine-tuned on downstream tasks. However, due to limited data resources from downstream tasks and the extremely high complexity of pre-trained models, aggressive fine-tuning often causes the fine-tuned model to overfit the training data of downstream tasks and fail to generalize to unseen data. To address such an issue in a principled manner, we propose a new learning framework for robust and efficient fine-tuning for pre-trained models to attain better generalization performance. The proposed framework contains two important ingredients: 1. Smoothness-inducing regularization, which effectively manages the complexity of the model; 2. Bregman proximal point optimization, which is an instance of trust-region methods and can prevent aggressive updating. Our experiments show that the proposed framework achieves new state-of-the-art performance on a number of NLP tasks including GLUE, SNLI, SciTail and ANLI. Moreover, it also outperforms the state-of-the-art T5 model, which is the largest pre-trained model containing 11 billion parameters, on GLUE.", "keyphrases": ["fine-tuning", "downstream task", "unseen data", "smart", "adversarial training"]} +{"id": "erkan-radev-2004-lexpagerank", "title": "LexPageRank: Prestige in Multi-Document Text Summarization", "abstract": "Multidocument extractive summarization relies on the concept of sentence centrality to identify the most important sentences in a document. Centrality is typically defined in terms of the presence of particular important words or in terms of similarity to a centroid pseudo-sentence. We are now considering an approach for computing sentence importance based on the concept of eigenvector centrality (prestige) that we call LexPageRank. In this model, a sentence connectivity matrix is constructed based on cosine similarity. If the cosine similarity between two sentences exceeds a particular predefined threshold, a corresponding edge is added to the connectivity matrix. We provide an evaluation of our method on DUC 2004 data. The results show that our approach outperforms centroid-based summarization and is quite successful compared to other summarization systems.", "keyphrases": ["prestige", "text summarization", "eigenvector centrality", "lexpagerank"]} +{"id": "peng-etal-2017-deep", "title": "Deep Multitask Learning for Semantic Dependency Parsing", "abstract": "We present a deep neural architecture that parses sentences into three semantic dependency graph formalisms.
By using efficient, nearly arc-factored inference and a bidirectional-LSTM composed with a multi-layer perceptron, our base system is able to significantly improve the state of the art for semantic dependency parsing, without using hand-engineered features or syntax. We then explore two multitask learning approaches\u2014one that shares parameters across formalisms, and one that uses higher-order structures to predict the graphs jointly. We find that both approaches improve performance across formalisms on average, achieving a new state of the art. Our code is open-source and available at .", "keyphrases": ["semantic dependency parsing", "multi-task learning", "bilstm"]} +{"id": "sap-etal-2019-social", "title": "Social IQa: Commonsense Reasoning about Social Interactions", "abstract": "We introduce Social IQa, the first large-scale benchmark for commonsense reasoning about social situations. Social IQa contains 38,000 multiple choice questions for probing emotional and social intelligence in a variety of everyday situations (e.g., Q: \u201cJordan wanted to tell Tracy a secret, so Jordan leaned towards Tracy. Why did Jordan do this?\u201d A: \u201cMake sure no one else could hear\u201d). Through crowdsourcing, we collect commonsense questions along with correct and incorrect answers about social interactions, using a new framework that mitigates stylistic artifacts in incorrect answers by asking workers to provide the right answer to a different but related question. Empirical results show that our benchmark is challenging for existing question-answering models based on pretrained language models, compared to human performance (20% gap). Notably, we further establish Social IQa as a resource for transfer learning of commonsense knowledge, achieving state-of-the-art performance on multiple commonsense reasoning tasks (Winograd Schemas, COPA).", "keyphrases": ["commonsense reasoning", "multiple choice question", "human performance", "social iqa", "life"]} +{"id": "cao-etal-2020-incremental", "title": "Incremental Event Detection via Knowledge Consolidation Networks", "abstract": "Conventional approaches to event detection usually require a fixed set of pre-defined event types. Such a requirement is often challenged in real-world applications, as new events continually occur. Due to huge computation cost and storage budget, it is infeasible to store all previous data and re-train the model with all previous data and new data, every time new events arrive. We formulate such challenging scenarios as incremental event detection, which requires a model to learn new classes incrementally without performance degradation on previous classes. However, existing incremental learning methods cannot handle semantic ambiguity and training data imbalance problems between old and new classes in the task of incremental event detection. In this paper, we propose a Knowledge Consolidation Network (KCN) to address the above issues. Specifically, we devise two components, prototype enhanced retrospection and hierarchical distillation, to mitigate the adverse effects of semantic ambiguity and class imbalance, respectively.
Experimental results demonstrate the effectiveness of the proposed method, outperforming the state-of-the-art model by 19% and 13.4% of whole F1 score on ACE benchmark and TAC KBP benchmark, respectively.", "keyphrases": ["event detection", "knowledge consolidation network", "catastrophic forgetting"]} +{"id": "ma-etal-2019-sentence", "title": "Sentence-Level Evidence Embedding for Claim Verification with Hierarchical Attention Networks", "abstract": "Claim verification is generally a task of verifying the veracity of a given claim, which is critical to many downstream applications. It is cumbersome and inefficient for human fact-checkers to find consistent pieces of evidence, from which solid verdict could be inferred against the claim. In this paper, we propose a novel end-to-end hierarchical attention network focusing on learning to represent coherent evidence as well as their semantic relatedness with the claim. Our model consists of three main components: 1) A coherence-based attention layer embeds coherent evidence considering the claim and sentences from relevant articles; 2) An entailment-based attention layer attends on sentences that can semantically infer the claim on top of the first attention; and 3) An output layer predicts the verdict based on the embedded evidence. Experimental results on three public benchmark datasets show that our proposed model outperforms a set of state-of-the-art baselines.", "keyphrases": ["claim verification", "hierarchical attention networks", "sentence-level evidence"]} +{"id": "wadden-etal-2019-entity", "title": "Entity, Relation, and Event Extraction with Contextualized Span Representations", "abstract": "We examine the capabilities of a unified, multi-task framework for three information extraction tasks: named entity recognition, relation extraction, and event extraction. Our framework (called DyGIE++) accomplishes all tasks by enumerating, refining, and scoring text spans designed to capture local (within-sentence) and global (cross-sentence) context. Our framework achieves state-of-the-art results across all tasks, on four datasets from a variety of domains. We perform experiments comparing different techniques to construct span representations. Contextualized embeddings like BERT perform well at capturing relationships among entities in the same or adjacent sentences, while dynamic span graph updates model long-range cross-sentence relationships. For instance, propagating span representations via predicted coreference links can enable the model to disambiguate challenging entity mentions. Our code is publicly available at and can be easily adapted for new tasks or datasets.", "keyphrases": ["event extraction", "span representation", "multi-task framework", "entity recognition", "argument extraction"]} +{"id": "niehues-2012-continuous", "title": "Continuous space language models using restricted Boltzmann machines", "abstract": "We present a novel approach for continuous space language models in statistical machine translation by using Restricted Boltzmann Machines (RBMs). The probability of an n-gram is calculated by the free energy of the RBM instead of a feedforward neural net. Therefore, the calculation is much faster and can be integrated into the translation process instead of using the language model only in a re-ranking step. Furthermore, it is straightforward to introduce additional word factors into the language model. 
We observed a faster convergence in training if we include automatically generated word classes as an additional word factor. We evaluated the RBM-based language model on the German to English and English to French translation task of TED lectures. Instead of replacing the conventional n-gram-based language model, we trained the RBM-based language model on the more important but smaller in-domain data and combined them in a log-linear way. With this approach we could show improvements of about half a BLEU point on the translation task.", "keyphrases": ["space language model", "boltzmann machine", "probability"]} +{"id": "kryscinski-etal-2019-neural", "title": "Neural Text Summarization: A Critical Evaluation", "abstract": "Text summarization aims at compressing long documents into a shorter form that conveys the most important parts of the original document. Despite increased interest in the community and notable research effort, progress on benchmark datasets has stagnated. We critically evaluate key ingredients of the current research setup: datasets, evaluation metrics, and models, and highlight three primary shortcomings: 1) automatically collected datasets leave the task underconstrained and may contain noise detrimental to training and evaluation, 2) current evaluation protocol is weakly correlated with human judgment and does not account for important characteristics such as factual correctness, 3) models overfit to layout biases of current datasets and offer limited diversity in their outputs.", "keyphrases": ["evaluation metric", "neural text summarization", "source document", "tendency"]} +{"id": "badlani-etal-2019-ensemble", "title": "An Ensemble of Humour, Sarcasm, and Hate Speech for Sentiment Classification in Online Reviews", "abstract": "Due to the nature of online user reviews, sentiment analysis on such data requires a deep semantic understanding of the text. Many online reviews are sarcastic, humorous, or hateful. Signals from such language nuances may reinforce or completely alter the sentiment of a review as predicted by a machine learning model that attempts to detect sentiment alone. Thus, having a model that is explicitly aware of these features should help it perform better on reviews that are characterized by them. We propose a composite two-step model that extracts features pertaining to sarcasm, humour, hate speech, as well as sentiment, in the first step, feeding them in conjunction to inform sentiment classification in the second step. We show that this multi-step approach leads to a better empirical performance for sentiment classification than a model that predicts sentiment alone. A qualitative analysis reveals that the conjunctive approach can better capture the nuances of sentiment as expressed in online reviews.", "keyphrases": ["humour", "sarcasm", "sentiment classification", "review", "composite two-step model"]} +{"id": "schulte-im-walde-etal-2016-ghost", "title": "GhoSt-NN: A Representative Gold Standard of German Noun-Noun Compounds", "abstract": "This paper presents a novel gold standard of German noun-noun compounds (Ghost-NN) including 868 compounds annotated with corpus frequencies of the compounds and their constituents, productivity and ambiguity of the constituents, semantic relations between the constituents, and compositionality ratings of compound-constituent pairs. &#13;
Moreover, a subset of the compounds containing 180 compounds is balanced for the productivity of the modifiers (distinguishing low/mid/high productivity) and the ambiguity of the heads (distinguishing between heads with 1, 2 and >2 senses).", "keyphrases": ["german noun-noun compound", "semantic relation", "ghost-nn"]} +{"id": "perez-etal-2022-robertuito", "title": "RoBERTuito: a pre-trained language model for social media text in Spanish", "abstract": "Since BERT appeared, Transformer language models and transfer learning have become state-of-the-art for natural language processing tasks. Recently, some works geared towards pre-training specially-crafted models for particular domains, such as scientific papers, medical documents, user-generated texts, among others. These domain-specific models have been shown to improve performance significantly in most tasks; however, for languages other than English, such models are not widely available. In this work, we present RoBERTuito, a pre-trained language model for user-generated text in Spanish, trained on over 500 million tweets. Experiments on a benchmark of tasks involving user-generated text showed that RoBERTuito outperformed other pre-trained language models in Spanish. In addition to this, our model has some cross-lingual abilities, achieving top results for English-Spanish tasks of the Linguistic Code-Switching Evaluation benchmark (LinCE) and also competitive performance against monolingual models in English Twitter tasks. To facilitate further research, we make RoBERTuito publicly available at the HuggingFace model hub together with the dataset used to pre-train it.", "keyphrases": ["language model", "spanish", "social medium text"]} +{"id": "zhou-etal-2021-defense", "title": "Defense against Synonym Substitution-based Adversarial Attacks via Dirichlet Neighborhood Ensemble", "abstract": "Although deep neural networks have achieved prominent performance on many NLP tasks, they are vulnerable to adversarial examples. We propose Dirichlet Neighborhood Ensemble (DNE), a randomized method for training a robust model to defense synonym substitution-based attacks. During training, DNE forms virtual sentences by sampling embedding vectors for each word in an input sentence from a convex hull spanned by the word and its synonyms, and it augments them with the training data. In such a way, the model is robust to adversarial attacks while maintaining the performance on the original clean data. DNE is agnostic to the network architectures and scales to large models (e.g., BERT) for NLP applications. Through extensive experimentation, we demonstrate that our method consistently outperforms recently proposed defense methods by a significant margin across different network architectures and multiple data sets.", "keyphrases": ["synonyms", "attack", "dirichlet neighborhood ensemble"]} +{"id": "kolluru-etal-2020-openie6", "title": "OpenIE6: Iterative Grid Labeling and Coordination Analysis for Open Information Extraction", "abstract": "A recent state-of-the-art neural open information extraction (OpenIE) system generates extractions iteratively, requiring repeated encoding of partial outputs. This comes at a significant computational cost. On the other hand, sequence labeling approaches for OpenIE are much faster, but worse in extraction quality. In this paper, we bridge this trade-off by presenting an iterative labeling-based system that establishes a new state of the art for OpenIE, while extracting 10x faster. &#13;
This is achieved through a novel Iterative Grid Labeling (IGL) architecture, which treats OpenIE as a 2-D grid labeling task. We improve its performance further by applying coverage (soft) constraints on the grid at training time. Moreover, on observing that the best OpenIE systems falter at handling coordination structures, our OpenIE system also incorporates a new coordination analyzer built with the same IGL architecture. This IGL based coordination analyzer helps our OpenIE system handle complicated coordination structures, while also establishing a new state of the art on the task of coordination analysis, with a 12.3 pts improvement in F1 over previous analyzers. Our OpenIE system - OpenIE6 - beats the previous systems by as much as 4 pts in F1, while being much faster.", "keyphrases": ["iterative grid labeling", "open information extraction", "openie6"]} +{"id": "shutova-etal-2016-black", "title": "Black Holes and White Rabbits: Metaphor Identification with Visual Features", "abstract": "Metaphor is pervasive in our communication, which makes it an important problem for natural language processing (NLP). Numerous approaches to metaphor processing have thus been proposed, all of which relied on linguistic features and textual data to construct their models. Human metaphor comprehension is, however, known to rely on both our linguistic and perceptual experience, and vision can play a particularly important role when metaphorically projecting imagery across domains. In this paper, we present the first metaphor identification method that simultaneously draws knowledge from linguistic and visual data. Our results demonstrate that it outperforms linguistic and visual models in isolation, as well as being competitive with the best-performing metaphor identification methods, that rely on hand-crafted knowledge about domains and perception.", "keyphrases": ["metaphor identification", "visual feature", "experience"]} +{"id": "klein-nabi-2019-attention", "title": "Attention Is (not) All You Need for Commonsense Reasoning", "abstract": "The recently introduced BERT model exhibits strong performance on several language understanding benchmarks. In this paper, we describe a simple re-implementation of BERT for commonsense reasoning. We show that the attentions produced by BERT can be directly utilized for tasks such as the Pronoun Disambiguation Problem and Winograd Schema Challenge. Our proposed attention-guided commonsense reasoning method is conceptually simple yet empirically powerful. Experimental analysis on multiple datasets demonstrates that our proposed system performs remarkably well on all cases while outperforming the previously reported state of the art by a margin. While results suggest that BERT seems to implicitly learn to establish complex relationships between entities, solving commonsense reasoning tasks might require more than unsupervised models learned from huge text corpora.", "keyphrases": ["commonsense reasoning", "huge text corpora", "maximum attention score"]} +{"id": "u-maji-2006-computational", "title": "Computational Complexity of Statistical Machine Translation", "abstract": "In this paper we study a set of problems that are of considerable importance to Statistical Machine Translation (SMT) but which have not been addressed satisfactorily by the SMT research community. &#13;
Over the last decade, a variety of SMT algorithms have been built and empirically tested whereas little is known about the computational complexity of some of the fundamental problems of SMT. Our work aims at providing useful insights into the computational complexity of those problems. We prove that while IBM Models 1-2 are conceptually and computationally simple, computations involving the higher (and more useful) models are hard. Since it is unlikely that there exists a polynomial time solution for any of these hard problems (unless P = NP and P^#P = P), our results highlight and justify the need for developing polynomial time approximations for these computations. We also discuss some practical ways of dealing with complexity.", "keyphrases": ["statistical machine translation", "computational complexity", "viterbi alignment"]} +{"id": "wang-ng-2013-beam", "title": "A Beam-Search Decoder for Normalization of Social Media Text with Application to Machine Translation", "abstract": "Social media texts are written in an informal style, which hinders other natural language processing (NLP) applications such as machine translation. Text normalization is thus important for processing of social media text. Previous work mostly focused on normalizing words by replacing an informal word with its formal form. In this paper, to further improve other downstream NLP applications, we argue that other normalization operations should also be performed, e.g., missing word recovery and punctuation correction. A novel beam-search decoder is proposed to effectively integrate various normalization operations. Empirical results show that our system obtains statistically significant improvements over two strong baselines in both normalization and translation tasks, for both Chinese and English.", "keyphrases": ["beam-search decoder", "machine translation", "punctuation correction", "social medium text"]} +{"id": "chambers-jurafsky-2009-unsupervised", "title": "Unsupervised Learning of Narrative Schemas and their Participants", "abstract": "We describe an unsupervised system for learning narrative schemas, coherent sequences or sets of events (arrested(POLICE, SUSPECT), convicted(JUDGE, SUSPECT)) whose arguments are filled with participant semantic roles defined over words (Judge = {judge, jury, court}, Police = {police, agent, authorities}). Unlike most previous work in event structure or semantic role learning, our system does not use supervised techniques, hand-built knowledge, or predefined classes of events or roles. Our unsupervised learning algorithm uses coreferring arguments in chains of verbs to learn both rich narrative event structure and argument roles. By jointly addressing both tasks, we improve on previous results in narrative/frame learning and induce rich frame-specific semantic roles.", "keyphrases": ["narrative schemas", "semantic role", "text corpora", "protagonist"]} +{"id": "sun-etal-2019-pullnet", "title": "PullNet: Open Domain Question Answering with Iterative Retrieval on Knowledge Bases and Text", "abstract": "We consider open-domain question answering (QA) where answers are drawn from either a corpus, a knowledge base (KB), or a combination of both of these. We focus on a setting in which a corpus is supplemented with a large but incomplete KB, and on questions that require non-trivial (e.g., \u201cmulti-hop\u201d) reasoning. We describe PullNet, an integrated framework for (1) learning what to retrieve and (2) reasoning with this heterogeneous information to find the best answer. &#13;
PullNet uses an iterative process to construct a question-specific subgraph that contains information relevant to the question. In each iteration, a graph convolutional network (graph CNN) is used to identify subgraph nodes that should be expanded using retrieval (or \u201cpull\u201d) operations on the corpus and/or KB. After the subgraph is complete, another graph CNN is used to extract the answer from the subgraph. This retrieve-and-reason process allows us to answer multi-hop questions using large KBs and corpora. PullNet is weakly supervised, requiring question-answer pairs but not gold inference paths. Experimentally PullNet improves over the prior state-of-the art, and in the setting where a corpus is used with incomplete KB these improvements are often dramatic. PullNet is also often superior to prior systems in a KB-only setting or a text-only setting.", "keyphrases": ["pullnet", "answer entity", "query", "unstructured text"]} +{"id": "vala-etal-2015-mr", "title": "Mr. Bennet, his coachman, and the Archbishop walk into a bar but only one of them gets recognized: On The Difficulty of Detecting Characters in Literary Texts", "abstract": "Characters are fundamental to literary analysis. Current approaches are heavily reliant on NER to identify characters, causing many to be overlooked. We propose a novel technique for character detection, achieving significant improvements over state of the art on multiple datasets.", "keyphrases": ["character", "literary text", "novel technique"]} +{"id": "zheng-etal-2019-chid", "title": "ChID: A Large-scale Chinese IDiom Dataset for Cloze Test", "abstract": "Cloze-style reading comprehension in Chinese is still limited due to the lack of various corpora. In this paper we propose a large-scale Chinese cloze test dataset ChID, which studies the comprehension of idiom, a unique language phenomenon in Chinese. In this corpus, the idioms in a passage are replaced by blank symbols and the correct answer needs to be chosen from well-designed candidate idioms. We carefully study how the design of candidate idioms and the representation of idioms affect the performance of state-of-the-art models. Results show that the machine accuracy is substantially worse than that of human, indicating a large space for further research.", "keyphrases": ["idiom dataset", "chid", "multiple-choice mrc dataset", "chengyu"]} +{"id": "kementchedjhieva-lopez-2018-indicatements", "title": "`Indicatements' that character language models learn English morpho-syntactic units and regularities", "abstract": "Character language models have access to surface morphological patterns, but it is not clear whether or how they learn abstract morphological regularities. We instrument a character language model with several probes, finding that it can develop a specific unit to identify word boundaries and, by extension, morpheme boundaries, which allows it to capture linguistic properties and regularities of these units. Our language model proves surprisingly good at identifying the selectional restrictions of English derivational morphemes, a task that requires both morphological and syntactic awareness. 
Thus we conclude that, when morphemes overlap extensively with the words of a language, a character language model can perform morphological abstraction.", "keyphrases": ["character language model", "unit", "regularity"]} +{"id": "zhang-etal-2020-top", "title": "A Top-down Neural Architecture towards Text-level Parsing of Discourse Rhetorical Structure", "abstract": "Due to its great importance in deep natural language understanding and various down-stream applications, text-level parsing of discourse rhetorical structure (DRS) has been drawing more and more attention in recent years. However, all the previous studies on text-level discourse parsing adopt bottom-up approaches, which much limit the DRS determination on local information and fail to well benefit from global information of the overall discourse. In this paper, we justify from both computational and perceptive points-of-view that the top-down architecture is more suitable for text-level DRS parsing. On the basis, we propose a top-down neural architecture toward text-level DRS parsing. In particular, we cast discourse parsing as a recursive split point ranking task, where a split point is classified to different levels according to its rank and the elementary discourse units (EDUs) associated with it are arranged accordingly. In this way, we can determine the complete DRS as a hierarchical tree structure via an encoder-decoder with an internal stack. Experimentation on both the English RST-DT corpus and the Chinese CDTB corpus shows the great effectiveness of our proposed top-down approach towards text-level DRS parsing.", "keyphrases": ["top-down neural architecture", "text-level parsing", "rhetorical structure"]} +{"id": "isozaki-etal-2010-head", "title": "Head Finalization: A Simple Reordering Rule for SOV Languages", "abstract": "English is a typical SVO (Subject-Verb-Object) language, while Japanese is a typical SOV language. Conventional Statistical Machine Translation (SMT) systems work well within each of these language families. However, SMT-based translation from an SVO language to an SOV language does not work well because their word orders are completely different. Recently, a few groups have proposed rule-based preprocessing methods to mitigate this problem (Xu et al., 2009; Hong et al., 2009). These methods rewrite SVO sentences to derive more SOV-like sentences by using a set of handcrafted rules. In this paper, we propose an alternative single reordering rule: Head Finalization. This is a syntax-based preprocessing approach that offers the advantage of simplicity. We do not have to be concerned about part-of-speech tags or rule weights because the powerful Enju parser allows us to implement the rule at a general level. Our experiments show that its result, Head Final English (HFE), follows almost the same order as Japanese. We also show that this rule improves automatic evaluation scores.", "keyphrases": ["word order", "preprocessing method", "head finalization", "translation quality", "english-to-japanese translation"]} +{"id": "waseem-2016-racist", "title": "Are You a Racist or Am I Seeing Things? Annotator Influence on Hate Speech Detection on Twitter", "abstract": "Hate speech in the form of racism and sexism is commonplace on the internet (Waseem and Hovy, 2016). For this reason, there has been both an academic and an industry interest in detection of hate speech. The volume of data to be reviewed for creating data sets encourages a use of crowd sourcing for the annotation efforts. 
In this paper, we provide an examination of the influence of annotator knowledge of hate speech on classification models by comparing classification results obtained from training on expert and amateur annotations. We provide an evaluation on our own data set and run our models on the data set released by Waseem and Hovy (2016). We find that amateur annotators are more likely than expert annotators to label items as hate speech, and that systems trained on expert annotations outperform systems trained on amateur annotations.", "keyphrases": ["racist", "hate speech detection", "twitter", "abusive language"]} +{"id": "graham-2015-evaluating", "title": "Re-evaluating Automatic Summarization with BLEU and 192 Shades of ROUGE", "abstract": "We provide an analysis of current evaluation methodologies applied to summarization metrics and identify the following areas of concern: (1) movement away from evaluation by correlation with human assessment; (2) omission of important components of human assessment from evaluations, in addition to large numbers of metric variants; (3) absence of methods of significance testing improvements over a baseline. We outline an evaluation methodology that overcomes all such challenges, providing the first method of significance testing suitable for evaluation of summarization metrics. Our evaluation reveals for the first time which metric variants significantly outperform others, optimal metric variants distinct from current recommended best variants, as well as machine translation metric BLEU to have performance on-par with ROUGE for the purpose of evaluation of summarization systems. We subsequently replicate a recent large-scale evaluation that relied on, what we now know to be, suboptimal ROUGE variants revealing distinct conclusions about the relative performance of state-of-the-art summarization systems.", "keyphrases": ["rouge", "summarization system", "evaluation metric", "human judgment", "bleu metric"]} +{"id": "hasan-ng-2013-stance", "title": "Stance Classification of Ideological Debates: Data, Models, Features, and Constraints", "abstract": "Determining the stance expressed in a post written for a two-sided debate in an online debate forum is a relatively new and challenging problem in opinion mining. We seek to gain a better understanding of how to improve machine learning approaches to stance classification of ideological debates, specifically by examining how the performance of a learning-based stance classification system varies with the amount and quality of the training data, the complexity of the underlying model, the richness of the feature set, as well as the application of extra-linguistic constraints.", "keyphrases": ["online debate forum", "stance classification", "politician", "collective classification"]} +{"id": "zhang-etal-2017-ordinal", "title": "Ordinal Common-sense Inference", "abstract": "Humans have the capacity to draw common-sense inferences from natural language: various things that are likely but not certain to hold based on established discourse, and are rarely stated explicitly. We propose an evaluation of automated common-sense inference based on an extension of recognizing textual entailment: predicting ordinal human responses on the subjective likelihood of an inference holding in a given context. We describe a framework for extracting common-sense knowledge from corpora, which is then used to construct a dataset for this ordinal entailment task. &#13;
We train a neural sequence-to-sequence model on this dataset, which we use to score and generate possible inferences. Further, we annotate subsets of previously established datasets via our ordinal annotation protocol in order to then analyze the distinctions between these and what we have constructed.", "keyphrases": ["common-sense inference", "human response", "joci", "plausibility", "situation"]} +{"id": "jia-liang-2016-data", "title": "Data Recombination for Neural Semantic Parsing", "abstract": "Modeling crisp logical regularities is crucial in semantic parsing, making it difficult for neural models with no task-specific prior knowledge to achieve good results. In this paper, we introduce data recombination, a novel framework for injecting such prior knowledge into a model. From the training data, we induce a high-precision synchronous context-free grammar, which captures important conditional independence properties commonly found in semantic parsing. We then train a sequence-to-sequence recurrent network (RNN) model with a novel attention-based copying mechanism on datapoints sampled from this grammar, thereby teaching the model about these structural properties. Data recombination improves the accuracy of our RNN model on three semantic parsing datasets, leading to new state-of-the-art performance on the standard GeoQuery dataset for models with comparable supervision.", "keyphrases": ["neural semantic parsing", "context-free grammar", "data recombination", "generalization", "brevity"]} +{"id": "demberg-etal-2007-phonological", "title": "Phonological Constraints and Morphological Preprocessing for Grapheme-to-Phoneme Conversion", "abstract": "Grapheme-to-phoneme conversion (g2p) is a core component of any text-to-speech system. We show that adding simple syllabification and stress assignment constraints, namely \u2018one nucleus per syllable\u2019 and \u2018one main stress per word\u2019, to a joint n-gram model for g2p conversion leads to a dramatic improvement in conversion accuracy. Secondly, we assessed morphological preprocessing for g2p conversion. While morphological information has been incorporated in some past systems, its contribution has never been quantitatively assessed for German. We compare the relevance of morphological preprocessing with respect to the morphological segmentation method, training set size, the g2p conversion algorithm, and two languages, English and German.", "keyphrases": ["morphological preprocessing", "grapheme-to-phoneme conversion", "german"]} +{"id": "petrov-etal-2012-universal", "title": "A Universal Part-of-Speech Tagset", "abstract": "To facilitate future research in unsupervised induction of syntactic structure and to standardize best-practices, we propose a tagset that consists of twelve universal part-of-speech categories. In addition to the tagset, we develop a mapping from 25 different treebank tagsets to this universal set. As a result, when combined with the original treebank data, this universal tagset and mapping produce a dataset consisting of common parts-of-speech for 22 different languages. 
We highlight the use of this resource via three experiments, that (1) compare tagging accuracies across languages, (2) present an unsupervised grammar induction approach that does not use gold standard part-of-speech tags, and (3) use the universal tags to transfer dependency parsers between languages, achieving state-of-the-art results.", "keyphrases": ["universal part-of-speech tagset", "mapping", "treebank", "universal set", "pos tag"]} +{"id": "whitelaw-etal-2009-using", "title": "Using the Web for Language Independent Spellchecking and Autocorrection", "abstract": "We have designed, implemented and evaluated an end-to-end system spellchecking and autocorrection system that does not require any manually annotated training data. The World Wide Web is used as a large noisy corpus from which we infer knowledge about misspellings and word usage. This is used to build an error model and an n-gram language model. A small secondary set of news texts with artificially inserted misspellings are used to tune confidence classifiers. Because no manual annotation is required, our system can easily be instantiated for new languages. When evaluated on human typed data with real misspellings in English and German, our web-based systems outperform baselines which use candidate corrections based on hand-curated dictionaries. Our system achieves 3.8% total error rate in English. We show similar improvements in preliminary results on artificial data for Russian and Arabic.", "keyphrases": ["web", "error model", "edit distance", "spelling correction", "human-compiled lexicon"]} +{"id": "reiter-2007-architecture", "title": "An Architecture for Data-to-Text Systems", "abstract": "I present an architecture for data-to-text systems, that is NLG systems which produce texts from non-linguistic input data; this essentially extends the architecture of Reiter and Dale (2000) to systems whose input is raw data instead of AI knowledge bases. This architecture is being used in the BabyTalk project, and is based on experiences in several projects at Aberdeen; it also seems to be compatible with many data-to-text systems developed elsewhere. It consists of four stages which are organised in a pipeline: Signal Analysis, Data Interpretation, Document Planning, and Microplanning and Realisation.", "keyphrases": ["data-to-text system", "nlg system", "document planning", "natural language generation"]} +{"id": "chaganty-etal-2018-price", "title": "The price of debiasing automatic metrics in natural language evalaution", "abstract": "For evaluating generation systems, automatic metrics such as BLEU cost nothing to run but have been shown to correlate poorly with human judgment, leading to systematic bias against certain model improvements. On the other hand, averaging human judgments, the unbiased gold standard, is often too expensive. In this paper, we use control variates to combine automatic metrics with human evaluation to obtain an unbiased estimator with lower cost than human evaluation alone. In practice, however, we obtain only a 7-13% cost reduction on evaluating summarization and open-response question answering systems. We then prove that our estimator is optimal: there is no unbiased estimator with lower cost. 
Our theory further highlights the two fundamental bottlenecks\u2014the automatic metric and the prompt shown to human evaluators\u2014both of which need to be improved to obtain greater cost savings.", "keyphrases": ["automatic metric", "judgment", "summarization", "natural language generation"]} +{"id": "sajous-etal-2020-englawi", "title": "ENGLAWI: From Human- to Machine-Readable Wiktionary", "abstract": "This paper introduces ENGLAWI, a large, versatile, XML-encoded machine-readable dictionary extracted from Wiktionary. ENGLAWI contains 752,769 articles encoding the full body of information included in Wiktionary: simple words, compounds and multiword expressions, lemmas and inflectional paradigms, etymologies, phonemic transcriptions in IPA, definition glosses and usage examples, translations, semantic and morphological relations, spelling variants, etc. It is fully documented, released under a free license and supplied with G-PeTo, a series of scripts allowing easy information extraction from ENGLAWI. Additional resources extracted from ENGLAWI, such as an inflectional lexicon, a lexicon of diatopic variants and the inclusion dates of headwords in Wiktionary's nomenclature are also provided. The paper describes the content of the resource and illustrates how it can be - and has been - used in previous studies. We finally introduce an ongoing work that computes lexicographic word embeddings from ENGLAWI's definitions.", "keyphrases": ["wiktionary", "machine-readable dictionary", "englawi"]} +{"id": "schwarm-ostendorf-2005-reading", "title": "Reading Level Assessment Using Support Vector Machines and Statistical Language Models", "abstract": "Reading proficiency is a fundamental component of language competency. However, finding topical texts at an appropriate reading level for foreign and second language learners is a challenge for teachers. This task can be addressed with natural language processing technology to assess reading level. Existing measures of reading level are not well suited to this task, but previous work and our own pilot experiments have shown the benefit of using statistical language models. In this paper, we also use support vector machines to combine features from traditional reading level measures, statistical language models, and other language processing tools to produce a better method of assessing reading level.", "keyphrases": ["support vector machines", "language model", "reading level measure", "readability", "verb phrase"]} +{"id": "yao-etal-2013-answer", "title": "Answer Extraction as Sequence Tagging with Tree Edit Distance", "abstract": "Our goal is to extract answers from preretrieved sentences for Question Answering (QA). We construct a linear-chain Conditional Random Field based on pairs of questions and their possible answer sentences, learning the association between questions and answer types. This casts answer extraction as an answer sequence tagging problem for the first time, where knowledge of shared structure between question and source sentence is incorporated through features based on Tree Edit Distance (TED). Our model is free of manually created question and answer templates, fast to run (processing 200 QA pairs per second excluding parsing time), and yields an F1 of 63.3% on a new public dataset based on prior TREC QA evaluations. 
The developed system is open-source, and includes an implementation of the TED model that is state of the art in the task of ranking QA pairs.", "keyphrases": ["tree edit distance", "conditional random field", "answer extraction", "pre-selected sentence", "search engine"]} +{"id": "herdagdelen-etal-2009-measuring", "title": "Measuring semantic relatedness with vector space models and random walks", "abstract": "Both vector space models and graph random walk models can be used to determine similarity between concepts. Noting that vectors can be regarded as local views of a graph, we directly compare vector space models and graph random walk models on standard tasks of predicting human similarity ratings, concept categorization, and semantic priming, varying the size of the dataset from which vector space and graph are extracted.", "keyphrases": ["vector space model", "random walk model", "semantic priming"]} +{"id": "romanello-etal-2009-citations", "title": "Citations in the Digital Library of Classics: Extracting Canonical References by Using Conditional Random Fields", "abstract": "Scholars of Classics cite ancient texts by using abridged citations called canonical references. In the scholarly digital library, canonical references create a complex textile of links between ancient and modern sources reflecting the deep hypertextual nature of texts in this field. This paper aims to demonstrate the suitability of Conditional Random Fields (CRF) for extracting this particular kind of reference from unstructured texts in order to enhance the capabilities of navigating and aggregating scholarly electronic resources. In particular, we developed a parser which recognizes word level n-grams of a text as being canonical references by using a CRF model trained with both positive and negative examples.", "keyphrases": ["digital library", "conditional random fields", "crf", "unstructured text"]} +{"id": "garg-etal-2019-jointly", "title": "Jointly Learning to Align and Translate with Transformer Models", "abstract": "The state of the art in machine translation (MT) is governed by neural approaches, which typically provide superior translation accuracy over statistical approaches. However, on the closely related task of word alignment, traditional statistical word alignment models often remain the go-to solution. In this paper, we present an approach to train a Transformer model to produce both accurate translations and alignments. We extract discrete alignments from the attention probabilities learnt during regular neural machine translation model training and leverage them in a multi-task framework to optimize towards translation and alignment objectives. We demonstrate that our approach produces competitive results compared to GIZA++ trained IBM alignment models without sacrificing translation accuracy and outperforms previous attempts on Transformer model based word alignment. Finally, by incorporating IBM model alignments into our multi-task training, we report significantly better alignment accuracies compared to GIZA++ on three publicly available data sets.", "keyphrases": ["word alignment", "multi-task framework", "training pipeline"]} +{"id": "chakravarthi-etal-2022-overview", "title": "Overview of The Shared Task on Homophobia and Transphobia Detection in Social Media Comments", "abstract": "Homophobia and Transphobia Detection is the task of identifying homophobia, transphobia, and non-anti-LGBT+ content from the given corpus. 
Homophobia and transphobia are both toxic languages directed at LGBTQ+ individuals that are described as hate speech. This paper summarizes our findings on the \u201cHomophobia and Transphobia Detection in social media comments\u201d shared task held at LT-EDI 2022 - ACL 2022. This shared task focused on three sub-tasks for Tamil, English, and Tamil-English (code-mixed) languages. It received 10 systems for Tamil, 13 systems for English, and 11 systems for Tamil-English. The best systems for Tamil, English, and Tamil-English scored 0.570, 0.870, and 0.610, respectively, on average macro F1-score.", "keyphrases": ["transphobia detection", "hate speech", "social medium"]} +{"id": "wisniewski-etal-2013-design", "title": "Design and Analysis of a Large Corpus of Post-Edited Translations: Quality Estimation, Failure Analysis and the Variability of Post-Edition", "abstract": "Machine Translation (MT) is now often used to produce approximate translations that are then corrected by trained professional post-editors. As a result, more and more datasets of post-edited translations are being collected. These datasets are very useful for training, adapting or testing existing MT systems. In this work, we present the design and content of one such corpus of post-edited translations, and consider less studied possible uses of these data, notably the development of an automatic Quality Estimation (QE) system and the detection of frequent errors in automatic translations. Both applications require a careful assessment of the variability in post-editions, that we study here.", "keyphrases": ["post-edited translation", "quality estimation", "design"]} +{"id": "bayerl-paul-2011-determines", "title": "What Determines Inter-Coder Agreement in Manual Annotations? A Meta-Analytic Investigation", "abstract": "Recent discussions of annotator agreement have mostly centered around its calculation and interpretation, and the correct choice of indices. Although these discussions are important, they only consider the \u201cback-end\u201d of the story, namely, what to do once the data are collected. Just as important in our opinion is to know how agreement is reached in the first place and what factors influence coder agreement as part of the annotation process or setting, as this knowledge can provide concrete guidelines for the planning and set-up of annotation projects. To investigate whether there are factors that consistently impact annotator agreement we conducted a meta-analytic investigation of annotation studies reporting agreement percentages. Our meta-analysis synthesized factors reported in 96 annotation studies from three domains (word-sense disambiguation, prosodic transcriptions, and phonetic transcriptions) and was based on a total of 346 agreement indices. Our analysis identified seven factors that influence reported agreement values: annotation domain, number of categories in a coding scheme, number of annotators in a project, whether annotators received training, the intensity of annotator training, the annotation purpose, and the method used for the calculation of percentage agreements. Based on our results we develop practical recommendations for the assessment, interpretation, calculation, and reporting of coder agreement. &#13;
We also briefly discuss theoretical implications for the concept of annotation quality.", "keyphrases": ["agreement", "meta-analytic investigation", "factor", "annotation study", "scheme"]} +{"id": "nguyen-etal-2015-improving-topic", "title": "Improving Topic Models with Latent Feature Word Representations", "abstract": "Probabilistic topic models are widely used to discover latent topics in document collections, while latent feature vector representations of words have been used to obtain high performance in many NLP tasks. In this paper, we extend two different Dirichlet multinomial topic models by incorporating latent feature vector representations of words trained on very large corpora to improve the word-topic mapping learnt on a smaller corpus. Experimental results show that by using information from the external corpora, our new models produce significant improvements on topic coherence, document clustering and document classification tasks, especially on datasets with few or short documents.", "keyphrases": ["dirichlet", "word embedding", "lftm", "mixture"]} +{"id": "xu-etal-2021-document", "title": "Document-level Event Extraction via Heterogeneous Graph-based Interaction Model with a Tracker", "abstract": "Document-level event extraction aims to recognize event information from a whole piece of article. Existing methods are not effective due to two challenges of this task: a) the target event arguments are scattered across sentences; b) the correlation among events in a document is non-trivial to model. In this paper, we propose Heterogeneous Graph-based Interaction Model with a Tracker (GIT) to solve the aforementioned two challenges. For the first challenge, GIT constructs a heterogeneous graph interaction network to capture global interactions among different sentences and entity mentions. For the second, GIT introduces a Tracker module to track the extracted events and hence capture the interdependency among the events. Experiments on a large-scale dataset (Zheng et al., 2019) show GIT outperforms the previous methods by 2.8 F1. Further analysis reveals GIT is effective in extracting multiple correlated events and event arguments that scatter across the document.", "keyphrases": ["graph-based interaction model", "tracker", "entity mention", "document-level event extraction"]} +{"id": "prabhakaran-etal-2019-perturbation", "title": "Perturbation Sensitivity Analysis to Detect Unintended Model Biases", "abstract": "Data-driven statistical Natural Language Processing (NLP) techniques leverage large amounts of language data to build models that can understand language. However, most language data reflect the public discourse at the time the data was produced, and hence NLP models are susceptible to learning incidental associations around named referents at a particular point in time, in addition to general linguistic meaning. An NLP system designed to model notions such as sentiment and toxicity should ideally produce scores that are independent of the identity of such entities mentioned in text and their social associations. For example, in a general purpose sentiment analysis system, a phrase such as I hate Katy Perry should be interpreted as having the same sentiment as I hate Taylor Swift. Based on this idea, we propose a generic evaluation framework, Perturbation Sensitivity Analysis, which detects unintended model biases related to named entities, and requires no new annotations or corpora. &#13;
We demonstrate the utility of this analysis by employing it on two different NLP models \u2014 a sentiment model and a toxicity model \u2014 applied on online comments in English language from four different genres.", "keyphrases": ["nlp model", "toxicity", "perturbation sensitivity analysis"]} +{"id": "asahara-matsumoto-2004-japanese", "title": "Japanese Unknown Word Identification by Character-based Chunking", "abstract": "We introduce a character-based chunking for unknown word identification in Japanese text. A major advantage of our method is an ability to detect low frequency unknown words of unrestricted character type patterns. The method is built upon SVM-based chunking, by use of character n-gram and surrounding context of n-best word segmentation candidates from statistical morphological analysis as features. It is applied to newspapers and patent texts, achieving 95% precision and 55-70% recall for newspapers and more than 85% precision for patent texts.", "keyphrases": ["unknown word identification", "character-based chunking", "japanese text"]} +{"id": "obeid-etal-2019-adida", "title": "ADIDA: Automatic Dialect Identification for Arabic", "abstract": "This demo paper describes ADIDA, a web-based system for automatic dialect identification for Arabic text. The system distinguishes among the dialects of 25 Arab cities (from Rabat to Muscat) in addition to Modern Standard Arabic. The results are presented with either a point map or a heat map visualizing the automatic identification probabilities over a geographical map of the Arab World.", "keyphrases": ["automatic dialect identification", "arab city", "adida"]} +{"id": "elsahar-etal-2018-zero", "title": "Zero-Shot Question Generation from Knowledge Graphs for Unseen Predicates and Entity Types", "abstract": "We present a neural model for question generation from knowledge graphs triples in a \u201cZero-shot\u201d setup, that is generating questions for predicate, subject types or object types that were not seen at training time. Our model leverages triples occurrences in the natural language corpus in a encoder-decoder architecture, paired with an original part-of-speech copy action mechanism to generate questions. Benchmark and human evaluation show that our model outperforms state-of-the-art on this task.", "keyphrases": ["question generation", "unseen predicate", "zero-shot learning"]} +{"id": "yu-etal-2021-improving", "title": "Improving Math Word Problems with Pre-trained Knowledge and Hierarchical Reasoning", "abstract": "The recent algorithms for math word problems (MWP) neglect to use outside knowledge not present in the problems. Most of them only capture the word-level relationship and ignore to build hierarchical reasoning like the human being for mining the contextual structure between words and sentences. In this paper, we propose a Reasoning with Pre-trained Knowledge and Hierarchical Structure (RPKHS) network, which contains a pre-trained knowledge encoder and a hierarchical reasoning encoder. Firstly, our pre-trained knowledge encoder aims at reasoning the MWP by using outside knowledge from the pre-trained transformer-based models. Secondly, the hierarchical reasoning encoder is presented for seamlessly integrating the word-level and sentence-level reasoning to bridge the entity and context domain on MWP. 
Extensive experiments show that our RPKHS significantly outperforms state-of-the-art approaches on two large-scale commonly-used datasets, and boosts performance from 77.4% to 83.9% on Math23K, from 75.5% to 82.2% on Math23K with 5-fold cross-validation and from 83.7% to 89.8% on MAWPS. More extensive ablations are shown to demonstrate the effectiveness and interpretability of our proposed method.", "keyphrases": ["math word problem", "pre-trained knowledge", "hierarchical reasoning"]} +{"id": "lin-etal-2021-batch", "title": "In-Batch Negatives for Knowledge Distillation with Tightly-Coupled Teachers for Dense Retrieval", "abstract": "We present an efficient training approach to text retrieval with dense representations that applies knowledge distillation using the ColBERT late-interaction ranking model. Specifically, we propose to transfer the knowledge from a bi-encoder teacher to a student by distilling knowledge from ColBERT's expressive MaxSim operator into a simple dot product. The advantage of the bi-encoder teacher\u2013student setup is that we can efficiently add in-batch negatives during knowledge distillation, enabling richer interactions between teacher and student models. In addition, using ColBERT as the teacher reduces training cost compared to a full cross-encoder. Experiments on the MS MARCO passage and document ranking tasks and data from the TREC 2019 Deep Learning Track demonstrate that our approach helps models learn robust representations for dense retrieval effectively and efficiently.", "keyphrases": ["knowledge distillation", "dense retrieval", "in-batch negative"]} +{"id": "wang-jiang-2019-explicit", "title": "Explicit Utilization of General Knowledge in Machine Reading Comprehension", "abstract": "To bridge the gap between Machine Reading Comprehension (MRC) models and human beings, which is mainly reflected in the hunger for data and the robustness to noise, in this paper, we explore how to integrate the neural networks of MRC models with the general knowledge of human beings. On the one hand, we propose a data enrichment method, which uses WordNet to extract inter-word semantic connections as general knowledge from each given passage-question pair. On the other hand, we propose an end-to-end MRC model named as Knowledge Aided Reader (KAR), which explicitly uses the above extracted general knowledge to assist its attention mechanisms. Based on the data enrichment method, KAR is comparable in performance with the state-of-the-art MRC models, and significantly more robust to noise than them. When only a subset (20%-80%) of the training examples are available, KAR outperforms the state-of-the-art MRC models by a large margin, and is still reasonably robust to noise.", "keyphrases": ["general knowledge", "machine reading comprehension", "downstream task"]} +{"id": "han-sun-2011-generative", "title": "A Generative Entity-Mention Model for Linking Entities with Knowledge Base", "abstract": "Linking entities with knowledge base (entity linking) is a key issue in bridging the textual data with the structural knowledge base. Due to the name variation problem and the name ambiguity problem, the entity linking decisions are critically depending on the heterogenous knowledge of entities. In this paper, we propose a generative probabilistic model, called entity-mention model, which can leverage heterogenous entity knowledge (including popularity knowledge, name knowledge and context knowledge) for the entity linking task. &#13;
In our model, each name mention to be linked is modeled as a sample generated through a three-step generative story, and the entity knowledge is encoded in the distribution of entities in document P(e), the distribution of possible names of a specific entity P(s|e), and the distribution of possible contexts of a specific entity P(c|e). To find the referent entity of a name mention, our method combines the evidences from all the three distributions P(e), P(s|e) and P(c|e). Experimental results show that our method can significantly outperform the traditional methods.", "keyphrases": ["entity-mention model", "knowledge base", "probabilistic model"]} +{"id": "kumar-etal-2020-noisy", "title": "Noisy Text Data: Achilles' Heel of BERT", "abstract": "Owing to the phenomenal success of BERT on various NLP tasks and benchmark datasets, industry practitioners are actively experimenting with fine-tuning BERT to build NLP applications for solving industry use cases. For most datasets that are used by practitioners to build industrial NLP applications, it is hard to guarantee absence of any noise in the data. While BERT has performed exceedingly well for transferring the learnings from one use case to another, it remains unclear how BERT performs when fine-tuned on noisy text. In this work, we explore the sensitivity of BERT to noise in the data. We work with most commonly occurring noise (spelling mistakes, typos) and show that this results in significant degradation in the performance of BERT. We present experimental results to show that BERT's performance on fundamental NLP tasks like sentiment analysis and textual similarity drops significantly in the presence of (simulated) noise on benchmark datasets viz. IMDB Movie Review, STS-B, SST-2. Further, we identify shortcomings in the existing BERT pipeline that are responsible for this drop in performance. Our findings suggest that practitioners need to be wary of presence of noise in their datasets while fine-tuning BERT to solve industry use cases.", "keyphrases": ["bert", "benchmark dataset", "noise", "spelling mistake", "presence"]} +{"id": "kurokawa-etal-2009-automatic", "title": "Automatic Detection of Translated Text and its Impact on Machine Translation", "abstract": "We investigate the possibility of automatically detecting whether a piece of text is an original or a translation. On a large parallel English-French corpus where reference information is available, we find that this is possible with around 90% accuracy. We further study the implication this has on Machine Translation performance. After separating our corpus according to translation direction, we train direction-specific phrase-based MT systems and show that they yield improved translation performance. This suggests that taking directionality into account when training SMT systems may have a significant effect on output quality.", "keyphrases": ["machine translation", "automatic detection", "french-translated-to-english text"]} +{"id": "yang-etal-2019-exploring", "title": "Exploring Pre-trained Language Models for Event Extraction and Generation", "abstract": "Traditional approaches to the task of ACE event extraction usually depend on manually annotated data, which is often laborious to create and limited in size. Therefore, in addition to the difficulty of event extraction itself, insufficient training data hinders the learning process as well. &#13;
To promote event extraction, we first propose an event extraction model to overcome the roles overlap problem by separating the argument prediction in terms of roles. Moreover, to address the problem of insufficient training data, we propose a method to automatically generate labeled data by editing prototypes and screen out generated samples by ranking the quality. Experiments on the ACE2005 dataset demonstrate that our extraction model can surpass most existing extraction methods. Besides, incorporating our generation method exhibits further significant improvement. It obtains new state-of-the-art results on the event extraction task, including pushing the F1 score of trigger classification to 81.1%, and the F1 score of argument classification to 58.9%.", "keyphrases": ["event extraction", "argument extraction", "prior study"]}
+{"id": "al-sallab-etal-2015-deep", "title": "Deep Learning Models for Sentiment Analysis in Arabic", "abstract": "In this paper, a deep learning framework is proposed for text sentiment classification in Arabic. Four different architectures are explored. Three are based on Deep Belief Networks and Deep Auto Encoders, where the input data model is based on the ordinary Bag-of-Words, with features based on the recently developed Arabic Sentiment Lexicon in combination with other standard lexicon features. The fourth model, based on the Recursive Auto Encoder, is proposed to tackle the lack of context handling in the first three models. The evaluation is carried out using the Linguistic Data Consortium Arabic Tree Bank dataset, with benchmarking against state-of-the-art systems in sentiment classification with reported results on the same dataset. The results show high improvement of the fourth model over the state of the art, with the advantage of using no lexicon resources that are scarce and costly in terms of their development.", "keyphrases": ["sentiment analysis", "arabic", "deep learning model"]}
+{"id": "varadi-etal-2008-clarin", "title": "CLARIN: Common Language Resources and Technology Infrastructure", "abstract": "The paper provides a general introduction to the CLARIN project, a large-scale European research infrastructure project designed to establish an integrated and interoperable infrastructure of language resources and technologies. The goal is to make language resources and technology much more accessible to all researchers working with language material, particularly non-expert users in the Humanities and Social Sciences. CLARIN intends to build a virtual, distributed infrastructure consisting of a federation of trusted digital archives and repositories where language resources and tools are accessible through web services. The CLARIN project consists of 32 partners from 22 countries and is currently engaged in the preparatory phase of developing the infrastructure. The paper describes the objectives of the project in terms of its technical, legal, linguistic and user dimensions.", "keyphrases": ["language resource", "technology", "infrastructure", "humanity", "clarin"]}
+{"id": "papandrea-etal-2017-supwsd", "title": "SupWSD: A Flexible Toolkit for Supervised Word Sense Disambiguation", "abstract": "In this demonstration we present SupWSD, a Java API for supervised Word Sense Disambiguation (WSD). This toolkit includes the implementation of a state-of-the-art supervised WSD system, together with a Natural Language Processing pipeline for preprocessing and feature extraction.
Our aim is to provide an easy-to-use tool for the research community, designed to be modular, fast and scalable for training and testing on large datasets. The source code of SupWSD is available at .", "keyphrases": ["toolkit", "word sense disambiguation", "supwsd"]}
+{"id": "zhou-etal-2007-tree", "title": "Tree Kernel-Based Relation Extraction with Context-Sensitive Structured Parse Tree Information", "abstract": "This paper proposes a tree kernel with context-sensitive structured parse tree information for relation extraction. It resolves two critical problems in previous tree kernels for relation extraction in two ways. First, it automatically determines a dynamic context-sensitive tree span for relation extraction by extending the widely-used Shortest Path-enclosed Tree (SPT) to include necessary context information outside SPT. Second, it proposes a context-sensitive convolution tree kernel, which enumerates both context-free and context-sensitive sub-trees by considering their ancestor node paths as their contexts. Moreover, this paper evaluates the complementary nature between our tree kernel and a state-of-the-art linear kernel. Evaluation on the ACE RDC corpora shows that our dynamic context-sensitive tree span is much more suitable for relation extraction than SPT and our tree kernel outperforms the state-of-the-art Collins and Duffy\u2019s convolution tree kernel. It also shows that our tree kernel achieves much better performance than the state-of-the-art linear kernels. Finally, it shows that feature-based and tree kernel-based methods complement each other well and the composite kernel can well integrate both flat and structured features.", "keyphrases": ["relation extraction", "tree kernel", "shortest path-enclosed tree", "syntactic feature", "cs-spt"]}
+{"id": "xie-etal-2012-exploring", "title": "Exploring Content Features for Automated Speech Scoring", "abstract": "Most previous research on automated speech scoring has focused on restricted, predictable speech. For automated scoring of unrestricted spontaneous speech, speech proficiency has been evaluated primarily on aspects of pronunciation, fluency, vocabulary and language usage but not on aspects of content and topicality. In this paper, we explore features representing the accuracy of the content of a spoken response. Content features are generated using three similarity measures, including a lexical matching method (Vector Space Model) and two semantic similarity measures (Latent Semantic Analysis and Pointwise Mutual Information). All of the features exhibit moderately high correlations with human proficiency scores on human speech transcriptions. The correlations decrease somewhat due to recognition errors when evaluated on the output of an automatic speech recognition system; however, the additional use of word confidence scores can achieve correlations at a similar level as for human transcriptions.", "keyphrases": ["content feature", "speech scoring", "similarity measure", "latent semantic analysis", "sample response"]}
+{"id": "chang-etal-2007-guiding", "title": "Guiding Semi-Supervision with Constraint-Driven Learning", "abstract": "Over the last few years, two of the main research directions in machine learning of natural language processing have been the study of semi-supervised learning algorithms as a way to train classifiers when the labeled data is scarce, and the study of ways to exploit knowledge and global information in structured learning tasks.
In this paper, we suggest a method for incorporating domain knowledge in semi-supervised learning algorithms. Our novel framework unifies and can exploit several kinds of task-specific constraints. The experimental results presented in the information extraction domain demonstrate that applying constraints helps the model to generate better feedback during learning, and hence the framework allows for high performance learning with significantly less training data than was possible before on these tasks.", "keyphrases": ["constraint-driven learning", "semi-supervised learning", "domain knowledge", "declarative constraint", "hard constraint"]}
+{"id": "zhang-clark-2010-fast", "title": "A Fast Decoder for Joint Word Segmentation and POS-Tagging Using a Single Discriminative Model", "abstract": "We show that the standard beam-search algorithm can be used as an efficient decoder for the global linear model of Zhang and Clark (2008) for joint word segmentation and POS-tagging, achieving a significant speed improvement. Such decoding is enabled by: (1) separating full word features from partial word features so that feature templates can be instantiated incrementally, according to whether the current character is separated or appended; (2) deciding the POS-tag of a potential word when its first character is processed. Early-update is used with perceptron training so that the linear model gives a high score to a correct partial candidate as well as a full output. Effective scoring of partial structures allows the decoder to give high accuracy with a small beam-size of 16. In our 10-fold cross-validation experiments with the Chinese Tree-bank, our system performed over 10 times as fast as Zhang and Clark (2008) with little accuracy loss. The accuracy of our system on the standard CTB 5 test was competitive with the best in the literature.", "keyphrases": ["decoding", "joint word segmentation", "pos-tagging"]}
+{"id": "stoyanov-cardie-2008-topic", "title": "Topic Identification for Fine-Grained Opinion Analysis", "abstract": "Within the area of general-purpose fine-grained subjectivity analysis, opinion topic identification has, to date, received little attention due to both the difficulty of the task and the lack of appropriately annotated resources. In this paper, we provide an operational definition of opinion topic and present an algorithm for opinion topic identification that, following our new definition, treats the task as a problem in topic coreference resolution. We develop a methodology for the manual annotation of opinion topics and use it to annotate topic information for a portion of an existing general-purpose opinion corpus. In experiments using the corpus, our topic identification approach statistically significantly outperforms several non-trivial baselines according to three evaluation measures.", "keyphrases": ["opinion", "subjectivity analysis", "topic identification"]}
+{"id": "britz-etal-2017-effective", "title": "Effective Domain Mixing for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) models are often trained on heterogeneous mixtures of domains, from news to parliamentary proceedings, each with unique distributions and language. In this work we show that training NMT systems on naively mixed data can degrade performance versus models fit to each constituent domain. We demonstrate that this problem can be circumvented, and propose three models that do so by jointly learning domain discrimination and translation.
We demonstrate the efficacy of these techniques by merging pairs of domains in three languages: Chinese, French, and Japanese. After training on composite data, each approach outperforms its domain-specific counterparts, with a model based on a discriminator network doing so most reliably. We obtain consistent performance improvements and an average increase of 1.1 BLEU.", "keyphrases": ["neural machine translation", "discriminator network", "domain adaptation"]}
+{"id": "chang-etal-2020-convokit", "title": "ConvoKit: A Toolkit for the Analysis of Conversations", "abstract": "This paper describes the design and functionality of ConvoKit, an open-source toolkit for analyzing conversations and the social interactions embedded within. ConvoKit provides a unified framework for representing and manipulating conversational data, as well as a large and diverse collection of conversational datasets. By providing an intuitive interface for exploring and interacting with conversational data, this toolkit lowers the technical barriers for the broad adoption of computational methods for conversational analysis.", "keyphrases": ["toolkit", "conversation", "convokit"]}
+{"id": "wang-etal-2021-semeval", "title": "SemEval-2021 Task 9: Fact Verification and Evidence Finding for Tabular Data in Scientific Documents (SEM-TAB-FACTS)", "abstract": "Understanding tables is an important and relevant task that involves understanding table structure as well as being able to compare and contrast information within cells. In this paper, we address this challenge by presenting a new dataset and tasks that address this goal in a shared task in SemEval 2021 Task 9: Fact Verification and Evidence Finding for Tabular Data in Scientific Documents (SEM-TAB-FACTS). Our dataset contains 981 manually-generated tables and an auto-generated dataset of 1980 tables providing over 180K statements and over 16M evidence annotations. SEM-TAB-FACTS featured two sub-tasks. In sub-task A, the goal was to determine if a statement is supported, refuted or unknown in relation to a table. In sub-task B, the focus was on identifying the specific cells of a table that provide evidence for the statement. 69 teams signed up to participate in the task with 19 successful submissions to subtask A and 12 successful submissions to subtask B. We present our results and main findings from the competition.", "keyphrases": ["fact verification", "scientific documents", "sem-tab-facts"]}
+{"id": "oraby-etal-2016-creating", "title": "Creating and Characterizing a Diverse Corpus of Sarcasm in Dialogue", "abstract": "The use of irony and sarcasm in social media allows us to study them at scale for the first time. However, their diversity has made it difficult to construct a high-quality corpus of sarcasm in dialogue. Here, we describe the process of creating a large-scale, highly-diverse corpus of online debate forums dialogue, and our novel methods for operationalizing classes of sarcasm in the form of rhetorical questions and hyperbole. We show that we can use lexico-syntactic cues to reliably retrieve sarcastic utterances with high accuracy. To demonstrate the properties and quality of our corpus, we conduct supervised learning experiments with simple features, and show that we achieve both higher precision and F-measure than previous work on sarcasm in debate forums dialogue.
We apply a weakly-supervised linguistic pattern learner and qualitatively analyze the linguistic differences in each class.", "keyphrases": ["sarcasm", "debate forum", "rhetorical question"]} +{"id": "hashimoto-etal-2017-joint", "title": "A Joint Many-Task Model: Growing a Neural Network for Multiple NLP Tasks", "abstract": "Transfer and multi-task learning have traditionally focused on either a single source-target pair or very few, similar tasks. Ideally, the linguistic levels of morphology, syntax and semantics would benefit each other by being trained in a single model. We introduce a joint many-task model together with a strategy for successively growing its depth to solve increasingly complex tasks. Higher layers include shortcut connections to lower-level task predictions to reflect linguistic hierarchies. We use a simple regularization term to allow for optimizing all model weights to improve one task's loss without exhibiting catastrophic interference of the other tasks. Our single end-to-end model obtains state-of-the-art or competitive results on five different tasks from tagging, parsing, relatedness, and entailment tasks.", "keyphrases": ["joint many-task model", "depth", "complex task", "relatedness", "neural architecture"]} +{"id": "wang-etal-2017-instance", "title": "Instance Weighting for Neural Machine Translation Domain Adaptation", "abstract": "Instance weighting has been widely applied to phrase-based machine translation domain adaptation. However, it is challenging to be applied to Neural Machine Translation (NMT) directly, because NMT is not a linear model. In this paper, two instance weighting technologies, i.e., sentence weighting and domain weighting with a dynamic weight learning strategy, are proposed for NMT domain adaptation. Empirical results on the IWSLT English-German/French tasks show that the proposed methods can substantially improve NMT performance by up to 2.7-6.7 BLEU points, outperforming the existing baselines by up to 1.6-3.6 BLEU points.", "keyphrases": ["weight learning strategy", "instance weighting", "language model", "sentence pair", "out-of-domain data"]} +{"id": "barrault-etal-2018-findings", "title": "Findings of the Third Shared Task on Multimodal Machine Translation", "abstract": "We present the results from the third shared task on multimodal machine translation. In this task a source sentence in English is supplemented by an image and participating systems are required to generate a translation for such a sentence into German, French or Czech. The image can be used in addition to (or instead of) the source sentence. This year the task was extended with a third target language (Czech) and a new test set. In addition, a variant of this task was introduced with its own test set where the source sentence is given in multiple languages: English, French and German, and participating systems are required to generate a translation in Czech. Seven teams submitted 45 different systems to the two variants of the task. Compared to last year, the performance of the multimodal submissions improved, but text-only systems remain competitive.", "keyphrases": ["multimodal machine translation", "image", "french", "change"]} +{"id": "cohan-etal-2018-smhd", "title": "SMHD: a Large-Scale Resource for Exploring Online Language Usage for Multiple Mental Health Conditions", "abstract": "Mental health is a significant and growing public health concern. 
As language usage can be leveraged to obtain crucial insights into mental health conditions, there is a need for large-scale, labeled, mental health-related datasets of users who have been diagnosed with one or more of such conditions. In this paper, we investigate the creation of high-precision patterns to identify self-reported diagnoses of nine different mental health conditions, and obtain high-quality labeled data without the need for manual labelling. We introduce the SMHD (Self-reported Mental Health Diagnoses) dataset and make it available. SMHD is a novel large dataset of social media posts from users with one or multiple mental health conditions along with matched control users. We examine distinctions in users' language, as measured by linguistic and psychological variables. We further explore text classification methods to identify individuals with mental conditions through their language.", "keyphrases": ["mental health condition", "insight", "smhd", "disorder"]}
+{"id": "tan-etal-2017-abstractive", "title": "Abstractive Document Summarization with a Graph-Based Attentional Neural Model", "abstract": "Abstractive summarization is the ultimate goal of document summarization research, but it was previously less investigated due to the immaturity of text generation techniques. Recently impressive progress has been made to abstractive sentence summarization using neural models. Unfortunately, attempts on abstractive document summarization are still in a primitive stage, and the evaluation results are worse than extractive methods on benchmark datasets. In this paper, we review the difficulties of neural abstractive document summarization, and propose a novel graph-based attention mechanism in the sequence-to-sequence framework. The intuition is to address the saliency factor of summarization, which has been overlooked by prior works. Experimental results demonstrate our model is able to achieve considerable improvement over previous neural abstractive models. The data-driven neural abstractive method is also competitive with state-of-the-art extractive methods.", "keyphrases": ["attentional neural model", "graph-based attention mechanism", "abstractive document summarization", "input text"]}
+{"id": "eisenstein-etal-2010-latent", "title": "A Latent Variable Model for Geographic Lexical Variation", "abstract": "The rapid growth of geotagged social media raises new computational possibilities for investigating geographic linguistic variation. In this paper, we present a multi-level generative model that reasons jointly about latent topics and geographical regions. High-level topics such as \"sports\" or \"entertainment\" are rendered differently in each geographic region, revealing topic-specific regional distinctions. Applied to a new dataset of geotagged microblogs, our model recovers coherent topics and their regional variants, while identifying geographic areas of linguistic consistency. The model also enables prediction of an author's geographic location from raw text, outperforming both text regression and supervised topic models.", "keyphrases": ["latent variable model", "variation", "regional distinction", "location", "demographic language variation"]}
+{"id": "peyrard-2019-simple", "title": "A Simple Theoretical Model of Importance for Summarization", "abstract": "Research on summarization has mainly been driven by empirical approaches, crafting systems to perform well on standard datasets with the notion of information Importance remaining latent.
We argue that establishing theoretical models of Importance will advance our understanding of the task and help to further improve summarization systems. To this end, we propose simple but rigorous definitions of several concepts that were previously used only intuitively in summarization: Redundancy, Relevance, and Informativeness. Importance arises as a single quantity naturally unifying these concepts. Additionally, we provide intuitions to interpret the proposed quantities and experiments to demonstrate the potential of the framework to inform and guide subsequent works.", "keyphrases": ["theoretical model", "summarization", "notion", "source document"]} +{"id": "ansell-etal-2021-mad-g", "title": "MAD-G: Multilingual Adapter Generation for Efficient Cross-Lingual Transfer", "abstract": "Adapter modules have emerged as a general parameter-efficient means to specialize a pretrained encoder to new domains. Massively multilingual transformers (MMTs) have particularly benefited from additional training of language-specific adapters. However, this approach is not viable for the vast majority of languages, due to limitations in their corpus size or compute budgets. In this work, we propose MAD-G (Multilingual ADapter Generation), which contextually generates language adapters from language representations based on typological features. In contrast to prior work, our time- and space-efficient MAD-G approach enables (1) sharing of linguistic knowledge across languages and (2) zero-shot inference by generating language adapters for unseen languages. We thoroughly evaluate MAD-G in zero-shot cross-lingual transfer on part-of-speech tagging, dependency parsing, and named entity recognition. While offering (1) improved fine-tuning efficiency (by a factor of around 50 in our experiments), (2) a smaller parameter budget, and (3) increased language coverage, MAD-G remains competitive with more expensive methods for language-specific adapter training across the board. Moreover, it offers substantial benefits for low-resource languages, particularly on the NER task in low-resource African languages. Finally, we demonstrate that MAD-G's transfer performance can be further improved via: (i) multi-source training, i.e., by generating and combining adapters of multiple languages with available task-specific training data; and (ii) by further fine-tuning generated MAD-G adapters for languages with monolingual data.", "keyphrases": ["multilingual adapter generation", "cross-lingual transfer", "part-of-speech tagging", "dependency parsing", "mad-g"]} +{"id": "akkaya-etal-2009-subjectivity", "title": "Subjectivity Word Sense Disambiguation", "abstract": "Many approaches to opinion and sentiment analysis rely on lexicons of words that may be used to express subjectivity. These are compiled as lists of keywords, rather than word meanings (senses). However, many keywords have both subjective and objective senses. False hits -- subjectivity clues used with objective senses -- are a significant source of error in subjectivity and sentiment analysis. This talk will focus on sense-level opinion and sentiment analysis. First, I will give the results of a study showing that even words judged in previous work to be reliable opinion clues have significant degrees of subjectivity sense ambiguity. Then, we will consider the task of distinguishing between the subjective and objective senses of words in a dictionary, and the related task of creating \"usage inventories\" of opinion clues. 
Given such distinctions, the next step is to automatically determine which word instances in a corpus are being used with subjective senses, and which are being used with objective senses (we call this task \"SWSD\"). We will see evidence that SWSD is more feasible than full word sense disambiguation, because it is more coarse-grained -- often, the exact sense need not be pinpointed, and that SWSD can be exploited to improve the performance of opinion and sentiment analysis systems via sense-aware classification. Finally, I will discuss experiments in acquiring SWSD data, via token-based context discrimination where the context vector representation is adapted to distinguish between subjective and objective contexts, and the clustering process is enriched by pair-wise constraints, making it semi-supervised.", "keyphrases": ["sentiment analysis", "word instance", "swsd", "subjectivity"]}
+{"id": "thorne-vlachos-2018-automated", "title": "Automated Fact Checking: Task Formulations, Methods and Future Directions", "abstract": "The recently increased focus on misinformation has stimulated research in fact checking, the task of assessing the truthfulness of a claim. Research in automating this task has been conducted in a variety of disciplines including natural language processing, machine learning, knowledge representation, databases, and journalism. While there has been substantial progress, relevant papers and articles have been published in research communities that are often unaware of each other and use inconsistent terminology, thus impeding understanding and further progress. In this paper we survey automated fact checking research stemming from natural language processing and related disciplines, unifying the task formulations and methodologies across papers and authors. Furthermore, we highlight the use of evidence as an important distinguishing factor among them cutting across task formulations and methods. We conclude with proposing avenues for future NLP research on automated fact checking.", "keyphrases": ["misinformation", "truthfulness", "claim", "knowledge representation", "survey"]}
+{"id": "mathias-etal-2018-eyes", "title": "Eyes are the Windows to the Soul: Predicting the Rating of Text Quality Using Gaze Behaviour", "abstract": "Predicting a reader's rating of text quality is a challenging task that involves estimating different subjective aspects of the text, like structure, clarity, etc. Such subjective aspects are better handled using cognitive information. One such source of cognitive information is gaze behaviour. In this paper, we show that gaze behaviour does indeed help in effectively predicting the rating of text quality. To do this, we first model text quality as a function of three properties - organization, coherence and cohesion. Then, we demonstrate how capturing gaze behaviour helps in predicting each of these properties, and hence the overall quality, by reporting improvements obtained by adding gaze features to traditional textual features for score prediction. We also hypothesize that if a reader has fully understood the text, the corresponding gaze behaviour would give a better indication of the assigned rating, as opposed to partial understanding.
Our experiments validate this hypothesis by showing greater agreement between the given rating and the predicted rating when the reader has a full understanding of the text.", "keyphrases": ["rating", "text quality", "gaze behaviour"]} +{"id": "shi-zhou-2005-error", "title": "Error Detection Using Linguistic Features", "abstract": "Recognition errors hinder the proliferation of speech recognition (SR) systems. Based on the observation that recognition errors may result in ungrammatical sentences, especially in dictation application where an acceptable level of accuracy of generated documents is indispensable, we propose to incorporate two kinds of linguistic features into error detection: lexical features of words, and syntactic features from a robust lexicalized parser. Transformation-based learning is chosen to predict recognition errors by integrating word confidence scores with linguistic features. The experimental results on a dictation data corpus show that linguistic features alone are not as useful as word confidence scores in detecting errors. However, linguistic features provide complementary information when combined with word confidence scores, which collectively reduce the classification error rate by 12.30% and improve the F measure by 53.62%.", "keyphrases": ["linguistic feature", "speech recognition", "error detection"]} +{"id": "luyckx-daelemans-2008-authorship", "title": "Authorship Attribution and Verification with Many Authors and Limited Data", "abstract": "Most studies in statistical or machine learning based authorship attribution focus on two or a few authors. This leads to an overestimation of the importance of the features extracted from the training data and found to be discriminating for these small sets of authors. Most studies also use sizes of training data that are unrealistic for situations in which stylometry is applied (e.g., forensics), and thereby overestimate the accuracy of their approach in these situations. A more realistic interpretation of the task is as an authorship verification problem that we approximate by pooling data from many different authors as negative examples. In this paper, we show, on the basis of a new corpus with 145 authors, what the effect is of many authors on feature selection and learning, and show robustness of a memory-based learning approach in doing authorship attribution and verification with many authors and limited training data when compared to eager learning methods such as SVMs and maximum entropy learning.", "keyphrases": ["verification", "authorship attribution", "essay"]} +{"id": "titov-klementiev-2011-bayesian", "title": "A Bayesian Model for Unsupervised Semantic Parsing", "abstract": "We propose a non-parametric Bayesian model for unsupervised semantic parsing. Following Poon and Domingos (2009), we consider a semantic parsing setting where the goal is to (1) decompose the syntactic dependency tree of a sentence into fragments, (2) assign each of these fragments to a cluster of semantically equivalent syntactic structures, and (3) predict predicate-argument relations between the fragments. We use hierarchical Pitman-Yor processes to model statistical dependencies between meaning representations of predicates and those of their arguments, as well as the clusters of their syntactic realizations. We develop a modification of the Metropolis-Hastings split-merge sampler, resulting in an efficient inference algorithm for the model. 
The method is experimentally evaluated by using the induced semantic representation for the question answering task in the biomedical domain.", "keyphrases": ["bayesian model", "unsupervised semantic parsing", "fragment"]}
+{"id": "el-mekki-etal-2021-domain", "title": "Domain Adaptation for Arabic Cross-Domain and Cross-Dialect Sentiment Analysis from Contextualized Word Embedding", "abstract": "Finetuning deep pre-trained language models has shown state-of-the-art performances on a wide range of Natural Language Processing (NLP) applications. Nevertheless, their generalization performance drops under domain shift. In the case of the Arabic language, diglossia makes building and annotating corpora for each dialect and/or domain a more challenging task. Unsupervised Domain Adaptation tackles this issue by transferring the learned knowledge from labeled source domain data to unlabeled target domain data. In this paper, we propose a new unsupervised domain adaptation method for Arabic cross-domain and cross-dialect sentiment analysis from Contextualized Word Embedding. Several experiments are performed adopting the coarse-grained and the fine-grained taxonomies of Arabic dialects. The obtained results show that our method yields very promising results and outperforms several domain adaptation methods for most of the evaluated datasets. On average, our method increases the performance by an improvement rate of 20.8% over the zero-shot transfer learning from BERT.", "keyphrases": ["arabic cross-domain", "cross-dialect sentiment analysis", "contextualized word"]}
+{"id": "passonneau-carpenter-2013-benefits", "title": "The Benefits of a Model of Annotation", "abstract": "Standard agreement measures for interannotator reliability are neither necessary nor sufficient to ensure a high quality corpus. In a case study of word sense annotation, conventional methods for evaluating labels from trained annotators are contrasted with a probabilistic annotation model applied to crowdsourced data. The annotation model provides far more information, including a certainty measure for each gold standard label; the crowdsourced data was collected at less than half the cost of the conventional approach.", "keyphrases": ["annotator", "reliability", "probabilistic model"]}
+{"id": "pramanick-etal-2021-momenta-multimodal", "title": "MOMENTA: A Multimodal Framework for Detecting Harmful Memes and Their Targets", "abstract": "Internet memes have become powerful means to transmit political, psychological, and socio-cultural ideas. Although memes are typically humorous, recent days have witnessed an escalation of harmful memes used for trolling, cyberbullying, and abuse. Detecting such memes is challenging as they can be highly satirical and cryptic. Moreover, while previous work has focused on specific aspects of memes such as hate speech and propaganda, there has been little work on harm in general. Here, we aim to bridge this gap. In particular, we focus on two tasks: (i) detecting harmful memes, and (ii) identifying the social entities they target. We further extend the recently released HarMeme dataset, which covered COVID-19, with additional memes and a new topic: US politics. To solve these tasks, we propose MOMENTA (MultimOdal framework for detecting harmful MemEs aNd Their tArgets), a novel multimodal deep neural network that uses global and local perspectives to detect harmful memes.
MOMENTA systematically analyzes the local and the global perspective of the input meme (in both modalities) and relates it to the background context. MOMENTA is interpretable and generalizable, and our experiments show that it outperforms several strong rivaling approaches.", "keyphrases": ["multimodal framework", "harmful memes", "momenta"]}
+{"id": "christopoulou-etal-2019-connecting", "title": "Connecting the Dots: Document-level Neural Relation Extraction with Edge-oriented Graphs", "abstract": "Document-level relation extraction is a complex human process that requires logical inference to extract relationships between named entities in text. Existing approaches use graph-based neural models with words as nodes and edges as relations between them, to encode relations across sentences. These models are node-based, i.e., they form pair representations based solely on the two target node representations. However, entity relations can be better expressed through unique edge representations formed as paths between nodes. We thus propose an edge-oriented graph neural model for document-level relation extraction. The model utilises different types of nodes and edges to create a document-level graph. An inference mechanism on the graph edges enables learning intra- and inter-sentence relations using multi-instance learning internally. Experiments on two document-level biomedical datasets for chemical-disease and gene-disease associations show the usefulness of the proposed edge-oriented approach.", "keyphrases": ["relation extraction", "edge", "document-level graph", "logical reasoning"]}
+{"id": "nozza-etal-2021-honest", "title": "HONEST: Measuring Hurtful Sentence Completion in Language Models", "abstract": "Language models have revolutionized the field of NLP. However, language models capture and proliferate hurtful stereotypes, especially in text generation. Our results show that 4.3% of the time, language models complete a sentence with a hurtful word. These cases are not random, but follow language and gender-specific patterns. We propose a score to measure hurtful sentence completions in language models (HONEST). It uses a systematic template- and lexicon-based bias evaluation methodology for six languages. Our findings suggest that these models replicate and amplify deep-seated societal stereotypes about gender roles. Sentence completions refer to sexual promiscuity 9% of the time when the target is female, and to homosexuality 4% of the time when the target is male. The results raise questions about the use of these models in production settings.", "keyphrases": ["hurtful sentence completion", "language model", "honest"]}
+{"id": "hsu-etal-2018-emotionlines", "title": "EmotionLines: An Emotion Corpus of Multi-Party Conversations", "abstract": "Feeling emotion is a critical characteristic to distinguish people from machines. Among all the multi-modal resources for emotion detection, textual datasets are those containing the least additional information in addition to semantics, and hence are adopted widely for testing the developed systems. However, most of the textual emotional datasets consist of emotion labels of only individual words, sentences or documents, which makes it challenging to discuss the contextual flow of emotions. In this paper, we introduce EmotionLines, the first dataset with emotion labels on all utterances in each dialogue based only on their textual content. Dialogues in EmotionLines are collected from Friends TV scripts and private Facebook messenger dialogues.
Then one of seven emotions, Ekman's six basic emotions plus the neutral emotion, is labeled on each utterance by 5 Amazon MTurkers. A total of 29,245 utterances from 2,000 dialogues are labeled in EmotionLines. We also provide several strong baselines for emotion detection models on EmotionLines in this paper.", "keyphrases": ["conversation", "emotion detection", "textual content", "few year"]}
+{"id": "yao-etal-2012-unsupervised", "title": "Unsupervised Relation Discovery with Sense Disambiguation", "abstract": "To discover relation types from text, most methods cluster shallow or syntactic patterns of relation mentions, but consider only one possible sense per pattern. In practice this assumption is often violated. In this paper we overcome this issue by inducing clusters of pattern senses from feature representations of patterns. In particular, we employ a topic model to partition entity pairs associated with patterns into sense clusters using local and global features. We merge these sense clusters into semantic relations using hierarchical agglomerative clustering. We compare against several baselines: a generative latent-variable model, a clustering method that does not disambiguate between path senses, and our own approach but with only local features. Experimental results show our proposed approach discovers dramatically more accurate clusters than models without sense disambiguation, and that incorporating global features, such as the document theme, is crucial.", "keyphrases": ["sense disambiguation", "cluster", "probabilistic model"]}
+{"id": "song-etal-2016-amr", "title": "AMR-to-text generation as a Traveling Salesman Problem", "abstract": "The task of AMR-to-text generation is to generate grammatical text that sustains the semantic meaning for a given AMR graph. We attack the task by first partitioning the AMR graph into smaller fragments, and then generating the translation for each fragment, before finally deciding the order by solving an asymmetric generalized traveling salesman problem (AGTSP). A Maximum Entropy classifier is trained to estimate the traveling costs, and a TSP solver is used to find the optimized solution. The final model reports a BLEU score of 22.44 on the SemEval-2016 Task 8 dataset.", "keyphrases": ["salesman problem", "agtsp", "amr-to-text generation", "small fragment"]}
+{"id": "wang-etal-2020-heterogeneous", "title": "Heterogeneous Graph Neural Networks for Extractive Document Summarization", "abstract": "As a crucial step in extractive document summarization, learning cross-sentence relations has been explored by a plethora of approaches. An intuitive way is to put them in the graph-based neural network, which has a more complex structure for capturing inter-sentence relationships. In this paper, we present a heterogeneous graph-based neural network for extractive summarization (HETERSUMGRAPH), which contains semantic nodes of different granularity levels apart from sentences. These additional nodes act as the intermediary between sentences and enrich the cross-sentence relations. Besides, our graph structure is flexible in natural extension from a single-document setting to multi-document via introducing document nodes. To our knowledge, we are the first to introduce different types of nodes into graph-based neural networks for extractive document summarization and perform a comprehensive qualitative analysis to investigate their benefits.
The code will be released on Github.", "keyphrases": ["extractive document summarization", "graph-based neural network", "inter-sentence relationship", "heterogeneous graph", "multiple document"]}
+{"id": "chollampatt-etal-2016-adapting", "title": "Adapting Grammatical Error Correction Based on the Native Language of Writers with Neural Network Joint Models", "abstract": "An important aspect for the task of grammatical error correction (GEC) that has not yet been adequately explored is adaptation based on the native language (L1) of writers, despite the marked influences of L1 on second language (L2) writing. In this paper, we adapt a neural network joint model (NNJM) using L1-specific learner text and integrate it into a statistical machine translation (SMT) based GEC system. Specifically, we train an NNJM on general learner text (not L1-specific) and subsequently train on L1-specific data using a Kullback-Leibler divergence regularized objective function in order to preserve generalization of the model. We incorporate this adapted NNJM as a feature in an SMT-based English GEC system and show that adaptation achieves significant F0.5 score gains on English texts written by L1 Chinese, Russian, and Spanish writers.", "keyphrases": ["grammatical error correction", "native language", "neural language model"]}
+{"id": "xu-etal-2018-double", "title": "Double Embeddings and CNN-based Sequence Labeling for Aspect Extraction", "abstract": "One key task of fine-grained sentiment analysis of product reviews is to extract product aspects or features that users have expressed opinions on. This paper focuses on supervised aspect extraction using deep learning. Unlike other highly sophisticated supervised deep learning models, this paper proposes a novel and yet simple CNN model employing two types of pre-trained embeddings for aspect extraction: general-purpose embeddings and domain-specific embeddings. Without using any additional supervision, this model achieves surprisingly good results, outperforming state-of-the-art sophisticated existing methods. To our knowledge, this paper is the first to report such double embeddings based CNN model for aspect extraction and achieve very good results.", "keyphrases": ["aspect extraction", "sentiment analysis", "cnn", "convolutional neural network", "sequence tagging problem"]}
+{"id": "wiseman-etal-2018-learning", "title": "Learning Neural Templates for Text Generation", "abstract": "While neural, encoder-decoder models have had significant empirical success in text generation, there remain several unaddressed problems with this style of generation. Encoder-decoder models are largely (a) uninterpretable, and (b) difficult to control in terms of their phrasing or content. This work proposes a neural generation system using a hidden semi-Markov model (HSMM) decoder, which learns latent, discrete templates jointly with learning to generate. We show that this model learns useful templates, and that these templates make generation both more interpretable and controllable. Furthermore, we show that this approach scales to real data sets and achieves strong performance nearing that of encoder-decoder text generation models.", "keyphrases": ["text generation", "control", "model decoder"]}
+{"id": "dione-2012-morphological", "title": "A Morphological Analyzer For Wolof Using Finite-State Techniques", "abstract": "This paper reports on the design and implementation of a morphological analyzer for Wolof.
The main motivation for this work is to obtain a linguistically motivated tool using finite-state techniques. The finite-state technology is especially attractive in dealing with human language morphologies. Finite-state transducers (FST) are fast, efficient and can be fully reversible, enabling users to perform analysis as well as generation. Hence, I use this approach to construct a new FST tool for Wolof, as a first step towards a computational grammar for the language in the Lexical Functional Grammar framework. This article focuses on the methods used to model complex morphological issues and on developing strategies to limit ambiguities. It discusses experimental evaluations conducted to assess the performance of the analyzer with respect to various statistical criteria. In particular, I also wanted to create morphosyntactically annotated resources for Wolof, obtained by automatically analyzing text corpora with a computational morphology.", "keyphrases": ["morphological analyzer", "wolof", "finite-state technique"]}
+{"id": "lee-yeung-2016-annotated", "title": "An Annotated Corpus of Direct Speech", "abstract": "We propose a scheme for annotating direct speech in literary texts, based on the Text Encoding Initiative (TEI) and the coreference annotation guidelines from the Message Understanding Conference (MUC). The scheme encodes the speakers and listeners of utterances in a text, as well as the quotative verbs that report the utterances. We measure inter-annotator agreement on this annotation task. We then present statistics on a manually annotated corpus that consists of books from the New Testament. Finally, we visualize the corpus as a conversational network.", "keyphrases": ["annotated corpus", "direct speech", "new testament"]}
+{"id": "kiperwasser-ballesteros-2018-scheduled", "title": "Scheduled Multi-Task Learning: From Syntax to Translation", "abstract": "Neural encoder-decoder models of machine translation have achieved impressive results, while learning linguistic knowledge of both the source and target languages in an implicit end-to-end manner. We propose a framework in which our model begins learning syntax and translation interleaved, gradually putting more focus on translation. Using this approach, we achieve considerable improvements in terms of BLEU score on a relatively large parallel corpus (WMT14 English to German) and a low-resource (WIT German to English) setup.", "keyphrases": ["multi-task learning", "syntax", "part-of-speech tagging"]}
+{"id": "suhr-etal-2019-executing", "title": "Executing Instructions in Situated Collaborative Interactions", "abstract": "We study a collaborative scenario where a user not only instructs a system to complete tasks, but also acts alongside it. This allows the user to adapt to the system abilities by changing their language or deciding to simply accomplish some tasks themselves, and requires the system to effectively recover from errors as the user strategically assigns it new goals. We build a game environment to study this scenario, and learn to map user instructions to system actions. We introduce a learning approach focused on recovery from cascading errors between instructions, and modeling methods to explicitly reason about instructions with multiple goals.
We evaluate with a new evaluation protocol using recorded interactions and online games with human users, and observe how users adapt to the system abilities.", "keyphrases": ["instruction", "collaborative scenario", "environment"]}
+{"id": "chieu-ng-2003-named", "title": "Named Entity Recognition with a Maximum Entropy Approach", "abstract": "The named entity recognition (NER) task involves identifying noun phrases that are names, and assigning a class to each name. This task has its origin from the Message Understanding Conferences (MUC) in the 1990s, a series of conferences aimed at evaluating systems that extract information from natural language texts. It became evident that in order to achieve good performance in information extraction, a system needs to be able to recognize names. A separate subtask on NER was created in MUC-6 and MUC-7 (Chinchor, 1998).", "keyphrases": ["entity recognition", "global feature", "same token", "gazetteer"]}
+{"id": "klementiev-etal-2012-inducing", "title": "Inducing Crosslingual Distributed Representations of Words", "abstract": "Distributed representations of words have proven extremely useful in numerous natural language processing tasks. Their appeal is that they can help alleviate data sparsity problems common to supervised learning. Methods for inducing these representations require only unlabeled language data, which are plentiful for many natural languages. In this work, we induce distributed representations for a pair of languages jointly. We treat it as a multitask learning problem where each task corresponds to a single word, and task relatedness is derived from co-occurrence statistics in bilingual parallel data. These representations can be used for a number of crosslingual learning tasks, where a learner can be trained on annotations present in one language and applied to test data in another. We show that our representations are informative by using them for crosslingual document classification, where classifiers trained on these representations substantially outperform strong baselines (e.g. machine translation) when applied to a new language.", "keyphrases": ["bilingual parallel data", "new language", "word embedding", "cross-lingual representation", "mapping"]}
+{"id": "yan-etal-2021-partition", "title": "A Partition Filter Network for Joint Entity and Relation Extraction", "abstract": "In joint entity and relation extraction, existing work either sequentially encodes task-specific features, leading to an imbalance in inter-task feature interaction where features extracted later have no direct contact with those that come first, or encodes entity features and relation features in a parallel manner, meaning that feature representation learning for each task is largely independent of the other except for input sharing. We propose a partition filter network to model two-way interaction between tasks properly, where feature encoding is decomposed into two steps: partition and filter. In our encoder, we leverage two gates: entity and relation gate, to segment neurons into two task partitions and one shared partition. The shared partition represents inter-task information valuable to both tasks and is evenly shared across two tasks to ensure proper two-way interaction. The task partitions represent intra-task information and are formed through concerted efforts of both gates, making sure that encoding of task-specific features is dependent upon each other.
Experimental results on six public datasets show that our model performs significantly better than previous approaches. In addition, contrary to what previous work has claimed, our auxiliary experiments suggest that relation prediction is contributory to named entity prediction in a non-negligible way. The source code can be found at .", "keyphrases": ["partition filter network", "joint entity", "relation extraction"]}
+{"id": "zhang-nivre-2011-transition", "title": "Transition-based Dependency Parsing with Rich Non-local Features", "abstract": "Transition-based dependency parsers generally use heuristic decoding algorithms but can accommodate arbitrarily rich feature representations. In this paper, we show that we can improve the accuracy of such parsers by considering even richer feature sets than those employed in previous systems. In the standard Penn Treebank setup, our novel features improve attachment score from 91.4% to 92.9%, giving the best results so far for transition-based parsing and rivaling the best results overall. For the Chinese Treebank, they give a significant improvement of the state of the art. An open source release of our parser is freely available.", "keyphrases": ["dependency parsing", "non-local feature", "transition-based model", "graph-based model", "linear time"]}
+{"id": "lai-nguyen-2019-extending", "title": "Extending Event Detection to New Types with Learning from Keywords", "abstract": "Traditional event detection classifies a word or a phrase in a given sentence for a set of predefined event types. The limitation of such a predefined set is that it prevents the adaptation of the event detection models to new event types. We study a novel formulation of event detection that describes types via several keywords to match the contexts in documents. This facilitates the operation of the models to new types. We introduce a novel feature-based attention mechanism for convolutional neural networks for event detection in the new formulation. Our extensive experiments demonstrate the benefits of the new formulation for new type extension for event detection as well as the proposed attention mechanism for this problem.", "keyphrases": ["event detection", "new type", "keyword"]}
+{"id": "choi-etal-2018-ultra", "title": "Ultra-Fine Entity Typing", "abstract": "We introduce a new entity typing task: given a sentence with an entity mention, the goal is to predict a set of free-form phrases (e.g. skyscraper, songwriter, or criminal) that describe appropriate types for the target entity. This formulation allows us to use a new type of distant supervision at large scale: head words, which indicate the type of the noun phrases they appear in. We show that these ultra-fine types can be crowd-sourced, and introduce new evaluation sets that are much more diverse and fine-grained than existing benchmarks. We present a model that can predict ultra-fine types, and is trained using a multitask objective that pools our new head-word supervision with prior supervision from entity linking.
Experimental results demonstrate that our model is effective in predicting entity types at varying granularity; it achieves state of the art performance on an existing fine-grained entity typing benchmark, and sets baselines for our newly-introduced datasets.", "keyphrases": ["entity type", "noun phrase", "granularity", "ultra-fine entity typing", "large type set"]} +{"id": "poon-domingos-2009-unsupervised", "title": "Unsupervised Semantic Parsing", "abstract": "We present the first unsupervised approach to the problem of learning a semantic parser, using Markov logic. Our USP system transforms dependency trees into quasi-logical forms, recursively induces lambda forms from these, and clusters them to abstract away syntactic variations of the same meaning. The MAP semantic parse of a sentence is obtained by recursively assigning its parts to lambda-form clusters and composing them. We evaluate our approach by using it to extract a knowledge base from biomedical abstracts and answer questions. USP substantially outperforms TextRunner, DIRT and an informed baseline on both precision and recall on this task.", "keyphrases": ["usp", "cluster", "unsupervised semantic parsing", "formalism", "negation"]} +{"id": "irvine-callison-burch-2017-comprehensive", "title": "A Comprehensive Analysis of Bilingual Lexicon Induction", "abstract": "Bilingual lexicon induction is the task of inducing word translations from monolingual corpora in two languages. In this article we present the most comprehensive analysis of bilingual lexicon induction to date. We present experiments on a wide range of languages and data sizes. We examine translation into English from 25 foreign languages: Albanian, Azeri, Bengali, Bosnian, Bulgarian, Cebuano, Gujarati, Hindi, Hungarian, Indonesian, Latvian, Nepali, Romanian, Serbian, Slovak, Somali, Spanish, Swedish, Tamil, Telugu, Turkish, Ukrainian, Uzbek, Vietnamese, and Welsh. We analyze the behavior of bilingual lexicon induction on low-frequency words, rather than testing solely on high-frequency words, as previous research has done. Low-frequency words are more relevant to statistical machine translation, where systems typically lack translations of rare words that fall outside of their training data. We systematically explore a wide range of features and phenomena that affect the quality of the translations discovered by bilingual lexicon induction. We provide illustrative examples of the highest ranking translations for orthogonal signals of translation equivalence like contextual similarity and temporal similarity. We analyze the effects of frequency and burstiness, and the sizes of the seed bilingual dictionaries and the monolingual training corpora. Additionally, we introduce a novel discriminative approach to bilingual lexicon induction. Our discriminative model is capable of combining a wide variety of features that individually provide only weak indications of translation equivalence. When feature weights are discriminatively set, these signals produce dramatically higher translation quality than previous approaches that combined signals in an unsupervised fashion (e.g., using minimum reciprocal rank). We also directly compare our model's performance against a sophisticated generative approach, the matching canonical correlation analysis (MCCA) algorithm used by Haghighi et al. (2008). 
Our algorithm achieves an accuracy of 42% versus MCCA's 15%.", "keyphrases": ["comprehensive analysis", "bilingual lexicon induction", "bli"]} +{"id": "zhu-etal-2021-enhancing", "title": "Enhancing Factual Consistency of Abstractive Summarization", "abstract": "Automatic abstractive summaries are found to often distort or fabricate facts in the article. This inconsistency between summary and original text has seriously impacted its applicability. We propose a fact-aware summarization model FASum to extract and integrate factual relations into the summary generation process via graph attention. We then design a factual corrector model FC to automatically correct factual errors from summaries generated by existing systems. Empirical results show that the fact-aware summarization can produce abstractive summaries with higher factual consistency compared with existing systems, and the correction model improves the factual consistency of given summaries via modifying only a few keywords.", "keyphrases": ["factual consistency", "graph attention", "knowledge graph"]} +{"id": "zhang-mcdonald-2012-generalized", "title": "Generalized Higher-Order Dependency Parsing with Cube Pruning", "abstract": "State-of-the-art graph-based parsers use features over higher-order dependencies that rely on decoding algorithms that are slow and difficult to generalize. On the other hand, transition-based dependency parsers can easily utilize such features without increasing the linear complexity of the shift-reduce system beyond a constant. In this paper, we attempt to address this imbalance for graph-based parsing by generalizing the Eisner (1996) algorithm to handle arbitrary features over higher-order dependencies. The generalization is at the cost of asymptotic efficiency. To account for this, cube pruning for decoding is utilized (Chiang, 2007). For the first time, label tuple and structural features such as valencies can be scored efficiently with third-order features in a graph-based parser. Our parser achieves the state-of-the-art unlabeled accuracy of 93.06% and labeled accuracy of 91.86% on the standard test set for English, at a faster speed than a reimplementation of the third-order model of Koo et al. (2010).", "keyphrases": ["cube pruning", "dependency parser", "higher-order feature"]} +{"id": "chernodub-etal-2019-targer", "title": "TARGER: Neural Argument Mining at Your Fingertips", "abstract": "We present TARGER, an open source neural argument mining framework for tagging arguments in free input texts and for keyword-based retrieval of arguments from an argument-tagged web-scale corpus. The currently available models are pre-trained on three recent argument mining datasets and enable the use of neural argument mining without any reproducibility effort on the user's side. The open source code ensures portability to other domains and use cases.", "keyphrases": ["neural argument mining", "input text", "retrieval", "targer"]} +{"id": "kuhlmann-nivre-2006-mildly", "title": "Mildly Non-Projective Dependency Structures", "abstract": "Syntactic parsing requires a fine balance between expressivity and complexity, so that naturally occurring structures can be accurately parsed without compromising efficiency. In dependency-based parsing, several constraints have been proposed that restrict the class of permissible structures, such as projectivity, planarity, multi-planarity, well-nestedness, gap degree, and edge degree.
While projectivity is generally taken to be too restrictive for natural language syntax, it is not clear which of the other proposals strikes the best balance between expressivity and complexity. In this paper, we review and compare the different constraints theoretically, and provide an experimental evaluation using data from two treebanks, investigating how large a proportion of the structures found in the treebanks are permitted under different constraints. The results indicate that a combination of the well-nestedness constraint and a parametric constraint on discontinuity gives a very good fit with the linguistic data.", "keyphrases": ["dependency structure", "planarity", "gap degree", "edge degree", "non-projective structure"]} +{"id": "ringger-etal-2008-assessing", "title": "Assessing the Costs of Machine-Assisted Corpus Annotation through a User Study", "abstract": "Fixed, limited budgets often constrain the amount of expert annotation that can go into the construction of annotated corpora. Estimating the cost of annotation is the first step toward using annotation resources wisely. We present here a study of the cost of annotation. This study includes the participation of annotators at various skill levels and with varying backgrounds. Conducted over the web, the study consists of tests that simulate machine-assisted pre-annotation, requiring correction by the annotator rather than annotation from scratch. The study also includes tests representative of an annotation scenario involving Active Learning as it progresses from a na\u00efve model to a knowledgeable model; in particular, annotators encounter pre-annotation of varying degrees of accuracy. The annotation interface lists tags considered likely by the annotation model in preference to other tags. We present the experimental parameters of the study and report both descriptive and inferential statistics on the results of the study. We conclude with a model for estimating the hourly cost of annotation for annotators of various skill levels. We also present models for two granularities of annotation: sentence at a time and word at a time.", "keyphrases": ["cost", "corpus annotation", "user study", "background", "active learning"]} +{"id": "frermann-lapata-2016-bayesian", "title": "A Bayesian Model of Diachronic Meaning Change", "abstract": "Word meanings change over time and an automated procedure for extracting this information from text would be useful for historical exploratory studies, information retrieval or question answering. We present a dynamic Bayesian model of diachronic meaning change, which infers temporal word representations as a set of senses and their prevalence. Unlike previous work, we explicitly model language change as a smooth, gradual process. We experimentally show that this modeling decision is beneficial: our model performs competitively on meaning change detection tasks whilst inducing discernible word senses and their development over time. Application of our model to the SemEval-2015 temporal classification benchmark datasets further reveals that it performs on par with highly optimized task-specific systems.", "keyphrases": ["diachronic meaning change", "gradual process", "word sense"]} +{"id": "okuma-etal-2009-bypassed", "title": "Bypassed alignment graph for learning coordination in Japanese sentences", "abstract": "Past work on English coordination has focused on coordination scope disambiguation. 
In Japanese, detecting whether coordination exists in a sentence is also a problem, and the state-of-the-art alignment-based method specialized for scope disambiguation does not perform well on Japanese sentences. To take the detection of coordination into account, this paper introduces a 'bypass' to the alignment graph used by this method, so as to explicitly represent the non-existence of coordinate structures in a sentence. We also present an effective feature decomposition scheme based on the distance between words in conjuncts.", "keyphrases": ["alignment graph", "coordination", "japanese sentence"]} +{"id": "vashishtha-etal-2020-temporal", "title": "Temporal Reasoning in Natural Language Inference", "abstract": "We introduce five new natural language inference (NLI) datasets focused on temporal reasoning. We recast four existing datasets annotated for event duration\u2014how long an event lasts\u2014and event ordering\u2014how events are temporally arranged\u2014into more than one million NLI examples. We use these datasets to investigate how well neural models trained on a popular NLI corpus capture these forms of temporal reasoning.", "keyphrases": ["natural language inference", "nli", "temporal reasoning", "duration"]} +{"id": "lee-etal-2020-postech", "title": "POSTECH-ETRI's Submission to the WMT2020 APE Shared Task: Automatic Post-Editing with Cross-lingual Language Model", "abstract": "This paper describes POSTECH-ETRI's submission to WMT2020 for the shared task on automatic post-editing (APE) for 2 language pairs: English-German (En-De) and English-Chinese (En-Zh). We propose APE systems based on a cross-lingual language model, which jointly adopts translation language modeling (TLM) and masked language modeling (MLM) training objectives in the pre-training stage; the APE models then utilize jointly learned language representations between the source language and the target language. In addition, we created 19 million new synthetic triplets as additional training data for our final ensemble model. According to experimental results on the WMT2020 APE development data set, our models showed an improvement over the baseline by TER of -3.58 and a BLEU score of +5.3 for the En-De subtask; and TER of -5.29 and a BLEU score of +7.32 for the En-Zh subtask.", "keyphrases": ["automatic post-editing", "language model", "postech-etri"]} +{"id": "luan-etal-2018-multi", "title": "Multi-Task Identification of Entities, Relations, and Coreference for Scientific Knowledge Graph Construction", "abstract": "We introduce a multi-task setup of identifying entities, relations, and coreference clusters in scientific articles. We create SciERC, a dataset that includes annotations for all three tasks and develop a unified framework called SciIE with shared span representations. The multi-task setup reduces cascading errors between tasks and leverages cross-sentence relations through coreference links. Experiments show that our multi-task model outperforms previous models in scientific information extraction without using any domain-specific features. We further show that the framework supports construction of a scientific knowledge graph, which we use to analyze information in scientific literature.", "keyphrases": ["coreference", "knowledge graph", "scientific article", "information extraction", "multi-task learning"]} +{"id": "feng-hirst-2011-classifying", "title": "Classifying arguments by scheme", "abstract": "Argumentation schemes are structures or templates for various kinds of arguments.
Given the text of an argument with premises and conclusion identified, we classify it as an instance of one of five common schemes, using features specific to each scheme. We achieve accuracies of 63\u201391% in one-against-others classification and 80\u201394% in pairwise classification (baseline = 50% in both cases).", "keyphrases": ["scheme", "argumentation scheme", "one-against-other classification", "monologic text", "araucaria corpus"]} +{"id": "liu-etal-2021-fast", "title": "Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders", "abstract": "Previous work has indicated that pretrained Masked Language Models (MLMs) are not effective as universal lexical and sentence encoders off-the-shelf, i.e., without further task-specific fine-tuning on NLI, sentence similarity, or paraphrasing tasks using annotated task data. In this work, we demonstrate that it is possible to turn MLMs into effective lexical and sentence encoders even without any additional data, relying simply on self-supervision. We propose an extremely simple, fast, and effective contrastive learning technique, termed Mirror-BERT, which converts MLMs (e.g., BERT and RoBERTa) into such encoders in 20-30 seconds with no access to additional external knowledge. Mirror-BERT relies on identical and slightly modified string pairs as positive (i.e., synonymous) fine-tuning examples, and aims to maximise their similarity during \u201cidentity fine-tuning\u201d. We report huge gains over off-the-shelf MLMs with Mirror-BERT both in lexical-level and in sentence-level tasks, across different domains and different languages. Notably, in sentence similarity (STS) and question-answer entailment (QNLI) tasks, our self-supervised Mirror-BERT model even matches the performance of the Sentence-BERT models from prior work which rely on annotated task data. Finally, we delve deeper into the inner workings of MLMs, and suggest some evidence on why this simple Mirror-BERT fine-tuning approach can yield effective universal lexical and sentence encoders.", "keyphrases": ["masked language models", "sentence encoder", "fine-tuning", "contrastive learning technique"]} +{"id": "sinha-etal-2019-clutrr", "title": "CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text", "abstract": "The recent success of natural language understanding (NLU) systems has been troubled by results highlighting the failure of these models to generalize in a systematic and robust way. In this work, we introduce a diagnostic benchmark suite, named CLUTRR, to clarify some key issues related to the robustness and systematicity of NLU systems. Motivated by the classic work on inductive logic programming, CLUTRR requires that an NLU system infer kinship relations between characters in short stories. Successful performance on this task requires both extracting relationships between entities, as well as inferring the logical rules governing these relationships. CLUTRR allows us to precisely measure a model's ability for systematic generalization by evaluating on held-out combinations of logical rules, and allows us to evaluate a model's robustness by adding curated noise facts.
Our empirical results highlight a substantial performance gap between state-of-the-art NLU models (e.g., BERT and MAC) and a graph neural network model that works directly with symbolic inputs\u2014with the graph-based model exhibiting both stronger generalization and greater robustness.", "keyphrases": ["generalization", "story", "clutrr", "synthetic dataset"]} +{"id": "regneri-etal-2013-grounding", "title": "Grounding Action Descriptions in Videos", "abstract": "Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.", "keyphrases": ["action", "video", "visual information", "language description"]} +{"id": "luu-etal-2014-taxonomy", "title": "Taxonomy Construction Using Syntactic Contextual Evidence", "abstract": "Taxonomies are the backbone of many structured, semantic knowledge resources. Recent works for extracting taxonomic relations from text focused on collecting lexical-syntactic patterns to extract the taxonomic relations by matching the patterns to text. These approaches, however, often show low coverage due to the lack of contextual analysis across sentences. To address this issue, we propose a novel approach that collectively utilizes contextual information of terms in syntactic structures such that if the set of contexts of a term includes most of the contexts of another term, a subsumption relation between the two terms is inferred. We apply this method to the task of taxonomy construction from scratch, where we introduce another novel graph-based algorithm for taxonomic structure induction. Our experiment results show that the proposed method is well complementary with previous methods of linguistic pattern matching and significantly improves recall and thus F-measure.", "keyphrases": ["contextual information", "recall", "taxonomy construction"]} +{"id": "lee-etal-2019-latent", "title": "Latent Retrieval for Weakly Supervised Open Domain Question Answering", "abstract": "Recent work on open domain question answering (QA) assumes strong supervision of the supporting evidence and/or assumes a blackbox information retrieval (IR) system to retrieve evidence candidates. We argue that both are suboptimal, since gold evidence is not always available, and QA is fundamentally different from IR. We show for the first time that it is possible to jointly learn the retriever and reader from question-answer string pairs and without any IR system. In this setting, evidence retrieval from all of Wikipedia is treated as a latent variable. Since this is impractical to learn from scratch, we pre-train the retriever with an Inverse Cloze Task. We evaluate on open versions of five QA datasets. On datasets where the questioner already knows the answer, a traditional IR system such as BM25 is sufficient.
On datasets where a user is genuinely seeking an answer, we show that learned retrieval is crucial, outperforming BM25 by up to 19 points in exact match.", "keyphrases": ["retriever", "domain question", "inverse cloze task", "self-supervised task", "sentence encoder"]} +{"id": "purandare-litman-2006-humor", "title": "Humor: Prosody Analysis and Automatic Recognition for F*R*I*E*N*D*S*", "abstract": "We analyze humorous spoken conversations from a classic comedy television show, FRIENDS, by examining acoustic-prosodic and linguistic features and their utility in automatic humor recognition. Using a simple annotation scheme, we automatically label speaker turns in our corpus that are followed by laughs as humorous and the rest as non-humorous. Our humor-prosody analysis reveals significant differences in prosodic characteristics (such as pitch, tempo, energy etc.) of humorous and non-humorous speech, even when accounting for gender and speaker differences. Humor recognition was carried out using standard supervised learning classifiers, and shows promising results significantly above the baseline.", "keyphrases": ["conversation", "comedy television show", "characteristic", "humor"]} +{"id": "weeds-etal-2004-characterising", "title": "Characterising Measures of Lexical Distributional Similarity", "abstract": "This work investigates the variation in a word's distributionally nearest neighbours with respect to the similarity measure used. We identify one type of variation as being the relative frequency of the neighbour words with respect to the frequency of the target word. We then demonstrate a three-way connection between relative frequency of similar words, a concept of distributional generality and the semantic relation of hyponymy. Finally, we consider the impact that this has on one application of distributional similarity methods (judging the compositionality of collocations).", "keyphrases": ["similarity measure", "target word", "distributional inclusion hypothesis", "entailment", "generality"]} +{"id": "swanson-etal-2015-argument", "title": "Argument Mining: Extracting Arguments from Online Dialogue", "abstract": "Online forums are now one of the primary venues for public dialogue on current social and political issues. The related corpora are often huge, covering any topic imaginable. Our aim is to use these dialogue corpora to automatically discover the semantic aspects of arguments that conversants are making across multiple dialogues on a topic. We frame this goal as consisting of two tasks: argument extraction and argument facet similarity. We focus here on the argument extraction task, and show that we can train regressors to predict the quality of extracted arguments with RRSE values as low as .73 for some topics. A secondary goal is to develop regressors that are topic independent: we report results of cross-domain training and domain-adaptation with RRSE values for several topics as low as .72, when trained on topic independent features.", "keyphrases": ["regressor", "argument mining", "online forum dialogue"]} +{"id": "elliott-etal-2017-findings", "title": "Findings of the Second Shared Task on Multimodal Machine Translation and Multilingual Image Description", "abstract": "We present the results from the second shared task on multimodal machine translation and multilingual image description. Nine teams submitted 19 systems to two tasks.
The multimodal translation task, in which the source sentence is supplemented by an image, was extended with a new language (French) and two new test sets. The multilingual image description task was changed such that at test time, only the image is given. Compared to last year, multimodal systems improved, but text-only systems remain competitive.", "keyphrases": ["multimodal machine translation", "multilingual image description", "edition"]} +{"id": "lusetti-etal-2018-encoder", "title": "Encoder-Decoder Methods for Text Normalization", "abstract": "Text normalization is the task of mapping non-canonical language, typical of speech transcription and computer-mediated communication, to a standardized writing. It is an up-stream task necessary to enable the subsequent direct employment of standard natural language processing tools and indispensable for languages such as Swiss German, with strong regional variation and no written standard. Text normalization has been addressed with a variety of methods, most successfully with character-level statistical machine translation (CSMT). In the meantime, machine translation has changed and the new methods, known as neural encoder-decoder (ED) models, resulted in remarkable improvements. Text normalization, however, has not yet followed. A number of neural methods have been tried, but CSMT remains the state-of-the-art. In this work, we normalize Swiss German WhatsApp messages using the ED framework. We exploit the flexibility of this framework, which allows us to learn from the same training data in different ways. In particular, we modify the decoding stage of a plain ED model to include target-side language models operating at different levels of granularity: characters and words. Our systematic comparison shows that our approach results in an improvement over the CSMT state-of-the-art.", "keyphrases": ["text normalization", "non-canonical language", "speech transcription", "german whatsapp message", "encoder-decoder"]} +{"id": "munteanu-marcu-2006-extracting", "title": "Extracting Parallel Sub-Sentential Fragments from Non-Parallel Corpora", "abstract": "We present a novel method for extracting parallel sub-sentential fragments from comparable, non-parallel bilingual corpora. By analyzing potentially similar sentence pairs using a signal processing-inspired approach, we detect which segments of the source sentence are translated into segments in the target sentence, and which are not. This method enables us to extract useful machine translation training data even from very non-parallel corpora, which contain no parallel sentence pairs. We evaluate the quality of the extracted data by showing that it improves the performance of a state-of-the-art statistical machine translation system.", "keyphrases": ["parallel sub-sentential fragment", "fragment", "non-parallel corpora", "parallel data", "word translation probability"]} +{"id": "lapata-2006-automatic", "title": "Automatic Evaluation of Information Ordering: Kendall's Tau", "abstract": "This article considers the automatic evaluation of information ordering, a task underlying many text-based applications such as concept-to-text generation and multidocument summarization. We propose an evaluation method based on Kendall's \u03c4, a metric of rank correlation. The method is inexpensive, robust, and representation independent.
We show that Kendall's \u03c4 correlates reliably with human ratings and reading times.", "keyphrases": ["information ordering", "reading time", "automatic evaluation"]} +{"id": "shinyama-sekine-2003-paraphrase", "title": "Paraphrase Acquisition for Information Extraction", "abstract": "We are trying to find paraphrases from Japanese news articles which can be used for Information Extraction. We focused on the fact that a single event can be reported in more than one article in different ways. However, certain kinds of noun phrases such as names, dates and numbers behave as \"anchors\" which are unlikely to change across articles. Our key idea is to identify these anchors among comparable articles and extract portions of expressions which share the anchors. This way we can extract expressions which convey the same information. Obtained paraphrases are generalized as templates and stored for future use. In this paper, first we describe our basic idea of paraphrase acquisition. Our method is divided into roughly four steps, each of which is explained in turn. Then we illustrate several issues which we encounter in real texts. To solve these problems, we introduce two techniques: coreference resolution and structural restriction of possible portions of expressions. Finally we discuss the experimental results and conclusions.", "keyphrases": ["information extraction", "news article", "paraphrase acquisition", "same event"]} +{"id": "wang-etal-2018-modeling", "title": "Modeling Semantic Plausibility by Injecting World Knowledge", "abstract": "Distributional data tells us that a man can swallow candy, but not that a man can swallow a paintball, since this is never attested. However both are physically plausible events. This paper introduces the task of semantic plausibility: recognizing plausible but possibly novel events. We present a new crowdsourced dataset of semantic plausibility judgments of single events such as man swallow paintball. Simple models based on distributional representations perform poorly on this task, despite doing well on selection preference, but injecting manually elicited knowledge about entity properties provides a substantial performance boost. Our error analysis shows that our new dataset is a great testbed for semantic plausibility models: more sophisticated knowledge representation and propagation could address many of the remaining errors.", "keyphrases": ["semantic plausibility", "world knowledge", "distributional data"]} +{"id": "chiu-etal-2016-intrinsic", "title": "Intrinsic Evaluation of Word Vectors Fails to Predict Extrinsic Performance", "abstract": "The quality of word representations is frequently assessed using correlation with human judgements of word similarity. Here, we question whether such intrinsic evaluation can predict the merits of the representations for downstream tasks. We study the correlation between results on ten word similarity benchmarks and tagger performance on three standard sequence labeling tasks using a variety of word vectors induced from an unannotated corpus of 3.8 billion words, and demonstrate that most intrinsic evaluations are poor predictors of downstream performance. We argue that this issue can be traced in part to a failure to distinguish specific similarity from relatedness in intrinsic evaluation datasets.
We make our evaluation tools openly available to facilitate further study.", "keyphrases": ["word vector", "poor predictor", "intrinsic evaluation"]} +{"id": "daya-etal-2004-learning", "title": "Learning Hebrew Roots: Machine Learning with Linguistic Constraints", "abstract": "The morphology of Semitic languages is unique in the sense that the major word-formation mechanism is an inherently non-concatenative process of interdigitation, whereby two morphemes, a root and a pattern, are interwoven. Identifying the root of a given word in a Semitic language is an important task, in some cases a crucial part of morphological analysis. It is also a non-trivial task, which many humans find challenging. We present a machine learning approach to the problem of extracting roots of Hebrew words. Given the large number of potential roots (thousands), we address the problem as one of combining several classifiers, each predicting the value of one of the root\u2019s consonants. We show that when these predictors are combined by enforcing some fairly simple linguistic constraints, high accuracy, which compares favorably with human performance on this task, can be achieved.", "keyphrases": ["hebrew root", "machine learning", "linguistic constraint", "morphology"]} +{"id": "rajagopal-etal-2022-curie", "title": "CURIE: An Iterative Querying Approach for Reasoning About Situations", "abstract": "Predicting the effects of unexpected situations is an important reasoning task, e.g., would cloudy skies help or hinder plant growth? Given a context, the goal of such situational reasoning is to elicit the consequences of a new situation (st) that arises in that context. We propose CURIE, a method to iteratively build a graph of relevant consequences explicitly in a structured situational graph (st graph) using natural language queries over a finetuned language model. Across multiple domains, CURIE generates st graphs that humans find relevant and meaningful in eliciting the consequences of a new situation (75% of the graphs were judged correct by humans). We present a case study of a situation reasoning end task (WIQA-QA), where simply augmenting their input with st graphs improves accuracy by 3 points. We show that these improvements mainly come from a hard subset of the data, that requires background knowledge and multi-hop reasoning.", "keyphrases": ["reasoning", "situation", "curie"]} +{"id": "cai-etal-2017-crf", "title": "CRF Autoencoder for Unsupervised Dependency Parsing", "abstract": "Unsupervised dependency parsing, which tries to discover linguistic dependency structures from unannotated data, is a very challenging task. Almost all previous work on this task focuses on learning generative models. In this paper, we develop an unsupervised dependency parsing model based on the CRF autoencoder. The encoder part of our model is discriminative and globally normalized which allows us to use rich features as well as universal linguistic priors. We propose an exact algorithm for parsing as well as a tractable learning algorithm.
We evaluated the performance of our model on eight multilingual treebanks and found that our model achieved comparable performance with state-of-the-art approaches.", "keyphrases": ["unsupervised dependency parsing", "generative model", "crf autoencoder", "head"]} +{"id": "kocisky-etal-2018-narrativeqa", "title": "The NarrativeQA Reading Comprehension Challenge", "abstract": "Reading comprehension (RC)\u2014in contrast to information retrieval\u2014requires integrating information and reasoning about events, entities, and their relations across a full document. Question answering is conventionally used to assess RC ability, in both artificial agents and children learning to read. However, existing RC datasets and tasks are dominated by questions that can be solved by selecting answers using superficial information (e.g., local context similarity or global term frequency); they thus fail to test for the essential integrative aspect of RC. To encourage progress on deeper comprehension of language, we present a new dataset and set of tasks in which the reader must answer questions about stories by reading entire books or movie scripts. These tasks are designed so that successfully answering their questions requires understanding the underlying narrative rather than relying on shallow pattern matching or salience. We show that although humans solve the tasks easily, standard RC models struggle on the tasks presented here. We provide an analysis of the dataset and the challenges it presents.", "keyphrases": ["narrativeqa", "comprehension", "question answering", "book", "annotator"]} +{"id": "pivovarova-etal-2013-event", "title": "Event representation across genre", "abstract": "This paper describes an approach for investigating the representation of events and their distribution in a corpus. We collect and analyze statistics about subject-verb-object triplets and their content, which helps us compare corpora belonging to the same domain but to different genre/text type. We argue that event structure is strongly related to the genre of the corpus, and propose statistical properties that are able to capture these genre differences. The results obtained can be used for the improvement of Information Extraction.", "keyphrases": ["genre", "statistic", "event representation"]} +{"id": "xie-etal-2018-noising", "title": "Noising and Denoising Natural Language: Diverse Backtranslation for Grammar Correction", "abstract": "Translation-based methods for grammar correction that directly map noisy, ungrammatical text to their clean counterparts are able to correct a broad range of errors; however, such techniques are bottlenecked by the need for a large parallel corpus of noisy and clean sentence pairs. In this paper, we consider synthesizing parallel data by noising a clean monolingual corpus. While most previous approaches introduce perturbations using features computed from local context windows, we instead develop error generation processes using a neural sequence transduction model trained to translate clean examples to their noisy counterparts. Given a corpus of clean examples, we propose beam search noising procedures to synthesize additional noisy examples that human evaluators were nearly unable to discriminate from nonsynthesized examples. 
Surprisingly, when trained on additional data synthesized using our best-performing noising scheme, our model approaches the same performance as when trained on additional nonsynthesized data.", "keyphrases": ["grammar correction", "noise", "back-translation"]} +{"id": "zarrella-marsh-2016-mitre", "title": "MITRE at SemEval-2016 Task 6: Transfer Learning for Stance Detection", "abstract": "We describe MITRE's submission to the SemEval-2016 Task 6, Detecting Stance in Tweets. This effort achieved the top score in Task A on supervised stance detection, producing an average F1 score of 67.8 when assessing whether a tweet author was in favor or against a topic. We employed a recurrent neural network initialized with features learned via distant supervision on two large unlabeled datasets. We trained embeddings of words and phrases with the word2vec skip-gram method, then used those features to learn sentence representations via a hashtag prediction auxiliary task. These sentence vectors were then fine-tuned for stance detection on several hundred labeled examples. The result was a high performing system that used transfer learning to maximize the value of the available training data.", "keyphrases": ["semeval-2016 task", "transfer learning", "stance detection", "sentence representation", "auxiliary task"]} +{"id": "iyyer-etal-2014-political", "title": "Political Ideology Detection Using Recursive Neural Networks", "abstract": "An individual\u2019s words often reveal their political ideology. Existing automated techniques to identify ideology from text focus on bags of words or wordlists, ignoring syntax. Taking inspiration from recent work in sentiment analysis that successfully models the compositional aspect of language, we apply a recursive neural network (RNN) framework to the task of identifying the political position evinced by a sentence. To show the importance of modeling subsentential elements, we crowdsource political annotations at a phrase and sentence level. Our model outperforms existing models on our newly annotated dataset and an existing dataset.", "keyphrases": ["recursive neural network", "political ideology detection", "text classification task"]} +{"id": "agirre-etal-2014-random", "title": "Random Walks for Knowledge-Based Word Sense Disambiguation", "abstract": "Word Sense Disambiguation (WSD) systems automatically choose the intended meaning of a word in context. In this article we present a WSD algorithm based on random walks over large Lexical Knowledge Bases (LKB). We show that our algorithm performs better than other graph-based methods when run on a graph built from WordNet and eXtended WordNet. Our algorithm and LKB combination compares favorably to other knowledge-based approaches in the literature that use similar knowledge on a variety of English data sets and a data set on Spanish. We include a detailed analysis of the factors that affect the algorithm. The algorithm and the LKBs used are publicly available, and the results easily reproducible.", "keyphrases": ["word sense disambiguation", "graph-based algorithm", "random walk"]} +{"id": "habernal-gurevych-2016-makes", "title": "What makes a convincing argument? Empirical analysis and detecting attributes of convincingness in Web argumentation", "abstract": "This article tackles a new challenging task in computational argumentation. 
Given a pair of two arguments to a certain controversial topic, we aim to directly assess qualitative properties of the arguments in order to explain why one argument is more convincing than the other one. We approach this task in a fully empirical manner by annotating 26k explanations written in natural language. These explanations describe convincingness of arguments in the given argument pair, such as their strengths or flaws. We create a new crowd-sourced corpus containing 9,111 argument pairs, multi-labeled with 17 classes, which was cleaned and curated by employing several strict quality measures. We propose two tasks on this data set, namely (1) predicting the full label distribution and (2) classifying types of flaws in less convincing arguments. Our experiments with feature-rich SVM learners and Bidirectional LSTM neural networks with convolution and attention mechanism reveal that such a novel fine-grained analysis of Web argument convincingness is a very challenging task. We release the new UKPConvArg2 corpus and software under permissive licenses to the research community.", "keyphrases": ["convincingness", "empirical manner", "argument pair"]} +{"id": "sun-korhonen-2009-improving", "title": "Improving Verb Clustering with Automatically Acquired Selectional Preferences", "abstract": "In previous research in automatic verb classification, syntactic features have proved the most useful features, although manual classifications rely heavily on semantic features. We show, in contrast with previous work, that considerable additional improvement can be obtained by using semantic features in automatic classification: verb selectional preferences acquired from corpus data using a fully unsupervised method. We report these promising results using a new framework for verb clustering which incorporates a recent subcategorization acquisition system, rich syntactic-semantic feature sets, and a variation of spectral clustering which performs particularly well in high dimensional feature space.", "keyphrases": ["verb clustering", "selectional preference", "feature space"]} +{"id": "peled-reichart-2017-sarcasm", "title": "Sarcasm SIGN: Interpreting Sarcasm with Sentiment Based Monolingual Machine Translation", "abstract": "Sarcasm is a form of speech in which speakers say the opposite of what they truly mean in order to convey a strong sentiment. In other words, \u201cSarcasm is the giant chasm between what I say, and the person who doesn't get it.\u201d. In this paper we present the novel task of sarcasm interpretation, defined as the generation of a non-sarcastic utterance conveying the same message as the original sarcastic one. We introduce a novel dataset of 3000 sarcastic tweets, each interpreted by five human judges. Addressing the task as monolingual machine translation (MT), we experiment with MT algorithms and evaluation measures. We then present SIGN: an MT based sarcasm interpretation algorithm that targets sentiment words, a defining element of textual sarcasm. We show that while the scores of n-gram based automatic measures are similar for all interpretation models, SIGN's interpretations are scored higher by humans for adequacy and sentiment polarity. 
We conclude with a discussion on future research directions for our new task.", "keyphrases": ["machine translation", "non-sarcastic utterance", "sarcasm"]} +{"id": "callison-burch-etal-2007-meta", "title": "(Meta-) Evaluation of Machine Translation", "abstract": "This paper evaluates the translation quality of machine translation systems for 8 language pairs: translating French, German, Spanish, and Czech to English and back. We carried out an extensive human evaluation which allowed us not only to rank the different MT systems, but also to perform higher-level analysis of the evaluation process. We measured timing and intra- and inter-annotator agreement for three types of subjective evaluation. We measured the correlation of automatic evaluation metrics with human judgments. This meta-evaluation reveals surprising facts about the most commonly used methodologies.", "keyphrases": ["machine translation", "inter-annotator agreement", "judgment", "gim\u00e9nez", "fluency"]} +{"id": "muzny-zettlemoyer-2013-automatic", "title": "Automatic Idiom Identification in Wiktionary", "abstract": "Online resources, such as Wiktionary, provide an accurate but incomplete source of idiomatic phrases. In this paper, we study the problem of automatically identifying idiomatic dictionary entries with such resources. We train an idiom classifier on a newly gathered corpus of over 60,000 Wiktionary multi-word definitions, incorporating features that model whether phrase meanings are constructed compositionally. Experiments demonstrate that the learned classifier can provide high quality idiom labels, more than doubling the number of idiomatic entries from 7,764 to 18,155 at precision levels of over 65%. These gains also translate to idiom detection in sentences, by simply using known word sense disambiguation algorithms to match phrases to their definitions. In a set of Wiktionary definition example sentences, the more complete set of idioms boosts detection recall by over 28 percentage points.", "keyphrases": ["wiktionary", "precision level", "graph-based feature", "wordnet"]} +{"id": "chisholm-hachey-2015-entity", "title": "Entity Disambiguation with Web Links", "abstract": "Entity disambiguation with Wikipedia relies on structured information from redirect pages, article text, inter-article links, and categories. We explore whether web links can replace a curated encyclopaedia, obtaining entity prior, name, context, and coherence models from a corpus of web pages with links to Wikipedia. Experiments compare web link models to Wikipedia models on well-known conll and tac data sets. Results show that using 34 million web links approaches Wikipedia performance. Combining web link and Wikipedia models produces the best-known disambiguation accuracy of 88.7 on standard newswire test data.", "keyphrases": ["web link", "wikipedia", "entity disambiguation"]} +{"id": "breit-etal-2021-wic", "title": "WiC-TSV: An Evaluation Benchmark for Target Sense Verification of Words in Context", "abstract": "We present WiC-TSV, a new multi-domain evaluation benchmark for Word Sense Disambiguation. More specifically, we introduce a framework for Target Sense Verification of Words in Context which grounds its uniqueness in the formulation as binary classification task thus being independent of external sense inventories, and the coverage of various domains. This makes the dataset highly flexible for the evaluation of a diverse set of models and systems in and across domains. 
WiC-TSV provides three different evaluation settings, depending on the input signals provided to the model. We set baseline performance on the dataset using state-of-the-art language models. Experimental results show that even though these models can perform decently on the task, there remains a gap between machine and human performance, especially in out-of-domain settings. WiC-TSV data is available at .", "keyphrases": ["evaluation benchmark", "target sense verification", "wic-tsv"]} +{"id": "fan-etal-2018-multi", "title": "Multi-grained Attention Network for Aspect-Level Sentiment Classification", "abstract": "We propose a novel multi-grained attention network (MGAN) model for aspect level sentiment classification. Existing approaches mostly adopt coarse-grained attention mechanism, which may bring information loss if the aspect has multiple words or larger context. We propose a fine-grained attention mechanism, which can capture the word-level interaction between aspect and context. And then we leverage the fine-grained and coarse-grained attention mechanisms to compose the MGAN framework. Moreover, unlike previous works which train each aspect with its context separately, we design an aspect alignment loss to depict the aspect-level interactions among the aspects that have the same context. We evaluate the proposed approach on three datasets: laptop and restaurant are from SemEval 2014, and the last one is a twitter dataset. Experimental results show that the multi-grained attention network consistently outperforms the state-of-the-art methods on all three datasets. We also conduct experiments to evaluate the effectiveness of aspect alignment loss, which indicates the aspect-level interactions can bring extra useful information and further improve the performance.", "keyphrases": ["sentiment classification", "aspect-level interaction", "multi-grained attention network"]} +{"id": "ritter-etal-2011-named", "title": "Named Entity Recognition in Tweets: An Experimental Study", "abstract": "People tweet more than 100 Million times daily, yielding a noisy, informal, but sometimes informative corpus of 140-character messages that mirrors the zeitgeist in an unprecedented manner. The performance of standard NLP tools is severely degraded on tweets. This paper addresses this issue by re-building the NLP pipeline beginning with part-of-speech tagging, through chunking, to named-entity recognition. Our novel T-ner system doubles F1 score compared with the Stanford NER system. T-ner leverages the redundancy inherent in tweets to achieve this performance, using LabeledLDA to exploit Freebase dictionaries as a source of distant supervision. LabeledLDA outperforms co-training, increasing F1 by 25% over ten common entity types. \n \nOur NLP tools are available at: http://github.com/aritter/twitter_nlp", "keyphrases": ["entity recognition", "tweets", "standard pos tagger", "noisy text", "dependency parser"]} +{"id": "gkotsis-etal-2016-dont", "title": "Don't Let Notes Be Misunderstood: A Negation Detection Method for Assessing Risk of Suicide in Mental Health Records", "abstract": "Mental Health Records (MHRs) contain freetext documentation about patients\u2019 suicide and suicidality. In this paper, we address the problem of determining whether grammatic variants (inflections) of the word \u201csuicide\u201d are affirmed or negated. To achieve this, we populate and annotate a dataset with over 6,000 sentences originating from a large repository of MHRs. 
The resulting dataset has high Inter-Annotator Agreement (\u03ba = 0.93). Furthermore, we develop and propose a negation detection method that leverages syntactic features of text. Using parse trees, we build a set of basic rules that rely on minimum domain knowledge and render the problem as binary classification (affirmed vs. negated). Since the overall goal is to identify patients who are expected to be at high risk of suicide, we focus on the evaluation of positive (affirmed) cases as determined by our classifier. Our negation detection approach yields a recall (sensitivity) value of 94.6% for the positive cases and an overall accuracy value of 91.9%. We believe that our approach can be integrated with other clinical Natural Language Processing tools in order to further advance information extraction capabilities.", "keyphrases": ["negation detection method", "suicide", "mental health records", "parse tree"]} +{"id": "niemann-gurevych-2011-peoples", "title": "The People's Web meets Linguistic Knowledge: Automatic Sense Alignment of Wikipedia and WordNet", "abstract": "We propose a method to automatically align WordNet synsets and Wikipedia articles to obtain a sense inventory of higher coverage and quality. For each WordNet synset, we first extract a set of Wikipedia articles as alignment candidates; in a second step, we determine which article (if any) is a valid alignment, i.e. is about the same sense or concept. In this paper, we go significantly beyond state-of-the-art word overlap approaches, and apply a threshold-based Personalized PageRank method for the disambiguation step. We show that WordNet synsets can be aligned to Wikipedia articles with a performance of up to 0.78 F1-Measure based on a comprehensive, well-balanced reference dataset consisting of 1,815 manually annotated sense alignment candidates. The fully-aligned resource as well as the reference dataset is publicly available.", "keyphrases": ["wordnet", "alignment candidate", "personalized pagerank method"]} +{"id": "kominek-black-2006-learning", "title": "Learning Pronunciation Dictionaries: Language Complexity and Word Selection Strategies", "abstract": "The speed with which pronunciation dictionaries can be bootstrapped depends on the efficiency of learning algorithms and on the ordering of words presented to the user. This paper presents an active-learning word selection strategy that is mindful of human limitations. Learning rates approach that of an oracle system that knows the final LTS rule set.", "keyphrases": ["pronunciation dictionary", "word selection strategy", "letter-to-sound rule"]} +{"id": "ionescu-etal-2016-string", "title": "String Kernels for Native Language Identification: Insights from Behind the Curtains", "abstract": "The most common approach in text mining classification tasks is to rely on features like words, part-of-speech tags, stems, or some other high-level linguistic features. Recently, an approach that uses only character p-grams as features has been proposed for the task of native language identification (NLI). The approach obtained state-of-the-art results by combining several string kernels using multiple kernel learning. Despite the fact that the approach based on string kernels performs so well, several questions about this method remain unanswered. First, it is not clear why such a simple approach can compete with far more complex approaches that take words, lemmas, syntactic information, or even semantics into account.
Second, although the approach is designed to be language independent, all experiments to date have been on English. This work is an extensive study that aims to systematically present the string kernel approach and to clarify the open questions mentioned above. A broad set of native language identification experiments were conducted to compare the string kernels approach with other state-of-the-art methods. The empirical results obtained in all of the experiments conducted in this work indicate that the proposed approach achieves state-of-the-art performance in NLI, reaching an accuracy that is 1.7% above the top scoring system of the 2013 NLI Shared Task. Furthermore, the results obtained on both the Arabic and the Norwegian corpora demonstrate that the proposed approach is language independent. In the Arabic native language identification task, string kernels show an increase of more than 17% over the best accuracy reported so far. The results of string kernels on Norwegian native language identification are also significantly better than the state-of-the-art approach. In addition, in a cross-corpus experiment, the proposed approach shows that it can also be topic independent, improving the state-of-the-art system by 32.3%. To gain additional insights about the string kernels approach, the features selected by the classifier as being more discriminating are analyzed in this work. The analysis also offers information about localized language transfer effects, since the features used by the proposed model are p-grams of various lengths. The features captured by the model typically include stems, function words, and word prefixes and suffixes, which have the potential to generalize over purely word-based features. By analyzing the discriminating features, this article offers insights into two kinds of language transfer effects, namely, word choice (lexical transfer) and morphological differences. The goal of the current study is to give a full view of the string kernels approach and shed some light on why this approach works so well.", "keyphrases": ["native language identification", "nli shared task", "string kernel", "text analysis task"]} +{"id": "wallace-etal-2014-humans", "title": "Humans Require Context to Infer Ironic Intent (so Computers Probably do, too)", "abstract": "Automatically detecting verbal irony (roughly, sarcasm) is a challenging task because ironists say something other than \u2010 and often opposite to \u2010 what they actually mean. Discerning ironic intent exclusively from the words and syntax comprising texts (e.g., tweets, forum posts) is therefore not always possible: additional contextual information about the speaker and/or the topic at hand is often necessary. We introduce a new corpus that provides empirical evidence for this claim. We show that annotators frequently require context to make judgements concerning ironic intent, and that machine learning approaches tend to misclassify those same comments for which annotators required additional context.", "keyphrases": ["intent", "irony", "contextual information", "annotator", "sarcastic intent"]} +{"id": "bohnet-etal-2013-joint", "title": "Joint Morphological and Syntactic Analysis for Richly Inflected Languages", "abstract": "Joint morphological and syntactic analysis has been proposed as a way of improving parsing accuracy for richly inflected languages.
Starting from a transition-based model for joint part-of-speech tagging and dependency parsing, we explore different ways of integrating morphological features into the model. We also investigate the use of rule-based morphological analyzers to provide hard or soft lexical constraints and the use of word clusters to tackle the sparsity of lexical features. Evaluation on five morphologically rich languages (Czech, Finnish, German, Hungarian, and Russian) shows consistent improvements in both morphological and syntactic accuracy for joint prediction over a pipeline model, with further improvements thanks to lexical constraints and word clusters. The final results improve the state of the art in dependency parsing for all languages.", "keyphrases": ["syntactic analysis", "tagging", "morphological analyzer", "czech", "transition-based parser"]} +{"id": "morris-etal-2020-textattack", "title": "TextAttack: A Framework for Adversarial Attacks, Data Augmentation, and Adversarial Training in NLP", "abstract": "While there has been substantial research using adversarial attacks to analyze NLP models, each attack is implemented in its own code repository. It remains challenging to develop NLP attacks and utilize them to improve model performance. This paper introduces TextAttack, a Python framework for adversarial attacks, data augmentation, and adversarial training in NLP. TextAttack builds attacks from four components: a goal function, a set of constraints, a transformation, and a search method. TextAttack's modular design enables researchers to easily construct attacks from combinations of novel and existing components. TextAttack provides implementations of 16 adversarial attacks from the literature and supports a variety of models and datasets, including BERT and other transformers, and all GLUE tasks. TextAttack also includes data augmentation and adversarial training modules for using components of adversarial attacks to improve model accuracy and robustness. TextAttack is democratizing NLP: anyone can try data augmentation and adversarial training on any model or dataset, with just a few lines of code. Code and tutorials are available at .", "keyphrases": ["data augmentation", "adversarial training", "nlp model", "textattack"]} +{"id": "condoravdi-etal-2003-entailment", "title": "Entailment, intensionality and text understanding", "abstract": "We argue that the detection of entailment and contradiction relations between texts is a minimal metric for the evaluation of text understanding systems. Intensionality, which is widespread in natural language, raises a number of detection issues that cannot be brushed aside. We describe a contexted clausal representation, derived from approaches in formal semantics, that permits an extended range of intensional entailments and contradictions to be tractably detected.", "keyphrases": ["intensionality", "text understanding", "contradiction", "entailment", "nli"]} +{"id": "kummerfeld-etal-2019-large", "title": "A Large-Scale Corpus for Conversation Disentanglement", "abstract": "Disentangling conversations mixed together in a single stream of messages is a difficult task, made harder by the lack of large manually annotated datasets. We created a new dataset of 77,563 messages manually annotated with reply-structure graphs that both disentangle conversations and define internal conversation structure. 
Our data is 16 times larger than all previously released datasets combined, the first to include adjudication of annotation disagreements, and the first to include context. We use our data to re-examine prior work, in particular, finding that 89% of conversations in a widely used dialogue corpus are either missing messages or contain extra messages. Our manually-annotated data presents an opportunity to develop robust data-driven methods for conversation disentanglement, which will help advance dialogue research.", "keyphrases": ["conversation disentanglement", "message", "large-scale dataset"]}
+{"id": "rossiello-etal-2017-centroid", "title": "Centroid-based Text Summarization through Compositionality of Word Embeddings", "abstract": "Textual similarity is a crucial aspect for many extractive text summarization methods. A bag-of-words representation does not allow us to grasp the semantic relationships between concepts when comparing strongly related sentences with no words in common. To overcome this issue, in this paper we propose a centroid-based method for text summarization that exploits the compositional capabilities of word embeddings. The evaluations on multi-document and multilingual datasets prove the effectiveness of the continuous vector representation of words compared to the bag-of-words model. Despite its simplicity, our method achieves good performance even in comparison to more complex deep learning models. Our method is unsupervised and it can be adopted in other summarization tasks.", "keyphrases": ["text summarization", "word embedding", "semantic relationship", "centroid-based method"]}
+{"id": "yang-etal-2021-pcfgs", "title": "PCFGs Can Do Better: Inducing Probabilistic Context-Free Grammars with Many Symbols", "abstract": "Probabilistic context-free grammars (PCFGs) with neural parameterization have been shown to be effective in unsupervised phrase-structure grammar induction. However, due to the cubic computational complexity of PCFG representation and parsing, previous approaches cannot scale up to a relatively large number of (nonterminal and preterminal) symbols. In this work, we present a new parameterization form of PCFGs based on tensor decomposition, which has at most quadratic computational complexity in the symbol number and therefore allows us to use a much larger number of symbols. We further use neural parameterization for the new form to improve unsupervised parsing performance. We evaluate our model across ten languages and empirically demonstrate the effectiveness of using more symbols.", "keyphrases": ["context-free grammar", "complexity", "pcfg"]}
+{"id": "delpech-etal-2012-extraction", "title": "Extraction of Domain-Specific Bilingual Lexicon from Comparable Corpora: Compositional Translation and Ranking", "abstract": "This paper proposes a method for extracting translations of morphologically constructed terms from comparable corpora. The method is based on compositional translation and exploits translation equivalences at the morpheme-level, which allows for the generation of \"fertile\" translations (translation pairs in which the target term has more words than the source term). Ranking methods relying on corpus-based and translation-based features are used to select the best candidate translation. We obtain an average precision of 91% on the Top1 candidate translation. 
The method was tested on two language pairs (English-French and English-German) and with small specialized comparable corpora (400k words per language).", "keyphrases": ["comparable corpora", "compositional translation", "morpheme-level", "target term"]}
+{"id": "su-etal-2012-translation", "title": "Translation Model Adaptation for Statistical Machine Translation with Monolingual Topic Information", "abstract": "To adapt a translation model trained from the data in one domain to another, previous works paid more attention to the studies of parallel corpus while ignoring the in-domain monolingual corpora which can be obtained more easily. In this paper, we propose a novel approach for translation model adaptation by utilizing in-domain monolingual topic information instead of the in-domain bilingual corpora, which incorporates the topic information into translation probability estimation. Our method establishes the relationship between the out-of-domain bilingual corpus and the in-domain monolingual corpora via topic mapping and phrase-topic distribution probability estimation from in-domain monolingual corpora. Experimental result on the NIST Chinese-English translation task shows that our approach significantly outperforms the baseline system.", "keyphrases": ["topic information", "in-domain monolingual corpora", "distribution probability estimation", "translation model adaptation"]}
+{"id": "nikolaus-fourtassi-2021-evaluating", "title": "Evaluating the Acquisition of Semantic Knowledge from Cross-situational Learning in Artificial Neural Networks", "abstract": "When learning their native language, children acquire the meanings of words and sentences from highly ambiguous input without much explicit supervision. One possible learning mechanism is cross-situational learning, which has been successfully tested in laboratory experiments with children. Here we use Artificial Neural Networks to test if this mechanism scales up to more natural language and visual scenes using a large dataset of crowd-sourced images with corresponding descriptions. We evaluate learning using a series of tasks inspired by methods commonly used in laboratory studies of language acquisition. We show that the model acquires rich semantic knowledge both at the word- and sentence-level, mirroring the patterns and trajectory of learning in early childhood. Our work highlights the usefulness of low-level co-occurrence statistics across modalities in facilitating the early acquisition of higher-level semantic knowledge.", "keyphrases": ["semantic knowledge", "cross-situational learning", "artificial neural networks"]}
+{"id": "ghader-monz-2017-attention", "title": "What does Attention in Neural Machine Translation Pay Attention to?", "abstract": "Attention in neural machine translation provides the possibility to encode relevant parts of the source sentence at each translation step. As a result, attention is considered to be an alignment model as well. However, there is no work that specifically studies attention and provides analysis of what is being learned by attention models. Thus, the question still remains how attention is similar or different from the traditional alignment. In this paper, we provide detailed analysis of attention and compare it to traditional alignment. We answer the question of whether attention is only capable of modelling translational equivalence or whether it captures more information. 
We show that attention is different from alignment in some cases and is capturing useful information other than alignments.", "keyphrases": ["neural machine translation", "source sentence", "attention weight"]} +{"id": "wright-etal-2017-vectors", "title": "Vectors for Counterspeech on Twitter", "abstract": "A study of conversations on Twitter found that some arguments between strangers led to favorable change in discourse and even in attitudes. The authors propose that such exchanges can be usefully distinguished according to whether individuals or groups take part on each side, since the opportunity for a constructive exchange of views seems to vary accordingly.", "keyphrases": ["counterspeech", "twitter", "stranger", "attitude"]} +{"id": "surdeanu-manning-2010-ensemble", "title": "Ensemble Models for Dependency Parsing: Cheap and Good?", "abstract": "Previous work on dependency parsing used various kinds of combination models but a systematic analysis and comparison of these approaches is lacking. In this paper we implemented such a study for English dependency parsing and find several non-obvious facts: (a) the diversity of base parsers is more important than complex models for learning (e.g., stacking, supervised meta-classification), (b) approximate, linear-time re-parsing algorithms guarantee well-formed dependency trees without significant performance loss, and (c) the simplest scoring model for re-parsing (unweighted voting) performs essentially as well as other more complex models. This study proves that fast and accurate ensemble parsers can be built with minimal effort.", "keyphrases": ["dependency parsing", "voting", "linear combination"]} +{"id": "alsentzer-etal-2019-publicly", "title": "Publicly Available Clinical BERT Embeddings", "abstract": "Contextual word embedding models such as ELMo and BERT have dramatically improved performance for many natural language processing (NLP) tasks in recent months. However, these models have been minimally explored on specialty corpora, such as clinical text; moreover, in the clinical domain, no publicly-available pre-trained BERT models yet exist. In this work, we address this need by exploring and releasing BERT models for clinical text: one for generic clinical text and another for discharge summaries specifically. We demonstrate that using a domain-specific model yields performance improvements on 3/5 clinical NLP tasks, establishing a new state-of-the-art on the MedNLI dataset. We find that these domain-specific models are not as performant on 2 clinical de-identification tasks, and argue that this is a natural consequence of the differences between de-identified source text and synthetically non de-identified task text.", "keyphrases": ["clinicalbert", "biomedical domain", "pre-trained model"]} +{"id": "deutsch-roth-2019-summary", "title": "Summary Cloze: A New Task for Content Selection in Topic-Focused Summarization", "abstract": "A key challenge in topic-focused summarization is determining what information should be included in the summary, a problem known as content selection. In this work, we propose a new method for studying content selection in topic-focused summarization called the summary cloze task. The goal of the summary cloze task is to generate the next sentence of a summary conditioned on the beginning of the summary, a topic, and a reference document(s). The main challenge is deciding what information in the references is relevant to the topic and partial summary and should be included in the summary. 
Although the cloze task does not address all aspects of the traditional summarization problem, the narrower scope of the task allows us to collect a large-scale dataset of nearly 500k summary cloze instances from Wikipedia. We report experimental results on this new dataset using various extractive models and a two-step abstractive model that first extractively selects a small number of sentences and then abstractively summarizes them. Our results show that the topic and partial summary help the models identify relevant content, but the task remains a significant challenge.", "keyphrases": ["content selection", "summarization", "cloze task"]}
+{"id": "chiang-scheffler-2008-flexible", "title": "Flexible Composition and Delayed Tree-Locality", "abstract": "Flexible composition is an extension of TAG that has been used in a variety of TAG-analyses. In this paper, we present a dedicated study of the formal and linguistic properties of TAGs with flexible composition (TAG-FC). We start by presenting a survey of existing applications of flexible composition. In the main part of the paper, we discuss a formal definition of TAG-FCs and give a proof of equivalence of TAG-FC to tree-local MCTAG, via a formalism called delayed tree-local MCTAG. We then proceed to argue that delayed tree-locality is more intuitive for the analysis of many cases where flexible composition has been employed.", "keyphrases": ["flexible composition", "derivation", "standard tag"]}
+{"id": "davidov-rappoport-2010-extraction", "title": "Extraction and Approximation of Numerical Attributes from the Web", "abstract": "We present a novel framework for automated extraction and approximation of numerical object attributes such as height and weight from the Web. Given an object-attribute pair, we discover and analyze attribute information for a set of comparable objects in order to infer the desired value. This allows us to approximate the desired numerical values even when no exact values can be found in the text. Our framework makes use of relation defining patterns and WordNet similarity information. First, we obtain from the Web and WordNet a list of terms similar to the given object. Then we retrieve attribute values for each term in this list, and information that allows us to compare different objects in the list and to infer the attribute value range. Finally, we combine the retrieved data for all terms from the list to select or approximate the requested value. We evaluate our method using automated question answering, WordNet enrichment, and comparison with answers given in Wikipedia and by leading search engines. In all of these, our framework provides a significant improvement.", "keyphrases": ["numerical attribute", "web", "object", "height", "value"]}
+{"id": "wang-etal-2017-learning", "title": "Learning to Rank Semantic Coherence for Topic Segmentation", "abstract": "Topic segmentation plays an important role for discourse parsing and information retrieval. Due to the absence of training data, previous work mainly adopts unsupervised methods to rank semantic coherence between paragraphs for topic segmentation. In this paper, we present an intuitive and simple idea to automatically create a \u201cquasi\u201d training dataset, which includes a large amount of text pairs from the same or different documents with different semantic coherence. With the training corpus, we design a symmetric CNN neural network to model text pairs and rank the semantic coherence within the learning to rank framework. 
Experiments show that our algorithm is able to achieve competitive performance over strong baselines on several real-world datasets.", "keyphrases": ["topic segmentation", "text pair", "cnn", "coherence score"]}
+{"id": "lee-etal-2012-joint", "title": "Joint Entity and Event Coreference Resolution across Documents", "abstract": "We introduce a novel coreference resolution system that models entities and events jointly. Our iterative method cautiously constructs clusters of entity and event mentions using linear regression to model cluster merge operations. As clusters are built, information flows between entity and event clusters through features that model semantic role dependencies. Our system handles nominal and verbal events as well as entities, and our joint formulation allows information from event coreference to help entity coreference, and vice versa. In a cross-document domain with comparable documents, joint coreference resolution performs significantly better (over 3 CoNLL F1 points) than two strong baselines that resolve entities and events separately.", "keyphrases": ["event coreference resolution", "cluster", "joint entity", "ecb corpus", "cross-document coreference"]}
+{"id": "li-etal-2008-pnr2", "title": "PNR2: Ranking Sentences with Positive and Negative Reinforcement for Query-Oriented Update Summarization", "abstract": "Query-oriented update summarization is a recently emerging summarization task. It brings new challenges to sentence ranking algorithms, which are required not only to locate the important and query-relevant information, but also to capture the new information when document collections evolve. In this paper, we propose a novel graph based sentence ranking algorithm, namely PNR2, for update summarization. Inspired by the intuition that \"a sentence receives a positive influence from the sentences that correlate to it in the same collection, whereas a sentence receives a negative influence from the sentences that correlate to it in a different (perhaps previously read) collection\", PNR2 models both the positive and the negative mutual reinforcement in the ranking process. Automatic evaluation on the DUC 2007 data set pilot task demonstrates the effectiveness of the algorithm.", "keyphrases": ["reinforcement", "update summarization", "ranking process"]}
+{"id": "anthonio-etal-2020-wikihowtoimprove", "title": "wikiHowToImprove: A Resource and Analyses on Edits in Instructional Texts", "abstract": "Instructional texts, such as articles in wikiHow, describe the actions necessary to accomplish a certain goal. In wikiHow and other resources, such instructions are subject to revision edits on a regular basis. Do these edits improve instructions only in terms of style and correctness, or do they provide clarifications necessary to follow the instructions and to accomplish the goal? We describe a resource and first studies towards answering this question. Specifically, we create wikiHowToImprove, a collection of revision histories for about 2.7 million sentences from about 246000 wikiHow articles. 
We describe human annotation studies on categorizing a subset of sentence-level edits and provide baseline models for the task of automatically distinguishing \u201colder\u201d from \u201cnewer\u201d revisions of a sentence.", "keyphrases": ["edit", "instructional text", "wikihowtoimprove"]} +{"id": "budanitsky-hirst-2006-evaluating", "title": "Evaluating WordNet-based Measures of Lexical Semantic Relatedness", "abstract": "The quantification of lexical semantic relatedness has many applications in NLP, and many different measures have been proposed. We evaluate five of these measures, all of which use WordNet as their central resource, by comparing their performance in detecting and correcting real-word spelling errors. An information-content-based measure proposed by Jiang and Conrath is found superior to those proposed by Hirst and St-Onge, Leacock and Chodorow, Lin, and Resnik. In addition, we explain why distributional similarity is not an adequate proxy for lexical semantic relatedness.", "keyphrases": ["lexical semantic relatedness", "central resource", "overview", "distance", "knowledge-based measure"]} +{"id": "xiong-etal-2017-deeppath", "title": "DeepPath: A Reinforcement Learning Method for Knowledge Graph Reasoning", "abstract": "We study the problem of learning to reason in large scale knowledge graphs (KGs). More specifically, we describe a novel reinforcement learning framework for learning multi-hop relational paths: we use a policy-based agent with continuous states based on knowledge graph embeddings, which reasons in a KG vector-space by sampling the most promising relation to extend its path. In contrast to prior work, our approach includes a reward function that takes the accuracy, diversity, and efficiency into consideration. Experimentally, we show that our proposed method outperforms a path-ranking based algorithm and knowledge graph embedding methods on Freebase and Never-Ending Language Learning datasets.", "keyphrases": ["reinforcement learning method", "knowledge graph reasoning", "deeppath"]} +{"id": "falke-gurevych-2017-bringing", "title": "Bringing Structure into Summaries: Crowdsourcing a Benchmark Corpus of Concept Maps", "abstract": "Concept maps can be used to concisely represent important information and bring structure into large document collections. Therefore, we study a variant of multi-document summarization that produces summaries in the form of concept maps. However, suitable evaluation datasets for this task are currently missing. To close this gap, we present a newly created corpus of concept maps that summarize heterogeneous collections of web documents on educational topics. It was created using a novel crowdsourcing approach that allows us to efficiently determine important elements in large document collections. We release the corpus along with a baseline system and proposed evaluation protocol to enable further research on this variant of summarization.", "keyphrases": ["concept map", "large document collection", "summarization"]} +{"id": "shutova-teufel-2010-metaphor", "title": "Metaphor Corpus Annotated for Source - Target Domain Mappings", "abstract": "Besides making our thoughts more vivid and filling our communication with richer imagery, metaphor also plays an important structural role in our cognition. 
Although there is a consensus in the linguistics and NLP research communities that the phenomenon of metaphor is not restricted to similarity-based extensions of meanings of isolated words, but rather involves reconceptualization of a whole area of experience (target domain) in terms of another (source domain), there still has been no proposal for a comprehensive procedure for annotation of cross-domain mappings. However, a corpus annotated for conceptual mappings could provide a new starting point for both linguistic and cognitive experiments. The annotation scheme we present in this paper is a step towards filling this gap. We test our procedure in an experimental setting involving multiple annotators and estimate their agreement on the task. The associated corpus annotated for source \u2015 target domain mappings will be publicly available.", "keyphrases": ["annotator", "target domain", "metaphor", "language understanding"]} +{"id": "vu-etal-2014-acquiring", "title": "Acquiring a Dictionary of Emotion-Provoking Events", "abstract": "This paper is concerned with the discovery and aggregation of events that provoke a particular emotion in the person who experiences them, or emotion-provoking events. We first describe the creation of a small manually-constructed dictionary of events through a survey of 30 subjects. Next, we describe first attempts at automatically acquiring and aggregating these events from web data, with a baseline from previous work and some simple extensions using seed expansion and clustering. Finally, we propose several evaluation measures for evaluating the automatically acquired events, and perform an evaluation of the effectiveness of automatic event extraction.", "keyphrases": ["dictionary", "emotion-provoking event", "psychologist"]} +{"id": "kao-jurafsky-2015-computational", "title": "A computational analysis of poetic style: Imagism and its influence on modern professional and amateur poetry", "abstract": "How do standards of poetic beauty change as a function of time and expertise? Here we use computational methods to compare the stylistic features of 359 English poems written by 19th century professional poets, Imagist poets, contemporary professional poets, and contemporary amateur poets. Building upon techniques designed to analyze style and sentiment in texts, we examine elements of poetic craft such as imagery, sound devices, emotive language, and diction. We find that contemporary professional poets use significantly more concrete words than 19th century poets, fewer emotional words, and more complex sound devices. These changes are consistent with the tenets of Imagism, an early 20thcentury literary movement. Further analyses show that contemporary amateur poems resemble 19th century professional poems more than contemporary professional poems on several dimensions. The stylistic similarities between contemporary amateur poems and 19th century professional poems suggest that elite standards of poetic beauty in the past \u201ctrickled down\u201d to influence amateur works in the present. Our results highlight the influence of Imagism on the modern aesthetic and reveal the dynamics between \u201chigh\u201d and \u201clow\u201d art. 
We suggest that computational linguistics may shed light on the forces and trends that shape poetic style.", "keyphrases": ["poetic style", "imagism", "stylistic feature"]} +{"id": "jiang-etal-2009-mining", "title": "Mining Bilingual Data from the Web with Adaptively Learnt Patterns", "abstract": "Mining bilingual data (including bilingual sentences and terms) from the Web can benefit many NLP applications, such as machine translation and cross language information retrieval. In this paper, based on the observation that bilingual data in many web pages appear collectively following similar patterns, an adaptive pattern-based bilingual data mining method is proposed. Specifically, given a web page, the method contains four steps: 1) preprocessing: parse the web page into a DOM tree and segment the inner text of each node into snippets; 2) seed mining: identify potential translation pairs (seeds) using a word based alignment model which takes both translation and transliteration into consideration; 3) pattern learning: learn generalized patterns with the identified seeds; 4) pattern based mining: extract all bilingual data in the page using the learned patterns. Our experiments on Chinese web pages produced more than 7.5 million pairs of bilingual sentences and more than 5 million pairs of bilingual terms, both with over 80% accuracy.", "keyphrases": ["bilingual data", "web", "potential translation pair", "adaptive pattern-based method"]} +{"id": "paperno-etal-2016-lambada", "title": "The LAMBADA dataset: Word prediction requiring a broad discourse context", "abstract": "We introduce LAMBADA, a dataset to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse. We show that LAMBADA exemplifies a wide range of linguistic phenomena, and that none of several state-of-the-art language models reaches accuracy above 1% on this novel benchmark. We thus propose LAMBADA as a challenging test set, meant to encourage the development of new models capable of genuine understanding of broad context in natural language text.", "keyphrases": ["lambada dataset", "word prediction task", "broad context"]} +{"id": "shen-etal-2017-conditional", "title": "A Conditional Variational Framework for Dialog Generation", "abstract": "Deep latent variable models have been shown to facilitate the response generation for open-domain dialog systems. However, these latent variables are highly randomized, leading to uncontrollable generated responses. In this paper, we propose a framework allowing conditional response generation based on specific attributes. These attributes can be either manually assigned or automatically detected. Moreover, the dialog states for both speakers are modeled separately in order to reflect personal features. We validate this framework on two different scenarios, where the attribute refers to genericness and sentiment states respectively. 
The experimental results demonstrate the potential of our model: meaningful responses can be generated in accordance with the specified attributes.", "keyphrases": ["conditional variational framework", "latent variable", "response generation", "dialogue generation", "generative adversarial network"]}
+{"id": "wang-etal-2018-watch", "title": "Watch, Listen, and Describe: Globally and Locally Aligned Cross-Modal Attentions for Video Captioning", "abstract": "A major challenge for video captioning is to combine audio and visual cues. Existing multi-modal fusion methods have shown encouraging results in video understanding. However, the temporal structures of multiple modalities at different granularities are rarely explored, and how to selectively fuse the multi-modal representations at different levels of details remains uncharted. In this paper, we propose a novel hierarchically aligned cross-modal attention (HACA) framework to learn and selectively fuse both global and local temporal dynamics of different modalities. Furthermore, for the first time, we validate the superior performance of the deep audio features on the video captioning task. Finally, our HACA model significantly outperforms the previous best systems and achieves new state-of-the-art results on the widely used MSR-VTT dataset.", "keyphrases": ["cross-modal attention", "video captioning", "local temporal dynamic"]}
+{"id": "lapshinova-koltunski-etal-2018-parcorfull", "title": "ParCorFull: a Parallel Corpus Annotated with Full Coreference", "abstract": "ParCorFull is a parallel corpus annotated with full coreference chains that has been created to address an important problem that machine translation and other multilingual natural language processing (NLP) technologies face -- translation of coreference across languages. Our corpus contains parallel texts for the language pair English-German, two major European languages. Despite being typologically very close, these languages still have systemic differences in the realisation of coreference, and thus pose problems for multilingual coreference resolution and machine translation. Our parallel corpus covers the genres of planned speech (public lectures) and newswire. It is richly annotated for coreference in both languages, including annotation of both nominal coreference and reference to antecedents expressed as clauses, sentences and verb phrases. This resource supports research in the areas of natural language processing, contrastive linguistics and translation studies on the mechanisms involved in coreference translation in order to develop a better understanding of the phenomenon.", "keyphrases": ["parallel corpus", "coreference chain", "parcorfull", "pronoun", "incongruence"]}
+{"id": "siddharthan-teufel-2007-whose", "title": "Whose Idea Was This, and Why Does it Matter? Attributing Scientific Work to Citations", "abstract": "Scientific papers revolve around citations, and for many discourse level tasks one needs to know whose work is being talked about at any point in the discourse. In this paper, we introduce the scientific attribution task, which links different linguistic expressions to citations. 
We discuss the suitability of different evaluation metrics and evaluate our classification approach to deciding attribution both intrinsically and in an extrinsic evaluation where information about scientific attribution is shown to improve performance on Argumentative Zoning, a rhetorical classification task.", "keyphrases": ["citation", "scientific attribution", "argumentative zoning"]} +{"id": "chen-etal-2015-event", "title": "Event Extraction via Dynamic Multi-Pooling Convolutional Neural Networks", "abstract": "Traditional approaches to the task of ACE event extraction primarily rely on elaborately designed features and complicated natural language processing (NLP) tools. These traditional approaches lack generalization, take a large amount of human effort and are prone to error propagation and data sparsity problems. This paper proposes a novel event-extraction method, which aims to automatically extract lexical-level and sentence-level features without using complicated NLP tools. We introduce a word-representation model to capture meaningful semantic regularities for words and adopt a framework based on a convolutional neural network (CNN) to capture sentence-level clues. However, CNN can only capture the most important information in a sentence and may miss valuable facts when considering multiple-event sentences. We propose a dynamic multi-pooling convolutional neural network (DMCNN), which uses a dynamic multi-pooling layer according to event triggers and arguments, to reserve more crucial information. The experimental results show that our approach significantly outperforms other state-of-the-art methods.", "keyphrases": ["convolutional neural networks", "event extraction", "significant progress", "trigger word", "feature representation"]} +{"id": "son-etal-2017-recognizing", "title": "Recognizing Counterfactual Thinking in Social Media Texts", "abstract": "Counterfactual statements, describing events that did not occur and their consequents, have been studied in areas including problem-solving, affect management, and behavior regulation. People with more counterfactual thinking tend to perceive life events as more personally meaningful. Nevertheless, counterfactuals have not been studied in computational linguistics. We create a counterfactual tweet dataset and explore approaches for detecting counterfactuals using rule-based and supervised statistical approaches. A combined rule-based and statistical approach yielded the best results (F1 = 0.77) outperforming either approach used alone.", "keyphrases": ["counterfactual thinking", "counterfactual tweet dataset", "social medium"]} +{"id": "munro-etal-2010-crowdsourcing", "title": "Crowdsourcing and language studies: the new generation of linguistic data", "abstract": "We present a compendium of recent and current projects that utilize crowdsourcing technologies for language studies, finding that the quality is comparable to controlled laboratory experiments, and in some cases superior. While crowdsourcing has primarily been used for annotation in recent language studies, the results here demonstrate that far richer data may be generated in a range of linguistic disciplines from semantics to psycholinguistics. 
For these, we report a number of successful methods for evaluating data quality in the absence of a 'correct' response for any given data point.", "keyphrases": ["language study", "crowdsourcing", "survey"]} +{"id": "maccartney-etal-2008-phrase", "title": "A Phrase-Based Alignment Model for Natural Language Inference", "abstract": "The alignment problem---establishing links between corresponding phrases in two related sentences---is as important in natural language inference (NLI) as it is in machine translation (MT). But the tools and techniques of MT alignment do not readily transfer to NLI, where one cannot assume semantic equivalence, and for which large volumes of bitext are lacking. We present a new NLI aligner, the MANLI system, designed to address these challenges. It uses a phrase-based alignment representation, exploits external lexical resources, and capitalizes on a new set of supervised training data. We compare the performance of MANLI to existing NLI and MT aligners on an NLI alignment task over the well-known Recognizing Textual Entailment data. We show that MANLI significantly outperforms existing aligners, achieving gains of 6.2% in F1 over a representative NLI aligner and 10.5% over GIZA++.", "keyphrases": ["aligner", "natural language inference", "giza++", "unit", "predicate"]} +{"id": "shang-etal-2015-neural", "title": "Neural Responding Machine for Short-Text Conversation", "abstract": "We propose Neural Responding Machine (NRM), a neural network-based response generator for Short-Text Conversation. NRM takes the general encoder-decoder framework: it formalizes the generation of response as a decoding process based on the latent representation of the input text, while both encoding and decoding are realized with recurrent neural networks (RNN). The NRM is trained with a large amount of one-round conversation data collected from a microblogging service. Empirical study shows that NRM can generate grammatically correct and content-wise appropriate responses to over 75% of the input text, outperforming state-of-the-arts in the same setting, including retrieval-based and SMT-based models.", "keyphrases": ["short-text conversation", "neural responding machine", "dialog system", "previous utterance", "smt phrase table"]} +{"id": "gerz-etal-2016-simverb", "title": "SimVerb-3500: A Large-Scale Evaluation Set of Verb Similarity", "abstract": "Verbs play a critical role in the meaning of sentences, but these ubiquitous words have received little attention in recent distributional semantics research. We introduce SimVerb-3500, an evaluation resource that provides human ratings for the similarity of 3,500 verb pairs. SimVerb-3500 covers all normed verb types from the USF free-association database, providing at least three examples for every VerbNet class. This broad coverage facilitates detailed analyses of how syntactic and semantic phenomena together influence human understanding of verb meaning. Further, with significantly larger development and test sets than existing benchmarks, SimVerb-3500 enables more robust evaluation of representation learning architectures and promotes the development of methods tailored to verbs. 
We hope that SimVerb-3500 will enable a richer understanding of the diversity and complexity of verb semantics and guide the development of systems that can effectively represent and interpret this meaning.", "keyphrases": ["human understanding", "verb semantic", "simverb-3500"]}
+{"id": "albrecht-hwa-2007-regression", "title": "Regression for Sentence-Level MT Evaluation with Pseudo References", "abstract": "Many automatic evaluation metrics for machine translation (MT) rely on making comparisons to human translations, a resource that may not always be available. We present a method for developing sentence-level MT evaluation metrics that do not directly rely on human reference translations. Our metrics are developed using regression learning and are based on a set of weaker indicators of fluency and adequacy (pseudo references). Experimental results suggest that they rival standard reference-based metrics in terms of correlations with human judgments on new test instances.", "keyphrases": ["human reference translation", "fluency", "regression", "state-of-the-art correlation"]}
+{"id": "junczys-dowmunt-2018-dual", "title": "Dual Conditional Cross-Entropy Filtering of Noisy Parallel Corpora", "abstract": "In this work we introduce dual conditional cross-entropy filtering for noisy parallel data. For each sentence pair of the noisy parallel corpus we compute cross-entropy scores according to two inverse translation models trained on clean data. We penalize divergent cross-entropies and weigh the penalty by the cross-entropy average of both models. Sorting or thresholding according to these scores results in better subsets of parallel data. We achieve higher BLEU scores with models trained on parallel data filtered only from Paracrawl than with models trained on clean WMT data. We further evaluate our method in the context of the WMT2018 shared task on parallel corpus filtering and achieve the overall highest ranking scores of the shared task, scoring top in three out of four subtasks.", "keyphrases": ["filtering", "sentence pair", "dual cross-entropy", "high-resource language"]}
+{"id": "zhang-etal-2021-supporting", "title": "Supporting Clustering with Contrastive Learning", "abstract": "Unsupervised clustering aims at discovering the semantic categories of data according to some distance measured in the representation space. However, different categories often overlap with each other in the representation space at the beginning of the learning process, which poses a significant challenge for distance-based clustering in achieving good separation between different categories. To this end, we propose Supporting Clustering with Contrastive Learning (SCCL) \u2013 a novel framework to leverage contrastive learning to promote better separation. We assess the performance of SCCL on short text clustering and show that SCCL significantly advances the state-of-the-art results on most benchmark datasets with 3%-11% improvement on Accuracy and 4%-15% improvement on Normalized Mutual Information. 
Furthermore, our quantitative analysis demonstrates the effectiveness of SCCL in leveraging the strengths of both bottom-up instance discrimination and top-down clustering to achieve better intra-cluster and inter-cluster distances when evaluated with the ground truth cluster labels.", "keyphrases": ["clustering", "contrastive learning", "sccl", "loss", "remarkable success"]}
+{"id": "jarrold-etal-2014-aided", "title": "Aided diagnosis of dementia type through computer-based analysis of spontaneous speech", "abstract": "This pilot study evaluates the ability of machine learning algorithms to assist with the differential diagnosis of dementia subtypes based on brief (< 10 min) spontaneous speech samples. We analyzed recordings of a brief spontaneous speech sample from 48 participants from 5 different groups: 4 types of dementia plus healthy controls. Recordings were analyzed using a speech recognition system optimized for speaker-independent spontaneous speech. Lexical and acoustic features were automatically extracted. The resulting feature profiles were used as input to a machine learning system that was trained to identify the diagnosis assigned to each research participant. Between-group lexical and acoustic differences were detected in accordance with expectations from the prior research literature, suggesting that classifications were based on features consistent with human-observed symptomatology. Machine learning algorithms were able to identify participants' diagnostic group with accuracy comparable to existing diagnostic methods in use today. Results suggest this clinical speech analytic approach offers promise as an additional, objective and easily obtained source of diagnostic information for clinicians.", "keyphrases": ["diagnosis", "dementia", "spontaneous speech"]}
+{"id": "mirza-tonelli-2014-analysis", "title": "An Analysis of Causality between Events and its Relation to Temporal Information", "abstract": "In this work we present an annotation framework to capture causality between events, inspired by TimeML, and a language resource covering both temporal and causal relations. This data set is then used to build an automatic extraction system for causal signals and causal links between given event pairs. The evaluation and analysis of the system\u2019s performance provides an insight into explicit causality in text and the connection between temporal and causal relations.", "keyphrases": ["causality", "temporal information", "broader-coverage linguistic approach"]}
+{"id": "elsner-charniak-2008-talking", "title": "You Talking to Me? A Corpus and Algorithm for Conversation Disentanglement", "abstract": "When multiple conversations occur simultaneously, a listener must decide which conversation each utterance is part of in order to interpret and respond to it appropriately. We refer to this task as disentanglement. We present a corpus of Internet Relay Chat (IRC) dialogue in which the various conversations have been manually disentangled, and evaluate annotator reliability. This is, to our knowledge, the first such corpus for internet chat. We propose a graph-theoretic model for disentanglement, using discourse-based features which have not been previously applied to this task. 
The model\u2019s predicted disentanglements are highly correlated with manual annotations.", "keyphrases": ["conversation disentanglement", "chat", "thread"]}
+{"id": "sharma-etal-2022-disarm", "title": "DISARM: Detecting the Victims Targeted by Harmful Memes", "abstract": "Internet memes have emerged as an increasingly popular means of communication on the web. Although memes are typically intended to elicit humour, they have been increasingly used to spread hatred, trolling, and cyberbullying, as well as to target specific individuals, communities, or society on political, socio-cultural, and psychological grounds. While previous work has focused on detecting harmful, hateful, and offensive memes in general, identifying whom these memes attack (i.e., the `victims') remains a challenging and underexplored area. We attempt to address this problem in this paper. To this end, we create a dataset in which we annotate each meme with its victim(s) such as the name of the targeted person(s), organization(s), and community(ies). We then propose DISARM (Detecting vIctimS targeted by hARmful Memes), a framework that uses named-entity recognition and person identification to detect all entities a meme is referring to, and then, incorporates a novel contextualized multimodal deep neural network to classify whether the meme intends to harm these entities. We perform several systematic experiments on three different test sets, corresponding to entities that are (i) all seen while training, (ii) not seen as a harmful target while training, and (iii) not seen at all while training. The evaluation shows that DISARM significantly outperforms 10 unimodal and multimodal systems. Finally, we demonstrate that DISARM is interpretable and comparatively more generalizable and that it can reduce the relative error rate of harmful target identification by up to 9% absolute over multimodal baseline systems.", "keyphrases": ["victim", "meme", "disarm"]}
+{"id": "xing-etal-2015-normalized", "title": "Normalized Word Embedding and Orthogonal Transform for Bilingual Word Translation", "abstract": "Word embedding has been found to be highly powerful to translate words from one language to another by a simple linear transform. However, we found some inconsistency among the objective functions of the embedding and the transform learning, as well as the distance measurement. This paper proposes a solution which normalizes the word vectors on a hypersphere and constrains the linear transform as an orthogonal transform. The experimental results confirmed that the proposed solution can offer better performance on a word similarity task and an English-to-Spanish word translation task.", "keyphrases": ["word embedding", "orthogonal transform", "improved result"]}
+{"id": "choshen-abend-2018-reference", "title": "Reference-less Measure of Faithfulness for Grammatical Error Correction", "abstract": "We propose USim, a semantic measure for Grammatical Error Correction (GEC) that measures the semantic faithfulness of the output to the source, thereby complementing existing reference-less measures (RLMs) for measuring the output's grammaticality. USim operates by comparing the semantic symbolic structure of the source and the correction, without relying on manually-curated references. 
Our experiments establish the validity of USim, by showing that the semantic structures can be consistently applied to ungrammatical text, that valid corrections obtain a high USim similarity score to the source, and that invalid corrections obtain a lower score.", "keyphrases": ["faithfulness", "grammatical error correction", "reference-less measure"]} +{"id": "lin-etal-2018-multi-lingual", "title": "A Multi-lingual Multi-task Architecture for Low-resource Sequence Labeling", "abstract": "We propose a multi-lingual multi-task architecture to develop supervised models with a minimal amount of labeled data for sequence labeling. In this new architecture, we combine various transfer models using two layers of parameter sharing. On the first layer, we construct the basis of the architecture to provide universal word representation and feature extraction capability for all models. On the second level, we adopt different parameter sharing strategies for different transfer schemes. This architecture proves to be particularly effective for low-resource settings, when there are less than 200 training sentences for the target task. Using Name Tagging as a target task, our approach achieved 4.3%-50.5% absolute F-score gains compared to the mono-lingual single-task baseline model.", "keyphrases": ["multi-task architecture", "name tagging", "cross-lingual transfer"]} +{"id": "haponchyk-moschitti-2017-dont", "title": "Don't understand a measure? Learn it: Structured Prediction for Coreference Resolution optimizing its measures", "abstract": "An interesting aspect of structured prediction is the evaluation of an output structure against the gold standard. Especially in the loss-augmented setting, the need of finding the max-violating constraint has severely limited the expressivity of effective loss functions. In this paper, we trade off exact computation for enabling the use and study of more complex loss functions for coreference resolution. Most interestingly, we show that such functions can be (i) automatically learned also from controversial but commonly accepted coreference measures, e.g., MELA, and (ii) successfully used in learning algorithms. The accurate model comparison on the standard CoNLL-2012 setting shows the benefit of more expressive loss functions.", "keyphrases": ["structured prediction", "coreference resolution", "expressive loss function"]} +{"id": "nadejde-tetreault-2019-personalizing", "title": "Personalizing Grammatical Error Correction: Adaptation to Proficiency Level and L1", "abstract": "Grammar error correction (GEC) systems have become ubiquitous in a variety of software applications, and have started to approach human-level performance for some datasets. However, very little is known about how to efficiently personalize these systems to the user's characteristics, such as their proficiency level and first language, or to emerging domains of text. We present the first results on adapting a general purpose neural GEC system to both the proficiency level and the first language of a writer, using only a few thousand annotated sentences. Our study is the broadest of its kind, covering five proficiency levels and twelve different languages, and comparing three different adaptation scenarios: adapting to the proficiency level only, to the first language only, or to both aspects simultaneously. 
We show that tailoring to both scenarios achieves the largest performance improvement (3.6 F0.5) relative to a strong baseline.", "keyphrases": ["proficiency level", "learner data", "native language"]}
+{"id": "nakov-2008-improving", "title": "Improving English-Spanish Statistical Machine Translation: Experiments in Domain Adaptation, Sentence Paraphrasing, Tokenization, and Recasing", "abstract": "We describe the experiments of the UC Berkeley team on improving English-Spanish machine translation of news text, as part of the WMT'08 Shared Translation Task. We experiment with domain adaptation, combining a small in-domain news bi-text and a large out-of-domain one from the Europarl corpus, building two separate phrase translation models and two separate language models. We further add a third phrase translation model trained on a version of the news bi-text augmented with monolingual sentence-level syntactic paraphrases on the source-language side, and we combine all models in a log-linear model using minimum error rate training. Finally, we experiment with different tokenization and recasing rules, achieving 35.09% Bleu score on the WMT'07 news test data when translating from English to Spanish, which is a sizable improvement over the highest Bleu score achieved on that dataset at WMT'07: 33.10% (in fact, by our system). On the WMT'08 English to Spanish news translation, we achieve 21.92%, which makes our team the second best on Bleu score.", "keyphrases": ["domain adaptation", "tokenization", "phrase table"]}
+{"id": "patro-etal-2019-deep", "title": "A deep-learning framework to detect sarcasm targets", "abstract": "In this paper we propose a deep learning framework for sarcasm target detection in predefined sarcastic texts. Identification of sarcasm targets can help in many core natural language processing tasks such as aspect based sentiment analysis, opinion mining etc. To begin with, we perform an empirical study of the socio-linguistic features and identify those that are statistically significant in indicating sarcasm targets (p-values in the range (0.05, 0.001)). Finally, we present a deep-learning framework augmented with socio-linguistic features to detect sarcasm targets in sarcastic book-snippets and tweets. We achieve a huge improvement in the performance in terms of exact match and dice scores compared to the current state-of-the-art baseline.", "keyphrases": ["deep-learning framework", "sarcasm target", "socio-linguistic feature"]}
+{"id": "zhang-etal-2017-end", "title": "End-to-End Neural Relation Extraction with Global Optimization", "abstract": "Neural networks have shown promising results for relation extraction. State-of-the-art models cast the task as an end-to-end problem, solved incrementally using a local classifier. Yet previous work using statistical models have demonstrated that global optimization can achieve better performances compared to local classification. We build a globally optimized neural model for end-to-end relation extraction, proposing novel LSTM features in order to better learn context representations. In addition, we present a novel method to integrate syntactic information to facilitate global learning, yet requiring little background on syntactic grammars thus being easy to extend. 
Experimental results show that our proposed model is highly effective, achieving the best performances on two standard benchmarks.", "keyphrases": ["relation extraction", "global optimization", "entity recognition"]} +{"id": "bramsen-etal-2011-extracting", "title": "Extracting Social Power Relationships from Natural Language", "abstract": "Sociolinguists have long argued that social context influences language use in all manner of ways, resulting in lects. This paper explores a text classification problem we will call lect modeling, an example of what has been termed computational sociolinguistics. In particular, we use machine learning techniques to identify social power relationships between members of a social network, based purely on the content of their interpersonal communication. We rely on statistical methods, as opposed to language-specific engineering, to extract features which represent vocabulary and grammar usage indicative of social power lect. We then apply support vector machines to model the social power lects representing superior-subordinate communication in the Enron email corpus. Our results validate the treatment of lect modeling as a text classification problem -- albeit a hard one -- and constitute a case for future research in computational sociolinguistics.", "keyphrases": ["enron email corpus", "power relation", "subordinate", "organizational structure", "mean"]} +{"id": "foster-etal-2010-discriminative", "title": "Discriminative Instance Weighting for Domain Adaptation in Statistical Machine Translation", "abstract": "We describe a new approach to SMT adaptation that weights out-of-domain phrase pairs according to their relevance to the target domain, determined by both how similar to it they appear to be, and whether they belong to general language or not. This extends previous work on discriminative weighting by using a finer granularity, focusing on the properties of instances rather than corpus components, and using a simpler training procedure. We incorporate instance weighting into a mixture-model framework, and find that it yields consistent improvements over a wide range of baselines.", "keyphrases": ["domain adaptation", "phrase pair", "perplexity", "perform weighting"]} +{"id": "ratinov-roth-2009-design", "title": "Design Challenges and Misconceptions in Named Entity Recognition", "abstract": "We analyze some of the fundamental design challenges and misconceptions that underlie the development of an efficient and robust NER system. In particular, we address issues such as the representation of text chunks, the inference approach needed to combine local NER decisions, the sources of prior knowledge and how to use them within an NER system. In the process of comparing several solutions to these challenges we reach some surprising conclusions, as well as develop an NER system that achieves 90.8 F1 score on the CoNLL-2003 NER shared task, the best reported result for this dataset.", "keyphrases": ["named entity recognition", "ner system", "conditional random fields", "wikipedia", "tagging schema"]} +{"id": "yang-etal-2020-csp", "title": "CSP:Code-Switching Pre-training for Neural Machine Translation", "abstract": "This paper proposes a new pre-training method, called Code-Switching Pre-training (CSP for short) for Neural Machine Translation (NMT). 
Unlike traditional pre-training method which randomly masks some fragments of the input sentence, the proposed CSP randomly replaces some words in the source sentence with their translation words in the target language. Specifically, we firstly perform lexicon induction with unsupervised word embedding mapping between the source and target languages, and then randomly replace some words in the input sentence with their translation words according to the extracted translation lexicons. CSP adopts the encoder-decoder framework: its encoder takes the code-mixed sentence as input, and its decoder predicts the replaced fragment of the input sentence. In this way, CSP is able to pre-train the NMT model by explicitly making the most of the alignment information extracted from the source and target monolingual corpus. Additionally, we relieve the pretrain-finetune discrepancy caused by the artificial symbols like [mask]. To verify the effectiveness of the proposed method, we conduct extensive experiments on unsupervised and supervised NMT. Experimental results show that CSP achieves significant improvements over baselines without pre-training or with other pre-training methods.", "keyphrases": ["code-switching", "neural machine translation", "lexicon induction", "other pre-training method", "csp"]} +{"id": "glockner-etal-2018-breaking", "title": "Breaking NLI Systems with Sentences that Require Simple Lexical Inferences", "abstract": "We create a new NLI test set that shows the deficiency of state-of-the-art models in inferences that require lexical and world knowledge. The new examples are simpler than the SNLI test set, containing sentences that differ by at most one word from sentences in the training set. Yet, the performance on the new test set is substantially worse across systems trained on SNLI, demonstrating that these systems are limited in their generalization ability, failing to capture many simple inferences.", "keyphrases": ["snli", "natural language inference", "hypernym", "premise", "entailment"]} +{"id": "hiraoka-etal-2019-stochastic", "title": "Stochastic Tokenization with a Language Model for Neural Text Classification", "abstract": "For unsegmented languages such as Japanese and Chinese, tokenization of a sentence has a significant impact on the performance of text classification. Sentences are usually segmented with words or subwords by a morphological analyzer or byte pair encoding and then encoded with word (or subword) representations for neural networks. However, segmentation is potentially ambiguous, and it is unclear whether the segmented tokens achieve the best performance for the target task. In this paper, we propose a method to simultaneously learn tokenization and text classification to address these problems. Our model incorporates a language model for unsupervised tokenization into a text classifier and then trains both models simultaneously. To make the model robust against infrequent tokens, we sampled segmentation for each sentence stochastically during training, which resulted in improved performance of text classification. We conducted experiments on sentiment analysis as a text classification task and show that our method achieves better performance than previous methods.", "keyphrases": ["tokenization", "language model", "text classification task"]} +{"id": "hernandez-mena-etal-2020-masri", "title": "MASRI-HEADSET: A Maltese Corpus for Speech Recognition", "abstract": "Maltese, the national language of Malta, is spoken by approximately 500,000 people. 
Speech processing for Maltese is still in its early stages of development. In this paper, we present the first spoken Maltese corpus designed purposely for Automatic Speech Recognition (ASR). The MASRI-HEADSET corpus was developed by the MASRI project at the University of Malta. It consists of 8 hours of speech paired with text, recorded by using short text snippets in a laboratory environment. The speakers were recruited from different geographical locations all over the Maltese islands, and were roughly evenly distributed by gender. This paper also presents some initial results achieved in baseline experiments for Maltese ASR using Sphinx and Kaldi. The MASRI HEADSET Corpus is publicly available for research/academic purposes.", "keyphrases": ["maltese corpus", "speech recognition", "asr"]} +{"id": "florian-etal-2003-named", "title": "Named Entity Recognition through Classifier Combination", "abstract": "This paper presents a classifier-combination experimental framework for named entity recognition in which four diverse classifiers (robust linear classifier, maximum entropy, transformation-based learning, and hidden Markov model) are combined under different conditions. When no gazetteer or other additional training resources are used, the combined system attains a performance of 91.6F on the English development data; integrating name, location and person gazetteers, and named entity systems trained on additional, more general, data reduces the F-measure error by a factor of 15 to 21% on the English data.", "keyphrases": ["entity recognition", "gazetteer", "location", "linguistic feature", "large annotated training"]} +{"id": "verhoeven-etal-2014-automatic", "title": "Automatic Compound Processing: Compound Splitting and Semantic Analysis for Afrikaans and Dutch", "abstract": "Compounding, the process of combining several simplex words into a complex whole, is a productive process in a wide range of languages. In particular, concatenative compounding, in which the components are \u201cglued\u201d together, leads to problems, for instance, in computational tools that rely on a predefined lexicon. Here we present the AuCoPro project, which focuses on compounding in the closely related languages Afrikaans and Dutch. The project consists of subprojects focusing on compound splitting (identifying the boundaries of the components) and compound semantics (identifying semantic relations between the components). We describe the developed datasets as well as results showing the effectiveness of the developed datasets.", "keyphrases": ["compound splitting", "semantic analysis", "dutch"]} +{"id": "patra-etal-2019-bilingual", "title": "Bilingual Lexicon Induction with Semi-supervision in Non-Isometric Embedding Spaces", "abstract": "Recent work on bilingual lexicon induction (BLI) has frequently depended either on aligned bilingual lexicons or on distribution matching, often with an assumption about the isometry of the two spaces. We propose a technique to quantitatively estimate this assumption of the isometry between two embedding spaces and empirically show that this assumption weakens as the languages in question become increasingly etymologically distant. We then propose Bilingual Lexicon Induction with Semi-Supervision (BLISS) \u2014 a semi-supervised approach that relaxes the isometric assumption while leveraging both limited aligned bilingual lexicons and a larger set of unaligned word embeddings, as well as a novel hubness filtering technique. 
Our proposed method obtains state-of-the-art results on 15 of 18 language pairs on the MUSE dataset, and does particularly well when the embedding spaces don't appear to be isometric. In addition, we show that adding supervision stabilizes the learning procedure, and is effective even with minimal supervision.", "keyphrases": ["semi-supervision", "isometric assumption", "bilingual lexicon induction"]} +{"id": "lareau-etal-2011-collocations", "title": "Collocations in Multilingual Natural Language Generation: Lexical Functions meet Lexical Functional Grammar", "abstract": "In a collocation, the choice of one lexical item depends on the choice made for another. This poses a problem for simple approaches to lexicalisation in natural language generation systems. In the Meaning-Text framework, recurrent patterns of collocations have been characterised by lexical functions, which offer an elegant way of describing these relationships. Previous work has shown that using lexical functions in the context of multilingual natural language generation allows for a more efficient development of linguistic resources. We propose a way to encode lexical functions in the Lexical Functional Grammar framework.", "keyphrases": ["language generation", "lexical function", "collocation"]} +{"id": "do-etal-2011-minimally", "title": "Minimally Supervised Event Causality Identification", "abstract": "This paper develops a minimally supervised approach, based on focused distributional similarity methods and discourse connectives, for identifying causality relations between events in context. While it has been shown that distributional similarity can help identify causality, we observe that discourse connectives and the particular discourse relation they evoke in context provide additional information towards determining causality between events. We show that combining discourse relation predictions and distributional similarity methods in a global inference procedure provides additional improvements towards determining event causality.", "keyphrases": ["causality", "supervised approach", "similarity method", "discourse relation", "pmi"]} +{"id": "peldszus-stede-2015-joint", "title": "Joint prediction in MST-style discourse parsing for argumentation mining", "abstract": "We introduce a new approach to argumentation mining that we applied to a parallel German/English corpus of short texts annotated with argumentation structure. We focus on structure prediction, which we break into a number of subtasks: relation identification, central claim identification, role classification, and function classification. Our new model jointly predicts different aspects of the structure by combining the different subtask predictions in the edge weights of an evidence graph; we then apply a standard MST decoding algorithm. This model not only outperforms two reasonable baselines and two data-driven models of global argument structure for the difficult subtask of relation identification, but also improves the results for central claim identification and function classification, and it compares favorably to a complex mstparser pipeline.", "keyphrases": ["argumentation mining", "short text", "discourse unit"]} +{"id": "elson-etal-2010-extracting", "title": "Extracting Social Networks from Literary Fiction", "abstract": "We present a method for extracting social networks from literature, namely, nineteenth-century British novels and serials. 
We derive the networks from dialogue interactions, and thus our method depends on the ability to determine when two characters are in conversation. Our approach involves character name chunking, quoted speech attribution and conversation detection given the set of quotes. We extract features from the social networks and examine their correlation with one another, as well as with metadata such as the novel's setting. Our results provide evidence that the majority of novels in this time period do not fit two characterizations provided by literary scholars. Instead, our results suggest an alternative explanation for differences in social networks.", "keyphrases": ["social network", "literary fiction", "british novel", "narrative text", "network extraction"]} +{"id": "teufel-etal-2006-annotation", "title": "An annotation scheme for citation function", "abstract": "We study the interplay of the discourse structure of a scientific argument with formal citations. One subproblem of this is to classify academic citations in scientific articles according to their rhetorical function, e.g., as a rival approach, as a part of the solution, or as a flawed approach that justifies the current research. Here, we introduce our annotation scheme with 12 categories, and present an agreement study.", "keyphrases": ["annotation scheme", "citation function", "rhetorical function"]} +{"id": "xue-li-2018-aspect", "title": "Aspect Based Sentiment Analysis with Gated Convolutional Networks", "abstract": "Aspect based sentiment analysis (ABSA) can provide more detailed information than general sentiment analysis, because it aims to predict the sentiment polarities of the given aspects or entities in text. We summarize previous approaches into two subtasks: aspect-category sentiment analysis (ACSA) and aspect-term sentiment analysis (ATSA). Most previous approaches employ long short-term memory and attention mechanisms to predict the sentiment polarity of the concerned targets, which are often complicated and need more training time. We propose a model based on convolutional neural networks and gating mechanisms, which is more accurate and efficient. First, the novel Gated Tanh-ReLU Units can selectively output the sentiment features according to the given aspect or entity. The architecture is much simpler than the attention layer used in existing models. Second, the computations of our model could be easily parallelized during training, because convolutional layers do not have time dependency as in LSTM layers, and gating units also work independently. The experiments on SemEval datasets demonstrate the efficiency and effectiveness of our models.", "keyphrases": ["convolutional neural network", "sentiment classification", "aspect information"]} +{"id": "shuster-etal-2021-retrieval-augmentation", "title": "Retrieval Augmentation Reduces Hallucination in Conversation", "abstract": "Despite showing increasingly human-like conversational abilities, state-of-the-art dialogue models often suffer from factual incorrectness and hallucination of knowledge (Roller et al., 2020). In this work we explore the use of neural-retrieval-in-the-loop architectures - recently shown to be effective in open-domain QA (Lewis et al., 2020b; Izacard and Grave, 2020) - for knowledge-grounded dialogue, a task that is arguably more challenging as it requires querying based on complex multi-turn dialogue context and generating conversationally coherent responses. 
We study various types of architectures with multiple components - retrievers, rankers, and encoder-decoders - with the goal of maximizing knowledgeability while retaining conversational ability. We demonstrate that our best models obtain state-of-the-art performance on two knowledge-grounded conversational tasks. The models exhibit open-domain conversational capabilities, generalize effectively to scenarios not within the training data, and, as verified by human evaluations, substantially reduce the well-known problem of knowledge hallucination in state-of-the-art chatbots.", "keyphrases": ["hallucination", "conversation", "knowledge-grounded dialogue"]} +{"id": "sennrich-zhang-2019-revisiting", "title": "Revisiting Low-Resource Neural Machine Translation: A Case Study", "abstract": "It has been shown that the performance of neural machine translation (NMT) drops starkly in low-resource conditions, underperforming phrase-based statistical machine translation (PBSMT) and requiring large amounts of auxiliary data to achieve competitive results. In this paper, we re-assess the validity of these results, arguing that they are the result of lack of system adaptation to low-resource settings. We discuss some pitfalls to be aware of when training low-resource NMT systems, and recent techniques that have been shown to be especially helpful in low-resource settings, resulting in a set of best practices for low-resource NMT. In our experiments on German\u2013English with different amounts of IWSLT14 training data, we show that, without the use of any auxiliary monolingual or multilingual data, an optimized NMT system can outperform PBSMT with far less data than previously claimed. We also apply these techniques to a low-resource Korean\u2013English dataset, surpassing previously reported results by 4 BLEU.", "keyphrases": ["low-resource setting", "low-resource nmt", "nmt model", "hyperparameter"]} +{"id": "rentoumi-etal-2010-united", "title": "United we Stand: Improving Sentiment Analysis by Joining Machine Learning and Rule Based Methods", "abstract": "In the past, we have successfully used machine learning approaches for sentiment analysis. In the course of those experiments, we observed that our machine learning method, although able to cope well with figurative language, could not always reach a certain decision about the polarity orientation of sentences, yielding erroneous evaluations. We support the conjecture that these cases bearing mild figurativeness could be better handled by a rule-based system. These two systems, acting complementarily, could bridge the gap between machine learning and rule-based approaches. Experimental results using the corpus of the Affective Text Task of SemEval '07 provide evidence in favor of this direction.", "keyphrases": ["sentiment analysis", "machine learning", "rule-based system"]} +{"id": "tang-etal-2018-self", "title": "Why Self-Attention? A Targeted Evaluation of Neural Machine Translation Architectures", "abstract": "Recently, non-recurrent architectures (convolutional, self-attentional) have outperformed RNNs in neural machine translation. CNNs and self-attentional networks can connect distant words via shorter network paths than RNNs, and it has been speculated that this improves their ability to model long-range dependencies. However, this theoretical argument has not been tested empirically, nor have alternative explanations for their strong performance been explored in-depth. 
We hypothesize that the strong performance of CNNs and self-attentional networks could also be due to their ability to extract semantic features from the source text, and we evaluate RNNs, CNNs and self-attention networks on two tasks: subject-verb agreement (where capturing long-range dependencies is required) and word sense disambiguation (where semantic feature extraction is required). Our experimental results show that: 1) self-attentional networks and CNNs do not outperform RNNs in modeling subject-verb agreement over long distances; 2) self-attentional networks perform distinctly better than RNNs and CNNs on word sense disambiguation.", "keyphrases": ["self-attention", "cnns", "long-range dependency", "agreement", "word sense disambiguation"]} +{"id": "friedrich-etal-2016-situation", "title": "Situation entity types: automatic classification of clause-level aspect", "abstract": "This paper describes the first robust approach to automatically labeling clauses with their situation entity type (Smith, 2003), capturing aspectual phenomena at the clause level which are relevant for interpreting both semantics at the clause level and discourse structure. Previous work on this task used a small data set from a limited domain, and relied mainly on words as features, an approach which is impractical in larger settings. We provide a new corpus of texts from 13 genres (40,000 clauses) annotated with situation entity types. We show that our sequence labeling approach using distributional information in the form of Brown clusters, as well as syntactic-semantic features targeted to the task, is robust across genres, reaching accuracies of up to 76%.", "keyphrases": ["automatic classification", "clause-level aspect", "genre", "situation entity type"]} +{"id": "kumar-jena-etal-2020-c", "title": "C-Net: Contextual Network for Sarcasm Detection", "abstract": "Automatic Sarcasm Detection in conversations is a difficult and tricky task. Classifying an utterance as sarcastic or not in isolation can be futile since most of the time the sarcastic nature of a sentence heavily relies on its context. This paper presents our proposed model, C-Net, which takes contextual information of a sentence in a sequential manner to classify it as sarcastic or non-sarcastic. Our model showcases competitive performance in the Sarcasm Detection shared task organised on CodaLab, achieving a 75.0% F1-score on the Twitter dataset and a 66.3% F1-score on the Reddit dataset.", "keyphrases": ["sarcasm detection", "contextual information", "c-net"]} +{"id": "he-etal-2010-improving", "title": "Improving the Post-Editing Experience using Translation Recommendation: A User Study", "abstract": "We report findings from a user study with professional post-editors using a translation recommendation framework (He et al., 2010) to integrate Statistical Machine Translation (SMT) output with Translation Memory (TM) systems. The framework recommends SMT outputs to a TM user when it predicts that SMT outputs are more suitable for post-editing than the hits provided by the TM. We analyze the effectiveness of the model as well as the reaction of potential users. 
Based on the performance statistics and the users' comments, we find that translation recommendation can reduce the workload of professional post-editors and improve the acceptance of MT in the localization industry.", "keyphrases": ["translation recommendation", "user study", "post-editor"]} +{"id": "li-etal-2020-active-learning", "title": "Active Learning for Coreference Resolution using Discrete Annotation", "abstract": "We improve upon pairwise annotation for active learning in coreference resolution, by asking annotators to identify mention antecedents if a presented mention pair is deemed not coreferent. This simple modification, when combined with a novel mention clustering algorithm for selecting which examples to label, is much more efficient in terms of the performance obtained per annotation budget. In experiments with existing benchmark coreference datasets, we show that the signal from this additional question leads to significant performance gains per human-annotation hour. Future work can use our annotation protocol to effectively develop coreference models for new domains. Our code is publicly available.", "keyphrases": ["coreference resolution", "discrete annotation", "mention", "active learning"]} +{"id": "weeds-weir-2003-general", "title": "A General Framework for Distributional Similarity", "abstract": "We present a general framework for distributional similarity based on the concepts of precision and recall. Different parameter settings within this framework approximate different existing similarity measures as well as many more which have, until now, been unexplored. We show that optimal parameter settings outperform two existing state-of-the-art similarity measures on two evaluation tasks for high and low frequency nouns.", "keyphrases": ["distributional similarity", "dih", "inclusion hypothesis", "research area", "weight"]} +{"id": "tuggener-etal-2020-ledgar", "title": "LEDGAR: A Large-Scale Multi-label Corpus for Text Classification of Legal Provisions in Contracts", "abstract": "We present LEDGAR, a multilabel corpus of legal provisions in contracts. The corpus was crawled and scraped from the public domain (SEC filings) and is, to the best of our knowledge, the first freely available corpus of its kind. Since the corpus was constructed semi-automatically, we apply and discuss various approaches to noise removal. Due to the rather large labelset of over 12'000 labels annotated in almost 100'000 provisions in over 60'000 contracts, we believe the corpus to be of interest for research in the field of Legal NLP, (large-scale or extreme) text classification, as well as for legal studies. We discuss several methods to sample subcorpora from the corpus and implement and evaluate different automatic classification approaches. Finally, we perform transfer experiments to evaluate how well the classifiers perform on contracts stemming from outside the corpus.", "keyphrases": ["text classification", "contract", "ledgar"]} +{"id": "fan-etal-2018-hierarchical", "title": "Hierarchical Neural Story Generation", "abstract": "We explore story generation: creative systems that can build coherent and fluent passages of text about a topic. We collect a large dataset of 300K human-written stories paired with writing prompts from an online forum. Our dataset enables hierarchical story generation, where the model first generates a premise, and then transforms it into a passage of text. 
We gain further improvements with a novel form of model fusion that improves the relevance of the story to the prompt, and by adding a new gated multi-scale self-attention mechanism to model long-range context. Experiments show large improvements over strong baselines on both automated and human evaluations. Human judges prefer stories generated by our approach to those from a strong non-hierarchical model by a factor of two to one.", "keyphrases": ["story generation", "coherence", "language model", "generation model", "perplexity"]} +{"id": "luo-etal-2020-grace", "title": "GRACE: Gradient Harmonized and Cascaded Labeling for Aspect-based Sentiment Analysis", "abstract": "In this paper, we focus on the imbalance issue, which is rarely studied in aspect term extraction and aspect sentiment classification when regarding them as sequence labeling tasks. Besides, previous works usually ignore the interaction between aspect terms when labeling polarities. We propose a GRadient hArmonized and CascadEd labeling model (GRACE) to solve these problems. Specifically, a cascaded labeling module is developed to enhance the interchange between aspect terms and improve the attention of sentiment tokens when labeling sentiment polarities. The polarity sequence is designed to depend on the generated aspect term labels. To alleviate the imbalance issue, we extend the gradient harmonized mechanism used in object detection to aspect-based sentiment analysis by adjusting the weight of each label dynamically. The proposed GRACE adopts a post-pretraining BERT as its backbone. Experimental results demonstrate that the proposed model achieves consistent improvements on multiple benchmark datasets and generates state-of-the-art results.", "keyphrases": ["gradient", "sentiment analysis", "aspect term"]} +{"id": "he-etal-2019-unlearn", "title": "Unlearn Dataset Bias in Natural Language Inference by Fitting the Residual", "abstract": "Statistical natural language inference (NLI) models are susceptible to learning dataset bias: superficial cues that happen to associate with the label on a particular dataset, but are not useful in general, e.g., negation words indicate contradiction. As exposed by several recent challenge datasets, these models perform poorly when such association is absent, e.g., predicting that \u201cI love dogs.\u201d contradicts \u201cI don't love cats.\u201d. Our goal is to design learning algorithms that guard against known dataset bias. We formalize the concept of dataset bias under the framework of distribution shift and present a simple debiasing algorithm based on residual fitting, which we call DRiFt. We first learn a biased model that only uses features that are known to relate to dataset bias. Then, we train a debiased model that fits to the residual of the biased model, focusing on examples that cannot be predicted well by biased features only. We use DRiFt to train three high-performing NLI models on two benchmark datasets, SNLI and MNLI. 
Our debiased models achieve significant gains over baseline models on two challenge test sets, while maintaining reasonable performance on the original test sets.", "keyphrases": ["dataset bias", "natural language inference", "residual", "training objective", "adversarial dataset"]} +{"id": "khashabi-etal-2018-looking", "title": "Looking Beyond the Surface: A Challenge Set for Reading Comprehension over Multiple Sentences", "abstract": "We present a reading comprehension challenge in which questions can only be answered by taking into account information from multiple sentences. We solicit and verify questions and answers for this challenge through a 4-step crowdsourcing experiment. Our challenge dataset contains 6,500+ questions for 1000+ paragraphs across 7 different domains (elementary school science, news, travel guides, fiction stories, etc.), bringing in linguistic diversity to the texts and to the question wordings. On a subset of our dataset, we found human solvers to achieve an F1-score of 88.1%. We analyze a range of baselines, including a recent state-of-the-art reading comprehension system, and demonstrate the difficulty of this challenge, despite a high human performance. The dataset is the first to study multi-sentence inference at scale, with an open-ended set of question types that requires reasoning skills.", "keyphrases": ["reading comprehension", "multiple sentence", "reasoning", "correct answer"]} +{"id": "borin-etal-2012-open", "title": "The open lexical infrastructure of Spr\u00e5kbanken", "abstract": "We present our ongoing work on Karp, Spr\u00e5kbanken's (the Swedish Language Bank) open lexical infrastructure, which has two main functions: (1) to support the work on creating, curating, and integrating our various lexical resources; and (2) to publish daily versions of the resources, making them searchable and downloadable. An important requirement on the lexical infrastructure is also that we maintain a strong bidirectional connection to our corpus infrastructure. At the heart of the infrastructure is the SweFN++ project with the goal to create free Swedish lexical resources geared towards language technology applications. The infrastructure currently hosts 15 Swedish lexical resources, including historical ones, some of which have been created from scratch using existing free resources, both external and in-house. The resources are integrated through links to a pivot lexical resource, SALDO, a large morphological and lexical-semantic resource for modern Swedish. SALDO has been selected as the pivot partly because of its size and quality, but also because its form and sense units have been assigned persistent identifiers (PIDs) to which the lexical information in other lexical resources and in corpora are linked.", "keyphrases": ["lexical infrastructure", "spr\u00e5kbanken", "strong bidirectional connection"]} +{"id": "huang-lee-2008-contrastive", "title": "Contrastive Approach towards Text Source Classification based on Top-Bag-of-Word Similarity", "abstract": "This paper proposes a method to automatically classify texts from different varieties of the same language. We show that similarity measure is a robust tool for studying comparable corpora of language variations. We take LDC\u2019s Chinese Gigaword Corpus, composed of three varieties of Chinese from Mainland China, Singapore, and Taiwan, as the comparable corpora. Top-bag-of-word similarity measures reflect distances among the three varieties of the same language. 
A Top-bag-of-word similarity based contrastive approach was taken to solve the text source classification problem. Our results show that a contrastive approach that uses similarity to rule out the identity of a source and to arrive at the actual source by inference is more robust than direct confirmation of the source by similarity. We show that this approach is robust when applied to other texts.", "keyphrases": ["top-bag-of-word similarity", "same language", "chinese"]} +{"id": "zhao-etal-2006-improved", "title": "An Improved Chinese Word Segmentation System with Conditional Random Field", "abstract": "In this paper, we describe a Chinese word segmentation system that we developed for the Third SIGHAN Chinese Language Processing Bakeoff (Bakeoff2006). We took part in six tracks, namely the closed and open track on three corpora, Academia Sinica (CKIP), City University of Hong Kong (CityU), and University of Pennsylvania/University of Colorado (UPUC). Based on a conditional random field based approach, our word segmenter achieved the highest F measures in four tracks, and the third highest in the other two tracks. We found that the use of a 6-tag set, tone features of Chinese characters, and assistant segmenters trained on other corpora further improves Chinese word segmentation performance.", "keyphrases": ["word segmentation system", "conditional random field", "chinese character"]} +{"id": "chen-etal-2015-long", "title": "Long Short-Term Memory Neural Networks for Chinese Word Segmentation", "abstract": "Currently, most state-of-the-art methods for Chinese word segmentation are based on supervised learning, whose features are mostly extracted from a local context. These methods cannot utilize the long distance information, which is also crucial for word segmentation. In this paper, we propose a novel neural network model for Chinese word segmentation, which adopts the long short-term memory (LSTM) neural network to keep the previous important information in a memory cell and avoids the limit of the local context window size. Experiments on PKU, MSRA and CTB6 benchmark datasets show that our model outperforms the previous neural network models and state-of-the-art methods.", "keyphrases": ["chinese word segmentation", "long distance information", "neural network model", "long short-term memory", "context window"]} +{"id": "jiang-etal-2019-challenge", "title": "A Challenge Dataset and Effective Models for Aspect-Based Sentiment Analysis", "abstract": "Aspect-based sentiment analysis (ABSA) has attracted increasing attention recently due to its broad applications. In existing ABSA datasets, most sentences contain only one aspect or multiple aspects with the same sentiment polarity, which makes the ABSA task degenerate to sentence-level sentiment analysis. In this paper, we present a new large-scale Multi-Aspect Multi-Sentiment (MAMS) dataset, in which each sentence contains at least two different aspects with different sentiment polarities. The release of this dataset would push forward the research in this field. In addition, we propose simple yet effective CapsNet and CapsNet-BERT models which combine the strengths of recent NLP advances. 
Experiments on our new dataset show that the proposed model significantly outperforms the state-of-the-art baseline methods.", "keyphrases": ["challenge dataset", "sentiment analysis", "semeval dataset"]} +{"id": "gamon-etal-2005-sentence", "title": "Sentence-level MT evaluation without reference translations: beyond language modeling", "abstract": "In this paper we investigate the possibility of evaluating MT quality and fluency at the sentence level in the absence of reference translations. We measure the correlation between automatically-generated scores and human judgments, and we evaluate the performance of our system when used as a classifier for identifying highly dysfluent and ill-formed sentences. We show that we can substantially improve on the correlation between language model perplexity scores and human judgment by combining these perplexity scores with class probabilities from a machine-learned classifier. The classifier uses linguistic features and has been trained to distinguish human translations from machine translations. We show that this approach also performs well in identifying dysfluent sentences.", "keyphrases": ["reference translation", "sentence level", "judgment", "perplexity score", "svm classifier"]} +{"id": "pei-etal-2014-max", "title": "Max-Margin Tensor Neural Network for Chinese Word Segmentation", "abstract": "Recently, neural network models for natural language processing tasks have been increasingly focused on for their ability to alleviate the burden of manual feature engineering. In this paper, we propose a novel neural network model for Chinese word segmentation called Max-Margin Tensor Neural Network (MMTNN). By exploiting tag embeddings and tensor-based transformation, MMTNN has the ability to model complicated interactions between tags and context characters. Furthermore, a new tensor factorization approach is proposed to speed up the model and avoid overfitting. Experiments on the benchmark dataset show that our model achieves better performance than previous neural network models and that our model can achieve a competitive performance with minimal feature engineering. Despite Chinese word segmentation being a specific case, MMTNN can be easily generalized and applied to other sequence labeling tasks.", "keyphrases": ["chinese word segmentation", "neural network model", "tensor model"]} +{"id": "mccoy-etal-2012-linking", "title": "Linking Uncertainty in Physicians' Narratives to Diagnostic Correctness", "abstract": "In the medical domain, misdiagnoses and diagnostic uncertainty put lives at risk and incur substantial financial costs. Clearly, medical reasoning and decision-making need to be better understood. We explore a possible link between linguistic expression and diagnostic correctness. We report on an unusual data set of spoken diagnostic narratives used to computationally model and predict diagnostic correctness based on automatically extracted and linguistically motivated features that capture physicians' uncertainty. A multimodal data set was collected as dermatologists viewed images of skin conditions and explained their diagnostic process and observations aloud. We discuss experimentation and analysis in initial and secondary pilot studies. 
In both cases, we experimented with computational modeling using features from the acoustic-prosodic and lexical-structural linguistic modalities.", "keyphrases": ["uncertainty", "narrative", "diagnostic correctness"]} +{"id": "kafle-etal-2017-data", "title": "Data Augmentation for Visual Question Answering", "abstract": "Data augmentation is widely used to train deep neural networks for image classification tasks. Simply flipping images can help learning tremendously by increasing the number of training images by a factor of two. However, little work has been done studying data augmentation in natural language processing. Here, we describe two methods for data augmentation for Visual Question Answering (VQA). The first uses existing semantic annotations to generate new questions. The second method is a generative approach using recurrent neural networks. Experiments show that the proposed data augmentation improves the performance of both baseline and state-of-the-art VQA algorithms.", "keyphrases": ["visual question answering", "new question", "data augmentation"]} +{"id": "lee-seneff-2008-correcting", "title": "Correcting Misuse of Verb Forms", "abstract": "This paper proposes a method to correct English verb form errors made by non-native speakers. A basic approach is template matching on parse trees. The proposed method improves on this approach in two ways. To improve recall, irregularities in parse trees caused by verb form errors are taken into account; to improve precision, n-gram counts are utilized to filter proposed corrections. Evaluation on non-native corpora, representing two genres and mother tongues, shows promising results.", "keyphrases": ["misuse", "verb form error", "parse tree", "correction"]} +{"id": "pustejovsky-etal-2010-iso", "title": "ISO-TimeML: An International Standard for Semantic Annotation", "abstract": "In this paper, we present ISO-TimeML, a revised and interoperable version of the temporal markup language, TimeML. We describe the changes and enrichments made, while framing the effort in a more general methodology of semantic annotation. In particular, we assume a principled distinction between the annotation of an expression and the representation which that annotation denotes. This involves not only the specification of an annotation language for a particular phenomenon, but also the development of a meta-model that allows one to interpret the syntactic expressions of the specification semantically.", "keyphrases": ["standard", "semantic annotation", "timeml"]} +{"id": "ion-2012-pexacc", "title": "PEXACC: A Parallel Sentence Mining Algorithm from Comparable Corpora", "abstract": "Extracting parallel data from comparable corpora in order to enrich existing statistical translation models is an avenue that has attracted a lot of research in recent years. There are experiments that convincingly show how parallel data extracted from comparable corpora is able to improve statistical machine translation. Yet, the existing body of research on parallel sentence mining from comparable corpora does not take into account the degree of comparability of the corpus being processed or the computation time it takes to extract parallel sentences from a corpus of a given size. We will show that the performance of a parallel sentence extractor crucially depends on the degree of comparability such that it is more difficult to process a weakly comparable corpus than a strongly comparable corpus. 
In this paper we describe PEXACC, a distributed (running on multiple CPUs), trainable parallel sentence/phrase extractor from comparable corpora. PEXACC is freely available for download with the ACCURAT Toolkit, a collection of MT-related tools developed in the ACCURAT project.", "keyphrases": ["parallel sentence", "comparable corpora", "pexacc"]} +{"id": "margatina-etal-2021-active", "title": "Active Learning by Acquiring Contrastive Examples", "abstract": "Common acquisition functions for active learning use either uncertainty or diversity sampling, aiming to select difficult and diverse data points from the pool of unlabeled data, respectively. In this work, leveraging the best of both worlds, we propose an acquisition function that opts for selecting contrastive examples, i.e. data points that are similar in the model feature space and yet the model outputs maximally different predictive likelihoods. We compare our approach, CAL (Contrastive Active Learning), with a diverse set of acquisition functions in four natural language understanding tasks and seven datasets. Our experiments show that CAL consistently performs better than or on par with the best-performing baseline across all tasks, on both in-domain and out-of-domain data. We also conduct an extensive ablation study of our method and we further analyze all actively acquired datasets, showing that CAL achieves a better trade-off between uncertainty and diversity compared to other strategies.", "keyphrases": ["contrastive example", "likelihood", "active learning"]} +{"id": "gonen-goldberg-2019-language", "title": "Language Modeling for Code-Switching: Evaluation, Integration of Monolingual Data, and Discriminative Training", "abstract": "We focus on the problem of language modeling for code-switched language, in the context of automatic speech recognition (ASR). Language modeling for code-switched language is challenging for (at least) three reasons: (1) lack of available large-scale code-switched data for training; (2) lack of a replicable evaluation setup that is ASR directed yet isolates language modeling performance from the other intricacies of the ASR system; and (3) the reliance on generative modeling. We tackle these three issues: we propose an ASR-motivated evaluation setup which is decoupled from an ASR system and the choice of vocabulary, and provide an evaluation dataset for English-Spanish code-switching. This setup lends itself to a discriminative training approach, which we demonstrate to work better than generative language modeling. Finally, we explore a variety of training protocols and verify the effectiveness of training with large amounts of monolingual data followed by fine-tuning with small amounts of code-switched data, for both the generative and discriminative cases.", "keyphrases": ["code-switching", "monolingual data", "language modeling"]} +{"id": "firat-etal-2016-multi", "title": "Multi-Way, Multilingual Neural Machine Translation with a Shared Attention Mechanism", "abstract": "We propose multi-way, multilingual neural machine translation. The proposed approach enables a single neural translation model to translate between multiple languages, with a number of parameters that grows only linearly with the number of languages. This is made possible by having a single attention mechanism that is shared across all language pairs. 
We train the proposed multi-way, multilingual model on ten language pairs from WMT'15 simultaneously and observe clear performance improvements over models trained on only one language pair. In particular, we observe that the proposed model significantly improves the translation quality of low-resource language pairs.", "keyphrases": ["machine translation", "single attention mechanism", "multi-way multilingual nmt", "main goal"]} +{"id": "abend-rappoport-2017-state", "title": "The State of the Art in Semantic Representation", "abstract": "Semantic representation has been receiving growing attention in NLP in the past few years, and many proposals for semantic schemes (e.g., AMR, UCCA, GMB, UDS) have been put forth. Yet, little has been done to assess the achievements and the shortcomings of these new contenders, compare them with syntactic schemes, and clarify the general goals of research on semantic representation. We address these gaps by critically surveying the state of the art in the field.", "keyphrases": ["art", "semantic representation", "annotator"]} +{"id": "zhang-etal-2020-seqmix", "title": "SeqMix: Augmenting Active Sequence Labeling via Sequence Mixup", "abstract": "Active learning is an important technique for low-resource sequence labeling tasks. However, current active sequence labeling methods use the queried samples alone in each iteration, which is an inefficient way of leveraging human annotations. We propose a simple but effective data augmentation method to improve label efficiency of active sequence labeling. Our method, SeqMix, simply augments the queried samples by generating extra labeled sequences in each iteration. The key difficulty is to generate plausible sequences along with token-level labels. In SeqMix, we address this challenge by performing mixup for both sequences and token-level labels of the queried samples. Furthermore, we design a discriminator during sequence mixup, which judges whether the generated sequences are plausible or not. Our experiments on Named Entity Recognition and Event Detection tasks show that SeqMix can improve the standard active sequence labeling method by 2.27%\u20133.75% in terms of F_1 scores. The code and data for SeqMix can be found at .", "keyphrases": ["sequence mixup", "active learning", "sequence labeling task"]} +{"id": "dagan-etal-2006-direct", "title": "Direct Word Sense Matching for Lexical Substitution", "abstract": "This paper investigates conceptually and empirically the novel sense matching task, which requires recognizing whether the senses of two synonymous words match in context. We suggest direct approaches to the problem, which avoid the intermediate step of explicit word sense disambiguation, and demonstrate their appealing advantages and stimulating potential for future research.", "keyphrases": ["lexical substitution", "indirect approach", "direction"]} +{"id": "gerber-chai-2012-semantic", "title": "Semantic Role Labeling of Implicit Arguments for Nominal Predicates", "abstract": "Nominal predicates often carry implicit arguments. Recent work on semantic role labeling has focused on identifying arguments within the local context of a predicate; implicit arguments, however, have not been systematically examined. To address this limitation, we have manually annotated a corpus of implicit arguments for ten predicates from NomBank. Through analysis of this corpus, we find that implicit arguments add 71% to the argument structures that are present in NomBank. 
Using the corpus, we train a discriminative model that is able to identify implicit arguments with an F1 score of 50%, significantly outperforming an informed baseline model. This article describes our investigation, explores a wide variety of features important for the task, and discusses future directions for work on implicit argument identification.", "keyphrases": ["implicit argument", "nombank", "semantic role labeling"]} +{"id": "su-etal-2017-sample", "title": "Sample-efficient Actor-Critic Reinforcement Learning with Supervised Data for Dialogue Management", "abstract": "Deep reinforcement learning (RL) methods have significant potential for dialogue policy optimisation. However, they suffer from poor performance in the early stages of learning. This is especially problematic for on-line learning with real users. Two approaches are introduced to tackle this problem. Firstly, to speed up the learning process, two sample-efficient neural network algorithms are presented: trust region actor-critic with experience replay (TRACER) and episodic natural actor-critic with experience replay (eNACER). For TRACER, the trust region helps to control the learning step size and avoid catastrophic model changes. For eNACER, the natural gradient identifies the steepest ascent direction in policy space to speed up the convergence. Both models employ off-policy learning with experience replay to improve sample-efficiency. Secondly, to mitigate the cold start issue, a corpus of demonstration data is utilised to pre-train the models prior to on-line reinforcement learning. Combining these two approaches, we demonstrate a practical approach to learn deep RL-based dialogue policies and demonstrate their effectiveness in a task-oriented information seeking domain.", "keyphrases": ["reinforcement learning", "dialogue policy", "action"]} +{"id": "mihalcea-strapparava-2005-making", "title": "Making Computers Laugh: Investigations in Automatic Humor Recognition", "abstract": "Humor is one of the most interesting and puzzling aspects of human behavior. Despite the attention it has received in fields such as philosophy, linguistics, and psychology, there have been only a few attempts to create computational models for humor recognition or generation. In this paper, we bring empirical evidence that computational approaches can be successfully applied to the task of humor recognition. Through experiments performed on very large data sets, we show that automatic classification techniques can be effectively used to distinguish between humorous and non-humorous texts, with significant improvements observed over a priori known baselines.", "keyphrases": ["automatic humor recognition", "alliteration", "stylistic feature", "joke", "human-centric feature"]} +{"id": "xue-etal-2014-interlingua", "title": "Not an Interlingua, But Close: Comparison of English AMRs to Chinese and Czech", "abstract": "Abstract Meaning Representations (AMRs) are rooted, directional and labeled graphs that abstract away from morpho-syntactic idiosyncrasies such as word category (verbs and nouns), word order, and function words (determiners, some prepositions). Because these syntactic idiosyncrasies account for many of the cross-lingual differences, it would be interesting to see if this representation can serve, e.g., as a useful, minimally divergent transfer layer in machine translation. To answer this question, we have translated 100 English sentences that have existing AMRs into Chinese and Czech to create AMRs for them. 
A cross-linguistic comparison of English to Chinese and Czech AMRs reveals both cases where the AMRs for the language pairs align well structurally and cases of linguistic divergence. We found that the level of compatibility of AMR between English and Chinese is higher than between English and Czech. We believe this kind of comparison is beneficial to further refining the annotation standards for each of the three languages and will lead to more compatible annotation guidelines between the languages.", "keyphrases": ["interlingua", "english amr", "chinese", "word order", "non-english language"]} +{"id": "bicici-yuret-2011-instance", "title": "Instance Selection for Machine Translation using Feature Decay Algorithms", "abstract": "We present an empirical study of instance selection techniques for machine translation. In an active learning setting, instance selection minimizes the human effort by identifying the most informative sentences for translation. In a transductive learning setting, selection of training instances relevant to the test set improves the final translation quality. After reviewing the state of the art in the field, we generalize the main ideas in a class of instance selection algorithms that use feature decay. Feature decay algorithms increase diversity of the training set by devaluing features that are already included. We show that the feature decay rate has a very strong effect on the final translation quality whereas the initial feature values, inclusion of higher order features, or sentence length normalizations do not. We evaluate the best instance selection methods using a standard Moses baseline using the whole 1.6 million sentence English-German section of the Europarl corpus. We show that selecting the best 3000 training sentences for a specific test sentence is sufficient to obtain a score within 1 BLEU of the baseline, using 5% of the training data is sufficient to exceed the baseline, and a ~ 2 BLEU improvement over the baseline is possible with an optimally selected subset of the training data. In out-of-domain translation, we are able to reduce the training set size to about 7% and achieve a similar performance to the baseline.", "keyphrases": ["machine translation", "feature decay algorithm", "fda", "bic\u0327ici", "similar one"]} +{"id": "redkar-etal-2017-hindi", "title": "Hindi Shabdamitra: A Wordnet based E-Learning Tool for Language Learning and Teaching", "abstract": "In today's technology-driven digital era, the education domain is undergoing a transformation from traditional approaches to more learner-controlled and flexible methods of learning. This transformation has opened new avenues for interdisciplinary research in the field of educational technology and natural language processing in developing quality digital aids for learning and teaching. The tool presented here - Hindi Shabdamitra, developed using Hindi Wordnet for Hindi language learning, is one such e-learning tool. It has been developed as a teaching and learning aid suitable for a formal school-based curriculum and an informal setup for self-learning users. Besides vocabulary, it also provides word-based grammar along with images and pronunciation for better learning and retention. 
This aid demonstrates how a rich lexical resource like wordnet can be systematically remodeled for practical usage in the educational domain.", "keyphrases": ["wordnet", "language learning", "teaching"]} +{"id": "guo-etal-2019-star", "title": "Star-Transformer", "abstract": "Although Transformer has achieved great success on many NLP tasks, its heavy structure with fully-connected attention connections leads to dependencies on large training data. In this paper, we present Star-Transformer, a lightweight alternative by careful sparsification. To reduce model complexity, we replace the fully-connected structure with a star-shaped topology, in which every two non-adjacent nodes are connected through a shared relay node. Thus, complexity is reduced from quadratic to linear, while preserving the capacity to capture both local composition and long-range dependency. The experiments on four tasks (22 datasets) show that Star-Transformer achieved significant improvements over the standard Transformer for the modestly sized datasets.", "keyphrases": ["transformer", "star-shaped topology", "star-transformer"]} +{"id": "cai-etal-2011-language", "title": "Language-Independent Parsing with Empty Elements", "abstract": "We present a simple, language-independent method for integrating recovery of empty elements into syntactic parsing. This method outperforms the best published method we are aware of on English and a recently published method on Chinese.", "keyphrases": ["empty element", "syntactic parsing", "chinese"]} +{"id": "ferret-2017-turning", "title": "Turning Distributional Thesauri into Word Vectors for Synonym Extraction and Expansion", "abstract": "In this article, we propose to investigate a new problem consisting in turning a distributional thesaurus into dense word vectors. We propose more precisely a method for performing such a task by associating graph embedding and distributed representation adaptation. We have applied and evaluated it for English nouns at a large scale regarding its ability to retrieve synonyms. In this context, we have also illustrated the interest of the developed method for three different tasks: the improvement of already existing word embeddings, the fusion of heterogeneous representations and the expansion of synsets.", "keyphrases": ["distributional thesauri", "word vector", "thesaurus"]} +{"id": "mohammad-etal-2009-generating", "title": "Generating High-Coverage Semantic Orientation Lexicons From Overtly Marked Words and a Thesaurus", "abstract": "Sentiment analysis often relies on a semantic orientation lexicon of positive and negative words. A number of approaches have been proposed for creating such lexicons, but they tend to be computationally expensive, and usually rely on significant manual annotation and large corpora. Most of these methods use WordNet. In contrast, we propose a simple approach to generate a high-coverage semantic orientation lexicon, which includes both individual words and multi-word expressions, using only a Roget-like thesaurus and a handful of affixes. Further, the lexicon has properties that support the Pollyanna Hypothesis. Using the General Inquirer as gold standard, we show that our lexicon has 14 percentage points more correct entries than the leading WordNet-based high-coverage lexicon (SentiWordNet). In an extrinsic evaluation, we obtain significantly higher performance in determining phrase polarity using our thesaurus-based lexicon than with any other. 
Additionally, we explore the use of visualization techniques to gain insight into our algorithm beyond the evaluations mentioned above.", "keyphrases": ["thesaurus", "entry", "sentiment lexicon"]} +{"id": "he-etal-2010-exploring", "title": "Exploring English Lexicon Knowledge for Chinese Sentiment Analysis", "abstract": "This paper presents a weakly-supervised method for Chinese sentiment analysis by incorporating lexical prior knowledge obtained from English sentiment lexicons through machine translation. A mechanism is introduced to incorporate the prior information about polarity-bearing words obtained from existing sentiment lexicons into latent Dirichlet allocation (LDA) where sentiment labels are considered as topics. Experiments on Chinese product reviews on mobile phones, digital cameras, MP3 players, and monitors demonstrate the feasibility and effectiveness of the proposed approach and show that the weakly supervised LDA model performs as well as supervised classifiers such as Naive Bayes and Support Vector Machines with an average of 83% accuracy achieved over a total of 5484 review documents. Moreover, the LDA model is able to extract highly domain-salient polarity words from text.", "keyphrases": ["sentiment analysis", "lexical prior knowledge", "machine translation", "latent dirichlet allocation"]} +{"id": "duan-etal-2008-searching", "title": "Searching Questions by Identifying Question Topic and Question Focus", "abstract": "This paper is concerned with the problem of question search. In question search, given a question as a query, we are to return questions semantically equivalent or close to the queried question. In this paper, we propose to conduct question search by identifying question topic and question focus. More specifically, we first summarize questions in a data structure consisting of question topic and question focus. Then we model question topic and question focus in a language modeling framework for search. We also propose to use the MDL-based tree cut model for identifying question topic and question focus automatically. Experimental results indicate that our approach of identifying question topic and question focus for search significantly outperforms the baseline methods such as Vector Space Model (VSM) and Language Model for Information Retrieval (LMIR).", "keyphrases": ["question topic", "language model", "many study", "yahoo"]} +{"id": "hu-etal-2020-systematic", "title": "A Systematic Assessment of Syntactic Generalization in Neural Language Models", "abstract": "While state-of-the-art neural network models continue to achieve lower perplexity scores on language modeling benchmarks, it remains unknown whether optimizing for broad-coverage predictive performance leads to human-like syntactic knowledge. Furthermore, existing work has not provided a clear picture about the model properties required to produce proper syntactic generalizations. We present a systematic evaluation of the syntactic knowledge of neural language models, testing 20 combinations of model types and data sizes on a set of 34 English-language syntactic test suites. We find substantial differences in syntactic generalization performance by model architecture, with sequential models underperforming other architectures. Factorially manipulating model architecture and training dataset size (1M-40M words), we find that variability in syntactic generalization performance is substantially greater by architecture than by dataset size for the corpora tested in our experiments. 
Our results also reveal a dissociation between perplexity and syntactic generalization performance.", "keyphrases": ["syntactic generalization", "perplexity", "generalization ability"]} +{"id": "tillmann-2004-unigram", "title": "A Unigram Orientation Model for Statistical Machine Translation", "abstract": "In this paper, we present a unigram segmentation model for statistical machine translation where the segmentation units are blocks: pairs of phrases without internal structure. The segmentation model uses a novel orientation component to handle swapping of neighbor blocks. During training, we collect block unigram counts with orientation: we count how often a block occurs to the left or to the right of some predecessor block. The orientation model is shown to improve translation performance over two models: 1) no block re-ordering is used, and 2) the block swapping is controlled only by a language model. We show experimental results on a standard Arabic-English translation task.", "keyphrases": ["orientation", "statistical machine translation", "swap", "distortion", "several researcher"]} +{"id": "chollampatt-ng-2018-neural", "title": "Neural Quality Estimation of Grammatical Error Correction", "abstract": "Grammatical error correction (GEC) systems deployed in language learning environments are expected to accurately correct errors in learners' writing. However, in practice, they often produce spurious corrections and fail to correct many errors, thereby misleading learners. This necessitates the estimation of the quality of output sentences produced by GEC systems so that instructors can selectively intervene and re-correct the sentences which are poorly corrected by the system and ensure that learners get accurate feedback. We propose the first neural approach to automatic quality estimation of GEC output sentences that does not employ any hand-crafted features. Our system is trained in a supervised manner on learner sentences and corresponding GEC system outputs with quality score labels computed using human-annotated references. Our neural quality estimation models for GEC show significant improvements over a strong feature-based baseline. We also show that a state-of-the-art GEC system can be improved when quality scores are used as features for re-ranking the N-best candidates.", "keyphrases": ["quality estimation", "grammatical error correction", "neural approach"]} +{"id": "gala-etal-2014-model", "title": "A model to predict lexical complexity and to grade words (Un mod\u00e8le pour pr\u00e9dire la complexit\u00e9 lexicale et graduer les mots) [in French]", "abstract": "Analysing lexical complexity is a task that has mainly attracted the attention of psycholinguists and language teachers. More recently, this issue has seen a growing interest in the field of Natural Language Processing (NLP) and, in particular, that of automatic text simplification. The aim of this task is to identify words and structures which may be difficult for a target audience to understand and provide automated tools to simplify these contents. This article focuses on the lexical issue by identifying a set of predictors of lexical complexity whose efficiency is assessed with a correlational analysis. The best of those variables are integrated into a model able to predict the difficulty of words for learners of French.
Keywords: lexical complexity, morphological analysis, graded words, lexical resources.", "keyphrases": ["lexical complexity", "french", "learner", "foreign language"]} +{"id": "li-etal-2018-self", "title": "A Self-Attentive Model with Gate Mechanism for Spoken Language Understanding", "abstract": "Spoken Language Understanding (SLU), which typically involves intent determination and slot filling, is a core component of spoken dialogue systems. Joint learning has been shown to be effective in SLU given that slot tags and intents are supposed to share knowledge with each other. However, most existing joint learning methods only consider joint learning by sharing parameters on the surface level rather than the semantic level. In this work, we propose a novel self-attentive model with gate mechanism to fully utilize the semantic correlation between slot and intent. Our model first obtains intent-augmented embeddings based on a neural network with a self-attention mechanism. Then the intent semantic representation is utilized as the gate for labelling slot tags. The objectives of both tasks are optimized simultaneously via joint learning in an end-to-end way. We conduct experiments on the popular ATIS benchmark. The results show that our model achieves state-of-the-art results and outperforms other popular methods by a large margin in terms of both intent detection error rate and slot filling F1-score. This paper gives a new perspective for research on SLU.", "keyphrases": ["self-attentive model", "spoken language understanding", "part-of-speech tag"]} +{"id": "daume-iii-campbell-2007-bayesian", "title": "A Bayesian Model for Discovering Typological Implications", "abstract": "A standard form of analysis for linguistic typology is the universal implication. These implications state facts about the range of extant languages, such as \u201cif objects come after verbs, then adjectives come after nouns.\u201d Such implications are typically discovered by painstaking hand analysis over a small sample of languages. We propose a computational model for assisting in this process. Our model is able to discover both well-known implications as well as some novel implications that deserve further study. Moreover, through a careful application of hierarchical analysis, we are able to cope with the well-known sampling problem: languages are not independent.", "keyphrases": ["bayesian model", "implication", "linguistic typology", "daume\u0301"]} +{"id": "lin-etal-2014-cmu", "title": "The CMU Submission for the Shared Task on Language Identification in Code-Switched Data", "abstract": "We describe the CMU submission for the 2014 shared task on language identification in code-switched data. We participated in all four language pairs: Spanish\u2010English, Mandarin\u2010English, Nepali\u2010English, and Modern Standard Arabic\u2010Arabic dialects. After describing our CRF-based baseline system, we discuss three extensions for learning from unlabeled data: semi-supervised learning, word embeddings, and word lists.", "keyphrases": ["cmu submission", "language identification", "code-switched data"]} +{"id": "zhou-etal-2008-context", "title": "Context-Sensitive Convolution Tree Kernel for Pronoun Resolution", "abstract": "This paper proposes a context-sensitive convolution tree kernel for pronoun resolution. It resolves two critical problems in previous research in two ways.
First, given a parse tree and a pair of an anaphor and an antecedent candidate, it implements a dynamic-expansion scheme to automatically determine a proper tree span for pronoun resolution by taking predicate- and antecedent competitor-related information into consideration. Second, it applies a context-sensitive convolution tree kernel, which enumerates both context-free and context-sensitive sub-trees by considering their ancestor node paths as their contexts. Evaluation on the ACE 2003 corpus shows that our dynamic-expansion tree span scheme can effectively cover the necessary structured information in the parse tree for pronoun resolution, and that the context-sensitive tree kernel significantly outperforms previous tree kernels.", "keyphrases": ["convolution tree kernel", "pronoun resolution", "parse tree structure"]} +{"id": "nguyen-etal-2009-building", "title": "Building a Large Syntactically-Annotated Corpus of Vietnamese", "abstract": "Treebank is an important resource for both research and application of natural language processing. For Vietnamese, we still lack such corpora. This paper presents up-to-date results of a project for Vietnamese treebank construction. Since Vietnamese is an isolating language and has no word delimiter, there are many ambiguities in sentence analysis. We systematically applied many linguistic techniques to handle such ambiguities. Annotators are supported by automatic-labeling tools and a tree-editor tool. Raw texts are extracted from Tuoi Tre (Youth), an online Vietnamese daily newspaper. The current annotation agreement is around 90 percent.", "keyphrases": ["vietnamese", "treebank", "processing", "syllable", "underscore-based representation"]} +{"id": "nguyen-etal-2017-distinguishing", "title": "Distinguishing Antonyms and Synonyms in a Pattern-based Neural Network", "abstract": "Distinguishing between antonyms and synonyms is a key task to achieve high performance in NLP systems. While they are notoriously difficult to distinguish by distributional co-occurrence models, pattern-based methods have proven effective to differentiate between the relations. In this paper, we present a novel neural network model AntSynNET that exploits lexico-syntactic patterns from syntactic parse trees. In addition to the lexical and syntactic information, we successfully integrate the distance between the related words along the syntactic path as a new pattern feature. The results from classification experiments show that AntSynNET improves the performance over prior pattern-based methods.", "keyphrases": ["synonyms", "pattern-based neural network", "syntactic parse tree"]} +{"id": "chen-etal-2020-joint-aspect", "title": "Joint Aspect Extraction and Sentiment Analysis with Directional Graph Convolutional Networks", "abstract": "End-to-end aspect-based sentiment analysis (EASA) consists of two sub-tasks: the first extracts the aspect terms in a sentence and the second predicts the sentiment polarities for such terms. For EASA, compared to pipeline and multi-task approaches, joint aspect extraction and sentiment analysis provides a one-step solution to predict both aspect terms and their sentiment polarities through a single decoding process, which avoids the mismatches between the results of aspect terms and sentiment polarities, as well as error propagation.
Previous studies, especially recent ones, for this task focus on using powerful encoders (e.g., Bi-LSTM and BERT) to model contextual information from the input, with limited efforts paid to using advanced neural architectures (such as attentions and graph convolutional networks) or leveraging extra knowledge (such as syntactic information). To extend such efforts, in this paper, we propose directional graph convolutional networks (D-GCN) to jointly perform aspect extraction and sentiment analysis with encoding syntactic information, where dependencies among words are integrated in our model to enhance its ability to represent input sentences and help EASA accordingly. Experimental results on three benchmark datasets demonstrate the effectiveness of our approach, where D-GCN achieves state-of-the-art performance on all datasets.", "keyphrases": ["sentiment analysis", "input sentence", "joint aspect extraction"]} +{"id": "tiedemann-2010-context", "title": "Context Adaptation in Statistical Machine Translation Using Models with Exponentially Decaying Cache", "abstract": "We report results from a domain adaptation task for statistical machine translation (SMT) using cache-based adaptive language and translation models. We apply an exponential decay factor and integrate the cache models in a standard phrase-based SMT decoder. Without the need for any domain-specific resources we obtain a 2.6% relative improvement on average in BLEU scores using our dynamic adaptation procedure.", "keyphrases": ["machine translation", "cache-based translation model", "cache-model"]} +{"id": "martins-etal-2010-turbo", "title": "Turbo Parsers: Dependency Parsing by Approximate Variational Inference", "abstract": "We present a unified view of two state-of-the-art non-projective dependency parsers, both approximate: the loopy belief propagation parser of Smith and Eisner (2008) and the relaxed linear program of Martins et al. (2009). By representing the model assumptions with a factor graph, we shed light on the optimization problems tackled in each method. We also propose a new aggressive online algorithm to learn the model parameters, which makes use of the underlying variational representation. The algorithm does not require a learning rate parameter and provides a single framework for a wide family of convex loss functions, including CRFs and structured SVMs. Experiments show state-of-the-art performance for 14 languages.", "keyphrases": ["factor graph", "turbo parser", "exact inference"]} +{"id": "mann-mccallum-2008-generalized", "title": "Generalized Expectation Criteria for Semi-Supervised Learning of Conditional Random Fields", "abstract": "This paper presents a semi-supervised training method for linear-chain conditional random fields that makes use of labeled features rather than labeled instances. This is accomplished by using generalized expectation criteria to express a preference for parameter settings in which the model\u2019s distribution on unlabeled data matches a target distribution. We induce target conditional probability distributions of labels given features from both annotated feature occurrences in context and ad hoc feature majority label assignment.
The use of generalized expectation criteria allows for a dramatic reduction in annotation time by shifting from traditional instance-labeling to feature-labeling, and the methods presented outperform traditional CRF training and other semi-supervised methods when limited human effort is available.", "keyphrases": ["expectation", "semi-supervised learning", "penalty"]} +{"id": "guillou-2012-improving", "title": "Improving Pronoun Translation for Statistical Machine Translation", "abstract": "Machine Translation is a well-established field, yet the majority of current systems translate sentences in isolation, losing valuable contextual information from previously translated sentences in the discourse. One important type of contextual information concerns who or what a coreferring pronoun corefers to (i.e., its antecedent). Languages differ significantly in how they achieve coreference, and awareness of antecedents is important in choosing the correct pronoun. Disregarding a pronoun's antecedent in translation can lead to inappropriate coreferring forms in the target text, seriously degrading a reader's ability to understand it. \n \nThis work assesses the extent to which source-language annotation of coreferring pronouns can improve English-Czech Statistical Machine Translation (SMT). As with previous attempts that use this method, the results show little improvement. This paper attempts to explain why and to provide insight into the factors affecting performance.", "keyphrases": ["pronoun translation", "anaphora resolution", "imperfect coreference"]} +{"id": "huang-etal-2011-nonparametric", "title": "Nonparametric Bayesian Machine Transliteration with Synchronous Adaptor Grammars", "abstract": "Machine transliteration is defined as automatic phonetic translation of names across languages. In this paper, we propose synchronous adaptor grammar, a novel nonparametric Bayesian learning approach, for machine transliteration. This model provides a general framework without heuristic or restriction to automatically learn syllable equivalents between languages. The proposed model outperforms the state-of-the-art EM-based model in the English to Chinese transliteration task.", "keyphrases": ["synchronous adaptor grammar", "grapheme-based transliteration", "nonparametric bayesian"]} +{"id": "salazar-etal-2020-masked", "title": "Masked Language Model Scoring", "abstract": "Pretrained masked language models (MLMs) require finetuning for most NLP tasks. Instead, we evaluate MLMs out of the box via their pseudo-log-likelihood scores (PLLs), which are computed by masking tokens one by one. We show that PLLs outperform scores from autoregressive language models like GPT-2 in a variety of tasks. By rescoring ASR and NMT hypotheses, RoBERTa reduces an end-to-end LibriSpeech model's WER by 30% relative and adds up to +1.7 BLEU on state-of-the-art baselines for low-resource translation pairs, with further gains from domain adaptation. We attribute this success to PLL's unsupervised expression of linguistic acceptability without a left-to-right bias, greatly improving on scores from GPT-2 (+10 points on island effects, NPI licensing in BLiMP). One can finetune MLMs to give scores without masking, enabling computation in a single inference pass. In all, PLLs and their associated pseudo-perplexities (PPPLs) enable plug-and-play use of the growing number of pretrained MLMs; e.g., we use a single cross-lingual model to rescore translations in multiple languages.
We release our library for language model scoring at .", "keyphrases": ["language model", "scoring", "mlm"]} +{"id": "iyyer-etal-2017-search", "title": "Search-based Neural Structured Learning for Sequential Question Answering", "abstract": "Recent work in semantic parsing for question answering has focused on long and complicated questions, many of which would seem unnatural if asked in a normal conversation between two humans. In an effort to explore a conversational QA setting, we present a more realistic task: answering sequences of simple but inter-related questions. We collect a dataset of 6,066 question sequences that inquire about semi-structured tables from Wikipedia, with 17,553 question-answer pairs in total. To solve this sequential question answering task, we propose a novel dynamic neural semantic parsing framework trained using a weakly supervised reward-guided search. Our model effectively leverages the sequential context to outperform state-of-the-art QA systems that are designed to answer highly complex questions.", "keyphrases": ["sequential question", "semantic parsing", "query"]} +{"id": "esteve-etal-2010-epac", "title": "The EPAC Corpus: Manual and Automatic Annotations of Conversational Speech in French Broadcast News", "abstract": "This paper presents the EPAC corpus, which is composed of a set of 100 hours of conversational speech manually transcribed and of the outputs of automatic tools (automatic segmentation, transcription, POS tagging, etc.) applied to the entire French ESTER 1 audio corpus: this concerns about 1700 hours of audio recordings from radiophonic shows. This corpus was built during the EPAC project funded by the French Research Agency (ANR) from 2007 to 2010. This corpus significantly increases the amount of French manually transcribed audio recordings easily available and it is now included as a part of the ESTER 1 corpus in the ELRA catalog without additional cost. By providing a large set of automatic outputs of speech processing tools, the EPAC corpus should be useful to researchers who want to work on such data without having to develop and deal with such tools. These automatic annotations are various: segmentation and speaker diarization, one-best hypotheses from the LIUM automatic speech recognition system with confidence measures, but also word-lattices and confusion networks, named entities, part-of-speech tags, chunks, etc. The 100 hours of speech manually transcribed were split into three data sets in order to get an official training corpus, an official development corpus and an official test corpus. These data sets were used to develop and to evaluate some automatic tools which have been used to process the 1700 hours of audio recording. For example, on the EPAC test data set our ASR system yields a word error rate equal to 17.25%.", "keyphrases": ["epac corpus", "conversational speech", "hour"]} +{"id": "liang-etal-2017-neural", "title": "Neural Symbolic Machines: Learning Semantic Parsers on Freebase with Weak Supervision", "abstract": "Harnessing the statistical power of neural networks to perform language understanding and symbolic reasoning is difficult when it requires executing efficient discrete operations against a large knowledge-base.
In this work, we introduce a Neural Symbolic Machine, which contains (a) a neural \u201cprogrammer\u201d, i.e., a sequence-to-sequence model that maps language utterances to programs and utilizes a key-variable memory to handle compositionality, and (b) a symbolic \u201ccomputer\u201d, i.e., a Lisp interpreter that performs program execution, and helps find good programs by pruning the search space. We apply REINFORCE to directly optimize the task reward of this structured prediction problem. To train with weak supervision and improve the stability of REINFORCE, we augment it with an iterative maximum-likelihood training process. NSM outperforms the state-of-the-art on the WebQuestionsSP dataset when trained from question-answer pairs only, without requiring any feature engineering or domain-specific knowledge.", "keyphrases": ["weak supervision", "neural symbolic machines", "semantic parsing", "natural language question", "knowledge graph"]} +{"id": "kong-zhou-2010-tree", "title": "A Tree Kernel-Based Unified Framework for Chinese Zero Anaphora Resolution", "abstract": "This paper proposes a unified framework for zero anaphora resolution, which can be divided into three sub-tasks: zero anaphor detection, anaphoricity determination and antecedent identification. In particular, all three sub-tasks are addressed using tree kernel-based methods with appropriate syntactic parse tree structures. Experimental results on a Chinese zero anaphora corpus show that the proposed tree kernel-based methods significantly outperform the feature-based ones. This indicates the critical role of the structural information in zero anaphora resolution and the necessity of tree kernel-based methods in modeling such structural information. To the best of our knowledge, this is the first systematic work dealing with all three sub-tasks in Chinese zero anaphora resolution via a unified framework. Moreover, we release a Chinese zero anaphora corpus of 100 documents, which adds a layer of annotation to the manually-parsed sentences in the Chinese Treebank (CTB) 6.0.", "keyphrases": ["unified framework", "anaphora resolution", "pronoun", "azp resolution"]} +{"id": "bohnet-kuhn-2012-best", "title": "The Best of Both Worlds \u2013 A Graph-based Completion Model for Transition-based Parsers", "abstract": "Transition-based dependency parsers are often forced to make attachment decisions at a point when only partial information about the relevant graph configuration is available. In this paper, we describe a model that takes into account complete structures as they become available to rescore the elements of a beam, combining the advantages of transition-based and graph-based approaches. We also propose an efficient implementation that allows for the use of sophisticated features and show that the completion model leads to a substantial increase in accuracy.
We apply the new transition-based parser on typologically different languages such as English, Chinese, Czech, and German and report competitive labeled and unlabeled attachment scores.", "keyphrases": ["completion model", "transition-based parser", "search"]} +{"id": "liu-zhang-2012-unsupervised", "title": "Unsupervised Domain Adaptation for Joint Segmentation and POS-Tagging", "abstract": "We report an empirical investigation on type-supervised domain adaptation for joint Chinese word segmentation and POS-tagging, making use of domain-specific tag dictionaries and only unlabeled target domain data to improve target-domain accuracies, given a set of annotated source domain sentences. Previous work on POS-tagging of other languages showed that type-supervision can be a competitive alternative to token-supervision, while semi-supervised techniques such as label propagation are important to the effectiveness of type-supervision. We report similar findings using a novel approach for joint Chinese segmentation and POS-tagging, under a cross-domain setting. With the help of unlabeled sentences and a lexicon of 3,000 words, we obtain 33% error reduction in target-domain tagging. In addition, combined type- and token-supervision can lead to improved cost-effectiveness.", "keyphrases": ["pos-tagging", "unsupervised domain adaptation", "cws", "character clustering", "newswire"]} +{"id": "xu-etal-2013-gathering", "title": "Gathering and Generating Paraphrases from Twitter with Application to Normalization", "abstract": "We present a new and unique paraphrase resource, which contains meaning-preserving transformations between informal user-generated text. Sentential paraphrases are extracted from a comparable corpus of temporally and topically related messages on Twitter which often express semantically identical information through distinct surface forms. We demonstrate the utility of this new resource on the task of paraphrasing and normalizing noisy text, showing improvement over several state-of-the-art paraphrase and normalization systems.", "keyphrases": ["twitter", "user-generated text", "text normalization", "paraphrase research"]} +{"id": "brooke-hirst-2012-robust", "title": "Robust, Lexicalized Native Language Identification", "abstract": "Previous approaches to the task of native language identification (Koppel et al., 2005) have been limited to small, within-corpus evaluations. Because these are restrictive and unreliable, we apply cross-corpus evaluation to the task. We demonstrate the efficacy of lexical features, which had previously been avoided due to the within-corpus topic confounds, and provide a detailed evaluation of various options, including a simple bias adaptation technique and a number of classifier algorithms. Using a new web corpus as a training set, we reach high classification accuracy for a 7-language task, performance which is robust across two independent test sets. Although we show that even higher accuracy is possible using cross-validation, we present strong evidence calling into question the validity of cross-validation evaluation using the standard dataset.", "keyphrases": ["native language identification", "cross-corpus evaluation", "nli"]} +{"id": "pettersson-etal-2013-normalisation", "title": "Normalisation of Historical Text Using Context-Sensitive Weighted Levenshtein Distance and Compound Splitting", "abstract": "Natural language processing for historical text imposes a variety of challenges, such as dealing with a high degree of spelling variation.
Furthermore, there is often not enough linguistically annotated data available for training part-of-speech taggers and other tools aimed at handling this specific kind of text. In this paper we present a Levenshtein-based approach to normalisation of historical text to a modern spelling. This enables us to apply standard NLP tools trained on contemporary corpora on the normalised version of the historical input text. In its basic version, no annotated historical data is needed, since the only data used for the Levenshtein comparisons is a contemporary dictionary or corpus. In addition, a (small) corpus of manually normalised historical text can optionally be included to learn normalisation for frequent words and weights for edit operations in a supervised fashion, which improves precision. We show that this method is successful both in terms of normalisation accuracy, and by the performance of a standard modern tagger applied to the historical text. We also compare our method to a previously implemented approach using a set of hand-written normalisation rules, and we see that the Levenshtein-based approach clearly outperforms the hand-crafted rules. Furthermore, the experiments were carried out on Swedish data with promising results and we believe that our method could be successfully applied to analyse historical text for other languages, including those with fewer resources.", "keyphrases": ["historical text", "compound splitting", "spelling", "normalisation", "edit distance calculation"]} +{"id": "ruder-etal-2016-hierarchical", "title": "A Hierarchical Model of Reviews for Aspect-based Sentiment Analysis", "abstract": "Opinion mining from customer reviews has become pervasive in recent years. Sentences in reviews, however, are usually classified independently, even though they form part of a review's argumentative structure. Intuitively, sentences in a review build and elaborate upon each other; knowledge of the review structure and sentential context should thus inform the classification of each sentence. We demonstrate this hypothesis for the task of aspect-based sentiment analysis by modeling the interdependencies of sentences in a review with a hierarchical bidirectional LSTM. We show that the hierarchical model outperforms two non-hierarchical baselines, obtains results competitive with the state-of-the-art, and outperforms the state-of-the-art on five multilingual, multi-domain datasets without any hand-engineered features or external resources.", "keyphrases": ["hierarchical model", "sentiment analysis", "aspect category"]} +{"id": "nerima-wehrli-2008-generating", "title": "Generating Bilingual Dictionaries by Transitivity", "abstract": "Recently the LATL has undertaken the development of a multilingual translation system based on a symbolic parsing technology and on a transfer-based translation model. A crucial component of the system is the lexical database, notably the bilingual dictionaries containing the information for the lexical transfer from one language to another. As the number of necessary bilingual dictionaries is a quadratic function of the number of languages considered, we will face the problem of getting a large number of dictionaries. In this paper we discuss a solution to derive a bilingual dictionary by transitivity using existing ones and to check the generated translations in a parallel corpus. Our first experiments concern the generation of two bilingual dictionaries, and the quality of the entries is very promising.
The number of generated entries could, however, be improved, and we conclude the paper with the possible ways we plan to explore.", "keyphrases": ["bilingual dictionary", "transitivity", "database"]} +{"id": "peshterliev-etal-2019-active", "title": "Active Learning for New Domains in Natural Language Understanding", "abstract": "We explore active learning (AL) for improving the accuracy of new domains in a natural language understanding (NLU) system. We propose an algorithm called Majority-CRF that uses an ensemble of classification models to guide the selection of relevant utterances, as well as a sequence labeling model to help prioritize informative examples. Experiments with three domains show that Majority-CRF achieves 6.6%-9% relative error rate reduction compared to random sampling with the same annotation budget, and statistically significant improvements compared to other AL approaches. Additionally, case studies with human-in-the-loop AL on six new domains show 4.6%-9% improvement on an existing NLU system.", "keyphrases": ["new domain", "natural language understanding", "active learning"]} +{"id": "salaberri-etal-2015-ixagroupehuspaceeval", "title": "IXAGroupEHUSpaceEval: (X-Space) A WordNet-based approach towards the Automatic Recognition of Spatial Information following the ISO-Space Annotation Scheme", "abstract": "This paper presents X-Space, a system that follows the ISO-Space annotation scheme in order to capture spatial information as well as our contribution to the SemEval-2015 task 8 (SpaceEval). Our system is the only participant system that reported results for all three evaluation configurations in SpaceEval.", "keyphrases": ["spatial information", "iso-space annotation scheme", "wordnet"]} +{"id": "gu-etal-2018-universal", "title": "Universal Neural Machine Translation for Extremely Low Resource Languages", "abstract": "In this paper, we propose a new universal machine translation approach focusing on languages with a limited amount of parallel data. Our proposed approach utilizes a transfer-learning approach to share lexical and sentence level representations across multiple source languages into one target language. The lexical part is shared through a Universal Lexical Representation to support multi-lingual word-level sharing. The sentence-level sharing is represented by a model of experts from all source languages that share the source encoders with all other languages. This enables the low-resource language to utilize the lexical and sentence representations of the higher resource languages. Our approach is able to achieve 23 BLEU on Romanian-English WMT2016 using a tiny parallel corpus of 6k sentences, compared to the 18 BLEU of a strong baseline system which uses multi-lingual training and back-translation. Furthermore, we show that the proposed approach can achieve almost 20 BLEU on the same dataset through fine-tuning a pre-trained multi-lingual system in a zero-shot setting.", "keyphrases": ["machine translation", "resource language", "transfer learning"]} +{"id": "pasupat-etal-2018-mapping", "title": "Mapping natural language commands to web elements", "abstract": "The web provides a rich, open-domain environment with textual, structural, and spatial properties. We propose a new task for grounding language in this environment: given a natural language command (e.g., \u201cclick on the second article\u201d), choose the correct element on the web page (e.g., a hyperlink or text box).
We collected a dataset of over 50,000 commands that capture various phenomena such as functional references (e.g. \u201cfind who made this site\u201d), relational reasoning (e.g. \u201carticle by john\u201d), and visual reasoning (e.g. \u201ctop-most article\u201d). We also implemented and analyzed three baseline models that capture different phenomena present in the dataset.", "keyphrases": ["natural language command", "hyperlink", "mapping"]} +{"id": "greene-etal-2010-automatic", "title": "Automatic Analysis of Rhythmic Poetry with Applications to Generation and Translation", "abstract": "We employ statistical methods to analyze, generate, and translate rhythmic poetry. We first apply unsupervised learning to reveal word-stress patterns in a corpus of raw poetry. We then use these word-stress patterns, in addition to rhyme and discourse models, to generate English love poetry. Finally, we translate Italian poetry into English, choosing target realizations that conform to desired rhythmic patterns.", "keyphrases": ["rhythmic poetry", "statistical method", "rhyme scheme", "finite-state transducer"]} +{"id": "zhou-etal-2019-learning", "title": "Learning to Discriminate Perturbations for Blocking Adversarial Attacks in Text Classification", "abstract": "Adversarial attacks against machine learning models have threatened various real-world applications such as spam filtering and sentiment analysis. In this paper, we propose a novel framework, learning to discriminate perturbations (DISP), to identify and adjust malicious perturbations, thereby blocking adversarial attacks for text classification models. To identify adversarial attacks, a perturbation discriminator validates how likely a token in the text is perturbed and provides a set of potential perturbations. For each potential perturbation, an embedding estimator learns to restore the embedding of the original word based on the context and a replacement token is chosen based on approximate kNN search. DISP can block adversarial attacks for any NLP model without modifying the model structure or training procedure. Extensive experiments on two benchmark datasets demonstrate that DISP significantly outperforms baseline methods in blocking adversarial attacks for text classification. In addition, in-depth analysis shows the robustness of DISP across different situations.", "keyphrases": ["perturbation", "attack", "text classification", "adversarial input"]} +{"id": "chen-etal-2010-twin", "title": "A Twin-Candidate Based Approach for Event Pronoun Resolution using Composite Kernel", "abstract": "Event Anaphora Resolution is an important task for cascaded event template extraction and other NLP studies. In this paper, we provide a first systematic study of resolving pronouns to their event verb antecedents for general purpose. First, we explore various positional, lexical and syntactic features useful for the event pronoun resolution. We further explore a tree kernel to model structural information embedded in syntactic parses. A composite kernel is then used to combine the above diverse information. In addition, we employ a twin-candidate based preference learning model to capture the pairwise candidates' preference knowledge. Besides, we also look into the incorporation of the negative training instances with anaphoric pronouns whose antecedents are not verbs.
Although these negative training instances are not used in previous studies on anaphora resolution, our study shows that they are very useful for the final resolution through a random sampling strategy. Our experiments demonstrate that it is meaningful to keep certain training data as development data to help the SVM select a more accurate hyperplane, which provides significant improvement over the default setting with all training data.", "keyphrases": ["event pronoun resolution", "composite kernel", "antecedent"]} +{"id": "dai-etal-2019-style", "title": "Style Transformer: Unpaired Text Style Transfer without Disentangled Latent Representation", "abstract": "Disentangling the content and style in the latent space is prevalent in unpaired text style transfer. However, two major issues exist in most of the current neural models. 1) It is difficult to completely strip the style information from the semantics for a sentence. 2) The recurrent neural network (RNN) based encoder and decoder, mediated by the latent representation, cannot deal well with the issue of the long-term dependency, resulting in poor preservation of non-stylistic semantic content. In this paper, we propose the Style Transformer, which makes no assumption about the latent representation of the source sentence and leverages the power of the attention mechanism in the Transformer to achieve better style transfer and better content preservation.", "keyphrases": ["latent representation", "style transformer", "output sentence"]} +{"id": "szpektor-etal-2008-contextual", "title": "Contextual Preferences", "abstract": "The validity of semantic inferences depends on the contexts in which they are applied. We propose a generic framework for handling contextual considerations within applied inference, termed Contextual Preferences. This framework defines the various context-aware components needed for inference and their relationships. Contextual preferences extend and generalize previous notions, such as selectional preferences, while experiments show that the extended framework allows improving inference quality on real application data.", "keyphrases": ["generic framework", "contextual preferences", "context matching"]} +{"id": "chen-etal-2019-semantically", "title": "Semantically Conditioned Dialog Response Generation via Hierarchical Disentangled Self-Attention", "abstract": "Semantically controlled neural response generation on limited domains has achieved great performance. However, moving towards multi-domain large-scale scenarios is shown to be difficult because the possible combinations of semantic inputs grow exponentially with the number of domains. To alleviate such a scalability issue, we exploit the structure of dialog acts to build a multi-layer hierarchical graph, where each act is represented as a root-to-leaf route on the graph. Then, we incorporate such a graph structure prior as an inductive bias to build a hierarchical disentangled self-attention network, where we disentangle attention heads to model designated nodes on the dialog act graph. By activating different (disentangled) heads at each layer, combinatorially many dialog act semantics can be modeled to control the neural response generation.
On the large-scale Multi-Domain-WOZ dataset, our model can yield a significant improvement over the baselines on various automatic and human evaluation metrics.", "keyphrases": ["response generation", "hierarchical disentangled self-attention", "dialog act"]} +{"id": "sajjad-etal-2020-arabench", "title": "AraBench: Benchmarking Dialectal Arabic-English Machine Translation", "abstract": "Low-resource machine translation suffers from the scarcity of training data and the unavailability of standard evaluation sets. While a number of research efforts target the former, the unavailability of evaluation benchmarks remains a major hindrance in tracking the progress in low-resource machine translation. In this paper, we introduce AraBench, an evaluation suite for dialectal Arabic to English machine translation. Compared to Modern Standard Arabic, Arabic dialects are challenging due to their spoken nature, non-standard orthography, and a large variation in dialectness. To this end, we pool together already available Dialectal Arabic-English resources and additionally build novel test sets. AraBench offers 4 coarse, 15 fine-grained and 25 city-level dialect categories, belonging to diverse genres, such as media, chat, religion and travel, with varying levels of dialectness. We report strong baselines using several training settings: fine-tuning, back-translation and data augmentation. The evaluation suite opens a wide range of research frontiers to push efforts in low-resource machine translation, particularly Arabic dialect translation. The evaluation suite and the dialectal system are publicly available for research purposes.", "keyphrases": ["machine translation", "evaluation benchmark", "dialectal arabic"]} +{"id": "parikh-etal-2020-totto", "title": "ToTTo: A Controlled Table-To-Text Generation Dataset", "abstract": "We present ToTTo, an open-domain English table-to-text dataset with over 120,000 training examples that proposes a controlled generation task: given a Wikipedia table and a set of highlighted table cells, produce a one-sentence description. To obtain generated targets that are natural but also faithful to the source table, we introduce a dataset construction process where annotators directly revise existing candidate sentences from Wikipedia. We present systematic analyses of our dataset and annotation process as well as results achieved by several state-of-the-art baselines. While usually fluent, existing methods often hallucinate phrases that are not supported by the table, suggesting that this dataset can serve as a useful research benchmark for high-precision conditional text generation.", "keyphrases": ["table-to-text generation dataset", "wikipedia table", "table cell", "annotator", "totto"]} +{"id": "lee-etal-2019-countering", "title": "Countering Language Drift via Visual Grounding", "abstract": "Emergent multi-agent communication protocols are very different from natural language and not easily interpretable by humans. We find that agents that were initially pretrained to produce natural language can also experience detrimental language drift: when a non-linguistic reward is used in a goal-based task, e.g. some scalar success metric, the communication protocol may easily and radically diverge from natural language. We recast translation as a multi-agent communication game and examine auxiliary training constraints for their effectiveness in mitigating language drift.
We show that a combination of syntactic (language model likelihood) and semantic (visual grounding) constraints gives the best communication performance, allowing pre-trained agents to retain English syntax while learning to accurately convey the intended meaning.", "keyphrases": ["language drift", "visual grounding", "agent"]} +{"id": "haghighi-klein-2006-prototype", "title": "Prototype-Driven Learning for Sequence Models", "abstract": "We investigate prototype-driven learning for primarily unsupervised sequence modeling. Prior knowledge is specified declaratively, by providing a few canonical examples of each target annotation label. This sparse prototype information is then propagated across a corpus using distributional similarity features in a log-linear generative model. On part-of-speech induction in English and Chinese, as well as an information extraction task, prototype features provide substantial error rate reductions over competitive baselines and outperform previous work. For example, we can achieve an English part-of-speech tagging accuracy of 80.5% using only three examples of each tag and no dictionary constraints. We also compare to semi-supervised learning and discuss the system's error trends.", "keyphrases": ["prior knowledge", "prototype", "prototype-driven learning"]} +{"id": "garcia-etal-2019-comparison", "title": "A comparison of statistical association measures for identifying dependency-based collocations in various languages.", "abstract": "This paper presents an exploration of different statistical association measures to automatically identify collocations from corpora in English, Portuguese, and Spanish. To evaluate the impact of the association metrics we manually annotated corpora with three different syntactic patterns of collocations (adjective-noun, verb-object and nominal compounds). We took advantage of the PARSEME 1.1 Shared Task corpora by selecting a subset of 155k tokens in the three aforementioned languages, in which we annotated 1,526 collocations with the corresponding Lexical Functions according to the Meaning-Text Theory. Using the resulting gold-standard, we have carried out a comparison between frequency data and several well-known association measures, both symmetric and asymmetric. The results show that the combination of dependency triples with raw frequency information is as powerful as the best association measures in most syntactic patterns and languages. Furthermore, and despite the asymmetric behaviour of collocations, directional approaches perform worse than the symmetric ones in the extraction of these phraseological combinations.", "keyphrases": ["statistical association measure", "collocation", "portuguese"]} +{"id": "zaidan-callison-burch-2014-arabic", "title": "Arabic Dialect Identification", "abstract": "The written form of the Arabic language, Modern Standard Arabic (MSA), differs in a non-trivial manner from the various spoken regional dialects of Arabic\u2014the true \u201cnative languages\u201d of Arabic speakers. Those dialects, in turn, differ quite a bit from each other. However, due to MSA's prevalence in written form, almost all Arabic data sets have predominantly MSA content. In this article, we describe the creation of a novel Arabic resource with dialect annotations. We have created a large monolingual data set rich in dialectal Arabic content called the Arabic On-line Commentary Data set (Zaidan and Callison-Burch 2011).
We describe our annotation effort to identify the dialect level (and dialect itself) in each of more than 100,000 sentences from the data set by crowdsourcing the annotation task, and delve into interesting annotator behaviors (like over-identification of one's own dialect). Using this new annotated data set, we consider the task of Arabic dialect identification: Given the word sequence forming an Arabic sentence, determine the variety of Arabic in which it is written. We use the data to train and evaluate automatic classifiers for dialect identification, and establish that classifiers using dialectal data significantly and dramatically outperform baselines that use MSA-only data, achieving near-human classification accuracy. Finally, we apply our classifiers to discover dialectical data from a large Web crawl consisting of 3.5 million pages mined from on-line Arabic newspapers.", "keyphrases": ["arabic resource", "zaidan", "arabic dialect identification", "language variation"]} +{"id": "bhagat-etal-2007-ledir", "title": "LEDIR: An Unsupervised Algorithm for Learning Directionality of Inference Rules", "abstract": "Semantic inference is a core component of many natural language applications. In response, several researchers have developed algorithms for automatically learning inference rules from textual corpora. However, these rules are often either imprecise or underspecified in directionality. In this paper we propose an algorithm called LEDIR that filters incorrect inference rules and identifies the directionality of correct ones. Based on an extension to Harris\u2019s distributional hypothesis, we use selectional preferences to gather evidence of inference directionality and plausibility. Experiments show empirical evidence that our approach can classify inference rules significantly better than several baselines.", "keyphrases": ["directionality", "inference rule", "selectional preference", "ledir", "distributional similarity method"]} +{"id": "kondrak-2009-identification", "title": "Identification of Cognates and Recurrent Sound Correspondences in Word Lists", "abstract": "Identification of cognates and recurrent sound correspondences is a component of two principal tasks of historical linguistics: demonstrating the relatedness of languages, and reconstructing the histories of language families. We propose methods for detecting and quantifying three characteristics of cognates: recurrent sound correspondences, phonetic similarity, and semantic affinity. The ultimate goal is to identify cognates and correspondences directly from lists of words representing pairs of languages that are known to be related. The proposed solutions are language independent, and are evaluated against authentic linguistic data. The results of evaluation experiments involving the Indo-European, Algonquian, and Totonac language families indicate that our methods are more accurate than comparable programs, and achieve high precision and recall on various test sets.", "keyphrases": ["cognate", "phonetic similarity", "semantic affinity", "identification"]} +{"id": "chen-he-2013-automated", "title": "Automated Essay Scoring by Maximizing Human-Machine Agreement", "abstract": "Previous approaches for automated essay scoring (AES) learn a rating model by minimizing either the classification, regression, or pairwise classification loss, depending on the learning algorithm used.
In this paper, we argue that the current AES systems can be further improved by taking into account the agreement between human and machine raters. To this end, we propose a rank-based approach that utilizes listwise learning-to-rank algorithms for learning a rating model, where the agreement between the human and machine raters is directly incorporated into the loss function. Various linguistic and statistical features are utilized to facilitate the learning algorithms. Experiments on the publicly available English essay dataset, Automated Student Assessment Prize (ASAP), show that our proposed approach outperforms the state-of-the-art algorithms, and achieves performance comparable to professional human raters, which suggests the effectiveness of our proposed method for automated essay scoring.", "keyphrases": ["essay", "machine rater", "ranking problem"]} +{"id": "nicolai-etal-2015-inflection", "title": "Inflection Generation as Discriminative String Transduction", "abstract": "We approach the task of morphological inflection generation as discriminative string transduction. Our supervised system learns to generate word-forms from lemmas accompanied by morphological tags, and refines them by referring to the other forms within a paradigm. Results of experiments on six diverse languages with varying amounts of training data demonstrate that our approach improves the state of the art in terms of predicting inflected word-forms.", "keyphrases": ["discriminative string transduction", "inflection generation", "word form", "semi-supervised learning"]} +{"id": "rink-harabagiu-2011-generative", "title": "A generative model for unsupervised discovery of relations and argument classes from clinical texts", "abstract": "This paper presents a generative model for the automatic discovery of relations between entities in electronic medical records. The model discovers relation instances and their types by determining which context tokens express the relation. Additionally, the valid semantic classes for each type of relation are determined. We show that the model produces clusters of relation trigger words which better correspond with manually annotated relations than several existing clustering techniques. The discovered relations reveal some of the implicit semantic structure present in patient records.", "keyphrases": ["generative model", "discovery", "clinical text"]} +{"id": "zesch-melamud-2014-automatic", "title": "Automatic Generation of Challenging Distractors Using Context-Sensitive Inference Rules", "abstract": "Automatically generating challenging distractors for multiple-choice gap-fill items is still an unsolved problem. We propose to employ context-sensitive lexical inference rules in order to generate distractors that are semantically similar to the gap target word in some sense, but not in the particular sense induced by the gap-fill context. We hypothesize that such distractors should be particularly hard to distinguish from the correct answer. We focus on verbs as they are especially difficult to master for language learners and find that our approach is quite effective. In our test set of 20 items, our proposed method decreases the number of invalid distractors in 90% of the cases, and fully eliminates all of them in 65%. Further analysis on that dataset does not support our hypothesis regarding item difficulty as measured by average error rate of language learners.
We conjecture that this may be due to limitations in our evaluation setting, which we plan to address in future work.", "keyphrases": ["distractor", "inference rule", "unsolved problem", "target word", "gap-fill context"]} +{"id": "kartsaklis-sadrzadeh-2016-distributional", "title": "Distributional Inclusion Hypothesis for Tensor-based Composition", "abstract": "According to the distributional inclusion hypothesis, entailment between words can be measured via the feature inclusions of their distributional vectors. In recent work, we showed how this hypothesis can be extended from words to phrases and sentences in the setting of compositional distributional semantics. This paper focuses on inclusion properties of tensors; its main contribution is a theoretical and experimental analysis of how feature inclusion works in different concrete models of verb tensors. We present results for relational, Frobenius, projective, and holistic methods and compare them to the simple vector addition, multiplication, min, and max models. The degrees of entailment thus obtained are evaluated via a variety of existing word-based measures, such as Weed's and Clarke's, KL-divergence, APinc, balAPinc, and two of our previously proposed metrics at the phrase/sentence level. We perform experiments on three entailment datasets, investigating which version of tensor-based composition achieves the highest performance when combined with the sentence-level measures.", "keyphrases": ["tensor-based composition", "entailment", "distributional inclusion hypothesis"]} +{"id": "hill-etal-2016-learning", "title": "Learning Distributed Representations of Sentences from Unlabelled Data", "abstract": "Unsupervised methods for learning distributed representations of words are ubiquitous in today's NLP research, but far less is known about the best ways to learn distributed phrase or sentence representations from unlabelled data. This paper is a systematic comparison of models that learn such representations. We find that the optimal approach depends critically on the intended application. Deeper, more complex models are preferable for representations to be used in supervised systems, but shallow log-linear models work best for building representation spaces that can be decoded with simple spatial distance metrics. We also propose two new unsupervised representation-learning objectives designed to optimise the trade-off between training time, domain portability and performance.", "keyphrases": ["unlabelled data", "sentence representation", "sequential denoising autoencoder", "bag-of-word", "hypothesis"]} +{"id": "gollapalli-li-2015-emnlp", "title": "EMNLP versus ACL: Analyzing NLP research over time", "abstract": "The conferences ACL (Association for Computational Linguistics) and EMNLP (Empirical Methods in Natural Language Processing) rank among the premier venues that track the research developments in Natural Language Processing and Computational Linguistics. In this paper, we present a study on the research papers of approximately two decades from these two NLP conferences. We apply keyphrase extraction and corpus analysis tools to the proceedings from these venues and propose probabilistic and vector-based representations to represent the topics published in a venue for a given year. 
Next, similarity metrics are studied over pairs of venue representations to capture the progress of the two venues with respect to each other and over time.", "keyphrases": ["venue", "keyphrase extraction", "emnlp"]} +{"id": "zhang-etal-2019-improving", "title": "Improving Deep Transformer with Depth-Scaled Initialization and Merged Attention", "abstract": "The general trend in NLP is towards increasing model capacity and performance via deeper neural networks. However, simply stacking more layers of the popular Transformer architecture for machine translation results in poor convergence and high computational overhead. Our empirical analysis suggests that convergence is poor due to gradient vanishing caused by the interaction between residual connection and layer normalization. We propose depth-scaled initialization (DS-Init), which decreases parameter variance at the initialization stage, and reduces output variance of residual connections so as to ease gradient back-propagation through normalization layers. To address computational cost, we propose a merged attention sublayer (MAtt) which combines a simplified average-based self-attention sublayer and the encoder-decoder attention sublayer on the decoder side. Results on WMT and IWSLT translation tasks with five translation directions show that deep Transformers with DS-Init and MAtt can substantially outperform their base counterpart in terms of BLEU (+1.1 BLEU on average for 12-layer models), while matching the decoding speed of the baseline model thanks to the efficiency improvements of MAtt. Source code for reproduction will be released soon.", "keyphrases": ["deep transformer", "initialization", "attention sublayer", "deep model"]} +{"id": "cettolo-etal-2015-iwslt", "title": "The IWSLT 2015 Evaluation Campaign", "abstract": "The IWSLT 2015 Evaluation Campaign featured three tracks: automatic speech recognition (ASR), spoken language translation (SLT), and machine translation (MT). For ASR we offered two tasks, on English and German, while for SLT and MT a number of tasks were proposed, involving English, German, French, Chinese, Czech, Thai, and Vietnamese. All tracks involved the transcription or translation of TED talks, either made available by the official TED web-site or by other TEDx events. A notable change with respect to previous evaluations was the use of unsegmented speech in the SLT track in order to better fit a real application scenario. Thus, from one side participants were encouraged to develop advanced methods for sentence segmentation, from the other side organisers had to cope with the automatic evaluation of SLT outputs not matching the sentence-wise arrangement of the human references. A new evaluation server was also developed to allow participants to score their MT and SLT systems on selected dev and test sets. This year 16 teams participated in the evaluation, for a total of 63 primary submissions. All runs were evaluated with objective metrics, and submissions for two of the MT translation tracks were also evaluated with human post-editing.", "keyphrases": ["iwslt", "evaluation campaign", "ted talk", "participant", "cascade system"]} +{"id": "georgi-etal-2012-measuring", "title": "Measuring the Divergence of Dependency Structures Cross-Linguistically to Improve Syntactic Projection Algorithms", "abstract": "Syntactic parses can provide valuable information for many NLP tasks, such as machine translation, semantic analysis, etc. 
However, most of the world's languages do not have large amounts of syntactically annotated corpora available for building parsers. Syntactic projection techniques attempt to address this issue by using parallel corpora between resource-poor and resource-rich languages, bootstrapping the resource-poor language with the syntactic analysis of the resource-rich language. In this paper, we investigate the possibility of using small, parallel, annotated corpora to automatically detect divergent structural patterns between two languages. These patterns can then be used to improve structural projection algorithms, allowing for better performing NLP tools for resource-poor languages, in particular those that may not have large amounts of annotated data necessary for traditional, fully-supervised methods. While this detection process is not exhaustive, we demonstrate that important instances of divergence are picked up with minimal prior knowledge of a given language pair.", "keyphrases": ["divergence", "parallel corpora", "resource-rich language"]} +{"id": "chrupala-etal-2008-learning", "title": "Learning Morphology with Morfette", "abstract": "Morfette is a modular, data-driven, probabilistic system which learns to perform joint morphological tagging and lemmatization from morphologically annotated corpora. The system is composed of two learning modules which are trained to predict morphological tags and lemmas using the Maximum Entropy classifier. The third module dynamically combines the predictions of the Maximum-Entropy models and outputs a probability distribution over tag-lemma pair sequences. The lemmatization module exploits the idea of recasting lemmatization as a classification task by using class labels which encode mappings from word forms to lemmas. Experimental evaluation results and error analysis on three morphologically rich languages show that the system achieves high accuracy with no language-specific feature engineering or additional resources.", "keyphrases": ["morfette", "probabilistic system", "joint morphological tagging", "classification task", "edit tree"]} +{"id": "roberts-etal-2010-linguistic", "title": "A Linguistic Resource for Semantic Parsing of Motion Events", "abstract": "This paper presents a corpus of annotated motion events and their event structure. We consider motion events triggered by a set of motion evoking words and contemplate both literal and figurative interpretations of them. Figurative motion events are extracted into the same event structure but are marked as figurative in the corpus. To represent the event structure of motion, we use the FrameNet annotation standard, which encodes motion in over 70 frames. In order to acquire a diverse set of texts that are different from FrameNet's, we crawled blog and news feeds for five different domains: sports, newswire, finance, military, and gossip. We then annotated these documents with an automatic FrameNet parser. Its output was manually corrected to account for missing and incorrect frames as well as missing and incorrect frame elements. 
The corpus, UTD-MotionEvent, may act as a resource for semantic parsing, detection of figurative language, spatial reasoning, and other tasks.", "keyphrases": ["semantic parsing", "motion event", "framenet"]} +{"id": "yeniterzi-oflazer-2010-syntax", "title": "Syntax-to-Morphology Mapping in Factored Phrase-Based Statistical Machine Translation from English to Turkish", "abstract": "We present a novel scheme to apply factored phrase-based SMT to a language pair with very disparate morphological structures. Our approach relies on syntactic analysis on the source side (English) and then encodes a wide variety of local and non-local syntactic structures as complex structural tags which appear as additional factors in the training data. On the target side (Turkish), we only perform morphological analysis and disambiguation but treat the complete complex morphological tag as a factor, instead of separating morphemes. We incrementally explore capturing various syntactic substructures as complex tags on the English side, and evaluate how our translations improve in BLEU scores. Our maximal set of source and target side transformations, coupled with some additional techniques, provide a 39% relative improvement from a baseline 17.08 to 23.78 BLEU, all averaged over 10 training and test sets. Now that the syntactic analysis on the English side is available, we also experiment with more long distance constituent reordering to bring the English constituent order close to Turkish, but find that these transformations do not provide any additional consistent tangible gains when averaged over the 10 sets.", "keyphrases": ["factor", "statistical machine translation", "morphological structure", "syntax-to-morphology mapping", "rich language"]} +{"id": "cai-etal-2016-bidirectional", "title": "Bidirectional Recurrent Convolutional Neural Network for Relation Classification", "abstract": "Relation classification is an important semantic processing task in the field of natural language processing (NLP). In this paper, we present a novel model BRCNN to classify the relation of two entities in a sentence. Some state-of-the-art systems concentrate on modeling the shortest dependency path (SDP) between two entities leveraging convolutional or recurrent neural networks. We further explore how to make full use of the dependency relations information in the SDP, by combining convolutional neural networks and two-channel recurrent neural networks with long short term memory (LSTM) units. We propose a bidirectional architecture to learn relation representations with directional information along the SDP forwards and backwards at the same time, which benefits classifying the direction of relations. Experimental results show that our method outperforms the state-of-the-art approaches on the SemEval-2010 Task 8 dataset.", "keyphrases": ["relation classification", "state-of-the-art system", "rnn"]} +{"id": "ethayarajh-2020-classifier", "title": "Is Your Classifier Actually Biased? Measuring Fairness under Uncertainty with Bernstein Bounds", "abstract": "Most NLP datasets are not annotated with protected attributes such as gender, making it difficult to measure classification bias using standard measures of fairness (e.g., equal opportunity). However, manually annotating a large dataset with a protected attribute is slow and expensive. Instead of annotating all the examples, can we annotate a subset of them and use that sample to estimate the bias? 
While it is possible to do so, the smaller this annotated sample is, the less certain we are that the estimate is close to the true bias. In this work, we propose using Bernstein bounds to represent this uncertainty about the bias estimate as a confidence interval. We provide empirical evidence that a 95% confidence interval derived this way consistently bounds the true bias. In quantifying this uncertainty, our method, which we call Bernstein-bounded unfairness, helps prevent classifiers from being deemed biased or unbiased when there is insufficient evidence to make either claim. Our findings suggest that the datasets currently used to measure specific biases are too small to conclusively identify bias except in the most egregious cases. For example, consider a co-reference resolution system that is 5% more accurate on gender-stereotypical sentences \u2013 to claim it is biased with 95% confidence, we need a bias-specific dataset that is 3.8 times larger than WinoBias, the largest available.", "keyphrases": ["fairness", "uncertainty", "claim"]} +{"id": "swayamdipta-etal-2018-syntactic", "title": "Syntactic Scaffolds for Semantic Structures", "abstract": "We introduce the syntactic scaffold, an approach to incorporating syntactic information into semantic tasks. Syntactic scaffolds avoid expensive syntactic processing at runtime, only making use of a treebank during training, through a multitask objective. We improve over strong baselines on PropBank semantics, frame semantics, and coreference resolution, achieving competitive performance on all three tasks.", "keyphrases": ["scaffold", "semantic task", "coreference resolution", "srl", "multi-task learning"]} +{"id": "kotani-etal-2014-listenability", "title": "A Listenability Measuring Method for an Adaptive Computer-assisted Language Learning and Teaching System", "abstract": "In teaching and learning of English as a foreign language, the Internet serves as a source of authentic listening material, enabling learners to practice English in real contexts. An adaptive computer-assisted language learning and teaching system can pick up news clips as authentic materials from the Internet according to learner listening proficiency if it is equipped with a listenability measuring method that takes into account both linguistic features of a news clip and the listening proficiency. Therefore, we developed a method for measuring listening proficiency-based listenability. With our method, listenability is measured through multiple regression analysis using both learner and linguistic features as independent variables. Learner features account for learner listening proficiency, and linguistic features explain lexical, syntactic, and phonological complexities of sentences. A cross validation test showed that listenability measured with our method exhibited higher correlation (r = 0.57) than listenability measured with other methods using either learner features (r = 0.43) or other linguistic features (r = 0.32, r = 0.36). A comparison of our method with other methods showed a statistically significant difference (p < 0.003 after Bonferroni correction). 
These results suggest the effectiveness of learner and linguistic features for measuring listening proficiency-based listenability.", "keyphrases": ["listenability", "teaching system", "learner"]} +{"id": "wang-etal-2020-asking", "title": "Asking and Answering Questions to Evaluate the Factual Consistency of Summaries", "abstract": "Practical applications of abstractive summarization models are limited by frequent factual inconsistencies with respect to their input. Existing automatic evaluation metrics for summarization are largely insensitive to such errors. We propose QAGS (pronounced \u201ckags\u201d), an automatic evaluation protocol that is designed to identify factual inconsistencies in a generated summary. QAGS is based on the intuition that if we ask questions about a summary and its source, we will receive similar answers if the summary is factually consistent with the source. To evaluate QAGS, we collect human judgments of factual consistency on model-generated summaries for the CNN/DailyMail (Hermann et al., 2015) and XSUM (Narayan et al., 2018) summarization datasets. QAGS has substantially higher correlations with these judgments than other automatic evaluation metrics. Also, QAGS offers a natural form of interpretability: The answers and questions generated while computing QAGS indicate which tokens of a summary are inconsistent and why. We believe QAGS is a promising tool in automatically generating usable and factually consistent text. Code for QAGS will be available at .", "keyphrases": ["factual consistency", "summarizer", "evaluation metric", "similar answer", "question generation"]} +{"id": "cheng-etal-2016-semi", "title": "Semi-Supervised Learning for Neural Machine Translation", "abstract": "While end-to-end neural machine translation (NMT) has made remarkable progress recently, NMT systems only rely on parallel corpora for parameter estimation. Since parallel corpora are usually limited in quantity, quality, and coverage, especially for low-resource languages, it is appealing to exploit monolingual corpora to improve NMT. We propose a semi-supervised approach for training NMT models on the concatenation of labeled (parallel corpora) and unlabeled (monolingual corpora) data. The central idea is to reconstruct the monolingual corpora using an autoencoder, in which the source-to-target and target-to-source translation models serve as the encoder and decoder, respectively. Our approach can not only exploit the monolingual corpora of the target language, but also of the source language. Experiments on the Chinese-English dataset show that our approach achieves significant improvements over state-of-the-art SMT and NMT systems.", "keyphrases": ["neural machine translation", "target-to-source translation model", "semi-supervised learning", "monolingual data", "usage"]} +{"id": "wang-etal-2010-character-based", "title": "A Character-Based Joint Model for Chinese Word Segmentation", "abstract": "The character-based tagging approach is a dominant technique for Chinese word segmentation, and both discriminative and generative models can be adopted in that framework. However, generative and discriminative character-based approaches are significantly different and complement each other. A simple joint model combining the character-based generative model and the discriminative one is thus proposed in this paper to take advantage of both approaches. 
Experiments on the Second SIGHAN Bakeoff show that this joint approach achieves 21% relative error reduction over the discriminative model and 14% over the generative one. In addition, closed tests also show that the proposed joint model outperforms all the existing approaches reported in the literature and achieves the best F-score in four out of five corpora.", "keyphrases": ["joint model", "chinese word segmentation", "character-based tagging approach"]} +{"id": "tron-etal-2006-morphdb", "title": "Morphdb.hu: Hungarian lexical database and morphological grammar", "abstract": "This paper describes morphdb.hu, a Hungarian lexical database and morphological grammar. Morphdb.hu is the outcome of a several-year collaborative effort and represents the resource with the widest coverage and broadest range of applicability presently available for Hungarian. The grammar resource is the formalization of well-founded theoretical decisions handling inflection and productive derivation. The lexical database was created by merging three independent lexical databases, and the resulting resource was further extended.", "keyphrases": ["hungarian", "lexical database", "morphdb.hu"]} +{"id": "fukui-etal-2016-multimodal", "title": "Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding", "abstract": "Modeling textual or visual information with vector representations trained from large language or visual datasets has been successfully explored in recent years. However, tasks such as visual question answering require combining these vector representations with each other. Approaches to multimodal pooling include element-wise product or sum, as well as concatenation of the visual and textual representations. We hypothesize that these methods are not as expressive as an outer product of the visual and textual vectors. As the outer product is typically infeasible due to its high dimensionality, we instead propose utilizing Multimodal Compact Bilinear pooling (MCB) to efficiently and expressively combine multimodal features. We extensively evaluate MCB on the visual question answering and grounding tasks. We consistently show the benefit of MCB over ablations without MCB. For visual question answering, we present an architecture which uses MCB twice, once for predicting attention over spatial features and again to combine the attended representation with the question representation. This model outperforms the state-of-the-art on the Visual7W dataset and the VQA challenge.", "keyphrases": ["visual question", "image", "multimodal compact bilinear"]} +{"id": "zou-lu-2019-text2math", "title": "Text2Math: End-to-end Parsing Text into Math Expressions", "abstract": "We propose Text2Math, a model for semantically parsing text into math expressions. The model can be used to solve different math related problems including arithmetic word problems and equation parsing problems. Unlike previous approaches, we tackle the problem from an end-to-end structured prediction perspective where our algorithm aims to predict the complete math expression at once as a tree structure, where minimal manual efforts are involved in the process. 
Empirical results on benchmark datasets demonstrate the efficacy of our approach.", "keyphrases": ["math expression", "word problem", "text2math"]} +{"id": "zhou-etal-2016-attention", "title": "Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification", "abstract": "Relation classification is an important semantic processing task in the field of natural language processing (NLP). State-of-the-art systems still rely on lexical resources such as WordNet or NLP systems like dependency parser and named entity recognizers (NER) to get high-level features. Another challenge is that important information can appear at any position in the sentence. To tackle these problems, we propose Attention-Based Bidirectional Long Short-Term Memory Networks (AttBLSTM) to capture the most important semantic information in a sentence. The experimental results on the SemEval-2010 relation classification task show that our method outperforms most of the existing methods, with only word vectors.", "keyphrases": ["relation classification", "deep neural network", "self-attention", "argument representation", "powerful encoder"]} +{"id": "hogan-2007-coordinate", "title": "Coordinate Noun Phrase Disambiguation in a Generative Parsing Model", "abstract": "In this paper we present methods for improving the disambiguation of noun phrase (NP) coordination within the framework of a lexicalised history-based parsing model. As well as reducing noise in the data, we look at modelling two main sources of information for disambiguation: symmetry in conjunct structure, and the dependency between conjunct lexical heads. Our changes to the baseline model result in an increase in NP coordination dependency f-score from 69.9% to 73.8%, which represents a relative reduction in f-score error of 13%.", "keyphrases": ["generative parsing model", "conjunct", "coordination disambiguation"]} +{"id": "macher-etal-2021-read", "title": "Do we read what we hear? Modeling orthographic influences on spoken word recognition", "abstract": "Theories and models of spoken word recognition aim to explain the process of accessing lexical knowledge given an acoustic realization of a word form. There is consensus that phonological and semantic information is crucial for this process. However, there is accumulating evidence that orthographic information could also have an impact on auditory word recognition. This paper presents two models of spoken word recognition that instantiate different hypotheses regarding the influence of orthography on this process. We show that these models reproduce human-like behavior in different ways and provide testable hypotheses for future research on the source of orthographic effects in spoken word recognition.", "keyphrases": ["influence", "word recognition", "orthographic effect"]} +{"id": "martschat-strube-2014-recall", "title": "Recall Error Analysis for Coreference Resolution", "abstract": "We present a novel method for coreference resolution error analysis which we apply to perform a recall error analysis of four state-of-the-art English coreference resolution systems. Our analysis highlights differences between the systems and identifies that the majority of recall errors for nouns and names are shared by all systems. 
We characterize this set of common challenging errors in terms of a broad range of lexical and semantic properties.", "keyphrases": ["coreference resolution", "name", "recall error analysis"]} +{"id": "othman-etal-2003-chart", "title": "A chart parser for analyzing modern standard Arabic sentence", "abstract": "The parsing of Arabic sentence is a necessary prerequisite for many natural language processing applications such as machine translation and information retrieval. In this paper we report our attempt to develop an efficient chart parser for Analyzing Modern Standard Arabic (MSA) sentence. From a practical point of view, the parser is able to satisfy syntactic constraints reducing parsing ambiguity. Lexical semantic features are also used to disambiguate the sentence structure. We explain also an Arabic morphological analyzer based on ATN technique. Both the Arabic parser and the Arabic morphological analyzer are implemented in Prolog. The linguistic rules were acquired from a set of sentences from MSA sentence in the Agriculture domain.", "keyphrases": ["chart parser", "arabic sentence", "prolog"]} +{"id": "reimers-etal-2019-classification", "title": "Classification and Clustering of Arguments with Contextualized Word Embeddings", "abstract": "We experiment with two recent contextualized word embedding methods (ELMo and BERT) in the context of open-domain argument search. For the first time, we show how to leverage the power of contextualized word embeddings to classify and cluster topic-dependent arguments, achieving impressive results on both tasks and across multiple datasets. For argument classification, we improve the state-of-the-art for the UKP Sentential Argument Mining Corpus by 20.8 percentage points and for the IBM Debater - Evidence Sentences dataset by 7.4 percentage points. For the understudied task of argument clustering, we propose a pre-training step which improves by 7.8 percentage points over strong baselines on a novel dataset, and by 12.3 percentage points for the Argument Facet Similarity (AFS) Corpus.", "keyphrases": ["clustering", "argument classification", "unseen topic"]} +{"id": "fan-etal-2019-strategies", "title": "Strategies for Structuring Story Generation", "abstract": "Writers often rely on plans or sketches to write long stories, but most current language models generate word by word from left to right. We explore coarse-to-fine models for creating narrative texts of several hundred words, and introduce new models which decompose stories by abstracting over actions and entities. The model first generates the predicate-argument structure of the text, where different mentions of the same entity are marked with placeholder tokens. It then generates a surface realization of the predicate-argument structure, and finally replaces the entity placeholders with context-sensitive names and references. Human judges prefer the stories from our models to a wide range of previous approaches to hierarchical text generation. Extensive analysis shows that our methods can help improve the diversity and coherence of events and entities in generated stories.", "keyphrases": ["story", "language model", "predicate-argument structure", "coherence"]} +{"id": "pradhan-etal-2012-conll", "title": "CoNLL-2012 Shared Task: Modeling Multilingual Unrestricted Coreference in OntoNotes", "abstract": "The CoNLL-2012 shared task involved predicting coreference in three languages -- English, Chinese and Arabic -- using OntoNotes data. 
It was a follow-on to the English-only task organized in 2011. Until the creation of the OntoNotes corpus, resources in this subfield of language processing have tended to be limited to noun phrase coreference, often on a restricted set of entities, such as ACE entities. OntoNotes provides a large-scale corpus of general anaphoric coreference not restricted to noun phrases or to a specified set of entity types and covering multiple languages. OntoNotes also provides additional layers of integrated annotation, capturing additional shallow semantic structure. This paper briefly describes the OntoNotes annotation (coreference and other layers) and then describes the parameters of the shared task including the format, pre-processing information, evaluation criteria, and presents and discusses the results achieved by the participating systems. Being a task that has a complex evaluation history, and multiple evaluation conditions, it has, in the past, been difficult to judge the improvement in new algorithms over previously reported results. Having a standard test set and evaluation parameters, all based on a resource that provides multiple integrated annotation layers (parses, semantic roles, word senses, named entities and coreference) that could support joint models, should help to energize ongoing research in the task of entity and event coreference.", "keyphrases": ["conll-2012", "coreference resolution", "mention", "discourse deixis", "identity anaphora"]} +{"id": "pennacchiotti-pantel-2009-entity", "title": "Entity Extraction via Ensemble Semantics", "abstract": "Combining information extraction systems yields significantly higher quality resources than each system in isolation. In this paper, we generalize such a mixing of sources and features in a framework called Ensemble Semantics. We show very large gains in entity extraction by combining state-of-the-art distributional and pattern-based systems with a large set of features from a webcrawl, query logs, and Wikipedia. Experimental results on a web-scale extraction of actors, athletes and musicians show significantly higher mean average precision scores (29% gain) compared with the current state of the art.", "keyphrases": ["ensemble semantic", "query log", "wikipedia", "entity extraction"]} +{"id": "zhang-etal-2009-hpsg", "title": "HPSG Supertagging: A Sequence Labeling View", "abstract": "Supertagging is a widely used speed-up technique for deep parsing. In another aspect, supertagging has been exploited in other NLP tasks than parsing for utilizing the rich syntactic information given by the supertags. However, the performance of supertagger is still a bottleneck for such applications. In this paper, we investigated the relationship between supertagging and parsing, not just to speed up the deep parser; we started from a sequence labeling view of HPSG supertagging, examining how well a supertagger can do when separated from parsing. Comparison of two types of supertagging model, point-wise model and sequential model, showed that the former model works competitively well despite its simplicity, which indicates the true dependency among supertag assignments is far more complex than the crude first-order approximation made in the sequential model. We then analyzed the limitation of separated supertagging by using a CFG-filter. 
The results showed that big gains could be acquired by resorting to a light-weight parser.", "keyphrases": ["sequence labeling view", "sequential model", "hpsg"]} +{"id": "chklovski-etal-2004-senseval", "title": "The Senseval-3 Multilingual English-Hindi lexical sample task", "abstract": "This paper describes the English\u2010Hindi Multilingual lexical sample task in SENSEVAL\u20103. Rather than tagging an English word with a sense from an English dictionary, this task seeks to assign the most appropriate Hindi translation to an ambiguous target word. Training data was solicited via the Open Mind Word Expert (OMWE) from Web users who are fluent in English and Hindi.", "keyphrases": ["senseval-3", "lexical sample task", "distinction"]} +{"id": "li-etal-2021-document", "title": "Document-Level Event Argument Extraction by Conditional Generation", "abstract": "Event extraction has long been treated as a sentence-level task in the IE community. We argue that this setting does not match human information-seeking behavior and leads to incomplete and uninformative extraction results. We propose a document-level neural event argument extraction model by formulating the task as conditional generation following event templates. We also compile a new document-level event extraction benchmark dataset WikiEvents which includes complete event and coreference annotation. On the task of argument extraction, we achieve an absolute gain of 7.6% F1 and 5.7% F1 over the next best model on the RAMS and WikiEvents dataset respectively. On the more challenging task of informative argument extraction, which requires implicit coreference reasoning, we achieve a 9.3% F1 gain over the best baseline. To demonstrate the portability of our model, we also create the first end-to-end zero-shot event extraction framework and achieve 97% of fully supervised model's trigger extraction performance and 82% of the argument extraction performance given only access to 10 out of the 33 types on ACE.", "keyphrases": ["conditional generation", "event extraction", "template", "wikievents"]} +{"id": "tsuboi-etal-2008-training", "title": "Training Conditional Random Fields Using Incomplete Annotations", "abstract": "We address corpus building situations, where complete annotation of the whole corpus is time-consuming and unrealistic. Thus, annotation is done only on crucial parts of sentences, or contains unresolved label ambiguities. We propose a parameter estimation method for Conditional Random Fields (CRFs), which enables us to use such incomplete annotations. We show promising results of our method as applied to two types of NLP tasks: a domain adaptation task of a Japanese word segmentation using partial annotations, and a part-of-speech tagging task using ambiguous tags in the Penn treebank corpus.", "keyphrases": ["conditional random fields", "incomplete annotation", "parameter estimation method", "crf", "japanese word segmentation"]} +{"id": "klerke-etal-2016-improving", "title": "Improving sentence compression by learning to predict gaze", "abstract": "We show how eye-tracking corpora can be used to improve sentence compression models, presenting a novel multi-task learning algorithm based on multi-layer LSTMs. 
We obtain performance competitive with or better than state-of-the-art approaches.", "keyphrases": ["sentence compression", "gaze", "multi-task learning approach", "function"]} +{"id": "rottger-etal-2021-hatecheck", "title": "HateCheck: Functional Tests for Hate Speech Detection Models", "abstract": "Detecting online hate is a difficult task that even state-of-the-art models struggle with. Typically, hate speech detection models are evaluated by measuring their performance on held-out test data using metrics such as accuracy and F1 score. However, this approach makes it difficult to identify specific model weak points. It also risks overestimating generalisable model performance due to increasingly well-evidenced systematic gaps and biases in hate speech datasets. To enable more targeted diagnostic insights, we introduce HateCheck, a suite of functional tests for hate speech detection models. We specify 29 model functionalities motivated by a review of previous research and a series of interviews with civil society stakeholders. We craft test cases for each functionality and validate their quality through a structured annotation process. To illustrate HateCheck's utility, we test near-state-of-the-art transformer models as well as two popular commercial models, revealing critical model weaknesses.", "keyphrases": ["hate speech detection", "speech detection model", "online hate", "review", "hatecheck"]} +{"id": "wu-etal-2019-extract", "title": "Extract and Edit: An Alternative to Back-Translation for Unsupervised Neural Machine Translation", "abstract": "The overreliance on large parallel corpora significantly limits the applicability of machine translation systems to the majority of language pairs. Back-translation has been dominantly used in previous approaches for unsupervised neural machine translation, where pseudo sentence pairs are generated to train the models with a reconstruction loss. However, the pseudo sentences are usually of low quality as translation errors accumulate during training. To avoid this fundamental issue, we propose an alternative but more effective approach, extract-edit, to extract and then edit real sentences from the target monolingual corpora. Furthermore, we introduce a comparative translation loss to evaluate the translated target sentences and thus train the unsupervised translation systems. Experiments show that the proposed approach consistently outperforms the previous state-of-the-art unsupervised machine translation systems across two benchmarks (English-French and English-German) and two low-resource language pairs (English-Romanian and English-Russian) by more than 2 (up to 3.63) BLEU points.", "keyphrases": ["alternative", "back-translation", "parallel corpora", "extract-edit", "real sentence"]} +{"id": "hahn-powell-etal-2016-causal", "title": "This before That: Causal Precedence in the Biomedical Domain", "abstract": "Causal precedence between biochemical interactions is crucial in the biomedical domain, because it transforms collections of individual interactions, e.g., bindings and phosphorylations, into the causal mechanisms needed to inform meaningful search and inference. Here, we analyze causal precedence in the biomedical domain as distinct from open-domain, temporal precedence. First, we describe a novel, hand-annotated text corpus of causal precedence in the biomedical domain. Second, we use this corpus to investigate a battery of models of precedence, covering rule-based, feature-based, and latent representation models. 
The highest-performing individual model achieved a micro F1 of 43 points, approaching the best performers on the simpler temporal-only precedence tasks. Feature-based and latent representation models each outperform the rule-based models, but their performance is complementary to one another. We apply a sieve-based architecture to capitalize on this lack of overlap, achieving a micro F1 score of 46 points.", "keyphrases": ["causal precedence", "biomedical domain", "event mention"]} +{"id": "dong-etal-2010-hownet", "title": "HowNet and Its Computation of Meaning", "abstract": "The presentation will mainly cover (1) What is HowNet? HowNet is an on-line common-sense knowledgebase unveiling inter-conceptual relationships and inter-attribute relationships of concepts as connoting in lexicons of the Chinese and their English equivalents. (2) How it functions in the computation of meaning and as an NLP platform? The presentation will show 9 HowNet-based application tools. All of them are not merely demonstration of some methodology or algorithm, but are real application tools that can be tested by users themselves. Apart from the tools that are specially designed to deal with Chinese, most of the tools are bilingual, even the WSD tool.", "keyphrases": ["computation", "english equivalent", "hownet"]} +{"id": "gardent-etal-2003-bridges", "title": "Which bridges for bridging definite descriptions?", "abstract": "This paper presents a corpus study of bridging definite descriptions in the French corpus PAROLE. It proposes a typology of bridging relations; describes a system for annotating NPs which allows for a user friendly collection of all relevant information on the bridging definite descriptions occurring in the corpus and discusses the results of the corpus study.", "keyphrases": ["definite description", "corpus study", "bridging relation"]} +{"id": "cao-etal-2018-joint", "title": "Joint Representation Learning of Cross-lingual Words and Entities via Attentive Distant Supervision", "abstract": "Joint representation learning of words and entities benefits many NLP tasks, but has not been well explored in cross-lingual settings. In this paper, we propose a novel method for joint representation learning of cross-lingual words and entities. It captures mutually complementary knowledge, and enables cross-lingual inferences among knowledge bases and texts. Our method does not require parallel corpus, and automatically generates comparable data via distant supervision using multi-lingual knowledge bases. We utilize two types of regularizers to align cross-lingual words and entities, and design knowledge attention and cross-lingual attention to further reduce noises. We conducted a series of experiments on three tasks: word translation, entity relatedness, and cross-lingual entity linking. The results, both qualitative and quantitative, demonstrate the significance of our method.", "keyphrases": ["cross-lingual word", "distant supervision", "joint representation learning"]} +{"id": "koehn-etal-2005-edinburgh", "title": "Edinburgh System Description for the 2005 IWSLT Speech Translation Evaluation", "abstract": "Our participation in the IWSLT 2005 speech translation task is our first effort to work on limited domain speech data. We adapted our statistical machine translation system that performed successfully in previous DARPA competitions on open domain text translations. We participated in the supplied corpora transcription track. 
We achieved the highest BLEU score in 2 out of 5 language pairs and had competitive results for the other language pairs.", "keyphrases": ["bleu score", "reordering model", "phrase-based smt"]} +{"id": "liu-etal-2015-fine", "title": "Fine-grained Opinion Mining with Recurrent Neural Networks and Word Embeddings", "abstract": "The tasks in fine-grained opinion mining can be regarded as either a token-level sequence labeling problem or as a semantic compositional task. We propose a general class of discriminative models based on recurrent neural networks (RNNs) and word embeddings that can be successfully applied to such tasks without any task-specific feature engineering effort. Our experimental results on the task of opinion target identification show that RNNs, without using any hand-crafted features, outperform feature-rich CRF-based models. Our framework is flexible, allows us to incorporate other linguistic features, and achieves results that rival the top performing systems in SemEval-2014.", "keyphrases": ["recurrent neural networks", "linguistic feature", "fine-grained opinion mining", "sentiment analysis"]} +{"id": "song-etal-2015-light", "title": "From Light to Rich ERE: Annotation of Entities, Relations, and Events", "abstract": "We describe the evolution of the Entities, Relations and Events (ERE) annotation task, created to support research and technology development within the DARPA DEFT program. We begin by describing the specification for Light ERE annotation, including the motivation for the task within the context of DEFT. We discuss the transition from Light ERE to a more complex Rich ERE specification, enabling more comprehensive treatment of phenomena of interest to DEFT.", "keyphrases": ["rich ere", "entities", "event type", "automatic content extraction", "trigger"]} +{"id": "elsner-etal-2013-joint", "title": "A Joint Learning Model of Word Segmentation, Lexical Acquisition, and Phonetic Variability", "abstract": "We present a cognitive model of early lexical acquisition which jointly performs word segmentation and learns an explicit model of phonetic variation. We define the model as a Bayesian noisy channel; we sample segmentations and word forms simultaneously from the posterior, using beam sampling to control the size of the search space. Compared to a pipelined approach in which segmentation is performed first, our model is qualitatively more similar to human learners. On data with variable pronunciations, the pipelined approach learns to treat syllables or morphemes as words. In contrast, our joint model, like infant learners, tends to learn multiword collocations. We also conduct analyses of the phonetic variations that the model learns to accept and its patterns of word recognition errors, and relate these to developmental evidence.", "keyphrases": ["word segmentation", "lexical acquisition", "learner"]} +{"id": "fraser-etal-2021-understanding", "title": "Understanding and Countering Stereotypes: A Computational Approach to the Stereotype Content Model", "abstract": "Stereotypical language expresses widely-held beliefs about different social categories. Many stereotypes are overtly negative, while others may appear positive on the surface, but still lead to negative consequences. In this work, we present a computational approach to interpreting stereotypes in text through the Stereotype Content Model (SCM), a comprehensive causal theory from social psychology. 
The SCM proposes that stereotypes can be understood along two primary dimensions: warmth and competence. We present a method for defining warmth and competence axes in semantic embedding space, and show that the four quadrants defined by this subspace accurately represent the warmth and competence concepts, according to annotated lexicons. We then apply our computational SCM model to textual stereotype data and show that it compares favourably with survey-based studies in the psychological literature. Furthermore, we explore various strategies to counter stereotypical beliefs with anti-stereotypes. It is known that countering stereotypes with anti-stereotypical examples is one of the most effective ways to reduce biased thinking, yet the problem of generating anti-stereotypes has not been previously studied. Thus, a better understanding of how to generate realistic and effective anti-stereotypes can contribute to addressing pressing societal concerns of stereotyping, prejudice, and discrimination.", "keyphrases": ["stereotype", "computational approach", "scm"]} +{"id": "lagarda-etal-2009-statistical", "title": "Statistical Post-Editing of a Rule-Based Machine Translation System", "abstract": "Automatic post-editing (APE) systems aim at correcting the output of machine translation systems to produce better quality translations, i.e. produce translations that can be manually post-edited with an increase in productivity. In this work, we present an APE system that uses statistical models to enhance a commercial rule-based machine translation (RBMT) system. In addition, a procedure for effortless human evaluation has been established. We have tested the APE system with two corpora of different complexity. For the Parliament corpus, we show that the APE system significantly complements and improves the RBMT system. Results for the Protocols corpus, although less conclusive, are promising as well. Finally, several possible sources of errors have been identified which will help develop future system enhancements.", "keyphrases": ["post-editing", "ape system", "statistical information"]} +{"id": "liu-etal-2016-effective", "title": "Effective Crowd Annotation for Relation Extraction", "abstract": "Can crowdsourced annotation of training data boost performance for relation extraction over methods based solely on distant supervision? While crowdsourcing has been shown effective for many NLP tasks, previous researchers found only minimal improvement when applying the method to relation extraction. This paper demonstrates that a much larger boost is possible, e.g., raising F1 from 0.40 to 0.60. Furthermore, the gains are due to a simple, generalizable technique, Gated Instruction, which combines an interactive tutorial, feedback to correct errors during training, and improved screening.", "keyphrases": ["relation extraction", "crowdsourcing", "distant supervision", "correct error", "screening"]} +{"id": "procopio-etal-2021-sgl", "title": "SGL: Speaking the Graph Languages of Semantic Parsing via Multilingual Translation", "abstract": "Graph-based semantic parsing aims to represent textual meaning through directed graphs. As one of the most promising general-purpose meaning representations, these structures and their parsing have gained a significant interest momentum during recent years, with several diverse formalisms being proposed. Yet, owing to this very heterogeneity, most of the research effort has focused mainly on solutions specific to a given formalism. 
In this work, instead, we reframe semantic parsing towards multiple formalisms as Multilingual Neural Machine Translation (MNMT), and propose SGL, a many-to-many seq2seq architecture trained with an MNMT objective. Backed by several experiments, we show that this framework is indeed effective once the learning procedure is enhanced with large parallel corpora coming from Machine Translation: we report competitive performances on AMR and UCCA parsing, especially once paired with pre-trained architectures. Furthermore, we find that models trained under this configuration scale remarkably well to tasks such as cross-lingual AMR parsing: SGL outperforms all its competitors by a large margin without even explicitly seeing non-English to AMR examples at training time and, once these examples are included as well, sets an unprecedented state of the art in this task. We release our code and our models for research purposes at .", "keyphrases": ["semantic parsing", "seq2seq architecture", "sgl"]} +{"id": "xie-etal-2021-factual-consistency", "title": "Factual Consistency Evaluation for Text Summarization via Counterfactual Estimation", "abstract": "Although significant progress has been achieved in text summarization, factual inconsistency in generated summaries still severely limits its practical applications. Among the key factors to ensure factual consistency, a reliable automatic evaluation metric is the first and the most crucial one. However, existing metrics either neglect the intrinsic cause of the factual inconsistency or rely on auxiliary tasks, leading to an unsatisfactory correlation with human judgments or increasing the inconvenience of usage in practice. In light of these challenges, we propose a novel metric to evaluate the factual consistency in text summarization via counterfactual estimation, which formulates the causal relationship among the source document, the generated summary, and the language prior. We remove the effect of language prior, which can cause factual inconsistency, from the total causal effect on the generated summary, and provide a simple yet effective way to evaluate consistency without relying on other auxiliary tasks. We conduct a series of experiments on three public abstractive text summarization datasets, and demonstrate the advantages of the proposed metric in both improving the correlation with human judgments and the convenience of usage. The source code is available at .", "keyphrases": ["text summarization", "counterfactual estimation", "causal relationship"]} +{"id": "batchkarov-etal-2016-critique", "title": "A critique of word similarity as a method for evaluating distributional semantic models", "abstract": "This paper aims to re-think the role of the word similarity task in distributional semantics research. We argue that while it is a valuable tool, it should be used with care because it provides only an approximate measure of the quality of a distributional model. Word similarity evaluations assume there exists a single notion of similarity that is independent of a particular application. 
Further, the small size and low inter-annotator agreement of existing data sets make it challenging to find significant differences between models.", "keyphrases": ["critique", "word similarity", "inter-annotator agreement", "consequence"]} +{"id": "fan-etal-2018-controllable", "title": "Controllable Abstractive Summarization", "abstract": "Current models for document summarization disregard user preferences such as the desired length, style, the entities that the user might be interested in, or how much of the document the user has already read. We present a neural summarization model with a simple but effective mechanism to enable users to specify these high level attributes in order to control the shape of the final summaries to better suit their needs. With user input, our system can produce high quality summaries that follow user preferences. Without user input, we set the control variables automatically \u2013 on the full text CNN-Dailymail dataset, we outperform state of the art abstractive systems (both in terms of F1-ROUGE1, 40.38 vs. 39.53, and human evaluation).", "keyphrases": ["length", "attribute", "controllable abstractive summarization", "special token", "speaker style"]} +{"id": "bergsma-etal-2008-discriminative", "title": "Discriminative Learning of Selectional Preference from Unlabeled Text", "abstract": "We present a discriminative method for learning selectional preferences from unlabeled text. Positive examples are taken from observed predicate-argument pairs, while negatives are constructed from unobserved combinations. We train a Support Vector Machine classifier to distinguish the positive from the negative instances. We show how to partition the examples for efficient training with 57 thousand features and 6.5 million training instances. The model outperforms other recent approaches, achieving excellent correlation with human plausibility judgments. Compared to Mutual Information, it identifies 66% more verb-object pairs in unseen text, and resolves 37% more pronouns correctly in a pronoun resolution experiment.", "keyphrases": ["selectional preference", "unlabeled text", "svm classifier", "predicate"]} +{"id": "wang-etal-2020-building", "title": "Building a Bridge: A Method for Image-Text Sarcasm Detection Without Pretraining on Image-Text Data", "abstract": "Sarcasm detection in social media with text and image is becoming more challenging. Previous works of image-text sarcasm detection were mainly to fuse the summaries of text and image: different sub-models read the text and image respectively to get the summaries, and fuse the summaries. Recently, some multi-modal models based on the architecture of BERT are proposed such as ViLBERT. However, they can only be pretrained on the image-text data. In this paper, we propose an image-text model for sarcasm detection using the pretrained BERT and ResNet without any further pretraining. BERT and ResNet have been pretrained on much larger text or image data than image-text data. We connect the vector spaces of BERT and ResNet to utilize more data. We use the pretrained Multi-Head Attention of BERT to model the text and image. Besides, we propose a 2D-Intra-Attention to extract the relationships between words and images. 
In experiments, our model outperforms the state-of-the-art model.", "keyphrases": ["sarcasm detection", "image-text data", "bert"]} +{"id": "gao-etal-2020-explicit", "title": "Explicit Memory Tracker with Coarse-to-Fine Reasoning for Conversational Machine Reading", "abstract": "The goal of conversational machine reading is to answer user questions given a knowledge base text which may require asking clarification questions. Existing approaches are limited in their decision making due to struggles in extracting question-related rules and reasoning about them. In this paper, we present a new framework of conversational machine reading that comprises a novel Explicit Memory Tracker (EMT) to track whether conditions listed in the rule text have already been satisfied to make a decision. Moreover, our framework generates clarification questions by adopting a coarse-to-fine reasoning strategy, utilizing sentence-level entailment scores to weight token-level distributions. On the ShARC benchmark (blind, held-out) testset, EMT achieves new state-of-the-art results of 74.6% micro-averaged decision accuracy and 49.5 BLEU4. We also show that EMT is more interpretable by visualizing the entailment-oriented reasoning process as the conversation flows. Code and models are released at .", "keyphrases": ["conversational machine reading", "reasoning strategy", "explicit memory tracker"]} +{"id": "ferreira-freitas-2021-star", "title": "STAR: Cross-modal [STA]tement [R]epresentation for selecting relevant mathematical premises", "abstract": "Mathematical statements written in natural language are usually composed of two different modalities: mathematical elements and natural language. These two modalities have several distinct linguistic and semantic properties. State-of-the-art representation techniques have demonstrated an inability in capturing such an entangled style of discourse. In this work, we propose STAR, a model that uses cross-modal attention to learn how to represent mathematical text for the task of Natural Language Premise Selection. This task uses conjectures written in both natural and mathematical language to recommend premises that most likely will be relevant to prove a particular statement. We found that STAR not only outperforms baselines that do not distinguish between natural language and mathematical elements, but it also achieves better performance than state-of-the-art models.", "keyphrases": ["premise", "mathematical text", "star"]} +{"id": "nivre-etal-2006-labeled", "title": "Labeled Pseudo-Projective Dependency Parsing with Support Vector Machines", "abstract": "We use SVM classifiers to predict the next action of a deterministic parser that builds labeled projective dependency graphs in an incremental fashion. Non-projective dependencies are captured indirectly by projectivizing the training data for the classifiers and applying an inverse transformation to the output of the parser. We present evaluation results and an error analysis focusing on Swedish and Turkish.", "keyphrases": ["dependency parsing", "non-projective dependency", "transformation"]} +{"id": "artzi-zettlemoyer-2013-weakly", "title": "Weakly Supervised Learning of Semantic Parsers for Mapping Instructions to Actions", "abstract": "The context in which language is used provides a strong signal for learning to recover its meaning. 
In this paper, we show it can be used within a grounded CCG semantic parsing approach that learns a joint model of meaning and context for interpreting and executing natural language instructions, using various types of weak supervision. The joint nature provides crucial benefits by allowing situated cues, such as the set of visible objects, to directly influence learning. It also enables algorithms that learn while executing instructions, for example by trying to replicate human actions. Experiments on a benchmark navigational dataset demonstrate strong performance under differing forms of supervision, including correctly executing 60% more instruction sets relative to the previous state of the art.", "keyphrases": ["mapping instruction", "semantic parsing", "weak supervision", "program"]} +{"id": "maccartney-manning-2008-modeling", "title": "Modeling Semantic Containment and Exclusion in Natural Language Inference", "abstract": "We propose an approach to natural language inference based on a model of natural logic, which identifies valid inferences by their lexical and syntactic features, without full semantic interpretation. We greatly extend past work in natural logic, which has focused solely on semantic containment and monotonicity, to incorporate both semantic exclusion and implicativity. Our system decomposes an inference problem into a sequence of atomic edits linking premise to hypothesis; predicts a lexical entailment relation for each edit using a statistical classifier; propagates these relations upward through a syntax tree according to semantic properties of intermediate nodes; and composes the resulting entailment relations across the edit sequence. We evaluate our system on the FraCaS test suite, and achieve a 27% reduction in error from previous work. We also show that hybridizing an existing RTE system with our natural logic system yields significant gains on the RTE3 test suite.", "keyphrases": ["exclusion", "natural language inference", "atomic edit", "entailment"]} +{"id": "qazvinian-radev-2008-scientific", "title": "Scientific Paper Summarization Using Citation Summary Networks", "abstract": "Quickly moving to a new area of research is painful for researchers due to the vast amount of scientific literature in each field of study. One possible way to overcome this problem is to summarize a scientific topic. In this paper, we propose a model of summarizing a single article, which can be further used to summarize an entire topic. Our model is based on analyzing others' viewpoint of the target article's contributions and the study of its citation summary network using a clustering approach.", "keyphrases": ["scientific paper summarization", "citation network", "c-lexrank"]} +{"id": "komatani-etal-2006-multi", "title": "Multi-Domain Spoken Dialogue System with Extensibility and Robustness against Speech Recognition Errors", "abstract": "We developed a multi-domain spoken dialogue system that can handle user requests across multiple domains. Such systems need to satisfy two requirements: extensibility and robustness against speech recognition errors. Extensibility is required to allow for the modification and addition of domains independent of other domains. Robustness against speech recognition errors is required because such errors are inevitable in speech recognition. However, the systems should still behave appropriately, even when their inputs are erroneous. 
Our system was constructed on an extensible architecture and is equipped with a robust and extensible domain selection method. Domain selection was based on three choices: (I) the previous domain, (II) the domain in which the speech recognition result can be accepted with the highest recognition score, and (III) other domains. With the third choice we newly introduced, our system can prevent dialogues from continuously being stuck in an erroneous domain. Our experimental results, obtained with 10 subjects, showed that our method reduced the domain selection errors by 18.3%, compared to a conventional method.", "keyphrases": ["spoken dialogue system", "speech recognition error", "multiple domain"]} +{"id": "choi-etal-2017-coarse", "title": "Coarse-to-Fine Question Answering for Long Documents", "abstract": "We present a framework for question answering that can efficiently scale to longer documents while maintaining or even improving performance of state-of-the-art models. While most successful approaches for reading comprehension rely on recurrent neural networks (RNNs), running them over long documents is prohibitively slow because it is difficult to parallelize over sequences. Inspired by how people first skim the document, identify relevant parts, and carefully read these parts to produce an answer, we combine a coarse, fast model for selecting relevant sentences and a more expensive RNN for producing the answer from those sentences. We treat sentence selection as a latent variable trained jointly from the answer only using reinforcement learning. Experiments demonstrate state-of-the-art performance on a challenging subset of the WikiReading dataset and on a new dataset, while speeding up the model by 3.5x-6.7x.", "keyphrases": ["long document", "relevant sentence", "coarse-to-fine framework"]} +{"id": "tran-etal-2020-revisiting", "title": "Revisiting Unsupervised Relation Extraction", "abstract": "Unsupervised relation extraction (URE) extracts relations between named entities from raw text without manually-labelled data and existing knowledge bases (KBs). URE methods can be categorised into generative and discriminative approaches, which rely either on hand-crafted features or surface form. However, we demonstrate that by using only named entities to induce relation types, we can outperform existing methods on two popular datasets. We conduct a comparison and evaluation of our findings with other URE techniques, to ascertain the important features in URE. We conclude that entity types provide a strong inductive bias for URE.", "keyphrases": ["unsupervised relation extraction", "important feature", "entity type"]} +{"id": "levy-etal-2014-context", "title": "Context Dependent Claim Detection", "abstract": "While discussing a concrete controversial topic, most humans will find it challenging to swiftly raise a diverse set of convincing and relevant claims that should set the basis of their arguments. Here, we formally define the challenging task of automatic claim detection in a given context and discuss its associated unique difficulties. Further, we outline a preliminary solution to this task, and assess its performance over annotated real world data, collected specifically for that purpose over hundreds of Wikipedia articles. We report promising results of a supervised learning approach, which is based on a cascade of classifiers designed to properly handle the skewed data which is inherent to the defined task. 
These results demonstrate the viability of the introduced task.", "keyphrases": ["claim", "detection", "wikipedia", "argumentation mining", "cdcs"]} +{"id": "madnani-etal-2012-exploring", "title": "Exploring Grammatical Error Correction with Not-So-Crummy Machine Translation", "abstract": "To date, most work in grammatical error correction has focused on targeting specific error types. We present a probe study into whether we can use round-trip translations obtained from Google Translate via 8 different pivot languages for whole-sentence grammatical error correction. We develop a novel alignment algorithm for combining multiple round-trip translations into a lattice using the TERp machine translation metric. We further implement six different methods for extracting whole-sentence corrections from the lattice. Our preliminary experiments yield fairly satisfactory results but leave significant room for improvement. Most importantly, though, they make it clear the methods we propose have strong potential and require further study.", "keyphrases": ["grammatical error correction", "round-trip translation", "different pivot language"]} +{"id": "he-etal-2008-improving", "title": "Improving Statistical Machine Translation using Lexicalized Rule Selection", "abstract": "This paper proposes a novel lexicalized approach for rule selection for syntax-based statistical machine translation (SMT). We build maximum entropy (MaxEnt) models which combine rich context information for selecting translation rules during decoding. We successfully integrate the MaxEnt-based rule selection models into the state-of-the-art syntax-based SMT model. Experiments show that our lexicalized approach for rule selection achieves statistically significant improvements over the state-of-the-art SMT system.", "keyphrases": ["statistical machine translation", "context information", "entropy model", "source-side"]} +{"id": "roark-etal-2007-syntactic", "title": "Syntactic complexity measures for detecting Mild Cognitive Impairment", "abstract": "We consider the diagnostic utility of various syntactic complexity measures when extracted from spoken language samples of healthy and cognitively impaired subjects. We examine measures calculated from manually built parse trees, as well as the same measures calculated from automatic parses. We show statistically significant differences between clinical subject groups for a number of syntactic complexity measures, and these differences are preserved with automatic parsing. Different measures show different patterns for our data set, indicating that using multiple, complementary measures is important for such an application.", "keyphrases": ["mild cognitive impairment", "language sample", "syntactic complexity measure"]} +{"id": "neelakantan-etal-2014-efficient", "title": "Efficient Non-parametric Estimation of Multiple Embeddings per Word in Vector Space", "abstract": "There is rising interest in vector-space word embeddings and their use in NLP, especially given recent methods for their fast estimation at very large scale. Nearly all this work, however, assumes a single vector per word type\u2014ignoring polysemy and thus jeopardizing their usefulness for downstream tasks. We present an extension to the Skip-gram model that efficiently learns multiple embeddings per word type. 
It differs from recent related work by jointly performing word sense discrimination and embedding learning, by non-parametrically estimating the number of senses per word type, and by its efficiency and scalability. We present new state-of-the-art results in the word similarity in context task and demonstrate its scalability by training with one machine on a corpus of nearly 1 billion tokens in less than 6 hours.", "keyphrases": ["vector space", "skip-gram model", "multi-prototype embedding", "polysemous word", "assumption"]} +{"id": "erk-2007-simple", "title": "A Simple, Similarity-based Model for Selectional Preferences", "abstract": "We propose a new, simple model for the automatic induction of selectional preferences, using corpus-based semantic similarity metrics. Focusing on the task of semantic role labeling, we compute selectional preferences for semantic roles. In evaluations the similarity-based model shows lower error rates than both Resnik\u2019s WordNet-based model and the EM-based clustering model, but has coverage problems.", "keyphrases": ["similarity-based model", "selectional preferences", "simple model", "induction"]} +{"id": "nogueira-dos-santos-etal-2018-fighting", "title": "Fighting Offensive Language on Social Media with Unsupervised Text Style Transfer", "abstract": "We introduce a new approach to tackle the problem of offensive language in online social media. Our approach uses unsupervised text style transfer to translate offensive sentences into non-offensive ones. We propose a new method for training encoder-decoders using non-parallel data that combines a collaborative classifier, attention and the cycle consistency loss. Experimental results on data from Twitter and Reddit show that our method outperforms a state-of-the-art text style transfer system in two out of three quantitative metrics and produces reliable non-offensive transferred sentences.", "keyphrases": ["offensive language", "style transfer", "social medium"]} +{"id": "baroni-lenci-2011-blessed", "title": "How we BLESSed distributional semantic evaluation", "abstract": "We introduce BLESS, a data set specifically designed for the evaluation of distributional semantic models. BLESS contains a set of tuples instantiating different, explicitly typed semantic relations, plus a number of controlled random tuples. It is thus possible to assess the ability of a model to detect truly related word pairs, as well as to perform in-depth analyses of the types of semantic relations that a model favors. We discuss the motivations for BLESS, describe its construction and structure, and present examples of its usage in the evaluation of distributional semantic models.", "keyphrases": ["bless", "word pair", "hypernymy"]} +{"id": "kanojia-etal-2019-utilizing", "title": "Utilizing Wordnets for Cognate Detection among Indian Languages", "abstract": "Automatic Cognate Detection (ACD) is a challenging task which has been utilized to help NLP applications like Machine Translation, Information Retrieval and Computational Phylogenetics. Unidentified cognate pairs can pose a challenge to these applications and result in a degradation of performance. In this paper, we detect cognate word pairs among ten Indian languages with Hindi and use deep learning methodologies to predict whether a word pair is cognate or not. We identify IndoWordnet as a potential resource to detect cognate word pairs based on orthographic similarity-based methods and train neural network models using the data obtained from it. 
We identify parallel corpora as another potential resource and perform the same experiments for them. We also validate the contribution of Wordnets through further experimentation and report improved performance of up to 26%. We discuss the nuances of cognate detection among closely related Indian languages and release the lists of detected cognates as a dataset. We also observe the behaviour of, to an extent, unrelated Indian language pairs and release the lists of detected cognates among them as well.", "keyphrases": ["wordnet", "cognate detection", "indian language"]} +{"id": "pavlick-etal-2015-framenet", "title": "FrameNet+: Fast Paraphrastic Tripling of FrameNet", "abstract": "We increase the lexical coverage of FrameNet through automatic paraphrasing. We use crowdsourcing to manually filter out bad paraphrases in order to ensure a high-precision resource. Our expanded FrameNet contains an additional 22K lexical units, a 3-fold increase over the current FrameNet, and achieves 40% better coverage when evaluated in a practical setting on New York Times data.", "keyphrases": ["framenet", "paraphrasing", "crowdsourcing", "lexical unit"]} +{"id": "zhu-etal-2020-babywalk", "title": "BabyWalk: Going Farther in Vision-and-Language Navigation by Taking Baby Steps", "abstract": "Learning to follow instructions is of fundamental importance to autonomous agents for vision-and-language navigation (VLN). In this paper, we study how an agent can navigate long paths when learning from a corpus that consists of shorter ones. We show that existing state-of-the-art agents do not generalize well. To this end, we propose BabyWalk, a new VLN agent that is learned to navigate by decomposing long instructions into shorter ones (BabySteps) and completing them sequentially. A specially designed memory buffer is used by the agent to turn its past experiences into contexts for future steps. The learning process is composed of two phases. In the first phase, the agent uses imitation learning from demonstration to accomplish BabySteps. In the second phase, the agent uses curriculum-based reinforcement learning to maximize rewards on navigation tasks with increasingly longer instructions. We create two new benchmark datasets (of long navigation tasks) and use them in conjunction with existing ones to examine BabyWalk's generalization ability. Empirical results show that BabyWalk achieves state-of-the-art results on several metrics, in particular, is able to follow long instructions better. The codes and the datasets are released on our project page: .", "keyphrases": ["vision-and-language navigation", "long instruction", "babywalk"]} +{"id": "nagata-etal-2020-supervised", "title": "A Supervised Word Alignment Method based on Cross-Language Span Prediction using Multilingual BERT", "abstract": "We present a novel supervised word alignment method based on cross-language span prediction. We first formalize a word alignment problem as a collection of independent predictions from a token in the source sentence to a span in the target sentence. Since this step is equivalent to a SQuAD v2.0 style question answering task, we solve it using the multilingual BERT, which is fine-tuned on manually created gold word alignment data. It is nontrivial to obtain accurate alignment from a set of independently predicted spans. We greatly improved the word alignment accuracy by adding to the question the source token's context and symmetrizing two directional predictions. 

In experiments using five word alignment datasets from among Chinese, Japanese, German, Romanian, French, and English, we show that our proposed method significantly outperformed previous supervised and unsupervised word alignment methods without any bitexts for pretraining. For example, we achieved 86.7 F1 score for the Chinese-English data, which is 13.3 points higher than the previous state-of-the-art supervised method.", "keyphrases": ["cross-language span prediction", "multilingual bert", "neural word alignment"]} +{"id": "nguyen-etal-2017-hierarchical", "title": "Hierarchical Embeddings for Hypernymy Detection and Directionality", "abstract": "We present a novel neural model HyperVec to learn hierarchical embeddings for hypernymy detection and directionality. While previous embeddings have shown limitations on prototypical hypernyms, HyperVec represents an unsupervised measure where embeddings are learned in a specific order and capture the hypernym\u2013hyponym distributional hierarchy. Moreover, our model is able to generalize over unseen hypernymy pairs, when using only small sets of training data, and by mapping to other languages. Results on benchmark datasets show that HyperVec outperforms both state-of-the-art unsupervised measures and embedding models on hypernymy detection and directionality, and on predicting graded lexical entailment.", "keyphrases": ["hypernymy detection", "directionality", "word embedding"]} +{"id": "jansen-etal-2018-worldtree", "title": "WorldTree: A Corpus of Explanation Graphs for Elementary Science Questions supporting Multi-hop Inference", "abstract": "Developing methods of automated inference that are able to provide users with compelling human-readable justifications for why the answer to a question is correct is critical for domains such as science and medicine, where user trust and detecting costly errors are limiting factors to adoption. One of the central barriers to training question answering models on explainable inference tasks is the lack of gold explanations to serve as training data. In this paper we present a corpus of explanations for standardized science exams, a recent challenge task for question answering. We manually construct a corpus of detailed explanations for nearly all publicly available standardized elementary science questions (approximately 1,680 3rd through 5th grade questions) and represent these as \"explanation graphs\" -- sets of lexically overlapping sentences that describe how to arrive at the correct answer to a question through a combination of domain and world knowledge. We also provide an explanation-centered tablestore, a collection of semi-structured tables that contain the knowledge to construct these elementary science explanations. Together, these two knowledge resources map out a substantial portion of the knowledge required for answering and explaining elementary science exams, and provide both structured and free-text training data for the explainable inference task.", "keyphrases": ["explanation", "semi-structured table", "worldtree", "semantic drift", "science exam question"]} +{"id": "kiela-etal-2018-dynamic", "title": "Dynamic Meta-Embeddings for Improved Sentence Representations", "abstract": "While one of the first steps in many NLP systems is selecting what pre-trained word embeddings to use, we argue that such a step is better left for neural networks to figure out by themselves. 

To that end, we introduce dynamic meta-embeddings, a simple yet effective method for the supervised learning of embedding ensembles, which leads to state-of-the-art performance within the same model class on a variety of tasks. We subsequently show how the technique can be used to shed new light on the usage of word embeddings in NLP systems.", "keyphrases": ["meta-embedding", "ensemble", "natural language inference"]} +{"id": "dong-etal-2020-multi", "title": "Multi-Fact Correction in Abstractive Text Summarization", "abstract": "Pre-trained neural abstractive summarization systems have dominated extractive strategies on news summarization performance, at least in terms of ROUGE. However, system-generated abstractive summaries often face the pitfall of factual inconsistency: generating incorrect facts with respect to the source text. To address this challenge, we propose Span-Fact, a suite of two factual correction models that leverages knowledge learned from question answering models to make corrections in system-generated summaries via span selection. Our models employ single or multi-masking strategies to either iteratively or auto-regressively replace entities in order to ensure semantic consistency w.r.t. the source text, while retaining the syntactic structure of summaries generated by abstractive summarization models. Experiments show that our models significantly boost the factual consistency of system-generated summaries without sacrificing summary quality in terms of both automatic metrics and human evaluation.", "keyphrases": ["correction", "abstractive summarization system", "consistency"]} +{"id": "agarwal-etal-2020-history", "title": "History for Visual Dialog: Do we really need it?", "abstract": "Visual Dialogue involves \u201cunderstanding\u201d the dialogue history (what has been discussed previously) and the current question (what is asked), in addition to grounding information in the image, to accurately generate the correct response. In this paper, we show that co-attention models which explicitly encode dialogue history outperform models that don't, achieving state-of-the-art performance (72% NDCG on val set). However, we also expose shortcomings of the crowdsourcing dataset collection procedure, by showing that dialogue history is indeed only required for a small amount of the data, and that the current evaluation metric encourages generic replies. To that end, we propose a challenging subset (VisdialConv) of the VisdialVal set and the benchmark NDCG of 63%.", "keyphrases": ["visual dialog", "dialogue history", "history"]} +{"id": "freitag-etal-2019-ape", "title": "APE at Scale and Its Implications on MT Evaluation Biases", "abstract": "In this work, we train an Automatic Post-Editing (APE) model and use it to reveal biases in standard MT evaluation procedures. The goal of our APE model is to correct typical errors introduced by the translation process, and convert the \u201ctranslationese\u201d output into natural text. Our APE model is trained entirely on monolingual data that has been round-trip translated through English, to mimic errors that are similar to the ones introduced by NMT. We apply our model to the output of existing NMT systems, and demonstrate that, while the human-judged quality improves in all cases, BLEU scores drop with forward-translated test sets. We verify these results for the WMT18 English to German, WMT15 English to French, and WMT16 English to Romanian tasks. 

Furthermore, we selectively apply our APE model on the output of the top submissions of the most recent WMT evaluation campaigns. We see quality improvements on all tasks of up to 2.5 BLEU points.", "keyphrases": ["monolingual data", "bleu score", "test set", "ape"]} +{"id": "jain-etal-2018-mixed", "title": "A Mixed Hierarchical Attention Based Encoder-Decoder Approach for Standard Table Summarization", "abstract": "Structured data summarization involves generation of natural language summaries from structured input data. In this work, we consider summarizing structured data occurring in the form of tables as they are prevalent across a wide variety of domains. We formulate the standard table summarization problem, which deals with tables conforming to a single predefined schema. To this end, we propose a mixed hierarchical attention based encoder-decoder model which is able to leverage the structure in addition to the content of the tables. Our experiments on the publicly available weathergov dataset show around 18 BLEU (around 30%) improvement over the current state-of-the-art.", "keyphrases": ["mixed hierarchical attention", "table", "encoder-decoder model"]} +{"id": "fu-etal-2014-learning", "title": "Learning Semantic Hierarchies via Word Embeddings", "abstract": "Semantic hierarchy construction aims to build structures of concepts linked by hypernym\u2010hyponym (\u201cis-a\u201d) relations. A major challenge for this task is the automatic discovery of such relations. This paper proposes a novel and effective method for the construction of semantic hierarchies based on word embeddings, which can be used to measure the semantic relationship between words. We identify whether a candidate word pair has hypernym\u2010hyponym relation by using the word-embedding-based semantic projections between words and their hypernyms. Our result, an F-score of 73.74%, outperforms the state-of-the-art methods on a manually labeled test dataset. Moreover, combining our method with a previous manually-built hierarchy extension method can further improve the F-score to 80.29%.", "keyphrases": ["word embedding", "hypernym", "projection learning", "chinese thesaurus", "cluster"]} +{"id": "eck-etal-2005-low-cost", "title": "Low Cost Portability for Statistical Machine Translation based on N-gram Coverage", "abstract": "Statistical machine translation relies heavily on the available training data. However, in some cases, it is necessary to limit the amount of training data that can be created for or actually used by the systems. To solve that problem, we introduce a weighting scheme that tries to select more informative sentences first. This selection is based on the previously unseen n-grams the sentences contain, and it allows us to sort the sentences according to their estimated importance. After sorting, we can construct smaller training corpora, and we are able to demonstrate that systems trained on much less training data show a very competitive performance compared to baseline systems using all available training data.", "keyphrases": ["statistical machine translation", "n-gram coverage", "sentence pair"]} +{"id": "giulianelli-etal-2018-hood", "title": "Under the Hood: Using Diagnostic Classifiers to Investigate and Improve how Language Models Track Agreement Information", "abstract": "How do neural language models keep track of number agreement between subject and verb? 

We show that \u201cdiagnostic classifiers\u201d, trained to predict number from the internal states of a language model, provide a detailed understanding of how, when, and where this information is represented. Moreover, they give us insight into when and where number information is corrupted in cases where the language model ends up making agreement errors. To demonstrate the causal role played by the representations we find, we then use agreement information to influence the course of the LSTM during the processing of difficult sentences. Results from such an intervention reveal a large increase in the language model's accuracy. Together, these results show that diagnostic classifiers give us an unrivalled detailed look into the representation of linguistic information in neural models, and demonstrate that this knowledge can be used to improve their performance.", "keyphrases": ["diagnostic classifier", "language model", "track", "agreement", "subject-verb agreement"]} +{"id": "berzak-etal-2015-contrastive", "title": "Contrastive Analysis with Predictive Power: Typology Driven Estimation of Grammatical Error Distributions in ESL", "abstract": "This work examines the impact of cross-linguistic transfer on grammatical errors in English as Second Language (ESL) texts. Using a computational framework that formalizes the theory of Contrastive Analysis (CA), we demonstrate that language specific error distributions in ESL writing can be predicted from the typological properties of the native language and their relation to the typology of English. Our typology driven model enables us to obtain accurate estimates of such distributions without access to any ESL data for the target languages. Furthermore, we present a strategy for adjusting our method to low-resource languages that lack typological documentation using a bootstrapping approach which approximates native language typology from ESL texts. Finally, we show that our framework is instrumental for linguistic inquiry seeking to identify first language factors that contribute to a wide range of difficulties in second language acquisition.", "keyphrases": ["esl", "native language", "contrastive analysis"]} +{"id": "kao-jurafsky-2012-computational", "title": "A Computational Analysis of Style, Affect, and Imagery in Contemporary Poetry", "abstract": "What makes a poem beautiful? We use computational methods to compare the stylistic and content features employed by award-winning poets and amateur poets. Building upon existing techniques designed to quantitatively analyze style and affect in texts, we examined elements of poetic craft such as diction, sound devices, emotive language, and imagery. Results showed that the most important indicator of high-quality poetry we could detect was the frequency of references to concrete objects. This result highlights the influence of Imagism in contemporary professional poetry, and suggests that concreteness may be one of the most appealing features of poetry to the modern aesthetic. 

We also report on other features that characterize high-quality poetry and argue that methods from computational linguistics may provide important insights into the analysis of beauty in verbal art.", "keyphrases": ["style", "contemporary poetry", "imagism"]} +{"id": "pimentel-etal-2020-information", "title": "Information-Theoretic Probing for Linguistic Structure", "abstract": "The success of neural networks on a diverse set of NLP tasks has led researchers to question how much these networks actually \u201cknow\u201d about natural language. Probes are a natural way of assessing this. When probing, a researcher chooses a linguistic task and trains a supervised model to predict annotations in that linguistic task from the network's learned representations. If the probe does well, the researcher may conclude that the representations encode knowledge related to the task. A commonly held belief is that using simpler models as probes is better; the logic is that simpler models will identify linguistic structure, but not learn the task itself. We propose an information-theoretic operationalization of probing as estimating mutual information that contradicts this received wisdom: one should always select the highest performing probe one can, even if it is more complex, since it will result in a tighter estimate, and thus reveal more of the linguistic information inherent in the representation. The experimental portion of our paper focuses on empirically estimating the mutual information between a linguistic property and BERT, comparing these estimates to several baselines. We evaluate on a set of ten typologically diverse languages often underrepresented in NLP research\u2014plus English\u2014totalling eleven languages. Our implementation is available in .", "keyphrases": ["probing", "linguistic structure", "estimate", "bert", "diagnostic classifier"]} +{"id": "baldwin-etal-2015-shared", "title": "Shared Tasks of the 2015 Workshop on Noisy User-generated Text: Twitter Lexical Normalization and Named Entity Recognition", "abstract": "This paper presents the results of the two shared tasks associated with W-NUT 2015: (1) a text normalization task with 10 participants; and (2) a named entity tagging task with 8 participants. We outline the task, annotation process and dataset statistics, and provide a high-level overview of the participating systems for each shared task.", "keyphrases": ["workshop", "noisy user-generated text", "lexical normalization", "named entity recognition", "english tweet"]} +{"id": "herzig-etal-2011-annotation", "title": "An Annotation Scheme for Automated Bias Detection in Wikipedia", "abstract": "BiasML is a novel annotation scheme with the purpose of identifying the presence as well as nuances of biased language within the subset of Wikipedia articles dedicated to service providers. Whereas Wikipedia currently uses only manual flagging to detect possible bias, our scheme provides a foundation for the automating of bias flagging by improving upon the methodology of annotation schemes in classic sentiment analysis. We also address challenges unique to the task of identifying biased writing within the specific context of Wikipedia's neutrality policy. We perform a detailed analysis of inter-annotator agreement, which shows that although the agreement scores for intra-sentential tags were relatively low, the agreement scores on the sentence and entry levels were encouraging (74.8% and 66.7%, respectively). 
Based on an analysis of our first implementation of our scheme, we suggest possible improvements to our guidelines, in the hope that further rounds of annotation after incorporating them could provide appropriate data for use within a machine learning framework for automated detection of bias within Wikipedia.", "keyphrases": ["annotation scheme", "wikipedia", "biased language"]} +{"id": "luu-malamud-2020-non", "title": "Non-Topical Coherence in Social Talk: A Call for Dialogue Model Enrichment", "abstract": "Current models of dialogue mainly focus on utterances within a topically coherent discourse segment, rather than new-topic utterances (NTUs), which begin a new topic not correlating with the content of prior discourse. As a result, these models may sufficiently account for discourse context of task-oriented but not social conversations. We conduct a pilot annotation study of NTUs as a first step towards a model capable of rationalizing conversational coherence in social talk. We start with the naturally occurring social dialogues in the Disco-SPICE corpus, annotated with discourse relations in the Penn Discourse Treebank and Cognitive approach to Coherence Relations frameworks. We first annotate content-based coherence relations that are not available in Disco-SPICE, and then heuristically identify NTUs, which lack a coherence relation to prior discourse. Based on the interaction between NTUs and their discourse context, we construct a classification for NTUs that actually convey certain non-topical coherence in social talk. This classification introduces new sequence-based social intents that traditional taxonomies of speech acts do not capture. The new findings advocate the development of a Bayesian game-theoretic model for social talk.", "keyphrases": ["social talk", "new-topic utterance", "prior discourse", "non-topical coherence"]} +{"id": "quirk-2004-training", "title": "Training a Sentence-Level Machine Translation Confidence Measure", "abstract": "We present a supervised method for training a sentence level confidence measure on translation output using a human-annotated corpus. We evaluate a variety of machine learning methods. The resultant measure, while trained on a very small dataset, correlates well with human judgments, and proves to be effective on one task-based evaluation. Although the experiments have only been run on one MT system, we believe the nature of the features gathered is general enough that the approach will also work well on other systems.", "keyphrases": ["translation output", "confidence estimation", "small set"]} +{"id": "hovy-etal-2013-events", "title": "Events are Not Simple: Identity, Non-Identity, and Quasi-Identity", "abstract": "Despite considerable theoretical and computational work on coreference, deciding when two entities or events are identical is very difficult. In a project to build corpora containing coreference links between events, we have identified three levels of event identity (full, partial, and none). Event coreference annotation on two corpora was performed to validate the findings.", "keyphrases": ["identity", "coreference", "event relation"]} +{"id": "li-etal-2015-hierarchical", "title": "A Hierarchical Neural Autoencoder for Paragraphs and Documents", "abstract": "Natural language generation of coherent long texts like paragraphs or longer documents is a challenging problem for recurrent network models. 

In this paper, we explore an important step toward this generation task: training an LSTM (Long Short-Term Memory) auto-encoder to preserve and reconstruct multi-sentence paragraphs. We introduce an LSTM model that hierarchically builds an embedding for a paragraph from embeddings for sentences and words, then decodes this embedding to reconstruct the original paragraph. We evaluate the reconstructed paragraph using standard metrics like ROUGE and Entity Grid, showing that neural models are able to encode texts in a way that preserves syntactic, semantic, and discourse coherence. While only a first step toward generating coherent text units from neural models, our work has the potential to significantly impact natural language generation and summarization.", "keyphrases": ["autoencoder", "paragraph", "natural language generation"]} +{"id": "roller-etal-2012-supervised", "title": "Supervised Text-based Geolocation Using Language Models on an Adaptive Grid", "abstract": "The geographical properties of words have recently begun to be exploited for geolocating documents based solely on their text, often in the context of social media and online content. One common approach for geolocating texts is rooted in information retrieval. Given training documents labeled with latitude/longitude coordinates, a grid is overlaid on the Earth and pseudo-documents constructed by concatenating the documents within a given grid cell; then a location for a test document is chosen based on the most similar pseudo-document. Uniform grids are normally used, but they are sensitive to the dispersion of documents over the earth. We define an alternative grid construction using k-d trees that more robustly adapts to data, especially with larger training sets. We also provide a better way of choosing the locations for pseudo-documents. We evaluate these strategies on existing Wikipedia and Twitter corpora, as well as a new, larger Twitter corpus. The adaptive grid achieves competitive results with a uniform grid on small training sets and outperforms it on the large Twitter corpus. The two grid constructions can also be combined to produce consistently strong results across all training sets.", "keyphrases": ["adaptive grid", "location", "similar pseudo-document", "geolocation prediction"]} +{"id": "erk-etal-2010-flexible", "title": "A Flexible, Corpus-Driven Model of Regular and Inverse Selectional Preferences", "abstract": "We present a vector space\u2013based model for selectional preferences that predicts plausibility scores for argument headwords. It does not require any lexical resources (such as WordNet). It can be trained either on one corpus with syntactic annotation, or on a combination of a small semantically annotated primary corpus and a large, syntactically analyzed generalization corpus. Our model is able to predict inverse selectional preferences, that is, plausibility scores for predicates given argument heads. We evaluate our model on one NLP task (pseudo-disambiguation) and one cognitive task (prediction of human plausibility judgments), gauging the influence of different parameters and comparing our model against other model classes. We obtain consistent benefits from using the disambiguation and semantic role information provided by a semantically tagged primary corpus. As for parameters, we identify settings that yield good performance across a range of experimental conditions. 

However, frequency remains a major influence on prediction quality, and we also identify more robust parameter settings suitable for applications with many infrequent items.", "keyphrases": ["selectional preference", "plausibility", "predicate", "distributional similarity metric", "thematic fit"]} +{"id": "neves-etal-2016-scielo", "title": "The Scielo Corpus: a Parallel Corpus of Scientific Publications for Biomedicine", "abstract": "The biomedical scientific literature is a rich source of information not only in the English language, for which it is more abundant, but also in other languages, such as Portuguese, Spanish and French. We present the first freely available parallel corpus of scientific publications for the biomedical domain. Documents from the \u201cBiological Sciences\u201d and \u201cHealth Sciences\u201d categories were retrieved from the Scielo database and parallel titles and abstracts are available for the following language pairs: Portuguese/English (about 86,000 documents in total), Spanish/English (about 95,000 documents) and French/English (about 2,000 documents). Additionally, monolingual data was also collected for all four languages. Sentences in the parallel corpus were automatically aligned and a manual analysis of 200 documents by native experts found that a minimum of 79% of sentences were correctly aligned in all language pairs. We demonstrate the utility of the corpus by running baseline machine translation experiments. We show that for all language pairs, a statistical machine translation system trained on the parallel corpora achieves performance that rivals or exceeds the state of the art in the biomedical domain. Furthermore, the corpora are currently being used in the biomedical task in the First Conference on Machine Translation (WMT'16).", "keyphrases": ["scielo corpus", "parallel corpus", "scientific publication"]} +{"id": "bogin-etal-2019-global", "title": "Global Reasoning over Database Structures for Text-to-SQL Parsing", "abstract": "State-of-the-art semantic parsers rely on auto-regressive decoding, emitting one symbol at a time. When tested against complex databases that are unobserved at training time (zero-shot), the parser often struggles to select the correct set of database constants in the new database, due to the local nature of decoding. In this work, we propose a semantic parser that globally reasons about the structure of the output query to make a more contextually-informed selection of database constants. We use message-passing through a graph neural network to softly select a subset of database constants for the output query, conditioned on the question. Moreover, we train a model to rank queries based on the global alignment of database constants to question words. We apply our techniques to the current state-of-the-art model for Spider, a zero-shot semantic parsing dataset with complex databases, increasing accuracy from 39.4% to 47.4%.", "keyphrases": ["database", "text-to-sql", "global reasoning"]} +{"id": "lai-etal-2017-natural", "title": "Natural Language Inference from Multiple Premises", "abstract": "We define a novel textual entailment task that requires inference over multiple premise sentences. We present a new dataset for this task that minimizes trivial lexical inferences, emphasizes knowledge of everyday events, and presents a more challenging setting for textual entailment. 

We evaluate several strong neural baselines and analyze how the multiple premise task differs from standard textual entailment.", "keyphrases": ["premise", "entailment", "natural language inference"]} +{"id": "nopp-hanbury-2015-detecting", "title": "Detecting Risks in the Banking System by Sentiment Analysis", "abstract": "In November 2014, the European Central Bank (ECB) started to directly supervise the largest banks in the Eurozone via the Single Supervisory Mechanism (SSM). While supervisory risk assessments are usually based on quantitative data and surveys, this work explores whether sentiment analysis is capable of measuring a bank\u2019s attitude and opinions towards risk by analyzing text data. For realizing this study, a collection consisting of more than 500 CEO letters and outlook sections extracted from bank annual reports is built up. Based on these data, two distinct experiments are conducted. The evaluations find promising opportunities, but also limitations for risk sentiment analysis in banking supervision. At the level of individual banks, predictions are relatively inaccurate. In contrast, the analysis of aggregated figures revealed strong and significant correlations between uncertainty or negativity in textual disclosures and the quantitative risk indicator\u2019s future evolution. Risk sentiment analysis should therefore rather be used for macroprudential analyses than for assessments of individual banks.", "keyphrases": ["bank", "sentiment analysis", "textual disclosure"]} +{"id": "kobayashi-etal-2020-efficient", "title": "Efficient Estimation of Influence of a Training Instance", "abstract": "Understanding the influence of a training instance on a neural network model leads to improving interpretability. However, it is difficult and inefficient to evaluate the influence, which shows how a model's prediction would be changed if a training instance were not used. In this paper, we propose an efficient method for estimating the influence. Our method is inspired by dropout, which zero-masks a sub-network and prevents the sub-network from learning each training instance. By switching between dropout masks, we can use sub-networks that learned or did not learn each training instance and estimate its influence. Through experiments with BERT and VGGNet on classification datasets, we demonstrate that the proposed method can capture training influences, enhance the interpretability of error predictions, and cleanse the training dataset for improving generalization.", "keyphrases": ["influence", "training instance", "dropout mask"]} +{"id": "bonafonte-etal-2006-tc", "title": "TC-STAR: Specifications of Language Resources and Evaluation for Speech Synthesis", "abstract": "In the framework of the EU funded project TC-STAR (Technology and Corpora for Speech to Speech Translation), research on TTS aims at providing a synthesized voice sounding like the source speaker speaking the target language. To progress in this direction, research is focused on naturalness, intelligibility, expressivity, and voice conversion within the TC-STAR framework. For this purpose, specifications on large, high quality TTS databases have been developed and the data have been recorded for UK English, Spanish and Mandarin. The development of speech technology in TC-STAR is evaluation-driven. Assessment of speech synthesis is needed to determine how well a system or technique performs in comparison to previous versions as well as other approaches (systems & methods). 

Apart from testing the whole system, all components of the system will be evaluated separately. This approach grants better assessment of each component as well as identification of the best techniques in the different speech synthesis processes. This paper describes the specifications of Language Resources for speech synthesis and the specifications for evaluation of speech synthesis activities.", "keyphrases": ["specification", "language resources", "speech synthesis", "tc-star"]} +{"id": "xu-etal-2013-mining", "title": "Mining Opinion Words and Opinion Targets in a Two-Stage Framework", "abstract": "This paper proposes a novel two-stage method for mining opinion words and opinion targets. In the first stage, we propose a Sentiment Graph Walking algorithm, which naturally incorporates syntactic patterns in a Sentiment Graph to extract opinion word/target candidates. Then random walking is employed to estimate confidence of candidates, which improves extraction accuracy by considering confidence of patterns. In the second stage, we adopt a self-learning strategy to refine the results from the first stage, especially for filtering out high-frequency noise terms and capturing the long-tail terms, which are not investigated by previous methods. The experimental results on three real world datasets demonstrate the effectiveness of our approach compared with state-of-the-art unsupervised methods.", "keyphrases": ["opinion target", "two-stage framework", "syntactic pattern"]} +{"id": "kim-etal-2021-self", "title": "Self-Guided Contrastive Learning for BERT Sentence Representations", "abstract": "Although BERT and its variants have reshaped the NLP landscape, it still remains unclear how best to derive sentence embeddings from such pre-trained Transformers. In this work, we propose a contrastive learning method that utilizes self-guidance for improving the quality of BERT sentence representations. Our method fine-tunes BERT in a self-supervised fashion, does not rely on data augmentation, and enables the usual [CLS] token embeddings to function as sentence vectors. Moreover, we redesign the contrastive learning objective (NT-Xent) and apply it to sentence representation learning. We demonstrate with extensive experiments that our approach is more effective than competitive baselines on diverse sentence-related tasks. We also show it is efficient at inference and robust to domain shifts.", "keyphrases": ["contrastive learning", "sentence representation", "self-guidance"]} +{"id": "severyn-etal-2014-opinion", "title": "Opinion Mining on YouTube", "abstract": "This paper defines a systematic approach to Opinion Mining (OM) on YouTube comments by (i) modeling classifiers for predicting the opinion polarity and the type of comment and (ii) proposing robust shallow syntactic structures for improving model adaptability. We rely on the tree kernel technology to automatically extract and learn features with better generalization power than bag-of-words. An extensive empirical evaluation on our manually annotated YouTube comments corpus shows a high classification accuracy and highlights the benefits of structural models in a cross-domain setting.", "keyphrases": ["youtube", "systematic approach", "opinion mining", "social medium", "facebook"]} +{"id": "malik-2006-punjabi", "title": "Punjabi Machine Transliteration", "abstract": "Machine Transliteration is to transcribe a word written in a script with approximate phonetic equivalence in another language. 

It is useful for machine translation, cross-lingual information retrieval, multilingual text and speech processing. Punjabi Machine Transliteration (PMT) is a special case of machine transliteration and is a process of converting a word from Shahmukhi (based on Arabic script) to Gurmukhi (derivation of Landa, Shardha and Takri, old scripts of Indian subcontinent), two scripts of Punjabi, irrespective of the type of word. The Punjabi Machine Transliteration System uses transliteration rules (character mappings and dependency rules) for transliteration of Shahmukhi words into Gurmukhi. The PMT system can transliterate every word written in Shahmukhi.", "keyphrases": ["shahmukhi", "arabic script", "punjabi machine transliteration"]} +{"id": "fung-cheung-2004-mining", "title": "Mining Very-Non-Parallel Corpora: Parallel Sentence and Lexicon Extraction via Bootstrapping and E", "abstract": "We present a method capable of extracting parallel sentences from far more disparate \u201cvery-non-parallel corpora\u201d than previous \u201ccomparable corpora\u201d methods, by exploiting bootstrapping on top of IBM Model 4 EM. Step 1 of our method, like previous methods, uses similarity measures to find matching documents in a corpus first, and then extracts parallel sentences as well as new word translations from these documents. But unlike previous methods, we extend this with an iterative bootstrapping framework based on the principle of \u201cfind-one-get-more\u201d, which claims that documents found to contain one pair of parallel sentences must contain others even if the documents are judged to be of low similarity. We re-match documents based on extracted sentence pairs, and refine the mining process iteratively until convergence. This novel \u201cfind-one-get-more\u201d principle allows us to add more parallel sentences from dissimilar documents, to the baseline set. Experimental results show that our proposed method is nearly 50% more effective than the baseline method without iteration. We also show that our method is effective in boosting the performance of the IBM Model 4 EM lexical learner as the latter, though stronger than Model 1 used in previous work, does not perform well on data from very-non-parallel corpus. The most challenging task is to extract bilingual sentences and lexicon from very-non-parallel data. Recent work (Munteanu et al., 2004, Zhao and Vogel, 2002) on extracting parallel sentences from comparable data, and others on extracting paraphrasing sentences from monolingual corpora (Barzilay and Elhadad 2003) are based on the \u201cfind-topic-extract-sentence\u201d principle which claims that parallel sentences only exist in document pairs with high similarity. They all use lexical information (e.g. word overlap, cosine similarity) to match documents first, before extracting sentences from these documents.", "keyphrases": ["lexicon extraction", "bootstrapping", "comparable corpora", "parallel sentence pair", "cross-lingual signal"]} +{"id": "yang-etal-2017-identifying-semantic", "title": "Identifying Semantic Edit Intentions from Revisions in Wikipedia", "abstract": "Most studies on human editing focus merely on syntactic revision operations, failing to capture the intentions behind revision changes, which are essential for facilitating the single and collaborative writing process. 

In this work, we develop in collaboration with Wikipedia editors a 13-category taxonomy of the semantic intention behind edits in Wikipedia articles. Using labeled article edits, we build a computational classifier of intentions that achieved a micro-averaged F1 score of 0.621. We use this model to investigate edit intention effectiveness: how different types of edits predict the retention of newcomers and changes in the quality of articles, two key concerns for Wikipedia today. Our analysis shows that the types of edits that users make in their first session predict their subsequent survival as Wikipedia editors, and articles in different stages need different types of edits.", "keyphrases": ["edit", "wikipedia", "semantic intention"]} +{"id": "clark-etal-2019-bam", "title": "BAM! Born-Again Multi-Task Networks for Natural Language Understanding", "abstract": "It can be challenging to train multi-task neural networks that outperform or even match their single-task counterparts. To help address this, we propose using knowledge distillation where single-task models teach a multi-task model. We enhance this training with teacher annealing, a novel method that gradually transitions the model from distillation to supervised learning, helping the multi-task model surpass its single-task teachers. We evaluate our approach by multi-task fine-tuning BERT on the GLUE benchmark. Our method consistently improves over standard single-task and multi-task training.", "keyphrases": ["natural language understanding", "counterpart", "multi-task model", "teacher", "bam"]} +{"id": "bender-koller-2020-climbing", "title": "Climbing towards NLU: On Meaning, Form, and Understanding in the Age of Data", "abstract": "The success of the large neural language models on many NLP tasks is exciting. However, we find that these successes sometimes lead to hype in which these models are being described as \u201cunderstanding\u201d language or capturing \u201cmeaning\u201d. In this position paper, we argue that a system trained only on form has a priori no way to learn meaning. In keeping with the ACL 2020 theme of \u201cTaking Stock of Where We've Been and Where We're Going\u201d, we argue that a clear understanding of the distinction between form and meaning will help guide the field towards better science around natural language understanding.", "keyphrases": ["nlu", "language understanding", "reason"]} +{"id": "zheng-etal-2018-multi", "title": "Multi-Reference Training with Pseudo-References for Neural Translation and Text Generation", "abstract": "Neural text generation, including neural machine translation, image captioning, and summarization, has been quite successful recently. However, during training time, typically only one reference is considered for each example, even though there are often multiple references available, e.g., 4 references in NIST MT evaluations, and 5 references in image captioning data. We first investigate several different ways of utilizing multiple human references during training. But more importantly, we then propose an algorithm to generate exponentially many pseudo-references by first compressing existing human references into lattices and then traversing them to generate new pseudo-references. 
These approaches lead to substantial improvements over strong baselines in both machine translation (+1.5 BLEU) and image captioning (+3.1 BLEU / +11.7 CIDEr).", "keyphrases": ["pseudo-reference", "text generation", "image captioning"]} +{"id": "zayed-etal-2020-contextual", "title": "Contextual Modulation for Relation-Level Metaphor Identification", "abstract": "Identifying metaphors in text is very challenging and requires comprehending the underlying comparison. The automation of this cognitive process has gained wide attention lately. However, the majority of existing approaches concentrate on word-level identification by treating the task as either single-word classification or sequential labelling without explicitly modelling the interaction between the metaphor components. On the other hand, while existing relation-level approaches implicitly model this interaction, they ignore the context where the metaphor occurs. In this work, we address these limitations by introducing a novel architecture for identifying relation-level metaphoric expressions of certain grammatical relations based on contextual modulation. In a methodology inspired by works in visual reasoning, our approach is based on conditioning the neural network computation on the deep contextualised features of the candidate expressions using feature-wise linear modulation. We demonstrate that the proposed architecture achieves state-of-the-art results on benchmark datasets. The proposed methodology is generic and could be applied to other textual classification problems that benefit from contextual interaction.", "keyphrases": ["metaphoric expression", "grammatical relation", "contextual modulation"]} +{"id": "valitutti-etal-2013-everything", "title": "\u201cLet Everything Turn Well in Your Wife\u201d: Generation of Adult Humor Using Lexical Constraints", "abstract": "We propose a method for automated generation of adult humor by lexical replacement and present empirical evaluation results of the obtained humor. We propose three types of lexical constraints as building blocks of humorous word substitution: constraints concerning the similarity of sounds or spellings of the original word and the substitute, a constraint requiring the substitute to be a taboo word, and constraints concerning the position and context of the replacement. Empirical evidence from extensive user studies indicates that these constraints can increase the effectiveness of humor generation significantly.", "keyphrases": ["adult humor", "lexical constraint", "creativity"]} +{"id": "meaney-etal-2021-semeval", "title": "SemEval 2021 Task 7: HaHackathon, Detecting and Rating Humor and Offense", "abstract": "SemEval 2021 Task 7, HaHackathon, was the first shared task to combine the previously separate domains of humor detection and offense detection. We collected 10,000 texts from Twitter and the Kaggle Short Jokes dataset, and had each annotated for humor and offense by 20 annotators aged 18-70. Our subtasks were binary humor detection, prediction of humor and offense ratings, and a novel controversy task: to predict if the variance in the humor ratings was higher than a specific threshold. The subtasks attracted 36-58 submissions, with most of the participants choosing to use pre-trained language models. Many of the highest performing teams also implemented additional optimization techniques, including task-adaptive training and adversarial training. 
The results suggest that the participating systems are well suited to humor detection, but that humor controversy is a more challenging task. We discuss which models excel in this task, which auxiliary techniques boost their performance, and analyze the errors which were not captured by the best systems.", "keyphrases": ["hahackathon", "humor", "offense", "semeval"]} +{"id": "mueller-etal-2013-efficient", "title": "Efficient Higher-Order CRFs for Morphological Tagging", "abstract": "Training higher-order conditional random fields is prohibitive for huge tag sets. We present an approximated conditional random field using coarse-to-fine decoding and early updating. We show that our implementation yields fast and accurate morphological taggers across six languages with different morphological properties and that across languages higher-order models give significant improvements over first-order models.", "keyphrases": ["crf", "morphological tagging", "random field", "coarse-to-fine decoding", "structured prediction model"]} +{"id": "deriu-etal-2020-spot", "title": "Spot The Bot: A Robust and Efficient Framework for the Evaluation of Conversational Dialogue Systems", "abstract": "The lack of time-efficient and reliable evaluation methods is hampering the development of conversational dialogue systems (chat bots). Evaluations that require humans to converse with chat bots are time and cost intensive, put high cognitive demands on the human judges, and tend to yield low quality results. In this work, we introduce Spot The Bot, a cost-efficient and robust evaluation framework that replaces human-bot conversations with conversations between bots. Human judges then only annotate for each entity in a conversation whether they think it is human or not (assuming there are human participants in these conversations). These annotations then allow us to rank chat bots regarding their ability to mimic conversational behaviour of humans. Since we expect that all bots are eventually recognized as such, we incorporate a metric that measures which chat bot is able to uphold human-like behavior the longest, i.e., Survival Analysis. This metric has the ability to correlate a bot's performance to certain of its characteristics (e.g., fluency or sensibleness), yielding interpretable results. The comparably low cost of our framework allows for frequent evaluations of chatbots during their evaluation cycle. We empirically validate our claims by applying Spot The Bot to three domains, evaluating several state-of-the-art chat bots, and drawing comparisons to related work. The framework is released as a ready-to-use tool.", "keyphrases": ["bot", "conversation", "human judge", "chatbot"]} +{"id": "mani-etal-2006-machine", "title": "Machine Learning of Temporal Relations", "abstract": "This paper investigates a machine learning approach for temporally ordering and anchoring events in natural language texts. To address data sparseness, we used temporal reasoning as an over-sampling method to dramatically expand the amount of training data, resulting in predictive accuracy on link labeling as high as 93% using a Maximum Entropy classifier on human annotated data. 
This method compared favorably against a series of increasingly sophisticated baselines involving expansion of rules derived from human intuitions.", "keyphrases": ["temporal relation", "reasoning", "series", "machine learning", "relation extraction"]} +{"id": "anderson-etal-2017-guided", "title": "Guided Open Vocabulary Image Captioning with Constrained Beam Search", "abstract": "Existing image captioning models do not generalize well to out-of-domain images containing novel scenes or objects. This limitation severely hinders the use of these models in real world applications dealing with images in the wild. We address this problem using a flexible approach that enables existing deep captioning architectures to take advantage of image taggers at test time, without re-training. Our method uses constrained beam search to force the inclusion of selected tag words in the output, and fixed, pretrained word embeddings to facilitate vocabulary expansion to previously unseen tag words. Using this approach we achieve state of the art results for out-of-domain captioning on MSCOCO (and improved results for in-domain captioning). Perhaps surprisingly, our results significantly outperform approaches that incorporate the same tag predictions into the learning algorithm. We also show that we can significantly improve the quality of generated ImageNet captions by leveraging ground-truth labels.", "keyphrases": ["image", "caption", "constrained beam search", "vocabulary expansion"]} +{"id": "sumita-etal-2005-measuring", "title": "Measuring Non-native Speakers' Proficiency of English by Using a Test with Automatically-Generated Fill-in-the-Blank Questions", "abstract": "This paper proposes the automatic generation of Fill-in-the-Blank Questions (FBQs) together with testing based on Item Response Theory (IRT) to measure English proficiency. First, the proposal generates an FBQ from a given sentence in English. The position of a blank in the sentence is determined, and the word at that position is considered as the correct choice. The candidates for incorrect choices for the blank are hypothesized through a thesaurus. Then, each of the candidates is verified by using the Web. Finally, the blanked sentence, the correct choice and the incorrect choices surviving the verification are together laid out to form the FBQ. Second, the proficiency of non-native speakers who took the test consisting of such FBQs is estimated through IRT. \n \nOur experimental results suggest that: (1) the generated questions plus IRT estimate the non-native speakers' English proficiency; (2) while on the other hand, the test can be completed almost perfectly by English native speakers; and (3) the number of questions can be reduced by using item information in IRT. \n \nThe proposed method provides teachers and testers with a tool that reduces time and expenditure for testing English proficiency.", "keyphrases": ["proficiency", "fill-in-the-blank question", "english language learning"]} +{"id": "stratos-2017-sub", "title": "A Sub-Character Architecture for Korean Language Processing", "abstract": "We introduce a novel sub-character architecture that exploits a unique compositional structure of the Korean language. Our method decomposes each character into a small set of primitive phonetic units called jamo letters from which character- and word-level representations are induced. The jamo letters divulge syntactic and semantic information that is difficult to access with conventional character-level units. 
They greatly alleviate the data sparsity problem, reducing the observation space to 1.6% of the original while increasing accuracy in our experiments. We apply our architecture to dependency parsing and achieve dramatic improvement over strong lexical baselines.", "keyphrases": ["sub-character architecture", "korean language processing", "primitive phonetic unit"]} +{"id": "lazic-etal-2015-plato", "title": "Plato: A Selective Context Model for Entity Resolution", "abstract": "We present Plato, a probabilistic model for entity resolution that includes a novel approach for handling noisy or uninformative features, and supplements labeled training data derived from Wikipedia with a very large unlabeled text corpus. Training and inference in the proposed model can easily be distributed across many servers, allowing it to scale to over 10^7 entities. We evaluate Plato on three standard datasets for entity resolution. Our approach achieves the best results to-date on TAC KBP 2011 and is highly competitive on both the CoNLL 2003 and TAC KBP 2012 datasets.", "keyphrases": ["context model", "entity resolution", "wikipedia", "plato", "textual feature"]} +{"id": "luong-etal-2015-bilingual", "title": "Bilingual Word Representations with Monolingual Quality in Mind", "abstract": "Recent work in learning bilingual representations tends to tailor towards achieving good performance on bilingual tasks, most often the crosslingual document classification (CLDC) evaluation, but to the detriment of preserving clustering structures of word representations monolingually. In this work, we propose a joint model to learn word representations from scratch that utilizes both the context co-occurrence information through the monolingual component and the meaning equivalent signals from the bilingual constraint. Specifically, we extend the recently popular skipgram model to learn high quality bilingual representations efficiently. Our learned embeddings achieve a new state-of-the-art accuracy of 80.3 for the German to English CLDC task and a highly competitive performance of 90.7 for the other classification direction. At the same time, our models outperform the best embeddings from past bilingual representation work by a large margin in the monolingual word similarity evaluation.", "keyphrases": ["scratch", "word embedding", "bilingual model", "joint training"]} +{"id": "fabbri-etal-2021-summeval", "title": "SummEval: Re-evaluating Summarization Evaluation", "abstract": "The scarcity of comprehensive up-to-date studies on evaluation metrics for text summarization and the lack of consensus regarding evaluation protocols continue to inhibit progress. 
We address the existing shortcomings of summarization evaluation methods along five dimensions: 1) we re-evaluate 14 automatic evaluation metrics in a comprehensive and consistent fashion using neural summarization model outputs along with expert and crowd-sourced human annotations; 2) we consistently benchmark 23 recent summarization models using the aforementioned automatic evaluation metrics; 3) we assemble the largest collection of summaries generated by models trained on the CNN/DailyMail news dataset and share it in a unified format; 4) we implement and share a toolkit that provides an extensible and unified API for evaluating summarization models across a broad range of automatic metrics; and 5) we assemble and share the largest and most diverse, in terms of model types, collection of human judgments of model-generated summaries on the CNN/Daily Mail dataset annotated by both expert judges and crowd-sourced workers. We hope that this work will help promote a more complete evaluation protocol for text summarization as well as advance research in developing evaluation metrics that better correlate with human judgments.", "keyphrases": ["evaluation metric", "summarization model", "summeval", "human judgement", "insight"]} +{"id": "yang-etal-2017-semi", "title": "Semi-Supervised QA with Generative Domain-Adaptive Nets", "abstract": "We study the problem of semi-supervised question answering\u2014utilizing unlabeled text to boost the performance of question answering models. We propose a novel training framework, the Generative Domain-Adaptive Nets. In this framework, we train a generative model to generate questions based on the unlabeled text, and combine model-generated questions with human-generated questions for training question answering models. We develop novel domain adaptation algorithms, based on reinforcement learning, to alleviate the discrepancy between the model-generated data distribution and the human-generated data distribution. Experiments show that our proposed framework obtains substantial improvement from unlabeled text.", "keyphrases": ["generative domain-adaptive net", "unlabeled text", "question generation", "passage", "rule-based method"]} +{"id": "antypas-etal-2021-covid", "title": "COVID-19 and Misinformation: A Large-Scale Lexical Analysis on Twitter", "abstract": "Social media is often used by individuals and organisations as a platform to spread misinformation. With the recent coronavirus pandemic we have seen a surge of misinformation on Twitter, posing a danger to public health. In this paper, we compile a large COVID-19 Twitter misinformation corpus and perform an analysis to discover patterns with respect to vocabulary usage. Among others, our analysis reveals that the variety of topics and vocabulary usage are considerably more limited and negative in tweets related to misinformation than in randomly extracted tweets. In addition to our qualitative analysis, our experimental results show that a simple linear model based only on lexical features is effective in identifying misinformation-related tweets (with accuracy over 80%), providing evidence for the fact that the vocabulary used in misinformation largely differs from generic tweets.", "keyphrases": ["misinformation", "twitter", "lexical feature"]} +{"id": "tan-etal-2017-neural", "title": "Neural Post-Editing Based on Quality Estimation", "abstract": "Automatic post-editing (APE) is a challenging task in the WMT evaluation campaign. 
We find that only a small number of edit operations are required for most machine translation outputs, through analysis of the training set of the WMT17 APE en-de task. Based on this statistical analysis, two neural post-editing (NPE) models are trained depending on the number of edits: single edit and minor edits. The improved quality estimation (QE) approach is exploited to rank models, and select the best translation as the post-edited output from the n-best list of translation hypotheses generated by the best APE model and the raw translation system. Experimental results on the WMT16 APE test set show that the proposed approach significantly outperformed the baseline. Our approach can bring considerable relief from the overcorrection problem in APE.", "keyphrases": ["quality estimation", "ape", "neural post-editing"]} +{"id": "zeng-etal-2020-double", "title": "Double Graph Based Reasoning for Document-level Relation Extraction", "abstract": "Document-level relation extraction aims to extract relations among entities within a document. Different from sentence-level relation extraction, it requires reasoning over multiple sentences across paragraphs. In this paper, we propose Graph Aggregation-and-Inference Network (GAIN), a method to recognize such relations for long paragraphs. GAIN constructs two graphs, a heterogeneous mention-level graph (MG) and an entity-level graph (EG). The former captures complex interaction among different mentions and the latter aggregates mentions underlying the same entities. Based on the graphs we propose a novel path reasoning mechanism to infer relations between entities. Experiments on the public dataset, DocRED, show GAIN achieves a significant performance improvement (2.85 on F1) over the previous state-of-the-art. Our code is available at .", "keyphrases": ["document-level relation extraction", "graph aggregation-and-inference network", "same entity", "path reasoning mechanism", "double graph"]} +{"id": "nivre-nilsson-2005-pseudo", "title": "Pseudo-Projective Dependency Parsing", "abstract": "In order to realize the full potential of dependency-based syntactic parsing, it is desirable to allow non-projective dependency structures. We show how a data-driven deterministic dependency parser, in itself restricted to projective structures, can be combined with graph transformation techniques to produce non-projective structures. Experiments using data from the Prague Dependency Treebank show that the combined system can handle non-projective constructions with a precision sufficient to yield a significant improvement in overall parsing accuracy. This leads to the best reported performance for robust non-projective parsing of Czech.", "keyphrases": ["non-projectivity", "transformation", "pseudo-projective parsing", "dependency tree", "transition-based parsing"]} +{"id": "collins-etal-2005-discriminative", "title": "Discriminative Syntactic Language Modeling for Speech Recognition", "abstract": "We describe a method for discriminative training of a language model that makes use of syntactic features. We follow a reranking approach, where a baseline recogniser is used to produce 1000-best output for each acoustic input, and a second \"reranking\" model is then used to choose an utterance from these 1000-best lists. The reranking model makes use of syntactic features together with a parameter estimation method that is based on the perceptron algorithm. We describe experiments on the Switchboard speech recognition task. 
The syntactic features provide an additional 0.3% reduction in test-set error rate beyond the model of (Roark et al., 2004a; Roark et al., 2004b) (significant at p < 0.001), which makes use of a discriminatively trained n-gram model, giving a total reduction of 1.2% over the baseline Switchboard system.", "keyphrases": ["speech recognition", "syntactic feature", "reduction", "n-gram"]} +{"id": "wang-etal-2020-global", "title": "Global-to-Local Neural Networks for Document-Level Relation Extraction", "abstract": "Relation extraction (RE) aims to identify the semantic relations between named entities in text. Recent years have witnessed it raised to the document level, which requires complex reasoning with entities and mentions throughout an entire document. In this paper, we propose a novel model for document-level RE, by encoding the document information in terms of entity global and local representations as well as context relation representations. Entity global representations model the semantic information of all entities in the document, entity local representations aggregate the contextual information of multiple mentions of specific entities, and context relation representations encode the topic information of other relations. Experimental results demonstrate that our model achieves superior performance on two public datasets for document-level RE. It is particularly effective in extracting relations between entities of long distance and having multiple mentions.", "keyphrases": ["relation extraction", "specific entity", "document graph"]} +{"id": "kaljahi-etal-2014-syntax", "title": "Syntax and Semantics in Quality Estimation of Machine Translation", "abstract": "We employ syntactic and semantic information in estimating the quality of machine translation from a new data set which contains source text from English customer support forums and target text consisting of its machine translation into French. These translations have been both post-edited and evaluated by professional translators. We find that quality estimation using syntactic and semantic information on this data set can hardly improve over a baseline which uses only surface features. However, the performance can be improved when they are combined with such surface features. We also introduce a novel metric to measure translation adequacy based on predicate-argument structure match using word alignments. While word alignments can be reliably used, the two main factors affecting the performance of all semantic-based methods seem to be the low quality of semantic role labelling (especially on ill-formed text) and the lack of nominal predicate annotation.", "keyphrases": ["quality estimation", "machine translation", "semantic information"]} +{"id": "xiao-etal-2019-lattice", "title": "Lattice-Based Transformer Encoder for Neural Machine Translation", "abstract": "Neural machine translation (NMT) takes deterministic sequences for source representations. However, either word-level or subword-level segmentations have multiple choices to split a source sequence with different word segmentors or different subword vocabulary sizes. We hypothesize that the diversity in segmentations may affect the NMT performance. To integrate different segmentations with the state-of-the-art NMT model, Transformer, we propose lattice-based encoders to explore effective word or subword representation in an automatic way during training. We propose two methods: 1) lattice positional encoding and 2) lattice-aware self-attention. 
These two methods can be used together and are complementary to each other, further improving translation performance. Experimental results show the superiority of lattice-based encoders in word-level and subword-level representations over the conventional Transformer encoder.", "keyphrases": ["transformer encoder", "encoding", "neural machine translation"]} +{"id": "liu-etal-2018-efficient-low", "title": "Efficient Low-rank Multimodal Fusion With Modality-Specific Factors", "abstract": "Multimodal research is an emerging field of artificial intelligence, and one of the main research problems in this field is multimodal fusion. The fusion of multimodal data is the process of integrating multiple unimodal representations into one compact multimodal representation. Previous research in this field has exploited the expressiveness of tensors for multimodal representation. However, these methods often suffer from exponential increase in dimensions and in computational complexity introduced by transformation of input into tensor. In this paper, we propose the Low-rank Multimodal Fusion method, which performs multimodal fusion using low-rank tensors to improve efficiency. We evaluate our model on three different tasks: multimodal sentiment analysis, speaker trait analysis, and emotion recognition. Our model achieves competitive results on all these tasks while drastically reducing computational complexity. Additional experiments also show that our model can perform robustly for a wide range of low-rank settings, and is indeed much more efficient in both training and inference compared to other methods that utilize tensor representations.", "keyphrases": ["low-rank multimodal fusion", "factor", "complexity", "lmf"]} +{"id": "abu-farha-magdy-2021-benchmarking", "title": "Benchmarking Transformer-based Language Models for Arabic Sentiment and Sarcasm Detection", "abstract": "The introduction of transformer-based language models has been a revolutionary step for natural language processing (NLP) research. These models, such as BERT, GPT and ELECTRA, led to state-of-the-art performance in many NLP tasks. Most of these models were initially developed for English and other languages followed later. Recently, several Arabic-specific models started emerging. However, there are limited direct comparisons between these models. In this paper, we evaluate the performance of 24 of these models on Arabic sentiment and sarcasm detection. Our results show that the models achieving the best performance are those that are trained on only Arabic data, including dialectal Arabic, and use a larger number of parameters, such as the recently released MARBERT. However, we noticed that AraELECTRA is one of the top performing models while being much more efficient in its computational cost. Finally, the experiments on AraGPT2 variants showed low performance compared to BERT models, which indicates that it might not be suitable for classification tasks.", "keyphrases": ["language model", "arabic sentiment", "sarcasm detection"]} +{"id": "bastings-etal-2018-jump", "title": "Jump to better conclusions: SCAN both left and right", "abstract": "Lake and Baroni (2018) recently introduced the SCAN data set, which consists of simple commands paired with action sequences and is intended to test the strong generalization abilities of recurrent sequence-to-sequence models. Their initial experiments suggested that such models may fail because they lack the ability to extract systematic rules. 
Here, we take a closer look at SCAN and show that it does not always capture the kind of generalization that it was designed for. To mitigate this we propose a complementary dataset, which requires mapping actions back to the original commands, called NACS. We show that models that do well on SCAN do not necessarily do well on NACS, and that NACS exhibits properties more closely aligned with realistic use-cases for sequence-to-sequence models.", "keyphrases": ["conclusion", "scan", "generalization ability", "nacs"]} +{"id": "scarton-etal-2010-simplifica", "title": "SIMPLIFICA: a tool for authoring simplified texts in Brazilian Portuguese guided by readability assessments", "abstract": "SIMPLIFICA is an authoring tool for producing simplified texts in Portuguese. It provides functionalities for lexical and syntactic simplification and for readability assessment. This tool is the first of its kind for Portuguese; it brings innovative aspects for simplification tools in general, since the authoring process is guided by readability assessment based on the levels of literacy of the Brazilian population.", "keyphrases": ["portuguese", "readability assessment", "simplifica"]} +{"id": "chen-etal-2020-question", "title": "Question Directed Graph Attention Network for Numerical Reasoning over Text", "abstract": "Numerical reasoning over texts, such as addition, subtraction, sorting and counting, is a challenging machine reading comprehension task, since it requires both natural language understanding and arithmetic computation. To address this challenge, we propose a heterogeneous graph representation for the context of the passage and question needed for such reasoning, and design a question directed graph attention network to drive multi-step numerical reasoning over this context graph. Our model, which combines deep learning and graph reasoning, achieves remarkable results in benchmark datasets such as DROP.", "keyphrases": ["graph attention network", "numerical reasoning", "passage"]} +{"id": "nakagawa-etal-2010-dependency", "title": "Dependency Tree-based Sentiment Classification using CRFs with Hidden Variables", "abstract": "In this paper, we present a dependency tree-based method for sentiment classification of Japanese and English subjective sentences using conditional random fields with hidden variables. Subjective sentences often contain words which reverse the sentiment polarities of other words. Therefore, interactions between words need to be considered in sentiment classification, which is difficult to handle with simple bag-of-words approaches, and the syntactic dependency structures of subjective sentences are exploited in our method. In the method, the sentiment polarity of each dependency subtree in a sentence, which is not observable in training data, is represented by a hidden variable. The polarity of the whole sentence is calculated in consideration of interactions between the hidden variables. Sum-product belief propagation is used for inference. 
Experimental results of sentiment classification for Japanese and English subjective sentences showed that the method performs better than other methods based on bag-of-features.", "keyphrases": ["sentiment classification", "variable", "syntactic structure"]} +{"id": "ma-etal-2016-letter", "title": "Letter Sequence Labeling for Compound Splitting", "abstract": "For languages such as German where compounds occur frequently and are written as single tokens, a wide variety of NLP applications benefit from recognizing and splitting compounds. As the traditional word frequency-based approach to compound splitting has several drawbacks, this paper introduces a letter sequence labeling approach, which can utilize rich word form features to build discriminative learning models that are optimized for splitting. Experiments show that the proposed method significantly outperforms state-of-the-art compound splitters.", "keyphrases": ["compound splitting", "letter sequence", "conditional random fields"]} +{"id": "cao-etal-2018-rst", "title": "The RST Spanish-Chinese Treebank", "abstract": "Discourse analysis is necessary for different tasks of Natural Language Processing (NLP). As Spanish and Chinese are two of the most spoken languages in the world, discourse analysis between them is important for NLP research. This paper aims to present the first open Spanish-Chinese parallel corpus annotated with discourse information, whose theoretical framework is based on the Rhetorical Structure Theory (RST). We have evaluated and harmonized each annotation part to obtain a high-quality annotated corpus. The corpus is already available to the public.", "keyphrases": ["rst", "spanish-chinese treebank", "discourse analysis"]} +{"id": "lee-etal-2013-deterministic", "title": "Deterministic Coreference Resolution Based on Entity-Centric, Precision-Ranked Rules", "abstract": "We propose a new deterministic approach to coreference resolution that combines the global information and precise features of modern machine-learning models with the transparency and modularity of deterministic, rule-based systems. Our sieve architecture applies a battery of deterministic coreference models one at a time from highest to lowest precision, where each model builds on the previous model's cluster output. The two stages of our sieve-based architecture, a mention detection stage that heavily favors recall, followed by coreference sieves that are precision-oriented, offer a powerful way to achieve both high precision and high recall. Further, our approach makes use of global information through an entity-centric model that encourages the sharing of features across all mentions that point to the same real-world entity. Despite its simplicity, our approach gives state-of-the-art performance on several corpora and genres, and has also been incorporated into hybrid state-of-the-art coreference systems for Chinese and Arabic. 
Our system thus offers a new paradigm for combining knowledge in rule-based systems that has implications throughout computational linguistics.", "keyphrases": ["coreference resolution", "rule-based system", "mention", "entity-centric model", "semantic knowledge"]} +{"id": "kondratyuk-straka-2019-75", "title": "75 Languages, 1 Model: Parsing Universal Dependencies Universally", "abstract": "We present UDify, a multilingual multi-task model capable of accurately predicting universal part-of-speech, morphological features, lemmas, and dependency trees simultaneously for all 124 Universal Dependencies treebanks across 75 languages. By leveraging a multilingual BERT self-attention model pretrained on 104 languages, we found that fine-tuning it on all datasets concatenated together with simple softmax classifiers for each UD task can meet or exceed state-of-the-art UPOS, UFeats, Lemmas, (and especially) UAS, and LAS scores, without requiring any recurrent or language-specific components. We evaluate UDify for multilingual learning, showing that low-resource languages benefit the most from cross-linguistic annotations. We also evaluate for zero-shot learning, with results suggesting that multilingual training provides strong UD predictions even for languages that neither UDify nor BERT have ever been trained on.", "keyphrases": ["universal dependency", "udify", "bert", "low-resource language", "multilingual training"]} +{"id": "banea-mihalcea-2011-word", "title": "Word Sense Disambiguation with Multilingual Features", "abstract": "This paper explores the role played by a multilingual feature representation for the task of word sense disambiguation. We translate the context of an ambiguous word in multiple languages, and show through experiments on standard datasets that by using a multilingual vector space we can obtain error rate reductions of up to 25%, as compared to a monolingual classifier.", "keyphrases": ["multiple language", "vector space", "word sense disambiguation"]} +{"id": "zheng-lapata-2019-sentence", "title": "Sentence Centrality Revisited for Unsupervised Summarization", "abstract": "Single document summarization has enjoyed renewed interest in recent years thanks to the popularity of neural network models and the availability of large-scale datasets. In this paper we develop an unsupervised approach arguing that it is unrealistic to expect large-scale and high-quality training data to be available or created for different types of summaries, domains, or languages. We revisit a popular graph-based ranking algorithm and modify how node (aka sentence) centrality is computed in two ways: (a) we employ BERT, a state-of-the-art neural representation learning model to better capture sentential meaning and (b) we build graphs with directed edges arguing that the contribution of any two nodes to their respective centrality is influenced by their relative position in a document. Experimental results on three news summarization datasets representative of different languages and writing styles show that our approach outperforms strong baselines by a wide margin.", "keyphrases": ["centrality", "document summarization", "bert", "position", "unsupervised model"]} +{"id": "hassan-etal-2007-supertagged", "title": "Supertagged Phrase-Based Statistical Machine Translation", "abstract": "Until quite recently, extending Phrase-based Statistical Machine Translation (PBSMT) with syntactic structure caused system performance to deteriorate. 
In this work we show that incorporating lexical syntactic descriptions in the form of supertags can yield significantly better PBSMT systems. We describe a novel PBSMT model that integrates supertags into the target language model and the target side of the translation model. Two kinds of supertags are employed: those from Lexicalized Tree-Adjoining Grammar and Combinatory Categorial Grammar. Despite the differences between these two approaches, the supertaggers give similar improvements. In addition to supertagging, we also explore the utility of a surface global grammaticality measure based on combinatory operators. We perform various experiments on the Arabic to English NIST 2005 test set addressing issues such as sparseness, scalability and the utility of system subcomponents. Our best result (0.4688 BLEU) improves by 6.1% relative to a state-of-the-art PBSMT model, which compares very favourably with the leading systems on the NIST 2005 task.", "keyphrases": ["statistical machine translation", "target side", "supertag"]} +{"id": "howes-etal-2014-linguistic", "title": "Linguistic Indicators of Severity and Progress in Online Text-based Therapy for Depression", "abstract": "Mental illnesses such as depression and anxiety are highly prevalent, and therapy is increasingly being offered online. This new setting is a departure from face-to-face therapy, and offers both a challenge and an opportunity \u2010 it is not yet known what features or approaches are likely to lead to successful outcomes in such a different medium, but online text-based therapy provides large amounts of data for linguistic analysis. We present an initial investigation into the application of computational linguistic techniques, such as topic and sentiment modelling, to online therapy for depression and anxiety. We find that important measures such as symptom severity can be predicted with comparable accuracy to face-to-face data, using general features such as discussion topic and sentiment; however, measures of patient progress are captured only by finer-grained lexical features, suggesting that aspects of style or dialogue structure may also be important.", "keyphrases": ["online text-based therapy", "depression", "lexical feature"]} +{"id": "jagarlamudi-etal-2012-incorporating", "title": "Incorporating Lexical Priors into Topic Models", "abstract": "Topic models have great potential for helping users understand document corpora. This potential is stymied by their purely unsupervised nature, which often leads to topics that are neither entirely meaningful nor effective in extrinsic tasks (Chang et al., 2009). We propose a simple and effective way to guide topic models to learn topics of specific interest to a user. We achieve this by providing sets of seed words that a user believes are representative of the underlying topics in a corpus. Our model uses these seeds to improve both topic-word distributions (by biasing topics to produce appropriate seed words) and to improve document-topic distributions (by biasing documents to select topics related to the seed words they contain). 
Extrinsic evaluation on a document clustering task reveals a significant improvement when using seed information, even over other models that use seed information naively.", "keyphrases": ["prior", "seed word", "domain knowledge"]} +{"id": "wang-etal-2019-second", "title": "Second-Order Semantic Dependency Parsing with End-to-End Neural Networks", "abstract": "Semantic dependency parsing aims to identify semantic relationships between words in a sentence that form a graph. In this paper, we propose a second-order semantic dependency parser, which takes into consideration not only individual dependency edges but also interactions between pairs of edges. We show that second-order parsing can be approximated using mean field (MF) variational inference or loopy belief propagation (LBP). We can unfold both algorithms as recurrent layers of a neural network and therefore can train the parser in an end-to-end manner. Our experiments show that our approach achieves state-of-the-art performance.", "keyphrases": ["semantic dependency", "recurrent layer", "second-order information"]} +{"id": "an-etal-2020-multimodal", "title": "Multimodal Topic-Enriched Auxiliary Learning for Depression Detection", "abstract": "From the perspective of health psychology, human beings with long-term and sustained negativity are highly likely to be diagnosed with depression. Inspired by this, we argue that the global topic information derived from user-generated contents (e.g., texts and images) is crucial to boost the performance of the depression detection task, though this information has been neglected by almost all previous studies on depression detection. To this end, we propose a new Multimodal Topic-enriched Auxiliary Learning (MTAL) approach, aiming at capturing the topic information inside different modalities (i.e., texts and images) for depression detection. Especially, in our approach, a modality-agnostic topic model is proposed to be capable of mining the topical clues from either the discrete textual signals or the continuous visual signals. On this basis, the topic modeling w.r.t. the two modalities is cast as two auxiliary tasks for improving the performance of the primary task (i.e., depression detection). Finally, the detailed evaluation demonstrates the great advantage of our MTAL approach to depression detection over the state-of-the-art baselines. This justifies the importance of the multimodal topic information to depression detection and the effectiveness of our approach in capturing such information.", "keyphrases": ["auxiliary learning", "depression detection", "topic model"]} +{"id": "kovelamudi-etal-2011-domain", "title": "Domain Independent Model for Product Attribute Extraction from User Reviews using Wikipedia", "abstract": "The world of E-commerce is expanding, posing a large arena of products, their descriptions, customer and professional reviews that are pertinent to them. Most of the product attribute extraction techniques in the literature work on structured descriptions using several text analysis tools. However, attributes in these descriptions are limited compared to those in customer reviews of a product, where users discuss deeper and more specific attributes. In this paper, we propose a novel supervised domain independent model for product attribute extraction from user reviews. 
The user-generated content contains unstructured and semi-structured text where conventional grammar-dependent language tools such as part-of-speech taggers, named entity recognizers, and parsers do not perform at expected levels. We used Wikipedia and the Web to identify product attributes from customer reviews and achieved an F1 score of 0.73.", "keyphrases": ["product attribute extraction", "user review", "wikipedia"]} +{"id": "shutova-etal-2013-statistical", "title": "Statistical Metaphor Processing", "abstract": "Metaphor is highly frequent in language, which makes its computational processing indispensable for real-world NLP applications addressing semantic tasks. Previous approaches to metaphor modeling rely on task-specific hand-coded knowledge and operate on a limited domain or a subset of phenomena. We present the first integrated open-domain statistical model of metaphor processing in unrestricted text. Our method first identifies metaphorical expressions in running text and then paraphrases them with their literal paraphrases. Such a text-to-text model of metaphor interpretation is compatible with other NLP applications that can benefit from metaphor resolution. Our approach is minimally supervised, relies on the state-of-the-art parsing and lexical acquisition technologies (distributional clustering and selectional preference induction), and operates with a high accuracy.", "keyphrases": ["metaphor", "selectional preference", "increase"]} +{"id": "dasigi-etal-2012-genre", "title": "Genre Independent Subgroup Detection in Online Discussion Threads: A Study of Implicit Attitude using Textual Latent Semantics", "abstract": "We describe an unsupervised approach to the problem of automatically detecting subgroups of people holding similar opinions in a discussion thread. An intuitive way of identifying this is to detect the attitudes of discussants towards each other or named entities or topics mentioned in the discussion. Sentiment tags play an important role in this detection, but we also note another dimension to the detection of people\u2019s attitudes in a discussion: if two persons share the same opinion, they tend to use similar language content. We consider the latter to be an implicit attitude. In this paper, we investigate the impact of implicit and explicit attitude in two genres of social media discussion data, more formal Wikipedia discussions and a debate discussion forum that is much more informal. Experimental results strongly suggest that implicit attitude is an important complement for explicit attitudes (expressed via sentiment) and it can improve the sub-group detection performance independent of genre.", "keyphrases": ["subgroup", "discussion thread", "implicit attitude"]} +{"id": "briakou-carpuat-2021-beyond", "title": "Beyond Noise: Mitigating the Impact of Fine-grained Semantic Divergences on Neural Machine Translation", "abstract": "While it has been shown that Neural Machine Translation (NMT) is highly sensitive to noisy parallel training samples, prior work treats all types of mismatches between source and target as noise. As a result, it remains unclear how samples that are mostly equivalent but contain a small number of semantically divergent tokens impact NMT training. To close this gap, we analyze the impact of different types of fine-grained semantic divergences on Transformer models. We show that models trained on synthetic divergences output degenerated text more frequently and are less confident in their predictions. 
Based on these findings, we introduce a divergent-aware NMT framework that uses factors to help NMT recover from the degradation caused by naturally occurring divergences, improving both translation quality and model calibration on EN-FR tasks.", "keyphrases": ["noise", "neural machine translation", "divergence output"]} +{"id": "wei-etal-2020-novel", "title": "A Novel Cascade Binary Tagging Framework for Relational Triple Extraction", "abstract": "Extracting relational triples from unstructured text is crucial for large-scale knowledge graph construction. However, few existing works excel in solving the overlapping triple problem where multiple relational triples in the same sentence share the same entities. In this work, we introduce a fresh perspective to revisit the relational triple extraction task and propose a novel cascade binary tagging framework (CasRel) derived from a principled problem formulation. Instead of treating relations as discrete labels as in previous works, our new framework models relations as functions that map subjects to objects in a sentence, which naturally handles the overlapping problem. Experiments show that the CasRel framework already outperforms state-of-the-art methods even when its encoder module uses a randomly initialized BERT encoder, showing the power of the new tagging framework. It enjoys further performance boost when employing a pre-trained BERT encoder, outperforming the strongest baseline by 17.5 and 30.2 absolute gain in F1-score on two public datasets NYT and WebNLG, respectively. In-depth analysis on different scenarios of overlapping triples shows that the method delivers consistent performance gain across all these scenarios. The source code and data are released online.", "keyphrases": ["binary tagging framework", "relational triple extraction", "unstructured text"]} +{"id": "long-etal-2017-world", "title": "World Knowledge for Reading Comprehension: Rare Entity Prediction with Hierarchical LSTMs Using External Descriptions", "abstract": "Humans interpret texts with respect to some background information, or world knowledge, and we would like to develop automatic reading comprehension systems that can do the same. In this paper, we introduce a task and several models to drive progress towards this goal. In particular, we propose the task of rare entity prediction: given a web document with several entities removed, models are tasked with predicting the correct missing entities conditioned on the document context and the lexical resources. This task is challenging due to the diversity of language styles and the extremely large number of rare entities. We propose two recurrent neural network architectures which make use of external knowledge in the form of entity descriptions. Our experiments show that our hierarchical LSTM model performs significantly better at the rare entity prediction task than those that do not make use of external resources.", "keyphrases": ["rare entity prediction", "reading comprehension system", "world knowledge"]} +{"id": "liu-etal-2010-pem", "title": "PEM: A Paraphrase Evaluation Metric Exploiting Parallel Texts", "abstract": "We present PEM, the first fully automatic metric to evaluate the quality of paraphrases, and consequently, that of paraphrase generation systems. Our metric is based on three criteria: adequacy, fluency, and lexical dissimilarity. 
The key component in our metric is a robust and shallow semantic similarity measure based on pivot language N-grams that allows us to approximate adequacy independently of lexical similarity. Human evaluation shows that PEM achieves high correlation with human judgments.", "keyphrases": ["paraphrase", "automatic metric", "lexical dissimilarity", "pem"]} +{"id": "weller-seppi-2019-humor", "title": "Humor Detection: A Transformer Gets the Last Laugh", "abstract": "Much previous work has been done in attempting to identify humor in text. In this paper we extend that capability by proposing a new task: assessing whether or not a joke is humorous. We present a novel way of approaching this problem by building a model that learns to identify humorous jokes based on ratings gleaned from Reddit pages, consisting of almost 16,000 labeled instances. Using these ratings to determine the level of humor, we then employ a Transformer architecture for its advantages in learning from sentence context. We demonstrate the effectiveness of this approach and show results that are comparable to human performance. We further demonstrate our model's increased capabilities on humor identification problems, such as the previously created datasets for short jokes and puns. These experiments show that this method outperforms all previous work done on these tasks, with an F-measure of 93.1% for the Puns dataset and 98.6% on the Short Jokes dataset.", "keyphrases": ["joke", "human performance", "humor detection", "bert", "annotator"]} +{"id": "beigman-klebanov-flor-2013-associative", "title": "Associative Texture Is Lost In Translation", "abstract": "We present a suggestive finding regarding the loss of associative texture in the process of machine translation, using comparisons between (a) original and backtranslated texts, (b) reference and system translations, and (c) better and worse MT systems. We represent the amount of association in a text using word association profile \u2010 a distribution of pointwise mutual information between all pairs of content word types in a text. We use the average of the distribution, which we term lexical tightness, as a single measure of the amount of association in a text. We show that the lexical tightness of human-composed texts is higher than that of the machine translated materials; human references are tighter than machine translations, and better MT systems produce lexically tighter translations. While the phenomenon of the loss of associative texture has been theoretically predicted by translation scholars, we present a measure capable of quantifying the extent of this phenomenon.", "keyphrases": ["loss", "machine translation", "lexical tightness", "associative texture"]} +{"id": "xu-etal-2020-end", "title": "End-to-End Slot Alignment and Recognition for Cross-Lingual NLU", "abstract": "Natural language understanding (NLU) in the context of goal-oriented dialog systems typically includes intent classification and slot labeling tasks. Existing methods to expand an NLU system to new languages use machine translation with slot label projection from source to the translated utterances, and thus are sensitive to projection errors. In this work, we propose a novel end-to-end model that learns to align and predict target slot labels jointly for cross-lingual transfer. We introduce MultiATIS++, a new multilingual NLU corpus that extends the Multilingual ATIS corpus to nine languages across four language families, and evaluate our method using the corpus. 
Results show that our method outperforms a simple label projection method using fast-align on most languages, and achieves competitive performance to the more complex, state-of-the-art projection method with only half of the training time. We release our MultiATIS++ corpus to the community to continue future research on cross-lingual NLU.", "keyphrases": ["slot alignment", "cross-lingual nlu", "natural language understanding"]} +{"id": "barbosa-feng-2010-robust", "title": "Robust Sentiment Detection on Twitter from Biased and Noisy Data", "abstract": "In this paper, we propose an approach to automatically detect sentiments on Twitter messages (tweets) that explores some characteristics of how tweets are written and meta-information of the words that compose these messages. Moreover, we leverage sources of noisy labels as our training data. These noisy labels were provided by a few sentiment detection websites over twitter data. In our experiments, we show that since our features are able to capture a more abstract representation of tweets, our solution is more effective than previous ones and also more robust regarding biased and noisy data, which is the kind of data provided by these sources.", "keyphrases": ["twitter", "sentiment analysis", "machine translation", "pos tagging"]} +{"id": "toutanova-etal-2005-joint", "title": "Joint Learning Improves Semantic Role Labeling", "abstract": "Despite much recent progress on accurate semantic role labeling, previous work has largely used independent classifiers, possibly combined with separate label sequence models via Viterbi decoding. This stands in stark contrast to the linguistic observation that a core argument frame is a joint structure, with strong dependencies between arguments. We show how to build a joint model of argument frames, incorporating novel features that model these interactions into discriminative log-linear models. This system achieves an error reduction of 22% on all arguments and 32% on core arguments over a state-of-the art independent classifier for gold-standard parse trees on PropBank.", "keyphrases": ["semantic role labeling", "argument frame", "strong dependency", "error reduction", "joint learning"]} +{"id": "bach-etal-2011-goodness", "title": "Goodness: A Method for Measuring Machine Translation Confidence", "abstract": "State-of-the-art statistical machine translation (MT) systems have made significant progress towards producing user-acceptable translation output. However, there is still no efficient way for MT systems to inform users which words are likely translated correctly and how confident it is about the whole sentence. We propose a novel framework to predict word-level and sentence-level MT errors with a large number of novel features. Experimental results show that the MT error prediction accuracy is increased from 69.1 to 72.2 in F-score. The Pearson correlation between the proposed confidence measure and the human-targeted translation edit rate (HTER) is 0.6. Improvements between 0.4 and 0.9 TER reduction are obtained with the n-best list reranking task using the proposed confidence measure. 
Also, we present a visualization prototype of MT errors at the word and sentence levels with the objective of improving post-editor productivity.", "keyphrases": ["machine translation", "f-score", "goodness"]} +{"id": "michel-etal-2019-evaluation", "title": "On Evaluation of Adversarial Perturbations for Sequence-to-Sequence Models", "abstract": "Adversarial examples \u2014 perturbations to the input of a model that elicit large changes in the output \u2014 have been shown to be an effective way of assessing the robustness of sequence-to-sequence (seq2seq) models. However, these perturbations only indicate weaknesses in the model if they do not change the input so significantly that it legitimately results in changes in the expected output. This fact has largely been ignored in the evaluations of the growing body of related literature. Using the example of untargeted attacks on machine translation (MT), we propose a new evaluation framework for adversarial attacks on seq2seq models that takes the semantic equivalence of the pre- and post-perturbation input into account. Using this framework, we demonstrate that existing methods may not preserve meaning in general, breaking the aforementioned assumption that source side perturbations should not result in changes in the expected output. We further use this framework to demonstrate that adding additional constraints on attacks allows for adversarial perturbations that are more meaning-preserving, but nonetheless largely change the output sequence. Finally, we show that performing untargeted adversarial training with meaning-preserving attacks is beneficial to the model in terms of adversarial robustness, without hurting test performance. A toolkit implementing our evaluation framework is released at .", "keyphrases": ["perturbation", "sequence-to-sequence model", "attack", "machine translation", "adversarial training"]} +{"id": "sapkota-etal-2016-domain", "title": "Domain Adaptation for Authorship Attribution: Improved Structural Correspondence Learning", "abstract": "We present the first domain adaptation model for authorship attribution to leverage unlabeled data. The model includes extensions to structural correspondence learning needed to make it appropriate for the task. For example, we propose a median-based classification instead of the standard binary classification used in previous work. Our results show that punctuation-based character n-grams form excellent pivot features. We also show how singular value decomposition plays a critical role in achieving domain adaptation, and that replacing (instead of concatenating) non-pivot features with correspondence features yields better performance.", "keyphrases": ["authorship attribution", "pivot feature", "domain adaptation"]} +{"id": "ettinger-etal-2016-probing", "title": "Probing for semantic evidence of composition by means of simple classification tasks", "abstract": "We propose a diagnostic method for probing specific information captured in vector representations of sentence meaning, via simple classification tasks with strategically constructed sentence sets. 
We identify some key types of semantic information that we might expect to be captured in sentence composition, and illustrate example classification tasks for targeting this information.", "keyphrases": ["classification task", "linguistic knowledge", "body"]} +{"id": "kobayashi-etal-2016-dynamic", "title": "Dynamic Entity Representation with Max-pooling Improves Machine Reading", "abstract": "We propose a novel neural network model for machine reading, DER Network, which explicitly implements a reader building dynamic meaning representations for entities by gathering and accumulating information around the entities as it reads a document. Evaluated on a recent large scale dataset (Hermann et al., 2015), our model exhibits better results than previous research, and we find that max-pooling is suited for modeling the accumulation of information on entities. Further analysis suggests that our model can put together multiple pieces of information encoded in different sentences to answer complicated questions. Our code for the model is available at https://github.com/soskek/der-network", "keyphrases": ["machine reading", "dynamic entity representation", "discourse"]} +{"id": "botha-etal-2020-entity", "title": "Entity Linking in 100 Languages", "abstract": "We propose a new formulation for multilingual entity linking, where language-specific mentions resolve to a language-agnostic Knowledge Base. We train a dual encoder in this new setting, building on prior work with improved feature representation, negative mining, and an auxiliary entity-pairing task, to obtain a single entity retrieval model that covers 100+ languages and 20 million entities. The model outperforms state-of-the-art results from a far more limited cross-lingual linking task. Rare entities and low-resource languages pose challenges at this large scale, so we advocate for an increased focus on zero- and few-shot evaluation. To this end, we provide Mewsli-9, a large new multilingual dataset matched to our setting, and show how frequency-based analysis provided key insights for our model and training enhancements.", "keyphrases": ["formulation", "entity linking", "wikidata item", "hyperlink"]} +{"id": "chen-etal-2020-seqvat", "title": "SeqVAT: Virtual Adversarial Training for Semi-Supervised Sequence Labeling", "abstract": "Virtual adversarial training (VAT) is a powerful technique to improve model robustness in both supervised and semi-supervised settings. It is effective and can be easily adopted on lots of image classification and text classification tasks. However, its benefits to sequence labeling tasks such as named entity recognition (NER) have not been shown as significant, mostly, because the previous approach cannot combine VAT with the conditional random field (CRF). CRF can significantly boost accuracy for sequence models by putting constraints on label transitions, which makes it an essential component in most state-of-the-art sequence labeling model architectures. In this paper, we propose SeqVAT, a method which naturally applies VAT to sequence labeling models with CRF. 
Empirical studies show that SeqVAT not only significantly improves the sequence labeling performance over baselines under supervised settings, but also outperforms state-of-the-art approaches under semi-supervised settings.", "keyphrases": ["virtual adversarial training", "image classification", "seqvat"]} +{"id": "hermann-etal-2014-semantic", "title": "Semantic Frame Identification with Distributed Word Representations", "abstract": "We present a novel technique for semantic frame identification using distributed representations of predicates and their syntactic context; this technique leverages automatic syntactic parses and a generic set of word embeddings. Given labeled data annotated with frame-semantic parses, we learn a model that projects the set of word representations for the syntactic context around a predicate to a low dimensional representation. The latter is used for semantic frame identification; with a standard argument identification method inspired by prior work, we achieve state-of-the-art results on FrameNet-style frame-semantic analysis. Additionally, we report strong results on PropBank-style semantic role labeling in comparison to prior work.", "keyphrases": ["frame", "word embedding", "wsabie algorithm"]} +{"id": "zhong-etal-2020-extractive", "title": "Extractive Summarization as Text Matching", "abstract": "This paper creates a paradigm shift with regard to the way we build neural extractive summarization systems. Instead of following the commonly used framework of extracting sentences individually and modeling the relationship between sentences, we formulate the extractive summarization task as a semantic text matching problem, in which a source document and candidate summaries (extracted from the original text) will be matched in a semantic space. Notably, this paradigm shift to a semantic matching framework is well-grounded in our comprehensive analysis of the inherent gap between sentence-level and summary-level extractors based on the property of the dataset. Besides, even instantiating the framework with a simple form of a matching model, we have driven the state-of-the-art extractive result on CNN/DailyMail to a new level (44.41 in ROUGE-1). Experiments on the other five datasets also show the effectiveness of the matching framework. We believe the power of this matching-based summarization framework has not been fully exploited. To encourage more instantiations in the future, we have released our codes, processed dataset, as well as generated summaries in .", "keyphrases": ["paradigm", "semantic space", "extractive summarization", "matchsum"]} +{"id": "chambers-etal-2014-dense", "title": "Dense Event Ordering with a Multi-Pass Architecture", "abstract": "The past 10 years of event ordering research has focused on learning partial orderings over document events and time expressions. The most popular corpus, the TimeBank, contains a small subset of the possible ordering graph. Many evaluations follow suit by only testing certain pairs of events (e.g., only main verbs of neighboring sentences). This has led most research to focus on specific learners for partial labelings. This paper attempts to nudge the discussion from identifying some relations to all relations. We present new experiments on strongly connected event graphs that contain \u223c10 times more relations per document than the TimeBank.
We also describe a shift away from the single learner to a sieve-based architecture that naturally blends multiple learners into a precision-ranked cascade of sieves. Each sieve adds labels to the event graph one at a time, and earlier sieves inform later ones through transitive closure. This paper thus describes innovations in both approach and task. We experiment on the densest event graphs to date and show a 14% gain over state-of-the-art.", "keyphrases": ["ordering", "sieve", "temporal relation extraction", "caevo", "task-specific model"]} +{"id": "lee-etal-2010-qualia", "title": "Qualia Modification in Noun-Noun Compounds: A Cross-Language Survey", "abstract": "In analyzing the formation of a given compound, both its internal syntactic structure and semantic relations need to be considered. The Generative Lexicon Theory (GL Theory) provides us with an explanatory model of compounds that captures the qualia modification relations in the semantic composition within a compound, which can be applied to natural language processing tasks. In this paper, we primarily discuss the qualia structure of noun-noun compounds found in Chinese as well as a couple of other languages like German, Spanish, Japanese and Italian. We briefly review the construction of compounds and focus on the noun-noun construction. While analyzing the semantic relationship between the words that compose a compound, we use the GL Theory to demonstrate that the proposed qualia structure enables compositional interpretation within the compound. Besides, we attempt to examine whether, for each semantic head, its modifier can fit in one of the four quales. Finally, our analysis reveals the potential and limits of qualia-based treatment of composition of nominal compounds and suggests a path for future work.", "keyphrases": ["noun-noun compound", "other language", "qualia modification"]} +{"id": "bingel-sogaard-2016-text", "title": "Text Simplification as Tree Labeling", "abstract": "We present a new, structured approach to text simplification using conditional random fields over top-down traversals of dependency graphs that jointly predicts possible compressions and paraphrases. Our model reaches readability scores comparable to word-based compression approaches across a range of metrics and human judgements while maintaining more of the important information.", "keyphrases": ["structured approach", "paraphrase", "text simplification"]} +{"id": "lewis-etal-2019-unsupervised", "title": "Unsupervised Question Answering by Cloze Translation", "abstract": "Obtaining training data for Question Answering (QA) is time-consuming and resource-intensive, and existing QA datasets are only available for limited domains and languages. In this work, we explore to what extent high quality training data is actually required for Extractive QA, and investigate the possibility of unsupervised Extractive QA. We approach this problem by first learning to generate context, question and answer triples in an unsupervised manner, which we then use to synthesize Extractive QA training data automatically. To generate such triples, we first sample random context paragraphs from a large corpus of documents and then random noun phrases or Named Entity mentions from these paragraphs as answers. Next we convert answers in context to \u201cfill-in-the-blank\u201d cloze questions and finally translate them into natural questions.
We propose and compare various unsupervised ways to perform cloze-to-natural question translation, including training an unsupervised NMT model using non-aligned corpora of natural questions and cloze questions as well as a rule-based approach. We find that modern QA models can learn to answer human questions surprisingly well using only synthetic training data. We demonstrate that, without using the SQuAD training data at all, our approach achieves 56.4 F1 on SQuAD v1 (64.5 F1 when the answer is a Named Entity mention), outperforming early supervised models.", "keyphrases": ["cloze translation", "triple", "large corpus", "noun phrase", "unsupervised question"]} +{"id": "louis-etal-2010-discourse", "title": "Discourse indicators for content selection in summarization", "abstract": "We present analyses aimed at eliciting which specific aspects of discourse provide the strongest indication for text importance. In the context of content selection for single document summarization of news, we examine the benefits of both the graph structure of text provided by discourse relations and the semantic sense of these relations. We find that structure information is the most robust indicator of importance. Semantic sense only provides constraints on content selection but is not indicative of important content by itself. However, sense features complement structure information and lead to improved performance. Further, both types of discourse information prove complementary to non-discourse features. While our results establish the usefulness of discourse features, we also find that lexical overlap provides a simple and cheap alternative to discourse for computing text structure with comparable performance for the task of content selection.", "keyphrases": ["indicator", "content selection", "document summarization", "discourse relation"]} +{"id": "francois-miltsakaki-2012-nlp", "title": "Do NLP and machine learning improve traditional readability formulas?", "abstract": "Readability formulas are methods used to match texts with the readers' reading level. Several methodological paradigms have previously been investigated in the field. The most popular paradigm dates several decades back and gave rise to well-known readability formulas such as the Flesch formula (among several others). This paper compares this approach (henceforth \"classic\") with an emerging paradigm which uses sophisticated NLP-enabled features and machine learning techniques. Our experiments, carried out on a corpus of texts for French as a foreign language, yield four main results: (1) the new readability formula performed better than the \"classic\" formula; (2) \"non-classic\" features were slightly more informative than \"classic\" features; (3) modern machine learning algorithms did not improve the explanatory power of our readability model, but allowed us to better classify new observations; and (4) combining \"classic\" and \"non-classic\" features resulted in a significant gain in performance.", "keyphrases": ["machine learning", "readability formula", "significant gain"]} +{"id": "marcheggiani-titov-2020-graph", "title": "Graph Convolutions over Constituent Trees for Syntax-Aware Semantic Role Labeling", "abstract": "Semantic role labeling (SRL) is the task of identifying predicates and labeling argument spans with semantic roles.
Even though most semantic-role formalisms are built upon constituent syntax, and only syntactic constituents can be labeled as arguments (e.g., FrameNet and PropBank), all the recent work on syntax-aware SRL relies on dependency representations of syntax. In contrast, we show how graph convolutional networks (GCNs) can be used to encode constituent structures and inform an SRL system. Nodes in our SpanGCN correspond to constituents. The computation is done in 3 stages. First, initial node representations are produced by `composing' word representations of the first and last words in the constituent. Second, graph convolutions relying on the constituent tree are performed, yielding syntactically-informed constituent representations. Finally, the constituent representations are `decomposed' back into word representations, which are used as input to the SRL classifier. We evaluate SpanGCN against alternatives, including a model using GCNs over dependency trees, and show its effectiveness on standard English SRL benchmarks CoNLL-2005, CoNLL-2012, and FrameNet.", "keyphrases": ["semantic role labeling", "graph convolution", "pos tag"]} +{"id": "tatsumi-2009-correlation", "title": "Correlation between Automatic Evaluation Metric Scores, Post-Editing Speed, and Some Other Factors", "abstract": "This paper summarises the results of a pilot project conducted to investigate the correlation between automatic evaluation metric scores and post-editing speed on a segment-by-segment basis. Firstly, the results from the comparison of various automatic metrics and post-editing speed will be reported. Secondly, further analysis is carried out by taking into consideration other relevant variables, such as text length and structures, and by means of multiple regression. It has been found that different automatic metrics achieve different levels and types of correlation with post-editing speed. We suggest that some of the source text characteristics and machine translation errors may be able to account for the gap between the automatic metric scores and post-editing speed, and may also help with understanding the human post-editing process.", "keyphrases": ["speed", "automatic metric", "post-editing time"]} +{"id": "peng-dredze-2015-named", "title": "Named Entity Recognition for Chinese Social Media with Jointly Trained Embeddings", "abstract": "We consider the task of named entity recognition for Chinese social media. The long line of work in Chinese NER has focused on formal domains, and NER for social media has been largely restricted to English. We present a new corpus of Weibo messages annotated for both name and nominal mentions. Additionally, we evaluate three types of neural embeddings for representing Chinese text. Finally, we propose a joint training objective for the embeddings that makes use of both (NER) labeled and unlabeled raw text. Our methods yield a 9% improvement over a state-of-the-art baseline.", "keyphrases": ["entity recognition", "neural embedding", "chinese text"]} +{"id": "li-etal-2021-structurallm", "title": "StructuralLM: Structural Pre-training for Form Understanding", "abstract": "Large pre-trained language models achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, they almost exclusively focus on text-only representation, while neglecting cell-level layout information that is important for form image understanding. In this paper, we propose a new pre-training approach, StructuralLM, to jointly leverage cell and layout information from scanned documents.
Specifically, we pre-train StructuralLM with two new designs to make the most of the interactions of cell and layout information: 1) each cell as a semantic unit; 2) classification of cell positions. The pre-trained StructuralLM achieves new state-of-the-art results in different types of downstream tasks, including form understanding (from 78.95 to 85.14), document visual question answering (from 72.59 to 83.94) and document image classification (from 94.43 to 96.08).", "keyphrases": ["form understanding", "pre-training approach", "structurallm"]} +{"id": "gkatzia-etal-2015-game", "title": "A Game-Based Setup for Data Collection and Task-Based Evaluation of Uncertain Information Presentation", "abstract": "Decision-making is often dependent on uncertain data, e.g. data associated with confidence scores, such as probabilities. A concrete example of such data is weather data. We will demo a game-based setup for exploring the effectiveness of different approaches (graphics vs NLG) to communicating uncertainty in rainfall and temperature predictions (www.macs.hw.ac.uk/InteractionLab/weathergame/). The game incorporates a natural language extension of the MetOffice Weather game. The extended version of the game can be used in three ways: (1) to compare the effectiveness of different information presentations of uncertain data; (2) to collect data for the development of effective data-driven approaches; and (3) to serve as a task-based evaluation setup for Natural Language Generation (NLG).", "keyphrases": ["game-based setup", "task-based evaluation", "decision-making"]} +{"id": "wang-etal-2019-self", "title": "Self-Attention with Structural Position Representations", "abstract": "Although self-attention networks (SANs) have advanced the state-of-the-art on various NLP tasks, one criticism of SANs is their ability to encode positions of input words (Shaw et al., 2018). In this work, we propose to augment SANs with structural position representations to model the latent structure of the input sentence, which is complementary to the standard sequential positional representations. Specifically, we use a dependency tree to represent the grammatical structure of a sentence, and propose two strategies to encode the positional relationships among words in the dependency tree. Experimental results on NIST Chinese-to-English and WMT14 English-to-German translation tasks show that the proposed approach consistently boosts performance over both the absolute and relative sequential position representations.", "keyphrases": ["position", "dependency tree", "self-attention"]} +{"id": "selfridge-etal-2011-stability", "title": "Stability and Accuracy in Incremental Speech Recognition", "abstract": "Conventional speech recognition approaches usually wait until the user has finished talking before returning a recognition hypothesis. This results in spoken dialogue systems that are unable to react while the user is still speaking. Incremental Speech Recognition (ISR), where partial phrase results are returned during user speech, has been used to create more reactive systems. However, ISR output is unstable and so prone to revision as more speech is decoded. This paper tackles the problem of stability in ISR. We first present a method that increases the stability and accuracy of ISR output, without adding delay. Given that some revisions are unavoidable, we next present a pair of methods for predicting the stability and accuracy of ISR results.
Taken together, we believe these approaches give ISR more utility for real spoken dialogue systems.", "keyphrases": ["incremental speech recognition", "isr", "delay", "stability"]} +{"id": "bergsma-etal-2010-creating", "title": "Creating Robust Supervised Classifiers via Web-Scale N-Gram Data", "abstract": "In this paper, we systematically assess the value of using web-scale N-gram data in state-of-the-art supervised NLP classifiers. We compare classifiers that include or exclude features for the counts of various N-grams, where the counts are obtained from a web-scale auxiliary corpus. We show that including N-gram count features can advance the state-of-the-art accuracy on standard data sets for adjective ordering, spelling correction, noun compound bracketing, and verb part-of-speech disambiguation. More importantly, when operating on new domains, or when labeled training data is not plentiful, we show that using web-scale N-gram features is essential for achieving robust performance.", "keyphrases": ["web-scale n-gram data", "ordering", "spelling correction", "noun compound bracketing", "disambiguation"]} +{"id": "lo-etal-2012-fully", "title": "Fully Automatic Semantic MT Evaluation", "abstract": "We introduce the first fully automatic, fully semantic frame based MT evaluation metric, MEANT, that outperforms all other commonly used automatic metrics in correlating with human judgment on translation adequacy. Recent work on HMEANT, which is a human metric, indicates that machine translation can be better evaluated via semantic frames than other evaluation paradigms, requiring only minimal effort from monolingual humans to annotate and align semantic frames in the reference and machine translations. We propose a surprisingly effective Occam's razor automation of HMEANT that combines standard shallow semantic parsing with a simple maximum weighted bipartite matching algorithm for aligning semantic frames. The matching criterion is based on lexical similarity scoring of the semantic role fillers through a simple context vector model which can readily be trained using any publicly available large monolingual corpus. Sentence level correlation analysis, following standard NIST MetricsMATR protocol, shows that this fully automated version of HMEANT achieves significantly higher Kendall correlation with human adequacy judgments than BLEU, NIST, METEOR, PER, CDER, WER, or TER. Furthermore, we demonstrate that performing the semantic frame alignment automatically actually tends to be just as good as performing it manually. Despite its high performance, fully automated MEANT is still able to preserve HMEANT's virtues of simplicity, representational transparency, and inexpensiveness.", "keyphrases": ["semantic frame", "meant", "role filler", "development set", "propose hmeant"]} +{"id": "chiruzzo-etal-2020-development", "title": "Development of a Guarani - Spanish Parallel Corpus", "abstract": "This paper presents the development of a Guarani - Spanish parallel corpus with sentence-level alignment. The Guarani sentences of the corpus use the Jopara Guarani dialect, the dialect of Guarani spoken in Paraguay, which is based on Guarani grammar and may include several Spanish loanwords or neologisms. 
The corpus has around 14,500 sentence pairs aligned using a semi-automatic process, containing 228,000 Guarani tokens and 336,000 Spanish tokens extracted from web sources.", "keyphrases": ["guarani", "spanish parallel corpus", "web source"]} +{"id": "lewis-etal-2015-joint", "title": "Joint A* CCG Parsing and Semantic Role Labelling", "abstract": "Joint models of syntactic and semantic parsing have the potential to improve performance on both tasks\u2014but to date, the best results have been achieved with pipelines. We introduce a joint model using CCG, which is motivated by the close link between CCG syntax and semantics. Semantic roles are recovered by labelling the deep dependency structures produced by the grammar. Furthermore, because CCG is lexicalized, we show it is possible to factor the parsing model over words and introduce a new A* parsing algorithm\u2014which we demonstrate is faster and more accurate than adaptive supertagging. Our joint model is the first to substantially improve both syntactic and semantic accuracy over a comparable pipeline, and also achieves state-of-the-art results for a non-ensemble semantic role labelling model.", "keyphrases": ["ccg", "semantic role labeling", "joint model"]} +{"id": "wang-fu-2020-item", "title": "Item-based Collaborative Filtering with BERT", "abstract": "In e-commerce, recommender systems have become an indispensable part of helping users explore the available inventory. In this work, we present a novel approach for item-based collaborative filtering, by leveraging BERT to understand items, and score relevancy between different items. Our proposed method could address problems that plague traditional recommender systems such as cold start, and \u201cmore of the same\u201d recommended content. We conducted experiments on a large-scale real-world dataset with a full cold-start scenario, and the proposed approach significantly outperforms the popular Bi-LSTM model.", "keyphrases": ["bert", "e-commerce", "item-based collaborative filtering"]} +{"id": "saito-etal-2014-morphological", "title": "Morphological Analysis for Japanese Noisy Text based on Character-level and Word-level Normalization", "abstract": "Social media texts are often written in a non-standard style and include many lexical variants such as insertions, phonetic substitutions, and abbreviations that mimic spoken language. The normalization of such a variety of non-standard tokens is one promising solution for handling noisy text. A normalization task is very difficult to conduct in Japanese morphological analysis because there are no explicit boundaries between words. To address this issue, in this paper we propose a novel method for normalizing and morphologically analyzing Japanese noisy text. We generate both character-level and word-level normalization candidates and use discriminative methods to formulate a cost function. Experimental results show that the proposed method achieves acceptable levels in both accuracy and recall for word segmentation, POS tagging, and normalization.
These levels exceed those achieved with the conventional rule-based system.", "keyphrases": ["japanese noisy text", "non-standard token", "morphological analysis"]} +{"id": "li-etal-2014-weakly", "title": "Weakly Supervised User Profile Extraction from Twitter", "abstract": "While user attribute extraction on social media has received considerable attention, existing approaches, mostly supervised, encounter great difficulty in obtaining gold standard data and are therefore limited to predicting unary predicates (e.g., gender). In this paper, we present a weakly-supervised approach to user profile extraction from Twitter. Users\u2019 profiles from social media websites such as Facebook or Google Plus are used as a distant source of supervision for extraction of their attributes from user-generated text. In addition to traditional linguistic features used in distant supervision for information extraction, our approach also takes into account network information, a unique opportunity offered by social media. We test our algorithm on three attribute domains: spouse, education and job; experimental results demonstrate our approach is able to make accurate predictions for users\u2019 attributes based on their tweets.", "keyphrases": ["twitter", "facebook", "distant supervision", "job"]} +{"id": "mirza-tonelli-2016-catena", "title": "CATENA: CAusal and TEmporal relation extraction from NAtural language texts", "abstract": "We present CATENA, a sieve-based system to perform temporal and causal relation extraction and classification from English texts, exploiting the interaction between the temporal and the causal model. We evaluate the performance of each sieve, showing that the rule-based, the machine-learned and the reasoning components all contribute to achieving state-of-the-art performance on TempEval-3 and TimeBank-Dense data. Although causal relations are much sparser than temporal ones, the architecture and the selected features are mostly suitable to serve both tasks. The effects of the interaction between the temporal and the causal components, although limited, yield promising results and confirm the tight connection between the temporal and the causal dimension of texts.", "keyphrases": ["causal", "temporal relation extraction", "natural language text"]} +{"id": "dong-etal-2019-editnts", "title": "EditNTS: An Neural Programmer-Interpreter Model for Sentence Simplification through Explicit Editing", "abstract": "We present the first sentence simplification model that learns explicit edit operations (ADD, DELETE, and KEEP) via a neural programmer-interpreter approach. Most current neural sentence simplification systems are variants of sequence-to-sequence models adopted from machine translation. These methods learn to simplify sentences as a byproduct of the fact that they are trained on complex-simple sentence pairs. By contrast, our neural programmer-interpreter is directly trained to predict explicit edit operations on targeted parts of the input sentence, resembling the way that humans perform simplification and revision.
Our model outperforms previous state-of-the-art neural sentence simplification models (without external knowledge) by large margins on three benchmark text simplification corpora in terms of SARI (+0.95 WikiLarge, +1.89 WikiSmall, +1.41 Newsela), and is judged by humans to produce overall better and simpler output sentences.", "keyphrases": ["sentence simplification", "neural programmer-interpreter approach", "editnt", "interpreter"]} +{"id": "alam-etal-2022-survey", "title": "A Survey on Multimodal Disinformation Detection", "abstract": "Recent years have witnessed the proliferation of offensive content online such as fake news, propaganda, misinformation, and disinformation. While initially this was mostly about textual content, over time images and videos gained popularity, as they are much easier to consume, attract more attention, and spread further than text. As a result, researchers started leveraging different modalities and combinations thereof to tackle online multimodal offensive content. In this study, we offer a survey on the state-of-the-art on multimodal disinformation detection covering various combinations of modalities: text, images, speech, video, social media network structure, and temporal information. Moreover, while some studies focused on factuality, others investigated how harmful the content is. While these two components in the definition of disinformation \u2013 (i) factuality and (ii) harmfulness \u2013 are equally important, they are typically studied in isolation. Thus, we argue for the need to tackle disinformation detection by taking into account multiple modalities as well as both factuality and harmfulness, in the same framework. Finally, we discuss current challenges and future research directions.", "keyphrases": ["survey", "multimodal disinformation detection", "factuality", "harmfulness"]} +{"id": "bonin-etal-2010-contrastive-filtering", "title": "Contrastive Filtering of Domain-Specific Multi-Word Terms from Different Types of Corpora", "abstract": "In this paper we tackle the challenging task of Multi-word term (MWT) extraction from different types of specialized corpora. Contrastive filtering of previously extracted MWTs results in a considerable increment of acquired domain-specific terms.", "keyphrases": ["filtering", "multi-word term", "wikipedia", "sublanguage"]} +{"id": "flanigan-etal-2016-generation", "title": "Generation from Abstract Meaning Representation using Tree Transducers", "abstract": "Language generation from purely semantic representations is a challenging task. This paper addresses generating English from the Abstract Meaning Representation (AMR), consisting of re-entrant graphs whose nodes are concepts and edges are relations. The new method is trained statistically from AMR-annotated English and consists of two major steps: (i) generating an appropriate spanning tree for the AMR, and (ii) applying tree-to-string transducers to generate English. The method relies on discriminative learning and an argument realization model to overcome data sparsity. Initial tests on held-out data show good promise despite the complexity of the task.
The system is available open-source as part of JAMR at:", "keyphrases": ["language generation", "semantic representation", "tree-to-string transducer", "amr graph", "statistical method"]} +{"id": "iyyer-etal-2016-feuding", "title": "Feuding Families and Former Friends: Unsupervised Learning for Dynamic Fictional Relationships", "abstract": "Understanding how a fictional relationship between two characters changes over time (e.g., from best friends to sworn enemies) is a key challenge in digital humanities scholarship. We present a novel unsupervised neural network for this task that incorporates dictionary learning to generate interpretable, accurate relationship trajectories. While previous work on characterizing literary relationships relies on plot summaries annotated with predefined labels, our model jointly learns a set of global relationship descriptors as well as a trajectory over these descriptors for each relationship in a dataset of raw text from novels. We find that our model learns descriptors of events (e.g., marriage or murder) as well as interpersonal states (love, sadness). Our model outperforms topic model baselines on two crowdsourced tasks, and we also find interesting correlations to annotations in an existing dataset.", "keyphrases": ["friend", "relationship trajectory", "fictional character"]} +{"id": "wang-etal-2020-formality", "title": "Formality Style Transfer with Shared Latent Space", "abstract": "Conventional approaches for formality style transfer borrow models from neural machine translation, which typically requires massive parallel data for training. However, the dataset for formality style transfer is considerably smaller than translation corpora. Moreover, we observe that informal and formal sentences closely resemble each other, which is different from the translation task where two languages have different vocabularies and grammars. In this paper, we present a new approach, Sequence-to-Sequence with Shared Latent Space (S2S-SLS), for formality style transfer, where we propose two auxiliary losses and adopt joint training of bi-directional transfer and auto-encoding. Experimental results show that S2S-SLS (with either RNN or Transformer architectures) consistently outperforms baselines in various settings, especially when we have limited data.", "keyphrases": ["shared latent space", "sequence-to-sequence", "loss", "formality style transfer"]} +{"id": "light-etal-2004-language", "title": "The Language of Bioscience: Facts, Speculations, and Statements In Between", "abstract": "We explore the use of speculative language in MEDLINE abstracts. Results from a manual annotation experiment suggest that the notion of speculative sentence can be reliably annotated by humans. In addition, an experiment with automated methods also suggests that reliable automated methods might also be developed.
Distributional observations are also presented as well as a discussion of possible uses for a system that can recognize speculative language.", "keyphrases": ["statement", "speculative language", "medline abstract", "biomedicine"]} +{"id": "hofmann-etal-2020-appraisal", "title": "Appraisal Theories for Emotion Classification in Text", "abstract": "Automatic emotion categorization has been predominantly formulated as text classification in which textual units are assigned to an emotion from a predefined inventory, for instance following the fundamental emotion classes proposed by Paul Ekman (fear, joy, anger, disgust, sadness, surprise) or Robert Plutchik (adding trust, anticipation). This approach ignores existing psychological theories to some degree, which provide explanations regarding the perception of events. For instance, the description that somebody discovers a snake is associated with fear, based on the appraisal as being an unpleasant and non-controllable situation. This emotion reconstruction is even possible without having access to explicit reports of a subjective feeling (for instance expressing this with the words \u201cI am afraid.\u201d). Automatic classification approaches therefore need to learn properties of events as latent variables (for instance that the uncertainty and the mental or physical effort associated with the encounter of a snake lead to fear). With this paper, we propose to make such interpretations of events explicit, following theories of cognitive appraisal of events, and show their potential for emotion classification when being encoded in classification models. Our results show that high quality appraisal dimension assignments in event descriptions lead to an improvement in the classification of discrete emotion categories. We make our corpus of appraisal-annotated emotion-associated event descriptions publicly available.", "keyphrases": ["emotion classification", "latent variable", "appraisal"]} +{"id": "kozareva-ravi-2019-proseqo", "title": "ProSeqo: Projection Sequence Networks for On-Device Text Classification", "abstract": "We propose a novel on-device sequence model for text classification using recurrent projections. Our model ProSeqo uses dynamic recurrent projections without the need to store or look up any pre-trained embeddings. This results in fast and compact neural networks that can perform on-device inference for complex short and long text classification tasks. We conducted exhaustive evaluation on multiple text classification tasks. Results show that ProSeqo outperformed state-of-the-art neural and on-device approaches for short text classification tasks such as dialog act and intent prediction. To the best of our knowledge, ProSeqo is the first on-device long text classification neural model. It achieved comparable results to previous neural approaches for news article, answers and product categorization, while preserving a small memory footprint and maintaining high accuracy.", "keyphrases": ["projection sequence networks", "text classification task", "proseqo"]} +{"id": "soricut-echihabi-2010-trustrank", "title": "TrustRank: Inducing Trust in Automatic Translations via Ranking", "abstract": "The adoption of Machine Translation technology for commercial applications is hampered by the lack of trust associated with machine-translated output. In this paper, we describe TrustRank, an MT system enhanced with a capability to rank the quality of translation outputs from good to bad.
This enables the user to set a quality threshold, granting the user control over the quality of the translations. We quantify the gains we obtain in translation quality, and show that our solution works on a wide variety of domains and language pairs.", "keyphrases": ["trust", "reference translation", "bleu score"]} +{"id": "neveol-etal-2014-language", "title": "Language Resources for French in the Biomedical Domain", "abstract": "The biomedical domain offers a wealth of linguistic resources for Natural Language Processing, including terminologies and corpora. While many of these resources are prominently available for English, other languages including French benefit from substantial coverage thanks to the contribution of an active community over the past decades. However, access to terminological resources in languages other than English may not be as straightforward as access to their English counterparts. Herein, we review the extent of resource coverage for French and give pointers to access French-language resources. We also discuss the sources and methods for making additional material available for French.", "keyphrases": ["french", "biomedical domain", "linguistic resource"]} +{"id": "habash-roth-2009-catib", "title": "CATiB: The Columbia Arabic Treebank", "abstract": "The Columbia Arabic Treebank (CATiB) is a database of syntactic analyses of Arabic sentences. CATiB contrasts with previous approaches to Arabic treebanking in its emphasis on speed with some constraints on linguistic richness. Two basic ideas inspire the CATiB approach: no annotation of redundant information and using representations and terminology inspired by traditional Arabic syntax. We describe CATiB's representation and annotation procedure, and report on inter-annotator agreement and speed.", "keyphrases": ["columbia arabic treebank", "syntax", "catib"]} +{"id": "pang-lee-2005-seeing", "title": "Seeing Stars: Exploiting Class Relationships for Sentiment Categorization with Respect to Rating Scales", "abstract": "We address the rating-inference problem, wherein rather than simply decide whether a review is \"thumbs up\" or \"thumbs down\", as in previous sentiment analysis work, one must determine an author's evaluation with respect to a multi-point scale (e.g., one to five \"stars\"). This task represents an interesting twist on standard multi-class text categorization because there are several different degrees of similarity between class labels; for example, \"three stars\" is intuitively closer to \"four stars\" than to \"one star\". We first evaluate human performance at the task. Then, we apply a meta-algorithm, based on a metric labeling formulation of the problem, that alters a given n-ary classifier's output in an explicit attempt to ensure that similar items receive similar labels. We show that the meta-algorithm can provide significant improvements over both multi-class and regression versions of SVMs when we employ a novel similarity measure appropriate to the problem.", "keyphrases": ["rating scale", "sentiment analysis", "movie review", "multi-class classification"]} +{"id": "whitelaw-patrick-2004-selecting", "title": "Selecting Systemic Features for Text Classification", "abstract": "Systemic features use linguistically derived language models as a basis for text classification. The graph structure of these models allows for feature representations not available with traditional bag-of-words approaches.
This paper explores the set of possible representations, and proposes feature selection methods that aim to produce the most compact and effective set of attributes for a given classification problem. We show that small sets of systemic features can outperform larger sets of word-based features in the task of identifying financial scam documents.", "keyphrases": ["systemic feature", "text classification", "financial scam document"]} +{"id": "qian-etal-2017-linguistically", "title": "Linguistically Regularized LSTM for Sentiment Classification", "abstract": "This paper deals with sentence-level sentiment classification. Though a variety of neural network models have been proposed recently, previous models either depend on expensive phrase-level annotation, most of which has remarkably degraded performance when trained with only sentence-level annotation; or do not fully employ linguistic resources (e.g., sentiment lexicons, negation words, intensity words). In this paper, we propose simple models trained with sentence-level annotation, but also attempt to model the linguistic role of sentiment lexicons, negation words, and intensity words. Results show that our models are able to capture the linguistic role of sentiment words, negation words, and intensity words in sentiment expression.", "keyphrases": ["sentiment classification", "neural network model", "linguistic role"]} +{"id": "liu-gildea-2005-syntactic", "title": "Syntactic Features for Evaluation of Machine Translation", "abstract": "Automatic evaluation of machine translation, based on computing n-gram similarity between system output and human reference translations, has revolutionized the development of MT systems. We explore the use of syntactic information, including constituent labels and head-modifier dependencies, in computing similarity between output and reference. Our results show that adding syntactic information to the evaluation metric improves both sentence-level and corpus-level correlation with human judgments.", "keyphrases": ["machine translation", "adequacy judgment", "dependency information", "meteor", "stm"]} +{"id": "mi-etal-2016-supervised", "title": "Supervised Attentions for Neural Machine Translation", "abstract": "In this paper, we improve the attention or alignment accuracy of neural machine translation by utilizing the alignments of training sentence pairs. We simply compute the distance between the machine attentions and the \"true\" alignments, and minimize this cost in the training procedure. Our experiments on a large-scale Chinese-to-English task show that our model improves both translation and alignment qualities significantly over the large-vocabulary neural machine translation system, and even beats a state-of-the-art traditional syntax-based system.", "keyphrases": ["neural machine translation", "nmt attention model", "usefulness", "extra term"]} +{"id": "chang-etal-2012-learning", "title": "Learning to Find Translations and Transliterations on the Web", "abstract": "In recent years, state-of-the-art cross-linguistic systems have been based on parallel corpora. Nevertheless, it is difficult at times to find translations of a certain technical term or named entity even with very large parallel corpora. In this paper, we present a new method for learning to find translations on the Web for a given term. In our approach, we use a small set of terms and translations to obtain mixed-code snippets returned by a search engine.
We then automatically annotate the data with translation tags, automatically generate features to augment the tagged data, and automatically train a conditional random fields model for identifying translations. At runtime, we obtain mixed-code webpages containing the given term and run the model to extract translations as output. Preliminary experiments and evaluation results show our method cleanly combines various features, resulting in a system that outperforms previous works.", "keyphrases": ["transliteration", "web", "technical term"]} +{"id": "galley-manning-2008-simple", "title": "A Simple and Effective Hierarchical Phrase Reordering Model", "abstract": "While phrase-based statistical machine translation systems currently deliver state-of-the-art performance, they remain weak on word order changes. Current phrase reordering models can properly handle swaps between adjacent phrases, but they typically lack the ability to perform the kind of long-distance re-orderings possible with syntax-based systems. In this paper, we present a novel hierarchical phrase reordering model aimed at improving non-local reorderings, which seamlessly integrates with a standard phrase-based system with little loss of computational efficiency. We show that this model can successfully handle the key examples often used to motivate syntax-based systems, such as the rotation of a prepositional phrase around a noun phrase. We contrast our model with reordering models commonly used in phrase-based systems, and show that our approach provides statistically significant BLEU point gains for two language pairs: Chinese-English (+0.53 on MT05 and +0.71 on MT08) and Arabic-English (+0.55 on MT05).", "keyphrases": ["reordering model", "hrm", "orientation", "phrase-based decoding", "shift reduce algorithm"]} +{"id": "li-nenkova-2014-reducing", "title": "Reducing Sparsity Improves the Recognition of Implicit Discourse Relations", "abstract": "The earliest work on automatic detection of implicit discourse relations relied on lexical features. More recently, researchers have demonstrated that syntactic features are superior to lexical features for the task. In this paper we re-examine the two classes of state of the art representations: syntactic production rules and word pair features. In particular, we focus on the need to reduce sparsity in instance representation, demonstrating that different representation choices even for the same class of features may exacerbate sparsity issues and reduce performance. We present results that clearly reveal that lexicalization of the syntactic features is necessary for good performance. We introduce a novel, less sparse, syntactic representation which leads to improvement in discourse relation recognition. Finally, we demonstrate that classifiers trained on different representations, especially lexical ones, behave rather differently and thus could likely be combined in future systems.", "keyphrases": ["sparsity", "implicit discourse relation", "syntactic representation"]} +{"id": "bao-etal-2020-plato", "title": "PLATO: Pre-trained Dialogue Generation Model with Discrete Latent Variable", "abstract": "Pre-training models have been proved effective for a wide range of natural language processing tasks. Inspired by this, we propose a novel dialogue generation pre-training framework to support various kinds of conversations, including chit-chat, knowledge grounded dialogues, and conversational question answering. 
In this framework, we adopt flexible attention mechanisms to fully leverage the bi-directional context and the uni-directional characteristic of language generation. We also introduce discrete latent variables to tackle the inherent one-to-many mapping problem in response generation. Two reciprocal tasks of response generation and latent act recognition are designed and carried out simultaneously within a shared network. Comprehensive experiments on three publicly available datasets verify the effectiveness and superiority of the proposed framework.", "keyphrases": ["dialogue generation model", "discrete latent variable", "open-domain conversational data", "original pre-training task"]} +{"id": "yang-etal-2015-humor", "title": "Humor Recognition and Humor Anchor Extraction", "abstract": "Humor is an essential component in personal communication. How to create computational models to discover the structures behind humor, recognize humor and even extract humor anchors remains a challenge. In this work, we first identify several semantic structures behind humor and design sets of features for each structure, and next employ a computational approach to recognize humor. Furthermore, we develop a simple and effective method to extract anchors that enable humor in a sentence. Experiments conducted on two datasets demonstrate that our humor recognizer is effective in automatically distinguishing between humorous and non-humorous texts and our extracted humor anchors correlate quite well with human annotations.", "keyphrases": ["semantic structure", "humor recognition", "incongruity", "phonetic style", "interpersonal effect"]} +{"id": "socher-etal-2014-grounded", "title": "Grounded Compositional Semantics for Finding and Describing Images with Sentences", "abstract": "Previous work on Recursive Neural Networks (RNNs) shows that these models can produce compositional feature vectors for accurately representing and classifying sentences or images. However, the sentence vectors of previous models cannot accurately represent visually grounded meaning. We introduce the DT-RNN model which uses dependency trees to embed sentences into a vector space in order to retrieve images that are described by those sentences. Unlike previous RNN-based models which use constituency trees, DT-RNNs naturally focus on the action and agents in a sentence. They are better able to abstract from the details of word order and syntactic expression. DT-RNNs outperform other recursive and recurrent neural networks, kernelized CCA and a bag-of-words baseline on the tasks of finding an image that fits a sentence description and vice versa. They also give more similar representations to sentences that describe the same image.", "keyphrases": ["image", "dependency tree", "composition function", "multimodal embedding"]} +{"id": "plank-van-noord-2011-effective", "title": "Effective Measures of Domain Similarity for Parsing", "abstract": "It is well known that parsing accuracy suffers when a model is applied to out-of-domain data. It is also known that the most beneficial data to parse a given domain is data that matches the domain (Sekine, 1997; Gildea, 2001). Hence, an important task is to select appropriate domains. However, most previous work on domain adaptation relied on the implicit assumption that domains are somehow given. As more and more data becomes available, automatic ways to select data that is beneficial for a new (unknown) target domain are becoming attractive. 
This paper evaluates various ways to automatically acquire related training data for a given test set. The results show that an unsupervised technique based on topic models is effective -- it outperforms random data selection on both languages examined, English and Dutch. Moreover, the technique works better than manually assigned labels gathered from meta-data that is available for English.", "keyphrases": ["plank", "van", "domain similarity measure"]} +{"id": "freitag-etal-2021-experts", "title": "Experts, Errors, and Context: A Large-Scale Study of Human Evaluation for Machine Translation", "abstract": "Human evaluation of modern high-quality machine translation systems is a difficult problem, and there is increasing evidence that inadequate evaluation procedures can lead to erroneous conclusions. While there has been considerable research on human evaluation, the field still lacks a commonly accepted standard procedure. As a step toward this goal, we propose an evaluation methodology grounded in explicit error analysis, based on the Multidimensional Quality Metrics (MQM) framework. We carry out the largest MQM research study to date, scoring the outputs of top systems from the WMT 2020 shared task in two language pairs using annotations provided by professional translators with access to full document context. We analyze the resulting data extensively, finding among other results a substantially different ranking of evaluated systems from the one established by the WMT crowd workers, exhibiting a clear preference for human over machine output. Surprisingly, we also find that automatic metrics based on pre-trained embeddings can outperform human crowd workers. We make our corpus publicly available for further research.", "keyphrases": ["human evaluation", "machine translation", "wmt", "professional translator", "document context"]} +{"id": "nakov-hearst-2005-search", "title": "Search Engine Statistics Beyond the n-Gram: Application to Noun Compound Bracketing", "abstract": "In order to achieve the long-range goal of semantic interpretation of noun compounds, it is often necessary to first determine their syntactic structure. This paper describes an unsupervised method for noun compound bracketing which extracts statistics from Web search engines using a \u03c72 measure, a new set of surface features, and paraphrases. On a gold standard, the system achieves results of 89.34% (baseline 66.80%), which is a sizable improvement over the state of the art (80.70%).", "keyphrases": ["noun compound bracketing", "paraphrase", "marker", "hyphen", "search engine"]} +{"id": "heinz-etal-2011-tier", "title": "Tier-based Strictly Local Constraints for Phonology", "abstract": "Beginning with Goldsmith (1976), the phonological tier has a long history in phonological theory to describe non-local phenomena. This paper defines a class of formal languages, the Tier-based Strictly Local languages, which begin to describe such phenomena. Then this class is located within the Subregular Hierarchy (McNaughton and Papert, 1971).
It is found that these languages contain the Strictly Local languages, are star-free, are incomparable with other known sub-star-free classes, and have other interesting properties.", "keyphrases": ["local", "phonology", "tier", "formal language"]} +{"id": "bos-2016-squib", "title": "Squib: Expressive Power of Abstract Meaning Representations", "abstract": "The syntax of abstract meaning representations (AMRs) can be defined recursively, and a systematic translation to first-order logic (FOL) can be specified, including a proper treatment of negation. AMRs without recurrent variables are in the decidable two-variable fragment of FOL. The current definition of AMRs has limited expressive power for universal quantification (up to one universal quantifier per sentence). A simple extension of the AMR syntax and translation to FOL provides the means to represent projection and scope phenomena.", "keyphrases": ["expressive power", "amr", "first-order logic", "negation", "syntactic idiosyncrasy"]} +{"id": "li-etal-2020-bert-vision", "title": "What Does BERT with Vision Look At?", "abstract": "Pre-trained visually grounded language models such as ViLBERT, LXMERT, and UNITER have achieved significant performance improvement on vision-and-language tasks but what they learn during pre-training remains unclear. In this work, we demonstrate that certain attention heads of a visually grounded language model actively ground elements of language to image regions. Specifically, some heads can map entities to image regions, performing the task known as entity grounding. Some heads can even detect the syntactic relations between non-entity words and image regions, tracking, for example, associations between verbs and regions corresponding to their arguments. We denote this ability as syntactic grounding. We verify grounding both quantitatively and qualitatively, using Flickr30K Entities as a testbed.", "keyphrases": ["bert", "attention head", "image region"]} +{"id": "cui-etal-2020-edge", "title": "Edge-Enhanced Graph Convolution Networks for Event Detection with Syntactic Relation", "abstract": "Event detection (ED), a key subtask of information extraction, aims to recognize instances of specific event types in text. Previous studies on the task have verified the effectiveness of integrating syntactic dependency into graph convolutional networks. However, these methods usually ignore dependency label information, which conveys rich and useful linguistic knowledge for ED. In this paper, we propose a novel architecture named Edge-Enhanced Graph Convolution Networks (EE-GCN), which simultaneously exploits syntactic structure and typed dependency label information to perform ED. Specifically, an edge-aware node update module is designed to generate expressive word representations by aggregating syntactically-connected words through specific dependency types. Furthermore, to fully explore clues hidden from dependency edges, a node-aware edge update module is introduced, which refines the relation representations with contextual information. These two modules are complementary to each other and work in a mutual promotion way. 
We conduct experiments on the widely used ACE2005 dataset and the results show significant improvement over competitive baseline methods.", "keyphrases": ["convolutional network", "event detection", "syntactic structure"]} +{"id": "misra-etal-2018-mapping", "title": "Mapping Instructions to Actions in 3D Environments with Visual Goal Prediction", "abstract": "We propose to decompose instruction execution to goal prediction and action generation. We design a model that maps raw visual observations to goals using LINGUNET, a language-conditioned image generation network, and then generates the actions required to complete them. Our model is trained from demonstration only without external resources. To evaluate our approach, we introduce two benchmarks for instruction following: LANI, a navigation task; and CHAI, where an agent executes household instructions. Our evaluation demonstrates the advantages of our model decomposition, and illustrates the challenges posed by our new benchmarks.", "keyphrases": ["environment", "goal prediction", "natural language instruction", "vision-and-language navigation"]} +{"id": "loukina-etal-2016-textual", "title": "Textual complexity as a predictor of difficulty of listening items in language proficiency tests", "abstract": "In this paper we explore to what extent the difficulty of listening items in an English language proficiency test can be predicted by the textual properties of the prompt. We show that a system based on multiple text complexity features can predict item difficulty for several different item types and for some items achieves higher accuracy than human estimates of item difficulty.", "keyphrases": ["language proficiency test", "text complexity feature", "comprehension question"]} +{"id": "nan-etal-2021-dart", "title": "DART: Open-Domain Structured Data Record to Text Generation", "abstract": "We present DART, an open domain structured DAta Record to Text generation dataset with over 82k instances (DARTs). Data-to-text annotations can be a costly process, especially when dealing with tables which are the major source of structured data and contain nontrivial structures. To this end, we propose a procedure of extracting semantic triples from tables that encodes their structures by exploiting the semantic dependencies among table headers and the table title. Our dataset construction framework effectively merged heterogeneous sources from open domain semantic parsing and spoken dialogue systems by utilizing techniques including tree ontology annotation, question-answer pair to declarative sentence conversion, and predicate unification, all with minimum post-editing. We present systematic evaluation on DART as well as new state-of-the-art results on WebNLG 2017 to show that DART (1) poses new challenges to existing data-to-text datasets and (2) facilitates out-of-domain generalization. Our data and code can be found at .", "keyphrases": ["text generation", "data-to-text dataset", "dart"]} +{"id": "sun-etal-2019-patient", "title": "Patient Knowledge Distillation for BERT Model Compression", "abstract": "Pre-trained language models such as BERT have proven to be highly effective for natural language processing (NLP) tasks. However, the high demand for computing resources in training such models hinders their application in practice. 
In order to alleviate this resource hunger in large-scale model training, we propose a Patient Knowledge Distillation approach to compress an original large model (teacher) into an equally-effective lightweight shallow network (student). Different from previous knowledge distillation methods, which only use the output from the last layer of the teacher network for distillation, our student model patiently learns from multiple intermediate layers of the teacher model for incremental knowledge extraction, following two strategies: (i) PKD-Last: learning from the last k layers; and (ii) PKD-Skip: learning from every k layers. These two patient distillation schemes enable the exploitation of rich information in the teacher's hidden layers, and encourage the student model to patiently learn from and imitate the teacher through a multi-layer distillation process. Empirically, this translates into improved results on multiple NLP tasks with a significant gain in training efficiency, without sacrificing model accuracy.", "keyphrases": ["bert model compression", "distillation method", "patient knowledge distillation"]} +{"id": "baldwin-etal-2013-noisy", "title": "How Noisy Social Media Text, How Diffrnt Social Media Sources?", "abstract": "While various claims have been made about text in social media being noisy, there has never been a systematic study to investigate just how linguistically noisy or otherwise it is over a range of social media sources. We explore this question empirically over popular social media text types, in the form of YouTube comments, Twitter posts, web user forum posts, blog posts and Wikipedia, which we compare to a reference corpus of edited English text. We first extract various descriptive statistics from each data type (including the distribution of languages, average sentence length and proportion of out-of-vocabulary words), and then investigate the proportion of grammatical sentences in each, based on a linguistically-motivated parser. We also investigate the relative similarity between different data types.", "keyphrases": ["comment", "blog", "social medium", "disfluency", "twitter text"]} +{"id": "mubarak-etal-2021-arabic", "title": "Arabic Offensive Language on Twitter: Analysis and Experiments", "abstract": "Detecting offensive language on Twitter has many applications ranging from detecting/predicting bullying to measuring polarization. In this paper, we focus on building a large Arabic offensive tweet dataset. We introduce a method for building a dataset that is not biased by topic, dialect, or target. We produce the largest Arabic dataset to date with special tags for vulgarity and hate speech. We thoroughly analyze the dataset to determine which topics, dialects, and gender are most associated with offensive tweets and how Arabic speakers use offensive language. Lastly, we conduct many experiments to produce strong results (F1 = 83.2) on the dataset using SOTA techniques.", "keyphrases": ["twitter", "offensive tweet", "arabic offensive language"]} +{"id": "jia-etal-2009-noisy", "title": "A Noisy Channel Model for Grapheme-based Machine Transliteration", "abstract": "Machine transliteration is an important Natural Language Processing task. This paper proposes a Noisy Channel Model for Grapheme-based machine transliteration. Moses, a phrase-based Statistical Machine Translation tool, is employed for the implementation of the system. Experiments are carried out on the NEWS 2009 Machine Transliteration Shared Task English-Chinese track. 
English-Chinese back transliteration is studied as well.", "keyphrases": ["noisy channel model", "grapheme-based machine transliteration", "machine translation"]} +{"id": "narisawa-etal-2013-204", "title": "Is a 204 cm Man Tall or Small ? Acquisition of Numerical Common Sense from the Web", "abstract": "This paper presents novel methods for modeling numerical common sense: the ability to infer whether a given number (e.g., three billion) is large, small, or normal for a given context (e.g., number of people facing a water shortage). We first discuss the necessity of numerical common sense in solving textual entailment problems. We explore two approaches for acquiring numerical common sense. Both approaches start with extracting numerical expressions and their context from the Web. One approach estimates the distribution of numbers co-occurring within a context and examines whether a given value is large, small, or normal, based on the distribution. Another approach utilizes textual patterns with which speakers explicitly express their judgment about the value of a numerical expression. Experimental results demonstrate the effectiveness of both approaches.", "keyphrases": ["numerical common sense", "value", "object"]} +{"id": "nguyen-etal-2021-trankit", "title": "Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing", "abstract": "We introduce Trankit, a light-weight Transformer-based Toolkit for multilingual Natural Language Processing (NLP). It provides a trainable pipeline for fundamental NLP tasks over 100 languages, and 90 pretrained pipelines for 56 languages. Built on a state-of-the-art pretrained language model, Trankit significantly outperforms prior multilingual NLP pipelines over sentence segmentation, part-of-speech tagging, morphological feature tagging, and dependency parsing while maintaining competitive performance for tokenization, multi-word token expansion, and lemmatization over 90 Universal Dependencies treebanks. Despite the use of a large pretrained transformer, our toolkit is still efficient in memory usage and speed. This is achieved by our novel plug-and-play mechanism with Adapters where a multilingual pretrained transformer is shared across pipelines for different languages. Our toolkit along with pretrained models and code are publicly available at: . A demo website for our toolkit is also available at: . Finally, we create a demo video for Trankit at: .", "keyphrases": ["light-weight transformer-based toolkit", "fundamental nlp task", "language model", "trankit"]} +{"id": "choi-palmer-2011-statistical", "title": "Statistical Dependency Parsing in Korean: From Corpus Generation To Automatic Parsing", "abstract": "This paper makes two contributions to dependency parsing in Korean. First, we build a Korean dependency Treebank from an existing constituent Treebank. For a morphologically rich language like Korean, dependency parsing shows some advantages over constituent parsing. Since there is not much training data available, we automatically generate dependency trees by applying head-percolation rules and heuristics to the constituent trees. Second, we show how to extract useful features for dependency parsing from rich morphology in Korean. Once we build the dependency Treebank, any statistical parsing approach can be applied. The challenging part is how to extract features from tokens consisting of multiple morphemes. We suggest a way of selecting important morphemes and use only these as features to avoid sparsity. 
Our parsing approach is evaluated on three different genres using both gold-standard and automatic morphological analysis. We also test the impact of fine vs. coarse-grained morphologies on dependency parsing. With automatic morphological analysis, we achieve labeled attachment scores of 80%+. To the best of our knowledge, this is the first time that Korean dependency parsing has been evaluated on labeled edges with such a large variety of data.", "keyphrases": ["dependency parsing", "korean", "important morpheme"]} +{"id": "hwang-etal-2015-aligning", "title": "Aligning Sentences from Standard Wikipedia to Simple Wikipedia", "abstract": "This work improves monolingual sentence alignment for text simplification, specifically for text in standard and simple Wikipedia. We introduce a method that improves over past efforts by using a greedy (vs. ordered) search over the document and a word-level semantic similarity score based on Wiktionary (vs. WordNet) that also accounts for structural similarity through syntactic dependencies. Experiments show improved performance on a hand-aligned set, with the largest gain coming from structural similarity. Resulting datasets of manually and automatically aligned sentence pairs are made available.", "keyphrases": ["simple wikipedia", "text simplification", "sentence pair", "match"]} +{"id": "lukasik-etal-2016-hawkes", "title": "Hawkes Processes for Continuous Time Sequence Classification: an Application to Rumour Stance Classification in Twitter", "abstract": "Classification of temporal textual data sequences is a common task in various domains such as social media and the Web. In this paper we propose to use Hawkes Processes for classifying sequences of temporal textual data, which exploit both temporal and textual information. Our experiments on rumour stance classification on four Twitter datasets show the importance of using the temporal information of tweets along with the textual content.", "keyphrases": ["rumour stance classification", "hawkes processes", "temporal sequence"]} +{"id": "han-zhao-2010-structural", "title": "Structural Semantic Relatedness: A Knowledge-Based Method to Named Entity Disambiguation", "abstract": "The name ambiguity problem has raised urgent demands for efficient, high-quality named entity disambiguation methods. In recent years, the increasing availability of large-scale, rich semantic knowledge sources (such as Wikipedia and WordNet) creates new opportunities to enhance named entity disambiguation by developing algorithms which can best exploit these knowledge sources. The problem is that these knowledge sources are heterogeneous and most of the semantic knowledge within them is embedded in complex structures, such as graphs and networks. This paper proposes a knowledge-based method, called Structural Semantic Relatedness (SSR), which can enhance the named entity disambiguation by capturing and leveraging the structural semantic knowledge in multiple knowledge sources. Empirical results show that, in comparison with the classical BOW-based methods and social network-based methods, our method can significantly improve the disambiguation performance by 8.7% and 14.7%, respectively.", "keyphrases": ["knowledge-based method", "entity disambiguation", "wikipedia"]} +{"id": "cai-lam-2020-amr", "title": "AMR Parsing via Graph-Sequence Iterative Inference", "abstract": "We propose a new end-to-end model that treats AMR parsing as a series of dual decisions on the input sequence and the incrementally constructed graph. 
At each time step, our model performs multiple rounds of attention, reasoning, and composition that aim to answer two critical questions: (1) which part of the input sequence to abstract; and (2) where in the output graph to construct the new concept. We show that the answers to these two questions are mutually causal. We design a model based on iterative inference that helps achieve better answers from both perspectives, leading to greatly improved parsing accuracy. Our experimental results significantly outperform all previously reported Smatch scores by large margins. Remarkably, without the help of any large-scale pre-trained language model (e.g., BERT), our model already surpasses the previous state-of-the-art using BERT. With the help of BERT, we can push the state-of-the-art results to 80.2% on LDC2017T10 (AMR 2.0) and 75.4% on LDC2014T12 (AMR 1.0).", "keyphrases": ["graph-sequence iterative inference", "decision", "new concept", "amr"]} +{"id": "de-marneffe-etal-2012-happen", "title": "Did It Happen? The Pragmatic Complexity of Veridicality Assessment", "abstract": "Natural language understanding depends heavily on assessing veridicality\u2014whether events mentioned in a text are viewed as happening or not\u2014but little consideration is given to this property in current relation and event extraction systems. Furthermore, the work that has been done has generally assumed that veridicality can be captured by lexical semantic properties whereas we show that context and world knowledge play a significant role in shaping veridicality. We extend the FactBank corpus, which contains semantically driven veridicality annotations, with pragmatically informed ones. Our annotations are more complex than the lexical assumption predicts but systematic enough to be included in computational work on textual understanding. They also indicate that veridicality judgments are not always categorical, and should therefore be modeled as distributions. We build a classifier to automatically assign event veridicality distributions based on our new annotations. The classifier relies not only on lexical features like hedges or negations, but also on structural features and approximations of world knowledge, thereby providing a nuanced picture of the diverse factors that shape veridicality. \u201cAll I know is what I read in the papers\u201d\u2014Will Rogers", "keyphrases": ["veridicality assessment", "factbank", "uncertainty"]} +{"id": "miao-blunsom-2016-language", "title": "Language as a Latent Variable: Discrete Generative Models for Sentence Compression", "abstract": "In this work we explore deep generative models of text in which the latent representation of a document is itself drawn from a discrete language model distribution. We formulate a variational auto-encoder for inference in this model and apply it to the task of compressing sentences. In this application the generative model first draws a latent summary sentence from a background language model, and then subsequently draws the observed sentence conditioned on this latent summary. In our empirical evaluation we show that generative formulations of both abstractive and extractive compression yield state-of-the-art results when trained on a large amount of supervised data. 
Further, we explore semi-supervised compression scenarios where we show that it is possible to achieve performance competitive with previously proposed supervised models while training on a fraction of the supervised data.", "keyphrases": ["latent variable", "sentence compression", "deep generative model", "auto-encoder", "prior"]} +{"id": "guerini-etal-2015-echoes", "title": "Echoes of Persuasion: The Effect of Euphony in Persuasive Communication", "abstract": "While the effects of various lexical, syntactic, semantic and stylistic features have been addressed in persuasive language from a computational point of view, the persuasive effect of phonetics has received little attention. By modeling a notion of euphony and analyzing four datasets comprising persuasive and non-persuasive sentences in different domains (political speeches, movie quotes, slogans and tweets), we explore the impact of sounds on different forms of persuasiveness. We conduct a series of analyses and prediction experiments within and across datasets. Our results highlight the positive role of phonetic devices on persuasion.", "keyphrases": ["persuasion", "euphony", "alliteration"]} +{"id": "kurita-etal-2020-weight", "title": "Weight Poisoning Attacks on Pretrained Models", "abstract": "Recently, NLP has seen a surge in the usage of large pre-trained models. Users download weights of models pre-trained on large datasets, then fine-tune the weights on a task of their choice. This raises the question of whether downloading untrusted pre-trained weights can pose a security threat. In this paper, we show that it is possible to construct \u201cweight poisoning\u201d attacks where pre-trained weights are injected with vulnerabilities that expose \u201cbackdoors\u201d after fine-tuning, enabling the attacker to manipulate the model prediction simply by injecting an arbitrary keyword. We show that by applying a regularization method which we call RIPPLe and an initialization procedure we call Embedding Surgery, such attacks are possible even with limited knowledge of the dataset and fine-tuning procedure. Our experiments on sentiment classification, toxicity detection, and spam detection show that this attack is widely applicable and poses a serious threat. Finally, we outline practical defenses against such attacks.", "keyphrases": ["attacker", "pre-trained model", "backdoor", "weight"]} +{"id": "bisazza-tump-2018-lazy", "title": "The Lazy Encoder: A Fine-Grained Analysis of the Role of Morphology in Neural Machine Translation", "abstract": "Neural sequence-to-sequence models have proven very effective for machine translation, but at the expense of model interpretability. To shed more light on the role played by linguistic structure in the process of neural machine translation, we perform a fine-grained analysis of how various source-side morphological features are captured at different levels of the NMT encoder while varying the target language. Differently from previous work, we find no correlation between the accuracy of source morphology encoding and translation quality. 
We do find that morphological features are only captured in context and only to the extent that they are directly transferable to the target words.", "keyphrases": ["fine-grained analysis", "neural machine translation", "nmt encoder"]} +{"id": "francois-fairon-2012-ai", "title": "An \u201cAI readability\u201d Formula for French as a Foreign Language", "abstract": "This paper presents a new readability formula for French as a foreign language (FFL), which relies on 46 textual features representative of the lexical, syntactic, and semantic levels as well as some of the specificities of the FFL context. We report comparisons between several techniques for feature selection and various learning algorithms. Our best model, based on support vector machines (SVM), significantly outperforms previous FFL formulas. We also found that semantic features behave poorly in our case, in contrast with some previous readability studies on English as a first language.", "keyphrases": ["french", "foreign language", "readability formula"]} +{"id": "li-etal-2021-kfcnet-knowledge", "title": "KFCNet: Knowledge Filtering and Contrastive Learning for Generative Commonsense Reasoning", "abstract": "Pre-trained language models have led to substantial gains over a broad range of natural language processing (NLP) tasks, but have been shown to have limitations for natural language generation tasks with high-quality requirements on the output, such as commonsense generation and ad keyword generation. In this work, we present a novel Knowledge Filtering and Contrastive learning Network (KFCNet) which references external knowledge and achieves better generation performance. Specifically, we propose a BERT-based filter model to remove low-quality candidates, and apply contrastive learning separately to each of the encoder and decoder, within a general encoder\u2013decoder architecture. The encoder contrastive module helps to capture global target semantics during encoding, and the decoder contrastive module enhances the utility of retrieved prototypes while learning general features. Extensive experiments on the CommonGen benchmark show that our model outperforms the previous state of the art by a large margin: +6.6 points (42.5 vs. 35.9) for BLEU-4, +3.7 points (33.3 vs. 29.6) for SPICE, and +1.3 points (18.3 vs. 17.0) for CIDEr. We further verify the effectiveness of the proposed contrastive module on ad keyword generation, and show that our model has potential commercial value.", "keyphrases": ["contrastive learning", "external knowledge", "kfcnet"]} +{"id": "kim-hovy-2006-automatic", "title": "Automatic Identification of Pro and Con Reasons in Online Reviews", "abstract": "In this paper, we present a system that automatically extracts the pros and cons from online reviews. Although many approaches have been developed for extracting opinions from text, our focus here is on extracting the reasons for the opinions, which may themselves be in the form of either fact or opinion. Leveraging online review sites with author-generated pros and cons, we propose a system for aligning the pros and cons to their sentences in review texts. A maximum entropy model is then trained on the resulting labeled set to subsequently extract pros and cons from online review sites that do not explicitly provide them. 
Our experimental results show that our resulting system identifies pros and cons with 66% precision and 76% recall.", "keyphrases": ["reason", "review", "subjectivity analysis resource"]} +{"id": "vu-haffari-2018-automatic", "title": "Automatic Post-Editing of Machine Translation: A Neural Programmer-Interpreter Approach", "abstract": "Automated Post-Editing (PE) is the task of automatically correcting common and repetitive errors found in machine translation (MT) output. In this paper, we present a neural programmer-interpreter approach to this task, resembling the way that humans perform post-editing using discrete edit operations, which we refer to as programs. Our model outperforms previous neural models for inducing PE programs on the WMT17 APE task for German-English by up to +1 BLEU and -0.7 TER.", "keyphrases": ["machine translation", "neural programmer-interpreter approach", "automatic post-editing"]} +{"id": "wang-etal-2021-cline", "title": "CLINE: Contrastive Learning with Semantic Negative Examples for Natural Language Understanding", "abstract": "Although pre-trained language models have proven useful for learning high-quality semantic representations, these models are still vulnerable to simple perturbations. Recent works aiming to improve the robustness of pre-trained models mainly focus on adversarial training from perturbed examples with similar semantics, neglecting the utilization of different or even opposite semantics. Different from the image processing field, text is discrete, and a few word substitutions can cause significant semantic changes. To study the impact of semantic changes caused by small perturbations, we conduct a series of pilot experiments and surprisingly find that adversarial training is useless or even harmful for the model to detect these semantic changes. To address this problem, we propose Contrastive Learning with semantIc Negative Examples (CLINE), which constructs semantic negative examples in an unsupervised manner to improve robustness under semantic adversarial attacks. By comparing with similar and opposite semantic examples, the model can effectively perceive the semantic changes caused by small perturbations. Empirical results show that our approach yields substantial improvements on a range of sentiment analysis, reasoning, and reading comprehension tasks. CLINE also ensures compactness within the same semantics and separability across different semantics at the sentence level.", "keyphrases": ["contrastive learning", "semantic negative examples", "pre-trained language model", "adversarial training"]} +{"id": "zhou-etal-2009-generating", "title": "Generating Chinese Couplets and Quatrain Using a Statistical Approach", "abstract": "We propose a novel statistical approach to automatically generate Chinese couplets and Chinese poetry. For Chinese couplets, the system takes as input the first sentence and generates as output an N-best list of second sentences using a phrase-based SMT model. A comprehensive evaluation using both human judgments and BLEU scores has been conducted and the results demonstrate that this approach is very successful. We then extended this approach to generate classic Chinese poetry using the quatrain as a case study. Given a few keywords describing a user's intention, a statistical model is used to generate the first sentence. Then a phrase-based SMT model is used to generate the other three quatrain sentences one by one. 
Evaluation using human judgment over individual lines as well as the quality of the generated poem as a whole demonstrates promising results.", "keyphrases": ["couplet", "quatrain", "classic chinese poetry"]} +{"id": "sporleder-lapata-2005-discourse", "title": "Discourse Chunking and its Application to Sentence Compression", "abstract": "In this paper we consider the problem of analysing sentence-level discourse structure. We introduce discourse chunking (i.e., the identification of intra-sentential nucleus and satellite spans) as an alternative to full-scale discourse parsing. Our experiments show that the proposed modelling approach yields results comparable to the state of the art while exploiting knowledge-lean features and small amounts of discourse annotations. We also demonstrate how discourse chunking can be successfully applied to a sentence compression task.", "keyphrases": ["discourse chunking", "sentence length", "low-resourced language"]} +{"id": "xiao-etal-2011-document", "title": "Document-level Consistency Verification in Machine Translation", "abstract": "Translation consistency is an important issue in document-level translation. However, the consistency in Machine Translation (MT) output is generally overlooked in most MT systems due to the lack of the use of document contexts. To address this issue, we present a simple and effective approach that incorporates document contexts into an existing Statistical Machine Translation (SMT) system for document-level translation. Experimental results show that our approach effectively reduces the errors caused by inconsistent translations (25% error reduction). More interestingly, it is observed that, as a \u201cbonus\u201d, our approach is able to improve the BLEU score of the SMT system.", "keyphrases": ["consistency", "machine translation", "smt system", "hard constraint"]} +{"id": "bao-etal-2018-deriving", "title": "Deriving Machine Attention from Human Rationales", "abstract": "Attention-based models are successful when trained on large amounts of data. In this paper, we demonstrate that even in the low-resource scenario, attention can be learned effectively. To this end, we start with discrete human-annotated rationales and map them into continuous attention. Our central hypothesis is that this mapping is general across domains, and thus can be transferred from resource-rich domains to low-resource ones. Our model jointly learns a domain-invariant representation and induces the desired mapping between rationales and attention. Our empirical results validate this hypothesis and show that our approach delivers significant gains over state-of-the-art baselines, yielding over 15% average error reduction on benchmark datasets.", "keyphrases": ["machine attention", "rationale", "mapping", "training example"]} +{"id": "wang-etal-2019-make", "title": "Does it Make Sense? And Why? A Pilot Study for Sense Making and Explanation", "abstract": "Introducing common sense to natural language understanding systems has received increasing research attention. It remains a fundamental question how to evaluate whether a system has the sense-making capability. Existing benchmarks measure common sense knowledge indirectly or without reasoning. In this paper, we release a benchmark to directly test whether a system can differentiate natural language statements that make sense from those that do not make sense. In addition, a system is asked to identify the most crucial reason why a statement does not make sense. 
We evaluate models trained over large-scale language modeling tasks as well as human performance, showing that there are different challenges for system sense-making.", "keyphrases": ["explanation", "capability", "natural language statement", "human performance"]} +{"id": "rehm-hegele-2018-language", "title": "Language Technology for Multilingual Europe: An Analysis of a Large-Scale Survey regarding Challenges, Demands, Gaps and Needs", "abstract": "We present the analysis of a large-scale survey titled \u201cLanguage Technology for Multilingual Europe\u201d, conducted between May and June 2017. A total of 634 participants in 52 countries responded to the survey. Its main purpose was to collect input, feedback and ideas from the European Language Technology research and innovation community in order to assess the most prominent research areas, projects and applications, but, more importantly, to identify the biggest challenges, obstacles and gaps Europe is currently facing with regard to its multilingual setup and technological solutions. Participants were encouraged to share concrete suggestions and recommendations on how present challenges can be turned into opportunities in the context of a potential long-term, large-scale, Europe-wide research, development and innovation funding programme, currently titled Human Language Project.", "keyphrases": ["multilingual europe", "survey", "country"]} +{"id": "frank-etal-2003-integrated", "title": "Integrated Shallow and Deep Parsing: TopP Meets HPSG", "abstract": "We present a novel, data-driven method for integrated shallow and deep parsing. Mediated by an XML-based multi-layer annotation architecture, we interleave a robust, but accurate stochastic topological field parser of German with a constraint-based HPSG parser. Our annotation-based method for dovetailing shallow and deep phrasal constraints is highly flexible, allowing targeted and fine-grained guidance of constraint-based parsing. We conduct systematic experiments that demonstrate substantial performance gains.", "keyphrases": ["deep parsing", "hpsg", "field parser", "german"]} +{"id": "hamdan-etal-2015-lsislif", "title": "Lsislif: Feature Extraction and Label Weighting for Sentiment Analysis in Twitter", "abstract": "This paper describes our sentiment analysis systems, which have been built for SemEval-2015 Task 10 Subtasks B and E. For Subtask B, a Logistic Regression classifier has been trained after extracting several groups of features, including lexical, syntactic, lexicon-based, Z-score and semantic features. A weighting schema has been adapted for positive and negative labels in order to take into account the unbalanced distribution of tweets between the positive and negative classes. This system ranked third among 40 participants, achieving an average F1 of 64.27 on the 2015 Twitter data set, just 0.57% below the first system. We also present our participation in Subtask E, in which our system ranked second on the Kendall metric but first on Spearman for ranking Twitter terms according to their association with positive sentiment.", "keyphrases": ["feature extraction", "twitter", "sentiment lexicon"]} +{"id": "ghosh-etal-2010-clause", "title": "Clause Identification and Classification in Bengali", "abstract": "This paper reports on the development of clause identification and classification techniques for the Bengali language. A syntactic rule-based model has been used to identify clause boundaries. 
For clause type identification, a Conditional Random Field (CRF) based statistical model has been used. The clause identification and clause classification systems demonstrated precision values of 73% and 78%, respectively.", "keyphrases": ["bengali", "clause identification", "dependency relation"]} +{"id": "collins-thompson-callan-2004-language", "title": "A Language Modeling Approach to Predicting Reading Difficulty", "abstract": "We demonstrate a new research approach to the problem of predicting the reading difficulty of a text passage, by recasting readability in terms of statistical language modeling. We derive a measure based on an extension of multinomial na\u00efve Bayes classification that combines multiple language models to estimate the most likely grade level for a given passage. The resulting classifier is not specific to any particular subject and can be trained with relatively little labeled data. We perform predictions for individual Web pages in English and compare our performance to widely-used semantic variables from traditional readability measures. We show that with minimal changes, the classifier may be retrained for use with French Web documents. For both English and French, the classifier maintains consistently good correlation with labeled grade level (0.63 to 0.79) across all test sets. Some traditional semantic variables such as type-token ratio gave the best performance on commercial calibrated test passages, while our language modeling approach gave better accuracy for Web documents and very short passages (less than 10 words).", "keyphrases": ["language modeling approach", "reading difficulty", "grade level", "complexity"]} +{"id": "choi-etal-2010-multilingual", "title": "Multilingual Propbank Annotation Tools: Cornerstone and Jubilee", "abstract": "This paper demonstrates two annotation tools related to Propbank: Cornerstone and Jubilee. Propbank is a corpus in which the arguments of each verb predicate are annotated with their semantic roles. Propbank annotation also requires the choice of a sense ID for each predicate, defined in the corresponding frameset file. Jubilee expedites the annotation process by displaying several resources of syntactic and semantic information simultaneously; easy access to each of these resources allows the annotator to quickly absorb and apply the necessary syntactic and semantic information pertinent to each predicate for consistent and efficient annotation. Cornerstone is a user-friendly XML editor, customized to allow frame authors to create and edit frameset files. Both tools have been successfully adapted to many Propbank projects; they run platform independently, are light enough to run as X11 applications and support multiple languages such as Arabic, Chinese, English, Hindi and Korean.", "keyphrases": ["propbank", "cornerstone", "semantic role"]} +{"id": "rogati-etal-2003-unsupervised", "title": "Unsupervised Learning of Arabic Stemming Using a Parallel Corpus", "abstract": "This paper presents an unsupervised learning approach to building a non-English (Arabic) stemmer. The stemming model is based on statistical machine translation and it uses an English stemmer and a small (10 K sentences) parallel corpus as its sole training resources. No parallel text is needed after the training phase. Monolingual, unannotated text can be used to further improve the stemmer by allowing it to adapt to a desired domain or genre. 
Examples and results will be given for Arabic, but the approach is applicable to any language that needs affix removal. Our resource-frugal approach results in 87.5% agreement with a state-of-the-art, proprietary Arabic stemmer built using rules, affix lists, and human-annotated text, in addition to an unsupervised component. Task-based evaluation using Arabic information retrieval indicates an improvement of 22-38% in average precision over unstemmed text, and 96% of the performance of the proprietary stemmer described above.", "keyphrases": ["arabic", "stemmer", "statistical machine translation"]} +{"id": "blanchard-etal-2018-getting", "title": "Getting the subtext without the text: Scalable multimodal sentiment classification from visual and acoustic modalities", "abstract": "In the last decade, video blogs (vlogs) have become an extremely popular method through which people express sentiment. The ubiquitousness of these videos has increased the importance of multimodal fusion models, which incorporate video and audio features with traditional text features for automatic sentiment detection. Multimodal fusion offers a unique opportunity to build models that learn from the full depth of expression available to human viewers. In the detection of sentiment in these videos, acoustic and video features provide clarity to otherwise ambiguous transcripts. In this paper, we present a multimodal fusion model that exclusively uses high-level video and audio features to analyze spoken sentences for sentiment. We discard traditional transcription features in order to minimize human intervention and to maximize the deployability of our model on at-scale real-world data. We select high-level features for our model that have been successful in non-affect domains in order to test their generalizability in the sentiment detection domain. We train and test our model on the newly released CMU Multimodal Opinion Sentiment and Emotion Intensity (CMU-MOSEI) dataset, obtaining an F1 score of 0.8049 on the validation set and an F1 score of 0.6325 on the held-out challenge test set.", "keyphrases": ["sentiment classification", "modality", "video feature"]} +{"id": "severyn-etal-2013-learning-semantic", "title": "Learning Semantic Textual Similarity with Structural Representations", "abstract": "Measuring semantic textual similarity (STS) is a cornerstone of many NLP applications. Different from the majority of approaches, where a large number of pairwise similarity features are used to represent a text pair, our model features the following: (i) it directly encodes input texts into relational syntactic structures; (ii) relies on tree kernels to handle feature engineering automatically; (iii) combines both structural and feature vector representations in a single scoring model, i.e., in Support Vector Regression (SVR); and (iv) delivers significant improvement over the best STS systems.", "keyphrases": ["semantic textual similarity", "edition distance", "syntactic dependency"]} +{"id": "snow-etal-2006-effectively", "title": "Effectively Using Syntax for Recognizing False Entailment", "abstract": "Recognizing textual entailment is a challenging problem and a fundamental component of many applications in natural language processing. We present a novel framework for recognizing textual entailment that focuses on the use of syntactic heuristics to recognize false entailment. 
We give a thorough analysis of our system, which demonstrates state-of-the-art performance on a widely-used test set.", "keyphrases": ["false entailment", "heuristic", "syntactic clue"]} +{"id": "luo-etal-2015-joint", "title": "Joint Entity Recognition and Disambiguation", "abstract": "Extracting named entities in text and linking extracted names to a given knowledge base are fundamental tasks in applications for text understanding. Existing systems typically run a named entity recognition (NER) model to extract entity names first, then run an entity linking model to link extracted names to a knowledge base. NER and linking models are usually trained separately, and the mutual dependency between the two tasks is ignored. We propose JERL, Joint Entity Recognition and Linking, to jointly model NER and linking tasks and capture the mutual dependency between them. It allows the information from each task to improve the performance of the other. To the best of our knowledge, JERL is the first model to jointly optimize NER and linking tasks together. In experiments on the CoNLL\u201903/AIDA data set, JERL outperforms state-of-the-art NER and linking systems, and we find improvements of 0.4% absolute F1 for NER on CoNLL\u201903, and 0.36% absolute precision@1 for linking on AIDA.", "keyphrases": ["joint entity recognition", "conditional random fields", "lexicon feature", "task-specific resource"]} +{"id": "denero-etal-2006-generative", "title": "Why Generative Phrase Models Underperform Surface Heuristics", "abstract": "We investigate why weights from generative models underperform heuristic estimates in phrase-based machine translation. We first propose a simple generative, phrase-based model and verify that its estimates are inferior to those given by surface statistics. The performance gap stems primarily from the addition of a hidden segmentation variable, which increases the capacity for overfitting during maximum likelihood training with EM. In particular, while word-level models benefit greatly from re-estimation, phrase-level models do not: the crucial difference is that distinct word alignments cannot all be correct, while distinct segmentations can. Alternate segmentations rather than alternate alignments compete, resulting in increased determinization of the phrase table, decreased generalization, and decreased final BLEU score. We also show that interpolation of the two methods can result in a modest increase in BLEU score.", "keyphrases": ["generative model", "phrase-based model", "probability"]} +{"id": "berg-kirkpatrick-etal-2011-jointly", "title": "Jointly Learning to Extract and Compress", "abstract": "We learn a joint model of sentence extraction and compression for multi-document summarization. Our model scores candidate summaries according to a combined linear model whose features factor over (1) the n-gram types in the summary and (2) the compressions used. We train the model using a margin-based objective whose loss captures end summary quality. Because of the exponentially large set of candidate summaries, we use a cutting-plane algorithm to incrementally detect and add active constraints efficiently. Inference in our model can be cast as an ILP and thereby solved in reasonable time; we also present a fast approximation scheme which achieves similar performance. 
Our jointly extracted and compressed summaries outperform both unlearned baselines and our learned extraction-only system on both ROUGE and Pyramid, without a drop in judged linguistic quality. We achieve the highest published ROUGE results to date on the TAC 2008 data set.", "keyphrases": ["compress", "joint model", "summarization"]} +{"id": "zubiaga-etal-2016-stance", "title": "Stance Classification in Rumours as a Sequential Task Exploiting the Tree Structure of Social Media Conversations", "abstract": "Rumour stance classification, the task that determines if each tweet in a collection discussing a rumour is supporting, denying, questioning or simply commenting on the rumour, has been attracting substantial interest. Here we introduce a novel approach that makes use of the sequence of transitions observed in tree-structured conversation threads in Twitter. The conversation threads are formed by harvesting users' replies to one another, which results in a nested tree-like structure. Previous work addressing the stance classification task has treated each tweet as a separate unit. Here we analyse tweets by virtue of their position in a sequence and test two sequential classifiers, Linear-Chain CRF and Tree CRF, each of which makes different assumptions about the conversational structure. We experiment with eight Twitter datasets, collected during breaking news, and show that exploiting the sequential structure of Twitter conversations achieves significant improvements over the non-sequential methods. Our work is the first to model Twitter conversations as a tree structure in this manner, introducing a novel way of tackling NLP tasks on Twitter conversations.", "keyphrases": ["twitter conversation", "stance classification", "rumor"]} +{"id": "li-etal-2019-rumor-detection", "title": "Rumor Detection by Exploiting User Credibility Information, Attention and Multi-task Learning", "abstract": "In this study, we propose a new multi-task learning approach for rumor detection and stance classification tasks. This neural network model has a shared layer and two task-specific layers. We incorporate the user credibility information into the rumor detection layer, and we also apply an attention mechanism in the rumor detection process. The attended information includes not only the hidden states in the rumor detection layer, but also the hidden states from the stance detection layer. The experiments on two datasets show that our proposed model outperforms the state-of-the-art rumor detection approaches.", "keyphrases": ["user credibility information", "rumor detection", "auxiliary task"]} +{"id": "shen-etal-2008-new", "title": "A New String-to-Dependency Machine Translation Algorithm with a Target Dependency Language Model", "abstract": "In this paper, we propose a novel string-to-dependency algorithm for statistical machine translation. With this new framework, we employ a target dependency language model during decoding to exploit long-distance word relations, which are unavailable with a traditional n-gram language model. 
Our experiments show that the string-to-dependency decoder achieves a 1.48-point improvement in BLEU and a 2.53-point improvement in TER compared to a standard hierarchical string-to-string system on the NIST 04 Chinese-English evaluation set.", "keyphrases": ["dependency language model", "translation quality", "string", "syntax-based system", "target-side"]} +{"id": "zhang-etal-2022-treemix", "title": "TreeMix: Compositional Constituency-based Data Augmentation for Natural Language Understanding", "abstract": "Data augmentation is an effective approach to tackle over-fitting. Many previous works have proposed different data augmentation strategies for NLP, such as noise injection, word replacement, back-translation, etc. Though effective, they missed one important characteristic of language\u2013compositionality: the meaning of a complex expression is built from its sub-parts. Motivated by this, we propose a compositional data augmentation approach for natural language understanding called TreeMix. Specifically, TreeMix leverages constituency parsing trees to decompose sentences into constituent sub-structures and the Mixup data augmentation technique to recombine them to generate new sentences. Compared with previous approaches, TreeMix introduces greater diversity to the samples generated and encourages models to learn the compositionality of NLP data. Extensive experiments on text classification and SCAN demonstrate that TreeMix outperforms current state-of-the-art data augmentation methods.", "keyphrases": ["data augmentation", "natural language understanding", "treemix"]} +{"id": "gardiner-dras-2007-exploring", "title": "Exploring Approaches to Discriminating among Near-Synonyms", "abstract": "Near-synonyms are words that mean approximately the same thing, and which tend to be assigned to the same leaf in ontologies such as WordNet. However, they can differ from each other subtly in both meaning and usage\u2014consider the pair of near-synonyms frugal and stingy\u2014and therefore choosing the appropriate near-synonym for a given context is not a trivial problem. Initial work by Edmonds (1997) suggested that corpus statistics methods would not be particularly effective, and led to subsequent work adopting methods based on specific lexical resources. In earlier work (Gardiner and Dras, 2007) we discussed the hypothesis that some kind of corpus statistics approach may still be effective in some situations, particularly if the near-synonyms differ in sentiment from each other, and we presented some preliminary confirmation of the truth of this hypothesis. This suggests that problems involving this type of near-synonym may be particularly amenable to corpus statistics methods. In this paper we investigate whether this result extends to a different corpus statistics method and in addition we analyse the results with respect to a possible confounding factor discussed in the previous work: the skewness of the sets of near-synonyms. Our results show that the relationship between success in prediction and the nature of the near-synonyms is method-dependent and that skewness is a more significant factor.", "keyphrases": ["near-synonyms", "different corpus", "web"]} +{"id": "erk-pado-2008-structured", "title": "A Structured Vector Space Model for Word Meaning in Context", "abstract": "We address the task of computing vector space representations for the meaning of word occurrences, which can vary widely according to context. 
This task is a crucial step towards a robust, vector-based compositional account of sentence meaning. We argue that existing models for this task do not take syntactic structure sufficiently into account. \n \nWe present a novel structured vector space model that addresses these issues by incorporating the selectional preferences for words' argument positions. This makes it possible to integrate syntax into the computation of word meaning in context. In addition, the model performs at and above the state of the art for modeling the contextual adequacy of paraphrases.", "keyphrases": ["vector space", "syntactic context", "denotation"]} +{"id": "zhang-vogel-2007-pandora", "title": "PanDoRA: a large-scale two-way statistical machine translation system for hand-held devices", "abstract": "The statistical machine translation (SMT) approach has taken a leading place in the field of Machine Translation for its better translation quality and lower cost in training compared to other approaches. However, due to the high demand for computing resources, an SMT system cannot be directly run on hand-held devices. Most existing hand-held translation systems are either interlingua-based, which require non-trivial human efforts to write grammar rules, or use the client/server architecture, which is constrained by the availability of wireless connections. In this paper we present PanDoRA, a two-way phrase-based statistical machine translation system for stand-alone hand-held devices. Powered by special designs such as integerized computation and compact data structures, PanDoRA can translate dialogue speech on off-the-shelf PDAs in real time. PanDoRA uses a 64K-word vocabulary and millions of phrase pairs for each translation direction. To our knowledge, PanDoRA is the first large-scale SMT system with built-in reordering models running on hand-held devices. We have successfully developed several speech-to-speech translation systems using PanDoRA and our experiments show that PanDoRA's translation quality is comparable to that of the state-of-the-art phrase-based statistical machine translation systems such as Pharaoh and STTK.", "keyphrases": ["two-way", "device", "smt system"]} +{"id": "karamanolakis-etal-2020-cross", "title": "Cross-Lingual Text Classification with Minimal Resources by Transferring a Sparse Teacher", "abstract": "Cross-lingual text classification alleviates the need for manually labeled documents in a target language by leveraging labeled documents from other languages. Existing approaches for transferring supervision across languages require expensive cross-lingual resources, such as parallel corpora, while less expensive cross-lingual representation learning approaches train classifiers without target labeled documents. In this work, we propose a cross-lingual teacher-student method, CLTS, that generates \u201cweak\u201d supervision in the target language using minimal cross-lingual resources, in the form of a small number of word translations. Given a limited translation budget, CLTS extracts and transfers only the most important task-specific seed words across languages and initializes a teacher classifier based on the translated seed words. Then, CLTS iteratively trains a more powerful student that also exploits the context of the seed words in unlabeled target documents and outperforms the teacher. 
CLTS is simple and surprisingly effective in 18 diverse languages: by transferring just 20 seed words, even a bag-of-words logistic regression student outperforms state-of-the-art cross-lingual methods (e.g., based on multilingual BERT). Moreover, CLTS can accommodate any type of student classifier: leveraging a monolingual BERT student leads to further improvements and outperforms even more expensive approaches by up to 12% in accuracy. Finally, CLTS addresses emerging tasks in low-resource languages using just a small number of word translations.", "keyphrases": ["teacher", "seed word", "cross-lingual text classification"]} +{"id": "li-ji-2014-incremental", "title": "Incremental Joint Extraction of Entity Mentions and Relations", "abstract": "We present an incremental joint framework to simultaneously extract entity mentions and relations using structured perceptron with efficient beam-search. A segment-based decoder based on the idea of semi-Markov chain is adopted in the new framework as opposed to traditional token-based tagging. In addition, by virtue of the inexact search, we developed a number of new and effective global features as soft constraints to capture the interdependency among entity mentions and relations. Experiments on Automatic Content Extraction (ACE) corpora demonstrate that our joint model significantly outperforms a strong pipelined baseline, which attains better performance than the best-reported end-to-end system.", "keyphrases": ["joint extraction", "entity mention", "feature engineering", "subtask", "end-to-end relation extraction"]} +{"id": "huang-papineni-2007-hierarchical", "title": "Hierarchical System Combination for Machine Translation", "abstract": "Given multiple translations of the same source sentence, how can we combine them to produce a translation that is better than any single system output? We propose a hierarchical system combination framework for machine translation. This framework integrates multiple MT systems\u2019 output at the word-, phrase- and sentence-levels. By boosting common word and phrase translation pairs, pruning unused phrases, and exploring decoding paths adopted by other MT systems, this framework achieves better translation quality with much less redecoding time. The full sentence translation hypotheses from multiple systems are additionally selected based on N-gram language models trained on word/word-POS mixed streams, which further improves the translation quality. We consistently observed significant improvements on several test sets in multiple languages covering different genres.", "keyphrases": ["machine translation", "system output", "path"]} +{"id": "barker-2010-cosubstitution", "title": "Cosubstitution, Derivational Locality, and Quantifier Scope", "abstract": "Quantifier scope challenges the mantra of Tree Adjoining Grammar (TAG) that all syntactic dependencies are local once syntactic recursion has been factored out. The reason is that on current TAG analyses, a quantifier and the furthest reaches of its scope domain are in general not part of any (unicomponent) elementary tree. In this paper, I consider a novel basic TAG operation called COSUBSTITUTION. In normal substitution, the root of one tree (the argument) replaces a matching non-terminal on the frontier of another tree (the functor). 
In cosubstitution, the syntactic result is the same, leaving weak and strong generative capacity unchanged, but the derivational and semantic roles are reversed: the embedded subtree is viewed as the functor, and the embedding matrix is viewed as its semantic argument, i.e., as its nuclear scope. On this view, a quantifier taking scope amounts to entering a derivation at the exact moment that its nuclear scope has been constructed. Thus the relationship of a quantifier and its scope is constrained by DERIVATIONAL LOCALITY rather than by elementary-tree locality.", "keyphrases": ["derivational locality", "quantifier scope", "substitution", "cosubstitution"]} +{"id": "ribeiro-etal-2018-local", "title": "Local String Transduction as Sequence Labeling", "abstract": "We show that the general problem of string transduction can be reduced to the problem of sequence labeling. While character deletions and insertions are allowed in string transduction, they do not exist in sequence labeling. We show how to overcome this difference. Our approach can be used with any sequence labeling algorithm and it works best for problems in which string transduction imposes a strong notion of locality (no long-range dependencies). We experiment with spelling correction for social media, OCR correction, and morphological inflection, and we see that it behaves better than seq2seq models and yields state-of-the-art results in several cases.", "keyphrases": ["string transduction", "general problem", "character deletion"]} +{"id": "mi-huang-2008-forest", "title": "Forest-based Translation Rule Extraction", "abstract": "Translation rule extraction is a fundamental problem in machine translation, especially for linguistically syntax-based systems that need parse trees from either or both sides of the bi-text. The current dominant practice only uses 1-best trees, which adversely affects the rule set quality due to parsing errors. So we propose a novel approach which extracts rules from a packed forest that compactly encodes exponentially many parses. Experiments show that this method improves translation quality by over 1 BLEU point on a state-of-the-art tree-to-string system, and is 0.5 points better than (and twice as fast as) extracting on 30-best parses. When combined with our previous work on forest-based decoding, it achieves a 2.5 BLEU point improvement over the baseline, and even outperforms the hierarchical system of Hiero by 0.7 points.", "keyphrases": ["translation rule extraction", "forest", "decoding", "tree-to-string model"]} +{"id": "paul-etal-2009-importance", "title": "On the Importance of Pivot Language Selection for Statistical Machine Translation", "abstract": "Recent research on multilingual statistical machine translation focuses on the usage of pivot languages in order to overcome resource limitations for certain language pairs. Due to the richness of available language resources, English is in general the pivot language of choice. In this paper, we investigate the appropriateness of languages other than English as pivot languages. 
Experimental results using state-of-the-art statistical machine translation techniques to translate between twelve languages revealed that the translation quality of 61 out of 110 language pairs improved when a non-English pivot language was chosen.", "keyphrases": ["pivot language", "statistical machine translation", "recent research"]} +{"id": "minkov-etal-2007-generating", "title": "Generating Complex Morphology for Machine Translation", "abstract": "We present a novel method for predicting inflected word forms for generating morphologically rich languages in machine translation. We utilize a rich set of syntactic and morphological knowledge sources from both source and target sentences in a probabilistic model, and evaluate their contribution in generating Russian and Arabic sentences. Our results show that the proposed model substantially outperforms the commonly used baseline of a trigram target language model; in particular, the use of morphological and syntactic features leads to large gains in prediction accuracy. We also show that the proposed method is effective with a relatively small amount of data.", "keyphrases": ["machine translation", "inflection", "arabic sentence", "post-processing step", "morphological generation"]} +{"id": "bruni-etal-2011-distributional", "title": "Distributional semantics from text and images", "abstract": "We present a distributional semantic model combining text- and image-based features. We evaluate this multimodal semantic model on simulating similarity judgments, concept clustering and the BLESS benchmark. When integrated with the same core text-based model, image-based features are at least as good as further text-based features, and they capture different qualitative aspects of the tasks, suggesting that the two sources of information are complementary.", "keyphrases": ["image", "semantic model", "distributional approach"]} +{"id": "finkel-manning-2009-nested", "title": "Nested Named Entity Recognition", "abstract": "Many named entities contain other named entities inside them. Despite this fact, the field of named entity recognition has almost entirely ignored nested named entity recognition, but due to technological, rather than ideological reasons. In this paper, we present a new technique for recognizing nested named entities, by using a discriminative constituency parser. To train the model, we transform each sentence into a tree, with constituents for each named entity (and no other syntactic structure). We present results on both newspaper and biomedical corpora which contain nested named entities. In three out of four sets of experiments, our model outperforms a standard semi-CRF on the more traditional top-level entities. At the same time, we improve the overall F-score by up to 30% over the flat model, which is unable to recover any nested entities.", "keyphrases": ["named entity recognition", "constituency tree", "node", "parsing method", "crf-based constituency parser"]} +{"id": "sahlgren-lenci-2016-effects", "title": "The Effects of Data Size and Frequency Range on Distributional Semantic Models", "abstract": "This paper investigates the effects of data size and frequency range on distributional semantic models. We compare the performance of a number of representative models for several test settings over data of varying sizes, and over test items of various frequency. 
Our results show that neural network-based models underperform when the data is small, and that the most reliable model over data of varying sizes and frequency ranges is the inverted factorized model.", "keyphrases": ["data size", "frequency range", "distributional semantic model"]} +{"id": "blunsom-osborne-2008-probabilistic", "title": "Probabilistic Inference for Machine Translation", "abstract": "We advance the state-of-the-art for discriminatively trained machine translation systems by presenting novel probabilistic inference and search methods for synchronous grammars. By approximating the intractable space of all candidate translations produced by intersecting an n-gram language model with a synchronous grammar, we are able to train and decode models incorporating millions of sparse, heterogeneous features. Further, we demonstrate the power of the discriminative training paradigm by extracting structured syntactic features, and achieving increases in translation performance.", "keyphrases": ["synchronous grammar", "probabilistic inference", "derivation"]} +{"id": "li-etal-2019-understanding", "title": "Understanding Data Augmentation in Neural Machine Translation: Two Perspectives towards Generalization", "abstract": "Many Data Augmentation (DA) methods have been proposed for neural machine translation. Existing works measure the superiority of DA methods in terms of their performance on a specific test set, but we find that some DA methods do not exhibit consistent improvements across translation tasks. Based on the observation, this paper makes an initial attempt to answer a fundamental question: what benefits, which are consistent across different methods and tasks, does DA in general obtain? Inspired by recent theoretic advances in deep learning, the paper understands DA from two perspectives towards the generalization ability of a model: input sensitivity and prediction margin, which are defined independently of any specific test set and thereby may lead to findings with relatively low variance. Extensive experiments show that relatively consistent benefits across five DA methods and four translation tasks are achieved regarding both perspectives.", "keyphrases": ["data augmentation", "neural machine translation", "prediction margin"]} +{"id": "zhang-etal-2016-mgnc", "title": "MGNC-CNN: A Simple Approach to Exploiting Multiple Word Embeddings for Sentence Classification", "abstract": "We introduce a novel, simple convolutional neural network (CNN) architecture - multi-group norm constraint CNN (MGNC-CNN) that capitalizes on multiple sets of word embeddings for sentence classification. MGNC-CNN extracts features from input embedding sets independently and then joins these at the penultimate layer in the network to form a final feature vector. We then adopt a group regularization strategy that differentially penalizes weights associated with the subcomponents generated from the respective embedding sets. This model is much simpler than comparable alternative architectures and requires substantially less training time. Furthermore, it is flexible in that it does not require input word embeddings to be of the same dimensionality. 
We show that MGNC-CNN consistently outperforms baseline models.", "keyphrases": ["sentence classification", "mgnc-cnn", "convolutional layer"]} +{"id": "williams-2012-extracting", "title": "Extracting fine-grained durations for verbs from Twitter", "abstract": "This paper presents recent work on a new method to automatically extract fine-grained duration information for common verbs using a large corpus of Twitter tweets. Regular expressions were used to extract verbs and durations from each tweet in a corpus of more than 14 million tweets with 90.38% precision covering 486 verb lemmas. Descriptive statistics for each verb lemma were found as well as the most typical fine-grained duration measure. Mean durations were compared with previous work by Gusev et al. (2011) and it was found that there is a small positive correlation.", "keyphrases": ["duration", "twitter", "verb lemmas"]} +{"id": "ballesteros-etal-2015-improved", "title": "Improved Transition-based Parsing by Modeling Characters instead of Words with LSTMs", "abstract": "We present extensions to a continuous-state dependency parsing method that makes it applicable to morphologically rich languages. Starting with a high-performance transition-based parser that uses long short-term memory (LSTM) recurrent neural networks to learn representations of the parser state, we replace lookup-based word representations with representations constructed from the orthographic representations of the words, also using LSTMs. This allows statistical sharing across word forms that are similar on the surface. Experiments for morphologically rich languages show that the parsing model benefits from incorporating the character-based encodings of words.", "keyphrases": ["dependency parsing", "pos tag", "character-based representation", "bidirectional lstm"]} +{"id": "zhou-etal-2020-hierarchy", "title": "Hierarchy-Aware Global Model for Hierarchical Text Classification", "abstract": "Hierarchical text classification is an essential yet challenging subtask of multi-label text classification with a taxonomic hierarchy. Existing methods have difficulties in modeling the hierarchical label structure in a global view. Furthermore, they cannot make full use of the mutual interactions between the text feature space and the label space. In this paper, we formulate the hierarchy as a directed graph and introduce hierarchy-aware structure encoders for modeling label dependencies. Based on the hierarchy encoder, we propose a novel end-to-end hierarchy-aware global model (HiAGM) with two variants. A multi-label attention variant (HiAGM-LA) learns hierarchy-aware label embeddings through the hierarchy encoder and conducts inductive fusion of label-aware text features. A text feature propagation model (HiAGM-TP) is proposed as the deductive variant that directly feeds text features into hierarchy encoders. Compared with previous works, both HiAGM-LA and HiAGM-TP achieve significant and consistent improvements on three benchmark datasets.", "keyphrases": ["hierarchical text classification", "hierarchy-aware global model", "gcn"]} +{"id": "naseem-etal-2010-using", "title": "Using Universal Linguistic Knowledge to Guide Grammar Induction", "abstract": "We present an approach to grammar induction that utilizes syntactic universals to improve dependency parsing across a range of languages. 
Our method uses a single set of manually-specified language-independent rules that identify syntactic dependencies between pairs of syntactic categories that commonly occur across languages. During inference of the probabilistic model, we use posterior expectation constraints to require that a minimum proportion of the dependencies we infer be instances of these rules. We also automatically refine the syntactic categories given in our coarsely tagged input. Across six languages our approach outperforms state-of-the-art unsupervised methods by a significant margin.", "keyphrases": ["grammar induction", "noun", "other work"]} +{"id": "ammar-etal-2016-many", "title": "Many Languages, One Parser", "abstract": "We train one multilingual model for dependency parsing and use it to parse sentences in several languages. The parsing model uses (i) multilingual word clusters and embeddings; (ii) token-level language information; and (iii) language-specific features (fine-grained POS tags). This input representation enables the parser not only to parse effectively in multiple languages, but also to generalize across languages based on linguistic universals and typological similarities, making it more effective to learn from limited annotations. Our parser's performance compares favorably to strong baselines in a range of data scenarios, including when the target language has a large treebank, a small treebank, or no treebank for training.", "keyphrases": ["dependency parsing", "pos tag", "multiple language", "word embedding", "single model"]} +{"id": "chen-etal-2020-recall", "title": "Recall and Learn: Fine-tuning Deep Pretrained Language Models with Less Forgetting", "abstract": "Deep pretrained language models have achieved great success in the way of pretraining first and then fine-tuning. But such a sequential transfer learning paradigm often confronts the catastrophic forgetting problem and leads to sub-optimal performance. To fine-tune with less forgetting, we propose a recall and learn mechanism, which adopts the idea of multi-task learning and jointly learns pretraining tasks and downstream tasks. Specifically, we introduce a Pretraining Simulation mechanism to recall the knowledge from pretraining tasks without data, and an Objective Shifting mechanism to focus the learning on downstream tasks gradually. Experiments show that our method achieves state-of-the-art performance on the GLUE benchmark. Our method also enables BERT-base to achieve better average performance than direct fine-tuning of BERT-large. Further, we provide the open-source RecAdam optimizer, which integrates the proposed mechanisms into the Adam optimizer, to facilitate the NLP community.", "keyphrases": ["fine-tuning", "language model", "forgetting", "recadam", "recall"]} +{"id": "borg-etal-2009-evolutionary", "title": "Evolutionary Algorithms for Definition Extraction", "abstract": "Books and other text-based learning material contain implicit information which can aid the learner but which usually can only be accessed through a semantic analysis of the text. Definitions of new concepts appearing in the text are one such instance. If extracted and presented to the learner in the form of a glossary, they can provide an excellent reference for the study of the main text. One way of extracting definitions is by reading through the text and annotating definitions manually --- a tedious and boring job. 
In this paper, we explore the use of machine learning to extract definitions from nontechnical texts, reducing human expert input to a minimum. We report on experiments we have conducted on the use of genetic programming to learn the typical linguistic forms of definitions and a genetic algorithm to learn the relative importance of these forms. Results are very positive, showing the feasibility of exploring further the use of these techniques in definition extraction. The genetic program is able to learn rules similar to those derived by a human linguistic expert, and the genetic algorithm is able to rank candidate definitions in an order of confidence.", "keyphrases": ["definition extraction", "programming", "linguistic form", "genetic algorithm", "weight"]} +{"id": "joanis-etal-2020-nunavut", "title": "The Nunavut Hansard Inuktitut\u2013English Parallel Corpus 3.0 with Preliminary Machine Translation Results", "abstract": "The Inuktitut language, a member of the Inuit-Yupik-Unangan language family, is spoken across Arctic Canada and noted for its morphological complexity. It is an official language of two territories, Nunavut and the Northwest Territories, and has recognition in additional regions. This paper describes a newly released sentence-aligned Inuktitut\u2013English corpus based on the proceedings of the Legislative Assembly of Nunavut, covering sessions from April 1999 to June 2017. With approximately 1.3 million aligned sentence pairs, this is, to our knowledge, the largest parallel corpus of a polysynthetic language or an Indigenous language of the Americas released to date. The paper describes the alignment methodology used, the evaluation of the alignments, and preliminary experiments on statistical and neural machine translation (SMT and NMT) between Inuktitut and English, in both directions.", "keyphrases": ["parallel corpus", "inuktitut", "official language"]} +{"id": "koo-collins-2010-efficient", "title": "Efficient Third-Order Dependency Parsers", "abstract": "We present algorithms for higher-order dependency parsing that are \"third-order\" in the sense that they can evaluate substructures containing three dependencies, and \"efficient\" in the sense that they require only O(n^4) time. Importantly, our new parsers can utilize both sibling-style and grandchild-style interactions. We evaluate our parsers on the Penn Treebank and Prague Dependency Treebank, achieving unlabeled attachment scores of 93.04% and 87.38%, respectively.", "keyphrases": ["third-order", "dependency parsing", "projective tree", "graph-based model", "global feature"]} +{"id": "cimiano-wenderoth-2007-automatic", "title": "Automatic Acquisition of Ranked Qualia Structures from the Web", "abstract": "This paper presents an approach for the automatic acquisition of qualia structures for nouns from the Web and thus opens the possibility to explore the impact of qualia structures for natural language processing at a larger scale. The approach builds on earlier work based on the idea of matching specific lexico-syntactic patterns conveying a certain semantic relation on the World Wide Web using standard search engines. In our approach, the qualia elements are actually ranked for each qualia role with respect to some measure. The specific contribution of the paper lies in the extensive analysis and quantitative comparison of different measures for ranking the qualia elements. 
Further, for the first time, we present a quantitative evaluation of such an approach for learning qualia structures with respect to a handcrafted gold standard.", "keyphrases": ["acquisition", "qualia structure", "web"]} +{"id": "moldovan-etal-2004-models", "title": "Models for the Semantic Classification of Noun Phrases", "abstract": "This paper presents an approach for detecting semantic relations in noun phrases. A learning algorithm, called semantic scattering, is used to automatically label complex nominals, genitives and adjectival noun phrases with the corresponding semantic relation.", "keyphrases": ["noun phrase", "semantic scattering", "complex nominal", "scheme", "inventory"]} +{"id": "zhang-etal-2016-towards", "title": "Towards Constructing Sports News from Live Text Commentary", "abstract": "In this paper, we investigate the possibility of automatically generating sports news from live text commentary scripts. As a preliminary study, we treat this task as a special kind of document summarization based on sentence extraction. We formulate the task in a supervised learning to rank framework, utilizing both traditional sentence features for generic document summarization and novelly designed task-specific features. To tackle the problem of local redundancy, we also propose a probabilistic sentence selection algorithm. Experiments on our collected data from football live commentary scripts and corresponding sports news demonstrate the feasibility of this task. Evaluation results show that our methods are indeed appropriate for this task, outperforming several baseline methods in different aspects.", "keyphrases": ["sport news", "text commentary script", "document summarization"]} +{"id": "chen-etal-2018-effects", "title": "Effects of Stimulus Duration and Vowel Quality in Tone Perception by English Musicians and Non-musicians", "abstract": "The link between music and language has been a subject of great interest, and evidence suggesting a connection between musical abilities and prosodic processing skills in language is growing. Acoustic fundamental frequency (F0), perceived as pitch, differentiates notes in music and word meaning in lexical tone languages. This study examines categorical perception of pitch stimuli among 14 English musicians and 15 English non-musicians, both groups having no exposure to tonal languages. The stimuli consist of continua of falling and rising F0 contours produced on high and low vowels with 9 different durations. The results revealed that musicians were more sensitive to variation in stimulus duration than non-musicians were, and music experience enhanced the sharpness of category boundaries. Significant main effects of vowel quality and pitch directions as well as two-way interactions between vowel and pitch direction, vowel and duration, group and duration, and pitch direction and duration on identification rate were also found. Formulae for minimum duration required for English musicians and non-musicians to perceive rising and falling F0 were derived, revealing that musicians require less time to perceive a pitch fall and rise if the change is less than 12 semitones.", "keyphrases": ["stimulus duration", "vowel quality", "non-musician"]} +{"id": "chakravarthi-etal-2020-sentiment", "title": "A Sentiment Analysis Dataset for Code-Mixed Malayalam-English", "abstract": "There is an increasing demand for sentiment analysis of text from social media, which is mostly code-mixed. 
Systems trained on monolingual data fail for code-mixed data due to the complexity of mixing at different levels of the text. However, very few resources are available for code-mixed data to create models specific for this data. Although much research in multilingual and cross-lingual sentiment analysis has used semi-supervised or unsupervised methods, supervised methods still perform better. Only a few datasets for popular languages such as English-Spanish, English-Hindi, and English-Chinese are available. There are no resources available for Malayalam-English code-mixed data. This paper presents a new gold standard corpus for sentiment analysis of code-mixed text in Malayalam-English annotated by voluntary annotators. This gold standard corpus obtained a Krippendorff's alpha above 0.8 for the dataset. We use this new corpus to provide the benchmark for sentiment analysis in Malayalam-English code-mixed texts.", "keyphrases": ["sentiment analysis", "code-mixed text", "non-native script", "malayalam language"]} +{"id": "lu-etal-2019-distilling", "title": "Distilling Discrimination and Generalization Knowledge for Event Detection via Delta-Representation Learning", "abstract": "Event detection systems rely on discrimination knowledge to distinguish ambiguous trigger words and generalization knowledge to detect unseen/sparse trigger words. Current neural event detection approaches focus on trigger-centric representations, which work well on distilling discrimination knowledge, but poorly on learning generalization knowledge. To address this problem, this paper proposes a Delta-learning approach to distill discrimination and generalization knowledge by effectively decoupling, incrementally learning and adaptively fusing event representation. Experiments show that our method significantly outperforms previous approaches on unseen/sparse trigger words, and achieves state-of-the-art performance on both ACE2005 and KBP2017 datasets.", "keyphrases": ["discrimination", "generalization knowledge", "event detection"]} +{"id": "rieger-etal-2021-rollinglda-update", "title": "RollingLDA: An Update Algorithm of Latent Dirichlet Allocation to Construct Consistent Time Series from Textual Data", "abstract": "We propose a rolling version of the Latent Dirichlet Allocation, called RollingLDA. By a sequential approach, it enables the construction of LDA-based time series of topics that are consistent with previous states of LDA models. After an initial modeling, updates can be computed efficiently, allowing for real-time monitoring and detection of events or structural breaks. For this purpose, we propose suitable similarity measures for topics and provide simulation evidence of superiority over other commonly used approaches. The adequacy of the resulting method is illustrated by an application to an example corpus. In particular, we compute the similarity of sequentially obtained topic and word distributions over consecutive time periods. For a representative example corpus consisting of The New York Times articles from 1980 to 2020, we analyze the effect of several tuning parameter choices and we run the RollingLDA method on the full dataset of approximately 4 million articles to demonstrate its feasibility.", "keyphrases": ["latent dirichlet allocation", "time series", "rollinglda"]} +{"id": "kang-etal-2017-detecting", "title": "Detecting and Explaining Causes From Text For a Time Series Event", "abstract": "Explaining underlying causes or effects about events is a challenging but valuable task. 
We define a novel problem of generating explanations of a time series event by (1) searching cause and effect relationships of the time series with textual data and (2) constructing a connecting chain between them to generate an explanation. To detect causal features from text, we propose a novel method based on the Granger causality of time series between features extracted from text such as N-grams, topics, sentiments, and their composition. The generation of the sequence of causal entities requires a commonsense causative knowledge base with efficient reasoning. To ensure good interpretability and appropriate lexical usage we combine symbolic and neural representations, using a neural reasoning algorithm trained on commonsense causal tuples to predict the next cause step. Our quantitative and human analysis show empirical evidence that our method successfully extracts meaningful causality relationships between time series with textual features and generates appropriate explanation between them.", "keyphrases": ["time series event", "granger causality", "news", "stock price"]} +{"id": "fang-etal-2019-implicit", "title": "Implicit Deep Latent Variable Models for Text Generation", "abstract": "Deep latent variable models (LVM) such as variational auto-encoder (VAE) have recently played an important role in text generation. One key factor is the exploitation of smooth latent structures to guide the generation. However, the representation power of VAEs is limited due to two reasons: (1) the Gaussian assumption is often made on the variational posteriors; and meanwhile (2) a notorious \u201cposterior collapse\u201d issue occurs. In this paper, we advocate sample-based representations of variational distributions for natural language, leading to implicit latent features, which can provide flexible representation power compared with Gaussian-based posteriors. We further develop an LVM to directly match the aggregated posterior to the prior. It can be viewed as a natural extension of VAEs with a regularization of maximizing mutual information, mitigating the \u201cposterior collapse\u201d issue. We demonstrate the effectiveness and versatility of our models in various text generation scenarios, including language modeling, unaligned style transfer, and dialog response generation. The source code to reproduce our experimental results is available on GitHub.", "keyphrases": ["text generation", "posterior", "sample-based representation", "implicit latent feature"]} +{"id": "pasupat-liang-2015-compositional", "title": "Compositional Semantic Parsing on Semi-Structured Tables", "abstract": "Two important aspects of semantic parsing for question answering are the breadth of the knowledge source and the depth of logical compositionality. While existing work trades off one aspect for another, this paper simultaneously makes progress on both fronts through a new task: answering complex questions on semi-structured tables using question-answer pairs as supervision. The central challenge arises from two compounding factors: the broader domain results in an open-ended set of relations, and the deeper compositionality results in a combinatorial explosion in the space of logical forms. We propose a logical-form driven parsing algorithm guided by strong typing constraints and show that it obtains significant improvements over natural baselines. 
For evaluation, we created a new dataset of 22,033 complex questions on Wikipedia tables, which is made publicly available.", "keyphrases": ["semantic parsing", "table", "wikipedia", "natural language question"]} +{"id": "das-petrov-2011-unsupervised", "title": "Unsupervised Part-of-Speech Tagging with Bilingual Graph-Based Projections", "abstract": "We describe a novel approach for inducing unsupervised part-of-speech taggers for languages that have no labeled training data, but have translated text in a resource-rich language. Our method does not assume any knowledge about the target language (in particular no tagging dictionary is assumed), making it applicable to a wide array of resource-poor languages. We use graph-based label propagation for cross-lingual knowledge transfer and use the projected labels as features in an unsupervised model (Berg-Kirkpatrick et al., 2010). Across eight European languages, our approach results in an average absolute improvement of 10.4% over a state-of-the-art baseline, and 16.7% over vanilla hidden Markov models induced with the Expectation Maximization algorithm.", "keyphrases": ["resource-rich language", "label propagation", "pos tag", "annotation projection", "parallel corpus"]} +{"id": "de-cao-etal-2008-combining", "title": "Combining Word Sense and Usage for Modeling Frame Semantics", "abstract": "Models of lexical semantics are core paradigms in most NLP applications, such as dialogue, information extraction and document understanding. Unfortunately, the coverage of currently available resources (e.g. FrameNet) is still unsatisfactory. This paper presents a largely applicable approach for extending frame semantic resources, combining word sense information derived from WordNet and corpus-based distributional information. We report a large scale evaluation over the English FrameNet, and results on extending FrameNet to the Italian language, as the basis of the development of a full FrameNet for Italian.", "keyphrases": ["frame", "wordnet", "italian"]} +{"id": "sokolova-bobicev-2011-sentiments", "title": "Sentiments and Opinions in Health-related Web messages", "abstract": "In this work, we analyze sentiments and opinions expressed in user-written Web messages. The messages discuss health-related topics: medications, treatment, illness and cure, etc. Recognition of sentiments and opinions is a challenging task for humans as well as for automated text analysis. In this work, we apply both approaches. The paper presents the annotation model, discusses characteristics of subjectivity annotations in health-related messages, and reports the results of the annotation agreement. For external evaluation of the labeling results, we apply Machine Learning methods on the annotated data and present the obtained results.", "keyphrases": ["opinion", "web message", "medication"]} +{"id": "manuvinakurike-etal-2017-using", "title": "Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game", "abstract": "We apply Reinforcement Learning (RL) to the problem of incremental dialogue policy learning in the context of a fast-paced dialogue game. We compare the policy learned by RL with a high-performance baseline policy which has been shown to perform very efficiently (nearly as well as humans) in this dialogue game. The RL policy outperforms the baseline policy in offline simulations (based on real user data). 
We provide a detailed comparison of the RL policy and the baseline policy, including information about how much effort and time it took to develop each one of them. We also highlight the cases where the RL policy performs better, and show that understanding the RL policy can provide valuable insights which can inform the creation of an even better rule-based policy.", "keyphrases": ["reinforcement learning", "fast-paced dialogue game", "dialogue policy learning"]} +{"id": "liang-etal-2021-super", "title": "Super Tickets in Pre-Trained Language Models: From Model Compression to Improving Generalization", "abstract": "The Lottery Ticket Hypothesis suggests that an over-parametrized network consists of \u201clottery tickets\u201d, and training a certain collection of them (i.e., a subnetwork) can match the performance of the full model. In this paper, we study such a collection of tickets, which is referred to as \u201cwinning tickets\u201d, in extremely over-parametrized models, e.g., pre-trained language models. We observe that at certain compression ratios, the generalization performance of the winning tickets can not only match but also exceed that of the full model. In particular, we observe a phase transition phenomenon: As the compression ratio increases, generalization performance of the winning tickets first improves then deteriorates after a certain threshold. We refer to the tickets on the threshold as \u201csuper tickets\u201d. We further show that the phase transition is task and model dependent \u2014 as the model size becomes larger and the training data set becomes smaller, the transition becomes more pronounced. Our experiments on the GLUE benchmark show that the super tickets improve single task fine-tuning by 0.9 points on BERT-base and 1.0 points on BERT-large, in terms of task-average score. We also demonstrate that adaptively sharing the super tickets across tasks benefits multi-task learning.", "keyphrases": ["generalization", "model size", "super ticket"]} +{"id": "poria-etal-2016-deeper", "title": "A Deeper Look into Sarcastic Tweets Using Deep Convolutional Neural Networks", "abstract": "Sarcasm detection is a key task for many natural language processing tasks. In sentiment analysis, for example, sarcasm can flip the polarity of an \u201capparently positive\u201d sentence and, hence, negatively affect polarity detection performance. To date, most approaches to sarcasm detection have treated the task primarily as a text categorization problem. Sarcasm, however, can be expressed in very subtle ways and requires a deeper understanding of natural language that standard text categorization techniques cannot grasp. In this work, we develop models based on a pre-trained convolutional neural network for extracting sentiment, emotion and personality features for sarcasm detection. Such features, along with the network's baseline features, allow the proposed models to outperform the state of the art on benchmark datasets. We also address the often ignored generalizability issue of classifying data that have not been seen by the models at learning phase.", "keyphrases": ["convolutional neural network", "sarcasm", "emotion"]} +{"id": "cano-basave-etal-2014-automatic", "title": "Automatic Labelling of Topic Models Learned from Twitter by Summarisation", "abstract": "Latent topics derived by topic models such as Latent Dirichlet Allocation (LDA) are the result of hidden thematic structures which provide further insights into the data. 
The automatic labelling of such topics derived from social media poses, however, new challenges since topics may characterise novel events happening in the real world. Existing automatic topic labelling approaches which depend on external knowledge sources become less applicable here since relevant articles/concepts of the extracted topics may not exist in external sources. In this paper we propose to address the problem of automatic labelling of latent topics learned from Twitter as a summarisation problem. We introduce a framework which applies summarisation algorithms to generate topic labels. These algorithms are independent of external sources and only rely on the identification of dominant terms in documents related to the latent topic. We compare the efficiency of existing state of the art summarisation algorithms. Our results suggest that summarisation algorithms generate better topic labels which capture event-related context compared to the top-n terms returned by LDA.", "keyphrases": ["twitter", "latent topic", "automatic labelling"]} +{"id": "lin-etal-2017-reasoning", "title": "Reasoning with Heterogeneous Knowledge for Commonsense Machine Comprehension", "abstract": "Reasoning with commonsense knowledge is critical for natural language understanding. Traditional methods for commonsense machine comprehension mostly only focus on one specific kind of knowledge, neglecting the fact that commonsense reasoning requires simultaneously considering different kinds of commonsense knowledge. In this paper, we propose a multi-knowledge reasoning method, which can exploit heterogeneous knowledge for commonsense machine comprehension. Specifically, we first mine different kinds of knowledge (including event narrative knowledge, entity semantic knowledge and sentiment coherent knowledge) and encode them as inference rules with costs. Then we propose a multi-knowledge reasoning model, which selects inference rules for a specific reasoning context using attention mechanism, and reasons by summarizing all valid inference rules. Experiments on RocStories show that our method outperforms traditional models significantly.", "keyphrases": ["heterogeneous knowledge", "different kind", "inference rule"]} +{"id": "list-2012-lexstat", "title": "LexStat: Automatic Detection of Cognates in Multilingual Wordlists", "abstract": "In this paper, a new method for automatic cognate detection in multilingual wordlists will be presented. The main idea behind the method is to combine different approaches to sequence comparison in historical linguistics and evolutionary biology into a new framework which closely models the most important aspects of the comparative method. The method is implemented as a Python program and provides a convenient tool which is publicly available, easily applicable, and open for further testing and improvement. Testing the method on a large gold standard of IPA-encoded wordlists showed that its results are highly consistent and outperform previous methods.", "keyphrases": ["automatic detection", "cognate detection", "lexstat"]} +{"id": "mcdonald-etal-2013-universal", "title": "Universal Dependency Annotation for Multilingual Parsing", "abstract": "We present a new collection of treebanks with homogeneous syntactic dependency annotation for six languages: German, English, Swedish, Spanish, French and Korean. To show the usefulness of such a resource, we present a case study of crosslingual transfer parsing with more reliable evaluation than has been possible before. 
This \u2018universal\u2019 treebank is made freely available in order to facilitate research on multilingual dependency parsing.", "keyphrases": ["swedish", "universal dependency annotation", "schema", "project", "different language"]} +{"id": "barkarson-steingrimsson-2019-compiling", "title": "Compiling and Filtering ParIce: An English-Icelandic Parallel Corpus", "abstract": "We present ParIce, a new English-Icelandic parallel corpus. This is the first parallel corpus built for the purposes of language technology development and research for Icelandic, although some Icelandic texts can be found in various other multilingual parallel corpora. We map out which Icelandic texts are available for these purposes, collect aligned data and align other bilingual texts we acquired. We describe the alignment process and how we filter the data to weed out noise and bad alignments. In total we collected 43 million Icelandic words in 4.3 million aligned segment pairs, but after filtering, our corpus includes 38.8 million Icelandic words in 3.5 million segment pairs. We estimate that approximately 5% of the corpus data is noise or faulty alignments while more than 50% of the segments we deleted were faulty. We estimate that our filtering process reduced the number of faulty segments in the corpus by more than 60% while only reducing the number of good alignments by approximately 8%.", "keyphrases": ["parice", "english-icelandic parallel corpus", "language technology development"]} +{"id": "wu-etal-2009-domain", "title": "Domain adaptive bootstrapping for named entity recognition", "abstract": "Bootstrapping is the process of improving the performance of a trained classifier by iteratively adding data that is labeled by the classifier itself to the training set, and retraining the classifier. It is often used in situations where labeled training data is scarce but unlabeled data is abundant. In this paper, we consider the problem of domain adaptation: the situation where training data may not be scarce, but belongs to a different domain from the target application domain. As the distribution of unlabeled data is different from the training data, standard bootstrapping often has difficulty selecting informative data to add to the training set. We propose an effective domain adaptive bootstrapping algorithm that selects unlabeled target domain data that are informative about the target domain and easy to automatically label correctly. We call these instances bridges, as they are used to bridge the source domain to the target domain. We show that the method outperforms supervised, transductive and bootstrapping algorithms on the named entity recognition task.", "keyphrases": ["bootstrapping", "entity recognition", "unlabeled data", "domain adaptation"]} +{"id": "nie-etal-2020-adversarial", "title": "Adversarial NLI: A New Benchmark for Natural Language Understanding", "abstract": "We introduce a new large-scale NLI benchmark dataset, collected via an iterative, adversarial human-and-model-in-the-loop procedure. We show that training models on this new dataset leads to state-of-the-art performance on a variety of popular NLI benchmarks, while posing a more difficult challenge with its new test set. Our analysis sheds light on the shortcomings of current state-of-the-art models, and shows that non-expert annotators are successful at finding their weaknesses. 
The data collection method can be applied in a never-ending learning scenario, becoming a moving target for NLU, rather than a static benchmark that will quickly saturate.", "keyphrases": ["natural language understanding", "state-of-the-art model", "adversarial nli", "hamlet", "crowdworker"]} +{"id": "belz-etal-2015-describing", "title": "Describing Spatial Relationships between Objects in Images in English and French", "abstract": "The context for the work we report here is the automatic description of spatial relationships between pairs of objects in images. We investigate the task of selecting prepositions for such spatial relationships. We describe the two datasets of object pairs and prepositions we have created for English and French, and report results for predicting prepositions for object pairs in both of these languages, using two methods: (a) an existing approach which manually fixes the mapping from geometrical features to prepositions, and (b) a Naive Bayes classifier trained on the English and French datasets. For the latter we use features based on object class labels and geometrical measurements of object bounding boxes. We evaluate the automatically generated prepositions on unseen data in terms of accuracy against the human-selected prepositions.", "keyphrases": ["spatial relationship", "object", "french"]} +{"id": "xu-etal-2007-seed", "title": "A Seed-driven Bottom-up Machine Learning Framework for Extracting Relations of Various Complexity", "abstract": "A minimally supervised machine learning framework is described for extracting relations of various complexity. Bootstrapping starts from a small set of n-ary relation instances as \u201cseeds\u201d, in order to automatically learn pattern rules from parsed data, which then can extract new instances of the relation and its projections. We propose a novel rule representation enabling the composition of n-ary relation rules on top of the rules for projections of the relation. The compositional approach to rule construction is supported by a bottom-up pattern extraction method. In comparison to other automatic approaches, our rules cannot only localize relation arguments but also assign their exact target argument roles. The method is evaluated in two tasks: the extraction of Nobel Prize awards and management succession events. Performance for the new Nobel Prize task is strong. For the management succession task the results compare favorably with those of existing pattern acquisition approaches.", "keyphrases": ["various complexity", "seed", "relation extraction"]} +{"id": "alberti-etal-2015-improved", "title": "Improved Transition-Based Parsing and Tagging with Neural Networks", "abstract": "We extend and improve upon recent work in structured training for neural network transition-based dependency parsing. We do this by experimenting with novel features, additional transition systems and by testing on a wider array of languages. In particular, we introduce set-valued features to encode the predicted morphological properties and part-of-speech confusion sets of the words being parsed. We also investigate the use of joint parsing and part-of-speech tagging in the neural paradigm. Finally, we conduct a multi-lingual evaluation that demonstrates the robustness of the overall structured neural approach, as well as the benefits of the extensions proposed in this work. 
Our research further demonstrates the breadth of the applicability of neural network methods to dependency parsing, as well as the ease with which new features can be added to neural parsing models.", "keyphrases": ["tagging", "transition system", "dependency parser"]} +{"id": "qadir-etal-2016-automatically", "title": "Automatically Inferring Implicit Properties in Similes", "abstract": "A simile is a figure of speech comparing two fundamentally different things. Sometimes, a simile will explain the basis of a comparison by explicitly mentioning a shared property. For example, \"my room is as cold as Antarctica\" gives \"cold\" as the property shared by the room and Antarctica. But most similes do not give an explicit property (e.g., \"my room feels like Antarctica\") leaving the reader to infer that the room is cold. We tackle the problem of automatically inferring implicit properties evoked by similes. Our approach involves three steps: (1) generating candidate properties from different sources, (2) evaluating properties based on the influence of multiple simile components, and (3) aggregated ranking of the properties. We also present an analysis showing that the difficulty of inferring an implicit property for a simile correlates with its interpretive diversity.", "keyphrases": ["implicit property", "simile", "interpretation"]} +{"id": "kobayashi-ng-2020-bridging", "title": "Bridging Resolution: A Survey of the State of the Art", "abstract": "Bridging reference resolution is an anaphora resolution task that is arguably more challenging and less studied than entity coreference resolution. Given that significant progress has been made on coreference resolution in recent years, we believe that bridging resolution will receive increasing attention in the NLP community. Nevertheless, progress on bridging resolution is currently hampered in part by the scarcity of large annotated corpora for model training as well as the lack of standardized evaluation protocols. This paper presents a survey of the current state of research on bridging reference resolution and discusses future research directions.", "keyphrases": ["resolution", "survey", "anaphora resolution"]} +{"id": "niklaus-etal-2018-survey", "title": "A Survey on Open Information Extraction", "abstract": "We provide a detailed overview of the various approaches that were proposed to date to solve the task of Open Information Extraction. We present the major challenges that such systems face, show the evolution of the suggested approaches over time and depict the specific issues they address. In addition, we provide a critique of the commonly applied evaluation procedures for assessing the performance of Open IE systems and highlight some directions for future work.", "keyphrases": ["survey", "open information extraction", "rule-based system", "oie system", "predicate"]} +{"id": "zhou-etal-2010-active", "title": "Active Deep Networks for Semi-Supervised Sentiment Classification", "abstract": "This paper presents a novel semi-supervised learning algorithm called Active Deep Networks (ADN), to address the semi-supervised sentiment classification problem with active learning. First, we propose the semi-supervised learning method of ADN. ADN is constructed by Restricted Boltzmann Machines (RBM) with unsupervised learning using labeled data and an abundance of unlabeled data. 
Then the constructed structure is fine-tuned by gradient-descent based supervised learning with an exponential loss function. Second, we apply active learning in the semi-supervised learning framework to identify reviews that should be labeled as training data. Then the ADN architecture is trained by the selected labeled data and all unlabeled data. Experiments on five sentiment classification datasets show that ADN outperforms the semi-supervised learning algorithm and deep learning techniques applied for sentiment classification.", "keyphrases": ["semi-supervised sentiment classification", "active learning", "active deep networks"]} +{"id": "pradhan-etal-2011-conll", "title": "CoNLL-2011 Shared Task: Modeling Unrestricted Coreference in OntoNotes", "abstract": "The CoNLL-2011 shared task involved predicting coreference using OntoNotes data. Resources in this field have tended to be limited to noun phrase coreference, often on a restricted set of entities, such as ACE entities. OntoNotes provides a large-scale corpus of general anaphoric coreference not restricted to noun phrases or to a specified set of entity types. OntoNotes also provides additional layers of integrated annotation, capturing additional shallow semantic structure. This paper briefly describes the OntoNotes annotation (coreference and other layers) and then describes the parameters of the shared task including the format, pre-processing information, and evaluation criteria, and presents and discusses the results achieved by the participating systems. Having a standard test set and evaluation parameters, all based on a new resource that provides multiple integrated annotation layers (parses, semantic roles, word senses, named entities and coreference) that could support joint models, should help to energize ongoing research in the task of entity and event coreference.", "keyphrases": ["unrestricted coreference", "ontonotes", "noun phrase"]} +{"id": "khapra-etal-2010-words", "title": "All Words Domain Adapted WSD: Finding a Middle Ground between Supervision and Unsupervision", "abstract": "In spite of decades of research on word sense disambiguation (WSD), all-words general purpose WSD has remained a distant goal. Many supervised WSD systems have been built, but the effort of creating the training corpus - annotated sense marked corpora - has always been a matter of concern. Therefore, attempts have been made to develop unsupervised and knowledge based techniques for WSD which do not need sense marked corpora. However such approaches have not proved effective, since they typically do not better Wordnet first sense baseline accuracy. Our research reported here proposes to stick to the supervised approach, but with far less demand on annotation. We show that if we have ANY sense marked corpora, be it from mixed domain or a specific domain, a small amount of annotation in ANY other domain can deliver the goods almost as if exhaustive sense marking were available in that domain. We have tested our approach across Tourism and Health domain corpora, using also the well known mixed domain SemCor corpus. Accuracy figures close to self domain training lend credence to the viability of our approach. Our contribution thus lies in finding a convenient middle ground between pure supervised and pure unsupervised WSD. 
Finally, our approach is not restricted to any specific set of target words, a departure from a commonly observed practice in domain specific WSD.", "keyphrases": ["wsd", "middle ground", "semi-supervised approach"]} +{"id": "rudra-etal-2016-understanding", "title": "Understanding Language Preference for Expression of Opinion and Sentiment: What do Hindi-English Speakers do on Twitter?", "abstract": "Linguistic research on multilingual societies has indicated that there is usually a preferred language for expression of emotion and sentiment (Dewaele, 2010). Paucity of data has limited such studies to participant interviews and speech transcriptions from small groups of speakers. In this paper, we report a study on 430,000 unique tweets from Indian users, specifically Hindi-English bilinguals, to understand the language of preference, if any, for expressing opinion and sentiment. To this end, we develop classifiers for opinion detection in these languages, and further classifying opinionated tweets into positive, negative and neutral sentiments. Our study indicates that Hindi (i.e., the native language) is preferred over English for expression of negative opinion and swearing. As an aside, we explore some common pragmatic functions of code-switching through sentiment detection.", "keyphrases": ["opinion", "twitter", "hindi-english bilingual", "emotion expression"]} +{"id": "yimam-etal-2017-multilingual", "title": "Multilingual and Cross-Lingual Complex Word Identification", "abstract": "Complex Word Identification (CWI) is an important task in lexical simplification and text accessibility. Due to the lack of CWI datasets, previous works largely depend on Simple English Wikipedia and edit histories for obtaining `gold standard' annotations, which are of doubtful quality, and limited only to English. We collect complex words/phrases (CP) for English, German and Spanish, annotated by both native and non-native speakers, and propose language independent features that can be used to train multilingual and cross-lingual CWI models. We show that the performance of cross-lingual CWI systems (using a model trained on one language and applying it on the other languages) is comparable to the performance of monolingual CWI systems.", "keyphrases": ["complex word identification", "spanish", "non-native speaker", "difficulty"]} +{"id": "giampiccolo-etal-2007-third", "title": "The Third PASCAL Recognizing Textual Entailment Challenge", "abstract": "This paper presents the Third PASCAL Recognising Textual Entailment Challenge (RTE-3), providing an overview of the dataset creation methodology and the submitted systems. In creating this year\u2019s dataset, a number of longer texts were introduced to make the challenge more oriented to realistic scenarios. Additionally, a pool of resources was offered so that the participants could share common tools. A pilot task was also set up, aimed at differentiating unknown entailments from identified contradictions and providing justifications for overall system decisions. 26 participants submitted 44 runs, using different approaches and generally presenting new entailment models and achieving higher scores than in the previous challenges.", "keyphrases": ["pascal", "textual entailment challenge", "hypothesis", "question answering"]} +{"id": "temnikova-2010-cognitive", "title": "Cognitive Evaluation Approach for a Controlled Language Post-Editing Experiment", "abstract": "In emergency situations it is crucial that instructions are straightforward to understand. 
For this reason a controlled language for crisis management (CLCM), based on psycholinguistic studies of human comprehension under stress, was developed. In order to test the impact of CLCM machine translatability of this particular kind of sub-language text, a previous experiment involving machine translation and human post-editing has been conducted. Employing two automatic evaluation metrics, a previous evaluation of the experiment has proved that instructions written according to this CL can improve machine translation (MT) performance. This paper presents a new cognitive evaluation approach for MT post-editing, which is tested on the previous controlled and uncontrolled textual data. The presented evaluation approach allows a deeper look into the post-editing process and specifically how much effort post-editors put into correcting the different kinds of MT errors. The method is based on existing MT error classification, which is enriched with a new error ranking motivated by the cognitive effort involved in the detection and correction of these MT errors. The preliminary results of applying this approach to a subset of the original data confirmed once again the positive impact of CLCM on emergency instructions' machine translatability and thus the validity of the approach.", "keyphrases": ["post-editor", "cognitive evaluation approach", "edit operation"]} +{"id": "kiddon-etal-2016-globally", "title": "Globally Coherent Text Generation with Neural Checklist Models", "abstract": "Recurrent neural networks can generate locally coherent text but often have difficulties representing what has already been generated and what still needs to be said \u2013 especially when constructing long texts. We present the neural checklist model, a recurrent neural network that models global coherence by storing and updating an agenda of text strings which should be mentioned somewhere in the output. The model generates output by dynamically adjusting the interpolation among a language model and a pair of attention models that encourage references to agenda items. Evaluations on cooking recipes and dialogue system responses demonstrate high coherence with greatly improved semantic coverage of the agenda.", "keyphrases": ["coherence", "neural checklist model", "language model", "recipe", "story"]} +{"id": "okanohara-tsujii-2007-discriminative", "title": "A discriminative language model with pseudo-negative samples", "abstract": "In this paper, we propose a novel discriminative language model, which can be applied quite generally. Compared to the well known N-gram language models, discriminative language models can achieve more accurate discrimination because they can employ overlapping features and nonlocal information. However, discriminative language models have been used only for re-ranking in specific applications because negative examples are not available. We propose sampling pseudo-negative examples taken from probabilistic language models. However, this approach requires prohibitive computational cost if we are dealing with quite a few features and training samples. We tackle the problem by estimating the latent information in sentences using a semi-Markov class model, and then extracting features from them. We also use an online margin-based algorithm with efficient kernel computation. 
Experimental results show that pseudo-negative examples can be treated as real negative examples and our model can classify these sentences correctly.", "keyphrases": ["language model", "sample", "specific application", "negative example", "start symbol"]} +{"id": "shaalan-2014-survey", "title": "A Survey of Arabic Named Entity Recognition and Classification", "abstract": "As more and more Arabic textual information becomes available through the Web in homes and businesses, via Internet and Intranet services, there is an urgent need for technologies and tools to process the relevant information. Named Entity Recognition (NER) is an Information Extraction task that has become an integral part of many other Natural Language Processing (NLP) tasks, such as Machine Translation and Information Retrieval. Arabic NER has begun to receive attention in recent years. The characteristics and peculiarities of Arabic, a member of the Semitic languages family, make dealing with NER a challenge. The performance of an Arabic NER component affects the overall performance of the NLP system in a positive manner. This article attempts to describe and detail the recent increase in interest and progress made in Arabic NER research. The importance of the NER task is demonstrated, the main characteristics of the Arabic language are highlighted, and the aspects of standardization in annotating named entities are illustrated. Moreover, the different Arabic linguistic resources are presented and the approaches used in the Arabic NER field are explained. The features of common tools used in Arabic NER are described, and standard evaluation metrics are illustrated. In addition, a review of the state of the art of Arabic NER research is discussed. Finally, we present our conclusions. Throughout the presentation, illustrative examples are used for clarification.", "keyphrases": ["arabic", "entity recognition", "affix"]} +{"id": "yang-cardie-2013-joint", "title": "Joint Inference for Fine-grained Opinion Extraction", "abstract": "This paper addresses the task of fine-grained opinion extraction \u2010 the identification of opinion-related entities: the opinion expressions, the opinion holders, and the targets of the opinions, as well as the relations between opinion expressions and their targets and holders. Most existing approaches tackle the extraction of opinion entities and opinion relations in a pipelined manner, where the interdependencies among different extraction stages are not captured. We propose a joint inference model that leverages knowledge from predictors that optimize subtasks of opinion extraction, and seeks a globally optimal solution. Experimental results demonstrate that our joint inference approach significantly outperforms traditional pipeline methods and baselines that tackle subtasks in isolation for the problem of opinion extraction.", "keyphrases": ["opinion expression", "joint inference", "pipelined approach"]} +{"id": "geiger-etal-2019-posing", "title": "Posing Fair Generalization Tasks for Natural Language Inference", "abstract": "Deep learning models for semantics are generally evaluated using naturalistic corpora. Adversarial testing methods, in which models are evaluated on new examples with known semantic properties, have begun to reveal that good performance at these naturalistic tasks can hide serious shortcomings. However, we should insist that these evaluations be fair \u2013 that the models are given data sufficient to support the requisite kinds of generalization. 
In this paper, we define and motivate a formal notion of fairness in this sense. We then apply these ideas to natural language inference by constructing very challenging but provably fair artificial datasets and showing that standard neural models fail to generalize in the required ways; only task-specific models that jointly compose the premise and hypothesis are able to achieve high performance, and even these models do not solve the task perfectly.", "keyphrases": ["natural language inference", "synthetic data", "such perfection"]} +{"id": "mohammad-etal-2007-cross", "title": "Cross-Lingual Distributional Profiles of Concepts for Measuring Semantic Distance", "abstract": "We present the idea of estimating semantic distance in one, possibly resource-poor, language using a knowledge source in another, possibly resource-rich, language. We do so by creating cross-lingual distributional profiles of concepts, using a bilingual lexicon and a bootstrapping algorithm, but without the use of any sense-annotated data or word-aligned corpora. The cross-lingual measures of semantic distance are evaluated on two tasks: (1) estimating semantic distance between words and ranking the word pairs according to semantic distance, and (2) solving Reader\u2019s Digest \u2018Word Power\u2019 problems. In task (1), cross-lingual measures are superior to conventional monolingual measures based on a wordnet. In task (2), cross-lingual measures are able to solve more problems correctly, and despite scores being affected by many tied answers, their overall performance is again better than the best monolingual measures.", "keyphrases": ["semantic distance", "bilingual lexicon", "cross-lingual distributional profile", "thesaurus"]} +{"id": "dong-de-melo-2019-robust", "title": "A Robust Self-Learning Framework for Cross-Lingual Text Classification", "abstract": "Based on massive amounts of data, recent pretrained contextual representation models have made significant strides in advancing a number of different English NLP tasks. However, for other languages, relevant training data may be lacking, while state-of-the-art deep learning methods are known to be data-hungry. In this paper, we present an elegantly simple robust self-learning framework to include unlabeled non-English samples in the fine-tuning process of pretrained multilingual representation models. We leverage a multilingual model's own predictions on unlabeled non-English data in order to obtain additional information that can be used during further fine-tuning. Compared with original multilingual models and other cross-lingual classification models, we observe significant gains in effectiveness on document and sentiment classification for a range of diverse languages.", "keyphrases": ["self-learning framework", "learning method", "unlabeled data"]} +{"id": "hanselowski-etal-2018-ukp", "title": "UKP-Athene: Multi-Sentence Textual Entailment for Claim Verification", "abstract": "The Fact Extraction and VERification (FEVER) shared task was launched to support the development of systems able to verify claims by extracting supporting or refuting facts from raw text. The shared task organizers provide a large-scale dataset for the consecutive steps involved in claim verification, in particular, document retrieval, fact extraction, and claim classification. In this paper, we present our claim verification pipeline approach, which, according to the preliminary results, scored third in the shared task, out of 23 competing systems. 
For the document retrieval, we implemented a new entity linking approach. In order to be able to rank candidate facts and classify a claim on the basis of several selected facts, we introduce two extensions to the Enhanced LSTM (ESIM).", "keyphrases": ["claim verification", "mediawiki api", "evidence sentence"]} +{"id": "zhang-etal-2020-dialogpt", "title": "DIALOGPT : Large-Scale Generative Pre-training for Conversational Response Generation", "abstract": "We present a large, tunable neural conversational response generation model, DIALOGPT (dialogue generative pre-trained transformer). Trained on 147M conversation-like exchanges extracted from Reddit comment chains over a period spanning from 2005 through 2017, DialoGPT extends the Hugging Face PyTorch transformer to attain a performance close to human both in terms of automatic and human evaluation in single-turn dialogue settings. We show that conversational systems that leverage DialoGPT generate more relevant, contentful and context-consistent responses than strong baseline systems. The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.", "keyphrases": ["conversational response generation", "response generation model", "open-domain dialogue system", "pre-trained language model", "generation task"]} +{"id": "songyot-chiang-2014-improving", "title": "Improving Word Alignment using Word Similarity", "abstract": "We show that semantic relationships can be used to improve word alignment, in addition to the lexical and syntactic features that are typically used. In this paper, we present a method based on a neural network to automatically derive word similarity from monolingual data. We present an extension to word alignment models that exploits word similarity. Our experiments, in both large-scale and resource-limited settings, show improvements in word alignment tasks as well as translation tasks.", "keyphrases": ["word alignment", "semantic knowledge", "few study", "giza++", "feedforward neural network"]} +{"id": "wang-etal-2010-structuring", "title": "Re-structuring, Re-labeling, and Re-aligning for Syntax-Based Machine Translation", "abstract": "This article shows that the structure of bilingual material from standard parsing and alignment tools is not optimal for training syntax-based statistical machine translation (SMT) systems. We present three modifications to the MT training data to improve the accuracy of a state-of-the-art syntax MT system: re-structuring changes the syntactic structure of training parse trees to enable reuse of substructures; re-labeling alters bracket labels to enrich rule application context; and re-aligning unifies word alignment across sentences to remove bad word alignments and refine good ones. Better structures, labels, and word alignments are learned by the EM algorithm. We show that each individual technique leads to improvement as measured by BLEU, and we also show that the greatest improvement is achieved by combining them. 
We report an overall 1.48 BLEU improvement on the NIST08 evaluation set over a strong baseline in Chinese/English translation.", "keyphrases": ["machine translation", "alignment tool", "syntax-based model", "rule extraction"]} +{"id": "eyal-etal-2019-question", "title": "Question Answering as an Automatic Evaluation Metric for News Article Summarization", "abstract": "Recent work in the field of automatic summarization and headline generation focuses on maximizing ROUGE scores for various news datasets. We present an alternative, extrinsic, evaluation metric for this task, Answering Performance for Evaluation of Summaries. APES utilizes recent progress in the field of reading comprehension to quantify the ability of a summary to answer a set of manually created questions regarding central entities in the source article. We first analyze the strength of this metric by comparing it to known manual evaluation metrics. We then present an end-to-end neural abstractive model that maximizes APES, while increasing ROUGE scores to competitive results.", "keyphrases": ["evaluation metric", "summarization", "apes", "question answering"]} +{"id": "roark-hollingshead-2008-classifying", "title": "Classifying Chart Cells for Quadratic Complexity Context-Free Inference", "abstract": "In this paper, we consider classifying word positions by whether or not they can either start or end multi-word constituents. This provides a mechanism for \"closing\" chart cells during context-free inference, which is demonstrated to improve efficiency and accuracy when used to constrain the well-known Charniak parser. Additionally, we present a method for \"closing\" a sufficient number of chart cells to ensure quadratic worst-case complexity of context-free inference. Empirical results show that this O(n2) bound can be achieved without impacting parsing accuracy.", "keyphrases": ["chart cell", "complexity", "multi-word constituent"]} +{"id": "goh-etal-2004-chinese", "title": "Chinese Word Segmentation by Classification of Characters", "abstract": "During the process of Chinese word segmentation, two main problems occur: segmentation ambiguities and unknown word occurrences. This paper describes a method to solve the segmentation problem. First, we use a dictionary-based approach to segment the text. We apply the Maximum Matching algorithm to segment the text forwards (FMM) and backwards (BMM). Based on the difference between FMM and BMM, and the context, we apply a classification method based on Support Vector Machines to re-assign the word boundaries. In so doing, we use the output of a dictionary-based approach, and then apply a machine-learning-based approach to solve the segmentation problem. Experimental results show that our model can achieve an F-measure of 99.0 for overall segmentation, given the condition that there are no unknown words in the text, and an F-measure of 95.1 if unknown words exist.", "keyphrases": ["word segmentation", "dictionary-based approach", "machine-learning-based approach"]} +{"id": "fersini-etal-2022-semeval", "title": "SemEval-2022 Task 5: Multimedia Automatic Misogyny Identification", "abstract": "The paper describes the SemEval-2022 Task 5: Multimedia Automatic Misogyny Identification (MAMI), which explores the detection of misogynous memes on the web by taking advantage of available texts and images. 
The task has been organised in two related sub-tasks: the first one is focused on recognising whether a meme is misogynous or not (Sub-task A), while the second one is devoted to recognising types of misogyny (Sub-task B). MAMI has been one of the most popular tasks at SemEval-2022 with more than 400 participants, 65 teams involved in Sub-task A and 41 in Sub-task B from 13 countries. The MAMI challenge received 4214 submitted runs (of which 166 were uploaded to the leaderboard), indicating enthusiastic participation in the proposed problem. The collection and annotation of the task dataset are described. The paper provides an overview of the systems proposed for the challenge, reports the results achieved in both sub-tasks and outlines a description of the main errors for an understanding of the systems' capabilities and for detailing future research perspectives.", "keyphrases": ["misogyny", "meme", "semeval-2022 task"]} +{"id": "escalante-etal-2011-local", "title": "Local Histograms of Character N-grams for Authorship Attribution", "abstract": "This paper proposes the use of local histograms (LH) over character n-grams for authorship attribution (AA). LHs are enriched histogram representations that preserve sequential information in documents; they have been successfully used for text categorization and document visualization using word histograms. In this work we explore the suitability of LHs over n-grams at the character level for AA. We show that LHs are particularly helpful for AA, because they provide useful information for uncovering, to some extent, the writing style of authors. We report experimental results in AA data sets that confirm that LHs over character n-grams are more helpful for AA than the usual global histograms, yielding results far superior to state-of-the-art approaches. We found that LHs are even more advantageous in challenging conditions, such as having imbalanced and small training sets. Our results motivate further research on the use of LHs for modeling the writing style of authors for related tasks, such as authorship verification and plagiarism detection.", "keyphrases": ["histogram", "authorship attribution", "text analysis task", "impressive performance level"]} +{"id": "chen-etal-2018-best", "title": "The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation", "abstract": "The past year has witnessed rapid advances in sequence-to-sequence (seq2seq) modeling for Machine Translation (MT). The classic RNN-based approaches to MT were first outperformed by the convolutional seq2seq model, which was then outperformed by the more recent Transformer model. Each of these new approaches consists of a fundamental architecture accompanied by a set of modeling and training techniques that are in principle applicable to other seq2seq architectures. In this paper, we tease apart the new architectures and their accompanying techniques in two ways. First, we identify several key modeling and training techniques, and apply them to the RNN architecture, yielding a new RNMT+ model that outperforms all of the three fundamental architectures on the benchmark WMT'14 English to French and English to German tasks. Second, we analyze the properties of each fundamental seq2seq architecture and devise new hybrid architectures intended to combine their strengths. 
Our hybrid models obtain further improvements, outperforming the RNMT+ model on both benchmark datasets.", "keyphrases": ["neural machine translation", "rnmt+ model", "strength", "multi-head attention", "rnn-based encoder"]} +{"id": "levy-etal-2017-zero", "title": "Zero-Shot Relation Extraction via Reading Comprehension", "abstract": "We show that relation extraction can be reduced to answering simple reading comprehension questions, by associating one or more natural-language questions with each relation slot. This reduction has several advantages: we can (1) learn relation-extraction models by extending recent neural reading-comprehension techniques, (2) build very large training sets for those models by combining relation-specific crowd-sourced questions with distant supervision, and even (3) do zero-shot learning by extracting new relation types that are only specified at test-time, for which we have no labeled training examples. Experiments on a Wikipedia slot-filling task demonstrate that the approach can generalize to new questions for known relation types with high accuracy, and that zero-shot generalization to unseen relation types is possible, at lower accuracy levels, setting the bar for future work on this task.", "keyphrases": ["comprehension", "slot-filling task", "zero-shot relation extraction", "relation extraction task", "language question"]} +{"id": "wu-etal-2019-depth", "title": "Depth Growing for Neural Machine Translation", "abstract": "While very deep neural networks have shown effectiveness for computer vision and text classification applications, how to increase the network depth of the neural machine translation (NMT) models for better translation quality remains a challenging problem. Directly stacking more blocks onto the NMT model results in no improvement and even a drop in performance. In this work, we propose an effective two-stage approach with three specially designed components to construct deeper NMT models, which results in significant improvements over the strong Transformer baselines on WMT14 English\u2192German and English\u2192French translation tasks.", "keyphrases": ["neural machine translation", "nmt model", "depth"]} +{"id": "fabbri-etal-2019-multi", "title": "Multi-News: A Large-Scale Multi-Document Summarization Dataset and Abstractive Hierarchical Model", "abstract": "Automatic generation of summaries from multiple news articles is a valuable tool as the number of online publications grows rapidly. Single document summarization (SDS) systems have benefited from advances in neural encoder-decoder models thanks to the availability of large datasets. However, multi-document summarization (MDS) of news articles has been limited to datasets of a couple of hundred examples. In this paper, we introduce Multi-News, the first large-scale MDS news dataset. Additionally, we propose an end-to-end model which combines a traditional extractive summarization model with a standard SDS model and achieves competitive results on MDS datasets. 
We benchmark several methods on Multi-News and hope that this work will promote advances in summarization in the multi-document setting.", "keyphrases": ["summarization", "large dataset", "mds news dataset", "multi-new"]} +{"id": "vu-etal-2018-sentence", "title": "Sentence Simplification with Memory-Augmented Neural Networks", "abstract": "Sentence simplification aims to simplify the content and structure of complex sentences, and thus make them easier to interpret for human readers, and easier to process for downstream NLP applications. Recent advances in neural machine translation have paved the way for novel approaches to the task. In this paper, we adapt an architecture with augmented memory capacities called Neural Semantic Encoders (Munkhdalai and Yu, 2017) for sentence simplification. Our experiments demonstrate the effectiveness of our approach on different simplification datasets, both in terms of automatic evaluation measures and human judgments.", "keyphrases": ["memory-augmented neural network", "neural semantic encoders", "sentence simplification"]} +{"id": "johansson-moschitti-2013-relational", "title": "Relational Features in Fine-Grained Opinion Analysis", "abstract": "Fine-grained opinion analysis methods often make use of linguistic features but typically do not take the interaction between opinions into account. This article describes a set of experiments that demonstrate that relational features, mainly derived from dependency-syntactic and semantic role structures, can significantly improve the performance of automatic systems for a number of fine-grained opinion analysis tasks: marking up opinion expressions, finding opinion holders, and determining the polarities of opinion expressions. These features make it possible to model the way opinions expressed in natural-language discourse interact in a sentence over arbitrary distances. The use of relations requires us to consider multiple opinions simultaneously, which makes the search for the optimal analysis intractable. However, a reranker can be used as a sufficiently accurate and efficient approximation.A number of feature sets and machine learning approaches for the rerankers are evaluated. For the task of opinion expression extraction, the best model shows a 10-point absolute improvement in soft recall on the MPQA corpus over a conventional sequence labeler based on local contextual features, while precision decreases only slightly. Significant improvements are also seen for the extended tasks where holders and polarities are considered: 10 and 7 points in recall, respectively. In addition, the systems outperform previously published results for unlabeled (6 F-measure points) and polarity-labeled (10\u201315 points) opinion expression extraction. Finally, as an extrinsic evaluation, the extracted MPQA-style opinion expressions are used in practical opinion mining tasks. In all scenarios considered, the machine learning features derived from the opinion expressions lead to statistically significant improvements.", "keyphrases": ["opinion expression", "reranker", "relational feature"]} +{"id": "liu-etal-2018-stochastic", "title": "Stochastic Answer Networks for Machine Reading Comprehension", "abstract": "We propose a simple yet robust stochastic answer network (SAN) that simulates multi-step reasoning in machine reading comprehension. 
Compared to previous work such as ReasoNet, which used reinforcement learning to determine the number of steps, the unique feature of SAN is the use of a kind of stochastic prediction dropout on the answer module (final layer) of the neural network during training. We show that this simple trick improves robustness and achieves results competitive with the state of the art on the Stanford Question Answering Dataset (SQuAD), the Adversarial SQuAD, and the Microsoft MAchine Reading COmprehension Dataset (MS MARCO).", "keyphrases": ["machine reading comprehension", "multi-step reasoning", "final layer", "stochastic answer network", "memory network"]} +{"id": "womack-etal-2012-disfluencies", "title": "Disfluencies as Extra-Propositional Indicators of Cognitive Processing", "abstract": "We explore filled pause usage in spontaneous medical narration. Expert physicians viewed images of dermatological conditions and provided a description while working toward a diagnosis. The narratives were analyzed for differences in filled pauses used by attending (experienced) and resident (in-training) physicians and by male and female physicians. Attending physicians described more and used more filled pauses than residents. No difference was found by speaker gender. Acoustic speech features were examined for two types of filled pauses: nasal (e.g. um) and non-nasal (e.g. uh). Nasal filled pauses were more often followed by longer silent pauses. Scores capturing diagnostic correctness and diagnostic thoroughness for each narrative were compared against filled pauses. The number of filled and silent pauses trends upward as correctness scores increase, indicating a tentative relationship between filled pause usage and expertise. Also, we report on a computational model for predicting types of filled pause.", "keyphrases": ["extra-propositional indicator", "cognitive processing", "disfluency"]} +{"id": "weiss-etal-2018-practical", "title": "On the Practical Computational Power of Finite Precision RNNs for Language Recognition", "abstract": "While Recurrent Neural Networks (RNNs) are famously known to be Turing complete, this relies on infinite precision in the states and unbounded computation time. We consider the case of RNNs with finite precision whose computation time is linear in the input length. Under these limitations, we show that different RNN variants have different computational power. In particular, we show that the LSTM and the Elman-RNN with ReLU activation are strictly stronger than the RNN with a squashing activation and the GRU. This is achieved because LSTMs and ReLU-RNNs can easily implement counting behavior. We show empirically that the LSTM does indeed learn to effectively use the counting mechanism.", "keyphrases": ["power", "rnn", "formal language", "k-counter machine"]} +{"id": "tang-etal-2016-effective", "title": "Effective LSTMs for Target-Dependent Sentiment Classification", "abstract": "Target-dependent sentiment classification remains a challenge: modeling the semantic relatedness of a target with its context words in a sentence. Different context words have different influences on determining the sentiment polarity of a sentence towards the target. Therefore, it is desirable to integrate the connections between target word and context words when building a learning system. In this paper, we develop two target-dependent long short-term memory (LSTM) models, where target information is automatically taken into account. We evaluate our methods on a benchmark dataset from Twitter. 
Empirical results show that modeling sentence representation with standard LSTM does not perform well. Incorporating target information into LSTM can significantly boost the classification accuracy. The target-dependent LSTM models achieve state-of-the-art performance without using a syntactic parser or external sentiment lexicons.", "keyphrases": ["sentiment classification", "semantic relatedness", "target-dependent lstm", "deep learning", "input sentence"]} +{"id": "troiano-etal-2018-computational", "title": "A Computational Exploration of Exaggeration", "abstract": "Several NLP studies address the problem of figurative language, but among non-literal phenomena, they have neglected exaggeration. This paper presents a first computational approach to this figure of speech. We explore the possibility of automatically detecting exaggerated sentences. First, we introduce HYPO, a corpus containing overstatements (or hyperboles) collected on the web and validated via crowdsourcing. Then, we evaluate a number of models trained on HYPO, and provide evidence that the task of hyperbole identification can be successfully performed based on a small set of semantic features.", "keyphrases": ["exaggeration", "figure", "counterpart"]} +{"id": "martin-etal-2020-controllable", "title": "Controllable Sentence Simplification", "abstract": "Text simplification aims at making a text easier to read and understand by simplifying grammar and structure while keeping the underlying information identical. It is often considered an all-purpose generic task where the same simplification is suitable for all; however, multiple audiences can benefit from simplified text in different ways. We adapt a discrete parametrization mechanism that provides explicit control over simplification systems based on Sequence-to-Sequence models. As a result, users can condition the simplifications returned by a model on attributes such as length, amount of paraphrasing, lexical complexity and syntactic complexity. We also show that carefully chosen values of these attributes allow out-of-the-box Sequence-to-Sequence models to outperform their standard counterparts on simplification benchmarks. Our model, which we call ACCESS (as shorthand for AudienCe-CEntric Sentence Simplification), establishes the state of the art at 41.87 SARI on the WikiLarge test set, a +1.42 improvement over the best previously reported score.", "keyphrases": ["simplification", "attribute", "length", "paraphrasing", "syntactic complexity"]} +{"id": "nuhn-etal-2013-beam", "title": "Beam Search for Solving Substitution Ciphers", "abstract": "In this paper we address the problem of solving substitution ciphers using a beam search approach. We present a conceptually consistent and easy-to-implement method that improves the current state of the art for decipherment of substitution ciphers and is able to use high-order n-gram language models. We show experiments with 1:1 substitution ciphers in which the guaranteed optimal solution for 3-gram language models has 38.6% decipherment error, while our approach achieves 4.13% decipherment error in a fraction of the time by using a 6-gram language model. We also apply our approach to the famous Zodiac-408 cipher and obtain slightly better (and near-optimal) results than previously published. Unlike the previous state-of-the-art approach that uses additional word lists to evaluate possible decipherments, our approach only uses a letter-based 6-gram language model. 
Furthermore, we use our algorithm to solve large vocabulary substitution ciphers and improve the best published decipherment error rate on the Gigaword corpus from 7.8% to 6.0%.", "keyphrases": ["art", "decipherment", "beam search"]} +{"id": "luan-etal-2017-multi", "title": "Multi-Task Learning for Speaker-Role Adaptation in Neural Conversation Models", "abstract": "Building a persona-based conversation agent is challenging owing to the lack of large amounts of speaker-specific conversation data for model training. This paper addresses the problem by proposing a multi-task learning approach to training neural conversation models that leverages both conversation data across speakers and other types of data pertaining to the speaker and speaker roles to be modeled. Experiments show that our approach leads to significant improvements over baseline model quality, generating responses that capture more precisely speakers' traits and speaking styles. The model offers the benefits of being algorithmically simple and easy to implement, and not relying on large quantities of data representing specific individual speakers.", "keyphrases": ["conversation data", "multi-task learning", "autoencoder"]} +{"id": "shen-evang-2022-drs", "title": "DRS Parsing as Sequence Labeling", "abstract": "We present the first fully trainable semantic parser for English, German, Italian, and Dutch discourse representation structures (DRSs) that is competitive in accuracy with recent sequence-to-sequence models and at the same time compositional in the sense that the output maps each token to one of a finite set of meaning fragments, and the meaning of the utterance is a function of the meanings of its parts. We argue that this property makes the system more transparent and more useful for human-in-the-loop annotation. We achieve this simply by casting DRS parsing as a sequence labeling task, where tokens are labeled with both fragments (lists of abstracted clauses with relative referent indices indicating unification) and symbols like word senses or names. We give a comprehensive error analysis that highlights areas for future work.", "keyphrases": ["sequence-to-sequence model", "same time", "drs parsing"]} +{"id": "cybulska-vossen-2014-using", "title": "Using a sledgehammer to crack a nut? Lexical diversity and event coreference resolution", "abstract": "In this paper we examine the representativeness of the EventCorefBank (ECB, Bejan and Harabagiu, 2010) with regard to the language population of large-volume streams of news. The ECB corpus is one of the data sets used for evaluation of the task of event coreference resolution. Our analysis shows that the ECB in most cases covers one seminal event per domain, which considerably simplifies the event and thus language diversity that one comes across in the news. We augmented the corpus with a new corpus component, consisting of 502 texts, describing different instances of event types that were already captured by the 43 topics of the ECB, making it more representative of news articles on the web. The new \u201cECB+\u201d corpus is available for further research.", "keyphrases": ["diversity", "event coreference resolution", "news article", "ecb+"]} +{"id": "al-shargi-etal-2016-morphologically", "title": "Morphologically Annotated Corpora and Morphological Analyzers for Moroccan and Sanaani Yemeni Arabic", "abstract": "We present new language resources for Moroccan and Sanaani Yemeni Arabic. 
The resources include corpora for each dialect which have been morphologically annotated, and morphological analyzers for each dialect which are derived from these corpora. These are the first sets of resources for Moroccan and Yemeni Arabic. The resources will be made available to the public.", "keyphrases": ["morphological analyzer", "moroccan", "arabic"]} +{"id": "lin-etal-2019-sequence", "title": "Sequence-to-Nuggets: Nested Entity Mention Detection via Anchor-Region Networks", "abstract": "Sequential labeling-based NER approaches restrict each word to belong to at most one entity mention, which will face a serious problem when recognizing nested entity mentions. In this paper, we propose to resolve this problem by modeling and leveraging the head-driven phrase structures of entity mentions, i.e., although a mention can nest other mentions, they will not share the same head word. Specifically, we propose Anchor-Region Networks (ARNs), a sequence-to-nuggets architecture for nested mention detection. ARNs first identify anchor words (i.e., possible head words) of all mentions, and then recognize the mention boundaries for each anchor word by exploiting regular phrase structures. Furthermore, we also design Bag Loss, an objective function which can train ARNs in an end-to-end manner without using any anchor word annotation. Experiments show that ARNs achieve the state-of-the-art performance on three standard nested entity mention detection benchmarks.", "keyphrases": ["mention", "head-driven phrase structure", "sequence-to-nugget architecture"]} +{"id": "georgila-2013-reinforcement", "title": "Reinforcement Learning of Two-Issue Negotiation Dialogue Policies", "abstract": "We use hand-crafted simulated negotiators (SNs) to train and evaluate dialogue policies for two-issue negotiation between two agents. These SNs differ in their goals and in the use of strong and weak arguments to persuade their counterparts. They may also make irrational moves, i.e., moves not consistent with their goals, to generate a variety of negotiation patterns. Different versions of these SNs interact with each other to generate corpora for Reinforcement Learning (RL) of argumentation dialogue policies for each of the two agents. We evaluate the learned policies against hand-crafted SNs similar to the ones used for training but with the modification that these SNs no longer make irrational moves and thus are harder to beat. The learned policies generally do as well as, or better than, the hand-crafted SNs, showing that RL can be successfully used for learning argumentation dialogue policies in two-issue negotiation scenarios.", "keyphrases": ["negotiation", "argumentation dialogue policy", "reinforcement learning"]} +{"id": "dinu-wang-2009-inference", "title": "Inference Rules and their Application to Recognizing Textual Entailment", "abstract": "In this paper, we explore ways of improving an inference rule collection and its application to the task of recognizing textual entailment. For this purpose, we start with an automatically acquired collection and we propose methods to refine it and obtain more rules using a hand-crafted lexical resource. Following this, we derive a dependency-based structure representation from texts, which aims to provide a proper base for the inference rule application. 
The evaluation of our approach on the recognizing textual entailment data shows promising results in precision, and the error analysis suggests possible improvements.", "keyphrases": ["textual entailment", "inference rule collection", "hand-crafted lexical resource"]} +{"id": "yang-etal-2021-mtag", "title": "MTAG: Modal-Temporal Attention Graph for Unaligned Human Multimodal Language Sequences", "abstract": "Human communication is multimodal in nature; it is through multiple modalities, such as language, voice, and facial expressions, that opinions and emotions are expressed. Data in this domain exhibits complex multi-relational and temporal interactions. Learning from this data is a fundamentally challenging research problem. In this paper, we propose Modal-Temporal Attention Graph (MTAG). MTAG is an interpretable graph-based neural model that provides a suitable framework for analyzing multimodal sequential data. We first introduce a procedure to convert unaligned multimodal sequence data into a graph with heterogeneous nodes and edges that captures the rich interactions across modalities and through time. Then, a novel graph fusion operation, called MTAG fusion, along with a dynamic pruning and read-out technique, is designed to efficiently process this modal-temporal graph and capture various interactions. By learning to focus only on the important interactions within the graph, MTAG achieves state-of-the-art performance on multimodal sentiment analysis and emotion recognition benchmarks, while utilizing significantly fewer model parameters.", "keyphrases": ["attention graph", "multimodal sentiment analysis", "mtag"]} +{"id": "wu-etal-2020-tod", "title": "TOD-BERT: Pre-trained Natural Language Understanding for Task-Oriented Dialogue", "abstract": "The underlying difference of linguistic patterns between general text and task-oriented dialogue makes existing pre-trained language models less useful in practice. In this work, we unify nine human-human and multi-turn task-oriented dialogue datasets for language modeling. To better model dialogue behavior during pre-training, we incorporate user and system tokens into the masked language modeling. We propose a contrastive objective function to simulate the response selection task. Our pre-trained task-oriented dialogue BERT (TOD-BERT) outperforms strong baselines like BERT on four downstream task-oriented dialogue applications, including intention recognition, dialogue state tracking, dialogue act prediction, and response selection. We also show that TOD-BERT has a stronger few-shot ability that can mitigate the data scarcity problem for task-oriented dialogue.", "keyphrases": ["task-oriented dialogue", "pre-trained language model", "dialogue dataset"]} +{"id": "wolf-etal-2020-transformers", "title": "Transformers: State-of-the-Art Natural Language Processing", "abstract": "Recent progress in natural language processing has been driven by advances in both model architecture and model pretraining. Transformer architectures have facilitated building higher-capacity models and pretraining has made it possible to effectively utilize this capacity for a wide variety of tasks. Transformers is an open-source library with the goal of opening up these advances to the wider machine learning community. The library consists of carefully engineered state-of-the-art Transformer architectures under a unified API. Backing this library is a curated collection of pretrained models made by and available for the community. 
Transformers is designed to be extensible by researchers, simple for practitioners, and fast and robust in industrial deployments. The library is available at .", "keyphrases": ["api", "transformer", "language model", "pre-trained model", "huggingface transformers library"]} +{"id": "kim-etal-2010-cross", "title": "A Cross-lingual Annotation Projection Approach for Relation Detection", "abstract": "While extensive studies on relation extraction have been conducted in the last decade, statistical systems based on supervised learning are still limited because they require large amounts of training data to achieve high performance. In this paper, we develop a cross-lingual annotation projection method that leverages parallel corpora to bootstrap a relation detector without significant annotation efforts for a resource-poor language. In order to make our method more reliable, we introduce three simple projection noise reduction methods. The merit of our method is demonstrated through a novel Korean relation detection task.", "keyphrases": ["annotation projection approach", "parallel corpora", "relation detector"]} +{"id": "santus-etal-2016-cogalex", "title": "The CogALex-V Shared Task on the Corpus-Based Identification of Semantic Relations", "abstract": "The shared task of the 5th Workshop on Cognitive Aspects of the Lexicon (CogALex-V) aims at providing a common benchmark for testing current corpus-based methods for the identification of lexical semantic relations (synonymy, antonymy, hypernymy, part-whole meronymy) and at gaining a better understanding of their respective strengths and weaknesses. The shared task uses a challenging dataset extracted from EVALution 1.0, which contains word pairs holding the above-mentioned relations as well as semantically unrelated control items (random). The task is split into two subtasks: (i) identification of related word pairs vs. unrelated ones; (ii) classification of the word pairs according to their semantic relation. This paper describes the subtasks, the dataset, the evaluation metrics, the seven participating systems and their results. The best performing system in subtask 1 is GHHH (F1 = 0.790), while the best system in subtask 2 is LexNet (F1 = 0.445). The dataset and the task description are available at .", "keyphrases": ["cogalex-v", "identification", "meronymy"]} +{"id": "anastasopoulos-etal-2019-neural", "title": "Neural Machine Translation of Text from Non-Native Speakers", "abstract": "Neural Machine Translation (NMT) systems are known to degrade when confronted with noisy data, especially when the system is trained only on clean data. In this paper, we show that augmenting training data with sentences containing artificially-introduced grammatical errors can make the system more robust to such errors. In combination with an automatic grammar error correction system, we can recover 1.0 BLEU out of 2.4 BLEU lost due to grammatical errors. We also present a set of Spanish translations of the JFLEG grammar error correction corpus, which allows for testing NMT robustness to real grammatical errors.", "keyphrases": ["grammatical error", "spanish translation", "neural machine translation", "noise"]} +{"id": "bittar-etal-2011-french", "title": "French TimeBank: An ISO-TimeML Annotated Reference Corpus", "abstract": "This article presents the main points in the creation of the French TimeBank (Bittar, 2010), a reference corpus annotated according to the ISO-TimeML standard for temporal annotation. 
A number of improvements were made to the markup language to deal with linguistic phenomena not yet covered by ISO-TimeML, including cross-language modifications and others specific to French. An automatic pre-annotation system was used to speed up the annotation process. A preliminary evaluation of the methodology adopted for this project yields positive results in terms of data quality and annotation time.", "keyphrases": ["markup language", "project", "french timebank", "linguistic phenomenon"]} +{"id": "susanto-etal-2014-system", "title": "System Combination for Grammatical Error Correction", "abstract": "Different approaches to high-quality grammatical error correction have been proposed recently, many of which have their own strengths and weaknesses. Most of these approaches are based on classification or statistical machine translation (SMT). In this paper, we propose to combine the output from a classification-based system and an SMT-based system to improve the correction quality. We adopt the system combination technique of Heafield and Lavie (2010). We achieve an F0.5 score of 39.39% on the test set of the CoNLL-2014 shared task, outperforming the best system in the shared task.", "keyphrases": ["grammatical error correction", "classification-based system", "gec system combination"]} +{"id": "davidov-rappoport-2006-efficient", "title": "Efficient Unsupervised Discovery of Word Categories Using Symmetric Patterns and High Frequency Words", "abstract": "We present a novel approach for discovering word categories, sets of words sharing a significant aspect of their meaning. We utilize meta-patterns of high-frequency words and content words in order to discover pattern candidates. Symmetric patterns are then identified using graph-based measures, and word categories are created based on graph clique sets. Our method is the first pattern-based method that requires no corpus annotation or manually provided seed patterns or words. We evaluate our algorithm on very large corpora in two languages, using both human judgments and WordNet-based evaluation. Our fully unsupervised results are superior to previous work that used a POS-tagged corpus, and computation time for huge corpora is orders of magnitude faster than previously reported.", "keyphrases": ["discovery", "symmetric pattern", "high frequency word"]} +{"id": "hwa-2004-sample", "title": "Sample Selection for Statistical Parsing", "abstract": "Corpus-based statistical parsing relies on using large quantities of annotated text as training examples. Building this kind of resource is expensive and labor-intensive. This work proposes to use sample selection to find helpful training examples and reduce human effort spent on annotating less informative ones. We consider several criteria for predicting whether unlabeled data might be a helpful training example. Experiments are performed across two syntactic learning tasks and within the single task of parsing across two learning models to compare the effect of different predictive criteria. 
We find that sample selection can significantly reduce the size of annotated training corpora and that uncertainty is a robust predictive criterion that can be easily applied to different learning models.", "keyphrases": ["unlabeled data", "uncertainty", "sample selection", "active learning", "tree entropy"]} +{"id": "tillmann-ney-2003-word", "title": "Word Reordering and a Dynamic Programming Beam Search Algorithm for Statistical Machine Translation", "abstract": "In this article, we describe an efficient beam search algorithm for statistical machine translation based on dynamic programming (DP). The search algorithm uses the translation model presented in Brown et al. (1993). Starting from a DP-based solution to the traveling-salesman problem, we present a novel technique to restrict the possible word reorderings between source and target language in order to achieve an efficient search algorithm. Word reordering restrictions especially useful for the translation direction German to English are presented. The restrictions are generalized, and a set of four parameters to control the word reordering is introduced, which then can easily be adapted to new translation directions. The beam search procedure has been successfully tested on the Verbmobil task (German to English, 8,000-word vocabulary) and on the Canadian Hansards task (French to English, 100,000-word vocabulary). For the medium-sized Verbmobil task, a sentence can be translated in a few seconds, only a small number of search errors occur, and there is no performance degradation as measured by the word error criterion used in this article.", "keyphrases": ["beam search algorithm", "statistical machine translation", "cost", "tsp"]} +{"id": "roesiger-2018-rule", "title": "Rule- and Learning-based Methods for Bridging Resolution in the ARRAU Corpus", "abstract": "We present two systems for bridging resolution, which we submitted to the CRAC shared task on bridging anaphora resolution in the ARRAU corpus (track 2): a rule-based approach following Hou et al. 2014 and a learning-based approach. The re-implementation of Hou et al. 2014 achieves very poor performance when applied to ARRAU. We found that the reasons for this lie in the different bridging annotations: whereas the rule-based system suggests many referential bridging pairs, ARRAU contains mostly lexical bridging. We describe the differences between these two types of bridging and adapt the rule-based approach to be able to handle lexical bridging. The modified rule-based approach achieves reasonable performance on all (sub)-tasks and outperforms a simple learning-based approach.", "keyphrases": ["bridging resolution", "arrau corpus", "anaphora resolution"]} +{"id": "augenstein-sogaard-2017-multi", "title": "Multi-Task Learning of Keyphrase Boundary Classification", "abstract": "Keyphrase boundary classification (KBC) is the task of detecting keyphrases in scientific articles and labelling them with respect to predefined types. Although important in practice, this task is so far underexplored, partly due to the lack of labelled data. To overcome this, we explore several auxiliary tasks, including semantic super-sense tagging and identification of multi-word expressions, and cast the task as a multi-task learning problem with deep recurrent neural networks. 
Our multi-task models perform significantly better than previous state-of-the-art approaches on two scientific KBC datasets, particularly for long keyphrases.", "keyphrases": ["keyphrase boundary classification", "super-sense tagging", "recurrent neural network", "multi-task learning"]} +{"id": "khanuja-etal-2020-gluecos", "title": "GLUECoS: An Evaluation Benchmark for Code-Switched NLP", "abstract": "Code-switching is the use of more than one language in the same conversation or utterance. Recently, multilingual contextual embedding models, trained on multiple monolingual corpora, have shown promising results on cross-lingual and multilingual tasks. We present an evaluation benchmark, GLUECoS, for code-switched languages, that spans several NLP tasks in English-Hindi and English-Spanish. Specifically, our evaluation benchmark includes Language Identification from text, POS tagging, Named Entity Recognition, Sentiment Analysis, Question Answering and a new task for code-switching, Natural Language Inference. We present results on all these tasks using cross-lingual word embedding models and multilingual models. In addition, we fine-tune multilingual models on artificially generated code-switched data. Although multilingual models perform significantly better than cross-lingual models, our results show that in most tasks, across both language pairs, multilingual models fine-tuned on code-switched data perform best, showing that multilingual models can be further optimized for code-switching tasks.", "keyphrases": ["evaluation benchmark", "gluecos", "code-mixed version"]} +{"id": "xu-etal-2018-skeleton", "title": "A Skeleton-Based Model for Promoting Coherence Among Sentences in Narrative Story Generation", "abstract": "Narrative story generation is a challenging problem because it requires the generated sentences to have tight semantic connections, which has not been well studied by most existing generative models. To address this problem, we propose a skeleton-based model to promote the coherence of generated stories. Different from traditional models that generate a complete sentence at a stroke, the proposed model first generates the most critical phrases, called the skeleton, and then expands the skeleton into a complete and fluent sentence. The skeleton is not manually defined, but learned by a reinforcement learning method. Compared to the state-of-the-art models, our skeleton-based model can generate significantly more coherent text according to human evaluation and automatic evaluation. The G-score is improved by 20.1% in human evaluation.", "keyphrases": ["coherence", "narrative story generation", "fluent sentence"]} +{"id": "wei-etal-2021-towards", "title": "Towards Propagation Uncertainty: Edge-enhanced Bayesian Graph Convolutional Networks for Rumor Detection", "abstract": "Detecting rumors on social media is a very critical task with significant implications for the economy, public health, etc. Previous works generally capture effective features from texts and the propagation structure. However, the uncertainty caused by unreliable relations in the propagation structure is common and inevitable due to wily rumor producers and the limited collection of spread data. Most approaches neglect it and may seriously limit the learning of features. To address this issue, this paper makes the first attempt to explore propagation uncertainty for rumor detection. Specifically, we propose a novel Edge-enhanced Bayesian Graph Convolutional Network (EBGCN) to capture robust structural features. 
The model adaptively rethinks the reliability of latent relations by adopting a Bayesian approach. In addition, we design a new edge-wise consistency training framework to optimize the model by enforcing consistency on relations. Experiments on three public benchmark datasets demonstrate that the proposed model achieves better performance than baseline methods on both rumor detection and early rumor detection tasks.", "keyphrases": ["propagation uncertainty", "rumor detection", "robust structural feature"]} +{"id": "ma-etal-2017-blend", "title": "Blend: a Novel Combined MT Metric Based on Direct Assessment \u2014 CASICT-DCU submission to WMT17 Metrics Task", "abstract": "Existing metrics to evaluate the quality of Machine Translation hypotheses take different perspectives into account. DPM-Fcomb, a metric combining the merits of a range of metrics, achieved the best performance for evaluation of to-English language pairs in the previous two years of WMT Metrics Shared Tasks. This year, we submit a novel combined metric, Blend, to the WMT17 Metrics task. Compared to DPM-Fcomb, Blend includes the following adaptations: i) We use DA human evaluation to guide the training process with a vast reduction in required training data, while still achieving improved performance when evaluated on WMT16 to-English language pairs; ii) We carry out experiments to explore the contribution of metrics incorporated in Blend, in order to find a trade-off between performance and efficiency.", "keyphrases": ["wmt17 metrics task", "blend", "regression"]} +{"id": "narasimhan-etal-2015-language", "title": "Language Understanding for Text-based Games using Deep Reinforcement Learning", "abstract": "In this paper, we consider the task of learning control policies for text-based games. In these games, all interactions in the virtual world are through text and the underlying state is not observed. The resulting language barrier makes such environments challenging for automatic game players. We employ a deep reinforcement learning framework to jointly learn state representations and action policies using game rewards as feedback. This framework enables us to map text descriptions into vector representations that capture the semantics of the game states. We evaluate our approach on two game worlds, comparing against baselines using bag-of-words and bag-of-bigrams for state representations. Our algorithm outperforms the baselines on both worlds, demonstrating the importance of learning expressive representations.", "keyphrases": ["text-based games", "deep reinforcement learning", "language representation learning"]} +{"id": "batliner-etal-2004-stupid", "title": "\u201cYou Stupid Tin Box\u201d - Children Interacting with the AIBO Robot: A Cross-linguistic Emotional Speech Corpus", "abstract": "This paper deals with databases that combine different aspects: children's speech, emotional speech, human-robot communication, cross-linguistics, and read vs. spontaneous speech: in a Wizard-of-Oz scenario, German and English children had to instruct Sony's AIBO robot to fulfil specific tasks. In one experimental condition, strictly parallel for German and English, the AIBO behaved `disobedient' by following its own script irrespective of the child's commands. In this way, reactions of different children to the same sequence of AIBO's actions could be obtained. In addition, both the German and the English children were recorded reading texts. 
The data are transliterated orthographically; emotional user states and some other phenomena will be annotated. We report preliminary word recognition rates and classification results.", "keyphrases": ["aibo robot", "emotional speech corpus", "english child"]} +{"id": "wan-yang-2006-improved", "title": "Improved Affinity Graph Based Multi-Document Summarization", "abstract": "This paper describes an affinity graph based approach to multi-document summarization. We incorporate a diffusion process to acquire semantic relationships between sentences, and then compute information richness of sentences by a graph rank algorithm on differentiated intra-document links and inter-document links between sentences. A greedy algorithm is employed to impose diversity penalty on sentences and the sentences with both high information richness and high information novelty are chosen into the summary. Experimental results on task 2 of DUC 2002 and task 2 of DUC 2004 demonstrate that the proposed approach outperforms existing state-of-the-art systems.", "keyphrases": ["affinity graph", "summarization", "inter-document link"]} +{"id": "callison-burch-etal-2008-meta", "title": "Further Meta-Evaluation of Machine Translation", "abstract": "This paper analyzes the translation quality of machine translation systems for 10 language pairs translating between Czech, English, French, German, Hungarian, and Spanish. We report the translation quality of over 30 diverse translation systems based on a large-scale manual evaluation involving hundreds of hours of effort. We use the human judgments of the systems to analyze automatic evaluation metrics for translation quality, and we report the strength of the correlation with human judgments at both the system-level and at the sentence-level. We validate our manual evaluation methodology by measuring intra- and inter-annotator agreement, and collecting timing information.", "keyphrases": ["translation quality", "human judgment", "evaluation metric", "improved correlation", "wmt"]} +{"id": "yimam-etal-2020-exploring", "title": "Exploring Amharic Sentiment Analysis from Social Media Texts: Building Annotation Tools and Classification Models", "abstract": "This paper presents the study of sentiment analysis for Amharic social media texts. As the number of social media users is ever-increasing, social media platforms would like to understand the latent meaning and sentiments of a text to enhance decision-making procedures. However, low-resource languages such as Amharic have received less attention due to several reasons such as lack of well-annotated datasets, unavailability of computing resources, and fewer or no expert researchers in the area. This research addresses three main research questions. We first explore the suitability of existing tools for the sentiment analysis task. Annotation tools are scarce to support large-scale annotation tasks in Amharic. Also, the existing crowdsourcing platforms do not support Amharic text annotation. Hence, we build a social-network-friendly annotation tool called `ASAB' using the Telegram bot. We collect 9.4k tweets, where each tweet is annotated by three Telegram users. Moreover, we explore the suitability of machine learning approaches for Amharic sentiment analysis. The FLAIR deep learning text classifier, based on network embeddings that are computed from a distributional thesaurus, outperforms other supervised classifiers. 
We further investigate the challenges in building a sentiment analysis system for Amharic and we found that the widespread usage of sarcasm and figurative speech are the main issues in dealing with the problem. To advance the sentiment analysis research in Amharic and other related low-resource languages, we release the dataset, the annotation tool, source code, and models publicly under a permissive license.", "keyphrases": ["amharic sentiment analysis", "low-resource language", "social medium text"]} +{"id": "park-fung-2017-one", "title": "One-step and Two-step Classification for Abusive Language Detection on Twitter", "abstract": "Automatic abusive language detection is a difficult but important task for online social media. Our research explores a two-step approach of performing classification on abusive language and then classifying into specific types and compares it with one-step approach of doing one multi-class classification for detecting sexist and racist languages. With a public English Twitter corpus of 20 thousand tweets in the type of sexism and racism, our approach shows a promising performance of 0.827 F-measure by using HybridCNN in one-step and 0.824 F-measure by using logistic regression in two-steps.", "keyphrases": ["abusive language detection", "twitter", "text classification"]} +{"id": "diao-etal-2012-finding", "title": "Finding Bursty Topics from Microblogs", "abstract": "Microblogs such as Twitter reflect the general public's reactions to major events. Bursty topics from microblogs reveal what events have attracted the most online attention. Although bursty event detection from text streams has been studied before, previous work may not be suitable for microblogs because compared with other text streams such as news articles and scientific publications, microblog posts are particularly diverse and noisy. To find topics that have bursty patterns on microblogs, we propose a topic model that simultaneously captures two observations: (1) posts published around the same time are more likely to have the same topic, and (2) posts published by the same user are more likely to have the same topic. The former helps find event-driven posts while the latter helps identify and filter out \"personal\" posts. Our experiments on a large Twitter dataset show that there are more meaningful and unique bursty topics in the top-ranked results returned by our model than an LDA baseline and two degenerate variations of our model. We also show some case studies that demonstrate the importance of considering both the temporal information and users' personal interests for bursty topic detection from microblogs.", "keyphrases": ["microblog", "event detection", "bursty topic detection"]} +{"id": "chen-etal-2013-exploiting", "title": "Exploiting Domain Knowledge in Aspect Extraction", "abstract": "Aspect extraction is one of the key tasks in sentiment analysis. In recent years, statistical models have been used for the task. However, such models without any domain knowledge often produce aspects that are not interpretable in applications. To tackle the issue, some knowledge-based topic models have been proposed, which allow the user to input some prior domain knowledge to generate coherent aspects. However, existing knowledge-based topic models have several major shortcomings, e.g., little work has been done to incorporate the cannot-link type of knowledge or to automatically adjust the number of topics based on domain knowledge. &#13;
This paper proposes a more advanced topic model, called MC-LDA (LDA with m-set and c-set), to address these problems, which is based on an Extended generalized P\u00f3lya urn (E-GPU) model (which is also proposed in this paper). Experiments on real-life product reviews from a variety of domains show that MC-LDA outperforms the existing state-of-the-art models markedly.", "keyphrases": ["domain knowledge", "aspect extraction", "sentiment analysis", "topic model"]} +{"id": "contractor-etal-2010-unsupervised", "title": "Unsupervised cleansing of noisy text", "abstract": "In this paper we look at the problem of cleansing noisy text using a statistical machine translation model. Noisy text is produced in informal communications such as Short Message Service (SMS), Twitter and chat. A typical Statistical Machine Translation system is trained on parallel text comprising noisy and clean sentences. In this paper we propose an unsupervised method for the translation of noisy text to clean text. Our method has two steps. For a given noisy sentence, a weighted list of possible clean tokens for each noisy token are obtained. The clean sentence is then obtained by maximizing the product of the weighted lists and the language model scores.", "keyphrases": ["noisy text", "language model", "candidate"]} +{"id": "zaidan-etal-2007-using", "title": "Using \u201cAnnotator Rationales\u201d to Improve Machine Learning for Text Categorization", "abstract": "We propose a new framework for supervised machine learning. Our goal is to learn from smaller amounts of supervised training data, by collecting a richer kind of training data: annotations with \u201crationales.\u201d When annotating an example, the human teacher will also highlight evidence supporting this annotation\u2014thereby teaching the machine learner why the example belongs to the category. We provide some rationale-annotated data and present a learning method that exploits the rationales during training to boost performance significantly on a sample task, namely sentiment classification of movie reviews. We hypothesize that in some situations, providing rationales is a more fruitful use of an annotator\u2019s time than annotating more examples.", "keyphrases": ["annotator rationales", "machine learning", "separate word"]} +{"id": "das-etal-2017-chains", "title": "Chains of Reasoning over Entities, Relations, and Text using Recurrent Neural Networks", "abstract": "Our goal is to combine the rich multi-step inference of symbolic logical reasoning with the generalization capabilities of neural networks. We are particularly interested in complex reasoning about entities and relations in text and large-scale knowledge bases (KBs). Neelakantan et al. (2015) use RNNs to compose the distributed semantics of multi-hop paths in KBs; however for multiple reasons, the approach lacks accuracy and practicality. This paper proposes three significant modeling advances: (1) we learn to jointly reason about relations, entities, and entity-types; (2) we use neural attention modeling to incorporate multiple paths; (3) we learn to share strength in a single RNN that represents logical composition across all relations. On a large-scale Freebase+ClueWeb prediction task, we achieve 25% error reduction, and a 53% error reduction on sparse relations due to shared strength. &#13;
On chains of reasoning in WordNet we reduce error in mean quantile by 84% versus previous state-of-the-art.", "keyphrases": ["reasoning", "chain", "knowledge base completion"]} +{"id": "ahlberg-etal-2015-paradigm", "title": "Paradigm classification in supervised learning of morphology", "abstract": "Supervised morphological paradigm learning by identifying and aligning the longest common subsequence found in inflection tables has recently been proposed as a simple yet competitive way to induce morphological patterns. We combine this non-probabilistic strategy of inflection table generalization with a discriminative classifier to permit the reconstruction of complete inflection tables of unseen words. Our system learns morphological paradigms from labeled examples of inflection patterns (inflection tables) and then produces inflection tables from unseen lemmas or base forms. We evaluate the approach on datasets covering 11 different languages and show that this approach results in consistently higher accuracies vis-\u00e0-vis other methods on the same task, thus indicating that the general method is a viable approach to quickly creating high-accuracy morphological resources.", "keyphrases": ["supervised learning", "morphology", "inflection"]} +{"id": "dasgupta-etal-2018-hyte", "title": "HyTE: Hyperplane-based Temporally aware Knowledge Graph Embedding", "abstract": "Knowledge Graph (KG) embedding has emerged as an active area of research resulting in the development of several KG embedding methods. Relational facts in KG often show temporal dynamics, e.g., the fact (Cristiano_Ronaldo, playsFor, Manchester_United) is valid only from 2003 to 2009. Most of the existing KG embedding methods ignore this temporal dimension while learning embeddings of the KG elements. In this paper, we propose HyTE, a temporally aware KG embedding method which explicitly incorporates time in the entity-relation space by associating each timestamp with a corresponding hyperplane. HyTE not only performs KG inference using temporal guidance, but also predicts temporal scopes for relational facts with missing time annotations. Through extensive experimentation on temporal datasets extracted from real-world KGs, we demonstrate the effectiveness of our model over both traditional as well as temporal KG embedding methods.", "keyphrases": ["knowledge graph", "temporal hyperplane", "link prediction"]} +{"id": "saha-etal-2021-multiprover", "title": "multiPRover: Generating Multiple Proofs for Improved Interpretability in Rule Reasoning", "abstract": "We focus on a type of linguistic formal reasoning where the goal is to reason over explicit knowledge in the form of natural language facts and rules (Clark et al., 2020). A recent work, named PRover (Saha et al., 2020), performs such reasoning by answering a question and also generating a proof graph that explains the answer. However, compositional reasoning is not always unique and there may be multiple ways of reaching the correct answer. Thus, in our work, we address a new and challenging problem of generating multiple proof graphs for reasoning over natural language rule-bases. Each proof provides a different rationale for the answer, thereby improving the interpretability of such reasoning systems. In order to jointly learn from all proof graphs and exploit the correlations between multiple proofs for a question, we pose this task as a set generation problem over structured output spaces where each proof is represented as a directed graph. &#13;
We propose two variants of a proof-set generation model, multiPRover. Our first model, Multilabel-multiPRover, generates a set of proofs via multi-label classification and implicit conditioning between the proofs; while the second model, Iterative-multiPRover, generates proofs iteratively by explicitly conditioning on the previously generated proofs. Experiments on multiple synthetic, zero-shot, and human-paraphrased datasets reveal that both multiPRover models significantly outperform PRover on datasets containing multiple gold proofs. Iterative-multiPRover obtains state-of-the-art proof F1 in zero-shot scenarios where all examples have single correct proofs. It also generalizes better to questions requiring higher depths of reasoning where multiple proofs are more frequent.", "keyphrases": ["proof", "interpretability", "multiprover"]} +{"id": "caswell-etal-2019-tagged", "title": "Tagged Back-Translation", "abstract": "Recent work in Neural Machine Translation (NMT) has shown significant quality gains from noised-beam decoding during back-translation, a method to generate synthetic parallel data. We show that the main role of such synthetic noise is not to diversify the source side, as previously suggested, but simply to indicate to the model that the given source is synthetic. We propose a simpler alternative to noising techniques, consisting of tagging back-translated source sentences with an extra token. Our results on WMT outperform noised back-translation in English-Romanian and match performance on English-German, redefining the state-of-the-art on the former.", "keyphrases": ["noise", "tagged back-translation", "synthetic data", "nmt system", "monolingual sentence"]} +{"id": "scialom-etal-2019-answers", "title": "Answers Unite! Unsupervised Metrics for Reinforced Summarization Models", "abstract": "Abstractive summarization approaches based on Reinforcement Learning (RL) have recently been proposed to overcome classical likelihood maximization. RL enables to consider complex, possibly non differentiable, metrics that globally assess the quality and relevance of the generated outputs. ROUGE, the most used summarization metric, is known to suffer from bias towards lexical similarity as well as from sub-optimal accounting for fluency and readability of the generated abstracts. We thus explore and propose alternative evaluation measures: the reported human-evaluation analysis shows that the proposed metrics, based on Question Answering, favorably compare to ROUGE \u2013 with the additional property of not requiring reference summaries. Training a RL-based model on these metrics leads to improvements (both in terms of human or automated metrics) over current approaches that use ROUGE as reward.", "keyphrases": ["summarization", "reward", "source document", "evaluation metric"]} +{"id": "han-etal-2018-hierarchical", "title": "Hierarchical Relation Extraction with Coarse-to-Fine Grained Attention", "abstract": "Distantly supervised relation extraction employs existing knowledge graphs to automatically collect training data. While distant supervision is effective to scale relation extraction up to large-scale corpora, it inevitably suffers from the wrong labeling problem. Many efforts have been devoted to identifying valid instances from noisy data. However, most existing methods handle each relation in isolation, regardless of rich semantic correlations located in relation hierarchies. 
In this paper, we aim to incorporate the hierarchical information of relations for distantly supervised relation extraction and propose a novel hierarchical attention scheme. The multiple layers of our hierarchical attention scheme provide coarse-to-fine granularity to better identify valid instances, which is especially effective for extracting those long-tail relations. The experimental results on a large-scale benchmark dataset demonstrate that our models are capable of modeling the hierarchical information of relations and significantly outperform other baselines. The source code of this paper can be obtained from .", "keyphrases": ["relation extraction", "hierarchy", "attention scheme", "long-tail relation", "deep neural network"]} +{"id": "zhang-barzilay-2015-hierarchical", "title": "Hierarchical Low-Rank Tensors for Multilingual Transfer Parsing", "abstract": "Accurate multilingual transfer parsing typically relies on careful feature engineering. In this paper, we propose a hierarchical tensor-based approach for this task. This approach induces a compact feature representation by combining atomic features. However, unlike traditional tensor models, it enables us to incorporate prior knowledge about desired feature interactions, eliminating invalid feature combinations. To this end, we use a hierarchical structure that uses intermediate embeddings to capture desired feature combinations. Algebraically, this hierarchical tensor is equivalent to the sum of traditional tensors with shared components, and thus can be effectively trained with standard online algorithms. In both unsupervised and semi-supervised transfer scenarios, our hierarchical tensor consistently improves UAS and LAS over state-of-the-art multilingual transfer parsers and the traditional tensor model across 10 different languages.", "keyphrases": ["tensor", "multilingual transfer", "dependency parsing", "linguistic typology"]} +{"id": "lei-etal-2021-mtvr", "title": "mTVR: Multilingual Moment Retrieval in Videos", "abstract": "We introduce mTVR, a large-scale multilingual video moment retrieval dataset, containing 218K English and Chinese queries from 21.8K TV show video clips. The dataset is collected by extending the popular TVR dataset (in English) with paired Chinese queries and subtitles. Compared to existing moment retrieval datasets, mTVR is multilingual, larger, and comes with diverse annotations. We further propose mXML, a multilingual moment retrieval model that learns and operates on data from both languages, via encoder parameter sharing and language neighborhood constraints. We demonstrate the effectiveness of mXML on the newly collected mTVR dataset, where mXML outperforms strong monolingual baselines while using fewer parameters. In addition, we also provide detailed dataset analyses and model ablations. Data and code are publicly available at ", "keyphrases": ["multilingual moment retrieval", "query", "mtvr"]} +{"id": "sperber-etal-2017-toward", "title": "Toward Robust Neural Machine Translation for Noisy Input Sequences", "abstract": "Translating noisy inputs, such as the output of a speech recognizer, is a difficult but important challenge for neural machine translation. One way to increase robustness of neural models is by introducing artificial noise to the training data. In this paper, we experiment with appropriate forms of such noise, exploring a middle ground between general-purpose regularizers and highly task-specific forms of noise induction. &#13;
We show that with a simple generative noise model, moderate gains can be achieved in translating erroneous speech transcripts, provided that type and amount of noise are properly calibrated. The optimal amount of noise at training time is much smaller than the amount of noise in our test data, indicating limitations due to trainability issues. We note that unlike our baseline model, models trained on noisy data are able to generate outputs of proper length even for noisy inputs, while gradually reducing output length for higher amounts of noise, as might also be expected from a human translator. We discuss these findings in detail and give suggestions for future work.", "keyphrases": ["speech recognizer", "noise", "insertion"]} +{"id": "finkel-etal-2007-infinite", "title": "The Infinite Tree", "abstract": "Historically, unsupervised learning techniques have lacked a principled technique for selecting the number of unseen components. Research into non-parametric priors, such as the Dirichlet process, has enabled instead the use of infinite models, in which the number of hidden categories is not fixed, but can grow with the amount of training data. Here we develop the infinite tree, a new infinite model capable of representing recursive branching structure over an arbitrarily large set of hidden categories. Specifically, we develop three infinite tree models, each of which enforces different independence assumptions, and for each model we define a simple direct assignment-sampling inference procedure. We demonstrate the utility of our models by doing unsupervised learning of part-of-speech tags from treebank dependency skeleton structure, achieving an accuracy of 75.34%, and by doing unsupervised splitting of part-of-speech tags, which increases the accuracy of a generative dependency parser from 85.11% to 87.35%.", "keyphrases": ["infinite tree", "dirichlet process", "pos tag"]} +{"id": "imamura-etal-2012-grammar", "title": "Grammar Error Correction Using Pseudo-Error Sentences and Domain Adaptation", "abstract": "This paper presents grammar error correction for Japanese particles that uses discriminative sequence conversion, which corrects erroneous particles by substitution, insertion, and deletion. The error correction task is hindered by the difficulty of collecting large error corpora. We tackle this problem by using pseudo-error sentences generated automatically. Furthermore, we apply domain adaptation, where the pseudo-error sentences are from the source domain, and the real-error sentences are from the target domain. Experiments show that stable improvement is achieved by using domain adaptation.", "keyphrases": ["domain adaptation", "particle", "grammar error correction", "learner"]} +{"id": "wang-etal-2021-x", "title": "X-Class: Text Classification with Extremely Weak Supervision", "abstract": "In this paper, we explore text classification with extremely weak supervision, i.e., only relying on the surface text of class names. This is a more challenging setting than the seed-driven weak supervision, which allows a few seed words per class. We opt to attack this problem from a representation learning perspective\u2014ideal document representations should lead to nearly the same results between clustering and the desired classification. In particular, one can classify the same corpus differently (e.g., based on topics and locations), so document representations should be adaptive to the given class names. We propose a novel framework X-Class to realize the adaptive representations. &#13;
Specifically, we first estimate class representations by incrementally adding the most similar word to each class until inconsistency arises. Following a tailored mixture of class attention mechanisms, we obtain the document representation via a weighted average of contextualized word representations. With the prior of each document assigned to its nearest class, we then cluster and align the documents to classes. Finally, we pick the most confident documents from each cluster to train a text classifier. Extensive experiments demonstrate that X-Class can rival and even outperform seed-driven weakly supervised methods on 7 benchmark datasets.", "keyphrases": ["text classification", "weak supervision", "x-class"]} +{"id": "gui-etal-2019-lexicon", "title": "A Lexicon-Based Graph Neural Network for Chinese NER", "abstract": "Recurrent neural networks (RNN) used for Chinese named entity recognition (NER) that sequentially track character and word information have achieved great success. However, the characteristic of chain structure and the lack of global semantics determine that RNN-based models are vulnerable to word ambiguities. In this work, we try to alleviate this problem by introducing a lexicon-based graph neural network with global semantics, in which lexicon knowledge is used to connect characters to capture the local composition, while a global relay node can capture global sentence semantics and long-range dependency. Based on the multiple graph-based interactions among characters, potential words, and the whole-sentence semantics, word ambiguities can be effectively tackled. Experiments on four NER datasets show that the proposed model achieves significant improvements against other baseline models.", "keyphrases": ["graph neural network", "chinese ner", "lexicon knowledge"]} +{"id": "zens-ney-2003-comparative", "title": "A Comparative Study on Reordering Constraints in Statistical Machine Translation", "abstract": "In statistical machine translation, the generation of a translation hypothesis is computationally expensive. If arbitrary word-reorderings are permitted, the search problem is NP-hard. On the other hand, if we restrict the possible word-reorderings in an appropriate way, we obtain a polynomial-time search algorithm. In this paper, we compare two different reordering constraints, namely the ITG constraints and the IBM constraints. This comparison includes a theoretical discussion on the permitted number of reorderings for each of these constraints. We show a connection between the ITG constraints and the Schr\u00f6der numbers, known since 1870. We evaluate these constraints on two tasks: the Verbmobil task and the Canadian Hansards task. The evaluation consists of two parts: First, we check how many of the Viterbi alignments of the training corpus satisfy each of these constraints. Second, we restrict the search to each of these constraints and compare the resulting translation hypotheses. The experiments will show that the baseline ITG constraints are not sufficient on the Canadian Hansards task. Therefore, we present an extension to the ITG constraints. &#13;
These extended ITG constraints increase the alignment coverage from about 87% to 96%.", "keyphrases": ["comparative study", "statistical machine translation", "different reordering constraint", "alignment coverage", "normal form itg"]} +{"id": "mi-etal-2020-continual", "title": "Continual Learning for Natural Language Generation in Task-oriented Dialog Systems", "abstract": "Natural language generation (NLG) is an essential component of task-oriented dialog systems. Despite the recent success of neural approaches for NLG, they are typically developed in an offline manner for particular domains. To better fit real-life applications where new data come in a stream, we study NLG in a \u201ccontinual learning\u201d setting to expand its knowledge to new domains or functionalities incrementally. The major challenge towards this goal is catastrophic forgetting, meaning that a continually trained model tends to forget the knowledge it has learned before. To this end, we propose a method called ARPER (Adaptively Regularized Prioritized Exemplar Replay) by replaying prioritized historical exemplars, together with an adaptive regularization technique based on Elastic Weight Consolidation. Extensive experiments to continually learn new domains and intents are conducted on MultiWoZ-2.0 to benchmark ARPER with a wide range of techniques. Empirical results demonstrate that ARPER significantly outperforms other methods by effectively mitigating the detrimental catastrophic forgetting issue.", "keyphrases": ["natural language generation", "dialog system", "continual learning"]} +{"id": "ueffing-ney-2007-word", "title": "Word-Level Confidence Estimation for Machine Translation", "abstract": "This article introduces and evaluates several different word-level confidence measures for machine translation. These measures provide a method for labeling each word in an automatically generated translation as correct or incorrect. All approaches to confidence estimation presented here are based on word posterior probabilities. Different concepts of word posterior probabilities as well as different ways of calculating them will be introduced and compared. They can be divided into two categories: System-based methods that explore knowledge provided by the translation system that generated the translations, and direct methods that are independent of the translation system. The system-based techniques make use of system output, such as word graphs or N-best lists. The word posterior probability is determined by summing the probabilities of the sentences in the translation hypothesis space that contains the target word. The direct confidence measures take other knowledge sources, such as word or phrase lexica, into account. They can be applied to output from nonstatistical machine translation systems as well. Experimental assessment of the different confidence measures on various translation tasks and in several language pairs will be presented. Moreover, the application of confidence measures for rescoring of translation hypotheses will be investigated.", "keyphrases": ["machine translation", "word-level confidence measure", "system-dependent", "n-b list", "smt system"]} +{"id": "guillou-hardmeier-2016-protest", "title": "PROTEST: A Test Suite for Evaluating Pronouns in Machine Translation", "abstract": "We present PROTEST, a test suite for the evaluation of pronoun translation by MT systems. &#13;
The test suite comprises 250 hand-selected pronoun tokens and an automatic evaluation method which compares the translations of pronouns in MT output with those in the reference translation. Pronoun translations that do not match the reference are referred for manual evaluation. PROTEST is designed to support analysis of system performance at the level of individual pronoun groups, rather than to provide a single aggregate measure over all pronouns. We wish to encourage detailed analyses to highlight issues in the handling of specific linguistic mechanisms by MT systems, thereby contributing to a better understanding of those problems involved in translating pronouns. We present two use cases for PROTEST: a) for measuring improvement/degradation of an incremental system change, and b) for comparing the performance of a group of systems whose design may be largely unrelated. Following the latter use case, we demonstrate the application of PROTEST to the evaluation of the systems submitted to the DiscoMT 2015 shared task on pronoun translation.", "keyphrases": ["machine translation", "evaluation method", "protest"]} +{"id": "cherry-lin-2003-probability", "title": "A Probability Model to Improve Word Alignment", "abstract": "Word alignment plays a crucial role in statistical machine translation. Word-aligned corpora have been found to be an excellent source of translation-related knowledge. We present a statistical model for computing the probability of an alignment given a sentence pair. This model allows easy integration of context-specific features. Our experiments show that this model can be an effective tool for improving an existing word alignment.", "keyphrases": ["word alignment", "integration", "context-specific feature", "knowledge source", "many researcher"]} +{"id": "kottur-etal-2017-natural", "title": "Natural Language Does Not Emerge `Naturally' in Multi-Agent Dialog", "abstract": "A number of recent works have proposed techniques for end-to-end learning of communication protocols among cooperative multi-agent populations, and have simultaneously found the emergence of grounded human-interpretable language in the protocols developed by the agents, learned without any human supervision! In this paper, using a Task & Talk reference game between two agents as a testbed, we present a sequence of `negative' results culminating in a `positive' one \u2013 showing that while most agent-invented languages are effective (i.e. achieve near-perfect task rewards), they are decidedly not interpretable or compositional. In essence, we find that natural language does not emerge `naturally', despite the semblance of ease of natural-language-emergence that one may gather from recent literature. We discuss how it is possible to coax the invented languages to become more and more human-like and compositional by increasing restrictions on how two agents may communicate.", "keyphrases": ["agent", "human language", "compositionality", "emergent communication"]} +{"id": "ganesan-etal-2010-opinosis", "title": "Opinosis: A Graph Based Approach to Abstractive Summarization of Highly Redundant Opinions", "abstract": "We present a novel graph-based summarization framework (Opinosis) that generates concise abstractive summaries of highly redundant opinions. Evaluation results on summarizing user reviews show that Opinosis summaries have better agreement with human summaries compared to the baseline extractive method. &#13;
The summaries are readable, reasonably well-formed and are informative enough to convey the major opinions.", "keyphrases": ["abstractive summarization", "redundancy", "graph-based method"]} +{"id": "chan-etal-2020-poison", "title": "Poison Attacks against Text Datasets with Conditional Adversarially Regularized Autoencoder", "abstract": "This paper demonstrates a fatal vulnerability in natural language inference (NLI) and text classification systems. More concretely, we present a `backdoor poisoning' attack on NLP models. Our poisoning attack utilizes conditional adversarially regularized autoencoder (CARA) to generate poisoned training samples by poison injection in latent space. Just by adding 1% poisoned data, our experiments show that a victim BERT finetuned classifier's predictions can be steered to the poison target class with success rates of >80% when the input hypothesis is injected with the poison signature, demonstrating that NLI and text classification systems face a huge security risk.", "keyphrases": ["attack", "conditional", "autoencoder"]} +{"id": "bost-etal-2020-serial", "title": "Serial Speakers: a Dataset of TV Series", "abstract": "For over a decade, TV series have been drawing increasing interest, both from the audience and from various academic fields. But while most viewers are hooked on the continuous plots of TV serials, the few annotated datasets available to researchers focus on standalone episodes of classical TV series. We aim at filling this gap by providing the multimedia/speech processing communities with \u201cSerial Speakers\u201d, an annotated dataset of 155 episodes from three popular American TV serials: \u201cBreaking Bad\u201d, \u201cGame of Thrones\u201d and \u201cHouse of Cards\u201d. \u201cSerial Speakers\u201d is suitable both for investigating multimedia retrieval in realistic use case scenarios, and for addressing lower level speech related tasks in especially challenging conditions. We publicly release annotations for every speech turn (boundaries, speaker) and scene boundary, along with annotations for shot boundaries, recurring shots, and interacting speakers in a subset of episodes. Because of copyright restrictions, the textual content of the speech turns is encrypted in the public version of the dataset, but we provide the users with a simple online tool to recover the plain text from their own subtitle files.", "keyphrases": ["continuous plot", "game", "serial speakers"]} +{"id": "pitler-etal-2013-finding", "title": "Finding Optimal 1-Endpoint-Crossing Trees", "abstract": "Dependency parsing algorithms capable of producing the types of crossing dependencies seen in natural language sentences have traditionally been orders of magnitude slower than algorithms for projective trees. For 95.8\u201399.8% of dependency parses in various natural language treebanks, whenever an edge is crossed, the edges that cross it all have a common vertex. The optimal dependency tree that satisfies this 1-Endpoint-Crossing property can be found with an O(n4) parsing algorithm that recursively combines forests over intervals with one exterior point. 
1-Endpoint-Crossing trees also have natural connections to linguistics and another class of graphs that has been studied in NLP.", "keyphrases": ["1-endpoint-crossing tree", "vertex", "interval", "non-projective tree", "restriction"]} +{"id": "rimell-2014-distributional", "title": "Distributional Lexical Entailment by Topic Coherence", "abstract": "Automatic detection of lexical entailment, or hypernym detection, is an important NLP task. Recent hypernym detection measures have been based on the Distributional Inclusion Hypothesis (DIH). This paper assumes that the DIH sometimes fails, and investigates other ways of quantifying the relationship between the cooccurrence contexts of two terms. We consider the top features in a context vector as a topic, and introduce a new entailment detection measure based on Topic Coherence (TC). Our measure successfully detects hypernyms, and a TC-based family of measures contributes to multi-way relation classification.", "keyphrases": ["topic coherence", "distributional inclusion hypothesis", "dih", "top feature"]} +{"id": "su-etal-2018-global", "title": "Global Relation Embedding for Relation Extraction", "abstract": "We study the problem of textual relation embedding with distant supervision. To combat the wrong labeling problem of distant supervision, we propose to embed textual relations with global statistics of relations, i.e., the co-occurrence statistics of textual and knowledge base relations collected from the entire corpus. This approach turns out to be more robust to the training noise introduced by distant supervision. On a popular relation extraction dataset, we show that the learned textual relation embedding can be used to augment existing relation extraction models and significantly improve their performance. Most remarkably, for the top 1,000 relational facts discovered by the best existing model, the precision can be improved from 83.9% to 89.3%.", "keyphrases": ["relation extraction", "distant supervision", "labeling problem", "global statistic"]} +{"id": "roth-lapata-2016-neural", "title": "Neural Semantic Role Labeling with Dependency Path Embeddings", "abstract": "This paper introduces a novel model for semantic role labeling that makes use of neural sequence modeling techniques. Our approach is motivated by the observation that complex syntactic structures and related phenomena, such as nested subordinations and nominal predicates, are not handled well by existing models. Our model treats such instances as subsequences of lexicalized dependency paths and learns suitable embedding representations. We experimentally demonstrate that such embeddings can improve results over previous state-of-the-art semantic role labelers, and showcase qualitative improvements obtained by our method.", "keyphrases": ["dependency path", "predicate", "lstm model"]} +{"id": "eriguchi-etal-2016-tree", "title": "Tree-to-Sequence Attentional Neural Machine Translation", "abstract": "Most of the existing Neural Machine Translation (NMT) models focus on the conversion of sequential data and do not directly use syntactic information. We propose a novel end-to-end syntactic NMT model, extending a sequence-to-sequence model with the source-side phrase structure. Our model has an attention mechanism that enables the decoder to generate a translated word while softly aligning it with phrases as well as words of the source sentence. 
Experimental results on the WAT'15 English-to-Japanese dataset demonstrate that our proposed model considerably outperforms sequence-to-sequence attentional NMT models and compares favorably with the state-of-the-art tree-to-string SMT system.", "keyphrases": ["neural machine translation", "syntactic information", "nmt model", "tree-based encoder", "annotation vector"]} +{"id": "platt-etal-2010-translingual", "title": "Translingual Document Representations from Discriminative Projections", "abstract": "Representing documents by vectors that are independent of language enhances machine translation and multilingual text categorization. We use discriminative training to create a projection of documents from multiple languages into a single translingual vector space. We explore two variants to create these projections: Oriented Principal Component Analysis (OPCA) and Coupled Probabilistic Latent Semantic Analysis (CPLSA). Both of these variants start with a basic model of documents (PCA and PLSA). Each model is then made discriminative by encouraging comparable document pairs to have similar vector representations. We evaluate these algorithms on two tasks: parallel document retrieval for Wikipedia and Europarl documents, and cross-lingual text classification on Reuters. The two discriminative variants, OPCA and CPLSA, significantly outperform their corresponding baselines. The largest differences in performance are observed on the task of retrieval when the documents are only comparable and not parallel. The OPCA method is shown to perform best.", "keyphrases": ["principal component analysis", "cplsa", "document pair", "different language"]} +{"id": "marelli-etal-2014-sick", "title": "A SICK cure for the evaluation of compositional distributional semantic models", "abstract": "Shared and internationally recognized benchmarks are fundamental for the development of any computational system. We aim to help the research community working on compositional distributional semantic models (CDSMs) by providing SICK (Sentences Involving Compositional Knowledge), a large size English benchmark tailored for them. SICK consists of about 10,000 English sentence pairs that include many examples of the lexical, syntactic and semantic phenomena that CDSMs are expected to account for, but do not require dealing with other aspects of existing sentential data sets (idiomatic multiword expressions, named entities, telegraphic language) that are not within the scope of CDSMs. By means of crowdsourcing techniques, each pair was annotated for two crucial semantic tasks: relatedness in meaning (with a 5-point rating scale as gold score) and entailment relation between the two elements (with three possible gold labels: entailment, contradiction, and neutral). The SICK data set was used in SemEval-2014 Task 1, and it is freely available for research purposes.", "keyphrases": ["distributional semantic model", "sentence pair", "entailment relation", "compositional knowledge", "linguistic capacity"]} +{"id": "ha-etal-2016-toward", "title": "Toward Multilingual Neural Machine Translation with Universal Encoder and Decoder", "abstract": "In this paper, we present our first attempts in building a multilingual Neural Machine Translation framework under a unified approach in which the information shared among languages can be helpful in the translation of individual language pairs. We are then able to employ attention-based Neural Machine Translation for many-to-many multilingual translation tasks. &#13;
Our approach does not require any special treatment on the network architecture and it allows us to learn a minimal number of free parameters in a standard way of training. Our approach has shown its effectiveness in an under-resourced translation scenario with considerable improvements up to 2.6 BLEU points. In addition, we point out a novel way to make use of monolingual data with Neural Machine Translation using the same approach with a 3.15-BLEU-score gain in IWSLT'16 English\u2192German translation task.", "keyphrases": ["neural machine translation", "universal encoder", "multilingual nmt", "zero-shot translation", "language tag"]} +{"id": "abdul-rauf-schwenk-2009-use", "title": "On the Use of Comparable Corpora to Improve SMT performance", "abstract": "We present a simple and effective method for extracting parallel sentences from comparable corpora. We employ a statistical machine translation (SMT) system built from small amounts of parallel texts to translate the source side of the non-parallel corpus. The target side texts are used, along with other corpora, in the language model of this SMT system. We then use information retrieval techniques and simple filters to create French/English parallel data from comparable news corpora. We evaluate the quality of the extracted data by showing that it significantly improves the performance of an SMT system.", "keyphrases": ["comparable corpora", "parallel sentence", "small amount", "smt system", "rich-resources"]} +{"id": "anastasopoulos-2019-analysis", "title": "An Analysis of Source-Side Grammatical Errors in NMT", "abstract": "The quality of Neural Machine Translation (NMT) has been shown to significantly degrade when confronted with source-side noise. We present the first large-scale study of state-of-the-art English-to-German NMT on real grammatical noise, by evaluating on several Grammar Correction corpora. We present methods for evaluating NMT robustness without true references, and we use them for extensive analysis of the effects that different grammatical errors have on the NMT output. We also introduce a technique for visualizing the divergence distribution caused by a source-side error, which allows for additional insights.", "keyphrases": ["grammatical error", "source-side error", "nmt system"]} +{"id": "toutanova-etal-2003-feature", "title": "Feature-Rich Part-of-Speech Tagging with a Cyclic Dependency Network", "abstract": "We present a new part-of-speech tagger that demonstrates the following ideas: (i) explicit use of both preceding and following tag contexts via a dependency network representation, (ii) broad use of lexical features, including jointly conditioning on multiple consecutive words, (iii) effective use of priors in conditional loglinear models, and (iv) fine-grained modeling of unknown word features. Using these ideas together, the resulting tagger gives a 97.24% accuracy on the Penn Treebank WSJ, an error reduction of 4.4% on the best previous single automatically learned tagging result.", "keyphrases": ["part-of-speech", "cyclic dependency network", "tagger", "lexical feature", "pos"]} +{"id": "zhou-etal-2003-fast", "title": "A Fast Algorithm for Feature Selection in Conditional Maximum Entropy Modeling", "abstract": "This paper describes a fast algorithm that selects features for conditional maximum entropy modeling. Berger et al. &#13;
(1996) presents an incremental feature selection (IFS) algorithm, which computes the approximate gains for all candidate features at each selection stage, and is very time-consuming for any problems with large feature spaces. In this new algorithm, instead, we only compute the approximate gains for the top-ranked features based on the models obtained from previous stages. Experiments on WSJ data in Penn Treebank are conducted to show that the new algorithm greatly speeds up the feature selection process while maintaining the same quality of selected features. One variant of this new algorithm with look-ahead functionality is also tested to further confirm the good quality of the selected features. The new algorithm is easy to implement, and given a feature space of size F, it only uses O(F) more space than the original IFS algorithm.", "keyphrases": ["feature selection", "ifs", "gain"]} +{"id": "bansal-etal-2014-structured", "title": "Structured Learning for Taxonomy Induction with Belief Propagation", "abstract": "We present a structured learning approach to inducing hypernym taxonomies using a probabilistic graphical model formulation. Our model incorporates heterogeneous relational evidence about both hypernymy and siblinghood, captured by semantic features based on patterns and statistics from Web n-grams and Wikipedia abstracts. For efficient inference over taxonomy structures, we use loopy belief propagation along with a directed spanning tree algorithm for the core hypernymy factor. To train the system, we extract sub-structures of WordNet and discriminatively learn to reproduce them, using adaptive subgradient stochastic optimization. On the task of reproducing sub-hierarchies of WordNet, our approach achieves a 51% error reduction over a chance baseline, including a 15% error reduction due to the non-hypernym-factored sibling features. On a comparison setup, we find up to 29% relative error reduction over previous work on ancestor F1.", "keyphrases": ["belief propagation", "taxonomy structure", "structured learning problem"]} +{"id": "schlechtweg-etal-2018-diachronic", "title": "Diachronic Usage Relatedness (DURel): A Framework for the Annotation of Lexical Semantic Change", "abstract": "We propose a framework that extends synchronic polysemy annotation to diachronic changes in lexical meaning, to counteract the lack of resources for evaluating computational models of lexical semantic change. Our framework exploits an intuitive notion of semantic relatedness, and distinguishes between innovative and reductive meaning changes with high inter-annotator agreement. The resulting test set for German comprises ratings from five annotators for the relatedness of 1,320 use pairs across 22 target words.", "keyphrases": ["change", "diachronic usage relatedness", "semantic change detection"]} +{"id": "li-etal-2016-persona", "title": "A Persona-Based Neural Conversation Model", "abstract": "We present persona-based models for handling the issue of speaker consistency in neural response generation. A speaker model encodes personas in distributed embeddings that capture individual characteristics such as background information and speaking style. A dyadic speaker-addressee model captures properties of interactions between two interlocutors. 
Our models yield qualitative performance improvements in both perplexity and BLEU scores over baseline sequence-to-sequence models, with similar gains in speaker consistency as measured by human judges.", "keyphrases": ["neural conversation model", "neural response generation", "agent", "dialog system", "dialogue generation"]} +{"id": "snyder-barzilay-2007-multiple", "title": "Multiple Aspect Ranking Using the Good Grief Algorithm", "abstract": "We address the problem of analyzing multiple related opinions in a text. For instance, in a restaurant review such opinions may include food, ambience and service. We formulate this task as a multiple aspect ranking problem, where the goal is to produce a set of numerical scores, one for each aspect. We present an algorithm that jointly learns ranking models for individual aspects by modeling the dependencies between assigned ranks. This algorithm guides the prediction of individual rankers by analyzing meta-relations between opinions, such as agreement and contrast. We prove that our agreement-based joint model is more expressive than individual ranking models. Our empirical results further confirm the strength of the model: the algorithm provides significant improvement over both individual rankers and a state-of-the-art joint ranking model.", "keyphrases": ["good grief algorithm", "review", "service", "sentiment classification", "contrastive rst relation"]} +{"id": "ueffing-etal-2007-transductive", "title": "Transductive learning for statistical machine translation", "abstract": "Statistical machine translation systems are usually trained on large amounts of bilingual text and monolingual text in the target language. In this paper we explore the use of transductive semi-supervised methods for the effective use of monolingual data from the source language in order to improve translation quality. We propose several algorithms with this aim, and present the strengths and weaknesses of each one. We present detailed experimental evaluations on the French\u2010English EuroParl data set and on data from the NIST Chinese\u2010English large-data track. We show a significant improvement in translation quality on both tasks.", "keyphrases": ["monolingual data", "transductive", "semi-supervised learning", "conventional smt"]} +{"id": "jain-etal-2020-learning", "title": "Learning to Faithfully Rationalize by Construction", "abstract": "In many settings it is important for one to be able to understand why a model made a particular prediction. In NLP this often entails extracting snippets of an input text `responsible for' corresponding model output; when such a snippet comprises tokens that indeed informed the model's prediction, it is a faithful explanation. In some settings, faithfulness may be critical to ensure transparency. Lei et al. (2016) proposed a model to produce faithful rationales for neural text classification by defining independent snippet extraction and prediction modules. However, the discrete selection over input tokens performed by this method complicates training, leading to high variance and requiring careful hyperparameter tuning. We propose a simpler variant of this approach that provides faithful explanations by construction. In our scheme, named FRESH, arbitrary feature importance scores (e.g., gradients from a trained model) are used to induce binary labels over token inputs, which an extractor can be trained to predict. &#13;
An independent classifier module is then trained exclusively on snippets provided by the extractor; these snippets thus constitute faithful explanations, even if the classifier is arbitrarily complex. In both automatic and manual evaluations we find that variants of this simple framework yield predictive performance superior to `end-to-end' approaches, while being more general and easier to train. Code is available at .", "keyphrases": ["input text", "rationale", "predictive performance"]} +{"id": "callison-burch-etal-2004-statistical", "title": "Statistical Machine Translation with Word- and Sentence-Aligned Parallel Corpora", "abstract": "The parameters of statistical translation models are typically estimated from sentence-aligned parallel corpora. We show that significant improvements in the alignment and translation quality of such models can be achieved by additionally including word-aligned data during training. Incorporating word-level alignments into the parameter estimation of the IBM models reduces alignment error rate and increases the Bleu score when compared to training the same models only on sentence-aligned data. On the Verbmobil data set, we attain a 38% reduction in the alignment error rate and a higher Bleu score with half as many training examples. We discuss how varying the ratio of word-aligned to sentence-aligned data affects the expected performance gain.", "keyphrases": ["parallel corpora", "translation quality", "alignment error rate", "aer"]} +{"id": "dong-etal-2015-multi", "title": "Multi-Task Learning for Multiple Language Translation", "abstract": "In this paper, we investigate the problem of learning a machine translation model that can simultaneously translate sentences from one source language to multiple target languages. Our solution is inspired by the recently proposed neural machine translation model which generalizes machine translation as a sequence learning problem. We extend the neural machine translation to a multi-task learning framework which shares source language representation and separates the modeling of different target language translation. Our framework can be applied to situations where either large amounts of parallel data or limited parallel data is available. Experiments show that our multi-task learning model is able to achieve significantly higher translation quality over individually learned model in both situations on the data sets publicly available.", "keyphrases": ["multiple language", "multi-task learning", "paradigm", "attentional decoder", "parameter sharing"]} +{"id": "pham-etal-2020-priming", "title": "Priming Neural Machine Translation", "abstract": "Priming is a well known and studied psychology phenomenon based on the prior presentation of one stimulus (cue) to influence the processing of a response. In this paper, we propose a framework to mimic the process of priming in the context of neural machine translation (NMT). We evaluate the effect of using similar translations as priming cues on the NMT network. We propose a method to inject priming cues into the NMT network and compare our framework to other mechanisms that perform micro-adaptation during inference. Overall, experiments conducted in a multi-domain setting confirm that adding priming cues in the NMT decoder can go a long way towards improving the translation accuracy. 
Besides, we show the suitability of our framework to gather valuable information for an NMT network from monolingual resources.", "keyphrases": ["neural machine translation", "similar translation", "priming"]} +{"id": "moraes-etal-2016-university", "title": "University of Houston at CL-SciSumm 2016: SVMs with tree kernels and Sentence Similarity", "abstract": "This paper describes the University of Houston team\u2019s efforts toward the problem of identifying reference spans in a reference document given sentences from other documents that cite the reference document. We investigated the following approaches: cosine similarity with multiple incremental modifications and SVMs with a tree kernel. Although the best performing approach in our experiments is quite simple, it is not the best under every metric used for comparison. We also present a brief analysis of the dataset which includes information on its sparsity and frequency of section titles.", "keyphrases": ["svm", "tree kernel", "incremental modification"]} +{"id": "suhara-etal-2020-opiniondigest", "title": "OpinionDigest: A Simple Framework for Opinion Summarization", "abstract": "We present OpinionDigest, an abstractive opinion summarization framework, which does not rely on gold-standard summaries for training. The framework uses an Aspect-based Sentiment Analysis model to extract opinion phrases from reviews, and trains a Transformer model to reconstruct the original reviews from these extractions. At summarization time, we merge extractions from multiple reviews and select the most popular ones. The selected opinions are used as input to the trained Transformer model, which verbalizes them into an opinion summary. OpinionDigest can also generate customized summaries, tailored to specific user needs, by filtering the selected opinions according to their aspect and/or sentiment. Automatic evaluation on Yelp data shows that our framework outperforms competitive baselines. Human studies on two corpora verify that OpinionDigest produces informative summaries and shows promising customization capabilities.", "keyphrases": ["opinion summarization framework", "opiniondigest", "pipeline framework"]} +{"id": "ben-abacha-etal-2019-overview", "title": "Overview of the MEDIQA 2019 Shared Task on Textual Inference, Question Entailment and Question Answering", "abstract": "This paper presents the MEDIQA 2019 shared task organized at the ACL-BioNLP workshop. The shared task is motivated by a need to develop relevant methods, techniques and gold standards for inference and entailment in the medical domain, and their application to improve domain specific information retrieval and question answering systems. MEDIQA 2019 includes three tasks: Natural Language Inference (NLI), Recognizing Question Entailment (RQE), and Question Answering (QA) in the medical domain. 72 teams participated in the challenge, achieving an accuracy of 98% in the NLI task, 74.9% in the RQE task, and 78.3% in the QA task. In this paper, we describe the tasks, the datasets, and the participants' approaches and results. 
We hope that this shared task will attract further research efforts in textual inference, question entailment, and question answering in the medical domain.", "keyphrases": ["textual inference", "question answering", "mediqa challenge", "medical nli"]} +{"id": "ye-etal-2007-sentence", "title": "Sentence Level Machine Translation Evaluation as a Ranking", "abstract": "The paper proposes formulating MT evaluation as a ranking problem, as is often done in the practice of assessment by humans. Under the ranking scenario, the study also investigates the relative utility of several features. The results show greater correlation with human assessment at the sentence level, even when using an n-gram match score as a baseline feature. The feature contributing the most to the rank order correlation between automatic ranking and human assessment was the dependency structure relation rather than BLEU score and reference language model feature.", "keyphrases": ["ranking", "human assessment", "sentence level"]} +{"id": "stoyanov-etal-2009-conundrums", "title": "Conundrums in Noun Phrase Coreference Resolution: Making Sense of the State-of-the-Art", "abstract": "We aim to shed light on the state-of-the-art in NP coreference resolution by teasing apart the differences in the MUC and ACE task definitions, the assumptions made in evaluation methodologies, and inherent differences in text corpora. First, we examine three subproblems that play a role in coreference resolution: named entity recognition, anaphoricity determination, and coreference element detection. We measure the impact of each subproblem on coreference resolution and confirm that certain assumptions regarding these subproblems in the evaluation methodology can dramatically simplify the overall task. Second, we measure the performance of a state-of-the-art coreference resolver on several classes of anaphora and use these results to develop a quantitative measure for estimating coreference resolution performance on new data sets.", "keyphrases": ["coreference resolution", "mention", "state-of-the-art system"]} +{"id": "krishnakumaran-zhu-2007-hunting", "title": "Hunting Elusive Metaphors Using Lexical Resources.", "abstract": "In this paper we propose algorithms to automatically classify sentences into metaphoric or normal usages. Our algorithms only need the WordNet and bigram counts, and do not require training. We present empirical results on a test set derived from the Master Metaphor List. We also discuss issues that make classification of metaphors a tough problem in general.", "keyphrases": ["metaphor", "hyponymy relation", "sentence level"]} +{"id": "cassidy-etal-2014-annotation", "title": "An Annotation Framework for Dense Event Ordering", "abstract": "Today\u2019s event ordering research is heavily dependent on annotated corpora. Current corpora influence shared evaluations and drive algorithm development. Partly due to this dependence, most research focuses on partial orderings of a document\u2019s events. For instance, the TempEval competitions and the TimeBank only annotate small portions of the event graph, focusing on the most salient events or on specific types of event pairs (e.g., only events in the same sentence). Deeper temporal reasoners struggle with this sparsity because the entire temporal picture is not represented. This paper proposes a new annotation process with a mechanism to force annotators to label connected graphs.
It generates 10 times more relations per document than the TimeBank, and our TimeBank-Dense corpus is larger than all current corpora. We hope this process and its dense corpus encourage research on new global models with deeper reasoning.", "keyphrases": ["annotator", "ordering", "event pair", "timebank-dense", "temporal relation"]} +{"id": "liu-etal-2020-unsupervised", "title": "Unsupervised Paraphrasing by Simulated Annealing", "abstract": "We propose UPSA, a novel approach that accomplishes Unsupervised Paraphrasing by Simulated Annealing. We model paraphrase generation as an optimization problem and propose a sophisticated objective function, involving semantic similarity, expression diversity, and language fluency of paraphrases. UPSA searches the sentence space towards this objective by performing a sequence of local editing. We evaluate our approach on various datasets, namely, Quora, Wikianswers, MSCOCO, and Twitter. Extensive results show that UPSA achieves the state-of-the-art performance compared with previous unsupervised methods in terms of both automatic and human evaluations. Further, our approach outperforms most existing domain-adapted supervised models, showing the generalizability of UPSA.", "keyphrases": ["simulated annealing", "upsa", "sophisticated objective function", "sentence space", "unsupervised paraphrasing"]} +{"id": "jiang-etal-2019-improved", "title": "Improved Differentiable Architecture Search for Language Modeling and Named Entity Recognition", "abstract": "In this paper, we study differentiable neural architecture search (NAS) methods for natural language processing. In particular, we improve differentiable architecture search by removing the softmax-local constraint. Also, we apply differentiable NAS to named entity recognition (NER). It is the first time that differentiable NAS methods are adopted in NLP tasks other than language modeling. On both the PTB language modeling and CoNLL-2003 English NER data, our method outperforms strong baselines. It achieves a new state-of-the-art on the NER task.", "keyphrases": ["differentiable architecture search", "language modeling", "entity recognition", "help"]} +{"id": "palogiannidi-etal-2016-tweester", "title": "Tweester at SemEval-2016 Task 4: Sentiment Analysis in Twitter Using Semantic-Affective Model Adaptation", "abstract": "We describe our submission to SemEval-2016 Task 4: Sentiment Analysis in Twitter. The proposed system ranked first for subtask B. Our system comprises multiple independent models such as neural networks, semantic-affective models and topic modeling that are combined in a probabilistic way. The novelty of the system is the employment of a topic modeling approach in order to adapt the semantic-affective space for each tweet. In addition, significant enhancements were made in the main system dealing with the data preprocessing and feature extraction including the employment of word embeddings. Each model is used to predict a tweet\u2019s sentiment (positive, negative or neutral) and a late fusion scheme is adopted for the final decision.", "keyphrases": ["sentiment analysis", "twitter", "semantic-affective model"]} +{"id": "gildea-2020-efficient", "title": "Efficient Outside Computation", "abstract": "Weighted deduction systems provide a framework for describing parsing algorithms that can be used with a variety of operations for combining the values of partial derivations. For some operations, inside values can be computed efficiently, but outside values cannot.
We view outside values as functions from inside values to the total value of all derivations, and we analyze outside computation in terms of function composition. This viewpoint helps explain why efficient outside computation is possible in many settings, despite the lack of a general outside algorithm for semiring operations.", "keyphrases": ["computation", "deduction system", "outside value"]} +{"id": "she-chai-2017-interactive", "title": "Interactive Learning of Grounded Verb Semantics towards Human-Robot Communication", "abstract": "To enable human-robot communication and collaboration, previous works represent grounded verb semantics as the potential change of state to the physical world caused by these verbs. Grounded verb semantics are acquired mainly based on the parallel data of the use of a verb phrase and its corresponding sequences of primitive actions demonstrated by humans. The rich interaction between teachers and students that is considered important in learning new skills has not yet been explored. To address this limitation, this paper presents a new interactive learning approach that allows robots to proactively engage in interaction with human partners by asking good questions to learn models for grounded verb semantics. The proposed approach uses reinforcement learning to allow the robot to acquire an optimal policy for its question-asking behaviors by maximizing the long-term reward. Our empirical results have shown that the interactive learning approach leads to more reliable models for grounded verb semantics, especially in the noisy environment which is full of uncertainties. Compared to previous work, the models acquired from interactive learning result in a 48% to 145% performance gain when applied in new situations.", "keyphrases": ["verb semantic", "human-robot communication", "interactive learning"]} +{"id": "turcan-mckeown-2019-dreaddit", "title": "Dreaddit: A Reddit Dataset for Stress Analysis in Social Media", "abstract": "Stress is a nigh-universal human experience, particularly in the online world. While stress can be a motivator, too much stress is associated with many negative health outcomes, making its identification useful across a range of domains. However, existing computational research typically only studies stress in domains such as speech, or in short genres such as Twitter. We present Dreaddit, a new text corpus of lengthy multi-domain social media data for the identification of stress. Our dataset consists of 190K posts from five different categories of Reddit communities; we additionally label 3.5K total segments taken from 3K posts using Amazon Mechanical Turk. We present preliminary supervised learning methods for identifying stress, both neural and traditional, and analyze the complexity and diversity of the data and characteristics of each category.", "keyphrases": ["stress", "computational research", "dreaddit"]} +{"id": "bao-etal-2019-generating", "title": "Generating Sentences from Disentangled Syntactic and Semantic Spaces", "abstract": "Variational auto-encoders (VAEs) are widely used in natural language generation due to the regularization of the latent space. However, generating sentences from the continuous latent space does not explicitly model the syntactic information. In this paper, we propose to generate sentences from disentangled syntactic and semantic spaces.
Our proposed method explicitly models syntactic information in the VAE's latent space by using the linearized tree sequence, leading to better performance of language generation. Additionally, the advantage of sampling in the disentangled syntactic and semantic latent spaces enables us to perform novel applications, such as unsupervised paraphrase generation and syntax transfer generation. Experimental results show that our proposed model achieves similar or better performance in various tasks, compared with state-of-the-art related work.", "keyphrases": ["latent space", "paraphrase", "text generation"]} +{"id": "plank-etal-2016-multilingual", "title": "Multilingual Part-of-Speech Tagging with Bidirectional Long Short-Term Memory Models and Auxiliary Loss", "abstract": "Bidirectional long short-term memory (bi-LSTM) networks have recently proven successful for various NLP sequence modeling tasks, but little is known about their reliance on input representations, target languages, data set size, and label noise. We address these issues and evaluate bi-LSTMs with word, character, and unicode byte embeddings for POS tagging. We compare bi-LSTMs to traditional POS taggers across languages and data sizes. We also present a novel bi-LSTM model, which combines the POS tagging loss function with an auxiliary loss function that accounts for rare words. The model obtains state-of-the-art performance across 22 languages, and works especially well for morphologically complex languages. Our analysis suggests that bi-LSTMs are less sensitive to training data size and label corruptions (at small noise levels) than previously assumed.", "keyphrases": ["part-of-speech tagging", "long short-term memory", "auxiliary loss", "bi-lstms", "pos"]} +{"id": "springorum-etal-2013-regular", "title": "Regular Meaning Shifts in German Particle Verbs: A Case Study", "abstract": "This paper provides a corpus-based study on German particle verbs. We hypothesize that there are regular mechanisms in meaning shifts of a base verb in combination with a particle that do not only apply to the individual verb, but across a semantically coherent set of verbs. For example, the syntactically similar base verbs brummen \u2018hum\u2019 and donnern \u2018rumble\u2019 both describe an irritating, displeasing loud sound. Combined with the particle auf, they result in near-synonyms roughly meaning \u2018forcefully assigning a task\u2019 (in one of their senses). Covering 6 base verb groups and 3 particles with 4 particle meanings, we demonstrate that corpus-based information on the verbs\u2019 subcategorization frames plus conceptual properties of the nominal complements is a sufficient basis for defining such meaning shifts. While the paper is considerably more extensive than earlier related work, we view it as a case study toward a more automatic approach to identify and formalize meaning shifts in German particle verbs.", "keyphrases": ["particle", "base verb", "non-literal meaning"]} +{"id": "chen-etal-2014-unified", "title": "A Unified Model for Word Sense Representation and Disambiguation", "abstract": "Most word representation methods assume that each word owns a single semantic vector. This is usually problematic because lexical ambiguity is ubiquitous, which is also the problem to be resolved by word sense disambiguation. In this paper, we present a unified model for joint word sense representation and disambiguation, which will assign distinct representations for each word sense.
The basic idea is that both word sense representation (WSR) and word sense disambiguation (WSD) will benefit from each other: (1) high-quality WSR will capture rich information about words and senses, which should be helpful for WSD, and (2) high-quality WSD will provide reliable disambiguated corpora for learning better sense representations. Experimental results show that our model improves the performance of contextual word similarity compared to existing WSR methods, outperforms state-of-the-art supervised methods on domain-specific WSD, and achieves competitive performance on coarse-grained all-words WSD.", "keyphrases": ["word sense representation", "knowledge base", "gloss"]} +{"id": "zens-etal-2004-reordering", "title": "Reordering Constraints for Phrase-Based Statistical Machine Translation", "abstract": "In statistical machine translation, the generation of a translation hypothesis is computationally expensive. If arbitrary reorderings are permitted, the search problem is NP-hard. On the other hand, if we restrict the possible reorderings in an appropriate way, we obtain a polynomial-time search algorithm. We investigate different reordering constraints for phrase-based statistical machine translation, namely the IBM constraints and the ITG constraints. We present efficient dynamic programming algorithms for both constraints. We evaluate the constraints with respect to translation quality on two Japanese-English tasks. We show that the reordering constraints improve translation quality compared to an unconstrained search that permits arbitrary phrase reorderings. The ITG constraints perform best on both tasks and yield statistically significant improvements compared to the unconstrained search.", "keyphrases": ["statistical machine translation", "itg constraint", "efficiency", "flat reordering model", "expressiveness"]} +{"id": "dos-santos-etal-2015-classifying", "title": "Classifying Relations by Ranking with Convolutional Neural Networks", "abstract": "Relation classification is an important semantic processing task for which state-of-the-art systems still rely on costly handcrafted features. In this work we tackle the relation classification task using a convolutional neural network that performs classification by ranking (CR-CNN). We propose a new pairwise ranking loss function that makes it easy to reduce the impact of artificial classes. We perform experiments using the SemEval-2010 Task 8 dataset, which is designed for the task of classifying the relationship between two nominals marked in a sentence. Using CR-CNN, we outperform the state-of-the-art for this dataset and achieve an F1 of 84.1 without using any costly handcrafted features. Additionally, our experimental results show that: (1) our approach is more effective than CNN followed by a softmax classifier; (2) omitting the representation of the artificial class Other improves both precision and recall; and (3) using only word embeddings as input features is enough to achieve state-of-the-art results if we consider only the text between the two target nominals.", "keyphrases": ["convolutional neural networks", "relation extraction", "cnn model", "learning model"]} +{"id": "taghipour-ng-2015-one", "title": "One Million Sense-Tagged Instances for Word Sense Disambiguation and Induction", "abstract": "Supervised word sense disambiguation (WSD) systems are usually the best performing systems when evaluated on standard benchmarks. However, these systems need annotated training data to function properly.
While there are some publicly available open source WSD systems, very few large annotated datasets are available to the research community. The two main goals of this paper are to extract and annotate a large number of samples and release them for public use, and also to evaluate this dataset against some word sense disambiguation and induction tasks. We show that the open source IMS WSD system trained on our dataset achieves state-of-the-art results in standard disambiguation tasks and a recent word sense induction task, outperforming several task submissions and strong baselines.", "keyphrases": ["sense-tagged instance", "omsti", "english word"]} +{"id": "wang-etal-2018-target", "title": "Target-Sensitive Memory Networks for Aspect Sentiment Classification", "abstract": "Aspect sentiment classification (ASC) is a fundamental task in sentiment analysis. Given an aspect/target and a sentence, the task classifies the sentiment polarity expressed on the target in the sentence. Memory networks (MNs) have been used for this task recently and have achieved state-of-the-art results. In MNs, attention mechanism plays a crucial role in detecting the sentiment context for the given target. However, we found an important problem with the current MNs in performing the ASC task. Simply improving the attention mechanism will not solve it. The problem is referred to as target-sensitive sentiment, which means that the sentiment polarity of the (detected) context is dependent on the given target and it cannot be inferred from the context alone. To tackle this problem, we propose the target-sensitive memory networks (TMNs). Several alternative techniques are designed for the implementation of TMNs and their effectiveness is experimentally evaluated.", "keyphrases": ["memory network", "aspect sentiment classification", "absa task"]} +{"id": "chen-etal-2018-collective", "title": "Collective Event Detection via a Hierarchical and Bias Tagging Networks with Gated Multi-level Attention Mechanisms", "abstract": "Traditional approaches to the task of ACE event detection primarily regard multiple events in one sentence as independent ones and recognize them separately by using sentence-level information. However, events in one sentence are usually interdependent and sentence-level information is often insufficient to resolve ambiguities for some types of events. This paper proposes a novel framework dubbed Hierarchical and Bias Tagging Networks with Gated Multi-level Attention Mechanisms (HBTNGMA) to solve the two problems simultaneously. Firstly, we propose hierarchical and bias tagging networks to detect multiple events in one sentence collectively. Then, we devise a gated multi-level attention to automatically extract and dynamically fuse the sentence-level and document-level information. The experimental results on the widely used ACE 2005 dataset show that our approach significantly outperforms other state-of-the-art methods.", "keyphrases": ["event detection", "bias tagging networks", "multi-level attention mechanisms"]} +{"id": "ladhak-etal-2020-wikilingua", "title": "WikiLingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization", "abstract": "We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of cross-lingual abstractive summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high quality, collaborative resource of how-to guides on a diverse set of topics written by human authors.
We create gold-standard article-summary alignments across languages by aligning the images that are used to describe each how-to step in an article. As a set of baselines for further studies, we evaluate the performance of existing cross-lingual abstractive summarization methods on our dataset. We further propose a method for direct cross-lingual summarization (i.e., without requiring translation at inference time) by leveraging synthetic data and Neural Machine Translation as a pre-training step. Our method significantly outperforms the baseline approaches, while being more cost-efficient during inference.", "keyphrases": ["multilingual dataset", "abstractive summarization system", "wikilingua"]} +{"id": "schulte-im-walde-2006-experiments", "title": "Experiments on the Automatic Induction of German Semantic Verb Classes", "abstract": "This article presents clustering experiments on German verbs: A statistical grammar model for German serves as the source for a distributional verb description at the lexical syntax-semantics interface, and the unsupervised clustering algorithm k-means uses the empirical verb properties to perform an automatic induction of verb classes. Various evaluation measures are applied to compare the clustering results to gold standard German semantic verb classes under different criteria. The primary goals of the experiments are (1) to empirically utilize and investigate the well-established relationship between verb meaning and verb behavior within a cluster analysis and (2) to investigate the required technical parameters of a cluster analysis with respect to this specific linguistic task. The clustering methodology is developed on a small-scale verb set and then applied to a larger-scale verb set including 883 German verbs.", "keyphrases": ["automatic induction", "verb class", "grammar model"]} +{"id": "camgoz-etal-2016-bosphorussign", "title": "BosphorusSign: A Turkish Sign Language Recognition Corpus in Health and Finance Domains", "abstract": "There are as many sign languages as there are deaf communities in the world. Linguists have been collecting corpora of different sign languages and annotating them extensively in order to study and understand their properties. On the other hand, the field of computer vision has approached the sign language recognition problem as a grand challenge and research efforts have intensified in the last 20 years. However, corpora collected for studying linguistic properties are often not suitable for sign language recognition as the statistical methods used in the field require large amounts of data. Recently, with the availability of inexpensive depth cameras, groups from the computer vision community have started collecting corpora with a large number of repetitions for sign language recognition research. In this paper, we present the BosphorusSign Turkish Sign Language corpus, which consists of 855 sign and phrase samples from the health, finance and everyday life domains. The corpus is collected using the state-of-the-art Microsoft Kinect v2 depth sensor, and will be the first in this sign language research field.
Furthermore, there will be annotations rendered by linguists so that the corpus will appeal both to the linguistic and sign language recognition research communities.", "keyphrases": ["sign language", "health", "bosphorussign"]} +{"id": "foster-kuhn-2007-mixture", "title": "Mixture-Model Adaptation for SMT", "abstract": "We describe a mixture-model approach to adapting a Statistical Machine Translation System for new domains, using weights that depend on text distances to mixture components. We investigate a number of variants on this approach, including cross-domain versus dynamic adaptation; linear versus loglinear mixtures; language and translation model adaptation; different methods of assigning weights; and granularity of the source unit being adapted to. The best methods achieve gains of approximately one BLEU percentage point over a state-of-the-art non-adapted baseline system.", "keyphrases": ["adaptation", "mixture-model approach", "sub-model", "mixture weight", "corpus level"]} +{"id": "he-etal-2021-realformer", "title": "RealFormer: Transformer Likes Residual Attention", "abstract": "Transformer is the backbone of modern NLP models. In this paper, we propose RealFormer, a simple and generic technique to create Residual Attention Layer Transformer networks that significantly outperform the canonical Transformer and its variants (BERT, ETC, etc.) on a wide spectrum of tasks including Masked Language Modeling, GLUE, SQuAD, Neural Machine Translation, WikiHop, HotpotQA, Natural Questions, and OpenKP. We also observe empirically that RealFormer stabilizes training and leads to models with sparser attention. Source code and pre-trained checkpoints for RealFormer can be found at https://github.com/google-research/google-research/tree/master/realformer.", "keyphrases": ["transformer", "realformer", "motivation"]} +{"id": "abu-sheikha-inkpen-2011-generation", "title": "Generation of Formal and Informal Sentences", "abstract": "This paper addresses the task of using natural language generation (NLG) techniques to generate sentences with formal and with informal style. We studied the main characteristics of each style, which helped us to choose parameters that can produce sentences in one of the two styles. We collected some ready-made parallel lists of formal and informal words and phrases, from different sources. In addition, we added two more parallel lists: one that contains most of the contractions in English (short forms) and their full forms, and another one that consists of some common abbreviations and their full forms. These parallel lists might help to generate sentences in the preferred style, by changing words or expressions for that style. Our NLG system is built on top of the SimpleNLG package (Gatt and Reiter, 2009). We used templates from which we generated valid English texts with formal or informal style. In order to evaluate the quality of the generated sentences and their level of formality, we used human judges. The evaluation results show that our system can generate formal and informal style successfully, with high accuracy.
The main contribution of our work consists in designing a set of parameters that led to good results for the task of generating texts with different formality levels.", "keyphrases": ["informal sentence", "list", "different formality level"]} +{"id": "conroy-dang-2008-mind", "title": "Mind the Gap: Dangers of Divorcing Evaluations of Summary Content from Linguistic Quality", "abstract": "In this paper, we analyze the state of current human and automatic evaluation of topic-focused summarization in the Document Understanding Conference main task for 2005-2007. The analyses show that while ROUGE has very strong correlation with responsiveness for both human and automatic summaries, there is a significant gap in responsiveness between humans and systems which is not accounted for by the ROUGE metrics. In addition to teasing out gaps in the current automatic evaluation, we propose a method to maximize the strength of current automatic evaluations by using the method of canonical correlation. We apply this new evaluation method, which we call ROSE (ROUGE Optimal Summarization Evaluation), to find the optimal linear combination of ROUGE scores to maximize correlation with human responsiveness.", "keyphrases": ["gap", "linguistic quality", "responsiveness"]} +{"id": "williams-etal-2018-latent", "title": "Do latent tree learning models identify meaningful structure in sentences?", "abstract": "Recent work on the problem of latent tree learning has made it possible to train neural networks that learn to both parse a sentence and use the resulting parse to interpret the sentence, all without exposure to ground-truth parse trees at training time. Surprisingly, these models often perform better at sentence understanding tasks than models that use parse trees from conventional parsers. This paper aims to investigate what these latent tree learning models learn. We replicate two such models in a shared codebase and find that (i) only one of these models outperforms conventional tree-structured models on sentence classification, (ii) its parsing strategies are not especially consistent across random restarts, (iii) the parses it produces tend to be shallower than standard Penn Treebank (PTB) parses, and (iv) they do not resemble those of PTB or any other semantic or syntactic formalism that the authors are aware of.", "keyphrases": ["latent tree", "syntactic formalism", "brief survey"]} +{"id": "geva-etal-2021-transformer", "title": "Transformer Feed-Forward Layers Are Key-Value Memories", "abstract": "Feed-forward layers constitute two-thirds of a transformer model's parameters, yet their role in the network remains under-explored. We show that feed-forward layers in transformer-based language models operate as key-value memories, where each key correlates with textual patterns in the training examples, and each value induces a distribution over the output vocabulary. Our experiments show that the learned patterns are human-interpretable, and that lower layers tend to capture shallow patterns, while upper layers learn more semantic ones. The values complement the keys' input patterns by inducing output distributions that concentrate probability mass on tokens likely to appear immediately after each pattern, particularly in the upper layers.
Finally, we demonstrate that the output of a feed-forward layer is a composition of its memories, which is subsequently refined throughout the model's layers via residual connections to produce the final output distribution.", "keyphrases": ["feed-forward layer", "key-value memory", "transformer"]} +{"id": "mysore-gopinath-etal-2018-supervised", "title": "Supervised and Unsupervised Methods for Robust Separation of Section Titles and Prose Text in Web Documents", "abstract": "The text in many web documents is organized into a hierarchy of section titles and corresponding prose content, a structure which provides potentially exploitable information on discourse structure and topicality. However, this organization is generally discarded during text collection, and collecting it is not straightforward: the same visual organization can be implemented in a myriad of different ways in the underlying HTML. To remedy this, we present a flexible system for automatically extracting the hierarchical section titles and prose organization of web documents irrespective of differences in HTML representation. This system uses features from syntax, semantics, discourse and markup to build two models which classify HTML text into section titles and prose text. When tested on three different domains of web text, our domain-independent system achieves an overall precision of 0.82 and a recall of 0.98. The domain-dependent variation produces very high precision (0.99) at the expense of recall (0.75). These results exhibit a robust level of accuracy suitable for enhancing question answering, information extraction, and summarization.", "keyphrases": ["prose text", "web document", "header"]} +{"id": "lechelle-etal-2019-wire57", "title": "WiRe57 : A Fine-Grained Benchmark for Open Information Extraction", "abstract": "We build a reference for the task of Open Information Extraction, on five documents. We tentatively resolve a number of issues that arise, including coreference and granularity, and we take steps toward addressing inference, a significant problem. We seek to better pinpoint the requirements for the task. We produce our annotation guidelines specifying what is correct to extract and what is not. In turn, we use this reference to score existing Open IE systems. We address the non-trivial problem of evaluating the extractions produced by systems against the reference tuples, and share our evaluation script. Among seven compared extractors, we find the MinIE system to perform best.", "keyphrases": ["open information extraction", "wire57", "scorer"]} +{"id": "quan-ren-2009-construction", "title": "Construction of a Blog Emotion Corpus for Chinese Emotional Expression Analysis", "abstract": "There is plenty of evidence that emotion analysis has many valuable applications. In this study a blog emotion corpus is constructed for Chinese emotional expression analysis. This corpus contains manual annotation of eight emotional categories (expect, joy, love, surprise, anxiety, sorrow, angry and hate), emotion intensity, emotion holder/target, emotional word/phrase, degree word, negative word, conjunction, rhetoric, punctuation and other linguistic expressions that indicate emotion. Annotation agreement analyses for emotion classes and emotional words and phrases are described. 
Then, using this corpus, we explore emotion expressions in Chinese and present the analyses on them.", "keyphrases": ["blog emotion corpus", "emotional expression analysis", "negative word"]} +{"id": "pan-etal-2021-contrastive", "title": "Contrastive Learning for Many-to-many Multilingual Neural Machine Translation", "abstract": "Existing multilingual machine translation approaches mainly focus on English-centric directions, while the non-English directions still lag behind. In this work, we aim to build a many-to-many translation system with an emphasis on the quality of non-English language directions. Our intuition is based on the hypothesis that a universal cross-language representation leads to better multilingual translation performance. To this end, we propose mRASP2, a training method to obtain a single unified multilingual translation model. mRASP2 is empowered by two techniques: a) a contrastive learning scheme to close the gap among representations of different languages, and b) data augmentation on both multiple parallel and monolingual data to further align token representations. For English-centric directions, mRASP2 achieves competitive or even better performance than a strong pre-trained model mBART on tens of WMT benchmarks. For non-English directions, mRASP2 achieves an average improvement of 10+ BLEU compared with the multilingual baseline.", "keyphrases": ["translation system", "mrasp2", "contrastive learning"]} +{"id": "zhang-etal-2016-keyphrase", "title": "Keyphrase Extraction Using Deep Recurrent Neural Networks on Twitter", "abstract": "Keyphrases can provide highly condensed and valuable information that allows users to quickly acquire the main ideas. The task of automatically extracting them has received considerable attention in recent decades. Different from previous studies, which are usually focused on automatically extracting keyphrases from documents or articles, in this study, we considered the problem of automatically extracting keyphrases from tweets. Because of the length limitations of Twitter-like sites, the performances of existing methods usually drop sharply. We proposed a novel deep recurrent neural network (RNN) model to combine keywords and context information to address this problem. To evaluate the proposed method, we also constructed a large-scale dataset collected from Twitter. The experimental results showed that the proposed method performs significantly better than previous methods.", "keyphrases": ["recurrent neural network", "twitter", "keyphrase extraction"]} +{"id": "liu-etal-2009-joint", "title": "Joint Decoding with Multiple Translation Models", "abstract": "Current SMT systems usually decode with single translation models and cannot benefit from the strengths of other models in the decoding phase. We instead propose joint decoding, a method that combines multiple translation models in one decoder. Our joint decoder draws connections among multiple models by integrating the translation hypergraphs they produce individually. Therefore, one model can share translations and even derivations with other models.
Comparable to the state-of-the-art system combination technique, joint decoding achieves an absolute improvement of 1.5 BLEU points over individual decoding.", "keyphrases": ["translation hypergraph", "joint decoding", "tree-to-string model"]} +{"id": "balikas-etal-2016-modeling", "title": "Modeling topic dependencies in semantically coherent text spans with copulas", "abstract": "The exchangeability assumption in topic models like Latent Dirichlet Allocation (LDA) often results in inferring inconsistent topics for the words of text spans like noun-phrases, which are usually expected to be topically coherent. We propose copulaLDA, which extends LDA by integrating part of the text structure into the model and relaxes the conditional independence assumption between the word-specific latent topics given the per-document topic distributions. To this end, we assume that the words of text spans like noun-phrases are topically bound and we model this dependence with copulas. We demonstrate empirically the effectiveness of copulaLDA on both intrinsic and extrinsic evaluation tasks on several publicly available corpora.", "keyphrases": ["text span", "copula", "topic distribution"]} +{"id": "gimenez-marquez-2007-context", "title": "Context-aware Discriminative Phrase Selection for Statistical Machine Translation", "abstract": "In this work we revise the application of discriminative learning to the problem of phrase selection in Statistical Machine Translation. Inspired by common techniques used in Word Sense Disambiguation, we train classifiers based on local context to predict possible phrase translations. Our work extends that of Vickrey et al. (2005) in two main aspects. First, we move from word translation to phrase translation. Second, we move from the 'blank-filling' task to the 'full translation' task. We report results on a set of highly frequent source phrases, obtaining a significant improvement, especially with respect to adequacy, according to a rigorous process of manual evaluation.", "keyphrases": ["statistical machine translation", "recent approach", "smt system"]} +{"id": "ji-etal-2017-dynamic", "title": "Dynamic Entity Representations in Neural Language Models", "abstract": "Understanding a long document requires tracking how entities are introduced and evolve over time. We present a new type of language model, EntityNLM, that can explicitly model entities, dynamically update their representations, and contextually generate their mentions. Our model is generative and flexible; it can model an arbitrary number of entities in context while generating each entity mention at an arbitrary length. In addition, it can be used for several different tasks such as language modeling, coreference resolution, and entity prediction. Experimental results with all these tasks demonstrate that our model consistently outperforms strong baselines and prior work.", "keyphrases": ["entity representation", "language model", "coreference", "dynamic representation"]} +{"id": "bryant-etal-2017-automatic", "title": "Automatic Annotation and Evaluation of Error Types for Grammatical Error Correction", "abstract": "Until now, error type performance for Grammatical Error Correction (GEC) systems could only be measured in terms of recall because system output is not annotated.
To overcome this problem, we introduce ERRANT, a grammatical ERRor ANnotation Toolkit designed to automatically extract edits from parallel original and corrected sentences and classify them according to a new, dataset-agnostic, rule-based framework. This not only facilitates error type evaluation at different levels of granularity, but can also be used to reduce annotator workload and standardise existing GEC datasets. Human experts rated the automatic edits as \u201cGood\u201d or \u201cAcceptable\u201d in at least 95% of cases, so we applied ERRANT to the system output of the CoNLL-2014 shared task to carry out a detailed error type analysis for the first time.", "keyphrases": ["grammatical error correction", "edit", "rule-based framework"]} +{"id": "tillmann-zhang-2005-localized", "title": "A Localized Prediction Model for Statistical Machine Translation", "abstract": "In this paper, we present a novel training method for a localized phrase-based prediction model for statistical machine translation (SMT). The model predicts blocks with orientation to handle local phrase re-ordering. We use a maximum likelihood criterion to train a log-linear block bigram model which uses real-valued features (e.g. a language model score) as well as binary features based on the block identities themselves, e.g. block bigram features. Our training algorithm can easily handle millions of features. The best system obtains an 18.6% improvement over the baseline on a standard Arabic-English translation task.", "keyphrases": ["statistical machine translation", "block", "orientation"]} +{"id": "bjorne-salakoski-2011-generalizing", "title": "Generalizing Biomedical Event Extraction", "abstract": "We present a system for extracting biomedical events (detailed descriptions of biomolecular interactions) from research articles. This system was developed for the BioNLP'11 Shared Task and extends our BioNLP'09 Shared Task winning Turku Event Extraction System. It uses support vector machines to first detect event-defining words, followed by detection of their relationships. The theme of the BioNLP'11 Shared Task is generalization, extending event extraction to varied biomedical domains. Our current system successfully predicts events for every domain case introduced in the BioNLP'11 Shared Task, being the only system to participate in all eight tasks and all of their subtasks, with best performance in four tasks.", "keyphrases": ["biomedical event extraction", "support vector machine", "hand-crafted feature"]} +{"id": "zhao-etal-2020-spanmlt", "title": "SpanMlt: A Span-based Multi-Task Learning Framework for Pair-wise Aspect and Opinion Terms Extraction", "abstract": "Aspect terms extraction and opinion terms extraction are two key problems of fine-grained Aspect Based Sentiment Analysis (ABSA). The aspect-opinion pairs can provide a global profile about a product or service for consumers and opinion mining systems. However, traditional methods cannot directly output aspect-opinion pairs without being given aspect terms or opinion terms. Although some recent co-extraction methods have been proposed to extract both terms jointly, they fail to extract them as pairs. To this end, this paper proposes an end-to-end method to solve the task of Pair-wise Aspect and Opinion Terms Extraction (PAOTE). Furthermore, this paper treats the problem from a perspective of joint term and relation extraction rather than under the sequence tagging formulation performed in most prior works.
We propose a multi-task learning framework based on shared spans, where the terms are extracted under the supervision of span boundaries. Meanwhile, the pair-wise relations are jointly identified using the span representations. Extensive experiments show that our model consistently outperforms state-of-the-art methods.", "keyphrases": ["multi-task learning framework", "pair-wise aspect", "opinion terms extraction", "spanmlt"]} +{"id": "postolache-etal-2006-transferring", "title": "Transferring Coreference Chains through Word Alignment", "abstract": "This paper investigates the problem of automatically annotating resources with NP coreference information using a parallel corpus, English-Romanian, in order to transfer, through word alignment, coreference chains from the English part to the Romanian part of the corpus. The results show that we can detect Romanian referential expressions and coreference chains with over 80% F-measure, thus using our method as a preprocessing step followed by manual correction as part of an annotation effort for creating a large Romanian corpus with coreference information is worthwhile.", "keyphrases": ["word alignment", "coreference resolution", "english-romanian corpus", "annotation projection"]} +{"id": "pustejovsky-stubbs-2011-increasing", "title": "Increasing Informativeness in Temporal Annotation", "abstract": "In this paper, we discuss some of the challenges of adequately applying a specification language to an annotation task, as embodied in a specific guideline. In particular, we discuss some issues with TimeML motivated by error analysis on annotated TLINKs in TimeBank. We introduce a document level information structure we call a narrative container (NC), designed to increase informativeness and accuracy of temporal relation identification. The narrative container is the default interval containing the events being discussed in the text, when no explicit temporal anchor is given. By exploiting this notion in the creation of a new temporal annotation over TimeBank, we were able to reduce inconsistencies and increase informativeness when compared to existing TLINKs in TimeBank.", "keyphrases": ["informativeness", "temporal annotation", "narrative container"]} +{"id": "deschacht-moens-2007-text", "title": "Text Analysis for Automatic Image Annotation", "abstract": "We present a novel approach to automatically annotate images using associated text. We detect and classify all entities (persons and objects) in the text after which we determine the salience (the importance of an entity in a text) and visualness (the extent to which an entity can be perceived visually) of these entities. We combine these measures to compute the probability that an entity is present in the image. The suitability of our approach was successfully tested on 50 image-text pairs of Yahoo! News.", "keyphrases": ["image", "salience", "news article"]} +{"id": "almeida-martins-2013-fast", "title": "Fast and Robust Compressive Summarization with Dual Decomposition and Multi-Task Learning", "abstract": "We present a dual decomposition framework for multi-document summarization, using a model that jointly extracts and compresses sentences. Compared with previous work based on integer linear programming, our approach does not require external solvers, is significantly faster, and is modular in the three qualities a summary should have: conciseness, informativeness, and grammaticality. 
In addition, we propose a multi-task learning framework to take advantage of existing data for extractive summarization and sentence compression. Experiments on the TAC2008 dataset yield the highest published ROUGE scores to date, with runtimes that rival those of extractive summarizers.", "keyphrases": ["summarization", "dual decomposition", "approximation"]} +{"id": "tan-etal-2020-detecting", "title": "Detecting Cross-Modal Inconsistency to Defend Against Neural Fake News", "abstract": "Large-scale dissemination of disinformation online intended to mislead or deceive the general population is a major societal problem. Rapid progression in image, video, and natural language generative models has only exacerbated this situation and intensified our need for an effective defense mechanism. While existing approaches have been proposed to defend against neural fake news, they are generally constrained to the very limited setting where articles only have text and metadata such as the title and authors. In this paper, we introduce the more realistic and challenging task of defending against machine-generated news that also includes images and captions. To identify the possible weaknesses that adversaries can exploit, we create a NeuralNews dataset which is comprised of 4 different types of generated articles as well as conduct a series of human user study experiments based on this dataset. Coupled with providing a relatively effective approach based on detecting visual-semantic inconsistencies, the valuable insights gleaned from our user study experiments and, consequently, this paper will serve as an effective first line of defense and a valuable reference for future work in defending against machine-generated disinformation.", "keyphrases": ["inconsistency", "large-scale dissemination", "video", "neural fake news"]} +{"id": "mou-etal-2016-sequence", "title": "Sequence to Backward and Forward Sequences: A Content-Introducing Approach to Generative Short-Text Conversation", "abstract": "Using neural networks to generate replies in human-computer dialogue systems has been attracting increasing attention over the past few years. However, the performance is not satisfactory: the neural network tends to generate safe, universally relevant replies which carry little meaning. In this paper, we propose a content-introducing approach to neural network-based generative dialogue systems. We first use pointwise mutual information (PMI) to predict a noun as a keyword, reflecting the main gist of the reply. We then propose seq2BF, a \u201csequence to backward and forward sequences\u201d model, which generates a reply containing the given keyword. Experimental results show that our approach significantly outperforms traditional sequence-to-sequence models in terms of human evaluation and the entropy measure, and that the predicted keyword can appear at an appropriate position in the reply.", "keyphrases": ["content-introducing approach", "generative short-text conversation", "dialogue system", "traditional sequence-to-sequence model", "response generation"]} +{"id": "zirn-etal-2011-fine", "title": "Fine-Grained Sentiment Analysis with Structural Features", "abstract": "Sentiment analysis is the problem of determining the polarity of a text with respect to a particular topic. For most applications, however, it is not only necessary to derive the polarity of a text as a whole but also to extract negative and positive utterances on a more fine-grained level.
Sentiment analysis systems working on the (sub-)sentence level, however, are difficult to develop since shorter textual segments rarely carry enough information to determine their polarity out of context. In this paper, therefore, we present a fully automatic framework for fine-grained sentiment analysis on the subsentence level combining multiple sentiment lexicons and neighborhood as well as discourse relations to overcome this problem. We use Markov logic to integrate polarity scores from different sentiment lexicons with information about relations between neighboring segments, and evaluate the approach on product reviews. The experiments show that the use of structural features improves the accuracy of polarity predictions, achieving accuracy scores of up to 69%.", "keyphrases": ["structural feature", "discourse relation", "polarity prediction", "fine-grained sentiment analysis"]} +{"id": "novikova-etal-2017-e2e", "title": "The E2E Dataset: New Challenges For End-to-End Generation", "abstract": "This paper describes the E2E data, a new dataset for training end-to-end, data-driven natural language generation systems in the restaurant domain, which is ten times bigger than existing, frequently used datasets in this area. The E2E dataset poses new challenges: (1) its human reference texts show more lexical richness and syntactic variation, including discourse phenomena; (2) generating from this set requires content selection. As such, learning from this dataset promises more natural, varied and less template-like system utterances. We also establish a baseline on this dataset, which illustrates some of the difficulties associated with this data.", "keyphrases": ["e2e dataset", "end-to-end", "restaurant domain", "nlg", "creation"]} +{"id": "imran-etal-2016-twitter", "title": "Twitter as a Lifeline: Human-annotated Twitter Corpora for NLP of Crisis-related Messages", "abstract": "Microblogging platforms such as Twitter provide active communication channels during mass convergence and emergency events such as earthquakes and typhoons. During the sudden onset of a crisis situation, affected people post useful information on Twitter that can be used for situational awareness and other humanitarian disaster response efforts, if processed timely and effectively. Processing social media information poses multiple challenges such as parsing noisy, brief and informal messages, learning information categories from the incoming stream of messages and classifying them into different classes, among others. One of the basic necessities of many of these tasks is the availability of data, in particular human-annotated data. In this paper, we present human-annotated Twitter corpora collected during 19 different crises that took place between 2013 and 2015. To demonstrate the utility of the annotations, we train machine learning classifiers. Moreover, we publish the first and largest word2vec word embeddings trained on 52 million crisis-related tweets. To deal with tweets' language issues, we present human-annotated normalized lexical resources for different lexical variations.", "keyphrases": ["crisis-related tweet", "twitter", "crisisnlp"]} +{"id": "ju-etal-2018-neural", "title": "A Neural Layered Model for Nested Named Entity Recognition", "abstract": "Entity mentions embedded in longer entity mentions are referred to as nested entities.
Most named entity recognition (NER) systems deal only with the flat entities and ignore the inner nested ones, which fails to capture finer-grained semantic information in underlying texts. To address this issue, we propose a novel neural model to identify nested entities by dynamically stacking flat NER layers. Each flat NER layer is based on the state-of-the-art flat NER model that captures sequential context representation with bidirectional Long Short-Term Memory (LSTM) layer and feeds it to the cascaded CRF layer. Our model merges the output of the LSTM layer in the current flat NER layer to build new representation for detected entities and subsequently feeds them into the next flat NER layer. This allows our model to extract outer entities by taking full advantage of information encoded in their corresponding inner entities, in an inside-to-outside way. Our model dynamically stacks the flat NER layers until no outer entities are extracted. Extensive evaluation shows that our dynamic model outperforms state-of-the-art feature-based systems on nested NER, achieving 74.7% and 72.2% on GENIA and ACE2005 datasets, respectively, in terms of F-score.", "keyphrases": ["entity mention", "novel neural model", "ner layer", "sequence labeling model"]} +{"id": "hai-etal-2016-deceptive", "title": "Deceptive Review Spam Detection via Exploiting Task Relatedness and Unlabeled Data", "abstract": "Existing work on detecting deceptive reviews primarily focuses on feature engineering and applies off-the-shelf supervised classification algorithms to the problem. Then, one real challenge would be to manually recognize plentiful ground truth spam review data for model building, which is rather difficult and often requires domain expertise in practice. In this paper, we propose to exploit the relatedness of multiple review spam detection tasks and readily available unlabeled data to address the scarcity of labeled opinion spam data. We first develop a multi-task learning method based on logistic regression (MTL-LR), which can boost the learning for a task by sharing the knowledge contained in the training signals of other related tasks. To leverage the unlabeled data, we introduce a graph Laplacian regularizer into each base model. We then propose a novel semi-supervised multi-task learning method via Laplacian regularized logistic regression (SMTL-LLR) to further improve the review spam detection performance. We also develop a stochastic alternating method to cope with the optimization for SMTL-LLR. Experimental results on real-world review data demonstrate the benefit of SMTL-LLR over several well-established baseline methods.", "keyphrases": ["relatedness", "unlabeled data", "opinion spam data", "deceptive review"]} +{"id": "grishina-stede-2015-knowledge", "title": "Knowledge-lean projection of coreference chains across languages", "abstract": "Common technologies for automatic coreference resolution require either a language-specific rule set or large collections of manually annotated data, which is typically limited to newswire texts in major languages. This makes it difficult to develop coreference resolvers for a large number of the so-called low-resourced languages. We apply a direct projection algorithm on a multi-genre and multilingual corpus (English, German, Russian) to automatically produce coreference annotations for two target languages without exploiting any linguistic knowledge of the languages. 
Our evaluation of the projected annotations shows promising results, and the error analysis reveals structural differences of referring expressions and coreference chains for the three languages, which can now be targeted with more linguistically-informed projection algorithms.", "keyphrases": ["coreference chain", "linguistic knowledge", "knowledge-lean projection"]} +{"id": "vincze-etal-2011-multiword", "title": "Multiword Expressions and Named Entities in the Wiki50 Corpus", "abstract": "Multiword expressions (MWEs) and named entities (NEs) exhibit unique and idiosyncratic features; thus, they often pose a problem to NLP systems. In order to facilitate their identification, we developed the first corpus of Wikipedia articles in which several types of multiword expressions and named entities are manually annotated at the same time. The corpus can be used for training or testing MWE-detectors or NER systems, which we illustrate with experiments, and it also makes it possible to investigate the co-occurrences of different types of MWEs and NEs within the same domain.", "keyphrases": ["wiki50 corpus", "different type", "multiword expressions", "sequence tagger"]} +{"id": "verga-etal-2016-multilingual", "title": "Multilingual Relation Extraction using Compositional Universal Schema", "abstract": "Universal schema builds a knowledge base (KB) of entities and relations by jointly embedding all relation types from input KBs as well as textual patterns expressing relations from raw text. In most previous applications of universal schema, each textual pattern is represented as a single embedding, preventing generalization to unseen patterns. Recent work employs a neural network to capture patterns' compositional semantics, providing generalization to all possible input text. In response, this paper introduces significant further improvements to the coverage and flexibility of universal schema relation extraction: predictions for entities unseen in training and multilingual transfer learning to domains with no annotation. We evaluate our model through extensive experiments on the English and Spanish TAC KBP benchmark, outperforming the top system from TAC 2013 slot-filling using no handwritten patterns or additional annotation. We also consider a multilingual setting in which English training data entities overlap with the seed KB, but Spanish text does not. Despite having no annotation for Spanish data, we train an accurate predictor, with additional improvements obtained by tying word embeddings across languages. Furthermore, we find that multilingual training improves English relation extraction accuracy. Our approach is thus suited to broad-coverage automated knowledge base construction in a variety of languages and domains.", "keyphrases": ["universal schema", "knowledge base", "relation type", "textual pattern", "multilingual transfer"]} +{"id": "brockett-etal-2006-correcting", "title": "Correcting ESL Errors Using Phrasal SMT Techniques", "abstract": "This paper presents a pilot study of the use of phrasal Statistical Machine Translation (SMT) techniques to identify and correct writing errors made by learners of English as a Second Language (ESL). Using examples of mass noun errors found in the Chinese Learner Error Corpus (CLEC) to guide creation of an engineered training set, we show that application of the SMT paradigm can capture errors not well addressed by widely-used proofing tools designed for native speakers. 
Our system was able to correct 61.81% of mistakes in a set of naturally-occurring examples of mass noun errors found on the World Wide Web, suggesting that efforts to collect alignable corpora of pre- and post-editing ESL writing samples can enable the development of SMT-based writing assistance tools capable of repairing many of the complex syntactic and lexical problems found in the writing of ESL learners.", "keyphrases": ["esl error", "machine translation", "uncountable noun", "smt system", "regular expression"]} +{"id": "kartsaklis-etal-2018-mapping", "title": "Mapping Text to Knowledge Graph Entities using Multi-Sense LSTMs", "abstract": "This paper addresses the problem of mapping natural language text to knowledge base entities. The mapping process is approached as a composition of a phrase or a sentence into a point in a multi-dimensional entity space obtained from a knowledge graph. The compositional model is an LSTM equipped with a dynamic disambiguation mechanism on the input word embeddings (a Multi-Sense LSTM), addressing polysemy issues. Further, the knowledge base space is prepared by collecting random walks from a graph enhanced with textual features, which act as a set of semantic bridges between text and knowledge base entities. The ideas of this work are demonstrated on large-scale text-to-entity mapping and entity classification tasks, with state-of-the-art results.", "keyphrases": ["knowledge graph", "multi-sense lstm", "textual feature"]} +{"id": "cherry-etal-2018-revisiting", "title": "Revisiting Character-Based Neural Machine Translation with Capacity and Compression", "abstract": "Translating characters instead of words or word-fragments has the potential to simplify the processing pipeline for neural machine translation (NMT), and improve results by eliminating hyper-parameters and manual feature engineering. However, it results in longer sequences in which each symbol contains less information, creating both modeling and computational challenges. In this paper, we show that the modeling problem can be solved by standard sequence-to-sequence architectures of sufficient depth, and that deep models operating at the character level outperform identical models operating over word fragments. This result implies that alternative architectures for handling character input are better viewed as methods for reducing computation time than as improved ways of modeling longer sequences. From this perspective, we evaluate several techniques for character-level NMT, verify that they do not match the performance of our deep character baseline model, and evaluate the performance versus computation time tradeoffs they offer. Within this framework, we also perform the first evaluation for NMT of conditional computation over time, in which the model learns which timesteps can be skipped, rather than having them be dictated by a fixed schedule specified before training begins.", "keyphrases": ["neural machine translation", "character", "rich language", "translation quality", "long sequence"]} +{"id": "pasupat-etal-2021-controllable", "title": "Controllable Semantic Parsing via Retrieval Augmentation", "abstract": "In practical applications of semantic parsing, we often want to rapidly change the behavior of the parser, such as enabling it to handle queries in a new domain, or changing its predictions on certain targeted queries. 
While we can introduce new training examples exhibiting the target behavior, a mechanism for enacting such behavior changes without expensive model re-training would be preferable. To this end, we propose ControllAble Semantic Parser via Exemplar Retrieval (CASPER). Given an input query, the parser retrieves related exemplars from a retrieval index, augments them to the query, and then applies a generative seq2seq model to produce an output parse. The exemplars act as a control mechanism over the generic generative model: by manipulating the retrieval index or how the augmented query is constructed, we can manipulate the behavior of the parser. On the MTOP dataset, in addition to achieving state-of-the-art on the standard setup, we show that CASPER can parse queries in a new domain, adapt the prediction toward the specified patterns, or adapt to new semantic schemas without having to further re-train the model.", "keyphrases": ["semantic parsing", "exemplar retrieval", "casper"]} +{"id": "zhao-etal-2004-language", "title": "Language Model Adaptation for Statistical Machine Translation via Structured Query Models", "abstract": "We explore unsupervised language model adaptation techniques for Statistical Machine Translation. The hypotheses from the machine translation output are converted into queries at different levels of representation power and used to extract similar sentences from a very large monolingual text collection. Specific language models are then built from the retrieved data and interpolated with a general background model. Experiments show significant improvements when translating with these adapted language models.", "keyphrases": ["query", "similar sentence", "language model adaptation", "data selection approach", "training corpus"]} +{"id": "moradi-samwald-2021-evaluating", "title": "Evaluating the Robustness of Neural Language Models to Input Perturbations", "abstract": "High-performance neural language models have obtained state-of-the-art results on a wide range of Natural Language Processing (NLP) tasks. However, results for common benchmark datasets often do not reflect model reliability and robustness when applied to noisy, real-world data. In this study, we design and implement various types of character-level and word-level perturbation methods to simulate realistic scenarios in which input texts may be slightly noisy or different from the data distribution on which NLP systems were trained. Conducting comprehensive experiments on different NLP tasks, we investigate the ability of high-performance language models such as BERT, XLNet, RoBERTa, and ELMo in handling different types of input perturbations. The results suggest that language models are sensitive to input perturbations and their performance can decrease even when small changes are introduced. We highlight that models need to be further improved and that current benchmarks are not reflecting model robustness well. We argue that evaluations on perturbed inputs should routinely complement widely-used benchmarks in order to yield a more realistic understanding of NLP systems' robustness.", "keyphrases": ["robustness", "input perturbation", "different type"]} +{"id": "oda-etal-2014-optimizing", "title": "Optimizing Segmentation Strategies for Simultaneous Speech Translation", "abstract": "In this paper, we propose new algorithms for learning segmentation strategies for simultaneous speech translation. 
In contrast to previously proposed heuristic methods, our method finds a segmentation that directly maximizes the performance of the machine translation system. We describe two methods based on greedy search and dynamic programming that search for the optimal segmentation strategy. An experimental evaluation finds that our algorithm is able to segment the input two to three times more frequently than conventional methods in terms of number of words, while maintaining the same score of automatic evaluation.", "keyphrases": ["segmentation strategy", "simultaneous speech translation", "dynamic programming", "bleu score", "translation quality"]} +{"id": "putthividhya-hu-2011-bootstrapped", "title": "Bootstrapped Named Entity Recognition for Product Attribute Extraction", "abstract": "We present a named entity recognition (NER) system for extracting product attributes and values from listing titles. Information extraction from short listing titles presents a unique challenge, with the lack of informative context and grammatical structure. In this work, we combine supervised NER with bootstrapping to expand the seed list, and output normalized results. Focusing on listings from eBay's clothing and shoes categories, our bootstrapped NER system is able to identify new brands corresponding to spelling variants and typographical errors of the known brands, as well as identifying novel brands. Among the top 300 new brands predicted, our system achieves 90.33% precision. To output normalized attribute values, we explore several string comparison algorithms and find n-gram substring matching to work well in practice.", "keyphrases": ["entity recognition", "product attribute extraction", "title", "brand", "e-commerce domain"]} +{"id": "reiplinger-etal-2012-extracting", "title": "Extracting glossary sentences from scholarly articles: A comparative evaluation of pattern bootstrapping and deep analysis", "abstract": "The paper reports on a comparative study of two approaches to extracting definitional sentences from a corpus of scholarly discourse: one based on bootstrapping lexico-syntactic patterns and another based on deep analysis. Computational Linguistics was used as the target domain and the ACL Anthology as the corpus. Definitional sentences extracted for a set of well-defined concepts were rated by domain experts. Results show that both methods extract high-quality definition sentences intended for automated glossary construction.", "keyphrases": ["scholarly article", "deep analysis", "lexical-syntactic pattern"]} +{"id": "tantug-etal-2008-bleu", "title": "BLEU+: a Tool for Fine-Grained BLEU Computation", "abstract": "We present a tool, BLEU+, which implements various extensions to BLEU computation to allow for a better understanding of the translation performance, especially for morphologically complex languages. BLEU+ takes into account both \u201ccloseness\u201d in morphological structure and \u201ccloseness\u201d of the root words in the WordNet hierarchy while comparing tokens in the candidate and reference sentence. 
In addition to gauging performance at a finer level of granularity, BLEU+ also allows the computation of various upper bound oracle scores: comparing all tokens considering only the roots allows us to get an upper bound when all errors due to morphological structure are fixed, while comparing tokens in an error-tolerant way considering minor morpheme edit operations allows us to get a (more realistic) upper bound when tokens that differ in morpheme insertions/deletions and substitutions are fixed. We use BLEU+ in the fine-grained evaluation of the output of our English-to-Turkish statistical MT system.", "keyphrases": ["various extension", "translation performance", "bleu+"]} +{"id": "callison-burch-etal-2005-scaling", "title": "Scaling Phrase-Based Statistical Machine Translation to Larger Corpora and Longer Phrases", "abstract": "In this paper we describe a novel data structure for phrase-based statistical machine translation which allows for the retrieval of arbitrarily long phrases while simultaneously using less memory than is required by current decoder implementations. We detail the computational complexity and average retrieval times for looking up phrase translations in our suffix array-based data structure. We show how sampling can be used to reduce the retrieval time by orders of magnitude with no loss in translation quality.", "keyphrases": ["machine translation", "data structure", "memory"]} +{"id": "chen-etal-2018-detecting", "title": "Detecting Free Translation in Parallel Corpora from Attention Scores", "abstract": "In this study, we propose a method for extracting free translation examples from bilingual parallel corpora based on an innovative use of attention scores. Preliminary results show that the approach is promising and that paraphrases at both sentential and sub-sentential levels covering diverse surface forms could be identified. The extracted data, upon further filtering, have great potential to supplement the example sentences available in existing bilingual dictionaries in an effective and systematic way.", "keyphrases": ["parallel corpora", "attention score", "free translation example"]} +{"id": "iida-etal-2013-investigation", "title": "Investigation of annotator's behaviour using eye-tracking data", "abstract": "This paper presents an analysis of an annotator\u2019s behaviour during her/his annotation process for eliciting useful information for natural language processing (NLP) tasks. Text annotation is essential for machine learning-based NLP where annotated texts are used for both training and evaluating supervised systems. Since an annotator\u2019s behaviour during annotation can be seen as reflecting her/his cognitive process during her/his attempt to understand the text for annotation, analysing the process of text annotation has the potential to reveal useful information for NLP tasks, in particular semantic and discourse processing that require deeper language understanding. We conducted an experiment for collecting annotator actions and eye gaze during the annotation of predicate-argument relations in Japanese texts. 
Our analysis of the collected data suggests that the insights obtained into human annotation behaviour are useful for exploring effective linguistic features in machine learning-based approaches.", "keyphrases": ["annotator", "behaviour", "eye-tracking data"]} +{"id": "gildea-2003-loosely", "title": "Loosely Tree-Based Alignment for Machine Translation", "abstract": "We augment a model of translation based on re-ordering nodes in syntactic trees in order to allow alignments not conforming to the original tree structure, while keeping computational complexity polynomial in the sentence length. This is done by adding a new subtree cloning operation to either tree-to-string or tree-to-tree alignment algorithms.", "keyphrases": ["tree-based alignment", "subtree", "well result", "translation model"]} +{"id": "mirkin-etal-2018-listening", "title": "Listening Comprehension over Argumentative Content", "abstract": "This paper presents a task for machine listening comprehension in the argumentation domain and a corresponding dataset in English. We recorded 200 spontaneous speeches arguing for or against 50 controversial topics. For each speech, we formulated a question, aimed at confirming or rejecting the occurrence of potential arguments in the speech. Labels were collected by listening to the speech and marking which arguments were mentioned by the speaker. We applied baseline methods addressing the task, to be used as a benchmark for future work over this dataset. All data used in this work is freely available for research.", "keyphrases": ["argumentative content", "machine listening comprehension", "controversial topic"]} +{"id": "li-etal-2013-listwise", "title": "Listwise Approach to Learning to Rank for Automatic Evaluation of Machine Translation", "abstract": "The listwise approach to learning to rank has been applied successfully to information retrieval. However, it has not drawn much attention in research on the automatic evaluation of machine translation. In this paper, we present the listwise approach to learning to rank for the automatic evaluation of machine translation. Unlike previous automatic metrics that give absolute scores to translation outputs, our approach directly ranks the translation outputs relative to each other using features extracted from the translation outputs. Two representative listwise approaches, ListNet and ListMLE, are applied to automatic evaluation of machine translation. When evaluated using the dataset of the WMT 2012 Metrics task, the proposed approach achieves higher segment-level correlation with human judgments than the pairwise approach, RankNet, and with all the other metrics that were evaluated during the workshop, and it achieves a comparable system-level correlation with the performance of most competitors.", "keyphrases": ["automatic evaluation", "translation output", "listwise approach"]} +{"id": "lao-etal-2012-reading", "title": "Reading The Web with Learned Syntactic-Semantic Inference Rules", "abstract": "We study how to extend a large knowledge base (Freebase) by reading relational information from a large Web text corpus. Previous studies on extracting relational knowledge from text show the potential of syntactic patterns for extraction, but they do not exploit background knowledge of other relations in the knowledge base. 
We describe a distributed, Web-scale implementation of a path-constrained random walk model that learns syntactic-semantic inference rules for binary relations from a graph representation of the parsed text and the knowledge base. Experiments show significant accuracy improvements in binary relation prediction over methods that consider only text, or only the existing knowledge base.", "keyphrases": ["inference rule", "pra", "knowledge base completion", "syntactic information", "large corpus"]} +{"id": "guo-etal-2019-densely", "title": "Densely Connected Graph Convolutional Networks for Graph-to-Sequence Learning", "abstract": "We focus on graph-to-sequence learning, which can be framed as transducing graph structures to sequences for text generation. To capture structural information associated with graphs, we investigate the problem of encoding graphs using graph convolutional networks (GCNs). Unlike various existing approaches where shallow architectures were used for capturing local structural information only, we introduce a dense connection strategy, proposing a novel Densely Connected Graph Convolutional Network (DCGCN). Such a deep architecture is able to integrate both local and non-local features to learn a better structural representation of a graph. Our model outperforms the state-of-the-art neural models significantly on AMR-to-text generation and syntax-based neural machine translation.", "keyphrases": ["graph convolutional networks", "graph-to-sequence learning", "non-local feature", "gnn", "dense connectivity"]} +{"id": "apostolova-etal-2011-automatic", "title": "Automatic Extraction of Lexico-Syntactic Patterns for Detection of Negation and Speculation Scopes", "abstract": "Detecting the linguistic scope of negated and speculated information in text is an important Information Extraction task. This paper presents ScopeFinder, a linguistically motivated rule-based system for the detection of negation and speculation scopes. The system rule set consists of lexico-syntactic patterns automatically extracted from a corpus annotated with negation/speculation cues and their scopes (the BioScope corpus). The system performs on par with state-of-the-art machine learning systems. Additionally, the intuitive and linguistically motivated rules will allow for manual adaptation of the rule set to new domains and corpora.", "keyphrases": ["lexico-syntactic pattern", "detection", "negation", "speculation scope", "rule-based system"]} +{"id": "chi-etal-2021-infoxlm", "title": "InfoXLM: An Information-Theoretic Framework for Cross-Lingual Language Model Pre-Training", "abstract": "In this work, we present an information-theoretic framework that formulates cross-lingual language model pre-training as maximizing mutual information between multilingual-multi-granularity texts. The unified view helps us to better understand the existing methods for learning cross-lingual representations. More importantly, inspired by the framework, we propose a new pre-training task based on contrastive learning. Specifically, we regard a bilingual sentence pair as two views of the same meaning and encourage their encoded representations to be more similar than the negative examples. By leveraging both monolingual and parallel corpora, we jointly train the pretext tasks to improve the cross-lingual transferability of pre-trained models. Experimental results on several benchmarks show that our approach achieves considerably better performance. 
The code and pre-trained models are available at .", "keyphrases": ["contrastive learning", "parallel corpora", "pre-trained model", "infoxlm"]} +{"id": "recasens-etal-2013-linguistic", "title": "Linguistic Models for Analyzing and Detecting Biased Language", "abstract": "Unbiased language is a requirement for reference sources like encyclopedias and scientific texts. Bias is, nonetheless, ubiquitous, making it crucial to understand its nature and linguistic realization and hence detect bias automatically. To this end we analyze real instances of human edits designed to remove bias from Wikipedia articles. The analysis uncovers two classes of bias: framing bias, such as praising or perspective-specific words, which we link to the literature on subjectivity; and epistemological bias, related to whether propositions that are presupposed or entailed in the text are uncontroversially accepted as true. We identify common linguistic cues for these classes, including factive verbs, implicatives, hedges, and subjective intensifiers. These insights help us develop features for a model to solve a new prediction task of practical importance: given a biased sentence, identify the bias-inducing word. Our linguistically-informed model performs almost as well as humans tested on the same task.", "keyphrases": ["edit", "wikipedia", "linguistic indicator", "neutral point", "other work"]} +{"id": "chang-etal-2014-typed", "title": "Typed Tensor Decomposition of Knowledge Bases for Relation Extraction", "abstract": "While relation extraction has traditionally been viewed as a task relying solely on textual data, recent work has shown that by taking as input existing facts in the form of entity-relation triples from both knowledge bases and textual data, the performance of relation extraction can be improved significantly. Following this new paradigm, we propose a tensor decomposition approach for knowledge base embedding that is highly scalable, and is especially suitable for relation extraction. By leveraging relational domain knowledge about entity type information, our learning algorithm is significantly faster than previous approaches and is better able to discover new relations missing from the database. In addition, when applied to a relation extraction task, our approach alone is comparable to several existing systems, and improves the weighted mean average precision of a state-of-the-art method by 10 points when used as a subcomponent.", "keyphrases": ["relation extraction", "knowledge basis", "tensor factorization"]} +{"id": "yessenalina-etal-2010-multi", "title": "Multi-Level Structured Models for Document-Level Sentiment Classification", "abstract": "In this paper, we investigate structured models for document-level sentiment classification. When predicting the sentiment of a subjective document (e.g., as positive or negative), it is well known that not all sentences are equally discriminative or informative. But identifying the useful sentences automatically is itself a difficult learning problem. This paper proposes a joint two-level approach for document-level sentiment classification that simultaneously extracts useful (i.e., subjective) sentences and predicts document-level sentiment based on the extracted sentences. Unlike previous joint learning methods for the task, our approach (1) does not rely on gold standard sentence-level subjectivity annotations (which may be expensive to obtain), and (2) optimizes directly for document-level performance. 
Empirical evaluations on movie reviews and U.S. Congressional floor debates show improved performance over previous approaches.", "keyphrases": ["sentiment classification", "previous approach", "latent variable"]} +{"id": "wang-etal-2013-conditional", "title": "Conditional Random Field-based Parser and Language Model for Traditional Chinese Spelling Checker", "abstract": "This paper describes our Chinese spelling check system submitted to SIGHAN Bake-off 2013 evaluation. The main idea is to exchange potential error character with its confusable ones and rescore the modified sentence using a conditional random field (CRF)-based word segmentation/part of speech (POS) tagger and a tri-gram language model (LM) to detect and correct possible spelling errors. Experimental results on the Bakeoff 2013 tasks showed the proposed method achieved 0.50 location detection and 0.24 error location F-scores in subtask1 and 0.49 location and 0.40 correction accuracies and 0.40 correction precision in subtask2.", "keyphrases": ["language model", "chinese spelling checker", "error character", "random field"]} +{"id": "korhonen-etal-2003-clustering", "title": "Clustering Polysemic Subcategorization Frame Distributions Semantically", "abstract": "Previous research has demonstrated the utility of clustering in inducing semantic verb classes from undisambiguated corpus data. We describe a new approach which involves clustering subcategorization frame (SCF) distributions using the Information Bottleneck and nearest neighbour methods. In contrast to previous work, we particularly focus on clustering polysemic verbs. A novel evaluation scheme is proposed which accounts for the effect of polysemy on the clusters, offering us a good insight into the potential and limitations of semantically classifying undisambiguated SCF data.", "keyphrases": ["subcategorization frame", "corpus data", "information bottleneck", "clustering method"]} +{"id": "qian-etal-2009-semi", "title": "Semi-Supervised Learning for Semantic Relation Classification using Stratified Sampling Strategy", "abstract": "This paper presents a new approach to selecting the initial seed set using stratified sampling strategy in bootstrapping-based semi-supervised learning for semantic relation classification. First, the training data is partitioned into several strata according to relation types/subtypes, then relation instances are randomly sampled from each stratum to form the initial seed set. We also investigate different augmentation strategies in iteratively adding reliable instances to the labeled set, and find that the bootstrapping procedure may stop at a reasonable point to significantly decrease the training time without degrading too much in performance. Experiments on the ACE RDC 2003 and 2004 corpora show the stratified sampling strategy contributes more than the bootstrapping procedure itself. This suggests that a proper sampling strategy is critical in semi-supervised learning.", "keyphrases": ["semantic relation classification", "stratified sampling strategy", "seed set", "semi-supervised learning"]} +{"id": "toutanova-etal-2008-global", "title": "A Global Joint Model for Semantic Role Labeling", "abstract": "We present a model for semantic role labeling that effectively captures the linguistic intuition that a semantic argument frame is a joint structure, with strong dependencies among the arguments. We show how to incorporate these strong dependencies in a statistical joint model with a rich set of features over multiple argument phrases. 
The proposed model substantially outperforms a similar state-of-the-art local model that does not include dependencies among different arguments. We evaluate the gains from incorporating this joint information on the Propbank corpus, when using correct syntactic parse trees as input, and when using automatically derived parse trees. The gains amount to 24.1% error reduction on all arguments and 36.8% on core arguments for gold-standard parse trees on Propbank. For automatic parse trees, the error reductions are 8.3% and 10.3% on all and core arguments, respectively. We also present results on the CoNLL 2005 shared task data set. Additionally, we explore considering multiple syntactic analyses to cope with parser noise and uncertainty.", "keyphrases": ["semantic role labeling", "argument frame", "joint structure", "strong dependency", "re-ranking model"]} +{"id": "yang-cardie-2012-extracting", "title": "Extracting Opinion Expressions with semi-Markov Conditional Random Fields", "abstract": "Extracting opinion expressions from text is usually formulated as a token-level sequence labeling task tackled using Conditional Random Fields (CRFs). CRFs, however, do not readily model potentially useful segment-level information like syntactic constituent structure. Thus, we propose a semi-CRF-based approach to the task that can perform sequence labeling at the segment level. We extend the original semi-CRF model (Sarawagi and Cohen, 2004) to allow the modeling of arbitrarily long expressions while accounting for their likely syntactic structure when modeling segment boundaries. We evaluate performance on two opinion extraction tasks, and, in contrast to previous sequence labeling approaches to the task, explore the usefulness of segment-level syntactic parse features. Experimental results demonstrate that our approach outperforms state-of-the-art methods for both opinion expression tasks.", "keyphrases": ["opinion expression", "conditional random fields", "strong baseline", "named-entity tagger", "dependency parser"]} +{"id": "lukin-etal-2017-argument", "title": "Argument Strength is in the Eye of the Beholder: Audience Effects in Persuasion", "abstract": "Americans spend about a third of their time online, with many participating in online conversations on social and political issues. We hypothesize that social media arguments on such issues may be more engaging and persuasive than traditional media summaries, and that particular types of people may be more or less convinced by particular styles of argument, e.g. emotional arguments may resonate with some personalities while factual arguments resonate with others. We report a set of experiments testing at large scale how audience variables interact with argument style to affect the persuasiveness of an argument, an under-researched topic within natural language processing. We show that belief change is affected by personality factors, with conscientious, open and agreeable people being more convinced by emotional arguments.", "keyphrases": ["persuasion", "audience variable", "belief change"]} +{"id": "iida-etal-2007-annotating", "title": "Annotating a Japanese Text Corpus with Predicate-Argument and Coreference Relations", "abstract": "In this paper, we discuss how to annotate coreference and predicate-argument relations in Japanese written text. 
There have been research activities for building Japanese text corpora annotated with coreference and predicate-argument relations, as is done in the Kyoto Text Corpus version 4.0 (Kawahara et al., 2002) and the GDA-Tagged Corpus (Hasida, 2005). However, there is still much room for refining their specifications. For this reason, we discuss issues in annotating these two types of relations, and propose a new specification for each. In accordance with the specification, we built a large-scale annotated corpus, and examined its reliability. As a result of our current work, we have released an annotated corpus named the NAIST Text Corpus, which is used as the evaluation data set in the coreference and zero-anaphora resolution tasks in Iida et al. (2005) and Iida et al. (2006).", "keyphrases": ["naist text corpus", "anaphor", "dative"]} +{"id": "blunsom-etal-2008-discriminative", "title": "A Discriminative Latent Variable Model for Statistical Machine Translation", "abstract": "Large-scale discriminative machine translation promises to further the state-of-the-art, but has failed to deliver convincing gains over current heuristic frequency count systems. We argue that a principal reason for this failure is not dealing with multiple, equivalent translations. We present a translation model which models derivations as a latent variable, in both training and decoding, and is fully discriminative and globally optimised. Results show that accounting for multiple derivations does indeed improve performance. Additionally, we show that regularisation is essential for maximum conditional likelihood models in order to avoid degenerate solutions.", "keyphrases": ["latent variable", "machine translation", "derivation", "hiero", "million"]} +{"id": "bloem-etal-2019-evaluating", "title": "Evaluating the Consistency of Word Embeddings from Small Data", "abstract": "In this work, we address the evaluation of distributional semantic models trained on smaller, domain-specific texts, specifically philosophical text. In particular, we inspect the behaviour of models using a pre-trained background space in learning. We propose a measure of consistency which can be used as an evaluation metric when no in-domain gold-standard data is available. This measure simply computes the ability of a model to learn similar embeddings from different parts of some homogeneous data. We show that in spite of being a simple evaluation, consistency actually depends on various combinations of factors, including the nature of the data itself, the model used to train the semantic space, and the frequency of the learnt terms, both in the background space and in the in-domain data of interest.", "keyphrases": ["consistency", "small data", "evaluation metric"]} +{"id": "lee-etal-2018-transfer", "title": "Transfer Learning for Named-Entity Recognition with Neural Networks", "abstract": "Recent approaches based on artificial neural networks (ANNs) have shown promising results for named-entity recognition (NER). In order to achieve high performance, ANNs need to be trained on a large labeled dataset. However, labels might be difficult to obtain for the dataset on which the user wants to perform NER: label scarcity is particularly pronounced for patient note de-identification, which is an instance of NER. In this work, we analyze to what extent transfer learning may address this issue. 
In particular, we demonstrate that transferring an ANN model trained on a large labeled dataset to another dataset with a limited number of labels improves upon the state-of-the-art results on two different datasets for patient note de-identification.", "keyphrases": ["named-entity recognition", "different dataset", "transfer learning", "target domain"]} +{"id": "nguyen-dogruoz-2013-word", "title": "Word Level Language Identification in Online Multilingual Communication", "abstract": "Multilingual speakers switch between languages in online and spoken communication. Analyses of large scale multilingual data require automatic language identification at the word level. For our experiments with multilingual online discussions, we first tag the language of individual words using language models and dictionaries. Secondly, we incorporate context to improve the performance. We achieve an accuracy of 98%. Besides word level accuracy, we use two new metrics to evaluate this task.", "keyphrases": ["language identification", "communication", "multilingual online discussion"]} +{"id": "chen-zechner-2011-computing", "title": "Computing and Evaluating Syntactic Complexity Features for Automated Scoring of Spontaneous Non-Native Speech", "abstract": "This paper focuses on identifying, extracting and evaluating features related to syntactic complexity of spontaneous spoken responses as part of an effort to expand the current feature set of an automated speech scoring system in order to cover additional aspects considered important in the construct of communicative competence. \n \nOur goal is to find effective features, selected from a large set of features proposed previously and some new features designed in analogous ways from a syntactic complexity perspective that correlate well with human ratings of the same spoken responses, and to build automatic scoring models based on the most promising features by using machine learning methods. \n \nOn human transcriptions with manually annotated clause and sentence boundaries, our best scoring model achieves an overall Pearson correlation with human rater scores of r=0.49 on an unseen test set, whereas correlations of models using sentence or clause boundaries from automated classifiers are around r=0.2.", "keyphrases": ["complexity", "non-native speech", "speech scoring", "scoring system", "syntactic competence"]} +{"id": "wang-etal-2019-vizseq", "title": "VizSeq: a visual analysis toolkit for text generation tasks", "abstract": "Automatic evaluation of text generation tasks (e.g. machine translation, text summarization, image captioning and video description) usually relies heavily on task-specific metrics, such as BLEU and ROUGE. They, however, are abstract numbers and are not perfectly aligned with human assessment. This suggests inspecting detailed examples as a complement to identify system error patterns. In this paper, we present VizSeq, a visual analysis toolkit for instance-level and corpus-level system evaluation on a wide variety of text generation tasks. It supports multimodal sources and multiple text references, providing visualization in Jupyter notebook or a web app interface. It can be used locally or deployed onto public servers for centralized data hosting and benchmarking. 
It covers most common n-gram based metrics accelerated with multiprocessing, and also provides the latest embedding-based metrics such as BERTScore.", "keyphrases": ["visual analysis toolkit", "text generation task", "interface", "n-gram", "vizseq"]} +{"id": "simard-isabelle-2009-phrase", "title": "Phrase-based Machine Translation in a Computer-assisted Translation Environment", "abstract": "We explore the problem of integrating a phrase-based MT system within a computer-assisted translation (CAT) environment. We argue that one way of achieving successful integration is to design an MT system that behaves more like the translation memory (TM) component of CAT systems. This implies producing MT output that is consistent with that of a TM when high-similarity material exists in the training data; it also implies providing the MT system with a component that is capable of filtering out machine translations that are less likely to be useful. We propose solutions to both problems, and evaluate their impact on three different data sets. Our results indicate that the proposed approach leads to systems that produce better output than a TM, for a larger portion of the source text.", "keyphrases": ["machine translation", "smt system", "large probability value", "second strand", "tm-based feature function"]} +{"id": "mitra-etal-2014-thats", "title": "That's sick dude!: Automatic identification of word sense change across different timescales", "abstract": "In this paper, we propose an unsupervised method to identify noun sense changes based on rigorous analysis of time-varying text data available in the form of millions of digitized books. We construct distributional thesauri-based networks from data at different time points and cluster each of them separately to obtain word-centric sense clusters corresponding to the different time points. Subsequently, we compare these sense clusters of two different time points to find if (i) there is birth of a new sense or (ii) if an older sense has got split into more than one sense or (iii) if a newer sense has been formed from the joining of older senses or (iv) if a particular sense has died. We conduct a thorough evaluation of the proposed methodology both manually as well as through comparison with WordNet. Manual evaluation indicates that the algorithm could correctly identify 60.4% birth cases from a set of 48 randomly picked samples and 57% split/join cases from a set of 21 randomly picked samples. Remarkably, in 44% cases the birth of a novel sense is attested by WordNet, while in 46% cases and 43% cases split and join are respectively confirmed by WordNet. Our approach can be applied for lexicography, as well as for applications like word sense disambiguation or semantic search.", "keyphrases": ["identification", "word sense", "different timescale"]} +{"id": "ettinger-etal-2018-assessing", "title": "Assessing Composition in Sentence Vector Representations", "abstract": "An important component of achieving language understanding is mastering the composition of sentence meaning, but an immediate challenge to solving this problem is the opacity of sentence vector representations produced by current neural sentence composition models. We present a method to address this challenge, developing tasks that directly target compositional meaning information in sentence vector representations with a high degree of precision and control. 
To enable the creation of these controlled tasks, we introduce a specialized sentence generation system that produces large, annotated sentence sets meeting specified syntactic, semantic and lexical constraints. We describe the details of the method and generation system, and then present results of experiments applying our method to probe for compositional information in embeddings from a number of existing sentence composition models. We find that the method is able to extract useful information about the differing capacities of these models, and we discuss the implications of our results with respect to these systems' capturing of sentence information. We make available for public use the datasets used for these experiments, as well as the generation system.", "keyphrases": ["composition", "sentence generation system", "linguistic knowledge"]} +{"id": "schneider-etal-2014-discriminative", "title": "Discriminative Lexical Semantic Segmentation with Gaps: Running the MWE Gamut", "abstract": "We present a novel representation, evaluation measure, and supervised models for the task of identifying the multiword expressions (MWEs) in a sentence, resulting in a lexical semantic segmentation. Our approach generalizes a standard chunking representation to encode MWEs containing gaps, thereby enabling efficient sequence tagging algorithms for feature-rich discriminative models. Experiments on a new dataset of English web text offer the first linguistically-driven evaluation of MWE identification with truly heterogeneous expression types. Our statistical sequence model greatly outperforms a lookup-based segmentation procedure, achieving nearly 60% F1 for MWE identification.", "keyphrases": ["gap", "mwe", "multiword expression", "sequence tagging model", "supervised approach"]} +{"id": "hill-korhonen-2014-concreteness", "title": "Concreteness and Subjectivity as Dimensions of Lexical Meaning", "abstract": "We quantify the lexical subjectivity of adjectives using a corpus-based method, and show for the first time that it correlates with noun concreteness in large corpora. These cognitive dimensions together influence how word meanings combine, and we exploit this fact to achieve performance improvements on the semantic classification of adjective-noun pairs.", "keyphrases": ["subjectivity", "concreteness", "semantic composition"]} +{"id": "mosbach-etal-2019-incom", "title": "incom.py - A Toolbox for Calculating Linguistic Distances and Asymmetries between Related Languages", "abstract": "Languages may be differently distant from each other and their mutual intelligibility may be asymmetric. In this paper we introduce incom.py, a toolbox for calculating linguistic distances and asymmetries between related languages. incom.py allows linguist experts to quickly and easily perform statistical analyses and compare those with experimental results. We demonstrate the efficacy of incom.py in an incomprehension experiment on two Slavic languages: Bulgarian and Russian. 
Using incom.py we were able to validate three methods to measure linguistic distances and asymmetries: Levenshtein distance, word adaptation surprisal, and conditional entropy as predictors of success in a reading intercomprehension experiment.", "keyphrases": ["toolbox", "asymmetry", "related language", "intelligibility"]} +{"id": "jiang-etal-2012-iterative", "title": "Iterative Annotation Transformation with Predict-Self Reestimation for Chinese Word Segmentation", "abstract": "In this paper we first describe the technology of automatic annotation transformation, which is based on the annotation adaptation algorithm (Jiang et al., 2009). It can automatically transform a human-annotated corpus from one annotation guideline to another. We then propose two optimization strategies, iterative training and predict-self reestimation, to further improve the accuracy of annotation guideline transformation. Experiments on Chinese word segmentation show that the iterative training strategy together with predict-self reestimation brings significant improvement over the simple annotation transformation baseline, and leads to classifiers with significantly higher accuracy and several times faster processing than annotation adaptation does. On the Penn Chinese Treebank 5.0, it achieves an F-measure of 98.43%, significantly outperforming previous works although using a single classifier with only local features.", "keyphrases": ["annotation transformation", "chinese word segmentation", "iterative training"]} +{"id": "li-etal-2012-employing", "title": "Employing Compositional Semantics and Discourse Consistency in Chinese Event Extraction", "abstract": "Current Chinese event extraction systems suffer much from two problems in trigger identification: unknown triggers and word segmentation errors to known triggers. To resolve these problems, this paper proposes two novel inference mechanisms to explore special characteristics in Chinese via compositional semantics inside Chinese triggers and discourse consistency between Chinese trigger mentions. Evaluation on the ACE 2005 Chinese corpus justifies the effectiveness of our approach over a strong baseline.", "keyphrases": ["compositional semantic", "discourse consistency", "chinese event extraction", "word segmentation error"]} +{"id": "mitchell-2009-class", "title": "Class-Based Ordering of Prenominal Modifiers", "abstract": "This paper introduces a class-based approach to ordering prenominal modifiers. Modifiers are grouped into broad classes based on where they tend to occur prenominally, and a framework is developed to order sets of modifiers based on their classes. This system is developed to generate several orderings for modifiers with more flexible positional constraints, and lends itself to bootstrapping for the classification of previously unseen modifiers.", "keyphrases": ["ordering", "modifier", "class-based approach"]} +{"id": "lakomkin-etal-2018-kt", "title": "KT-Speech-Crawler: Automatic Dataset Construction for Speech Recognition from YouTube Videos", "abstract": "We describe KT-Speech-Crawler: an approach for automatic dataset construction for speech recognition by crawling YouTube videos. We outline several filtering and post-processing steps, which extract samples that can be used for training end-to-end neural speech recognition systems. 
In our experiments, we demonstrate that a single-core version of the crawler can obtain around 150 hours of transcribed speech within a day, containing an estimated 3.5% word error rate in the transcriptions. Automatically collected samples contain reading and spontaneous speech recorded in various conditions including background noise and music, distant microphone recordings, and a variety of accents and reverberation. When training a deep neural network on speech recognition, we observed around 40% word error rate reduction on the Wall Street Journal dataset by integrating 200 hours of the collected samples into the training set.", "keyphrases": ["automatic dataset construction", "speech recognition", "youtube video"]} +{"id": "crysmann-etal-2008-hybrid", "title": "Hybrid Processing for Grammar and Style Checking", "abstract": "This paper presents an implemented hybrid approach to grammar and style checking, combining an industrial pattern-based grammar and style checker with bidirectional, large-scale HPSG grammars for German and English. Under this approach, deep processing is applied selectively based on the error hypotheses of a shallow system. We have conducted a comparative evaluation of the two components, supporting an integration scenario where the shallow system is best used for error detection, whereas the HPSG grammars add error correction for both grammar and controlled language style errors.", "keyphrases": ["style checking", "hybrid approach", "hpsg grammar"]} +{"id": "talbot-brants-2008-randomized", "title": "Randomized Language Models via Perfect Hash Functions", "abstract": "We propose a succinct randomized language model which employs a perfect hash function to encode fingerprints of n-grams and their associated probabilities, backoff weights, or other parameters. The scheme can represent any standard n-gram model and is easily combined with existing model reduction techniques such as entropy-pruning. We demonstrate the space-savings of the scheme via machine translation experiments within a distributed language modeling framework.", "keyphrases": ["language model", "hash function", "bit"]} +{"id": "saeidi-etal-2018-interpretation", "title": "Interpretation of Natural Language Rules in Conversational Machine Reading", "abstract": "Most work in machine reading focuses on question answering problems where the answer is directly expressed in the text to read. However, many real-world question answering problems require the reading of text not because it contains the literal answer, but because it contains a recipe to derive an answer together with the reader's background knowledge. One example is the task of interpreting regulations to answer \u201cCan I...?\u201d or \u201cDo I have to...?\u201d questions such as \u201cI am working in Canada. Do I have to carry on paying UK National Insurance?\u201d after reading a UK government website about this topic. This task requires both the interpretation of rules and the application of background knowledge. It is further complicated due to the fact that, in practice, most questions are underspecified, and a human assistant will regularly have to ask clarification questions such as \u201cHow long have you been working abroad?\u201d when the answer cannot be directly derived from the question and text. In this paper, we formalise this task and develop a crowd-sourcing strategy to collect 37k task instances based on real-world rules and crowd-generated questions and scenarios. 
We analyse the challenges of this task and assess its difficulty by evaluating the performance of rule-based and machine-learning baselines. We observe promising results when no background knowledge is necessary, and substantial room for improvement whenever background knowledge is needed.", "keyphrases": ["conversational machine reading", "interpretation", "rule document", "user question"]} +{"id": "knight-etal-2006-unsupervised", "title": "Unsupervised Analysis for Decipherment Problems", "abstract": "We study a number of natural language decipherment problems using unsupervised learning. These include letter substitution ciphers, character code conversion, phonetic decipherment, and word-based ciphers with relevance to machine translation. Straightforward unsupervised learning techniques most often fail on the first try, so we describe techniques for understanding errors and significantly increasing performance.", "keyphrases": ["decipherment problem", "unsupervised learning", "cipher", "phonetic decipherment", "expectation-maximization"]} +{"id": "reddy-knight-2011-know", "title": "What We Know About The Voynich Manuscript", "abstract": "The Voynich Manuscript is an undeciphered document from medieval Europe. We present current knowledge about the manuscript's text through a series of questions about its linguistic properties.", "keyphrases": ["voynich manuscript", "linguistic property", "letter sequence"]} +{"id": "schilder-kondadadi-2008-fastsum", "title": "FastSum: Fast and Accurate Query-based Multi-document Summarization", "abstract": "We present a fast query-based multi-document summarizer called FastSum based solely on word-frequency features of clusters, documents and topics. Summary sentences are ranked by a regression SVM. The summarizer does not use any expensive NLP techniques such as parsing, tagging of names or even part of speech information. Still, the achieved accuracy is comparable to the best systems presented in recent academic competitions (i.e., Document Understanding Conference (DUC)). Because of a detailed feature analysis using Least Angle Regression (LARS), FastSum can rely on a minimal set of features leading to fast processing times: 1250 news documents in 60 seconds.", "keyphrases": ["query-based multi-document summarizer", "summarization", "fastsum"]} +{"id": "deriu-etal-2016-swisscheese", "title": "SwissCheese at SemEval-2016 Task 4: Sentiment Classification Using an Ensemble of Convolutional Neural Networks with Distant Supervision", "abstract": "In this paper, we propose a classifier for predicting message-level sentiments of English micro-blog messages from Twitter. Our method builds upon the convolutional sentence embedding approach proposed by (Severyn and Moschitti, 2015a; Severyn and Moschitti, 2015b). We leverage large amounts of data with distant supervision to train an ensemble of 2-layer convolutional neural networks whose predictions are combined using a random forest classifier. Our approach was evaluated on the datasets of the SemEval-2016 competition (Task 4) outperforming all other approaches for the Message Polarity Classification task.", "keyphrases": ["convolutional neural networks", "distant supervision", "negative emoticon"]} +{"id": "de-melo-bansal-2013-good", "title": "Good, Great, Excellent: Global Inference of Semantic Intensities", "abstract": "Adjectives like good, great, and excellent are similar in meaning, but differ in intensity. 
Intensity order information is very useful for language learners as well as in several NLP tasks, but is missing in most lexical resources (dictionaries, WordNet, and thesauri). In this paper, we present a primarily unsupervised approach that uses semantics from Web-scale data (e.g., phrases like good but not excellent) to rank words by assigning them positions on a continuous scale. We rely on Mixed Integer Linear Programming to jointly determine the ranks, such that individual decisions benefit from global information. When ranking English adjectives, our global algorithm achieves substantial improvements over previous work on both pairwise and rank correlation metrics (specifically, 70% pairwise accuracy as compared to only 56% by previous work). Moreover, our approach can incorporate external synonymy information (increasing its pairwise accuracy to 78%) and extends easily to new languages. We also make our code and data freely available.", "keyphrases": ["intensity", "adjective", "unsupervised approach", "web-scale data"]} +{"id": "rozen-etal-2019-diversify", "title": "Diversify Your Datasets: Analyzing Generalization via Controlled Variance in Adversarial Datasets", "abstract": "Phenomenon-specific \u201cadversarial\u201d datasets have been recently designed to perform targeted stress-tests for particular inference types. Recent work (Liu et al., 2019a) proposed that such datasets can be utilized for training NLI and other types of models, often allowing to learn the phenomenon in focus and improve on the challenge dataset, indicating a \u201cblind spot\u201d in the original training data. Yet, although a model can improve in such a training process, it might still be vulnerable to other challenge datasets targeting the same phenomenon but drawn from a different distribution, such as having a different syntactic complexity level. In this work, we extend this method to drive conclusions about a model's ability to learn and generalize a target phenomenon rather than to \u201clearn\u201d a dataset, by controlling additional aspects in the adversarial datasets. We demonstrate our approach on two inference phenomena \u2013 dative alternation and numerical reasoning, elaborating, and in some cases contradicting, the results of Liu et al. Our methodology enables building better challenge datasets for creating more robust models, and may yield better model understanding and subsequent overarching improvements.", "keyphrases": ["adversarial dataset", "nli", "reasoning"]} +{"id": "lichtarge-etal-2020-data", "title": "Data Weighted Training Strategies for Grammatical Error Correction", "abstract": "Recent progress in the task of Grammatical Error Correction (GEC) has been driven by addressing data sparsity, both through new methods for generating large and noisy pretraining data and through the publication of small and higher-quality finetuning data in the BEA-2019 shared task. Building upon recent work in Neural Machine Translation (NMT), we make use of both kinds of data by deriving example-level scores on our large pretraining data based on a smaller, higher-quality dataset. In this work, we perform an empirical study to discover how to best incorporate delta-log-perplexity, a type of example scoring, into a training schedule for GEC. In doing so, we perform experiments that shed light on the function and applicability of delta-log-perplexity. 
Models trained on scored data achieve state-of-the-art results on common GEC test sets.", "keyphrases": ["grammatical error correction", "neural machine translation", "example-level score"]} +{"id": "wang-etal-2018-denoising", "title": "Denoising Neural Machine Translation Training with Trusted Data and Online Data Selection", "abstract": "Measuring domain relevance of data and identifying or selecting well-fit domain data for machine translation (MT) is a well-studied topic, but denoising is not yet. Denoising is concerned with a different type of data quality and tries to reduce the negative impact of data noise on MT training, in particular, neural MT (NMT) training. This paper generalizes methods for measuring and selecting data for domain MT and applies them to denoising NMT training. The proposed approach uses trusted data and a denoising curriculum realized by online data selection. Intrinsic and extrinsic evaluations of the approach show its significant effectiveness for NMT to train on data with severe noise.", "keyphrases": ["trusted data", "online data selection", "noisy data", "model training", "sentence pair"]} +{"id": "shavarani-etal-2015-learning", "title": "Learning segmentations that balance latency versus quality in spoken language translation", "abstract": "Segmentation of the incoming speech stream and translating segments incrementally is a commonly used technique that improves latency in spoken language translation. Previous work (Oda et al. 2014) [1] has explored creating training data for segmentation by finding segments that maximize translation quality with a user-defined bound on segment length. In this work, we provide a new algorithm, using Pareto-optimality, for finding good segment boundaries that can balance the trade-off between latency versus translation quality. We compare against the state-of-the-art greedy algorithm from (Oda et al. 2014) [1]. Our experimental results show that we can improve latency by up to 12% without harming the BLEU score for the same average segment length. Another benefit is that for any segment size, Pareto-optimal segments maximize latency and translation quality.", "keyphrases": ["segmentation", "latency", "spoken language translation"]} +{"id": "li-etal-2021-improving-bert", "title": "Improving BERT with Syntax-aware Local Attention", "abstract": "Pre-trained Transformer-based neural language models, such as BERT, have achieved remarkable results on varieties of NLP tasks. Recent works have shown that attention-based models can benefit from more focused attention over local regions. Most of them restrict the attention scope within a linear span, or confine to certain tasks such as machine translation and question answering. In this paper, we propose a syntax-aware local attention, where the attention scopes are restrained based on the distances in the syntactic structure. The proposed syntax-aware local attention can be integrated with pretrained language models, such as BERT, to render the model to focus on syntactically relevant words. We conduct experiments on various single-sentence benchmarks, including sentence classification and sequence labeling tasks. Experimental results show consistent gains over BERT on all benchmark datasets. 
The extensive studies verify that our model achieves better performance owing to more focused attention over syntactically relevant words.", "keyphrases": ["bert", "syntax-aware local attention", "syntax information", "similar effort"]} +{"id": "ni-etal-2017-weakly", "title": "Weakly Supervised Cross-Lingual Named Entity Recognition via Effective Annotation and Representation Projection", "abstract": "The state-of-the-art named entity recognition (NER) systems are supervised machine learning models that require large amounts of manually annotated data to achieve high accuracy. However, annotating NER data by humans is expensive and time-consuming, and can be quite difficult for a new language. In this paper, we present two weakly supervised approaches for cross-lingual NER with no human annotation in a target language. The first approach is to create automatically labeled NER data for a target language via annotation projection on comparable corpora, where we develop a heuristic scheme that effectively selects good-quality projection-labeled data from noisy data. The second approach is to project distributed representations of words (word embeddings) from a target language to a source language, so that the source-language NER system can be applied to the target language without re-training. We also design two co-decoding schemes that effectively combine the outputs of the two projection-based approaches. We evaluate the performance of the proposed approaches on both in-house and open NER data for several target languages. The results show that the combined systems outperform three other weakly supervised approaches on the CoNLL data.", "keyphrases": ["entity recognition", "representation projection", "cross-lingual ner", "source language", "parallel corpora"]} +{"id": "hermjakob-etal-2008-name", "title": "Name Translation in Statistical Machine Translation - Learning When to Transliterate", "abstract": "We present a method to transliterate names in the framework of end-to-end statistical machine translation. The system is trained to learn when to transliterate. For Arabic to English MT, we developed and trained a transliterator on a bitext of 7 million sentences and Google\u2019s English terabyte ngrams and achieved better name translation accuracy than 3 out of 4 professional translators. The paper also includes a discussion of challenges in name translation evaluation.", "keyphrases": ["statistical machine translation", "name", "correct conversion", "transliteration module"]} +{"id": "fitzgerald-etal-2015-semantic", "title": "Semantic Role Labeling with Neural Network Factors", "abstract": "We present a new method for semantic role labeling in which arguments and semantic roles are jointly embedded in a shared vector space for a given predicate. These embeddings belong to a neural network, whose output represents the potential functions of a graphical model designed for the SRL task. We consider both local and structured learning methods and obtain strong results on standard PropBank and FrameNet corpora with a straightforward product-of-experts model. 
We further show how the model can learn jointly from PropBank and FrameNet annotations to obtain additional improvements on the smaller FrameNet dataset.", "keyphrases": ["factor", "graphical model", "propbank", "semantic role labeling", "feature representation"]} +{"id": "walter-pinkal-2006-automatic", "title": "Automatic Extraction of Definitions from German Court Decisions", "abstract": "This paper deals with the use of computational linguistic analysis techniques for information access and ontology learning within the legal domain. We present a rule-based approach for extracting and analysing definitions from parsed text and evaluate it on a corpus of about 6000 German court decisions. The results are applied to improve the quality of a text based ontology learning method on this corpus.", "keyphrases": ["definition", "german court decision", "parsed text"]} +{"id": "xu-etal-2012-learning", "title": "Learning from Bullying Traces in Social Media", "abstract": "We introduce the social study of bullying to the NLP community. Bullying, in both physical and cyber worlds (the latter known as cyberbullying), has been recognized as a serious national health issue among adolescents. However, previous social studies of bullying are handicapped by data scarcity, while the few computational studies narrowly restrict themselves to cyberbullying which accounts for only a small fraction of all bullying episodes. Our main contribution is to present evidence that social media, with appropriate natural language processing techniques, can be a valuable and abundant data source for the study of bullying in both worlds. We identify several key problems in using such data sources and formulate them as NLP tasks, including text classification, role labeling, sentiment analysis, and topic modeling. Since this is an introductory paper, we present baseline results on these tasks using off-the-shelf NLP solutions, and encourage the NLP community to contribute better models in the future.", "keyphrases": ["bullying", "cyberbullying", "social medium", "victim"]} +{"id": "schuster-etal-2019-cross-lingual", "title": "Cross-lingual Transfer Learning for Multilingual Task Oriented Dialog", "abstract": "One of the first steps in the utterance interpretation pipeline of many task-oriented conversational AI systems is to identify user intents and the corresponding slots. Since data collection for machine learning models for this task is time-consuming, it is desirable to make use of existing data in a high-resource language to train models in low-resource languages. However, development of such models has largely been hindered by the lack of multilingual training data. In this paper, we present a new data set of 57k annotated utterances in English (43k), Spanish (8.6k) and Thai (5k) across the domains weather, alarm, and reminder. We use this data set to evaluate three different cross-lingual transfer methods: (1) translating the training data, (2) using cross-lingual pre-trained embeddings, and (3) a novel method of using a multilingual machine translation encoder as contextual word representations. We find that given several hundred training examples in the target language, the latter two methods outperform translating the training data. Further, in very low-resource settings, multilingual contextual word representations give better results than using cross-lingual static embeddings. 
We also compare the cross-lingual methods to using monolingual resources in the form of contextual ELMo representations and find that given just small amounts of target language data, this method outperforms all cross-lingual methods, which highlights the need for more sophisticated cross-lingual methods.", "keyphrases": ["dialog", "cross-lingual transfer", "task-oriented dialogue dataset"]} +{"id": "madnani-etal-2007-using", "title": "Using Paraphrases for Parameter Tuning in Statistical Machine Translation", "abstract": "Most state-of-the-art statistical machine translation systems use log-linear models, which are defined in terms of hypothesis features and weights for those features. It is standard to tune the feature weights in order to maximize a translation quality metric, using held-out test sentences and their corresponding reference translations. However, obtaining reference translations is expensive. In this paper, we introduce a new full-sentence paraphrase technique, based on English-to-English decoding with an MT system, and we demonstrate that the resulting paraphrases can be used to drastically reduce the number of human reference translations needed for parameter tuning, without a significant decrease in translation quality.", "keyphrases": ["paraphrase", "parameter tuning", "statistical machine translation", "summarization"]} +{"id": "ott-etal-2018-scaling", "title": "Scaling Neural Machine Translation", "abstract": "Sequence to sequence learning models still require several days to reach state of the art performance on large benchmark datasets using a single machine. This paper shows that reduced precision and large batch training can speed up training by nearly 5x on a single 8-GPU machine with careful tuning and implementation. On WMT'14 English-German translation, we match the accuracy of Vaswani et al. (2017) in under 5 hours when training on 8 GPUs and we obtain a new state of the art of 29.3 BLEU after training for 85 minutes on 128 GPUs. We further improve these results to 29.8 BLEU by training on the much larger Paracrawl dataset. On the WMT'14 English-French task, we obtain a state-of-the-art BLEU of 43.2 in 8.5 hours on 128 GPUs.", "keyphrases": ["neural machine translation", "gradient", "large batch size", "translation quality"]} +{"id": "damonte-etal-2017-incremental", "title": "An Incremental Parser for Abstract Meaning Representation", "abstract": "Abstract Meaning Representation (AMR) is a semantic representation for natural language that embeds annotations related to traditional tasks such as named entity recognition, semantic role labeling, word sense disambiguation and co-reference resolution. We describe a transition-based parser for AMR that parses sentences left-to-right, in linear time. We further propose a test-suite that assesses specific subtasks that are helpful in comparing AMR parsers, and show that our parser is competitive with the state of the art on the LDC2015E86 dataset and that it outperforms state-of-the-art parsers for recovering named entities and handling polarity.", "keyphrases": ["abstract meaning representation", "amr", "entity recognition", "transition-based parser", "node"]} +{"id": "kshirsagar-etal-2018-predictive", "title": "Predictive Embeddings for Hate Speech Detection on Twitter", "abstract": "We present a neural-network based approach to classifying online hate speech in general, as well as racist and sexist speech in particular. 
Using pre-trained word embeddings and max/mean pooling from simple, fully-connected transformations of these embeddings, we are able to predict the occurrence of hate speech on three commonly used publicly available datasets. Our models match or outperform state of the art F1 performance on all three datasets using significantly fewer parameters and minimal feature preprocessing compared to previous methods.", "keyphrases": ["hate speech detection", "twitter", "word embedding"]} +{"id": "woodsend-lapata-2012-multiple", "title": "Multiple Aspect Summarization Using Integer Linear Programming", "abstract": "Multi-document summarization involves many aspects of content selection and surface realization. The summaries must be informative, succinct, grammatical, and obey stylistic writing conventions. We present a method where such individual aspects are learned separately from data (without any hand-engineering) but optimized jointly using an integer linear programme. The ILP framework allows us to combine the decisions of the expert learners and to select and rewrite source content through a mixture of objective setting, soft and hard constraints. Experimental results on the TAC-08 data set show that our model achieves state-of-the-art performance using ROUGE and significantly improves the informativeness of the summaries.", "keyphrases": ["summarization", "content selection", "different aspect", "optimization ilp model"]} +{"id": "wiseman-etal-2017-challenges", "title": "Challenges in Data-to-Document Generation", "abstract": "Recent neural models have shown significant progress on the problem of generating short descriptive texts conditioned on a small number of database records. In this work, we suggest a slightly more difficult data-to-text generation task, and investigate how effective current approaches are on this task. In particular, we introduce a new, large-scale corpus of data records paired with descriptive documents, propose a series of extractive evaluation methods for analyzing performance, and obtain baseline results using current neural generation methods. Experiments show that these models produce fluent text, but fail to convincingly approximate human-generated documents. Moreover, even templated baselines exceed the performance of these neural models on some metrics, though copy- and reconstruction-based extensions lead to noticeable improvements.", "keyphrases": ["data-to-text generation", "content selection", "structured data", "summarization model", "long document"]} +{"id": "krichene-etal-2021-dot", "title": "DoT: An efficient Double Transformer for NLP tasks with tables", "abstract": "Transformer-based approaches have been successfully used to obtain state-of-the-art accuracy on natural language processing (NLP) tasks with semi-structured tables. These model architectures are typically deep, resulting in slow training and inference, especially for long inputs. To improve efficiency while maintaining a high accuracy, we propose a new architecture, DoT, a double transformer model, that decomposes the problem into two sub-tasks: A shallow pruning transformer that selects the top-K tokens, followed by a deep task-specific transformer that takes as input those K tokens. Additionally, we modify the task-specific attention to incorporate the pruning scores. The two transformers are jointly trained by optimizing the task-specific loss. We run experiments on three benchmarks, including entailment and question-answering. 
We show that for a small drop of accuracy, DoT improves training and inference time by at least 50%. We also show that the pruning transformer effectively selects relevant tokens enabling the end-to-end model to maintain similar accuracy as slower baseline models. Finally, we analyse the pruning and give some insight into its impact on the task model.", "keyphrases": ["table", "task model", "dot"]} +{"id": "shen-etal-2021-structformer", "title": "StructFormer: Joint Unsupervised Induction of Dependency and Constituency Structure from Masked Language Modeling", "abstract": "There are two major classes of natural language grammars \u2014 the dependency grammar that models one-to-one correspondences between words and the constituency grammar that models the assembly of one or several corresponded words. While previous unsupervised parsing methods mostly focus on only inducing one class of grammars, we introduce a novel model, StructFormer, that can induce dependency and constituency structure at the same time. To achieve this, we propose a new parsing framework that can jointly generate a constituency tree and dependency graph. Then we integrate the induced dependency relations into the transformer, in a differentiable manner, through a novel dependency-constrained self-attention mechanism. Experimental results show that our model can achieve strong results on unsupervised constituency parsing, unsupervised dependency parsing, and masked language modeling at the same time.", "keyphrases": ["constituency structure", "self-attention mechanism", "structformer", "dependency structure", "head"]} +{"id": "garimella-etal-2017-demographic", "title": "Demographic-aware word associations", "abstract": "Variations of word associations across different groups of people can provide insights into people's psychologies and their world views. To capture these variations, we introduce the task of demographic-aware word associations. We build a new gold standard dataset consisting of word association responses for approximately 300 stimulus words, collected from more than 800 respondents of different gender (male/female) and from different locations (India/United States), and show that there are significant variations in the word associations made by these groups. We also introduce a new demographic-aware word association model based on a neural net skip-gram architecture, and show how computational methods for measuring word associations that specifically account for writer demographics can outperform generic methods that are agnostic to such information.", "keyphrases": ["word association", "gender", "location"]} +{"id": "bapna-firat-2019-non", "title": "Non-Parametric Adaptation for Neural Machine Translation", "abstract": "Neural Networks trained with gradient descent are known to be susceptible to catastrophic forgetting caused by parameter shift during the training process. In the context of Neural Machine Translation (NMT) this results in poor performance on heterogeneous datasets and on sub-tasks like rare phrase translation. On the other hand, non-parametric approaches are immune to forgetting, perfectly complementing the generalization ability of NMT. However, attempts to combine non-parametric or retrieval based approaches with NMT have only been successful on narrow domains, possibly due to over-reliance on sentence level retrieval. 
We propose a novel n-gram level retrieval approach that relies on local phrase level similarities, allowing us to retrieve neighbors that are useful for translation even when overall sentence similarity is low. We complement this with an expressive neural network, allowing our model to extract information from the noisy retrieved context. We evaluate our Semi-parametric NMT approach on a heterogeneous dataset composed of WMT, IWSLT, JRC-Acquis and OpenSubtitles, and demonstrate gains on all 4 evaluation sets. The Semi-parametric nature of our approach also opens the door for non-parametric domain adaptation, demonstrating strong inference-time adaptation performance on new domains without the need for any parameter updates.", "keyphrases": ["adaptation", "neural machine translation", "n-gram"]} +{"id": "neubig-hu-2018-rapid", "title": "Rapid Adaptation of Neural Machine Translation to New Languages", "abstract": "This paper examines the problem of adapting neural machine translation systems to new, low-resourced languages (LRLs) as effectively and rapidly as possible. We propose methods based on starting with massively multilingual \u201cseed models\u201d, which can be trained ahead-of-time, and then continuing training on data related to the LRL. We contrast a number of strategies, leading to a novel, simple, yet effective method of \u201csimilar-language regularization\u201d, where we jointly train on both a LRL of interest and a similar high-resourced language to prevent over-fitting to small LRL data. Experiments demonstrate that massively multilingual models, even without any explicit adaptation, are surprisingly effective, achieving BLEU scores of up to 15.5 with no data from the LRL, and that the proposed similar-language regularization method improves over other adaptation methods by 1.7 BLEU points average over 4 LRL settings.", "keyphrases": ["neural machine translation", "low-resource language", "rapid adaptation", "joint vocabulary", "mnmt model"]} +{"id": "xia-etal-2018-zero", "title": "Zero-shot User Intent Detection via Capsule Neural Networks", "abstract": "User intent detection plays a critical role in question-answering and dialog systems. Most previous works treat intent detection as a classification problem where utterances are labeled with predefined intents. However, it is labor-intensive and time-consuming to label users' utterances as intents are diversely expressed and novel intents will continually be involved. Instead, we study the zero-shot intent detection problem, which aims to detect emerging user intents where no labeled utterances are currently available. We propose two capsule-based architectures: IntentCapsNet that extracts semantic features from utterances and aggregates them to discriminate existing intents, and IntentCapsNet-ZSL which gives IntentCapsNet the zero-shot learning ability to discriminate emerging intents via knowledge transfer from existing intents. Experiments on two real-world datasets show that our model not only can better discriminate diversely expressed existing intents, but is also able to discriminate emerging intents when no labeled utterances are available.", "keyphrases": ["intent detection", "zero-shot learning", "capsule network"]} +{"id": "deng-wiebe-2014-sentiment", "title": "Sentiment Propagation via Implicature Constraints", "abstract": "Opinions may be expressed implicitly via inference over explicit sentiments and events that positively/negatively affect entities (goodFor/badFor events). 
We investigate how such inferences may be exploited to improve sentiment analysis, given goodFor/badFor event information. We apply Loopy Belief Propagation to propagate sentiments among entities. The graph-based model improves over explicit sentiment classification by 10 points in precision and, in an evaluation of the model itself, we find it has an 89% chance of propagating sentiments correctly.", "keyphrases": ["badfor event", "sentiment propagation", "agent", "generalized set", "theme"]} +{"id": "li-etal-2019-biomedical", "title": "Biomedical Event Extraction based on Knowledge-driven Tree-LSTM", "abstract": "Event extraction for the biomedical domain is more challenging than that in the general news domain since it requires broader acquisition of domain-specific knowledge and deeper understanding of complex contexts. To better encode contextual information and external background knowledge, we propose a novel knowledge base (KB)-driven tree-structured long short-term memory networks (Tree-LSTM) framework, incorporating two new types of features: (1) dependency structures to capture wide contexts; (2) entity properties (types and category descriptions) from external ontologies via entity linking. We evaluate our approach on the BioNLP shared task with Genia dataset and achieve a new state-of-the-art result. In addition, both quantitative and qualitative studies demonstrate the advancement of the Tree-LSTM and the external knowledge representation for biomedical event extraction.", "keyphrases": ["knowledge base", "biomedical event extraction", "tree-lstm model", "gene ontology"]} +{"id": "vulic-moens-2013-study", "title": "A Study on Bootstrapping Bilingual Vector Spaces from Non-Parallel Data (and Nothing Else)", "abstract": "We present a new language pair agnostic approach to inducing bilingual vector spaces from non-parallel data without any other resource in a bootstrapping fashion. The paper systematically introduces and describes all key elements of the bootstrapping procedure: (1) starting point or seed lexicon, (2) the confidence estimation and selection of new dimensions of the space, and (3) convergence. We test the quality of the induced bilingual vector spaces, and analyze the influence of the different components of the bootstrapping approach in the task of bilingual lexicon extraction (BLE) for two language pairs. Results reveal that, contrary to conclusions from prior work, the seeding of the bootstrapping process has a heavy impact on the quality of the learned lexicons. We also show that our approach outperforms the best performing fully corpus-based BLE methods on these test sets.", "keyphrases": ["vector space", "non-parallel data", "seed lexicon", "cross-lingual signal"]} +{"id": "koehn-etal-2019-findings", "title": "Findings of the WMT 2019 Shared Task on Parallel Corpus Filtering for Low-Resource Conditions", "abstract": "Following the WMT 2018 Shared Task on Parallel Corpus Filtering, we posed the challenge of assigning sentence-level quality scores for very noisy corpora of sentence pairs crawled from the web, with the goal of sub-selecting 2% and 10% of the highest-quality data to be used to train machine translation systems. This year, the task tackled the low resource condition of Nepali-English and Sinhala-English. 
Eleven participants from companies, national research labs, and universities participated in this task.", "keyphrases": ["shared task", "parallel corpus filtering", "low-resource condition", "low resource language"]} +{"id": "kang-etal-2020-dynamic", "title": "Dynamic Context Selection for Document-level Neural Machine Translation via Reinforcement Learning", "abstract": "Document-level neural machine translation has yielded attractive improvements. However, the majority of existing methods roughly use all context sentences in a fixed scope. They neglect the fact that different source sentences need different sizes of context. To address this problem, we propose an effective approach to select dynamic context so that the document-level translation model can utilize the more useful selected context sentences to produce better translations. Specifically, we introduce a selection module that is independent of the translation module to score each candidate context sentence. Then, we propose two strategies to explicitly select a variable number of context sentences and feed them into the translation module. We train the two modules end-to-end via reinforcement learning. A novel reward is proposed to encourage the selection and utilization of dynamic context sentences. Experiments demonstrate that our approach can select adaptive context sentences for different source sentences, and significantly improves the performance of document-level translation methods.", "keyphrases": ["neural machine translation", "reinforcement learning", "context sentence", "dynamic context selection"]} +{"id": "song-etal-2014-applying", "title": "Applying Argumentation Schemes for Essay Scoring", "abstract": "Under the framework of the argumentation scheme theory (Walton, 1996), we developed annotation protocols for an argumentative writing task to support identification and classification of the arguments being made in essays. Each annotation protocol defined argumentation schemes (i.e., reasoning patterns) in a given writing prompt and listed questions to help evaluate an argument based on these schemes, to make the argument structure in a text explicit and classifiable. We report findings based on an annotation of 600 essays. Most annotation categories were applied reliably by human annotators, and some categories significantly contributed to the essay score. An NLP system to identify sentences containing scheme-relevant critical questions was developed based on the human annotations.", "keyphrases": ["argumentation scheme", "essay scoring", "annotator", "student", "writing support system"]} +{"id": "anthonio-etal-2022-clarifying", "title": "Clarifying Implicit and Underspecified Phrases in Instructional Text", "abstract": "Natural language inherently consists of implicit and underspecified phrases, which represent potential sources of misunderstanding. In this paper, we present a data set of such phrases in English from instructional texts together with multiple possible clarifications. Our data set, henceforth called CLAIRE, is based on a corpus of revision histories from wikiHow, from which we extract human clarifications that resolve an implicit or underspecified phrase. We show how language modeling can be used to generate alternate clarifications, which may or may not be compatible with the human clarification. Based on plausibility judgements for each clarification, we define the task of distinguishing between plausible and implausible clarifications. 
We provide several baseline models for this task and analyze to what extent different clarifications represent multiple readings as a first step to investigate misunderstandings caused by implicit/underspecified language in instructional texts.", "keyphrases": ["instructional text", "clarification", "underspecified language"]} +{"id": "sassano-kurohashi-2010-using", "title": "Using Smaller Constituents Rather Than Sentences in Active Learning for Japanese Dependency Parsing", "abstract": "We investigate active learning methods for Japanese dependency parsing. We propose active learning methods of using partial dependency relations in a given sentence for parsing and evaluate their effectiveness empirically. Furthermore, we utilize syntactic constraints of Japanese to obtain more labeled examples from precious labeled ones that annotators give. Experimental results show that our proposed methods improve considerably the learning curve of Japanese dependency parsing. In order to achieve an accuracy of over 88.3%, one of our methods requires only 34.4% of labeled examples as compared to passive learning.", "keyphrases": ["active learning", "japanese dependency parsing", "partial annotation"]} +{"id": "hathout-sajous-2016-wiktionnaires", "title": "Wiktionnaire's Wikicode GLAWIfied: a Workable French Machine-Readable Dictionary", "abstract": "GLAWI is a free, large-scale and versatile Machine-Readable Dictionary (MRD) that has been extracted from the French language edition of Wiktionary, called Wiktionnaire. In (Sajous and Hathout, 2015), we introduced GLAWI, gave the rationale behind the creation of this lexicographic resource and described the extraction process, focusing on the conversion and standardization of the heterogeneous data provided by this collaborative dictionary. In the current article, we describe the content of GLAWI and illustrate how it is structured. We also suggest various applications, ranging from linguistic studies, NLP applications to psycholinguistic experimentation. They all can take advantage of the diversity of the lexical knowledge available in GLAWI. Besides this diversity and extensive lexical coverage, GLAWI is also remarkable because it is the only free lexical resource of contemporary French that contains definitions. This unique material opens the way to the renewal of MRD-based methods, notably the automated extraction and acquisition of semantic relations.", "keyphrases": ["glawi", "creation", "wiktionnaire"]} +{"id": "wang-etal-2017-sentence", "title": "Sentence Embedding for Neural Machine Translation Domain Adaptation", "abstract": "Although new corpora are becoming increasingly available for machine translation, only those that belong to the same or similar domains are typically able to improve translation performance. Recently Neural Machine Translation (NMT) has become prominent in the field. However, most of the existing domain adaptation methods only focus on phrase-based machine translation. In this paper, we exploit the NMT's internal embedding of the source sentence and use the sentence embedding similarity to select the sentences which are close to in-domain data. 
The empirical adaptation results on the IWSLT English-French and NIST Chinese-English tasks show that the proposed methods can substantially improve NMT performance by 2.4-9.0 BLEU points, outperforming the existing state-of-the-art baseline by 2.3-4.5 BLEU points.", "keyphrases": ["domain adaptation", "translation performance", "sentence embedding"]} +{"id": "plank-agic-2018-distant", "title": "Distant Supervision from Disparate Sources for Low-Resource Part-of-Speech Tagging", "abstract": "We present a cross-lingual neural part-of-speech tagger that learns from disparate sources of distant supervision, and realistically scales to hundreds of low-resource languages. The model exploits annotation projection, instance selection, tag dictionaries, morphological lexicons, and distributed representations, all in a uniform framework. The approach is simple, yet surprisingly effective, resulting in a new state of the art without access to any gold annotated data.", "keyphrases": ["low-resource language", "annotation projection", "distant supervision", "pos tagger"]} +{"id": "pareti-2012-database", "title": "A Database of Attribution Relations", "abstract": "The importance of attribution is becoming evident due to its relevance in particular for Opinion Analysis and Information Extraction applications. Attribution would allow to identify different perspectives on a given topic or retrieve the statements of a specific source of interest, but also to select more relevant and reliable information. However, the scarce and partial resources available to date to conduct attribution studies have determined that only a portion of attribution structures has been identified and addressed. This paper presents the collection and further annotation of a database of over 9800 attribution relations from the Penn Discourse TreeBank (PDTB). The aim is to build a large and complete resource that fills a key gap in the field and enables the training and testing of robust attribution extraction systems.", "keyphrases": ["database", "attribution", "perspective"]} +{"id": "li-etal-2010-learning", "title": "Learning the Scope of Negation via Shallow Semantic Parsing", "abstract": "In this paper we present a simplified shallow semantic parsing approach to learning the scope of negation (SoN). This is done by formulating it as a shallow semantic parsing problem with the negation signal as the predicate and the negation scope as its arguments. Our parsing approach to SoN learning differs from the state-of-the-art chunking ones in two aspects. First, we extend SoN learning from the chunking level to the parse tree level, where structured syntactic information is available. Second, we focus on determining whether a constituent, rather than a word, is negated or not, via a simplified shallow semantic parsing framework. Evaluation on the BioScope corpus shows that structured syntactic information is effective in capturing the domination relationship between a negation signal and its dominated arguments. It also shows that our parsing approach much outperforms the state-of-the-art chunking ones.", "keyphrases": ["scope", "negation", "shallow semantic parsing", "bioscope corpus"]} +{"id": "wallace-etal-2015-sparse", "title": "Sparse, Contextually Informed Models for Irony Detection: Exploiting User Communities, Entities and Sentiment", "abstract": "Automatically detecting verbal irony (roughly, sarcasm) in online content is important for many practical applications (e.g., sentiment detection), but it is difficult. 
Previous approaches have relied predominantly on signal gleaned from word counts and grammatical cues. But such approaches fail to exploit the context in which comments are embedded. We thus propose a novel strategy for verbal irony classification that exploits contextual features, specifically by combining noun phrases and sentiment extracted from comments with the forum type (e.g., conservative or liberal) to which they were posted. We show that this approach improves verbal irony classification performance. Furthermore, because this method generates a very large feature space (and we expect predictive contextual features to be strong but few), we propose a mixed regularization strategy that places a sparsity-inducing ℓ1 penalty on the contextual feature weights on top of the ℓ2 penalty applied to all model coefficients. This increases model sparsity and reduces the variance of model performance.", "keyphrases": ["irony", "noun phrase", "sarcasm detection"]} +{"id": "khapra-etal-2010-everybody", "title": "Everybody loves a rich cousin: An empirical study of transliteration through bridge languages", "abstract": "Most state of the art approaches for machine transliteration are data driven and require significant parallel names corpora between languages. As a result, developing transliteration functionality among n languages could be a resource intensive task requiring parallel names corpora in the order of nC2. In this paper, we explore ways of reducing this high resource requirement by leveraging the available parallel data between subsets of the n languages, transitively. We propose, and show empirically, that reasonable quality transliteration engines may be developed between two languages, X and Y, even when no direct parallel names data exists between them, but only transitively through language Z. Such systems alleviate the need for O(nC2) corpora, significantly. In addition we show that the performance of such transitive transliteration systems is on par with direct transliteration systems, in practical applications, such as CLIR systems.", "keyphrases": ["bridge language", "machine transliteration", "pivot language"]} +{"id": "yao-etal-2012-probabilistic", "title": "Probabilistic Databases of Universal Schema", "abstract": "In data integration we transform information from a source into a target schema. A general problem in this task is loss of fidelity and coverage: the source expresses more knowledge than can fit into the target schema, or knowledge that is hard to fit into any schema at all. This problem is taken to an extreme in information extraction (IE) where the source is natural language. To address this issue, one can either automatically learn a latent schema emergent in text (a brittle and ill-defined task), or manually extend schemas. We propose instead to store data in a probabilistic database of universal schema. This schema is simply the union of all source schemas, and the probabilistic database learns how to predict the cells of each source relation in this union. For example, the database could store Freebase relations and relations that correspond to natural language surface patterns. The database would learn to predict what freebase relations hold true based on what surface patterns appear, and vice versa. 
We describe an analogy between such databases and collaborative filtering models, and use it to implement our paradigm with probabilistic PCA, a scalable and effective collaborative filtering method.", "keyphrases": ["universal schema", "information extraction", "freebase entity"]} +{"id": "kwong-tsou-2007-extending", "title": "Extending a Thesaurus in the Pan-Chinese Context", "abstract": "In this paper, we address a unique problem in Chinese language processing and report on our study on extending a Chinese thesaurus with region-specific words, mostly from the financial domain, from various Chinese speech communities. With the larger goal of automatically constructing a Pan-Chinese lexical resource, this work aims at taking an existing semantic classificatory structure as leverage and incorporating new words into it. In particular, it is important to see if the classification could accommodate new words from heterogeneous data sources, and whether simple similarity measures and clustering methods could cope with such variation. We use the cosine function for similarity and test it on automatically classifying 120 target words from four regions, using different datasets for the extraction of feature vectors. The automatic classification results were evaluated against human judgement, and the performance was encouraging, with accuracy reaching over 85% in some cases. Thus while human judgement is not straightforward and it is difficult to create a Pan-Chinese lexicon manually, it is observed that combining simple clustering methods with the appropriate data sources appears to be a promising approach toward its automatic construction.", "keyphrases": ["thesaurus", "classificatory structure", "new word"]} +{"id": "vulic-etal-2020-multi", "title": "Multi-SimLex: A Large-Scale Evaluation of Multilingual and Crosslingual Lexical Semantic Similarity", "abstract": "We introduce Multi-SimLex, a large-scale lexical resource and evaluation benchmark covering data sets for 12 typologically diverse languages, including major languages (e.g., Mandarin Chinese, Spanish, Russian) as well as less-resourced ones (e.g., Welsh, Kiswahili). Each language data set is annotated for the lexical relation of semantic similarity and contains 1,888 semantically aligned concept pairs, providing a representative coverage of word classes (nouns, verbs, adjectives, adverbs), frequency ranks, similarity intervals, lexical fields, and concreteness levels. Additionally, owing to the alignment of concepts across languages, we provide a suite of 66 crosslingual semantic similarity data sets. Because of its extensive size and language coverage, Multi-SimLex provides entirely novel opportunities for experimental evaluation and analysis. On its monolingual and crosslingual benchmarks, we evaluate and analyze a wide array of recent state-of-the-art monolingual and crosslingual representation models, including static and contextualized word embeddings (such as fastText, monolingual and multilingual BERT, XLM), externally informed lexical representations, as well as fully unsupervised and (weakly) supervised crosslingual word embeddings. We also present a step-by-step data set creation protocol for creating consistent, Multi-SimLex\u2013style resources for additional languages. 
We make these contributions\u2014the public release of Multi-SimLex data sets, their creation protocol, strong baseline results, and in-depth analyses which can be helpful in guiding future developments in multilingual lexical semantics and representation learning\u2014available via a Web site that will encourage community effort in further expansion of Multi-SimLex to many more languages. Such a large-scale semantic resource could inspire significant further advances in NLP across languages.", "keyphrases": ["major language", "concept pair", "coverage", "word embedding", "multi-simlex"]} +{"id": "yu-siskind-2013-grounded", "title": "Grounded Language Learning from Video Described with Sentences", "abstract": "We present a method that learns representations for word meanings from short video clips paired with sentences. Unlike prior work on learning language from symbolic input, our input consists of video of people interacting with multiple complex objects in outdoor environments. Unlike prior computer-vision approaches that learn from videos with verb labels or images with noun labels, our labels are sentences containing nouns, verbs, prepositions, adjectives, and adverbs. The correspondence between words and concepts in the video is learned in an unsupervised fashion, even when the video depicts simultaneous events described by multiple sentences or when different aspects of a single event are described with multiple sentences. The learned word meanings can be subsequently used to automatically generate description of new video.", "keyphrases": ["video clip", "object", "environment", "noun", "language acquisition"]} +{"id": "caglayan-etal-2021-cross", "title": "Cross-lingual Visual Pre-training for Multimodal Machine Translation", "abstract": "Pre-trained language models have been shown to improve performance in many natural language tasks substantially. Although the early focus of such models was single language pre-training, recent advances have resulted in cross-lingual and visual pre-training methods. In this paper, we combine these two approaches to learn visually-grounded cross-lingual representations. Specifically, we extend the translation language modelling (Lample and Conneau, 2019) with masked region classification and perform pre-training with three-way parallel vision & language corpora. We show that when fine-tuned for multimodal machine translation, these models obtain state-of-the-art performance. We also provide qualitative insights into the usefulness of the learned grounded representations.", "keyphrases": ["multimodal machine translation", "visual pre-training method", "cross-lingual representation"]} +{"id": "szekely-etal-2012-winktalk", "title": "WinkTalk: a demonstration of a multimodal speech synthesis platform linking facial expressions to expressive synthetic voices", "abstract": "This paper describes a demonstration of the WinkTalk system, which is a speech synthesis platform using expressive synthetic voices. With the help of a webcamera and facial expression analysis, the system allows the user to control the expressive features of the synthetic speech for a particular utterance with their facial expressions. Based on a personalised mapping between three expressive synthetic voices and the user's facial expressions, the system selects a voice that matches their face at the moment of sending a message. 
The WinkTalk system is an early research prototype that aims to demonstrate that facial expressions can be used as a more intuitive control over expressive speech synthesis than manual selection of voice types, thereby contributing to an improved communication experience for users of speech generating devices.", "keyphrases": ["demonstration", "speech synthesis platform", "facial expression", "expressive feature"]} +{"id": "zou-etal-2018-adversarial", "title": "Adversarial Feature Adaptation for Cross-lingual Relation Classification", "abstract": "Relation Classification aims to classify the semantic relationship between two marked entities in a given sentence. It plays a vital role in a variety of natural language processing applications. Most existing methods focus on exploiting mono-lingual data, e.g., in English, due to the lack of annotated data in other languages. In this paper, we come up with a feature adaptation approach for cross-lingual relation classification, which employs a generative adversarial network (GAN) to transfer feature representations from one language with rich annotated data to another language with scarce annotated data. Such a feature adaptation approach enables feature imitation via the competition between a relation classification network and a rival discriminator. Experimental results on the ACE 2005 multilingual training corpus, treating English as the source language and Chinese the target, demonstrate the effectiveness of our proposed approach, yielding an improvement of 5.7% over the state-of-the-art.", "keyphrases": ["cross-lingual relation classification", "feature adaptation approach", "generative adversarial network"]} +{"id": "wang-etal-2011-new", "title": "A New Unsupervised Approach to Word Segmentation", "abstract": "This article proposes ESA, a new unsupervised approach to word segmentation. ESA is an iterative process consisting of 3 phases: Evaluation, Selection, and Adjustment. In Evaluation, both certainty and uncertainty of character sequence co-occurrence in corpora are considered as the statistical evidence supporting goodness measurement. Additionally, the statistical data of character sequences with various lengths become comparable with each other by using a simple process called Balancing. In Selection, a local maximum strategy is adopted without thresholds, and the strategy can be implemented with dynamic programming. In Adjustment, a part of the statistical data is updated to improve successive results. In our experiment, ESA was evaluated on the SIGHAN Bakeoff-2 data set. The results suggest that ESA is effective on Chinese corpora. It is noteworthy that the F-measures of the results are basically monotone increasing and can rapidly converge to relatively high values. Furthermore, the empirical formulae based on the results can be used to predict the parameter in ESA to avoid parameter estimation that is usually time-consuming.", "keyphrases": ["word segmentation", "esa", "iterative process", "adjustment"]} +{"id": "bitton-etal-2021-automatic", "title": "Automatic Generation of Contrast Sets from Scene Graphs: Probing the Compositional Consistency of GQA", "abstract": "Recent works have shown that supervised models often exploit data artifacts to achieve good test scores while their performance severely degrades on samples outside their training distribution. Contrast sets (Gardner et al., 2020) quantify this phenomenon by perturbing test samples in a minimal way such that the output label is modified. 
While most contrast sets were created manually, requiring intensive annotation effort, we present a novel method which leverages rich semantic input representation to automatically generate contrast sets for the visual question answering task. Our method computes the answer of perturbed questions, thus vastly reducing annotation cost and enabling thorough evaluation of models' performance on various semantic aspects (e.g., spatial or relational reasoning). We demonstrate the effectiveness of our approach on the GQA dataset and its semantic scene graph image representation. We find that, despite GQA's compositionality and carefully balanced label distribution, two high-performing models drop 13-17% in accuracy compared to the original test set. Finally, we show that our automatic perturbation can be applied to the training set to mitigate the degradation in performance, opening the door to more robust models.", "keyphrases": ["contrast set", "scene graph", "compositional consistency", "gqa"]} +{"id": "bird-etal-2008-acl", "title": "The ACL Anthology Reference Corpus: A Reference Dataset for Bibliographic Research in Computational Linguistics", "abstract": "The ACL Anthology is a digital archive of conference and journal papers in natural language processing and computational linguistics. Its primary purpose is to serve as a reference repository of research results, but we believe that it can also be an object of study and a platform for research in its own right. We describe an enriched and standardized reference corpus derived from the ACL Anthology that can be used for research in scholarly document processing. This corpus, which we call the ACL Anthology Reference Corpus (ACL ARC), brings together the recent activities of a number of research groups around the world. Our goal is to make the corpus widely available, and to encourage other researchers to use it as a standard testbed for experiments in both bibliographic and bibliometric research.", "keyphrases": ["acl anthology", "reference corpus", "research result"]} +{"id": "filippova-strube-2008-sentence", "title": "Sentence Fusion via Dependency Graph Compression", "abstract": "We present a novel unsupervised sentence fusion method which we apply to a corpus of biographies in German. Given a group of related sentences, we align their dependency trees and build a dependency graph. Using integer linear programming we compress this graph to a new tree, which we then linearize. We use GermaNet and Wikipedia for checking semantic compatibility of co-arguments. In an evaluation with human judges our method outperforms the fusion approach of Barzilay & McKeown (2005) with respect to readability.", "keyphrases": ["biography", "integer linear programming", "sentence fusion", "cluster", "fusion system"]} +{"id": "yancheva-etal-2015-using", "title": "Using linguistic features longitudinally to predict clinical scores for Alzheimer's disease and related dementias", "abstract": "We use a set of 477 lexicosyntactic, acoustic, and semantic features extracted from 393 speech samples in DementiaBank to predict clinical MMSE scores, an indicator of the severity of cognitive decline associated with dementia. We use a bivariate dynamic Bayes net to represent the longitudinal progression of observed linguistic features and MMSE scores over time, and obtain a mean absolute error (MAE) of 3.83 in predicting MMSE, comparable to within-subject interrater standard deviation of 3.9 to 4.8 [1]. 
When focusing on individuals with more longitudinal samples, we improve MAE to 2.91, which suggests the importance of longitudinal data collection. Index Terms- Alzheimer\u2019s disease, dementia, Mini-Mental State Examination (MMSE), dynamic Bayes network, feature selection", "keyphrases": ["linguistic feature", "dementia", "cognitive impairment"]}
+{"id": "geng-etal-2018-adaptive", "title": "Adaptive Multi-pass Decoder for Neural Machine Translation", "abstract": "Although end-to-end neural machine translation (NMT) has achieved remarkable progress in recent years, the idea of adopting multi-pass decoding mechanism into conventional NMT is not well explored. In this paper, we propose a novel architecture called adaptive multi-pass decoder, which introduces a flexible multi-pass polishing mechanism to extend the capacity of NMT via reinforcement learning. More specifically, we adopt an extra policy network to automatically choose a suitable and effective number of decoding passes, according to the complexity of source sentences and the quality of the generated translations. Extensive experiments on Chinese-English translation demonstrate the effectiveness of our proposed adaptive multi-pass decoder upon the conventional NMT with a significant improvement of about 1.55 BLEU.", "keyphrases": ["neural machine translation", "mechanism", "source sentence", "adaptive multi-pass decoder"]}
+{"id": "labutov-lipson-2012-humor", "title": "Humor as Circuits in Semantic Networks", "abstract": "This work presents a first step to a general implementation of the Semantic-Script Theory of Humor (SSTH). Of the scarce amount of research in computational humor, no research had focused on humor generation beyond simple puns and punning riddles. We propose an algorithm for mining simple humorous scripts from a semantic network (ConceptNet) by specifically searching for dual scripts that jointly maximize overlap and incongruity metrics in line with Raskin's Semantic-Script Theory of Humor. Initial results show that a more relaxed constraint of this form is capable of generating humor of deeper semantic content than wordplay riddles. We evaluate the said metrics through a user-assessed quality of the generated two-liners.", "keyphrases": ["riddle", "humor", "semantic script theory"]}
+{"id": "rosti-etal-2007-improved", "title": "Improved Word-Level System Combination for Machine Translation", "abstract": "Recently, confusion network decoding has been applied in machine translation system combination. Due to errors in the hypothesis alignment, decoding may result in ungrammatical combination outputs. This paper describes an improved confusion network based method to combine outputs from multiple MT systems. In this approach, arbitrary features may be added log-linearly into the objective function, thus allowing language model expansion and re-scoring. Also, a novel method to automatically select the hypothesis which other hypotheses are aligned against is proposed. A generic weight tuning algorithm may be used to optimize various automatic evaluation metrics including TER, BLEU and METEOR.
The experiments using the 2005 Arabic to English and Chinese to English NIST MT evaluation tasks show significant improvements in BLEU scores compared to earlier confusion network decoding based methods.", "keyphrases": ["machine translation", "hypothesis", "system combination method"]}
+{"id": "shah-specia-2014-quality", "title": "Quality estimation for translation selection", "abstract": "We describe experiments on quality estimation to select the best translation among multiple options for a given source sentence. We consider a realistic and challenging setting where the translation systems used are unknown, and no relative quality assessments are available for the training of prediction models. Our findings indicate that prediction errors are higher in this blind setting. However, these errors do not have a negative impact on performance when the predictions are used to select the best translation, compared to non-blind settings. This holds even when test conditions (text domains, MT systems) are different from model building conditions. In addition, we experiment with quality prediction for translations produced by both translation systems and human translators. Although the latter are on average of much higher quality, we show that automatically distinguishing the two types of translation is not a trivial problem.", "keyphrases": ["option", "source sentence", "quality estimation"]}
+{"id": "goodman-etal-2016-noise", "title": "Noise reduction and targeted exploration in imitation learning for Abstract Meaning Representation parsing", "abstract": "Semantic parsers map natural language statements into meaning representations, and must abstract over syntactic phenomena, resolve anaphora, and identify word senses to eliminate ambiguous interpretations. Abstract meaning representation (AMR) is a recent example of one such semantic formalism which, similar to a dependency parse, utilizes a graph to represent relationships between concepts (Banarescu et al., 2013). As with dependency parsing, transition-based approaches are a common approach to this problem. However, when trained in the traditional manner these systems are susceptible to the accumulation of errors when they find undesirable states during greedy decoding. Imitation learning algorithms have been shown to help these systems recover from such errors. To effectively use these methods for AMR parsing we find it highly beneficial to introduce two novel extensions: noise reduction and targeted exploration. The former mitigates the noise in the feature representation, a result of the complexity of the task. The latter targets the exploration steps of imitation learning towards areas which are likely to provide the most information in the context of a large action-space. We achieve state-of-the-art results, and improve upon standard transition-based parsing by 4.7 F1 points.", "keyphrases": ["exploration", "imitation", "amr", "dependency tree"]}
+{"id": "chaimongkol-etal-2014-corpus", "title": "Corpus for Coreference Resolution on Scientific Papers", "abstract": "The ever-growing number of published scientific papers prompts the need for automatic knowledge extraction to help scientists keep up with the state-of-the-art in their respective fields. To construct a good knowledge extraction system, annotated corpora in the scientific domain are required to train machine learning models.
As described in this paper, we have constructed an annotated corpus for coreference resolution in multiple scientific domains, based on an existing corpus. We have modified the annotation scheme from Message Understanding Conference to better suit scientific texts. Then we applied that to the corpus. The annotated corpus is then compared with corpora in general domains in terms of distribution of resolution classes and performance of the Stanford Dcoref coreference resolver. Through these comparisons, we have demonstrated quantitatively that our manually annotated corpus differs from a general-domain corpus, which suggests deep differences between general-domain texts and scientific texts and which shows that different approaches can be made to tackle coreference resolution for general texts and scientific texts.", "keyphrases": ["coreference resolution", "scientific paper", "annotated corpus"]} +{"id": "yin-etal-2016-abcnn", "title": "ABCNN: Attention-Based Convolutional Neural Network for Modeling Sentence Pairs", "abstract": "How to model a pair of sentences is a critical issue in many NLP tasks such as answer selection (AS), paraphrase identification (PI) and textual entailment (TE). Most prior work (i) deals with one individual task by fine-tuning a specific system; (ii) models each sentence's representation separately, rarely considering the impact of the other sentence; or (iii) relies fully on manually designed, task-specific linguistic features. This work presents a general Attention Based Convolutional Neural Network (ABCNN) for modeling a pair of sentences. We make three contributions. (i) The ABCNN can be applied to a wide variety of tasks that require modeling of sentence pairs. (ii) We propose three attention schemes that integrate mutual influence between sentences into CNNs; thus, the representation of each sentence takes into consideration its counterpart. These interdependent sentence pair representations are more powerful than isolated sentence representations. (iii) ABCNNs achieve state-of-the-art performance on AS, PI and TE tasks. We release code at: .", "keyphrases": ["convolutional neural network", "sentence pair", "cnns", "attention-based cnn model"]} +{"id": "bilmes-kirchhoff-2003-factored", "title": "Factored Language Models and Generalized Parallel Backoff", "abstract": "We introduce factored language models (FLMs) and generalized parallel backoff (GPB). An FLM represents words as bundles of features (e.g., morphological classes, stems, data-driven clusters, etc.), and induces a probability model covering sequences of bundles rather than just words. GPB extends standard backoff to general conditional probability tables where variables might be heterogeneous types, where no obvious natural (temporal) backoff order exists, and where multiple dynamic backoff strategies are allowed. These methodologies were implemented during the JHU 2002 workshop as extensions to the SRI language modeling toolkit. This paper provides initial perplexity results on both CallHome Arabic and on Penn Treebank Wall Street Journal articles. Significantly, FLMs with GPB can produce bigrams with significantly lower perplexity, sometimes lower than highly-optimized baseline trigrams. 
In a multi-pass speech recognition context, where bigrams are used to create first-pass bigram lattices or N-best lists, these results are highly relevant.", "keyphrases": ["language model", "perplexity", "research work"]} +{"id": "marcheggiani-perez-beltrachini-2018-deep", "title": "Deep Graph Convolutional Encoders for Structured Data to Text Generation", "abstract": "Most previous work on neural text generation from graph-structured data relies on standard sequence-to-sequence methods. These approaches linearise the input graph to be fed to a recurrent neural network. In this paper, we propose an alternative encoder based on graph convolutional networks that directly exploits the input structure. We report results on two graph-to-sequence datasets that empirically show the benefits of explicitly encoding the input graph structure.", "keyphrases": ["text generation", "input graph", "convolutional network"]} +{"id": "ethayarajh-etal-2019-understanding", "title": "Understanding Undesirable Word Embedding Associations", "abstract": "Word embeddings are often criticized for capturing undesirable word associations such as gender stereotypes. However, methods for measuring and removing such biases remain poorly understood. We show that for any embedding model that implicitly does matrix factorization, debiasing vectors post hoc using subspace projection (Bolukbasi et al., 2016) is, under certain conditions, equivalent to training on an unbiased corpus. We also prove that WEAT, the most common association test for word embeddings, systematically overestimates bias. Given that the subspace projection method is provably effective, we use it to derive a new measure of association called the relational inner product association (RIPA). Experiments with RIPA reveal that, on average, skipgram with negative sampling (SGNS) does not make most words any more gendered than they are in the training corpus. However, for gender-stereotyped words, SGNS actually amplifies the gender association in the corpus.", "keyphrases": ["word embedding", "association", "weat", "subspace projection method", "gender bias"]} +{"id": "aramaki-etal-2011-twitter", "title": "Twitter Catches The Flu: Detecting Influenza Epidemics using Twitter", "abstract": "With the recent rise in popularity and scale of social media, a growing need exists for systems that can extract useful information from huge amounts of data. We address the issue of detecting influenza epidemics. First, the proposed system extracts influenza related tweets using Twitter API. Then, only tweets that mention actual influenza patients are extracted by the support vector machine (SVM) based classifier. The experiment results demonstrate the feasibility of the proposed approach (0.89 correlation to the gold standard). Especially at the outbreak and early spread (early epidemic stage), the proposed method shows high correlation (0.97 correlation), which outperforms the state-of-the-art methods. This paper describes that Twitter texts reflect the real world, and that NLP techniques can be applied to extract only tweets that contain useful information.", "keyphrases": ["flu", "influenza epidemic", "twitter", "flu-related tweet", "social medium"]} +{"id": "shimaoka-etal-2017-neural", "title": "Neural Architectures for Fine-grained Entity Type Classification", "abstract": "In this work, we investigate several neural network architectures for fine-grained entity type classification and make three key contributions. 
Despite being a natural comparison and addition, previous work on attentive neural architectures has not considered hand-crafted features and we combine these with learnt features and establish that they complement each other. Additionally, through quantitative analysis we establish that the attention mechanism learns to attend over syntactic heads and the phrase containing the mention, both of which are known to be strong hand-crafted features for our task. We introduce parameter sharing between labels through a hierarchical encoding method that in low-dimensional projections shows clear clusters for each type hierarchy. Lastly, despite using the same evaluation dataset, the literature frequently compares models trained using different data. We demonstrate that the choice of training data has a drastic impact on performance, which decreases by as much as 9.85% loose micro F1 score for a previously proposed method. Despite this discrepancy, our best model achieves state-of-the-art results with 75.36% loose micro F1 score on the well-established Figer (GOLD) dataset and we report the best results for models trained using publicly available data for the OntoNotes dataset with 64.93% loose micro F1 score.", "keyphrases": ["entity typing", "hand-crafted feature", "neural architecture", "relevant expression"]}
+{"id": "plank-2009-structural", "title": "Structural Correspondence Learning for Parse Disambiguation", "abstract": "The paper presents an application of Structural Correspondence Learning (SCL) (Blitzer et al., 2006) for domain adaptation of a stochastic attribute-value grammar (SAVG). So far, SCL has been applied successfully in NLP for Part-of-Speech tagging and Sentiment Analysis (Blitzer et al., 2006; Blitzer et al., 2007). An attempt was made in the CoNLL 2007 shared task to apply SCL to non-projective dependency parsing (Shimizu and Nakagawa, 2007), however, without any clear conclusions. We report on our exploration of applying SCL to adapt a syntactic disambiguation model and show promising initial results on Wikipedia domains.", "keyphrases": ["structural correspondence learning", "many nlp task", "engineering problem"]}
+{"id": "wu-etal-2021-polyjuice", "title": "Polyjuice: Generating Counterfactuals for Explaining, Evaluating, and Improving Models", "abstract": "While counterfactual examples are useful for analysis and training of NLP models, current generation methods either rely on manual labor to create very few counterfactuals, or only instantiate limited types of perturbations such as paraphrases or word substitutions. We present Polyjuice, a general-purpose counterfactual generator that allows for control over perturbation types and locations, trained by finetuning GPT-2 on multiple datasets of paired sentences.
We show that Polyjuice produces diverse sets of realistic counterfactuals, which in turn are useful in various distinct applications: improving training and evaluation on three different tasks (with around 70% less annotation effort than manual generation), augmenting state-of-the-art explanation techniques, and supporting systematic counterfactual error analysis by revealing behaviors easily missed by human experts.", "keyphrases": ["counterfactual", "perturbation", "gpt-2", "expert", "polyjuice"]}
+{"id": "itagaki-aikawa-2008-post", "title": "Post-MT Term Swapper: Supplementing a Statistical Machine Translation System with a User Dictionary", "abstract": "A statistical machine translation (SMT) system requires homogeneous training data in order to get domain-sensitive (or context-sensitive) terminology translations. If the data contains various domains, it is difficult for an SMT to learn context-sensitive terminology mappings probabilistically. Yet, terminology translation accuracy is an important issue for MT users. This paper explores an approach to tackle this terminology translation problem for an SMT. We propose a way to identify terminology translations from MT output and automatically swap them with user-defined translations. Our approach is simple and can be applied to any type of MT system. We call our prototype \u201cTerm Swapper\u201d. Term Swapper allows MT users to draw on their own dictionaries without affecting any parts of the MT output except for the terminology translation(s) in question. Using an SMT developed at Microsoft Research, called MSR-MT (Quirk et al., (2005); Menezes & Quirk (2005)), we conducted initial experiments to investigate the coverage rate of Term Swapper and its impact on the overall quality of MT output. The results from our experiments show high coverage and positive impact on the overall MT quality.", "keyphrases": ["term swapper", "user-defined translation", "post-processing step"]}
+{"id": "barrett-etal-2016-weakly", "title": "Weakly Supervised Part-of-speech Tagging Using Eye-tracking Data", "abstract": "For many of the world\u2019s languages, there are no or very few linguistically annotated resources. On the other hand, raw text, and often also dictionaries, can be harvested from the web for many of these languages, and part-of-speech taggers can be trained with these resources. At the same time, previous research shows that eye-tracking data, which can be obtained without explicit annotation, contains clues to part-of-speech information. In this work, we bring these two ideas together and show that given raw text, a dictionary, and eye-tracking data obtained from naive participants reading text, we can train a weakly supervised PoS tagger using a second-order HMM with maximum entropy emissions. The best model uses type-level aggregates of eye-tracking data and significantly outperforms a baseline that does not have access to eye-tracking data.", "keyphrases": ["part-of-speech tagging", "eye-tracking data", "participant", "token level average", "dundee corpus"]}
+{"id": "akhtar-etal-2016-hybrid", "title": "A Hybrid Deep Learning Architecture for Sentiment Analysis", "abstract": "In this paper, we propose a novel hybrid deep learning architecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework.
The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperform the state-of-the-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.", "keyphrases": ["sentiment analysis", "hindi dataset", "neural network architecture"]}
+{"id": "thurmair-2009-comparing", "title": "Comparing different architectures of hybrid Machine Translation systems", "abstract": "The contribution discusses variants of architectures of hybrid MT systems. The three main types of architectures are: coupling of systems (serial or parallel), architecture adaptations (integrating novel components into SMT or RMT architectures, either by pre/post-editing, or by system core modifications), and genuine hybrid systems, combining components of different paradigms. The interest is to investigate which resources are required for which types of systems, and to which extent the proposals contribute to an overall increase in MT quality.", "keyphrases": ["different architecture", "hybrid system", "linguistic information"]}
+{"id": "scialom-etal-2020-mlsum", "title": "MLSUM: The Multilingual Summarization Corpus", "abstract": "We present MLSUM, the first large-scale MultiLingual SUMmarization dataset. Obtained from online newspapers, it contains 1.5M+ article/summary pairs in five different languages \u2013 namely, French, German, Spanish, Russian, Turkish. Together with English news articles from the popular CNN/Daily mail dataset, the collected data form a large scale multilingual dataset which can enable new research directions for the text summarization community. We report cross-lingual comparative analyses based on state-of-the-art systems. These highlight existing biases which motivate the use of a multi-lingual dataset.", "keyphrases": ["multilingual summarization", "different language", "research direction", "mlsum"]}
+{"id": "zhao-etal-2012-identifying", "title": "Identifying Event-related Bursts via Social Media Activities", "abstract": "Activities on social media increase at a dramatic rate. When an external event happens, there is a surge in the degree of activities related to the event. These activities may be temporally correlated with one another, but they may also capture different aspects of an event and therefore exhibit different bursty patterns. In this paper, we propose to identify event-related bursts via social media activities. We study how to correlate multiple types of activities to derive a global bursty pattern. To model smoothness of one state sequence, we propose a novel function which can capture the state context. The experiments on a large Twitter dataset show that our methods are very effective.", "keyphrases": ["event-related burst", "activity", "state context"]}
+{"id": "zhang-etal-2020-bert", "title": "BERT-XML: Large Scale Automated ICD Coding Using BERT Pretraining", "abstract": "ICD coding is the task of classifying and coding all diagnoses, symptoms and procedures associated with a patient's visit.
The process is often manual, extremely time-consuming and expensive for hospitals, as clinical interactions are usually recorded in free text medical notes. In this paper, we propose a machine learning model, BERT-XML, for large scale automated ICD coding of EHR notes, utilizing recently developed unsupervised pretraining that has achieved state-of-the-art performance on a variety of NLP tasks. We train a BERT model from scratch on EHR notes, learning with vocabulary better suited for EHR tasks and thus outperform off-the-shelf models. We further adapt the BERT architecture for ICD coding with multi-label attention. We demonstrate the effectiveness of BERT-based models on the large scale ICD code classification task using millions of EHR notes to predict thousands of unique codes.", "keyphrases": ["icd", "bert", "bert-xml"]}
+{"id": "ohki-etal-2011-recognizing", "title": "Recognizing Confinement in Web Texts", "abstract": "In the Recognizing Textual Entailment (RTE) task, sentence pairs are classified into one of three semantic relations: Entailment, Contradiction or Unknown. While we find some sentence pairs hold full entailments or contradictions, there are a number of pairs that partially entail or contradict one another depending on a specific situation. These partial contradiction sentence pairs contain useful information for opinion mining and other such tasks, but it is difficult for Internet users to access this knowledge because current frameworks do not differentiate between full contradictions and partial contradictions. In this paper, under current approaches to semantic relation recognition, we define a new semantic relation known as Confinement in order to recognize this useful information. This information is classified as either Contradiction or Entailment. We provide a series of semantic templates to recognize Confinement relations in Web texts, and then implement a system for recognizing Confinement between sentence pairs. We show that our proposed system can obtain an F-score of 61% for recognizing Confinement in Japanese-language Web texts, and it outperforms a baseline which does not use a manually compiled list of lexico-syntactic patterns to instantiate the semantic templates.", "keyphrases": ["confinement", "web text", "semantic relation", "contradiction"]}
+{"id": "kemps-snijders-etal-2008-isocat", "title": "ISOcat: Corralling Data Categories in the Wild", "abstract": "To achieve true interoperability for valuable linguistic resources different levels of variation need to be addressed. ISO Technical Committee 37, Terminology and other language and content resources, is developing a Data Category Registry. This registry will provide a reusable set of data categories. A new implementation, dubbed ISOcat, of the registry is currently under construction. This paper shortly describes the new data model for data categories that will be introduced in this implementation. It goes on with a sketch of the standardization process. Completed data categories can be reused by the community. This is done by either making a selection of data categories using the ISOcat web interface, or by other tools which interact with the ISOcat system using one of its various Application Programming Interfaces. Linguistic resources that use data categories from the registry should include persistent references, e.g. in the metadata or schemata of the resource, which point back to their origin.
These data category references can then be used to determine if two or more resources share common semantics, thus providing a level of interoperability close to the source data and a promising layer for semantic alignment on higher levels.", "keyphrases": ["data category", "registry", "isocat"]} +{"id": "qin-etal-2017-adversarial", "title": "Adversarial Connective-exploiting Networks for Implicit Discourse Relation Classification", "abstract": "Implicit discourse relation classification is of great challenge due to the lack of connectives as strong linguistic cues, which motivates the use of annotated implicit connectives to improve the recognition. We propose a feature imitation framework in which an implicit relation network is driven to learn from another neural network with access to connectives, and thus encouraged to extract similarly salient features for accurate classification. We develop an adversarial model to enable an adaptive imitation scheme through competition between the implicit network and a rival feature discriminator. Our method effectively transfers discriminability of connectives to the implicit features, and achieves state-of-the-art performance on the PDTB benchmark.", "keyphrases": ["discourse relation", "implicit connective", "various nlp task"]} +{"id": "gulordava-etal-2018-colorless", "title": "Colorless Green Recurrent Networks Dream Hierarchically", "abstract": "Recurrent neural networks (RNNs) achieved impressive results in a variety of linguistic processing tasks, suggesting that they can induce non-trivial properties of language. We investigate to what extent RNNs learn to track abstract hierarchical syntactic structure. We test whether RNNs trained with a generic language modeling objective in four languages (Italian, English, Hebrew, Russian) can predict long-distance number agreement in various constructions. We include in our evaluation nonsensical sentences where RNNs cannot rely on semantic or lexical cues (\u201cThe colorless green ideas I ate with the chair sleep furiously\u201d), and, for Italian, we compare model performance to human intuitions. Our language-model-trained RNNs make reliable predictions about long-distance agreement, and do not lag much behind human performance. We thus bring support to the hypothesis that RNNs are not just shallow-pattern extractors, but they also acquire deeper grammatical competence.", "keyphrases": ["hebrew", "neural language model", "linguistic knowledge", "awareness", "perplexity"]} +{"id": "augenstein-etal-2017-semeval", "title": "SemEval 2017 Task 10: ScienceIE - Extracting Keyphrases and Relations from Scientific Publications", "abstract": "We describe the SemEval task of extracting keyphrases and relations between them from scientific documents, which is crucial for understanding which publications describe which processes, tasks and materials. Although this was a new task, we had a total of 26 submissions across 3 evaluation scenarios. 
We expect the task and the findings reported in this paper to be relevant for researchers working on understanding scientific content, as well as the broader knowledge base population and information extraction communities.", "keyphrases": ["scienceie", "information extraction", "semeval"]}
+{"id": "thompson-etal-2011-promoting", "title": "Promoting Interoperability of Resources in META-SHARE", "abstract": "META-NET is a Network of Excellence aiming to improve significantly on the number of language technologies that can assist European citizens, by enabling enhanced communication and cooperation across languages. A major outcome will be META-SHARE, a searchable network of repositories that collect resources such as language data, tools and related web services, covering a large number of European languages. These resources are intended to facilitate the development and evaluation of a wide range of new language processing applications and services. An important aim of META-SHARE is the promotion of interoperability amongst resources. In this paper, we describe our planned efforts to help to achieve this aim, through the adoption of the UIMA framework and the integration of the U-Compare system within the META-SHARE network. U-Compare facilitates the rapid construction and evaluation of NLP applications that make use of interoperable components, and, as such, can help to speed up the development of a new generation of European language technology applications.", "keyphrases": ["interoperability", "meta-net", "european language"]}
+{"id": "ringland-etal-2019-nne", "title": "NNE: A Dataset for Nested Named Entity Recognition in English Newswire", "abstract": "Named entity recognition (NER) is widely used in natural language processing applications and downstream tasks. However, most NER tools target flat annotation from popular datasets, eschewing the semantic information available in nested entity mentions. We describe NNE\u2014a fine-grained, nested named entity dataset over the full Wall Street Journal portion of the Penn Treebank (PTB). Our annotation comprises 279,795 mentions of 114 entity types with up to 6 layers of nesting. We hope the public release of this large dataset for English newswire will encourage development of new techniques for nested NER.", "keyphrases": ["english newswire", "entity mention", "penn treebank"]}
+{"id": "toral-2019-post", "title": "Post-editese: an Exacerbated Translationese", "abstract": "Post-editing (PE) machine translation (MT) is widely used for dissemination because it leads to higher productivity than human translation from scratch (HT). In addition, PE translations are found to be of equal or better quality than HTs. However, most such studies measure quality solely as the number of errors. We conduct a set of computational analyses in which we compare PE against HT on three different datasets that cover five translation directions with measures that address different translation universals and laws of translation: simplification, normalisation and interference. We find out that PEs are simpler and more normalised and have a higher degree of interference from the source language than HTs.", "keyphrases": ["source language", "post-editese", "lexical density"]}
+{"id": "hua-etal-2021-dyploc", "title": "DYPLOC: Dynamic Planning of Content Using Mixed Language Models for Text Generation", "abstract": "We study the task of long-form opinion text generation, which faces at least two distinct challenges.
First, existing neural generation models fall short of coherence, thus requiring efficient content planning. Second, diverse types of information are needed to guide the generator to cover both subjective and objective content. To this end, we propose DYPLOC, a generation framework that conducts dynamic planning of content while generating the output based on a novel design of mixed language models. To enrich the generation with diverse content, we further propose to use large pre-trained models to predict relevant concepts and to generate claims. We experiment with two challenging tasks on newly collected datasets: (1) argument generation with Reddit ChangeMyView, and (2) writing articles using New York Times' Opinion section. Automatic evaluation shows that our model significantly outperforms competitive comparisons. Human judges further confirm that our generations are more coherent with richer content.", "keyphrases": ["planning", "mixed language model", "dyploc"]} +{"id": "imamura-etal-2009-discriminative", "title": "Discriminative Approach to Predicate-Argument Structure Analysis with Zero-Anaphora Resolution", "abstract": "This paper presents a predicate-argument structure analysis that simultaneously conducts zero-anaphora resolution. By adding noun phrases as candidate arguments that are not only in the sentence of the target predicate but also outside of the sentence, our analyzer identifies arguments regardless of whether they appear in the sentence or not. Because we adopt discriminative models based on maximum entropy for argument identification, we can easily add new features. We add language model scores as well as contextual features. We also use contextual information to restrict candidate arguments.", "keyphrases": ["structure analysis", "zero-anaphora resolution", "predicate"]} +{"id": "murray-etal-2006-incorporating", "title": "Incorporating Speaker and Discourse Features into Speech Summarization", "abstract": "We have explored the usefulness of incorporating speech and discourse features in an automatic speech summarization system applied to meeting recordings from the ICSI Meetings corpus. By analyzing speaker activity, turn-taking and discourse cues, we hypothesize that such a system can outperform solely text-based methods inherited from the field of text summarization. The summarization methods are described, two evaluation methods are applied and compared, and the results clearly show that utilizing such features is advantageous and efficient. Even simple methods relying on discourse cues and speaker activity can outperform text summarization approaches.", "keyphrases": ["discourse feature", "text summarization", "broadcast news"]} +{"id": "eisele-chen-2010-multiun", "title": "MultiUN: A Multilingual Corpus from United Nation Documents", "abstract": "This paper describes the acquisition, preparation and properties of a corpus extracted from the official documents of the United Nations (UN). This corpus is available in all 6 official languages of the UN, consisting of around 300 million words per language. We describe the methods we used for crawling, document formatting, and sentence alignment. This corpus also includes a common test set for machine translation. 
We present the results of a French-Chinese machine translation experiment performed on this corpus.", "keyphrases": ["multilingual corpus", "united nations", "official document", "multiun"]}
+{"id": "aharoni-etal-2014-benchmark", "title": "A Benchmark Dataset for Automatic Detection of Claims and Evidence in the Context of Controversial Topics", "abstract": "We describe a novel and unique argumentative structure dataset. This corpus consists of data extracted from hundreds of Wikipedia articles using a meticulously monitored manual annotation process. The result is 2,683 argument elements, collected in the context of 33 controversial topics, organized under a simple claim-evidence structure. The obtained data are publicly available for academic research.", "keyphrases": ["claim", "controversial topic", "argumentative structure dataset", "academic research"]}
+{"id": "vincze-etal-2013-dependency", "title": "Dependency Parsing for Identifying Hungarian Light Verb Constructions", "abstract": "Light verb constructions (LVCs) are verb and noun combinations in which the verb has lost its meaning to some degree and the noun is used in one of its original senses. They often share their syntactic pattern with other constructions (e.g. verb-object pairs) thus LVC detection can be viewed as classifying certain syntactic patterns as light verb constructions or not. In this paper, we explore a novel way to detect LVCs in texts: we apply a dependency parser to carry out the task. We present our experiments on a Hungarian treebank, which has been manually annotated for dependency relations and light verb constructions. Our results outperformed those achieved by state-of-the-art techniques for Hungarian LVC detection, especially due to the high precision and the treatment of long-distance dependencies.", "keyphrases": ["light verb construction", "dependency parser", "hungarian lvc detection"]}
+{"id": "mizumoto-matsumoto-2016-discriminative", "title": "Discriminative Reranking for Grammatical Error Correction with Statistical Machine Translation", "abstract": "Research on grammatical error correction has received considerable attention. For dealing with all types of errors, grammatical error correction methods that employ statistical machine translation (SMT) have been proposed in recent years. An SMT system generates candidates with scores for all candidates and selects the sentence with the highest score as the correction result. However, the 1-best result of an SMT system is not always the best result. Thus, we propose a reranking approach for grammatical error correction. The reranking approach is used to re-score N-best results of the SMT and reorder the results. Our experiments show that our reranking system using parts of speech and syntactic features improves performance and achieves state-of-the-art quality, with an F0.5 score of 40.0.", "keyphrases": ["correction", "statistical machine translation", "re-ranking"]}
+{"id": "utiyama-isahara-2003-reliable", "title": "Reliable Measures for Aligning Japanese-English News Articles and Sentences", "abstract": "We have aligned Japanese and English news articles and sentences to make a large parallel corpus. We first used a method based on cross-language information retrieval (CLIR) to align the Japanese and English articles and then used a method based on dynamic programming (DP) matching to align the Japanese and English sentences in these articles. However, the results included many incorrect alignments.
To remove these, we propose two measures (scores) that evaluate the validity of alignments. The measure for article alignment uses similarities in sentences aligned by DP matching and that for sentence alignment uses similarities in articles aligned by CLIR. They enhance each other to improve the accuracy of alignment. Using these measures, we have successfully constructed a large-scale article and sentence alignment corpus available to the public.", "keyphrases": ["news article", "parallel sentence", "comparable corpora"]} +{"id": "peng-mccallum-2004-accurate", "title": "Accurate Information Extraction from Research Papers using Conditional Random Fields", "abstract": "With the increasing use of research paper search engines, such as CiteSeer, for both literature search and hiring decisions, the accuracy of such systems is of paramount importance. This paper employs Conditional Random Fields (CRFs) for the task of extracting various common fields from the headers and citation of research papers. The basic theory of CRFs is becoming well-understood, but best-practices for applying them to real-world data requires additional exploration. This paper makes an empirical exploration of several factors, including variations on Gaussian, exponential and hyperbolic-L1 priors for improved regularization, and several classes of features and Markov order. On a standard benchmark data set, we achieve new state-of-the-art performance, reducing error in average F1 by 36%, and word error rate by 78% in comparison with the previous best SVM results. Accuracy compares even more favorably against HMMs.", "keyphrases": ["research paper", "conditional random fields", "crfs"]} +{"id": "chisholm-etal-2017-learning", "title": "Learning to generate one-sentence biographies from Wikidata", "abstract": "We investigate the generation of one-sentence Wikipedia biographies from facts derived from Wikidata slot-value pairs. We train a recurrent neural network sequence-to-sequence model with attention to select facts and generate textual summaries. Our model incorporates a novel secondary objective that helps ensure it generates sentences that contain the input facts. The model achieves a BLEU score of 41, improving significantly upon the vanilla sequence-to-sequence model and scoring roughly twice that of a simple template baseline. Human preference evaluation suggests the model is nearly as good as the Wikipedia reference. Manual analysis explores content selection, suggesting the model can trade the ability to infer knowledge against the risk of hallucinating incorrect information.", "keyphrases": ["biography", "wikipedia", "natural language generation"]} +{"id": "ding-luo-2021-attentionrank", "title": "AttentionRank: Unsupervised Keyphrase Extraction using Self and Cross Attentions", "abstract": "Keyword or keyphrase extraction is to identify words or phrases presenting the main topics of a document. This paper proposes the AttentionRank, a hybrid attention model, to identify keyphrases from a document in an unsupervised manner. AttentionRank calculates self-attention and cross-attention using a pre-trained language model. The self-attention is designed to determine the importance of a candidate within the context of a sentence. The cross-attention is calculated to identify the semantic relevance between a candidate and sentences within a document. We evaluate the AttentionRank on three publicly available datasets against seven baselines. 
The results show that the AttentionRank is an effective and robust unsupervised keyphrase extraction model on both long and short documents. Source code is available on Github.", "keyphrases": ["self-attention", "language model", "attentionrank"]} +{"id": "mullenbach-etal-2018-explainable", "title": "Explainable Prediction of Medical Codes from Clinical Text", "abstract": "Clinical notes are text documents that are created by clinicians for each patient encounter. They are typically accompanied by medical codes, which describe the diagnosis and treatment. Annotating these codes is labor intensive and error prone; furthermore, the connection between the codes and the text is not annotated, obscuring the reasons and details behind specific diagnoses and treatments. We present an attentional convolutional network that predicts medical codes from clinical text. Our method aggregates information across the document using a convolutional neural network, and uses an attention mechanism to select the most relevant segments for each of the thousands of possible codes. The method is accurate, achieving precision@8 of 0.71 and a Micro-F1 of 0.54, which are both better than the prior state of the art. Furthermore, through an interpretability evaluation by a physician, we show that the attention mechanism identifies meaningful explanations for each code assignment.", "keyphrases": ["clinical text", "interpretability", "icd code", "medical code prediction", "document representation"]} +{"id": "passos-etal-2014-lexicon", "title": "Lexicon Infused Phrase Embeddings for Named Entity Resolution", "abstract": "Most state-of-the-art approaches for named-entity recognition (NER) use semi supervised information in the form of word clusters and lexicons. Recently neural network-based language models have been explored, as they as a byproduct generate highly informative vector representations for words, known as word embeddings. In this paper we present two contributions: a new form of learning word embeddings that can leverage information from relevant lexicons to improve the representations, and the first system to use neural word embeddings to achieve state-of-the-art results on named-entity recognition in both CoNLL and Ontonotes NER. Our system achieves an F1 score of 90.90 on the test set for CoNLL 2003---significantly better than any previous system trained on public data, and matching a system employing massive private industrial query-log data.", "keyphrases": ["word embedding", "hidden markov models", "ner system", "task-specific resource", "entity recognition"]} +{"id": "peng-etal-2004-chinese", "title": "Chinese Segmentation and New Word Detection using Conditional Random Fields", "abstract": "Chinese word segmentation is a difficult, important and widely-studied sequence modeling problem. This paper demonstrates the ability of linear-chain conditional random fields (CRFs) to perform robust and accurate Chinese word segmentation by providing a principled framework that easily supports the integration of domain knowledge in the form of multiple lexicons of characters and words. We also present a probabilistic new word detection method, which further improves performance. Our system is evaluated on four datasets used in a recent comprehensive Chinese word segmentation competition. 
State-of-the-art performance is obtained.", "keyphrases": ["new word detection", "conditional random field", "cws", "position", "character-based sequence"]} +{"id": "richman-schone-2008-mining", "title": "Mining Wiki Resources for Multilingual Named Entity Recognition", "abstract": "In this paper, we describe a system by which the multilingual characteristics of Wikipedia can be utilized to annotate a large corpus of text with Named Entity Recognition (NER) tags requiring minimal human intervention and no linguistic expertise. This process, though of value in languages for which resources exist, is particularly useful for less commonly taught languages. We show how the Wikipedia format can be used to identify possible named entities and discuss in detail the process by which we use the Category structure inherent to Wikipedia to determine the named entity type of a proposed entity. We further describe the methods by which English language data can be used to bootstrap the NER process in other languages. We demonstrate the system by using the generated corpus as training sets for a variant of BBN's Identifinder in French, Ukrainian, Spanish, Polish, Russian, and Portuguese, achieving overall F-scores as high as 84.7% on independent, human-annotated corpora, comparable to a system trained on up to 40,000 words of human-annotated newswire.", "keyphrases": ["wikipedia", "entity type", "other language", "human-annotated corpora"]} +{"id": "gaddy-klein-2020-digital", "title": "Digital Voicing of Silent Speech", "abstract": "In this paper, we consider the task of digitally voicing silent speech, where silently mouthed words are converted to audible speech based on electromyography (EMG) sensor measurements that capture muscle impulses. While prior work has focused on training speech synthesis models from EMG collected during vocalized speech, we are the first to train from EMG collected during silently articulated speech. We introduce a method of training on silent EMG by transferring audio targets from vocalized to silent signals. Our method greatly improves intelligibility of audio generated from silent EMG compared to a baseline that only trains with vocalized data, decreasing transcription word error rate from 64% to 4% in one data condition and 88% to 68% in another. To spur further development on this task, we share our new dataset of silent and vocalized facial EMG measurements.", "keyphrases": ["silent speech", "signal", "intelligibility"]} +{"id": "cohan-etal-2020-specter", "title": "SPECTER: Document-level Representation Learning using Citation-informed Transformers", "abstract": "Representation learning is a critical ingredient for natural language processing systems. Recent Transformer language models like BERT learn powerful textual representations, but these models are targeted towards token- and sentence-level training objectives and do not leverage information on inter-document relatedness, which limits their document-level representation power. For applications on scientific documents, such as classification and recommendation, accurate embeddings of documents are a necessity. We propose SPECTER, a new method to generate document-level embedding of scientific papers based on pretraining a Transformer language model on a powerful signal of document-level relatedness: the citation graph. Unlike existing pretrained language models, Specter can be easily applied to downstream applications without task-specific fine-tuning. 
Additionally, to encourage further research on document-level models, we introduce SciDocs, a new evaluation benchmark consisting of seven document-level tasks ranging from citation prediction, to document classification and recommendation. We show that Specter outperforms a variety of competitive baselines on the benchmark.", "keyphrases": ["scientific document", "recommendation", "document-level embedding", "citation graph", "specter"]} +{"id": "pimentel-etal-2020-pareto", "title": "Pareto Probing: Trading Off Accuracy for Complexity", "abstract": "The question of how to probe contextual word representations in a way that is principled and useful has seen significant recent attention. In our contribution to this discussion, we argue, first, for a probe metric that reflects the trade-off between probe complexity and performance: the Pareto hypervolume. To measure complexity, we present a number of parametric and non-parametric metrics. Our experiments with such metrics show that probe's performance curves often fail to align with widely accepted rankings between language representations (with, e.g., non-contextual representations outperforming contextual ones). These results lead us to argue, second, that common simplistic probe tasks such as POS labeling and dependency arc labeling, are inadequate to evaluate the properties encoded in contextual word representations. We propose full dependency parsing as an example probe task, and demonstrate it with the Pareto hypervolume. In support of our arguments, the results of this illustrative experiment conform closer to accepted rankings among contextual word representations.", "keyphrases": ["probe", "complexity", "trade-off", "pareto hypervolume"]} +{"id": "wilcox-etal-2019-structural", "title": "Structural Supervision Improves Learning of Non-Local Grammatical Dependencies", "abstract": "State-of-the-art LSTM language models trained on large corpora learn sequential contingencies in impressive detail, and have been shown to acquire a number of non-local grammatical dependencies with some success. Here we investigate whether supervision with hierarchical structure enhances learning of a range of grammatical dependencies, a question that has previously been addressed only for subject-verb agreement. Using controlled experimental methods from psycholinguistics, we compare the performance of word-based LSTM models versus Recurrent Neural Network Grammars (RNNGs) (Dyer et al. 2016) which represent hierarchical syntactic structure and use neural control to deploy it in left-to-right processing, on two classes of non-local grammatical dependencies in English\u2014Negative Polarity licensing and Filler-Gap Dependencies\u2014tested in a range of configurations. Using the same training data for both models, we find that the RNNG outperforms the LSTM on both types of grammatical dependencies and even learns many of the Island Constraints on the filler-gap dependency. Structural supervision thus provides data efficiency advantages over purely string-based training of neural language models in acquiring human-like generalizations about non-local grammatical dependencies.", "keyphrases": ["non-local grammatical dependency", "language model", "filler-gap dependency", "structural supervision"]} +{"id": "andersen-etal-2013-developing", "title": "Developing and testing a self-assessment and tutoring system", "abstract": "Automated feedback on writing may be a useful complement to teacher comments in the process of learning a foreign language. 
This paper presents a self-assessment and tutoring system which combines an holistic score with detection and correction of frequent errors and furthermore provides a qualitative assessment of each individual sentence, thus making the language learner aware of potentially problematic areas rather than providing a panacea. The system has been tested by learners in a range of educational institutions, and their feedback has guided its development.", "keyphrases": ["tutoring system", "assessment", "language learner"]} +{"id": "liang-etal-2007-infinite", "title": "The Infinite PCFG Using Hierarchical Dirichlet Processes", "abstract": "We present a nonparametric Bayesian model of tree structures based on the hierarchical Dirichlet process (HDP). Our HDP-PCFG model allows the complexity of the grammar to grow as more training data is available. In addition to presenting a fully Bayesian model for the PCFG, we also develop an efficient variational inference procedure. On synthetic data, we recover the correct grammar without having to specify its complexity in advance. We also show that our techniques can be applied to full-scale parsing applications by demonstrating its effectiveness in learning state-split grammars.", "keyphrases": ["infinite pcfg", "hierarchical dirichlet process", "bayesian model", "variable grammar"]} +{"id": "ramachandran-etal-2017-unsupervised", "title": "Unsupervised Pretraining for Sequence to Sequence Learning", "abstract": "This work presents a general unsupervised learning method to improve the accuracy of sequence to sequence (seq2seq) models. In our method, the weights of the encoder and decoder of a seq2seq model are initialized with the pretrained weights of two language models and then fine-tuned with labeled data. We apply this method to challenging benchmarks in machine translation and abstractive summarization and find that it significantly improves the subsequent supervised models. Our main result is that pretraining improves the generalization of seq2seq models. We achieve state-of-the-art results on the WMT English\u2192German task, surpassing a range of methods using both phrase-based machine translation and neural machine translation. Our method achieves a significant improvement of 1.3 BLEU from the previous best models on both WMT'14 and WMT'15 English\u2192German. We also conduct human evaluations on abstractive summarization and find that our method outperforms a purely supervised learning baseline in a statistically significant manner.", "keyphrases": ["seq2seq model", "language model", "fine-tune", "unsupervised pretraining"]} +{"id": "durmus-etal-2019-role", "title": "The Role of Pragmatic and Discourse Context in Determining Argument Impact", "abstract": "Research in the social sciences and psychology has shown that the persuasiveness of an argument depends not only on the language employed, but also on attributes of the source/communicator, the audience, and the appropriateness and strength of the argument's claims given the pragmatic and discourse context of the argument. Among these characteristics of persuasive arguments, prior work in NLP does not explicitly investigate the effect of the pragmatic and discourse context when determining argument quality. This paper presents a new dataset to initiate the study of this aspect of argumentation: it consists of a diverse collection of arguments covering 741 controversial topics and comprising over 47,000 claims.
We further propose predictive models that incorporate the pragmatic and discourse context of argumentative claims and show that they outperform models that rely only on claim-specific linguistic features for predicting the perceived impact of individual claims within a particular line of argument.", "keyphrases": ["discourse context", "argument impact", "influence"]} +{"id": "li-yarowsky-2008-mining", "title": "Mining and Modeling Relations between Formal and Informal Chinese Phrases from Web Corpora", "abstract": "We present a novel method for discovering and modeling the relationship between informal Chinese expressions (including colloquialisms and instant-messaging slang) and their formal equivalents. Specifically, we proposed a bootstrapping procedure to identify a list of candidate informal phrases in web corpora. Given an informal phrase, we retrieve contextual instances from the web using a search engine, generate hypotheses of formal equivalents via this data, and rank the hypotheses using a conditional log-linear model. In the log-linear model, we incorporate as feature functions both rule-based intuitions and data co-occurrence phenomena (either as an explicit or indirect definition, or through formal/informal usages occurring in free variation in a discourse). We test our system on manually collected test examples, and find that the (formal-informal) relationship discovery and extraction process using our method achieves an average 1-best precision of 62%. Given the ubiquity of informal conversational style on the internet, this work has clear applications for text normalization in text-processing systems including machine translation aspiring to broad coverage.", "keyphrases": ["chinese", "web corpora", "formal equivalent", "conditional log-linear model", "informal word"]} +{"id": "zhou-etal-2021-amr", "title": "AMR Parsing with Action-Pointer Transformer", "abstract": "Abstract Meaning Representation parsing is a sentence-to-graph prediction task where target nodes are not explicitly aligned to sentence tokens. However, since graph nodes are semantically based on one or more sentence tokens, implicit alignments can be derived. Transition-based parsers operate over the sentence from left to right, capturing this inductive bias via alignments at the cost of limited expressiveness. In this work, we propose a transition-based system that combines hard-attention over sentences with a target-side action pointer mechanism to decouple source tokens from node representations and address alignments. We model the transitions as well as the pointer mechanism through straightforward modifications within a single Transformer architecture. Parser state and graph structure information are efficiently encoded using attention heads. We show that our action-pointer approach leads to increased expressiveness and attains large gains (+1.6 points) against the best transition-based AMR parser in very similar conditions. 
While using no graph re-categorization, our single model yields the second best Smatch score on AMR 2.0 (81.8), which is further improved to 83.4 with silver data and ensemble decoding.", "keyphrases": ["transition-based amr parser", "amr", "pre-trained language model"]} +{"id": "gu-etal-2019-improved", "title": "Improved Zero-shot Neural Machine Translation via Ignoring Spurious Correlations", "abstract": "Zero-shot translation, translating between language pairs on which a Neural Machine Translation (NMT) system has never been trained, is an emergent property when training the system in multilingual settings. However, naive training for zero-shot NMT easily fails, and is sensitive to hyper-parameter setting. The performance typically lags far behind the more conventional pivot-based approach which translates twice using a third language as a pivot. In this work, we address the degeneracy problem due to capturing spurious correlations by quantitatively analyzing the mutual information between language IDs of the source and decoded sentences. Inspired by this analysis, we propose to use two simple but effective approaches: (1) decoder pre-training; (2) back-translation. These methods show significant improvement (4-22 BLEU points) over the vanilla zero-shot translation on three challenging multilingual datasets, and achieve similar or better results than the pivot-based approach.", "keyphrases": ["zero-shot translation", "different language", "mnmt model"]} +{"id": "de-marneffe-etal-2014-universal", "title": "Universal Stanford dependencies: A cross-linguistic typology", "abstract": "Revisiting the now de facto standard Stanford dependency representation, we propose an improved taxonomy to capture grammatical relations across languages, including morphologically rich ones. We suggest a two-layered taxonomy: a set of broadly attested universal grammatical relations, to which language-specific relations can be added. We emphasize the lexicalist stance of the Stanford Dependencies, which leads to a particular, partially new treatment of compounding, prepositions, and morphology. We show how existing dependency schemes for several languages map onto the universal taxonomy proposed here and close with consideration of practical implications of dependency representation choices for NLP applications, in particular parsing.", "keyphrases": ["morphology", "rich one", "universal dependencies", "annotation scheme", "final sentence"]} +{"id": "grundkiewicz-etal-2019-neural", "title": "Neural Grammatical Error Correction Systems with Unsupervised Pre-training on Synthetic Data", "abstract": "Considerable effort has been made to address the data sparsity problem in neural grammatical error correction. In this work, we propose a simple and surprisingly effective unsupervised synthetic error generation method based on confusion sets extracted from a spellchecker to increase the amount of training data. Synthetic data is used to pre-train a Transformer sequence-to-sequence model, which not only improves over a strong baseline trained on authentic error-annotated data, but also enables the development of a practical GEC system in a scenario where little genuine error-annotated data is available. The developed systems placed first in the BEA19 shared task, achieving 69.47 and 64.24 F_0.5 in the restricted and low-resource tracks respectively, both on the W&I+LOCNESS test set.
On the popular CoNLL 2014 test set, we report state-of-the-art results of 64.16 M2 for the submitted system, and 61.30 M2 for the constrained system trained on the NUCLE and Lang-8 data.", "keyphrases": ["error correction", "synthetic data", "confusion", "spellchecker", "fine-tuning"]} +{"id": "kang-etal-2018-dataset", "title": "A Dataset of Peer Reviews (PeerRead): Collection, Insights and NLP Applications", "abstract": "Peer reviewing is a central component in the scientific publishing process. We present the first public dataset of scientific peer reviews available for research purposes (PeerRead v1), providing an opportunity to study this important artifact. The dataset consists of 14.7K paper drafts and the corresponding accept/reject decisions in top-tier venues including ACL, NIPS and ICLR. The dataset also includes 10.7K textual peer reviews written by experts for a subset of the papers. We describe the data collection process and report interesting observed phenomena in the peer reviews. We also propose two novel NLP tasks based on this dataset and provide simple baseline models. In the first task, we show that simple models can predict whether a paper is accepted with up to 21% error reduction compared to the majority baseline. In the second task, we predict the numerical scores of review aspects and show that simple models can outperform the mean baseline for aspects with high variance such as `originality' and `impact'.", "keyphrases": ["peerread", "public dataset", "research purpose", "venue"]} +{"id": "reiter-2018-structured", "title": "A Structured Review of the Validity of BLEU", "abstract": "The BLEU metric has been widely used in NLP for over 15 years to evaluate NLP systems, especially in machine translation and natural language generation. I present a structured review of the evidence on whether BLEU is a valid evaluation technique\u2014in other words, whether BLEU scores correlate with real-world utility and user-satisfaction of NLP systems; this review covers 284 correlations reported in 34 papers. Overall, the evidence supports using BLEU for diagnostic evaluation of MT systems (which is what it was originally proposed for), but does not support using BLEU outside of MT, for evaluation of individual texts, or for scientific hypothesis testing.", "keyphrases": ["structured review", "rouge", "human judgment"]} +{"id": "muller-etal-2020-domain", "title": "Domain Robustness in Neural Machine Translation", "abstract": "Translating text that diverges from the training domain is a key challenge for neural machine translation (NMT). Domain robustness - the generalization of models to unseen test domains - is low compared to statistical machine translation. In this paper, we investigate the performance of NMT on out-of-domain test sets, and ways to improve it. \nWe observe that hallucination (translations that are fluent but unrelated to the source) is common in out-of-domain settings, and we empirically compare methods that improve adequacy (reconstruction), out-of-domain translation (subword regularization), or robustness against adversarial examples (defensive distillation), as well as noisy channel models. \nIn experiments on German to English OPUS data, and German to Romansh, a low-resource scenario, we find that several methods improve domain robustness, reconstruction standing out as a method that not only improves automatic scores, but also shows improvements in a manual assessment of adequacy, albeit at some loss in fluency.
However, out-of-domain performance is still relatively low and domain robustness remains an open problem.", "keyphrases": ["neural machine translation", "out-of-domain translation", "adversarial example", "domain robustness"]} +{"id": "baldini-soares-etal-2019-matching", "title": "Matching the Blanks: Distributional Similarity for Relation Learning", "abstract": "General purpose relation extractors, which can model arbitrary relations, are a core aspiration in information extraction. Efforts have been made to build general purpose extractors that represent relations with their surface forms, or which jointly embed surface forms with relations from an existing knowledge graph. However, both of these approaches are limited in their ability to generalize. In this paper, we build on extensions of Harris' distributional hypothesis to relations, as well as recent advances in learning text representations (specifically, BERT), to build task agnostic relation representations solely from entity-linked text. We show that these representations significantly outperform previous work on exemplar based relation extraction (FewRel) even without using any of that task's training data. We also show that models initialized with our task agnostic representations, and then tuned on supervised relation extraction datasets, significantly outperform the previous methods on SemEval 2010 Task 8, KBP37, and TACRED", "keyphrases": ["relation extraction", "fewrel", "language model", "distant supervision", "external knowledge"]} +{"id": "hamilton-etal-2016-inducing", "title": "Inducing Domain-Specific Sentiment Lexicons from Unlabeled Corpora", "abstract": "A word's sentiment depends on the domain in which it is used. Computational social science research thus requires sentiment lexicons that are specific to the domains being studied. We combine domain-specific word embeddings with a label propagation framework to induce accurate domain-specific sentiment lexicons using small sets of seed words. We show that our approach achieves state-of-the-art performance on inducing sentiment lexicons from domain-specific corpora and that our purely corpus-based approach outperforms methods that rely on hand-curated resources (e.g., WordNet). Using our framework, we induce and release historical sentiment lexicons for 150 years of English and community-specific sentiment lexicons for 250 online communities from the social media forum Reddit. The historical lexicons we induce show that more than 5% of sentiment-bearing (non-neutral) English words completely switched polarity during the last 150 years, and the community-specific lexicons highlight how sentiment varies drastically between different communities.", "keyphrases": ["word embedding", "small set", "hand-curated resource"]} +{"id": "takala-etal-2014-gold", "title": "Gold-standard for Topic-specific Sentiment Analysis of Economic Texts", "abstract": "Public opinion, as measured by media sentiment, can be an important indicator in the financial and economic context. These are domains where traditional sentiment estimation techniques often struggle, and existing annotated sentiment text collections are of less use. Though considerable progress has been made in analyzing sentiments at sentence-level, performing topic-dependent sentiment analysis is still a relatively uncharted territory. The computation of topic-specific sentiments has commonly relied on naive aggregation methods without much consideration to the relevance of the sentences to the given topic. 
Clearly, the use of such methods leads to a substantial increase in noise-to-signal ratio. To foster development of methods for measuring topic-specific sentiments in documents, we have collected and annotated a corpus of financial news that have been sampled from Thomson Reuters newswire. In this paper, we describe the annotation process and evaluate the quality of the dataset using a number of inter-annotator agreement metrics. The annotations of 297 documents and over 9000 sentences can be used for research purposes when developing methods for detecting topic-wise sentiment in financial text.", "keyphrases": ["sentiment analysis", "financial news", "thomson reuters newswire", "different topic"]} +{"id": "ouchi-etal-2018-span", "title": "A Span Selection Model for Semantic Role Labeling", "abstract": "We present a simple and accurate span-based model for semantic role labeling (SRL). Our model directly takes into account all possible argument spans and scores them for each label. At decoding time, we greedily select higher scoring labeled spans. One advantage of our model is to allow us to design and use span-level features, that are difficult to use in token-based BIO tagging approaches. Experimental results demonstrate that our ensemble model achieves the state-of-the-art results, 87.4 F1 and 87.0 F1 on the CoNLL-2005 and 2012 datasets, respectively.", "keyphrases": ["span", "semantic role labeling", "state-of-the-art result", "srl model"]} +{"id": "cotterell-etal-2018-languages", "title": "Are All Languages Equally Hard to Language-Model?", "abstract": "For general modeling methods applied to diverse languages, a natural question is: how well should we expect our models to work on languages with differing typological profiles? In this work, we develop an evaluation framework for fair cross-linguistic comparison of language models, using translated text so that all models are asked to predict approximately the same information. We then conduct a study on 21 languages, demonstrating that in some languages, the textual expression of the information is harder to predict with both n-gram and LSTM language models. We show complex inflectional morphology to be a cause of performance differences among languages.", "keyphrases": ["language modeling", "morphology", "typology effect"]} +{"id": "nilsson-etal-2006-graph", "title": "Graph Transformations in Data-Driven Dependency Parsing", "abstract": "Transforming syntactic representations in order to improve parsing accuracy has been exploited successfully in statistical parsing systems using constituency-based representations. In this paper, we show that similar transformations can give substantial improvements also in data-driven dependency parsing. Experiments on the Prague Dependency Treebank show that systematic transformations of coordinate structures and verb groups result in a 10% error reduction for a deterministic data-driven dependency parser. Combining these transformations with previously proposed techniques for recovering non-projective dependencies leads to state-of-the-art accuracy for the given data set.", "keyphrases": ["transformation", "data-driven dependency parsing", "verb group", "maltparser"]} +{"id": "roy-roth-2015-solving", "title": "Solving General Arithmetic Word Problems", "abstract": "This paper presents a novel approach to automatically solving arithmetic word problems. 
This is the first algorithmic approach that can handle arithmetic problems with multiple steps and operations, without depending on additional annotations or predefined templates. We develop a theory for expression trees that can be used to represent and evaluate the target arithmetic expressions; we use it to uniquely decompose the target arithmetic problem to multiple classification problems; we then compose an expression tree, combining these with world knowledge through a constrained inference framework. Our classifiers gain from the use of quantity schemas that supports better extraction of features. Experimental results show that our method outperforms existing systems, achieving state of the art performance on benchmark datasets of arithmetic word problems.", "keyphrases": ["word problem", "quantity", "many researcher", "target math problem", "mwp"]} +{"id": "chen-etal-2017-teacher", "title": "A Teacher-Student Framework for Zero-Resource Neural Machine Translation", "abstract": "While end-to-end neural machine translation (NMT) has made remarkable progress recently, it still suffers from the data scarcity problem for low-resource language pairs and domains. In this paper, we propose a method for zero-resource NMT by assuming that parallel sentences have close probabilities of generating a sentence in a third language. Based on the assumption, our method is able to train a source-to-target NMT model (\u201cstudent\u201d) without parallel corpora available guided by an existing pivot-to-target NMT model (\u201cteacher\u201d) on a source-pivot parallel corpus. Experimental results show that the proposed method significantly improves over a baseline pivot-based model by +3.0 BLEU points across various language pairs.", "keyphrases": ["teacher-student framework", "neural machine translation", "third language", "student", "zero-shot translation"]} +{"id": "tan-etal-2019-learning", "title": "Learning to Navigate Unseen Environments: Back Translation with Environmental Dropout", "abstract": "A grand goal in AI is to build a robot that can accurately navigate based on natural language instructions, which requires the agent to perceive the scene, understand and ground language, and act in the real-world environment. One key challenge here is to learn to navigate in new environments that are unseen during training. Most of the existing approaches perform dramatically worse in unseen environments as compared to seen ones. In this paper, we present a generalizable navigational agent. Our agent is trained in two stages. The first stage is training via mixed imitation and reinforcement learning, combining the benefits from both off-policy and on-policy optimization. The second stage is fine-tuning via newly-introduced `unseen' triplets (environment, path, instruction). To generate these unseen triplets, we propose a simple but effective `environmental dropout' method to mimic unseen environments, which overcomes the problem of limited seen environment variability. Next, we apply semi-supervised learning (via back-translation) on these dropout environments to generate new paths and instructions. 
Empirically, we show that our agent is substantially better at generalizability when fine-tuned with these triplets, outperforming the state-of-the-art approaches by a large margin on the private unseen test set of the Room-to-Room task, and achieving the top rank on the leaderboard.", "keyphrases": ["unseen environment", "environmental dropout", "natural language instruction", "path", "learning method"]} +{"id": "liu-etal-2011-insertion", "title": "Insertion, Deletion, or Substitution? Normalizing Text Messages without Pre-categorization nor Supervision", "abstract": "Most text message normalization approaches are based on supervised learning and rely on human labeled training data. In addition, the nonstandard words are often categorized into different types and specific models are designed to tackle each type. In this paper, we propose a unified letter transformation approach that requires neither pre-categorization nor human supervision. Our approach models the generation process from the dictionary words to nonstandard tokens under a sequence labeling framework, where each letter in the dictionary word can be retained, removed, or substituted by other letters/digits. To avoid the expensive and time consuming hand labeling process, we automatically collected a large set of noisy training pairs using a novel web-based approach and performed character-level alignment for model training. Experiments on both Twitter and SMS messages show that our system significantly outperformed the state-of-the-art deletion-based abbreviation system and the jazzy spell checker (absolute accuracy gain of 21.69% and 18.16% over jazzy spell checker on the two test sets respectively).", "keyphrases": ["substitution", "pre-categorization", "noisy training pair", "sms", "conditional random field"]} +{"id": "seo-etal-2018-phrase", "title": "Phrase-Indexed Question Answering: A New Challenge for Scalable Document Comprehension", "abstract": "We formalize a new modular variant of current question answering tasks by enforcing complete independence of the document encoder from the question encoder. This formulation addresses a key challenge in machine comprehension by building a standalone representation of the document discourse. It additionally leads to a significant scalability advantage since the encoding of the answer candidate phrases in the document can be pre-computed and indexed offline for efficient retrieval. We experiment with baseline models for the new task, which achieve a reasonable accuracy but significantly underperform unconstrained QA models. We invite the QA research community to engage in Phrase-Indexed Question Answering (PIQA, pika) for closing the gap. The leaderboard is at: ", "keyphrases": ["question answering", "encoding", "text span"]} +{"id": "gabriel-etal-2021-go", "title": "GO FIGURE: A Meta Evaluation of Factuality in Summarization", "abstract": "While neural language models can generate text with remarkable fluency and coherence, controlling for factual correctness in generation remains an open research question. This major discrepancy between the surface-level fluency and the content-level correctness of neural generation has motivated a new line of research that seeks automatic metrics for evaluating the factuality of machine text. In this paper, we introduce GO FIGURE, a meta-evaluation framework for evaluating factuality evaluation metrics.
We propose five necessary and intuitive conditions to evaluate factuality metrics on diagnostic factuality data across three different summarization tasks. Our benchmark analysis on ten factuality metrics reveals that our meta-evaluation framework provides a robust and efficient evaluation that is extensible to multiple types of factual consistency and standard generation metrics, including QA metrics. It also reveals that while QA metrics generally improve over standard metrics that measure factuality across domains, performance is highly dependent on the way in which questions are generated.", "keyphrases": ["factuality", "meta-evaluation framework", "evaluation metric"]} +{"id": "szpektor-dagan-2008-learning", "title": "Learning Entailment Rules for Unary Templates", "abstract": "Most work on unsupervised entailment rule acquisition focused on rules between templates with two variables, ignoring unary rules - entailment rules between templates with a single variable. In this paper we investigate two approaches for unsupervised learning of such rules and compare the proposed methods with a binary rule learning method. The results show that the learned unary rule-sets outperform the binary rule-set. In addition, a novel directional similarity measure for learning entailment, termed Balanced-Inclusion, is the best performing measure.", "keyphrases": ["entailment rule", "unsupervised learning", "similarity measure"]} +{"id": "guo-etal-2021-parameter", "title": "Parameter-Efficient Transfer Learning with Diff Pruning", "abstract": "The large size of pretrained networks makes them difficult to deploy for multiple tasks in storage-constrained settings. Diff pruning enables parameter-efficient transfer learning that scales well with new tasks. The approach learns a task-specific \u201cdiff\u201d vector that extends the original pretrained parameters. This diff vector is adaptively pruned during training with a differentiable approximation to the L0-norm penalty to encourage sparsity. As the number of tasks increases, diff pruning remains parameter-efficient, as it requires storing only a small diff vector for each task. Since it does not require access to all tasks during training, it is attractive in on-device deployment settings where tasks arrive in stream or even from different providers. Diff pruning can match the performance of finetuned baselines on the GLUE benchmark while only modifying 0.5% of the pretrained model's parameters per task and scales favorably in comparison to popular pruning approaches.", "keyphrases": ["diff pruning", "new task", "parameter-efficient transfer", "difference-vector"]} +{"id": "mihalcea-2005-unsupervised", "title": "Unsupervised Large-Vocabulary Word Sense Disambiguation with Graph-based Algorithms for Sequence Data Labeling", "abstract": "This paper introduces a graph-based algorithm for sequence data labeling, using random walks on graphs encoding label dependencies. The algorithm is illustrated and tested in the context of an unsupervised word sense disambiguation problem, and shown to significantly outperform the accuracy achieved through individual label assignment, as measured on standard sense-annotated data sets.", "keyphrases": ["word sense disambiguation", "graph-based algorithm", "sequence data labeling", "wsd", "node"]} +{"id": "jarvis-etal-2013-maximizing", "title": "Maximizing Classification Accuracy in Native Language Identification", "abstract": "This paper reports our contribution to the 2013 NLI Shared Task. 
The purpose of the task was to train a machine-learning system to identify the native-language affiliations of 1,100 texts written in English by nonnative speakers as part of a high-stakes test of general academic English proficiency. We trained our system on the new TOEFL11 corpus, which includes 11,000 essays written by nonnative speakers from 11 native-language backgrounds. Our final system used an SVM classifier with over 400,000 unique features consisting of lexical and POS n-grams occurring in at least two texts in the training set. Our system identified the correct native-language affiliations of 83.6% of the texts in the test set. This was the highest classification accuracy achieved in the 2013 NLI Shared Task.", "keyphrases": ["classification accuracy", "native language identification", "lexical feature", "part-of-speech tag"]} +{"id": "daume-iii-etal-2010-frustratingly", "title": "Frustratingly Easy Semi-Supervised Domain Adaptation", "abstract": "In this work, we propose a semi-supervised extension to a well-known supervised domain adaptation approach (EA) (Daume III, 2007). Our proposed approach (EA++) builds on the notion of augmented space (introduced in EA) and harnesses unlabeled data in target domain to ameliorate the transfer of information from source to target. This semi-supervised approach to domain adaptation is extremely simple to implement, and can be applied as a pre-processing step to any supervised learner. Experimental results on sequential labeling tasks demonstrate the efficacy of the proposed method.", "keyphrases": ["domain adaptation", "unlabeled data", "performance drop"]} +{"id": "de-vassimon-manela-etal-2021-stereotype", "title": "Stereotype and Skew: Quantifying Gender Bias in Pre-trained and Fine-tuned Language Models", "abstract": "This paper proposes two intuitive metrics, skew and stereotype, that quantify and analyse the gender bias present in contextual language models when tackling the WinoBias pronoun resolution task. We find evidence that gender stereotype correlates approximately negatively with gender skew in out-of-the-box models, suggesting that there is a trade-off between these two forms of bias. We investigate two methods to mitigate bias. The first approach is an online method which is effective at removing skew at the expense of stereotype. The second, inspired by previous work on ELMo, involves the fine-tuning of BERT using an augmented gender-balanced dataset. We show that this reduces both skew and stereotype relative to its unaugmented fine-tuned counterpart. However, we find that existing gender bias benchmarks do not fully probe professional bias as pronoun resolution may be obfuscated by cross-correlations from other manifestations of gender prejudice.", "keyphrases": ["skew", "gender bias", "language model", "stereotype"]} +{"id": "rosario-hearst-2005-multi", "title": "Multi-way Relation Classification: Application to Protein-Protein Interactions", "abstract": "We address the problem of multi-way relation classification, applied to identification of the interactions between proteins in bioscience text. A major impediment to such work is the acquisition of appropriately labeled training data; for our experiments we have identified a database that serves as a proxy for training data. We use two graphical models and a neural net for the classification of the interactions, achieving an accuracy of 64% for a 10-way distinction between relation types.
We also provide evidence that the exploitation of the sentences surrounding a citation to a paper can yield higher accuracy than other sentences.", "keyphrases": ["relation classification", "protein", "graphical model"]} +{"id": "nguyen-etal-2017-aggregating", "title": "Aggregating and Predicting Sequence Labels from Crowd Annotations", "abstract": "Despite sequences being core to NLP, scant work has considered how to handle noisy sequence labels from multiple annotators for the same text. Given such annotations, we consider two complementary tasks: (1) aggregating sequential crowd labels to infer a best single set of consensus annotations; and (2) using crowd annotations as training data for a model that can predict sequences in unannotated text. For aggregation, we propose a novel Hidden Markov Model variant. To predict sequences in unannotated text, we propose a neural approach using Long Short Term Memory. We evaluate a suite of methods across two different applications and text genres: Named-Entity Recognition in news articles and Information Extraction from biomedical abstracts. Results show improvement over strong baselines. Our source code and data are available online.", "keyphrases": ["crowd annotation", "hidden markov model", "information extraction", "labeling task", "noise"]} +{"id": "nivre-2003-efficient", "title": "An Efficient Algorithm for Projective Dependency Parsing", "abstract": "This paper presents a deterministic parsing algorithm for projective dependency grammar. The running time of the algorithm is linear in the length of the input string, and the dependency graph produced is guaranteed to be projective and acyclic. The algorithm has been experimentally evaluated in parsing unrestricted Swedish text, achieving an accuracy above 85% with a very simple grammar.", "keyphrases": ["projective dependency parsing", "transition-based parser", "nivre", "linear time", "natural language text"]} +{"id": "poesio-etal-2004-learning", "title": "Learning to Resolve Bridging References", "abstract": "We use machine learning techniques to find the best combination of local focus and lexical distance features for identifying the anchor of mereological bridging references. We find that using first mention, utterance distance, and lexical distance computed using either Google or WordNet results in an accuracy significantly higher than obtained in previous experiments.", "keyphrases": ["bridging reference", "anaphora resolution", "antecedent", "semantic relatedness", "web"]} +{"id": "liang-etal-2020-xglue", "title": "XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation", "abstract": "In this paper, we introduce XGLUE, a new benchmark dataset to train large-scale cross-lingual pre-trained models using multilingual and bilingual corpora, and evaluate their performance across a diverse set of cross-lingual tasks. Compared to GLUE (Wang et al., 2019), which is labeled in English and includes natural language understanding tasks only, XGLUE has three main advantages: (1) it provides two corpora with different sizes for cross-lingual pre-training; (2) it provides 11 diversified tasks that cover both natural language understanding and generation scenarios; (3) for each task, it provides labeled data in multiple languages. We extend a recent cross-lingual pre-trained model Unicoder (Huang et al., 2019) to cover both understanding and generation tasks, which is evaluated on XGLUE as a strong baseline.
We also evaluate the base versions (12-layer) of Multilingual BERT, XLM and XLM-R for comparison.", "keyphrases": ["cross-lingual pre-training", "pre-trained model", "diverse set", "generation task"]} +{"id": "bouamor-etal-2019-madar", "title": "The MADAR Shared Task on Arabic Fine-Grained Dialect Identification", "abstract": "In this paper, we present the results and findings of the MADAR Shared Task on Arabic Fine-Grained Dialect Identification. This shared task was organized as part of The Fourth Arabic Natural Language Processing Workshop, collocated with ACL 2019. The shared task includes two subtasks: the MADAR Travel Domain Dialect Identification subtask (Subtask 1) and the MADAR Twitter User Dialect Identification subtask (Subtask 2). This shared task is the first to target a large set of dialect labels at the city and country levels. The data for the shared task was created or collected under the Multi-Arabic Dialect Applications and Resources (MADAR) project. A total of 21 teams from 15 countries participated in the shared task.", "keyphrases": ["madar shared task", "arabic", "dialect"]} +{"id": "hiraoka-etal-2020-optimizing", "title": "Optimizing Word Segmentation for Downstream Task", "abstract": "In traditional NLP, we tokenize a given sentence as a preprocessing, and thus the tokenization is unrelated to a target downstream task. To address this issue, we propose a novel method to explore a tokenization which is appropriate for the downstream task. Our proposed method, optimizing tokenization (OpTok), is trained to assign a high probability to such appropriate tokenization based on the downstream task loss. OpTok can be used for any downstream task which uses a vector representation of a sentence such as text classification. Experimental results demonstrate that OpTok improves the performance of sentiment analysis and textual entailment. In addition, we introduce OpTok into BERT, the state-of-the-art contextualized embeddings and report a positive effect.", "keyphrases": ["downstream task", "tokenizer", "optok"]} +{"id": "marcheggiani-etal-2017-simple", "title": "A Simple and Accurate Syntax-Agnostic Neural Model for Dependency-based Semantic Role Labeling", "abstract": "We introduce a simple and accurate neural model for dependency-based semantic role labeling. Our model predicts predicate-argument dependencies relying on states of a bidirectional LSTM encoder. The semantic role labeler achieves competitive performance on English, even without any kind of syntactic information and only using local inference. However, when automatically predicted part-of-speech tags are provided as input, it substantially outperforms all previous local models and approaches the best reported results on the English CoNLL-2009 dataset. We also consider Chinese, Czech and Spanish where our approach also achieves competitive results. Syntactic parsers are unreliable on out-of-domain data, so standard (i.e., syntactically-informed) SRL models are hindered when tested in this setting. Our syntax-agnostic model appears more robust, resulting in the best reported results on standard out-of-domain test sets.", "keyphrases": ["syntax-agnostic neural model", "semantic role labeling", "dependency srl"]} +{"id": "mager-etal-2020-gpt", "title": "GPT-too: A Language-Model-First Approach for AMR-to-Text Generation", "abstract": "Abstract Meaning Representations (AMRs) are broad-coverage sentence-level semantic graphs. 
Existing approaches to generating text from AMR have focused on training sequence-to-sequence or graph-to-sequence models on AMR annotated data only. In this paper, we propose an alternative approach that combines a strong pre-trained language model with cycle consistency-based re-scoring. Despite the simplicity of the approach, our experimental results show these models outperform all previous techniques on the English LDC2017T10 dataset, including the recent use of transformer architectures. In addition to the standard evaluation metrics, we provide human evaluation experiments that further substantiate the strength of our approach.", "keyphrases": ["amr-to-text generation", "language model", "pre-trained generative model"]} +{"id": "miyao-tsujii-2008-feature", "title": "Feature Forest Models for Probabilistic HPSG Parsing", "abstract": "Probabilistic modeling of lexicalized grammars is difficult because these grammars exploit complicated data structures, such as typed feature structures. This prevents us from applying common methods of probabilistic modeling in which a complete structure is divided into sub-structures under the assumption of statistical independence among sub-structures. For example, part-of-speech tagging of a sentence is decomposed into tagging of each word, and CFG parsing is split into applications of CFG rules. These methods have relied on the structure of the target problem, namely lattices or trees, and cannot be applied to graph structures including typed feature structures. This article proposes the feature forest model as a solution to the problem of probabilistic modeling of complex data structures including typed feature structures. The feature forest model provides a method for probabilistic modeling without the independence assumption when probabilistic events are represented with feature forests. Feature forests are generic data structures that represent ambiguous trees in a packed forest structure. Feature forest models are maximum entropy models defined over feature forests. A dynamic programming algorithm is proposed for maximum entropy estimation without unpacking feature forests. Thus probabilistic modeling of any data structures is possible when they are represented by feature forests. This article also describes methods for representing HPSG syntactic structures and predicate-argument structures with feature forests. Hence, we describe a complete strategy for developing probabilistic models for HPSG parsing. The effectiveness of the proposed methods is empirically evaluated through parsing experiments on the Penn Treebank, and the promise of applicability to parsing of real-world sentences is discussed.", "keyphrases": ["hpsg", "english sentence", "head information"]} +{"id": "kim-zhang-2014-credibility", "title": "Credibility Adjusted Term Frequency: A Supervised Term Weighting Scheme for Sentiment Analysis and Text Classification", "abstract": "We provide a simple but novel supervised weighting scheme for adjusting term frequency in tf-idf for sentiment analysis and text classification. We compare our method to baseline weighting schemes and find that it outperforms them on multiple benchmarks. 
The method is robust and works well on both snippets and longer documents.", "keyphrases": ["weighting scheme", "sentiment analysis", "text classification", "tf-idf"]} +{"id": "agirre-etal-2008-improving", "title": "Improving Parsing and PP Attachment Performance with Sense Information", "abstract": "To date, parsers have made limited use of semantic information, but there is evidence to suggest that semantic features can enhance parse disambiguation. This paper shows that semantic classes help to obtain significant improvement in both parsing and PP attachment tasks. We devise a gold-standard sense- and parse tree-annotated dataset based on the intersection of the Penn Treebank and SemCor, and experiment with different approaches to both semantic representation and disambiguation. For the Bikel parser, we achieved a maximal error reduction rate over the baseline parser of 6.9% and 20.5%, for parsing and PP-attachment respectively, using an unsupervised WSD strategy. This demonstrates that word sense information can indeed enhance the performance of syntactic disambiguation.", "keyphrases": ["semantic class", "penn treebank", "pp-attachment", "prepositional phrase attachment"]} +{"id": "li-etal-2015-tree", "title": "When Are Tree Structures Necessary for Deep Learning of Representations?", "abstract": "Recursive neural models, which use syntactic parse trees to recursively generate representations bottom-up, are a popular architecture. However there have not been rigorous evaluations showing for exactly which tasks this syntax-based method is appropriate. In this paper, we benchmark recursive neural models against sequential recurrent neural models, enforcing apples-to-apples comparison as much as possible. We investigate 4 tasks: (1) sentiment classification at the sentence level and phrase level; (2) matching questions to answer phrases; (3) discourse parsing; (4) semantic relation extraction. Our goal is to understand better when, and why, recursive models can outperform simpler models. We find that recursive models help mainly on tasks (like semantic relation extraction) that require long-distance connection modeling, particularly on very long sequences. We then introduce a method for allowing recurrent models to achieve similar performance: breaking long sentences into clause-like units at punctuation and processing them separately before combining. Our results thus help understand the limitations of both classes of models, and suggest directions for improving recurrent models.", "keyphrases": ["tree structure", "recurrent model", "various task"]} +{"id": "shah-etal-2018-adversarial", "title": "Adversarial Domain Adaptation for Duplicate Question Detection", "abstract": "We address the problem of detecting duplicate questions in forums, which is an important step towards automating the process of answering new questions. As finding and annotating such potential duplicates manually is very tedious and costly, automatic methods based on machine learning are a viable alternative. However, many forums do not have annotated data, i.e., questions labeled by experts as duplicates, and thus a promising solution is to use domain adaptation from another forum that has such annotations. Here we focus on adversarial domain adaptation, deriving important findings about when it performs well and what properties of the domains are important in this regard.
Our experiments with StackExchange data show an average improvement of 5.6% over the best baseline across multiple pairs of domains.", "keyphrases": ["duplicate question detection", "adversarial domain adaptation", "target domain", "supervised training"]} +{"id": "he-etal-2008-indirect", "title": "Indirect-HMM-based Hypothesis Alignment for Combining Outputs from Machine Translation Systems", "abstract": "This paper presents a new hypothesis alignment method for combining outputs of multiple machine translation (MT) systems. An indirect hidden Markov model (IHMM) is proposed to address the synonym matching and word ordering issues in hypothesis alignment. Unlike traditional HMMs whose parameters are trained via maximum likelihood estimation (MLE), the parameters of the IHMM are estimated indirectly from a variety of sources including word semantic similarity, word surface similarity, and a distance-based distortion penalty. The IHMM-based method significantly outperforms the state-of-the-art TER-based alignment model in our experiments on NIST benchmark datasets. Our combined SMT system using the proposed method achieved the best Chinese-to-English translation result in the constrained training track of the 2008 NIST Open MT Evaluation.", "keyphrases": ["hypothesis alignment", "ihmm", "synonym matching", "translation result", "system combination"]} +{"id": "li-etal-2012-wiki", "title": "Wiki-ly Supervised Part-of-Speech Tagging", "abstract": "Despite significant recent work, purely unsupervised techniques for part-of-speech (POS) tagging have not achieved useful accuracies required by many language processing tasks. Use of parallel text between resource-rich and resource-poor languages is one source of weak supervision that significantly improves accuracy. However, parallel text is not always available and techniques for using it require multiple complex algorithmic steps. In this paper we show that we can build POS-taggers exceeding state-of-the-art bilingual methods by using simple hidden Markov models and a freely available and naturally growing resource, the Wiktionary. Across eight languages for which we have labeled data to evaluate results, we achieve accuracy that significantly exceeds best unsupervised and parallel text methods. We achieve highest accuracy reported for several languages and show that our approach yields better out-of-domain taggers than those trained using fully supervised Penn Treebank.", "keyphrases": ["markov model", "wiktionary", "dictionary", "pos tagging"]} +{"id": "rubinstein-etal-2015-well", "title": "How Well Do Distributional Models Capture Different Types of Semantic Knowledge?", "abstract": "In recent years, distributional models (DMs) have shown great success in representing lexical semantics. In this work we show that the extent to which DMs represent semantic knowledge is highly dependent on the type of knowledge. We pose the task of predicting properties of concrete nouns in a supervised setting, and compare between learning taxonomic properties (e.g., animacy) and attributive properties (e.g., size, color). We employ four state-of-the-art DMs as sources of feature representation for this task, and show that they all yield poor results when tested on attributive properties, achieving no more than an average F-score of 0.37 in the binary property prediction task, compared to 0.73 on taxonomic properties. 
Our results suggest that the distributional hypothesis may not be equally applicable to all types of semantic information.", "keyphrases": ["semantic knowledge", "taxonomic property", "attributive property", "distributional hypothesis", "word embedding"]} +{"id": "patwardhan-riloff-2009-unified", "title": "A Unified Model of Phrasal and Sentential Evidence for Information Extraction", "abstract": "Information Extraction (IE) systems that extract role fillers for events typically look at the local context surrounding a phrase when deciding whether to extract it. Often, however, role fillers occur in clauses that are not directly linked to an event word. We present a new model for event extraction that jointly considers both the local context around a phrase along with the wider sentential context in a probabilistic framework. Our approach uses a sentential event recognizer and a plausible role-filler recognizer that is conditioned on event sentences. We evaluate our system on two IE data sets and show that our model performs well in comparison to existing IE systems that rely on local phrasal context.", "keyphrases": ["sentential evidence", "information extraction", "role filler", "local context", "probabilistic framework"]} +{"id": "melamed-etal-2004-generalized", "title": "Generalized Multitext Grammars", "abstract": "Generalized Multitext Grammar (GMTG) is a synchronous grammar formalism that is weakly equivalent to Linear Context-Free Rewriting Systems (LCFRS), but retains much of the notational and intuitive simplicity of Context-Free Grammar (CFG). GMTG allows both synchronous and independent rewriting. Such flexibility facilitates more perspicuous modeling of parallel text than what is possible with other synchronous formalisms. This paper investigates the generative capacity of GMTG, proves that each component grammar of a GMTG retains its generative power, and proposes a generalization of Chomsky Normal Form, which is necessary for synchronous CKY-style parsing.", "keyphrases": ["lcfrs", "generalized multitext grammar", "machine translation"]} +{"id": "dickinson-meurers-2003-detecting", "title": "Detecting Errors in Part-of-Speech Annotation", "abstract": "We propose a new method for detecting errors in \"gold-standard\" part-of-speech annotation. The approach locates errors with high precision based on n-grams occurring in the corpus with multiple taggings. Two further techniques, closed-class analysis and finite-state tagging guide patterns, are discussed. The success of the three approaches is illustrated for the Wall Street Journal corpus as part of the Penn Treebank.", "keyphrases": ["part-of-speech annotation", "pos tag", "variation", "annotation error", "word ngram"]} +{"id": "pruthi-etal-2019-combating", "title": "Combating Adversarial Misspellings with Robust Word Recognition", "abstract": "To combat adversarial spelling mistakes, we propose placing a word recognition model in front of the downstream classifier. Our word recognition models build upon the RNN semi-character architecture, introducing several new backoff strategies for handling rare and unseen words. Trained to recognize words corrupted by random adds, drops, swaps, and keyboard mistakes, our method achieves 32% relative (and 3.3% absolute) error reduction over the vanilla semi-character model. Notably, our pipeline confers robustness on the downstream classifier, outperforming both adversarial training and off-the-shelf spell checkers.
Against a BERT model fine-tuned for sentiment analysis, a single adversarially-chosen character attack lowers accuracy from 90.3% to 45.8%. Our defense restores accuracy to 75%. Surprisingly, better word recognition does not always entail greater robustness. Our analysis reveals that robustness also depends upon a quantity that we denote the sensitivity.", "keyphrases": ["misspelling", "robustness", "word recognition model", "attack", "input sentence"]} +{"id": "henderson-etal-2013-multilingual", "title": "Multilingual Joint Parsing of Syntactic and Semantic Dependencies with a Latent Variable Model", "abstract": "Current investigations in data-driven models of parsing have shifted from purely syntactic analysis to richer semantic representations, showing that the successful recovery of the meaning of text requires structured analyses of both its grammar and its semantics. In this article, we report on a joint generative history-based model to predict the most likely derivation of a dependency parser for both syntactic and semantic dependencies, in multiple languages. Because these two dependency structures are not isomorphic, we propose a weak synchronization at the level of meaningful subsequences of the two derivations. These synchronized subsequences encompass decisions about the left side of each individual word. We also propose novel derivations for semantic dependency structures, which are appropriate for the relatively unconstrained nature of these graphs. To train a joint model of these synchronized derivations, we make use of a latent variable model of parsing, the Incremental Sigmoid Belief Network (ISBN) architecture. This architecture induces latent feature representations of the derivations, which are used to discover correlations both within and between the two derivations, providing the first application of ISBNs to a multi-task learning problem. This joint model achieves competitive performance on both syntactic and semantic dependency parsing for several languages. Because of the general nature of the approach, this extension of the ISBN architecture to weakly synchronized syntactic-semantic derivations is also an exemplification of its applicability to other problems where two independent, but related, representations are being learned.", "keyphrases": ["latent variable model", "semantic representation", "multi-task learning"]} +{"id": "rashtchian-etal-2010-collecting", "title": "Collecting Image Annotations Using Amazon's Mechanical Turk", "abstract": "Crowd-sourcing approaches such as Amazon's Mechanical Turk (MTurk) make it possible to annotate or collect large amounts of linguistic data at a relatively low cost and high speed. However, MTurk offers only limited control over who is allowed to participate in a particular task. This is particularly problematic for tasks requiring free-form text entry. Unlike multiple-choice tasks there is no correct answer, and therefore control items for which the correct answer is known cannot be used. Furthermore, MTurk has no effective built-in mechanism to guarantee workers are proficient English writers. We describe our experience in creating corpora of images annotated with multiple one-sentence descriptions on MTurk and explore the effectiveness of different quality control strategies for collecting linguistic data using Mechanical Turk. We find that the use of a qualification test provides the highest improvement of quality, whereas refining the annotations through follow-up tasks works rather poorly.
Using our best setup, we construct two image corpora, totaling more than 40,000 descriptive captions for 9000 images.", "keyphrases": ["amazon", "whole image", "human-created description"]} +{"id": "stahlberg-etal-2022-uncertainty", "title": "Uncertainty Determines the Adequacy of the Mode and the Tractability of Decoding in Sequence-to-Sequence Models", "abstract": "In many natural language processing (NLP) tasks the same input (e.g. source sentence) can have multiple possible outputs (e.g. translations). To analyze how this ambiguity (also known as intrinsic uncertainty) shapes the distribution learned by neural sequence models we measure sentence-level uncertainty by computing the degree of overlap between references in multi-reference test sets from two different NLP tasks: machine translation (MT) and grammatical error correction (GEC). At both the sentence- and the task-level, intrinsic uncertainty has major implications for various aspects of search such as the inductive biases in beam search and the complexity of exact search. In particular, we show that well-known pathologies such as a high number of beam search errors, the inadequacy of the mode, and the drop in system performance with large beam sizes apply to tasks with high level of ambiguity such as MT but not to less uncertain tasks such as GEC. Furthermore, we propose a novel exact n-best search algorithm for neural sequence models, and show that intrinsic uncertainty affects model uncertainty as the model tends to overly spread out the probability mass for uncertain tasks and sentences.", "keyphrases": ["mode", "source sentence", "uncertainty"]} +{"id": "johnson-etal-2007-bayesian", "title": "Bayesian Inference for PCFGs via Markov Chain Monte Carlo", "abstract": "This paper presents two Markov chain Monte Carlo (MCMC) algorithms for Bayesian inference of probabilistic context free grammars (PCFGs) from terminal strings, providing an alternative to maximum-likelihood estimation using the Inside-Outside algorithm. We illustrate these methods by estimating a sparse grammar describing the morphology of the Bantu language Sesotho, demonstrating that with suitable priors Bayesian techniques can infer linguistic structure in situations where maximum likelihood methods such as the Inside-Outside algorithm only produce a trivial grammar.", "keyphrases": ["pcfg", "terminal string", "bayesian inference", "induction", "recent research"]} +{"id": "wang-etal-2018-learning-ask", "title": "Learning to Ask Questions in Open-domain Conversational Systems with Typed Decoders", "abstract": "Asking good questions in open-domain conversational systems is quite significant but rather untouched. This task, substantially different from traditional question generation, requires to question not only with various patterns but also on diverse and relevant topics. We observe that a good question is a natural composition of interrogatives, topic words, and ordinary words. Interrogatives lexicalize the pattern of questioning, topic words address the key information for topic transition in dialogue, and ordinary words play syntactical and grammatical roles in making a natural sentence. We devise two typed decoders (soft typed decoder and hard typed decoder) in which a type distribution over the three types is estimated and the type distribution is used to modulate the final generation distribution. 
Extensive experiments show that the typed decoders outperform state-of-the-art baselines and can generate more meaningful questions.", "keyphrases": ["conversation", "typed decoder", "good question"]} +{"id": "feng-etal-2009-lattice", "title": "Lattice-based System Combination for Statistical Machine Translation", "abstract": "Current system combination methods usually use confusion networks to find consensus translations among different systems. Requiring one-to-one mappings between the words in candidate translations, confusion networks have difficulty in handling more general situations in which several words are connected to another several words. Instead, we propose a lattice-based system combination model that allows for such phrase alignments and uses lattices to encode all candidate translations. Experiments show that our approach achieves significant improvements over the state-of-the-art baseline system on Chinese-to-English translation test sets.", "keyphrases": ["system combination", "lattice", "hypothesis"]} +{"id": "du-etal-2010-facilitating", "title": "Facilitating Translation Using Source Language Paraphrase Lattices", "abstract": "For resource-limited language pairs, coverage of the test set by the parallel corpus is an important factor that affects translation quality in two respects: 1) out of vocabulary words; 2) the same information in an input sentence can be expressed in different ways, while current phrase-based SMT systems cannot automatically select an alternative way to transfer the same information. Therefore, given limited data, in order to facilitate translation from the input side, this paper proposes a novel method to reduce the translation difficulty using source-side lattice-based paraphrases. We utilise the original phrases from the input sentence and the corresponding paraphrases to build a lattice with estimated weights for each edge to improve translation quality. Compared to the baseline system, our method achieves relative improvements of 7.07%, 6.78% and 3.63% in terms of BLEU score on small, medium and large-scale English-to-Chinese translation tasks respectively. The results show that the proposed method is effective not only for resource-limited language pairs, but also for resource-sufficient pairs to some extent.", "keyphrases": ["paraphrase", "lattice", "translation task"]} +{"id": "tsvetkov-wintner-2010-extraction", "title": "Extraction of Multi-word Expressions from Small Parallel Corpora", "abstract": "Abstract We present a general, novel methodology for extracting multi-word expressions (MWEs) of various types, along with their translations, from small, word-aligned parallel corpora. Unlike existing approaches, we focus on misalignments; these typically indicate expressions in the source language that are translated to the target in a non-compositional way. We introduce a simple algorithm that proposes MWE candidates based on such misalignments, relying on 1:1 alignments as anchors that delimit the search space. We use a large monolingual corpus to rank and filter these candidates. Evaluation of the quality of the extraction algorithm reveals significant improvements over na\u00efve alignment-based methods. 
The extracted MWEs, with their translations, are used in the training of a statistical machine translation system, showing a small but significant improvement in its performance.", "keyphrases": ["parallel corpora", "mwe", "extraction", "hebrew multiword expression"]} +{"id": "brazinskas-etal-2020-unsupervised", "title": "Unsupervised Opinion Summarization as Copycat-Review Generation", "abstract": "Opinion summarization is the task of automatically creating summaries that reflect subjective information expressed in multiple documents, such as product reviews. While the majority of previous work has focused on the extractive setting, i.e., selecting fragments from input reviews to produce a summary, we let the model generate novel sentences and hence produce abstractive summaries. Recent progress in summarization has seen the development of supervised models which rely on large quantities of document-summary pairs. Since such training data is expensive to acquire, we instead consider the unsupervised setting, in other words, we do not use any summaries in training. We define a generative model for a review collection which capitalizes on the intuition that when generating a new review given a set of other reviews of a product, we should be able to control the \u201camount of novelty\u201d going into the new review or, equivalently, vary the extent to which it deviates from the input. At test time, when generating summaries, we force the novelty to be minimal, and produce a text reflecting consensus opinions. We capture this intuition by defining a hierarchical variational autoencoder model. Both individual reviews and the products they correspond to are associated with stochastic latent codes, and the review generator (\u201cdecoder\u201d) has direct access to the text of input reviews through the pointer-generator mechanism. Experiments on Amazon and Yelp datasets show that setting the review's latent code to its mean at test time allows the model to produce fluent and coherent summaries reflecting common opinions.", "keyphrases": ["review", "novel sentence", "unsupervised opinion summarization"]} +{"id": "narasimhan-etal-2015-unsupervised", "title": "An Unsupervised Method for Uncovering Morphological Chains", "abstract": "Most state-of-the-art systems today produce morphological analysis based only on orthographic patterns. In contrast, we propose a model for unsupervised morphological analysis that integrates orthographic and semantic views of words. We model word formation in terms of morphological chains, from base words to the observed words, breaking the chains into parent-child relations. We use log-linear models with morpheme and word-level features to predict possible parents, including their modifications, for each word. The limited set of candidate parents for each word renders contrastive estimation feasible. Our model consistently matches or outperforms five state-of-the-art systems on Arabic, English and Turkish.", "keyphrases": ["unsupervised method", "chain", "log-linear model", "turkish", "word form"]} +{"id": "koncel-kedziorski-etal-2019-text", "title": "Text Generation from Knowledge Graphs with Graph Transformers", "abstract": "Generating texts which express complex ideas spanning multiple sentences requires a structured representation of their content (document plan), but these representations are prohibitively expensive to manually produce.
In this work, we address the problem of generating coherent multi-sentence texts from the output of an information extraction system, and in particular a knowledge graph. Graphical knowledge representations are ubiquitous in computing, but pose a significant challenge for text generation techniques due to their non-hierarchical nature, collapsing of long-distance dependencies, and structural variety. We introduce a novel graph transforming encoder which can leverage the relational structure of such knowledge graphs without imposing linearization or hierarchical constraints. Incorporated into an encoder-decoder setup, we provide an end-to-end trainable system for graph-to-text generation that we apply to the domain of scientific text. Automatic and human evaluations show that our technique produces more informative texts which exhibit better document structure than competitive encoder-decoder methods.", "keyphrases": ["knowledge graph", "graph transformer", "text generation"]} +{"id": "bojar-etal-2013-chimera", "title": "Chimera \u2013 Three Heads for English-to-Czech Translation", "abstract": "This paper describes our WMT submissions CU-BOJAR and CU-DEPFIX, the latter dubbed \u201cCHIMERA\u201d because it combines three diverse approaches: TectoMT, a system with transfer at the deep syntactic level of representation, factored phrase-based translation using Moses, and finally automatic rule-based correction of frequent grammatical and meaning errors. We do not use any off-the-shelf system combination method.", "keyphrases": ["moses", "chimera", "depfix"]} +{"id": "wolfson-etal-2020-break", "title": "Break It Down: A Question Understanding Benchmark", "abstract": "Understanding natural language questions entails the ability to break down a question into the requisite steps for computing its answer. In this work, we introduce a Question Decomposition Meaning Representation (QDMR) for questions. QDMR constitutes the ordered list of steps, expressed through natural language, that are necessary for answering a question. We develop a crowdsourcing pipeline, showing that quality QDMRs can be annotated at scale, and release the Break dataset, containing over 83K pairs of questions and their QDMRs. We demonstrate the utility of QDMR by showing that (a) it can be used to improve open-domain question answering on the HotpotQA dataset, (b) it can be deterministically converted to a pseudo-SQL formal language, which can alleviate annotation in semantic parsing applications. Last, we use Break to train a sequence-to-sequence model with copying that parses questions into QDMR structures, and show that it substantially outperforms several natural baselines.", "keyphrases": ["question decomposition", "qdmr", "formal language", "break"]} +{"id": "li-etal-2020-optimus", "title": "Optimus: Organizing Sentences via Pre-trained Modeling of a Latent Space", "abstract": "When trained effectively, the Variational Autoencoder (VAE) can be both a powerful generative model and an effective representation learning framework for natural language. In this paper, we propose the first large-scale language VAE model Optimus (Organizing sentences via Pre-Trained Modeling of a Universal Space). A universal latent embedding space for sentences is first pre-trained on large text corpus, and then fine-tuned for various language generation and understanding tasks. Compared with GPT-2, Optimus enables guided language generation from an abstract level using the latent vectors.
Compared with BERT, Optimus can generalize better on low-resource language understanding tasks due to the smooth latent space structure. Extensive experimental results on a wide range of language tasks demonstrate the effectiveness of Optimus. It achieves new state-of-the-art on VAE language modeling benchmarks.", "keyphrases": ["modeling", "vae", "language generation", "gpt-2", "optimus"]} +{"id": "fan-etal-2019-using", "title": "Using Local Knowledge Graph Construction to Scale Seq2Seq Models to Multi-Document Inputs", "abstract": "Query-based open-domain NLP tasks require information synthesis from long and diverse web results. Current approaches extractively select portions of web text as input to Sequence-to-Sequence models using methods such as TF-IDF ranking. We propose constructing a local graph structured knowledge base for each query, which compresses the web search information and reduces redundancy. We show that by linearizing the graph into a structured input sequence, models can encode the graph representations within a standard Sequence-to-Sequence setting. For two generative tasks with very long text input, long-form question answering and multi-document summarization, feeding graph representations as input can achieve better performance than using retrieved text portions.", "keyphrases": ["knowledge graph", "seq2seq model", "multi-document input"]} +{"id": "kasewa-etal-2018-wronging", "title": "Wronging a Right: Generating Better Errors to Improve Grammatical Error Detection", "abstract": "Grammatical error correction, like other machine learning tasks, greatly benefits from large quantities of high quality training data, which is typically expensive to produce. While writing a program to automatically generate realistic grammatical errors would be difficult, one could learn the distribution of naturally-occurring errors and attempt to introduce them into other datasets. Initial work on inducing errors in this way using statistical machine translation has shown promise; we investigate cheaply constructing synthetic samples, given a small corpus of human-annotated data, using an off-the-rack attentive sequence-to-sequence model and a straight-forward post-processing procedure. Our approach yields error-filled artificial data that helps a vanilla bi-directional LSTM to outperform the previous state of the art at grammatical error detection, and a previously introduced model to gain further improvements of over 5% F0.5 score. When attempting to determine if a given sentence is synthetic, a human annotator at best achieves 39.39 F1 score, indicating that our model generates mostly human-like instances.", "keyphrases": ["grammatical error detection", "error correction", "artificial data", "back-translation"]} +{"id": "wu-etal-2018-word", "title": "Word Mover's Embedding: From Word2Vec to Document Embedding", "abstract": "While the celebrated Word2Vec technique yields semantically rich representations for individual words, there has been relatively less success in extending to generate unsupervised sentences or documents embeddings. Recent work has demonstrated that a distance measure between documents called Word Mover's Distance (WMD) that aligns semantically similar words, yields unprecedented KNN classification accuracy. However, WMD is expensive to compute, and it is hard to extend its use beyond a KNN classifier. 
In this paper, we propose the Word Mover's Embedding (WME), a novel approach to building an unsupervised document (sentence) embedding from pre-trained word embeddings. In our experiments on 9 benchmark text classification datasets and 22 textual similarity tasks, the proposed technique consistently matches or outperforms state-of-the-art techniques, with significantly higher accuracy on problems of short length.", "keyphrases": ["wmd", "word mover", "transport"]} +{"id": "aharoni-goldberg-2020-unsupervised", "title": "Unsupervised Domain Clusters in Pretrained Language Models", "abstract": "The notion of \u201cin-domain data\u201d in NLP is often over-simplistic and vague, as textual data varies in many nuanced linguistic aspects such as topic, style or level of formality. In addition, domain labels are many times unavailable, making it challenging to build domain-specific systems. We show that massive pre-trained language models implicitly learn sentence representations that cluster by domains without supervision \u2013 suggesting a simple data-driven definition of domains in textual data. We harness this property and propose domain data selection methods based on such models, which require only a small set of in-domain monolingual data. We evaluate our data selection methods for neural machine translation across five diverse domains, where they outperform an established approach as measured by both BLEU and precision and recall with respect to an oracle selection.", "keyphrases": ["cluster", "language model", "data selection method"]} +{"id": "seraji-etal-2012-basic", "title": "A Basic Language Resource Kit for Persian", "abstract": "Persian with its about 100,000,000 speakers in the world belongs to the group of languages with less developed linguistically annotated resources and tools. The few existing resources and tools are neither open source nor freely available. Thus, our goal is to develop open source resources such as corpora and treebanks, and tools for data-driven linguistic analysis of Persian. We do this by exploring the reusability of existing resources and adapting state-of-the-art methods for the linguistic annotation. We present fully functional tools for text normalization, sentence segmentation, tokenization, part-of-speech tagging, and parsing. As for resources, we describe the Uppsala PErsian Corpus (UPEC) which is a modified version of the Bijankhan corpus with additional sentence segmentation and consistent tokenization modified for more appropriate syntactic annotation. The corpus consists of 2,782,109 tokens and is annotated with parts of speech and morphological features. A treebank is derived from UPEC with an annotation scheme based on Stanford Typed Dependencies and is planned to consist of 10,000 sentences of which 215 have already been annotated.", "keyphrases": ["persian", "sentence segmentation", "tokenization"]} +{"id": "elazar-goldberg-2018-adversarial", "title": "Adversarial Removal of Demographic Attributes from Text Data", "abstract": "Recent advances in Representation Learning and Adversarial Training seem to succeed in removing unwanted features from the learned representation. We show that demographic information of authors is encoded in\u2014and can be recovered from\u2014the intermediate representations learned by text-based neural classifiers.
The implication is that decisions of classifiers trained on textual data are not agnostic to\u2014and likely condition on\u2014demographic attributes. When attempting to remove such demographic information using adversarial training, we find that while the adversarial component achieves chance-level development-set accuracy during training, a post-hoc classifier, trained on the encoded sentences from the first part, still manages to reach substantially higher classification accuracies on the same data. This behavior is consistent across several tasks, demographic properties and datasets. We explore several techniques to improve the effectiveness of the adversarial component. Our main conclusion is a cautionary one: do not rely on the adversarial training to achieve invariant representation to sensitive features.", "keyphrases": ["attribute", "text data", "demographic information", "post-hoc classifier", "gender"]} +{"id": "kim-etal-2012-multilingual", "title": "Multilingual Named Entity Recognition using Parallel Data and Metadata from Wikipedia", "abstract": "In this paper we propose a method to automatically label multi-lingual data with named entity tags. We build on prior work utilizing Wikipedia metadata and show how to effectively combine the weak annotations stemming from Wikipedia metadata with information obtained through English-foreign language parallel Wikipedia sentences. The combination is achieved using a novel semi-CRF model for foreign sentence tagging in the context of a parallel English sentence. The model outperforms both standard annotation projection methods and methods based solely on Wikipedia metadata.", "keyphrases": ["wikipedia", "entity tag", "english-foreign language", "parallel corpora"]} +{"id": "jiang-bansal-2019-avoiding", "title": "Avoiding Reasoning Shortcuts: Adversarial Evaluation, Training, and Model Development for Multi-Hop QA", "abstract": "Multi-hop question answering requires a model to connect multiple pieces of evidence scattered in a long context to answer the question. In this paper, we show that in the multi-hop HotpotQA (Yang et al., 2018) dataset, the examples often contain reasoning shortcuts through which models can directly locate the answer by word-matching the question with a sentence in the context. We demonstrate this issue by constructing adversarial documents that create contradicting answers to the shortcut but do not affect the validity of the original answer. The performance of strong baseline models drops significantly on our adversarial test, indicating that they are indeed exploiting the shortcuts rather than performing multi-hop reasoning. After adversarial training, the baseline's performance improves but is still limited on the adversarial test. Hence, we use a control unit that dynamically attends to the question at different reasoning hops to guide the model's multi-hop reasoning. We show that our 2-hop model trained on the regular data is more robust to the adversaries than the baseline. After adversarial training, it not only achieves significant improvements over its counterpart trained on regular data, but also outperforms the adversarially-trained baseline significantly. 
Finally, we sanity-check that these improvements are not obtained by exploiting potential new shortcuts in the adversarial data, but indeed due to robust multi-hop reasoning skills of the models.", "keyphrases": ["reasoning shortcut", "adversarial evaluation", "multi-hop reasoning"]} +{"id": "elsner-2012-character", "title": "Character-based kernels for novelistic plot structure", "abstract": "Better representations of plot structure could greatly improve computational methods for summarizing and generating stories. Current representations lack abstraction, focusing too closely on events. We present a kernel for comparing novelistic plots at a higher level, in terms of the cast of characters they depict and the social relationships between them. Our kernel compares the characters of different novels to one another by measuring their frequency of occurrence over time and the descriptive and emotional language associated with them. Given a corpus of 19th-century novels as training data, our method can accurately distinguish held-out novels in their original form from artificially disordered or reversed surrogates, demonstrating its ability to robustly represent important aspects of plot structure.", "keyphrases": ["kernel", "plot structure", "rich representation", "literary text"]} +{"id": "ramage-etal-2009-labeled", "title": "Labeled LDA: A supervised topic model for credit attribution in multi-labeled corpora", "abstract": "A significant portion of the world's text is tagged by readers on social bookmarking websites. Credit attribution is an inherent problem in these corpora because most pages have multiple tags, but the tags do not always apply with equal specificity across the whole document. Solving the credit attribution problem requires associating each word in a document with the most appropriate tags and vice versa. This paper introduces Labeled LDA, a topic model that constrains Latent Dirichlet Allocation by defining a one-to-one correspondence between LDA's latent topics and user tags. This allows Labeled LDA to directly learn word-tag correspondences. We demonstrate Labeled LDA's improved expressiveness over traditional LDA with visualizations of a corpus of tagged web pages from del.icio.us. Labeled LDA outperforms SVMs by more than 3 to 1 when extracting tag-specific document snippets. As a multi-label text classifier, our model is competitive with a discriminative baseline on a variety of datasets.", "keyphrases": ["topic model", "credit attribution", "labeled lda"]} +{"id": "braud-denis-2015-comparing", "title": "Comparing Word Representations for Implicit Discourse Relation Classification", "abstract": "This paper presents a detailed comparative framework for assessing the usefulness of unsupervised word representations for identifying so-called implicit discourse relations. Specifically, we compare standard one-hot word pair representations against low-dimensional ones based on Brown clusters and word embeddings. We also consider various word vector combination schemes for deriving discourse segment representations from word vectors, and compare representations based either on all words or limited to head words. 
Our main finding is that denser representations systematically outperform sparser ones and give state-of-the-art performance or above without the need for additional hand-crafted features.", "keyphrases": ["discourse relation classification", "word embedding", "one-hot representation"]} +{"id": "ishiwatari-etal-2020-relation", "title": "Relation-aware Graph Attention Networks with Relational Position Encodings for Emotion Recognition in Conversations", "abstract": "Interest in emotion recognition in conversations (ERC) has been increasing in various fields, because it can be used to analyze user behaviors and detect fake news. Many recent ERC methods use graph-based neural networks to take the relationships between the utterances of the speakers into account. In particular, the state-of-the-art method considers self- and inter-speaker dependencies in conversations by using relational graph attention networks (RGAT). However, graph-based neural networks do not take sequential information into account. In this paper, we propose relational position encodings that provide RGAT with sequential information reflecting the relational graph structure. Accordingly, our RGAT model can capture both the speaker dependency and the sequential information. Experiments on four ERC datasets show that our model is beneficial to recognizing emotions expressed in conversations. In addition, our approach empirically outperforms the state-of-the-art on all of the benchmark datasets.", "keyphrases": ["graph attention network", "relational position encoding", "emotion recognition", "conversation"]} +{"id": "dai-etal-2021-apo", "title": "APo-VAE: Text Generation in Hyperbolic Space", "abstract": "Natural language often exhibits inherent hierarchical structure ingrained with complex syntax and semantics. However, most state-of-the-art deep generative models learn embeddings only in Euclidean vector space, without accounting for this structural property of language. In this paper, we investigate text generation in a hyperbolic latent space to learn continuous hierarchical representations. An Adversarial Poincare Variational Autoencoder (APo-VAE) is presented, where both the prior and variational posterior of latent variables are defined over a Poincare ball via wrapped normal distributions. By adopting the primal-dual formulation of Kullback-Leibler divergence, an adversarial learning procedure is introduced to empower robust model training. Extensive experiments in language modeling, unaligned style transfer, and dialog-response generation demonstrate the effectiveness of the proposed APo-VAE model over VAEs in Euclidean latent space, thanks to its superb capabilities in capturing latent language hierarchies in hyperbolic space.", "keyphrases": ["text generation", "hyperbolic space", "inherent hierarchical structure", "apo-vae"]} +{"id": "ide-etal-2010-manually", "title": "The Manually Annotated Sub-Corpus: A Community Resource for and by the People", "abstract": "The Manually Annotated Sub-Corpus (MASC) project provides data and annotations to serve as the base for a communitywide annotation effort of a subset of the American National Corpus. The MASC infrastructure enables the incorporation of contributed annotations into a single, usable format that can then be analyzed as it is or ported to any of a variety of other formats. 
MASC includes data from a much wider variety of genres than existing multiply-annotated corpora of English, and the project is committed to a fully open model of distribution, without restriction, for all data and annotations produced or contributed. As such, MASC is the first large-scale, open, community-based effort to create much needed language resources for NLP. This paper describes the MASC project, its corpus and annotations, and serves as a call for contributions of data and annotations from the language processing community.", "keyphrases": ["manually annotated sub-corpus", "masc", "language resource"]} +{"id": "sun-etal-2018-answer", "title": "Answer-focused and Position-aware Neural Question Generation", "abstract": "In this paper, we focus on the problem of question generation (QG). Recent neural network-based approaches employ the sequence-to-sequence model which takes an answer and its context as input and generates a relevant question as output. However, we observe two major issues with these approaches: (1) The generated interrogative words (or question words) do not match the answer type. (2) The model copies the context words that are far from and irrelevant to the answer, instead of the words that are close and relevant to the answer. To address these two issues, we propose an answer-focused and position-aware neural question generation model. (1) By answer-focused, we mean that we explicitly model question word generation by incorporating the answer embedding, which can help generate an interrogative word matching the answer type. (2) By position-aware, we mean that we model the relative distance between the context words and the answer. Hence the model can be aware of the position of the context words when copying them to generate a question. We conduct extensive experiments to examine the effectiveness of our model. The experimental results show that our model significantly improves the baseline and outperforms the state-of-the-art system.", "keyphrases": ["question generation", "answer location information", "input text"]} +{"id": "yang-mitchell-2016-joint", "title": "Joint Extraction of Events and Entities within a Document Context", "abstract": "Events and entities are closely related; entities are often actors or participants in events and events without entities are uncommon. The interpretation of events and entities is highly contextually dependent. Existing work in information extraction typically models events separately from entities, and performs inference at the sentence level, ignoring the rest of the document. In this paper, we propose a novel approach that models the dependencies among variables of events, entities, and their relations, and performs joint inference of these variables across a document. The goal is to enable access to document-level contextual information and facilitate context-aware predictions. We demonstrate that our approach substantially outperforms the state-of-the-art methods for event extraction as well as a strong baseline for entity extraction.", "keyphrases": ["document context", "event extraction", "joint extraction", "well-defined feature", "entity mention"]} +{"id": "sultan-etal-2014-dls", "title": "DLS@CU: Sentence Similarity from Word Alignment", "abstract": "We present an algorithm for computing the semantic similarity between two sentences. 
It adopts the hypothesis that semantic similarity is a monotonically increasing function of the degree to which (1) the two sentences contain similar semantic units, and (2) such units occur in similar semantic contexts. With a simplistic operationalization of the notion of semantic units with individual words, we experimentally show that this hypothesis can lead to state-of-the-art results for sentence-level semantic similarity. At the SemEval 2014 STS task (task 10), our system demonstrated the best performance (measured by correlation with human annotations) among 38 system runs.", "keyphrases": ["word alignment", "semeval", "sts"]} +{"id": "bisk-etal-2020-experience", "title": "Experience Grounds Language", "abstract": "Language understanding research is held back by a failure to relate language to the physical world it describes and to the social interactions it facilitates. Despite the incredible effectiveness of language processing models to tackle tasks after being trained on text alone, successful linguistic communication relies on a shared experience of the world. It is this shared experience that makes utterances meaningful. Natural language processing is a diverse field, and progress throughout its development has come from new representational theories, modeling techniques, data collection paradigms, and tasks. We posit that the present success of representation learning approaches trained on large, text-only corpora requires the parallel tradition of research on the broader physical and social context of language to address the deeper questions of communication.", "keyphrases": ["grounding", "successful linguistic communication", "experience", "language model", "limitation"]} +{"id": "sangati-etal-2009-generative", "title": "A generative re-ranking model for dependency parsing", "abstract": "We propose a framework for dependency parsing based on a combination of discriminative and generative models. We use a discriminative model to obtain a k-best list of candidate parses, and subsequently rerank those candidates using a generative model. We show how this approach allows us to evaluate a variety of generative models, without needing different parser implementations. Moreover, we present empirical results that show a small improvement over state-of-the-art dependency parsing of English sentences.", "keyphrases": ["dependency parsing", "generative model", "list"]} +{"id": "flor-2012-four", "title": "Four types of context for automatic spelling correction", "abstract": "This paper presents an investigation on using four types of contextual information for improving the accuracy of automatic correction of single-token non-word misspellings. The task is framed as contextually-informed re-ranking of correction candidates. Immediate local context is captured by word n-grams statistics from a Web-scale language model. The second approach measures how well a candidate correction fits in the semantic fabric of the local lexical neighborhood, using a very large Distributional Semantic Model. In the third approach, recognizing a misspelling as an instance of a recurring word can be useful for reranking. The fourth approach looks at context beyond the text itself. If the approximate topic can be known in advance, spelling correction can be biased towards the topic. Effectiveness of proposed methods is demonstrated with an annotated corpus of 3,000 student essays from international high-stakes English language assessments.
The paper also describes an implemented system that achieves high accuracy on this task.", "keyphrases": ["spelling correction", "contextual information", "student essay", "ranking candidate correction", "edit distance"]} +{"id": "chen-dolan-2011-collecting", "title": "Collecting Highly Parallel Data for Paraphrase Evaluation", "abstract": "A lack of standard datasets and evaluation metrics has prevented the field of paraphrasing from making the kind of rapid progress enjoyed by the machine translation community over the last 15 years. We address both problems by presenting a novel data collection framework that produces highly parallel text data relatively inexpensively and on a large scale. The highly parallel nature of this data allows us to use simple n-gram comparisons to measure both the semantic adequacy and lexical dissimilarity of paraphrase candidates. In addition to being simple and efficient to compute, experiments show that these metrics correlate highly with human judgments.", "keyphrases": ["paraphrase", "n-gram", "human judgment", "crowdsourcing"]} +{"id": "van-durme-lall-2011-efficient", "title": "Efficient Online Locality Sensitive Hashing via Reservoir Counting", "abstract": "We describe a novel mechanism called Reservoir Counting for application in online Locality Sensitive Hashing. This technique allows for significant savings in the streaming setting, allowing for maintaining a larger number of signatures, or an increased level of approximation accuracy at a similar memory footprint.", "keyphrases": ["locality sensitive hashing", "reservoir counting", "streaming setting"]} +{"id": "correia-etal-2019-adaptively", "title": "Adaptively Sparse Transformers", "abstract": "Attention mechanisms have become ubiquitous in NLP. Recent architectures, notably the Transformer, learn powerful context-aware word representations through layered, multi-headed attention. The multiple heads learn diverse types of word relationships. However, with standard softmax attention, all attention heads are dense, assigning a non-zero weight to all context words. In this work, we introduce the adaptively sparse Transformer, wherein attention heads have flexible, context-dependent sparsity patterns.
This sparsity is accomplished by replacing softmax with alpha-entmax: a differentiable generalization of softmax that allows low-scoring words to receive precisely zero weight. Moreover, we derive a method to automatically learn the alpha parameter \u2013 which controls the shape and sparsity of alpha-entmax \u2013 allowing attention heads to choose between focused or spread-out behavior. Our adaptively sparse Transformer improves interpretability and head diversity when compared to softmax Transformers on machine translation datasets. Findings of the quantitative and qualitative analysis of our approach include that heads in different layers learn different sparsity preferences and tend to be more diverse in their attention distributions than softmax Transformers. Furthermore, at no cost in accuracy, sparsity in attention heads helps to uncover different head specializations.", "keyphrases": ["sparse transformer", "head", "alternative"]} +{"id": "mori-etal-2014-flow", "title": "Flow Graph Corpus from Recipe Texts", "abstract": "In this paper, we present our attempt at annotating procedural texts with a flow graph as a representation of understanding. The domain we focus on is cooking recipe. The flow graphs are directed acyclic graphs with a special root node corresponding to the final dish. The vertex labels are recipe named entities, such as foods, tools, cooking actions, etc. The arc labels denote relationships among them. We converted 266 Japanese recipe texts into flow graphs manually. 200 recipes are randomly selected from a web site and 66 are of the same dish. We detail the annotation framework and report some statistics on our corpus. The most typical usage of our corpus may be automatic conversion from texts to flow graphs which can be seen as an entire understanding of procedural texts. With our corpus, one can also try word segmentation, named entity recognition, predicate-argument structure analysis, and coreference resolution.", "keyphrases": ["recipe", "procedural text", "action", "flow graph", "workflow"]} +{"id": "chen-etal-2007-multi", "title": "Multi-Engine Machine Translation with an Open-Source SMT Decoder", "abstract": "We describe an architecture that allows to combine statistical machine translation (SMT) with rule-based machine translation (RBMT) in a multi-engine setup. We use a variant of standard SMT technology to align translations from one or more RBMT systems with the source text. We incorporate phrases extracted from these alignments into the phrase table of the SMT system and use the open-source decoder Moses to find good combinations of phrases from SMT training data with the phrases derived from RBMT. First experiments based on this hybrid architecture achieve promising results.", "keyphrases": ["machine translation", "rbmt", "multi-engine setup", "smt system", "good combination"]} +{"id": "poliak-etal-2018-collecting", "title": "Collecting Diverse Natural Language Inference Problems for Sentence Representation Evaluation", "abstract": "We present a large-scale collection of diverse natural language inference (NLI) datasets that help provide insight into how well a sentence representation captures distinct types of reasoning. The collection results from recasting 13 existing datasets from 7 semantic phenomena into a common NLI structure, resulting in over half a million labeled context-hypothesis pairs in total. We refer to our collection as the DNC: Diverse Natural Language Inference Collection. 
The DNC is available online at , and will grow over time as additional resources are recast and added from novel sources.", "keyphrases": ["natural language inference", "nli", "reasoning", "test set", "semantic phenomenon"]} +{"id": "pradhan-etal-2004-shallow", "title": "Shallow Semantic Parsing using Support Vector Machines", "abstract": "In this paper, we propose a machine learning algorithm for shallow semantic parsing, extending the work of Gildea and Jurafsky (2002), Surdeanu et al. (2003) and others. Our algorithm is based on Support Vector Machines which we show give an improvement in performance over earlier classifiers. We show performance improvements through a number of new features and measure their ability to generalize to a new test set drawn from the AQUAINT corpus.", "keyphrases": ["support vector machines", "gildea", "shallow semantic parsing", "central meaning", "predicate"]} +{"id": "alinejad-etal-2018-prediction", "title": "Prediction Improves Simultaneous Neural Machine Translation", "abstract": "Simultaneous speech translation aims to maintain translation quality while minimizing the delay between reading input and incrementally producing the output. We propose a new general-purpose prediction action which predicts future words in the input to improve quality and minimize delay in simultaneous translation. We train this agent using reinforcement learning with a novel reward function. Our agent with prediction has better translation quality and less delay compared to an agent-based simultaneous translation system without prediction.", "keyphrases": ["agent", "reinforcement learning", "predict operation", "next source word"]} +{"id": "pustejovsky-yocum-2013-capturing", "title": "Capturing Motion in ISO-SpaceBank", "abstract": "This paper presents the first description of the motion subcorpus of ISO-SpaceBank (MotionBank) and discusses how motion-events are represented in ISO-Space 1.5, a specification language for the representation of spatial information in language. We present data from this subcorpus with examples from the pilot annotation, focusing specifically on the annotation of motion-events and their various participants. These data inform further discussion of outstanding issues concerning semantic annotation, such as quantification and measurement. We address these questions briefly as they impact the design of ISO-Space.", "keyphrases": ["motion", "iso-spacebank", "spatial information", "pilot annotation"]} +{"id": "miyao-tsujii-2005-probabilistic", "title": "Probabilistic Disambiguation Models for Wide-Coverage HPSG Parsing", "abstract": "This paper reports the development of log-linear models for the disambiguation in wide-coverage HPSG parsing. The estimation of log-linear models requires high computational cost, especially with wide-coverage grammars. Using techniques to reduce the estimation cost, we trained the models using 20 sections of Penn Treebank. A series of experiments empirically evaluated the estimation techniques, and also examined the performance of the disambiguation models on the parsing of real-world sentences.", "keyphrases": ["hpsg", "real-world sentence", "probabilistic model"]} +{"id": "chen-etal-2020-mpdd", "title": "MPDD: A Multi-Party Dialogue Dataset for Analysis of Emotions and Interpersonal Relationships", "abstract": "A dialogue dataset is an indispensable resource for building a dialogue system.
Additional information like emotions and interpersonal relationships labeled on conversations enables the system to capture the emotion flow of the participants in the dialogue. However, there is no publicly available Chinese dialogue dataset with emotion and relation labels. In this paper, we collect the conversations from TV series scripts, and annotate emotion and interpersonal relationship labels on each utterance. This dataset contains 25,548 utterances from 4,142 dialogues. We also set up some experiments to observe the effects of the responded utterance on the current utterance, and the correlation between emotion and relation types in emotion and relation classification tasks.", "keyphrases": ["dialogue dataset", "emotion", "interpersonal relationship"]} +{"id": "wu-2005-recognizing", "title": "Recognizing Paraphrases and Textual Entailment Using Inversion Transduction Grammars", "abstract": "We present first results using paraphrase as well as textual entailment data to test the language universal constraint posited by Wu's (1995, 1997) Inversion Transduction Grammar (ITG) hypothesis. In machine translation and alignment, the ITG Hypothesis provides a strong inductive bias, and has been shown empirically across numerous language pairs and corpora to yield both efficiency and accuracy gains for various language acquisition tasks. Monolingual paraphrase and textual entailment recognition datasets, however, potentially facilitate closer tests of certain aspects of the hypothesis than bilingual parallel corpora, which simultaneously exhibit many irrelevant dimensions of cross-lingual variation. We investigate this using simple generic Bracketing ITGs containing no language-specific linguistic knowledge. Experimental results on the MSR Paraphrase Corpus show that, even in the absence of any thesaurus to accommodate lexical variation between the paraphrases, an uninterpolated average precision of at least 76% is obtainable from the Bracketing ITG's structure matching bias alone. This is consistent with experimental results on the Pascal Recognising Textual Entailment Challenge Corpus, which show surprisingly strong results for a number of the task subsets.", "keyphrases": ["paraphrase", "inversion transduction grammar", "machine translation"]} +{"id": "jiampojamarn-etal-2007-applying", "title": "Applying Many-to-Many Alignments and Hidden Markov Models to Letter-to-Phoneme Conversion", "abstract": "Letter-to-phoneme conversion generally requires aligned training data of letters and phonemes. Typically, the alignments are limited to one-to-one alignments. We present a novel technique of training with many-to-many alignments. A letter chunking bigram prediction manages double letters and double phonemes automatically as opposed to preprocessing with fixed lists. We also apply an HMM method in conjunction with a local classification model to predict a global phoneme sequence given a word. The many-to-many alignments result in significant improvements over the traditional one-to-one approach.
Our system achieves state-of-the-art performance on several languages and data sets.", "keyphrases": ["many-to-many alignment", "letter", "phoneme", "transliteration unit"]} +{"id": "peng-etal-2019-transfer", "title": "Transfer Learning in Biomedical Natural Language Processing: An Evaluation of BERT and ELMo on Ten Benchmarking Datasets", "abstract": "Inspired by the success of the General Language Understanding Evaluation benchmark, we introduce the Biomedical Language Understanding Evaluation (BLUE) benchmark to facilitate research in the development of pre-training language representations in the biomedicine domain. The benchmark consists of five tasks with ten datasets that cover both biomedical and clinical texts with different dataset sizes and difficulties. We also evaluate several baselines based on BERT and ELMo and find that the BERT model pre-trained on PubMed abstracts and MIMIC-III clinical notes achieves the best results. We make the datasets, pre-trained models, and codes publicly available at .", "keyphrases": ["biomedical domain", "language model", "entity recognition"]} +{"id": "sun-etal-2012-fast", "title": "Fast Online Training with Frequency-Adaptive Learning Rates for Chinese Word Segmentation and New Word Detection", "abstract": "We present a joint model for Chinese word segmentation and new word detection. We present high dimensional new features, including word-based features and enriched edge (label-transition) features, for the joint modeling. As we know, training a word segmentation system on large-scale datasets is already costly. In our case, adding high dimensional new features will further slow down the training speed. To solve this problem, we propose a new training method, adaptive online gradient descent based on feature frequency information, for very fast online training of the parameters, even given large-scale datasets with high dimensional features. Compared with existing training methods, our training method is an order of magnitude faster in terms of training time, and can achieve equal or even higher accuracies. The proposed fast training method is a general purpose optimization method, and it is not limited to the specific task discussed in this paper.", "keyphrases": ["chinese word segmentation", "joint model", "specific task", "fast online training"]} +{"id": "saluja-etal-2014-graph", "title": "Graph-based Semi-Supervised Learning of Translation Models from Monolingual Data", "abstract": "Statistical phrase-based translation learns translation rules from bilingual corpora, and has traditionally only used monolingual evidence to construct features that rescore existing translation candidates. In this work, we present a semi-supervised graph-based approach for generating new translation rules that leverages bilingual and monolingual data. The proposed technique first constructs phrase graphs using both source and target language monolingual corpora. Next, graph propagation identifies translations of phrases that were not observed in the bilingual corpus, assuming that similar phrases have similar translations. We report results on a large Arabic-English system and a medium-sized Urdu-English system.
Our proposed approach significantly improves the performance of competitive phrase-based systems, leading to consistent improvements between 1 and 4 BLEU points on standard evaluation sets.", "keyphrases": ["monolingual data", "new translation rule", "test data"]} +{"id": "chan-etal-2012-community", "title": "Community Answer Summarization for Multi-Sentence Question with Group L1 Regularization", "abstract": "We present a novel answer summarization method for community Question Answering services (cQAs) to address the problem of \"incomplete answer\", i.e., the \"best answer\" of a complex multi-sentence question misses valuable information that is contained in other answers. In order to automatically generate a novel and non-redundant community answer summary, we segment the complex original multi-sentence question into several sub questions and then propose a general Conditional Random Field (CRF) based answer summary method with group L1 regularization. Various textual and non-textual QA features are explored. Specifically, we explore four different types of contextual factors, namely, the information novelty and non-redundancy modeling for local and non-local sentence interactions under question segmentation. To further unleash the potential of the abundant cQA features, we introduce the group L1 regularization for feature learning. Experimental results on a Yahoo! Answers dataset show that our proposed method significantly outperforms state-of-the-art methods on cQA summarization task.", "keyphrases": ["multi-sentence question", "answer summarization method", "good answer"]} +{"id": "shi-etal-2006-dom", "title": "A DOM Tree Alignment Model for Mining Parallel Data from the Web", "abstract": "This paper presents a new web mining scheme for parallel data acquisition. Based on the Document Object Model (DOM), a web page is represented as a DOM tree. Then a DOM tree alignment model is proposed to identify the translationally equivalent texts and hyperlinks between two parallel DOM trees. By tracing the identified parallel hyperlinks, parallel web documents are recursively mined. Compared with previous mining schemes, the benchmarks show that this new mining scheme improves the mining coverage, reduces mining bandwidth, and enhances the quality of mined parallel sentences.", "keyphrases": ["web", "document object model", "parallel document", "metadata information"]} +{"id": "gorinski-lapata-2015-movie", "title": "Movie Script Summarization as Graph-based Scene Extraction", "abstract": "In this paper we study the task of movie script summarization, which we argue could enhance script browsing, give readers a rough idea of the script\u2019s plotline, and speed up reading time. We formalize the process of generating a shorter version of a screenplay as the task of finding an optimal chain of scenes. We develop a graph-based model that selects a chain by jointly optimizing its logical progression, diversity, and importance.
Human evaluation based on a question-answering task shows that our model produces summaries which are more informative compared to competitive baselines.", "keyphrases": ["scene", "script browsing", "movie script summarization", "screenplay summarization", "graph-based approach"]} +{"id": "rehbein-van-genabith-2007-treebank", "title": "Treebank Annotation Schemes and Parser Evaluation for German", "abstract": "Recent studies focussed on the question whether less-configurational languages like German are harder to parse than English, or whether the lower parsing scores are an artefact of treebank encoding schemes and data structures, as claimed by K\u00fcbler et al. (2006). This claim is based on the assumption that PARSEVAL metrics fully reflect parse quality across treebank encoding schemes. In this paper we present new experiments to test this claim. We use the PARSEVAL metric, the Leaf-Ancestor metric as well as a dependency-based evaluation, and present novel approaches measuring the effect of controlled error insertion on treebank trees and parser output. We also provide extensive past-parsing cross-treebank conversion. The results of the experiments show that, contrary to K\u00fcbler et al. (2006), the question whether or not German is harder to parse than English remains undecided.", "keyphrases": ["german", "treebank annotation scheme", "parser error"]} +{"id": "hahn-keller-2016-modeling", "title": "Modeling Human Reading with Neural Attention", "abstract": "When humans read text, they fixate some words and skip others. However, there have been few attempts to explain skipping behavior with computational models, as most existing work has focused on predicting reading times (e.g., using surprisal). In this paper, we propose a novel approach that models both skipping and reading, using an unsupervised architecture that combines a neural attention with autoencoding, trained on raw text using reinforcement learning. Our model explains human reading behavior as a tradeoff between precision of language understanding (encoding the input accurately) and economy of attention (fixating as few words as possible). We evaluate the model on the Dundee eye-tracking corpus, showing that it accurately predicts skipping behavior and reading times, is competitive with surprisal, and captures known qualitative features of human reading.", "keyphrases": ["human reading", "neural attention", "novel approach", "reinforcement learning"]} +{"id": "fazly-stevenson-2006-automatically", "title": "Automatically Constructing a Lexicon of Verb Phrase Idiomatic Combinations", "abstract": "We investigate the lexical and syntactic flexibility of a class of idiomatic expressions. We develop measures that draw on such linguistic properties, and demonstrate that these statistical, corpus-based measures can be successfully used for distinguishing idiomatic combinations from non-idiomatic ones. We also propose a means for automatically determining which syntactic forms a particular idiom can appear in, and hence should be included in its lexical representation.", "keyphrases": ["idiomatic expression", "lexical fixedness", "statistical measure"]} +{"id": "mir-etal-2019-evaluating", "title": "Evaluating Style Transfer for Text", "abstract": "Research in the area of style transfer for text is currently bottlenecked by a lack of standard evaluation practices. This paper aims to alleviate this issue by experimentally identifying best practices with a Yelp sentiment dataset.
We specify three aspects of interest (style transfer intensity, content preservation, and naturalness) and show how to obtain more reliable measures of them from human evaluation than in previous work. We propose a set of metrics for automated evaluation and demonstrate that they are more strongly correlated and in agreement with human judgment: direction-corrected Earth Mover's Distance, Word Mover's Distance on style-masked texts, and adversarial classification for the respective aspects. We also show that the three examined models exhibit tradeoffs between aspects of interest, demonstrating the importance of evaluating style transfer models at specific points of their tradeoff plots. We release software with our evaluation metrics to facilitate research.", "keyphrases": ["style transfer", "standard evaluation practice", "content preservation", "distance"]} +{"id": "biadsy-etal-2009-spoken", "title": "Spoken Arabic Dialect Identification Using Phonotactic Modeling", "abstract": "The Arabic language is a collection of multiple variants, among which Modern Standard Arabic (MSA) has a special status as the formal written standard language of the media, culture and education across the Arab world. The other variants are informal spoken dialects that are the media of communication for daily life. Arabic dialects differ substantially from MSA and each other in terms of phonology, morphology, lexical choice and syntax. In this paper, we describe a system that automatically identifies the Arabic dialect (Gulf, Iraqi, Levantine, Egyptian and MSA) of a speaker given a sample of his/her speech. The phonotactic approach we use proves to be effective in identifying these dialects with considerable overall accuracy --- 81.60% using 30s test utterances.", "keyphrases": ["dialect", "modern standard arabic", "phonotactic approach", "speech data"]} +{"id": "kahn-etal-2005-effective", "title": "Effective Use of Prosody in Parsing Conversational Speech", "abstract": "We identify a set of prosodic cues for parsing conversational speech and show how such features can be effectively incorporated into a statistical parsing model. On the Switchboard corpus of conversational speech, the system achieves improved parse accuracy over a state-of-the-art system which uses only lexical and syntactic features. Since removal of edit regions is known to improve downstream parse accuracy, we explore alternatives for edit detection and show that PCFGs are not competitive with more specialized techniques.", "keyphrases": ["conversational speech", "textual information", "posterior"]} +{"id": "vilar-etal-2007-human", "title": "Human Evaluation of Machine Translation Through Binary System Comparisons", "abstract": "We introduce a novel evaluation scheme for the human evaluation of different machine translation systems. Our method is based on direct comparison of two sentences at a time by human judges. These binary judgments are then used to decide between all possible rankings of the systems. The advantages of this new method are the lower dependency on extensive evaluation guidelines, and a tighter focus on a typical evaluation task, namely the ranking of systems. Furthermore we argue that machine translation evaluations should be regarded as statistical processes, both for human and automatic evaluation. We show how confidence ranges for state-of-the-art evaluation measures such as WER and TER can be computed accurately and efficiently without having to resort to Monte Carlo estimates. 
We give an example of our new evaluation scheme, as well as a comparison with classical automatic and human evaluation on data from a recent international evaluation campaign.", "keyphrases": ["human evaluation", "adequacy", "fluency"]} +{"id": "sun-etal-2020-knowledge", "title": "Knowledge Association with Hyperbolic Knowledge Graph Embeddings", "abstract": "Capturing associations for knowledge graphs (KGs) through entity alignment, entity type inference and other related tasks benefits NLP applications with comprehensive knowledge representations. Recent related methods built on Euclidean embeddings are challenged by the hierarchical structures and different scales of KGs. They also depend on high embedding dimensions to realize enough expressiveness. Differently, we explore with low-dimensional hyperbolic embeddings for knowledge association. We propose a hyperbolic relational graph neural network for KG embedding and capture knowledge associations with a hyperbolic transformation. Extensive experiments on entity alignment and type inference demonstrate the effectiveness and efficiency of our method.", "keyphrases": ["entity alignment", "hyperbolic relational graph", "knowledge association"]} +{"id": "lala-specia-2018-multimodal", "title": "Multimodal Lexical Translation", "abstract": "Inspired by the tasks of Multimodal Machine Translation and Visual Sense Disambiguation we introduce a task called Multimodal Lexical Translation (MLT). The aim of this new task is to correctly translate an ambiguous word given its context - an image and a sentence in the source language. To facilitate the task, we introduce the MLT dataset, where each data point is a 4-tuple consisting of an ambiguous source word, its visual context (an image), its textual context (a source sentence), and its translation that conforms with the visual and textual contexts. The dataset has been created from the Multi30K corpus using word-alignment followed by human inspection for translations from English to German and English to French. We also introduce a simple heuristic to quantify the extent of the ambiguity of a word from the distribution of its translations and use it to select subsets of the MLT Dataset which are difficult to translate. These form a valuable multimodal and multilingual language resource with several potential uses including evaluation of lexical disambiguation within (Multimodal) Machine Translation systems.", "keyphrases": ["mlt", "ambiguity", "visual context", "multimodal lexical translation"]} +{"id": "nguyen-etal-2017-word", "title": "From Word Segmentation to POS Tagging for Vietnamese", "abstract": "This paper presents an empirical comparison of two strategies for Vietnamese Part-of-Speech (POS) tagging from unsegmented text: (i) a pipeline strategy where we consider the output of a word segmenter as the input of a POS tagger, and (ii) a joint strategy where we predict a combined segmentation and POS tag for each syllable. We also make a comparison between state-of-the-art (SOTA) feature-based and neural network-based models.
On the benchmark Vietnamese treebank (Nguyen et al., 2009), experimental results show that the pipeline strategy produces better scores of POS tagging from unsegmented text than the joint strategy, and the highest accuracy is obtained by using a feature-based model.", "keyphrases": ["word segmentation", "pos tagging", "vietnamese"]} +{"id": "zhu-etal-2018-msmo", "title": "MSMO: Multimodal Summarization with Multimodal Output", "abstract": "Multimodal summarization has drawn much attention due to the rapid growth of multimedia data. The output of the current multimodal summarization systems is usually represented in texts. However, we have found through experiments that multimodal output can significantly improve user satisfaction for informativeness of summaries. In this paper, we propose a novel task, multimodal summarization with multimodal output (MSMO). To handle this task, we first collect a large-scale dataset for MSMO research. We then propose a multimodal attention model to jointly generate text and select the most relevant image from the multimodal input. Finally, to evaluate multimodal outputs, we construct a novel multimodal automatic evaluation (MMAE) method which considers both intra-modality salience and inter-modality relevance. The experimental results show the effectiveness of MMAE.", "keyphrases": ["summarization", "multimodal output", "msmo", "video"]} +{"id": "gupta-etal-2017-entity", "title": "Entity Linking via Joint Encoding of Types, Descriptions, and Context", "abstract": "For accurate entity linking, we need to capture various information aspects of an entity, such as its description in a KB, contexts in which it is mentioned, and structured knowledge. Additionally, a linking system should work on texts from different domains without requiring domain-specific training data or hand-engineered features. In this work we present a neural, modular entity linking system that learns a unified dense representation for each entity using multiple sources of information, such as its description, contexts around its mentions, and its fine-grained types. We show that the resulting entity linking system is effective at combining these sources, and performs competitively, sometimes out-performing current state-of-the-art systems across datasets, without requiring any domain-specific training data or hand-engineered features. We also show that our model can effectively \u201cembed\u201d entities that are new to the KB, and is able to link its mentions accurately.", "keyphrases": ["joint encoding", "domain-specific training data", "hand-engineered feature", "entity linking"]} +{"id": "bramsen-etal-2006-inducing", "title": "Inducing Temporal Graphs", "abstract": "We consider the problem of constructing a directed acyclic graph that encodes temporal relations found in a text. The unit of our analysis is a temporal segment, a fragment of text that maintains temporal coherence. The strength of our approach lies in its ability to simultaneously optimize pairwise ordering preferences and global constraints on the graph topology. 
Our learning method achieves 83% F-measure in temporal segmentation and 84% accuracy in inferring temporal relations between two segments.", "keyphrases": ["acyclic graph", "segment", "integer linear programming", "global information", "temporal structure"]} +{"id": "zettlemoyer-collins-2007-online", "title": "Online Learning of Relaxed CCG Grammars for Parsing to Logical Form", "abstract": "We consider the problem of learning to parse sentences to lambda-calculus representations of their underlying semantics and present an algorithm that learns a weighted combinatory categorial grammar (CCG). A key idea is to introduce non-standard CCG combinators that relax certain parts of the grammar\u2014for example allowing flexible word order, or insertion of lexical items\u2014with learned costs. We also present a new, online algorithm for inducing a weighted CCG. Results for the approach on ATIS data show 86% F-measure in recovering fully correct semantic analyses and 95.9% F-measure by a partial-match criterion, a more than 5% improvement over the 90.3% partial-match figure reported by He and Young (2006).", "keyphrases": ["ccg", "semantic parser", "learning algorithm", "much work", "query"]} +{"id": "wang-cho-2016-larger", "title": "Larger-Context Language Modelling with Recurrent Neural Network", "abstract": "In this work, we propose a novel method to incorporate corpus-level discourse information into language modelling. We call this larger-context language model. We introduce a late fusion approach to a recurrent language model based on long short-term memory units (LSTM), which helps the LSTM unit keep intra-sentence dependencies and inter-sentence dependencies separate from each other. Through the evaluation on three corpora (IMDB, BBC, and Penn Tree Bank), we demonstrate that the proposed model improves perplexity significantly. In the experiments, we evaluate the proposed approach while varying the number of context sentences and observe that the proposed late fusion is superior to the usual way of incorporating additional inputs to the LSTM. By analyzing the trained larger-context language model, we discover that content words, including nouns, adjectives and verbs, benefit most from an increasing number of context sentences. This analysis suggests that larger-context language model improves the unconditional language model by capturing the theme of a document better and more easily.", "keyphrases": ["language modelling", "corpus-level discourse information", "context information", "thread"]} +{"id": "jehl-etal-2014-source", "title": "Source-side Preordering for Translation using Logistic Regression and Depth-first Branch-and-Bound Search", "abstract": "We present a simple preordering approach for machine translation based on a feature-rich logistic regression model to predict whether two children of the same node in the source-side parse tree should be swapped or not. Given the pair-wise children regression scores we conduct an efficient depth-first branch-and-bound search through the space of possible children permutations, avoiding using a cascade of classifiers or limiting the list of possible ordering outcomes.
We report experiments in translating English to Japanese and Korean, demonstrating superior performance as (a) the number of crossing links drops by more than 10% absolute with respect to other state-of-the-art preordering approaches, (b) BLEU scores improve on 2.2 points over the baseline with lexicalised reordering model, and (c) decoding can be carried out 80 times faster.", "keyphrases": ["depth-first branch-and-bound search", "logistic regression model", "parse tree"]} +{"id": "shwartz-dagan-2016-path", "title": "Path-based vs. Distributional Information in Recognizing Lexical Semantic Relations", "abstract": "Recognizing various semantic relations between terms is beneficial for many NLP tasks. While path-based and distributional information sources are considered complementary for this task, the superior results the latter showed recently suggested that the former's contribution might have become obsolete. We follow the recent success of an integrated neural method for hypernymy detection (Shwartz et al., 2016) and extend it to recognize multiple relations. The empirical results show that this method is effective in the multiclass setting as well. We further show that the path-based information source always contributes to the classification, and analyze the cases in which it mostly complements the distributional information.", "keyphrases": ["distributional information", "multiple relation", "path-based approach"]} +{"id": "liu-etal-2013-modeling", "title": "Modeling Collaborative Referring for Situated Referential Grounding", "abstract": "In situated dialogue, because humans and agents have mismatched capabilities of perceiving the shared physical world, referential grounding becomes difficult. Humans and agents will need to make extra efforts by collaborating with each other to mediate a shared perceptual basis and to come to a mutual understanding of intended referents in the environment. In this paper, we have extended our previous graph-matching based approach to explicitly incorporate collaborative referring behaviors into the referential grounding algorithm. In addition, hypergraph-based representations have been used to account for group descriptions that are likely to occur in spatial communications. Our empirical results have shown that incorporating the most prevalent pattern of collaboration with our hypergraph-based approach significantly improves reference resolution in situated dialogue by an absolute gain of over 18%.", "keyphrases": ["referential grounding", "grounding algorithm", "communication", "reference resolution"]} +{"id": "fevry-phang-2018-unsupervised", "title": "Unsupervised Sentence Compression using Denoising Auto-Encoders", "abstract": "In sentence compression, the task of shortening sentences while retaining the original meaning, models tend to be trained on large corpora containing pairs of verbose and compressed sentences. To remove the need for paired corpora, we emulate a summarization task and add noise to extend sentences and train a denoising auto-encoder to recover the original, constructing an end-to-end training regime without the need for any examples of compressed sentences. We conduct a human evaluation of our model on a standard text summarization dataset and show that it performs comparably to a supervised baseline based on grammatical correctness and retention of meaning. Despite being exposed to no target data, our unsupervised models learn to generate imperfect but reasonably readable sentence summaries. 
Although we underperform supervised models based on ROUGE scores, our models are competitive with a supervised baseline based on human evaluation for grammatical correctness and retention of meaning.", "keyphrases": ["sentence compression", "noise", "autoencoder"]} +{"id": "habash-etal-2005-morphological", "title": "Morphological Analysis and Generation for Arabic Dialects", "abstract": "We present Magead, a morphological analyzer and generator for the Arabic language family. Our work is novel in that it explicitly addresses the need for processing the morphology of the dialects. Magead provides an analysis to a root+pattern representation, it has separate phonological and orthographic representations, and it allows for combining morphemes from different dialects.", "keyphrases": ["arabic dialect", "language family", "levantine arabic"]} +{"id": "moore-2004-improving", "title": "Improving IBM Word Alignment Model 1", "abstract": "We investigate a number of simple methods for improving the word-alignment accuracy of IBM Model 1. We demonstrate reduction in alignment error rate of approximately 30% resulting from (1) giving extra weight to the probability of alignment to the null word, (2) smoothing probability estimates for rare words, and (3) using a simple heuristic estimation method to initialize, or replace, EM training of model parameters.", "keyphrases": ["ibm model", "alignment error rate", "null word"]} +{"id": "decadt-etal-2004-gambl", "title": "GAMBL, genetic algorithm optimization of memory-based WSD", "abstract": "GAMBL is a word expert approach to WSD in which each word expert is trained using memory based learning. Joint feature selection and algorithm parameter optimization are achieved with a genetic algorithm (GA). We use a cascaded classifier approach in which the GA optimizes local context features and the output of a separate keyword classifier (rather than also optimizing the keyword features together with the local context features). A further innovation on earlier versions of memory based WSD is the use of grammatical relation and chunk features. This paper presents the architecture of the system briefly, and discusses its performance on the English lexical sample and all words tasks in SENSEVAL-3.", "keyphrases": ["genetic algorithm", "wsd", "gambl", "memory-based classifier"]} +{"id": "hu-etal-2019-improved", "title": "Improved Lexically Constrained Decoding for Translation and Monolingual Rewriting", "abstract": "Lexically-constrained sequence decoding allows for explicit positive or negative phrase-based constraints to be placed on target output strings in generation tasks such as machine translation or monolingual text rewriting. We describe vectorized dynamic beam allocation, which extends work in lexically-constrained decoding to work with batching, leading to a five-fold improvement in throughput when working with positive constraints. 
Faster decoding enables faster exploration of constraint strategies: we illustrate this via data augmentation experiments with a monolingual rewriter applied to the tasks of natural language inference, question answering and machine translation, showing improvements in all three.", "keyphrases": ["decoding", "lexical constraint", "unconstrained generation", "same approach"]} +{"id": "zhang-vogel-2005-efficient", "title": "An efficient phrase-to-phrase alignment model for arbitrarily long phrase and large corpora", "abstract": "Most statistical machine translation (SMT) systems use phrase-to-phrase translations to capture local context information, leading to better lexical choices and more reliable word reordering. Long phrases capture more contexts than short phrases and result in better translation qualities. On the other hand, the increasing amount of bilingual data poses serious problems for storing all possible phrases. In this paper, we describe a novel phrase-to-phrase alignment model which allows for arbitrarily long phrases and works for very large bilingual corpora. This model is very efficient in both time and space and the resulting translations are better than the state-of-the-art systems.", "keyphrases": ["long phrase", "bilingual data", "suffix array", "fly"]} +{"id": "bhatia-etal-2019-joint", "title": "Joint Entity Extraction and Assertion Detection for Clinical Text", "abstract": "Negative medical findings are prevalent in clinical reports, yet discriminating them from positive findings remains a challenging task for information extraction. Most of the existing systems treat this task as a pipeline of two separate tasks, i.e., named entity recognition (NER) and rule-based negation detection. We consider this as a multi-task problem and present a novel end-to-end neural model to jointly extract entities and negations. We extend a standard hierarchical encoder-decoder NER model and first adopt a shared encoder followed by separate decoders for the two tasks. This architecture performs considerably better than the previous rule-based and machine learning-based systems. To overcome the problem of increased parameter size especially for low-resource settings, we propose the Conditional Softmax Shared Decoder architecture which achieves state-of-art results for NER and negation detection on the 2010 i2b2/VA challenge dataset and a proprietary de-identified clinical dataset.", "keyphrases": ["assertion detection", "negation", "joint entity extraction"]} +{"id": "trivedi-etal-2018-iit", "title": "IIT (BHU) Submission for the ACL Shared Task on Named Entity Recognition on Code-switched Data", "abstract": "This paper describes the best performing system for the shared task on Named Entity Recognition (NER) on code-switched data for the language pair Spanish-English (ENG-SPA). We introduce a gated neural architecture for the NER task. Our final model achieves an F1 score of 63.76%, outperforming the baseline by 10%.", "keyphrases": ["entity recognition", "noisy mixed-language text", "word embedding"]} +{"id": "eric-etal-2017-key", "title": "Key-Value Retrieval Networks for Task-Oriented Dialogue", "abstract": "Neural task-oriented dialogue systems often struggle to smoothly interface with a knowledge base. In this work, we seek to address this problem by proposing a new neural dialogue agent that is able to effectively sustain grounded, multi-domain discourse through a novel key-value retrieval mechanism.
The model is end-to-end differentiable and does not need to explicitly model dialogue state or belief trackers. We also release a new dataset of 3,031 dialogues that are grounded through underlying knowledge bases and span three distinct tasks in the in-car personal assistant space: calendar scheduling, weather information retrieval, and point-of-interest navigation. Our architecture is simultaneously trained on data from all domains and significantly outperforms a competitive rule-based system and other existing neural dialogue architectures on the provided domains according to both automatic and human evaluation metrics.", "keyphrases": ["dialogue system", "multi-domain discourse", "key-value retrieval mechanism"]} +{"id": "sedoc-etal-2019-chateval", "title": "ChatEval: A Tool for Chatbot Evaluation", "abstract": "Open-domain dialog systems (i.e. chatbots) are difficult to evaluate. The current best practice for analyzing and comparing these dialog systems is the use of human judgments. However, the lack of standardization in evaluation procedures, and the fact that model parameters and code are rarely published hinder systematic human evaluation experiments. We introduce a unified framework for human evaluation of chatbots that augments existing tools and provides a web-based hub for researchers to share and compare their dialog systems. Researchers can submit their trained models to the ChatEval web interface and obtain comparisons with baselines and prior work. The evaluation code is open-source to ensure standardization and transparency. In addition, we introduce open-source baseline models and evaluation datasets. ChatEval can be found at .", "keyphrases": ["dialog system", "human evaluation", "chateval"]} +{"id": "johnson-2007-doesnt", "title": "Why Doesn't EM Find Good HMM POS-Taggers?", "abstract": "This paper investigates why the HMMs estimated by Expectation-Maximization (EM) produce such poor results as Part-of-Speech (POS) taggers. We find that the HMMs estimated by EM generally assign a roughly equal number of word tokens to each hidden state, while the empirical distribution of tokens to POS tags is highly skewed. This motivates a Bayesian approach using a sparse prior to bias the estimator toward such a skewed distribution. We investigate Gibbs Sampling (GS) and Variational Bayes (VB) estimators and show that VB converges faster than GS for this task and that VB significantly improves 1-to-1 tagging accuracy over EM. We also show that EM does nearly as well as VB when the number of hidden HMM states is dramatically reduced. We also point out the high variance in all of these estimators, and that they require many more iterations to approach convergence than usually thought.", "keyphrases": ["hmm", "hidden state", "pos tagging"]} +{"id": "hopkins-may-2011-tuning", "title": "Tuning as Ranking", "abstract": "We offer a simple, effective, and scalable method for statistical machine translation parameter tuning based on the pairwise approach to ranking (Herbrich et al., 1999). Unlike the popular MERT algorithm (Och, 2003), our pairwise ranking optimization (PRO) method is not limited to a handful of parameters and can easily handle systems with thousands of features. Moreover, unlike recent approaches built upon the MIRA algorithm of Crammer and Singer (2003) (Watanabe et al., 2007; Chiang et al., 2008b), PRO is easy to implement. It uses off-the-shelf linear binary classifier software and can be built on top of an existing MERT framework in a matter of hours. 
We establish PRO's scalability and effectiveness by comparing it to MERT and MIRA and demonstrate parity on both phrase-based and syntax-based systems in a variety of language pairs, using large scale data scenarios.", "keyphrases": ["pairwise ranking optimization", "pro", "thousand"]} +{"id": "duh-kirchhoff-2008-beyond", "title": "Beyond Log-Linear Models: Boosted Minimum Error Rate Training for N-best Re-ranking", "abstract": "Current re-ranking algorithms for machine translation rely on log-linear models, which have the potential problem of underfitting the training data. We present BoostedMERT, a novel boosting algorithm that uses Minimum Error Rate Training (MERT) as a weak learner and builds a re-ranker far more expressive than log-linear models. BoostedMERT is easy to implement, inherits the efficient optimization properties of MERT, and can quickly boost the BLEU score on N-best re-ranking tasks. In this paper, we describe the general algorithm and present preliminary results on the IWSLT 2007 Arabic-English task.", "keyphrases": ["log-linear model", "boostedmert", "weak learner", "bleu score", "beam search"]} +{"id": "li-etal-2012-active-learning", "title": "Active Learning for Imbalanced Sentiment Classification", "abstract": "Active learning is a promising way for sentiment classification to reduce the annotation cost. In this paper, we focus on the imbalanced class distribution scenario for sentiment classification, wherein the number of positive samples is quite different from that of negative samples. This scenario posits new challenges to active learning. To address these challenges, we propose a novel active learning approach, named co-selecting, by taking both the imbalanced class distribution issue and uncertainty into account. Specifically, our co-selecting approach employs two feature subspace classifiers to collectively select most informative minority-class samples for manual annotation by leveraging a certainty measurement and an uncertainty measurement, and in the meanwhile, automatically label most informative majority-class samples, to reduce human-annotation efforts. Extensive experiments across four domains demonstrate great potential and effectiveness of our proposed co-selecting approach to active learning for imbalanced sentiment classification.", "keyphrases": ["imbalanced sentiment classification", "sample", "manual annotation", "active learning"]} +{"id": "haghighi-klein-2007-unsupervised", "title": "Unsupervised Coreference Resolution in a Nonparametric Bayesian Model", "abstract": "We present an unsupervised, nonparametric Bayesian approach to coreference resolution which models both global entity identity across a corpus as well as the sequential anaphoric structure within each document. While most existing coreference work is driven by pairwise decisions, our model is fully generative, producing each mention from a combination of global entity properties and local attentional state. Despite being unsupervised, our system achieves a 70.3 MUC F1 measure on the MUC-6 test set, broadly in the range of some recent supervised results.", "keyphrases": ["nonparametric bayesian model", "unsupervised coreference resolution", "pronoun", "good result"]} +{"id": "marge-rudnicky-2010-comparing", "title": "Comparing Spoken Language Route Instructions for Robots across Environment Representations", "abstract": "Spoken language interaction between humans and robots in natural environments will necessarily involve communication about space and distance. 
The current study examines people's close-range route instructions for robots and how the presentation format (schematic, virtual or natural) and the complexity of the route affect the content of instructions. We find that people have a general preference for providing metric-based instructions. At the same time, presentation format appears to have less impact on the formulation of these instructions. We conclude that understanding of spatial language requires handling both landmark-based and metric-based expressions.", "keyphrases": ["route instruction", "robot", "communication"]} +{"id": "lin-etal-2009-recognizing", "title": "Recognizing Implicit Discourse Relations in the Penn Discourse Treebank", "abstract": "We present an implicit discourse relation classifier in the Penn Discourse Treebank (PDTB). Our classifier considers the context of the two arguments, word pair information, as well as the arguments' internal constituent and dependency parses. Our results on the PDTB yields a significant 14.1% improvement over the baseline. In our error analysis, we discuss four challenges in recognizing implicit relations in the PDTB.", "keyphrases": ["penn discourse treebank", "implicit relation", "cross-argument word pair"]} +{"id": "shen-etal-2018-baseline", "title": "Baseline Needs More Love: On Simple Word-Embedding-Based Models and Associated Pooling Mechanisms", "abstract": "Many deep learning architectures have been proposed to model the compositionality in text sequences, requiring substantial number of parameters and expensive computations. However, there has not been a rigorous evaluation regarding the added value of sophisticated compositional functions. In this paper, we conduct a point-by-point comparative study between Simple Word-Embedding-based Models (SWEMs), consisting of parameter-free pooling operations, relative to word-embedding-based RNN/CNN models. Surprisingly, SWEMs exhibit comparable or even superior performance in the majority of cases considered. Based upon this understanding, we propose two additional pooling strategies over learned word embeddings: (i) a max-pooling operation for improved interpretability; and (ii) a hierarchical pooling operation, which preserves spatial (n-gram) information within text sequences. We present experiments on 17 datasets encompassing three tasks: (i) (long) document classification; (ii) text sequence matching; and (iii) short text tasks, including classification and tagging.", "keyphrases": ["simple", "superior performance", "word embedding", "hierarchical pooling operation", "text sequence matching"]} +{"id": "romeo-etal-2014-choosing", "title": "Choosing which to use? A study of distributional models for nominal lexical semantic classification", "abstract": "This paper empirically evaluates the performances of different state-of-the-art distributional models in a nominal lexical semantic classification task. We consider models that exploit various types of distributional features, which thereby provide different representations of nominal behavior in context. The experiments presented in this work demonstrate the advantages and disadvantages of each model considered. 
This analysis also considers a combined strategy that we found to be capable of leveraging the bottlenecks of each model, especially when large robust data is not available.", "keyphrases": ["distributional model", "classification task", "bottleneck"]} +{"id": "he-etal-2018-unsupervised", "title": "Unsupervised Learning of Syntactic Structure with Invertible Neural Projections", "abstract": "Unsupervised learning of syntactic structure is typically performed using generative models with discrete latent variables and multinomial parameters. In most cases, these models have not leveraged continuous word representations. In this work, we propose a novel generative model that jointly learns discrete syntactic structure and continuous word representations in an unsupervised fashion by cascading an invertible neural network with a structured generative prior. We show that the invertibility condition allows for efficient exact inference and marginal likelihood computation in our model so long as the prior is well-behaved. In experiments we instantiate our approach with both Markov and tree-structured priors, evaluating on two tasks: part-of-speech (POS) induction, and unsupervised dependency parsing without gold POS annotation. On the Penn Treebank, our Markov-structured model surpasses state-of-the-art results on POS induction. Similarly, we find that our tree-structured model achieves state-of-the-art performance on unsupervised dependency parsing for the difficult training condition where neither gold POS annotation nor punctuation-based constraints are available.", "keyphrases": ["syntactic structure", "part-of-speech", "induction", "unsupervised learning"]} +{"id": "derczynski-etal-2013-twitter", "title": "Twitter Part-of-Speech Tagging for All: Overcoming Sparse and Noisy Data", "abstract": "Part-of-speech information is a pre-requisite in many NLP algorithms. However, Twitter text is difficult to part-of-speech tag: it is noisy, with linguistic errors and idiosyncratic style. We present a detailed error analysis of existing taggers, motivating a series of tagger augmentations which are demonstrated to improve performance. We identify and evaluate techniques for improving English part-of-speech tagging performance in this genre. Further, we present a novel approach to system combination for the case where available taggers use different tagsets, based on vote-constrained bootstrapping with unlabeled data. Coupled with assigning prior probabilities to some tokens and handling of unknown words and slang, we reach 88.7% tagging accuracy (90.5% on development data). This is a new high in PTB-compatible tweet part-of-speech tagging, reducing token error by 26.8% and sentence error by 12.2%. The model, training data and tools are made available.", "keyphrases": ["probability", "pos tagger", "english tweet", "gate twitie"]} +{"id": "bentivogli-etal-2016-neural", "title": "Neural versus Phrase-Based Machine Translation Quality: a Case Study", "abstract": "Within the field of Statistical Machine Translation (SMT), the neural approach (NMT) has recently emerged as the first technology able to challenge the long-standing dominance of phrase-based approaches (PBMT). In particular, at the IWSLT 2015 evaluation campaign, NMT outperformed well established state-of-the-art PBMT systems on English-German, a language pair known to be particularly hard because of morphology and syntactic differences.
To understand in what respects NMT provides better translation quality than PBMT, we perform a detailed analysis of neural versus phrase-based SMT outputs, leveraging high quality post-edits performed by professional translators on the IWSLT data. For the first time, our analysis provides useful insights on what linguistic phenomena are best modeled by neural models -- such as the reordering of verbs -- while pointing out other aspects that remain to be improved.", "keyphrases": ["machine translation", "phrase-based smt", "post-editing", "professional translator", "negation"]} +{"id": "yu-etal-2016-retrofitting", "title": "Retrofitting Word Vectors of MeSH Terms to Improve Semantic Similarity Measures", "abstract": "Estimation of the semantic relatedness between biomedical concepts has utility for many informatics applications. Automated methods fall into two broad categories: methods based on distributional statistics drawn from text corpora, and methods based on the structure of existing knowledge resources. In the former case, taxonomic structure is disregarded. In the latter, semantically relevant empirical information is not considered. In this paper, we present a method that retrofits the context vector representation of MeSH terms by using additional linkage information from UMLS/MeSH hierarchy such that linked concepts have similar vector representations. We evaluated the method relative to previously published physician and coder\u2019s ratings on sets of MeSH terms. Our experimental results demonstrate that the retrofitted word vector measures obtain a higher correlation with physician judgments. The results also demonstrate a clear improvement on the correlation with experts\u2019 ratings from the retrofitted vector representation in comparison to the vector representation without retrofitting.", "keyphrases": ["mesh term", "biomedical concept", "additional linkage information"]} +{"id": "virga-khudanpur-2003-transliteration", "title": "Transliteration of Proper Names in Cross-Lingual Information Retrieval", "abstract": "We address the problem of transliterating English names using Chinese orthography in support of cross-lingual speech and text processing applications. We demonstrate the application of statistical machine translation techniques to \"translate\" the phonemic representation of an English name, obtained by using an automatic text-to-speech system, to a sequence of initials and finals, commonly used sub-word units of pronunciation for Chinese. We then use another statistical translation model to map the initial/final sequence to Chinese characters. We also present an evaluation of this module in retrieval of Mandarin spoken documents from the TDT corpus using English text queries.", "keyphrases": ["name", "sub-word unit", "mandarin", "text query", "transliteration"]} +{"id": "shardlow-2013-cw", "title": "The CW Corpus: A New Resource for Evaluating the Identification of Complex Words", "abstract": "The task of identifying complex words (CWs) is important for lexical simplification, however it is often carried out with no evaluation of success. There is no basis for comparison of current techniques and, prior to this work, there has been no standard corpus or evaluation technique for the CW identification task. This paper addresses these shortcomings with a new corpus for evaluating a system\u2019s performance in identifying CWs. Simple Wikipedia edit histories were mined for instances of single word lexical simplifications.
The corpus contains 731 sentences, each with one annotated CW. This paper describes the method used to produce the CW corpus and presents the results of evaluation, showing its validity.", "keyphrases": ["identification", "complex word", "simple wikipedia", "edit history"]} +{"id": "holla-etal-2020-learning", "title": "Learning to Learn to Disambiguate: Meta-Learning for Few-Shot Word Sense Disambiguation", "abstract": "The success of deep learning methods hinges on the availability of large training datasets annotated for the task of interest. In contrast to human intelligence, these methods lack versatility and struggle to learn and adapt quickly to new tasks, where labeled data is scarce. Meta-learning aims to solve this problem by training a model on a large number of few-shot tasks, with an objective to learn new tasks quickly from a small number of examples. In this paper, we propose a meta-learning framework for few-shot word sense disambiguation (WSD), where the goal is to learn to disambiguate unseen words from only a few labeled instances. Meta-learning approaches have so far been typically tested in an N-way, K-shot classification setting where each task has N classes with K examples per class. Owing to its nature, WSD deviates from this controlled setup and requires the models to handle a large number of highly unbalanced classes. We extend several popular meta-learning approaches to this scenario, and analyze their strengths and weaknesses in this new challenging setting.", "keyphrases": ["word sense disambiguation", "meta-learning approach", "n-way"]} +{"id": "farajian-etal-2017-multi", "title": "Multi-Domain Neural Machine Translation through Unsupervised Adaptation", "abstract": "We investigate the application of Neural Machine Translation (NMT) under the following three conditions posed by real-world application scenarios. First, we operate with an input stream of sentences coming from many different domains and with no predefined order. Second, the sentences are presented without domain information. Third, the input stream should be processed by a single generic NMT model. To tackle the weaknesses of current NMT technology in this unsupervised multi-domain setting, we explore an efficient instance-based adaptation method that, by exploiting the similarity between the training instances and each test sentence, dynamically sets the hyperparameters of the learning algorithm and updates the generic model on-the-fly. The results of our experiments with multi-domain data show that local adaptation outperforms not only the original generic NMT system, but also a strong phrase-based system and even single-domain NMT models specifically optimized on each domain and applicable only by violating two of our aforementioned assumptions.", "keyphrases": ["neural machine translation", "adaptation", "learning algorithm", "n-gram", "similar sentence"]} +{"id": "tatu-srikanth-2008-experiments", "title": "Experiments with Reasoning for Temporal Relations between Events", "abstract": "Few attempts have been made to investigate the utility of temporal reasoning within machine learning frameworks for temporal relation classification between events in news articles. This paper presents three settings where temporal reasoning aids machine learned classifiers of temporal relations: (1) expansion of the dataset used for learning; (2) detection of inconsistencies among the automatically identified relations; and (3) selection among multiple temporal relations.
Feature engineering is another effort in our work to improve classification accuracy.", "keyphrases": ["reasoning", "temporal information processing", "summarization"]} +{"id": "melville-sindhwani-2009-active", "title": "Active Dual Supervision: Reducing the Cost of Annotating Examples and Features", "abstract": "When faced with the task of building machine learning or NLP models, it is often worthwhile to turn to active learning to obtain human annotations at minimal costs. Traditional active learning schemes query a human for labels of intelligently chosen examples. However, human effort can also be expended in collecting alternative forms of annotations. For example, one may attempt to learn a text classifier by labeling class-indicating words, instead of, or in addition to, documents. Learning from two different kinds of supervision brings a new, unexplored dimension to the problem of active learning. In this paper, we demonstrate the value of such active dual supervision in the context of sentiment analysis. We show how interleaving queries for both documents and words significantly reduces human effort -- more than what is possible through traditional one-dimensional active learning, or by passive combinations of supervisory inputs.", "keyphrases": ["cost", "annotator", "active dual supervision"]} +{"id": "koller-etal-2008-regular", "title": "Regular Tree Grammars as a Formalism for Scope Underspecification", "abstract": "We propose the use of regular tree grammars (RTGs) as a formalism for the underspecified processing of scope ambiguities. By applying standard results on RTGs, we obtain a novel algorithm for eliminating equivalent readings and the first efficient algorithm for computing the best reading of a scope ambiguity. We also show how to derive RTGs from more traditional underspecified descriptions.", "keyphrases": ["scope underspecification", "reading", "regular tree grammars"]} +{"id": "su-etal-2018-natural", "title": "Natural Language Generation by Hierarchical Decoding with Linguistic Patterns", "abstract": "Natural language generation (NLG) is a critical component in spoken dialogue systems. Classic NLG can be divided into two phases: (1) sentence planning: deciding on the overall sentence structure, (2) surface realization: determining specific word forms and flattening the sentence structure into a string. Many simple NLG models are based on recurrent neural networks (RNN) and sequence-to-sequence (seq2seq) model, which basically contains an encoder-decoder structure; these NLG models generate sentences from scratch by jointly optimizing sentence planning and surface realization using a simple cross entropy loss training criterion. However, the simple encoder-decoder architecture usually suffers from generating complex and long sentences, because the decoder has to learn all grammar and diction knowledge. This paper introduces a hierarchical decoding NLG model based on linguistic patterns in different levels, and shows that the proposed method outperforms the traditional one with a smaller model size.
Furthermore, the design of the hierarchical decoding is flexible and easily-extendible in various NLG systems.", "keyphrases": ["hierarchical decoding", "nlg", "sequence-to-sequence", "natural language generation"]} +{"id": "ganchev-das-2013-cross", "title": "Cross-Lingual Discriminative Learning of Sequence Models with Posterior Regularization", "abstract": "We present a framework for cross-lingual transfer of sequence information from a resource-rich source language to a resource-impoverished target language that incorporates soft constraints via posterior regularization. To this end, we use automatically word aligned bitext between the source and target language pair, and learn a discriminative conditional random field model on the target side. Our posterior regularization constraints are derived from simple intuitions about the task at hand and from cross-lingual alignment information. We show improvements over strong baselines for two tasks: part-of-speech tagging and named-entity segmentation.", "keyphrases": ["posterior regularization", "lingual projection", "cross"]} +{"id": "press-wolf-2017-using", "title": "Using the Output Embedding to Improve Language Models", "abstract": "We study the topmost weight matrix of neural network language models. We show that this matrix constitutes a valid word embedding. When training language models, we recommend tying the input embedding and this output embedding. We analyze the resulting update rules and show that the tied embedding evolves in a more similar way to the output embedding than to the input embedding in the untied model. We also offer a new method of regularizing the output embedding. Our methods lead to a significant reduction in perplexity, as we are able to show on a variety of neural network language models. Finally, we show that weight tying can reduce the size of neural translation models to less than half of their original size without harming their performance.", "keyphrases": ["output embedding", "language model", "perplexity"]} +{"id": "zhou-etal-2018-dataset", "title": "A Dataset for Document Grounded Conversations", "abstract": "This paper introduces a document grounded dataset for conversations. We define \u201cDocument Grounded Conversations\u201d as conversations that are about the contents of a specified document. In this dataset the specified documents were Wikipedia articles about popular movies. The dataset contains 4112 conversations with an average of 21.43 turns per conversation. This positions this dataset to not only provide a relevant chat history while generating responses but also provide a source of information that the models could use. We describe two neural architectures that provide benchmark performance on the task of generating the next response. We also evaluate our models for engagement and fluency, and find that the information from the document helps in generating more engaging and fluent responses.", "keyphrases": ["conversation", "wikipedia article", "popular movie", "knowledge grounding", "participant"]} +{"id": "ravi-knight-2011-deciphering", "title": "Deciphering Foreign Language", "abstract": "In this work, we tackle the task of machine translation (MT) without parallel training data.
We frame the MT problem as a decipherment task, treating the foreign text as a cipher for English and present novel methods for training translation models from non-parallel text.", "keyphrases": ["foreign language", "decipherment", "monolingual data", "other work"]} +{"id": "lambert-banchs-2005-data", "title": "Data Inferred Multi-word Expressions for Statistical Machine Translation", "abstract": "This paper presents a strategy for detecting and using multi-word expressions in Statistical Machine Translation. Performance of the proposed strategy is evaluated in terms of alignment quality as well as translation accuracy. Evaluations are performed by using the Verbmobil corpus. Results from translation tasks from English-to-Spanish and from Spanish-to-English are presented and discussed.", "keyphrases": ["multi-word expression", "statistical machine translation", "mwes", "unit", "semantic perspective"]} +{"id": "gupta-etal-2021-vita", "title": "ViTA: Visual-Linguistic Translation by Aligning Object Tags", "abstract": "Multimodal Machine Translation (MMT) enriches the source text with visual information for translation. It has gained popularity in recent years, and several pipelines have been proposed in the same direction. Yet, the task lacks quality datasets to illustrate the contribution of visual modality in the translation systems. In this paper, we propose our system under the team name Volta for the Multimodal Translation Task of WAT 2021 from English to Hindi. We also participate in the textual-only subtask of the same language pair for which we use mBART, a pretrained multilingual sequence-to-sequence model. For multimodal translation, we propose to enhance the textual input by bringing the visual information to a textual domain by extracting object tags from the image. We also explore the robustness of our system by systematically degrading the source text. Finally, we achieve a BLEU score of 44.6 and 51.6 on the test set and challenge set of the multimodal task.", "keyphrases": ["object tag", "visual information", "textual domain"]} +{"id": "small-etal-2003-hitiqa", "title": "HITIQA: An Interactive Question Answering System: A Preliminary Report", "abstract": "HITIQA is an interactive question answering technology designed to allow intelligence analysts and other users of information systems to pose questions in natural language and obtain relevant answers, or the assistance they require in order to perform their tasks. Our objective in HITIQA is to allow the user to submit exploratory, analytical, non-factual questions, such as \"What has been Russia's reaction to U.S. bombing of Kosovo?\" The distinguishing property of such questions is that one cannot generally anticipate what might constitute the answer. While certain types of things may be expected (e.g., diplomatic statements), the answer is heavily conditioned by what information is in fact available on the topic. From a practical viewpoint, analytical questions are often under-specified, thus casting a broad net on a space of possible answers. Therefore, clarification dialogue is often needed to negotiate with the user the exact scope and intent of the question.", "keyphrases": ["interactive question", "objective", "hitiqa"]} +{"id": "dasgupta-etal-2013-summarization", "title": "Summarization Through Submodularity and Dispersion", "abstract": "We propose a new optimization framework for summarization by generalizing the submodular framework of (Lin and Bilmes, 2011). 
In our framework the summarization desideratum is expressed as a sum of a submodular function and a nonsubmodular function, which we call dispersion; the latter uses inter-sentence dissimilarities in different ways in order to ensure non-redundancy of the summary. We consider three natural dispersion functions and show that a greedy algorithm can obtain an approximately optimal summary in all three cases. We conduct experiments on two corpora\u2014DUC 2004 and user comments on news articles\u2014and show that the performance of our algorithm outperforms those that rely only on submodularity.", "keyphrases": ["submodularity", "function", "greedy algorithm", "summarization"]} +{"id": "zhu-etal-2021-hitsz", "title": "HITSZ-HLT at SemEval-2021 Task 5: Ensemble Sequence Labeling and Span Boundary Detection for Toxic Span Detection", "abstract": "This paper presents the winning system that participated in SemEval-2021 Task 5: Toxic Spans Detection. This task aims to locate those spans that attribute to the text's toxicity within a text, which is crucial for semi-automated moderation in online discussions. We formalize this task as the Sequence Labeling (SL) problem and the Span Boundary Detection (SBD) problem separately and employ three state-of-the-art models. Next, we integrate predictions of these models to produce a more credible and complement result. Our system achieves a char-level score of 70.83%, ranking 1/91. In addition, we also explore the lexicon-based method, which is strongly interpretable and flexible in practice.", "keyphrases": ["semeval-2021 task", "sequence labeling", "span boundary detection", "conditional random fields"]} +{"id": "rosti-etal-2008-incremental", "title": "Incremental Hypothesis Alignment for Building Confusion Networks with Application to Machine Translation System Combination", "abstract": "Confusion network decoding has been the most successful approach in combining outputs from multiple machine translation (MT) systems in the recent DARPA GALE and NIST Open MT evaluations. Due to the varying word order between outputs from different MT systems, the hypothesis alignment presents the biggest challenge in confusion network decoding. This paper describes an incremental alignment method to build confusion networks based on the translation edit rate (TER) algorithm. This new algorithm yields significant BLEU score improvements over other recent alignment methods on the GALE test sets and was used in BBN's submission to the WMT08 shared translation task.", "keyphrases": ["confusion network", "translation system", "system combination"]} +{"id": "smith-etal-2005-logarithmic", "title": "Logarithmic Opinion Pools for Conditional Random Fields", "abstract": "Recent work on Conditional Random Fields (CRFs) has demonstrated the need for regularisation to counter the tendency of these models to overfit. The standard approach to regularising CRFs involves a prior distribution over the model parameters, typically requiring search over a hyperparameter space. In this paper we address the overfitting problem from a different perspective, by factoring the CRF distribution into a weighted product of individual \"expert\" CRF distributions. We call this model a logarithmic opinion pool (LOP) of CRFs (LOP-CRFs). We apply the LOP-CRF to two sequencing tasks. Our results show that unregularised expert CRFs with an unregularised CRF under a LOP can outperform the unregularised CRF, and attain a performance level close to the regularised CRF. 
LOP-CRFs therefore provide a viable alternative to CRF regularisation without the need for hyperparameter search.", "keyphrases": ["conditional random fields", "crfs", "lop", "logarithmic opinion pool"]} +{"id": "tsarfaty-etal-2013-parsing", "title": "Parsing Morphologically Rich Languages: Introduction to the Special Issue", "abstract": "Parsing is a key task in natural language processing. It involves predicting, for each natural language sentence, an abstract representation of the grammatical entities in the sentence and the relations between these entities. This representation provides an interface to compositional semantics and to the notions of \u201cwho did what to whom.\u201d The last two decades have seen great advances in parsing English, leading to major leaps also in the performance of applications that use parsers as part of their backbone, such as systems for information extraction, sentiment analysis, text summarization, and machine translation. Attempts to replicate the success of parsing English for other languages have often yielded unsatisfactory results. In particular, parsing languages with complex word structure and flexible word order has been shown to require non-trivial adaptation. This special issue reports on methods that successfully address the challenges involved in parsing a range of morphologically rich languages (MRLs). This introduction characterizes MRLs, describes the challenges in parsing MRLs, and outlines the contributions of the articles in the special issue. These contributions present up-to-date research efforts that address parsing in varied, cross-lingual settings. They show that parsing MRLs addresses challenges that transcend particular representational and algorithmic choices.", "keyphrases": ["rich language", "special issue", "syntactic analysis"]} +{"id": "nawaz-etal-2010-evaluating", "title": "Evaluating a meta-knowledge annotation scheme for bio-events", "abstract": "The correct interpretation of biomedical texts by text mining systems requires the recognition of a range of types of high-level information (or meta-knowledge) about the text. Examples include expressions of negation and speculation, as well as pragmatic/rhetorical intent (e.g. whether the information expressed represents a hypothesis, generally accepted knowledge, new experimental knowledge, etc.) Although such types of information have previously been annotated at the text-span level (most commonly sentences), annotation at the level of the event is currently quite sparse. In this paper, we focus on the evaluation of the multi-dimensional annotation scheme that we have developed specifically for enriching bio-events with meta-knowledge information. Our annotation scheme is intended to be general enough to allow integration with different types of bio-event annotation, whilst being detailed enough to capture important subtleties in the nature of the meta-knowledge expressed in the text. To our knowledge, our scheme is unique within the field with regards to the diversity of meta-knowledge aspects annotated for each event, whilst the evaluation results have confirmed its feasibility and soundness.", "keyphrases": ["annotation scheme", "bio-event", "meta-knowledge aspect"]} +{"id": "qu-etal-2021-rocketqa", "title": "RocketQA: An Optimized Training Approach to Dense Passage Retrieval for Open-Domain Question Answering", "abstract": "In open-domain question answering, dense passage retrieval has become a new paradigm to retrieve relevant passages for finding answers. 
Typically, the dual-encoder architecture is adopted to learn dense representations of questions and passages for semantic matching. However, it is difficult to effectively train a dual-encoder due to the challenges including the discrepancy between training and inference, the existence of unlabeled positives and limited training data. To address these challenges, we propose an optimized training approach, called RocketQA, to improving dense passage retrieval. We make three major technical contributions in RocketQA, namely cross-batch negatives, denoised hard negatives and data augmentation. The experiment results show that RocketQA significantly outperforms previous state-of-the-art models on both MSMARCO and Natural Questions. We also conduct extensive experiments to examine the effectiveness of the three strategies in RocketQA. Besides, we demonstrate that the performance of end-to-end QA can be improved based on our RocketQA retriever.", "keyphrases": ["retriever", "open-domain question answering", "pre-trained language model"]} +{"id": "belz-varges-2007-generation", "title": "Generation of repeated references to discourse entities", "abstract": "Generation of Referring Expressions is a thriving subfield of Natural Language Generation which has traditionally focused on the task of selecting a set of attributes that unambiguously identify a given referent. In this paper, we address the complementary problem of generating repeated, potentially different referential expressions that refer to the same entity in the context of a piece of discourse longer than a sentence. We describe a corpus of short encyclopaedic texts we have compiled and annotated for reference to the main subject of the text, and report results for our experiments in which we set human subjects and automatic methods the task of selecting a referential expression from a wide range of choices in a full-text context. We find that our human subjects agree on choice of expression to a considerable degree, with three identical expressions selected in 50% of cases. We tested automatic selection strategies based on most frequent choice heuristics, involving different combinations of information about syntactic MSR type and domain type. We find that more information generally produces better results, achieving a best overall test set accuracy of 53.9% when both syntactic MSR type and domain type are known.", "keyphrases": ["reference", "wikipedia article", "subdomain"]} +{"id": "kanayama-nasukawa-2006-fully", "title": "Fully Automatic Lexicon Expansion for Domain-oriented Sentiment Analysis", "abstract": "This paper proposes an unsupervised lexicon building method for the detection of polar clauses, which convey positive or negative aspects in a specific domain. The lexical entries to be acquired are called polar atoms, the minimum human-understandable syntactic structures that specify the polarity of clauses. As a clue to obtain candidate polar atoms, we use context coherency, the tendency for same polarities to appear successively in contexts. Using the overall density and precision of coherency in the corpus, the statistical estimation picks up appropriate polar atoms among candidates, without any manual tuning of the threshold values. 
The experimental results show that the precision of polarity assignment with the automatically acquired lexicon was 94% on average, and our method is robust for corpora in diverse domains and for the size of the initial lexicon.", "keyphrases": ["sentiment analysis", "polarity", "syntactic feature", "seed word", "co-occurrence"]} +{"id": "li-etal-2016-visualizing", "title": "Visualizing and Understanding Neural Models in NLP", "abstract": "While neural networks have been successfully applied to many NLP tasks, the resulting vector-based models are very difficult to interpret. For example, it's not clear how they achieve compositionality, building sentence meaning from the meanings of words and phrases. In this paper we describe four strategies for visualizing compositionality in neural models for NLP, inspired by similar work in computer vision. We first plot unit values to visualize compositionality of negation, intensification, and concessive clauses, allowing us to see well-known markedness asymmetries in negation. We then introduce three simple and straightforward methods for visualizing a unit's salience, the amount it contributes to the final composed meaning: (1) gradient back-propagation, (2) the variance of a token from the average word node, (3) LSTM-style gates that measure information flow. We test our methods on sentiment using simple recurrent nets and LSTMs. Our general-purpose methods may have wide applications for understanding compositionality and other semantic properties of deep networks, and also shed light on why LSTMs outperform simple recurrent nets.", "keyphrases": ["negation", "rnn", "recurrent network", "saliency method", "visualization"]} +{"id": "neubig-etal-2011-pointwise", "title": "Pointwise Prediction for Robust, Adaptable Japanese Morphological Analysis", "abstract": "We present a pointwise approach to Japanese morphological analysis (MA) that ignores structure information during learning and tagging. Despite the lack of structure, it is able to outperform the current state-of-the-art structured approach for Japanese MA, and achieves accuracy similar to that of structured predictors using the same feature set. We also find that the method is both robust to out-of-domain data, and can be easily adapted through the use of a combination of partial annotation and active learning.", "keyphrases": ["japanese morphological analysis", "pointwise prediction", "pos tagging", "state-of-the-art model", "rich character-level"]} +{"id": "kober-etal-2020-aspectuality", "title": "Aspectuality Across Genre: A Distributional Semantics Approach", "abstract": "The interpretation of the lexical aspect of verbs in English plays a crucial role in tasks such as recognizing textual entailment and learning discourse-level inferences. We show that two elementary dimensions of aspectual class, states vs. events, and telic vs. atelic events, can be modelled effectively with distributional semantics. We find that a verb's local context is most indicative of its aspectual class, and we demonstrate that closed class words tend to be stronger discriminating contexts than content words. Our approach outperforms previous work on three datasets.
Further, we present a new dataset of human-human conversations annotated with lexical aspects and present experiments that show the correlation of telicity with genre and discourse goals.", "keyphrases": ["genre", "distributional semantic", "telicity"]} +{"id": "sil-etal-2012-linking", "title": "Linking Named Entities to Any Database", "abstract": "Existing techniques for disambiguating named entities in text mostly focus on Wikipedia as a target catalog of entities. Yet for many types of entities, such as restaurants and cult movies, relational databases exist that contain far more extensive information than Wikipedia. This paper introduces a new task, called Open-Database Named-Entity Disambiguation (Open-DB NED), in which a system must be able to resolve named entities to symbols in an arbitrary database, without requiring labeled data for each new database. We introduce two techniques for Open-DB NED, one based on distant supervision and the other based on domain adaptation. In experiments on two domains, one with poor coverage by Wikipedia and the other with near-perfect coverage, our Open-DB NED strategies outperform a state-of-the-art Wikipedia NED system by over 25% in accuracy.", "keyphrases": ["database", "wikipedia", "mention"]} +{"id": "pantel-etal-2009-web", "title": "Web-Scale Distributional Similarity and Entity Set Expansion", "abstract": "Computing the pairwise semantic similarity between all words on the Web is a computationally challenging task. Parallelization and optimizations are necessary. We propose a highly scalable implementation based on distributional similarity, implemented in the MapReduce framework and deployed over a 200 billion word crawl of the Web. The pairwise similarity between 500 million terms is computed in 50 hours using 200 quad-core nodes. We apply the learned similarity matrix to the task of automatic set expansion and present a large empirical study to quantify the effect on expansion performance of corpus size, corpus quality, seed composition and seed size. We make public an experimental testbed for set expansion analysis that includes a large collection of diverse entity sets extracted from Wikipedia.", "keyphrases": ["distributional similarity", "mapreduce framework", "quad-core node", "node", "memory"]} +{"id": "yu-etal-2020-named", "title": "Named Entity Recognition as Dependency Parsing", "abstract": "Named Entity Recognition (NER) is a fundamental task in Natural Language Processing, concerned with identifying spans of text expressing references to entities. NER research is often focused on flat entities only (flat NER), ignoring the fact that entity references can be nested, as in [Bank of [China]] (Finkel and Manning, 2009). In this paper, we use ideas from graph-based dependency parsing to provide our model a global view on the input via a biaffine model (Dozat and Manning, 2017). The biaffine model scores pairs of start and end tokens in a sentence which we use to explore all spans, so that the model is able to predict named entities accurately. 
We show that the model works well for both nested and flat NER through evaluation on 8 corpora, achieving SoTA performance on all of them, with accuracy gains of up to 2.2 percentage points.", "keyphrases": ["entity recognition", "span", "graph-based dependency", "biaffine model", "language model"]} +{"id": "khot-etal-2017-answering", "title": "Answering Complex Questions Using Open Information Extraction", "abstract": "While there has been substantial progress in factoid question-answering (QA), answering complex questions remains challenging, typically requiring both a large body of knowledge and inference techniques. Open Information Extraction (Open IE) provides a way to generate semi-structured knowledge for QA, but to date such knowledge has only been used to answer simple questions with retrieval-based methods. We overcome this limitation by presenting a method for reasoning with Open IE knowledge, allowing more complex questions to be handled. Using a recently proposed support graph optimization framework for QA, we develop a new inference model for Open IE, in particular one that can work effectively with multiple short facts, noise, and the relational structure of tuples. Our model significantly outperforms a state-of-the-art structured solver on complex questions of varying difficulty, while also removing the reliance on manually curated knowledge.", "keyphrases": ["open information extraction", "knowledge representation", "oie task"]} +{"id": "goldwater-etal-2006-contextual", "title": "Contextual Dependencies in Unsupervised Word Segmentation", "abstract": "Developing better methods for segmenting continuous text into words is important for improving the processing of Asian languages, and may shed light on how humans learn to segment speech. We propose two new Bayesian word segmentation methods that assume unigram and bigram models of word dependencies respectively. The bigram model greatly outperforms the unigram model (and previous probabilistic models), demonstrating the importance of such dependencies for word segmentation. We also show that previous probabilistic models rely crucially on sub-optimal search procedures.", "keyphrases": ["word segmentation", "dirichlet process", "hdp"]} +{"id": "sagae-lavie-2005-classifier", "title": "A Classifier-Based Parser with Linear Run-Time Complexity", "abstract": "We present a classifier-based parser that produces constituent trees in linear time. The parser uses a basic bottom-up shift-reduce algorithm, but employs a classifier to determine parser actions instead of a grammar. This can be seen as an extension of the deterministic dependency parser of Nivre and Scholz (2004) to full constituent parsing. We show that, with an appropriate feature set used in classification, a very simple one-path greedy parser can perform at the same level of accuracy as more complex parsers. We evaluate our parser on section 23 of the WSJ section of the Penn Treebank, and obtain precision and recall of 87.54% and 87.61%, respectively.", "keyphrases": ["classifier-based parser", "constituent", "linear time"]} +{"id": "shen-etal-2019-towards", "title": "Towards Generating Long and Coherent Text with Multi-Level Latent Variable Models", "abstract": "Variational autoencoders (VAEs) have received much attention recently as an end-to-end architecture for text generation with latent variables.
However, previous works typically focus on synthesizing relatively short sentences (up to 20 words), and the posterior collapse issue has been widely identified in text-VAEs. In this paper, we propose to leverage several multi-level structures to learn a VAE model for generating long and coherent text. In particular, a hierarchy of stochastic layers between the encoder and decoder networks is employed to abstract more informative and semantic-rich latent codes. Besides, we utilize a multi-level decoder structure to capture the coherent long-term structure inherent in long-form texts, by generating intermediate sentence representations as high-level plan vectors. Extensive experimental results demonstrate that the proposed multi-level VAE model produces more coherent and less repetitive long text compared to baselines, and can also mitigate the posterior-collapse issue.", "keyphrases": ["coherent text", "latent variable", "variational autoencoder"]} +{"id": "manning-etal-2014-stanford", "title": "The Stanford CoreNLP Natural Language Processing Toolkit", "abstract": "We describe the design and use of the Stanford CoreNLP toolkit, an extensible pipeline that provides core natural language analysis. This toolkit is quite widely used, both in the research NLP community and also among commercial and government users of open source NLP technology. We suggest that this follows from a simple, approachable design, straightforward interfaces, the inclusion of robust and good quality analysis components, and not requiring use of a large amount of associated baggage.", "keyphrases": ["stanford corenlp", "tagging", "dependency parse", "meta language", "rule match"]} +{"id": "hasan-ney-2009-comparison", "title": "Comparison of Extended Lexicon Models in Search and Rescoring for SMT", "abstract": "We show how the integration of an extended lexicon model into the decoder can improve translation performance. The model is based on lexical triggers that capture long-distance dependencies on the sentence level. The results are compared to variants of the model that are applied in reranking of n-best lists. We present how a combined application of these models in search and rescoring gives promising results. Experiments are reported on the GALE Chinese-English task with improvements of up to +0.9% BLEU and -1.5% TER absolute on a competitive baseline.", "keyphrases": ["lexicon model", "search", "translation performance"]} +{"id": "eger-etal-2017-neural", "title": "Neural End-to-End Learning for Computational Argumentation Mining", "abstract": "We investigate neural techniques for end-to-end computational argumentation mining (AM). We frame AM both as a token-based dependency parsing and as a token-based sequence tagging problem, including a multi-task learning setup. Contrary to models that operate on the argument component level, we find that framing AM as dependency parsing leads to subpar performance results. In contrast, less complex (local) tagging models based on BiLSTMs perform robustly across classification scenarios, being able to catch long-range dependencies inherent to the AM problem.
Moreover, we find that jointly learning 'natural' subtasks, in a multi-task learning setup, improves performance.", "keyphrases": ["computational argumentation mining", "neural end-to-end model", "raw text", "token level"]} +{"id": "elsner-charniak-2010-disentangling", "title": "Disentangling Chat", "abstract": "When multiple conversations occur simultaneously, a listener must decide which conversation each utterance is part of in order to interpret and respond to it appropriately. We refer to this task as disentanglement. We present a corpus of Internet Relay Chat dialogue in which the various conversations have been manually disentangled, and evaluate annotator reliability. We propose a graph-based clustering model for disentanglement, using lexical, timing, and discourse-based features. The model's predicted disentanglements are highly correlated with manual annotations. We conclude by discussing two extensions to the model, specificity tuning and conversation start detection, both of which are promising but do not currently yield practical improvements.", "keyphrases": ["chat", "discourse-based feature", "thread", "disentanglement", "coherence model"]} +{"id": "koo-etal-2007-structured", "title": "Structured Prediction Models via the Matrix-Tree Theorem", "abstract": "This paper provides an algorithmic framework for learning statistical models involving directed spanning trees, or equivalently non-projective dependency structures. We show how partition functions and marginals for directed spanning trees can be computed by an adaptation of Kirchhoff\u2019s Matrix-Tree Theorem. To demonstrate an application of the method, we perform experiments which use the algorithm in training both log-linear and max-margin dependency parsers. The new training methods give improvements in accuracy over perceptron-trained models.", "keyphrases": ["matrix-tree theorem", "dependency structure", "kirchhoff"]} +{"id": "siddiqua-etal-2019-tweet", "title": "Tweet Stance Detection Using an Attention based Neural Ensemble Model", "abstract": "Stance detection in Twitter aims at mining user stances expressed in a tweet towards a single or multiple target entities. To tackle this problem, most of the prior studies have explored traditional deep learning models, e.g., LSTM and GRU. However, compared to these traditional approaches, the recently proposed densely connected Bi-LSTM and nested LSTM architectures effectively address the vanishing-gradient and overfitting problems while also dealing with long-term dependencies. In this paper, we propose a neural ensemble model that adopts the strengths of these two LSTM variants to learn better long-term dependencies, where each module is coupled with an attention mechanism that amplifies the contribution of important elements in the final representation. We also employ a multi-kernel convolution on top of them to extract the higher-level tweet representations. Results of extensive experiments on single and multi-target stance detection datasets show that our proposed method achieves substantial improvement over the current state-of-the-art deep learning based methods.", "keyphrases": ["stance detection", "neural ensemble model", "tweet representation"]} +{"id": "williams-etal-2020-predicting", "title": "Predicting Declension Class from Form and Meaning", "abstract": "The noun lexica of many natural languages are divided into several declension classes with characteristic morphological properties.
Class membership is far from deterministic, but the phonological form of a noun and/or its meaning can often provide imperfect clues. Here, we investigate the strength of those clues. More specifically, we operationalize this by measuring how much information, in bits, we can glean about declension class from knowing the form and/or meaning of nouns. We know that form and meaning are often also indicative of grammatical gender\u2014which, as we quantitatively verify, can itself share information with declension class\u2014so we also control for gender. We find for two Indo-European languages (Czech and German) that form and meaning respectively share significant amounts of information with class (and contribute additional information above and beyond gender). The three-way interaction between class, form, and meaning (given gender) is also significant. Our study is important for two reasons: First, we introduce a new method that provides additional quantitative support for a classic linguistic finding that form and meaning are relevant for the classification of nouns into declensions. Secondly, we show not only that individual declension classes vary in the strength of their clues within a language, but also that these variations themselves vary across languages.", "keyphrases": ["declension class", "noun", "strength", "gender", "czech"]} +{"id": "bauer-koller-2010-sentence", "title": "Sentence Generation as Planning with Probabilistic LTAG", "abstract": "We present PCRISP, a sentence generation system for probabilistic TAG grammars which performs sentence planning and surface realization in an integrated fashion, in the style of the SPUD system. PCRISP operates by converting the generation problem into a metric planning problem and solving it using an off-the-shelf planner. We evaluate PCRISP on the WSJ corpus and identify trade-offs between coverage, efficiency, and accuracy.", "keyphrases": ["planning", "pcrisp", "sentence generation"]} +{"id": "forascu-2008-gmt", "title": "GMT to +2 or how can TimeML be used in Romanian", "abstract": "The paper describes the construction and usage of the Romanian version of the TimeBank corpus. The success rate of 96.53% for the automatic import of the temporal annotation from English to Romanian shows that the automatic transfer is an enterprise worth doing if temporality is to be studied in another language than the one for which TimeML, the annotation standard used, was developed. A preliminary study identifies the main situations that occurred during the automatic transfer, as well as temporal elements not (yet) marked in the English corpus.", "keyphrases": ["romanian", "temporal annotation", "preliminary study"]} +{"id": "dou-etal-2021-gsum", "title": "GSum: A General Framework for Guided Neural Abstractive Summarization", "abstract": "Neural abstractive summarization models are flexible and can produce coherent summaries, but they are sometimes unfaithful and can be difficult to control. While previous studies attempt to provide different types of guidance to control the output and increase faithfulness, it is not clear how these strategies compare and contrast to each other. In this paper, we propose a general and extensible guided summarization framework (GSum) that can effectively take different kinds of external guidance as input, and we perform experiments across several different varieties.
Experiments demonstrate that this model is effective, achieving state-of-the-art performance according to ROUGE on 4 popular summarization datasets when using highlighted sentences as guidance. In addition, we show that our guided model can generate more faithful summaries and demonstrate how different types of guidance generate qualitatively different summaries, lending a degree of controllability to the learned models.", "keyphrases": ["summarization", "highlighted sentence", "gsum"]} +{"id": "sanguinetti-bosco-2011-building", "title": "Building the multilingual TUT parallel treebank", "abstract": "The paper introduces an ongoing project for the development of a parallel treebank for Italian, English and French annotated in the pure dependency format of the Turin University Treebank, i.e. Parallel\u2010TUT. We hypothesize that the major features of this annotation format can be of some help in addressing the typical issues related to parallel corpora, e.g. alignment at various levels. Therefore, benefitting from the tools previously used for TUT, we applied the TUT format to a multilingual sample set of sentences from the JRC-Acquis Multilingual Parallel Corpus and the whole text of the Universal Declaration of Human Rights.", "keyphrases": ["tut", "parallel treebank", "ongoing project", "pure dependency format"]} +{"id": "clark-curran-2007-formalism", "title": "Formalism-Independent Parser Evaluation with CCG and DepBank", "abstract": "A key question facing the parsing community is how to compare parsers which use different grammar formalisms and produce different output. Evaluating a parser on the same resource used to create it can lead to non-comparable accuracy scores and an over-optimistic view of parser performance. In this paper we evaluate a CCG parser on DepBank, and demonstrate the difficulties in converting the parser output into DepBank grammatical relations. In addition we present a method for measuring the effectiveness of the conversion, which provides an upper bound on parsing accuracy. The CCG parser obtains an F-score of 81.9% on labelled dependencies, against an upper bound of 84.8%. We compare the CCG parser against the RASP parser, outperforming RASP by over 5% overall and on the majority of dependency types.", "keyphrases": ["depbank", "ccg parser", "lexical category"]} +{"id": "faruqui-kumar-2015-multilingual", "title": "Multilingual Open Relation Extraction Using Cross-lingual Projection", "abstract": "Open domain relation extraction systems identify relation and argument phrases in a sentence without relying on any underlying schema. However, current state-of-the-art relation extraction systems are available only for English because of their heavy reliance on linguistic tools such as part-of-speech taggers and dependency parsers. We present a cross-lingual annotation projection method for language independent relation extraction. We evaluate our method on a manually annotated test set and present results on three typologically different languages. We release these manual annotations and extracted relations in ten languages from Wikipedia.", "keyphrases": ["cross-lingual projection", "relation extraction system", "wikipedia", "source language"]} +{"id": "li-etal-2019-improving", "title": "Improving Relation Extraction with Knowledge-attention", "abstract": "While attention mechanisms have been proven to be effective in many NLP tasks, the majority of them are data-driven.
We propose a novel knowledge-attention encoder which incorporates prior knowledge from external lexical resources into deep neural networks for relation extraction task. Furthermore, we present three effective ways of integrating knowledge-attention with self-attention to maximize the utilization of both knowledge and data. The proposed relation extraction system is end-to-end and fully attention-based. Experiment results show that the proposed knowledge-attention mechanism has complementary strengths with self-attention, and our integrated models outperform existing CNN, RNN, and self-attention based models. State-of-the-art performance is achieved on TACRED, a complex and large-scale relation extraction dataset.", "keyphrases": ["relation extraction", "knowledge-attention", "knowledge basis"]} +{"id": "bollmann-etal-2017-learning", "title": "Learning attention for historical text normalization by learning to pronounce", "abstract": "Automated processing of historical texts often relies on pre-normalization to modern word forms. Training encoder-decoder architectures to solve such problems typically requires a lot of training data, which is not available for the named task. We address this problem by using several novel encoder-decoder architectures, including a multi-task learning (MTL) architecture using a grapheme-to-phoneme dictionary as auxiliary data, pushing the state-of-the-art by an absolute 2% increase in performance. We analyze the induced models across 44 different texts from Early New High German. Interestingly, we observe that, as previously conjectured, multi-task learning can learn to focus attention during decoding, in ways remarkably similar to recently proposed attention mechanisms. This, we believe, is an important step toward understanding how MTL works.", "keyphrases": ["historical text normalization", "grapheme-to-phoneme dictionary", "auxiliary task"]} +{"id": "poesio-etal-2004-centering", "title": "Centering: A Parametric Theory and Its Instantiations", "abstract": "Centering theory is the best-known framework for theorizing about local coherence and salience; however, its claims are articulated in terms of notions which are only partially specified, such as utterance, realization, or ranking. A great deal of research has attempted to arrive at more detailed specifications of these parameters of the theory; as a result, the claims of centering can be instantiated in many different ways. We investigated in a systematic fashion the effect on the theory's claims of these different ways of setting the parameters. Doing this required, first of all, clarifying what the theory's claims are (one of our conclusions being that what has become known as Constraint 1 is actually a central claim of the theory). Secondly, we had to clearly identify these parametric aspects: For example, we argue that the notion of pronoun used in Rule 1 should be considered a parameter. Thirdly, we had to find appropriate methods for evaluating these claims. 
We found that while the theory's main claim about salience and pronominalization, Rule 1, a preference for pronominalizing the backward-looking center (CB), is verified with most instantiations, Constraint 1, a claim about (entity) coherence and CB uniqueness, is much more instantiation-dependent: It is not verified if the parameters are instantiated according to very mainstream views (vanilla instantiation), it holds only if indirect realization is allowed, and is violated by between 20% and 25% of utterances in our corpus even with the most favorable instantiations. We also found a trade-off between Rule 1, on the one hand, and Constraint 1 and Rule 2, on the other: Setting the parameters to minimize the violations of local coherence leads to increased violations of salience, and vice versa. Our results suggest that entity coherence, continuous reference to the same entities, must be supplemented at least by an account of relational coherence.", "keyphrases": ["instantiation", "centering theory", "salience"]} +{"id": "cetinoglu-kuhn-2013-towards", "title": "Towards Joint Morphological Analysis and Dependency Parsing of Turkish", "abstract": "Turkish is an agglutinative language with rich morphology-syntax interactions. As an extension of this property, the Turkish Treebank is designed to represent sublexical dependencies, which brings extra challenges to parsing raw text. In this work, we use a joint POS tagging and parsing approach to parse Turkish raw text, and we show it outperforms a pipeline approach. Then we experiment with incorporating morphological feature prediction into the joint system. Our results show statistically significant improvements with the joint systems and achieve the state-of-the-art accuracy for Turkish dependency parsing.", "keyphrases": ["dependency parsing", "agglutinative language", "joint pos tagging"]} +{"id": "cahill-etal-2013-robust", "title": "Robust Systems for Preposition Error Correction Using Wikipedia Revisions", "abstract": "We show that existing methods for training preposition error correction systems, whether using well-edited text or error-annotated corpora, do not generalize across very different test sets. We present a new, large error-annotated corpus and use it to train systems that generalize across three different test sets, each from a different domain and with different error characteristics. This new corpus is automatically extracted from Wikipedia revisions and contains over one million instances of preposition corrections.", "keyphrases": ["preposition error", "wikipedia revision", "error-annotated corpus"]} +{"id": "hershcovich-etal-2018-multitask", "title": "Multitask Parsing Across Semantic Representations", "abstract": "The ability to consolidate information of different types is at the core of intelligence, and has tremendous practical value in allowing learning for one task to benefit from generalizations learned for others. In this paper we tackle the challenging task of improving semantic parsing performance, taking UCCA parsing as a test case, and AMR, SDP and Universal Dependencies (UD) parsing as auxiliary tasks. We experiment on three languages, using a uniform transition-based system and learning architecture for all parsing tasks.
Despite notable conceptual, formal and domain differences, we show that multitask learning significantly improves UCCA parsing in both in-domain and out-of-domain settings.", "keyphrases": ["ucca", "amr", "sdp", "formalism", "semantic graphbank"]} +{"id": "hoang-kan-2010-towards", "title": "Towards Automated Related Work Summarization", "abstract": "We introduce the novel problem of automatic related work summarization. Given multiple articles (e.g., conference/journal papers) as input, a related work summarization system creates a topic-biased summary of related work specific to the target paper. Our prototype Related Work Summarization system, ReWoS, takes in a set of keywords arranged in a hierarchical fashion that describes a target paper's topics, to drive the creation of an extractive summary using two different strategies for locating appropriate sentences for general topics as well as detailed ones. Our initial results show an improvement over generic multi-document summarization baselines in a human evaluation.", "keyphrases": ["work summarization system", "work section", "work generation"]} +{"id": "dobrovoljc-nivre-2016-universal", "title": "The Universal Dependencies Treebank of Spoken Slovenian", "abstract": "This paper presents the construction of an open-source dependency treebank of spoken Slovenian, the first syntactically annotated collection of spontaneous speech in Slovenian. The treebank has been manually annotated using the Universal Dependencies annotation scheme, a one-layer syntactic annotation scheme with a high degree of cross-modality, cross-framework and cross-language interoperability. In this original application of the scheme to spoken language transcripts, we address a wide spectrum of syntactic particularities in speech, either by extending the scope of application of existing universal labels or by proposing new speech-specific extensions. The initial analysis of the resulting treebank and its comparison with the written Slovenian UD treebank confirms significant syntactic differences between the two language modalities, with spoken data consisting of shorter and more elliptic sentences, fewer and simpler nominal phrases, and more relations marking disfluencies, interaction, deixis and modality.", "keyphrases": ["dependency treebank", "spoken slovenian", "disfluency"]} +{"id": "meng-etal-2020-text", "title": "Text Classification Using Label Names Only: A Language Model Self-Training Approach", "abstract": "Current text classification methods typically require a good number of human-labeled documents as training data, which can be costly and difficult to obtain in real applications. Humans can perform classification without seeing any labeled examples but only based on a small set of words describing the categories to be classified. In this paper, we explore the potential of only using the label name of each class to train classification models on unlabeled data, without using any labeled documents. We use pre-trained neural language models both as general linguistic knowledge sources for category understanding and as representation learning models for document classification. Our method (1) associates semantically related words with the label names, (2) finds category-indicative words and trains the model to predict their implied categories, and (3) generalizes the model via self-training.
We show that our model achieves around 90% accuracy on four benchmark datasets including topic and sentiment classification without using any labeled documents but learning from unlabeled data supervised by at most 3 words (1 in most cases) per class as the label name.", "keyphrases": ["self-training", "unlabeled data", "text classification", "lotclass"]} +{"id": "ando-zhang-2005-high", "title": "A High-Performance Semi-Supervised Learning Method for Text Chunking", "abstract": "In machine learning, whether one can build a more accurate classifier by using unlabeled data (semi-supervised learning) is an important issue. Although a number of semi-supervised methods have been proposed, their effectiveness on NLP tasks is not always clear. This paper presents a novel semi-supervised method that employs a learning paradigm which we call structural learning. The idea is to find \"what good classifiers are like\" by learning from thousands of automatically generated auxiliary classification problems on unlabeled data. By doing so, the common predictive structure shared by the multiple classification problems can be discovered, which can then be used to improve performance on the target problem. The method produces performance higher than the previous best results on CoNLL'00 syntactic chunking and CoNLL'03 named entity chunking (English and German).", "keyphrases": ["learning method", "chunking", "unlabeled data"]} +{"id": "pouliquen-etal-2011-tapta", "title": "Tapta: A user-driven translation system for patent documents based on domain-aware Statistical Machine Translation", "abstract": "This paper presents a study conducted in the course of implementing a project in the World Intellectual Property Organization (WIPO) on assisted translation of patent abstracts and titles from English to French. The tool (called \u2018Tapta\u2019) is trained on an extensive corpus of manually translated patents. These patents are classified, each class belonging to one of the 32 predefined domains. The trained Statistical Machine Translation (SMT) tool uses this additional information to propose more accurate translations according to the context. The performance of the SMT system was shown to be above the current state of the art, but, in order to produce an acceptable translation, a human has to supervise the process. Therefore, a graphical user interface was built in which the translator drives the automatic translation process. A significant experiment with human operators was conducted within WIPO, the output was judged to be successful and a project to use Tapta in production is now under discussion.", "keyphrases": ["patent", "statistical machine translation", "input text"]} +{"id": "lakretz-etal-2019-emergence", "title": "The emergence of number and syntax units in LSTM language models", "abstract": "Recent work has shown that LSTMs trained on a generic language modeling objective capture syntax-sensitive generalizations such as long-distance number agreement. We have however no mechanistic understanding of how they accomplish this remarkable feat. Some have conjectured it depends on heuristics that do not truly take hierarchical structure into account. We present here a detailed study of the inner mechanics of number tracking in LSTMs at the single neuron level. We discover that long-distance number information is largely managed by two \u201cnumber units\u201d. Importantly, the behaviour of these units is partially controlled by other units independently shown to track syntactic structure. 
We conclude that LSTMs are, to some extent, implementing genuinely syntactic processing mechanisms, paving the way to a more general understanding of grammatical encoding in LSTMs.", "keyphrases": ["lstm language model", "syntactic structure", "individual neuron"]} +{"id": "suzuki-etal-2009-empirical", "title": "An Empirical Study of Semi-supervised Structured Conditional Models for Dependency Parsing", "abstract": "This paper describes an empirical study of high-performance dependency parsers based on a semi-supervised learning approach. We describe an extension of semi-supervised structured conditional models (SS-SCMs) to the dependency parsing problem, whose framework is originally proposed in (Suzuki and Isozaki, 2008). Moreover, we introduce two extensions related to dependency parsing: The first extension is to combine SS-SCMs with another semi-supervised approach, described in (Koo et al., 2008). The second extension is to apply the approach to second-order parsing models, such as those described in (Carreras, 2007), using a two-stage semi-supervised learning approach. We demonstrate the effectiveness of our proposed methods on dependency parsing experiments using two widely used test collections: the Penn Treebank for English, and the Prague Dependency Tree-bank for Czech. Our best results on test data in the above datasets achieve 93.79% parent-prediction accuracy for English, and 88.05% for Czech.", "keyphrases": ["empirical study", "dependency parsing", "semi-supervised learning approach"]} +{"id": "heilman-smith-2010-tree", "title": "Tree Edit Models for Recognizing Textual Entailments, Paraphrases, and Answers to Questions", "abstract": "We describe tree edit models for representing sequences of tree transformations involving complex reordering phenomena and demonstrate that they offer a simple, intuitive, and effective method for modeling pairs of semantically related sentences. To efficiently extract sequences of edits, we employ a tree kernel as a heuristic in a greedy search routine. We describe a logistic regression model that uses 33 syntactic features of edit sequences to classify the sentence pairs. The approach leads to competitive performance in recognizing textual entailment, paraphrase identification, and answer selection for question answering.", "keyphrases": ["textual entailment", "tree transformation", "edit sequence", "ted", "pre-selected sentence"]} +{"id": "nie-etal-2019-revealing", "title": "Revealing the Importance of Semantic Retrieval for Machine Reading at Scale", "abstract": "Machine Reading at Scale (MRS) is a challenging task in which a system is given an input query and is asked to produce a precise output by \u201creading\u201d information from a large knowledge base. The task has gained popularity with its natural combination of information retrieval (IR) and machine comprehension (MC). Advancements in representation learning have led to separated progress in both IR and MC; however, very few studies have examined the relationship and combined design of retrieval and comprehension at different levels of granularity, for development of MRS systems. In this work, we give general guidelines on system design for MRS by proposing a simple yet effective pipeline system with special consideration on hierarchical semantic retrieval at both paragraph and sentence level, and their potential effects on the downstream task. 
The system is evaluated on both fact verification and open-domain multihop QA, achieving state-of-the-art results on the leaderboard test sets of both FEVER and HOTPOTQA. To further demonstrate the importance of semantic retrieval, we present ablation and analysis studies to quantify the contribution of neural retrieval modules at both paragraph-level and sentence-level, and illustrate that intermediate semantic retrieval modules are vital for not only effectively filtering upstream information and thus saving downstream computation, but also for shaping upstream data distribution and providing better data for downstream modeling.", "keyphrases": ["semantic retrieval", "scale", "fact verification", "complex question", "hyperlink"]} +{"id": "shen-etal-2019-multi", "title": "Multi-Task Learning for Conversational Question Answering over a Large-Scale Knowledge Base", "abstract": "We consider the problem of conversational question answering over a large-scale knowledge base. To handle huge entity vocabulary of a large-scale knowledge base, recent neural semantic parsing based approaches usually decompose the task into several subtasks and then solve them sequentially, which leads to following issues: 1) errors in earlier subtasks will be propagated and negatively affect downstream ones; and 2) each subtask cannot naturally share supervision signals with others. To tackle these issues, we propose an innovative multi-task learning framework where a pointer-equipped semantic parsing model is designed to resolve coreference in conversations, and naturally empower joint learning with a novel type-aware entity detection model. The proposed framework thus enables shared supervisions and alleviates the effect of error propagation. Experiments on a large-scale conversational question answering dataset containing 1.6M question answering pairs over 12.8M entities show that the proposed framework improves overall F1 score from 67% to 79% compared with previous state-of-the-art work.", "keyphrases": ["conversational question", "large-scale knowledge base", "semantic parsing", "joint learning", "mention"]} +{"id": "specia-gimenez-2010-combining", "title": "Combining Confidence Estimation and Reference-based Metrics for Segment-level MT Evaluation", "abstract": "We describe an effort to improve standard reference-based metrics for Machine Translation (MT) evaluation by enriching them with Confidence Estimation (CE) features and using a learning mechanism trained on human annotations. Reference-based MT evaluation metrics compare the system output against reference translations looking for overlaps at different levels (lexical, syntactic, and semantic). These metrics aim at comparing MT systems or analyzing the progress of a given system and are known to have reasonably good correlation with human judgments at the corpus level, but not at the segment level. CE metrics, on the other hand, target the system in use, providing a quality score to the end-user for each translated segment. They cannot rely on reference translations, and use instead information extracted from the input text, system output and possibly external corpora to train machine learning algorithms. These metrics correlate better with human judgments at the segment level. However, they are usually highly biased by difficulty level of the input segment, and therefore are less appropriate for comparing multiple systems translating the same input segments. 
We show that these two classes of metrics are complementary and can be combined to provide MT evaluation metrics that achieve higher correlation with human judgments at the segment level.", "keyphrases": ["confidence estimation", "reference-based metric", "human annotation"]} +{"id": "zeman-etal-2012-hamledt", "title": "HamleDT: To Parse or Not to Parse?", "abstract": "We propose HamleDT \u2015 HArmonized Multi-LanguagE Dependency Treebank. HamleDT is a compilation of existing dependency treebanks (or dependency conversions of other treebanks), transformed so that they all conform to the same annotation style. While the license terms prevent us from directly redistributing the corpora, most of them are easily acquirable for research purposes. What we provide instead is the software that normalizes tree structures in the data obtained by the user from their original providers.", "keyphrases": ["treebank", "hamledt", "clause", "verb group"]} +{"id": "ma-etal-2020-simuleval", "title": "SIMULEVAL: An Evaluation Toolkit for Simultaneous Translation", "abstract": "Simultaneous translation on both text and speech focuses on a real-time and low-latency scenario where the model starts translating before reading the complete source input. Evaluating simultaneous translation models is more complex than offline models because the latency is another factor to consider in addition to translation quality. The research community, despite its growing focus on novel modeling approaches to simultaneous translation, currently lacks a universal evaluation procedure. Therefore, we present SimulEval, an easy-to-use and general evaluation toolkit for both simultaneous text and speech translation. A server-client scheme is introduced to create a simultaneous translation scenario, where the server sends source input and receives predictions for evaluation and the client executes customized policies. Given a policy, it automatically performs simultaneous decoding and collectively reports several popular latency metrics. We also adapt latency metrics from text simultaneous translation to the speech task. Additionally, SimulEval is equipped with a visualization interface to provide better understanding of the simultaneous decoding process of a system. SimulEval has already been extensively used for the IWSLT 2020 shared task on simultaneous speech translation. Code will be released upon publication.", "keyphrases": ["evaluation toolkit", "latency", "speech translation", "simuleval"]} +{"id": "popovic-ney-2007-word", "title": "Word Error Rates: Decomposition over POS classes and Applications for Error Analysis", "abstract": "Evaluation and error analysis of machine translation output are important but difficult tasks. In this work, we propose a novel method for obtaining more details about actual translation errors in the generated output by introducing the decomposition of Word Error Rate (WER) and Position independent word Error Rate (PER) over different Part-of-Speech (POS) classes. Furthermore, we investigate two possible aspects of the use of these decompositions for automatic error analysis: estimation of inflectional errors and distribution of missing words over POS classes. The obtained results are shown to correspond to the results of a human error analysis.
The results obtained on the European Parliament Plenary Session corpus in Spanish and English give a better overview of the nature of translation errors as well as ideas of where to put efforts for possible improvements of the translation system.", "keyphrases": ["decomposition", "pos class", "error analysis", "different part-of-speech", "content word"]} +{"id": "blinov-etal-2019-large", "title": "Large Dataset and Language Model Fun-Tuning for Humor Recognition", "abstract": "The task of humor recognition has attracted a lot of attention recently due to the urge to process large amounts of user-generated texts and the rise of conversational agents. We collected a dataset of jokes and funny dialogues in Russian from various online resources and complemented them carefully with unfunny texts with similar lexical properties. The dataset comprises more than 300,000 short texts, which is significantly larger than any previous humor-related corpus. Manual annotation of 2,000 items proved the reliability of the corpus construction approach. Further, we applied language model fine-tuning for text classification and obtained an F1 score of 0.91 on a test set, which constitutes a considerable gain over baseline methods. The dataset is freely available to the research community.", "keyphrases": ["humor recognition", "funny dialogue", "russian", "various online resource"]} +{"id": "moschitti-etal-2008-tree", "title": "Tree Kernels for Semantic Role Labeling", "abstract": "The availability of large scale data sets of manually annotated predicate-argument structures has recently favored the use of machine learning approaches to the design of automated semantic role labeling (SRL) systems. The main research in this area relates to the design choices for feature representation and for effective decompositions of the task in different learning models. Regarding the former choice, structural properties of full syntactic parses are largely employed as they represent ways to encode different principles suggested by the linking theory between syntax and semantics. The latter choice relates to several learning schemes over global views of the parses. For example, re-ranking stages operating over alternative predicate-argument sequences of the same sentence have been shown to be very effective. In this article, we propose several kernel functions to model parse tree properties in kernel-based machines, for example, perceptrons or support vector machines. In particular, we define different kinds of tree kernels as general approaches to feature engineering in SRL. Moreover, we extensively experiment with such kernels to investigate their contribution to individual stages of an SRL architecture both in isolation and in combination with other traditional manually coded features. The results for boundary recognition, classification, and re-ranking stages provide systematic evidence about the significant impact of tree kernels on the overall accuracy, especially when the amount of training data is small.
As a conclusive result, tree kernels allow for a general and easily portable feature engineering method which is applicable to a large family of natural language processing tasks.", "keyphrases": ["semantic role labeling", "srl", "different kind", "tree kernels"]} +{"id": "bisazza-federico-2010-chunk", "title": "Chunk-Based Verb Reordering in VSO Sentences for Arabic-English Statistical Machine Translation", "abstract": "In Arabic-to-English phrase-based statistical machine translation, a large number of syntactic disfluencies are due to wrong long-range reordering of the verb in VSO sentences, where the verb is anticipated with respect to the English word order. In this paper, we propose a chunk-based reordering technique to automatically detect and displace clause-initial verbs in the Arabic side of a word-aligned parallel corpus. This method is applied to preprocess the training data, and to collect statistics about verb movements. From this analysis, specific verb reordering lattices are then built on the test sentences before decoding them. The application of our reordering methods on the training and test sets results in consistent BLEU score improvements on the NIST-MT 2009 Arabic-English benchmark.", "keyphrases": ["machine translation", "chunk-based reordering technique", "clause-initial verb"]} +{"id": "lu-roth-2015-joint", "title": "Joint Mention Extraction and Classification with Mention Hypergraphs", "abstract": "We present a novel model for the task of joint mention extraction and classification. Unlike existing approaches, our model is able to effectively capture overlapping mentions with unbounded lengths. The model is highly scalable, with a time complexity that is linear in the number of words in the input sentence and linear in the number of possible mention classes. Our model can be extended to additionally capture mention heads explicitly in a joint manner under the same time complexity. We demonstrate the effectiveness of our model through extensive experiments on standard datasets.", "keyphrases": ["hypergraph", "unbounded length", "joint mention extraction", "multiple node", "hand-crafted feature"]} +{"id": "roy-etal-2020-lareqa", "title": "LAReQA: Language-Agnostic Answer Retrieval from a Multilingual Pool", "abstract": "We present LAReQA, a challenging new benchmark for language-agnostic answer retrieval from a multilingual candidate pool. Unlike previous cross-lingual tasks, LAReQA tests for \u201cstrong\u201d cross-lingual alignment, requiring semantically related cross-language pairs to be closer in representation space than unrelated same-language pairs. This level of alignment is important for the practical task of cross-lingual information retrieval. Building on multilingual BERT (mBERT), we study different strategies for achieving strong alignment. We find that augmenting training data via machine translation is effective, and improves significantly over using mBERT out-of-the-box. Interestingly, model performance on zero-shot variants of our task that only target \u201cweak\u201d alignment is not predictive of performance on LAReQA. This finding underscores our claim that language-agnostic retrieval is a substantively new kind of cross-lingual evaluation, and suggests that measuring both weak and strong alignment will be important for improving cross-lingual systems going forward. 
We release our dataset and evaluation code at .", "keyphrases": ["language-agnostic answer retrieval", "candidate pool", "same-language pair", "lareqa"]} +{"id": "wang-etal-2008-semi", "title": "Semi-Supervised Convex Training for Dependency Parsing", "abstract": "We present a novel semi-supervised training algorithm for learning dependency parsers. By combining a supervised large margin loss with an unsupervised least squares loss, a discriminative, convex, semi-supervised learning algorithm can be obtained that is applicable to large-scale problems. To demonstrate the benefits of this approach, we apply the technique to learning dependency parsers from combined labeled and unlabeled corpora. Using a stochastic gradient descent algorithm, a parsing model can be efficiently learned from semi-supervised data that significantly outperforms corresponding supervised methods.", "keyphrases": ["dependency parsing", "least square loss", "learning algorithm", "semi-supervised approach"]} +{"id": "schwenk-etal-2012-large", "title": "Large, Pruned or Continuous Space Language Models on a GPU for Statistical Machine Translation", "abstract": "Language models play an important role in large vocabulary speech recognition and statistical machine translation systems. For several decades, the dominant approach has been back-off language models. Some years ago, there was a clear tendency to build huge language models trained on hundreds of billions of words. Lately, this tendency has changed and recent work concentrates on data selection. Continuous space methods are a very competitive approach, but they have a high computational complexity and are not yet in widespread use. This paper presents an experimental comparison of all these approaches on a large statistical machine translation task. We also describe an open-source implementation to train and use continuous space language models (CSLM) for such large tasks. We describe an efficient implementation of the CSLM using graphics processing units from Nvidia. By these means, we are able to train a CSLM on more than 500 million words in 20 hours. This CSLM provides an improvement of up to 1.8 BLEU points with respect to the best back-off language model that we were able to build.", "keyphrases": ["statistical machine translation", "implementation", "network-based language"]} +{"id": "bannard-etal-2003-statistical", "title": "A Statistical Approach to the Semantics of Verb-Particles", "abstract": "This paper describes a distributional approach to the semantics of verb-particle constructions (e.g. put up, make off). We report first on a framework for implementing and evaluating such models. We then go on to report on the implementation of some techniques for using statistical models acquired from corpus data to infer the meaning of verb-particle constructions.", "keyphrases": ["verb-particle construction", "statistical model", "compositionality", "vpc"]} +{"id": "chiticariu-etal-2010-domain", "title": "Domain Adaptation of Rule-Based Annotators for Named-Entity Recognition Tasks", "abstract": "Named-entity recognition (NER) is an important task required in a wide variety of applications. While rule-based systems are appealing due to their well-known \"explainability,\" most, if not all, state-of-the-art results for NER tasks are based on machine learning techniques. Motivated by these results, we explore the following natural question in this paper: Are rule-based systems still a viable approach to named-entity recognition? 
Specifically, we have designed and implemented a high-level language NERL on top of SystemT, a general-purpose algebraic information extraction system. NERL is tuned to the needs of NER tasks and simplifies the process of building, understanding, and customizing complex rule-based named-entity annotators. We show that these customized annotators match or outperform the best published results achieved with machine learning techniques. These results confirm that we can reap the benefits of rule-based extractors' explainability without sacrificing accuracy. We conclude by discussing lessons learned while building and customizing complex rule-based annotators and outlining several research directions towards facilitating rule development.", "keyphrases": ["domain adaptation", "entity recognition", "rule-based ner system"]} +{"id": "joty-mohiuddin-2018-modeling", "title": "Modeling Speech Acts in Asynchronous Conversations: A Neural-CRF Approach", "abstract": "Participants in an asynchronous conversation (e.g., forum, e-mail) interact with each other at different times, performing certain communicative acts, called speech acts (e.g., question, request). In this article, we propose a hybrid approach to speech act recognition in asynchronous conversations. Our approach works in two main steps: a long short-term memory recurrent neural network (LSTM-RNN) first encodes each sentence separately into a task-specific distributed representation, and this is then used in a conditional random field (CRF) model to capture the conversational dependencies between sentences. The LSTM-RNN model uses pretrained word embeddings learned from a large conversational corpus and is trained to classify sentences into speech act types. The CRF model can consider arbitrary graph structures to model conversational dependencies in an asynchronous conversation. In addition, to mitigate the problem of limited annotated data in the asynchronous domains, we adapt the LSTM-RNN model to learn from synchronous conversations (e.g., meetings), using domain adversarial training of neural networks. Empirical evaluation shows the effectiveness of our approach over existing ones: (i) LSTM-RNNs provide better task-specific representations, (ii) conversational word embeddings benefit the LSTM-RNNs more than the off-the-shelf ones, (iii) adversarial training gives better domain-invariant representations, and (iv) the global CRF model improves over local models.", "keyphrases": ["conversation", "speech act recognition", "word embedding"]} +{"id": "punyakanok-etal-2004-semantic", "title": "Semantic Role Labeling Via Integer Linear Programming Inference", "abstract": "We present a system for the semantic role labeling task. The system combines a machine learning technique with an inference procedure based on integer linear programming that supports the incorporation of linguistic and structural constraints into the decision process. The system is tested on the data provided in CoNLL-2004 shared task on semantic role labeling and achieves very competitive results.", "keyphrases": ["integer linear programming", "ilp", "semantic role labeling", "srl"]} +{"id": "baker-etal-2010-modality", "title": "A Modality Lexicon and its use in Automatic Tagging", "abstract": "This paper describes our resource-building results for an eight-week JHU Human Language Technology Center of Excellence Summer Camp for Applied Language Exploration (SCALE-2009) on Semantically-Informed Machine Translation. 
Specifically, we describe the construction of a modality annotation scheme, a modality lexicon, and two automated modality taggers that were built using the lexicon and annotation scheme. Our annotation scheme is based on identifying three components of modality: a trigger, a target and a holder. We describe how our modality lexicon was produced semi-automatically, expanding from an initial hand-selected list of modality trigger words and phrases. The resulting expanded modality lexicon is being made publicly available. We demonstrate that one tagger\u2015a structure-based tagger\u2015results in precision around 86% (depending on genre) for tagging of a standard LDC data set. In a machine translation application, using the structure-based tagger to annotate English modalities on an English-Urdu training corpus improved the translation quality score for Urdu by 0.3 Bleu points in the face of sparse training data.", "keyphrases": ["modality lexicon", "machine translation", "annotation scheme", "tagger", "requirement"]} +{"id": "velardi-etal-2013-ontolearn", "title": "OntoLearn Reloaded: A Graph-Based Algorithm for Taxonomy Induction", "abstract": "In 2004 we published in this journal an article describing OntoLearn, one of the first systems to automatically induce a taxonomy from documents and Web sites. Since then, OntoLearn has continued to be an active area of research in our group and has become a reference work within the community. In this paper we describe our next-generation taxonomy learning methodology, which we name OntoLearn Reloaded. Unlike many taxonomy learning approaches in the literature, our novel algorithm learns both concepts and relations entirely from scratch via the automated extraction of terms, definitions, and hypernyms. This results in a very dense, cyclic and potentially disconnected hypernym graph. The algorithm then induces a taxonomy from this graph via optimal branching and a novel weighting policy. Our experiments show that we obtain high-quality results, both when building brand-new taxonomies and when reconstructing sub-hierarchies of existing taxonomies.", "keyphrases": ["graph-based algorithm", "taxonomy", "hypernym graph", "ontolearn reloaded"]} +{"id": "wang-manning-2010-probabilistic", "title": "Probabilistic Tree-Edit Models with Structured Latent Variables for Textual Entailment and Question Answering", "abstract": "A range of Natural Language Processing tasks involve making judgments about the semantic relatedness of a pair of sentences, such as Recognizing Textual Entailment (RTE) and answer selection for Question Answering (QA). A key challenge that these tasks face in common is the lack of explicit alignment annotation between a sentence pair. We capture the alignment by using a novel probabilistic model that models tree-edit operations on dependency parse trees. Unlike previous tree-edit models which require a separate alignment-finding phase and resort to ad-hoc distance metrics, our method treats alignments as structured latent variables, and offers a principled framework for incorporating complex linguistic features. 
We demonstrate the robustness of our model by conducting experiments for RTE and QA, and show that our model performs competitively on both tasks with the same set of general features.", "keyphrases": ["textual entailment", "question answering", "tree-edit operation", "dependency parse tree", "passage"]} +{"id": "poesio-etal-2008-anawiki", "title": "ANAWIKI: Creating Anaphorically Annotated Resources through Web Cooperation", "abstract": "The ability to make progress in Computational Linguistics depends on the availability of large annotated corpora, but creating such corpora by hand annotation is very expensive and time consuming; in practice, it is unfeasible to think of annotating more than one million words. However, the success of Wikipedia and other projects shows that another approach might be possible: take advantage of the willingness of Web users to contribute to collaborative resource creation. AnaWiki is a recently started project that will develop tools to allow and encourage large numbers of volunteers over the Web to collaborate in the creation of semantically annotated corpora (in the first instance, of a corpus annotated with information about anaphora).", "keyphrases": ["creation", "anawiki", "web community"]} +{"id": "iyer-etal-2017-learning", "title": "Learning a Neural Semantic Parser from User Feedback", "abstract": "We present an approach to rapidly and easily build natural language interfaces to databases for new domains, whose performance improves over time based on user feedback, and requires minimal intervention. To achieve this, we adapt neural sequence models to map utterances directly to SQL with its full expressivity, bypassing any intermediate meaning representations. These models are immediately deployed online to solicit feedback from real users to flag incorrect queries. Finally, the popularity of SQL facilitates gathering annotations for incorrect predictions using the crowd, which is directly used to improve our models. This complete feedback loop, without intermediate representations or database specific engineering, opens up new ways of building high quality semantic parsers. Experiments suggest that this approach can be deployed quickly for any new target domain, as we show by learning a semantic parser for an online academic database from scratch.", "keyphrases": ["feedback", "semantic parsing", "text-to-sql", "neural network-based approach", "encoder-decoder model"]} +{"id": "zang-etal-2020-word", "title": "Word-level Textual Adversarial Attacking as Combinatorial Optimization", "abstract": "Adversarial attacks are carried out to reveal the vulnerability of deep neural networks. Textual adversarial attacking is challenging because text is discrete and a small perturbation can bring significant change to the original input. Word-level attacking, which can be regarded as a combinatorial optimization problem, is a well-studied class of textual attack methods. However, existing word-level attack models are far from perfect, largely because unsuitable search space reduction methods and inefficient optimization algorithms are employed. In this paper, we propose a novel attack model, which incorporates the sememe-based word substitution method and particle swarm optimization-based search algorithm to solve the two problems separately. We conduct exhaustive experiments to evaluate our attack model by attacking BiLSTM and BERT on three benchmark datasets. 
Experimental results demonstrate that our model consistently achieves much higher attack success rates and crafts higher-quality adversarial examples than baseline methods. Also, further experiments show our model has higher transferability and can bring greater robustness enhancement to victim models via adversarial training. All the code and data of this paper can be obtained on .", "keyphrases": ["combinatorial optimization problem", "attack model", "adversarial example", "victim model"]} +{"id": "briakou-etal-2021-ola", "title": "Ol\u00e1, Bonjour, Salve! XFORMAL: A Benchmark for Multilingual Formality Style Transfer", "abstract": "We take the first step towards multilingual style transfer by creating and releasing XFORMAL, a benchmark of multiple formal reformulations of informal text in Brazilian Portuguese, French, and Italian. Results on XFORMAL suggest that state-of-the-art style transfer approaches perform close to simple baselines, indicating that style transfer is even more challenging when moving to multilingual settings.", "keyphrases": ["xformal", "multilingual style transfer", "multiple formal reformulation", "informal text", "brazilian portuguese"]} +{"id": "park-caragea-2022-calibration", "title": "On the Calibration of Pre-trained Language Models using Mixup Guided by Area Under the Margin and Saliency", "abstract": "A well-calibrated neural model produces confidence (probability outputs) closely approximated by the expected accuracy. While prior studies have shown that mixup training as a data augmentation technique can improve model calibration on image classification tasks, little is known about using mixup for model calibration on natural language understanding (NLU) tasks. In this paper, we explore mixup for model calibration on several NLU tasks and propose a novel mixup strategy for pre-trained language models that improves model calibration further. Our proposed mixup is guided by both the Area Under the Margin (AUM) statistic (Pleiss et al., 2020) and the saliency map of each sample (Simonyan et al., 2013). Moreover, we combine our mixup strategy with model miscalibration correction techniques (i.e., label smoothing and temperature scaling) and provide detailed analyses of their impact on our proposed mixup. We focus on systematically designing experiments on three NLU tasks: natural language inference, paraphrase detection, and commonsense reasoning. Our method achieves the lowest expected calibration error compared to strong baselines on both in-domain and out-of-domain test samples while maintaining competitive accuracy.", "keyphrases": ["pre-trained language model", "margin", "saliency map"]} +{"id": "kim-etal-2010-chunk", "title": "Chunk-Based EBMT", "abstract": "Corpus-driven machine translation approaches such as Phrase-Based Statistical Machine Translation and Example-Based Machine Translation have been successful by using word alignment to find translation fragments for matched source parts in a bilingual training corpus. However, they still cannot properly deal with systematic translation for insertion or deletion words between two distant languages. 
In this work, we used syntactic chunks as translation units to alleviate this problem, improve alignments, and show improvement in BLEU for Korean-to-English and Chinese-to-English translation tasks.", "keyphrases": ["ebmt", "chunk", "translation unit"]} +{"id": "ding-etal-2020-daga", "title": "DAGA: Data Augmentation with a Generation Approach for Low-resource Tagging Tasks", "abstract": "Data augmentation techniques have been widely used to improve machine learning performance as they facilitate generalization. In this work, we propose a novel augmentation method to generate high quality synthetic data for low-resource tagging tasks with language models trained on the linearized labeled sentences. Our method is applicable to both supervised and semi-supervised settings. For the supervised settings, we conduct extensive experiments on named entity recognition (NER), part of speech (POS) tagging and end-to-end target based sentiment analysis (E2E-TBSA) tasks. For the semi-supervised settings, we evaluate our method on the NER task under the conditions of given unlabeled data only and unlabeled data plus a knowledge base. The results show that our method can consistently outperform the baselines, particularly when the given gold training data are scarce.", "keyphrases": ["data augmentation", "language model", "semi-supervised setting"]} +{"id": "zhu-etal-2019-graph", "title": "Graph Neural Networks with Generated Parameters for Relation Extraction", "abstract": "In this paper, we propose a novel graph neural network with generated parameters (GP-GNNs). The parameters in the propagation module, i.e. the transition matrices used in the message passing procedure, are produced by a generator taking natural language sentences as inputs. We verify GP-GNNs in relation extraction from text, both on bag- and instance-settings. Experimental results on a human-annotated dataset and two distantly supervised datasets show that the multi-hop reasoning mechanism yields significant improvements. We also perform a qualitative analysis to demonstrate that our model could discover more accurate relations by multi-hop relational reasoning.", "keyphrases": ["relation extraction", "reasoning", "gnn", "previous approach"]} +{"id": "cotterell-heigold-2017-cross", "title": "Cross-lingual Character-Level Neural Morphological Tagging", "abstract": "Even for common NLP tasks, sufficient supervision is not available in many languages \u2013 morphological tagging is no exception. In the work presented here, we explore a transfer learning scheme, whereby we train character-level recurrent neural taggers to predict morphological taggings for high-resource languages and low-resource languages together. Learning joint character representations among multiple related languages successfully enables knowledge transfer from the high-resource languages to the low-resource ones.", "keyphrases": ["morphological tagging", "high-resource language", "language family"]} +{"id": "sogaard-2011-data", "title": "Data point selection for cross-language adaptation of dependency parsers", "abstract": "We consider a very simple, yet effective, approach to cross language adaptation of dependency parsers. We first remove lexical items from the treebanks and map part-of-speech tags into a common tagset. We then train a language model on tag sequences in otherwise unlabeled target data and rank labeled source data by perplexity per word of tag sequences from less similar to most similar to the target. 
We then train our target language parser on the most similar data points in the source labeled data. The strategy achieves much better results than a non-adapted baseline and state-of-the-art unsupervised dependency parsing, and results are comparable to more complex projection-based cross language adaptation algorithms.", "keyphrases": ["dependency parser", "perplexity", "data point selection", "cross-lingual transfer"]} +{"id": "mccallum-li-2003-early", "title": "Early results for Named Entity Recognition with Conditional Random Fields, Feature Induction and Web-Enhanced Lexicons", "abstract": "Models for many natural language tasks benefit from the flexibility to use overlapping, non-independent features. For example, the need for labeled data can be drastically reduced by taking advantage of domain knowledge in the form of word lists, part-of-speech tags, character n-grams, and capitalization patterns. While it is difficult to capture such inter-dependent features with a generative probabilistic model, conditionally-trained models, such as conditional maximum entropy models, handle them well. There has been significant work with such models for greedy sequence modeling in NLP (Ratnaparkhi, 1996; Borthwick et al., 1998).", "keyphrases": ["named entity recognition", "conditional random field", "formal text"]} +{"id": "king-abney-2013-labeling", "title": "Labeling the Languages of Words in Mixed-Language Documents using Weakly Supervised Methods", "abstract": "In this paper we consider the problem of labeling the languages of words in mixed-language documents. This problem is approached in a weakly supervised fashion, as a sequence labeling problem with monolingual text samples for training data. Among the approaches evaluated, a conditional random field model trained with generalized expectation criteria was the most accurate and performed consistently as the amount of training data was varied.", "keyphrases": ["mixed-language document", "semi-supervised method", "language identification", "minority language", "web page"]} +{"id": "muzerelle-etal-2014-ancor", "title": "ANCOR_Centre, a large free spoken French coreference corpus: description of the resource and reliability measures", "abstract": "This article presents ANCOR_Centre, a French coreference corpus, available under the Creative Commons Licence. With a size of around 500,000 words, the corpus is large enough to serve the needs of data-driven approaches in NLP and represents one of the largest coreference resources currently available. The corpus focuses exclusively on spoken language; it aims at representing a variety of spoken genres. ANCOR_Centre includes anaphora as well as coreference relations which involve nominal and pronominal mentions. The paper describes in detail the annotation scheme and the reliability measures computed on the resource.", "keyphrases": ["french coreference corpus", "reliability measure", "spoken language", "ancor_centre", "substantial dataset"]} +{"id": "kolomiyets-etal-2012-extracting", "title": "Extracting Narrative Timelines as Temporal Dependency Structures", "abstract": "We propose a new approach to characterizing the timeline of a text: temporal dependency structures, where all the events of a narrative are linked via partial ordering relations like BEFORE, AFTER, OVERLAP and IDENTITY. 
We annotate a corpus of children's stories with temporal dependency trees, achieving agreement (Krippendorff's Alpha) of 0.856 on the event words, 0.822 on the links between events, and 0.700 on the ordering relation labels. We compare two parsing models for temporal dependency structures, and show that a deterministic non-projective dependency parser outperforms a graph-based maximum spanning tree parser, achieving labeled attachment accuracy of 0.647 and labeled tree edit distance of 0.596. Our analysis of the dependency parser errors gives some insights into future research directions.", "keyphrases": ["timeline", "dependency structure", "story"]} +{"id": "yamada-etal-2020-luke", "title": "LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention", "abstract": "Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering). Our source code and pretrained representations are available at .", "keyphrases": ["entity-aware self-attention", "contextualized representation", "wikipedia", "entity information", "pre-trained language model"]} +{"id": "jain-sharma-2016-explicit", "title": "Explicit Argument Identification for Discourse Parsing In Hindi: A Hybrid Pipeline", "abstract": "Shallow discourse parsing enables us to study discourse as a coherent piece of information rather than a sequence of clauses, sentences and paragraphs. In this paper, we identify arguments of explicit discourse relations in Hindi. This is the first such work carried out for Hindi. Building upon previous work carried out on discourse connective identification in Hindi, we propose a hybrid pipeline which makes use of both sub-tree extraction and linear tagging approaches. We report state-of-the-art performance for this task.", "keyphrases": ["discourse", "sub-tree extraction", "linear tagging approach"]} +{"id": "chrupala-etal-2020-analyzing", "title": "Analyzing analytical methods: The case of phonology in neural models of spoken language", "abstract": "Given the fast development of analysis techniques for NLP and speech processing systems, few systematic studies have been conducted to compare the strengths and weaknesses of each method. As a step in this direction we study the case of representations of phonology in neural network models of spoken language. 
We use two commonly applied analytical techniques, diagnostic classifiers and representational similarity analysis, to quantify to what extent neural activation patterns encode phonemes and phoneme sequences. We manipulate two factors that can affect the outcome of analysis. First, we investigate the role of learning by comparing neural activations extracted from trained versus randomly-initialized models. Second, we examine the temporal scope of the activations by probing both local activations corresponding to a few milliseconds of the speech signal, and global activations pooled over the whole utterance. We conclude that reporting analysis results with randomly initialized models is crucial, and that global-scope methods tend to yield more consistent and interpretable results; we therefore recommend their use as a complement to local-scope diagnostic methods.", "keyphrases": ["phonology", "representational similarity analysis", "neural activation pattern", "temporal scope"]} +{"id": "ganchev-etal-2009-dependency", "title": "Dependency Grammar Induction via Bitext Projection Constraints", "abstract": "Broad-coverage annotated treebanks necessary to train parsers do not exist for many resource-poor languages. The wide availability of parallel text and accurate parsers in English has opened up the possibility of grammar induction through partial transfer across bitext. We consider generative and discriminative models for dependency grammar induction that use word-level alignments and a source language parser (English) to constrain the space of possible target trees. Unlike previous approaches, our framework does not require full projected parses, allowing partial, approximate transfer through linear expectation constraints on the space of distributions over trees. We consider several types of constraints that range from generic dependency conservation to language-specific annotation rules for auxiliary verb analysis. We evaluate our approach on Bulgarian and Spanish CoNLL shared task data and show that we consistently outperform unsupervised methods and can outperform supervised learning for limited training data.", "keyphrases": ["bitext", "projection", "dependency grammar induction", "parallel corpora", "hand-written rule"]} +{"id": "desai-etal-2020-compressive", "title": "Compressive Summarization with Plausibility and Salience Modeling", "abstract": "Compressive summarization systems typically rely on a seed set of syntactic rules to determine under what circumstances deleting a span is permissible, then learn which compressions to actually apply by optimizing for ROUGE. In this work, we propose to relax these explicit syntactic constraints on candidate spans, and instead leave the decision about what to delete to two data-driven criteria: plausibility and salience. Deleting a span is plausible if removing it maintains the grammaticality and factuality of a sentence, and it is salient if it removes important information from the summary. Each of these is judged by a pre-trained Transformer model, and only deletions that are both plausible and not salient can be applied. When integrated into a simple extraction-compression pipeline, our method achieves strong in-domain results on benchmark datasets, and human evaluation shows that the plausibility model generally selects for grammatical and factual deletions. 
Furthermore, the flexibility of our approach allows it to generalize cross-domain, and we show that our system fine-tuned on only 500 samples from a new domain can match or exceed a strong in-domain extractive model.", "keyphrases": ["plausibility", "salience", "compressive summarization system"]} +{"id": "zhou-etal-2022-distributed", "title": "Distributed NLI: Learning to Predict Human Opinion Distributions for Language Reasoning", "abstract": "We introduce distributed NLI, a new NLU task with a goal to predict the distribution of human judgements for natural language inference. We show that by applying additional distribution estimation methods, namely, Monte Carlo (MC) Dropout, Deep Ensemble, Re-Calibration, and Distribution Distillation, models can capture human judgement distribution more effectively than the softmax baseline. We show that MC Dropout is able to achieve decent performance without any distribution annotations while Re-Calibration can give further improvements with extra distribution annotations, suggesting the value of multiple annotations for one example in modeling the distribution of human judgements. Despite these improvements, the best results are still far below the estimated human upper-bound, indicating that predicting the distribution of human judgements is still an open, challenging problem with a large room for improvements. We showcase the common errors for MC Dropout and Re-Calibration. Finally, we give guidelines on the usage of these methods with different levels of data availability and encourage future work on modeling the human opinion distribution for language reasoning.", "keyphrases": ["nli", "language reasoning", "judgement", "distribution estimation method"]} +{"id": "sarawgi-etal-2011-gender", "title": "Gender Attribution: Tracing Stylometric Evidence Beyond Topic and Genre", "abstract": "Sociolinguistic theories (e.g., Lakoff (1973)) postulate that women's language styles differ from those of men. In this paper, we explore statistical techniques that can learn to identify the gender of authors in modern English text, such as web blogs and scientific papers. Although recent work has shown the efficacy of statistical approaches to gender attribution, we conjecture that the reported performance might be overly optimistic due to non-stylistic factors such as topic bias in gender that can make the gender detection task easier. Our work is the first that consciously avoids gender bias in topics, thereby providing stronger evidence to gender-specific styles in language beyond topic. In addition, our comparative study provides new insights into robustness of various stylometric techniques across topic and genre.", "keyphrases": ["genre", "topic bias", "gender attribution"]} +{"id": "zhang-etal-2016-generating", "title": "Generating Abbreviations for Chinese Named Entities Using Recurrent Neural Network with Dynamic Dictionary", "abstract": "Chinese named entities occur frequently in formal and informal environments. Various approaches have formalized the problem as a sequence labelling task and utilized a character-based methodology, in which the character is treated as the basic classification unit. One of the main drawbacks of these methods is that some of the generated abbreviations may not follow the conventional wisdom of Chinese. To address this problem, we propose a novel neural network architecture to perform this task. 
It combines a recurrent neural network (RNN) with an architecture determining whether a given sequence of characters can be a word or not. To demonstrate the effectiveness of the proposed method, we evaluate it on Chinese named entity generation and opinion target extraction tasks. Experimental results show that the proposed method can achieve better performance than state-of-the-art methods.", "keyphrases": ["abbreviation", "chinese", "recurrent neural network"]} +{"id": "specia-2011-exploiting", "title": "Exploiting Objective Annotations for Minimising Translation Post-editing Effort", "abstract": "With the noticeable improvement of the overall quality of Machine Translation (MT) systems in recent years, post-editing of MT output is starting to become a common practice among human translators. However, it is well known that the quality of a given MT system can vary significantly across translation segments and that post-editing bad quality translations is a tedious task that may require more effort than translating texts from scratch. Previous research dedicated to learning quality estimation models to flag such segments has shown that models based on human annotation achieve more promising results. However, it is not clear yet what is the most appropriate form of human annotation for building such models. We experiment with models based on three annotation types (post-editing time, post-editing distance and post-editing effort scores) and show that estimations resulting from using post-editing time, a simple and objective annotation, can minimise translation post-editing effort in a practical, task-based scenario. We also discuss some perspectives on the effectiveness, reliability and cost of each type of annotation.", "keyphrases": ["annotator", "translator", "post-editing time"]} +{"id": "poudyal-etal-2020-echr", "title": "ECHR: Legal Corpus for Argument Mining", "abstract": "In this paper, we publicly release an annotated corpus of 42 decisions of the European Court of Human Rights (ECHR). The corpus is annotated in terms of three types of clauses useful in argument mining: premise, conclusion, and non-argument parts of the text. Furthermore, relationships among the premises and conclusions are mapped. We present baselines for three tasks that lead from unstructured texts to structured arguments. The tasks are argument clause recognition, clause relation prediction, and premise/conclusion recognition. Despite a straightforward application of Bidirectional Encoder Representations from Transformers (BERT), we obtained very promising results (F1 0.765 on argument recognition, 0.511 on relation prediction, and 0.859/0.628 on premise/conclusion recognition). The results suggest the usefulness of pre-trained language models based on deep neural network architectures in argument mining. Because of the simplicity of the baselines, there is ample space for improvement in future work based on the released corpus.", "keyphrases": ["argument mining", "human rights", "echr", "legal domain"]} +{"id": "cook-stevenson-2006-classifying", "title": "Classifying Particle Semantics in English Verb-Particle Constructions", "abstract": "Previous computational work on learning the semantic properties of verb-particle constructions (VPCs) has focused on their compositionality, and has left unaddressed the issue of which meaning of the component words is being used in a given VPC. 
We develop a feature space for use in classification of the sense contributed by the particle in a VPC, and test this on VPCs using the particle up. The features that capture linguistic properties of VPCs that are relevant to the semantics of the particle outperform linguistically uninformed word co-occurrence features in our experiments on unseen test VPCs.", "keyphrases": ["particle", "verb-particle construction", "compositionality"]} +{"id": "ghazvininejad-etal-2017-hafez", "title": "Hafez: an Interactive Poetry Generation System", "abstract": "Hafez is an automatic poetry generation system that integrates a Recurrent Neural Network (RNN) with a Finite State Acceptor (FSA). It generates sonnets given arbitrary topics. Furthermore, Hafez enables users to revise and polish generated poems by adjusting various style configurations. Experiments demonstrate that such \u201cpolish\u201d mechanisms consider the user\u2019s intention and lead to a better poem. For evaluation, we build a web interface where users can rate the quality of each poem from 1 to 5 stars. We also speed up the whole system by a factor of 10, via vocabulary pruning and GPU computation, so that adequate feedback can be collected at a fast pace. Based on such feedback, the system learns to adjust its parameters to improve poetry quality.", "keyphrases": ["poetry generation", "hafez", "speaker style"]} +{"id": "schlangen-skantze-2009-general", "title": "A General, Abstract Model of Incremental Dialogue Processing", "abstract": "We present a general model and conceptual framework for specifying architectures for incremental processing in dialogue systems, in particular with respect to the topology of the network of modules that make up the system, the way information flows through this network, how information increments are 'packaged', and how these increments are processed by the modules. This model enables the precise specification of incremental systems and hence facilitates detailed comparisons between systems, as well as giving guidance on designing new systems.", "keyphrases": ["abstract model", "incremental dialogue processing", "incrementality", "processor", "system response"]} +{"id": "kotani-etal-2011-compiling", "title": "Compiling Learner Corpus Data of Linguistic Output and Language Processing in Speaking, Listening, Writing, and Reading", "abstract": "A learner\u2019s language data of speaking, writing, listening, and reading have been compiled for a learner corpus in this study. The language data consist of linguistic output and language processing. Linguistic output refers to data of pronunciation, sentences, listening comprehension rate, and reading comprehension rate. Language processing refers to processing time and learners\u2019 self-judgment of their difficulty of processing in speaking, listening, and reading and the fluency of their writing. This learner corpus will contribute to making the language learning process more clearly visible.", "keyphrases": ["learner corpus", "writing", "pronunciation"]} +{"id": "xie-etal-2018-large", "title": "Large-scale Cloze Test Dataset Created by Teachers", "abstract": "Cloze tests are widely adopted in language exams to evaluate students' language proficiency. In this paper, we propose the first large-scale human-created cloze test dataset CLOTH, containing questions used in middle-school and high-school language exams. 
With missing blanks carefully created by teachers and candidate choices purposely designed to be nuanced, CLOTH requires a deeper language understanding and a wider attention span than previously automatically-generated cloze datasets. We test the performance of carefully designed baseline models, including a language model trained on the One Billion Word Corpus, and show that humans outperform them by a significant margin. We investigate the source of the performance gap, trace model deficiencies to some distinct properties of CLOTH, and identify the limited ability to comprehend long-term context as the key bottleneck.", "keyphrases": ["cloze test", "teacher", "middle-school"]} +{"id": "zhou-etal-2021-rica", "title": "RICA: Evaluating Robust Inference Capabilities Based on Commonsense Axioms", "abstract": "Pre-trained language models (PTLMs) have achieved impressive performance on commonsense inference benchmarks, but their ability to employ commonsense to make robust inferences, which is crucial for effective communications with humans, is debated. In the pursuit of advancing fluid human-AI communication, we propose a new challenge, RICA: Robust Inference using Commonsense Axioms, that evaluates robust commonsense inference despite textual perturbations. To generate data for this challenge, we develop a systematic and scalable procedure using commonsense knowledge bases and probe PTLMs across two different evaluation settings. Extensive experiments on our generated probe sets with more than 10k statements show that PTLMs perform no better than random guessing on the zero-shot setting, are heavily impacted by statistical biases, and are not robust to perturbation attacks. We also find that fine-tuning on similar statements offers limited gains, as PTLMs still fail to generalize to unseen inferences. Our new large-scale benchmark exposes a significant gap between PTLMs and human-level language understanding and offers a new challenge for PTLMs to demonstrate commonsense.", "keyphrases": ["commonsense axioms", "perturbation", "random guessing", "rica"]} +{"id": "hosseini-etal-2018-learning", "title": "Learning Typed Entailment Graphs with Global Soft Constraints", "abstract": "This paper presents a new method for learning typed entailment graphs from text. We extract predicate-argument structures from multiple-source news corpora, and compute local distributional similarity scores to learn entailments between predicates with typed arguments (e.g., person contracted disease). Previous work has used transitivity constraints to improve local decisions, but these constraints are intractable on large graphs. We instead propose a scalable method that learns globally consistent similarity scores based on new soft constraints that consider both the structures across typed entailment graphs and inside each graph. Learning takes only a few hours to run over 100K predicates and our results show large improvements over local similarity scores on two entailment data sets. We further show improvements over paraphrases and entailments from the Paraphrase Database, and prior state-of-the-art entailment graphs. 
We show that the entailment graphs improve performance in a downstream task.", "keyphrases": ["entailment graph", "similarity score", "edge"]} +{"id": "winata-etal-2018-bilingual", "title": "Bilingual Character Representation for Efficiently Addressing Out-of-Vocabulary Words in Code-Switching Named Entity Recognition", "abstract": "We propose an LSTM-based model with hierarchical architecture on named entity recognition from code-switching Twitter data. Our model uses bilingual character representation and transfer learning to address out-of-vocabulary words. In order to mitigate data noise, we propose to use token replacement and normalization. In the 3rd Workshop on Computational Approaches to Linguistic Code-Switching Shared Task, we achieved second place with 62.76% harmonic mean F1-score for the English-Spanish language pair without using any gazetteer and knowledge-based information.", "keyphrases": ["out-of-vocabulary word", "code-switching", "entity recognition", "bilingual character representation"]} +{"id": "gupta-etal-2019-simple", "title": "Simple, Fast, Accurate Intent Classification and Slot Labeling for Goal-Oriented Dialogue Systems", "abstract": "With the advent of conversational assistants, like Amazon Alexa, Google Now, etc., dialogue systems are gaining a lot of traction, especially in industrial settings. These systems typically consist of a Spoken Language Understanding component which, in turn, consists of two tasks - Intent Classification (IC) and Slot Labeling (SL). Generally, these two tasks are modeled jointly to achieve the best performance. However, this joint modeling adds to model obfuscation. In this work, we first design a framework for modularization of the joint IC-SL task to enhance architecture transparency. Then, we explore a number of self-attention, convolutional, and recurrent models, contributing a large-scale analysis of modeling paradigms for IC+SL across two datasets. Finally, using this framework, we propose a class of `label-recurrent' models that are otherwise non-recurrent, with a 10-dimensional representation of the label history, and show that our proposed systems are easy to interpret, highly accurate (achieving over 30% error reduction in SL over the state-of-the-art on the Snips dataset), as well as fast, at 2x the inference and 2/3 to 1/2 the training time of comparable recurrent models, thus giving an edge in critical real-world systems.", "keyphrases": ["intent classification", "slot labeling", "dialogue system"]} +{"id": "nguyen-2018-comparing", "title": "Comparing Automatic and Human Evaluation of Local Explanations for Text Classification", "abstract": "Text classification models are becoming increasingly complex and opaque, however for many applications it is essential that the models are interpretable. Recently, a variety of approaches have been proposed for generating local explanations. While robust evaluations are needed to drive further progress, so far it is unclear which evaluation approaches are suitable. This paper is a first step towards more robust evaluations of local explanations. We evaluate a variety of local explanation approaches using automatic measures based on word deletion. 
Furthermore, we show that an evaluation using a crowdsourcing experiment correlates moderately with these automatic measures and that a variety of other factors also impact the human judgements.", "keyphrases": ["human evaluation", "explanation", "text classification"]} +{"id": "denkowski-lavie-2014-meteor", "title": "Meteor Universal: Language Specific Translation Evaluation for Any Target Language", "abstract": "This paper describes Meteor Universal, released for the 2014 ACL Workshop on Statistical Machine Translation. Meteor Universal brings language specific evaluation to previously unsupported target languages by (1) automatically extracting linguistic resources (paraphrase tables and function word lists) from the bitext used to train MT systems and (2) using a universal parameter set learned from pooling human judgments of translation quality from several language directions. Meteor Universal is shown to significantly outperform baseline BLEU on two new languages, Russian (WMT13) and Hindi (WMT14).", "keyphrases": ["meteor universal", "synonym", "paraphrase match", "late version", "stem"]} +{"id": "yih-etal-2014-semantic", "title": "Semantic Parsing for Single-Relation Question Answering", "abstract": "We develop a semantic parsing framework based on semantic similarity for open domain question answering (QA). We focus on single-relation questions and decompose each question into an entity mention and a relation pattern. Using convolutional neural network models, we measure the similarity of entity mentions with entities in the knowledge base (KB) and the similarity of relation patterns and relations in the KB. We score relational triples in the KB using these measures and select the top scoring relational triple to answer the question. When evaluated on an open-domain QA task, our method achieves higher precision across different recall points compared to the previous approach, and can improve F1 by 7 points.", "keyphrases": ["question answering", "knowledge base", "semantic parsing", "kbqa"]} +{"id": "siddharthan-etal-2011-information", "title": "Information Status Distinctions and Referring Expressions: An Empirical Study of References to People in News Summaries", "abstract": "Although there has been much theoretical work on using various information status distinctions to explain the form of references in written text, there have been few studies that attempt to automatically learn these distinctions for generating references in the context of computer-regenerated text. In this article, we present a model for generating references to people in news summaries that incorporates insights from both theory and a corpus analysis of human written summaries. In particular, our model captures how two properties of a person referred to in the summary\u2014familiarity to the reader and global salience in the news story\u2014affect the content and form of the initial reference to that person in a summary. We demonstrate that these two distinctions can be learned from a typical input for multi-document summarization and that they can be used to make regeneration decisions that improve the quality of extractive summaries.", "keyphrases": ["distinction", "news summary", "hearer-old"]} +{"id": "hermet-etal-2008-using", "title": "Using the Web as a Linguistic Resource to Automatically Correct Lexico-Syntactic Errors", "abstract": "This paper presents an algorithm for correcting language errors typical of second-language learners. 
We focus on preposition errors, which are very common among second-language learners but are not addressed well by current commercial grammar correctors and editing aids. The algorithm takes as input a sentence containing a preposition error (and possibly other errors as well), and outputs the correct preposition for that particular sentence context. We use a two-phase hybrid rule-based and statistical approach. In the first phase, rule-based processing is used to generate a short expression that captures the context of use of the preposition in the input sentence. In the second phase, Web searches are used to evaluate the frequency of this expression, when alternative prepositions are used instead of the original one. We tested this algorithm on a corpus of 133 French sentences written by intermediate second-language learners, and found that it could address 69.9% of those cases. In contrast, we found that the best French grammar and spell checker currently on the market, Antidote, addressed only 3% of those cases. We also showed that performance degrades gracefully when using a corpus of frequent n-grams to evaluate frequencies.", "keyphrases": ["web", "correction", "learner", "preposition error", "french-as-a-second-language"]} +{"id": "tam-etal-2021-improving", "title": "Improving and Simplifying Pattern Exploiting Training", "abstract": "Recently, pre-trained language models (LMs) have achieved strong performance when fine-tuned on difficult benchmarks like SuperGLUE. However, performance can suffer when there are very few labeled examples available for fine-tuning. Pattern Exploiting Training (PET) is a recent approach that leverages patterns for few-shot learning. However, PET uses task-specific unlabeled data. In this paper, we focus on few-shot learning without any unlabeled data and introduce ADAPET, which modifies PET's objective to provide denser supervision during fine-tuning. As a result, ADAPET outperforms PET on SuperGLUE without any task-specific unlabeled data.", "keyphrases": ["language model", "fine-tuning", "plm"]} +{"id": "ye-etal-2017-jointly", "title": "Jointly Extracting Relations with Class Ties via Effective Deep Ranking", "abstract": "Connections between relations in relation extraction, which we call class ties, are common. In the distantly supervised scenario, one entity tuple may have multiple relation facts. Exploiting class ties between relations of one entity tuple will be promising for distantly supervised relation extraction. However, previous models are either not effective or fail to model this property. In this work, to effectively leverage class ties, we propose to perform joint relation extraction with a unified model that integrates a convolutional neural network (CNN) with a general pairwise ranking framework, in which three novel ranking loss functions are introduced. Additionally, an effective method is presented to relieve the severe class imbalance problem caused by NR (not relation) instances during model training. Experiments on a widely used dataset show that leveraging class ties enhances extraction and demonstrate the effectiveness of our model in learning class ties. 
Our model outperforms the baselines significantly, achieving state-of-the-art performance.", "keyphrases": ["class tie", "relation extraction", "entity tuple"]} +{"id": "xia-etal-2019-generalized", "title": "Generalized Data Augmentation for Low-Resource Translation", "abstract": "Low-resource language pairs with a paucity of parallel data pose challenges for machine translation in terms of both adequacy and fluency. Data augmentation utilizing a large amount of monolingual data is regarded as an effective way to alleviate the problem. In this paper, we propose a general framework of data augmentation for low-resource machine translation not only using target-side monolingual data, but also by pivoting through a related high-resource language. Specifically, we experiment with a two-step pivoting method to convert high-resource data to the low-resource language, making best use of available resources to better approximate the true distribution of the low-resource language. First, we inject low-resource words into high-resource sentences through an induced bilingual dictionary. Second, we further edit the high-resource data injected with low-resource words using a modified unsupervised machine translation framework. Extensive experiments on four low-resource datasets show that under extreme low-resource settings, our data augmentation techniques improve translation quality by up to 1.5 to 8 BLEU points compared to supervised back-translation baselines.", "keyphrases": ["data augmentation", "low-resource language", "monolingual data", "two-step pivoting method"]} +{"id": "zhang-etal-2019-mitigating", "title": "Mitigating Uncertainty in Document Classification", "abstract": "The uncertainty measurement of classifiers' predictions is especially important in applications such as medical diagnoses that need to ensure limited human resources can focus on the most uncertain predictions returned by machine learning models. However, few existing uncertainty models attempt to improve overall prediction accuracy where human resources are involved in the text classification task. In this paper, we propose a novel neural-network-based model that applies a new dropout-entropy method for uncertainty measurement. We also design a metric learning method on feature representations, which can boost the performance of dropout-based uncertainty methods with smaller prediction variance in accurate prediction trials. Extensive experiments on real-world data sets demonstrate that our method can achieve a considerable improvement in overall prediction accuracy compared to existing approaches. In particular, our model improved the accuracy from 0.78 to 0.92 when 30% of the most uncertain predictions were handed over to human experts in \u201c20NewsGroup\u201d data.", "keyphrases": ["uncertainty", "text classification", "dropout-entropy method"]} +{"id": "williams-reiter-2005-generating", "title": "Generating Readable Texts for Readers with Low Basic Skills", "abstract": "Most NLG systems generate texts for readers with good reading ability, but SkillSum adapts its output for readers with poor literacy. Evaluation with low-skilled readers confirms that SkillSum's knowledge-based microplanning choices enhance readability. 
We also discuss future readability improvements.", "keyphrases": ["reader", "most nlg system", "reading ability"]} +{"id": "littell-etal-2017-uriel", "title": "URIEL and lang2vec: Representing languages as typological, geographical, and phylogenetic vectors", "abstract": "We introduce the URIEL knowledge base for massively multilingual NLP and the lang2vec utility, which provides information-rich vector identifications of languages drawn from typological, geographical, and phylogenetic databases and normalized to have straightforward and consistent formats, naming, and semantics. The goal of URIEL and lang2vec is to enable multilingual NLP, especially on less-resourced languages and make possible types of experiments (especially but not exclusively related to NLP tasks) that are otherwise difficult or impossible due to the sparsity and incommensurability of the data sources. lang2vec vectors have been shown to reduce perplexity in multilingual language modeling, when compared to one-hot language identification vectors.", "keyphrases": ["lang2vec", "distance", "typological database", "language feature"]} +{"id": "sedoc-etal-2017-predicting", "title": "Predicting Emotional Word Ratings using Distributional Representations and Signed Clustering", "abstract": "Inferring the emotional content of words is important for text-based sentiment analysis, dialogue systems and psycholinguistics, but word ratings are expensive to collect at scale and across languages or domains. We develop a method that automatically extends word-level ratings to unrated words using signed clustering of vector space word representations along with affect ratings. We use our method to determine a word's valence and arousal, which determine its position on the circumplex model of affect, the most popular dimensional model of emotion. Our method achieves superior out-of-sample word rating prediction on both affective dimensions across three different languages when compared to state-of-the-art word similarity based methods. Our method can assist building word ratings for new languages and improve downstream tasks such as sentiment analysis and emotion detection.", "keyphrases": ["emotion", "rating", "clustering"]} +{"id": "herbelot-vecchi-2015-building", "title": "Building a shared world: mapping distributional to model-theoretic semantic spaces", "abstract": "In this paper, we introduce an approach to automatically map a standard distributional semantic space onto a set-theoretic model. We predict that there is a functional relationship between distributional information and vectorial concept representations in which dimensions are predicates and weights are generalised quantifiers. In order to test our prediction, we learn a model of such relationship over a publicly available dataset of feature norms annotated with natural language quantifiers. Our initial experimental results show that, at least for domain-specific data, we can indeed map between formalisms, and generate high-quality vector representations which encapsulate set overlap information. We further investigate the generation of natural language quantifiers from such vectors.", "keyphrases": ["semantic space", "functional relationship", "quantifier"]} +{"id": "roller-etal-2014-inclusive", "title": "Inclusive yet Selective: Supervised Distributional Hypernymy Detection", "abstract": "We test the Distributional Inclusion Hypothesis, which states that hypernyms tend to occur in a superset of contexts in which their hyponyms are found. 
We find that this hypothesis only holds when it is applied to relevant dimensions. We propose a robust supervised approach that achieves accuracies of .84 and .85 on two existing datasets and that can be interpreted as selecting the dimensions that are relevant for distributional inclusion.", "keyphrases": ["hypernymy", "distributional inclusion hypothesis", "word representation"]} +{"id": "amdal-etal-2008-rundkast", "title": "RUNDKAST: an Annotated Norwegian Broadcast News Speech Corpus", "abstract": "This paper describes the Norwegian broadcast news speech corpus RUNDKAST. The corpus contains recordings of approximately 77 hours of broadcast news shows from the Norwegian broadcasting company NRK. The corpus covers both read and spontaneous speech as well as spontaneous dialogues and multipart discussions, including frequent occurrences of non-speech material (e.g. music, jingles). The recordings have large variations in speaking styles, dialect use and recording/transmission quality. RUNDKAST has been annotated for research in speech technology. The entire corpus has been manually segmented and transcribed using hierarchical levels. A subset of one hour of read and spontaneous speech from 10 different speakers has been manually annotated using broad phonetic labels. We provide a description of the database content, the annotation tools and strategies, and the conventions used for the different levels of annotation. A corpus of this kind has up to this point not been available for Norwegian, but is considered a necessary part of the infrastructure for language technology research in Norway. The RUNDKAST corpus is planned to be included in a future national Norwegian language resource bank.", "keyphrases": ["recording", "broadcast news show", "rundkast"]} +{"id": "vanhainen-salvi-2014-free", "title": "Free Acoustic and Language Models for Large Vocabulary Continuous Speech Recognition in Swedish", "abstract": "This paper presents results for large vocabulary continuous speech recognition (LVCSR) in Swedish. We trained acoustic models on the public domain NST Swedish corpus and made them freely available to the community. The training procedure corresponds to the reference recogniser (RefRec) developed for the SpeechDat databases during the COST249 action. We describe the modifications we made to the procedure in order to train on the NST database, and the language models we created based on the N-gram data available at the Norwegian Language Council. Our tests include medium vocabulary isolated word recognition and LVCSR. Because no previous results are available for LVCSR in Swedish, we use as baseline the performance of the SpeechDat models on the same tasks. We also compare our best results to the ones obtained in similar conditions on resource rich languages such as American English. We tested the acoustic models with HTK and Julius and plan to make them available in CMU Sphinx format as well in the near future. We believe that the free availability of these resources will boost research in speech and language technology in Swedish, even in research groups that do not have resources to develop ASR systems.", "keyphrases": ["language model", "speech recognition", "swedish"]} +{"id": "wu-mooney-2019-faithful", "title": "Faithful Multimodal Explanation for Visual Question Answering", "abstract": "AI systems' ability to explain their reasoning is critical to their utility and trustworthiness. 
Deep neural networks have enabled significant progress on many challenging problems such as visual question answering (VQA). However, most of them are opaque black boxes with limited explanatory capability. This paper presents a novel approach to developing a high-performing VQA system that can elucidate its answers with integrated textual and visual explanations that faithfully reflect important aspects of its underlying reasoning while capturing the style of comprehensible human explanations. Extensive experimental evaluation demonstrates the advantages of this approach compared to competing methods using both automated metrics and human evaluation.", "keyphrases": ["explanation", "visual question", "vqa"]} +{"id": "tandon-etal-2018-reasoning", "title": "Reasoning about Actions and State Changes by Injecting Commonsense Knowledge", "abstract": "Comprehending procedural text, e.g., a paragraph describing photosynthesis, requires modeling actions and the state changes they produce, so that questions about entities at different timepoints can be answered. Although several recent systems have shown impressive progress in this task, their predictions can be globally inconsistent or highly improbable. In this paper, we show how the predicted effects of actions in the context of a paragraph can be improved in two ways: (1) by incorporating global, commonsense constraints (e.g., a non-existent entity cannot be destroyed), and (2) by biasing reading with preferences from large-scale corpora (e.g., trees rarely move). Unlike earlier methods, we treat the problem as a neural structured prediction task, allowing hard and soft constraints to steer the model away from unlikely predictions. We show that the new model significantly outperforms earlier systems on a benchmark dataset for procedural text comprehension (+8% relative gain), and that it also avoids some of the nonsensical predictions that earlier systems make.", "keyphrases": ["state change", "commonsense knowledge", "recent system", "structured prediction task"]} +{"id": "asano-etal-2017-reference", "title": "Reference-based Metrics can be Replaced with Reference-less Metrics in Evaluating Grammatical Error Correction Systems", "abstract": "In grammatical error correction (GEC), automatically evaluating system outputs requires gold-standard references, which must be created manually and thus tend to be both expensive and limited in coverage. To address this problem, a reference-less approach has recently emerged; however, previous reference-less metrics that only consider the criterion of grammaticality have not worked as well as reference-based metrics. This study explores the potential of extending a prior grammaticality-based method to establish a reference-less evaluation method for GEC systems. Further, we empirically show that a reference-less metric that combines fluency and meaning preservation with grammaticality provides a better estimate of manual scores than that of commonly used reference-based metrics. 
To our knowledge, this is the first study that provides empirical evidence that a reference-less metric can replace reference-based metrics in evaluating GEC systems.", "keyphrases": ["reference-less metric", "grammaticality", "error correction", "manual evaluation"]} +{"id": "wuebker-etal-2010-training", "title": "Training Phrase Translation Models with Leaving-One-Out", "abstract": "Several attempts have been made to learn phrase translation probabilities for phrase-based statistical machine translation that go beyond pure counting of phrases in word-aligned training data. Most approaches report problems with over-fitting. We describe a novel leaving-one-out approach to prevent over-fitting that allows us to train phrase models that show improved translation performance on the WMT08 Europarl German-English task. In contrast to most previous work where phrase models were trained separately from other models used in translation, we include all components such as single word lexica and reordering models in training. Using this consistent training of phrase models we are able to achieve improvements of up to 1.4 points in BLEU. As a side effect, the phrase table size is reduced by more than 80%.", "keyphrases": ["phrase translation model", "leaving-one-out", "pure counting", "training procedure"]} +{"id": "xue-2008-labeling", "title": "Labeling Chinese Predicates with Semantic Roles", "abstract": "In this article we report work on Chinese semantic role labeling, taking advantage of two recently completed corpora, the Chinese PropBank, a semantically annotated corpus of Chinese verbs, and the Chinese Nombank, a companion corpus that annotates the predicate-argument structure of nominalized predicates. Because the semantic role labels are assigned to the constituents in a parse tree, we first report experiments in which semantic role labels are automatically assigned to hand-crafted parses in the Chinese Treebank. This gives us a measure of the extent to which semantic role labels can be bootstrapped from the syntactic annotation provided in the treebank. We then report experiments using automatic parses with decreasing levels of human annotation in the input to the syntactic parser: parses that use gold-standard segmentation and POS-tagging, parses that use only gold-standard segmentation, and fully automatic parses. These experiments gauge how successful semantic role labeling for Chinese can be in more realistic situations. Our results show that when hand-crafted parses are used, semantic role labeling accuracy for Chinese is comparable to what has been reported for the state-of-the-art English semantic role labeling systems trained and tested on the English PropBank, even though the Chinese PropBank is significantly smaller in size. When an automatic parser is used, however, the accuracy of our system is significantly lower than the English state of the art. This indicates that an improvement in Chinese parsing is critical to high-performance semantic role labeling for Chinese.", "keyphrases": ["semantic role", "chinese srl", "statistical classifier", "systematic research", "svm"]} +{"id": "riedel-mccallum-2011-robust", "title": "Robust Biomedical Event Extraction with Dual Decomposition and Minimal Domain Adaptation", "abstract": "We present a joint model for biomedical event extraction and apply it to four tracks of the BioNLP 2011 Shared Task. 
Our model decomposes into three sub-models that concern (a) event triggers and outgoing arguments, (b) event triggers and incoming arguments and (c) protein-protein bindings. For efficient decoding we employ dual decomposition. Our results are very competitive: With minimal adaptation of our model we come in second for two of the tasks---right behind a version of the system presented here that includes predictions of the Stanford event extractor as features. We also show that for the Infectious Diseases task, using data from the Genia track is a very effective way to improve accuracy.", "keyphrases": ["biomedical event extraction", "dual decomposition", "joint model"]} +{"id": "wang-etal-2020-maven", "title": "MAVEN: A Massive General Domain Event Detection Dataset", "abstract": "Event detection (ED), which means identifying event trigger words and classifying event types, is the first and most fundamental step for extracting event knowledge from plain text. Most existing datasets exhibit the following issues that limit further development of ED: (1) Data scarcity. Existing small-scale datasets are not sufficient for training and stably benchmarking increasingly sophisticated modern neural methods. (2) Low coverage. Limited event types of existing datasets cannot well cover general-domain events, which restricts the applications of ED models. To alleviate these problems, we present a MAssive eVENt detection dataset (MAVEN), which contains 4,480 Wikipedia documents, 118,732 event mention instances, and 168 event types. MAVEN alleviates the data scarcity problem and covers much more general event types. We reproduce the recent state-of-the-art ED models and conduct a thorough evaluation on MAVEN. The experimental results show that existing ED methods cannot achieve promising results on MAVEN as on the small datasets, which suggests that ED in the real world remains a challenging task and requires further research efforts. We also discuss further directions for general domain ED with empirical analyses. The source code and dataset can be obtained from .", "keyphrases": ["event detection", "data scarcity", "maven"]} +{"id": "giorgi-etal-2021-declutr", "title": "DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations", "abstract": "Sentence embeddings are an important component of many natural language processing (NLP) systems. Like word embeddings, sentence embeddings are typically learned on large text corpora and then transferred to various downstream tasks, such as clustering and retrieval. Unlike word embeddings, the highest performing solutions for learning sentence embeddings require labelled data, limiting their usefulness to languages and domains where labelled data is abundant. In this paper, we present DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations. Inspired by recent advances in deep metric learning (DML), we carefully design a self-supervised objective for learning universal sentence embeddings that does not require labelled training data. When used to extend the pretraining of transformer-based language models, our approach closes the performance gap between unsupervised and supervised pretraining for universal sentence encoders. Importantly, our experiments suggest that the quality of the learned embeddings scale with both the number of trainable parameters and the amount of unlabelled training data. 
Our code and pretrained models are publicly available and can be easily adapted to new domains or used to embed unseen text.", "keyphrases": ["deep contrastive learning", "unsupervised textual representations", "same document", "various nlp task"]} +{"id": "grenager-manning-2006-unsupervised", "title": "Unsupervised Discovery of a Statistical Verb Lexicon", "abstract": "This paper demonstrates how unsupervised techniques can be used to learn models of deep linguistic structure. Determining the semantic roles of a verb's dependents is an important step in natural language understanding. We present a method for learning models of verb argument patterns directly from unannotated text. The learned models are similar to existing verb lexicons such as VerbNet and PropBank, but additionally include statistics about the linkings used by each verb. The method is based on a structured probabilistic model of the domain, and unsupervised learning is performed with the EM algorithm. The learned models can also be used discriminatively as semantic role labelers, and when evaluated relative to the PropBank annotation, the best learned model reduces 28% of the error between an informed baseline and an oracle upper bound.", "keyphrases": ["semantic role", "important step", "linking", "unsupervised learning", "graphical model"]} +{"id": "voita-etal-2018-context", "title": "Context-Aware Neural Machine Translation Learns Anaphora Resolution", "abstract": "Standard machine translation systems process sentences in isolation and hence ignore extra-sentential information, even though extended context can both prevent mistakes in ambiguous cases and improve translation coherence. We introduce a context-aware neural machine translation model designed in such a way that the flow of information from the extended context to the translation model can be controlled and analyzed. We experiment with an English-Russian subtitles dataset, and observe that much of what is captured by our model deals with improving pronoun translation. We measure correspondences between induced attention distributions and coreference relations and observe that the model implicitly captures anaphora. It is consistent with gains for sentences where pronouns need to be gendered in translation. Besides improvements in anaphoric cases, the model also improves in overall BLEU, both over its context-agnostic version (+0.7) and over simple concatenation of the context and source sentences (+0.6).", "keyphrases": ["neural machine translation", "anaphora resolution", "additional encoder", "context sentence", "translation quality"]} +{"id": "hu-etal-2020-ocnli", "title": "OCNLI: Original Chinese Natural Language Inference", "abstract": "Despite the tremendous recent progress on natural language inference (NLI), driven largely by large-scale investment in new datasets (e.g., SNLI, MNLI) and advances in modeling, most progress has been limited to English due to a lack of reliable datasets for most of the world's languages. In this paper, we present the first large-scale NLI dataset (consisting of ~56,000 annotated sentence pairs) for Chinese called the Original Chinese Natural Language Inference dataset (OCNLI). Unlike recent attempts at extending NLI to other languages, our dataset does not rely on any automatic translation or non-expert annotation. Instead, we elicit annotations from native speakers specializing in linguistics. We follow closely the annotation protocol used for MNLI, but create new strategies for eliciting diverse hypotheses. 
We establish several baseline results on our dataset using state-of-the-art pre-trained models for Chinese, and find even the best performing models to be far outpaced by human performance (~12% absolute performance gap), making it a challenging new resource that we hope will help to accelerate progress in Chinese NLU. To the best of our knowledge, this is the first human-elicited MNLI-style corpus for a non-English language.", "keyphrases": ["chinese", "natural language inference", "large-scale nli dataset", "human performance", "ocnli"]} +{"id": "burkett-klein-2008-two", "title": "Two Languages are Better than One (for Syntactic Parsing)", "abstract": "We show that jointly parsing a bitext can substantially improve parse quality on both sides. In a maximum entropy bitext parsing model, we define a distribution over source trees, target trees, and node-to-node alignments between them. Features include monolingual parse scores and various measures of syntactic divergence. Using the translated portion of the Chinese treebank, our model is trained iteratively to maximize the marginal likelihood of training tree pairs, with alignments treated as latent variables. The resulting bitext parser outperforms state-of-the-art monolingual parser baselines by 2.5 F1 at predicting English side trees and 1.8 F1 at predicting Chinese side trees (the highest published numbers on these corpora). Moreover, these improved trees yield a 2.4 BLEU increase when used in a downstream MT evaluation.", "keyphrases": ["syntactic parsing", "bitext", "side", "parallel data", "log-linear model"]} +{"id": "zhang-etal-2019-integrating", "title": "Integrating Semantic Knowledge to Tackle Zero-shot Text Classification", "abstract": "Insufficient or even unavailable training data of emerging classes is a big challenge of many classification tasks, including text classification. Recognising text documents of classes that have never been seen in the learning stage, so-called zero-shot text classification, is therefore difficult and only limited previous works tackled this problem. In this paper, we propose a two-phase framework together with data augmentation and feature augmentation to solve this problem. Four kinds of semantic knowledge (word embeddings, class descriptions, class hierarchy, and a general knowledge graph) are incorporated into the proposed framework to deal with instances of unseen classes effectively. Experimental results show that each and the combination of the two phases achieve the best overall accuracy compared with baselines and recent approaches in classifying real-world texts under the zero-shot scenario.", "keyphrases": ["semantic knowledge", "zero-shot text classification", "word embedding"]} +{"id": "goyal-etal-2010-automatically", "title": "Automatically Producing Plot Unit Representations for Narrative Text", "abstract": "In the 1980s, plot units were proposed as a conceptual knowledge structure for representing and summarizing narrative stories. Our research explores whether current NLP technology can be used to automatically produce plot unit representations for narrative text. We create a system called AESOP that exploits a variety of existing resources to identify affect states and applies \"projection rules\" to map the affect states onto the characters in a story. 
We also use corpus-based techniques to generate a new type of affect knowledge base: verbs that impart positive or negative states onto their patients (e.g., being eaten is an undesirable state, but being fed is a desirable state). We harvest these \"patient polarity verbs\" from a Web corpus using two techniques: co-occurrence with Evil/Kind Agent patterns, and bootstrapping over conjunctions of verbs. We evaluate the plot unit representations produced by our system on a small collection of Aesop's fables.", "keyphrases": ["plot unit representation", "narrative text", "aesop", "character", "polarity verb"]} +{"id": "rashkin-etal-2020-plotmachines", "title": "PlotMachines: Outline-Conditioned Generation with Dynamic Plot State Tracking", "abstract": "We propose the task of outline-conditioned story generation: given an outline as a set of phrases that describe key characters and events to appear in a story, the task is to generate a coherent narrative that is consistent with the provided outline. This task is challenging as the input only provides a rough sketch of the plot, and thus, models need to generate a story by interweaving the key points provided in the outline. This requires the model to keep track of the dynamic states of the latent plot, conditioning on the input outline while generating the full story. We present PlotMachines, a neural narrative model that learns to transform an outline into a coherent story by tracking the dynamic plot states. In addition, we enrich PlotMachines with high-level discourse structure so that the model can learn different writing styles corresponding to different parts of the narrative. Comprehensive experiments over three fiction and non-fiction datasets demonstrate that large-scale language models, such as GPT-2 and Grover, despite their impressive generation performance, are not sufficient in generating coherent narratives for the given outline, and dynamic plot state tracking is important for composing narratives with tighter, more consistent plots.", "keyphrases": ["plot state tracking", "story generation", "outline", "plotmachines"]} +{"id": "yang-etal-2015-hierarchical", "title": "A Hierarchical Distance-dependent Bayesian Model for Event Coreference Resolution", "abstract": "We present a novel hierarchical distance-dependent Bayesian model for event coreference resolution. While existing generative models for event coreference resolution are completely unsupervised, our model allows for the incorporation of pairwise distances between event mentions \u2014 information that is widely used in supervised coreference models to guide the generative clustering processing for better event clustering both within and across documents. We model the distances between event mentions using a feature-rich learnable distance function and encode them as Bayesian priors for nonparametric clustering. Experiments on the ECB+ corpus show that our model outperforms state-of-the-art methods for both within- and cross-document event coreference resolution.", "keyphrases": ["distance-dependent bayesian model", "event coreference resolution", "cluster", "complex structure", "semantic role"]} +{"id": "peyrard-2019-studying", "title": "Studying Summarization Evaluation Metrics in the Appropriate Scoring Range", "abstract": "In summarization, automatic evaluation metrics are usually compared based on their ability to correlate with human judgments. 
Unfortunately, the few existing human judgment datasets have been created as by-products of the manual evaluations performed during the DUC/TAC shared tasks. However, modern systems are typically better than the best systems submitted at the time of these shared tasks. We show that, surprisingly, evaluation metrics which behave similarly on these datasets (average-scoring range) strongly disagree in the higher-scoring range in which current systems now operate. It is problematic because metrics disagree yet we can't decide which one to trust. This is a call for collecting human judgments for high-scoring summaries as this would resolve the debate over which metrics to trust. This would also be greatly beneficial to further improve summarization systems and metrics alike.", "keyphrases": ["summarization", "evaluation metric", "higher-scoring range"]} +{"id": "poon-vanderwende-2010-joint", "title": "Joint Inference for Knowledge Extraction from Biomedical Literature", "abstract": "Knowledge extraction from online repositories such as PubMed holds the promise of dramatically speeding up biomedical research and drug design. After initially focusing on recognizing proteins and binary interactions, the community has recently shifted their attention to the more ambitious task of recognizing complex, nested event structures. State-of-the-art systems use a pipeline architecture in which the candidate events are identified first, and subsequently the arguments. This fails to leverage joint inference among events and arguments for mutual disambiguation. Some joint approaches have been proposed, but they still lag much behind in accuracy. In this paper, we present the first joint approach for bio-event extraction that obtains state-of-the-art results. Our system is based on Markov logic and adopts a novel formulation by jointly predicting events and arguments, as well as individual dependency edges that compose the argument paths. On the BioNLP'09 Shared Task dataset, it reduced F1 errors by more than 10% compared to the previous best joint approach.", "keyphrases": ["knowledge extraction", "markov logic", "joint inference", "event extraction", "mlns"]} +{"id": "chrupala-etal-2017-representations", "title": "Representations of language in a model of visually grounded speech signal", "abstract": "We present a visually grounded model of speech perception which projects spoken utterances and images to a joint semantic space. We use a multi-layer recurrent highway network to model the temporal nature of spoken speech, and show that it learns to extract both form and meaning-based linguistic knowledge from the input signal. We carry out an in-depth analysis of the representations used by different components of the trained model and show that encoding of semantic aspects tends to become richer as we go up the hierarchy of layers, whereas encoding of form-related aspects of the language input tends to initially increase and then plateau or decrease.", "keyphrases": ["speech signal", "image", "semantic information"]} +{"id": "lazaridou-etal-2015-combining", "title": "Combining Language and Vision with a Multimodal Skip-gram Model", "abstract": "We extend the SKIP-GRAM model of Mikolov et al. (2013a) by taking visual information into account. Like SKIP-GRAM, our multimodal models (MMSKIP-GRAM) build vector-based word representations by learning to predict linguistic contexts in text corpora. 
However, for a restricted set of words, the models are also exposed to visual representations of the objects they denote (extracted from natural images), and must predict linguistic and visual features jointly. The MMSKIP-GRAM models achieve good performance on a variety of semantic benchmarks. Moreover, since they propagate visual information to all words, we use them to improve image labeling and retrieval in the zero-shot setup, where the test concepts are never seen during model training. Finally, the MMSKIP-GRAM models discover intriguing visual properties of abstract words, paving the way to realistic implementations of embodied theories of meaning.", "keyphrases": ["vision", "multimodal skip-gram model", "word embedding", "fusion approach"]} +{"id": "venugopalan-etal-2015-translating", "title": "Translating Videos to Natural Language Using Deep Recurrent Neural Networks", "abstract": "Solving the visual symbol grounding problem has long been a goal of artificial intelligence. The field appears to be advancing closer to this goal with recent breakthroughs in deep learning for natural language grounding in static images. In this paper, we propose to translate videos directly to sentences using a unified deep neural network with both convolutional and recurrent structure. Described video datasets are scarce, and most existing methods have been applied to toy domains with a small vocabulary of possible words. By transferring knowledge from 1.2M+ images with category labels and 100,000+ images with captions, our method is able to create sentence descriptions of open-domain videos with large vocabularies. We compare our approach with recent work using language generation metrics, subject, verb, and object prediction accuracy, and a human evaluation.", "keyphrases": ["video", "image captioning", "cnn-rnn encoder-decoder framework"]} +{"id": "lu-etal-2015-deep", "title": "Deep Multilingual Correlation for Improved Word Embeddings", "abstract": "Word embeddings have been found useful for many NLP tasks, including part-of-speech tagging, named entity recognition, and parsing. Adding multilingual context when learning embeddings can improve their quality, for example via canonical correlation analysis (CCA) on embeddings from two languages. In this paper, we extend this idea to learn deep non-linear transformations of word embeddings of the two languages, using the recently proposed deep canonical correlation analysis. The resulting embeddings, when evaluated on multiple word and bigram similarity tasks, consistently improve over monolingual embeddings and over embeddings transformed with linear CCA.", "keyphrases": ["multilingual context", "canonical correlation analysis", "semantic space"]} +{"id": "sheth-etal-2021-bootstrapping", "title": "Bootstrapping Multilingual AMR with Contextual Word Alignments", "abstract": "We develop high performance multilingual Abstract Meaning Representation (AMR) systems by projecting English AMR annotations to other languages with weak supervision. We achieve this goal by bootstrapping transformer-based multilingual word embeddings, in particular those from cross-lingual RoBERTa (XLM-R large). We develop a novel technique for foreign-text-to-English AMR alignment, using the contextual word alignment between English and foreign language tokens. This word alignment is weakly supervised and relies on the contextualized XLM-R word embeddings. 
We achieve a highly competitive performance that surpasses the best published results for German, Italian, Spanish and Chinese.", "keyphrases": ["amr", "word alignment", "annotation projection"]} +{"id": "denkowski-etal-2012-cmu", "title": "The CMU-Avenue French-English Translation System", "abstract": "This paper describes the French-English translation system developed by the Avenue research group at Carnegie Mellon University for the Seventh Workshop on Statistical Machine Translation (NAACL WMT12). We present a method for training data selection, a description of our hierarchical phrase-based translation system, and a discussion of the impact of data size on best practice for system building.", "keyphrases": ["french-english translation system", "word alignment", "qe-clean system"]} +{"id": "das-etal-2022-container", "title": "CONTaiNER: Few-Shot Named Entity Recognition via Contrastive Learning", "abstract": "Named Entity Recognition (NER) in Few-Shot setting is imperative for entity tagging in low resource domains. Existing approaches only learn class-specific semantic features and intermediate representations from source domains. This affects generalizability to unseen target domains, resulting in suboptimal performances. To this end, we present CONTaiNER, a novel contrastive learning technique that optimizes the inter-token distribution distance for Few-Shot NER. Instead of optimizing class-specific attributes, CONTaiNER optimizes a generalized objective of differentiating between token categories based on their Gaussian-distributed embeddings. This effectively alleviates overfitting issues originating from training domains. Our experiments in several traditional test domains (OntoNotes, CoNLL'03, WNUT '17, GUM) and a new large scale Few-Shot NER dataset (Few-NERD) demonstrate that on average, CONTaiNER outperforms previous methods by 3%-13% absolute F1 points while showing consistent performance trends, even in challenging scenarios where previous approaches could not achieve appreciable performance.", "keyphrases": ["entity recognition", "contrastive learning", "few-shot ner"]} +{"id": "cook-stevenson-2009-unsupervised", "title": "An Unsupervised Model for Text Message Normalization", "abstract": "Cell phone text messaging users express themselves briefly and colloquially using a variety of creative forms. We analyze a sample of creative, non-standard text message word forms to determine frequent word formation processes in texting language. Drawing on these observations, we construct an unsupervised noisy-channel model for text message normalization. On a test set of 303 text message forms that differ from their standard form, our model achieves 59% accuracy, which is on par with the best supervised results reported on this dataset.", "keyphrases": ["text message normalization", "word formation process", "noisy channel model", "probabilistic model", "variation"]} +{"id": "yu-etal-2020-ch", "title": "CH-SIMS: A Chinese Multimodal Sentiment Analysis Dataset with Fine-grained Annotation of Modality", "abstract": "Previous studies in multimodal sentiment analysis have used limited datasets, which only contain unified multimodal annotations. However, the unified annotations do not always reflect the independent sentiment of single modalities and limit the model to capture the difference between modalities. 
In this paper, we introduce a Chinese single- and multi-modal sentiment analysis dataset, CH-SIMS, which contains 2,281 refined video segments in the wild with both multimodal and independent unimodal annotations. It allows researchers to study the interaction between modalities or use independent unimodal annotations for unimodal sentiment analysis. Furthermore, we propose a multi-task learning framework based on late fusion as the baseline. Extensive experiments on the CH-SIMS show that our methods achieve state-of-the-art performance and learn more distinctive unimodal representations. The full dataset and codes are available for use at .", "keyphrases": ["multimodal sentiment analysis", "unimodal annotation", "ch-sims"]} +{"id": "xiong-etal-2018-one", "title": "One-Shot Relational Learning for Knowledge Graphs", "abstract": "Knowledge graphs (KG) are the key components of various natural language processing applications. To further expand KGs' coverage, previous studies on knowledge graph completion usually require a large number of positive examples for each relation. However, we observe long-tail relations are actually more common in KGs and those newly added relations often do not have many known triples for training. In this work, we aim at predicting new facts under a challenging setting where only one training instance is available. We propose a one-shot relational learning framework, which utilizes the knowledge distilled by embedding models and learns a matching metric by considering both the learned embeddings and one-hop graph structures. Empirically, our model yields considerable performance improvements over existing embedding models, and also eliminates the need of re-training the embedding models when dealing with newly added relations.", "keyphrases": ["relational learning", "knowledge graph", "triple", "graph structure", "one-shot learning"]} +{"id": "geva-etal-2020-injecting", "title": "Injecting Numerical Reasoning Skills into Language Models", "abstract": "Large pre-trained language models (LMs) are known to encode substantial amounts of linguistic information. However, high-level reasoning skills, such as numerical reasoning, are difficult to learn from a language-modeling objective only. Consequently, existing models for numerical reasoning have used specialized architectures with limited flexibility. In this work, we show that numerical reasoning is amenable to automatic data generation, and thus one can inject this skill into pre-trained LMs, by generating large amounts of data, and training in a multi-task setup. We show that pre-training our model, GenBERT, on this data, dramatically improves performance on DROP (49.3 \u2013 72.3 F1), reaching performance that matches state-of-the-art models of comparable size, while using a simple and general-purpose encoder-decoder architecture. Moreover, GenBERT generalizes well to math word problem datasets, while maintaining high performance on standard RC tasks. Our approach provides a general recipe for injecting skills into large pre-trained LMs, whenever the skill is amenable to automatic data augmentation.", "keyphrases": ["numerical reasoning", "skill", "language model", "large amount", "drop"]} +{"id": "darwish-mubarak-2016-farasa", "title": "Farasa: A New Fast and Accurate Arabic Word Segmenter", "abstract": "In this paper, we present Farasa (meaning insight in Arabic), which is a fast and accurate Arabic segmenter. Segmentation involves breaking Arabic words into their constituent clitics. 
Our approach is based on SVMrank using linear kernels. The features that we utilized account for: likelihood of stems, prefixes, suffixes, and their combination; presence in lexicons containing valid stems and named entities; and underlying stem templates. Farasa outperforms or equalizes state-of-the-art Arabic segmenters, namely QATARA and MADAMIRA. Meanwhile, Farasa is nearly one order of magnitude faster than QATARA and two orders of magnitude faster than MADAMIRA. The segmenter should be able to process one billion words in less than 5 hours. Farasa is written entirely in native Java, with no external dependencies, and is open-source.", "keyphrases": ["segmentation", "arabic segmenter", "farasa"]} +{"id": "zhang-etal-2007-improved", "title": "Improved chunk-level reordering for statistical machine translation", "abstract": "Inspired by previous chunk-level reordering approaches to statistical machine translation, this paper presents two methods to improve the reordering at the chunk level. By introducing a new lattice weighting factor and by reordering the training source data, an improvement is reported on TER and BLEU. Compared to the previous chunk-level reordering approach, the BLEU score improves 1.4% absolutely. The translation results are reported on IWSLT Chinese-English task.", "keyphrases": ["statistical machine translation", "chunk", "option", "psmt system", "discrepancy"]} +{"id": "seo-etal-2015-solving", "title": "Solving Geometry Problems: Combining Text and Diagram Interpretation", "abstract": "This paper introduces GEOS, the first automated system to solve unaltered SAT geometry questions by combining text understanding and diagram interpretation. We model the problem of understanding geometry questions as submodular optimization, and identify a formal problem description likely to be compatible with both the question text and diagram. GEOS then feeds the description to a geometric solver that attempts to determine the correct answer. In our experiments, GEOS achieves a 49% score on official SAT questions, and a score of 61% on practice questions. Finally, we show that by integrating textual and visual information, GEOS boosts the accuracy of dependency and semantic parsing of the question text.", "keyphrases": ["geometry problem", "diagram interpretation", "geos", "reasoning"]} +{"id": "boston-etal-2008-surprising", "title": "Surprising Parser Actions and Reading Difficulty", "abstract": "An incremental dependency parser's probability model is entered as a predictor in a linear mixed-effects model of German readers' eye-fixation durations. This dependency-based predictor improves a baseline that takes into account word length, n-gram probability, and Cloze predictability that are typically applied in models of human reading. This improvement obtains even when the dependency parser explores a tiny fraction of its search space, as suggested by narrow-beam accounts of human sentence processing such as Garden Path theory.", "keyphrases": ["dependency parser", "predictor", "surprisal"]} +{"id": "uryupina-poesio-2012-domain", "title": "Domain-specific vs. Uniform Modeling for Coreference Resolution", "abstract": "Several corpora annotated for coreference have been made available in the past decade. These resources differ with respect to their size and the underlying structure: the number of domains and their similarity. 
Our study compares domain-specific models, learned from small heterogeneous subsets of the investigated corpora, against uniform models, that utilize all the available data. We show that for knowledge-poor baseline systems, domain-specific and uniform modeling yield same results. Systems, relying on large amounts of linguistic knowledge, however, exhibit differences in their performance: with all the designed features in use, domain-specific models suffer from over-fitting, whereas with pre-selected feature sets they tend to outperform union models.", "keyphrases": ["uniform modeling", "coreference resolution", "domain adaptation"]} +{"id": "ma-xia-2014-unsupervised", "title": "Unsupervised Dependency Parsing with Transferring Distribution via Parallel Guidance and Entropy Regularization", "abstract": "We present a novel approach for inducing unsupervised dependency parsers for languages that have no labeled training data, but have translated text in a resource-rich language. We train probabilistic parsing models for resource-poor languages by transferring cross-lingual knowledge from resource-rich language with entropy regularization. Our method can be used as a purely monolingual dependency parser, requiring no human translations for the test data, thus making it applicable to a wide range of resource-poor languages. We perform experiments on three Data sets \u2014 Version 1.0 and version 2.0 of Google Universal Dependency Treebanks and Treebanks from CoNLL shared-tasks, across ten languages. We obtain state-of-the-art performance of all the three data sets when compared with previously studied unsupervised and projected parsing systems.", "keyphrases": ["entropy regularization", "dependency parser", "resource-rich language", "parallel data", "annotation projection"]} +{"id": "barikeri-etal-2021-redditbias", "title": "RedditBias: A Real-World Resource for Bias Evaluation and Debiasing of Conversational Language Models", "abstract": "Text representation models are prone to exhibit a range of societal biases, reflecting the non-controlled and biased nature of the underlying pretraining data, which consequently leads to severe ethical issues and even bias amplification. Recent work has predominantly focused on measuring and mitigating bias in pretrained language models. Surprisingly, the landscape of bias measurements and mitigation resources and methods for conversational language models is still very scarce: it is limited to only a few types of bias, artificially constructed resources, and completely ignores the impact that debiasing methods may have on the final performance in dialog tasks, e.g., conversational response generation. In this work, we present REDDITBIAS, the first conversational data set grounded in the actual human conversations from Reddit, allowing for bias measurement and mitigation across four important bias dimensions: gender, race, religion, and queerness. Further, we develop an evaluation framework which simultaneously 1) measures bias on the developed REDDITBIAS resource, and 2) evaluates model capability in dialog tasks after model debiasing. We use the evaluation framework to benchmark the widely used conversational DialoGPT model along with the adaptations of four debiasing methods. 
Our results indicate that DialoGPT is biased with respect to religious groups and that some debiasing techniques can remove this bias while preserving downstream task performance.", "keyphrases": ["language model", "conversational data", "redditbias"]} +{"id": "yuan-etal-2020-emotion", "title": "Emotion-Cause Pair Extraction as Sequence Labeling Based on A Novel Tagging Scheme", "abstract": "The task of emotion-cause pair extraction deals with finding all emotions and the corresponding causes in unannotated emotion texts. Most recent studies are based on the likelihood of Cartesian product among all clause candidates, resulting in a high computational cost. Targeting this issue, we regard the task as a sequence labeling problem and propose a novel tagging scheme with coding the distance between linked components into the tags, so that emotions and the corresponding causes can be extracted simultaneously. Accordingly, an end-to-end model is presented to process the input texts from left to right, always with linear time complexity, leading to a speed up. Experimental results show that our proposed model achieves the best performance, outperforming the state-of-the-art method by 2.26% (p<0.001) in F1 measure.", "keyphrases": ["novel tagging scheme", "emotion", "cause"]} +{"id": "shen-etal-2021-directed", "title": "Directed Acyclic Graph Network for Conversational Emotion Recognition", "abstract": "The modeling of conversational context plays a vital role in emotion recognition from conversation (ERC). In this paper, we put forward a novel idea of encoding the utterances with a directed acyclic graph (DAG) to better model the intrinsic structure within a conversation, and design a directed acyclic neural network, namely DAG-ERC, to implement this idea. In an attempt to combine the strengths of conventional graph-based neural models and recurrence-based neural models, DAG-ERC provides a more intuitive way to model the information flow between long-distance conversation background and nearby context. Extensive experiments are conducted on four ERC benchmarks with state-of-the-art models employed as baselines for comparison. The empirical results demonstrate the superiority of this new model and confirm the motivation of the directed acyclic graph architecture for ERC.", "keyphrases": ["acyclic graph", "conversation", "graph-based neural model"]} +{"id": "chen-etal-2019-multi-source", "title": "Multi-Source Cross-Lingual Model Transfer: Learning What to Share", "abstract": "Modern NLP applications have enjoyed a great boost utilizing neural network models. Such deep neural models, however, are not applicable to most human languages due to the lack of annotated training data for various NLP tasks. Cross-lingual transfer learning (CLTL) is a viable method for building NLP models for a low-resource target language by leveraging labeled data from other (source) languages. In this work, we focus on the multilingual transfer setting where training data in multiple source languages is leveraged to further boost target language performance. Unlike most existing methods that rely only on language-invariant features for CLTL, our approach coherently utilizes both language-invariant and language-specific features at instance level. Our model leverages adversarial networks to learn language-invariant features, and mixture-of-experts models to dynamically exploit the similarity between the target language and each individual source language. 
This enables our model to learn effectively what to share between various languages in the multilingual setup. Moreover, when coupled with unsupervised multilingual embeddings, our model can operate in a zero-resource setting where neither target language training data nor cross-lingual resources are available. Our model achieves significant performance gains over prior art, as shown in an extensive set of experiments over multiple text classification and sequence tagging tasks including a large-scale industry dataset.", "keyphrases": ["various nlp task", "cross-lingual transfer learning", "source language", "language-invariant feature", "mixture-of-expert model"]} +{"id": "szarvas-etal-2012-cross", "title": "Cross-Genre and Cross-Domain Detection of Semantic Uncertainty", "abstract": "Uncertainty is an important linguistic phenomenon that is relevant in various Natural Language Processing applications, in diverse genres from medical to community generated, newswire or scientific discourse, and domains from science to humanities. The semantic uncertainty of a proposition can be identified in most cases by using a finite dictionary (i.e., lexical cues) and the key steps of uncertainty detection in an application include the steps of locating the (genre- and domain-specific) lexical cues, disambiguating them, and linking them with the units of interest for the particular application (e.g., identified events in information extraction). In this study, we focus on the genre and domain differences of the context-dependent semantic uncertainty cue recognition task. We introduce a unified subcategorization of semantic uncertainty as different domain applications can apply different uncertainty categories. Based on this categorization, we normalized the annotation of three corpora and present results with a state-of-the-art uncertainty cue recognition model for four fine-grained categories of semantic uncertainty. Our results reveal the domain and genre dependence of the problem; nevertheless, we also show that even a distant source domain data set can contribute to the recognition and disambiguation of uncertainty cues, efficiently reducing the annotation costs needed to cover a new domain. Thus, the unified subcategorization and domain adaptation for training the models offer an efficient solution for cross-domain and cross-genre semantic uncertainty recognition.", "keyphrases": ["detection", "semantic uncertainty", "unified subcategorization", "wikipedia article"]} +{"id": "alkhouli-ney-2017-biasing", "title": "Biasing Attention-Based Recurrent Neural Networks Using External Alignment Information", "abstract": "This work explores extending attention-based neural models to include alignment information as input. We modify the attention component to have dependence on the current source position. The attention model is then used as a lexical model together with an additional alignment model to generate translation. The attention model is trained using external alignment information, and it is applied in decoding by performing beam search over the lexical and alignment hypotheses. The alignment model is used to score these alignment candidates. We demonstrate that the attention layer is capable of using the alignment information to improve over the baseline attention model that uses no such alignments. 
Our experiments are performed on two tasks: WMT 2016 English \u2192 Romanian and WMT 2017 German \u2192 English.", "keyphrases": ["external alignment information", "attention component", "neural machine translation"]} +{"id": "marino-etal-2006-n", "title": "N-gram-based Machine Translation", "abstract": "This article describes in detail an n-gram approach to statistical machine translation. This approach consists of a log-linear combination of a translation model based on n-grams of bilingual units, which are referred to as tuples, along with four specific feature functions. Translation performance, which happens to be in the state of the art, is demonstrated with Spanish-to-English and English-to-Spanish translations of the European Parliament Plenary Sessions (EPPS).", "keyphrases": ["machine translation", "bilingual unit", "tuple", "n-gram-based approach", "smt framework"]} +{"id": "danilevsky-etal-2020-survey", "title": "A Survey of the State of Explainable AI for Natural Language Processing", "abstract": "Recent years have seen important advances in the quality of state-of-the-art models, but this has come at the expense of models becoming less interpretable. This survey presents an overview of the current state of Explainable AI (XAI), considered within the domain of Natural Language Processing (NLP). We discuss the main categorization of explanations, as well as the various ways explanations can be arrived at and visualized. We detail the operations and explainability techniques currently available for generating explanations for NLP model predictions, to serve as a resource for model developers in the community. Finally, we point out the current gaps and encourage directions for future work in this important research area.", "keyphrases": ["survey", "explanation method", "model output", "xai method"]} +{"id": "habash-2007-syntactic", "title": "Syntactic preprocessing for statistical machine translation", "abstract": "We describe an approach to automatic source-language syntactic preprocessing in the context of Arabic-English phrase-based machine translation. Source-language labeled dependencies, that are word aligned with target language words in a parallel corpus, are used to automatically extract syntactic reordering rules in the same spirit of Xia and McCord (2004) and Zhang et al. (2007). The extracted rules are used to reorder the source-language side of the training and test data. Our results show that when using monotonic decoding and translations for unigram source-language phrases only, source-language reordering gives very significant gains over no reordering (25% relative increase in BLEU score). With decoder distortion turned on and with access to all phrase translations, the differences in BLEU scores are diminished. However, an analysis of sentence-level BLEU scores shows reordering outperforms no-reordering in over 40% of the sentences. These results suggest that the approach holds big promise but much more work on Arabic parsing may be needed.", "keyphrases": ["parallel corpus", "side", "syntactic preprocessing", "other language", "parse quality"]} +{"id": "grefenstette-2013-towards", "title": "Towards a Formal Distributional Semantics: Simulating Logical Calculi with Tensors", "abstract": "The development of compositional distributional models of semantics reconciling the empirical aspects of distributional semantics with the compositional aspects of formal semantics is a popular topic in the contemporary literature. 
This paper seeks to bring this reconciliation one step further by showing how the mathematical constructs commonly used in compositional distributional models, such as tensors and matrices, can be used to simulate different aspects of predicate logic. This paper discusses how the canonical isomorphism between tensors and multilinear maps can be exploited to simulate a full-blown quantifier-free predicate calculus using tensors. It provides tensor interpretations of the set of logical connectives required to model propositional calculi. It suggests a variant of these tensor calculi capable of modelling quantifiers, using few non-linear operations. It finally discusses the relation between these variants, and how this relation should constitute the subject of future work.", "keyphrases": ["logical calculi", "distributional model", "tensor calculus"]} +{"id": "xie-etal-2013-semantic", "title": "Semantic Frames to Predict Stock Price Movement", "abstract": "Semantic frames are a rich linguistic resource. There has been much work on semantic frame parsers, but less that applies them to general NLP problems. We address a task to predict change in stock price from financial news. Semantic frames help to generalize from specific sentences to scenarios, and to detect the (positive or negative) roles of specific companies. We introduce a novel tree representation, and use it to train predictive models with tree kernels using support vector machines. Our experiments test multiple text representations on two binary classification tasks, change of price and polarity. Experiments show that features derived from semantic frame parsing have significantly better performance across years on the polarity task.", "keyphrases": ["stock price movement", "semantic frame parser", "news", "specific company", "predictive model"]} +{"id": "denis-baldridge-2008-specialized", "title": "Specialized Models and Ranking for Coreference Resolution", "abstract": "This paper investigates two strategies for improving coreference resolution: (1) training separate models that specialize in particular types of mentions (e.g., pronouns versus proper nouns) and (2) using a ranking loss function rather than a classification function. In addition to being conceptually simple, these modifications of the standard single-model, classification-based approach also deliver significant performance improvements. Specifically, we show that on the ACE corpus both strategies produce f-score gains of more than 3% across the three coreference evaluation metrics (MUC, B3, and CEAF).", "keyphrases": ["coreference resolution", "mention", "classification function", "ranker", "candidate antecedent"]} +{"id": "joty-etal-2015-codra", "title": "CODRA: A Novel Discriminative Framework for Rhetorical Analysis", "abstract": "Clauses and sentences rarely stand on their own in an actual discourse; rather, the relationship between them carries important information that allows the discourse to express a meaning as a whole beyond the sum of its individual parts. Rhetorical analysis seeks to uncover this coherence structure. In this article, we present CODRA\u2014 a COmplete probabilistic Discriminative framework for performing Rhetorical Analysis in accordance with Rhetorical Structure Theory, which posits a tree representation of a discourse. CODRA comprises a discourse segmenter and a discourse parser. First, the discourse segmenter, which is based on a binary classifier, identifies the elementary discourse units in a given text. 
Then the discourse parser builds a discourse tree by applying an optimal parsing algorithm to probabilities inferred from two Conditional Random Fields: one for intra-sentential parsing and the other for multi-sentential parsing. We present two approaches to combine these two stages of parsing effectively. By conducting a series of empirical evaluations over two different data sets, we demonstrate that CODRA significantly outperforms the state-of-the-art, often by a wide margin. We also show that a reranking of the k-best parse hypotheses generated by CODRA can potentially improve the accuracy even further.", "keyphrases": ["rhetorical analysis", "conditional random fields", "multi-sentential parsing", "codra", "unlabelled structure"]} +{"id": "nepveu-etal-2004-adaptive", "title": "Adaptive Language and Translation Models for Interactive Machine Translation", "abstract": "We describe experiments carried out with adaptive language and translation models in the context of an interactive computer-assisted translation program. We developed cache-based language models which were then extended to the bilingual case for a cache-based translation model. We present the improvements we obtained in two contexts: in a theoretical setting, we achieved a drop in perplexity for the new models and, in a more practical situation simulating a user working with the system, we showed that fewer keystrokes would be needed to enter a translation.", "keyphrases": ["interactive machine translation", "adaptation", "model extension", "imt system", "cache-based translation model"]} +{"id": "denero-etal-2009-fast", "title": "Fast Consensus Decoding over Translation Forests", "abstract": "The minimum Bayes risk (MBR) decoding objective improves BLEU scores for machine translation output relative to the standard Viterbi objective of maximizing model score. However, MBR targeting BLEU is prohibitively slow to optimize over k-best lists for large k. In this paper, we introduce and analyze an alternative to MBR that is equally effective at improving performance, yet is asymptotically faster --- running 80 times faster than MBR in experiments with 1000-best lists. Furthermore, our fast decoding procedure can select output sentences based on distributions over entire forests of translations, in addition to k-best lists. We evaluate our procedure on translation forests from two large-scale, state-of-the-art hierarchical machine translation systems. Our forest-based decoding objective consistently outperforms k-best list MBR, giving improvements of up to 1.0 BLEU.", "keyphrases": ["consensus", "forest", "minimum bayes risk"]} +{"id": "imamura-etal-2018-enhancement", "title": "Enhancement of Encoder and Attention Using Target Monolingual Corpora in Neural Machine Translation", "abstract": "A large-scale parallel corpus is required to train encoder-decoder neural machine translation. The method of using synthetic parallel texts, in which target monolingual corpora are automatically translated into source sentences, is effective in improving the decoder, but is unreliable for enhancing the encoder. In this paper, we propose a method that enhances the encoder and attention using target monolingual corpora by generating multiple source sentences via sampling. By using multiple source sentences, diversity close to that of humans is achieved.
Our experimental results show that the translation quality is improved by increasing the number of synthetic source sentences for each given target sentence, and quality close to that using a manually created parallel corpus was achieved.", "keyphrases": ["target monolingual corpora", "neural machine translation", "back-translation"]} +{"id": "qin-etal-2021-neural", "title": "Neural-Symbolic Solver for Math Word Problems with Auxiliary Tasks", "abstract": "Previous math word problem solvers following the encoder-decoder paradigm fail to explicitly incorporate essential math symbolic constraints, leading to unexplainable and unreasonable predictions. Herein, we propose Neural-Symbolic Solver (NS-Solver) to explicitly and seamlessly incorporate different levels of symbolic constraints by auxiliary tasks. Our NS-Solver consists of a problem reader to encode problems, a programmer to generate symbolic equations, and a symbolic executor to obtain answers. Along with target expression supervision, our solver is also optimized via 4 new auxiliary objectives to enforce different symbolic reasoning: a) self-supervised number prediction task predicting both number quantity and number locations; b) commonsense constant prediction task predicting what prior knowledge (e.g. how many legs a chicken has) is required; c) program consistency checker computing the semantic loss between predicted equation and target equation to ensure reasonable equation mapping; d) duality exploiting task exploiting the quasi-duality between symbolic equation generation and problem's part-of-speech generation to enhance the understanding ability of a solver. Besides, to provide a more realistic and challenging benchmark for developing a universal and scalable solver, we also construct a new large-scale MWP benchmark CM17K consisting of 4 kinds of MWPs (arithmetic, one-unknown linear, one-unknown non-linear, equation set) with more than 17K samples. Extensive experiments on Math23K and our CM17K demonstrate the superiority of our NS-Solver compared to state-of-the-art methods.", "keyphrases": ["math word problem", "auxiliary task", "neural-symbolic solver"]} +{"id": "chen-ji-2020-learning", "title": "Learning Variational Word Masks to Improve the Interpretability of Neural Text Classifiers", "abstract": "To build an interpretable neural text classifier, most of the prior work has focused on designing inherently interpretable models or finding faithful explanations. A new line of work on improving model interpretability has just started, and many existing methods require either prior information or human annotations as additional inputs in training. To address this limitation, we propose the variational word mask (VMASK) method to automatically learn task-specific important words and reduce irrelevant information on classification, which ultimately improves the interpretability of model predictions. The proposed method is evaluated with three neural text classifiers (CNN, LSTM, and BERT) on seven benchmark text classification datasets. Experiments show the effectiveness of VMASK in improving both model prediction accuracy and interpretability.", "keyphrases": ["variational word mask", "interpretability", "neural text classifier", "information bottleneck"]} +{"id": "ciaramita-altun-2006-broad", "title": "Broad-Coverage Sense Disambiguation and Information Extraction with a Supersense Sequence Tagger", "abstract": "In this paper we approach word sense disambiguation and information extraction as a unified tagging problem.
The task consists of annotating text with the tagset defined by the 41 Wordnet supersense classes for nouns and verbs. Since the tagset is directly related to Wordnet synsets, the tagger returns partial word sense disambiguation. Furthermore, since the noun tags include the standard named entity detection classes -- person, location, organization, time, etc. -- the tagger, as a by-product, returns extended named entity information. We cast the problem of supersense tagging as a sequential labeling task and investigate it empirically with a discriminatively-trained Hidden Markov Model. Experimental evaluation on the main sense-annotated datasets available, i.e., Semcor and Senseval, shows considerable improvements over the best known \"first-sense\" baseline.", "keyphrases": ["information extraction", "tagset", "supersense tagging", "sense-grouping"]} +{"id": "huang-wang-2017-deep", "title": "Deep Residual Learning for Weakly-Supervised Relation Extraction", "abstract": "Deep residual learning (ResNet) is a new method for training very deep neural networks using identity mapping for shortcut connections. ResNet has won the ImageNet ILSVRC 2015 classification task, and achieved state-of-the-art performances in many computer vision tasks. However, the effect of residual learning on noisy natural language processing tasks is still not well understood. In this paper, we design a novel convolutional neural network (CNN) with residual learning, and investigate its impacts on the task of distantly supervised noisy relation extraction. Contrary to popular beliefs that ResNet only works well for very deep networks, we found that even with 9 layers of CNNs, using identity mapping could significantly improve the performance for distantly-supervised relation extraction.", "keyphrases": ["relation extraction", "identity mapping", "cnn", "deep residual learning"]} +{"id": "keller-2004-entropy", "title": "The Entropy Rate Principle as a Predictor of Processing Effort: An Evaluation against Eye-tracking Data", "abstract": "This paper provides evidence for Genzel and Charniak\u2019s (2002) entropy rate principle, which predicts that the entropy of a sentence increases with its position in the text. We show that this principle holds for individual sentences (not just for averages), but we also find that the entropy rate effect is partly an artifact of sentence length, which also correlates with sentence position. Secondly, we evaluate a set of predictions that the entropy rate principle makes for human language processing; using a corpus of eye-tracking data, we show that entropy and processing effort are correlated, and that processing effort is constant throughout a text.", "keyphrases": ["entropy rate principle", "processing effort", "eye-tracking data", "sentence position", "signal"]} +{"id": "headden-iii-etal-2009-improving", "title": "Improving Unsupervised Dependency Parsing with Richer Contexts and Smoothing", "abstract": "Unsupervised grammar induction models tend to employ relatively simple models of syntax when compared to their supervised counterparts. Traditionally, the unsupervised models have been kept simple due to tractability and data sparsity concerns. In this paper, we introduce basic valence frames and lexical information into an unsupervised dependency grammar inducer and show how this additional information can be leveraged via smoothing.
Our model produces state-of-the-art results on the task of unsupervised grammar induction, improving over the best previous work by almost 10 percentage points.", "keyphrases": ["smoothing", "grammar induction", "dependency model"]} +{"id": "lyu-titov-2018-amr", "title": "AMR Parsing as Graph Prediction with Latent Alignment", "abstract": "Abstract meaning representations (AMRs) are broad-coverage sentence-level semantic representations. AMRs represent sentences as rooted labeled directed acyclic graphs. AMR parsing is challenging partly due to the lack of annotated alignments between nodes in the graphs and words in the corresponding sentences. We introduce a neural parser which treats alignments as latent variables within a joint probabilistic model of concepts, relations and alignments. As exact inference requires marginalizing over alignments and is infeasible, we use the variational autoencoding framework and a continuous relaxation of the discrete alignments. We show that joint modeling is preferable to using a pipeline of align and parse. The parser achieves the best reported results on the standard benchmark (74.4% on LDC2016E25).", "keyphrases": ["node", "latent variable", "probabilistic model", "amr parsing", "pipeline design"]} +{"id": "tafjord-etal-2021-proofwriter", "title": "ProofWriter: Generating Implications, Proofs, and Abductive Statements over Natural Language", "abstract": "Transformers have been shown to emulate logical deduction over natural language theories (logical rules expressed in natural language), reliably assigning true/false labels to candidate implications. However, their ability to generate implications of a theory has not yet been demonstrated, and methods for reconstructing proofs of answers are imperfect. In this work we show that a generative model, called ProofWriter, can reliably generate both implications of a theory and the natural language proof(s) that support them. In particular, iterating a 1-step implication generator results in proofs that are highly reliable, and represent actual model decisions (rather than post-hoc rationalizations). On the RuleTaker dataset, the accuracy of ProofWriter's proofs exceeds previous methods by +9% absolute, and in a way that generalizes to proof depths unseen in training and on out-of-domain problems. We also show that generative techniques can perform a type of abduction with high precision: Given a theory and an unprovable conclusion, identify a missing fact that allows the conclusion to be proved, along with a proof. These results significantly improve the viability of neural methods for systematically reasoning over natural language.", "keyphrases": ["abduction", "depth", "generative technique", "proofwriter", "entailment tree"]} +{"id": "clarke-grieve-2017-dimensions", "title": "Dimensions of Abusive Language on Twitter", "abstract": "In this paper, we use a new categorical form of multidimensional register analysis to identify the main dimensions of functional linguistic variation in a corpus of abusive language, consisting of racist and sexist Tweets. By analysing the use of a wide variety of parts-of-speech and grammatical constructions, as well as various features related to Twitter and computer-mediated communication, we discover three dimensions of linguistic variation in this corpus, which we interpret as being related to the degree of interactive, antagonistic and attitudinal language exhibited by individual Tweets.
We then demonstrate that there is a significant functional difference between racist and sexist Tweets, with sexist Tweets tending to be more interactive and attitudinal than racist Tweets.", "keyphrases": ["abusive language", "twitter", "sexist tweet", "multidimensional linguistic variation"]} +{"id": "hendrickx-etal-2013-semeval", "title": "SemEval-2013 Task 4: Free Paraphrases of Noun Compounds", "abstract": "In this paper, we describe SemEval-2013 Task 4: the definition, the data, the evaluation and the results. The task is to capture some of the meaning of English noun compounds via paraphrasing. Given a two-word noun compound, the participating system is asked to produce an explicitly ranked list of its free-form paraphrases. The list is automatically compared and evaluated against a similarly ranked list of paraphrases proposed by human annotators, recruited and managed through Amazon\u2019s Mechanical Turk. The comparison of raw paraphrases is sensitive to syntactic and morphological variation. The \u201cgold\u201d ranking is based on the relative popularity of paraphrases among annotators. To make the ranking more reliable, highly similar paraphrases are grouped, so as to downplay superficial differences in syntax and morphology. Three systems participated in the task. They all beat a simple baseline on one of the two evaluation measures, but not on both measures. This shows that the task is difficult.", "keyphrases": ["paraphrase", "noun compound", "annotator", "semeval-2013 task"]} +{"id": "he-etal-2019-pun", "title": "Pun Generation with Surprise", "abstract": "We tackle the problem of generating a pun sentence given a pair of homophones (e.g., \u201cdied\u201d and \u201cdyed\u201d). Puns are by their very nature statistically anomalous and not amenable to most text generation methods that are supervised by a large corpus. In this paper, we propose an unsupervised approach to pun generation based on lots of raw (unhumorous) text and a surprisal principle. Specifically, we posit that in a pun sentence, there is a strong association between the pun word (e.g., \u201cdyed\u201d) and the distant context, but a strong association between the alternative word (e.g., \u201cdied\u201d) and the immediate context. We instantiate the surprisal principle in two ways: (i) as a measure based on the ratio of probabilities given by a language model, and (ii) a retrieve-and-edit approach based on words suggested by a skip-gram model. Based on human evaluation, our retrieve-and-edit approach generates puns successfully 30% of the time, doubling the success rate of a neural generation baseline.", "keyphrases": ["surprisal principle", "strong association", "immediate context", "pun generation"]} +{"id": "tars-fishel-2018-multi", "title": "Multi-Domain Neural Machine Translation", "abstract": "We present an approach to neural machine translation (NMT) that supports multiple domains in a single model and allows switching between the domains when translating. The core idea is to treat text domains as distinct languages and use multilingual NMT methods to create multi-domain translation systems; we show that this approach results in significant translation quality gains over fine-tuning.
We also explore whether the knowledge of pre-specified text domains is necessary; it turns out that it is after all, but also that when it is not known, quite high translation quality can be reached, and even higher than with known domains in some cases.", "keyphrases": ["neural machine translation", "domain token", "multi-domain nmt", "cluster"]} +{"id": "flachs-etal-2020-grammatical", "title": "Grammatical Error Correction in Low Error Density Domains: A New Benchmark and Analyses", "abstract": "Evaluation of grammatical error correction (GEC) systems has primarily focused on essays written by non-native learners of English, which however is only part of the full spectrum of GEC applications. We aim to broaden the target domain of GEC and release CWEB, a new benchmark for GEC consisting of website text generated by English speakers of varying levels of proficiency. Website data is a common and important domain that contains far fewer grammatical errors than learner essays, which we show presents a challenge to state-of-the-art GEC systems. We demonstrate that a factor behind this is the inability of systems to rely on a strong internal language model in low error density domains. We hope this work shall facilitate the development of open-domain GEC models that generalize to different topics and genres.", "keyphrases": ["new benchmark", "website text", "learner essay", "language model", "grammatical error correction"]} +{"id": "bose-etal-2018-adversarial", "title": "Adversarial Contrastive Estimation", "abstract": "Learning by contrasting positive and negative samples is a general strategy adopted by many methods. Noise contrastive estimation (NCE) for word embeddings and translating embeddings for knowledge graphs are examples in NLP employing this approach. In this work, we view contrastive learning as an abstraction of all such methods and augment the negative sampler into a mixture distribution containing an adversarially learned sampler. The resulting adaptive sampler finds harder negative examples, which forces the main model to learn a better representation of the data. We evaluate our proposal on learning word embeddings, order embeddings and knowledge graph embeddings and observe both faster convergence and improved results on multiple metrics.", "keyphrases": ["contrastive estimation", "sampler", "hard negative example"]} +{"id": "xue-etal-2021-mt5", "title": "mT5: A Massively Multilingual Pre-trained Text-to-Text Transformer", "abstract": "The recent \u201cText-to-Text Transfer Transformer\u201d (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We detail the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. We also describe a simple technique to prevent \u201caccidental translation\u201d in the zero-shot setting, where a generative model chooses to (partially) translate its prediction into the wrong language.
All of the code and model checkpoints used in this work are publicly available.", "keyphrases": ["pre-training", "text-to-text transformer", "language model", "cross-lingual transfer", "input text"]} +{"id": "vajjala-meurers-2014-assessing", "title": "Assessing the relative reading level of sentence pairs for text simplification", "abstract": "While the automatic analysis of the readability of texts has a long history, the use of readability assessment for text simplification has received only little attention so far. In this paper, we explore readability models for identifying differences in the reading levels of simplified and unsimplified versions of sentences. Our experiments show that a relative ranking is preferable to an absolute binary one and that the accuracy of identifying relative simplification depends on the initial reading level of the unsimplified version. The approach is particularly successful in classifying the relative reading level of harder sentences. In terms of practical relevance, the approach promises to be useful for identifying particularly relevant targets for simplification and to evaluate simplifications given specific readability constraints.", "keyphrases": ["relative reading level", "text simplification", "language learner", "different level"]} +{"id": "bastings-etal-2019-interpretable", "title": "Interpretable Neural Predictions with Differentiable Binary Variables", "abstract": "The success of neural networks comes hand in hand with a desire for more interpretability. We focus on text classifiers and make them more interpretable by having them provide a justification\u2013a rationale\u2013for their predictions. We approach this problem by jointly training two neural network models: a latent model that selects a rationale (i.e. a short and informative part of the input text), and a classifier that learns from the words in the rationale alone. Previous work proposed to assign binary latent masks to input positions and to promote short selections via sparsity-inducing penalties such as L0 regularisation. We propose a latent model that mixes discrete and continuous behaviour allowing at the same time for binary selections and gradient-based training without REINFORCE. In our formulation, we can tractably compute the expected value of penalties such as L0, which allows us to directly optimise the model towards a pre-specified text selection rate. We show that our approach is competitive with previous work on rationale extraction, and explore further uses in attention mechanisms.", "keyphrases": ["latent model", "rationale", "mask", "text classification", "inter-pretable model"]} +{"id": "roth-schulte-im-walde-2014-combining", "title": "Combining Word Patterns and Discourse Markers for Paradigmatic Relation Classification", "abstract": "Distinguishing between paradigmatic relations such as synonymy, antonymy and hypernymy is an important prerequisite in a range of NLP applications. In this paper, we explore discourse relations as an alternative set of features to lexico-syntactic patterns. We demonstrate that statistics over discourse relations, collected via explicit discourse markers as proxies, can be utilized as salient indicators for paradigmatic relations in multiple languages, outperforming patterns in terms of recall and F1-score. 
In addition, we observe that markers and patterns provide complementary information, leading to significant classification improvements when applied in combination.", "keyphrases": ["discourse marker", "paradigmatic relation", "antonymy"]} +{"id": "herbig-etal-2020-mmpe-multi", "title": "MMPE: A Multi-Modal Interface using Handwriting, Touch Reordering, and Speech Commands for Post-Editing Machine Translation", "abstract": "The shift from traditional translation to post-editing (PE) of machine-translated (MT) text can save time and reduce errors, but it also affects the design of translation interfaces, as the task changes from mainly generating text to correcting errors within otherwise helpful translation proposals. Since this paradigm shift offers potential for modalities other than mouse and keyboard, we present MMPE, the first prototype to combine traditional input modes with pen, touch, and speech modalities for PE of MT. Users can directly cross out or hand-write new text, drag and drop words for reordering, or use spoken commands to update the text in place. All text manipulations are logged in an easily interpretable format to simplify subsequent translation process research. The results of an evaluation with professional translators suggest that pen and touch interaction are suitable for deletion and reordering tasks, while speech and multi-modal combinations of select & speech are considered suitable for replacements and insertions. Overall, experiment participants were enthusiastic about the new modalities and saw them as useful extensions to mouse & keyboard, but not as a complete substitute.", "keyphrases": ["translator", "modality", "mmpe"]} +{"id": "teh-2006-hierarchical", "title": "A Hierarchical Bayesian Language Model Based On Pitman-Yor Processes", "abstract": "We propose a new hierarchical Bayesian n-gram model of natural languages. Our model makes use of a generalization of the commonly used Dirichlet distributions called Pitman-Yor processes which produce power-law distributions more closely resembling those in natural languages. We show that an approximation to the hierarchical Pitman-Yor language model recovers the exact formulation of interpolated Kneser-Ney, one of the best smoothing methods for n-gram language models. Experiments verify that our model gives cross entropy results superior to interpolated Kneser-Ney and comparable to modified Kneser-Ney.", "keyphrases": ["bayesian language model", "pitman-yor process", "formulation", "dirichlet process"]} +{"id": "barnes-etal-2019-sentiment", "title": "Sentiment Analysis Is Not Solved! Assessing and Probing Sentiment Classification", "abstract": "Neural methods for sentiment analysis have led to quantitative improvements over previous approaches, but these advances are not always accompanied with a thorough analysis of the qualitative differences. Therefore, it is not clear what outstanding conceptual challenges for sentiment analysis remain. In this work, we attempt to discover what challenges still prove a problem for sentiment classifiers for English and to provide a challenging dataset. We collect the subset of sentences that an (oracle) ensemble of state-of-the-art sentiment classifiers misclassify and then annotate them for 18 linguistic and paralinguistic phenomena, such as negation, sarcasm, modality, etc. 
Finally, we provide a case study that demonstrates the usefulness of the dataset to probe the performance of a given sentiment classifier with respect to linguistic phenomena.", "keyphrases": ["ensemble", "negation", "sarcasm", "sentiment analysis"]} +{"id": "cowan-etal-2006-discriminative", "title": "A Discriminative Model for Tree-to-Tree Translation", "abstract": "This paper proposes a statistical, tree-to-tree model for producing translations. Two main contributions are as follows: (1) a method for the extraction of syntactic structures with alignment information from a parallel corpus of translations, and (2) use of a discriminative, feature-based model for prediction of these target-language syntactic structures---which we call aligned extended projections, or AEPs. An evaluation of the method on translation from German to English shows similar performance to the phrase-based model of Koehn et al. (2003).", "keyphrases": ["discriminative model", "clause", "modifier"]} +{"id": "mohammad-etal-2013-computing", "title": "Computing Lexical Contrast", "abstract": "Knowing the degree of semantic contrast between words has widespread application in natural language processing, including machine translation, information retrieval, and dialogue systems. Manually created lexicons focus on opposites, such as hot and cold. Opposites are of many kinds such as antipodals, complementaries, and gradable. Existing lexicons often do not classify opposites into the different kinds, however. They also do not explicitly list word pairs that are not opposites but yet have some degree of contrast in meaning, such as warm and cold or tropical and freezing. We propose an automatic method to identify contrasting word pairs that is based on the hypothesis that if a pair of words, A and B, are contrasting, then there is a pair of opposites, C and D, such that A and C are strongly related and B and D are strongly related. (For example, there exists the pair of opposites hot and cold such that tropical is related to hot, and freezing is related to cold.) We will call this the contrast hypothesis. We begin with a large crowdsourcing experiment to determine the amount of human agreement on the concept of oppositeness and its different kinds. In the process, we flesh out key features of different kinds of opposites. We then present an automatic and empirical measure of lexical contrast that relies on the contrast hypothesis, corpus statistics, and the structure of a Roget-like thesaurus. We show how, using four different data sets, we evaluated our approach on two different tasks, solving \u201cmost contrasting word\u201d questions and distinguishing synonyms from opposites. The results are analyzed across four parts of speech and across five different kinds of opposites. We show that the proposed measure of lexical contrast obtains high precision and large coverage, outperforming existing methods.", "keyphrases": ["opposite", "word pair", "automatic method", "ranking", "negation"]} +{"id": "wu-fung-2009-semantic-roles", "title": "Semantic Roles for SMT: A Hybrid Two-Pass Model", "abstract": "We present results on a novel hybrid semantic SMT model that incorporates the strengths of both semantic role labeling and phrase-based statistical machine translation. The approach avoids major complexity limitations via a two-pass architecture. The first pass is performed using a conventional phrase-based SMT model.
The second pass is performed by a re-ordering strategy guided by shallow semantic parsers that produce both semantic frame and role labels. Evaluation on a Wall Street Journal newswire genre test set showed the hybrid model to yield an improvement of roughly half a point in BLEU score over a strong pure phrase-based SMT baseline -- to our knowledge, the first successful application of semantic role labeling to SMT.", "keyphrases": ["smt model", "semantic role", "cross-lingual match"]} +{"id": "yamada-etal-2016-joint", "title": "Joint Learning of the Embedding of Words and Entities for Named Entity Disambiguation", "abstract": "Named Entity Disambiguation (NED) refers to the task of resolving multiple named entity mentions in a document to their correct references in a knowledge base (KB) (e.g., Wikipedia). In this paper, we propose a novel embedding method specifically designed for NED. The proposed method jointly maps words and entities into the same continuous vector space. We extend the skip-gram model by using two models. The KB graph model learns the relatedness of entities using the link structure of the KB, whereas the anchor context model aims to align vectors such that similar words and entities occur close to one another in the vector space by leveraging KB anchors and their context words. By combining contexts based on the proposed embedding with standard NED features, we achieved state-of-the-art accuracy of 93.1% on the standard CoNLL dataset and 85.2% on the TAC 2010 dataset.", "keyphrases": ["named entity disambiguation", "skip-gram model", "knowledge graph"]} +{"id": "groschwitz-etal-2018-amr", "title": "AMR dependency parsing with a typed semantic algebra", "abstract": "We present a semantic parser for Abstract Meaning Representations which learns to parse strings into tree representations of the compositional structure of an AMR graph. This allows us to use standard neural techniques for supertagging and dependency tree parsing, constrained by a linguistically principled type system. We present two approximative decoding algorithms, which achieve state-of-the-art accuracy and outperform strong baselines.", "keyphrases": ["algebra", "compositional structure", "amr", "dependency parser"]} +{"id": "shen-lapata-2007-using", "title": "Using Semantic Roles to Improve Question Answering", "abstract": "Shallow semantic parsing, the automatic identification and labeling of sentential constituents, has recently received much attention. Our work examines whether semantic role information is beneficial to question answering. We introduce a general framework for answer extraction which exploits semantic role annotations in the FrameNet paradigm. We view semantic role assignment as an optimization problem in a bipartite graph and answer extraction as an instance of graph matching. Experimental results on the TREC datasets demonstrate improvements over state-of-the-art models.", "keyphrases": ["question answering", "role labeling", "predicate argument structure"]} +{"id": "shen-etal-2009-effective", "title": "Effective Use of Linguistic and Contextual Information for Statistical Machine Translation", "abstract": "Current methods of using lexical features in machine translation have difficulty in scaling up to realistic MT tasks due to a prohibitively large number of parameters involved. In this paper, we propose methods of using new linguistic and contextual features that do not suffer from this problem and apply them in a state-of-the-art hierarchical MT system. 
The features used in this work are non-terminal labels, non-terminal length distribution, source string context and source dependency LM scores. The effectiveness of our techniques is demonstrated by significant improvements over a strong baseline. On Arabic-to-English translation, improvements in lower-cased BLEU are 2.0 on NIST MT06 and 1.7 on MT08 newswire data on decoding output. On Chinese-to-English translation, the improvements are 1.0 on MT06 and 0.8 on MT08 newswire data.", "keyphrases": ["contextual information", "statistical machine translation", "string-to-dependency language model", "constraint modeling"]} +{"id": "li-etal-2010-report", "title": "Report of NEWS 2010 Transliteration Generation Shared Task", "abstract": "This report documents the Transliteration Generation Shared Task conducted as a part of the Named Entities Workshop (NEWS 2010), an ACL 2010 workshop. The shared task features machine transliteration of proper names from English to 9 languages and from 3 languages to English. In total, 12 tasks are provided. 7 teams from 5 different countries participated in the evaluations. Finally, 33 standard and 8 non-standard runs are submitted, where diverse transliteration methodologies are explored and reported on the evaluation data. We report the results with 4 performance metrics. We believe that the shared task has successfully achieved its objective by providing a common benchmarking platform for the research community to evaluate the state-of-the-art technologies that benefit the future research and development.", "keyphrases": ["transliteration", "named entities workshop", "news", "direction", "quality metric"]} +{"id": "li-etal-2014-recursive", "title": "Recursive Deep Models for Discourse Parsing", "abstract": "Text-level discourse parsing remains a challenge: most approaches employ features that fail to capture the intentional, semantic, and syntactic aspects that govern discourse coherence. In this paper, we propose a recursive model for discourse parsing that jointly models distributed representations for clauses, sentences, and entire discourses. The learned representations can to some extent learn the semantic and intentional import of words and larger discourse units automatically. The proposed framework obtains comparable performance regarding standard discourse parsing evaluations when compared against current state-of-the-art systems.", "keyphrases": ["discourse", "recursive neural network", "edu", "network model"]} +{"id": "lenci-benotto-2012-identifying", "title": "Identifying hypernyms in distributional semantic spaces", "abstract": "In this paper we apply existing directional similarity measures to identify hypernyms with a state-of-the-art distributional semantic model. We also propose a new directional measure that achieves the best performance in hypernym identification.", "keyphrases": ["hypernym", "semantic relation", "distributional inclusion hypothesis"]} +{"id": "bamman-etal-2013-learning", "title": "Learning Latent Personas of Film Characters", "abstract": "We present two latent variable models for learning character types, or personas, in film, in which a persona is defined as a set of mixtures over latent lexical classes. These lexical classes capture the stereotypical actions of which a character is the agent and patient, as well as attributes by which they are described.
As the first attempt to solve this problem explicitly, we also present a new dataset for the text-driven analysis of film, along with a benchmark testbed to help drive future work in this area.", "keyphrases": ["persona", "character", "new dataset", "text-driven analysis", "movie plot summary"]} +{"id": "liu-etal-2012-locally", "title": "Locally Training the Log-Linear Model for SMT", "abstract": "In statistical machine translation, minimum error rate training (MERT) is a standard method for tuning a single weight with regard to a given development data. However, due to the diversity and uneven distribution of source sentences, there are two problems suffered by this method. First, its performance is highly dependent on the choice of a development set, which may lead to an unstable performance for testing. Second, translations become inconsistent at the sentence level since tuning is performed globally on a document level. In this paper, we propose a novel local training method to address these two problems. Unlike a global training method, such as MERT, in which a single weight is learned and used for all the input sentences, we perform training and testing in one step by learning a sentence-wise weight for each input sentence. We propose efficient incremental training methods to put the local training into practice. In NIST Chinese-to-English translation tasks, our local training method significantly outperforms MERT with the maximal improvements up to 2.0 BLEU points, meanwhile its efficiency is comparable to that of the global method.", "keyphrases": ["log-linear model", "machine translation", "weight", "local training method"]} +{"id": "koehn-etal-2018-findings", "title": "Findings of the WMT 2018 Shared Task on Parallel Corpus Filtering", "abstract": "We posed the shared task of assigning sentence-level quality scores for a very noisy corpus of sentence pairs crawled from the web, with the goal of sub-selecting 1% and 10% of high-quality data to be used to train machine translation systems. Seventeen participants from companies, national research labs, and universities participated in this task.", "keyphrases": ["shared task", "parallel corpus filtering", "sentence pair", "wmt18"]} +{"id": "hu-wan-2014-automatic", "title": "Automatic Generation of Related Work Sections in Scientific Papers: An Optimization Approach", "abstract": "In this paper, we investigate a challenging task of automatic related work generation. Given multiple reference papers as input, the task aims to generate a related work section for a target paper. The generated related work section can be used as a draft for the author to complete his or her final related work section. We propose our Automatic Related Work Generation system called ARWG to address this task. It first exploits a PLSA model to split the sentence set of the given papers into different topic-biased parts, and then applies regression models to learn the importance of the sentences. At last it employs an optimization framework to generate the related work section. Our evaluation results on a test set of 150 target papers along with their reference papers show that our proposed ARWG system can generate related work sections with better quality. 
A user study is also performed to show ARWG can achieve an improvement over generic multi-document summarization baselines.", "keyphrases": ["work section", "work generation", "topic-biased part"]} +{"id": "thompson-post-2020-paraphrase", "title": "Paraphrase Generation as Zero-Shot Multilingual Translation: Disentangling Semantic Similarity from Lexical and Syntactic Diversity", "abstract": "Recent work has shown that a multilingual neural machine translation (NMT) model can be used to judge how well a sentence paraphrases another sentence in the same language (Thompson and Post, 2020); however, attempting to generate paraphrases from such a model using standard beam search produces trivial copies or near copies. We introduce a simple paraphrase generation algorithm which discourages the production of n-grams that are present in the input. Our approach enables paraphrase generation in many languages from a single multilingual NMT model. Furthermore, the amount of lexical diversity between the input and output can be controlled at generation time. We conduct a human evaluation to compare our method to a paraphraser trained on the large English synthetic paraphrase database ParaBank 2 (Hu et al., 2019c) and find that our method produces paraphrases that better preserve meaning and are more grammatical, for the same level of lexical diversity. Additional smaller human assessments demonstrate our approach also works in two non-English languages.", "keyphrases": ["multilingual translation", "n-gram", "paraphrase generation"]} +{"id": "junczys-dowmunt-etal-2018-approaching", "title": "Approaching Neural Grammatical Error Correction as a Low-Resource Machine Translation Task", "abstract": "Previously, neural methods in grammatical error correction (GEC) did not reach state-of-the-art results compared to phrase-based statistical machine translation (SMT) baselines. We demonstrate parallels between neural GEC and low-resource neural MT and successfully adapt several methods from low-resource MT to neural GEC. We further establish guidelines for trustable results in neural GEC and propose a set of model-independent methods for neural GEC that can be easily applied in most GEC settings. Proposed methods include adding source-side noise, domain-adaptation techniques, a GEC-specific training-objective, transfer learning with monolingual data, and ensembling of independently trained GEC models and language models. The combined effects of these methods result in better than state-of-the-art neural GEC models that outperform previously best neural GEC systems by more than 10% M2 on the CoNLL-2014 benchmark and 5.9% on the JFLEG test set. Non-neural state-of-the-art systems are outperformed by more than 2% on the CoNLL-2014 benchmark and by 4% on JFLEG.", "keyphrases": ["error correction", "machine translation", "neural gec system", "ungrammatical text"]} +{"id": "baldwin-2005-bootstrapping", "title": "Bootstrapping Deep Lexical Resources: Resources for Courses", "abstract": "We propose a range of deep lexical acquisition methods which make use of morphological, syntactic and ontological language resources to model word similarity and bootstrap from a seed lexicon. The different methods are deployed in learning lexical items for a precision grammar, and shown to each have strengths and weaknesses over different word classes.
A particular focus of this paper is the relative accessibility of different language resource types, and predicted \"bang for the buck\" associated with each in deep lexical acquisition applications.", "keyphrases": ["lexical acquisition", "item", "dla", "deep grammar", "secondary language resource"]} +{"id": "ganitkevitch-callison-burch-2014-multilingual", "title": "The Multilingual Paraphrase Database", "abstract": "We release a massive expansion of the paraphrase database (PPDB) that now includes a collection of paraphrases in 23 different languages. The resource is derived from large volumes of bilingual parallel data. Our collection is extracted and ranked using state of the art methods. The multilingual PPDB has over a billion paraphrase pairs in total, covering the following languages: Arabic, Bulgarian, Chinese, Czech, Dutch, Estonian, Finnish, French, German, Greek, Hungarian, Italian, Latvian, Lithuanian, Polish, Portuguese, Romanian, Russian, Slovak, Slovenian, and Swedish.", "keyphrases": ["multilingual paraphrase database", "ppdb", "parallel text"]} +{"id": "bunt-2020-annotation", "title": "Annotation of Quantification: The Current State of ISO 24617-12", "abstract": "This paper discusses the current state of developing an ISO standard annotation scheme for quantification phenomena in natural language, as part of the ISO Semantic Annotation Framework (ISO 24617). A proposed approach that combines ideas from the theory of generalised quantifiers and from neo-Davidsonian event semantics was adopted by the ISO organisation in 2019 as a starting point for developing such an annotation scheme. This scheme consists of (1) a conceptual 'metamodel' that visualises the types of entities, functions and relations that go into annotations of quantification; (2) an abstract syntax which defines 'annotation structures' as triples and other set-theoretic constructs; (3) an XML-based representation of annotation structures ('concrete syntax'); and (4) a compositional semantics of annotation structures. The latter three components together define the interpreted markup language QuantML. The focus in this paper is on the structuring of the semantic information needed to characterise quantification in natural language and the representation of these structures in QuantML.", "keyphrases": ["current state", "iso", "annotation scheme", "quantification phenomenon"]} +{"id": "he-etal-2010-bridging", "title": "Bridging SMT and TM with Translation Recommendation", "abstract": "We propose a translation recommendation framework to integrate Statistical Machine Translation (SMT) output with Translation Memory (TM) systems. The framework recommends SMT outputs to a TM user when it predicts that SMT outputs are more suitable for post-editing than the hits provided by the TM. We describe an implementation of this framework using an SVM binary classifier. We exploit methods to fine-tune the classifier and investigate a variety of features of different types. We rely on automatic MT evaluation metrics to approximate human judgements in our experiments. Experimental results show that our system can achieve 0.85 precision at 0.89 recall, excluding exact matches.
Furthermore, it is possible for the end-user to achieve a desired balance between precision and recall by adjusting confidence levels.", "keyphrases": ["translation memory", "smt output", "translation recommendation system", "post-editor", "sentence level"]} +{"id": "huang-etal-2019-improving", "title": "Improving Event Coreference Resolution by Learning Argument Compatibility from Unlabeled Data", "abstract": "Argument compatibility is a linguistic condition that is frequently incorporated into modern event coreference resolution systems. If two event mentions have incompatible arguments in any of the argument roles, they cannot be coreferent. On the other hand, if these mentions have compatible arguments, then this may be used as information towards deciding their coreferent status. One of the key challenges in leveraging argument compatibility lies in the paucity of labeled data. In this work, we propose a transfer learning framework for event coreference resolution that utilizes a large amount of unlabeled data to learn argument compatibility of event mentions. In addition, we adopt an interactive inference network based model to better capture the compatible and incompatible relations between the context words of event mentions. Our experiments on the KBP 2017 English dataset confirm the effectiveness of our model in learning argument compatibility, which in turn improves the performance of the overall event coreference model.", "keyphrases": ["event coreference resolution", "unlabeled data", "mention"]} +{"id": "kim-hovy-2007-crystal", "title": "Crystal: Analyzing Predictive Opinions on the Web", "abstract": "In this paper, we present an election prediction system (Crystal) based on web users\u2019 opinions posted on an election prediction website. Given a prediction message, Crystal first identifies which party the message predicts to win and then aggregates prediction analysis results of a large amount of opinions to project the election results. We collect past election prediction messages from the Web and automatically build a gold standard. We focus on capturing lexical patterns that people frequently use when they express their predictive opinions about a coming election. To predict election results, we apply SVM-based supervised learning. To improve performance, we propose a novel technique which generalizes n-gram feature patterns. Experimental results show that Crystal significantly outperforms several baselines as well as a non-generalized n-gram approach. Crystal predicts future elections with 81.68% accuracy.", "keyphrases": ["opinion", "web", "election prediction", "crystal"]} +{"id": "tu-etal-2020-empirical", "title": "An Empirical Study on Robustness to Spurious Correlations using Pre-trained Language Models", "abstract": "Recent work has shown that pre-trained language models such as BERT improve robustness to spurious correlations in the dataset. Intrigued by these results, we find that the key to their success is generalization from a small amount of counterexamples where the spurious correlations do not hold. When such minority examples are scarce, pre-trained models perform as poorly as models trained from scratch. In the case of extreme minority, we propose to use multi-task learning (MTL) to improve generalization. Our experiments on natural language inference and paraphrase identification show that MTL with the right auxiliary tasks significantly improves performance on challenging examples without hurting the in-distribution performance. 
Further, we show that the gain from MTL mainly comes from improved generalization from the minority examples. Our results highlight the importance of data diversity for overcoming spurious correlations.", "keyphrases": ["robustness", "language model", "pre-trained model", "auxiliary task", "plm"]} +{"id": "huang-chiang-2007-forest", "title": "Forest Rescoring: Faster Decoding with Integrated Language Models", "abstract": "Efficient decoding has been a fundamental problem in machine translation, especially with an integrated language model which is essential for achieving good translation quality. We develop faster approaches for this problem based on k-best parsing algorithms and demonstrate their effectiveness on both phrase-based and syntax-based MT systems. In both cases, our methods achieve significant speed improvements, often by more than a factor of ten, over the conventional beam-search method at the same levels of search error and translation accuracy.", "keyphrases": ["machine translation", "forest rescoring", "hiero search refinements"]} +{"id": "titov-klementiev-2012-bayesian", "title": "A Bayesian Approach to Unsupervised Semantic Role Induction", "abstract": "We introduce two Bayesian models for unsupervised semantic role labeling (SRL) task. The models treat SRL as clustering of syntactic signatures of arguments with clusters corresponding to semantic roles. The first model induces these clusterings independently for each predicate, exploiting the Chinese Restaurant Process (CRP) as a prior. In a more refined hierarchical model, we inject the intuition that the clusterings are similar across different predicates, even though they are not necessarily identical. This intuition is encoded as a distance-dependent CRP with a distance between two syntactic signatures indicating how likely they are to correspond to a single semantic role. These distances are automatically induced within the model and shared across predicates. Both models achieve state-of-the-art results when evaluated on PropBank, with the coupled model consistently outperforming the factored counterpart in all experimental set-ups.", "keyphrases": ["semantic role", "bayesian model", "argument signature"]} +{"id": "yang-kirchhoff-2006-phrase", "title": "Phrase-Based Backoff Models for Machine Translation of Highly Inflected Languages", "abstract": "We propose a backoff model for phrase-based machine translation that translates unseen word forms in foreign-language text by hierarchical morphological abstractions at the word and the phrase level. The model is evaluated on the Europarl corpus for German-English and Finnish-English translation and shows improvements over state-of-the-art phrase-based models.", "keyphrases": ["backoff model", "machine translation", "french"]} +{"id": "dinu-lapata-2010-measuring", "title": "Measuring Distributional Similarity in Context", "abstract": "The computation of meaning similarity as operationalized by vector-based models has found widespread use in many tasks ranging from the acquisition of synonyms and paraphrases to word sense disambiguation and textual entailment. Vector-based models are typically directed at representing words in isolation and thus best suited for measuring similarity out of context. In this paper we propose a probabilistic framework for measuring similarity in context. Central to our approach is the intuition that word meaning is represented as a probability distribution over a set of latent senses and is modulated by context.
Experimental results on lexical substitution and word similarity show that our algorithm outperforms previously proposed models.", "keyphrases": ["sense disambiguation", "probabilistic framework", "substitution", "latent variable"]} +{"id": "strassel-tracey-2016-lorelei", "title": "LORELEI Language Packs: Data, Tools, and Resources for Technology Development in Low Resource Languages", "abstract": "In this paper, we describe the textual linguistic resources in nearly 3 dozen languages being produced by Linguistic Data Consortium for DARPA's LORELEI (Low Resource Languages for Emergent Incidents) Program. The goal of LORELEI is to improve the performance of human language technologies for low-resource languages and enable rapid re-training of such technologies for new languages, with a focus on the use case of deployment of resources in sudden emergencies such as natural disasters. Representative languages have been selected to provide broad typological coverage for training, and surprise incident languages for testing will be selected over the course of the program. Our approach treats the full set of language packs as a coherent whole, maintaining LORELEI-wide specifications, tagsets, and guidelines, while allowing for adaptation to the specific needs created by each language. Each representative language corpus, therefore, both stands on its own as a resource for the specific language and forms part of a large multilingual resource for broader cross-language technology development.", "keyphrases": ["technology development", "low resource languages", "lorelei"]} +{"id": "bhandari-armstrong-2019-tkol", "title": "Tkol, Httt, and r/radiohead: High Affinity Terms in Reddit Communities", "abstract": "Language is an important marker of a cultural group, large or small. One aspect of language variation between communities is the employment of highly specialized terms with unique significance to the group. We study these high affinity terms across a wide variety of communities by leveraging the rich diversity of Reddit.com. We provide a systematic exploration of high affinity terms, the often rapid semantic shifts they undergo, and their relationship to subreddit characteristics across 2600 diverse subreddits. Our results show that high affinity terms are effective signals of loyal communities, they undergo more semantic shift than low affinity terms, and that they are a partial barrier to entry for new users. We conclude that Reddit is a robust and valuable data source for testing further theories about high affinity terms across communities.", "keyphrases": ["high affinity term", "reddit", "community"]} +{"id": "akbik-etal-2015-generating", "title": "Generating High Quality Proposition Banks for Multilingual Semantic Role Labeling", "abstract": "Semantic role labeling (SRL) is crucial to natural language understanding as it identifies the predicate-argument structure in text with semantic labels. Unfortunately, resources required to construct SRL models are expensive to obtain and simply do not exist for most languages. In this paper, we present a two-stage method to enable the construction of SRL models for resource-poor languages by exploiting monolingual SRL and multilingual parallel data. Experimental results show that our method outperforms existing methods.
We use our method to generate Proposition Banks with high to reasonable quality for 7 languages in three language families and release these resources to the research community.", "keyphrases": ["annotation projection", "propbanks", "new target language"]} +{"id": "zhong-etal-2019-searching", "title": "Searching for Effective Neural Extractive Summarization: What Works and What's Next", "abstract": "The recent years have seen remarkable success in the use of deep neural networks on text summarization. However, there is no clear understanding of why they perform so well, or how they might be improved. In this paper, we seek to better understand how neural extractive summarization systems could benefit from different types of model architectures, transferable knowledge and learning schemas. Besides, we find an effective way to improve the current framework and achieve the state-of-the-art result on CNN/DailyMail by a large margin based on our observations and analysis. Hopefully, our work could provide more hints for future research on extractive summarization.", "keyphrases": ["summarization", "deep neural network", "sentence embedding"]} +{"id": "shen-etal-2010-string", "title": "String-to-Dependency Statistical Machine Translation", "abstract": "We propose a novel string-to-dependency algorithm for statistical machine translation. This algorithm employs a target dependency language model during decoding to exploit long distance word relations, which cannot be modeled with a traditional n-gram language model. Experiments show that the algorithm achieves significant improvement in MT performance over a state-of-the-art hierarchical string-to-string system on NIST MT06 and MT08 newswire evaluation sets.", "keyphrases": ["machine translation", "string-to-dependency model", "neighbouring word", "target side"]} +{"id": "oepen-lonning-2006-discriminant", "title": "Discriminant-Based MRS Banking", "abstract": "We present an approach to discriminant-based MRS banking, i.e. the construction of an annotated corpus where each input item is paired with a logical-form semantics. Semantic annotations are produced by parsing with a broad-coverage precision grammar, followed by manual disambiguation. The selection of the preferred analysis for each item (and hence its semantic form) builds on a notion of semantic discriminants, essentially localized dependencies extracted from a full-fledged, underspecified semantic representation.", "keyphrases": ["discriminant-based mrs banking", "eds", "elementary dependency structures", "flavor"]} +{"id": "white-2011-glue", "title": "Glue Rules for Robust Chart Realization", "abstract": "This paper shows how glue rules can be used to increase the robustness of statistical chart realization in a manner inspired by dependency realization. Unlike the use of glue rules in MT---but like previous work with XLE on improving robustness with hand-crafted grammars---they are invoked here as a fall-back option when no grammatically complete realization can be found. The method works with Combinatory Categorial Grammar (CCG) and has been implemented in OpenCCG. As the techniques are not overly tied to CCG, they are expected to be applicable to other grammar-based chart realizers where robustness is a common problem. Unlike an earlier robustness technique of greedily assembling fragments, glue rules enable n-best outputs and are compatible with disjunctive inputs. 
Experimental results indicate that glue rules yield improved realizations in comparison to greedy fragment assembly, though a sizeable gap remains between the quality of grammatically complete realizations and fragmentary ones.", "keyphrases": ["robustness", "realization", "ccg", "glue rule"]} +{"id": "adelani-etal-2021-masakhaner", "title": "MasakhaNER: Named Entity Recognition for African Languages", "abstract": "We take a step towards addressing the under-representation of the African continent in NLP research by bringing together different stakeholders to create the first large, publicly available, high-quality dataset for named entity recognition (NER) in ten African languages. We detail the characteristics of these languages to help researchers and practitioners better understand the challenges they pose for NER tasks. We analyze our datasets and conduct an extensive empirical evaluation of state-of-the-art methods across both supervised and transfer learning settings. Finally, we release the data, code, and models to inspire future research on African NLP.", "keyphrases": ["entity recognition", "african language", "masakhaner"]} +{"id": "lambert-banchs-2006-grouping", "title": "Grouping Multi-word Expressions According to Part-Of-Speech in Statistical Machine Translation", "abstract": "This paper studies a strategy for identifying and using multi-word expressions in Statistical Machine Translation. The performance of the proposed strategy for various types of multi-word expressions (like nouns or verbs) is evaluated in terms of alignment quality as well as translation accuracy. Evaluations are performed by using real-life data, namely the European Parliament corpus. Results from translation tasks from English-to-Spanish and from Spanish-to-English are presented and discussed.", "keyphrases": ["part-of-speech", "mwes", "further study"]} +{"id": "ma-etal-2020-entity", "title": "Entity-Aware Dependency-Based Deep Graph Attention Network for Comparative Preference Classification", "abstract": "This paper studies the task of comparative preference classification (CPC). Given two entities in a sentence, our goal is to classify whether the first (or the second) entity is preferred over the other or no comparison is expressed at all between the two entities. Existing works either do not learn entity-aware representations well and fail to deal with sentences involving multiple entity pairs or use sequential modeling approaches that are unable to capture long-range dependencies between the entities. Some also use traditional machine learning approaches that do not generalize well. This paper proposes a novel Entity-aware Dependency-based Deep Graph Attention Network (ED-GAT) that employs a multi-hop graph attention over a dependency graph sentence representation to leverage both the semantic information from word embeddings and the syntactic information from the dependency graph to solve the problem. Empirical evaluation shows that the proposed model achieves the state-of-the-art performance in comparative preference classification.", "keyphrases": ["graph attention network", "comparative preference classification", "dependency graph"]} +{"id": "vania-lopez-2017-characters", "title": "From Characters to Words to in Between: Do We Capture Morphology?", "abstract": "Words can be represented by composing the representations of subword units such as word segments, characters, and/or character n-grams. 
While such representations are effective and may capture the morphological regularities of words, they have not been systematically compared, and it is not understood how they interact with different morphological typologies. On a language modeling task, we present experiments that systematically vary (1) the basic unit of representation, (2) the composition of these representations, and (3) the morphological typology of the language modeled. Our results extend previous findings that character representations are effective across typologies, and we find that a previously unstudied combination of character trigram representations composed with bi-LSTMs outperforms most others. But we also find room for improvement: none of the character-level models match the predictive accuracy of a model with access to true morphological analyses, even when learned from an order of magnitude more data.", "keyphrases": ["character", "segmentation", "character-aware nlms", "bpe", "bilstm"]} +{"id": "zhu-etal-2007-unified", "title": "A Unified Tagging Approach to Text Normalization", "abstract": "This paper addresses the issue of text normalization, an important yet often overlooked problem in natural language processing. By text normalization, we mean converting \u2018informally inputted\u2019 text into the canonical form, by eliminating \u2018noises\u2019 in the text and detecting paragraph and sentence boundaries in the text. Previously, text normalization issues were often undertaken in an ad-hoc fashion or studied separately. This paper first gives a formalization of the entire problem. It then proposes a unified tagging approach to perform the task using Conditional Random Fields (CRF). The paper shows that with the introduction of a small set of tags, most of the text normalization tasks can be performed within the approach. The accuracy of the proposed method is high, because the subtasks of normalization are interdependent and should be performed together. Experimental results on email data cleaning show that the proposed method significantly outperforms the approach of using cascaded models and that of employing independent models.", "keyphrases": ["text normalization", "noise", "conditional random fields", "crf"]} +{"id": "zampieri-etal-2020-semeval", "title": "SemEval-2020 Task 12: Multilingual Offensive Language Identification in Social Media (OffensEval 2020)", "abstract": "We present the results and the main findings of SemEval-2020 Task 12 on Multilingual Offensive Language Identification in Social Media (OffensEval-2020). The task included three subtasks corresponding to the hierarchical taxonomy of the OLID schema from OffensEval-2019, and it was offered in five languages: Arabic, Danish, English, Greek, and Turkish. OffensEval-2020 was one of the most popular tasks at SemEval-2020, attracting a large number of participants across all subtasks and languages: a total of 528 teams signed up to participate in the task, 145 teams submitted official runs on the test data, and 70 teams submitted system description papers.", "keyphrases": ["offensive language identification", "social media", "semeval-2020 task", "multilingual dataset"]} +{"id": "chen-etal-2020-uncertain", "title": "Uncertain Natural Language Inference", "abstract": "We introduce Uncertain Natural Language Inference (UNLI), a refinement of Natural Language Inference (NLI) that shifts away from categorical labels, targeting instead the direct prediction of subjective probability assessments. 
We demonstrate the feasibility of collecting annotations for UNLI by relabeling a portion of the SNLI dataset under a probabilistic scale, where items even with the same categorical label differ in how likely people judge them to be true given a premise. We describe a direct scalar regression modeling approach, and find that existing categorically-labeled NLI data can be used in pre-training. Our best models correlate well with humans, demonstrating models are capable of more subtle inferences than the categorical bin assignment employed in current NLI tasks.", "keyphrases": ["natural language inference", "nli", "judgement"]} +{"id": "basaldella-etal-2020-cometa", "title": "COMETA: A Corpus for Medical Entity Linking in the Social Media", "abstract": "Whilst there has been growing progress in Entity Linking (EL) for general language, existing datasets fail to address the complex nature of health terminology in layman's language. Meanwhile, there is a growing need for applications that can understand the public's voice in the health domain. To address this we introduce a new corpus called COMETA, consisting of 20k English biomedical entity mentions from Reddit expert-annotated with links to SNOMED CT, a widely-used medical knowledge graph. Our corpus satisfies a combination of desirable properties, from scale and coverage to diversity and quality, that to the best of our knowledge has not been met by any of the existing resources in the field. Through benchmark experiments on 20 EL baselines from string- to neural-based models we shed light on the ability of these systems to perform complex inference on entities and concepts under 2 challenging evaluation scenarios. Our experimental results on COMETA illustrate that no golden bullet exists and even the best mainstream techniques still have a significant performance gap to fill, while the best solution relies on combining different views of data.", "keyphrases": ["entity linking", "health terminology", "cometa"]} +{"id": "feng-etal-2010-comparison", "title": "A Comparison of Features for Automatic Readability Assessment", "abstract": "Several sets of explanatory variables - including shallow, language modeling, POS, syntactic, and discourse features - are compared and evaluated in terms of their impact on predicting the grade level of reading material for primary school students. We find that features based on in-domain language models have the highest predictive power. Entity-density (a discourse feature) and POS-features, in particular nouns, are individually very useful but highly correlated. Average sentence length (a shallow feature) is more useful - and less expensive to compute - than individual syntactic features. A judicious combination of features examined here results in a significant improvement over the state of the art.", "keyphrases": ["automatic readability assessment", "grade level", "lexical chain", "reader", "discourse-based feature"]} +{"id": "lee-2015-morphological", "title": "Morphological Paradigms: Computational Structure and Unsupervised Learning", "abstract": "This thesis explores the computational structure of morphological paradigms from the perspective of unsupervised learning. Three topics are studied: (i) stem identification, (ii) paradigmatic similarity, and (iii) paradigm induction. All the three topics progress in terms of the scope of data in question. The first and second topics explore structure when morphological paradigms are given, first within a paradigm and then across paradigms. 
The third topic asks where morphological paradigms come from in the first place, and explores strategies of paradigm induction from child-directed speech. This research is of interest to linguists and natural language processing researchers, for both theoretical questions and applied areas.", "keyphrases": ["computational structure", "unsupervised learning", "morphological paradigms"]} +{"id": "settles-2011-closing", "title": "Closing the Loop: Fast, Interactive Semi-Supervised Annotation With Queries on Features and Instances", "abstract": "This paper describes DUALIST, an active learning annotation paradigm which solicits and learns from labels on both features (e.g., words) and instances (e.g., documents). We present a novel semi-supervised training algorithm developed for this setting, which is (1) fast enough to support real-time interactive speeds, and (2) at least as accurate as preexisting methods for learning with mixed feature and instance labels. Human annotators in user studies were able to produce near-state-of-the-art classifiers---on several corpora in a variety of application domains---with only a few minutes of effort.", "keyphrases": ["annotator", "active learning", "few minute", "text classification"]} +{"id": "axelrod-etal-2015-class", "title": "Class-based N-gram language difference models for data selection", "abstract": "We present a simple method for representing text that explicitly encodes differences between two corpora in a domain adaptation or data selection scenario. We do this by replacing every word in the corpora with its part-of-speech tag plus a suffix that indicates the relative bias of the word, or how much likelier it is to be in the task corpus versus the pool. By changing the representation of the text, we can use basic n-gram models to create language difference models that characterize the difference between the corpora. This process enables us to use common models with robust statistics that are tailored to computing the similarity score via cross-entropy difference. These improvements come despite using zero of the original words in the texts during our selection process. We replace the entire vocabulary during the selection process from 3.6M to under 200 automatically-derived tags, greatly reducing the model size for selection. When used to select data for machine translation systems, our language difference models lead to MT system improvements of up to +1.8 BLEU when used in isolation, and up to +1.3 BLEU when used in a multimodel translation system. Language models trained on data selected with our method have 35% fewer OOV\u2019s on the task data than the most common approach. These LMs also have a lower perplexity on in-domain data than the baselines.", "keyphrases": ["part-of-speech tag", "cross-entropy difference", "language model", "in-domain data"]} +{"id": "pyysalo-etal-2015-universal", "title": "Universal Dependencies for Finnish", "abstract": "There has been substantial recent interest in annotation schemes that can be applied consistently to many languages. Building on several recent efforts to unify morphological and syntactic annotation, the Universal Dependencies (UD) project seeks to introduce a cross-linguistically applicable part-of-speech tagset, feature inventory, and set of dependency relations as well as a large number of uniformly annotated treebanks. We present Universal Dependencies for Finnish, one of the ten languages in the recent first release of UD project treebank data. 
We detail the mapping of previously introduced annotation to the UD standard, describing specific challenges and their resolution. We additionally present parsing experiments comparing the performance of a state-of-the-art parser trained on a language-specific annotation schema to performance on the corresponding UD annotation. The results show improvement compared to the source annotation, indicating that the conversion is accurate and supporting the feasibility of UD as a parsing target. The introduced tools and resources are available under open licenses from http://bionlp.utu.fi/ud-finnish.html.", "keyphrases": ["finnish", "treebank", "universal dependencies"]} +{"id": "jager-etal-2017-using", "title": "Using support vector machines and state-of-the-art algorithms for phonetic alignment to identify cognates in multi-lingual wordlists", "abstract": "Most current approaches in phylogenetic linguistics require as input multilingual word lists partitioned into sets of etymologically related words (cognates). Cognate identification is so far done manually by experts, which is time consuming and as of yet only available for a small number of well-studied language families. Automatizing this step will greatly expand the empirical scope of phylogenetic methods in linguistics, as raw wordlists (in phonetic transcription) are much easier to obtain than wordlists in which cognate words have been fully identified and annotated, even for under-studied languages. A couple of different methods have been proposed in the past, but they are either disappointing regarding their performance or not applicable to larger datasets. Here we present a new approach that uses support vector machines to unify different state-of-the-art methods for phonetic alignment and cognate detection within a single framework. Training and evaluating this method on a typologically broad collection of gold-standard data shows it to be superior to the existing state of the art.", "keyphrases": ["support vector machine", "phonetic alignment", "cognate detection"]} +{"id": "pradhan-etal-2013-towards", "title": "Towards Robust Linguistic Analysis using OntoNotes", "abstract": "Large-scale linguistically annotated corpora have played a crucial role in advancing the state of the art of key natural language technologies such as syntactic, semantic and discourse analyzers, and they serve as training data as well as evaluation benchmarks. Up till now, however, most of the evaluation has been done on monolithic corpora such as the Penn Treebank, the Proposition Bank. As a result, it is still unclear how the state-of-the-art analyzers perform in general on data from a variety of genres or domains. The completion of the OntoNotes corpus, a large-scale, multi-genre, multilingual corpus manually annotated with syntactic, semantic and discourse information, makes it possible to perform such an evaluation. This paper presents an analysis of the performance of publicly available, state-of-the-art tools on all layers and languages in the OntoNotes v5.0 corpus. 
This should set the benchmark for future development of various NLP components in syntax and semantics, and possibly encourage research towards an integrated system that makes use of the various layers jointly to improve overall performance.", "keyphrases": ["ontonotes", "discourse analyzer", "syntax", "similar corpora ontonotes", "human-annotated corpus"]} +{"id": "kobayashi-etal-2020-attention", "title": "Attention is Not Only a Weight: Analyzing Transformers with Vector Norms", "abstract": "Attention is a key component of Transformers, which have recently achieved considerable success in natural language processing. Hence, attention is being extensively studied to investigate various linguistic capabilities of Transformers, focusing on analyzing the parallels between attention weights and specific linguistic phenomena. This paper shows that attention weights alone are only one of the two factors that determine the output of attention and proposes a norm-based analysis that incorporates the second factor, the norm of the transformed input vectors. The findings of our norm-based analyses of BERT and a Transformer-based neural machine translation system include the following: (i) contrary to previous studies, BERT pays poor attention to special tokens, and (ii) reasonable word alignment can be extracted from attention mechanisms of Transformer. These findings provide insights into the inner workings of Transformers.", "keyphrases": ["weight", "transformer", "attention weight", "norm-based analysis", "input vector"]} +{"id": "soni-etal-2014-modeling", "title": "Modeling Factuality Judgments in Social Media Text", "abstract": "How do journalists mark quoted content as certain or uncertain, and how do readers interpret these signals? Predicates such as thinks, claims, and admits offer a range of options for framing quoted content according to the author\u2019s own perceptions of its credibility. We gather a new dataset of direct and indirect quotes from Twitter, and obtain annotations of the perceived certainty of the quoted statements. We then compare the ability of linguistic and extra-linguistic features to predict readers\u2019 assessment of the certainty of quoted content. We see that readers are indeed influenced by such framing devices \u2014 and we find no evidence that they consider other factors, such as the source, journalist, or the content itself. In addition, we examine the impact of specific framing devices on perceptions of credibility.", "keyphrases": ["factuality", "journalist", "twitter"]} +{"id": "he-etal-2013-identification", "title": "Identification of Speakers in Novels", "abstract": "Speaker identification is the task of attributing utterances to characters in a literary narrative. It is challenging to automate because the speakers of the majority of utterances are not explicitly identified in novels. In this paper, we present a supervised machine learning approach for the task that incorporates several novel features. The experimental results show that our method is more accurate and general than previous approaches to the problem.", "keyphrases": ["novel", "speaker identification", "literary text"]} +{"id": "dai-adel-2020-analysis", "title": "An Analysis of Simple Data Augmentation for Named Entity Recognition", "abstract": "Simple yet effective data augmentation techniques have been proposed for sentence-level and sentence-pair natural language processing tasks. 
Inspired by these efforts, we design and compare data augmentation for named entity recognition, which is usually modeled as a token-level sequence labeling problem. Through experiments on two data sets from the biomedical and materials science domains (i2b2-2010 and MaSciP), we show that simple augmentation can boost performance for both recurrent and transformer-based models, especially for small training sets.", "keyphrases": ["data augmentation", "named entity recognition", "synonym replacement"]} +{"id": "pan-etal-2019-reinforced", "title": "Reinforced Dynamic Reasoning for Conversational Question Generation", "abstract": "This paper investigates a new task named Conversational Question Generation (CQG) which is to generate a question based on a passage and a conversation history (i.e., previous turns of question-answer pairs). CQG is a crucial task for developing intelligent agents that can drive question-answering style conversations or test user understanding of a given passage. Towards that end, we propose a new approach named Reinforced Dynamic Reasoning network, which is based on the general encoder-decoder framework but incorporates a reasoning procedure in a dynamic manner to better understand what has been asked and what to ask next about the passage into the general encoder-decoder framework. To encourage producing meaningful questions, we leverage a popular question answering (QA) model to provide feedback and fine-tune the question generator using a reinforcement learning mechanism. Empirical results on the recently released CoQA dataset demonstrate the effectiveness of our method in comparison with various baselines and model variants. Moreover, to show the applicability of our method, we also apply it to create multi-turn question-answering conversations for passages in SQuAD.", "keyphrases": ["conversational question generation", "dynamic reasoning network", "feedback", "answer-unaware cqg"]} +{"id": "rink-harabagiu-2010-utd", "title": "UTD: Classifying Semantic Relations by Combining Lexical and Semantic Resources", "abstract": "This paper describes our system for SemEval-2010 Task 8 on multi-way classification of semantic relations between nominals. First, the type of semantic relation is classified. Then a relation type-specific classifier determines the relation direction. Classification is performed using SVM classifiers and a number of features that capture the context, semantic role affiliation, and possible pre-existing relations of the nominals. This approach achieved an F1 score of 82.19% and an accuracy of 77.92%.", "keyphrases": ["svm", "support vector machine", "relation classification", "framenet"]} +{"id": "collins-2003-head", "title": "Head-Driven Statistical Models for Natural Language Parsing", "abstract": "This article describes three statistical models for natural language parsing. The models extend methods from probabilistic context-free grammars to lexicalized grammars, leading to approaches in which a parse tree is represented as the sequence of decisions corresponding to a head-centered, top-down derivation of the tree. Independence assumptions then lead to parameters that encode the X-bar schema, subcategorization, ordering of complements, placement of adjuncts, bigram lexical dependencies, wh-movement, and preferences for close attachment. All of these preferences are expressed by probabilities conditioned on lexical heads. 
The models are evaluated on the Penn Wall Street Journal Treebank, showing that their accuracy is competitive with other models in the literature. To gain a better understanding of the models, we also give results on different constituent types, as well as a breakdown of precision/recall results in recovering various types of dependencies. We analyze various characteristics of the models through experiments on parsing accuracy, by collecting frequencies of various structures in the treebank, and through linguistically motivated examples. Finally, we compare the models to others that have been applied to parsing the treebank, aiming to give some explanation of the difference in performance of the various models.", "keyphrases": ["head", "treebank", "generative model", "state-of-the-art parser", "production rule"]} +{"id": "nguyen-daume-iii-2019-help", "title": "Help, Anna! Visual Navigation with Natural Multimodal Assistance via Retrospective Curiosity-Encouraging Imitation Learning", "abstract": "Mobile agents that can leverage help from humans can potentially accomplish more complex tasks than they could entirely on their own. We develop \u201cHelp, Anna!\u201d (HANNA), an interactive photo-realistic simulator in which an agent fulfills object-finding tasks by requesting and interpreting natural language-and-vision assistance. An agent solving tasks in a HANNA environment can leverage simulated human assistants, called ANNA (Automatic Natural Navigation Assistants), which, upon request, provide natural language and visual instructions to direct the agent towards the goals. To address the HANNA problem, we develop a memory-augmented neural agent that hierarchically models multiple levels of decision-making, and an imitation learning algorithm that teaches the agent to avoid repeating past mistakes while simultaneously predicting its own chances of making future progress. Empirically, our approach is able to ask for help more effectively than competitive baselines and, thus, attains higher task success rate on both previously seen and previously unseen environments.", "keyphrases": ["help", "navigation task", "natural language instruction"]} +{"id": "guo-etal-2013-linking", "title": "Linking Tweets to News: A Framework to Enrich Short Text Data in Social Media", "abstract": "Many current Natural Language Processing (NLP) techniques work well assuming a large context of text as input data. However they become ineffective when applied to short texts such as Twitter feeds. To overcome the issue, we want to find a related newswire document to a given tweet to provide contextual support for NLP tasks. This requires robust modeling and understanding of the semantics of short texts. The contribution of the paper is two-fold: 1. we introduce the Linking-Tweets-to-News task as well as a dataset of linked tweet-news pairs, which can benefit many NLP applications; 2. in contrast to previous research which focuses on lexical features within the short texts (text-to-word information), we propose a graph based latent variable model that models the inter short text correlations (text-to-text information). This is motivated by the observation that a tweet usually only covers one aspect of an event. We show that using tweet specific feature (hashtag) and news specific feature (named entities) as well as temporal constraints, we are able to extract text-to-text correlations, and thus completes the semantic picture of a short text. 
Our experiments show significant improvement of our new model over baselines with three evaluation metrics in the new task.", "keyphrases": ["twitter feed", "contextual support", "news"]} +{"id": "lo-etal-2014-xmeant", "title": "XMEANT: Better semantic MT evaluation without reference translations", "abstract": "We introduce XMEANT\u2014a new cross-lingual version of the semantic frame based MT evaluation metric MEANT\u2014which can correlate even more closely with human adequacy judgments than monolingual MEANT and eliminates the need for expensive human references. Previous work established that MEANT reflects translation adequacy with state-of-the-art accuracy, and optimizing MT systems against MEANT robustly improves translation quality. However, to go beyond tuning weights in the log-linear SMT model, a cross-lingual objective function that can deeply integrate semantic frame criteria into the MT training pipeline is needed. We show that cross-lingual XMEANT outperforms monolingual MEANT by (1) replacing the monolingual context vector model in MEANT with simple translation probabilities, and (2) incorporating bracketing ITG constraints.", "keyphrases": ["reference", "semantic frame", "xmeant"]} +{"id": "sorodoc-etal-2016-look", "title": "\u201cLook, some Green Circles!\u201d: Learning to Quantify from Images", "abstract": "In this paper, we investigate whether a neural network model can learn the meaning of natural language quantifiers (no, some and all) from their use in visual contexts. We show that memory networks perform well in this task, and that explicit counting is not necessary to the system\u2019s performance, supporting psycholinguistic evidence on the acquisition of quantifiers.", "keyphrases": ["quantifier", "image", "dot"]} +{"id": "bamman-etal-2014-distributed", "title": "Distributed Representations of Geographically Situated Language", "abstract": "We introduce a model for incorporating contextual information (such as geography) in learning vector-space representations of situated language. In contrast to approaches to multimodal representation learning that have used properties of the object being described (such as its color), our model includes information about the subject (i.e., the speaker), allowing us to learn the contours of a word\u2019s meaning that are shaped by the context in which it is uttered. In a quantitative evaluation on the task of judging geographically informed semantic similarity between representations learned from 1.1 billion words of geo-located tweets, our joint model outperforms comparable independent models that learn meaning in isolation.", "keyphrases": ["situated language", "contextual information", "word embedding"]} +{"id": "addawood-bashir-2016-evidence", "title": "\u201cWhat Is Your Evidence?\u201d A Study of Controversial Topics on Social Media", "abstract": "In recent years, social media has revolutionized how people communicate and share information. One function of social media, besides connecting with friends, is sharing opinions with others. Microblogging sites, like Twitter, have often provided an online forum for social activism. When users debate about controversial topics on social media, they typically share different types of evidence to support their claims. Classifying these types of evidence can provide an estimate for how adequately the arguments have been supported. We first introduce a manually built gold standard dataset of 3000 tweets related to the recent FBI and Apple encryption debate. 
We develop a framework for automatically classifying six evidence types typically used on Twitter to discuss the debate. Our findings show that a Support Vector Machine (SVM) classifier trained with n-gram and additional features is capable of capturing the different forms of representing evidence on Twitter, and exhibits significant improvements over the unigram baseline, achieving a macro-averaged F1 of 82.8%.", "keyphrases": ["twitter", "claim", "evidence type", "expert opinion", "argumentative tweet"]} +{"id": "li-etal-2019-coherent", "title": "Coherent Comments Generation for Chinese Articles with a Graph-to-Sequence Model", "abstract": "Automatic article commenting is helpful in encouraging user engagement on online news platforms. However, the news documents are usually too long for models under traditional encoder-decoder frameworks, which often results in general and irrelevant comments. In this paper, we propose to generate comments with a graph-to-sequence model that models the input news as a topic interaction graph. By organizing the article into graph structure, our model can better understand the internal structure of the article and the connection between topics, which makes it better able to generate coherent and informative comments. We collect and release a large scale news-comment corpus from a popular Chinese online news platform Tencent Kuaibao. Extensive experiment results show that our model can generate much more coherent and informative comments compared with several strong baseline models.", "keyphrases": ["graph-to-sequence model", "input news", "topic interaction graph"]} +{"id": "watanabe-etal-2007-online", "title": "Online Large-Margin Training for Statistical Machine Translation", "abstract": "We achieved state-of-the-art performance in statistical machine translation by using a large number of features with an online large-margin training algorithm. The millions of parameters were tuned only on a small development set consisting of less than 1K sentences. Experiments on Arabic-to-English translation indicated that a model trained with sparse binary features outperformed a conventional SMT system with a small number of features.", "keyphrases": ["statistical machine translation", "large number", "smt system", "mira"]} +{"id": "oortwijn-etal-2021-interrater", "title": "Interrater Disagreement Resolution: A Systematic Procedure to Reach Consensus in Annotation Tasks", "abstract": "We present a systematic procedure for interrater disagreement resolution. The procedure is general, but of particular use in multiple-annotator tasks geared towards ground truth construction. We motivate our proposal by arguing that, barring cases in which the researchers' goal is to elicit different viewpoints, interrater disagreement is a sign of poor quality in the design or the description of a task. Consensus among annotators, we maintain, should be striven for, through a systematic procedure for disagreement resolution such as the one we describe.", "keyphrases": ["systematic procedure", "annotation task", "interrater disagreement resolution"]} +{"id": "mizumoto-etal-2011-mining", "title": "Mining Revision Log of Language Learning SNS for Automated Japanese Error Correction of Second Language Learners", "abstract": "We present an attempt to extract a large-scale Japanese learners\u2019 corpus from the revision log of a language learning SNS. 
This corpus is easy to obtain in large scale, covers a wide variety of topics and styles, and can be a great source of knowledge for both language learners and instructors. We also demonstrate that the extracted learners\u2019 corpus of Japanese as a second language can be used as training data for learners\u2019 error correction using an SMT approach. We evaluate different granularities of tokenization to alleviate the problem of word segmentation errors caused by erroneous input from language learners. Experimental results show that the character-wise model outperforms the word-wise model.", "keyphrases": ["revision log", "learner", "grammatical error"]} +{"id": "sorodoc-etal-2020-probing", "title": "Probing for Referential Information in Language Models", "abstract": "Language models keep track of complex information about the preceding context \u2013 including, e.g., syntactic relations in a sentence. We investigate whether they also capture information beneficial for resolving pronominal anaphora in English. We analyze two state-of-the-art models with LSTM and Transformer architectures, via probe tasks and analysis on a coreference annotated corpus. The Transformer outperforms the LSTM in all analyses. Our results suggest that language models are more successful at learning grammatical constraints than they are at learning truly referential information, in the sense of capturing the fact that we use language to refer to entities in the world. However, we find traces of the latter aspect, too.", "keyphrases": ["referential information", "language model", "coreference"]} +{"id": "lalor-etal-2019-learning", "title": "Learning Latent Parameters without Human Response Patterns: Item Response Theory with Artificial Crowds", "abstract": "Incorporating Item Response Theory (IRT) into NLP tasks can provide valuable information about model performance and behavior. Traditionally, IRT models are learned using human response pattern (RP) data, presenting a significant bottleneck for large data sets like those required for training deep neural networks (DNNs). In this work we propose learning IRT models using RPs generated from artificial crowds of DNN models. We demonstrate the effectiveness of learning IRT models using DNN-generated data through quantitative and qualitative analyses for two NLP tasks. Parameters learned from human and machine RPs for natural language inference and sentiment analysis exhibit medium to large positive correlations. We demonstrate a use-case for latent difficulty item parameters, namely training set filtering, and show that using difficulty to sample training data outperforms baseline methods. Finally, we highlight cases where human expectation about item difficulty does not match difficulty as estimated from the machine RPs.", "keyphrases": ["item response theory", "artificial crowd", "difficulty", "irt parameter"]} +{"id": "logeswaran-etal-2019-zero", "title": "Zero-Shot Entity Linking by Reading Entity Descriptions", "abstract": "We present the zero-shot entity linking task, where mentions must be linked to unseen entities without in-domain labeled data. The goal is to enable robust transfer to highly specialized domains, and so no metadata or alias tables are assumed. In this setting, entities are only identified by text descriptions, and models must rely strictly on language understanding to resolve the new entities. First, we show that strong reading comprehension models pre-trained on large unlabeled data can be used to generalize to unseen entities. 
Second, we propose a simple and effective adaptive pre-training strategy, which we term domain-adaptive pre-training (DAP), to address the domain shift problem associated with linking unseen entities in a new domain. We present experiments on a new dataset that we construct for this task and show that DAP improves over strong pre-training baselines, including BERT. The data and code are available at .", "keyphrases": ["entity linking", "pre-training", "zero-shot entity", "candidate", "wikipedia"]} +{"id": "andor-etal-2016-globally", "title": "Globally Normalized Transition-Based Neural Networks", "abstract": "We introduce a globally normalized transition-based neural network model that achieves state-of-the-art part-of-speech tagging, dependency parsing and sentence compression results. Our model is a simple feed-forward neural network that operates on a task-specific transition system, yet achieves comparable or better accuracies than recurrent models. We discuss the importance of global as opposed to local normalization: a key insight is that the label bias problem implies that globally normalized models can be strictly more expressive than locally normalized models.", "keyphrases": ["normalization", "dependency parsing", "action", "neural network architecture", "improved performance"]} +{"id": "yin-etal-2021-batchmixup", "title": "BatchMixup: Improving Training by Interpolating Hidden States of the Entire Mini-batch", "abstract": "Usually, we train a neural system on a sequence of mini-batches of labeled instances. Each mini-batch is composed of k samples, and each sample will learn a representation vector. Mixup implicitly generates synthetic samples through linearly interpolating inputs and their corresponding labels of random sample pairs in the same mini-batch. This means that Mixup only generates new points on the edges connecting every two original points in the representation space. We observed that the new points by the standard Mixup cover pretty limited regions in the entire space of the mini-batch. In this work, we propose BatchMixup\u2014improving the model learning by interpolating hidden states of the entire mini-batch. BatchMixup can generate new points scattered throughout the space corresponding to the mini-batch. In experiments, BatchMixup shows superior performance to competitive baselines in improving the performance of NLP tasks while using different ratios of training data.", "keyphrases": ["hidden state", "entire mini-batch", "sample", "mixup"]} +{"id": "hangya-fraser-2018-unsupervised", "title": "An Unsupervised System for Parallel Corpus Filtering", "abstract": "In this paper we describe LMU Munich's submission for the WMT 2018 Parallel Corpus Filtering shared task which addresses the problem of cleaning noisy parallel corpora. The task of mining and cleaning parallel sentences is important for improving the quality of machine translation systems, especially for low-resource languages. We tackle this problem in a fully unsupervised fashion relying on bilingual word embeddings created without any bilingual signal. After pre-filtering noisy data we rank sentence pairs by calculating bilingual sentence-level similarities and then remove redundant data by employing monolingual similarity as well. 
Our unsupervised system achieved good performance during the official evaluation of the shared task, scoring only a few BLEU points behind the best systems, while not requiring any parallel training data.", "keyphrases": ["unsupervised system", "parallel corpus filtering", "sentence pair"]} +{"id": "wiseman-rush-2016-sequence", "title": "Sequence-to-Sequence Learning as Beam-Search Optimization", "abstract": "Sequence-to-Sequence (seq2seq) modeling has rapidly become an important general-purpose NLP tool that has proven effective for many text-generation and sequence-labeling tasks. Seq2seq builds on deep neural language modeling and inherits its remarkable accuracy in estimating local, next-word distributions. In this work, we introduce a model and beam-search training scheme, based on the work of Daume III and Marcu (2005), that extends seq2seq to learn global sequence scores. This structured approach avoids classical biases associated with local training and unifies the training loss with the test-time usage, while preserving the proven model architecture of seq2seq and its efficient training approach. We show that our system outperforms a highly-optimized attention-based seq2seq system and other baselines on three different sequence to sequence tasks: word ordering, parsing, and machine translation.", "keyphrases": ["beam search", "seq2seq model", "sequence-to-sequence", "improved performance", "contrastive learning"]} +{"id": "kudugunta-etal-2019-investigating", "title": "Investigating Multilingual NMT Representations at Scale", "abstract": "Multilingual Neural Machine Translation (NMT) models have yielded large empirical success in transfer learning settings. However, these black-box representations are poorly understood, and their mode of transfer remains elusive. In this work, we attempt to understand massively multilingual NMT representations (with 103 languages) using Singular Value Canonical Correlation Analysis (SVCCA), a representation similarity framework that allows us to compare representations across different languages, layers and models. Our analysis validates several empirical results and long-standing intuitions, and unveils new observations regarding how representations evolve in a multilingual translation model. We draw three major results from our analysis, with implications on cross-lingual transfer learning: (i) Encoder representations of different languages cluster based on linguistic similarity, (ii) Representations of a source language learned by the encoder are dependent on the target language, and vice-versa, and (iii) Representations of high resource and/or linguistically similar languages are more robust when fine-tuning on an arbitrary language pair, which is critical to determining how much cross-lingual transfer can be expected in a zero or few-shot setting. We further connect our findings with existing empirical observations in multilingual NMT and transfer learning.", "keyphrases": ["scale", "svcca", "different language"]} +{"id": "uehara-etal-2015-detecting", "title": "Detecting an Infant's Developmental Reactions in Reviews on Picture Books", "abstract": "We extract the book reviews on picture books written on the Web site specialized in picture books, and find that those reviews reflect infants\u2019 behavioral expressions as well as their parents\u2019 reading activities in detail. 
Analysis of the reviews reveals that infants\u2019 reactions written on the reviews are coincident with the findings of developmental psychology concerning infants\u2019 behaviors. In order to examine how the stimuli of picture books induce varieties of infants\u2019 reactions, this paper proposes to detect an infant\u2019s developmental reactions in reviews on picture books and shows the effectiveness of the proposed method through experimental evaluation.", "keyphrases": ["infant", "developmental reaction", "review"]} +{"id": "huang-etal-2019-hubless", "title": "Hubless Nearest Neighbor Search for Bilingual Lexicon Induction", "abstract": "Bilingual Lexicon Induction (BLI) is the task of translating words from corpora in two languages. Recent advances in BLI work by aligning the two word embedding spaces. Following that, a key step is to retrieve the nearest neighbor (NN) in the target space given the source word. However, a phenomenon called hubness often degrades the accuracy of NN. Hubness appears as some data points, called hubs, being extraordinarily close to many of the other data points. Reducing hubness is necessary for retrieval tasks. One successful example is Inverted SoFtmax (ISF), recently proposed to improve NN. This work proposes a new method, Hubless Nearest Neighbor (HNN), to mitigate hubness. HNN differs from NN by imposing an additional equal preference assumption. Moreover, the HNN formulation explains why ISF works as well as it does. Empirical results demonstrate that HNN outperforms NN, ISF and other state-of-the-art. For reproducibility and follow-ups, we have published all code.", "keyphrases": ["bilingual lexicon induction", "bli", "hub", "other data point"]} +{"id": "zhong-ng-2010-makes", "title": "It Makes Sense: A Wide-Coverage Word Sense Disambiguation System for Free Text", "abstract": "Word sense disambiguation (WSD) systems based on supervised learning achieved the best performance in SensEval and SemEval workshops. However, there are few publicly available open source WSD systems. This limits the use of WSD in other applications, especially for researchers whose research interests are not in WSD. \n \nIn this paper, we present IMS, a supervised English all-words WSD system. The flexible framework of IMS allows users to integrate different preprocessing tools, additional features, and different classifiers. By default, we use linear support vector machines as the classifier with multiple knowledge-based features. In our implementation, IMS achieves state-of-the-art results on several SensEval and SemEval tasks.", "keyphrases": ["word sense disambiguation", "wsd system", "supervised approach", "lemma", "state-of-the-art system"]} +{"id": "turner-etal-2008-using", "title": "Using Spatial Reference Frames to Generate Grounded Textual Summaries of Georeferenced Data", "abstract": "Summarising georeferenced (can be identified according to its location) data in natural language is challenging because it requires linking events describing its non-geographic attributes to their underlying geography. This mapping is not straightforward as often the only explicit geographic information such data contains is latitude and longitude. In this paper we present an approach to generating textual summaries of georeferenced data based on spatial reference frames. 
This approach has been implemented in a data-to-text system we have deployed in the weather forecasting domain.", "keyphrases": ["georeferenced data", "explicit geographic information", "spatio-temporal data"]} +{"id": "fan-etal-2019-eli5", "title": "ELI5: Long Form Question Answering", "abstract": "We introduce the first large-scale corpus for long form question answering, a task requiring elaborate and in-depth answers to open-ended questions. The dataset comprises 270K threads from the Reddit forum \u201cExplain Like I'm Five\u201d (ELI5) where an online community provides answers to questions which are comprehensible by five year olds. Compared to existing datasets, ELI5 comprises diverse questions requiring multi-sentence answers. We provide a large set of web documents to help answer the question. Automatic and human evaluations show that an abstractive model trained with a multi-task objective outperforms conventional Seq2Seq, language modeling, as well as a strong extractive baseline. However, our best model is still far from human performance since raters prefer gold responses in over 86% of cases, leaving ample opportunity for future improvement.", "keyphrases": ["multi-sentence answer", "eli5", "explanation"]} +{"id": "hewitt-manning-2019-structural", "title": "A Structural Probe for Finding Syntax in Word Representations", "abstract": "Recent work has improved our ability to detect linguistic knowledge in word representations. However, current methods for detecting syntactic knowledge do not test whether syntax trees are represented in their entirety. In this work, we propose a structural probe, which evaluates whether syntax trees are embedded in a linear transformation of a neural network's word representation space. The probe identifies a linear transformation under which squared L2 distance encodes the distance between words in the parse tree, and one in which squared L2 norm encodes depth in the parse tree. Using our probe, we show that such transformations exist for both ELMo and BERT but not in baselines, providing evidence that entire syntax trees are embedded implicitly in deep models' vector geometry.", "keyphrases": ["structural probe", "syntax tree", "distance", "elmo"]} +{"id": "gamon-etal-2008-using", "title": "Using Contextual Speller Techniques and Language Modeling for ESL Error Correction", "abstract": "We present a modular system for detection and correction of errors made by nonnative (English as a Second Language = ESL) writers. We focus on two error types: the incorrect use of determiners and the choice of prepositions. We use a decision-tree approach inspired by contextual spelling systems for detection and correction suggestions, and a large language model trained on the Gigaword corpus to provide additional information to filter out spurious suggestions. We show how this system performs on a corpus of non-native English text and discuss strategies for future enhancements.", "keyphrases": ["language modeling", "esl", "grammatical error"]} +{"id": "bilu-slonim-2016-claim", "title": "Claim Synthesis via Predicate Recycling", "abstract": "Computational Argumentation has two main goals - the detection and analysis of arguments on the one hand, and the synthesis of arguments on the other. Much attention has been given to the former, but considerably less to the latter. A key component in synthesizing arguments is the synthesis of claims. One way to do so is by employing argumentation mining to detect claims within an appropriate corpus. 
In general, this appears to be a hard problem. Thus, it is interesting to explore if - for the sake of synthesis - there may be other ways to generate claims. Here we explore such a method: we extract the predicate of simple, manually-detected, claims, and attempt to generate novel claims from them. Surprisingly, this simple method yields fairly good results.", "keyphrases": ["synthesis", "predicate", "claim", "multiple interaction"]} +{"id": "negri-etal-2018-escape", "title": "ESCAPE: a Large-scale Synthetic Corpus for Automatic Post-Editing", "abstract": "Training models for the automatic correction of machine-translated text usually relies on data consisting of (source, MT, human post-edit) triplets providing, for each source sentence, examples of translation errors with the corresponding corrections made by a human post-editor. Ideally, a large amount of data of this kind should allow the model to learn reliable correction patterns and effectively apply them at test stage on unseen (source, MT) pairs. In practice, however, their limited availability calls for solutions that also integrate in the training process other sources of knowledge. Along this direction, state-of-the-art results have been recently achieved by systems that, in addition to a limited amount of available training data, exploit artificial corpora that approximate elements of the \"gold\" training instances with automatic translations. Following this idea, we present eSCAPE, the largest freely-available Synthetic Corpus for Automatic Post-Editing released so far. eSCAPE consists of millions of entries in which the MT element of the training triplets has been obtained by translating the source side of publicly-available parallel corpora, and using the target side as an artificial human post-edit. Translations are obtained both with phrase-based and neural models. For each MT paradigm, eSCAPE contains 7.2 million triplets for English-German and 3.3 million for English-Italian, resulting in a total of 14.4 and 6.6 million instances respectively. The usefulness of eSCAPE is proved through experiments in a general-domain scenario, the most challenging one for automatic post-editing. For both language directions, the models trained on our artificial data always improve MT quality with statistically significant gains. The current version of eSCAPE can be freely downloaded from: this http URL", "keyphrases": ["synthetic corpus", "automatic post-editing", "source sentence", "ape data", "escape"]} +{"id": "eisner-2003-learning", "title": "Learning Non-Isomorphic Tree Mappings for Machine Translation", "abstract": "Often one may wish to learn a tree-to-tree mapping, training it on unaligned pairs of trees, or on a mixture of trees and strings. Unlike previous statistical formalisms (limited to isomorphic trees), synchronous TSG allows local distortion of the tree topology. We reformulate it to permit dependency trees, and sketch EM/Viterbi algorithms for alignment, training, and decoding.", "keyphrases": ["non-isomorphism", "machine translation", "tree fragment", "cross-lingual structure divergence", "synchronous grammar"]} +{"id": "singh-etal-2011-large", "title": "Large-Scale Cross-Document Coreference Using Distributed Inference and Hierarchical Models", "abstract": "Cross-document coreference, the task of grouping all the mentions of each entity in a document collection, arises in information extraction and automated knowledge base construction. 
For large collections, it is clearly impractical to consider all possible groupings of mentions into distinct entities. To solve the problem we propose two ideas: (a) a distributed inference technique that uses parallelism to enable large scale processing, and (b) a hierarchical model of coreference that represents uncertainty over multiple granularities of entities to facilitate more effective approximate inference. To evaluate these ideas, we constructed a labeled corpus of 1.5 million disambiguated mentions in Web pages by selecting link anchors referring to Wikipedia entities. We show that the combination of the hierarchical model with distributed inference quickly obtains high accuracy (with error reduction of 38%) on this large dataset, demonstrating the scalability of our approach.", "keyphrases": ["cross-document coreference", "knowledge base", "equivalence class", "ccr"]} +{"id": "chen-etal-2016-neural-sentiment", "title": "Neural Sentiment Classification with User and Product Attention", "abstract": "Document-level sentiment classification aims to predict user\u2019s overall sentiment in a document about a product. However, most existing methods only focus on local text information and ignore the global user preference and product characteristics. Even though some works take such information into account, they usually suffer from high model complexity and only consider word-level preference rather than semantic levels. To address this issue, we propose a hierarchical neural network to incorporate global user and product information into sentiment classification. Our model first builds a hierarchical LSTM model to generate sentence and document representations. Afterwards, user and product information is considered via attentions over different semantic levels due to its ability of capturing crucial semantic components. The experimental results show that our model achieves significant and consistent improvements compared to all state-of-the-art methods. The source code of this paper can be obtained from https://github.com/thunlp/NSC.", "keyphrases": ["sentiment classification", "product", "word-level preference", "attention model", "state-of-the-art model"]} +{"id": "apidianaki-2009-data", "title": "Data-Driven Semantic Analysis for Multilingual WSD and Lexical Selection in Translation", "abstract": "A common way of describing the senses of ambiguous words in multilingual Word Sense Disambiguation (WSD) is by reference to their translation equivalents in another language. The theoretical soundness of the senses induced in this way can, however, be doubted. This type of cross-lingual sense identification has implications for multilingual WSD and MT evaluation as well. In this article, we first present some arguments in favour of a more thorough analysis of the semantic information that may be induced by the equivalents of ambiguous words found in parallel corpora. Then, we present an unsupervised WSD method and a lexical selection method that exploit the results of a data-driven sense induction method. Finally, we show how this automatically acquired information can be exploited for a multilingual WSD and MT evaluation more sensitive to lexical semantics.", "keyphrases": ["wsd", "sense inventory", "cross-lingual evidence idea", "machine translation"]} +{"id": "beck-etal-2018-graph", "title": "Graph-to-Sequence Learning using Gated Graph Neural Networks", "abstract": "Many NLP applications can be framed as a graph-to-sequence learning problem. 
Previous work proposing neural architectures on graph-to-sequence obtained promising results compared to grammar-based approaches but still relies on linearisation heuristics and/or standard recurrent networks to achieve the best performance. In this work we propose a new model that encodes the full structural information contained in the graph. Our architecture couples the recently proposed Gated Graph Neural Networks with an input transformation that allows nodes and edges to have their own hidden representations, while tackling the parameter explosion problem present in previous work. Experimental results show that our model outperforms strong baselines in generation from AMR graphs and syntax-based neural machine translation.", "keyphrases": ["neural architecture", "gnn", "graph-to-sequence model", "sequential encoder", "amr-to-text problem"]} +{"id": "mishra-etal-2017-learning", "title": "Learning Cognitive Features from Gaze Data for Sentiment and Sarcasm Classification using Convolutional Neural Network", "abstract": "Cognitive NLP systems - i.e., NLP systems that make use of behavioral data - augment traditional text-based features with cognitive features extracted from eye-movement patterns, EEG signals, brain-imaging etc. Such extraction of features is typically manual. We contend that manual extraction of features may not be the best way to tackle text subtleties that characteristically prevail in complex classification tasks like Sentiment Analysis and Sarcasm Detection, and that even the extraction and choice of features should be delegated to the learning system. We introduce a framework to automatically extract cognitive features from the eye-movement/gaze data of human readers reading the text and use them as features along with textual features for the tasks of sentiment polarity and sarcasm detection. Our proposed framework is based on Convolutional Neural Network (CNN). The CNN learns features from both gaze and text and uses them to classify the input text. We test our technique on published sentiment and sarcasm labeled datasets, enriched with gaze information, to show that using a combination of automatically learned text and gaze features often yields better classification performance over (i) CNN based systems that rely on text input alone and (ii) existing systems that rely on handcrafted gaze and textual features.", "keyphrases": ["sarcasm classification", "convolutional neural network", "gaze feature"]} +{"id": "max-wisniewski-2010-mining", "title": "Mining Naturally-occurring Corrections and Paraphrases from Wikipedia's Revision History", "abstract": "Naturally-occurring instances of linguistic phenomena are important both for training and for evaluating automatic text processing. When available in large quantities, they also prove interesting material for linguistic studies. In this article, we present WiCoPaCo (Wikipedia Correction and Paraphrase Corpus), a new freely-available resource built by automatically mining Wikipedia\u2019s revision history. The WiCoPaCo corpus focuses on local modifications made by human revisors and includes various types of corrections (such as spelling error or typographical corrections) and rewritings, which can be categorized broadly into meaning-preserving and meaning-altering revisions. We present an initial hand-built typology of these revisions, but the resource allows for any possible annotation scheme. We discuss the main motivations for building such a resource and describe the main technical details guiding its construction. 
We also present applications and data analysis on French and report initial results on spelling error correction and morphosyntactic rewriting. The WiCoPaCo corpus can be freely downloaded from .", "keyphrases": ["wikipedia", "revision history", "paraphrase corpus", "spelling error", "rewriting"]} +{"id": "sun-etal-2011-semi", "title": "Semi-supervised Relation Extraction with Large-scale Word Clustering", "abstract": "We present a simple semi-supervised relation extraction system with large-scale word clustering. We focus on systematically exploring the effectiveness of different cluster-based features. We also propose several statistical methods for selecting clusters at an appropriate level of granularity. When training on different sizes of data, our semi-supervised approach consistently outperformed a state-of-the-art supervised baseline system.", "keyphrases": ["relation extraction", "large-scale word", "kernel", "brown cluster"]} +{"id": "mcdonald-etal-2005-simple", "title": "Simple Algorithms for Complex Relation Extraction with Applications to Biomedical IE", "abstract": "A complex relation is any n-ary relation in which some of the arguments may be unspecified. We present here a simple two-stage method for extracting complex relations between named entities in text. The first stage creates a graph from pairs of entities that are likely to be related, and the second stage scores maximal cliques in that graph as potential complex relation instances. We evaluate the new method against a standard baseline for extracting genomic variation relations from biomedical text.", "keyphrases": ["complex relation", "maximal clique", "biomedical domain"]} +{"id": "goldman-etal-2018-weakly", "title": "Weakly Supervised Semantic Parsing with Abstract Examples", "abstract": "Training semantic parsers from weak supervision (denotations) rather than strong supervision (programs) complicates training in two ways. First, a large search space of potential programs needs to be explored at training time to find a correct program. Second, spurious programs that accidentally lead to a correct denotation add noise to training. In this work we propose that in closed worlds with clear semantic types, one can substantially alleviate these problems by utilizing an abstract representation, where tokens in both the language utterance and program are lifted to an abstract form. We show that these abstractions can be defined with a handful of lexical rules and that they result in sharing between different examples that alleviates the difficulties in training. To test our approach, we develop the first semantic parser for CNLVR, a challenging visual reasoning dataset, where the search space is large and overcoming spuriousness is critical, because denotations are either TRUE or FALSE, and thus random programs are likely to lead to a correct denotation. Our method substantially improves performance, and reaches 82.5% accuracy, a 14.7% absolute accuracy improvement compared to the best reported accuracy so far.", "keyphrases": ["abstract example", "semantic parser", "search space"]} +{"id": "pantel-ravichandran-2004-automatically", "title": "Automatically Labeling Semantic Classes", "abstract": "Systems that automatically discover semantic classes have emerged in part to address the limitations of broad-coverage lexical resources such as WordNet and Cyc. The current state of the art discovers many semantic classes but fails to label their concepts. 
We propose an algorithm for labeling semantic classes and for leveraging them to extract is-a relationships using a top-down approach.", "keyphrases": ["semantic class", "noun", "cluster"]} +{"id": "thadani-mckeown-2011-optimal", "title": "Optimal and Syntactically-Informed Decoding for Monolingual Phrase-Based Alignment", "abstract": "The task of aligning corresponding phrases across two related sentences is an important component of approaches for natural language problems such as textual inference, paraphrase detection and text-to-text generation. In this work, we examine a state-of-the-art structured prediction model for the alignment task which uses a phrase-based representation and is forced to decode alignments using an approximate search approach. We propose instead a straightforward exact decoding technique based on integer linear programming that yields order-of-magnitude improvements in decoding speed. This ILP-based decoding strategy permits us to consider syntactically-informed constraints on alignments which significantly increase the precision of the model.", "keyphrases": ["decoding", "phrase-based alignment", "integer linear programming", "semantic unit"]} +{"id": "tu-etal-2019-multi", "title": "Multi-hop Reading Comprehension across Multiple Documents by Reasoning over Heterogeneous Graphs", "abstract": "Multi-hop reading comprehension (RC) across documents poses new challenge over single-document RC because it requires reasoning over multiple documents to reach the final answer. In this paper, we propose a new model to tackle the multi-hop RC problem. We introduce a heterogeneous graph with different types of nodes and edges, which is named as Heterogeneous Document-Entity (HDE) graph. The advantage of HDE graph is that it contains different granularity levels of information including candidates, documents and entities in specific document contexts. Our proposed model can do reasoning over the HDE graph with nodes representation initialized with co-attention and self-attention based context encoders. We employ Graph Neural Networks (GNN) based message passing algorithms to accumulate evidences on the proposed HDE graph. Evaluated on the blind test set of the Qangaroo WikiHop data set, our HDE graph based single model delivers competitive result, and the ensemble model achieves the state-of-the-art performance.", "keyphrases": ["comprehension", "heterogeneous graph", "candidate", "multi-hop", "multi-hop reading comprehension"]} +{"id": "yamamoto-etal-2003-learning", "title": "Learning Sequence-to-Sequence Correspondences from Parallel Corpora via Sequential Pattern Mining", "abstract": "We present an unsupervised extraction of sequence-to-sequence correspondences from parallel corpora by sequential pattern mining. The main characteristics of our method are two-fold. First, we propose a systematic way to enumerate all possible translation pair candidates of rigid and gapped sequences without falling into combinatorial explosion. Second, our method uses an efficient data structure and algorithm for calculating frequencies in a contingency table for each translation pair candidate. Our method is empirically evaluated using English-Japanese parallel corpora of 6 million words. 
Results indicate that it works well for multi-word translations, giving 56-84% accuracy at 19% token coverage and 11% type coverage.", "keyphrases": ["sequence-to-sequence correspondence", "parallel corpora", "sequential pattern mining"]} +{"id": "peters-etal-2019-sparse", "title": "Sparse Sequence-to-Sequence Models", "abstract": "Sequence-to-sequence models are a powerful workhorse of NLP. Most variants employ a softmax transformation in both their attention mechanism and output layer, leading to dense alignments and strictly positive output probabilities. This density is wasteful, making models less interpretable and assigning probability mass to many implausible outputs. In this paper, we propose sparse sequence-to-sequence models, rooted in a new family of \u03b1-entmax transformations, which includes softmax and sparsemax as particular cases, and is sparse for any \u03b1 > 1. We provide fast algorithms to evaluate these transformations and their gradients, which scale well for large vocabulary sizes. Our models are able to produce sparse alignments and to assign nonzero probability to a short list of plausible outputs, sometimes rendering beam search exact. Experiments on morphological inflection and machine translation reveal consistent gains over dense models.", "keyphrases": ["probability", "\u03b1-entmax transformation", "sparse sequence-to-sequence model"]} +{"id": "mou-etal-2016-transferable", "title": "How Transferable are Neural Networks in NLP Applications?", "abstract": "Transfer learning aims to make use of valuable knowledge in a source domain to help model performance in a target domain. It is particularly important to neural networks, which are prone to overfitting. In some fields like image processing, many studies have shown the effectiveness of neural network-based transfer learning. For neural NLP, however, existing studies have only casually applied transfer learning, and conclusions are inconsistent. In this paper, we conduct systematic case studies and provide an illuminating picture on the transferability of neural networks in NLP.", "keyphrases": ["target domain", "semantic relatedness", "language inference"]} +{"id": "wenzek-etal-2020-ccnet", "title": "CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data", "abstract": "Pre-training text representations have led to significant improvements in many areas of natural language processing. The quality of these models benefits greatly from the size of the pretraining corpora as long as its quality is preserved. In this paper, we describe an automatic pipeline to extract massive high-quality monolingual datasets from Common Crawl for a variety of languages. Our pipeline follows the data processing introduced in fastText (Mikolov et al., 2017; Grave et al., 2018), that deduplicates documents and identifies their language. We augment this pipeline with a filtering step to select documents that are close to high quality corpora like Wikipedia.", "keyphrases": ["common crawl", "ccnet", "low-resource language"]} +{"id": "yang-katiyar-2020-simple", "title": "Simple and Effective Few-Shot Named Entity Recognition with Structured Nearest Neighbor Learning", "abstract": "We present a simple few-shot named entity recognition (NER) system based on nearest neighbor learning and structured inference. Our system uses a supervised NER model trained on the source domain as a feature extractor. 
Across several test domains, we show that a nearest neighbor classifier in this feature-space is far more effective than the standard meta-learning approaches. We further propose a cheap but effective method to capture the label dependencies between entity tags without expensive CRF training. We show that our method of combining structured decoding with nearest neighbor learning achieves state-of-the-art performance on standard few-shot NER evaluation tasks, improving F1 scores by 6% to 16% absolute points over prior meta-learning based systems.", "keyphrases": ["entity recognition", "neighbor", "few-shot ner", "annotated data", "viterbi decoder"]} +{"id": "liu-etal-2008-understanding", "title": "Understanding and Summarizing Answers in Community-Based Question Answering Services", "abstract": "Community-based question answering (cQA) services have accumulated millions of questions and their answers over time. In the process of accumulation, cQA services assume that questions always have unique best answers. However, with an in-depth analysis of questions and answers on cQA services, we find that the assumption cannot be true. According to the analysis, at least 78% of the cQA best answers are reusable when similar questions are asked again, but no more than 48% of them are indeed the unique best answers. We conduct the analysis by proposing taxonomies for cQA questions and answers. To better reuse the cQA content, we also propose applying automatic summarization techniques to summarize answers. Our results show that question-type oriented summarization techniques can improve cQA answer quality significantly.", "keyphrases": ["cqa service", "good answer", "answer summarization"]} +{"id": "lu-etal-2007-improving", "title": "Improving Statistical Machine Translation Performance by Training Data Selection and Optimization", "abstract": "A parallel corpus is an indispensable resource for translation model training in statistical machine translation (SMT). Instead of collecting more and more parallel training corpora, this paper aims to improve SMT performance by exploiting the full potential of the existing parallel corpora. Two kinds of methods are proposed: offline data optimization and online model optimization. The offline method adapts the training data by redistributing the weight of each training sentence pair. The online method adapts the translation model by redistributing the weight of each predefined submodel. An information retrieval model is used for the weighting scheme in both methods. Experimental results show that without using any additional resource, both methods can improve SMT performance significantly.", "keyphrases": ["training data selection", "parallel corpus", "translation model", "tf-idf", "information retrieval method"]} +{"id": "mayfield-etal-2003-named", "title": "Named Entity Recognition using Hundreds of Thousands of Features", "abstract": "We present an approach to named entity recognition that uses support vector machines to capture transition probabilities in a lattice. The support vector machines are trained with hundreds of thousands of features drawn from the CoNLL-2003 Shared Task training data. Margin outputs are converted to estimated probabilities using a simple static function. 
Performance is evaluated using the CoNLL-2003 Shared Task test set; Test B results were F\u03b2=1 = 84.67 for English, and F\u03b2=1 = 69.96 for German.", "keyphrases": ["entity recognition", "hundred", "dependent feature", "pos tag", "position"]} +{"id": "khapra-etal-2011-together", "title": "Together We Can: Bilingual Bootstrapping for WSD", "abstract": "Recent work on bilingual Word Sense Disambiguation (WSD) has shown that a resource deprived language (L1) can benefit from the annotation work done in a resource rich language (L2) via parameter projection. However, this method assumes the presence of sufficient annotated data in one resource rich language which may not always be possible. Instead, we focus on the situation where there are two resource deprived languages, both having a very small amount of seed annotated data and a large amount of untagged data. We then use bilingual bootstrapping, wherein, a model trained using the seed annotated data of L1 is used to annotate the untagged data of L2 and vice versa using parameter projection. The untagged instances of L1 and L2 which get annotated with high confidence are then added to the seed data of the respective languages and the above process is repeated. Our experiments show that such a bilingual bootstrapping algorithm when evaluated on two different domains with small seed sizes using Hindi (L1) and Marathi (L2) as the language pair performs better than monolingual bootstrapping and significantly reduces annotation cost.", "keyphrases": ["bilingual bootstrapping", "wsd", "parameter projection", "untagged data", "vice"]} +{"id": "wang-etal-2020-inference", "title": "On the Inference Calibration of Neural Machine Translation", "abstract": "Confidence calibration, which aims to make model predictions equal to the true correctness measures, is important for neural machine translation (NMT) because it is able to offer useful indicators of translation errors in the generated output. While prior studies have shown that NMT models trained with label smoothing are well-calibrated on the ground-truth training data, we find that miscalibration still remains a severe challenge for NMT during inference due to the discrepancy between training and inference. By carefully designing experiments on three language pairs, our work provides in-depth analyses of the correlation between calibration and translation performance as well as linguistic properties of miscalibration and reports a number of interesting findings that might help humans better analyze, understand and improve NMT models. Based on these observations, we further propose a new graduated label smoothing method that can improve both inference calibration and translation performance.", "keyphrases": ["inference calibration", "neural machine translation", "miscalibration", "label smoothing method", "in-depth analysis"]} +{"id": "melamed-2003-multitext", "title": "Multitext Grammars and Synchronous Parsers", "abstract": "Multitext Grammars (MTGs) generate arbitrarily many parallel texts via production rules of arbitrary length. Both ordinary MTGs and their bilexical subclass admit relatively efficient parsers. Yet, MTGs are more expressive than other synchronous formalisms for which parsers have been described in the literature. 
The combination of greater expressive power and relatively low cost of inference makes MTGs an attractive foundation for practical models of translational equivalence.", "keyphrases": ["multitext grammars", "synchronous parsing", "discontinuous constituent"]} +{"id": "duh-kirchhoff-2005-pos", "title": "POS Tagging of Dialectal Arabic: A Minimally Supervised Approach", "abstract": "Natural language processing technology for the dialects of Arabic is still in its infancy, due to the problem of obtaining large amounts of text data for spoken Arabic. In this paper we describe the development of a part-of-speech (POS) tagger for Egyptian Colloquial Arabic. We adopt a minimally supervised approach that only requires raw text data from several varieties of Arabic and a morphological analyzer for Modern Standard Arabic. No dialect-specific tools are used. We present several statistical modeling and cross-dialectal data sharing techniques to enhance the performance of the baseline tagger and compare the results to those obtained by a supervised tagger trained on hand-annotated data and by a state-of-the-art Modern Standard Arabic tagger applied to Egyptian Arabic.", "keyphrases": ["dialectal arabic", "pos tagging", "disambiguation tool"]} +{"id": "zhang-etal-2013-wordtopic", "title": "WordTopic-MultiRank: A New Method for Automatic Keyphrase Extraction", "abstract": "Automatic keyphrase extraction aims to pick out a set of terms as a representation of a document without manual assignment efforts. Supervised and unsupervised graph-based ranking methods have been studied for this task. However, previous methods usually computed importance scores of words under the assumption of a single relation between words. In this work, we propose WordTopic-MultiRank as a new method for keyphrase extraction, based on the idea that words relate with each other via multiple relations. First, we treat various latent topics in documents as heterogeneous relations between words and construct a multi-relational word network. Then, a novel ranking algorithm, named Biased-MultiRank, is applied to score the importance of words and topics simultaneously, as words and topics are considered to have mutual influence on each other. Experimental results on two different data sets show the outstanding performance and robustness of our proposed approach in automatic keyphrase extraction task.", "keyphrases": ["new method", "automatic keyphrase extraction", "wordtopic-multirank"]} +{"id": "klein-etal-2003-named", "title": "Named Entity Recognition with Character-Level Models", "abstract": "We discuss two named-entity recognition models which use characters and character n-grams either exclusively or as an important part of their data representation. The first model is a character-level HMM with minimal context information, and the second model is a maximum-entropy conditional markov model with substantially richer context features. Our best model achieves an overall F1 of 86.07% on the English test data (92.31% on the development data). This number represents a 25% error reduction over the same model without word-internal (substring) features.", "keyphrases": ["character-level model", "hmm", "conditional markov model", "test data", "rich context feature"]} +{"id": "zhang-etal-2006-distributed", "title": "Distributed Language Modeling for N-best List Re-ranking", "abstract": "In this paper we describe a novel distributed language model for N-best list re-ranking. 
The model is based on the client/server paradigm where each server hosts a portion of the data and provides information to the client. This model allows for using an arbitrarily large corpus in a very efficient way. It also provides a natural platform for relevance weighting and selection. We applied this model on a 2.97 billion-word corpus and re-ranked the N-best list from Hiero, a state-of-the-art phrase-based system. Using BLEU as a metric, the re-ranked translation achieves a relative improvement of 4.8%, significantly better than the model-best translation.", "keyphrases": ["language model", "n-best list", "server"]} +{"id": "jiang-etal-2016-unsupervised", "title": "Unsupervised Neural Dependency Parsing", "abstract": "Unsupervised dependency parsing aims to learn a dependency grammar from text annotated with only POS tags. Various features and inductive biases are often used to incorporate prior knowledge into learning. One useful type of prior information is that there exist correlations between the parameters of grammar rules involving different POS tags. Previous work employed manually designed features or special prior distributions to encode such information. In this paper, we propose a novel approach to unsupervised dependency parsing that uses a neural model to predict grammar rule probabilities based on distributed representation of POS tags. The distributed representation is automatically learned from data and captures the correlations between POS tags. Our experiments show that our approach outperforms previous approaches utilizing POS correlations and is competitive with recent state-of-the-art approaches on nine different languages.", "keyphrases": ["pos tag", "grammar rule probability", "neural dmv model"]} +{"id": "chambers-etal-2007-classifying", "title": "Classifying Temporal Relations Between Events", "abstract": "This paper describes a fully automatic two-stage machine learning architecture that learns temporal relations between pairs of events. The first stage learns the temporal attributes of single event descriptions, such as tense, grammatical aspect, and aspectual class. These imperfect guesses, combined with other linguistic features, are then used in a second stage to classify the temporal relationship between two events. We present both an analysis of our new features and results on the TimeBank Corpus that is 3% higher than previous work that used perfect human tagged features.", "keyphrases": ["temporal relation", "stage", "linguistic feature"]} +{"id": "yoshikawa-etal-2017-stair", "title": "STAIR Captions: Constructing a Large-Scale Japanese Image Caption Dataset", "abstract": "In recent years, automatic generation of image descriptions (captions), that is, image captioning, has attracted a great deal of attention. In this paper, we particularly consider generating Japanese captions for images. Since most available caption datasets have been constructed for English language, there are few datasets for Japanese. To tackle this problem, we construct a large-scale Japanese image caption dataset based on images from MS-COCO, which is called STAIR Captions. STAIR Captions consists of 820,310 Japanese captions for 164,062 images. 
In the experiment, we show that a neural network trained using STAIR Captions can generate more natural and better Japanese captions, compared to those generated using English-Japanese machine translation after generating English captions.", "keyphrases": ["image caption dataset", "image description", "stair captions"]} +{"id": "grenager-etal-2005-unsupervised", "title": "Unsupervised Learning of Field Segmentation Models for Information Extraction", "abstract": "The applicability of many current information extraction techniques is severely limited by the need for supervised training data. We demonstrate that for certain field structured extraction tasks, such as classified advertisements and bibliographic citations, small amounts of prior knowledge can be used to learn effective models in a primarily unsupervised fashion. Although hidden Markov models (HMMs) provide a suitable generative model for field structured text, general unsupervised HMM learning fails to learn useful structure in either of our domains. However, one can dramatically improve the quality of the learned structure by exploiting simple prior knowledge of the desired solutions. In both domains, we found that unsupervised methods can attain accuracies with 400 unlabeled examples comparable to those attained by supervised methods on 50 labeled examples, and that semi-supervised methods can make good use of small amounts of labeled data.", "keyphrases": ["field segmentation", "information extraction", "markov model", "hmm", "unsupervised learning"]} +{"id": "barrachina-etal-2009-statistical", "title": "Statistical Approaches to Computer-Assisted Translation", "abstract": "Current machine translation (MT) systems are still not perfect. In practice, the output from these systems needs to be edited to correct errors. A way of increasing the productivity of the whole translation process (MT plus human work) is to incorporate the human correction activities within the translation process itself, thereby shifting the MT paradigm to that of computer-assisted translation. This model entails an iterative process in which the human translator activity is included in the loop: In each iteration, a prefix of the translation is validated (accepted or amended) by the human and the system computes its best (or n-best) translation suffix hypothesis to complete this prefix. A successful framework for MT is the so-called statistical (or pattern recognition) framework. Interestingly, within this framework, the adaptation of MT systems to the interactive scenario affects mainly the search process, allowing a great reuse of successful techniques and models. In this article, alignment templates, phrase-based models, and stochastic finite-state transducers are used to develop computer-assisted translation systems. These systems were assessed in a European project (TransType2) in two real tasks: the translation of printer manuals and the translation of the Bulletin of the European Union. In each task, the following three pairs of languages were involved (in both translation directions): English-Spanish, English-German, and English-French.", "keyphrases": ["machine translation", "translator activity", "prefix", "applicability"]} +{"id": "shimizu-etal-2008-metric", "title": "Metric Learning for Synonym Acquisition", "abstract": "The distance or similarity metric plays an important role in many natural language processing (NLP) tasks. 
Previous studies have demonstrated the effectiveness of a number of metrics such as the Jaccard coefficient, especially in synonym acquisition. While the existing metrics perform quite well, to further improve performance, we propose the use of a supervised machine learning algorithm that fine-tunes them. Given the known instances of similar or dissimilar words, we estimated the parameters of the Mahalanobis distance. We compared a number of metrics in our experiments, and the results show that the proposed metric has a higher mean average precision than other metrics.", "keyphrases": ["synonym acquisition", "mahalanobis distance", "metric learning"]} +{"id": "melamed-etal-2003-precision", "title": "Precision and Recall of Machine Translation", "abstract": "Machine translation can be evaluated using precision, recall, and the F-measure. These standard measures have significantly higher correlation with human judgments than recently proposed alternatives. More importantly, the standard measures have an intuitive interpretation, which can facilitate insights into how MT systems might be improved. The relevant software is publicly available.", "keyphrases": ["machine translation", "evaluation method", "gtm metric", "brevity penalty"]} +{"id": "elfardy-diab-2012-token", "title": "Token Level Identification of Linguistic Code Switching", "abstract": "Typically native speakers of Arabic mix dialectal Arabic and Modern Standard Arabic in the same utterance. This phenomenon is known as linguistic code switching (LCS). It is a very challenging task to identify these LCS points in written text where we don\u2019t have an accompanying speech signal. In this paper, we address automatic identification of LCS points in Arabic social media text by identifying token level dialectal words. We present an unsupervised approach that employs a set of dictionaries, sound-change rules, and language models to tackle this problem. We tune and test the performance of our approach against human-annotated Egyptian and Levantine discussion fora datasets. Two types of annotations on the token level are obtained for each dataset: context sensitive and context insensitive annotation. We achieve a token level F\u03b2=1 score of 74% and 72.4% on the context-sensitive development and test datasets, respectively. On the context insensitive annotated data, we achieve a token level F\u03b2=1 score of 84.4% and 84.9% on the development and test datasets, respectively.", "keyphrases": ["identification", "linguistic code switching", "dialectal arabic", "sound-change rule"]} +{"id": "malandrakis-etal-2019-controlled", "title": "Controlled Text Generation for Data Augmentation in Intelligent Artificial Agents", "abstract": "Data availability is a bottleneck during early stages of development of new capabilities for intelligent artificial agents. We investigate the use of text generation techniques to augment the training data of a popular commercial artificial agent across categories of functionality, with the goal of faster development of new functionality. We explore a variety of encoder-decoder generative models for synthetic training data generation and propose using conditional variational auto-encoders. Our approach requires only direct optimization, works well with limited data and significantly outperforms the previous controlled text generation techniques. 
Further, the generated data are used as additional training samples in an extrinsic intent classification task, leading to improved performance by up to 5% absolute f-score in low-resource cases, validating the usefulness of our approach.", "keyphrases": ["data augmentation", "text generation technique", "auto-encoder"]} +{"id": "yan-etal-2020-multi", "title": "Multi-Unit Transformers for Neural Machine Translation", "abstract": "Transformer models achieve remarkable success in Neural Machine Translation. Many efforts have been devoted to deepening the Transformer by stacking several units (i.e., a combination of Multihead Attentions and FFN) in a cascade, while the investigation over multiple parallel units draws little attention. In this paper, we propose the Multi-Unit Transformer (MUTE), which aims to promote the expressiveness of the Transformer by introducing diverse and complementary units. Specifically, we use several parallel units and show that modeling with multiple units improves model performance and introduces diversity. Further, to better leverage the advantage of the multi-unit setting, we design a biased module and a sequential dependency that guide and encourage complementariness among different units. Experimental results on three machine translation tasks, the NIST Chinese-to-English, WMT'14 English-to-German and WMT'18 Chinese-to-English, show that the MUTE models significantly outperform the Transformer-Base, by up to +1.52, +1.90 and +1.10 BLEU points, with only a mild drop in inference speed (about 3.1%). In addition, our methods also surpass the Transformer-Big model, with only 54% of its parameters. These results demonstrate the effectiveness of the MUTE, as well as its efficiency in both the inference process and parameter usage.", "keyphrases": ["transformer", "neural machine translation", "remarkable success", "model performance"]} +{"id": "waltinger-2010-germanpolarityclues", "title": "GermanPolarityClues: A Lexical Resource for German Sentiment Analysis", "abstract": "In this paper, we propose GermanPolarityClues, a new publicly available lexical resource for sentiment analysis for the German language. While sentiment analysis and polarity classification have been extensively studied at different document levels (e.g. sentences and phrases), only a few approaches explored the effect of a polarity-based feature selection and subjectivity resources for the German language. This paper evaluates four different English and three different German sentiment resources in a comparative manner by combining a polarity-based feature selection with an SVM-based machine learning classifier. Using a semi-automatic translation approach, we were able to construct three different resources for a German sentiment analysis. The manually finalized GermanPolarityClues dictionary offers thereby a number of 10,141 polarity features, associated to three numerical polarity scores, determining the positive, negative and neutral direction of specific term features. While the results show that the size of dictionaries clearly correlates with polarity-based feature coverage, this property does not correlate with classification accuracy. 
Using a polarity-based feature selection, considering a minimum amount of prior polarity features, in combination with SVM-based machine learning methods exhibits for both languages the best performance (F1: 0.83-0.88).", "keyphrases": ["lexical resource", "german sentiment analysis", "germanpolarityclue"]} +{"id": "fei-liu-2016-breaking", "title": "Breaking the Closed World Assumption in Text Classification", "abstract": "Existing research on multiclass text classification mostly makes the closed world assumption, which focuses on designing accurate classifiers under the assumption that all test classes are known at training time. A more realistic scenario is to expect unseen classes during testing (open world). In this case, the goal is to design a learning system that classifies documents of the known classes into their respective classes and also to reject documents from unknown classes. This problem is called open (world) classification. This paper approaches the problem by reducing the open space risk while balancing the empirical risk. It proposes to use a new learning strategy, called center-based similarity (CBS) space learning (or CBS learning), to provide a novel solution to the problem. Extensive experiments across two datasets show that CBS learning gives promising results on multiclass open text classification compared to state-of-the-art baselines.", "keyphrases": ["assumption", "text classification", "cbs learning", "unseen intent"]} +{"id": "lu-ng-2011-probabilistic", "title": "A Probabilistic Forest-to-String Model for Language Generation from Typed Lambda Calculus Expressions", "abstract": "This paper describes a novel probabilistic approach for generating natural language sentences from their underlying semantics in the form of typed lambda calculus. The approach is built on top of a novel reduction-based weighted synchronous context free grammar formalism, which facilitates the transformation process from typed lambda calculus into natural language sentences. Sentences can then be generated based on such grammar rules with a log-linear model. To acquire such grammar rules automatically in an unsupervised manner, we also propose a novel approach with a generative model, which maps from sub-expressions of logical forms to word sequences in natural language sentences. Experiments on benchmark datasets for both English and Chinese generation tasks yield significant improvements over results obtained by two state-of-the-art machine translation models, in terms of both automatic metrics and human evaluation.", "keyphrases": ["language generation", "lambda calculus", "synchronous context-free grammar"]} +{"id": "mehdad-etal-2013-abstractive", "title": "Abstractive Meeting Summarization with Entailment and Fusion", "abstract": "We propose a novel end-to-end framework for abstractive meeting summarization. We cluster sentences in the input into communities and build an entailment graph over the sentence communities to identify and select the most relevant sentences. We then aggregate those selected sentences by means of a word graph model. We exploit a ranking strategy to select the best path in the word graph as an abstract sentence. Despite not relying on the syntactic structure, our approach significantly outperforms previous models for meeting summarization in terms of informativeness. 
Moreover, the longer sentences generated by our method are competitive with shorter sentences generated by the previous word graph model in terms of grammaticality.", "keyphrases": ["entailment graph", "ranking strategy", "abstractive meeting summarization", "non-redundant sentence"]} +{"id": "quirk-etal-2015-language", "title": "Language to Code: Learning Semantic Parsers for If-This-Then-That Recipes", "abstract": "Using natural language to write programs is a touchstone problem for computational linguistics. We present an approach that learns to map natural-language descriptions of simple \u201cif-then\u201d rules to executable code. By training and testing on a large corpus of naturally-occurring programs (called \u201crecipes\u201d) and their natural language descriptions, we demonstrate the ability to effectively map language to code. We compare a number of semantic parsing approaches on the highly noisy training data collected from ordinary users, and find that loosely synchronous systems perform best.", "keyphrases": ["recipe", "executable code", "semantic parsing"]} +{"id": "chen-yih-2020-open", "title": "Open-Domain Question Answering", "abstract": "This tutorial provides a comprehensive and coherent overview of cutting-edge research in open-domain question answering (QA), the task of answering questions using a large collection of documents of diversified topics. We will start by first giving a brief historical background, discussing the basic setup and core technical challenges of the research problem, and then describe modern datasets with the common evaluation metrics and benchmarks. The focus will then shift to cutting-edge models proposed for open-domain QA, including two-stage retriever-reader approaches, dense retriever and end-to-end training, and retriever-free methods. Finally, we will cover some hybrid approaches using both text and large knowledge bases and conclude the tutorial with important open questions. We hope that the tutorial will not only help the audience to acquire up-to-date knowledge but also provide new perspectives to stimulate the advances of open-domain QA research in the next phase.", "keyphrases": ["retrieval", "open-domain question", "major step"]} +{"id": "mccarthy-carroll-2003-disambiguating", "title": "Disambiguating Nouns, Verbs, and Adjectives Using Automatically Acquired Selectional Preferences", "abstract": "Selectional preferences have been used by word sense disambiguation (WSD) systems as one source of disambiguating information. We evaluate WSD using selectional preferences acquired for English adjective-noun, subject, and direct object grammatical relationships with respect to a standard test corpus. The selectional preferences are specific to verb or adjective classes, rather than individual word forms, so they can be used to disambiguate the co-occurring adjectives and verbs, rather than just the nominal argument heads. We also investigate use of the one-sense-per-discourse heuristic to propagate a sense tag for a word to other occurrences of the same word within the current document in order to increase coverage. Although the preferences perform well in comparison with other unsupervised WSD systems on the same corpus, the results show that for many applications, further knowledge sources would be required to achieve an adequate level of accuracy and coverage. 
In addition to quantifying performance, we analyze the results to investigate the situations in which the selectional preferences achieve the best precision and in which the one-sense-per-discourse heuristic increases performance.", "keyphrases": ["adjective", "selectional preference", "word sense disambiguation"]} +{"id": "braud-etal-2017-cross", "title": "Cross-lingual RST Discourse Parsing", "abstract": "Discourse parsing is an integral part of understanding information flow and argumentative structure in documents. Most previous research has focused on inducing and evaluating models from the English RST Discourse Treebank. However, discourse treebanks for other languages exist, including Spanish, German, Basque, Dutch and Brazilian Portuguese. The treebanks share the same underlying linguistic theory, but differ slightly in the way documents are annotated. In this paper, we present (a) a new discourse parser which is simpler, yet competitive with the state of the art for English (significantly better on 2/3 metrics), (b) a harmonization of discourse treebanks across languages, enabling us to present (c) what to the best of our knowledge are the first experiments on cross-lingual discourse parsing.", "keyphrases": ["other language", "basque", "discourse parser"]} +{"id": "druck-etal-2009-semi", "title": "Semi-supervised Learning of Dependency Parsers using Generalized Expectation Criteria", "abstract": "In this paper, we propose a novel method for semi-supervised learning of non-projective log-linear dependency parsers using directly expressed linguistic prior knowledge (e.g. a noun's parent is often a verb). Model parameters are estimated using a generalized expectation (GE) objective function that penalizes the mismatch between model predictions and linguistic expectation constraints. In a comparison with two prominent \"unsupervised\" learning methods that require indirect biasing toward the correct syntactic structure, we show that GE can attain better accuracy with as few as 20 intuitive constraints. We also present positive experimental results on longer sentences in multiple languages.", "keyphrases": ["dependency parser", "noun", "parent", "semi-supervised learning"]} +{"id": "hong-etal-2019-faspell", "title": "FASPell: A Fast, Adaptable, Simple, Powerful Chinese Spell Checker Based On DAE-Decoder Paradigm", "abstract": "We propose a Chinese spell checker \u2013 FASPell based on a new paradigm which consists of a denoising autoencoder (DAE) and a decoder. In comparison with previous state-of-the-art models, the new paradigm allows our spell checker to be Faster in computation, readily Adaptable to both simplified and traditional Chinese texts produced by either humans or machines, and to require a much Simpler structure while being just as Powerful in both error detection and correction. These four achievements are made possible because the new paradigm circumvents two bottlenecks. First, the DAE curtails the amount of Chinese spell checking data needed for supervised learning (to 10k sentences) by leveraging the power of unsupervisedly pre-trained masked language model as in BERT, XLNet, MASS etc. 
Second, the decoder helps to eliminate the use of a confusion set that is deficient in flexibility and sufficiency of utilizing the salient feature of Chinese character similarity.", "keyphrases": ["chinese spell checker", "dae-decoder paradigm", "autoencoder", "language model", "candidate"]} +{"id": "min-etal-2020-syntactic", "title": "Syntactic Data Augmentation Increases Robustness to Inference Heuristics", "abstract": "Pretrained neural models such as BERT, when fine-tuned to perform natural language inference (NLI), often show high accuracy on standard datasets, but display a surprising lack of sensitivity to word order on controlled challenge sets. We hypothesize that this issue is not primarily caused by the pretrained model's limitations, but rather by the paucity of crowdsourced NLI examples that might convey the importance of syntactic structure at the fine-tuning stage. We explore several methods to augment standard training sets with syntactically informative examples, generated by applying syntactic transformations to sentences from the MNLI corpus. The best-performing augmentation method, subject/object inversion, improved BERT's accuracy on controlled examples that diagnose sensitivity to word order from 0.28 to 0.73, without affecting performance on the MNLI test set. This improvement generalized beyond the particular construction used for data augmentation, suggesting that augmentation causes BERT to recruit abstract syntactic representations.", "keyphrases": ["robustness", "nli example", "augmentation method", "object inversion", "syntactic data augmentation"]} +{"id": "irvine-etal-2013-measuring", "title": "Measuring Machine Translation Errors in New Domains", "abstract": "We develop two techniques for analyzing the effect of porting a machine translation system to a new domain. One is a macro-level analysis that measures how domain shift affects corpus-level evaluation; the second is a micro-level analysis for word-level errors. We apply these methods to understand what happens when a Parliament-trained phrase-based machine translation system is applied in four very different domains: news, medical texts, scientific articles and movie subtitles. We present quantitative and qualitative experiments that highlight opportunities for future research in domain adaptation for machine translation.", "keyphrases": ["machine translation error", "domain adaptation", "choice"]} +{"id": "hu-etal-2011-interactive", "title": "Interactive Topic Modeling", "abstract": "Topic models have been used extensively as a tool for corpus exploration, and a cottage industry has developed to tweak topic models to better encode human intuitions or to better model data. However, creating such extensions requires expertise in machine learning unavailable to potential end-users of topic modeling software. In this work, we develop a framework for allowing users to iteratively refine the topics discovered by models such as latent Dirichlet allocation (LDA) by adding constraints that enforce that sets of words must appear together in the same topic. 
We incorporate these constraints interactively by selectively removing elements in the state of a Markov Chain used for inference; we investigate a variety of methods for incorporating this information and demonstrate that these interactively added constraints improve topic usefulness for simulated and actual user sessions.", "keyphrases": ["exploration", "iteration", "interactive topic modeling", "itm", "user feedback"]} +{"id": "bourgonje-etal-2017-clickbait", "title": "From Clickbait to Fake News Detection: An Approach based on Detecting the Stance of Headlines to Articles", "abstract": "We present a system for the detection of the stance of headlines with regard to their corresponding article bodies. The approach can be applied in fake news, especially clickbait detection scenarios. The component is part of a larger platform for the curation of digital content; we consider veracity and relevancy an increasingly important part of curating online information. We want to contribute to the debate on how to deal with fake news and related online phenomena with technological means, by providing means to separate related from unrelated headlines and further classifying the related headlines. On a publicly available data set annotated for the stance of headlines with regard to their corresponding article bodies, we achieve a (weighted) accuracy score of 89.59.", "keyphrases": ["clickbait", "fake news", "propaganda"]} +{"id": "wang-etal-2021-transprompt", "title": "TransPrompt: Towards an Automatic Transferable Prompting Framework for Few-shot Text Classification", "abstract": "Recent studies have shown that prompts improve the performance of large pre-trained language models for few-shot text classification. Yet, it is unclear how the prompting knowledge can be transferred across similar NLP tasks for the purpose of mutual reinforcement. Based on continuous prompt embeddings, we propose TransPrompt, a transferable prompting framework for few-shot learning across similar tasks. In TransPrompt, we employ a multi-task meta-knowledge acquisition procedure to train a meta-learner that captures cross-task transferable knowledge. Two de-biasing techniques are further designed to make it more task-agnostic and unbiased towards any tasks. After that, the meta-learner can be adapted to target tasks with high accuracy. Extensive experiments show that TransPrompt outperforms single-task and cross-task strong baselines over multiple NLP tasks and datasets. We further show that the meta-learner can effectively improve the performance on previously unseen tasks; and TransPrompt also outperforms strong fine-tuning baselines when learning with full training sets.", "keyphrases": ["prompt", "few-shot text classification", "language model", "similar task"]} +{"id": "fan-gardent-2020-multilingual", "title": "Multilingual AMR-to-Text Generation", "abstract": "Generating text from structured data is challenging because it requires bridging the gap between (i) structure and natural language (NL) and (ii) semantically underspecified input and fully specified NL output. Multilingual generation brings in an additional challenge: that of generating into languages with varied word order and morphological properties. In this work, we focus on Abstract Meaning Representations (AMRs) as structured input, where previous research has overwhelmingly focused on generating only into English. 
We leverage advances in cross-lingual embeddings, pretraining, and multilingual models to create multilingual AMR-to-text models that generate in twenty one different languages. Our multilingual models surpass baselines that generate into one language in eighteen languages, based on automatic metrics. We analyze the ability of our multilingual models to accurately capture morphology and word order using human evaluation, and find that native speakers judge our generations to be fluent.", "keyphrases": ["amr", "different language", "multilingual amr-to-text generation"]} +{"id": "lai-etal-2021-thank", "title": "Thank you BART! Rewarding Pre-Trained Models Improves Formality Style Transfer", "abstract": "Scarcity of parallel data causes formality style transfer models to have scarce success in preserving content. We show that fine-tuning pre-trained language (GPT-2) and sequence-to-sequence (BART) models boosts content preservation, and that this is possible even with limited amounts of parallel data. Augmenting these models with rewards that target style and content \u2013the two core aspects of the task\u2013 we achieve a new state-of-the-art.", "keyphrases": ["bart", "style transfer model", "sequence-to-sequence", "language model"]} +{"id": "limsopatham-collier-2016-normalising", "title": "Normalising Medical Concepts in Social Media Texts by Learning Semantic Representation", "abstract": "Automatically recognising medical concepts mentioned in social media messages (e.g. tweets) enables several applications for enhancing health quality of people in a community, e.g. real-time monitoring of infectious diseases in population. However, the discrepancy between the type of language used in social media and medical ontologies poses a major challenge. Existing studies deal with this challenge by employing techniques, such as lexical term matching and statistical machine translation. In this work, we handle the medical concept normalisation at the semantic level. We investigate the use of neural networks to learn the transition between layman\u2019s language used in social media messages and formal medical language used in the descriptions of medical concepts in a standard ontology. We evaluate our approaches using three different datasets, where social media texts are extracted from Twitter messages and blog posts. Our experimental results show that our proposed approaches significantly and consistently outperform existing effective baselines, which achieved state-of-the-art performance on several medical concept normalisation tasks, by up to 44%.", "keyphrases": ["medical concept", "standard ontology", "social medium text"]} +{"id": "mao-etal-2019-hierarchical", "title": "Hierarchical Text Classification with Reinforced Label Assignment", "abstract": "While existing hierarchical text classification (HTC) methods attempt to capture label hierarchies for model training, they either make local decisions regarding each label or completely ignore the hierarchy information during inference. To solve the mismatch between training and inference as well as modeling label dependencies in a more principled way, we formulate HTC as a Markov decision process and propose to learn a Label Assignment Policy via deep reinforcement learning to determine where to place an object and when to stop the assignment process. The proposed method, HiLAP, explores the hierarchy during both training and inference time in a consistent manner and makes inter-dependent decisions. 
As a general framework, HiLAP can incorporate different neural encoders as base models for end-to-end training. Experiments on five public datasets and four base models show that HiLAP yields an average improvement of 33.4% in Macro-F1 over flat classifiers and outperforms state-of-the-art HTC methods by a large margin. Data and code can be found at .", "keyphrases": ["htc", "reinforcement learning", "hierarchical text classification"]} +{"id": "van-der-plas-etal-2011-scaling", "title": "Scaling up Automatic Cross-Lingual Semantic Role Annotation", "abstract": "Broad-coverage semantic annotations for training statistical learners are only available for a handful of languages. Previous approaches to cross-lingual transfer of semantic annotations have addressed this problem with encouraging results on a small scale. In this paper, we scale up previous efforts by using an automatic approach to semantic annotation that does not rely on a semantic ontology for the target language. Moreover, we improve the quality of the transferred semantic annotations by using a joint syntactic-semantic parser that learns the correlations between syntax and semantics of the target language and smooths out the errors from automatic transfer. We reach a labelled F-measure for predicates and arguments of only 4% and 9% points, respectively, lower than the upper bound from manual annotations.", "keyphrases": ["cross-lingual transfer", "previous effort", "joint syntactic-semantic parser", "word alignment", "annotation projection"]} +{"id": "ma-etal-2018-rumor", "title": "Rumor Detection on Twitter with Tree-structured Recursive Neural Networks", "abstract": "Automatic rumor detection is technically very challenging. In this work, we try to learn discriminative features from tweets content by following their non-sequential propagation structure and generate more powerful representations for identifying different type of rumors. We propose two recursive neural models based on a bottom-up and a top-down tree-structured neural networks for rumor representation learning and classification, which naturally conform to the propagation layout of tweets. Results on two public Twitter datasets demonstrate that our recursive neural models 1) achieve much better performance than state-of-the-art approaches; 2) demonstrate superior capacity on detecting rumors at very early stage.", "keyphrases": ["twitter", "recursive neural network", "rumor detection", "propagation tree"]} +{"id": "li-etal-2019-logic", "title": "A Logic-Driven Framework for Consistency of Neural Models", "abstract": "While neural models show remarkable accuracy on individual predictions, their internal beliefs can be inconsistent across examples. In this paper, we formalize such inconsistency as a generalization of prediction error. We propose a learning framework for constraining models using logic rules to regularize them away from inconsistency. Our framework can leverage both labeled and unlabeled examples and is directly compatible with off-the-shelf learning schemes without model redesign. 
We instantiate our framework on natural language inference, where experiments show that enforcing invariants stated in logic can help make the predictions of neural models both accurate and consistent.", "keyphrases": ["logic-driven framework", "consistency", "belief", "language inference"]} +{"id": "sulubacak-etal-2016-universal", "title": "Universal Dependencies for Turkish", "abstract": "The Universal Dependencies (UD) project was conceived after the substantial recent interest in unifying annotation schemes across languages. With its own annotation principles and abstract inventory for parts of speech, morphosyntactic features and dependency relations, UD aims to facilitate multilingual parser development, cross-lingual learning, and parsing research from a language typology perspective. This paper presents the Turkish IMST-UD Treebank, the first Turkish treebank to be in a UD release. The IMST-UD Treebank was automatically converted from the IMST Treebank, which was also recently released. We describe this conversion procedure in detail, complete with mapping tables. We also present our evaluation of the parsing performances of both versions of the IMST Treebank. Our findings suggest that the UD framework is at least as viable for Turkish as the original annotation framework of the IMST Treebank.", "keyphrases": ["treebank", "imst", "universal dependencies"]} +{"id": "kumar-etal-2019-submodular", "title": "Submodular Optimization-based Diverse Paraphrasing and its Effectiveness in Data Augmentation", "abstract": "Inducing diversity in the task of paraphrasing is an important problem in NLP with applications in data augmentation and conversational agents. Previous paraphrasing approaches have mainly focused on the issue of generating semantically similar paraphrases while paying little attention towards diversity. In fact, most of the methods rely solely on top-k beam search sequences to obtain a set of paraphrases. The resulting set, however, contains many structurally similar sentences. In this work, we focus on the task of obtaining highly diverse paraphrases while not compromising on paraphrasing quality. We provide a novel formulation of the problem in terms of monotone submodular function maximization, specifically targeted towards the task of paraphrasing. Additionally, we demonstrate the effectiveness of our method for data augmentation on multiple tasks such as intent classification and paraphrase recognition. In order to drive further research, we have made the source code available.", "keyphrases": ["paraphrase", "data augmentation", "submodular function maximization"]} +{"id": "mordido-meinel-2020-mark", "title": "Mark-Evaluate: Assessing Language Generation using Population Estimation Methods", "abstract": "We propose a family of metrics to assess language generation derived from population estimation methods widely used in ecology. More specifically, we use mark-recapture and maximum-likelihood methods that have been applied over the past several decades to estimate the size of closed populations in the wild. We propose three novel metrics: ME_Petersen and ME_CAPTURE, which retrieve a single-valued assessment, and ME_Schnabel which returns a double-valued metric to assess the evaluation set in terms of quality and diversity, separately. In synthetic experiments, our family of methods is sensitive to drops in quality and diversity. 
Moreover, our methods show a higher correlation to human evaluation than existing metrics on several challenging tasks, namely unconditional language generation, machine translation, and text summarization.", "keyphrases": ["language generation", "population estimation method", "family", "ecology"]} +{"id": "reynolds-etal-2014-view", "title": "A VIEW of Russian: Visual Input Enhancement and Adaptive Feedback", "abstract": "We explore the challenges and opportunities which arise in developing automatic visual input enhancement activities for Russian with a focus on target selection and adaptive feedback. Russian, a language with a rich fusional morphology, has many syntactically relevant forms that are not transparent to the language learner, which makes it a good candidate for visual input enhancement (VIE). VIE essentially supports incidental focus on form by increasing the salience of language forms to support noticing by the learner. The freely available VIEW system (Meurers et al., 2010) was designed to automatically generate VIE activities from any web content. We extend VIEW to Russian and discuss connected research issues regarding target selection, ambiguity management, prompt generation, and distractor generation. We show that the same information and techniques used for target selection can often be repurposed for adaptive feedback. Authentic Text ICALL (ATICALL) systems incorporating only native-language NLP, without the NLP analysis specific to learner language that is characteristic of Intelligent Language Tutoring Systems (ILTS), thus can support some forms of adaptive feedback. ATICALL and ILTS represent a spectrum of possibilities rather than two categorically distinct enterprises.", "keyphrases": ["view", "visual input enhancement", "adaptive feedback"]} +{"id": "li-etal-2021-mtop", "title": "MTOP: A Comprehensive Multilingual Task-Oriented Semantic Parsing Benchmark", "abstract": "Scaling semantic parsing models for task-oriented dialog systems to new languages is often expensive and time-consuming due to the lack of available datasets. Available datasets suffer from several shortcomings: a) they contain few languages b) they contain small amounts of labeled examples per language c) they are based on the simple intent and slot detection paradigm for non-compositional queries. In this paper, we present a new multilingual dataset, called MTOP, comprising of 100k annotated utterances in 6 languages across 11 domains. We use this dataset and other publicly available datasets to conduct a comprehensive benchmarking study on using various state-of-the-art multilingual pre-trained models for task-oriented semantic parsing. We achieve an average improvement of +6.3 points on Slot F1 for the two existing multilingual datasets, over best results reported in their experiments. Furthermore, we demonstrate strong zero-shot performance using pre-trained models combined with automatic translation and alignment, and a proposed distant supervision method to reduce the noise in slot label projection.", "keyphrases": ["semantic parsing", "intent", "multilingual dataset", "pre-trained model", "mtop"]} +{"id": "ishigaki-etal-2017-summarizing", "title": "Summarizing Lengthy Questions", "abstract": "In this research, we propose the task of question summarization. We first analyzed question-summary pairs extracted from a Community Question Answering (CQA) site, and found that a proportion of questions cannot be summarized by extractive approaches but requires abstractive approaches. 
We created a dataset by regarding the question-title pairs posted on the CQA site as question-summary pairs. By using the data, we trained extractive and abstractive summarization models, and compared them based on ROUGE scores and manual evaluations. Our experimental results show an abstractive method using an encoder-decoder model with a copying mechanism achieves better scores for both ROUGE-2 F-measure and the evaluations by human judges.", "keyphrases": ["lengthy question", "site", "summarization"]} +{"id": "marie-etal-2021-scientific", "title": "Scientific Credibility of Machine Translation Research: A Meta-Evaluation of 769 Papers", "abstract": "This paper presents the first large-scale meta-evaluation of machine translation (MT). We annotated MT evaluations conducted in 769 research papers published from 2010 to 2020. Our study shows that practices for automatic MT evaluation have dramatically changed during the past decade and follow concerning trends. An increasing number of MT evaluations exclusively rely on differences between BLEU scores to draw conclusions, without performing any kind of statistical significance testing nor human evaluation, while at least 108 metrics claiming to be better than BLEU have been proposed. MT evaluations in recent papers tend to copy and compare automatic metric scores from previous work to claim the superiority of a method or an algorithm without confirming neither exactly the same training, validating, and testing data have been used nor the metric scores are comparable. Furthermore, tools for reporting standardized metric scores are still far from being widely adopted by the MT community. After showing how the accumulation of these pitfalls leads to dubious evaluation, we propose a guideline to encourage better automatic MT evaluation along with a simple meta-evaluation scoring method to assess its credibility.", "keyphrases": ["credibility", "machine translation", "meta-evaluation"]} +{"id": "huang-chiang-2005-better", "title": "Better k-best Parsing", "abstract": "We discuss the relevance of k-best parsing to recent applications in natural language processing, and develop efficient algorithms for k-best trees in the framework of hypergraph parsing. To demonstrate the efficiency, scalability and accuracy of these algorithms, we present experiments on Bikel's implementation of Collins' lexicalized PCFG model, and on Chiang's CFG-based decoder for hierarchical phrase-based translation. We show in particular how the improved output of our algorithms has the potential to improve results from parse reranking systems and other applications.", "keyphrases": ["k-best", "hypergraph", "chiang", "good parse", "weight"]} +{"id": "bosselut-etal-2018-discourse", "title": "Discourse-Aware Neural Rewards for Coherent Text Generation", "abstract": "In this paper, we investigate the use of discourse-aware rewards with reinforcement learning to guide a model to generate long, coherent text. In particular, we propose to learn neural rewards to model cross-sentence ordering as a means to approximate desired discourse structure. 
Empirical results demonstrate that a generator trained with the learned reward produces more coherent and less repetitive text than models trained with cross-entropy or with reinforcement learning with commonly used scores as rewards.", "keyphrases": ["reward", "text generation", "discourse structure"]} +{"id": "devitt-ahmad-2007-sentiment", "title": "Sentiment Polarity Identification in Financial News: A Cohesion-based Approach", "abstract": "Text is not unadulterated fact. A text can make you laugh or cry but can it also make you short sell your stocks in company A and buy up options in company B? Research in the domain of finance strongly suggests that it can. Studies have shown that both the informational and affective aspects of news text affect the markets in profound ways, impacting on volumes of trades, stock prices, volatility and even future firm earnings. This paper aims to explore a computable metric of positive or negative polarity in financial news text which is consistent with human judgments and can be used in a quantitative analysis of news sentiment impact on financial markets. Results from a preliminary evaluation are presented and discussed.", "keyphrases": ["polarity", "financial news", "sentiment analysis"]} +{"id": "neves-etal-2018-findings", "title": "Findings of the WMT 2018 Biomedical Translation Shared Task: Evaluation on Medline test sets", "abstract": "Machine translation enables the automatic translation of textual documents between languages and can facilitate access to information only available in a given language for non-speakers of this language, e.g. research results presented in scientific publications. In this paper, we provide an overview of the Biomedical Translation shared task in the Workshop on Machine Translation (WMT) 2018, which specifically examined the performance of machine translation systems for biomedical texts. This year, we provided test sets of scientific publications from two sources (EDP and Medline) and for six language pairs (English with each of Chinese, French, German, Portuguese, Romanian and Spanish). We describe the development of the various test sets, the submissions that we received and the evaluations that we carried out. We obtained a total of 39 runs from six teams and some of this year's BLEU scores were somewhat higher than last year's, especially for teams that made use of biomedical resources or state-of-the-art MT algorithms (e.g. Transformer). Finally, our manual evaluation scored automatic translations higher than the reference translations for German and Spanish.", "keyphrases": ["wmt", "biomedical translation", "medline", "scientific abstract"]} +{"id": "minervini-riedel-2018-adversarially", "title": "Adversarially Regularising Neural NLI Models to Integrate Logical Background Knowledge", "abstract": "Adversarial examples are inputs to machine learning models designed to cause the model to make a mistake. They are useful for understanding the shortcomings of machine learning models, interpreting their results, and for regularisation. In NLP, however, most example generation strategies produce input text by using known, pre-specified semantic transformations, requiring significant manual effort and in-depth understanding of the problem and domain. In this paper, we investigate the problem of automatically generating adversarial examples that violate a set of given First-Order Logic constraints in Natural Language Inference (NLI). 
We reduce the problem of identifying such adversarial examples to a combinatorial optimisation problem, by maximising a quantity measuring the degree of violation of such constraints and by using a language model for generating linguistically-plausible examples. Furthermore, we propose a method for adversarially regularising neural NLI models for incorporating background knowledge. Our results show that, while the proposed method does not always improve results on the SNLI and MultiNLI datasets, it significantly and consistently increases the predictive accuracy on adversarially-crafted datasets \u2013 up to a 79.6% relative improvement \u2013 while drastically reducing the number of background knowledge violations. Furthermore, we show that adversarial examples transfer among model architectures, and that the proposed adversarial training procedure improves the robustness of NLI models to adversarial examples.", "keyphrases": ["nli", "adversarial example", "natural language inference"]} +{"id": "mao-etal-2008-chinese", "title": "Chinese Word Segmentation and Named Entity Recognition Based on Conditional Random Fields", "abstract": "Chinese word segmentation (CWS), named entity recognition (NER) and part-of-speech tagging is the lexical processing in Chinese language. This paper describes the work on these tasks done by France Telecom Team (Beijing) at the fourth International Chinese Language Processing Bakeoff. In particular, we employ Conditional Random Fields with different features for these tasks. In order to improve NER's relatively low recall, we exploit non-local features and alleviate class imbalanced distribution on NER dataset to enhance the recall and keep its relatively high precision. Some other post-processing measures such as consistency checking and transformation-based error-driven learning are used to improve word segmentation performance. Our systems participated in most CWS and POS tagging evaluations and all the NER tracks. As a result, our NER system achieves the first ranks on MSRA open track and MSRA/CityU closed track. Our CWS system achieves the first rank on CityU open track, which means that our systems achieve state-of-the-art performance on Chinese lexical processing.", "keyphrases": ["entity recognition", "non-local feature", "ner system", "chinese word segmentation"]} +{"id": "finkel-manning-2009-hierarchical", "title": "Hierarchical Bayesian Domain Adaptation", "abstract": "Multi-task learning is the problem of maximizing the performance of a system across a number of related tasks. When applied to multiple domains for the same task, it is similar to domain adaptation, but symmetric, rather than limited to improving performance on a target domain. We present a more principled, better performing model for this problem, based on the use of a hierarchical Bayesian prior. Each domain has its own domain-specific parameter for each feature but, rather than a constant prior over these parameters, the model instead links them via a hierarchical Bayesian global prior. This prior encourages the features to have similar weights across domains, unless there is good evidence to the contrary. We show that the method of (Daume III, 2007), which was presented as a simple \"preprocessing step,\" is actually equivalent, except our representation explicitly separates hyperparameters which were tied in his work. 
We demonstrate that allowing different values for these hyperparameters significantly improves performance over both a strong baseline and (Daume III, 2007) within both a conditional random field sequence model for named entity recognition and a discriminatively trained dependency parser.", "keyphrases": ["domain adaptation", "entity recognition", "bayesian extension"]} +{"id": "ng-2004-learning", "title": "Learning Noun Phrase Anaphoricity to Improve Coreference Resolution: Issues in Representation and Optimization", "abstract": "Knowledge of the anaphoricity of a noun phrase might be profitably exploited by a coreference system to bypass the resolution of non-anaphoric noun phrases. Perhaps surprisingly, recent attempts to incorporate automatically acquired anaphoricity information into coreference systems, however, have led to the degradation in resolution performance. This paper examines several key issues in computing and using anaphoricity information to improve learning-based coreference systems. In particular, we present a new corpus-based approach to anaphoricity determination. Experiments on three standard coreference data sets demonstrate the effectiveness of our approach.", "keyphrases": ["noun phrase", "coreference system", "anaphoricity information"]} +{"id": "abdul-mageed-etal-2020-nadi", "title": "NADI 2020: The First Nuanced Arabic Dialect Identification Shared Task", "abstract": "We present the results and findings of the First Nuanced Arabic Dialect Identification Shared Task (NADI). This Shared Task includes two subtasks: country-level dialect identification (Subtask 1) and province-level sub-dialect identification (Subtask 2). The data for the shared task covers a total of 100 provinces from 21 Arab countries and is collected from the Twitter domain. As such, NADI is the first shared task to target naturally-occurring fine-grained dialectal text at the sub-country level. A total of 61 teams from 25 countries registered to participate in the tasks, thus reflecting the interest of the community in this area. We received 47 submissions for Subtask 1 from 18 teams and 9 submissions for Subtask 2 from 9 teams.", "keyphrases": ["dialect identification", "arab country", "nadi shared task"]} +{"id": "maurya-etal-2021-zmbart", "title": "ZmBART: An Unsupervised Cross-lingual Transfer Framework for Language Generation", "abstract": "Despite the recent advancement in NLP research, cross-lingual transfer for natural language generation is relatively understudied. In this work, we transfer supervision from high resource language (HRL) to multiple low-resource languages (LRLs) for natural language generation (NLG). We consider four NLG tasks (text summarization, question generation, news headline generation, and distractor generation) and three syntactically diverse languages, i.e., English, Hindi, and Japanese. We propose an unsupervised cross-lingual language generation framework (called ZmBART) that does not use any parallel or pseudo-parallel/back-translated data. In this framework, we further pre-train mBART sequence-to-sequence denoising auto-encoder model with an auxiliary task using monolingual data of three languages. The objective function of the auxiliary task is close to the target tasks which enriches the multi-lingual latent representation of mBART and provides good initialization for target tasks. Then, this model is fine-tuned with task-specific supervised English data and directly evaluated with low-resource languages in the Zero-shot setting. 
To overcome catastrophic forgetting and spurious correlation issues, we applied freezing model component and data augmentation approaches respectively. This simple modeling approach gave us promising results. We experimented with few-shot training (with 1000 supervised data points) which boosted the model performance further. We performed several ablations and cross-lingual transferability analyses to demonstrate the robustness of ZmBART.", "keyphrases": ["cross-lingual transfer", "language generation", "mbart"]} +{"id": "cao-etal-2020-hypercore", "title": "HyperCore: Hyperbolic and Co-graph Representation for Automatic ICD Coding", "abstract": "The International Classification of Diseases (ICD) provides a standardized way for classifying diseases, which endows each disease with a unique code. ICD coding aims to assign proper ICD codes to a medical record. Since manual coding is very laborious and prone to errors, many methods have been proposed for the automatic ICD coding task. However, most of existing methods independently predict each code, ignoring two important characteristics: Code Hierarchy and Code Co-occurrence. In this paper, we propose a Hyperbolic and Co-graph Representation method (HyperCore) to address the above problem. Specifically, we propose a hyperbolic representation method to leverage the code hierarchy. Moreover, we propose a graph convolutional network to utilize the code co-occurrence. Experimental results on two widely used datasets demonstrate that our proposed model outperforms previous state-of-the-art methods.", "keyphrases": ["hyperbolic", "icd code", "code co-occurrence", "hypercore"]} +{"id": "pasini-navigli-2017-train", "title": "Train-O-Matic: Large-Scale Supervised Word Sense Disambiguation in Multiple Languages without Manual Training Data", "abstract": "Annotating large numbers of sentences with senses is the heaviest requirement of current Word Sense Disambiguation. We present Train-O-Matic, a language-independent method for generating millions of sense-annotated training instances for virtually all meanings of words in a language's vocabulary. The approach is fully automatic: no human intervention is required and the only type of human knowledge used is a WordNet-like resource. Train-O-Matic achieves consistently state-of-the-art performance across gold standard datasets and languages, while at the same time removing the burden of manual annotation. All the training data is available for research purposes at .", "keyphrases": ["word sense disambiguation", "multiple language", "large number", "train-o-matic"]} +{"id": "yao-etal-2013-automatic", "title": "Automatic Coupling of Answer Extraction and Information Retrieval", "abstract": "Information Retrieval (IR) and Answer Extraction are often designed as isolated or loosely connected components in Question Answering (QA), with repeated overengineering on IR, and not necessarily performance gain for QA. We propose to tightly integrate them by coupling automatically learned features for answer extraction to a shallow-structured IR model. 
Our method is very quick to implement, and significantly improves IR for QA (measured in Mean Average Precision and Mean Reciprocal Rank) by 10%-20% against an uncoupled retrieval baseline in both document and passage retrieval, which further leads to a downstream 20% improvement in QA F1.", "keyphrases": ["answer extraction", "information retrieval", "automatic coupling"]} +{"id": "ruppenhofer-etal-2008-finding", "title": "Finding the Sources and Targets of Subjective Expressions", "abstract": "As many popular text genres such as blogs or news contain opinions by multiple sources and about multiple targets, finding the sources and targets of subjective expressions becomes an important sub-task for automatic opinion analysis systems. We argue that while automatic semantic role labeling systems (ASRL) have an important contribution to make, they cannot solve the problem for all cases. Based on the experience of manually annotating opinions, sources, and targets in various genres, we present linguistic phenomena that require knowledge beyond that of ASRL systems. In particular, we address issues relating to the attribution of opinions to sources; sources and targets that are realized as zero-forms; and inferred opinions. We also discuss in some depth that for arguing attitudes we need to be able to recover propositions and not only argued-about entities. A recurrent theme of the discussion is that close attention to specific discourse contexts is needed to identify sources and targets correctly.", "keyphrases": ["subjective expression", "semantic role labeling", "well-trained srl model", "role technique"]} +{"id": "lui-cook-2013-classifying", "title": "Classifying English Documents by National Dialect", "abstract": "We investigate national dialect identification, the task of classifying English documents according to their country of origin. We use corpora of known national origin as a proxy for national dialect. In order to identify general (as opposed to corpus-specific) characteristics of national dialects of English, we make use of a variety of corpora of different sources, with inter-corpus variation in length, topic and register. The central intuition is that features that are predictive of national origin across different data sources are features that characterize a national dialect. We examine a number of classification approaches motivated by different areas of research, and evaluate the performance of each method across 3 national dialects: Australian, British, and Canadian English. Our results demonstrate that there are lexical and syntactic characteristics of each national dialect that are consistent across data sources.", "keyphrases": ["national dialect", "canadian english", "language identification", "text categorization", "statistical approach"]} +{"id": "lichtarge-etal-2019-corpora", "title": "Corpora Generation for Grammatical Error Correction", "abstract": "Grammatical Error Correction (GEC) has been recently modeled using the sequence-to-sequence framework. However, unlike sequence transduction problems such as machine translation, GEC suffers from the lack of plentiful parallel data. We describe two approaches for generating large parallel datasets for GEC using publicly available Wikipedia data. The first method extracts source-target pairs from Wikipedia edit histories with minimal filtration heuristics while the second method introduces noise into Wikipedia sentences via round-trip translation through bridge languages. 
Both strategies yield similar sized parallel corpora containing around 4B tokens. We employ an iterative decoding strategy that is tailored to the loosely supervised nature of our constructed corpora. We demonstrate that neural GEC models trained using either type of corpora give similar performance. Fine-tuning these models on the Lang-8 corpus and ensembling allows us to surpass the state of the art on both the CoNLL '14 benchmark and the JFLEG task. We present systematic analysis that compares the two approaches to data generation and highlights the effectiveness of ensembling.", "keyphrases": ["grammatical error correction", "gec", "wikipedia edit history", "noise", "data generation"]} +{"id": "chan-etal-2019-neural", "title": "Neural Keyphrase Generation via Reinforcement Learning with Adaptive Rewards", "abstract": "Generating keyphrases that summarize the main points of a document is a fundamental task in natural language processing. Although existing generative models are capable of predicting multiple keyphrases for an input document as well as determining the number of keyphrases to generate, they still suffer from the problem of generating too few keyphrases. To address this problem, we propose a reinforcement learning (RL) approach for keyphrase generation, with an adaptive reward function that encourages a model to generate both sufficient and accurate keyphrases. Furthermore, we introduce a new evaluation method that incorporates name variations of the ground-truth keyphrases using the Wikipedia knowledge base. Thus, our evaluation method can more robustly evaluate the quality of predicted keyphrases. Extensive experiments on five real-world datasets of different scales demonstrate that our RL approach consistently and significantly improves the performance of the state-of-the-art generative models with both conventional and new evaluation methods.", "keyphrases": ["keyphrase", "reinforcement learning", "generative model", "recall", "pre-trained model"]} +{"id": "gonen-etal-2020-greek", "title": "It's not Greek to mBERT: Inducing Word-Level Translations from Multilingual BERT", "abstract": "Recent works have demonstrated that multilingual BERT (mBERT) learns rich cross-lingual representations, that allow for transfer across languages. We study the word-level translation information embedded in mBERT and present two simple methods that expose remarkable translation capabilities with no fine-tuning. The results suggest that most of this information is encoded in a non-linear way, while some of it can also be recovered with purely linear tools. As part of our analysis, we test the hypothesis that mBERT learns representations which contain both a language-encoding component and an abstract, cross-lingual component, and explicitly identify an empirical language-identity subspace within mBERT representations.", "keyphrases": ["mbert", "word-level translation", "multilingual bert"]} +{"id": "zhu-etal-2020-return", "title": "The Return of Lexical Dependencies: Neural Lexicalized PCFGs", "abstract": "In this paper we demonstrate that context free grammar (CFG) based methods for grammar induction benefit from modeling lexical dependencies. This contrasts to the most popular current methods for grammar induction, which focus on discovering either constituents or dependencies. Previous approaches to marry these two disparate syntactic formalisms (e.g., lexicalized PCFGs) have been plagued by sparsity, making them unsuitable for unsupervised grammar induction. 
However, in this work, we present novel neural models of lexicalized PCFGs that allow us to overcome sparsity problems and effectively induce both constituents and dependencies within a single model. Experiments demonstrate that this unified framework results in stronger results on both representations than achieved when modeling either formalism alone.", "keyphrases": ["lexical dependency", "pcfg", "neural lpcfg", "equation"]} +{"id": "goldberg-elhadad-2010-efficient", "title": "An Efficient Algorithm for Easy-First Non-Directional Dependency Parsing", "abstract": "We present a novel deterministic dependency parsing algorithm that attempts to create the easiest arcs in the dependency structure first in a non-directional manner. Traditional deterministic parsing algorithms are based on a shift-reduce framework: they traverse the sentence from left-to-right and, at each step, perform one of a possible set of actions, until a complete tree is built. A drawback of this approach is that it is extremely local: while decisions can be based on complex structures on the left, they can look only at a few words to the right. In contrast, our algorithm builds a dependency tree by iteratively selecting the best pair of neighbours to connect at each parsing step. This allows incorporation of features from already built structures both to the left and to the right of the attachment point. The parser learns both the attachment preferences and the order in which they should be performed. The result is a deterministic, best-first, O(n log n) parser, which is significantly more accurate than best-first transition based parsers, and nears the performance of globally optimized parsing models.", "keyphrases": ["dependency parsing", "arc", "easy-first strategy"]} +{"id": "feng-lapata-2010-visual", "title": "Visual Information in Semantic Representation", "abstract": "The question of how meaning might be acquired by young children and represented by adult speakers of a language is one of the most debated topics in cognitive science. Existing semantic representation models are primarily amodal based on information provided by the linguistic input despite ample evidence indicating that the cognitive system is also sensitive to perceptual information. In this work we exploit the vast resource of images and associated documents available on the web and develop a model of multimodal meaning representation which is based on the linguistic and visual context. Experimental results show that a closer correspondence to human data can be obtained by taking the visual modality into account.", "keyphrases": ["visual information", "latent topic", "word meaning", "joint bimodal representation", "lda"]} +{"id": "hu-etal-2019-constrained", "title": "CAN: Constrained Attention Networks for Multi-Aspect Sentiment Analysis", "abstract": "Aspect level sentiment classification is a fine-grained sentiment analysis task. To detect the sentiment towards a particular aspect in a sentence, previous studies have developed various attention-based methods for generating aspect-specific sentence representations. However, the attention may inherently introduce noise and downgrade the performance. In this paper, we propose constrained attention networks (CAN), a simple yet effective solution, to regularize the attention for multi-aspect sentiment analysis, which alleviates the drawback of the attention mechanism. Specifically, we introduce orthogonal regularization on multiple aspects and sparse regularization on each single aspect. 
Experimental results on two public datasets demonstrate the effectiveness of our approach. We further extend our approach to multi-task settings and outperform the state-of-the-art methods.", "keyphrases": ["attention network", "multi-aspect sentiment analysis", "multiple aspect"]} +{"id": "katti-etal-2018-chargrid", "title": "Chargrid: Towards Understanding 2D Documents", "abstract": "We introduce a novel type of text representation that preserves the 2D layout of a document. This is achieved by encoding each document page as a two-dimensional grid of characters. Based on this representation, we present a generic document understanding pipeline for structured documents. This pipeline makes use of a fully convolutional encoder-decoder network that predicts a segmentation mask and bounding boxes. We demonstrate its capabilities on an information extraction task from invoices and show that it significantly outperforms approaches based on sequential text or document images.", "keyphrases": ["text representation", "document page", "chargrid"]} +{"id": "lane-bird-2020-interactive", "title": "Interactive Word Completion for Morphologically Complex Languages", "abstract": "Text input technologies for low-resource languages support literacy, content authoring, and language learning. However, tasks such as word completion pose a challenge for morphologically complex languages thanks to the combinatorial explosion of possible words. We have developed a method for morphologically-aware text input in Kunwinjku, a polysynthetic language of northern Australia. We modify an existing finite state recognizer to map input morph prefixes to morph completions, respecting the morphosyntax and morphophonology of the language. We demonstrate the portability of the method by applying it to Turkish. We show that the space of proximal morph completions is many orders of magnitude smaller than the space of full word completions for Kunwinjku. We provide a visualization of the morph completion space to enable the text completion parameters to be fine-tuned. Finally, we report on a web services deployment, along with a web interface which helps users enter morphologically complex words and which retrieves corresponding entries from the lexicon.", "keyphrases": ["word completion", "polysynthetic language", "australia", "turkish"]} +{"id": "kurniawan-etal-2021-ppt", "title": "PPT: Parsimonious Parser Transfer for Unsupervised Cross-Lingual Adaptation", "abstract": "Cross-lingual transfer is a leading technique for parsing low-resource languages in the absence of explicit supervision. Simple 'direct transfer' of a learned model based on a multilingual input encoding has provided a strong benchmark. This paper presents a method for unsupervised cross-lingual transfer that improves over direct transfer systems by using their output as implicit supervision as part of self-training on unlabelled text in the target language. The method assumes minimal resources and provides maximal flexibility by (a) accepting any pre-trained arc-factored dependency parser; (b) assuming no access to source language data; (c) supporting both projective and non-projective parsing; and (d) supporting multi-source transfer. With English as the source language, we show significant improvements over state-of-the-art transfer models on both distant and nearby languages, despite our conceptually simpler approach. We provide analyses of the choice of source languages for multi-source transfer, and the advantage of non-projective parsing. 
Our code is available online.", "keyphrases": ["cross-lingual transfer", "dependency parser", "ppt"]} +{"id": "kiss-strunk-2006-unsupervised", "title": "Unsupervised Multilingual Sentence Boundary Detection", "abstract": "In this article, we present a language-independent, unsupervised approach to sentence boundary detection. It is based on the assumption that a large number of ambiguities in the determination of sentence boundaries can be eliminated once abbreviations have been identified. Instead of relying on orthographic clues, the proposed system is able to detect abbreviations with high accuracy using three criteria that only require information about the candidate type itself and are independent of context: Abbreviations can be defined as a very tight collocation consisting of a truncated word and a final period, abbreviations are usually short, and abbreviations sometimes contain internal periods. We also show the potential of collocational evidence for two other important subtasks of sentence boundary disambiguation, namely, the detection of initials and ordinal numbers. The proposed system has been tested extensively on eleven different languages and on different text genres. It achieves good results without any further amendments or language-specific resources. We evaluate its performance against three different baselines and compare it to other systems for sentence boundary detection proposed in the literature.", "keyphrases": ["sentence boundary", "boundary detection", "abbreviation", "ordinal number"]} +{"id": "bugliarello-okazaki-2020-enhancing", "title": "Enhancing Machine Translation with Dependency-Aware Self-Attention", "abstract": "Most neural machine translation models only rely on pairs of parallel sentences, assuming syntactic information is automatically learned by an attention mechanism. In this work, we investigate different approaches to incorporate syntactic knowledge in the Transformer model and also propose a novel, parameter-free, dependency-aware self-attention mechanism that improves its translation quality, especially for long sentences and in low-resource scenarios. We show the efficacy of each approach on WMT English-German and English-Turkish, and WAT English-Japanese translation tasks.", "keyphrases": ["self-attention", "translation task", "dependency structure"]} +{"id": "chiang-etal-2008-online", "title": "Online Large-Margin Training of Syntactic and Structural Translation Features", "abstract": "Minimum-error-rate training (MERT) is a bottleneck for current development in statistical machine translation because it is limited in the number of weights it can reliably optimize. Building on the work of Watanabe et al., we explore the use of the MIRA algorithm of Crammer et al. as an alternative to MERT. We first show that by parallel processing and exploiting more of the parse forest, we can obtain results using MIRA that match or surpass MERT in terms of both translation quality and computational cost. We then test the method on two classes of features that address deficiencies in the Hiero hierarchical phrase-based model: first, we simultaneously train a large number of Marton and Resnik's soft syntactic constraints, and, second, we introduce a novel structural distortion model. In both cases we obtain significant improvements in translation performance. 
Optimizing them in combination, for a total of 56 feature weights, we improve performance by 2.6 Bleu on a subset of the NIST 2006 Arabic-English evaluation data.", "keyphrases": ["mira", "alternative", "hierarchical phrase-based model", "large number", "margin"]} +{"id": "ding-etal-2017-visualizing", "title": "Visualizing and Understanding Neural Machine Translation", "abstract": "While neural machine translation (NMT) has made remarkable progress in recent years, it is hard to interpret its internal workings due to the continuous representations and non-linearity of neural networks. In this work, we propose to use layer-wise relevance propagation (LRP) to compute the contribution of each contextual word to arbitrary hidden states in the attention-based encoder-decoder framework. We show that visualization with LRP helps to interpret the internal workings of NMT and analyze translation errors.", "keyphrases": ["neural machine translation", "layer-wise relevance propagation", "contextual word", "attention-based encoder-decoder framework", "translation error"]} +{"id": "subramanian-lee-2020-hierarchical", "title": "Hierarchical Evidence Set Modeling for Automated Fact Extraction and Verification", "abstract": "Automated fact extraction and verification is a challenging task that involves finding relevant evidence sentences from a reliable corpus to verify the truthfulness of a claim. Existing models either (i) concatenate all the evidence sentences, leading to the inclusion of redundant and noisy information; or (ii) process each claim-evidence sentence pair separately and aggregate all of them later, missing the early combination of related sentences for more accurate claim verification. Unlike the prior works, in this paper, we propose Hierarchical Evidence Set Modeling (HESM), a framework to extract evidence sets (each of which may contain multiple evidence sentences), and verify a claim to be supported, refuted or not enough info, by encoding and attending the claim and evidence sets at different levels of hierarchy. Our experimental results show that HESM outperforms 7 state-of-the-art methods for fact extraction and claim verification. Our source code is available at .", "keyphrases": ["evidence set modeling", "automated fact extraction", "verification"]} +{"id": "eisenstein-etal-2011-discovering", "title": "Discovering Sociolinguistic Associations with Structured Sparsity", "abstract": "We present a method to discover robust and interpretable sociolinguistic associations from raw geotagged text data. Using aggregate demographic statistics about the authors' geographic communities, we solve a multi-output regression problem between demographics and lexical frequencies. By imposing a composite \u21131,\u221e regularizer, we obtain structured sparsity, driving entire rows of coefficients to zero. We perform two regression studies. First, we use term frequencies to predict demographic attributes; our method identifies a compact set of words that are strongly associated with author demographics. Next, we conjoin demographic attributes into features, which we use to predict term frequencies. 
The composite regularizer identifies a small number of features, which correspond to communities of authors united by shared demographic and linguistic properties.", "keyphrases": ["structured sparsity", "text data", "demographic", "sociolinguistic pattern"]} +{"id": "wan-etal-2020-self", "title": "Self-Paced Learning for Neural Machine Translation", "abstract": "Recent studies have proven that the training of neural machine translation (NMT) can be facilitated by mimicking the learning process of humans. Nevertheless, achievements of such kind of curriculum learning rely on the quality of artificial schedule drawn up with the handcrafted features, e.g. sentence length or word rarity. We ameliorate this procedure with a more flexible manner by proposing self-paced learning, where NMT model is allowed to 1) automatically quantify the learning confidence over training examples; and 2) flexibly govern its learning via regulating the loss in each iteration step. Experimental results over multiple translation tasks demonstrate that the proposed model yields better performance than strong baselines and those models trained with human-designed curricula on both translation quality and convergence speed.", "keyphrases": ["neural machine translation", "training example", "translation quality", "self-paced learning", "well performance"]} +{"id": "angeli-etal-2015-leveraging", "title": "Leveraging Linguistic Structure For Open Domain Information Extraction", "abstract": "Relation triples produced by open domain information extraction (open IE) systems are useful for question answering, inference, and other IE tasks. Traditionally these are extracted using a large set of patterns; however, this approach is brittle on out-of-domain text and long-range dependencies, and gives no insight into the substructure of the arguments. We replace this large pattern set with a few patterns for canonically structured sentences, and shift the focus to a classifier which learns to extract self-contained clauses from longer sentences. We then run natural logic inference over these short clauses to determine the maximally specific arguments for each candidate triple. We show that our approach outperforms a state-of-the-art open IE system on the end-to-end TAC-KBP 2013 Slot Filling task.", "keyphrases": ["domain information extraction", "relation triple", "openie system", "knowledge base", "argument span"]} +{"id": "zhang-clark-2011-syntactic", "title": "Syntactic Processing Using the Generalized Perceptron and Beam Search", "abstract": "We study a range of syntactic processing tasks using a general statistical framework that consists of a global linear model, trained by the generalized perceptron together with a generic beam-search decoder. We apply the framework to word segmentation, joint segmentation and POS-tagging, dependency parsing, and phrase-structure parsing. Both components of the framework are conceptually and computationally very simple. The beam-search decoder only requires the syntactic processing task to be broken into a sequence of decisions, such that, at each stage in the process, the decoder is able to consider the top-n candidates and generate all possibilities for the next stage. Once the decoder has been defined, it is applied to the training data, using trivial updates according to the generalized perceptron to induce a model. This simple framework performs surprisingly well, giving accuracy results competitive with the state-of-the-art on all the tasks we consider. 
The computational simplicity of the decoder and training algorithm leads to significantly higher test speeds and lower training times than their main alternatives, including log-linear and large-margin training algorithms and dynamic-programming for decoding. Moreover, the framework offers the freedom to define arbitrary features which can make alternative training and decoding algorithms prohibitively slow. We discuss how the general framework is applied to each of the problems studied in this article, making comparisons with alternative learning and decoding algorithms. We also show how the comparability of candidates considered by the beam is an important factor in the performance. We argue that the conceptual and computational simplicity of the framework, together with its language-independent nature, make it a competitive choice for a range of syntactic processing tasks and one that should be considered for comparison by developers of alternative approaches.", "keyphrases": ["generalized perceptron", "beam search", "segmentation", "shift-reduce parser"]} +{"id": "kiritchenko-mohammad-2016-capturing", "title": "Capturing Reliable Fine-Grained Sentiment Associations by Crowdsourcing and Best\u2013Worst Scaling", "abstract": "Access to word-sentiment associations is useful for many applications, including sentiment analysis, stance detection, and linguistic analysis. However, manually assigning fine-grained sentiment association scores to words has many challenges with respect to keeping annotations consistent. We apply the annotation technique of Best-Worst Scaling to obtain real-valued sentiment association scores for words and phrases in three different domains: general English, English Twitter, and Arabic Twitter. We show that on all three domains the ranking of words by sentiment remains remarkably consistent even when the annotation process is repeated with a different set of annotators. We also, for the first time, determine the minimum difference in sentiment association that is perceptible to native speakers of a language.", "keyphrases": ["sentiment association", "crowdsourcing", "item"]} +{"id": "barriere-balahur-2020-improving", "title": "Improving Sentiment Analysis over non-English Tweets using Multilingual Transformers and Automatic Translation for Data-Augmentation", "abstract": "Tweets are specific text data when compared to general text. Although sentiment analysis over tweets has become very popular in the last decade for English, it is still difficult to find huge annotated corpora for non-English languages. The recent rise of the transformer models in Natural Language Processing allows to achieve unparalleled performances in many tasks, but these models need a consequent quantity of text to adapt to the tweet domain. We propose the use of a multilingual transformer model, that we pre-train over English tweets on which we apply data-augmentation using automatic translation to adapt the model to non-English languages. 
Our experiments in French, Spanish, German and Italian suggest that the proposed technique is an efficient way to improve the results of the transformers over small corpora of tweets in a non-English language.", "keyphrases": ["sentiment analysis", "automatic translation", "multilingual transformer model", "english tweet"]} +{"id": "roder-etal-2014-n3", "title": "N3 - A Collection of Datasets for Named Entity Recognition and Disambiguation in the NLP Interchange Format", "abstract": "Extracting Linked Data following the Semantic Web principle from unstructured sources has become a key challenge for scientific research. Named Entity Recognition and Disambiguation are two basic operations in this extraction process. One step towards the realization of the Semantic Web vision and the development of highly accurate tools is the availability of data for validating the quality of processes for Named Entity Recognition and Disambiguation as well as for algorithm tuning. This article presents three novel, manually curated and annotated corpora (N3). All of them are based on a free license and stored in the NLP Interchange Format to leverage the Linked Data character of our datasets.", "keyphrases": ["named entity recognition", "disambiguation", "nlp interchange format"]} +{"id": "foster-etal-2003-statistical", "title": "Statistical machine translation: rapid development with limited resources", "abstract": "We describe an experiment in rapid development of a statistical machine translation (SMT) system from scratch, using limited resources: under this heading we include not only training data, but also computing power, linguistic knowledge, programming effort, and absolute time.", "keyphrases": ["rapid development", "limited resource", "statistical machine translation"]} +{"id": "perret-etal-2016-integer", "title": "Integer Linear Programming for Discourse Parsing", "abstract": "In this paper we present the first, to the best of our knowledge, discourse parser that is able to predict non-tree DAG structures. We use Integer Linear Programming (ILP) to encode both the objective function and the constraints as global decoding over local scores. Our underlying data come from multi-party chat dialogues, which require the prediction of DAGs. We use the dependency parsing paradigm, as has been done in the past (Muller et al., 2012; Li et al., 2014; Afantenos et al., 2015), but we use the underlying formal framework of SDRT and exploit SDRT's notions of left and right distributive relations. We achieve an F-measure of 0.531 for fully labeled structures which beats the previous state of the art.", "keyphrases": ["discourse", "integer linear programming", "stac corpus", "approximation", "sdrt graph"]} +{"id": "mazare-etal-2018-training", "title": "Training Millions of Personalized Dialogue Agents", "abstract": "Current dialogue systems fail at being engaging for users, especially when trained end-to-end without relying on proactive reengaging scripted strategies. Zhang et al. (2018) showed that the engagement level of end-to-end dialogue models increases when conditioning them on text personas providing some personalized back-story to the model. However, the dataset used in Zhang et al. (2018) is synthetic and only contains around 1k different personas. In this paper we introduce a new dataset providing 5 million personas and 700 million persona-based dialogues. Our experiments show that, at this scale, training using personas still improves the performance of end-to-end systems.
In addition, we show that other tasks benefit from the wide coverage of our dataset by fine-tuning our model on the data from Zhang et al. (2018) and achieving state-of-the-art results.", "keyphrases": ["personalization", "agent", "conversation"]} +{"id": "poon-2013-grounded", "title": "Grounded Unsupervised Semantic Parsing", "abstract": "We present the first unsupervised approach for semantic parsing that rivals the accuracy of supervised approaches in translating natural-language questions to database queries. Our GUSP system produces a semantic parse by annotating the dependency-tree nodes and edges with latent states, and learns a probabilistic grammar using EM. To compensate for the lack of example annotations or question-answer pairs, GUSP adopts a novel grounded-learning approach to leverage database for indirect supervision. On the challenging ATIS dataset, GUSP attained an accuracy of 84%, effectively tying with the best published results by supervised approaches.", "keyphrases": ["unsupervised semantic parsing", "query", "node"]} +{"id": "birke-sarkar-2006-clustering", "title": "A Clustering Approach for Nearly Unsupervised Recognition of Nonliteral Language", "abstract": "In this paper we present TroFi (Trope Finder), a system for automatically classifying literal and nonliteral usages of verbs through nearly unsupervised word-sense disambiguation and clustering techniques. TroFi uses sentential context instead of selectional constraint violations or paths in semantic hierarchies. It also uses literal and nonliteral seed sets acquired and cleaned without human supervision in order to bootstrap learning. We adapt a word-sense disambiguation algorithm to our task and augment it with multiple seed set learners, a voting schema, and additional features like SuperTags and extrasentential context. Detailed experiments on hand-annotated data show that our enhanced algorithm outperforms the baseline by 24.4%. Using the TroFi algorithm, we also build the TroFi Example Base, an extensible resource of annotated literal/nonliteral examples which is freely available to the NLP research community.", "keyphrases": ["seed set", "sentence clustering approach", "non-literal classification", "language recognition", "new input sentence"]} +{"id": "fares-etal-2017-word", "title": "Word vectors, reuse, and replicability: Towards a community repository of large-text resources", "abstract": "This paper describes an emerging shared repository of large-text resources for creating word vectors, including pre-processed corpora and pre-trained vectors for a range of frameworks and configurations. This will facilitate reuse, rapid experimentation, and replicability of results.", "keyphrases": ["reuse", "community repository", "large-text resource"]} +{"id": "styler-iv-etal-2014-temporal", "title": "Temporal Annotation in the Clinical Domain", "abstract": "This article discusses the requirements of a formal specification for the annotation of temporal information in clinical narratives. We discuss the implementation and extension of ISO-TimeML for annotating a corpus of clinical notes, known as the THYME corpus. To reflect the information task and the heavily inference-based reasoning demands in the domain, a new annotation guideline has been developed, \u201cthe THYME Guidelines to ISO-TimeML (THYME-TimeML)\u201d. To clarify what relations merit annotation, we distinguish between linguistically-derived and inferentially-derived temporal orderings in the text. 
We also apply a top performing TempEval 2013 system against this new resource to measure the difficulty of adapting systems to the clinical domain. The corpus is available to the community and has been proposed for use in a SemEval 2015 task.", "keyphrases": ["clinical domain", "narrative", "thyme-timeml", "temporal annotation", "relation type"]} +{"id": "park-cardie-2018-corpus", "title": "A Corpus of eRulemaking User Comments for Measuring Evaluability of Arguments", "abstract": "eRulemaking is a means for government agencies to directly reach citizens to solicit their opinions and experiences regarding newly proposed rules. The effort, however, is partly hampered by citizens\u2019 comments that lack reasoning and evidence, which are largely ignored since government agencies are unable to evaluate the validity and strength. We present Cornell eRulemaking Corpus \u2013 CDCP , an argument mining corpus annotated with argumentative structure information capturing the evaluability of arguments. The corpus consists of 731 user comments on Consumer Debt Collection Practices (CDCP) rule by the Consumer Financial Protection Bureau (CFPB); the resulting dataset contains 4931 elementary unit and 1221 support relation annotations. It is a resource for building argument mining systems that can not only extract arguments from unstructured text, but also identify what additional information is necessary for readers to understand and evaluate a given argument. Immediate applications include providing real-time feedback to commenters, specifying which types of support for which propositions can be added to construct better-formed arguments.", "keyphrases": ["comment", "evaluability", "cornell erulemaking corpus", "argument model"]} +{"id": "kitaev-klein-2018-constituency", "title": "Constituency Parsing with a Self-Attentive Encoder", "abstract": "We demonstrate that replacing an LSTM encoder with a self-attentive architecture can lead to improvements to a state-of-the-art discriminative constituency parser. The use of attention makes explicit the manner in which information is propagated between different locations in the sentence, which we use to both analyze our model and propose potential improvements. For example, we find that separating positional and content information in the encoder can lead to improved parsing accuracy. Additionally, we evaluate different approaches for lexical representation. Our parser achieves new state-of-the-art results for single models trained on the Penn Treebank: 93.55 F1 without the use of any external data, and 95.13 F1 when using pre-trained word representations. Our parser also outperforms the previous best-published accuracy figures on 8 of the 9 languages in the SPMRL dataset.", "keyphrases": ["self-attentive encoder", "constituency parser", "chart-based parser"]} +{"id": "tsao-wible-2009-method", "title": "A Method for Unsupervised Broad-Coverage Lexical Error Detection and Correction", "abstract": "We describe and motivate an unsupervised lexical error detection and correction algorithm and its application in a tool called Lexbar appearing as a query box on the Web browser toolbar or as a search engine interface. Lexbar accepts as user input candidate strings of English to be checked for acceptability and, where errors are detected, offers corrections. We introduce the notion of hybrid n-gram and extract these from BNC as the knowledgebase against which to compare user input. 
An extended notion of edit distance is used to identify most likely candidates for correcting detected errors. Results are illustrated with four types of errors.", "keyphrases": ["lexical error detection", "correction", "n-gram"]} +{"id": "panyam-etal-2016-asm", "title": "ASM Kernel: Graph Kernel using Approximate Subgraph Matching for Relation Extraction", "abstract": "Kernel methods have been widely studied in several natural language processing tasks such as relation extraction and sentence classification. In this work, we present a new graph kernel that is derived from a distance measure described in prior work as Approximate Subgraph Matching (ASM). The classical ASM distance, shown to be effective for event extraction, is not a valid kernel and was primarily designed to work with rule based systems. We modify this distance suitably to render it a valid kernel (ASM kernel) and enable its use in powerful learning algorithms such as Support Vector Machine (SVM). We compare the ASM kernel with SVMs to the classical ASM with a rule based approach, for two relation extraction tasks and show an improved performance with the kernel based approach. Compared to other kernels such as the Subset tree kernel and the Partial tree kernel, ASM kernel outperforms in relation extraction tasks and is of comparable performance in a general sentence classification task. We describe the advantages of the ASM kernel such as its flexibility and ease of modification, which offers further directions for improvement.", "keyphrases": ["approximate subgraph matching", "relation extraction", "asm kernel"]} +{"id": "lin-bilmes-2010-multi", "title": "Multi-document Summarization via Budgeted Maximization of Submodular Functions", "abstract": "We treat the text summarization problem as maximizing a submodular function under a budget constraint. We show, both theoretically and empirically, a modified greedy algorithm can efficiently solve the budgeted submodular maximization problem near-optimally, and we derive new approximation bounds in doing so. Experiments on DUC'04 task show that our approach is superior to the best-performing method from the DUC'04 evaluation on ROUGE-1 scores.", "keyphrases": ["submodular function", "greedy algorithm", "multi-document summarization", "coverage"]} +{"id": "ghazvininejad-etal-2016-generating", "title": "Generating Topical Poetry", "abstract": "We describe Hafez, a program that generates any number of distinct poems on a user-supplied topic. Poems obey rhythmic and rhyme constraints. We describe the poetry-generation algorithm, give experimental data concerning its parameters, and show its generality with respect to language and poetic form.", "keyphrases": ["poem", "user-supplied topic", "rhyme", "language model"]} +{"id": "uresova-etal-2018-synonymy", "title": "Synonymy in Bilingual Context: The CzEngClass Lexicon", "abstract": "This paper describes CzEngClass, a bilingual lexical resource being built to investigate verbal synonymy in bilingual context and to relate semantic roles common to one synonym class to verb arguments (verb valency). In addition, the resource is linked to existing resources with the same or a similar aim: English and Czech WordNet, FrameNet, PropBank, VerbNet (SemLink), and valency lexicons for Czech and English (PDT-Vallex, Vallex, and EngVallex).
There are several goals of this work and resource: (a) to provide gold standard data for automatic experiments in the future (such as automatic discovery of synonym classes, word sense disambiguation, assignment of classes to occurrences of verbs in text, coreferential linking of verb and event arguments in text, etc.), (b) to build a core (bilingual) lexicon linked to existing resources, for comparative studies and possibly for training automatic tools, and (c) to enrich the annotation of a parallel treebank, the Prague Czech English Dependency Treebank, which so far contained valency annotation but has not linked synonymous senses of verbs together. The method used for extracting the synonym classes is a semi-automatic process with a substantial amount of manual work during filtering, role assignment to classes and individual Class members' arguments, and linking to the external lexical resources. We present the first version with 200 classes (about 1800 verbs) and evaluate interannotator agreement using several metrics.", "keyphrases": ["bilingual context", "czengclass lexicon", "synonymy"]} +{"id": "zhila-gelbukh-2014-open", "title": "Open Information Extraction for Spanish Language based on Syntactic Constraints", "abstract": "Open Information Extraction (Open IE) serves for the analysis of vast amounts of texts by extraction of assertions, or relations, in the form of tuples ⟨argument 1; relation; argument 2⟩. Various approaches to Open IE have been designed to perform in a fast, unsupervised manner. All of them require language specific information for their implementation. In this work, we introduce an approach to Open IE based on syntactic constraints over POS tag sequences targeted at Spanish language. We describe the rules specific for Spanish language constructions and their implementation in EXTRHECH, an Open IE system for Spanish. We also discuss language-specific issues of implementation. We compare EXTRHECH\u2019s performance with that of REVERB, a similar Open IE system for English, on a parallel dataset and show that these systems perform at a very similar level. We also compare EXTRHECH\u2019s performance on a dataset of grammatically correct sentences against its performance on a dataset of random texts extracted from the Web, drastically different in their quality from the first dataset. The latter experiment shows robustness of EXTRHECH on texts from the Web.", "keyphrases": ["spanish language", "syntactic constraint", "open information extraction"]} +{"id": "chen-etal-2008-learning", "title": "Learning Reliable Information for Dependency Parsing Adaptation", "abstract": "In this paper, we focus on the adaptation problem that has a large labeled data in the source domain and a large but unlabeled data in the target domain. Our aim is to learn reliable information from unlabeled target domain data for dependency parsing adaptation. Current state-of-the-art statistical parsers perform much better for shorter dependencies than for longer ones. Thus we propose an adaptation approach by learning reliable information on shorter dependencies in an unlabeled target data to help parse longer distance words. The unlabeled data is parsed by a dependency parser trained on labeled source domain data.
The experimental results indicate that our proposed approach outperforms the baseline system, and is better than current state-of-the-art adaptation techniques.", "keyphrases": ["reliable information", "dependency parsing adaptation", "target domain data", "chinese", "unlabeled attachment score"]} +{"id": "clarke-lapata-2010-discourse", "title": "Discourse Constraints for Document Compression", "abstract": "Sentence compression holds promise for many applications ranging from summarization to subtitle generation. The task is typically performed on isolated sentences without taking the surrounding context into account, even though most applications would operate over entire documents. In this article we present a discourse-informed model which is capable of producing document compressions that are coherent and informative. Our model is inspired by theories of local coherence and formulated within the framework of integer linear programming. Experimental results show significant improvements over a state-of-the-art discourse agnostic approach.", "keyphrases": ["document compression", "summarization", "human evaluation"]} +{"id": "mohammad-2012-emotional", "title": "#Emotional Tweets", "abstract": "Detecting emotions in microblogs and social media posts has applications for industry, health, and security. However, there exists no microblog corpus with instances labeled for emotions for developing supervised systems. In this paper, we describe how we created such a corpus from Twitter posts using emotion-word hashtags. We conduct experiments to show that the self-labeled hashtag annotations are consistent and match with the annotations of trained judges. We also show how the Twitter emotion corpus can be used to improve emotion classification accuracy in a different domain. Finally, we extract a word-emotion association lexicon from this Twitter corpus, and show that it leads to significantly better results than the manually crafted WordNet Affect lexicon in an emotion classification task.", "keyphrases": ["emotion", "hashtag", "judge"]} +{"id": "song-etal-2018-leveraging", "title": "Leveraging Context Information for Natural Question Generation", "abstract": "The task of natural question generation is to generate a corresponding question given the input passage (fact) and answer. It is useful for enlarging the training set of QA systems. Previous work has adopted sequence-to-sequence models that take a passage with an additional bit to indicate answer position as input. However, they do not explicitly model the information between answer and other context within the passage. We propose a model that matches the answer with the passage before generating the question. Experiments show that our model outperforms the existing state of the art using rich features.", "keyphrases": ["natural question generation", "well quality answer", "word position"]} +{"id": "vilar-etal-2006-aer", "title": "AER: do we need to \u201cimprove\u201d our alignments?", "abstract": "Currently most statistical machine translation systems make use of alignments as a first step in the process of training the actual translation models. Several researchers have investigated how to improve the alignment quality, with the (intuitive) assumption that better alignments increase the translation quality. 
In this paper we will investigate this assumption and show that this is not always the case.", "keyphrases": ["translation quality", "aer", "well alignment"]} +{"id": "arabsorkhi-shamsfard-2006-unsupervised", "title": "Unsupervised Discovery of Persian Morphemes", "abstract": "This paper reports the present results of a research on unsupervised Persian morpheme discovery. In this paper we present a method for discovering the morphemes of Persian language through automatic analysis of corpora. We utilized a Minimum Description Length (MDL) based algorithm with some improvements and applied it to Persian corpus. Our improvements include enhancing the cost function using some heuristics, preventing the split of high frequency chunks, exploiting penalty for first and last letters and distinguishing pre-parts and post-parts. Our improved approach has raised the precision, recall and f-measure of discovery by respectively %32, %17 and %23.", "keyphrases": ["morpheme", "persian language", "automatic analysis"]} +{"id": "kumar-byrne-2005-local", "title": "Local Phrase Reordering Models for Statistical Machine Translation", "abstract": "We describe stochastic models of local phrase movement that can be incorporated into a Statistical Machine Translation (SMT) system. These models provide properly formulated, non-deficient, probability distributions over reordered phrase sequences. They are implemented by Weighted Finite State Transducers. We describe EM-style parameter re-estimation procedures based on phrase alignment under the complete translation model incorporating reordering. Our experiments show that the reordering model yields substantial improvements in translation performance on Arabic-to-English and Chinese-to-English MT tasks. We also show that the procedure scales as the bitext size is increased.", "keyphrases": ["statistical machine translation", "state transducer", "orientation"]} +{"id": "bethard-parker-2016-semantically", "title": "A Semantically Compositional Annotation Scheme for Time Normalization", "abstract": "We present a new annotation scheme for normalizing time expressions, such as \u201cthree days ago\u201d, to computer-readable forms, such as 2016-03-07. The annotation scheme addresses several weaknesses of the existing TimeML standard, allowing the representation of time expressions that align to more than one calendar unit (e.g., \u201cthe past three summers\u201d), that are defined relative to events (e.g., \u201cthree weeks postoperative\u201d), and that are unions or intersections of smaller time expressions (e.g., \u201cTuesdays and Thursdays\u201d). It achieves this by modeling time expression interpretation as the semantic composition of temporal operators like UNION, NEXT, and AFTER. We have applied the annotation scheme to 34 documents so far, producing 1104 annotations, and achieving inter-annotator agreement of 0.821.", "keyphrases": ["semantically compositional annotation", "time normalization", "scate"]} +{"id": "zhang-etal-2023-survey", "title": "A Survey of Multi-task Learning in Natural Language Processing: Regarding Task Relatedness and Training Methods", "abstract": "Multi-task learning (MTL) has become increasingly popular in natural language processing (NLP) because it improves the performance of related tasks by exploiting their commonalities and differences. Nevertheless, it is still not understood very well how multi-task learning can be implemented based on the relatedness of training tasks. 
In this survey, we review recent advances of multi-task learning methods in NLP, with the aim of summarizing them into two general multi-task training methods based on their task relatedness: (i) joint training and (ii) multi-step training. We present examples in various NLP downstream applications, summarize the task relationships and discuss future directions of this promising topic.", "keyphrases": ["survey", "multi-task learning", "task relatedness"]} +{"id": "potash-etal-2017-heres", "title": "Here's My Point: Joint Pointer Architecture for Argument Mining", "abstract": "In order to determine argument structure in text, one must understand how individual components of the overall argument are linked. This work presents the first neural network-based approach to link extraction in argument mining. Specifically, we propose a novel architecture that applies Pointer Network sequence-to-sequence attention modeling to structural prediction in discourse parsing tasks. We then develop a joint model that extends this architecture to simultaneously address the link extraction task and the classification of argument components. The proposed joint model achieves state-of-the-art results on two separate evaluation corpora, showing far superior performance than the previously proposed corpus-specific and heavily feature-engineered models. Furthermore, our results demonstrate that jointly optimizing for both tasks is crucial for high performance.", "keyphrases": ["joint pointer architecture", "argument mining", "link", "joint model", "subtask"]} +{"id": "yin-etal-2016-simple", "title": "Simple Question Answering by Attentive Convolutional Neural Network", "abstract": "This work focuses on answering single-relation factoid questions over Freebase. Each question can acquire the answer from a single fact of form (subject, predicate, object) in Freebase. This task, simple question answering (SimpleQA), can be addressed via a two-step pipeline: entity linking and fact selection. In fact selection, we match the subject entity in a fact candidate with the entity mention in the question by a character-level convolutional neural network (char-CNN), and match the predicate in that fact with the question by a word-level CNN (word-CNN). This work makes two main contributions. (i) A simple and effective entity linker over Freebase is proposed. Our entity linker outperforms the state-of-the-art entity linker over SimpleQA task. (ii) A novel attentive maxpooling is stacked over word-CNN, so that the predicate representation can be matched with the predicate-focused question representation more effectively. Experiments show that our system sets new state-of-the-art in this task.", "keyphrases": ["convolutional neural network", "word-level cnn", "simple question"]} +{"id": "li-2010-query", "title": "Query Understanding in Web Search - by Large Scale Log Data Mining and Statistical Learning", "abstract": "Query understanding is an important component of web search, like document understanding, query document matching, ranking, and user understanding. The goal of query understanding is to predict the user\u2019s search intent from the given query. Needless to say, search log mining and statistical learning are fundamental technologies to address the task of query understanding. In this talk, I will first introduce a large-scale search log mining platform which we have developed at MSRA. 
I will then explain our approach to query understanding, as well as document understanding, query document matching, and user understanding. After that, I will describe in detail our methods for query understanding based on statistical learning. They include query refinement using CRF, named entity recognition in query using topic model, context aware query topic prediction using HMM. This is joint work with Gu Xu, Daxin Jiang and other collaborators.", "keyphrases": ["web search", "statistical learning", "query understanding"]} +{"id": "arora-etal-2018-linear", "title": "Linear Algebraic Structure of Word Senses, with Applications to Polysemy", "abstract": "Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 \u201cdiscourse atoms\u201d that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.", "keyphrases": ["polysemy", "linear algebraic structure", "different sense"]} +{"id": "shin-etal-2021-constrained", "title": "Constrained Language Models Yield Few-Shot Semantic Parsers", "abstract": "We explore the use of large pretrained language models as few-shot semantic parsers. The goal in semantic parsing is to generate a structured meaning representation given a natural language input. However, language models are trained to generate natural language. To bridge the gap, we use language models to paraphrase inputs into a controlled sublanguage resembling English that can be automatically mapped to a target meaning representation. Our results demonstrate that with only a small amount of data and very little code to convert into English-like representations, our blueprint for rapidly bootstrapping semantic parsers leads to surprisingly effective performance on multiple community tasks, greatly exceeding baseline methods also trained on the same limited data.", "keyphrases": ["semantic parser", "gap", "canonical utterance", "gpt-3"]} +{"id": "farra-etal-2015-scoring", "title": "Scoring Persuasive Essays Using Opinions and their Targets", "abstract": "In this work, we investigate whether the analysis of opinion expressions can help in scoring persuasive essays. For this, we develop systems that predict holistic essay scores based on features extracted from opinion expressions, topical elements, and their combinations. Experiments on test taker essays show that essay scores produced using opinion features are indeed correlated with human scores.
Moreover, we find that combining opinions with their targets (what the opinions are about) produces the best result when compared to using only opinions or only topics.", "keyphrases": ["persuasive essay", "opinion expression", "essay score"]} +{"id": "zhang-etal-2015-bidirectional", "title": "Bidirectional Long Short-Term Memory Networks for Relation Classification", "abstract": "Relation classification is an important semantic processing, which has achieved great attention in recent years. The main challenge is the fact that important information can appear at any position in the sentence. Therefore, we propose bidirectional long short-term memory networks (BLSTM) to model the sentence with complete, sequential information about all words. At the same time, we also use features derived from the lexical resources such as WordNet or NLP systems such as dependency parser and named entity recognizers (NER). The experimental results on SemEval-2010 show that BLSTM-based method only with word embeddings as input features is sufficient to achieve state-of-the-art performance, and importing more features could further improve the performance.", "keyphrases": ["short-term memory network", "relation classification", "sequential information", "powerful encoder"]} +{"id": "han-etal-2019-joint", "title": "Joint Event and Temporal Relation Extraction with Shared Representations and Structured Prediction", "abstract": "We propose a joint event and temporal relation extraction model with shared representation learning and structured prediction. The proposed method has two advantages over existing work. First, it improves event representation by allowing the event and relation modules to share the same contextualized embeddings and neural representation learner. Second, it avoids error propagation in the conventional pipeline systems by leveraging structured inference and learning methods to assign both the event labels and the temporal relation labels jointly. Experiments show that the proposed method can improve both event extraction and temporal relation extraction over state-of-the-art systems, with the end-to-end F1 improved by 10% and 6.8% on two benchmark datasets respectively.", "keyphrases": ["temporal relation extraction", "structured prediction", "joint event"]} +{"id": "li-eisner-2009-first", "title": "First- and Second-Order Expectation Semirings with Applications to Minimum-Risk Training on Translation Forests", "abstract": "Many statistical translation models can be regarded as weighted logical deduction. Under this paradigm, we use weights from the expectation semiring (Eisner, 2002), to compute first-order statistics (e.g., the expected hypothesis length or feature counts) over packed forests of translations (lattices or hypergraphs). We then introduce a novel second-order expectation semiring, which computes second-order statistics (e.g., the variance of the hypothesis length or the gradient of entropy). This second-order semiring is essential for many interesting training paradigms such as minimum risk, deterministic annealing, active learning, and semi-supervised learning, where gradient descent optimization requires computing the gradient of entropy or risk.
We use these semirings in an open-source machine translation toolkit, Joshua, enabling minimum-risk training for a benefit of up to 1.0 BLEU point.", "keyphrases": ["expectation", "semiring", "minimum-risk training", "translation forest", "risk"]} +{"id": "long-etal-2017-cognition", "title": "A Cognition Based Attention Model for Sentiment Analysis", "abstract": "Attention models are proposed in sentiment analysis because some words are more important than others. However, most existing methods either use local context based text information or user preference information. In this work, we propose a novel attention model trained by cognition grounded eye-tracking data. A reading prediction model is first built using eye-tracking data as dependent data and other features in the context as independent data. The predicted reading time is then used to build a cognition based attention (CBA) layer for neural sentiment analysis. As a comprehensive model, we can capture attentions of words in sentences as well as sentences in documents. Different attention mechanisms can also be incorporated to capture other aspects of attentions. Evaluations show the CBA based method outperforms the state-of-the-art local context based attention methods significantly. This brings insight to how cognition grounded data can be brought into NLP tasks.", "keyphrases": ["cognition", "attention model", "sentiment analysis"]} +{"id": "rudinger-etal-2017-social", "title": "Social Bias in Elicited Natural Language Inferences", "abstract": "We analyze the Stanford Natural Language Inference (SNLI) corpus in an investigation of bias and stereotyping in NLP data. The SNLI human-elicitation protocol makes it prone to amplifying bias and stereotypical associations, which we demonstrate statistically (using pointwise mutual information) and with qualitative examples.", "keyphrases": ["social bias", "nli dataset", "hypothesis"]} +{"id": "stoia-etal-2008-scare", "title": "SCARE: a Situated Corpus with Annotated Referring Expressions", "abstract": "Even though a wealth of speech data is available for the dialog systems research community, the particular field of situated language has yet to find an appropriate free resource. The corpus required to answer research questions related to situated language should connect world information to the human language. In this paper we report on the release of a corpus of English spontaneous instruction giving situated dialogs. The corpus was collected using the Quake environment, a first-person virtual reality game, and consists of pairs of participants completing a direction giver-direction follower scenario. The corpus contains the collected audio and video, as well as word-aligned transcriptions and the positional/gaze information of the player. Referring expressions in the corpus are annotated with the IDs of their virtual world referents.", "keyphrases": ["instruction", "environment", "participant", "scare"]} +{"id": "li-etal-2018-towards", "title": "Towards Robust and Privacy-preserving Text Representations", "abstract": "Written text often provides sufficient clues to identify the author, their gender, age, and other important attributes. Consequently, the authorship of training and evaluation corpora can have unforeseen impacts, including differing model performance for different user groups, as well as privacy implications.
In this paper, we propose an approach to explicitly obscure important author characteristics at training time, such that representations learned are invariant to these attributes. Evaluating on two tasks, we show that this leads to increased privacy in the learned representations, as well as more robust models to varying evaluation conditions, including out-of-domain corpora.", "keyphrases": ["robustness", "attribute", "privacy", "part-of-speech tagging", "high prediction accuracy"]} +{"id": "nouri-yangarber-2016-alignment", "title": "From alignment of etymological data to phylogenetic inference via population genetics", "abstract": "This paper presents a method for linking models for aligning linguistic etymological data with models for phylogenetic inference from population genetics. We begin with a large database of genetically related words\u2014sets of cognates\u2014from languages in a language family. We process the cognate sets to obtain a complete alignment of the data. We use the alignments as input to a model developed for phylogenetic reconstruction in population genetics. This is achieved via a natural novel projection of the linguistic data onto genetic primitives. As a result, we induce phylogenies based on aligned linguistic data. We place the method in the context of those reported in the literature, and illustrate its operation on data from the Uralic language family, which results in family trees that are very close to the \u201ctrue\u201d (expected) phylogenies.", "keyphrases": ["etymological data", "phylogenetic inference", "population genetic"]} +{"id": "luong-etal-2015-pronoun", "title": "Pronoun Translation and Prediction with or without Coreference Links", "abstract": "The Idiap NLP Group has participated in both DiscoMT 2015 sub-tasks: pronoun-focused translation and pronoun prediction. The system for the first sub-task combines two knowledge sources: grammatical constraints from the hypothesized coreference links, and candidate translations from an SMT decoder. The system for the second sub-task avoids hypothesizing a coreference link, and uses instead a large set of source-side and target-side features from the noun phrases surrounding the pronoun to train a pronoun predictor.", "keyphrases": ["coreference link", "smt pronoun translation", "moses decoder"]} +{"id": "wing-baldridge-2011-simple", "title": "Simple supervised document geolocation with geodesic grids", "abstract": "We investigate automatic geolocation (i.e. identification of the location, expressed as latitude/longitude coordinates) of documents. Geolocation can be an effective means of summarizing large document collections and it is an important component of geographic information retrieval. We describe several simple supervised methods for document geolocation using only the document's raw text as evidence. All of our methods predict locations in the context of geodesic grids of varying degrees of resolution. We evaluate the methods on geotagged Wikipedia articles and Twitter feeds. For Wikipedia, our best method obtains a median prediction error of just 11.8 kilometers.
Twitter geolocation is more challenging: we obtain a median error of 479 km, an improvement on previous results for the dataset.", "keyphrases": ["document geolocation", "geodesic grid", "location", "wikipedia", "word distribution"]} +{"id": "saito-etal-2006-using", "title": "Using Phrasal Patterns to Identify Discourse Relations", "abstract": "This paper describes a system which identifies discourse relations between two successive sentences in Japanese. On top of the lexical information previously proposed, we used phrasal pattern information. Adding phrasal information improves the system's accuracy 12%, from 53% to 65%.", "keyphrases": ["phrasal pattern", "discourse relation", "cross-argument word pair"]} +{"id": "goyal-etal-2010-sketch", "title": "Sketch Techniques for Scaling Distributional Similarity to the Web", "abstract": "In this paper, we propose a memory, space, and time efficient framework to scale distributional similarity to the web. We exploit sketch techniques, especially the Count-Min sketch, which approximates the frequency of an item in the corpus without explicitly storing the item itself. These methods use hashing to deal with massive amounts of the streaming text. We store all item counts computed from 90 GB of web data in just 2 billion counters (8 GB main memory) of CM sketch. Our method returns semantic similarity between word pairs in O(K) time and can compute similarity between any word pairs that are stored in the sketch. In our experiments, we show that our framework is as effective as using the exact counts.", "keyphrases": ["distributional similarity", "web", "sketch technique"]} +{"id": "gao-etal-2018-neural", "title": "Neural Metaphor Detection in Context", "abstract": "We present end-to-end neural models for detecting metaphorical word use in context. We show that relatively standard BiLSTM models which operate on complete sentences work well in this setting, in comparison to previous work that used more restricted forms of linguistic context. These models establish a new state-of-the-art on existing verb metaphor detection benchmarks, and show strong performance on jointly predicting the metaphoricity of all words in a running text.", "keyphrases": ["metaphor detection", "elmo embedding", "top", "concreteness score"]} +{"id": "chang-etal-2010-discriminative", "title": "Discriminative Learning over Constrained Latent Representations", "abstract": "This paper proposes a general learning framework for a class of problems that require learning over latent intermediate representations. Many natural language processing (NLP) decision problems are defined over an expressive intermediate representation that is not explicit in the input, leaving the algorithm with both the task of recovering a good intermediate representation and learning to classify correctly. Most current systems separate the learning problem into two stages by solving the first step of recovering the intermediate representation heuristically and using it to learn the final classifier. This paper develops a novel joint learning algorithm for both tasks, that uses the final prediction to guide the selection of the best intermediate representation. 
We evaluate our algorithm on three different NLP tasks -- transliteration, paraphrase identification and textual entailment -- and show that our joint method significantly improves performance.", "keyphrases": ["constrained latent representations", "paraphrase identification", "lclr"]} +{"id": "sharma-etal-2015-adjective", "title": "Adjective Intensity and Sentiment Analysis", "abstract": "For fine-grained sentiment analysis, we need to go beyond zero-one polarity and find a way to compare adjectives that share a common semantic property. In this paper, we present a semi-supervised approach to assign intensity levels to adjectives, viz. high, medium and low, where adjectives are compared when they belong to the same semantic category. For example, in the semantic category of EXPERTISE, expert, experienced and familiar are respectively of level high, medium and low. We obtain an overall accuracy of 77% for intensity assignment. We show the significance of considering intensity information of adjectives in predicting star-rating of reviews. Our intensity based prediction system results in an accuracy of 59% for a 5-star rated movie review corpus.", "keyphrases": ["intensity", "sentiment analysis", "semantic property", "adjective"]} +{"id": "zhang-etal-2020-multi", "title": "Multi-modal Multi-label Emotion Detection with Modality and Label Dependence", "abstract": "As an important research issue in the natural language processing community, multi-label emotion detection has been drawing more and more attention in the last few years. However, almost all existing studies focus on one modality (e.g., textual modality). In this paper, we focus on multi-label emotion detection in a multi-modal scenario. In this scenario, we need to consider both the dependence among different labels (label dependence) and the dependence between each predicting label and different modalities (modality dependence). Particularly, we propose a multi-modal sequence-to-set approach to effectively model both kinds of dependence in multi-modal multi-label emotion detection. The detailed evaluation demonstrates the effectiveness of our approach.", "keyphrases": ["multi-label emotion detection", "modality", "label dependence"]} +{"id": "he-etal-2018-effective", "title": "Effective Attention Modeling for Aspect-Level Sentiment Classification", "abstract": "Aspect-level sentiment classification aims to determine the sentiment polarity of a review sentence towards an opinion target. A sentence could contain multiple sentiment-target pairs; thus the main challenge of this task is to separate different opinion contexts for different targets. To this end, attention mechanism has played an important role in previous state-of-the-art neural models. The mechanism is able to capture the importance of each context word towards a target by modeling their semantic associations. We build upon this line of research and propose two novel approaches for improving the effectiveness of attention. First, we propose a method for target representation that better captures the semantic meaning of the opinion target. Second, we introduce an attention model that incorporates syntactic information into the attention mechanism. We experiment on attention-based LSTM (Long Short-Term Memory) models using the datasets from SemEval 2014, 2015, and 2016. 
The experimental results show that the conventional attention-based LSTM can be substantially improved by incorporating the two approaches.", "keyphrases": ["aspect-level sentiment classification", "attention weight", "asc"]} +{"id": "pei-li-2018-s2spmn", "title": "S2SPMN: A Simple and Effective Framework for Response Generation with Relevant Information", "abstract": "How to generate relevant and informative responses is one of the core topics in response generation area. Following the task formulation of machine translation, previous works mainly consider response generation task as a mapping from a source sentence to a target sentence. To realize this mapping, existing works tend to design intuitive but complex models. However, the relevant information existed in large dialogue corpus is mainly overlooked. In this paper, we propose Sequence to Sequence with Prototype Memory Network (S2SPMN) to exploit the relevant information provided by the large dialogue corpus to enhance response generation. Specifically, we devise two simple approaches in S2SPMN to select the relevant information (named prototypes) from the dialogue corpus. These prototypes are then saved into prototype memory network (PMN). Furthermore, a hierarchical attention mechanism is devised to extract the semantic information from the PMN to assist the response generation process. Empirical studies reveal the advantage of our model over several classical and strong baselines.", "keyphrases": ["response generation", "relevant information", "s2spmn"]} +{"id": "girju-etal-2006-automatic", "title": "Automatic Discovery of Part-Whole Relations", "abstract": "An important problem in knowledge discovery from text is the automatic extraction of semantic relations. This paper presents a supervised, semantically intensive, domain independent approach for the automatic detection of part-whole relations in text. First an algorithm is described that identifies lexico-syntactic patterns that encode part-whole relations. A difficulty is that these patterns also encode other semantic relations, and a learning method is necessary to discriminate whether or not a pattern contains a part-whole relation. A large set of training examples have been annotated and fed into a specialized learning system that learns classification rules. The rules are learned through an iterative semantic specialization (ISS) method applied to noun phrase constituents. Classification rules have been generated this way for different patterns such as genitives, noun compounds, and noun phrases containing prepositional phrases to extract part-whole relations from them. The applicability of these rules has been tested on a test corpus obtaining an overall average precision of 80.95% and recall of 75.91%. The results demonstrate the importance of word sense disambiguation for this task. They also demonstrate that different lexico-syntactic patterns encode different semantic information and should be treated separately in the sense that different classification rules apply to different patterns.", "keyphrases": ["discovery", "part-whole relation", "extraction", "noun compound", "semantic information"]} +{"id": "xie-xing-2018-neural", "title": "A Neural Architecture for Automated ICD Coding", "abstract": "The International Classification of Diseases (ICD) provides a hierarchy of diagnostic codes for classifying diseases. Medical coding \u2013 which assigns a subset of ICD codes to a patient visit \u2013 is a mandatory process that is crucial for patient care and billing.
Manual coding is time-consuming, expensive, and error prone. In this paper, we build a neural architecture for automated coding. It takes the diagnosis descriptions (DDs) of a patient as inputs and selects the most relevant ICD codes. This architecture contains four major ingredients: (1) tree-of-sequences LSTM encoding of code descriptions (CDs), (2) adversarial learning for reconciling the different writing styles of DDs and CDs, (3) isotonic constraints for incorporating the importance order among the assigned codes, and (4) attentional matching for performing many-to-one and one-to-many mappings from DDs to CDs. We demonstrate the effectiveness of the proposed methods on a clinical dataset with 59K patient visits.", "keyphrases": ["neural architecture", "icd", "code description", "tree lstm"]} +{"id": "mcdonald-etal-2010-distributed", "title": "Distributed Training Strategies for the Structured Perceptron", "abstract": "Perceptron training is widely applied in the natural language processing community for learning complex structured models. Like all structured prediction learning frameworks, the structured perceptron can be costly to train as training complexity is proportional to inference, which is frequently non-linear in example sequence length. In this paper we investigate distributed training strategies for the structured perceptron as a means to reduce training times when computing clusters are available. We look at two strategies and provide convergence bounds for a particular mode of distributed structured perceptron training based on iterative parameter mixing (or averaging). We present experiments on two structured prediction problems -- named-entity recognition and dependency parsing -- to highlight the efficiency of this method.", "keyphrases": ["training strategy", "structured perceptron", "particular mode", "iterative parameter mixing"]} +{"id": "shmueli-etal-2020-reactive", "title": "Reactive Supervision: A New Method for Collecting Sarcasm Data", "abstract": "Sarcasm detection is an important task in affective computing, requiring large amounts of labeled data. We introduce reactive supervision, a novel data collection method that utilizes the dynamics of online conversations to overcome the limitations of existing data collection techniques. We use the new method to create and release a first-of-its-kind large dataset of tweets with sarcasm perspective labels and new contextual features. The dataset is expected to advance sarcasm detection research. Our method can be adapted to other affective computing domains, thus opening up new research opportunities.", "keyphrases": ["reactive supervision", "sarcasm detection task", "user history", "spirs dataset"]} +{"id": "arisoy-etal-2012-deep", "title": "Deep Neural Network Language Models", "abstract": "In recent years, neural network language models (NNLMs) have shown success in both perplexity and word error rate (WER) compared to conventional n-gram language models. Most NNLMs are trained with one hidden layer. Deep neural networks (DNNs) with more hidden layers have been shown to capture higher-level discriminative information about input features, and thus produce better networks. Motivated by the success of DNNs in acoustic modeling, we explore deep neural network language models (DNN LMs) in this paper. Results on a Wall Street Journal (WSJ) task demonstrate that DNN LMs offer improvements over a single hidden layer NNLM.
Furthermore, our preliminary results are competitive with a model M language model, considered to be one of the current state-of-the-art techniques for language modeling.", "keyphrases": ["language modeling", "word error rate", "speech recognition"]} +{"id": "yahya-etal-2014-renoun", "title": "ReNoun: Fact Extraction for Nominal Attributes", "abstract": "Search engines are increasingly relying on large knowledge bases of facts to provide direct answers to users\u2019 queries. However, the construction of these knowledge bases is largely manual and does not scale to the long and heavy tail of facts. Open information extraction tries to address this challenge, but typically assumes that facts are expressed with verb phrases, and therefore has had difficulty extracting facts for noun-based relations. We describe ReNoun, an open information extraction system that complements previous efforts by focusing on nominal attributes and on the long tail. ReNoun\u2019s approach is based on leveraging a large ontology of noun attributes mined from a text corpus and from user queries. ReNoun creates a seed set of training data by using specialized patterns and requiring that the facts mention an attribute in the ontology. ReNoun then generalizes from this seed set to produce a much larger set of extractions that are then scored. We describe experiments that show that we extract facts with high precision and for attributes that cannot be extracted with verb-based techniques.", "keyphrases": ["attribute", "information extraction", "renoun", "knowledge basis"]} +{"id": "hajishirzi-etal-2013-joint", "title": "Joint Coreference Resolution and Named-Entity Linking with Multi-Pass Sieves", "abstract": "Many errors in coreference resolution come from semantic mismatches due to inadequate world knowledge. Errors in named-entity linking (NEL), on the other hand, are often caused by superficial modeling of entity context. This paper demonstrates that these two tasks are complementary. We introduce NECO, a new model for named entity linking and coreference resolution, which solves both problems jointly, reducing the errors made on each. NECO extends the Stanford deterministic coreference system by automatically linking mentions to Wikipedia and introducing new NEL-informed mention-merging sieves. Linking improves mention-detection and enables new semantic attributes to be incorporated from Freebase, while coreference provides better context modeling by propagating named-entity links within mention clusters. Experiments show consistent improvements across a number of datasets and experimental conditions, including over 11% reduction in MUC coreference error and nearly 21% reduction in F1 NEL error on ACE 2004 newswire data.", "keyphrases": ["coreference resolution", "named-entity", "linking", "wikipedia", "joint model"]} +{"id": "wang-etal-2015-chinese", "title": "Chinese Semantic Role Labeling with Bidirectional Recurrent Neural Networks", "abstract": "Traditional approaches to Chinese Semantic Role Labeling (SRL) almost heavily rely on feature engineering. Even worse, the long-range dependencies in a sentence can hardly be modeled by these methods. In this paper, we introduce bidirectional recurrent neural network (RNN) with long-short-term memory (LSTM) to capture bidirectional and long-range dependencies in a sentence with minimal feature engineering. Experimental results on Chinese Proposition Bank (CPB) show a significant improvement over the state-of-the-art methods.
Moreover, our model makes it convenient to introduce heterogeneous resources, which further improves our experimental performance.", "keyphrases": ["bidirectional", "recurrent neural network", "chinese srl"]} +{"id": "khodak-etal-2018-la", "title": "A La Carte Embedding: Cheap but Effective Induction of Semantic Feature Vectors", "abstract": "Motivations like domain adaptation, transfer learning, and feature learning have fueled interest in inducing embeddings for rare or unseen words, n-grams, synsets, and other textual features. This paper introduces a la carte embedding, a simple and general alternative to the usual word2vec-based approaches for building such representations that is based upon recent theoretical results for GloVe-like embeddings. Our method relies mainly on a linear transformation that is efficiently learnable using pretrained word vectors and linear regression. This transform is applicable on the fly in the future when a new text feature or rare word is encountered, even if only a single usage example is available. We introduce a new dataset showing how the a la carte method requires fewer examples of words in context to learn high-quality embeddings and we obtain state-of-the-art results on a nonce task and some unsupervised document classification tasks.", "keyphrases": ["transformation", "word vector", "irony"]} +{"id": "mao-etal-2021-banditmtl", "title": "BanditMTL: Bandit-based Multi-task Learning for Text Classification", "abstract": "Task variance regularization, which can be used to improve the generalization of Multi-task Learning (MTL) models, remains unexplored in multi-task text classification. Accordingly, to fill this gap, this paper investigates how the task might be effectively regularized, and consequently proposes a multi-task learning method based on adversarial multi-armed bandit. The proposed method, named BanditMTL, regularizes the task variance by means of a mirror gradient ascent-descent algorithm. Adopting BanditMTL in the multi-task text classification context is found to achieve state-of-the-art performance. The results of extensive experiments back up our theoretical analysis and validate the superiority of our proposals.", "keyphrases": ["multi-task learning", "text classification", "banditmtl"]} +{"id": "feng-etal-2004-accessor", "title": "Accessor Variety Criteria for Chinese Word Extraction", "abstract": "We are interested in the problem of word extraction from Chinese text collections. We define a word to be a meaningful string composed of several Chinese characters. For example, percent, and, more and more, are not recognized as traditional Chinese words from the viewpoint of some people. However, in our work, they are words because they are very widely used and have specific meanings. We start with the viewpoint that a word is a distinguished linguistic entity that can be used in many different language environments. We consider the characters that are directly before a string (predecessors) and the characters that are directly after a string (successors) as important factors for determining the independence of the string. We call such characters accessors of the string, consider the number of distinct predecessors and successors of a string in a large corpus (TREC 5 and TREC 6 documents), and use them as the measurement of the context independence of a string from the rest of the sentences in the document.
Our experiments confirm our hypothesis and show that this simple rule gives quite good results for Chinese word extraction and is comparable to, and for long words outperforms, other iterative methods.", "keyphrases": ["chinese word extraction", "string", "factor", "accessor variety criteria"]} +{"id": "brockett-dolan-2005-support", "title": "Support Vector Machines for Paraphrase Identification and Corpus Construction", "abstract": "The lack of readily-available large corpora of aligned monolingual sentence pairs is a major obstacle to the development of Statistical Machine Translation-based paraphrase models. In this paper, we describe the use of annotated datasets and Support Vector Machines to induce larger monolingual paraphrase corpora from a comparable corpus of news clusters found on the World Wide Web. Features include: morphological variants; WordNet synonyms and hypernyms; log-likelihood-based word pairings dynamically obtained from baseline sentence alignments; and formal string features such as word-based edit distance. Use of this technique dramatically reduces the Alignment Error Rate of the extracted corpora over heuristic methods based on position of the sentences in the text.", "keyphrases": ["paraphrase corpora", "world wide web", "edit distance", "support vector machines"]} +{"id": "muller-etal-2021-unseen", "title": "When Being Unseen from mBERT is just the Beginning: Handling New Languages With Multilingual Language Models", "abstract": "Transfer learning based on pretraining language models on a large amount of raw data has become a new norm to reach state-of-the-art performance in NLP. Still, it remains unclear how this approach should be applied for unseen languages that are not covered by any available large-scale multilingual language model and for which only a small amount of raw data is generally available. In this work, by comparing multilingual and monolingual models, we show that such models behave in multiple ways on unseen languages. Some languages greatly benefit from transfer learning and behave similarly to closely related high resource languages whereas others apparently do not. Focusing on the latter, we show that this failure to transfer is largely related to the impact of the script used to write such languages. We show that transliterating those languages significantly improves the potential of large-scale multilingual language models on downstream tasks. This result provides a promising direction towards making these massively multilingual models useful for a new set of unseen languages.", "keyphrases": ["unseen language", "dependency parsing", "maltese"]} +{"id": "abdelali-etal-2016-farasa", "title": "Farasa: A Fast and Furious Segmenter for Arabic", "abstract": "In this paper, we present Farasa, a fast and accurate Arabic segmenter. Our approach is based on SVM-rank using linear kernels. We measure the performance of the segmenter in terms of accuracy and efficiency, in two NLP tasks, namely Machine Translation (MT) and Information Retrieval (IR).
Farasa outperforms or is on par with the state-of-the-art Arabic segmenters (Stanford and MADAMIRA), while being more than one order of magnitude faster.", "keyphrases": ["segmenter", "arabic", "disambiguation"]} +{"id": "roark-etal-2009-deriving", "title": "Deriving lexical and syntactic expectation-based measures for psycholinguistic modeling via incremental top-down parsing", "abstract": "A number of recent publications have made use of the incremental output of stochastic parsers to derive measures of high utility for psycholinguistic modeling, following the work of Hale (2001; 2003; 2006). In this paper, we present novel methods for calculating separate lexical and syntactic surprisal measures from a single incremental parser using a lexicalized PCFG. We also present an approximation to entropy measures that would otherwise be intractable to calculate for a grammar of that size. Empirical results demonstrate the utility of our methods in predicting human reading times.", "keyphrases": ["psycholinguistic modeling", "surprisal", "entropy", "processing difficulty", "pcfg parser"]} +{"id": "wan-2008-using", "title": "Using Bilingual Knowledge and Ensemble Techniques for Unsupervised Chinese Sentiment Analysis", "abstract": "It is a challenging task to identify sentiment polarity of Chinese reviews because the resources for Chinese sentiment analysis are limited. Instead of leveraging only monolingual Chinese knowledge, this study proposes a novel approach to leverage reliable English resources to improve Chinese sentiment analysis. Rather than simply projecting English resources onto Chinese resources, our approach first translates Chinese reviews into English reviews by machine translation services, and then identifies the sentiment polarity of English reviews by directly leveraging English resources. Furthermore, our approach performs sentiment analysis for both Chinese reviews and English reviews, and then uses ensemble methods to combine the individual analysis results. Experimental results on a dataset of 886 Chinese product reviews demonstrate the effectiveness of the proposed approach. The individual analysis of the translated English reviews outperforms the individual analysis of the original Chinese reviews, and the combination of the individual analysis results further improves the performance.", "keyphrases": ["bilingual knowledge", "chinese", "sentiment analysis", "ensemble method", "english lexicon"]} +{"id": "philip-etal-2020-monolingual", "title": "Monolingual Adapters for Zero-Shot Neural Machine Translation", "abstract": "We propose a novel adapter layer formalism for adapting multilingual models. They are more parameter-efficient than existing adapter layers while obtaining as good or better performance. The layers are specific to one language (as opposed to bilingual adapters), allowing us to compose them and generalize to unseen language-pairs. In this zero-shot setting, they obtain a median improvement of +2.77 BLEU points over a strong 20-language multilingual Transformer baseline trained on TED talks.", "keyphrases": ["adapter", "multilingual model", "zero-shot setting"]} +{"id": "abend-etal-2017-uccaapp", "title": "UCCAApp: Web-application for Syntactic and Semantic Phrase-based Annotation", "abstract": "We present UCCAApp, an open-source, flexible web-application for syntactic and semantic phrase-based annotation in general, and for UCCA annotation in particular.
UCCAApp supports a variety of formal properties that have proven useful for syntactic and semantic representation, such as discontiguous phrases, multiple parents and empty elements, making it useful to a variety of other annotation schemes with similar formal properties. UCCAApp\u2019s user interface is intuitive and user-friendly, so as to support annotation by users with no background in linguistics or formal representation. Indeed, a pilot version of the application has been successfully used in the compilation of the UCCA Wikipedia treebank by annotators with no previous linguistic training. The application and all accompanying resources are released as open source under the GNU public license, and are available online along with a live demo.", "keyphrases": ["web-application", "semantic phrase-based annotation", "uccaapp"]} +{"id": "li-etal-2011-composing", "title": "Composing Simple Image Descriptions using Web-scale N-grams", "abstract": "Studying natural language, and especially how people describe the world around them, can help us better understand the visual world. In turn, it can also help us in the quest to generate natural language that describes this world in a human manner. We present a simple yet effective approach to automatically compose image descriptions given computer vision based inputs and using web-scale n-grams. Unlike most previous work that summarizes or retrieves pre-existing text relevant to an image, our method composes sentences entirely from scratch. Experimental results indicate that it is viable to generate simple textual descriptions that are pertinent to the specific content of an image, while permitting creativity in the description -- making for more human-like annotations than previous approaches.", "keyphrases": ["image", "textual description", "previous approach", "spatial relationship"]} +{"id": "cheng-etal-2020-ent", "title": "ENT-DESC: Entity Description Generation by Exploring Knowledge Graph", "abstract": "Previous works on knowledge-to-text generation take as input a few RDF triples or key-value pairs conveying the knowledge of some entities to generate a natural language description. Existing datasets, such as WIKIBIO, WebNLG, and E2E, basically have a good alignment between an input triple/pair set and its output text. However, in practice, the input knowledge could be more than enough, since the output description may only cover the most significant knowledge. In this paper, we introduce a large-scale and challenging dataset to facilitate the study of such a practical scenario in KG-to-text. Our dataset involves retrieving abundant knowledge of various types of main entities from a large knowledge graph (KG), which makes the current graph-to-sequence models severely suffer from the problems of information loss and parameter explosion while generating the descriptions. We address these challenges by proposing a multi-graph structure that is able to represent the original graph information more comprehensively. Furthermore, we also incorporate aggregation methods that learn to extract the rich graph information.
Extensive experiments demonstrate the effectiveness of our model architecture.", "keyphrases": ["knowledge graph", "input knowledge", "multi-graph structure", "well text description"]} +{"id": "schwenk-li-2018-corpus", "title": "A Corpus for Multilingual Document Classification in Eight Languages", "abstract": "Cross-lingual document classification aims at training a document classifier on resources in one language and transferring it to a different language without any additional resources. Several approaches have been proposed in the literature and the current best practice is to evaluate them on a subset of the Reuters Corpus Volume 2. However, this subset covers only a few languages (English, German, French and Spanish) and almost all published works focus on the transfer between English and German. In addition, we have observed that the class prior distributions differ significantly between the languages. We argue that this complicates the evaluation of the multilinguality. In this paper, we propose a new subset of the Reuters corpus with balanced class priors for eight languages. By adding Italian, Russian, Japanese and Chinese, we cover languages which are very different with respect to syntax, morphology, etc. We provide strong baselines for all language transfer directions using multilingual word and sentence embeddings respectively. Our goal is to offer a freely available framework to evaluate cross-lingual document classification, and we hope to foster, by these means, research in this important area.", "keyphrases": ["multilingual document classification", "strong baseline", "sentence embedding"]} +{"id": "shardlow-2014-open", "title": "Out in the Open: Finding and Categorising Errors in the Lexical Simplification Pipeline", "abstract": "Lexical simplification is the task of automatically reducing the complexity of a text by identifying difficult words and replacing them with simpler alternatives. Whilst this is a valuable application of natural language generation, rudimentary lexical simplification systems suffer from a high error rate which often results in nonsensical, non-simple text. This paper seeks to characterise and quantify the errors which occur in a typical baseline lexical simplification system. We expose 6 distinct categories of error and propose a classification scheme for these. We also quantify these errors for a moderate size corpus, showing the magnitude of each error type. We find that for 183 identified simplification instances, only 19 (10.38%) result in a valid simplification, with the rest causing errors of varying gravity.", "keyphrases": ["lexical simplification pipeline", "alternative", "cwi"]} +{"id": "lin-su-2021-fast", "title": "How Fast can BERT Learn Simple Natural Language Inference?", "abstract": "This paper empirically studies whether BERT can really learn to conduct natural language inference (NLI) without utilizing hidden dataset bias; and how efficiently it can learn if it could. This is done via creating a simple entailment judgment case which involves only binary predicates in plain English. The results show that the learning process of BERT is very slow. However, the efficiency of learning can be greatly improved (data reduction by a factor of 1,500) if task-related features are added.
This suggests that domain knowledge greatly helps when conducting NLI with neural networks.", "keyphrases": ["bert", "natural language inference", "efficiency", "task-related feature"]} +{"id": "iida-etal-2003-incorporating", "title": "Incorporating Contextual Cues in Trainable Models for Coreference Resolution", "abstract": "We propose a method that incorporates various novel contextual cues into a machine learning approach for resolving coreference. Distinct characteristics of our model are (i) incorporating more linguistic features capturing contextual information that is more sophisticated than what is offered in Centering Theory, and (ii) a tournament model for selecting a referent. Our experiments show that this model significantly outperforms earlier machine learning approaches, such as Soon et al. (2001).", "keyphrases": ["coreference resolution", "candidate", "anaphor"]} +{"id": "das-2019-nuclearity", "title": "Nuclearity in RST and signals of coherence relations", "abstract": "We investigate the relationship between the notion of nuclearity as proposed in Rhetorical Structure Theory (RST) and the signalling of coherence relations. RST relations are categorized as either mononuclear (comprising a nucleus and a satellite span) or multinuclear (comprising two or more nuclei spans). We examine how mononuclear relations (e.g., Antithesis, Condition) and multinuclear relations (e.g., Contrast, List) are indicated by relational signals, more particularly by discourse markers (e.g., because, however, if, therefore). We conduct a corpus study, examining the distribution of either type of relations in the RST Discourse Treebank (Carlson et al., 2002) and the distribution of discourse markers for those relations in the RST Signalling Corpus (Das et al., 2015). Our results show that discourse markers are used more often to signal multinuclear relations than mononuclear relations. The findings also suggest a complex relationship between the relation types and syntactic categories of discourse markers (subordinating and coordinating conjunctions).", "keyphrases": ["rst", "coherence relation", "nuclearity"]} +{"id": "eom-etal-2012-using", "title": "Using semi-experts to derive judgments on word sense alignment: a pilot study", "abstract": "The overall goal of this project is to evaluate the performance of word sense alignment (WSA) systems, focusing on obtaining examples appropriate to language learners. Building a gold standard dataset based on human expert judgments is costly in time and labor, and thus we gauge the utility of using semi-experts in performing the annotation. In an online survey, we present a sense of a target word from one dictionary with senses from the other dictionary, asking for judgments of relatedness. We note the difficulty of agreement, yet the utility in using such results to evaluate WSA work. We find that one's treatment of related senses heavily impacts the results for WSA.", "keyphrases": ["semi-expert", "judgment", "word sense alignment"]} +{"id": "gillick-etal-2016-multilingual", "title": "Multilingual Language Processing From Bytes", "abstract": "We describe an LSTM-based model which we call Byte-to-Span (BTS) that reads text as bytes and outputs span annotations of the form [start, length, label] where start positions, lengths, and labels are separate entries in our vocabulary. Because we operate directly on unicode bytes rather than language-specific words or characters, we can analyze text in many languages with a single model.
Due to the small vocabulary size, these multilingual models are very compact, but produce results similar to or better than the state-of-the-art in Part-of-Speech tagging and Named Entity Recognition that use only the provided training datasets (no external data sources). Our models are learning \"from scratch\" in that they do not rely on any elements of the standard pipeline in Natural Language Processing (including tokenization), and thus can run in standalone fashion on raw text.", "keyphrases": ["byte", "part-of-speech tagging", "entity recognition", "tokenization", "multilingual language processing"]} +{"id": "williams-koehn-2011-agreement", "title": "Agreement Constraints for Statistical Machine Translation into German", "abstract": "Languages with rich inflectional morphology pose a difficult challenge for statistical machine translation. To address the problem of morphologically inconsistent output, we add unification-based constraints to the target-side of a string-to-tree model. By integrating constraint evaluation into the decoding process, implausible hypotheses can be penalised or filtered out during search. We use a simple heuristic process to extract agreement constraints for German and test our approach on an English-German system trained on WMT data, achieving a small improvement in translation accuracy as measured by BLEU.", "keyphrases": ["statistical machine translation", "string-to-tree model", "agreement constraint"]} +{"id": "berant-etal-2015-efficient", "title": "Efficient Global Learning of Entailment Graphs", "abstract": "Entailment rules between predicates are fundamental to many semantic-inference applications. Consequently, learning such rules has been an active field of research in recent years. Methods for learning entailment rules between predicates that take into account dependencies between different rules (e.g., entailment is a transitive relation) have been shown to improve rule quality, but suffer from scalability issues, that is, the number of predicates handled is often quite small. In this article, we present methods for learning transitive graphs that contain tens of thousands of nodes, where nodes represent predicates and edges correspond to entailment rules (termed entailment graphs). Our methods are able to scale to a large number of predicates by exploiting structural properties of entailment graphs such as the fact that they exhibit a \u201ctree-like\u201d property. We apply our methods on two data sets and demonstrate that our methods find high-quality solutions faster than methods proposed in the past, and moreover our methods for the first time scale to large graphs containing 20,000 nodes and more than 100,000 edges.", "keyphrases": ["entailment graph", "approximation method", "treenode-fix"]} +{"id": "kocisky-etal-2016-semantic", "title": "Semantic Parsing with Semi-Supervised Sequential Autoencoders", "abstract": "We present a novel semi-supervised approach for sequence transduction and apply it to semantic parsing. The unsupervised component is based on a generative model in which latent sentences generate the unpaired logical forms.
We apply this method to a number of semantic parsing tasks focusing on domains with limited access to labelled training data and extend those datasets with synthetically generated logical forms.", "keyphrases": ["generative model", "semantic parsing", "natural language utterance", "program"]} +{"id": "simianer-etal-2012-joint", "title": "Joint Feature Selection in Distributed Stochastic Learning for Large-Scale Discriminative Training in SMT", "abstract": "With a few exceptions, discriminative training in statistical machine translation (SMT) has been content with tuning weights for large feature sets on small development data. Evidence from machine learning indicates that increasing the training sample size results in better prediction. The goal of this paper is to show that this common wisdom can also be brought to bear upon SMT. We deploy local features for SCFG-based SMT that can be read off from rules at runtime, and present a learning algorithm that applies l1/l2 regularization for joint feature selection over distributed stochastic learning processes. We present experiments on learning on 1.5 million training sentences, and show significant improvements over tuning discriminative models on small development sets.", "keyphrases": ["tuning", "million", "joint feature selection", "inter alia"]} +{"id": "ghosh-etal-2016-coarse", "title": "Coarse-grained Argumentation Features for Scoring Persuasive Essays", "abstract": "Scoring the quality of persuasive essays is an important goal of discourse analysis, addressed most recently with high-level persuasion-related features such as thesis clarity, or opinions and their targets. We investigate whether argumentation features derived from a coarse-grained argumentative structure of essays can help predict essay scores. We introduce a set of argumentation features related to argument components (e.g., the number of claims and premises), argument relations (e.g., the number of supported claims) and typology of argumentative structure (chains, trees). We show that these features are good predictors of human scores for TOEFL essays, both when the coarse-grained argumentative structure is manually annotated and automatically predicted.", "keyphrases": ["essay", "argument component", "automatic essay scoring"]} +{"id": "liang-etal-2018-multimodal", "title": "Multimodal Language Analysis with Recurrent Multistage Fusion", "abstract": "Computational modeling of human multimodal language is an emerging research area in natural language processing spanning the language, visual and acoustic modalities. Comprehending multimodal language requires modeling not only the interactions within each modality (intra-modal interactions) but more importantly the interactions between modalities (cross-modal interactions). In this paper, we propose the Recurrent Multistage Fusion Network (RMFN) which decomposes the fusion problem into multiple stages, each of them focused on a subset of multimodal signals for specialized, effective fusion. Cross-modal interactions are modeled using this multistage fusion approach which builds upon intermediate representations of previous stages. Temporal and intra-modal interactions are modeled by integrating our proposed fusion approach with a system of recurrent neural networks. The RMFN displays state-of-the-art performance in modeling human multimodal language across three public datasets relating to multimodal sentiment analysis, emotion recognition, and speaker traits recognition.
We provide visualizations to show that each stage of fusion focuses on a different subset of multimodal signals, learning increasingly discriminative multimodal representations.", "keyphrases": ["fusion", "cross-modal interaction", "stage", "discriminative multimodal representation"]} +{"id": "yao-etal-2011-structured", "title": "Structured Relation Discovery using Generative Models", "abstract": "We explore unsupervised approaches to relation extraction between two named entities; for instance, the semantic bornIn relation between a person and location entity. Concretely, we propose a series of generative probabilistic models, broadly similar to topic models, each of which generates a corpus of observed triples of entity mention pairs and the surface syntactic dependency path between them. The output of each model is a clustering of observed relation tuples and their associated textual expressions to underlying semantic relation types. Our proposed models exploit entity type constraints within a relation as well as features on the dependency path between entity mentions. We examine the effectiveness of our approach via multiple evaluations and demonstrate 12% error reduction in precision over a state-of-the-art weakly supervised baseline.", "keyphrases": ["relation discovery", "topic model", "openre", "group"]} +{"id": "muller-gurevych-2009-study", "title": "A Study on the Semantic Relatedness of Query and Document Terms in Information Retrieval", "abstract": "The use of lexical semantic knowledge in information retrieval has been a field of active study for a long time. Collaborative knowledge bases like Wikipedia and Wiktionary, which have been applied in computational methods only recently, offer new possibilities to enhance information retrieval. In order to find the most beneficial way to employ these resources, we analyze the lexical semantic relations that hold among query and document terms and compare how these relations are represented by a measure for semantic relatedness. We explore the potential of different indicators of document relevance that are based on semantic relatedness and compare the characteristics and performance of the knowledge bases Wikipedia, Wiktionary and WordNet.", "keyphrases": ["semantic relatedness", "query", "document term"]} +{"id": "de-melo-2014-etymological", "title": "Etymological Wordnet: Tracing The History of Words", "abstract": "Research on the history of words has led to remarkable insights about language and also about the history of human civilization more generally. This paper presents the Etymological Wordnet, the first database that aims at making word origin information available as a large, machine-readable network of words in many languages. The information in this resource is obtained from Wiktionary. Extracting a network of etymological information from Wiktionary requires significant effort, as much of the etymological information is only given in prose. We rely on custom pattern matching techniques and mine a large network with over 500,000 word origin links as well as over 2 million derivational/compositional links.", "keyphrases": ["history", "wiktionary", "etymological wordnet"]} +{"id": "clematide-etal-2012-mlsa", "title": "MLSA \u2014 A Multi-layered Reference Corpus for German Sentiment Analysis", "abstract": "In this paper, we describe MLSA, a publicly available multi-layered reference corpus for German-language sentiment analysis.
The construction of the corpus is based on the manual annotation of 270 German-language sentences considering three different layers of granularity. The sentence-layer annotation, as the most coarse-grained annotation, focuses on aspects of objectivity, subjectivity and the overall polarity of the respective sentences. Layer 2 is concerned with polarity on the word- and phrase-level, annotating both subjective and factual language. The annotations on Layer 3 focus on the expression-level, denoting frames of private states such as objective and direct speech events. These three layers and their respective annotations are intended to be fully independent of each other. At the same time, exploring for and discovering interactions that may exist between different layers should also be possible. The reliability of the respective annotations was assessed using the average pairwise agreement and Fleiss' multi-rater measures. We believe that MLSA is a beneficial resource for sentiment analysis research, algorithms and applications that focus on the German language.", "keyphrases": ["multi-layered reference corpus", "german sentiment analysis", "subjectivity", "mlsa"]} +{"id": "joulin-etal-2018-loss", "title": "Loss in Translation: Learning Bilingual Word Mapping with a Retrieval Criterion", "abstract": "Continuous word representations learned separately on distinct languages can be aligned so that their words become comparable in a common space. Existing works typically solve a quadratic problem to learn an orthogonal matrix aligning a bilingual lexicon, and use a retrieval criterion for inference. In this paper, we propose a unified formulation that directly optimizes a retrieval criterion in an end-to-end fashion. Our experiments on standard benchmarks show that our approach outperforms the state of the art on word translation, with the biggest improvements observed for distant language pairs such as English-Chinese.", "keyphrases": ["retrieval criterion", "loss", "monolingual word embedding", "new objective function", "semantic space"]} +{"id": "white-etal-2017-inference", "title": "Inference is Everything: Recasting Semantic Resources into a Unified Evaluation Framework", "abstract": "We propose to unify a variety of existing semantic classification tasks, such as semantic role labeling, anaphora resolution, and paraphrase detection, under the heading of Recognizing Textual Entailment (RTE). We present a general strategy to automatically generate one or more sentential hypotheses based on an input sentence and pre-existing manual semantic annotations. The resulting suite of datasets enables us to probe a statistical RTE model's performance on different aspects of semantics. We demonstrate the value of this approach by investigating the behavior of a popular neural network RTE model.", "keyphrases": ["anaphora resolution", "nli", "natural language inference"]} +{"id": "rodriguez-etal-2008-arabic", "title": "Arabic WordNet: Semi-automatic Extensions using Bayesian Inference", "abstract": "This presentation focuses on the semi-automatic extension of Arabic WordNet (AWN) using lexical and morphological rules and applying Bayesian inference. We briefly report on the current status of AWN and propose a way of extending its coverage by taking advantage of a limited set of highly productive Arabic morphological rules for deriving a range of semantically related word forms from verb entries.
The application of this set of rules, combined with the use of bilingual Arabic-English resources and Princeton\u2019s WordNet, allows the generation of a graph representing the semantic neighbourhood of the original word. In previous work, a set of associations between the hypothesized Arabic words and English synsets was proposed on the basis of this graph. Here, a novel approach to extending AWN is presented whereby a Bayesian Network is automatically built from the graph and then the net is used as an inferencing mechanism for scoring the set of candidate associations. Both on its own and in combination with the previous technique, this new approach has led to improved results.", "keyphrases": ["semi-automatic extension", "bayesian inference", "arabic wordnet"]} +{"id": "kim-etal-2019-pivot", "title": "Pivot-based Transfer Learning for Neural Machine Translation between Non-English Languages", "abstract": "We present effective pre-training strategies for neural machine translation (NMT) using parallel corpora involving a pivot language, i.e., source-pivot and pivot-target, leading to a significant improvement in source-target translation. We propose three methods to increase the relation among source, pivot, and target languages in the pre-training: 1) step-wise training of a single model for different language pairs, 2) additional adapter component to smoothly connect pre-trained encoder and decoder, and 3) cross-lingual encoder training via autoencoding of the pivot language. Our methods greatly outperform multilingual models by up to +2.6% BLEU in WMT 2019 French-German and German-Czech tasks. We show that our improvements are valid also in zero-shot/zero-resource scenarios.", "keyphrases": ["transfer learning", "neural machine translation", "pivot language", "resource language pair"]} +{"id": "raghu-etal-2019-disentangling", "title": "Disentangling Language and Knowledge in Task-Oriented Dialogs", "abstract": "The Knowledge Base (KB) used for real-world applications, such as booking a movie or restaurant reservation, keeps changing over time. End-to-end neural networks trained for these task-oriented dialogs are expected to be immune to any changes in the KB. However, existing approaches break down when asked to handle such changes. We propose an encoder-decoder architecture (BoSsNet) with a novel Bag-of-Sequences (BoSs) memory, which facilitates the disentangled learning of the response's language model and its knowledge incorporation. Consequently, the KB can be modified with new knowledge without a drop in interpretability. We find that BoSsNeT outperforms state-of-the-art models, with considerable improvements (10%) on bAbI OOV test sets and other human-human datasets. We also systematically modify existing datasets to measure disentanglement and show BoSsNeT to be robust to KB modifications.", "keyphrases": ["task-oriented dialog", "language model", "knowledge incorporation"]} +{"id": "marcheggiani-titov-2016-discrete", "title": "Discrete-State Variational Autoencoders for Joint Discovery and Factorization of Relations", "abstract": "We present a method for unsupervised open-domain relation discovery. In contrast to previous (mostly generative and agglomerative clustering) approaches, our model relies on rich contextual features and makes minimal independence assumptions.
The model is composed of two parts: a feature-rich relation extractor, which predicts a semantic relation between two entities, and a factorization model, which reconstructs arguments (i.e., the entities) relying on the predicted relation. The two components are estimated jointly so as to minimize errors in recovering arguments. We study factorization models inspired by previous work in relation factorization and selectional preference modeling. Our models substantially outperform the generative and agglomerative-clustering counterparts and achieve state-of-the-art performance.", "keyphrases": ["variational autoencoder", "relation discovery", "vae"]} +{"id": "gan-etal-2021-towards", "title": "Towards Robustness of Text-to-SQL Models against Synonym Substitution", "abstract": "Recently, there has been significant progress in studying neural networks to translate text descriptions into SQL queries. Despite achieving good performance on some public benchmarks, existing text-to-SQL models typically rely on the lexical matching between words in natural language (NL) questions and tokens in table schemas, which may render the models vulnerable to attacks that break the schema linking mechanism. In this work, we investigate the robustness of text-to-SQL models to synonym substitution. In particular, we introduce Spider-Syn, a human-curated dataset based on the Spider benchmark for text-to-SQL translation. NL questions in Spider-Syn are modified from Spider, by replacing their schema-related words with manually selected synonyms that reflect real-world question paraphrases. We observe that the accuracy dramatically drops by eliminating such explicit correspondence between NL questions and table schemas, even if the synonyms are not adversarially selected to conduct worst-case attacks. Finally, we present two categories of approaches to improve the model robustness. The first category of approaches utilizes additional synonym annotations for table schemas by modifying the model input, while the second category is based on adversarial training. We demonstrate that both categories of approaches significantly outperform their counterparts without the defense, and the first category of approaches are more effective.", "keyphrases": ["text-to-sql model", "synonym substitution", "schema-related word"]} +{"id": "xiao-etal-2012-niutrans", "title": "NiuTrans: An Open Source Toolkit for Phrase-based and Syntax-based Machine Translation", "abstract": "We present a new open source toolkit for phrase-based and syntax-based machine translation. The toolkit supports several state-of-the-art models developed in statistical machine translation, including the phrase-based model, the hierarchical phrase-based model, and various syntax-based models. The key innovation provided by the toolkit is that the decoder can work with various grammars and offers different choices of decoding algorithms, such as phrase-based decoding, decoding as parsing/tree-parsing and forest-based decoding.
Moreover, several useful utilities were distributed with the toolkit, including a discriminative reordering model, a simple and fast language model, and an implementation of minimum error rate training for weight tuning.", "keyphrases": ["open source toolkit", "syntax-based machine translation", "niutran"]} +{"id": "rottger-pierrehumbert-2021-temporal-adaptation", "title": "Temporal Adaptation of BERT and Performance on Downstream Document Classification: Insights from Social Media", "abstract": "Language use differs between domains and even within a domain, language use changes over time. For pre-trained language models like BERT, domain adaptation through continued pre-training has been shown to improve performance on in-domain downstream tasks. In this article, we investigate whether temporal adaptation can bring additional benefits. For this purpose, we introduce a corpus of social media comments sampled over three years. It contains unlabelled data for adaptation and evaluation on an upstream masked language modelling task as well as labelled data for fine-tuning and evaluation on a downstream document classification task. We find that temporality matters for both tasks: temporal adaptation improves upstream and temporal fine-tuning downstream task performance. Time-specific models generally perform better on past than on future test sets, which matches evidence on the bursty usage of topical words. However, adapting BERT to time and domain does not improve performance on the downstream task over only adapting to domain. Token-level analysis shows that temporal adaptation captures event-driven changes in language use in the downstream task, but not those changes that are actually relevant to task performance. Based on our findings, we discuss when temporal adaptation may be more effective.", "keyphrases": ["bert", "downstream task", "temporal adaptation", "upto-date corpora"]} +{"id": "mehryary-etal-2016-deep", "title": "Deep Learning with Minimal Training Data: TurkuNLP Entry in the BioNLP Shared Task 2016", "abstract": "We present the TurkuNLP entry to the BioNLP Shared Task 2016 Bacteria Biotopes event extraction (BB3-event) subtask. We propose a deep learning-based approach to event extraction using a combination of several Long Short-Term Memory (LSTM) networks over syntactic dependency graphs. Features for the proposed neural network are generated based on the shortest path connecting the two candidate entities in the dependency graph. We further detail how this network can be efficiently trained to have good generalization performance even when only a very limited number of training examples are available and part-of-speech (POS) and dependency type feature representations must be learned from scratch. Our method ranked second among the entries to the shared task, achieving an F-score of 52.1% with 62.3% precision and 44.8% recall.", "keyphrases": ["entry", "bionlp shared task", "deep learning"]} +{"id": "tran-etal-2010-treematch", "title": "TreeMatch: A Fully Unsupervised WSD System Using Dependency Knowledge on a Specific Domain", "abstract": "Word sense disambiguation (WSD) is one of the main challenges in Computational Linguistics. TreeMatch is a WSD system originally developed using data from SemEval 2007 Task 7 (Coarse-grained English All-words Task) that has been adapted for use in SemEval 2010 Task 17 (All-words Word Sense Disambiguation on a Specific Domain).
The system is based on a fully unsupervised method using dependency knowledge drawn from a domain-specific knowledge base that was built for this task. When evaluated on the task, the system's precision is above the First Sense Baseline.", "keyphrases": ["wsd system", "dependency knowledge", "treematch"]} +{"id": "srivastava-etal-2018-zero", "title": "Zero-shot Learning of Classifiers from Natural Language Quantification", "abstract": "Humans can efficiently learn new concepts using language. We present a framework through which a set of explanations of a concept can be used to learn a classifier without access to any labeled examples. We use semantic parsing to map explanations to probabilistic assertions grounded in latent class labels and observed attributes of unlabeled data, and leverage the differential semantics of linguistic quantifiers (e.g., `usually' vs `always') to drive model training. Experiments on three domains show that the learned classifiers outperform previous approaches for learning with limited data, and are comparable with fully supervised classifiers trained from a small number of labeled examples.", "keyphrases": ["quantifier", "zero-shot learning", "semantic parser"]} +{"id": "shimbo-hara-2007-discriminative", "title": "A Discriminative Learning Model for Coordinate Conjunctions", "abstract": "We propose a sequence-alignment based method for detecting and disambiguating coordinate conjunctions. In this method, averaged perceptron learning is used to adapt the substitution matrix to the training data drawn from the target language and domain. To reduce the cost of training data construction, our method accepts training examples in which complete word-by-word alignment labels are missing, but instead only the boundaries of coordinated conjuncts are marked. We report promising empirical results in detecting and disambiguating coordinated noun phrases in the GENIA corpus, despite employing a relatively small number of training examples and minimal features.", "keyphrases": ["discriminative learning model", "conjunct", "coordination disambiguation", "coordinate structure", "alignment-based method"]} +{"id": "morgan-etal-2022-isl", "title": "ISL-LEX v.1: An Online Lexical Resource of Israeli Sign Language", "abstract": "This paper describes a new online lexical resource and interactive tool for Israeli Sign Language, ISL-LEX v.1. The dataset contains 961 non-compound ISL signs with the following information: subjective frequency ratings from native signers, iconicity ratings from native and non-native signers (presented separately), and phonological properties in six domains. The selection of signs was also designed to reflect a broad distinction between those signs acquired early in childhood and those acquired later. ISL-LEX is an online interface built using the SIGN-LEX visualization (Caselli et al. 2022), and is intended for use by researchers, educators, and students. It is therefore offered in two text-based versions, English and Hebrew, with video instructions in ISL.", "keyphrases": ["online lexical resource", "israeli sign language", "isl-lex v.1"]} +{"id": "bunescu-2008-learning", "title": "Learning with Probabilistic Features for Improved Pipeline Models", "abstract": "We present a novel learning framework for pipeline models aimed at improving the communication between consecutive stages in a pipeline.
Our method exploits the confidence scores associated with outputs at any given stage in a pipeline in order to compute probabilistic features used at other stages downstream. We describe a simple method of integrating probabilistic features into the linear scoring functions used by state-of-the-art machine learning algorithms. Experimental evaluation on dependency parsing and named entity recognition demonstrates the superiority of our approach over the baseline pipeline models, especially when upstream stages in the pipeline exhibit low accuracy.", "keyphrases": ["probabilistic feature", "pipeline model", "weight"]} +{"id": "espla-etal-2019-paracrawl", "title": "ParaCrawl: Web-scale parallel corpora for the languages of the EU", "abstract": "We describe two projects funded by the Connecting Europe Facility, Provision of Web-Scale Parallel Corpora for Official European Languages (2016-EU-IA-0114, completed) and Broader Web-Scale Provision of Parallel Corpora for European Languages (2017-EU-IA-0178, ongoing), which aim at harvesting parallel corpora from the Internet for languages used in the European Union. In addition to parallel corpora, the project releases successive versions of the free/open-source web crawling software used.", "keyphrases": ["web-scale parallel corpora", "web", "paracrawl"]} +{"id": "rozovskaya-roth-2013-joint", "title": "Joint Learning and Inference for Grammatical Error Correction", "abstract": "State-of-the-art systems for grammatical error correction are based on a collection of independently-trained models for specific errors. Such models ignore linguistic interactions at the sentence level and thus do poorly on mistakes that involve grammatical dependencies among several words. In this paper, we identify linguistic structures with interacting grammatical properties and propose to address such dependencies via joint inference and joint learning. We show that it is possible to identify interactions well enough to facilitate a joint approach and, consequently, that joint methods correct incoherent predictions that independently-trained classifiers tend to produce. Furthermore, because the joint learning model considers interacting phenomena during training, it is able to identify mistakes that require making multiple changes simultaneously and that standard approaches miss. Overall, our model significantly outperforms the Illinois system that placed first in the CoNLL-2013 shared task on grammatical error correction.", "keyphrases": ["grammatical error correction", "such dependency", "illinois system", "joint learning", "linear programming"]} +{"id": "guo-etal-2018-effective", "title": "Effective Parallel Corpus Mining using Bilingual Sentence Embeddings", "abstract": "This paper presents an effective approach for parallel corpus mining using bilingual sentence embeddings. Our embedding models are trained to produce similar representations exclusively for bilingual sentence pairs that are translations of each other. This is achieved using a novel training method that introduces hard negatives consisting of sentences that are not translations but have some degree of semantic similarity. The quality of the resulting embeddings is evaluated on parallel corpus reconstruction and by assessing machine translation systems trained on gold vs. mined sentence pairs. We find that the sentence embeddings can be used to reconstruct the United Nations Parallel Corpus (Ziemski et al., 2016) at the sentence-level with a precision of 48.9% for en-fr and 54.9% for en-es.
When adapted to document-level matching, we achieve a parallel document matching accuracy that is comparable to the significantly more computationally intensive approach of Uszkoreit et al. (2010). Using reconstructed parallel data, we are able to train NMT models that perform nearly as well as models trained on the original data (within 1-2 BLEU).", "keyphrases": ["parallel corpus mining", "sentence embedding", "semantic similarity", "different language"]} +{"id": "dale-kilgarriff-2010-helping", "title": "Helping Our Own: Text Massaging for Computational Linguistics as a New Shared Task", "abstract": "In this paper, we propose a new shared task called HOO: Helping Our Own. The aim is to use tools and techniques developed in computational linguistics to help people writing about computational linguistics. We describe a text-to-text generation scenario that poses challenging research questions, and delivers practical outcomes that are useful in the first case to our own community and potentially much more widely. Two specific factors make us optimistic that this task will generate useful outcomes: one is the availability of the ACL Anthology, a large corpus of the target text type; the other is that CL researchers who are non-native speakers of English will be motivated to use prototype systems, providing informed and precise feedback in large quantity. We lay out our plans in detail and invite comment and critique with the aim of improving the nature of the planned exercise.", "keyphrases": ["writing", "non-native speaker", "grammatical error"]} +{"id": "avraham-goldberg-2016-improving", "title": "Improving Reliability of Word Similarity Evaluation by Redesigning Annotation Task and Performance Measure", "abstract": "We suggest a new method for creating and using gold-standard datasets for word similarity evaluation. Our goal is to improve the reliability of the evaluation, and we do this by redesigning the annotation task to achieve higher inter-rater agreement, and by defining a performance measure which takes the reliability of each annotation decision in the dataset into account.", "keyphrases": ["reliability", "word similarity evaluation", "annotation task"]} +{"id": "kotonya-toni-2020-explainable-automated", "title": "Explainable Automated Fact-Checking for Public Health Claims", "abstract": "Fact-checking is the task of verifying the veracity of claims by assessing their assertions against credible evidence. The vast majority of fact-checking studies focus exclusively on political claims. Very little research explores fact-checking for other topics, specifically subject matters for which expertise is required. We present the first study of explainable fact-checking for claims which require specific expertise. For our case study we choose the setting of public health. To support this case study we construct a new dataset PUBHEALTH of 11.8K claims accompanied by journalist-crafted, gold-standard explanations (i.e., judgments) to support the fact-check labels for claims. We explore two tasks: veracity prediction and explanation generation. We also define and evaluate, with humans and computationally, three coherence properties of explanation quality.
Our results indicate that, by training on in-domain data, gains can be made in explainable, automated fact-checking for claims which require specific expertise.", "keyphrases": ["automated fact-checking", "public health claim", "claim", "pubhealth", "explanation quality"]} +{"id": "buck-etal-2014-n", "title": "N-gram Counts and Language Models from the Common Crawl", "abstract": "We contribute 5-gram counts and language models trained on the Common Crawl corpus, a collection of over 9 billion web pages. This release improves upon the Google n-gram counts in two key ways: the inclusion of low-count entries and deduplication to reduce boilerplate. By preserving singletons, we were able to use Kneser-Ney smoothing to build large language models. This paper describes how the corpus was processed with emphasis on the problems that arise in working with data at this scale. Our unpruned Kneser-Ney English 5-gram language model, built on 975 billion deduplicated tokens, contains over 500 billion unique n-grams. We show gains of 0.5-1.4 BLEU by using large language models to translate into various languages.", "keyphrases": ["language model", "common crawl", "user-generated content"]} +{"id": "li-sporleder-2010-using", "title": "Using Gaussian Mixture Models to Detect Figurative Language in Context", "abstract": "We present a Gaussian Mixture model for detecting different types of figurative language in context. We show that this model performs well when the parameters are estimated in an unsupervised fashion using EM. Performance can be improved further by estimating the parameters from a small annotated data set.", "keyphrases": ["gaussian mixture models", "figurative language", "cohesion", "tokens", "idiom recognition"]} +{"id": "li-etal-2020-docbank", "title": "DocBank: A Benchmark Dataset for Document Layout Analysis", "abstract": "Document layout analysis usually relies on computer vision models to understand documents while ignoring textual information that is vital to capture. Meanwhile, high quality labeled datasets with both visual and textual information are still insufficient. In this paper, we present DocBank, a benchmark dataset that contains 500K document pages with fine-grained token-level annotations for document layout analysis. DocBank is constructed using a simple yet effective way with weak supervision from the LaTeX documents available on arXiv.com. With DocBank, models from different modalities can be compared fairly and multi-modal approaches will be further investigated and boost the performance of document layout analysis. We build several strong baselines and manually split train/dev/test sets for evaluation. Experiment results show that models trained on DocBank accurately recognize the layout information for a variety of documents. The DocBank dataset is publicly available at .", "keyphrases": ["benchmark dataset", "document layout analysis", "docbank"]} +{"id": "meyer-etal-2011-multilingual", "title": "Multilingual Annotation and Disambiguation of Discourse Connectives for Machine Translation", "abstract": "Many discourse connectives can signal several types of relations between sentences. Their automatic disambiguation, i.e. the labeling of the correct sense of each occurrence, is important for discourse parsing, but could also be helpful to machine translation. We describe new approaches for improving the accuracy of manual annotation of three discourse connectives (two English, one French) by using parallel corpora.
An appropriate set of labels for each connective can be found using information from their translations. Our results for automatic disambiguation are state-of-the-art, at up to 85% accuracy using surface features. Using feature analysis, contextual features are shown to be useful across languages and connectives.", "keyphrases": ["disambiguation", "discourse connective", "machine translation"]} +{"id": "artetxe-etal-2020-cross", "title": "On the Cross-lingual Transferability of Monolingual Representations", "abstract": "State-of-the-art unsupervised multilingual models (e.g., multilingual BERT) have been shown to generalize in a zero-shot cross-lingual setting. This generalization ability has been attributed to the use of a shared subword vocabulary and joint training across multiple languages giving rise to deep multilingual abstractions. We evaluate this hypothesis by designing an alternative approach that transfers a monolingual model to new languages at the lexical level. More concretely, we first train a transformer-based masked language model on one language, and transfer it to a new language by learning a new embedding matrix with the same masked language modeling objective, freezing parameters of all other layers. This approach does not rely on a shared vocabulary or joint training. However, we show that it is competitive with multilingual BERT on standard cross-lingual classification benchmarks and on a new Cross-lingual Question Answering Dataset (XQuAD). Our results contradict common beliefs of the basis of the generalization ability of multilingual models and suggest that deep monolingual models learn some abstractions that generalize across languages. We also release XQuAD as a more comprehensive cross-lingual benchmark, which comprises 240 paragraphs and 1190 question-answer pairs from SQuAD v1.1 translated into ten languages by professional translators.", "keyphrases": ["cross-lingual transferability", "language model", "paragraph", "professional translator"]} +{"id": "oco-roxas-2018-survey", "title": "A Survey of Machine Translation Work in the Philippines: From 1998 to 2018", "abstract": "In this paper, we present a survey covering the last 20 years of machine translation work in the Philippines. We detail the various approaches used and innovations applied. We also discuss the various mechanisms and support that keep the MT community thriving, as well as the challenges ahead.", "keyphrases": ["survey", "machine translation work", "philippines"]} +{"id": "shrestha-mckeown-2004-detection", "title": "Detection of Question-Answer Pairs in Email Conversations", "abstract": "While sentence extraction as an approach to summarization has been shown to work in documents of certain genres, because of the conversational nature of email communication where utterances are made in relation to one made previously, sentence extraction may not capture the necessary segments of dialogue that would make a summary coherent. In this paper, we present our work on the detection of question-answer pairs in an email conversation for the task of email summarization. 
We show that various features based on the structure of email-threads can be used to improve upon lexical similarity of discourse segments for question-answer pairing.", "keyphrases": ["question-answer pair", "email conversation", "email summarization"]} +{"id": "isonuma-etal-2017-extractive", "title": "Extractive Summarization Using Multi-Task Learning with Document Classification", "abstract": "The need for automatic document summarization that can be used for practical applications is increasing rapidly. In this paper, we propose a general framework for summarization that extracts sentences from a document using externally related information. Our work is aimed at single document summarization using small amounts of reference summaries. In particular, we address document summarization in the framework of multi-task learning using curriculum learning for sentence extraction and document classification. The proposed framework enables us to obtain better feature representations to extract sentences from documents. We evaluate our proposed summarization method on two datasets: financial report and news corpus. Experimental results demonstrate that our summarizers achieve performance that is comparable to state-of-the-art systems.", "keyphrases": ["summarization", "multi-task learning", "document classification"]} +{"id": "isozaki-etal-2010-automatic", "title": "Automatic Evaluation of Translation Quality for Distant Language Pairs", "abstract": "Automatic evaluation of Machine Translation (MT) quality is essential to developing high-quality MT systems. Various evaluation metrics have been proposed, and BLEU is now used as the de facto standard metric. However, when we consider translation between distant language pairs such as Japanese and English, most popular metrics (e.g., BLEU, NIST, PER, and TER) do not work well. It is well known that Japanese and English have completely different word orders, and special care must be paid to word order in translation. Otherwise, translations with wrong word order often lead to misunderstanding and incomprehensibility. For instance, SMT-based Japanese-to-English translators tend to translate 'A because B' as 'B because A.' Thus, word order is the most important problem for distant language translation. However, conventional evaluation metrics do not significantly penalize such word order mistakes. Therefore, locally optimizing these metrics leads to inadequate translations. In this paper, we propose an automatic evaluation metric based on rank correlation coefficients modified with precision. Our meta-evaluation of the NTCIR-7 PATMT JE task data shows that this metric outperforms conventional metrics.", "keyphrases": ["translation quality", "distant language pair", "word order", "conventional metric", "automatic evaluation"]} +{"id": "malmasi-dras-2015-language", "title": "Language Identification using Classifier Ensembles", "abstract": "In this paper we describe the language identification system we developed for the Discriminating Similar Languages (DSL) 2015 shared task. We constructed a classifier ensemble composed of several Support Vector Machine (SVM) base classifiers, each trained on a single feature type. Our feature types include character 1\u20136 grams and word unigrams and bigrams. 
Using this system we were able to outperform the other entries in the closed training track of the DSL 2015 shared task, achieving the best accuracy of 95.54%.", "keyphrases": ["ensemble", "feature type", "language identification"]} +{"id": "quirk-etal-2005-dependency", "title": "Dependency Treelet Translation: Syntactically Informed Phrasal SMT", "abstract": "We describe a novel approach to statistical machine translation that combines syntactic information in the source language with recent advances in phrasal translation. This method requires a source-language dependency parser, target language word segmentation and an unsupervised word alignment component. We align a parallel corpus, project the source dependency parse onto the target sentence, extract dependency treelet translation pairs, and train a tree-based ordering model. We describe an efficient decoder and show that using these tree-based models in combination with conventional SMT models provides a promising approach that incorporates the power of phrasal SMT with the linguistic generality available in a parser.", "keyphrases": ["phrasal smt", "generalization", "dependency treelet translation", "smt system", "source side"]} +{"id": "welbl-etal-2018-constructing", "title": "Constructing Datasets for Multi-hop Reading Comprehension Across Documents", "abstract": "Most Reading Comprehension methods limit themselves to queries which can be answered using a single sentence, paragraph, or document. Enabling models to combine disjoint pieces of textual evidence would extend the scope of machine comprehension methods, but currently no resources exist to train and test this capability. We propose a novel task to encourage the development of models for text understanding across multiple documents and to investigate the limits of existing methods. In our task, a model learns to seek and combine evidence \u2014 effectively performing multihop, alias multi-step, inference. We devise a methodology to produce datasets for this task, given a collection of query-answer pairs and thematically linked documents. Two datasets from different domains are induced, and we identify potential pitfalls and devise circumvention strategies. We evaluate two previously proposed competitive models and find that one can integrate information across documents. However, both models struggle to select relevant information; and providing documents guaranteed to be relevant greatly improves their performance. While the models outperform several strong baselines, their best accuracy reaches 54.5% on an annotated test set, compared to human performance at 85.0%, leaving ample room for improvement.", "keyphrases": ["comprehension", "textual evidence", "multiple document"]} +{"id": "he-etal-2017-deep", "title": "Deep Semantic Role Labeling: What Works and What's Next", "abstract": "We introduce a new deep learning model for semantic role labeling (SRL) that significantly improves the state of the art, along with detailed analyses to reveal its strengths and limitations. We use a deep highway BiLSTM architecture with constrained decoding, while observing a number of recent best practices for initialization and regularization. Our 8-layer ensemble model achieves 83.2 F1 on the CoNLL 2005 test set and 83.4 F1 on CoNLL 2012, roughly a 10% relative error reduction over the previous state of the art.
Extensive empirical analysis of these gains shows that (1) deep models excel at recovering long-distance dependencies but can still make surprisingly obvious errors, and (2) that there is still room for syntactic parsers to improve these results.", "keyphrases": ["semantic role labeling", "neural srl model", "structured prediction task", "dependency srl", "output structure"]} +{"id": "shi-etal-2016-deep", "title": "Deep LSTM based Feature Mapping for Query Classification", "abstract": "Traditional convolutional neural network (CNN) based query classification uses linear feature mapping in its convolution operation. The recurrent neural network (RNN) differs from a CNN in representing word sequence with their ordering information kept explicitly. We propose using a deep long-short-term-memory (DLSTM) based feature mapping to learn feature representation for CNN. The DLSTM, which is a stack of LSTM units, has different order of feature representations at different depth of LSTM unit. The bottom LSTM unit, equipped with input and output gates, extracts the first order feature representation from current word. To extract higher order nonlinear feature representation, the LSTM unit at higher position gets input from two parts. First part is the lower LSTM unit\u2019s memory cell from previous word. Second part is the lower LSTM unit\u2019s hidden output from current word. In this way, the DLSTM captures the nonlinear nonconsecutive interaction within n-grams. Using an architecture that combines a stack of the DLSTM layers with a traditional CNN layer, we have observed new state-of-the-art query classification accuracy on benchmark data sets for query classification.", "keyphrases": ["feature mapping", "query classification", "cnn", "deep lstm"]} +{"id": "huang-etal-2017-moodswipe", "title": "MoodSwipe: A Soft Keyboard that Suggests Messages Based on User-Specified Emotions", "abstract": "We present MoodSwipe, a soft keyboard that suggests text messages given the user-specified emotions utilizing the real dialog data. The aim of MoodSwipe is to create a convenient user interface to enjoy the technology of emotion classification and text suggestion, and at the same time to collect labeled data automatically for developing more advanced technologies. While users select the MoodSwipe keyboard, they can type as usual but sense the emotion conveyed by their text and receive suggestions for their message as a benefit. In MoodSwipe, the detected emotions serve as the medium for suggested texts, where viewing the latter is the incentive to correcting the former. We conduct several experiments to show the superiority of the emotion classification models trained on the dialog data, and further to verify good emotion cues are important context for text suggestion.", "keyphrases": ["soft keyboard", "user-specified emotion", "moodswipe"]} +{"id": "tu-etal-2020-engine", "title": "ENGINE: Energy-Based Inference Networks for Non-Autoregressive Machine Translation", "abstract": "We propose to train a non-autoregressive machine translation model to minimize the energy defined by a pretrained autoregressive model. In particular, we view our non-autoregressive translation system as an inference network (Tu and Gimpel, 2018) trained to minimize the autoregressive teacher energy. This contrasts with the popular approach of training a non-autoregressive model on a distilled corpus consisting of the beam-searched outputs of such a teacher model.
Our approach, which we call ENGINE (ENerGy-based Inference NEtworks), achieves state-of-the-art non-autoregressive results on the IWSLT 2014 DE-EN and WMT 2016 RO-EN datasets, approaching the performance of autoregressive models.", "keyphrases": ["energy-based inference networks", "non-autoregressive machine translation", "engine"]} +{"id": "kreutzer-etal-2017-bandit", "title": "Bandit Structured Prediction for Neural Sequence-to-Sequence Learning", "abstract": "Bandit structured prediction describes a stochastic optimization framework where learning is performed from partial feedback. This feedback is received in the form of a task loss evaluation to a predicted output structure, without having access to gold standard structures. We advance this framework by lifting linear bandit learning to neural sequence-to-sequence learning problems using attention-based recurrent neural networks. Furthermore, we show how to incorporate control variates into our learning algorithms for variance reduction and improved generalization. We present an evaluation on a neural machine translation task that shows improvements of up to 5.89 BLEU points for domain adaptation from simulated bandit feedback.", "keyphrases": ["structured prediction", "stochastic optimization framework", "bandit feedback"]} +{"id": "bansal-etal-2020-learning", "title": "Learning to Few-Shot Learn Across Diverse Natural Language Classification Tasks", "abstract": "Pre-trained transformer models have shown enormous success in improving performance on several downstream tasks. However, fine-tuning on a new task still requires large amounts of task-specific labeled data to achieve good performance. We consider this problem of learning to generalize to new tasks, with a few examples, as a meta-learning problem. While meta-learning has shown tremendous progress in recent years, its application is still limited to simulated problems or problems with limited diversity across tasks. We develop a novel method, LEOPARD, which enables optimization-based meta-learning across tasks with a different number of classes, and evaluate different methods on generalization to diverse NLP classification tasks. LEOPARD is trained with the state-of-the-art transformer architecture and shows better generalization to tasks not seen at all during training, with as few as 4 examples per label. Across 17 NLP tasks, including diverse domains of entity typing, natural language inference, sentiment analysis, and several other text classification tasks, we show that LEOPARD learns better initial parameters for few-shot learning than self-supervised pre-training or multi-task training, outperforming many strong baselines, for example, yielding 14.6% average relative gain in accuracy on unseen tasks with only 4 examples per label.", "keyphrases": ["few-shot learning", "language inference", "sentiment analysis", "maml", "diverse nlp task"]} +{"id": "miura-etal-2014-teamx", "title": "TeamX: A Sentiment Analyzer with Enhanced Lexicon Mapping and Weighting Scheme for Unbalanced Data", "abstract": "This paper describes the system that has been used by TeamX in SemEval-2014 Task 9 Subtask B. The system is a sentiment analyzer based on a supervised text categorization approach designed with following two concepts. Firstly, since lexicon features were shown to be effective in SemEval-2013 Task 2, various lexicons and pre-processors for them are introduced to enhance lexical information. 
Secondly, since a distribution of sentiment on tweets is known to be unbalanced, a weighting scheme is introduced to bias an output of a machine learner. For the test run, the system was tuned towards Twitter texts and successfully achieved high scoring results on Twitter data, average F1 70.96 on Twitter2014 and average F1 56.50 on Twitter2014Sarcasm.", "keyphrases": ["sentiment analyzer", "weighting scheme", "machine learner"]} +{"id": "pedler-mitton-2010-large", "title": "A Large List of Confusion Sets for Spellchecking Assessed Against a Corpus of Real-word Errors", "abstract": "One of the methods that has been proposed for dealing with real-word errors (errors that occur when a correctly spelled word is substituted for the one intended) is the \u201cconfusion-set\u201d approach - a confusion set being a small group of words that are likely to be confused with one another. Using a list of confusion sets drawn up in advance, a spellchecker, on finding one of these words in a text, can assess whether one of the other members of its set would be a better fit and, if it appears to be so, propose that word as a correction. Much of the research using this approach has suffered from two weaknesses. The first is the small number of confusion sets used. The second is that systems have largely been tested on artificial errors. In this paper we address these two weaknesses. We describe the creation of a realistically sized list of confusion sets, then the assembling of a corpus of real-word errors, and then we assess the potential of that list in relation to that corpus.", "keyphrases": ["list", "confusion set", "real-word error"]} +{"id": "turney-2006-expressing", "title": "Expressing Implicit Semantic Relations without Supervision", "abstract": "We present an unsupervised learning algorithm that mines large text corpora for patterns that express implicit semantic relations. For a given input word pair X:Y with some unspecified semantic relations, the corresponding output list of patterns (P1,..., Pm) is ranked according to how well each pattern Pi expresses the relations between X and Y. For example, given X = ostrich and Y = bird, the two highest ranking output patterns are "X is the largest Y" and "Y such as the X". The output patterns are intended to be useful for finding further pairs with the same relations, to support the construction of lexicons, ontologies, and semantic networks. The patterns are sorted by pertinence, where the pertinence of a pattern Pi for a word pair X:Y is the expected relational similarity between the given pair and typical pairs for Pi. The algorithm is empirically evaluated on two tasks, solving multiple-choice SAT word analogy questions and classifying semantic relations in noun-modifier pairs. On both tasks, the algorithm achieves state-of-the-art results, performing significantly better than several alternative pattern ranking algorithms, based on tf-idf.", "keyphrases": ["semantic relation", "web", "unsupervised algorithm"]} +{"id": "chairatanakul-etal-2021-cross-lingual", "title": "Cross-lingual Transfer for Text Classification with Dictionary-based Heterogeneous Graph", "abstract": "In cross-lingual text classification, it is required that task-specific training data in high-resource source languages are available, where the task is identical to that of a low-resource target language. However, collecting such training data can be infeasible because of the labeling cost, task characteristics, and privacy concerns.
This paper proposes an alternative solution that uses only task-independent word embeddings of high-resource languages and bilingual dictionaries. First, we construct a dictionary-based heterogeneous graph (DHG) from bilingual dictionaries. This opens the possibility to use graph neural networks for cross-lingual transfer. The remaining challenge is the heterogeneity of DHG because multiple languages are considered. To address this challenge, we propose dictionary-based heterogeneous graph neural network (DHGNet) that effectively handles the heterogeneity of DHG by two-step aggregations, which are word-level and language-level aggregations. Experimental results demonstrate that our method outperforms pretrained models even though it does not have access to large corpora. Furthermore, it can perform well even though dictionaries contain many incorrect translations. Its robustness allows the usage of a wider range of dictionaries such as an automatically constructed dictionary and crowdsourced dictionary, which are convenient for real-world applications.", "keyphrases": ["text classification", "heterogeneous graph", "cross-lingual transfer"]} +{"id": "cao-etal-2017-parsing", "title": "Parsing to 1-Endpoint-Crossing, Pagenumber-2 Graphs", "abstract": "We study the Maximum Subgraph problem in deep dependency parsing. We consider two restrictions to deep dependency graphs: (a) 1-endpoint-crossing and (b) pagenumber-2. Our main contribution is an exact algorithm that obtains maximum subgraphs satisfying both restrictions simultaneously in time O(n^5). Moreover, ignoring one linguistically-rare structure decreases the complexity to O(n^4). We also extend our quartic-time algorithm into a practical parser with a discriminative disambiguation model and evaluate its performance on four linguistic data sets used in semantic dependency parsing.", "keyphrases": ["1-endpoint-crossing", "pagenumber-2", "dependency parsing", "appropriate graph class"]} +{"id": "dozat-manning-2018-simpler", "title": "Simpler but More Accurate Semantic Dependency Parsing", "abstract": "While syntactic dependency annotations concentrate on the surface or functional structure of a sentence, semantic dependency annotations aim to capture between-word relationships that are more closely related to the meaning of a sentence, using graph-structured representations. We extend the LSTM-based syntactic parser of Dozat and Manning (2017) to train on and generate these graph structures. The resulting system on its own achieves state-of-the-art performance, beating the previous, substantially more complex state-of-the-art system by 0.6% labeled F1. Adding linguistically richer input representations pushes the margin even higher, allowing us to beat it by 1.9% labeled F1.", "keyphrases": ["dependency parsing", "arc", "bilstms", "second-order information"]} +{"id": "hasan-etal-2007-large", "title": "Are Very Large N-Best Lists Useful for SMT?", "abstract": "This paper describes an efficient method to extract large n-best lists from a word graph produced by a statistical machine translation system. The extraction is based on the k shortest paths algorithm which is efficient even for very large k. We show that, although we can generate large amounts of distinct translation hypotheses, these numerous candidates are not able to significantly improve overall system performance.
We conclude that large n-best lists would benefit from better discriminating models.", "keyphrases": ["n-b list", "translation hypothesis", "different size"]} +{"id": "vilnis-etal-2018-probabilistic", "title": "Probabilistic Embedding of Knowledge Graphs with Box Lattice Measures", "abstract": "Embedding methods which enforce a partial order or lattice structure over the concept space, such as Order Embeddings (OE), are a natural way to model transitive relational data (e.g. entailment graphs). However, OE learns a deterministic knowledge base, limiting expressiveness of queries and the ability to use uncertainty for both prediction and learning (e.g. learning from expectations). Probabilistic extensions of OE have provided the ability to somewhat calibrate these denotational probabilities while retaining the consistency and inductive bias of ordered models, but lack the ability to model the negative correlations found in real-world knowledge. In this work we show that a broad class of models that assign probability measures to OE can never capture negative correlation, which motivates our construction of a novel box lattice and accompanying probability measure to capture anti-correlation and even disjoint concepts, while still providing the benefits of probabilistic modeling, such as the ability to perform rich joint and conditional queries over arbitrary sets of concepts, and both learning from and predicting calibrated uncertainty. We show improvements over previous approaches in modeling the Flickr and WordNet entailment graphs, and investigate the power of the model.", "keyphrases": ["lattice structure", "probability measure", "box embedding"]} +{"id": "song-etal-2018-structure", "title": "Structure-Infused Copy Mechanisms for Abstractive Summarization", "abstract": "Seq2seq learning has produced promising results on summarization. However, in many cases, system summaries still struggle to keep the meaning of the original intact. They may miss out important words or relations that play critical roles in the syntactic structure of source sentences. In this paper, we present structure-infused copy mechanisms to facilitate copying important words and relations from the source sentence to summary sentence. The approach naturally combines source dependency structure with the copy mechanism of an abstractive sentence summarizer. Experimental results demonstrate the effectiveness of incorporating source-side syntactic information in the system, and our proposed approach compares favorably to state-of-the-art methods.", "keyphrases": ["abstractive summarization", "structure-infused copy mechanism", "detail"]} +{"id": "pratapa-etal-2018-language", "title": "Language Modeling for Code-Mixing: The Role of Linguistic Theory based Synthetic Data", "abstract": "Training language models for Code-mixed (CM) language is known to be a difficult problem because of lack of data compounded by the increased confusability due to the presence of more than one language. We present a computational technique for creation of grammatically valid artificial CM data based on the Equivalence Constraint Theory. We show that when training examples are sampled appropriately from this synthetic data and presented in certain order (aka training curriculum) along with monolingual and real CM data, it can significantly reduce the perplexity of an RNN-based language model. 
We also show that randomly generated CM data does not help in decreasing the perplexity of the LMs.", "keyphrases": ["linguistic theory", "synthetic data", "code-mixed data"]} +{"id": "camacho-collados-etal-2015-nasari", "title": "NASARI: a Novel Approach to a Semantically-Aware Representation of Items", "abstract": "The semantic representation of individual word senses and concepts is of fundamental importance to several applications in Natural Language Processing. To date, concept modeling techniques have in the main based their representation either on lexicographic resources, such as WordNet, or on encyclopedic resources, such as Wikipedia. We propose a vector representation technique that combines the complementary knowledge of both these types of resource. Thanks to its use of explicit semantics combined with a novel cluster-based dimensionality reduction and an effective weighting scheme, our representation attains state-of-the-art performance on multiple datasets in two standard benchmarks: word similarity and sense clustering. We are releasing our vector representations at http://lcl.uniroma1.it/nasari/.", "keyphrases": ["wikipedia", "nasari", "sense inventory"]} +{"id": "strassel-etal-2006-integrated", "title": "Integrated Linguistic Resources for Language Exploitation Technologies", "abstract": "Linguistic Data Consortium has recently embarked on an effort to create integrated linguistic resources and related infrastructure for language exploitation technologies within the DARPA GALE (Global Autonomous Language Exploitation) Program. GALE targets an end-to-end system consisting of three major engines: Transcription, Translation and Distillation. Multilingual speech or text from a variety of genres is taken as input and English text is given as output, with information of interest presented in an integrated and consolidated fashion to the end user. GALE's goals require a quantum leap in the performance of human language technology, while also demanding solutions that are more intelligent, more robust, more adaptable, more efficient and more integrated. LDC has responded to this challenge with a comprehensive approach to linguistic resource development designed to support GALE's research and evaluation needs and to provide lasting resources for the larger Human Language Technology community.", "keyphrases": ["linguistic data consortium", "darpa gale", "translation agency"]} +{"id": "gaspari-hutchins-2007-online", "title": "Online and free! Ten years of online machine translation: origins, developments, current use and future prospects", "abstract": "Marking the ten-year anniversary of the launch of Babel Fish, the first ever free online machine translation (MT) service that went live on the Internet in late 1997, this paper sketches the background that led to its development, giving an account of its origins and of the early stages of its evolution. Several competitors have entered the field of web-based MT over the last decade, and the paper offers a review of the most significant contributions in the literature with a particular focus on two key issues: firstly, the role that these online MT tools have played in meeting the translation needs of the users, and secondly the impact that they have had on the MT-related industry and business. 
Information coming from a variety of sources, including data on current usage supplied by the online MT providers themselves for the purposes of this study, testifies to the massive increase in the use of the leading multilingual online MT services over the last ten years. On this basis, the conclusion assesses the future prospects of Internet-based MT.", "keyphrases": ["online machine translation", "origin", "future prospect"]} +{"id": "jadhav-rajan-2018-extractive", "title": "Extractive Summarization with SWAP-NET: Sentences and Words from Alternating Pointer Networks", "abstract": "We present a new neural sequence-to-sequence model for extractive summarization called SWAP-NET (Sentences and Words from Alternating Pointer Networks). Extractive summaries comprising a salient subset of input sentences, often also contain important key words. Guided by this principle, we design SWAP-NET that models the interaction of key words and salient sentences using a new two-level pointer network based architecture. SWAP-NET identifies both salient sentences and key words in an input document, and then combines them to form the extractive summary. Experiments on large scale benchmark corpora demonstrate the efficacy of SWAP-NET that outperforms state-of-the-art extractive summarizers.", "keyphrases": ["swap-net", "pointer networks", "extractive summarization"]} +{"id": "petrov-klein-2007-improved", "title": "Improved Inference for Unlexicalized Parsing", "abstract": "We present several improvements to unlexicalized parsing with hierarchically state-split PCFGs. First, we present a novel coarse-to-fine method in which a grammar\u2019s own hierarchical projections are used for incremental pruning, including a method for efficiently computing projections of a grammar without a treebank. In our experiments, hierarchical pruning greatly accelerates parsing with no loss in empirical accuracy. Second, we compare various inference procedures for state-split PCFGs from the standpoint of risk minimization, paying particular attention to their practical tradeoffs. Finally, we present multilingual experiments which show that parsing with hierarchical state-splitting is fast and accurate in multiple languages and domains, even without any language-specific tuning.", "keyphrases": ["procedure", "berkeley parser", "subcategorie", "dependency structure"]} +{"id": "rashkin-etal-2016-connotation", "title": "Connotation Frames: A Data-Driven Investigation", "abstract": "Through a particular choice of a predicate (e.g., \"x violated y\"), a writer can subtly connote a range of implied sentiments and presupposed facts about the entities x and y: (1) writer's perspective: projecting x as an \"antagonist\"and y as a \"victim\", (2) entities' perspective: y probably dislikes x, (3) effect: something bad happened to y, (4) value: y is something valuable, and (5) mental state: y is distressed by the event. We introduce connotation frames as a representation formalism to organize these rich dimensions of connotation using typed relations. First, we investigate the feasibility of obtaining connotative labels through crowdsourcing experiments. We then present models for predicting the connotation frames of verb predicates based on their distributional word representations and the interplay between different types of connotative relations. Empirical results confirm that connotation frames can be induced from various data sources that reflect how people use language and give rise to the connotative meanings. 
We conclude with analytical results that show the potential use of connotation frames for analyzing subtle biases in online news media.", "keyphrases": ["predicate", "writer", "connotation frames"]} +{"id": "pyysalo-etal-2007-unification", "title": "On the unification of syntactic annotations under the Stanford dependency scheme: A case study on BioInfer and GENIA", "abstract": "Several incompatible syntactic annotation schemes are currently used by parsers and corpora in biomedical information extraction. The recently introduced Stanford dependency scheme has been suggested to be a suitable unifying syntax formalism. In this paper, we present a step towards such unification by creating a conversion from the Link Grammar to the Stanford scheme. Further, we create a version of the BioInfer corpus with syntactic annotation in this scheme. We present an application-oriented evaluation of the transformation and assess the suitability of the scheme and our conversion to the unification of the syntactic annotations of BioInfer and the GENIA Treebank. \n \nWe find that a highly reliable conversion is both feasible to create and practical, increasing the applicability of both the parser and the corpus to information extraction.", "keyphrases": ["unification", "syntactic annotation", "bioinfer"]} +{"id": "wiseman-etal-2016-learning", "title": "Learning Global Features for Coreference Resolution", "abstract": "There is compelling evidence that coreference prediction would benefit from modeling global information about entity-clusters. Yet, state-of-the-art performance can be achieved with systems treating each mention prediction independently, which we attribute to the inherent difficulty of crafting informative cluster-level features. We instead propose to use recurrent neural networks (RNNs) to learn latent, global representations of entity clusters directly from their mentions. We show that such representations are especially useful for the prediction of pronominal mentions, and can be incorporated into an end-to-end coreference system that outperforms the state of the art without requiring any additional search.", "keyphrases": ["coreference resolution", "mention", "recurrent neural network", "entity-level feature"]} +{"id": "mundra-etal-2021-wassa", "title": "WASSA@IITK at WASSA 2021: Multi-task Learning and Transformer Finetuning for Emotion Classification and Empathy Prediction", "abstract": "This paper describes our contribution to the WASSA 2021 shared task on Empathy Prediction and Emotion Classification. The broad goal of this task was to model an empathy score, a distress score and the overall level of emotion of an essay written in response to a newspaper article associated with harm to someone. We have used the ELECTRA model abundantly and also advanced deep learning approaches like multi-task learning. Additionally, we also leveraged standard machine learning techniques like ensembling. Our system achieves a Pearson Correlation Coefficient of 0.533 on sub-task I and a macro F1 score of 0.5528 on sub-task II. 
We ranked 1st in Emotion Classification sub-task and 3rd in Empathy Prediction sub-task.", "keyphrases": ["multi-task learning", "emotion classification", "empathy prediction"]} +{"id": "bapna-etal-2018-training", "title": "Training Deeper Neural Machine Translation Models with Transparent Attention", "abstract": "While current state-of-the-art NMT models, such as RNN seq2seq and Transformers, possess a large number of parameters, they are still shallow in comparison to convolutional models used for both text and vision applications. In this work we attempt to train significantly (2-3x) deeper Transformer and Bi-RNN encoders for machine translation. We propose a simple modification to the attention mechanism that eases the optimization of deeper models, and results in consistent gains of 0.7-1.1 BLEU on the benchmark WMT'14 English-German and WMT'15 Czech-English tasks for both architectures.", "keyphrases": ["transparent attention", "optimization", "deep model"]} +{"id": "roy-roth-2018-mapping", "title": "Mapping to Declarative Knowledge for Word Problem Solving", "abstract": "Math word problems form a natural abstraction to a range of quantitative reasoning problems, such as understanding financial news, sports results, and casualties of war. Solving such problems requires the understanding of several mathematical concepts such as dimensional analysis, subset relationships, etc. In this paper, we develop declarative rules which govern the translation of natural language description of these concepts to math expressions. We then present a framework for incorporating such declarative knowledge into word problem solving. Our method learns to map arithmetic word problem text to math expressions, by learning to select the relevant declarative knowledge for each operation of the solution expression. This provides a way to handle multiple concepts in the same problem while, at the same time, supporting interpretability of the answer expression. Our method models the mapping to declarative knowledge as a latent variable, thus removing the need for expensive annotations. Experimental evaluation suggests that our domain knowledge based solver outperforms all other systems, and that it generalizes better in the realistic case where the training data it is exposed to is biased in a different way than the test data.", "keyphrases": ["declarative knowledge", "math word problem", "mapping"]} +{"id": "reddy-waxmonsky-2009-substring", "title": "Substring-based Transliteration with Conditional Random Fields", "abstract": "Motivated by phrase-based translation research, we present a transliteration system where characters are grouped into substrings to be mapped atomically into the target language. We show how this substring representation can be incorporated into a Conditional Random Field model that uses local context and phonemic information.", "keyphrases": ["transliteration", "substring", "local context", "phonemic information"]} +{"id": "cahill-etal-2007-pruning", "title": "Pruning the Search Space of a Hand-Crafted Parsing System with a Probabilistic Parser", "abstract": "The demand for deep linguistic analysis for huge volumes of data means that it is increasingly important that the time taken to parse such data is minimized. In the XLE parsing model which is a hand-crafted, unification-based parsing system, most of the time is spent on unification, searching for valid f-structures (dependency attribute-value matrices) within the space of the many valid c-structures (phrase structure trees). 
We carried out an experiment to determine whether pruning the search space at an earlier stage of the parsing process results in an improvement in the overall time taken to parse, while maintaining the quality of the f-structures produced. We retrained a state-of-the-art probabilistic parser and used it to pre-bracket input to the XLE, constraining the valid c-structure space for each sentence. We evaluated against the PARC 700 Dependency Bank and show that it is possible to decrease the time taken to parse by ~18% while maintaining accuracy.", "keyphrases": ["search space", "probabilistic parser", "c-structure"]} +{"id": "yang-etal-2018-unsupervised", "title": "Unsupervised Neural Machine Translation with Weight Sharing", "abstract": "Unsupervised neural machine translation (NMT) is a recently proposed approach for machine translation which aims to train the model without using any labeled data. The models proposed for unsupervised NMT often use only one shared encoder to map the pairs of sentences from different languages to a shared-latent space, which is weak in keeping the unique and internal characteristics of each language, such as the style, terminology, and sentence structure. To address this issue, we introduce an extension by utilizing two independent encoders but sharing some partial weights which are responsible for extracting high-level representations of the input sentences. Besides, two different generative adversarial networks (GANs), namely the local GAN and global GAN, are proposed to enhance the cross-language translation. With this new approach, we achieve significant improvements on English-German, English-French and Chinese-to-English translation tasks.", "keyphrases": ["weight", "weakness", "independent encoder", "back-translation", "monolingual data"]} +{"id": "yuan-etal-2020-one", "title": "One Size Does Not Fit All: Generating and Evaluating Variable Number of Keyphrases", "abstract": "Different texts shall by nature correspond to different number of keyphrases. This desideratum is largely missing from existing neural keyphrase generation models. In this study, we address this problem from both modeling and evaluation perspectives. We first propose a recurrent generative model that generates multiple keyphrases as delimiter-separated sequences. Generation diversity is further enhanced with two novel techniques by manipulating decoder hidden states. In contrast to previous approaches, our model is capable of generating diverse keyphrases and controlling number of outputs. We further propose two evaluation metrics tailored towards the variable-number generation. We also introduce a new dataset StackEx that expands beyond the only existing genre (i.e., academic writing) in keyphrase generation tasks. With both previous and new evaluation metrics, our model outperforms strong baselines on all datasets.", "keyphrases": ["keyphrase", "generative model", "previous approach", "end"]} +{"id": "gu-etal-2020-token", "title": "Token-level Adaptive Training for Neural Machine Translation", "abstract": "There exists a token imbalance phenomenon in natural language as different tokens appear with different frequencies, which leads to different learning difficulties for tokens in Neural Machine Translation (NMT). The vanilla NMT model usually adopts trivial equal-weighted objectives for target tokens with different frequencies and tends to generate more high-frequency tokens and less low-frequency tokens compared with the golden token distribution. 
However, low-frequency tokens may carry critical semantic information that will affect the translation quality once they are neglected. In this paper, we explored target token-level adaptive objectives based on token frequencies to assign appropriate weights for each target token during training. We aimed that those meaningful but relatively low-frequency words could be assigned with larger weights in objectives to encourage the model to pay more attention to these tokens. Our method yields consistent improvements in translation quality on ZH-EN, EN-RO, and EN-DE translation tasks, especially on sentences that contain more low-frequency tokens where we can get 1.68, 1.02, and 0.52 BLEU increases compared with baseline, respectively. Further analyses show that our method can also improve the lexical diversity of translation.", "keyphrases": ["neural machine translation", "low-frequency token", "weight"]} +{"id": "yu-etal-2018-transition", "title": "Transition-based Neural RST Parsing with Implicit Syntax Features", "abstract": "Syntax has been a useful source of information for statistical RST discourse parsing. Under the neural setting, a common approach integrates syntax by a recursive neural network (RNN), requiring discrete output trees produced by a supervised syntax parser. In this paper, we propose an implicit syntax feature extraction approach, using hidden-layer vectors extracted from a neural syntax parser. In addition, we propose a simple transition-based model as the baseline, further enhancing it with dynamic oracle. Experiments on the standard dataset show that our baseline model with dynamic oracle is highly competitive. When implicit syntax features are integrated, we are able to obtain further improvements, better than using explicit Tree-RNN.", "keyphrases": ["implicit syntax feature", "neural syntax parser", "shift-reduce parser", "transition"]} +{"id": "rumshisky-batiukova-2008-polysemy", "title": "Polysemy in Verbs: Systematic Relations between Senses and their Effect on Annotation", "abstract": "Sense inventories for polysemous predicates are often comprised by a number of related senses. In this paper, we examine different types of relations within sense inventories and give a qualitative analysis of the effects they have on decisions made by the annotators and annotator error. We also discuss some common traps and pitfalls in design of sense inventories. We use the data set developed specifically for the task of annotating sense distinctions dependent predominantly on semantics of the arguments and only to a lesser extent on syntactic frame.", "keyphrases": ["annotator", "sense inventory", "polysemy"]} +{"id": "hermann-blunsom-2013-role", "title": "The Role of Syntax in Vector Space Models of Compositional Semantics", "abstract": "Modelling the compositional process by which the meaning of an utterance arises from the meaning of its parts is a fundamental task of Natural Language Processing. In this paper we draw upon recent advances in the learning of vector space representations of sentential semantics and the transparent interface between syntax and semantics provided by Combinatory Categorial Grammar to introduce Combinatory Categorial Autoencoders. This model leverages the CCG combinatory operators to guide a non-linear transformation of meaning within a sentence. 
We use this model to learn high dimensional embeddings for sentences and evaluate them in a range of tasks, demonstrating that the incorporation of syntax allows a concise model to learn representations that are both effective and general.", "keyphrases": ["syntax", "compositionality", "sentiment classification"]} +{"id": "hande-etal-2020-kancmd", "title": "KanCMD: Kannada CodeMixed Dataset for Sentiment Analysis and Offensive Language Detection", "abstract": "We introduce Kannada CodeMixed Dataset (KanCMD), a multi-task learning dataset for sentiment analysis and offensive language identification. The KanCMD dataset highlights two real-world issues from the social media text. First, it contains actual comments in code mixed text posted by users on YouTube social media, rather than in monolingual text from the textbook. Second, it has been annotated for two tasks, namely sentiment analysis and offensive language detection for under-resourced Kannada language. Hence, KanCMD is meant to stimulate research in under-resourced Kannada language on real-world code-mixed social media text and multi-task learning. KanCMD was obtained by crawling the YouTube, and a minimum of three annotators annotates each comment. We release KanCMD 7,671 comments for multitask learning research purpose.", "keyphrases": ["kannada codemixed dataset", "sentiment analysis", "offensive language detection"]} +{"id": "lee-etal-2019-sumbt", "title": "SUMBT: Slot-Utterance Matching for Universal and Scalable Belief Tracking", "abstract": "In goal-oriented dialog systems, belief trackers estimate the probability distribution of slot-values at every dialog turn. Previous neural approaches have modeled domain- and slot-dependent belief trackers, and have difficulty in adding new slot-values, resulting in lack of flexibility of domain ontology configurations. In this paper, we propose a new approach to universal and scalable belief tracker, called slot-utterance matching belief tracker (SUMBT). The model learns the relations between domain-slot-types and slot-values appearing in utterances through attention mechanisms based on contextual semantic vectors. Furthermore, the model predicts slot-value labels in a non-parametric way. From our experiments on two dialog corpora, WOZ 2.0 and MultiWOZ, the proposed model showed performance improvement in comparison with slot-dependent methods and achieved the state-of-the-art joint accuracy.", "keyphrases": ["ontology", "matching belief tracker", "slot", "sumbt"]} +{"id": "phan-ogunbona-2020-modelling", "title": "Modelling Context and Syntactical Features for Aspect-based Sentiment Analysis", "abstract": "The aspect-based sentiment analysis (ABSA) consists of two conceptual tasks, namely an aspect extraction and an aspect sentiment classification. Rather than considering the tasks separately, we build an end-to-end ABSA solution. Previous works in ABSA tasks did not fully leverage the importance of syntactical information. Hence, the aspect extraction model often failed to detect the boundaries of multi-word aspect terms. On the other hand, the aspect sentiment classifier was unable to account for the syntactical correlation between aspect terms and the context words. This paper explores the grammatical aspect of the sentence and employs the self-attention mechanism for syntactical learning. We combine part-of-speech embeddings, dependency-based embeddings and contextualized embeddings (e.g. BERT, RoBERTa) to enhance the performance of the aspect extractor. 
We also propose the syntactic relative distance to de-emphasize the adverse effects of unrelated words, having weak syntactic connection with the aspect terms. This increases the accuracy of the aspect sentiment classifier. Our solutions outperform the state-of-the-art models on SemEval-2014 dataset in both two subtasks.", "keyphrases": ["sentiment analysis", "aspect term", "syntactic relative distance"]} +{"id": "sun-etal-2020-colake", "title": "CoLAKE: Contextualized Language and Knowledge Embedding", "abstract": "With the emerging branch of incorporating factual knowledge into pre-trained language models such as BERT, most existing models consider shallow, static, and separately pre-trained entity embeddings, which limits the performance gains of these models. Few works explore the potential of deep contextualized knowledge representation when injecting knowledge. In this paper, we propose the Contextualized Language and Knowledge Embedding (CoLAKE), which jointly learns contextualized representation for both language and knowledge with the extended MLM objective. Instead of injecting only entity embeddings, CoLAKE extracts the knowledge context of an entity from large-scale knowledge bases. To handle the heterogeneity of knowledge context and language context, we integrate them in a unified data structure, word-knowledge graph (WK graph). CoLAKE is pre-trained on large-scale WK graphs with the modified Transformer encoder. We conduct experiments on knowledge-driven tasks, knowledge probing tasks, and language understanding tasks. Experimental results show that CoLAKE outperforms previous counterparts on most of the tasks. Besides, CoLAKE achieves surprisingly high performance on our synthetic task called word-knowledge graph completion, which shows the superiority of simultaneously contextualizing language and knowledge representation.", "keyphrases": ["contextualized language", "knowledge embedding", "colake"]} +{"id": "tao-etal-2006-unsupervised", "title": "Unsupervised Named Entity Transliteration Using Temporal and Phonetic Correlation", "abstract": "In this paper we investigate unsupervised name transliteration using comparable corpora, corpora where texts in the two languages deal in some of the same topics --- and therefore share references to named entities --- but are not translations of each other. We present two distinct methods for transliteration, one approach using an unsupervised phonetic transliteration method, and the other using the temporal distribution of candidate pairs. Each of these approaches works quite well, but by combining the approaches one can achieve even better results. We believe that the novelty of our approach lies in the phonetic-based scoring method, which is based on a combination of carefully crafted phonetic features, and empirical results from the pronunciation errors of second-language learners of English. Unlike previous approaches to transliteration, this method can in principle work with any pair of languages in the absence of a training dictionary, provided one has an estimate of the pronunciation of words in text.", "keyphrases": ["transliteration", "comparable corpora", "phonetic mapping"]} +{"id": "ramanathan-etal-2009-case", "title": "Case markers and Morphology: Addressing the crux of the fluency problem in English-Hindi SMT", "abstract": "We report in this paper our work on accurately generating case markers and suffixes in English-to-Hindi SMT. 
Hindi is a relatively free word-order language, and makes use of a comparatively richer set of case markers and morphological suffixes for correct meaning representation. From our experience of large-scale English-Hindi MT, we are convinced that fluency and fidelity in the Hindi output get an order of magnitude facelift if accurate case markers and suffixes are produced. Now, the moot question is: what entity on the English side encodes the information contained in case markers and suffixes on the Hindi side? Our studies of correspondences in the two languages show that case markers and suffixes in Hindi are predominantly determined by the combination of suffixes and semantic relations on the English side. We, therefore, augment the aligned corpus of the two languages, with the correspondence of English suffixes and semantic relations with Hindi suffixes and case markers. Our results on 400 test sentences, translated using an SMT system trained on around 13000 parallel sentences, show that suffix + semantic relation \u2192 case marker/suffix is a very useful translation factor, in the sense of making a significant difference to output quality as indicated by subjective evaluation as well as BLEU scores.", "keyphrases": ["fluency", "suffix", "semantic relation", "english-hindi smt system"]} +{"id": "rosa-etal-2013-deepfix", "title": "Deepfix: Statistical Post-editing of Statistical Machine Translation Using Deep Syntactic Analysis", "abstract": "Deepfix is a statistical post-editing system for improving the quality of statistical machine translation outputs. It attempts to correct errors in verb-noun valency using deep syntactic analysis and a simple probabilistic model of valency. On the English-to-Czech translation pair, we show that statistical post-editing of statistical machine translation leads to an improvement of the translation quality when helped by deep linguistic knowledge.", "keyphrases": ["post-editing", "statistical machine translation", "deepfix"]} +{"id": "dellorletta-etal-2011-read", "title": "READ\u2013IT: Assessing Readability of Italian Texts with a View to Text Simplification", "abstract": "In this paper, we propose a new approach to readability assessment with a specific view to the task of text simplification: the intended audience includes people with low literacy skills and/or with mild cognitive impairment. READ-IT represents the first advanced readability assessment tool for what concerns Italian, which combines traditional raw text features with lexical, morpho-syntactic and syntactic information. In READ-IT readability assessment is carried out with respect to both documents and sentences where the latter represents an important novelty of the proposed approach creating the prerequisites for aligning the readability assessment step with the text simplification process. READ-IT shows a high accuracy in the document classification task and promising results in the sentence classification scenario.", "keyphrases": ["text simplification", "read-it", "readability assessment tool"]} +{"id": "fritzinger-fraser-2010-avoid", "title": "How to Avoid Burning Ducks: Combining Linguistic Analysis and Corpus Statistics for German Compound Processing", "abstract": "Compound splitting is an important problem in many NLP applications which must be solved in order to address issues of data sparsity.
Previous work has shown that linguistic approaches for German compound splitting produce a correct splitting more often, but corpus-driven approaches work best for phrase-based statistical machine translation from German to English, a worrisome contradiction. We address this situation by combining linguistic analysis with corpus-driven statistics and obtaining better results in terms of both producing splittings according to a gold standard and statistical machine translation performance.", "keyphrases": ["linguistic analysis", "statistic", "compound splitting"]} +{"id": "li-etal-2020-graph-tree", "title": "Graph-to-Tree Neural Networks for Learning Structured Input-Output Translation with Applications to Semantic Parsing and Math Word Problem", "abstract": "The celebrated Seq2Seq technique and its numerous variants achieve excellent performance on many tasks such as neural machine translation, semantic parsing, and math word problem solving. However, these models either only consider input objects as sequences while ignoring the important structural information for encoding, or they simply treat output objects as sequence outputs instead of structural objects for decoding. In this paper, we present a novel Graph-to-Tree Neural Networks, namely Graph2Tree consisting of a graph encoder and a hierarchical tree decoder, that encodes an augmented graph-structured input and decodes a tree-structured output. In particular, we investigated our model for solving two problems, neural semantic parsing and math word problem. Our extensive experiments demonstrate that our Graph2Tree model outperforms or matches the performance of other state-of-the-art models on these tasks.", "keyphrases": ["semantic parsing", "math word problem", "graph-to-tree neural networks"]} +{"id": "castillo-estrella-2012-semantic", "title": "Semantic Textual Similarity for MT evaluation", "abstract": "This paper describes the system used for our participation in the WMT12 Machine Translation evaluation shared task. \n \nWe also present a new approach to Machine Translation evaluation based on the recently defined task Semantic Textual Similarity. This problem is addressed using a textual entailment engine entirely based on WordNet semantic features. \n \nWe described results for the Spanish-English, Czech-English and German-English language pairs according to our submission on the Eight Workshop on Statistical Machine Translation. Our first experiments reports a competitive score to system level.", "keyphrases": ["semantic textual similarity", "pipeline", "sagan"]} +{"id": "gregoire-2007-design", "title": "Design and Implementation of a Lexicon of Dutch Multiword Expressions", "abstract": "This paper describes the design and implementation of a lexicon of Dutch multiword expressions (MWEs). No exhaustive research on a standard lexical representation of MWEs has been done for Dutch before. The approach taken is innovative, since it is based on the Equivalence Class Method. Furthermore, the selection of the lexical entries and their properties is corpus-based. The design of the lexicon and the standard representation will be tested in Dutch NLP systems. The purpose of the current paper is to give an overview of the decisions made in order to come to a standard lexical representation and to discuss the description fields this representation comprises.", "keyphrases": ["implementation", "syntactic fixedness", "idiomatic expression"]} +{"id": "wu-etal-2021-one", "title": "One Teacher is Enough? 
Pre-trained Language Model Distillation from Multiple Teachers", "abstract": "Pre-trained language models (PLMs) achieve great success in NLP. However, their huge model sizes hinder their applications in many practical systems. Knowledge distillation is a popular technique to compress PLMs, which learns a small student model from a large teacher PLM. However, the knowledge learned from a single teacher may be limited and even biased, resulting in low-quality student model. In this paper, we propose a multi-teacher knowledge distillation framework named MT-BERT for pre-trained language model compression, which can train high-quality student model from multiple teacher PLMs. In MT-BERT we design a multi-teacher co-finetuning method to jointly finetune multiple teacher PLMs in downstream tasks with shared pooling and prediction layers to align their output space for better collaborative teaching. In addition, we propose a multi-teacher hidden loss and a multi-teacher distillation loss to transfer the useful knowledge in both hidden states and soft labels from multiple teacher PLMs to the student model. Experiments on three benchmark datasets validate the effectiveness of MT-BERT in compressing PLMs.", "keyphrases": ["teacher", "plm", "knowledge distillation framework", "language model compression"]} +{"id": "deri-knight-2015-make", "title": "How to Make a Frenemy: Multitape FSTs for Portmanteau Generation", "abstract": "A portmanteau is a type of compound word that fuses the sounds and meanings of two component words; for example, \u201cfrenemy\u201d (friend + enemy) or \u201csmog\u201d (smoke + fog). We develop a system, including a novel multitape FST, that takes an input of two words and outputs possible portmanteaux. Our system is trained on a list of known portmanteaux and their component words, and achieves 45% exact matches in cross-validated experiments.", "keyphrases": ["frenemy", "multitape fst", "portmanteau"]} +{"id": "dusek-etal-2019-automatic", "title": "Automatic Quality Estimation for Natural Language Generation: Ranting (Jointly Rating and Ranking)", "abstract": "We present a recurrent neural network based system for automatic quality estimation of natural language generation (NLG) outputs, which jointly learns to assign numerical ratings to individual outputs and to provide pairwise rankings of two different outputs. The latter is trained using pairwise hinge loss over scores from two copies of the rating network. We use learning to rank and synthetic data to improve the quality of ratings assigned by our system: We synthesise training pairs of distorted system outputs and train the system to rank the less distorted one higher. This leads to a 12% increase in correlation with human ratings over the previous benchmark. We also establish the state of the art on the dataset of relative rankings from the E2E NLG Challenge (Dusek et al., 2019), where synthetic data lead to a 4% accuracy increase over the base model.", "keyphrases": ["natural language generation", "ranking", "automatic quality estimation"]} +{"id": "simon-etal-2013-leveraging", "title": "Leveraging Lexical Cohesion and Disruption for Topic Segmentation", "abstract": "Topic segmentation classically relies on one of two criteria, either finding areas with coherent vocabulary use or detecting discontinuities. In this paper, we propose a segmentation criterion combining both lexical cohesion and disruption, enabling a trade-off between the two. 
We provide the mathematical formulation of the criterion and an efficient graph based decoding algorithm for topic segmentation. Experimental results on standard textual data sets and on a more challenging corpus of automatically transcribed broadcast news shows demonstrate the benefit of such a combination. Gains were observed in all conditions, with segments of either regular or varying length and abrupt or smooth topic shifts. Long segments benefit more than short segments. However the algorithm has proven robust on automatic transcripts with short segments and limited vocabulary reoccurrences.", "keyphrases": ["lexical cohesion", "disruption", "topic segmentation"]} +{"id": "gu-etal-2020-train", "title": "Train No Evil: Selective Masking for Task-Guided Pre-Training", "abstract": "Recently, pre-trained language models mostly follow the pre-train-then-fine-tuning paradigm and have achieved great performance on various downstream tasks. However, since the pre-training stage is typically task-agnostic and the fine-tuning stage usually suffers from insufficient supervised data, the models cannot always well capture the domain-specific and task-specific patterns. In this paper, we propose a three-stage framework by adding a task-guided pre-training stage with selective masking between general pre-training and fine-tuning. In this stage, the model is trained by masked language modeling on in-domain unsupervised data to learn domain-specific patterns and we propose a novel selective masking strategy to learn task-specific patterns. Specifically, we design a method to measure the importance of each token in sequences and selectively mask the important tokens. Experimental results on two sentiment analysis tasks show that our method can achieve comparable or even better performance with less than 50% of computation cost, which indicates our method is both effective and efficient. The source code of this paper can be obtained from .", "keyphrases": ["selective masking", "pre-training", "downstream task", "important token", "plm"]} +{"id": "yin-etal-2020-robustness", "title": "On the Robustness of Language Encoders against Grammatical Errors", "abstract": "We conduct a thorough study to diagnose the behaviors of pre-trained language encoders (ELMo, BERT, and RoBERTa) when confronted with natural grammatical errors. Specifically, we collect real grammatical errors from non-native speakers and conduct adversarial attacks to simulate these errors on clean text data. We use this approach to facilitate debugging models on downstream applications. Results confirm that the performance of all tested models is affected but the degree of impact varies. To interpret model behaviors, we further design a linguistic acceptability task to reveal their abilities in identifying ungrammatical sentences and the position of errors. We find that fixed contextual encoders with a simple classifier trained on the prediction of sentence correctness are able to locate error positions. We also design a cloze test for BERT and discover that BERT captures the interaction between errors and specific tokens in context. 
Our results shed light on understanding the robustness and behaviors of language encoders against grammatical errors.", "keyphrases": ["robustness", "language encoder", "grammatical error"]} +{"id": "purandare-pedersen-2004-word", "title": "Word Sense Discrimination by Clustering Contexts in Vector and Similarity Spaces", "abstract": "This paper systematically compares unsupervised word sense discrimination techniques that cluster instances of a target word that occur in raw text using both vector and similarity spaces. The context of each instance is represented as a vector in a high dimensional feature space. Discrimination is achieved by clustering these context vectors directly in vector space and also by finding pairwise similarities among the vectors and then clustering in similarity space. We employ two different representations of the context in which a target word occurs. First order context vectors represent the context of each instance of a target word as a vector of features that occur in that context. Second order context vectors are an indirect representation of the context based on the average of vectors that represent the words that occur in the context. We evaluate the discriminated clusters by carrying out experiments using sense\u2013tagged instances of 24 SENSEVAL2 words and the well known Line, Hard and Serve sense\u2013tagged corpora.", "keyphrases": ["cluster", "word sense discrimination", "probability distribution"]} +{"id": "sagae-tsujii-2008-shift", "title": "Shift-Reduce Dependency DAG Parsing", "abstract": "Most data-driven dependency parsing approaches assume that sentence structure is represented as trees. Although trees have several desirable properties from both computational and linguistic perspectives, the structure of linguistic phenomena that goes beyond shallow syntax often cannot be fully captured by tree representations. We present a parsing approach that is nearly as simple as current data-driven transition-based dependency parsing frameworks, but outputs directed acyclic graphs (DAGs). We demonstrate the benefits of DAG parsing in two experiments where its advantages over dependency tree parsing can be clearly observed: predicate-argument analysis of English and syntactic analysis of Danish with a representation that includes long-distance dependencies and anaphoric reference links.", "keyphrases": ["dag", "dependency parsing", "predicate-argument analysis", "long-distance dependency"]} +{"id": "tebbifakhr-etal-2020-automatic", "title": "Automatic Translation for Multiple NLP tasks: a Multi-task Approach to Machine-oriented NMT Adaptation", "abstract": "Although machine translation (MT) traditionally pursues \u201chuman-oriented\u201d objectives, humans are not the only possible consumers of MT output. For instance, when automatic translations are used to feed downstream Natural Language Processing (NLP) components in cross-lingual settings, they should ideally pursue \u201cmachine-oriented\u201d objectives that maximize the performance of these components. Tebbifakhr et al. (2019) recently proposed a reinforcement learning approach to adapt a generic neural MT(NMT) system by exploiting the reward from a downstream sentiment classifier. But what if the downstream NLP tasks to serve are more than one? How to avoid the costs of adapting and maintaining one dedicated NMT system for each task? 
We address this problem by proposing a multi-task approach to machine-oriented NMT adaptation, which is capable to serve multiple downstream tasks with a single system. Through experiments with Spanish and Italian data covering three different tasks, we show that our approach can outperform a generic NMT system, and compete with single-task models in most of the settings.", "keyphrases": ["multi-task approach", "machine-oriented nmt adaptation", "automatic translation"]} +{"id": "fath-etal-2020-fintan", "title": "Fintan - Flexible, Integrated Transformation and Annotation eNgineering", "abstract": "We introduce the Flexible and Integrated Transformation and Annotation eNgeneering (Fintan) platform for converting heterogeneous linguistic resources to RDF. With its modular architecture, workflow management and visualization features, Fintan facilitates the development of complex transformation pipelines by integrating generic RDF converters and augmenting them with extended graph processing capabilities: Existing converters can be easily deployed to the system by means of an ontological data structure which renders their properties and the dependencies between transformation steps. Development of subsequent graph transformation steps for resource transformation, annotation engineering or entity linking is further facilitated by a novel visual rendering of SPARQL queries. A graphical workflow manager allows to easily manage the converter modules and combine them to new transformation pipelines. Employing the stream-based graph processing approach first implemented with CoNLL-RDF, we address common challenges and scalability issues when transforming resources and showcase the performance of Fintan by means of a purely graph-based transformation of the Universal Morphology data to RDF.", "keyphrases": ["flexible", "integrated transformation", "annotation engineering"]} +{"id": "tan-bond-2011-building", "title": "Building and Annotating the Linguistically Diverse NTU-MC (NTU-Multilingual Corpus)", "abstract": "The NTU-MC compilation taps on the linguistic diversity of multilingual texts available within Singapore. The current version of NTU-MC contains 375,000 words (15,000 sentences) in 6 languages (English, Chinese, Japanese, Korean, Indonesian and Vietnamese) from 6 language families (Indo-European, Sino-Tibetan, Japonic, Korean as a language isolate, Austronesian and Austro-Asiatic). The NTU-MC is annotated with a layer of monolingual annotation (POS tags) and cross-lingual annotation (sentence-level alignments). The diverse language data and cross-lingual annotations provide valuable information on linguistic diversity for traditional linguistic research as well as natural language processing tasks. This paper describes the corpus compilation process with the evaluation of the monolingual and cross-lingual annotations of the corpus data. The corpus is available under the Creative Commons - Attribute 3.0 Unported license (CC by).", "keyphrases": ["ntu-mc", "language family", "indo-european"]} +{"id": "zoph-knight-2016-multi", "title": "Multi-Source Neural Translation", "abstract": "We build a multi-source machine translation model and train it to maximize the probability of a target English string given French and German sources. 
Using the neural encoder-decoder framework, we explore several combination methods and report up to +4.8 Bleu increases on top of a very strong attention-based neural translation model.", "keyphrases": ["machine translation", "multi-source nmt", "well performance"]} +{"id": "chen-etal-2020-logical", "title": "Logical Natural Language Generation from Open-Domain Tables", "abstract": "Neural natural language generation (NLG) models have recently shown remarkable progress in fluency and coherence. However, existing studies on neural NLG are primarily focused on surface-level realizations with limited emphasis on logical inference, an important aspect of human thinking and language. In this paper, we suggest a new NLG task where a model is tasked with generating natural language statements that can be logically entailed by the facts in an open-domain semi-structured table. To facilitate the study of the proposed logical NLG problem, we use the existing TabFact dataset~(CITATION) featured with a wide range of logical/symbolic inferences as our testbed, and propose new automatic metrics to evaluate the fidelity of generation models w.r.t. logical inference. The new task poses challenges to the existing monotonic generation frameworks due to the mismatch between sequence order and logical order. In our experiments, we comprehensively survey different generation architectures (LSTM, Transformer, Pre-Trained LM) trained with different algorithms (RL, Adversarial Training, Coarse-to-Fine) on the dataset and made following observations: 1) Pre-Trained LM can significantly boost both the fluency and logical fidelity metrics, 2) RL and Adversarial Training are trading fluency for fidelity, 3) Coarse-to-Fine generation can help partially alleviate the fidelity issue while maintaining high language fluency. The code and data are available at .", "keyphrases": ["table", "logicnlg", "factual correctness rate"]} +{"id": "de-marneffe-etal-2006-generating", "title": "Generating Typed Dependency Parses from Phrase Structure Parses", "abstract": "This paper describes a system for extracting typed dependency parses of English sentences from phrase structure parses. In order to capture inherent relations occurring in corpus texts that can be critical in real-world applications, many NP relations are included in the set of grammatical relations used. We provide a comparison of our system with Minipar and the Link parser. The typed dependency extraction facility described here is integrated in the Stanford Parser, available for download.", "keyphrases": ["stanford parser", "dependency relation", "formalism", "modifier"]} +{"id": "knowles-koehn-2016-neural", "title": "Neural Interactive Translation Prediction", "abstract": "We present an interactive translation prediction method based on neural machine translation. Even with the same translation quality of the underlying machine translation systems, the neural prediction method yields much higher word prediction accuracy (61.6% vs. 43.3%) than the traditional method based on search graphs, mainly due to better recovery from errors. We also develop efficient means to enable practical deployment.", "keyphrases": ["translator", "neural machine translation", "decoding"]} +{"id": "saers-wu-2011-linear", "title": "Linear Transduction Grammars and Zipper Finite-State Transducers", "abstract": "We examine how the recently explored class of linear transductions relates to finite-state models. 
Linear transductions have been neglected historically, but gained recent interest in statistical machine translation modeling, due to empirical studies demonstrating that their attractive balance of generative capacity and complexity characteristics lead to improved accuracy and speed in learning alignment and translation models. Such work has until now characterized the class of linear transductions in terms of either (a) linear inversion transduction grammars (LITGs) which are linearized restrictions of inversion transduction grammars or (b) linear transduction grammars (LTGs) which are bilingualized generalizations of linear grammars. In this paper, we offer a new alternative characterization of linear transductions, as relating four finite-state languages to each other. We introduce the devices of zipper finite-state automata (ZFSAs) and zipper finite-state transducers (ZFSTs) in order to construct the bridge between linear transductions and finite-state models.", "keyphrases": ["zipper", "transducer", "linear transduction grammars"]} +{"id": "lu-roth-2012-automatic", "title": "Automatic Event Extraction with Structured Preference Modeling", "abstract": "This paper presents a novel sequence labeling model based on the latent-variable semi-Markov conditional random fields for jointly extracting argument roles of events from texts. The model takes in coarse mention and type information and predicts argument roles for a given event template. \n \nThis paper addresses the event extraction problem in a primarily unsupervised setting, where no labeled training instances are available. Our key contribution is a novel learning framework called structured preference modeling (PM), that allows arbitrary preference to be assigned to certain structures during the learning procedure. We establish and discuss connections between this framework and other existing works. We show empirically that the structured preferences are crucial to the success of our task. Our model, trained without annotated data and with a small number of structured preferences, yields performance competitive to some baseline supervised approaches.", "keyphrases": ["event extraction", "conditional random field", "annotated data"]} +{"id": "pustejovsky-etal-2019-modeling", "title": "Modeling Quantification and Scope in Abstract Meaning Representations", "abstract": "In this paper, we propose an extension to Abstract Meaning Representations (AMRs) to encode scope information of quantifiers and negation, in a way that overcomes the semantic gaps of the schema while maintaining its cognitive simplicity. Specifically, we address three phenomena not previously part of the AMR specification: quantification, negation (generally), and modality. The resulting representation, which we call \u201cUniform Meaning Representation\u201d (UMR), adopts the predicative core of AMR and embeds it under a \u201cscope\u201d graph when appropriate. 
UMR representations differ from other treatments of quantification and modal scope phenomena in two ways: (a) they are more transparent; and (b) they specify default scope when possible.", "keyphrases": ["quantification", "scope", "abstract meaning representations"]} +{"id": "mayn-etal-2021-familiar", "title": "Familiar words but strange voices: Modelling the influence of speech variability on word recognition", "abstract": "We present a deep neural model of spoken word recognition which is trained to retrieve the meaning of a word (in the form of a word embedding) given its spoken form, a task which resembles that faced by a human listener. Furthermore, we investigate the influence of variability in speech signals on the model's performance. To this end, we conduct a set of controlled experiments using word-aligned read speech data in German. Our experiments show that (1) the model is more sensitive to dialectical variation than gender variation, and (2) recognition performance of word cognates from related languages reflect the degree of relatedness between languages in our study. Our work highlights the feasibility of modeling human speech perception using deep neural networks.", "keyphrases": ["influence", "speech variability", "word recognition"]} +{"id": "xuan-bach-etal-2012-reranking", "title": "A Reranking Model for Discourse Segmentation using Subtree Features", "abstract": "This paper presents a discriminative reranking model for the discourse segmentation task, the first step in a discourse parsing system. Our model exploits subtree features to rerank N-best outputs of a base segmenter, which uses syntactic and lexical features in a CRF framework. Experimental results on the RST Discourse Treebank corpus show that our model outperforms existing discourse segmenters in both settings that use gold standard Penn Treebank parse trees and Stanford parse trees.", "keyphrases": ["reranking model", "discourse segmentation", "subtree feature"]} +{"id": "lyu-etal-2004-toward", "title": "Toward Constructing A Multilingual Speech Corpus for Taiwanese (Min-nan), Hakka, and Mandarin", "abstract": "The Formosa speech database (ForSDat) is a multilingual speech corpus collected at Chang Gung University and sponsored by the National Science Council of Taiwan. It is expected that a multilingual speech corpus will be collected, covering the three most frequently used languages in Taiwan: Taiwanese (Min-nan), Hakka, and Mandarin. This 3-year project has the goal of collecting a phonetically abundant speech corpus of more than 1,800 speakers and hundreds of hours of speech. Recently, the first version of this corpus containing speech of 600 speakers of Taiwanese and Mandarin was finished and is ready to be released. It contains about 49 hours of speech and 247,000 utterances.", "keyphrases": ["multilingual speech corpus", "taiwanese", "formosa speech database"]} +{"id": "carpuat-etal-2013-sensespotting", "title": "SenseSpotting: Never let your parallel data tie you to an old domain", "abstract": "Words often gain new senses in new domains. Being able to automatically identify, from a corpus of monolingual text, which word tokens are being used in a previously unseen sense has applications to machine translation and other tasks sensitive to lexical semantics. We define a task, SENSESPOTTING, in which we build systems to spot tokens that have new senses in new domain text. 
Instead of difficult and expensive annotation, we build a gold standard by leveraging cheaply available parallel corpora, targeting our approach to the problem of domain adaptation for machine translation. Our system is able to achieve F-measures of as much as 80%, when applied to word types it has never seen before. Our approach is based on a large set of novel features that capture varied aspects of how words change when used in new domains.", "keyphrases": ["parallel data", "domain adaptation", "central interest"]} +{"id": "iyyer-etal-2015-deep", "title": "Deep Unordered Composition Rivals Syntactic Methods for Text Classification", "abstract": "Many existing deep learning models for natural language processing tasks focus on learning the compositionality of their inputs, which requires many expensive computations. We present a simple deep neural network that competes with and, in some cases, outperforms such models on sentiment analysis and factoid question answering tasks while taking only a fraction of the training time. While our model is syntactically-ignorant, we show significant improvements over previous bag-of-words models by deepening our network and applying a novel variant of dropout. Moreover, our model performs better than syntactic models on datasets with high syntactic variance. We show that our model makes similar errors to syntactically-aware models, indicating that for the tasks we consider, nonlinearly transforming the input is more important than tailoring a network to incorporate word order and syntax.", "keyphrases": ["text classification", "compositionality", "sentiment analysis", "average", "neural architecture"]} +{"id": "che-etal-2013-named", "title": "Named Entity Recognition with Bilingual Constraints", "abstract": "Different languages contain complementary cues about entities, which can be used to improve Named Entity Recognition (NER) systems. We propose a method that formulates the problem of exploring such signals on unannotated bilingual text as a simple Integer Linear Program, which encourages entity tags to agree via bilingual constraints. Bilingual NER experiments on the large OntoNotes 4.0 Chinese-English corpus show that the proposed method can improve strong baselines for both Chinese and English. In particular, Chinese performance improves by over 5% absolute F1 score. We can then annotate a large amount of bilingual text (80k sentence pairs) using our method, and add it as uptraining data to the original monolingual NER training corpus. The Chinese model retrained on this new combined dataset outperforms the strong baseline by over 3% F1 score.", "keyphrases": ["entity recognition", "bilingual constraint", "cue", "ner performance"]} +{"id": "abdul-mageed-etal-2012-samar", "title": "SAMAR: A System for Subjectivity and Sentiment Analysis of Arabic Social Media", "abstract": "In this work, we present SAMAR, a system for Subjectivity and Sentiment Analysis (SSA) for Arabic social media genres. We investigate: how to best represent lexical information; whether standard features are useful; how to treat Arabic dialects; and, whether genre specific features have a measurable impact on performance. 
Our results suggest that we need individualized solutions for each domain and task, but that lemmatization is a feature in all the best approaches.", "keyphrases": ["subjectivity", "sentiment analysis", "arabic social medium"]} +{"id": "panicheva-etal-2010-personal", "title": "Personal Sense and Idiolect: Combining Authorship Attribution and Opinion Analysis", "abstract": "Subjectivity analysis and authorship attribution are very popular areas of research. However, work in these two areas has been done separately. We believe that by combining information about subjectivity in texts and authorship, the performance of both tasks can be improved. In the paper a personalized approach to opinion mining is presented, in which the notions of personal sense and idiolect are introduced; the approach is applied to the polarity classification task. It is assumed that different authors express their private states in text individually, and opinion mining results could be improved by analyzing texts by different authors separately. The hypothesis is tested on a corpus of movie reviews by ten authors. The results of applying the personalized approach to opinion mining are presented, confirming that the approach increases the performance of the opinion mining task. Automatic authorship attribution is further applied to model the personalized approach, classifying documents by their assumed authorship. Although the automatic authorship classification imposes a number of limitations on the dataset for further experiments, after overcoming these issues the authorship attribution technique modeling the personalized approach confirms the increase over the baseline with no authorship information used.", "keyphrases": ["idiolect", "authorship attribution", "personal sense"]} +{"id": "xie-etal-2018-neural", "title": "Neural Cross-Lingual Named Entity Recognition with Minimal Resources", "abstract": "For languages with no annotated resources, unsupervised transfer of natural language processing models such as named-entity recognition (NER) from resource-rich languages would be an appealing capability. However, differences in words and word order across languages make it a challenging problem. To improve mapping of lexical items across languages, we propose a method that finds translations based on bilingual word embeddings. To improve robustness to word order differences, we propose to use self-attention, which allows for a degree of flexibility with respect to word order. We demonstrate that these methods achieve state-of-the-art or competitive NER performance on commonly tested languages under a cross-lingual setting, with much lower resource requirements than past approaches. We also evaluate the challenges of applying these methods to Uyghur, a low-resource language.", "keyphrases": ["entity recognition", "word embedding", "low-resource language", "self-attention layer"]} +{"id": "shibata-etal-2014-large", "title": "A Large Scale Database of Strongly-related Events in Japanese", "abstract": "The knowledge about the relation between events is quite useful for coreference resolution, anaphora resolution, and several NLP applications such as dialogue system. This paper presents a large scale database of strongly-related events in Japanese, which has been acquired with our proposed method (Shibata and Kurohashi, 2011). 
In languages, where omitted arguments or zero anaphora are often utilized, such as Japanese, the coreference-based event extraction methods are hard to be applied, and so our method extracts strongly-related events in a two-phrase construct. This method first calculates the co-occurrence measure between predicate-arguments (events), and regards an event pair, whose mutual information is high, as strongly-related events. To calculate the co-occurrence measure efficiently, we adopt an association rule mining method. Then, we identify the remaining arguments by using case frames. The database contains approximately 100,000 unique events, with approximately 340,000 strongly-related event pairs, which is much larger than an existing automatically-constructed event database. We evaluated randomly-chosen 100 event pairs, and the accuracy was approximately 68%.", "keyphrases": ["large scale database", "strongly-related event", "japanese"]} +{"id": "schatzmann-etal-2007-agenda", "title": "Agenda-Based User Simulation for Bootstrapping a POMDP Dialogue System", "abstract": "This paper investigates the problem of bootstrapping a statistical dialogue manager without access to training data and proposes a new probabilistic agenda-based method for simulating user behaviour. In experiments with a statistical POMDP dialogue system, the simulator was realistic enough to successfully test the prototype system and train a dialogue policy. An extensive study with human subjects showed that the learned policy was highly competitive, with task completion rates above 90%.", "keyphrases": ["user simulation", "pomdp dialogue system", "dialogue management", "agenda-based user simulator", "human conversational data"]} +{"id": "wang-etal-2008-chinese", "title": "Chinese Word Sense Disambiguation with PageRank and HowNet", "abstract": "Word sense disambiguation is a basic problem in natural language processing. This paper proposed an unsupervised word sense disambiguation method based on PageRank and HowNet. In the method, a free text is firstly represented as a sememe graph with sememes as vertices and relatedness of sememes as weighted edges based on HowNet. Then UW-PageRank is applied on the sememe graph to score the importance of sememes. Score of each definition of one word can be computed from the score of sememes it contains. Finally, the highest scored definition is assigned to the word. This approach is tested on SENSEVAL-3 and the experimental results prove practical and effective.", "keyphrases": ["word sense disambiguation", "pagerank", "hownet"]} +{"id": "chen-etal-2021-finqa", "title": "FinQA: A Dataset of Numerical Reasoning over Financial Data", "abstract": "The sheer volume of financial statements makes it difficult for humans to access and analyze a business's financials. Robust numerical reasoning likewise faces unique challenges in this domain. In this work, we focus on answering deep questions over financial data, aiming to automate the analysis of a large corpus of financial documents. In contrast to existing tasks on general domain, the finance domain includes complex numerical reasoning and understanding of heterogeneous representations. To facilitate analytical progress, we propose a new large-scale dataset, FinQA, with Question-Answering pairs over Financial reports, written by financial experts. We also annotate the gold reasoning programs to ensure full explainability. We further introduce baselines and conduct comprehensive experiments in our dataset. 
The results demonstrate that popular, large, pre-trained models fall far short of expert humans in acquiring finance knowledge and in complex multi-step numerical reasoning on that knowledge. Our dataset \u2013 the first of its kind \u2013 should therefore enable significant, new community research into complex application domains. The dataset and code are publicly available at .", "keyphrases": ["numerical reasoning", "financial data", "finqa"]} +{"id": "torrens-urrutia-2018-approach", "title": "An Approach to Measuring Complexity with a Fuzzy Grammar & Degrees of Grammaticality", "abstract": "This paper presents an approach to evaluate complexity of a given natural language input by means of a Fuzzy Grammar with some fuzzy logic formulations. Usually, the approaches in linguistics has described a natural language grammar by means of discrete terms. However, a grammar can be explained in terms of degrees by following the concepts of linguistic gradience & fuzziness. Understanding a grammar as a fuzzy or gradient object allows us to establish degrees of grammaticality for every linguistic input. This shall be meaningful for linguistic complexity considering that the less grammatical an input is the more complex its processing will be. In this regard, the degree of complexity of a linguistic input (which is a linguistic representation of a natural language expression) depends on the chosen grammar. The bases of the fuzzy grammar are shown here. Some of these are described by Fuzzy Type Theory. The linguistic inputs are characterized by constraints through a Property Grammar.", "keyphrases": ["complexity", "fuzzy grammar", "grammaticality"]} +{"id": "green-etal-2010-improved", "title": "Improved Models of Distortion Cost for Statistical Machine Translation", "abstract": "The distortion cost function used in Moses-style machine translation systems has two flaws. First, it does not estimate the future cost of known required moves, thus increasing search errors. Second, all distortion is penalized linearly, even when appropriate re-orderings are performed. Because the cost function does not effectively constrain search, translation quality decreases at higher distortion limits, which are often needed when translating between languages of different typologies such as Arabic and English. To address these problems, we introduce a method for estimating future linear distortion cost, and a new discriminative distortion model that predicts word movement during translation. In combination, these extensions give a statistically significant improvement over a baseline distortion parameterization. When we triple the distortion limit, our model achieves a +2.32 BLEU average gain over Moses.", "keyphrases": ["distortion cost", "word movement", "length", "jump"]} +{"id": "arthur-etal-2015-semantic", "title": "Semantic Parsing of Ambiguous Input through Paraphrasing and Verification", "abstract": "We propose a new method for semantic parsing of ambiguous and ungrammatical input, such as search queries. We do so by building on an existing semantic parsing framework that uses synchronous context free grammars (SCFG) to jointly model the input sentence and output meaning representation. We generalize this SCFG framework to allow not one, but multiple outputs. Using this formalism, we construct a grammar that takes an ambiguous input string and jointly maps it into both a meaning representation and a natural language paraphrase that is less ambiguous than the original input. 
This paraphrase can be used to disambiguate the meaning representation via verification using a language model that calculates the probability of each paraphrase.", "keyphrases": ["ambiguous input", "verification", "semantic parsing"]} +{"id": "fujii-etal-2006-test", "title": "Test Collections for Patent Retrieval and Patent Classification in the Fifth NTCIR Workshop", "abstract": "This paper describes the test collections produced for the Patent Retrieval Task in the Fifth NTCIR Workshop. We performed the invalidity search task, in which each participant group searches a patent collection for the patents that can invalidate the demand in an existing claim. For this purpose, we performed both document and passage retrieval tasks. We also performed the automatic patent classification task using the F-term classification system. The test collections will be available to the public for research purposes.", "keyphrases": ["fifth ntcir workshop", "patent retrieval task", "information retrieval"]} +{"id": "vu-etal-2020-exploring", "title": "Exploring and Predicting Transferability across NLP Tasks", "abstract": "Recent advances in NLP demonstrate the effectiveness of training large-scale language models and transferring them to downstream tasks. Can fine-tuning these models on tasks other than language modeling further improve performance? In this paper, we conduct an extensive study of the transferability between 33 NLP tasks across three broad classes of problems (text classification, question answering, and sequence labeling). Our results show that transfer learning is more beneficial than previously thought, especially when target task data is scarce, and can improve performance even with low-data source tasks that differ substantially from the target task (e.g., part-of-speech tagging transfers well to the DROP QA dataset). We also develop task embeddings that can be used to predict the most transferable source tasks for a given target task, and we validate their effectiveness in experiments controlled for source and target data size. Overall, our experiments reveal that factors such as data size, task and domain similarity, and task complexity all play a role in determining transferability.", "keyphrases": ["transferability", "task data", "task embedding", "relatedness"]} +{"id": "veale-2011-creative", "title": "Creative Language Retrieval: A Robust Hybrid of Information Retrieval and Linguistic Creativity", "abstract": "Information retrieval (IR) and figurative language processing (FLP) could scarcely be more different in their treatment of language and meaning. IR views language as an open-ended set of mostly stable signs with which texts can be indexed and retrieved, focusing more on a text's potential relevance than its potential meaning. In contrast, FLP views language as a system of unstable signs that can be used to talk about the world in creative new ways. There is another key difference: IR is practical, scalable and robust, and in daily use by millions of casual users. FLP is neither scalable nor robust, and not yet practical enough to migrate beyond the lab. 
This paper thus presents a mutually beneficial hybrid of IR and FLP, one that enriches IR with new operators to enable the non-literal retrieval of creative expressions, and which also transplants FLP into a robust, scalable framework in which practical applications of linguistic creativity can be implemented.", "keyphrases": ["hybrid", "information retrieval", "linguistic creativity"]} +{"id": "choi-etal-2014-lexical", "title": "Lexical Acquisition for Opinion Inference: A Sense-Level Lexicon of Benefactive and Malefactive Events", "abstract": "Opinion inference arises when opinions are expressed toward states and events which positively or negatively affect entities, i.e., benefactive and malefactive events. This paper addresses creating a lexicon of such events, which would be helpful to infer opinions. Verbs may be ambiguous, in that some meanings may be benefactive and others may be malefactive or neither. Thus, we use WordNet to create a sense-level lexicon. We begin with seed senses culled from FrameNet and expand the lexicon using WordNet relationships. The evaluations show that the accuracy of the approach is well above baseline accuracy.", "keyphrases": ["opinion inference", "sense-level lexicon", "malefactive event"]} +{"id": "majidi-crane-2013-active", "title": "Active Learning for Dependency Parsing by A Committee of Parsers", "abstract": "Data-driven dependency parsers need a large annotated corpus to learn how to generate dependency graph of a given sentence. But annotations on structured corpora are expensive to collect and requires a labor intensive task. Active learning is a machine learning approach that allows only informative examples to be selected for annotation and is usually used when the number of annotated data is abundant and acquisition of more labeled data is expensive. We will provide a novel framework in which a committee of dependency parsers collaborate to improve their efficiency using active learning techniques. Queries are made up only from uncertain tokens, and the annotations of the remaining tokens of selected sentences are voted among committee members.", "keyphrases": ["dependency parsing", "committee", "active learning"]} +{"id": "rahimi-etal-2019-massively", "title": "Massively Multilingual Transfer for NER", "abstract": "In cross-lingual transfer, NLP models over one or more source languages are applied to a low-resource target language. While most prior work has used a single source model or a few carefully selected models, here we consider a \u201cmassive\u201d setting with many such models. This setting raises the problem of poor transfer, particularly from distant languages. We propose two techniques for modulating the transfer, suitable for zero-shot or few-shot learning, respectively. Evaluating on named entity recognition, we show that our techniques are much more effective than strong baselines, including standard ensembling, and our unsupervised method rivals oracle selection of the single best individual model.", "keyphrases": ["multilingual transfer", "single-source transfer", "inter alia"]} +{"id": "che-etal-2010-ltp", "title": "LTP: A Chinese Language Technology Platform", "abstract": "LTP (Language Technology Platform) is an integrated Chinese processing platform which includes a suite of high performance natural language processing (NLP) modules and relevant corpora. Especially for the syntactic and semantic parsing modules, we achieved good results in some relevant evaluations, such as CoNLL and SemEval. 
Based on XML internal data representation, users can easily use these modules and corpora by invoking DLL (Dynamic Link Library) or Web service APIs (Application Program Interface), and view the processing results directly by the visualization tool.", "keyphrases": ["ltp", "word segmentation", "pos tagging"]} +{"id": "laban-etal-2020-summary", "title": "The Summary Loop: Learning to Write Abstractive Summaries Without Examples", "abstract": "This work presents a new approach to unsupervised abstractive summarization based on maximizing a combination of coverage and fluency for a given length constraint. It introduces a novel method that encourages the inclusion of key terms from the original document into the summary: key terms are masked out of the original document and must be filled in by a coverage model using the current generated summary. A novel unsupervised training procedure leverages this coverage model along with a fluency model to generate and score summaries. When tested on popular news summarization datasets, the method outperforms previous unsupervised methods by more than 2 R-1 points, and approaches results of competitive supervised methods. Our model attains higher levels of abstraction with copied passages roughly two times shorter than prior work, and learns to compress and merge sentences without supervision.", "keyphrases": ["summarization", "fluency", "length constraint", "unsupervised method"]} +{"id": "yao-van-durme-2014-information", "title": "Information Extraction over Structured Data: Question Answering with Freebase", "abstract": "Answering natural language questions using the Freebase knowledge base has recently been explored as a platform for advancing the state of the art in open domain semantic parsing. Those efforts map questions to sophisticated meaning representations that are then attempted to be matched against viable answer candidates in the knowledge base. Here we show that relatively modest information extraction techniques, when paired with a webscale corpus, can outperform these sophisticated approaches by roughly 34% relative gain.", "keyphrases": ["natural language question", "knowledge base", "information extraction", "query", "topic graph"]} +{"id": "kumar-etal-2018-aggression", "title": "Aggression-annotated Corpus of Hindi-English Code-mixed Data", "abstract": "As the interaction over the web has increased, incidents of aggression and related events like trolling, cyberbullying, flaming, hate speech, etc. too have increased manifold across the globe. While most of these behaviour like bullying or hate speech have predated the Internet, the reach and extent of the Internet has given these an unprecedented power and influence to affect the lives of billions of people. So it is of utmost significance and importance that some preventive measures be taken to provide safeguard to the people using the web such that the web remains a viable medium of communication and connection, in general. In this paper, we discuss the development of an aggression tagset and an annotated corpus of Hindi-English code-mixed data from two of the most popular social networking and social media platforms in India, Twitter and Facebook. The corpus is annotated using a hierarchical tagset of 3 top-level tags and 10 level 2 tags. 
The final dataset contains approximately 18k tweets and 21k facebook comments and is being released for further research in the field.", "keyphrases": ["hindi-english code-mixed data", "aggression", "facebook", "distinction"]} +{"id": "liu-etal-2019-original", "title": "Original Semantics-Oriented Attention and Deep Fusion Network for Sentence Matching", "abstract": "Sentence matching is a key issue in natural language inference and paraphrase identification. Despite the recent progress on multi-layered neural network with cross sentence attention, one sentence learns attention to the intermediate representations of another sentence, which are propagated from preceding layers and therefore are uncertain and unstable for matching, particularly at the risk of error propagation. In this paper, we present an original semantics-oriented attention and deep fusion network (OSOA-DFN) for sentence matching. Unlike existing models, each attention layer of OSOA-DFN is oriented to the original semantic representation of another sentence, which captures the relevant information from a fixed matching target. The multiple attention layers allow one sentence to repeatedly read the important information of another sentence for better matching. We then additionally design deep fusion to propagate the attention information at each matching layer. At last, we introduce a self-attention mechanism to capture global context to enhance attention-aware representation within each sentence. Experiment results on three sentence matching benchmark datasets SNLI, SciTail and Quora show that OSOA-DFN has the ability to model sentence matching more precisely.", "keyphrases": ["deep fusion network", "sentence matching", "original semantics-oriented attention"]} +{"id": "li-etal-2020-hitrans", "title": "HiTrans: A Transformer-Based Context- and Speaker-Sensitive Model for Emotion Detection in Conversations", "abstract": "Emotion detection in conversations (EDC) is to detect the emotion for each utterance in conversations that have multiple speakers. Different from the traditional non-conversational emotion detection, the model for EDC should be context-sensitive (e.g., understanding the whole conversation rather than one utterance) and speaker-sensitive (e.g., understanding which utterance belongs to which speaker). In this paper, we propose a transformer-based context- and speaker-sensitive model for EDC, namely HiTrans, which consists of two hierarchical transformers. We utilize BERT as the low-level transformer to generate local utterance representations, and feed them into another high-level transformer so that utterance representations could be sensitive to the global context of the conversation. Moreover, we exploit an auxiliary task to make our model speaker-sensitive, called pairwise utterance speaker verification (PUSV), which aims to classify whether two utterances belong to the same speaker. We evaluate our model on three benchmark datasets, namely EmoryNLP, MELD and IEMOCAP. Results show that our model outperforms previous state-of-the-art models.", "keyphrases": ["speaker-sensitive model", "emotion detection", "conversation"]} +{"id": "dreyer-etal-2008-latent", "title": "Latent-Variable Modeling of String Transductions with Finite-State Methods", "abstract": "String-to-string transduction is a central problem in computational linguistics and natural language processing. It occurs in tasks as diverse as name transliteration, spelling correction, pronunciation modeling and inflectional morphology. 
We present a conditional loglinear model for string-to-string transduction, which employs overlapping features over latent alignment sequences, and which learns latent classes and latent string pair regions from incomplete training data. We evaluate our approach on morphological tasks and demonstrate that latent variables can dramatically improve results, even when trained on small data sets. On the task of generating morphological forms, we outperform a baseline method reducing the error rate by up to 48%. On a lemmatization task, we reduce the error rates in Wicentowski (2002) by 38\u201392%.", "keyphrases": ["string transduction", "latent variable", "lemmatization", "finite-state machine"]} +{"id": "bonial-etal-2020-infoforager", "title": "InfoForager: Leveraging Semantic Search with AMR for COVID-19 Research", "abstract": "This paper examines how Abstract Meaning Representation (AMR) can be utilized for finding answers to research questions in medical scientific documents, in particular, to advance the study of UV (ultraviolet) inactivation of the novel coronavirus that causes the disease COVID-19. We describe the development of a proof-of-concept prototype tool, InfoForager, which uses AMR to conduct a semantic search, targeting the meaning of the user question, and matching this to sentences in medical documents that may contain information to answer that question. This work was conducted as a sprint over a period of six weeks, and reveals both promising results and challenges in reducing the user search time relating to COVID-19 research, and in general, domain adaption of AMR for this task.", "keyphrases": ["semantic search", "covid-19 research", "infoforager"]} +{"id": "haagsma-etal-2020-magpie", "title": "MAGPIE: A Large Corpus of Potentially Idiomatic Expressions", "abstract": "Given the limited size of existing idiom corpora, we aim to enable progress in automatic idiom processing and linguistic analysis by creating the largest-to-date corpus of idioms for English. Using a fixed idiom list, automatic pre-extraction, and a strictly controlled crowdsourced annotation procedure, we show that it is feasible to build a high-quality corpus comprising more than 50K instances, an order of magnitude larger than previous resources. Crucial ingredients of crowdsourcing were the selection of crowdworkers, clear and comprehensive instructions, and an interface that breaks down the task in small, manageable steps. Analysis of the resulting corpus revealed strong effects of genre on idiom distribution, providing new evidence for existing theories on what influences idiom usage. The corpus also contains rich metadata, and is made publicly available.", "keyphrases": ["potential idiomatic expression", "crowdsourcing", "magpie"]} +{"id": "zhang-etal-2007-grammar", "title": "A Grammar-driven Convolution Tree Kernel for Semantic Role Classification", "abstract": "Convolution tree kernel has shown promising results in semantic role classification. However, it only carries out hard matching, which may lead to over-fitting and less accurate similarity measure. To remove the constraint, this paper proposes a grammar-driven convolution tree kernel for semantic role classification by introducing more linguistic knowledge into the standard tree kernel. The proposed grammar-driven tree kernel displays two advantages over the previous one: 1) grammar-driven approximate substructure matching and 2) grammar-driven approximate tree node matching. 
The two improvements enable the grammar-driven tree kernel to explore more linguistically motivated structure features than the previous one. Experiments on the CoNLL-2005 SRL shared task show that the grammar-driven tree kernel significantly outperforms the previous non-grammar-driven one in SRL. Moreover, we present a composite kernel to integrate feature-based and tree kernel-based methods. Experimental results show that the composite kernel outperforms the previously best-reported methods.", "keyphrases": ["tree kernel", "semantic role classification", "grammar-driven tree kernel"]} +{"id": "vulic-etal-2019-really", "title": "Do We Really Need Fully Unsupervised Cross-Lingual Embeddings?", "abstract": "Recent efforts in cross-lingual word embedding (CLWE) learning have predominantly focused on fully unsupervised approaches that project monolingual embeddings into a shared cross-lingual space without any cross-lingual signal. The lack of any supervision makes such approaches conceptually attractive. Yet, their only core difference from (weakly) supervised projection-based CLWE methods is in the way they obtain a seed dictionary used to initialize an iterative self-learning procedure. The fully unsupervised methods have arguably become more robust, and their primary use case is CLWE induction for pairs of resource-poor and distant languages. In this paper, we question the ability of even the most robust unsupervised CLWE approaches to induce meaningful CLWEs in these more challenging settings. A series of bilingual lexicon induction (BLI) experiments with 15 diverse languages (210 language pairs) show that fully unsupervised CLWE methods still fail for a large number of language pairs (e.g., they yield zero BLI performance for 87/210 pairs). Even when they succeed, they never surpass the performance of weakly supervised methods (seeded with 500-1,000 translation pairs) using the same self-learning procedure in any BLI setup, and the gaps are often substantial. These findings call for revisiting the main motivations behind fully unsupervised CLWE methods.", "keyphrases": ["cross-lingual embedding", "distant language", "low-resource language"]} +{"id": "plank-etal-2014-learning", "title": "Learning part-of-speech taggers with inter-annotator agreement loss", "abstract": "In natural language processing (NLP) annotation projects, we use inter-annotator agreement measures and annotation guidelines to ensure consistent annotations. However, annotation guidelines often make linguistically debatable and even somewhat arbitrary decisions, and inter-annotator agreement is often less than perfect. While annotation projects usually specify how to deal with linguistically debatable phenomena, annotator disagreements typically still stem from these \u201chard\u201d cases. This indicates that some errors are more debatable than others. In this paper, we use small samples of doubly-annotated part-of-speech (POS) data for Twitter to estimate annotation reliability and show how those metrics of likely inter-annotator agreement can be implemented in the loss functions of POS taggers. We find that these cost-sensitive algorithms perform better across annotation projects and, more surprisingly, even on data annotated according to the same guidelines.
Finally, we show that POS tagging models sensitive to inter-annotator agreement perform better on the downstream task of chunking.", "keyphrases": ["part-of-speech", "tagger", "inter-annotator agreement loss", "pos", "loss function"]} +{"id": "fan-etal-2017-transfer", "title": "Transfer Learning for Neural Semantic Parsing", "abstract": "The goal of semantic parsing is to map natural language to a machine interpretable meaning representation language (MRL). One of the constraints that limits full exploration of deep learning technologies for semantic parsing is the lack of sufficient annotation training data. In this paper, we propose using sequence-to-sequence in a multi-task setup for semantic parsing with focus on transfer learning. We explore three multi-task architectures for sequence-to-sequence model and compare their performance with the independently trained model. Our experiments show that the multi-task setup aids transfer learning from an auxiliary task with large labeled data to the target task with smaller labeled data. We see an absolute accuracy gain ranging from 1.0% to 4.4% in our in-house data set and we also see good gains ranging from 2.5% to 7.0% on the ATIS semantic parsing tasks with syntactic and semantic auxiliary tasks.", "keyphrases": ["semantic parsing", "transfer learning", "different domain"]} +{"id": "li-srikumar-2019-augmenting", "title": "Augmenting Neural Networks with First-order Logic", "abstract": "Today, the dominant paradigm for training neural networks involves minimizing task loss on a large dataset. Using world knowledge to inform a model, and yet retain the ability to perform end-to-end training remains an open question. In this paper, we present a novel framework for introducing declarative knowledge to neural network architectures in order to guide training and prediction. Our framework systematically compiles logical statements into computation graphs that augment a neural network without extra learnable parameters or manual redesign. We evaluate our modeling strategy on three tasks: machine comprehension, natural language inference, and text chunking. Our experiments show that knowledge-augmented networks can strongly improve over baselines, especially in low-data regimes.", "keyphrases": ["first-order logic", "declarative knowledge", "neuron"]} +{"id": "kiela-etal-2018-learning", "title": "Learning Visually Grounded Sentence Representations", "abstract": "We investigate grounded sentence representations, where we train a sentence encoder to predict the image features of a given caption\u2014i.e., we try to \u201cimagine\u201d how a sentence would be depicted visually\u2014and use the resultant features as sentence representations. We examine the quality of the learned representations on a variety of standard sentence representation quality benchmarks, showing improved performance for grounded models over non-grounded ones. In addition, we thoroughly analyze the extent to which grounding contributes to improved performance, and show that the system also learns improved word embeddings.", "keyphrases": ["sentence representation", "image feature", "caption"]} +{"id": "dieng-etal-2020-topic", "title": "Topic Modeling in Embedding Spaces", "abstract": "Topic modeling analyzes documents to learn meaningful patterns of words. However, existing topic models fail to learn interpretable topics when working with large and heavy-tailed vocabularies.
To this end, we develop the embedded topic model (etm), a generative model of documents that marries traditional topic models with word embeddings. More specifically, the etm models each word with a categorical distribution whose natural parameter is the inner product between the word's embedding and an embedding of its assigned topic. To fit the etm, we develop an efficient amortized variational inference algorithm. The etm discovers interpretable topics even with large vocabularies that include rare words and stop words. It outperforms existing document models, such as latent Dirichlet allocation, in terms of both topic quality and predictive performance.", "keyphrases": ["etm", "topic modeling", "pre-trained word embedding"]} +{"id": "specia-etal-2009-improving", "title": "Improving the Confidence of Machine Translation Quality Estimates", "abstract": "We investigate the problem of estimating the quality of the output of machine translation systems at the sentence level when reference translations are not available. The focus is on automatically identifying a threshold to map a continuous predicted score into \u201cgood\u201d / \u201cbad\u201d categories for filtering out bad-quality cases in a translation post-edition task. We use the theory of Inductive Confidence Machines (ICM) to identify this threshold according to a confidence level that is expected for a given task. Experiments show that this approach gives improved estimates when compared to those based on classification or regression algorithms without ICM.", "keyphrases": ["confidence", "reference translation", "post-editing effort", "unseen machine"]} +{"id": "lee-etal-2015-event", "title": "Event Detection and Factuality Assessment with Non-Expert Supervision", "abstract": "Events are communicated in natural language with varying degrees of certainty. For example, if you are \u201choping for a raise,\u201d it may be somewhat less likely than if you are \u201cexpecting\u201d one. To study these distinctions, we present scalable, highquality annotation schemes for event detection and fine-grained factuality assessment. We find that non-experts, with very little training, can reliably provide judgments about what events are mentioned and the extent to which the author thinks they actually happened. We also show how such data enables the development of regression models for fine-grained scalar factuality predictions that outperform strong baselines.", "keyphrases": ["factuality assessment", "certainty", "event detection"]} +{"id": "wong-etal-2008-extractive", "title": "Extractive Summarization Using Supervised and Semi-Supervised Learning", "abstract": "It is difficult to identify sentence importance from a single point of view. In this paper, we propose a learning-based approach to combine various sentence features. They are categorized as surface, content, relevance and event features. Surface features are related to extrinsic aspects of a sentence. Content features measure a sentence based on content-conveying words. Event features represent sentences by events they contained. Relevance features evaluate a sentence from its relatedness with other sentences. Experiments show that the combined features improved summarization performance significantly. Although the evaluation results are encouraging, supervised learning approach requires much labeled data. Therefore we investigate co-training by combining labeled and unlabeled data. 
Experiments show that this semi-supervised learning approach achieves comparable performance to its supervised counterpart and saves about half of the labeling time cost.", "keyphrases": ["summarization", "unlabeled data", "machine learning technique"]} +{"id": "wang-hua-2014-semiparametric", "title": "A Semiparametric Gaussian Copula Regression Model for Predicting Financial Risks from Earnings Calls", "abstract": "Earnings call summarizes the financial performance of a company, and it is an important indicator of the future financial risks of the company. We quantitatively study how earnings calls are correlated with the financial risks, with a special focus on the financial crisis of 2009. In particular, we perform a text regression task: given the transcript of an earnings call, we predict the volatility of stock prices from the week after the call is made. We propose the use of copula: a powerful statistical framework that separately models the uniform marginals and their complex multivariate stochastic dependencies, while not requiring any prior assumptions on the distributions of the covariate and the dependent variable. By performing probability integral transform, our approach moves beyond the standard count-based bag-of-words models in NLP, and improves previous work on text regression by incorporating the correlation among local features in the form of semiparametric Gaussian copula. In experiments, we show that our model significantly outperforms strong linear and non-linear discriminative baselines on three datasets under various settings.", "keyphrases": ["gaussian copula", "financial risk", "earning call", "volatility"]} +{"id": "levy-etal-2021-collecting-large", "title": "Collecting a Large-Scale Gender Bias Dataset for Coreference Resolution and Machine Translation", "abstract": "Recent works have found evidence of gender bias in models of machine translation and coreference resolution using mostly synthetic diagnostic datasets. While these quantify bias in a controlled experiment, they often do so on a small scale and consist mostly of artificial, out-of-distribution sentences. In this work, we find grammatical patterns indicating stereotypical and non-stereotypical gender-role assignments (e.g., female nurses versus male dancers) in corpora from three domains, resulting in a first large-scale gender bias dataset of 108K diverse real-world English sentences. We manually verify the quality of our corpus and use it to evaluate gender bias in various coreference resolution and machine translation models. We find that all tested models tend to over-rely on gender stereotypes when presented with natural inputs, which may be especially harmful when deployed in commercial systems. Finally, we show that our dataset lends itself to finetuning a coreference resolution model, finding it mitigates bias on a held out set. Our dataset and models are publicly available at github.com/SLAB-NLP/BUG. We hope they will spur future research into gender bias evaluation and mitigation techniques in realistic settings.", "keyphrases": ["gender bias dataset", "coreference resolution", "machine translation"]} +{"id": "iwai-etal-2019-applying", "title": "Applying Machine Translation to Psychology: Automatic Translation of Personality Adjectives", "abstract": "We introduce our approach to apply machine translation to psychology, especially to translate English adjectives in a psychological personality questionnaire.
We first extend seed English personality adjectives with a word2vec model trained with web sentences, and then feed the acquired words to a phrase-based machine translation model. We use Moses trained with bilingual corpora that consist of TED subtitles, movie subtitles and Wikipedia. We collect Japanese translations whose translation probabilities are higher than .01 and filter them based on human evaluations. This resulted in 507 Japanese personality descriptors. We conducted a web-survey (N=17,751) and finalized a personality questionnaire. Statistical analyses supported the five-factor structure, reliability and criterion-validity of the newly developed questionnaire. This shows the potential applicability of machine translation to psychology. We discuss further issues related to machine translation application to psychology.", "keyphrases": ["machine translation", "personality adjective", "questionnaire"]} +{"id": "hendrickx-etal-2012-modality", "title": "Modality in Text: a Proposal for Corpus Annotation", "abstract": "We present an annotation scheme for modality in Portuguese. In our annotation scheme we have tried to combine a more theoretical linguistic viewpoint with a practical annotation scheme that will also be useful for NLP research but is not geared towards one specific application. Our notion of modality focuses on the attitude and opinion of the speaker, or of the subject of the sentence. We validated the annotation scheme on a corpus sample of approximately 2000 sentences that we fully annotated with modal information using the MMAX2 annotation tool to produce XML annotation. We discuss our main findings and give attention to the difficult cases that we encountered as they illustrate the complexity of modality and its interactions with other elements in the text.", "keyphrases": ["annotation scheme", "portuguese", "modality"]} +{"id": "hassan-etal-2017-synthetic", "title": "Synthetic Data for Neural Machine Translation of Spoken-Dialects", "abstract": "In this paper, we introduce a novel approach to generate synthetic data for training Neural Machine Translation systems. The proposed approach supports language variants and dialects with very limited parallel training data. This is achieved using seed data to project words from a closely-related resource-rich language to an under-resourced language variant via word embedding representations. The proposed approach is based on localized embedding projection of distributed representations which utilizes monolingual embeddings and approximate nearest neighbors queries to transform parallel data across language variants. Our approach is language independent and can be used to generate data for any variant of the source language such as slang or spoken dialect or even for a different language that is related to the source language. We report experimental results on Levantine to English translation using Neural Machine Translation.
We show that the synthetic data can provide significant improvements over a very large scale system by more than 2.8 BLEU points and it can be used to provide a reliable translation system for a spoken dialect which does not have sufficient parallel data.", "keyphrases": ["neural machine translation", "dialect", "synthetic data"]} +{"id": "liu-etal-2021-enriching", "title": "Enriching Non-Autoregressive Transformer with Syntactic and Semantic Structures for Neural Machine Translation", "abstract": "The non-autoregressive models have boosted the efficiency of neural machine translation through parallelized decoding at the cost of effectiveness, when comparing with the autoregressive counterparts. In this paper, we claim that the syntactic and semantic structures among natural language are critical for non-autoregressive machine translation and can further improve the performance. However, these structures are rarely considered in the existing non-autoregressive models. Inspired by this intuition, we propose to incorporate the explicit syntactic and semantic structure of languages into a non-autoregressive Transformer, for the task of neural machine translation. Moreover, we also consider the intermediate latent alignment within target sentences to better learn the long-term token dependencies. Experimental results on two real-world datasets (i.e., WMT14 En-De and WMT16 En-Ro) show that our model achieves a significantly faster speed, as well as keeps the translation quality when compared with several state-of-the-art non-autoregressive models.", "keyphrases": ["non-autoregressive transformer", "semantic structure", "neural machine translation"]} +{"id": "jiao-etal-2020-exploiting", "title": "Exploiting Unsupervised Data for Emotion Recognition in Conversations", "abstract": "Emotion Recognition in Conversations (ERC) aims to predict the emotional state of speakers in conversations, which is essentially a text classification task. Unlike the sentence-level text classification problem, the available supervised data for the ERC task is limited, which potentially prevents the models from playing their maximum effect. In this paper, we propose a novel approach to leverage unsupervised conversation data, which is more accessible. Specifically, we propose the Conversation Completion (ConvCom) task, which attempts to select the correct answer from candidate answers to fill a masked utterance in a conversation. Then, we Pre-train a basic COntext-Dependent Encoder (Pre-CODE) on the ConvCom task. Finally, we fine-tune the Pre-CODE on the datasets of ERC. Experimental results demonstrate that pre-training on unsupervised data achieves significant improvement of performance on the ERC datasets, particularly on the minority emotion classes.", "keyphrases": ["unsupervised data", "emotion recognition", "conversation"]} +{"id": "schmitt-etal-2011-modeling", "title": "Modeling and Predicting Quality in Spoken Human-Computer Interaction", "abstract": "In this work we describe the modeling and prediction of Interaction Quality (IQ) in Spoken Dialogue Systems (SDS) using Support Vector Machines. The model can be employed to estimate the quality of the ongoing interaction at arbitrary points in a spoken human-computer interaction. We show that the use of 52 completely automatic features characterizing the system-user exchange significantly outperforms state-of-the-art approaches. The model is evaluated on publicly available data from the CMU Let's Go Bus Information system.
It reaches a performance of 61.6% unweighted average recall when discriminating between 5 classes (good to very poor). It can be further shown that incorporating knowledge about the user's emotional state hardly improves the performance.", "keyphrases": ["human-computer interaction", "interaction quality", "spoken dialogue systems", "average recall", "modeling"]} +{"id": "nagoudi-etal-2022-arat5", "title": "AraT5: Text-to-Text Transformers for Arabic Language Generation", "abstract": "Transfer learning with a unified Transformer framework (T5) that converts all language problems into a text-to-text format was recently proposed as a simple and effective transfer learning approach. Although a multilingual version of the T5 model (mT5) was also introduced, it is not clear how well it can fare on non-English tasks involving diverse data. To investigate this question, we apply mT5 on a language with a wide variety of dialects\u2013Arabic. For evaluation, we introduce a novel benchmark for ARabic language GENeration (ARGEN), covering seven important tasks. For model comparison, we pre-train three powerful Arabic T5-style models and evaluate them on ARGEN. Although pre-trained with ~49 times less data, our new models perform significantly better than mT5 on all ARGEN tasks (in 52 out of 59 test sets) and set several new SOTAs. Our models also establish new SOTA on the recently-proposed, large Arabic language understanding evaluation benchmark ARLUE (Abdul-Mageed et al., 2021). Our new models are publicly available. We also link to ARGEN datasets through our repository.", "keyphrases": ["text-to-text transformer", "arabic language generation", "arat5"]} +{"id": "liao-grishman-2011-acquiring", "title": "Acquiring Topic Features to improve Event Extraction: in Pre-selected and Balanced Collections", "abstract": "Event extraction is a particularly challenging type of information extraction (IE) that may require inferences from the whole article. However, most current event extraction systems rely on local information at the phrase or sentence level, and do not consider the article as a whole, thus limiting extraction performance. Moreover, most annotated corpora are artificially enriched to include enough positive samples of the events of interest; event identification on a more balanced collection, such as unfiltered newswire, may perform much worse. In this paper, we investigate the use of unsupervised topic models to extract topic features to improve event extraction both on test data similar to training data, and on more balanced collections. We compare this unsupervised approach to a supervised multi-label text classifier, and show that unsupervised topic modeling can get better results for both collections, and especially for a more balanced collection. We show that the unsupervised topic model can improve trigger, argument and role labeling by 3.5%, 6.9% and 6% respectively on a pre-selected corpus, and by 16.8%, 12.5% and 12.7% on a balanced corpus.", "keyphrases": ["topic feature", "event extraction", "balanced collection"]} +{"id": "yoshimura-etal-2019-filtering", "title": "Filtering Pseudo-References by Paraphrasing for Automatic Evaluation of Machine Translation", "abstract": "In this paper, we introduce our participation in the WMT 2019 Metric Shared Task. We propose an improved version of sentence BLEU using filtered pseudo-references. We propose a method to filter pseudo-references by paraphrasing for automatic evaluation of machine translation (MT).
We use the outputs of off-the-shelf MT systems as pseudo-references filtered by paraphrasing in addition to a single human reference (gold reference). We use BERT fine-tuned with paraphrase corpus to filter pseudo-references by checking the paraphrasability with the gold reference. Our experimental results of the WMT 2016 and 2017 datasets show that our method achieved higher correlation with human evaluation than the sentence BLEU (SentBLEU) baselines with a single reference and with unfiltered pseudo-references.", "keyphrases": ["pseudo-reference", "paraphrasing", "machine translation"]} +{"id": "wan-etal-2009-improving", "title": "Improving Grammaticality in Statistical Sentence Generation: Introducing a Dependency Spanning Tree Algorithm with an Argument Satisfaction Model", "abstract": "Abstract-like text summarisation requires a means of producing novel summary sentences. In order to improve the grammaticality of the generated sentence, we model a global (sentence) level syntactic structure. We couch statistical sentence generation as a spanning tree problem in order to search for the best dependency tree spanning a set of chosen words. We also introduce a new search algorithm for this task that models argument satisfaction to improve the linguistic validity of the generated tree. We treat the allocation of modifiers to heads as a weighted bipartite graph matching (or assignment) problem, a well studied problem in graph theory. Using BLEU to measure performance on a string regeneration task, we found an improvement, illustrating the benefit of the spanning tree approach armed with an argument satisfaction model.", "keyphrases": ["statistical sentence generation", "argument satisfaction model", "n-gram language model"]} +{"id": "perez-almendros-etal-2020-dont", "title": "Don't Patronize Me! An Annotated Dataset with Patronizing and Condescending Language towards Vulnerable Communities", "abstract": "In this paper, we introduce a new annotated dataset which is aimed at supporting the development of NLP models to identify and categorize language that is patronizing or condescending towards vulnerable communities (e.g. refugees, homeless people, poor families). While the prevalence of such language in the general media has long been shown to have harmful effects, it differs from other types of harmful language, in that it is generally used unconsciously and with good intentions. We furthermore believe that the often subtle nature of patronizing and condescending language (PCL) presents an interesting technical challenge for the NLP community. Our analysis of the proposed dataset shows that identifying PCL is hard for standard NLP models, with language models such as BERT achieving the best results.", "keyphrases": ["patronizing", "vulnerable community", "refugee", "poor family"]} +{"id": "parton-etal-2012-automatic", "title": "Can Automatic Post-Editing Make MT More Meaningful", "abstract": "Automatic post-editors (APEs) enable the re-use of black box machine translation (MT) systems for a variety of tasks where different aspects of translation are important. In this paper, we describe APEs that target adequacy errors, a critical problem for tasks such as cross-lingual question-answering, and compare different approaches for post-editing: a rule-based system and a feedback approach that uses a computer in the loop to suggest improvements to the MT system. We test the APEs on two different MT systems and across two different genres. 
Human evaluation shows that the APEs significantly improve adequacy, regardless of approach, MT system or genre: 30-56% of the post-edited sentences have improved adequacy compared to the original MT.", "keyphrases": ["ape", "adequacy", "text analysis"]} +{"id": "levenberg-etal-2010-stream", "title": "Stream-based Translation Models for Statistical Machine Translation", "abstract": "Typical statistical machine translation systems are trained with static parallel corpora. Here we account for scenarios with a continuous incoming stream of parallel training data. Such scenarios include daily governmental proceedings, sustained output from translation agencies, or crowd-sourced translations. We show incorporating recent sentence pairs from the stream improves performance compared with a static baseline. Since frequent batch retraining is computationally demanding we introduce a fast incremental alternative using an online version of the EM algorithm. To bound our memory requirements we use a novel data-structure and associated training regime. When compared to frequent batch retraining, our online time and space-bounded model achieves the same performance with significantly less computational overhead.", "keyphrases": ["statistical machine translation", "stream", "sentence pair", "online version", "smt model"]} +{"id": "bergsma-etal-2008-distributional", "title": "Distributional Identification of Non-Referential Pronouns", "abstract": "We present an automatic approach to determining whether a pronoun in text refers to a preceding noun phrase or is instead nonreferential. We extract the surrounding textual context of the pronoun and gather, from a large corpus, the distribution of words that occur within that context. We learn to reliably classify these distributions as representing either referential or non-referential pronoun instances. Despite its simplicity, experimental results on classifying the English pronoun it show the system achieves the highest performance yet attained on this important task.", "keyphrases": ["pronoun", "textual context", "distributional method"]} +{"id": "ghaeini-etal-2018-dr", "title": "DR-BiLSTM: Dependent Reading Bidirectional LSTM for Natural Language Inference", "abstract": "We present a novel deep learning architecture to address the natural language inference (NLI) task. Existing approaches mostly rely on simple reading mechanisms for independent encoding of the premise and hypothesis. Instead, we propose a novel dependent reading bidirectional LSTM network (DR-BiLSTM) to efficiently model the relationship between a premise and a hypothesis during encoding and inference. We also introduce a sophisticated ensemble strategy to combine our proposed models, which noticeably improves final predictions. Finally, we demonstrate how the results can be improved further with an additional preprocessing step. Our evaluation shows that DR-BiLSTM obtains the best single model and ensemble model results achieving the new state-of-the-art scores on the Stanford NLI dataset.", "keyphrases": ["natural language inference", "nli", "dr-bilstm"]} +{"id": "barron-cedeno-etal-2013-plagiarism", "title": "Plagiarism Meets Paraphrasing: Insights for the Next Generation in Automatic Plagiarism Detection", "abstract": "Although paraphrasing is the linguistic mechanism underlying many plagiarism cases, little attention has been paid to its analysis in the framework of automatic plagiarism detection. 
Therefore, state-of-the-art plagiarism detectors find it difficult to detect cases of paraphrase plagiarism. In this article, we analyze the relationship between paraphrasing and plagiarism, paying special attention to which paraphrase phenomena underlie acts of plagiarism and which of them are detected by plagiarism detection systems. With this aim in mind, we created the P4P corpus, a new resource that uses a paraphrase typology to annotate a subset of the PAN-PC-10 corpus for automatic plagiarism detection. The results of the Second International Competition on Plagiarism Detection were analyzed in the light of this annotation. The presented experiments show that (i) more complex paraphrase phenomena and a high density of paraphrase mechanisms make plagiarism detection more difficult, (ii) lexical substitutions are the paraphrase mechanisms used the most when plagiarizing, and (iii) paraphrase mechanisms tend to shorten the plagiarized text. For the first time, the paraphrase mechanisms behind plagiarism have been analyzed, providing critical insights for the improvement of automatic plagiarism detection systems.", "keyphrases": ["paraphrasing", "insight", "automatic plagiarism detection"]} +{"id": "drozdov-etal-2019-unsupervised-latent", "title": "Unsupervised Latent Tree Induction with Deep Inside-Outside Recursive Auto-Encoders", "abstract": "We introduce the deep inside-outside recursive autoencoder (DIORA), a fully-unsupervised method for discovering syntax that simultaneously learns representations for constituents within the induced tree. Our approach predicts each word in an input sentence conditioned on the rest of the sentence. During training we use dynamic programming to consider all possible binary trees over the sentence, and for inference we use the CKY algorithm to extract the highest scoring parse. DIORA outperforms previously reported results for unsupervised binary constituency parsing on the benchmark WSJ dataset.", "keyphrases": ["recursive autoencoder", "diora", "dynamic programming", "latent representation", "inside-outside algorithm"]} +{"id": "han-etal-2020-continual", "title": "Continual Relation Learning via Episodic Memory Activation and Reconsolidation", "abstract": "Continual relation learning aims to continually train a model on new data to learn incessantly emerging novel relations while avoiding catastrophically forgetting old relations. Some pioneering work has proved that storing a handful of historical relation examples in episodic memory and replaying them in subsequent training is an effective solution for such a challenging problem. However, these memory-based methods usually suffer from overfitting the few memorized examples of old relations, which may gradually cause inevitable confusion among existing relations. Inspired by the mechanism in human long-term memory formation, we introduce episodic memory activation and reconsolidation (EMAR) to continual relation learning. Every time neural models are activated to learn both new and memorized data, EMAR utilizes relation prototypes for memory reconsolidation exercise to keep a stable understanding of old relations.
The experimental results show that EMAR could get rid of catastrophically forgetting old relations and outperform the state-of-the-art continual learning models.", "keyphrases": ["episodic memory activation", "reconsolidation", "continual relation learning"]} +{"id": "bronner-monz-2012-user", "title": "User Edits Classification Using Document Revision Histories", "abstract": "Document revision histories are a useful and abundant source of data for natural language processing, but selecting relevant data for the task at hand is not trivial. In this paper we introduce a scalable approach for automatically distinguishing between factual and fluency edits in document revision histories. The approach is based on supervised machine learning using language model probabilities, string similarity measured over different representations of user edits, comparison of part-of-speech tags and named entities, and a set of adaptive features extracted from large amounts of unlabeled user edits. Applied to contiguous edit segments, our method achieves statistically significant improvements over a simple yet effective edit-distance baseline. It reaches high classification accuracy (88%) and is shown to generalize to additional sets of unseen data.", "keyphrases": ["fluency edit", "wikipedia revision", "revision analysis"]} +{"id": "sadat-habash-2006-combination", "title": "Combination of Arabic Preprocessing Schemes for Statistical Machine Translation", "abstract": "Statistical machine translation is quite robust when it comes to the choice of input representation. It only requires consistency between training and testing. As a result, there is a wide range of possible preprocessing choices for data used in statistical machine translation. This is even more so for morphologically rich languages such as Arabic. In this paper, we study the effect of different word-level preprocessing schemes for Arabic on the quality of phrase-based statistical machine translation. We also present and evaluate different methods for combining preprocessing schemes resulting in improved translation quality.", "keyphrases": ["arabic", "preprocessing scheme", "statistical machine translation", "wide range", "rich language"]} +{"id": "batchelor-2014-gdbank", "title": "gdbank: The beginnings of a corpus of dependency structures and type-logical grammar in Scottish Gaelic", "abstract": "We present gdbank, a small handbuilt corpus of 32 sentences with dependency structures and categorial grammar type assignments. The sentences have been chosen to illustrate as broad a range of the unusual features of Scottish Gaelic as possible, particularly nouns being used to represent psychological states where more thoroughly-studied languages such as English and French would prefer a verb, and prepositions marking aspect, as is also seen in Welsh and, for example, Irish Gaelic. We provide hand-built dependency trees, building on previous work on Irish Gaelic and using the Universal Dependency Scheme. We also provide a tentative categorial grammar account of the words in the sentences, based largely on previous work on English.", "keyphrases": ["dependency structure", "scottish gaelic", "gdbank"]} +{"id": "swanson-charniak-2014-data", "title": "Data Driven Language Transfer Hypotheses", "abstract": "Language transfer, the preferential second language behavior caused by similarities to the speaker\u2019s native language, requires considerable expertise to be detected by humans alone. 
Our goal in this work is to replace expert intervention by data-driven methods wherever possible. We define a computational methodology that produces a concise list of lexicalized syntactic patterns that are controlled for redundancy and ranked by relevancy to language transfer. We demonstrate the ability of our methodology to detect hundreds of such candidate patterns from currently available data sources, and validate the quality of the proposed patterns through classification experiments.", "keyphrases": ["language transfer", "list", "syntactic pattern"]} +{"id": "zeng-etal-2019-automatic", "title": "Automatic Generation of Personalized Comment Based on User Profile", "abstract": "Comments on social media are very diverse, in terms of content, style and vocabulary, which make generating comments much more challenging than other existing natural language generation (NLG) tasks. Besides, since different users have different expression habits, it is necessary to take the user's profile into consideration when generating comments. In this paper, we introduce the task of automatic generation of personalized comment (AGPC) for social media. Based on tens of thousands of users' real comments and corresponding user profiles on weibo, we propose Personalized Comment Generation Network (PCGN) for AGPC. The model utilizes user feature embedding with a gated memory and attends to user description to model personality of users. In addition, external user representation is taken into consideration during the decoding to enhance the comments generation. Experimental results show that our model can generate natural, human-like and personalized comments.", "keyphrases": ["personalized comment", "user profile", "automatic generation"]} +{"id": "lebret-etal-2016-neural", "title": "Neural Text Generation from Structured Data with Application to the Biography Domain", "abstract": "This paper introduces a neural model for concept-to-text generation that scales to large, rich domains. We experiment with a new dataset of biographies from Wikipedia that is an order of magnitude larger than existing resources with over 700k samples. The dataset is also vastly more diverse with a 400k vocabulary, compared to a few hundred words for Weathergov or Robocup. Our model builds upon recent work on conditional neural language model for text generation. To deal with the large vocabulary, we extend these models to mix a fixed vocabulary with copy actions that transfer sample-specific words from the input database to the generated output sentence. Our neural model significantly outperforms a classical Kneser-Ney language model adapted to this task by nearly 15 BLEU.", "keyphrases": ["neural text generation", "first sentence", "table-to-text generation", "generation model"]} +{"id": "henderson-2003-inducing", "title": "Inducing History Representations for Broad Coverage Statistical Parsing", "abstract": "We present a neural network method for inducing representations of parse histories and using these history representations to estimate the probabilities needed by a statistical left-corner parser. The resulting statistical parser achieves performance (89.1% F-measure) on the Penn Treebank which is only 0.6% below the best current parser for this task, despite using a smaller vocabulary size and less prior linguistic knowledge.
Crucial to this success is the use of structurally determined soft biases in inducing the representation of the parse history, and no use of hard independence assumptions.", "keyphrases": ["statistical parser", "approximation", "derivation history", "isbn"]} +{"id": "baeza-yates-etal-2015-cassa", "title": "CASSA: A Context-Aware Synonym Simplification Algorithm", "abstract": "We present a new context-aware method for lexical simplification that uses two free language resources and real web frequencies. We compare it with the state-of-the-art method for lexical simplification in Spanish and the established simplification baseline, that is, the most frequent synonym. Our method improves upon the other methods in the detection of complex words, in meaning preservation, and in simplicity. Although we use Spanish, the method can be extended to other languages since it does not require alignment of parallel corpora.", "keyphrases": ["simplification", "spanish", "cassa"]} +{"id": "zaghouani-etal-2014-large", "title": "Large Scale Arabic Error Annotation: Guidelines and Framework", "abstract": "We present annotation guidelines and a web-based annotation framework developed as part of an effort to create a manually annotated Arabic corpus of errors and corrections for various text types. Such a corpus will be invaluable for developing Arabic error correction tools, both for training models and as a gold standard for evaluating error correction algorithms. We summarize the guidelines we created. We also describe issues encountered during the training of the annotators, as well as problems that are specific to the Arabic language that arose during the annotation process. Finally, we present the annotation tool that was developed as part of this project, the annotation pipeline, and the quality of the resulting annotations.", "keyphrases": ["arabic", "qalb corpus", "spelling error"]} +{"id": "zhang-etal-2020-multi-task", "title": "A Multi-task Learning Framework for Opinion Triplet Extraction", "abstract": "The state-of-the-art Aspect-based Sentiment Analysis (ABSA) approaches are mainly based on either detecting aspect terms and their corresponding sentiment polarities, or co-extracting aspect and opinion terms. However, the extraction of aspect-sentiment pairs lacks opinion terms as a reference, while co-extraction of aspect and opinion terms would not lead to meaningful pairs without determining their sentiment dependencies. To address the issue, we present a novel view of ABSA as an opinion triplet extraction task, and propose a multi-task learning framework to jointly extract aspect terms and opinion terms, and simultaneously parse sentiment dependencies between them with a biaffine scorer. At inference phase, the extraction of triplets is facilitated by a triplet decoding method based on the above outputs. We evaluate the proposed framework on four SemEval benchmarks for ABSA. The results demonstrate that our approach significantly outperforms a range of strong baselines and state-of-the-art approaches.", "keyphrases": ["multi-task learning framework", "aspect term", "sentiment polarity"]} +{"id": "li-etal-2013-modeling", "title": "Modeling Syntactic and Semantic Structures in Hierarchical Phrase-based Translation", "abstract": "Incorporating semantic structure into a linguistics-free translation model is challenging, since semantic structures are closely tied to syntax.
In this paper, we propose a two-level approach to exploiting predicate-argument structure reordering in a hierarchical phrase-based translation model. First, we introduce linguistically motivated constraints into a hierarchical model, guiding translation phrase choices in favor of those that respect syntactic boundaries. Second, based on such translation phrases, we propose a predicate-argument structure reordering model that predicts reordering not only between an argument and its predicate, but also between two arguments. Experiments on Chinese-to-English translation demonstrate that both advances significantly improve translation accuracy.", "keyphrases": ["phrase-based translation", "predicate-argument structure", "extraction"]} +{"id": "liu-etal-2010-automatic", "title": "Automatic Keyphrase Extraction via Topic Decomposition", "abstract": "Existing graph-based ranking methods for keyphrase extraction compute a single importance score for each word via a single random walk. Motivated by the fact that both documents and words can be represented by a mixture of semantic topics, we propose to decompose traditional random walk into multiple random walks specific to various topics. We thus build a Topical PageRank (TPR) on word graph to measure word importance with respect to different topics. After that, given the topic distribution of the document, we further calculate the ranking scores of words and extract the top ranked ones as keyphrases. Experimental results show that TPR outperforms state-of-the-art keyphrase extraction methods on two datasets under various evaluation metrics.", "keyphrases": ["mixture", "pagerank", "automatic keyphrase extraction", "article topic information", "similar meaning"]} +{"id": "anastasiou-2008-identification", "title": "Identification of idioms by machine translation: a hybrid research system vs. three commercial systems", "abstract": "We compare three commercial Machine Translation (MT) systems, Power Translator Pro, SYSTRAN, and T1 Langenscheidt, with the research hybrid, statistical and rule-based system, METIS-II, with respect to identification of idioms. Firstly, we make a distinction between continuous (adjacent constituents) and discontinuous idioms (non-adjacent constituents). Secondly, we describe our idiom resources within METIS-II, the system\u2019s identification process, and we evaluate the results with simple techniques. From the translation outputs of the commercial systems we deduce that they cannot identify discontinuous idioms. We prove that, within METIS-II, the identification of discontinuous idioms is feasible, even with low resources.", "keyphrases": ["idiom", "machine translation", "commercial system"]} +{"id": "zhao-2009-character", "title": "Character-Level Dependencies in Chinese: Usefulness and Learning", "abstract": "We investigate the possibility of exploiting character-based dependency for Chinese information processing. As Chinese text is made up of character sequences rather than word sequences, word in Chinese is not so natural a concept as in English, nor is word easy to be defined without argument for such a language. Therefore we propose a character-level dependency scheme to represent primary linguistic relationships within a Chinese sentence. The usefulness of character dependencies are verified through two specialized dependency parsing tasks. The first is to handle trivial character dependencies that are equally transformed from traditional word boundaries. 
The second furthermore considers the case that annotated internal character dependencies inside a word are involved. Both of these results from character-level dependency parsing are positive. This study provides an alternative way to formularize basic character- and word-level representation for Chinese.", "keyphrases": ["chinese", "usefulness", "word boundary", "character-level dependency", "internal structure"]} +{"id": "mesgar-strube-2018-neural", "title": "A Neural Local Coherence Model for Text Quality Assessment", "abstract": "We propose a local coherence model that captures the flow of what semantically connects adjacent sentences in a text. We represent the semantics of a sentence by a vector and capture its state at each word of the sentence. We model what relates two adjacent sentences based on the two most similar semantic states, each of which is in one of the sentences. We encode the perceived coherence of a text by a vector, which represents patterns of changes in salient information that relates adjacent sentences. Our experiments demonstrate that our approach is beneficial for two downstream tasks: Readability assessment, in which our model achieves new state-of-the-art results; and essay scoring, in which the combination of our coherence vectors and other task-dependent features significantly improves the performance of a strong essay scorer.", "keyphrases": ["local coherence model", "adjacent sentence", "readability assessment", "essay scoring"]} +{"id": "wang-etal-2018-semi-autoregressive", "title": "Semi-Autoregressive Neural Machine Translation", "abstract": "Existing approaches to neural machine translation are typically autoregressive models. While these models attain state-of-the-art translation quality, they are suffering from low parallelizability and thus slow at decoding long sequences. In this paper, we propose a novel model for fast sequence generation \u2014 the semi-autoregressive Transformer (SAT). The SAT keeps the autoregressive property in global but relieves in local and thus is able to produce multiple successive words in parallel at each time step. Experiments conducted on English-German and Chinese-English translation tasks show that the SAT achieves a good balance between translation quality and decoding speed. On WMT'14 English-German translation, the SAT achieves 5.58 times speedup while maintaining 88% translation quality, significantly better than the previous non-autoregressive methods. When producing two words at each time step, the SAT is almost lossless (only 1% degeneration in BLEU score).", "keyphrases": ["translation quality", "semi-autoregressive transformer", "parallel", "time step"]} +{"id": "che-liu-2010-jointly", "title": "Jointly Modeling WSD and SRL with Markov Logic", "abstract": "Semantic role labeling (SRL) and word sense disambiguation (WSD) are two fundamental tasks in natural language processing to find a sentence-level semantic representation. To date, they have mostly been modeled in isolation. However, this approach neglects logical constraints between them. We therefore exploit some pipeline systems which verify the automatic all word sense disambiguation could help the semantic role labeling and vice versa. We further propose a Markov logic model that jointly labels semantic roles and disambiguates all word senses.
By evaluating our model on the OntoNotes 3.0 data, we show that this joint approach leads to a higher performance for word sense disambiguation and semantic role labeling than those pipeline approaches.", "keyphrases": ["wsd", "srl", "markov logic"]} +{"id": "carpuat-simard-2012-trouble", "title": "The Trouble with SMT Consistency", "abstract": "SMT typically models translation at the sentence level, ignoring wider document context. Does this hurt the consistency of translated documents? Using a phrase-based SMT system in various data conditions, we show that SMT translates documents remarkably consistently, even without document knowledge. Nevertheless, translation inconsistencies often indicate translation errors. However, unlike in human translation, these errors are rarely due to terminology inconsistency. They are more often symptoms of deeper issues with SMT models instead.", "keyphrases": ["trouble", "consistency", "smt system", "translation error"]} +{"id": "gui-etal-2017-question", "title": "A Question Answering Approach for Emotion Cause Extraction", "abstract": "Emotion cause extraction aims to identify the reasons behind a certain emotion expressed in text. It is a much more difficult task compared to emotion classification. Inspired by recent advances in using deep memory networks for question answering (QA), we propose a new approach which considers emotion cause identification as a reading comprehension task in QA. Inspired by convolutional neural networks, we propose a new mechanism to store relevant context in different memory slots to model context information. Our proposed approach can extract both word level sequence features and lexical features. Performance evaluation shows that our method achieves the state-of-the-art performance on a recently released emotion cause dataset, outperforming a number of competitive baselines by at least 3.01% in F-measure.", "keyphrases": ["emotion", "extraction", "clause"]} +{"id": "hua-etal-2019-argument-generation", "title": "Argument Generation with Retrieval, Planning, and Realization", "abstract": "Automatic argument generation is an appealing but challenging task. In this paper, we study the specific problem of counter-argument generation, and present a novel framework, CANDELA. It consists of a powerful retrieval system and a novel two-step generation model, where a text planning decoder first decides on the main talking points and a proper language style for each sentence, then a content realization decoder reflects the decisions and constructs an informative paragraph-level argument. Furthermore, our generation model is empowered by a retrieval system indexed with 12 million articles collected from Wikipedia and popular English news media, which provides access to high-quality content with diversity. Automatic evaluation on a large-scale dataset collected from Reddit shows that our model yields significantly higher BLEU, ROUGE, and METEOR scores than the state-of-the-art and non-trivial comparisons. 
Human evaluation further indicates that our system arguments are more appropriate for refutation and richer in content.", "keyphrases": ["retrieval", "argument generation", "stance"]} +{"id": "belz-gatt-2007-attribute", "title": "The attribute selection for GRE challenge: overview and evaluation results", "abstract": "The Attribute Selection for GRE (ASGRE) Challenge was the first shared-task evaluation challenge in the field of Natural Language Generation. Six teams submitted a total of 22 systems. All submitted systems were tested automatically for minimality, uniqueness and \u2018humanlikeness\u2019. In addition, the output of 15 systems was tested in a task-based experiment where subjects were asked to identify referents, and the speed and accuracy of identification was measured. This report describes the...", "keyphrases": ["attribute selection", "nlg", "expression generation"]} +{"id": "huang-yates-2014-improving", "title": "Improving Word Alignment Using Linguistic Code Switching Data", "abstract": "Linguistic Code Switching (LCS) is a situation where two or more languages show up in the context of a single conversation. For example, in English-Chinese code switching, there might be a sentence like \u201c[Chinese text] meeting\u201d (We will have a meeting in 15 minutes). Traditional machine translation (MT) systems treat LCS data as noise, or just as regular sentences. However, if LCS data is processed intelligently, it can provide a useful signal for training word alignment and MT models. Moreover, LCS data is from non-news sources which can enhance the diversity of training data for MT. In this paper, we first extract constraints from this code switching data and then incorporate them into a word alignment model training procedure. We also show that by using the code switching data, we can jointly train a word alignment model and a language model using co-training. Our techniques for incorporating LCS data improve by 2.64 in BLEU score over a baseline MT system trained using only standard sentence-aligned corpora.", "keyphrases": ["word alignment", "noise", "code-switching"]} +{"id": "mohammad-2011-even", "title": "Even the Abstract have Color: Consensus in Word-Colour Associations", "abstract": "Colour is a key component in the successful dissemination of information. Since many real-world concepts are associated with colour, for example danger with red, linguistic information is often complemented with the use of appropriate colours in information visualization and product marketing. Yet, there is no comprehensive resource that captures concept--colour associations. We present a method to create a large word--colour association lexicon by crowdsourcing. A word-choice question was used to obtain sense-level annotations and to ensure data quality. We focus especially on abstract concepts and emotions to show that even they tend to have strong colour associations. Thus, using the right colours can not only improve semantic coherence, but also inspire the desired emotional response.", "keyphrases": ["color", "association", "crowdsourcing"]} +{"id": "duboue-mckeown-2003-statistical", "title": "Statistical Acquisition of Content Selection Rules for Natural Language Generation", "abstract": "A Natural Language Generation system produces text using as input semantic data. One of its very first tasks is to decide which pieces of information to convey in the output.
This task, called Content Selection, is quite domain-dependent, requiring considerable re-engineering to transport the system from one scenario to another. In this paper, we present a method to acquire content selection rules automatically from a corpus of text and associated semantics. Our proposed technique was evaluated by comparing its output with information selected by human authors in unseen texts, where we were able to filter half the input data set without loss of recall.", "keyphrases": ["content selection rule", "statistical approach", "biographical summary"]} +{"id": "hong-ong-2009-automatically", "title": "Automatically Extracting Word Relationships as Templates for Pun Generation", "abstract": "Computational models can be built to capture the syntactic structures and semantic patterns of human punning riddles. Such a model can then be used as a set of rules by a computer to generate its own puns. This paper presents T-PEG, a system that utilizes phonetic and semantic linguistic resources to automatically extract word relationships in puns and store the knowledge in template form. Given a set of training examples, it is able to extract 69.2% usable templates, resulting in computer-generated puns that received an average score of 2.13 as compared to 2.70 for human-generated puns from user feedback.", "keyphrases": ["template", "pun generation", "riddle", "linguistic resource", "creativity"]} +{"id": "huang-etal-2008-quality", "title": "Quality Assurance of Automatic Annotation of Very Large Corpora: a Study based on heterogeneous Tagging System", "abstract": "We propose a set of heuristics for improving annotation quality of very large corpora efficiently. The Xinhua News portion of the Chinese Gigaword Corpus was tagged independently with both the Peking University ICL tagset and the Academia Sinica CKIP tagset. The corpus-based POS tag mapping will serve as the basis for studying possible contrasts in the grammatical systems of the PRC and Taiwan, and as a basic model for mapping between the CKIP and ICL tagging systems for any data.", "keyphrases": ["large corpora", "heterogeneous tagging system", "quality assurance"]} +{"id": "liu-etal-2018-exploiting-contextual", "title": "Exploiting Contextual Information via Dynamic Memory Network for Event Detection", "abstract": "The task of event detection involves identifying and categorizing event triggers. Contextual information has been shown to be effective on the task. However, existing methods which utilize contextual information only process the context once. We argue that the context can be better exploited by processing the context multiple times, allowing the model to perform complex reasoning and to generate better context representation, thus improving the overall performance. Meanwhile, dynamic memory network (DMN) has demonstrated promising capability in capturing contextual information and has been applied successfully to various tasks. In light of the multi-hop mechanism of the DMN to model the context, we propose the trigger detection dynamic memory network (TD-DMN) to tackle the event detection problem.
We performed five-fold cross-validation on the ACE-2005 dataset, and experimental results show that the multi-hop mechanism does improve the performance and that the proposed model achieves the best F1 score compared to state-of-the-art methods.", "keyphrases": ["contextual information", "dynamic memory network", "event detection"]} +{"id": "strapparava-etal-2004-pattern", "title": "Pattern abstraction and term similarity for Word Sense Disambiguation: IRST at Senseval-3", "abstract": "This paper summarizes IRST\u2019s participation in Senseval-3. We participated both in the English all-words task and in some lexical sample tasks (English, Basque, Catalan, Italian, Spanish). We followed two perspectives. On one hand, for the all-words task, we tried to refine the Domain Driven Disambiguation that we presented at Senseval-2. The refinements consist of both exploiting a new technique (Domain Relevance Estimation) for domain detection in texts, and experimenting with the use of Latent Semantic Analysis to avoid reliance on manually annotated domain resources (e.g. WORDNET DOMAINS). On the other hand, for the lexical sample tasks, we explored the direction of pattern abstraction and we demonstrated the feasibility of leveraging external knowledge using kernel methods.", "keyphrases": ["word sense disambiguation", "senseval-3", "kernel method", "pattern abstraction"]} +{"id": "qian-etal-2016-speculation", "title": "Speculation and Negation Scope Detection via Convolutional Neural Networks", "abstract": "Speculation and negation are important information to identify text factuality. In this paper, we propose a Convolutional Neural Network (CNN)-based model with probabilistic weighted average pooling to address speculation and negation scope detection. In particular, our CNN-based model extracts meaningful features from various syntactic paths between the cues and the candidate tokens in both constituency and dependency parse trees. Evaluation on BioScope shows that our CNN-based model significantly outperforms the state-of-the-art systems on Abstracts, a sub-corpus in BioScope, and achieves comparable performance on Clinical Records, another sub-corpus in BioScope.", "keyphrases": ["negation scope detection", "convolutional neural network", "candidate token", "speculation"]} +{"id": "rozovskaya-roth-2010-generating", "title": "Generating Confusion Sets for Context-Sensitive Error Correction", "abstract": "In this paper, we consider the problem of generating candidate corrections for the task of correcting errors in text. We focus on the task of correcting errors in preposition usage made by non-native English speakers, using discriminative classifiers. The standard approach to the problem assumes that the set of candidate corrections for a preposition consists of all preposition choices participating in the task. We determine likely preposition confusions using an annotated corpus of non-native text and use this knowledge to produce smaller sets of candidates. \n \nWe propose several methods of restricting candidate sets. These methods exclude candidate prepositions that are not observed as valid corrections in the annotated corpus and take into account the likelihood of each preposition confusion in the non-native text. We find that restricting candidates to those that are observed in the non-native data improves both the precision and the recall compared to the approach that views all prepositions as possible candidates.
Furthermore, the approach that takes into account the likelihood of each preposition confusion is shown to be the most effective.", "keyphrases": ["confusion set", "error correction", "non-native data", "preposition error", "writer"]} +{"id": "ippolito-etal-2019-unsupervised", "title": "Unsupervised Hierarchical Story Infilling", "abstract": "Story infilling involves predicting words to go into a missing span from a story. This challenging task has the potential to transform interactive tools for creative writing. However, state-of-the-art conditional language models have trouble balancing fluency and coherence with novelty and diversity. We address this limitation with a hierarchical model which first selects a set of rare words and then generates text conditioned on that set. By relegating the high-entropy task of picking rare words to a word-sampling model, the second-stage model conditioned on those words can achieve high fluency and coherence by searching for likely sentences, without sacrificing diversity.", "keyphrases": ["story", "span", "rare word", "text infilling"]} +{"id": "bhatia-etal-2010-empty", "title": "Empty Categories in a Hindi Treebank", "abstract": "We are in the process of creating a multi-representational and multi-layered treebank for Hindi/Urdu (Palmer et al., 2009), which has three main layers: dependency structure, predicate-argument structure (PropBank), and phrase structure. This paper discusses an important issue in treebank design which is often neglected: the use of empty categories (ECs). All three levels of representation make use of ECs. We make a high-level distinction between two types of ECs, trace and silent, on the basis of whether they are postulated to mark displacement or not. Each type is further refined into several subtypes based on the underlying linguistic phenomena which the ECs are introduced to handle. This paper discusses the stages at which we add ECs to the Hindi/Urdu treebank and why. We investigate methodically the different types of ECs and their role in our syntactic and semantic representations. We also examine our decisions on whether or not to coindex each type of EC with other elements in the representation.", "keyphrases": ["hindi treebank", "predicate", "hutb"]} +{"id": "schuff-etal-2017-annotation", "title": "Annotation, Modelling and Analysis of Fine-Grained Emotions on a Stance and Sentiment Detection Corpus", "abstract": "There is a rich variety of data sets for sentiment analysis (viz., polarity and subjectivity classification). For the more challenging task of detecting discrete emotions following the definitions of Ekman and Plutchik, however, there are much fewer data sets, and notably no resources for the social media domain. This paper contributes to closing this gap by extending the SemEval 2016 stance and sentiment dataset with emotion annotation. We (a) analyse annotation reliability and annotation merging; (b) investigate the relation between emotion annotation and the other annotation layers (stance, sentiment); (c) report modelling results as a baseline for future work.", "keyphrases": ["stance", "emotion annotation", "inter-annotator agreement"]} +{"id": "tseng-etal-2015-introduction", "title": "Introduction to SIGHAN 2015 Bake-off for Chinese Spelling Check", "abstract": "This paper introduces the SIGHAN 2015 Bake-off for Chinese Spelling Check, including task description, data preparation, performance metrics, and evaluation results.
The competition reveals the current state-of-the-art NLP techniques for dealing with Chinese spelling checking. All data sets with gold standards and the evaluation tool used in this bake-off are publicly available for future research.", "keyphrases": ["sighan", "bake-off", "chinese spelling check", "language model", "correct candidate"]} +{"id": "stroppa-etal-2007-exploiting", "title": "Exploiting source similarity for SMT using context-informed features", "abstract": "In this paper, we introduce context-informed features in a log-linear phrase-based SMT framework; these features enable us to exploit source similarity in addition to target similarity modeled by the language model. We present a memory-based classification framework that enables the estimation of these features while avoiding sparseness problems. We evaluate the performance of our approach on Italian-to-English and Chinese-to-English translation tasks using a state-of-the-art phrase-based SMT system, and report significant improvements for both BLEU and NIST scores when adding the context-informed features.", "keyphrases": ["source similarity", "context-informed feature", "phrase-based smt", "log-linear pb-smt system", "part-of-speech tag"]} +{"id": "kiela-etal-2014-improving", "title": "Improving Multi-Modal Representations Using Image Dispersion: Why Less is Sometimes More", "abstract": "Models that learn semantic representations from both linguistic and perceptual input outperform text-only models in many contexts and better reflect human concept acquisition. However, experiments suggest that while the inclusion of perceptual input improves representations of certain concepts, it degrades the representations of others. We propose an unsupervised method to determine whether to include perceptual input for a concept, and show that it significantly improves the ability of multi-modal models to learn and represent word meanings. The method relies solely on image data, and can be applied to a variety of other NLP tasks.", "keyphrases": ["image dispersion", "inclusion", "unsupervised method", "visual information", "filter"]} +{"id": "francois-etal-2014-flelex", "title": "FLELex: a graded lexical resource for French foreign learners", "abstract": "In this paper we present FLELex, the first graded lexicon for French as a foreign language (FFL) that reports word frequencies by difficulty level (according to the CEFR scale). It has been obtained from a tagged corpus of 777,000 words from available textbooks and simplified readers intended for FFL learners. Our goal is to freely provide this resource to the community to be used for a variety of purposes ranging from the assessment of the lexical difficulty of a text, to the selection of simpler words within text simplification systems, and also as a dictionary in assistive tools for writing.", "keyphrases": ["lexical resource", "french", "learner"]} +{"id": "huang-etal-2019-ana", "title": "ANA at SemEval-2019 Task 3: Contextual Emotion detection in Conversations through hierarchical LSTMs and BERT", "abstract": "This paper describes the system submitted by the ANA Team for SemEval-2019 Task 3: EmoContext. We propose a novel Hierarchical LSTMs for Contextual Emotion Detection (HRLCE) model. It classifies the emotion of an utterance given its conversational context. The results show that, in this task, our HRLCE outperforms the most recent state-of-the-art text classification framework: BERT.
We combine the results generated by BERT and HRLCE to achieve an overall score of 0.7709, which ranked 5th on the final leaderboard of the competition among 165 teams.", "keyphrases": ["semeval-2019 task", "contextual emotion detection", "conversation"]} +{"id": "liu-etal-2011-automatic", "title": "Automatic Keyphrase Extraction by Bridging Vocabulary Gap", "abstract": "Keyphrase extraction aims to select a set of terms from a document as a short summary of the document. Most methods extract keyphrases according to their statistical properties in the given document. Appropriate keyphrases, however, are not always statistically significant or even do not appear in the given document. This creates a large vocabulary gap between a document and its keyphrases. In this paper, we consider that a document and its keyphrases both describe the same object but are written in two different languages. By regarding keyphrase extraction as a problem of translating from the language of documents to the language of keyphrases, we use word alignment models in statistical machine translation to learn translation probabilities between the words in documents and the words in keyphrases. According to the translation model, we suggest keyphrases given a new document. The suggested keyphrases are not necessarily statistically frequent in the document, which indicates that our method is more flexible and reliable. Experiments on news articles demonstrate that our method outperforms existing unsupervised methods on precision, recall and F-measure.", "keyphrases": ["keyphrase", "vocabulary gap", "large set"]} +{"id": "qadir-riloff-2014-learning", "title": "Learning Emotion Indicators from Tweets: Hashtags, Hashtag Patterns, and Phrases", "abstract": "We present a weakly supervised approach for learning hashtags, hashtag patterns, and phrases associated with five emotions: AFFECTION, ANGER/RAGE, FEAR/ANXIETY, JOY, and SADNESS/DISAPPOINTMENT. Starting with seed hashtags to label an initial set of tweets, we train emotion classifiers and use them to learn new emotion hashtags and hashtag patterns. This process then repeats in a bootstrapping framework. Emotion phrases are also extracted from the learned hashtags and used to create phrase-based emotion classifiers. We show that the learned set of emotion indicators yields a substantial improvement in F-scores, ranging from +5% to +18% over baseline classifiers.", "keyphrases": ["emotion", "hashtag", "social medium"]} +{"id": "williams-2012-belief", "title": "A belief tracking challenge task for spoken dialog systems", "abstract": "Belief tracking is a promising technique for adding robustness to spoken dialog systems, but current research is fractured across different teams, techniques, and domains. This paper amplifies past informal discussions (Raux, 2011) to call for a belief tracking challenge task, based on the Spoken dialog challenge corpus (Black et al., 2011). Benefits, limitations, evaluation design issues, and next steps are presented.", "keyphrases": ["belief", "challenge task", "spoken dialog system"]} +{"id": "velldal-oepen-2005-maximum", "title": "Maximum Entropy Models for Realization Ranking", "abstract": "In this paper we describe and evaluate different statistical models for the task of realization ranking, i.e. the problem of discriminating between competing surface realizations generated for a given input semantics.
Three models are trained and tested: an n-gram language model, a discriminative maximum entropy model using structural features, and a combination of these two. Our realization component forms part of a larger, hybrid MT system.", "keyphrases": ["realization ranking", "log-linear model", "disambiguation model", "hpsg grammar"]} +{"id": "chi-etal-2017-speaker", "title": "Speaker Role Contextual Modeling for Language Understanding and Dialogue Policy Learning", "abstract": "Language understanding (LU) and dialogue policy learning are two essential components in conversational systems. Human-human dialogues are not well-controlled and are often random and unpredictable, because speakers have their own goals and speaking habits. This paper proposes a role-based contextual model to consider different speaker roles independently based on the various speaking patterns in the multi-turn dialogues. The experiments on the benchmark dataset show that the proposed role-based model successfully learns role-specific behavioral patterns for contextual encoding and then significantly improves language understanding and dialogue policy learning tasks.", "keyphrases": ["language understanding", "dialogue policy learning", "conversation"]} +{"id": "ganea-hofmann-2017-deep", "title": "Deep Joint Entity Disambiguation with Local Neural Attention", "abstract": "We propose a novel deep learning model for joint document-level entity disambiguation, which leverages learned neural representations. Key components are entity embeddings, a neural attention mechanism over local context windows, and a differentiable joint inference stage for disambiguation. Our approach thereby combines benefits of deep learning with more traditional approaches such as graphical models and probabilistic mention-entity maps. Extensive experiments show that we are able to obtain competitive or state-of-the-art accuracy at moderate computational costs.", "keyphrases": ["disambiguation", "deep learning", "mention", "wikipedia"]} +{"id": "afrin-litman-2018-annotation", "title": "Annotation and Classification of Sentence-level Revision Improvement", "abstract": "Studies of writing revisions rarely focus on revision quality. To address this issue, we introduce a corpus of between-draft revisions of student argumentative essays, annotated as to whether each revision improves essay quality. We demonstrate a potential usage of our annotations by developing a machine learning model to predict revision improvement. With the goal of expanding training data, we also extract revisions from a dataset edited by expert proofreaders. Our results indicate that blending expert and non-expert revisions increases model performance, with expert data particularly important for predicting low-quality revisions.", "keyphrases": ["revision", "student", "argumentative essay"]} +{"id": "ustun-etal-2020-udapter", "title": "UDapter: Language Adaptation for Truly Universal Dependency Parsing", "abstract": "Recent advances in multilingual dependency parsing have brought the idea of a truly universal parser closer to reality. However, cross-language interference and restrained model capacity remain major obstacles. To address this, we propose a novel multilingual task adaptation approach based on contextual parameter generation and adapter modules. This approach makes it possible to learn adapters via language embeddings while sharing model parameters across languages. It also allows for an easy but effective integration of existing linguistic typology features into the parsing network.
The resulting parser, UDapter, outperforms strong monolingual and multilingual baselines on the majority of both high-resource and low-resource (zero-shot) languages, showing the success of the proposed adaptation approach. Our in-depth analyses show that soft parameter sharing via typological features is key to this success.", "keyphrases": ["adapter", "dependency parsing", "udapter", "multilingual bert"]} +{"id": "wu-etal-2020-perturbed", "title": "Perturbed Masking: Parameter-free Probing for Analyzing and Interpreting BERT", "abstract": "By introducing a small set of additional parameters, a probe learns to solve specific linguistic tasks (e.g., dependency parsing) in a supervised manner using feature representations (e.g., contextualized embeddings). The effectiveness of such probing tasks is taken as evidence that the pre-trained model encodes linguistic knowledge. However, this approach of evaluating a language model is undermined by the uncertainty of the amount of knowledge that is learned by the probe itself. Complementary to those works, we propose a parameter-free probing technique for analyzing pre-trained language models (e.g., BERT). Our method does not require direct supervision from the probing tasks, nor do we introduce additional parameters to the probing process. Our experiments on BERT show that syntactic trees recovered from BERT using our method are significantly better than linguistically-uninformed baselines. We further feed the empirically induced dependency structures into a downstream sentiment classification task and find that the improvement they bring is comparable to, or even better than, that of a human-designed dependency schema.", "keyphrases": ["bert", "perturbed", "masking technique"]} +{"id": "yeh-chen-2019-flowdelta", "title": "FlowDelta: Modeling Flow Information Gain in Reasoning for Conversational Machine Comprehension", "abstract": "Conversational machine comprehension requires deep understanding of the dialogue flow, and the prior work proposed FlowQA to implicitly model the context representations in reasoning for better understanding. This paper proposes to explicitly model the information gain through the dialogue reasoning in order to allow the model to focus on more informative cues. The proposed model achieves state-of-the-art performance on the conversational QA dataset QuAC and the sequential instruction understanding dataset SCONE, which shows the effectiveness of the proposed mechanism and demonstrates its capability of generalizing to different QA models and tasks.", "keyphrases": ["reasoning", "conversational machine comprehension", "flowdelta"]} +{"id": "li-etal-2021-search", "title": "Search from History and Reason for Future: Two-stage Reasoning on Temporal Knowledge Graphs", "abstract": "Temporal Knowledge Graphs (TKGs) have been developed and used in many different areas. Reasoning on TKGs that predicts potential facts (events) in the future brings great challenges to existing models. When facing a prediction task, human beings usually search useful historical information (i.e., clues) in their memories and then reason meticulously about the future. Inspired by this mechanism, we propose CluSTeR to predict future facts in a two-stage manner, Clue Searching and Temporal Reasoning, accordingly. Specifically, at the clue searching stage, CluSTeR learns a beam search policy via reinforcement learning (RL) to induce multiple clues from historical facts.
At the temporal reasoning stage, it adopts a graph convolution network-based sequence method to deduce answers from clues. Experiments on four datasets demonstrate the substantial advantages of CluSTeR compared with the state-of-the-art methods. Moreover, the clues found by CluSTeR further provide interpretability for the results.", "keyphrases": ["reason", "future", "temporal knowledge graphs"]} +{"id": "da-san-martino-etal-2020-semeval", "title": "SemEval-2020 Task 11: Detection of Propaganda Techniques in News Articles", "abstract": "We present the results and the main findings of SemEval-2020 Task 11 on Detection of Propaganda Techniques in News Articles. The task featured two subtasks. Subtask SI is about Span Identification: given a plain-text document, spot the specific text fragments containing propaganda. Subtask TC is about Technique Classification: given a specific text fragment, in the context of a full document, determine the propaganda technique it uses, choosing from an inventory of 14 possible propaganda techniques. The task attracted a large number of participants: 250 teams signed up to participate and 44 made a submission on the test set. In this paper, we present the task, analyze the results, and discuss the system submissions and the methods they used. For both subtasks, the best systems used pre-trained Transformers and ensembles.", "keyphrases": ["propaganda techniques", "semeval-2020 task", "news articles"]} +{"id": "salehi-etal-2014-using", "title": "Using Distributional Similarity of Multi-way Translations to Predict Multiword Expression Compositionality", "abstract": "We predict the compositionality of multiword expressions using distributional similarity between each component word and the overall expression, based on translations into multiple languages. We evaluate the method over English noun compounds, English verb particle constructions and German noun compounds. We show that the estimation of compositionality is improved when using translations into multiple languages, as compared to simply using distributional similarity in the source language. We further find that string similarity complements distributional similarity. Multiword expressions (hereafter MWEs) are combinations of words which are lexically, syntactically, semantically or statistically idiosyncratic (Sag et al., 2002; Baldwin and Kim, 2009). Much research has been carried out on the extraction and identification of MWEs in English (Schone and Jurafsky, 2001; Pecina, 2008; Fazly et al., 2009) and other languages (Dias, 2003; Evert and Krenn, 2005; Salehi et al., 2012). (In this paper, we follow Baldwin and Kim (2009) in considering MWE \u201cidentification\u201d to be a token-level disambiguation task, and MWE \u201cextraction\u201d to be a type-level lexicon induction task.) However, considerably less work has addressed the task of predicting the meaning of MWEs, especially in non-English languages. As a step in this direction, the focus of this study is on predicting the compositionality of MWEs. An MWE is fully compositional if its meaning is predictable from its component words, and it is non-compositional (or idiomatic) if not. For example, stand up \u201crise to one\u2019s feet\u201d is compositional, because its meaning is clear from the meaning of the components stand and up. However, the meaning of strike up \u201cto start playing\u201d is largely unpredictable from the component words strike and up. In this study, following McCarthy et al.
(2003) and Reddy et al. (2011), we consider compositionality to be graded, and aim to predict the degree of compositionality. For example, in the dataset of Reddy et al. (2011), climate change is judged to be 99% compositional, while silver screen is 48% compositional and ivory tower is 9% compositional. Formally, we model compositionality prediction as a regression task. An explicit handling of MWEs has been shown to be useful in NLP applications (Ramisch, 2012). As an example, Carpuat and Diab (2010) proposed two strategies for integrating MWEs into statistical machine translation. They show that even a large-scale bilingual corpus cannot capture all the necessary information to translate MWEs, and that by adding the facility to model the compositionality of MWEs to their system, they could improve translation quality. Acosta et al. (2011) showed that treating non-compositional MWEs as a single unit in information retrieval improves retrieval effectiveness. For example, while searching for documents related to ivory tower, we are almost certainly not interested in documents relating to elephant tusks. Our approach is to use a large-scale multi-way translation lexicon to source translations of MWEs and their component words, and then model the relative similarity between each of the component words and the MWE, using distributional similarity based on monolingual corpora for the source language and each of the target languages. Our hypothesis is that using distributional similarity in more than one language will improve the prediction of compositionality. Importantly, in order to make the method as language-independent and", "keyphrases": ["distributional similarity", "german noun compound", "source language", "non-compositional mwe"]} +{"id": "snyder-etal-2010-statistical", "title": "A Statistical Model for Lost Language Decipherment", "abstract": "In this paper we propose a method for the automatic decipherment of lost languages. Given a non-parallel corpus in a known related language, our model produces both alphabetic mappings and translations of words into their corresponding cognates. We employ a non-parametric Bayesian framework to simultaneously capture both low-level character mappings and high-level morphemic correspondences. This formulation enables us to encode some of the linguistic intuitions that have guided human decipherers. When applied to the ancient Semitic language Ugaritic, the model correctly maps 29 of 30 letters to their Hebrew counterparts, and deduces the correct Hebrew cognate for 60% of the Ugaritic words which have cognates in Hebrew.", "keyphrases": ["decipherment", "hebrew", "dead language"]} +{"id": "emelin-etal-2021-moral", "title": "Moral Stories: Situated Reasoning about Norms, Intents, Actions, and their Consequences", "abstract": "In social settings, much of human behavior is governed by unspoken rules of conduct rooted in societal norms. For artificial systems to be fully integrated into social environments, adherence to such norms is a central prerequisite. To investigate whether language generation models can serve as behavioral priors for systems deployed in social settings, we evaluate their ability to generate action descriptions that achieve predefined goals under normative constraints. Moreover, we examine if models can anticipate likely consequences of actions that either observe or violate known norms, or explain why certain actions are preferable by generating relevant norm hypotheses.
For this purpose, we introduce Moral Stories, a crowd-sourced dataset of structured, branching narratives for the study of grounded, goal-oriented social reasoning. Finally, we propose decoding strategies that combine multiple expert models to significantly improve the quality of generated actions, consequences, and norms compared to strong baselines.", "keyphrases": ["action", "goal-oriented social reasoning", "moral story"]} +{"id": "peng-etal-2014-classifying", "title": "Classifying Idiomatic and Literal Expressions Using Topic Models and Intensity of Emotions", "abstract": "We describe an algorithm for automatic classification of idiomatic and literal expressions. Our starting point is that words in a given text segment, such as a paragraph, that are high-ranking representatives of a common topic of discussion are less likely to be a part of an idiomatic expression. Our additional hypothesis is that contexts in which idioms occur are typically more affective; therefore, we incorporate a simple analysis of the intensity of the emotions expressed by the contexts. We investigate the bag of words topic representation of one to three paragraphs containing an expression that should be classified as idiomatic or literal (a target phrase). We extract topics from paragraphs containing idioms and from paragraphs containing literals using an unsupervised clustering method, Latent Dirichlet Allocation (LDA) (Blei et al., 2003). Since idiomatic expressions exhibit the property of non-compositionality, we assume that they usually present different semantics than the words used in the local topic. We treat idioms as semantic outliers, and the identification of a semantic shift as outlier detection. Thus, this topic representation allows us to differentiate idioms from literals using local semantic contexts. Our results are encouraging.", "keyphrases": ["emotion", "paragraph", "idiom", "word topic representation"]} +{"id": "delbrouck-etal-2020-transformer", "title": "A Transformer-based joint-encoding for Emotion Recognition and Sentiment Analysis", "abstract": "Expressed sentiment and emotion are two crucial factors in human multimodal language. This paper describes a Transformer-based joint-encoding (TBJE) for the task of Emotion Recognition and Sentiment Analysis. In addition to using the Transformer architecture, our approach relies on a modular co-attention and a glimpse layer to jointly encode one or more modalities. The proposed solution has also been submitted to the ACL20: Second Grand-Challenge on Multimodal Language to be evaluated on the CMU-MOSEI dataset. The code to replicate the presented experiments is open-source.", "keyphrases": ["joint-encoding", "emotion recognition", "sentiment analysis"]} +{"id": "wubben-etal-2010-paraphrase", "title": "Paraphrase Generation as Monolingual Translation: Data and Evaluation", "abstract": "In this paper we investigate the automatic generation and evaluation of sentential paraphrases. We describe a method for generating sentential paraphrases by using a large aligned monolingual corpus of news headlines acquired automatically from Google News and a standard Phrase-Based Machine Translation (PBMT) framework. The output of this system is compared to a word substitution baseline. Human judges prefer the PBMT paraphrasing system over the word substitution system.
We demonstrate that BLEU correlates well with human judgements provided that the generated paraphrased sentence is sufficiently different from the source sentence.", "keyphrases": ["monolingual translation", "paraphrase generation", "phrase-based smt framework", "machine paclic"]} +{"id": "fu-etal-2020-sscr", "title": "SSCR: Iterative Language-Based Image Editing via Self-Supervised Counterfactual Reasoning", "abstract": "Iterative Language-Based Image Editing (ILBIE) tasks follow iterative instructions to edit images step by step. Data scarcity is a significant issue for ILBIE as it is challenging to collect large-scale examples of images before and after instruction-based changes. Yet, humans still accomplish these editing tasks even when presented with an unfamiliar image-instruction pair. Such ability results from counterfactual thinking, the ability to think about possible alternatives to events that have happened already. In this paper, we introduce a Self-Supervised Counterfactual Reasoning (SSCR) framework that incorporates counterfactual thinking to overcome data scarcity. SSCR allows the model to consider out-of-distribution instructions paired with previous images. With the help of cross-task consistency (CTC), we train these counterfactual instructions in a self-supervised scenario. Extensive results show that SSCR improves the correctness of ILBIE in terms of both object identity and position, establishing a new state of the art (SOTA) on two ILBIE datasets (i-CLEVR and CoDraw). Even with only 50% of the training data, SSCR achieves a comparable result to using complete data.", "keyphrases": ["image editing", "self-supervised counterfactual reasoning", "sscr"]} +{"id": "szpektor-etal-2004-scaling", "title": "Scaling Web-based Acquisition of Entailment Relations", "abstract": "Paraphrase recognition is a critical step for natural language interpretation. Accordingly, many NLP applications would benefit from high-coverage knowledge bases of paraphrases. However, the scalability of state-of-the-art paraphrase acquisition approaches is still limited. We present a fully unsupervised learning algorithm for Web-based extraction of entailment relations, an extended model of paraphrases. We focus on increased scalability and generality with respect to prior work, eventually aiming at a full-scale knowledge base. Our current implementation of the algorithm takes as its input a verb lexicon and for each verb searches the Web for related syntactic entailment templates. Experiments show promising results with respect to the ultimate goal, achieving much better scalability than prior Web-based methods.", "keyphrases": ["many nlp application", "paraphrase acquisition", "web", "entailment rule"]} +{"id": "mohammady-ardehaly-culotta-2015-inferring", "title": "Inferring latent attributes of Twitter users with label regularization", "abstract": "Inferring latent attributes of online users has many applications in public health, politics, and marketing. Most existing approaches rely on supervised learning algorithms, which require manual data annotation and therefore are costly to develop and adapt over time. In this paper, we propose a lightly supervised approach based on label regularization to infer the age, ethnicity, and political orientation of Twitter users. Our approach learns from a heterogeneous collection of soft constraints derived from Census demographics, trends in baby names, and Twitter accounts that are emblematic of class labels.
To counteract the imprecision of such constraints, we compare several constraint selection algorithms that optimize classification accuracy on a tuning set. We find that, using no user-annotated data, our approach is within 2% of a fully supervised baseline for three of four tasks. Using a small set of labeled data for tuning further improves accuracy on all tasks.", "keyphrases": ["latent attribute", "twitter user", "label regularization"]} +{"id": "pan-etal-2011-annotating", "title": "Annotating and Learning Event Durations in Text", "abstract": "This article presents our work on constructing a corpus of news articles in which events are annotated for estimated bounds on their duration, and automatically learning from this corpus. We describe the annotation guidelines, the event classes we categorized to reduce gross discrepancies in inter-annotator judgments, and our use of normal distributions to model vague and implicit temporal information and to measure inter-annotator agreement for these event duration distributions. We then show that machine learning techniques applied to this data can produce coarse-grained event duration information automatically, considerably outperforming a baseline and approaching human performance. The methods described here should be applicable to other kinds of vague but substantive information in texts.", "keyphrases": ["duration", "annotating", "timebank", "news story", "full-length weblog"]} +{"id": "cho-etal-2012-segmentation", "title": "Segmentation and punctuation prediction in speech language translation using a monolingual translation system", "abstract": "In spoken language translation (SLT), finding proper segmentation and reconstructing punctuation marks are not only significant but also challenging tasks. In this paper we present our recent work on speech translation quality analysis for German-English by improving sentence segmentation and punctuation. From oracle experiments, we show an upper bound of translation quality if we had human-generated segmentation and punctuation on the output stream of speech recognition systems. In our oracle experiments we gain 1.78 BLEU points of improvement on the lecture test set. We build a monolingual translation system from German to German implementing segmentation and punctuation prediction as a machine translation task. Using the monolingual translation system we get an improvement of 1.53 BLEU points on the lecture test set, which is comparable to the upper bound drawn by the oracle experiments.", "keyphrases": ["punctuation prediction", "monolingual translation system", "segmentation"]} +{"id": "mukerjee-etal-2006-detecting", "title": "Detecting Complex Predicates in Hindi using POS Projection across Parallel Corpora", "abstract": "Complex Predicates or CPs are multiword complexes functioning as single verbal units. CPs are particularly pervasive in Hindi and other Indo-Aryan languages, but a usage account driven by corpus-based identification of these constructs has not been possible since single-language systems based on rules and statistical approaches require reliable tools (POS taggers, parsers, etc.) that are unavailable for Hindi. This paper highlights the development of the first such database based on the simple idea of projecting POS tags across an English-Hindi parallel corpus. The CP types considered include adjective-verb (AV), noun-verb (NV), adverb-verb (Adv-V), and verb-verb (VV) composites.
CPs are hypothesized where a verb in English is projected onto a multi-word sequence in Hindi. While this process misses some CPs, those that are detected appear to be more reliable (83% precision, 46% recall). The resulting database lists usage instances of 1439 CPs in 4400 sentences.", "keyphrases": ["complex predicates", "pos projection", "serial verb"]} +{"id": "dhingra-etal-2019-handling", "title": "Handling Divergent Reference Texts when Evaluating Table-to-Text Generation", "abstract": "Automatically constructed datasets for generating text from semi-structured data (tables), such as WikiBio, often contain reference texts that diverge from the information in the corresponding semi-structured data. We show that metrics which rely solely on the reference texts, such as BLEU and ROUGE, show poor correlation with human judgments when those references diverge. We propose a new metric, PARENT, which aligns n-grams from the reference and generated texts to the semi-structured data before computing their precision and recall. Through a large-scale human evaluation study of table-to-text models for WikiBio, we show that PARENT correlates with human judgments better than existing text generation metrics. We also adapt and evaluate the information extraction-based evaluation proposed by Wiseman et al. (2017), and show that PARENT has comparable correlation to it, while being easier to use. We show that PARENT is also applicable when the reference texts are elicited from humans using the data from the WebNLG challenge.", "keyphrases": ["semi-structured data", "table", "data-to-text generation"]} +{"id": "cabezas-garcia-san-martin-2017-semantic", "title": "Semantic annotation to characterize contextual variation in terminological noun compounds: a pilot study", "abstract": "Noun compounds (NCs) are semantically complex and not fully compositional, as is often assumed. This paper presents a pilot study regarding the semantic annotation of environmental NCs with a view to accessing their semantics and exploring their domain-based contextual variation. Our results showed that the semantic annotation of NCs afforded important insights into how context impacts their conceptualization.", "keyphrases": ["contextual variation", "noun compound", "semantic annotation"]} +{"id": "zhang-litman-2015-annotation", "title": "Annotation and Classification of Argumentative Writing Revisions", "abstract": "This paper explores the annotation and classification of students\u2019 revision behaviors in argumentative writing. A sentence-level revision schema is proposed to capture why and how students make revisions. Based on the proposed schema, a small corpus of student essays and revisions was annotated. Studies show that manual annotation is reliable with the schema and that the annotated information is helpful for revision analysis. Furthermore, features and methods are explored for the automatic classification of revisions. Intrinsic evaluations demonstrate promising performance in high-level revision classification (surface vs. text-based). Extrinsic evaluations demonstrate that our method for automatic revision classification can be used to predict a writer\u2019s improvement.", "keyphrases": ["argumentative writing", "revision", "student essay"]} +{"id": "gaillard-etal-2010-query", "title": "Query translation using Wikipedia-based resources for analysis and disambiguation", "abstract": "This work investigates query translation using only Wikipedia-based resources in a two-step approach: analysis and disambiguation.
After arguing that data mined from Wikipedia is particularly relevant to query translation, both from a lexical and a semantic perspective, we detail the implementation of the approach. In the analysis phase, lexical units are extracted from queries and associated with several possible translations using a Wikipedia-based bilingual dictionary. During the second phase, one translation is chosen amongst the many candidates, based on topic homogeneity, asserted with the help of semantic information carried by categories of Wikipedia articles. We report promising results regarding translation accuracy.", "keyphrases": ["wikipedia-based resource", "disambiguation", "query translation"]} +{"id": "wen-etal-2016-multi", "title": "Multi-domain Neural Network Language Generation for Spoken Dialogue Systems", "abstract": "Moving from limited-domain natural language generation (NLG) to open domain is difficult because the number of semantic input combinations grows exponentially with the number of domains. Therefore, it is important to leverage existing resources and exploit similarities between domains to facilitate domain adaptation. In this paper, we propose a procedure to train multi-domain, Recurrent Neural Network-based (RNN) language generators via multiple adaptation steps. In this procedure, a model is first trained on counterfeited data synthesised from an out-of-domain dataset, and then fine-tuned on a small set of in-domain utterances with a discriminative objective function. Corpus-based evaluation results show that the proposed procedure can achieve competitive performance in terms of BLEU score and slot error rate while significantly reducing the data needed to train generators in new, unseen domains. In subjective testing, human judges confirm that the procedure greatly improves generator performance when only a small amount of data is available in the domain.", "keyphrases": ["language generation", "dialogue system", "domain adaptation", "attentive encoder-decoder"]} +{"id": "feng-etal-2020-doc2dial", "title": "doc2dial: A Goal-Oriented Document-Grounded Dialogue Dataset", "abstract": "We introduce doc2dial, a new dataset of goal-oriented dialogues that are grounded in the associated documents. Inspired by how the authors compose documents for guiding end users, we first construct dialogue flows based on the content elements that correspond to higher-level relations across text sections as well as lower-level relations between discourse units within a section. Then we present these dialogue flows to crowd contributors to create conversational utterances. The dataset includes over 4500 annotated conversations with an average of 14 turns that are grounded in over 450 documents from four domains. Compared to the prior document-grounded dialogue datasets, this dataset covers a variety of dialogue scenes in information-seeking conversations. For evaluating the versatility of the dataset, we introduce multiple dialogue modeling tasks and present baseline approaches.", "keyphrases": ["document-grounded dialogue dataset", "goal-oriented dialogue", "associated document", "doc2dial", "knowledge identification"]} +{"id": "hartung-frank-2010-structured", "title": "A Structured Vector Space Model for Hidden Attribute Meaning in Adjective-Noun Phrases", "abstract": "We present an approach to model hidden attributes in the compositional semantics of adjective-noun phrases in a distributional model.
For the representation of adjective meanings, we reformulate the pattern-based approach for attribute learning of Almuhareb (2006) in a structured vector space model (VSM). This model is complemented by a structured vector space representing attribute dimensions of noun meanings. The combination of these representations along the lines of compositional semantic principles exposes the underlying semantic relations in adjective-noun phrases. We show that our compositional VSM outperforms simple pattern-based approaches by circumventing their inherent sparsity problems.", "keyphrases": ["attribute", "adjective-noun phrase", "compositional vsm"]} +{"id": "rabinovich-etal-2018-native", "title": "Native Language Cognate Effects on Second Language Lexical Choice", "abstract": "We present a computational analysis of cognate effects on the spontaneous linguistic productions of advanced non-native speakers. Introducing a large corpus of highly competent non-native English speakers, and using a set of carefully selected lexical items, we show that the lexical choices of non-natives are affected by cognates in their native language. This effect is so powerful that we are able to reconstruct the phylogenetic language tree of the Indo-European language family solely from the frequencies of specific lexical items in the English of authors with various native languages. We quantitatively analyze non-native lexical choice, highlighting cognate facilitation as one of the important phenomena shaping the language of non-native speakers.", "keyphrases": ["cognate", "lexical choice", "native language"]} +{"id": "zhang-etal-2015-shallow", "title": "Shallow Convolutional Neural Network for Implicit Discourse Relation Recognition", "abstract": "Implicit discourse relation recognition remains a serious challenge due to the absence of discourse connectives. In this paper, we propose a Shallow Convolutional Neural Network (SCNN) for implicit discourse relation recognition, which contains only one hidden layer but is effective in relation recognition. The shallow structure alleviates the overfitting problem, while the convolution and nonlinear operations help preserve the recognition and generalization ability of our model. Experiments on the benchmark data set show that our model achieves comparable or even better performance when compared against current state-of-the-art systems.", "keyphrases": ["convolutional neural network", "discourse relation", "relation classification"]} +{"id": "baziotis-etal-2020-language", "title": "Language Model Prior for Low-Resource Neural Machine Translation", "abstract": "The scarcity of large parallel corpora is an important obstacle for neural machine translation. A common solution is to exploit the knowledge of language models (LM) trained on abundant monolingual data. In this work, we propose a novel approach to incorporate a LM as prior in a neural translation model (TM). Specifically, we add a regularization term, which pushes the output distributions of the TM to be probable under the LM prior, while avoiding wrong predictions when the TM \u201cdisagrees\u201d with the LM. This objective relates to knowledge distillation, where the LM can be viewed as teaching the TM about the target language. The proposed approach does not compromise decoding speed, because the LM is used only at training time, unlike previous work that requires it during inference. We present an analysis of the effects that different methods have on the distributions of the TM.
Results on two low-resource machine translation datasets show clear improvements even with limited monolingual data.", "keyphrases": ["regularization term", "output distribution", "language model"]} +{"id": "dasgupta-etal-2018-automatic-extraction", "title": "Automatic Extraction of Causal Relations from Text using Linguistically Informed Deep Neural Networks", "abstract": "In this paper we propose a linguistically informed recursive neural network architecture for automatic extraction of cause-effect relations from text. These relations can be expressed in arbitrarily complex ways. The architecture uses word-level embeddings and other linguistic features to detect causal events and their effects mentioned within a sentence. The extracted events and their relations are used to build a causal-graph after clustering and appropriate generalization, which is then used for predictive purposes. We have evaluated the performance of the proposed extraction model with respect to two baseline systems: one a rule-based classifier, and the other a conditional random field (CRF) based supervised model. We have also compared our results with related work reported in the past by other authors on the SEMEVAL data set, and found that the proposed bi-directional LSTM model enhanced with an additional linguistic layer performs better. We have also worked extensively on creating new annotated datasets from publicly available data, which we are willing to share with the community.", "keyphrases": ["extraction", "causal relation", "other linguistic feature", "lstm model"]} +{"id": "lai-hockenmaier-2014-illinois", "title": "Illinois-LH: A Denotational and Distributional Approach to Semantics", "abstract": "This paper describes and analyzes our SemEval 2014 Task 1 system. Its features are based on distributional and denotational similarities; word alignment; negation; and hypernym/hyponym, synonym, and antonym relations.", "keyphrases": ["negation", "hypernym", "synonym", "rte"]} +{"id": "muntes-mulero-etal-2012-multiplying", "title": "Multiplying the Potential of Crowdsourcing with Machine Translation", "abstract": "Machine Translation (MT) is said to be the next lingua franca. With the evolution of new technologies and the capacity to produce a humungous number of written digital documents, human translators will not be able to translate documentation fast enough. However, some applications require a level of quality that is still beyond that provided by MT. Thanks to the increased capacity of communication provided by new technologies, people can also interact and collaborate to work remotely. With this, crowd computing is becoming more common and it has been proposed as a feasible solution for translation. In this paper, we discuss the relationship between crowdsourcing and MT, and the main challenges for the MT community to multiply the potential of the crowd.", "keyphrases": ["potential", "crowdsourcing", "machine translation"]} +{"id": "shi-etal-2021-refine-imitate", "title": "Refine and Imitate: Reducing Repetition and Inconsistency in Persuasion Dialogues via Reinforcement Learning and Human Demonstration", "abstract": "A persuasion dialogue system reflects a machine's ability to make strategic moves beyond verbal communication, and therefore differentiates itself from task-oriented or open-domain dialogues and has its own unique values.
However, the repetition and inconsistency problems still persist in dialogue response generation and could substantially impact user experience and impede the persuasion outcome. Moreover, although reinforcement learning (RL) approaches have achieved great success in strategic tasks such as games, they require a sophisticated user simulator to provide real-time feedback to the dialogue system, which limits the application of RL to persuasion dialogues. To address these issues towards a better persuasion dialogue system, we apply RL to refine a language model baseline without user simulators, and distill sentence-level information about repetition, inconsistency, and task relevance through rewards. Moreover, to better accomplish the persuasion task, the model learns from human demonstration to imitate human persuasion behavior and selects the most persuasive responses. Experiments show that our model outperforms previous state-of-the-art dialogue models on both automatic metrics and human evaluation results on a donation persuasion task, and generates more diverse, consistent and persuasive conversations according to the user feedback. We will make the code and model publicly available.", "keyphrases": ["repetition", "reinforcement learning", "human demonstration", "persuasive response"]} +{"id": "gabbard-etal-2006-fully", "title": "Fully Parsing the Penn Treebank", "abstract": "We present a two-stage parser that recovers Penn Treebank-style syntactic analyses of new sentences including skeletal syntactic structure, and, for the first time, both function tags and empty categories. The accuracy of the first-stage parser on the standard Parseval metric matches that of the (Collins, 2003) parser on which it is based, despite the data fragmentation caused by the greatly enriched space of possible node labels. This first stage simultaneously achieves near state-of-the-art performance on recovering function tags with minimal modifications to the underlying parser, modifying less than ten lines of code. The second stage achieves state-of-the-art performance on the recovery of empty categories by combining a linguistically-informed architecture and a rich feature set with the power of modern machine learning methods.", "keyphrases": ["penn treebank", "function tag", "empty category", "machine learning method", "ptb"]} +{"id": "hagiwara-etal-2006-selection", "title": "Selection of Effective Contextual Information for Automatic Synonym Acquisition", "abstract": "Various methods have been proposed for automatic synonym acquisition, as synonyms are one of the most fundamental types of lexical knowledge. Whereas many methods are based on contextual clues of words, little attention has been paid to what kind of categories of contextual information are useful for the purpose. This study has experimentally investigated the impact of contextual information selection, by extracting three kinds of word relationships from corpora: dependency, sentence co-occurrence, and proximity. The evaluation result shows that while dependency and proximity perform relatively well by themselves, a combination of two or more kinds of contextual information gives more stable performance.
We've further investigated useful selection of dependency relations and modification categories, and it is found that modification has the greatest contribution, even greater than the widely adopted subject-object combination.", "keyphrases": ["contextual information", "automatic synonym acquisition", "selection"]} +{"id": "miyao-etal-2006-semantic", "title": "Semantic Retrieval for the Accurate Identification of Relational Concepts in Massive Textbases", "abstract": "This paper introduces a novel framework for the accurate retrieval of relational concepts from huge texts. Prior to retrieval, all sentences are annotated with predicate argument structures and ontological identifiers by applying a deep parser and a term recognizer. During the run time, user requests are converted into queries of region algebra on these annotations. Structural matching with pre-computed semantic annotations establishes the accurate and efficient retrieval of relational concepts. This framework was applied to a text retrieval system for MEDLINE. Experiments on the retrieval of biomedical correlations revealed that the cost is sufficiently small for real-time applications and that the retrieval precision is significantly improved.", "keyphrases": ["relational concept", "semantic retrieval", "biomedicine"]} +{"id": "fisch-etal-2020-capwap", "title": "CapWAP: Image Captioning with a Purpose", "abstract": "The traditional image captioning task uses generic reference captions to provide textual information about images. Different user populations, however, will care about different visual aspects of images. In this paper, we propose a new task, Captioning with A Purpose (CapWAP). Our goal is to develop systems that can be tailored to be useful for the information needs of an intended population, rather than merely provide generic information about an image. In this task, we use question-answer (QA) pairs\u2014a natural expression of information need\u2014from users, instead of reference captions, for both training and post-inference evaluation. We show that it is possible to use reinforcement learning to directly optimize for the intended information need, by rewarding outputs that allow a question answering model to provide correct answers to sampled user questions. We convert several visual question answering datasets into CapWAP datasets, and demonstrate that under a variety of scenarios our purposeful captioning system learns to anticipate and fulfill specific information needs better than its generic counterparts, as measured by QA performance on user questions from unseen images, when using the caption alone as context.", "keyphrases": ["image", "caption", "purpose", "visual question"]} +{"id": "poria-etal-2014-rule", "title": "A Rule-Based Approach to Aspect Extraction from Product Reviews", "abstract": "Sentiment analysis is a rapidly growing research field that has attracted both academia and industry because of the challenging research problems it poses and the potential benefits it can provide in many real life applications. Aspect-based opinion mining, in particular, is one of the fundamental challenges within this research field. In this work, we aim to solve the problem of aspect extraction from product reviews by proposing a novel rule-based approach that exploits common-sense knowledge and sentence dependency trees to detect both explicit and implicit aspects. 
Two popular review datasets were used for evaluating the system against state-of-the-art aspect extraction techniques, obtaining higher detection accuracy for both datasets.", "keyphrases": ["rule-based approach", "aspect extraction", "product review"]} +{"id": "kouno-etal-2015-unsupervised", "title": "Unsupervised Domain Adaptation for Word Sense Disambiguation using Stacked Denoising Autoencoder", "abstract": "In this paper, we propose an unsupervised domain adaptation method for Word Sense Disambiguation (WSD) using a Stacked Denoising Autoencoder (SdA). SdA is an unsupervised learning method that obtains an abstract feature set of the input data using a neural network. The abstract feature set absorbs the differences between domains, and thus SdA can solve a problem of domain adaptation. However, SdA does not cope with every problem of domain adaptation. In particular, the difficulty of domain adaptation for WSD depends on the combination of a source domain, a target domain and a target word. As a result, any method of domain adaptation for WSD has an adverse effect on part of the problem. Therefore, we define a similarity between two domains, and judge whether to use SdA through this similarity. This approach avoids the adverse effects of SdA. In the experiments, we used three domains from the Balanced Corpus of Contemporary Written Japanese and 16 target words. In comparison with the baseline, our method achieved higher average accuracies for all combinations of two domains. Furthermore, we obtained better results than conventional domain adaptation methods.", "keyphrases": ["word sense disambiguation", "stacked denoising autoencoder", "unsupervised domain adaptation"]} +{"id": "sugiyama-yoshinaga-2019-data", "title": "Data augmentation using back-translation for context-aware neural machine translation", "abstract": "A single sentence does not always convey information that is enough to translate it into other languages. Some target languages need to add or specialize words that are omitted or ambiguous in the source languages (e.g., zero pronouns in translating Japanese to English or epicene pronouns in translating English to French). To translate such ambiguous sentences, we need contexts beyond a single sentence, and have so far explored context-aware neural machine translation (NMT). However, a large amount of parallel corpora is not easily available to train accurate context-aware NMT models. In this study, we first obtain large-scale pseudo parallel corpora by back-translating monolingual data, and then investigate its impact on the translation accuracy of context-aware NMT models. We evaluated context-aware NMT models trained with small parallel corpora and the large-scale pseudo parallel corpora on English-Japanese and English-French datasets to demonstrate the large impact of the data augmentation for context-aware NMT models.", "keyphrases": ["back-translation", "neural machine translation", "data augmentation"]} +{"id": "williams-etal-2013-dialog", "title": "The Dialog State Tracking Challenge", "abstract": "In a spoken dialog system, dialog state tracking deduces information about the user\u2019s goal as the dialog progresses, synthesizing evidence such as dialog acts over multiple turns with external data sources. Recent approaches have been shown to overcome ASR and SLU errors in some applications. However, there are currently no common testbeds or evaluation measures for this task, hampering progress. 
The dialog state tracking challenge seeks to address this by providing a heterogeneous corpus of 15K human-computer dialogs in a standard format, along with a suite of 11 evaluation metrics. The challenge received a total of 27 entries from 9 research groups. The results show that the suite of performance metrics cluster into 4 natural groups. Moreover, the dialog systems that benefit most from dialog state tracking are those with less discriminative speech recognition confidence scores. Finally, generalization is a key problem: in 2 of the 4 test sets, fewer than half of the entries outperformed simple baselines.", "keyphrases": ["dialog state tracking", "spoken language understanding", "template", "task-oriented dialogue", "user goal"]} +{"id": "atanasova-etal-2020-diagnostic", "title": "A Diagnostic Study of Explainability Techniques for Text Classification", "abstract": "Recent developments in machine learning have introduced models that approach human performance at the cost of increased architectural complexity. 
Efforts to make the rationales behind the models' predictions transparent have inspired an abundance of new explainability techniques. Provided with an already trained model, they compute saliency scores for the words of an input instance. However, there exists no definitive guide on (i) how to choose such a technique given a particular application task and model architecture, and (ii) the benefits and drawbacks of using each such technique. In this paper, we develop a comprehensive list of diagnostic properties for evaluating existing explainability techniques. We then employ the proposed list to compare a set of diverse explainability techniques on downstream text classification tasks and neural network architectures. We also compare the saliency scores assigned by the explainability techniques with human annotations of salient input regions to find relations between a model's performance and the agreement of its rationales with human ones. Overall, we find that the gradient-based explanations perform best across tasks and model architectures, and we present further insights into the properties of the reviewed explainability techniques.", "keyphrases": ["diagnostic property", "explanation", "faithfulness"]} +{"id": "quan-etal-2020-risawoz", "title": "RiSAWOZ: A Large-Scale Multi-Domain Wizard-of-Oz Dataset with Rich Semantic Annotations for Task-Oriented Dialogue Modeling", "abstract": "In order to alleviate the shortage of multi-domain data and to capture discourse phenomena for task-oriented dialogue modeling, we propose RiSAWOZ, a large-scale multi-domain Chinese Wizard-of-Oz dataset with Rich Semantic Annotations. RiSAWOZ contains 11.2K human-to-human (H2H) multi-turn semantically annotated dialogues, with more than 150K utterances spanning over 12 domains, which is larger than all previous annotated H2H conversational datasets. Both single- and multi-domain dialogues are constructed, accounting for 65% and 35%, respectively. Each dialogue is labeled with comprehensive dialogue annotations, including dialogue goal in the form of natural language description, domain, dialogue states and acts at both the user and system side. In addition to traditional dialogue annotations, we especially provide linguistic annotations on discourse phenomena, e.g., ellipsis and coreference, in dialogues, which are useful for dialogue coreference and ellipsis resolution tasks. Apart from the fully annotated dataset, we also present a detailed description of the data collection procedure, statistics and analysis of the dataset. A series of benchmark models and results are reported, including natural language understanding (intent detection & slot filling), dialogue state tracking and dialogue context-to-text generation, as well as coreference and ellipsis resolution, which facilitate the baseline comparison for future research on this corpus.", "keyphrases": ["large-scale multi-domain", "rich semantic annotations", "task-oriented dialogue modeling"]} +{"id": "filatova-hatzivassiloglou-2004-formal", "title": "A Formal Model for Information Selection in Multi-Sentence Text Extraction", "abstract": "Selecting important information while accounting for repetitions is a hard task for both summarization and question answering. We propose a formal model that represents a collection of documents in a two-dimensional space of textual and conceptual units with an associated mapping between these two dimensions. 
This representation is then used to describe the task of selecting textual units for a summary or answer as a formal optimization task. We provide approximation algorithms and empirically validate the performance of the proposed model when used with two very different sets of features, words and atomic events.", "keyphrases": ["formal model", "text summarization", "maximum coverage problem"]} +{"id": "dos-santos-guimaraes-2015-boosting", "title": "Boosting Named Entity Recognition with Neural Character Embeddings", "abstract": "Most state-of-the-art named entity recognition (NER) systems rely on handcrafted features and on the output of other NLP tasks such as part-of-speech (POS) tagging and text chunking. In this work we propose a language-independent NER system that uses automatically learned features only. Our approach is based on the CharWNN deep neural network, which uses word-level and character-level representations (embeddings) to perform sequential classification. We perform an extensive number of experiments using two annotated corpora in two different languages: the HAREM I corpus, which contains texts in Portuguese; and the SPA CoNLL-2002 corpus, which contains texts in Spanish. Our experimental results shed light on the contribution of neural character embeddings for NER. Moreover, we demonstrate that the same neural network which has been successfully applied to POS tagging can also achieve state-of-the-art results for language-independent NER, using the same hyperparameters, and without any handcrafted features. For the HAREM I corpus, CharWNN outperforms the state-of-the-art system by 7.9 points in the F1-score for the total scenario (ten NE classes), and by 7.2 points in the F1 for the selective scenario (five NE classes).", "keyphrases": ["entity recognition", "character embedding", "deep neural network", "pos tagging"]} +{"id": "yang-etal-2020-efficient", "title": "Efficient Transfer Learning for Quality Estimation with Bottleneck Adapter Layer", "abstract": "The Predictor-Estimator framework for quality estimation (QE) is commonly used for its strong performance, where the predictor and estimator work on feature extraction and quality evaluation, respectively. However, training the predictor from scratch is computationally expensive. In this paper, we propose an efficient transfer learning framework to transfer knowledge from an NMT dataset into QE models. A Predictor-Estimator-like model named BAL-QE is also proposed, aiming to extract high quality features with a pre-trained NMT model, and make classification with a fine-tuned Bottleneck Adapter Layer (BAL). The experiment shows that BAL-QE achieves 97% of the SOTA performance in WMT19 En-De and En-Ru QE tasks by only training 3% of parameters within 4 hours on 4 Titan XP GPUs. Compared with the commonly used NuQE baseline, BAL-QE achieves 47% (En-Ru) and 75% (En-De) performance improvements.", "keyphrases": ["quality estimation", "bottleneck adapter layer", "efficient transfer learning"]} +{"id": "huang-etal-2019-cross", "title": "Cross-lingual Multi-Level Adversarial Transfer to Enhance Low-Resource Name Tagging", "abstract": "We focus on improving name tagging for low-resource languages using annotations from related languages. Previous studies either directly project annotations from a source language to a target language using cross-lingual representations or use a shared encoder in a multitask network to transfer knowledge. 
These approaches inevitably introduce noise to the target language annotation due to mismatched source-target sentence structures. To effectively transfer the resources, we develop a new neural architecture that leverages multi-level adversarial transfer: (1) word-level adversarial training, which projects source language words into the same semantic space as those of the target language without using any parallel corpora or bilingual gazetteers, and (2) sentence-level adversarial training, which yields language-agnostic sequential features. Our neural architecture outperforms previous approaches on CoNLL data sets. Moreover, on 10 low-resource languages, our approach achieves up to 16% absolute F-score gain over all high-performing baselines on cross-lingual transfer without using any target-language resources.", "keyphrases": ["multi-level adversarial transfer", "name tagging", "cross-lingual transfer"]} +{"id": "schneider-waibel-2019-kits", "title": "KIT's Submission to the IWSLT 2019 Shared Task on Text Translation", "abstract": "In this paper, we describe KIT's submission for the IWSLT 2019 shared task on text translation. Our system is based on the transformer model [1] using our in-house implementation. We augment the available training data using back-translation and employ fine-tuning for the final model. For our best results, we used a 12-layer transformer-big configuration, achieving state-of-the-art results on the WMT2018 test set. We also experiment with student-teacher models to improve performance of smaller models.", "keyphrases": ["iwslt", "text translation", "kit"]} +{"id": "li-etal-2019-findings", "title": "Findings of the First Shared Task on Machine Translation Robustness", "abstract": "We share the findings of the first shared task on improving robustness of Machine Translation (MT). The task provides a testbed representing challenges facing MT models deployed in the real world, and facilitates new approaches to improve models' robustness to noisy input and domain mismatch. We focus on two language pairs (English-French and English-Japanese), and the submitted systems are evaluated on a blind test set consisting of noisy comments on Reddit and professionally sourced translations. As a new task, we received 23 submissions by 11 participating teams from universities, companies, national labs, etc. All submitted systems achieved large improvements over baselines, with the best improvement having +22.33 BLEU. We evaluated submissions by both human judgment and automatic evaluation (BLEU), which shows high correlations (Pearson's r = 0.94 and 0.95). Furthermore, we conducted a qualitative analysis of the submitted systems using compare-mt, which revealed their salient differences in handling challenges in this task. Such analysis provides additional insights when there is occasional disagreement between human judgment and BLEU, e.g. systems better at producing colloquial expressions received higher score from human judgment.", "keyphrases": ["first shared task", "machine translation robustness", "noise", "nmt model"]} +{"id": "davani-etal-2022-dealing", "title": "Dealing with Disagreements: Looking Beyond the Majority Vote in Subjective Annotations", "abstract": "Majority voting and averaging are common approaches used to resolve annotator disagreements and derive single ground truth labels from multiple annotations. 
However, annotators may systematically disagree with one another, often reflecting their individual biases and values, especially in the case of subjective tasks such as detecting affect, aggression, and hate speech. Annotator disagreements may capture important nuances in such tasks that are often ignored while aggregating annotations to a single ground truth. In order to address this, we investigate the efficacy of multi-annotator models. In particular, our multi-task based approach treats predicting each annotators' judgements as separate subtasks, while sharing a common learned representation of the task. We show that this approach yields same or better performance than aggregating labels in the data prior to training across seven different binary classification tasks. Our approach also provides a way to estimate uncertainty in predictions, which we demonstrate better correlate with annotation disagreements than traditional methods. Being able to model uncertainty is especially useful in deployment scenarios where knowing when not to make a prediction is important.", "keyphrases": ["disagreement", "single ground truth", "value", "separate subtask"]} +{"id": "maruf-etal-2019-selective", "title": "Selective Attention for Context-aware Neural Machine Translation", "abstract": "Despite the progress made in sentence-level NMT, current systems still fall short at achieving fluent, good quality translation for a full document. Recent works in context-aware NMT consider only a few previous sentences as context and may not scale to entire documents. To this end, we propose a novel and scalable top-down approach to hierarchical attention for context-aware NMT which uses sparse attention to selectively focus on relevant sentences in the document context and then attends to key words in those sentences. We also propose single-level attention approaches based on sentence or word-level information in the context. The document-level context representation, produced from these attention modules, is integrated into the encoder or decoder of the Transformer model depending on whether we use monolingual or bilingual context. Our experiments and evaluation on English-German datasets in different document MT settings show that our selective attention approach not only significantly outperforms context-agnostic baselines but also surpasses context-aware baselines in most cases.", "keyphrases": ["machine translation", "context-aware nmt", "document context", "attention module"]} +{"id": "zhao-etal-2017-generative", "title": "Generative Encoder-Decoder Models for Task-Oriented Spoken Dialog Systems with Chatting Capability", "abstract": "Generative encoder-decoder models offer great promise in developing domain-general dialog systems. However, they have mainly been applied to open-domain conversations. This paper presents a practical and novel framework for building task-oriented dialog systems based on encoder-decoder models. This framework enables encoder-decoder models to accomplish slot-value independent decision-making and interact with external databases. Moreover, this paper shows the flexibility of the proposed method by interleaving chatting capability with a slot-filling system for better out-of-domain recovery. The models were trained on both real-user data from a bus information system and human-human chat data. 
Results show that the proposed framework achieves good performance in both offline evaluation metrics and in task success rate with human users.", "keyphrases": ["encoder-decoder model", "chatting capability", "task-oriented dialog system"]} +{"id": "banerjee-etal-2010-combining", "title": "Combining Multi-Domain Statistical Machine Translation Models using Automatic Classifiers", "abstract": "This paper presents a set of experiments on Domain Adaptation of Statistical Machine Translation systems. The experiments focus on Chinese-English and two domain-specific corpora. The paper presents a novel approach for combining multiple domain-trained translation models to achieve improved translation quality for both domain-specific as well as combined sets of sentences. We train a statistical classifier to classify sentences according to the appropriate domain and utilize the corresponding domain-specific MT models to translate them. Experimental results show that the method achieves a statistically significant absolute improvement of 1.58 BLEU (2.86% relative improvement) score over a translation model trained on combined data, and considerable improvements over a model using multiple decoding paths of the Moses decoder, for the combined domain test set. Furthermore, even for domain-specific test sets, our approach works almost as well as dedicated domain-specific models and perfect classification.", "keyphrases": ["translation model", "statistical classifier", "domain-specific model"]} +{"id": "wang-etal-2018-label", "title": "Label-Free Distant Supervision for Relation Extraction via Knowledge Graph Embedding", "abstract": "Distant supervision is an effective method to generate large scale labeled data for relation extraction, which assumes that if a pair of entities appears in some relation of a Knowledge Graph (KG), all sentences containing those entities in a large unlabeled corpus are then labeled with that relation to train a relation classifier. However, when the pair of entities has multiple relationships in the KG, this assumption may produce noisy relation labels. This paper proposes a label-free distant supervision method, which makes no use of the relation labels under this inadequate assumption, but only uses the prior knowledge derived from the KG to supervise the learning of the classifier directly and softly. Specifically, we make use of the type information and the translation law derived from typical KG embedding model to learn embeddings for certain sentence patterns. As the supervision signal is only determined by the two aligned entities, neither hard relation labels nor extra noise-reduction model for the bag of sentences is needed in this way. The experiments show that the approach performs well in current distant supervision dataset.", "keyphrases": ["distant supervision", "relation extraction", "knowledge graph"]} +{"id": "durrett-denero-2013-supervised", "title": "Supervised Learning of Complete Morphological Paradigms", "abstract": "We describe a supervised approach to predicting the set of all inflected forms of a lexical item. Our system automatically acquires the orthographic transformation rules of morphological paradigms from labeled examples, and then learns the contexts in which those transformations apply using a discriminative sequence model. Because our approach is completely data-driven and the model is trained on examples extracted from Wiktionary, our method can extend to new languages without change. 
Our end-to-end system is able to predict complete paradigms with 86.1% accuracy and individual inflected forms with 94.9% accuracy, averaged across three languages and two parts of speech.", "keyphrases": ["paradigm", "inflection", "semi-supervised learning"]} +{"id": "yeh-etal-2015-condition", "title": "Condition Random Fields-based Grammatical Error Detection for Chinese as Second Language", "abstract": "Learning Chinese as a second language is not easy for foreign learners, because Chinese has many special rules that differ from other languages. People learning Chinese as a foreign language usually make grammatical errors, such as missing, redundant, selection and disorder errors. In this paper, we propose conditional random fields (CRFs) to detect these grammatical errors. Features based on statistical word and part-of-speech (POS) patterns were adopted here. The relationships between words expressed by part-of-speech are helpful for Chinese grammatical error detection. Finally, we used the CRF to determine which error types occur in sentences. The experimental results show that the performance of the proposed model is acceptable in terms of precision and recall.", "keyphrases": ["grammatical error detection", "chinese", "second language"]} +{"id": "dou-etal-2019-investigating", "title": "Investigating Meta-Learning Algorithms for Low-Resource Natural Language Understanding Tasks", "abstract": "Learning general representations of text is a fundamental problem for many natural language understanding (NLU) tasks. Previously, researchers have proposed to use language model pre-training and multi-task learning to learn robust representations. However, these methods can achieve sub-optimal performance in low-resource scenarios. Inspired by the recent success of optimization-based meta-learning algorithms, in this paper, we explore the model-agnostic meta-learning algorithm (MAML) and its variants for low-resource NLU tasks. We validate our methods on the GLUE benchmark and show that our proposed models can outperform several strong baselines. We further empirically demonstrate that the learned representations can be adapted to new tasks efficiently and effectively.", "keyphrases": ["meta-learning algorithm", "natural language understanding", "maml", "low-resource nlu task", "new task"]} +{"id": "xu-etal-2019-alter", "title": "ALTER: Auxiliary Text Rewriting Tool for Natural Language Generation", "abstract": "In this paper, we describe ALTER, an auxiliary text rewriting tool that facilitates the rewriting process for natural language generation tasks, such as paraphrasing, text simplification, fairness-aware text rewriting, and text style transfer. Our tool is characterized by two features, i) recording of word-level revision histories and ii) flexible auxiliary edit support and feedback to annotators. The text rewriting assistance and traceable rewriting history are potentially beneficial to the future research of natural language generation.", "keyphrases": ["auxiliary text", "natural language generation", "alter"]} +{"id": "durmus-etal-2020-feqa", "title": "FEQA: A Question Answering Evaluation Framework for Faithfulness Assessment in Abstractive Summarization", "abstract": "Neural abstractive summarization models are prone to generate content inconsistent with the source document, i.e. unfaithful. Existing automatic metrics do not capture such mistakes effectively. We tackle the problem of evaluating faithfulness of a generated summary given its source document. 
We first collected human annotations of faithfulness for outputs from numerous models on two datasets. We find that current models exhibit a trade-off between abstractiveness and faithfulness: outputs with less word overlap with the source document are more likely to be unfaithful. Next, we propose an automatic question answering (QA) based metric for faithfulness, FEQA, which leverages recent advances in reading comprehension. Given question-answer pairs generated from the summary, a QA model extracts answers from the document; non-matched answers indicate unfaithful information in the summary. Among metrics based on word overlap, embedding similarity, and learned language understanding models, our QA-based metric has significantly higher correlation with human faithfulness scores, especially on highly abstractive summaries.", "keyphrases": ["evaluation framework", "abstractive summarization", "question generation"]} +{"id": "mao-etal-2020-jass", "title": "JASS: Japanese-specific Sequence to Sequence Pre-training for Neural Machine Translation", "abstract": "Neural machine translation (NMT) needs large parallel corpora for state-of-the-art translation quality. Low-resource NMT is typically addressed by transfer learning which leverages large monolingual or parallel corpora for pre-training. Monolingual pre-training approaches such as MASS (MAsked Sequence to Sequence) are extremely effective in boosting NMT quality for languages with small parallel corpora. However, they do not account for linguistic information obtained using syntactic analyzers which is known to be invaluable for several Natural Language Processing (NLP) tasks. To this end, we propose JASS, Japanese-specific Sequence to Sequence, as a novel pre-training alternative to MASS for NMT involving Japanese as the source or target language. JASS is joint BMASS (Bunsetsu MASS) and BRSS (Bunsetsu Reordering Sequence to Sequence) pre-training which focuses on Japanese linguistic units called bunsetsus. In our experiments on ASPEC Japanese\u2013English and News Commentary Japanese\u2013Russian translation we show that JASS can give results that are competitive with if not better than those given by MASS. Furthermore, we show for the first time that joint MASS and JASS pre-training gives results that significantly surpass the individual methods indicating their complementary nature. We will release our code, pre-trained models and bunsetsu annotated data as resources for researchers to use in their own NLP tasks.", "keyphrases": ["japanese-specific sequence", "neural machine translation", "jass"]} +{"id": "yancheva-rudzicz-2016-vector", "title": "Vector-space topic models for detecting Alzheimer's disease", "abstract": "Semantic deficit is a symptom of language impairment in Alzheimer\u2019s disease (AD). We present a generalizable method for automatic generation of information content units (ICUs) for a picture used in a standard clinical task, achieving high recall, 96.8%, of human-supplied ICUs. We use the automatically generated topic model to extract semantic features, and train a random forest classifier to achieve an F-score of 0.74 in binary classification of controls versus people with AD using a set of only 12 features. This is comparable to results (0.72 F-score) with a set of 85 manual features. Adding semantic information to a set of standard lexicosyntactic and acoustic features improves F-score to 0.80. 
While control and dementia subjects discuss the same topics in the same contexts, controls are more informative per second of speech.", "keyphrases": ["topic model", "alzheimer", "disease"]} +{"id": "jin-etal-2018-unsupervised", "title": "Unsupervised Grammar Induction with Depth-bounded PCFG", "abstract": "There has been recent interest in applying cognitively- or empirically-motivated bounds on recursion depth to limit the search space of grammar induction models (Ponvert et al., 2011; Noji and Johnson, 2016; Shain et al., 2016). This work extends this depth-bounding approach to probabilistic context-free grammar induction (DB-PCFG), which has a smaller parameter space than hierarchical sequence models, and therefore more fully exploits the space reductions of depth-bounding. Results for this model on grammar acquisition from transcribed child-directed speech and newswire text exceed or are competitive with those of other models when evaluated on parse accuracy. Moreover, grammars acquired from this model demonstrate a consistent use of category labels, something which has not been demonstrated by other acquisition models.", "keyphrases": ["induction", "pcfg", "search space", "grammar acquisition"]} +{"id": "tinsley-etal-2012-iptranslator", "title": "IPTranslator: Facilitating Patent Search with Machine Translation", "abstract": "Intellectual Property professionals frequently need to carry out patent searches for a variety of reasons. During a typical search, they will retrieve approximately 30% of their results in a foreign language. The machine translation (MT) options currently available to patent searchers for these foreign-language patents vary in their quality, consistency, and general level of service. In this article, we introduce IPTranslator; an MT web service designed to cater for the needs of patent searchers. At the core of IPTranslator is a set of MT systems developed specifically for translating patent text. We describe the challenges faced in adapting MT technology to such a complex domain, and how the systems were evaluated to ensure that the quality was fit for purpose. Finally, we present the framework through which the IPTranslator service is delivered to users, and the value-adding features which address many of the issues with existing solutions.", "keyphrases": ["patent search", "machine translation", "iptranslator"]} +{"id": "tannier-etal-2011-grawltcq", "title": "GrawlTCQ: Terminology and Corpora Building by Ranking Simultaneously Terms, Queries and Documents using Graph Random Walks", "abstract": "In this paper, we present GrawlTCQ, a new bootstrapping algorithm for building specialized terminology, corpora and queries, based on a graph model. We model links between documents, terms and queries, and use a random walk with restart algorithm to compute relevance propagation. We have evaluated GrawlTCQ on an AFP English corpus of 57,441 news over 10 categories. For corpora building, GrawlTCQ outperforms the BootCaT tool, which is vastly used in the domain. For 1,000 documents retrieved, we improve mean precision by 25%. 
GrawlTCQ has also shown to be faster and more robust than BootCaT over iterations.", "keyphrases": ["terminology", "corpora building", "grawltcq"]} +{"id": "goyal-durrett-2020-evaluating", "title": "Evaluating Factuality in Generation with Dependency-level Entailment", "abstract": "Despite significant progress in text generation models, a serious limitation is their tendency to produce text that is factually inconsistent with information in the input. Recent work has studied whether textual entailment systems can be used to identify factual errors; however, these sentence-level entailment models are trained to solve a different problem than generation filtering and they do not localize which part of a generation is non-factual. In this paper, we propose a new formulation of entailment that decomposes it at the level of dependency arcs. Rather than focusing on aggregate decisions, we instead ask whether the semantic relationship manifested by individual dependency arcs in the generated output is supported by the input. Human judgments on this task are difficult to obtain; we therefore propose a method to automatically create data based on existing entailment or paraphrase corpora. Experiments show that our dependency arc entailment model trained on this data can identify factual inconsistencies in paraphrasing and summarization better than sentence-level methods or those based on question generation, while additionally localizing the erroneous parts of the generation.", "keyphrases": ["dependency-level entailment", "factual error", "dependency arc"]} +{"id": "cai-lam-2019-core", "title": "Core Semantic First: A Top-down Approach for AMR Parsing", "abstract": "We introduce a novel scheme for parsing a piece of text into its Abstract Meaning Representation (AMR): Graph Spanning based Parsing (GSP). One novel characteristic of GSP is that it constructs a parse graph incrementally in a top-down fashion. Starting from the root, at each step, a new node and its connections to existing nodes will be jointly predicted. The output graph spans the nodes by the distance to the root, following the intuition of first grasping the main ideas then digging into more details. The core semantic first principle emphasizes capturing the main ideas of a sentence, which is of great interest. We evaluate our model on the latest AMR sembank and achieve the state-of-the-art performance in the sense that no heuristic graph re-categorization is adopted. More importantly, the experiments show that our parser is especially good at obtaining the core semantics.", "keyphrases": ["amr", "more detail", "core semantic"]} +{"id": "hale-etal-2006-pcfgs", "title": "PCFGs with Syntactic and Prosodic Indicators of Speech Repairs", "abstract": "A grammatical method of combining two kinds of speech repair cues is presented. One cue, prosodic disjuncture, is detected by a decision tree-based ensemble classifier that uses acoustic cues to identify where normal prosody seems to be interrupted (Lickley, 1996). The other cue, syntactic parallelism, codifies the expectation that repairs continue a syntactic category that was left unfinished in the reparandum (Levelt, 1983). The two cues are combined in a Treebank PCFG whose states are split using a few simple tree transformations. 
Parsing performance on the Switchboard and Fisher corpora suggests that these two cues help to locate speech repairs in a synergistic way.", "keyphrases": ["speech repair", "repair", "constituent"]} +{"id": "hara-etal-2009-coordinate", "title": "Coordinate Structure Analysis with Global Structural Constraints and Alignment-Based Local Features", "abstract": "We propose a hybrid approach to coordinate structure analysis that combines a simple grammar to ensure consistent global structure of coordinations in a sentence, and features based on sequence alignment to capture local symmetry of conjuncts. The weight of the alignment-based features, which in turn determines the score of coordinate structures, is optimized by perceptron training on a given corpus. A bottom-up chart parsing algorithm efficiently finds the best scoring structure, taking both nested and non-overlapping flat coordinations into account. We demonstrate that our approach outperforms existing parsers in coordination scope detection on the Genia corpus.", "keyphrases": ["alignment-based local feature", "sequence alignment", "local symmetry", "coordination", "dual decomposition"]} +{"id": "li-etal-2022-ultra", "title": "Ultra-fine Entity Typing with Indirect Supervision from Natural Language Inference", "abstract": "The task of ultra-fine entity typing (UFET) seeks to predict diverse and free-form words or phrases that describe the appropriate types of entities mentioned in sentences. A key challenge for this task lies in the large number of types and the scarcity of annotated data per type. Existing systems formulate the task as a multi-way classification problem and train directly or distantly supervised classifiers. This causes two issues: (i) the classifiers do not capture the type semantics because types are often converted into indices; (ii) systems developed in this way are limited to predicting within a pre-defined type set, and often fall short of generalizing to types that are rarely seen or unseen in training. This work presents LITE\ud83c\udf7b, a new approach that formulates entity typing as a natural language inference (NLI) problem, making use of (i) the indirect supervision from NLI to infer type information meaningfully represented as textual hypotheses and alleviate the data scarcity issue, as well as (ii) a learning-to-rank objective to avoid the pre-defining of a type set. Experiments show that, with limited training data, LITE obtains state-of-the-art performance on the UFET task. In addition, LITE demonstrates its strong generalizability by not only yielding best results on other fine-grained entity typing benchmarks, more importantly, a pre-trained LITE system works well on new data containing unseen types.", "keyphrases": ["indirect supervision", "natural language inference", "ultra-fine entity typing"]} +{"id": "chen-durrett-2019-understanding", "title": "Understanding Dataset Design Choices for Multi-hop Reasoning", "abstract": "Learning multi-hop reasoning has been a key challenge for reading comprehension models, leading to the design of datasets that explicitly focus on it. Ideally, a model should not be able to perform well on a multi-hop question answering task without doing multi-hop reasoning. In this paper, we investigate two recently proposed datasets, WikiHop and HotpotQA. First, we explore sentence-factored models for these tasks; by design, these models cannot do multi-hop reasoning, but they are still able to solve a large number of examples in both datasets. 
Furthermore, we find spurious correlations in the unmasked version of WikiHop, which make it easy to achieve high performance considering only the questions and answers. Finally, we investigate one key difference between these datasets, namely span-based vs. multiple-choice formulations of the QA task. Multiple-choice versions of both datasets can be easily gamed, and two models we examine only marginally exceed a baseline in this setting. Overall, while these datasets are useful testbeds, high-performing models may not be learning as much multi-hop reasoning as previously thought.", "keyphrases": ["multi-hop reasoning", "reasoning", "hotpotqa", "large number"]} +{"id": "navigli-vannella-2013-semeval", "title": "SemEval-2013 Task 11: Word Sense Induction and Disambiguation within an End-User Application", "abstract": "In this paper we describe our Semeval-2013 task on Word Sense Induction and Disambiguation within an end-user application, namely Web search result clustering and diversification. Given a target query, induction and disambiguation systems are requested to cluster and diversify the search results returned by a search engine for that query. The task enables the end-to-end evaluation and comparison of systems.", "keyphrases": ["disambiguation", "end-user application", "semeval-2013 task"]} +{"id": "voutilainen-purtonen-2011-double", "title": "A double-blind experiment on interannotator agreement: the case of dependency syntax and Finnish", "abstract": "Manually performed treebanking is an expensive effort compared with automatic annotation. In return, manual treebanking is generally believed to provide higher-quality/value syntactic annotation than automatic methods. Unfortunately, there is little or no empirical evidence for or against this belief, though arguments have been voiced for the high degree of subjectivity in other levels of linguistic analysis (e.g. morphological annotation). We report a double-blind annotation experiment at the level of dependency syntax, using a small Finnish corpus as the analysis data. The results suggest that an interannotator agreement can be reached as a result of reviews and negotiations that is much higher than the corresponding labelled attachment scores (LAS) reported for state-of-the-art dependency parsers.", "keyphrases": ["double-blind experiment", "interannotator agreement", "dependency syntax"]} +{"id": "sudo-etal-2003-improved", "title": "An Improved Extraction Pattern Representation Model for Automatic IE Pattern Acquisition", "abstract": "Several approaches have been described for the automatic unsupervised acquisition of patterns for information extraction. Each approach is based on a particular model for the patterns to be acquired, such as a predicate-argument structure or a dependency chain. The effect of these alternative models has not been previously studied. In this paper, we compare the prior models and introduce a new model, the Subtree model, based on arbitrary subtrees of dependency trees. We describe a discovery procedure for this model and demonstrate experimentally an improvement in recall using Subtree patterns.", "keyphrases": ["event-specific document", "extractor", "common word pattern"]} +{"id": "abrahamsson-etal-2014-medical", "title": "Medical text simplification using synonym replacement: Adapting assessment of word difficulty to a compounding language", "abstract": "Medical texts can be difficult to understand for laymen, due to a frequent occurrence of specialised medical terms. 
Replacing these difficult terms with easier synonyms can, however, lead to improved readability. In this study, we have adapted a method for assessing difficulty of words to make it more suitable to medical Swedish. The difficulty of a word was assessed not only by measuring the frequency of the word in a general corpus, but also by measuring the frequency of substrings of words, thereby adapting the method to the compounding nature of Swedish. All words having a MeSH synonym that was assessed as easier were replaced in a corpus of medical text. According to the readability measure LIX, the replacement resulted in a slightly more difficult text, while the readability increased according to the OVIX measure and to a preliminary reader study.", "keyphrases": ["simplification", "synonym replacement", "medical swedish", "medical text"]} +{"id": "vaswani-etal-2011-rule", "title": "Rule Markov Models for Fast Tree-to-String Translation", "abstract": "Most statistical machine translation systems rely on composed rules (rules that can be formed out of smaller rules in the grammar). Though this practice improves translation by weakening independence assumptions in the translation model, it nevertheless results in huge, redundant grammars, making both training and decoding inefficient. Here, we take the opposite approach, where we only use minimal rules (those that cannot be formed out of other rules), and instead rely on a rule Markov model of the derivation history to capture dependencies between minimal rules. Large-scale experiments on a state-of-the-art tree-to-string translation system show that our approach leads to a slimmer model, a faster decoder, yet the same translation quality (measured using BLEU) as composed rules.", "keyphrases": ["translation model", "rule markov model", "fast decoder"]} +{"id": "friedrich-palmer-2014-situation", "title": "Situation Entity Annotation", "abstract": "This paper presents an annotation scheme for a new semantic annotation task with relevance for analysis and computation at both the clause level and the discourse level. More specifically, we label the finite clauses of texts with the type of situation entity (e.g., eventualities, statements about kinds, or statements of belief) they introduce to the discourse, following and extending work by Smith (2003). We take a feature-driven approach to annotation, with the result that each clause is also annotated with fundamental aspectual class, whether the main NP referent is specific or generic, and whether the situation evoked is episodic or habitual. This annotation is performed (so far) on three sections of the MASC corpus, with each clause labeled by at least two annotators. In this paper we present the annotation scheme, statistics of the corpus in its current version, and analyses of both inter-annotator agreement and intra-annotator consistency.", "keyphrases": ["annotation scheme", "clause", "situation entity"]} +{"id": "xu-carpuat-2021-editor", "title": "EDITOR: An Edit-Based Transformer with Repositioning for Neural Machine Translation with Soft Lexical Constraints", "abstract": "We introduce an Edit-Based TransfOrmer with Repositioning (EDITOR), which makes sequence generation flexible by seamlessly allowing users to specify preferences in output lexical choice. Building on recent models for non-autoregressive sequence generation (Gu et al., 2019), EDITOR generates new sequences by iteratively editing hypotheses. 
It relies on a novel reposition operation designed to disentangle lexical choice from word positioning decisions, while enabling efficient oracles for imitation learning and parallel edits at decoding time. Empirically, EDITOR uses soft lexical constraints more effectively than the Levenshtein Transformer (Gu et al., 2019) while speeding up decoding dramatically compared to constrained beam search (Post and Vilar, 2018). EDITOR also achieves comparable or better translation quality with faster decoding speed than the Levenshtein Transformer on standard Romanian-English, English-German, and English-Japanese machine translation tasks.", "keyphrases": ["edit-based transformer", "lexical constraint", "editor"]} +{"id": "lo-etal-2013-improving", "title": "Improving machine translation by training against an automatic semantic frame based evaluation metric", "abstract": "We present the first ever results showing that tuning a machine translation system against a semantic frame based objective function, MEANT, produces more robustly adequate translations than tuning against BLEU or TER as measured across commonly used metrics and human subjective evaluation. Moreover, for informal web forum data, human evaluators preferred MEANT-tuned systems over BLEU- or TER-tuned systems by a significantly wider margin than that for formal newswire\u2014even though automatic semantic parsing might be expected to fare worse on informal language. We argue that by preserving the meaning of the translations as captured by semantic frames right in the training process, an MT system is constrained to make more accurate choices of both lexical and reordering rules. As a result, MT systems tuned against semantic frame based MT evaluation metrics produce output that is more adequate. Tuning a machine translation system against a semantic frame based objective function is independent of the translation model paradigm, so, any translation model can benefit from the semantic knowledge incorporated to improve translation adequacy through our approach.", "keyphrases": ["machine translation", "semantic frame", "evaluation metric"]} +{"id": "vossen-etal-2008-kyoto", "title": "KYOTO: a System for Mining, Structuring and Distributing Knowledge across Languages and Cultures", "abstract": "We outline work performed within the framework of a current EC project. The goal is to construct a language-independent information system for a specific domain (environment/ecology/biodiversity) anchored in a language-independent ontology that is linked to wordnets in seven languages. For each language, information extraction and identification of lexicalized concepts with ontological entries is carried out by text miners (\u201cKybots\u201d). The mapping of language-specific lexemes to the ontology allows for crosslinguistic identification and translation of equivalent terms. The infrastructure developed within this project enables long-range knowledge sharing and transfer across many languages and cultures, addressing the need for global and uniform transition of knowledge beyond the specific domains addressed here.", "keyphrases": ["culture", "project", "specific domain", "information extraction", "kyoto"]} +{"id": "yoshikawa-etal-2016-joint", "title": "Joint Transition-based Dependency Parsing and Disfluency Detection for Automatic Speech Recognition Texts", "abstract": "Joint dependency parsing with disfluency detection is an important task in speech language processing. 
Recent methods show high performance for this task, although most authors make the unrealistic assumption that input texts are transcribed by human annotators. In real-world applications, the input text is typically the output of an automatic speech recognition (ASR) system, which implies that the text contains not only disfluency noises but also recognition errors from the ASR system. In this work, we propose a parsing method that handles both disfluency and ASR errors using an incremental shift-reduce algorithm with several novel features suited to ASR output texts. Because the gold dependency information is usually annotated only on transcribed texts, we also introduce an alignment-based method for transferring the gold dependency annotation to the ASR output texts to construct training data for our parser. We conducted an experiment on the Switchboard corpus and show that our method outperforms conventional methods in terms of dependency parsing and disfluency detection.", "keyphrases": ["disfluency detection", "asr output text", "transition-based dependency parser"]} +{"id": "goutte-etal-2014-nrc", "title": "The NRC System for Discriminating Similar Languages", "abstract": "We describe the system built by the National Research Council Canada for the \u201cDiscriminating between similar languages\u201d (DSL) shared task. Our system uses various statistical classifiers and makes predictions based on a two-stage process: we first predict the language group, then discriminate between languages or variants within the group. Language groups are predicted using a generative classifier with 99.99% accuracy on the five target groups. Within each group (except English), we use a voting combination of discriminative classifiers trained on a variety of feature spaces, achieving an average accuracy of 95.71%, with per-group accuracy between 90.95% and 100% depending on the group. This approach turns out to reach the best performance among all systems submitted to the open and closed tasks.", "keyphrases": ["dsl", "language group", "two-step classification approach", "submission track", "good result"]} +{"id": "deng-etal-2021-compression", "title": "Compression, Transduction, and Creation: A Unified Framework for Evaluating Natural Language Generation", "abstract": "Natural language generation (NLG) spans a broad range of tasks, each of which serves for specific objectives and desires different properties of generated text. The complexity makes automatic evaluation of NLG particularly challenging. Previous work has typically focused on a single task and developed individual evaluation metrics based on specific intuitions. In this paper, we propose a unifying perspective based on the nature of information change in NLG tasks, including compression (e.g., summarization), transduction (e.g., text rewriting), and creation (e.g., dialog). _Information alignment_ between input, context, and output text plays a common central role in characterizing the generation. With automatic alignment prediction models, we develop a family of interpretable metrics that are suitable for evaluating key aspects of different NLG tasks, often without need of gold reference data. 
Experiments show the uniformly designed metrics achieve stronger or comparable correlations with human judgement compared to state-of-the-art metrics in each of diverse tasks, including text summarization, style transfer, and knowledge-grounded dialog.", "keyphrases": ["transduction", "creation", "compression"]} +{"id": "li-2009-use", "title": "On the Use of Virtual Evidence in Conditional Random Fields", "abstract": "Virtual evidence (VE), first introduced by (Pearl, 1988), provides a convenient way of incorporating prior knowledge into Bayesian networks. This work generalizes the use of VE to undirected graphical models and, in particular, to conditional random fields (CRFs). We show that VE can be naturally encoded into a CRF model as potential functions. More importantly, we propose a novel semi-supervised machine learning objective for estimating a CRF model integrated with VE. The objective can be optimized using the Expectation-Maximization algorithm while maintaining the discriminative nature of CRFs. When evaluated on the CLASSIFIEDS data, our approach significantly outperforms the best known solutions reported on this task.", "keyphrases": ["virtual evidence", "conditional random field", "prior knowledge", "function"]} +{"id": "yamada-etal-2019-incorporating", "title": "Incorporating Textual Information on User Behavior for Personality Prediction", "abstract": "Several recent studies have shown that textual information of user posts and user behaviors such as liking and sharing the specific posts are useful for predicting the personality of social media users. However, less attention has been paid to the textual information derived from the user behaviors. In this paper, we investigate the effect of textual information on user behaviors for personality prediction. Our experiments on the personality prediction of Twitter users show that the textual information of user behaviors is more useful than the co-occurrence information of the user behaviors. They also show that taking user behaviors into account is crucial for predicting the personality of users who do not post frequently.", "keyphrases": ["textual information", "user behavior", "personality prediction"]} +{"id": "stewart-2014-now", "title": "Now We Stronger than Ever: African-American English Syntax in Twitter", "abstract": "African American English (AAE) is a well-established dialect that exhibits a distinctive syntax, including constructions like habitual be. Using data mined from the social media service Twitter, the proposed senior thesis project intends to study the demographic distribution of a sub-set of AAE syntactic constructions. This study expands on previous sociolinguistic Twitter work (Eisenstein et al., 2011) by adding part-of-speech tags to the data, thus enabling detection of short-range syntactic features. Through an analysis of ethnic and gender data associated with AAE tweets, this project will provide a more accurate description of the dialect\u2019s speakers and distribution.", "keyphrases": ["african-american english", "twitter", "dialect"]} +{"id": "dimitrov-etal-2021-semeval", "title": "SemEval-2021 Task 6: Detection of Persuasion Techniques in Texts and Images", "abstract": "We describe SemEval-2021 task 6 on Detection of Persuasion Techniques in Texts and Images: the data, the annotation guidelines, the evaluation setup, the results, and the participating systems.
The task focused on memes and had three subtasks: (i) detecting the techniques in the text, (ii) detecting the text spans where the techniques are used, and (iii) detecting techniques in the entire meme, i.e., both in the text and in the image. It was a popular task, attracting 71 registrations, and 22 teams that eventually made an official submission on the test set. The evaluation results for the third subtask confirmed the importance of both modalities, the text and the image. Moreover, some teams reported benefits when not just combining the two modalities, e.g., by using early or late fusion, but rather modeling the interaction between them in a joint model.", "keyphrases": ["persuasion techniques", "texts", "semeval-2021 task"]} +{"id": "awasthi-etal-2019-parallel", "title": "Parallel Iterative Edit Models for Local Sequence Transduction", "abstract": "We present a Parallel Iterative Edit (PIE) model for the problem of local sequence transduction arising in tasks like Grammatical error correction (GEC). Recent approaches are based on the popular encoder-decoder (ED) model for sequence to sequence learning. The ED model auto-regressively captures full dependency among output tokens but is slow due to sequential decoding. The PIE model does parallel decoding, giving up the advantage of modeling full dependency in the output, yet it achieves accuracy competitive with the ED model for four reasons: 1. predicting edits instead of tokens, 2. labeling sequences instead of generating sequences, 3. iteratively refining predictions to capture dependencies, and 4. factorizing logits over edits and their token argument to harness pre-trained language models like BERT. Experiments on tasks spanning GEC, OCR correction and spell correction demonstrate that the PIE model is an accurate and significantly faster alternative for local sequence transduction.", "keyphrases": ["local sequence transduction", "gec", "alternative", "parallel iterative edit", "text-editing method"]} +{"id": "chambers-jurafsky-2011-template", "title": "Template-Based Information Extraction without the Templates", "abstract": "Standard algorithms for template-based information extraction (IE) require predefined template schemas, and often labeled data, to learn to extract their slot fillers (e.g., an embassy is the Target of a Bombing template). This paper describes an approach to template-based IE that removes this requirement and performs extraction without knowing the template structure in advance. Our algorithm instead learns the template structure automatically from raw text, inducing template schemas as sets of linked events (e.g., bombings include detonate, set off, and destroy events) associated with semantic roles. We also solve the standard IE task, using the induced syntactic patterns to extract role fillers from specific documents. We evaluate on the MUC-4 terrorism dataset and show that we induce template structure very similar to hand-created gold structure, and we extract role fillers with an F1 score of .40, approaching the performance of algorithms that require full knowledge of the templates.", "keyphrases": ["information extraction", "semantic role", "event template", "frame"]} +{"id": "kadlec-etal-2016-text", "title": "Text Understanding with the Attention Sum Reader Network", "abstract": "Several large cloze-style context-question-answer datasets have been introduced recently: the CNN and Daily Mail news data and the Children's Book Test. 
Thanks to the size of these datasets, the associated text comprehension task is well suited for deep-learning techniques that currently seem to outperform all alternative approaches. We present a new, simple model that uses attention to directly pick the answer from the context as opposed to computing the answer using a blended representation of words in the document as is usual in similar models. This makes the model particularly suitable for question-answering problems where the answer is a single word from the document. Ensemble of our models sets new state of the art on all evaluated datasets.", "keyphrases": ["reader", "attention-sum", "human-level performance"]} +{"id": "solorio-liu-2008-learning", "title": "Learning to Predict Code-Switching Points", "abstract": "Predicting possible code-switching points can help develop more accurate methods for automatically processing mixed-language text, such as multilingual language models for speech recognition systems and syntactic analyzers. We present in this paper exploratory results on learning to predict potential code-switching points in Spanish-English. We trained different learning algorithms using a transcription of code-switched discourse. To evaluate the performance of the classifiers, we used two different criteria: 1) measuring precision, recall, and F-measure of the predictions against the reference in the transcription, and 2) rating the naturalness of artificially generated code-switched sentences. Average scores for the code-switched sentences generated by our machine learning approach were close to the scores of those generated by humans.", "keyphrases": ["point", "learning algorithm", "discourse", "code alternation point", "past"]} +{"id": "davidov-etal-2007-fully", "title": "Fully Unsupervised Discovery of Concept-Specific Relationships by Web Mining", "abstract": "We present a web mining method for discovering and enhancing relationships in which a specified concept (word class) participates. We discover a whole range of relationships focused on the given concept, rather than generic known relationships as in most previous work. Our method is based on clustering patterns that contain concept words and other words related to them. We evaluate the method on three different rich concepts and find that in each case the method generates a broad variety of relationships with good precision.", "keyphrases": ["unsupervised discovery", "concept word", "seed", "concept acquisition method"]} +{"id": "wang-etal-2020-galileo", "title": "Galileo at SemEval-2020 Task 12: Multi-lingual Learning for Offensive Language Identification Using Pre-trained Language Models", "abstract": "This paper describes Galileo's performance in SemEval-2020 Task 12 on detecting and categorizing offensive language in social media. For Offensive Language Identification, we proposed a multi-lingual method using Pre-trained Language Models, ERNIE and XLM-R. For offensive language categorization, we proposed a knowledge distillation method trained on soft labels generated by several supervised models. Our team participated in all three sub-tasks. In Sub-task A - Offensive Language Identification, we ranked first in terms of average F1 scores in all languages. We are also the only team which ranked among the top three across all languages. 
We also took the first place in Sub-task B - Automatic Categorization of Offense Types and Sub-task C - Offense Target Identification.", "keyphrases": ["semeval-2020 task", "offensive language identification", "pre-trained language models"]} +{"id": "qin-etal-2018-robust", "title": "Robust Distant Supervision Relation Extraction via Deep Reinforcement Learning", "abstract": "Distant supervision has become the standard method for relation extraction. However, even though it is an efficient method, it does not come at no cost\u2014The resulting distantly-supervised training samples are often very noisy. To combat the noise, most of the recent state-of-the-art approaches focus on selecting one-best sentence or calculating soft attention weights over the set of the sentences of one specific entity pair. However, these methods are suboptimal, and the false positive problem is still a key stumbling bottleneck for the performance. We argue that those incorrectly-labeled candidate sentences must be treated with a hard decision, rather than being dealt with soft attention weights. To do this, our paper describes a radical solution\u2014We explore a deep reinforcement learning strategy to generate the false-positive indicator, where we automatically recognize false positives for each relation type without any supervised information. Unlike the removal operation in the previous studies, we redistribute them into the negative examples. The experimental results show that the proposed strategy significantly improves the performance of distant supervision compared to state-of-the-art systems.", "keyphrases": ["distant supervision", "relation extraction", "deep reinforcement learning"]} +{"id": "alam-etal-2021-fighting-covid", "title": "Fighting the COVID-19 Infodemic: Modeling the Perspective of Journalists, Fact-Checkers, Social Media Platforms, Policy Makers, and the Society", "abstract": "With the emergence of the COVID-19 pandemic, the political and the medical aspects of disinformation merged as the problem got elevated to a whole new level to become the first global infodemic. Fighting this infodemic has been declared one of the most important focus areas of the World Health Organization, with dangers ranging from promoting fake cures, rumors, and conspiracy theories to spreading xenophobia and panic. Addressing the issue requires solving a number of challenging problems such as identifying messages containing claims, determining their check-worthiness and factuality, and their potential to do harm as well as the nature of that harm, to mention just a few. To address this gap, we release a large dataset of 16K manually annotated tweets for fine-grained disinformation analysis that (i) focuses on COVID-19, (ii) combines the perspectives and the interests of journalists, fact-checkers, social media platforms, policy makers, and society, and (iii) covers Arabic, Bulgarian, Dutch, and English. Finally, we show strong evaluation results using pretrained Transformers, thus confirming the practical utility of the dataset in monolingual vs. multilingual, and single task vs. multitask settings.", "keyphrases": ["covid-19 infodemic", "journalist", "fact-checker"]} +{"id": "wen-etal-2015-stochastic", "title": "Stochastic Language Generation in Dialogue using Recurrent Neural Networks with Convolutional Sentence Reranking", "abstract": "The natural language generation (NLG) component of a spoken dialogue system (SDS) usually needs a substantial amount of handcrafting or a well-labeled dataset to be trained on.
These limitations add significantly to development costs and make cross-domain, multi-lingual dialogue systems intractable. Moreover, human languages are context-aware. The most natural response should be directly learned from data rather than depending on predefined syntaxes or rules. This paper presents a statistical language generator based on a joint recurrent and convolutional neural network structure which can be trained on dialogue act-utterance pairs without any semantic alignments or predefined grammar trees. Objective metrics suggest that this new model outperforms previous methods under the same experimental conditions. Results of an evaluation by human judges indicate that it produces not only high quality but linguistically varied utterances which are preferred compared to n-gram and rule-based systems.", "keyphrases": ["dialogue system", "stochastic language generation", "backward rnn reranker"]} +{"id": "chen-etal-2020-conditional", "title": "Conditional Causal Relationships between Emotions and Causes in Texts", "abstract": "The causal relationships between emotions and causes in text have recently received a lot of attention. Most of the existing works focus on the extraction of the causally related clauses from documents. However, none of these works has considered the possibility that the causal relationships among the extracted emotion and cause clauses may only be valid under a specific context, without which the extracted clauses may not be causally related. To address such an issue, we propose a new task of determining whether or not an input pair of emotion and cause has a valid causal relationship under different contexts, and construct a corresponding dataset via manual annotation and negative sampling based on an existing benchmark dataset. Furthermore, we propose a prediction aggregation module with low computational overhead to fine-tune the prediction results based on the characteristics of the input clauses. Experiments demonstrate the effectiveness and generality of our aggregation module.", "keyphrases": ["causal relationship", "emotion", "clause"]} +{"id": "krishna-etal-2021-hurdles", "title": "Hurdles to Progress in Long-form Question Answering", "abstract": "The task of long-form question answering (LFQA) involves retrieving documents relevant to a given question and using them to generate a paragraph-length answer. While many models have recently been proposed for LFQA, we show in this paper that the task formulation raises fundamental challenges regarding evaluation and dataset creation that currently preclude meaningful modeling progress. To demonstrate these challenges, we first design a new system that relies on sparse attention and contrastive retriever learning to achieve state-of-the-art performance on the ELI5 LFQA dataset. While our system tops the public leaderboard, a detailed analysis reveals several troubling trends: (1) our system's generated answers are not actually grounded in the documents that it retrieves; (2) ELI5 contains significant train / validation overlap, as at least 81% of ELI5 validation questions occur in paraphrased form in the training set; (3) ROUGE-L is not an informative metric of generated answer quality and can be easily gamed; and (4) human evaluations used for other text generation tasks are unreliable for LFQA. 
We offer suggestions to mitigate each of these issues, which we hope will lead to more rigorous LFQA research and meaningful progress in the future.", "keyphrases": ["long-form question", "lfqa", "retriever", "human evaluation"]} +{"id": "liakata-etal-2010-corpora", "title": "Corpora for the Conceptualisation and Zoning of Scientific Papers", "abstract": "We present two complementary annotation schemes for sentence based annotation of full scientific papers, CoreSC and AZ-II, applied to primary research articles in chemistry. AZ-II is the extension of AZ for chemistry papers. AZ has been shown to have been reliably annotated by independent human coders and useful for various information access tasks. Like AZ, AZ-II follows the rhetorical structure of a scientific paper and the knowledge claims made by the authors. The CoreSC scheme takes a different view of scientific papers, treating them as the humanly readable representations of scientific investigations. It seeks to retrieve the structure of the investigation from the paper as generic high-level Core Scientific Concepts (CoreSC). CoreSCs have been annotated by 16 chemistry experts over a total of 265 full papers in physical chemistry and biochemistry. We describe the differences and similarities between the two schemes in detail and present the two corpora produced using each scheme. There are 36 shared papers in the corpora, which allows us to quantitatively compare aspects of the annotation schemes. We show the correlation between the two schemes, their strengths and weaknesses and discuss the benefits of combining a rhetorical based analysis of the papers with a content-based one.", "keyphrases": ["annotation scheme", "research article", "rhetorical structure"]} +{"id": "liu-etal-2020-context", "title": "How Does Context Matter? On the Robustness of Event Detection with Context-Selective Mask Generalization", "abstract": "Event detection (ED) aims to identify and classify event triggers in texts, which is a crucial subtask of event extraction (EE). Despite many advances in ED, the existing studies are typically centered on improving the overall performance of an ED model, which rarely consider the robustness of an ED model. This paper aims to fill this research gap by stressing the importance of robustness modeling in ED models. We first pinpoint three stark cases demonstrating the brittleness of the existing ED models. After analyzing the underlying reason, we propose a new training mechanism, called context-selective mask generalization for ED, which can effectively mine context-specific patterns for learning and robustify an ED model. The experimental results have confirmed the effectiveness of our model regarding defending against adversarial attacks, exploring unseen predicates, and tackling ambiguity cases. Moreover, a deeper analysis suggests that our approach can learn a complementary predictive bias with most ED models that use full context for feature learning.", "keyphrases": ["robustness", "event detection", "context-selective mask generalization"]} +{"id": "sennrich-2017-grammatical", "title": "How Grammatical is Character-level Neural Machine Translation? Assessing MT Quality with Contrastive Translation Pairs", "abstract": "Analysing translation quality in regards to specific linguistic phenomena has historically been difficult and time-consuming.
Neural machine translation has the attractive property that it can produce scores for arbitrary translations, and we propose a novel method to assess how well NMT systems model specific linguistic phenomena such as agreement over long distances, the production of novel words, and the faithful translation of polarity. The core idea is that we measure whether a reference translation is more probable under an NMT model than a contrastive translation which introduces a specific type of error. We present LingEval97, a large-scale data set of 97000 contrastive translation pairs based on the WMT English-German translation task, with errors automatically created with simple rules. We report results for a number of systems, and find that recently introduced character-level NMT systems perform better at transliteration than models with byte-pair encoding (BPE) segmentation, but perform more poorly at morphosyntactic agreement, and translating discontiguous units of meaning.", "keyphrases": ["contrastive translation pair", "polarity", "incorrect translation"]} +{"id": "mann-yarowsky-2003-unsupervised", "title": "Unsupervised Personal Name Disambiguation", "abstract": "This paper presents a set of algorithms for distinguishing personal names with multiple real referents in text, based on little or no supervision. The approach utilizes an unsupervised clustering technique over a rich feature space of biographic facts, which are automatically extracted via a language-independent bootstrapping process. The induced clustering of named entities are then partitioned and linked to their real referents via the automatically extracted biographic data. Performance is evaluated based on both a test set of hand-labeled multi-referent personal names and via automatically generated pseudonames.", "keyphrases": ["name", "disambiguation", "agglomerative clustering algorithm", "local biographical information"]} +{"id": "manion-sainudiin-2013-daebak", "title": "DAEBAK!: Peripheral Diversity for Multilingual Word Sense Disambiguation", "abstract": "We introduce Peripheral Diversity (PD) as a knowledge-based approach to achieve multilingual Word Sense Disambiguation (WSD). PD exploits the frequency and diverse use of word senses in semantic subgraphs derived from larger sense inventories such as BabelNet, Wikipedia, and WordNet in order to achieve WSD. PD\u2019s f-measure scores for SemEval 2013 Task 12 outperform the Most Frequent Sense (MFS) baseline for two of the five languages: English, French, German, Italian, and Spanish. Despite PD remaining under-developed and under-explored, it demonstrates that it is robust, competitive, and encourages development.", "keyphrases": ["peripheral diversity", "word sense disambiguation", "daebak"]} +{"id": "jwalapuram-etal-2019-evaluating", "title": "Evaluating Pronominal Anaphora in Machine Translation: An Evaluation Measure and a Test Suite", "abstract": "The ongoing neural revolution in machine translation has made it easier to model larger contexts beyond the sentence-level, which can potentially help resolve some discourse-level ambiguities such as pronominal anaphora, thus enabling better translations. Unfortunately, even when the resulting improvements are seen as substantial by humans, they remain virtually unnoticed by traditional automatic evaluation measures like BLEU, as only a few words end up being affected. Thus, specialized evaluation measures are needed.
With this aim in mind, we contribute an extensive, targeted dataset that can be used as a test suite for pronoun translation, covering multiple source languages and different pronoun errors drawn from real system translations, for English. We further propose an evaluation measure to differentiate good and bad pronoun translations. We also conduct a user study to report correlations with human judgments.", "keyphrases": ["pronominal anaphora", "machine translation", "evaluation measure"]} +{"id": "feng-etal-2019-misleading", "title": "Misleading Failures of Partial-input Baselines", "abstract": "Recent work establishes dataset difficulty and removes annotation artifacts via partial-input baselines (e.g., hypothesis-only model for SNLI or question-only model for VQA). A successful partial-input baseline indicates that the dataset is cheatable. But the converse is not necessarily true: failures of partial-input baselines do not mean the dataset is free of artifacts. We first design artificial datasets to illustrate how the trivial patterns that are only visible in the full input can evade any partial-input baseline. Next, we identify such artifacts in the SNLI dataset\u2014a hypothesis-only model augmented with trivial patterns in the premise can solve 15% of previously-thought \u201chard\u201d examples. Our work provides a caveat for the use and creation of partial-input baselines for datasets.", "keyphrases": ["partial-input baseline", "artifact", "premise", "future dataset creation"]} +{"id": "villemonte-de-la-clergerie-etal-2008-passage", "title": "PASSAGE: from French Parser Evaluation to Large Sized Treebank", "abstract": "In this paper we present the PASSAGE project which aims at building automatically a French Treebank of large size by combining the output of several parsers, using the EASY annotation scheme. We also present the results of the first evaluation campaign of the project and the preliminary results we have obtained with our ROVER procedure for combining parsers automatically.", "keyphrases": ["passage", "parse combination algorithm", "new option"]} +{"id": "fernandez-etal-2014-gplsi", "title": "GPLSI: Supervised Sentiment Analysis in Twitter using Skipgrams", "abstract": "In this paper we describe the system submitted for the SemEval 2014 Task 9 (Sentiment Analysis in Twitter) Subtask B. Our contribution consists of a supervised approach using machine learning techniques, which uses the terms in the dataset as features. In this work we do not employ any external knowledge and resources. The novelty of our approach lies in the use of words, ngrams and skipgrams (not-adjacent ngrams) as features, and how they are weighted.", "keyphrases": ["sentiment analysis", "twitter", "skipgram"]} +{"id": "pasunuru-bansal-2017-multi", "title": "Multi-Task Video Captioning with Video and Entailment Generation", "abstract": "Video captioning, the task of describing the content of a video, has seen some promising improvements in recent years with sequence-to-sequence models, but accurately learning the temporal and logical dynamics involved in the task still remains a challenge, especially given the lack of sufficient annotated data. We improve video captioning by sharing knowledge with two related directed-generation tasks: a temporally-directed unsupervised video prediction task to learn richer context-aware video encoder representations, and a logically-directed language entailment generation task to learn better video-entailing caption decoder representations.
For this, we present a many-to-many multi-task learning model that shares parameters across the encoders and decoders of the three tasks. We achieve significant improvements and the new state-of-the-art on several standard video captioning datasets using diverse automatic and human evaluations. We also show mutual multi-task improvements on the entailment generation task.", "keyphrases": ["video", "entailment generation", "multi-task learning"]} +{"id": "mellebeek-etal-2006-multi", "title": "Multi-Engine Machine Translation by Recursive Sentence Decomposition", "abstract": "In this paper, we present a novel approach to combine the outputs of multiple MT engines into a consensus translation. In contrast to previous Multi-Engine Machine Translation (MEMT) techniques, we do not rely on word alignments of output hypotheses, but prepare the input sentence for multi-engine processing. We do this by using a recursive decomposition algorithm that produces simple chunks as input to the MT engines. A consensus translation is produced by combining the best chunk translations, selected through majority voting, a trigram language model score and a confidence score assigned to each MT engine. We report statistically significant relative improvements of up to 9% BLEU score in experiments (English\u2192Spanish) carried out on an 800-sentence test set extracted from the Penn-II Treebank.", "keyphrases": ["recursive decomposition algorithm", "chunk", "multi-engine machine translation"]} +{"id": "ling-etal-2013-microblogs", "title": "Microblogs as Parallel Corpora", "abstract": "In the ever-expanding sea of microblog data, there is a surprising amount of naturally occurring parallel text: some users post multilingual messages targeting international audiences while others \u201cretweet\u201d translations. We present an efficient method for detecting these messages and extracting parallel segments from them. We have been able to extract over 1M Chinese-English parallel segments from Sina Weibo (the Chinese counterpart of Twitter) using only their public APIs. As a supplement to existing parallel training data, our automatically extracted parallel data yields substantial translation quality improvements in translating microblog text and modest improvements in translating edited news commentary. The resources described in this paper are available at http://www.cs.cmu.edu/ lingwang/utopia.", "keyphrases": ["parallel data", "microblog", "social medium", "machine translation"]} +{"id": "cao-gete-2018-using", "title": "Using Discourse Information for Education with a Spanish-Chinese Parallel Corpus", "abstract": "Nowadays, with the fruitful achievements in Natural Language Processing (NLP) studies, the concern of using NLP technologies for education has called much attention. As two of the most spoken languages in the world, Spanish and Chinese occupy important positions in both NLP studies and bilingual education. In this paper, we present a Spanish-Chinese parallel corpus with annotated discourse information that aims to serve for bilingual language education. The theoretical framework of this work is Rhetorical Structure Theory (RST). The corpus is composed of 100 Spanish-Chinese parallel texts, and all the discourse markers (DM) have been annotated to form the education source. With pedagogical aim, we also present two programs that generate automatic exercises for both Spanish and Chinese students using our corpus.
The reliability of this work has been evaluated using the Kappa coefficient.", "keyphrases": ["discourse information", "education", "spanish-chinese parallel corpus"]} +{"id": "chen-etal-2021-improving", "title": "Improving Faithfulness in Abstractive Summarization with Contrast Candidate Generation and Selection", "abstract": "Despite significant progress in neural abstractive summarization, recent studies have shown that the current models are prone to generating summaries that are unfaithful to the original context. To address the issue, we study contrast candidate generation and selection as a model-agnostic post-processing technique to correct the extrinsic hallucinations (i.e. information not present in the source text) in unfaithful summaries. We learn a discriminative correction model by generating alternative candidate summaries where named entities and quantities in the generated summary are replaced with ones with compatible semantic types from the source document. This model is then used to select the best candidate as the final output summary. Our experiments and analysis across a number of neural summarization systems show that our proposed method is effective in identifying and correcting extrinsic hallucinations. We analyze the typical hallucination phenomenon by different types of neural summarization systems, in the hope of providing insights for future work in this direction.", "keyphrases": ["faithfulness", "abstractive summarization", "selection"]} +{"id": "iosif-mishra-2014-speaker", "title": "From Speaker Identification to Affective Analysis: A Multi-Step System for Analyzing Children's Stories", "abstract": "We propose a multi-step system for the analysis of children\u2019s stories that is intended to be part of a larger text-to-speech-based storytelling system. A hybrid approach is adopted, where pattern-based and statistical methods are used along with utilization of external knowledge sources. This system performs the following story analysis tasks: identification of characters in each story; attribution of quotes to specific story characters; identification of character age, gender and other salient personality attributes; and finally, affective analysis of the quoted material. The different types of analyses were evaluated using several datasets. For the quote attribution, as well as for the gender and age estimation, substantial improvement over baseline was realized, whereas results for personality attribute estimation and valence estimation are more modest.", "keyphrases": ["affective analysis", "multi-step system", "story"]} +{"id": "liang-etal-2020-towards", "title": "Towards Debiasing Sentence Representations", "abstract": "As natural language processing methods are increasingly deployed in real-world scenarios such as healthcare, legal systems, and social science, it becomes necessary to recognize the role they potentially play in shaping social biases and stereotypes. Previous work has revealed the presence of social biases in widely used word embeddings involving gender, race, religion, and other social constructs. While some methods were proposed to debias these word-level embeddings, there is a need to perform debiasing at the sentence-level given the recent shift towards new contextualized sentence representations such as ELMo and BERT. In this paper, we investigate the presence of social biases in sentence-level representations and propose a new method, Sent-Debias, to reduce these biases.
We show that Sent-Debias is effective in removing biases, and at the same time, preserves performance on sentence-level downstream tasks such as sentiment analysis, linguistic acceptability, and natural language understanding. We hope that our work will inspire future research on characterizing and removing social biases from widely adopted sentence representations for fairer NLP.", "keyphrases": ["sentence representation", "gender", "bert", "sent-debias"]} +{"id": "ge-etal-2021-baco", "title": "BACO: A Background Knowledge- and Content-Based Framework for Citing Sentence Generation", "abstract": "In this paper, we focus on the problem of citing sentence generation, which entails generating a short text to capture the salient information in a cited paper and the connection between the citing and cited paper. We present BACO, a BAckground knowledge- and COntent-based framework for citing sentence generation, which considers two types of information: (1) background knowledge by leveraging structural information from a citation network; and (2) content, which represents in-depth information about what to cite and why to cite. First, a citation network is encoded to provide background knowledge. Second, we apply salience estimation to identify what to cite by estimating the importance of sentences in the cited paper. During the decoding stage, both types of information are combined to facilitate the text generation, and then we conduct a joint training for the generator and citation function classification to make the model aware of why to cite. Our experimental results show that our framework outperforms comparative baselines.", "keyphrases": ["background knowledge-", "sentence generation", "cited paper"]} +{"id": "ebert-etal-2015-cis", "title": "CIS-positive: A Combination of Convolutional Neural Networks and Support Vector Machines for Sentiment Analysis in Twitter", "abstract": "This paper describes our automatic sentiment analysis system \u2013 CIS-positive \u2013 for SemEval 2015 Task 10 \u201cSentiment Analysis in Twitter\u201d, subtask B \u201cMessage Polarity Classification\u201d. In this system, we propose to normalize the Twitter data in a way that maximizes the coverage of sentiment lexicons and minimizes distracting elements. Furthermore, we integrate the output of Convolutional Neural Networks into Support Vector Machines for the polarity classification. Our system achieves a macro F1 score of the positive and negative class of 59.57 on the SemEval 2015 test data.", "keyphrases": ["convolutional neural networks", "sentiment analysis", "twitter"]} +{"id": "abdul-mageed-ungar-2017-emonet", "title": "EmoNet: Fine-Grained Emotion Detection with Gated Recurrent Neural Networks", "abstract": "Accurate detection of emotion from natural language has applications ranging from building emotional chatbots to better understanding individuals and their lives. However, progress on emotion detection has been hampered by the absence of large labeled datasets. In this work, we build a very large dataset for fine-grained emotions and develop deep learning models on it. We achieve a new state-of-the-art on 24 fine-grained types of emotions (with an average accuracy of 87.58%). 
We also extend the task beyond emotion types to model Robert Plutchik's 8 primary emotion dimensions, acquiring a superior accuracy of 95.68%.", "keyphrases": ["emotion", "deep learning", "distant supervision", "hashtag", "writer"]} +{"id": "berant-etal-2014-modeling", "title": "Modeling Biological Processes for Reading Comprehension", "abstract": "Machine reading calls for programs that read and understand text, but most current work only attempts to extract facts from redundant web-scale corpora. In this paper, we focus on a new reading comprehension task that requires complex reasoning over a single document. The input is a paragraph describing a biological process, and the goal is to answer questions that require an understanding of the relations between entities and events in the process. To answer the questions, we first predict a rich structure representing the process in the paragraph. Then, we map the question to a formal query, which is executed against the predicted structure. We demonstrate that answering questions via predicted structures substantially improves accuracy over baselines that use shallower representations.", "keyphrases": ["biological process", "reading comprehension task", "reasoning", "event argument", "processbank dataset"]} +{"id": "yu-etal-2019-gumdrop", "title": "GumDrop at the DISRPT2019 Shared Task: A Model Stacking Approach to Discourse Unit Segmentation and Connective Detection", "abstract": "In this paper we present GumDrop, Georgetown University's entry at the DISRPT 2019 Shared Task on automatic discourse unit segmentation and connective detection. Our approach relies on model stacking, creating a heterogeneous ensemble of classifiers, which feed into a metalearner for each final task. The system encompasses three trainable component stacks: one for sentence splitting, one for discourse unit segmentation and one for connective detection. The flexibility of each ensemble allows the system to generalize well to datasets of different sizes and with varying levels of homogeneity.", "keyphrases": ["discourse unit segmentation", "connective detection", "gumdrop"]} +{"id": "sujana-etal-2020-rumor", "title": "Rumor Detection on Twitter Using Multiloss Hierarchical BiLSTM with an Attenuation Factor", "abstract": "Social media platforms such as Twitter have become a breeding ground for unverified information or rumors. These rumors can threaten people's health, endanger the economy, and affect the stability of a country. Many researchers have developed models to classify rumors using traditional machine learning or vanilla deep learning models. However, previous studies on rumor detection have achieved low precision and are time consuming. Inspired by the hierarchical model and multitask learning, a multiloss hierarchical BiLSTM model with an attenuation factor is proposed in this paper. The model is divided into two BiLSTM modules: post level and event level. By means of this hierarchical structure, the model can extract deep information from limited quantities of text. Each module has a loss function that helps to learn bilateral features and reduce the training time. An attenuation factor is added at the post level to increase the accuracy.
The results on two rumor datasets demonstrate that our model achieves better performance than that of state-of-the-art machine learning and vanilla deep learning models.", "keyphrases": ["twitter", "bilstm model", "rumor detection"]} +{"id": "krahmer-etal-2003-graph", "title": "Graph-Based Generation of Referring Expressions", "abstract": "This article describes a new approach to the generation of referring expressions. We propose to formalize a scene (consisting of a set of objects with various properties and relations) as a labeled directed graph and describe content selection (which properties to include in a referring expression) as a subgraph construction problem. Cost functions are used to guide the search process and to give preference to some solutions over others. The current approach has four main advantages: (1) Graph structures have been studied extensively, and by moving to a graph perspective we get direct access to the many theories and algorithms for dealing with graphs; (2) many existing generation algorithms can be reformulated in terms of graphs, and this enhances comparison and integration of the various approaches; (3) the graph perspective allows us to solve a number of problems that have plagued earlier algorithms for the generation of referring expressions; and (4) the combined use of graphs and cost functions paves the way for an integration of rule-based generation techniques with more recent stochastic approaches.", "keyphrases": ["referring expression", "natural language generation", "graph-based approach"]} +{"id": "benamara-saint-dizier-2003-dynamic", "title": "Dynamic Generation of Cooperative Natural Language Responses in WEBCOOP", "abstract": "We present in this paper a formal approach for the dynamic generation of cooperative NL responses in WEBCOOP, a system that provides intelligent responses in French to natural language queries on the Web. The system integrates reasoning procedures and NLG techniques paired with hypertext links. Content determination is organized in two steps: providing explanations that report user misconceptions and then offering flexible solutions that reflect the cooperative know-how of the system.", "keyphrases": ["natural language response", "webcoop", "dynamic generation", "cooperative question"]} +{"id": "chen-etal-2018-attacking", "title": "Attacking Visual Language Grounding with Adversarial Examples: A Case Study on Neural Image Captioning", "abstract": "Visual language grounding is widely studied in modern neural image captioning systems, which typically adopt an encoder-decoder framework consisting of two principal components: a convolutional neural network (CNN) for image feature extraction and a recurrent neural network (RNN) for language caption generation. To study the robustness of language grounding to adversarial perturbations in machine vision and perception, we propose Show-and-Fool, a novel algorithm for crafting adversarial examples in neural image captioning. The proposed algorithm provides two evaluation approaches, which check if we can mislead neural image captioning systems to output some randomly chosen captions or keywords. Our extensive experiments show that our algorithm can successfully craft visually-similar adversarial examples with randomly targeted captions or keywords, and the adversarial examples can be made highly transferable to other image captioning systems.
Consequently, our approach leads to new robustness implications of neural image captioning and novel insights in visual language grounding.", "keyphrases": ["visual language grounding", "adversarial example", "neural image captioning"]} +{"id": "hasan-ng-2010-conundrums", "title": "Conundrums in Unsupervised Keyphrase Extraction: Making Sense of the State-of-the-Art", "abstract": "State-of-the-art approaches for unsupervised keyphrase extraction are typically evaluated on a single dataset with a single parameter setting. Consequently, it is unclear how effective these approaches are on a new dataset from a different domain, and how sensitive they are to changes in parameter settings. To gain a better understanding of state-of-the-art unsupervised keyphrase extraction algorithms, we conduct a systematic evaluation and analysis of these algorithms on a variety of standard evaluation datasets.", "keyphrases": ["unsupervised keyphrase extraction", "tf-idf", "ranking"]} +{"id": "mostafazadeh-etal-2016-caters", "title": "CaTeRS: Causal and Temporal Relation Scheme for Semantic Annotation of Event Structures", "abstract": "Learning commonsense causal and temporal relation between events is one of the major steps towards deeper language understanding. This is even more crucial for understanding stories and script learning. A prerequisite for learning scripts is a semantic framework which enables capturing rich event structures. In this paper we introduce a novel semantic annotation framework, called Causal and Temporal Relation Scheme (CaTeRS), which is unique in simultaneously capturing a comprehensive set of temporal and causal relations between events. By annotating a total of 1,600 sentences in the context of 320 five-sentence short stories sampled from ROCStories corpus, we demonstrate that these stories are indeed full of causal and temporal relations. Furthermore, we show that the CaTeRS annotation scheme enables high inter-annotator agreement for broad-coverage event entity annotation and moderate agreement on semantic link annotation.", "keyphrases": ["causal", "temporal relation scheme", "caters annotation scheme", "commonsense reasoning standpoint"]} +{"id": "wang-etal-2020-combining", "title": "Combining Self-Training and Self-Supervised Learning for Unsupervised Disfluency Detection", "abstract": "Most existing approaches to disfluency detection heavily rely on human-annotated corpora, which is expensive to obtain in practice. There have been several proposals to alleviate this issue with, for instance, self-supervised learning techniques, but they still require human-annotated corpora. In this work, we explore the unsupervised learning paradigm which can potentially work with unlabeled text corpora that are cheaper and easier to obtain. Our model builds upon the recent work on Noisy Student Training, a semi-supervised learning approach that extends the idea of self-training. Experimental results on the commonly used English Switchboard test set show that our approach achieves competitive performance compared to the previous state-of-the-art supervised systems using contextualized word embeddings (e.g. 
BERT and ELECTRA).", "keyphrases": ["self-training", "disfluency detection", "pseudo label", "teacher"]} +{"id": "dusek-jurcicek-2016-sequence", "title": "Sequence-to-Sequence Generation for Spoken Dialogue via Deep Syntax Trees and Strings", "abstract": "We present a natural language generator based on the sequence-to-sequence approach that can be trained to produce natural language strings as well as deep syntax dependency trees from input dialogue acts, and we use it to directly compare two-step generation with separate sentence planning and surface realization stages to a joint, one-step approach. We were able to train both setups successfully using very little training data. The joint setup offers better performance, surpassing state-of-the-art with regards to n-gram-based scores while providing more relevant outputs.", "keyphrases": ["language generator", "sentence planning", "encoder-decoder model"]} +{"id": "badaro-etal-2018-ema", "title": "EMA at SemEval-2018 Task 1: Emotion Mining for Arabic", "abstract": "While significant progress has been achieved for Opinion Mining in Arabic (OMA), very limited efforts have been put towards the task of Emotion mining in Arabic. In fact, businesses are interested in learning a fine-grained representation of how users are feeling towards their products or services. In this work, we describe the methods used by the team Emotion Mining in Arabic (EMA), as part of the SemEval-2018 Task 1 for Affect Mining for Arabic tweets. EMA participated in all 5 subtasks. For the five tasks, several preprocessing steps were evaluated and eventually the best system included diacritics removal, elongation adjustment, replacement of emojis by the corresponding Arabic word, character normalization and light stemming. Moreover, several features were evaluated along with different classification and regression techniques. For the 5 subtasks, word embeddings feature turned out to perform best along with Ensemble technique. EMA achieved the 1st place in subtask 5, and 3rd place in subtasks 1 and 3.", "keyphrases": ["semeval-2018 task", "emotion mining", "arabic"]} +{"id": "yang-etal-2020-ted", "title": "TED: A Pretrained Unsupervised Summarization Model with Theme Modeling and Denoising", "abstract": "Text summarization aims to extract essential information from a piece of text and transform the text into a concise version. Existing unsupervised abstractive summarization models leverage recurrent neural networks framework while the recently proposed transformer exhibits much more capability. Moreover, most of previous summarization models ignore abundant unlabeled corpora resources available for pretraining. In order to address these issues, we propose TED, a transformer-based unsupervised abstractive summarization system with pretraining on large-scale data. We first leverage the lead bias in news articles to pretrain the model on millions of unlabeled corpora. Next, we finetune TED on target domains through theme modeling and a denoising autoencoder to enhance the quality of generated summaries. Notably, TED outperforms all unsupervised abstractive baselines on NYT, CNN/DM and English Gigaword datasets with various document styles. 
Further analysis shows that the summaries generated by TED are highly abstractive, and each component in the objective function of TED is highly effective.", "keyphrases": ["theme modeling", "news article", "ted"]} +{"id": "voita-etal-2019-bottom", "title": "The Bottom-up Evolution of Representations in the Transformer: A Study with Machine Translation and Language Modeling Objectives", "abstract": "We seek to understand how the representations of individual tokens and the structure of the learned feature space evolve between layers in deep neural networks under different learning objectives. We chose the Transformers for our analysis as they have been shown effective with various tasks, including machine translation (MT), standard left-to-right language models (LM) and masked language modeling (MLM). Previous work used black-box probing tasks to show that the representations learned by the Transformer differ significantly depending on the objective. In this work, we use canonical correlation analysis and mutual information estimators to study how information flows across Transformer layers and observe that the choice of the objective determines this process. For example, as you go from bottom to top layers, information about the past in left-to-right language models gets vanished and predictions about the future get formed. In contrast, for MLM, representations initially acquire information about the context around the token, partially forgetting the token identity and producing a more generalized token representation. The token identity then gets recreated at the top MLM layers.", "keyphrases": ["evolution", "machine translation", "information flow", "transformer layer", "gpt-2"]} +{"id": "akbik-etal-2019-pooled", "title": "Pooled Contextualized Embeddings for Named Entity Recognition", "abstract": "Contextual string embeddings are a recent type of contextualized word embedding that were shown to yield state-of-the-art results when utilized in a range of sequence labeling tasks. They are based on character-level language models which treat text as distributions over characters and are capable of generating embeddings for any string of characters within any textual context. However, such purely character-based approaches struggle to produce meaningful embeddings if a rare string is used in an underspecified context. To address this drawback, we propose a method in which we dynamically aggregate contextualized embeddings of each unique string that we encounter. We then use a pooling operation to distill a \u201cglobal\u201d word representation from all contextualized instances. We evaluate these \u201cpooled contextualized embeddings\u201d on common named entity recognition (NER) tasks such as CoNLL-03 and WNUT and show that our approach significantly improves the state-of-the-art for NER. We make all code and pre-trained models available to the research community for use and reproduction.", "keyphrases": ["entity recognition", "language model", "underspecified context", "pre-trained model"]} +{"id": "hopkins-kiela-2017-automatically", "title": "Automatically Generating Rhythmic Verse with Neural Networks", "abstract": "We propose two novel methodologies for the automatic generation of rhythmic poetry in a variety of forms. The first approach uses a neural language model trained on a phonetic encoding to learn an implicit representation of both the form and content of English poetry. This model can effectively learn common poetic devices such as rhyme, rhythm and alliteration.
The second approach considers poetry generation as a constraint satisfaction problem where a generative neural language model is tasked with learning a representation of content, and a discriminative weighted finite state machine constrains it on the basis of form. By manipulating the constraints of the latter model, we can generate coherent poetry with arbitrary forms and themes. A large-scale extrinsic evaluation demonstrated that participants consider machine-generated poems to be written by humans 54% of the time. In addition, participants rated a machine-generated poem to be the best amongst all evaluated.", "keyphrases": ["language model", "rhythm", "poetry generation"]} +{"id": "rastogi-etal-2016-weighting", "title": "Weighting Finite-State Transductions With Neural Context", "abstract": "How should one apply deep learning to tasks such as morphological reinflection, which stochastically edit one string to get another? A recent approach to such sequence-to-sequence tasks is to compress the input string into a vector that is then used to generate the output string, using recurrent neural networks. In contrast, we propose to keep the traditional architecture, which uses a finite-state transducer to score all possible output strings, but to augment the scoring function with the help of recurrent networks. A stack of bidirectional LSTMs reads the input string from left-to-right and right-to-left, in order to summarize the input context in which a transducer arc is applied. We combine these learned features with the transducer to define a probability distribution over aligned output strings, in the form of a weighted finite-state automaton. This reduces hand-engineering of features, allows learned features to examine unbounded context in the input string, and still permits exact inference through dynamic programming. We illustrate our method on the tasks of morphological reinflection and lemmatization.", "keyphrases": ["morphology", "transducer", "possible output string", "bidirectional lstm", "finite-state transducer"]} +{"id": "barreto-etal-2006-open", "title": "Open Resources and Tools for the Shallow Processing of Portuguese: The TagShare Project", "abstract": "This paper presents the TagShare project and the linguistic resources and tools for the shallow processing of Portuguese developed in its scope. These resources include a 1 million token corpus that has been accurately hand annotated with a variety of linguistic information, as well as several state of the art shallow processing tools capable of automatically producing that type of annotation. At present, the linguistic annotations in the corpus are sentence and paragraph boundaries, token boundaries, morphosyntactic POS categories, values of inflection features, lemmas and named entities. Hence, the set of tools comprise a sentence chunker, a tokenizer, a POS tagger, nominal and verbal analyzers and lemmatizers, a verbal conjugator, a nominal \u201cinflector\u201d, and a named-entity recognizer, some of which underlie several online services.", "keyphrases": ["shallow processing", "portuguese", "tagshare project"]} +{"id": "louis-nenkova-2013-automatically", "title": "Automatically Assessing Machine Summary Content Without a Gold Standard", "abstract": "The most widely adopted approaches for evaluation of summary content follow some protocol for comparing a summary with gold-standard human summaries, which are traditionally called model summaries.
This evaluation paradigm falls short when human summaries are not available and becomes less accurate when only a single model is available. We propose three novel evaluation techniques. Two of them are model-free and do not rely on a gold standard for the assessment. The third technique improves standard automatic evaluations by expanding the set of available model summaries with chosen system summaries. We show that quantifying the similarity between the source text and its summary with appropriately chosen measures produces summary scores which replicate human assessments accurately. We also explore ways of increasing evaluation quality when only one human model summary is available as a gold standard. We introduce pseudomodels, which are system summaries deemed to contain good content according to automatic evaluation. Combining the pseudomodels with the single human model to form the gold-standard leads to higher correlations with human judgments compared to using only the one available model. Finally, we explore the feasibility of another measure\u2014similarity between a system summary and the pool of all other system summaries for the same input. This method of comparison with the consensus of systems produces impressively accurate rankings of system summaries, achieving correlation with human rankings above 0.9.", "keyphrases": ["gold standard", "model summary", "pyramid"]} +{"id": "rubino-etal-2015-abu", "title": "Abu-MaTran at WMT 2015 Translation Task: Morphological Segmentation and Web Crawling", "abstract": "This paper presents the machine translation systems submitted by the Abu-MaTran project for the Finnish\u2010English language pair at the WMT 2015 translation task. We tackle the lack of resources and complex morphology of the Finnish language by (i) crawling parallel and monolingual data from the Web and (ii) applying rule-based and unsupervised methods for morphological segmentation. Several statistical machine translation approaches are evaluated and then combined to obtain our final submissions, which are the top performing English-to-Finnish unconstrained (all automatic metrics) and constrained (BLEU), and Finnish-to-English constrained (TER) systems.", "keyphrases": ["wmt", "translation task", "morphological segmentation"]} +{"id": "wu-etal-2018-neural", "title": "Neural Metaphor Detecting with CNN-LSTM Model", "abstract": "Metaphors are figurative language widely used in daily life and literature. It's an important task to detect the metaphors evoked by texts. Thus, the metaphor shared task aims to extract metaphors from plain texts at word level. We propose to use a CNN-LSTM model for this task. Our model combines CNN and LSTM layers to utilize both local and long-range contextual information for identifying metaphorical information. In addition, we compare the performance of the softmax classifier and conditional random field (CRF) for sequential labeling in this task. We also incorporated some additional features such as part of speech (POS) tags and word cluster to improve the performance of the model. Our best model achieved 65.06% F-score in the all POS testing subtask and 67.15% in the verbs testing subtask.", "keyphrases": ["metaphor", "cnn-lstm model", "word2vec"]} +{"id": "cheng-etal-2018-towards", "title": "Towards Robust Neural Machine Translation", "abstract": "Small perturbations in the input can severely distort intermediate representations and thus impact translation quality of neural machine translation (NMT) models. 
In this paper, we propose to improve the robustness of NMT models with adversarial stability training. The basic idea is to make both the encoder and decoder in NMT models robust against input perturbations by enabling them to behave similarly for the original input and its perturbed counterpart. Experimental results on Chinese-English, English-German and English-French translation tasks show that our approaches can not only achieve significant improvements over strong NMT systems but also improve the robustness of NMT models.", "keyphrases": ["neural machine translation", "adversarial stability training", "noise", "feature level"]} +{"id": "touileb-etal-2020-gender", "title": "Gender and sentiment, critics and authors: a dataset of Norwegian book reviews", "abstract": "Gender bias in models and datasets is widely studied in NLP. The focus has usually been on analysing how females and males express themselves, or how females and males are described. However, a less studied aspect is the combination of these two perspectives, how females and males describe the same or opposite gender. In this paper, we present a new gender annotated sentiment dataset of critics reviewing the works of female and male authors. We investigate if this newly annotated dataset contains differences in how the works of male and female authors are critiqued, in particular in terms of positive and negative sentiment. We also explore the differences in how this is done by male and female critics. We show that there are differences in how critics assess the works of authors of the same or opposite gender. For example, male critics rate crime novels written by females, and romantic and sentimental works written by males, more negatively.", "keyphrases": ["critic", "norwegian book review", "gender"]} +{"id": "al-onaizan-papineni-2006-distortion", "title": "Distortion Models for Statistical Machine Translation", "abstract": "In this paper, we argue that n-gram language models are not sufficient to address word reordering required for Machine Translation. We propose a new distortion model that can be used with existing phrase-based SMT decoders to address those n-gram language model limitations. We present empirical results in Arabic to English Machine Translation that show statistically significant improvements when our proposed model is used. We also propose a novel metric to measure word order similarity (or difference) between any pair of languages based on word alignments.", "keyphrases": ["distortion model", "scope", "deterministic choice"]} +{"id": "ma-etal-2022-template", "title": "Template-free Prompt Tuning for Few-shot NER", "abstract": "Prompt-based methods have been successfully applied in sentence-level few-shot learning tasks, mostly owing to the sophisticated design of templates and label words. However, when applied to token-level labeling tasks such as NER, it would be time-consuming to enumerate the template queries over all potential entity spans. In this work, we propose a more elegant method to reformulate NER tasks as LM problems without any templates. Specifically, we discard the template construction process while maintaining the word prediction paradigm of pre-training models to predict a class-related pivot word (or label word) at the entity position. Meanwhile, we also explore principled ways to automatically search for appropriate label words that the pre-trained models can easily adapt to. 
While avoiding the complicated template-based process, the proposed LM objective also reduces the gap between different objectives used in pre-training and fine-tuning; thus it can better benefit the few-shot performance. Experimental results demonstrate the effectiveness of the proposed method over bert-tagger and template-based method under few-shot settings. Moreover, the decoding speed of the proposed method is up to 1930.12 times faster than the template-based method.", "keyphrases": ["prompt tuning", "few-shot setting", "low-resource ner"]} +{"id": "williams-koehn-2012-ghkm", "title": "GHKM Rule Extraction and Scope-3 Parsing in Moses", "abstract": "We developed a string-to-tree system for English-German, achieving competitive results against a hierarchical model baseline. We provide details of our implementation of GHKM rule extraction and scope-3 parsing in the Moses toolkit. We compare systems trained on the same data using different grammar extraction methods.", "keyphrases": ["scope-3", "moses", "ghkm rule extraction"]} +{"id": "zwarts-etal-2010-detecting", "title": "Detecting Speech Repairs Incrementally Using a Noisy Channel Approach", "abstract": "Unrehearsed spoken language often contains disfluencies. In order to correctly interpret a spoken utterance, any such disfluencies must be identified and removed or otherwise dealt with. Operating on transcripts of speech which contain disfluencies, our particular focus here is the identification and correction of speech repairs using a noisy channel model. Our aim is to develop a high-accuracy mechanism that can identify speech repairs in an incremental fashion, as the utterance is processed word-by-word. \n \nWe also address the issue of the evaluation of such incremental systems. We propose a novel approach to evaluation, which evaluates performance in detecting and correcting disfluencies incrementally, rather than only assessing performance once the processing of an utterance is complete. This demonstrates some shortcomings in our basic incremental model, and so we then demonstrate a technique that improves performance on the detection of disfluencies as they happen.", "keyphrases": ["repair", "noisy channel model", "detection"]} +{"id": "wang-etal-2007-chinese", "title": "Chinese Syntactic Reordering for Statistical Machine Translation", "abstract": "Syntactic reordering approaches are an effective method for handling word-order differences between source and target languages in statistical machine translation (SMT) systems. This paper introduces a reordering approach for translation from Chinese to English. We describe a set of syntactic reordering rules that exploit systematic differences between Chinese and English word order. The resulting system is used as a preprocessor for both training and test sentences, transforming Chinese sentences to be much closer to English in terms of their word order. We evaluated the reordering approach within the MOSES phrase-based SMT system (Koehn et al., 2007). The reordering approach improved the BLEU score for the MOSES system from 28.52 to 30.86 on the NIST 2006 evaluation data. 
We also conducted a series of experiments to analyze the accuracy and impact of different types of reordering rules.", "keyphrases": ["statistical machine translation", "clause restructuring", "structural difference", "preprocessing step", "localizer phrase"]} +{"id": "tsarfaty-etal-2010-statistical", "title": "Statistical Parsing of Morphologically Rich Languages (SPMRL) What, How and Whither", "abstract": "The term Morphologically Rich Languages (MRLs) refers to languages in which significant information concerning syntactic units and relations is expressed at word-level. There is ample evidence that the application of readily available statistical parsing models to such languages is susceptible to serious performance degradation. The first workshop on statistical parsing of MRLs hosts a variety of contributions which show that despite language-specific idiosyncrasies, the problems associated with parsing MRLs cut across languages and parsing frameworks. In this paper we review the current state-of-affairs with respect to parsing MRLs and point out central challenges. We synthesize the contributions of researchers working on parsing Arabic, Basque, French, German, Hebrew, Hindi and Korean to point out shared solutions across languages. The overarching analysis suggests itself as a source of directions for future investigations.", "keyphrases": ["morphologically rich languages", "parsing model", "characteristic", "constituency parser"]} +{"id": "dibia-2020-neuralqa", "title": "NeuralQA: A Usable Library for Question Answering (Contextual Query Expansion + BERT) on Large Datasets", "abstract": "Existing tools for Question Answering (QA) have challenges that limit their use in practice. They can be complex to set up or integrate with existing infrastructure, do not offer configurable interactive interfaces, and do not cover the full set of subtasks that frequently comprise the QA pipeline (query expansion, retrieval, reading, and explanation/sensemaking). To help address these issues, we introduce NeuralQA - a usable library for QA on large datasets. NeuralQA integrates well with existing infrastructure (e.g., ElasticSearch instances and reader models trained with the HuggingFace Transformers API) and offers helpful defaults for QA subtasks. It introduces and implements contextual query expansion (CQE) using a masked language model (MLM) as well as relevant snippets (RelSnip) - a method for condensing large documents into smaller passages that can be speedily processed by a document reader model. Finally, it offers a flexible user interface to support workflows for research explorations (e.g., visualization of gradient-based explanations to support qualitative inspection of model behaviour) and large scale search deployment. Code and documentation for NeuralQA is available as open source on Github.", "keyphrases": ["question answering", "large dataset", "neuralqa"]} +{"id": "kovatchev-etal-2020-decomposing", "title": "Decomposing and Comparing Meaning Relations: Paraphrasing, Textual Entailment, Contradiction, and Specificity", "abstract": "In this paper, we present a methodology for decomposing and comparing multiple meaning relations (paraphrasing, textual entailment, contradiction, and specificity). The methodology includes SHARel - a new typology that consists of 26 linguistic and 8 reason-based categories. 
We use the typology to annotate a corpus of 520 sentence pairs in English and we demonstrate that unlike previous typologies, SHARel can be applied to all relations of interest with a high inter-annotator agreement. We analyze and compare the frequency and distribution of the linguistic and reason-based phenomena involved in paraphrasing, textual entailment, contradiction, and specificity. This comparison allows for a much more in-depth analysis of the workings of the individual relations and the way they interact and compare with each other. We release all resources (typology, annotation guidelines, and annotated corpus) to the community.", "keyphrases": ["meaning relation", "contradiction", "specificity"]} +{"id": "liu-etal-2009-weighted", "title": "Weighted Alignment Matrices for Statistical Machine Translation", "abstract": "Current statistical machine translation systems usually extract rules from bilingual corpora annotated with 1-best alignments. They are prone to learn noisy rules due to alignment mistakes. We propose a new structure called weighted alignment matrix to encode all possible alignments for a parallel text compactly. The key idea is to assign a probability to each word pair to indicate how well they are aligned. We design new algorithms for extracting phrase pairs from weighted alignment matrices and estimating their probabilities. Our experiments on multiple language pairs show that using weighted matrices achieves consistent improvements over using n-best lists in significantly less extraction time.", "keyphrases": ["alignment matrix", "n-best list", "parallel sentence", "posterior probability"]} +{"id": "llorens-etal-2010-tipsem", "title": "TIPSem (English and Spanish): Evaluating CRFs and Semantic Roles in TempEval-2", "abstract": "This paper presents TIPSem, a system to extract temporal information from natural language texts for English and Spanish. TIPSem learns CRF models from training data. Although the features used include different language analysis levels, the approach is focused on semantic information. For Spanish, TIPSem achieved the best F1 score in all the tasks. For English, it obtained the best F1 in tasks B (events) and D (event-dct links); and was among the best systems in the rest.", "keyphrases": ["spanish", "tempeval-2", "semantic information"]} +{"id": "petukhova-etal-2014-interoperability", "title": "Interoperability of Dialogue Corpora through ISO 24617-2-based Querying", "abstract": "This paper explores a way of achieving interoperability: developing a query format for accessing existing annotated corpora whose expressions make use of the annotation language defined by the standard. The interpretation of expressions in the query implements a mapping from ISO 24617-2 concepts to those of the annotation scheme used in the corpus. We discuss two possible ways to query existing annotated corpora using DiAML. One way is to transform corpora into DiAML compliant format, and subsequently query these data using XQuery or XPath. The second approach is to define a DiAML query that can be directly used to retrieve requested information from the annotated data. Both approaches are valid. The first one presents a standard way of querying XML data. The second approach is a DiAML-oriented querying of dialogue act annotated data, for which we designed an interface. 
The proposed approach is tested on two important types of existing dialogue corpora: spoken two-person dialogue corpora collected and annotated within the HCRC Map Task paradigm, and multiparty face-to-face dialogues of the AMI corpus. We present the results and evaluate them with respect to accuracy and completeness through statistical comparisons between retrieved and manually constructed reference annotations.", "keyphrases": ["dialogue corpora", "querying", "interoperability"]} +{"id": "kallmeyer-maier-2010-data", "title": "Data-Driven Parsing with Probabilistic Linear Context-Free Rewriting Systems", "abstract": "This paper presents the first efficient implementation of a weighted deductive CYK parser for Probabilistic Linear Context-Free Rewriting Systems (PLCFRSs). LCFRS, an extension of CFG, can describe discontinuities in a straightforward way and is therefore a natural candidate to be used for data-driven parsing. To speed up parsing, we use different context-summary estimates of parse items, some of them allowing for A* parsing. We evaluate our parser with grammars extracted from the German NeGra treebank. Our experiments show that data-driven LCFRS parsing is feasible and yields output of competitive quality.", "keyphrases": ["lcfrs", "constituent", "data-driven parsing"]} +{"id": "zhang-komachi-2018-neural", "title": "Neural Machine Translation of Logographic Language Using Sub-character Level Information", "abstract": "Recent neural machine translation (NMT) systems have been greatly improved by encoder-decoder models with attention mechanisms and sub-word units. However, important differences between languages with logographic and alphabetic writing systems have long been overlooked. This study focuses on these differences and uses a simple approach to improve the performance of NMT systems utilizing decomposed sub-character level information for logographic languages. Our results indicate that our approach not only improves the translation capabilities of NMT systems between Chinese and English, but also further improves NMT systems between Chinese and Japanese, because it utilizes the shared information brought by similar sub-character units.", "keyphrases": ["logographic language", "sub-character level information", "neural machine translation"]} +{"id": "lopes-etal-2019-unbabels", "title": "Unbabel's Submission to the WMT2019 APE Shared Task: BERT-Based Encoder-Decoder for Automatic Post-Editing", "abstract": "This paper describes Unbabel's submission to the WMT2019 APE Shared Task for the English-German language pair. Following the recent rise of large, powerful, pre-trained models, we adapt the BERT pretrained model to perform Automatic Post-Editing in an encoder-decoder framework. Analogously to dual-encoder architectures we develop a BERT-based encoder-decoder (BED) model in which a single pretrained BERT encoder receives both the source src and machine translation mt strings. Furthermore, we explore a conservativeness factor to constrain the APE system to perform fewer edits. As the official results show, when trained on a weighted combination of in-domain and artificial training data, our BED system with the conservativeness penalty significantly improves the translations of a strong NMT system by -0.78 and +1.23 in terms of TER and BLEU, respectively. 
Finally, our submission achieves a new state-of-the-art, ex-aequo, in English-German APE of NMT.", "keyphrases": ["submission", "encoder-decoder", "automatic post-editing"]} +{"id": "grossman-etal-2020-segbo", "title": "SegBo: A Database of Borrowed Sounds in the World's Languages", "abstract": "Phonological segment borrowing is a process through which languages acquire new contrastive speech sounds as the result of borrowing new words from other languages. Despite the fact that phonological segment borrowing is documented in many of the world's languages, to date there has been no large-scale quantitative study of the phenomenon. In this paper, we present SegBo, a novel cross-linguistic database of borrowed phonological segments. We describe our data aggregation pipeline and the resulting language sample. We also present two short case studies based on the database. The first deals with the impact of large colonial languages on the sound systems of the world's languages; the second deals with universals of borrowing in the domain of rhotic consonants.", "keyphrases": ["database", "world", "phonological segment", "cross-linguistic database", "segbo"]} +{"id": "gupta-etal-2020-infotabs", "title": "INFOTABS: Inference on Tables as Semi-structured Data", "abstract": "In this paper, we observe that semi-structured tabulated text is ubiquitous; understanding it requires not only comprehending the meaning of text fragments, but also implicit relationships between them. We argue that such data can serve as a testing ground for understanding how we reason about information. To study this, we introduce a new dataset called INFOTABS, comprising human-written textual hypotheses based on premises that are tables extracted from Wikipedia info-boxes. Our analysis shows that the semi-structured, multi-domain and heterogeneous nature of the premises admits complex, multi-faceted reasoning. Experiments reveal that, while human annotators agree on the relationships between a table-hypothesis pair, several standard modeling strategies are unsuccessful at the task, suggesting that reasoning about tables can pose a difficult modeling challenge.", "keyphrases": ["table", "infotabs", "natural language inference", "claim"]} +{"id": "faruqui-etal-2018-wikiatomicedits", "title": "WikiAtomicEdits: A Multilingual Corpus of Wikipedia Edits for Modeling Language and Discourse", "abstract": "We release a corpus of 43 million atomic edits across 8 languages. These edits are mined from Wikipedia edit history and consist of instances in which a human editor has inserted a single contiguous phrase into, or deleted a single contiguous phrase from, an existing sentence. We use the collected data to show that the language generated during editing differs from the language that we observe in standard corpora, and that models trained on edits encode different aspects of semantics and discourse than models trained on raw text. We release the full corpus as a resource to aid ongoing research in semantics, discourse, and representation learning.", "keyphrases": ["discourse", "atomic edit", "wikiatomicedit", "sentence-to-sentence generation task"]} +{"id": "han-etal-2019-micron", "title": "MICRON: Multigranular Interaction for Contextualizing RepresentatiON in Non-factoid Question Answering", "abstract": "This paper studies the problem of non-factoid question answering, where the answer may span over multiple sentences. Existing solutions can be categorized into representation- and interaction-focused approaches. 
We combine their complementary strengths by a hybrid approach allowing multi-granular interactions, but represented at word level, enabling an easy integration with strong word-level signals. Specifically, we propose MICRON: Multigranular Interaction for Contextualizing RepresentatiON, a novel approach which derives contextualized uni-gram representation from n-grams. Our contributions are as follows: First, we enable multi-granular matches between question and answer n-grams. Second, by contextualizing word representation with surrounding n-grams, MICRON can naturally utilize word-based signals for query term weighting, known to be effective in information retrieval. We validate MICRON in two public non-factoid question answering datasets: WikiPassageQA and InsuranceQA, showing our model achieves the state of the art among baselines with reported performances on both datasets.", "keyphrases": ["multigranular interaction", "contextualizing representation", "non-factoid question answering"]} +{"id": "hessel-schofield-2021-effective", "title": "How effective is BERT without word ordering? Implications for language understanding and data privacy", "abstract": "Ordered word sequences contain the rich structures that define language. However, it's often not clear if or how modern pretrained language models utilize these structures. We show that the token representations and self-attention activations within BERT are surprisingly resilient to shuffling the order of input tokens, and that for several GLUE language understanding tasks, shuffling only minimally degrades performance, e.g., by 4% for QNLI. While bleak from the perspective of language understanding, our results have positive implications for cases where copyright or ethics necessitates the consideration of bag-of-words data (vs. full documents). We simulate such a scenario for three sensitive classification tasks, demonstrating minimal performance degradation vs. releasing full language sequences.", "keyphrases": ["bert", "implication", "language understanding"]} +{"id": "zhou-zhao-2019-head", "title": "Head-Driven Phrase Structure Grammar Parsing on Penn Treebank", "abstract": "Head-driven phrase structure grammar (HPSG) enjoys a uniform formalism representing rich contextual syntactic and even semantic meanings. This paper makes the first attempt to formulate a simplified HPSG by integrating constituent and dependency formal representations into head-driven phrase structure. Then two parsing algorithms are respectively proposed for two converted tree representations, division span and joint span. As HPSG encodes both constituent and dependency structure information, the proposed HPSG parsers may be regarded as a sort of joint decoder for both types of structures and thus are evaluated in terms of extracted or converted constituent and dependency parsing trees. Our parser achieves new state-of-the-art performance for both parsing tasks on Penn Treebank (PTB) and Chinese Penn Treebank, verifying the effectiveness of joint learning constituent and dependency structures. In detail, we report 95.84 F1 of constituent parsing and 97.00% UAS of dependency parsing on PTB.", "keyphrases": ["penn treebank", "hpsg", "constituent"]} +{"id": "allen-etal-2014-detecting", "title": "Detecting Disagreement in Conversations using Pseudo-Monologic Rhetorical Structure", "abstract": "Casual online forums such as Reddit, Slashdot and Digg are continuing to increase in popularity as a means of communication. 
Detecting disagreement in this domain is a considerable challenge. Many topics are unique to the conversation on the forum, and the appearance of disagreement may be much more subtle than on political blogs or social media sites such as Twitter. In this analysis we present a crowd-sourced annotated corpus for topic level disagreement detection in Slashdot, showing that disagreement detection in this domain is difficult even for humans. We then proceed to show that a new set of features determined from the rhetorical structure of the conversation significantly improves the performance on disagreement detection over a baseline consisting of unigram/bigram features, discourse markers, structural features and meta-post features.", "keyphrases": ["disagreement", "conversation", "rhetorical structure"]} +{"id": "banerjee-2019-asu", "title": "ASU at TextGraphs 2019 Shared Task: Explanation ReGeneration using Language Models and Iterative Re-Ranking", "abstract": "In this work we describe the system from the Natural Language Processing group at Arizona State University for the TextGraphs 2019 Shared Task. The task focuses on Explanation Regeneration, an intermediate step towards general multi-hop inference on large graphs. Our approach consists of modeling the explanation regeneration task as a learning to rank problem, for which we use state-of-the-art language models and explore dataset preparation techniques. We utilize an iterative reranking based approach to further improve the rankings. Our system secured 2nd rank in the task with a mean average precision (MAP) of 41.3% on the test set.", "keyphrases": ["textgraphs", "shared task", "explanation regeneration"]} +{"id": "zhang-etal-2019-hibert", "title": "HIBERT: Document Level Pre-training of Hierarchical Bidirectional Transformers for Document Summarization", "abstract": "Neural extractive summarization models usually employ a hierarchical encoder for document encoding and they are trained using sentence-level labels, which are created heuristically using rule-based methods. Training the hierarchical encoder with these inaccurate labels is challenging. Inspired by the recent work on pre-training transformer sentence encoders (Devlin et al., 2018), we propose Hibert (as shorthand for HIerarchical Bidirectional Encoder Representations from Transformers) for document encoding and a method to pre-train it using unlabeled data. We apply the pre-trained Hibert to our summarization model and it outperforms its randomly initialized counterpart by 1.25 ROUGE on the CNN/Dailymail dataset and by 2.0 ROUGE on a version of New York Times dataset. We also achieve the state-of-the-art performance on these two datasets.", "keyphrases": ["document summarization", "document encoding", "hierarchical transformer encoder", "important sentence"]} +{"id": "strubell-etal-2019-energy", "title": "Energy and Policy Considerations for Deep Learning in NLP", "abstract": "Recent progress in hardware and methodology for training neural networks has ushered in a new generation of large networks trained on abundant data. These models have obtained notable gains in accuracy across many NLP tasks. However, these accuracy improvements depend on the availability of exceptionally large computational resources that necessitate similarly substantial energy consumption. 
As a result these models are costly to train and develop, both financially, due to the cost of hardware and electricity or cloud compute time, and environmentally, due to the carbon footprint required to fuel modern tensor processing hardware. In this paper we bring this issue to the attention of NLP researchers by quantifying the approximate financial and environmental costs of training a variety of recently successful neural network models for NLP. Based on these findings, we propose actionable recommendations to reduce costs and improve equity in NLP research and practice.", "keyphrases": ["deep learning", "computational resource", "pretraining", "nlp model"]} +{"id": "christodoulopoulos-etal-2010-two", "title": "Two Decades of Unsupervised POS Induction: How Far Have We Come?", "abstract": "Part-of-speech (POS) induction is one of the most popular tasks in research on unsupervised NLP. Many different methods have been proposed, yet comparisons are difficult to make since there is little consensus on evaluation framework, and many papers evaluate against only one or two competitor systems. Here we evaluate seven different POS induction systems spanning nearly 20 years of work, using a variety of measures. We show that some of the oldest (and simplest) systems stand up surprisingly well against more recent approaches. Since most of these systems were developed and tested using data from the WSJ corpus, we compare their generalization abilities by testing on both WSJ and the multilingual Multext-East corpus. Finally, we introduce the idea of evaluating systems based on their ability to produce cluster prototypes that are useful as input to a prototype-driven learner. In most cases, the prototype-driven learner outperforms the unsupervised system used to initialize it, yielding state-of-the-art results on WSJ and improvements on non-English corpora.", "keyphrases": ["pos", "induction", "tagger", "bible corpus"]} +{"id": "zuo-etal-2020-knowdis", "title": "KnowDis: Knowledge Enhanced Data Augmentation for Event Causality Detection via Distant Supervision", "abstract": "Modern models of event causality detection (ECD) are mainly based on supervised learning from small hand-labeled corpora. However, hand-labeled training data is expensive to produce, has low coverage of causal expressions, and is limited in size, which makes it hard for supervised methods to detect causal relations between events. To solve this data lacking problem, we investigate a data augmentation framework for ECD, dubbed as Knowledge Enhanced Distant Data Augmentation (KnowDis). Experimental results on two benchmark datasets EventStoryLine corpus and Causal-TimeBank show that 1) KnowDis can augment available training data assisted with the lexical and causal commonsense knowledge for ECD via distant supervision, and 2) our method outperforms previous methods by a large margin assisted with automatically labeled training data.", "keyphrases": ["event causality detection", "distant supervision", "data lacking problem", "knowdis"]} +{"id": "vincze-etal-2014-automatic", "title": "Automatic Error Detection concerning the Definite and Indefinite Conjugation in the HunLearner Corpus", "abstract": "In this paper we present the results of automatic error detection, concerning the definite and indefinite conjugation in the extended version of the HunLearner corpus, the learners\u2019 corpus of the Hungarian language. 
We present the most typical structures that trigger definite or indefinite conjugation in Hungarian and we also discuss the most frequent types of errors made by language learners in the corpus texts. We also illustrate the error types with sentences taken from the corpus. Our results highlight grammatical structures that might pose problems for learners of Hungarian, which can be fruitfully applied in the teaching and practicing of such constructions from the language teacher\u2019s or learners\u2019 point of view. On the other hand, these results may be exploited in extending the functionalities of a grammar checker, concerning the definiteness of the verb. Our automatic system was able to achieve perfect recall, i.e. it could find all the mismatches between the type of the object and the conjugation of the verb, which is promising for future studies in this area.", "keyphrases": ["indefinite conjugation", "hunlearner corpus", "automatic error detection"]} +{"id": "berant-etal-2011-global", "title": "Global Learning of Typed Entailment Rules", "abstract": "Extensive knowledge bases of entailment rules between predicates are crucial for applied semantic inference. In this paper we propose an algorithm that utilizes transitivity constraints to learn a globally-optimal set of entailment rules for typed predicates. We model the task as a graph learning problem and suggest methods that scale the algorithm to larger graphs. We apply the algorithm over a large data set of extracted predicate instances, from which a resource of typed entailment rules has been recently released (Schoenmackers et al., 2010). Our results show that using global transitivity information substantially improves performance over this resource and several baselines, and that our scaling methods allow us to increase the scope of global learning of entailment-rule graphs.", "keyphrases": ["transitivity", "global learning", "entailment graph", "inference rule"]} +{"id": "joulin-etal-2017-bag", "title": "Bag of Tricks for Efficient Text Classification", "abstract": "This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.", "keyphrases": ["bag", "learning model", "fasttext n-gram model", "tweet text", "document representation"]} +{"id": "clark-etal-2019-dont", "title": "Don't Take the Easy Way Out: Ensemble Based Methods for Avoiding Known Dataset Biases", "abstract": "State-of-the-art models often make use of superficial patterns in the data that do not generalize well to out-of-domain or adversarial settings. For example, textual entailment models often learn that particular key words imply entailment, irrespective of context, and visual question answering models learn to predict prototypical answers, without considering evidence in the image. In this paper, we show that if we have prior knowledge of such biases, we can train a model to be more robust to domain shift. 
Our method has two stages: we (1) train a naive model that makes predictions exclusively based on dataset biases, and (2) train a robust model as part of an ensemble with the naive one in order to encourage it to focus on other patterns in the data that are more likely to generalize. Experiments on five datasets with out-of-domain test sets show significantly improved robustness in all settings, including a 12 point gain on a changing priors visual question answering dataset and a 9 point gain on an adversarial question answering test set.", "keyphrases": ["ensemble", "adversarial setting", "prior knowledge", "robustness", "vqa model"]} +{"id": "brantley-etal-2020-active", "title": "Active Imitation Learning with Noisy Guidance", "abstract": "Imitation learning algorithms provide state-of-the-art results on many structured prediction tasks by learning near-optimal search policies. Such algorithms assume training-time access to an expert that can provide the optimal action at any queried state; unfortunately, the number of such queries is often prohibitive, frequently rendering these approaches impractical. To combat this query complexity, we consider an active learning setting in which the learning algorithm has additional access to a much cheaper noisy heuristic that provides noisy guidance. Our algorithm, LEAQI, learns a difference classifier that predicts when the expert is likely to disagree with the heuristic, and queries the expert only when necessary. We apply LEAQI to three sequence labelling tasks, demonstrating significantly fewer queries to the expert and comparable (or better) accuracies over a passive approach.", "keyphrases": ["imitation", "noisy guidance", "prediction task"]} +{"id": "muller-etal-2012-constrained", "title": "Constrained Decoding for Text-Level Discourse Parsing", "abstract": "This paper presents a novel approach to document-based discourse analysis by performing a global A* search over the space of possible structures while optimizing a global criterion over the set of potential coherence relations. Existing approaches to discourse analysis have so far relied on greedy search strategies or restricted themselves to sentence-level discourse parsing. Another advantage of our approach, over other global alternatives (like Maximum Spanning Tree decoding algorithms), is its flexibility in being able to integrate constraints (including linguistically motivated ones like the Right Frontier Constraint). Finally, our paper provides the first discourse parsing system for French; our evaluation is carried out on the Annodis corpus. While using a lot less training data than previous work on English, our system manages to achieve state-of-the-art results, with F1-scores of 66.2 and 46.8 when compared to unlabeled and labeled reference structures.", "keyphrases": ["discourse", "difficulty", "attachment"]} +{"id": "dogruoz-etal-2021-survey", "title": "A Survey of Code-switching: Linguistic and Social Perspectives for Language Technologies", "abstract": "The analysis of data in which multiple languages are represented has gained popularity among computational linguists in recent years. So far, much of this research focuses mainly on the improvement of computational methods and largely ignores linguistic and social aspects of C-S discussed across a wide range of languages within the long-established literature in linguistics. 
To fill this gap, we offer a survey of code-switching (C-S) covering the literature in linguistics with a reflection on the key issues in language technologies. From the linguistic perspective, we provide an overview of structural and functional patterns of C-S focusing on the literature from European and Indian contexts as highly multilingual areas. From the language technologies perspective, we discuss how massive language models fail to represent diverse C-S types due to lack of appropriate training data, lack of robust evaluation benchmarks for C-S (across multilingual situations and types of C-S) and lack of end-to-end systems that cover sociolinguistic aspects of C-S as well. Our survey will be a step towards an outcome of mutual benefit for computational scientists and linguists with a shared interest in multilingualism and C-S.", "keyphrases": ["survey", "code-switching", "linguistic"]} +{"id": "cai-etal-2017-pay", "title": "Pay Attention to the Ending:Strong Neural Baselines for the ROC Story Cloze Task", "abstract": "We consider the ROC story cloze task (Mostafazadeh et al., 2016) and present several findings. We develop a model that uses hierarchical recurrent networks with attention to encode the sentences in the story and score candidate endings. By discarding the large training set and only training on the validation set, we achieve an accuracy of 74.7%. Even when we discard the story plots (sentences before the ending) and only train to choose the better of two endings, we can still reach 72.5%. We then analyze this \u201cending-only\u201d task setting. We estimate human accuracy to be 78% and find several types of clues that lead to this high accuracy, including those related to sentiment, negation, and general ending likelihood regardless of the story context.", "keyphrases": ["story", "hierarchical recurrent network", "negation"]} +{"id": "yin-etal-2021-docnli", "title": "DocNLI: A Large-scale Dataset for Document-level Natural Language Inference", "abstract": "Natural language inference (NLI) is formulated as a unified framework for solving various NLP problems such as relation extraction, question answering, summarization, etc. It has been studied intensively in the past few years thanks to the availability of large-scale labeled datasets. However, most existing studies focus on merely sentence-level inference, which limits the scope of NLI's application in downstream NLP problems. This work presents DocNLI -- a newly-constructed large-scale dataset for document-level NLI. DocNLI is transformed from a broad range of NLP problems and covers multiple genres of text. The premises always stay in the document granularity, whereas the hypotheses vary in length from single sentences to passages with hundreds of words. Additionally, DocNLI has pretty limited artifacts which unfortunately widely exist in some popular sentence-level NLI datasets. Our experiments demonstrate that, even without fine-tuning, a model pretrained on DocNLI shows promising performance on popular sentence-level benchmarks, and generalizes well to out-of-domain NLP tasks that rely on inference at document granularity. Task-specific fine-tuning can bring further improvements. 
Data, code, and pretrained models can be found at https://github.com/salesforce/DocNLI.", "keyphrases": ["large-scale dataset", "natural language inference", "summarization", "docnli"]} +{"id": "stab-etal-2018-argumentext", "title": "ArgumenText: Searching for Arguments in Heterogeneous Sources", "abstract": "Argument mining is a core technology for enabling argument search in large corpora. However, most current approaches fall short when applied to heterogeneous texts. In this paper, we present an argument retrieval system capable of retrieving sentential arguments for any given controversial topic. By analyzing the highest-ranked results extracted from Web sources, we found that our system covers 89% of arguments found in expert-curated lists of arguments from an online debate portal, and also identifies additional valid arguments.", "keyphrases": ["retrieval", "controversial topic", "argumentext", "common crawl", "con"]} +{"id": "bonin-etal-2010-contrastive", "title": "A Contrastive Approach to Multi-word Extraction from Domain-specific Corpora", "abstract": "In this paper, we present a novel approach to multi-word terminology extraction combining a well-known automatic term recognition approach, the C\u2013NC value method, with a contrastive ranking technique, aimed at refining obtained results either by filtering noise due to common words or by discerning between semantically different types of terms within heterogeneous terminologies. Differently from other contrastive methods proposed in the literature that focus on single terms to overcome the multi-word terms' sparsity problem, the proposed contrastive function is able to handle variation in low frequency events by directly operating on pre-selected multi-word terms. This methodology has been tested in two case studies carried out in the History of Art and Legal domains. Evaluation of achieved results showed that the proposed two-stage approach significantly improves multi-word term extraction results. In particular, as far as the legal domain is concerned, it provides an answer to a well-known problem in the semi-automatic construction of legal ontologies, namely that of singling out law terms from terms of the specific domain being regulated.", "keyphrases": ["contrastive approach", "different type", "multi-word term"]} +{"id": "bicici-van-genabith-2013-cngl", "title": "CNGL-CORE: Referential Translation Machines for Measuring Semantic Similarity", "abstract": "We invent referential translation machines (RTMs), a computational model for identifying the translation acts between any two data sets with respect to a reference corpus selected in the same domain, which can be used for judging the semantic similarity between text. RTMs make quality and semantic similarity judgments possible by using retrieved relevant training data as interpretants for reaching shared semantics. An MTPP (machine translation performance predictor) model derives features measuring the closeness of the test sentences to the training data, the difficulty of translating them, and the presence of acts of translation involved. We view semantic similarity as paraphrasing between any two given texts. Each view is modeled by an RTM model, giving us a new perspective on the binary relationship between the two. 
Our prediction model is the 15th on some tasks and 30th overall out of 89 submissions in total according to the official results of the Semantic Textual Similarity (STS 2013) challenge.", "keyphrases": ["referential translation machine", "semantic similarity", "sts task"]} +{"id": "liu-etal-2011-recognizing", "title": "Recognizing Named Entities in Tweets", "abstract": "The challenges of Named Entity Recognition (NER) for tweets lie in the insufficient information in a tweet and the unavailability of training data. We propose to combine a K-Nearest Neighbors (KNN) classifier with a linear Conditional Random Fields (CRF) model under a semi-supervised learning framework to tackle these challenges. The KNN based classifier conducts pre-labeling to collect global coarse evidence across tweets while the CRF model conducts sequential labeling to capture fine-grained information encoded in a tweet. The semi-supervised learning plus the gazetteers alleviate the lack of training data. Extensive experiments show the advantages of our method over the baselines as well as the effectiveness of KNN and semi-supervised learning.", "keyphrases": ["semi-supervised learning framework", "twitter", "nlp tool", "social medium data", "presence"]} +{"id": "donoso-sanchez-2017-dialectometric", "title": "Dialectometric analysis of language variation in Twitter", "abstract": "In the last few years, microblogging platforms such as Twitter have given rise to a deluge of textual data that can be used for the analysis of informal communication between millions of individuals. In this work, we propose an information-theoretic approach to geographic language variation using a corpus based on Twitter. We test our models with tens of concepts and their associated keywords detected in Spanish tweets geolocated in Spain. We employ dialectometric measures (cosine similarity and Jensen-Shannon divergence) to quantify the linguistic distance on the lexical level between cells created in a uniform grid over the map. This can be done for a single concept or in the general case taking into account an average of the considered variants. The latter permits an analysis of the dialects that naturally emerge from the data. Interestingly, our results reveal the existence of two dialect macrovarieties. The first group includes a region-specific speech spoken in small towns and rural areas whereas the second cluster encompasses cities that tend to use a more uniform variety. Since the results obtained with the two different metrics qualitatively agree, our work suggests that social media corpora can be efficiently used for dialectometric analyses.", "keyphrases": ["language variation", "twitter", "dialectometric analysis"]} +{"id": "yasui-etal-2019-using", "title": "Using Semantic Similarity as Reward for Reinforcement Learning in Sentence Generation", "abstract": "Traditional model training for sentence generation employs cross-entropy loss as the loss function. While cross-entropy loss has convenient properties for supervised learning, it is unable to evaluate sentences as a whole, and lacks flexibility. We present the approach of training the generation model using the estimated semantic similarity between the output and reference sentences to alleviate the problems faced by the training with cross-entropy loss. We use the BERT-based scorer fine-tuned to the Semantic Textual Similarity (STS) task for semantic similarity estimation, and train the model with the estimated scores through reinforcement learning (RL). 
Our experiments show that reinforcement learning with semantic similarity reward improves the BLEU scores from the baseline LSTM NMT model.", "keyphrases": ["semantic similarity", "reinforcement learning", "sentence generation"]} +{"id": "rabinovich-etal-2017-personalized", "title": "Personalized Machine Translation: Preserving Original Author Traits", "abstract": "The language that we produce reflects our personality, and various personal and demographic characteristics can be detected in natural language texts. We focus on one particular personal trait of the author, gender, and study how it is manifested in original texts and in translations. We show that the author's gender has a powerful, clear signal in original texts, but this signal is obfuscated in human and machine translation. We then propose simple domain-adaptation techniques that help retain the original gender traits in the translation, without harming the quality of the translation, thereby creating more personalized machine translation systems.", "keyphrases": ["machine translation", "author trait", "gender", "speaker-specific data"]} +{"id": "ni-wang-2017-learning", "title": "Learning to Explain Non-Standard English Words and Phrases", "abstract": "We describe a data-driven approach for automatically explaining new, non-standard English expressions in a given sentence, building on a large dataset that includes 15 years of crowdsourced examples from UrbanDictionary.com. Unlike prior studies that focus on matching keywords from a slang dictionary, we investigate the possibility of learning a neural sequence-to-sequence model that generates explanations of unseen non-standard English expressions given context. We propose a dual encoder approach\u2014a word-level encoder learns the representation of context, and a second character-level encoder to learn the hidden representation of the target non-standard expression. Our model can produce reasonable definitions of new non-standard English expressions given their context with certain confidence.", "keyphrases": ["english expression", "sequence-to-sequence model", "definition", "translation task"]} +{"id": "hashimoto-etal-2013-simple", "title": "Simple Customization of Recursive Neural Networks for Semantic Relation Classification", "abstract": "In this paper, we present a recursive neural network (RNN) model that works on a syntactic tree. Our model differs from previous RNN models in that the model allows for an explicit weighting of important phrases for the target task. We also propose to average parameters in training. Our experimental results on semantic relation classification show that both phrase categories and task-specific weighting significantly improve the prediction accuracy of the model. We also show that averaging the model parameters is effective in stabilizing the learning and improves generalization capacity. The proposed model marks scores competitive with state-of-the-art RNN-based models.", "keyphrases": ["recursive neural networks", "relation classification", "important phrase"]} +{"id": "gella-etal-2017-image", "title": "Image Pivoting for Learning Multilingual Multimodal Representations", "abstract": "In this paper we propose a model to learn multimodal multilingual representations for matching images and sentences in different languages, with the aim of advancing multilingual versions of image search and image understanding. 
Our model learns a common representation for images and their descriptions in two different languages (which need not be parallel) by considering the image as a pivot between two languages. We introduce a new pairwise ranking loss function which can handle both symmetric and asymmetric similarity between the two modalities. We evaluate our models on image-description ranking for German and English, and on semantic textual similarity of image descriptions in English. In both cases we achieve state-of-the-art performance.", "keyphrases": ["pivot", "different language", "image"]} +{"id": "smith-eisner-2006-minimum", "title": "Minimum Risk Annealing for Training Log-Linear Models", "abstract": "When training the parameters for a natural language system, one would prefer to minimize 1-best loss (error) on an evaluation set. Since the error surface for many natural language problems is piecewise constant and riddled with local minima, many systems instead optimize log-likelihood, which is conveniently differentiable and convex. We propose training instead to minimize the expected loss, or risk. We define this expectation using a probability distribution over hypotheses that we gradually sharpen (anneal) to focus on the 1-best hypothesis. Besides the linear loss functions used in previous work, we also describe techniques for optimizing nonlinear functions such as precision or the BLEU metric. We present experiments training log-linear combinations of models for dependency parsing and for machine translation. In machine translation, annealed minimum risk training achieves significant improvements in BLEU over standard minimum error training. We also show improvements in labeled dependency parsing.", "keyphrases": ["log-linear model", "loss", "machine translation", "minimum risk training"]} +{"id": "eck-etal-2007-translation", "title": "Translation Model Pruning via Usage Statistics for Statistical Machine Translation", "abstract": "We describe a new pruning approach to remove phrase pairs from translation models of statistical machine translation systems. The approach applies the original translation system to a large amount of text and calculates usage statistics for the phrase pairs. Using these statistics the relevance of each phrase pair can be estimated. The approach is tested against a strong baseline based on previous work and shows significant improvements.", "keyphrases": ["usage statistic", "pruning approach", "phrase pair", "large amount"]} +{"id": "gupta-etal-2014-text", "title": "Text Summarization through Entailment-based Minimum Vertex Cover", "abstract": "Sentence Connectivity is a textual characteristic that may be incorporated intelligently for the selection of sentences of a well-meaning summary. However, the existing summarization methods do not utilize its potential fully. The present paper introduces a novel method for single-document text summarization. It poses the text summarization task as an optimization problem, and attempts to solve it using Weighted Minimum Vertex Cover (WMVC), a graph-based algorithm. Textual entailment, an established indicator of semantic relationships between text units, is used to measure sentence connectivity and construct the graph on which WMVC operates. 
Experiments on a standard summarization dataset show that the suggested algorithm outperforms related methods.", "keyphrases": ["summarization", "textual entailment recognition", "non-redundant sentence"]} +{"id": "palaskar-etal-2019-multimodal", "title": "Multimodal Abstractive Summarization for How2 Videos", "abstract": "In this paper, we study abstractive summarization for open-domain videos. Unlike the traditional text news summarization, the goal is less to \u201ccompress\u201d text information but rather to provide a fluent textual summary of information that has been collected and fused from different source modalities, in our case video and audio transcripts (or text). We show how a multi-source sequence-to-sequence model with hierarchical attention can integrate information from different modalities into a coherent output, compare various models trained with different modalities and present pilot experiments on the How2 corpus of instructional videos. We also propose a new evaluation metric (Content F1) for the abstractive summarization task that measures semantic adequacy rather than fluency of the summaries, which is covered by metrics like ROUGE and BLEU.", "keyphrases": ["video", "modality", "hierarchical attention", "multimodal abstractive summarization"]} +{"id": "li-etal-2009-collaborative", "title": "Collaborative Decoding: Partial Hypothesis Re-ranking Using Translation Consensus between Decoders", "abstract": "This paper presents collaborative decoding (co-decoding), a new method to improve machine translation accuracy by leveraging translation consensus between multiple machine translation decoders. Different from system combination and MBR decoding, which post-process the n-best lists or word lattice of machine translation decoders, in our method multiple machine translation decoders collaborate by exchanging partial translation results. Using an iterative decoding approach, n-gram agreement statistics between translations of multiple decoders are employed to re-rank both full and partial hypotheses explored in decoding. Experimental results on data sets for the NIST Chinese-to-English machine translation task show that the co-decoding method can bring significant improvements to all baseline decoders, and the outputs from co-decoding can be used to further improve the result of system combination.", "keyphrases": ["hypothesis", "translation consensus", "multiple decoder"]} +{"id": "schmitt-etal-2018-joint", "title": "Joint Aspect and Polarity Classification for Aspect-based Sentiment Analysis with End-to-End Neural Networks", "abstract": "In this work, we propose a new model for aspect-based sentiment analysis. In contrast to previous approaches, we jointly model the detection of aspects and the classification of their polarity in an end-to-end trainable neural network. We conduct experiments with different neural architectures and word representations on the recent GermEval 2017 dataset. We were able to show considerable performance gains by using the joint modeling approach in all settings compared to pipeline approaches.
The combination of a convolutional neural network and fastText embeddings outperformed the best submission of the shared task in 2017, establishing a new state of the art.", "keyphrases": ["sentiment analysis", "convolutional neural network", "aspect category"]} +{"id": "volkova-etal-2017-separating", "title": "Separating Facts from Fiction: Linguistic Models to Classify Suspicious and Trusted News Posts on Twitter", "abstract": "Pew research polls report 62 percent of U.S. adults get news on social media (Gottfried and Shearer, 2016). In a December poll, 64 percent of U.S. adults said that \u201cmade-up news\u201d has caused a \u201cgreat deal of confusion\u201d about the facts of current events (Barthel et al., 2016). Fabricated stories in social media, ranging from deliberate propaganda to hoaxes and satire, contribute to this confusion in addition to having serious effects on global stability. In this work we build predictive models to classify 130 thousand news posts as suspicious or verified, and predict four sub-types of suspicious news \u2013 satire, hoaxes, clickbait and propaganda. We show that neural network models trained on tweet content and social network interactions outperform lexical models. Unlike previous work on deception detection, we find that adding syntax and grammar features to our models does not improve performance. Incorporating linguistic features improves classification results; however, social interaction features are most informative for finer-grained separation between four types of suspicious news posts.", "keyphrases": ["twitter", "news post", "linguistic feature", "misinformation", "social medium"]} +{"id": "fried-klein-2018-policy", "title": "Policy Gradient as a Proxy for Dynamic Oracles in Constituency Parsing", "abstract": "Dynamic oracles provide strong supervision for training constituency parsers with exploration, but must be custom defined for a given parser's transition system. We explore using a policy gradient method as a parser-agnostic alternative. In addition to directly optimizing for a tree-level metric such as F1, policy gradient has the potential to reduce exposure bias by allowing exploration during training; moreover, it does not require a dynamic oracle for supervision. On four constituency parsers in three languages, the method substantially outperforms static oracle likelihood training in almost all settings. For parsers where a dynamic oracle is available (including a novel oracle which we define for the transition system of Dyer et al., 2016), policy gradient typically recaptures a substantial fraction of the performance gain afforded by the dynamic oracle.", "keyphrases": ["dynamic oracle", "constituency parser", "policy gradient"]} +{"id": "zhang-etal-2021-need", "title": "When Do You Need Billions of Words of Pretraining Data?", "abstract": "NLP is currently dominated by language models like RoBERTa which are pretrained on billions of words. But what exact knowledge or skills do Transformer LMs learn from large-scale pretraining that they cannot learn from less data? To explore this question, we adopt five styles of evaluation: classifier probing, information-theoretic probing, unsupervised relative acceptability judgments, unsupervised language model knowledge probing, and fine-tuning on NLU tasks. We then draw learning curves that track the growth of these different measures of model ability with respect to pretraining data volume using the MiniBERTas, a group of RoBERTa models pretrained on 1M, 10M, 100M and 1B words.
We find that these LMs require only about 10M to 100M words to learn to reliably encode most syntactic and semantic features we test. They need a much larger quantity of data in order to acquire enough commonsense knowledge and other skills required to master typical downstream NLU tasks. The results suggest that, while the ability to encode linguistic features is almost certainly necessary for language understanding, it is likely that other, unidentified, forms of knowledge are the major drivers of recent improvements in language understanding among large pretrained models.", "keyphrases": ["billion", "roberta", "skill", "semantic feature"]} +{"id": "aghajanyan-etal-2021-muppet", "title": "Muppet: Massive Multi-task Representations with Pre-Finetuning", "abstract": "We propose pre-finetuning, an additional large-scale learning stage between language model pre-training and fine-tuning. Pre-finetuning is massively multi-task learning (around 50 datasets, over 4.8 million total labeled examples), and is designed to encourage learning of representations that generalize better to many different tasks. We show that pre-finetuning consistently improves performance for pretrained discriminators (e.g. RoBERTa) and generation models (e.g. BART) on a wide range of tasks (sentence prediction, commonsense reasoning, MRC, etc.), while also significantly improving sample efficiency during fine-tuning. We also show that large-scale multi-tasking is crucial; pre-finetuning can hurt performance when few tasks are used up until a critical point (usually above 15), after which performance improves linearly in the number of tasks.", "keyphrases": ["language model", "muppet", "downstream task"]} +{"id": "patwardhan-riloff-2007-effective", "title": "Effective Information Extraction with Semantic Affinity Patterns and Relevant Regions", "abstract": "We present an information extraction system that decouples the tasks of finding relevant regions of text and applying extraction patterns. We create a self-trained relevant sentence classifier to identify relevant regions, and use a semantic affinity measure to automatically learn domain-relevant extraction patterns. We then distinguish primary patterns from secondary patterns and apply the patterns selectively in the relevant regions. The resulting IE system achieves good performance on the MUC-4 terrorism corpus and ProMed disease outbreak stories. This approach requires only a few seed extraction patterns and a collection of relevant and irrelevant documents for training.", "keyphrases": ["information extraction", "relevant region", "affinity measure", "common word pattern", "event-specific document"]} +{"id": "gardner-etal-2013-improving", "title": "Improving Learning and Inference in a Large Knowledge-Base using Latent Syntactic Cues", "abstract": "Automatically constructed Knowledge Bases (KBs) are often incomplete and there is a genuine need to improve their coverage. Path Ranking Algorithm (PRA) is a recently proposed method which aims to improve KB coverage by performing inference directly over the KB graph. For the first time, we demonstrate that addition of edges labeled with latent features mined from a large dependency-parsed corpus of 500 million Web documents can significantly outperform previous PRA-based approaches on the KB inference task. We present extensive experimental results validating this finding.
The resources presented in this paper are publicly available.", "keyphrases": ["path", "pra", "edge label", "pre-trained vector representation", "relation extraction"]} +{"id": "dreyer-eisner-2009-graphical", "title": "Graphical Models over Multiple Strings", "abstract": "We study graphical modeling in the case of string-valued random variables. Whereas a weighted finite-state transducer can model the probabilistic relationship between two strings, we are interested in building up joint models of three or more strings. This is needed for inflectional paradigms in morphology, cognate modeling or language reconstruction, and multiple-string alignment. We propose a Markov Random Field in which each factor (potential function) is a weighted finite-state machine, typically a transducer that evaluates the relationship between just two of the strings. The full joint distribution is then a product of these factors. Though decoding is actually undecidable in general, we can still do efficient joint inference using approximate belief propagation; the necessary computations and messages are all finite-state. We demonstrate the methods by jointly predicting morphological forms.", "keyphrases": ["paradigm", "markov random field", "message", "graphical model"]} +{"id": "luong-manning-2016-achieving", "title": "Achieving Open Vocabulary Neural Machine Translation with Hybrid Word-Character Models", "abstract": "Nearly all previous work on neural machine translation (NMT) has used quite restricted vocabularies, perhaps with a subsequent method to patch in unknown words. This paper presents a novel word-character solution to achieving open vocabulary NMT. We build hybrid systems that translate mostly at the word level and consult the character components for rare words. Our character-level recurrent neural networks compute source word representations and recover unknown target words when needed. The twofold advantage of such a hybrid approach is that it is much faster and easier to train than character-based ones; at the same time, it never produces unknown words as in the case of word-based models. On the WMT'15 English to Czech translation task, this hybrid approach offers an additional boost of +2.1-11.4 BLEU points over models that already handle unknown words. Our best system achieves a new state-of-the-art result with 20.7 BLEU score. We demonstrate that our character models can successfully learn to not only generate well-formed words for Czech, a highly-inflected language with a very complex vocabulary, but also build correct representations for English source words.", "keyphrases": ["open vocabulary", "machine translation", "recurrent neural network", "character-based embedding"]} +{"id": "beck-etal-2021-investigating", "title": "Investigating label suggestions for opinion mining in German Covid-19 social media", "abstract": "This work investigates the use of interactively updated label suggestions to improve upon the efficiency of gathering annotations on the task of opinion mining in German Covid-19 social media data. We develop guidelines to conduct a controlled annotation study with social science students and find that suggestions from a model trained on a small, expert-annotated dataset already lead to a substantial improvement \u2013 in terms of inter-annotator agreement (+.14 Fleiss' \u03ba) and annotation quality \u2013 compared to students that do not receive any label suggestions.
We further find that label suggestions from interactively trained models do not lead to an improvement over suggestions from a static model. Nonetheless, our analysis of suggestion bias shows that annotators remain capable of reflecting upon the suggested label in general. Finally, we confirm the quality of the annotated data in transfer learning experiments between different annotator groups. To facilitate further research in opinion mining on social media data, we release our collected data consisting of 200 expert and 2,785 student annotations.", "keyphrases": ["opinion mining", "german covid-19", "social medium"]} +{"id": "costa-branco-2012-aspectual", "title": "Aspectual Type and Temporal Relation Classification", "abstract": "In this paper we investigate the relevance of aspectual type for the problem of temporal information processing, i.e. the problems of the recent TempEval challenges. \n \nFor a large list of verbs, we obtain several indicators about their lexical aspect by querying the web for expressions where these verbs occur in contexts associated with specific aspectual types. \n \nWe then proceed to extend existing solutions for the problem of temporal information processing with the information extracted this way. The improved performance of the resulting models shows that (i) aspectual type can be data-mined with unsupervised methods with a level of noise that does not prevent this information from being useful and that (ii) temporal information processing can profit from information about aspectual type.", "keyphrases": ["temporal relation classification", "information processing", "aspectual type"]} +{"id": "borgeaud-emerson-2020-leveraging", "title": "Leveraging Sentence Similarity in Natural Language Generation: Improving Beam Search using Range Voting", "abstract": "We propose a method for natural language generation, choosing the most representative output rather than the most likely output. By viewing the language generation process from the voting theory perspective, we define representativeness using range voting and a similarity measure. The proposed method can be applied when generating from any probabilistic language model, including n-gram models and neural network models. We evaluate different similarity measures on an image captioning task and a machine translation task, and show that our method generates longer and more diverse sentences, providing a solution to the common problem of short outputs being preferred over longer and more informative ones. The generated sentences obtain higher BLEU scores, particularly when the beam size is large. We also perform a human evaluation on both tasks and find that the outputs generated using our method are rated higher.", "keyphrases": ["natural language generation", "beam search", "range voting"]} +{"id": "chen-etal-2021-geoqa", "title": "GeoQA: A Geometric Question Answering Benchmark Towards Multimodal Numerical Reasoning", "abstract": "Automatic math problem solving has recently attracted increasing attention as a long-standing AI benchmark. In this paper, we focus on solving geometric problems, which requires a comprehensive understanding of textual descriptions, visual diagrams, and theorem knowledge. However, the existing methods were highly dependent on handcrafted rules and were merely evaluated on small-scale datasets.
Therefore, we propose a Geometric Question Answering dataset GeoQA, containing 4,998 geometric problems with corresponding annotated programs, which illustrate the solving process of the given problems. Compared with another publicly available dataset GeoS, GeoQA is 25 times larger, in which the program annotations can provide a practical testbed for future research on explicit and explainable numerical reasoning. Moreover, we introduce a Neural Geometric Solver (NGS) to address geometric problems by comprehensively parsing multimodal information and generating interpretable programs. We further add multiple self-supervised auxiliary tasks on NGS to enhance cross-modal semantic representation. Extensive experiments on GeoQA validate the effectiveness of our proposed NGS and auxiliary tasks. However, the results are still significantly lower than human performance, which leaves large room for future research. Our benchmark and code are released at https://github.com/chen-judge/GeoQA .", "keyphrases": ["numerical reasoning", "geometric problem", "small-scale dataset", "geoqa"]} +{"id": "tsai-etal-2016-cross", "title": "Cross-Lingual Named Entity Recognition via Wikification", "abstract": "Named Entity Recognition (NER) models for language L are typically trained using annotated data in that language. We study cross-lingual NER, where a model for NER in L is trained on another, source, language (or multiple source languages). We introduce a language-independent method for NER, building on cross-lingual wikification, a technique that grounds words and phrases in non-English text into English Wikipedia entries. Thus, mentions in any language can be described using a set of categories and FreeBase types, yielding, as we show, strong language-independent features. With this insight, we propose an NER model that can be applied to all languages in Wikipedia. When trained on English, our model outperforms comparable approaches on the standard CoNLL datasets (Spanish, German, and Dutch) and also performs very well on low-resource languages (e.g., Turkish, Tagalog, Yoruba, Bengali, and Tamil) that have significantly smaller Wikipedia. Moreover, our method allows us to train on multiple source languages, typically improving NER results on the target languages. Finally, we show that our language-independent features can be used also to enhance monolingual NER systems, yielding improved results for all 9 languages.", "keyphrases": ["entity recognition", "cross-lingual ner", "wikipedia", "low-resource language", "name tagging"]} +{"id": "matusov-etal-2004-symmetric", "title": "Symmetric Word Alignments for Statistical Machine Translation", "abstract": "In this paper, we address the word alignment problem for statistical machine translation. We aim at creating a symmetric word alignment allowing for reliable one-to-many and many-to-one word relationships. We perform the iterative alignment training in the source-to-target and the target-to-source direction with the well-known IBM and HMM alignment models. Using these models, we robustly estimate the local costs of aligning a source word and a target word in each sentence pair. Then, we use efficient graph algorithms to determine the symmetric alignment with minimal total costs (i.e. maximal alignment probability). We evaluate the automatic alignments created in this way on the German--English Verbmobil task and the French--English Canadian Hansards task.
We show statistically significant improvements of the alignment quality compared to the best results reported so far. On the Verbmobil task, we achieve an improvement of more than 1% absolute over the baseline error rate of 4.7%.", "keyphrases": ["statistical machine translation", "cost", "symmetric alignment"]} +{"id": "mohammad-etal-2008-computing", "title": "Computing Word-Pair Antonymy", "abstract": "Knowing the degree of antonymy between words has widespread applications in natural language processing. Manually-created lexicons have limited coverage and do not include most semantically contrasting word pairs. We present a new automatic and empirical measure of antonymy that combines corpus statistics with the structure of a published thesaurus. The approach is evaluated on a set of closest-opposite questions, obtaining a precision of over 80%. Along the way, we discuss what humans consider antonymous and how antonymy manifests itself in utterances.", "keyphrases": ["word-pair antonymy", "corpus statistic", "thesaurus"]} +{"id": "chaturvedi-etal-2017-story", "title": "Story Comprehension for Predicting What Happens Next", "abstract": "Automatic story comprehension is a fundamental challenge in Natural Language Understanding, and can enable computers to learn about social norms, human behavior and commonsense. In this paper, we present a story comprehension model that explores three distinct semantic aspects: (i) the sequence of events described in the story, (ii) its emotional trajectory, and (iii) its plot consistency. We judge the model's understanding of real-world stories by inquiring if, like humans, it can develop an expectation of what will happen next in a given story. Specifically, we use it to predict the correct ending of a given short story from possible alternatives. The model uses a hidden variable to weigh the semantic aspects in the context of the story. Our experiments demonstrate the potential of our approach to characterize these semantic aspects, and the strength of the hidden variable based approach. The model outperforms the state-of-the-art approaches and achieves best results on a publicly available dataset.", "keyphrases": ["semantic aspect", "story comprehension", "sentiment trajectory", "event sequence", "coherence model"]} +{"id": "zhang-etal-2013-chinese", "title": "Chinese Parsing Exploiting Characters", "abstract": "Characters play an important role in the Chinese language, yet computational processing of Chinese has been dominated by word-based approaches, with leaves in syntax trees being words. We investigate Chinese parsing from the character-level, extending the notion of phrase-structure trees by annotating internal structures of words. We demonstrate the importance of character-level information to Chinese processing by building a joint segmentation, part-of-speech (POS) tagging and phrase-structure parsing system that integrates character-structure features. Our joint system significantly outperforms a state-of-the-art word-based baseline on the standard CTB5 test, and gives the best published results for Chinese parsing.", "keyphrases": ["character", "internal structure", "segmentation", "chinese", "pos tag"]} +{"id": "striegnitz-etal-2011-report", "title": "Report on the Second Second Challenge on Generating Instructions in Virtual Environments (GIVE-2.5)", "abstract": "GIVE-2.5 evaluates eight natural language generation (NLG) systems that guide human users through solving a task in a virtual environment. 
The data is collected via the Internet, and to date, 536 interactions of subjects with one of the NLG systems have been recorded. The systems are compared using both task performance measures and subjective ratings by human users.", "keyphrases": ["generating instruction", "virtual environments", "give-2.5"]} +{"id": "lee-yeung-2018-personalizing", "title": "Personalizing Lexical Simplification", "abstract": "A lexical simplification (LS) system aims to substitute complex words with simple words in a text, while preserving its meaning and grammaticality. Despite individual users' differences in vocabulary knowledge, current systems do not consider these variations; rather, they are trained to find one optimal substitution or ranked list of substitutions for all users. We evaluate the performance of a state-of-the-art LS system on individual learners of English at different proficiency levels, and measure the benefits of using complex word identification (CWI) models to personalize the system. Experimental results show that even a simple personalized CWI model, based on graded vocabulary lists, can help the system avoid some unnecessary simplifications and produce more readable output.", "keyphrases": ["lexical simplification", "individual user", "learner", "cwi"]} +{"id": "gao-huang-2017-detecting", "title": "Detecting Online Hate Speech Using Context Aware Models", "abstract": "In the wake of a polarizing election, the cyber world is laden with hate speech. Context accompanying a hate speech text is useful for identifying hate speech, which, however, has been largely overlooked in existing datasets and hate speech detection models. In this paper, we provide an annotated corpus of hate speech with context information well kept. Then we propose two types of hate speech detection models that incorporate context information, a logistic regression model with context features and a neural network model with learning components for context. Our evaluation shows that both models outperform a strong baseline by around 3% to 4% in F1 score, and combining these two models further improves the performance by another 7% in F1 score.", "keyphrases": ["hate speech", "annotated corpus", "context information", "logistic regression model", "news article"]} +{"id": "xia-mccord-2004-improving", "title": "Improving a Statistical MT System with Automatically Learned Rewrite Patterns", "abstract": "Current clump-based statistical MT systems have two limitations with respect to word ordering: First, they lack a mechanism for expressing and using generalization that accounts for reorderings of linguistic phrases. Second, the ordering of target words in such systems does not respect linguistic phrase boundaries. To address these limitations, we propose to use automatically learned rewrite patterns to preprocess the source sentences so that they have a word order similar to that of the target language. Our system is a hybrid one. The basic model is statistical, but we use broad-coverage rule-based parsers in two ways - during training for learning rewrite patterns, and at runtime for reordering the source sentences.
Our experiments show a 10% relative improvement in the BLEU measure.", "keyphrases": ["source sentence", "rewrite pattern extraction", "structure-based reordering", "psmt system", "principle"]} +{"id": "wu-etal-2019-neural", "title": "Neural News Recommendation with Heterogeneous User Behavior", "abstract": "News recommendation is important for online news platforms to help users find news of interest and alleviate information overload. Existing news recommendation methods usually rely on the news click history to model user interest. However, these methods may suffer from the data sparsity problem, since the news click behaviors of many users in online news platforms are usually very limited. Fortunately, some other kinds of user behaviors such as webpage browsing and search queries can also provide useful clues of users' news reading interest. In this paper, we propose a neural news recommendation approach which can exploit heterogeneous user behaviors. Our approach contains two major modules, i.e., news representation and user representation. In the news representation module, we learn representations of news from their titles via CNN networks, and apply attention networks to select important words. In the user representation module, we propose an attentive multi-view learning framework to learn unified representations of users from their heterogeneous behaviors such as search queries, clicked news and browsed webpages. In addition, we use word- and record-level attentions to select informative words and behavior records. Experiments on a real-world dataset validate the effectiveness of our approach.", "keyphrases": ["heterogeneous user behavior", "news", "short-term user interest"]} +{"id": "li-sporleder-2010-linguistic", "title": "Linguistic Cues for Distinguishing Literal and Non-Literal Usages", "abstract": "We investigate the effectiveness of different linguistic cues for distinguishing literal and non-literal usages of potentially idiomatic expressions. We focus specifically on features that generalize across different target expressions. While idioms on the whole are frequent, instances of each particular expression can be relatively infrequent and it will often not be feasible to extract and annotate a sufficient number of examples for each expression one might want to disambiguate. We experimented with a number of different features and found that features encoding lexical cohesion as well as some syntactic features can generalize well across idioms.", "keyphrases": ["usage", "idiomatic expression", "lexical context"]} +{"id": "jin-etal-2004-segmentation", "title": "Segmentation of Chinese Long Sentences Using Commas", "abstract": "The comma is the most common form of punctuation. As such, it may have the greatest effect on the syntactic analysis of a sentence. As an isolating language, Chinese has fewer cues for parsing. The clues for segmentation of a long Chinese sentence are even fewer. However, the average frequency of comma usage in Chinese is higher than in other languages. The comma plays an important role in long Chinese sentence segmentation. This paper proposes a method for classifying commas in Chinese sentences by their context, then segments a long sentence according to the classification results.
Experimental results show that accuracy for the comma classification reaches 87.1 percent, and with our segmentation model, our parser's dependency parsing accuracy improves by 9.6 percent.", "keyphrases": ["chinese", "long sentence", "segmentation"]} +{"id": "moro-navigli-2015-semeval", "title": "SemEval-2015 Task 13: Multilingual All-Words Sense Disambiguation and Entity Linking", "abstract": "In this paper we present the Multilingual All-Words Sense Disambiguation and Entity Linking task. Word Sense Disambiguation (WSD) and Entity Linking (EL) are well-known problems in the Natural Language Processing field and both address the lexical ambiguity of language. Their main difference lies in the kind of meaning inventories that are used: EL uses encyclopedic knowledge, while WSD uses lexicographic information. Our aim with this task is to analyze whether, and if so, how, using a resource that integrates both kinds of inventories (i.e., BabelNet 2.5.1) might enable WSD and EL to be solved by means of similar (even, the same) methods. Moreover, we investigate this task in a multilingual setting and for some specific domains.", "keyphrases": ["entity linking", "wsd", "multilingual setting"]} +{"id": "ellsworth-janin-2007-mutaphrase", "title": "Mutaphrase: Paraphrasing with FrameNet", "abstract": "We describe a preliminary version of Mutaphrase, a system that generates paraphrases of semantically labeled input sentences using the semantics and syntax encoded in FrameNet, a freely available lexicosemantic database. The algorithm generates a large number of paraphrases with a wide range of syntactic and semantic distances from the input. For example, given the input "I like eating cheese", the system outputs the syntactically distant "Eating cheese is liked by me", the semantically distant "I fear sipping juice", and thousands of other sentences. The wide range of generated paraphrases makes the algorithm ideal for a range of statistical machine learning problems such as machine translation and language modeling as well as other semantics-dependent tasks such as query and language generation.", "keyphrases": ["paraphrasing", "framenet", "machine translation", "mutaphrase"]} +{"id": "hernandez-farias-etal-2015-valento", "title": "ValenTo: Sentiment Analysis of Figurative Language Tweets with Irony and Sarcasm", "abstract": "This paper describes the system used by the ValenTo team in Task 11, Sentiment Analysis of Figurative Language in Twitter, at SemEval 2015. Our system used a regression model and additional external resources to assign polarity values. A distinctive feature of our approach is that we used not only word-sentiment lexicons providing polarity annotations, but also novel resources for dealing with emotions and psycholinguistic information. These are important aspects to tackle in figurative language such as irony and sarcasm, which were represented in the dataset. The system also exploited novel and standard structural features of tweets.
Considering the different kinds of figurative language in the dataset, our submission obtained good results in recognizing sentiment polarity in both ironic and sarcastic tweets.", "keyphrases": ["sentiment analysis", "irony", "sarcasm"]} +{"id": "gonzalez-rubio-etal-2010-balancing", "title": "Balancing User Effort and Translation Error in Interactive Machine Translation via Confidence Measures", "abstract": "This work deals with the application of confidence measures within an interactive-predictive machine translation system in order to reduce human effort. If a small loss in translation quality can be tolerated for the sake of efficiency, user effort can be saved by interactively translating only those initial translations which the confidence measure classifies as incorrect. We apply confidence estimation as a way to achieve a balance between user effort savings and final translation error. Empirical results show that our proposal allows us to obtain almost perfect translations while significantly reducing user effort.", "keyphrases": ["user effort", "translation error", "confidence measure"]} +{"id": "pantel-etal-2004-towards", "title": "Towards Terascale Semantic Acquisition", "abstract": "Although vast amounts of textual data are freely available, many NLP algorithms exploit only a minute percentage of it. In this paper, we study the challenges of working at the terascale. We present an algorithm, designed for the terascale, for mining is-a relations that achieves similar performance to a state-of-the-art linguistically-rich method. We focus on the accuracy of these two systems as a function of processing time and corpus size.", "keyphrases": ["terascale", "large corpora", "generic pattern"]} +{"id": "arase-tsujii-2017-monolingual", "title": "Monolingual Phrase Alignment on Parse Forests", "abstract": "We propose an efficient method to conduct phrase alignment on parse forests for paraphrase detection. Unlike previous studies, our method identifies syntactic paraphrases under linguistically motivated grammar. In addition, it allows phrases to non-compositionally align to handle paraphrases with non-homographic phrase correspondences. A dataset that provides gold parse trees and their phrase alignments is created. The experimental results confirm that the proposed method conducts highly accurate phrase alignment compared to human performance.", "keyphrases": ["phrase alignment", "parse forest", "paraphrase"]} +{"id": "ive-etal-2019-distilling", "title": "Distilling Translations with Visual Awareness", "abstract": "Previous work on multimodal machine translation has shown that visual information is only needed in very specific cases, for example in the presence of ambiguous words where the textual context is not sufficient. As a consequence, models tend to learn to ignore this information. We propose a translate-and-refine approach to this problem where images are only used by a second stage decoder. This approach is trained jointly to generate a good first draft translation and to improve over this draft by (i) making better use of the target language textual context (both left and right-side contexts) and (ii) making use of visual context. This approach leads to state-of-the-art results.
Additionally, we show that it has the ability to recover from erroneous or missing words in the source language.", "keyphrases": ["ambiguous word", "translate-and-refine approach", "image", "visual context", "well use"]} +{"id": "chang-lai-2004-preliminary", "title": "A Preliminary Study on Probabilistic Models for Chinese Abbreviations", "abstract": "Chinese abbreviations are widely used in modern Chinese texts. They are a special form of unknown words, including many named entities. This results in difficulty for correct Chinese processing. In this study, the Chinese abbreviation problem is regarded as an error recovery problem in which the suspect root words are the \u201cerrors\u201d to be recovered from a set of candidates. Such a problem is mapped to an HMM-based generation model for both abbreviation identification and root word recovery, and is integrated as part of a unified word segmentation model when the input extends to a complete sentence. Two major experiments are conducted to test the abbreviation models. In the first experiment, an attempt is made to guess the abbreviations of the root words. An accuracy rate of 72% is observed. In contrast, a second experiment is conducted to guess the root words from abbreviations. Some submodels could achieve as high as 51% accuracy with the simple HMM-based model. Some quantitative observations against heuristic abbreviation knowledge about Chinese are also made.", "keyphrases": ["abbreviation", "markov model", "full-form phrase", "news article"]} +{"id": "federmann-etal-2019-multilingual", "title": "Multilingual Whispers: Generating Paraphrases with Translation", "abstract": "Naturally occurring paraphrase data, such as multiple news stories about the same event, is a useful but rare resource. This paper compares translation-based paraphrase gathering using human, automatic, or hybrid techniques to monolingual paraphrasing by experts and non-experts. We gather translations, paraphrases, and empirical human quality assessments of these approaches. Neural machine translation techniques, especially when pivoting through related languages, provide a relatively robust source of paraphrases with diversity comparable to expert human paraphrases. Surprisingly, human translators do not reliably outperform neural systems. The resulting data release will not only be a useful test set, but will also allow additional explorations in translation and paraphrase quality assessments and relationships.", "keyphrases": ["paraphrasing", "multilingual whisper", "back-translation"]} +{"id": "koufakou-etal-2020-hurtbert", "title": "HurtBERT: Incorporating Lexical Features with BERT for the Detection of Abusive Language", "abstract": "The detection of abusive or offensive remarks in social texts has received significant attention in research. In several related shared tasks, BERT has been shown to be the state-of-the-art. In this paper, we propose to utilize lexical features derived from a hate lexicon towards improving the performance of BERT in such tasks. We explore different ways to utilize the lexical features in the form of lexicon-based encodings at the sentence level or embeddings at the word level. We provide an extensive dataset evaluation that addresses in-domain as well as cross-domain detection of abusive content to render a complete picture.
Our results indicate that our proposed models combining BERT with lexical features help improve over a baseline BERT model in many of our in-domain and cross-domain experiments.", "keyphrases": ["lexical feature", "detection", "abusive language"]} +{"id": "lu-etal-2016-joint", "title": "Joint Inference for Event Coreference Resolution", "abstract": "Event coreference resolution is a challenging problem since it relies on several components of the information extraction pipeline that typically yield noisy outputs. We hypothesize that exploiting the inter-dependencies between these components can significantly improve the performance of an event coreference resolver, and subsequently propose a novel joint inference based event coreference resolver using Markov Logic Networks (MLNs). However, the rich features that are important for this task are typically very hard to explicitly encode as MLN formulas since they significantly increase the size of the MLN, thereby making joint inference and learning infeasible. To address this problem, we propose a novel solution where we implicitly encode rich features into our model by augmenting the MLN distribution with low dimensional unit clauses. Our approach achieves state-of-the-art results on two standard evaluation corpora.", "keyphrases": ["event coreference resolution", "markov logic networks", "joint inference"]} +{"id": "shlain-etal-2020-syntactic", "title": "Syntactic Search by Example", "abstract": "We present a system that allows a user to search a large linguistically annotated corpus using syntactic patterns over dependency graphs. In contrast to previous attempts to this effect, we introduce a light-weight query language that does not require the user to know the details of the underlying syntactic representations, and instead to query the corpus by providing an example sentence coupled with simple markup. Search is performed at an interactive speed due to an efficient linguistic graph-indexing and retrieval engine. This allows for rapid exploration, development and refinement of syntax-based queries. We demonstrate the system using queries over two corpora: the English wikipedia, and a collection of English pubmed abstracts. A demo of the wikipedia system is available at .", "keyphrases": ["search", "example sentence", "wikipedia"]} +{"id": "miwa-etal-2010-evaluating", "title": "Evaluating Dependency Representations for Event Extraction", "abstract": "The detailed analyses of sentence structure provided by parsers have been applied to address several information extraction tasks. In a recent bio-molecular event extraction task, state-of-the-art performance was achieved by systems building specifically on dependency representations of parser output. While intrinsic evaluations have shown significant advances in both general and domain-specific parsing, the question of how these translate into practical advantage is seldom considered. In this paper, we analyze how event extraction performance is affected by parser and dependency representation, further considering the relation between intrinsic evaluation and performance at the extraction task.
We find that good intrinsic evaluation results do not always imply good extraction performance, and that the types and structures of different dependency representations have specific advantages and disadvantages for the event extraction task.", "keyphrases": ["dependency representation", "event extraction", "pipeline"]} +{"id": "finkel-etal-2006-solving", "title": "Solving the Problem of Cascading Errors: Approximate Bayesian Inference for Linguistic Annotation Pipelines", "abstract": "The end-to-end performance of natural language processing systems for compound tasks, such as question answering and textual entailment, is often hampered by use of a greedy 1-best pipeline architecture, which causes errors to propagate and compound at each stage. We present a novel architecture, which models these pipelines as Bayesian networks, with each low level task corresponding to a variable in the network, and then we perform approximate inference to find the best labeling. Our approach is extremely simple to apply but gains the benefits of sampling the entire distribution over labels at each stage in the pipeline. We apply our method to two tasks -- semantic role labeling and recognizing textual entailment -- and achieve useful performance gains from the superior pipeline architecture.", "keyphrases": ["linguistic annotation", "bayesian network", "variable", "pipeline approach"]} +{"id": "jhamtani-etal-2017-shakespearizing", "title": "Shakespearizing Modern Language Using Copy-Enriched Sequence to Sequence Models", "abstract": "Variations in writing styles are commonly used to adapt the content to a specific context, audience, or purpose. However, applying stylistic variations is still by and large a manual process, and there have been few efforts towards automating it. In this paper we explore automated methods to transform text from modern English to Shakespearean English using an end-to-end trainable neural model with pointers to enable copy action. To tackle the limited amount of parallel data, we pre-train embeddings of words by leveraging external dictionaries mapping Shakespearean words to modern English words as well as additional text. Our methods are able to get a BLEU score of 31+, an improvement of 6 points above the strongest baseline. We publicly release our code to foster further research in this area.", "keyphrases": ["style", "external dictionary mapping", "parallel corpus", "sequence-to-sequence"]} +{"id": "saha-etal-2021-towards", "title": "Towards Sentiment and Emotion aided Multi-modal Speech Act Classification in Twitter", "abstract": "Speech Act Classification, determining the communicative intent of an utterance, has been investigated widely over the years as a standalone task. This holds true for discussion in any forum, including social media platforms such as Twitter. But the emotional state of the tweeter, which has a considerable effect on the communication, has not received the attention it deserves. Closely related to emotion is sentiment, and understanding of one helps understand the other. In this work, we firstly create a new multi-modal, emotion-TA (`TA' means tweet act, i.e., speech act in Twitter) dataset called EmoTA, collected from an open-source Twitter dataset. We propose a Dyadic Attention Mechanism (DAM) based multi-modal, adversarial multi-tasking framework. DAM incorporates intra-modal and inter-modal attention to fuse multiple modalities and learns generalized features across all the tasks.
Experimental results indicate that the proposed framework boosts the performance of the primary task, i.e., TA classification (TAC), by benefitting from the two secondary tasks, i.e., Sentiment and Emotion Analysis, compared to its uni-modal and single-task TAC (tweet act classification) variants.", "keyphrases": ["emotion", "speech act classification", "twitter"]} +{"id": "zhang-etal-2021-textoir", "title": "TEXTOIR: An Integrated and Visualized Platform for Text Open Intent Recognition", "abstract": "TEXTOIR is the first integrated and visualized platform for text open intent recognition. It is composed of two main modules: open intent detection and open intent discovery. Each module integrates most of the state-of-the-art algorithms and benchmark intent datasets. It also contains an overall framework connecting the two modules in a pipeline scheme. In addition, this platform has visualized tools for data and model management, training, evaluation and analysis of the performance from different aspects. TEXTOIR provides useful toolkits and convenient visualized interfaces for each sub-module, and designs a framework to implement a complete process to both identify known intents and discover open intents.", "keyphrases": ["visualized platform", "intent", "textoir"]} +{"id": "koomen-etal-2005-generalized", "title": "Generalized Inference with Multiple Semantic Role Labeling Systems", "abstract": "We present an approach to semantic role labeling (SRL) that takes the output of multiple argument classifiers and combines them into a coherent predicate-argument output by solving an optimization problem. The optimization stage, which is solved via integer linear programming, takes into account both the recommendation of the classifiers and a set of problem specific constraints, and is thus used both to clean the classification results and to ensure structural integrity of the final role labeling. We illustrate a significant improvement in overall SRL performance through this inference.", "keyphrases": ["semantic role labeling", "srl", "optimization problem", "previous research"]} +{"id": "van-hee-etal-2018-semeval", "title": "SemEval-2018 Task 3: Irony Detection in English Tweets", "abstract": "This paper presents the first shared task on irony detection: given a tweet, automatic natural language processing systems should determine whether the tweet is ironic (Task A) and which type of irony (if any) is expressed (Task B). The ironic tweets were collected using irony-related hashtags (i.e. #irony, #sarcasm, #not) and were subsequently manually annotated to minimise the amount of noise in the corpus. Prior to distributing the data, hashtags that were used to collect the tweets were removed from the corpus. For both tasks, a training corpus of 3,834 tweets was provided, as well as a test set containing 784 tweets. Our shared tasks received submissions from 43 teams for the binary classification Task A and from 31 teams for the multiclass Task B.
The highest classification scores obtained for both subtasks are F1 = 0.71 and F1 = 0.51, respectively, and demonstrate that fine-grained irony classification is much more challenging than binary irony detection.", "keyphrases": ["english tweets", "sarcasm detection", "semeval"]} +{"id": "rios-kavuluru-2018-shot", "title": "Few-Shot and Zero-Shot Multi-Label Learning for Structured Label Spaces", "abstract": "Large multi-label datasets contain labels that occur thousands of times (frequent group), those that occur only a few times (few-shot group), and labels that never appear in the training dataset (zero-shot group). Multi-label few- and zero-shot label prediction is mostly unexplored on datasets with large label spaces, especially for text classification. In this paper, we perform a fine-grained evaluation to understand how state-of-the-art methods perform on infrequent labels. Furthermore, we develop few- and zero-shot methods for multi-label text classification when there is a known structure over the label space, and evaluate them on two publicly available medical text datasets: MIMIC II and MIMIC III. For few-shot labels we achieve improvements of 6.2% and 4.8% in R@10 for MIMIC II and MIMIC III, respectively, over prior efforts; the corresponding R@10 improvements for zero-shot labels are 17.3% and 19%.", "keyphrases": ["label space", "zero-shot learning", "promising result"]} +{"id": "paetzel-etal-2014-multimodal", "title": "A Multimodal Corpus of Rapid Dialogue Games", "abstract": "This paper presents a multimodal corpus of spoken human-human dialogues collected as participants played a series of Rapid Dialogue Games (RDGs). The corpus consists of a collection of about 11 hours of spoken audio, video, and Microsoft Kinect data taken from 384 game interactions (dialogues). The games used for collecting the corpus required participants to give verbal descriptions of linguistic expressions or visual images and were specifically designed to engage players in a fast-paced conversation under time pressure. As a result, the corpus contains many examples of participants attempting to communicate quickly in specific game situations, and it also includes a variety of spontaneous conversational phenomena such as hesitations, filled pauses, overlapping speech, and low-latency responses. The corpus has been created to facilitate research in incremental speech processing for spoken dialogue systems. Potentially, the corpus could be used in several areas of speech and language research, including speech recognition, natural language understanding, natural language generation, and dialogue management.", "keyphrases": ["multimodal corpus", "rapid dialogue games", "series", "audio"]} +{"id": "kiomourtzis-etal-2014-nomad", "title": "NOMAD: Linguistic Resources and Tools Aimed at Policy Formulation and Validation", "abstract": "The NOMAD project (Policy Formulation and Validation through non Moderated Crowd-sourcing) supports policy making by providing rich, actionable information related to how citizens perceive different policies. NOMAD automatically analyzes citizen contributions to the informal web (e.g. forums, social networks, blogs, newsgroups and wikis) using a variety of tools. These tools comprise text retrieval, topic classification, argument detection and sentiment analysis, as well as argument summarization.
NOMAD provides decision-makers with a full arsenal of solutions starting from describing a domain and a policy to applying content search and acquisition, categorization and visualization. These solutions work in a collaborative manner in the policy-making arena. NOMAD, thus, embeds editing, analysis and visualization technologies into a concrete framework, applicable in a variety of policy-making and decision support settings. In this paper we provide an overview of the linguistic tools and resources of NOMAD.", "keyphrases": ["policy formulation", "validation", "nomad"]} +{"id": "niehues-cho-2017-exploiting", "title": "Exploiting Linguistic Resources for Neural Machine Translation Using Multi-task Learning", "abstract": "Linguistic resources such as part-of-speech (POS) tags have been extensively used in statistical machine translation (SMT) frameworks and have yielded better performances. However, usage of such linguistic annotations in neural machine translation (NMT) systems has been left under-explored. \nIn this work, we show that multi-task learning is a successful and easy approach to introduce additional knowledge into an end-to-end neural attentional model. By jointly training several natural language processing (NLP) tasks in one system, we are able to leverage common information and improve the performance of the individual task. \nWe analyze the impact of three design decisions in multi-task learning: the tasks used in training, the training schedule, and the degree of parameter sharing across the tasks, which is defined by the network architecture. The experiments are conducted for a German to English translation task. As additional linguistic resources, we exploit POS information and named-entities (NE). Experiments show that the translation quality can be improved by up to 1.5 BLEU points under the low-resource condition. The performance of the POS tagger is also improved using the multi-task learning scheme.", "keyphrases": ["neural machine translation", "multi-task learning", "part-of-speech tagging", "dependency parsing"]} +{"id": "yang-etal-2020-hw", "title": "HW-TSC's Participation at WMT 2020 Automatic Post Editing Shared Task", "abstract": "The paper presents the submission by HW-TSC in the WMT 2020 Automatic Post Editing Shared Task. We participate in the English-German and English-Chinese language pairs. Our system is built based on the Transformer pre-trained on WMT 2019 and WMT 2020 News Translation corpora, and fine-tuned on the APE corpus. Bottleneck Adapter Layers are integrated into the model to prevent over-fitting. We further collect external translations as the augmented MT candidates to improve the performance. The experiment demonstrates that pre-trained NMT models are effective when fine-tuning with the APE corpus of a limited size, and the performance can be further improved with external MT augmentation. Our system achieves competitive results on both directions in the final evaluation.", "keyphrases": ["automatic post", "shared task", "hw-tsc"]} +{"id": "gururangan-etal-2019-variational", "title": "Variational Pretraining for Semi-supervised Text Classification", "abstract": "We introduce VAMPIRE, a lightweight pretraining framework for effective text classification when data and computing resources are limited. We pretrain a unigram document model as a variational autoencoder on in-domain, unlabeled data and use its internal states as features in a downstream classifier.
Empirically, we show the relative strength of VAMPIRE against computationally expensive contextual embeddings and other popular semi-supervised baselines under low resource settings. We also find that fine-tuning to in-domain data is crucial to achieving decent performance from contextual embeddings when working with limited supervision. We accompany this paper with code to pretrain and use VAMPIRE embeddings in downstream tasks.", "keyphrases": ["semi-supervised text classification", "vampire", "variational autoencoder", "unlabeled data"]} +{"id": "negri-etal-2012-chinese", "title": "Chinese Whispers: Cooperative Paraphrase Acquisition", "abstract": "We present a framework for the acquisition of sentential paraphrases based on crowdsourcing. The proposed method maximizes the lexical divergence between an original sentence s and its valid paraphrases by running a sequence of paraphrasing jobs carried out by a crowd of non-expert workers. Instead of collecting direct paraphrases of s, at each step of the sequence workers manipulate semantically equivalent reformulations produced in the previous round. We applied this method to paraphrase English sentences extracted from Wikipedia. Our results show that, keeping at each round n the most promising paraphrases (i.e. the more lexically dissimilar from those acquired at round n-1), the monotonic increase of divergence allows us to collect good-quality paraphrases in a cost-effective manner.", "keyphrases": ["paraphrase", "crowdsourcing", "previous round"]} +{"id": "machacek-bojar-2013-results", "title": "Results of the WMT13 Metrics Shared Task", "abstract": "This paper presents the results of the WMT17 Metrics Shared Task. We asked participants of this task to score the outputs of the MT systems involved in the WMT17 news translation task and Neural MT training task. We collected scores of 14 metrics from 8 research groups. In addition to that, we computed scores of 7 standard metrics (BLEU, SentBLEU, NIST, WER, PER, TER and CDER) as baselines. The collected scores were evaluated in terms of system-level correlation (how well each metric\u2019s scores correlate with WMT17 official manual ranking of systems) and in terms of segment level correlation (how often a metric agrees with humans in judging the quality of a particular sentence). This year, we build upon two types of manual judgements: direct assessment (DA) and HUME manual semantic judgements.", "keyphrases": ["direct assessment", "evaluation metric", "wmt", "high correlation", "translation quality"]} +{"id": "chambers-jurafsky-2008-jointly", "title": "Jointly Combining Implicit Constraints Improves Temporal Ordering", "abstract": "Previous work on ordering events in text has typically focused on local pairwise decisions, ignoring globally inconsistent labels. However, temporal ordering is the type of domain in which global constraints should be relatively easy to represent and reason over. This paper presents a framework that informs local decisions with two types of implicit global constraints: transitivity (A before B and B before C implies A before C) and time expression normalization (e.g. last month is before yesterday). We show how these constraints can be used to create a more densely-connected network of events, and how global consistency can be enforced by incorporating these constraints into an integer linear programming framework.
We present results on two event ordering tasks, showing a 3.6% absolute increase in the accuracy of before/after classification over a pairwise model.", "keyphrases": ["temporal ordering", "integer linear programming", "global information", "relation extraction", "transitivity constraint"]} +{"id": "wu-etal-2019-generating", "title": "Generating Question Relevant Captions to Aid Visual Question Answering", "abstract": "Visual question answering (VQA) and image captioning require a shared body of general knowledge connecting language and vision. We present a novel approach to better VQA performance that exploits this connection by jointly generating captions that are targeted to help answer a specific visual question. The model is trained using an existing caption dataset by automatically determining question-relevant captions using an online gradient-based method. Experimental results on the VQA v2 challenge demonstrate that our approach obtains state-of-the-art VQA performance (e.g. 68.4% in the Test-standard set using a single model) by simultaneously generating question-relevant captions.", "keyphrases": ["caption", "visual question", "vision-language task"]} +{"id": "chen-2009-performance", "title": "Performance Prediction for Exponential Language Models", "abstract": "We investigate the task of performance prediction for language models belonging to the exponential family. First, we attempt to empirically discover a formula for predicting test set cross-entropy for n-gram language models. We build models over varying domains, data set sizes, and n-gram orders, and perform linear regression to see whether we can model test set performance as a simple function of training set performance and various model statistics. Remarkably, we find a simple relationship that predicts test set performance with a correlation of 0.9997. We analyze why this relationship holds and show that it holds for other exponential language models as well, including class-based models and minimum discrimination information models. Finally, we discuss how this relationship can be applied to improve language model performance.", "keyphrases": ["exponential language model", "cross-entropy", "performance prediction"]} +{"id": "jing-etal-2018-automatic", "title": "On the Automatic Generation of Medical Imaging Reports", "abstract": "Medical imaging is widely used in clinical practice for diagnosis and treatment. Report-writing can be error-prone for inexperienced physicians, and time-consuming and tedious for experienced physicians. To address these issues, we study the automatic generation of medical imaging reports. This task presents several challenges. First, a complete report contains multiple heterogeneous forms of information, including findings and tags. Second, abnormal regions in medical images are difficult to identify. Third, the reports are typically long, containing multiple sentences. To cope with these challenges, we (1) build a multi-task learning framework which jointly performs the prediction of tags and the generation of paragraphs, (2) propose a co-attention mechanism to localize regions containing abnormalities and generate narrations for them, and (3) develop a hierarchical LSTM model to generate long paragraphs.
We demonstrate the effectiveness of the proposed methods on two publicly available datasets.", "keyphrases": ["automatic generation", "image", "radiologist", "medical report"]} +{"id": "rodriguez-etal-2010-anaphoric", "title": "Anaphoric Annotation of Wikipedia and Blogs in the Live Memories Corpus", "abstract": "The Live Memories corpus is an Italian corpus annotated for anaphoric relations. This annotation effort aims to contribute to two significant issues for CL research: the lack of annotated anaphoric resources for Italian and the increasing interest in the social Web. The Live Memories Corpus contains texts from the Italian Wikipedia about the region Trentino/S\u00fcd Tirol and from blog sites with users' comments. It is planned to add a set of articles from local newspapers. The corpus includes manually annotated information about morphosyntactic agreement, anaphoricity, and semantic class of the NPs. The anaphoric annotation includes discourse deixis and bridging relations, and marks cases of ambiguity with the annotation of alternative interpretations. For the annotation of the anaphoric links the corpus takes into account specific phenomena of the Italian language like incorporated clitics and phonetically non-realized pronouns. Reliability studies for the annotation of the mentioned phenomena and for the annotation of anaphoric links in general offer satisfactory results. The Wikipedia and blogs dataset will be distributed under a Creative Commons Attribution licence.", "keyphrases": ["wikipedia", "live memories corpus", "anaphoric annotation"]} +{"id": "glavas-vulic-2018-explicit", "title": "Explicit Retrofitting of Distributional Word Vectors", "abstract": "Semantic specialization of distributional word vectors, referred to as retrofitting, is a process of fine-tuning word vectors using external lexical knowledge in order to better embed some semantic relation. Existing retrofitting models integrate linguistic constraints directly into learning objectives and, consequently, specialize only the vectors of words from the constraints. In this work, in contrast, we transform external lexico-semantic relations into training examples which we use to learn an explicit retrofitting model (ER). The ER model allows us to learn a global specialization function and specialize the vectors of words unobserved in the training data as well. We report large gains over original distributional vector spaces on (1) intrinsic word similarity evaluation and (2) two downstream tasks \u2212 lexical simplification and dialog state tracking. Finally, we also successfully specialize vector spaces of new languages (i.e., unseen in the training data) by coupling ER with shared multilingual distributional vector spaces.", "keyphrases": ["distributional word vector", "specialization", "explicit retrofitting model"]} +{"id": "stab-etal-2018-cross", "title": "Cross-topic Argument Mining from Heterogeneous Sources", "abstract": "Argument mining is a core technology for automating argument search in large document collections. Despite its usefulness for this task, most current approaches are designed for use only with specific text types and fall short when applied to heterogeneous texts. In this paper, we propose a new sentential annotation scheme that is reliably applicable by crowd workers to arbitrary Web texts. We source annotations for over 25,000 instances covering eight controversial topics.
We show that integrating topic information into bidirectional long short-term memory networks outperforms vanilla BiLSTMs by more than 3 percentage points in F1 in two- and three-label cross-topic settings. We also show that these results can be further improved by leveraging additional data for topic relevance using multi-task learning.", "keyphrases": ["argumentation", "heterogeneous source", "web text", "topic information", "claim detection"]} +{"id": "nakov-hearst-2005-using", "title": "Using the Web as an Implicit Training Set: Application to Structural Ambiguity Resolution", "abstract": "Recent work has shown that very large corpora can act as training data for NLP algorithms even without explicit labels. In this paper we show how the use of surface features and paraphrases in queries against search engines can be used to infer labels for structural ambiguity resolution tasks. Using unsupervised algorithms, we achieve 84% precision on PP-attachment and 80% on noun compound coordination.", "keyphrases": ["web", "paraphrase", "unsupervised algorithm", "noun"]} +{"id": "frontini-etal-2012-verb", "title": "Verb interpretation for basic action types: annotation, ontology induction and creation of prototypical scenes", "abstract": "In the last 20 years dictionaries and lexicographic resources such as WordNet have started to be enriched with multimodal content. Short videos depicting basic actions support the user\u2019s need (especially in second language acquisition) to fully understand the range of applicability of verbs. The IMAGACT project has among its results a repository of action verbs ontologically organised around prototypical action scenes in the form of both video recordings and 3D animations. The creation of the IMAGACT ontology, which consists of deriving action types from corpus instances of action verbs, validating them intra- and cross-linguistically, and producing the prototypical scenes thereof, is the preliminary step for the creation of a resource that users can browse by verb, learning how to match different action prototypes with the correct verbs in the target language. The mapping of IMAGACT types onto WordNet synsets allows for a mutual enrichment of both resources.
", "keyphrases": ["action type", "creation", "prototypical scene"]} +{"id": "huang-carley-2018-parameterized", "title": "Parameterized Convolutional Neural Networks for Aspect Level Sentiment Classification", "abstract": "We introduce a novel parameterized convolutional neural network for aspect-level sentiment classification. Using parameterized filters and parameterized gates, we incorporate aspect information into convolutional neural networks (CNN). Experiments demonstrate that our parameterized filters and parameterized gates effectively capture the aspect-specific features, and our CNN-based models achieve excellent results on SemEval 2014 datasets.", "keyphrases": ["convolutional neural network", "sentiment classification", "cnn"]} +{"id": "roy-etal-2015-reasoning", "title": "Reasoning about Quantities in Natural Language", "abstract": "Little work from the Natural Language Processing community has targeted the role of quantities in Natural Language Understanding. This paper takes some key steps towards facilitating reasoning about quantities expressed in natural language. We investigate two different tasks of numerical reasoning. First, we consider Quantity Entailment, a new task formulated to understand the role of quantities in general textual inference tasks. Second, we consider the problem of automatically understanding and solving elementary school math word problems. In order to address these quantitative reasoning problems we first develop a computational approach which we show to successfully recognize and normalize textual expressions of quantities. We then use these capabilities to further develop algorithms to assist reasoning in the context of the aforementioned tasks.", "keyphrases": ["quantity", "word problem", "operand"]} +{"id": "korkontzelos-etal-2013-semeval", "title": "SemEval-2013 Task 5: Evaluating Phrasal Semantics", "abstract": "This paper describes SemEval-2013 Task 5: \u201cEvaluating Phrasal Semantics\u201d. Its first subtask is about computing the semantic similarity of words and compositional phrases of minimal length. The second one addresses deciding the compositionality of phrases in a given context. The paper discusses the importance and background of these subtasks and their structure. It then introduces the systems that participated and discusses evaluation results.", "keyphrases": ["evaluating phrasal semantics", "semantic similarity", "semeval-2013 task"]} +{"id": "wang-etal-2020-sentiment", "title": "Sentiment Forecasting in Dialog", "abstract": "Sentiment forecasting in dialog aims to predict the polarity of the next utterance to come, and can help speakers revise their utterances in sentimental utterance generation. However, the polarity of the next utterance is normally hard to predict, due to the lack of content of the next utterance (yet to come). In this study, we propose a Neural Sentiment Forecasting (NSF) model to address these inherent challenges. In particular, we employ a neural simulation model to simulate the next utterance based on the context (previous utterances encountered). Moreover, we employ a sequence influence model to learn both pair-wise and seq-wise influence.
Empirical studies illustrate the importance of the proposed sentiment forecasting task, and justify the effectiveness of our NSF model over several strong baselines.", "keyphrases": ["dialog", "next utterance", "sentiment forecasting"]} +{"id": "li-2015-abstractive", "title": "Abstractive Multi-document Summarization with Semantic Information Extraction", "abstract": "This paper proposes a novel approach to generate abstractive summaries for multiple documents by extracting semantic information from texts. The concept of Basic Semantic Unit (BSU) is defined to describe the semantics of an event or action. A semantic link network on BSUs is constructed to capture the semantic information of texts. Summary structure is planned with sentences generated based on the semantic link network. Experiments demonstrate that the approach is effective in generating informative, coherent and compact summaries.", "keyphrases": ["basic semantic unit", "compact summary", "abstractive multi-document summarization"]} +{"id": "bicici-way-2014-rtm", "title": "RTM-DCU: Referential Translation Machines for Semantic Similarity", "abstract": "We use referential translation machines (RTMs) for predicting the semantic similarity of text. RTMs are a computational model for identifying the translation acts between any two data sets with respect to interpretants selected in the same domain, which are effective when making monolingual and bilingual similarity judgments. RTMs judge the quality or the semantic similarity of text by using retrieved relevant training data as interpretants for reaching shared semantics. We derive features measuring the closeness of the test sentences to the training data via interpretants, the difficulty of translating them, and the presence of the acts of translation, which may ubiquitously be observed in communication. RTMs provide a language-independent approach to all similarity tasks and achieve top performance when predicting monolingual cross-level semantic similarity (Task 3) and good results in semantic relatedness and entailment (Task 1) and multilingual semantic textual similarity (STS) (Task 10). RTMs remove the need to access any task- or domain-specific information or resource.", "keyphrases": ["referential translation machine", "semantic similarity", "sts task"]} +{"id": "banko-etzioni-2008-tradeoffs", "title": "The Tradeoffs Between Open and Traditional Relation Extraction", "abstract": "Traditional Information Extraction (IE) takes a relation name and hand-tagged examples of that relation as input. Open IE is a relation-independent extraction paradigm that is tailored to massive and heterogeneous corpora such as the Web. An Open IE system extracts a diverse set of relational tuples from text without any relation-specific input. How is Open IE possible? We analyze a sample of English sentences to demonstrate that numerous relationships are expressed using a compact set of relation-independent lexico-syntactic patterns, which can be learned by an Open IE system. What are the tradeoffs between Open IE and traditional IE? We consider this question in the context of two tasks. First, when the number of relations is massive, and the relations themselves are not pre-specified, we argue that Open IE is necessary. We then present a new model for Open IE called O-CRF and show that it achieves increased precision and nearly double the recall of the model employed by TEXTRUNNER, the previous state-of-the-art Open IE system.
Second, when the number of target relations is small, and their names are known in advance, we show that O-CRF is able to match the precision of a traditional extraction system, though at substantially lower recall. Finally, we show how to combine the two types of systems into a hybrid that achieves higher precision than a traditional extractor, with comparable recall.", "keyphrases": ["traditional relation extraction", "hand-tagged example", "tuple", "textrunner", "low recall"]} +{"id": "pichotta-denero-2013-identifying", "title": "Identifying Phrasal Verbs Using Many Bilingual Corpora", "abstract": "We address the problem of identifying multiword expressions in a language, focusing on English phrasal verbs. Our polyglot ranking approach integrates frequency statistics from translated corpora in 50 different languages. Our experimental evaluation demonstrates that combining statistical evidence from many parallel corpora using a novel ranking-oriented boosting algorithm produces a comprehensive set of English phrasal verbs, achieving performance comparable to a human-curated set.", "keyphrases": ["phrasal verb", "bilingual corpora", "frequency statistic"]} +{"id": "elliott-etal-2016-multi30k", "title": "Multi30K: Multilingual English-German Image Descriptions", "abstract": "We introduce the Multi30K dataset to stimulate multilingual multimodal research. Recent advances in image description have been demonstrated on English-language datasets almost exclusively, but image description should not be limited to English. This dataset extends the Flickr30K dataset with i) German translations created by professional translators over a subset of the English descriptions, and ii) descriptions crowdsourced independently of the original English descriptions. We outline how the data can be used for multilingual image description and multimodal machine translation, but we anticipate the data will be useful for a broader range of tasks.", "keyphrases": ["english-german image descriptions", "multimodal machine translation", "mmt model", "french"]} +{"id": "spitkovsky-etal-2010-viterbi", "title": "Viterbi Training Improves Unsupervised Dependency Parsing", "abstract": "We show that Viterbi (or \"hard\") EM is well-suited to unsupervised grammar induction. It is more accurate than standard inside-outside re-estimation (classic EM), significantly faster, and simpler. Our experiments with Klein and Manning's Dependency Model with Valence (DMV) attain state-of-the-art performance --- 44.8% accuracy on Section 23 (all sentences) of the Wall Street Journal corpus --- without clever initialization; with a good initializer, Viterbi training improves to 47.9%. This generalizes to the Brown corpus, our held-out set, where accuracy reaches 50.8% --- a 7.5% gain over previous best results. We find that classic EM learns better from short sentences but cannot cope with longer ones, where Viterbi thrives. 
However, we explain that both algorithms optimize the wrong objectives and prove that there are fundamental disconnects between the likelihoods of sentences, best parses, and true parses, beyond the well-established discrepancies between likelihood, accuracy and extrinsic performance.", "keyphrases": ["unsupervised dependency parsing", "objective", "viterbi training"]} +{"id": "forbes-choi-2017-verb", "title": "Verb Physics: Relative Physical Knowledge of Actions and Objects", "abstract": "Learning commonsense knowledge from natural language text is nontrivial due to reporting bias: people rarely state the obvious, e.g., \u201cMy house is bigger than me.\u201d However, while rarely stated explicitly, this trivial everyday knowledge does influence the way people talk about the world, which provides indirect clues to reason about the world. For example, a statement like, \u201cTyler entered his house\u201d implies that his house is bigger than Tyler. In this paper, we present an approach to infer relative physical knowledge of actions and objects along five dimensions (e.g., size, weight, and strength) from unstructured natural language text. We frame knowledge acquisition as joint inference over two closely related problems: learning (1) relative physical knowledge of object pairs and (2) physical implications of actions when applied to those object pairs. Empirical results demonstrate that it is possible to extract knowledge of actions and objects from language and that joint inference over different types of knowledge improves performance.", "keyphrases": ["relative physical knowledge", "object", "dimension"]} +{"id": "ostling-2016-morphological", "title": "Morphological reinflection with convolutional neural networks", "abstract": "We present a system for morphological reinflection based on an encoder-decoder neural network model with extra convolutional layers. In spite of its simplicity, the method performs reasonably well on all the languages of the SIGMORPHON 2016 shared task, particularly for the most challenging problem of limited-resources reinflection (track 2, task 3). We also find that using only convolution achieves surprisingly good results in this task, surpassing the accuracy of our encoder-decoder model for several languages.", "keyphrases": ["convolutional neural network", "morphological reinflection", "neural sequence-to-sequence model"]} +{"id": "culotta-etal-2006-integrating", "title": "Integrating Probabilistic Extraction Models and Data Mining to Discover Relations and Patterns in Text", "abstract": "In order for relation extraction systems to obtain human-level performance, they must be able to incorporate relational patterns inherent in the data (for example, that one's sister is likely one's mother's daughter, or that children are likely to attend the same college as their parents). Hand-coding such knowledge can be time-consuming and inadequate. Additionally, there may exist many interesting, unknown relational patterns that both improve extraction performance and provide insight into text. We describe a probabilistic extraction model that provides mutual benefits to both \"top-down\" relational pattern discovery and \"bottom-up\" relation extraction.", "keyphrases": ["relation extraction", "contextual pattern", "wikipedia"]} +{"id": "dahlmeier-etal-2012-nus", "title": "NUS at the HOO 2012 Shared Task", "abstract": "This paper describes the submission of the National University of Singapore (NUS) to the HOO 2012 shared task. 
Our system uses a pipeline of confidence-weighted linear classifiers to correct determiner and preposition errors. It achieves the highest correction F1 score on the official test set among all 14 participating teams, based on gold-standard edits both before and after revision.", "keyphrases": ["hoo", "preposition error", "word usage", "ill-formed grammatical construction", "error correction"]} +{"id": "gupta-ji-2009-predicting", "title": "Predicting Unknown Time Arguments based on Cross-Event Propagation", "abstract": "Many events in news articles don't include time arguments. This paper describes two methods, one based on rules and the other based on statistical learning, to predict the unknown time argument for an event by propagation from its related events. The results are promising: the rule-based approach was able to correctly predict 74% of the unknown event time arguments with 70% precision.", "keyphrases": ["propagation", "time information", "cross-event information", "ace event task", "more clue"]} +{"id": "sagae-lavie-2006-best", "title": "A Best-First Probabilistic Shift-Reduce Parser", "abstract": "Recently proposed deterministic classifier-based parsers (Nivre and Scholz, 2004; Sagae and Lavie, 2005; Yamada and Matsumoto, 2003) offer attractive alternatives to generative statistical parsers. Deterministic parsers are fast, efficient, and simple to implement, but generally less accurate than optimal (or nearly optimal) statistical parsers. We present a statistical shift-reduce parser that bridges the gap between deterministic and probabilistic parsers. The parsing model is essentially the same as one previously used for deterministic parsing, but the parser performs a best-first search instead of a greedy search. Using the standard sections of the WSJ corpus of the Penn Treebank for training and testing, our parser has 88.1% precision and 87.8% recall (using automatically assigned part-of-speech tags). Perhaps more interestingly, the parsing model is significantly different from the generative models used by other well-known accurate parsers, allowing for a simple combination that produces precision and recall of 90.9% and 90.7%, respectively.", "keyphrases": ["shift-reduce parser", "best-first search", "penn treebank"]} +{"id": "venkatapathy-joshi-2006-using", "title": "Using Information about Multi-word Expressions for the Word-Alignment Task", "abstract": "It is well known that multi-word expressions are problematic in natural language processing. In previous literature, it has been suggested that information about their degree of compositionality can be helpful in various applications, but this has not been proven empirically. In this paper, we propose a framework in which information about multi-word expressions can be used in the word-alignment task. We have shown that even simple features like point-wise mutual information are useful for the word-alignment task in English-Hindi parallel corpora.
The alignment error rate which we achieve (AER = 0.5040) is significantly better (about a 10% decrease in AER) than the alignment error rates of the state-of-the-art models (Och and Ney, 2003) (best AER = 0.5518) on the English-Hindi dataset.", "keyphrases": ["word-alignment task", "verb-based multi-word expression", "compositionality information"]} +{"id": "dev-etal-2021-oscar", "title": "OSCaR: Orthogonal Subspace Correction and Rectification of Biases in Word Embeddings", "abstract": "Language representations are known to carry stereotypical biases and, as a result, lead to biased predictions in downstream tasks. While existing methods are effective at mitigating biases by linear projection, such methods are too aggressive: they not only remove bias, but also erase valuable information from word embeddings. We develop new measures for evaluating specific information retention that demonstrate the tradeoff between bias removal and information retention. To address this challenge, we propose OSCaR (Orthogonal Subspace Correction and Rectification), a bias-mitigating method that focuses on disentangling biased associations between concepts instead of removing concepts wholesale. Our experiments on gender biases show that OSCaR is a well-balanced approach that ensures that semantic information is retained in the embeddings and bias is also effectively mitigated.", "keyphrases": ["orthogonal subspace correction", "rectification", "oscar"]} +{"id": "daume-iii-marcu-2005-large", "title": "A Large-Scale Exploration of Effective Global Features for a Joint Entity Detection and Tracking Model", "abstract": "Entity detection and tracking (EDT) is the task of identifying textual mentions of real-world entities in documents, extending the named entity detection and coreference resolution task by considering mentions other than names (pronouns, definite descriptions, etc.). Like NE tagging and coreference resolution, most solutions to the EDT task separate out the mention detection aspect from the coreference aspect. By doing so, these solutions are limited to using only local features for learning. In contrast, by modeling both aspects of the EDT task simultaneously, we are able to learn using highly complex, non-local features. We develop a new joint EDT model and explore the utility of many features, demonstrating their effectiveness on this task.", "keyphrases": ["entity detection", "coreference resolution", "search optimization framework"]} +{"id": "reiter-belz-2009-investigation", "title": "An Investigation into the Validity of Some Metrics for Automatically Evaluating Natural Language Generation Systems", "abstract": "There is growing interest in using automatically computed corpus-based evaluation metrics to evaluate Natural Language Generation (NLG) systems, because these are often considerably cheaper than the human-based evaluations which have traditionally been used in NLG. We review previous work on NLG evaluation and on validation of automatic metrics in NLP, and then present the results of two studies of how well some metrics which are popular in other areas of NLP (notably BLEU and ROUGE) correlate with human judgments in the domain of computer-generated weather forecasts. Our results suggest that, at least in this domain, metrics may provide a useful measure of language quality, although the evidence for this is not as strong as we would ideally like to see; however, they do not provide a useful measure of content quality.
We also discuss a number of caveats which must be kept in mind when interpreting this and other validation studies.", "keyphrases": ["nlg", "automatic metric", "text quality"]} +{"id": "belz-etal-2020-disentangling", "title": "Disentangling the Properties of Human Evaluation Methods: A Classification System to Support Comparability, Meta-Evaluation and Reproducibility Testing", "abstract": "Current standards for designing and reporting human evaluations in NLP mean it is generally unclear which evaluations are comparable and can be expected to yield similar results when applied to the same system outputs. This has serious implications for reproducibility testing and meta-evaluation, in particular given that human evaluation is considered the gold standard against which the trustworthiness of automatic metrics is gauged. Using examples from NLG, we propose a classification system for evaluations based on disentangling (i) what is being evaluated (which aspect of quality), and (ii) how it is evaluated in specific (a) evaluation modes and (b) experimental designs. We show that this approach provides a basis for determining comparability, hence for comparison of evaluations across papers, meta-evaluation experiments, and reproducibility testing.", "keyphrases": ["classification system", "meta-evaluation", "reproducibility testing", "natural language generation"]} +{"id": "do-etal-2022-text", "title": "Text-to-Speech for Under-Resourced Languages: Phoneme Mapping and Source Language Selection in Transfer Learning", "abstract": "We propose a new approach for phoneme mapping in cross-lingual transfer learning for text-to-speech (TTS) in under-resourced languages (URLs), using phonological features from the PHOIBLE database and a language-independent mapping rule. This approach was validated through our experiment, in which we pre-trained acoustic models in Dutch, Finnish, French, Japanese, and Spanish, and fine-tuned them with 30 minutes of Frisian training data. The experiment showed an improvement in both naturalness and pronunciation accuracy in the synthesized Frisian speech when our mapping approach was used. Since this improvement also depended on the source language, we then experimented with finding a good criterion for selecting source languages. As an alternative to the traditionally used language family criterion, we tested a novel idea of using Angular Similarity of Phoneme Frequencies (ASPF), which measures the similarity between the phoneme systems of two languages. ASPF was empirically confirmed to be more effective than language family as a criterion for source language selection, and also to affect the phoneme mapping's effectiveness. Thus, a combination of our phoneme mapping approach and the ASPF measure can be beneficially adopted by other studies involving multilingual or cross-lingual TTS for URLs.", "keyphrases": ["phoneme mapping", "source language selection", "text-to-speech"]} +{"id": "bao-etal-2016-constraint", "title": "Constraint-Based Question Answering with Knowledge Graph", "abstract": "WebQuestions and SimpleQuestions are two benchmark data-sets commonly used in recent knowledge-based question answering (KBQA) work. Most questions in them are `simple' questions which can be answered based on a single relation in the knowledge base. Such data-sets lack the capability of evaluating KBQA systems on complicated questions.
Motivated by this issue, we release a new data-set, namely ComplexQuestions, aiming to measure the quality of KBQA systems on `multi-constraint' questions which require multiple knowledge base relations to get the answer. Besides, we propose a novel systematic KBQA approach to solve multi-constraint questions. Compared to state-of-the-art methods, our approach not only obtains comparable results on the two existing benchmark data-sets, but also achieves significant improvements on ComplexQuestions.", "keyphrases": ["knowledge graph", "complex question", "query graph", "reasoning", "semantic parsing"]} +{"id": "zhang-etal-2007-partial", "title": "Partial Parse Selection for Robust Deep Processing", "abstract": "This paper presents an approach to partial parse selection for robust deep processing. The work is based on a bottom-up chart parser for HPSG parsing. Following the definition of partial parses in (Kasper et al., 1999), different partial parse selection methods are presented and evaluated on the basis of multiple metrics, from both the syntactic and semantic viewpoints. The application of partial parsing to the processing of spontaneous speech texts shows the promising competence of the method.", "keyphrases": ["robust deep processing", "definition", "partial parse selection"]} +{"id": "kuhlmann-satta-2009-treebank", "title": "Treebank Grammar Techniques for Non-Projective Dependency Parsing", "abstract": "An open problem in dependency parsing is the accurate and efficient treatment of non-projective structures. We propose to attack this problem using chart-parsing algorithms developed for mildly context-sensitive grammar formalisms. In this paper, we provide two key tools for this approach. First, we show how to reduce non-projective dependency parsing to parsing with Linear Context-Free Rewriting Systems (LCFRS), by presenting a technique for extracting LCFRS from dependency treebanks. For efficient parsing, the extracted grammars need to be transformed in order to minimize the number of nonterminal symbols per production. Our second contribution is an algorithm that computes this transformation for a large, empirically relevant class of grammars.", "keyphrases": ["non-projective dependency", "chart-parsing algorithm", "lcfrs", "dependency treebank"]} +{"id": "andreas-etal-2016-learning", "title": "Learning to Compose Neural Networks for Question Answering", "abstract": "We describe a question answering model that applies to both images and structured knowledge bases. The model uses natural language strings to automatically assemble neural networks from a collection of composable modules. Parameters for these modules are learned jointly with network-assembly parameters via reinforcement learning, with only (world, question, answer) triples as supervision. Our approach, which we term a dynamic neural model network, achieves state-of-the-art results on benchmark datasets in both visual and structured domains.", "keyphrases": ["module", "reinforcement learning", "neural modular network"]} +{"id": "lu-ng-2010-better", "title": "Better Punctuation Prediction with Dynamic Conditional Random Fields", "abstract": "This paper focuses on the task of inserting punctuation symbols into transcribed conversational speech texts, without relying on prosodic cues. We investigate limitations associated with previous methods, and propose a novel approach based on dynamic conditional random fields.
Different from previous work, our proposed approach is designed to jointly perform both sentence boundary and sentence type prediction, and punctuation prediction on speech utterances. \n \nWe performed evaluations on a transcribed conversational speech domain consisting of both English and Chinese texts. Empirical results show that our method outperforms an approach based on linear-chain conditional random fields and other previous approaches.", "keyphrases": ["punctuation prediction", "conditional random field", "prosodic cue", "machine translation"]} +{"id": "xiong-etal-2008-linguistically", "title": "Linguistically Annotated BTG for Statistical Machine Translation", "abstract": "Bracketing Transduction Grammar (BTG) is a natural choice for effective integration of desired linguistic knowledge into statistical machine translation (SMT). In this paper, we propose a Linguistically Annotated BTG (LABTG) for SMT. It conveys linguistic knowledge of source-side syntax structures to BTG hierarchical structures through linguistic annotation. From the linguistically annotated data, we learn annotated BTG rules and train a linguistically motivated phrase translation model and reordering model. We also present an annotation algorithm that captures syntactic information for BTG nodes. The experiments show that the LABTG approach significantly outperforms a baseline BTG-based system and a state-of-the-art phrase-based system on the NIST MT-05 Chinese-to-English translation task. Moreover, we empirically demonstrate that the proposed method achieves better translation selection and phrase reordering.", "keyphrases": ["statistical machine translation", "labtg", "linguistically annotated btg"]} +{"id": "li-etal-2012-exploiting", "title": "Exploiting Multiple Treebanks for Parsing with Quasi-synchronous Grammars", "abstract": "We present a simple and effective framework for exploiting multiple monolingual treebanks with different annotation guidelines for parsing. Several types of transformation patterns (TP) are designed to capture the systematic annotation inconsistencies among different tree-banks. Based on such TPs, we design quasi-synchronous grammar features to augment the baseline parsing models. Our approach can significantly advance the state-of-the-art parsing accuracy on two widely used target tree-banks (Penn Chinese Treebank 5.1 and 6.0) using the Chinese Dependency Treebank as the source treebank. The improvements are respectively 1.37% and 1.10% with automatic part-of-speech tags. Moreover, an indirect comparison indicates that our approach also outperforms previous work based on treebank conversion.", "keyphrases": ["treebank", "several type", "annotation inconsistency", "quasi-synchronous grammar feature"]} +{"id": "pham-etal-2017-nnvlp", "title": "NNVLP: A Neural Network-Based Vietnamese Language Processing Toolkit", "abstract": "This paper demonstrates a neural network-based toolkit, namely NNVLP, for essential Vietnamese language processing tasks, including part-of-speech (POS) tagging, chunking, and Named Entity Recognition (NER). Our toolkit is a combination of a bidirectional Long Short-Term Memory (Bi-LSTM), a Convolutional Neural Network (CNN), and a Conditional Random Field (CRF), using pre-trained word embeddings as input, and outperforms previously published toolkits on these three tasks.
We provide both an API and a web demo for this toolkit.", "keyphrases": ["neural network-based toolkit", "language processing task", "entity recognition", "nnvlp", "feature-based model"]} +{"id": "salesky-etal-2021-robust", "title": "Robust Open-Vocabulary Translation from Visual Text Representations", "abstract": "Machine translation models have discrete vocabularies and commonly use subword segmentation techniques to achieve an `open vocabulary.' This approach relies on consistent and correct underlying unicode sequences, and makes models susceptible to degradation from common types of noise and variation. Motivated by the robustness of human language processing, we propose the use of visual text representations, which dispense with a finite set of text embeddings in favor of continuous vocabularies created by processing visually rendered text with sliding windows. We show that models using visual text representations approach or match performance of traditional text models on small and larger datasets. More importantly, models with visual embeddings demonstrate significant robustness to varied types of noise, achieving e.g., 25.9 BLEU on a character-permuted German\u2013English task where subword models degrade to 1.9.", "keyphrases": ["open-vocabulary translation", "visual text representation", "noise"]} +{"id": "wang-etal-2021-secoco-self", "title": "Secoco: Self-Correcting Encoding for Neural Machine Translation", "abstract": "This paper presents Self-correcting Encoding (Secoco), a framework that effectively deals with noisy input for robust neural machine translation by introducing self-correcting predictors. Different from previous robust approaches, Secoco enables NMT to explicitly correct noisy inputs and delete specific errors simultaneously with the translation decoding process. Secoco is able to achieve significant improvements over strong baselines on two real-world test sets and a benchmark WMT dataset with good interpretability. We will make our code and dataset publicly available soon.", "keyphrases": ["self-correcting encoding", "neural machine translation", "noisy input", "secoco"]} +{"id": "maletti-2012-every", "title": "Every sensible extended top-down tree transducer is a multi bottom-up tree transducer", "abstract": "A tree transformation is sensible if the size of each output tree is uniformly bounded by a linear function in the size of the corresponding input tree. Every sensible tree transformation computed by an arbitrary weighted extended top-down tree transducer can also be computed by a weighted multi bottom-up tree transducer. This further motivates weighted multi bottom-up tree transducers as suitable translation models for syntax-based machine translation.", "keyphrases": ["top-down tree transducer", "sensible translation", "mbot"]} +{"id": "sultan-etal-2014-back", "title": "Back to Basics for Monolingual Alignment: Exploiting Word Similarity and Contextual Evidence", "abstract": "We present a simple, easy-to-replicate monolingual aligner that demonstrates state-of-the-art performance while relying on almost no supervision and a very small number of external resources. Based on the hypothesis that words with similar meanings represent potential pairs for alignment if located in similar contexts, we propose a system that operates by finding such pairs. In two intrinsic evaluations on alignment test data, our system achieves F1 scores of 88\u201392%, demonstrating 1\u20133% absolute improvement over the previous best system.
Moreover, in two extrinsic evaluations our aligner outperforms existing aligners, and even a naive application of the aligner approaches state-of-the-art performance in each extrinsic task.", "keyphrases": ["monolingual alignment", "contextual evidence", "dependency type", "equivalence"]} +{"id": "zens-ney-2006-n", "title": "N-Gram Posterior Probabilities for Statistical Machine Translation", "abstract": "Word posterior probabilities are a common approach for confidence estimation in automatic speech recognition and machine translation. We will generalize this idea and introduce n-gram posterior probabilities and show how these can be used to improve translation quality. Additionally, we will introduce a sentence length model based on posterior probabilities. \n \nWe will show significant improvements on the Chinese-English NIST task. The absolute improvement of the BLEU score is between 1.1% and 1.6%.", "keyphrases": ["posterior probability", "n-gram", "performance improvement"]} +{"id": "yih-etal-2011-learning", "title": "Learning Discriminative Projections for Text Similarity Measures", "abstract": "Traditional text similarity measures consider each term similar only to itself and do not model semantic relatedness of terms. We propose a novel discriminative training method that projects the raw term vectors into a common, low-dimensional vector space. Our approach operates by finding the optimal matrix to minimize the loss of the pre-selected similarity function (e.g., cosine) of the projected vectors, and is able to efficiently handle a large number of training examples in the high-dimensional space. Evaluated on two very different tasks, cross-lingual document retrieval and ad relevance measure, our method not only outperforms existing state-of-the-art approaches, but also achieves high accuracy at low dimensions and is thus more efficient.", "keyphrases": ["text similarity measure", "different language", "s2net", "query"]} +{"id": "yamamoto-isahara-2007-extracting", "title": "Extracting Word Sets with Non-Taxonomical Relation", "abstract": "At least two kinds of relations exist among related words: taxonomical relations and thematic relations. Both relations identify related words useful to language understanding and generation, information retrieval, and so on. However, although words with taxonomical relations are easy to identify from linguistic resources such as dictionaries and thesauri, words with thematic relations are difficult to identify because they are rarely maintained in linguistic resources. In this paper, we sought to extract thematically (non-taxonomically) related word sets among words in documents by employing case-marking particles derived from syntactic analysis. We then verified the usefulness for information retrieval of word sets with a non-taxonomical relation that seems to be a thematic relation.", "keyphrases": ["word set", "non-taxonomical relation", "thematic relation"]} +{"id": "komninos-manandhar-2017-feature", "title": "Feature-Rich Networks for Knowledge Base Completion", "abstract": "We propose jointly modelling Knowledge Bases and aligned text with Feature-Rich Networks. Our models perform Knowledge Base Completion by learning to represent and compose diverse feature types from partially aligned and noisy resources. We perform experiments on Freebase utilizing additional entity type information and syntactic textual relations.
Our evaluation suggests that the proposed models can better incorporate side information than previously proposed combinations of bilinear models with convolutional neural networks, showing large improvements when scoring the plausibility of unobserved facts with associated textual mentions.", "keyphrases": ["knowledge base completion", "mention", "feature-rich networks"]} +{"id": "losnegaard-etal-2016-parseme", "title": "PARSEME Survey on MWE Resources", "abstract": "This paper summarizes the preliminary results of an ongoing survey on multiword resources carried out within the IC1207 Cost Action PARSEME (PARSing and Multi-word Expressions). Despite the availability of language resource catalogs and the inventory of multiword datasets on the SIGLEX-MWE website, multiword resources are scattered and difficult to find. In many cases, language resources such as corpora, treebanks, or lexical databases include multiwords as part of their data or take them into account in their annotations. However, these resources need to be centralized to make them accessible. The aim of this survey is to create a portal where researchers can easily find multiword(-aware) language resources for their research. We report on the design of the survey and analyze the data gathered so far. We also discuss the problems we have detected upon examination of the data as well as possible ways of enhancing the survey.", "keyphrases": ["survey", "mwe resource", "treebank"]} +{"id": "brooke-etal-2014-unsupervised", "title": "Unsupervised Multiword Segmentation of Large Corpora using Prediction-Driven Decomposition of n-grams", "abstract": "We present a new, efficient unsupervised approach to the segmentation of corpora into multiword units. Our method involves initial decomposition of common n-grams into segments which maximize within-segment predictability of words, and then further refinement of these segments into a multiword lexicon. Evaluating in four large, distinct corpora, we show that this method creates segments which correspond well to known multiword expressions; our model is particularly strong with regards to longer (3+ word) multiword units, which are often ignored or minimized in relevant work.", "keyphrases": ["segmentation", "large corpora", "decomposition", "predictability"]} +{"id": "dhingra-etal-2022-time", "title": "Time-Aware Language Models as Temporal Knowledge Bases", "abstract": "Many facts come with an expiration date, from the name of the President to the basketball team LeBron James plays for. However, most language models (LMs) are trained on snapshots of data collected at a specific moment in time. This can limit their utility, especially in the closed-book setting where the pretraining corpus must contain the facts the model should memorize. We introduce a diagnostic dataset aimed at probing LMs for factual knowledge that changes over time and highlight problems with LMs at either end of the spectrum\u2014those trained on specific slices of temporal data, as well as those trained on a wide range of temporal data. To mitigate these problems, we propose a simple technique for jointly modeling text with its timestamp. This improves memorization of seen facts from the training time period, as well as calibration on predictions about unseen facts from future time periods.
We also show that models trained with temporal context can be efficiently \u201crefreshed\u201d as new data arrives, without the need for retraining from scratch.", "keyphrases": ["language model", "factual knowledge", "time period"]} +{"id": "becker-etal-2016-argumentative", "title": "Argumentative texts and clause types", "abstract": "Argumentative texts have been thoroughly analyzed for their argumentative structure, and recent efforts aim at their automatic classification. This work investigates linguistic properties of argumentative texts and text passages in terms of their semantic clause types. We annotate argumentative texts with Situation Entity (SE) classes, which combine notions from lexical aspect (states, events) with genericity and habituality of clauses. We analyse the correlation of SE classes with argumentative text genres, components of argument structures, and some functions of those components. Our analysis reveals interesting relations between the distribution of SE types and the argumentative text genre, compared to other genres like fiction or report. We also see tendencies in the correlations between argument components (such as premises and conclusions) and SE types, as well as between argumentative functions (such as support and rebuttal) and SE types. The observed tendencies can be deployed for automatic recognition and fine-grained classification of argumentative text passages.", "keyphrases": ["clause type", "support", "argumentative text"]} +{"id": "giuliano-gliozzo-2007-instance", "title": "Instance Based Lexical Entailment for Ontology Population", "abstract": "In this paper we propose an instance-based method for lexical entailment and apply it to automatic ontology population from text. The approach is fully unsupervised and based on kernel methods. We demonstrate the effectiveness of our technique, largely surpassing both the random and most-frequent baselines and outperforming current state-of-the-art unsupervised approaches on a benchmark ontology available in the literature.", "keyphrases": ["lexical entailment", "ontology population", "occurrence"]} +{"id": "ma-etal-2011-extraction", "title": "Extraction of Broad-Scale, High-Precision Japanese-English Parallel Translation Expressions Using Lexical Information and Rules", "abstract": "Extraction of broad-scale, high-precision Japanese-English parallel translation expressions from large aligned parallel corpora was attempted. To acquire broad-scale parallel translation expressions, a new method was used to extract single Japanese and English word n-grams, by which as many parallel translation expressions as possible could then be extracted. To achieve high extraction precision, first, hand-crafted rules were used to prune the unnecessary words often found in expressions extracted on the basis of word n-grams, and lexical information was used to refine the parallel translation expressions. Computer experiments with aligned parallel corpora consisting of about 280,000 pairs of Japanese-English parallel sentences found that more than 125,000 pairs of parallel translation expressions could be extracted with a precision of 0.96.
These figures show that the proposed methods for extracting a broad range of parallel translation expressions have reached a level high enough for practical use.", "keyphrases": ["parallel translation expression", "lexical information", "extraction"]} +{"id": "kaneko-etal-2020-encoder", "title": "Encoder-Decoder Models Can Benefit from Pre-trained Masked Language Models in Grammatical Error Correction", "abstract": "This paper investigates how to effectively incorporate a pre-trained masked language model (MLM), such as BERT, into an encoder-decoder (EncDec) model for grammatical error correction (GEC). The answer to this question is not as straightforward as one might expect, because the previous common methods for incorporating an MLM into an EncDec model have potential drawbacks when applied to GEC. For example, the distribution of the inputs to a GEC model can be considerably different (erroneous, clumsy, etc.) from that of the corpora used for pre-training MLMs; however, this issue is not addressed in the previous methods. Our experiments show that our proposed method, where we first fine-tune an MLM with a given GEC corpus and then use the output of the fine-tuned MLM as additional features in the GEC model, maximizes the benefit of the MLM. The best-performing model achieves state-of-the-art performances on the BEA-2019 and CoNLL-2014 benchmarks. Our code is publicly available at: .", "keyphrases": ["language model", "grammatical error correction", "encoder-decoder model", "fine-tuned bert", "english gec task"]} +{"id": "hardmeier-etal-2010-fbk", "title": "FBK at WMT 2010: Word Lattices for Morphological Reduction and Chunk-Based Reordering", "abstract": "FBK participated in the WMT 2010 Machine Translation shared task with phrase-based Statistical Machine Translation systems based on the Moses decoder for English-German and German-English translation. Our work concentrates on exploiting the available language modelling resources by using linear mixtures of large 6-gram language models and on addressing linguistic differences between English and German with methods based on word lattices. In particular, we use lattices to integrate a morphological analyser for German into our system, and we present some initial work on rule-based word reordering.", "keyphrases": ["wmt", "morphological reduction", "german\u2192english smt system", "edge"]} +{"id": "aguilar-etal-2017-multi", "title": "A Multi-task Approach for Named Entity Recognition in Social Media Data", "abstract": "Named Entity Recognition for social media data is challenging because of its inherent noisiness. In addition to improper grammatical structures, it contains spelling inconsistencies and numerous informal abbreviations. We propose a novel multi-task approach by employing a more general secondary task of Named Entity (NE) segmentation together with the primary task of fine-grained NE categorization. The multi-task neural network architecture learns higher-order feature representations from word and character sequences along with basic part-of-speech tags and gazetteer information. This neural network acts as a feature extractor to feed a Conditional Random Fields classifier.
We were able to obtain the first position in the 3rd Workshop on Noisy User-generated Text (WNUT-2017) with a 41.86% entity F1-score and a 40.24% surface F1-score.", "keyphrases": ["multi-task approach", "part-of-speech tag", "gazetteer", "feature extractor", "noisy user-generated text"]} +{"id": "schwartz-etal-2015-extracting", "title": "Extracting Human Temporal Orientation from Facebook Language", "abstract": "People vary widely in their temporal orientation\u2014how often they emphasize the past, present, and future\u2014and this affects their finances, health, and happiness. Traditionally, temporal orientation has been assessed by self-report questionnaires. In this paper, we develop a novel behavior-based assessment using human language on Facebook. We first create a past, present, and future message classifier, engineering features and evaluating a variety of classification techniques. Our message classifier achieves an accuracy of 71.8%, compared with 52.8% from the most frequent class and 58.6% from a model based entirely on time expression features. We quantify a user\u2019s overall temporal orientation based on their distribution of messages and validate it against known human correlates: conscientiousness, age, and gender. We then explore social scientific questions, finding novel associations with the factors openness to experience, satisfaction with life, depression, IQ, and one\u2019s number of friends. Further, demonstrating how one can track orientation over time, we find differences in future orientation around birthdays.", "keyphrases": ["human temporal orientation", "human correlate", "age", "gender"]} +{"id": "jiang-etal-2011-relaxed", "title": "Relaxed Cross-lingual Projection of Constituent Syntax", "abstract": "We propose a relaxed correspondence assumption for cross-lingual projection of constituent syntax, which allows a supposed constituent of the target sentence to correspond to an unrestricted treelet in the source parse. Such a relaxed assumption fundamentally tolerates the syntactic non-isomorphism between languages, and enables us to learn the target-language-specific syntactic idiosyncrasy rather than a strained grammar directly projected from the source language syntax. Based on this assumption, a novel constituency projection method is also proposed in order to induce a projected constituent treebank from the source-parsed bilingual corpus. Experiments show that the parser trained on the projected treebank dramatically outperforms previous projected and unsupervised parsers.", "keyphrases": ["cross-lingual projection", "constituent syntax", "non-isomorphism"]} +{"id": "han-etal-2020-explaining", "title": "Explaining Black Box Predictions and Unveiling Data Artifacts through Influence Functions", "abstract": "Modern deep learning models for NLP are notoriously opaque. This has motivated the development of methods for interpreting such models, e.g., via gradient-based saliency maps or the visualization of attention weights. Such approaches aim to provide explanations for a particular model prediction by highlighting important words in the corresponding input text. While this might be useful for tasks where decisions are explicitly influenced by individual tokens in the input, we suspect that such highlighting is not suitable for tasks where model decisions should be driven by more complex reasoning. In this work, we investigate the use of influence functions for NLP, providing an alternative approach to interpreting neural text classifiers.
Influence functions explain the decisions of a model by identifying influential training examples. Despite the promise of this approach, influence functions have not yet been extensively evaluated in the context of NLP, a gap addressed by this work. We conduct a comparison between influence functions and common word-saliency methods on representative tasks. As suspected, we find that influence functions are particularly useful for natural language inference, a task in which `saliency maps' may not have clear interpretation. Furthermore, we develop a new quantitative measure based on influence functions that can reveal artifacts in training data.", "keyphrases": ["data artifact", "influence function", "explanation"]} +{"id": "lison-etal-2020-named", "title": "Named Entity Recognition without Labelled Data: A Weak Supervision Approach", "abstract": "Named Entity Recognition (NER) performance often degrades rapidly when applied to target domains that differ from the texts observed during training. When in-domain labelled data is available, transfer learning techniques can be used to adapt existing NER models to the target domain. But what should one do when there is no hand-labelled data for the target domain? This paper presents a simple but powerful approach to learn NER models in the absence of labelled data through weak supervision. The approach relies on a broad spectrum of labelling functions to automatically annotate texts from the target domain. These annotations are then merged together using a hidden Markov model which captures the varying accuracies and confusions of the labelling functions. A sequence labelling model can finally be trained on the basis of this unified annotation. We evaluate the approach on two English datasets (CoNLL 2003 and news articles from Reuters and Bloomberg) and demonstrate an improvement of about 7 percentage points in entity-level F1 scores compared to an out-of-domain neural NER model.", "keyphrases": ["entity recognition", "weak supervision approach", "target domain", "hidden markov model"]} +{"id": "dichy-farghaly-2003-roots", "title": "Roots & patterns vs. stems plus grammar-lexis specifications: on what basis should a multilingual database centred on Arabic be built?", "abstract": "Machine translation engines draw on various types of databases. This paper is concerned with Arabic as a source or target language, and focuses on lexical databases. The non-concatenative nature of Arabic morphology, the complex structure of Arabic word-forms, and the general use of vowel-free writing present a real challenge to NLP developers. We show here how and why a stem-grounded lexical database, the items of which are associated with grammar-lexis specifications \u2013 as opposed to a root-&-pattern database \u2013, is motivated both linguistically and with regards to efficiency, economy and modularity. Arguments in favour of databases relying on stems associated with grammar-lexis specifications (such as DIINAR.1 or the Arabic dB under development at SYSTRAN), rather than on roots and patterns, are the following: (a) The latter include huge numbers of rule-generated word-forms, which do not actually appear in the language. (b) Rule-generated lemmas \u2013 as opposed to existing ones \u2013 are widely under-specified with regards to grammar-lexis relations. (c) In a Semitic language such as Arabic, the mapping of grammar-lexis specifications that need to be associated with every lexical entry of the database is decisive. 
(d) These specifications can only be included in a stem-based dB. Points (a) to (d) are crucial and in the context of machine translation involving Arabic.", "keyphrases": ["grammar-lexis specification", "arabic", "root"]} +{"id": "basile-etal-2014-enhanced", "title": "An Enhanced Lesk Word Sense Disambiguation Algorithm through a Distributional Semantic Model", "abstract": "This paper describes a new Word Sense Disambiguation (WSD) algorithm which extends two well-known variations of the Lesk WSD method. Given a word and its context, Lesk algorithm exploits the idea of maximum number of shared words (maximum overlaps) between the context of a word and each definition of its senses (gloss) in order to select the proper meaning. The main contribution of our approach relies on the use of a word similarity function defined on a distributional semantic space to compute the gloss-context overlap. As sense inventory we adopt BabelNet, a large multilingual semantic network built exploiting both WordNet and Wikipedia. Besides linguistic knowledge, BabelNet also represents encyclopedic concepts coming from Wikipedia. The evaluation performed on SemEval-2013 Multilingual Word Sense Disambiguation shows that our algorithm goes beyond the most frequent sense baseline and the simplified version of the Lesk algorithm. Moreover, when compared with the other participants in SemEval-2013 task, our approach is able to outperform the best system for English.", "keyphrases": ["lesk algorithm", "overlap", "lexical knowledge"]} +{"id": "declerck-etal-2012-ontology", "title": "Ontology-Based Incremental Annotation of Characters in Folktales", "abstract": "We present on-going work on the automated ontology-based detection and recognition of characters in folktales, restricting ourselves for the time being to the analysis of referential nominal phrases occurring in such texts. Focus of the presently reported work was to investigate the interaction between an ontology and linguistic analysis of indefinite and indefinite nominal phrase for both the incremental annotation of characters in folktales text, including some inference based co-reference resolution, and the incremental population of the ontology. This in depth study was done at this early stage using only a very small textual base, but the demonstrated feasibility and the promising results of our small-scale experiment are encouraging us to deploy the strategy on a larger text base, covering more linguistic phenomena in a multilingual fashion.", "keyphrases": ["incremental annotation", "folktale", "ontology-based method"]} +{"id": "luan-etal-2021-sparse", "title": "Sparse, Dense, and Attentional Representations for Text Retrieval", "abstract": "Dual encoders perform retrieval by encoding documents and queries into dense low-dimensional vectors, scoring each document by its inner product with the query. We investigate the capacity of this architecture relative to sparse bag-of-words models and attentional neural networks. Using both theoretical and empirical analysis, we establish connections between the encoding dimension, the margin between gold and lower-ranked documents, and the document length, suggesting limitations in the capacity of fixed-length encodings to support precise retrieval of long documents. 
Building on these insights, we propose a simple neural model that combines the efficiency of dual encoders with some of the expressiveness of more costly attentional architectures, and explore sparse-dense hybrids to capitalize on the precision of sparse retrieval. These models outperform strong alternatives in large-scale retrieval.", "keyphrases": ["query", "efficiency", "me-bert", "bi-encoder", "text sequence"]} +{"id": "zhang-etal-2013-punctuation", "title": "Punctuation Prediction with Transition-based Parsing", "abstract": "Punctuations are not available in automatic speech recognition outputs, which could create barriers to many subsequent text processing tasks. This paper proposes a novel method to predict punctuation symbols for the stream of words in transcribed speech texts. Our method jointly performs parsing and punctuation prediction by integrating a rich set of syntactic features when processing words from left to right. It can exploit a global view to capture long-range dependencies for punctuation prediction with linear complexity. The experimental results on the test data sets of IWSLT and TDT4 show that our method can achieve high-level performance in punctuation prediction over the stream of words in transcribed speech text.", "keyphrases": ["transition-based parsing", "syntactic feature", "punctuation prediction"]} +{"id": "bonial-etal-2020-dialogue", "title": "Dialogue-AMR: Abstract Meaning Representation for Dialogue", "abstract": "This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker's intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present \u201cDialogue-AMR\u201d, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the \u201cDialAMR\u201d corpus\u2014a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.", "keyphrases": ["abstract meaning representation", "amr", "dialogue-amr"]} +{"id": "lin-etal-2019-kagnet", "title": "KagNet: Knowledge-Aware Graph Networks for Commonsense Reasoning", "abstract": "Commonsense reasoning aims to empower machines with the human ability to make presumptions about ordinary situations in our daily life. In this paper, we propose a textual inference framework for answering commonsense questions, which effectively utilizes external, structured commonsense knowledge graphs to perform explainable inferences. The framework first grounds a question-answer pair from the semantic space to the knowledge-based symbolic space as a schema graph, a related sub-graph of external knowledge graphs. 
It represents schema graphs with a novel knowledge-aware graph network module named KagNet, and finally scores answers with graph representations. Our model is based on graph convolutional networks and LSTMs, with a hierarchical path-based attention mechanism. The intermediate attention scores make it transparent and interpretable, which thus produce trustworthy inferences. Using ConceptNet as the only external resource for Bert-based models, we achieved state-of-the-art performance on the CommonsenseQA, a large-scale dataset for commonsense reasoning.", "keyphrases": ["commonsense reasoning", "knowledge graph", "kagnet", "subgraph", "question answering"]} +{"id": "ni-mcauley-2018-personalized", "title": "Personalized Review Generation By Expanding Phrases and Attending on Aspect-Aware Representations", "abstract": "In this paper, we focus on the problem of building assistive systems that can help users to write reviews. We cast this problem using an encoder-decoder framework that generates personalized reviews by expanding short phrases (e.g. review summaries, product titles) provided as input to the system. We incorporate aspect-level information via an aspect encoder that learns aspect-aware user and item representations. An attention fusion layer is applied to control generation by attending on the outputs of multiple encoders. Experimental results show that our model successfully learns representations capable of generating coherent and diverse reviews. In addition, the learned aspect-aware representations discover those aspects that users are more inclined to discuss and bias the generated text toward their personalized aspect preferences.", "keyphrases": ["review", "aspect-aware representation", "item representation"]} +{"id": "kulick-etal-2004-integrated", "title": "Integrated Annotation for Biomedical Information Extraction", "abstract": "We describe an approach to two areas of biomedical information extraction, drug development and cancer genomics. We have developed a framework which includes corpus annotation integrated at multiple levels: a Treebank containing syntactic structure, a Propbank containing predicate-argument structure, and annotation of entities and relations among the entities. Crucial to this approach is the proper characterization of entities as relation components, which allows the integration of the entity annotation with the syntactic structure while retaining the capacity to annotate and extract more complex events. We are training statistical taggers using this annotation for such extraction as well as using them for improving the annotation process.", "keyphrases": ["biomedical information extraction", "integrated annotation", "abstract"]} +{"id": "wang-etal-2006-capitalizing", "title": "Capitalizing Machine Translation", "abstract": "We present a probabilistic bilingual capitalization model for capitalizing machine translation outputs using conditional random fields. Experiments carried out on three language pairs and a variety of experiment conditions show that our model significantly outperforms a strong monolingual capitalization model baseline, especially when working with small datasets and/or European language pairs.", "keyphrases": ["machine translation", "bilingual capitalization model", "conditional random field"]} +{"id": "lease-johnson-2006-early", "title": "Early Deletion of Fillers In Processing Conversational Speech", "abstract": "This paper evaluates the benefit of deleting fillers (e.g. 
you know, like) early in parsing conversational speech. Readability studies have shown that disfluencies (fillers and speech repairs) may be deleted from transcripts without compromising meaning (Jones et al., 2003), and deleting repairs prior to parsing has been shown to improve its accuracy (Charniak and Johnson, 2001). We explore whether this strategy of early deletion is also beneficial with regard to fillers. Reported experiments measure the effect of early deletion under in-domain and out-of-domain parser training conditions using a state-of-the-art parser (Charniak, 2000). While early deletion is found to yield only modest benefit for in-domain parsing, significant improvement is achieved for out-of-domain adaptation. This suggests a potentially broader role for disfluency modeling in adapting text-based tools for processing conversational speech.", "keyphrases": ["disfluency", "early deletion", "pcfg parser"]} +{"id": "vanmassenhove-etal-2018-getting", "title": "Getting Gender Right in Neural Machine Translation", "abstract": "Speakers of different languages must attend to and encode strikingly different aspects of the world in order to use their language correctly (Sapir, 1921; Slobin, 1996). One such difference is related to the way gender is expressed in a language. Saying \u201cI am happy\u201d in English, does not encode any additional knowledge of the speaker that uttered the sentence. However, many other languages do have grammatical gender systems and so such knowledge would be encoded. In order to correctly translate such a sentence into, say, French, the inherent gender information needs to be retained/recovered. The same sentence would become either \u201cJe suis heureux\u201d, for a male speaker or \u201cJe suis heureuse\u201d for a female one. Apart from morphological agreement, demographic factors (gender, age, etc.) also influence our use of language in terms of word choices or syntactic constructions (Tannen, 1991; Pennebaker et al., 2003). We integrate gender information into NMT systems. Our contribution is two-fold: (1) the compilation of large datasets with speaker information for 20 language pairs, and (2) a simple set of experiments that incorporate gender information into NMT for multiple language pairs. Our experiments show that adding a gender feature to an NMT system significantly improves the translation quality for some language pairs.", "keyphrases": ["gender", "morphological agreement", "speaker information", "translation quality"]} +{"id": "peng-etal-2020-learning", "title": "Learning from Context or Names? An Empirical Study on Neural Relation Extraction", "abstract": "Neural models have achieved remarkable success on relation extraction (RE) benchmarks. However, there is no clear understanding what information in text affects existing RE models to make decisions and how to further improve the performance of these models. To this end, we empirically study the effect of two main information sources in text: textual context and entity mentions (names). We find that (i) while context is the main source to support the predictions, RE models also heavily rely on the information from entity mentions, most of which is type information, and (ii) existing datasets may leak shallow heuristics via entity mentions and thus contribute to the high performance on RE benchmarks. 
Based on the analyses, we propose an entity-masked contrastive pre-training framework for RE to gain a deeper understanding on both textual context and type information while avoiding rote memorization of entities or use of superficial cues in mentions. We carry out extensive experiments to support our views, and show that our framework can improve the effectiveness and robustness of neural models in different RE scenarios. All the code and datasets are released at .", "keyphrases": ["textual context", "shallow heuristic", "cue", "entity type"]} +{"id": "van-halteren-teufel-2003-examining", "title": "Examining the consensus between human summaries: initial experiments with factoid analysis", "abstract": "We present a new approach to summary evaluation which combines two novel aspects, namely (a) content comparison between gold standard summary and system summary via factoids, a pseudo-semantic representation based on atomic information units which can be robustly marked in text, and (b) use of a gold standard consensus summary, in our case based on 50 individual summaries of one text. Even though future work on more than one source text is imperative, our experiments indicate that (1) ranking with regard to a single gold standard summary is insufficient as rankings based on any two randomly chosen summaries are very dissimilar (correlations average \u03c1 = 0.20), (2) a stable consensus summary can only be expected if a larger number of summaries are collected (in the range of at least 30--40 summaries), and (3) similarity measurement using unigrams shows a similarly low ranking correlation when compared with factoid-based ranking.", "keyphrases": ["factoid", "ranking", "large number", "model summary"]} +{"id": "haghighi-klein-2009-simple", "title": "Simple Coreference Resolution with Rich Syntactic and Semantic Features", "abstract": "Coreference systems are driven by syntactic, semantic, and discourse constraints. We present a simple approach which completely modularizes these three aspects. In contrast to much current work, which focuses on learning and on the discourse component, our system is deterministic and is driven entirely by syntactic and semantic compatibility as learned from a large, unlabeled corpus. Despite its simplicity and discourse naivete, our system substantially outperforms all unsupervised systems and most supervised ones. Primary contributions include (1) the presentation of a simple-to-reproduce, high-performing baseline and (2) the demonstration that most remaining errors can be attributed to syntactic and semantic factors external to the coreference phenomenon (and perhaps best addressed by non-coreference systems).", "keyphrases": ["coreference resolution", "semantic compatibility", "pronoun", "rule-based system", "nominal"]} +{"id": "izacard-grave-2021-leveraging", "title": "Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering", "abstract": "Generative models for open domain question answering have proven to be competitive, without resorting to external knowledge. While promising, this approach requires to use models with billions of parameters, which are expensive to train and query. In this paper, we investigate how much these models can benefit from retrieving text passages, potentially containing evidence. We obtain state-of-the-art results on the Natural Questions and TriviaQA open benchmarks. 
Interestingly, we observe that the performance of this method significantly improves when increasing the number of retrieved passages. This is evidence that sequence-to-sequence models offer a flexible framework to efficiently aggregate and combine evidence from multiple passages.", "keyphrases": ["passage retrieval", "generative reader", "knowledge-intensive nlp task"]} +{"id": "berend-etal-2020-prosperamnet", "title": "ProsperAMnet at the FinSim Task: Detecting hypernyms of financial concepts via measuring the information stored in sparse word representations", "abstract": "In this paper we propose and carefully evaluate the application of an information theoretic approach for the detection of hypernyms for financial concepts. Our algorithm is based on the application of sparse word embeddings, meaning that \u2013 unlike in the case of traditional word embeddings \u2013 most of the coefficients in the embeddings are exactly zero. We apply an approach that quantifies the extent to which the individual dimensions for such word representations convey the property that some word is the hyponym of a certain top-level concept according to an external ontology. Our experimental results demonstrate that substantial improvements can be gained by our approach compared to the direct utilization of the traditional dense word embeddings. Our team ranked second and fourth according to average rank score and mean accuracy that were the two evaluation criteria applied at the shared task.", "keyphrases": ["hypernyms", "financial concept", "word representation"]} +{"id": "schwartz-etal-2017-effect", "title": "The Effect of Different Writing Tasks on Linguistic Style: A Case Study of the ROC Story Cloze Task", "abstract": "A writer's style depends not just on personal traits but also on her intent and mental state. In this paper, we show how variants of the same writing task can lead to measurable differences in writing style. We present a case study based on the story cloze task (Mostafazadeh et al., 2016a), where annotators were assigned similar writing tasks with different constraints: (1) writing an entire story, (2) adding a story ending for a given story context, and (3) adding an incoherent ending to a story. We show that a simple linear classifier informed by stylistic features is able to successfully distinguish among the three cases, without even looking at the story context. In addition, combining our stylistic features with language model predictions reaches state of the art performance on the story cloze challenge. Our results demonstrate that different task framings can dramatically affect the way people write.", "keyphrases": ["style", "story", "task framing"]} +{"id": "miao-etal-2021-generative", "title": "A Generative Framework for Simultaneous Machine Translation", "abstract": "We propose a generative framework for simultaneous machine translation. Conventional approaches use a fixed number of source words to translate or learn dynamic policies for the number of source words by reinforcement learning. Here we formulate simultaneous translation as a structural sequence-to-sequence learning problem. A latent variable is introduced to model read or translate actions at every time step, which is then integrated out to consider all the possible translation policies. A re-parameterised Poisson prior is used to regularise the policies which allows the model to explicitly balance translation quality and latency. 
The experiments demonstrate the effectiveness and robustness of the generative framework, which achieves the best BLEU scores given different average translation latencies on benchmark datasets.", "keyphrases": ["generative framework", "simultaneous machine translation", "simt policy"]} +{"id": "verhoeven-daelemans-2014-clips", "title": "CLiPS Stylometry Investigation (CSI) corpus: A Dutch corpus for the detection of age, gender, personality, sentiment and deception in text", "abstract": "We present the CLiPS Stylometry Investigation (CSI) corpus, a new Dutch corpus containing reviews and essays written by university students. It is designed to serve multiple purposes: detection of age, gender, authorship, personality, sentiment, deception, topic and genre. Another major advantage is its planned yearly expansion with each year's new students. The corpus currently contains about 305,000 tokens spread over 749 documents. The average review length is 128 tokens; the average essay length is 1126 tokens. The corpus will be made available on the CLiPS website (www.clips.uantwerpen.be/datasets) and can freely be used for academic research purposes. An initial deception detection experiment was performed on this data. Deception detection is the task of automatically classifying a text as being either truthful or deceptive, in our case by examining the writing style of the author. This task has never been investigated for Dutch before. We performed a supervised machine learning experiment using the SVM algorithm in a 10-fold cross-validation setup. The only features were the token unigrams present in the training data. Using this simple method, we reached a state-of-the-art F-score of 72.2%.", "keyphrases": ["detection", "personality", "clips stylometry investigation"]} +{"id": "li-etal-2016-commonsense", "title": "Commonsense Knowledge Base Completion", "abstract": "We enrich a curated resource of commonsense knowledge by formulating the problem as one of knowledge base completion (KBC). Most work in KBC focuses on knowledge bases like Freebase that relate entities drawn from a fixed set. However, the tuples in ConceptNet (Speer and Havasi, 2012) define relations between an unbounded set of phrases. We develop neural network models for scoring tuples on arbitrary phrases and evaluate them by their ability to distinguish true held-out tuples from false ones. We find strong performance from a bilinear model using a simple additive architecture to model phrases. We manually evaluate our trained model\u2019s ability to assign quality scores to novel tuples, finding that it can propose tuples at the same quality level as medium-confidence tuples from ConceptNet.", "keyphrases": ["knowledge base completion", "conceptnet", "neural network model", "sparsity", "wikipedia"]} +{"id": "chahuneau-etal-2013-translating", "title": "Translating into Morphologically Rich Languages with Synthetic Phrases", "abstract": "Translation into morphologically rich languages is an important but recalcitrant problem in MT. We present a simple and effective approach that deals with the problem in two phases. First, a discriminative model is learned to predict inflections of target words from rich source-side annotations. Then, this model is used to create additional sentence-specific word- and phrase-level translations that are added to a standard translation model as \u201csynthetic\u201d phrases. 
Our approach relies on morphological analysis of the target language, but we show that an unsupervised Bayesian model of morphology can successfully be used in place of a supervised analyzer. We report significant improvements in translation quality when translating from English to Russian, Hebrew and Swahili.", "keyphrases": ["rich language", "synthetic phrase", "morphological analysis", "translation quality", "swahili"]} +{"id": "wang-jiang-2016-learning", "title": "Learning Natural Language Inference with LSTM", "abstract": "Natural language inference (NLI) is a fundamentally important task in natural language processing that has many applications. The recently released Stanford Natural Language Inference (SNLI) corpus has made it possible to develop and evaluate learning-centered methods such as deep neural networks for natural language inference (NLI). In this paper, we propose a special long short-term memory (LSTM) architecture for NLI. Our model builds on top of a recently proposed neural attention model for NLI but is based on a significantly different idea. Instead of deriving sentence embeddings for the premise and the hypothesis to be used for classification, our solution uses a match-LSTM to perform word-by-word matching of the hypothesis with the premise. This LSTM is able to place more emphasis on important word-level matching results. In particular, we observe that this LSTM remembers important mismatches that are critical for predicting the contradiction or the neutral relationship label. On the SNLI corpus, our model achieves an accuracy of 86.1%, outperforming the state of the art.", "keyphrases": ["natural language inference", "nli", "attention model", "matching"]} +{"id": "wilson-wiebe-2005-annotating", "title": "Annotating Attributions and Private States", "abstract": "This paper describes extensions to a corpus annotation scheme for the manual annotation of attributions, as well as opinions, emotions, sentiments, speculations, evaluations and other private states in language. It discusses the scheme with respect to the \"Pie in the Sky\" Check List of Desirable Semantic Information for Annotation. We believe that the scheme is a good foundation for adding private state annotations to other layers of semantic meaning.", "keyphrases": ["private state", "manual annotation", "opinion", "subjectivity"]} +{"id": "alikaniotis-etal-2016-automatic", "title": "Automatic Text Scoring Using Neural Networks", "abstract": "Automated Text Scoring (ATS) provides a cost-effective and consistent alternative to human marking. However, in order to achieve good performance, the predictive features of the system need to be manually engineered by human experts. We introduce a model that forms word representations by learning the extent to which specific words contribute to the text's score. Using Long-Short Term Memory networks to represent the meaning of texts, we demonstrate that a fully automated framework is able to achieve excellent results over similar approaches. 
In an attempt to make our results more interpretable, and inspired by recent advances in visualizing neural networks, we introduce a novel method for identifying the regions of the text that the model has found more discriminative.", "keyphrases": ["automatic text scoring", "neural network model", "essay scoring"]} +{"id": "naplava-straka-2019-grammatical", "title": "Grammatical Error Correction in Low-Resource Scenarios", "abstract": "Grammatical error correction in English is a long studied problem with many existing systems and datasets. However, there has been only a limited research on error correction of other languages. In this paper, we present a new dataset AKCES-GEC on grammatical error correction for Czech. We then make experiments on Czech, German and Russian and show that when utilizing synthetic parallel corpus, Transformer neural machine translation model can reach new state-of-the-art results on these datasets. AKCES-GEC is published under CC BY-NC-SA 4.0 license at , and the source code of the GEC model is available at .", "keyphrases": ["other language", "czech", "grammatical error correction", "pre-training"]} +{"id": "bodrumlu-etal-2009-new", "title": "A New Objective Function for Word Alignment", "abstract": "We develop a new objective function for word alignment that measures the size of the bilingual dictionary induced by an alignment. A word alignment that results in a small dictionary is preferred over one that results in a large dictionary. In order to search for the alignment that minimizes this objective, we cast the problem as an integer linear program. We then extend our objective function to align corpora at the sub-word level, which we demonstrate on a small Turkish-English corpus.", "keyphrases": ["new objective function", "word alignment", "mdl"]} +{"id": "liu-lapata-2018-learning", "title": "Learning Structured Text Representations", "abstract": "In this paper, we focus on learning structure-aware document representations from data without recourse to a discourse parser or additional annotations. Drawing inspiration from recent efforts to empower neural networks with a structural bias (Cheng et al., 2016; Kim et al., 2017), we propose a model that can encode a document while automatically inducing rich structural dependencies. Specifically, we embed a differentiable non-projective parsing algorithm into a neural model and use attention mechanisms to incorporate the structural biases. Experimental evaluations across different tasks and datasets show that the proposed model achieves state-of-the-art results on document modeling tasks while inducing intermediate structures which are both interpretable and meaningful.", "keyphrases": ["document representation", "discourse parser", "attention weight", "latent structure", "summarization"]} +{"id": "gargett-etal-2010-give", "title": "The GIVE-2 Corpus of Giving Instructions in Virtual Environments", "abstract": "We present the GIVE-2 Corpus, a new corpus of human instruction giving. The corpus was collected by asking one person in each pair of subjects to guide the other person towards completing a task in a virtual 3D environment with typed instructions. This is the same setting as that of the recent GIVE Challenge, and thus the corpus can serve as a source of data and as a point of comparison for NLG systems that participate in the GIVE Challenge. 
The instruction-giving data we collect is multilingual (45 German and 63 English dialogues), and can easily be extended to further languages by using our software, which we have made available. We analyze the corpus to study the effects of learning by repeated participation in the task and the effects of the participants' spatial navigation abilities. Finally, we present a novel annotation scheme for situated referring expressions and compare the referring expressions in the German and English data.", "keyphrases": ["give-2 corpus", "instruction", "virtual environments", "hearer"]} +{"id": "xiong-etal-2015-hanspeller", "title": "HANSpeller: A Unified Framework for Chinese Spelling Correction", "abstract": "The number of people learning Chinese as a Foreign Language (CFL) has been booming in recent decades. The problem of spelling error correction for CFL learners increasingly is becoming important. Compared to the regular text spelling check task, more error types need to be considered in CFL cases. In this paper, we propose a unified framework for Chinese spelling correction. Instead of conventional methods, which focus on rules or statistics separately, our approach is based on extended HMM and ranker-based models, together with a rule-based model for further polishing, and a final decision-making step is adopted to decide whether to output the corrections or not. Experimental results on the test data of foreigner's Chinese essays provided by the SIGHAN 2014 bake-off illustrate the performance of our approach.", "keyphrases": ["unified framework", "chinese spelling correction", "hmm", "conditional random field", "machine learning algorithm"]} +{"id": "amigo-etal-2011-corroborating", "title": "Corroborating Text Evaluation Results with Heterogeneous Measures", "abstract": "Automatically produced texts (e.g. translations or summaries) are usually evaluated with n-gram based measures such as BLEU or ROUGE, while the wide set of more sophisticated measures that have been proposed in the last years remains largely ignored for practical purposes. In this paper we first present an in-depth analysis of the state of the art in order to clarify this issue. After this, we formalize and verify empirically a set of properties that every text evaluation measure based on similarity to human-produced references satisfies. These properties imply that corroborating system improvements with additional measures always increases the overall reliability of the evaluation process. In addition, the greater the heterogeneity of the measures (which is measurable) the higher their combined reliability. These results support the use of heterogeneous measures in order to consolidate text evaluation results.", "keyphrases": ["text evaluation result", "heterogeneous measure", "score increase"]} +{"id": "popat-etal-2019-stancy", "title": "STANCY: Stance Classification Based on Consistency Cues", "abstract": "Controversial claims are abundant in online media and discussion forums. A better understanding of such claims requires analyzing them from different perspectives. Stance classification is a necessary step for inferring these perspectives in terms of supporting or opposing the claim. In this work, we present a neural network model for stance classification leveraging BERT representations and augmenting them with a novel consistency constraint. 
Experiments on the Perspectrum dataset, consisting of claims and users' perspectives from various debate websites, demonstrate the effectiveness of our approach over state-of-the-art baselines.", "keyphrases": ["stance classification", "claim", "consistency constraint"]} +{"id": "eisenstein-davis-2006-gesture", "title": "Gesture Improves Coreference Resolution", "abstract": "Coreference resolution, like many problems in natural language processing, has most often been explored using datasets of written text. While spontaneous spoken language poses well-known challenges, it also offers additional modalities that may help disambiguate some of the inherent disfluency. We explore features of hand gesture that are correlated with coreference. Combining these features with a traditional textual model yields a statistically significant improvement in overall performance.", "keyphrases": ["coreference resolution", "gesture", "co-reference resolution"]} +{"id": "xie-etal-2011-novel", "title": "A novel dependency-to-string model for statistical machine translation", "abstract": "Dependency structure, as a first step towards semantics, is believed to be helpful to improve translation quality. However, previous works on dependency structure based models typically resort to insertion operations to complete translations, which make it difficult to specify ordering information in translation rules. In our model of this paper, we handle this problem by directly specifying the ordering information in head-dependents rules which represent the source side as head-dependents relations and the target side as strings. The head-dependents rules require only substitution operation, thus our model requires no heuristics or separate ordering models of the previous works to control the word order of translations. Large-scale experiments show that our model performs well on long distance reordering, and outperforms the state-of-the-art constituency-to-string model (+1.47 BLEU on average) and hierarchical phrase-based model (+0.46 BLEU on average) on two Chinese-English NIST test sets without resort to phrases or parse forest. For the first time, a source dependency structure based model catches up with and surpasses the state-of-the-art translation models.", "keyphrases": ["statistical machine translation", "source side", "head-dependents relation", "long distance", "tree-based model"]} +{"id": "wilkens-etal-2016-multiword", "title": "Multiword Expressions in Child Language", "abstract": "The goal of this work is to introduce CHILDES-MWE, which contains English CHILDES corpora automatically annotated with Multiword Expressions (MWEs) information. The result is a resource with almost 350,000 sentences annotated with more than 70,000 distinct MWEs of various types from both longitudinal and latitudinal corpora. This resource can be used for large scale language acquisition studies of how MWEs feature in child language. Focusing on compound nouns (CN), we then verify in a longitudinal study if there are differences in the distribution and compositionality of CNs in child-directed and child-produced sentences across ages. Moreover, using additional latitudinal data, we investigate if there are further differences in CN usage and in compositionality preferences. 
The results obtained for the child-produced sentences reflect CN distribution and compositionality in child-directed sentences.", "keyphrases": ["child language", "multiword expressions", "english verb-particle constructions"]} +{"id": "dickinson-meurers-2005-detecting", "title": "Detecting Errors in Discontinuous Structural Annotation", "abstract": "Consistency of corpus annotation is an essential property for the many uses of annotated corpora in computational and theoretical linguistics. While some research addresses the detection of inconsistencies in positional annotation (e.g., part-of-speech) and continuous structural annotation (e.g., syntactic constituency), no approach has yet been developed for automatically detecting annotation errors in discontinuous structural annotation. This is significant since the annotation of potentially discontinuous stretches of material is increasingly relevant, from tree-banks for free-word order languages to semantic and discourse annotation. In this paper we discuss how the variation n-gram error detection approach (Dickinson and Meurers, 2003a) can be extended to discontinuous structural annotation. We exemplify the approach by showing how it successfully detects errors in the syntactic annotation of the German TIGER corpus (Brants et al., 2002).", "keyphrases": ["discontinuous structural annotation", "constituency annotation", "variation nucleus"]} +{"id": "duh-etal-2010-n", "title": "N-Best Reranking by Multitask Learning", "abstract": "We propose a new framework for N-best reranking on sparse feature sets. The idea is to reformulate the reranking problem as a Multitask Learning problem, where each N-best list corresponds to a distinct task. \n \nThis is motivated by the observation that N-best lists often show significant differences in feature distributions. Training a single reranker directly on this heterogeneous data can be difficult. \n \nOur proposed meta-algorithm solves this challenge by using multitask learning (such as \u21131/\u21132 regularization) to discover common feature representations across N-best lists. This meta-algorithm is simple to implement, and its modular approach allows one to plug-in different learning algorithms from existing literature. As a proof of concept, we show statistically significant improvements on a machine translation system involving millions of features.", "keyphrases": ["multitask learning", "n-best reranking", "language task"]} +{"id": "qiu-etal-2019-graph", "title": "Graph-Based Semi-Supervised Learning for Natural Language Understanding", "abstract": "Semi-supervised learning is an efficient method to augment training data automatically from unlabeled data. Development of many natural language understanding (NLU) applications has a challenge where unlabeled data is relatively abundant while labeled data is rather limited. In this work, we propose transductive graph-based semi-supervised learning models as well as their inductive variants for NLU. We evaluate the approach's applicability using publicly available NLU data and models. In order to find similar utterances and construct a graph, we use a paraphrase detection model. 
Results show that applying the inductive graph-based semi-supervised learning can improve the error rate of the NLU model by 5%.", "keyphrases": ["natural language understanding", "unlabeled data", "graph-based semi-supervised learning"]} +{"id": "feldman-etal-2006-cross", "title": "A Cross-language Approach to Rapid Creation of New Morpho-syntactically Annotated Resources", "abstract": "We take a novel approach to rapid, low-cost development of morpho-syntactically annotated resources without using parallel corpora or bilingual lexicons. The overall research question is how to exploit language resources and properties to facilitate and automate the creation of morphologically annotated corpora for new languages. This portability issue is especially relevant to minority languages, for which such resources are likely to remain unavailable in the foreseeable future. We compare the performance of our system on languages that belong to different language families (Romance vs. Slavic), as well as different language pairs within the same language family (Portuguese via Spanish vs. Catalan via Spanish). We show that across language families, the most difficult category is the category of nominals (the noun homonymy is challenging for morphological analysis and the order variation of adjectives within a sentence makes it challenging to create a reliable model), whereas different language families present different challenges with respect to their morpho-syntactic descriptions: for the Slavic languages, case is the most challenging category; for the Romance languages, gender is more challenging than case. In addition, we present an alternative evaluation metric for our system, where we measure how much human labor will be needed to convert the result of our tagging to a high precision annotated resource.", "keyphrases": ["parallel corpora", "tagger", "czech"]} +{"id": "suzuki-isozaki-2008-semi", "title": "Semi-Supervised Sequential Labeling and Segmentation Using Giga-Word Scale Unlabeled Data", "abstract": "This paper provides evidence that the use of more unlabeled data in semi-supervised learning can improve the performance of Natural Language Processing (NLP) tasks, such as part-of-speech tagging, syntactic chunking, and named entity recognition. We first propose a simple yet powerful semi-supervised discriminative model appropriate for handling large scale unlabeled data. Then, we describe experiments performed on widely used test collections, namely, PTB III data, CoNLL\u201900 and \u201903 shared task data for the above three NLP tasks, respectively. We incorporate up to 1G-words (one billion tokens) of unlabeled data, which is the largest amount of unlabeled data ever used for these tasks, to investigate the performance improvement. In addition, our results are superior to the best reported results for all of the above test collections.", "keyphrases": ["unlabeled data", "part-of-speech tagging", "chunking", "entity recognition", "probability model"]} +{"id": "xia-yarowsky-2017-deriving", "title": "Deriving Consensus for Multi-Parallel Corpora: an English Bible Study", "abstract": "What can you do with multiple noisy versions of the same text? We present a method which generates a single consensus between multi-parallel corpora. By maximizing a function of linguistic features between word pairs, we jointly learn a single corpus-wide multiway alignment: a consensus between 27 versions of the English Bible. 
We additionally produce English paraphrases, word-level distributions of tags, and consensus dependency parses. Our method is language independent and applicable to any multi-parallel corpora. Given the Bible's unique role as alignable bitext for over 800 of the world's languages, this consensus alignment and resulting resources offer value for multilingual annotation projection, and also shed potential insights into the Bible itself.", "keyphrases": ["consensus", "multi-parallel corpora", "paraphrase"]} +{"id": "degaetano-ortlieb-2018-stylistic", "title": "Stylistic variation over 200 years of court proceedings according to gender and social class", "abstract": "We present an approach to detect stylistic variation across social variables (here: gender and social class), considering also diachronic change in language use. For detection of stylistic variation, we use relative entropy, measuring the difference between probability distributions at different linguistic levels (here: lexis and grammar). In addition, by relative entropy, we can determine which linguistic units are related to stylistic variation.", "keyphrases": ["gender", "social class", "stylistic variation"]} +{"id": "guevara-2010-regression", "title": "A Regression Model of Adjective-Noun Compositionality in Distributional Semantics", "abstract": "In this paper we explore the computational modelling of compositionality in distributional models of semantics. In particular, we model the semantic composition of pairs of adjacent English Adjectives and Nouns from the British National Corpus. We build a vector-based semantic space from a lemmatised version of the BNC, where the most frequent A-N lemma pairs are treated as single tokens. We then extrapolate three different models of compositionality: a simple additive model, a pointwise-multiplicative model and a Partial Least Squares Regression (PLSR) model. We propose two evaluation methods for the implemented models. Our study leads to the conclusion that regression-based models of compositionality generally outperform additive and multiplicative approaches, and also show a number of advantages that make them very promising for future research.", "keyphrases": ["compositionality", "adjective-noun phrase", "distributional semantic model"]} +{"id": "mcclosky-etal-2011-event", "title": "Event Extraction as Dependency Parsing", "abstract": "Nested event structures are a common occurrence in both open domain and domain specific extraction tasks, e.g., a \"crime\" event can cause an \"investigation\" event, which can lead to an \"arrest\" event. However, most current approaches address event extraction with highly local models that extract each event and argument independently. We propose a simple approach for the extraction of such structures by taking the tree of event-argument relations and using it directly as the representation in a reranking dependency parser. This provides a simple framework that captures global properties of both nested and flat event structures. We explore a rich feature space that models both the events to be parsed and context from the original supporting text. 
Our approach obtains competitive results in the extraction of biomedical events from the BioNLP'09 shared task with a F1 score of 53.5% in development and 48.6% in testing.", "keyphrases": ["reranking dependency parser", "event extraction", "feature-based method"]} +{"id": "metallinou-etal-2013-discriminative", "title": "Discriminative state tracking for spoken dialog systems", "abstract": "In spoken dialog systems, statistical state tracking aims to improve robustness to speech recognition errors by tracking a posterior distribution over hidden dialog states. Current approaches based on generative or discriminative models have different but important shortcomings that limit their accuracy. In this paper we discuss these limitations and introduce a new approach for discriminative state tracking that overcomes them by leveraging the problem structure. An offline evaluation with dialog data collected from real users shows improvements in both state tracking accuracy and the quality of the posterior probabilities. Features that encode speech recognition error patterns are particularly helpful, and training requires relatively few dialogs.", "keyphrases": ["spoken dialog system", "discriminative state tracking", "maximum-entropy model"]} +{"id": "srikumar-roth-2011-joint", "title": "A Joint Model for Extended Semantic Role Labeling", "abstract": "This paper presents a model that extends semantic role labeling. Existing approaches independently analyze relations expressed by verb predicates or those expressed as nominalizations. However, sentences express relations via other linguistic phenomena as well. Furthermore, these phenomena interact with each other, thus restricting the structures they articulate. In this paper, we use this intuition to define a joint inference model that captures the inter-dependencies between verb semantic role labeling and relations expressed using prepositions. The scarcity of jointly labeled data presents a crucial technical challenge for learning a joint model. The key strength of our model is that we use existing structure predictors as black boxes. By enforcing consistency constraints between their predictions, we show improvements in the performance of both tasks without retraining the individual models.", "keyphrases": ["joint model", "semantic role labeling", "preposition"]} +{"id": "kober-etal-2021-data", "title": "Data Augmentation for Hypernymy Detection", "abstract": "The automatic detection of hypernymy relationships represents a challenging problem in NLP. The successful application of state-of-the-art supervised approaches using distributed representations has generally been impeded by the limited availability of high quality training data. We have developed two novel data augmentation techniques which generate new training examples from existing ones. First, we combine the linguistic principles of hypernym transitivity and intersective modifier-noun composition to generate additional pairs of vectors, such as \u201csmall dog - dog\u201d or \u201csmall dog - animal\u201d, for which a hypernymy relationship can be assumed. Second, we use generative adversarial networks (GANs) to generate pairs of vectors for which the hypernymy relation can also be assumed. We furthermore present two complementary strategies for extending an existing dataset by leveraging linguistic resources such as WordNet. 
Using an evaluation across 3 different datasets for hypernymy detection and 2 different vector spaces, we demonstrate that both of the proposed automatic data augmentation and dataset extension strategies substantially improve classifier performance.", "keyphrases": ["hypernymy detection", "data augmentation", "entailment"]} +{"id": "hughes-ramage-2007-lexical", "title": "Lexical Semantic Relatedness with Random Graph Walks", "abstract": "Many systems for tasks such as question answering, multi-document summarization, and information retrieval need robust numerical measures of lexical relatedness. Standard thesaurus-based measures of word pair similarity are based on only a single path between those words in the thesaurus graph. By contrast, we propose a new model of lexical semantic relatedness that incorporates information from every explicit or implicit path connecting the two words in the entire graph. Our model uses a random walk over nodes and edges derived from WordNet links and corpus statistics. We treat the graph as a Markov chain and compute a word-specific stationary distribution via a generalized PageRank algorithm. Semantic relatedness of a word pair is scored by a novel divergence measure, ZKL, that outperforms existing measures on certain classes of distributions. In our experiments, the resulting relatedness measure is the WordNet-based measure most highly correlated with human similarity judgments by rank ordering at \u03c1 = .90.", "keyphrases": ["random walk", "lexical semantic relatedness", "thesauri", "knowledge-based measure"]} +{"id": "liu-etal-2005-using-conditional", "title": "Using Conditional Random Fields for Sentence Boundary Detection in Speech", "abstract": "Sentence boundary detection in speech is important for enriching speech recognition output, making it easier for humans to read and downstream modules to process. In previous work, we have developed hidden Markov model (HMM) and maximum entropy (Maxent) classifiers that integrate textual and prosodic knowledge sources for detecting sentence boundaries. In this paper, we evaluate the use of a conditional random field (CRF) for this task and relate results with this model to our prior work. We evaluate across two corpora (conversational telephone speech and broadcast news speech) on both human transcriptions and speech recognition output. In general, our CRF model yields a lower error rate than the HMM and Maxent models on the NIST sentence boundary detection task in speech, although it is interesting to note that the best results are achieved by three-way voting among the classifiers. This probably occurs because each model has different strengths and weaknesses for modeling the knowledge sources.", "keyphrases": ["conditional random fields", "boundary detection", "low error rate", "speech processing task", "phone"]} +{"id": "xu-etal-2021-document-graph", "title": "Document Graph for Neural Machine Translation", "abstract": "Previous works have shown that contextual information can improve the performance of neural machine translation (NMT). However, most existing document-level NMT methods failed to leverage contexts beyond a few set of previous sentences. How to make use of the whole document as global contexts is still a challenge. To address this issue, we hypothesize that a document can be represented as a graph that connects relevant contexts regardless of their distances. 
We employ several types of relations, including adjacency, syntactic dependency, lexical consistency, and coreference, to construct the document graph. Then, we incorporate both source and target graphs into the conventional Transformer architecture with graph convolutional networks. Experiments on various NMT benchmarks, including IWSLT English\u2013French, Chinese\u2013English, WMT English\u2013German and Opensubtitle English\u2013Russian, demonstrate that using document graphs can significantly improve the translation quality. Extensive analysis verifies that the document graph is beneficial for capturing discourse phenomena.", "keyphrases": ["neural machine translation", "relevant context", "distance", "document graph"]} +{"id": "ye-ling-2018-hybrid", "title": "Hybrid semi-Markov CRF for Neural Sequence Labeling", "abstract": "This paper proposes hybrid semi-Markov conditional random fields (SCRFs) for neural sequence labeling in natural language processing. Based on conventional conditional random fields (CRFs), SCRFs have been designed for the tasks of assigning labels to segments by extracting features from and describing transitions between segments instead of words. In this paper, we improve the existing SCRF methods by employing word-level and segment-level information simultaneously. First, word-level labels are utilized to derive the segment scores in SCRFs. Second, a CRF output layer and an SCRF output layer are integrated into a unified neural network and trained jointly. Experimental results on CoNLL 2003 named entity recognition (NER) shared task show that our model achieves state-of-the-art performance when no external knowledge is used.", "keyphrases": ["crf", "random field", "segment", "word-level label"]} +{"id": "li-etal-2020-connecting", "title": "Connecting the Dots: Event Graph Schema Induction with Path Language Modeling", "abstract": "Event schemas can guide our understanding and ability to make predictions with respect to what might happen next. We propose a new Event Graph Schema, where two event types are connected through multiple paths involving entities that fill important roles in a coherent story. We then introduce Path Language Model, an auto-regressive language model trained on event-event paths, and select salient and coherent paths to probabilistically construct these graph schemas. We design two evaluation metrics, instance coverage and instance coherence, to evaluate the quality of graph schema induction, by checking when coherent event instances are covered by the schema graph. Intrinsic evaluations show that our approach is highly effective at inducing salient and coherent schemas. Extrinsic evaluations show the induced schema repository provides significant improvement to downstream end-to-end Information Extraction over a state-of-the-art joint neural extraction model, when used as additional global features to unfold instance graphs.", "keyphrases": ["schema", "event type", "story", "auto-regressive language model"]} +{"id": "huang-etal-2006-statistical", "title": "Statistical Syntax-Directed Translation with Extended Domain of Locality", "abstract": "In syntax-directed translation, the source-language input is first parsed into a parse-tree, which is then recursively converted into a string in the target-language. We model this conversion by an extended tree-to-string transducer that has multi-level trees on the source-side, which gives our system more expressive power and flexibility. 

We also define a direct probability model and use a linear-time dynamic programming algorithm to search for the best derivation. The model is then extended to the general log-linear framework in order to incorporate other features like n-gram language models. We devise a simple-yet-effective algorithm to generate non-duplicate k-best translations for n-gram rescoring. Preliminary experiments on English-to-Chinese translation show a significant improvement in terms of translation quality compared to a state-of-the-art phrase-based system.", "keyphrases": ["syntax-directed translation", "string", "translation quality", "tree-based model"]} +{"id": "finch-sumita-2008-dynamic", "title": "Dynamic Model Interpolation for Statistical Machine Translation", "abstract": "This paper presents a technique for class-dependent decoding for statistical machine translation (SMT). The approach differs from previous methods of class-dependent translation in that the class-dependent forms of all models are integrated directly into the decoding process. We employ probabilistic mixture weights between models that can change dynamically on a segment-by-segment basis depending on the characteristics of the source segment. The effectiveness of this approach is demonstrated by evaluating its performance on travel conversation data. We used the approach to tackle the translation of questions and declarative sentences using class-dependent models. To achieve this, our system integrated two sets of models specifically built to deal with sentences that fall into one of two classes of dialog sentence: questions and declarations, with a third set of models built to handle the general class. The technique was thoroughly evaluated on data from 17 language pairs using 6 machine translation evaluation metrics. We found the results were corpus-dependent, but in most cases our system was able to improve translation performance, and for some languages the improvements were substantial.", "keyphrases": ["statistical machine translation", "declarative sentence", "general model"]} +{"id": "merrill-etal-2020-formal", "title": "A Formal Hierarchy of RNN Architectures", "abstract": "We develop a formal hierarchy of the expressive capacity of RNN architectures. The hierarchy is based on two formal properties: space complexity, which measures the RNN's memory, and rational recurrence, defined as whether the recurrent update can be described by a weighted finite-state machine. We place several RNN variants within this hierarchy. For example, we prove the LSTM is not rational, which formally separates it from the related QRNN (Bradbury et al., 2016). We also show how these models' expressive capacity is expanded by stacking multiple layers or composing them with different pooling functions. Our results build on the theory of \u201csaturated\u201d RNNs (Merrill, 2019). While formally extending these findings to unsaturated RNNs is left to future work, we hypothesize that the practical learnable capacity of unsaturated RNNs obeys a similar hierarchy. Experimental findings from training unsaturated networks on formal languages provide empirical support for this conjecture.", "keyphrases": ["formal hierarchy", "rnn architecture", "finite-state machine"]} +{"id": "wu-yarowsky-2020-computational", "title": "Computational Etymology and Word Emergence", "abstract": "We developed an extensible, comprehensive Wiktionary parser that improves over several existing parsers. 

We predict the etymology of a word across the full range of etymology types and languages in Wiktionary, showing improvements over a strong baseline. We also model word emergence and show the application of etymology in modeling this phenomenon. We release our parser to further research in this understudied field.", "keyphrases": ["etymology", "word emergence", "wiktionary parser"]} +{"id": "duong-etal-2015-cross", "title": "Cross-lingual Transfer for Unsupervised Dependency Parsing Without Parallel Data", "abstract": "Cross-lingual transfer has been shown to produce good results for dependency parsing of resource-poor languages. Although this avoids the need for a target language treebank, most approaches have still used large parallel corpora. However, parallel data is scarce for low-resource languages, and we report a new method that does not need parallel data. Our method learns syntactic word embeddings that generalise over the syntactic contexts of a bilingual vocabulary, and incorporates these into a neural network parser. We show empirical improvements over a baseline delexicalised parser on both the CoNLL and Universal Dependency Treebank datasets. We analyse the importance of the source languages, and show that combining multiple source-languages leads to a substantial improvement.", "keyphrases": ["dependency parsing", "parallel data", "cross-lingual transfer", "pos tag", "similar approach"]} +{"id": "xie-etal-2021-knowledge-interactive", "title": "Knowledge-Interactive Network with Sentiment Polarity Intensity-Aware Multi-Task Learning for Emotion Recognition in Conversations", "abstract": "Emotion Recognition in Conversation (ERC) has gained much attention from the NLP community recently. Some models concentrate on leveraging commonsense knowledge or multi-task learning to help complicated emotional reasoning. However, these models neglect direct utterance-knowledge interaction. In addition, these models utilize emotion-indirect auxiliary tasks, which provide limited affective information for the ERC task. To address the above issues, we propose a Knowledge-Interactive Network with sentiment polarity intensity-aware multi-task learning, namely KI-Net, which leverages both commonsense knowledge and sentiment lexicon to augment semantic information. Specifically, we use a self-matching module for internal utterance-knowledge interaction. Considering correlations with the ERC task, a phrase-level Sentiment Polarity Intensity Prediction (SPIP) task is devised as an auxiliary task. Experiments show that the knowledge integration, self-matching and SPIP modules each improve the model performance on three datasets. Moreover, our KI-Net model shows 1.04% performance improvement over the state-of-the-art model on the IEMOCAP dataset.", "keyphrases": ["intensity-aware multi-task learning", "emotion recognition", "conversation"]} +{"id": "hassan-etal-2006-graph", "title": "Graph Based Semi-Supervised Approach for Information Extraction", "abstract": "Classification techniques deploy supervised labeled instances to train classifiers for various classification problems. However, labeled instances are limited, expensive, and time consuming to obtain, due to the need for experienced human annotators. Meanwhile, large amounts of unlabeled data are usually easy to obtain. Semi-supervised learning addresses the problem of utilizing unlabeled data along with supervised labeled data, to build better classifiers. 

In this paper we introduce a semi-supervised approach based on mutual reinforcement in graphs to obtain more labeled data to enhance the classifier accuracy. The approach has been used to supplement a maximum entropy model for semi-supervised training of the ACE Relation Detection and Characterization (RDC) task. ACE RDC is considered a hard task in information extraction due to lack of large amounts of training data and inconsistencies in the available data. The proposed approach provides 10% relative improvement over the state-of-the-art supervised baseline system.", "keyphrases": ["semi-supervised approach", "information extraction", "review"]} +{"id": "soricut-brill-2004-unified", "title": "A Unified Framework For Automatic Evaluation Using 4-Gram Co-occurrence Statistics", "abstract": "In this paper we propose a unified framework for automatic evaluation of NLP applications using N-gram co-occurrence statistics. The automatic evaluation metrics proposed to date for Machine Translation and Automatic Summarization are particular instances from the family of metrics we propose. We show that different members of the same family of metrics explain best the variations obtained with human evaluations, according to the application being evaluated (Machine Translation, Automatic Summarization, and Automatic Question Answering) and the evaluation guidelines used by humans for evaluating such applications.", "keyphrases": ["unified framework", "automatic evaluation", "n-gram co-occurrence statistic"]} +{"id": "xu-durrett-2018-spherical", "title": "Spherical Latent Spaces for Stable Variational Autoencoders", "abstract": "A hallmark of variational autoencoders (VAEs) for text processing is their combination of powerful encoder-decoder models, such as LSTMs, with simple latent distributions, typically multivariate Gaussians. These models pose a difficult optimization problem: there is an especially bad local optimum where the variational posterior always equals the prior and the model does not use the latent variable at all, a kind of \u201ccollapse\u201d which is encouraged by the KL divergence term of the objective. In this work, we experiment with another choice of latent distribution, namely the von Mises-Fisher (vMF) distribution, which places mass on the surface of the unit hypersphere. With this choice of prior and posterior, the KL divergence term now only depends on the variance of the vMF distribution, giving us the ability to treat it as a fixed hyperparameter. We show that doing so not only averts the KL collapse, but consistently gives better likelihoods than Gaussians across a range of modeling conditions, including recurrent language modeling and bag-of-words document modeling. An analysis of the properties of our vMF representations shows that they learn richer and more nuanced structures in their latent representations than their Gaussian counterparts.", "keyphrases": ["variational autoencoder", "vae", "language modeling", "text generation"]} +{"id": "kate-mooney-2007-semi", "title": "Semi-Supervised Learning for Semantic Parsing using Support Vector Machines", "abstract": "We present a method for utilizing unannotated sentences to improve a semantic parser which maps natural language (NL) sentences into their formal meaning representations (MRs). Given NL sentences annotated with their MRs, the initial supervised semantic parser learns the mapping by training Support Vector Machine (SVM) classifiers for every production in the MR grammar. 

Our new method applies the learned semantic parser to the unannotated sentences and collects unlabeled examples which are then used to retrain the classifiers using a variant of transductive SVMs. Experimental results show the improvements obtained over the purely supervised parser, particularly when the annotated training set is small.", "keyphrases": ["semantic parsing", "svm", "semi-supervised learning"]} +{"id": "pado-etal-2009-robust", "title": "Robust Machine Translation Evaluation with Entailment Features", "abstract": "Existing evaluation metrics for machine translation lack crucial robustness: their correlations with human quality judgments vary considerably across languages and genres. We believe that the main reason is their inability to properly capture meaning: A good translation candidate means the same thing as the reference translation, regardless of formulation. We propose a metric that evaluates MT output based on a rich set of features motivated by textual entailment, such as lexical-semantic (in-)compatibility and argument structure overlap. We compare this metric against a combination metric of four state-of-the-art scores (BLEU, NIST, TER, and METEOR) in two different settings. The combination metric outperforms the individual scores, but is bested by the entailment-based metric. Combining the entailment and traditional features yields further improvements.", "keyphrases": ["machine translation", "entailment feature", "rich set"]} +{"id": "eguchi-lavrenko-2006-sentiment", "title": "Sentiment Retrieval using Generative Models", "abstract": "Ranking documents or sentences according to both topic and sentiment relevance should serve a critical function in helping users when topics and sentiment polarities of the targeted text are not explicitly given, as is often the case on the web. In this paper, we propose several sentiment information retrieval models in the framework of probabilistic language models, assuming that a user both inputs query terms expressing a certain topic and also specifies a sentiment polarity of interest in some manner. We combine sentiment relevance models and topic relevance models with model parameters estimated from training data, considering the topic dependence of the sentiment. Our experiments prove that our models are effective.", "keyphrases": ["probabilistic language model", "dependence", "sentiment retrieval"]} +{"id": "zhang-xue-2018-structured", "title": "Structured Interpretation of Temporal Relations", "abstract": "Temporal relations between events and time expressions in a document are often modeled in an unstructured manner where relations between individual pairs of time expressions and events are considered in isolation. This often results in inconsistent and incomplete annotation and computational modeling. We propose a novel annotation approach where events and time expressions in a document form a dependency tree in which each dependency relation corresponds to an instance of temporal anaphora where the antecedent is the parent and the anaphor is the child. We annotate a corpus of 235 documents using this approach in the two genres of news and narratives, with 48 documents doubly annotated. We report a stable and high inter-annotator agreement on the doubly annotated subset, validating our approach, and perform a quantitative comparison between the two genres of the entire corpus. 

We make this corpus publicly available.", "keyphrases": ["parent", "inter-annotator agreement", "temporal dependency tree"]} +{"id": "izumi-etal-2003-automatic", "title": "Automatic Error Detection in the Japanese Learners' English Spoken Data", "abstract": "This paper describes a method of detecting grammatical and lexical errors made by Japanese learners of English and other techniques that improve the accuracy of error detection with a limited amount of training data. In this paper, we demonstrate to what extent the proposed methods hold promise by conducting experiments using our learner corpus, which contains information on learners' errors.", "keyphrases": ["japanese learners", "error type", "preposition error", "native english speaker"]} +{"id": "huang-etal-2005-machine", "title": "Machine Translation as Lexicalized Parsing with Hooks", "abstract": "We adapt the \"hook\" trick for speeding up bilexical parsing to the decoding problem for machine translation models that are based on combining a synchronous context free grammar as the translation model with an n-gram language model. This dynamic programming technique yields lower complexity algorithms than have previously been described for an important class of translation models.", "keyphrases": ["complexity", "machine translation", "hook trick"]} +{"id": "piperidis-2012-meta", "title": "The META-SHARE Language Resources Sharing Infrastructure: Principles, Challenges, Solutions", "abstract": "Language resources have become a key factor in the development cycle of language technology. The current prevailing methodologies, the sheer number of languages and the vast volumes of digital content together with the wide palette of useful content processing applications, render new models for managing the underlying language resources indispensable. This paper presents META-SHARE, an open resource exchange infrastructure, which aims to boost visibility, documentation, identification, openness and sharing, collaboration, preservation and interoperability of language data and basic language processing tools. META-SHARE is implemented as a network of distributed repositories of language resources. It offers providers and consumers of resources the necessary functionalities for describing, storing, searching, licensing and downloading language resources in a single integrated technical platform. META-SHARE favours and aligns itself with the growing open data and open source tools movement. To this end, it has prepared the necessary underlying legal framework consisting of a Charter for language resource sharing, as well as a set of licensing templates aiming to act as recommended licence models in an attempt to facilitate the legal interoperability of language resources. In its current version, META-SHARE features 13 resource repositories, with over 1200 resource packages.", "keyphrases": ["meta-share", "language resource", "metadata schema"]} +{"id": "yamamoto-sumita-2007-bilingual", "title": "Bilingual Cluster Based Models for Statistical Machine Translation", "abstract": "We propose a domain specific model for statistical machine translation. It is well-known that domain specific language models perform well in automatic speech recognition. We show that domain specific language and translation models also benefit statistical machine translation. However, there are two problems with using domain specific models. The first is the data sparseness problem. We employ an adaptation technique to overcome this problem. 
The second issue is domain prediction. In order to perform adaptation, the domain must be provided; however, in many cases, the domain is not known or changes dynamically. For these cases, not only the translation target sentence but also the domain must be predicted. This paper focuses on the domain prediction problem for statistical machine translation. In the proposed method, a bilingual training corpus is automatically clustered into sub-corpora. Each sub-corpus is deemed to be a domain. The domain of a source sentence is predicted by using its similarity to the sub-corpora. The predicted domain (sub-corpus) specific language and translation models are then used for the translation decoding. This approach gave an improvement of 2.7 in BLEU score on the IWSLT05 Japanese to English evaluation corpus (improving the score from 52.4 to 55.1). This is a substantial gain and indicates the validity of the proposed bilingual cluster based models.", "keyphrases": ["cluster", "statistical machine translation", "training corpus"]} +{"id": "li-etal-2013-recursive", "title": "Recursive Autoencoders for ITG-Based Translation", "abstract": "While inversion transduction grammar (ITG) is well suited for modeling ordering shifts between languages, how to make applying the two reordering rules (i.e., straight and inverted) dependent on actual blocks being merged remains a challenge. Unlike previous work that only uses boundary words, we propose to use recursive autoencoders to make full use of the entire merging blocks alternatively. The recursive autoencoders are capable of generating vector space representations for variable-sized phrases, which enable predicting orders to exploit syntactic and semantic information from a neural language modeling\u2019s perspective. Experiments on the NIST 2008 dataset show that our system significantly improves over the MaxEnt classifier by 1.07 BLEU points.", "keyphrases": ["block", "recursive autoencoder", "machine translation"]} +{"id": "schwenk-etal-2007-smooth", "title": "Smooth Bilingual N-Gram Translation", "abstract": "We address the problem of smoothing translation probabilities in a bilingual N-gram-based statistical machine translation system. It is proposed to project the bilingual tuples onto a continuous space and to estimate the translation probabilities in this representation. A neural network is used to perform the projection and the probability estimation. Smoothing probabilities is most important for tasks with a limited amount of training material. We consider here the BTEC task of the 2006 IWSLT evaluation. Improvements in all official automatic measures are reported when translating from Italian to English. Using a continuous space model for the translation model and the target language model, an improvement of 1.5 BLEU on the test data is observed.", "keyphrases": ["translation probability", "tuple", "continuous space"]} +{"id": "choudhary-etal-2018-neural", "title": "Neural Machine Translation for English-Tamil", "abstract": "A huge amount of valuable resources is available on the web in English, which are often translated into local languages to facilitate knowledge sharing among local people who are not much familiar with English. However, translating such content manually is a very tedious, costly, and time-consuming process. To this end, machine translation is an efficient approach to translate text without any human involvement. 

Neural machine translation (NMT) is one of the most recent and effective translation techniques among existing machine translation systems. In this paper, we apply NMT to the English-Tamil language pair. We propose a novel neural machine translation technique using word-embedding along with Byte-Pair-Encoding (BPE) to develop an efficient translation system that overcomes the OOV (Out Of Vocabulary) problem for languages which do not have many translations available online. We use the BLEU score for evaluating the system performance. Experimental results confirm that our proposed MIDAS translator (8.33 BLEU score) outperforms Google translator (3.75 BLEU score).", "keyphrases": ["english-tamil", "vocabulary", "neural machine translation"]} +{"id": "amidei-etal-2019-agreement", "title": "Agreement is overrated: A plea for correlation to assess human evaluation reliability", "abstract": "Inter-Annotator Agreement (IAA) is used as a means of assessing the quality of NLG evaluation data, in particular, its reliability. According to existing scales of IAA interpretation \u2013 see, for example, Lommel et al. (2014), Liu et al. (2016), Sedoc et al. (2018) and Amidei et al. (2018a) \u2013 most data collected for NLG evaluation fail the reliability test. We confirmed this trend by analysing papers published over the last 10 years in NLG-specific conferences (in total 135 papers that included some sort of human evaluation study). Following Sampson and Babarczy (2008), Lommel et al. (2014), Joshi et al. (2016) and Amidei et al. (2018b), such phenomena can be explained in terms of irreducible human language variability. Using three case studies, we show the limits of considering IAA as the only criterion for checking evaluation reliability. Given human language variability, we propose that for human evaluation of NLG, correlation coefficients and agreement coefficients should be used together to obtain a better assessment of the evaluation data reliability. This is illustrated using the three case studies.", "keyphrases": ["evaluation reliability", "case study", "agreement"]} +{"id": "cui-etal-2019-cross", "title": "Cross-Lingual Machine Reading Comprehension", "abstract": "Though the community has made great progress on Machine Reading Comprehension (MRC) task, most of the previous works are solving English-based MRC problems, and there are few efforts on other languages mainly due to the lack of large-scale training data. In this paper, we propose Cross-Lingual Machine Reading Comprehension (CLMRC) task for the languages other than English. Firstly, we present several back-translation approaches for CLMRC task which is straightforward to adopt. However, to exactly align the answer into source language is difficult and could introduce additional noise. In this context, we propose a novel model called Dual BERT, which takes advantage of the large-scale training data provided by rich-resource language (such as English) and learn the semantic relations between the passage and question in bilingual context, and then utilize the learned knowledge to improve reading comprehension performance of low-resource language. We conduct experiments on two Chinese machine reading comprehension datasets CMRC 2018 and DRCD. The results show consistent and significant improvements over various state-of-the-art systems by a large margin, which demonstrates the potential of the CLMRC task. 

Resources available: ", "keyphrases": ["machine reading comprehension", "mrc", "cross-lingual mrc"]} +{"id": "pan-etal-2006-annotated", "title": "An Annotated Corpus of Typical Durations of Events", "abstract": "In this paper, we present our work on generating an annotated corpus for extracting information about the typical durations of events from texts. We include the annotation guidelines, the event classes we categorized, the way we use normal distributions to model vague and implicit temporal information, and how we evaluate inter-annotator agreement. The experimental results show that our guidelines are effective in improving the inter-annotator agreement.", "keyphrases": ["annotated corpus", "duration", "guideline"]} +{"id": "belinkov-2022-probing", "title": "Probing Classifiers: Promises, Shortcomings, and Advances", "abstract": "Probing classifiers have emerged as one of the prominent methodologies for interpreting and analyzing deep neural network models of natural language processing. The basic idea is simple\u2014a classifier is trained to predict some linguistic property from a model's representations\u2014and has been used to examine a wide variety of models and properties. However, recent studies have demonstrated various methodological limitations of this approach. This squib critically reviews the probing classifiers framework, highlighting their promises, shortcomings, and advances.", "keyphrases": ["methodology", "linguistic property", "probing classifier"]} +{"id": "sterckx-etal-2016-supervised", "title": "Supervised Keyphrase Extraction as Positive Unlabeled Learning", "abstract": "The problem of noisy and unbalanced training data for supervised keyphrase extraction results from the subjectivity of keyphrase assignment, which we quantify by crowdsourcing keyphrases for news and fashion magazine articles with many annotators per document. We show that annotators exhibit substantial disagreement, meaning that single annotator data could lead to very different training sets for supervised keyphrase extractors. Thus, annotations from single authors or readers lead to noisy training data and poor extraction performance of the resulting supervised extractor. We provide a simple but effective solution to still work with such data by reweighting the importance of unlabeled candidate phrases in a two stage Positive Unlabeled Learning setting. We show that performance of trained keyphrase extractors approximates a classifier trained on articles labeled by multiple annotators, leading to higher average F1 scores and better rankings of keyphrases. We apply this strategy to a variety of test collections from different backgrounds and show improvements over strong baseline models.", "keyphrases": ["positive unlabeled learning", "annotator", "supervised keyphrase extraction"]} +{"id": "chiticariu-etal-2013-rule", "title": "Rule-Based Information Extraction is Dead! Long Live Rule-Based Information Extraction Systems!", "abstract": "The rise of \u201cBig Data\u201d analytics over unstructured text has led to renewed interest in information extraction (IE). We surveyed the landscape of IE technologies and identified a major disconnect between industry and academia: while rule-based IE dominates the commercial world, it is widely regarded as dead-end technology by the academia. We believe the disconnect stems from the way in which the two communities measure the benefits and costs of IE, as well as academia\u2019s perception that rule-based IE is devoid of research challenges. 

We make a case for the importance of rule-based IE to industry practitioners. We then lay out a research agenda in advancing the state-of-the-art in rule-based IE systems which we believe has the potential to bridge the gap between academic research and industry practice.", "keyphrases": ["information extraction", "machine learning", "rule-based system", "high level"]} +{"id": "li-etal-2020-multi-encoder", "title": "Does Multi-Encoder Help? A Case Study on Context-Aware Neural Machine Translation", "abstract": "In encoder-decoder neural models, multiple encoders are in general used to represent the contextual information in addition to the individual sentence. In this paper, we investigate multi-encoder approaches in document-level neural machine translation (NMT). Surprisingly, we find that the context encoder does not only encode the surrounding sentences but also behaves as a noise generator. This makes us rethink the real benefits of multi-encoder in context-aware translation - some of the improvements come from robust training. We compare several methods that introduce noise and/or well-tuned dropout setup into the training of these encoders. Experimental results show that noisy training plays an important role in multi-encoder-based NMT, especially when the training data is small. Also, we establish a new state-of-the-art on IWSLT Fr-En task by careful use of noise generation and dropout methods.", "keyphrases": ["neural machine translation", "context encoder", "regularization"]} +{"id": "luo-etal-2017-unsupervised", "title": "Unsupervised Learning of Morphological Forests", "abstract": "This paper focuses on unsupervised modeling of morphological families, collectively comprising a forest over the language vocabulary. This formulation enables us to capture edge-wise properties reflecting single-step morphological derivations, along with global distributional properties of the entire forest. These global properties constrain the size of the affix set and encourage formation of tight morphological families. The resulting objective is solved using Integer Linear Programming (ILP) paired with contrastive estimation. We train the model by alternating between optimizing the local log-linear model and the global ILP objective. We evaluate our system on three tasks: root detection, clustering of morphological families, and segmentation. Our experiments demonstrate that our model yields consistent gains in all three tasks compared with the best published results.", "keyphrases": ["morphological family", "integer linear programming", "unsupervised learning"]} +{"id": "finch-etal-2005-using", "title": "Using Machine Translation Evaluation Techniques to Determine Sentence-level Semantic Equivalence", "abstract": "The task of machine translation (MT) evaluation is closely related to the task of sentence-level semantic equivalence classification. This paper investigates the utility of applying standard MT evaluation methods (BLEU, NIST, WER and PER) to building classifiers to predict semantic equivalence and entailment. We also introduce a novel classification method based on PER which leverages part of speech information of the words contributing to the word matches and non-matches in the sentence. Our results show that MT evaluation techniques are able to produce useful features for paraphrase classification and to a lesser extent entailment. 

Our technique gives a substantial improvement in paraphrase classification accuracy over all of the other models used in the experiments.", "keyphrases": ["wer", "classification accuracy", "paraphrase identification"]} +{"id": "filippova-strube-2008-dependency", "title": "Dependency Tree Based Sentence Compression", "abstract": "We present a novel unsupervised method for sentence compression which relies on a dependency tree representation and shortens sentences by removing subtrees. An automatic evaluation shows that our method obtains results comparable or superior to the state of the art. We demonstrate that the choice of the parser affects the performance of the system. We also apply the method to German and report the results of an evaluation with humans.", "keyphrases": ["sentence compression", "unsupervised method", "weight"]} +{"id": "bohnet-2010-top", "title": "Top Accuracy and Fast Dependency Parsing is not a Contradiction", "abstract": "In addition to a high accuracy, short parsing and training times are the most important properties of a parser. However, parsing and training times are still relatively long. To determine why, we analyzed the time usage of a dependency parser. We illustrate that the mapping of the features onto their weights in the support vector machine is the major factor in time complexity. To resolve this problem, we implemented the passive-aggressive perceptron algorithm as a Hash Kernel. The Hash Kernel substantially improves the parsing times and takes into account the features of negative examples built during the training. This has led to a higher accuracy. We could further increase the parsing and training speed with a parallel feature extraction and a parallel parsing algorithm. We are convinced that the Hash Kernel and the parallelization can be applied successfully to other NLP applications as well, such as transition-based dependency parsers, phrase structure parsers, and machine translation.", "keyphrases": ["dependency parser", "feature extraction", "head", "execution time"]} +{"id": "agirre-lopez-de-lacalle-2007-ubc", "title": "UBC-ALM: Combining k-NN with SVD for WSD", "abstract": "This work describes the University of the Basque Country system (UBC-ALM) for lexical sample and all-words WSD subtasks of SemEval-2007 task 17, where it performed in the second and fifth positions respectively. The system is based on a combination of k-Nearest Neighbor classifiers, with each classifier learning from a distinct set of features: local features (syntactic, collocations features), topical features (bag-of-words, domain information) and latent features learned from a reduced space using Singular Value Decomposition.", "keyphrases": ["svd", "feature-to-document matrix", "unlabeled data"]} +{"id": "choshen-abend-2018-automatic", "title": "Automatic Metric Validation for Grammatical Error Correction", "abstract": "Metric validation in Grammatical Error Correction (GEC) is currently done by observing the correlation between human and metric-induced rankings. However, such correlation studies are costly, methodologically troublesome, and suffer from low inter-rater agreement. We propose MAEGE, an automatic methodology for GEC metric validation, that overcomes many of the difficulties in the existing methodology. Experiments with MAEGE shed a new light on metric quality, showing for example that the standard M^2 metric fares poorly on corpus-level ranking. 

Moreover, we use MAEGE to perform a detailed analysis of metric behavior, showing that some types of valid edits are consistently penalized by existing metrics.", "keyphrases": ["metric validation", "grammatical error correction", "maege"]} +{"id": "lee-dernoncourt-2016-sequential", "title": "Sequential Short-Text Classification with Recurrent and Convolutional Neural Networks", "abstract": "Recent approaches based on artificial neural networks (ANNs) have shown promising results for short-text classification. However, many short texts occur in sequences (e.g., sentences in a document or utterances in a dialog), and most existing ANN-based systems do not leverage the preceding short texts when classifying a subsequent one. In this work, we present a model based on recurrent neural networks and convolutional neural networks that incorporates the preceding short texts. Our model achieves state-of-the-art results on three different datasets for dialog act prediction.", "keyphrases": ["recurrent", "sequential short-text classification", "conversation", "deep"]} +{"id": "cai-zhao-2016-neural", "title": "Neural Word Segmentation Learning for Chinese", "abstract": "Most previous approaches to Chinese word segmentation formalize this problem as a character-based sequence labeling task where only contextual information within fixed sized local windows and simple interactions between adjacent tags can be captured. In this paper, we propose a novel neural framework which thoroughly eliminates context windows and can utilize complete segmentation history. Our model employs a gated combination neural network over characters to produce distributed representations of word candidates, which are then given to a long short-term memory (LSTM) language scoring model. Experiments on the benchmark datasets show that without the help of feature engineering as most existing approaches, our models achieve competitive or better performances with previous state-of-the-art methods.", "keyphrases": ["word segmentation", "chinese", "novel neural framework"]} +{"id": "liu-etal-2017-learning", "title": "Learning Character-level Compositionality with Visual Features", "abstract": "Previous work has modeled the compositionality of words by creating character-level models of meaning, reducing problems of sparsity for rare words. However, in many writing systems compositionality has an effect even on the character-level: the meaning of a character is derived by the sum of its parts. In this paper, we model this effect by creating embeddings for characters based on their visual characteristics, creating an image for the character and running it through a convolutional neural network to produce a visual character embedding. Experiments on a text classification task demonstrate that such a model allows for better processing of instances with rare characters in languages such as Chinese, Japanese, and Korean. Additionally, qualitative analyses demonstrate that our proposed model learns to focus on the parts of characters that carry topical content, resulting in embeddings that are coherent in visual space.", "keyphrases": ["compositionality", "visual feature", "convolutional neural network", "chinese", "japanese"]} +{"id": "yatskar-etal-2014-see", "title": "See No Evil, Say No Evil: Description Generation from Densely Labeled Images", "abstract": "This paper studies generation of descriptive sentences from densely annotated images. 

Previous work studied generation from automatically detected visual information but produced a limited class of sentences, hindered by currently unreliable recognition of activities and attributes. Instead, we collect human annotations of objects, parts, attributes and activities in images. These annotations allow us to build a significantly more comprehensive model of language generation and allow us to study what visual information is required to generate human-like descriptions. Experiments demonstrate high quality output and that activity annotations and relative spatial location of objects contribute most to producing high quality sentences.", "keyphrases": ["image", "attribute", "human-like description"]} +{"id": "pavlopoulos-etal-2021-semeval", "title": "SemEval-2021 Task 5: Toxic Spans Detection", "abstract": "The Toxic Spans Detection task of SemEval-2021 required participants to predict the spans of toxic posts that were responsible for the toxic label of the posts. The task could be addressed as supervised sequence labeling, using training data with gold toxic spans provided by the organisers. It could also be treated as rationale extraction, using classifiers trained on potentially larger external datasets of posts manually annotated as toxic or not, without toxic span annotations. For the supervised sequence labeling approach and evaluation purposes, posts previously labeled as toxic were crowd-annotated for toxic spans. Participants submitted their predicted spans for a held-out test set and were scored using character-based F1. This overview summarises the work of the 36 teams that provided system descriptions.", "keyphrases": ["toxic spans detection", "offensive span identification", "english language"]} +{"id": "mcdonald-nivre-2011-analyzing", "title": "Analyzing and Integrating Dependency Parsers", "abstract": "There has been a rapid increase in the volume of research on data-driven dependency parsers in the past five years. This increase has been driven by the availability of treebanks in a wide variety of languages\u2014due in large part to the CoNLL shared tasks\u2014as well as the straightforward mechanisms by which dependency theories of syntax can encode complex phenomena in free word order languages. In this article, our aim is to take a step back and analyze the progress that has been made through an analysis of the two predominant paradigms for data-driven dependency parsing, which are often called graph-based and transition-based dependency parsing. Our analysis covers both theoretical and empirical aspects and sheds light on the kinds of errors each type of parser makes and how they relate to theoretical expectations. Using these observations, we present an integrated system based on a stacking learning framework and show that such a system can learn to overcome the shortcomings of each non-integrated system.", "keyphrases": ["dependency parser", "sentence length", "discontinuous construction"]} +{"id": "mao-etal-2020-tchebycheff", "title": "Tchebycheff Procedure for Multi-task Text Classification", "abstract": "Multi-task Learning methods have achieved great progress in text classification. However, existing methods assume that multi-task text classification problems are convex multiobjective optimization problems, which is unrealistic in real-world applications. To address this issue, this paper presents a novel Tchebycheff procedure to optimize the multi-task classification problems without convex assumption. 
The extensive experiments back up our theoretical analysis and validate the superiority of our proposals.", "keyphrases": ["multi-task text classification", "optimization problem", "tchebycheff procedure"]} +{"id": "sharoff-etal-2010-web", "title": "The Web Library of Babel: evaluating genre collections", "abstract": "We present experiments in automatic genre classification on web corpora, comparing a wide variety of features on several different genre-annotated datasets (HGC, I-EN, KI-04, KRYS-I, MGC and SANTINIS). We investigate the performance of several types of features (POS n-grams, character n-grams and word n-grams) and show that simple character n-grams perform best on current collections because of their ability to generalise both lexical and syntactic phenomena related to genres. However, we also show that these impressive results might not be transferrable to the wider web due to the lack of comparability between different annotation labels (many webpages cannot be described in terms of the genre labels in individual collections), lack of representativeness of existing collections (many genres are represented by webpages coming from a small number of sources) as well as problems in the reliability of genre annotation (many pages from the web are difficult to interpret in terms of the labels available). This suggests that more research is needed to understand genres on the Web.", "keyphrases": ["web", "genre collection", "mgc", "character n-gram", "register"]} +{"id": "huang-etal-2010-soft", "title": "Soft Syntactic Constraints for Hierarchical Phrase-Based Translation Using Latent Syntactic Distributions", "abstract": "In this paper, we present a novel approach to enhance hierarchical phrase-based machine translation systems with linguistically motivated syntactic features. Rather than directly using treebank categories as in previous studies, we learn a set of linguistically-guided latent syntactic categories automatically from a source-side parsed, word-aligned parallel corpus, based on the hierarchical structure among phrase pairs as well as the syntactic structure of the source side. In our model, each X nonterminal in a SCFG rule is decorated with a real-valued feature vector computed based on its distribution of latent syntactic categories. These feature vectors are utilized at decoding time to measure the similarity between the syntactic analysis of the source side and the syntax of the SCFG rules that are applied to derive translations. Our approach maintains the advantages of hierarchical phrase-based translation systems while at the same time naturally incorporates soft syntactic constraints.", "keyphrases": ["latent syntactic distribution", "feature vector", "translation rule", "soft constraint modeling"]} +{"id": "stanovsky-etal-2018-supervised", "title": "Supervised Open Information Extraction", "abstract": "We present data and methods that enable a supervised learning approach to Open Information Extraction (Open IE). Central to the approach is a novel formulation of Open IE as a sequence tagging problem, addressing challenges such as encoding multiple extractions for a predicate. We also develop a bi-LSTM transducer, extending recent deep Semantic Role Labeling models to extract Open IE tuples and provide confidence scores for tuning their precision-recall tradeoff. 

Furthermore, we show that the recently released Question-Answer Meaning Representation dataset can be automatically converted into an Open IE corpus which significantly increases the amount of available training data. Our supervised model outperforms the existing state-of-the-art Open IE systems on benchmark datasets.", "keyphrases": ["open information extraction", "sequence tagging", "predicate", "confidence score"]} +{"id": "alegria-etal-2008-spelling", "title": "Spelling Correction: from Two-Level Morphology to Open Source", "abstract": "Basque is a highly inflected and agglutinative language (Alegria et al., 1996). Two-level morphology has been applied successfully to this kind of languages and there are two-level based descriptions for very different languages. After doing the morphological description for a language, it is easy to develop a spelling checker/corrector for this language. However, what happens if we want to use the speller in the \u201cfree world\u201d (OpenOffice, Mozilla, emacs, LaTeX, etc.)? Ispell and similar tools (aspell, hunspell, myspell) are the usual mechanisms for these purposes, but they do not fit the two-level model. In the absence of two-level morphology based mechanisms, an automatic conversion from two-level description to hunspell is described in this paper.", "keyphrases": ["two-level morphology", "agglutinative language", "spelling correction"]} +{"id": "alfter-volodina-2018-towards", "title": "Towards Single Word Lexical Complexity Prediction", "abstract": "In this paper we present work-in-progress where we investigate the usefulness of previously created word lists to the task of single-word lexical complexity analysis and prediction of the complexity level for learners of Swedish as a second language. The word lists used map each word to a single CEFR level, and the task consists of predicting CEFR levels for unseen words. In contrast to previous work on word-level lexical complexity, we experiment with topics as additional features and show that linking words to topics significantly increases accuracy of classification.", "keyphrases": ["complexity level", "learner", "swedish", "second language"]} +{"id": "garmash-monz-2016-ensemble", "title": "Ensemble Learning for Multi-Source Neural Machine Translation", "abstract": "In this paper we describe and evaluate methods to perform ensemble prediction in neural machine translation (NMT). We compare two methods of ensemble set induction: sampling parameter initializations for an NMT system, which is a relatively established method in NMT (Sutskever et al., 2014), and NMT systems translating from different source languages into the same target language, i.e., multi-source ensembles, a method recently introduced by Firat et al. (2016). We are motivated by the observation that for different language pairs systems make different types of mistakes. We propose several methods with different degrees of parameterization to combine individual predictions of NMT systems so that they mutually compensate for each other's mistakes and improve overall performance. We find that the biggest improvements can be obtained from a context-dependent weighting scheme for multi-source ensembles. This result offers stronger support for the linguistic motivation of using multi-source ensembles than previous approaches. Evaluation is carried out for German and French into English translation. 

The best multi-source ensemble method achieves an improvement of up to 2.2 BLEU points over the strongest single-source ensemble baseline, and a 2 BLEU improvement over a multi-source ensemble baseline.", "keyphrases": ["neural machine translation", "nmt system", "ensemble"]} +{"id": "ali-renals-2018-word", "title": "Word Error Rate Estimation for Speech Recognition: e-WER", "abstract": "Measuring the performance of automatic speech recognition (ASR) systems requires manually transcribed data in order to compute the word error rate (WER), which is often time-consuming and expensive. In this paper, we propose a novel approach to estimate WER, or e-WER, which does not require a gold-standard transcription of the test set. Our e-WER framework uses a comprehensive set of features: ASR recognised text, character recognition results to complement recognition output, and internal decoder features. We report results for the two feature sets, black-box and glass-box, using 24 unseen Arabic broadcast programs. Our system achieves 16.9% WER root mean squared error (RMSE) across 1,400 sentences. The estimated overall WER e-WER was 25.3% for the three-hour test set, while the actual WER was 28.5%.", "keyphrases": ["speech recognition", "e-wer", "word error rate"]} +{"id": "vulic-moens-2013-cross", "title": "Cross-Lingual Semantic Similarity of Words as the Similarity of Their Semantic Word Responses", "abstract": "We propose a new approach to identifying semantically similar words across languages. The approach is based on an idea that two words in different languages are similar if they are likely to generate similar words (which includes both source and target language words) as their top semantic word responses. Semantic word responding is a concept from cognitive science which addresses detecting most likely words that humans output as free word associations given some cue word. The method consists of two main steps: (1) it utilizes a probabilistic multilingual topic model trained on comparable data to learn and quantify the semantic word responses, (2) it provides ranked lists of similar words according to the similarity of their semantic word response vectors. We evaluate our approach in the task of bilingual lexicon extraction (BLE) for a variety of language pairs. We show that in the cross-lingual settings without any language pair dependent knowledge the response-based method of similarity is more robust and outperforms current state-of-the-art methods that directly operate in the semantic space of latent cross-lingual concepts/topics.", "keyphrases": ["semantic word response", "cross-lingual semantic similarity", "non-parallel data"]} +{"id": "zhang-etal-2019-broad", "title": "Broad-Coverage Semantic Parsing as Transduction", "abstract": "We unify different broad-coverage semantic parsing tasks into a transduction parsing paradigm, and propose an attention-based neural transducer that incrementally builds meaning representation via a sequence of semantic relations. By leveraging multiple attention mechanisms, the neural transducer can be effectively trained without relying on a pre-trained aligner. 

Experiments separately conducted on three broad-coverage semantic parsing tasks \u2013 AMR, SDP and UCCA \u2013 demonstrate that our attention-based neural transducer improves the state of the art on both AMR and UCCA, and is competitive with the state of the art on SDP.", "keyphrases": ["transduction", "semantic relation", "amr", "sdp", "node"]} +{"id": "agarwal-etal-2011-scisumm", "title": "SciSumm: A Multi-Document Summarization System for Scientific Articles", "abstract": "In this demo, we present SciSumm, an interactive multi-document summarization system for scientific articles. The document collection to be summarized is a list of papers cited together within the same source article, otherwise known as a co-citation. At the heart of the approach is a topic based clustering of fragments extracted from each article based on queries generated from the context surrounding the co-cited list of papers. This analysis enables the generation of an overview of common themes from the co-cited papers that relate to the context in which the co-citation was found. SciSumm is currently built over the 2008 ACL Anthology, however the generalizable nature of the summarization techniques and the extensible architecture makes it possible to use the system with other corpora where a citation network is available. Evaluation results on the same corpus demonstrate that our system performs better than an existing widely used multi-document summarization system (MEAD).", "keyphrases": ["multi-document summarization system", "fragment", "scisumm"]} +{"id": "loureiro-etal-2022-timelms", "title": "TimeLMs: Diachronic Language Models from Twitter", "abstract": "Despite its importance, the time variable has been largely neglected in the NLP and language model literature. In this paper, we present TimeLMs, a set of language models specialized on diachronic Twitter data. We show that a continual learning strategy contributes to enhancing Twitter-based language models' capacity to deal with future and out-of-distribution tweets, while making them competitive with standardized and more monolithic benchmarks. We also perform a number of qualitative analyses showing how they cope with trends and peaks in activity involving specific named entities or concept drift. TimeLMs is available at github.com/cardiffnlp/timelms.", "keyphrases": ["language model", "twitter data", "timelms"]} +{"id": "federico-etal-2014-matecat", "title": "The MateCat Tool", "abstract": "We present a new web-based CAT tool providing translators with a professional work environment, integrating translation memories, terminology bases, concordancers, and machine translation. The tool is completely developed as open source software and has been already successfully deployed for business, research and education. The MateCat Tool represents today probably the best available open source platform for investigating, integrating, and evaluating under realistic conditions the impact of new machine translation technology on human post-editing.", "keyphrases": ["matecat tool", "translator", "post-editing", "wordbee", "machine-translated document"]} +{"id": "poncelas-etal-2018-data", "title": "Data Selection with Feature Decay Algorithms Using an Approximated Target Side", "abstract": "Data selection techniques applied to neural machine translation (NMT) aim to increase the performance of a model by retrieving a subset of sentences for use as training data. 
One of the possible data selection techniques is transductive learning, which selects the data based on the test set, i.e. the document to be translated. A limitation of these methods to date is that using the source-side test set does not by itself guarantee that sentences are selected with correct translations, or translations that are suitable given the test-set domain. Some corpora, such as subtitle corpora, may contain parallel sentences with inaccurate translations caused by localization or length restrictions. To address this problem, in this paper we propose to use an approximated target side in addition to the source side when selecting suitable sentence pairs for training a model. This approximated target side is built by pre-translating the source side. In this work, we explore the performance of this general idea for one specific data selection approach called Feature Decay Algorithms (FDA). We train German-English NMT models on data selected by using the test set (source), the approximated target side, and a mixture of both. Our findings reveal that models built using a combination of outputs of FDA (using the test set and an approximated target side) perform better than those solely using the test set. We obtain a statistically significant improvement of more than 1.5 BLEU points over a model trained with all data, and more than 0.5 BLEU points over a strong FDA baseline that uses source-side information only.", "keyphrases": ["feature decay algorithms", "approximated target side", "target-side"]} +{"id": "shao-etal-2018-greedy", "title": "Greedy Search with Probabilistic N-gram Matching for Neural Machine Translation", "abstract": "Neural machine translation (NMT) models are usually trained with the word-level loss using the teacher forcing algorithm, which not only evaluates the translation improperly but also suffers from exposure bias. Sequence-level training under the reinforcement framework can mitigate the problems of the word-level loss, but its performance is unstable due to the high variance of the gradient estimation. On these grounds, we present a method with a differentiable sequence-level training objective based on probabilistic n-gram matching which can avoid the reinforcement framework. In addition, this method performs greedy search during training, using the predicted words as context just as at inference, to alleviate the problem of exposure bias. Experiment results on the NIST Chinese-to-English translation tasks show that our method significantly outperforms the reinforcement-based algorithms and achieves an improvement of 1.5 BLEU points on average over a strong baseline system.", "keyphrases": ["probabilistic n-gram matching", "neural machine translation", "loss"]} +{"id": "bauer-etal-2018-commonsense", "title": "Commonsense for Generative Multi-Hop Question Answering Tasks", "abstract": "Reading comprehension QA tasks have seen a recent surge in popularity, yet most works have focused on fact-finding extractive QA. We instead focus on a more challenging multi-hop generative task (NarrativeQA), which requires the model to reason, gather, and synthesize disjoint pieces of information within the context to generate an answer. This type of multi-step reasoning also often requires understanding implicit relations, which humans resolve via external, background commonsense knowledge. 
We first present a strong generative baseline that uses a multi-attention mechanism to perform multiple hops of reasoning and a pointer-generator decoder to synthesize the answer. This model performs substantially better than previous generative models, and is competitive with current state-of-the-art span prediction models. We next introduce a novel system for selecting grounded multi-hop relational commonsense information from ConceptNet via a pointwise mutual information and term-frequency based scoring function. Finally, we effectively use this extracted commonsense information to fill in gaps of reasoning between context hops, using a selectively-gated attention mechanism. This boosts the model's performance significantly (also verified via human evaluation), establishing a new state of the art for the task. We also show that our background knowledge enhancements are generalizable and improve performance on QAngaroo-WikiHop, another multi-hop reasoning dataset.", "keyphrases": ["narrativeqa", "multi-step reasoning", "commonsense knowledge"]} +{"id": "moudjari-etal-2020-algerian", "title": "An Algerian Corpus and an Annotation Platform for Opinion and Emotion Analysis", "abstract": "In this paper, we address the lack of resources for opinion and emotion analysis related to North African dialects, targeting the Algerian dialect. We present TWIFIL (TWItter proFILing), a collaborative annotation platform for crowdsourcing annotation of tweets at different levels of granularity. The platform allowed the creation of the largest Algerian dialect dataset annotated for sentiment (9,000 tweets), emotion (about 5,000 tweets) and extra-linguistic information including author profiling (age and gender). The annotation also resulted in the creation of the largest Algerian dialect subjectivity lexicon, of about 9,000 entries, which can constitute a valuable resource for the development of future NLP applications for the Algerian dialect. To test the validity of the dataset, a set of deep learning experiments was conducted to classify a given tweet as positive, negative or neutral. We discuss our results and provide an error analysis to better identify classification errors.", "keyphrases": ["annotation platform", "opinion", "emotion analysis", "dialect"]} +{"id": "wang-etal-2019-tell", "title": "Can You Tell Me How to Get Past Sesame Street? Sentence-Level Pretraining Beyond Language Modeling", "abstract": "Natural language understanding has recently seen a surge of progress with the use of sentence encoders like ELMo (Peters et al., 2018a) and BERT (Devlin et al., 2019) which are pretrained on variants of language modeling. We conduct the first large-scale systematic study of candidate pretraining tasks, comparing 19 different tasks both as alternatives and complements to language modeling. Our primary results support the use of language modeling, especially when combined with pretraining on additional labeled-data tasks. However, our results are mixed across pretraining tasks and show some concerning trends: In ELMo's pretrain-then-freeze paradigm, random baselines are worryingly strong and results vary strikingly across target tasks. In addition, fine-tuning BERT on an intermediate task often negatively impacts downstream transfer. 
In a more positive trend, we see modest gains from multitask training, suggesting the development of more sophisticated multitask and transfer learning techniques as an avenue for further research.", "keyphrases": ["language modeling", "bert", "target task"]} +{"id": "lee-etal-2004-supervised", "title": "Supervised Word Sense Disambiguation with Support Vector Machines and multiple knowledge sources", "abstract": "We participated in the SENSEVAL-3 English lexical sample task and multilingual lexical sample task. We adopted a supervised learning approach with Support Vector Machines, using only the official training data provided. No other external resources were used. The knowledge sources used were part-of-speech of neighboring words, single words in the surrounding context, local collocations, and syntactic relations. For the translation and sense subtask of the multilingual lexical sample task, the English sense given for the target word was also used as an additional knowledge source. For the English lexical sample task, we obtained fine-grained and coarse-grained scores (for both recall and precision) of 0.724 and 0.788, respectively. For the multilingual lexical sample task, we obtained recall (and precision) of 0.634 for the translation subtask, and 0.673 for the translation and sense subtask.", "keyphrases": ["word sense disambiguation", "support vector machines", "knowledge source", "wsd approach"]} +{"id": "de-clercq-etal-2013-normalization", "title": "Normalization of Dutch User-Generated Content", "abstract": "This paper describes a phrase-based machine translation approach to normalize Dutch user-generated content (UGC). We compiled a corpus of three different social media genres (text messages, message board posts and tweets) to have a sample of this recent domain. We describe the various characteristics of this noisy text material and explain how it has been manually normalized using newly developed guidelines. For the automatic normalization task we focus on text messages, and find that a cascaded SMT system where a token-based module is followed by a translation at the character level gives the best word error rate reduction. After these initial experiments, we investigate the system\u2019s robustness on the complete domain of UGC by testing it on the other two social media genres, and find that the cascaded approach performs best on these genres as well. To our knowledge, we deliver the first proof-of-concept system for Dutch UGC normalization, which can serve as a baseline for future work.", "keyphrases": ["dutch user-generated content", "ugc", "genre", "phrase-based method"]} +{"id": "moiron-tiedemann-2006-identifying", "title": "Identifying idiomatic expressions using automatic word-alignment", "abstract": "For NLP applications that require some sort of semantic interpretation it would be helpful to know what expressions exhibit an idiomatic meaning and what expressions exhibit a literal meaning. We investigate whether automatic word-alignment in existing parallel corpora facilitates the classification of candidate expressions along a continuum ranging from literal and transparent expressions to idiomatic and opaque expressions. Our method relies on two criteria: (i) meaning predictability, which is measured as semantic entropy, and (ii) the overlap between the meaning of an expression and the meaning of its component words. We approximate the mentioned overlap as the proportion of default alignments. 
We obtain a significant improvement over the baseline with both measures.", "keyphrases": ["automatic word-alignment", "parallel corpora", "predictability", "mwe"]} +{"id": "goldfarb-tarrant-etal-2021-intrinsic", "title": "Intrinsic Bias Metrics Do Not Correlate with Application Bias", "abstract": "Natural Language Processing (NLP) systems learn harmful societal biases that cause them to amplify inequality as they are deployed in more and more situations. To guide efforts at debiasing these systems, the NLP community relies on a variety of metrics that quantify bias in models. Some of these metrics are intrinsic, measuring bias in word embedding spaces, and some are extrinsic, measuring bias in downstream tasks that the word embeddings enable. Do these intrinsic and extrinsic metrics correlate with each other? We compare intrinsic and extrinsic metrics across hundreds of trained models covering different tasks and experimental conditions. Our results show no reliable correlation between these metrics that holds in all scenarios across tasks and languages. We urge researchers working on debiasing to focus on extrinsic measures of bias, and to make using these measures more feasible via creation of new challenge sets and annotated test data. To aid this effort, we release code, a new intrinsic metric, and an annotated test set focused on gender bias in hate speech.", "keyphrases": ["application bias", "downstream task", "intrinsic metric"]} +{"id": "cieri-etal-2018-introducing", "title": "Introducing NIEUW: Novel Incentives and Workflows for Eliciting Linguistic Data", "abstract": "This paper introduces the NIEUW (Novel Incentives and Workflows) project funded by the United States National Science Foundation and part of the Linguistic Data Consortium\u2019s strategy to provide an order-of-magnitude improvement in the scale, cost, variety, linguistic diversity and quality of Language Resources available for education, research and technology development. Notwithstanding decades of effort and progress in collecting and distributing Language Resources, it remains the case that demand still far exceeds supply for all of the approximately 7000 languages in the world, even the most well-documented languages with global economic and political influence. The absence of Language Resources, regardless of the language, stifles teaching and technology building, inhibiting the creation of language-enabled applications and, as a result, commerce and communication. Project-oriented approaches, which focus intensive funding and effort on problems of limited scope over short durations, can only address part of the problem. The HLT community instead requires approaches that do not rely upon highly constrained resources such as project funding and can be sustained across many languages and many years. In this paper, we describe a new initiative to harness the power of alternative incentives to elicit linguistic data and annotation. We also describe changes to the workflows necessary to collect data from workforces attracted by these incentives.", "keyphrases": ["nieuw", "novel incentives", "workflows"]} +{"id": "chakraborty-etal-2011-semantic", "title": "Semantic Clustering: an Attempt to Identify Multiword Expressions in Bengali", "abstract": "One of the key issues in both natural language understanding and generation is the appropriate processing of Multiword Expressions (MWEs). 
An MWE can be defined as a phrase whose meaning may not be obtained from its constituents in a straightforward manner. This paper presents an approach to identifying bigram noun-noun MWEs from a medium-size Bengali corpus by clustering the semantically related nouns and incorporating a vector space model for similarity measurement. Additional inclusion of the English WordNet::Similarity module also improves the results considerably. The present approach also helps to locate clusters of the synonymous noun words present in a document. Experimental results are satisfactory in terms of the precision, recall and F-score values.", "keyphrases": ["multiword expressions", "medium-size bengali corpus", "semantic clustering"]} +{"id": "han-etal-2012-automatically", "title": "Automatically Constructing a Normalisation Dictionary for Microblogs", "abstract": "Microblog normalisation methods often utilise complex models and struggle to differentiate between correctly-spelled unknown words and lexical variants of known words. In this paper, we propose a method for constructing a dictionary of lexical variants of known words that facilitates lexical normalisation via simple string substitution (e.g. tomorrow for tmrw). We use context information to generate possible variant and normalisation pairs and then rank these by string similarity. Highly-ranked pairs are selected to populate the dictionary. We show that a dictionary-based approach achieves state-of-the-art performance for both F-score and word error rate on a standard dataset. Compared with other methods, this approach offers a fast, lightweight and easy-to-use solution, and is thus suitable for high-volume microblog pre-processing.", "keyphrases": ["microblog", "distributional similarity", "noisy text", "oov word"]} +{"id": "bhowmick-etal-2008-agreement", "title": "An Agreement Measure for Determining Inter-Annotator Reliability of Human Judgements on Affective Text", "abstract": "An affective text may be judged to belong to multiple affect categories as it may evoke different affects with varying degrees of intensity. For affect classification of text, it is often required to annotate text corpus with affect categories. This task is often performed by a number of human judges. This paper presents a new agreement measure inspired by the Kappa coefficient to compute inter-annotator reliability when the annotators have freedom to categorize a text into more than one class. The extended reliability coefficient has been applied to measure the quality of an affective text corpus. An analysis of the factors that influence corpus quality has been provided.", "keyphrases": ["inter-annotator reliability", "affective text", "factor"]} +{"id": "rimell-etal-2009-unbounded", "title": "Unbounded Dependency Recovery for Parser Evaluation", "abstract": "This paper introduces a new parser evaluation corpus containing around 700 sentences annotated with unbounded dependencies, from seven different grammatical constructions. We run a series of off-the-shelf parsers on the corpus to evaluate how well state-of-the-art parsing technology is able to recover such dependencies. The overall results range from 25% accuracy to 59%. These low scores call into question the validity of using Parseval scores as a general measure of parsing capability. We discuss the importance of parsers being able to recover unbounded dependencies, given their relatively low frequency in corpora. 
We also analyse the various errors made on these constructions by one of the more successful parsers.", "keyphrases": ["unbounded dependency", "ccg parser", "linguistic capacity"]} +{"id": "aji-etal-2020-neural", "title": "In Neural Machine Translation, What Does Transfer Learning Transfer?", "abstract": "Transfer learning improves quality for low-resource machine translation, but it is unclear what exactly it transfers. We perform several ablation studies that limit information transfer, then measure the quality impact across three language pairs to gain a black-box understanding of transfer learning. Word embeddings play an important role in transfer learning, particularly if they are properly aligned. Although transfer learning can be performed without embeddings, results are sub-optimal. In contrast, transferring only the embeddings but nothing else yields catastrophic results. We then investigate diagonal alignments with auto-encoders over real languages and randomly generated sequences, finding even randomly generated sequences as parents yield noticeable but smaller gains. Finally, transfer learning can eliminate the need for a warm-up phase when training transformer models in high resource language pairs.", "keyphrases": ["transfer learning", "parent", "vocabulary"]} +{"id": "tsai-chen-2003-context", "title": "Context-rule Model for Pos Tagging", "abstract": "Part-of-speech tagging for a large corpus is a labour-intensive and time-consuming task. In order to achieve fast and high-quality tagging, algorithms should have high precision and, in particular, their tagging results should require less manual proofreading. In this paper, we propose a context-rule model to achieve both of the above goals for pos tagging. We compared the tagging precision of the Markov bi-gram model and the context-rule classifier. According to the experiments, the context-rule classifier performs better than the Markov bi-gram model. Also, it addresses the data sparseness problem by utilizing more context features, and reduces the amount of the corpus that needs to be manually proofread by introducing a confidence measure.", "keyphrases": ["pos tagging", "markov bi-gram model", "context-rule model"]} +{"id": "colombo-etal-2019-affect", "title": "Affect-Driven Dialog Generation", "abstract": "The majority of current systems for end-to-end dialog generation focus on response quality without an explicit control over the affective content of the responses. In this paper, we present an affect-driven dialog system, which generates emotional responses in a controlled manner using a continuous representation of emotions. The system achieves this by modeling emotions at a word and sequence level using: (1) a vector representation of the desired emotion, (2) an affect regularizer, which penalizes neutral words, and (3) an affect sampling method, which forces the neural network to generate diverse words that are emotionally relevant. During inference, we use a re-ranking procedure that aims to extract the most emotionally relevant responses using a human-in-the-loop optimization process. 
We study the performance of our system in terms of both quantitative (BLEU score and response diversity) and qualitative (emotional appropriateness) measures.", "keyphrases": ["dialog system", "emotion", "response generation"]} +{"id": "huang-etal-2020-grade", "title": "GRADE: Automatic Graph-Enhanced Coherence Metric for Evaluating Open-Domain Dialogue Systems", "abstract": "Automatically evaluating dialogue coherence is a challenging but highly demanded ability for developing high-quality open-domain dialogue systems. However, current evaluation metrics consider only surface features or utterance-level semantics, without explicitly considering the fine-grained topic transition dynamics of dialogue flows. Here, we first consider that the graph structure constituted by the topics in a dialogue can accurately depict the underlying communication logic, which is a more natural way to produce persuasive metrics. Capitalizing on the topic-level dialogue graph, we propose a new evaluation metric GRADE, which stands for Graph-enhanced Representations for Automatic Dialogue Evaluation. Specifically, GRADE incorporates both coarse-grained utterance-level contextualized representations and fine-grained topic-level graph representations to evaluate dialogue coherence. The graph representations are obtained by reasoning over topic-level dialogue graphs enhanced with the evidence from a commonsense graph, including k-hop neighboring representations and hop-attention weights. Experimental results show that our GRADE significantly outperforms other state-of-the-art metrics on measuring diverse dialogue models in terms of the Pearson and Spearman correlations with human judgments. In addition, we release a new large-scale human evaluation benchmark to facilitate future research on automatic metrics.", "keyphrases": ["coherence metric", "topic-level graph representation", "dialogue topic transition"]} +{"id": "tonelli-pighin-2009-new", "title": "New Features for FrameNet - WordNet Mapping", "abstract": "Many applications in the context of natural language processing or information retrieval could be greatly improved if they were able to fully exploit the rich semantic information annotated in high-quality, publicly available resources such as the FrameNet and the WordNet databases. Nevertheless, the practical use of such resources is often hampered by the limited coverage of semantic phenomena that they provide. \n \nA natural solution to this problem would be to automatically establish anchors between these resources that would allow us 1) to jointly use the encoded information, thus possibly overcoming limitations of the individual corpora, and 2) to extend each resource's coverage by exploiting the information encoded in the others. \n \nIn this paper, we present a supervised learning framework for the mapping of FrameNet lexical units onto WordNet synsets based on a reduced set of novel and semantically rich features. The automatically learnt mapping, which we call MapNet, can be used 1) to extend frame sets in the English FrameNet, 2) to populate frame sets in the Italian FrameNet via MultiWordNet and 3) to add frame labels to the MultiSemCor corpus. 
Our evaluation on these tasks shows that the proposed approach is viable and can result in accurate automatic annotations.", "keyphrases": ["framenet", "mapping", "wordnet synset"]} +{"id": "yannakoudakis-etal-2011-new", "title": "A New Dataset and Method for Automatically Grading ESOL Texts", "abstract": "We demonstrate how supervised discriminative machine learning techniques can be used to automate the assessment of 'English as a Second or Other Language' (ESOL) examination scripts. In particular, we use rank preference learning to explicitly model the grade relationships between scripts. A number of different features are extracted and ablation tests are used to investigate their contribution to overall performance. A comparison between regression and rank preference models further supports our method. Experimental results on the first publicly available dataset show that our system can achieve levels of performance close to the upper bound for the task, as defined by the agreement between human examiners on the same corpus. Finally, using a set of 'outlier' texts, we test the validity of our model and identify cases where the model's scores diverge from those of a human examiner.", "keyphrases": ["learner", "essay score", "grammatical feature", "cambridge fce corpus"]} +{"id": "beigman-klebanov-beigman-2014-difficult", "title": "Difficult Cases: From Data to Learning, and Back", "abstract": "This article contributes to the ongoing discussion in the computational linguistics community regarding instances that are difficult to annotate reliably. Is it worthwhile to identify those? What information can be inferred from them regarding the nature of the task? What should be done with them when building supervised machine learning systems? We address these questions in the context of a subjective semantic task. In this setting, we show that the presence of such instances in training data misleads a machine learner into misclassifying clear-cut cases. We also show that considering machine learning outcomes with and without the difficult cases, it is possible to identify specific weaknesses of the problem representation.", "keyphrases": ["presence", "machine learner", "difficult case", "beigman"]} +{"id": "rastogi-etal-2015-multiview", "title": "Multiview LSA: Representation Learning via Generalized CCA", "abstract": "Multiview LSA (MVLSA) is a generalization of Latent Semantic Analysis (LSA) that supports the fusion of arbitrary views of data and relies on Generalized Canonical Correlation Analysis (GCCA). We present an algorithm for fast approximate computation of GCCA, which when coupled with methods for handling missing values, is general enough to approximate some recent algorithms for inducing vector representations of words. Experiments across a comprehensive collection of test-sets show our approach to be competitive with the state of the art.", "keyphrases": ["generalized cca", "art", "multiview lsa"]} +{"id": "schwitter-2010-controlled", "title": "Controlled Natural Languages for Knowledge Representation", "abstract": "This paper presents a survey of research in controlled natural languages that can be used as high-level knowledge representation languages. Over the past 10 years or so, a number of machine-oriented controlled natural languages have emerged that can be used as high-level interface languages to various kinds of knowledge systems. 
These languages are relevant to the area of computational linguistics since they have two very interesting properties: firstly, they look informal like natural languages and are therefore easier to write and understand by humans than formal languages; secondly, they are precisely defined subsets of natural languages and can be translated automatically (and often deterministically) into a formal target language and then be used for automated reasoning. We present and compare the most mature of these novel languages, show how they can balance the disadvantages of natural languages and formal languages for knowledge representation, and discuss how domain specialists can be supported in writing specifications in controlled natural language.", "keyphrases": ["knowledge representation", "cnl", "full natural language", "ambiguity"]} +{"id": "sood-etal-2020-interpreting", "title": "Interpreting Attention Models with Human Visual Attention in Machine Reading Comprehension", "abstract": "While neural networks with attention mechanisms have achieved superior performance on many natural language processing tasks, it remains unclear to what extent learned attention resembles human visual attention. In this paper, we propose a new method that leverages eye-tracking data to investigate the relationship between human visual attention and neural attention in machine reading comprehension. To this end, we introduce a novel 23-participant eye-tracking dataset, MQA-RC, in which participants read movie plots and answered pre-defined questions. We compare state-of-the-art networks based on long short-term memory (LSTM), convolutional neural network (CNN) and XLNet Transformer architectures. We find that higher similarity to human attention correlates significantly with performance for the LSTM and CNN models. However, we show this relationship does not hold true for the XLNet models \u2013 despite the fact that the XLNet performs best on this challenging task. Our results suggest that different architectures seem to learn rather different neural attention strategies, and that similarity of neural to human attention does not guarantee the best performance.", "keyphrases": ["human visual attention", "machine reading comprehension", "cnn"]} +{"id": "dhingra-etal-2018-embedding", "title": "Embedding Text in Hyperbolic Spaces", "abstract": "Natural language text exhibits hierarchical structure in a variety of respects. Ideally, we could incorporate our prior knowledge of this hierarchical structure into unsupervised learning algorithms that work on text data. Recent work by Nickel and Kiela (2017) proposed using hyperbolic instead of Euclidean embedding spaces to represent hierarchical data and demonstrated encouraging results when embedding graphs. In this work, we extend their method with a re-parameterization technique that allows us to learn hyperbolic embeddings of arbitrarily parameterized objects. We apply this framework to learn word and sentence embeddings in hyperbolic space in an unsupervised manner from text corpora. The resulting embeddings seem to encode certain intuitive notions of hierarchy, such as word-context frequency and phrase constituency. However, the implicit continuous hierarchy in the learned hyperbolic space makes interrogating the model's learned hierarchies more difficult than for models that learn explicit edges between items. 
The learned hyperbolic embeddings show improvements over Euclidean embeddings in some \u2013 but not all \u2013 downstream tasks, suggesting that hierarchical organization is more useful for some tasks than others.", "keyphrases": ["hyperbolic space", "language text", "hierarchical structure"]} +{"id": "junczys-dowmunt-etal-2016-neural", "title": "Is Neural Machine Translation Ready for Deployment? A Case Study on 30 Translation Directions", "abstract": "In this paper we provide the largest published comparison of translation quality for phrase-based SMT and neural machine translation across 30 translation directions. For ten directions we also include hierarchical phrase-based MT. Experiments are performed for the recently published United Nations Parallel Corpus v1.0 and its large six-way sentence-aligned subcorpus. In the second part of the paper we investigate aspects of translation speed, introducing AmuNMT, our efficient neural machine translation decoder. We demonstrate that current neural machine translation could already be used for in-production systems when comparing words-per-second ratios.", "keyphrases": ["neural machine translation", "translation direction", "translation quality"]} +{"id": "xu-yang-2017-cross", "title": "Cross-lingual Distillation for Text Classification", "abstract": "Cross-lingual text classification (CLTC) is the task of classifying documents written in different languages into the same taxonomy of categories. This paper presents a novel approach to CLTC that builds on model distillation, which adapts and extends a framework originally proposed for model compression. Using soft probabilistic predictions for the documents in a label-rich language as the (induced) supervisory labels in a parallel corpus of documents, we train classifiers successfully for new languages in which labeled training data are not available. An adversarial feature adaptation technique is also applied during the model training to reduce distribution mismatch. We conducted experiments on two benchmark CLTC datasets, treating English as the source language and German, French, Japanese and Chinese as the unlabeled target languages. The proposed approach achieved performance better than or comparable to other state-of-the-art methods.", "keyphrases": ["text classification", "source language", "french", "chinese", "knowledge distillation"]} +{"id": "berant-etal-2012-efficient", "title": "Efficient Tree-based Approximation for Entailment Graph Learning", "abstract": "Learning entailment rules is fundamental in many semantic-inference applications and has been an active field of research in recent years. In this paper we address the problem of learning transitive graphs that describe entailment rules between predicates (termed entailment graphs). We first identify that entailment graphs exhibit a \"tree-like\" property and are very similar to a novel type of graph termed forest-reducible graph. We utilize this property to develop an iterative efficient approximation algorithm for learning the graph edges, where each iteration takes linear time. 
We compare our approximation algorithm to a recently proposed state-of-the-art exact algorithm and show that it is more efficient and scalable both theoretically and empirically, while its output quality is close to that given by the optimal solution of the exact algorithm.", "keyphrases": ["entailment graph", "approximation method", "treenode-fix"]} +{"id": "becker-etal-2011-discuss", "title": "DISCUSS: A dialogue move taxonomy layered over semantic representations", "abstract": "In this paper we describe DISCUSS, a dialogue move taxonomy layered over semantic representations. We designed this scheme to enable development of computational models of tutorial dialogues and to provide an intermediate representation suitable for question and tutorial act generation. As such, DISCUSS captures semantic and pragmatic elements across four dimensions: Dialogue Act, Rhetorical Form, Predicate Type, Semantic Roles. Together these dimensions provide a summary of an utterance's propositional content and how it may change the underlying information state of the conversation. This taxonomy builds on previous work in both general dialogue act taxonomies as well as work in tutorial act and tutorial question categorization. The types and values found within our taxonomy are based on preliminary observations and on-going annotation from our corpus of multimodal tutorial dialogues for elementary school science education.", "keyphrases": ["dialogue move taxonomy", "semantic representation", "discuss", "human tutor"]} +{"id": "vulic-etal-2018-post", "title": "Post-Specialisation: Retrofitting Vectors of Words Unseen in Lexical Resources", "abstract": "Word vector specialisation (also known as retrofitting) is a portable, light-weight approach to fine-tuning arbitrary distributional word vector spaces by injecting external knowledge from rich lexical resources such as WordNet. By design, these post-processing methods only update the vectors of words occurring in external lexicons, leaving the representations of all unseen words intact. In this paper, we show that constraint-driven vector space specialisation can be extended to unseen words. We propose a novel post-specialisation method that: a) preserves the useful linguistic knowledge for seen words; and b) propagates this external signal to unseen words in order to improve their vector representations as well. Our post-specialisation approach models a non-linear specialisation function explicitly, in the form of a deep neural network that learns to predict specialised vectors from their original distributional counterparts. The learned function is then used to specialise vectors of unseen words. This approach, applicable to any post-processing model, yields considerable gains over the initial specialisation models both in intrinsic word similarity tasks and in two downstream tasks: dialogue state tracking and lexical text simplification. The positive effects persist across three languages, demonstrating the importance of specialising the full vocabulary of distributional word vector spaces.", "keyphrases": ["retrofitting", "vocabulary", "post-specialization"]} +{"id": "cherry-2013-improved", "title": "Improved Reordering for Phrase-Based Translation using Sparse Features", "abstract": "There have been many recent investigations into methods to tune SMT systems using large numbers of sparse features. However, there have not been nearly so many examples of helpful sparse features, especially for phrase-based systems. 
We use sparse features to address reordering, which is often considered a weak point of phrase-based translation. Using a hierarchical reordering model as our baseline, we show that simple features coupling phrase orientation to frequent words or word clusters can improve translation quality, with boosts of up to 1.2 BLEU points in Chinese-English and 1.8 in Arabic-English. We compare this solution to a more traditional maximum entropy approach, where a probability model with similar features is trained on word-aligned bitext. We show that sparse decoder features outperform maximum entropy handily, indicating that there are major advantages to optimizing reordering features directly for BLEU with the decoder in the loop.", "keyphrases": ["reordering", "phrase-based translation", "sparse feature"]} +{"id": "duran-aluisio-2012-propbank", "title": "Propbank-Br: a Brazilian Treebank annotated with semantic role labels", "abstract": "This paper reports the annotation of a Brazilian Portuguese Treebank with semantic role labels following Propbank guidelines. A different language and a different parser output impact the task and require some decisions on how to annotate the corpus. Therefore, a new annotation guide \u2015 called Propbank-Br \u2015 has been generated to deal with specific language phenomena and parser problems. In this phase of the project, the corpus was annotated by a single linguist. The annotation task reported here is part of a larger project for the Brazilian Portuguese language. This project aims to build Brazilian verb frame files and a broader, distributed annotation of semantic role labels in Brazilian Portuguese, allowing inter-annotator agreement measures. The corpus, available on the web, is already being used to build a semantic tagger for the Portuguese language.", "keyphrases": ["semantic role label", "portuguese", "propbank-br"]} +{"id": "zhang-etal-2020-every", "title": "Every Document Owns Its Structure: Inductive Text Classification via Graph Neural Networks", "abstract": "Text classification is fundamental in natural language processing (NLP) and Graph Neural Networks (GNN) are recently applied in this task. However, the existing graph-based works can neither capture the contextual word relationships within each document nor fulfil the inductive learning of new words. Therefore in this work, to overcome such problems, we propose TextING for inductive text classification via GNN. We first build individual graphs for each document and then use GNN to learn the fine-grained word representations based on their local structure, which can also effectively produce embeddings for unseen words in the new document. Finally, the word nodes are aggregated as the document embedding. Extensive experiments on four benchmark datasets show that our method outperforms state-of-the-art text classification methods.", "keyphrases": ["inductive text classification", "graph neural networks", "gnn"]} +{"id": "suhr-etal-2020-exploring", "title": "Exploring Unexplored Generalization Challenges for Cross-Database Semantic Parsing", "abstract": "We study the task of cross-database semantic parsing (XSP), where a system that maps natural language utterances to executable SQL queries is evaluated on databases unseen during training. Recently, several datasets, including Spider, were proposed to support development of XSP systems. We propose a challenging evaluation setup for cross-database semantic parsing, focusing on variation across database schemas and in-domain language use. 
We re-purpose eight semantic parsing datasets that have been well-studied in the setting where in-domain training data is available, and instead use them as additional evaluation data for XSP systems. We build a system that performs well on Spider, and find that it struggles to generalize to our re-purposed set. Our setup uncovers several generalization challenges for cross-database semantic parsing, demonstrating the need to use and develop diverse training and evaluation datasets.", "keyphrases": ["cross-database semantic parsing", "database", "state-of-the-art model"]} +{"id": "xia-lewis-2007-multilingual", "title": "Multilingual Structural Projection across Interlinear Text", "abstract": "This paper explores the potential for annotating and enriching data for low-density languages via the alignment and projection of syntactic structure from parsed data for resource-rich languages such as English. We seek to develop enriched resources for a large number of the world\u2019s languages, most of which have no significant digital presence. We do this by tapping the body of Web-based linguistic data, most of which exists in small, analyzed chunks embedded in scholarly papers, journal articles, Web pages, and other online documents. By harvesting and enriching these data, we can provide the means for knowledge discovery across the resulting corpus that can lead to building computational resources such as grammars and transfer rules, which, in turn, can be used as bootstraps for building additional tools and resources for the languages represented.", "keyphrases": ["projection", "interlinear text", "syntactic structure"]} +{"id": "hardmeier-etal-2012-tree", "title": "Tree Kernels for Machine Translation Quality Estimation", "abstract": "This paper describes Uppsala University's submissions to the Quality Estimation (QE) shared task at WMT 2012. We present a QE system based on Support Vector Machine regression, using a number of explicitly defined features extracted from the Machine Translation input, output and models in combination with tree kernels over constituency and dependency parse trees for the input and output sentences. We confirm earlier results suggesting that tree kernels can be a useful tool for QE system construction, especially in the early stages of system design.", "keyphrases": ["quality estimation", "dependency parse tree", "output sentence", "tree kernel"]} +{"id": "wang-etal-2015-building", "title": "Building a Semantic Parser Overnight", "abstract": "How do we build a semantic parser in a new domain starting with zero training examples? We introduce a new methodology for this setting: First, we use a simple grammar to generate logical forms paired with canonical utterances. The logical forms are meant to cover the desired set of compositional operators, and the canonical utterances are meant to capture the meaning of the logical forms (although clumsily). We then use crowdsourcing to paraphrase these canonical utterances into natural utterances. The resulting data is used to train the semantic parser. We further study the role of compositionality in the resulting paraphrases. 
Finally, we test our methodology on seven domains and show that we can build an adequate semantic parser in just a few hours.", "keyphrases": ["canonical utterance", "compositionality", "semantic parsing", "sql", "data collection"]} +{"id": "taghipour-etal-2011-parallel", "title": "Parallel Corpus Refinement as an Outlier Detection Algorithm", "abstract": "Filtering noisy parallel corpora or removing mistranslations out of training sets can improve the quality of statistical machine translation. Discriminative methods for filtering the corpora, such as a maximum entropy model, need properly labeled training data, which are usually unavailable. Generating all possible sentence pairs (the Cartesian product) to generate labeled data produces an imbalanced training set containing few correct translations, which is thus inappropriate for training a classifier. In order to treat this problem effectively, unsupervised methods are utilized and the problem is modeled as an outlier detection procedure. The experiments show that a filtered corpus results in improved translation quality, even with some sentence pairs removed.", "keyphrases": ["outlier detection", "sentence pair", "improved translation quality", "small portion"]} +{"id": "li-etal-2011-clustering", "title": "Clustering Comparable Corpora For Bilingual Lexicon Extraction", "abstract": "We study in this paper the problem of enhancing the comparability of bilingual corpora in order to improve the quality of bilingual lexicons extracted from comparable corpora. We introduce a clustering-based approach for enhancing corpus comparability which exploits the homogeneity feature of the corpus, and finally preserves most of the vocabulary of the original corpus. Our experiments illustrate the well-foundedness of this method and show that the bilingual lexicons obtained from the homogeneous corpus are of better quality than the lexicons obtained with previous approaches.", "keyphrases": ["comparability", "bilingual lexicon extraction", "clustering-based approach"]} +{"id": "wu-etal-2016-bilingually", "title": "Bilingually-constrained Synthetic Data for Implicit Discourse Relation Recognition", "abstract": "To alleviate the shortage of labeled data, we propose to use bilingually-constrained synthetic implicit data for implicit discourse relation recognition. These data are extracted from a bilingual sentence-aligned corpus according to the implicit/explicit mismatch between different languages. Incorporating these data via a multi-task neural network model achieves significant improvements over baselines on both the English PDTB and Chinese CDTB data sets.", "keyphrases": ["discourse relation recognition", "synthetic implicit data", "english-chinese corpus"]} +{"id": "huang-etal-2012-tweet", "title": "Tweet Ranking Based on Heterogeneous Networks", "abstract": "Ranking tweets is a fundamental task to make it easier to distill the vast amounts of information shared by users. In this paper, we explore the novel idea of ranking tweets on a topic using heterogeneous networks. We construct heterogeneous networks by harnessing cross-genre linkages between tweets and semantically-related web documents from formal genres, and inferring implicit links between tweets and users. To rank tweets effectively by capturing the semantics and importance of different linkages, we introduce Tri-HITS, a model to iteratively propagate ranking scores across heterogeneous networks. 
We show that integrating both formal genre and inferred social networks with tweet networks produces a higher-quality ranking than the tweet networks alone.", "keyphrases": ["web document", "social network", "tweet ranking"]} +{"id": "yao-wan-2020-multimodal", "title": "Multimodal Transformer for Multimodal Machine Translation", "abstract": "Multimodal Machine Translation (MMT) aims to introduce information from other modalities, generally static images, to improve the translation quality. Previous works propose various incorporation methods, but most of them do not consider the relative importance of multiple modalities. Equally treating all modalities may encode too much useless information from less important modalities. In this paper, we introduce the multimodal self-attention in Transformer to solve the issues above in MMT. The proposed method learns the representation of images based on the text, which avoids encoding irrelevant information in images. Experiments and visualization analysis demonstrate that our model benefits from visual information and substantially outperforms previous works and competitive baselines in terms of various metrics.", "keyphrases": ["multimodal machine translation", "multimodal self-attention", "noise"]} +{"id": "narayan-cohen-2015-diversity", "title": "Diversity in Spectral Learning for Natural Language Parsing", "abstract": "We describe an approach to create a diverse set of predictions with spectral learning of latent-variable PCFGs (L-PCFGs). Our approach works by creating multiple spectral models where noise is added to the underlying features in the training set before the estimation of each model. We describe three ways to decode with multiple models. In addition, we describe a simple variant of the spectral algorithm for L-PCFGs that is fast and leads to compact models. Our experiments for natural language parsing, for English and German, show that we get a significant improvement over baselines, with results comparable to the state of the art. For English, we achieve an F1 score of 90.18, and for German an F1 score of 83.38.", "keyphrases": ["spectral learning", "natural language parsing", "l-pcfg"]} +{"id": "mohler-mihalcea-2009-text", "title": "Text-to-Text Semantic Similarity for Automatic Short Answer Grading", "abstract": "In this paper, we explore unsupervised techniques for the task of automatic short answer grading. We compare a number of knowledge-based and corpus-based measures of text similarity, evaluate the effect of domain and size on the corpus-based measures, and also introduce a novel technique to improve the performance of the system by integrating automatic feedback from the student answers. Overall, our system significantly and consistently outperforms other unsupervised methods for short answer grading that have been proposed in the past.", "keyphrases": ["short answer", "text similarity", "student"]} +{"id": "fader-etal-2013-paraphrase", "title": "Paraphrase-Driven Learning for Open Question Answering", "abstract": "We study question answering as a machine learning problem, and induce a function that maps open-domain questions to queries over a database of web extractions. Given a large, community-authored, question-paraphrase corpus, we demonstrate that it is possible to learn a semantic lexicon and linear ranking function without manually annotating questions. Our approach automatically generalizes a seed lexicon and includes a scalable, parallelized perceptron parameter estimation scheme. 
Experiments show that our approach more than quadruples the recall of the seed lexicon, with only an 8% loss in precision.", "keyphrases": ["open-domain question", "paraphrasing", "search query log", "knowledge basis"]} +{"id": "hardy-etal-2019-highres", "title": "HighRES: Highlight-based Reference-less Evaluation of Summarization", "abstract": "There has been substantial progress in summarization research enabled by the availability of novel, often large-scale, datasets and recent advances on neural network-based approaches. However, manual evaluation of the system-generated summaries is inconsistent due to the difficulty the task poses to human non-expert readers. To address this issue, we propose a novel approach for manual evaluation, Highlight-based Reference-less Evaluation of Summarization (HighRES), in which summaries are assessed by multiple annotators against the source document via manually highlighted salient content in the latter. Thus summary assessment on the source document by human judges is facilitated, while the highlights can be used for evaluating multiple systems. To validate our approach we employ crowd-workers to augment a recently proposed dataset with highlights and compare two state-of-the-art systems. We demonstrate that HighRES improves inter-annotator agreement in comparison to using the source document directly, while the highlights help emphasize differences among systems that would be ignored under other evaluation approaches.", "keyphrases": ["highlight-based reference-less evaluation", "summarization", "evaluation protocol"]} +{"id": "sharma-etal-2019-entity", "title": "An Entity-Driven Framework for Abstractive Summarization", "abstract": "Abstractive summarization systems aim to produce more coherent and concise summaries than their extractive counterparts. Popular neural models have achieved impressive results for single-document summarization, yet their outputs are often incoherent and unfaithful to the input. In this paper, we introduce SENECA, a novel System for ENtity-drivEn Coherent Abstractive summarization framework that leverages entity information to generate informative and coherent abstracts. Our framework takes a two-step approach: (1) an entity-aware content selection module first identifies salient sentences from the input, then (2) an abstract generation module conducts cross-sentence information compression and abstraction to generate the final summary, which is trained with rewards to promote coherence, conciseness, and clarity. The two components are further connected using reinforcement learning. Automatic evaluation shows that our model significantly outperforms previous state-of-the-art based on ROUGE and our proposed coherence measures on New York Times and CNN/Daily Mail datasets. Human judges further rate our system summaries as more informative and coherent than those by popular summarization models.", "keyphrases": ["abstractive summarization", "coherence", "entity information"]} +{"id": "nilsson-etal-2007-generalizing", "title": "Generalizing Tree Transformations for Inductive Dependency Parsing", "abstract": "Previous studies in data-driven dependency parsing have shown that tree transformations can improve parsing accuracy for specific parsers and data sets. We investigate to what extent this can be generalized across languages/treebanks and parsers, focusing on pseudo-projective parsing, as a way of capturing non-projective dependencies, and transformations used to facilitate parsing of coordinate structures and verb groups. 
The results indicate that the beneficial effect of pseudo-projective parsing is independent of parsing strategy but sensitive to language- or treebank-specific properties. By contrast, the construction-specific transformations appear to be more sensitive to parsing strategy but have a constant positive effect over several languages.", "keyphrases": ["transformation", "treebank", "verb group"]} +{"id": "p-v-s-meyer-2017-joint", "title": "Joint Optimization of User-desired Content in Multi-document Summaries by Learning from User Feedback", "abstract": "In this paper, we propose an extractive multi-document summarization (MDS) system using joint optimization and active learning for content selection grounded in user feedback. Our method interactively obtains user feedback to gradually improve the results of a state-of-the-art integer linear programming (ILP) framework for MDS. Our methods complement fully automatic methods in producing high-quality summaries with a minimum number of iterations and rounds of feedback. We conduct multiple simulation-based experiments and analyze the effect of feedback-based concept selection in the ILP setup in order to maximize the user-desired content in the summary.", "keyphrases": ["user-desired content", "user feedback", "joint optimization"]} +{"id": "kang-etal-2019-dual", "title": "Dual Attention Networks for Visual Reference Resolution in Visual Dialog", "abstract": "Visual dialog (VisDial) is a task which requires a dialog agent to answer a series of questions grounded in an image. Unlike in visual question answering (VQA), the series of questions should be able to capture a temporal context from a dialog history and utilize visually grounded information. Visual reference resolution is a problem that addresses these challenges, requiring the agent to resolve ambiguous references in a given question and to find the references in a given image. In this paper, we propose Dual Attention Networks (DAN) for visual reference resolution in VisDial. DAN consists of two kinds of attention modules, REFER and FIND. Specifically, the REFER module learns latent relationships between a given question and a dialog history by employing a multi-head attention mechanism. The FIND module takes image features and reference-aware representations (i.e., the output of the REFER module) as input, and performs visual grounding via a bottom-up attention mechanism. We qualitatively and quantitatively evaluate our model on VisDial v1.0 and v0.9 datasets, showing that DAN outperforms the previous state-of-the-art model by a significant margin.", "keyphrases": ["visual reference resolution", "visual dialog", "multi-head attention mechanism", "dual attention networks"]} +{"id": "zarriess-kuhn-2009-exploiting", "title": "Exploiting Translational Correspondences for Pattern-Independent MWE Identification", "abstract": "Based on a study of verb translations in the Europarl corpus, we argue that a wide range of MWE patterns can be identified in translations that exhibit a correspondence between a single lexical item in the source language and a group of lexical items in the target language. We show that these correspondences can be reliably detected on dependency-parsed, word-aligned sentences. 
We propose an extraction method that combines word alignment with syntactic filters and is independent of the structural pattern of the translation.", "keyphrases": ["correspondence", "mwe pattern", "parallel corpora"]} +{"id": "horvat-byrne-2014-graph", "title": "A Graph-Based Approach to String Regeneration", "abstract": "The string regeneration problem is the problem of generating a fluent sentence from a bag of words. We explore the N-gram language model approach to string regeneration. The approach computes the highest probability permutation of the input bag of words under an N-gram language model. We describe a graph-based approach for finding the optimal permutation. The evaluation of the approach on a number of datasets yielded promising results, which were confirmed by conducting a manual evaluation study.", "keyphrases": ["graph-based approach", "string regeneration", "n-gram language model"]} +{"id": "lucking-2017-indexicals", "title": "Indexicals as Weak Descriptors", "abstract": "Indexicals have a couple of uses that are in conflict with the traditional view that they directly refer to indices in the utterance situation. But how do they refer instead? It is argued that indexicals have both an indexical and a descriptive aspect \u2013 why they are called weak descriptors here. The indexical aspect anchors them in the actual situation of utterance, the weak descriptive aspect singles out the referent. Descriptive uses of \u201ctoday\u201d are then attributed to calendric coercion which is triggered by quantificational elements. This account provides a grammatically motivated formal link to descriptive uses. With regard to some uses of \u201cI\u201d, a tentative contiguity rule is proposed as the reference rule for the first person pronoun, which is oriented along recent hearer-oriented accounts in philosophy, but finally has to be criticized. 1 Descriptive Indexicals Indexicals have descriptive uses as exemplified in (1a) (taken from Nunberg, 2004, p. 265): (1) a. Today is always the biggest party day of the year. b. *November 1, 2000 is always the biggest party day of the year. According to Nunberg (2004), today in (1a) is interpreted as picking out a day type or day property instead of referring to a concrete day, since \u201c[. . . ] the interpretations of these uses of indexicals are the very things that their linguistic meanings pick out of the context.\u201d (p. 272). The full date in (1b), to the contrary, refers to a particular day and has no such type or property reading. The interpretations of both sentences in (1) diverge, even if both sentences are produced on November 1, 2000. Based on these observations (and criticizing his earlier account which rests on distinguishing the index from the referent and bridging between both by means of a salient relation (Nunberg, 1993)) Nunberg (2004) comes up with his granularization of context hypothesis: indexical expressions are evaluated in contexts which are \u201cindividuated by the conversationally relevant properties\u201d (p. 273). However, the \u201cconversationally relevant properties\u201d seem to be restricted by the linguistic meaning of the indexical in question. For instance, descriptive uses of today always rest on a temporal interpretation (combining today with atemporal descriptions sounds awkward, e.g., \u201c*Today is always 2+2 = 4\u201d or \u201c*Today is always the largest tree in the park\u201d). Thus, a more restrictive account should be possible. &#10;
Accordingly, contrary to the context granularization account \u2013 at least with respect to the kinds of examples in (1) \u2013 it is argued in the following that descriptive interpretations of indexical expressions are functional abstractions over indices that follow from the grammar of descriptive constructions in addition to type raising (Section 2). Part of the argument is that indexicals have a weak descriptive content that allows for functional abstraction in the first place. This re-analysis is spelled out more precisely in Section 3. Contrary to this line, however, in Section 4 it is suggested that \u2018I\u2019, instead of exhibiting descriptive use, should be interpreted according to an addressee-oriented semantic rule resting on a contiguity relation.", "keyphrases": ["weak descriptor", "indexical", "modality"]} +{"id": "zhou-etal-2016-multi", "title": "Multi-view Response Selection for Human-Computer Conversation", "abstract": "In this paper, we study the task of response selection for multi-turn human-computer conversation. Previous approaches take the word as a unit and view context and response as sequences of words. This kind of approach does not explicitly take each utterance as a unit, therefore it is difficult to catch utterance-level discourse information and dependencies. In this paper, we propose a multi-view response selection model that integrates information from two different views, i.e., word sequence view and utterance sequence view. We jointly model the two views via deep neural networks. Experimental results on a public corpus for context-sensitive response selection demonstrate the effectiveness of the proposed multi-view model, which significantly outperforms other single-view baselines.", "keyphrases": ["response selection", "conversation", "multi-view model", "multi-turn context"]} +{"id": "zhao-etal-2021-closer", "title": "A Closer Look at Few-Shot Crosslingual Transfer: The Choice of Shots Matters", "abstract": "Few-shot crosslingual transfer has been shown to outperform its zero-shot counterpart with pretrained encoders like multilingual BERT. Despite its growing popularity, little to no attention has been paid to standardizing and analyzing the design of few-shot experiments. In this work, we highlight a fundamental risk posed by this shortcoming, illustrating that the model exhibits a high degree of sensitivity to the selection of few shots. We conduct a large-scale experimental study on 40 sets of sampled few shots for six diverse NLP tasks across up to 40 languages. We provide an analysis of success and failure cases of few-shot transfer, which highlights the role of lexical features. Additionally, we show that a straightforward full model finetuning approach is quite effective for few-shot transfer, outperforming several state-of-the-art few-shot approaches. As a step towards standardizing few-shot crosslingual experimental designs, we make our sampled few shots publicly available.", "keyphrases": ["crosslingual transfer", "shot", "few-shot cross-lingual transfer"]} +{"id": "savary-waszczuk-2020-polish", "title": "Polish corpus of verbal multiword expressions", "abstract": "This paper describes a manually annotated corpus of verbal multi-word expressions in Polish. It is among the 4 biggest datasets in release 1.2 of the PARSEME multilingual corpus. We describe the data sources, as well as the annotation process and its outcomes. &#10;
We also present interesting phenomena encountered during the annotation task and put forward enhancements for the PARSEME annotation guidelines.", "keyphrases": ["verbal multiword expression", "parseme", "polish corpus"]} +{"id": "xu-etal-2019-clickbait", "title": "Clickbait? Sensational Headline Generation with Auto-tuned Reinforcement Learning", "abstract": "Sensational headlines are headlines that capture people's attention and generate reader interest. Conventional abstractive headline generation methods, unlike human writers, do not optimize for maximal reader attention. In this paper, we propose a model that generates sensational headlines without labeled data. We first train a sensationalism scorer by classifying online headlines with many comments (\u201cclickbait\u201d) against a baseline of headlines generated from a summarization model. The score from the sensationalism scorer is used as the reward for a reinforcement learner. However, maximizing the noisy sensationalism reward will generate unnatural phrases instead of sensational headlines. To effectively leverage this noisy reward, we propose a novel loss function, Auto-tuned Reinforcement Learning (ARL), to dynamically balance reinforcement learning (RL) with maximum likelihood estimation (MLE). Human evaluation shows that 60.8% of samples generated by our model are sensational, which is significantly better than the Pointer-Gen baseline and other RL models.", "keyphrases": ["sensational headline", "auto-tuned reinforcement learning", "clickbait"]} +{"id": "lin-etal-2011-sentence", "title": "Sentence Subjectivity Detection with Weakly-Supervised Learning", "abstract": "This paper presents a hierarchical Bayesian model based on latent Dirichlet allocation (LDA), called subjLDA, for sentence-level subjectivity detection, which automatically identifies whether a given sentence expresses opinion or states facts. In contrast to most of the existing methods relying on either labelled corpora for classifier training or linguistic pattern extraction for subjectivity classification, we view the problem as weakly-supervised generative model learning, where the only input to the model is a small set of domain independent subjectivity lexical clues. A mechanism is introduced to incorporate the prior information about the subjectivity lexical clues into model learning by modifying the Dirichlet priors of topic-word distributions. The subjLDA model has been evaluated on the Multi-Perspective Question Answering (MPQA) dataset and promising results have been observed in the preliminary experiments. We have also explored adding neutral words as prior information for model learning. It was found that while incorporating subjectivity clues bearing positive or negative polarity can achieve a significant performance gain, the prior lexical information from neutral words is less effective.", "keyphrases": ["subjectivity detection", "latent dirichlet allocation", "opinion"]} +{"id": "spitkovsky-etal-2011-lateen", "title": "Lateen EM: Unsupervised Training with Multiple Objectives, Applied to Dependency Grammar Induction", "abstract": "We present new training methods that aim to mitigate local optima and slow convergence in unsupervised training by using additional imperfect objectives. In its simplest form, lateen EM alternates between the two objectives of ordinary \"soft\" and \"hard\" expectation maximization (EM) algorithms. Switching objectives when stuck can help escape local optima. 
We find that applying a single such alternation already yields state-of-the-art results for English dependency grammar induction. More elaborate lateen strategies track both objectives, with each validating the moves proposed by the other. Disagreements can signal earlier opportunities to switch or terminate, saving iterations. De-emphasizing fixed points in these ways eliminates some guesswork from tuning EM. An evaluation against a suite of unsupervised dependency parsing tasks, for a variety of languages, showed that lateen strategies significantly speed up training of both EM algorithms, and improve accuracy for hard EM.", "keyphrases": ["unsupervised training", "grammar induction", "iteration"]} +{"id": "pouran-ben-veyseh-etal-2021-modeling", "title": "Modeling Document-Level Context for Event Detection via Important Context Selection", "abstract": "The task of Event Detection (ED) in Information Extraction aims to recognize and classify trigger words of events in text. The recent progress has featured advanced transformer-based language models (e.g., BERT) as a critical component in state-of-the-art models for ED. However, the length limit for input texts is a barrier for such ED models as they cannot encode long-range document-level context that has been shown to be beneficial for ED. To address this issue, we propose a novel method to model document-level context for ED that dynamically selects relevant sentences in the document for the event prediction of the target sentence. The target sentence will then be augmented with the selected sentences and consumed entirely by transformer-based language models for improved representation learning for ED. To this end, the REINFORCE algorithm is employed to train the relevant sentence selection for ED. Several information types are then introduced to form the reward function for the training process, including ED performance, sentence similarity, and discourse relations. Our extensive experiments on multiple benchmark datasets reveal the effectiveness of the proposed model, leading to new state-of-the-art performance.", "keyphrases": ["document-level context", "event detection", "information extraction"]} +{"id": "huang-mi-2010-efficient", "title": "Efficient Incremental Decoding for Tree-to-String Translation", "abstract": "Syntax-based translation models should in principle be efficient with polynomially-sized search space, but in practice they are often embarrassingly slow, partly due to the cost of language model integration. In this paper we borrow from phrase-based decoding the idea to generate a translation incrementally left-to-right, and show that for tree-to-string models, with a clever encoding of derivation history, this method runs in average-case polynomial-time in theory, and linear-time with beam search in practice (whereas phrase-based decoding is exponential-time in theory and quadratic-time in practice). Experiments show that, with comparable translation quality, our tree-to-string system (in Python) can run more than 30 times faster than the phrase-based system Moses (in C++).", "keyphrases": ["tree-to-string model", "beam search", "time complexity", "source sentence"]} +{"id": "bar-etal-2012-ukp", "title": "UKP: Computing Semantic Textual Similarity by Combining Multiple Content Similarity Measures", "abstract": "We present the UKP system which performed best in the Semantic Textual Similarity (STS) task at SemEval-2012 in two out of three metrics. &#10;
It uses a simple log-linear regression model, trained on the training data, to combine multiple text similarity measures of varying complexity. These range from simple character and word n-grams and common subsequences to complex features such as Explicit Semantic Analysis vector comparisons and aggregation of word similarity based on lexical-semantic resources. Further, we employ a lexical substitution system and statistical machine translation to add additional lexemes, which alleviates lexical gaps. Our final models, one per dataset, consist of a log-linear combination of about 20 features, out of the possible 300+ features implemented.", "keyphrases": ["semantic textual similarity", "complexity", "machine translation"]} +{"id": "yang-eisenstein-2016-part", "title": "Part-of-Speech Tagging for Historical English", "abstract": "As more historical texts are digitized, there is interest in applying natural language processing tools to these archives. However, the performance of these tools is often unsatisfactory, due to language change and genre differences. Spelling normalization heuristics are the dominant solution for dealing with historical texts, but this approach fails to account for changes in usage and vocabulary. In this empirical paper, we assess the capability of domain adaptation techniques to cope with historical texts, focusing on the classic benchmark task of part-of-speech tagging. We evaluate several domain adaptation methods on the task of tagging Early Modern English and Modern British English texts in the Penn Corpora of Historical English. We demonstrate that the Feature Embedding method for unsupervised domain adaptation outperforms word embeddings and Brown clusters, showing the importance of embedding the entire feature space, rather than just individual words. Feature Embeddings also give better performance than spelling normalization, but the combination of the two methods is better still, yielding a 5% raw improvement in tagging accuracy on Early Modern English texts.", "keyphrases": ["historical english", "domain adaptation method", "part-of-speech tagging"]} +{"id": "kiela-etal-2015-visual", "title": "Visual Bilingual Lexicon Induction with Transferred ConvNet Features", "abstract": "This paper is concerned with the task of bilingual lexicon induction using image-based features. By applying features from a convolutional neural network (CNN), we obtain state-of-the-art performance on a standard dataset, obtaining a 79% relative improvement over previous work which uses bags of visual words based on SIFT features. The CNN image-based approach is also compared with state-of-the-art linguistic approaches to bilingual lexicon induction, even outperforming these for one of three language pairs on another standard dataset. &#10;
Furthermore, we shed new light on the type of visual similarity metric to use for genuine similarity versus relatedness tasks, and experiment with using multiple layers from the same network in an attempt to improve performance.", "keyphrases": ["bilingual lexicon induction", "convolutional neural network", "image", "van"]} +{"id": "shi-etal-2016-neural", "title": "Why Neural Translations are the Right Length", "abstract": "We investigate how neural, encoder-decoder translation systems output target strings of appropriate lengths, finding that a collection of hidden units learns to explicitly implement this functionality.", "keyphrases": ["right length", "neuron", "mechanism"]} +{"id": "mizumoto-etal-2015-grammatical", "title": "Grammatical Error Correction Considering Multi-word Expressions", "abstract": "Multi-word expressions (MWEs) have been recognized as important linguistic information and much research has been conducted especially on their extraction and interpretation. On the other hand, they have hardly been used in real application areas. While those who are learning English as a second language (ESL) use MWEs in their writings just like native speakers, MWEs haven\u2019t been taken into consideration in grammatical error correction tasks. In this paper, we investigate the grammatical error correction method using MWEs. Our method proposes a straightforward application of MWEs to grammatical error correction, but experimental results show that MWEs have a beneficial effect on grammatical error correction.", "keyphrases": ["multi-word expression", "consideration", "grammatical error correction"]} +{"id": "ma-etal-2014-prune", "title": "Prune-and-Score: Learning for Greedy Coreference Resolution", "abstract": "We propose a novel search-based approach for greedy coreference resolution, where the mentions are processed in order and added to previous coreference clusters. Our method is distinguished by the use of two functions to make each coreference decision: a pruning function that prunes bad coreference decisions from further consideration, and a scoring function that then selects the best among the remaining decisions. Our framework reduces learning of these functions to rank learning, which helps leverage powerful off-the-shelf rank-learners. We show that our Prune-and-Score approach is superior to using a single scoring function to make both decisions and outperforms several state-of-the-art approaches on multiple benchmark corpora including OntoNotes.", "keyphrases": ["greedy coreference resolution", "mention", "decision"]} +{"id": "jiang-etal-2020-hover", "title": "HoVer: A Dataset for Many-Hop Fact Extraction And Claim Verification", "abstract": "We introduce HoVer (HOppy VERification), a dataset for many-hop evidence extraction and fact verification. It challenges models to extract facts from several Wikipedia articles that are relevant to a claim and classify whether the claim is supported or not-supported by the facts. In HoVer, the claims require evidence to be extracted from as many as four English Wikipedia articles and embody reasoning graphs of diverse shapes. Moreover, most of the 3/4-hop claims are written in multiple sentences, which adds to the complexity of understanding long-range dependency relations such as coreference. &#10;
We show that the performance of an existing state-of-the-art semantic-matching model degrades significantly on our dataset as the number of reasoning hops increases, hence demonstrating the necessity of many-hop reasoning to achieve strong results. We hope that the introduction of this challenging dataset and the accompanying evaluation task will encourage research in many-hop fact retrieval and information verification.", "keyphrases": ["claim", "many-hop evidence extraction", "wikipedia article"]} +{"id": "kirchhoff-bilmes-2014-submodularity", "title": "Submodularity for Data Selection in Machine Translation", "abstract": "We introduce submodular optimization to the problem of training data subset selection for statistical machine translation (SMT). By explicitly formulating data selection as a submodular program, we obtain fast scalable selection algorithms with mathematical performance guarantees, resulting in a unified framework that clarifies existing approaches and also makes both new and many previous approaches easily accessible. We present a new class of submodular functions designed specifically for SMT and evaluate them on two different translation tasks. Our results show that our best submodular method significantly outperforms several baseline methods, including the widely-used cross-entropy based data selection method. In addition, our approach easily scales to large data sets and is applicable to other data selection problems in natural language processing.", "keyphrases": ["data selection", "machine translation", "submodular optimization"]} +{"id": "popovic-2016-chrf", "title": "chrF deconstructed: beta parameters and n-gram weights", "abstract": "Character n-gram F-score (CHRF) is shown to correlate very well with human rankings of different machine translation outputs, especially for morphologically rich target languages. However, only two versions have been explored so far, namely CHRF1 (standard F-score, \u03b2 = 1) and CHRF3 (\u03b2 = 3), both with uniform n-gram weights. In this work, we investigated CHRF in more detail, namely \u03b2 parameters in the range from 1/6 to 6, and we found out that CHRF2 is the most promising version. Then we investigated different n-gram weights for CHRF2 and found out that the uniform weights are the best option. Apart from this, CHRF scores were systematically compared with WORDF scores, and a preliminary experiment carried out on a small amount of data with direct human scores indicates that the main advantage of CHRF is that it does not penalise acceptable variations in high quality translations too hard.", "keyphrases": ["n-gram weight", "preliminary experiment", "chrf"]} +{"id": "reidsma-op-den-akker-2008-exploiting", "title": "Exploiting `Subjective' Annotations", "abstract": "Many interesting phenomena in conversation can only be annotated as a subjective task, requiring interpretative judgements from annotators. This leads to data which is annotated with lower levels of agreement not only due to errors in the annotation, but also due to the differences in how annotators interpret conversations. This paper constitutes an attempt to find out how subjective annotations with a low level of agreement can profitably be used for machine learning purposes. We analyse the (dis)agreements between annotators for two different cases in a multimodal annotated corpus and explicitly relate the results to the way machine-learning algorithms perform on the annotated data. &#10;
Finally we present two new concepts, namely 'subjective entity' classifiers and 'consensus objective' classifiers, and give recommendations for using subjective data in machine-learning applications.", "keyphrases": ["annotator", "reidsma", "slip"]} +{"id": "saunders-etal-2019-domain", "title": "Domain Adaptive Inference for Neural Machine Translation", "abstract": "We investigate adaptive ensemble weighting for Neural Machine Translation, addressing the case of improving performance on a new and potentially unknown domain without sacrificing performance on the original domain. We adapt sequentially across two Spanish-English and three English-German tasks, comparing unregularized fine-tuning, L2 and Elastic Weight Consolidation. We then report a novel scheme for adaptive NMT ensemble decoding by extending Bayesian Interpolation with source information, and report strong improvements across test domains without access to the domain label.", "keyphrases": ["neural machine translation", "ensemble weighting", "fine-tuning", "forgetting"]} +{"id": "yamamura-etal-2016-kyutech", "title": "The Kyutech corpus and topic segmentation using a combined method", "abstract": "Summarization of multi-party conversation is one of the important tasks in natural language processing. In this paper, we explain a Japanese corpus and a topic segmentation task. To the best of our knowledge, the corpus is the first Japanese corpus annotated for summarization tasks and freely available to anyone. We call it \u201cthe Kyutech corpus.\u201d The task of the corpus is a decision-making task with four participants and it contains utterances with time information, topic segmentation and reference summaries. As a case study for the corpus, we describe a method combining LCSeg and TopicTiling for a topic segmentation task. We discuss the effectiveness and the problems of the combined method through the experiment with the Kyutech corpus.", "keyphrases": ["kyutech corpus", "topic segmentation", "decision-making task"]} +{"id": "tran-nguyen-2017-natural", "title": "Natural Language Generation for Spoken Dialogue System using RNN Encoder-Decoder Networks", "abstract": "Natural language generation (NLG) is a critical component in a spoken dialogue system. This paper presents a Recurrent Neural Network based Encoder-Decoder architecture, in which an LSTM-based decoder is introduced to select and aggregate semantic elements produced by an attention mechanism over the input elements, and to produce the required utterances. The proposed generator can be jointly trained on both sentence planning and surface realization to produce natural language sentences. The proposed model was extensively evaluated on four different NLG datasets. The experimental results showed that the proposed generators not only consistently outperform the previous methods across all the NLG domains but also show an ability to generalize from a new, unseen domain and learn from multi-domain datasets.", "keyphrases": ["spoken dialogue system", "recurrent neural network", "natural language generation"]} +{"id": "broeder-etal-2012-standardizing", "title": "Standardizing a Component Metadata Infrastructure", "abstract": "This paper describes the status of the standardization efforts of a Component Metadata approach for describing Language Resources with metadata. &#10;
Different linguistic and Language & Technology communities such as CLARIN, META-SHARE and NaLiDa use this component approach and see its standardization as a matter for cooperation that has the possibility to create a large interoperable domain of joint metadata. Starting with an overview of the component metadata approach together with the related semantic interoperability tools and services such as the ISOcat data category registry and the relation registry, we explain the standardization plan and efforts for component metadata within ISO TC37/SC4. Finally, we present information about uptake and plans of the use of component metadata within the three mentioned linguistic and L&T communities.", "keyphrases": ["component metadata infrastructure", "clarin", "meta-share"]} +{"id": "andreas-2020-good", "title": "Good-Enough Compositional Data Augmentation", "abstract": "We propose a simple data augmentation protocol aimed at providing a compositional inductive bias in conditional and unconditional sequence models. Under this protocol, synthetic training examples are constructed by taking real training examples and replacing (possibly discontinuous) fragments with other fragments that appear in at least one similar environment. The protocol is model-agnostic and useful for a variety of tasks. Applied to neural sequence-to-sequence models, it reduces error rate by as much as 87% on diagnostic tasks from the SCAN dataset and 16% on a semantic parsing task. Applied to n-gram language models, it reduces perplexity by roughly 1% on small corpora in several languages.", "keyphrases": ["data augmentation", "training example", "compositional generalization", "geca", "sentence fragment"]} +{"id": "white-rajkumar-2009-perceptron", "title": "Perceptron Reranking for CCG Realization", "abstract": "This paper shows that discriminative reranking with an averaged perceptron model yields substantial improvements in realization quality with CCG. The paper confirms the utility of including language model log probabilities as features in the model, which prior work on discriminative training with log linear models for HPSG realization had called into question. The perceptron model allows the combination of multiple n-gram models to be optimized and then augmented with both syntactic features and discriminative n-gram features. The full model yields a state-of-the-art BLEU score of 0.8506 on Section 23 of the CCGbank, to our knowledge the best score reported to date using a reversible, corpus-engineered grammar.", "keyphrases": ["ccg", "perceptron reranking", "surface realization"]} +{"id": "may-knight-2006-better", "title": "A Better N-Best List: Practical Determinization of Weighted Finite Tree Automata", "abstract": "Ranked lists of output trees from syntactic statistical NLP applications frequently contain multiple repeated entries. This redundancy leads to misrepresentation of tree weight and reduced information for debugging and tuning purposes. It is chiefly due to nondeterminism in the weighted automata that produce the results. We introduce an algorithm that determinizes such automata while preserving proper weights, returning the sum of the weight of all multiply derived trees. We also demonstrate our algorithm's effectiveness on two large-scale tasks.", "keyphrases": ["n-best list", "determinization", "wta"]} +{"id": "yoshimoto-etal-2015-coordination", "title": "Coordination-Aware Dependency Parsing (Preliminary Report)", "abstract": "Coordinate structures pose difficulties in dependency parsers. &#10;
In this paper, we propose a set of parsing rules specifically designed to handle coordination, which are intended to be used in combination with Eisner and Satta\u2019s dependency rules. The new rules are compatible with existing similarity-based approaches to coordination structure analysis, and thus the syntactic and semantic similarity of conjuncts can be incorporated into the parse scoring function. Although we are yet to implement such a scoring function, we analyzed the time complexity of the proposed rules as well as their coverage of the Penn Treebank converted to the Stanford basic dependencies.", "keyphrases": ["dependency parsing", "coordination", "new rule"]} +{"id": "han-etal-2021-improving", "title": "Improving Multimodal Fusion with Hierarchical Mutual Information Maximization for Multimodal Sentiment Analysis", "abstract": "In multimodal sentiment analysis (MSA), the performance of a model highly depends on the quality of synthesized embeddings. These embeddings are generated from the upstream process called multimodal fusion, which aims to extract and combine the input unimodal raw data to produce a richer multimodal representation. Previous work either back-propagates the task loss or manipulates the geometric property of feature spaces to produce favorable fusion results, which neglects the preservation of critical task-related information that flows from input to the fusion results. In this work, we propose a framework named MultiModal InfoMax (MMIM), which hierarchically maximizes the Mutual Information (MI) in unimodal input pairs (inter-modality) and between multimodal fusion result and unimodal input in order to maintain task-related information through multimodal fusion. The framework is jointly trained with the main task (MSA) to improve the performance of the downstream MSA task. To address the intractable issue of MI bounds, we further formulate a set of computationally simple parametric and non-parametric methods to approximate their truth value. Experimental results on the two widely used datasets demonstrate the efficacy of our approach.", "keyphrases": ["multimodal fusion", "mutual information", "multimodal sentiment analysis"]} +{"id": "novikova-etal-2018-rankme", "title": "RankME: Reliable Human Ratings for Natural Language Generation", "abstract": "Human evaluation for natural language generation (NLG) often suffers from inconsistent user ratings. While previous research tends to attribute this problem to individual user preferences, we show that the quality of human judgements can also be improved by experimental design. We present a novel rank-based magnitude estimation method (RankME), which combines the use of continuous scales and relative assessments. We show that RankME significantly improves the reliability and consistency of human ratings compared to traditional evaluation methods. In addition, we show that it is possible to evaluate NLG systems according to multiple, distinct criteria, which is important for error analysis. &#10;
Finally, we demonstrate that RankME, in combination with Bayesian estimation of system quality, is a cost-effective alternative for ranking multiple NLG systems.", "keyphrases": ["natural language generation", "human judgement", "rankme"]} +{"id": "wei-etal-2020-uncertainty", "title": "Uncertainty-Aware Semantic Augmentation for Neural Machine Translation", "abstract": "As a sequence-to-sequence generation task, neural machine translation (NMT) naturally contains intrinsic uncertainty, where a single sentence in one language has multiple valid counterparts in the other. However, the dominant methods for NMT only observe one of them from the parallel corpora for the model training but have to deal with adequate variations under the same meaning at inference. This leads to a discrepancy of the data distribution between the training and the inference phases. To address this problem, we propose uncertainty-aware semantic augmentation, which explicitly captures the universal semantic information among multiple semantically-equivalent source sentences and enhances the hidden representations with this information for better translations. Extensive experiments on various translation tasks reveal that our approach significantly outperforms the strong baselines and the existing methods.", "keyphrases": ["semantic augmentation", "neural machine translation", "source sentence"]} +{"id": "zhao-etal-2021-relation", "title": "A Relation-Oriented Clustering Method for Open Relation Extraction", "abstract": "The clustering-based unsupervised relation discovery method has gradually become one of the important methods of open relation extraction (OpenRE). However, high-dimensional vectors can encode complex linguistic information which leads to the problem that the derived clusters cannot explicitly align with the relational semantic classes. In this work, we propose a relation-oriented clustering model and use it to identify the novel relations in the unlabeled data. Specifically, to enable the model to learn to cluster relational data, our method leverages the readily available labeled data of pre-defined relations to learn a relation-oriented representation. We minimize the distance between instances with the same relation by gathering the instances towards their corresponding relation centroids to form a cluster structure, so that the learned representation is cluster-friendly. To reduce the clustering bias on predefined classes, we optimize the model by minimizing a joint objective on both labeled and unlabeled data. Experimental results show that our method reduces the error rate by 29.2% and 15.7%, on two datasets respectively, compared with current SOTA methods.", "keyphrases": ["clustering method", "open relation extraction", "semantic class"]} +{"id": "schwenk-etal-2006-continuous", "title": "Continuous Space Language Models for Statistical Machine Translation", "abstract": "Statistical machine translation systems are based on one or more translation models and a language model of the target language. While many different translation models and phrase extraction algorithms have been proposed, a standard word n-gram back-off language model is used in most systems. \n \nIn this work, we propose to use a new statistical language model that is based on a continuous representation of the words in the vocabulary. A neural network is used to perform the projection and the probability estimation. We consider the translation of European Parliament Speeches. &#10;
This task is part of an international evaluation organized by the TC-STAR project in 2006. The proposed method achieves consistent improvements in the BLEU score on the development and test data. \n \nWe also present algorithms to improve the estimation of the language model probabilities when splitting long sentences into shorter chunks.", "keyphrases": ["statistical machine translation", "cslm", "reranking"]} +{"id": "labutov-lipson-2014-generating", "title": "Generating Code-switched Text for Lexical Learning", "abstract": "A vast majority of L1 vocabulary acquisition occurs through incidental learning during reading (Nation, 2001; Schmitt et al., 2001). We propose a probabilistic approach to generating code-mixed text as an L2 technique for increasing retention in adult lexical learning through reading. Our model takes as input a bilingual dictionary and an English text, and generates a code-switched text that optimizes a defined \u201clearnability\u201d metric by constructing a factor graph over lexical mentions. Using an artificial language vocabulary, we evaluate a set of algorithms for generating code-switched text automatically by presenting it to Mechanical Turk subjects and measuring recall in a sentence completion task.", "keyphrases": ["code-switched text", "lexical learning", "incidental learning"]} +{"id": "ma-etal-2021-contrastive", "title": "Contrastive Fine-tuning Improves Robustness for Neural Rankers", "abstract": "The performance of state-of-the-art neural rankers can deteriorate substantially when exposed to noisy inputs or applied to a new domain. In this paper, we present a novel method for fine-tuning neural rankers that can significantly improve their robustness to out-of-domain data and query perturbations. Specifically, a contrastive loss that compares data points in the representation space is combined with the standard ranking loss during fine-tuning. We use relevance labels to denote similar/dissimilar pairs, which allows the model to learn the underlying matching semantics across different query-document pairs and leads to improved robustness. In experiments with four passage ranking datasets, the proposed contrastive fine-tuning method obtains improvements on robustness to query reformulations, noise perturbations, and zero-shot transfer for both BERT and BART based rankers. Additionally, our experiments show that contrastive fine-tuning outperforms data augmentation for robustifying neural rankers.", "keyphrases": ["fine-tuning", "robustness", "neural ranker"]} +{"id": "gatt-etal-2009-tuna", "title": "The TUNA-REG Challenge 2009: Overview and Evaluation Results", "abstract": "The GREC Task at REG '08 required participating systems to select coreference chains to the main subject of short encyclopaedic texts collected from Wikipedia. Three teams submitted a total of 6 systems, and we additionally created four baseline systems. Systems were tested automatically using a range of existing intrinsic metrics. We also evaluated systems extrinsically by applying coreference resolution tools to the outputs and measuring the success of the tools. In addition, systems were tested in a reading/comprehension experiment involving human subjects. &#10;
This report describes the GREC Task and the evaluation methods, gives brief descriptions of the participating systems, and presents the evaluation results.", "keyphrases": ["evaluation result", "tuna", "expression generation", "recent reg challenge"]} +{"id": "huang-etal-2010-self", "title": "Self-Training with Products of Latent Variable Grammars", "abstract": "We study self-training with products of latent variable grammars in this paper. We show that increasing the quality of the automatically parsed data used for self-training gives higher accuracy self-trained grammars. Our generative self-trained grammars reach F scores of 91.6 on the WSJ test set and surpass even discriminative reranking systems without self-training. Additionally, we show that multiple self-trained grammars can be combined in a product model to achieve even higher accuracy. The product model is most effective when the individual underlying grammars are most diverse. Combining multiple grammars that were self-trained on disjoint sets of unlabeled data results in a final test accuracy of 92.5% on the WSJ test set and 89.6% on our Broadcast News test set.", "keyphrases": ["product", "latent variable grammar", "self-training"]} +{"id": "malmasi-dras-2014-chinese", "title": "Chinese Native Language Identification", "abstract": "We present the first application of Native Language Identification (NLI) to non-English data. Motivated by theories of language transfer, NLI is the task of identifying a writer\u2019s native language (L1) based on their writings in a second language (the L2). An NLI system was applied to Chinese learner texts using topic-independent syntactic models to assess their accuracy. We find that models using part-of-speech tags, context-free grammar production rules and function words are highly effective, achieving a maximum accuracy of 71%. Interestingly, we also find that when applied to equivalent English data, the model performance is almost identical. This finding suggests a systematic pattern of cross-linguistic transfer may exist, where the degree of transfer is independent of the L1 and L2.", "keyphrases": ["native language identification", "nli", "non-english dataset", "syntactic feature"]} +{"id": "ehara-2014-machine", "title": "A machine translation system combining rule-based machine translation and statistical post-editing", "abstract": "System architecture, experimental settings and evaluation results of the EIWA in the WAT2014 Japanese to English (ja-en) and Chinese to Japanese (zh-ja) tasks are described. Our system combines rule-based machine translation (RBMT) and statistical post-editing (SPE). Evaluation results for the ja-en task show 19.86 BLEU score, 0.7067 RIBES score, and 22.50 human evaluation score. Evaluation results for the zh-ja task show 33.57 BLEU score, 0.8114 RIBES score, and 15.00 human evaluation score.", "keyphrases": ["rule-based machine translation", "statistical post-editing", "spe"]} +{"id": "raghavan-etal-2014-cross", "title": "Cross-narrative Temporal Ordering of Medical Events", "abstract": "Cross-narrative temporal ordering of medical events is essential to the task of generating a comprehensive timeline over a patient\u2019s history. &#10;
We address the problem of aligning multiple medical event sequences, corresponding to different clinical narratives, comparing the following approaches: (1) A novel weighted finite state transducer representation of medical event sequences that enables composition and search for decoding, and (2) Dynamic programming with iterative pairwise alignment of multiple sequences using global and local alignment algorithms. The cross-narrative coreference and temporal relation weights used in both these approaches are learned from a corpus of clinical narratives. We present results using both approaches and observe that the finite state transducer approach performs significantly better than the dynamic programming one by 6.8% for the problem of multiple-sequence alignment.", "keyphrases": ["medical event", "timeline", "cross-narrative temporal ordering"]} +{"id": "otrusina-smrz-2010-new", "title": "A New Approach to Pseudoword Generation", "abstract": "Sense-tagged corpora are used to evaluate word sense disambiguation (WSD) systems. Manual creation of such resources is often prohibitively expensive. That is why the concept of pseudowords - conflations of two or more unambiguous words - has been integrated into WSD evaluation experiments. This paper presents a new method of pseudoword generation which takes into account semantic-relatedness of the candidate words forming parts of the pseudowords to the particular senses of the word to be disambiguated. We compare the new approach to its alternatives and show that the results on pseudowords, that are more similar to real ambiguous words, better correspond to the actual results. Two techniques assessing the similarity are studied - the first one takes advantage of manually created dictionaries (wordnets), the second one builds on the automatically computed statistical data obtained from large corpora. Pros and cons of the two techniques are discussed and the results on a standard task are demonstrated.", "keyphrases": ["new approach", "pseudoword generation", "ambiguous word"]} +{"id": "muller-2007-resolving", "title": "Resolving It, This, and That in Unrestricted Multi-Party Dialog", "abstract": "We present an implemented system for the resolution of it, this, and that in transcribed multi-party dialog. The system handles NP-anaphoric as well as discourse-deictic anaphors, i", "keyphrases": ["multi-party dialog", "anaphor", "pronoun"]} +{"id": "hautli-janisz-etal-2022-qt30", "title": "QT30: A Corpus of Argument and Conflict in Broadcast Debate", "abstract": "Broadcast political debate is a core pillar of democracy: it is the public's easiest access to opinions that shape policies and enables the general public to make informed choices. With QT30, we present the largest corpus of analysed dialogical argumentation ever created (19,842 utterances, 280,000 words) and also the largest corpus of analysed broadcast political debate to date, using 30 episodes of BBC's `Question Time' from 2020 and 2021. Question Time is the prime institution in UK broadcast political debate and features questions from the public on current political issues, which are responded to by a weekly panel of five figures of UK politics and society. QT30 is highly argumentative and combines language of well-versed political rhetoric with direct, often combative, justification-seeking of the general public. &#10;
QT30 is annotated with Inference Anchoring Theory, a framework well-known in argument mining, which encodes the way arguments and conflicts are created and reacted to in dialogical settings. The resource is freely available at .", "keyphrases": ["argumentation", "conflict", "broadcast", "episode", "question time"]} +{"id": "liang-etal-2015-measuring", "title": "Measuring Prerequisite Relations Among Concepts", "abstract": "A prerequisite relation describes a basic relation among concepts in cognition, education and other areas. However, as a semantic relation, it has not been well studied in computational linguistics. We investigate the problem of measuring prerequisite relations among concepts and propose a simple link-based metric, namely reference distance (RefD), that effectively models the relation by measuring how differently two concepts refer to each other. Evaluations on two datasets that include seven domains show that our single metric based method outperforms existing supervised learning based methods.", "keyphrases": ["prerequisite relation", "reference distance", "refd"]} +{"id": "zhou-etal-2018-relevant", "title": "Relevant Emotion Ranking from Text Constrained with Emotion Relationships", "abstract": "Text might contain or invoke multiple emotions with varying intensities. As such, emotion detection, to predict multiple emotions associated with a given text, can be cast into a multi-label classification problem. We would like to go one step further so that a ranked list of relevant emotions are generated where top ranked emotions are more intensely associated with text compared to lower ranked emotions, whereas the rankings of irrelevant emotions are not important. A novel framework of relevant emotion ranking is proposed to tackle the problem. In the framework, the objective loss function is designed elaborately so that both emotion prediction and rankings of only relevant emotions can be achieved. Moreover, we observe that some emotions co-occur more often while other emotions rarely co-exist. Such information is incorporated into the framework as constraints to improve the accuracy of emotion detection. Experimental results on two real-world corpora show that the proposed framework can effectively deal with emotion detection and performs remarkably better than the state-of-the-art emotion detection approaches and multi-label learning methods.", "keyphrases": ["intensity", "relevant emotion ranking", "relevant label"]} +{"id": "auli-etal-2009-systematic", "title": "A Systematic Analysis of Translation Model Search Spaces", "abstract": "Translation systems are complex, and most metrics do little to pinpoint causes of error or isolate system differences. We use a simple technique to discover induction errors, which occur when good translations are absent from model search spaces. Our results show that a common pruning heuristic drastically increases induction error, and also strongly suggest that the search spaces of phrase-based and hierarchical phrase-based models are highly overlapping despite the well known structural differences.", "keyphrases": ["search space", "phrase-based model", "limit"]} +{"id": "shishtla-etal-2009-language", "title": "A Language-Independent Transliteration Schema Using Character Aligned Models at NEWS 2009", "abstract": "In this paper we present a statistical transliteration technique that is language independent. This technique uses statistical alignment models and Conditional Random Fields (CRF). 
Statistical alignment models maximize the probability of the observed (source, target) word pairs using the expectation maximization algorithm and then the character level alignments are set to maximum posterior predictions of the model. CRF has efficient training and decoding processes which are conditioned on both source and target languages and produce a globally optimal solution.", "keyphrases": ["alignment model", "statistical transliteration technique", "giza++"]} +{"id": "li-etal-2018-deep", "title": "A Deep Relevance Model for Zero-Shot Document Filtering", "abstract": "In the era of big data, focused analysis for diverse topics with a short response time becomes an urgent demand. As a fundamental task, information filtering therefore becomes a critical necessity. In this paper, we propose a novel deep relevance model for zero-shot document filtering, named DAZER. DAZER estimates the relevance between a document and a category by taking a small set of seed words relevant to the category. With pre-trained word embeddings from a large external corpus, DAZER is devised to extract the relevance signals by modeling the hidden feature interactions in the word embedding space. The relevance signals are extracted through a gated convolutional process. The gate mechanism controls which convolution filters output the relevance signals in a category dependent manner. Experiments on two document collections of two different tasks (i.e., topic categorization and sentiment analysis) demonstrate that DAZER significantly outperforms the existing alternative solutions, including the state-of-the-art deep relevance ranking models.", "keyphrases": ["deep relevance model", "zero-shot document filtering", "seed word"]} +{"id": "zhang-etal-2019-generating-fluent", "title": "Generating Fluent Adversarial Examples for Natural Languages", "abstract": "Efficiently building an adversarial attacker for natural language processing (NLP) tasks is a real challenge. Firstly, as the sentence space is discrete, it is difficult to make small perturbations along the direction of gradients. Secondly, the fluency of the generated examples cannot be guaranteed. In this paper, we propose MHA, which addresses both problems by performing Metropolis-Hastings sampling, whose proposal is designed with the guidance of gradients. Experiments on IMDB and SNLI show that our proposed MHA outperforms the baseline model on attacking capability. Adversarial training with MHA also leads to better robustness and performance.", "keyphrases": ["adversarial example", "fluency", "metropolis-hasting sampling"]} +{"id": "varanasi-etal-2020-copybert", "title": "CopyBERT: A Unified Approach to Question Generation with Self-Attention", "abstract": "Contextualized word embeddings provide better initialization for neural networks that deal with various natural language understanding (NLU) tasks including Question Answering (QA) and more recently, Question Generation (QG). Apart from providing meaningful word representations, pre-trained transformer models (Vaswani et al., 2017), such as BERT (Devlin et al., 2019) also provide self-attentions which encode syntactic information that can be probed for dependency parsing (Hewitt and Manning, 2019) and POS tagging (Coenen et al., 2019). In this paper, we show that the information from self-attentions of BERT is useful for language modeling of questions conditioned on paragraph and answer phrases. &#10;
To control the attention span, we use a semi-diagonal mask and utilize a shared model for encoding and decoding, unlike sequence-to-sequence. We further employ a copy-mechanism over self-attentions to achieve state-of-the-art results for Question Generation on SQuAD v1.1 (Rajpurkar et al., 2016).", "keyphrases": ["question generation", "self-attention", "bert"]} +{"id": "zhang-etal-2021-crowdsourcing", "title": "Crowdsourcing Learning as Domain Adaptation: A Case Study on Named Entity Recognition", "abstract": "Crowdsourcing is regarded as one prospective solution for effective supervised learning, aiming to build large-scale annotated training data by crowd workers. Previous studies focus on reducing the influences from the noises of the crowdsourced annotations for supervised models. We take a different point in this work, regarding all crowdsourced annotations as gold-standard with respect to the individual annotators. In this way, we find that crowdsourcing could be highly similar to domain adaptation, and then the recent advances of cross-domain methods can be almost directly applied to crowdsourcing. Here we take named entity recognition (NER) as a study case, suggesting an annotator-aware representation learning model that is inspired by the domain adaptation methods which attempt to capture effective domain-aware features. We investigate both unsupervised and supervised crowdsourcing learning, assuming that no or only small-scale expert annotations are available. Experimental results on a benchmark crowdsourced NER dataset show that our method is highly effective, leading to a new state-of-the-art performance. In addition, under the supervised setting, we can achieve impressive performance gains with only a very small scale of expert annotations.", "keyphrases": ["domain adaptation", "entity recognition", "annotator", "crowdsourcing learning"]} +{"id": "luu-etal-2016-learning", "title": "Learning Term Embeddings for Taxonomic Relation Identification Using Dynamic Weighting Neural Network", "abstract": "Taxonomic relation identification aims to recognize the \u2018is-a\u2019 relation between two terms. Previous works on identifying taxonomic relations are mostly based on statistical and linguistic approaches, but the accuracy of these approaches is far from satisfactory. In this paper, we propose a novel supervised learning approach for identifying taxonomic relations using term embeddings. For this purpose, we first design a dynamic weighting neural network to learn term embeddings based on not only the hypernym and hyponym terms, but also the contextual information between them. We then apply such embeddings as features to identify taxonomic relations using a supervised method. The experimental results show that our proposed approach significantly outperforms other state-of-the-art methods by 9% to 13% in terms of accuracy for both general and specific domain datasets.", "keyphrases": ["term embedding", "taxonomic relation", "weighting", "linguistic approach", "hypernymy"]} +{"id": "vecchi-etal-2021-towards", "title": "Towards Argument Mining for Social Good: A Survey", "abstract": "This survey builds an interdisciplinary picture of Argument Mining (AM), with a strong focus on its potential to address issues related to Social and Political Science. More specifically, we focus on AM challenges related to its applications to social media and in the multilingual domain, and then proceed to the widely debated notion of argument quality. &#10;
We propose a novel definition of argument quality which is integrated with that of deliberative quality from the Social Science literature. Under our definition, the quality of a contribution needs to be assessed at multiple levels: the contribution itself, its preceding context, and the consequential effect on the development of the upcoming discourse. The latter has not received the deserved attention within the community. We finally define an application of AM for Social Good: (semi-)automatic moderation, a highly integrative application which (a) represents a challenging testbed for the integrated notion of quality we advocate, (b) allows the empirical quantification of argument/deliberative quality to benefit from the developments in other NLP fields (i.e. hate speech detection, fact checking, debiasing), and (c) has a clearly beneficial potential at the societal level thanks to its real-world application (even if extremely ambitious).", "keyphrases": ["argument mining", "social good", "deliberative quality"]} +{"id": "ning-etal-2017-structured", "title": "A Structured Learning Approach to Temporal Relation Extraction", "abstract": "Identifying temporal relations between events is an essential step towards natural language understanding. However, the temporal relation between two events in a story depends on, and is often dictated by, relations among other events. Consequently, effectively identifying temporal relations between events is a challenging problem even for human annotators. This paper suggests that it is important to take these dependencies into account while learning to identify these relations and proposes a structured learning approach to address this challenge. As a byproduct, this provides a new perspective on handling missing relations, a known issue that hurts existing methods. As we show, the proposed approach results in significant improvements on the two commonly used data sets for this problem.", "keyphrases": ["structured learning approach", "temporal relation extraction", "annotator"]} +{"id": "peng-etal-2018-learning", "title": "Learning Joint Semantic Parsers from Disjoint Data", "abstract": "We present a new approach to learning a semantic parser from multiple datasets, even when the target semantic formalisms are drastically different and the underlying corpora do not overlap. We handle such \u201cdisjoint\u201d data by treating annotations for unobserved formalisms as latent structured variables. Building on state-of-the-art baselines, we show improvements both in frame-semantic parsing and semantic dependency parsing by modeling them jointly.", "keyphrases": ["semantic parser", "disjoint data", "latent structured variable"]} +{"id": "sajjad-etal-2013-translating", "title": "Translating Dialectal Arabic to English", "abstract": "We present a dialectal Egyptian Arabic to English statistical machine translation system that leverages dialectal to Modern Standard Arabic (MSA) adaptation. In contrast to previous work, we first narrow down the gap between Egyptian and MSA by applying an automatic character-level transformational model that changes Egyptian to EG', which looks similar to MSA. The transformations include morphological, phonological and spelling changes. The transformation reduces the out-of-vocabulary (OOV) words from 5.2% to 2.6% and gives a gain of 1.87 BLEU points. 
Further, adapting large MSA/English parallel data increases the lexical coverage, reduces OOVs to 0.7% and leads to an absolute BLEU improvement of 2.73 points.", "keyphrases": ["dialectal arabic", "egyptian arabic", "pivot language"]} +{"id": "forster-etal-2012-rwth", "title": "RWTH-PHOENIX-Weather: A Large Vocabulary Sign Language Recognition and Translation Corpus", "abstract": "This paper introduces the RWTH-PHOENIX-Weather corpus, a video-based, large vocabulary corpus of German Sign Language suitable for statistical sign language recognition and translation. In contrast to most available sign language data collections, the RWTH-PHOENIX-Weather corpus has not been recorded for linguistic research but for use in statistical pattern recognition. The corpus contains weather forecasts recorded from German public TV which are manually annotated using glosses distinguishing sign variants, and time boundaries have been marked on the sentence and the gloss level. Further, the spoken German weather forecast has been transcribed in a semi-automatic fashion using a state-of-the-art automatic speech recognition system. Moreover, an additional translation of the glosses into spoken German has been created to capture allowable translation variability. In addition to the corpus, experimental baseline results for hand and head tracking, statistical sign language recognition and translation are presented.", "keyphrases": ["translation corpus", "german sign language", "weather forecast"]} +{"id": "saraclar-sproat-2004-lattice", "title": "Lattice-Based Search for Spoken Utterance Retrieval", "abstract": "Recent work on spoken document retrieval has suggested that it is adequate to take the single-best output of ASR, and perform text retrieval on this output. This is reasonable enough for the task of retrieving broadcast news stories, where word error rates are relatively low, and the stories are long enough to contain much redundancy. But it is patently not reasonable if one\u2019s task is to retrieve a short snippet of speech in a domain where WER\u2019s can be as high as 50%; such would be the situation with teleconference speech, where one\u2019s task is to find if and when a participant uttered a certain phrase. In this paper we propose an indexing procedure for spoken utterance retrieval that works on lattices rather than just single-best text. We demonstrate that this procedure can improve F scores by over five points compared to single-best retrieval on tasks with poor WER and low redundancy. The representation is flexible so that we can represent both word lattices, as well as phone lattices, the latter being important for improving performance when searching for phrases containing OOV words.", "keyphrases": ["search", "spoken utterance retrieval", "word error rate"]} +{"id": "kim-etal-2020-beyond", "title": "Beyond Domain APIs: Task-oriented Conversational Modeling with Unstructured Knowledge Access", "abstract": "Most prior work on task-oriented dialogue systems is restricted to a limited coverage of domain APIs, while users oftentimes have domain related requests that are not covered by the APIs. In this paper, we propose to expand coverage of task-oriented dialogue systems by incorporating external unstructured knowledge sources. We define three sub-tasks: knowledge-seeking turn detection, knowledge selection, and knowledge-grounded response generation, which can be modeled individually or jointly. 
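A toy illustration of the lattice indexing idea in the saraclar-sproat-2004-lattice record above: each word hypothesis is stored with its posterior probability, and retrieval sums posteriors per utterance instead of trusting a single-best transcript. The input format and all names here are assumptions for illustration, not the paper's implementation:

```python
from collections import defaultdict

def build_index(lattices):
    """Toy inverted index over lattice word hypotheses.

    `lattices` is assumed to be {utt_id: [(word, posterior), ...]}, i.e. each
    word hypothesis in an utterance's lattice paired with its posterior.
    """
    index = defaultdict(list)
    for utt_id, hyps in lattices.items():
        for word, posterior in hyps:
            index[word].append((utt_id, posterior))
    return index

def retrieve(index, word, threshold=0.2):
    """Return utterances whose summed posterior for `word` clears a threshold;
    lowering the threshold trades precision for recall on high-WER speech."""
    totals = defaultdict(float)
    for utt_id, posterior in index[word]:
        totals[utt_id] += posterior
    return [u for u, p in totals.items() if p >= threshold]
```

The same scheme extends to phone lattices by indexing phone strings instead of words, which is how the abstract handles OOV query terms.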
We introduce an augmented version of MultiWOZ 2.1, which includes new out-of-API-coverage turns and responses grounded on external knowledge sources. We present baselines for each sub-task using both conventional and neural approaches. Our experimental results demonstrate the need for further research in this direction to enable more informative conversational systems.", "keyphrases": ["domain api", "conversational modeling", "unstructured knowledge access", "task-oriented dialog"]} +{"id": "zhang-etal-2019-lattice", "title": "Lattice Transformer for Speech Translation", "abstract": "Recent advances in sequence modeling have highlighted the strengths of the transformer architecture, especially in achieving state-of-the-art machine translation results. However, depending on the up-stream systems, e.g., speech recognition, or word segmentation, the input to the translation system can vary greatly. The goal of this work is to extend the attention mechanism of the transformer to naturally consume the lattice in addition to the traditional sequential input. We first propose a general lattice transformer for speech translation where the input is the output of the automatic speech recognition (ASR) which contains multiple paths and posterior scores. To leverage the extra information from the lattice structure, we develop a novel controllable lattice attention mechanism to obtain latent representations. On the LDC Spanish-English speech translation corpus, our experiments show that the lattice transformer generalizes significantly better and outperforms both a transformer baseline and a lattice LSTM. Additionally, we validate our approach on the WMT 2017 Chinese-English translation task with lattice inputs from different BPE segmentations. In this task, we also observe improvements over strong baselines.", "keyphrases": ["speech translation", "asr", "lattice transformer"]} +{"id": "baan-etal-2019-realization", "title": "On the Realization of Compositionality in Neural Networks", "abstract": "We present a detailed comparison of two types of sequence-to-sequence models trained to conduct a compositional task. The models are architecturally identical at inference time, but differ in the way that they are trained: our baseline model is trained with a task-success signal only, while the other model receives additional supervision on its attention mechanism (Attentive Guidance), which has been shown to be an effective method for encouraging more compositional solutions. We first confirm that the models with attentive guidance indeed infer more compositional solutions than the baseline, by training them on the lookup table task presented by Liska et al. (2019). We then do an in-depth analysis of the structural differences between the two model types, focusing in particular on the organisation of the parameter space and the hidden layer activations, and find noticeable differences in both these aspects. Guided networks focus more on the components of the input rather than the sequence as a whole and develop small functional groups of neurons with specific purposes that use their gates more selectively. 
Results from parameter heat maps, component swapping and graph analysis also indicate that guided networks exhibit a more modular structure with a small number of specialized, strongly connected neurons.", "keyphrases": ["compositionality", "in-depth analysis", "neuron"]} +{"id": "schwenk-2012-continuous", "title": "Continuous Space Translation Models for Phrase-Based Statistical Machine Translation", "abstract": "This paper presents a new approach to perform the estimation of the translation model probabilities of a phrase-based statistical machine translation system. We use neural networks to directly learn the translation probability of phrase pairs using continuous representations. The system can be easily trained on the same data used to build standard phrase-based systems. We provide experimental evidence that the approach seems to be able to infer meaningful translation probabilities for phrase pairs not seen in the training data, or even predict a list of the most likely translations given a source phrase. The approach can be used to rescore n-best lists, but we also discuss an integration into the Moses decoder. A preliminary evaluation on the English/French IWSLT task achieved improvements in the BLEU score and a human analysis showed that the new model often chooses semantically better translations. Several extensions of this work are discussed.", "keyphrases": ["translation probability", "phrase-based smt", "maximum length"]} +{"id": "demir-etal-2008-generating", "title": "Generating Textual Summaries of Bar Charts", "abstract": "Information graphics, such as bar charts and line graphs, play an important role in multimodal documents. This paper presents a novel approach to producing a brief textual summary of a simple bar chart. It outlines our approach to augmenting the core message of the graphic to produce a brief summary. Our method simultaneously constructs both the discourse and sentence structures of the textual summary using a bottom-up approach. The result is then realized in natural language. An evaluation study validates our generation methodology.", "keyphrases": ["textual summary", "bar chart", "message"]} +{"id": "felice-buttery-2019-entropy", "title": "Entropy as a Proxy for Gap Complexity in Open Cloze Tests", "abstract": "This paper presents a pilot study of entropy as a measure of gap complexity in open cloze tests aimed at learners of English. Entropy is used to quantify the information content in each gap, which can be used to estimate complexity. Our study shows that average gap entropy correlates positively with proficiency levels while individual gap entropy can capture contextual complexity. To the best of our knowledge, this is the first unsupervised information-theoretical approach to evaluating the quality of cloze tests.", "keyphrases": ["gap complexity", "open cloze test", "contextual complexity", "entropy"]} +{"id": "van-noord-bos-2017-dealing", "title": "Dealing with Co-reference in Neural Semantic Parsing", "abstract": "Linguistic phenomena like pronouns, control constructions, or co-reference give rise to co-indexed variables in meaning representations. We review three different methods for dealing with co-indexed variables in the output of neural semantic parsing of abstract meaning representations: (a) copying concepts during training and restoring co-indexation in a post-processing step; (b) explicit indexing of co-indexation; and (c) using absolute paths to designate co-indexing. 
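The felice-buttery-2019-entropy record above quantifies cloze gap difficulty as the Shannon entropy of the distribution of fillers observed for a gap. A minimal sketch of that measure; where the candidate answers come from (e.g. test takers or an LM) is an assumption left open here:

```python
import math
from collections import Counter

def gap_entropy(answers):
    """Shannon entropy (bits) of the distribution of answers supplied for one
    cloze gap; higher entropy = more admissible fillers = a harder gap."""
    counts = Counter(a.strip().lower() for a in answers)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

# A gap everyone fills identically has entropy 0; four equally likely
# fillers give log2(4) = 2 bits.
print(gap_entropy(["of", "of", "of"]))          # 0.0
print(gap_entropy(["in", "at", "on", "into"]))  # 2.0
```

Averaging this score over a test's gaps gives the per-test quantity the abstract correlates with proficiency levels.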
The second method gives the best results and outperforms the baseline by 2.9 F-score points.", "keyphrases": ["co-reference", "neural semantic parsing", "van"]} +{"id": "sangati-zuidema-2011-accurate", "title": "Accurate Parsing with Compact Tree-Substitution Grammars: Double-DOP", "abstract": "We present a novel approach to Data-Oriented Parsing (DOP). Like other DOP models, our parser utilizes syntactic fragments of arbitrary size from a treebank to analyze new sentences, but, crucially, it uses only those which are encountered at least twice. This criterion allows us to work with a relatively small but representative set of fragments, which can be employed as the symbolic backbone of several probabilistic generative models. For parsing we define a transform-backtransform approach that allows us to use standard PCFG technology, making our results easily replicable. According to standard Parseval metrics, our best model is on par with many state-of-the-art parsers, while offering some complementary benefits: a simple generative probability model, and an explicit representation of the larger units of grammar.", "keyphrases": ["dop model", "fragment", "treebank"]} +{"id": "suhr-etal-2017-corpus", "title": "A Corpus of Natural Language for Visual Reasoning", "abstract": "We present a new visual reasoning language dataset, containing 92,244 pairs of examples of natural statements grounded in synthetic images with 3,962 unique sentences. We describe a method of crowdsourcing linguistically-diverse data, and present an analysis of our data. The data demonstrates a broad set of linguistic phenomena, requiring visual and set-theoretic reasoning. We experiment with various models, and show the data presents a strong challenge for future research.", "keyphrases": ["reasoning", "image", "quantifier", "creation", "fig"]} +{"id": "boleda-etal-2007-modelling", "title": "Modelling Polysemy in Adjective Classes by Multi-Label Classification", "abstract": "This paper assesses the role of multi-label classification in modelling polysemy for language acquisition tasks. We focus on the acquisition of semantic classes for Catalan adjectives, and show that polysemy acquisition naturally suits architectures used for multi-label classification. Furthermore, we explore the performance of information drawn from different levels of linguistic description, using feature sets based on morphology, syntax, semantics, and n-gram distribution. Finally, we demonstrate that ensemble classifiers are a powerful and adequate way to combine different types of linguistic evidence: a simple, majority voting ensemble classifier improves the accuracy from 62.5% (best single classifier) to 84%.", "keyphrases": ["polysemy", "adjective", "multi-label classification"]} +{"id": "shao-etal-2019-aggregating", "title": "Aggregating Bidirectional Encoder Representations Using MatchLSTM for Sequence Matching", "abstract": "In this work, we propose an aggregation method to combine the Bidirectional Encoder Representations from Transformer (BERT) with a MatchLSTM layer for Sequence Matching. Given a sentence pair, we extract its output representations from BERT. Then we extend BERT with a MatchLSTM layer to capture further interaction of the sentence pair for sequence matching tasks. Taking natural language inference as an example, we split the BERT output into two parts, which come from the premise sentence and the hypothesis sentence. 
At each position of the hypothesis sentence, both the weighted representation of the premise sentence and the representation of the current token are fed into the LSTM. We jointly train the aggregation layer and pre-trained layer for sequence matching. We conduct an experiment on two publicly available datasets, WikiQA and SNLI. Experiments show that our model achieves significant improvements compared with state-of-the-art methods on both datasets.", "keyphrases": ["bidirectional encoder representations", "sequence matching", "bert"]} +{"id": "peng-etal-2017-composite", "title": "Composite Task-Completion Dialogue Policy Learning via Hierarchical Deep Reinforcement Learning", "abstract": "Building a dialogue agent to fulfill complex tasks, such as travel planning, is challenging because the agent has to learn to collectively complete multiple subtasks. For example, the agent needs to reserve a hotel and book a flight so that enough time is left for the commute between arrival and hotel check-in. This paper addresses this challenge by formulating the task in the mathematical framework of options over Markov Decision Processes (MDPs), and proposing a hierarchical deep reinforcement learning approach to learning a dialogue manager that operates at different temporal scales. The dialogue manager consists of: (1) a top-level dialogue policy that selects among subtasks or options, (2) a low-level dialogue policy that selects primitive actions to complete the subtask given by the top-level policy, and (3) a global state tracker that helps ensure all cross-subtask constraints be satisfied. Experiments on a travel planning task with simulated and real users show that our approach leads to significant improvements over three baselines, two based on handcrafted rules and the other based on flat deep reinforcement learning.", "keyphrases": ["reinforcement learning", "dialogue agent", "dialog policy learning"]} +{"id": "bykh-meurers-2014-exploring", "title": "Exploring Syntactic Features for Native Language Identification: A Variationist Perspective on Feature Encoding and Ensemble Optimization", "abstract": "In this paper, we systematically explore lexicalized and non-lexicalized local syntactic features for the task of Native Language Identification (NLI). We investigate different types of feature representations in single- and cross-corpus settings, including two representations inspired by a variationist perspective on the choices made in the linguistic system. To combine the different models, we use a probabilities-based ensemble classifier and propose a technique to optimize and tune it. Combining the best performing syntactic features with four types of n-grams outperforms the best approach of the NLI Shared Task 2013.", "keyphrases": ["syntactic feature", "native language identification", "variationist perspective", "n-gram"]} +{"id": "kohonen-etal-2010-semi", "title": "Semi-Supervised Learning of Concatenative Morphology", "abstract": "We consider morphology learning in a semi-supervised setting, where a small set of linguistic gold standard analyses is available. We extend Morfessor Baseline, which is a method for unsupervised morphological segmentation, to this task. We show that known linguistic segmentations can be exploited by adding them into the data likelihood function and optimizing separate weights for unlabeled and labeled data. Experiments on English and Finnish are presented with varying amounts of labeled data. 
Results of the linguistic evaluation of Morpho Challenge improve rapidly even with small amounts of labeled data, surpassing the state-of-the-art unsupervised methods at 1000 labeled words for English and at 100 labeled words for Finnish.", "keyphrases": ["morfessor", "segmentation", "semi-supervised version", "extension"]} +{"id": "zhou-etal-2016-text", "title": "Text Classification Improved by Integrating Bidirectional LSTM with Two-dimensional Max Pooling", "abstract": "Recurrent Neural Network (RNN) is one of the most popular architectures used in Natural Language Processing (NLP) tasks because its recurrent structure is very suitable for processing variable-length text. RNN can utilize distributed representations of words by first converting the tokens comprising each text into vectors, which form a matrix. This matrix includes two dimensions: the time-step dimension and the feature vector dimension. Then most existing models usually utilize one-dimensional (1D) max pooling operation or attention-based operation only on the time-step dimension to obtain a fixed-length vector. However, the features on the feature vector dimension are not mutually independent, and simply applying 1D pooling operation over the time-step dimension independently may destroy the structure of the feature representation. On the other hand, applying two-dimensional (2D) pooling operation over the two dimensions may sample more meaningful features for sequence modeling tasks. To integrate the features on both dimensions of the matrix, this paper explores applying 2D max pooling operation to obtain a fixed-length representation of the text. This paper also utilizes 2D convolution to sample more meaningful information of the matrix. Experiments are conducted on six text classification tasks, including sentiment analysis, question classification, subjectivity classification and newsgroup classification. Compared with the state-of-the-art models, the proposed models achieve excellent performance on 4 out of 6 tasks. Specifically, one of the proposed models achieves the highest accuracy on Stanford Sentiment Treebank binary classification and fine-grained classification tasks.", "keyphrases": ["bidirectional lstm", "fixed-length representation", "text classification"]} +{"id": "yoon-etal-2021-self", "title": "Self-Adapter at SemEval-2021 Task 10: Entropy-based Pseudo-Labeler for Source-free Domain Adaptation", "abstract": "Source-free domain adaptation is an emerging line of work in deep learning research since it is closely related to the real-world environment. We study the domain adaptation in the sequence labeling problem where the model trained on the source domain data is given. We propose two methods: Self-Adapter and Selective Classifier Training. Self-Adapter is a training method that uses sentence-level pseudo-labels filtered by the self-entropy threshold to provide supervision to the whole model. Selective Classifier Training uses token-level pseudo-labels and supervises only the classification layer of the model. 
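The zhou-etal-2016-text record above pools over both the time-step and feature dimensions of the RNN output matrix rather than over time alone. A numpy sketch of 2D max pooling over a (time_steps, features) matrix; the pool sizes and the edge-dropping policy are illustrative assumptions, not the paper's settings:

```python
import numpy as np

def max_pool_2d(h, pool=(2, 2)):
    """2D max pooling over a (time_steps, features) matrix such as stacked
    BiLSTM hidden states, pooling jointly over both dimensions."""
    t, d = h.shape
    pt, pd = pool
    t, d = t - t % pt, d - d % pd            # drop ragged edges
    h = h[:t, :d].reshape(t // pt, pt, d // pd, pd)
    return h.max(axis=(1, 3))                # max within each pt x pd window

h = np.random.randn(20, 64)                  # e.g. 20 tokens, 64-dim states
print(max_pool_2d(h).shape)                  # (10, 32)
```

Flattening the pooled matrix then yields the fixed-length text representation that feeds the classifier, regardless of the input length before pooling.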
The proposed methods are evaluated on data provided by SemEval-2021 task 10 and Self-Adapter achieves 2nd rank performance.", "keyphrases": ["semeval-2021 task", "pseudo-label", "source-free domain adaptation"]} +{"id": "razmara-etal-2012-mixing", "title": "Mixing Multiple Translation Models in Statistical Machine Translation", "abstract": "Statistical machine translation is often faced with the problem of combining training data from many diverse sources into a single translation model which then has to translate sentences in a new domain. We propose a novel approach, ensemble decoding, which combines a number of translation systems dynamically at the decoding step. In this paper, we evaluate performance on a domain adaptation setting where we translate sentences from the medical domain. Our experimental results show that ensemble decoding outperforms various strong baselines including mixture models, the current state-of-the-art for domain adaptation in machine translation.", "keyphrases": ["statistical machine translation", "ensemble decoding", "strong baseline"]} +{"id": "pinnis-etal-2012-accurat", "title": "ACCURAT Toolkit for Multi-Level Alignment and Information Extraction from Comparable Corpora", "abstract": "The lack of parallel corpora and linguistic resources for many languages and domains is one of the major obstacles for the further advancement of automated translation. A possible solution is to exploit comparable corpora (non-parallel bi- or multi-lingual text resources) which are much more widely available than parallel translation data. Our presented toolkit deals with parallel content extraction from comparable corpora. It consists of tools bundled in two workflows: (1) alignment of comparable documents and extraction of parallel sentences and (2) extraction and bilingual mapping of terms and named entities. The toolkit pairs similar bilingual comparable documents and extracts parallel sentences and bilingual terminological and named entity dictionaries from comparable corpora. This demonstration focuses on the English, Latvian, Lithuanian, and Romanian languages.", "keyphrases": ["toolkit", "comparable corpora", "parallel sentence"]} +{"id": "zhang-etal-2018-global", "title": "Global Attention for Name Tagging", "abstract": "Many name tagging approaches use local contextual information with much success, but can fail when the local context is ambiguous or limited. We present a new framework to improve name tagging by utilizing local, document-level, and corpus-level contextual information. For each word, we retrieve document-level context from other sentences within the same document and corpus-level context from sentences in other documents. We propose a model that learns to incorporate document-level and corpus-level contextual information alongside local contextual information via document-level and corpus-level attentions, which dynamically weight their respective contextual information and determines the influence of this information through gating mechanisms. Experiments on benchmark datasets show the effectiveness of our approach, which achieves state-of-the-art results for Dutch, German, and Spanish on the CoNLL-2002 and CoNLL-2003 datasets. 
We will make our code and pre-trained models publicly available for research purposes.", "keyphrases": ["name tagging", "contextual information", "global attention"]} +{"id": "shutova-etal-2012-unsupervised", "title": "Unsupervised Metaphor Paraphrasing using a Vector Space Model", "abstract": "We present the first fully unsupervised approach to metaphor interpretation, and a system that produces literal paraphrases for metaphorical expressions. Such a form of interpretation is directly transferable to other NLP applications that can benefit from a metaphor processing component. Our method is different from previous work in that it does not rely on any manually annotated data or lexical resources. First, our method computes candidate paraphrases according to the context in which the metaphor appears, using a vector space model. It then uses a selectional preference model to measure the degree of literalness of the paraphrases. The system identifies correct paraphrases with a precision of 0.52 at top rank, which is a promising result for a fully unsupervised approach.", "keyphrases": ["paraphrase", "vector space model", "linguistic metaphor"]} +{"id": "rohrdantz-etal-2011-towards", "title": "Towards Tracking Semantic Change by Visual Analytics", "abstract": "This paper presents a new approach to detecting and tracking changes in word meaning by visually modeling and representing diachronic development in word contexts. Previous studies have shown that computational models are capable of clustering and disambiguating senses; a more recent trend investigates whether changes in word meaning can be tracked by automatic methods. The aim of our study is to offer a new instrument for investigating the diachronic development of word senses in a way that allows for a better understanding of the nature of semantic change in general. For this purpose we combine techniques from the field of Visual Analytics with unsupervised methods from Natural Language Processing, allowing for an interactive visual exploration of semantic change.", "keyphrases": ["semantic change", "visual analytics", "new approach"]} +{"id": "beltagy-etal-2016-representing", "title": "Representing Meaning with a Combination of Logical and Distributional Models", "abstract": "NLP tasks differ in the semantic information they require, and at this time no single semantic representation fulfills all requirements. Logic-based representations characterize sentence structure, but do not capture the graded aspect of meaning. Distributional models give graded similarity ratings for words and phrases, but do not capture sentence structure in the same detail as logic-based approaches. It has therefore been argued that the two are complementary. We adopt a hybrid approach that combines logical and distributional semantics using probabilistic logic, specifically Markov Logic Networks. In this article, we focus on the three components of a practical system: 1) Logical representation focuses on representing the input problems in probabilistic logic; 2) knowledge base construction creates weighted inference rules by integrating distributional information with other sources; and 3) probabilistic inference involves solving the resulting MLN inference problems efficiently. To evaluate our approach, we use the task of textual entailment, which can utilize the strengths of both logic-based and distributional representations. In particular we focus on the SICK data set, where we achieve state-of-the-art results. 
We also release a lexical entailment data set of 10,213 rules extracted from the SICK data set, which is a valuable resource for evaluating lexical entailment systems.", "keyphrases": ["markov logic networks", "distributional information", "boxer"]} +{"id": "shnarch-etal-2011-probabilistic", "title": "A Probabilistic Modeling Framework for Lexical Entailment", "abstract": "Recognizing entailment at the lexical level is an important and commonly-addressed component in textual inference. Yet, this task has been mostly approached by simplified heuristic methods. This paper proposes an initial probabilistic modeling framework for lexical entailment, with suitable EM-based parameter estimation. Our model considers prominent entailment factors, including differences in lexical-resources reliability and the impacts of transitivity and multiple evidence. Evaluations show that the proposed model outperforms most prior systems while pointing at required future improvements.", "keyphrases": ["probabilistic modeling framework", "lexical entailment", "heuristic method"]} +{"id": "niu-bansal-2018-polite", "title": "Polite Dialogue Generation Without Parallel Data", "abstract": "Stylistic dialogue response generation, with valuable applications in personality-based conversational agents, is a challenging task because the response needs to be fluent, contextually-relevant, as well as paralinguistically accurate. Moreover, parallel datasets for regular-to-stylistic pairs are usually unavailable. We present three weakly-supervised models that can generate diverse, polite (or rude) dialogue responses without parallel data. Our late fusion model (Fusion) merges the decoder of an encoder-attention-decoder dialogue model with a language model trained on stand-alone polite utterances. Our label-finetuning (LFT) model prepends to each source sequence a politeness-score scaled label (predicted by our state-of-the-art politeness classifier) during training, and at test time is able to generate polite, neutral, and rude responses by simply scaling the label embedding by the corresponding score. Our reinforcement learning model (Polite-RL) encourages politeness generation by assigning rewards proportional to the politeness classifier score of the sampled response. We also present two retrieval-based polite dialogue model baselines. Human evaluation validates that while the Fusion and the retrieval-based models achieve politeness with poorer context-relevance, the LFT and Polite-RL models can produce significantly more polite responses without sacrificing dialogue quality.", "keyphrases": ["dialogue generation", "language model", "politeness"]} +{"id": "andrew-2006-hybrid", "title": "A Hybrid Markov/Semi-Markov Conditional Random Field for Sequence Segmentation", "abstract": "Markov order-1 conditional random fields (CRFs) and semi-Markov CRFs are two popular models for sequence segmentation and labeling. Both models have advantages in terms of the type of features they most naturally represent. We propose a hybrid model that is capable of representing both types of features, and describe efficient algorithms for its training and inference. We demonstrate that our hybrid model achieves error reductions of 18% and 25% over a standard order-1 CRF and a semi-Markov CRF (resp.) on the task of Chinese word segmentation. 
We also propose the use of a powerful feature for the semi-Markov CRF: the log conditional odds that a given token sequence constitutes a chunk according to a generative model, which reduces error by an additional 13%. Our best system achieves 96.8% F-measure, the highest reported score on this test set.", "keyphrases": ["conditional random field", "sequence segmentation", "semi-markov crf"]} +{"id": "cherry-lin-2006-soft", "title": "Soft Syntactic Constraints for Word Alignment through Discriminative Training", "abstract": "Word alignment methods can gain valuable guidance by ensuring that their alignments maintain cohesion with respect to the phrases specified by a monolingual dependency tree. However, this hard constraint can also rule out correct alignments, and its utility decreases as alignment models become more complex. We use a publicly available structured output SVM to create a max-margin syntactic aligner with a soft cohesion constraint. The resulting aligner is the first, to our knowledge, to use a discriminative learning method to train an ITG bitext parser.", "keyphrases": ["word alignment", "cohesion", "reason itg", "more attention"]} +{"id": "zhou-etal-2022-exsum", "title": "ExSum: From Local Explanations to Model Understanding", "abstract": "Interpretability methods are developed to understand the working mechanisms of black-box models, which is crucial to their responsible deployment. Fulfilling this goal requires both that the explanations generated by these methods are correct and that people can easily and reliably understand them. While the former has been addressed in prior work, the latter is often overlooked, resulting in informal model understanding derived from a handful of local explanations. In this paper, we introduce explanation summary (ExSum), a mathematical framework for quantifying model understanding, and propose metrics for its quality assessment. On two domains, ExSum highlights various limitations in the current practice, helps develop accurate model understanding, and reveals easily overlooked properties of the model. We also connect understandability to other properties of explanations such as human alignment, robustness, and counterfactual similarity and plausibility.", "keyphrases": ["model understanding", "black-box model", "exsum"]} +{"id": "kajiwara-komachi-2018-complex", "title": "Complex Word Identification Based on Frequency in a Learner Corpus", "abstract": "We introduce the TMU systems for the Complex Word Identification (CWI) Shared Task 2018. TMU systems use random forest classifiers and regressors whose features are the number of characters, the number of words, and the frequency of target words in various corpora. Our simple systems performed best on 5 tracks out of 12 tracks. Our ablation analysis revealed the usefulness of a learner corpus for CWI task.", "keyphrases": ["learner corpus", "cwi task", "complex word identification"]} +{"id": "zeng-etal-2019-variational", "title": "A Variational Approach to Weakly Supervised Document-Level Multi-Aspect Sentiment Classification", "abstract": "In this paper, we propose a variational approach to weakly supervised document-level multi-aspect sentiment classification. Instead of using user-generated ratings or annotations provided by domain experts, we use target-opinion word pairs as \u201csupervision.\u201d These word pairs can be extracted by using dependency parsers and simple rules. 
Our objective is to predict an opinion word given a target word while our ultimate goal is to learn a sentiment polarity classifier to predict the sentiment polarity of each aspect given a document. By introducing a latent variable, i.e., the sentiment polarity, into the objective function, we can inject the sentiment polarity classifier into the objective via the variational lower bound. We can learn a sentiment polarity classifier by optimizing the lower bound. We show that our method can outperform weakly supervised baselines on TripAdvisor and BeerAdvocate datasets and can be comparable to the state-of-the-art supervised method with hundreds of labels per aspect.", "keyphrases": ["variational approach", "multi-aspect sentiment classification", "word pair"]} +{"id": "chen-etal-2019-numeracy", "title": "Numeracy-600K: Learning Numeracy for Detecting Exaggerated Information in Market Comments", "abstract": "In this paper, we attempt to answer the question of whether neural network models can learn numeracy, which is the ability to predict the magnitude of a numeral at some specific position in a text description. A large benchmark dataset, called Numeracy-600K, is provided for the novel task. We explore several neural network models including CNN, GRU, BiGRU, CRNN, CNN-capsule, GRU-capsule, and BiGRU-capsule in the experiments. The results show that the BiGRU model gets the best micro-averaged F1 score of 80.16%, and the GRU-capsule model gets the best macro-averaged F1 score of 64.71%. Besides discussing the challenges through comprehensive experiments, we also present an important application scenario, i.e., detecting exaggerated information, for the task.", "keyphrases": ["exaggerated information", "market comment", "network model", "magnitude", "bigru model"]} +{"id": "das-bandyopadhyay-2011-dr", "title": "Dr Sentiment Knows Everything!", "abstract": "Sentiment analysis has been one of the most in-demand research areas for the last few decades. Although a formidable amount of research has been done, the existing reported solutions or available systems are still far from perfect or do not meet the satisfaction level of end users. The main issue is the various conceptual rules that govern sentiment, and there are even more clues (possibly unlimited) that can convey these concepts from realization to verbalization of a human being. Human psychology directly relates to the unrevealed clues and governs our sentiment realization. Human psychology relates to many things like social psychology, culture, pragmatics and many more endless intelligent aspects of civilization. Proper incorporation of human psychology into computational sentiment knowledge representation may solve the problem. In the present paper we propose a template based online interactive gaming technology, called Dr Sentiment, to automatically create the PsychoSentiWordNet involving the internet population. The PsychoSentiWordNet is an extension of SentiWordNet that presently holds human psychological knowledge on a few aspects along with sentiment knowledge.", "keyphrases": ["extension", "sentiwordnet", "human psychological knowledge"]} +{"id": "zhang-etal-2008-bayesian", "title": "Bayesian Learning of Non-Compositional Phrases with Synchronous Parsing", "abstract": "We combine the strengths of Bayesian modeling and synchronous grammar in unsupervised learning of basic translation phrase pairs. 
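The chen-etal-2019-numeracy record above frames numeracy as predicting the magnitude of a numeral at a masked position. One plausible target encoding is an order-of-magnitude class per numeral; this bucketing is an assumption for illustration, not necessarily the paper's exact scheme:

```python
import math

def magnitude_class(token):
    """Map a numeral token to an order-of-magnitude class, the kind of
    target a model could predict for a masked number in a market comment."""
    value = abs(float(token.replace(",", "")))
    if value == 0:
        return 0
    # Class k means the numeral has k digits before the decimal point;
    # values below 1 fall into class 0.
    return max(int(math.floor(math.log10(value))) + 1, 0)

for tok in ["7", "42", "1,250", "0.5"]:
    print(tok, "->", magnitude_class(tok))   # 1, 2, 4, 0
```

Detecting exaggeration then reduces to comparing the predicted magnitude class against the magnitude actually written in the comment.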
The structured space of a synchronous grammar is a natural fit for phrase pair probability estimation, though the search space can be prohibitively large. Therefore we explore efficient algorithms for pruning this space that lead to empirically effective results. Incorporating a sparse prior using Variational Bayes biases the models toward generalizable, parsimonious parameter sets, leading to significant improvements in word alignment. This preference for sparse solutions together with effective pruning methods forms a phrase alignment regimen that produces better end-to-end translations than standard word alignment approaches.", "keyphrases": ["synchronous grammar", "word alignment", "bayesian learning"]} +{"id": "pradhan-etal-2007-towards", "title": "Towards Robust Semantic Role Labeling", "abstract": "Most semantic role labeling (SRL) research has been focused on training and evaluating on the same corpus. This strategy, although appropriate for initiating research, can lead to over-training to the particular corpus. This article describes the operation of ASSERT, a state-of-the-art SRL system, and analyzes the robustness of the system when trained on one genre of data and used to label a different genre. As a starting point, results are first presented for training and testing the system on the PropBank corpus, which is annotated Wall Street Journal (WSJ) data. Experiments are then presented to evaluate the portability of the system to another source of data. These experiments are based on comparisons of performance using PropBanked WSJ data and PropBanked Brown Corpus data. The results indicate that whereas syntactic parses and argument identification transfer relatively well to a new corpus, argument classification does not. An analysis of the reasons for this is presented and these generally point to the nature of the more lexical/semantic features dominating the classification task where more general structural features are dominant in the argument identification task.", "keyphrases": ["propbank", "reason", "semantic feature", "new domain", "marked decrease"]} +{"id": "yang-etal-2020-program", "title": "Program Enhanced Fact Verification with Verbalization and Graph Attention Network", "abstract": "Performing fact verification based on structured data is important for many real-life applications and is a challenging research problem, particularly when it involves both symbolic operations and informal inference based on language understanding. In this paper, we present a Program-enhanced Verbalization and Graph Attention Network (ProgVGAT) to integrate programs and execution into textual inference models. Specifically, a verbalization with program execution model is proposed to accumulate evidences that are embedded in operations over the tables. Built on that, we construct the graph attention verification networks, which are designed to fuse different sources of evidences from verbalized program execution, program structures, and the original statements and tables, to make the final verification decision. To support the above framework, we propose a program selection module optimized with a new training strategy based on margin loss, to produce more accurate programs, which is shown to be effective in enhancing the final verification results. 
Experimental results show that the proposed framework achieves new state-of-the-art performance, a 74.4% accuracy, on the benchmark dataset TABFACT.", "keyphrases": ["fact verification", "verbalization", "graph attention network", "reasoning"]} +{"id": "goel-etal-2021-robustness", "title": "Robustness Gym: Unifying the NLP Evaluation Landscape", "abstract": "Despite impressive performance on standard benchmarks, natural language processing (NLP) models are often brittle when deployed in real-world systems. In this work, we identify challenges with evaluating NLP systems and propose a solution in the form of Robustness Gym (RG), a simple and extensible evaluation toolkit that unifies 4 standard evaluation paradigms: subpopulations, transformations, evaluation sets, and adversarial attacks. By providing a common platform for evaluation, RG enables practitioners to compare results from disparate evaluation paradigms with a single click, and to easily develop and share novel evaluation methods using a built-in set of abstractions. RG is under active development and we welcome feedback & contributions from the community.", "keyphrases": ["nlp system", "subpopulation", "evaluation set", "robustness gym"]} +{"id": "cuadros-rigau-2008-knownet", "title": "KnowNet: Building a Large Net of Knowledge from the Web", "abstract": "This paper presents a new fully automatic method for building highly dense and accurate knowledge bases from existing semantic resources. Basically, the method uses a wide-coverage and accurate knowledge-based Word Sense Disambiguation algorithm to assign the most appropriate senses to large sets of topically related words acquired from the web. KnowNet, the resulting knowledge base, which connects large sets of semantically related concepts, is a major step towards the autonomous acquisition of knowledge from raw corpora. In fact, KnowNet is several times larger than any available knowledge resource encoding relations between synsets, and the knowledge KnowNet contains outperforms any other resource when empirically evaluated in a common framework.", "keyphrases": ["web", "knownet", "knowledge basis"]} +{"id": "el-baff-etal-2018-challenge", "title": "Challenge or Empower: Revisiting Argumentation Quality in a News Editorial Corpus", "abstract": "News editorials are said to shape public opinion, which makes them a powerful tool and an important source of political argumentation. However, rarely do editorials change anyone's stance on an issue completely, nor do they tend to argue explicitly (but rather follow a subtle rhetorical strategy). So, what does argumentation quality mean for editorials then? We develop the notion that an effective editorial challenges readers with opposing stance, and at the same time empowers the arguing skills of readers that share the editorial's stance \u2014 or even challenges both sides. To study argumentation quality based on this notion, we introduce a new corpus with 1000 editorials from the New York Times, annotated for their perceived effect along with the annotators' political orientations. Analyzing the corpus, we find that annotators with different orientations disagree on the effect significantly. While only 1% of all editorials changed anyone's stance, more than 5% meet our notion. 
We conclude that our corpus serves as a suitable resource for studying the argumentation quality of news editorials.", "keyphrases": ["argumentation quality", "news editorial", "ideology"]} +{"id": "al-kuwatly-etal-2020-identifying", "title": "Identifying and Measuring Annotator Bias Based on Annotators' Demographic Characteristics", "abstract": "Machine learning is recently used to detect hate speech and other forms of abusive language in online platforms. However, a notable weakness of machine learning models is their vulnerability to bias, which can impair their performance and fairness. One type is annotator bias caused by the subjective perception of the annotators. In this work, we investigate annotator bias using classification models trained on data from demographically distinct annotator groups. To do so, we sample balanced subsets of data that are labeled by demographically distinct annotators. We then train classifiers on these subsets, analyze their performances on similarly grouped test sets, and compare them statistically. Our findings show that the proposed approach successfully identifies bias and that demographic features, such as first language, age, and education, correlate with significant performance differences.", "keyphrases": ["annotator", "background", "educational background"]} +{"id": "chu-etal-2014-constructing", "title": "Constructing a Chinese\u2014Japanese Parallel Corpus from Wikipedia", "abstract": "Parallel corpora are crucial for statistical machine translation (SMT). However, they are quite scarce for most language pairs, such as Chinese\u2015Japanese. As comparable corpora are far more available, many studies have been conducted to automatically construct parallel corpora from comparable corpora. This paper presents a robust parallel sentence extraction system for constructing a Chinese\u2015Japanese parallel corpus from Wikipedia. The system is inspired by previous studies that mainly consist of a parallel sentence candidate filter and a binary classifier for parallel sentence identification. We improve the system by using the common Chinese characters for filtering and two novel feature sets for classification. Experiments show that our system performs significantly better than the previous studies for both accuracy in parallel sentence extraction and SMT performance. Using the system, we construct a Chinese\u2015Japanese parallel corpus with more than 126k highly accurate parallel sentences from Wikipedia. The constructed parallel corpus is freely available at .", "keyphrases": ["parallel corpus", "wikipedia", "sentence extraction system"]} +{"id": "teufel-van-halteren-2004-evaluating", "title": "Evaluating Information Content by Factoid Analysis: Human annotation and stability", "abstract": "We present a new approach to intrinsic summary evaluation, based on initial experiments in van Halteren and Teufel (2003), which combines two novel aspects: comparison of information content (rather than string similarity) in gold standard and system summary, measured in shared atomic information units which we call factoids, and comparison to more than one gold standard summary (in our data: 20 and 50 summaries respectively). 
In this paper, we show that factoid annotation is highly reproducible, introduce a weighted factoid score, estimate how many summaries are required for stable system rankings, and show that the factoid scores cannot be sufficiently approximated by unigrams and the DUC information overlap measure.", "keyphrases": ["information content", "factoid analysis", "van"]} +{"id": "jaffe-etal-2020-coreference", "title": "Coreference information guides human expectations during natural reading", "abstract": "Models of human sentence processing effort tend to focus on costs associated with retrieving structures and discourse referents from memory (memory-based) and/or on costs associated with anticipating upcoming words and structures based on contextual cues (expectation-based) (Levy, 2008). Although evidence suggests that expectation and memory may play separable roles in language comprehension (Levy et al., 2013), theories of coreference processing have largely focused on memory: how comprehenders identify likely referents of linguistic expressions. In this study, we hypothesize that coreference tracking also informs human expectations about upcoming words, and we test this hypothesis by evaluating the degree to which incremental surprisal measures generated by a novel coreference-aware semantic parser explain human response times in a naturalistic self-paced reading experiment. Results indicate (1) that coreference information indeed guides human expectations and (2) that coreference effects on memory retrieval may exist independently of coreference effects on expectations. Together, these findings suggest that the language processing system exploits coreference information both to retrieve referents from memory and to anticipate upcoming material.", "keyphrases": ["human expectation", "memory retrieval", "coreference information"]} +{"id": "cheng-etal-2016-long", "title": "Long Short-Term Memory-Networks for Machine Reading", "abstract": "In this paper we address the question of how to render sequence-level networks better at handling structured input. We propose a machine reading simulator which processes text incrementally from left to right and performs shallow reasoning with memory and attention. The reader extends the Long Short-Term Memory architecture with a memory network in place of a single memory cell. This enables adaptive memory usage during recurrence with neural attention, offering a way to weakly induce relations among tokens. The system is initially designed to process a single sequence but we also demonstrate how to integrate it with an encoder-decoder architecture. Experiments on language modeling, sentiment analysis, and natural language inference show that our model matches or outperforms the state of the art.", "keyphrases": ["machine reading", "long short-term memory-network", "mechanism", "strong memorization capability", "context information"]} +{"id": "rogers-etal-2018-whats", "title": "What's in Your Embedding, And How It Predicts Task Performance", "abstract": "Attempts to find a single technique for general-purpose intrinsic evaluation of word embeddings have so far not been successful. We present a new approach based on scaled-up qualitative analysis of word vector neighborhoods that quantifies interpretable characteristics of a given model (e.g. its preference for synonyms or shared morphological forms as nearest neighbors). 
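The teufel-van-halteren-2004-evaluating record above introduces a weighted factoid score computed against multiple gold summaries. A minimal recall-style sketch, assuming factoid weights derived from how many of the 20-50 reference summaries mention each factoid (the paper's exact weighting may differ):

```python
def weighted_factoid_score(system_factoids, gold_factoid_weights):
    """Weight of gold factoids covered by the system summary, divided by
    the total gold weight; factoids shared by more references count more."""
    covered = sum(w for f, w in gold_factoid_weights.items() if f in system_factoids)
    total = sum(gold_factoid_weights.values())
    return covered / total if total else 0.0

# Hypothetical factoid IDs with reference-summary counts as weights.
gold = {"suspect_arrested": 18, "two_injured": 9, "motive_unknown": 3}
print(weighted_factoid_score({"suspect_arrested", "motive_unknown"}, gold))  # 0.7
```

Because factoids are atomic information units rather than word strings, two summaries with no word overlap can still score identically, which is the point of the measure.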
We analyze 21 such factors and show how they correlate with performance on 14 extrinsic and intrinsic task datasets (and also explain the lack of correlation between some of them). Our approach enables multi-faceted evaluation, parameter search, and generally \u2013 a more principled, hypothesis-driven approach to development of distributional semantic representations.", "keyphrases": ["task performance", "intrinsic evaluation", "word embedding", "factor"]} +{"id": "wang-etal-2019-evidence", "title": "Evidence Sentence Extraction for Machine Reading Comprehension", "abstract": "Remarkable success has been achieved in the last few years on some limited machine reading comprehension (MRC) tasks. However, it is still difficult to interpret the predictions of existing MRC models. In this paper, we focus on extracting evidence sentences that can explain or support the answers of multiple-choice MRC tasks, where the majority of answer options cannot be directly extracted from reference documents. Due to the lack of ground truth evidence sentence labels in most cases, we apply distant supervision to generate imperfect labels and then use them to train an evidence sentence extractor. To denoise the noisy labels, we apply a recently proposed deep probabilistic logic learning framework to incorporate both sentence-level and cross-sentence linguistic indicators for indirect supervision. We feed the extracted evidence sentences into existing MRC models and evaluate the end-to-end performance on three challenging multiple-choice MRC datasets: MultiRC, RACE, and DREAM, achieving comparable or better performance than the same models that take as input the full reference document. To the best of our knowledge, this is the first work extracting evidence sentences for multiple-choice MRC.", "keyphrases": ["machine reading comprehension", "mrc", "noisy label"]} +{"id": "ramteke-etal-2013-detecting", "title": "Detecting Turnarounds in Sentiment Analysis: Thwarting", "abstract": "Thwarting and sarcasm are two uncharted territories in sentiment analysis, the former because of the lack of training corpora and the latter because of the enormous amount of world knowledge it demands. In this paper, we propose a working definition of thwarting amenable to machine learning and create a system that detects if the document is thwarted or not. We focus on identifying thwarting in product reviews, especially in the camera domain. An ontology of the camera domain is created. Thwarting is looked upon as the phenomenon of polarity reversal at a higher level of ontology compared to the polarity expressed at the lower level. This notion of thwarting defined with respect to an ontology is novel, to the best of our knowledge. A rule based implementation building upon this idea forms our baseline. We show that machine learning with annotated corpora (thwarted/non-thwarted) is more effective than the rule based system. Because of the skewed distribution of thwarting, we adopt the Area-under-the-Curve measure of performance. To the best of our knowledge, this is the first attempt at the difficult problem of thwarting detection, which we hope will at least provide a baseline system to compare against.", "keyphrases": ["sentiment analysis", "thwarting", "sarcasm"]} +{"id": "lee-2011-toward", "title": "Toward a Parallel Corpus of Spoken Cantonese and Written Chinese", "abstract": "We introduce a parallel corpus of spoken Cantonese and written Chinese. 
This sentence-aligned corpus consists of transcriptions of Cantonese spoken in television programs in Hong Kong, and their corresponding Chinese (Mandarin) subtitles. Preliminary evaluation shows that the corpus reflects known syntactic differences between Cantonese and Mandarin, facilitates quantitative analyses on these differences, and already reveals some phenomena not yet discussed in the literature.", "keyphrases": ["parallel corpus", "spoken cantonese", "chinese"]} +{"id": "tan-2022-diversity", "title": "On the Diversity and Limits of Human Explanations", "abstract": "A growing effort in NLP aims to build datasets of human explanations. However, it remains unclear whether these datasets serve their intended goals. This problem is exacerbated by the fact that the term explanation is overloaded and refers to a broad range of notions with different properties and ramifications. Our goal is to provide an overview of the diversity of explanations, discuss human limitations in providing explanations, and ultimately provide implications for collecting and using human explanations in NLP. Inspired by prior work in psychology and cognitive sciences, we group existing human explanations in NLP into three categories: proximal mechanism, evidence, and procedure. These three types differ in nature and have implications for the resultant explanations. For instance, procedure is not considered explanation in psychology and connects with a rich body of work on learning from instructions. The diversity of explanations is further evidenced by proxy questions that are needed for annotators to interpret and answer \u201cwhy is [input] assigned [label]\u201d. Finally, giving explanations may require different, often deeper, understandings than predictions, which casts doubt on whether humans can provide valid explanations in some tasks.", "keyphrases": ["diversity", "human explanation", "instruction"]} +{"id": "tackstrom-etal-2013-token", "title": "Token and Type Constraints for Cross-Lingual Part-of-Speech Tagging", "abstract": "We consider the construction of part-of-speech taggers for resource-poor languages. Recently, manually constructed tag dictionaries from Wiktionary and dictionaries projected via bitext have been used as type constraints to overcome the scarcity of annotated data in this setting. In this paper, we show that additional token constraints can be projected from a resource-rich source language to a resource-poor target language via word-aligned bitext. We present several models to this end; in particular a partially observed conditional random field model, where coupled token and type constraints provide a partial signal for training. Averaged across eight previously studied Indo-European languages, our model achieves a 25% relative error reduction over the prior state of the art. 
We further present successful results on seven additional languages from different families, empirically demonstrating the applicability of coupled token and type constraints across a diverse set of languages.", "keyphrases": ["type constraint", "tagging", "resource-poor language", "supervised language"]} +{"id": "gimpel-smith-2009-cube", "title": "Cube Summing, Approximate Inference with Non-Local Features, and Dynamic Programming without Semirings", "abstract": "We introduce cube summing, a technique that permits dynamic programming algorithms for summing over structures (like the forward and inside algorithms) to be extended with non-local features that violate the classical structural independence assumptions. It is inspired by cube pruning (Chiang, 2007; Huang and Chiang, 2007) in its computation of non-local features dynamically using scored k-best lists, but also maintains additional residual quantities used in calculating approximate marginals. When restricted to local features, cube summing reduces to a novel semiring (k-best+residual) that generalizes many of the semirings of Goodman (1999). When non-local features are included, cube summing does not reduce to any semiring, but is compatible with generic techniques for solving dynamic programming equations.", "keyphrases": ["semiring", "dynamic programming algorithm", "cube summing"]} +{"id": "ji-etal-2016-latent", "title": "A Latent Variable Recurrent Neural Network for Discourse-Driven Language Models", "abstract": "This paper presents a novel latent variable recurrent neural network architecture for jointly modeling sequences of words and (possibly latent) discourse relations between adjacent sentences. A recurrent neural network generates individual words, thus reaping the benefits of discriminatively-trained vector representations. The discourse relations are represented with a latent variable, which can be predicted or marginalized, depending on the task. The resulting model can therefore employ a training objective that includes not only discourse relation classification, but also word prediction. As a result, it outperforms state-of-the-art alternatives for two tasks: implicit discourse relation classification in the Penn Discourse Treebank, and dialog act classification in the Switchboard corpus. Furthermore, by marginalizing over latent discourse relations at test time, we obtain a discourse informed language model, which improves over a strong LSTM baseline.", "keyphrases": ["latent variable", "recurrent neural network", "discourse relation classification", "network model"]} +{"id": "wang-goutte-2017-detecting", "title": "Detecting Changes in Twitter Streams using Temporal Clusters of Hashtags", "abstract": "Detecting events from social media data has important applications in public security, political issues, and public health. Many studies have focused on detecting specific or unspecific events from Twitter streams. However, not much attention has been paid to detecting changes, and their impact, in online conversations related to an event. We propose methods for detecting such changes, using clustering of temporal profiles of hashtags, and three change point detection algorithms. The methods were tested on two Twitter datasets: one covering the 2014 Ottawa shooting event, and one covering the Sochi winter Olympics. We compare our approach to a baseline consisting of detecting change from raw counts in the conversation. 
We show that our method produces large gains in change detection accuracy on both datasets.", "keyphrases": ["hashtag", "temporal profile", "twitter dataset"]} +{"id": "zhu-etal-2020-convlab", "title": "ConvLab-2: An Open-Source Toolkit for Building, Evaluating, and Diagnosing Dialogue Systems", "abstract": "We present ConvLab-2, an open-source toolkit that enables researchers to build task-oriented dialogue systems with state-of-the-art models, perform an end-to-end evaluation, and diagnose the weakness of systems. As the successor of ConvLab, ConvLab-2 inherits ConvLab's framework but integrates more powerful dialogue models and supports more datasets. Besides, we have developed an analysis tool and an interactive tool to assist researchers in diagnosing dialogue systems. The analysis tool presents rich statistics and summarizes common mistakes from simulated dialogues, which facilitates error analysis and system improvement. The interactive tool provides a user interface that allows developers to diagnose an assembled dialogue system by interacting with the system and modifying the output of each system component.", "keyphrases": ["open-source toolkit", "dialogue system", "convlab-2"]} +{"id": "zhao-etal-2012-novel", "title": "A Novel Burst-based Text Representation Model for Scalable Event Detection", "abstract": "Mining retrospective events from text streams has been an important research topic. Classic text representation model (i.e., vector space model) cannot model temporal aspects of documents. To address it, we proposed a novel burst-based text representation model, denoted as BurstVSM. BurstVSM corresponds dimensions to bursty features instead of terms, which can capture semantic and temporal information. Meanwhile, it significantly reduces the number of non-zero entries in the representation. We test it via scalable event detection, and experiments in a 10-year news archive show that our methods are both effective and efficient.", "keyphrases": ["text representation model", "scalable event detection", "research topic"]} +{"id": "lacerra-etal-2021-genesis", "title": "GeneSis: A Generative Approach to Substitutes in Context", "abstract": "The lexical substitution task aims at generating a list of suitable replacements for a target word in context, ideally keeping the meaning of the modified text unchanged. While its usage has increased in recent years, the paucity of annotated data prevents the finetuning of neural models on the task, hindering the full fruition of recently introduced powerful architectures such as language models. Furthermore, lexical substitution is usually evaluated in a framework that is strictly bound to a limited vocabulary, making it impossible to credit appropriate, but out-of-vocabulary, substitutes. To assess these issues, we proposed GeneSis (Generating Substitutes in contexts), the first generative approach to lexical substitution. Thanks to a seq2seq model, we generate substitutes for a word according to the context it appears in, attaining state-of-the-art results on different benchmarks. Moreover, our approach allows silver data to be produced for further improving the performances of lexical substitution systems. Along with an extensive analysis of GeneSis results, we also present a human evaluation of the generated substitutes in order to assess their quality. 
We release the fine-tuned models, the generated datasets, and the code to reproduce the experiments at .", "keyphrases": ["generative approach", "substitute", "genesis"]} +{"id": "gao-johnson-2008-comparison", "title": "A comparison of Bayesian estimators for unsupervised Hidden Markov Model POS taggers", "abstract": "There is growing interest in applying Bayesian techniques to NLP problems. There are a number of different estimators for Bayesian models, and it is useful to know what kinds of tasks each does well on. This paper compares a variety of different Bayesian estimators for Hidden Markov Model POS taggers with various numbers of hidden states on data sets of different sizes. Recent papers have given contradictory results when comparing Bayesian estimators to Expectation Maximization (EM) for unsupervised HMM POS tagging, and we show that the difference in reported results is largely due to differences in the size of the training data and the number of states in the HMM. We investigate a variety of samplers for HMMs, including some that these earlier papers did not study. We find that all of the Gibbs samplers do well with small data sets and few states, and that Variational Bayes does well on large data sets and is competitive with the Gibbs samplers. In terms of times of convergence, we find that Variational Bayes was the fastest of all the estimators, especially on large data sets, and that the explicit Gibbs samplers (both pointwise and sentence-blocked) were generally faster than their collapsed counterparts on large data sets.", "keyphrases": ["bayesian estimator", "hmm", "pos induction"]} +{"id": "el-kholy-habash-2011-automatic", "title": "Automatic Error Analysis for Morphologically Rich Languages", "abstract": "This paper presents AMEANA, an open-source tool for error analysis for natural language processing tasks targeting morphologically rich languages. Unlike standard evaluation metrics such as BLEU or WER, AMEANA automatically provides a detailed error analysis that can help researchers and developers better understand the strengths and weaknesses of their systems. AMEANA is easily adaptable to any language provided the existence of a morphological analyzer. In this paper, we focus on usability in the context of Machine Translation (MT) and demonstrate it specifically for English-to-Arabic MT.", "keyphrases": ["error analysis", "rich language", "morphological analyzer"]} +{"id": "marasovic-etal-2022-shot", "title": "Few-Shot Self-Rationalization with Natural Language Prompts", "abstract": "Self-rationalization models that predict task labels and generate free-text elaborations for their predictions could enable more intuitive interaction with NLP systems. These models are, however, currently trained with a large amount of human-written free-text explanations for each task which hinders their broader usage. We propose to study a more realistic setting of self-rationalization using few training examples. We present FEB\u2014a standardized collection of four existing English-language datasets and associated metrics. We identify the right prompting approach by extensively exploring natural language prompts on FEB. Then, by using this prompt and scaling the model size, we demonstrate that making progress on few-shot self-rationalization is possible. We show there is still ample room for improvement in this task: the average plausibility of generated explanations assessed by human annotators is at most 51% (with GPT-3), while plausibility of human explanations is 76%. 
We hope that FEB and our proposed approach will spur the community to take on the few-shot self-rationalization challenge.", "keyphrases": ["self-rationalization", "natural language prompt", "explanation"]} +{"id": "bhatia-etal-2016-morphological", "title": "Morphological Priors for Probabilistic Neural Word Embeddings", "abstract": "Word embeddings allow natural language processing systems to share statistical information across related words. These embeddings are typically based on distributional statistics, making it difficult for them to generalize to rare or unseen words. We propose to improve word embeddings by incorporating morphological information, capturing shared sub-word features. Unlike previous work that constructs word embeddings directly from morphemes, we combine morphological and distributional information in a unified probabilistic framework, in which the word embedding is a latent variable. The morphological information provides a prior distribution on the latent word embeddings, which in turn condition a likelihood function over an observed corpus. This approach yields improvements on intrinsic word similarity evaluations, and also in the downstream task of part-of-speech tagging.", "keyphrases": ["word embedding", "morphological information", "probabilistic framework"]} +{"id": "khaltar-etal-2006-extracting", "title": "Extracting Loanwords from Mongolian Corpora and Producing a Japanese-Mongolian Bilingual Dictionary", "abstract": "This paper proposes methods for extracting loanwords from Cyrillic Mongolian corpora and producing a Japanese-Mongolian bilingual dictionary. We extract loanwords from Mongolian corpora using our own handcrafted rules. To complement the rule-based extraction, we also extract words in Mongolian corpora that are phonetically similar to Japanese Katakana words as loanwords. In addition, we correspond the extracted loanwords to Japanese words and produce a bilingual dictionary. We propose a stemming method for Mongolian to extract loanwords correctly. We verify the effectiveness of our methods experimentally.", "keyphrases": ["loanword", "mongolian corpora", "japanese-mongolian bilingual dictionary"]} +{"id": "zarcone-etal-2013-fitting", "title": "Fitting, Not Clashing! A Distributional Semantic Model of Logical Metonymy", "abstract": "Logical metonymy interpretation (e.g. begin the book \u2192 writing) has received wide attention in linguistics. Experimental results have shown higher processing costs for metonymic conditions compared with non-metonymic ones (read the book). According to a widely held interpretation, it is the type clash between the event-selecting verb and the entity-denoting object (begin the book) that triggers coercion mechanisms and leads to additional processing effort. We propose an alternative explanation and argue that the extra processing effort is an effect of thematic fit. This is a more economical hypothesis that does not need to postulate a separate type clash mechanism: entity-denoting objects simply have a low fit as objects of event-selecting verbs. We test linguistic datasets from psycholinguistic experiments and find that a structured distributional model of thematic fit, which does not encode any explicit argument type information, is able to replicate all significant experimental findings. 
This result provides evidence for a graded account of coercion phenomena in which thematic fit accounts for both the trigger of the coercion and the retrieval of the covert event.", "keyphrases": ["logical metonymy", "object", "thematic fit"]} +{"id": "baker-sato-2003-framenet", "title": "The FrameNet Data and Software", "abstract": "The FrameNet project has developed a lexical knowledge base providing a unique level of detail as to the possible syntactic realizations of the specific semantic roles evoked by each predicator, for roughly 7,000 lexical units, on the basis of annotating more than 100,000 example sentences extracted from corpora. An interim version of the FrameNet data was released in October, 2002 and is being widely used. A new, more portable version of the FrameNet software is also being made available to researchers elsewhere, including the Spanish FrameNet project. This demo and poster will briefly explain the principles of Frame Semantics and demonstrate the new unified tools for lexicon building and annotation and also FrameSQL, a search tool for finding patterns in annotated sentences. We will discuss the content and format of the data releases and how the software and data can be used by other NLP researchers.", "keyphrases": ["framenet data", "software", "lexical unit"]} +{"id": "oraby-etal-2017-serious", "title": "Are you serious?: Rhetorical Questions and Sarcasm in Social Media Dialog", "abstract": "Effective models of social dialog must understand a broad range of rhetorical and figurative devices. Rhetorical questions (RQs) are a type of figurative language whose aim is to achieve a pragmatic goal, such as structuring an argument, being persuasive, emphasizing a point, or being ironic. While there are computational models for other forms of figurative language, rhetorical questions have received little attention to date. We expand a small dataset from previous work, presenting a corpus of 10,270 RQs from debate forums and Twitter that represent different discourse functions. We show that we can clearly distinguish between RQs and sincere questions (0.76 F1). We then show that RQs can be used both sarcastically and non-sarcastically, observing that non-sarcastic (other) uses of RQs are frequently argumentative in forums, and persuasive in tweets. We present experiments to distinguish between these uses of RQs using SVM and LSTM models that represent linguistic features and post-level context, achieving results as high as 0.76 F1 for \u201csarcastic\u201d and 0.77 F1 for \u201cother\u201d in forums, and 0.83 F1 for both \u201csarcastic\u201d and \u201cother\u201d in tweets. We supplement our quantitative experiments with an in-depth characterization of the linguistic variation in RQs.", "keyphrases": ["rhetorical question", "sarcasm", "debate forum", "twitter"]} +{"id": "huck-etal-2017-target", "title": "Target-side Word Segmentation Strategies for Neural Machine Translation", "abstract": "For efficiency considerations, state-of-the-art neural machine translation (NMT) requires the vocabulary to be restricted to a limited-size set of several thousand symbols. This is highly problematic when translating into inflected or compounding languages. A typical remedy is the use of subword units, where words are segmented into smaller components. Byte pair encoding, a purely corpus-based approach, has proved effective recently. In this paper, we investigate word segmentation strategies that incorporate more linguistic knowledge. 
We demonstrate that linguistically informed target word segmentation is better suited for NMT, leading to improved translation quality on the order of magnitude of +0.5 BLEU and \u22120.9 TER for a medium-scale English \u2192 German translation task. Our work is important in that it shows that linguistic knowledge can be used to improve NMT results over results based only on the language-agnostic byte pair encoding vocabulary reduction technique.", "keyphrases": ["segmentation", "neural machine translation", "linguistic knowledge", "bpe"]} +{"id": "erk-2009-supporting", "title": "Supporting inferences in semantic space: representing words as regions", "abstract": "Semantic space models represent the meaning of a word as a vector in high-dimensional space. They offer a framework in which the meaning representation of a word can be computed from its context, but the question remains how they support inferences. While there has been some work on paraphrase-based inferences in semantic space, it is not clear how semantic space models would support inferences involving hyponymy, like horse ran \u2192 animal moved. In this paper, we first discuss what a point in semantic space stands for, contrasting semantic space with G\u00e4rdenforsian conceptual space. Building on this, we propose an extension of the semantic space representation from a point to a region. We present a model for learning a region representation for word meaning in semantic space, based on the fact that points at close distance tend to represent similar meanings. We show that this model can be used to predict, with high precision, when a hyponymy-based inference rule is applicable. Moving beyond paraphrase-based and hyponymy-based inference rules, we last discuss in what way semantic space models can support inferences.", "keyphrases": ["semantic space", "region", "hyponymy"]} +{"id": "batchelor-2019-universal", "title": "Universal dependencies for Scottish Gaelic: syntax", "abstract": "We present universal dependencies for Scottish Gaelic and a treebank of 1021 sentences (20 021 tokens) drawn from the Annotated Reference Corpus Of Scottish Gaelic (ARCOSG). The tokens are annotated for coarse part-of-speech, fine-grained part-of-speech, syntactic features and dependency relations. We discuss how the annotations differ from the treebanks developed for two other Celtic languages, Irish and Breton, and in preliminary dependency parsing experiments we obtain a mean labelled attachment score of 0.792. We also discuss some difficult cases for future investigation, including cosubordination. The treebank is available, along with documentation, from https://universaldependencies.org/.", "keyphrases": ["scottish gaelic", "celtic language", "universal dependency"]} +{"id": "zenkel-etal-2020-end", "title": "End-to-End Neural Word Alignment Outperforms GIZA++", "abstract": "Word alignment was once a core unsupervised learning task in natural language processing because of its essential role in training statistical machine translation (MT) models. Although unnecessary for training neural MT models, word alignment still plays an important role in interactive applications of neural machine translation, such as annotation transfer and lexicon injection. While statistical MT methods have been replaced by neural approaches with superior performance, the twenty-year-old GIZA++ toolkit remains a key component of state-of-the-art word alignment systems. 
Prior work on neural word alignment has only been able to outperform GIZA++ by using its output during training. We present the first end-to-end neural word alignment method that consistently outperforms GIZA++ on three data sets. Our approach repurposes a Transformer model trained for supervised translation to also serve as an unsupervised word alignment model in a manner that is tightly integrated and does not affect translation quality.", "keyphrases": ["word alignment", "giza++", "end-to-end", "loss function"]} +{"id": "li-etal-2019-cnm", "title": "CNM: An Interpretable Complex-valued Network for Matching", "abstract": "This paper seeks to model human language by the mathematical framework of quantum physics. With the well-designed mathematical formulations in quantum physics, this framework unifies different linguistic units in a single complex-valued vector space, e.g. words as particles in quantum states and sentences as mixed systems. A complex-valued network is built to implement this framework for semantic matching. With well-constrained complex-valued components, the network admits interpretations to explicit physical meanings. The proposed complex-valued network for matching (CNM) achieves comparable performances to strong CNN and RNN baselines on two benchmarking question answering (QA) datasets.", "keyphrases": ["complex-valued network", "unit", "cnm"]} +{"id": "scarton-etal-2016-word", "title": "Word embeddings and discourse information for Quality Estimation", "abstract": "In this paper we present the results of the University of Sheffield (SHEF) submissions for the WMT16 shared task on document-level Quality Estimation (Task 3). Our submissions explore discourse and document-aware information and word embeddings as features, with Support Vector Regression and Gaussian Process used to train the Quality Estimation models. The use of word embeddings (combined with baseline features) and a Gaussian Process model with two kernels led to the winning submission in the shared task.", "keyphrases": ["quality estimation", "wmt16", "word embedding"]} +{"id": "severyn-etal-2015-distributional", "title": "Distributional Neural Networks for Automatic Resolution of Crossword Puzzles", "abstract": "Automatic resolution of Crossword Puzzles (CPs) heavily depends on the quality of the answer candidate lists produced by a retrieval system for each clue of the puzzle grid. Previous work has shown that such lists can be generated using Information Retrieval (IR) search algorithms applied to the databases containing previously solved CPs and reranked with tree kernels (TKs) applied to a syntactic tree representation of the clues. In this paper, we create a labelled dataset of 2 million clues on which we apply an innovative Distributional Neural Network (DNN) for reranking clue pairs. Our DNN is computationally efficient and can thus take advantage of such large datasets showing a large improvement over the TK approach, when the latter uses small training data. In contrast, when data is scarce, TKs outperform DNNs.", "keyphrases": ["automatic resolution", "crossword puzzles", "database", "distributional neural network"]} +{"id": "amiri-etal-2016-learning", "title": "Learning Text Pair Similarity with Context-sensitive Autoencoders", "abstract": "We present a pairwise context-sensitive Autoencoder for computing text pair similarity. Our model encodes input text into context-sensitive representations and uses them to compute similarity between text pairs. 
Our model outperforms the state-of-the-art models in two semantic retrieval tasks and a contextual word similarity task. For retrieval, our unsupervised approach that merely ranks inputs with respect to the cosine similarity between their hidden representations shows comparable performance with the state-of-the-art supervised models and in some cases outperforms them.", "keyphrases": ["text pair similarity", "autoencoder", "input text"]} +{"id": "zhang-etal-2017-macro", "title": "Macro Grammars and Holistic Triggering for Efficient Semantic Parsing", "abstract": "To learn a semantic parser from denotations, a learning algorithm must search over a combinatorially large space of logical forms for ones consistent with the annotated denotations. We propose a new online learning algorithm that searches faster as training progresses. The two key ideas are using macro grammars to cache the abstract patterns of useful logical forms found thus far, and holistic triggering to efficiently retrieve the most relevant patterns based on sentence similarity. On the WikiTableQuestions dataset, we first expand the search space of an existing model to improve the state-of-the-art accuracy from 38.7% to 42.7%, and then use macro grammars and holistic triggering to achieve an 11x speedup and an accuracy of 43.7%.", "keyphrases": ["holistic triggering", "semantic parser", "search space"]} +{"id": "hessel-etal-2019-unsupervised", "title": "Unsupervised Discovery of Multimodal Links in Multi-image, Multi-sentence Documents", "abstract": "Images and text co-occur constantly on the web, but explicit links between images and sentences (or other intra-document textual units) are often not present. We present algorithms that discover image-sentence relationships without relying on explicit multimodal annotation in training. We experiment on seven datasets of varying difficulty, ranging from documents consisting of groups of images captioned post hoc by crowdworkers to naturally-occurring user-generated multimodal documents. We find that a structured training objective based on identifying whether collections of images and sentences co-occur in documents can suffice to predict links between specific sentences and specific images within the same document at test time.", "keyphrases": ["image", "image-sentence relationship", "multimodal document"]} +{"id": "wu-etal-2021-text", "title": "A Text-Centered Shared-Private Framework via Cross-Modal Prediction for Multimodal Sentiment Analysis", "abstract": "Multimodal fusion is a core problem for multimodal sentiment analysis. Previous works usually treat all three modal features equally and implicitly explore the interactions between different modalities. In this paper, we break this kind of methods in two ways. Firstly, we observe that textual modality plays the most important role in multimodal sentiment analysis, and this can be seen from the previous works. Secondly, we observe that comparing to the textual modality, the other two kinds of nontextual modalities (visual and acoustic) can provide two kinds of semantics, shared and private semantics. The shared semantics from the other two modalities can obviously enhance the textual semantics and make the sentiment analysis model more robust, and the private semantics can be complementary to the textual semantics and meanwhile provide different views to improve the performance of sentiment analysis together with the shared semantics. 
Motivated by these two observations, we propose a text-centered shared-private framework (TCSP) for multimodal fusion, which consists of the cross-modal prediction and sentiment regression parts. Experiments on the MOSEI and MOSI datasets demonstrate the effectiveness of our shared-private framework, which outperforms all baselines. Furthermore, our approach provides a new way to utilize the unlabeled data for multimodal sentiment analysis.", "keyphrases": ["text-centered shared-private framework", "cross-modal prediction", "multimodal sentiment analysis"]} +{"id": "ma-etal-2011-consistent", "title": "Consistent Translation using Discriminative Learning - A Translation Memory-inspired Approach", "abstract": "We present a discriminative learning method to improve the consistency of translations in phrase-based Statistical Machine Translation (SMT) systems. Our method is inspired by Translation Memory (TM) systems which are widely used by human translators in industrial settings. We constrain the translation of an input sentence using the most similar 'translation example' retrieved from the TM. Differently from previous research which used simple fuzzy match thresholds, these constraints are imposed using discriminative learning to optimise the translation performance. We observe that using this method can benefit the SMT system by not only producing consistent translations, but also improved translation outputs. We report a 0.9 point improvement in terms of BLEU score on English--Chinese technical documents.", "keyphrases": ["discriminative learning", "input sentence", "smt system", "consistent translation"]} +{"id": "rozovskaya-roth-2019-grammar", "title": "Grammar Error Correction in Morphologically Rich Languages: The Case of Russian", "abstract": "Until now, most of the research in grammar error correction focused on English, and the problem has hardly been explored for other languages. We address the task of correcting writing mistakes in morphologically rich languages, with a focus on Russian. We present a corrected and error-tagged corpus of Russian learner writing and develop models that make use of existing state-of-the-art methods that have been well studied for English. Although impressive results have recently been achieved for grammar error correction of non-native English writing, these results are limited to domains where plentiful training data are available. Because annotation is extremely costly, these approaches are not suitable for the majority of domains and languages. We thus focus on methods that use \u201cminimal supervision\u201d; that is, those that do not rely on large amounts of annotated training data, and show how existing minimal-supervision approaches extend to a highly inflectional language such as Russian. The results demonstrate that these methods are particularly useful for correcting mistakes in grammatical phenomena that involve rich morphology.", "keyphrases": ["other language", "russian learner writing", "grammar error correction"]} +{"id": "leong-mihalcea-2011-going", "title": "Going Beyond Text: A Hybrid Image-Text Approach for Measuring Word Relatedness", "abstract": "Traditional approaches to semantic relatedness are often restricted to text-based methods, which typically disregard other multimodal knowledge sources. In this paper, we propose a novel image-based metric to estimate the relatedness of words, and demonstrate the promise of this method through comparative evaluations on three standard datasets. 
We also show that a hybrid image-text approach can lead to improvements in word relatedness, confirming the applicability of visual cues as a possible orthogonal information source.", "keyphrases": ["hybrid image-text approach", "word relatedness", "image"]} +{"id": "roark-bacchiani-2003-supervised", "title": "Supervised and unsupervised PCFG adaptation to novel domains", "abstract": "This paper investigates adapting a lexicalized probabilistic context-free grammar (PCFG) to a novel domain, using maximum a posteriori (MAP) estimation. The MAP framework is general enough to include some previous model adaptation approaches, such as corpus mixing in Gildea (2001), for example. Other approaches falling within this framework are more effective. In contrast to the results in Gildea (2001), we show F-measure parsing accuracy gains of as much as 2.5% for high accuracy lexicalized parsing through the use of out-of-domain treebanks, with the largest gains when the amount of in-domain data is small. MAP adaptation can also be based on either supervised or unsupervised adaptation data. Even when no in-domain treebank is available, unsupervised techniques provide a substantial accuracy gain over unadapted grammars, as much as nearly 5% F-measure improvement.", "keyphrases": ["pcfg adaptation", "novel domain", "posteriori", "treebank"]} +{"id": "wang-cohen-2015-joint", "title": "Joint Information Extraction and Reasoning: A Scalable Statistical Relational Learning Approach", "abstract": "A standard pipeline for statistical relational learning involves two steps: one first constructs the knowledge base (KB) from text, and then performs the learning and reasoning tasks using probabilistic first-order logics. However, a key issue is that information extraction (IE) errors from text affect the quality of the KB, and propagate to the reasoning task. In this paper, we propose a statistical relational learning model for joint information extraction and reasoning. More specifically, we incorporate context-based entity extraction with structure learning (SL) in a scalable probabilistic logic framework. We then propose a latent context invention (LCI) approach to improve the performance. In experiments, we show that our approach outperforms state-of-the-art baselines over three real-world Wikipedia datasets from multiple domains; that joint learning and inference for IE and SL significantly improve both tasks; that latent context invention further improves the results.", "keyphrases": ["reasoning", "structure learning", "joint information extraction"]} +{"id": "ye-etal-2021-crossfit", "title": "CrossFit: A Few-shot Learning Challenge for Cross-task Generalization in NLP", "abstract": "Humans can learn a new language task efficiently with only few examples, by leveraging their knowledge obtained when learning prior tasks. In this paper, we explore whether and how such cross-task generalization ability can be acquired, and further applied to build better few-shot learners across diverse NLP tasks. We introduce CrossFit, a problem setup for studying cross-task generalization ability, which standardizes seen/unseen task partitions, data access during different learning stages, and the evaluation protocols. To instantiate different seen/unseen task partitions in CrossFit and facilitate in-depth analysis, we present the NLP Few-shot Gym, a repository of 160 diverse few-shot NLP tasks created from open-access NLP datasets and converted to a unified text-to-text format. 
Our analysis reveals that the few-shot learning ability on unseen tasks can be improved via an upstream learning stage using a set of seen tasks. We also observe that the selection of upstream learning tasks can significantly influence few-shot performance on unseen tasks, asking further analysis on task similarity and transferability.", "keyphrases": ["cross-task generalization", "problem setup", "crossfit"]} +{"id": "wang-etal-2013-lattice", "title": "A Lattice-based Framework for Joint Chinese Word Segmentation, POS Tagging and Parsing", "abstract": "For the cascaded task of Chinese word segmentation, POS tagging and parsing, the pipeline approach suffers from error propagation while the joint learning approach suffers from inefficient decoding due to the large combined search space. In this paper, we present a novel lattice-based framework in which a Chinese sentence is first segmented into a word lattice, and then a lattice-based POS tagger and a lattice-based parser are used to process the lattice from two different viewpoints: sequential POS tagging and hierarchical tree building. A strategy is designed to exploit the complementary strengths of the tagger and parser, and encourage them to predict agreed structures. Experimental results on Chinese Treebank show that our lattice-based framework significantly improves the accuracy of the three sub-tasks.", "keyphrases": ["pos tagging", "chinese sentence", "word lattice"]} +{"id": "rutherford-xue-2014-discovering", "title": "Discovering Implicit Discourse Relations Through Brown Cluster Pair Representation and Coreference Patterns", "abstract": "Sentences form coherent relations in a discourse without discourse connectives more frequently than with connectives. Senses of these implicit discourse relations that hold between a sentence pair, however, are challenging to infer. Here, we employ Brown cluster pairs to represent discourse relation and incorporate coreference patterns to identify senses of implicit discourse relations in naturally occurring text. Our system improves the baseline performance by as much as 25%. Feature analyses suggest that Brown cluster pairs and coreference patterns can reveal many key linguistic characteristics of each type of discourse relation.", "keyphrases": ["discourse relation", "coreference pattern", "linguistically-informed feature"]} +{"id": "nomoto-2004-multi", "title": "Multi-Engine Machine Translation with Voted Language Model", "abstract": "The paper describes a particular approach to multiengine machine translation (MEMT), where we make use of voted language models to selectively combine translation outputs from multiple off-the-shelf MT systems. Experiments are done using large corpora from three distinct domains. The study found that the use of voted language models leads to an improved performance of MEMT systems.", "keyphrases": ["machine translation", "hypothesis", "such setup"]} +{"id": "dou-knight-2012-large", "title": "Large Scale Decipherment for Out-of-Domain Machine Translation", "abstract": "We apply slice sampling to Bayesian decipherment and use our new decipherment framework to improve out-of-domain machine translation. Compared with the state of the art algorithm, our approach is highly scalable and produces better results, which allows us to decipher ciphertext with billions of tokens and hundreds of thousands of word types with high accuracy. 
We decipher a large amount of monolingual data to improve out-of-domain translation and achieve significant gains of up to 3.8 BLEU points.", "keyphrases": ["decipherment", "monolingual data", "phrase table", "non parallel data"]} +{"id": "kim-etal-2019-effective", "title": "Effective Cross-lingual Transfer of Neural Machine Translation Models without Shared Vocabularies", "abstract": "Transfer learning or multilingual model is essential for low-resource neural machine translation (NMT), but the applicability is limited to cognate languages by sharing their vocabularies. This paper shows effective techniques to transfer a pretrained NMT model to a new, unrelated language without shared vocabularies. We relieve the vocabulary mismatch by using cross-lingual word embedding, train a more language-agnostic encoder by injecting artificial noises, and generate synthetic data easily from the pretraining data without back-translation. Our methods do not require restructuring the vocabulary or retraining the model. We improve plain NMT transfer by up to +5.1% BLEU in five low-resource translation tasks, outperforming multilingual joint training by a large margin. We also provide extensive ablation studies on pretrained embedding, synthetic data, vocabulary size, and parameter freezing for a better understanding of NMT transfer.", "keyphrases": ["cross-lingual transfer", "vocabulary mismatch", "source language"]} +{"id": "castilho-2021-towards", "title": "Towards Document-Level Human MT Evaluation: On the Issues of Annotator Agreement, Effort and Misevaluation", "abstract": "Document-level human evaluation of machine translation (MT) has been raising interest in the community. However, little is known about the issues of using document-level methodologies to assess MT quality. In this article, we compare the inter-annotator agreement (IAA) scores, the effort to assess the quality in different document-level methodologies, and the issue of misevaluation when sentences are evaluated out of context.", "keyphrases": ["misevaluation", "inter-annotator agreement", "single sentence"]} +{"id": "gao-etal-2019-rebuttal", "title": "Does My Rebuttal Matter? Insights from a Major NLP Conference", "abstract": "Peer review is a core element of the scientific process, particularly in conference-centered fields such as ML and NLP. However, only few studies have evaluated its properties empirically. Aiming to fill this gap, we present a corpus that contains over 4k reviews and 1.2k author responses from ACL-2018. We quantitatively and qualitatively assess the corpus. This includes a pilot study on paper weaknesses given by reviewers and on quality of author responses. We then focus on the role of the rebuttal phase, and propose a novel task to predict after-rebuttal (i.e., final) scores from initial reviews and author responses. Although author responses do have a marginal (and statistically significant) influence on the final scores, especially for borderline papers, our results suggest that a reviewer's final score is largely determined by her initial score and the distance to the other reviewers' initial scores. In this context, we discuss the conformity bias inherent to peer reviewing, a bias that has largely been overlooked in previous research. 
We hope our analyses will help better assess the usefulness of the rebuttal phase in NLP conferences.", "keyphrases": ["nlp conference", "influence", "conformity bias"]} +{"id": "schwartz-etal-2014-machine", "title": "Machine Translation and Monolingual Postediting: The AFRL WMT-14 System", "abstract": "This paper describes the AFRL statistical MT system and the improvements that were developed during the WMT14 evaluation campaign. As part of these efforts we experimented with a number of extensions to the standard phrase-based model that improve performance on Russian to English and Hindi to English translation tasks. In addition, we describe our efforts to make use of monolingual English speakers to correct the output of machine translation, and present the results of monolingual postediting of the entire 3003 sentences of the WMT14 Russian-English test set.", "keyphrases": ["monolingual postediting", "machine translation", "post-editor"]} +{"id": "dunietz-etal-2017-corpus", "title": "The BECauSE Corpus 2.0: Annotating Causality and Overlapping Relations", "abstract": "Language of cause and effect captures an essential component of the semantics of a text. However, causal language is also intertwined with other semantic relations, such as temporal precedence and correlation. This makes it difficult to determine when causation is the primary intended meaning. This paper presents BECauSE 2.0, a new version of the BECauSE corpus with exhaustively annotated expressions of causal language, but also seven semantic relations that are frequently co-present with causation. The new corpus shows high inter-annotator agreement, and yields insights both about the linguistic expressions of causation and about the process of annotating co-present semantic relations.", "keyphrases": ["because corpus", "causal language", "new version"]} +{"id": "forbes-etal-2020-social", "title": "Social Chemistry 101: Learning to Reason about Social and Moral Norms", "abstract": "Social norms\u2014the unspoken commonsense rules about acceptable social behavior\u2014are crucial in understanding the underlying causes and intents of people's actions in narratives. For example, underlying an action such as \u201cwanting to call cops on my neighbor\u201d are social norms that inform our conduct, such as \u201cIt is expected that you report crimes.\u201d We present SOCIAL CHEMISTRY, a new conceptual formalism to study people's everyday social norms and moral judgments over a rich spectrum of real life situations described in natural language. We introduce SOCIAL-CHEM-101, a large-scale corpus that catalogs 292k rules-of-thumb such as \u201cIt is rude to run a blender at 5am\u201d as the basic conceptual units. Each rule-of-thumb is further broken down with 12 different dimensions of people's judgments, including social judgments of good and bad, moral foundations, expected cultural pressure, and assumed legality, which together amount to over 4.5 million annotations of categorical labels and free-text descriptions. Comprehensive empirical results based on state-of-the-art neural models demonstrate that computational modeling of social norms is a promising research direction. 
Our model framework, Neural Norm Transformer, learns and generalizes SOCIAL-CHEM-101 to successfully reason about previously unseen situations, generating relevant (and potentially novel) attribute-aware social rules-of-thumb.", "keyphrases": ["action", "narrative", "social norm", "different dimension", "social chemistry"]} +{"id": "subramanya-etal-2010-efficient", "title": "Efficient Graph-Based Semi-Supervised Learning of Structured Tagging Models", "abstract": "We describe a new scalable algorithm for semi-supervised training of conditional random fields (CRF) and its application to part-of-speech (POS) tagging. The algorithm uses a similarity graph to encourage similar n-grams to have similar POS tags. We demonstrate the efficacy of our approach on a domain adaptation task, where we assume that we have access to large amounts of unlabeled data from the target domain, but no additional labeled data. The similarity graph is used during training to smooth the state posteriors on the target domain. Standard inference can be used at test time. Our approach is able to scale to very large problems and yields significantly improved target domain accuracy.", "keyphrases": ["n-gram", "pos tagging", "label propagation"]} +{"id": "chen-etal-2016-implicit", "title": "Implicit Discourse Relation Detection via a Deep Architecture with Gated Relevance Network", "abstract": "Word pairs, which are one of the most easily accessible features between two text segments, have been proven to be very useful for detecting the discourse relations held between text segments. However, because of the data sparsity problem, the performance achieved by using word pair features is limited. In this paper, in order to overcome the data sparsity problem, we propose the use of word embeddings to replace the original words. Moreover, we adopt a gated relevance network to capture the semantic interaction between word pairs, and then aggregate those semantic interactions using a pooling layer to select the most informative interactions. Experimental results on Penn Discourse Tree Bank show that the proposed method without using manually designed features can achieve better performance on recognizing the discourse level relations in all of the relations.", "keyphrases": ["gated relevance network", "word embedding", "semantic interaction", "discourse argument"]} +{"id": "muller-etal-2021-first", "title": "First Align, then Predict: Understanding the Cross-Lingual Ability of Multilingual BERT", "abstract": "Multilingual pretrained language models have demonstrated remarkable zero-shot cross-lingual transfer capabilities. Such transfer emerges by fine-tuning on a task of interest in one language and evaluating on a distinct language, not seen during the fine-tuning. Despite promising results, we still lack a proper understanding of the source of this transfer. Using a novel layer ablation technique and analyses of the model's internal representations, we show that multilingual BERT, a popular multilingual language model, can be viewed as the stacking of two sub-networks: a multilingual encoder followed by a task-specific language-agnostic predictor. While the encoder is crucial for cross-lingual transfer and remains mostly unchanged during fine-tuning, the task predictor has little importance on the transfer and can be reinitialized during fine-tuning. 
We present extensive experiments with three distinct tasks, seventeen typologically diverse languages and multiple domains to support our hypothesis.", "keyphrases": ["cross-lingual ability", "bert", "language model", "fine-tuning"]} +{"id": "pareti-etal-2013-automatically", "title": "Automatically Detecting and Attributing Indirect Quotations", "abstract": "Direct quotations are used for opinion mining and information extraction as they have an easy to extract span and they can be attributed to a speaker with high accuracy. However, simply focusing on direct quotations ignores around half of all reported speech, which is in the form of indirect or mixed speech. This work presents the first large-scale experiments in indirect and mixed quotation extraction and attribution. We propose two methods of extracting all quote types from news articles and evaluate them on two large annotated corpora, one of which is a contribution of this work. We further show that direct quotation attribution methods can be successfully applied to indirect and mixed quotation attribution.", "keyphrases": ["attribution", "indirect quotation", "information extraction", "news article"]} +{"id": "feng-etal-2011-learning", "title": "Learning General Connotation of Words using Graph-based Algorithms", "abstract": "In this paper, we introduce a connotation lexicon, a new type of lexicon that lists words with connotative polarity, i.e., words with positive connotation (e.g., award, promotion) and words with negative connotation (e.g., cancer, war). Connotation lexicons differ from much studied sentiment lexicons: the latter concerns words that express sentiment, while the former concerns words that evoke or associate with a specific polarity of sentiment. Understanding the connotation of words would seem to require common sense and world knowledge. However, we demonstrate that much of the connotative polarity of words can be inferred from natural language text in a nearly unsupervised manner. The key linguistic insight behind our approach is selectional preference of connotative predicates. We present graph-based algorithms using PageRank and HITS that collectively learn connotation lexicon together with connotative predicates. Our empirical study demonstrates that the resulting connotation lexicon is of great value for sentiment analysis complementing existing sentiment lexicons.", "keyphrases": ["graph-based algorithm", "connotation lexicon", "pagerank", "sentiment analysis"]} +{"id": "lin-etal-2020-mintl", "title": "MinTL: Minimalist Transfer Learning for Task-Oriented Dialogue Systems", "abstract": "In this paper, we propose Minimalist Transfer Learning (MinTL) to simplify the system design process of task-oriented dialogue systems and alleviate the over-dependency on annotated data. MinTL is a simple yet effective transfer learning framework, which allows us to plug-and-play pre-trained seq2seq models, and jointly learn dialogue state tracking and dialogue response generation. Unlike previous approaches, which use a copy mechanism to \u201ccarryover\u201d the old dialogue states to the new one, we introduce Levenshtein belief spans (Lev), that allows efficient dialogue state tracking with a minimal generation length. We instantiate our learning framework with two pre-trained backbones: T5 and BART, and evaluate them on MultiWOZ. 
Extensive experiments demonstrate that: 1) our systems establish new state-of-the-art results on end-to-end response generation, 2) MinTL-based systems are more robust than baseline methods in the low resource setting, and they achieve competitive results with only 20% training data, and 3) Lev greatly improves the inference efficiency.", "keyphrases": ["minimalist transfer learning", "task-oriented dialogue system", "mintl"]} +{"id": "mirkin-meunier-2015-personalized", "title": "Personalized Machine Translation: Predicting Translational Preferences", "abstract": "Machine Translation (MT) has advanced in recent years to produce better translations for clients\u2019 specific domains, and sophisticated tools allow professional translators to obtain translations according to their prior edits. We suggest that MT should be further personalized to the end-user level \u2010 the receiver or the author of the text \u2010 as done in other applications. As a step in that direction, we propose a method based on a recommender systems approach where the user\u2019s preferred translation is predicted based on preferences of similar users. In our experiments, this method outperforms a set of non-personalized methods, suggesting that user preference information can be employed to provide better-suited translations for each user.", "keyphrases": ["recommender system approach", "preferred translation", "similar user"]} +{"id": "glavas-ponzetto-2017-dual", "title": "Dual Tensor Model for Detecting Asymmetric Lexico-Semantic Relations", "abstract": "Detection of lexico-semantic relations is one of the central tasks of computational semantics. Although some fundamental relations (e.g., hypernymy) are asymmetric, most existing models account for asymmetry only implicitly and use the same concept representations to support detection of symmetric and asymmetric relations alike. In this work, we propose the Dual Tensor model, a neural architecture with which we explicitly model the asymmetry and capture the translation between unspecialized and specialized word embeddings via a pair of tensors. Although our Dual Tensor model needs only unspecialized embeddings as input, our experiments on hypernymy and meronymy detection suggest that it can outperform more complex and resource-intensive models. We further demonstrate that the model can account for polysemy and that it exhibits stable performance across languages.", "keyphrases": ["lexico-semantic relation", "hypernymy", "dual tensor model"]} +{"id": "kovatchev-etal-2018-etpc", "title": "ETPC - A Paraphrase Identification Corpus Annotated with Extended Paraphrase Typology and Negation", "abstract": "We present the Extended Paraphrase Typology (EPT) and the Extended Typology Paraphrase Corpus (ETPC). The EPT typology addresses several practical limitations of existing paraphrase typologies: it is the first typology that copes with the non-paraphrase pairs in the paraphrase identification corpora and distinguishes between contextual and habitual paraphrase types. ETPC is the largest corpus to date annotated with atomic paraphrase types. It is the first corpus with detailed annotation of both the paraphrase and the non-paraphrase pairs and the first corpus annotated with paraphrase and negation. Both new resources contribute to better understanding the paraphrase phenomenon, and allow for studying the relationship between paraphrasing and negation. 
To the developers of Paraphrase Identification systems ETPC corpus offers better means for evaluation and error analysis. Furthermore, the EPT typology and ETPC corpus emphasize the relationship with other areas of NLP such as Semantic Similarity, Textual Entailment, Summarization and Simplification.", "keyphrases": ["extended paraphrase typology", "negation", "etpc corpus"]} +{"id": "long-etal-2016-simpler", "title": "Simpler Context-Dependent Logical Forms via Model Projections", "abstract": "We consider the task of learning a context-dependent mapping from utterances to denotations. With only denotations at training time, we must search over a combinatorially large space of logical forms, which is even larger with context-dependent utterances. To cope with this challenge, we perform successive projections of the full model onto simpler models that operate over equivalence classes of logical forms. Though less expressive, we find that these simpler models are much faster and can be surprisingly effective. Moreover, they can be used to bootstrap the full model. Finally, we collected three new context-dependent semantic parsing datasets, and develop a new left-to-right parser.", "keyphrases": ["logical form", "scone", "instruction", "deterministic domain", "paragraph"]} +{"id": "hodosh-hockenmaier-2016-focused", "title": "Focused Evaluation for Image Description with Binary Forced-Choice Tasks", "abstract": "Current evaluation metrics for image description may be too coarse. We therefore propose a series of binary forced-choice tasks that each focus on a different aspect of the captions. We evaluate a number of different off-the-shelf image description systems. Our results indicate strengths and shortcomings of both generation and ranking based approaches.", "keyphrases": ["image description", "binary forced-choice task", "caption"]} +{"id": "madnani-etal-2013-automated", "title": "Automated Scoring of a Summary-Writing Task Designed to Measure Reading Comprehension", "abstract": "We introduce a cognitive framework for measuring reading comprehension that includes the use of novel summary writing tasks. We derive NLP features from the holistic rubric used to score the summaries written by students for such tasks and use them to design a preliminary, automated scoring system. Our results show that the automated approach performs well on summaries written by students for two different passages.", "keyphrases": ["scoring", "reading comprehension", "student"]} +{"id": "yin-schutze-2015-multichannel", "title": "Multichannel Variable-Size Convolution for Sentence Classification", "abstract": "We propose MVCNN, a convolution neural network (CNN) architecture for sentence classification. It (i) combines diverse versions of pretrained word embeddings and (ii) extracts features of multigranular phrases with variable-size convolution filters. We also show that pretraining MVCNN is critical for good performance. MVCNN achieves state-of-the-art performance on four tasks: on small-scale binary, small-scale multi-class and large-scale Twitter sentiment prediction and on subjectivity classification.", "keyphrases": ["sentence classification", "cnn", "deep neural network"]} +{"id": "kozareva-2015-everyone", "title": "Everyone Likes Shopping! Multi-class Product Categorization for e-Commerce", "abstract": "Online shopping caters the needs of millions of users on a daily basis.
To build an accurate system that can retrieve relevant products for a query like \u201cMB252 with travel bags\u201d one requires product and query categorization mechanisms, which classify the text as Home&Garden>Kitchen&Dining>Kitchen Appliances>Blenders. One of the biggest challenges in e-Commerce is that providers like Amazon, e-Bay, Google, Yahoo! and Walmart organize products into different product taxonomies making it hard and time-consuming for sellers to categorize goods for each shopping platform. To address this challenge, we propose an automatic product categorization mechanism, which for a given product title assigns the correct product category from a taxonomy. We conducted an empirical evaluation on 445,408 product titles and used a rich product taxonomy of 319 categories organized into 6 levels. We compared performance against multiple algorithms and found that the best performing system reaches .88 f-score.", "keyphrases": ["shopping", "e-commerce", "product taxonomy"]} +{"id": "zhou-etal-2019-improving", "title": "Improving Robustness of Neural Machine Translation with Multi-task Learning", "abstract": "While neural machine translation (NMT) achieves remarkable performance on clean, in-domain text, performance is known to degrade drastically when facing text which is full of typos, grammatical errors and other varieties of noise. In this work, we propose a multi-task learning algorithm for transformer-based MT systems that is more resilient to this noise. We describe our submission to the WMT 2019 Robustness shared task based on this method. Our model achieves a BLEU score of 32.8 on the shared task French to English dataset, which is 7.1 BLEU points higher than the baseline vanilla transformer trained with clean text.", "keyphrases": ["neural machine translation", "multi-task learning", "noisy input"]} +{"id": "tsuruoka-tsujii-2005-bidirectional", "title": "Bidirectional Inference with the Easiest-First Strategy for Tagging Sequence Data", "abstract": "This paper presents a bidirectional inference algorithm for sequence labeling problems such as part-of-speech tagging, named entity recognition and text chunking. The algorithm can enumerate all possible decomposition structures and find the highest probability sequence together with the corresponding decomposition structure in polynomial time. We also present an efficient decoding algorithm based on the easiest-first strategy, which gives comparably good performance to full bidirectional inference with significantly lower computational cost. Experimental results of part-of-speech tagging and text chunking show that the proposed bidirectional inference methods consistently outperform unidirectional inference methods and bidirectional MEMMs give comparable performance to that achieved by state-of-the-art learning algorithms including kernel support vector machines.", "keyphrases": ["easiest-first strategy", "bidirectional inference", "pos tagger"]} +{"id": "xu-etal-2015-semantic", "title": "Semantic Relation Classification via Convolutional Neural Networks with Simple Negative Sampling", "abstract": "Syntactic features play an essential role in identifying relationship in a sentence. Previous neural network models directly work on raw word sequences or constituent parse trees, thus often suffer from irrelevant information introduced when subjects and objects are in a long distance. In this paper, we propose to learn more robust relation representations from shortest dependency paths through a convolution neural network.
We further take the relation directionality into account and propose a straightforward negative sampling strategy to improve the assignment of subjects and objects. Experimental results show that our method outperforms the state-of-the-art approaches on the SemEval-2010 Task 8 dataset.", "keyphrases": ["convolutional neural network", "semantic relation classification", "cnns"]} +{"id": "hashimoto-etal-2016-word", "title": "Word Embeddings as Metric Recovery in Semantic Spaces", "abstract": "Continuous word representations have been remarkably useful across NLP tasks but remain poorly understood. We ground word embeddings in semantic spaces studied in the cognitive-psychometric literature, taking these spaces as the primary objects to recover. To this end, we relate log co-occurrences of words in large corpora to semantic similarity assessments and show that co-occurrences are indeed consistent with an Euclidean semantic space hypothesis. Framing word embedding as metric recovery of a semantic space unifies existing word embedding algorithms, ties them to manifold learning, and demonstrates that existing algorithms are consistent metric recovery methods given co-occurrence counts from random walks. Furthermore, we propose a simple, principled, direct metric recovery algorithm that performs on par with the state-of-the-art word embedding and manifold learning methods. Finally, we complement recent focus on analogies by constructing two new inductive reasoning datasets\u2014series completion and classification\u2014and demonstrate that word embeddings can be used to solve them as well.", "keyphrases": ["metric recovery", "semantic space", "co-occurrence count"]} +{"id": "escolano-etal-2021-multilingual", "title": "Multilingual Machine Translation: Closing the Gap between Shared and Language-specific Encoder-Decoders", "abstract": "State-of-the-art multilingual machine translation relies on a universal encoder-decoder, which requires retraining the entire system to add new languages. In this paper, we propose an alternative approach that is based on language-specific encoder-decoders, and can thus be more easily extended to new languages by learning their corresponding modules. So as to encourage a common interlingua representation, we simultaneously train the N initial languages. Our experiments show that the proposed approach outperforms the universal encoder-decoder by 3.28 BLEU points on average, while allowing to add new languages without the need to retrain the rest of the modules. All in all, our work closes the gap between shared and language-specific encoder-decoders, advancing toward modular multilingual machine translation systems that can be flexibly extended in lifelong learning settings.", "keyphrases": ["gap", "language-specific encoder-decoder", "mnmt model"]} +{"id": "fei-etal-2022-cqg", "title": "CQG: A Simple and Effective Controlled Generation Framework for Multi-hop Question Generation", "abstract": "Multi-hop question generation focuses on generating complex questions that require reasoning over multiple pieces of information of the input passage. Current models with state-of-the-art performance have been able to generate the correct questions corresponding to the answers. However, most models can not ensure the complexity of generated questions, so they may generate shallow questions that can be answered without multi-hop reasoning. To address this challenge, we propose the CQG, which is a simple and effective controlled framework.
CQG employs a simple method to generate the multi-hop questions that contain key entities in multi-hop reasoning chains, which ensure the complexity and quality of the questions. In addition, we introduce a novel controlled Transformer-based decoder to guarantee that key entities appear in the questions. Experiment results show that our model greatly improves performance, which also outperforms the state-of-the-art model about 25% by 5 BLEU points on HotpotQA.", "keyphrases": ["multi-hop question generation", "complex question", "cqg"]} +{"id": "tran-etal-2015-joint", "title": "Joint Graphical Models for Date Selection in Timeline Summarization", "abstract": "Automatic timeline summarization (TLS) generates precise, dated overviews over (often prolonged) events, such as wars or economic crises. One subtask of TLS selects the most important dates for an event within a certain time frame. Date selection has up to now been handled via supervised machine learning approaches that estimate the importance of each date separately, using features such as the frequency of date mentions in news corpora. This approach neglects interactions between different dates that occur due to connections between subevents. We therefore suggest a joint graphical model for date selection. Even unsupervised versions of this model perform as well as supervised state-of-the-art approaches. With parameter tuning on training data, it outperforms prior supervised models by a considerable margin.", "keyphrases": ["graphical model", "date selection", "timeline summarization"]} +{"id": "carpuat-etal-2017-detecting", "title": "Detecting Cross-Lingual Semantic Divergence for Neural Machine Translation", "abstract": "Parallel corpora are often not as parallel as one might assume: non-literal translations and noisy translations abound, even in curated corpora routinely used for training and evaluation. We use a cross-lingual textual entailment system to distinguish sentence pairs that are parallel in meaning from those that are not, and show that filtering out divergent examples from training improves translation quality.", "keyphrases": ["semantic divergence", "neural machine translation", "translation quality"]} +{"id": "yang-etal-2018-breaking", "title": "Breaking the Beam Search Curse: A Study of (Re-)Scoring Methods and Stopping Criteria for Neural Machine Translation", "abstract": "Beam search is widely used in neural machine translation, and usually improves translation quality compared to greedy search. It has been widely observed that, however, beam sizes larger than 5 hurt translation quality. We explain why this happens, and propose several methods to address this problem. Furthermore, we discuss the optimal stopping criteria for these methods. Results show that our hyperparameter-free methods outperform the widely-used hyperparameter-free heuristic of length normalization by +2.0 BLEU, and achieve the best results among all methods on Chinese-to-English translation.", "keyphrases": ["beam search", "neural machine translation", "reward", "guarantee", "variation"]} +{"id": "mueller-etal-2020-cross", "title": "Cross-Linguistic Syntactic Evaluation of Word Prediction Models", "abstract": "A range of studies have concluded that neural word prediction models can distinguish grammatical from ungrammatical sentences with high accuracy. However, these studies are based primarily on monolingual evidence from English.
To investigate how these models' ability to learn syntax varies by language, we introduce CLAMS (Cross-Linguistic Assessment of Models on Syntax), a syntactic evaluation suite for monolingual and multilingual models. CLAMS includes subject-verb agreement challenge sets for English, French, German, Hebrew and Russian, generated from grammars we develop. We use CLAMS to evaluate LSTM language models as well as monolingual and multilingual BERT. Across languages, monolingual LSTMs achieved high accuracy on dependencies without attractors, and generally poor accuracy on agreement across object relative clauses. On other constructions, agreement accuracy was generally higher in languages with richer morphology. Multilingual models generally underperformed monolingual models. Multilingual BERT showed high syntactic accuracy on English, but noticeable deficiencies in other languages.", "keyphrases": ["syntactic evaluation", "hebrew", "multilingual bert"]} +{"id": "wang-etal-2016-memory", "title": "Memory-enhanced Decoder for Neural Machine Translation", "abstract": "We propose to enhance the RNN decoder in a neural machine translator (NMT) with external memory, as a natural but powerful extension to the state in the decoding RNN. This memory-enhanced RNN decoder is called MemDec. At each time during decoding, MemDec will read from this memory and write to this memory once, both with content-based addressing. Unlike the unbounded memory in previous work (RNNsearch) to store the representation of source sentence, the memory in MemDec is a matrix with pre-determined size designed to better capture the information important for the decoding process at each time step. Our empirical study on Chinese-English translation shows that it can improve by 4.8 BLEU upon Groundhog and 5.3 BLEU upon Moses, yielding the best performance achieved with the same training set.", "keyphrases": ["neural machine translation", "memory", "good performance"]} +{"id": "ravi-2013-scalable", "title": "Scalable Decipherment for Machine Translation via Hash Sampling", "abstract": "In this paper, we propose a new Bayesian inference method to train statistical machine translation systems using only nonparallel corpora. Following a probabilistic decipherment approach, we first introduce a new framework for decipherment training that is flexible enough to incorporate any number/type of features (besides simple bag-of-words) as side-information used for estimating translation models. In order to perform fast, efficient Bayesian inference in this framework, we then derive a hash sampling strategy that is inspired by the work of Ahmed et al. (2012). The new translation hash sampler enables us to scale elegantly to complex models (for the first time) and large vocabulary/corpora sizes. We show empirical results on the OPUS data\u2014our method yields the best BLEU scores compared to existing approaches, while achieving significant computational speedups (several orders faster).
We also report for the first time\u2014BLEU score results for a large-scale MT task using only non-parallel data (EMEA corpus).", "keyphrases": ["decipherment", "hash sampling", "translation model"]} +{"id": "finkel-manning-2008-enforcing", "title": "Enforcing Transitivity in Coreference Resolution", "abstract": "A desirable quality of a coreference resolution system is the ability to handle transitivity constraints, such that even if it places high likelihood on a particular mention being coreferent with each of two other mentions, it will also consider the likelihood of those two mentions being coreferent when making a final assignment. This is exactly the kind of constraint that integer linear programming (ILP) is ideal for, but, surprisingly, previous work applying ILP to coreference resolution has not encoded this type of constraint. We train a coreference classifier over pairs of mentions, and show how to encode this type of constraint on top of the probabilities output from our pairwise classifier to extract the most probable legal entity assignments. We present results on two commonly used datasets which show that enforcement of transitive closure consistently improves performance, including improvements of up to 3.6% using the b3 scorer, and up to 16.5% using cluster f-measure.", "keyphrases": ["transitivity", "resolution", "mention-pair model"]} +{"id": "jans-etal-2012-skip", "title": "Skip N-grams and Ranking Functions for Predicting Script Events", "abstract": "In this paper, we extend current state-of-the-art research on unsupervised acquisition of scripts, that is, stereotypical and frequently observed sequences of events. We design, evaluate and compare different methods for constructing models for script event prediction: given a partial chain of events in a script, predict other events that are likely to belong to the script. Our work aims to answer key questions about how best to (1) identify representative event chains from a source text, (2) gather statistics from the event chains, and (3) choose ranking functions for predicting new script events. We make several contributions, introducing skip-grams for collecting event statistics, designing improved methods for ranking event predictions, defining a more reliable evaluation metric for measuring predictiveness, and providing a systematic analysis of the various event prediction models.", "keyphrases": ["script event", "event prediction", "skip-gram"]} +{"id": "bui-etal-2009-extracting", "title": "Extracting Decisions from Multi-Party Dialogue Using Directed Graphical Models and Semantic Similarity", "abstract": "We use directed graphical models (DGMs) to automatically detect decision discussions in multi-party dialogue. Our approach distinguishes between different dialogue act (DA) types based on their role in the formulation of a decision. DGMs enable us to model dependencies, including sequential ones. We summarize decisions by extracting suitable phrases from DAs that concern the issue under discussion and its resolution. Here we use a semantic-similarity metric to improve results on both manual and ASR transcripts.", "keyphrases": ["multi-party dialogue", "directed graphical models", "discourse relation"]} +{"id": "wu-etal-2018-hard", "title": "Hard Non-Monotonic Attention for Character-Level Transduction", "abstract": "Character-level string-to-string transduction is an important component of various NLP tasks.
The goal is to map an input string to an output string, where the strings may be of different lengths and have characters taken from different alphabets. Recent approaches have used sequence-to-sequence models with an attention mechanism to learn which parts of the input string the model should focus on during the generation of the output string. Both soft attention and hard monotonic attention have been used, but hard non-monotonic attention has only been used in other sequence modeling tasks and has required a stochastic approximation to compute the gradient. In this work, we introduce an exact, polynomial-time algorithm for marginalizing over the exponential number of non-monotonic alignments between two strings, showing that hard attention models can be viewed as neural reparameterizations of the classical IBM Model 1. We compare soft and hard non-monotonic attention experimentally and find that the exact algorithm significantly improves performance over the stochastic approximation and outperforms soft attention.", "keyphrases": ["transduction", "character", "hard non-monotonic attention"]} +{"id": "qian-etal-2015-transition", "title": "A Transition-based Model for Joint Segmentation, POS-tagging and Normalization", "abstract": "We propose a transition-based model for joint word segmentation, POS tagging and text normalization. Different from previous methods, the model can be trained on standard text corpora, overcoming the lack of annotated microblog corpora. To evaluate our model, we develop an annotated corpus based on microblogs. Experimental results show that our joint model can help improve the performance of word segmentation on microblogs, giving an error reduction in segmentation accuracy of 12.02%, compared to the traditional approach.", "keyphrases": ["transition-based model", "normalization", "pos tagging", "joint model"]} +{"id": "luo-etal-2018-marrying", "title": "Marrying Up Regular Expressions with Neural Networks: A Case Study for Spoken Language Understanding", "abstract": "The success of many natural language processing (NLP) tasks is bound by the number and quality of annotated data, but there is often a shortage of such training data. In this paper, we ask the question: \u201cCan we combine a neural network (NN) with regular expressions (RE) to improve supervised learning for NLP?\u201d. In answer, we develop novel methods to exploit the rich expressiveness of REs at different levels within a NN, showing that the combination significantly enhances the learning effectiveness when a small number of training examples are available. We evaluate our approach by applying it to spoken language understanding for intent detection and slot filling. Experimental results show that our approach is highly effective in exploiting the available training data, giving a clear boost to the RE-unaware NN.", "keyphrases": ["regular expression", "spoken language understanding", "different level"]} +{"id": "cattle-ma-2018-recognizing", "title": "Recognizing Humour using Word Associations and Humour Anchor Extraction", "abstract": "This paper attempts to marry the interpretability of statistical machine learning approaches with the more robust models of joke structure and joke semantics capable of being learned by neural models. 
Specifically, we explore the use of semantic relatedness features based on word associations, rather than the more common Word2Vec similarity, on a binary humour identification task and identify several factors that make word associations a better fit for humour. We also explore the effects of using joke structure, in the form of humour anchors (Yang et al., 2015), for improving the performance of semantic features and show that, while an intriguing idea, humour anchors contain several pitfalls that can hurt performance.", "keyphrases": ["humour", "word association", "factor"]} +{"id": "aroca-ouellette-etal-2021-prost", "title": "PROST: Physical Reasoning about Objects through Space and Time", "abstract": "We present a new probing dataset named PROST: Physical Reasoning about Objects Through Space and Time. This dataset contains 18,736 multiple-choice questions made from 14 manually curated templates, covering 10 physical reasoning concepts. All questions are designed to probe both causal and masked language models in a zero-shot setting. We conduct an extensive analysis which demonstrates that state-of-the-art pretrained models are inadequate at physical reasoning: they are influenced by the order in which answer options are presented to them, they struggle when the superlative in a question is inverted (e.g., most<->least), and increasing the amount of pretraining data and parameters only yields minimal improvements. These results provide support for the hypothesis that current pretrained models' ability to reason about physical interactions is inherently limited by a lack of real world experience. By highlighting these limitations, we hope to motivate the development of models with a human-like understanding of the physical world.", "keyphrases": ["physical reasoning", "object", "limitation", "prost"]} +{"id": "cohen-smith-2007-joint", "title": "Joint Morphological and Syntactic Disambiguation", "abstract": "In morphologically rich languages, should morphological and syntactic disambiguation be treated sequentially or as a single problem? We describe several efficient, probabilistically interpretable ways to apply joint inference to morphological and syntactic disambiguation using lattice parsing. Joint inference is shown to compare favorably to pipeline parsing methods across a variety of component models. State-of-the-art performance on Hebrew Treebank parsing is demonstrated using the new method. The benefits of joint inference are modest with the current component models, but appear to increase as components themselves improve.", "keyphrases": ["morphology", "syntactic disambiguation", "joint inference", "semitic language"]} +{"id": "shapira-etal-2018-evaluating", "title": "Evaluating Multiple System Summary Lengths: A Case Study", "abstract": "Practical summarization systems are expected to produce summaries of varying lengths, per user needs. While a couple of early summarization benchmarks tested systems across multiple summary lengths, this practice was mostly abandoned due to the assumed cost of producing reference summaries of multiple lengths. In this paper, we raise the research question of whether reference summaries of a single length can be used to reliably evaluate system summaries of multiple lengths. For that, we have analyzed a couple of datasets as a case study, using several variants of the ROUGE metric that are standard in summarization evaluation. Our findings indicate that the evaluation protocol in question is indeed competitive.
This result paves the way to practically evaluating varying-length summaries with simple, possibly existing, summarization benchmarks.", "keyphrases": ["length", "case study", "summarization system"]} +{"id": "brantley-etal-2019-non", "title": "Non-Monotonic Sequential Text Generation", "abstract": "Standard sequential generation methods assume a pre-specified generation order, such as text generation methods which generate words from left to right. In this work, we propose a framework for training models of text generation that operate in non-monotonic orders; the model directly learns good orders, without any additional annotation. Our framework operates by generating a word at an arbitrary position, and then recursively generating words to its left and then words to its right, yielding a binary tree. Learning is framed as imitation learning, including a coaching method which moves from imitating an oracle to reinforcing the policy's own preferences. Experimental results demonstrate that using the proposed method, it is possible to learn policies which generate text without pre-specifying a generation order while achieving competitive performance with conventional left-to-right generation.", "keyphrases": ["text generation", "arbitrary position", "policy"]} +{"id": "liu-etal-2016-jointly", "title": "Jointly Learning Grounded Task Structures from Language Instruction and Visual Demonstration", "abstract": "To enable language-based communication and collaboration with cognitive robots, this paper presents an approach where an agent can learn task models jointly from language instruction and visual demonstration using an And-Or Graph (AoG) representation. The learned AoG captures a hierarchical task structure where linguistic labels (for language communication) are grounded to corresponding state changes from the physical environment (for perception and action). Our empirical results on a cloth-folding domain have shown that, although state detection through visual processing is full of uncertainties and error prone, by a tight integration with language the agent is able to learn an effective AoG for task representation. The learned AoG can be further applied to infer and interpret on-going actions from new visual demonstration using linguistic labels at different levels of granularity.", "keyphrases": ["language instruction", "visual demonstration", "action"]} +{"id": "cer-etal-2010-parsing", "title": "Parsing to Stanford Dependencies: Trade-offs between Speed and Accuracy", "abstract": "We investigate a number of approaches to generating Stanford Dependencies, a widely used semantically-oriented dependency representation. We examine algorithms specifically designed for dependency parsing (Nivre, Nivre Eager, Covington, Eisner, and RelEx) as well as dependencies extracted from constituent parse trees created by phrase structure parsers (Charniak, Charniak-Johnson, Bikel, Berkeley and Stanford). We found that constituent parsers systematically outperform algorithms designed specifically for dependency parsing. The most accurate method for generating dependencies is the Charniak-Johnson reranking parser, with 89% (labeled) attachment F1 score. The fastest methods are Nivre, Nivre Eager, and Covington, used with a linear classifier to make local parsing decisions, which can parse the entire Penn Treebank development set (section 22) in less than 10 seconds on an Intel Xeon E5520. 
However, this speed comes with a substantial drop in F1 score (about 76% for labeled attachment) compared to competing methods. By tuning how much of the search space is explored by the Charniak-Johnson parser, we are able to arrive at a balanced configuration that is both fast and nearly as good as the most accurate approaches.", "keyphrases": ["stanford dependencies", "trade-off", "setup"]} +{"id": "joseph-etal-2017-constance", "title": "ConStance: Modeling Annotation Contexts to Improve Stance Classification", "abstract": "Manual annotations are a prerequisite for many applications of machine learning. However, weaknesses in the annotation process itself are easy to overlook. In particular, scholars often choose what information to give to annotators without examining these decisions empirically. For subjective tasks such as sentiment analysis, sarcasm, and stance detection, such choices can impact results. Here, for the task of political stance detection on Twitter, we show that providing too little context can result in noisy and uncertain annotations, whereas providing too strong a context may cause it to outweigh other signals. To characterize and reduce these biases, we develop ConStance, a general model for reasoning about annotations across information conditions. Given conflicting labels produced by multiple annotators seeing the same instances with different contexts, ConStance simultaneously estimates gold standard labels and also learns a classifier for new instances. We show that the classifier learned by ConStance outperforms a variety of baselines at predicting political stance, while the model's interpretable parameters shed light on the effects of each context.", "keyphrases": ["annotator", "stance detection", "constance"]} +{"id": "marin-etal-2011-detecting", "title": "Detecting Forum Authority Claims in Online Discussions", "abstract": "This paper explores the problem of detecting sentence-level forum authority claims in online discussions. Using a maximum entropy model, we explore a variety of strategies for extracting lexical features in a sparse training scenario, comparing knowledge- and data-driven methods (and combinations). The augmentation of lexical features with parse context is also investigated. We find that certain markup features perform remarkably well alone, but are outperformed by data-driven selection of lexical features augmented with parse context.", "keyphrases": ["authority claim", "online discussion", "wikipedia discussion"]} +{"id": "derczynski-2016-complementarity", "title": "Complementarity, F-score, and NLP Evaluation", "abstract": "This paper addresses the problem of quantifying the differences between entity extraction systems, where in general only a small proportion of a document should be selected. Comparing overall accuracy is not very useful in these cases, as small differences in accuracy may correspond to huge differences in selections over the target minority class. Conventionally, one may use per-token complementarity to describe these differences, but it is not very useful when the set is heavily skewed. In such situations, which are common in information retrieval and entity recognition, metrics like precision and recall are typically used to describe performance. However, precision and recall fail to describe the differences between sets of objects selected by different decision strategies, instead just describing the proportional amount of correct and incorrect objects selected.
This paper presents a method for measuring complementarity for precision, recall and F-score, quantifying the difference between entity extraction approaches.", "keyphrases": ["f-score", "recall", "complementarity"]} +{"id": "hedderich-etal-2020-transfer", "title": "Transfer Learning and Distant Supervision for Multilingual Transformer Models: A Study on African Languages", "abstract": "Multilingual transformer models like mBERT and XLM-RoBERTa have obtained great improvements for many NLP tasks on a variety of languages. However, recent works also showed that results from high-resource languages could not be easily transferred to realistic, low-resource scenarios. In this work, we study trends in performance for different amounts of available resources for the three African languages Hausa, isiXhosa and Yor\u00f9b\u00e1 on both NER and topic classification. We show that in combination with transfer learning or distant supervision, these models can achieve with as little as 10 or 100 labeled sentences the same performance as baselines with much more supervised training data. However, we also find settings where this does not hold. Our discussions and additional experiments on assumptions such as time and hardware restrictions highlight challenges and opportunities in low-resource learning.", "keyphrases": ["distant supervision", "transfer learning", "few-shot cross-lingual transfer"]} +{"id": "eshghi-etal-2015-feedback", "title": "Feedback in Conversation as Incremental Semantic Update", "abstract": "In conversation, interlocutors routinely indicate whether something said or done has been processed and integrated. Such feedback includes backchannels such as \u2018okay\u2019 or \u2018mhm\u2019, the production of a next relevant turn, and repair initiation via clarification requests. Importantly, such feedback can be produced not only at sentence/turn boundaries, but also sub-sententially. In this paper, we extend an existing model of incremental semantic processing in dialogue, based around the Dynamic Syntax (DS) grammar framework, to provide a low-level, integrated account of backchannels, clarification requests and their responses; demonstrating that they can be accounted for as part of the core semantic structure-building mechanisms of the grammar, rather than via higher level pragmatic phenomena such as intention recognition, or treatment as an \u201cunofficial\u201d part of the conversation. The end result is an incremental model in which words, not turns, are seen as procedures for contextual update and backchannels serve to align participant semantic processing contexts and thus ease the production and interpretation of subsequent conversational actions. We also show how clarification requests and their following responses and repair can be modelled within the same DS framework, wherein the divergence and re-alignment effort in participants\u2019 semantic processing drives conversations forward.", "keyphrases": ["conversation", "mechanism", "incremental semantic grammar"]} +{"id": "benajiba-etal-2010-arabic", "title": "Arabic Named Entity Recognition: Using Features Extracted from Noisy Data", "abstract": "Building an accurate Named Entity Recognition (NER) system for languages with complex morphology is a challenging task. In this paper, we present research that explores the feature space using both gold and bootstrapped noisy features to build an improved highly accurate Arabic NER system.
We bootstrap noisy features by projection from an Arabic-English parallel corpus that is automatically tagged with a baseline NER system. The feature space covers lexical, morphological, and syntactic features. The proposed approach yields an improvement of up to 1.64 F-measure (absolute).", "keyphrases": ["arabic ner system", "syntactic feature", "parallel corpora"]} +{"id": "zhang-etal-2006-word", "title": "Word Segmentation and Named Entity Recognition for SIGHAN Bakeoff3", "abstract": "We have participated in three open tracks of Chinese word segmentation and named entity recognition tasks of SIGHAN Bakeoff3. We take a probabilistic feature based Maximum Entropy (ME) model as our basic frame to combine multiple sources of knowledge. Our named entity recognizer achieved the highest F measure for MSRA, and word segmenter achieved the medium F measure for MSRA. We find effective combining of the external multi-knowledge is crucial to improve performance of word segmentation and named entity recognition.", "keyphrases": ["entity recognition", "sighan bakeoff3", "word segmentation"]} +{"id": "zhou-etal-2011-unsupervised", "title": "Unsupervised Discovery of Discourse Relations for Eliminating Intra-sentence Polarity Ambiguities", "abstract": "Polarity classification of opinionated sentences with both positive and negative sentiments is a key challenge in sentiment analysis. This paper presents a novel unsupervised method for discovering intra-sentence level discourse relations for eliminating polarity ambiguities. Firstly, a discourse scheme with discourse constraints on polarity was defined empirically based on Rhetorical Structure Theory (RST). Then, a small set of cue-phrase-based patterns were utilized to collect a large number of discourse instances which were later converted to semantic sequential representations (SSRs). Finally, an unsupervised method was adopted to generate, weigh and filter new SSRs without cue phrases for recognizing discourse relations. Experimental results showed that the proposed methods not only effectively recognized the defined discourse relations but also achieved significant improvement by integrating discourse information in sentence-level polarity classification.", "keyphrases": ["discourse relation", "polarity ambiguity", "sentiment analysis"]} +{"id": "muller-eberstein-etal-2021-genre", "title": "Genre as Weak Supervision for Cross-lingual Dependency Parsing", "abstract": "Recent work has shown that monolingual masked language models learn to represent data-driven notions of language variation which can be used for domain-targeted training data selection. Dataset genre labels are already frequently available, yet remain largely unexplored in cross-lingual setups. We harness this genre metadata as a weak supervision signal for targeted data selection in zero-shot dependency parsing. Specifically, we project treebank-level genre information to the finer-grained sentence level, with the goal to amplify information implicitly stored in unsupervised contextualized representations. We demonstrate that genre is recoverable from multilingual contextual embeddings and that it provides an effective signal for training data selection in cross-lingual, zero-shot scenarios. For 12 low-resource language treebanks, six of which are test-only, our genre-specific methods significantly outperform competitive baselines as well as recent embedding-based methods for data selection.
Moreover, genre-based data selection provides new state-of-the-art results for three of these target languages.", "keyphrases": ["dependency parsing", "cross-lingual setup", "genre"]} +{"id": "ovrelid-etal-2010-syntactic", "title": "Syntactic Scope Resolution in Uncertainty Analysis", "abstract": "We show how the use of syntactic structure enables the resolution of hedge scope in a hybrid, two-stage approach to uncertainty analysis. In the first stage, a Maximum Entropy classifier, combining surface-oriented and syntactic features, identifies cue words. With a small set of hand-crafted rules operating over dependency representations in stage two, we attain the best overall result (in terms of both combined ranks and average F1) in the 2010 CoNLL Shared Task.", "keyphrases": ["uncertainty analysis", "syntactic feature", "heuristic rule"]} +{"id": "auli-lopez-2011-training", "title": "Training a Log-Linear Parser with Loss Functions via Softmax-Margin", "abstract": "Log-linear parsing models are often trained by optimizing likelihood, but we would prefer to optimise for a task-specific metric like F-measure. Softmax-margin is a convex objective for such models that minimises a bound on expected risk for a given loss function, but its naive application requires the loss to decompose over the predicted structure, which is not true of F-measure. We use softmax-margin to optimise a log-linear CCG parser for a variety of loss functions, and demonstrate a novel dynamic programming algorithm that enables us to use it with F-measure, leading to substantial gains in accuracy on CCG-Bank. When we embed our loss-trained parser into a larger model that includes supertagging features incorporated via belief propagation, we obtain further improvements and achieve a labelled/unlabelled dependency F-measure of 89.3%/94.0% on gold part-of-speech tags, and 87.2%/92.8% on automatic part-of-speech tags, the best reported results for this task.", "keyphrases": ["softmax-margin", "likelihood", "task-specific metric"]} +{"id": "wing-baldridge-2014-hierarchical", "title": "Hierarchical Discriminative Classification for Text-Based Geolocation", "abstract": "Text-based document geolocation is commonly rooted in language-based information retrieval techniques over geodesic grids. These methods ignore the natural hierarchy of cells in such grids and fall afoul of independence assumptions. We demonstrate the effectiveness of using logistic regression models on a hierarchy of nodes in the grid, which improves upon the state of the art accuracy by several percent and reduces mean error distances by hundreds of kilometers on data from Twitter, Wikipedia, and Flickr. We also show that logistic regression performs feature selection effectively, assigning high weights to geocentric terms.", "keyphrases": ["grid", "feature selection", "city", "different level"]} +{"id": "kavumba-etal-2019-choosing", "title": "When Choosing Plausible Alternatives, Clever Hans can be Clever", "abstract": "Pretrained language models, such as BERT and RoBERTa, have shown large improvements in the commonsense reasoning benchmark COPA. However, recent work found that many improvements in benchmarks of natural language understanding are not due to models learning the task, but due to their increasing ability to exploit superficial cues, such as tokens that occur more often in the correct answer than the wrong one. Are BERT's and RoBERTa's good performance on COPA also caused by this? 
We find superficial cues in COPA, as well as evidence that BERT exploits these cues. To remedy this problem, we introduce Balanced COPA, an extension of COPA that does not suffer from easy-to-exploit single token cues. We analyze BERT's and RoBERTa's performance on original and Balanced COPA, finding that BERT relies on superficial cues when they are present, but still achieves comparable performance once they are made ineffective, suggesting that BERT learns the task to a certain degree when forced to. In contrast, RoBERTa does not appear to rely on superficial cues.", "keyphrases": ["plausible alternatives", "cue", "balanced copa", "fine-tuning", "commonsense knowledge"]} +{"id": "hovy-2011-invited", "title": "Invited Keynote: What are Subjectivity, Sentiment, and Affect?", "abstract": "Pragmatics\u2014the aspects of text that signal interpersonal and situational information, complementing semantics\u2014has been almost totally ignored in Natural Language Processing. But in the past five to eight years there has been a surge of research on the general topic of \u2018opinion\u2019, also called \u2018sentiment\u2019. Generally, research focuses on determining the author\u2019s opinion/sentiment about some topic within a given fragment of text. Since opinions may differ, it is granted that the author\u2019s opinion is \u2018subjective\u2019, and the effectiveness of an opinion-determination system is measured by comparing against a gold-standard set of human annotations. But what does \u2018subjectivity\u2019 actually mean? What are \u2018opinion\u2019 and \u2018sentiment\u2019? Lately, researchers are also starting to talk about \u2018affect\u2019, and even \u2018emotion\u2019. What are these notions, and how do they differ from one another? Unfortunately, a survey of the research done to date shows a disturbing lack of clarity on these questions. Very few papers bother to define their terms, but simply take a set of valences such as Good\u2013Neutral\u2013Bad to be sufficient. More recent work acknowledges the need to specify what the opinion actually applies to, and attempts also to determine the theme. Lately, several identify the holder of the opinion. Some even try to estimate the strength of the expressed opinion. The trouble is, the same aspect of the same object can be considered Good by one person and Bad by another, and we can often understand both their points of view. There is much more to opinion/sentiment than simply matching words and phrases that attach to the theme, and computing a polarity score. People give reasons why they like or dislike something, and these reasons pertain to their goals and plans (in the case of opinions) or their deeper emotional states (in the case of affect). In this talk I outline a model of sentiment/opinion and of affect, and show that they appear in text in a fairly structured way, with various components. I show how proper understanding requires the reader to build some kind of person profile of the author, and claim that for systems to do adequate understanding of sentiments, opinions, and affects, they will need to do so as well. This is not a trivial challenge, and it opens the door to a whole new line of research with many fascinating and practical aspects.", "keyphrases": ["subjectivity", "affect", "election"]} +{"id": "kann-etal-2016-neural", "title": "Neural Morphological Analysis: Encoding-Decoding Canonical Segments", "abstract": "Canonical morphological segmentation aims to divide words into a sequence of standardized segments.
In this work, we propose a character-based neural encoder-decoder model for this task. Additionally, we extend our model to include morpheme-level and lexical information through a neural reranker. We set the new state of the art for the task, improving previous results by up to 21% accuracy. Our experiments cover three languages: English, German and Indonesian.", "keyphrases": ["canonical segmentation", "encoder-decoder model", "morpheme", "allomorph"]} +{"id": "vu-etal-2009-feature", "title": "Feature-Based Method for Document Alignment in Comparable News Corpora", "abstract": "In this paper, we present a feature-based method to align documents with similar content across two sets of bilingual comparable corpora from daily news texts. We evaluate the contribution of each individual feature and investigate the incorporation of these diverse statistical and heuristic features for the task of bilingual document alignment. Experimental results on the English-Chinese and English-Malay comparable news corpora show that our proposed Discrete Fourier Transform-based term frequency distribution feature is very effective. It contributes 4.1% and 8% to performance improvement over Pearson's correlation method on the two comparable corpora. In addition, when more heuristic and statistical features as well as a bilingual dictionary are utilized, our method shows an absolute performance improvement of 23.2% and 15.3% on the two sets of bilingual corpora when comparing with a prior information retrieval-based method.", "keyphrases": ["document alignment", "feature-based method", "independent unit"]} +{"id": "johansson-nugues-2008-effect", "title": "The Effect of Syntactic Representation on Semantic Role Labeling", "abstract": "Almost all automatic semantic role labeling (SRL) systems rely on a preliminary parsing step that derives a syntactic structure from the sentence being analyzed. This makes the choice of syntactic representation an essential design decision. In this paper, we study the influence of syntactic representation on the performance of SRL systems. Specifically, we compare constituent-based and dependency-based representations for SRL of English in the FrameNet paradigm. \n \nContrary to previous claims, our results demonstrate that the systems based on dependencies perform roughly as well as those based on constituents: For the argument classification task, dependency-based systems perform slightly higher on average, while the opposite holds for the argument identification task. This is remarkable because dependency parsers are still in their infancy while constituent parsing is more mature. Furthermore, the results show that dependency-based semantic role classifiers rely less on lexicalized features, which makes them more robust to domain changes and makes them learn more efficiently with respect to the amount of training data.", "keyphrases": ["semantic role labeling", "srl", "framenet", "argument identification task"]} +{"id": "che-etal-2019-hit", "title": "HIT-SCIR at MRP 2019: A Unified Pipeline for Meaning Representation Parsing via Efficient Training and Effective Encoding", "abstract": "This paper describes our system (HIT-SCIR) for CoNLL 2019 shared task: Cross-Framework Meaning Representation Parsing. We extended the basic transition-based parser with two improvements: a) Efficient Training by realizing Stack LSTM parallel training; b) Effective Encoding via adopting deep contextualized word embeddings BERT.
Generally, we proposed a unified pipeline to meaning representation parsing, including framework-specific transition-based parsers, BERT-enhanced word representation, and post-processing. In the final evaluation, our system was ranked first according to ALL-F1 (86.2%) and especially ranked first in UCCA framework (81.67%).", "keyphrases": ["unified pipeline", "meaning representation parsing", "transition-based parser"]} +{"id": "fukuda-etal-2020-improving", "title": "Improving Speech Recognition for the Elderly: A New Corpus of Elderly Japanese Speech and Investigation of Acoustic Modeling for Speech Recognition", "abstract": "In an aging society like Japan, a highly accurate speech recognition system is needed for use in electronic devices for the elderly, but this level of accuracy cannot be obtained using conventional speech recognition systems due to the unique features of the speech of elderly people. S-JNAS, a corpus of elderly Japanese speech, is widely used for acoustic modeling in Japan, but the average age of its speakers is 67.6 years old. Since average life expectancy in Japan is now 84.2 years, we are constructing a new speech corpus, which currently consists of the utterances of 221 speakers with an average age of 79.2, collected from four regions of Japan. In addition, we expand on our previous study (Fukuda, 2019) by further investigating the construction of acoustic models suitable for elderly speech. We create new acoustic models and train them using a combination of existing Japanese speech corpora (JNAS, S-JNAS, CSJ), with and without our 'super-elderly' speech data, and conduct speech recognition experiments. Our new acoustic models achieve word error rates (WER) as low as 13.38%, exceeding the results of our previous study in which we used the CSJ acoustic model adapted for elderly speech (17.4% WER).", "keyphrases": ["speech recognition", "elderly japanese speech", "adaptation"]} +{"id": "branavan-etal-2010-reading", "title": "Reading between the Lines: Learning to Map High-Level Instructions to Commands", "abstract": "In this paper, we address the task of mapping high-level instructions to sequences of commands in an external environment. Processing these instructions is challenging\u2014they posit goals to be achieved without specifying the steps required to complete them. We describe a method that fills in missing information using an automatically derived environment model that encodes states, transitions, and commands that cause these transitions to happen. We present an efficient approximate approach for learning this environment model as part of a policy-gradient reinforcement learning algorithm for text interpretation. This design enables learning for mapping high-level instructions, which previous statistical methods cannot handle.", "keyphrases": ["high-level instruction", "command", "environment"]} +{"id": "cocos-etal-2018-learning", "title": "Learning Scalar Adjective Intensity from Paraphrases", "abstract": "Adjectives like \u201cwarm\u201d, \u201chot\u201d, and \u201cscalding\u201d all describe temperature but differ in intensity. Understanding these differences between adjectives is a necessary part of reasoning about natural language. We propose a new paraphrase-based method to automatically learn the relative intensity relation that holds between a pair of scalar adjectives.
Our approach analyzes over 36k adjectival pairs from the Paraphrase Database under the assumption that, for example, paraphrase pair \u201creally hot\u201d \u2013 \u201cscalding\u201d suggests that \u201chot\u201d < \u201cscalding\u201d. We show that combining this paraphrase evidence with existing, complementary pattern- and lexicon-based approaches improves the quality of systems for automatically ordering sets of scalar adjectives and inferring the polarity of indirect answers to \u201cyes/no\u201d questions.", "keyphrases": ["adjective", "intensity", "paraphrase"]} +{"id": "mayhew-etal-2019-named", "title": "Named Entity Recognition with Partially Annotated Training Data", "abstract": "Supervised machine learning assumes the availability of fully-labeled data, but in many cases, such as low-resource languages, the only data available is partially annotated. We study the problem of Named Entity Recognition (NER) with partially annotated training data in which a fraction of the named entities are labeled, and all other tokens, entities or otherwise, are labeled as non-entity by default. In order to train on this noisy dataset, we need to distinguish between the true and false negatives. To this end, we introduce a constraint-driven iterative algorithm that learns to detect false negatives in the noisy set and downweigh them, resulting in a weighted training set. With this set, we train a weighted NER model. We evaluate our algorithm with weighted variants of neural and non-neural NER models on data in 8 languages from several language and script families, showing strong ability to learn from partial data. Finally, to show real-world efficacy, we evaluate on a Bengali NER corpus annotated by non-speakers, outperforming the prior state-of-the-art by over 5 points F1.", "keyphrases": ["low-resource language", "ner model", "noisy label", "training procedure"]} +{"id": "kaneko-etal-2022-gender", "title": "Gender Bias in Masked Language Models for Multiple Languages", "abstract": "Masked Language Models (MLMs) pre-trained by predicting masked tokens on large corpora have been used successfully in natural language processing tasks for a variety of languages. Unfortunately, it was reported that MLMs also learn discriminative biases regarding attributes such as gender and race. Because most studies have focused on MLMs in English, the bias of MLMs in other languages has rarely been investigated. Manual annotation of evaluation data for languages other than English has been challenging due to the cost and difficulty in recruiting annotators. Moreover, the existing bias evaluation methods require the stereotypical sentence pairs consisting of the same context with attribute words (e.g. He/She is a nurse). We propose Multilingual Bias Evaluation (MBE) score, to evaluate bias in various languages using only English attribute word lists and parallel corpora between the target language and English without requiring manually annotated data. We evaluated MLMs in eight languages using the MBE and confirmed that gender-related biases are encoded in MLMs for all those languages.
We manually created datasets for gender bias in Japanese and Russian to evaluate the validity of the MBE.The results show that the bias scores reported by the MBE significantly correlates with that computed from the above manually created datasets and the existing English datasets for gender bias.", "keyphrases": ["masked language models", "mlm", "gender bias"]} +{"id": "bond-etal-2016-cili", "title": "CILI: the Collaborative Interlingual Index", "abstract": "This paper introduces the motivation for and design of the Collaborative InterLingual Index (CILI). It is designed to make possible coordination between multiple loosely coupled wordnet projects. The structure of the CILI is based on the Interlingual index first proposed in the EuroWordNet project with several pragmatic extensions: an explicit open license, definitions in English and links to wordnets in the Global Wordnet Grid.", "keyphrases": ["collaborative interlingual index", "wordnet", "cili"]} +{"id": "huang-etal-2016-attention", "title": "Attention-based Multimodal Neural Machine Translation", "abstract": "We present a novel neural machine translation (NMT) architecture associating visual and textual features for translation tasks with multiple modalities. Transformed global and regional visual features are concatenated with text to form attendable sequences which are dissipated over parallel long short-term memory (LSTM) threads to assist the encoder generating a representation for attention-based decoding. Experiments show that the proposed NMT outperform the text-only baseline.", "keyphrases": ["image feature", "convolutional neural network", "encode word sequence", "multimodal system"]} +{"id": "gamon-2004-linguistic", "title": "Linguistic correlates of style: authorship classification with deep linguistic analysis features", "abstract": "The identification of authorship falls into the category of style classification, an interesting sub-field of text categorization that deals with properties of the form of linguistic expression as opposed to the content of a text. Various feature sets and classification methods have been proposed in the literature, geared towards abstracting away from the content of a text, and focusing on its stylistic properties. We demonstrate that in a realistically difficult authorship attribution scenario, deep linguistic analysis features such as context free production frequencies and semantic relationship frequencies achieve significant error reduction over more commonly used \"shallow\" features such as function word frequencies and part of speech trigrams. Modern machine learning techniques like support vector machines allow us to explore large feature vectors, combining these different feature sets to achieve high classification accuracy in style-based tasks.", "keyphrases": ["authorship classification", "linguistic analysis feature", "support vector machine"]} +{"id": "hoyle-etal-2019-unsupervised", "title": "Unsupervised Discovery of Gendered Language through Latent-Variable Modeling", "abstract": "Studying the ways in which language is gendered has long been an area of interest in sociolinguistics. Studies have explored, for example, the speech of male and female characters in film and the language used to describe male and female politicians. In this paper, we aim not to merely study this phenomenon qualitatively, but instead to quantify the degree to which the language used to describe men and women is different and, moreover, different in a positive or negative way. 
To that end, we introduce a generative latent-variable model that jointly represents adjective (or verb) choice, with its sentiment, given the natural gender of a head (or dependent) noun. We find that there are significant differences between descriptions of male and female nouns and that these differences align with common gender stereotypes: Positive adjectives used to describe women are more often related to their bodies than adjectives used to describe men.", "keyphrases": ["latent-variable model", "adjective", "woman"]} +{"id": "duong-etal-2015-neural", "title": "A Neural Network Model for Low-Resource Universal Dependency Parsing", "abstract": "Accurate dependency parsing requires large treebanks, which are only available for a few languages. We propose a method that takes advantage of shared structure across languages to build a mature parser using less training data. We propose a model for learning a shared \u201cuniversal\u201d parser that operates over an interlingual continuous representation of language, along with language-specific mapping components. Compared with supervised learning, our methods give a consistent 8-10% improvement across several treebanks in low-resource simulations.", "keyphrases": ["neural network model", "treebank", "low-resource language"]} +{"id": "blunsom-etal-2009-gibbs", "title": "A Gibbs Sampler for Phrasal Synchronous Grammar Induction", "abstract": "We present a phrasal synchronous grammar model of translational equivalence. Unlike previous approaches, we do not resort to heuristics or constraints from a word-alignment model, but instead directly induce a synchronous grammar from parallel sentence-aligned corpora. We use a hierarchical Bayesian prior to bias towards compact grammars with small translation units. Inference is performed using a novel Gibbs sampler over synchronous derivations. This sampler side-steps the intractability issues of previous models which required inference over derivation forests. Instead each sampling iteration is highly efficient, allowing the model to be applied to larger translation corpora than previous approaches.", "keyphrases": ["gibbs sampler", "phrasal", "grammar induction", "word-alignment model", "parallel corpus"]} +{"id": "zhao-etal-2019-data", "title": "Data Augmentation with Atomic Templates for Spoken Language Understanding", "abstract": "Spoken Language Understanding (SLU) converts user utterances into structured semantic representations. Data sparsity is one of the main obstacles of SLU due to the high cost of human annotation, especially when domain changes or a new domain comes. In this work, we propose a data augmentation method with atomic templates for SLU, which involves minimum human efforts. The atomic templates produce exemplars for fine-grained constituents of semantic representations. We propose an encoder-decoder model to generate the whole utterance from atomic exemplars. Moreover, the generator could be transferred from source domains to help a new domain which has little data. 
Experimental results show that our method achieves significant improvements on DSTC 2&3 dataset which is a domain adaptation setting of SLU.", "keyphrases": ["spoken language understanding", "new domain", "data augmentation"]} +{"id": "jiang-etal-2020-x", "title": "X-FACTR: Multilingual Factual Knowledge Retrieval from Pretrained Language Models", "abstract": "Language models (LMs) have proven surprisingly successful at capturing factual knowledge by completing cloze-style fill-in-the-blank questions such as \u201cPunta Cana is located in _.\u201d However, while knowledge is both written and queried in many languages, studies on LMs' factual representation ability have almost invariably been performed on English. To assess factual knowledge retrieval in LMs in different languages, we create a multilingual benchmark of cloze-style probes for typologically diverse languages. To properly handle language variations, we expand probing methods from single- to multi-word entities, and develop several decoding algorithms to generate multi-token predictions. Extensive experimental results provide insights about how well (or poorly) current state-of-the-art LMs perform at this task in languages with more or fewer available resources. We further propose a code-switching-based method to improve the ability of multilingual LMs to access knowledge, and verify its effectiveness on several benchmark languages. Benchmark data and code have been released at .", "keyphrases": ["factual knowledge retrieval", "language model", "x-factr"]} +{"id": "przepiorkowski-2007-slavic", "title": "Slavic Information Extraction and Partial Parsing", "abstract": "Information Extraction (IE) often involves some amount of partial syntactic processing. This is clear in cases of interesting high-level IE tasks, such as finding information about who did what to whom (when, where, how and why), but it is also true in case of simpler IE tasks, such as finding company names in texts. The aim of this paper is to give an overview of Slavonic phenomena which pose particular problems for IE and partial parsing, and some phenomena which seem easier to treat in Slavonic than in Germanic or Romance; I also mention various tools which have been used for the partial processing of Slavonic.", "keyphrases": ["partial parsing", "slavic language", "word order", "rich inflection", "nes"]} +{"id": "botha-etal-2018-learning", "title": "Learning To Split and Rephrase From Wikipedia Edit History", "abstract": "Split and rephrase is the task of breaking down a sentence into shorter ones that together convey the same meaning. We extract a rich new dataset for this task by mining Wikipedia's edit history: WikiSplit contains one million naturally occurring sentence rewrites, providing sixty times more distinct split examples and a ninety times larger vocabulary than the WebSplit corpus introduced by Narayan et al. (2017) as a benchmark for this task. Incorporating WikiSplit as training data produces a model with qualitatively better predictions that score 32 BLEU points above the prior best result on the WebSplit benchmark.", "keyphrases": ["split", "wikipedia edit history", "new dataset", "sentence-to-sentence generation task"]} +{"id": "kim-lee-2016-recurrent-neural", "title": "Recurrent Neural Network based Translation Quality Estimation", "abstract": "This paper describes the recurrent neural network based model for translation quality estimation. Recurrent neural network based quality estimation model consists of two parts.
The first part using two bidirectional recurrent neural networks generates the quality information about whether each word in translation is properly translated. The second part using another recurrent neural network predicts the final quality of translation. We apply this model to sentence, word and phrase level of WMT16 Quality Estimation Shared Task. Our results achieve the excellent performance especially in sentence and phrase-level QE.", "keyphrases": ["translation quality estimation", "recurrent neural network", "predictor-estimator architecture"]} +{"id": "lee-etal-2018-improving", "title": "Improving Large-Scale Fact-Checking using Decomposable Attention Models and Lexical Tagging", "abstract": "Fact-checking of textual sources needs to effectively extract relevant information from large knowledge bases. In this paper, we extend an existing pipeline approach to better tackle this problem. We propose a neural ranker using a decomposable attention model that dynamically selects sentences to achieve promising improvement in evidence retrieval F1 by 38.80%, with (x65) speedup compared to a TF-IDF method. Moreover, we incorporate lexical tagging methods into our pipeline framework to simplify the tasks and render the model more generalizable. As a result, our framework achieves promising performance on a large-scale fact extraction and verification dataset with speedup.", "keyphrases": ["fact-checking", "decomposable attention model", "lexical tagging"]} +{"id": "laokulrat-etal-2013-uttime", "title": "UTTime: Temporal Relation Classification using Deep Syntactic Features", "abstract": "In this paper, we present a system, UTTime, which we submitted to TempEval-3 for Task C: Annotating temporal relations. The system uses logistic regression classifiers and exploits features extracted from a deep syntactic parser, including paths between event words in phrase structure trees and their path lengths, and paths between event words in predicate-argument structures and their subgraphs. UTTime achieved an F1 score of 34.9 based on the graph-based evaluation for Task C (ranked 2nd) and 56.45 for Task C-relation-only (ranked 1st) in the TempEval-3 evaluation.", "keyphrases": ["deep syntactic parser", "uttime", "predicate-argument structure feature"]} +{"id": "sun-etal-2018-extracting", "title": "Extracting Entities and Relations with Joint Minimum Risk Training", "abstract": "We investigate the task of joint entity relation extraction. Unlike prior efforts, we propose a new lightweight joint learning paradigm based on minimum risk training (MRT). Specifically, our algorithm optimizes a global loss function which is flexible and effective to explore interactions between the entity model and the relation model. We implement a strong and simple neural network where the MRT is executed. Experiment results on the benchmark ACE05 and NYT datasets show that our model is able to achieve state-of-the-art joint extraction performances.", "keyphrases": ["minimum risk training", "joint learning paradigm", "loss function"]} +{"id": "ji-grishman-2004-applying", "title": "Applying Coreference to Improve Name Recognition", "abstract": "We present a novel method of applying the results of coreference resolution to improve Name Recognition for Chinese. We consider first some methods for gauging the confidence of individual tags assigned by a statistical name tagger. For names with low confidence, we show how these names can be filtered using coreference features to improve accuracy.
In addition, we present rules which use coreference information to correct some name tagging errors. Finally, we show how these gains can be magnified by clustering documents and using cross-document coreference in these clusters. These combined methods yield an absolute improvement of about 3.1% in tagger F score.", "keyphrases": ["coreference", "name recognition", "chinese"]} +{"id": "gimenez-marquez-2007-linguistic", "title": "Linguistic Features for Automatic Evaluation of Heterogenous MT Systems", "abstract": "Evaluation results recently reported by Callison-Burch et al. (2006) and Koehn and Monz (2006), revealed that, in certain cases, the BLEU metric may not be a reliable MT quality indicator. This happens, for instance, when the systems under evaluation are based on different paradigms, and therefore, do not share the same lexicon. The reason is that, while MT quality aspects are diverse, BLEU limits its scope to the lexical dimension. In this work, we suggest using metrics which take into account linguistic features at more abstract levels. We provide experimental results showing that metrics based on deeper linguistic information (syntactic/shallow-semantic) are able to produce more reliable system rankings than metrics based on lexical matching alone, specially when the systems under evaluation are of a different nature.", "keyphrases": ["translation quality", "improved correlation", "human judgment", "gim\u00e9nez", "predicate-argument structure"]} +{"id": "kok-brockett-2010-hitting", "title": "Hitting the Right Paraphrases in Good Time", "abstract": "We present a random-walk-based approach to learning paraphrases from bilingual parallel corpora. The corpora are represented as a graph in which a node corresponds to a phrase, and an edge exists between two nodes if their corresponding phrases are aligned in a phrase table. We sample random walks to compute the average number of steps it takes to reach a ranking of paraphrases with better ones being \"closer\" to a phrase of interest. This approach allows \"feature\" nodes that represent domain knowledge to be built into the graph, and incorporates truncation techniques to prevent the graph from growing too large for efficiency. Current approaches, by contrast, implicitly presuppose the graph to be bipartite, are limited to finding paraphrases that are of length two away from a phrase, and do not generally permit easy incorporation of domain knowledge. Manual evaluation of generated output shows that our approach outperforms the state-of-the-art system of Callison-Burch (2008).", "keyphrases": ["paraphrase", "parallel corpora", "random walk"]} +{"id": "dungs-etal-2018-rumour", "title": "Can Rumour Stance Alone Predict Veracity?", "abstract": "Prior manual studies of rumours suggested that crowd stance can give insights into the actual rumour veracity. Even though numerous studies of automatic veracity classification of social media rumours have been carried out, none explored the effectiveness of leveraging crowd stance to determine veracity. We use stance as an additional feature to those commonly used in earlier studies. We also model the veracity of a rumour using variants of Hidden Markov Models (HMM) and the collective stance information. 
This paper demonstrates that HMMs that use stance and tweets' times as the only features for modelling true and false rumours achieve F1 scores in the range of 80%, outperforming those approaches where stance is used jointly with content and user based features.", "keyphrases": ["stance", "veracity", "rumor detection", "claim-level"]} +{"id": "srikanth-murthy-2008-named", "title": "Named Entity Recognition for Telugu", "abstract": "This paper is about Named Entity Recognition (NER) for Telugu. Not much work has been done in NER for Indian languages in general and Telugu in particular. Adequate annotated corpora are not yet available in Telugu. We recognize that named entities are usually nouns. In this paper we therefore start with our experiments in building a CRF (Conditional Random Fields) based Noun Tagger. Trained on a manually tagged data of 13,425 words and tested on a test data set of 6,223 words, this Noun Tagger has given an F-Measure of about 92%. We then develop a rule based NER system for Telugu. Our focus is mainly on identifying person, place and organization names. A manually checked Named Entity tagged corpus of 72,157 words has been developed using this rule based tagger through bootstrapping. We have then developed a CRF based NER system for Telugu and tested it on several data sets from the Eenaadu and Andhra Prabha newspaper corpora developed by us here. Good performance has been obtained using the majority tag concept. We have obtained overall F-measures between 80% and 97% in various experiments.", "keyphrases": ["entity recognition", "telugu", "indian language"]} +{"id": "wachsmuth-etal-2014-modeling", "title": "Modeling Review Argumentation for Robust Sentiment Analysis", "abstract": "Most text classification approaches model text at the lexical and syntactic level only, lacking domain robustness and explainability. In tasks like sentiment analysis, such approaches can result in limited effectiveness if the texts to be classified consist of a series of arguments. In this paper, we claim that even a shallow model of the argumentation of a text allows for an effective and more robust classification, while providing intuitive explanations of the classification results. Here, we apply this idea to the supervised prediction of sentiment scores for reviews. We combine existing approaches from sentiment analysis with novel features that compare the overall argumentation structure of the given review text to a learned set of common sentiment flow patterns. Our evaluation in two domains demonstrates the benefit of modeling argumentation for text classification in terms of effectiveness and robustness.", "keyphrases": ["review", "argumentation", "global sentiment"]} +{"id": "kato-etal-2004-stochastically", "title": "Stochastically Evaluating the Validity of Partial Parse Trees in Incremental Parsing", "abstract": "This paper proposes a method for evaluating the validity of partial parse trees constructed in incremental parsing. Our method is based on stochastic incremental parsing, and it incrementally evaluates the validity for each partial parse tree on a word-by-word basis. In our method, incremental parser returns partial parse trees at the point where the validity for the partial parse tree becomes greater than a threshold. 
Our technique is effective for improving the accuracy of incremental parsing.", "keyphrases": ["validity", "incremental parser", "transition-based parsing"]} +{"id": "kim-hassan-2020-fastformers", "title": "FastFormers: Highly Efficient Transformer Models for Natural Language Understanding", "abstract": "Transformer-based models are the state-of-the-art for Natural Language Understanding (NLU) applications. Models are getting bigger and better on various tasks. However, Transformer models remain computationally challenging since they are not efficient at inference-time compared to traditional approaches. In this paper, we present FastFormers, a set of recipes to achieve efficient inference-time performance for Transformer-based models on various NLU tasks. We show how carefully utilizing knowledge distillation, structured pruning and numerical optimization can lead to drastic improvements on inference efficiency. We provide effective recipes that can guide practitioners to choose the best settings for various NLU tasks and pretrained models. Applying the proposed recipes to the SuperGLUE benchmark, we achieve from 9.8x up to 233.9x speed-up compared to out-of-the-box models on CPU. On GPU, we also achieve up to 12.4x speed-up with the presented methods. We show that FastFormers can drastically reduce cost of serving 100 million requests from 4,223 USD to just 18 USD on an Azure F16s_v2 instance. This translates to a sustainable runtime by reducing energy consumption 6.9x - 125.8x according to the metrics used in the SustaiNLP 2020 shared task.", "keyphrases": ["natural language understanding", "pruning", "fastformers"]} +{"id": "wang-etal-2021-dynamic", "title": "Dynamic Connected Networks for Chinese Spelling Check", "abstract": "Chinese spelling check (CSC) is a task to detect and correct spelling errors in Chinese text. Most state-of-the-art works on the CSC task adopt a BERT-based non-autoregressive language model, which relies on the output independence assumption. The inappropriate independence assumption prevents BERT-based models from learning the dependencies among target tokens, resulting in an incoherent problem. To address the above issue, we propose a novel architecture named Dynamic Connected Networks (DCN), which generates the candidate Chinese characters via a Pinyin Enhanced Candidate Generator and then utilizes an attention-based network to model the dependencies between two adjacent Chinese characters. The experimental results show that our proposed method achieves a new state-of-the-art performance on three human-annotated datasets.", "keyphrases": ["chinese spelling check", "language model", "dynamic connected networks"]} +{"id": "haque-etal-2009-dependency", "title": "Dependency Relations as Source Context in Phrase-Based SMT", "abstract": "The Phrase-Based Statistical Machine Translation (PB-SMT) model has recently begun to include source context modeling, under the assumption that the proper lexical \nchoice of an ambiguous word can be determined from the context in which it appears. Various types of lexical and syntactic features such as words, parts-of-speech, and \nsupertags have been explored as effective source context in SMT. In this paper, we show that position-independent syntactic dependency relations of the head of a source phrase can be modeled as useful source context to improve target phrase selection and thereby improve overall performance of PB-SMT. 
On a Dutch\u2014English translation task, by combining dependency relations and syntactic contextual features (part-of-speech), we achieved a 1.0 BLEU (Papineni et al., 2002) point improvement (3.1% relative) over the baseline.", "keyphrases": ["source context", "phrase-based smt", "supertag"]} +{"id": "geertzen-etal-2007-multidimensional", "title": "A Multidimensional Approach to Utterance Segmentation and Dialogue Act Classification", "abstract": "In this paper we present a multidimensional approach to utterance segmentation and automatic dialogue act classification. We show that the use of multiple dimensions in distinguishing and annotating units not only supports a more accurate analysis of human communication, but can also help to solve some notorious problems concerning the segmentation of dialogue into functional units. We introduce the use of per-dimension segmentation for dialogue act taxonomies that feature multi-functionality and show that better classification results are obtained when using a separate segmentation for each dimension than when using one segmentation that fits all dimensions. Three machine learning techniques are applied and compared on the task of automatic classification of multiple communicative functions of utterances. The results are encouraging and indicate that communicative functions in important dimensions are easy machine-learnable.", "keyphrases": ["multidimensional approach", "dialogue act classification", "communicative function"]} +{"id": "cao-etal-2017-quasi", "title": "Quasi-Second-Order Parsing for 1-Endpoint-Crossing, Pagenumber-2 Graphs", "abstract": "We propose a new Maximum Subgraph algorithm for first-order parsing to 1-endpoint-crossing, pagenumber-2 graphs. Our algorithm has two characteristics: (1) it separates the construction for noncrossing edges and crossing edges; (2) in a single construction step, whether to create a new arc is deterministic. These two characteristics make our algorithm relatively easy to be extended to incorporate crossing-sensitive second-order features. We then introduce a new algorithm for quasi-second-order parsing. Experiments demonstrate that second-order features are helpful for Maximum Subgraph parsing.", "keyphrases": ["1-endpoint-crossing", "pagenumber-2 graph", "quasi-second-order", "dynamic programming"]} +{"id": "jiang-bansal-2019-self", "title": "Self-Assembling Modular Networks for Interpretable Multi-Hop Reasoning", "abstract": "Multi-hop QA requires a model to connect multiple pieces of evidence scattered in a long context to answer the question. The recently proposed HotpotQA (Yang et al., 2018) dataset is comprised of questions embodying four different multi-hop reasoning paradigms (two bridge entity setups, checking multiple properties, and comparing two entities), making it challenging for a single neural network to handle all four. In this work, we present an interpretable, controller-based Self-Assembling Neural Modular Network (Hu et al., 2017, 2018) for multi-hop reasoning, where we design four novel modules (Find, Relocate, Compare, NoOp) to perform unique types of language reasoning. Based on a question, our layout controller RNN dynamically infers a series of reasoning modules to construct the entire network. Empirically, we show that our dynamic, multi-hop modular network achieves significant improvements over the static, single-hop baseline (on both regular and adversarial evaluation). We further demonstrate the interpretability of our model via three analyses.
First, the controller can softly decompose the multi-hop question into multiple single-hop sub-questions to promote compositional reasoning behavior of the main network. Second, the controller can predict layouts that conform to the layouts designed by human experts. Finally, the intermediate module can infer the entity that connects two distantly-located supporting facts by addressing the sub-question from the controller.", "keyphrases": ["modular network", "interpretable multi-hop reasoning", "single-hop sub-question"]} +{"id": "emami-etal-2019-knowref", "title": "The KnowRef Coreference Corpus: Removing Gender and Number Cues for Difficult Pronominal Anaphora Resolution", "abstract": "We introduce a new benchmark for coreference resolution and NLI, KnowRef, that targets common-sense understanding and world knowledge. Previous coreference resolution tasks can largely be solved by exploiting the number and gender of the antecedents, or have been handcrafted and do not reflect the diversity of naturally occurring text. We present a corpus of over 8,000 annotated text passages with ambiguous pronominal anaphora. These instances are both challenging and realistic. We show that various coreference systems, whether rule-based, feature-rich, or neural, perform significantly worse on the task than humans, who display high inter-annotator agreement. To explain this performance gap, we show empirically that state-of-the-art models often fail to capture context, instead relying on the gender or number of candidate antecedents to make a decision. We then use problem-specific insights to propose a data-augmentation trick called antecedent switching to alleviate this tendency in models. Finally, we show that antecedent switching yields promising results on other tasks as well: we use it to achieve state-of-the-art results on the GAP coreference task.", "keyphrases": ["gender", "coreference resolution", "pronoun", "language bias", "commonsense reasoning"]} +{"id": "kawahara-kurohashi-2006-fully", "title": "A Fully-Lexicalized Probabilistic Model for Japanese Syntactic and Case Structure Analysis", "abstract": "In this paper, we propose a probabilistic model that performs syntactic and case structure analysis in an integrated manner based on case frames. We use large-scale case frames that were automatically constructed from approximately 500 million sentences of web text. The probabilistic model takes predicate-argument structures as its basic unit, defines the probability of generating them, and exploits the lexical preferences encoded in the case frames. Experiments on web text confirmed that parsing accuracy improves, in particular for dependencies related to predicate-argument structure. We also investigated how often the lexical preferences are used and found that they are applied at a high rate of 60.7%, confirming their high coverage.", "keyphrases": ["probabilistic model", "japanese", "case structure analysis", "predicate-argument structure"]}
+{"id": "tackstrom-etal-2012-cross", "title": "Cross-lingual Word Clusters for Direct Transfer of Linguistic Structure", "abstract": "It has been established that incorporating word cluster features derived from large unlabeled corpora can significantly improve prediction of linguistic structure. While previous work has focused primarily on English, we extend these results to other languages along two dimensions. First, we show that these results hold true for a number of languages across families. Second, and more interestingly, we provide an algorithm for inducing cross-lingual clusters and we show that features derived from these clusters significantly improve the accuracy of cross-lingual structure prediction. Specifically, we show that by augmenting direct-transfer systems with cross-lingual cluster features, the relative error of delexicalized dependency parsers, trained on English treebanks and transferred to foreign languages, can be reduced by up to 13%. When applying the same method to direct transfer of named-entity recognizers, we observe relative improvements of up to 26%.", "keyphrases": ["cluster", "linguistic structure", "transfer method"]} +{"id": "kirchhoff-yang-2005-improved", "title": "Improved Language Modeling for Statistical Machine Translation", "abstract": "Statistical machine translation systems use a combination of one or more translation models and a language model. While there is a significant body of research addressing the improvement of translation models, the problem of optimizing language models for a specific translation task has not received much attention. Typically, standard word trigram models are used as an out-of-the-box component in a statistical machine translation system. In this paper we apply language modeling techniques that have proved beneficial in automatic speech recognition to the ACL05 machine translation shared data task and demonstrate improvements over a baseline system with a standard language model.", "keyphrases": ["language model", "pos information", "n-best list"]} +{"id": "linmei-etal-2019-heterogeneous", "title": "Heterogeneous Graph Attention Networks for Semi-supervised Short Text Classification", "abstract": "Short text classification has found rich and critical applications in news and tweet tagging to help users find relevant information. Due to lack of labeled training data in many practical use cases, there is a pressing need for studying semi-supervised short text classification. Most existing studies focus on long texts and achieve unsatisfactory performance on short texts due to the sparsity and limited labeled data. In this paper, we propose a novel heterogeneous graph neural network based method for semi-supervised short text classification, leveraging full advantage of few labeled data and large unlabeled data through information propagation along the graph. In particular, we first present a flexible HIN (heterogeneous information network) framework for modeling the short texts, which can integrate any type of additional information as well as capture their relations to address the semantic sparsity.
Then, we propose Heterogeneous Graph ATtention networks (HGAT) to embed the HIN for short text classification based on a dual-level attention mechanism, including node-level and type-level attentions. The attention mechanism can learn the importance of different neighboring nodes as well as the importance of different node (information) types to a current node. Extensive experimental results have demonstrated that our proposed model outperforms state-of-the-art methods across six benchmark datasets significantly.", "keyphrases": ["text classification", "state-of-the-art method", "heterogeneous graph", "neural graph"]} +{"id": "niculae-danescu-niculescu-mizil-2014-brighter", "title": "Brighter than Gold: Figurative Language in User Generated Comparisons", "abstract": "Comparisons are common linguistic devices used to indicate the likeness of two things. Often, this likeness is not meant in the literal sense\u2014for example, \u201cI slept like a log\u201d does not imply that logs actually sleep. In this paper we propose a computational study of figurative comparisons, or similes. Our starting point is a new large dataset of comparisons extracted from product reviews and annotated for figurativeness. We use this dataset to characterize figurative language in naturally occurring comparisons and reveal linguistic patterns indicative of this phenomenon. We operationalize these insights and apply them to a new task with high relevance to text understanding: distinguishing between figurative and literal comparisons. Finally, we apply this framework to explore the social context in which figurative language is produced, showing that similes are more likely to accompany opinions showing extreme sentiment, and that they are uncommon in reviews deemed helpful.", "keyphrases": ["figurative language", "simile", "tenor"]} +{"id": "wang-etal-2020-docstruct", "title": "DocStruct: A Multimodal Method to Extract Hierarchy Structure in Document for General Form Understanding", "abstract": "Form understanding depends on both textual contents and organizational structure. Although modern OCR performs well, it is still challenging to realize general form understanding because forms are commonly used and of various formats. The table detection and handcrafted features in previous works cannot apply to all forms because of their requirements on formats. Therefore, we concentrate on the most elementary components, the key-value pairs, and adopt multimodal methods to extract features. We consider the form structure as a tree-like or graph-like hierarchy of text fragments. The parent-child relation corresponds to the key-value pairs in forms. We utilize the state-of-the-art models and design targeted extraction modules to extract multimodal features from semantic contents, layout information, and visual images. A hybrid fusion method of concatenation and feature shifting is designed to fuse the heterogeneous features and provide an informative joint representation. We adopt an asymmetric algorithm and negative sampling in our model as well. 
We validate our method on two benchmarks, MedForm and FUNSD, and extensive experiments demonstrate the effectiveness of our method.", "keyphrases": ["multimodal method", "general form understanding", "graph-like hierarchy", "docstruct"]} +{"id": "garneau-etal-2020-robust", "title": "A Robust Self-Learning Method for Fully Unsupervised Cross-Lingual Mappings of Word Embeddings: Making the Method Robustly Reproducible as Well", "abstract": "In this paper, we reproduce the experiments of Artetxe et al. (2018b) regarding the robust self-learning method for fully unsupervised cross-lingual mappings of word embeddings. We show that the reproduction of their method is indeed feasible with some minor assumptions. We further investigate the robustness of their model by introducing four new languages that are less similar to English than the ones proposed by the original paper. In order to assess the stability of their model, we also conduct a grid search over sensible hyperparameters. We then propose key recommendations that apply to any research project in order to deliver fully reproducible research.", "keyphrases": ["robust self-learning method", "unsupervised cross-lingual mapping", "new language"]} +{"id": "lin-etal-2018-multi", "title": "Multi-Hop Knowledge Graph Reasoning with Reward Shaping", "abstract": "Multi-hop reasoning is an effective approach for query answering (QA) over incomplete knowledge graphs (KGs). The problem can be formulated in a reinforcement learning (RL) setup, where a policy-based agent sequentially extends its inference path until it reaches a target. However, in an incomplete KG environment, the agent receives low-quality rewards corrupted by false negatives in the training data, which harms generalization at test time. Furthermore, since no golden action sequence is used for training, the agent can be misled by spurious search trajectories that incidentally lead to the correct answer. We propose two modeling advances to address both issues: (1) we reduce the impact of false negative supervision by adopting a pretrained one-hop embedding model to estimate the reward of unobserved facts; (2) we counter the sensitivity to spurious paths of on-policy RL by forcing the agent to explore a diverse set of paths using randomly generated edge masks. Our approach significantly improves over existing path-based KGQA models on several benchmark datasets and is comparable or better than embedding-based models.", "keyphrases": ["knowledge graph", "multi-hop reasoning", "multihopkg", "markov decision process", "path-based method"]} +{"id": "stymne-2011-definite", "title": "Definite Noun Phrases in Statistical Machine Translation into Scandinavian Languages", "abstract": "In this thesis I aim to improve phrase-based statistical machine translation (PBSMT) in a number of ways by the use of text harmonization strategies. PBSMT systems are built by training statistical models on large corpora of human translations. This architecture generally performs well for languages with similar structure. If the languages are different for example with respect to word order or morphological complexity, however, the standard methods do not tend to work well. I address this problem through text harmonization, by making texts more similar before training and applying a PBSMT system. I investigate how text harmonization can be used to improve PBSMT with a focus on four areas: compounding, definiteness, word order, and unknown words. 
For the first three areas, the focus is on linguistic differences between languages, which I address by applying transformation rules, using either rule-based or machine learning-based techniques, to the source or target data. For the last area, unknown words, I harmonize the translation input to the training data by replacing unknown words with known alternatives. I show that translation into languages with closed compounds can be improved by splitting and merging compounds. I develop new merging algorithms that outperform previously suggested algorithms and show how part-of-speech tags can be used to improve the order of compound parts. Scandinavian definite noun phrases are identified as a problem for PBSMT in translation into Scandinavian languages and I propose a preprocessing approach that addresses this problem and gives large improvements over a baseline. Several previous proposals for how to handle differences in reordering exist; I propose two types of extensions, iterating reordering and word alignment and using automatically induced word classes, which allow these methods to be used for less-resourced languages. Finally I identify several ways of replacing unknown words in the translation input, most notably a spell checking-inspired algorithm, which can be trained using character-based PBSMT techniques. Overall I present several approaches for extending PBSMT by the use of pre- and postprocessing techniques for text harmonization, and show experimentally that these methods work. Text harmonization methods are an efficient way to improve statistical machine translation within the phrase-based approach, without resorting to more complex models.", "keyphrases": ["statistical machine translation", "scandinavian language", "danish"]} +{"id": "roy-goldwasser-2020-weakly", "title": "Weakly Supervised Learning of Nuanced Frames for Analyzing Polarization in News Media", "abstract": "In this paper, we suggest a minimally supervised approach for identifying nuanced frames in news article coverage of politically divisive topics. We suggest to break the broad policy frames suggested by Boydstun et al., 2014 into fine-grained subframes which can capture differences in political ideology in a better way. We evaluate the suggested subframes and their embedding, learned using minimal supervision, over three topics, namely, immigration, gun-control, and abortion. We demonstrate the ability of the subframes to capture ideological differences and analyze political discourse in news media.", "keyphrases": ["polarization", "ideology", "news medium"]} +{"id": "tsakalidis-liakata-2020-sequential", "title": "Sequential Modelling of the Evolution of Word Representations for Semantic Change Detection", "abstract": "Semantic change detection concerns the task of identifying words whose meaning has changed over time. Current state-of-the-art approaches operating on neural embeddings detect the level of semantic change in a word by comparing its vector representation in two distinct time periods, without considering its evolution through time. In this work, we propose three variants of sequential models for detecting semantically shifted words, effectively accounting for the changes in the word representations over time. Through extensive experimentation under various settings with synthetic and real data we showcase the importance of sequential modelling of word vectors through time for semantic change detection.
Finally, we compare different approaches in a quantitative manner, demonstrating that temporal modelling of word representations yields a clear-cut advantage in performance.", "keyphrases": ["evolution", "semantic change detection", "word vector", "sequential modelling"]} +{"id": "ravi-kozareva-2018-self-governing", "title": "Self-Governing Neural Networks for On-Device Short Text Classification", "abstract": "Deep neural networks reach state-of-the-art performance for wide range of natural language processing, computer vision and speech applications. Yet, one of the biggest challenges is running these complex networks on devices such as mobile phones or smart watches with tiny memory footprint and low computational capacity. We propose on-device Self-Governing Neural Networks (SGNNs), which learn compact projection vectors with local sensitive hashing. The key advantage of SGNNs over existing work is that they surmount the need for pre-trained word embeddings and complex networks with huge parameters. We conduct extensive evaluation on dialog act classification and show significant improvement over state-of-the-art results. Our findings show that SGNNs are effective at capturing low-dimensional semantic text representations, while maintaining high accuracy.", "keyphrases": ["short text classification", "device", "low computational capacity"]} +{"id": "tebbifakhr-etal-2019-machine", "title": "Machine Translation for Machines: the Sentiment Classification Use Case", "abstract": "We propose a neural machine translation (NMT) approach that, instead of pursuing adequacy and fluency (\u201chuman-oriented\u201d quality criteria), aims to generate translations that are best suited as input to a natural language processing component designed for a specific downstream task (a \u201cmachine-oriented\u201d criterion). Towards this objective, we present a reinforcement learning technique based on a new candidate sampling strategy, which exploits the results obtained on the downstream task as weak feedback. Experiments in sentiment classification of Twitter data in German and Italian show that feeding an English classifier with \u201cmachine-oriented\u201d translations significantly improves its performance. Classification results outperform those obtained with translations produced by general-purpose NMT models as well as by an approach based on reinforcement learning. Moreover, our results on both languages approximate the classification accuracy computed on gold standard English tweets.", "keyphrases": ["adequacy", "machine translation", "reward"]} +{"id": "zhu-etal-2008-learning", "title": "Learning a Stopping Criterion for Active Learning for Word Sense Disambiguation and Text Classification", "abstract": "In this paper, we address the problem of knowing when to stop the process of active learning. We propose a new statistical learning approach, called minimum expected error strategy, to defining a stopping criterion through estimation of the classifier\u2019s expected error on future unlabeled examples in the active learning process. 
In experiments on active learning for word sense disambiguation and text classification tasks, experimental results show that the new proposed stopping criterion can reduce approximately 50% human labeling costs in word sense disambiguation with degradation of 0.5% average accuracy, and approximately 90% costs in text classification with degradation of 2% average accuracy.", "keyphrases": ["stopping criterion", "active learning", "word sense disambiguation", "future unlabeled example"]} +{"id": "yu-etal-2013-compound", "title": "Compound Embedding Features for Semi-supervised Learning", "abstract": "To solve data sparsity problem, recently there has been a trend in discriminative methods of NLP to use representations of lexical items learned from unlabeled data as features. In this paper, we investigated the usage of word representations learned by neural language models, i.e. word embeddings. The direct usage has disadvantages such as large amount of computation, inadequacy with dealing word ambiguity and rare-words, and the problem of linear non-separability. To overcome these problems, we instead built compound features from continuous word embeddings based on clustering. Experiments showed that the compound features not only improved the performances on several NLP tasks, but also ran faster, suggesting the potential of embeddings.", "keyphrases": ["usage", "word embedding", "compound"]} +{"id": "allman-etal-2012-linguists", "title": "Linguist's Assistant: A Multi-Lingual Natural Language Generator based on Linguistic Universals, Typologies, and Primitives", "abstract": "Linguist's Assistant (LA) is a large scale semantic analyzer and multi-lingual natural language generator designed and developed entirely from a linguist's perspective. The system incorporates extensive typological, semantic, syntactic, and discourse research into its semantic representational system and its transfer and synthesizing grammars. LA has been tested with English, Korean, Kewa (Papua New Guinea), Jula (Cote d'Ivoire), and North Tanna (Vanuatu), and proof-of-concept lexicons and grammars have been developed for Spanish, Urdu, Tagalog, Chinantec (Mexico), and Angas (Nigeria). This paper will summarize the major components of the NLG system, and then present the results of experiments that were performed to determine the quality of the generated texts. The experiments indicate that when experienced mother-tongue translators use the drafts generated by LA, their productivity is typically quadrupled without any loss of quality.", "keyphrases": ["assistant", "productivity", "linguist"]} +{"id": "isard-konrad-2022-dgs", "title": "MY DGS \u2013 ANNIS: ANNIS and the Public DGS Corpus", "abstract": "In 2018 the DGS-Korpus project published the first full release of the Public DGS Corpus. The data have already been published in two different ways to fulfil the needs of different user groups, and we have now published the third portal MY DGS \u2013 ANNIS using the ANNIS browser-based corpus software. ANNIS is a corpus query tool for visualization and querying of multi-layer corpus data. It has its own query language, AQL, and is accessed from a web browser without requiring a login. It allows more complex queries and visualizations than those provided by the existing research portal. We introduce ANNIS and its query language AQL, describe the structure of MY DGS \u2013 ANNIS, and give some example queries.
The use cases with queries over multiple annotation tiers and metadata illustrate the research potential of this powerful tool and show how students and researchers can explore the Public DGS Corpus.", "keyphrases": ["annis", "public dgs corpus", "project"]} +{"id": "ayan-etal-2008-improving", "title": "Improving Alignments for Better Confusion Networks for Combining Machine Translation Systems", "abstract": "The state-of-the-art system combination method for machine translation (MT) is the word-based combination using confusion networks. One of the crucial steps in confusion network decoding is the alignment of different hypotheses to each other when building a network. In this paper, we present new methods to improve alignment of hypotheses using word synonyms and a two-pass alignment strategy. We demonstrate that combination with the new alignment technique yields up to 2.9 BLEU point improvement over the best input system and up to 1.3 BLEU point improvement over a state-of-the-art combination method on two different language pairs.", "keyphrases": ["confusion network", "hypothesis", "word-level combination"]} +{"id": "chen-etal-2009-cognitive", "title": "A Cognitive-based Annotation System for Emotion Computing", "abstract": "Emotion computing is very important for expressive information extraction. In this paper, we provide a robust and versatile emotion annotation scheme based on cognitive emotion theories, which not only can annotate both explicit and implicit emotion expressions, but also can encode different levels of emotion information for the given emotion content. In addition, motivated by a cognitive framework, an automatic emotion annotation system is developed, and large and comparatively high-quality emotion corpora are created for emotion computing, one in Chinese and the other in English. Such an annotation system can be easily adapted for different kinds of emotion applications and be extended to other languages.", "keyphrases": ["annotation system", "emotion computing", "neutral-sentence"]} +{"id": "moon-etal-2019-unified", "title": "A Unified Neural Coherence Model", "abstract": "Recently, neural approaches to coherence modeling have achieved state-of-the-art results in several evaluation tasks. However, we show that most of these models often fail on harder tasks with more realistic application scenarios. In particular, the existing models underperform on tasks that require the model to be sensitive to local contexts such as candidate ranking in conversational dialogue and in machine translation. In this paper, we propose a unified coherence model that incorporates sentence grammar, inter-sentence coherence relations, and global coherence patterns into a common neural framework. With extensive experiments on local and global discrimination tasks, we demonstrate that our proposed model outperforms existing models by a good margin, and establish a new state-of-the-art.", "keyphrases": ["coherence model", "sentence grammar", "common neural framework", "discrimination task"]} +{"id": "brooke-etal-2012-unsupervised", "title": "Unsupervised Stylistic Segmentation of Poetry with Change Curves and Extrinsic Features", "abstract": "The identification of stylistic inconsistency is a challenging task relevant to a number of genres, including literature. In this work, we carry out stylistic segmentation of a well-known poem, The Waste Land by T.S. Eliot, which is traditionally analyzed in terms of numerous voices which appear throughout the text. 
Our method, adapted from work in topic segmentation and plagiarism detection, predicts breaks based on a curve of stylistic change which combines information from a diverse set of features, most notably co-occurrence in larger corpora via reduced-dimensionality vectors. We show that this extrinsic information is more useful than (within-text) distributional features. We achieve well above baseline performance on both artificial mixed-style texts and The Waste Land itself.", "keyphrases": ["segmentation", "poetry", "voice"]} +{"id": "goyal-etal-2009-streaming", "title": "Streaming for large scale NLP: Language Modeling", "abstract": "In this paper, we explore a streaming algorithm paradigm to handle large amounts of data for NLP problems. We present an efficient low-memory method for constructing high-order approximate n-gram frequency counts. The method is based on a deterministic streaming algorithm which efficiently computes approximate frequency counts over a stream of data while employing a small memory footprint. We show that this method easily scales to billion-word monolingual corpora using a conventional (8 GB RAM) desktop machine. Statistical machine translation experimental results corroborate that the resulting high-n approximate small language model is as effective as models obtained from other count pruning methods.", "keyphrases": ["language modeling", "n-gram frequency count", "streaming"]} +{"id": "chen-etal-2020-modeling", "title": "Modeling Discourse Structure for Document-level Neural Machine Translation", "abstract": "Recently, document-level neural machine translation (NMT) has become a hot topic in the community of machine translation. Despite its success, most of existing studies ignored the discourse structure information of the input document to be translated, which has shown effective in other tasks. In this paper, we propose to improve document-level NMT with the aid of discourse structure information. Our encoder is based on a hierarchical attention network (HAN) (Miculicich et al., 2018). Specifically, we first parse the input document to obtain its discourse structure. Then, we introduce a Transformer-based path encoder to embed the discourse structure information of each word. Finally, we combine the discourse structure information with the word embedding before it is fed into the encoder. Experimental results on the English-to-German dataset show that our model can significantly outperform both Transformer and Transformer+HAN.", "keyphrases": ["discourse structure", "neural machine translation", "input document"]} +{"id": "kreutzer-etal-2018-reliability", "title": "Reliability and Learnability of Human Bandit Feedback for Sequence-to-Sequence Reinforcement Learning", "abstract": "We present a study on reinforcement learning (RL) from human bandit feedback for sequence-to-sequence learning, exemplified by the task of bandit neural machine translation (NMT). We investigate the reliability of human bandit feedback, and analyze the influence of reliability on the learnability of a reward estimator, and the effect of the quality of reward estimates on the overall RL task. Our analysis of cardinal (5-point ratings) and ordinal (pairwise preferences) feedback shows that their intra- and inter-annotator \u03b1-agreement is comparable. Best reliability is obtained for standardized cardinal feedback, and cardinal feedback is also easiest to learn and generalize from. 
Finally, improvements of over 1 BLEU can be obtained by integrating a regression-based reward estimator trained on cardinal feedback for 800 translations into RL for NMT. This shows that RL is possible even from small amounts of fairly reliable human feedback, pointing to a great potential for applications at larger scale.", "keyphrases": ["learnability", "human bandit feedback", "reward estimator"]} +{"id": "torisawa-2006-acquiring", "title": "Acquiring Inference Rules with Temporal Constraints by Using Japanese Coordinated Sentences and Noun-Verb Co-occurrences", "abstract": "This paper shows that inference rules with temporal constraints can be acquired by using verb-verb co-occurrences in Japanese coordinated sentences and verb-noun co-occurrences. For example, our unsupervised acquisition method could obtain the inference rule \"If someone enforces a law, usually someone enacts the law at the same time as or before the enforcing of the law\" since the verbs \"enact\" and \"enforce\" frequently co-occurred in coordinated sentences and the verbs also frequently co-occurred with the noun \"law\". We also show that the accuracy of the acquisition is improved by using the occurrence frequency of a single verb, which we assume indicates how generic the meaning of the verb is.", "keyphrases": ["inference rule", "temporal constraint", "coordinated sentence", "verb-noun co-occurrence"]} +{"id": "ramisch-etal-2010-multiword", "title": "Multiword Expressions in the wild? The mwetoolkit comes in handy", "abstract": "The mwetoolkit is a tool for automatic extraction of Multiword Expressions (MWEs) from monolingual corpora. It both generates and validates MWE candidates. The generation is based on surface forms, while for the validation, a series of criteria for removing noise are provided, such as some (language independent) association measures. In this paper, we present the use of the mwetoolkit in a standard configuration, for extracting MWEs from a corpus of general-purpose English. The functionalities of the toolkit are discussed in terms of a set of selected examples, comparing it with related work on MWE extraction.", "keyphrases": ["mwetoolkit", "association measure", "multiword expressions"]} +{"id": "sogaard-kuhn-2009-empirical", "title": "Empirical Lower Bounds on Alignment Error Rates in Syntax-Based Machine Translation", "abstract": "The empirical adequacy of synchronous context-free grammars of rank two (2-SCFGs) (Satta and Peserico, 2005), used in syntax-based machine translation systems such as Wu (1997), Zhang et al. (2006) and Chiang (2007), in terms of what alignments they induce, has been discussed in Wu (1997) and Wellington et al. (2006), but with a one-sided focus on so-called \"inside-out alignments\". Other alignment configurations that cannot be induced by 2-SCFGs are identified in this paper, and their frequencies across a wide collection of hand-aligned parallel corpora are examined. Empirical lower bounds on two measures of alignment error rate, i.e. the one introduced in Och and Ney (2000) and one where only complete translation units are considered, are derived for 2-SCFGs and related formalisms.", "keyphrases": ["syntax-based machine translation", "adequacy", "low bound"]} +{"id": "deri-knight-2016-grapheme", "title": "Grapheme-to-Phoneme Models for (Almost) Any Language", "abstract": "Grapheme-to-phoneme (g2p) models are rarely available in low-resource languages, as the creation of training and evaluation data is expensive and time-consuming. 
We use Wiktionary to obtain more than 650k word-pronunciation pairs in more than 500 languages. We then develop phoneme and language distance metrics based on phonological and linguistic knowledge; applying those, we adapt g2p models for high-resource languages to create models for related low-resource languages. We provide results for models for 229 adapted languages.", "keyphrases": ["low-resource language", "phoneme", "g2p model"]} +{"id": "tiedemann-ljubesic-2012-efficient", "title": "Efficient Discrimination Between Closely Related Languages", "abstract": "In this paper, we revisit the problem of language identification with the focus on proper discrimination between closely related languages. Strong similarities between certain languages make it very hard to classify them correctly using standard methods that have been proposed in the literature. Dedicated models that focus on specific discrimination tasks help to improve the accuracy of general-purpose language identification tools. We propose and compare methods based on simple document classification techniques trained on parallel corpora of closely related languages and methods that emphasize discriminating features in terms of blacklisted words. Our experiments demonstrate that these techniques are highly accurate for the difficult task of discriminating between Bosnian, Croatian and Serbian. The best setup yields an absolute improvement of over 9% in accuracy over the best performing baseline using a state-of-the-art language identification tool.", "keyphrases": ["discrimination", "related language", "croatian", "main bottleneck", "political motive"]} +{"id": "fornaciari-poesio-2014-identifying", "title": "Identifying fake Amazon reviews as learning from crowds", "abstract": "Customers who buy products such as books online often rely on other customers' reviews more than on reviews found in specialist magazines. Unfortunately the confidence in such reviews is often misplaced due to the explosion of so-called sock puppetry: authors writing glowing reviews of their own books. Identifying such deceptive reviews is not easy. The first contribution of our work is the creation of a collection including a number of genuinely deceptive Amazon book reviews in collaboration with crime writer Jeremy Duns, who has devoted a great deal of effort to unmasking sock puppeting among his colleagues. But there can be no certainty concerning the other reviews in the collection: All we have is a number of cues, also developed in collaboration with Duns, suggesting that a review may be genuine or deceptive. Thus this corpus is an example of a collection where it is not possible to acquire the actual label for all instances, and where clues of deception were treated as annotators who assign them heuristic labels. A number of approaches have been proposed for such cases; we adopt here the 'learning from crowds' approach proposed by Raykar et al. (2010). Thanks to Duns' certainly fake reviews, the second contribution of this work consists in the evaluation of the effectiveness of different methods of annotation, according to the performance of models trained to detect deceptive reviews. 
", "keyphrases": ["crowd", "amazon book review", "fake review"]} +{"id": "palakurthi-etal-2015-classification", "title": "Classification of Attributes in a Natural Language Query into Different SQL Clauses", "abstract": "Attribute information in a natural language query is one of the key features for converting a natural language query into a Structured Query Language (SQL) in Natural Language Interface to Database systems. In this paper, we explore the task of classifying the attributes present in a natural language query into different SQL clauses in a SQL query. In particular, we investigate the effectiveness of various features and Conditional Random Fields for this task. Our system uses a statistical classifier trained on manually prepared data. We report our results on three different domains and also show how our system can be used for generating a complete SQL query.", "keyphrases": ["attribute", "natural language query", "sql clause"]} +{"id": "tang-etal-2021-multilingual", "title": "Multilingual Translation from Denoising Pre-Training", "abstract": "Recent work demonstrates the potential of training one model for multilingual machine translation. In parallel, denoising pretraining using unlabeled monolingual data as a starting point for finetuning bitext machine translation systems has demonstrated strong performance gains. However, little has been explored on the potential to combine denoising pretraining with multilingual machine translation in a single model. In this work, we fill this gap by studying how multilingual translation models can be created through multilingual finetuning. Finetuning a multilingual model from a denoising pretrained model incorporates the benefits of large quantities of unlabeled monolingual data, which is particularly important for low-resource languages where bitext is rare. Further, we create the ML50 benchmark to facilitate reproducible research by standardizing training and evaluation data. On ML50, we show that multilingual finetuning significantly improves over multilingual models trained from scratch and over bilingual finetuning for translation into English. We also find that multilingual fine-tuning can significantly improve over multilingual models trained from scratch for zero-shot translation on non-English directions. Finally, we discuss that the pretraining and finetuning paradigm alone is not enough to address the challenges of multilingual models for to-Many directions.", "keyphrases": ["denoising", "pretraining", "multilingual model"]} +{"id": "bos-nissim-2006-empirical", "title": "An Empirical Approach to the Interpretation of Superlatives", "abstract": "In this paper we introduce an empirical approach to the semantic interpretation of superlative adjectives. We present a corpus annotated for superlatives and propose an interpretation algorithm that uses a wide-coverage parser and produces semantic representations. We achieve F-scores between 0.84 and 0.91 for detecting attributive superlatives and an accuracy in the range of 0.69-0.84 for determining the correct comparison set. 
As far as we are aware, this is the first automated approach to superlatives for open-domain texts and questions.", "keyphrases": ["empirical approach", "interpretation", "superlative", "attributive superlative"]} +{"id": "joshi-penstein-rose-2009-generalizing", "title": "Generalizing Dependency Features for Opinion Mining", "abstract": "We explore how features based on syntactic dependency relations can be utilized to improve performance on opinion mining. Using a transformation of dependency relation triples, we convert them into \"composite back-off features\" that generalize better than the regular lexicalized dependency relation features. Experiments comparing our approach with several other approaches that generalize dependency features or ngrams demonstrate the utility of composite back-off features.", "keyphrases": ["opinion mining", "syntactic feature", "dependency relation pair", "pos tag"]} +{"id": "baranes-sagot-2014-language", "title": "A Language-independent Approach to Extracting Derivational Relations from an Inflectional Lexicon", "abstract": "In this paper, we describe and evaluate an unsupervised method for acquiring pairs of lexical entries belonging to the same morphological family, i.e., derivationally related words, starting from a purely inflectional lexicon. Our approach relies on transformation rules that relate lexical entries with one another, and which are automatically extracted from the inflected lexicon based on surface form analogies and on part-of-speech information. It is generic enough to be applied to any language with a mainly concatenative derivational morphology. Results were obtained and evaluated on English, French, German and Spanish. Precision results are satisfying, and our French results favorably compare with another resource, although its construction relied on manually developed lexicographic information whereas our approach only requires an inflectional lexicon.", "keyphrases": ["language-independent approach", "inflectional lexicon", "spanish"]} +{"id": "sornil-chaiwanarom-2004-combining", "title": "Combining Prediction by Partial Matching and Logistic Regression for Thai Word Segmentation", "abstract": "Word segmentation is an important part of many applications, including information retrieval, information filtering, document analysis, and text summarization. In the Thai language, the process is complicated since words are written continuously, and their structures are not well-defined. A recognized effective approach to word segmentation is Longest Matching, a dictionary-based method. Nevertheless, this method suffers from character-level and syllable-level ambiguities in determining word boundaries. This paper proposes a technique for Thai word segmentation using a two-step approach. First, text is segmented, using an application of Prediction by Partial Matching, into syllables whose structures are better defined. This reduces the earlier type of ambiguity. Then, the syllables are combined into words by an application of a syllable-level longest matching method together with a logistic regression model which takes into account contextual information. 
The experimental results show a syllable segmentation accuracy of more than 96.65% and an overall word segmentation accuracy of 97%.", "keyphrases": ["partial matching", "thai word segmentation", "ambiguity"]} +{"id": "lin-etal-2020-triggerner", "title": "TriggerNER: Learning with Entity Triggers as Explanations for Named Entity Recognition", "abstract": "Training neural models for named entity recognition (NER) in a new domain often requires additional human annotations (e.g., tens of thousands of labeled instances) that are usually expensive and time-consuming to collect. Thus, a crucial research question is how to obtain supervision in a cost-effective way. In this paper, we introduce \u201centity triggers,\u201d an effective proxy of human explanations for facilitating label-efficient learning of NER models. An entity trigger is defined as a group of words in a sentence that helps to explain why humans would recognize an entity in the sentence. We crowd-sourced 14k entity triggers for two well-studied NER datasets. Our proposed model, Trigger Matching Network, jointly learns trigger representations and a soft matching module with self-attention such that it can easily generalize to unseen sentences for tagging. Our framework is significantly more cost-effective than the traditional neural NER frameworks. Experiments show that using only 20% of the trigger-annotated sentences results in performance comparable to using 70% of conventionally annotated sentences.", "keyphrases": ["explanation", "named entity recognition", "new domain"]} +{"id": "zhou-etal-2016-cross", "title": "Cross-Lingual Sentiment Classification with Bilingual Document Representation Learning", "abstract": "Cross-lingual sentiment classification aims to adapt the sentiment resource in a resource-rich language to a resource-poor language. In this study, we propose a representation learning approach which simultaneously learns vector representations for the texts in both the source and the target languages. Different from previous research which only learns bilingual word embeddings, our Bilingual Document Representation Learning model BiDRL directly learns document representations. Both semantic and sentiment correlations are utilized to map the bilingual texts into the same embedding space. The experiments are based on the multilingual multi-domain Amazon review dataset. We use English as the source language and use Japanese, German and French as the target languages. The experimental results show that BiDRL outperforms the state-of-the-art methods for all the target languages.", "keyphrases": ["sentiment classification", "document representation", "source language"]} +{"id": "iandola-etal-2020-squeezebert", "title": "SqueezeBERT: What can computer vision teach NLP about efficient neural networks?", "abstract": "Humans read and write hundreds of billions of messages every day. Further, due to the availability of large datasets, large computing systems, and better neural network models, natural language processing (NLP) technology has made significant strides in understanding, proofreading, and organizing these messages. Thus, there is a significant opportunity to deploy NLP in myriad applications to help web users, social networks, and businesses. Toward this end, we consider smartphones and other mobile devices as crucial platforms for deploying NLP models at scale. 
However, today's highly-accurate NLP neural network models such as BERT and RoBERTa are extremely computationally expensive, with BERT-base taking 1.7 seconds to classify a text snippet on a Pixel 3 smartphone. To begin to address this problem, we draw inspiration from the computer vision community, where work such as MobileNet has demonstrated that grouped convolutions (e.g. depthwise convolutions) can enable speedups without sacrificing accuracy. We demonstrate how to replace several operations in self-attention layers with grouped convolutions, and we use this technique in a novel network architecture called SqueezeBERT, which runs 4.3x faster than BERT-base on the Pixel 3 while achieving competitive accuracy on the GLUE test set. A PyTorch-based implementation of SqueezeBERT is available as part of the Hugging Face Transformers library: ", "keyphrases": ["convolution", "squeezebert", "feed-forward network"]} +{"id": "lynn-etal-2017-human", "title": "Human Centered NLP with User-Factor Adaptation", "abstract": "We pose the general task of user-factor adaptation \u2013 adapting supervised learning models to real-valued user factors inferred from a background of their language, reflecting the idea that a piece of text should be understood within the context of the user that wrote it. We introduce a continuous adaptation technique, suited for real-valued user factors that are common in social science and bringing us closer to personalized NLP, adapting to each user uniquely. We apply this technique with known user factors including age, gender, and personality traits, as well as latent factors, evaluating over five tasks: POS tagging, PP-attachment, sentiment analysis, sarcasm detection, and stance detection. Adaptation provides statistically significant benefits for 3 of the 5 tasks: up to +1.2 points for PP-attachment, +3.4 points for sarcasm, and +3.0 points for stance.", "keyphrases": ["user-factor adaptation", "demographic", "domain adaptation problem"]} +{"id": "chen-ng-2013-linguistically", "title": "Linguistically Aware Coreference Evaluation Metrics", "abstract": "Virtually all the commonly-used evaluation metrics for entity coreference resolution are linguistically agnostic, treating the mentions to be clustered as generic rather than linguistic objects. We argue that the performance of an entity coreference resolver cannot be accurately reflected when it is evaluated using linguistically agnostic metrics. Consequently, we propose a framework for incorporating linguistic awareness into commonly-used coreference evaluation metrics.", "keyphrases": ["evaluation metric", "mention", "agnostic metric"]} +{"id": "pavlick-etal-2015-adding", "title": "Adding Semantics to Data-Driven Paraphrasing", "abstract": "We add an interpretable semantics to the paraphrase database (PPDB). To date, the relationship between phrase pairs in the database has been weakly defined as approximately equivalent. We show that these pairs represent a variety of relations, including directed entailment (little girl/girl) and exclusion (nobody/someone). We automatically assign semantic entailment relations to entries in PPDB using features derived from past work on discovering inference rules from text and semantic taxonomy induction. We demonstrate that our model assigns these relations with high accuracy. 
In a downstream RTE task, our labels rival relations from WordNet and improve the coverage of a proof-based RTE system by 17%.", "keyphrases": ["phrase pair", "entailment", "inference rule"]} +{"id": "zhang-clark-2007-chinese", "title": "Chinese Segmentation with a Word-Based Perceptron Algorithm", "abstract": "Standard approaches to Chinese word segmentation treat the problem as a tagging task, assigning labels to the characters in the sequence indicating whether the character marks a word boundary. Discriminatively trained models based on local character features are used to make the tagging decisions, with Viterbi decoding finding the highest scoring segmentation. In this paper we propose an alternative, word-based segmentor, which uses features based on complete words and word sequences. The generalized perceptron algorithm is used for discriminative training, and we use a beam-search decoder. Closed tests on the first and second SIGHAN bakeoffs show that our system is competitive with the best in the literature, achieving the highest reported F-scores for a number of corpora.", "keyphrases": ["word segmentation", "perceptron method", "cws approach", "newswire"]} +{"id": "brunato-etal-2018-sentence", "title": "Is this Sentence Difficult? Do you Agree?", "abstract": "In this paper, we present a crowdsourcing-based approach to model the human perception of sentence complexity. We collect a large corpus of sentences rated with judgments of complexity for two typologically-different languages, Italian and English. We test our approach in two experimental scenarios aimed to investigate the contribution of a wide set of lexical, morpho-syntactic and syntactic phenomena in predicting i) the degree of agreement among annotators independently from the assigned judgment and ii) the perception of sentence complexity.", "keyphrases": ["perception", "complexity", "annotator"]} +{"id": "schlangen-etal-2009-incremental", "title": "Incremental Reference Resolution: The Task, Metrics for Evaluation, and a Bayesian Filtering Model that is Sensitive to Disfluencies", "abstract": "In this paper we do two things: a) we discuss in general terms the task of incremental reference resolution (IRR), in particular resolution of exophoric reference, and specify metrics for measuring the performance of dialogue system components tackling this task, and b) we present a simple Bayesian filtering model of IRR that performs reasonably well just using words directly (no structure information and no hand-coded semantics): it picks the right referent out of 12 for around 50% of real-world dialogue utterances in our test corpus. It is also able to learn to interpret not only words but also hesitations, just as humans have been shown to do in similar situations, namely as markers of references to hard-to-describe entities.", "keyphrases": ["bayesian filtering model", "incremental reference resolution", "system response"]} +{"id": "yavuz-etal-2019-deepcopy", "title": "DeepCopy: Grounded Response Generation with Hierarchical Pointer Networks", "abstract": "Recent advances in neural sequence-to-sequence models have led to promising results for several language generation-based tasks, including dialogue response generation, summarization, and machine translation. However, these models are known to have several problems, especially in the context of chit-chat based dialogue systems: they tend to generate short and dull responses that are often too generic. 
Furthermore, these models do not ground conversational responses on knowledge and facts, resulting in turns that are not accurate, informative and engaging for the users. In this paper, we propose and experiment with a series of response generation models that aim to serve in the general scenario where in addition to the dialogue context, relevant unstructured external knowledge in the form of text is also assumed to be available for models to harness. Our proposed approach extends pointer-generator networks (See et al., 2017) by allowing the decoder to hierarchically attend and copy from external knowledge in addition to the dialogue context. We empirically show the effectiveness of the proposed model compared to several baselines including (Ghazvininejad et al., 2018; Zhang et al., 2018) through both automatic evaluation metrics and human evaluation on the ConvAI2 dataset.", "keyphrases": ["response generation", "dialogue system", "pointer-generator network", "deepcopy", "copy mechanism"]} +{"id": "zhao-etal-2010-jointly", "title": "Jointly Modeling Aspects and Opinions with a MaxEnt-LDA Hybrid", "abstract": "Discovering and summarizing opinions from online reviews is an important and challenging task. A commonly-adopted framework generates structured review summaries with aspects and opinions. Recently topic models have been used to identify meaningful review aspects, but existing topic models do not identify aspect-specific opinion words. In this paper, we propose a MaxEnt-LDA hybrid model to jointly discover both aspects and aspect-specific opinion words. We show that with a relatively small amount of training data, our model can effectively identify aspect and opinion words simultaneously. We also demonstrate the domain adaptability of our model.", "keyphrases": ["maxent-lda", "topic model", "hybrid model", "sentiment word", "aspect-based opinion mining"]} +{"id": "calzolari-etal-2010-lrec", "title": "The LREC Map of Language Resources and Technologies", "abstract": "In this paper we present the LREC Map of Language Resources and Tools, an innovative feature introduced with this LREC. The purpose of the Map is to shed light on the vast amount of resources and tools that represent the background of the research presented at LREC, in the attempt to fill in a gap in the community knowledge about the resources and tools that are used or created worldwide. It also aims at a change of culture in the field, actively engaging each researcher in the documentation task about resources. The Map has been developed on the basis of the information provided by LREC authors during the submission of papers to the LREC 2010 conference and the LREC workshops, and contains information about almost 2000 resources. The paper illustrates the motivation behind this initiative, its main characteristics, its relevance and future impact in the field, the metadata used to describe the resources, and finally presents some of the most relevant findings.", "keyphrases": ["lrec map", "language resources", "clue-aligner tool"]} +{"id": "miehle-etal-2018-causes", "title": "What Causes the Differences in Communication Styles? A Multicultural Study on Directness and Elaborateness", "abstract": "With the aim of designing a Spoken Dialogue System which adapts to the user\u2019s communication idiosyncrasies, we present a multicultural study to investigate the causes of differences in the communication styles elaborateness and directness in Human-Computer Interaction. 
By adapting the system\u2019s behaviour to the user, the conversation agent may appear more familiar and trustworthy. 339 persons from Germany, Russia, Poland, Spain and the United Kingdom participated in this web-based study. The participants had to imagine that they were talking to a digital agent. For every dialogue turn, they had to read four different variants of the system output and indicate their preference. With the results of this study, we could demonstrate the influence of the user\u2019s culture and gender, the frequency of use of speech-based assistants as well as the system\u2019s role on the user\u2019s preference concerning the system\u2019s communication style in terms of its elaborateness and its directness.", "keyphrases": ["communication style", "multicultural study", "directness", "elaborateness"]} +{"id": "klie-etal-2018-inception", "title": "The INCEpTION Platform: Machine-Assisted and Knowledge-Oriented Interactive Annotation", "abstract": "We introduce INCEpTION, a new annotation platform for tasks including interactive and semantic annotation (e.g., concept linking, fact linking, knowledge base population, semantic frame annotation). These tasks are very time consuming and demanding for annotators, especially when knowledge bases are used. We address these issues by developing an annotation platform that incorporates machine learning capabilities which actively assist and guide annotators. The platform is both generic and modular. It targets a range of research domains in need of semantic annotation, such as digital humanities, bioinformatics, or linguistics. INCEpTION is publicly available as open-source software.", "keyphrases": ["annotation platform", "interactive task", "knowledge basis"]} +{"id": "mayhew-etal-2017-cheap", "title": "Cheap Translation for Cross-Lingual Named Entity Recognition", "abstract": "Recent work in NLP has attempted to deal with low-resource languages but still assumed a resource level that is not present for most languages, e.g., the availability of Wikipedia in the target language. We propose a simple method for cross-lingual named entity recognition (NER) that works well in settings with very minimal resources. Our approach makes use of a lexicon to \u201ctranslate\u201d annotated data available in one or several high resource language(s) into the target language, and learns a standard monolingual NER model there. Further, when Wikipedia is available in the target language, our method can enhance Wikipedia-based methods to yield state-of-the-art NER results; we evaluate on 7 diverse languages, improving the state-of-the-art by an average of 5.5% F1 points. With the minimal resources required, this is an extremely portable cross-lingual NER approach, as illustrated using a truly low-resource language, Uyghur.", "keyphrases": ["entity recognition", "low-resource language", "annotated data", "cheap translation", "name tagging"]} +{"id": "kunze-etal-2017-transfer", "title": "Transfer Learning for Speech Recognition on a Budget", "abstract": "End-to-end training of automated speech recognition (ASR) systems requires massive data and compute resources. We explore transfer learning based on model adaptation as an approach for training ASR models under constrained GPU memory, throughput and training data. We conduct several systematic experiments adapting a Wav2Letter convolutional neural network originally trained for English ASR to the German language. 
We show that this technique allows faster training on consumer-grade resources while requiring less training data in order to achieve the same accuracy, thereby lowering the cost of training ASR models in other languages. Model introspection revealed that small adaptations to the network's weights were sufficient for good performance, especially for inner layers.", "keyphrases": ["speech recognition", "asr", "transfer learning"]} +{"id": "cahill-2009-correlating", "title": "Correlating Human and Automatic Evaluation of a German Surface Realiser", "abstract": "We examine correlations between native speaker judgements on automatically generated German text against automatic evaluation metrics. We look at a number of metrics from the MT and Summarisation communities and find that for a relative ranking task, most automatic metrics perform equally well and have fairly strong correlations to the human judgements. In contrast, on a naturalness judgement task, the General Text Matcher (GTM) tool correlates best overall, although in general, correlation between the human judgements and the automatic metrics was quite weak.", "keyphrases": ["german surface realiser", "automatic metric", "surface realizer"]} +{"id": "chen-etal-2009-pairwise", "title": "A Pairwise Event Coreference Model, Feature Impact and Evaluation for Event Coreference Resolution", "abstract": "In past years, there has been substantial work on the problem of entity coreference resolution whereas much less attention has been paid to event coreference resolution. Starting with some motivating examples, we formally state the problem of event coreference resolution in the ACE program, present an agglomerative clustering algorithm for the task, explore the feature impact in the event coreference model and compare three evaluation metrics that were previously adopted in entity coreference resolution: MUC F-Measure, B-Cubed F-Measure and ECM F-Measure.", "keyphrases": ["feature impact", "event coreference resolution", "symbolic feature"]} +{"id": "gronroos-etal-2014-morfessor", "title": "Morfessor FlatCat: An HMM-Based Method for Unsupervised and Semi-Supervised Learning of Morphology", "abstract": "Morfessor is a family of methods for learning morphological segmentations of words based on unannotated data. We introduce a new variant of Morfessor, FlatCat, that applies a hidden Markov model structure. It builds on previous work on Morfessor, sharing model components with the popular Morfessor Baseline and Categories-MAP variants. Our experiments show that while unsupervised FlatCat does not reach the accuracy of Categories-MAP, with semisupervised learning it provides state-of-the-art results in the Morpho Challenge 2010 tasks for English, Finnish, and Turkish.", "keyphrases": ["semi-supervised learning", "segmentation", "morfessor flatcat"]} +{"id": "riedl-biemann-2013-scaling", "title": "Scaling to Large Data: An Efficient and Effective Method to Compute Distributional Thesauri", "abstract": "We introduce a new highly scalable approach for computing Distributional Thesauri (DTs). By employing pruning techniques and a distributed framework, we make the computation for very large corpora feasible on comparably small computational resources. We demonstrate this by releasing a DT for the whole vocabulary of Google Books syntactic n-grams. 
Evaluating against lexical resources using two measures, we show that our approach produces higher quality DTs than previous approaches, and is thus preferable in terms of speed and quality for large corpora.", "keyphrases": ["distributional thesauri", "scalable approach", "n-gram", "thesaurus"]} +{"id": "benamara-etal-2018-introduction", "title": "Introduction to the Special Issue on Language in Social Media: Exploiting Discourse and Other Contextual Information", "abstract": "Social media content is changing the way people interact with each other and share information, personal messages, and opinions about situations, objects, and past experiences. Most social media texts are short online conversational posts or comments that do not contain enough information for natural language processing (NLP) tools, as they are often accompanied by non-linguistic contextual information, including meta-data (e.g., the user's profile, the social network of the user, and their interactions with other users). Exploiting such different types of context and their interactions makes the automatic processing of social media texts a challenging research task. Indeed, simply applying traditional text mining tools is clearly sub-optimal, as, typically, these tools take into account neither the interactive dimension nor the particular nature of this data, which shares properties with both spoken and written language. This special issue contributes to a deeper understanding of the role of these interactions to process social media data from a new perspective in discourse interpretation. This introduction first provides the necessary background to understand what context is from both the linguistic and computational linguistic perspectives, then presents the most recent context-based approaches to NLP for social media. We conclude with an overview of the papers accepted in this special issue, highlighting what we believe are the future directions in processing social media texts.", "keyphrases": ["special issue", "discourse", "contextual information", "social medium"]} +{"id": "xu-etal-2020-deep", "title": "A Deep Generative Distance-Based Classifier for Out-of-Domain Detection with Mahalanobis Space", "abstract": "Detecting out-of-domain (OOD) input intents is critical in the task-oriented dialog system. Different from most existing methods that rely heavily on manually labeled OOD samples, we focus on the unsupervised OOD detection scenario where there are no labeled OOD samples except for labeled in-domain data. In this paper, we propose a simple but strong generative distance-based classifier to detect OOD samples. We estimate the class-conditional distribution on feature spaces of DNNs via Gaussian discriminant analysis (GDA) to avoid over-confidence problems. And we use two distance functions, Euclidean and Mahalanobis distances, to measure the confidence score of whether a test sample belongs to OOD. Experiments on four benchmark datasets show that our method can consistently outperform the baselines.", "keyphrases": ["distance-based classifier", "out-of-domain detection", "mahalanobis distance"]} +{"id": "zhang-etal-2018-neural", "title": "Neural Latent Extractive Document Summarization", "abstract": "Extractive summarization models need sentence level labels, which are usually created with rule-based methods since most summarization datasets only have document summary pairs. These labels might be suboptimal. 
We propose a latent variable extractive model, where sentences are viewed as latent variables and sentences with activated variables are used to infer gold summaries. During training, the loss can come directly from gold summaries. Experiments on CNN/Dailymail dataset show our latent extractive model outperforms a strong extractive baseline trained on rule-based labels and also performs competitively with several recent models.", "keyphrases": ["latent variable", "extractive model", "gold summary"]} +{"id": "de-cao-etal-2019-question", "title": "Question Answering by Reasoning Across Documents with Graph Convolutional Networks", "abstract": "Most research in reading comprehension has focused on answering questions based on individual documents or even single paragraphs. We introduce a neural model which integrates and reasons relying on information spread within documents and across multiple documents. We frame it as an inference problem on a graph. Mentions of entities are nodes of this graph while edges encode relations between different mentions (e.g., within- and cross-document co-reference). Graph convolutional networks (GCNs) are applied to these graphs and trained to perform multi-step reasoning. Our Entity-GCN method is scalable and compact, and it achieves state-of-the-art results on a multi-document question answering dataset, WikiHop (Welbl et al., 2018).", "keyphrases": ["reasoning", "convolutional network", "mention"]} +{"id": "siddharthan-mandya-2014-hybrid", "title": "Hybrid text simplification using synchronous dependency grammars with hand-written and automatically harvested rules", "abstract": "We present an approach to text simplification based on synchronous dependency grammars. The higher level of abstraction afforded by dependency representations allows for a linguistically sound treatment of complex constructs requiring reordering and morphological change, such as conversion of passive voice to active. We present a synchronous grammar formalism in which it is easy to write rules by hand and also acquire them automatically from dependency parses of aligned English and Simple English sentences. The grammar formalism is optimised for monolingual translation in that it reuses ordering information from the source sentence where appropriate. We demonstrate the superiority of our approach over a leading contemporary system based on quasi-synchronous tree substitution grammars, both in terms of expressivity and performance.", "keyphrases": ["text simplification", "synchronous dependency grammar", "clause"]} +{"id": "yazdani-henderson-2015-model", "title": "A Model of Zero-Shot Learning of Spoken Language Understanding", "abstract": "When building spoken dialogue systems for a new domain, a major bottleneck is developing a spoken language understanding (SLU) module that handles the new domain\u2019s terminology and semantic concepts. We propose a statistical SLU model that generalises to both previously unseen input words and previously unseen output classes by leveraging unlabelled data. After mapping the utterance into a vector space, the model exploits the structure of the output labels by mapping each label to a hyperplane that separates utterances with and without that label. 
Both these mappings are initialised with unsupervised word embeddings, so they can be computed even for words or concepts which were not in the SLU training data.", "keyphrases": ["zero-shot learning", "spoken language understanding", "text classification"]} +{"id": "ng-etal-2006-examining", "title": "Examining the Role of Linguistic Knowledge Sources in the Automatic Identification and Classification of Reviews", "abstract": "This paper examines two problems in document-level sentiment analysis: (1) determining whether a given document is a review or not, and (2) classifying the polarity of a review as positive or negative. We first demonstrate that review identification can be performed with high accuracy using only unigrams as features. We then examine the role of four types of simple linguistic knowledge sources in a polarity classification system.", "keyphrases": ["review identification", "sentiment classification", "low precision"]} +{"id": "zou-etal-2020-pre", "title": "Pre-training for Abstractive Document Summarization by Reinstating Source Text", "abstract": "Abstractive document summarization is usually modeled as a sequence-to-sequence (SEQ2SEQ) learning problem. Unfortunately, training large SEQ2SEQ based summarization models on limited supervised summarization data is challenging. This paper presents three sequence-to-sequence pre-training (in shorthand, STEP) objectives which allow us to pre-train a SEQ2SEQ based abstractive summarization model on unlabeled text. The main idea is that, given an input text artificially constructed from a document, a model is pre-trained to reinstate the original document. These objectives include sentence reordering, next sentence generation and masked document generation, which have close relations with the abstractive document summarization task. Experiments on two benchmark summarization datasets (i.e., CNN/DailyMail and New York Times) show that all three objectives can improve performance upon baselines. Compared to models pre-trained on large-scale data (larger than 160GB), our method, with only 19GB text for pre-training, achieves comparable results, which demonstrates its effectiveness.", "keyphrases": ["abstractive document summarization", "sequence-to-sequence", "objective", "next sentence generation"]} +{"id": "tiedemann-2017-cross", "title": "Cross-lingual dependency parsing for closely related languages - Helsinki's submission to VarDial 2017", "abstract": "This paper describes the submission from the University of Helsinki to the shared task on cross-lingual dependency parsing at VarDial 2017. We present work on annotation projection and treebank translation that gave good results for all three target languages in the test set. In particular, Slovak seems to work well with information coming from the Czech treebank, which is in line with related work. The attachment scores for cross-lingual models even surpass the fully supervised models trained on the target language treebank. Croatian is the most difficult language in the test set and the improvements over the baseline are rather modest. 
Norwegian works best with information coming from Swedish whereas Danish contributes surprisingly little.", "keyphrases": ["helsinki", "annotation projection", "cross-lingual dependency"]} +{"id": "yoshino-etal-2018-dialogue", "title": "Dialogue Scenario Collection of Persuasive Dialogue with Emotional Expressions via Crowdsourcing", "abstract": "Existing dialogue data collection methods such as the Wizard of Oz method (WoZ) or real dialogue recording are costly, and they hinder the launch of a new dialogue system. In this study, we asked crowd workers to create dialogue scenarios according to situation instructions for persuasive dialogue systems that use emotional expressions. We collected 200 dialogues in each of 5 scenarios, for a total of 1,000, via crowdsourcing. We also annotated emotional states and users\u2019 acceptance of system persuasion using crowdsourcing. We constructed a persuasive dialogue system with the collected data and evaluated the system by interacting with crowd workers. The experiment showed that the collected labels have sufficient agreement even though we did not give the workers any annotation training.", "keyphrases": ["persuasive dialogue", "emotional expression", "crowdsourcing"]} +{"id": "lin-etal-2019-reasoning", "title": "Reasoning Over Paragraph Effects in Situations", "abstract": "A key component of successfully reading a passage of text is the ability to apply knowledge gained from the passage to a new situation. In order to facilitate progress on this kind of reading, we present ROPES, a challenging benchmark for reading comprehension targeting Reasoning Over Paragraph Effects in Situations. We target expository language describing causes and effects (e.g., \u201canimal pollinators increase efficiency of fertilization in flowers\u201d), as they have clear implications for new situations. A system is presented a background passage containing at least one of these relations, a novel situation that uses this background, and questions that require reasoning about effects of the relationships in the background passage in the context of the situation. We collect background passages from science textbooks and Wikipedia that contain such phenomena, and ask crowd workers to author situations, questions, and answers, resulting in a 14,322-question dataset. We analyze the challenges of this task and evaluate the performance of state-of-the-art reading comprehension models. The best model performs only slightly better than randomly guessing an answer of the correct type, at 61.6% F1, well below the human performance of 89.0%.", "keyphrases": ["paragraph effects", "situation", "rope"]} +{"id": "quan-etal-2012-ku", "title": "KU Leuven at HOO-2012: A Hybrid Approach to Detection and Correction of Determiner and Preposition Errors in Non-native English Text", "abstract": "In this paper we describe the technical implementation of our system that participated in the Helping Our Own 2012 Shared Task (HOO-2012). The system employs a number of preprocessing steps and machine learning classifiers for correction of determiner and preposition errors in non-native English texts. We use maximum entropy classifiers trained on the provided HOO-2012 development data and a large high-quality English text collection. The system proposes a number of highly-probable corrections, which are evaluated by a language model and compared with the original text. 
A number of deterministic rules are used to increase the precision and recall of the system. Our system is ranked among the three best performing HOO-2012 systems with a precision of 31.15%, recall of 22.08% and F1-score of 25.84% for correction of determiner and preposition errors combined.", "keyphrases": ["determiner", "preposition error", "non-native english text", "article correction"]} +{"id": "saleh-etal-2019-team", "title": "Team QCRI-MIT at SemEval-2019 Task 4: Propaganda Analysis Meets Hyperpartisan News Detection", "abstract": "We describe our submission to SemEval-2019 Task 4 on Hyperpartisan News Detection. We rely on a variety of engineered features originally used to detect propaganda. This is based on the assumption that biased messages are propagandistic and promote a particular political cause or viewpoint. In particular, we trained a logistic regression model with features ranging from simple bag of words to vocabulary richness and text readability. Our system achieved 72.9% accuracy on the manually annotated testset, and 60.8% on the test data that was obtained with distant supervision. Additional experiments showed that significant performance gains can be achieved with better feature pre-processing.", "keyphrases": ["semeval-2019 task", "propaganda", "hyperpartisan news detection"]} +{"id": "han-etal-2020-isobs", "title": "IsOBS: An Information System for Oracle Bone Script", "abstract": "Oracle bone script (OBS) is the earliest known ancient Chinese writing system and the ancestor of modern Chinese. As the Chinese writing system is the oldest continuously-used system in the world, the study of OBS plays an important role in both linguistic and historical research. In order to utilize advanced machine learning methods to automatically process OBS, we construct an information system for OBS (IsOBS) to symbolize, serialize, and store OBS data at the character-level, based on efficient databases and retrieval modules. Moreover, we also apply few-shot learning methods to build an effective OBS character recognition module, which can recognize a large number of OBS characters (especially those characters with a handful of examples) and make the system easy to use. The demo system of IsOBS can be found from . In the future, we will add more OBS data to the system, and hopefully our IsOBS can support further efforts in automatically processing OBS and advance the scientific progress in this field.", "keyphrases": ["information system", "oracle bone script", "isobs"]} +{"id": "beltagy-etal-2013-montague", "title": "Montague Meets Markov: Deep Semantics with Probabilistic Logical Form", "abstract": "We combine logical and distributional representations of natural language meaning by transforming distributional similarity judgments into weighted inference rules using Markov Logic Networks (MLNs). We show that this framework supports both judging sentence similarity and recognizing textual entailment by appropriately adapting the MLN implementation of logical connectives. We also show that distributional phrase similarity, used as textual inference rules created on the fly, improves its performance.", "keyphrases": ["logic", "sentence similarity", "predicate-argument representation"]} +{"id": "gilmanov-etal-2014-swift", "title": "SWIFT Aligner, A Multifunctional Tool for Parallel Corpora: Visualization, Word Alignment, and (Morpho)-Syntactic Cross-Language Transfer", "abstract": "It is well known that word aligned parallel corpora are valuable linguistic resources. 
Since many factors affect automatic alignment quality, manual post-editing may be required in some applications. While there are several state-of-the-art word-aligners, such as GIZA++ and Berkeley, there is no simple visual tool that would enable correcting and editing aligned corpora of different formats. We have developed SWIFT Aligner, a free, portable software tool that allows for visual representation and editing of aligned corpora from several of the most commonly used formats: TALP, GIZA, and NAACL. In addition, our tool has incorporated part-of-speech and syntactic dependency transfer from an annotated source language into an unannotated target language, by means of word-alignment.", "keyphrases": ["visualization", "word alignment", "editing"]} +{"id": "hardmeier-etal-2012-document", "title": "Document-Wide Decoding for Phrase-Based Statistical Machine Translation", "abstract": "Independence between sentences is an assumption deeply entrenched in the models and algorithms used for statistical machine translation (SMT), particularly in the popular dynamic programming beam search decoding algorithm. This restriction is an obstacle to research on more sophisticated discourse-level models for SMT. We propose a stochastic local search decoding method for phrase-based SMT, which permits free document-wide dependencies in the models. We explore the stability and the search parameters of this method and demonstrate that it can be successfully used to optimise a document-level semantic language model.", "keyphrases": ["statistical machine translation", "local search", "phrase-based smt", "semantic language model", "document-level decoder"]} +{"id": "hou-etal-2018-unrestricted", "title": "Unrestricted Bridging Resolution", "abstract": "In contrast to identity anaphors, which indicate coreference between a noun phrase and its antecedent, bridging anaphors link to their antecedent(s) via lexico-semantic, frame, or encyclopedic relations. Bridging resolution involves recognizing bridging anaphors and finding links to antecedents. In contrast to most prior work, we tackle both problems. Our work also follows a more wide-ranging definition of bridging than most previous work and does not impose any restrictions on the type of bridging anaphora or relations between anaphor and antecedent. We create a corpus (ISNotes) annotated for information status (IS), bridging being one of the IS subcategories. The annotations reach high reliability for all categories and marginal reliability for the bridging subcategory. We use a two-stage statistical global inference method for bridging resolution. Given all mentions in a document, the first stage, bridging anaphora recognition, recognizes bridging anaphors as a subtask of learning fine-grained IS. We use a cascading collective classification method where (i) collective classification allows us to investigate relations among several mentions and autocorrelation among IS classes and (ii) cascaded classification allows us to tackle class imbalance, important for minority classes such as bridging. We show that our method outperforms current methods both for IS recognition overall as well as for bridging, specifically. The second stage, bridging antecedent selection, finds the antecedents for all predicted bridging anaphors. We investigate the phenomenon of semantically or syntactically related bridging anaphors that share the same antecedent, a phenomenon we call sibling anaphors. 
We show that taking sibling anaphors into account in a joint inference model improves antecedent selection performance. In addition, we develop semantic and salience features for antecedent selection and suggest a novel method to build the candidate antecedent list for an anaphor, using the discourse scope of the anaphor. Our model outperforms previous work significantly.", "keyphrases": ["resolution", "coreference", "isnotes", "subtask", "wall street journal"]} +{"id": "liang-etal-2020-alice", "title": "ALICE: Active Learning with Contrastive Natural Language Explanations", "abstract": "Training a supervised neural network classifier typically requires many annotated training samples. Collecting and annotating a large number of data points are costly and sometimes even infeasible. The traditional annotation process uses a low-bandwidth human-machine communication interface: classification labels, each of which only provides a few bits of information. We propose Active Learning with Contrastive Explanations (ALICE), an expert-in-the-loop training framework that utilizes contrastive natural language explanations to improve data efficiency in learning. ALICE learns to first use active learning to select the most informative pairs of label classes to elicit contrastive natural language explanations from experts. Then it extracts knowledge from these explanations using a semantic parser. Finally, it incorporates the extracted knowledge through dynamically changing the learning model's structure. We applied ALICE in two visual recognition tasks, bird species classification and social relationship classification. We found that by incorporating contrastive explanations, our models outperform baseline models that are trained with 40-100% more training data. We found that adding 1 explanation leads to a similar performance gain as adding 13-30 labeled training data points.", "keyphrases": ["active learning", "contrastive explanation", "alice"]} +{"id": "foster-andersen-2009-generrate", "title": "GenERRate: Generating Errors for Use in Grammatical Error Detection", "abstract": "This paper explores the issue of automatically generated ungrammatical data and its use in error detection, with a focus on the task of classifying a sentence as grammatical or ungrammatical. We present an error generation tool called GenERRate and show how GenERRate can be used to improve the performance of a classifier on learner data. We describe initial attempts to replicate Cambridge Learner Corpus errors using GenERRate.", "keyphrases": ["generrate", "artificial error", "error-corrected data"]} +{"id": "hangya-fraser-2019-unsupervised", "title": "Unsupervised Parallel Sentence Extraction with Parallel Segment Detection Helps Machine Translation", "abstract": "Mining parallel sentences from comparable corpora is important. Most previous work relies on supervised systems, which are trained on parallel data; thus, their applicability is problematic in low-resource scenarios. Recent developments in building unsupervised bilingual word embeddings made it possible to mine parallel sentences based on cosine similarities of source and target language words. We show that relying only on this information is not enough, since sentences often have similar words but different meanings. We detect continuous parallel segments in sentence pair candidates and rely on them when mining parallel sentences. We show better mining accuracy on three language pairs in a standard shared task on artificial data. 
We also provide the first experiments showing that parallel sentences mined from real-life sources improve unsupervised MT. Our code is available; we hope it will be used to support low-resource MT research.", "keyphrases": ["parallel segment detection", "bilingual word embedding", "bitext mining"]} +{"id": "camacho-collados-pilehvar-2018-role", "title": "On the Role of Text Preprocessing in Neural Network Architectures: An Evaluation Study on Text Categorization and Sentiment Analysis", "abstract": "Text preprocessing is often the first step in the pipeline of a Natural Language Processing (NLP) system, with potential impact on its final performance. Despite its importance, text preprocessing has not received much attention in the deep learning literature. In this paper we investigate the impact of simple text preprocessing decisions (particularly tokenizing, lemmatizing, lowercasing and multiword grouping) on the performance of a standard neural text classifier. We perform an extensive evaluation on standard benchmarks from text categorization and sentiment analysis. While our experiments show that a simple tokenization of input text is generally adequate, they also highlight significant degrees of variability across preprocessing techniques. This reveals the importance of paying attention to this usually-overlooked step in the pipeline, particularly when comparing different models. Finally, our evaluation provides insights into the best preprocessing practices for training word embeddings.", "keyphrases": ["text categorization", "sentiment analysis", "tokenizing"]} +{"id": "park-etal-2018-plusemo2vec", "title": "PlusEmo2Vec at SemEval-2018 Task 1: Exploiting emotion knowledge from emoji and #hashtags", "abstract": "This paper describes our system that has been submitted to SemEval-2018 Task 1: Affect in Tweets (AIT) to solve five subtasks. We focus on modeling both sentence and word level representations of emotion inside texts through large distantly labeled corpora with emojis and hashtags. We transfer the emotional knowledge by exploiting neural network models as feature extractors and use these representations for traditional machine learning models such as support vector regression (SVR) and logistic regression to solve the competition tasks. Our system placed among the top 3 for all subtasks in which we participated.", "keyphrases": ["semeval-2018 task", "emojis", "hashtag"]} +{"id": "zhikov-etal-2010-efficient", "title": "An Efficient Algorithm for Unsupervised Word Segmentation with Branching Entropy and MDL", "abstract": "This paper proposes a fast and simple unsupervised word segmentation algorithm that utilizes the local predictability of adjacent character sequences, while searching for a least-effort representation of the data. The model uses branching entropy as a means of constraining the hypothesis space, in order to efficiently obtain a solution that minimizes the length of a two-part MDL code. An evaluation with corpora in Japanese, Thai, English, and the \"CHILDES\" corpus for research in language development reveals that the algorithm achieves accuracy comparable to that of state-of-the-art methods in unsupervised word segmentation, in significantly reduced computational time.", "keyphrases": ["unsupervised word segmentation", "mdl", "length"]} +{"id": "majumder-etal-2020-like", "title": "Like hiking?
You probably enjoy nature: Persona-grounded Dialog with Commonsense Expansions", "abstract": "Existing persona-grounded dialog models often fail to capture simple implications of given persona descriptions, something which humans are able to do seamlessly. For example, state-of-the-art models cannot infer that interest in hiking might imply love for nature or longing for a break. In this paper, we propose to expand available persona sentences using existing commonsense knowledge bases and paraphrasing resources to imbue dialog models with access to an expanded and richer set of persona descriptions. Additionally, we introduce fine-grained grounding on personas by encouraging the model to make a discrete choice among persona sentences while synthesizing a dialog response. Since such a choice is not observed in the data, we model it using a discrete latent random variable and use variational learning to sample from hundreds of persona expansions. Our model outperforms competitive baselines on the Persona-Chat dataset in terms of dialog quality and diversity while achieving persona-consistent and controllable dialog generation.", "keyphrases": ["nature", "dialog", "persona"]} +{"id": "vaswani-etal-2016-supertagging", "title": "Supertagging With LSTMs", "abstract": "In this paper we present new state-of-the-art performance on CCG supertagging and parsing. Our model outperforms existing approaches by an absolute gain of 1.5%. We analyze the performance of several neural models and demonstrate that while feed-forward architectures can compete with bidirectional LSTMs on POS tagging, models that encode the complete sentence are necessary for the long range syntactic information encoded in supertags.", "keyphrases": ["bidirectional lstm", "supertag", "rnn"]} +{"id": "strzalkowski-etal-2010-modeling", "title": "Modeling Socio-Cultural Phenomena in Discourse", "abstract": "In this paper, we describe a novel approach to computational modeling and understanding of social and cultural phenomena in multi-party dialogues. We developed a two-tier approach in which we first detect and classify certain social language uses, including topic control, disagreement, and involvement, that serve as first-order models from whose presence higher-level social constructs, such as leadership, may be inferred.", "keyphrases": ["language use", "construct", "modeling"]} +{"id": "agirre-soroa-2008-using", "title": "Using the Multilingual Central Repository for Graph-Based Word Sense Disambiguation", "abstract": "This paper presents the results of a graph-based method for performing knowledge-based Word Sense Disambiguation (WSD). The technique exploits the structural properties of the graph underlying the chosen knowledge base. The method is general, in the sense that it is not tied to any particular knowledge base, but in this work we have applied it to the Multilingual Central Repository (MCR). The evaluation has been performed on the Senseval-3 all-words task. The main contributions of the paper are twofold: (1) We have evaluated the separate and combined performance of each type of relation in the MCR, and thus indirectly validated the contents of the MCR and their potential for WSD.
(2) We obtain state-of-the-art results, and in fact yield the best results that can be obtained using publicly available data.", "keyphrases": ["multilingual central repository", "word sense disambiguation", "pagerank"]} +{"id": "le-etal-2020-dual", "title": "Dual-decoder Transformer for Joint Automatic Speech Recognition and Multilingual Speech Translation", "abstract": "We introduce dual-decoder Transformer, a new model architecture that jointly performs automatic speech recognition (ASR) and multilingual speech translation (ST). Our models are based on the original Transformer architecture (Vaswani et al., 2017) but consist of two decoders, each responsible for one task (ASR or ST). Our major contribution lies in how these decoders interact with each other: one decoder can attend to different information sources from the other via a dual-attention mechanism. We propose two variants of these architectures corresponding to two different levels of dependencies between the decoders, called the parallel and cross dual-decoder Transformers, respectively. Extensive experiments on the MuST-C dataset show that our models outperform the previously-reported highest translation performance in the multilingual settings, and also outperform bilingual one-to-one results. Furthermore, our parallel models demonstrate no trade-off between ASR and ST compared to the vanilla multi-task architecture. Our code and pre-trained models are available at .", "keyphrases": ["speech recognition", "multilingual speech translation", "dual-decoder transformer"]} +{"id": "nie-etal-2020-named", "title": "Named Entity Recognition for Social Media Texts with Semantic Augmentation", "abstract": "Existing approaches for named entity recognition suffer from data sparsity problems when conducted on short and informal texts, especially user-generated social media content. Semantic augmentation is a potential way to alleviate this problem. Given that rich semantic information is implicitly preserved in pre-trained word embeddings, they are potential ideal resources for semantic augmentation. In this paper, we propose a neural-based approach to NER for social media texts where both local (from running text) and augmented semantics are taken into account. In particular, we obtain the augmented semantic information from a large-scale corpus, and propose an attentive semantic augmentation module and a gate module to encode and aggregate such information, respectively. Extensive experiments are performed on three benchmark datasets collected from English and Chinese social media platforms, where the results demonstrate the superiority of our approach to previous studies across all three datasets.", "keyphrases": ["entity recognition", "semantic augmentation", "word embedding"]} +{"id": "hitschler-etal-2016-multimodal", "title": "Multimodal Pivots for Image Caption Translation", "abstract": "We present an approach to improve statistical machine translation of image descriptions by multimodal pivots defined in visual space. The key idea is to perform image retrieval over a database of images that are captioned in the target language, and use the captions of the most similar images for crosslingual reranking of translation outputs. Our approach does not depend on the availability of large amounts of in-domain parallel data, but only relies on available large datasets of monolingually captioned images, and on state-of-the-art convolutional neural networks to compute image similarities.
Our experimental evaluation shows improvements of 1 BLEU point over strong baselines.", "keyphrases": ["pivot", "image caption translation", "image feature", "translation quality", "useful complementary information"]} +{"id": "peyrard-eckle-kohler-2016-general", "title": "A General Optimization Framework for Multi-Document Summarization Using Genetic Algorithms and Swarm Intelligence", "abstract": "Extracting summaries via integer linear programming and submodularity are popular and successful techniques in extractive multi-document summarization. However, many interesting optimization objectives are neither submodular nor factorizable into an integer linear program. We address this issue and present a general optimization framework where any function of input documents and a system summary can be plugged in. Our framework includes two kinds of summarizers \u2013 one based on genetic algorithms, the other using a swarm intelligence approach. In our experimental evaluation, we investigate the optimization of two information-theoretic summary evaluation metrics and find that our framework yields competitive results compared to several strong summarization baselines. Our comparative analysis of the genetic and swarm summarizers reveals interesting complementary properties.", "keyphrases": ["general optimization framework", "multi-document summarization", "genetic algorithm"]} +{"id": "marcinczuk-2015-automatic", "title": "Automatic construction of complex features in Conditional Random Fields for Named Entities Recognition", "abstract": "Conditional Random Fields (CRFs) have been proven to be very useful in many sequence labelling tasks from the field of natural language processing, including named entity recognition (NER). The advantage of CRFs over other statistical models (like Hidden Markov Models) is that they can utilize a large set of features describing a sequence of observations. On the other hand, the CRF potential function is defined as a linear combination of features, which means that it cannot model relationships between combinations of input features and output labels. This limitation can be overcome by defining the relationships between atomic features as complex features before training the CRFs. In the paper we present the experimental results of automatic generation of complex features for the named entity recognition task for Polish. A rule-induction algorithm called RIPPER is used to generate a set of rules which are later transformed into a set of complex features. The extended set of features is used to train a CRF model.", "keyphrases": ["complex feature", "conditional random fields", "crfs"]} +{"id": "wuebker-etal-2018-compact", "title": "Compact Personalized Models for Neural Machine Translation", "abstract": "We propose and compare methods for gradient-based domain adaptation of self-attentive neural machine translation models. We demonstrate that a large proportion of model parameters can be frozen during adaptation with minimal or no reduction in translation quality by encouraging structured sparsity in the set of offset tensors during learning via group lasso regularization. We evaluate this technique for both batch and incremental adaptation across multiple data sets and language pairs.
Our system architecture\u2013combining a state-of-the-art self-attentive model with compact domain adaptation\u2013provides high quality personalized machine translation that is both space and time efficient.", "keyphrases": ["neural machine translation", "domain adaptation", "model parameter", "translation quality", "offset tensor"]} +{"id": "sim-etal-2013-measuring", "title": "Measuring Ideological Proportions in Political Speeches", "abstract": "We seek to measure political candidates\u2019 ideological positioning from their speeches. To accomplish this, we infer ideological cues from a corpus of political writings annotated with known ideologies. We then represent the speeches of U.S. Presidential candidates as sequences of cues and lags (filler distinguished only by its length in words). We apply a domain-informed Bayesian HMM to infer the proportions of ideologies each candidate uses in each campaign. The results are validated against a set of preregistered, domain-expert-authored hypotheses.", "keyphrases": ["ideology", "proportion", "computational linguistic"]} +{"id": "belz-kow-2010-extracting", "title": "Extracting Parallel Fragments from Comparable Corpora for Data-to-text Generation", "abstract": "Building NLG systems, in particular statistical ones, requires parallel data (paired inputs and outputs) which do not generally occur naturally. In this paper, we investigate the idea of automatically extracting parallel resources for data-to-text generation from comparable corpora obtained from the Web. We describe our comparable corpus of data and texts relating to British hills and the techniques for extracting paired input/output fragments we have developed so far.", "keyphrases": ["comparable corpora", "data-to-text generation", "parallel resource"]} +{"id": "raaijmakers-etal-2008-multimodal", "title": "Multimodal Subjectivity Analysis of Multiparty Conversation", "abstract": "We investigate the combination of several sources of information for the purpose of subjectivity recognition and polarity classification in meetings. We focus on features from two modalities, transcribed words and acoustics, and we compare the performance of three different textual representations: words, characters, and phonemes. Our experiments show that character-level features outperform word-level features for these tasks, and that a careful fusion of all features yields the best performance.", "keyphrases": ["subjectivity", "multiparty conversation", "polarity classification", "prosodic feature"]} +{"id": "ding-etal-2020-hashtags", "title": "Hashtags, Emotions, and Comments: A Large-Scale Dataset to Understand Fine-Grained Social Emotions to Online Topics", "abstract": "This paper studies social emotions toward online discussion topics. While most prior work focuses on emotions from writers, we investigate readers' responses and explore the public's feelings toward an online topic. A large-scale dataset is collected from Chinese microblog Sina Weibo with over 13 thousand trending topics, emotion votes in 24 fine-grained types from massive participants, and user comments to allow context understanding. In experiments, we examine baseline performance to predict a topic's possible social emotions in a multilabel classification setting. The results show that a seq2seq model with user comment modeling performs the best, even surpassing human prediction.
More analyses shed light on the effects of emotion types, topic description lengths, contexts from user comments, and the limited capacity of the existing models.", "keyphrases": ["emotion", "large-scale dataset", "online topic"]} +{"id": "getman-etal-2018-laying", "title": "Laying the Groundwork for Knowledge Base Population: Nine Years of Linguistic Resources for TAC KBP", "abstract": "Knowledge Base Population (KBP) is an evaluation series within the Text Analysis Conference (TAC) evaluation campaign conducted by the National Institute of Standards and Technology (NIST). Over the past nine years TAC KBP evaluations have targeted information extraction technologies for the population of knowledge bases comprised of entities, relations, and events. Linguistic Data Consortium (LDC) has supported TAC KBP since 2009, developing, maintaining, and distributing linguistic resources in three languages for seven distinct evaluation tracks. This paper describes LDC's resource creation efforts for the various KBP tracks, and highlights changes made over the years to support evolving evaluation requirements.", "keyphrases": ["knowledge base population", "tac kbp", "few language"]} +{"id": "mann-mccallum-2007-efficient", "title": "Efficient Computation of Entropy Gradient for Semi-Supervised Conditional Random Fields", "abstract": "Entropy regularization is a straightforward and successful method of semi-supervised learning that augments the traditional conditional likelihood objective function with an additional term that aims to minimize the predicted label entropy on unlabeled data. It has previously been demonstrated to provide positive results in linear-chain CRFs, but the published method for calculating the entropy gradient requires significantly more computation than supervised CRF training. This paper presents a new derivation and dynamic program for calculating the entropy gradient that is significantly more efficient---having the same asymptotic time complexity as supervised CRF training. We also present efficient generalizations of this method for calculating the label entropy of all sub-sequences, which is useful for active learning, among other applications.", "keyphrases": ["entropy gradient", "unlabeled data", "crf"]} +{"id": "glavas-etal-2016-unsupervised", "title": "Unsupervised Text Segmentation Using Semantic Relatedness Graphs", "abstract": "Segmenting text into semantically coherent fragments improves readability of text and facilitates tasks like text summarization and passage retrieval. In this paper, we present a novel unsupervised algorithm for linear text segmentation (TS) that exploits word embeddings and a measure of semantic relatedness of short texts to construct a semantic relatedness graph of the document. Semantically coherent segments are then derived from maximal cliques of the relatedness graph. The algorithm performs competitively on a standard synthetic dataset and outperforms the best-performing method on a real-world (i.e., non-artificial) dataset of political manifestos.", "keyphrases": ["text segmentation", "semantic relatedness graph", "fragment", "clique"]} +{"id": "belz-reiter-2006-comparing", "title": "Comparing Automatic and Human Evaluation of NLG Systems", "abstract": "We consider the evaluation problem in Natural Language Generation (NLG) and present results for evaluating several NLG systems with similar functionality, including a knowledge-based generator and several statistical systems.
We compare evaluation results for these systems by human domain experts, human non-experts, and several automatic evaluation metrics, including NIST, BLEU, and ROUGE. We find that NIST scores correlate best (>0.8) with human judgments, but that all automatic metrics we examined are biased in favour of generators that select on the basis of frequency alone. We conclude that automatic evaluation of NLG systems has considerable potential, in particular where high-quality reference texts and only a small number of human evaluators are available. However, in general it is probably best for automatic evaluations to be supported by human-based evaluations, or at least by studies that demonstrate that a particular metric correlates well with human judgments in a given domain.", "keyphrases": ["human evaluation", "natural language generation", "domain expert", "rouge"]} +{"id": "swayamdipta-etal-2020-dataset", "title": "Dataset Cartography: Mapping and Diagnosing Datasets with Training Dynamics", "abstract": "Large datasets have become commonplace in NLP research. However, the increased emphasis on data quantity has made it challenging to assess the quality of data. We introduce Data Maps\u2014a model-based tool to characterize and diagnose datasets. We leverage a largely ignored source of information: the behavior of the model on individual instances during training (training dynamics) for building data maps. This yields two intuitive measures for each example\u2014the model's confidence in the true class, and the variability of this confidence across epochs\u2014obtained in a single run of training. Experiments on four datasets show that these model-dependent measures reveal three distinct regions in the data map, each with pronounced characteristics. First, our data maps show the presence of \u201cambiguous\u201d regions with respect to the model, which contribute the most towards out-of-distribution generalization. Second, the most populous regions in the data are \u201ceasy to learn\u201d for the model, and play an important role in model optimization. Finally, data maps uncover a region with instances that the model finds \u201chard to learn\u201d; these often correspond to labeling errors. Our results indicate that a shift in focus from quantity to quality of data could lead to robust models and improved out-of-distribution generalization.", "keyphrases": ["data map", "region", "model optimization", "easy-to-learn"]} +{"id": "chung-etal-2019-conan", "title": "CONAN - COunter NArratives through Nichesourcing: a Multilingual Dataset of Responses to Fight Online Hate Speech", "abstract": "Although there is an unprecedented effort to provide adequate responses in terms of laws and policies to hate content on social media platforms, dealing with hatred online is still a tough problem. Tackling hate speech in the standard way of content deletion or user suspension may be charged with censorship and overblocking. One alternate strategy, that has received little attention so far by the research community, is to actually oppose hate content with counter-narratives (i.e. informed textual responses). In this paper, we describe the creation of the first large-scale, multilingual, expert-based dataset of hate-speech/counter-narrative pairs. This dataset has been built with the effort of more than 100 operators from three different NGOs that applied their training and expertise to the task.
Together with the collected data we also provide additional annotations about expert demographics, hate and response type, and data augmentation through translation and paraphrasing. Finally, we provide initial experiments to assess the quality of our data.", "keyphrases": ["hate speech", "counter-narratives", "textual response"]} +{"id": "agirre-etal-2016-semeval-2016", "title": "SemEval-2016 Task 2: Interpretable Semantic Textual Similarity", "abstract": "Paper presented at the 10th International Workshop on Semantic Evaluation (SemEval-2016), held on June 16\u201317, 2016, in San Diego, California.", "keyphrases": ["semantic textual similarity", "relation type", "caption"]} +{"id": "antoine-etal-2014-weighted", "title": "Weighted Krippendorff's alpha is a more reliable metrics for multi-coders ordinal annotations: experimental studies on emotion, opinion and coreference annotation", "abstract": "The question of data reliability is of first importance to assess the quality of manually annotated corpora. Although Cohen's \u03ba is the prevailing reliability measure used in NLP, alternative statistics have been proposed. This paper presents an experimental study with four measures (Cohen's \u03ba, Scott's \u03c0, binary and weighted Krippendorff's \u03b1) on three tasks: emotion, opinion and coreference annotation. The reported studies investigate the factors of influence (annotator bias, category prevalence, number of coders, number of categories) that should affect reliability estimation. Results show that the use of a weighted measure restricts this influence on ordinal annotations. They suggest that weighted \u03b1 is the most reliable metric for such an annotation scheme.", "keyphrases": ["krippendorff", "opinion", "coreference annotation"]} +{"id": "kim-hovy-2006-identifying", "title": "Identifying and Analyzing Judgment Opinions", "abstract": "In this paper, we introduce a methodology for analyzing judgment opinions. We define a judgment opinion as consisting of a valence, a holder, and a topic. We decompose the task of opinion analysis into four parts: 1) recognizing the opinion; 2) identifying the valence; 3) identifying the holder; and 4) identifying the topic. In this paper, we address the first three parts and evaluate our methodology using both intrinsic and extrinsic measures.", "keyphrases": ["opinion", "polarity classification", "benefit"]} +{"id": "declerck-siegel-2019-ontolex", "title": "OntoLex as a possible Bridge between WordNets and full lexical Descriptions", "abstract": "In this paper we describe our current work on representing a recently created German lexical semantics resource in OntoLex-Lemon and in conformance with WordNet specifications. Besides presenting the representation effort, we show the utilization of OntoLex-Lemon to bridge from WordNet-like resources to full lexical descriptions and extend the coverage of WordNets to other types of lexical data, such as decomposition results, exemplified for German data, and inflectional phenomena, here outlined for English data.", "keyphrases": ["bridge", "wordnets", "german data"]} +{"id": "hasan-etal-2009-learning", "title": "Learning-Based Named Entity Recognition for Morphologically-Rich, Resource-Scarce Languages", "abstract": "Named entity recognition for morphologically rich, case-insensitive languages, including the majority of Semitic languages, Iranian languages, and Indian languages, is inherently more difficult than its English counterpart.
Worse still, progress on machine learning approaches to named entity recognition for many of these languages is currently hampered by the scarcity of annotated data and the lack of an accurate part-of-speech tagger. While it is possible to rely on manually-constructed gazetteers to combat data scarcity, this gazetteer-centric approach has the potential weakness of creating irreproducible results, since these name lists are not publicly available in general. Motivated in part by this concern, we present a learning-based named entity recognizer that does not rely on manually-constructed gazetteers, using Bengali as our representative resource-scarce, morphologically-rich language. Our recognizer achieves a relative improvement of 7.5% in F-measure over a baseline recognizer. Improvements arise from (1) using induced affixes, (2) extracting information from online lexical databases, and (3) jointly modeling part-of-speech tagging and named entity recognition.", "keyphrases": ["entity recognition", "part-of-speech tagger", "rich language"]} +{"id": "riezler-etal-2003-statistical", "title": "Statistical Sentence Condensation using Ambiguity Packing and Stochastic Disambiguation Methods for Lexical-Functional Grammar", "abstract": "We present an application of ambiguity packing and stochastic disambiguation techniques for Lexical-Functional Grammars (LFG) to the domain of sentence condensation. Our system incorporates a linguistic parser/generator for LFG, a transfer component for parse reduction operating on packed parse forests, and a maximum-entropy model for stochastic output selection. Furthermore, we propose the use of standard parser evaluation methods for automatically evaluating the summarization quality of sentence condensation systems. An experimental evaluation of summarization quality shows a close correlation between the automatic parse-based evaluation and a manual evaluation of generated strings. Overall summarization quality of the proposed system is state-of-the-art, with guaranteed grammaticality of the system output due to the use of a constraint-based parser/generator.", "keyphrases": ["ambiguity packing", "grammaticality", "statistical model"]} +{"id": "harrison-etal-2019-maximizing", "title": "Maximizing Stylistic Control and Semantic Accuracy in NLG: Personality Variation and Discourse Contrast", "abstract": "Neural generation methods for task-oriented dialogue typically generate from a meaning representation that is populated using a database of domain information, such as a table of data describing a restaurant. While earlier work focused solely on the semantic fidelity of outputs, recent work has started to explore methods for controlling the style of the generated text while simultaneously achieving semantic accuracy. Here we experiment with two stylistic benchmark tasks, generating language that exhibits variation in personality, and generating discourse contrast. We report a huge performance improvement in both stylistic control and semantic accuracy over the state of the art on both of these benchmarks. We test several different models and show that putting stylistic conditioning in the decoder and eliminating the semantic re-ranker used in earlier models results in more than 15 points higher BLEU for Personality, with a reduction of semantic error to near zero. 
We also report an improvement from .75 to .81 in controlling contrast and a reduction in semantic error from 16% to 2%.", "keyphrases": ["stylistic control", "semantic accuracy", "personality", "discourse contrast"]} +{"id": "li-etal-2010-adaptive", "title": "Adaptive Development Data Selection for Log-linear Model in Statistical Machine Translation", "abstract": "This paper addresses the problem of dynamic model parameter selection for log-linear model based statistical machine translation (SMT) systems. In this work, we propose a principled method for this task by transforming it into a test-data-dependent development set selection problem. We present two algorithms for automatic development set construction, and evaluate our method on several NIST data sets for the Chinese-English translation task. Experimental results show that our method can effectively adapt log-linear model parameters to different test data, and consistently achieves good translation performance compared with conventional methods that use a fixed model parameter setting across different data sets.", "keyphrases": ["log-linear model", "statistical machine translation", "test set"]} +{"id": "ji-etal-2022-vscript", "title": "VScript: Controllable Script Generation with Visual Presentation", "abstract": "In order to offer a customized script tool and inspire professional scriptwriters, we present VScript. It is a controllable pipeline that generates complete scripts, including dialogues and scene descriptions, and presents them visually using video retrieval. With an interactive interface, our system allows users to select genres and input starting words that control the theme and development of the generated script. We adopt a hierarchical structure, which first generates the plot, then the script and its visual presentation. A novel approach is also introduced to plot-guided dialogue generation by treating it as inverse dialogue summarization. The experimental results show that our approach outperforms the baselines on both automatic and human evaluations, especially in genre control.", "keyphrases": ["visual presentation", "interface", "vscript"]} +{"id": "xu-etal-2022-beyond", "title": "Beyond Goldfish Memory: Long-Term Open-Domain Conversation", "abstract": "Despite recent improvements in open-domain dialogue models, state-of-the-art models are trained and evaluated on short conversations with little context. In contrast, the long-term conversation setting has hardly been studied. In this work we collect and release a human-human dataset consisting of multiple chat sessions whereby the speaking partners learn about each other's interests and discuss the things they have learnt from past sessions. We show how existing models trained on existing datasets perform poorly in this long-term conversation setting in both automatic and human evaluations, and we study long-context models that can perform much better. In particular, we find retrieval-augmented methods and methods with an ability to summarize and recall previous conversations outperform the standard encoder-decoder architectures currently considered state of the art.", "keyphrases": ["conversation", "partner", "long-context model"]} +{"id": "nadejde-etal-2017-predicting", "title": "Predicting Target Language CCG Supertags Improves Neural Machine Translation", "abstract": "Neural machine translation (NMT) models are able to partially learn syntactic information from sequential lexical information.
Still, some complex syntactic phenomena such as prepositional phrase attachment are poorly modeled. This work aims to answer two questions: 1) Does explicitly modeling target language syntax help NMT? 2) Is tight integration of words and syntax better than multitask training? We introduce syntactic information in the form of CCG supertags in the decoder, by interleaving the target supertags with the word sequence. Our results on WMT data show that explicitly modeling target-syntax improves machine translation quality for German->English, a high-resource pair, and for Romanian->English, a low-resource pair, and also for several syntactic phenomena including prepositional phrase attachment. Furthermore, a tight coupling of words and syntax improves translation quality more than multitask training. By combining target-syntax with adding source-side dependency labels in the embedding layer, we obtain a total improvement of 0.9 BLEU for German->English and 1.2 BLEU for Romanian->English.", "keyphrases": ["ccg supertag", "neural machine translation", "syntactic information"]} +{"id": "chen-etal-2020-shot", "title": "Few-Shot NLG with Pre-Trained Language Model", "abstract": "Neural-based end-to-end approaches to natural language generation (NLG) from structured data or knowledge are data-hungry, making their adoption for real-world applications difficult with limited data. In this work, we propose the new task of few-shot natural language generation. Motivated by how humans tend to summarize tabular data, we propose a simple yet effective approach and show that it not only demonstrates strong performance but also provides good generalization across domains. The design of the model architecture is based on two aspects: content selection from input data and language modeling to compose coherent sentences, which can be acquired from prior knowledge. With just 200 training examples, across multiple domains, we show that our approach achieves very reasonable performance and outperforms the strongest baseline by an average of over 8.0 BLEU points. Our code and data can be found at ", "keyphrases": ["pre-trained language model", "table-to-text generation", "few-shot scenario"]} +{"id": "schick-etal-2020-automatically", "title": "Automatically Identifying Words That Can Serve as Labels for Few-Shot Text Classification", "abstract": "A recent approach for few-shot text classification is to convert textual inputs to cloze questions that contain some form of task description, process them with a pretrained language model and map the predicted words to labels. Manually defining this mapping between words and labels requires both domain expertise and an understanding of the language model's abilities. To mitigate this issue, we devise an approach that automatically finds such a mapping given small amounts of training data. For a number of tasks, the mapping found by our approach performs almost as well as hand-crafted label-to-word mappings.", "keyphrases": ["text classification", "prompt", "well performance"]} +{"id": "gari-soler-apidianaki-2021-lets", "title": "Let's Play Mono-Poly: BERT Can Reveal Words' Polysemy Level and Partitionability into Senses", "abstract": "Pre-trained language models (LMs) encode rich information about linguistic structure but their knowledge about lexical polysemy remains unclear. We propose a novel experimental setup for analyzing this knowledge in LMs specifically trained for different languages (English, French, Spanish, and Greek) and in multilingual BERT.
We perform our analysis on datasets carefully designed to reflect different sense distributions, and control for parameters that are highly correlated with polysemy such as frequency and grammatical category. We demonstrate that BERT-derived representations reflect words' polysemy level and their partitionability into senses. Polysemy-related information is more clearly present in English BERT embeddings, but models in other languages also manage to establish relevant distinctions between words at different polysemy levels. Our results contribute to a better understanding of the knowledge encoded in contextualized representations and open up new avenues for multilingual lexical semantics research.", "keyphrases": ["bert", "polysemy level", "partitionability", "language model"]} +{"id": "friedman-etal-2019-relating", "title": "Relating Word Embedding Gender Biases to Gender Gaps: A Cross-Cultural Analysis", "abstract": "Modern models for common NLP tasks often employ machine learning techniques and train on journalistic, social media, or other culturally-derived text. These have recently been scrutinized for racial and gender biases, stemming from inherent bias in their training text. These biases are often sub-optimal and recent work proposes methods to rectify them; however, these biases may shed light on actual racial or gender gaps in the culture(s) that produced the training text, thereby helping us understand cultural context through big data. This paper presents an approach for quantifying gender bias in word embeddings, and then using them to characterize statistical gender gaps in education, politics, economics, and health. We validate these metrics on 2018 Twitter data spanning 51 U.S. regions and 99 countries. We correlate state and country word embedding biases with 18 international and 5 U.S.-based statistical gender gaps, characterizing regularities and predictive strength.", "keyphrases": ["word embedding", "cultural context", "gender bias"]} +{"id": "grundkiewicz-junczys-dowmunt-2018-near", "title": "Near Human-Level Performance in Grammatical Error Correction with Hybrid Machine Translation", "abstract": "We combine two of the most popular approaches to automated Grammatical Error Correction (GEC): GEC based on Statistical Machine Translation (SMT) and GEC based on Neural Machine Translation (NMT). The hybrid system achieves new state-of-the-art results on the CoNLL-2014 and JFLEG benchmarks. This GEC system preserves the accuracy of SMT output and, at the same time, generates more fluent sentences as is typical for NMT. Our analysis shows that the created systems are closer to reaching human-level performance than any other GEC system reported so far.", "keyphrases": ["human-level performance", "grammatical error correction", "recall"]} +{"id": "nguyen-etal-2015-tea", "title": "Tea Party in the House: A Hierarchical Ideal Point Topic Model and Its Application to Republican Legislators in the 112th Congress", "abstract": "We introduce the Hierarchical Ideal Point Topic Model, which provides a rich picture of policy issues, framing, and voting behavior using a joint model of votes, bill text, and the language that legislators use when debating bills. We use this model to look at the relationship between Tea Party Republicans and \u201cestablishment\u201d Republicans in the U.S. House of Representatives during the 112th Congress.
1. Capturing Political Polarization. Ideal-point models are one of the most widely used tools in contemporary political science research (Poole and Rosenthal, 2007). These models estimate political preferences for legislators, known as their ideal points, from binary data such as legislative votes. Popular formulations analyze legislators\u2019 votes and place them on a one-dimensional scale, most often interpreted as an ideological spectrum from liberal to conservative. Moving beyond a single dimension is attractive, however, since people may lean differently based on policy issues; for example, the conservative movement in the U.S. includes fiscal conservatives who are relatively liberal on social issues, and vice versa. In multi-dimensional ideal point models, therefore, the ideal point of each legislator is no longer characterized by a single number, but by a multi-dimensional vector. With that move comes a new challenge, though: the additional dimensions are often difficult to interpret. To mitigate this problem, recent research has introduced methods that estimate multi-dimensional ideal points using both voting data and the texts of the bills being voted on, e.g., using topic models and associating each dimension of the ideal point space with a topic. The words most strongly associated with the topic can sometimes provide a readable description of its corresponding dimension. In this paper, we develop this idea further by introducing HIPTM, the Hierarchical Ideal Point Topic Model, to estimate multi-dimensional ideal points for legislators in the U.S. Congress. HIPTM differs from previous models in three ways. First, HIPTM uses not only votes and associated bill text, but also the language of the legislators themselves; this allows predictions of ideal points from politicians\u2019 writing alone. Second, HIPTM improves the interpretability of ideal-point dimensions by incorporating data from the Congressional Bills Project (Adler and Wilkerson, 2015), in which bills are labeled with major topics from the Policy Agendas Project Topic Codebook (http://www.policyagendas.org/). And third, HIPTM discovers a hierarchy of topics, allowing us to analyze both agenda issues and issue-specific frames that legislators use on the congressional floor, following Nguyen et al. (2013) in modeling framing as second-level agenda setting (McCombs, 2005). Using this new model, we focus on Republican legislators during the 112th U.S. Congress, from January 2011 until January 2013. This is a particularly interesting session of Congress for political scientists, because of the rise of the Tea Party, a decentralized political movement with populist, libertarian, and conservative elements. Although united with \u201cestablishment\u201d Republicans against Democrats in the 2010 midterm elections, leading to massive Democratic defeats, the Tea Party was\u2014and still is\u2014wrestling with establishment Republicans for control of the Republican party.
The Tea Party is a new and complex phenomenon for political scientists; as Carmines and D\u2019Amico (2015) observe: \u201cConventional views of ideology as a single-dimensional, left-right spectrum experience great difficulty in understanding or explaining the Tea Party.\u201d Our model identifies legislators who have low (or high) levels of \u201cTea Partiness\u201d but are (or are not) members of the Tea Party Caucus.", "keyphrases": ["topic model", "112th congress", "framing", "tea party"]} +{"id": "gimenez-marquez-2008-smorgasbord", "title": "A Smorgasbord of Features for Automatic MT Evaluation", "abstract": "This document describes the approach by the NLP Group at the Technical University of Catalonia (UPC-LSI) for the shared task on Automatic Evaluation of Machine Translation at the ACL 2008 Third SMT Workshop.", "keyphrases": ["translation quality", "improved correlation", "difficulty", "gim\u00e9nez", "human judgement"]} +{"id": "avramidis-koehn-2008-enriching", "title": "Enriching Morphologically Poor Languages for Statistical Machine Translation", "abstract": "We address the problem of translating from morphologically poor to morphologically rich languages by adding per-word linguistic information to the source language. We use the syntax of the source sentence to extract information for noun cases and verb persons and annotate the corresponding words accordingly. In experiments, we show improved performance for translating from English into Greek and Czech. For English\u2010Greek, we reduce the error on the verb conjugation from 19% to 5.4% and noun case agreement from 9% to 6%.", "keyphrases": ["morphology", "poor language", "source sentence"]} +{"id": "peirsman-pado-2010-cross", "title": "Cross-lingual Induction of Selectional Preferences with Bilingual Vector Spaces", "abstract": "We describe a cross-lingual method for the induction of selectional preferences for resource-poor languages, where no accurate monolingual models are available. The method uses bilingual vector spaces to \"translate\" foreign language predicate-argument structures into a resource-rich language like English. The only prerequisite for constructing the bilingual vector space is a large unparsed corpus in the resource-poor language, although the model can profit from (even noisy) syntactic knowledge. Our experiments show that the cross-lingual predictions correlate well with human ratings, clearly outperforming monolingual baseline models.", "keyphrases": ["induction", "bilingual vector space", "predicate-argument structure"]} +{"id": "elsner-santhanam-2011-learning", "title": "Learning to Fuse Disparate Sentences", "abstract": "We present a system for fusing sentences which are drawn from the same source document but have different content. Unlike previous work, our approach is supervised, training on real-world examples of sentences fused by professional journalists in the process of editing news articles. Like Filippova and Strube (2008), our system merges dependency graphs using Integer Linear Programming. However, instead of aligning the inputs as a preprocess, we integrate the tasks of finding an alignment and selecting a merged sentence into a joint optimization problem, and learn parameters for this optimization using a structured online algorithm.
Evaluation by human judges shows that our technique produces fused sentences that are both informative and readable.", "keyphrases": ["news article", "fusion", "similar sentence"]} +{"id": "kubler-2008-page", "title": "The PaGe 2008 Shared Task on Parsing German", "abstract": "The ACL 2008 Workshop on Parsing German features a shared task on parsing German. The goal of the shared task was to find reasons for the radically different behavior of parsers on the different treebanks and between constituent and dependency representations. In this paper, we describe the task and the data sets. In addition, we provide an overview of the test results and a first analysis.", "keyphrases": ["page", "shared task", "dependency parser"]} +{"id": "takamura-etal-2006-latent", "title": "Latent Variable Models for Semantic Orientations of Phrases", "abstract": "We propose models for semantic orientations of phrases as well as classification methods based on the models. Although each phrase consists of multiple words, the semantic orientation of the phrase is not a mere sum of the orientations of the component words. Some words can invert the orientation. In order to capture the property of such phrases, we introduce latent variables into the models. Through experiments, we show that the proposed latent variable models work well in the classification of semantic orientations of phrases and achieve nearly 82% classification accuracy.", "keyphrases": ["variable model", "semantic orientation", "latent", "sentiment classification"]} +{"id": "damonte-cohen-2018-cross", "title": "Cross-Lingual Abstract Meaning Representation Parsing", "abstract": "Abstract Meaning Representation (AMR) research has mostly focused on English. We show that it is possible to use AMR annotations for English as a semantic representation for sentences written in other languages. We exploit an AMR parser for English and parallel corpora to learn AMR parsers for Italian, Spanish, German and Chinese. Qualitative analysis shows that the new parsers overcome structural differences between the languages. We further propose a method to evaluate the parsers that does not require gold standard data in the target languages. This method highly correlates with the gold standard evaluation, obtaining a Pearson correlation coefficient of 0.95.", "keyphrases": ["semantic representation", "other language", "amr parser", "parallel corpora"]} +{"id": "chaudhary-etal-2018-adapting", "title": "Adapting Word Embeddings to New Languages with Morphological and Phonological Subword Representations", "abstract": "Much work in Natural Language Processing (NLP) has been for resource-rich languages, making generalization to new, less-resourced languages challenging. We present two approaches for improving generalization to low-resourced languages by adapting continuous word representations using linguistically motivated subword units: phonemes, morphemes and graphemes. Our method requires neither parallel corpora nor bilingual dictionaries and provides a significant gain in performance over previous methods relying on these resources. We demonstrate the effectiveness of our approaches on Named Entity Recognition for four languages, namely Uyghur, Turkish, Bengali and Hindi, of which Uyghur and Bengali are low resource languages, and also perform experiments on Machine Translation. Exploiting subwords with transfer learning gives us a boost of +15.2 NER F1 for Uyghur and +9.7 F1 for Bengali.
We also show improvements in the monolingual setting where we achieve (avg.) +3 F1 and (avg.) +1.35 BLEU.", "keyphrases": ["word embedding", "low-resource language", "phoneme"]} +{"id": "lin-chen-2008-ranking", "title": "Ranking Reader Emotions Using Pairwise Loss Minimization and Emotional Distribution Regression", "abstract": "This paper presents two approaches to ranking reader emotions of documents. Past studies assign a document to a single emotion category, so their methods cannot be applied directly to the emotion ranking problem. Furthermore, whereas previous research analyzes emotions from the writer's perspective, this work examines readers' emotional states. The first approach proposed in this paper minimizes pairwise ranking errors. In the second approach, regression is used to model emotional distributions. Experiment results show that the regression method is more effective at identifying the most popular emotion, but the pairwise loss minimization method produces ranked lists of emotions that have better correlations with the correct lists.", "keyphrases": ["reader", "pairwise loss minimization", "emotional distribution regression"]} +{"id": "halder-etal-2020-task", "title": "Task-Aware Representation of Sentences for Generic Text Classification", "abstract": "State-of-the-art approaches for text classification leverage a transformer architecture with a linear layer on top that outputs a class distribution for a given prediction problem. While effective, this approach suffers from conceptual limitations that affect its utility in few-shot or zero-shot transfer learning scenarios. First, the number of classes to predict needs to be pre-defined. In a transfer learning setting, in which new classes are added to an already trained classifier, all information contained in a linear layer is therefore discarded, and a new layer is trained from scratch. Second, this approach only learns the semantics of classes implicitly from training examples, as opposed to leveraging the explicit semantic information provided by the natural language names of the classes. For instance, a classifier trained to predict the topics of news articles might have classes like \u201cbusiness\u201d or \u201csports\u201d that themselves carry semantic information. Extending a classifier to predict a new class named \u201cpolitics\u201d with only a handful of training examples would benefit from both leveraging the semantic information in the name of a new class and using the information contained in the already trained linear layer. This paper presents a novel formulation of text classification that addresses these limitations. It imbues the notion of the task at hand into the transformer model itself by factorizing arbitrary classification problems into a generic binary classification problem. We present experiments in few-shot and zero-shot transfer learning that show that our approach significantly outperforms previous approaches on small training data and can even learn to predict new classes with no training examples at all. The implementation of our model is publicly available at: .", "keyphrases": ["sentences", "new class", "task-aware representation"]} +{"id": "lai-etal-2021-lattice", "title": "Lattice-BERT: Leveraging Multi-Granularity Representations in Chinese Pre-trained Language Models", "abstract": "Chinese pre-trained language models usually process text as a sequence of characters, while ignoring more coarse granularity, e.g., words. 
In this work, we propose a novel pre-training paradigm for Chinese \u2014 Lattice-BERT, which explicitly incorporates word representations along with characters, and thus can model a sentence in a multi-granularity manner. Specifically, we construct a lattice graph from the characters and words in a sentence and feed all these text units into transformers. We design a lattice position attention mechanism to exploit the lattice structures in self-attention layers. We further propose a masked segment prediction task to push the model to learn from rich but redundant information inherent in lattices, while avoiding learning unexpected tricks. Experiments on 11 Chinese natural language understanding tasks show that our model can bring an average increase of 1.5% under the 12-layer setting, achieving new state-of-the-art results among base-size models on the CLUE benchmarks. Further analysis shows that Lattice-BERT can harness the lattice structures, and the improvement comes from the exploration of redundant information and multi-granularity representations. Our code will be available at .", "keyphrases": ["multi-granularity representation", "chinese", "word representation"]} +{"id": "dernoncourt-lee-2017-pubmed", "title": "PubMed 200k RCT: a Dataset for Sequential Sentence Classification in Medical Abstracts", "abstract": "We present PubMed 200k RCT, a new dataset based on PubMed for sequential sentence classification. The dataset consists of approximately 200,000 abstracts of randomized controlled trials, totaling 2.3 million sentences. Each sentence of each abstract is labeled with its role in the abstract using one of the following classes: background, objective, method, result, or conclusion. The purpose of releasing this dataset is twofold. First, the majority of datasets for sequential short-text classification (i.e., classification of short texts that appear in sequences) are small: we hope that releasing a new large dataset will help develop more accurate algorithms for this task. Second, from an application perspective, researchers need better tools to efficiently skim through the literature. Automatically classifying each sentence in an abstract would help researchers read abstracts more efficiently, especially in fields where abstracts may be long, such as the medical field.", "keyphrases": ["sequential sentence classification", "abstract", "pubmed"]} +{"id": "rehbein-etal-2014-kiezdeutsch", "title": "The KiezDeutsch Korpus (KiDKo) Release 1.0", "abstract": "This paper presents the first release of the KiezDeutsch Korpus (KiDKo), a new language resource with multiparty spoken dialogues of Kiezdeutsch, a newly emerging language variety spoken by adolescents from multiethnic urban areas in Germany. The first release of the corpus includes the transcriptions of the data as well as a normalisation layer and part-of-speech annotations. In the paper, we describe the main features of the new resource and then focus on automatic POS tagging of informal spoken language. Our tagger achieves an accuracy of nearly 97% on KiDKo.
While we did not succeed in further improving the tagger using ensemble tagging, we present our approach to using the tagger ensembles for identifying error patterns in the automatically tagged data.", "keyphrases": ["kiezdeutsch korpus", "kidko", "kiezdeutsch corpus"]} +{"id": "he-etal-2018-decoupling", "title": "Decoupling Strategy and Generation in Negotiation Dialogues", "abstract": "We consider negotiation settings in which two agents use natural language to bargain on goods. Agents need to decide on both high-level strategy (e.g., proposing $50) and the execution of that strategy (e.g., generating \u201cThe bike is brand new. Selling for just $50!\u201d). Recent work on negotiation trains neural models, but their end-to-end nature makes it hard to control their strategy, and reinforcement learning tends to lead to degenerate solutions. In this paper, we propose a modular approach based on coarse dialogue acts (e.g., propose(price=50)) that decouples strategy and generation. We show that we can flexibly set the strategy using supervised learning, reinforcement learning, or domain-specific knowledge without degeneracy, while our retrieval-based generation can maintain context-awareness and produce diverse utterances. We test our approach on the recently proposed DEALORNODEAL game, and we also collect a richer dataset based on real items on Craigslist. Human evaluation shows that our systems achieve higher task success rate and more human-like negotiation behavior than previous approaches.", "keyphrases": ["negotiation", "dialogue system", "language generation"]} +{"id": "buyko-etal-2010-genereg", "title": "The GeneReg Corpus for Gene Expression Regulation Events \u2014 An Overview of the Corpus and its In-Domain and Out-of-Domain Interoperability", "abstract": "Despite the large variety of corpora in the biomedical domain their annotations differ in many respects, e.g., the coverage of different, highly specialized knowledge domains, varying degrees of granularity of targeted relations, the specificity of linguistic anchoring of relations and named entities in documents, etc. We here present GeneReg (Gene Regulation Corpus), the result of an annotation campaign led by the Jena University Language & Information Engineering (JULIE) Lab. The GeneReg corpus consists of 314 abstracts dealing with the regulation of gene expression in the model organism E. coli. Our emphasis in this paper is on the compatibility of the GeneReg corpus with the alternative Genia event corpus and with several in-domain and out-of-domain lexical resources, e.g., the Specialist Lexicon, FrameNet, and WordNet. The links we established from the GeneReg corpus to these external resources will help improve the performance of the automatic relation extraction engine JREx trained and evaluated on GeneReg.", "keyphrases": ["genereg corpus", "gene expression", "in-domain"]} +{"id": "chali-hasan-2015-towards", "title": "Towards Topic-to-Question Generation", "abstract": "This paper is concerned with automatic generation of all possible questions from a topic of interest. Specifically, we consider that each topic is associated with a body of texts containing useful information about the topic. Then, questions are generated by exploiting the named entity information and the predicate argument structures of the sentences present in the body of texts. 
The importance of the generated questions is measured using Latent Dirichlet Allocation by identifying the subtopics (which are closely related to the original topic) in the given body of texts and applying the Extended String Subsequence Kernel to calculate their similarity with the questions. We also propose the use of syntactic tree kernels for the automatic judgment of the syntactic correctness of the questions. The questions are ranked by considering both their importance (in the context of the given body of texts) and syntactic correctness. To the best of our knowledge, no previous study has accomplished this task in our setting. A series of experiments demonstrate that the proposed topic-to-question generation approach can significantly outperform the state-of-the-art results.", "keyphrases": ["automatic generation", "possible question", "declarative sentence"]} +{"id": "sun-etal-2021-tita", "title": "TITA: A Two-stage Interaction and Topic-Aware Text Matching Model", "abstract": "In this paper, we focus on the problem of keyword and document matching by considering different relevance levels. In our recommendation system, different people follow different hot keywords with interest. We need to attach documents to each keyword and then distribute the documents to people who follow these keywords. The ideal documents should have the same topic with the keyword, which we call topic-aware relevance. In other words, topic-aware relevance documents are better than partially-relevance ones in this application. However, previous tasks never define topic-aware relevance clearly. To tackle this problem, we define a three-level relevance in keyword-document matching task: topic-aware relevance, partially-relevance and irrelevance. To capture the relevance between the short keyword and the document at above-mentioned three levels, we should not only combine the latent topic of the document with its deep neural representation, but also model complex interactions between the keyword and the document. To this end, we propose a Two-stage Interaction and Topic-Aware text matching model (TITA). In terms of \u201ctopic-aware\u201d, we introduce neural topic model to analyze the topic of the document and then use it to further encode the document. In terms of \u201ctwo-stage interaction\u201d, we propose two successive stages to model complex interactions between the keyword and the document. Extensive experiments reveal that TITA outperforms other well-designed baselines and shows excellent performance in our recommendation system.", "keyphrases": ["two-stage interaction", "text matching model", "latent topic"]} +{"id": "chen-etal-2006-novel", "title": "Novel Association Measures Using Web Search with Double Checking", "abstract": "A web search with double checking model is proposed to explore the web as a live corpus. Five association measures including variants of Dice, Overlap Ratio, Jaccard, and Cosine, as well as Co-Occurrence Double Check (CODC), are presented. In the experiments on Rubenstein-Goodenough's benchmark data set, the CODC measure achieves correlation coefficient 0.8492, which competes with the performance (0.8914) of the model using WordNet. The experiments on link detection of named entities using the strategies of direct association, association matrix and scalar association matrix verify that the double-check frequencies are reliable. Further study on named entity clustering shows that the five measures are quite useful. 
In particular, CODC measure is very stable on word-word and name-name experiments. The application of CODC measure to expand community chains for personal name disambiguation achieves 9.65% and 14.22% increase compared to the system without community expansion. All the experiments illustrate that the novel model of web search with double checking is feasible for mining associations from the web.", "keyphrases": ["web search", "double checking", "semantic similarity"]} +{"id": "zhou-etal-2020-global", "title": "Global Context-enhanced Graph Convolutional Networks for Document-level Relation Extraction", "abstract": "Document-level Relation Extraction (RE) is particularly challenging due to complex semantic interactions among multiple entities in a document. Among existing approaches, Graph Convolutional Networks (GCN) is one of the most effective approaches for document-level RE. However, traditional GCN simply takes word nodes and adjacency matrix to represent graphs, which makes it difficult to establish direct connections between distant entity pairs. In this paper, we propose Global Context-enhanced Graph Convolutional Networks (GCGCN), a novel model which is composed of entities as nodes and context of entity pairs as edges between nodes to capture rich global context information of entities in a document. Two hierarchical blocks, Context-aware Attention Guided Graph Convolution (CAGGC) for partially connected graphs and Multi-head Attention Guided Graph Convolution (MAGGC) for fully connected graphs, could take progressively more global context into account. Meanwhile, we leverage a large-scale distantly supervised dataset to pre-train a GCGCN model with curriculum learning, which is then fine-tuned on the human-annotated dataset for further improving document-level RE performance. The experimental results on DocRED show that our model could effectively capture rich global context information in the document, leading to a state-of-the-art result. Our code is available at .", "keyphrases": ["graph convolutional networks", "document-level relation extraction", "edge"]} +{"id": "sun-etal-2020-clireval", "title": "CLIReval: Evaluating Machine Translation as a Cross-Lingual Information Retrieval Task", "abstract": "We present CLIReval, an easy-to-use toolkit for evaluating machine translation (MT) with the proxy task of cross-lingual information retrieval (CLIR). Contrary to what the project name might suggest, CLIReval does not actually require any annotated CLIR dataset. Instead, it automatically transforms translations and references used in MT evaluations into a synthetic CLIR dataset; it then sets up a standard search engine (Elasticsearch) and computes various information retrieval metrics (e.g., mean average precision) by treating the translations as documents to be retrieved. The idea is to gauge the quality of MT by its impact on the document translation approach to CLIR. As a case study, we run CLIReval on the \u201cmetrics shared task\u201d of WMT2019; while this extrinsic metric is not intended to replace popular intrinsic metrics such as BLEU, results suggest CLIReval is competitive in many language pairs in terms of correlation to human judgments of quality. 
CLIReval is publicly available at .", "keyphrases": ["machine translation", "search engine", "clireval"]} +{"id": "barzilay-lapata-2006-aggregation", "title": "Aggregation via Set Partitioning for Natural Language Generation", "abstract": "The role of aggregation in natural language generation is to combine two or more linguistic structures into a single sentence. The task is crucial for generating concise and readable texts. We present an efficient algorithm for automatically learning aggregation rules from a text and its related database. The algorithm treats aggregation as a set partitioning problem and uses a global inference procedure to find an optimal solution. Our experiments show that this approach yields substantial improvements over a clustering-based model which relies exclusively on local information.", "keyphrases": ["natural language generation", "aggregation", "other work"]} +{"id": "fei-li-2020-cross", "title": "Cross-Lingual Unsupervised Sentiment Classification with Multi-View Transfer Learning", "abstract": "Recent neural network models have achieved impressive performance on sentiment classification in English as well as other languages. Their success heavily depends on the availability of a large amount of labeled data or parallel corpus. In this paper, we investigate an extreme scenario of cross-lingual sentiment classification, in which the low-resource language does not have any labels or parallel corpus. We propose an unsupervised cross-lingual sentiment classification model named multi-view encoder-classifier (MVEC) that leverages an unsupervised machine translation (UMT) system and a language discriminator. Unlike previous language model (LM) based fine-tuning approaches that adjust parameters solely based on the classification error on training data, we employ the encoder-decoder framework of a UMT as a regularization component on the shared network parameters. In particular, the cross-lingual encoder of our model learns a shared representation, which is effective for both reconstructing input sentences of two languages and generating more representative views from the input for classification. Extensive experiments on five language pairs verify that our model significantly outperforms other models for 8/11 sentiment classification tasks.", "keyphrases": ["sentiment classification", "transfer learning", "input sentence"]} +{"id": "schick-schutze-2021-generating", "title": "Generating Datasets with Pretrained Language Models", "abstract": "To obtain high-quality sentence embeddings from pretrained language models (PLMs), they must either be augmented with additional pretraining objectives or finetuned on a large set of labeled text pairs. While the latter approach typically outperforms the former, it requires great human effort to generate suitable datasets of sufficient size. In this paper, we show how PLMs can be leveraged to obtain high-quality sentence embeddings without the need for labeled data, finetuning or modifications to the pretraining objective: We utilize the generative abilities of large and high-performing PLMs to generate entire datasets of labeled text pairs from scratch, which we then use for finetuning much smaller and more efficient models. 
Our fully unsupervised approach outperforms strong baselines on several semantic textual similarity datasets.", "keyphrases": ["language model", "plms", "similarity dataset", "dino"]} +{"id": "dozat-etal-2017-stanfords", "title": "Stanford's Graph-based Neural Dependency Parser at the CoNLL 2017 Shared Task", "abstract": "This paper describes the neural dependency parser submitted by Stanford to the CoNLL 2017 Shared Task on parsing Universal Dependencies. Our system uses relatively simple LSTM networks to produce part of speech tags and labeled dependency parses from segmented and tokenized sequences of words. In order to address the rare word problem that abounds in languages with complex morphology, we include a character-based word representation that uses an LSTM to produce embeddings from sequences of characters. Our system was ranked first according to all five relevant metrics for the system: UPOS tagging (93.09%), XPOS tagging (82.27%), unlabeled attachment score (81.30%), labeled attachment score (76.30%), and content word labeled attachment score (72.57%).", "keyphrases": ["neural dependency parser", "stanford", "pos tagger"]} +{"id": "rahman-ng-2010-inducing", "title": "Inducing Fine-Grained Semantic Classes via Hierarchical and Collective Classification", "abstract": "Research in named entity recognition and mention detection has typically involved a fairly small number of semantic classes, which may not be adequate if semantic class information is intended to support natural language applications. Motivated by this observation, we examine the under-studied problem of semantic subtype induction, where the goal is to automatically determine which of a set of 92 fine-grained semantic classes a noun phrase belongs to. We seek to improve the standard supervised approach to this problem using two techniques: hierarchical classification and collective classification. Experimental results demonstrate the effectiveness of these techniques, whether or not they are applied in isolation or in combination with the standard approach.", "keyphrases": ["semantic class", "collective classification", "noun phrase", "fine-grained typing"]} +{"id": "rashkin-etal-2021-increasing", "title": "Increasing Faithfulness in Knowledge-Grounded Dialogue with Controllable Features", "abstract": "Knowledge-grounded dialogue systems are intended to convey information that is based on evidence provided in a given source text. We discuss the challenges of training a generative neural dialogue model for such systems that is controlled to stay faithful to the evidence. Existing datasets contain a mix of conversational responses that are faithful to selected evidence as well as more subjective or chit-chat style responses. We propose different evaluation measures to disentangle these different styles of responses by quantifying the informativeness and objectivity. At training time, additional inputs based on these evaluation measures are given to the dialogue model. At generation time, these additional inputs act as stylistic controls that encourage the model to generate responses that are faithful to the provided evidence. We also investigate the usage of additional controls at decoding time using resampling techniques. 
In addition to automatic metrics, we perform a human evaluation study where raters judge the output of these controlled generation models to be generally more objective and faithful to the evidence compared to baseline dialogue systems.", "keyphrases": ["faithfulness", "knowledge-grounded dialogue", "objective sentence"]} +{"id": "stern-etal-2017-minimal", "title": "A Minimal Span-Based Neural Constituency Parser", "abstract": "In this work, we present a minimal neural model for constituency parsing based on independent scoring of labels and spans. We show that this model is not only compatible with classical dynamic programming techniques, but also admits a novel greedy top-down inference algorithm based on recursive partitioning of the input. We demonstrate empirically that both prediction schemes are competitive with recent work, and when combined with basic extensions to the scoring model are capable of achieving state-of-the-art single-model performance on the Penn Treebank (91.79 F1) and strong performance on the French Treebank (82.23 F1).", "keyphrases": ["constituent", "penn treebank", "span-based parser"]} +{"id": "bjorne-salakoski-2018-biomedical", "title": "Biomedical Event Extraction Using Convolutional Neural Networks and Dependency Parsing", "abstract": "Event and relation extraction are central tasks in biomedical text mining. Where relation extraction concerns the detection of semantic connections between pairs of entities, event extraction expands this concept with the addition of trigger words, multiple arguments and nested events, in order to more accurately model the diversity of natural language. In this work we develop a convolutional neural network that can be used for both event and relation extraction. We use a linear representation of the input text, where information is encoded with various vector space embeddings. Most notably, we encode the parse graph into this linear space using dependency path embeddings. We integrate our neural network into the open source Turku Event Extraction System (TEES) framework. Using this system, our machine learning model can be easily applied to a large set of corpora from e.g. the BioNLP, DDI Extraction and BioCreative shared tasks. We evaluate our system on 12 different event, relation and NER corpora, showing good generalizability to many tasks and achieving improved performance on several corpora.", "keyphrases": ["event extraction", "biomedical domain", "deep neural network"]} +{"id": "rubino-etal-2012-dcu", "title": "DCU-Symantec Submission for the WMT 2012 Quality Estimation Task", "abstract": "This paper describes the features and the machine learning methods used by Dublin City University (DCU) and SYMANTEC for the WMT 2012 quality estimation task. Two sets of features are proposed: one constrained, i.e. respecting the data limitation suggested by the workshop organisers, and one unconstrained, i.e. using data or tools trained on data that was not provided by the workshop organisers. In total, more than 300 features were extracted and used to train classifiers in order to predict the translation quality of unseen data. In this paper, we focus on a subset of our feature set that we consider to be relatively novel: features based on a topic model built using the Latent Dirichlet Allocation approach, and features based on source and target language syntax extracted using part-of-speech (POS) taggers and parsers. 
We evaluate nine feature combinations using four classification-based and four regression-based machine learning techniques.", "keyphrases": ["wmt", "quality estimation task", "topic model"]} +{"id": "kulkarni-etal-2018-multi", "title": "Multi-view Models for Political Ideology Detection of News Articles", "abstract": "A news article's title, content and link structure often reveal its political ideology. However, most existing works on automatic political ideology detection only leverage textual cues. Drawing inspiration from recent advances in neural inference, we propose a novel attention based multi-view model to leverage cues from all of the above views to identify the ideology evinced by a news article. Our model draws on advances in representation learning in natural language processing and network science to capture cues from both textual content and the network structure of news articles. We empirically evaluate our model against a battery of baselines and show that our model outperforms state of the art by 10 percentage points F1 score.", "keyphrases": ["political ideology detection", "news article", "left-vs-right bias classification"]} +{"id": "yates-etzioni-2007-unsupervised", "title": "Unsupervised Resolution of Objects and Relations on the Web", "abstract": "The task of identifying synonymous relations and objects, or Synonym Resolution (SR), is critical for high-quality information extraction. The bulk of previous SR work assumed strong domain knowledge or hand-tagged training examples. This paper investigates SR in the context of unsupervised information extraction, where neither is available. The paper presents a scalable, fully-implemented system for SR that runs in O(KN log N) time in the number of extractions N and the maximum number of synonyms per word, K. The system, called RESOLVER, introduces a probabilistic relational model for predicting whether two strings are co-referential based on the similarity of the assertions containing them. Given two million assertions extracted from the Web, RESOLVER resolves objects with 78% precision and an estimated 68% recall and resolves relations with 90% precision and 35% recall.", "keyphrases": ["object", "web", "resolver"]} +{"id": "erkan-etal-2007-semi", "title": "Semi-Supervised Classification for Extracting Protein Interaction Sentences using Dependency Parsing", "abstract": "We introduce a relation extraction method to identify the sentences in biomedical text that indicate an interaction among the protein names mentioned. Our approach is based on the analysis of the paths between two protein names in the dependency parse trees of the sentences. Given two dependency trees, we define two separate similarity functions (kernels) based on cosine similarity and edit distance among the paths between the protein names. Using these similarity functions, we investigate the performances of two classes of learning algorithms, Support Vector Machines and k-nearest-neighbor, and the semisupervised counterparts of these algorithms, transductive SVMs and harmonic functions, respectively. Significant improvement over the previous results in the literature is reported as well as a new benchmark dataset is introduced. 
Semi-supervised algorithms perform better than their supervised version by a wide margin especially when the amount of labeled data is limited.", "keyphrases": ["dependency parsing", "semi-supervised classification", "text document"]} +{"id": "hong-etal-2018-self", "title": "Self-regulation: Employing a Generative Adversarial Network to Improve Event Detection", "abstract": "Due to the ability of encoding and mapping semantic information into a high-dimensional latent feature space, neural networks have been successfully used for detecting events to a certain extent. However, such a feature space can be easily contaminated by spurious features inherent in event detection. In this paper, we propose a self-regulated learning approach by utilizing a generative adversarial network to generate spurious features. On this basis, we employ a recurrent network to eliminate the fakes. Detailed experiments on the ACE 2005 and TAC-KBP 2015 corpora show that our proposed method is highly effective and adaptable.", "keyphrases": ["generative adversarial network", "event detection", "spurious feature"]} +{"id": "appidi-etal-2020-creation", "title": "Creation of Corpus and analysis in Code-Mixed Kannada-English Twitter data for Emotion Prediction", "abstract": "Emotion prediction is a critical task in the field of Natural Language Processing (NLP). There has been a significant amount of work done in emotion prediction for resource-rich languages. There has been work done on code-mixed social media corpus but not on emotion prediction of Kannada-English code-mixed Twitter data. In this paper, we analyze the problem of emotion prediction on a corpus obtained from code-mixed Kannada-English extracted from Twitter annotated with their respective `Emotion' for each tweet. We experimented with machine learning prediction models using features like Character N-Grams, Word N-Grams, Repetitive characters, and others on SVM and LSTM on our corpus, which resulted in an accuracy of 30% and 32% respectively.", "keyphrases": ["twitter data", "emotion prediction", "resource-rich language"]} +{"id": "reed-etal-2008-language", "title": "Language Resources for Studying Argument", "abstract": "This paper describes the development of a written corpus of argumentative reasoning. Arguments in the corpus have been analysed using state of the art techniques from argumentation theory and have been marked up using an open, reusable markup language. A number of the key challenges encountered during the process are explored, and preliminary observations about features such as inter-coder reliability and corpus statistics are discussed. In addition, several examples are offered of how this kind of language resource can be used in linguistic, computational and philosophical research, and in particular, how the corpus has been used to initiate a programme investigating the automatic detection of argumentative structure.", "keyphrases": ["argumentation theory", "araucaria", "newspaper article", "parliamentary record"]} +{"id": "chandlee-etal-2015-output", "title": "Output Strictly Local Functions", "abstract": "This paper characterizes a subclass of subsequential string-to-string functions called Output Strictly Local (OSL) and presents a learning algorithm which provably learns any OSL function in polynomial time and data. This algorithm is more efficient than other existing ones capable of learning this class. 
The OSL class is motivated by the study of the nature of string-to-string transformations, a cornerstone of modern phonological grammars.", "keyphrases": ["function", "phonological process", "corollary", "important exception", "prefix function"]} +{"id": "constant-etal-2017-survey", "title": "Survey: Multiword Expression Processing: A Survey", "abstract": "Multiword expressions (MWEs) are a class of linguistic forms spanning conventional word boundaries that are both idiosyncratic and pervasive across different languages. The structure of linguistic processing that depends on the clear distinction between words and phrases has to be re-thought to accommodate MWEs. The issue of MWE handling is crucial for NLP applications, where it raises a number of challenges. The emergence of solutions in the absence of guiding principles motivates this survey, whose aim is not only to provide a focused review of MWE processing, but also to clarify the nature of interactions between MWE processing and downstream applications. We propose a conceptual framework within which challenges and research contributions can be positioned. It offers a shared understanding of what is meant by \u201cMWE processing,\u201d distinguishing the subtasks of MWE discovery and identification. It also elucidates the interactions between MWE processing and two use cases: Parsing and machine translation. Many of the approaches in the literature can be differentiated according to how MWE processing is timed with respect to underlying use cases. We discuss how such orchestration choices affect the scope of MWE-aware systems. For each of the two MWE processing subtasks and for each of the two use cases, we conclude on open issues and research perspectives.", "keyphrases": ["multiword expression", "subtask", "mwe discovery", "survey", "automatic identification"]} +{"id": "ding-etal-2016-knowledge", "title": "Knowledge-Driven Event Embedding for Stock Prediction", "abstract": "Representing structured events as vectors in continuous space offers a new way for defining dense features for natural language processing (NLP) applications. Prior work has proposed effective methods to learn event representations that can capture syntactic and semantic information over text corpus, demonstrating their effectiveness for downstream tasks such as event-driven stock prediction. On the other hand, events extracted from raw texts do not contain background knowledge on entities and relations that they are mentioned. To address this issue, this paper proposes to leverage extra information from knowledge graph, which provides ground truth such as attributes and properties of entities and encodes valuable relations between entities. Specifically, we propose a joint model to combine knowledge graph information into the objective function of an event embedding learning model. Experiments on event similarity and stock market prediction show that our model is more capable of obtaining better event embeddings and making more accurate prediction on stock market volatilities.", "keyphrases": ["event embedding", "stock prediction", "knowledge-driven event"]} +{"id": "mrksic-etal-2017-semantic", "title": "Semantic Specialization of Distributional Word Vector Spaces using Monolingual and Cross-Lingual Constraints", "abstract": "We present Attract-Repel, an algorithm for improving the semantic quality of word vectors by injecting constraints extracted from lexical resources. 
Attract-Repel facilitates the use of constraints from mono- and cross-lingual resources, yielding semantically specialized cross-lingual vector spaces. Our evaluation shows that the method can make use of existing cross-lingual lexicons to construct high-quality vector spaces for a plethora of different languages, facilitating semantic transfer from high- to lower-resource ones. The effectiveness of our approach is demonstrated with state-of-the-art results on semantic similarity datasets in six languages. We next show that Attract-Repel-specialized vectors boost performance in the downstream task of dialogue state tracking (DST) across multiple languages. Finally, we show that cross-lingual vector spaces produced by our algorithm facilitate the training of multilingual DST models, which brings further performance improvements.", "keyphrases": ["vector space", "semantic specialization", "language understanding task"]} +{"id": "vogel-jurafsky-2010-learning", "title": "Learning to Follow Navigational Directions", "abstract": "We present a system that learns to follow navigational natural language directions. Where traditional models learn from linguistic annotation or word distributions, our approach is grounded in the world, learning by apprenticeship from routes through a map paired with English descriptions. Lacking an explicit alignment between the text and the reference path makes it difficult to determine what portions of the language describe which aspects of the route. We learn this correspondence with a reinforcement learning algorithm, using the deviation of the route we follow from the intended path as a reward signal. We demonstrate that our system successfully grounds the meaning of spatial terms like above and south into geometric properties of paths.", "keyphrases": ["route", "reinforcement learning", "environment", "action", "language instruction"]} +{"id": "curran-2005-supersense", "title": "Supersense Tagging of Unknown Nouns Using Semantic Similarity", "abstract": "The limited coverage of lexical-semantic resources is a significant problem for NLP systems which can be alleviated by automatically classifying the unknown words. Supersense tagging assigns unknown nouns one of 26 broad semantic categories used by lexicographers to organise their manual insertion into WORDNET. Ciaramita and Johnson (2003) present a tagger which uses synonym set glosses as annotated training examples. We describe an unsupervised approach, based on vector-space similarity, which does not require annotated examples but significantly outperforms their tagger. We also demonstrate the use of an extremely large shallow-parsed corpus for calculating vector-space semantic similarity.", "keyphrases": ["noun", "semantic similarity", "wsd"]} +{"id": "xu-etal-2011-passage", "title": "Passage Retrieval for Information Extraction using Distant Supervision", "abstract": "In this paper, we propose a keyword-based passage retrieval algorithm for information extraction, trained by distant supervision. Our goal is to be able to extract attributes of people and organizations more quickly and accurately by first ranking all the potentially relevant passages according to their likelihood of containing the answer and then performing a traditional deeper, slower analysis of individual passages. 
Using Freebase as our source of known relation instances and Wikipedia as our text source, we collected a weighted set of", "keyphrases": ["information extraction", "distant supervision", "relation instance"]} +{"id": "rubino-etal-2016-information", "title": "Information Density and Quality Estimation Features as Translationese Indicators for Human Translation Classification", "abstract": "This paper introduces information density and machine translation quality estimation inspired features to automatically detect and classify human translated texts. We investigate two settings: discriminating between translations and comparable originally authored texts, and distinguishing two levels of translation professionalism. Our framework is based on delexicalised sentence-level dense feature vector representations combined with a supervised machine learning approach. The results show state-of-the-art performance for mixed-domain translationese detection with information density and quality estimation based features, while results on translation expertise classification are mixed.", "keyphrases": ["translator", "expertise", "information density"]} +{"id": "qazvinian-radev-2011-learning", "title": "Learning From Collective Human Behavior to Introduce Diversity in Lexical Choice", "abstract": "We analyze collective discourse, a collective human behavior in content generation, and show that it exhibits diversity, a property of general collective systems. Using extensive analysis, we propose a novel paradigm for designing summary generation systems that reflect the diversity of perspectives seen in real-life collective summarization. We analyze 50 sets of summaries written by humans about the same story or artifact and investigate the diversity of perspectives across these summaries. We show how different summaries use various phrasal information units (i.e., nuggets) to express the same atomic semantic units, called factoids. Finally, we present a ranker that employs distributional similarities to build a network of words, and captures the diversity of perspectives by detecting communities in this network. Our experiments show how our system outperforms a wide range of other document ranking systems that leverage diversity.", "keyphrases": ["collective human behavior", "diversity", "factoid"]} +{"id": "matuschek-gurevych-2014-high", "title": "High Performance Word Sense Alignment by Joint Modeling of Sense Distance and Gloss Similarity", "abstract": "In this paper, we present a machine learning approach for word sense alignment (WSA) which combines distances between senses in the graph representations of lexical-semantic resources with gloss similarities. In this way, we significantly outperform the state of the art on each of the four datasets we consider. Moreover, we present two novel datasets for WSA between Wiktionary and Wikipedia in English and German. The latter dataset is not only of unprecedented size, but also created by the large community of Wiktionary editors instead of expert annotators, making it an interesting subject of study in its own right as the first crowdsourced WSA dataset. 
We will make both datasets freely available along with our computed alignments.", "keyphrases": ["distance", "gloss similarity", "wikipedia"]} +{"id": "zheng-etal-2013-dynamic", "title": "Dynamic Knowledge-Base Alignment for Coreference Resolution", "abstract": "Coreference resolution systems can benefit greatly from inclusion of global context, and a number of recent approaches have demonstrated improvements when precomputing an alignment to external knowledge sources. However, since alignment itself is a challenging task and is often noisy, existing systems either align conservatively, resulting in very few links, or combine the attributes of multiple candidates, leading to a conflation of entities. Our approach instead performs joint inference between within-document coreference and entity linking, maintaining ranked lists of candidate entities that are dynamically merged and reranked during inference. Further, we incorporate a large set of surface string variations for each entity by using anchor texts from the web that link to the entity. These forms of global context enable our system to improve classifier-based coreference by 1.09 B3 F1 points, and improve over the previous state-of-the-art by 0.41 points, thus introducing a new state-of-the-art result on the ACE 2004 data.", "keyphrases": ["list", "knowledge base", "tight integration"]} +{"id": "fujita-sato-2008-computing", "title": "Computing Paraphrasability of Syntactic Variants Using Web Snippets", "abstract": "In a broad range of natural language processing tasks, a large-scale knowledge-base of paraphrases is anticipated to improve their performance. The key issue in creating such a resource is to establish a practical method of computing semantic equivalence and syntactic substitutability, i.e., paraphrasability, between a given pair of expressions. This paper addresses the issues of computing paraphrasability, focusing on syntactic variants of predicate phrases. Our model estimates paraphrasability based on traditional distributional similarity measures, where the Web snippets are used to overcome the data sparseness problem in handling predicate phrases. Several feature sets are evaluated through empirical experiments.", "keyphrases": ["syntactic variant", "web snippet", "distributional similarity measure"]} +{"id": "klyueva-etal-2017-neural", "title": "Neural Networks for Multi-Word Expression Detection", "abstract": "In this paper we describe the MUMULS system that participated in the 2017 shared task on automatic identification of verbal multiword expressions (VMWEs). The MUMULS system was implemented using a supervised approach based on recurrent neural networks using the open source library TensorFlow. The model was trained on a data set containing annotated VMWEs as well as morphological and syntactic information. The MUMULS system performed the identification of VMWEs in 15 languages; it was one of the few systems that could categorize VMWE types in nearly all languages.", "keyphrases": ["supervised approach", "recurrent neural network", "syntactic information"]} +{"id": "sun-duh-2020-clirmatrix", "title": "CLIRMatrix: A massively large collection of bilingual and multilingual datasets for Cross-Lingual Information Retrieval", "abstract": "We present CLIRMatrix, a massively large collection of bilingual and multilingual datasets for Cross-Lingual Information Retrieval extracted automatically from Wikipedia. 
CLIRMatrix comprises (1) BI-139, a bilingual dataset of queries in one language matched with relevant documents in another language for 139x138=19,182 language pairs, and (2) MULTI-8, a multilingual dataset of queries and documents jointly aligned in 8 different languages. In total, we mined 49 million unique queries and 34 billion (query, document, label) triplets, making it the largest and most comprehensive CLIR dataset to date. This collection is intended to support research in end-to-end neural information retrieval and is publicly available at [url]. We provide baseline neural model results on BI-139, and evaluate MULTI-8 in both single-language retrieval and mix-language retrieval settings.", "keyphrases": ["large collection", "cross-lingual information retrieval", "wikipedia", "query"]} +{"id": "raux-eskenazi-2008-optimizing", "title": "Optimizing Endpointing Thresholds using Dialogue Features in a Spoken Dialogue System", "abstract": "This paper describes a novel algorithm to dynamically set endpointing thresholds based on a rich set of dialogue features to detect the end of user utterances in a dialogue system. By analyzing the relationship between silences in user's speech to a spoken dialogue system and a wide range of automatically extracted features from discourse, semantics, prosody, timing and speaker characteristics, we found that all features correlate with pause duration and with whether a silence indicates the end of the turn, with semantics and timing being the most informative. Based on these features, the proposed method reduces latency by up to 24% over a fixed threshold baseline. Offline evaluation results were confirmed by implementing the proposed algorithm in the Let's Go system.", "keyphrases": ["spoken dialogue system", "prosody", "turn-taking"]} +{"id": "kawahara-etal-2014-inducing", "title": "Inducing Example-based Semantic Frames from a Massive Amount of Verb Uses", "abstract": "We present an unsupervised method for inducing semantic frames from verb uses in giga-word corpora. Our semantic frames are verb-specific example-based frames that are distinguished according to their senses. We use the Chinese Restaurant Process to automatically induce these frames from a massive amount of verb instances. In our experiments, we acquire broad-coverage semantic frames from two giga-word corpora, the larger comprising 20 billion words. Our experimental results indicate the effectiveness of our approach.", "keyphrases": ["massive amount", "verb use", "case frame", "different language"]} +{"id": "abhishek-etal-2017-fine", "title": "Fine-Grained Entity Type Classification by Jointly Learning Representations and Label Embeddings", "abstract": "Fine-grained entity type classification (FETC) is the task of classifying an entity mention to a broad set of types. Distant supervision paradigm is extensively used to generate training data for this task. However, generated training data assigns same set of labels to every mention of an entity without considering its local context. Existing FETC systems have two major drawbacks: assuming training data to be noise free and use of hand crafted features. Our work overcomes both drawbacks. We propose a neural network model that jointly learns entity mentions and their context representation to eliminate use of hand crafted features. Our model treats training data as noisy and uses non-parametric variant of hinge loss function. 
Experiments show that the proposed model outperforms previous state-of-the-art methods on two publicly available datasets, namely FIGER (GOLD) and BBN with an average relative improvement of 2.69% in micro-F1 score. Knowledge learnt by our model on one dataset can be transferred to other datasets while using same model or other FETC systems. These approaches of transferring knowledge further improve the performance of respective models.", "keyphrases": ["mention", "neural network model", "hinge loss function"]} +{"id": "chen-etal-2016-thorough", "title": "A Thorough Examination of the CNN/Daily Mail Reading Comprehension Task", "abstract": "Enabling a computer to understand a document so that it can answer comprehension questions is a central, yet unsolved goal of NLP. A key factor impeding its solution by machine learned systems is the limited availability of human-annotated data. Hermann et al. (2015) seek to solve this problem by creating over a million training examples by pairing CNN and Daily Mail news articles with their summarized bullet points, and show that a neural network can then be trained to give good performance on this task. In this paper, we conduct a thorough examination of this new reading comprehension task. Our primary aim is to understand what depth of language understanding is required to do well on this task. We approach this from one side by doing a careful hand-analysis of a small subset of the problems and from the other by showing that simple, carefully designed systems can obtain accuracies of 73.6% and 76.6% on these two datasets, exceeding current state-of-the-art results by 7-10% and approaching what we believe is the ceiling for performance on this task.", "keyphrases": ["daily mail", "comprehension task", "language understanding", "human-level performance", "reader"]} +{"id": "huang-riloff-2013-multi", "title": "Multi-faceted Event Recognition with Bootstrapped Dictionaries", "abstract": "Identifying documents that describe a specific type of event is challenging due to the high complexity and variety of event descriptions. We propose a multi-faceted event recognition approach, which identifies documents about an event using event phrases as well as defining characteristics of the event. Our research focuses on civil unrest events and learns civil unrest expressions as well as phrases corresponding to potential agents and reasons for civil unrest. We present a bootstrapping algorithm that automatically acquires event phrases, agent terms, and purpose (reason) phrases from unannotated texts. We use the bootstrapped dictionaries to identify civil unrest documents and show that multi-faceted event recognition can yield high accuracy.", "keyphrases": ["bootstrapped dictionary", "unrest event", "agent", "multi-faceted event recognition", "event expression"]} +{"id": "sohrab-miwa-2018-deep", "title": "Deep Exhaustive Model for Nested Named Entity Recognition", "abstract": "We propose a simple deep neural model for nested named entity recognition (NER). Most NER models focused on flat entities and ignored nested entities, which failed to fully capture underlying semantic information in texts. The key idea of our model is to enumerate all possible regions or spans as potential entity mentions and classify them with deep neural networks. To reduce the computational costs and capture the information of the contexts around the regions, the model represents the regions using the outputs of shared underlying bidirectional long short-term memory. 
We evaluate our exhaustive model on the GENIA and JNLPBA corpora in biomedical domain, and the results show that our model outperforms state-of-the-art models on nested and flat NER, achieving 77.1% and 78.4% respectively in terms of F-score, without any external knowledge resources.", "keyphrases": ["entity mention", "neural exhaustive model", "subsequence", "genia data", "region classification model"]} +{"id": "cao-etal-2019-low", "title": "Low-Resource Name Tagging Learned with Weakly Labeled Data", "abstract": "Name tagging in low-resource languages or domains suffers from inadequate training data. Existing work heavily relies on additional information, while leaving those noisy annotations unexplored that extensively exist on the web. In this paper, we propose a novel neural model for name tagging solely based on weakly labeled (WL) data, so that it can be applied in any low-resource settings. To take the best advantage of all WL sentences, we split them into high-quality and noisy portions for two modules, respectively: (1) a classification module focusing on the large portion of noisy data can efficiently and robustly pretrain the tag classifier by capturing textual context semantics; and (2) a costly sequence labeling module focusing on high-quality data utilizes Partial-CRFs with non-entity sampling to achieve global optimum. Two modules are combined via shared parameters. Extensive experiments involving five low-resource languages and fine-grained food domain demonstrate our superior performance (6% and 7.8% F1 gains on average) as well as efficiency.", "keyphrases": ["high-quality", "ds-ner", "noisy label", "knowledge basis"]} +{"id": "reimers-gurevych-2017-reporting", "title": "Reporting Score Distributions Makes a Difference: Performance Study of LSTM-networks for Sequence Tagging", "abstract": "In this paper we show that reporting a single performance score is insufficient to compare non-deterministic approaches. We demonstrate for common sequence tagging tasks that the seed value for the random number generator can result in statistically significant (p < 10^-4) differences for state-of-the-art systems. For two recent systems for NER, we observe an absolute difference of one percentage point F\u2081-score depending on the selected seed value, making these systems perceived either as state-of-the-art or mediocre. Instead of publishing and reporting single performance scores, we propose to compare score distributions based on multiple executions. Based on the evaluation of 50.000 LSTM-networks for five sequence tagging tasks, we present network architectures that produce both superior performance as well as are more stable with respect to the remaining hyperparameters.", "keyphrases": ["score distribution", "lstm-network", "recent system"]} +{"id": "rioux-etal-2014-fear", "title": "Fear the REAPER: A System for Automatic Multi-Document Summarization with Reinforcement Learning", "abstract": "This paper explores alternate algorithms, reward functions and feature sets for performing multi-document summarization using reinforcement learning with a high focus on reproducibility. We show that ROUGE results can be improved using a unigram and bigram similarity metric when training a learner to select sentences for summarization. Learners are trained to summarize document clusters based on various algorithms and reward functions and then evaluated using ROUGE. 
Our experiments show a statistically significant improvement of 1.33%, 1.58%, and 2.25% for ROUGE-1, ROUGE-2 and ROUGE-L scores, respectively, when compared with the performance of the state of the art in automatic summarization with reinforcement learning on the DUC2004 dataset. Furthermore, query focused extensions of our approach show an improvement of 1.37% and 2.31% for ROUGE-2 and ROUGE-SU4 respectively over query focused extensions of the state of the art with reinforcement learning on the DUC2006 dataset.", "keyphrases": ["multi-document summarization", "reinforcement learning", "rouge-2"]} +{"id": "zeller-etal-2013-derivbase", "title": "DErivBase: Inducing and Evaluating a Derivational Morphology Resource for German", "abstract": "Derivational models are still an under-researched area in computational morphology. Even for German, a rather resource-rich language, there is a lack of large-coverage derivational knowledge. This paper describes a rule-based framework for inducing derivational families (i.e., clusters of lemmas in derivational relationships) and its application to create a high-coverage German resource, DERIVBASE, mapping over 280k lemmas into more than 17k non-singleton clusters. We focus on the rule component and a qualitative and quantitative evaluation. Our approach achieves up to 93% precision and 71% recall. We attribute the high precision to the fact that our rules are based on information from grammar books.", "keyphrases": ["morphology", "rule-based framework", "derivational family", "german resource", "lexeme"]} +{"id": "kirchhoff-etal-2012-evaluating", "title": "Evaluating User Preferences in Machine Translation Using Conjoint Analysis", "abstract": "In spite of much ongoing research on machine translation evaluation there is little quantitative work that directly measures users\u2019 intuitive or emotional preferences regarding different types of machine translation errors. However, the elicitation and modeling of user preferences is an important prerequisite for future research on user adaptation and customization of machine translation engines. In this paper we explore the use of conjoint analysis as a formal quantitative framework to gain insight into users\u2019 relative preferences for different translation error types. Using English-Spanish as the translation direction we conduct a crowd-sourced conjoint analysis study and obtain utility values for individual error types. Our results indicate that word order errors are clearly the most dispreferred error type, followed by word sense, morphological, and function word errors.", "keyphrases": ["user preference", "machine translation", "conjoint analysis"]} +{"id": "zhai-etal-2010-grouping", "title": "Grouping Product Features Using Semi-Supervised Learning with Soft-Constraints", "abstract": "In opinion mining of product reviews, one often wants to produce a summary of opinions based on product features/attributes. However, for the same feature, people can express it with different words and phrases. To produce a meaningful summary, these words and phrases, which are domain synonyms, need to be grouped under the same feature group. This paper proposes a constrained semi-supervised learning method to solve the problem. Experimental results using reviews from five different domains show that the proposed method is competent for the task. 
It outperforms the original EM and the state-of-the-art existing methods by a large margin.", "keyphrases": ["product feature", "synonym", "semi-supervised learning method", "group expression", "such knowledge"]} +{"id": "lu-etal-2010-mining", "title": "Mining Large-scale Parallel Corpora from Multilingual Patents: An English-Chinese example and its application to SMT", "abstract": "In this paper, we demonstrate how to mine large-scale parallel corpora with multilingual patents, which have not been thoroughly explored before. We show how a large-scale English-Chinese parallel corpus containing over 14 million sentence pairs with only 1-5% wrong can be mined from a large amount of English-Chinese bilingual patents. To our knowledge, this is the largest single parallel corpus in terms of sentence pairs. Moreover, we estimate the potential for mining multilingual parallel corpora involving English, Chinese, Japanese, Korean, German, etc., which would to some extent reduce the parallel data acquisition bottleneck in multilingual information processing.", "keyphrases": ["large-scale parallel corpora", "multilingual patent", "common verb"]} +{"id": "yuste-etal-2010-pangeamt", "title": "PangeaMT - putting open standards to work... well", "abstract": "PangeaMT is presented from our standpoint as an LSP keen to develop and implement a cost-effective translation automation strategy that is also in line with our full commitment to open standards. Moses lies at the very core of PangeaMT but we have built several pre-/post-processing modules around it, from word reordering to inline mark-up parser to TMX/XLIFF filters. These represent interesting breakthroughs in real-world, customized SMT applications.", "keyphrases": ["open standard", "moses", "pangeamt"]} +{"id": "obeid-etal-2020-camel", "title": "CAMeL Tools: An Open Source Python Toolkit for Arabic Natural Language Processing", "abstract": "We present CAMeL Tools, a collection of open-source tools for Arabic natural language processing in Python. CAMeL Tools currently provides utilities for pre-processing, morphological modeling, Dialect Identification, Named Entity Recognition and Sentiment Analysis. In this paper, we describe the design of CAMeL Tools and the functionalities it provides.", "keyphrases": ["python", "sentiment analysis", "camel tools", "arabic nlp"]} +{"id": "hastie-etal-2013-demonstration", "title": "Demonstration of the PARLANCE system: a data-driven incremental, spoken dialogue system for interactive search", "abstract": "The Parlance system for interactive search processes dialogue at a microturn level, displaying dialogue phenomena that play a vital role in human spoken conversation. These dialogue phenomena include more natural turn-taking through rapid system responses, generation of backchannels, and user barge-ins. The Parlance demonstration system differentiates from other incremental systems in that it is data-driven with an infrastructure that scales well.", "keyphrases": ["parlance system", "dialogue system", "turn-taking"]} +{"id": "stojanovski-fraser-2018-coreference", "title": "Coreference and Coherence in Neural Machine Translation: A Study Using Oracle Experiments", "abstract": "Cross-sentence context can provide valuable information in Machine Translation and is critical for translation of anaphoric pronouns and for providing consistent translations. In this paper, we devise simple oracle experiments targeting coreference and coherence. 
Oracles are an easy way to evaluate the effect of different discourse-level phenomena in NMT using BLEU and eliminate the necessity to manually define challenge sets for this purpose. We propose two context-aware NMT models and compare them against models working on a concatenation of consecutive sentences. Concatenation models perform better, but are computationally expensive. We show that NMT models taking advantage of context oracle signals can achieve considerable gains in BLEU, of up to 7.02 BLEU for coreference and 1.89 BLEU for coherence on subtitles translation. Access to strong signals allows us to make clear comparisons between context-aware models.", "keyphrases": ["oracle experiment", "cross-sentence context", "coreference"]} +{"id": "eryani-etal-2020-spelling", "title": "A Spelling Correction Corpus for Multiple Arabic Dialects", "abstract": "Arabic dialects are the non-standard varieties of Arabic commonly spoken \u2013 and increasingly written on social media \u2013 across the Arab world. Arabic dialects do not have standard orthographies, a challenge for natural language processing applications. In this paper, we present the MADAR CODA Corpus, a collection of 10,000 sentences from five Arabic city dialects (Beirut, Cairo, Doha, Rabat, and Tunis) represented in the Conventional Orthography for Dialectal Arabic (CODA) in parallel with their raw original form. The sentences come from the Multi-Arabic Dialect Applications and Resources (MADAR) Project and are in parallel across the cities (2,000 sentences from each city). This publicly available resource is intended to support research on spelling correction and text normalization for Arabic dialects. We present results on a bootstrapping technique we use to speed up the CODA annotation, as well as on the degree of similarity across the dialects before and after CODA annotation.", "keyphrases": ["spelling correction", "dialect", "arabic city dialect"]} +{"id": "fulgoni-etal-2016-empirical", "title": "An Empirical Exploration of Moral Foundations Theory in Partisan News Sources", "abstract": "News sources frame issues in different ways in order to appeal to or control the perception of their readers. We present a large-scale study of news articles from partisan sources in the US across a variety of different issues. We first highlight that differences between sides exist by predicting the political leaning of articles of unseen political bias. Framing can be driven by different types of morality that each group values. We emphasize differences in the framing of different news sources, building on moral foundations theory quantified using hand-crafted lexicons. Our results show that partisan sources frame political issues differently both in terms of word usage and through the moral foundations they relate to.", "keyphrases": ["moral foundation theory", "framing", "news article"]} +{"id": "rei-2017-semi", "title": "Semi-supervised Multitask Learning for Sequence Labeling", "abstract": "We propose a sequence labeling framework with a secondary training objective, learning to predict surrounding words for every word in the dataset. This language modeling objective incentivises the system to learn general-purpose patterns of semantic and syntactic composition, which are also useful for improving accuracy on different sequence labeling tasks. The architecture was evaluated on a range of datasets, covering the tasks of error detection in learner texts, named entity recognition, chunking and POS-tagging.
The novel language modeling objective provided consistent performance improvements on every benchmark, without requiring any additional annotated or unannotated data.", "keyphrases": ["secondary training objective", "objective", "language modeling", "sequence labeling task"]} +{"id": "pretorius-etal-2009-setswana", "title": "Setswana Tokenisation and Computational Verb Morphology: Facing the Challenge of a Disjunctive Orthography", "abstract": "Setswana, a Bantu language in the Sotho group, is one of the eleven official languages of South Africa. The language is characterised by a disjunctive orthography, mainly affecting the important word category of verbs. In particular, verbal prefixal morphemes are usually written disjunctively, while suffixal morphemes follow a conjunctive writing style. Therefore, Setswana tokenisation cannot be based solely on whitespace, as is the case in many alphabetic, segmented languages, including the conjunctively written Nguni group of South African Bantu languages. This paper shows how two tokeniser transducers and a finite-state (rule-based) morphological analyser may be combined to effectively solve the Setswana tokenisation problem. The approach has the important advantage of bringing the processing of Setswana beyond the morphological analysis level in line with what is appropriate for the Nguni languages. This means that the challenge of the disjunctive orthography is met at the tokenisation/morphological analysis level and does not in principle propagate to subsequent levels of analysis such as POS tagging and shallow parsing, etc. Indeed, the approach ensures that an aspect such as orthography does not obfuscate sound linguistics and, ultimately, proper semantic analysis, which remains the ultimate aim of linguistic analysis and therefore also computational linguistic analysis.", "keyphrases": ["disjunctive orthography", "nguni language", "setswana tokenisation"]} +{"id": "miceli-barone-sennrich-2017-parallel", "title": "A Parallel Corpus of Python Functions and Documentation Strings for Automated Code Documentation and Code Generation", "abstract": "Automated documentation of programming source code and automated code generation from natural language are challenging tasks of both practical and scientific interest. Progress in these areas has been limited by the low availability of parallel corpora of code and natural language descriptions, which tend to be small and constrained to specific domains. In this work we introduce a large and diverse parallel corpus of a hundred thousand Python functions with their documentation strings (\u201cdocstrings\u201d) generated by scraping open source repositories on GitHub. We describe baseline results for the code documentation and code generation tasks obtained by neural machine translation. We also experiment with data augmentation techniques to further increase the amount of training data. We release our datasets and processing scripts in order to stimulate research in these areas.", "keyphrases": ["parallel corpus", "code generation", "python project"]} +{"id": "utsuro-etal-2003-effect", "title": "Effect of Cross-Language IR in Bilingual Lexicon Acquisition from Comparable Corpora", "abstract": "Within the framework of translation knowledge acquisition from WWW news sites, this paper studies issues on the effect of cross-language retrieval of relevant texts in bilingual lexicon acquisition from comparable corpora.
We experimentally show that reducing the candidate bilingual term pairs against which bilingual term correspondences are estimated is quite effective, in terms of both computational complexity and the performance of precise estimation of bilingual term correspondences.", "keyphrases": ["bilingual lexicon acquisition", "comparable corpora", "translation knowledge acquisition"]} +{"id": "jauhiainen-etal-2016-heli", "title": "HeLI, a Word-Based Backoff Method for Language Identification", "abstract": "In this paper we describe the Helsinki language identification method, HeLI, and the resources we created for and used in the 3rd edition of the Discriminating between Similar Languages (DSL) shared task, which was organized as part of the VarDial 2016 workshop. The shared task comprised a total of 8 tracks, of which we participated in 7. The shared task had a record number of participants, with 17 teams providing results for the closed track of the test set A. Our system reached the 2nd position in 4 tracks (A closed and open, B1 open and B2 open) and in this paper we are focusing on the methods and data used for those tracks. We describe our word-based backoff method in mathematical notation. We also describe how we selected the corpus we used in the open tracks.", "keyphrases": ["word-based backoff method", "heli", "previous vardial workshop"]} +{"id": "hidey-mckeown-2016-identifying", "title": "Identifying Causal Relations Using Parallel Wikipedia Articles", "abstract": "The automatic detection of causal relationships in text is important for natural language understanding. This task has proven to be difficult, however, due to the need for world knowledge and inference. We focus on a sub-task of this problem where an open class set of linguistic markers can provide clues towards understanding causality. Unlike the explicit markers, a closed class, these markers vary significantly in their linguistic forms. We leverage parallel Wikipedia corpora to identify new markers that are variations on known causal phrases, creating a training set via distant supervision. We also train a causal classifier using features from the open class markers and semantic features providing contextual information. The results show that our features provide an 11.05 point absolute increase over the baseline on the task of identifying causality in text.", "keyphrases": ["causality", "wikipedia", "linguistic marker"]} +{"id": "pan-etal-2020-semantic", "title": "Semantic Graphs for Generating Deep Questions", "abstract": "This paper proposes the problem of Deep Question Generation (DQG), which aims to generate complex questions that require reasoning over multiple pieces of information about the input passage. In order to capture the global structure of the document and facilitate reasoning, we propose a novel framework that first constructs a semantic-level graph for the input document and then encodes the semantic graph by introducing an attention-based GGNN (Att-GGNN). Afterward, we fuse the document-level and graph-level representations to perform joint training of content selection and question decoding. On the HotpotQA deep-question centric dataset, our model greatly improves performance over questions requiring reasoning over multiple facts, leading to state-of-the-art performance.
The code is publicly available at .", "keyphrases": ["complex question", "complexity", "reasoning", "input passage", "semantic graph"]} +{"id": "kumar-byrne-2003-weighted", "title": "A Weighted Finite State Transducer Implementation of the Alignment Template Model for Statistical Machine Translation", "abstract": "We present a derivation of the alignment template model for statistical machine translation and an implementation of the model using weighted finite state transducers. The approach we describe allows us to implement each constituent distribution of the model as a weighted finite state transducer or acceptor. We show that bitext word alignment and translation under the model can be performed with standard FSM operations involving these transducers. One of the benefits of using this framework is that it obviates the need to develop specialized search procedures, even for the generation of lattices or N-Best lists of bitext word alignments and translation hypotheses. We evaluate the implementation of the model on the French-to-English Hansards task and report alignment and translation performance.", "keyphrases": ["state transducer", "alignment template model", "statistical machine translation"]} +{"id": "mulcaire-etal-2019-polyglot", "title": "Polyglot Contextual Representations Improve Crosslingual Transfer", "abstract": "We introduce Rosita, a method to produce multilingual contextual word representations by training a single language model on text from multiple languages. Our method combines the advantages of contextual word representations with those of multilingual representation learning. We produce language models from dissimilar language pairs (English/Arabic and English/Chinese) and use them in dependency parsing, semantic role labeling, and named entity recognition, with comparisons to monolingual and non-contextual variants. Our results provide further evidence for the benefits of polyglot learning, in which representations are shared across multiple languages.", "keyphrases": ["multiple language", "dependency parsing", "elmo", "downstream task"]} +{"id": "karimi-etal-2021-aeda-easier", "title": "AEDA: An Easier Data Augmentation Technique for Text Classification", "abstract": "This paper proposes the AEDA (An Easier Data Augmentation) technique to help improve the performance on text classification tasks. AEDA includes only random insertion of punctuation marks into the original text. This is an easier technique to implement for data augmentation than the EDA method (Wei and Zou, 2019), with which we compare our results. In addition, it keeps the order of the words while changing their positions in the sentence, leading to better generalized performance. Furthermore, the deletion operation in EDA can cause loss of information which, in turn, misleads the network, whereas AEDA preserves all the input information. Following the baseline, we perform experiments on five different datasets for text classification. We show that, using the AEDA-augmented data for training, the models show superior performance compared to using the EDA-augmented data in all five datasets.
The source code will be made available for further study and reproduction of the results.", "keyphrases": ["text classification", "random insertion", "aeda"]} +{"id": "benton-etal-2017-multitask", "title": "Multitask Learning for Mental Health Conditions with Limited Social Media Data", "abstract": "Language contains information about the author's demographic attributes as well as their mental state, and has been successfully leveraged in NLP to predict either one alone. However, demographic attributes and mental states also interact with each other, and we are the first to demonstrate how to use them jointly to improve the prediction of mental health conditions across the board. We model the different conditions as tasks in a multitask learning (MTL) framework, and establish for the first time the potential of deep learning in the prediction of mental health from online user-generated text. The framework we propose significantly improves over all baselines and single-task models for predicting mental health conditions, with particularly significant gains for conditions with limited data. In addition, our best MTL model can predict the presence of conditions (neuroatypicality) more generally, further reducing the error of the strong feed-forward baseline.", "keyphrases": ["mental health", "condition", "multi-task learning"]} +{"id": "mubarak-etal-2020-overview", "title": "Overview of OSACT4 Arabic Offensive Language Detection Shared Task", "abstract": "This paper provides an overview of the offensive language detection shared task at the 4th workshop on Open-Source Arabic Corpora and Processing Tools (OSACT4). There were two subtasks, namely: Subtask A, involving the detection of offensive language, which contains unacceptable or vulgar content in addition to any kind of explicit or implicit insults or attacks against individuals or groups; and Subtask B, involving the detection of hate speech, which contains insults or threats targeting a group based on their nationality, ethnicity, race, gender, political or sport affiliation, religious belief, or other common characteristics. In total, 40 teams signed up to participate in Subtask A, and 14 of them submitted test runs. For Subtask B, 33 teams signed up to participate and 13 of them submitted runs. We present and analyze all submissions in this paper.", "keyphrases": ["offensive language", "language detection", "arabic tweet"]} +{"id": "yin-etal-2017-chinese", "title": "Chinese Zero Pronoun Resolution with Deep Memory Network", "abstract": "Existing approaches for Chinese zero pronoun resolution typically utilize only syntactical and lexical features while ignoring semantic information. The fundamental reason is that zero pronouns have no descriptive information, which brings difficulty in explicitly capturing their semantic similarities with antecedents. Meanwhile, representing zero pronouns is challenging since they are merely gaps that convey no actual content. In this paper, we address this issue by building a deep memory network that is capable of encoding zero pronouns into vector representations with information obtained from their contexts and potential antecedents. Consequently, our resolver takes advantage of semantic information by using these continuous distributed representations. 
Experiments on the OntoNotes 5.0 dataset show that the proposed memory network could substantially outperform the state-of-the-art systems in various experimental settings.", "keyphrases": ["pronoun", "deep memory network", "antecedent mention", "pro-drop language"]} +{"id": "nuhn-etal-2012-deciphering", "title": "Deciphering Foreign Language by Combining Language Models and Context Vectors", "abstract": "In this paper we show how to train statistical machine translation systems on real-life tasks using only non-parallel monolingual data from two languages. We present a modification of the method shown in (Ravi and Knight, 2011) that is scalable to vocabulary sizes of several thousand words. On the task shown in (Ravi and Knight, 2011) we obtain better results with only 5% of the computational effort when running our method with an n-gram language model. The efficiency improvement of our method allows us to run experiments with vocabulary sizes of around 5,000 words, such as a non-parallel version of the VERBMOBIL corpus. We also report results using data from the monolingual French and English GIGAWORD corpora.", "keyphrases": ["context vector", "monolingual data", "decipherment technique"]} +{"id": "oraby-etal-2018-controlling", "title": "Controlling Personality-Based Stylistic Variation with Neural Natural Language Generators", "abstract": "Natural language generators for task-oriented dialogue must effectively realize system dialogue actions and their associated semantics. In many applications, it is also desirable for generators to control the style of an utterance. To date, work on task-oriented neural generation has primarily focused on semantic fidelity rather than achieving stylistic goals, while work on style has been done in contexts where it is difficult to measure content preservation. Here we present three different sequence-to-sequence models and carefully test how well they disentangle content and style. We use a statistical generator, Personage, to synthesize a new corpus of over 88,000 restaurant domain utterances whose style varies according to models of personality, giving us total control over both the semantic content and the stylistic variation in the training data. We then vary the amount of explicit stylistic supervision given to the three models. We show that our most explicit model can simultaneously achieve high fidelity to both semantic and stylistic goals: this model adds a context vector of 36 stylistic parameters as input to the hidden state of the encoder at each time step, showing the benefits of explicit stylistic supervision, even when the amount of training data is large.", "keyphrases": ["stylistic variation", "generator", "personality type"]} +{"id": "ziemski-etal-2016-united", "title": "The United Nations Parallel Corpus v1.0", "abstract": "This paper describes the creation process and statistics of the official United Nations Parallel Corpus, the first parallel corpus composed from United Nations documents published by the original data creator. The parallel corpus presented consists of manually translated UN documents from the last 25 years (1990 to 2014) for the six official UN languages, Arabic, Chinese, English, French, Russian, and Spanish. The corpus is freely available for download under a liberal license. Apart from the pairwise aligned documents, a fully aligned subcorpus for the six official UN languages is distributed. 
We provide baseline BLEU scores of our Moses-based SMT systems trained with the full data of language pairs involving English and for all possible translation directions of the six-way subcorpus.", "keyphrases": ["united nations", "parallel corpus", "spanish"]} +{"id": "ohara-wiebe-2003-preposition", "title": "Preposition Semantic Classification via Treebank and FrameNet", "abstract": "This paper reports on experiments in classifying the semantic role annotations assigned to prepositional phrases in both the PENN TREEBANK and FRAMENET. In both cases, experiments are done to see how the prepositions can be classified given the dataset\u2019s role inventory, using standard word-sense disambiguation features. In addition to using traditional word collocations, the experiments incorporate class-based collocations in the form of WordNet hypernyms. For Treebank, the word collocations achieve slightly better performance: 78.5% versus 77.4% when separate classifiers are used per preposition. When using a single classifier for all of the prepositions together, the combined approach yields a significant gain at 85.8% accuracy versus 81.3% for word-only collocations. For FrameNet, the combined use of both collocation types achieves better performance for the individual classifiers: 70.3% versus 68.5%. However, classification using a single classifier is not effective due to confusion among the fine-grained roles.", "keyphrases": ["treebank", "framenet", "preposition"]} +{"id": "dodge-etal-2019-show", "title": "Show Your Work: Improved Reporting of Experimental Results", "abstract": "Research in natural language processing proceeds, in part, by demonstrating that new models achieve superior performance (e.g., accuracy) on held-out test data, compared to previous results. In this paper, we demonstrate that test-set performance scores alone are insufficient for drawing accurate conclusions about which model performs best. We argue for reporting additional details, especially performance on validation data obtained during model development. We present a novel technique for doing so: expected validation performance of the best-found model as a function of computation budget (i.e., the number of hyperparameter search trials or the overall training time). Using our approach, we find multiple recent model comparisons where authors would have reached a different conclusion if they had used more (or less) computation. Our approach also allows us to estimate the amount of computation required to obtain a given accuracy; applying it to several recently published results yields massive variation across papers, from hours to weeks. We conclude with a set of best practices for reporting experimental results which allow for robust future comparisons, and provide code to allow researchers to use our technique.", "keyphrases": ["computation", "hyperparameter", "model comparison", "practice", "area"]} +{"id": "camburu-etal-2020-make", "title": "Make Up Your Mind! Adversarial Generation of Inconsistent Natural Language Explanations", "abstract": "To increase trust in artificial intelligence systems, a promising research direction consists of designing neural models capable of generating natural language explanations for their predictions.
In this work, we show that such models are nonetheless prone to generating mutually inconsistent explanations, such as \u201cBecause there is a dog in the image.\u201d and \u201cBecause there is no dog in the [same] image.\u201d, exposing flaws in either the decision-making process of the model or in the generation of the explanations. We introduce a simple yet effective adversarial framework for sanity checking models against the generation of inconsistent natural language explanations. Moreover, as part of the framework, we address the problem of adversarial attacks with full target sequences, a scenario that was not previously addressed in sequence-to-sequence attacks. Finally, we apply our framework on a state-of-the-art neural natural language inference model that provides natural language explanations for its predictions. Our framework shows that this model is capable of generating a significant number of inconsistent explanations.", "keyphrases": ["explanation", "such model", "dog", "adversarial framework"]} +{"id": "eisenstein-2009-hierarchical", "title": "Hierarchical Text Segmentation from Multi-Scale Lexical Cohesion", "abstract": "This paper presents a novel unsupervised method for hierarchical topic segmentation. Lexical cohesion -- the workhorse of unsupervised linear segmentation -- is treated as a multi-scale phenomenon, and formalized in a Bayesian setting. Each word token is modeled as a draw from a pyramid of latent topic models, where the structure of the pyramid is constrained to induce a hierarchical segmentation. Inference takes the form of a coordinate-ascent algorithm, iterating between two steps: a novel dynamic program for obtaining the globally-optimal hierarchical segmentation, and collapsed variational Bayesian inference over the hidden variables. The resulting system is fast and accurate, and compares well against heuristic alternatives.", "keyphrases": ["segmentation", "lexical cohesion", "latent topic model", "lda"]} +{"id": "zhang-clark-2009-transition", "title": "Transition-Based Parsing of the Chinese Treebank using a Global Discriminative Model", "abstract": "Transition-based approaches have shown competitive performance on constituent and dependency parsing of Chinese. State-of-the-art accuracies have been achieved by a deterministic shift-reduce parsing model on parsing the Chinese Treebank 2 data (Wang et al., 2006). In this paper, we propose a global discriminative model based on the shift-reduce parsing process, combined with a beam-search decoder, obtaining competitive accuracies on CTB2. We also report the performance of the parser on CTB5 data, obtaining the highest scores in the literature for a dependency-based evaluation.", "keyphrases": ["chinese treebank", "constituent", "shift-reduce parser"]} +{"id": "lal-etal-2021-interpret", "title": "InterpreT: An Interactive Visualization Tool for Interpreting Transformers", "abstract": "With the increasingly widespread use of Transformer-based models for NLU/NLP tasks, there is growing interest in understanding the inner workings of these models, why they are so effective at a wide range of tasks, and how they can be further tuned and improved. To contribute towards this goal of enhanced explainability and comprehension, we present InterpreT, an interactive visualization tool for interpreting Transformer-based models.
In addition to providing various mechanisms for investigating general model behaviours, novel contributions made in InterpreT include the ability to track and visualize token embeddings through each layer of a Transformer, highlight distances between certain token embeddings through illustrative plots, and identify task-related functions of attention heads by using new metrics. InterpreT is a task-agnostic tool, and its functionalities are demonstrated through the analysis of model behaviours for two disparate tasks: Aspect Based Sentiment Analysis (ABSA) and the Winograd Schema Challenge (WSC).", "keyphrases": ["interactive visualization tool", "transformer", "interpret"]} +{"id": "joshi-etal-2014-knowledge", "title": "Knowledge Graph and Corpus Driven Segmentation and Answer Inference for Telegraphic Entity-seeking Queries", "abstract": "Much recent work focuses on formal interpretation of natural question utterances, with the goal of executing the resulting structured queries on knowledge graphs (KGs) such as Freebase. Here we address two limitations of this approach when applied to open-domain, entity-oriented Web queries. First, Web queries are rarely well-formed questions. They are \u201ctelegraphic\u201d, with missing verbs, prepositions, clauses, case and phrase clues. Second, the KG is always incomplete, unable to directly answer many queries. We propose a novel technique to segment a telegraphic query and assign a coarse-grained purpose to each segment: a base entity e1, a relation type r, a target entity type t2, and contextual words s. The query seeks entity e2 \u2208 t2 where r(e1,e2) holds, further evidenced by schema-agnostic words s. Query segmentation is integrated with the KG and an unstructured corpus where mentions of entities have been linked to the KG. We do not trust the best or any specific query segmentation. Instead, evidence in favor of candidate e2s is aggregated across several segmentations. Extensive experiments on the ClueWeb corpus and parts of Freebase as our KG, using over a thousand telegraphic queries adapted from TREC, INEX, and WebQuestions, show the efficacy of our approach. For one benchmark, MAP improves from 0.2\u20100.29 (competitive baselines) to 0.42 (our system). NDCG@10 improves from 0.29\u20100.36 to 0.54.", "keyphrases": ["query", "knowledge graph", "unstructured data"]} +{"id": "prasov-chai-2010-fusing", "title": "Fusing Eye Gaze with Speech Recognition Hypotheses to Resolve Exophoric References in Situated Dialogue", "abstract": "In situated dialogue humans often utter linguistic expressions that refer to extralinguistic entities in the environment. Correctly resolving these references is critical yet challenging for artificial agents partly due to their limited speech recognition and language understanding capabilities. Motivated by psycholinguistic studies demonstrating a tight link between language production and human eye gaze, we have developed approaches that integrate naturally occurring human eye gaze with speech recognition hypotheses to resolve exophoric references in situated dialogue in a virtual world. In addition to incorporating eye gaze with the best recognized spoken hypothesis, we developed an algorithm to also handle multiple hypotheses modeled as word confusion networks. Our empirical results demonstrate that incorporating eye gaze with recognition hypotheses consistently outperforms the results obtained from processing recognition hypotheses alone.
Incorporating eye gaze with word confusion networks further improves performance.", "keyphrases": ["eye gaze", "situated dialogue", "speech recognition hypothesis"]} +{"id": "he-etal-2017-generating", "title": "Generating Natural Answers by Incorporating Copying and Retrieving Mechanisms in Sequence-to-Sequence Learning", "abstract": "Generating answers in natural language is very important in real-world question answering systems, which need to obtain a right answer as well as a coherent natural response. In this paper, we propose an end-to-end question answering system called COREQA in sequence-to-sequence learning, which incorporates copying and retrieving mechanisms to generate natural answers within an encoder-decoder framework. Specifically, in COREQA, the semantic units (words, phrases and entities) in a natural answer are dynamically predicted from the vocabulary, copied from the given question and/or retrieved from the corresponding knowledge base jointly. Our empirical study on both synthetic and real-world datasets demonstrates the efficiency of COREQA, which is able to generate correct, coherent and natural answers for knowledge-inquired questions.", "keyphrases": ["copying", "sequence-to-sequence learning", "sentence generation"]} +{"id": "cholakov-etal-2008-towards", "title": "Towards Domain-Independent Deep Linguistic Processing: Ensuring Portability and Re-Usability of Lexicalised Grammars", "abstract": "In this paper we illustrate and underline the importance of making detailed linguistic information a central part of the process of automatic acquisition of large-scale lexicons as a means for enhancing robustness and at the same time ensuring maintainability and re-usability of deep lexicalised grammars. Using the error mining techniques proposed in (van Noord, 2004) we show very convincingly that the main hindrance to portability of deep lexicalised grammars to domains other than the ones originally developed in, as well as to robustness of systems using such grammars, is low lexical coverage. To this effect, we develop linguistically-driven methods that use detailed morphosyntactic information to automatically enhance the performance of deep lexicalised grammars maintaining at the same time their usually already achieved high linguistic quality.", "keyphrases": ["re-usability", "lexicalised grammar", "error mining technique"]} +{"id": "tomuro-2003-interrogative", "title": "Interrogative Reformulation Patterns and Acquisition of Question Paraphrases", "abstract": "We describe a set of paraphrase patterns for questions which we derived from a corpus of questions, and report the result of using them in the automatic recognition of question paraphrases. The aim of our paraphrase patterns is to factor out different syntactic variations of interrogative words, since the interrogative part of a question adds a syntactic superstructure on the sentence part (i.e., the rest of the question), thereby making it difficult for an automatic system to analyze the question. The patterns we derived are rules which map surface syntactic structures to semantic case frames, which serve as the canonical representation of questions. We also describe the process in which we acquired question paraphrases, which we used as the test data.
The results obtained by using the patterns in paraphrase recognition were quite promising.", "keyphrases": ["reformulation pattern", "question paraphrase", "interrogative part"]} +{"id": "moradi-etal-2019-interrogating", "title": "Interrogating the Explanatory Power of Attention in Neural Machine Translation", "abstract": "Attention models have become a crucial component in neural machine translation (NMT). They are often implicitly or explicitly used to justify the model's decision in generating a specific token but it has not yet been rigorously established to what extent attention is a reliable source of information in NMT. To evaluate the explanatory power of attention for NMT, we examine the possibility of yielding the same prediction but with counterfactual attention models that modify crucial aspects of the trained attention model. Using these counterfactual attention mechanisms we assess the extent to which they still preserve the generation of function and content words in the translation process. Compared to a state-of-the-art attention model, our counterfactual attention models produce 68% of function words and 21% of content words in our German-English dataset. Our experiments demonstrate that attention models by themselves cannot reliably explain the decisions made by a NMT model.", "keyphrases": ["neural machine translation", "attention weight", "nature"]} +{"id": "meng-etal-2021-mixture", "title": "Mixture-of-Partitions: Infusing Large Biomedical Knowledge Graphs into BERT", "abstract": "Infusing factual knowledge into pre-trained models is fundamental for many knowledge-intensive tasks. In this paper, we propose Mixture-of-Partitions (MoP), an infusion approach that can handle a very large knowledge graph (KG) by partitioning it into smaller sub-graphs and infusing their specific knowledge into various BERT models using lightweight adapters. To leverage the overall factual knowledge for a target task, these sub-graph adapters are further fine-tuned along with the underlying BERT through a mixture layer. We evaluate our MoP with three biomedical BERTs (SciBERT, BioBERT, PubmedBERT) on six downstream tasks (inc. NLI, QA, Classification), and the results show that our MoP consistently enhances the underlying BERTs in task performance, and achieves new SOTA performances on five evaluated datasets.", "keyphrases": ["knowledge graph", "bert", "mixture-of-partition"]} +{"id": "sakaguchi-etal-2012-naist", "title": "NAIST at the HOO 2012 Shared Task", "abstract": "This paper describes the Nara Institute of Science and Technology (NAIST) error correction system in the Helping Our Own (HOO) 2012 Shared Task. Our system targets preposition and determiner errors with spelling correction as a pre-processing step. The result shows that spelling correction improves the Detection, Correction, and Recognition F-scores for preposition errors. With regard to preposition error correction, F-scores were not improved when using the training set with correction of all but preposition errors. As for determiner error correction, there was an improvement when the constituent parser was trained with a concatenation of treebank and modified treebank where all the articles appearing as the first word of an NP were removed.
Our system ranked third in preposition and fourth in determiner error corrections.", "keyphrases": ["shared task", "preposition", "naist", "large feature set", "article correction"]} +{"id": "yu-etal-2018-multilingual", "title": "Multilingual Seq2seq Training with Similarity Loss for Cross-Lingual Document Classification", "abstract": "In this paper we continue experiments where neural machine translation training is used to produce joint cross-lingual fixed-dimensional sentence embeddings. In this framework we introduce a simple method of adding a loss to the learning objective which penalizes distance between representations of bilingually aligned sentences. We evaluate cross-lingual transfer using two approaches, cross-lingual similarity search on an aligned corpus (Europarl) and cross-lingual document classification on a recently published benchmark Reuters corpus, and we find the similarity loss significantly improves performance on both. Furthermore, we notice that while our Reuters results are very competitive, our English results are not as competitive, showing room for improvement in the current cross-lingual state-of-the-art. Our results are based on a set of 6 European languages.", "keyphrases": ["similarity loss", "cross-lingual document classification", "machine translation"]} +{"id": "zhao-he-2009-using", "title": "Using N-gram based Features for Machine Translation System Combination", "abstract": "Conventional confusion network based system combination for machine translation (MT) heavily relies on features that are based on the measure of agreement of words in different translation hypotheses. This paper presents two new features that consider agreement of n-grams in different hypotheses to improve the performance of system combination. The first one is based on a sentence specific online n-gram language model, and the second one is based on n-gram voting. Experiments on a large scale Chinese-to-English MT task show that both features yield significant improvements on the translation performance, and a combination of them produces even better translation results.", "keyphrases": ["n-gram", "system combination", "flexible word order"]} +{"id": "sukhareva-etal-2016-crowdsourcing", "title": "Crowdsourcing a Large Dataset of Domain-Specific Context-Sensitive Semantic Verb Relations", "abstract": "We present a new large dataset of 12403 context-sensitive verb relations manually annotated via crowdsourcing. These relations capture fine-grained semantic information between verb-centric propositions, such as temporal or entailment relations. We propose a novel semantic verb relation scheme and design a multi-step annotation approach for scaling-up the annotations using crowdsourcing. We employ several quality measures and report on agreement scores. The resulting dataset is available under a permissive CreativeCommons license at www.ukp.tu-darmstadt.de/data/verb-relations/. It represents a valuable resource for various applications, such as automatic information consolidation or automatic summarization.", "keyphrases": ["large dataset", "verb relation", "proposition"]} +{"id": "tahmasebi-etal-2012-neer", "title": "NEER: An Unsupervised Method for Named Entity Evolution Recognition", "abstract": "High impact events, political changes and new technologies are reflected in our language and lead to constant evolution of terms, expressions and names. 
Not knowing about names used in the past for referring to a named entity can severely decrease the performance of many computational linguistic algorithms. We propose NEER, an unsupervised method for named entity evolution recognition independent of external knowledge sources. We find time periods with high likelihood of evolution. By analyzing only these time periods using a sliding window co-occurrence method we capture evolving terms in the same context. We thus avoid comparing terms from widely different periods in time and overcome a severe limitation of existing methods for named entity evolution, as shown by the high recall of 90% on the New York Times corpus. We compare several relatedness measures for filtering to improve precision. Furthermore, using machine learning with minimal supervision improves precision to 94%.", "keyphrases": ["unsupervised method", "entity evolution", "neer"]} +{"id": "napoles-etal-2017-finding", "title": "Finding Good Conversations Online: The Yahoo News Annotated Comments Corpus", "abstract": "This work presents a dataset and annotation scheme for the new task of identifying \u201cgood\u201d conversations that occur online, which we call ERICs: Engaging, Respectful, and/or Informative Conversations. We develop a taxonomy to reflect features of entire threads and individual comments which we believe contribute to identifying ERICs; code a novel dataset of Yahoo News comment threads (2.4k threads and 10k comments) and 1k threads from the Internet Argument Corpus; and analyze the features characteristic of ERICs. This is one of the largest annotated corpora of online human dialogues, with the most detailed set of annotations. It will be valuable for identifying ERICs and other aspects of argumentation, dialogue, and discourse.", "keyphrases": ["conversation", "individual comment", "large annotated corpora"]} +{"id": "sil-yates-2011-extracting", "title": "Extracting STRIPS Representations of Actions and Events", "abstract": "Knowledge about how the world changes over time is a vital component of commonsense knowledge for Artificial Intelligence (AI) and natural language understanding. Actions and events are fundamental components to any knowledge about changes in the state of the world: the states before and after an event differ in regular and predictable ways. We describe a novel system that tackles the problem of extracting knowledge from text about how actions and events change the world over time. We leverage standard language-processing tools, like semantic role labelers and coreference resolvers, as well as large-corpus statistics like pointwise mutual information, to identify STRIPS representations of actions and events, a type of representation commonly used in AI planning systems. In experiments on Web text, our extractor\u2019s Area under the Curve (AUC) improves by more than 31% over the closest system from the literature for identifying the preconditions and add effects of actions. In addition, we also extract significant aspects of STRIPS representations that are missing from previous work, including delete effects and arguments.", "keyphrases": ["strips representation", "world", "web text", "precondition"]} +{"id": "cui-etal-2017-superagent", "title": "SuperAgent: A Customer Service Chatbot for E-commerce Websites", "abstract": "Conventional customer service chatbots are usually based on human dialogue, yet face significant issues in terms of data scale and privacy.
In this paper, we present SuperAgent, a customer service chatbot that leverages large-scale and publicly available e-commerce data. Distinct from existing counterparts, SuperAgent takes advantage of data from in-page product descriptions as well as user-generated content from e-commerce websites, which is more practical and cost-effective when answering repetitive questions, freeing up human support staff to answer much higher value questions. We demonstrate SuperAgent as an add-on extension to mainstream web browsers and show its usefulness to users\u2019 online shopping experience.", "keyphrases": ["customer service chatbot", "e-commerce websites", "powerful chatbot"]} +{"id": "peng-etal-2018-rational", "title": "Rational Recurrences", "abstract": "Despite the tremendous empirical success of neural models in natural language processing, many of them lack the strong intuitions that accompany classical machine learning approaches. Recently, connections have been shown between convolutional neural networks (CNNs) and weighted finite state automata (WFSAs), leading to new interpretations and insights. In this work, we show that some recurrent neural networks also share this connection to WFSAs. We characterize this connection formally, defining rational recurrences to be recurrent hidden state update functions that can be written as the Forward calculation of a finite set of WFSAs. We show that several recent neural models use rational recurrences. Our analysis provides a fresh view of these models and facilitates devising new neural architectures that draw inspiration from WFSAs. We present one such model, which performs better than two recent baselines on language modeling and text classification. Our results demonstrate that transferring intuitions from classical models like WFSAs can be an effective approach to designing and understanding neural models.", "keyphrases": ["finite state automata", "recurrent neural network", "rational recurrence", "rnn"]} +{"id": "arun-etal-2010-unified", "title": "A Unified Approach to Minimum Risk Training and Decoding", "abstract": "We present a unified approach to performing minimum risk training and minimum Bayes risk (MBR) decoding with BLEU in a phrase-based model. Key to our approach is the use of a Gibbs sampler that allows us to explore the entire probability distribution and maintain a strict probabilistic formulation across the pipeline. We also describe a new sampling algorithm called corpus sampling which allows us at training time to use BLEU instead of an approximation thereof. Our approach is theoretically sound and gives better (up to +0.6% BLEU) and more stable results than the standard MERT optimization algorithm. By comparing our approach to lattice MBR, we are also able to gain crucial insights about both methods.", "keyphrases": ["unified approach", "minimum risk training", "approximation"]} +{"id": "santus-etal-2016-nine", "title": "Nine Features in a Random Forest to Learn Taxonomical Semantic Relations", "abstract": "ROOT9 is a supervised system for the classification of hypernyms, co-hyponyms and random words that is derived from the already introduced ROOT13 (Santus et al., 2016). It relies on a Random Forest algorithm and nine unsupervised corpus-based features. We evaluate it with a 10-fold cross validation on 9,600 pairs, equally distributed among the three classes and involving several Parts-Of-Speech (i.e. adjectives, nouns and verbs).
When all the classes are present, ROOT9 achieves an F1 score of 90.7%, against a baseline of 57.2% (vector cosine). When the classification is binary, ROOT9 achieves the following results against the baseline: hypernyms-co-hyponyms 95.7% vs. 69.8%, hypernyms-random 91.8% vs. 64.1%, and co-hyponyms-random 97.8% vs. 79.4%. In order to compare the performance with the state-of-the-art, we have also evaluated ROOT9 in subsets of the Weeds et al. (2014) datasets, proving that it is in fact competitive. Finally, we investigated whether the system learns the semantic relation or it simply learns the prototypical hypernyms, as claimed by Levy et al. (2015). The second possibility seems to be the most likely, even though ROOT9 can be trained on negative examples (i.e., switched hypernyms) to drastically reduce this bias.", "keyphrases": ["semantic relation", "supervised system", "random forest algorithm", "co-hyponymy detection"]} +{"id": "joanis-stevenson-2003-general", "title": "A General Feature Space for Automatic Verb Classification", "abstract": "Lexical semantic classes of verbs play an important role in structuring complex predicate information in a lexicon, thereby avoiding redundancy and enabling generalizations across semantically similar verbs with respect to their usage. Such classes, however, require many person-years of expert effort to create manually, and methods are needed for automatically assigning verbs to appropriate classes. In this work, we develop and evaluate a feature space to support the automatic assignment of verbs into a well-known lexical semantic classification that is frequently used in natural language processing. The feature space is general \u2013 applicable to any class distinctions within the target classification; broad \u2013 tapping into a variety of semantic features of the classes; and inexpensive \u2013 requiring no more than a POS tagger and chunker. We perform experiments using support vector machines (SVMs) with the proposed feature space, demonstrating a reduction in error rate ranging from 48% to 88% over a chance baseline accuracy, across classification tasks of varying difficulty. In particular, we attain performance comparable to or better than that of feature sets manually selected for the particular tasks. Our results show that the approach is generally applicable, and reduces the need for resource-intensive linguistic analysis for each new classification task. We also perform a wide range of experiments to determine the most informative features in the feature space, finding that simple, easily extractable features suffice for good verb classification performance.", "keyphrases": ["general feature space", "automatic verb classification", "difficulty", "recent research"]} +{"id": "stewart-eisenstein-2018-making", "title": "Making \u201cfetch\u201d happen: The influence of social and linguistic context on nonstandard word growth and decline", "abstract": "In an online community, new words come and go: today's \u201chaha\u201d may be replaced by tomorrow's \u201clol.\u201d Changes in online writing are usually studied as a social process, with innovations diffusing through a network of individuals in a speech community. But unlike other types of innovation, language change is shaped and constrained by the grammatical system in which it takes part. To investigate the role of social and structural factors in language change, we undertake a large-scale analysis of the frequencies of non-standard words in Reddit.
Dissemination across many linguistic contexts is a predictor of success: words that appear in more linguistic contexts grow faster and survive longer. Furthermore, social dissemination plays a less important role in explaining word growth and decline than previously hypothesized.", "keyphrases": ["linguistic context", "word growth", "decline"]} +{"id": "kiyono-etal-2019-empirical", "title": "An Empirical Study of Incorporating Pseudo Data into Grammatical Error Correction", "abstract": "The incorporation of pseudo data in the training of grammatical error correction models has been one of the main factors in improving the performance of such models. However, consensus is lacking on experimental configurations, namely, choosing how the pseudo data should be generated or used. In this study, these choices are investigated through extensive experiments, and state-of-the-art performance is achieved on the CoNLL-2014 test set (F0.5=65.0) and the official test set of the BEA-2019 shared task (F0.5=70.2) without making any modifications to the model architecture.", "keyphrases": ["pseudo data", "error correction", "back-translation", "english gec"]} +{"id": "song-gildea-2019-sembleu", "title": "SemBleu: A Robust Metric for AMR Parsing Evaluation", "abstract": "Evaluating AMR parsing accuracy involves comparing pairs of AMR graphs. The major evaluation metric, SMATCH (Cai and Knight, 2013), searches for one-to-one mappings between the nodes of two AMRs with a greedy hill-climbing algorithm, which leads to search errors. We propose SEMBLEU, a robust metric that extends BLEU (Papineni et al., 2002) to AMRs. It does not suffer from search errors and considers non-local correspondences in addition to local ones. SEMBLEU is fully content-driven and punishes situations where a system's output does not preserve most information from the input. Preliminary experiments on both sentence and corpus levels show that SEMBLEU has slightly higher consistency with human judgments than SMATCH. Our code is available at .", "keyphrases": ["robust metric", "amr", "sembleu"]} +{"id": "boyd-2007-discontinuity", "title": "Discontinuity Revisited: An Improved Conversion to Context-Free Representations", "abstract": "This paper introduces a new, reversible method for converting syntactic structures with discontinuous constituents into traditional syntax trees. The method is applied to the Tiger Corpus of German and results for PCFG parsing requiring such context-free trees are provided. A labeled dependency evaluation shows that the new conversion method leads to better results by preserving local relationships and introducing fewer inconsistencies into the training data.", "keyphrases": ["constituent", "related idea", "splitting conversion"]} +{"id": "wang-tian-2016-recurrent", "title": "Recurrent Residual Learning for Sequence Classification", "abstract": "In this paper, we explore the possibility of leveraging Residual Networks (ResNet), a powerful structure in constructing extremely deep neural networks for image understanding, to improve recurrent neural networks (RNN) for modeling sequential data. We show that for sequence classification tasks, incorporating residual connections into recurrent structures yields similar accuracy to Long Short Term Memory (LSTM) RNN with far fewer model parameters. In addition, we propose two novel models which combine the best of both residual learning and LSTM.
Experiments show that the new models significantly outperform LSTM.", "keyphrases": ["residual learning", "rnn", "residual connection", "sequence prediction"]} +{"id": "mazur-dale-2010-wikiwars", "title": "WikiWars: A New Corpus for Research on Temporal Expressions", "abstract": "The reliable extraction of knowledge from text requires an appropriate treatment of the time at which reported events take place. Unfortunately, there are very few annotated data sets that support the development of techniques for event time-stamping and tracking the progression of time through a narrative. In this paper, we present a new corpus of temporally-rich documents sourced from English Wikipedia, which we have annotated with TIMEX2 tags. The corpus contains around 120000 tokens, and 2600 TIMEX2 expressions, thus comparing favourably in size to other existing corpora used in these areas. We describe the preparation of the corpus, and compare the profile of the data with other existing temporally annotated corpora. We also report the results obtained when we use DANTE, our temporal expression tagger, to process this corpus, and point to where further work is required. The corpus is publicly available for research purposes.", "keyphrases": ["new corpus", "narrative", "wikiwars"]} +{"id": "hendricks-etal-2021-decoupling", "title": "Decoupling the Role of Data, Attention, and Losses in Multimodal Transformers", "abstract": "Recently, multimodal transformer models have gained popularity because their performance on downstream tasks suggests they learn rich visual-linguistic representations. Focusing on zero-shot image retrieval tasks, we study three important factors that can impact the quality of learned representations: pretraining data, the attention mechanism, and loss functions. By pretraining models on six datasets, we observe that dataset noise and language similarity to our downstream task are important indicators of model performance. Through architectural analysis, we learn that models with a multimodal attention mechanism can outperform deeper models with modality-specific attention mechanisms. Finally, we show that successful contrastive losses used in the self-supervised learning literature do not yield similar performance gains when used in multimodal transformers.", "keyphrases": ["loss", "multimodal transformer", "pre-training task"]} +{"id": "hasler-etal-2018-neural", "title": "Neural Machine Translation Decoding with Terminology Constraints", "abstract": "Despite the impressive quality improvements yielded by neural machine translation (NMT) systems, controlling their translation output to adhere to user-provided terminology constraints remains an open problem. We describe our approach to constrained neural decoding based on finite-state machines and multi-stack decoding which supports target-side constraints as well as constraints with corresponding aligned input text spans.
We demonstrate the performance of our framework on multiple translation tasks and motivate the need for constrained decoding with attentions as a means of reducing misplacement and duplication when translating user constraints.", "keyphrases": ["decoding", "terminology constraint", "finite-state machine", "neural machine translation"]} +{"id": "sai-etal-2020-improving", "title": "Improving Dialog Evaluation with a Multi-reference Adversarial Dataset and Large Scale Pretraining", "abstract": "There is an increasing focus on model-based dialog evaluation metrics such as ADEM, RUBER, and the more recent BERT-based metrics. These models aim to assign a high score to all relevant responses and a low score to all irrelevant responses. Ideally, such models should be trained using multiple relevant and irrelevant responses for any given context. However, no such data is publicly available, and hence existing models are usually trained using a single relevant response and multiple randomly selected responses from other contexts (random negatives). To allow for better training and robust evaluation of model-based metrics, we introduce the DailyDialog++ dataset, consisting of (i) five relevant responses for each context and (ii) five adversarially crafted irrelevant responses for each context. Using this dataset, we first show that even in the presence of multiple correct references, n-gram based metrics and embedding based metrics do not perform well at separating relevant responses from even random negatives. While model-based metrics perform better than n-gram and embedding based metrics on random negatives, their performance drops substantially when evaluated on adversarial examples. To check if large scale pretraining could help, we propose a new BERT-based evaluation metric called DEB, which is pretrained on 727M Reddit conversations and then finetuned on our dataset. DEB significantly outperforms existing models, showing better correlation with human judgments and better performance on random negatives (88.27% accuracy). However, its performance again drops substantially when evaluated on adversarial responses, thereby highlighting that even large-scale pretrained evaluation models are not robust to the adversarial examples in our dataset. The dataset and code are publicly available.", "keyphrases": ["dialog evaluation", "scale pretraining", "adversarial negative response"]} +{"id": "si-etal-2021-better", "title": "Better Robustness by More Coverage: Adversarial and Mixup Data Augmentation for Robust Finetuning", "abstract": "Pretrained language models (PLMs) perform poorly under adversarial attacks. To improve the adversarial robustness, adversarial data augmentation (ADA) has been widely adopted to cover more search space of adversarial attacks by adding textual adversarial examples during training. However, the number of adversarial examples for text augmentation is still extremely insufficient due to the exponentially large attack search space. In this work, we propose a simple and effective method to cover a much larger proportion of the attack search space, called Adversarial and Mixup Data Augmentation (AMDA). Specifically, AMDA linearly interpolates the representations of pairs of training samples to form new virtual samples, which are more abundant and diverse than the discrete text adversarial examples in conventional ADA.
Moreover, to fairly evaluate the robustness of different models, we adopt a challenging evaluation setup, which generates a new set of adversarial examples targeting each model. In text classification experiments with BERT and RoBERTa, AMDA achieves significant robustness gains under two strong adversarial attacks and alleviates the performance degradation of ADA on the clean data. Our code is available at: https://github.com/thunlp/MixADA .", "keyphrases": ["robustness", "adversarial", "mixup data augmentation"]} +{"id": "marsi-ozturk-2015-extraction", "title": "Extraction and generalisation of variables from scientific publications", "abstract": "Scientific theories and models in Earth science typically involve changing variables and their complex interactions, including correlations, causal relations and chains of positive/negative feedback loops. Variables tend to be complex rather than atomic entities and expressed as noun phrases containing multiple modifiers, e.g. oxygen depletion in the upper 500 m of the ocean or timing and magnitude of surface temperature evolution in the Southern Hemisphere in deglacial proxy records. Text mining from Earth science literature is therefore significantly different from biomedical text mining and requires different approaches and methods. Our approach aims at automatically locating and extracting variables and their direction of variation: increasing, decreasing or just changing. Variables are initially extracted by matching tree patterns onto the syntax trees of the source texts. Next, variables are generalised in order to enhance their similarity, facilitating hierarchical search and inference. This generalisation is accomplished by progressive pruning of syntax trees using a set of tree transformation operations. Text mining results are presented as a browsable variable hierarchy which allows users to inspect all mentions of a particular variable type in the text as well as any generalisations or specialisations. The approach is demonstrated on a corpus of 10k abstracts of Nature publications in the field of Marine science. We discuss experiences with this early prototype and outline a number of possible improvements and directions for future research", "keyphrases": ["generalisation", "variable", "syntax tree"]} +{"id": "lu-etal-2019-debug", "title": "DEBUG: A Dense Bottom-Up Grounding Approach for Natural Language Video Localization", "abstract": "In this paper, we focus on natural language video localization: localizing (i.e., grounding) a natural language description in a long and untrimmed video sequence. All currently published models for addressing this problem can be categorized into two types: (i) top-down approach: it does classification and regression for a set of pre-cut video segment candidates; (ii) bottom-up approach: it directly predicts probabilities for each video frame as the temporal boundaries (i.e., start and end time point). However, both approaches suffer from several limitations: the former is computation-intensive for densely placed candidates, while the latter has trailed the performance of the top-down counterpart thus far. To this end, we propose a novel dense bottom-up framework: DEnse Bottom-Up Grounding (DEBUG). DEBUG regards all frames falling in the ground truth segment as foreground, and each foreground frame regresses the unique distances from its location to bi-directional ground truth boundaries.
Extensive experiments on three challenging benchmarks (TACoS, Charades-STA, and ActivityNet Captions) show that DEBUG is able to match the speed of bottom-up models while surpassing the performance of the state-of-the-art top-down models.", "keyphrases": ["frame", "temporal boundary", "dense bottom-up framework", "ground truth segment", "distance"]} +{"id": "gong-etal-2018-document", "title": "Document Similarity for Texts of Varying Lengths via Hidden Topics", "abstract": "Measuring similarity between texts is an important task for several applications. Available approaches to measure document similarity are inadequate for document pairs that have non-comparable lengths, such as a long document and its summary. This is because of the lexical, contextual and the abstraction gaps between a long document of rich details and its concise summary of abstract information. In this paper, we present a document matching approach to bridge this gap, by comparing the texts in a common space of hidden topics. We evaluate the matching algorithm on two matching tasks and find that it consistently and widely outperforms strong baselines. We also highlight the benefits of the incorporation of domain knowledge to text matching.", "keyphrases": ["hidden topic", "common space", "matching task", "document similarity"]} +{"id": "pecina-etal-2011-towards", "title": "Towards Using Web-Crawled Data for Domain Adaptation in Statistical Machine Translation", "abstract": "This paper reports on the ongoing work focused on domain adaptation of statistical machine translation using domain-specific data obtained by domain-focused web crawling. We present a strategy for crawling monolingual and parallel data and their exploitation for testing, language modelling, and system tuning in a phrase-based machine translation framework. The proposed approach is evaluated on the domains of Natural Environment and Labour Legislation and two language pairs: English\u2010French and English\u2010Greek.", "keyphrases": ["statistical machine translation", "domain-specific data", "domain-focused web-crawling"]} +{"id": "al-rfou-skiena-2012-speedread", "title": "SpeedRead: A Fast Named Entity Recognition Pipeline", "abstract": "Online content analysis employs algorithmic methods to identify entities in unstructured text. Both machine learning and knowledge-base approaches lie at the foundation of contemporary named entities extraction systems. However, the progress in deploying these approaches on web-scale has been hampered by the computational cost of NLP over massive text corpora. We present SpeedRead (SR), a named entity recognition pipeline that runs at least 10 times faster than the Stanford NLP pipeline. This pipeline consists of a high performance Penn Treebank-compliant tokenizer, close to state-of-the-art part-of-speech (POS) tagger and knowledge-based named entity recognizer.", "keyphrases": ["entity recognition pipeline", "tokenizer", "tagger", "speedread"]} +{"id": "herbelot-ganesalingam-2013-measuring", "title": "Measuring semantic content in distributional vectors", "abstract": "Some words are more contentful than others: for instance, make is intuitively more general than produce and fifteen is more \u2018precise\u2019 than a group. In this paper, we propose to measure the \u2018semantic content\u2019 of lexical items, as modelled by distributional representations.
We investigate the hypothesis that semantic content can be computed using the Kullback-Leibler (KL) divergence, an information-theoretic measure of the relative entropy of two distributions. In a task focusing on retrieving the correct ordering of hyponym-hypernym pairs, the KL divergence achieves close to 80% precision but does not outperform a simpler (linguistically unmotivated) frequency measure. We suggest that this result illustrates the rather \u2018intensional\u2019 aspect of distributions.", "keyphrases": ["semantic content", "divergence", "hypernym", "distributional inclusion hypothesis"]} +{"id": "hasan-ng-2009-weakly", "title": "Weakly Supervised Part-of-Speech Tagging for Morphologically-Rich, Resource-Scarce Languages", "abstract": "This paper examines unsupervised approaches to part-of-speech (POS) tagging for morphologically-rich, resource-scarce languages, with an emphasis on Goldwater and Griffiths's (2007) fully-Bayesian approach originally developed for English POS tagging. We argue that existing unsupervised POS taggers unrealistically assume as input a perfect POS lexicon, and consequently, we propose a weakly supervised fully-Bayesian approach to POS tagging, which relaxes the unrealistic assumption by automatically acquiring the lexicon from a small amount of POS-tagged data. Since such relaxation comes at the expense of a drop in tagging accuracy, we propose two extensions to the Bayesian framework and demonstrate that they are effective in improving a fully-Bayesian POS tagger for Bengali, our representative morphologically-rich, resource-scarce language.", "keyphrases": ["morphologically-rich", "resource-scarce language", "pos tagging"]} +{"id": "althobaiti-etal-2014-aranlp", "title": "AraNLP: a Java-based Library for the Processing of Arabic Text.", "abstract": "We present a free, Java-based library named \u201cAraNLP\u201d that covers various Arabic text preprocessing tools. Although a good number of tools for processing Arabic text already exist, integration and compatibility problems continually occur. AraNLP is an attempt to gather most of the vital Arabic text preprocessing tools into one library that can be accessed easily by integrating or accurately adapting existing tools and by developing new ones when required. The library includes a sentence detector, tokenizer, light stemmer, root stemmer, part-of-speech tagger (POS-tagger), word segmenter, normalizer, and a punctuation and diacritic remover.", "keyphrases": ["java-based library", "arabic text", "diacritic remover"]} +{"id": "papalampidi-etal-2019-movie", "title": "Movie Plot Analysis via Turning Point Identification", "abstract": "According to screenwriting theory, turning points (e.g., change of plans, major setback, climax) are crucial narrative moments within a screenplay: they define the plot structure, determine its progression and segment the screenplay into thematic units (e.g., setup, complications, aftermath). We propose the task of turning point identification in movies as a means of analyzing their narrative structure. We argue that turning points and the segmentation they provide can facilitate processing long, complex narratives, such as screenplays, for summarization and question answering. We introduce a dataset consisting of screenplays and plot synopses annotated with turning points and present an end-to-end neural network model that identifies turning points in plot synopses and projects them onto scenes in screenplays.
Our model outperforms strong baselines based on state-of-the-art sentence representations and the expected position of turning points.", "keyphrases": ["turning point", "point identification", "progression", "narrative structure", "movie"]} +{"id": "hale-etal-2018-finding", "title": "Finding syntax in human encephalography with beam search", "abstract": "Recurrent neural network grammars (RNNGs) are generative models of (tree, string) pairs that rely on neural networks to evaluate derivational choices. Parsing with them using beam search yields a variety of incremental complexity metrics such as word surprisal and parser action count. When used as regressors against human electrophysiological responses to naturalistic text, they derive two amplitude effects: an early peak and a P600-like later peak. By contrast, a non-syntactic neural language model yields no reliable effects. Model comparisons attribute the early peak to syntactic composition within the RNNG. This pattern of results recommends the RNNG+beam search combination as a mechanistic model of the syntactic processing that occurs during normal human language comprehension.", "keyphrases": ["encephalography", "beam search", "rnng", "syntactic composition", "processing"]} +{"id": "zhang-etal-2018-exploring", "title": "Exploring Recombination for Efficient Decoding of Neural Machine Translation", "abstract": "In Neural Machine Translation (NMT), the decoder can capture the features of the entire prediction history with neural connections and representations. This means that partial hypotheses with different prefixes will be regarded differently no matter how similar they are. However, this might be inefficient since some partial hypotheses can contain only local differences that will not influence future predictions. In this work, we introduce recombination in NMT decoding based on the concept of the \u201cequivalence\u201d of partial hypotheses. Heuristically, we use a simple n-gram suffix based equivalence function and adapt it into beam search decoding. Through experiments on large-scale Chinese-to-English and English-to-German translation tasks, we show that the proposed method can obtain similar translation quality with a smaller beam size, making NMT decoding more efficient.", "keyphrases": ["recombination", "neural machine translation", "similar translation quality"]} +{"id": "zhang-etal-2010-entity", "title": "Entity Linking Leveraging Automatically Generated Annotation", "abstract": "Entity linking refers entity mentions in a document to their representations in a knowledge base (KB). In this paper, we propose to use additional information sources from Wikipedia to find more name variations for the entity linking task. In addition, as manually creating a training corpus for entity linking is labor-intensive and costly, we present a novel method to automatically generate a large scale corpus annotation for ambiguous mentions leveraging on their unambiguous synonyms in the document collection. Then, a binary classifier is trained to filter out KB entities that are not similar to current mentions. This classifier not only can effectively reduce the ambiguities to the existing entities in KB, but is also very useful to highlight the new entities to KB for the further population. Furthermore, we also leverage on the Wikipedia documents to provide additional information which is not available in our generated corpus through a domain adaptation approach which provides further performance improvements.
The experimental results show that our proposed method outperforms the state-of-the-art approaches.", "keyphrases": ["entity mention", "knowledge base", "wikipedia", "binary classifier"]} +{"id": "silfverberg-etal-2018-sound", "title": "Sound Analogies with Phoneme Embeddings", "abstract": "Vector space models of words in NLP\u2014word embeddings\u2014have been recently shown to reliably encode semantic information, offering capabilities such as solving proportional analogy tasks such as man:woman::king:queen. We study how well these distributional properties carry over to similarly learned phoneme embeddings, and whether phoneme vector spaces align with articulatory distinctive features, using several methods of obtaining such continuous-space representations. We demonstrate a statistically significant correlation between distinctive feature spaces and vector spaces learned with word-context PPMI+SVD and word2vec, showing that many distinctive feature contrasts are implicitly present in phoneme distributions. Furthermore, these distributed representations allow us to solve proportional analogy tasks with phonemes, such as p is to b as t is to X, where the solution is that X = d. This effect is even stronger when a supervision signal is added where we extract phoneme representations from the embedding layer of a recurrent neural network that is trained to solve a word inflection task, i.e. a model that is made aware of word relatedness.", "keyphrases": ["analogy", "phoneme embedding", "feature space"]} +{"id": "friedrich-etal-2015-annotating", "title": "Annotating genericity: a survey, a scheme, and a corpus", "abstract": "Generics are linguistic expressions that make statements about or refer to kinds, or that report regularities of events. Non-generic expressions make statements about particular individuals or specific episodes. Generics are treated extensively in semantic theory (Krifka et al., 1995). In practice, it is often hard to decide whether a referring expression is generic or non-generic, and to date there is no data set which is both large and satisfactorily annotated. Such a data set would be valuable for creating automatic systems for identifying generic expressions, in turn facilitating knowledge extraction from natural language text. In this paper we provide the next steps for such an annotation endeavor. Our contributions are: (1) we survey the most important previous projects annotating genericity, focusing on resources for English; (2) with a new agreement study we identify problems in the annotation scheme of the largest currently available resource (ACE-2005); and (3) we introduce a linguistically-motivated annotation scheme for marking both clauses and their subjects with regard to their genericity. (4) We present a corpus of MASC (Ide et al., 2010) and Wikipedia texts annotated according to our scheme, achieving substantial agreement.", "keyphrases": ["genericity", "scheme", "linguistic expression"]} +{"id": "palmer-etal-2004-different", "title": "Different Sense Granularities for Different Applications", "abstract": "This paper describes a hierarchical approach to WordNet sense distinctions that provides different types of automatic Word Sense Disambiguation (WSD) systems, which perform at varying levels of accuracy. For tasks where fine-grained sense distinctions may not be essential, an accurate coarse-grained WSD system may be sufficient.
The paper discusses the criteria behind the three different levels of sense granularity, as well as the machine learning approach used by the WSD system.", "keyphrases": ["wsd", "sense inventory", "inter-annotator agreement", "trouble", "choice"]} +{"id": "gurrutxaga-alegria-2012-measuring", "title": "Measuring the compositionality of NV expressions in Basque by means of distributional similarity techniques", "abstract": "We present several experiments aiming at measuring the semantic compositionality of NV expressions in Basque. Our approach is based on the hypothesis that compositionality can be related to distributional similarity. The contexts of each NV expression are compared with the contexts of its corresponding components, by means of different techniques, such as similarity measures usually used with the Vector Space Model (VSM), Latent Semantic Analysis (LSA) and some measures implemented in the Lemur Toolkit, such as the Indri index, tf-idf, Okapi index and Kullback-Leibler divergence. Using our previous work with cooccurrence techniques as a baseline, the results point to improvements using the Indri index or Kullback-Leibler divergence, and a slight further improvement when used in combination with cooccurrence measures such as $t$-score, via rank-aggregation. This work is part of a project for MWE extraction and characterization using different techniques aiming at measuring the properties related to idiomaticity, such as institutionalization, non-compositionality and lexico-syntactic fixedness.", "keyphrases": ["compositionality", "basque", "latent semantic analysis"]} +{"id": "asgari-mofrad-2016-comparing", "title": "Comparing Fifty Natural Languages and Twelve Genetic Languages Using Word Embedding Language Divergence (WELD) as a Quantitative Measure of Language Distance", "abstract": "We introduce a new measure of distance between languages based on word embedding, called word embedding language divergence (WELD). WELD is defined as divergence between unified similarity distribution of words between languages. Using such a measure, we perform language comparison for fifty natural languages and twelve genetic languages. Our natural language dataset is a collection of sentence-aligned parallel corpora from bible translations for fifty languages spanning a variety of language families. Although we use parallel corpora, which guarantees having the same content in all languages, interestingly in many cases languages within the same family cluster together. In addition to natural languages, we perform language comparison for the coding regions in the genomes of 12 different organisms (4 plants, 6 animals, and 2 human subjects). Our result confirms a significant high-level difference in the genetic language model of humans/animals versus plants. The proposed method is a step toward defining a quantitative measure of similarity between languages, with applications in language classification, genre identification, dialect identification, and evaluation of translations.", "keyphrases": ["genetic language", "language divergence", "co-occurrence"]} +{"id": "ravi-knight-2008-attacking", "title": "Attacking Decipherment Problems Optimally with Low-Order N-gram Models", "abstract": "We introduce a method for solving substitution ciphers using low-order letter n-gram models. This method enforces global constraints using integer programming, and it guarantees that no decipherment key is overlooked.
We carry out extensive empirical experiments showing how decipherment accuracy varies as a function of cipher length and n-gram order. We also make an empirical investigation of Shannon's (1949) theory of uncertainty in decipherment.", "keyphrases": ["decipherment", "letter n-gram model", "function"]} +{"id": "edunov-etal-2018-classical", "title": "Classical Structured Prediction Losses for Sequence to Sequence Learning", "abstract": "There has been much recent work on training neural attention models at the sequence-level using either reinforcement learning-style methods or by optimizing the beam. In this paper, we survey a range of classical objective functions that have been widely used to train linear models for structured prediction and apply them to neural sequence to sequence models. Our experiments show that these losses can perform surprisingly well by slightly outperforming beam search optimization in a like for like setup. We also report new state of the art results on both IWSLT'14 German-English translation as well as Gigaword abstractive summarization. On the large WMT'14 English-French task, sequence-level training achieves 41.5 BLEU which is on par with the state of the art.", "keyphrases": ["loss", "beam search", "setup", "sentence-level bleu", "reinforcement learning"]} +{"id": "ilievski-etal-2016-semantic", "title": "Semantic overfitting: what `world' do we consider when evaluating disambiguation of text?", "abstract": "Semantic text processing faces the challenge of defining the relation between lexical expressions and the world to which they make reference within a period of time. It is unclear whether the current test sets used to evaluate disambiguation tasks are representative for the full complexity considering this time-anchored relation, resulting in semantic overfitting to a specific period and the frequent phenomena within. We conceptualize and formalize a set of metrics which evaluate this complexity of datasets. We provide evidence for their applicability on five different disambiguation tasks. To challenge semantic overfitting of disambiguation systems, we propose a time-based, metric-aware method for developing datasets in a systematic and semi-automated manner, as well as an event-based QA task.", "keyphrases": ["world", "semantic overfitting", "low ambiguity"]} +{"id": "wagner-etal-2007-comparative", "title": "A Comparative Evaluation of Deep and Shallow Approaches to the Automatic Detection of Common Grammatical Errors", "abstract": "This paper compares a deep and a shallow processing approach to the problem of classifying a sentence as grammatically well-formed or ill-formed. The deep processing approach uses the XLE LFG parser and English grammar: two versions are presented, one which uses the XLE directly to perform the classification, and another one which uses a decision tree trained on features consisting of the XLE\u2019s output statistics. The shallow processing approach predicts grammaticality based on n-gram frequency statistics: we present two versions, one which uses frequency thresholds and one which uses a decision tree trained on the frequencies of the rarest n-grams in the input sentence. We find that the use of a decision tree improves on the basic approach only for the deep parser-based approach. We also show that combining both the shallow and deep decision tree features is effective. Our evaluation is carried out using a large test set of grammatical and ungrammatical sentences.
The ungrammatical test set is generated automatically by inserting grammatical errors into well-formed BNC sentences.", "keyphrases": ["automatic detection", "common grammatical error", "bnc sentence"]} +{"id": "f-astudillo-etal-2015-learning", "title": "Learning Word Representations from Scarce and Noisy Data with Embedding Subspaces", "abstract": "We investigate a technique to adapt unsupervised word embeddings to specific applications, when only small and noisy labeled datasets are available. Current methods use pre-trained embeddings to initialize model parameters, and then use the labeled data to tailor them for the intended task. However, this approach is prone to overfitting when the training is performed with scarce and noisy data. To overcome this issue, we use the supervised data to find an embedding subspace that fits the task complexity. All the word representations are adapted through a projection into this task-specific subspace, even if they do not occur on the labeled dataset. This approach was recently used in the SemEval 2015 Twitter sentiment analysis challenge, attaining state-of-the-art results. Here we show results improving those of the challenge, as well as additional experiments in a Twitter Part-Of-Speech tagging task.", "keyphrases": ["noisy data", "subspace", "pre-trained embedding"]} +{"id": "gurevych-etal-2007-electronic", "title": "What to be? - Electronic Career Guidance Based on Semantic Relatedness", "abstract": "We present a study aimed at investigating the use of semantic information in a novel NLP application, Electronic Career Guidance (ECG), in German. ECG is formulated as an information retrieval (IR) task, whereby textual descriptions of professions (documents) are ranked for their relevance to natural language descriptions of a person\u2019s professional interests (the topic). We compare the performance of two semantic IR models: (IR-1) utilizing semantic relatedness (SR) measures based on either wordnet or Wikipedia and a set of heuristics, and (IR-2) measuring the similarity between the topic and documents based on Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007). We evaluate the performance of SR measures intrinsically on the tasks of (T-1) computing SR, and (T-2) solving Reader\u2019s Digest Word Power (RDWP) questions.", "keyphrases": ["electronic career guidance", "semantic relatedness", "profession"]} +{"id": "lu-etal-2020-multi-xscience", "title": "Multi-XScience: A Large-scale Dataset for Extreme Multi-document Summarization of Scientific Articles", "abstract": "Multi-document summarization is a challenging task for which there exists little large-scale datasets. We propose Multi-XScience, a large-scale multi-document summarization dataset created from scientific articles. Multi-XScience introduces a challenging multi-document summarization task: writing the related-work section of a paper based on its abstract and the articles it references. Our work is inspired by extreme summarization, a dataset construction protocol that favours abstractive modeling approaches. 
Descriptive statistics and empirical results\u2014using several state-of-the-art models trained on the Multi-XScience dataset\u2014reveal that Multi-XScience is well suited for abstractive models.", "keyphrases": ["large-scale dataset", "summarization dataset", "multi-xscience"]} +{"id": "jawahar-etal-2020-automatic", "title": "Automatic Detection of Machine Generated Text: A Critical Survey", "abstract": "Text generative models (TGMs) excel in producing text that matches the style of human language reasonably well. Such TGMs can be misused by adversaries, e.g., by automatically generating fake news and fake product reviews that can look authentic and fool humans. Detectors that can distinguish text generated by TGM from human written text play a vital role in mitigating such misuse of TGMs. Recently, there has been a flurry of works from both natural language processing (NLP) and machine learning (ML) communities to build accurate detectors for English. Despite the importance of this problem, there is currently no work that surveys this fast-growing literature and introduces newcomers to important research challenges. In this work, we fill this void by providing a critical survey and review of this literature to facilitate a comprehensive understanding of this problem. We conduct an in-depth error analysis of the state-of-the-art detector and discuss research directions to guide future work in this exciting area.", "keyphrases": ["critical survey", "automatic detection", "synthetic text"]} +{"id": "hoffmann-etal-2010-learning", "title": "Learning 5000 Relational Extractors", "abstract": "Many researchers are trying to use information extraction (IE) to create large-scale knowledge bases from natural language text on the Web. However, the primary approach (supervised learning of relation-specific extractors) requires manually-labeled training data for each relation and doesn't scale to the thousands of relations encoded in Web text. \n \nThis paper presents LUCHS, a self-supervised, relation-specific IE system which learns 5025 relations --- more than an order of magnitude greater than any previous approach --- with an average F1 score of 61%. Crucial to LUCHS's performance is an automated system for dynamic lexicon learning, which allows it to learn accurately from heuristically-generated training data, which is often noisy and sparse.", "keyphrases": ["luchs", "distant supervision", "relation extraction", "knowledge basis", "wikipedia"]} +{"id": "hauch-etal-2012-linguistic", "title": "Linguistic Cues to Deception Assessed by Computer Programs: A Meta-Analysis", "abstract": "Research syntheses suggest that verbal cues are more diagnostic of deception than other cues. Recently, to avoid human judgmental biases, researchers have sought to find faster and more reliable methods to perform automatic content analyses of statements. However, diversity of methods and inconsistent findings do not present a clear picture of effectiveness. We integrate and statistically synthesize this literature. Our meta-analyses revealed small, but significant effect-sizes on some linguistic categories. 
Liars use fewer exclusive words, self- and other-references, fewer time-related, but more space-related, negative and positive emotion words, and more motion verbs or negations than truth-tellers.", "keyphrases": ["deception", "meta-analysis", "verbal cue"]} +{"id": "siahbani-etal-2013-efficient", "title": "Efficient Left-to-Right Hierarchical Phrase-Based Translation with Improved Reordering", "abstract": "Left-to-right (LR) decoding (Watanabe et al., 2006b) is a promising decoding algorithm for hierarchical phrase-based translation (Hiero). It generates the target sentence by extending the hypotheses only on the right edge. LR decoding has complexity O(n^2 b) for input of n words and beam size b, compared to O(n^3) for the CKY algorithm. It requires a single language model (LM) history for each target hypothesis rather than two LM histories per hypothesis as in CKY. In this paper we present an augmented LR decoding algorithm that builds on the original algorithm in (Watanabe et al., 2006b). Unlike that algorithm, using experiments over multiple language pairs we show two new results: our LR decoding algorithm provides demonstrably more efficient decoding than CKY Hiero, four times faster; and by introducing new distortion and reordering features for LR decoding, it maintains the same translation quality (as in BLEU scores) obtained by phrase-based and CKY Hiero with the same translation model.", "keyphrases": ["left-to-right", "hiero", "translation quality"]} +{"id": "king-cook-2018-leveraging", "title": "Leveraging distributed representations and lexico-syntactic fixedness for token-level prediction of the idiomaticity of English verb-noun combinations", "abstract": "Verb-noun combinations (VNCs) - e.g., blow the whistle, hit the roof, and see stars - are a common type of English idiom that are ambiguous with literal usages. In this paper we propose and evaluate models for classifying VNC usages as idiomatic or literal, based on a variety of approaches to forming distributed representations. Our results show that a model based on averaging word embeddings performs on par with, or better than, a previously-proposed approach based on skip-thoughts. Idiomatic usages of VNCs are known to exhibit lexico-syntactic fixedness. We further incorporate this information into our models, demonstrating that this rich linguistic knowledge is complementary to the information carried by distributed representations.", "keyphrases": ["lexico-syntactic fixedness", "verb-noun combination", "usage"]} +{"id": "henrich-etal-2012-webcage", "title": "WebCAGe \u2013 A Web-Harvested Corpus Annotated with GermaNet Senses", "abstract": "This paper describes an automatic method for creating a domain-independent sense-annotated corpus harvested from the web. As a proof of concept, this method has been applied to German, a language for which sense-annotated corpora are still in short supply. The sense inventory is taken from the German wordnet GermaNet. The web-harvesting relies on an existing mapping of GermaNet to the German version of the web-based dictionary Wiktionary. The data obtained by this method constitute WebCAGe (short for: Web-Harvested Corpus Annotated with GermaNet Senses), a resource which currently represents the largest sense-annotated corpus available for German.
While the present paper focuses on one particular language, the method as such is language-independent.", "keyphrases": ["web-harvested corpus annotated", "germanet senses", "german", "webcage"]} +{"id": "markert-etal-2012-collective", "title": "Collective Classification for Fine-grained Information Status", "abstract": "Previous work on classifying information status (Nissim, 2006; Rahman and Ng, 2011) is restricted to coarse-grained classification and focuses on conversational dialogue. We here introduce the task of classifying fine-grained information status and work on written text. We add a fine-grained information status layer to the Wall Street Journal portion of the OntoNotes corpus. We claim that the information status of a mention depends not only on the mention itself but also on other mentions in the vicinity and solve the task by collectively classifying the information status of all mentions. Our approach strongly outperforms reimplementations of previous work.", "keyphrases": ["fine-grained information status", "isnotes", "joint inference", "bridging recognition", "anaphora"]} +{"id": "itagaki-etal-2007-automatic", "title": "Automatic validation of terminology translation consistency with statistical method", "abstract": "This paper presents a novel method to automatically validate terminology consistency in localized materials. The goal of the paper is two-fold. First, we explore a way to extract phrase pair translations for compound nouns from a bilingual corpus using word alignment data. To validate the quality of the extracted phrase pair translations, we use a Gaussian mixture model (GMM) classifier. Second, we quantify consistency of translation as a measurement of quality. With this approach, a quality assurance process for terminology translation can be fully automated. It can also be used for maintaining bilingual training data quality for machine translation.", "keyphrases": ["terminology translation", "statistical method", "automatic validation"]} +{"id": "sasano-etal-2009-effect", "title": "The Effect of Corpus Size on Case Frame Acquisition for Discourse Analysis", "abstract": "This paper reports the effect of corpus size on case frame acquisition for discourse analysis in Japanese. For this study, we collected a Japanese corpus consisting of up to 100 billion words, and constructed case frames from corpora of six different sizes. Then, we applied these case frames to syntactic and case structure analysis, and zero anaphora resolution. We obtained better results by using case frames constructed from larger corpora; the performance was not saturated even with a corpus size of 100 billion words.", "keyphrases": ["corpus size", "case frame", "discourse analysis"]} +{"id": "yu-etal-2016-online", "title": "Online Segment to Segment Neural Transduction", "abstract": "We introduce an online neural sequence to sequence model that learns to alternate between encoding and decoding segments of the input as it is read. By independently tracking the encoding and decoding representations our algorithm permits exact polynomial marginalization of the latent segmentation during training, and during decoding beam search is employed to find the best alignment path together with the predicted output sequence. Our model tackles the bottleneck of vanilla encoder-decoders that have to read and memorize the entire input sequence in their fixed-length hidden states before producing any output.
It is different from previous attentive models in that, instead of treating the attention weights as output of a deterministic function, our model assigns attention weights to a sequential latent variable which can be marginalized out and permits online generation. Experiments on abstractive sentence summarization and morphological inflection show significant performance gains over the baseline encoder-decoders.", "keyphrases": ["output sequence", "inflection", "online segment", "neural transduction model", "alignment-based neural model"]} +{"id": "alex-etal-2007-recognising", "title": "Recognising Nested Named Entities in Biomedical Text", "abstract": "Although recent named entity (NE) annotation efforts involve the markup of nested entities, there has been limited focus on recognising such nested structures. This paper introduces and compares three techniques for modelling and recognising nested entities by means of a conventional sequence tagger. The methods are tested and evaluated on two biomedical data sets that contain entity nesting. All methods yield an improvement over the baseline tagger that is only trained on flat annotation.", "keyphrases": ["biomedical text", "conditional random field", "protein", "entity recognition", "eppi corpus"]} +{"id": "green-etal-2013-parsing", "title": "Parsing Models for Identifying Multiword Expressions", "abstract": "Multiword expressions lie at the syntax/semantics interface and have motivated alternative theories of syntax like Construction Grammar. Until now, however, syntactic analysis and multiword expression identification have been modeled separately in natural language processing. We develop two structured prediction models for joint parsing and multiword expression identification. The first is based on context-free grammars and the second uses tree substitution grammars, a formalism that can store larger syntactic fragments. Our experiments show that both models can identify multiword expressions with much higher accuracy than a state-of-the-art system based on word co-occurrence statistics. We experiment with Arabic and French, which both have pervasive multiword expressions. Relative to English, they also have richer morphology, which induces lexical sparsity in finite corpora. To combat this sparsity, we develop a simple factored lexical representation for the context-free parsing model. Morphological analyses are automatically transformed into rich feature tags that are scored jointly with lexical items. This technique, which we call a factored lexicon, improves both standard parsing and multiword expression identification accuracy.", "keyphrases": ["multiword expressions", "arabic", "french", "constituency parsing model", "presence"]} +{"id": "savary-etal-2012-sejfek", "title": "SEJFEK - a Lexicon and a Shallow Grammar of Polish Economic Multi-Word Units", "abstract": "We present a large-coverage lexical and grammatical resource of Polish economic terminology. It consists of two alternative modules. One is a grammatical lexicon of about 11,000 terminological multi-word units, where inflectional and syntactic variation, as well as nesting of terms, are described via graph-based rules. The other one is a fully lexicalized shallow grammar, obtained by an automatic conversion of the lexicon, and partly manually validated.
Both resources have good coverage, evaluated on a manually annotated corpus, and are freely available under the Creative Commons BY-SA license.", "keyphrases": ["shallow grammar", "graph-based rule", "sejfek"]} +{"id": "eirew-etal-2021-wec", "title": "WEC: Deriving a Large-scale Cross-document Event Coreference dataset from Wikipedia", "abstract": "Cross-document event coreference resolution is a foundational task for NLP applications involving multi-text processing. However, existing corpora for this task are scarce and relatively small, while annotating only modest-size clusters of documents belonging to the same topic. To complement these resources and enhance future research, we present Wikipedia Event Coreference (WEC), an efficient methodology for gathering a large-scale dataset for cross-document event coreference from Wikipedia, where coreference links are not restricted within predefined topics. We apply this methodology to the English Wikipedia and extract our large-scale WEC-Eng dataset. Notably, our dataset creation method is generic and can be applied with relatively little effort to other Wikipedia languages. To set baseline results, we develop an algorithm that adapts components of state-of-the-art models for within-document coreference resolution to the cross-document setting. Our model is suitably efficient and outperforms previously published state-of-the-art results for the task.", "keyphrases": ["cross-document event coreference", "wikipedia", "coreference link", "wec-eng dataset"]} +{"id": "rozovskaya-etal-2014-correcting", "title": "Correcting Grammatical Verb Errors", "abstract": "Verb errors are some of the most common mistakes made by non-native writers of English but some of the least studied. The reason is that dealing with verb errors requires a new paradigm; essentially all research done on correcting grammatical errors assumes a closed set of triggers \u2010 e.g., correcting the use of prepositions or articles \u2010 but identifying mistakes in verbs necessitates identifying potentially ambiguous triggers first, and then determining the type of mistake made and correcting it. Moreover, once the verb is identified, modeling verb errors is challenging because verbs fulfill many grammatical functions, resulting in a variety of mistakes. Consequently, the little earlier work done on verb errors assumed that the error type is known in advance. We propose a linguistically-motivated approach to verb error correction that makes use of the notion of verb finiteness to identify triggers and types of mistakes, before using a statistical machine learning approach to correct these mistakes. We show that the linguistically-informed model significantly improves the accuracy of the verb correction approach.", "keyphrases": ["verb error", "mistake", "linguistically-motivated approach", "finiteness"]} +{"id": "wu-cotterell-2019-exact", "title": "Exact Hard Monotonic Attention for Character-Level Transduction", "abstract": "Many common character-level, string-to-string transduction tasks, e.g., grapheme-to-phoneme conversion and morphological inflection, consist almost exclusively of monotonic transduction. Neural sequence-to-sequence models with soft attention, non-monotonic models, outperform popular monotonic models. In this work, we ask the following question: Is monotonicity really a helpful inductive bias in these tasks? We develop a hard attention sequence-to-sequence model that enforces strict monotonicity and learns alignment jointly.
With the help of dynamic programming, we are able to compute the exact marginalization over all alignments. Our models achieve state-of-the-art performance on morphological inflection. Furthermore, we find strong performance on two other character-level transduction tasks. Code is available at .", "keyphrases": ["hard monotonic attention", "monotonicity", "transduction", "grapheme-to-phoneme conversion", "sequence-to-sequence model"]} +{"id": "hamidian-diab-2016-rumor", "title": "Rumor Identification and Belief Investigation on Twitter", "abstract": "Social media users spend several hours a day reading, posting and searching for news on microblogging platforms. Social media is becoming a key means for discovering news. However, verifying the trustworthiness of this information is becoming even more challenging. In this study, we attempt to address the problem of rumor detection and belief investigation on Twitter. Our definition of rumor is an unverifiable statement, which spreads misinformation or disinformation. We adopt a supervised rumors classification task using the standard dataset. By employing the Tweet Latent Vector (TLV) feature, which creates a 100-d vector representative of each tweet, we increased the rumor retrieval task precision up to 0.972. We also introduce the belief score and study the belief change among the rumor posters between 2010 and 2016.", "keyphrases": ["belief investigation", "twitter", "rumor"]} +{"id": "etoori-etal-2018-automatic", "title": "Automatic Spelling Correction for Resource-Scarce Languages using Deep Learning", "abstract": "Spelling correction is a well-known task in Natural Language Processing (NLP). Automatic spelling correction is important for many NLP applications like web search engines, text summarization, sentiment analysis etc. Most approaches use parallel data of noisy and correct word mappings from different sources as training data for automatic spelling correction. Indic languages are resource-scarce and do not have such parallel data due to low volume of queries and non-existence of such prior implementations. In this paper, we show how to build an automatic spelling corrector for resource-scarce languages. We propose a sequence-to-sequence deep learning model which trains end-to-end. We perform experiments on synthetic datasets created for Indic languages, Hindi and Telugu, by incorporating the spelling mistakes committed at character level. A comparative evaluation shows that our model is competitive with the existing spell checking and correction techniques for Indic languages.", "keyphrases": ["spelling correction", "resource-scarce language", "deep learning model"]} +{"id": "nguyen-etal-2015-semantic", "title": "Semantic Representations for Domain Adaptation: A Case Study on the Tree Kernel-based Method for Relation Extraction", "abstract": "We study the application of word embeddings to generate semantic representations for the domain adaptation problem of relation extraction (RE) in the tree kernel-based method. We systematically evaluate various techniques to generate the semantic representations and demonstrate that they are effective in improving the generalization performance of a tree kernel-based relation extractor across domains (up to 7% relative improvement). In addition, we compare the tree kernel-based and the feature-based method for RE in a compatible way, on the same resources and settings, to gain insights into which kind of system is more robust to domain changes.
Our results and error analysis show that the tree kernel-based method outperforms the feature-based approach.", "keyphrases": ["case study", "relation extraction", "feature-based method"]} +{"id": "xu-etal-2020-position", "title": "Position-Aware Tagging for Aspect Sentiment Triplet Extraction", "abstract": "Aspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment. Existing research efforts mostly solve this problem using pipeline approaches, which break the triplet extraction process into several stages. Our observation is that the three elements within a triplet are highly related to each other, and this motivates us to build a joint model to extract such triplets using a sequence tagging approach. However, how to effectively design a tagging approach to extract the triplets that can capture the rich interactions among the elements is a challenging research question. In this work, we propose the first end-to-end model with a novel position-aware tagging scheme that is capable of jointly extracting the triplets. Our experimental results on several existing datasets show that jointly capturing elements in the triplet using our approach leads to improved performance over the existing approaches. We also conducted extensive experiments to investigate the model effectiveness and robustness.", "keyphrases": ["tagging", "opinion term", "aspect span"]} +{"id": "platanios-etal-2018-contextual", "title": "Contextual Parameter Generation for Universal Neural Machine Translation", "abstract": "We propose a simple modification to existing neural machine translation (NMT) models that enables using a single universal model to translate between multiple languages while allowing for language specific parameterization, and that can also be used for domain adaptation. Our approach requires no changes to the model architecture of a standard NMT system, but instead introduces a new component, the contextual parameter generator (CPG), that generates the parameters of the system (e.g., weights in a neural network). This parameter generator accepts source and target language embeddings as input, and generates the parameters for the encoder and the decoder, respectively. The rest of the model remains unchanged and is shared across all languages. We show how this simple modification enables the system to use monolingual data for training and also perform zero-shot translation. We further show it is able to surpass state-of-the-art performance for both the IWSLT-15 and IWSLT-17 datasets and that the learned language embeddings are able to uncover interesting relationships between languages.", "keyphrases": ["universal model", "adapter", "language embedding", "zero-shot translation", "contextual parameter generation"]} +{"id": "le-etal-2012-continuous", "title": "Continuous Space Translation Models with Neural Networks", "abstract": "The use of conventional maximum likelihood estimates hinders the performance of existing phrase-based translation models. For lack of sufficient training data, most models only consider a small amount of context. As a partial remedy, we explore here several continuous space translation models, where translation probabilities are estimated using a continuous representation of translation units in lieu of standard discrete representations.
In order to handle a large set of translation units, these representations and the associated estimates are jointly computed using a multi-layer neural network with a SOUL architecture. In small scale and large scale English to French experiments, we show that the resulting models can effectively be trained and used on top of an n-gram translation system, delivering significant improvements in performance.", "keyphrases": ["translation model", "continuous space", "neural network model", "factorization"]} +{"id": "levitan-etal-2012-acoustic", "title": "Acoustic-Prosodic Entrainment and Social Behavior", "abstract": "In conversation, speakers have been shown to entrain, or become more similar to each other, in various ways. We measure entrainment on eight acoustic features extracted from the speech of subjects playing a cooperative computer game and associate the degree of entrainment with a number of manually-labeled social variables acquired using Amazon Mechanical Turk, as well as objective measures of dialogue success. We find that male-female pairs entrain on all features, while male-male pairs entrain only on particular acoustic features (intensity mean, intensity maximum and syllables per second). We further determine that entrainment is more important to the perception of female-male social behavior than it is for same-gender pairs, and it is more important to the smoothness and flow of male-male dialogue than it is for female-female or mixed-gender pairs. Finally, we find that entrainment is more pronounced when intensity or speaking rate is especially high or low.", "keyphrases": ["social behavior", "variable", "speech data"]} +{"id": "wang-etal-2012-improved", "title": "Improved Domain Adaptation for Statistical Machine Translation", "abstract": "We present a simple and effective infrastructure for domain adaptation for statistical machine translation (MT). To build MT systems for different domains, it trains, tunes and deploys a single translation system that is capable of producing adapted domain translations and preserving the original generic accuracy at the same time. The approach unifies automatic domain detection and domain model parameterization into one system. Experimental results on 20 language pairs demonstrate its viability.", "keyphrases": ["statistical machine translation", "different domain", "feature weight", "single-domain decoder"]} +{"id": "zhou-etal-2021-automatic", "title": "Automatic ICD Coding via Interactive Shared Representation Networks with Self-distillation Mechanism", "abstract": "The ICD coding task aims at assigning codes of the International Classification of Diseases in clinical notes. Since manual coding is very laborious and prone to errors, many methods have been proposed for the automatic ICD coding task. However, existing works either ignore the long-tail of code frequency or the noisy clinical notes. To address the above issues, we propose an Interactive Shared Representation Network with Self-Distillation Mechanism. Specifically, an interactive shared representation network targets building connections among codes while modeling the co-occurrence, consequently alleviating the long-tail problem. Moreover, to cope with the noisy text issue, we encourage the model to focus on the clinical note's noteworthy part and extract valuable information through a self-distillation learning mechanism.
Experimental results on two MIMIC datasets demonstrate the effectiveness of our method.", "keyphrases": ["icd", "self-distillation mechanism", "multi-label classifier"]} +{"id": "zhang-etal-2008-extracting", "title": "Extracting Synchronous Grammar Rules From Word-Level Alignments in Linear Time", "abstract": "We generalize Uno and Yagiura's algorithm for finding all common intervals of two permutations to the setting of two sequences with many-to-many alignment links across the two sides. We show how to maximally decompose a word-aligned sentence pair in linear time, which can be used to generate all possible phrase pairs or a Synchronous Context-Free Grammar (SCFG) with the simplest rules possible. We also use the algorithm to precisely analyze the maximum SCFG rule length needed to cover hand-aligned data from various language pairs.", "keyphrases": ["linear time", "phrase pair", "synchronous context-free grammar", "scfg"]} +{"id": "damljanovic-etal-2010-identification", "title": "Identification of the Question Focus: Combining Syntactic Analysis and Ontology-based Lookup through the User Interaction", "abstract": "Most question-answering systems contain a classifier module which determines a question category, based on which each question is assigned an answer type. However, setting up syntactic patterns for this classification is a big challenge. In addition, in the case of ontology-based systems, the answer type should be aligned to the queried knowledge structure. In this paper, we present an approach for determining the answer type semi-automatically. We first identify the question focus using syntactic parsing, and then try to identify the answer type by combining the head of the focus with the ontology-based lookup. When this combination is not enough to make conclusions automatically, the user is engaged in a dialog in order to resolve the answer type. User selections are saved and used for training the system in order to improve its performance over time. Further, the answer type is used to show the feedback and the concise answer to the user. Our approach is evaluated using 250 questions from the Mooney Geoquery dataset.", "keyphrases": ["question focus", "ontology-based lookup", "syntactic parsing"]} +{"id": "sperber-etal-2017-neural", "title": "Neural Lattice-to-Sequence Models for Uncertain Inputs", "abstract": "The input to a neural sequence-to-sequence model is often determined by an up-stream system, e.g. a word segmenter, part-of-speech tagger, or speech recognizer. These up-stream models are potentially error-prone. Representing inputs through word lattices allows making this uncertainty explicit by capturing alternative sequences and their posterior probabilities in a compact form. In this work, we extend the TreeLSTM (Tai et al., 2015) into a LatticeLSTM that is able to consume word lattices, and can be used as an encoder in an attentional encoder-decoder model. We integrate lattice posterior scores into this architecture by extending the TreeLSTM's child-sum and forget gates and introducing a bias term into the attention mechanism. We experiment with speech translation lattices and report consistent improvements over baselines that translate either the 1-best hypothesis or the lattice without posterior scores.", "keyphrases": ["sequence-to-sequence model", "lattice", "posterior score"]} +{"id": "eisenstein-2013-phonological", "title": "Phonological Factors in Social Media Writing", "abstract": "Does phonological variation get transcribed into social media text?
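For reference alongside the linear-time decomposition described above, here is a naive extraction of alignment-consistent phrase pairs, a minimal sketch of the standard consistency check rather than the paper's linear-time algorithm.

def extract_phrase_pairs(n_src: int, n_tgt: int, links: set[tuple[int, int]],
                         max_len: int = 4):
    """Enumerate source spans and keep those whose alignment box is closed."""
    pairs = []
    for i1 in range(n_src):
        for i2 in range(i1, min(i1 + max_len, n_src)):
            tgt = [j for (i, j) in links if i1 <= i <= i2]
            if not tgt:
                continue
            j1, j2 = min(tgt), max(tgt)
            # Consistent iff no link leaves the box in either direction.
            if all(i1 <= i <= i2 for (i, j) in links if j1 <= j <= j2) \
               and j2 - j1 < max_len:
                pairs.append(((i1, i2), (j1, j2)))
    return pairs

links = {(0, 0), (1, 2), (2, 1)}  # toy many-to-many alignment
print(extract_phrase_pairs(3, 3, links))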
This paper investigates examples of the phonological variable of consonant cluster reduction in Twitter. Not only does this variable appear frequently, but it displays the same sensitivity to linguistic context as in spoken language. This suggests that when social media writing transcribes phonological properties of speech, it is not merely a case of inventing orthographic transcriptions. Rather, social media displays influence from structural properties of the phonological system.", "keyphrases": ["twitter", "spelling", "social medium"]} +{"id": "he-etal-2021-model", "title": "Model Extraction and Adversarial Transferability, Your BERT is Vulnerable!", "abstract": "Natural language processing (NLP) tasks, ranging from text classification to text generation, have been revolutionised by pretrained language models, such as BERT. This allows corporations to easily build powerful APIs by encapsulating fine-tuned BERT models for downstream tasks. However, when a fine-tuned BERT model is deployed as a service, it may suffer from different attacks launched by malicious users. In this work, we first present how an adversary can steal a BERT-based API service (the victim/target model) on multiple benchmark datasets with limited prior knowledge and queries. We further show that the extracted model can lead to highly transferable adversarial attacks against the victim model. Our studies indicate that the potential vulnerabilities of BERT-based API services still hold, even when there is an architectural mismatch between the victim model and the attack model. Finally, we investigate two defence strategies to protect the victim model, and find that unless the performance of the victim model is sacrificed, both model extraction and adversarial transferability can effectively compromise the target models.", "keyphrases": ["adversarial transferability", "bert", "victim", "attack model", "model extraction"]} +{"id": "vergyri-kirchhoff-2004-automatic", "title": "Automatic Diacritization of Arabic for Acoustic Modeling in Speech Recognition", "abstract": "Automatic recognition of Arabic dialectal speech is a challenging task because Arabic dialects are essentially spoken varieties. Only a few dialectal resources are available to date; moreover, most available acoustic data collections are transcribed without diacritics. Such a transcription omits essential pronunciation information about a word, such as short vowels. In this paper we investigate various procedures that enable us to use such training data by automatically inserting the missing diacritics into the transcription. These procedures use acoustic information in combination with different levels of morphological and contextual constraints. We evaluate their performance against manually diacritized transcriptions. In addition, we demonstrate the effect of their accuracy on the recognition performance of acoustic models trained on automatically diacritized training data.", "keyphrases": ["arabic", "automatic diacritization", "acoustic feature"]} +{"id": "duh-2008-ranking", "title": "Ranking vs. Regression in Machine Translation Evaluation", "abstract": "Automatic evaluation of machine translation (MT) systems is an important research topic for the advancement of MT technology. Most automatic evaluation methods proposed to date are score-based: they compute scores that represent translation quality, and MT systems are compared on the basis of these scores. \n \nWe advocate an alternative perspective of automatic MT evaluation based on ranking.
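The extraction attack sketched above follows a simple recipe: query the victim, then imitate it. A minimal illustration follows, with a stand-in function in place of a deployed BERT-based API; all names and data are illustrative.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

def victim_api(text: str) -> int:            # hypothetical black-box service
    return int("good" in text or "great" in text)

queries = ["good movie", "great acting", "dull plot", "boring and slow"]
labels = [victim_api(q) for q in queries]     # distilled "supervision"

vec = TfidfVectorizer()
X = vec.fit_transform(queries)
extracted = LogisticRegression().fit(X, labels)   # the attacker's copy
print(extracted.predict(vec.transform(["good plot"])))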
Instead of producing scores, we directly produce a ranking over the set of MT systems to be compared. This perspective is often simpler when the evaluation goal is system comparison. We argue that it is easier to elicit human judgments of ranking and develop a machine learning approach to train on rank data. We compare this ranking method to a score-based regression method on WMT07 data. Results indicate that ranking achieves higher correlation to human judgments, especially in cases where ranking-specific features are used.", "keyphrases": ["regression", "machine learning approach", "linguistic information", "pos match"]} +{"id": "mukund-srihari-2010-vector", "title": "A Vector Space Model for Subjectivity Classification in Urdu aided by Co-Training", "abstract": "The goal of this work is to produce a classifier that can distinguish subjective sentences from objective sentences for the Urdu language. The amount of labeled data required for training automatic classifiers can be highly imbalanced, especially in the multilingual paradigm, as generating annotations is an expensive task. In this work, we propose a co-training approach for subjectivity analysis in the Urdu language that augments the positive set (subjective set) and generates a negative set (objective set) devoid of all samples close to the positive ones. Using the data set thus generated for training, we conduct experiments based on SVM and VSM algorithms, and show that our modified VSM-based approach works remarkably well as a sentence-level subjectivity classifier.", "keyphrases": ["vector space model", "subjectivity classification", "urdu language"]} +{"id": "filatova-etal-2006-automatic", "title": "Automatic Creation of Domain Templates", "abstract": "Recently, many Natural Language Processing (NLP) applications have improved the quality of their output by using various machine learning techniques to mine Information Extraction (IE) patterns for capturing information from the input text. Currently, to mine IE patterns one should know in advance the type of the information that should be captured by these patterns. In this work we propose a novel methodology for corpus analysis based on cross-examination of several document collections representing different instances of the same domain. We show that this methodology can be used for automatic domain template creation. As the problem of automatic domain template creation is rather new, there is no well-defined procedure for the evaluation of the domain template quality. Thus, we propose a methodology for identifying what information should be present in the template. Using this information we evaluate the automatically created domain templates through the text snippets retrieved according to the created templates.", "keyphrases": ["domain template", "event-specific document", "extractor", "subtree"]} +{"id": "abercrombie-hovy-2016-putting", "title": "Putting Sarcasm Detection into Context: The Effects of Class Imbalance and Manual Labelling on Supervised Machine Classification of Twitter Conversations", "abstract": "Sarcasm can radically alter or invert a phrase\u2019s meaning. Sarcasm detection can therefore help improve natural language processing (NLP) tasks. The majority of prior research has modeled sarcasm detection as classification, with two important limitations: 1. Balanced datasets, when sarcasm is actually rather rare. 2. Using Twitter users\u2019 self-declarations in the form of hashtags to label data, when sarcasm can take many forms.
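The ranking perspective advocated above trains on pairwise preferences rather than scores. A minimal sketch of that data transformation, with toy judgments and illustrative system names:

from itertools import combinations

judged = [("sysA", 0.71), ("sysB", 0.64), ("sysC", 0.52)]  # (system, human score)

def to_pairwise(judged):
    """Emit (better, worse) training pairs from scored systems."""
    pairs = []
    for (s1, y1), (s2, y2) in combinations(judged, 2):
        if y1 != y2:
            pairs.append((s1, s2) if y1 > y2 else (s2, s1))
    return pairs

print(to_pairwise(judged))  # [('sysA','sysB'), ('sysA','sysC'), ('sysB','sysC')]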
To address these issues, we create an unbalanced corpus of manually annotated Twitter conversations. We compare human and machine ability to recognize sarcasm on this data under varying amounts of context. Our results indicate that both class imbalance and labelling method affect performance, and should both be considered when designing automatic sarcasm detection systems. We conclude that for progress to be made in real-world sarcasm detection, we will require a new class labelling scheme that is able to access the \u2018common ground\u2019 held between conversational parties.", "keyphrases": ["sarcasm detection", "class imbalance", "twitter conversation"]} +{"id": "sanayai-meetei-etal-2019-wat2019", "title": "WAT2019: English-Hindi Translation on Hindi Visual Genome Dataset", "abstract": "Multimodal translation is the task of translating a source language to a target language with the help of a parallel text corpus paired with images that represent the contextual details of the text. In this paper, we carried out an extensive comparison to evaluate the benefits of using a multimodal approach on translating text in English to a low-resource language, Hindi, as a part of the WAT2019 shared task. We carried out the translation of English to Hindi in three separate tasks with both the evaluation and challenge datasets. First, by using only the parallel text corpora; then through an image caption generation approach; and finally with the multimodal approach. Our experiments show a significant improvement in the results with the multimodal approach over the other approaches.", "keyphrases": ["wat2019", "literature survey", "english-hindi language pair"]} +{"id": "wang-etal-2021-codet5", "title": "CodeT5: Identifier-aware Unified Pre-trained Encoder-Decoder Models for Code Understanding and Generation", "abstract": "Pre-trained models for Natural Languages (NL) like BERT and GPT have been recently shown to transfer well to Programming Languages (PL) and largely benefit a broad set of code-related tasks. Despite their success, most current methods either rely on an encoder-only (or decoder-only) pre-training that is suboptimal for generation (resp. understanding) tasks or process the code snippet in the same way as NL, neglecting the special characteristics of PL such as token types. We present CodeT5, a unified pre-trained encoder-decoder Transformer model that better leverages the code semantics conveyed from the developer-assigned identifiers. Our model employs a unified framework to seamlessly support both code understanding and generation tasks and allows for multi-task learning. In addition, we propose a novel identifier-aware pre-training task that enables the model to distinguish which code tokens are identifiers and to recover them when they are masked. Furthermore, we propose to exploit the user-written code comments with a bimodal dual generation task for better NL-PL alignment. Comprehensive experiments show that CodeT5 significantly outperforms prior methods on understanding tasks such as code defect detection and clone detection, and generation tasks across various directions including PL-NL, NL-PL, and PL-PL. Further analysis reveals that our model can better capture semantic information from code.
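A rough sketch of the identifier-aware masking idea: tag which code tokens are identifiers and mask them for the model to recover. This simplified version masks each occurrence independently, unlike the paper's sentinel scheme, and uses Python's own keyword list as a stand-in tokenizer.

import keyword
import re

def mask_identifiers(code: str):
    tokens = re.findall(r"\w+|\S", code)
    masked, targets = [], []
    for t in tokens:
        if t.isidentifier() and not keyword.iskeyword(t):
            targets.append(t)                          # gold tokens to recover
            masked.append(f"<MASK_{len(targets) - 1}>")
        else:
            masked.append(t)
    return " ".join(masked), targets

print(mask_identifiers("def add(a, b): return a + b"))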
Our code and pre-trained models are released at .", "keyphrases": ["code understanding", "pre-trained model", "codet5"]} +{"id": "farzindar-lapalme-2004-legal", "title": "Legal Text Summarization by Exploration of the Thematic Structure and Argumentative Roles", "abstract": "In this paper we describe our method for the summarization of legal documents, helping a legal expert determine the key ideas of a judgment. Our approach is based on the exploration of the document\u2019s architecture and its thematic structures in order to build a table-style summary for improving coherency and readability of the text. We present the components of a system, called LetSum, built with this approach, its implementation and some preliminary evaluation results.", "keyphrases": ["thematic structure", "legal document", "text summarization method"]} +{"id": "matsuyoshi-sato-2008-automatic", "title": "Automatic Paraphrasing of Japanese Functional Expressions Using a Hierarchically Organized Dictionary", "abstract": "Automatic paraphrasing is a transformation of expressions into semantically equivalent expressions within one language. For generating a wider variety of phrasal paraphrases in Japanese, it is necessary to paraphrase functional expressions as well as content expressions. We propose a method for paraphrasing Japanese functional expressions using a dictionary with two hierarchies: a morphological hierarchy and a semantic hierarchy. Our system generates appropriate alternative expressions for 79% of source phrases in Japanese in an open test. It also accepts style and readability specifications.", "keyphrases": ["japanese functional expression", "functional expression", "automatic paraphrasing"]} +{"id": "wei-etal-2021-cognitive", "title": "A Cognitive Regularizer for Language Modeling", "abstract": "The uniform information density (UID) hypothesis, which posits that speakers behaving optimally tend to distribute information uniformly across a linguistic signal, has gained traction in psycholinguistics as an explanation for certain syntactic, morphological, and prosodic choices. In this work, we explore whether the UID hypothesis can be operationalized as an inductive bias for statistical language modeling. Specifically, we augment the canonical MLE objective for training language models with a regularizer that encodes UID. In experiments on ten languages spanning five language families, we find that using UID regularization consistently improves perplexity in language models, having a larger effect when training data is limited. Moreover, via an analysis of generated sequences, we find that UID-regularized language models have other desirable properties, e.g., they generate text that is more lexically diverse. Our results not only suggest that UID is a reasonable inductive bias for language modeling, but also provide an alternative validation of the UID hypothesis using modern-day NLP tools.", "keyphrases": ["regularizer", "language modeling", "information density"]} +{"id": "pustejovsky-yocum-2014-image", "title": "Image Annotation with ISO-Space: Distinguishing Content from Structure", "abstract": "Natural language descriptions of visual media present interesting problems for linguistic annotation of spatial information. This paper explores the use of ISO-Space, an annotation specification for capturing spatial information, for encoding spatial relations mentioned in descriptions of images.
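The UID regularizer idea lends itself to a compact sketch: penalize the variance of per-token surprisal on top of the usual NLL. The weight beta and this particular variance form are assumptions about how such a regularizer might be operationalized, not the paper's exact objective.

import torch
import torch.nn.functional as F

def uid_regularized_loss(logits, targets, beta=0.1):
    # logits: (seq_len, vocab), targets: (seq_len,)
    surprisal = F.cross_entropy(logits, targets, reduction="none")  # -log p
    nll = surprisal.mean()                  # canonical MLE term
    uid = surprisal.var(unbiased=False)     # non-uniformity penalty
    return nll + beta * uid

logits = torch.randn(5, 100, requires_grad=True)
targets = torch.randint(0, 100, (5,))
loss = uid_regularized_loss(logits, targets)
loss.backward()
print(float(loss))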
In particular, we focus on the distinction between references to representational content and structural components of images, and the utility of such a distinction within a compositional semantics. We also discuss how such a structure-content distinction within the linguistic annotation can be leveraged to compute further inferences about spatial configurations depicted by images with verbal captions. We construct a composition table to relate content-based relations to structure-based relations in the image, as expressed in the captions. While still preliminary, our initial results suggest that a weak composition table is both sound and informative for deriving new spatial relations.", "keyphrases": ["iso-space", "spatial relation", "configuration", "image"]} +{"id": "welivita-pu-2020-taxonomy", "title": "A Taxonomy of Empathetic Response Intents in Human Social Conversations", "abstract": "Open-domain conversational agents or chatbots are becoming increasingly popular in the natural language processing community. One of the challenges is enabling them to converse in an empathetic manner. Current neural response generation methods rely solely on end-to-end learning from large scale conversation data to generate dialogues. This approach can produce socially unacceptable responses due to the lack of large-scale quality data used to train the neural models. However, recent work has shown the promise of combining dialogue act/intent modelling and neural response generation. This hybrid method improves the response quality of chatbots and makes them more controllable and interpretable. A key element in dialog intent modelling is the development of a taxonomy. Inspired by this idea, we have manually labeled 500 response intents using a subset of a sizeable empathetic dialogue dataset (25K dialogues). Our goal is to produce a large-scale taxonomy for empathetic response intents. Furthermore, using lexical and machine learning methods, we automatically analysed both speaker and listener utterances of the entire dataset with identified response intents and 32 emotion categories. Finally, we use information visualization methods to summarize emotional dialogue exchange patterns and their temporal progression. These results reveal novel and important empathy patterns in human-human open-domain conversations and can serve as heuristics for hybrid approaches.", "keyphrases": ["empathetic response intent", "response generation", "empatheticdialogues dataset"]} +{"id": "el-ballouli-etal-2017-cat", "title": "CAT: Credibility Analysis of Arabic Content on Twitter", "abstract": "Data generated on Twitter has become a rich source for various data mining tasks. Those data analysis tasks that are dependent on the tweet semantics, such as sentiment analysis, emotion mining, and rumor detection among others, suffer considerably if the tweet is not credible, not real, or spam. In this paper, we perform an extensive analysis of the credibility of Arabic content on Twitter. We also build a classification model (CAT) to automatically predict the credibility of a given Arabic tweet. Of particular originality is the inclusion of features extracted directly or indirectly from the author's profile and timeline. To train and test CAT, we annotated for credibility a data set of 9,000 Arabic tweets that are topic independent.
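The composition table relating content-based to structure-based relations can be pictured as a simple lookup; the relation names and entries below are illustrative stand-ins, not the paper's actual table.

# Keys pair a content-based relation (from the depicted scene) with a
# structure-based relation (over regions of the image itself).
COMPOSE = {
    ("in_front_of", "left_of"): "left_of",
    ("behind", "left_of"): "left_of",
    ("next_to", "contains"): "overlaps",
}

def infer(content_rel: str, structure_rel: str) -> str:
    """Derive a new spatial relation, or report that the table is silent."""
    return COMPOSE.get((content_rel, structure_rel), "unknown")

print(infer("in_front_of", "left_of"))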
CAT achieved consistent improvements in predicting the credibility of the tweets when compared to several baselines and when compared to the state-of-the-art approach with an improvement of 21% in weighted average F-measure. We also conducted experiments to highlight the importance of the user-based features as opposed to the content-based features. We conclude our work with a feature reduction experiment that highlights the best indicative features of credibility.", "keyphrases": ["arabic content", "twitter", "arabic tweet"]} +{"id": "tackstrom-mcdonald-2011-semi", "title": "Semi-supervised latent variable models for sentence-level sentiment analysis", "abstract": "We derive two variants of a semi-supervised model for fine-grained sentiment analysis. Both models leverage abundant natural supervision in the form of review ratings, as well as a small amount of manually crafted sentence labels, to learn sentence-level sentiment classifiers. The proposed model is a fusion of a fully supervised structured conditional model and its partially supervised counterpart. This allows for highly efficient estimation and inference algorithms with rich feature definitions. We describe the two variants as well as their component models and verify experimentally that both variants give significantly improved results for sentence-level sentiment analysis compared to all baselines.", "keyphrases": ["sentence-level sentiment analysis", "structured conditional model", "polarity", "whole review"]} +{"id": "chen-etal-2014-aspect", "title": "Aspect Extraction with Automated Prior Knowledge Learning", "abstract": "Aspect extraction is an important task in sentiment analysis. Topic modeling is a popular method for the task. However, unsupervised topic models often generate incoherent aspects. To address the issue, several knowledge-based models have been proposed to incorporate prior knowledge provided by the user to guide modeling. In this paper, we take a major step forward and show that in the big data era, without any user input, it is possible to learn prior knowledge automatically from a large amount of review data available on the Web. Such knowledge can then be used by a topic model to discover more coherent aspects. There are two key challenges: (1) learning quality knowledge from reviews of diverse domains, and (2) making the model fault-tolerant to handle possibly wrong knowledge. A novel approach is proposed to solve these problems. Experimental results using reviews from 36 domains show that the proposed approach achieves significant improvements over state-of-the-art baselines.", "keyphrases": ["extraction", "sentiment analysis", "topic modeling"]} +{"id": "do-etal-2012-joint", "title": "Joint Inference for Event Timeline Construction", "abstract": "This paper addresses the task of constructing a timeline of events mentioned in a given text. To accomplish that, we present a novel representation of the temporal structure of a news article based on time intervals. We then present an algorithmic approach that jointly optimizes the temporal structure by coupling local classifiers that predict associations and temporal relations between pairs of temporal entities with global constraints. Moreover, we present ways to leverage knowledge provided by event coreference to further improve the system performance. Overall, our experiments show that the joint inference model significantly outperformed the local classifiers by 9.2% of relative improvement in F1. 
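The global constraints used in such joint inference are largely transitivity-style consistency requirements over pairwise predictions. A minimal sketch that closes a set of before-relations and flags contradictions; the paper instead solves an ILP over classifier scores rather than applying a hard closure like this.

def transitive_violations(before: set[tuple[str, str]]):
    """Close 'a before b' under transitivity and list contradictory pairs."""
    closure = set(before)
    changed = True
    while changed:
        changed = False
        for a, b in list(closure):
            for c, d in list(closure):
                if b == c and (a, d) not in closure:
                    closure.add((a, d))
                    changed = True
    return [(a, b) for (a, b) in closure if a != b and (b, a) in closure]

local = {("bombing", "rescue"), ("rescue", "trial"), ("trial", "bombing")}
print(transitive_violations(local))   # cycles a joint model must resolve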
The experiments also suggest that good event coreference could make a remarkable contribution to a robust event timeline construction system.", "keyphrases": ["event timeline construction", "dense annotation", "integer linear programming", "ilp"]} +{"id": "laparra-rigau-2013-impar", "title": "ImpAr: A Deterministic Algorithm for Implicit Semantic Role Labelling", "abstract": "This paper presents a novel deterministic algorithm for implicit Semantic Role Labeling. The system exploits a very simple but relevant discursive property, the argument coherence over different instances of a predicate. The algorithm solves the implicit arguments sequentially, exploiting not only explicit but also the implicit arguments previously solved. In addition, we empirically demonstrate that the algorithm obtains very competitive and robust performances with respect to supervised approaches that require large amounts of costly training data.", "keyphrases": ["predicate", "corresponding role", "discourse coherence"]} +{"id": "jansen-2018-multi", "title": "Multi-hop Inference for Sentence-level TextGraphs: How Challenging is Meaningfully Combining Information for Science Question Answering?", "abstract": "Question Answering for complex questions is often modelled as a graph construction or traversal task, where a solver must build or traverse a graph of facts that answer and explain a given question. This \u201cmulti-hop\u201d inference has been shown to be extremely challenging, with few models able to aggregate more than two facts before being overwhelmed by \u201csemantic drift\u201d, or the tendency for long chains of facts to quickly drift off topic. This is a major barrier to current inference models, as even elementary science questions require an average of 4 to 6 facts to answer and explain. In this work we empirically characterize the difficulty of building or traversing a graph of sentences connected by lexical overlap, by evaluating chance sentence aggregation quality through 9,784 manually-annotated judgements across knowledge graphs built from three free-text corpora (including study guides and Simple Wikipedia). We demonstrate semantic drift tends to be high and aggregation quality low, at between 0.04 and 3, and highlight scenarios that maximize the likelihood of meaningfully combining information.", "keyphrases": ["science question", "semantic drift", "difficulty", "knowledge graph"]} +{"id": "zhu-etal-2015-ranking", "title": "A Re-ranking Model for Dependency Parser with Recursive Convolutional Neural Network", "abstract": "In this work, we address the problem of modeling all the nodes (words or phrases) in a dependency tree with dense representations. We propose a recursive convolutional neural network (RCNN) architecture to capture syntactic and compositional-semantic representations of phrases and words in a dependency tree. Unlike the original recursive neural network, we introduce convolution and pooling layers, which can model a variety of compositions through the feature maps and choose the most informative compositions through the pooling layers. Based on RCNN, we use a discriminative model to re-rank a $k$-best list of candidate dependency parsing trees.
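The sentence graphs studied above connect facts by lexical overlap. A minimal sketch with a toy stopword list; real systems would also lemmatize and weight edges.

STOP = {"the", "a", "of", "is", "to", "and"}

def build_overlap_graph(sentences):
    """Edges connect sentence pairs that share at least one content word."""
    bags = [set(s.lower().split()) - STOP for s in sentences]
    edges = []
    for i in range(len(bags)):
        for j in range(i + 1, len(bags)):
            shared = bags[i] & bags[j]
            if shared:
                edges.append((i, j, shared))
    return edges

facts = ["metal conducts electricity",
         "copper is a metal",
         "a wire is made of copper"]
for i, j, shared in build_overlap_graph(facts):
    print(i, j, shared)   # hops a solver might traverse to chain facts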
The experiments show that RCNN is very effective in improving state-of-the-art dependency parsing on both English and Chinese datasets.", "keyphrases": ["re-ranking model", "convolutional neural network", "dependency parsing"]} +{"id": "peters-martins-2019-ist", "title": "IT\u2013IST at the SIGMORPHON 2019 Shared Task: Sparse Two-headed Models for Inflection", "abstract": "This paper presents the Instituto de Telecomunica\u00e7\u00f5es\u2013Instituto Superior T\u00e9cnico submission to Task 1 of the SIGMORPHON 2019 Shared Task. Our models combine sparse sequence-to-sequence models with a two-headed attention mechanism that learns separate attention distributions for the lemma and inflectional tags. Among submissions to Task 1, our models rank second and third. Despite the low data setting of the task (only 100 in-language training examples), they learn plausible inflection patterns and often concentrate all probability mass into a small set of hypotheses, making beam search exact.", "keyphrases": ["shared task", "inflection", "two-headed attention mechanism"]} +{"id": "andreevskaia-bergler-2008-specialists", "title": "When Specialists and Generalists Work Together: Overcoming Domain Dependence in Sentiment Tagging", "abstract": "This study presents a novel approach to the problem of system portability across different domains: a sentiment annotation system that integrates a corpus-based classifier trained on a small set of annotated in-domain data and a lexicon-based system trained on WordNet. The paper explores the challenges of system portability across domains and text genres (movie reviews, news, blogs, and product reviews), highlights the factors affecting system performance on out-of-domain and small-set in-domain data, and presents a new system consisting of the ensemble of two classifiers with precision-based vote weighting that provides significant gains in accuracy and recall over the corpus-based classifier and the lexicon-based system taken individually.", "keyphrases": ["corpus-based classifier", "opinion", "subjectivity analysis"]} +{"id": "hu-etal-2014-minimum", "title": "Minimum Translation Modeling with Recurrent Neural Networks", "abstract": "We introduce recurrent neural network-based Minimum Translation Unit (MTU) models which make predictions based on an unbounded history of previous bilingual contexts. Traditional back-off n-gram models suffer under the sparse nature of MTUs which makes estimation of high-order sequence models challenging. We tackle the sparsity problem by modeling MTUs both as bags-of-words and as a sequence of individual source and target words. Our best results improve the output of a phrase-based statistical machine translation system trained on WMT 2012 French-English data by up to 1.5 BLEU, and we outperform the traditional n-gram based MTU approach by up to 0.8 BLEU.", "keyphrases": ["recurrent neural network", "minimum translation unit", "recent research", "phrase pair"]} +{"id": "hassan-etal-2020-alt-semeval", "title": "ALT at SemEval-2020 Task 12: Arabic and English Offensive Language Identification in Social Media", "abstract": "This paper describes the systems submitted by the Arabic Language Technology group (ALT) at SemEval-2020 Task 12: Multilingual Offensive Language Identification in Social Media. We focus on sub-task A (Offensive Language Identification) for two languages: Arabic and English. Our efforts for both languages achieved more than 90% macro-averaged F1-score on the official test set.
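The sparse attention these models build on can be illustrated with sparsemax, which projects scores onto the probability simplex and zeroes out the tail (Martins & Astudillo, 2016). A minimal NumPy sketch:

import numpy as np

def sparsemax(z: np.ndarray) -> np.ndarray:
    """Euclidean projection of a score vector onto the probability simplex."""
    zs = np.sort(z)[::-1]                    # scores in descending order
    css = np.cumsum(zs)
    k = np.arange(1, len(z) + 1)
    support = k[1 + k * zs > css]            # indices kept in the support
    kz = support[-1]
    tau = (css[kz - 1] - 1) / kz             # threshold subtracted from scores
    return np.maximum(z - tau, 0.0)

print(sparsemax(np.array([2.0, 1.0, 0.1])))  # [1. 0. 0.]: exactly-zero tail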
For Arabic, the best results were obtained by a system combination of Support Vector Machine, Deep Neural Network, and fine-tuned Bidirectional Encoder Representations from Transformers (BERT). For English, the best results were obtained by fine-tuning BERT.", "keyphrases": ["semeval-2020 task", "offensive language identification", "social media"]} +{"id": "poerner-etal-2018-evaluating", "title": "Evaluating neural network explanation methods using hybrid documents and morphosyntactic agreement", "abstract": "The behavior of deep neural networks (DNNs) is hard to understand. This makes it necessary to explore post hoc explanation methods. We conduct the first comprehensive evaluation of explanation methods for NLP. To this end, we design two novel evaluation paradigms that cover two important classes of NLP problems: small context and large context problems. Both paradigms require no manual annotation and are therefore broadly applicable. We also introduce LIMSSE, an explanation method inspired by LIME that is designed for NLP. We show empirically that LIMSSE, LRP and DeepLIFT are the most effective explanation methods and recommend them for explaining DNNs in NLP.", "keyphrases": ["explanation method", "hybrid document", "morphosyntactic agreement"]} +{"id": "weiss-etal-2015-structured", "title": "Structured Training for Neural Network Transition-Based Parsing", "abstract": "We present structured perceptron training for neural network transition-based dependency parsing. We learn the neural network representation using a gold corpus augmented by a large number of automatically parsed sentences. Given this fixed network representation, we learn a final layer using the structured perceptron with beam-search decoding. On the Penn Treebank, our parser reaches 94.26% unlabeled and 92.41% labeled attachment accuracy, which to our knowledge is the best accuracy on Stanford Dependencies to date. We also provide in-depth ablative analysis to determine which aspects of our model provide the largest gains in accuracy.", "keyphrases": ["dependency parser", "neural network architecture", "structured learning"]} +{"id": "kuhlmann-jonsson-2015-parsing", "title": "Parsing to Noncrossing Dependency Graphs", "abstract": "We study the generalization of maximum spanning tree dependency parsing to maximum acyclic subgraphs. Because the underlying optimization problem is intractable even under an arc-factored model, we consider the restriction to noncrossing dependency graphs. Our main contribution is a cubic-time exact inference algorithm for this class. We extend this algorithm into a practical parser and evaluate its performance on four linguistic data sets used in semantic dependency parsing. We also explore a generalization of our parsing framework to dependency graphs with pagenumber at most k and show that the resulting optimization problem is NP-hard for k \u2265 2.", "keyphrases": ["generalization", "dependency parsing", "acyclic subgraph", "pagenumber"]} +{"id": "duann-huang-2015-embodiment", "title": "When Embodiment Meets Generative Lexicon: The Human Body Part Metaphors in Sinica Corpus", "abstract": "This research aims to integrate embodiment with generative lexicon. By analyzing the metaphorically used human body part terms in Sinica Corpus, the first balanced modern Chinese corpus, we reveal how these two theories complement each other.
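The structured perceptron training used for the final layer reduces to a simple update rule over beam candidates. A minimal sketch with toy feature vectors; in the real parser the candidates come from beam-search decoding over fixed network representations.

import numpy as np

def perceptron_update(w, candidates, gold, lr=1.0):
    """candidates: {name: feature vector}; gold: name of correct structure."""
    scores = {c: w @ f for c, f in candidates.items()}
    pred = max(scores, key=scores.get)
    if pred != gold:                               # mistake-driven update
        w = w + lr * (candidates[gold] - candidates[pred])
    return w, pred

rng = np.random.default_rng(0)
w = np.zeros(4)
candidates = {"tree_a": rng.normal(size=4), "tree_b": rng.normal(size=4)}
w, pred = perceptron_update(w, candidates, gold="tree_a")
print(pred, w)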
Embodiment strengthens generative lexicon by spelling out the cognitive reasons which underlie the production of meaning, and generative lexicon, specifically the qualia structure, complements embodiment by accounting for the reason underlying the selection of a particular body part for metaphorization. Discussing how the four body part terms\u2014\u8840 xie \u201cblood\u201d, \u8089 rou \u201cflesh\u201d, \u9aa8 gu \u201cbone\u201d, \u8108 mai \u201cmeridian\u201d\u2014behave metaphorically, this research argues that the visibility and the telic role of the qualia structure are the major reasons motivating the choice of a body part to represent a comparatively abstract notion. The finding accounts for what constrains the selection of body parts for metaphorical uses. It also facilitates the prediction of the behavior of the four body part terms in these uses, which can function as the starting point to examine whether the two factors\u2014visibility and telicity\u2014also motivate the metaphorization of the rest of the human body parts.", "keyphrases": ["embodiment", "generative lexicon", "telic role"]} +{"id": "ji-etal-2020-dilated", "title": "Dilated Convolutional Attention Network for Medical Code Assignment from Clinical Text", "abstract": "Medical code assignment, which predicts medical codes from clinical texts, is a fundamental task of intelligent medical information systems. The emergence of deep models in natural language processing has boosted the development of automatic assignment methods. However, recent advanced neural architectures with flat convolutions or multi-channel feature concatenation ignore the sequential causal constraint within a text sequence and may not learn meaningful clinical text representations, especially for lengthy clinical notes with long-term sequential dependency. This paper proposes a Dilated Convolutional Attention Network (DCAN), integrating dilated convolutions, residual connections, and label attention, for medical code assignment. It adopts dilated convolutions to capture complex medical patterns with a receptive field which increases exponentially with dilation size. Experiments on a real-world clinical dataset empirically show that our model improves the state of the art.", "keyphrases": ["medical code assignment", "clinical text", "dilated convolution"]} +{"id": "abu-farha-magdy-2020-multitask", "title": "Multitask Learning for Arabic Offensive Language and Hate-Speech Detection", "abstract": "Offensive language and hate-speech are phenomena that spread with the rising popularity of social media. Detecting such content is crucial for understanding and predicting conflicts, understanding polarisation among communities and providing means and tools to filter or block inappropriate content. This paper describes the SMASH team submission to OSACT4's shared task on hate-speech and offensive language detection, where we explore different approaches to perform these tasks. The experiments cover a variety of approaches that include deep learning, transfer learning and multitask learning. We also explore the utilisation of sentiment information to perform the previous task.
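The exponentially growing receptive field from dilated convolutions is easy to see in code. A minimal sketch; the channel counts and dilation schedule are illustrative, not the DCAN configuration.

import torch
import torch.nn as nn

layers = []
channels = 32
for d in (1, 2, 4, 8):  # receptive field roughly doubles at each level
    layers += [nn.Conv1d(channels, channels, kernel_size=3,
                         padding=d, dilation=d), nn.ReLU()]
conv_stack = nn.Sequential(*layers)

x = torch.randn(1, channels, 512)   # (batch, channels, note length)
print(conv_stack(x).shape)          # length preserved: torch.Size([1, 32, 512])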
Our best model is a multitask learning architecture, based on CNN-BiLSTM, that was trained to detect hate-speech and offensive language and predict sentiment.", "keyphrases": ["offensive language", "language detection", "multitask learning", "sarcasm detection"]} +{"id": "siddharthan-2011-text", "title": "Text Simplification using Typed Dependencies: A Comparison of the Robustness of Different Generation Strategies", "abstract": "We present a framework for text simplification based on applying transformation rules to a typed dependency representation produced by the Stanford parser. We test two approaches to regeneration from typed dependencies: (a) gen-light, where the transformed dependency graphs are linearised using the word order and morphology of the original sentence, with any changes coded into the transformation rules, and (b) gen-heavy, where the Stanford dependencies are reduced to a DSyntS representation and sentences are generated formally using the RealPro surface realiser. The main contribution of this paper is to compare the robustness of these approaches in the presence of parsing errors, using both a single parse and an n-best parse setting in an overgenerate and rank approach. We find that the gen-light approach is robust to parser error, particularly in the n-best parse setting. On the other hand, parsing errors cause the realiser in the gen-heavy approach to order words and phrases in ways that are disliked by our evaluators.", "keyphrases": ["robustness", "change", "text simplification", "general purpose generator", "relative clause"]} +{"id": "bordes-etal-2014-question", "title": "Question Answering with Subgraph Embeddings", "abstract": "This paper presents a system which learns to answer questions on a broad range of topics from a knowledge base using few hand-crafted features. Our model learns low-dimensional embeddings of words and knowledge base constituents; these representations are used to score natural language questions against candidate answers. Training our system using pairs of questions and structured representations of their answers, and pairs of question paraphrases, yields competitive results on a recent benchmark of the literature.", "keyphrases": ["knowledge base constituent", "candidate answer", "other work"]} +{"id": "li-etal-2020-conditional", "title": "Conditional Augmentation for Aspect Term Extraction via Masked Sequence-to-Sequence Generation", "abstract": "Aspect term extraction aims to extract aspect terms from review texts as opinion targets for sentiment analysis. One of the big challenges with this task is the lack of sufficient annotated data. While data augmentation is potentially an effective technique to address the above issue, it is uncontrollable as it may change aspect words and aspect labels unexpectedly. In this paper, we formulate the data augmentation as a conditional generation task: generating a new sentence while preserving the original opinion targets and labels. We propose a masked sequence-to-sequence method for conditional augmentation of aspect term extraction. Unlike existing augmentation approaches, ours is controllable and allows to generate more diversified sentences. Experimental results confirm that our method alleviates the data scarcity problem significantly.
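The scoring idea behind subgraph embeddings is compact: embed the question and a candidate answer (an entity plus its KB neighborhood) into one space and take a dot product. A minimal, untrained sketch with a toy vocabulary; in the real system the embeddings are learned from question-answer pairs.

import numpy as np

rng = np.random.default_rng(1)
word_emb = {w: rng.normal(size=16) for w in
            ["who", "directed", "avatar", "james_cameron", "film", "titanic"]}

def embed(tokens):                       # bag-of-embeddings encoder
    return np.sum([word_emb[t] for t in tokens], axis=0)

def score(question, answer_subgraph):
    """Higher dot product = better question/answer match (after training)."""
    return float(embed(question) @ embed(answer_subgraph))

q = ["who", "directed", "avatar"]
print(score(q, ["james_cameron", "film"]),   # candidate + its neighborhood
      score(q, ["titanic", "film"]))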
It also effectively boosts the performance of several current models for aspect term extraction.", "keyphrases": ["aspect term extraction", "generation task", "conditional augmentation"]} +{"id": "troiano-etal-2019-crowdsourcing", "title": "Crowdsourcing and Validating Event-focused Emotion Corpora for German and English", "abstract": "Sentiment analysis has a range of corpora available across multiple languages. For emotion analysis, the situation is more limited, which hinders potential research on crosslingual modeling and the development of predictive models for other languages. In this paper, we fill this gap for German by constructing deISEAR, a corpus designed in analogy to the well-established English ISEAR emotion dataset. Motivated by Scherer's appraisal theory, we implement a crowdsourcing experiment which consists of two steps. In step 1, participants create descriptions of emotional events for a given emotion. In step 2, five annotators assess the emotion expressed by the texts. We show that transferring an emotion classification model from the original English ISEAR to the German crowdsourced deISEAR via machine translation does not, on average, cause a performance drop.", "keyphrases": ["emotion", "crowd-sourcing", "event description"]} +{"id": "liu-etal-2012-broad", "title": "A Broad-Coverage Normalization System for Social Media Language", "abstract": "Social media language contains a huge amount and wide variety of nonstandard tokens, created both intentionally and unintentionally by the users. It is of crucial importance to normalize the noisy nonstandard tokens before applying other NLP techniques. A major challenge facing this task is the system coverage, i.e., for any user-created nonstandard term, the system should be able to restore the correct word within its top n output candidates. In this paper, we propose a cognitively-driven normalization system that integrates different human perspectives in normalizing the nonstandard tokens, including the enhanced letter transformation, visual priming, and string/phonetic similarity. The system was evaluated on both word- and message-level using four SMS and Twitter data sets. Results show that our system achieves over 90% word-coverage across all data sets (a 10% absolute increase compared to state-of-the-art); the broad word-coverage can also successfully translate into message-level performance gain, yielding 6% absolute increase compared to the best prior approach.", "keyphrases": ["normalization system", "coverage", "phonetic similarity", "social medium text"]} +{"id": "correia-martins-2019-simple", "title": "A Simple and Effective Approach to Automatic Post-Editing with Transfer Learning", "abstract": "Automatic post-editing (APE) seeks to automatically refine the output of a black-box machine translation (MT) system through human post-edits. APE systems are usually trained by complementing human post-edited data with large, artificial data generated through back-translations, a time-consuming process often no easier than training an MT system from scratch. In this paper, we propose an alternative where we fine-tune pre-trained BERT models on both the encoder and decoder of an APE system, exploring several parameter sharing strategies. By only training on a dataset of 23K sentences for 3 hours on a single GPU, we obtain results that are competitive with systems that were trained on 5M artificial sentences.
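A minimal sketch of producing top-n restoration candidates for a nonstandard token by string similarity alone; the real system additionally combines letter transformations, visual priming, and phonetic similarity.

from difflib import SequenceMatcher

def rank_candidates(token, vocabulary, n=3):
    """Return the n vocabulary words most string-similar to the noisy token."""
    sim = lambda w: SequenceMatcher(None, token, w).ratio()
    return sorted(vocabulary, key=sim, reverse=True)[:n]

vocab = ["tomorrow", "together", "tomb", "morrow", "borrow"]
print(rank_candidates("2morrow", vocab))   # top-n restoration candidates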
When we add this artificial data, our method obtains state-of-the-art results.", "keyphrases": ["effective approach", "automatic post-editing", "transfer learning", "scratch", "pre-trained bert model"]} +{"id": "dreyer-eisner-2011-discovering", "title": "Discovering Morphological Paradigms from Plain Text Using a Dirichlet Process Mixture Model", "abstract": "We present an inference algorithm that organizes observed words (tokens) into structured inflectional paradigms (types). It also naturally predicts the spelling of unobserved forms that are missing from these paradigms, and discovers inflectional principles (grammar) that generalize to wholly unobserved words. \n \nOur Bayesian generative model of the data explicitly represents tokens, types, inflections, paradigms, and locally conditioned string edits. It assumes that inflected word tokens are generated from an infinite mixture of inflectional paradigms (string tuples). Each paradigm is sampled all at once from a graphical model, whose potential functions are weighted finite-state transducers with language-specific parameters to be learned. These assumptions naturally lead to an elegant empirical Bayes inference procedure that exploits Monte Carlo EM, belief propagation, and dynamic programming. Given 50--100 seed paradigms, adding a 10-million-word corpus reduces prediction error for morphological inflections by up to 10%.", "keyphrases": ["morphological paradigms", "mixture", "inflection", "generative model", "finite-state transducer"]} +{"id": "fortuna-etal-2019-hierarchically", "title": "A Hierarchically-Labeled Portuguese Hate Speech Dataset", "abstract": "Over the past years, the amount of online offensive speech has been growing steadily. To successfully cope with it, machine learning methods are applied. However, ML-based techniques require sufficiently large annotated datasets. In recent years, different datasets were published, mainly for English. In this paper, we present a new dataset for Portuguese, which has not been in focus so far. The dataset is composed of 5,668 tweets. For its annotation, we defined two different schemes used by annotators with different levels of expertise. Firstly, non-experts annotated the tweets with binary labels (`hate' vs. `no-hate'). Secondly, expert annotators classified the tweets following a fine-grained hierarchical multiple label scheme with 81 hate speech categories in total. The inter-annotator agreement varied from category to category, which reflects the insight that some types of hate speech are more subtle than others and that their detection depends on personal perception. This hierarchical annotation scheme is the main contribution of the presented work, as it facilitates the identification of different types of hate speech and their intersections. To demonstrate the usefulness of our dataset, we carried out a baseline classification experiment with pre-trained word embeddings and LSTM on the binary classified data, with a state-of-the-art outcome.", "keyphrases": ["portuguese", "hate speech category", "hierarchical annotation scheme"]} +{"id": "martins-almeida-2014-priberam", "title": "Priberam: A Turbo Semantic Parser with Second Order Features", "abstract": "This paper presents our contribution to the SemEval-2014 shared task on Broad-Coverage Semantic Dependency Parsing. We employ a feature-rich linear model, including scores for first and second-order dependencies (arcs, siblings, grandparents and co-parents).
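The infinite mixture of paradigms above rests on a Chinese-restaurant-process prior: each new token joins an existing cluster in proportion to its size, or opens a new one. A minimal sampling sketch (the concentration alpha and the seeded RNG are illustrative choices):

import random

def crp_assign(counts, alpha=1.0, rng=random.Random(0)):
    """Sample a cluster index for a new token under a CRP prior."""
    total = sum(counts) + alpha
    r = rng.uniform(0, total)
    for k, c in enumerate(counts):
        r -= c
        if r < 0:
            return k
    return len(counts)          # open a brand-new paradigm

counts = []
for _ in range(10):
    k = crp_assign(counts)
    if k == len(counts):
        counts.append(0)
    counts[k] += 1
print(counts)                   # cluster sizes after 10 tokens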
Decoding is performed in a global manner by solving a linear relaxation with alternating directions dual decomposition (AD3). Our system achieved the top score in the open challenge, and the second highest score in the closed track.", "keyphrases": ["dual decomposition", "open challenge", "track", "dependency parser", "hand-crafted feature"]} +{"id": "pouget-abadie-etal-2014-overcoming", "title": "Overcoming the Curse of Sentence Length for Neural Machine Translation using Automatic Segmentation", "abstract": "The authors of (Cho et al., 2014a) have shown that the recently introduced neural network translation systems suffer from a significant drop in translation quality when translating long sentences, unlike existing phrase-based translation systems. In this paper, we propose a way to address this issue by automatically segmenting an input sentence into phrases that can be easily translated by the neural network translation model. Once each segment has been independently translated by the neural machine translation model, the translated clauses are concatenated to form a final translation. Empirical results show a significant improvement in translation quality for long sentences.", "keyphrases": ["curse", "sentence length", "segment"]} +{"id": "garcia-duran-etal-2018-learning", "title": "Learning Sequence Encoders for Temporal Knowledge Graph Completion", "abstract": "Research on link prediction in knowledge graphs has mainly focused on static multi-relational data. In this work we consider temporal knowledge graphs where relations between entities may only hold for a time interval or a specific point in time. In line with previous work on static knowledge graphs, we propose to address this problem by learning latent entity and relation type representations. To incorporate temporal information, we utilize recurrent neural networks to learn time-aware representations of relation types which can be used in conjunction with existing latent factorization methods. The proposed approach is shown to be robust to common challenges in real-world KGs: the sparsity and heterogeneity of temporal expressions. Experiments show the benefits of our approach on four temporal KGs. The data sets are available under a permissive BSD-3 license.", "keyphrases": ["temporal knowledge graph", "link prediction", "recurrent neural network", "time-aware representation", "kge model"]} +{"id": "siddhant-lipton-2018-deep", "title": "Deep Bayesian Active Learning for Natural Language Processing: Results of a Large-Scale Empirical Study", "abstract": "Several recent papers investigate Active Learning (AL) for mitigating the data dependence of deep learning for natural language processing. However, the applicability of AL to real-world problems remains an open question. While in supervised learning, practitioners can try many different methods, evaluating each against a validation set before selecting a model, AL affords no such luxury. Over the course of one AL run, an agent annotates its dataset exhausting its labeling budget. Thus, given a new task, we have no opportunity to compare models and acquisition functions. This paper provides a large-scale empirical study of deep active learning, addressing multiple tasks and, for each, multiple datasets, multiple models, and a full suite of acquisition functions. We find that across all settings, Bayesian active learning by disagreement, using uncertainty estimates provided either by Dropout or Bayes-by-Backprop significantly improves over i.i.d.
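The segment-translate-concatenate strategy for long inputs can be sketched as follows; translate is a stand-in for the NMT model, and this naive segmenter splits on punctuation rather than the learned phrase boundaries the paper uses.

import re

def translate(segment: str) -> str:        # hypothetical short-input NMT
    return f"<fr:{segment.strip()}>"

def translate_long(sentence: str) -> str:
    """Split into easy segments, translate each independently, rejoin."""
    segments = [s for s in re.split(r"(?<=[,;])", sentence) if s.strip()]
    return " ".join(translate(s) for s in segments)

print(translate_long("The model struggles on long inputs, so we segment, "
                     "translate each piece, and join the outputs."))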
baselines and usually outperforms classic uncertainty sampling.", "keyphrases": ["bayesian active learning", "large-scale empirical study", "recent paper", "dropout", "text classification"]} +{"id": "chen-etal-2018-recurrent", "title": "Recurrent Neural Networks as Weighted Language Recognizers", "abstract": "We investigate the computational complexity of various problems for simple recurrent neural networks (RNNs) as formal models for recognizing weighted languages. We focus on the single-layer, ReLU-activation, rational-weight RNNs with softmax, which are commonly used in natural language processing applications. We show that most problems for such RNNs are undecidable, including consistency, equivalence, minimization, and the determination of the highest-weighted string. However, for consistent RNNs the last problem becomes decidable, although the solution length can surpass all computable bounds. If additionally the string is limited to polynomial length, the problem becomes NP-complete. In summary, this shows that approximations and heuristic algorithms are necessary in practical applications of those RNNs.", "keyphrases": ["weighted language", "rnn", "power"]} +{"id": "dakwale-monz-2017-fine", "title": "Fine-Tuning for Neural Machine Translation with Limited Degradation across In- and Out-of-Domain Data", "abstract": "Neural machine translation is a recently proposed approach which has shown competitive results to traditional MT approaches. Similar to other neural network-based methods, NMT also suffers from low performance for domains with less available training data. Domain adaptation deals with improving performance of a model trained on large general domain data over test instances from a new domain. Fine-tuning is a fast and simple domain adaptation method which has demonstrated substantial improvements for various neural network based tasks including NMT. However, it suffers from drastic performance degradation on the general or source domain test sentences, which is undesirable in real-time applications. To address this problem of drastic degradation, in this paper, we propose two simple modifications to the fine-tuning approach, namely multi-objective learning and multi-output learning which are based on the \u201cKnowledge distillation\u201d framework. Experiments on English-German translations demonstrate that our approaches achieve results comparable to simple fine-tuning on the target domain task with comparatively little loss on the general domain task.", "keyphrases": ["neural machine translation", "new domain", "fine-tuned model", "continued training"]} +{"id": "yuan-etal-2016-semi", "title": "Semi-supervised Word Sense Disambiguation with Neural Models", "abstract": "Determining the intended sense of words in text \u2013 word sense disambiguation (WSD) \u2013 is a long-standing problem in natural language processing. Recently, researchers have shown promising results using word vectors extracted from a neural network language model as features in WSD algorithms. However, a simple average or concatenation of word vectors for each word in a text loses the sequential and syntactic information of the text. In this paper, we study WSD with a sequence learning neural net, LSTM, to better capture the sequential and syntactic patterns of the text. To alleviate the lack of training data in all-words WSD, we employ the same LSTM in a semi-supervised label propagation classifier.
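Acquisition with MC-dropout uncertainty, as in the Bayesian active learning setup above, can be sketched compactly: keep dropout stochastic at inference, average several passes, and query the highest-entropy example. The architecture and dimensions are toy values.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(),
                      nn.Dropout(0.5), nn.Linear(16, 2))

def predictive_entropy(x, passes=20):
    model.train()                       # keeps dropout stochastic
    with torch.no_grad():
        probs = torch.stack([model(x).softmax(-1) for _ in range(passes)])
    p = probs.mean(0)                   # marginal predictive distribution
    return -(p * p.log()).sum(-1)

pool = torch.randn(5, 8)                # unlabeled pool
scores = predictive_entropy(pool)
print(int(scores.argmax()))             # index the agent queries next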
We demonstrate state-of-the-art results, especially on verbs.", "keyphrases": ["word sense disambiguation", "wsd", "semi-supervised learning", "lstm language model"]} +{"id": "wu-wang-2009-revisiting", "title": "Revisiting Pivot Language Approach for Machine Translation", "abstract": "This paper revisits the pivot language approach for machine translation. First, we investigate three different methods for pivot translation. Then we employ a hybrid method combining RBMT and SMT systems to fill up the data gap for pivot translation, where the source-pivot and pivot-target corpora are independent. Experimental results on spoken language translation show that this hybrid method significantly improves the translation quality, which outperforms the method using a source-target corpus of the same size. In addition, we propose a system combination approach to select better translations from those produced by various pivot translation methods. This method regards system combination as a translation evaluation problem and formalizes it with a regression learning model. Experimental results indicate that our method achieves consistent and significant improvement over individual translation outputs.", "keyphrases": ["pivot language approach", "machine translation", "bridging", "many researcher"]} +{"id": "liu-etal-2019-hierarchical", "title": "Hierarchical Pointer Net Parsing", "abstract": "Transition-based top-down parsing with pointer networks has achieved state-of-the-art results in multiple parsing tasks, while having a linear time complexity. However, the decoder of these parsers has a sequential structure, which does not yield the most appropriate inductive bias for deriving tree structures. In this paper, we propose hierarchical pointer network parsers, and apply them to dependency and sentence-level discourse parsing tasks. Our results on standard benchmark datasets demonstrate the effectiveness of our approach, outperforming existing methods and setting a new state-of-the-art.", "keyphrases": ["pointer network", "tree structure", "sentence-level discourse parsing"]} +{"id": "ferreira-freitas-2020-premise", "title": "Premise Selection in Natural Language Mathematical Texts", "abstract": "The discovery of supporting evidence for addressing complex mathematical problems is a semantically challenging task, which is still unexplored in the field of natural language processing for mathematical text. The natural language premise selection task consists in using conjectures written in both natural language and mathematical formulae to recommend premises that most likely will be useful to prove a particular statement. We propose an approach to solve this task as a link prediction problem, using Deep Convolutional Graph Neural Networks. This paper also analyses how different baselines perform in this task and shows that a graph structure can provide higher F1-score, especially when considering multi-hop premise selection.", "keyphrases": ["mathematical text", "link prediction problem", "premise selection", "natural language term"]} +{"id": "lehman-etal-2019-inferring", "title": "Inferring Which Medical Treatments Work from Reports of Clinical Trials", "abstract": "How do we know if a particular medical treatment actually works? Ideally one would consult all available evidence from relevant clinical trials. Unfortunately, such results are primarily disseminated in natural language scientific articles, imposing substantial burden on those trying to make sense of them. 
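One standard pivot method, phrase-table triangulation, marginalizes over pivot phrases: p(t|s) = sum_p p(t|p) * p(p|s). A minimal sketch with toy probabilities (the paper additionally compares transfer and synthetic-corpus methods):

src_to_piv = {"haus": {"house": 0.7, "home": 0.3}}      # p(pivot|src)
piv_to_tgt = {"house": {"maison": 0.8, "domicile": 0.2},
              "home":  {"maison": 0.4, "foyer": 0.6}}   # p(tgt|pivot)

def triangulate(src_phrase):
    """Build p(tgt|src) by summing over pivot-language phrases."""
    table = {}
    for piv, p_ps in src_to_piv[src_phrase].items():
        for tgt, p_tp in piv_to_tgt[piv].items():
            table[tgt] = table.get(tgt, 0.0) + p_tp * p_ps
    return table

print(triangulate("haus"))  # {'maison': 0.68, 'domicile': 0.14, 'foyer': 0.18}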
In this paper, we present a new task and corpus for making this unstructured published scientific evidence actionable. The task entails inferring reported findings from a full-text article describing randomized controlled trials (RCT) with respect to a given intervention, comparator, and outcome of interest, e.g., inferring if a given article provides evidence supporting the use of aspirin to reduce risk of stroke, as compared to placebo. We present a new corpus for this task comprising 10,000+ prompts coupled with full-text articles describing RCTs. Results using a suite of baseline models \u2014 ranging from heuristic (rule-based) approaches to attentive neural architectures \u2014 demonstrate the difficulty of the task, which we believe largely owes to the lengthy, technical input texts. To facilitate further work on this important, challenging problem we make the corpus, documentation, a website and leaderboard, and all source code for baselines and evaluation publicly available.", "keyphrases": ["clinical trial", "full-text article", "intervention", "outcome", "evidence inference dataset"]} +{"id": "s-etal-2015-solving", "title": "Solving Data Sparsity by Morphology Injection in Factored SMT", "abstract": "SMT approaches face the problem of data sparsity while translating into a morphologically rich language. It is very unlikely for a parallel corpus to contain all morphological forms of words. We propose a solution to generate these unseen morphological forms and inject them into original training corpora. We observe that morphology injection improves the quality of translation in terms of both adequacy and fluency. We verify this with the experiments on two morphologically rich languages: Hindi and Marathi, while translating from English.", "keyphrases": ["data sparsity", "morphology injection", "ongoing attempt"]} +{"id": "fu-etal-2019-asking", "title": "Asking the Right Question: Inferring Advice-Seeking Intentions from Personal Narratives", "abstract": "People often share personal narratives in order to seek advice from others. To properly infer the narrator's intention, one needs to apply a certain degree of common sense and social intuition. To test the capabilities of NLP systems to recover such intuition, we introduce the new task of inferring what is the advice-seeking goal behind a personal narrative. We formulate this as a cloze test, where the goal is to identify which of two advice-seeking questions was removed from a given narrative. The main challenge in constructing this task is finding pairs of semantically plausible advice-seeking questions for given narratives. To address this challenge, we devise a method that exploits commonalities in experiences people share online to automatically extract pairs of questions that are appropriate candidates for the cloze task. This results in a dataset of over 20,000 personal narratives, each matched with a pair of related advice-seeking questions: one actually intended by the narrator, and the other one not. The dataset covers a very broad array of human experiences, from dating, to career options, to stolen iPads. We use human annotation to determine the degree to which the task relies on common sense and social intuition in addition to a semantic understanding of the narrative. 
By introducing several baselines for this new task we demonstrate its feasibility and identify avenues for better modeling the intention of the narrator.", "keyphrases": ["intention", "narrative", "advice"]} +{"id": "gao-etal-2021-abcd", "title": "ABCD: A Graph Framework to Convert Complex Sentences to a Covering Set of Simple Sentences", "abstract": "Atomic clauses are fundamental text units for understanding complex sentences. Identifying the atomic sentences within complex sentences is important for applications such as summarization, argument mining, discourse analysis, discourse parsing, and question answering. Previous work mainly relies on rule-based methods dependent on parsing. We propose a new task to decompose each complex sentence into simple sentences derived from the tensed clauses in the source, and a novel problem formulation as a graph edit task. Our neural model learns to Accept, Break, Copy or Drop elements of a graph that combines word adjacency and grammatical dependencies. The full processing pipeline includes modules for graph construction, graph editing, and sentence generation from the output graph. We introduce DeSSE, a new dataset designed to train and evaluate complex sentence decomposition, and MinWiki, a subset of MinWikiSplit. ABCD achieves comparable performance to two parsing baselines on MinWiki. On DeSSE, which has a more even balance of complex sentence types, our model achieves higher accuracy on the number of atomic sentences than an encoder-decoder baseline. Results include a detailed error analysis.", "keyphrases": ["simple sentence", "subset", "abcd"]} +{"id": "preotiuc-pietro-etal-2019-automatically", "title": "Automatically Identifying Complaints in Social Media", "abstract": "Complaining is a basic speech act regularly used in human and computer mediated communication to express a negative mismatch between reality and expectations in a particular situation. Automatically identifying complaints in social media is of utmost importance for organizations or brands to improve the customer experience or in developing dialogue systems for handling and responding to complaints. In this paper, we introduce the first systematic analysis of complaints in computational linguistics. We collect a new annotated data set of written complaints expressed on Twitter. We present an extensive linguistic analysis of complaining as a speech act in social media and train strong feature-based and neural models of complaints across nine domains achieving a predictive performance of up to 79 F1 using distant supervision.", "keyphrases": ["complaint", "computational linguistic", "social medium"]} +{"id": "deneefe-etal-2007-syntax", "title": "What Can Syntax-Based MT Learn from Phrase-Based MT?", "abstract": "We compare and contrast the strengths and weaknesses of a syntax-based machine translation model with a phrase-based machine translation model on several levels. We briefly describe each model, highlighting points where they differ. We include a quantitative comparison of the phrase pairs that each model has to work with, as well as the reasons why some phrase pairs are not learned by the syntax-based model. We then evaluate proposed improvements to the syntax-based extraction techniques in light of phrase pairs captured.
We also compare the translation accuracy for all variations.", "keyphrases": ["translation rule", "string-to-tree model", "coverage"]} +{"id": "meng-etal-2021-gemnet", "title": "GEMNET: Effective Gated Gazetteer Representations for Recognizing Complex Entities in Low-context Input", "abstract": "Named Entity Recognition (NER) remains difficult in real-world settings; current challenges include short texts (low context), emerging entities, and complex entities (e.g. movie names). Gazetteer features can help, but results have been mixed due to challenges with adding extra features, and a lack of realistic evaluation data. It has been shown that including gazetteer features can cause models to overuse or underuse them, leading to poor generalization. We propose GEMNET, a novel approach for gazetteer knowledge integration, including (1) a flexible Contextual Gazetteer Representation (CGR) encoder that can be fused with any word-level model; and (2) a Mixture-of-Experts gating network that overcomes the feature overuse issue by learning to conditionally combine the context and gazetteer features, instead of assigning them fixed weights. To comprehensively evaluate our approaches, we create 3 large NER datasets (24M tokens) reflecting current challenges. In an uncased setting, our methods show large gains (up to +49% F1) in recognizing difficult entities compared to existing baselines. On standard benchmarks, we achieve a new uncased SOTA on CoNLL03 and WNUT17.", "keyphrases": ["complex entity", "gazetteer feature", "word-level model", "gemnet", "complex ner task"]} +{"id": "lin-etal-2021-rockner", "title": "RockNER: A Simple Method to Create Adversarial Examples for Evaluating the Robustness of Named Entity Recognition Models", "abstract": "To audit the robustness of named entity recognition (NER) models, we propose RockNER, a simple yet effective method to create natural adversarial examples. Specifically, at the entity level, we replace target entities with other entities of the same semantic class in Wikidata; at the context level, we use pre-trained language models (e.g., BERT) to generate word substitutions. Together, the two levels of attack produce natural adversarial examples that result in a shifted distribution from the training data on which our target models have been trained. We apply the proposed method to the OntoNotes dataset and create a new benchmark named OntoRock for evaluating the robustness of existing NER models via a systematic evaluation protocol. Our experiments and analysis reveal that even the best model has a significant performance drop, and these models seem to memorize in-domain entity patterns instead of reasoning from the context. Our work also studies the effects of a few simple data augmentation methods to improve the robustness of NER models.", "keyphrases": ["robustness", "wikidata", "ner model"]} +{"id": "paul-frank-2019-ranking", "title": "Ranking and Selecting Multi-Hop Knowledge Paths to Better Predict Human Needs", "abstract": "To make machines better understand sentiments, research needs to move from polarity identification to understanding the reasons that underlie the expression of sentiment. Categorizing the goals or needs of humans is one way to explain the expression of sentiment in text. Humans are good at understanding situations described in natural language and can easily connect them to the character's psychological needs using commonsense knowledge.
We present a novel method to extract, rank, filter and select multi-hop relation paths from a commonsense knowledge resource to interpret the expression of sentiment in terms of their underlying human needs. We efficiently integrate the acquired knowledge paths in a neural model that interfaces context representations with knowledge using a gated attention mechanism. We assess the model's performance on a recently published dataset for categorizing human needs. Selectively integrating knowledge paths boosts performance and establishes a new state-of-the-art. Our model offers interpretability through the learned attention map over commonsense knowledge paths. Human evaluation highlights the relevance of the encoded knowledge.", "keyphrases": ["human need", "commonsense knowledge", "relevance"]} +{"id": "vajjala-lucic-2018-onestopenglish", "title": "OneStopEnglish corpus: A new corpus for automatic readability assessment and text simplification", "abstract": "This paper describes the collection and compilation of the OneStopEnglish corpus of texts written at three reading levels, and demonstrates its usefulness through two applications - automatic readability assessment and automatic text simplification. The corpus consists of 189 texts, each in three versions (567 in total). The corpus is now freely available under a CC by-SA 4.0 license and we hope that it would foster further research on the topics of readability assessment and text simplification.", "keyphrases": ["automatic readability assessment", "text simplification", "educational nlp", "agreement"]} +{"id": "lai-etal-2020-event", "title": "Event Detection: Gate Diversity and Syntactic Importance Scores for Graph Convolution Neural Networks", "abstract": "Recent studies on event detection (ED) have shown that the syntactic dependency graph can be employed in graph convolution neural networks (GCN) to achieve state-of-the-art performance. However, the computation of the hidden vectors in such graph-based models is agnostic to the trigger candidate words, potentially leaving irrelevant information for the trigger candidate for event prediction. In addition, the current models for ED fail to exploit the overall contextual importance scores of the words, which can be obtained via the dependency tree, to boost the performance. In this study, we propose a novel gating mechanism to filter noisy information in the hidden vectors of the GCN models for ED based on the information from the trigger candidate. We also introduce novel mechanisms to achieve the contextual diversity for the gates and the importance score consistency for the graphs and models in ED. The experiments show that the proposed model achieves state-of-the-art performance on two ED datasets.", "keyphrases": ["graph-based model", "dependency tree", "event detection"]} +{"id": "spithourakis-etal-2016-numerically", "title": "Numerically Grounded Language Models for Semantic Error Correction", "abstract": "Semantic error detection and correction is an important task for applications such as fact checking, speech-to-text or grammatical error correction. Current approaches generally focus on relatively shallow semantics and do not account for numeric quantities. Our approach uses language models grounded in numbers within the text. Such groundings are easily achieved for recurrent neural language model architectures, which can be further conditioned on incomplete background knowledge bases.
Our evaluation on clinical reports shows that numerical grounding improves perplexity by 33% and F1 for semantic error correction by 5 points when compared to ungrounded approaches. Conditioning on a knowledge base yields further improvements.", "keyphrases": ["language model", "semantic error correction", "knowledge base", "report"]} +{"id": "rozovskaya-roth-2011-algorithm", "title": "Algorithm Selection and Model Adaptation for ESL Correction Tasks", "abstract": "We consider the problem of correcting errors made by English as a Second Language (ESL) writers and address two issues that are essential to making progress in ESL error correction - algorithm selection and model adaptation to the first language of the ESL learner. \n \nA variety of learning algorithms have been applied to correct ESL mistakes, but often comparisons were made between incomparable data sets. We conduct an extensive, fair comparison of four popular learning methods for the task, reversing conclusions from earlier evaluations. Our results hold for different training sets, genres, and feature sets. \n \nA second key issue in ESL error correction is the adaptation of a model to the first language of the writer. Errors made by non-native speakers exhibit certain regularities and, as we show, models perform much better when they use knowledge about error patterns of the non-native writers. We propose a novel way to adapt a learned algorithm to the first language of the writer that is both cheaper to implement and performs better than other adaptation methods.", "keyphrases": ["model adaptation", "correction", "determiner", "na\u00efve bayes classifier"]} +{"id": "kuznetsova-etal-2012-collective", "title": "Collective Generation of Natural Image Descriptions", "abstract": "We present a holistic data-driven approach to image description generation, exploiting the vast amount of (noisy) parallel image data and associated natural language descriptions available on the web. More specifically, given a query image, we retrieve existing human-composed phrases used to describe visually similar images, then selectively combine those phrases to generate a novel description for the query image. We cast the generation process as constraint optimization problems, collectively incorporating multiple interconnected aspects of language composition for content planning, surface realization and discourse structure. Evaluation by human annotators indicates that our final system generates more semantically correct and linguistically appealing descriptions than two nontrivial baselines.", "keyphrases": ["image", "data-driven approach", "caption", "ilp"]} +{"id": "ke-etal-2018-generating", "title": "Generating Informative Responses with Controlled Sentence Function", "abstract": "Sentence function is a significant factor to achieve the purpose of the speaker, which, however, has not been touched in large-scale conversation generation so far. In this paper, we present a model to generate informative responses with controlled sentence function. Our model utilizes a continuous latent variable to capture various word patterns that realize the expected sentence function, and introduces a type controller to deal with the compatibility of controlling sentence function and generating informative content. Conditioned on the latent variable, the type controller determines the type (i.e., function-related, topic, and ordinary word) of a word to be generated at each decoding position.
Experiments show that our model outperforms state-of-the-art baselines, and it has the ability to generate responses with both controlled sentence function and informative content.", "keyphrases": ["sentence function", "compatibility", "informative content", "conditional variational autoencoder"]} +{"id": "ruokolainen-etal-2014-painless", "title": "Painless Semi-Supervised Morphological Segmentation using Conditional Random Fields", "abstract": "We discuss data-driven morphological segmentation, in which word forms are segmented into morphs, that is the surface forms of morphemes. We extend a recent segmentation approach based on conditional random fields from purely supervised to semi-supervised learning by exploiting available unsupervised segmentation techniques. We integrate the unsupervised techniques into the conditional random field model via feature set augmentation. Experiments on three diverse languages show that this straightforward semi-supervised extension greatly improves the segmentation accuracy of the purely supervised CRFs in a computationally efficient manner.", "keyphrases": ["segmentation", "random field", "augmentation", "crf-based model"]} +{"id": "lawrence-riezler-2018-improving", "title": "Improving a Neural Semantic Parser by Counterfactual Learning from Human Bandit Feedback", "abstract": "Counterfactual learning from human bandit feedback describes a scenario where user feedback on the quality of outputs of a historic system is logged and used to improve a target system. We show how to apply this learning framework to neural semantic parsing. From a machine learning perspective, the key challenge lies in a proper reweighting of the estimator so as to avoid known degeneracies in counterfactual learning, while still being applicable to stochastic gradient optimization. To conduct experiments with human users, we devise an easy-to-use interface to collect human feedback on semantic parses. Our work is the first to show that semantic parsers can be improved significantly by counterfactual learning from logged human feedback data.", "keyphrases": ["counterfactual learning", "feedback", "parse"]} +{"id": "tran-etal-2018-parsing", "title": "Parsing Speech: a Neural Approach to Integrating Lexical and Acoustic-Prosodic Information", "abstract": "In conversational speech, the acoustic signal provides cues that help listeners disambiguate difficult parses. For automatically parsing spoken utterances, we introduce a model that integrates transcribed text and acoustic-prosodic features using a convolutional neural network over energy and pitch trajectories coupled with an attention-based recurrent neural network that accepts text and prosodic features. We find that different types of acoustic-prosodic features are individually helpful, and together give statistically significant improvements in parse and disfluency detection F1 scores over a strong text-only baseline. For this study with known sentence boundaries, error analyses show that the main benefit of acoustic-prosodic features is in sentences with disfluencies, attachment decisions are most improved, and transcription errors obscure gains from prosody.", "keyphrases": ["energy", "disfluency", "prosodic cue"]} +{"id": "singh-etal-2019-bert", "title": "BERT is Not an Interlingua and the Bias of Tokenization", "abstract": "Multilingual transfer learning can benefit both high- and low-resource languages, but the source of these improvements is not well understood. 
Canonical Correlation Analysis (CCA) of the internal representations of a pre-trained, multilingual BERT model reveals that the model partitions representations for each language rather than using a common, shared, interlingual space. This effect is magnified at deeper layers, suggesting that the model does not progressively abstract semantic content while disregarding languages. Hierarchical clustering based on the CCA similarity scores between languages reveals a tree structure that mirrors the phylogenetic trees hand-designed by linguists. The subword tokenization employed by BERT provides a stronger bias towards such structure than character- and word-level tokenizations. We release a subset of the XNLI dataset translated into an additional 14 languages at to assist further research into multilingual representations.", "keyphrases": ["tokenization", "multilinguality", "bert"]} +{"id": "hermann-blunsom-2014-multilingual", "title": "Multilingual Models for Compositional Distributed Semantics", "abstract": "We present a novel technique for learning semantic representations, which extends the distributional hypothesis to multilingual data and joint-space embeddings. Our models leverage parallel data and learn to strongly align the embeddings of semantically equivalent sentences, while maintaining sufficient distance between those of dissimilar sentences. The models do not rely on word alignments or any syntactic information and are successfully applied to a number of diverse languages. We extend our approach to learn semantic representations at the document level, too. We evaluate these models on two cross-lingual document classification tasks, outperforming the prior state of the art. Through qualitative analysis and the study of pivoting effects we demonstrate that our representations are semantically plausible and can capture semantic relationships across languages without parallel data.", "keyphrases": ["parallel data", "document classification task", "word embedding", "different language", "objective"]} +{"id": "coope-etal-2020-span", "title": "Span-ConveRT: Few-shot Span Extraction for Dialog with Pretrained Conversational Representations", "abstract": "We introduce Span-ConveRT, a light-weight model for dialog slot-filling which frames the task as a turn-based span extraction task. This formulation allows for a simple integration of conversational knowledge coded in large pretrained conversational models such as ConveRT (Henderson et al., 2019). We show that leveraging such knowledge in Span-ConveRT is especially useful for few-shot learning scenarios: we report consistent gains over 1) a span extractor that trains representations from scratch in the target domain, and 2) a BERT-based span extractor. In order to inspire more work on span extraction for the slot-filling task, we also release RESTAURANTS-8K, a new challenging data set of 8,198 utterances, compiled from actual conversations in the restaurant booking domain.", "keyphrases": ["span extraction", "dialog", "few-shot setting"]} +{"id": "yang-mitchell-2017-joint", "title": "A Joint Sequential and Relational Model for Frame-Semantic Parsing", "abstract": "We introduce a new method for frame-semantic parsing that significantly improves the prior state of the art. Our model leverages the advantages of a deep bidirectional LSTM network which predicts semantic role labels word by word and a relational network which predicts semantic roles for individual text expressions in relation to a predicate.
The two networks are integrated into a single model via knowledge distillation, and a unified graphical model is employed to jointly decode frames and semantic roles during inference. Experiments on the standard FrameNet data show that our model significantly outperforms existing neural and non-neural approaches, achieving a 5.7 F1 gain over the current state of the art, for full frame structure extraction.", "keyphrases": ["frame-semantic parsing", "relational network", "predicate", "frame", "full-text annotation"]} +{"id": "ishiwatari-etal-2019-learning", "title": "Learning to Describe Unknown Phrases with Local and Global Contexts", "abstract": "When reading a text, it is common to become stuck on unfamiliar words and phrases, such as polysemous words with novel senses, rarely used idioms, internet slang, or emerging entities. If we humans cannot figure out the meaning of those expressions from the immediate local context, we consult dictionaries for definitions or search documents or the web to find other global context to help in interpretation. Can machines help us do this work? Which type of context is more important for machines to solve the problem? To answer these questions, we undertake a task of describing a given phrase in natural language based on its local and global contexts. To solve this task, we propose a neural description model that consists of two context encoders and a description decoder. In contrast to the existing methods for non-standard English explanation [Ni+ 2017] and definition generation [Noraset+ 2017; Gadetsky+ 2018], our model appropriately takes important clues from both local and global contexts. Experimental results on three existing datasets (including WordNet, Oxford and Urban Dictionaries) and a dataset newly created from Wikipedia demonstrate the effectiveness of our method over previous work.", "keyphrases": ["local context", "definition", "wikipedia"]} +{"id": "venugopal-etal-2008-wider", "title": "Wider Pipelines: N-Best Alignments and Parses in MT Training", "abstract": "State-of-the-art statistical machine translation systems use hypotheses from several maximum a posteriori inference steps, including word alignments and parse trees, to identify translational structure and estimate the parameters of translation models. While this approach leads to a modular pipeline of independently developed components, errors made in these \u201csingle-best\u201d hypotheses can propagate to downstream estimation steps that treat these inputs as clean, trustworthy training data. In this work we integrate N-best alignments and parses by using a probability distribution over these alternatives to generate posterior fractional counts for use in downstream estimation. Using these fractional counts in a DOP-inspired syntax-based translation system, we show significant improvements in translation quality over a single-best trained baseline.", "keyphrases": ["n-best alignment", "translation model", "posterior fractional count", "hypothesis"]} +{"id": "yangarber-2003-counter", "title": "Counter-Training in Discovery of Semantic Patterns", "abstract": "This paper presents a method for unsupervised discovery of semantic patterns. Semantic patterns are useful for a variety of text understanding tasks, in particular for locating events in text for information extraction. The method builds upon previously described approaches to iterative unsupervised pattern acquisition. 
One common characteristic of prior approaches is that the output of the algorithm is a continuous stream of patterns, with gradually degrading precision. Our method differs from the previous pattern acquisition algorithms in that it introduces competition among several scenarios simultaneously. This provides natural stopping criteria for the unsupervised learners, while maintaining good precision levels at termination. We discuss the results of experiments with several scenarios, and examine different aspects of the new procedure.", "keyphrases": ["discovery", "information extraction", "negative example"]} +{"id": "kerrigan-etal-2020-differentially", "title": "Differentially Private Language Models Benefit from Public Pre-training", "abstract": "Language modeling is a keystone task in natural language processing. When training a language model on sensitive information, differential privacy (DP) allows us to quantify the degree to which our private data is protected. However, training algorithms which enforce differential privacy often lead to degradation in model quality. We study the feasibility of learning a language model which is simultaneously high-quality and privacy preserving by tuning a public base model on a private corpus. We find that DP fine-tuning boosts the performance of language models in the private domain, making the training of such models possible.", "keyphrases": ["language model", "private data", "recent system"]} +{"id": "morio-fujita-2018-end", "title": "End-to-End Argument Mining for Discussion Threads Based on Parallel Constrained Pointer Architecture", "abstract": "Argument Mining (AM) is a relatively recent discipline, which concentrates on extracting claims or premises from discourses, and inferring their structures. However, many existing works do not consider micro-level AM studies on discussion threads sufficiently. In this paper, we tackle AM for discussion threads. Our main contributions are as follows: (1) A novel combination scheme focusing on micro-level inner- and inter-post schemes for a discussion thread. (2) Annotation of large-scale civic discussion threads with the scheme. (3) Parallel constrained pointer architecture (PCPA), a novel end-to-end technique to discriminate sentence types, inner-post relations, and inter-post interactions simultaneously. The experimental results demonstrate that our proposed model shows better accuracy in terms of relations extraction, in comparison to existing state-of-the-art models.", "keyphrases": ["argument mining", "discussion thread", "constrained pointer architecture"]} +{"id": "levinboim-etal-2015-model", "title": "Model Invertibility Regularization: Sequence Alignment With or Without Parallel Data", "abstract": "We present Model Invertibility Regularization (MIR), a method that jointly trains two directional sequence alignment models, one in each direction, and takes into account the invertibility of the alignment task. By coupling the two models through their parameters (as opposed to through their inferences, as in Liang et al.\u2019s Alignment by Agreement (ABA), and Ganchev et al.\u2019s Posterior Regularization (PostCAT)), our method seamlessly extends to all IBM-style word alignment models as well as to alignment without parallel data. Our proposed algorithm is mathematically sound and inherits convergence guarantees from EM. We evaluate MIR on two tasks: (1) On word alignment, applying MIR on fertility based models we attain higher F-scores than ABA and PostCAT.
(2) On Japanese-to-English back-transliteration without parallel data, applied to the decipherment model of Ravi and Knight, MIR learns sparser models that close the gap in whole-name error rate by 33% relative to a model trained on parallel data, and further, beats a previous approach by Mylonakis et al.", "keyphrases": ["parallel data", "sequence alignment model", "agreement", "model invertibility regularization"]} +{"id": "chen-etal-2018-accurate", "title": "Accurate SHRG-Based Semantic Parsing", "abstract": "We demonstrate that an SHRG-based parser can produce semantic graphs much more accurately than previously shown, by relating synchronous production rules to the syntacto-semantic composition process. Our parser achieves an accuracy of 90.35 for EDS (89.51 for DMRS) in terms of elementary dependency match, which is a 4.87 (5.45) point improvement over the best existing data-driven model, indicating, in our view, the importance of linguistically-informed derivation for data-driven semantic parsing. This accuracy is equivalent to that of English Resource Grammar guided models, suggesting that (recurrent) neural network models are able to effectively learn deep linguistic knowledge from annotations.", "keyphrases": ["semantic parsing", "composition process", "eds", "shrg"]} +{"id": "abend-etal-2015-lexical", "title": "Lexical Event Ordering with an Edge-Factored Model", "abstract": "Extensive lexical knowledge is necessary for temporal analysis and planning tasks. We address in this paper a lexical setting that allows for the straightforward incorporation of rich features and structural constraints. We explore a lexical event ordering task, namely determining the likely temporal order of events based solely on the identity of their predicates and arguments. We propose an \u201cedge-factored\u201d model for the task that decomposes over the edges of the event graph. We learn it using the structured perceptron. As lexical tasks require large amounts of text, we do not attempt manual annotation and instead use the textual order of events in a domain where this order is aligned with their temporal order, namely cooking recipes.", "keyphrases": ["edge-factored model", "temporal order", "identity", "lexical event"]} +{"id": "scansani-dugast-2021-glossary", "title": "Glossary functionality in commercial machine translation: does it help? A first step to identify best practices for a language service provider", "abstract": "Recently, a number of commercial Machine Translation (MT) providers have started to offer glossary features allowing users to enforce terminology into the output of a generic model. However, to the best of our knowledge it is not clear how such features would impact terminology accuracy and the overall quality of the output. The present contribution aims at providing a first insight into the performance of the glossary-enhanced generic models offered by four providers. Our tests involve two different domains and language pairs, i.e. Sportswear En\u2013Fr and Industrial Equipment De\u2013En. The output of each generic model and of the glossary-enhanced one will be evaluated relying on Translation Error Rate (TER) to take into account the overall output quality and on accuracy to assess the compliance with the glossary. This is followed by a manual evaluation.
The present contribution mainly focuses on understanding how these glossary features can be fruitfully exploited by language service providers (LSPs), especially in a scenario in which a customer glossary is already available and is added to the generic model as is.", "keyphrases": ["commercial machine translation", "language service provider", "output quality", "glossary"]} +{"id": "haouari-etal-2021-arcov", "title": "ArCOV-19: The First Arabic COVID-19 Twitter Dataset with Propagation Networks", "abstract": "In this paper, we present ArCOV-19, an Arabic COVID-19 Twitter dataset that spans one year, covering the period from 27th of January 2020 till 31st of January 2021. ArCOV-19 is the first publicly-available Arabic Twitter dataset covering the COVID-19 pandemic that includes about 2.7M tweets alongside the propagation networks of the most-popular subset of them (i.e., most-retweeted and -liked). The propagation networks include both retweets and conversational threads (i.e., threads of replies). ArCOV-19 is designed to enable research under several domains including natural language processing, information retrieval, and social computing. Preliminary analysis shows that ArCOV-19 captures rising discussions associated with the first reported cases of the disease as they appeared in the Arab world. In addition to the source tweets and the propagation networks, we also release the search queries and the language-independent crawler used to collect the tweets to encourage the curation of similar datasets.", "keyphrases": ["twitter", "propagation networks", "arcov-19"]} +{"id": "zhan-etal-2021-scope", "title": "Out-of-Scope Intent Detection with Self-Supervision and Discriminative Training", "abstract": "Out-of-scope intent detection is of practical importance in task-oriented dialogue systems. Since the distribution of outlier utterances is arbitrary and unknown in the training stage, existing methods commonly rely on strong assumptions on data distribution such as mixture of Gaussians to make inference, resulting in either complex multi-step training procedures or hand-crafted rules such as confidence threshold selection for outlier detection. In this paper, we propose a simple yet effective method to train an out-of-scope intent classifier in a fully end-to-end manner by simulating the test scenario in training, which requires no assumption on data distribution and no additional post-processing or threshold setting. Specifically, we construct a set of pseudo outliers in the training stage, by generating synthetic outliers using inlier features via self-supervision and sampling out-of-scope sentences from easily available open-domain datasets. The pseudo outliers are used to train a discriminative classifier that can be directly applied to and generalize well on the test task. We evaluate our method extensively on four benchmark dialogue datasets and observe significant improvements over state-of-the-art approaches. Our code has been released at .", "keyphrases": ["detection", "self-supervision", "out-of-scope"]} +{"id": "ziering-van-der-plas-2016-towards", "title": "Towards Unsupervised and Language-independent Compound Splitting using Inflectional Morphological Transformations", "abstract": "In this paper, we address the task of language-independent, knowledge-lean and unsupervised compound splitting, which is an essential component for many natural language processing tasks such as machine translation.
Previous methods on statistical compound splitting either include language-specific knowledge (e.g., linking elements) or rely on parallel data, which results in limited applicability. We aim to overcome these limitations by learning compounding morphology from inflectional information derived from lemmatized monolingual corpora. In experiments for Germanic languages, we show that our approach significantly outperforms language-dependent state-of-the-art methods in finding the correct split point and that word inflection is a good approximation for compounding morphology.", "keyphrases": ["inflectional information", "monolingual corpora", "van der"]} +{"id": "huang-etal-2021-efficient", "title": "Efficient Attentions for Long Document Summarization", "abstract": "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.", "keyphrases": ["long document summarization", "memory complexity", "scalability", "encoder-decoder attention", "new dataset"]} +{"id": "chen-huang-2016-semi", "title": "Semi-supervised Convolutional Networks for Translation Adaptation with Tiny Amount of In-domain Data", "abstract": "In this paper, we propose a method which uses semi-supervised convolutional neural networks (CNNs) to select in-domain training data for statistical machine translation. This approach is particularly effective when only tiny amounts of in-domain data are available. The in-domain data and randomly sampled general-domain data are used to train a data selection model with semi-supervised CNN, then this model computes domain relevance scores for all the sentences in the general-domain data set. The sentence pairs with top scores are selected to train the system. We carry out experiments on 4 language directions with three test domains. Compared with strong baseline systems trained with large amounts of data, this method can improve the performance up to 3.1 BLEU. Its performance is significantly better than three state-of-the-art language model based data selection methods. We also show that the in-domain data used to train the selection model could be as few as 100 sentences, which makes fine-grained topic-dependent translation adaptation possible.", "keyphrases": ["translation adaptation", "in-domain data", "convolutional neural network", "cnns", "baseline system"]} +{"id": "liu-etal-2018-jointly", "title": "Jointly Multiple Events Extraction via Attention-based Graph Information Aggregation", "abstract": "Event extraction is of practical utility in natural language processing. In the real world, it is a common phenomenon that multiple events exist in the same sentence, where extracting them is more difficult than extracting a single event.
Previous works on modeling the associations between events by sequential modeling methods suffer from low efficiency in capturing very long-range dependencies. In this paper, we propose a novel Jointly Multiple Events Extraction (JMEE) framework to jointly extract multiple event triggers and arguments by introducing syntactic shortcut arcs to enhance information flow and attention-based graph convolution networks to model graph information. The experiment results demonstrate that our proposed framework achieves competitive results compared with state-of-the-art methods.", "keyphrases": ["multiple events extraction", "trigger", "graph convolution network", "dependency tree", "attention-based gcn"]} +{"id": "potash-rumshisky-2017-towards", "title": "Towards Debate Automation: a Recurrent Model for Predicting Debate Winners", "abstract": "In this paper we introduce a practical first step towards the creation of an automated debate agent: a state-of-the-art recurrent predictive model for predicting debate winners. By having an accurate predictive model, we are able to objectively rate the quality of a statement made at a specific turn in a debate. The model is based on a recurrent neural network architecture with attention, which allows the model to effectively account for the entire debate when making its prediction. Our model achieves state-of-the-art accuracy on a dataset of debate transcripts annotated with audience favorability of the debate teams. Finally, we discuss how future work can leverage our proposed model for the creation of an automated debate agent. We accomplish this by determining the model input that will maximize audience favorability toward a given side of a debate at an arbitrary turn.", "keyphrases": ["debate", "winner", "audience favorability"]} +{"id": "magdy-darwish-2006-arabic", "title": "Arabic OCR Error Correction Using Character Segment Correction, Language Modeling, and Shallow Morphology", "abstract": "This paper explores the use of a character segment based character correction model, language modeling, and shallow morphology for Arabic OCR error correction. Experimentation shows that character segment based correction is superior to single character correction and that language modeling boosts correction, by improving the ranking of candidate corrections, while shallow morphology had a small adverse effect. Further, given a sufficiently large corpus to extract a dictionary and to train a language model, word-based correction works well for a morphologically rich language such as Arabic.", "keyphrases": ["ocr error correction", "language modeling", "arabic"]} +{"id": "lee-etal-2020-iterative", "title": "Iterative Refinement in the Continuous Space for Non-Autoregressive Neural Machine Translation", "abstract": "We propose an efficient inference procedure for non-autoregressive machine translation that iteratively refines translation purely in the continuous space. Given a continuous latent variable model for machine translation (Shu et al., 2020), we train an inference network to approximate the gradient of the marginal log probability of the target sentence, using the latent variable instead. This allows us to use gradient-based optimization to find the target sentence at inference time that approximately maximizes its marginal probability.
As each refinement step only involves computation in the latent space of low dimensionality (we use 8 in our experiments), we avoid computational overhead incurred by existing non-autoregressive inference procedures that often refine in token space. We compare our approach to a recently proposed EM-like inference procedure (Shu et al., 2020) that optimizes in a hybrid space, consisting of both discrete and continuous variables. We evaluate our approach on WMT'14 En\u2192De, WMT'16 Ro\u2192En and IWSLT'16 De\u2192En, and observe two advantages over the EM-like inference: (1) it is computationally efficient, i.e. each refinement step is twice as fast, and (2) it is more effective, resulting in higher marginal probabilities and BLEU scores with the same number of refinement steps. On WMT'14 En\u2192De, for instance, our approach is able to decode 6.2 times faster than the autoregressive model with minimal degradation to translation quality (0.9 BLEU).", "keyphrases": ["latent variable", "inference network", "iterative refinement"]} +{"id": "zugarini-etal-2020-vulgaris", "title": "Vulgaris: Analysis of a Corpus for Middle-Age Varieties of Italian Language", "abstract": "Italian is a Romance language that has its roots in Vulgar Latin. The birth of modern Italian started in Tuscany around the 14th century, and it is mainly attributed to the works of Dante Alighieri, Francesco Petrarca and Giovanni Boccaccio, who are among the most acclaimed authors of the medieval age in Tuscany. However, Italy has been characterized by a high variety of dialects, which are often loosely related to each other, due to the past fragmentation of the territory. Italian has absorbed influences from many of these dialects, as well as from other languages due to the dominion of portions of the country by other nations, such as Spain and France. In this work we present Vulgaris, a project aimed at studying a corpus of Italian textual resources from authors of different regions, ranging in a time period between 1200 and 1600. Each composition is associated to its author, and authors are also grouped in families, i.e. sharing similar stylistic/chronological characteristics. Hence, the dataset is not only a valuable resource for studying the diachronic evolution of Italian and the differences between its dialects, but it is also useful to investigate stylistic aspects between single authors. We provide a detailed statistical analysis of the data, and a corpus-driven study in dialectology and diachronic varieties.", "keyphrases": ["italian language", "diachronic evolution", "vulgaris"]} +{"id": "freeman-etal-2006-cross", "title": "Cross Linguistic Name Matching in English and Arabic", "abstract": "This paper presents a solution to the problem of matching personal names in English to the same names represented in Arabic script. Standard string comparison measures perform poorly on this task due to varying transliteration conventions in both languages and the fact that Arabic script does not usually represent short vowels. Significant improvement is achieved by augmenting the classic Levenshtein edit-distance algorithm with character equivalency classes.", "keyphrases": ["name matching", "arabic", "in-vocabulary word"]} +{"id": "fu-etal-2013-exploiting", "title": "Exploiting Multiple Sources for Open-Domain Hypernym Discovery", "abstract": "Hypernym discovery aims to extract such noun pairs that one noun is a hypernym of the other. Most previous methods are based on lexical patterns but perform badly on open-domain data.
Other work extracts hypernym relations from encyclopedias but has limited coverage. This paper proposes a simple yet effective distant supervision framework for Chinese open-domain hypernym discovery. Given an entity name, we try to discover its hypernyms by leveraging knowledge from multiple sources, i.e., search engine results, encyclopedias, and morphology of the entity name. First, we extract candidate hypernyms from the above sources. Then, we apply a statistical ranking model to select correct hypernyms. A set of novel features is proposed for the ranking model. We also present a heuristic strategy to build large-scale noisy training data for the model without human annotation. Experimental results demonstrate that our approach outperforms the state-of-the-art methods on a manually labeled test dataset.", "keyphrases": ["open-domain hypernym discovery", "encyclopedia", "entity name", "search engine result"]} +{"id": "guillaume-etal-2016-crowdsourcing", "title": "Crowdsourcing Complex Language Resources: Playing to Annotate Dependency Syntax", "abstract": "This article presents the results we obtained on a complex annotation task (that of dependency syntax) using a specifically designed Game with a Purpose, ZombiLingo. We show that with suitable mechanisms (decomposition of the task, training of the players and regular control of the annotation quality during the game), it is possible to obtain annotations whose quality is significantly higher than that obtainable with a parser, provided that enough players participate. The source code of the game and the resulting annotated corpora (for French) are freely available.", "keyphrases": ["zombilingo", "french", "dependency syntax annotation"]} +{"id": "neubig-etal-2011-unsupervised", "title": "An Unsupervised Model for Joint Phrase Alignment and Extraction", "abstract": "We present an unsupervised model for joint phrase alignment and extraction using non-parametric Bayesian methods and inversion transduction grammars (ITGs). The key contribution is that phrases of many granularities are included directly in the model through the use of a novel formulation that memorizes phrases generated not only by terminal, but also non-terminal symbols. This allows for a completely probabilistic model that is able to create a phrase table that achieves competitive accuracy on phrase-based machine translation tasks directly from unaligned sentence pairs. Experiments on several language pairs demonstrate that the proposed model matches the accuracy of the traditional two-step word alignment/phrase extraction approach while reducing the phrase table to a fraction of the original size.", "keyphrases": ["joint phrase alignment", "inversion transduction grammar", "granularity", "pitman-yor process"]} +{"id": "maslennikov-chua-2007-multi", "title": "A Multi-resolution Framework for Information Extraction from Free Text", "abstract": "Extraction of relations between entities is an important part of Information Extraction on free text. Previous methods are mostly based on statistical correlation and dependency relations between entities. This paper re-examines the problem at the multi-resolution layers of phrase, clause and sentence using dependency and discourse relations. Our multi-resolution framework ARE (Anchor and Relation) uses clausal relations in 2 ways: 1) to filter noisy dependency paths; and 2) to increase reliability of dependency path extraction.
The resulting system outperforms the previous approaches by 3%, 7%, 4% on MUC4, MUC6 and ACE RDC domains respectively.", "keyphrases": ["multi-resolution framework", "information extraction", "discourse tree", "pattern-based framework"]} +{"id": "su-etal-2015-bilingual", "title": "Bilingual Correspondence Recursive Autoencoder for Statistical Machine Translation", "abstract": "Learning semantic representations and tree structures of bilingual phrases is beneficial for statistical machine translation. In this paper, we propose a new neural network model called Bilingual Correspondence Recursive Autoencoder (BCorrRAE) to model bilingual phrases in translation. We incorporate word alignments into BCorrRAE to allow it to freely access bilingual constraints at different levels. BCorrRAE minimizes a joint objective on the combination of a recursive autoencoder reconstruction error, a structural alignment consistency error and a cross-lingual reconstruction error so as to not only generate alignment-consistent phrase structures, but also capture different levels of semantic relations within bilingual phrases. In order to examine the effectiveness of BCorrRAE, we incorporate both semantic and structural similarity features built on bilingual phrase representations and tree structures learned by BCorrRAE into a state-of-the-art SMT system. Experiments on NIST Chinese-English test sets show that our model achieves a substantial improvement of up to 1.55 BLEU points over the baseline.", "keyphrases": ["correspondence recursive autoencoder", "statistical machine translation", "different level", "semantic relation"]} +{"id": "keswani-etal-2020-iitk-semeval", "title": "IITK at SemEval-2020 Task 8: Unimodal and Bimodal Sentiment Analysis of Internet Memes", "abstract": "Social media is abundant in visual and textual information presented together or in isolation. Memes are the most popular form, belonging to the former class. In this paper, we present our approaches for the Memotion Analysis problem as posed in SemEval-2020 Task 8. The goal of this task is to classify memes based on their emotional content and sentiment. We leverage techniques from Natural Language Processing (NLP) and Computer Vision (CV) towards the sentiment classification of internet memes (Subtask A). We consider Bimodal (text and image) as well as Unimodal (text-only) techniques in our study ranging from the Na\u00efve Bayes classifier to Transformer-based approaches. Our results show that a text-only approach, a simple Feed Forward Neural Network (FFNN) with Word2vec embeddings as input, performs superior to all the others. We stand first in the Sentiment analysis task with a relative improvement of 63% over the baseline macro-F1 score. Our work is relevant to any task concerned with the combination of different modalities.", "keyphrases": ["semeval-2020 task", "internet meme", "text-only approach"]} +{"id": "patry-etal-2007-mistral", "title": "MISTRAL: a lattice translation system for IWSLT 2007", "abstract": "This paper describes MISTRAL, the lattice translation system that we developed for the Italian-English track of the International Workshop on Spoken Language Translation 2007. MISTRAL is a discriminative phrase-based system that translates a source word lattice in two passes. The first pass extracts a list of top ranked sentence pairs from the lattice and the second pass rescores this list with more complex features.
Our experiments show that our system, when translating pruned lattices, is at least as good as a fair baseline that translates the first ranked sentences returned by a speech recognition system.", "keyphrases": ["lattice translation system", "iwslt", "international workshop", "spoken language translation", "mistral"]} +{"id": "torres-martins-etal-2008-stacking", "title": "Stacking Dependency Parsers", "abstract": "We explore a stacked framework for learning to predict dependency structures for natural language sentences. A typical approach in graph-based dependency parsing has been to assume a factorized model, where local features are used but a global function is optimized (McDonald et al., 2005b). Recently Nivre and McDonald (2008) used the output of one dependency parser to provide features for another. We show that this is an example of stacked learning, in which a second predictor is trained to improve the performance of the first. Further, we argue that this technique is a novel way of approximating rich non-local features in the second parser, without sacrificing efficient, model-optimal prediction. Experiments on twelve languages show that stacking transition-based and graph-based parsers improves performance over existing state-of-the-art dependency parsers.", "keyphrases": ["dependency parsing", "stacked learning", "stacking"]} +{"id": "burchardt-etal-2006-salsa", "title": "The SALSA Corpus: a German Corpus Resource for Lexical Semantics", "abstract": "This paper describes the SALSA corpus, a large German corpus manually annotated with role-semantic annotation, based on the syntactically annotated TIGER newspaper corpus. The first release, comprising about 20,000 annotated predicate instances (about half the TIGER corpus), is scheduled for mid-2006. In this paper we discuss the annotation framework (frame semantics) and its cross-lingual applicability, problems arising from exhaustive annotation, strategies for quality control, and possible applications.", "keyphrases": ["salsa corpus", "predicate", "framenet", "lexical unit"]} +{"id": "zarriess-schlangen-2016-easy", "title": "Easy Things First: Installments Improve Referring Expression Generation for Objects in Photographs", "abstract": "Research on generating referring expressions has so far mostly focussed on \u201cone-shot reference\u201d, where the aim is to generate a single, discriminating expression. In interactive settings, however, it is not uncommon for reference to be established in \u201cinstallments\u201d, where referring information is offered piecewise until success has been confirmed. We show that this strategy can also be advantageous in technical systems that only have uncertain access to object attributes and categories. We train a recently introduced model of grounded word meaning on a data set of REs for objects in images and learn to predict semantically appropriate expressions. In a human evaluation, we observe that users are sensitive to inadequate object names - which unfortunately are not unlikely to be generated from low-level visual input. We propose a solution inspired from human task-oriented interaction and implement strategies for avoiding and repairing semantically inaccurate words.
We enhance a word-based REG with context-aware, referential installments and find that they substantially improve the referential success of the system.", "keyphrases": ["installment", "object", "real-world image"]} +{"id": "weng-etal-2017-neural", "title": "Neural Machine Translation with Word Predictions", "abstract": "In the encoder-decoder architecture for neural machine translation (NMT), the hidden states of the recurrent structures in the encoder and decoder carry the crucial information about the sentence. These vectors are generated by parameters which are updated by back-propagation of translation errors through time. We argue that propagating errors through the end-to-end recurrent structures is not a direct way to control the hidden vectors. In this paper, we propose to use word predictions as a mechanism for direct supervision. More specifically, we require these vectors to be able to predict the vocabulary in target sentence. Our simple mechanism ensures better representations in the encoder and decoder without using any extra data or annotation. It is also helpful in reducing the target side vocabulary and improving the decoding efficiency. Experiments on Chinese-English machine translation task show an average BLEU improvement of 4.53.", "keyphrases": ["hidden state", "target sentence", "neural machine translation"]} +{"id": "paula-etal-2018-similarity", "title": "Similarity Measures for the Detection of Clinical Conditions with Verbal Fluency Tasks", "abstract": "Semantic Verbal Fluency tests have been used in the detection of certain clinical conditions, like Dementia. In particular, given a sequence of semantically related words, a large number of switches from one semantic class to another has been linked to clinical conditions. In this work, we investigate three similarity measures for automatically identifying switches in semantic chains: semantic similarity from a manually constructed resource, and word association strength and semantic relatedness, both calculated from corpora. This information is used for building classifiers to distinguish healthy controls from clinical cases with early stages of Alzheimer's Disease and Mild Cognitive Deficits. The overall results indicate that for clinical conditions the classifiers that use these similarity measures outperform those that use a gold standard taxonomy.", "keyphrases": ["detection", "verbal fluency task", "word embedding"]} +{"id": "mukherjee-bhattacharyya-2012-sentiment", "title": "Sentiment Analysis in Twitter with Lightweight Discourse Analysis", "abstract": "We propose a lightweight method for using discourse relations for polarity detection of tweets. This method is targeted towards the web-based applications that deal with noisy, unstructured text, like the tweets, and cannot afford to use heavy linguistic resources like parsing due to frequent failure of the parsers to handle noisy data. Most of the works in micro-blogs, like Twitter, use a bag-of-words model that ignores the discourse particles like but, since, although etc. In this work, we show how the discourse relations like the connectives and conditionals can be used to incorporate discourse information in any bag-of-words model, to improve sentiment classification accuracy. We also probe the influence of the semantic operators like modals and negations on the discourse relations that affect the sentiment of a sentence. Discourse relations and corresponding rules are identified with minimal processing - just a list look up. 
We first give a linguistic description of the various discourse relations which leads to conditions in rules and features in SVM. We show that our discourse-based bag-of-words model performs well in a noisy medium (Twitter), where it performs better than an existing Twitter-based application. Furthermore, we show that our approach is beneficial to structured reviews as well, where we achieve a better accuracy than a state-of-the-art system in the travel review domain. Our system compares favorably with the state-of-the-art systems and has the additional attractiveness of being less resource intensive.", "keyphrases": ["twitter", "discourse information", "negation", "sentiment analysis"]} +{"id": "li-etal-2010-whitepaper", "title": "Whitepaper of NEWS 2010 Shared Task on Transliteration Generation", "abstract": "Transliteration is defined as phonetic translation of names across languages. Transliteration of Named Entities (NEs) is necessary in many applications, such as machine translation, corpus alignment, cross-language IR, information extraction and automatic lexicon acquisition. All such systems call for high-performance transliteration, which is the focus of shared task in the NEWS 2010 workshop. The objective of the shared task is to promote machine transliteration research by providing a common benchmarking platform for the community to evaluate the state-of-the-art technologies.", "keyphrases": ["transliteration", "news", "direction", "quality metric"]} +{"id": "su-etal-2015-reward", "title": "Reward Shaping with Recurrent Neural Networks for Speeding up On-Line Policy Learning in Spoken Dialogue Systems", "abstract": "Statistical spoken dialogue systems have the attractive property of being able to be optimised from data via interactions with real users. However in the reinforcement learning paradigm the dialogue manager (agent) often requires significant time to explore the state-action space to learn to behave in a desirable manner. This is a critical issue when the system is trained on-line with real users where learning costs are expensive. Reward shaping is one promising technique for addressing these concerns. Here we examine three recurrent neural network (RNN) approaches for providing reward shaping information in addition to the primary (task-orientated) environmental feedback. These RNNs are trained on returns from dialogues generated by a simulated user and attempt to diffuse the overall evaluation of the dialogue back down to the turn level to guide the agent towards good behaviour faster. In both simulated and real user scenarios these RNNs are shown to increase policy learning speed. Importantly, they do not require prior knowledge of the user's goal.", "keyphrases": ["policy learning", "reward", "dialogue policy learning"]} +{"id": "han-eisenstein-2019-unsupervised", "title": "Unsupervised Domain Adaptation of Contextualized Embeddings for Sequence Labeling", "abstract": "Contextualized word embeddings such as ELMo and BERT provide a foundation for strong performance across a wide range of natural language processing tasks by pretraining on large corpora of unlabeled text. However, the applicability of this approach is unknown when the target domain varies substantially from the pretraining corpus. We are specifically interested in the scenario in which labeled data is available in only a canonical source domain such as newstext, and the target domain is distinct from both the labeled and pretraining texts. 
To address this scenario, we propose domain-adaptive fine-tuning, in which the contextualized embeddings are adapted by masked language modeling on text from the target domain. We test this approach on sequence labeling in two challenging domains: Early Modern English and Twitter. Both domains differ substantially from existing pretraining corpora, and domain-adaptive fine-tuning yields substantial improvements over strong BERT baselines, with particularly impressive results on out-of-vocabulary words. We conclude that domain-adaptive fine-tuning offers a simple and effective approach for the unsupervised adaptation of sequence labeling to difficult new domains.", "keyphrases": ["word embedding", "language modeling", "modern english", "unsupervised domain adaptation", "specific domain"]} +{"id": "yang-zong-2014-multi", "title": "Multi-Predicate Semantic Role Labeling", "abstract": "The current approaches to Semantic Role Labeling (SRL) usually perform role classification for each predicate separately and the interaction among individual predicate\u2019s role labeling is ignored if there is more than one predicate in a sentence. In this paper, we prove that different predicates in a sentence could help each other during SRL. In multi-predicate role labeling, there are mainly two key points: argument identification and role labeling of the arguments shared by multiple predicates. To address these issues, in the stage of argument identification, we propose novel predicate-related features which help remove many argument identification errors; in the stage of argument classification, we adopt a discriminative reranking approach to perform role classification of the shared arguments, in which a large set of global features are proposed. We conducted experiments on two standard benchmarks: Chinese PropBank and English PropBank. The experimental results show that our approach can significantly improve SRL performance, especially in Chinese PropBank.", "keyphrases": ["semantic role labeling", "multi-predicate srl", "chinese proposition bank"]} +{"id": "mihaylov-etal-2018-suit", "title": "Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering", "abstract": "We present a new kind of question answering dataset, OpenBookQA, modeled after open book exams for assessing human understanding of a subject. The open book that comes with our questions is a set of 1326 elementary level science facts. Roughly 6000 questions probe an understanding of these facts and their application to novel situations. This requires combining an open book fact (e.g., metals conduct electricity) with broad common knowledge (e.g., a suit of armor is made of metal) obtained from other sources. While existing QA datasets over documents or knowledge bases, being generally self-contained, focus on linguistic understanding, OpenBookQA probes a deeper understanding of both the topic\u2014in the context of common knowledge\u2014and the language it is expressed in. Human performance on OpenBookQA is close to 92%, but many state-of-the-art pre-trained QA methods perform surprisingly poorly, worse than several simple neural baselines we develop. Our oracle experiments designed to circumvent the knowledge retrieval bottleneck demonstrate the value of both the open book and additional facts. 
We leave it as a challenge to solve the retrieval problem in this multi-hop setting and to close the large gap to human performance.", "keyphrases": ["suit", "openbookqa", "science question"]} +{"id": "plaza-del-arco-etal-2019-sinai-semeval-2019", "title": "SINAI at SemEval-2019 Task 6: Incorporating lexicon knowledge into SVM learning to identify and categorize offensive language in social media", "abstract": "Offensive language has an impact across society. The use of social media has aggravated this issue among online users, causing suicides in the worst cases. For this reason, it is important to develop systems capable of identifying and detecting offensive language in text automatically. In this paper, we developed a system to classify offensive tweets as part of our participation in SemEval-2019 Task 6: OffensEval. Our main contribution is the integration of lexical features in the classification using the SVM algorithm.", "keyphrases": ["semeval-2019 task", "offensive language", "lexical feature"]} +{"id": "dhingra-etal-2016-tweet2vec", "title": "Tweet2Vec: Character-Based Distributed Representations for Social Media", "abstract": "Text from social media provides a set of challenges that can cause traditional NLP approaches to fail. Informal language, spelling errors, abbreviations, and special characters are all commonplace in these posts, leading to a prohibitively large vocabulary size for word-level approaches. We propose a character composition model, tweet2vec, which finds vector-space representations of whole tweets by learning complex, non-local dependencies in character sequences. The proposed model outperforms a word-level baseline at predicting user-annotated hashtags associated with the posts, doing significantly better when the input contains many out-of-vocabulary words or unusual character sequences. Our tweet2vec encoder is publicly available.", "keyphrases": ["hashtag", "tweet2vec", "vector representation", "social medium"]} +{"id": "choi-etal-2020-f", "title": "F2-Softmax: Diversifying Neural Text Generation via Frequency Factorized Softmax", "abstract": "Despite recent advances in neural text generation, encoding the rich diversity in human language remains elusive. We argue that the sub-optimal text generation is mainly attributable to the imbalanced token distribution, which particularly misdirects the learning model when trained with the maximum-likelihood objective. As a simple yet effective remedy, we propose two novel methods, F2-Softmax and MefMax, for a balanced training even with the skewed frequency distribution. MefMax assigns tokens uniquely to frequency classes, trying to group tokens with similar frequencies and equalize frequency mass between the classes. F2-Softmax then decomposes a probability distribution of the target token into a product of two conditional probabilities of (1) frequency class, and (2) token from the target frequency class. Models learn more uniform probability distributions because they are confined to subsets of vocabularies. 
Significant performance gains on seven relevant metrics suggest the supremacy of our approach in improving not only the diversity but also the quality of generated texts.", "keyphrases": ["neural text generation", "token distribution", "balanced training", "f2-softmax"]} +{"id": "yoneda-etal-2018-ucl", "title": "UCL Machine Reading Group: Four Factor Framework For Fact Finding (HexaF)", "abstract": "In this paper we describe our 2nd place FEVER shared-task system that achieved a FEVER score of 62.52% on the provisional test set (without additional human evaluation), and 65.41% on the development set. Our system is a four stage model consisting of document retrieval, sentence retrieval, natural language inference and aggregation. Retrieval is performed leveraging task-specific features, and then a natural language inference model takes each of the retrieved sentences paired with the claimed fact. The resulting predictions are aggregated across retrieved sentences with a Multi-Layer Perceptron, and re-ranked corresponding to the final prediction.", "keyphrases": ["sentence retrieval", "claim", "multi-layer perceptron"]} +{"id": "jin-etal-2019-imat", "title": "IMaT: Unsupervised Text Attribute Transfer via Iterative Matching and Translation", "abstract": "Text attribute transfer aims to automatically rewrite sentences such that they possess certain linguistic attributes, while simultaneously preserving their semantic content. This task remains challenging due to a lack of supervised parallel data. Existing approaches try to explicitly disentangle content and attribute information, but this is difficult and often results in poor content-preservation and ungrammaticality. In contrast, we propose a simpler approach, Iterative Matching and Translation (IMaT), which: (1) constructs a pseudo-parallel corpus by aligning a subset of semantically similar sentences from the source and the target corpora; (2) applies a standard sequence-to-sequence model to learn the attribute transfer; (3) iteratively improves the learned transfer function by refining imperfections in the alignment. In sentiment modification and formality transfer tasks, our method outperforms complex state-of-the-art systems by a large margin. As an auxiliary contribution, we produce a publicly-available test set with human-generated transfer references.", "keyphrases": ["iterative matching", "similar sentence", "imat"]} +{"id": "quirk-menezes-2006-need", "title": "Do we need phrases? Challenging the conventional wisdom in Statistical Machine Translation", "abstract": "We begin by exploring theoretical and practical issues with phrasal SMT, several of which are addressed by syntax-based SMT. Next, to address problems not handled by syntax, we propose the concept of a Minimal Translation Unit (MTU) and develop MTU sequence models. Finally we incorporate these models into a syntax-based SMT system and demonstrate that it improves on the state of the art translation quality within a theoretically more desirable framework.", "keyphrases": ["mtu", "smt system", "rule markov model", "reordering"]} +{"id": "gravier-etal-2012-etape", "title": "The ETAPE corpus for the evaluation of speech-based TV content processing in the French language", "abstract": "The paper presents a comprehensive overview of existing data for the evaluation of spoken content processing in a multimedia framework for the French language. 
We focus on the ETAPE corpus which will be made publicly available by ELDA mid 2012, after completion of the evaluation campaign, and recall existing resources resulting from previous evaluation campaigns. The ETAPE corpus consists of 30 hours of TV and radio broadcasts, selected to cover a wide variety of topics and speaking styles, emphasizing spontaneous speech and multiple speaker areas.", "keyphrases": ["etape corpus", "french language", "spontaneous speech"]} +{"id": "johnson-martin-2003-unsupervised", "title": "Unsupervised Learning of Morphology for English and Inuktitut", "abstract": "We describe a simple unsupervised technique for learning morphology by identifying hubs in an automaton. For our purposes, a hub is a node in a graph with in-degree greater than one and out-degree greater than one. We create a word-trie, transform it into a minimal DFA, then identify hubs. Those hubs mark the boundary between root and suffix, achieving similar performance to more complex mixtures of techniques.", "keyphrases": ["morphology", "inuktitut", "automaton"]} +{"id": "pustejovsky-etal-2009-glml", "title": "GLML: Annotating Argument Selection and Coercion", "abstract": "In this paper we introduce a methodology for annotating compositional operations in natural language text, and describe a mark-up language, GLML, based on Generative Lexicon, for identifying such relations. While most annotation systems capture surface relationships, GLML captures the \"compositional history\" of the argument selection relative to the predicate. We provide a brief overview of GL before moving on to our proposed methodology for annotating with GLML. There are three main tasks described in the paper: (i) Compositional mechanisms of argument selection; (ii) Qualia in modification constructions; (iii) Type selection in modification of dot objects. We explain what each task includes and provide a description of the annotation interface. We also include the XML format for GLML including examples of annotated sentences.", "keyphrases": ["coercion", "glml", "underspecification"]} +{"id": "lacoste-julien-etal-2006-word", "title": "Word Alignment via Quadratic Assignment", "abstract": "Recently, discriminative word alignment methods have achieved state-of-the-art accuracies by extending the range of information sources that can be easily incorporated into aligners. The chief advantage of a discriminative framework is the ability to score alignments based on arbitrary features of the matching word tokens, including orthographic form, predictions of other models, lexical context and so on. However, the proposed bipartite matching model of Taskar et al. (2005), despite being tractable and effective, has two important limitations. First, it is limited by the restriction that words have fertility of at most one. More importantly, first order correlations between consecutive words cannot be directly captured by the model. In this work, we address these limitations by enriching the model form. We give estimation and inference algorithms for these enhancements. Our best model achieves a relative AER reduction of 25% over the basic matching formulation, outperforming intersected IBM Model 4 without using any overly compute-intensive features. 
By including predictions of other models as features, we achieve AER of 3.8 on the standard Hansards dataset.", "keyphrases": ["limitation", "word alignment", "quadratic assignment problem"]} +{"id": "bond-etal-2008-improving", "title": "Improving statistical machine translation by paraphrasing the training data.", "abstract": "Large amounts of training data are essential for training statistical machine translation systems. In this paper we show how training data can be expanded by paraphrasing one side. The new data is made by parsing then generating using a precise HPSG based grammar, which gives sentences with the same meaning, but minor variations in lexical choice and word order. In experiments with Japanese and English, we showed consistent gains on the Tanaka Corpus with less consistent improvement on the IWSLT 2005 evaluation data.", "keyphrases": ["paraphrasing", "word order", "source side"]} +{"id": "reisinger-mooney-2010-multi", "title": "Multi-Prototype Vector-Space Models of Word Meaning", "abstract": "Current vector-space models of lexical semantics create a single \"prototype\" vector to represent the meaning of a word. However, due to lexical ambiguity, encoding word meaning with a single vector is problematic. This paper presents a method that uses clustering to produce multiple \"sense-specific\" vectors for each word. This approach provides a context-dependent vector representation of word meaning that naturally accommodates homonymy and polysemy. Experimental comparisons to human judgements of semantic similarity for both isolated words as well as words in sentential contexts demonstrate the superiority of this approach over both prototype and exemplar based vector-space models.", "keyphrases": ["word meaning", "group", "multi-prototype vector-space model", "sense-specific vector"]} +{"id": "kang-hovy-2019-linguistic", "title": "Linguistic Versus Latent Relations for Modeling Coherent Flow in Paragraphs", "abstract": "Generating a long, coherent text such as a paragraph requires a high-level control of different levels of relations between sentences (e.g., tense, coreference). We call such a logical connection between sentences a (paragraph) flow. In order to produce a coherent flow of text, we explore two forms of intersentential relations in a paragraph: one is a human-created linguistical relation that forms a structure (e.g., discourse tree) and the other is a relation from latent representation learned from the sentences themselves. Our two proposed models incorporate each form of relations into document-level language models: the former is a supervised model that jointly learns a language model as well as discourse relation prediction, and the latter is an unsupervised model that is hierarchically conditioned by a recurrent neural network (RNN) over the latent information. Our proposed models with both forms of relations outperform the baselines in partially conditioned paragraph generation task. Our codes and data are publicly available.", "keyphrases": ["coherent flow", "paragraph", "language model"]} +{"id": "jiang-etal-2016-transitivity", "title": "Transitivity in Light Verb Variations in Mandarin Chinese \u2013 A Comparable Corpus-based Statistical Approach", "abstract": "This paper adopts a comparable corpus-based approach to light verb variations in two varieties of Mandarin Chinese and proposes a transitivity (Hopper and Thompson 1980) based theoretical account. 
Light verbs are highly grammaticalized and lack strong collocation restrictions; hence they have posed a challenge for empirical accounts. It is even more challenging to consider their variations between different varieties (e.g. Taiwan and Mainland Mandarin). This current study follows the research paradigm set up in Lin et al. (2014) for differentiating different light verbs and Huang et al. (2014) for automatic discovery of light verb variations. In our study, a corpus-based statistical approach is adopted to show that both internal variety differences between light verbs and external differences between different variants can be detected effectively. The distributional differences between Mainland and Taiwan can also shed light on the re-classification of syntactic types of the taken complement. We further argue that the variations in selection of arguments of light verb in two Mandarin variants can in fact be accounted for in terms of their different degree of transitivity. Higher degree of transitivity in Taiwan Mandarin in fact shows that light verbs are less grammaticalized and hence consistent with the generalization that varieties away from the main speaking community should be more conservative.", "keyphrases": ["light verb", "corpus-based statistical approach", "transitivity"]} +{"id": "bauer-etal-2021-ernie", "title": "ERNIE-NLI: Analyzing the Impact of Domain-Specific External Knowledge on Enhanced Representations for NLI", "abstract": "We examine the effect of domain-specific external knowledge variations on deep large scale language model performance. Recent work in enhancing BERT with external knowledge has been very popular, resulting in models such as ERNIE (Zhang et al., 2019a). Using the ERNIE architecture, we provide a detailed analysis on the types of knowledge that result in a performance increase on the Natural Language Inference (NLI) task, specifically on the Multi-Genre Natural Language Inference Corpus (MNLI). While ERNIE uses general TransE embeddings, we instead train domain-specific knowledge embeddings and insert this knowledge via an information fusion layer in the ERNIE architecture, allowing us to directly control and analyze knowledge input. Using several different knowledge training objectives, sources of knowledge, and knowledge ablations, we find a strong correlation between knowledge and classification labels within the same polarity, illustrating that knowledge polarity is an important feature in predicting entailment. We also perform classification change analysis across different knowledge variations to illustrate the importance of selecting appropriate knowledge input regarding content and polarity, and show representative examples of these changes.", "keyphrases": ["external knowledge", "knowledge embedding", "ernie-nli"]} +{"id": "thater-etal-2010-contextualizing", "title": "Contextualizing Semantic Representations Using Syntactically Enriched Vector Models", "abstract": "We present a syntactically enriched vector model that supports the computation of contextualized semantic representations in a quasi compositional fashion. It employs a systematic combination of first- and second-order context vectors. 
We apply our model to two different tasks and show that (i) it substantially outperforms previous work on a paraphrase ranking task, and (ii) achieves promising results on a word sense similarity task; to our knowledge, it is the first time that an unsupervised method has been applied to this task.", "keyphrases": ["paraphrase", "vector space", "dependency relation", "distributional model", "direct denotation"]} +{"id": "xu-etal-2013-open", "title": "Open Information Extraction with Tree Kernels", "abstract": "Traditional relation extraction seeks to identify pre-specified semantic relations within natural language text, while open Information Extraction (Open IE) takes a more general approach, and looks for a variety of relations without restriction to a fixed relation set. With this generalization comes the question, what is a relation? For example, should the more general task be restricted to relations mediated by verbs, nouns, or both? To help answer this question, we propose two levels of subtasks for Open IE. One task is to determine if a sentence potentially contains a relation between two entities. The other task looks to confirm explicit relation words for two entities. We propose multiple SVM models with dependency tree kernels for both tasks. For explicit relation extraction, our system can extract both noun and verb relations. Our results on three datasets show that our system is superior when compared to state-of-the-art systems like REVERB and OLLIE for both tasks. For example, in some experiments our system achieves 33% improvement on nominal relation extraction over OLLIE. In addition we propose an unsupervised rule-based approach which can serve as a strong baseline for Open IE systems.", "keyphrases": ["dependency tree kernel", "state-of-the-art system", "open information extraction"]} +{"id": "brody-elhadad-2010-unsupervised", "title": "An Unsupervised Aspect-Sentiment Model for Online Reviews", "abstract": "With the increase in popularity of online review sites comes a corresponding need for tools capable of extracting the information most important to the user from the plain text data. Due to the diversity in products and services being reviewed, supervised methods are often not practical. We present an unsupervised system for extracting aspects and determining sentiment in review text. The method is simple and flexible with regard to domain and language, and takes into account the influence of aspect on sentiment polarity, an issue largely ignored in previous literature. We demonstrate its effectiveness on both component tasks, where it achieves similar results to more complex semi-supervised methods that are restricted by their reliance on manual annotation and extensive knowledge sources.", "keyphrases": ["text data", "sentiment analysis", "topic model", "different aspect"]} +{"id": "rubino-etal-2012-statistical", "title": "Statistical Post-Editing of Machine Translation for Domain Adaptation", "abstract": "This paper presents a statistical approach to adapt out-of-domain machine translation systems to the medical domain through an unsupervised post-editing step. A statistical post-editing model is built on statistical machine translation (SMT) outputs aligned with their translation references. Evaluations carried out to translate medical texts from French to English show that an out-of-domain machine translation system can be adapted a posteriori to a specific domain. 
Two SMT systems are studied: a state-of-the-art phrase-based implementation and an online publicly available system. Our experiments also indicate that selecting sentences for post-editing leads to significant improvements of translation quality and that more gains are still possible with respect to an oracle measure.", "keyphrases": ["machine translation", "french", "specific domain", "statistical post-editing"]} +{"id": "seeker-kuhn-2013-morphological", "title": "Morphological and Syntactic Case in Statistical Dependency Parsing", "abstract": "Most morphologically rich languages with free word order use case systems to mark the grammatical function of nominal elements, especially for the core argument functions of a verb. The standard pipeline approach in syntactic dependency parsing assumes a complete disambiguation of morphological (case) information prior to automatic syntactic analysis. Parsing experiments on Czech, German, and Hungarian show that this approach is susceptible to propagating morphological annotation errors when parsing languages displaying syncretism in their morphological case paradigms. We develop a different architecture where we use case as a possibly underspecified filtering device restricting the options for syntactic analysis. Carefully designed morpho-syntactic constraints can delimit the search space of a statistical dependency parser and exclude solutions that would violate the restrictions overtly marked in the morphology of the words in a given sentence. The constrained system outperforms a state-of-the-art data-driven pipeline architecture, as we show experimentally, and, in addition, the parser output comes with guarantees about local and global morpho-syntactic wellformedness, which can be useful for downstream applications.", "keyphrases": ["czech", "hungarian", "search space", "dependency parser"]} +{"id": "fivez-etal-2017-unsupervised", "title": "Unsupervised Context-Sensitive Spelling Correction of Clinical Free-Text with Word and Character N-Gram Embeddings", "abstract": "We present an unsupervised context-sensitive spelling correction method for clinical free-text that uses word and character n-gram embeddings. Our method generates misspelling replacement candidates and ranks them according to their semantic fit, by calculating a weighted cosine similarity between the vectorized representation of a candidate and the misspelling context. We greatly outperform two baseline off-the-shelf spelling correction tools on a manually annotated MIMIC-III test set, and counter the frequency bias of an optimized noisy channel model, showing that neural embeddings can be successfully exploited to include context-awareness in a spelling correction model.", "keyphrases": ["spelling correction", "clinical free-text", "character n-gram embedding"]} +{"id": "kim-mooney-2010-generative", "title": "Generative Alignment and Semantic Parsing for Learning from Ambiguous Supervision", "abstract": "We present a probabilistic generative model for learning semantic parsers from ambiguous supervision. Our approach learns from natural language sentences paired with world states consisting of multiple potential logical meaning representations. It disambiguates the meaning of each sentence while simultaneously learning a semantic parser that maps sentences into logical form. Compared to a previous generative model for semantic alignment, it also supports full semantic parsing. 
Experimental results on the Robocup sportscasting corpora in both English and Korean indicate that our approach produces more accurate semantic alignments than existing methods and also produces competitive semantic parsers and improved language generators.", "keyphrases": ["semantic parsing", "generative model", "generation process", "correspondence"]} +{"id": "kilickaya-etal-2017-evaluating", "title": "Re-evaluating Automatic Metrics for Image Captioning", "abstract": "The task of generating natural language descriptions from images has received a lot of attention in recent years. Consequently, it is becoming increasingly important to evaluate such image captioning approaches in an automatic manner. In this paper, we provide an in-depth evaluation of the existing image captioning metrics through a series of carefully designed experiments. Moreover, we explore the utilization of the recently proposed Word Mover's Distance (WMD) document metric for the purpose of image captioning. Our findings outline the differences and/or similarities between metrics and their relative robustness by means of extensive correlation, accuracy and distraction based evaluations. Our results also demonstrate that WMD provides strong advantages over other metrics.", "keyphrases": ["image captioning", "word mover", "distance", "wmd", "human judgment"]} +{"id": "li-etal-2019-rumor", "title": "Rumor Detection on Social Media: Datasets, Methods and Opportunities", "abstract": "Social media platforms have been used for information and news gathering, and they are very valuable in many applications. However, they also lead to the spreading of rumors and fake news. Many efforts have been taken to detect and debunk rumors on social media by analyzing their content and social context using machine learning techniques. This paper gives an overview of the recent studies in the rumor detection field. It provides a comprehensive list of datasets used for rumor detection, and reviews the important studies based on what types of information they exploit and the approaches they take. And more importantly, we also present several new directions for future research.", "keyphrases": ["rumor detection", "social medium", "twitter", "misinformation"]} +{"id": "narayan-gardent-2012-error", "title": "Error Mining with Suspicion Trees: Seeing the Forest for the Trees", "abstract": "In recent years, error mining approaches have been proposed to identify the most likely sources of errors in symbolic parsers and generators. However the techniques used generate a flat list of suspicious forms ranked by decreasing order of suspicion. We introduce a novel algorithm that structures the output of error mining into a tree (called, suspicion tree) highlighting the relationships between suspicious forms. We illustrate the impact of our approach by applying it to detect and analyse the most likely sources of failure in surface realisation; and we show how the suspicion tree built by our algorithm helps presenting the errors identified by error mining in a linguistically meaningful way thus providing better support for error analysis. 
The right frontier of the tree highlights the relative importance of the main error cases while the subtrees of a node indicate how a given error case divides into smaller, more specific cases.", "keyphrases": ["suspicion tree", "likely source", "failure", "error mining"]} +{"id": "ehara-etal-2014-formalizing", "title": "Formalizing Word Sampling for Vocabulary Prediction as Graph-based Active Learning", "abstract": "Predicting vocabulary of second language learners is essential to support their language learning; however, because of the large size of language vocabularies, we cannot collect information on the entire vocabulary. For practical measurements, we need to sample a small portion of words from the entire vocabulary and predict the rest of the words. In this study, we propose a novel framework for this sampling method. Current methods rely on simple heuristic techniques involving inflexible manual tuning by educational experts. We formalize these heuristic techniques as a graph-based non-interactive active learning method as applied to a special graph. We show that by extending the graph, we can support additional functionality such as incorporating domain specificity and sampling from multiple corpora. In our experiments, we show that our extended methods outperform other methods in terms of vocabulary prediction accuracy when the number of samples is small.", "keyphrases": ["vocabulary prediction", "graph-based active learning", "learner", "label propagation algorithm"]} +{"id": "pezzelle-etal-2018-comparatives", "title": "Comparatives, Quantifiers, Proportions: a Multi-Task Model for the Learning of Quantities from Vision", "abstract": "The present work investigates whether different quantification mechanisms (set comparison, vague quantification, and proportional estimation) can be jointly learned from visual scenes by a multi-task computational model. The motivation is that, in humans, these processes underlie the same cognitive, non-symbolic ability, which allows an automatic estimation and comparison of set magnitudes. We show that when information about lower-complexity tasks is available, the higher-level proportional task becomes more accurate than when performed in isolation. Moreover, the multi-task model is able to generalize to unseen combinations of target/non-target objects. Consistently with behavioral evidence showing the interference of absolute number in the proportional task, the multi-task model no longer works when asked to provide the number of target objects in the scene.", "keyphrases": ["quantifier", "multi-task model", "scene", "comparative"]} +{"id": "bartlett-etal-2008-automatic", "title": "Automatic Syllabification with Structured SVMs for Letter-to-Phoneme Conversion", "abstract": "We present the first English syllabification system to improve the accuracy of letter-to-phoneme conversion. We propose a novel discriminative approach to automatic syllabification based on structured SVMs. In comparison with a state-of-the-art syllabification system, we reduce the syllabification word error rate for English by 33%. 
Our approach also performs well on other languages, comparing favorably with published results on German and Dutch.", "keyphrases": ["structured svms", "automatic syllabification", "orthographic form", "phoneme"]} +{"id": "yuret-yatbaz-2010-noisy", "title": "The Noisy Channel Model for Unsupervised Word Sense Disambiguation", "abstract": "We introduce a generative probabilistic model, the noisy channel model, for unsupervised word sense disambiguation. In our model, each context C is modeled as a distinct channel through which the speaker intends to transmit a particular meaning S using a possibly ambiguous word W. To reconstruct the intended meaning the hearer uses the distribution of possible meanings in the given context P(S|C) and possible words that can express each meaning P(W|S). We assume P(W|S) is independent of the context and estimate it using WordNet sense frequencies. The main problem of unsupervised WSD is estimating context-dependent P(S|C) without access to any sense-tagged text. We show one way to solve this problem using a statistical language model based on large amounts of untagged text. Our model uses coarse-grained semantic classes for S internally and we explore the effect of using different levels of granularity on WSD performance. The system outputs fine-grained senses for evaluation, and its performance on noun disambiguation is better than most previously reported unsupervised systems and close to the best supervised systems.", "keyphrases": ["noisy channel model", "word sense disambiguation", "substitution", "large corpora"]} +{"id": "cohn-lapata-2008-sentence", "title": "Sentence Compression Beyond Word Deletion", "abstract": "In this paper we generalise the sentence compression task. Rather than simply shorten a sentence by deleting words or constituents, as in previous work, we rewrite it using additional operations such as substitution, reordering, and insertion. We present a new corpus that is suited to our task and a discriminative tree-to-tree transduction model that can naturally account for structural and lexical mismatches. The model incorporates a novel grammar extraction method, uses a language model for coherent output, and can be easily tuned to a wide range of compression specific loss functions.", "keyphrases": ["compression", "grammar extraction method", "translation model", "statistical method", "paraphrasing"]} +{"id": "cotterell-etal-2016-morphological", "title": "Morphological Smoothing and Extrapolation of Word Embeddings", "abstract": "Languages with rich inflectional morphology exhibit lexical data sparsity, since the word used to express a given concept will vary with the syntactic context. For instance, each count noun in Czech has 12 forms (where English uses only singular and plural). Even in large corpora, we are unlikely to observe all inflections of a given lemma. This reduces the vocabulary coverage of methods that induce continuous representations for words from distributional corpus information. We solve this problem by exploiting existing morphological resources that can enumerate a word\u2019s component morphemes. We present a latent-variable Gaussian graphical model that allows us to extrapolate continuous representations for words not observed in the training corpus, as well as smoothing the representations provided for the observed words. The latent variables represent embeddings of morphemes, which combine to create embeddings of words. 
Over several languages and training sizes, our model improves the embeddings for words, when evaluated on an analogy task, skip-gram predictive accuracy, and word similarity.", "keyphrases": ["word embedding", "morphological resource", "graphical model", "morpheme-based post-processor"]} +{"id": "meurers-etal-2011-evaluating", "title": "Evaluating Answers to Reading Comprehension Questions in Context: Results for German and the Role of Information Structure", "abstract": "Reading comprehension activities are an authentic task including a rich, language-based context, which makes them an interesting real-life challenge for research into automatic content analysis. For textual entailment research, content assessment of reading comprehension exercises provides an interesting opportunity for extrinsic, real-purpose evaluation, which also supports the integration of context and task information into the analysis. \n \nIn this paper, we discuss the first results for content assessment of reading comprehension activities for German and present results which are competitive with the current state of the art for English. Diving deeper into the results, we provide an analysis in terms of the different question types and the ways in which the information asked for is encoded in the text. \n \nWe then turn to analyzing the role of the question and argue that the surface-based account of information that is given in the question should be replaced with a more sophisticated, linguistically informed analysis of the information structuring of the answer in the context of the question that it is a response to.", "keyphrases": ["comprehension question", "content assessment", "short answer", "language learner"]} +{"id": "hu-etal-2013-unsupervised", "title": "Unsupervised Induction of Contingent Event Pairs from Film Scenes", "abstract": "Human engagement in narrative is partially driven by reasoning about discourse relations between narrative events, and the expectations about what is likely to happen next that results from such reasoning. Researchers in NLP have tackled modeling such expectations from a range of perspectives, including treating it as the inference of the CONTINGENT discourse relation, or as a type of common-sense causal reasoning. Our approach is to model likelihood between events by drawing on several of these lines of previous work. We implement and evaluate different unsupervised methods for learning event pairs that are likely to be CONTINGENT on one another. We refine event pairs that we learn from a corpus of film scene descriptions utilizing web search counts, and evaluate our results by collecting human judgments of contingency. Our results indicate that the use of web search counts increases the average accuracy of our best method to 85.64% over a baseline of 50%, as compared to an average accuracy of 75.15% without web search.", "keyphrases": ["contingency", "event pair", "film scene description", "causal potential"]} +{"id": "kaji-2003-word", "title": "Word Sense Acquisition from Bilingual Comparable Corpora", "abstract": "Manually constructing an inventory of word senses has suffered from problems including high cost, arbitrary assignment of meaning to words, and mismatch to domains. To overcome these problems, we propose a method to assign word meaning from a bilingual comparable corpus and a bilingual dictionary. It clusters second-language translation equivalents of a first-language target word on the basis of their translingually aligned distribution patterns. 
Thus it produces a hierarchy of corpus-relevant meanings of the target word, each of which is defined with a set of translation equivalents. The effectiveness of the method has been demonstrated through an experiment using a comparable corpus consisting of Wall Street Journal and Nihon Keizai Shimbun corpora together with the EDR bilingual dictionary.", "keyphrases": ["comparable corpora", "clustering", "word sense acquisition"]} +{"id": "suresh-ong-2021-negatives", "title": "Not All Negatives are Equal: Label-Aware Contrastive Loss for Fine-grained Text Classification", "abstract": "Fine-grained classification involves dealing with datasets with larger number of classes with subtle differences between them. Guiding the model to focus on differentiating dimensions between these commonly confusable classes is key to improving performance on fine-grained tasks. In this work, we analyse the contrastive fine-tuning of pre-trained language models on two fine-grained text classification tasks, emotion classification and sentiment analysis. We adaptively embed class relationships into a contrastive objective function to help differently weigh the positives and negatives, and in particular, weighting closely confusable negatives more than less similar negative examples. We find that Label-aware Contrastive Loss outperforms previous contrastive methods, in the presence of larger number and/or more confusable classes, and helps models to produce output distributions that are more differentiated.", "keyphrases": ["negative", "contrastive loss", "fine-grained classification"]} +{"id": "baldwin-etal-2010-panlex", "title": "PanLex and LEXTRACT: Translating all Words of all Languages of the World", "abstract": "PanLex is a lemmatic translation resource which combines a large number of translation dictionaries and other translingual lexical resources. It currently covers 1353 language varieties and 12M expressions, but aims to cover all languages and up to 350M expressions. This paper describes the resource and current applications of it, as well as lextract, a new effort to expand the coverage of PanLex via semi-automatic dictionary scraping.", "keyphrases": ["lextract", "world", "bilingual dictionary", "database"]} +{"id": "choe-charniak-2013-naive", "title": "Naive Bayes Word Sense Induction", "abstract": "We introduce an extended naive Bayes model for word sense induction (WSI) and apply it to a WSI task. The extended model incorporates the idea that words closer to the target word are more relevant in predicting its sense. The proposed model is very simple yet effective when evaluated on SemEval-2010 WSI data.", "keyphrases": ["naive bayes model", "wsi", "target word"]} +{"id": "jiang-etal-2018-chengyu", "title": "Chengyu Cloze Test", "abstract": "We present a neural recommendation model for Chengyu, which is a special type of Chinese idiom. Given a query, which is a sentence with an empty slot where the Chengyu is taken out, our model will recommend the best Chengyu candidate that best fits the slot context. The main challenge lies in that the literal meaning of a Chengyu is usually very different from its figurative meaning. We propose a new neural approach to leverage the definition of each Chengyu and incorporate it as background knowledge. Experiments on both Chengyu cloze test and coherence checking in college entrance exams show that our system achieves 89.5% accuracy on cloze test and outperforms human subjects who attended competitive universities in China. 
We will make all of our data sets and resources publicly available as a new benchmark for research purposes.", "keyphrases": ["cloze test", "idiom", "chengyu"]} +{"id": "ye-etal-2020-coreferential", "title": "Coreferential Reasoning Learning for Language Representation", "abstract": "Language representation models such as BERT could effectively capture contextual semantic information from plain text, and have been proved to achieve promising results in lots of downstream NLP tasks with appropriate fine-tuning. However, most existing language representation models cannot explicitly handle coreference, which is essential to the coherent understanding of the whole discourse. To address this issue, we present CorefBERT, a novel language representation model that can capture the coreferential relations in context. The experimental results show that, compared with existing baseline models, CorefBERT can achieve significant improvements consistently on various downstream NLP tasks that require coreferential reasoning, while maintaining comparable performance to previous models on other common NLP tasks. The source code and experiment details of this paper can be obtained from .", "keyphrases": ["language representation model", "corefbert", "coreferential relation", "pre-training"]} +{"id": "liu-etal-2007-forest", "title": "Forest-to-String Statistical Translation Rules", "abstract": "In this paper, we propose forest-to-string rules to enhance the expressive power of tree-to-string translation models. A forest-to-string rule is capable of capturing nonsyntactic phrase pairs by describing the correspondence between multiple parse trees and one string. To integrate these rules into tree-to-string translation models, auxiliary rules are introduced to provide a generalization level. Experimental results show that, on the NIST 2005 Chinese-English test set, the tree-to-string model augmented with forest-to-string rules achieves a relative improvement of 4.3% in terms of BLEU score over the original model which allows tree-to-string rules only.", "keyphrases": ["expressive power", "translation model", "auxiliary rule", "tree-to-string model", "forest"]} +{"id": "jin-etal-2020-unsupervised", "title": "Unsupervised Morphological Paradigm Completion", "abstract": "We propose the task of unsupervised morphological paradigm completion. Given only raw text and a lemma list, the task consists of generating the morphological paradigms, i.e., all inflected forms, of the lemmas. From a natural language processing (NLP) perspective, this is a challenging unsupervised task, and high-performing systems have the potential to improve tools for low-resource languages or to assist linguistic annotators. From a cognitive science perspective, this can shed light on how children acquire morphological knowledge. We further introduce a system for the task, which generates morphological paradigms via the following steps: (i) EDIT TREE retrieval, (ii) additional lemma retrieval, (iii) paradigm size discovery, and (iv) inflection generation. We perform an evaluation on 14 typologically diverse languages. 
Our system outperforms trivial baselines with ease and, for some languages, even obtains a higher accuracy than minimally supervised systems.", "keyphrases": ["morphological paradigm completion", "annotator", "following step", "paradigm size discovery", "baseline system"]} +{"id": "zhang-etal-2013-beyond", "title": "Beyond Left-to-Right: Multiple Decomposition Structures for SMT", "abstract": "Standard phrase-based translation models do not explicitly model context dependence between translation units. As a result, they rely on large phrase pairs and target language models to recover contextual effects in translation. In this work, we explore n-gram models over Minimal Translation Units (MTUs) to explicitly capture contextual dependencies across phrase boundaries in the channel model. As there is no single best direction in which contextual information should flow, we explore multiple decomposition structures as well as dynamic bidirectional decomposition. The resulting models are evaluated in an intrinsic task of lexical selection for MT as well as a full MT system, through n-best reranking. These experiments demonstrate that additional contextual modeling does indeed benefit a phrase-based system and that the direction of conditioning is important. Integrating multiple conditioning orders provides consistent benefit, and the most important directions differ by language pair.", "keyphrases": ["multiple decomposition structure", "language model", "markov chain ordering", "tuple"]} +{"id": "hossain-etal-2020-covidlies", "title": "COVIDLies: Detecting COVID-19 Misinformation on Social Media", "abstract": "The ongoing pandemic has heightened the need for developing tools to flag COVID-19-related misinformation on the internet, specifically on social media such as Twitter. However, due to novel language and the rapid change of information, existing misinformation detection datasets are not effective for evaluating systems designed to detect misinformation on this topic. Misinformation detection can be divided into two sub-tasks: (i) retrieval of misconceptions relevant to posts being checked for veracity, and (ii) stance detection to identify whether the posts Agree, Disagree, or express No Stance towards the retrieved misconceptions. To facilitate research on this task, we release COVIDLies ( ), a dataset of 6761 expert-annotated tweets to evaluate the performance of misinformation detection systems on 86 different pieces of COVID-19 related misinformation. We evaluate existing NLP systems on this dataset, providing initial benchmarks and identifying key challenges for future models to improve upon.", "keyphrases": ["covid-19 misinformation", "expert-annotated tweet", "covidlie", "social medium"]} +{"id": "evang-etal-2013-elephant", "title": "Elephant: Sequence Labeling for Word and Sentence Segmentation", "abstract": "Tokenization is widely regarded as a solved problem due to the high accuracy that rule-based tokenizers achieve. But rule-based tokenizers are hard to maintain and their rules are language specific. We show that high-accuracy word and sentence segmentation can be achieved by using supervised sequence labeling on the character level combined with unsupervised feature learning. 
We evaluated our method on three languages and obtained error rates of 0.27 \u2030 (English), 0.35 \u2030 (Dutch) and 0.76 \u2030 (Italian) for our best models.", "keyphrases": ["sentence segmentation", "tokenization", "dutch", "elephant"]} +{"id": "heilman-etal-2008-analysis", "title": "An Analysis of Statistical Models and Features for Reading Difficulty Prediction", "abstract": "A reading difficulty measure can be described as a function or model that maps a text to a numerical value corresponding to a difficulty or grade level. We describe a measure of readability that uses a combination of lexical features and grammatical features that are derived from subtrees of syntactic parses. We also tested statistical models for nominal, ordinal, and interval scales of measurement. The results indicate that a model for ordinal regression, such as the proportional odds model, using a combination of grammatical and lexical features is most effective at predicting reading difficulty.", "keyphrases": ["statistical model", "difficulty", "grade level", "grammatical feature"]} +{"id": "purver-etal-2009-cascaded", "title": "Cascaded Lexicalised Classifiers for Second-Person Reference Resolution", "abstract": "This paper examines the resolution of the second person English pronoun you in multi-party dialogue. Following previous work, we attempt to classify instances as generic or referential, and in the latter case identify the singular or plural addressee. We show that accuracy and robustness can be improved by use of simple lexical features, capturing the intuition that different uses and addressees are associated with different vocabularies; and we show that there is an advantage to treating referentiality and addressee identification as separate (but connected) problems.", "keyphrases": ["second-person reference resolution", "referentiality", "second-person pronoun"]} +{"id": "yamada-baldwin-2004-automatic", "title": "Automatic Discovery of Telic and Agentive Roles from Corpus Data", "abstract": "We present two methods for automatically discovering the telic and agentive roles of nouns from corpus data. These relations form part of the qualia structure assumed in generative lexicon theory, where the telic role represents a typical purpose of the entity and the agentive role represents the origin of the entity. The first discovery method uses hand-generated templates for each role type, and the second employs a supervised machine-learning technique. To evaluate the effectiveness of the two methods, we took a sample of 30 nouns, selected 50 verbs for each, and then generated a ranked list of verbs for a given noun. Using a variant of Spearman\u2019s rank correlation, we demonstrate the ability of the methods to identify qualia structure.", "keyphrases": ["telic", "corpus data", "noun", "qualia structure", "regular expression"]} +{"id": "king-etal-2014-iucl", "title": "The IUCL+ System: Word-Level Language Identification via Extended Markov Models", "abstract": "We describe the IUCL+ system for the shared task of the First Workshop on Computational Approaches to Code Switching (Solorio et al., 2014), in which participants were challenged to label each word in Twitter texts as a named entity or one of two candidate languages. Our system combines character n-gram probabilities, lexical probabilities, word label transition probabilities and existing named entity recognition tools within a Markov model framework that weights these components and assigns a label. 
Our approach is language-independent, and we submitted results for all data sets (five test sets and three \u201csurprise\u201d sets, covering four language pairs), earning the highest accuracy score on the tweet level on two language pairs (Mandarin-English, Arabic dialects 1 & 2) and one of the surprise sets (Arabic-dialects).", "keyphrases": ["iucl+ system", "lexical probability", "markov model framework", "entity recognition tool"]} +{"id": "zens-etal-2012-systematic", "title": "A Systematic Comparison of Phrase Table Pruning Techniques", "abstract": "When trained on very large parallel corpora, the phrase table component of a machine translation system grows to consume vast computational resources. In this paper, we introduce a novel pruning criterion that places phrase table pruning on a sound theoretical foundation. Systematic experiments on four language pairs under various data conditions show that our principled approach is superior to existing ad hoc pruning methods.", "keyphrases": ["phrase table", "pruning technique", "translation quality"]} +{"id": "palmer-etal-2007-sequencing", "title": "A Sequencing Model for Situation Entity Classification", "abstract": "Situation entities (SEs) are the events, states, generic statements, and embedded facts and propositions introduced to a discourse by clauses of text. We report on the first data-driven models for labeling clauses according to the type of SE they introduce. SE classification is important for discourse mode identification and for tracking the temporal progression of a discourse. We show that (a) linguistically-motivated cooccurrence features and grammatical relation information from deep syntactic analysis improve classification accuracy and (b) using a sequencing model provides improvements over assigning labels based on the utterance alone. We report on genre effects which support the analysis of discourse modes having characteristic distributions and sequences of SEs.", "keyphrases": ["situation entity", "clause", "maximum entropy model", "brown corpus"]} +{"id": "tan-etal-2019-multilingual", "title": "Multilingual Neural Machine Translation with Language Clustering", "abstract": "Multilingual neural machine translation (NMT), which translates multiple languages using a single model, is of great practical importance due to its advantages in simplifying the training process, reducing online maintenance costs, and enhancing low-resource and zero-shot translation. Given there are thousands of languages in the world and some of them are very different, it is extremely burdensome to handle them all in a single model or use a separate model for each language pair. Therefore, given a fixed resource budget, e.g., the number of models, how to determine which languages should be supported by one model is critical to multilingual NMT, which, unfortunately, has been ignored by previous work. In this work, we develop a framework that clusters languages into different groups and trains one multilingual model for each cluster. We study two methods for language clustering: (1) using prior knowledge, where we cluster languages according to language family, and (2) using language embedding, in which we represent each language by an embedding vector and cluster them in the embedding space. In particular, we obtain the embedding vectors of all the languages by training a universal neural machine translation model.
Our experiments on 23 languages show that the first clustering method is simple and easy to understand but leads to suboptimal translation accuracy, while the second method captures the relationships among languages well and improves the translation accuracy for almost all the languages over baseline methods.", "keyphrases": ["neural machine translation", "language clustering", "zero-shot translation", "world", "multilingual setting"]} +{"id": "hu-etal-2019-looking", "title": "Are You Looking? Grounding to Multiple Modalities in Vision-and-Language Navigation", "abstract": "Vision-and-Language Navigation (VLN) requires grounding instructions, such as \u201cturn right and stop at the door\u201d, to routes in a visual environment. The actual grounding can connect language to the environment through multiple modalities, e.g. \u201cstop at the door\u201d might ground into visual objects, while \u201cturn right\u201d might rely only on the geometric structure of a route. We investigate where the natural language empirically grounds under two recent state-of-the-art VLN models. Surprisingly, we discover that visual features may actually hurt these models: models which only use route structure, ablating visual features, outperform their visual counterparts in unseen new environments on the benchmark Room-to-Room dataset. To better use all the available modalities, we propose to decompose the grounding procedure into a set of expert models with access to different modalities (including object detections) and ensemble them at prediction time, improving the performance of state-of-the-art models on the VLN task.", "keyphrases": ["grounding", "modality", "vision-and-language navigation", "visual input", "agent"]} +{"id": "zielinski-mutschke-2018-towards", "title": "Towards a Gold Standard Corpus for Variable Detection and Linking in Social Science Publications", "abstract": "In this paper, we describe our effort to create a new corpus for the evaluation of detecting and linking so-called survey variables in social science publications (e.g., \u201cDo you believe in Heaven?\u201d). The task is to recognize survey variable mentions in a given text, disambiguate them, and link them to the corresponding variable within a knowledge base. Since there are generally hundreds of candidates to link to and due to the wide variety of forms they can take, this is a challenging task within NLP. The contribution of our work is the first gold standard corpus for the variable detection and linking task. We describe the annotation guidelines and the annotation process. The produced corpus is multilingual \u2013 German and English \u2013 and includes manually curated word and phrase alignments. Moreover, it includes text samples that could not be assigned to any variables, denoted as negative examples. Based on the new dataset, we conduct an evaluation of several state-of-the-art text classification and textual similarity methods. The annotated corpus is made available along with an open-source baseline system for variable mention identification and linking.", "keyphrases": ["variable detection", "linking", "survey variable"]} +{"id": "morin-daille-2012-revising", "title": "Revising the Compositional Method for Terminology Acquisition from Comparable Corpora", "abstract": "In this paper, we present a new method that improves the alignment of equivalent terms monolingually acquired from bilingual comparable corpora: the Compositional Method with Context-Based Projection (CMCBP).
Our overall objective is to identify and to translate highly specialized terminology made up of multi-word terms acquired from comparable corpora. Our evaluation in the medical domain and for two pairs of languages demonstrates that CMCBP outperforms the state-of-the-art compositional approach commonly used for translationally equivalent multi-word term discovery from comparable corpora.", "keyphrases": ["compositional method", "comparable corpora", "multi-word term", "coverage"]} +{"id": "croce-etal-2020-gan", "title": "GAN-BERT: Generative Adversarial Learning for Robust Text Classification with a Bunch of Labeled Examples", "abstract": "Recent Transformer-based architectures, e.g., BERT, provide impressive results in many Natural Language Processing tasks. However, most of the adopted benchmarks are made of (sometimes hundreds of) thousands of examples. In many real scenarios, obtaining high-quality annotated data is expensive and time-consuming; in contrast, unlabeled examples characterizing the target task can be, in general, easily collected. One promising method to enable semi-supervised learning has been proposed in image processing, based on Semi-Supervised Generative Adversarial Networks. In this paper, we propose GAN-BERT that extends the fine-tuning of BERT-like architectures with unlabeled data in a generative adversarial setting. Experimental results show that the requirement for annotated examples can be drastically reduced (up to only 50-100 annotated examples), still obtaining good performances in several sentence classification tasks.", "keyphrases": ["fine-tuning", "unlabelled data", "adversarial setting", "gan-bert"]} +{"id": "candito-seddah-2010-parsing", "title": "Parsing Word Clusters", "abstract": "We present and discuss experiments in statistical parsing of French, where terminal forms used during training and parsing are replaced by more general symbols, particularly clusters of words obtained through unsupervised linear clustering. We build on the work of Candito and Crabbe (2009) who proposed to use clusters built over slightly coarsened French inflected forms. We investigate the alternative method of building clusters over lemma/part-of-speech pairs, using a raw corpus automatically tagged and lemmatized. We find that both methods lead to comparable improvement over the baseline (we obtain F1=86.20% and F1=86.21% respectively, compared to a baseline of F1=84.10%). Yet, when we replace gold lemma/POS pairs with their corresponding cluster, we obtain an upper bound (F1=87.80) that suggests room for improvement for this technique, should tagging/lemmatisation performance increase for French. \n \nWe also analyze the improvement in performance for both techniques with respect to word frequency. We find that replacing word forms with clusters improves attachment performance for words that are originally either unknown or low-frequency, since these words are replaced by cluster symbols that tend to have higher frequencies.
Furthermore, clustering also helps significantly for medium to high frequency words, suggesting that training on word clusters leads to better probability estimates for these words.", "keyphrases": ["word cluster", "french", "terminal form"]} +{"id": "xu-etal-2021-discriminative", "title": "Discriminative Reasoning for Document-level Relation Extraction", "abstract": "Document-level relation extraction (DocRE) models generally use graph networks to implicitly model the reasoning skill (i.e., pattern recognition, logical reasoning, coreference reasoning, etc.) related to the relation between one entity pair in a document. In this paper, we propose a novel discriminative reasoning framework to explicitly model the paths of these reasoning skills between each entity pair in this document. Thus, a discriminative reasoning network is designed to estimate the relation probability distribution of different reasoning paths based on the constructed graph and vectorized document contexts for each entity pair, thereby recognizing their relation. Experimental results show that our method outperforms the previous state-of-the-art performance on the large-scale DocRE dataset. The code is publicly available at https://github.com/xwjim/DRN.", "keyphrases": ["document-level relation extraction", "docre", "logical reasoning"]} +{"id": "reisinger-etal-2015-semantic", "title": "Semantic Proto-Roles", "abstract": "We present the first large-scale, corpus based verification of Dowty's seminal theory of proto-roles. Our results demonstrate both the need for and the feasibility of a property-based annotation scheme of semantic relationships, as opposed to the currently dominant notion of categorical roles.", "keyphrases": ["proto-role", "decompositional semantic", "protoroles"]} +{"id": "liu-etal-2021-dexperts", "title": "DExperts: Decoding-Time Controlled Text Generation with Experts and Anti-Experts", "abstract": "Despite recent advances in natural language generation, it remains challenging to control attributes of generated text. We propose DExperts: Decoding-time Experts, a decoding-time method for controlled text generation that combines a pretrained language model with \u201cexpert\u201d LMs and/or \u201canti-expert\u201d LMs in a product of experts. Intuitively, under the ensemble, tokens only get high probability if they are considered likely by the experts, and unlikely by the anti-experts. We apply DExperts to language detoxification and sentiment-controlled generation, where we outperform existing controllable generation methods on both automatic and human evaluations. Moreover, because DExperts operates only on the output of the pretrained LM, it is effective with (anti-)experts of smaller size, including when operating on GPT-3. Our work highlights the promise of tuning small LMs on text with (un)desirable attributes for efficient decoding-time steering.", "keyphrases": ["expert", "language model", "dexpert"]} +{"id": "rameshkumar-bailey-2020-storytelling", "title": "Storytelling with Dialogue: A Critical Role Dungeons and Dragons Dataset", "abstract": "This paper describes the Critical Role Dungeons and Dragons Dataset (CRD3) and related analyses. Critical Role is an unscripted, live-streamed show where a fixed group of people play Dungeons and Dragons, an open-ended role-playing game. The dataset is collected from 159 Critical Role episodes transcribed to text dialogues, consisting of 398,682 turns. It also includes corresponding abstractive summaries collected from the Fandom wiki. 
The dataset is linguistically unique in that the narratives are generated entirely through player collaboration and spoken interaction. For each dialogue, there are a large number of turns, multiple abstractive summaries with varying levels of detail, and semantic ties to the previous dialogues. In addition, we provide a data augmentation method that produces 34,243 summary-dialogue chunk pairs to support current neural ML approaches, and we provide an abstractive summarization benchmark and evaluation.", "keyphrases": ["critical role dungeons", "dragons dataset", "conversation"]} +{"id": "blacoe-lapata-2012-comparison", "title": "A Comparison of Vector-based Representations for Semantic Composition", "abstract": "In this paper we address the problem of modeling compositional meaning for phrases and sentences using distributional methods. We experiment with several possible combinations of representation and composition, exhibiting varying degrees of sophistication. Some are shallow while others operate over syntactic structure, rely on parameter learning, or require access to very large corpora. We find that shallow approaches are as good as more computationally intensive alternatives with regards to two particular tests: (1) phrase similarity and (2) paraphrase detection. The sizes of the involved training corpora and the generated vectors are not as important as the fit between the meaning representation and compositional method.", "keyphrases": ["operation", "paraphrase detection", "composition method", "semantic similarity"]} +{"id": "jin-etal-2019-unsupervised", "title": "Unsupervised Learning of PCFGs with Normalizing Flow", "abstract": "Unsupervised PCFG inducers hypothesize sets of compact context-free rules as explanations for sentences. PCFG induction not only provides tools for low-resource languages, but also plays an important role in modeling language acquisition (Bannard et al., 2009; Abend et al. 2017). However, current PCFG induction models, using word tokens as input, are unable to incorporate semantics and morphology into induction, and may encounter issues of sparse vocabulary when facing morphologically rich languages. This paper describes a neural PCFG inducer which employs context embeddings (Peters et al., 2018) in a normalizing flow model (Dinh et al., 2015) to extend PCFG induction to use semantic and morphological information. Linguistically motivated sparsity and categorical distance constraints are imposed on the inducer as regularization. Experiments show that the PCFG induction model with normalizing flow produces grammars with state-of-the-art accuracy on a variety of different languages. Ablation further shows a positive effect of normalizing flow, context embeddings and proposed regularizers.", "keyphrases": ["pcfgs", "normalizing flow", "induction model", "context embedding"]} +{"id": "ling-etal-2013-paraphrasing", "title": "Paraphrasing 4 Microblog Normalization", "abstract": "Compared to the edited genres that have played a central role in NLP research, microblog texts use a more informal register with nonstandard lexical items, abbreviations, and free orthographic variation. When confronted with such input, conventional text analysis tools often perform poorly. Normalization \u2014 replacing orthographically or lexically idiosyncratic forms with more standard variants \u2014 can improve performance. We propose a method for learning normalization rules from machine translations of a parallel corpus of microblog messages. 
To validate the utility of our approach, we evaluate extrinsically, showing that normalizing English tweets and then translating improves translation quality (compared to translating unnormalized text) using three standard web translation services as well as a phrase-based translation system trained on parallel microblog data.", "keyphrases": ["normalization", "parallel corpus", "microblog message", "twitter"]} +{"id": "biran-etal-2012-detecting", "title": "Detecting Influencers in Written Online Conversations", "abstract": "It has long been established that there is a correlation between the dialog behavior of a participant and how influential he or she is perceived to be by other discourse participants. In this paper we explore the characteristics of communication that make someone an opinion leader and develop a machine learning based approach for the automatic identification of discourse participants that are likely to be influencers in online communication. Our approach relies on identification of three types of conversational behavior: persuasion, agreement/disagreement, and dialog patterns.", "keyphrases": ["influencer", "participant", "disagreement", "dialog pattern"]} +{"id": "amir-etal-2019-mental", "title": "Mental Health Surveillance over Social Media with Digital Cohorts", "abstract": "The ability to track mental health conditions via social media opened the doors for large-scale, automated, mental health surveillance. However, inferring accurate population-level trends requires representative samples of the underlying population, which can be challenging given the biases inherent in social media data. While previous work has adjusted samples based on demographic estimates, the populations were selected based on specific outcomes, e.g. specific mental health conditions. We depart from these methods, by conducting analyses over demographically representative digital cohorts of social media users. To validate this approach, we constructed a cohort of US-based Twitter users to measure the prevalence of depression and PTSD, and investigate how these illnesses manifest across demographic subpopulations. The analysis demonstrates that cohort-based studies can help control for sampling biases, contextualize outcomes, and provide deeper insights into the data.", "keyphrases": ["population", "depression", "mental health surveillance", "social medium data"]} +{"id": "mostafazadeh-etal-2020-glucose", "title": "GLUCOSE: GeneraLized and COntextualized Story Explanations", "abstract": "When humans read or listen, they make implicit commonsense inferences that frame their understanding of what happened and why. As a step toward AI systems that can build similar mental models, we introduce GLUCOSE, a large-scale dataset of implicit commonsense causal knowledge, encoded as causal mini-theories about the world, each grounded in a narrative context. To construct GLUCOSE, we drew on cognitive psychology to identify ten dimensions of causal explanation, focusing on events, states, motivations, and emotions. Each GLUCOSE entry includes a story-specific causal statement paired with an inference rule generalized from the statement. This paper details two concrete contributions. First, we present our platform for effectively crowdsourcing GLUCOSE data at scale, which uses semi-structured templates to elicit causal explanations. Using this platform, we collected a total of ~670K specific statements and general rules that capture implicit commonsense knowledge about everyday situations.
Second, we show that existing knowledge resources and pretrained language models do not include or readily predict GLUCOSE's rich inferential content. However, when state-of-the-art neural models are trained on this knowledge, they can start to make commonsense inferences on unseen stories that match humans' mental models.", "keyphrases": ["narrative context", "dimension", "causal explanation", "implicit commonsense knowledge", "language model"]} +{"id": "khapra-etal-2011-takes", "title": "It Takes Two to Tango: A Bilingual Unsupervised Approach for Estimating Sense Distributions using Expectation Maximization", "abstract": "Several bilingual WSD algorithms which exploit translation correspondences between parallel corpora have been proposed. However, the availability of such parallel corpora itself is a tall task for some of the resource-constrained languages of the world. We propose an unsupervised bilingual EM-based algorithm which relies on the counts of translations to estimate sense distributions. No parallel or sense annotated corpora are needed. The algorithm relies on a synset-aligned bilingual dictionary and in-domain corpora from the two languages. A symmetric generalized Expectation Maximization formulation is used wherein the sense distributions of words in one language are estimated based on the raw counts of the words in the aligned synset in the target language. The overall performance of our algorithm when tested on 4 language-domain pairs is better than current state-of-the-art knowledge-based and bilingual unsupervised approaches.", "keyphrases": ["expectation maximization", "wsd", "parallel corpora", "other language"]} +{"id": "lewis-xia-2008-automatically", "title": "Automatically Identifying Computationally Relevant Typological Features", "abstract": "In this paper we explore the potential for identifying computationally relevant typological features from a multilingual corpus of language data built from readily available language data collected off the Web. Our work builds on previous structural projection work, where we extend the work of projection to building individual CFGs for approximately 100 languages. We then use the CFGs to discover the values of typological parameters such as word order, the presence or absence of definite and indefinite determiners, etc. Our methods have the potential of being extended to many more languages and parameters, and can have significant effects on current research focused on tool and resource development for low-density languages and grammar induction from raw corpora.", "keyphrases": ["relevant typological feature", "web", "grammar induction"]} +{"id": "loeff-etal-2006-discriminating", "title": "Discriminating Image Senses by Clustering with Multimodal Features", "abstract": "We discuss Image Sense Discrimination (ISD), and apply a method based on spectral clustering, using multimodal features from the image and text of the embedding web page. We evaluate our method on a new data set of annotated web images, retrieved with ambiguous query terms.
Experiments investigate different levels of sense granularity, as well as the impact of text and image features, and global versus local text features.", "keyphrases": ["multimodal feature", "image sense discrimination", "sense disambiguation"]} +{"id": "grunewald-friedrich-2020-robertnlp", "title": "RobertNLP at the IWPT 2020 Shared Task: Surprisingly Simple Enhanced UD Parsing for English", "abstract": "This paper presents our system at the IWPT 2020 Shared Task on Parsing into Enhanced Universal Dependencies. Using a biaffine classifier architecture (Dozat and Manning, 2017) which operates directly on finetuned RoBERTa embeddings, our parser generates enhanced UD graphs by predicting the best dependency label (or absence of a dependency) for each pair of tokens in the sentence. We address label sparsity issues by replacing lexical items in relations with placeholders at prediction time, later retrieving them from the parse in a rule-based fashion. In addition, we ensure structural graph constraints using a simple set of heuristics. On the English blind test data, our system achieves a very high parsing accuracy, ranking 1st out of 10 with an ELAS F1 score of 88.94%.", "keyphrases": ["iwpt", "shared task", "dependency label"]} +{"id": "rello-etal-2012-elliphant", "title": "Elliphant: Improved Automatic Detection of Zero Subjects and Impersonal Constructions in Spanish", "abstract": "In pro-drop languages, the detection of explicit subjects, zero subjects and non-referential impersonal constructions is crucial for anaphora and co-reference resolution. While the identification of explicit and zero subjects has attracted the attention of researchers in the past, the automatic identification of impersonal constructions in Spanish has not been addressed yet and this work is the first such study. In this paper we present a corpus to underpin research on the automatic detection of these linguistic phenomena in Spanish and a novel machine learning-based methodology for their computational treatment. This study also provides an analysis of the features, discusses performance across two different genres and offers error analysis. The evaluation results show that our system performs better in detecting explicit subjects than alternative systems.", "keyphrases": ["automatic detection", "spanish", "pro-drop language"]} +{"id": "feng-lapata-2010-topic", "title": "Topic Models for Image Annotation and Text Illustration", "abstract": "Image annotation, the task of automatically generating description words for a picture, is a key component in various image search and retrieval applications. Creating image databases for model development is, however, costly and time consuming, since the keywords must be hand-coded and the process repeated for new collections. In this work we exploit the vast resource of images and documents available on the web for developing image annotation models without any human involvement. We describe a probabilistic model based on the assumption that images and their co-occurring textual data are generated by mixtures of latent topics. 
We show that this model outperforms previously proposed approaches when applied to image annotation and the related task of text illustration despite the noisy nature of our dataset.", "keyphrases": ["image annotation", "text illustration", "probabilistic model", "textual data", "latent topic"]} +{"id": "lukasik-etal-2020-text", "title": "Text Segmentation by Cross Segment Attention", "abstract": "Document and discourse segmentation are two fundamental NLP tasks pertaining to breaking up text into constituents, which are commonly used to help downstream tasks such as information retrieval or text summarization. In this work, we propose three transformer-based architectures and provide comprehensive comparisons with previously proposed approaches on three standard datasets. We establish a new state-of-the-art, reducing in particular the error rates by a large margin in all cases. We further analyze model sizes and find that we can build models with many fewer parameters while keeping good performance, thus facilitating real-world applications.", "keyphrases": ["segmentation", "cross segment attention", "fundamental nlp task"]} +{"id": "van-schijndel-schuler-2015-hierarchic", "title": "Hierarchic syntax improves reading time prediction", "abstract": "Previous work has debated whether humans make use of hierarchic syntax when processing language (Frank and Bod, 2011; Fossum and Levy, 2012). This paper uses an eye-tracking corpus to demonstrate that hierarchic syntax significantly improves reading time prediction over a strong n-gram baseline. This study shows that an interpolated 5-gram baseline can be made stronger by combining n-gram statistics over entire eye-tracking regions rather than simply using the last n-gram in each region, but basic hierarchic syntactic measures are still able to achieve significant improvements over this improved baseline.", "keyphrases": ["time prediction", "n-gram", "region", "hierarchic syntax"]} +{"id": "bhattacharyya-etal-2021-energy", "title": "Energy-Based Reranking: Improving Neural Machine Translation Using Energy-Based Models", "abstract": "The discrepancy between maximum likelihood estimation (MLE) and task measures such as BLEU score has been studied before for autoregressive neural machine translation (NMT) and resulted in alternative training algorithms (Ranzato et al., 2016; Norouzi et al., 2016; Shen et al., 2016; Wu et al., 2018). However, MLE training remains the de facto approach for autoregressive NMT because of its computational efficiency and stability. Despite this mismatch between the training objective and task measure, we notice that the samples drawn from an MLE-based trained NMT support the desired distribution \u2013 there are samples with much higher BLEU scores compared to the beam decoding output. To benefit from this observation, we train an energy-based model to mimic the behavior of the task measure (i.e., the energy-based model assigns lower energy to samples with higher BLEU score), which results in a re-ranking algorithm based on the samples drawn from NMT: energy-based re-ranking (EBR). We use both marginal energy models (over target sentence) and joint energy models (over both source and target sentences).
Our EBR with the joint energy model consistently improves the performance of the Transformer-based NMT: +3.7 BLEU points on IWSLT'14 German-English, +3.37 BLEU points on Sinhala-English, +1.4 BLEU points on WMT'16 English-German tasks.", "keyphrases": ["reranker", "neural machine translation", "bleu score", "low energy"]} +{"id": "ruokolainen-etal-2013-supervised", "title": "Supervised Morphological Segmentation in a Low-Resource Learning Setting using Conditional Random Fields", "abstract": "We discuss data-driven morphological segmentation, in which word forms are segmented into morphs, the surface forms of morphemes. Our focus is on a low-resource learning setting, in which only a small amount of annotated word forms are available for model training, while unannotated word forms are available in abundance. The current state-of-the-art methods 1) exploit both the annotated and unannotated data in a semi-supervised manner, and 2) learn morph lexicons and subsequently uncover segmentations by generating the most likely morph sequences. In contrast, we discuss 1) employing only the annotated data in a supervised manner, while entirely ignoring the unannotated data, and 2) directly learning to predict morph boundaries given their local sub-string contexts instead of learning the morph lexicons. Specifically, we employ conditional random fields, a popular discriminative log-linear model for segmentation. We present experiments on two data sets comprising five diverse languages. We show that the fully supervised boundary prediction approach outperforms the state-of-the-art semi-supervised morph lexicon approaches on all languages when using the same annotated data sets.", "keyphrases": ["small amount", "discriminative log-linear model", "supervised morphological segmentation", "crf"]} +{"id": "wan-etal-2005-searching", "title": "Searching for Grammaticality: Propagating Dependencies in the Viterbi Algorithm", "abstract": "In many text-to-text generation scenarios (for instance, summarisation), we encounter human-authored sentences that could be composed by recycling portions of related sentences to form new sentences. In this paper, we couch the generation of such sentences as a search problem. We investigate a statistical sentence generation method which recombines words to form new sentences. We propose an extension to the Viterbi algorithm designed to improve the grammaticality of generated sentences. Within a statistical framework, the extension favours those partially generated strings with a probable dependency tree structure. Our preliminary evaluations show that our approach generates less fragmented text than a bigram baseline.", "keyphrases": ["grammaticality", "viterbi algorithm", "fragmented text", "fluency"]} +{"id": "felice-specia-2012-linguistic", "title": "Linguistic Features for Quality Estimation", "abstract": "This paper describes a study on the contribution of linguistically-informed features to the task of quality estimation for machine translation at sentence level. A standard regression algorithm is used to build models using a combination of linguistic and non-linguistic features extracted from the input text and its machine translation. Experiments with English-Spanish translations show that linguistic features, although informative on their own, are not yet able to outperform shallower features based on statistics from the input text, its translation and additional corpora.
However, further analysis suggests that linguistic information is actually useful but needs to be carefully combined with other features in order to produce better results.", "keyphrases": ["quality estimation", "linguistic feature", "other approach"]} +{"id": "le-thanh-etal-2004-generating", "title": "Generating Discourse Structures for Written Text", "abstract": "This paper presents a system for automatically generating discourse structures from written text. The system is divided into two levels: sentence-level and text-level. The sentence-level discourse parser uses syntactic information and cue phrases to segment sentences into elementary discourse units and to generate discourse structures of sentences. At the text-level, constraints about textual adjacency and textual organization are integrated in a beam search in order to generate the best discourse structures. The experiments were done with documents from the RST Discourse Treebank. The system shows promising results in a reasonable search space compared to the discourse trees generated by human analysts.", "keyphrases": ["syntactic information", "cue phrase", "textual adjacency", "organization"]} +{"id": "salway-touileb-2014-applying", "title": "Applying Grammar Induction to Text Mining", "abstract": "We report the first steps of a novel investigation into how a grammar induction algorithm can be modified and used to identify salient information structures in a corpus. The information structures are to be used as representations of semantic content for text mining purposes. We modify the learning regime of the ADIOS algorithm (Solan et al., 2005) so that text is presented as increasingly large snippets around key terms, and instances of selected structures are substituted with common identifiers in the input for subsequent iterations. The technique is applied to 1.4m blog posts about climate change which mention diverse topics and reflect multiple perspectives and different points of view. Observation of the resulting information structures suggests that they could be useful as representations of semantic content. Preliminary analysis shows that our modifications had a beneficial effect for inducing more useful structures.", "keyphrases": ["grammar induction", "salient information structure", "text mining purpose"]} +{"id": "bollegala-etal-2015-unsupervised", "title": "Unsupervised Cross-Domain Word Representation Learning", "abstract": "Meaning of a word varies from one domain to another. Despite this important domain dependence in word semantics, existing word representation learning methods are bound to a single domain. Given a pair of source-target domains, we propose an unsupervised method for learning domain-specific word representations that accurately capture the domain-specific aspects of word semantics. First, we select a subset of frequent words that occur in both domains as pivots. Next, we optimize an objective function that enforces two constraints: (a) for both source and target domain documents, pivots that appear in a document must accurately predict the co-occurring non-pivots, and (b) word representations learnt for pivots must be similar in the two domains. Moreover, we propose a method to perform domain adaptation using the learnt word representations.
Our proposed method significantly outperforms competitive baselines including the state-of-the-art domain-insensitive word representations, and reports best sentiment classification accuracies for all domain-pairs in a benchmark dataset.", "keyphrases": ["word representation", "non-pivot", "source domain"]} +{"id": "jacovi-etal-2018-understanding", "title": "Understanding Convolutional Neural Networks for Text Classification", "abstract": "We present an analysis into the inner workings of Convolutional Neural Networks (CNNs) for processing text. CNNs used for computer vision can be interpreted by projecting filters into image space, but for discrete sequence inputs CNNs remain a mystery. We aim to understand the method by which the networks process and classify text. We examine common hypotheses to this problem: that filters, accompanied by global max-pooling, serve as ngram detectors. We show that filters may capture several different semantic classes of ngrams by using different activation patterns, and that global max-pooling induces behavior which separates important ngrams from the rest. Finally, we show practical use cases derived from our findings in the form of model interpretability (explaining a trained model by deriving a concrete identity for each filter, bridging the gap between visualization tools in vision tasks and NLP) and prediction interpretability (explaining predictions).", "keyphrases": ["convolutional neural networks", "text classification", "cnn", "filter", "important ngram"]} +{"id": "kumar-etal-2019-topics", "title": "Topics to Avoid: Demoting Latent Confounds in Text Classification", "abstract": "Despite impressive performance on many text classification tasks, deep neural networks tend to learn frequent superficial patterns that are specific to the training data and do not always generalize well. In this work, we observe this limitation with respect to the task of native language identification. We find that standard text classifiers which perform well on the test set end up learning topical features which are confounds of the prediction task (e.g., if the input text mentions Sweden, the classifier predicts that the author's native language is Swedish). We propose a method that represents the latent topical confounds and a model which \u201cunlearns\u201d confounding features by predicting both the label of the input text and the confound; but we train the two predictors adversarially in an alternating fashion to learn a text representation that predicts the correct label but is less prone to using information about the confound. We show that this model generalizes better and learns features that are indicative of the writing style rather than the content.", "keyphrases": ["text classification task", "language identification", "topical confound"]} +{"id": "she-etal-2014-back", "title": "Back to the Blocks World: Learning New Actions through Situated Human-Robot Dialogue", "abstract": "This paper describes an approach for a robotic arm to learn new actions through dialogue in a simplified blocks world. In particular, we have developed a three-tier action knowledge representation that, on one hand, supports the connection between symbolic representations of language and continuous sensorimotor representations of the robot; and on the other hand, supports the application of existing planning algorithms to address novel situations.
Our empirical studies have shown that, based on this representation, the robot was able to learn and execute basic actions in the blocks world. When a human is engaged in a dialogue to teach the robot new actions, step-by-step instructions lead to better learning performance compared to one-shot instructions.", "keyphrases": ["action", "robot", "goal state"]} +{"id": "faruqui-dyer-2015-non", "title": "Non-distributional Word Vector Representations", "abstract": "Data-driven representation learning for words is a technique of central importance in NLP. While indisputably useful as a source of features in downstream tasks, such vectors tend to consist of uninterpretable components whose relationship to the categories of traditional lexical semantic theories is tenuous at best. We present a method for constructing interpretable word vectors from hand-crafted linguistic resources like WordNet, FrameNet etc. These vectors are binary (i.e., contain only 0 and 1) and are 99.9% sparse. We analyze their performance on state-of-the-art evaluation methods for distributional models of word vectors and find they are competitive to standard distributional approaches.", "keyphrases": ["word vector", "hand-crafted linguistic resource", "framenet"]} +{"id": "logan-iv-etal-2022-cutting", "title": "Cutting Down on Prompts and Parameters: Simple Few-Shot Learning with Language Models", "abstract": "Prompting language models (LMs) with training examples and task descriptions has been seen as critical to recent successes in few-shot learning. In this work, we show that finetuning LMs in the few-shot setting can considerably reduce the need for prompt engineering. In fact, one can use null prompts, prompts that contain neither task-specific templates nor training examples, and achieve competitive accuracy to manually-tuned prompts across a wide range of tasks. While finetuning LMs does introduce new parameters for each downstream task, we show that this memory overhead can be substantially reduced: finetuning only the bias terms can achieve comparable or better accuracy than standard finetuning while only updating 0.1% of the parameters. All in all, we recommend finetuning LMs for few-shot learning as it is more accurate, robust to different prompts, and can be made nearly as efficient as using frozen LMs.", "keyphrases": ["prompts", "few-shot learning", "language model", "task description", "low performance"]} +{"id": "yanaka-etal-2021-exploring", "title": "Exploring Transitivity in Neural NLI Models through Veridicality", "abstract": "Despite the recent success of deep neural networks in natural language processing, the extent to which they can demonstrate human-like generalization capacities for natural language understanding remains unclear. We explore this issue in the domain of natural language inference (NLI), focusing on the transitivity of inference relations, a fundamental property for systematically drawing inferences. A model capturing transitivity can compose basic inference patterns and draw new inferences. We introduce an analysis method using synthetic and naturalistic NLI datasets involving clause-embedding verbs to evaluate whether models can perform transitivity inferences composed of veridical inferences and arbitrary inference types. We find that current NLI models do not perform consistently well on transitivity inference tasks, suggesting that they lack the generalization capacity for drawing composite inferences from provided training examples.
The data and code for our analysis are publicly available at .", "keyphrases": ["transitivity", "nli", "veridicality"]} +{"id": "corro-2020-span", "title": "Span-based discontinuous constituency parsing: a family of exact chart-based algorithms with time complexities from O(n^6) down to O(n^3)", "abstract": "We introduce a novel chart-based algorithm for span-based parsing of discontinuous constituency trees of block degree two, including ill-nested structures. In particular, we show that we can build variants of our parser with smaller search spaces and time complexities ranging from O(n^6) down to O(n^3). The cubic time variant covers 98% of constituents observed in linguistic treebanks while having the same complexity as continuous constituency parsers. We evaluate our approach on German and English treebanks (Negra, Tiger, and DPTB) and report state-of-the-art results in the fully supervised setting. We also experiment with pre-trained word embeddings and Bert-based neural networks.", "keyphrases": ["chart-based algorithm", "time complexity", "state-of-the-art result"]} +{"id": "jindal-etal-2020-killed", "title": "Is Killed More Significant than Fled? A Contextual Model for Salient Event Detection", "abstract": "Identifying the key events in a document is critical to holistically understanding its important information. Although measuring the salience of events is highly contextual, most previous work has used a limited representation of events that omits essential information. In this work, we propose a highly contextual model of event salience that uses a rich representation of events, incorporates document-level information and allows for interactions between latent event encodings. Our experimental results on an event salience dataset demonstrate that our model improves over previous work by an absolute 2-4% on standard metrics, establishing a new state-of-the-art performance for the task. We also propose a new evaluation metric that addresses flaws in previous evaluation methodologies. Finally, we discuss the importance of salient event detection for the downstream task of summarization.", "keyphrases": ["contextual model", "salient event detection", "event salience", "summarization"]} +{"id": "lin-xue-2019-parsing", "title": "Parsing Meaning Representations: Is Easier Always Better?", "abstract": "The parsing accuracy varies a great deal for different meaning representations. In this paper, we compare the parsing performances between Abstract Meaning Representation (AMR) and Minimal Recursion Semantics (MRS), and provide an in-depth analysis of what factors contributed to the discrepancy in their parsing accuracy. By crystalizing the trade-off between representation expressiveness and ease of automatic parsing, we hope our results can help inform the design of the next-generation meaning representations.", "keyphrases": ["meaning representation", "amr", "factor", "discrepancy"]} +{"id": "turk-etal-2019-turkish", "title": "Turkish Treebanking: Unifying and Constructing Efforts", "abstract": "In this paper, we present the current version of two different treebanks, the re-annotation of the Turkish PUD Treebank and the first annotation of the Turkish National Corpus Universal Dependency (henceforth TNC-UD). The annotation of both treebanks, the Turkish PUD Treebank and TNC-UD, was carried out based on the decisions concerning linguistic adequacy of re-annotation of the Turkish IMST-UD Treebank (T\u00fcrk et al., forthcoming).
Both of the treebanks were annotated with the same annotation process and morphological and syntactic analyses. The TNC-UD is planned to have 10,000 sentences. In this paper, we will present the first 500 sentences along with the re-annotated PUD Treebank. Moreover, this paper also offers the parsing results of a graph-based neural parser on the previous and re-annotated PUD, as well as the TNC-UD. In light of the comparisons, even though we observe a slight decrease in the attachment scores of the Turkish PUD treebank, we demonstrate that the annotation of the TNC-UD improves the parsing accuracy of Turkish. In addition to the treebanks, we have also constructed custom annotation software with advanced filtering and morphological editing options. Both the treebanks, including a full edit-history and the annotation guidelines, and the custom software are publicly available under an open license online.", "keyphrases": ["treebank", "annotation guideline", "turkish"]} +{"id": "zhou-etal-2019-bert", "title": "BERT-based Lexical Substitution", "abstract": "Previous studies on lexical substitution tend to obtain substitute candidates by finding the target word's synonyms from lexical resources (e.g., WordNet) and then rank the candidates based on its contexts. These approaches have two limitations: (1) They are likely to overlook good substitute candidates that are not the synonyms of the target words in the lexical resources; (2) They fail to take into account the substitution's influence on the global context of the sentence. To address these issues, we propose an end-to-end BERT-based lexical substitution approach which can propose and validate substitute candidates without using any annotated data or manually curated resources. Our approach first applies dropout to the target word's embedding for partially masking the word, allowing BERT to take balanced consideration of the target word's semantics and contexts for proposing substitute candidates, and then validates the candidates based on their substitution's influence on the global contextualized representation of the sentence. Experiments show our approach performs well in both proposing and ranking substitute candidates, achieving the state-of-the-art results in both LS07 and LS14 benchmarks.", "keyphrases": ["influence", "state-of-the-art result", "bert-based lexical substitution"]} +{"id": "gao-etal-2018-april", "title": "APRIL: Interactively Learning to Summarise by Combining Active Preference Learning and Reinforcement Learning", "abstract": "We propose a method to perform automatic document summarisation without using reference summaries. Instead, our method interactively learns from users' preferences. The merit of preference-based interactive summarisation is that preferences are easier for users to provide than reference summaries. Existing preference-based interactive learning methods suffer from high sample complexity, i.e. they need to interact with the oracle for many rounds in order to converge. In this work, we propose a new objective function, which enables us to leverage active learning, preference learning and reinforcement learning techniques in order to reduce the sample complexity. Both simulation and real-user experiments suggest that our method significantly advances the state of the art.
Our source code is freely available at .", "keyphrases": ["preference", "reference summary", "active learning", "user feedback"]} +{"id": "chen-etal-2009-global", "title": "Global Models of Document Structure using Latent Permutations", "abstract": "We present a novel Bayesian topic model for learning discourse-level document structure. Our model leverages insights from discourse theory to constrain latent topic assignments in a way that reflects the underlying organization of document topics. We propose a global model in which both topic selection and ordering are biased to be similar across a collection of related documents. We show that this space of orderings can be elegantly represented using a distribution over permutations called the generalized Mallows model. Our structure-aware approach substantially outperforms alternative approaches for cross-document comparison and single-document segmentation.", "keyphrases": ["document structure", "permutation", "latent topic assignment"]} +{"id": "liu-etal-2019-incorporating-contextual", "title": "Incorporating Contextual and Syntactic Structures Improves Semantic Similarity Modeling", "abstract": "Semantic similarity modeling is central to many NLP problems such as natural language inference and question answering. Syntactic structures interact closely with semantics in learning compositional representations and alleviating long-range dependency issues. However, such structure priors have not been well exploited in previous work for semantic modeling. To examine their effectiveness, we start with the Pairwise Word Interaction Model, one of the best models according to a recent reproducibility study, then introduce components for modeling context and structure using multi-layer BiLSTMs and TreeLSTMs. In addition, we introduce residual connections to the deep convolutional neural network component of the model. Extensive evaluations on eight benchmark datasets show that incorporating structural information contributes to consistent improvements over strong baselines.", "keyphrases": ["syntactic structure", "semantic similarity modeling", "connection"]} +{"id": "ai-etal-2014-sprinter", "title": "Sprinter: Language Technologies for Interactive and Multimedia Language Learning", "abstract": "Modern language learning courses are no longer exclusively based on books or face-to-face lectures. More and more lessons make use of multimedia and personalized learning methods. Many of these are based on e-learning solutions. Learning via the Internet provides 24/7 services that require sizeable human resources. Therefore we witness a growing economic pressure to employ computer-assisted methods for improving language learning in quality, efficiency and scalability. In this paper, we will address three applications of language technologies for language learning: 1) Methods and strategies for pronunciation training in second language learning, e.g., multimodal feedback via visualization of sound features, speech verification and prosody transplantation; 2) Dialogue-based language learning games; 3) Application of parsing and generation technologies to the automatic generation of paraphrases for the semi-automatic production of learning material.", "keyphrases": ["language technology", "interactive", "sprinter"]} +{"id": "nivre-2006-constraints", "title": "Constraints on Non-Projective Dependency Parsing", "abstract": "We investigate a series of graph-theoretic constraints on non-projective dependency parsing and their effect on expressivity, i.e.
whether they allow naturally occurring syntactic constructions to be adequately represented, and efficiency, i.e. whether they reduce the search space for the parser. In particular, we define a new measure for the degree of non-projectivity in an acyclic dependency graph obeying the single-head constraint. The constraints are evaluated experimentally using data from the Prague Dependency Treebank and the Danish Dependency Treebank. The results indicate that, whereas complete linguistic coverage in principle requires unrestricted non-projective dependency graphs, limiting the degree of non-projectivity to at most 2 can reduce average running time from quadratic to linear, while excluding less than 0.5% of the dependency graphs found in the two treebanks. This is a substantial improvement over the commonly used projective approximation (degree 0), which excludes 15\u201325% of the graphs.", "keyphrases": ["non-projective dependency parsing", "projectivity", "constituent"]} +{"id": "baldridge-lascarides-2005-probabilistic", "title": "Probabilistic Head-Driven Parsing for Discourse Structure", "abstract": "We describe a data-driven approach to building interpretable discourse structures for appointment scheduling dialogues. We represent discourse structures as headed trees and model them with probabilistic head-driven parsing techniques. We show that dialogue-based features regarding turn-taking and domain-specific goals have a large positive impact on performance. Our best model achieves an f-score of 43.2% for labelled discourse relations and 67.9% for unlabelled ones, significantly beating a right-branching baseline that uses the most frequent relations.", "keyphrases": ["discourse structure", "data-driven approach", "verbmobil corpus", "pcfg"]} +{"id": "tien-nguyen-joty-2017-neural", "title": "A Neural Local Coherence Model", "abstract": "We propose a local coherence model based on a convolutional neural network that operates over the entity grid representation of a text. The model captures long-range entity transitions along with entity-specific features without losing generalization, thanks to the power of distributed representation. We present a pairwise ranking method to train the model in an end-to-end fashion on a task and learn task-specific high level features. Our evaluation on three different coherence assessment tasks demonstrates that our model achieves state of the art results outperforming existing models by a good margin.", "keyphrases": ["local coherence model", "convolutional neural network", "entity-specific feature"]} +{"id": "finley-etal-2018-dictations", "title": "From dictations to clinical reports using machine translation", "abstract": "A typical workflow to document clinical encounters entails dictating a summary, running speech recognition, and post-processing the resulting text into a formatted letter. Post-processing entails a host of transformations including punctuation restoration, truecasing, marking sections and headers, converting dates and numerical expressions, parsing lists, etc. In conventional implementations, most of these tasks are accomplished by individual modules. We introduce a novel holistic approach to post-processing that relies on machine translation.
We show how this technique outperforms an alternative conventional system\u2014even learning to correct speech recognition errors during post-processing\u2014while being much simpler to maintain.", "keyphrases": ["dictation", "report", "conversation"]} +{"id": "rozovskaya-roth-2016-grammatical", "title": "Grammatical Error Correction: Machine Translation and Classifiers", "abstract": "We focus on two leading state-of-the-art approaches to grammatical error correction \u2013 machine learning classification and machine translation. Based on the comparative study of the two learning frameworks and through error analysis of the output of the state-of-the-art systems, we identify key strengths and weaknesses of each of these approaches and demonstrate their complementarity. In particular, the machine translation method learns from parallel data without requiring further linguistic input and is better at correcting complex mistakes. The classification approach possesses other desirable characteristics, such as the ability to easily generalize beyond what was seen in training, the ability to train without human-annotated data, and the flexibility to adjust knowledge sources for individual error types. Based on this analysis, we develop an algorithmic approach that combines the strengths of both methods. We present several systems based on resources used in previous work with a relative improvement of over 20% (and 7.4 F score points) over the previous state-of-the-art.", "keyphrases": ["weakness", "grammatical error correction", "machine translation approach"]} +{"id": "pecina-2005-extensive", "title": "An Extensive Empirical Study of Collocation Extraction Methods", "abstract": "This paper presents a status quo of an ongoing research study of collocations -- an essential linguistic phenomenon having a wide spectrum of applications in the field of natural language processing. The core of the work is an empirical evaluation of a comprehensive list of automatic collocation extraction methods using precision-recall measures and a proposal of a new approach integrating multiple basic methods and statistical classification. We demonstrate that combining multiple independent techniques leads to a significant performance improvement in comparison with individual basic methods.", "keyphrases": ["collocation extraction method", "empirical evaluation", "list"]} +{"id": "wang-etal-2020-covost", "title": "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus", "abstract": "Spoken language translation has recently witnessed a resurgence in popularity, thanks to the development of end-to-end models and the creation of new corpora, such as Augmented LibriSpeech and MuST-C. Existing datasets involve language pairs with English as a source language, involve very specific domains or are low resource. We introduce CoVoST, a multilingual speech-to-text translation corpus from 11 languages into English, diversified with over 11,000 speakers and over 60 accents. We describe the dataset creation methodology and provide empirical evidence of the quality of the data. We also provide initial benchmarks, including, to our knowledge, the first end-to-end many-to-one multilingual models for spoken language translation. CoVoST is released under CC0 license and free to use. 
We also provide additional evaluation data derived from Tatoeba under CC licenses.", "keyphrases": ["speech-to-text translation corpus", "end-to-end model", "common voice"]} +{"id": "kim-etal-2019-unsupervised", "title": "Unsupervised Recurrent Neural Network Grammars", "abstract": "Recurrent neural network grammars (RNNG) are generative models of language which jointly model syntax and surface structure by incrementally generating a syntax tree and sentence in a top-down, left-to-right order. Supervised RNNGs achieve strong language modeling and parsing performance, but require an annotated corpus of parse trees. In this work, we experiment with unsupervised learning of RNNGs. Since directly marginalizing over the space of latent trees is intractable, we instead apply amortized variational inference. To maximize the evidence lower bound, we develop an inference network parameterized as a neural CRF constituency parser. On language modeling, unsupervised RNNGs perform as well as their supervised counterparts on benchmarks in English and Chinese. On constituency grammar induction, they are competitive with recent neural language models that induce tree structures from words through attention mechanisms.", "keyphrases": ["neural network grammar", "language modeling", "variational inference", "chinese", "tree structure"]} +{"id": "etchegoyhen-etal-2016-exploiting", "title": "Exploiting a Large Strongly Comparable Corpus", "abstract": "This article describes a large comparable corpus for Basque and Spanish and the methods employed to build a parallel resource from the original data. The EITB corpus, a strongly comparable corpus in the news domain, is to be shared with the research community, as an aid for the development and testing of methods in comparable corpora exploitation, and as basis for the improvement of data-driven machine translation systems for this language pair. Competing approaches were explored for the alignment of comparable segments in the corpus, resulting in the design of a simple method which outperformed a state-of-the-art method on the corpus test sets. The method we present is highly portable, computationally efficient, and significantly reduces deployment work, a welcome result for the exploitation of comparable corpora.", "keyphrases": ["comparable corpus", "news domain", "basis"]} +{"id": "mitchell-lapata-2009-language", "title": "Language Models Based on Semantic Composition", "abstract": "In this paper we propose a novel statistical language model to capture long-range semantic dependencies. Specifically, we apply the concept of semantic composition to the problem of constructing predictive history representations for upcoming words. We also examine the influence of the underlying semantic space on the composition task by comparing spatial semantic representations against topic-based ones. The composition models yield reductions in perplexity when combined with a standard n-gram language model over the n-gram model alone. We also obtain perplexity reductions when integrating our models with a structured language model.", "keyphrases": ["semantic composition", "language modeling", "dimension"]} +{"id": "gkatzia-etal-2014-comparing", "title": "Comparing Multi-label Classification with Reinforcement Learning for Summarisation of Time-series Data", "abstract": "We present a novel approach for automatic report generation from time-series data, in the context of student feedback generation.
Our proposed methodology treats content selection as a multi-label (ML) classification problem, which takes as input time-series data and outputs a set of templates, while capturing the dependencies between selected templates. We show that this method generates output closer to the feedback that lecturers actually generated, achieving 3.5% higher accuracy and 15% higher F-score than multiple simple classifiers that keep a history of selected templates. Furthermore, we compare an ML classifier with a Reinforcement Learning (RL) approach in simulation and using ratings from real student users. We show that the different methods have different benefits, with ML being more accurate for predicting what was seen in the training data, whereas RL is more exploratory and slightly preferred by the students.", "keyphrases": ["reinforcement learning", "time-series data", "student", "content selection"]} +{"id": "kennington-schlangen-2015-simple", "title": "Simple Learning and Compositional Application of Perceptually Grounded Word Meanings for Incremental Reference Resolution", "abstract": "An elementary way of using language is to refer to objects. Often, these objects are physically present in the shared environment and reference is done via mention of perceivable properties of the objects. This is a type of language use that is modelled well neither by logical semantics nor by distributional semantics, the former focusing on inferential relations between expressed propositions, the latter on similarity relations between words or phrases. We present an account of word and phrase meaning that is perceptually grounded, trainable, compositional, and \u2018dialogue-plausible\u2019 in that it computes meanings word-by-word. We show that the approach performs well (with an accuracy of 65% on a 1-out-of-32 reference resolution task) on direct descriptions and target/landmark descriptions, even when trained with less than 800 training examples and automatically transcribed utterances.", "keyphrases": ["incremental reference resolution", "object", "environment", "individual word", "low-level visual feature"]} +{"id": "zhou-etal-2019-early", "title": "Early Rumour Detection", "abstract": "Rumours can spread quickly through social media, and malicious ones can bring about significant economical and social impact. Motivated by this, our paper focuses on the task of rumour detection; particularly, we are interested in understanding how early we can detect them. Although there are numerous studies on rumour detection, few are concerned with the timing of the detection. A successfully-detected malicious rumour can still cause significant damage if it isn't detected in a timely manner, and so timing is crucial. To address this, we present a novel methodology for early rumour detection. Our model treats social media posts (e.g. tweets) as a data stream and integrates reinforcement learning to learn the minimum number of posts required before we classify an event as a rumour. Experiments on Twitter and Weibo demonstrate that our model identifies rumours earlier than state-of-the-art systems while maintaining a comparable accuracy.", "keyphrases": ["post", "reinforcement learning", "early rumour detection"]} +{"id": "cotterell-etal-2016-morphological-segmentation", "title": "Morphological Segmentation Inside-Out", "abstract": "Morphological segmentation has traditionally been modeled with non-hierarchical models, which yield flat segmentations as output.
In many cases, however, proper morphological analysis requires hierarchical structure -- especially in the case of derivational morphology. In this work, we introduce a discriminative, joint model of morphological segmentation along with the orthographic changes that occur during word formation. To the best of our knowledge, this is the first attempt to approach discriminative segmentation with a context-free model. Additionally, we release an annotated treebank of 7454 English words with constituency parses, encouraging future research in this area.", "keyphrases": ["segmentation", "joint model", "change"]} +{"id": "clarke-etal-2010-semantic", "title": "Semantic Composition with Quotient Algebras", "abstract": "We describe an algebraic approach for computing with vector based semantics. The tensor product has been proposed as a method of composition, but has the undesirable property that strings of different length are incomparable. We consider how a quotient algebra of the tensor algebra can allow such comparisons to be made, offering the possibility of data-driven models of semantic composition.", "keyphrases": ["quotient algebra", "length", "semantic composition"]} +{"id": "mitkov-ha-2003-computer", "title": "Computer-Aided Generation of Multiple-Choice Tests", "abstract": "Summary form only given. The paper describes a novel automatic procedure for the generation of multiple-choice tests from electronic documents. In addition to employing various NLP techniques including term extraction and shallow parsing, the system makes use of language resources such as corpora and ontologies. The system operates in a fully automatic mode and also a semiautomatic environment where the user is offered the option to post-edit the generated test items. The results from the conducted evaluation suggest that the new procedure is very effective saving time and labour considerably and that the test items produced with the help of the program are not of inferior quality to those produced manually.", "keyphrases": ["multiple-choice test", "nlp technique", "shallow parsing", "question generation", "activity"]} +{"id": "daiber-etal-2015-splitting", "title": "Splitting Compounds by Semantic Analogy", "abstract": "Compounding is a highly productive word-formation process in some languages that is often problematic for natural language processing applications. In this paper, we investigate whether distributional semantics in the form of word embeddings can enable a deeper, i.e., more knowledge-rich, processing of compounds than the standard string-based methods. We present an unsupervised approach that exploits regularities in the semantic vector space (based on analogies such as \"bookshop is to shop as bookshelf is to shelf\") to produce compound analyses of high quality. A subsequent compound splitting algorithm based on these analyses is highly effective, particularly for ambiguous compounds. German to English machine translation experiments show that this semantic analogy-based compound splitter leads to better translations than a commonly used frequency-based method.", "keyphrases": ["semantic analogy", "shelf", "compound split"]} +{"id": "foster-etal-2011-news", "title": "From News to Comment: Resources and Benchmarks for Parsing the Language of Web 2.0", "abstract": "We investigate the problem of parsing the noisy language of social media. 
We evaluate four Wall-Street-Journal-trained statistical parsers (Berkeley, Brown, Malt and MST) on a new dataset containing 1,000 phrase structure trees for sentences from microblogs (tweets) and discussion forum posts. We compare the four parsers on their ability to produce Stanford dependencies for these Web 2.0 sentences. We find that the parsers have a particular problem with tweets and that a substantial part of this problem is related to POS tagging accuracy. We attempt three retraining experiments involving Malt, Brown and an in-house Berkeley-style parser and obtain a statistically significant improvement for all three parsers.", "keyphrases": ["web", "twitter", "bi-lexical dependency", "part-of-speech"]} +{"id": "kudo-matsumoto-2003-fast", "title": "Fast Methods for Kernel-Based Text Analysis", "abstract": "Kernel-based learning (e.g., Support Vector Machines) has been successfully applied to many hard problems in Natural Language Processing (NLP). In NLP, although feature combinations are crucial to improving performance, they are heuristically selected. Kernel methods change this situation. The merit of the kernel methods is that effective feature combination is implicitly expanded without loss of generality and increasing the computational costs. Kernel-based text analysis shows an excellent performance in terms of accuracy; however, these methods are usually too slow to apply to large-scale text analysis. In this paper, we extend a Basket Mining algorithm to convert a kernel-based classifier into a simple and fast linear classifier. Experimental results on English BaseNP Chunking, Japanese Word Segmentation and Japanese Dependency Parsing show that our new classifiers are about 30 to 300 times faster than the standard kernel-based classifiers.", "keyphrases": ["kernel-based text analysis", "support vector", "pki"]} +{"id": "cheung-etal-2013-probabilistic", "title": "Probabilistic Frame Induction", "abstract": "In natural-language discourse, related events tend to appear near each other to describe a larger scenario. Such structures can be formalized by the notion of a frame (a.k.a. template), which comprises a set of related events and prototypical participants and event transitions. Identifying frames is a prerequisite for information extraction and natural language generation, and is usually done manually. Methods for inducing frames have been proposed recently, but they typically use ad hoc procedures and are difficult to diagnose or extend. In this paper, we propose the first probabilistic approach to frame induction, which incorporates frames, events, and participants as latent topics and learns those frame and event transitions that best explain the text. The number of frame components is inferred by a novel application of a split-merge method from syntactic parsing. In end-to-end evaluations from text to induced frames and extracted facts, our method produces state-of-the-art results while substantially reducing engineering effort.", "keyphrases": ["frame", "participant", "schema induction"]} +{"id": "shi-etal-2014-probabilistic", "title": "A Probabilistic Co-Bootstrapping Method for Entity Set Expansion", "abstract": "Entity Set Expansion (ESE) aims at automatically acquiring instances of a specific target category. Unfortunately, traditional ESE methods usually have the expansion boundary problem and the semantic drift problem.
To resolve the above two problems, this paper proposes a probabilistic Co-Bootstrapping method, which can accurately determine the expansion boundary using both the positive and the discriminant negative instances, and resolve the semantic drift problem by effectively maintaining and refining the expansion boundary during bootstrapping iterations. Experimental results show that our method can achieve a competitive performance.", "keyphrases": ["probabilistic co-bootstrapping method", "entity set expansion", "ese", "target category", "expansion boundary"]} +{"id": "chang-etal-2009-discriminative", "title": "Discriminative Reordering with Chinese Grammatical Relations Features", "abstract": "The prevalence in Chinese of grammatical structures that translate into English in different word orders is an important cause of translation difficulty. While previous work has used phrase-structure parses to deal with such ordering problems, we introduce a richer set of Chinese grammatical relations that describes more semantically abstract relations between words. Using these Chinese grammatical relations, we improve a phrase orientation classifier (introduced by Zens and Ney (2006)) that decides the ordering of two phrases when translated into English by adding path features designed over the Chinese typed dependencies. We then apply the log probability of the phrase orientation classifier as an extra feature in a phrase-based MT system, and get significant BLEU point gains on three test sets: MT02 (+0.59), MT03 (+1.00) and MT05 (+0.77). Our Chinese grammatical relations are also likely to be useful for other NLP tasks.", "keyphrases": ["grammatical relation", "extra feature", "chinese-english task"]} +{"id": "luo-2005-coreference", "title": "On Coreference Resolution Performance Metrics", "abstract": "The paper proposes a Constrained Entity-Alignment F-Measure (CEAF) for evaluating coreference resolution. The metric is computed by aligning reference and system entities (or coreference chains) with the constraint that a system (reference) entity is aligned with at most one reference (system) entity. We show that the best alignment is a maximum bipartite matching problem which can be solved by the Kuhn-Munkres algorithm. Comparative experiments are conducted to show that the widely-known MUC F-measure has serious flaws in evaluating a coreference system. The proposed metric is also compared with the ACE-Value, the official evaluation metric in the Automatic Content Extraction (ACE) task, and we conclude that the proposed metric possesses some properties such as symmetry and better interpretability missing in the ACE-Value.", "keyphrases": ["coreference resolution", "ceaf", "mention", "good alignment"]} +{"id": "boujelbane-etal-2013-mapping", "title": "Mapping Rules for Building a Tunisian Dialect Lexicon and Generating Corpora", "abstract": "Nowadays in Tunisia, the Arabic Tunisian Dialect (TD) has become progressively used in interviews, news and debate programs instead of Modern Standard Arabic (MSA). Thus, this gave birth to a new kind of language. Indeed, the majority of speech is no longer made in MSA but alternates between MSA and TD. This situation has important negative consequences on Automatic Speech Recognition (ASR): since the spoken dialects are not officially written and do not have a standard orthography, it is very costly to obtain adequate annotated corpora to use for training language models and building vocabulary.
There are neither parallel corpora involving Tunisian dialect and MSA nor dictionaries. In this paper, we describe a method for building a bilingual dictionary using explicit knowledge about the relation between TD and MSA. We also present an automatic process for creating Tunisian Dialect", "keyphrases": ["tunisian dialect", "arabic", "dictionary"]} +{"id": "nguyen-etal-2017-reinforcement", "title": "Reinforcement Learning for Bandit Neural Machine Translation with Simulated Human Feedback", "abstract": "Machine translation is a natural candidate problem for reinforcement learning from human feedback: users provide quick, dirty ratings on candidate translations to guide a system to improve. Yet, current neural machine translation training focuses on expensive human-generated reference translations. We describe a reinforcement learning algorithm that improves neural machine translation systems from simulated human feedback. Our algorithm combines the advantage actor-critic algorithm (Mnih et al., 2016) with the attention-based neural encoder-decoder architecture (Luong et al., 2015). This algorithm (a) is well-designed for problems with a large action space and delayed rewards, (b) effectively optimizes traditional corpus-level machine translation metrics, and (c) is robust to skewed, high-variance, granular feedback modeled after actual human behaviors.", "keyphrases": ["machine translation", "simulated human feedback", "reinforcement learning", "bandit feedback"]} +{"id": "ren-etal-2019-explicit", "title": "Explicit Cross-lingual Pre-training for Unsupervised Machine Translation", "abstract": "Pre-training has proven to be effective in unsupervised machine translation due to its ability to model deep context information in cross-lingual scenarios. However, the cross-lingual information obtained from shared BPE spaces is inexplicit and limited. In this paper, we propose a novel cross-lingual pre-training method for unsupervised machine translation by incorporating explicit cross-lingual training signals. Specifically, we first calculate cross-lingual n-gram embeddings and infer an n-gram translation table from them. With those n-gram translation pairs, we propose a new pre-training model called Cross-lingual Masked Language Model (CMLM), which randomly chooses source n-grams in the input text stream and predicts their translation candidates at each time step. Experiments show that our method can incorporate beneficial cross-lingual information into pre-trained models. Taking pre-trained CMLM models as the encoder and decoder, we significantly improve the performance of unsupervised machine translation.", "keyphrases": ["unsupervised machine translation", "cross-lingual information", "pre-trained model"]} +{"id": "zhu-etal-2022-diagnosing", "title": "Diagnosing Vision-and-Language Navigation: What Really Matters", "abstract": "Vision-and-language navigation (VLN) is a multimodal task where an agent follows natural language instructions and navigates in visual environments. Multiple setups have been proposed, and researchers apply new model architectures or training techniques to boost navigation performance. However, there still exist non-negligible gaps between machines' performance and human benchmarks. Moreover, the agents' inner mechanisms for navigation decisions remain unclear. To the best of our knowledge, how the agents perceive the multimodal input is under-studied and needs investigation. 
In this work, we conduct a series of diagnostic experiments to unveil agents' focus during navigation. Results show that indoor navigation agents refer to both object and direction tokens when making decisions. In contrast, outdoor navigation agents heavily rely on direction tokens and poorly understand the object tokens. Transformer-based agents acquire a better cross-modal understanding of objects and display stronger numerical reasoning ability than non-Transformer-based agents. When it comes to vision-and-language alignments, many models claim that they can align object tokens with specific visual targets. We find unbalanced attention on the vision and text input and doubt the reliability of such cross-modal alignments.", "keyphrases": ["vision-and-language navigation", "indoor navigation agent", "direction token"]} +{"id": "mills-etal-2018-automatic", "title": "Automatic Identification of Basic-Level Categories", "abstract": "Basic-level categories have been shown to be both psychologically significant and useful in a wide range of practical applications. We build a rule-based system to identify basic-level categories in WordNet, achieving 77% accuracy on a test set derived from prior psychological experiments. With additional annotations we found our system also has low precision, in part due to the existence of many categories that do not fit into the three classes (superordinate, basic-level, and subordinate) relied on in basic-level category research.", "keyphrases": ["basic-level category", "rule-based system", "wordnet"]} +{"id": "flor-etal-2013-lexical", "title": "Lexical Tightness and Text Complexity", "abstract": "We present a computational notion of Lexical Tightness that measures global cohesion of content words in a text. Lexical tightness represents the degree to which a text tends to use words that are highly inter-associated in the language. We demonstrate the utility of this measure for estimating text complexity as measured by US school grade level designations of texts. Lexical tightness strongly correlates with grade level in a collection of expertly rated reading materials. Lexical tightness captures aspects of prose complexity that are not covered by classic readability indexes, especially for literary texts. We also present initial findings on the utility of this measure for automated estimation of complexity for poetry.", "keyphrases": ["literary text", "lexical tightness", "instruction"]} +{"id": "wambsganss-etal-2020-corpus", "title": "A Corpus for Argumentative Writing Support in German", "abstract": "In this paper, we present a novel annotation approach to capture claims and premises of arguments and their relations in student-written persuasive peer reviews on business models in German language. We propose an annotation scheme based on annotation guidelines that allows to model claims and premises as well as support and attack relations for capturing the structure of argumentative discourse in student-written peer reviews. We conduct an annotation study with three annotators on 50 persuasive essays to evaluate our annotation scheme. The obtained inter-rater agreement of \u03b1 = 0.57 for argument components and \u03b1 = 0.49 for argumentative relations indicates that the proposed annotation scheme successfully guides annotators to moderate agreement.
Finally, we present our freely available corpus of 1,000 persuasive student-written peer reviews on business models and our annotation guidelines to encourage future research on the design and development of argumentative writing support systems for students.", "keyphrases": ["argumentative writing support", "german language", "student-written text"]} +{"id": "max-etal-2010-contrastive", "title": "Contrastive Lexical Evaluation of Machine Translation", "abstract": "This paper advocates a complementary measure of translation performance that focuses on the contrastive ability of two or more systems or system versions to adequately translate source words. This is motivated by three main reasons: 1) existing automatic metrics sometimes do not show significant differences that can be revealed by fine-grained focussed human evaluation, 2) these metrics are based on direct comparisons of system hypotheses with the corresponding reference translations, thus ignoring the input words that were actually translated, and 3) as these metrics do not take input hypotheses from several systems at once, fine-grained contrastive evaluation can only be done indirectly. This proposal is illustrated on a multi-source Machine Translation scenario where multiple translations of a source text are available. Significant gains (up to +1.3 BLEU point) are achieved on these experiments, and contrastive lexical evaluation is shown to provide new information that can help to better analyse a system's performance.", "keyphrases": ["more system", "reference translation", "contrastive lexical evaluation"]} +{"id": "sun-lu-2020-understanding", "title": "Understanding Attention for Text Classification", "abstract": "Attention has been proven successful in many natural language processing (NLP) tasks. Recently, many researchers started to investigate the interpretability of attention on NLP tasks. Many existing approaches focused on examining whether the local attention weights could reflect the importance of input representations. In this work, we present a study on understanding the internal mechanism of attention by looking into the gradient update process, checking its behavior when approaching a local minimum during training. We propose to analyze for each word token the following two quantities: its polarity score and its attention score, where the latter is a global assessment on the token's significance. We discuss conditions under which the attention mechanism may become more (or less) interpretable, and show how the interplay between the two quantities can contribute towards model performance.", "keyphrases": ["text classification", "attention weight", "mechanism"]} +{"id": "reschke-anand-2011-extracting", "title": "Extracting Contextual Evaluativity", "abstract": "Recent work on evaluativity or sentiment in the language sciences has focused on the contributions that lexical items provide. In this paper, we discuss contextual evaluativity, stance that is inferred from lexical meaning and pragmatic environments. Focusing on assessor-grounding claims like We liked him because he so clearly disliked Margaret Thatcher, we build a corpus and construct a system employing compositional principles of evaluativity calculation to derive that we dislikes Margaret Thatcher.
The resulting system has an F-score of 0.90 on our dataset, outperforming reasonable baselines, and indicating the viability of inferencing in the evaluative domain.", "keyphrases": ["contextual evaluativity", "predicate", "opinion inference"]} +{"id": "goyal-etal-2022-flores", "title": "The Flores-101 Evaluation Benchmark for Low-Resource and Multilingual Machine Translation", "abstract": "One of the biggest challenges hindering progress in low-resource and multilingual machine translation is the lack of good evaluation benchmarks. Current evaluation benchmarks either lack good coverage of low-resource languages, consider only restricted domains, or are low quality because they are constructed using semi-automatic procedures. In this work, we introduce the Flores-101 evaluation benchmark, consisting of 3001 sentences extracted from English Wikipedia and covering a variety of different topics and domains. These sentences have been translated in 101 languages by professional translators through a carefully controlled process. The resulting dataset enables better assessment of model quality on the long tail of low-resource languages, including the evaluation of many-to-many multilingual translation systems, as all translations are fully aligned. By publicly releasing such a high-quality and high-coverage dataset, we hope to foster progress in the machine translation community and beyond.", "keyphrases": ["flores-101 evaluation benchmark", "multilingual machine translation", "english wikipedia", "test set"]} +{"id": "li-etal-2012-mandarin", "title": "A Mandarin-English Code-Switching Corpus", "abstract": "Generally the existing monolingual corpora are not suitable for large vocabulary continuous speech recognition (LVCSR) of code-switching speech. The motivation of this paper is to study the rules and constraints code-switching follows and design a corpus for code-switching LVCSR task. This paper presents the development of a Mandarin-English code-switching corpus. This corpus consists of four parts: 1) conversational meeting speech and its data; 2) project meeting speech data; 3) student interviews speech; 4) text data of on-line news. The speech was transcribed by an annotator and verified by Mandarin-English bilingual speakers manually. We propose an approach for automatically downloading from the web text data that contains code-switching. The corpus includes both intra-sentential code-switching (switch in the middle of a sentence) and inter-sentential code-switching (switch at the end of the sentence). The distribution of part-of-speech (POS) tags and code-switching reasons are reported.", "keyphrases": ["mandarin-english code-switching corpus", "other corpora", "standard"]} +{"id": "agirre-lopez-de-lacalle-2009-supervised", "title": "Supervised Domain Adaption for WSD", "abstract": "The lack of positive results on supervised domain adaptation for WSD has cast some doubts on the utility of hand-tagging general corpora and thus developing generic supervised WSD systems. In this paper we show for the first time that our WSD system trained on a general source corpus (Bnc) and the target corpus, obtains up to 22% error reduction when compared to a system trained on the target corpus alone. In addition, we show that as little as 40% of the target corpus (when supplemented with the source corpus) is sufficient to obtain the same results as training on the full target data.
The key to success is the use of unlabeled data with SVD, a combination of kernels and SVM.", "keyphrases": ["wsd", "target data", "supervised domain adaptation"]} +{"id": "moore-2005-discriminative", "title": "A Discriminative Framework for Bilingual Word Alignment", "abstract": "Bilingual word alignment forms the foundation of most approaches to statistical machine translation. Current word alignment methods are predominantly based on generative models. In this paper, we demonstrate a discriminative approach to training simple word alignment models that are comparable in accuracy to the more complex generative models normally used. These models have the advantages that they are easy to add features to and they allow fast optimization of model parameters using small amounts of annotated data.", "keyphrases": ["bilingual word alignment", "generative model", "annotated data", "llr", "feature function"]} +{"id": "galley-mckeown-2007-lexicalized", "title": "Lexicalized Markov Grammars for Sentence Compression", "abstract": "We present a sentence compression system based on synchronous context-free grammars (SCFG), following the successful noisy-channel approach of (Knight and Marcu, 2000). We define a head-driven Markovization formulation of SCFG deletion rules, which allows us to lexicalize probabilities of constituent deletions. We also use a robust approach for tree-to-tree alignment between arbitrary document-abstract parallel corpora, which lets us train lexicalized models with much more data than previous approaches relying exclusively on scarcely available document-compression corpora. Finally, we evaluate different Markovized models, and find that our selected best model is one that exploits head-modifier bilexicalization to accurately distinguish adjuncts from complements, and that produces sentences that were judged more grammatical than those generated by previous work.", "keyphrases": ["synchronous context-free grammar", "markovization formulation", "deletion rule"]} +{"id": "roark-etal-2012-opengrm", "title": "The OpenGrm open-source finite-state grammar software libraries", "abstract": "In this paper, we present a new collection of open-source software libraries that provides command line binary utilities and library classes and functions for compiling regular expression and context-sensitive rewrite rules into finite-state transducers, and for n-gram language modeling. The OpenGrm libraries use the OpenFst library to provide an efficient encoding of grammars and general algorithms for building, modifying and applying models.", "keyphrases": ["finite-state transducer", "opengrm library", "encoding"]} +{"id": "gan-etal-2017-learning", "title": "Learning Generic Sentence Representations Using Convolutional Neural Networks", "abstract": "We propose a new encoder-decoder approach to learn distributed sentence representations that are applicable to multiple purposes. The model is learned by using a convolutional neural network as an encoder to map an input sentence into a continuous vector, and using a long short-term memory recurrent neural network as a decoder. Several tasks are considered, including sentence reconstruction and future sentence prediction. Further, a hierarchical encoder-decoder model is proposed to encode a sentence to predict multiple future sentences. By training our models on a large collection of novels, we obtain a highly generic convolutional sentence encoder that performs well in practice.
Experimental results on several benchmark datasets, and across a broad range of applications, demonstrate the superiority of the proposed model over competing methods.", "keyphrases": ["sentence representation", "convolutional neural network", "encoder-decoder model"]} +{"id": "yu-etal-2020-wasserstein", "title": "Wasserstein Distance Regularized Sequence Representation for Text Matching in Asymmetrical Domains", "abstract": "One approach to matching texts from asymmetrical domains is projecting the input sequences into a common semantic space as feature vectors upon which the matching function can be readily defined and learned. In real-world matching practices, it is often observed that as the training goes on, the feature vectors projected from different domains tend to be indistinguishable. The phenomenon, however, is often overlooked in existing matching models. As a result, the feature vectors are constructed without any regularization, which inevitably increases the difficulty of learning the downstream matching functions. In this paper, we propose a novel match method tailored for text matching in asymmetrical domains, called WD-Match. In WD-Match, a Wasserstein distance-based regularizer is defined to regularize the feature vectors projected from different domains. As a result, the method enforces the feature projection function to generate vectors such that those corresponding to different domains cannot be easily discriminated. The training process of WD-Match amounts to a game that minimizes the matching loss regularized by the Wasserstein distance. WD-Match can be used to improve different text matching methods, by using the method as its underlying matching model. Four popular text matching methods have been exploited in the paper. Experimental results based on four publicly available benchmarks showed that WD-Match consistently outperformed the underlying methods and the baselines.", "keyphrases": ["regularizer", "text matching", "asymmetrical domain", "wasserstein distance"]} +{"id": "wang-lee-2018-learning", "title": "Learning to Encode Text as Human-Readable Summaries using Generative Adversarial Networks", "abstract": "Auto-encoders compress input data into a latent-space representation and reconstruct the original data from the representation. This latent representation is not easily interpreted by humans. In this paper, we propose training an auto-encoder that encodes input text into human-readable sentences, and unpaired abstractive summarization is thereby achieved. The auto-encoder is composed of a generator and a reconstructor. The generator encodes the input text into a shorter word sequence, and the reconstructor recovers the generator input from the generator output. To make the generator output human-readable, a discriminator restricts the output of the generator to resemble human-written sentences. By taking the generator output as the summary of the input text, abstractive summarization is achieved without document-summary pairs as training data. Promising results are shown on both English and Chinese corpora.", "keyphrases": ["generative adversarial network", "summarization", "discriminator", "gan"]} +{"id": "ellison-kirby-2006-measuring", "title": "Measuring Language Divergence by Intra-Lexical Comparison", "abstract": "This paper presents a method for building genetic language taxonomies based on a new approach to comparing lexical forms.
Instead of comparing forms cross-linguistically, a matrix of language-internal similarities between forms is calculated. These matrices are then compared to give distances between languages. We argue that this coheres better with current thinking in linguistics and psycholinguistics. An implementation of this approach, called PHILOLOGICON, is described, along with its application to Dyen et al.'s (1992) ninety-five wordlists from Indo-European languages.", "keyphrases": ["intra-lexical comparison", "distance", "philologicon"]} +{"id": "michel-neubig-2018-extreme", "title": "Extreme Adaptation for Personalized Neural Machine Translation", "abstract": "Every person speaks or writes their own flavor of their native language, influenced by a number of factors: the content they tend to talk about, their gender, their social status, or their geographical origin. When attempting to perform Machine Translation (MT), these variations have a significant effect on how the system should perform translation, but this is not captured well by standard one-size-fits-all models. In this paper, we propose a simple and parameter-efficient adaptation technique that only requires adapting the bias of the output softmax to each particular user of the MT system, either directly or through a factored approximation. Experiments on TED talks in three languages demonstrate improvements in translation accuracy, and better reflection of speaker traits in the target text.", "keyphrases": ["adaptation", "output softmax", "ted talk", "output vocabulary", "speaker-specific data"]} +{"id": "gorman-etal-2020-sigmorphon", "title": "The SIGMORPHON 2020 Shared Task on Multilingual Grapheme-to-Phoneme Conversion", "abstract": "We describe the design and findings of the SIGMORPHON 2020 shared task on multilingual grapheme-to-phoneme conversion. Participants were asked to submit systems which take in a sequence of graphemes in a given language as input, then output a sequence of phonemes representing the pronunciation of that grapheme sequence. Nine teams submitted a total of 23 systems, at best achieving an 18% relative reduction in word error rate (macro-averaged over languages), versus strong neural sequence-to-sequence baselines. To facilitate error analysis, we publicly release the complete outputs for all systems\u2014a first for the SIGMORPHON workshop.", "keyphrases": ["multilingual grapheme-to-phoneme conversion", "grapheme", "phoneme"]} +{"id": "mielke-etal-2019-kind", "title": "What Kind of Language Is Hard to Language-Model?", "abstract": "How language-agnostic are current state-of-the-art NLP tools? Are there some types of language that are easier to model with current methods? In prior work (Cotterell et al., 2018) we attempted to address this question for language modeling, and observed that recurrent neural network language models do not perform equally well over all the high-resource European languages found in the Europarl corpus. We speculated that inflectional morphology may be the primary culprit for the discrepancy. In this paper, we extend these earlier experiments to cover 69 languages from 13 language families using a multilingual Bible corpus. Methodologically, we introduce a new paired-sample multiplicative mixed-effects model to obtain language difficulty coefficients from at-least-pairwise parallel corpora. In other words, the model is aware of inter-sentence variation and can handle missing data.
Exploiting this model, we show that \u201ctranslationese\u201d is not any easier to model than natively written language in a fair comparison. Trying to answer the question of what features difficult languages have in common, we try and fail to reproduce our earlier (Cotterell et al., 2018) observation about morphological complexity and instead reveal far simpler statistics of the data that seem to drive complexity in a much larger sample.", "keyphrases": ["language modeling", "large scale analysis", "morphological typology effect"]} +{"id": "bloom-etal-2007-extracting", "title": "Extracting Appraisal Expressions", "abstract": "Sentiment analysis seeks to characterize opinionated or evaluative aspects of natural language text. We suggest here that appraisal expression extraction should be viewed as a fundamental task in sentiment analysis. An appraisal expression is a textual unit expressing an evaluative stance towards some target. The task is to find and characterize the evaluative attributes of such elements. This paper describes a system for effectively extracting and disambiguating adjectival appraisal expressions in English outputting a generic representation in terms of their evaluative function in the text. Data mining on appraisal expressions gives meaningful and non-obvious insights.", "keyphrases": ["appraisal expression", "opinion", "subjectivity analysis"]} +{"id": "ji-bilmes-2004-multi", "title": "Multi-Speaker Language Modeling", "abstract": "In conventional language modeling, the words from only one speaker at a time are represented, even for conversational tasks such as meetings and telephone calls. In a conversational or meeting setting, however, speakers can have significant influence on each other. To recover such un-modeled inter-speaker information, we introduce an approach for conversational language modeling that considers words from other speakers when predicting words from the current one. By augmenting a normal trigram context, our new multi-speaker language model (MSLM) improves on both Switchboard and ICSI Meeting Recorder corpora. Using an MSLM and a conditional mutual information based word clustering algorithm, we achieve a 8.9% perplexity reduction on Switchboard and a 12.2% reduction on the ICSI Meeting Recorder data.", "keyphrases": ["language modeling", "mslm", "multi-speaker language model"]} +{"id": "xu-etal-2019-lexical", "title": "Lexical Micro-adaptation for Neural Machine Translation", "abstract": "This work is inspired by a typical machine translation industry scenario in which translators make use of in-domain data for facilitating translation of similar or repeating sentences. We introduce a generic framework applied at inference in which a subset of segment pairs are first extracted from training data according to their similarity to the input sentences. These segments are then used to dynamically update the parameters of a generic NMT network, thus performing a lexical micro-adaptation. Our approach demonstrates strong adaptation performance to new and existing datasets including pseudo in-domain data. 
We evaluate our approach on a heterogeneous English-French training dataset showing accuracy gains on all evaluated domains when compared to strong adaptation baselines.", "keyphrases": ["input sentence", "lexical micro-adaptation", "adaptation time"]} +{"id": "shen-etal-2020-blank", "title": "Blank Language Models", "abstract": "We propose Blank Language Model (BLM), a model that generates sequences by dynamically creating and filling in blanks. The blanks control which part of the sequence to expand, making BLM ideal for a variety of text editing and rewriting tasks. The model can start from a single blank or partially completed text with blanks at specified locations. It iteratively determines which word to place in a blank and whether to insert new blanks, and stops generating when no blanks are left to fill. BLM can be efficiently trained using a lower bound of the marginal data likelihood. On the task of filling missing text snippets, BLM significantly outperforms all other baselines in terms of both accuracy and fluency. Experiments on style transfer and damaged ancient text restoration demonstrate the potential of this framework for a wide range of applications.", "keyphrases": ["blm", "blank language models", "length"]} +{"id": "etchegoyhen-azpeitia-2016-set", "title": "Set-Theoretic Alignment for Comparable Corpora", "abstract": "We describe and evaluate a simple method to extract parallel sentences from comparable corpora. The approach, termed STACC, is based on expanded lexical sets and the Jaccard similarity coefficient. We evaluate our system against state-of-the-art methods on a large range of datasets in different domains, for ten language pairs, showing that it either matches or outperforms current methods across the board and gives significantly better results on the noisiest datasets. STACC is a portable method, requiring no particular adaptation for new domains or language pairs, thus enabling the efficient mining of parallel sentences in comparable corpora.", "keyphrases": ["comparable corpora", "ibm alignment", "set expansion operation"]} +{"id": "thater-etal-2011-word", "title": "Word Meaning in Context: A Simple and Effective Vector Model", "abstract": "We present a model that represents word meaning in context by vectors which are modified according to the words in the target\u2019s syntactic context. Contextualization of a vector is realized by reweighting its components, based on distributional information about the context words. Evaluation on a paraphrase ranking task derived from the SemEval 2007 Lexical Substitution Task shows that our model outperforms all previous models on this task. We show that our model supports a wider range of applications by evaluating it on a word sense disambiguation task. Results show that our model achieves state-of-the-art performance.", "keyphrases": ["distributional information", "target word", "vector representation", "pado\u0301", "group"]} +{"id": "ayan-dorr-2006-going", "title": "Going Beyond AER: An Extensive Analysis of Word Alignments and Their Impact on MT", "abstract": "This paper presents an extensive evaluation of five different alignments and investigates their impact on the corresponding MT system output. We introduce new measures for intrinsic evaluations and examine the distribution of phrases and untranslated words during decoding to identify which characteristics of different alignments affect translation.
We show that precision-oriented alignments yield better MT output (translating more words and using longer phrases) than recall-oriented alignments.", "keyphrases": ["aer", "recall-oriented alignment", "translation performance", "alignment error rate", "direct correlation"]} +{"id": "rodriguez-luna-etal-2020-internal", "title": "Internal and external pressures on language emergence: least effort, object constancy and frequency", "abstract": "In previous work, artificial agents were shown to achieve almost perfect accuracy in referential games where they have to communicate to identify images. Nevertheless, the resulting communication protocols rarely display salient features of natural languages, such as compositionality. In this paper, we propose some realistic sources of pressure on communication that avert this outcome. More specifically, we formalise the principle of least effort through an auxiliary objective. Moreover, we explore several game variants, inspired by the principle of object constancy, in which we alter the frequency, position, and luminosity of the objects in the images. We perform an extensive analysis on their effect through compositionality metrics, diagnostic classifiers, and zero-shot evaluation. Our findings reveal that the proposed sources of pressure result in emerging languages with less redundancy, more focus on high-level conceptual information, and better abilities of generalisation. Overall, our contributions reduce the gap between emergent and natural languages.", "keyphrases": ["least effort", "object constancy", "compositionality"]} +{"id": "le-etal-2020-flaubert-unsupervised", "title": "FlauBERT: Unsupervised Language Model Pre-training for French", "abstract": "Language models have become a key step to achieve state-of-the-art results in many different Natural Language Processing (NLP) tasks. Leveraging the huge amount of unlabeled texts nowadays available, they provide an efficient way to pre-train continuous word representations that can be fine-tuned for a downstream task, along with their contextualization at the sentence level. This has been widely demonstrated for English using contextualized representations (Dai and Le, 2015; Peters et al., 2018; Howard and Ruder, 2018; Radford et al., 2018; Devlin et al., 2019; Yang et al., 2019b). In this paper, we introduce and share FlauBERT, a model learned on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. We apply our French language models to diverse NLP tasks (text classification, paraphrasing, natural language inference, parsing, word sense disambiguation) and show that most of the time they outperform other pre-training approaches. Different versions of FlauBERT as well as a unified evaluation protocol for the downstream tasks, called FLUE (French Language Understanding Evaluation), are shared with the research community for further reproducible experiments in French NLP.", "keyphrases": ["french language", "text classification", "flaubert", "pos tagging"]} +{"id": "shavarani-sarkar-2021-better", "title": "Better Neural Machine Translation by Extracting Linguistic Information from BERT", "abstract": "Adding linguistic information (syntax or semantics) to neural machine translation (NMT) has mostly focused on using point estimates from pre-trained models.
Directly using the capacity of massive pre-trained contextual word embedding models such as BERT (Devlin et al., 2019) has been marginally useful in NMT because effective fine-tuning is difficult to obtain for NMT without making training brittle and unreliable. We augment NMT by extracting dense fine-tuned vector-based linguistic information from BERT instead of using point estimates. Experimental results show that our method of incorporating linguistic information helps NMT to generalize better in a variety of training contexts and is no more difficult to train than conventional Transformer-based NMT.", "keyphrases": ["neural machine translation", "linguistic information", "bert", "pre-trained model"]} +{"id": "hassan-radev-2010-identifying", "title": "Identifying Text Polarity Using Random Walks", "abstract": "Automatically identifying the polarity of words is a very important task in Natural Language Processing. It has applications in text classification, text filtering, analysis of product review, analysis of responses to surveys, and mining online discussions. We propose a method for identifying the polarity of words. We apply a Markov random walk model to a large word relatedness graph, producing a polarity estimate for any given word. A key advantage of the model is its ability to accurately and quickly assign a polarity sign and magnitude to any word. The method could be used both in a semi-supervised setting where a training set of labeled words is used, and in an unsupervised setting where a handful of seeds is used to define the two polarity classes. The method is experimentally tested using a manually labeled set of positive and negative words. It outperforms the state-of-the-art methods in the semi-supervised setting. The results in the unsupervised setting are comparable to the best reported values. However, the proposed method is faster and does not need a large corpus.", "keyphrases": ["polarity", "random walk model", "word relatedness graph", "previous research"]} +{"id": "shapira-etal-2019-crowdsourcing", "title": "Crowdsourcing Lightweight Pyramids for Manual Summary Evaluation", "abstract": "Conducting a manual evaluation is considered an essential part of summary evaluation methodology. Traditionally, the Pyramid protocol, which exhaustively compares system summaries to references, has been perceived as very reliable, providing objective scores. Yet, due to the high cost of the Pyramid method and the required expertise, researchers resorted to cheaper and less thorough manual evaluation methods, such as Responsiveness and pairwise comparison, attainable via crowdsourcing. We revisit the Pyramid approach, proposing a lightweight sampling-based version that is crowdsourcable. We analyze the performance of our method in comparison to original expert-based Pyramid evaluations, showing higher correlation relative to the common Responsiveness method. We release our crowdsourced Summary-Content-Units, along with all crowdsourcing scripts, for future evaluations.", "keyphrases": ["manual evaluation", "pyramid method", "crowdsourcing"]} +{"id": "li-etal-2014-soft", "title": "Soft Cross-lingual Syntax Projection for Dependency Parsing", "abstract": "This paper proposes a simple yet effective framework of soft cross-lingual syntax projection to transfer syntactic structures from source language to target language using monolingual treebanks and large-scale bilingual parallel text.
Here, soft means that we only project reliable dependencies to compose high-quality target structures. The projected instances are then used as additional training data to improve the performance of supervised parsers. The major issues for this idea are 1) errors from the source-language parser and unsupervised word aligner; 2) intrinsic syntactic non-isomorphism between languages; 3) incomplete parse trees after projection. To handle the first two issues, we propose to use a probabilistic dependency parser trained on the target-language treebank, and prune out unlikely projected dependencies that have low marginal probabilities. To make use of the incomplete projected syntactic structures, we adopt a new learning technique based on ambiguous labelings. For a word that has no head words after projection, we enrich the projected structure with all other words as its candidate heads as long as the newly-added dependency does not cross any projected dependencies. In this way, the syntactic structure of a sentence becomes a parse forest (ambiguous labels) instead of a single parse tree. During training, the objective is to maximize the mixed likelihood of manually labeled instances and projected instances with ambiguous labelings. Experimental results on benchmark data show that our method significantly outperforms a strong baseline supervised parser and previous syntax projection methods.", "keyphrases": ["cross-lingual syntax projection", "dependency parser", "ambiguous labeling"]} +{"id": "ye-etal-2021-one2set", "title": "One2Set: Generating Diverse Keyphrases as a Set", "abstract": "Recently, the sequence-to-sequence models have made remarkable progress on the task of keyphrase generation (KG) by concatenating multiple keyphrases in a predefined order as a target sequence during training. However, the keyphrases are inherently an unordered set rather than an ordered sequence. Imposing a predefined order will introduce wrong bias during training, which can highly penalize shifts in the order between keyphrases. In this work, we propose a new training paradigm One2Set without predefining an order to concatenate the keyphrases. To fit this paradigm, we propose a novel model that utilizes a fixed set of learned control codes as conditions to generate a set of keyphrases in parallel. To solve the problem that there is no correspondence between each prediction and target during training, we propose a K-step label assignment mechanism via bipartite matching, which greatly increases the diversity and reduces the repetition rate of generated keyphrases. The experimental results on multiple benchmarks demonstrate that our approach significantly outperforms the state-of-the-art methods.", "keyphrases": ["keyphrase", "ordered sequence", "training paradigm one2set"]} +{"id": "sachan-etal-2015-learning", "title": "Learning Answer-Entailing Structures for Machine Comprehension", "abstract": "Understanding open-domain text is one of the primary challenges in NLP. Machine comprehension evaluates the system\u2019s ability to understand text through a series of question-answering tasks on short pieces of text such that the correct answer can be found only in the given text. For this task, we posit that there is a hidden (latent) structure that explains the relation between the question, correct answer, and text. We call this the answer-entailing structure; given the structure, the correctness of the answer is evident. Since the structure is latent, it must be inferred. 
We present a unified max-margin framework that learns to find these hidden structures (given a corpus of question-answer pairs), and uses what it learns to answer machine comprehension questions on novel texts. We extend this framework to incorporate multi-task learning on the different subtasks that are required to perform machine comprehension. Evaluation on a publicly available dataset shows that our framework outperforms various IR and neural-network baselines, achieving an overall accuracy of 67.8% (vs. 59.9%, the best previously-published result).", "keyphrases": ["answer-entailing structure", "machine comprehension", "correct answer"]} +{"id": "leusch-etal-2010-multi", "title": "Multi-pivot translation by system combination", "abstract": "This paper describes a technique to exploit multiple pivot languages when using machine translation (MT) on language pairs with scarce bilingual resources, or where no translation system for a language pair is available. The principal idea is to generate intermediate translations in several pivot languages, translate them separately into the target language, and generate a consensus translation out of these using MT system combination techniques. Our technique can also be applied when a translation system for a language pair is available, but is limited in its translation accuracy because of scarce resources. Using statistical MT systems for the 11 different languages of Europarl, we show experimentally that a direct translation system can be replaced by this pivot approach without a loss in translation quality if about six pivot languages are available. Furthermore, we can already improve an existing MT system by adding two pivot systems to it. The maximum improvement was found to be 1.4% abs. in BLEU in our experiments for 8 or more pivot languages.", "keyphrases": ["intermediate translation", "different language", "individual system"]} +{"id": "schwenk-etal-2021-ccmatrix", "title": "CCMatrix: Mining Billions of High-Quality Parallel Sentences on the Web", "abstract": "We show that margin-based bitext mining in a multilingual sentence space can be successfully scaled to operate on monolingual corpora of billions of sentences. We use 32 snapshots of a curated common crawl corpus (Wenzel et al., 2019) totaling 71 billion unique sentences. Using one unified approach for 90 languages, we were able to mine 10.8 billion parallel sentences, out of which only 2.9 billion are aligned with English. We illustrate the capability of our scalable mining system to create high-quality training sets from one language to any other by training hundreds of different machine translation models and evaluating them on the many-to-many TED benchmark. Further, we evaluate on competitive translation benchmarks such as WMT and WAT. Using only mined bitext, we set a new state of the art for a single system on the WMT'19 test set for English-German/Russian/Chinese. In particular, our English/German and English/Russian systems outperform the best single ones by over 4 BLEU points and are on par with the best WMT'19 systems, which train on the WMT training data and augment it with backtranslation. We also achieve excellent results for distant language pairs like Russian/Japanese, outperforming the best submission at the 2020 WAT workshop.
All of the mined bitext will be freely available.", "keyphrases": ["web", "hundred", "sentence embedding", "train model"]} +{"id": "fort-etal-2012-modeling", "title": "Modeling the Complexity of Manual Annotation Tasks: a Grid of Analysis", "abstract": "Manual corpus annotation is getting widely used in Natural Language Processing (NLP). While being recognized as a difficult task, no in-depth analysis of its complexity has been performed yet. We provide in this article a grid of analysis of the different complexity dimensions of an annotation task, which helps estimate beforehand the difficulties and cost of annotation campaigns. We observe the applicability of this grid on existing annotation campaigns and detail its application on a real-world example.", "keyphrases": ["complexity", "annotation task", "grid", "unit"]} +{"id": "nn-2012-ttc", "title": "TTC - Terminology Extraction, Translation Tools and Comparable Corpora", "abstract": "TTC focuses on semi-/automatic acquisition of aligned bilingual terminologies from comparable corpora, i.e. from texts of the same domain (and possibly genre) in different languages. TTC develops techniques for the collection of comparable corpora, extraction of monolingual term candidates and their contexts for English, German, French, Spanish, Latvian, Russian and Chinese from the collected corpora. Furthermore, TTC defines and combines different symbolic and statistical strategies for the alignment of the extracted monolingual term candidates. TTC develops the software for project languages and tests it on several language pairs. The tools are provided as a standalone package and web service and include components for corpora crawling and management, monolingual term candidate extraction and alignment. An integration with EuroTermBank and selected computer-assisted translation tools and machine translation systems will be provided. TTC will evaluate the quality of machine translation which can be achieved by enhancing machine translation systems with automatically extracted terminology. The main target groups of the TTC tools are professionals from the translation, localization and/or documentation industry. TTC is at the beginning of its second year now and so far the project has made significant progress and achieved its first goals: requirements and definitions of the TTC tools were specified; the first TTC workshop with end users was successfully held; a crawler was developed and initial comparable corpora for project languages were collected and analyzed; and term identification issues were researched for project languages and first experiments on term extraction were performed for English, German and French.", "keyphrases": ["comparable corpora", "ttc", "such simplified method", "english-latvian"]} +{"id": "zhou-etal-2008-semi", "title": "Semi-Supervised Learning for Relation Extraction", "abstract": "This paper proposes a semi-supervised learning method for relation extraction. Given a small amount of labeled data and a large amount of unlabeled data, it first bootstraps a moderate number of weighted support vectors via SVM through a co-training procedure with random feature projection and then applies a label propagation (LP) algorithm via the bootstrapped support vectors. Evaluation on the ACE RDC 2003 corpus shows that our method outperforms the normal LP algorithm via all the available labeled data without SVM bootstrapping. Moreover, our method can largely reduce the computational burden.
This suggests that our proposed method can integrate the advantages of both SVM bootstrapping and label propagation.", "keyphrases": ["relation extraction", "unlabeled data", "moderate number", "support vector", "svm"]} +{"id": "wu-wang-2005-boosting", "title": "Boosting Statistical Word Alignment", "abstract": "This paper proposes an approach to improve statistical word alignment with the boosting method. Applying boosting to word alignment must solve two problems. The first is how to build the reference set for the training data. We propose an approach to automatically build a pseudo reference set, which can avoid manual annotation of the training set. The second is how to calculate the error rate of each individual word aligner. We solve this by calculating the error rate of a manually annotated held-out data set instead of the entire training set. In addition, the final ensemble takes into account the weights of the alignment links produced by the individual word aligners. Experimental results indicate that the boosting method proposed in this paper performs much better than the original word aligner, achieving a large error rate reduction.", "keyphrases": ["statistical word alignment", "weight", "error rate reduction"]} +{"id": "li-li-2014-query", "title": "Query-focused Multi-Document Summarization: Combining a Topic Model with Graph-based Semi-supervised Learning", "abstract": "Graph-based learning algorithms have been shown to be an effective approach for query-focused multi-document summarization (MDS). In this paper, we extend the standard graph ranking algorithm by proposing a two-layer (i.e. sentence layer and topic layer) graph-based semi-supervised learning approach based on topic modeling techniques. Experimental results on TAC datasets show that by considering topic information, we can effectively improve the summary performance.", "keyphrases": ["semi-supervised learning approach", "topic modeling technique", "query-focused multi-document summarization"]} +{"id": "labutov-etal-2015-deep", "title": "Deep Questions without Deep Understanding", "abstract": "We develop an approach for generating deep (i.e., high-level) comprehension questions from novel text that bypasses the myriad challenges of creating a full semantic representation. We do this by decomposing the task into an ontology-crowd-relevance workflow, consisting of first representing the original text in a low-dimensional ontology, then crowdsourcing candidate question templates aligned with that space, and finally ranking potentially relevant templates for a novel region of text. If ontological labels are not available, we infer them from the text. We demonstrate the effectiveness of this method on a corpus of articles from Wikipedia alongside human judgments, and find that we can generate relevant deep questions with a precision of over 85% while maintaining a recall of 70%.", "keyphrases": ["novel text", "ontology", "question generation"]} +{"id": "pal-etal-2016-catalog-online", "title": "CATaLog Online: Porting a Post-editing Tool to the Web", "abstract": "This paper presents CATaLog online, a new web-based MT and TM post-editing tool. CATaLog online is free software that can be used through a web browser and requires only a simple registration. The tool features a number of editing and log functions similar to the desktop version of CATaLog enhanced with several new features that we describe in detail in this paper.
CATaLog online is designed to allow users to post-edit both translation memory segments and machine translation output. The tool provides a complete set of log information currently not available in most commercial CAT tools. Log information can be used both for project management purposes and for the study of the translation process and translator's productivity.", "keyphrases": ["post-editing tool", "translator", "catalog online"]} +{"id": "ng-2007-semantic", "title": "Semantic Class Induction and Coreference Resolution", "abstract": "This paper examines whether a learning-based coreference resolver can be improved using semantic class knowledge that is automatically acquired from a version of the Penn Treebank in which the noun phrases are labeled with their semantic classes. Experiments on the ACE test data show that a resolver that employs such induced semantic class knowledge yields a statistically significant improvement of 2% in F-measure over one that exploits heuristically computed semantic class knowledge. In addition, the induced knowledge improves the accuracy of common noun resolution by 2-6%.", "keyphrases": ["coreference resolution", "semantic class knowledge", "wordnet"]} +{"id": "zhu-etal-2013-fast", "title": "Fast and Accurate Shift-Reduce Constituent Parsing", "abstract": "Shift-reduce dependency parsers give comparable accuracies to their chart-based counterparts, yet the best shift-reduce constituent parsers still lag behind the state-of-the-art. One important reason is the existence of unary nodes in phrase structure trees, which leads to different numbers of shift-reduce actions between different outputs for the same input. This turns out to have a large empirical impact on the framework of global training and beam search. We propose a simple yet effective extension to the shift-reduce process, which eliminates size differences between action sequences in beam-search. Our parser gives comparable accuracies to the state-of-the-art chart parsers. With linear run-time complexity, our parser is over an order of magnitude faster than the fastest chart parser.", "keyphrases": ["constituent", "beam search", "shift-reduce parser"]} +{"id": "choi-cardie-2010-hierarchical", "title": "Hierarchical Sequential Learning for Extracting Opinions and Their Attributes", "abstract": "Automatic opinion recognition involves a number of related tasks, such as identifying the boundaries of opinion expression, determining their polarity, and determining their intensity. Although much progress has been made in this area, existing research typically treats each of the above tasks in isolation. In this paper, we apply a hierarchical parameter sharing technique using Conditional Random Fields for fine-grained opinion analysis, jointly detecting the boundaries of opinion expressions as well as determining two of their key attributes --- polarity and intensity. Our experimental results show that our proposed approach improves the performance over a baseline that does not exploit hierarchical structure among the classes.
In addition, we find that the joint approach outperforms a baseline that is based on cascading two separate components.", "keyphrases": ["opinion expression", "isolation", "conditional random fields", "sequence labeler"]} +{"id": "ling-rush-2017-coarse", "title": "Coarse-to-Fine Attention Models for Document Summarization", "abstract": "Sequence-to-sequence models with attention have been successful for a variety of NLP problems, but their speed does not scale well for tasks with long source sequences such as document summarization. We propose a novel coarse-to-fine attention model that hierarchically reads a document, using coarse attention to select top-level chunks of text and fine attention to read the words of the chosen chunks. While the computation for training standard attention models scales linearly with source sequence length, our method scales with the number of top-level chunks and can handle much longer sequences. Empirically, we find that while coarse-to-fine attention models lag behind state-of-the-art baselines, our method achieves the desired behavior of sparsely attending to subsets of the document for generation.", "keyphrases": ["document summarization", "chunk", "coarse-to-fine attention model"]} +{"id": "ailomaa-etal-2006-archivus", "title": "Archivus: A Multimodal System for Multimedia Meeting Browsing and Retrieval", "abstract": "This paper presents Archivus, a multi-modal language-enabled meeting browsing and retrieval system. The prototype is in an early stage of development, and we are currently exploring the role of natural language for interacting in this relatively unfamiliar and complex domain. We briefly describe the design and implementation status of the system, and then focus on how this system is used to elicit useful data for supporting hypotheses about multimodal interaction in the domain of meeting retrieval and for developing NLP modules for this specific domain.", "keyphrases": ["meeting", "retrieval", "archivus"]} +{"id": "colin-etal-2016-webnlg", "title": "The WebNLG Challenge: Generating Text from DBPedia Data", "abstract": "With the emergence of the linked data initiative and the rapid development of RDF (Resource Description Framework) datasets, several approaches have recently been proposed for generating text from RDF data (Sun and Mellish, 2006; Duma and Klein, 2013; Bontcheva and Wilks, 2004; Cimiano et al., 2013; Lebret et al., 2016). To support the evaluation and comparison of such systems, we propose a shared task on generating text from DBPedia data. The training data will consist of Data/Text pairs where the data is a set of triples extracted from DBPedia and the text is a verbalisation of these triples. In essence, the task consists in mapping data to text. Specific subtasks include sentence segmentation (how to chunk the input data into sentences), lexicalisation (of the DBPedia properties), aggregation (how to avoid repetitions) and surface realisation (how to build a syntactically correct and natural sounding text).", "keyphrases": ["webnlg challenge", "dbpedia data", "rdf", "data-to-text generation", "meaning representation"]} +{"id": "kedia-chinthakindi-2021-keep", "title": "Keep Learning: Self-supervised Meta-learning for Learning from Inference", "abstract": "A common approach in many machine learning algorithms involves self-supervised learning on large unlabeled data before fine-tuning on downstream tasks to further improve performance.
A new approach for language modelling, called dynamic evaluation, further fine-tunes a trained model during inference using trivially-present ground-truth labels, giving a large improvement in performance. However, this approach does not easily extend to classification tasks, where ground-truth labels are absent during inference. We propose to solve this issue by utilizing self-training and back-propagating the loss from the model's own class-balanced predictions (pseudo-labels), adapting the Reptile algorithm from meta-learning, combined with an inductive bias towards pre-trained weights to improve generalization. Our method improves the performance of standard backbones such as BERT, Electra, and ResNet-50 on a wide variety of tasks, such as question answering on SQuAD and NewsQA, benchmark task SuperGLUE, conversation response selection on Ubuntu Dialog corpus v2.0, as well as image classification on MNIST and ImageNet without any changes to the underlying models. Our proposed method outperforms previous approaches, enables self-supervised fine-tuning during inference of any classifier model to better adapt to target domains, can be easily adapted to any model, and is also effective in online and transfer-learning settings.", "keyphrases": ["pseudo-label", "generalization", "image classification"]} +{"id": "del-tredici-etal-2019-short", "title": "Short-Term Meaning Shift: A Distributional Exploration", "abstract": "We present the first exploration of meaning shift over short periods of time in online communities using distributional representations. We create a small annotated dataset and use it to assess the performance of a standard model for meaning shift detection on short-term meaning shift. We find that the model has problems distinguishing meaning shift from referential phenomena, and propose a measure of contextual variability to remedy this.", "keyphrases": ["period", "short-term meaning shift", "del"]} +{"id": "takase-etal-2016-neural", "title": "Neural Headline Generation on Abstract Meaning Representation", "abstract": "Neural network-based encoder-decoder models are among recent attractive methodologies for tackling natural language generation tasks. This paper investigates the usefulness of structural syntactic and semantic information additionally incorporated in a baseline neural attention-based model. We encode results obtained from an abstract meaning representation (AMR) parser using a modified version of Tree-LSTM. Our proposed attention-based AMR encoder-decoder model improves headline generation benchmarks compared with the baseline neural attention-based model.", "keyphrases": ["abstract meaning representation", "encoder-decoder model", "neural attention-based model", "text generation system", "summarization"]} +{"id": "neviarouskaya-aono-2013-extracting", "title": "Extracting Causes of Emotions from Text", "abstract": "This paper focuses on the novel task of automatic extraction of phrases related to causes of emotions. The analysis of emotional causes in sentences, where emotions are explicitly indicated through emotion keywords can provide the foundation for research on challenging task of recognition of implicit affect from text. We developed a corpus of emotion causes specific for 22 emotions. Based on the analysis of this corpus we introduce a method for the detection of the linguistic relations between an emotion and its cause and the extraction of the phrases describing the emotion causes. 
The method employs syntactic and dependency parsers and rules for the analysis of eight types of emotion-cause linguistic relations. The results of evaluation showed that our method performed with a high level of accuracy (82%).", "keyphrases": ["cause", "emotion", "linguistic relation"]} +{"id": "butnariu-etal-2009-semeval", "title": "SemEval-2010 Task 9: The Interpretation of Noun Compounds Using Paraphrasing Verbs and Prepositions", "abstract": "Previous research has shown that the meaning of many noun-noun compounds N1 N2 can be approximated reasonably well by paraphrasing clauses of the form 'N2 that ... N1', where '...' stands for a verb with or without a preposition. For example, malaria mosquito is a 'mosquito that carries malaria'. Evaluating the quality of such paraphrases is the theme of Task 9 at SemEval-2010. This paper describes some background, the task definition, the process of data collection and the task results. We also venture a few general conclusions before the participating teams present their systems at the SemEval-2010 workshop. There were 5 teams who submitted 7 systems.", "keyphrases": ["paraphrasing", "preposition", "semeval task"]} +{"id": "barbu-2015-spotting", "title": "Spotting false translation segments in translation memories", "abstract": "The problem of spotting false translations in the bi-segments of translation memories can be thought of as a classification task. We test the accuracy of various machine learning algorithms to find segments that are not true translations. We show that the Church-Gale scores in two large bi-segment sets extracted from MyMemory can be used for finding positive and negative training examples for the machine learning algorithms. The performance of the winning classification algorithms, though high, is not yet sufficient for automatic cleaning of translation memories.", "keyphrases": ["false translation", "translation memory", "bi-segment"]} +{"id": "sachan-etal-2021-syntax", "title": "Do Syntax Trees Help Pre-trained Transformers Extract Information?", "abstract": "Much recent work suggests that incorporating syntax information from dependency trees can improve task-specific transformer models. However, the effect of incorporating dependency tree information into pre-trained transformer models (e.g., BERT) remains unclear, especially given recent studies highlighting how these models implicitly encode syntax. In this work, we systematically study the utility of incorporating dependency trees into pre-trained transformers on three representative information extraction tasks: semantic role labeling (SRL), named entity recognition, and relation extraction. We propose and investigate two distinct strategies for incorporating dependency structure: a late fusion approach, which applies a graph neural network on the output of a transformer, and a joint fusion approach, which infuses syntax structure into the transformer attention layers. These strategies are representative of prior work, but we introduce additional model design elements that are necessary for obtaining improved performance. Our empirical analysis demonstrates that these syntax-infused transformers obtain state-of-the-art results on SRL and relation extraction tasks.
However, our analysis also reveals a critical shortcoming of these models: we find that their performance gains are highly contingent on the availability of human-annotated dependency parses, which raises important questions regarding the viability of syntax-augmented transformers in real-world applications.", "keyphrases": ["pre-trained transformer model", "distinct strategy", "dependency structure", "state-of-the-art result"]} +{"id": "mcnamee-etal-2011-cross", "title": "Cross-Language Entity Linking", "abstract": "There has been substantial recent interest in aligning mentions of named entities in unstructured texts to knowledge base descriptors, a task commonly called entity linking. This technology is crucial for applications in knowledge discovery and text data mining. This paper presents experiments in the new problem of cross-language entity linking, where documents and named entities are in a different language than that used for the content of the reference knowledge base. We have created a new test collection to evaluate cross-language entity linking performance in twenty-one languages. We present experiments that examine issues such as: the importance of transliteration; the utility of cross-language information retrieval; and the potential benefit of multilingual named entity recognition. Our best model achieves performance which is 94% of a strong monolingual baseline.", "keyphrases": ["multiple language", "cross-lingual entity", "xel"]} +{"id": "allauzen-etal-2011-limsi", "title": "LIMSI @ WMT11", "abstract": "This paper describes LIMSI's submissions to the Sixth Workshop on Statistical Machine Translation. We report results for the French-English and German-English shared translation tasks in both directions. Our systems use n-code, an open source Statistical Machine Translation system based on bilingual n-grams. For the French-English task, we focussed on finding efficient ways to take advantage of the large and heterogeneous training parallel data. In particular, using a simple filtering strategy helped to improve both processing time and translation quality. To translate from English to French and German, we also investigated the use of the SOUL language model in Machine Translation and showed significant improvements with a 10-gram SOUL model. We also briefly report experiments with several alternatives to the standard n-best MERT procedure, leading to a significant speed-up.", "keyphrases": ["limsi", "impressive accuracy improvement", "translation model"]} +{"id": "ling-etal-2015-two", "title": "Two/Too Simple Adaptations of Word2Vec for Syntax Problems", "abstract": "We present two simple modifications to the models in the popular Word2Vec tool, in order to generate embeddings more suited to tasks involving syntax. The main issue with the original models is the fact that they are insensitive to word order. While order independence is useful for inducing semantic representations, this leads to suboptimal results when they are used to solve syntax-based problems. We show improvements in part-of-speech tagging and dependency parsing using our proposed models.", "keyphrases": ["word2vec", "syntax-based problem", "skip-gram"]} +{"id": "francis-landau-etal-2016-capturing", "title": "Capturing Semantic Similarity for Entity Linking with Convolutional Neural Networks", "abstract": "A key challenge in entity linking is making effective use of contextual information to disambiguate mentions that might refer to different entities in different contexts.
We present a model that uses convolutional neural networks to capture semantic correspondence between a mention's context and a proposed target entity. These convolutional networks operate at multiple granularities to exploit various kinds of topic information, and their rich parameterization gives them the capacity to learn which n-grams characterize different topics. We combine these networks with a sparse linear model to achieve state-of-the-art performance on multiple entity linking datasets, outperforming the prior systems of Durrett and Klein (2014) and Nguyen et al. (2014).", "keyphrases": ["semantic similarity", "entity linking", "convolutional neural network", "wikipedia"]} +{"id": "toral-etal-2018-attaining", "title": "Attaining the Unattainable? Reassessing Claims of Human Parity in Neural Machine Translation", "abstract": "We reassess a recent study (Hassan et al., 2018) that claimed that machine translation (MT) has reached human parity for the translation of news from Chinese into English, using pairwise ranking and considering three variables that were not taken into account in that previous study: the language in which the source side of the test set was originally written, the translation proficiency of the evaluators, and the provision of inter-sentential context. If we consider only original source text (i.e. not translated from another language, or translationese), then we find evidence showing that human parity has not been achieved. We compare the judgments of professional translators against those of non-experts and discover that those of the experts result in higher inter-annotator agreement and better discrimination between human and machine translations. In addition, we analyse the human translations of the test set and identify important translation issues. Finally, based on these findings, we provide a set of recommendations for future human evaluations of MT.", "keyphrases": ["parity", "inter-annotator agreement", "human translation", "several recent study"]} +{"id": "denero-uszkoreit-2011-inducing", "title": "Inducing Sentence Structure from Parallel Corpora for Reordering", "abstract": "When translating among languages that differ substantially in word order, machine translation (MT) systems benefit from syntactic pre-ordering---an approach that uses features from a syntactic parse to permute source words into a target-language-like order. This paper presents a method for inducing parse trees automatically from a parallel corpus, instead of using a supervised parser trained on a tree-bank. These induced parses are used to pre-order source sentences. We demonstrate that our induced parser is effective: it not only improves a state-of-the-art phrase-based system with integrated reordering, but also approaches the performance of a recent pre-ordering method based on a supervised parser. These results show that the syntactic structure which is relevant to MT pre-ordering can be learned automatically from parallel text, thus establishing a new application for unsupervised grammar induction.", "keyphrases": ["sentence structure", "pre-ordering method", "induction"]} +{"id": "wu-etal-2019-self", "title": "Self-Supervised Dialogue Learning", "abstract": "The sequential order of utterances is often meaningful in coherent dialogues, and the order changes of utterances could lead to low-quality and incoherent conversations. 
We consider the order information as a crucial supervised signal for dialogue learning, which, however, has been neglected by many previous dialogue systems. Therefore, in this paper, we introduce a self-supervised learning task, inconsistent order detection, to explicitly capture the flow of conversation in dialogues. Given a sampled utterance pair triple, the task is to predict whether it is ordered or misordered. Then we propose a sampling-based self-supervised network SSN to perform the prediction with sampled triple references from previous dialogue history. Furthermore, we design a joint learning framework where SSN can guide the dialogue systems towards more coherent and relevant dialogue learning through adversarial training. We demonstrate that the proposed methods can be applied to both open-domain and task-oriented dialogue scenarios, and achieve the new state-of-the-art performance on the OpenSubtitles and Movie-Ticket Booking datasets.", "keyphrases": ["dialogue learning", "self-supervised learning task", "inconsistent order detection"]} +{"id": "cettolo-etal-2013-report", "title": "Report on the 10th IWSLT evaluation campaign", "abstract": "The paper overviews the tenth evaluation campaign organized by the IWSLT workshop. The 2013 evaluation offered multiple tracks on lecture transcription and translation based on the TED Talks corpus. In particular, this year IWSLT included two automatic speech recognition tracks, on English and German, three speech translation tracks, from English to French, English to German, and German to English, and three text translation tracks, also from English to French, English to German, and German to English. In addition to the official tracks, speech and text translation optional tracks were offered involving 12 other languages: Arabic, Spanish, Portuguese (B), Italian, Chinese, Polish, Persian, Slovenian, Turkish, Dutch, Romanian, Russian. Overall, 18 teams participated in the evaluation for a total of 217 primary runs submitted. All runs were evaluated with objective metrics on a current test set and two progress test sets, in order to compare progress against systems of the previous years. In addition, submissions of one of the official machine translation tracks were also evaluated with human post-editing.", "keyphrases": ["iwslt", "evaluation campaign", "spoken language translation"]} +{"id": "seginer-2007-fast", "title": "Fast Unsupervised Incremental Parsing", "abstract": "This paper describes an incremental parser and an unsupervised learning algorithm for inducing this parser from plain text. The parser uses a representation for syntactic structure similar to dependency links which is well-suited for incremental parsing. In contrast to previous unsupervised parsers, the parser does not use part-of-speech tags and both learning and parsing are local and fast, requiring no explicit clustering or global optimization. The parser is evaluated by converting its output into equivalent bracketing and improves on previously published results for unsupervised parsing from plain text.", "keyphrases": ["unsupervised parser", "optimization", "bracketing", "ccl", "punctuation"]} +{"id": "marcheggiani-artieres-2014-experimental", "title": "An Experimental Comparison of Active Learning Strategies for Partially Labeled Sequences", "abstract": "Active learning (AL) consists of asking human annotators to annotate automatically selected data that are assumed to bring the most benefit in the creation of a classifier.
AL makes it possible to learn accurate systems with much less annotated data than is required by pure supervised learning algorithms, hence limiting the tedious effort of annotating a large collection of data. We experimentally investigate the behavior of several AL strategies for sequence labeling tasks (in a partially-labeled scenario) tailored to Partially-Labeled Conditional Random Fields, on four sequence labeling tasks: phrase chunking, part-of-speech tagging, named-entity recognition, and bioentity recognition.", "keyphrases": ["active learning", "human annotator", "sequence labeling task"]} +{"id": "zollmann-etal-2008-systematic", "title": "A Systematic Comparison of Phrase-Based, Hierarchical and Syntax-Augmented Statistical MT", "abstract": "Probabilistic synchronous context-free grammar (PSCFG) translation models define weighted transduction rules that represent translation and reordering operations via nonterminal symbols. In this work, we investigate the source of the improvements in translation quality reported when using two PSCFG translation models (hierarchical and syntax-augmented), when extending a state-of-the-art phrase-based baseline that serves as the lexical support for both PSCFG models. We isolate the impact on translation quality for several important design decisions in each model. We perform this comparison on three NIST language translation tasks: Chinese-to-English, Arabic-to-English and Urdu-to-English, each representing unique challenges.", "keyphrases": ["translation quality", "phrase-based model", "syntax-based model", "hierarchical model", "samt"]} +{"id": "liu-etal-2022-p", "title": "P-Tuning: Prompt Tuning Can Be Comparable to Fine-tuning Across Scales and Tasks", "abstract": "Prompt tuning, which only tunes continuous prompts with a frozen language model, substantially reduces per-task storage and memory usage at training. However, in the context of NLU, prior work reveals that prompt tuning does not perform well for normal-sized pretrained models. We also find that existing methods of prompt tuning cannot handle hard sequence labeling tasks, indicating a lack of universality. We present a novel empirical finding that properly optimized prompt tuning can be universally effective across a wide range of model scales and NLU tasks. It matches the performance of finetuning while having only 0.1%-3% tuned parameters. Our method P-Tuning v2 is an implementation of Deep Prompt Tuning (CITATION) optimized and adapted for NLU. Given the universality and simplicity of P-Tuning v2, we believe it can serve as an alternative to finetuning and a strong baseline for future research.", "keyphrases": ["prompt tuning", "language model", "comparable performance"]} +{"id": "dahlmeier-ng-2011-grammatical", "title": "Grammatical Error Correction with Alternating Structure Optimization", "abstract": "We present a novel approach to grammatical error correction based on Alternating Structure Optimization. As part of our work, we introduce the NUS Corpus of Learner English (NUCLE), a fully annotated one-million-word corpus of learner English available for research purposes. We conduct an extensive evaluation for article and preposition errors using various feature sets. Our experiments show that our approach outperforms two baselines trained on non-learner text and learner text, respectively.
Our approach also outperforms two commercial grammar checking software packages.", "keyphrases": ["alternating structure optimization", "novel approach", "preposition error", "grammatical error correction", "reason"]} +{"id": "dyer-resnik-2010-context", "title": "Context-free reordering, finite-state translation", "abstract": "We describe a class of translation model in which a set of input variants encoded as a context-free forest is translated using a finite-state translation model. The forest structure of the input is well-suited to representing word order alternatives, making it straightforward to model translation as a two-step process: (1) tree-based source reordering and (2) phrase transduction. By treating the reordering process as a latent variable in a probabilistic translation model, we can learn a long-range source reordering model without example reordered sentences, which are problematic to construct. The resulting model has state-of-the-art translation performance, uses linguistically motivated features to effectively model long range reordering, and is significantly smaller than a comparable hierarchical phrase-based translation model.", "keyphrases": ["latent variable", "context-free reordering", "syntax-based model"]} +{"id": "warstadt-etal-2019-neural", "title": "Neural Network Acceptability Judgments", "abstract": "This paper investigates the ability of artificial neural networks to judge the grammatical acceptability of a sentence, with the goal of testing their linguistic competence. We introduce the Corpus of Linguistic Acceptability (CoLA), a set of 10,657 English sentences labeled as grammatical or ungrammatical from published linguistics literature. As baselines, we train several recurrent neural network models on acceptability classification, and find that our models outperform unsupervised models by Lau et al. (2016) on CoLA. Error-analysis on specific grammatical phenomena reveals that both Lau et al.'s models and ours learn systematic generalizations like subject-verb-object order. However, all models we test perform far below human level on a wide range of grammatical constructions.", "keyphrases": ["acceptability", "grammaticality", "10k sentence", "human performance"]} +{"id": "nivre-etal-2018-enhancing", "title": "Enhancing Universal Dependency Treebanks: A Case Study", "abstract": "We evaluate two cross-lingual techniques for adding enhanced dependencies to existing treebanks in Universal Dependencies. We apply a rule-based system developed for English and a data-driven system trained on Finnish to Swedish and Italian. We find that both systems are accurate enough to bootstrap enhanced dependencies in existing UD treebanks. In the case of Italian, results are even on par with those of a prototype language-specific system.", "keyphrases": ["universal dependencies", "treebank", "data-driven system"]} +{"id": "yao-etal-2013-semi", "title": "Semi-Markov Phrase-Based Monolingual Alignment", "abstract": "We introduce a novel discriminative model for phrase-based monolingual alignment using a semi-Markov CRF. Our model achieves state-of-the-art alignment accuracy on two phrase-based alignment datasets (RTE and paraphrase), while doing significantly better than other strong baselines in both non-identical alignment and phrase-only alignment.
Additional experiments highlight the potential benefit of our alignment model to RTE, paraphrase identification and question answering, where even a naive application of our model\u2019s alignment score approaches the state of the art.", "keyphrases": ["monolingual alignment", "semi-markov crf", "paraphrase identification", "non-homographic nature", "semantic unit"]} +{"id": "li-etal-2016-generative", "title": "Generative Topic Embedding: a Continuous Representation of Documents", "abstract": "Word embedding maps words into a low-dimensional continuous embedding space by exploiting the local word collocation patterns in a small context window. On the other hand, topic modeling maps documents onto a low-dimensional topic space, by utilizing the global word collocation patterns in the same document. These two types of patterns are complementary. In this paper, we propose a generative topic embedding model to combine the two types of patterns. In our model, topics are represented by embedding vectors, and are shared across documents. The probability of each word is influenced by both its local context and its topic. A variational inference method yields the topic embeddings as well as the topic mixing proportions for each document. Jointly they represent the document in a low-dimensional continuous space. In two document classification tasks, our method performs better than eight existing methods, with fewer features. In addition, we illustrate with an example that our method can generate coherent topics even based on only one document.", "keyphrases": ["word embedding", "variational inference method", "generative topic"]} +{"id": "aker-etal-2013-extracting", "title": "Extracting bilingual terminologies from comparable corpora", "abstract": "In this paper we present a method for extracting bilingual terminologies from comparable corpora. In our approach we treat bilingual term extraction as a classification problem. For classification we use an SVM binary classifier and training data taken from the EUROVOC thesaurus. We test our approach on a held-out test set from EUROVOC and perform precision, recall and f-measure evaluations for 20 European language pairs. The performance of our classifier reaches the 100% precision level for many language pairs. We also perform manual evaluation on bilingual terms extracted from English-German term-tagged comparable corpora. The results of this manual evaluation showed 60-83% of the term pairs generated are exact translations and over 90% exact or partial translations.", "keyphrases": ["bilingual terminology", "classification problem", "eurovoc thesaurus"]} +{"id": "faralli-navigli-2012-new", "title": "A New Minimally-Supervised Framework for Domain Word Sense Disambiguation", "abstract": "We present a new minimally-supervised framework for performing domain-driven Word Sense Disambiguation (WSD). Glossaries for several domains are iteratively acquired from the Web by means of a bootstrapping technique. The acquired glosses are then used as the sense inventory for fully-unsupervised domain WSD. 
Our experiments, on new and gold-standard datasets, show that our wide-coverage framework enables high-performance results on dozens of domains at a coarse and fine-grained level.", "keyphrases": ["new minimally-supervised framework", "word sense disambiguation", "wsd"]} +{"id": "xi-etal-2012-enhancing", "title": "Enhancing Statistical Machine Translation with Character Alignment", "abstract": "The dominant practice of statistical machine translation (SMT) uses the same Chinese word segmentation specification in both the alignment and translation rule induction steps when building a Chinese-English SMT system, which may suffer from a suboptimality problem: word segmentation that is better for alignment is not necessarily better for translation. To tackle this, we propose a framework that uses two different segmentation specifications for alignment and translation respectively: we use Chinese characters as the basic unit for alignment, and then convert this alignment to conventional word alignment for translation rule induction. Experimentally, our approach outperformed two baselines: a fully word-based system (using words for both alignment and translation) and a fully character-based system, in terms of alignment quality and translation performance.", "keyphrases": ["statistical machine translation", "unit", "alignment quality"]} +{"id": "koehn-etal-2009-462", "title": "462 Machine Translation Systems for Europe", "abstract": "We built 462 machine translation systems for all language pairs of the Acquis Communautaire corpus. We report and analyse the performance of these systems, and compare them against pivot translation and a number of system combination methods (multi-pivot, multi-source) that are possible due to the available systems.", "keyphrases": ["acquis communautaire corpus", "pivot language", "news", "europarl", "other domain"]} +{"id": "ji-eisenstein-2013-discriminative", "title": "Discriminative Improvements to Distributional Sentence Similarity", "abstract": "Matrix and tensor factorization have been applied to a number of semantic relatedness tasks, including paraphrase identification. The key idea is that similarity in the latent space implies semantic relatedness. We describe three ways in which labeled data can improve the accuracy of these approaches on paraphrase classification. First, we design a new discriminative term-weighting metric called TF-KLD, which outperforms TF-IDF. Next, we show that using the latent representation from matrix factorization as features in a classification algorithm substantially improves accuracy. Finally, we combine latent features with fine-grained n-gram overlap features, yielding performance that is 3% more accurate than the prior state-of-the-art.", "keyphrases": ["paraphrase identification", "tf-idf", "matrix factorization"]} +{"id": "gouws-etal-2011-unsupervised", "title": "Unsupervised Mining of Lexical Variants from Noisy Text", "abstract": "The amount of data produced in user-generated content continues to grow at a staggering rate. However, the text found in these media can deviate wildly from the standard rules of orthography, syntax and even semantics and present significant problems to downstream applications which make use of this noisy data. In this paper we present a novel unsupervised method for extracting domain-specific lexical variants given a large volume of text.
We demonstrate the utility of this method by applying it to normalize text messages found in the online social media service, Twitter, into their most likely standard English versions. Our method yields a 20% reduction in word error rate over an existing state-of-the-art approach.", "keyphrases": ["noisy text", "strongly-associated word pair", "exception dictionary"]} +{"id": "el-haj-etal-2016-learning", "title": "Learning Tone and Attribution for Financial Text Mining", "abstract": "Attribution bias refers to the tendency of people to attribute successes to their own abilities but failures to external factors. In a business context an internal factor might be the restructuring of the firm and an external factor might be an unfavourable change in exchange or interest rates. In accounting research, the presence of an attribution bias has been demonstrated for the narrative sections of the annual financial reports. Previous studies have applied manual content analysis to this problem but in this paper we present novel work to automate the analysis of attribution bias through using machine learning algorithms. Previous studies have only applied manual content analysis on a small scale to reveal such a bias in the narrative section of annual financial reports. In our work a group of experts in accounting and finance labelled and annotated a list of 32,449 sentences from a random sample of UK Preliminary Earning Announcements (PEAs) to allow us to examine whether sentences in PEAs contain internal or external attribution and which kinds of attributions are linked to positive or negative performance. We wished to examine whether human annotators could agree on coding this difficult task and whether Machine Learning (ML) could be applied reliably to replicate the coding process on a much larger scale. Our best machine learning algorithm correctly classified performance sentences with 70% accuracy and detected tone and attribution in financial PEAs with accuracy of 79%.", "keyphrases": ["attribution", "narrative section", "annual financial report", "report"]} +{"id": "zhang-etal-2019-sp", "title": "SP-10K: A Large-scale Evaluation Set for Selectional Preference Acquisition", "abstract": "Selectional Preference (SP) is a commonly observed language phenomenon and proved to be useful in many natural language processing tasks. To provide a better evaluation method for SP models, we introduce SP-10K, a large-scale evaluation set that provides human ratings for the plausibility of 10,000 SP pairs over five SP relations, covering 2,500 most frequent verbs, nouns, and adjectives in American English. Three representative SP acquisition methods based on pseudo-disambiguation are evaluated with SP-10K. To demonstrate the importance of our dataset, we investigate the relationship between SP-10K and the commonsense knowledge in ConceptNet5 and show the potential of using SP to represent the commonsense knowledge. We also use the Winograd Schema Challenge to prove that the proposed new SP relations are essential for the hard pronoun coreference resolution problem.", "keyphrases": ["large-scale evaluation set", "selectional preference", "plausibility"]} +{"id": "malmasi-cahill-2015-measuring", "title": "Measuring Feature Diversity in Native Language Identification", "abstract": "The task of Native Language Identification (NLI) is typically solved with machine learning methods, and systems make use of a wide variety of features. 
Some preliminary studies have been conducted to examine the effectiveness of individual features; however, no systematic study of feature interaction has been carried out. We propose a function to measure feature independence and analyze its effectiveness on a standard NLI corpus.", "keyphrases": ["native language identification", "nli", "feature interaction"]} +{"id": "cho-etal-2019-mixture", "title": "Mixture Content Selection for Diverse Sequence Generation", "abstract": "Generating diverse sequences is important in many NLP applications such as question generation or summarization that exhibit semantically one-to-many relationships between the source and target sequences. We present a method to explicitly separate diversification from generation using a general plug-and-play module (called SELECTOR) that wraps around and guides an existing encoder-decoder model. The diversification stage uses a mixture of experts to sample different binary masks on the source sequence for diverse content selection. The generation stage uses a standard encoder-decoder model given each selected content from the source sequence. Due to the non-differentiable nature of discrete sampling and the lack of ground truth labels for binary mask, we leverage a proxy for ground truth mask and adopt stochastic hard-EM for training. In question generation (SQuAD) and abstractive summarization (CNN-DM), our method demonstrates significant improvements in accuracy, diversity and training efficiency, including state-of-the-art top-1 accuracy in both datasets, 6% gain in top-5 accuracy, and 3.7 times faster training over a state-of-the-art model. Our code is publicly available at .", "keyphrases": ["diversity", "question generation", "selector", "different binary mask", "research problem"]} +{"id": "kementchedjhieva-etal-2019-lost", "title": "Lost in Evaluation: Misleading Benchmarks for Bilingual Dictionary Induction", "abstract": "The task of bilingual dictionary induction (BDI) is commonly used for intrinsic evaluation of cross-lingual word embeddings. The largest dataset for BDI was generated automatically, so its quality is dubious. We study the composition and quality of the test sets for five diverse languages from this dataset, with concerning findings: (1) a quarter of the data consists of proper nouns, which can hardly be indicative of BDI performance, and (2) there are pervasive gaps in the gold-standard targets. These issues appear to affect the ranking between cross-lingual embedding systems on individual languages, and the overall degree to which the systems differ in performance. With proper nouns removed from the data, the margin between the top two systems included in the study grows from 3.4% to 17.2%. Manual verification of the predictions, on the other hand, reveals that gaps in the gold standard targets artificially inflate the margin between the two systems on English to Bulgarian BDI from 0.1% to 6.7%. We thus suggest that future research either avoids drawing conclusions from quantitative results on this BDI dataset, or accompanies such evaluation with rigorous error analysis.", "keyphrases": ["bilingual dictionary induction", "test set", "conclusion"]} +{"id": "li-khudanpur-2008-large", "title": "Large-scale Discriminative n-gram Language Models for Statistical Machine Translation", "abstract": "We extend discriminative n-gram language modeling techniques originally proposed for automatic speech recognition to a statistical machine translation task.
In this context, we propose a novel data selection method that leads to good models using a fraction of the training data. We carry out systematic experiments on several benchmark tests for Chinese to English translation using a hierarchical phrase-based machine translation system, and show that a discriminative language model significantly improves upon a state-of-the-art baseline. The experiments also highlight the benefits of our data selection method.", "keyphrases": ["statistical machine translation", "speech recognition", "list"]} +{"id": "mishra-etal-2019-modular", "title": "A Modular Architecture for Unsupervised Sarcasm Generation", "abstract": "In this paper, we propose a novel framework for sarcasm generation; the system takes a literal negative opinion as input and translates it into a sarcastic version. Our framework does not require any paired data for training. Sarcasm emanates from context-incongruity which becomes apparent as the sentence unfolds. Our framework introduces incongruity into the literal input version through modules that: (a) filter factual content from the input opinion, (b) retrieve incongruous phrases related to the filtered facts and (c) synthesize sarcastic text from the incongruous filtered and incongruous phrases. The framework employs reinforced neural sequence to sequence learning and information retrieval and is trained only using unlabeled non-sarcastic and sarcastic opinions. Since no labeled dataset exists for such a task, for evaluation, we manually prepare a benchmark dataset containing literal opinions and their sarcastic paraphrases. Qualitative and quantitative performance analyses on the data reveal our system's superiority over baselines built using known unsupervised statistical and neural machine translation and style transfer techniques.", "keyphrases": ["modular architecture", "unsupervised sarcasm generation", "information retrieval", "paraphrase generator", "natural part"]} +{"id": "uchiumi-etal-2015-inducing", "title": "Inducing Word and Part-of-Speech with Pitman-Yor Hidden Semi-Markov Models", "abstract": "We propose a nonparametric Bayesian model for joint unsupervised word segmentation and part-of-speech tagging from raw strings. Extending a previous model for word segmentation, our model is called a Pitman-Yor Hidden Semi-Markov Model (PYHSMM) and considered as a method to build a class n-gram language model directly from strings, while integrating character and word level information. Experimental results on standard datasets on Japanese, Chinese and Thai revealed it outperforms previous results to yield the state-of-the-art accuracies. This model will also serve to analyze a structure of a language whose words are not identified a priori.", "keyphrases": ["semi-markov model", "part-of-speech tagging", "pyhsmm"]} +{"id": "galley-manning-2010-accurate", "title": "Accurate Non-Hierarchical Phrase-Based Translation", "abstract": "A principal weakness of conventional (i.e., non-hierarchical) phrase-based statistical machine translation is that it can only exploit continuous phrases. In this paper, we extend phrase-based decoding to allow both source and target phrasal discontinuities, which provide better generalization on unseen data and yield significant improvements to a standard phrase-based system (Moses).
More interestingly, our discontinuous phrase-based system also outperforms a state-of-the-art hierarchical system (Joshua) by a very significant margin (+1.03 BLEU on average on five Chinese-English NIST test sets), even though both Joshua and our system support discontinuous phrases. Since the key difference between these two systems is that ours is not hierarchical---i.e., our system uses a string-based decoder instead of CKY, and it imposes no hard hierarchical reordering constraints during training and decoding---this paper sets out to challenge the commonly held belief that the tree-based parameterization of systems such as Hiero and Joshua is crucial to their good performance against Moses.", "keyphrases": ["generalization", "phrase-based system", "hierarchical system", "joshua", "gap"]} +{"id": "gardner-etal-2014-incorporating", "title": "Incorporating Vector Space Similarity in Random Walk Inference over Knowledge Bases", "abstract": "Much work in recent years has gone into the construction of large knowledge bases (KBs), such as Freebase, DBPedia, NELL, and YAGO. While these KBs are very large, they are still very incomplete, necessitating the use of inference to fill in gaps. Prior work has shown how to make use of a large text corpus to augment random walk inference over KBs. We present two improvements to the use of such large corpora to augment KB inference. First, we present a new technique for combining KB relations and surface text into a single graph representation that is much more compact than graphs used in prior work. Second, we describe how to incorporate vector space similarity into random walk inference over KBs, reducing the feature sparsity inherent in using surface text. This allows us to combine distributional similarity with symbolic logical inference in novel and effective ways. With experiments on many relations from two separate KBs, we show that our methods significantly outperform prior work on KB inference, both in the size of problem our methods can handle and in the quality of predictions made.", "keyphrases": ["vector space similarity", "random walk inference", "much work", "entity pair"]} +{"id": "lee-etal-2022-deduplicating", "title": "Deduplicating Training Data Makes Language Models Better", "abstract": "We find that existing language modeling datasets contain many near-duplicate examples and long repetitive substrings. As a result, over 1% of the unprompted output of language models trained on these datasets is copied verbatim from the training data. We develop two tools that allow us to deduplicate training datasets\u2014for example removing from C4 a single 61 word English sentence that is repeated over 60,000 times. Deduplication allows us to train models that emit memorized text ten times less frequently and require fewer training steps to achieve the same or better accuracy. We can also reduce train-test overlap, which affects over 4% of the validation set of standard datasets, thus allowing for more accurate evaluation. Code for deduplication is released at .", "keyphrases": ["language model", "memorized text", "well accuracy"]} +{"id": "zhao-etal-2009-application", "title": "Application-driven Statistical Paraphrase Generation", "abstract": "Paraphrase generation (PG) is important in plenty of NLP applications. However, the research of PG is far from enough. 
In this paper, we propose a novel method for statistical paraphrase generation (SPG), which can (1) achieve various applications based on a uniform statistical model, and (2) naturally combine multiple resources to enhance the PG performance. In our experiments, we use the proposed method to generate paraphrases for three different applications. The results show that the method can be easily transformed from one application to another and generate valuable and interesting paraphrases.", "keyphrases": ["statistical paraphrase generation", "sample sentence", "spg use"]} +{"id": "tillmann-etal-2014-improved", "title": "Improved Sentence-Level Arabic Dialect Classification", "abstract": "The paper presents work on improved sentence-level dialect classification of Egyptian Arabic (ARZ) vs. Modern Standard Arabic (MSA). Our approach is based on binary feature functions that can be implemented with a minimal amount of task-specific knowledge. We train a feature-rich linear classifier based on a linear support-vector machine (linear SVM) approach. Our best system achieves an accuracy of 89.1% on the Arabic Online Commentary (AOC) dataset (Zaidan and Callison-Burch, 2011) using 10-fold stratified cross validation: a 1.3% absolute accuracy improvement over the results published by (Zaidan and Callison-Burch, 2014). We also evaluate the classifier on dialect data from an additional data source. Here, we find that features which measure the informalness of a sentence actually decrease classification accuracy significantly.", "keyphrases": ["dialect", "egyptian arabic", "linear classifier", "aoc dataset"]} +{"id": "baroni-etal-2014-frege", "title": "Frege in Space: A Program for Composition Distributional Semantics", "abstract": "The lexicon of any natural language encodes a huge number of distinct word meanings. Just to understand this article, you will need to know what thousands of words mean. The space of possible sentential meanings is infinite: In this article alone, you will encounter many sentences that express ideas you have never heard before, we hope. Statistical semantics has addressed the issue of the vastness of word meaning by proposing methods to harvest meaning automatically from large collections of text (corpora). Formal semantics in the Fregean tradition has developed methods to account for the infinity of sentential meaning based on the crucial insight of compositionality, the idea that meaning of sentences is built incrementally by combining the meanings of their constituents. This article sketches a new approach to semantics that brings together ideas from statistical and formal semantics to account, in parallel, for the richness of lexical meaning and the combinatorial power of sentential semantics.
We adopt, in particular, the idea that word meaning can be approximated by the patterns of co-occurrence of words in corpora from statistical semantics, and the idea that compositionality can be captured in terms of a syntax-driven calculus of function application from formal semantics.", "keyphrases": ["distributional semantic", "function", "contextual information", "compositional vector", "dsm"]} +{"id": "sogaard-wu-2009-empirical", "title": "Empirical lower bounds on translation unit error rate for the full class of inversion transduction grammars", "abstract": "Empirical lower bounds studies in which the frequency of alignment configurations that cannot be induced by a particular formalism is estimated, have been important for the development of syntax-based machine translation formalisms. The formalism that has received most attention has been inversion transduction grammars (ITGs) (Wu, 1997). All previous work on the coverage of ITGs, however, concerns parse failure rates (PFRs) or sentence level coverage, which is not directly related to any of the evaluation measures used in machine translation. Sogaard and Kuhn (2009) induce lower bounds on translation unit error rates (TUERs) for a number of formalisms, incl. normal form ITGs, but not for the full class of ITGs. Many of the alignment configurations that cannot be induced by normal form ITGs can be induced by unrestricted ITGs, however. This paper estimates the difference and shows that the average reduction in lower bounds on TUER is 2.48 in absolute difference (16.01 in average parse failure rate).", "keyphrases": ["inversion transduction grammar", "low bound", "translation model", "adequacy"]} +{"id": "xu-etal-2020-megatron", "title": "MEGATRON-CNTRL: Controllable Story Generation with External Knowledge Using Large-Scale Language Models", "abstract": "Existing pre-trained large language models have shown unparalleled generative capabilities. However, they are not controllable. In this paper, we propose MEGATRON-CNTRL, a novel framework that uses large-scale language models and adds control to text generation by incorporating an external knowledge base. Our framework consists of a keyword predictor, a knowledge retriever, a contextual knowledge ranker, and a conditional text generator. As we do not have access to ground-truth supervision for the knowledge ranker, we make use of weak supervision from sentence embedding. The empirical results show that our model generates more fluent, consistent, and coherent stories with less repetition and higher diversity compared to prior work on the ROC story dataset. We showcase the controllability of our model by replacing the keywords used to generate stories and re-running the generation process. Human evaluation results show that 77.5% of these stories are successfully controlled by the new keywords. Furthermore, by scaling our model from 124 million to 8.3 billion parameters we demonstrate that larger models improve both the quality of generation (from 74.5% to 93.0% for consistency) and controllability (from 77.5% to 91.5%).", "keyphrases": ["story", "language model", "generation process", "megatron-cntrl"]} +{"id": "yoshimoto-etal-2013-naist", "title": "NAIST at 2013 CoNLL Grammatical Error Correction Shared Task", "abstract": "This paper describes the Nara Institute of Science and Technology (NAIST) error correction system in the CoNLL 2013 Shared Task. 
We constructed three systems: a system based on the Treelet Language Model for verb form and subject-verb agreement errors; a classifier trained on both learner and native corpora for noun number errors; a statistical machine translation (SMT)-based model for preposition and determiner errors. As for subject-verb agreement errors, we show that the Treelet Language Model-based approach can correct errors in which the target verb is distant from its subject. Our system ranked fourth on the official run.", "keyphrases": ["learner", "machine translation", "subject-verb agreement error", "naist"]} +{"id": "deng-etal-2018-alibabas", "title": "Alibaba's Neural Machine Translation Systems for WMT18", "abstract": "This paper describes the submission systems of Alibaba for WMT18 shared news translation task. We participated in 5 translation directions including English \u2194 Russian, English \u2194 Turkish in both directions and English \u2192 Chinese. Our systems are based on Google's Transformer model architecture, into which we integrated the most recent features from the academic research. We also employed most techniques that have been proven effective during the past WMT years, such as BPE, back translation, data selection, model ensembling and reranking, at industrial scale. For some morphologically-rich languages, we also incorporated linguistic knowledge into our neural network. For the translation tasks in which we have participated, our resulting systems achieved the best case sensitive BLEU score in all 5 directions. Notably, our English \u2192 Russian system outperformed the second reranked system by 5 BLEU score.", "keyphrases": ["wmt18", "back translation", "reranking", "alibaba"]} +{"id": "barnes-etal-2018-bilingual", "title": "Bilingual Sentiment Embeddings: Joint Projection of Sentiment Across Languages", "abstract": "Sentiment analysis in low-resource languages suffers from a lack of annotated corpora to estimate high-performing models. Machine translation and bilingual word embeddings provide some relief through cross-lingual sentiment approaches. However, they either require large amounts of parallel data or do not sufficiently capture sentiment information. We introduce Bilingual Sentiment Embeddings (BLSE), which jointly represent sentiment information in a source and target language. This model only requires a small bilingual lexicon, a source-language corpus annotated for sentiment, and monolingual word embeddings for each language. We perform experiments on three language combinations (Spanish, Catalan, Basque) for sentence-level cross-lingual sentiment classification and find that our model significantly outperforms state-of-the-art methods on four out of six experimental setups, as well as capturing complementary information to machine translation. Our analysis of the resulting embedding space provides evidence that it represents sentiment information in the resource-poor target language without any annotated data in that language.", "keyphrases": ["bilingual sentiment embeddings", "cross-lingual approach", "source language"]} +{"id": "li-etal-2016-role", "title": "The Role of Discourse Units in Near-Extractive Summarization", "abstract": "Although human-written summaries of documents tend to involve significant edits to the source text, most automated summarizers are extractive and select sentences verbatim.
In this work we examine how elementary discourse units (EDUs) from Rhetorical Structure Theory can be used to extend extractive summarizers to produce a wider range of human-like summaries. Our analysis demonstrates that EDU segmentation is effective in preserving human-labeled summarization concepts within sentences and also aligns with near-extractive summaries constructed by news editors. Finally, we show that using EDUs as units of content selection instead of sentences leads to stronger summarization performance in near-extractive scenarios, especially under tight budgets.", "keyphrases": ["elementary discourse unit", "near-extractive summary", "news editor"]} +{"id": "silberer-etal-2013-models", "title": "Models of Semantic Representation with Visual Attributes", "abstract": "We consider the problem of grounding the meaning of words in the physical world and focus on the visual modality which we represent by visual attributes. We create a new large-scale taxonomy of visual attributes covering more than 500 concepts and their corresponding 688K images. We use this dataset to train attribute classifiers and integrate their predictions with text-based distributional models of word meaning. We show that these bimodal models give a better fit to human word association data compared to amodal models and word representations based on handcrafted norming data.", "keyphrases": ["visual attribute", "image", "distributional model"]} +{"id": "aminian-etal-2019-cross", "title": "Cross-Lingual Transfer of Semantic Roles: From Raw Text to Semantic Roles", "abstract": "We describe a transfer method based on annotation projection to develop a dependency-based semantic role labeling system for languages for which no supervised linguistic information other than parallel data is available. Unlike previous work that presumes the availability of supervised features such as lemmas, part-of-speech tags, and dependency parse trees, we only make use of word and character features. Our deep model considers using character-based representations as well as unsupervised stem embeddings to alleviate the need for supervised features. Our experiments outperform a state-of-the-art method that uses supervised lexico-syntactic features on 6 out of 7 languages in the Universal Proposition Bank.", "keyphrases": ["part-of-speech tag", "lexico-syntactic feature", "cross-lingual transfer"]} +{"id": "tsai-etal-2003-chinese", "title": "Chinese Word Auto-Confirmation Agent", "abstract": "In various Asian languages, including Chinese, there is no space between words in texts. Thus, most Chinese NLP systems must perform word-segmentation (sentence tokenization). However, successful word-segmentation depends on having a sufficiently large lexicon. On the average, about 3% of the words in text are not contained in a lexicon. Therefore, unknown word identification becomes a bottleneck for Chinese NLP systems. In this paper, we present a Chinese word auto-confirmation (CWAC) agent. CWAC agent uses a hybrid approach that takes advantage of statistical and linguistic approaches. The task of a CWAC agent is to auto-confirm whether an n-gram input (n \u2265 2) is a Chinese word. We design our CWAC agent to satisfy two criteria: (1) a greater than 98% precision rate and a greater than 75% recall rate and (2) domain-independent performance (F-measure). These criteria assure our CWAC agents can work automatically without human intervention.
Furthermore, by combining several CWAC agents designed based on different principles, we can construct a multi-CWAC agent through a building-block approach. Three experiments are conducted in this study. The results demonstrate that, for n-gram frequency \u2265 4 in large corpus, our CWAC agent can satisfy the two criteria and achieve 97.82% precision, 77.11% recall, and 86.24% domain-independent F-measure. No existing systems can achieve such a high precision and domain-independent F-measure. The proposed method is our first attempt for constructing a CWAC agent. We will continue to develop other CWAC agents and integrate them into a multi-CWAC agent system.", "keyphrases": ["unknown word identification", "bottleneck", "chinese", "web corpus", "cws"]} +{"id": "xu-wan-2017-towards", "title": "Towards a Universal Sentiment Classifier in Multiple languages", "abstract": "Existing sentiment classifiers usually work for only one specific language, and different classification models are used in different languages. In this paper we aim to build a universal sentiment classifier with a single classification model in multiple different languages. In order to achieve this goal, we propose to learn multilingual sentiment-aware word embeddings simultaneously based only on the labeled reviews in English and unlabeled parallel data available in a few language pairs. It is not required that the parallel data exist between English and any other language, because the sentiment information can be transferred into any language via pivot languages. We present the evaluation results of our universal sentiment classifier in five languages, and the results are very promising even when the parallel data between English and the target languages are not used. Furthermore, the universal single classifier is compared with a few cross-language sentiment classifiers relying on direct parallel data between the source and target languages, and the results show that the performance of our universal sentiment classifier is very promising compared to that of different cross-language classifiers in multiple target languages.", "keyphrases": ["universal sentiment classifier", "unlabeled parallel data", "pivot language"]} +{"id": "thakur-etal-2021-augmented", "title": "Augmented SBERT: Data Augmentation Method for Improving Bi-Encoders for Pairwise Sentence Scoring Tasks", "abstract": "There are two approaches for pairwise sentence scoring: Cross-encoders, which perform full-attention over the input pair, and Bi-encoders, which map each input independently to a dense vector space. While cross-encoders often achieve higher performance, they are too slow for many practical use cases. Bi-encoders, on the other hand, require substantial training data and fine-tuning over the target task to achieve competitive performance. We present a simple yet efficient data augmentation strategy called Augmented SBERT, where we use the cross-encoder to label a larger set of input pairs to augment the training data for the bi-encoder. We show that, in this process, selecting the sentence pairs is non-trivial and crucial for the success of the method. We evaluate our approach on multiple tasks (in-domain) as well as on a domain adaptation task.
Augmented SBERT achieves an improvement of up to 6 points for in-domain and of up to 37 points for domain adaptation tasks compared to the original bi-encoder performance.", "keyphrases": ["data augmentation method", "bi-encoders", "sentence pair", "question detection"]} +{"id": "zeng-etal-2019-iterative", "title": "Iterative Dual Domain Adaptation for Neural Machine Translation", "abstract": "Previous studies on the domain adaptation for neural machine translation (NMT) mainly focus on the one-pass transferring out-of-domain translation knowledge to in-domain NMT model. In this paper, we argue that such a strategy fails to fully extract the domain-shared translation knowledge, and repeatedly utilizing corpora of different domains can lead to better distillation of domain-shared translation knowledge. To this end, we propose an iterative dual domain adaptation framework for NMT. Specifically, we first pretrain in-domain and out-of-domain NMT models using their own training corpora respectively, and then iteratively perform bidirectional translation knowledge transfer (from in-domain to out-of-domain and then vice versa) based on knowledge distillation until the in-domain NMT model convergences. Furthermore, we extend the proposed framework to the scenario of multiple out-of-domain training corpora, where the above-mentioned transfer is performed sequentially between the in-domain and each out-of-domain NMT models in the ascending order of their domain similarities. Empirical results on Chinese-English and English-German translation tasks demonstrate the effectiveness of our framework.", "keyphrases": ["neural machine translation", "domain adaptation framework", "translation knowledge transfer"]} +{"id": "way-gough-2003-webmt", "title": "wEBMT: Developing and Validating an Example-Based Machine Translation System using the World Wide Web", "abstract": "We have developed an example-based machine translation (EBMT) system that uses the World Wide Web for two different purposes: First, we populate the system's memory with translations gathered from rule-based MT systems located on the Web. The source strings input to these systems were extracted automatically from an extremely small subset of the rule types in the Penn-II Treebank. In subsequent stages, the source, target translation pairs obtained are automatically transformed into a series of resources that render the translation process more successful. Despite the fact that the output from on-line MT systems is often faulty, we demonstrate in a number of experiments that when used to seed the memories of an EBMT system, they can in fact prove useful in generating translations of high quality in a robust fashion. In addition, we demonstrate the relative gain of EBMT in comparison to on-line systems. Second, despite the perception that the documents available on the Web are of questionable quality, we demonstrate in contrast that such resources are extremely useful in automatically postediting translation candidates proposed by our system.", "keyphrases": ["world", "wide web", "translation service", "double aim"]} +{"id": "son-etal-2018-causal", "title": "Causal Explanation Analysis on Social Media", "abstract": "Understanding causal explanations - reasons given for happenings in one's life - has been found to be an important psychological factor linked to physical and mental health. Causal explanations are often studied through manual identification of phrases over limited samples of personal writing. 
Automatic identification of causal explanations in social media, while challenging in relying on contextual and sequential cues, offers a larger-scale alternative to expensive manual ratings and opens the door for new applications (e.g. studying prevailing beliefs about causes, such as climate change). Here, we explore automating causal explanation analysis, building on discourse parsing, and presenting two novel subtasks: causality detection (determining whether a causal explanation exists at all) and causal explanation identification (identifying the specific phrase that is the explanation). We achieve strong accuracies for both tasks but find different approaches best: an SVM for causality prediction (F1 = 0.791) and a hierarchy of Bidirectional LSTMs for causal explanation identification (F1 = 0.853). Finally, we explore applications of our complete pipeline (F1 = 0.868), showing demographic differences in mentions of causal explanation and that the association between a word and sentiment can change when it is used within a causal explanation.", "keyphrases": ["discourse parsing", "causal explanation analysis", "social medium"]} +{"id": "vajjala-rama-2018-experiments", "title": "Experiments with Universal CEFR Classification", "abstract": "The Common European Framework of Reference (CEFR) guidelines describe language proficiency of learners on a scale of 6 levels. While the description of CEFR guidelines is generic across languages, the development of automated proficiency classification systems for different languages follow different approaches. In this paper, we explore universal CEFR classification using domain-specific and domain-agnostic, theory-guided as well as data-driven features. We report the results of our preliminary experiments in monolingual, cross-lingual, and multilingual classification with three languages: German, Czech, and Italian. Our results show that both monolingual and multilingual models achieve similar performance, and cross-lingual classification yields lower, but comparable results to monolingual classification.", "keyphrases": ["universal cefr classification", "learner", "scale"]} +{"id": "gratch-etal-2014-distress", "title": "The Distress Analysis Interview Corpus of human and computer interviews", "abstract": "The Distress Analysis Interview Corpus (DAIC) contains clinical interviews designed to support the diagnosis of psychological distress conditions such as anxiety, depression, and post traumatic stress disorder. The interviews are conducted by humans, human controlled agents and autonomous agents, and the participants include both distressed and non-distressed individuals. Data collected include audio and video recordings and extensive questionnaire responses; parts of the corpus have been transcribed and annotated for a variety of verbal and non-verbal features. The corpus has been used to support the creation of an automated interviewer agent, and for research on the automatic identification of psychological distress.", "keyphrases": ["daic", "clinical interview", "agent", "pause"]} +{"id": "basile-etal-2012-developing", "title": "Developing a large semantically annotated corpus", "abstract": "What would be a good method to provide a large collection of semantically annotated texts with formal, deep semantics rather than shallow? 
We argue that a bootstrapping approach comprising state-of-the-art NLP tools for parsing and semantic interpretation, in combination with a wiki-like interface for collaborative annotation of experts, and a game with a purpose for crowdsourcing, are the starting ingredients for fulfilling this enterprise. The result is a semantic resource that anyone can edit and that integrates various phenomena, including predicate-argument structure, scope, tense, thematic roles, rhetorical relations and presuppositions, into a single semantic formalism: Discourse Representation Theory. Taking texts rather than sentences as the units of annotation results in deep semantic representations that incorporate discourse structure and dependencies. To manage the various (possibly conflicting) annotations provided by experts and non-experts, we introduce a method that stores \u201cBits of Wisdom\u201d in a database as stand-off annotations.", "keyphrases": ["semantic resource", "discourse representation theory", "groningen meaning bank", "large corpus", "logical form"]} +{"id": "salton-etal-2016-idiom", "title": "Idiom Token Classification using Sentential Distributed Semantics", "abstract": "Idiom token classification is the task of deciding for a set of potentially idiomatic phrases whether each occurrence of a phrase is a literal or idiomatic usage of the phrase. In this work we explore the use of Skip-Thought Vectors to create distributed representations that encode features that are predictive with respect to idiom token classification. We show that classifiers using these representations have competitive performance compared with the state of the art in idiom token classification. Importantly, however, our models use only the sentence containing the target phrase as input and are thus less dependent on a potentially inaccurate or incomplete model of discourse context. We further demonstrate the feasibility of using these representations to train a competitive general idiom token classifier.", "keyphrases": ["usage", "skip-thought vectors", "idiom token classification"]} +{"id": "shen-etal-2021-taxoclass", "title": "TaxoClass: Hierarchical Multi-Label Text Classification Using Only Class Names", "abstract": "Hierarchical multi-label text classification (HMTC) aims to tag each document with a set of classes from a taxonomic class hierarchy. Most existing HMTC methods train classifiers using massive human-labeled documents, which are often too costly to obtain in real-world applications. In this paper, we explore to conduct HMTC based on only class surface names as supervision signals. We observe that to perform HMTC, human experts typically first pinpoint a few most essential classes for the document as its \u201ccore classes\u201d, and then check core classes' ancestor classes to ensure the coverage. To mimic human experts, we propose a novel HMTC framework, named TaxoClass. Specifically, TaxoClass (1) calculates document-class similarities using a textual entailment model, (2) identifies a document's core classes and utilizes confident core classes to train a taxonomy-enhanced classifier, and (3) generalizes the classifier via multi-label self-training. 
Our experiments on two challenging datasets show TaxoClass can achieve around 0.71 Example-F1 using only class names, outperforming the best previous method by 25%.", "keyphrases": ["multi-label text classification", "hmtc", "supervision signal", "self-training", "taxoclass"]} +{"id": "basu-etal-2013-powergrading", "title": "Powergrading: a Clustering Approach to Amplify Human Effort for Short Answer Grading", "abstract": "We introduce a new approach to the machine-assisted grading of short answer questions. We follow past work in automated grading by first training a similarity metric between student responses, but then go on to use this metric to group responses into clusters and subclusters. The resulting groupings allow teachers to grade multiple responses with a single action, provide rich feedback to groups of similar answers, and discover modalities of misunderstanding among students; we refer to this amplification of grader effort as \u201cpowergrading.\u201d We develop the means to further reduce teacher effort by automatically performing actions when an answer key is available. We show results in terms of grading progress with a small \u201cbudget\u201d of human actions, both from our method and an LDA-based approach, on a test corpus of 10 questions answered by 698 respondents.", "keyphrases": ["cluster", "grading", "student", "short-answer question"]} +{"id": "galley-manning-2009-quadratic", "title": "Quadratic-Time Dependency Parsing for Machine Translation", "abstract": "Efficiency is a prime concern in syntactic MT decoding, yet significant developments in statistical parsing with respect to asymptotic efficiency haven't yet been explored in MT. Recently, McDonald et al. (2005b) formalized dependency parsing as a maximum spanning tree (MST) problem, which can be solved in quadratic time relative to the length of the sentence. They show that MST parsing is almost as accurate as cubic-time dependency parsing in the case of English, and that it is more accurate with free word order languages. This paper applies MST parsing to MT, and describes how it can be integrated into a phrase-based decoder to compute dependency language model scores. Our results show that augmenting a state-of-the-art phrase-based system with this dependency language model leads to significant improvements in TER (0.92%) and BLEU (0.45%) scores on five NIST Chinese-English evaluation test sets.", "keyphrases": ["maximum spanning tree", "mst", "dependency language model"]} +{"id": "bansal-etal-2011-gappy", "title": "Gappy Phrasal Alignment By Agreement", "abstract": "We propose a principled and efficient phrase-to-phrase alignment model, useful in machine translation as well as other related natural language processing problems. In a hidden semi-Markov model, word-to-phrase and phrase-to-word translations are modeled directly by the system. Agreement between two directional models encourages the selection of parsimonious phrasal alignments, avoiding the overfitting commonly encountered in unsupervised training with multi-word units. Expanding the state space to include \"gappy phrases\" (such as French ne * pas) makes the alignment space more symmetric; thus, it allows agreement between discontinuous alignments. 
The resulting system shows substantial improvements in both alignment quality and translation quality over word-based Hidden Markov Models, while maintaining asymptotically equivalent runtime.", "keyphrases": ["phrasal alignment", "agreement", "hidden semi-markov model"]} +{"id": "petukhova-bunt-2008-lirics", "title": "LIRICS Semantic Role Annotation: Design and Evaluation of a Set of Data Categories", "abstract": "Semantic roles have often proved to be useful labels for stating linguistic generalisations of various sorts. There is, however, a lack of agreement on their defining criteria, which causes serious problems for semantic roles to be a useful classificatory device for predicate-argument relations. These criteria should (a) support the design of a semantic role set which is complete but does not contain redundant relations; (b) be based on semantic rather than morphological, lexical or syntactic properties; and (c) enable formal interpretation. In this paper we report on the analyses of alternative approaches to annotation and representation of semantic role information (such as FrameNet, PropBank and VerbNet) with respect to their models of description, granularity of semantic role sets, definitions of semantic roles concepts, consistency and reliability of annotations. We present methodological principles for characterising well-defined concepts which were developed within the LIRICS (Linguistic InfRastructure for Interoperable ResourCes and Systems; see ) project, as well as the designed set of semantic roles and their definitions in ISO 12620 format. We discuss evaluation results of the defined concepts for semantic role annotation concerning the redundancy and completeness of the tagset and the reliability of annotations in terms of inter-annotator agreement.", "keyphrases": ["semantic role", "linguistic infrastructure", "interoperable resources", "project", "lirics"]} +{"id": "dethlefs-cuayahuitl-2010-hierarchical", "title": "Hierarchical Reinforcement Learning for Adaptive Text Generation", "abstract": "We present a novel approach to natural language generation (NLG) that applies hierarchical reinforcement learning to text generation in the wayfinding domain. Our approach aims to optimise the integration of NLG tasks that are inherently different in nature, such as decisions of content selection, text structure, user modelling, referring expression generation (REG), and surface realisation. It also aims to capture existing interdependencies between these areas. We apply hierarchical reinforcement learning to learn a generation policy that captures these interdependencies, and that can be transferred to other NLG tasks. Our experimental results---in a simulated environment---show that the learnt wayfinding policy outperforms a baseline policy that takes reasonable actions but without optimization.", "keyphrases": ["nlg task", "generation policy", "hierarchical reinforcement learning"]} +{"id": "schulz-etal-2010-multilingual", "title": "Multilingual Corpus Development for Opinion Mining", "abstract": "Opinion Mining is a discipline that has attracted some attention lately. Most of the research in this field has been done for English or Asian languages, due to the lack of resources in other languages. In this paper we describe an approach of building a manually annotated multilingual corpus for the domain of product reviews, which can be used as a basis for fine-grained opinion analysis also considering direct and indirect opinion targets. 
For each sentence in a review, the mentioned product features with their respective opinion polarity and strength on a scale from 0 to 3 are labelled manually by two annotators. The languages represented in the corpus are English, German and Spanish and the corpus consists of about 500 product reviews per language. After a short introduction and a description of related work, we illustrate the annotation process, including a description of the annotation methodology and the developed tool for the annotation process. Then first results on the inter-annotator agreement for opinions and product features are presented. We conclude the paper with an outlook on future work.", "keyphrases": ["opinion mining", "other language", "spanish"]} +{"id": "zhu-bhat-2021-euphemistic-phrase", "title": "Euphemistic Phrase Detection by Masked Language Model", "abstract": "It is a well-known approach for fringe groups and organizations to use euphemisms\u2014ordinary-sounding and innocent-looking words with a secret meaning\u2014to conceal what they are discussing. For instance, drug dealers often use \u201cpot\u201d for marijuana and \u201cavocado\u201d for heroin. From a social media content moderation perspective, though recent advances in NLP have enabled the automatic detection of such single-word euphemisms, no existing work is capable of automatically detecting multi-word euphemisms, such as \u201cblue dream\u201d (marijuana) and \u201cblack tar\u201d (heroin). Our paper tackles the problem of euphemistic phrase detection without human effort for the first time, as far as we are aware. We first perform phrase mining on a raw text corpus (e.g., social media posts) to extract quality phrases. Then, we utilize word embedding similarities to select a set of euphemistic phrase candidates. Finally, we rank those candidates by a masked language model\u2014SpanBERT. Compared to strong baselines, we report 20-50% higher detection accuracies using our algorithm for detecting euphemistic phrases.", "keyphrases": ["euphemism", "candidate", "euphemistic phrase detection"]} +{"id": "nivre-2009-non", "title": "Non-Projective Dependency Parsing in Expected Linear Time", "abstract": "We present a novel transition system for dependency parsing, which constructs arcs only between adjacent words but can parse arbitrary non-projective trees by swapping the order of words in the input. Adding the swapping operation changes the time complexity for deterministic parsing from linear to quadratic in the worst case, but empirical estimates based on treebank data show that the expected running time is in fact linear for the range of data attested in the corpora. Evaluation on data from five languages shows state-of-the-art accuracy, with especially good results for the labeled exact match score.", "keyphrases": ["linear time", "non-projective tree", "state-of-the-art accuracy"]} +{"id": "raganato-etal-2017-neural", "title": "Neural Sequence Learning Models for Word Sense Disambiguation", "abstract": "Word Sense Disambiguation models exist in many flavors. Even though supervised ones tend to perform best in terms of accuracy, they often lose ground to more flexible knowledge-based solutions, which do not require training by a word expert for every disambiguation target. 
To bridge this gap we adopt a different perspective and rely on sequence learning to frame the disambiguation problem: we propose and study in depth a series of end-to-end neural architectures directly tailored to the task, from bidirectional Long Short-Term Memory to encoder-decoder models. Our extensive evaluation over standard benchmarks and in multiple languages shows that sequence learning enables more versatile all-words models that consistently lead to state-of-the-art results, even against word experts with engineered features.", "keyphrases": ["word sense disambiguation", "neural architecture", "sequence labeling task"]} +{"id": "wang-etal-2018-metrics", "title": "No Metrics Are Perfect: Adversarial Reward Learning for Visual Storytelling", "abstract": "Though impressive results have been achieved in visual captioning, the task of generating abstract stories from photo streams is still a little-tapped problem. Different from captions, stories have more expressive language styles and contain many imaginary concepts that do not appear in the images. Thus it poses challenges to behavioral cloning algorithms. Furthermore, due to the limitations of automatic metrics on evaluating story quality, reinforcement learning methods with hand-crafted rewards also face difficulties in gaining an overall performance boost. Therefore, we propose an Adversarial REward Learning (AREL) framework to learn an implicit reward function from human demonstrations, and then optimize policy search with the learned reward function. Though automatic evaluation indicates slight performance boost over state-of-the-art (SOTA) methods in cloning expert behaviors, human evaluation shows that our approach achieves significant improvement in generating more human-like stories than SOTA systems.", "keyphrases": ["adversarial reward learning", "caption", "human evaluation", "policy gradient"]} +{"id": "lacroix-etal-2016-frustratingly", "title": "Frustratingly Easy Cross-Lingual Transfer for Transition-Based Dependency Parsing", "abstract": "In this paper, we present a straightforward strategy for transferring dependency parsers across languages. The proposed method learns a parser from partially annotated data obtained through the projection of annotations across unambiguous word alignments. It does not rely on any modeling of the reliability of dependency and/or alignment links and is therefore easy to implement and parameter free. Experiments on six languages show that our method is at par with recent algorithmically demanding methods, at a much cheaper computational cost. It can thus serve as a fair baseline for transferring dependencies across languages with the use of parallel corpora.", "keyphrases": ["cross-lingual transfer", "dependency parser", "alignment link", "parallel corpora", "annotation projection"]} +{"id": "jagannatha-yu-2020-calibrating", "title": "Calibrating Structured Output Predictors for Natural Language Processing", "abstract": "We address the problem of calibrating prediction confidence for output entities of interest in natural language processing (NLP) applications. It is important that NLP applications such as named entity recognition and question answering produce calibrated confidence scores for their predictions, especially if the applications are to be deployed in a safety-critical domain such as healthcare. However the output space of such structured prediction models are often too large to directly adapt binary or multi-class calibration methods. 
In this study, we propose a general calibration scheme for output entities of interest in neural network based structured prediction models. Our proposed method can be used with any binary class calibration scheme and a neural network model. Additionally, we show that our calibration method can also be used as an uncertainty-aware, entity-specific decoding step to improve the performance of the underlying model at no additional training cost or data requirements. We show that our method outperforms current calibration techniques for Named Entity Recognition, Part-of-speech tagging and Question Answering systems. We also observe an improvement in model performance from our decoding step across several tasks and benchmark datasets. Our method improves the calibration and model performance on out-of-domain test scenarios as well.", "keyphrases": ["output entity", "calibration", "forecaster", "decision tree"]} +{"id": "nie-etal-2018-operation", "title": "Operation-guided Neural Networks for High Fidelity Data-To-Text Generation", "abstract": "Recent neural models for data-to-text generation are mostly based on data-driven end-to-end training over encoder-decoder networks. Even though the generated texts are mostly fluent and informative, they often generate descriptions that are not consistent with the input structured data. This is a critical issue especially in domains that require inference or calculations over raw data. In this paper, we attempt to improve the fidelity of neural data-to-text generation by utilizing pre-executed symbolic operations. We propose a framework called Operation-guided Attention-based sequence-to-sequence network (OpAtt), with a specifically designed gating mechanism as well as a quantization module for operation results to utilize information from pre-executed operations. Experiments on two sports datasets show our proposed method clearly improves the fidelity of the generated texts to the input structured data.", "keyphrases": ["pre-executed symbolic operation", "neural table-to-text generation", "text generation"]} +{"id": "tsai-etal-2019-small", "title": "Small and Practical BERT Models for Sequence Labeling", "abstract": "We propose a practical scheme to train a single multilingual sequence labeling model that yields state of the art results and is small and fast enough to run on a single CPU. Starting from a public multilingual BERT checkpoint, our final model is 6x smaller and 27x faster, and has higher accuracy than a state-of-the-art multilingual baseline. We show that our model especially outperforms on low-resource languages, and works on codemixed input text without being explicitly trained on codemixed examples. We showcase the effectiveness of our method by reporting on part-of-speech tagging and morphological prediction on 70 treebanks and 48 languages.", "keyphrases": ["sequence labeling", "single cpu", "m-bert model"]} +{"id": "rozovskaya-etal-2017-adapting", "title": "Adapting to Learner Errors with Minimal Supervision", "abstract": "This article considers the problem of correcting errors made by English as a Second Language writers from a machine learning perspective, and addresses an important issue of developing an appropriate training paradigm for the task, one that accounts for error patterns of non-native writers using minimal supervision. 
Existing training approaches present a trade-off between large amounts of cheap data offered by the native-trained models and additional knowledge of learner error patterns provided by the more expensive method of training on annotated learner data. We propose a novel training approach that draws on the strengths offered by the two standard training paradigms\u2014of training either on native or on annotated learner data\u2014and that outperforms both of these standard methods. Using the key observation that parameters relating to error regularities exhibited by non-native writers are relatively simple, we develop models that can incorporate knowledge about error regularities based on a small annotated sample but that are otherwise trained on native English data. The key contribution of this article is the introduction and analysis of two methods for adapting the learned models to error patterns of non-native writers; one method that applies to generative classifiers and a second that applies to discriminative classifiers. Both methods demonstrated state-of-the-art performance in several text correction competitions. In particular, the Illinois system that implements these methods ranked at the top in two recent CoNLL shared tasks on error correction. We conduct further evaluation of the proposed approaches studying the effect of using error data from speakers of the same native language, languages that are closely related linguistically, and unrelated languages.", "keyphrases": ["minimal supervision", "learner error pattern", "error type"]} +{"id": "narayan-chen-etal-2019-collaborative", "title": "Collaborative Dialogue in Minecraft", "abstract": "We wish to develop interactive agents that can communicate with humans to collaboratively solve tasks in grounded scenarios. Since computer games allow us to simulate such tasks without the need for physical robots, we define a Minecraft-based collaborative building task in which one player (A, the Architect) is shown a target structure and needs to instruct the other player (B, the Builder) to build this structure. Both players interact via a chat interface. A can observe B but cannot place blocks. We present the Minecraft Dialogue Corpus, a collection of 509 conversations and game logs. As a first step towards our goal of developing fully interactive agents for this task, we consider the subtask of Architect utterance generation, and show how challenging it is.", "keyphrases": ["collaborative building task", "minecraft dialogue corpus", "dialog"]} +{"id": "hutchins-2003-machine", "title": "Has machine translation improved? some historical comparisons", "abstract": "The common assertion that MT systems have improved over the last decades is examined by informal comparisons of translations produced by operational systems in the 1960s, 1970s and 1980s and of translations of the same source texts produced by some currently available commercial and online systems. The scarcity of source and target texts for earlier systems means that the conclusions are consequently tentative and preliminary.", "keyphrases": ["decade", "translation tool", "old system"]} +{"id": "zhang-etal-2021-sparsifying", "title": "On Sparsifying Encoder Outputs in Sequence-to-Sequence Models", "abstract": "Sequence-to-sequence models usually transfer all encoder outputs to the decoder for generation. In this work, by contrast, we hypothesize that these encoder outputs can be compressed to shorten the sequence delivered for decoding.
We take Transformer as the testbed and introduce a layer of stochastic gates in-between the encoder and the decoder. The gates are regularized using the expected value of the sparsity-inducing L0 penalty, resulting in completely masking-out a subset of encoder outputs. In other words, via joint training, the L0DROP layer forces Transformer to route information through a subset of its encoder states. We investigate the effects of this sparsification on two machine translation and two summarization tasks. Experiments show that, depending on the task, around 40-70% of source encodings can be pruned without significantly compromising quality. The decrease of the output length endows L0DROP with the potential of improving decoding efficiency, where it yields a speedup of up to 1.65x on document summarization tasks against the standard Transformer. We analyze the L0DROP behaviour and observe that it exhibits systematic preferences for pruning certain word types, e.g., function words and punctuation get pruned most. Inspired by these observations, we explore the feasibility of specifying rule-based patterns that mask out encoder outputs based on information such as part-of-speech tags, word frequency and word position.", "keyphrases": ["encoder output", "sequence-to-sequence model", "summarization task"]} +{"id": "liu-etal-2016-agreement", "title": "Agreement on Target-bidirectional Neural Machine Translation", "abstract": "Neural machine translation (NMT) with recurrent neural networks has proven to be an effective technique for end-to-end machine translation. However, in spite of its promising advances over traditional translation methods, it typically suffers from an issue of unbalanced outputs, that arise from both the nature of recurrent neural networks themselves, and the challenges inherent in machine translation. To overcome this issue, we propose an agreement model for neural machine translation and show its effectiveness on large-scale Japanese-to-English and Chinese-to-English translation tasks. Our results show the model can achieve improvements of up to 1.4 BLEU over the strongest baseline NMT system. With the help of an ensemble technique, this new end-to-end NMT approach finally outperformed phrase-based and hierarchical phrase-based Moses baselines by up to 5.6 BLEU points.", "keyphrases": ["neural machine translation", "bidirectional decoding", "suffix"]} +{"id": "pershina-etal-2015-personalized", "title": "Personalized Page Rank for Named Entity Disambiguation", "abstract": "The task of Named Entity Disambiguation is to map entity mentions in the document to their correct entries in some knowledge base. We present a novel graph-based disambiguation approach based on Personalized PageRank (PPR) that combines local and global evidence for disambiguation and effectively filters out noise introduced by incorrect candidates. Experiments show that our method outperforms state-of-the-art approaches by achieving 91.7% in micro- and 89.9% in macro-accuracy on a dataset of 27.8K named entity mentions.", "keyphrases": ["named entity disambiguation", "personalized pagerank", "candidate"]} +{"id": "mozes-etal-2021-frequency", "title": "Frequency-Guided Word Substitutions for Detecting Textual Adversarial Examples", "abstract": "Recent efforts have shown that neural text processing models are vulnerable to adversarial examples, but the nature of these examples is poorly understood.
In this work, we show that adversarial attacks against CNN, LSTM and Transformer-based classification models perform word substitutions that are identifiable through frequency differences between replaced words and their corresponding substitutions. Based on these findings, we propose frequency-guided word substitutions (FGWS), a simple algorithm exploiting the frequency properties of adversarial word substitutions for the detection of adversarial examples. FGWS achieves strong performance by accurately detecting adversarial examples on the SST-2 and IMDb sentiment datasets, with F1 detection scores of up to 91.4% against RoBERTa-based classification models. We compare our approach against a recently proposed perturbation discrimination framework and show that we outperform it by up to 13.0% F1.", "keyphrases": ["detection", "frequency-guided word substitution", "adversarial sample"]} +{"id": "mazidi-nielsen-2014-linguistic", "title": "Linguistic Considerations in Automatic Question Generation", "abstract": "This paper describes an automatic question generator that uses semantic pattern recognition to create questions of varying depth and type for self-study or tutoring.", "keyphrases": ["automatic question generation", "tutoring", "heuristic rule"]} +{"id": "henderson-etal-2020-convert", "title": "ConveRT: Efficient and Accurate Conversational Representations from Transformers", "abstract": "General-purpose pretrained sentence encoders such as BERT are not ideal for real-world conversational AI applications; they are computationally heavy, slow, and expensive to train. We propose ConveRT (Conversational Representations from Transformers), a pretraining framework for conversational tasks satisfying all the following requirements: it is effective, affordable, and quick to train. We pretrain using a retrieval-based response selection task, effectively leveraging quantization and subword-level parameterization in the dual encoder to build a lightweight memory- and energy-efficient model. We show that ConveRT achieves state-of-the-art performance across widely established response selection tasks. We also demonstrate that the use of extended dialog history as context yields further performance gains. Finally, we show that pretrained representations from the proposed encoder can be transferred to the intent classification task, yielding strong results across three diverse data sets. ConveRT trains substantially faster than standard sentence encoders or previous state-of-the-art dual encoders. With its reduced size and superior performance, we believe this model promises wider portability and scalability for Conversational AI applications.", "keyphrases": ["conversational representations", "language model", "generation task", "original pre-training task"]} +{"id": "liu-etal-2012-towards", "title": "Towards Mediating Shared Perceptual Basis in Situated Dialogue", "abstract": "To enable effective referential grounding in situated human robot dialogue, we have conducted an empirical study to investigate how conversation partners collaborate and mediate shared basis when they have mismatched visual perceptual capabilities. In particular, we have developed a graph-based representation to capture linguistic discourse and visual discourse, and applied inexact graph matching to ground references. Our empirical results have shown that, even when computer vision algorithms produce many errors (e.g. 
84.7% of the objects in the environment are mis-recognized), our approach can still achieve 66% accuracy in referential grounding. These results demonstrate that, due to its error-tolerance nature, inexact graph matching provides a potential solution to mediate shared perceptual basis for referential grounding in situated interaction.", "keyphrases": ["situated dialogue", "referential grounding", "perception"]} +{"id": "brahman-etal-2020-cue", "title": "Cue Me In: Content-Inducing Approaches to Interactive Story Generation", "abstract": "Automatically generating stories is a challenging problem that requires producing causally related and logical sequences of events about a topic. Previous approaches in this domain have focused largely on one-shot generation, where a language model outputs a complete story based on limited initial input from a user. Here, we instead focus on the task of interactive story generation, where the user provides the model mid-level sentence abstractions in the form of cue phrases during the generation process. This provides an interface for human users to guide the story generation. We present two content-inducing approaches to effectively incorporate this additional information. Experimental results from both automatic and human evaluations show that these methods produce more topically coherent and personalized stories compared to baseline methods.", "keyphrases": ["interactive story generation", "cue phrase", "generation process"]} +{"id": "zweig-burges-2012-challenge", "title": "A Challenge Set for Advancing Language Modeling", "abstract": "In this paper, we describe a new, publicly available corpus intended to stimulate research into language modeling techniques which are sensitive to overall sentence coherence. The task uses the Scholastic Aptitude Test's sentence completion format. The test set consists of 1040 sentences, each of which is missing a content word. The goal is to select the correct replacement from amongst five alternates. In general, all of the options are syntactically valid, and reasonable with respect to local N-gram statistics. The set was generated by using an N-gram language model to generate a long list of likely words, given the immediate context. These options were then hand-groomed, to identify four decoys which are globally incoherent, yet syntactically correct. To ensure the right to public distribution, all the data is derived from out-of-copyright materials from Project Gutenberg. The test sentences were derived from five of Conan Doyle's Sherlock Holmes novels, and we provide a large set of Nineteenth and early Twentieth Century texts as training material.", "keyphrases": ["language modeling", "immediate context", "sentence completion challenge"]} +{"id": "yin-etal-2016-neural-generative", "title": "Neural Generative Question Answering", "abstract": "This paper presents an end-to-end neural network model, named Neural Generative Question Answering (GENQA), that can generate answers to simple factoid questions, based on the facts in a knowledge-base. More specifically, the model is built on the encoder-decoder framework for sequence-to-sequence learning, while equipped with the ability to enquire the knowledge-base, and is trained on a corpus of question-answer pairs, with their associated triples in the knowledge-base. Empirical study shows the proposed model can effectively deal with the variations of questions and answers, and generate right and natural answers by referring to the facts in the knowledge-base. 
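For the sentence-completion format described in the zweig-burges-2012-challenge record above, the natural n-gram baseline fits in a few lines; the toy corpus, add-one smoothing and candidate list are assumptions for illustration.

```python
# Hedged sketch: score each candidate filler with an add-one smoothed
# bigram model and keep the highest-probability sentence. Toy data only.
from collections import Counter
import math

train = "holmes took the lamp from my hand and held it close".split()
unigrams = Counter(train)
bigrams = Counter(zip(train, train[1:]))
V = len(unigrams)

def logprob(tokens):
    return sum(math.log((bigrams[(a, b)] + 1) / (unigrams[a] + V))
               for a, b in zip(tokens, tokens[1:]))

template = ["holmes", "took", "the", None, "from", "my", "hand"]
candidates = ["lamp", "sword", "cloud", "dog", "idea"]  # 1 target, 4 decoys
best = max(candidates,
           key=lambda w: logprob([w if t is None else t for t in template]))
print(best)  # "lamp" is the only filler supported by the bigram statistics
```

The record's point is precisely that such local statistics stop helping once the decoys are hand-groomed to be locally plausible but globally incoherent.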
The experiment on question answering demonstrates that the proposed model can outperform an embedding-based QA model as well as a neural dialogue model trained on the same data.", "keyphrases": ["neural network model", "factoid question", "encoder-decoder framework", "seq2seq learning"]} +{"id": "dohare-etal-2018-unsupervised", "title": "Unsupervised Semantic Abstractive Summarization", "abstract": "Automatic abstractive summary generation remains a significant open problem for natural language processing. In this work, we develop a novel pipeline for Semantic Abstractive Summarization (SAS). SAS, as introduced by Liu et al. (2015), first generates an AMR graph of an input story, through which it extracts a summary graph and finally, creates summary sentences from this summary graph. Compared to earlier approaches, we develop a more comprehensive method to generate the story AMR graph using state-of-the-art co-reference resolution and Meta Nodes, which we then use in a novel unsupervised algorithm based on how humans summarize a piece of text to extract the summary sub-graph. Our algorithm outperforms the state-of-the-art SAS method by 1.7% F1 score in node prediction.", "keyphrases": ["summarization", "pipeline", "amr"]} +{"id": "prabhakaran-etal-2012-annotations", "title": "Annotations for Power Relations on Email Threads", "abstract": "Social relations like power and influence are difficult concepts to define, but are easily recognizable when expressed. In this paper, we describe a multi-layer annotation scheme for social power relations that are recognizable from online written interactions. We introduce a typology of four types of power relations between dialog participants: hierarchical power, situational power, influence and control of communication. We also present a corpus of Enron emails comprising 122 threaded conversations, manually annotated with instances of these power relations between participants. Our annotations also capture attempts at exercise of power or influence and whether those attempts were successful or not. In addition, we also capture utterance level annotations for overt display of power. We describe the annotation definitions using two example email threads from our corpus illustrating each type of power relation. We also present detailed instructions given to the annotators and provide various statistics on annotations in the corpus.", "keyphrases": ["email thread", "social relation", "overt display"]} +{"id": "zeng-etal-2018-multi", "title": "Multi-Domain Neural Machine Translation with Word-Level Domain Context Discrimination", "abstract": "With great practical value, the study of Multi-domain Neural Machine Translation (NMT) mainly focuses on using mixed-domain parallel sentences to construct a unified model that allows translation to switch between different domains. Intuitively, words in a sentence are related to its domain to varying degrees, so that they will exert disparate impacts on the multi-domain NMT modeling. Based on this intuition, in this paper, we focus on distinguishing and exploiting word-level domain contexts for multi-domain NMT.
To this end, we jointly model NMT with monolingual attention-based domain classification tasks and improve NMT as follows: 1) Based on the sentence representations produced by a domain classifier and an adversarial domain classifier, we generate two gating vectors and use them to construct domain-specific and domain-shared annotations, for later translation predictions via different attention models; 2) We utilize the attention weights derived from the target-side domain classifier to adjust the weights of target words in the training objective, enabling domain-related words to have greater impacts during model training. Experimental results on Chinese-English and English-French multi-domain translation tasks demonstrate the effectiveness of the proposed model. Source code for this paper is available on GitHub.", "keyphrases": ["machine translation", "adversarial domain classifier", "domain-agnostic representation"]} +{"id": "huang-etal-2019-multi-modal", "title": "Multi-modal Discriminative Model for Vision-and-Language Navigation", "abstract": "Vision-and-Language Navigation (VLN) is a natural language grounding task where agents have to interpret natural language instructions in the context of visual scenes in a dynamic environment to achieve prescribed navigation goals. Successful agents must have the ability to parse natural language of varying linguistic styles, ground them in potentially unfamiliar scenes, plan and react with ambiguous environmental feedback. Generalization ability is limited by the amount of human annotated data. In particular, paired vision-language sequence data is expensive to collect. We develop a discriminator that evaluates how well an instruction explains a given path in the VLN task using multi-modal alignment. Our study reveals that only a small fraction of the high-quality augmented data from Fried et al., as scored by our discriminator, is useful for training VLN agents with similar performance. We also show that a VLN agent warm-started with pre-trained components from the discriminator outperforms the benchmark success rates of 35.5 by 10% relative measure.", "keyphrases": ["vision-and-language navigation", "instruction", "environment"]} +{"id": "batsuren-etal-2019-cognet", "title": "CogNet: A Large-Scale Cognate Database", "abstract": "This paper introduces CogNet, a new, large-scale lexical database that provides cognates (words of common origin and meaning) across languages. The database currently contains 3.1 million cognate pairs across 338 languages using 35 writing systems. The paper also describes the automated method by which cognates were computed from publicly available wordnets, with an accuracy evaluated at 94%. Finally, it presents statistics about the cognate data and some initial insights into it, hinting at a possible future exploitation of the resource by various fields of linguistics.", "keyphrases": ["large-scale lexical database", "common origin", "cognet"]} +{"id": "upadhyay-etal-2018-joint", "title": "Joint Multilingual Supervision for Cross-lingual Entity Linking", "abstract": "Cross-lingual Entity Linking (XEL) aims to ground entity mentions written in any language to an English Knowledge Base (KB), such as Wikipedia. XEL for most languages is challenging, owing to limited availability of resources as supervision. We address this challenge by developing the first XEL approach that combines supervision from multiple languages jointly.
This enables our approach to: (a) augment the limited supervision in the target language with additional supervision from a high-resource language (like English), and (b) train a single entity linking model for multiple languages, improving upon individually trained models for each language. Extensive evaluation on three benchmark datasets across 8 languages shows that our approach significantly improves over the current state-of-the-art. We also provide analyses in two limited resource settings: (a) zero-shot setting, when no supervision in the target language is available, and in (b) low-resource setting, when some supervision in the target language is available. Our analysis provides insights into the limitations of zero-shot XEL approaches in realistic scenarios, and shows the value of joint supervision in low-resource settings.", "keyphrases": ["cross-lingual entity linking", "downstream disambiguation model", "english wikipedia"]} +{"id": "hill-etal-2014-multi", "title": "Multi-Modal Models for Concrete and Abstract Concept Meaning", "abstract": "Multi-modal models that learn semantic representations from both linguistic and perceptual input outperform language-only models on a range of evaluations, and better reflect human concept acquisition. Most perceptual input to such models corresponds to concrete noun concepts and the superiority of the multi-modal approach has only been established when evaluating on such concepts. We therefore investigate which concepts can be effectively learned by multi-modal models. We show that concreteness determines both which linguistic features are most informative and the impact of perceptual input in such models. We then introduce ridge regression as a means of propagating perceptual information from concrete nouns to more abstract concepts that is more robust than previous approaches. Finally, we present weighted gram matrix combination, a means of combining representations from distinct modalities that outperforms alternatives when both modalities are sufficiently rich.", "keyphrases": ["noun", "multi-modal model", "visual similarity"]} +{"id": "torroba-hennigen-etal-2020-intrinsic", "title": "Intrinsic Probing through Dimension Selection", "abstract": "Most modern NLP systems make use of pre-trained contextual representations that attain astonishingly high performance on a variety of tasks. Such high performance should not be possible unless some form of linguistic structure inheres in these representations, and a wealth of research has sprung up on probing for it. In this paper, we draw a distinction between intrinsic probing, which examines how linguistic information is structured within a representation, and the extrinsic probing popular in prior work, which only argues for the presence of such information by showing that it can be successfully extracted. To enable intrinsic probing, we propose a novel framework based on a decomposable multivariate Gaussian probe that allows us to determine whether the linguistic information in word embeddings is dispersed or focal. We then probe fastText and BERT for various morphosyntactic attributes across 36 languages. 
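The ridge-regression propagation step in the hill-etal-2014-multi record above is easy to sketch; the random vectors below are stand-ins for real linguistic and perceptual representations, and the dimensionalities are arbitrary assumptions.

```python
# Hedged sketch: learn a linguistic -> perceptual map on concrete nouns,
# then propagate perceptual information to abstract concepts. Toy data.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
X_concrete = rng.normal(size=(50, 20))               # linguistic vectors
P_concrete = X_concrete @ rng.normal(size=(20, 10))  # known perceptual vectors
X_abstract = rng.normal(size=(5, 20))                # no perceptual input here

model = Ridge(alpha=1.0).fit(X_concrete, P_concrete)
P_abstract = model.predict(X_abstract)               # propagated representation
print(P_abstract.shape)  # (5, 10)
```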
We find that most attributes are reliably encoded by only a few neurons, with fastText concentrating its linguistic structure more than BERT.", "keyphrases": ["presence", "neuron", "intrinsic probing"]} +{"id": "shi-etal-2020-semantic", "title": "Semantic Role Labeling as Syntactic Dependency Parsing", "abstract": "We reduce the task of (span-based) PropBank-style semantic role labeling (SRL) to syntactic dependency parsing. Our approach is motivated by our empirical analysis that shows three common syntactic patterns account for over 98% of the SRL annotations for both English and Chinese data. Based on this observation, we present a conversion scheme that packs SRL annotations into dependency tree representations through joint labels that permit highly accurate recovery back to the original format. This representation allows us to train statistical dependency parsers to tackle SRL and achieve competitive performance with the current state of the art. Our findings show the promise of syntactic dependency trees in encoding semantic role relations within their syntactic domain of locality, and point to potential further integration of syntactic methods into semantic role labeling in the future.", "keyphrases": ["syntactic dependency parsing", "srl", "joint label"]} +{"id": "zhou-etal-2020-temporal", "title": "Temporal Common Sense Acquisition with Minimal Supervision", "abstract": "Temporal common sense (e.g., duration and frequency of events) is crucial for understanding natural language. However, its acquisition is challenging, partly because such information is often not expressed explicitly in text, and human annotation on such concepts is costly. This work proposes a novel sequence modeling approach that exploits explicit and implicit mentions of temporal common sense, extracted from a large corpus, to build TacoLM, a temporal common sense language model. Our method is shown to give quality predictions of various dimensions of temporal common sense (on UDST and a newly collected dataset from RealNews). It also produces representations of events for relevant tasks such as duration comparison, parent-child relations, event coreference and temporal QA (on TimeBank, HiEVE and MCTACO) that are better than using the standard BERT. Thus, it will be an important component of temporal NLP.", "keyphrases": ["duration", "sense language model", "bert", "temporal data", "data-driven method"]} +{"id": "elahi-monachesi-2012-examination", "title": "An Examination of Cross-Cultural Similarities and Differences from Social Media Data with respect to Language Use", "abstract": "We present a methodology for analyzing cross-cultural similarities and differences using language as a medium, love as domain, social media as a data source and 'Terms' and 'Topics' as cultural features. We discuss the techniques necessary for the creation of the social data corpus from which emotion terms have been extracted using NLP techniques. Topics of love discussion were then extracted from the corpus by means of Latent Dirichlet Allocation (LDA). Finally, on the basis of these features, a cross-cultural comparison was carried out. For the purpose of cross-cultural analysis, the experimental focus was on comparing data from a culture from the East (India) with a culture from the West (United States of America). 
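As a sketch of the topic-extraction step mentioned in the elahi-monachesi-2012-examination record above, the snippet below runs LDA over a few toy posts; the corpus, topic count and top-term cutoff are illustrative assumptions.

```python
# Hedged sketch: extract discussion topics from toy posts with LDA.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

posts = [
    "love is patient love is kind",
    "i love chocolate and coffee",
    "wedding rituals differ across cultures",
    "family blessings matter in marriage",
]
vec = CountVectorizer(stop_words="english")
X = vec.fit_transform(posts)
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)
terms = vec.get_feature_names_out()
for k, weights in enumerate(lda.components_):
    top = weights.argsort()[-3:][::-1]
    print(f"topic {k}:", [terms[i] for i in top])
```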
Similarities and differences between these cultures have been analyzed with respect to the usage of emotions, their intensities and the topics used during love discussion in social media.", "keyphrases": ["emotion term", "cross-cultural analysis", "social medium data"]} +{"id": "chambers-jurafsky-2010-database", "title": "A Database of Narrative Schemas", "abstract": "This paper describes a new language resource of events and semantic roles that characterize real-world situations. Narrative schemas contain sets of related events (edit and publish), a temporal ordering of the events (edit before publish), and the semantic roles of the participants (authors publish books). This type of world knowledge was central to early research in natural language understanding; scripts, one of the main formalisms, represented common sequences of events that occur in the world. Unfortunately, most of this knowledge was hand-coded and time-consuming to create. Current machine learning techniques, as well as a new approach to learning through coreference chains, have allowed us to automatically extract rich event structure from open domain text in the form of narrative schemas. The narrative schema resource described in this paper contains approximately 5000 unique events combined into schemas of varying sizes. We describe the resource, how it is learned, and a new evaluation of the coverage of these schemas over unseen documents.", "keyphrases": ["database", "narrative schemas", "world knowledge", "chain", "various event mention"]} +{"id": "vilar-etal-2010-jane", "title": "Jane: Open Source Hierarchical Translation, Extended with Reordering and Lexicon Models", "abstract": "We present Jane, RWTH's hierarchical phrase-based translation system, which has been open sourced for the scientific community. This system has been in development at RWTH for the last two years and has been successfully applied in different machine translation evaluations. It includes extensions to the hierarchical approach developed by RWTH as well as other research institutions. In this paper we give an overview of its main features. \n \nWe also introduce a novel reordering model for the hierarchical phrase-based approach which further enhances translation performance, and analyze the effect some recent extended lexicon models have on the performance of the system.", "keyphrases": ["translation system", "jane", "europarl task"]} +{"id": "vulic-etal-2017-morph", "title": "Morph-fitting: Fine-Tuning Word Vector Spaces with Simple Language-Specific Rules", "abstract": "Morphologically rich languages accentuate two properties of distributional vector space models: 1) the difficulty of inducing accurate representations for low-frequency word forms; and 2) insensitivity to distinct lexical relations that have similar distributional signatures. These effects are detrimental for language understanding systems, which may infer that `inexpensive' is a rephrasing for `expensive' or may not associate `acquire' with `acquires'. In this work, we propose a novel morph-fitting procedure which moves past the use of curated semantic lexicons for improving distributional vector spaces. Instead, our method injects morphological constraints generated using simple language-specific rules, pulling inflectional forms of the same word close together and pushing derivational antonyms far apart.
In intrinsic evaluation over four languages, we show that our approach: 1) improves low-frequency word estimates; and 2) boosts the semantic quality of the entire word vector collection. Finally, we show that morph-fitted vectors yield large gains in the downstream task of dialogue state tracking, highlighting the importance of morphology for tackling long-tail phenomena in language understanding tasks.", "keyphrases": ["vector space", "simple language-specific rule", "downstream task"]} +{"id": "xiong-etal-2013-lexical", "title": "Lexical Chain Based Cohesion Models for Document-Level Statistical Machine Translation", "abstract": "Lexical chains provide a representation of the lexical cohesion structure of a text. In this paper, we propose two lexical chain based cohesion models to incorporate lexical cohesion into document-level statistical machine translation: 1) a count cohesion model that rewards a hypothesis whenever a chain word occurs in the hypothesis, and 2) a probability cohesion model that further takes chain word translation probabilities into account. We compute lexical chains for each source document to be translated and generate target lexical chains based on the computed source chains via maximum entropy classifiers. We then use the generated target chains to provide constraints for word selection in document-level machine translation through the two proposed lexical chain based cohesion models. We verify the effectiveness of the two models using a hierarchical phrase-based translation system. Experiments on large-scale training data show that they can substantially improve translation quality in terms of BLEU and that the probability cohesion model outperforms previous models based on lexical cohesion devices.", "keyphrases": ["chain", "statistical machine translation", "lexical cohesion"]} +{"id": "alshawi-2003-effective", "title": "Effective Utterance Classification with Unsupervised Phonotactic Models", "abstract": "This paper describes a method for utterance classification that does not require manual transcription of training data. The method combines domain independent acoustic models with off-the-shelf classifiers to give utterance classification performance that is surprisingly close to what can be achieved using conventional word-trigram recognition requiring manual transcription. In our method, unsupervised training is first used to train a phone n-gram model for a particular domain; the output of recognition with this model is then passed to a phone-string classifier. The classification accuracy of the method is evaluated on three different spoken language system domains.", "keyphrases": ["utterance classification", "transcription", "unsupervised training"]} +{"id": "wang-etal-2012-implicit", "title": "Implicit Discourse Relation Recognition by Selecting Typical Training Examples", "abstract": "Implicit discourse relation recognition is a challenging task in the natural language processing field, but important to many applications such as question answering, summarization and so on. Previous research used either artificially created implicit discourse relations with connectives removed from explicit relations or annotated implicit relations as training data to detect the possible implicit relations, and did not further discern which examples are fit to be training data. This paper is the first to apply a different typical/atypical perspective to select the most suitable discourse relation examples as training data.
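A toy attract/repel update in the spirit of the morph-fitting constraints from the vulic-etal-2017-morph record above; the pair lists, learning rate and update rule are simplified assumptions rather than the paper's exact cost function.

```python
# Hedged sketch: pull inflections of the same lemma together and push
# rule-generated derivational antonyms apart. Vectors are toy values.
import numpy as np

rng = np.random.default_rng(1)
vecs = {w: rng.normal(size=8)
        for w in ["acquire", "acquires", "expensive", "inexpensive"]}
cos = lambda a, b: a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

def attract(u, v, lr=0.1):
    # Simultaneous update: move both vectors toward each other.
    vecs[u], vecs[v] = (vecs[u] + lr * (vecs[v] - vecs[u]),
                        vecs[v] + lr * (vecs[u] - vecs[v]))

def repel(u, v, lr=0.1):
    # Simultaneous update: move both vectors away from each other.
    vecs[u], vecs[v] = (vecs[u] - lr * (vecs[v] - vecs[u]),
                        vecs[v] - lr * (vecs[u] - vecs[v]))

for _ in range(10):
    attract("acquire", "acquires")      # same-lemma inflections
    repel("expensive", "inexpensive")   # rule-generated antonyms

print(cos(vecs["acquire"], vecs["acquires"]))       # rises toward 1
print(cos(vecs["expensive"], vecs["inexpensive"]))  # driven down
```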
To differentiate typical and atypical examples for each discourse relation, a novel single centroid clustering algorithm is proposed. With this typical/atypical distinction, we aim to recognize those easily identified discourse relations more precisely so as to improve the performance of implicit relation recognition. The experimental results verify that the proposed new method outperforms the state-of-the-art methods.", "keyphrases": ["discourse relation", "implicit", "augment training data"]} +{"id": "qazvinian-radev-2010-identifying", "title": "Identifying Non-Explicit Citing Sentences for Citation-Based Summarization.", "abstract": "Identifying background (context) information in scientific articles can help scholars understand major contributions in their research area more easily. In this paper, we propose a general framework based on probabilistic inference to extract such context information from scientific papers. We model the sentences in an article and their lexical similarities as a Markov Random Field tuned to detect the patterns that context data create, and employ a Belief Propagation mechanism to detect likely context sentences. We also address the problem of generating surveys of scientific papers. Our experiments show greater pyramid scores for surveys generated using such context information rather than citation sentences alone.", "keyphrases": ["citation-based summarization", "context sentence", "non-explicit citation"]} +{"id": "ponvert-etal-2011-simple", "title": "Simple Unsupervised Grammar Induction from Raw Text with Cascaded Finite State Models", "abstract": "We consider a new subproblem of unsupervised parsing from raw text, unsupervised partial parsing---the unsupervised version of text chunking. We show that addressing this task directly, using probabilistic finite-state methods, produces better results than relying on the local predictions of a current best unsupervised parser, Seginer's (2007) CCL. These finite-state models are combined in a cascade to produce more general (full-sentence) constituent structures; doing so outperforms CCL by a wide margin in unlabeled PARSEVAL scores for English, German and Chinese. Finally, we address the use of phrasal punctuation as a heuristic indicator of phrasal boundaries, both in our system and in CCL.", "keyphrases": ["grammar induction", "raw text", "syntax-dependent model"]} +{"id": "guinaudeau-strube-2013-graph", "title": "Graph-based Local Coherence Modeling", "abstract": "We propose a computationally efficient graph-based approach for local coherence modeling. We evaluate our system on three tasks: sentence ordering, summary coherence rating and readability assessment. The performance is comparable to entity grid based approaches though these rely on a computationally expensive training phase and face data sparsity problems.", "keyphrases": ["local coherence modeling", "sentence ordering", "entity grid model"]} +{"id": "jahan-etal-2018-new", "title": "A New Approach to Animacy Detection", "abstract": "Animacy is a necessary property for a referent to be an agent, and thus animacy detection is useful for a variety of natural language processing tasks, including word sense disambiguation, co-reference resolution, semantic role labeling, and others. Prior work treated animacy as a word-level property, and has developed statistical classifiers to classify words as either animate or inanimate.
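The graph-based score in the guinaudeau-strube-2013-graph record above reduces, in its unweighted variant, to a few lines; the entity sets below are a toy stand-in for an entity-grid extraction step.

```python
# Hedged sketch: unweighted one-mode projection of the bipartite
# sentence-entity graph, scored by average projection degree.
def coherence(sent_entities):
    """sent_entities: list of entity sets, one per sentence."""
    n = len(sent_entities)
    score = 0.0
    for i in range(n):
        # An edge i -> j exists when the two sentences share an entity.
        score += sum(1 for j in range(i + 1, n)
                     if sent_entities[i] & sent_entities[j])
    return score / n if n else 0.0

doc = [{"obama", "speech"}, {"speech", "senate"}, {"senate"}]
print(coherence(doc))  # higher = locally more coherent (toy scale)
```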
We discuss why this approach to the problem is ill-posed, and present a new approach based on classifying the animacy of co-reference chains. We show that simple voting approaches to inferring the animacy of a chain from its constituent words perform relatively poorly, and then present a hybrid system merging supervised machine learning (ML) and a small number of hand-built rules to compute the animacy of referring expressions and co-reference chains. This method achieves state of the art performance. The supervised ML component leverages features such as word embeddings over referring expressions, parts of speech, and grammatical and semantic roles. The rules take into consideration parts of speech and the hypernymy structure encoded in WordNet. The system achieves an F1 of 0.88 for classifying the animacy of referring expressions, which is comparable to state of the art results for classifying the animacy of words, and achieves an F1 of 0.75 for classifying the animacy of coreference chains themselves. We release our training and test dataset, which includes 142 texts (all narratives) comprising 156,154 words, 34,698 referring expressions, and 10,941 co-reference chains. We test the method on a subset of the OntoNotes dataset, showing using manual sampling that animacy classification is 90% +/- 2% accurate for coreference chains, and 92% +/- 1% for referring expressions. The data also contains 46 folktales, which present an interesting challenge because they often involve characters who are members of traditionally inanimate classes (e.g., stoves that walk, trees that talk). We show that our system is able to detect the animacy of these unusual referents with an F1 of 0.95.", "keyphrases": ["animacy detection", "hand-built rule", "word embedding"]} +{"id": "gerz-etal-2018-language", "title": "Language Modeling for Morphologically Rich Languages: Character-Aware Modeling for Word-Level Prediction", "abstract": "Neural architectures are prominent in the construction of language models (LMs). However, word-level prediction is typically agnostic of subword-level information (characters and character sequences) and operates over a closed vocabulary, consisting of a limited word set. Indeed, while subword-aware models boost performance across a variety of NLP tasks, previous work did not evaluate the ability of these models to assist next-word prediction in language modeling tasks. Such subword-level informed models should be particularly effective for morphologically-rich languages (MRLs) that exhibit high type-to-token ratios. In this work, we present a large-scale LM study on 50 typologically diverse languages covering a wide variety of morphological systems, and offer new LM benchmarks to the community, while considering subword-level information. The main technical contribution of our work is a novel method for injecting subword-level information into semantic word vectors, integrated into the neural language modeling training, to facilitate word-level prediction. We conduct experiments in the LM setting where the number of infrequent words is large, and demonstrate strong perplexity gains across our 50 languages, especially for morphologically-rich languages. 
Our code and data sets are publicly available.", "keyphrases": ["word-level prediction", "morphologically-rich language", "character-aware nlms"]} +{"id": "gao-etal-2003-improved", "title": "Improved Source-Channel Models for Chinese Word Segmentation", "abstract": "This paper presents a Chinese word segmentation system that uses improved source-channel models of Chinese sentence generation. Chinese words are defined as one of the following four types: lexicon words, morphologically derived words, factoids, and named entities. Our system provides a unified approach to the four fundamental features of word-level Chinese language processing: (1) word segmentation, (2) morphological analysis, (3) factoid detection, and (4) named entity recognition. The performance of the system is evaluated on a manually annotated test set, and is also compared with several state-of-the-art systems, taking into account the fact that the definition of Chinese words often varies from system to system.", "keyphrases": ["source-channel model", "chinese word segmentation", "word-based generative model"]} +{"id": "tay-etal-2018-compare", "title": "Compare, Compress and Propagate: Enhancing Neural Architectures with Alignment Factorization for Natural Language Inference", "abstract": "This paper presents a new deep learning architecture for Natural Language Inference (NLI). Firstly, we introduce a new architecture where alignment pairs are compared, compressed and then propagated to upper layers for enhanced representation learning. Secondly, we adopt factorization layers for efficient and expressive compression of alignment vectors into scalar features, which are then used to augment the base word representations. The design of our approach is aimed to be conceptually simple, compact and yet powerful. We conduct experiments on three popular benchmarks, SNLI, MultiNLI and SciTail, achieving competitive performance on all. A lightweight parameterization of our model also enjoys a 3 times reduction in parameter size compared to the existing state-of-the-art models, e.g., ESIM and DIIN, while maintaining competitive performance. Additionally, visual analysis shows that our propagated features are highly interpretable.", "keyphrases": ["natural language inference", "alignment pair", "factorization layer"]} +{"id": "shaikh-etal-2010-mpc", "title": "MPC: A Multi-Party Chat Corpus for Modeling Social Phenomena in Discourse", "abstract": "In this paper, we describe our experience with collecting and creating an annotated corpus of multi-party online conversations in a chat-room environment. This effort is part of a larger project to develop computational models of social phenomena such as agenda control, influence, and leadership in on-line interactions. Such models will help capturing the dialogue dynamics that are essential for developing, among others, realistic human-machine dialogue systems, including autonomous virtual chat agents. In this paper we describe data collection method used and the characteristics of the initial dataset of English chat. We have devised a multi-tiered collection process in which the subjects start from simple, free-flowing conversations and progress towards more complex and structured interactions. In this paper, we report on the first two stages of this process, which were recently completed. The third, large-scale collection effort is currently being conducted. All English dialogue has been annotated at four levels: communication links, dialogue acts, local topics and meso-topics. 
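A minimal dynamic-programming decoder for the source-channel segmentation view in the gao-etal-2003-improved record above; the unigram lexicon and maximum word length are toy assumptions, whereas the real system scores lexicon words, morphologically derived words, factoids and named entities with separate channel models.

```python
# Hedged sketch: choose the segmentation maximizing a unigram "source"
# probability via dynamic programming. Lexicon probabilities are toy.
import math

lexicon = {"北京": 0.4, "大学": 0.3, "北京大学": 0.2, "生": 0.1}

def segment(text, max_len=6):
    n = len(text)
    best = [(-math.inf, 0)] * (n + 1)   # (log-prob, backpointer)
    best[0] = (0.0, 0)
    for i in range(1, n + 1):
        for j in range(max(0, i - max_len), i):
            w = text[j:i]
            if w in lexicon:
                cand = best[j][0] + math.log(lexicon[w])
                if cand > best[i][0]:
                    best[i] = (cand, j)
    out, i = [], n                      # backtrace the best path
    while i > 0:
        j = best[i][1]
        out.append(text[j:i])
        i = j
    return out[::-1]

print(segment("北京大学生"))  # ['北京大学', '生'] beats ['北京', '大学', '生']
```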
Some details of these annotations will be discussed later in this paper, although a full description is impossible within the scope of this article.", "keyphrases": ["discourse", "english chat", "meso-topic"]} +{"id": "akiba-etal-2004-overview", "title": "Overview of the IWSLT evaluation campaign", "abstract": "This paper gives an overview of the evaluation campaign results of the IWSLT04 workshop, which is organized by the C-STAR consortium to investigate novel speech translation technologies and their evaluation. The objective of this workshop is to provide a framework for the applicability validation of existing machine translation evaluation methodologies to evaluate speech translation technologies. The workshop also strives to find new directions in how to improve current methods.", "keyphrases": ["iwslt evaluation campaign", "ted talk", "translation quality"]} +{"id": "neculoiu-etal-2016-learning", "title": "Learning Text Similarity with Siamese Recurrent Networks", "abstract": "This paper presents a deep architecture for learning a similarity metric on variable-length character sequences. The model combines a stack of character-level bidirectional LSTMs with a Siamese architecture. It learns to project variable-length strings into a fixed-dimensional embedding space by using only information about the similarity between pairs of strings. This model is applied to the task of job title normalization based on a manually annotated taxonomy. A small data set is incrementally expanded and augmented with new sources of variance. The model learns a representation that is selective to differences in the input that reflect semantic differences (e.g., \u201cJava developer\u201d vs. \u201cHR manager\u201d) but also invariant to non-semantic string differences (e.g., \u201cJava developer\u201d vs. \u201cJava programmer\u201d).", "keyphrases": ["similarity metric", "siamese architecture", "job title normalization"]} +{"id": "nakashole-flauger-2018-characterizing", "title": "Characterizing Departures from Linearity in Word Translation", "abstract": "We investigate the behavior of maps learned by machine translation methods. The maps translate words by projecting between word embedding spaces of different languages. We locally approximate these maps using linear maps, and find that they vary across the word embedding space. This demonstrates that the underlying maps are non-linear. Importantly, we show that the locally linear maps vary by an amount that is tightly correlated with the distance between the neighborhoods on which they are trained. Our results can be used to test non-linear methods, and to drive the design of more accurate maps for word translation.", "keyphrases": ["linearity", "word translation", "behavior", "assumption", "local region"]} +{"id": "wachsmuth-etal-2017-argumentation", "title": "Argumentation Quality Assessment: Theory vs. Practice", "abstract": "Argumentation quality is viewed differently in argumentation theory and in practical assessment approaches. This paper studies to what extent the views match empirically. We find that most observations on quality phrased spontaneously are in fact adequately represented by theory. Even more, relative comparisons of arguments in practice correlate with absolute quality ratings based on theory.
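The twin-encoder setup in the neculoiu-etal-2016-learning record above can be sketched with shared weights and cosine comparison; the character vocabulary, mean pooling and layer sizes are assumptions, and a real model would be trained with a contrastive loss over matched and mismatched string pairs.

```python
# Hedged sketch: a siamese character-BiLSTM similarity model (untrained).
import torch
import torch.nn as nn

class Encoder(nn.Module):
    def __init__(self, n_chars=128, dim=32):
        super().__init__()
        self.emb = nn.Embedding(n_chars, dim)
        self.lstm = nn.LSTM(dim, dim, bidirectional=True, batch_first=True)

    def forward(self, ids):
        out, _ = self.lstm(self.emb(ids))
        return out.mean(dim=1)  # average states -> fixed-size embedding

def encode(enc, s):
    ids = torch.tensor([[min(ord(c), 127) for c in s]])
    return enc(ids)

enc = Encoder()                       # one encoder, shared by both branches
a = encode(enc, "java developer")
b = encode(enc, "java programmer")
print(torch.cosine_similarity(a, b).item())
```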
Our results clarify how the two views can learn from each other.", "keyphrases": ["argument quality", "quality dimension", "reasoning"]} +{"id": "wang-etal-2018-cross-lingual", "title": "Cross-lingual Knowledge Graph Alignment via Graph Convolutional Networks", "abstract": "Multilingual knowledge graphs (KGs) such as DBpedia and YAGO contain structured knowledge of entities in several distinct languages, and they are useful resources for cross-lingual AI and NLP applications. Cross-lingual KG alignment is the task of matching entities with their counterparts in different languages, which is an important way to enrich the cross-lingual links in multilingual KGs. In this paper, we propose a novel approach for cross-lingual KG alignment via graph convolutional networks (GCNs). Given a set of pre-aligned entities, our approach trains GCNs to embed entities of each language into a unified vector space. Entity alignments are discovered based on the distances between entities in the embedding space. Embeddings can be learned from both the structural and attribute information of entities, and the results of structure embedding and attribute embedding are combined to get accurate alignments. In the experiments on aligning real multilingual KGs, our approach gets the best performance compared with other embedding-based KG alignment approaches.", "keyphrases": ["pre-aligned entity", "graph encoder", "low-dimensional vector space"]} +{"id": "hovy-spruit-2016-social", "title": "The Social Impact of Natural Language Processing", "abstract": "Medical sciences have long since established an ethics code for experiments, to minimize the risk of harm to subjects. Natural language processing (NLP) used to involve mostly anonymous corpora, with the goal of enriching linguistic analysis, and was therefore unlikely to raise ethical concerns. As NLP becomes increasingly widespread and uses more data from social media, however, the situation has changed: the outcome of NLP experiments and applications can now have a direct effect on individual users\u2019 lives. Until now, the discourse on this topic in the field has not followed the technological development, while public discourse was often focused on exaggerated dangers. This position paper tries to take back the initiative and start a discussion. We identify a number of social implications of NLP and discuss their ethical significance, as well as ways to address them.", "keyphrases": ["implication", "nlp system", "million"]} +{"id": "lal-etal-2019-de", "title": "De-Mixing Sentiment from Code-Mixed Text", "abstract": "Code-mixing is the phenomenon of mixing the vocabulary and syntax of multiple languages in the same sentence. It is an increasingly common occurrence in today's multilingual society and poses a big challenge when encountered in different downstream tasks. In this paper, we present a hybrid architecture for the task of Sentiment Analysis of English-Hindi code-mixed data. Our method consists of three components, each seeking to alleviate different issues. We first generate subword level representations for the sentences using a CNN architecture. The generated representations are used as inputs to a Dual Encoder Network which consists of two different BiLSTMs - the Collective and Specific Encoder. The Collective Encoder captures the overall sentiment of the sentence, while the Specific Encoder utilizes an attention mechanism in order to focus on individual sentiment-bearing sub-words.
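For the wang-etal-2018-cross-lingual record above, the core idea (shared-weight graph convolutions over two KGs, alignment by nearest neighbour in the joint space) fits in a short sketch; the adjacency matrices, feature sizes and single layer are toy assumptions.

```python
# Hedged sketch: one shared GCN layer over two toy KGs, then align
# entities of KG1 to their nearest neighbours in KG2.
import torch

def gcn_layer(A, H, W):
    # Symmetric normalization: D^-1/2 (A + I) D^-1/2 H W, then ReLU.
    A_hat = A + torch.eye(A.size(0))
    d = A_hat.sum(dim=1).pow(-0.5)
    return torch.relu(d.unsqueeze(1) * A_hat * d.unsqueeze(0) @ H @ W)

torch.manual_seed(0)
W1 = torch.randn(16, 16)                               # shared by both KGs
A1, H1 = (torch.rand(5, 5) > 0.5).float(), torch.randn(5, 16)
A2, H2 = (torch.rand(5, 5) > 0.5).float(), torch.randn(5, 16)
Z1, Z2 = gcn_layer(A1, H1, W1), gcn_layer(A2, H2, W1)
print(torch.cdist(Z1, Z2, p=1).argmin(dim=1))          # KG1 -> KG2 matches
```

In training, the pre-aligned seed entities would supply a margin-based loss pulling matched pairs together in this joint space.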
This, combined with a Feature Network consisting of orthographic features and specially trained word embeddings, achieves state-of-the-art results - 83.54% accuracy and 0.827 F1 score - on a benchmark dataset.", "keyphrases": ["code-mixed text", "sentiment analysis", "dual encoder network", "collective", "orthographic feature"]} +{"id": "gao-etal-2016-physical", "title": "Physical Causality of Action Verbs in Grounded Language Understanding", "abstract": "Linguistics studies have shown that action verbs often denote some Change of State (CoS) as the result of an action. However, the causality of action verbs and its potential connection with the physical world has not been systematically explored. To address this limitation, this paper presents a study on physical causality of action verbs and their implied changes in the physical world. We first conducted a crowd-sourcing experiment and identified eighteen categories of physical causality for action verbs. For a subset of these categories, we then defined a set of detectors that detect the corresponding change from visual perception of the physical environment. We further incorporated physical causality modeling and state detection in grounded language understanding. Our empirical studies have demonstrated the effectiveness of causality modeling in grounding language to perception.", "keyphrases": ["action verb", "language understanding", "change", "perception", "physical causality"]} +{"id": "steingrimsson-etal-2017-malromur", "title": "M\u00e1lr\u00f3mur: A Manually Verified Corpus of Recorded Icelandic Speech", "abstract": "This paper describes the M\u00e1lr\u00f3mur corpus, an open, manually verified, Icelandic speech corpus. The recordings were collected in 2011\u20132012 by Reykjavik University and the Icelandic Center for Language Technology in cooperation with Google. 152 hours of speech were recorded from 563 participants. The recordings were subsequently manually inspected by evaluators listening to all the segments, determining whether any given segment contains the utterance the participant was supposed to read, and nothing else. Out of 127,286 recorded segments, 108,568 were approved and 18,718 deemed unsatisfactory.", "keyphrases": ["speech corpus", "hour", "segment"]} +{"id": "chakrabarty-etal-2019-imho", "title": "IMHO Fine-Tuning Improves Claim Detection", "abstract": "Claims are the central component of an argument. Detecting claims across different domains or data sets can often be challenging due to their varying conceptualization. We propose to alleviate this problem by fine-tuning a language model using a Reddit corpus of 5.5 million opinionated claims. These claims are self-labeled by their authors using the internet acronyms IMO/IMHO (in my (humble) opinion). Empirical results show that using this approach improves state-of-the-art performance across four benchmark argumentation data sets by an average of 4 absolute F1 points in claim detection.
As these data sets include diverse domains such as social media and student essays, this improvement demonstrates the robustness of fine-tuning on this novel corpus.", "keyphrases": ["claim detection", "language model", "reddit corpus", "opinion", "imho"]} +{"id": "callison-burch-etal-2010-findings", "title": "Findings of the 2010 Joint Workshop on Statistical Machine Translation and Metrics for Machine Translation", "abstract": "This paper presents the results of the WMT10 and MetricsMATR10 shared tasks, which included a translation task, a system combination task, and an evaluation task. We conducted a large-scale manual evaluation of 104 machine translation systems and 41 system combination entries. We used the ranking of these systems to measure how strongly automatic metrics correlate with human judgments of translation quality for 26 metrics. This year we also investigated increasing the number of human judgments by hiring non-expert annotators through Amazon's Mechanical Turk.", "keyphrases": ["statistical machine translation", "wmt10", "evaluation task"]} +{"id": "badene-etal-2019-weak", "title": "Weak Supervision for Learning Discourse Structure", "abstract": "This paper provides a detailed comparison of a data programming approach with (i) off-the-shelf, state-of-the-art deep learning architectures that optimize their representations (BERT) and (ii) handcrafted-feature approaches previously used in the discourse analysis literature. We compare these approaches on the task of learning discourse structure for multi-party dialogue. The data programming paradigm offered by the Snorkel framework allows a user to label training data using expert-composed heuristics, which are then transformed via the \u201cgenerative step\u201d into probability distributions of the class labels given the data. We show that on our task the generative model outperforms both deep learning architectures as well as more traditional ML approaches when learning discourse structure\u2014it even outperforms the combination of deep learning methods and hand-crafted features. We also implement several strategies for \u201cdecoding\u201d our generative model output in order to improve our results. We conclude that weak supervision methods hold great promise as a means for creating and improving data sets for discourse structure.", "keyphrases": ["discourse structure", "learning method", "weak supervision"]} +{"id": "lawrence-reed-2015-combining", "title": "Combining Argument Mining Techniques", "abstract": "In this paper, we look at three different methods of extracting the argumentative structure from a piece of natural language text. These methods cover linguistic features, changes in the topic being discussed and a supervised machine learning approach to identify the components of argumentation schemes, patterns of human reasoning which have been detailed extensively in philosophy and psychology. For each of these approaches we achieve results comparable to those previously reported, whilst at the same time achieving a more detailed argument structure.
Finally, we use the results from these individual techniques to apply them in combination, further improving the argument structure identification.", "keyphrases": ["argument structure", "discourse indicator", "connection"]} +{"id": "erk-etal-2003-towards", "title": "Towards a Resource for Lexical Semantics: A Large German Corpus with Extensive Semantic Annotation", "abstract": "We describe the ongoing construction of a large, semantically annotated corpus resource as reliable basis for the large-scale acquisition of word-semantic information, e.g. the construction of domain-independent lexica. The backbone of the annotation are semantic roles in the frame semantics paradigm. We report experiences and evaluate the annotated data from the first project stage. On this basis, we discuss the problems of vagueness and ambiguity in semantic annotation.", "keyphrases": ["german corpus", "semantic role", "salsa project"]} +{"id": "fleischman-hovy-2004-multi", "title": "Multi-Document Person Name Resolution", "abstract": "Multi-document person name resolution focuses on the problem of determining if two instances with the same name and from different documents refer to the same individual. We present a two-step approach in which a Maximum Entropy model is trained to give the probability that two names refer to the same individual. We then apply a modified agglomerative clustering technique to partition the instances according to their referents.", "keyphrases": ["person name resolution", "maximum entropy model", "mention"]} +{"id": "song-etal-2021-improved-word", "title": "Improved Word Sense Disambiguation with Enhanced Sense Representations", "abstract": "Current state-of-the-art supervised word sense disambiguation (WSD) systems (such as GlossBERT and bi-encoder model) yield surprisingly good results by purely leveraging pre-trained language models and short dictionary definitions (or glosses) of the different word senses. While concise and intuitive, the sense gloss is just one of many ways to provide information about word senses. In this paper, we focus on enhancing the sense representations via incorporating synonyms, example phrases or sentences showing usage of word senses, and sense gloss of hypernyms. We show that incorporating such additional information boosts the performance on WSD. With the proposed enhancements, our system achieves an F1 score of 82.0% on the standard benchmark test dataset of the English all-words WSD task, surpassing all previous published scores on this benchmark dataset.", "keyphrases": ["word sense disambiguation", "wsd", "hypernyms"]} +{"id": "yu-etal-2017-refining", "title": "Refining Word Embeddings for Sentiment Analysis", "abstract": "Word embeddings that can capture semantic and syntactic information from contexts have been extensively used for various natural language processing tasks. However, existing methods for learning context-based word embeddings typically fail to capture sufficient sentiment information. This may result in words with similar vector representations having an opposite sentiment polarity (e.g., good and bad), thus degrading sentiment analysis performance. Therefore, this study proposes a word vector refinement model that can be applied to any pre-trained word vectors (e.g., Word2vec and GloVe). The refinement model is based on adjusting the vector representations of words such that they can be closer to both semantically and sentimentally similar words and further away from sentimentally dissimilar words. 
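The second step of the fleischman-hovy-2004-multi record above (partitioning mentions given pairwise same-referent probabilities) can be sketched directly; the probability matrix below is synthetic, standing in for the trained Maximum Entropy model, and the merge threshold is an assumption. Note that the `metric="precomputed"` argument name follows recent scikit-learn releases (older versions used `affinity`).

```python
# Hedged sketch: agglomerative clustering over 1 - P(same referent).
import numpy as np
from sklearn.cluster import AgglomerativeClustering

# prob[i, j] = P(mentions i and j denote the same person), toy numbers.
prob = np.array([[1.0, 0.9, 0.2],
                 [0.9, 1.0, 0.1],
                 [0.2, 0.1, 1.0]])
dist = 1.0 - prob
labels = AgglomerativeClustering(
    n_clusters=None, distance_threshold=0.5,
    metric="precomputed", linkage="average").fit_predict(dist)
print(labels)  # mentions 0 and 1 cluster together; mention 2 stays apart
```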
Experimental results show that the proposed method can improve conventional word embeddings and outperform previously proposed sentiment embeddings for both binary and fine-grained classification on Stanford Sentiment Treebank (SST).", "keyphrases": ["sentiment analysis", "syntactic information", "pre-trained word vector"]} +{"id": "moneglia-etal-2014-imagact", "title": "The IMAGACT Visual Ontology. An Extendable Multilingual Infrastructure for the representation of lexical encoding of Action", "abstract": "Action verbs have many meanings, covering actions in different ontological types. Moreover, each language categorizes action in its own way. One verb can refer to many different actions and one action can be identified by more than one verb. The range of variations within and across languages is largely unknown, causing trouble for natural language processing tasks. IMAGACT is a corpus-based ontology of action concepts, derived from English and Italian spontaneous speech corpora, which makes use of the universal language of images to identify the different action types extended by verbs referring to action in English, Italian, Chinese and Spanish. This paper presents the infrastructure and the various linguistic information the user can derive from it. IMAGACT makes explicit the variation of meaning of action verbs within one language and allows comparisons of verb variations within and across languages. Because the action concepts are represented with videos, extension into new languages beyond those presently implemented in IMAGACT is done using competence-based judgments by mother-tongue informants without intense lexicographic work involving underdetermined semantic description", "keyphrases": ["imagact visual ontology", "lexical encoding", "corpus-based ontology", "action concept"]} +{"id": "zaghouani-etal-2010-revised", "title": "The Revised Arabic PropBank", "abstract": "The revised Arabic PropBank (APB) reflects a number of changes to the data and the process of PropBanking. Several changes stem from Treebank revisions. An automatic process was put in place to map existing annotation to the new trees. We have revised the original 493 Frame Files from the Pilot APB and added 1462 new files for a total of 1955 Frame Files with 2446 framesets. In addition to a heightened attention to sense distinctions this cycle includes a greater attempt to address complicated predicates such as light verb constructions and multi-word expressions. New tools facilitate the data tagging and also simplify frame creation.", "keyphrases": ["arabic propbank", "apb", "semantic role labeling"]} +{"id": "waibel-etal-2003-speechalator", "title": "Speechalator: Two-Way Speech-to-Speech Translation in Your Hand", "abstract": "This demonstration involves two-way automatic speech-to-speech translation on a consumer off-the-shelf PDA. This work was done as part of the DARPA-funded Babylon project, investigating better speech-to-speech translation systems for communication in the field. 
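A toy version of the refinement update described in the yu-etal-2017-refining record above: vectors drift toward sentimentally similar neighbours and away from dissimilar ones. The valence scores, similarity weighting and learning rate are assumptions; the paper ranks nearest neighbours rather than sweeping the whole vocabulary.

```python
# Hedged sketch: sentiment-guided refinement of pre-trained vectors.
import numpy as np

rng = np.random.default_rng(0)
vecs = {w: rng.normal(size=8) for w in ["good", "great", "bad"]}
valence = {"good": 0.9, "great": 0.95, "bad": 0.1}  # e.g. lexicon ratings

def refine(word, lr=0.1):
    for other in vecs:
        if other == word:
            continue
        sim = 1.0 - abs(valence[word] - valence[other])  # sentiment similarity
        # Pull toward sentimentally similar words (sim near 1), push away
        # from dissimilar ones (factor goes negative below sim = 0.5).
        vecs[word] += lr * (2 * sim - 1) * (vecs[other] - vecs[word])

for _ in range(20):
    for w in list(vecs):
        refine(w)

cos = lambda a, b: a @ b / (np.linalg.norm(a) * np.linalg.norm(b))
print(cos(vecs["good"], vecs["great"]), cos(vecs["good"], vecs["bad"]))
```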
The development of the Speechalator software-based translation system required addressing a number of hard issues, including a new language for the team (Egyptian Arabic), close integration on a small device, computational efficiency on a limited platform, and scalable coverage for the domain.", "keyphrases": ["translation system", "device", "speechalator"]} +{"id": "bender-etal-2013-towards", "title": "Towards Creating Precision Grammars from Interlinear Glossed Text: Inferring Large-Scale Typological Properties", "abstract": "We propose to bring together two kinds of linguistic resources\u2014interlinear glossed text (IGT) and a language-independent precision grammar resource\u2014to automatically create precision grammars in the context of language documentation. This paper takes the first steps in that direction by extracting major-constituent word order and case system properties from IGT for a diverse sample of languages.", "keyphrases": ["precision grammar", "large-scale typological property", "igt"]} +{"id": "perez-ortiz-etal-2014-black", "title": "Black-box integration of heterogeneous bilingual resources into an interactive translation system", "abstract": "The objective of interactive translation prediction (ITP) is to assist human translators in the translation of texts by making context-based computer-generated suggestions as they type. Most of the ITP systems in the literature are strongly coupled with a statistical machine translation system that is conveniently adapted to provide the suggestions. In this paper, however, we propose a resource-agnostic approach in which the suggestions are obtained from any bilingual resource (a machine translation system, a translation memory, a bilingual dictionary, etc.) that provides target-language equivalents for source-language segments. These bilingual resources are considered to be black boxes and do not need to be adapted to the peculiarities of the ITP system. Our evaluation shows that savings of up to 85% can be theoretically achieved in the number of keystrokes when using our novel approach. Preliminary user trials indicate that these benefits can be partly transferred to real-world computer-assisted translation interfaces.", "keyphrases": ["bilingual resource", "translator", "interactive translation prediction"]} +{"id": "bosco-lombardo-2004-dependency", "title": "Dependency and relational structure in treebank annotation", "abstract": "Among the variety of proposals currently making the dependency perspective on grammar more concrete, there are several treebanks whose annotation exploits some form of Relational Structure that we can consider a generalization of the fundamental idea of dependency at various degrees and with reference to different types of linguistic knowledge. The paper describes the Relational Structure as the common underlying representation of treebanks which is motivated by both theoretical and task-dependent considerations. Then it presents a system for the annotation of the Relational Structure in treebanks, called Augmented Relational Structure, which allows for a systematic annotation of various components of linguistic knowledge crucial in several tasks. Finally, it shows a dependency-based annotation for an Italian treebank, i.e.
the Turin University Treebank, that implements the Augmented Relational Structure.", "keyphrases": ["relational structure", "non-projective dependency tree", "syntax"]} +{"id": "allauzen-etal-2003-generalized", "title": "Generalized Algorithms for Constructing Statistical Language Models", "abstract": "Recent text and speech processing applications such as speech mining raise new and more general problems related to the construction of language models. We present and describe in detail several new and efficient algorithms to address these more general problems and report experimental results demonstrating their usefulness. We give an algorithm for computing efficiently the expected counts of any sequence in a word lattice output by a speech recognizer or any arbitrary weighted automaton; describe a new technique for creating exact representations of n-gram language models by weighted automata whose size is practical for offline use even for a vocabulary size of about 500,000 words and an n-gram order n = 6; and present a simple and more general technique for constructing class-based language models that allows each class to represent an arbitrary weighted automaton. An efficient implementation of our algorithms and techniques has been incorporated in a general software library for language modeling, the GRM Library, that includes many other text and grammar processing functionalities.", "keyphrases": ["n-gram language model", "transducer", "wfst"]} +{"id": "durrett-etal-2013-decentralized", "title": "Decentralized Entity-Level Modeling for Coreference Resolution", "abstract": "Efficiently incorporating entity-level information is a challenge for coreference resolution systems due to the difficulty of exact inference over partitions. We describe an end-to-end discriminative probabilistic model for coreference that, along with standard pairwise features, enforces structural agreement constraints between specified properties of coreferent mentions. This model can be represented as a factor graph for each document that admits efficient inference via belief propagation. We show that our method can use entity-level information to outperform a basic pairwise system.", "keyphrases": ["coreference resolution", "probabilistic model", "factor graph"]} +{"id": "kummerfeld-klein-2013-error", "title": "Error-Driven Analysis of Challenges in Coreference Resolution", "abstract": "Coreference resolution metrics quantify errors but do not analyze them. Here, we consider an automated method of categorizing errors in the output of a coreference system into intuitive underlying error types. Using this tool, we first compare the error distributions across a large set of systems, then analyze common errors across the top ten systems, empirically characterizing the major unsolved challenges of the coreference resolution task.", "keyphrases": ["coreference system", "state-of-the-art system", "reference"]} +{"id": "van-cranenburgh-koolen-2015-identifying", "title": "Identifying Literary Texts with Bigrams", "abstract": "We study perceptions of literariness in a set of contemporary Dutch novels. Experiments with machine learning models show that it is possible to automatically distinguish novels that are seen as highly literary from those that are seen as less literary, using surprisingly simple textual features. 
The most discriminating features of our classification model indicate that genre might be a confounding factor, but a regression model shows that we can also explain variation between highly literary novels and less literary ones within genre.", "keyphrases": ["novel", "textual feature", "literary one"]}
+{"id": "liu-etal-2019-robust", "title": "Robust Neural Machine Translation with Joint Textual and Phonetic Embedding", "abstract": "Neural machine translation (NMT) is notoriously sensitive to noises, but noises are almost inevitable in practice. One special kind of noise is the homophone noise, where words are replaced by other words with similar pronunciations. We propose to improve the robustness of NMT to homophone noises by 1) jointly embedding both textual and phonetic information of source sentences, and 2) augmenting the training dataset with homophone noises. Interestingly, to achieve better translation quality and more robustness, we found that most (though not all) weights should be put on the phonetic rather than textual information. Experiments show that our method not only significantly improves the robustness of NMT to homophone noises, but also surprisingly improves the translation quality on some clean test sets.", "keyphrases": ["neural machine translation", "phonetic information", "training corpus"]}
+{"id": "bommasani-cardie-2020-intrinsic", "title": "Intrinsic Evaluation of Summarization Datasets", "abstract": "High quality data forms the bedrock for building meaningful statistical models in NLP. Consequently, data quality must be evaluated either during dataset construction or *post hoc*. Almost all popular summarization datasets are drawn from natural sources and do not come with inherent quality assurance guarantees. In spite of this, data quality has gone largely unquestioned for many of these recent datasets. We perform the first large-scale evaluation of summarization datasets by introducing 5 intrinsic metrics and applying them to 10 popular datasets. We find that data usage in recent summarization research is sometimes inconsistent with the underlying properties of the data. Further, we discover that our metrics can serve the additional purpose of being inexpensive heuristics for detecting generically low quality examples.", "keyphrases": ["summarization dataset", "purpose", "intrinsic evaluation"]}
+{"id": "mckeown-etal-2010-time", "title": "Time-Efficient Creation of an Accurate Sentence Fusion Corpus", "abstract": "Sentence fusion enables summarization and question-answering systems to produce output by combining fully formed phrases from different sentences. Yet there is little data that can be used to develop and evaluate fusion techniques. In this paper, we present a methodology for collecting fusions of similar sentence pairs using Amazon's Mechanical Turk, selecting the input pairs in a semi-automated fashion. We evaluate the results using a novel technique for automatically selecting a representative sentence from multiple responses. Our approach allows for rapid construction of a high accuracy fusion corpus.", "keyphrases": ["fusion", "sentence pair", "amazon"]}
+{"id": "chen-etal-2018-variational-knowledge", "title": "Variational Knowledge Graph Reasoning", "abstract": "Inferring missing links in knowledge graphs (KG) has attracted a lot of attention from the research community. In this paper, we tackle a practical query answering task involving predicting the relation of a given entity pair. 
We frame this prediction problem as an inference problem in a probabilistic graphical model and aim at resolving it from a variational inference perspective. In order to model the relation between the query entity pair, we assume that there exists an underlying latent variable (paths connecting two nodes) in the KG, which carries the equivalent semantics of their relations. However, due to the intractability of connections in large KGs, we propose to use variational inference to maximize the evidence lower bound. More specifically, our framework (Diva) is composed of three modules, i.e. a posterior approximator, a prior (path finder), and a likelihood (path reasoner). By using variational inference, we are able to incorporate them closely into a unified architecture and jointly optimize them to perform KG reasoning. With active interactions among these sub-modules, Diva is better at handling noise and coping with more complex reasoning scenarios. In order to evaluate our method, we conduct the experiment of the link prediction task on multiple datasets and achieve state-of-the-art performances on both datasets.", "keyphrases": ["knowledge graph", "latent variable", "path", "diva", "reinforcement learning"]}
+{"id": "venhuizen-etal-2013-gamification", "title": "Gamification for Word Sense Labeling", "abstract": "Obtaining gold standard data for word sense disambiguation is important but costly. We show how it can be done using a \u201cGame with a Purpose\u201d (GWAP) called Wordrobe. This game consists of a large set of multiple-choice questions on word senses generated from the Groningen Meaning Bank. The players need to answer these questions, scoring points depending on the agreement with fellow players. The working assumption is that the right sense for a word can be determined by the answers given by the players. To evaluate our method, we gold-standard tagged a portion of the data that was also used in the GWAP. A comparison yielded promising results, ranging from a precision of 0.88 and recall of 0.83 for relative majority agreement, to a precision of 0.98 and recall of 0.35 for questions that were answered unanimously.", "keyphrases": ["word sense disambiguation", "multiple-choice question", "groningen meaning bank"]}
+{"id": "globerson-etal-2016-collective", "title": "Collective Entity Resolution with Multi-Focal Attention", "abstract": "Entity resolution is the task of linking each mention of an entity in text to the corresponding record in a knowledge base (KB). Coherence models for entity resolution encourage all referring expressions in a document to resolve to entities that are related in the KB. We explore attention-like mechanisms for coherence, where the evidence for each candidate is based on a small set of strong relations, rather than relations to all other entities in the document. The rationale is that document-wide support may simply not exist for non-salient entities, or entities not densely connected in the KB. Our proposed system outperforms state-of-the-art systems on the CoNLL 2003, TAC KBP 2010, 2011 and 2012 tasks.", "keyphrases": ["coherence", "other entity", "state-of-the-art system"]}
+{"id": "bergsma-cherry-2010-fast", "title": "Fast and Accurate Arc Filtering for Dependency Parsing", "abstract": "We propose a series of learned arc filters to speed up graph-based dependency parsing. A cascade of filters identify implausible head-modifier pairs, with time complexity that is first linear, and then quadratic in the length of the sentence. 
The linear filters reliably predict, in context, words that are roots or leaves of dependency trees, and words that are likely to have heads on their left or right. We use this information to quickly prune arcs from the dependency graph. More than 78% of total arcs are pruned while retaining 99.5% of the true dependencies. These filters improve the speed of two state-of-the-art dependency parsers, with low overhead and negligible loss in accuracy.", "keyphrases": ["arc", "filter", "dependency parsing"]} +{"id": "singh-etal-2016-quantifying", "title": "Quantifying sentence complexity based on eye-tracking measures", "abstract": "Eye-tracking reading times have been attested to reflect cognitive processes underlying sentence comprehension. However, the use of reading times in NLP applications is an underexplored area of research. In this initial work we build an automatic system to assess sentence complexity using automatically predicted eye-tracking reading time measures and demonstrate the efficacy of these reading times for a well known NLP task, namely, readability assessment. We use a machine learning model and a set of features known to be significant predictors of reading times in order to learn per-word reading times from a corpus of English text having reading times of human readers. Subsequently, we use the model to predict reading times for novel text in the context of the aforementioned task. A model based only on reading times gave competitive results compared to the systems that use extensive syntactic features to compute linguistic complexity. Our work, to the best of our knowledge, is the first study to show that automatically predicted reading times can successfully model the difficulty of a text and can be deployed in practical text processing applications.", "keyphrases": ["sentence complexity", "token-level", "gaze behaviour"]} +{"id": "xu-etal-2016-cached", "title": "Cached Long Short-Term Memory Neural Networks for Document-Level Sentiment Classification", "abstract": "Recently, neural networks have achieved great success on sentiment classification due to their ability to alleviate feature engineering. However, one of the remaining challenges is to model long texts in document-level sentiment classification under a recurrent architecture because of the deficiency of the memory unit. To address this problem, we present a Cached Long Short-Term Memory neural networks (CLSTM) to capture the overall semantic information in long texts. CLSTM introduces a cache mechanism, which divides memory into several groups with different forgetting rates and thus enables the network to keep sentiment information better within a recurrent unit. The proposed CLSTM outperforms the state-of-the-art models on three publicly available document-level sentiment analysis datasets.", "keyphrases": ["short-term memory", "document-level sentiment classification", "semantic information"]} +{"id": "saxena-etal-2020-keygames", "title": "KeyGames: A Game Theoretic Approach to Automatic Keyphrase Extraction", "abstract": "In this paper, we introduce two advancements in the automatic keyphrase extraction (AKE) space - KeyGames and pke+. KeyGames is an unsupervised AKE framework that employs the concept of evolutionary game theory and consistent labelling problem to ensure consistent classification of candidates into keyphrase and non-keyphrase. 
Pke+ is a python-based pipeline built on top of the existing pke library to standardize various AKE steps, namely candidate extraction and evaluation, to ensure truly systematic and comparable performance analysis of AKE models. In the experiments section, we compare the performance of KeyGames across three publicly available datasets (Inspec 2001, SemEval 2010, DUC 2001) against the results quoted by the existing state-of-the-art models as well as their performance when reproduced using pke+. The results show that KeyGames outperforms most of the state-of-the-art systems while generalizing better on input documents with different domains and length. Further, pke+'s pre-processing brings out improvement in several other systems' quoted performance as well.", "keyphrases": ["automatic keyphrase extraction", "game theory", "candidate"]}
+{"id": "ravichandran-etal-2003-statistical", "title": "Statistical QA - Classifier vs. Re-ranker: What's the difference?", "abstract": "In this paper, we show that we can obtain a good baseline performance for Question Answering (QA) by using only 4 simple features. Using these features, we contrast two approaches used for a Maximum Entropy based QA system. We view the QA problem as a classification problem and as a re-ranking problem. Our results indicate that the QA system viewed as a re-ranker clearly outperforms the QA system used as a classifier. Both systems are trained using the same data.", "keyphrases": ["re-ranker", "question answering", "classification problem"]}
+{"id": "downey-etal-2007-sparse", "title": "Sparse Information Extraction: Unsupervised Language Models to the Rescue", "abstract": "Even in a massive corpus such as the Web, a substantial fraction of extractions appear infrequently. This paper shows how to assess the correctness of sparse extractions by utilizing unsupervised language models. The REALM system, which combines HMM-based and n-gram-based language models, ranks candidate extractions by the likelihood that they are correct. Our experiments show that REALM reduces extraction error by 39%, on average, when compared with previous work. Because REALM pre-computes language models based on its corpus and does not require any hand-tagged seeds, it is far more scalable than approaches that learn models for each individual relation from hand-tagged data. Thus, REALM is ideally suited for open information extraction where the relations of interest are not specified in advance and their number is potentially vast.", "keyphrases": ["sparse information extraction", "candidate relation", "hmm"]}
+{"id": "lampert-etal-2006-classifying", "title": "Classifying Speech Acts using Verbal Response Modes", "abstract": "The driving vision for our work is to provide intelligent, automated assistance to users in understanding the status of their email conversations. Our approach is to create tools that enable the detection and connection of speech acts across email messages. We thus require a mechanism for tagging email utterances with some indication of their dialogic function. However, existing dialog act taxonomies as used in computational linguistics tend to be too task- or application-specific for the wide range of acts we find represented in email conversation. 
The Verbal Response Modes (VRM) taxonomy of speech acts, widely applied for discourse analysis in linguistics and psychology, is distinguished from other speech act taxonomies by its construction from crosscutting principles of classification, which ensure universal applicability across any domain of discourse. The taxonomy categorises on two dimensions, characterised as literal meaning and pragmatic meaning. In this paper, we describe a statistical classifier that automatically identifies the literal meaning category of utterances using the VRM classification. We achieve an accuracy of 60.8% using linguistic features derived from VRM\u2019s human annotation guidelines. Accuracy is improved to 79.8% using additional features.", "keyphrases": ["speech act", "response mode", "email message"]} +{"id": "zhang-gildea-2005-stochastic", "title": "Stochastic Lexicalized Inversion Transduction Grammar for Alignment", "abstract": "We present a version of Inversion Transduction Grammar where rule probabilities are lexicalized throughout the synchronous parse tree, along with pruning techniques for efficient training. Alignment results improve over unlexicalized ITG on short sentences for which full EM is feasible, but pruning seems to have a negative impact on longer sentences.", "keyphrases": ["efficient training", "itg", "word alignment", "tic-tac-toe pruning"]} +{"id": "brooks-youssef-2020-metaphor", "title": "Metaphor Detection using Ensembles of Bidirectional Recurrent Neural Networks", "abstract": "In this paper we present our results from the Second Shared Task on Metaphor Detection, hosted by the Second Workshop on Figurative Language Processing. We use an ensemble of RNN models with bidirectional LSTMs and bidirectional attention mechanisms. Some of the models were trained on all parts of speech. Each of the other models was trained on one of four categories for parts of speech: \u201cnouns\u201d, \u201cverbs\u201d, \u201cadverbs/adjectives\u201d, or \u201cother\u201d. The models were combined into voting pools and the voting pools were combined using the logical \u201cOR\u201d operator.", "keyphrases": ["bidirectional attention mechanism", "metaphor detection", "bi-lstms"]} +{"id": "fang-etal-2020-video2commonsense", "title": "Video2Commonsense: Generating Commonsense Descriptions to Enrich Video Captioning", "abstract": "Captioning is a crucial and challenging task for video understanding. In videos that involve active agents such as humans, the agent's actions can bring about myriad changes in the scene. Observable changes such as movements, manipulations, and transformations of the objects in the scene, are reflected in conventional video captioning. Unlike images, actions in videos are also inherently linked to social aspects such as intentions (why the action is taking place), effects (what changes due to the action), and attributes that describe the agent. Thus for video understanding, such as when captioning videos or when answering questions about videos, one must have an understanding of these commonsense aspects. We present the first work on generating commonsense captions directly from videos, to describe latent aspects such as intentions, effects, and attributes. We present a new dataset \u201cVideo-to-Commonsense (V2C)\u201d that contains ~9k videos of human agents performing various actions, annotated with 3 types of commonsense descriptions. Additionally we explore the use of open-ended video-based commonsense question answering (V2C-QA) as a way to enrich our captions. 
Both the generation task and the QA task can be used to enrich video captions.", "keyphrases": ["commonsense description", "video caption", "video2commonsense"]} +{"id": "qu-etal-2010-bag", "title": "The Bag-of-Opinions Method for Review Rating Prediction from Sparse Text Patterns", "abstract": "The problem addressed in this paper is to predict a user's numeric rating in a product review from the text of the review. Unigram and n-gram representations of text are common choices in opinion mining. However, unigrams cannot capture important expressions like \"could have been better\", which are essential for prediction models of ratings. N-grams of words, on the other hand, capture such phrases, but typically occur too sparsely in the training set and thus fail to yield robust predictors. This paper overcomes the limitations of these two models, by introducing a novel kind of bag-of-opinions representation, where an opinion, within a review, consists of three components: a root word, a set of modifier words from the same sentence, and one or more negation words. Each opinion is assigned a numeric score which is learned, by ridge regression, from a large, domain-independent corpus of reviews. For the actual test case of a domain-dependent review, the review's rating is predicted by aggregating the scores of all opinions in the review and combining it with a domain-dependent unigram model. The paper presents a constrained ridge regression algorithm for learning opinion scores. Experiments show that the bag-of-opinions method outperforms prior state-of-the-art techniques for review rating prediction.", "keyphrases": ["review rating prediction", "bag-of-opinion representation", "root word"]} +{"id": "liu-etal-2021-mulda", "title": "MulDA: A Multilingual Data Augmentation Framework for Low-Resource Cross-Lingual NER", "abstract": "Named Entity Recognition (NER) for low-resource languages is a both practical and challenging research problem. This paper addresses zero-shot transfer for cross-lingual NER, especially when the amount of source-language training data is also limited. The paper first proposes a simple but effective labeled sequence translation method to translate source-language training data to target languages and avoids problems such as word order change and entity span determination. With the source-language data as well as the translated data, a generation-based multilingual data augmentation method is introduced to further increase diversity by generating synthetic labeled data in multiple languages. These augmented data enable the language model based NER models to generalize better with both the language-specific features from the target-language synthetic data and the language-independent features from multilingual synthetic data. An extensive set of experiments were conducted to demonstrate encouraging cross-lingual transfer performance of the new research on a wide variety of target languages.", "keyphrases": ["cross-lingual ner", "low-resource language", "data augmentation method"]} +{"id": "goutte-etal-2004-aligning", "title": "Aligning words using matrix factorisation", "abstract": "Aligning words from sentences which are mutual translations is an important problem in different settings, such as bilingual terminology extraction, Machine Translation, or projection of linguistic features. Here, we view word alignment as matrix factorisation. In order to produce proper alignments, we show that factors must satisfy a number of constraints such as orthogonality. 
We then propose an algorithm for orthogonal non-negative matrix factorisation, based on a probabilistic model of the alignment data, and apply it to word alignment. This is illustrated on a French-English alignment task from the Hansard.", "keyphrases": ["matrix factorisation", "machine translation", "aer"]}
+{"id": "bansal-klein-2011-web", "title": "Web-Scale Features for Full-Scale Parsing", "abstract": "Counts from large corpora (like the web) can be powerful syntactic cues. Past work has used web counts to help resolve isolated ambiguities, such as binary noun-verb PP attachments and noun compound bracketings. In this work, we first present a method for generating web count features that address the full range of syntactic attachments. These features encode both surface evidence of lexical affinities as well as paraphrase-based cues to syntactic structure. We then integrate our features into full-scale dependency and constituent parsers. We show relative error reductions of 7.0% over the second-order dependency parser of McDonald and Pereira (2006), 9.2% over the constituent parser of Petrov et al. (2006), and 3.4% over a non-local constituent reranker.", "keyphrases": ["web-scale feature", "conjunction", "unlabeled data"]}
+{"id": "onoe-durrett-2020-interpretable", "title": "Interpretable Entity Representations through Large-Scale Typing", "abstract": "In standard methodology for natural language processing, entities in text are typically embedded in dense vector spaces with pre-trained models. The embeddings produced this way are effective when fed into downstream models, but they require end-task fine-tuning and are fundamentally difficult to interpret. In this paper, we present an approach to creating entity representations that are human readable and achieve high performance on entity-related tasks out of the box. Our representations are vectors whose values correspond to posterior probabilities over fine-grained entity types, indicating the confidence of a typing model's decision that the entity belongs to the corresponding type. We obtain these representations using a fine-grained entity typing model, trained either on supervised ultra-fine entity typing data (Choi et al. 2018) or distantly-supervised examples from Wikipedia. On entity probing tasks involving recognizing entity identity, our embeddings used in parameter-free downstream models achieve competitive performance with ELMo- and BERT-based embeddings in trained models. We also show that it is possible to reduce the size of our type set in a learning-based way for particular domains. Finally, we show that these embeddings can be post-hoc modified through a small number of rules to incorporate domain knowledge and improve performance.", "keyphrases": ["entity representation", "probability", "ultra-fine entity"]}
+{"id": "peng-etal-2015-dual", "title": "Dual Decomposition Inference for Graphical Models over Strings", "abstract": "We investigate dual decomposition for joint MAP inference of many strings. Given an arbitrary graphical model, we decompose it into small acyclic sub-models, whose MAP configurations can be found by finite-state composition and dynamic programming. We force the solutions of these subproblems to agree on overlapping variables, by tuning Lagrange multipliers for an adaptively expanding set of variable-length n-gram count features. 
This is the first inference method for arbitrary graphical models over strings that does not require approximations such as random sampling, message simplification, or a bound on string length. Provided that the inference method terminates, it gives a certificate of global optimality (though MAP inference in our setting is undecidable in general). On our global phonological inference problems, it always terminates, and achieves more accurate results than max-product and sum-product loopy belief propagation.", "keyphrases": ["string", "dual decomposition technique", "instance-level constraint"]} +{"id": "zhou-etal-2020-improving-grammatical", "title": "Improving Grammatical Error Correction with Machine Translation Pairs", "abstract": "We propose a novel data synthesis method to generate diverse error-corrected sentence pairs for improving grammatical error correction, which is based on a pair of machine translation models (e.g., Chinese to English) of different qualities (i.e., poor and good). The poor translation model can resemble the ESL (English as a second language) learner and tends to generate translations of low quality in terms of fluency and grammaticality, while the good translation model generally generates fluent and grammatically correct translations. With the pair of translation models, we can generate unlimited numbers of poor to good English sentence pairs from text in the source language (e.g., Chinese) of the translators. Our approach can generate various error-corrected patterns and nicely complement the other data synthesis approaches for GEC. Experimental results demonstrate the data generated by our approach can effectively help a GEC model to improve the performance and achieve the state-of-the-art single-model performance in BEA-19 and CoNLL-14 benchmark datasets.", "keyphrases": ["grammatical error correction", "sentence pair", "different quality"]} +{"id": "narayanan-harabagiu-2004-question", "title": "Question Answering Based on Semantic Structures", "abstract": "The ability to answer complex questions posed in Natural Language depends on (1) the depth of the available semantic representations and (2) the inferential mechanisms they support. In this paper we describe a QA architecture where questions are analyzed and candidate answers generated by 1) identifying predicate argument structures and semantic frames from the input and 2) performing structured probabilistic inference using the extracted relations in the context of a domain and scenario model. A novel aspect of our system is a scalable and expressive representation of actions and events based on Coordinated Probabilistic Relational Models (CPRM). In this paper we report on the ability of the implemented system to perform several forms of probabilistic and temporal inferences to extract answers to complex questions. The results indicate enhanced accuracy over current state-of-the-art Q/A systems.", "keyphrases": ["action", "question answering", "information extraction"]} +{"id": "schwartz-etal-2017-story", "title": "Story Cloze Task: UW NLP System", "abstract": "This paper describes University of Washington NLP's submission for the Linking Models of Lexical, Sentential and Discourse-level Semantics (LSDSem 2017) shared task\u2014the Story Cloze Task. Our system is a linear classifier with a variety of features, including both the scores of a neural language model and style features. We report 75.2% accuracy on the task. A further discussion of our results can be found in Schwartz et al. 
(2017).", "keyphrases": ["language model", "story cloze task", "stylistic feature"]} +{"id": "baldwin-2007-scalable", "title": "Scalable Deep Linguistic Processing: Mind the Lexical Gap", "abstract": "Coverage has been a constant thorn in the side of deployed deep linguistic processing applications, largely because of the difficulty in constructing, maintaining and domaintuning the complex lexicons that they rely on. This paper reviews various strands of research on deep lexical acquisition (DLA), i.e. the (semi-)automatic creation of linguistically-rich language resources, particularly from the viewpoint of DLA for precision grammars.", "keyphrases": ["deep lexical acquisition", "dla", "precision grammar"]} +{"id": "white-rajkumar-2012-minimal", "title": "Minimal Dependency Length in Realization Ranking", "abstract": "Comprehension and corpus studies have found that the tendency to minimize dependency length has a strong influence on constituent ordering choices. In this paper, we investigate dependency length minimization in the context of discriminative realization ranking, focusing on its potential to eliminate egregious ordering errors as well as better match the distributional characteristics of sentence orderings in news text. We find that with a state-of-the-art, comprehensive realization ranking model, dependency length minimization yields statistically significant improvements in BLEU scores and significantly reduces the number of heavy/light ordering errors. Through distributional analyses, we also show that with simpler ranking models, dependency length minimization can go overboard, too often sacrificing canonical word order to shorten dependencies, while richer models manage to better counterbalance the dependency length minimization preference against (sometimes) competing canonical word order preferences.", "keyphrases": ["dependency length", "realization", "ranking model"]} +{"id": "iter-etal-2020-pretraining", "title": "Pretraining with Contrastive Sentence Objectives Improves Discourse Performance of Language Models", "abstract": "Recent models for unsupervised representation learning of text have employed a number of techniques to improve contextual word representations but have put little focus on discourse-level representations. We propose Conpono, an inter-sentence objective for pretraining language models that models discourse coherence and the distance between sentences. Given an anchor sentence, our model is trained to predict the text k sentences away using a sampled-softmax objective where the candidates consist of neighboring sentences and sentences randomly sampled from the corpus. On the discourse representation benchmark DiscoEval, our model improves over the previous state-of-the-art by up to 13% and on average 4% absolute across 7 tasks. Our model is the same size as BERT-Base, but outperforms the much larger BERT-Large model and other more recent approaches that incorporate discourse. We also show that Conpono yields gains of 2%-6% absolute even for tasks that do not explicitly evaluate discourse: textual entailment (RTE), common sense reasoning (COPA) and reading comprehension (ReCoRD).", "keyphrases": ["discourse-level representation", "inter-sentence objective", "contrastive learning"]} +{"id": "tsai-etal-2021-style", "title": "Style Control for Schema-Guided Natural Language Generation", "abstract": "Natural Language Generation (NLG) for task-oriented dialogue systems focuses on communicating specific content accurately, fluently, and coherently. 
While these attributes are crucial for a successful dialogue, it is also desirable to simultaneously accomplish specific stylistic goals, such as response length, point-of-view, descriptiveness, sentiment, formality, and empathy. In this work, we focus on stylistic control and evaluation for schema-guided NLG, with joint goals of achieving both semantic and stylistic control. We experiment in detail with various controlled generation methods for large pretrained language models: specifically, conditional training, guided fine-tuning, and guided decoding. We discuss their advantages and limitations, and evaluate them with a broad range of automatic and human evaluation metrics. Our results show that while high style accuracy and semantic correctness are easier to achieve for more lexically-defined styles with conditional training, stylistic control is also achievable for more semantically complex styles using discriminator-based guided decoding methods. The results also suggest that methods that are more scalable (with less hyper-parameter tuning) and that disentangle context generation and stylistic variations are more effective at achieving semantic correctness and style accuracy.", "keyphrases": ["natural language generation", "nlg", "style accuracy"]}
+{"id": "kuhn-etal-2010-phrase", "title": "Phrase Clustering for Smoothing TM Probabilities - or, How to Extract Paraphrases from Phrase Tables", "abstract": "This paper describes how to cluster together the phrases of a phrase-based statistical machine translation (SMT) system, using information in the phrase table itself. The clustering is symmetric and recursive: it is applied both to source-language and target-language phrases, and the clustering in one language helps determine the clustering in the other. The phrase clusters have many possible uses. This paper looks at one of these uses: smoothing the conditional translation model (TM) probabilities employed by the SMT system. We incorporated phrase-cluster-derived probability estimates into a baseline loglinear feature combination that included relative frequency and lexically-weighted conditional probability estimates. In Chinese-English (C-E) and French-English (F-E) learning curve experiments, we obtained a gain over the baseline in 29 of 30 tests, with a maximum gain of 0.55 BLEU points (though most gains were fairly small). The largest gains came with medium (200--400K sentence pairs) rather than with small (less than 100K sentence pairs) amounts of training data, contrary to what one would expect from the paraphrasing literature. We have only begun to explore the original smoothing approach described here.", "keyphrases": ["clustering", "paraphrase", "translation model"]}
+{"id": "lapata-lascarides-2004-inferring", "title": "Inferring Sentence-internal Temporal Relations", "abstract": "In this paper we propose a data intensive approach for inferring sentence-internal temporal relations, which relies on a simple probabilistic model and assumes no manual coding. We explore various combinations of features, and evaluate performance against a gold-standard corpus and human subjects performing the same task. 
The best model achieves 70.7% accuracy in inferring the temporal relation between two clauses and 97.4% accuracy in ordering them, assuming that the temporal relation is known.", "keyphrases": ["temporal relation", "clause", "rhetorical relation"]} +{"id": "kazama-torisawa-2005-speeding", "title": "Speeding up Training with Tree Kernels for Node Relation Labeling", "abstract": "We present a method for speeding up the calculation of tree kernels during training. The calculation of tree kernels is still heavy even with efficient dynamic programming (DP) procedures. Our method maps trees into a small feature space where the inner product, which can be calculated much faster, yields the same value as the tree kernel for most tree pairs. The training is sped up by using the DP procedure only for the exceptional pairs. We describe an algorithm that detects such exceptional pairs and converts trees into vectors in a feature space. We propose tree kernels on marked labeled ordered trees and show that the training of SVMs for semantic role labeling using these kernels can be sped up by a factor of several tens.", "keyphrases": ["tree kernel", "node relation labeling", "calculation"]} +{"id": "lin-och-2004-orange", "title": "ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", "abstract": "Comparisons of automatic evaluation metrics for machine translation are usually conducted on corpus level using correlation statistics such as Pearson's product moment correlation coefficient or Spearman's rank order correlation coefficient between human scores and automatic scores. However, such comparisons rely on human judgments of translation qualities such as adequacy and fluency. Unfortunately, these judgments are often inconsistent and very expensive to acquire. In this paper, we introduce a new evaluation method, Orange, for evaluating automatic machine translation evaluation metrics automatically without extra human involvement other than using a set of reference translations. We also show the results of comparing several existing automatic metrics and three new automatic metrics using Orange.", "keyphrases": ["evaluation metric", "machine translation", "human judgment"]} +{"id": "ma-etal-2020-multi", "title": "Multi-resolution Annotations for Emoji Prediction", "abstract": "Emojis are able to express various linguistic components, including emotions, sentiments, events, etc. Predicting the proper emojis associated with text provides a way to summarize the text accurately, and it has been proven to be a good auxiliary task to many Natural Language Understanding (NLU) tasks. Labels in existing emoji prediction datasets are all passage-based and are usually under the multi-class classification setting. However, in many cases, one single emoji cannot fully cover the theme of a piece of text. It is thus useful to infer the part of text related to each emoji. The lack of multi-label and aspect-level emoji prediction datasets is one of the bottlenecks for this task. This paper annotates an emoji prediction dataset with passage-level multi-class/multi-label, and aspect-level multi-class annotations. We also present a novel annotation method with which we generate the aspect-level annotations. The annotations are generated heuristically, taking advantage of the self-attention mechanism in Transformer networks. We validate the annotations both automatically and manually to ensure their quality. 
We also benchmark the dataset with a pre-trained BERT model.", "keyphrases": ["emoji prediction", "passage-level multi-class", "aspect-level multi-class annotation"]}
+{"id": "pitler-2014-crossing", "title": "A Crossing-Sensitive Third-Order Factorization for Dependency Parsing", "abstract": "Parsers that parametrize over wider scopes are generally more accurate than edge-factored models. For graph-based non-projective parsers, wider factorizations have so far implied large increases in the computational complexity of the parsing problem. This paper introduces a \u201ccrossing-sensitive\u201d generalization of a third-order factorization that trades off complexity in the model structure (i.e., scoring with features over multiple edges) with complexity in the output structure (i.e., producing crossing edges). Under this model, the optimal 1-Endpoint-Crossing tree can be found in O(n^4) time, matching the asymptotic run-time of both the third-order projective parser and the edge-factored 1-Endpoint-Crossing parser. The crossing-sensitive third-order parser is significantly more accurate than the third-order projective parser under many experimental settings and significantly less accurate on none.", "keyphrases": ["third-order factorization", "graph-based non-projective parser", "1-endpoint-crossing parser"]}
+{"id": "wang-etal-2019-youmakeup", "title": "YouMakeup: A Large-Scale Domain-Specific Multimodal Dataset for Fine-Grained Semantic Comprehension", "abstract": "Multimodal semantic comprehension has attracted increasing research interests recently such as visual question answering and caption generation. However, due to the data limitation, fine-grained semantic comprehension has not been well investigated, which requires to capture semantic details of multimodal contents. In this work, we introduce \u201cYouMakeup\u201d, a large-scale multimodal instructional video dataset to support fine-grained semantic comprehension research in specific domain. YouMakeup contains 2,800 videos from YouTube, spanning more than 420 hours in total. Each video is annotated with a sequence of natural language descriptions for instructional steps, grounded in temporal video range and spatial facial areas. The annotated steps in a video involve subtle difference in actions, products and regions, which requires fine-grained understanding and reasoning both temporally and spatially. In order to evaluate models' ability for fine-grained comprehension, we further propose two groups of tasks including generation tasks and visual question answering from different aspects. We also establish a baseline of step caption generation for future comparison. The dataset will be publicly available at to support research investigation in fine-grained semantic comprehension.", "keyphrases": ["fine-grained semantic comprehension", "natural language description", "instructional step"]}
+{"id": "gehrmann-etal-2019-generating", "title": "Generating Abstractive Summaries with Finetuned Language Models", "abstract": "Neural abstractive document summarization is commonly approached by models that exhibit a mostly extractive behavior. This behavior is facilitated by a copy-attention which allows models to copy words from a source document. While models in the mostly extractive news summarization domain benefit from this inductive bias, they commonly fail to paraphrase or compress information from the source document. 
Recent advances in transfer-learning from large pretrained language models give rise to alternative approaches that do not rely on copy-attention and instead learn to generate concise and abstractive summaries. In this paper, as part of the TL;DR challenge, we compare the abstractiveness of summaries from different summarization approaches and show that transfer-learning can be efficiently utilized without any changes to the model architecture. We demonstrate that the approach leads to a higher level of abstraction for a similar performance on the TL;DR challenge tasks, enabling true natural language compression.", "keyphrases": ["language model", "advance", "abstractive summarization"]}
+{"id": "li-etal-2018-guiding", "title": "Guiding Generation for Abstractive Text Summarization Based on Key Information Guide Network", "abstract": "Neural network models, based on the attentional encoder-decoder model, have good capability in abstractive text summarization. However, these models are hard to control in the process of generation, which leads to a lack of key information. We propose a guiding generation model that combines the extractive method and the abstractive method. Firstly, we obtain keywords from the text by an extractive model. Then, we introduce a Key Information Guide Network (KIGN), which encodes the keywords to the key information representation, to guide the process of generation. In addition, we use a prediction-guide mechanism, which can obtain the long-term value for future decoding, to further guide the summary generation. We evaluate our model on the CNN/Daily Mail dataset. The experimental results show that our model leads to significant improvements.", "keyphrases": ["abstractive text summarization", "input document", "generation process"]}
+{"id": "samardzic-merlo-2014-likelihood", "title": "Likelihood of External Causation in the Structure of Events", "abstract": "This article addresses the causal structure of events described by verbs: whether an event happens spontaneously or it is caused by an external causer. We automatically estimate the likelihood of external causation of events based on the distribution of causative and anticausative uses of verbs in the causative alternation. We train a Bayesian model and test it on a monolingual and on a bilingual input. The performance is evaluated against an independent scale of likelihood of external causation based on typological data. The accuracy of a two-way classification is 85% in both monolingual and bilingual setting. On the task of a three-way classification, the score is 61% in the monolingual setting and 69% in the bilingual setting.", "keyphrases": ["external causation", "temporal relation", "multi-sieve approach"]}
+{"id": "bethard-etal-2008-building", "title": "Building a Corpus of Temporal-Causal Structure", "abstract": "While recent corpus annotation efforts cover a wide variety of semantic structures, work on temporal and causal relations is still in its early stages. Annotation efforts have typically considered either temporal relations or causal relations, but not both, and no corpora currently exist that allow the relation between temporals and causals to be examined empirically. We have annotated a corpus of 1000 event pairs for both temporal and causal relations, focusing on a relatively frequent construction in which the events are conjoined by the word \u201cand\u201d. 
Temporal relations were annotated using an extension of the BEFORE and AFTER scheme used in the TempEval competition, and causal relations were annotated using a scheme based on connective phrases like \u201cand as a result\u201d. The annotators achieved 81.2% agreement on temporal relations and 77.8% agreement on causal relations. Analysis of the resulting corpus revealed some interesting findings, for example, that over 30% of CAUSAL relations do not have an underlying BEFORE relation. The corpus was also explored using machine learning methods, and while model performance exceeded all baselines, the results suggested that simple grammatical cues may be insufficient for identifying the more difficult temporal and causal relations.", "keyphrases": ["temporal relation", "event pair", "connective phrase"]}
+{"id": "nicolai-etal-2013-cognate", "title": "Cognate and Misspelling Features for Natural Language Identification", "abstract": "We apply Support Vector Machines to differentiate between 11 native languages in the 2013 Native Language Identification Shared Task. We expand a set of common language identification features to include cognate interference and spelling mistakes. Our best results are obtained with a classifier which includes both the cognate and the misspelling features, as well as word unigrams, word bigrams, character bigrams, and syntax production rules.", "keyphrases": ["natural language identification", "cognate interference", "spelling error"]}
+{"id": "li-etal-2016-discourse", "title": "Discourse Parsing with Attention-based Hierarchical Neural Networks", "abstract": "RST-style document-level discourse parsing remains a difficult task and efficient deep learning models on this task have rarely been presented. In this paper, we propose an attention-based hierarchical neural network model for discourse parsing. We also incorporate a tensor-based transformation function to model complicated feature interactions. Experimental results show that our approach obtains comparable performance to the contemporary state-of-the-art systems with little manual feature engineering.", "keyphrases": ["neural network model", "discourse unit", "hierarchical attention"]}
+{"id": "srikumar-etal-2012-amortizing", "title": "On Amortizing Inference Cost for Structured Prediction", "abstract": "This paper deals with the problem of predicting structures in the context of NLP. Typically, in structured prediction, an inference procedure is applied to each example independently of the others. In this paper, we seek to optimize the time complexity of inference over entire datasets, rather than individual examples. By considering the general inference representation provided by integer linear programs, we propose three exact inference theorems which allow us to re-use earlier solutions for certain instances, thereby completely avoiding possibly expensive calls to the inference procedure. We also identify several approximation schemes which can provide further speedup. 
We instantiate these ideas to the structured prediction task of semantic role labeling and show that we can achieve a speedup of over 2.5 using our approach while retaining the guarantees of exactness and a further speedup of over 3 using approximations that do not degrade performance.", "keyphrases": ["structured prediction", "integer linear program", "ilp"]} +{"id": "jie-etal-2019-better", "title": "Better Modeling of Incomplete Annotations for Named Entity Recognition", "abstract": "Supervised approaches to named entity recognition (NER) are largely developed based on the assumption that the training data is fully annotated with named entity information. However, in practice, annotated data can often be imperfect with one typical issue being the training data may contain incomplete annotations. We highlight several pitfalls associated with learning under such a setup in the context of NER and identify limitations associated with existing approaches, proposing a novel yet easy-to-implement approach for recognizing named entities with incomplete data annotations. We demonstrate the effectiveness of our approach through extensive experiments.", "keyphrases": ["incomplete annotation", "label noise", "many study"]} +{"id": "kim-etal-2019-probing", "title": "Probing What Different NLP Tasks Teach Machines about Function Word Comprehension", "abstract": "We introduce a set of nine challenge tasks that test for the understanding of function words. These tasks are created by structurally mutating sentences from existing datasets to target the comprehension of specific types of function words (e.g., prepositions, wh-words). Using these probing tasks, we explore the effects of various pretraining objectives for sentence encoders (e.g., language modeling, CCG supertagging and natural language inference (NLI)) on the learned representations. Our results show that pretraining on CCG\u2014our most syntactic objective\u2014performs the best on average across our probing tasks, suggesting that syntactic knowledge helps function word comprehension. Language modeling also shows strong performance, supporting its widespread use for pretraining state-of-the-art NLP models. Overall, no pretraining objective dominates across the board, and our function word probing tasks highlight several intuitive differences between pretraining objectives, e.g., that NLI helps the comprehension of negation.", "keyphrases": ["function word comprehension", "preposition", "language model", "training objective", "nli dataset"]} +{"id": "niculae-etal-2017-argument", "title": "Argument Mining with Structured SVMs and RNNs", "abstract": "We propose a novel factor graph model for argument mining, designed for settings in which the argumentative relations in a document do not necessarily form a tree structure. (This is the case in over 20% of the web comments dataset we release.) Our model jointly learns elementary unit type classification and argumentative relation prediction. Moreover, our model supports SVM and RNN parametrizations, can enforce structure constraints (e.g., transitivity), and can express dependencies between adjacent relations and propositions. 
Our approaches outperform unstructured baselines in both web comments and argumentative essay datasets.", "keyphrases": ["factor graph model", "proposition", "argument mining"]}
+{"id": "gari-soler-etal-2019-comparison", "title": "A Comparison of Context-sensitive Models for Lexical Substitution", "abstract": "Word embedding representations provide good estimates of word meaning and give state-of-the-art performance in semantic tasks. Embedding approaches differ as to whether and how they account for the context surrounding a word. We present a comparison of different word and context representations on the task of proposing substitutes for a target word in context (lexical substitution). We also experiment with tuning contextualized word embeddings on a dataset of sense-specific instances for each target word. We show that powerful contextualized word representations, which give high performance in several semantics-related tasks, deal less well with the subtle in-context similarity relationships needed for substitution. This is better handled by models trained with this objective in mind, where the inter-dependence between word and context representations is explicitly modeled during training.", "keyphrases": ["lexical substitution", "substitute", "elmo embedding"]}
+{"id": "tran-etal-2016-unsupervised", "title": "Unsupervised Neural Hidden Markov Models", "abstract": "In this work, we present the first results for neuralizing an Unsupervised Hidden Markov Model. We evaluate our approach on tag induction. Our approach outperforms existing generative models and is competitive with the state-of-the-art though with a simpler model easily extended to include additional context.", "keyphrases": ["generative model", "hmm", "word embedding"]}
+{"id": "stadler-etal-2021-observing", "title": "Observing the Learning Curve of NMT Systems With Regard to Linguistic Phenomena", "abstract": "In this paper we present our observations and evaluations by observing the linguistic performance of the system on several steps on the training process of various English-to-German Neural Machine Translation models. The linguistic performance is measured through a semi-automatic process using a test suite. Among several linguistic observations, we find that the translation quality of some linguistic categories decreased within the recorded iterations. Additionally, we notice some drops of the translation quality of certain categories when using a larger corpus.", "keyphrases": ["regard", "linguistic phenomena", "translation quality", "iteration"]}
+{"id": "takeoka-etal-2021-low", "title": "Low-resource Taxonomy Enrichment with Pretrained Language Models", "abstract": "Taxonomies are symbolic representations of hierarchical relationships between terms or entities. While taxonomies are useful in broad applications, manually updating or maintaining them is labor-intensive and difficult to scale in practice. Conventional supervised methods for this enrichment task fail to find optimal parents of new terms in low-resource settings where only small taxonomies are available because of overfitting to hierarchical relationships in the taxonomies. To tackle the problem of low-resource taxonomy enrichment, we propose Musubu, an efficient framework for taxonomy enrichment in low-resource settings with pretrained language models (LMs) as knowledge bases to compensate for the shortage of information. Musubu leverages an LM-based classifier to determine whether or not inputted term pairs have hierarchical relationships. 
Musubu also utilizes Hearst patterns to generate queries to leverage implicit knowledge from the LM efficiently for more accurate prediction. We empirically demonstrate the effectiveness of our method in extensive experiments on taxonomies from both a SemEval task and real-world retailer datasets.", "keyphrases": ["language model", "low-resource taxonomy enrichment", "knowledge basis"]} +{"id": "ture-etal-2012-combining", "title": "Combining Statistical Translation Techniques for Cross-Language Information Retrieval", "abstract": "Cross-language information retrieval today is dominated by techniques that rely principally on context-independent token-to-token mappings despite the fact that state-of-the-art statistical machine translation systems now have far richer translation models available in their internal representations. This paper explores combination-of-evidence techniques using three types of statistical translation models: context-independent token translation, token translation using phrase-dependent contexts, and token translation using sentence-dependent contexts. Context-independent translation is performed using statistically-aligned tokens in parallel text, phrase-dependent translation is performed using aligned statistical phrases, and sentence-dependent translation is performed using those same aligned phrases together with an n-gram language model. Experiments on retrieval of Arabic, Chinese, and French documents using English queries show that no one technique is optimal for all queries, but that statistically significant improvements in mean average precision over strong baselines can be achieved by combining translation evidence from all three techniques. The optimal combination is, however, found to be resource-dependent, indicating a need for future work on robust tuning to the characteristics of individual collections.", "keyphrases": ["cross-language information retrieval", "internal representation", "paradigm"]} +{"id": "cross-huang-2016-incremental", "title": "Incremental Parsing with Minimal Features Using Bi-Directional LSTM", "abstract": "Recently, neural network approaches for parsing have largely automated the combination of individual features, but still rely on (often a larger number of) atomic features created from human linguistic intuition, while potentially omitting important global context. To further reduce feature engineering to the bare minimum, we use bi-directional LSTM sentence representations to model a parser state with only three sentence positions, which automatically identifies important aspects of the entire sentence. This model achieves state-of-the-art results among greedy dependency parsers for English. We also introduce a novel transition system for constituency parsing which does not require binarization, and together with the above architecture, achieves state-of-the-art results among greedy parsers for both English and Chinese.", "keyphrases": ["bi-directional lstm", "dependency parser", "binarization"]} +{"id": "tackstrom-etal-2015-efficient", "title": "Efficient Inference and Structured Learning for Semantic Role Labeling", "abstract": "We present a dynamic programming algorithm for efficient constrained inference in semantic role labeling. The algorithm tractably captures a majority of the structural constraints examined by prior work in this area, which has resorted to either approximate methods or off-the-shelf integer linear programming solvers. 
In addition, it allows training a globally-normalized log-linear model with respect to constrained conditional likelihood. We show that the dynamic program is several times faster than an off-the-shelf integer linear programming solver, while reaching the same solution. Furthermore, we show that our structured model results in significant improvements over its local counterpart, achieving state-of-the-art results on both PropBank- and FrameNet-annotated corpora.", "keyphrases": ["semantic role labeling", "program", "graphical model"]} +{"id": "abad-etal-2017-self", "title": "Self-Crowdsourcing Training for Relation Extraction", "abstract": "In this paper we introduce a self-training strategy for crowdsourcing. The training examples are automatically selected to train the crowd workers. Our experimental results show an impact of 5% improvement in terms of F1 for the relation extraction task, compared to the method based on distant supervision.", "keyphrases": ["relation extraction", "crowdsourcing", "distant supervision"]} +{"id": "koppel-etal-2011-unsupervised", "title": "Unsupervised Decomposition of a Document into Authorial Components", "abstract": "We propose a novel unsupervised method for separating out distinct authorial components of a document. In particular, we show that, given a book artificially \"munged\" from two thematically similar biblical books, we can separate out the two constituent books almost perfectly. This allows us to automatically recapitulate many conclusions reached by Bible scholars over centuries of research. One of the key elements of our method is exploitation of differences in synonym choice by different authors.", "keyphrases": ["authorial component", "unsupervised method", "book"]} +{"id": "vijay-etal-2018-corpus", "title": "Corpus Creation and Emotion Prediction for Hindi-English Code-Mixed Social Media Text", "abstract": "Emotion Prediction is a Natural Language Processing (NLP) task dealing with detection and classification of emotions in various monolingual and bilingual texts. While some work has been done on code-mixed social media text and in emotion prediction separately, our work is the first attempt which aims at identifying the emotion associated with Hindi-English code-mixed social media text. In this paper, we analyze the problem of emotion identification in code-mixed content and present a Hindi-English code-mixed corpus extracted from Twitter and annotated with the associated emotion. For every tweet in the dataset, we annotate the source language of all the words present, and also the causal language of the expressed emotion. Finally, we propose a supervised classification system which uses various machine learning techniques for detecting the emotion associated with the text using a variety of character level, word level, and lexicon based features.", "keyphrases": ["emotion prediction", "hindi-english code-mixed corpus", "social medium text"]} +{"id": "beigman-klebanov-flor-2013-argumentation", "title": "Argumentation-Relevant Metaphors in Test-Taker Essays", "abstract": "This article discusses metaphor annotation in a corpus of argumentative essays written by test-takers during a standardized examination for graduate school admission. The quality of argumentation being the focus of the project, we developed a metaphor annotation protocol that targets metaphors that are relevant for the writer\u2019s arguments. The reliability of the protocol is 0.58, on a set of 116 essays (the total of about 30K content-word tokens). 
We found a moderate-to-strong correlation (r=0.51-0.57) between the percentage of metaphorically used words in an essay and the writing quality score. We also describe encouraging findings regarding the potential of metaphor identification to contribute to automated scoring of essays.", "keyphrases": ["essay", "argumentation", "metaphor detection"]} +{"id": "bjerva-etal-2019-probabilistic", "title": "A Probabilistic Generative Model of Linguistic Typology", "abstract": "In the principles-and-parameters framework, the structural features of languages depend on parameters that may be toggled on or off, with a single parameter often dictating the status of multiple features. The implied covariance between features inspires our probabilisation of this line of linguistic inquiry\u2014we develop a generative model of language based on exponential-family matrix factorisation. By modelling all languages and features within the same architecture, we show how structural similarities between languages can be exploited to predict typological features with near-perfect accuracy, outperforming several baselines on the task of predicting held-out features. Furthermore, we show that language embeddings pre-trained on monolingual text allow for generalisation to unobserved languages. This finding has clear practical and also theoretical implications: the results confirm what linguists have hypothesised, i.e. that there are significant correlations between typological features and languages.", "keyphrases": ["generative model", "typological feature", "language embedding"]} +{"id": "steingrimsson-etal-2021-combalign", "title": "CombAlign: a Tool for Obtaining High-Quality Word Alignments", "abstract": "Being able to generate accurate word alignments is useful for a variety of tasks. While statistical word aligners can work well, especially when parallel training data are plentiful, multilingual embedding models have recently been shown to give good results in unsupervised scenarios. We evaluate an ensemble method for word alignment on four language pairs and demonstrate that by combining multiple tools, taking advantage of their different approaches, substantial gains can be made. This holds for settings ranging from very low-resource to high-resource. Furthermore, we introduce a new gold alignment test set for Icelandic and a new easy-to-use tool for creating manual word alignments.", "keyphrases": ["word alignment", "ensemble method", "combalign"]} +{"id": "tian-etal-2017-facebook", "title": "Facebook sentiment: Reactions and Emojis", "abstract": "Emojis are used frequently in social media. A widely assumed view is that emojis express the emotional state of the user, which has led to research focusing on the expressiveness of emojis independent from the linguistic context. We argue that emojis and the linguistic texts can modify the meaning of each other. The overall communicated meaning is not a simple sum of the two channels. In order to study the meaning interplay, we need data indicating the overall sentiment of the entire message as well as the sentiment of the emojis stand-alone. We propose that Facebook Reactions are a good data source for such a purpose. FB reactions (e.g. \u201cLove\u201d and \u201cAngry\u201d) indicate the readers' overall sentiment, against which we can investigate the types of emojis used in the comments under different reaction profiles. 
We present a data set of 21,000 FB posts (57 million reactions and 8 million comments) from public media pages across four countries.", "keyphrases": ["emojis", "comment", "social medium"]} +{"id": "schulder-etal-2018-introducing", "title": "Introducing a Lexicon of Verbal Polarity Shifters for English", "abstract": "The sentiment polarity of a phrase does not only depend on the polarities of its words, but also on how these are affected by their context. Negation words (e.g. not, no, never) can change the polarity of a phrase. Similarly, verbs and other content words can also act as polarity shifters (e.g. fail, deny, alleviate). While individually more sparse, they are far more numerous. Among verbs alone, there are more than 1200 shifters. However, sentiment analysis systems barely consider polarity shifters other than negation words. A major reason for this is the scarcity of lexicons and corpora that provide information on them. We introduce a lexicon of verbal polarity shifters that covers the entirety of verbs found in WordNet. We provide a fine-grained annotation of individual word senses, as well as information for each verbal shifter on the syntactic scopes that it can affect.", "keyphrases": ["polarity shifter", "word sense", "verbal shifter"]} +{"id": "xu-etal-2020-fact", "title": "Fact-based Content Weighting for Evaluating Abstractive Summarisation", "abstract": "Abstractive summarisation is notoriously hard to evaluate since standard word-overlap-based metrics are insufficient. We introduce a new evaluation metric which is based on fact-level content weighting, i.e. relating the facts of the document to the facts of the summary. We follow the assumption that a good summary will reflect all relevant facts, i.e. the ones present in the ground truth (human-generated reference summary). We confirm this hypothesis by showing that our weightings are highly correlated to human perception and compare favourably to the recent manual highlight-based metric of Hardy et al. (2019).", "keyphrases": ["abstractive summarisation", "evaluation metric", "contextual embedding"]} +{"id": "liu-etal-2021-mathematical", "title": "Mathematical Word Problem Generation from Commonsense Knowledge Graph and Equations", "abstract": "There is an increasing interest in the use of mathematical word problem (MWP) generation in educational assessment. Different from standard natural question generation, MWP generation needs to maintain the underlying mathematical operations between quantities and variables, while at the same time ensuring the relevance between the output and the given topic. To address the above problem, we develop an end-to-end neural model to generate diverse MWPs in real-world scenarios from commonsense knowledge graph and equations. The proposed model (1) learns both representations from edge-enhanced Levi graphs of symbolic equations and commonsense knowledge; (2) automatically fuses equation and commonsense knowledge information via a self-planning module when generating the MWPs. Experiments on an educational gold-standard set and a large-scale generated MWP set show that our approach is superior on the MWP generation task, and it outperforms the SOTA models in terms of both automatic evaluation metrics, i.e., BLEU-4, ROUGE-L, Self-BLEU, and human evaluation metrics, i.e., equation relevance, topic relevance, and language coherence. 
To encourage reproducible results, we make our code and MWP dataset publicly available at .", "keyphrases": ["commonsense knowledge graph", "equation", "mwp generation"]} +{"id": "zhu-etal-2020-crosswoz", "title": "CrossWOZ: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset", "abstract": "To advance multi-domain (cross-domain) dialogue modeling as well as alleviate the shortage of Chinese task-oriented datasets, we propose CrossWOZ, the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of dialogue states and dialogue acts on both user and system sides. About 60% of the dialogues have cross-domain user goals that favor inter-domain dependency and encourage natural transition across domains in conversation. We also provide a user simulator and several benchmark models for pipelined task-oriented dialogue systems, which will facilitate researchers to compare and evaluate their models on this corpus. The large size and rich annotation of CrossWOZ make it suitable to investigate a variety of tasks in cross-domain dialogue modeling, such as dialogue state tracking, policy learning, user simulation, etc.", "keyphrases": ["large-scale chinese cross-domain", "conversation", "task-oriented dialogue system"]} +{"id": "safi-samghabadi-etal-2020-aggression", "title": "Aggression and Misogyny Detection using BERT: A Multi-Task Approach", "abstract": "In recent times, the focus of the NLP community has increased towards offensive language, aggression, and hate-speech detection. This paper presents our system for the TRAC-2 shared task on \u201cAggression Identification\u201d (sub-task A) and \u201cMisogynistic Aggression Identification\u201d (sub-task B). The data for this shared task is provided in three different languages - English, Hindi, and Bengali. Each data instance is annotated into one of the three aggression classes - Not Aggressive, Covertly Aggressive, Overtly Aggressive, as well as one of the two misogyny classes - Gendered and Non-Gendered. We propose an end-to-end neural model using attention on top of BERT that incorporates a multi-task learning paradigm to address both the sub-tasks simultaneously. Our team, \u201cna14\u201d, scored 0.8579 weighted F1-measure on the English sub-task B and secured 3rd rank out of 15 teams for the task. The code and the model weights are publicly available at . Keywords: Aggression, Misogyny, Abusive Language, Hate-Speech Detection, BERT, NLP, Neural Networks, Social Media", "keyphrases": ["misogyny", "detection", "bert", "aggression"]} +{"id": "li-etal-2017-nlp", "title": "An NLP Analysis of Exaggerated Claims in Science News", "abstract": "The discrepancy between science and media has been affecting the effectiveness of science communication. Original findings from science publications may be distorted with altered claim strength when reported to the public, causing misinformation spread. This study conducts an NLP analysis of exaggerated claims in science news, and then constructs prediction models for identifying claim strength levels in science reporting. The results demonstrate the different writing styles that journal articles and news/press releases use for reporting scientific findings. 
Preliminary prediction models reached promising results with room for further improvement.", "keyphrases": ["exaggeration", "claim", "science news"]} +{"id": "yoon-etal-2016-spoken", "title": "Spoken Text Difficulty Estimation Using Linguistic Features", "abstract": "We present an automated method for estimating the difficulty of spoken texts for use in generating items that assess non-native learners\u2019 listening proficiency. We collected information on the perceived difficulty of listening to various English monologue speech samples using a Likert-scale questionnaire distributed to 15 non-native English learners. We averaged the overall rating provided by three nonnative learners at different proficiency levels into an overall score of listenability. We then trained a multiple linear regression model with the listenability score as the dependent variable and features from both natural language and speech processing as the independent variables. Our method demonstrated a correlation of 0.76 with the listenability score, comparable to the agreement between the nonnative learners\u2019 ratings and the listenability score.", "keyphrases": ["linguistic feature", "listenability", "english learner"]} +{"id": "liang-etal-2020-beyond", "title": "Beyond User Self-Reported Likert Scale Ratings: A Comparison Model for Automatic Dialog Evaluation", "abstract": "Open Domain dialog system evaluation is one of the most important challenges in dialog research. Existing automatic evaluation metrics, such as BLEU, are mostly reference-based. They calculate the difference between the generated response and a limited number of available references. Likert-score based self-reported user rating is widely adopted by social conversational systems, such as Amazon Alexa Prize chatbots. However, self-reported user rating suffers from bias and variance among different users. To alleviate this problem, we formulate dialog evaluation as a comparison task. We also propose an automatic evaluation model CMADE (Comparison Model for Automatic Dialog Evaluation) that automatically cleans self-reported user ratings as it trains on them. Specifically, we first use a self-supervised method to learn better dialog feature representation, and then use KNN and Shapley to remove confusing samples. Our experiments show that CMADE achieves 89.2% accuracy in the dialog comparison task.", "keyphrases": ["automatic dialog evaluation", "user rating", "different user"]} +{"id": "shoemark-etal-2017-aye", "title": "Aye or naw, whit dae ye hink? Scottish independence and linguistic identity on social media", "abstract": "Political surveys have indicated a relationship between a sense of Scottish identity and voting decisions in the 2014 Scottish Independence Referendum. Identity is often reflected in language use, suggesting the intuitive hypothesis that individuals who support Scottish independence are more likely to use distinctively Scottish words than those who oppose it. In the first large-scale study of sociolinguistic variation on social media in the UK, we identify distinctively Scottish terms in a data-driven way, and find that these terms are indeed used at a higher rate by users of pro-independence hashtags than by users of anti-independence hashtags. However, we also find that in general people are less likely to use distinctively Scottish words in tweets with referendum-related hashtags than in their general Twitter activity. 
We attribute this difference to style shifting relative to audience, aligning with previous work showing that Twitter users tend to use fewer local variants when addressing a broader audience.", "keyphrases": ["scottish independence", "hashtag", "twitter user", "social medium"]} +{"id": "lin-etal-2020-generating", "title": "Generating Informative Conversational Response using Recurrent Knowledge-Interaction and Knowledge-Copy", "abstract": "Knowledge-driven conversation approaches have attracted remarkable research attention recently. However, generating an informative response with multiple relevant knowledge without losing fluency and coherence is still one of the main challenges. To address this issue, this paper proposes a method that uses recurrent knowledge interaction among response decoding steps to incorporate appropriate knowledge. Furthermore, we introduce a knowledge copy mechanism using a knowledge-aware pointer network to copy words from external knowledge according to knowledge attention distribution. Our joint neural conversation model which integrates recurrent Knowledge-Interaction and knowledge Copy (KIC) performs well on generating informative responses. Experiments demonstrate that our model with fewer parameters yields significant improvements over competitive baselines on two datasets, Wizard-of-Wikipedia (average Bleu +87%; abs.: 0.034) and DuConv (average Bleu +20%; abs.: 0.047), with different knowledge formats (textual & structured) and different languages (English & Chinese).", "keyphrases": ["recurrent knowledge-interaction", "conversation model", "response generation"]} +{"id": "mei-zhai-2008-generating", "title": "Generating Impact-Based Summaries for Scientific Literature", "abstract": "In this paper, we present a study of a novel summarization problem, i.e., summarizing the impact of a scientific publication. Given a paper and its citation context, we study how to extract sentences that can represent the most influential content of the paper. We propose language modeling methods for solving this problem, and study how to incorporate features such as authority and proximity to accurately estimate the impact language model. Experiment results on a SIGIR publication collection show that the proposed methods are effective for generating impact-based summaries.", "keyphrases": ["scientific literature", "summarization", "influence"]} +{"id": "schuster-etal-2021-get", "title": "Get Your Vitamin C! Robust Fact Verification with Contrastive Evidence", "abstract": "Typical fact verification models use retrieved written evidence to verify claims. Evidence sources, however, often change over time as more information is gathered and revised. In order to adapt, models must be sensitive to subtle differences in supporting evidence. We present VitaminC, a benchmark infused with challenging cases that require fact verification models to discern and adjust to slight factual changes. We collect over 100,000 Wikipedia revisions that modify an underlying fact, and leverage these revisions, together with additional synthetically constructed ones, to create a total of over 400,000 claim-evidence pairs. Unlike previous resources, the examples in VitaminC are contrastive, i.e., they contain evidence pairs that are nearly identical in language and content, with the exception that one supports a given claim while the other does not. 
We show that training using this design increases robustness\u2014improving accuracy by 10% on adversarial fact verification and 6% on adversarial natural language inference (NLI). Moreover, the structure of VitaminC leads us to define additional tasks for fact-checking resources: tagging relevant words in the evidence for verifying the claim, identifying factual revisions, and providing automatic edits via factually consistent text generation.", "keyphrases": ["fact verification", "revision", "wikipedia revision"]} +{"id": "eric-etal-2020-multiwoz", "title": "MultiWOZ 2.1: A Consolidated Multi-Domain Dialogue Dataset with State Corrections and State Tracking Baselines", "abstract": "MultiWOZ 2.0 (Budzianowski et al., 2018) is a recently released multi-domain dialogue dataset spanning 7 distinct domains and containing over 10,000 dialogues. Though immensely useful and one of the largest resources of its kind to date, MultiWOZ 2.0 has a few shortcomings. Firstly, there is substantial noise in the dialogue state annotations and dialogue utterances, which negatively impacts the performance of state-tracking models. Secondly, follow-up work (Lee et al., 2019) has augmented the original dataset with user dialogue acts. This leads to multiple co-existent versions of the same dataset with minor modifications. In this work we tackle the aforementioned issues by introducing MultiWOZ 2.1. To fix the noisy state annotations, we use crowdsourced workers to re-annotate state and utterances based on the original utterances in the dataset. This correction process results in changes to over 32% of state annotations across 40% of the dialogue turns. In addition, we fix 146 dialogue utterances by canonicalizing slot values in the utterances to the values in the dataset ontology. To address the second problem, we combined the contributions of the follow-up works into MultiWOZ 2.1. Hence, our dataset also includes user dialogue acts as well as multiple slot descriptions per dialogue state slot. We then benchmark a number of state-of-the-art dialogue state tracking models on the MultiWOZ 2.1 dataset and show the joint state tracking performance on the corrected state annotations. We are publicly releasing MultiWOZ 2.1 to the community, hoping that this dataset resource will allow for more effective models across various dialogue subproblems to be built in the future.", "keyphrases": ["multi-domain dialogue dataset", "state annotation", "dialog state tracking"]} +{"id": "gebre-etal-2013-improving", "title": "Improving Native Language Identification with TF-IDF Weighting", "abstract": "This paper presents a Native Language Identification (NLI) system based on TF-IDF weighting schemes and using linear classifiers - support vector machines, logistic regressions and perceptrons. The system was one of the participants of the 2013 NLI Shared Task in the closed-training track, achieving 0.814 overall accuracy for a set of 11 native languages. This accuracy was only 2.2 percentage points lower than the winner\u2019s performance. 
Furthermore, with subsequent evaluations using 10-fold cross-validation (as given by the organizers) on the combined training and development data, the best average accuracy obtained is 0.8455 and the features that contributed to this accuracy are the TF-IDF of the combined unigrams and bigrams of words.", "keyphrases": ["native language identification", "tf-idf", "bigram"]} +{"id": "tekiroglu-etal-2020-generating", "title": "Generating Counter Narratives against Online Hate Speech: Data and Strategies", "abstract": "Recently, research has started focusing on avoiding undesired effects that come with content moderation, such as censorship and overblocking, when dealing with hatred online. The core idea is to directly intervene in the discussion with textual responses that are meant to counter the hate content and prevent it from further spreading. Accordingly, automation strategies, such as natural language generation, are beginning to be investigated. Still, they suffer from the lack of a sufficient amount of quality data and tend to produce generic/repetitive responses. Being aware of the aforementioned limitations, we present a study on how to collect responses to hate effectively, employing large scale unsupervised language models such as GPT-2 for the generation of silver data, and the best annotation strategies/neural architectures that can be used for data filtering before expert validation/post-editing.", "keyphrases": ["hate speech", "textual response", "counter-narratives"]} +{"id": "zhuang-zong-2010-joint", "title": "Joint Inference for Bilingual Semantic Role Labeling", "abstract": "We show that jointly performing semantic role labeling (SRL) on bitext can improve SRL results on both sides. In our approach, we use monolingual SRL systems to produce argument candidates for predicates in bitext at first. Then, we simultaneously generate SRL results for two sides of bitext using our joint inference model. Our model prefers the bilingual SRL result that is not only reasonable on each side of bitext, but also has more consistent argument structures between two sides. To evaluate the consistency between two argument structures, we also formulate a log-linear model to compute the probability of aligning two arguments. We have experimented with our model on Chinese-English parallel Prop-Bank data. Using our joint inference model, F1 scores of SRL results on Chinese and English text achieve 79.53% and 77.87% respectively, which are 1.52 and 1.74 points higher than the results of baseline monolingual SRL combination systems respectively.", "keyphrases": ["semantic role", "consistency", "joint inference"]} +{"id": "chinkina-meurers-2016-linguistically", "title": "Linguistically Aware Information Retrieval: Providing Input Enrichment for Second Language Learners", "abstract": "How can second language teachers retrieve texts that are rich in terms of the grammatical constructions to be taught, but also address the content of interest to the learners? We developed an Information Retrieval system that identifies the 87 grammatical constructions spelled out in the official English language curriculum of schools in Baden-W\u00fcrttemberg (Germany) and reranks the search results based on the selected (de)prioritization of grammatical forms. In combination with a visualization of the characteristics of the search results, the approach effectively supports teachers in prioritizing those texts that provide the targeted forms. 
The approach facilitates systematic input enrichment for language learners as a complement to the established notion of input enhancement: while input enrichment aims at richly representing the selected forms and categories in a text, input enhancement targets their presentation to make them more salient and support noticing.", "keyphrases": ["learner", "grammatical construction", "information retrieval system"]} +{"id": "lardilleux-lepage-2009-sampling", "title": "Sampling-based Multilingual Alignment", "abstract": "We present a sub-sentential alignment method that extracts high quality multi-word alignments from sentence-aligned multilingual parallel corpora. Unlike other methods, it exploits low frequency terms, which makes it highly scalable. As it relies on alingual concepts, it can process any number of languages at once. Experiments have shown that it is competitive with state-of-the-art methods.", "keyphrases": ["aligner", "parallel corpora", "frequency term"]} +{"id": "kim-mooney-2012-unsupervised", "title": "Unsupervised PCFG Induction for Grounded Language Learning with Highly Ambiguous Supervision", "abstract": "\"Grounded\" language learning employs training data in the form of sentences paired with relevant but ambiguous perceptual contexts. Borschinger et al. (2011) introduced an approach to grounded language learning based on unsupervised PCFG induction. Their approach works well when each sentence potentially refers to one of a small set of possible meanings, such as in the sportscasting task. However, it does not scale to problems with a large set of potential meanings for each sentence, such as the navigation instruction following task studied by Chen and Mooney (2011). This paper presents an enhancement of the PCFG approach that scales to such problems with highly-ambiguous supervision. Experimental results on the navigation task demonstrate the effectiveness of our approach.", "keyphrases": ["such problem", "highly-ambiguous supervision", "unsupervised pcfg induction"]} +{"id": "luyckx-daelemans-2008-personae", "title": "Personae: a Corpus for Author and Personality Prediction from Text", "abstract": "We present a new corpus for computational stylometry, more specifically authorship attribution and the prediction of author personality from text. Because of the large number of authors (145), the corpus will allow previously impossible studies of variation in features considered predictive for writing style. The innovative meta-information (personality profiles of the authors) associated with these texts allows the study of personality prediction, a not yet very well researched aspect of style. In this paper, we describe the contents of the corpus and show its use in both authorship attribution and personality prediction. We focus on features that have been proven useful in the field of author recognition. Syntactic features like part-of-speech n-grams are generally accepted as not being under the author\u2019s conscious control and therefore providing good clues for predicting gender or authorship. We want to test whether these features are helpful for personality prediction and authorship attribution on a large set of authors. Both tasks are approached as text categorization tasks. First a document representation is constructed based on feature selection from the linguistically analyzed corpus (using the Memory-Based Shallow Parser (MBSP)). 
These are associated with each of the 145 authors or each of the four components of the Myers-Briggs Type Indicator (Introverted-Extraverted, Sensing-iNtuitive, Thinking-Feeling, Judging-Perceiving). Authorship attribution on 145 authors achieves results around 50% accuracy. Preliminary results indicate that the first two personality dimensions can be predicted fairly accurately.", "keyphrases": ["personality prediction", "authorship attribution", "syntactic feature", "n-gram", "personae"]} +{"id": "zhang-etal-2019-evidence", "title": "Evidence-based Trustworthiness", "abstract": "The information revolution brought with it information pollution. Information retrieval and extraction help us cope with abundant information from diverse sources. But some sources are of anonymous authorship, and some are of uncertain accuracy, so how can we determine what we should actually believe? Not all information sources are equally trustworthy, and simply accepting the majority view is often wrong. This paper develops a general framework for estimating the trustworthiness of information sources in an environment where multiple sources provide claims and supporting evidence, and each claim can potentially be produced by multiple sources. We consider two settings: one in which information sources directly assert claims, and a more realistic and challenging one, in which claims are inferred from evidence provided by sources, via (possibly noisy) NLP techniques. Our key contribution is to develop a family of probabilistic models that jointly estimate the trustworthiness of sources, and the credibility of claims they assert. This is done while accounting for the (possibly noisy) NLP needed to infer claims from evidence supplied by sources. We evaluate our framework on several datasets, showing strong results and significant improvement over baselines.", "keyphrases": ["trustworthiness", "information pollution", "claim"]} +{"id": "onishi-etal-2016-large", "title": "Who did What: A Large-Scale Person-Centered Cloze Dataset", "abstract": "We have constructed a new \"Who-did-What\" dataset of over 200,000 fill-in-the-gap (cloze) multiple choice reading comprehension problems constructed from the LDC English Gigaword newswire corpus. The WDW dataset has a variety of novel features. First, in contrast with the CNN and Daily Mail datasets (Hermann et al., 2015) we avoid using article summaries for question formation. Instead, each problem is formed from two independent articles --- an article given as the passage to be read and a separate article on the same events used to form the question. Second, we avoid anonymization --- each choice is a person named entity. Third, the problems have been filtered to remove a fraction that are easily solved by simple baselines, while remaining 84% solvable by humans. We report performance benchmarks of standard systems and propose the WDW dataset as a challenge task for the community.", "keyphrases": ["cloze dataset", "who-did-what", "passage"]} +{"id": "garcia-etal-2021-harnessing", "title": "Harnessing Multilinguality in Unsupervised Machine Translation for Rare Languages", "abstract": "Unsupervised translation has reached impressive performance on resource-rich language pairs such as English-French and English-German. However, early studies have shown that in more realistic settings involving low-resource, rare languages, unsupervised translation performs poorly, achieving less than 3.0 BLEU. 
In this work, we show that multilinguality is critical to making unsupervised systems practical for low-resource settings. In particular, we present a single model for 5 low-resource languages (Gujarati, Kazakh, Nepali, Sinhala, and Turkish) to and from English directions, which leverages monolingual and auxiliary parallel data from other high-resource language pairs via a three-stage training scheme. We outperform all current state-of-the-art unsupervised baselines for these languages, achieving gains of up to 14.4 BLEU. Additionally, we outperform strong supervised baselines for various language pairs as well as match the performance of the current state-of-the-art supervised model for Nepali-English. We conduct a series of ablation studies to establish the robustness of our model under different degrees of data quality, as well as to analyze the factors which led to the superior performance of the proposed approach over traditional unsupervised models.", "keyphrases": ["unsupervised translation", "low-resource language", "auxiliary parallel data"]} +{"id": "mishra-etal-2019-abusive", "title": "Abusive Language Detection with Graph Convolutional Networks", "abstract": "Abuse on the Internet represents a significant societal problem of our time. Previous research on automated abusive language detection in Twitter has shown that community-based profiling of users is a promising technique for this task. However, existing approaches only capture shallow properties of online communities by modeling follower\u2013following relationships. In contrast, working with graph convolutional networks (GCNs), we present the first approach that captures not only the structure of online communities but also the linguistic behavior of the users within them. We show that such a heterogeneous graph-structured modeling of communities significantly advances the current state of the art in abusive language detection.", "keyphrases": ["convolutional network", "art", "abusive language detection"]} +{"id": "darwish-etal-2014-verifiably", "title": "Verifiably Effective Arabic Dialect Identification", "abstract": "Several recent papers on Arabic dialect identification have hinted that using a word unigram model is sufficient and effective for the task. However, most previous work was done on a standard fairly homogeneous dataset of dialectal user comments. In this paper, we show that training on the standard dataset does not generalize, because a unigram model may be tuned to topics in the comments and does not capture the distinguishing features of dialects. We show that effective dialect identification requires that we account for the distinguishing lexical, morphological, and phonological phenomena of dialects. We show that accounting for such can improve dialect detection accuracy by nearly 10% absolute.", "keyphrases": ["arabic dialect identification", "word unigram model", "egyptian"]} +{"id": "zhu-etal-2010-unified", "title": "A Unified Framework for Scope Learning via Simplified Shallow Semantic Parsing", "abstract": "This paper approaches the scope learning problem via simplified shallow semantic parsing. This is done by regarding the cue as the predicate and mapping its scope into several constituents as the arguments of the cue. Evaluation on the BioScope corpus shows that the structural information plays a critical role in capturing the relationship between a cue and its dominated arguments. It also shows that our parsing approach significantly outperforms the state-of-the-art chunking ones. 
Although our parsing approach is only evaluated on negation and speculation scope learning here, it is portable to other kinds of scope learning.", "keyphrases": ["scope learning", "shallow semantic parsing", "bioscope corpus"]} +{"id": "fang-etal-2017-learning", "title": "Learning how to Active Learn: A Deep Reinforcement Learning Approach", "abstract": "Active learning aims to select a small subset of data for annotation such that a classifier learned on the data is highly accurate. This is usually done using heuristic selection methods, however the effectiveness of such methods is limited and moreover, the performance of heuristics varies between datasets. To address these shortcomings, we introduce a novel formulation by reframing the active learning as a reinforcement learning problem and explicitly learning a data selection policy, where the policy takes the role of the active learning heuristic. Importantly, our method allows the selection policy learned using simulation to one language to be transferred to other languages. We demonstrate our method using cross-lingual named entity recognition, observing uniform improvements over traditional active learning algorithms.", "keyphrases": ["reinforcement", "active learning algorithm", "imitation"]} +{"id": "oncevay-2021-peru", "title": "Peru is Multilingual, Its Machine Translation Should Be Too?", "abstract": "Peru is a multilingual country with a long history of contact between the indigenous languages and Spanish. Taking advantage of this context for machine translation is possible with multilingual approaches for learning both unsupervised subword segmentation and neural machine translation models. The study proposes the first multilingual translation models for four languages spoken in Peru: Aymara, Ashaninka, Quechua and Shipibo-Konibo, providing both many-to-Spanish and Spanish-to-many models and outperforming pairwise baselines in most of them. The task exploited a large English-Spanish dataset for pre-training, monolingual texts with tagged back-translation, and parallel corpora aligned with English. Finally, by fine-tuning the best models, we also assessed the out-of-domain capabilities in two evaluation datasets for Quechua and a new one for Shipibo-Konibo.", "keyphrases": ["machine translation", "aymara", "quechua"]} +{"id": "yang-cardie-2014-joint", "title": "Joint Modeling of Opinion Expression Extraction and Attribute Classification", "abstract": "In this paper, we study the problems of opinion expression extraction and expression-level polarity and intensity classification. Traditional fine-grained opinion analysis systems address these problems in isolation and thus cannot capture interactions among the textual spans of opinion expressions and their opinion-related properties. We present two types of joint approaches that can account for such interactions during 1) both learning and inference or 2) only during inference. Extensive experiments on a standard dataset demonstrate that our approaches provide substantial improvements over previously published results. 
By analyzing the results, we gain some insight into the advantages of different joint models.", "keyphrases": ["opinion expression extraction", "labeling", "joint model"]} +{"id": "roth-anthonio-2021-unimplicit", "title": "UnImplicit Shared Task Report: Detecting Clarification Requirements in Instructional Text", "abstract": "This paper describes the data, task setup, and results of the shared task at the First Workshop on Understanding Implicit and Underspecified Language (UnImplicit). The task requires computational models to predict whether a sentence contains aspects of meaning that are contextually unspecified and thus require clarification. Two teams participated and the best scoring system achieved an accuracy of 68%.", "keyphrases": ["clarification", "instructional text", "underspecified language"]} +{"id": "druskat-etal-2016-corpus", "title": "corpus-tools.org: An Interoperable Generic Software Tool Set for Multi-layer Linguistic Corpora", "abstract": "This paper introduces an open source, interoperable generic software tool set catering for the entire workflow of creation, migration, annotation, query and analysis of multi-layer linguistic corpora. It consists of four components: Salt, a graph-based meta model and API for linguistic data, the common data model for the rest of the tool set; Pepper, a conversion tool and platform for linguistic data that can be used to convert many different linguistic formats into each other; Atomic, an extensible, platform-independent multi-layer desktop annotation software for linguistic corpora; ANNIS, a search and visualization architecture for multi-layer linguistic corpora with many different visualizations and a powerful native query language. The set was designed to solve the following issues in a multi-layer corpus workflow: Lossless data transition between tools through a common data model generic enough to allow for a potentially unlimited number of different types of annotation, conversion capabilities for different linguistic formats to cater for the processing of data from different sources and/or with existing annotations, a high level of extensibility to enhance the sustainability of the whole tool set, analysis capabilities encompassing corpus and annotation query alongside multi-faceted visualizations of all annotation layers.", "keyphrases": ["multi-layer linguistic corpora", "salt", "pepper"]} +{"id": "hjalmarsson-etal-2007-dealing", "title": "Dealing with DEAL: A Dialogue System for Conversation Training", "abstract": "We present DEAL, a spoken dialogue system for conversation training under development at KTH. DEAL is a game with a spoken language interface designed for second language learners. The system is intended as a multidisciplinary research platform where challenges and potential benefits of combining elements from computer games, dialogue systems and language learning can be explored.", "keyphrases": ["conversation training", "spoken language interface", "second language learner"]} +{"id": "bangalore-etal-2006-learning", "title": "Learning the Structure of Task-Driven Human-Human Dialogs", "abstract": "Data-driven techniques have been used for many computational linguistics tasks. Models derived from data are generally more robust than hand-crafted systems since they better reflect the distribution of the phenomena being modeled. With the availability of large corpora of spoken dialog, dialog management is now reaping the benefits of data-driven techniques. 
In this paper, we compare two approaches to modeling subtask structure in dialog: a chunk-based model of subdialog sequences, and a parse-based, or hierarchical, model. We evaluate these models using customer agent dialogs from a catalog service domain.", "keyphrases": ["dialog", "task structure", "natural language generation"]} +{"id": "wu-dredze-2020-explicit", "title": "Do Explicit Alignments Robustly Improve Multilingual Encoders?", "abstract": "Multilingual BERT (mBERT), XLM-RoBERTa (XLMR) and other unsupervised multilingual encoders can effectively learn cross-lingual representation. Explicit alignment objectives based on bitexts like Europarl or MultiUN have been shown to further improve these representations. However, word-level alignments are often suboptimal and such bitexts are unavailable for many languages. In this paper, we propose a new contrastive alignment objective that can better utilize such signal, and examine whether these previous alignment methods can be adapted to noisier sources of aligned data: a randomly sampled 1 million pair subset of the OPUS collection. Additionally, rather than report results on a single dataset with a single model run, we report the mean and standard deviation of multiple runs with different seeds, on four datasets and tasks. Our more extensive analysis finds that, while our new objective outperforms previous work, overall these methods do not improve performance with a more robust evaluation framework. Furthermore, the gains from using a better underlying model eclipse any benefits from alignment training. These negative results dictate more care in evaluating these methods and suggest limitations in applying explicit alignment objectives.", "keyphrases": ["multilingual encoder", "cross-lingual representation", "contrastive learning"]} +{"id": "van-de-cruys-etal-2013-melodi-supervised", "title": "MELODI: A Supervised Distributional Approach for Free Paraphrasing of Noun Compounds", "abstract": "This paper describes the system submitted by the MELODI team for the SemEval-2013 Task 4: Free Paraphrases of Noun Compounds (Hendrickx et al., 2013). Our approach combines the strength of an unsupervised distributional word space model with a supervised maximum-entropy classification model; the distributional model yields a feature representation for a particular compound noun, which is subsequently used by the classifier to induce a number of appropriate paraphrases.", "keyphrases": ["paraphrase", "noun compounds", "distributional model"]} +{"id": "lo-wu-2013-informal", "title": "Can Informal Genres be better Translated by Tuning on Automatic Semantic Metrics?", "abstract": "Even though the informal language of spoken text and web forum genres presents great difficulties for automatic semantic role labeling, we show that surprisingly, tuning statistical machine translation against the SRL-based objective function, MEANT, nevertheless leads more robustly to adequate translations of these informal genres than tuning against BLEU or TER. The accuracy of automatic semantic parsing has been shown to degrade significantly on informal genres such as speech or tweets, compared to formal genres like newswire. In spite of this, human evaluators preferred translations from MEANT-tuned systems over the BLEU- or TER-tuned ones by a significant margin. 
Error analysis indicates that one of the major sources of errors in automatic shallow semantic parsing of informal genres is failure to identify the semantic frame for copula or existential senses of \u201cbe\u201d. We show that MEANT\u2019s correlation with human adequacy judgment on informal text is improved by reconstructing the missing semantic frames for \u201cbe\u201d. Our tuning approach is independent of the translation model architecture, so any SMT model can potentially benefit from the semantic knowledge incorporated through our approach.", "keyphrases": ["informal genre", "meant", "adequate translation"]} +{"id": "sun-2010-improving", "title": "Improving Chinese Semantic Role Labeling with Rich Syntactic Features", "abstract": "Developing features has been shown crucial to advancing the state-of-the-art in Semantic Role Labeling (SRL). To improve Chinese SRL, we propose a set of additional features, some of which are designed to better capture structural information. Our system achieves 93.49 F-measure, a significant improvement over the best reported performance of 92.0. We are further concerned with the effect of parsing in Chinese SRL. We empirically analyze the two-fold effect, grouping words into constituents and providing syntactic information. We also give some preliminary linguistic explanations.", "keyphrases": ["semantic role labeling", "chinese srl", "statistical classifier"]} +{"id": "le-etal-2014-tuhoi", "title": "TUHOI: Trento Universal Human Object Interaction Dataset", "abstract": "This paper describes the Trento Universal Human Object Interaction dataset, TUHOI, which is dedicated to human object interactions in images. Recognizing human actions is an important yet challenging task. Most available datasets in this field are limited in numbers of actions and objects. A large dataset with various actions and human object interactions is needed for training and evaluating complicated and robust human action recognition systems, especially systems that combine knowledge learned from language and vision. We introduce an image collection with more than two thousand actions which have been annotated through crowdsourcing. We review publicly available datasets, describe the annotation process of our image collection and some statistics of this dataset. Finally, experimental results on the dataset including human action recognition based on objects and an analysis of the relation between human-object positions in images and prepositions in language are presented.", "keyphrases": ["object", "human action", "tuhoi"]} +{"id": "lubetich-sagae-2014-data", "title": "Data-driven Measurement of Child Language Development with Simple Syntactic Templates", "abstract": "When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. 
As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.", "keyphrases": ["child language development", "developmental level", "linguistic feature"]} +{"id": "oren-etal-2019-distributionally", "title": "Distributionally Robust Language Modeling", "abstract": "Language models are generally trained on data spanning a wide range of topics (e.g., news, reviews, fiction), but they might be applied to an a priori unknown target distribution (e.g., restaurant reviews). In this paper, we first show that training on text outside the test distribution can degrade test performance when using standard maximum likelihood (MLE) training. To remedy this without the knowledge of the test distribution, we propose an approach which trains a model that performs well over a wide range of potential test distributions. In particular, we derive a new distributionally robust optimization (DRO) procedure which minimizes the loss of the model over the worst-case mixture of topics with sufficient overlap with the training distribution. Our approach, called topic conditional value at risk (topic CVaR), obtains a 5.5 point perplexity reduction over MLE when the language models are trained on a mixture of Yelp reviews and news and tested only on reviews.", "keyphrases": ["robust optimization", "dro", "mixture"]} +{"id": "peng-etal-2019-huaweis", "title": "Huawei's NMT Systems for the WMT 2019 Biomedical Translation Task", "abstract": "This paper describes Huawei's neural machine translation systems for the WMT 2019 biomedical translation shared task. We trained and fine-tuned our systems on a combination of out-of-domain and in-domain parallel corpora for six translation directions covering English\u2013Chinese, English\u2013French and English\u2013German language pairs. Our submitted systems achieve the best BLEU scores on English\u2013French and English\u2013German language pairs according to the official evaluation results. In the English\u2013Chinese translation task, our systems are in the second place. The enhanced performance is attributed to more in-domain training and more sophisticated models developed. Development of translation models and transfer learning (or domain adaptation) methods has significantly contributed to the progress of the task.", "keyphrases": ["wmt", "translation direction", "bleu score", "huawei"]} +{"id": "das-smith-2009-paraphrase", "title": "Paraphrase Identification as Probabilistic Quasi-Synchronous Recognition", "abstract": "We present a novel approach to deciding whether two sentences hold a paraphrase relationship. We employ a generative model that generates a paraphrase of a given sentence, and we use probabilistic inference to reason about whether two sentences share the paraphrase relationship. The model cleanly incorporates both syntax and lexical semantics using quasi-synchronous dependency grammars (Smith and Eisner, 2006). Furthermore, using a product of experts (Hinton, 2002), we combine the model with a complementary logistic regression model based on state-of-the-art lexical overlap features. 
We evaluate our models on the task of distinguishing true paraphrase pairs from false ones on a standard corpus, giving competitive state-of-the-art performance.", "keyphrases": ["probabilistic inference", "paraphrase identification", "inter alia"]} +{"id": "wu-etal-2019-proactive", "title": "Proactive Human-Machine Conversation with Explicit Conversation Goal", "abstract": "Though great progress has been made for human-machine conversation, current dialogue system is still in its infancy: it usually converses passively and utters words more as a matter of response, rather than on its own initiatives. In this paper, we take a radical step towards building a human-like conversational agent: endowing it with the ability of proactively leading the conversation (introducing a new topic or maintaining the current topic). To facilitate the development of such conversation systems, we create a new dataset named Konv where one acts as a conversation leader and the other acts as the follower. The leader is provided with a knowledge graph and asked to sequentially change the discussion topics, following the given conversation goal, and meanwhile keep the dialogue as natural and engaging as possible. Konv enables a very challenging task as the model needs to both understand dialogue and plan over the given knowledge graph. We establish baseline results on this dataset (about 270K utterances and 30K dialogues) using several state-of-the-art models. Experimental results show that dialogue models that plan over the knowledge graph can make full use of related knowledge to generate more diverse multi-turn conversations. The baseline systems along with the dataset are publicly available.", "keyphrases": ["human-machine conversation", "response generation", "path"]} +{"id": "abdulrahman-etal-2019-developing", "title": "Developing a Fine-grained Corpus for a Less-resourced Language: the case of Kurdish", "abstract": "Kurdish is a less-resourced language consisting of different dialects written in various scripts. Approximately 30 million people in different countries speak the language. The lack of corpora is one of the main obstacles in Kurdish language processing. In this paper, we present KTC, the Kurdish Textbooks Corpus, which is composed of 31 K-12 textbooks in Sorani dialect. The corpus is normalized and categorized into 12 educational subjects containing 693,800 tokens (110,297 types). Our resource is publicly available for non-commercial use under the CC BY-NC-SA 4.0 license.", "keyphrases": ["less-resourced language", "kurdish textbooks corpus", "sorani dialect"]} +{"id": "denkowski-lavie-2010-meteor", "title": "METEOR-NEXT and the METEOR Paraphrase Tables: Improved Evaluation Support for Five Target Languages", "abstract": "This paper describes our submission to the WMT10 Shared Evaluation Task and MetricsMATR10. We present a version of the Meteor-next metric with paraphrase tables for five target languages. We describe the creation of these paraphrase tables and conduct a tuning experiment that demonstrates consistent improvement across all languages over baseline versions of the metric without paraphrase resources.", "keyphrases": ["meteor", "paraphrase table", "machine translation evaluation"]} +{"id": "gliozzo-etal-2005-domain", "title": "Domain Kernels for Word Sense Disambiguation", "abstract": "In this paper we present a supervised Word Sense Disambiguation methodology that exploits kernel methods to model sense distinctions. 
In particular a combination of kernel functions is adopted to estimate independently both syntagmatic and domain similarity. We defined a kernel function, namely the Domain Kernel, that allowed us to plug \"external knowledge\" into the supervised learning process. External knowledge is acquired from unlabeled data in a totally unsupervised way, and it is represented by means of Domain Models. We evaluated our methodology on several lexical sample tasks in different languages, outperforming significantly the state-of-the-art for each of them, while reducing the amount of labeled training data required for learning.", "keyphrases": ["kernel method", "train", "wsd", "matrix", "term-to-document matrix"]} +{"id": "nakov-ng-2011-translating", "title": "Translating from Morphologically Complex Languages: A Paraphrase-Based Approach", "abstract": "We propose a novel approach to translating from a morphologically complex language. Unlike previous research, which has targeted word inflections and concatenations, we focus on the pairwise relationship between morphologically related words, which we treat as potential paraphrases and handle using paraphrasing techniques at the word, phrase, and sentence level. An important advantage of this framework is that it can cope with derivational morphology, which has so far remained largely beyond the capabilities of statistical machine translation systems. Our experiments translating from Malay, whose morphology is mostly derivational, into English show significant improvements over rivaling approaches based on five automatic evaluation measures (for 320,000 sentence pairs; 9.5 million English word tokens).", "keyphrases": ["paraphrase", "morphological knowledge", "rich language"]} +{"id": "oncevay-etal-2020-bridging", "title": "Bridging Linguistic Typology and Multilingual Machine Translation with Multi-View Language Representations", "abstract": "Sparse language vectors from linguistic typology databases and learned embeddings from tasks like multilingual machine translation have been investigated in isolation, without analysing how they could benefit from each other's language characterisation. We propose to fuse both views using singular vector canonical correlation analysis and study what kind of information is induced from each source. By inferring typological features and language phylogenies, we observe that our representations embed typology and strengthen correlations with language relationships. We then take advantage of our multi-view language vector space for multilingual machine translation, where we achieve competitive overall translation accuracy in tasks that require information about language similarities, such as language clustering and ranking candidates for multilingual transfer. With our method, we can easily project and assess new languages without expensive retraining of massive multilingual or ranking models, which are major disadvantages of related approaches.", "keyphrases": ["multilingual machine translation", "typological database", "mnmt model"]} +{"id": "kamath-etal-2019-specializing", "title": "Specializing Distributional Vectors of All Words for Lexical Entailment", "abstract": "Semantic specialization methods fine-tune distributional word vectors using lexical knowledge from external resources (e.g. WordNet) to accentuate a particular relation between words. However, such post-processing methods suffer from limited coverage as they affect only vectors of words seen in the external resources. 
We present the first post-processing method that specializes vectors of all vocabulary words \u2013 including those unseen in the resources \u2013 for the asymmetric relation of lexical entailment (LE) (i.e., hyponymy-hypernymy relation). Leveraging a partially LE-specialized distributional space, our POSTLE (i.e., post-specialization for LE) model learns an explicit global specialization function, allowing for specialization of vectors of unseen words, as well as word vectors from other languages via cross-lingual transfer. We capture the function as a deep feed-forward neural network: its objective re-scales vector norms to reflect the concept hierarchy while simultaneously attracting hyponymy-hypernymy pairs to better reflect semantic similarity. An extended model variant augments the basic architecture with an adversarial discriminator. We demonstrate the usefulness and versatility of POSTLE models with different input distributional spaces in different scenarios (monolingual LE and zero-shot cross-lingual LE transfer) and tasks (binary and graded LE). We report consistent gains over state-of-the-art LE-specialization methods, and successfully LE-specialize word vectors for languages without any external lexical knowledge.", "keyphrases": ["lexical entailment", "global specialization function", "unseen word"]} +{"id": "huang-etal-2016-well", "title": "How well do Computers Solve Math Word Problems? Large-Scale Dataset Construction and Evaluation", "abstract": "Recently a few systems for automatically solving math word problems have reported promising results. However, the datasets used for evaluation have limitations in both scale and diversity. In this paper, we build a large-scale dataset which is more than 9 times the size of previous ones, and contains many more problem types. Problems in the dataset are semi-automatically obtained from community question-answering (CQA) web pages. A ranking SVM model is trained to automatically extract problem answers from the answer text provided by CQA users, which significantly reduces human annotation cost. Experiments conducted on the new dataset lead to interesting and surprising results.", "keyphrases": ["math word problem", "large-scale dataset", "web page"]} +{"id": "dyer-etal-2010-cdec", "title": "cdec: A Decoder, Alignment, and Learning Framework for Finite-State and Context-Free Translation Models", "abstract": "We present cdec, an open source framework for decoding, aligning with, and training a number of statistical machine translation models, including word-based models, phrase-based models, and models based on synchronous context-free grammars. Using a single unified internal representation for translation forests, the decoder strictly separates model-specific translation logic from general rescoring, pruning, and inference algorithms. From this unified representation, the decoder can extract not only the 1- or k-best translations, but also alignments to a reference, or the quantities necessary to drive discriminative training using gradient-based or gradient-free optimization techniques. Its efficient C++ implementation means that memory use and runtime performance are significantly better than comparable decoders.", "keyphrases": ["word alignment", "qe-clean system", "cdec"]} +{"id": "krug-etal-2015-rule", "title": "Rule-based Coreference Resolution in German Historic Novels", "abstract": "Coreference resolution (CR) is a key task in the automated analysis of characters in stories. 
Standard CR systems, usually trained on newspaper texts, have difficulties with literary texts, even with novels; a comparison with newspaper texts showed that average sentence length is greater in novels and the number of pronouns, as well as the percentage of direct speech, is higher. We report promising evaluation results for a rule-based system similar to [Lee et al. 2011], but tailored to the domain, which recognizes coreference chains in novels much better than CR systems like CorZu. Rule-based systems performed best on the CoNLL 2011 challenge [Pradhan et al. 2011]. Recent work in machine learning showed similar results to rule-based systems [Durrett et al. 2013]. The latter has the advantage that its explanation component facilitates a fine-grained error analysis for incremental refinement of the rules.", "keyphrases": ["pronoun", "rule-based coreference resolution", "literary character"]} +{"id": "elming-etal-2013-stream", "title": "Down-stream effects of tree-to-dependency conversions", "abstract": "Dependency analysis relies on morphosyntactic evidence, as well as semantic evidence. In some cases, however, morphosyntactic evidence seems to be in conflict with semantic evidence. For this reason dependency grammar theories, annotation guidelines and tree-to-dependency conversion schemes often differ in how they analyze various syntactic constructions. Most experiments for which constituent-based treebanks such as the Penn Treebank are converted into dependency treebanks rely blindly on one of four-five widely used tree-to-dependency conversion schemes. This paper evaluates the down-stream effect of choice of conversion scheme, showing that it has dramatic impact on end results.", "keyphrases": ["dependency analysis", "conversion scheme", "down-stream effect", "downstream task", "view"]} +{"id": "nozza-etal-2017-multi", "title": "A Multi-View Sentiment Corpus", "abstract": "Sentiment Analysis is a broad task that involves the analysis of various aspects of the natural language text. However, most of the approaches in the state of the art usually investigate independently each aspect, i.e. Subjectivity Classification, Sentiment Polarity Classification, Emotion Recognition, Irony Detection. In this paper we present a Multi-View Sentiment Corpus (MVSC), which comprises 3000 English microblog posts related to the movie domain. Three independent annotators manually labelled MVSC, following a broad annotation schema about different aspects that can be grasped from natural language text coming from social networks. The contribution is therefore a corpus that comprises five different views for each message, i.e. subjective/objective, sentiment polarity, implicit/explicit, irony, emotion. In order to allow a more detailed investigation on the human labelling behaviour, we provide the annotations of each human annotator involved.", "keyphrases": ["multi-view sentiment corpus", "emotion", "different view", "implicit"]} +{"id": "bender-etal-2015-layers", "title": "Layers of Interpretation: On Grammar and Compositionality", "abstract": "With the recent resurgence of interest in semantic annotation of corpora for improved semantic parsing, we observe a tendency which we view as ill-advised, to conflate sentence meaning and speaker meaning into a single mapping, whether done by annotators or by a parser. 
We argue instead for the more traditional hypothesis that sentence meaning, but not speaker meaning, is compositional, and accordingly that NLP systems would benefit from reusable, automatically derivable, task-independent semantic representations which target sentence meaning, in order to capture exactly the information in the linguistic signal itself. We further argue that compositional construction of such sentence meaning representations affords better consistency, more comprehensiveness, greater scalability, and less duplication of effort for each new NLP application. For concreteness, we describe one well-tested grammar-based method for producing sentence meaning representations which is efficient for annotators, and which exhibits many of the above benefits. We then report on a small inter-annotator agreement study to quantify the consistency of semantic representations produced via this grammar-based method.", "keyphrases": ["interpretation", "compositionality", "scalability", "duplication", "amr"]} +{"id": "hu-etal-2021-one", "title": "One-class Text Classification with Multi-modal Deep Support Vector Data Description", "abstract": "This work presents multi-modal deep SVDD (mSVDD) for one-class text classification. By extending the uni-modal SVDD to a multiple modal one, we build mSVDD with multiple hyperspheres, which enable us to build a much better description for target one-class data. Additionally, the end-to-end architecture of mSVDD can jointly handle neural feature learning and one-class text learning. We also introduce a mechanism for incorporating negative supervision in the absence of real negative data, which can be beneficial to the mSVDD model. We conduct experiments on Reuters and 20 Newsgroup datasets, and the experimental results demonstrate that mSVDD outperforms uni-modal SVDD and mSVDD can get further improvements when negative supervision is incorporated.", "keyphrases": ["multiple modal one", "one-class text classification", "well description"]} +{"id": "roark-hollingshead-2009-linear", "title": "Linear Complexity Context-Free Parsing Pipelines via Chart Constraints", "abstract": "In this paper, we extend methods from Roark and Hollingshead (2008) for reducing the worst-case complexity of a context-free parsing pipeline via hard constraints derived from finite-state tagging pre-processing. Methods from our previous paper achieved quadratic worst-case complexity. We prove here that alternate methods for choosing constraints can achieve either linear or O(N log2 N) complexity. These worst-case bounds on processing are demonstrated to be achieved without reducing the parsing accuracy, in fact in some cases improving the accuracy. The new methods achieve observed performance comparable to the previously published quadratic complexity method. Finally, we demonstrate improved performance by combining complexity bounding methods with additional high precision constraints.", "keyphrases": ["complexity", "chart constraint", "worst-case"]} +{"id": "chen-etal-2020-exclusive", "title": "Exclusive Hierarchical Decoding for Deep Keyphrase Generation", "abstract": "Keyphrase generation (KG) aims to summarize the main ideas of a document into a set of keyphrases. A new setting is recently introduced into this problem, in which, given a document, the model needs to predict a set of keyphrases and simultaneously determine the appropriate number of keyphrases to produce. Previous work in this setting employs a sequential decoding process to generate keyphrases. 
However, such a decoding method ignores the intrinsic hierarchical compositionality existing in the keyphrase set of a document. Moreover, previous work tends to generate duplicated keyphrases, which wastes time and computing resources. To overcome these limitations, we propose an exclusive hierarchical decoding framework that includes a hierarchical decoding process and either a soft or a hard exclusion mechanism. The hierarchical decoding process is to explicitly model the hierarchical compositionality of a keyphrase set. Both the soft and the hard exclusion mechanisms keep track of previously-predicted keyphrases within a window size to enhance the diversity of the generated keyphrases. Extensive experiments on multiple KG benchmark datasets demonstrate the effectiveness of our method to generate less duplicated and more accurate keyphrases.", "keyphrases": ["hierarchical decoding", "keyphrase set", "many work"]} +{"id": "wang-etal-2021-selective", "title": "Selective Knowledge Distillation for Neural Machine Translation", "abstract": "Neural Machine Translation (NMT) models achieve state-of-the-art performance on many translation benchmarks. As an active research field in NMT, knowledge distillation is widely applied to enhance the model's performance by transferring teacher model's knowledge on each training sample. However, previous work rarely discusses the different impacts and connections among these samples, which serve as the medium for transferring teacher knowledge. In this paper, we design a novel protocol that can effectively analyze the different impacts of samples by comparing various samples' partitions. Based on above protocol, we conduct extensive experiments and find that the teacher's knowledge is not the more, the better. Knowledge over specific samples may even hurt the whole performance of knowledge distillation. Finally, to address these issues, we propose two simple yet effective strategies, i.e., batch-level and global-level selections, to pick suitable samples for distillation. We evaluate our approaches on two large-scale machine translation tasks, WMT'14 English-German and WMT'19 Chinese-English. Experimental results show that our approaches yield up to +1.28 and +0.89 BLEU points improvements over the Transformer baseline, respectively.", "keyphrases": ["knowledge distillation", "neural machine translation", "sample"]} +{"id": "xie-etal-2020-exploring", "title": "Exploring Question-Specific Rewards for Generating Deep Questions", "abstract": "Recent question generation (QG) approaches often utilize the sequence-to-sequence framework (Seq2Seq) to optimize the log likelihood of ground-truth questions using teacher forcing. However, this training objective is inconsistent with actual question quality, which is often reflected by certain global properties such as whether the question can be answered by the document. As such, we directly optimize for QG-specific objectives via reinforcement learning to improve question quality. We design three different rewards that target to improve the fluency, relevance, and answerability of generated questions. We conduct both automatic and human evaluations in addition to thorough analysis to explore the effect of each QG-specific reward. We find that optimizing on question-specific rewards generally leads to better performance in automatic evaluation metrics. However, only the rewards that correlate well with human judgement (e.g., relevance) lead to real improvement in question quality. 
Optimizing for the others, especially answerability, introduces incorrect bias to the model, resulting in poorer question quality. The code is publicly available at .", "keyphrases": ["question-specific reward", "fluency", "relevance"]} +{"id": "song-etal-2020-summarizing", "title": "Summarizing Medical Conversations via Identifying Important Utterances", "abstract": "Summarization is an important natural language processing (NLP) task in identifying key information from text. For conversations, the summarization systems need to extract salient contents from spontaneous utterances by multiple speakers. In a special task-oriented scenario, namely medical conversations between patients and doctors, the symptoms, diagnoses, and treatments could be highly important because the nature of such conversation is to find a medical solution to the problem proposed by the patients. Especially considering that current online medical platforms provide millions of publicly available conversations between real patients and doctors, where the patients propose their medical problems and the registered doctors offer diagnosis and treatment, a conversation in most cases could be too long and the key information is hard to locate. Therefore, summarizations of the patients' problems and the doctors' treatments in the conversations can be highly useful, in terms of helping other patients with similar problems have a precise reference for potential medical solutions. In this paper, we focus on medical conversation summarization, using a dataset of medical conversations and corresponding summaries which were crawled from a well-known online healthcare service provider in China. We propose a hierarchical encoder-tagger model (HET) to generate summaries by identifying important utterances (with respect to problem proposing and solving) in the conversations. For the particular dataset used in this study, we show that high-quality summaries can be generated by extracting two types of utterances, namely, problem statements and treatment recommendations. Experimental results demonstrate that HET outperforms strong baselines and models from previous studies, and adding conversation-related features can further improve system performance.", "keyphrases": ["important utterance", "doctor", "dialogue summarization dataset"]} +{"id": "khademi-2020-multimodal", "title": "Multimodal Neural Graph Memory Networks for Visual Question Answering", "abstract": "We introduce a new neural network architecture, Multimodal Neural Graph Memory Networks (MN-GMN), for visual question answering. The MN-GMN uses graph structure with different region features as node attributes and applies a recently proposed powerful graph neural network model, Graph Network (GN), to reason about objects and their interactions in an image. The input module of the MN-GMN generates a set of visual features plus a set of encoded region-grounded captions (RGCs) for the image. The RGCs capture object attributes and their relationships. Two GNs are constructed from the input module using the visual features and encoded RGCs. Each node of the GNs iteratively computes a question-guided contextualized representation of the visual/textual information assigned to it. Then, to combine the information from both GNs, the nodes write the updated representations to an external spatial memory. The final states of the memory cells are fed into an answer module to predict an answer. 
Experiments show MN-GMN rivals the state-of-the-art models on Visual7W, VQA-v2.0, and CLEVR datasets.", "keyphrases": ["visual question answering", "neural network architecture", "caption"]} +{"id": "jarrar-etal-2014-building", "title": "Building a Corpus for Palestinian Arabic: a Preliminary Study", "abstract": "This paper presents preliminary results in building an annotated corpus of the Palestinian Arabic dialect. The corpus consists of about 43K words, stemming from diverse resources. The paper discusses some linguistic facts about the Palestinian dialect, compared with the Modern Standard Arabic, especially in terms of morphological, orthographic, and lexical variations, and suggests some directions to resolve the challenges these differences pose to the annotation goal. Furthermore, we present two pilot studies that investigate whether existing tools for processing Modern Standard Arabic and Egyptian Arabic can be used to speed up the annotation process of our Palestinian Arabic corpus.", "keyphrases": ["palestinian arabic", "dialectal arabic", "curras"]} +{"id": "veale-2016-round", "title": "Round Up The Usual Suspects: Knowledge-Based Metaphor Generation", "abstract": "The elasticity of metaphor as a communication strategy has spurred philosophers to question its ability to mean anything at all. If a metaphor can elicit different responses from different people in varying contexts, how can one say it has a single meaning? Davidson has argued that metaphors have no special or secondary meaning, and must thus mean exactly what they seem to mean on the surface. It is this literally anomalous meaning that directs us to the pragmatic inferences that a speaker actually wishes us to explore. Conveniently, this laissez faire strategy assumes that metaphors are crafted from apt knowledge by speakers with real communicative intent, allowing useful inference to be extracted from their words. But this assumption is not valid in the case of many machine-generated metaphors that merely echo the linguistic form \u2013 but lack the actual substance \u2013 of real metaphors. We present here an open public resource with which a metaphor-generation system can give its figurative efforts real meaning. 1 The Dreamwork of Language Metaphor is the rubber cement of language. Not only does it help us to plug the holes in our lexica, we also use it to fill the gaps in our understanding and to hide the cracks in our arguments. For unlike the brittle plaster of literal language, metaphors are elastic and can readily expand to fit our meanings in a shifting conversational context. This elasticity comes at a price, though one which a master orator is happy to pay: our metaphors are elastic because they are indeterminate, underspecified and vague. Like dreams, our metaphors paint vivid pictures with words, albeit with fuzzy and ill-defined edges. Like dreams, metaphors can be highly suggestive, yet leave us feeling confused and uncertain. If metaphorical images are crisp at their focal points but hazy and dreamlike at their edges, just what is the meaning of any metaphor? The philosopher Donald Davidson (1978) has controversially argued that, like our dreams, our metaphors do not have well-defined meanings, at least not of a kind that an AI researcher, semanticist or computational linguist could squeeze into a symbolic structure. 
Rather, metaphors can move us to think and feel in certain ways, and perhaps act in certain ways, but like dreams, two analysts (a Jungian and a Freudian, say) can hold conflicting views as to how they should be interpreted and as to what they actually \u201cmean\u201d, if anything. So, for Davidson, a metaphor means just what it purports to mean on the surface, that is, what the literal or dictionary senses of its words suggest that it means. This meaning is to be distinguished from the panoply of inferences and insights that might later emerge from a metaphor, for regardless of how salient these may seem, they cannot be considered its definitive meaning. Freud once joked that when it comes to dreams, a cigar is often just a cigar. For Davidson, a figurative cigar is always a cigar, even if the metaphor spurs us to further inference far beyond the realm of tobacco. If all metaphors mean simply what they seem to mean on the surface, and most \u2013 from the very best to the truly awful \u2013 are superficially anomalous, how can we tell the good from the bad by simply looking? Indeed, how can we tell real metaphors from fake metaphors based only on the words they use and their senses in the dictionary? Empirical results seem to bear out Davidson\u2019s intuitions regarding our folk grasp of metaphors. Veale (2015)", "keyphrases": ["knowledge-base", "metaphor generation", "bruce wayne"]} +{"id": "qian-etal-2017-syntax", "title": "Syntax Aware LSTM model for Semantic Role Labeling", "abstract": "In the Semantic Role Labeling (SRL) task, the tree structured dependency relation is rich in syntax information, but it is not well handled by existing models. In this paper, we propose Syntax Aware Long Short-Term Memory (SA-LSTM). The structure of SA-LSTM changes according to dependency structure of each sentence, so that SA-LSTM can model the whole tree structure of dependency relation in an architecture engineering way. Experiments demonstrate that on Chinese Proposition Bank (CPB) 1.0, SA-LSTM improves F1 by 2.06% over ordinary bi-LSTM with feature engineered dependency relation information, and gives state-of-the-art F1 of 79.92%. On English CoNLL 2005 dataset, SA-LSTM brings improvement (2.1%) to bi-LSTM model and also brings slight improvement (0.3%) when added to the state-of-the-art model.", "keyphrases": ["semantic role labeling", "srl", "tree structure"]} +{"id": "hall-2007-k", "title": "K-best Spanning Tree Parsing", "abstract": "This paper introduces a Maximum Entropy dependency parser based on an efficient k-best Maximum Spanning Tree (MST) algorithm. Although recent work suggests that the edge-factored constraints of the MST algorithm significantly inhibit parsing accuracy, we show that generating the 50-best parses according to an edge-factored model has an oracle performance well above the 1-best performance of the best dependency parsers. This motivates our parsing approach, which is based on reranking the k-best parses generated by an edge-factored model. Oracle parse accuracy results are presented for the edge-factored model and 1-best results for the reranker on eight languages (seven from CoNLL-X and English).", "keyphrases": ["parse", "maximum spanning tree", "edge-factored model", "oracle performance"]} +{"id": "hahn-2020-theoretical", "title": "Theoretical Limitations of Self-Attention in Neural Sequence Models", "abstract": "Transformers are emerging as the new workhorse of NLP, showing great success across tasks. 
Unlike LSTMs, transformers process input sequences entirely through self-attention. Previous work has suggested that the computational capabilities of self-attention to process hierarchical structures are limited. In this work, we mathematically investigate the computational power of self-attention to model formal languages. Across both soft and hard attention, we show strong theoretical limitations of the computational abilities of self-attention, finding that it cannot model periodic finite-state languages, nor hierarchical structure, unless the number of layers or heads increases with input length. These limitations seem surprising given the practical success of self-attention and the prominent role assigned to hierarchical structure in linguistics, suggesting that natural language can be approximated well with models that are too weak for the formal languages typically assumed in theoretical linguistics.", "keyphrases": ["limitation", "self-attention", "input length"]} +{"id": "skachkova-etal-2018-closing", "title": "Closing Brackets with Recurrent Neural Networks", "abstract": "Many natural and formal languages contain words or symbols that require a matching counterpart for making an expression well-formed. The combination of opening and closing brackets is a typical example of such a construction. Due to their commonness, the ability to follow such rules is important for language modeling. Currently, recurrent neural networks (RNNs) are extensively used for this task. We investigate whether they are capable of learning the rules of opening and closing brackets by applying them to synthetic Dyck languages that consist of different types of brackets. We provide an analysis of the statistical properties of these languages as a baseline and show strengths and limits of Elman-RNNs, GRUs and LSTMs in experiments on random samples of these languages. In terms of perplexity and prediction accuracy, the RNNs get close to the theoretical baseline in most cases.", "keyphrases": ["capability", "dyck language", "string"]} +{"id": "deschacht-moens-2009-semi", "title": "Semi-supervised Semantic Role Labeling Using the Latent Words Language Model", "abstract": "Semantic Role Labeling (SRL) has proved to be a valuable tool for performing automatic analysis of natural language texts. Currently however, most systems rely on a large training set, which is manually annotated, an effort that needs to be repeated whenever different languages or a different set of semantic roles is used in a certain application. A possible solution for this problem is semi-supervised learning, where a small set of training examples is automatically expanded using unlabeled texts. We present the Latent Words Language Model, which is a language model that learns word similarities from unlabeled texts. We use these similarities for different semi-supervised SRL methods as additional features or to automatically expand a small training set. We evaluate the methods on the PropBank dataset and find that for small training sizes our best performing system achieves an error reduction of 33.27% F1-measure compared to a state-of-the-art supervised baseline.", "keyphrases": ["word similarity", "hidden markov model", "unlabeled data"]} +{"id": "jimeno-yepes-etal-2017-findings", "title": "Findings of the WMT 2017 Biomedical Translation Shared Task", "abstract": "Automatic translation of documents is an important task in many domains, including the biological and clinical domains. 
The second edition of the Biomedical Translation task in the Conference of Machine Translation focused on the automatic translation of biomedical-related documents between English and various European languages. This year, we addressed ten languages: Czech, German, English, French, Hungarian, Polish, Portuguese, Spanish, Romanian and Swedish. Test data included both scientific publications (from the Scielo and EDP Sciences databases) and health-related news (from the Cochrane and UK National Health Service web sites). Seven teams participated in the task, submitting a total of 82 runs. Herein we describe the datasets, participating systems and results of both the automatic and manual evaluation of the translations. We identify two main use cases of machine translation (MT) in the", "keyphrases": ["wmt", "edition", "biomedical translation task"]} +{"id": "herbig-etal-2020-mmpe", "title": "MMPE: A Multi-Modal Interface for Post-Editing Machine Translation", "abstract": "Current advances in machine translation (MT) increase the need for translators to switch from traditional translation to post-editing (PE) of machine-translated text, a process that saves time and reduces errors. This affects the design of translation interfaces, as the task changes from mainly generating text to correcting errors within otherwise helpful translation proposals. Since this paradigm shift offers potential for modalities other than mouse and keyboard, we present MMPE, the first prototype to combine traditional input modes with pen, touch, and speech modalities for PE of MT. The results of an evaluation with professional translators suggest that pen and touch interaction are suitable for deletion and reordering tasks, while they are of limited use for longer insertions. On the other hand, speech and multi-modal combinations of select & speech are considered suitable for replacements and insertions but offer less potential for deletion and reordering. Overall, participants were enthusiastic about the new modalities and saw them as good extensions to mouse & keyboard, but not as a complete substitute.", "keyphrases": ["machine translation", "professional translator", "multi-modal combination"]} +{"id": "li-etal-2018-named", "title": "Named-Entity Tagging and Domain adaptation for Better Customized Translation", "abstract": "Customized translation needs to pay special attention to the target domain terminology, especially the named-entities for the domain. Adding linguistic features to neural machine translation (NMT) has been shown to benefit translation in many studies. In this paper, we further demonstrate that adding a named-entity (NE) feature with named-entity recognition (NER) into the source language produces better translation with NMT. Our experiments show that by just including the different NE classes and boundary tags, we can increase the BLEU score by around 1 to 2 points using the standard test sets from WMT2017. We also show that adding NE tags using NER and applying in-domain adaptation can be combined to further improve customized machine translation.", "keyphrases": ["domain adaptation", "source language", "entity feature"]} +{"id": "och-etal-2003-efficient", "title": "Efficient Search for Interactive Statistical Machine Translation", "abstract": "The goal of interactive machine translation is to improve the productivity of human translators. An interactive machine translation system operates as follows: the automatic system proposes a translation. 
Now, the human user has two options: to accept the suggestion or to correct it. During the post-editing process, the human user is assisted by the interactive system in the following way: the system suggests an extension of the current translation prefix. Then, the user either accepts this extension (completely or partially) or ignores it. The two most important factors of such an interactive system are the quality of the proposed extensions and the response time. Here, we will use a fully fledged translation system to ensure the quality of the proposed extensions. To achieve fast response times, we will use word hypotheses graphs as an efficient search space representation. We will show results of our approach on the Verbmobil task and on the Canadian Hansards task.", "keyphrases": ["translator", "interactive system", "alignment template"]} +{"id": "dickinson-ledbetter-2012-annotating", "title": "Annotating Errors in a Hungarian Learner Corpus", "abstract": "We are developing and annotating a learner corpus of Hungarian, composed of student journals from three different proficiency levels written at Indiana University. Our annotation marks learner errors that are of different linguistic categories, including phonology, morphology, and syntax, but defining the annotation for an agglutinative language presents several issues. First, we must adapt an analysis that is centered on the morpheme rather than the word. Second, and more importantly, we see a need to distinguish errors from secondary corrections. We argue that although certain learner errors require a series of corrections to reach a target form, these secondary corrections, conditioned on those that come before, are our own adjustments that link the learner's productions to the target form and are not representative of the learner's internal grammar. In this paper, we report the annotation scheme and the principles that guide it, as well as examples illustrating its functionality and directions for expansion.", "keyphrases": ["hungarian learner corpus", "different linguistic category", "student essay"]} +{"id": "mirroshandel-nasr-2016-integrating", "title": "Integrating Selectional Constraints and Subcategorization Frames in a Dependency Parser", "abstract": "Statistical parsers are trained on treebanks that are composed of a few thousand sentences. In order to prevent data sparseness and computational complexity, such parsers make strong independence hypotheses on the decisions that are made to build a syntactic tree. These independence hypotheses yield a decomposition of the syntactic structures into small pieces, which in turn prevent the parser from adequately modeling many lexico-syntactic phenomena like selectional constraints and subcategorization frames. Additionally, treebanks are several orders of magnitude too small to observe many lexico-syntactic regularities, such as selectional constraints and subcategorization frames. In this article, we propose a solution to both problems: how to account for patterns that exceed the size of the pieces that are modeled in the parser and how to obtain subcategorization frames and selectional constraints from raw corpora and incorporate them in the parsing process. The method proposed was evaluated on French and on English. The experiments on French showed a decrease of 41.6% of selectional constraint violations and a decrease of 22% of erroneous subcategorization frame assignment. 
These figures are lower for English: 16.21% in the first case and 8.83% in the second.", "keyphrases": ["selectional constraint", "subcategorization frame", "dependency parser"]} +{"id": "park-etal-2022-consistency", "title": "Consistency Training with Virtual Adversarial Discrete Perturbation", "abstract": "Consistency training regularizes a model by enforcing predictions of original and perturbed inputs to be similar. Previous studies have proposed various augmentation methods for the perturbation but are limited in that they are agnostic to the training model. Thus, the perturbed samples may not aid in regularization due to their ease of classification from the model. In this context, we propose an augmentation method of adding a discrete noise that would incur the highest divergence between predictions. This virtual adversarial discrete noise obtained by replacing a small portion of tokens while keeping original semantics as much as possible efficiently pushes a training model's decision boundary. Experimental results show that our proposed method outperforms other consistency training baselines with text editing, paraphrasing, or a continuous noise on semi-supervised text classification tasks and a robustness benchmark.", "keyphrases": ["sample", "consistency training", "training method"]} +{"id": "andrade-etal-2011-learning", "title": "Learning the Optimal Use of Dependency-parsing Information for Finding Translations with Comparable Corpora", "abstract": "Using comparable corpora to find new word translations is a promising approach for extending bilingual dictionaries (semi-) automatically. The basic idea is based on the assumption that similar words have similar contexts across languages. The context of a word is often summarized by using the bag-of-words in the sentence, or by using the words which are in a certain dependency position, e.g. the predecessors and successors. These different context positions are then combined into one context vector and compared across languages. However, previous research makes the (implicit) assumption that these different context positions should be weighted as equally important. Furthermore, only the same context positions are compared with each other, for example the successor position in Spanish is compared with the successor position in English. However, this is not necessarily always appropriate for languages like Japanese and English. To overcome these limitations, we suggest to perform a linear transformation of the context vectors, which is defined by a matrix. We define the optimal transformation matrix by using a Bayesian probabilistic model, and show that it is feasible to find an approximate solution using Markov chain Monte Carlo methods. Our experiments demonstrate that our proposed method constantly improves translation accuracy.", "keyphrases": ["comparable corpora", "new word translation", "context vector"]} +{"id": "bella-etal-2022-language", "title": "Language Diversity: Visible to Humans, Exploitable by Machines", "abstract": "The Universal Knowledge Core (UKC) is a large multilingual lexical database with a focus on language diversity and covering over two thousand languages. The aim of the database, as well as its tools and data catalogue, is to make the abstract notion of linguistic diversity visually understandable for humans and formally exploitable by machines. 
The UKC website lets users explore not only millions of individual words and their meanings, but also phenomena of cross-lingual convergence and divergence, such as shared interlingual meanings, lexicon similarities, cognate clusters, or lexical gaps. The UKC LiveLanguage Catalogue, in turn, provides access to the underlying lexical data in a computer-processable form, ready to be reused in cross-lingual applications.", "keyphrases": ["universal knowledge core", "ukc", "language diversity"]} +{"id": "fung-etal-2016-zara-supergirl", "title": "Zara The Supergirl: An Empathetic Personality Recognition System", "abstract": "Zara the Supergirl is an interactive system that, while having a conversation with a user, uses its built-in sentiment analysis, emotion recognition, facial and speech recognition modules, to exhibit the human-like response of sharing emotions. In addition, at the end of a 5-10 minute conversation with the user, it can give a comprehensive personality analysis based on the user\u2019s interaction with Zara. This is a first prototype that has incorporated a full empathy module, the recognition and response of human emotions, into a spoken language interactive system that enhances human-robot understanding. Zara was shown at the World Economic Forum in Dalian in September 2015.", "keyphrases": ["supergirl", "empathy", "zara", "agent"]} +{"id": "adrian-bejan-harabagiu-2014-unsupervised", "title": "Unsupervised Event Coreference Resolution", "abstract": "The task of event coreference resolution plays a critical role in many natural language processing applications such as information extraction, question answering, and topic detection and tracking. In this article, we describe a new class of unsupervised, nonparametric Bayesian models with the purpose of probabilistically inferring coreference clusters of event mentions from a collection of unlabeled documents. In order to infer these clusters, we automatically extract various lexical, syntactic, and semantic features for each event mention from the document collection. Extracting a rich set of features for each event mention allows us to cast event coreference resolution as the task of grouping together the mentions that share the same features (they have the same participating entities, share the same location, happen at the same time, etc.). Some of the most important challenges posed by the resolution of event coreference in an unsupervised way stem from (a) the choice of representing event mentions through a rich set of features and (b) the ability of modeling events described both within the same document and across multiple documents. Our first unsupervised model that addresses these challenges is a generalization of the hierarchical Dirichlet process. This new extension presents the hierarchical Dirichlet process's ability to capture the uncertainty regarding the number of clustering components and, additionally, takes into account any finite number of features associated with each event mention. Furthermore, to overcome some of the limitations of this extension, we devised a new hybrid model, which combines an infinite latent class model with a discrete time series model. The main advantage of this hybrid model stands in its capability to automatically infer the number of features associated with each event mention from data and, at the same time, to perform an automatic selection of the most informative features for the task of event coreference. 
The evaluation performed for solving both within- and cross-document event coreference shows significant improvements of these models when compared against two baselines for this task.", "keyphrases": ["event coreference", "nonparametric bayesian model", "rich set", "multiple document"]} +{"id": "lucy-gauthier-2017-distributional", "title": "Are Distributional Representations Ready for the Real World? Evaluating Word Vectors for Grounded Perceptual Meaning", "abstract": "Distributional word representation methods exploit word co-occurrences to build compact vector encodings of words. While these representations enjoy widespread use in modern natural language processing, it is unclear whether they accurately encode all necessary facets of conceptual meaning. In this paper, we evaluate how well these representations can predict perceptual and conceptual features of concrete concepts, drawing on two semantic norm datasets sourced from human participants. We find that several standard word representations fail to encode many salient perceptual features of concepts, and show that these deficits correlate with word-word similarity prediction errors. Our analyses provide motivation for grounded and embodied language learning approaches, which may help to remedy these deficits.", "keyphrases": ["conceptual feature", "semantic norm dataset", "text corpora"]} +{"id": "hahn-powell-etal-2017-swanson", "title": "Swanson linking revisited: Accelerating literature-based discovery across domains using a conceptual influence graph", "abstract": "We introduce a modular approach for literature-based discovery consisting of a machine reading and knowledge assembly component that together produce a graph of influence relations (e.g., \u201cA promotes B\u201d) from a collection of publications. A search engine is used to explore direct and indirect influence chains. Query results are substantiated with textual evidence, ranked according to their relevance, and presented in both a table-based view, as well as a network graph visualization. Our approach operates in both domain-specific settings, where there are knowledge bases and ontologies available to guide reading, and in multi-domain settings where such resources are absent. We demonstrate that this deep reading and search system reduces the effort needed to uncover \u201cundiscovered public knowledge\u201d, and that with the aid of this tool a domain expert was able to drastically reduce her model building time from months to two days.", "keyphrases": ["literature-based discovery", "influence relation", "publication"]} +{"id": "zhang-etal-2014-triple", "title": "Triple based Background Knowledge Ranking for Document Enrichment", "abstract": "Document enrichment is the task of retrieving additional knowledge from external resources over what is available through the source document. This task is essential because of the phenomenon that text is generally replete with gaps and ellipses since authors assume a certain amount of background knowledge. The recovery of these gaps is intuitively useful for better understanding of document. Conventional document enrichment techniques usually rely on Wikipedia which has great coverage but less accuracy, or Ontology which has great accuracy but less coverage. In this study, we propose a document enrichment framework which automatically extracts \u201cargument1,predicate,argument2\u201d triples from any text corpus as background knowledge, so as to ensure compatibility with any resource (e.g. 
news text, ontology, and on-line encyclopedia) and improve the enriching accuracy. We first incorporate source document and background knowledge together into a triple based document-level graph and then propose a global iterative ranking model to propagate relevance score and select the most relevant knowledge triple. We evaluate our model as a ranking problem and compute the MAP and P@N scores to validate the ranking result. Our final results, a MAP score of 0.676 and a P@20 score of 0.417, outperform a strong baseline based on a search engine by 0.182 in MAP and 0.04 in P@20.", "keyphrases": ["background knowledge", "document enrichment", "search engine"]} +{"id": "rottmann-vogel-2007-word", "title": "Word reordering in statistical machine translation with a POS-based distortion model", "abstract": "In this paper we describe a word reordering strategy for statistical machine translation that reorders the source side based on Part of Speech (POS) information. Reordering rules are learned from the word aligned corpus. Reordering is integrated into the decoding process by constructing a lattice, which contains all word reorderings according to the reordering rules. Probabilities are assigned to the different reorderings. On this lattice monotone decoding is performed. This reordering strategy is compared with our previous reordering strategy, which looks at all permutations within a sliding window. We extend reordering rules by adding context information. Phrase translation pairs are learned from the original corpus and from a reordered source corpus to better capture the reordered word sequences at decoding time. Results are presented for English \u2192 Spanish and German \u2194 English translations, using the European Parliament Plenary Sessions corpus.", "keyphrases": ["statistical machine translation", "source side", "pos"]} +{"id": "lopez-resnik-2006-word", "title": "Word-Based Alignment, Phrase-Based Translation: What's the Link?", "abstract": "State-of-the-art statistical machine translation is based on alignments between phrases \u2013 sequences of words in the source and target sentences. The learning step in these systems often relies on alignments between words. It is often assumed that the quality of this word alignment is critical for translation. However, recent results suggest that the relationship between alignment quality and translation quality is weaker than previously thought. We investigate this question directly, comparing the impact of high-quality alignments with a carefully constructed set of degraded alignments. In order to tease apart various interactions, we report experiments investigating the impact of alignments on different aspects of the system. Our results confirm a weak correlation, but they also illustrate that more data and better feature engineering may be more beneficial than better alignment.", "keyphrases": ["translation quality", "feature engineering", "alignment performance", "aer", "large gain"]} +{"id": "toshniwal-etal-2020-learning", "title": "Learning to Ignore: Long Document Coreference with Bounded Memory Neural Networks", "abstract": "Long document coreference resolution remains a challenging task due to the large memory and runtime requirements of current models. Recent work doing incremental coreference resolution using just the global representation of entities shows practical benefits but requires keeping all entities in memory, which can be impractical for long documents. 
We argue that keeping all entities in memory is unnecessary, and we propose a memory-augmented neural network that tracks only a small bounded number of entities at a time, thus guaranteeing a linear runtime in length of document. We show that (a) the model remains competitive with models with high memory and computational requirements on OntoNotes and LitBank, and (b) the model learns an efficient memory management strategy easily outperforming a rule-based strategy.", "keyphrases": ["memory", "coreference resolution", "mention"]} +{"id": "salloum-habash-2012-elissa", "title": "Elissa: A Dialectal to Standard Arabic Machine Translation System", "abstract": "Modern Standard Arabic (MSA) has a wealth of natural language processing (NLP) tools and resources. In comparison, resources for dialectal Arabic (DA), the unstandardized spoken varieties of Arabic, are still lacking. We present Elissa, a machine translation (MT) system from DA to MSA. Elissa (version 1.0) employs a rule-based approach that relies on morphological analysis, morphological transfer rules and dictionaries in addition to language models to produce MSA paraphrases of dialectal sentences. Elissa can be employed as a general preprocessor for dialectal Arabic when using MSA NLP tools.", "keyphrases": ["dialectal", "arabic", "english machine translation"]} +{"id": "specia-etal-2012-semeval", "title": "SemEval-2012 Task 1: English Lexical Simplification", "abstract": "We describe the English Lexical Simplification task at SemEval-2012. This is the first time such a shared task has been organized and its goal is to provide a framework for the evaluation of systems for lexical simplification and foster research on context-aware lexical simplification approaches. The task requires that annotators and systems rank a number of alternative substitutes -- all deemed adequate -- for a target word in context, according to how \"simple\" these substitutes are. The notion of simplicity is biased towards non-native speakers of English. 
Out of nine participating systems, the best scoring ones combine context-dependent and context-independent information, with the strongest individual contribution given by the frequency of the substitute regardless of its context.", "keyphrases": ["english lexical simplification", "non-native speaker", "semeval task", "difficulty"]} +{"id": "liu-etal-2009-automated", "title": "Automated Suggestions for Miscollocations", "abstract": "One of the most common and persistent error types in second language writing is collocation errors, such as learn knowledge instead of gain or acquire knowledge, or make damage rather than cause damage. In this work-in-progress report, we propose a probabilistic model for suggesting corrections to lexical collocation errors. The probabilistic model incorporates three features: word association strength (MI), semantic similarity (via WordNet) and the notion of shared collocations (or intercollocability). The results suggest that the combination of all three features outperforms any single feature or any combination of two features.", "keyphrases": ["miscollocation", "wordnet", "learner"]} +{"id": "jiang-etal-2020-multi-domain", "title": "Multi-Domain Neural Machine Translation with Word-Level Adaptive Layer-wise Domain Mixing", "abstract": "Many multi-domain neural machine translation (NMT) models achieve knowledge transfer by enforcing one encoder to learn shared embedding across domains. However, this design lacks adaptation to individual domains. To overcome this limitation, we propose a novel multi-domain NMT model using individual modules for each domain, on which we apply word-level, adaptive and layer-wise domain mixing. We first observe that words in a sentence are often related to multiple domains. Hence, we assume each word has a domain proportion, which indicates its domain preference. Then word representations are obtained by mixing their embedding in individual domains based on their domain proportions. We show this can be achieved by carefully designing multi-head dot-product attention modules for different domains, and eventually taking weighted averages of their parameters by word-level layer-wise domain proportions. Through this, we can achieve effective domain knowledge sharing and capture fine-grained domain-specific knowledge as well. Our experiments show that our proposed model outperforms existing ones in several NMT tasks.", "keyphrases": ["neural machine translation", "domain mixing", "head"]} +{"id": "rimell-clark-2008-adapting", "title": "Adapting a Lexicalized-Grammar Parser to Contrasting Domains", "abstract": "Most state-of-the-art wide-coverage parsers are trained on newspaper text and suffer a loss of accuracy in other domains, making parser adaptation a pressing issue. In this paper we demonstrate that a CCG parser can be adapted to two new domains, biomedical text and questions for a QA system, by using manually-annotated training data at the POS and lexical category levels only. This approach achieves parser accuracy comparable to that on newspaper data without the need for annotated parse trees in the new domain.
We find that retraining at the lexical category level yields a larger performance increase for questions than for biomedical text and analyze the two datasets to investigate why different domains might behave differently for parser adaptation.", "keyphrases": ["ccg parser", "biomedical text", "lexical category"]} +{"id": "chen-etal-2016-guided", "title": "Guided Alignment Training for Topic-Aware Neural Machine Translation", "abstract": "In this paper, we propose an effective way for biasing the attention mechanism of a sequence-to-sequence neural machine translation (NMT) model towards the well-studied statistical word alignment models. We show that our novel guided alignment training approach improves translation quality on real-life e-commerce texts consisting of product titles and descriptions, overcoming the problems posed by many unknown words and a large type/token ratio. We also show that meta-data associated with input texts such as topic or category information can significantly improve translation quality when used as an additional signal to the decoder part of the network. With both novel features, the BLEU score of the NMT system on a product title set improves from 18.6 to 21.3%. Even larger MT quality gains are obtained through domain adaptation of a general domain NMT system to e-commerce data. The developed NMT system also performs well on the IWSLT speech translation task, where an ensemble of four variant systems outperforms the phrase-based baseline by 2.1% BLEU absolute.", "keyphrases": ["neural machine translation", "translation quality", "topic vector"]} +{"id": "hoory-etal-2021-learning-evaluating", "title": "Learning and Evaluating a Differentially Private Pre-trained Language Model", "abstract": "Contextual language models have led to significantly better results, especially when pre-trained on the same data as the downstream task. While this additional pre-training usually improves performance, it can lead to information leakage and therefore risks the privacy of individuals mentioned in the training data. One method to guarantee the privacy of such individuals is to train a differentially-private language model, but this usually comes at the expense of model performance. Also, in the absence of a differentially private vocabulary training, it is not possible to modify the vocabulary to fit the new data, which might further degrade results. In this work we bridge these gaps, and provide guidance to future researchers and practitioners on how to improve privacy while maintaining good model performance. We introduce a novel differentially private word-piece algorithm, which allows training a tailored domain-specific vocabulary while maintaining privacy. We then experiment with entity extraction tasks from clinical notes, and demonstrate how to train a differentially private pre-trained language model (i.e., BERT) with a privacy guarantee of \u03f5=1.1 and with only a small degradation in performance. Finally, as it is hard to tell given a privacy parameter \u03f5 what was the effect on the trained representation, we present experiments showing that the trained model does not memorize private information.", "keyphrases": ["pre-trained language model", "batch size", "bert model"]} +{"id": "ashby-weir-2020-leveraging", "title": "Leveraging HTML in Free Text Web Named Entity Recognition", "abstract": "HTML tags are typically discarded in free text Named Entity Recognition from Web pages. 
We investigate whether these discarded tags might be used to improve NER performance. We compare Text+Tags sentences with their Text-Only equivalents, over five datasets, two free text segmentation granularities and two NER models. We find an increased F1 performance for Text+Tags of between 0.9% and 13.2% over all datasets, variants and models. This performance increase, over datasets of varying entity types, HTML density and construction quality, indicates our method is flexible and adaptable. These findings imply that a similar technique might be of use in other Web-aware NLP tasks, including the enrichment of deep language models.", "keyphrases": ["entity recognition", "text-only equivalent", "language model"]} +{"id": "vulic-etal-2020-probing", "title": "Probing Pretrained Language Models for Lexical Semantics", "abstract": "The success of large pretrained language models (LMs) such as BERT and RoBERTa has sparked interest in probing their representations, in order to unveil what types of knowledge they implicitly capture. While prior research focused on morphosyntactic, semantic, and world knowledge, it remains unclear to which extent LMs also derive lexical type-level knowledge from words in context. In this work, we present a systematic empirical analysis across six typologically diverse languages and five different lexical tasks, addressing the following questions: 1) How do different lexical knowledge extraction strategies (monolingual versus multilingual source LM, out-of-context versus in-context encoding, inclusion of special tokens, and layer-wise averaging) impact performance? How consistent are the observed effects across tasks and languages? 2) Is lexical knowledge stored in few parameters, or is it scattered throughout the network? 3) How do these representations fare against traditional static word vectors in lexical tasks? 4) Does the lexical information emerging from independently trained monolingual LMs display latent similarities? Our main results indicate patterns and best practices that hold universally, but also point to prominent variations across languages and tasks. Moreover, we validate the claim that lower Transformer layers carry more type-level lexical knowledge, but also show that this knowledge is distributed across multiple layers.", "keyphrases": ["language model", "previous study", "different layer"]} +{"id": "crego-habash-2008-using", "title": "Using Shallow Syntax Information to Improve Word Alignment and Reordering for SMT", "abstract": "We describe two methods to improve SMT accuracy using shallow syntax information. First, we use chunks to refine the set of word alignments typically used as a starting point in SMT systems. Second, we extend an N-gram-based SMT system with chunk tags to better account for long-distance reorderings. Experiments are reported on an Arabic-English task showing significant improvements. A human error analysis indicates that long-distance reorderings are captured effectively.", "keyphrases": ["shallow syntax information", "reordering", "smt system"]} +{"id": "wan-etal-2006-using", "title": "Using Dependency-Based Features to Take the 'Para-farce' out of Paraphrase", "abstract": "As research in text-to-text paraphrase generation progresses, it has the potential to improve the quality of generated text. However, the use of paraphrase generation methods creates a secondary problem. We must ensure that generated novel sentences are not inconsistent with the text from which they were generated.
We propose that a machine learning approach be used to filter out inconsistent novel sentences, or False Paraphrases. To train such a filter, we use the Microsoft Research Paraphrase corpus and investigate whether features based on syntactic dependencies can aid us in this task. Like Finch et al. (2005), we obtain a classification accuracy of 75.6%, the best known performance for this corpus. We also examine the strengths and weaknesses of dependency-based features and conclude that they may be useful in more accurately classifying cases of False Paraphrase.", "keyphrases": ["paraphrase", "dependency relation", "predicate-argument relation"]} +{"id": "de-gispert-etal-2010-hierarchical-phrase", "title": "Hierarchical Phrase-Based Translation with Weighted Finite-State Transducers and Shallow-n Grammars", "abstract": "In this article we describe HiFST, a lattice-based decoder for hierarchical phrase-based translation and alignment. The decoder is implemented with standard Weighted Finite-State Transducer (WFST) operations as an alternative to the well-known cube pruning procedure. We find that the use of WFSTs rather than k-best lists requires less pruning in translation search, resulting in fewer search errors, better parameter optimization, and improved translation performance. The direct generation of translation lattices in the target language can improve subsequent rescoring procedures, yielding further gains when applying long-span language models and Minimum Bayes Risk decoding. We also provide insights as to how to control the size of the search space defined by hierarchical rules. We show that shallow-n grammars, low-level rule catenation, and other search constraints can help to match the power of the translation system to specific language pairs.", "keyphrases": ["finite-state transducer", "translation performance", "hierarchical rule"]} +{"id": "liu-etal-2009-unsupervised", "title": "Unsupervised Approaches for Automatic Keyword Extraction Using Meeting Transcripts", "abstract": "This paper explores several unsupervised approaches to automatic keyword extraction using meeting transcripts. In the TFIDF (term frequency, inverse document frequency) weighting framework, we incorporated part-of-speech (POS) information, word clustering, and sentence salience score. We also evaluated a graph-based approach that measures the importance of a word based on its connection with other sentences or words. The system performance is evaluated in different ways, including comparison to human annotated keywords using F-measure and a weighted score relative to the oracle system performance, as well as a novel alternative human evaluation. Our results have shown that the simple unsupervised TFIDF approach performs reasonably well, and the additional information from POS and sentence score helps keyword extraction. However, the graph method is less effective for this domain. Experiments were also performed using speech recognition output and we observed degradation and different patterns compared to human transcripts.", "keyphrases": ["automatic keyword extraction", "transcript", "part-of-speech"]} +{"id": "lyu-etal-2008-acoustic", "title": "Acoustic Model Optimization for Multilingual Speech Recognition", "abstract": "Due to abundant resources not always being available for resource-limited languages, training an acoustic model with unbalanced training data for multilingual speech recognition is an interesting research issue.
In this paper, we propose a three-step data-driven phone clustering method to train a multilingual acoustic model. The first step is to obtain a clustering rule of context independent phone models driven from a well-trained acoustic model using a similarity measurement. For the second step, we further clustered the sub-phone units using hierarchical agglomerative clustering with delta Bayesian information criteria according to the clustering rules. Then, we chose a parametric modeling technique -- model complexity selection -- to adjust the number of Gaussian components in a Gaussian mixture for optimizing the acoustic model between the new phoneme set and the available training data. We used an unbalanced trilingual corpus where the percentages of the amounts of the training sets for Mandarin, Taiwanese, and Hakka are about 60%, 30%, and 10%, respectively. The experimental results show that the proposed sub-phone clustering approach reduced relative syllable error rate by 4.5% over the best result of the decision tree based approach and 13.5% over the best result of the knowledge-based approach.", "keyphrases": ["multilingual speech recognition", "phone clustering method", "taiwanese"]} +{"id": "levy-manning-2004-deep", "title": "Deep Dependencies from Context-Free Statistical Parsers: Correcting the Surface Dependency Approximation", "abstract": "We present a linguistically-motivated algorithm for reconstructing nonlocal dependency in broad-coverage context-free parse trees derived from treebanks. We use an algorithm based on loglinear classifiers to augment and reshape context-free trees so as to reintroduce underlying nonlocal dependencies lost in the context-free approximation. We find that our algorithm compares favorably with prior work on English using an existing evaluation metric, and also introduce and argue for a new dependency-based evaluation metric. By this new evaluation metric our algorithm achieves 60% error reduction on gold-standard input trees and 5% error reduction on state-of-the-art machine-parsed input trees, when compared with the best previous work. We also present the first results on non-local dependency reconstruction for a language other than English, comparing performance on English and German. Our new evaluation metric quantitatively corroborates the intuition that in a language with freer word order, the surface dependencies in context-free parse trees are a poorer approximation to underlying dependency structure.", "keyphrases": ["surface dependency approximation", "long-distance dependency", "body"]} +{"id": "li-etal-2019-multilingual", "title": "Multilingual Entity, Relation, Event and Human Value Extraction", "abstract": "This paper demonstrates a state-of-the-art end-to-end multilingual (English, Russian, and Ukrainian) knowledge extraction system that can perform entity discovery and linking, relation extraction, event extraction, and coreference. It extracts and aggregates knowledge elements across multiple languages and documents as well as provides visualizations of the results along three dimensions: temporal (as displayed in an event timeline), spatial (as displayed in an event heatmap), and relational (as displayed in entity-relation networks). For our system to further support users' analyses of causal sequences of events in complex situations, we also integrate a wide range of human moral value measures, independently derived from region-based survey, into the event heatmap.
This system is publicly available as a docker container and a live demo.", "keyphrases": ["russian", "linking", "relation extraction"]} +{"id": "roesiger-etal-2018-bridging", "title": "Bridging resolution: Task definition, corpus resources and rule-based experiments", "abstract": "Recent work on bridging resolution has so far been based on the corpus ISNotes (Markert et al. 2012), as this was the only corpus available with unrestricted bridging annotation. Hou et al. 2014's rule-based system currently achieves state-of-the-art performance on this corpus, as learning-based approaches suffer from the lack of available training data. Recently, a number of new corpora with bridging annotations have become available. To test the generalisability of the approach by Hou et al. 2014, we apply a slightly extended rule-based system to these corpora. Besides the expected out-of-domain effects, we also observe low performance on some of the in-domain corpora. Our analysis shows that this is the result of two very different phenomena being defined as bridging, namely referential and lexical bridging. We also report that filtering out gold or predicted coreferent anaphors before applying the bridging resolution system helps improve bridging resolution.", "keyphrases": ["definition", "anaphor", "bridging resolution"]} +{"id": "li-etal-2010-fast", "title": "Fast-Champollion: A Fast and Robust Sentence Alignment Algorithm", "abstract": "Sentence-level aligned parallel texts are important resources for a number of natural language processing (NLP) tasks and applications such as statistical machine translation and cross-language information retrieval. With the rapid growth of online parallel texts, efficient and robust sentence alignment algorithms become increasingly important. In this paper, we propose a fast and robust sentence alignment algorithm, i.e., Fast-Champollion, which employs a combination of both length-based and lexicon-based algorithm. By optimizing the process of splitting the input bilingual texts into small fragments for alignment, Fast-Champollion, as our extensive experiments show, is 4.0 to 5.1 times as fast as the current baseline methods such as Champollion (Ma, 2006) on short texts and achieves about 39.4 times as fast on long texts, and Fast-Champollion is as robust as Champollion.", "keyphrases": ["lexicon-based algorithm", "fast-champollion", "speed"]} +{"id": "al-twairesh-etal-2016-arasenti", "title": "AraSenTi: Large-Scale Twitter-Specific Arabic Sentiment Lexicons", "abstract": "Sentiment Analysis (SA) is an active research area nowadays due to the tremendous interest in aggregating and evaluating opinions being disseminated by users on the Web. SA of English has been thoroughly researched; however research on SA of Arabic has just flourished. Twitter is considered a powerful tool for disseminating information and a rich resource for opinionated text containing views on many different topics. In this paper we attempt to bridge a gap in Arabic SA of Twitter which is the lack of sentiment lexicons that are tailored for the informal language of Twitter. We generate two lexicons extracted from a large dataset of tweets using two approaches and evaluate their use in a simple lexicon based method. The evaluation is performed on internal and external datasets. The performance of these automatically generated lexicons was very promising, albeit the simple method used for classification.
The best F-score obtained was 89.58% on the internal dataset and 63.1-64.7% on the external datasets.", "keyphrases": ["sentiment analysis", "twitter", "several nlp task"]} +{"id": "rubino-etal-2013-dcu", "title": "DCU-Symantec at the WMT 2013 Quality Estimation Shared Task", "abstract": "We describe the two systems submitted by the DCU-Symantec team to Task 1.1 of the WMT 2013 Shared Task on Quality Estimation for Machine Translation. Task 1.1 involves estimating post-editing effort for English-Spanish translation pairs in the news domain. The two systems use a wide variety of features, of which the most effective are the word-alignment, n-gram frequency, language model, POS-tag-based and pseudo-reference ones. Both systems perform at a similarly high level in the two tasks of scoring and ranking translations, although there is some evidence that the systems are over-fitting to the training data.", "keyphrases": ["wmt", "quality estimation", "machine translation"]} +{"id": "glavas-etal-2014-hieve", "title": "HiEve: A Corpus for Extracting Event Hierarchies from News Stories", "abstract": "In news stories, event mentions denote real-world events of different spatial and temporal granularity. Narratives in news stories typically describe some real-world event of coarse spatial and temporal granularity along with its subevents. In this work, we present HiEve, a corpus for recognizing relations of spatiotemporal containment between events. In HiEve, the narratives are represented as hierarchies of events based on relations of spatiotemporal containment (i.e., superevent\u2015subevent relations). We describe the process of manual annotation of HiEve. Furthermore, we build a supervised classifier for recognizing spatiotemporal containment between events to serve as a baseline for future research. Preliminary experimental results are encouraging, with classifier performance reaching 58% F1-score, only 11% less than the inter-annotator agreement.", "keyphrases": ["event mention", "spatiotemporal containment", "hieve"]} +{"id": "elazar-etal-2021-amnesic", "title": "Amnesic Probing: Behavioral Explanation with Amnesic Counterfactuals", "abstract": "A growing body of work makes use of probing in order to investigate the working of neural models, often considered black boxes. Recently, an ongoing debate emerged surrounding the limitations of the probing paradigm. In this work, we point out the inability to infer behavioral conclusions from probing results, and offer an alternative method that focuses on how the information is being used, rather than on what information is encoded. Our method, Amnesic Probing, follows the intuition that the utility of a property for a given task can be assessed by measuring the influence of a causal intervention that removes it from the representation. Equipped with this new analysis tool, we can ask questions that were not possible before, for example, is part-of-speech information important for word prediction? We perform a series of analyses on BERT to answer these types of questions.
Our findings demonstrate that conventional probing performance is not correlated to task importance, and we call for increased scrutiny of claims that draw behavioral or causal conclusions from probing results.", "keyphrases": ["bert", "amnesic probing", "change"]} +{"id": "lin-och-2004-automatic", "title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statistics", "abstract": "In this paper we describe two new objective automatic evaluation methods for machine translation. The first method is based on longest common subsequence between a candidate translation and a set of reference translations. Longest common subsequence takes into account sentence level structure similarity naturally and identifies longest co-occurring in-sequence n-grams automatically. The second method relaxes strict n-gram matching to skip-bigram matching. Skip-bigram is any pair of words in their sentence order. Skip-bigram cooccurrence statistics measure the overlap of skip-bigrams between a candidate translation and a set of reference translations. The empirical results show that both methods correlate with human judgments very well in both adequacy and fluency.", "keyphrases": ["subsequence", "sentence order", "smoothed bleu"]} +{"id": "peng-etal-2019-distantly", "title": "Distantly Supervised Named Entity Recognition using Positive-Unlabeled Learning", "abstract": "In this work, we explore the way to perform named entity recognition (NER) using only unlabeled data and named entity dictionaries. To this end, we formulate the task as a positive-unlabeled (PU) learning problem and accordingly propose a novel PU learning algorithm to perform the task. We prove that the proposed algorithm can unbiasedly and consistently estimate the task loss as if there is fully labeled data. A key feature of the proposed method is that it does not require the dictionaries to label every entity within a sentence, and it even does not require the dictionaries to label all of the words constituting an entity. This greatly reduces the requirement on the quality of the dictionaries and makes our method generalize well with quite simple dictionaries. Empirical studies on four public NER datasets demonstrate the effectiveness of our proposed method. We have published the source code at .", "keyphrases": ["positive-unlabeled learning", "ner task loss", "entity type"]} +{"id": "lee-etal-2022-direct", "title": "Direct Speech-to-Speech Translation With Discrete Units", "abstract": "We present a direct speech-to-speech translation (S2ST) model that translates speech from one language to speech in another language without relying on intermediate text generation. We tackle the problem by first applying a self-supervised discrete speech encoder on the target speech and then training a sequence-to-sequence speech-to-unit translation (S2UT) model to predict the discrete representations of the target speech. When target text transcripts are available, we design a joint speech and text training framework that enables the model to generate dual modality output (speech and text) simultaneously in the same inference pass. Experiments on the Fisher Spanish-English dataset show that the proposed framework yields improvement of 6.7 BLEU compared with a baseline direct S2ST model that predicts spectrogram features.
When trained without any text transcripts, our model performance is comparable to models that predict spectrograms and are trained with text supervision, showing the potential of our system for translation between unwritten languages.", "keyphrases": ["text generation", "s2ut", "discrete representation", "direct speech-to-speech translation"]} +{"id": "zhang-etal-2007-tree", "title": "A tree-to-tree alignment-based model for statistical machine translation", "abstract": "This paper presents a novel statistical machine translation (SMT) model that uses tree-to-tree alignment between a source parse tree and a target parse tree. The model is formally a probabilistic synchronous tree-substitution grammar (STSG) that is a collection of aligned elementary tree pairs with mapping probabilities (which are automatically learned from word-aligned bi-parsed parallel texts). Unlike previous syntax-based SMT models, this new model supports multi-level global structure distortion of the tree topology and can fully utilize the source and target parse tree structure features, which gives our system more expressive power and flexibility. The experimental results on the HIT bi-parsed text show that our method performs significantly better than Pharaoh, a state-of-the-art phrase-based SMT system, and other syntax-based methods, such as the synchronous CFG-based method on the small dataset.", "keyphrases": ["statistical machine translation", "stsg", "smt system"]} +{"id": "buechel-hahn-2018-representation", "title": "Representation Mapping: A Novel Approach to Generate High-Quality Multi-Lingual Emotion Lexicons", "abstract": "In the past years, sentiment analysis has increasingly shifted attention to representational frameworks more expressive than semantic polarity (being positive, negative or neutral). However, these richer formats (like Basic Emotions or Valence-Arousal-Dominance, and variants therefrom), rooted in psychological research, tend to proliferate the number of representation schemes for emotion encoding. Thus, a large amount of representationally incompatible emotion lexicons has been developed by various research groups adopting one or the other emotion representation format. As a consequence, the reusability of these resources decreases as does the comparability of systems using them. In this paper, we propose to solve this dilemma by methods and tools which map different representation formats onto each other for the sake of mutual compatibility and interoperability of language resources. We present the first large-scale investigation of such representation mappings for four typologically diverse languages and find evidence that our approach produces (near-)gold quality emotion lexicons, even in cross-lingual settings. Finally, we use our models to create new lexicons for eight typologically diverse languages.", "keyphrases": ["emotion lexicon", "cross-lingual setting", "representation mapping"]} +{"id": "litvak-last-2008-graph", "title": "Graph-Based Keyword Extraction for Single-Document Summarization", "abstract": "In this paper, we introduce and compare two novel approaches, supervised and unsupervised, for identifying the keywords to be used in extractive summarization of text documents. Both our approaches are based on the graph-based syntactic representation of text and web documents, which enhances the traditional vector-space model by taking into account some structural document features.
In the supervised approach, we train classification algorithms on a summarized collection of documents with the purpose of inducing a keyword identification model. In the unsupervised approach, we run the HITS algorithm on document graphs under the assumption that the top-ranked nodes should represent the document keywords. Our experiments on a collection of benchmark summaries show that given a set of summarized training documents, the supervised classification provides the highest keyword identification accuracy, while the highest F-measure is reached with a simple degree-based ranking. In addition, it is sufficient to perform only the first iteration of HITS rather than running it to its convergence.", "keyphrases": ["summarization", "hits algorithm", "top-ranked node"]} +{"id": "li-etal-2016-fast", "title": "Fast Coupled Sequence Labeling on Heterogeneous Annotations via Context-aware Pruning", "abstract": "The recently proposed coupled sequence labeling is shown to be able to effectively exploit multiple labeled data with heterogeneous annotations but suffer from a severe inefficiency problem due to the large bundled tag space (Li et al., 2015). In their case study of part-of-speech (POS) tagging, Li et al. (2015) manually design context-free tag-to-tag mapping rules with a lot of effort to reduce the tag space. This paper proposes a context-aware pruning approach that performs token-wise constraints on the tag space based on contextual evidences, making the coupled approach efficient enough to be applied to the more complex task of joint word segmentation (WS) and POS tagging for the first time. Experiments show that using the large-scale People Daily as auxiliary heterogeneous data, the coupled approach can improve F-score by 95.55 \u2212 94.88 = 0.67% on WS, and by 90.58 \u2212 89.49 = 1.09% on joint WS&POS on Penn Chinese Treebank. All codes are released at http://hlt.suda.edu.cn/~zhli .", "keyphrases": ["coupled sequence labeling", "joint word segmentation", "heterogeneous data"]} +{"id": "vasserman-2004-identifying", "title": "Identifying Chemical Names in Biomedical Text: an Investigation of Substring Co-occurrence Based Approaches", "abstract": "We investigate various strategies for finding chemicals in biomedical text using substring co-occurrence information. The goal is to build a system from readily available data with minimal human involvement. Our models are trained from a dictionary of chemical names and general biomedical text. We investigated several strategies including Naive Bayes classifiers and several types of N-gram models. We introduced a new way of interpolating N-grams that does not require tuning any parameters. We also found the task to be similar to Language Identification.", "keyphrases": ["chemical", "biomedical text", "n-gram"]} +{"id": "zhao-etal-2015-improving", "title": "Improving Chinese Grammatical Error Correction with Corpus Augmentation and Hierarchical Phrase-based Statistical Machine Translation", "abstract": "In this study, we describe our system submitted to the 2nd Workshop on Natural Language Processing Techniques for Educational Applications (NLP-TEA-2) shared task on Chinese grammatical error diagnosis (CGED). We use a statistical machine translation method already applied to several similar tasks (Brockett et al., 2006; Chiu et al., 2013; Zhao et al., 2014). In this research, we examine corpus-augmentation and explore alternative translation models including syntax-based and hierarchical phrase-based models.
Finally, we show variations using different combinations of these factors.", "keyphrases": ["grammatical error correction", "corpus-augmentation", "hierarchical phrase-based model"]} +{"id": "yaghoobzadeh-etal-2021-increasing", "title": "Increasing Robustness to Spurious Correlations using Forgettable Examples", "abstract": "Neural NLP models tend to rely on spurious correlations between labels and input features to perform their tasks. Minority examples, i.e., examples that contradict the spurious correlations present in the majority of data points, have been shown to increase the out-of-distribution generalization of pre-trained language models. In this paper, we first propose using example forgetting to find minority examples without prior knowledge of the spurious correlations present in the dataset. Forgettable examples are instances either learned and then forgotten during training or never learned. We show empirically how these examples are related to minorities in our training sets. Then, we introduce a new approach to robustify models by fine-tuning our models twice, first on the full training data and second on the minorities only. We obtain substantial improvements in out-of-distribution generalization when applying our approach to the MNLI, QQP and FEVER datasets.", "keyphrases": ["spurious correlation", "forgettable example", "minority", "prior knowledge"]} +{"id": "lin-ji-2019-attentive", "title": "An Attentive Fine-Grained Entity Typing Model with Latent Type Representation", "abstract": "We propose a fine-grained entity typing model with a novel attention mechanism and a hybrid type classifier. We advance existing methods in two aspects: feature extraction and type prediction. To capture richer contextual information, we adopt contextualized word representations instead of fixed word embeddings used in previous work. In addition, we propose a two-step mention-aware attention mechanism to enable the model to focus on important words in mentions and contexts. We also present a hybrid classification method beyond binary relevance to exploit type inter-dependency with latent type representation. Instead of independently predicting each type, we predict a low-dimensional vector that encodes latent type features and reconstruct the type vector from this latent representation. Experiment results on multiple data sets show that our model significantly advances the state-of-the-art on fine-grained entity typing, obtaining up to 6.1% and 5.5% absolute gains in macro averaged F-score and micro averaged F-score respectively.", "keyphrases": ["fine-grained entity", "latent type representation", "mention"]} +{"id": "chu-etal-2013-accurate", "title": "Accurate Parallel Fragment Extraction from Quasi\u2013Comparable Corpora using Alignment Model and Translation Lexicon", "abstract": "Although parallel sentences rarely exist in quasi\u2010comparable corpora, there could be parallel fragments that are also helpful for statistical machine translation (SMT). Previous studies cannot accurately extract parallel fragments from quasi\u2010comparable corpora. To solve this problem, we propose an accurate parallel fragment extraction system that uses an alignment model to locate the parallel fragment candidates, and uses an accurate lexicon filter to identify the truly parallel ones. Experimental results indicate that our system can accurately extract parallel fragments, and our proposed method significantly outperforms a state\u2010of\u2010the\u2010art approach. 
Furthermore, we investigate the factors that may affect the performance of our system in detail.", "keyphrases": ["alignment model", "parallel sentence", "quasi-comparable corpora"]} +{"id": "kim-etal-2019-image", "title": "Image Captioning with Very Scarce Supervised Data: Adversarial Semi-Supervised Learning Approach", "abstract": "Constructing an organized dataset comprised of a large number of images and several captions for each image is a laborious task, which requires vast human effort. On the other hand, collecting a large number of images and sentences separately may be immensely easier. In this paper, we develop a novel data-efficient semi-supervised framework for training an image captioning model. We leverage massive unpaired image and caption data by learning to associate them. To this end, our proposed semi-supervised learning method assigns pseudo-labels to unpaired samples via Generative Adversarial Networks to learn the joint distribution of image and caption. To evaluate, we construct scarcely-paired COCO dataset, a modified version of MS COCO caption dataset. The empirical results show the effectiveness of our method compared to several strong baselines, especially when the amount of the paired samples is scarce.", "keyphrases": ["caption", "generative adversarial networks", "image"]} +{"id": "schwitter-etal-2003-ecole", "title": "ECOLE: a look-ahead editor of controlled language", "abstract": "This paper presents ECOLE, a look-ahead text editor that supports authors writing seemingly informal specifications in PENG, a computer-processable controlled natural language. ECOLE communicates via a socket interface with the controlled language processor of the PENG system. After each word form entered, the lookahead editor displays appropriate lookahead categories. These syntactic hints tell the author what kind of word or syntactic structure can follow the current input string and reduce thereby the cognitive burden to learn and remember the controlled language. While the author types the text word by word and adds unknown content words on the fly to the lexicon, a discourse representation structure and a paraphrase is built up dynamically for the text in a completely compositional manner. The arising specification can be checked automatically for consistency and informativity with the help of third-party reasoning services.", "keyphrases": ["look-ahead editor", "syntactic structure", "input string", "ecole", "writing process"]} +{"id": "kim-schubert-2016-high", "title": "High-Fidelity Lexical Axiom Construction from Verb Glosses", "abstract": "This paper presents a rule-based approach to constructing lexical axioms from WordNet verb entries in an expressive semantic representation, Episodic Logic (EL). EL differs from other representations in being syntactically close to natural language and covering phenomena such as generalized quantification, modification, and intensionality while still allowing highly effective inference. The presented approach uses a novel preprocessing technique to improve parsing precision of coordinators and incorporates frames, hand-tagged word senses, and examples from WordNet to achieve highly consistent semantic interpretations. EL allows the full content of glosses to be incorporated into the formal lexical axioms, without sacrificing interpretive accuracy, or verb-to-verb inference accuracy on a standard test set.
Evaluation of semantic parser performance is based on EL-match, introduced here as a generalization of the smatch metric for semantic structure accuracy. On gloss parses, the approach achieves an EL-match F1 score of 0.83, and a whole-axiom F1 score of 0.45; verb entailment identification based on extracted axioms is competitive with the state-of-the-art.", "keyphrases": ["axiom", "wordnet", "great expressivity"]} +{"id": "liu-hwa-2018-heuristically", "title": "Heuristically Informed Unsupervised Idiom Usage Recognition", "abstract": "Many idiomatic expressions can be interpreted figuratively or literally depending on their contexts. This paper proposes an unsupervised learning method for recognizing the intended usages of idioms. We treat the usages as a latent variable in probabilistic models and train them in a linguistically motivated feature space. Crucially, we show that distributional semantics is a helpful heuristic for distinguishing the literal usage of idioms, giving us a way to formulate a literal usage metric to estimate the likelihood that the idiom is intended literally. This information then serves as a form of distant supervision to guide the unsupervised training process for the probabilistic models. Experiments show that our overall model performs competitively against supervised methods.", "keyphrases": ["idiom", "latent variable", "feature space", "literal usage metric"]} +{"id": "yang-etal-2019-simple", "title": "Simple and Effective Text Matching with Richer Alignment Features", "abstract": "In this paper, we present a fast and strong neural approach for general purpose text matching applications. We explore what is sufficient to build a fast and well-performed text matching model and propose to keep three key features available for inter-sequence alignment: original point-wise features, previous aligned features, and contextual features while simplifying all the remaining components. We conduct experiments on four well-studied benchmark datasets across tasks of natural language inference, paraphrase identification and answer selection. The performance of our model is on par with the state-of-the-art on all datasets with much fewer parameters and the inference speed is at least 6 times faster compared with similarly performed ones.", "keyphrases": ["text matching", "original point-wise feature", "contextual feature", "connection"]} +{"id": "aker-gaizauskas-2009-summary", "title": "Summary Generation for Toponym-referenced Images using Object Type Language Models", "abstract": "This paper presents a novel approach to automatic captioning of toponym-referenced images. The automatic captioning procedure works by summarizing multiple web-documents that contain information related to an image\u2019s location. Our summarizer can generate both query-based and language model-biased multidocument summaries. The models are created from large numbers of existing articles pertaining to places of the same \u201cobject type\u201d. Evaluation relative to human written captions shows that when language models are used to bias the summarizer the summaries score more highly than the non-biased ones.", "keyphrases": ["image", "language model", "captioning", "web-document"]} +{"id": "kazama-etal-2010-bayesian", "title": "A Bayesian Method for Robust Estimation of Distributional Similarities", "abstract": "Existing word similarity measures are not robust to data sparseness since they rely only on the point estimation of words' context profiles obtained from a limited amount of data.
This paper proposes a Bayesian method for robust distributional word similarities. The method uses a distribution of context profiles obtained by Bayesian estimation and takes the expectation of a base similarity measure under that distribution. When the context profiles are multinomial distributions, the priors are Dirichlet, and the base measure is the Bhattacharyya coefficient, we can derive an analytical form that allows efficient calculation. For the task of word similarity estimation using a large amount of Web data in Japanese, we show that the proposed measure gives better accuracies than other well-known similarity measures.", "keyphrases": ["bayesian method", "similarity measure", "data sparseness", "expectation"]} +{"id": "pavlick-etal-2014-language", "title": "The Language Demographics of Amazon Mechanical Turk", "abstract": "We present a large scale study of the languages spoken by bilingual workers on Mechanical Turk (MTurk). We establish a methodology for determining the language skills of anonymous crowd workers that is more robust than simple surveying. We validate workers' self-reported language skill claims by measuring their ability to correctly translate words, and by geolocating workers to see if they reside in countries where the languages are likely to be spoken. Rather than posting a one-off survey, we posted paid tasks consisting of 1,000 assignments to translate a total of 10,000 words in each of 100 languages. Our study ran for several months, and was highly visible on the MTurk crowdsourcing platform, increasing the chances that bilingual workers would complete it. Our study was useful both to create bilingual dictionaries and to act as a census of the bilingual speakers on MTurk. We use this data to recommend languages with the largest speaker populations as good candidates for other researchers who want to develop crowdsourced, multilingual technologies. To further demonstrate the value of creating data via crowdsourcing, we hire workers to create bilingual parallel corpora in six Indian languages, and use them to train statistical machine translation systems.", "keyphrases": ["mechanical turk", "mturk", "world"]} +{"id": "pfeiffer-etal-2021-unks", "title": "UNKs Everywhere: Adapting Multilingual Language Models to New Scripts", "abstract": "Massively multilingual language models such as multilingual BERT offer state-of-the-art cross-lingual transfer performance on a range of NLP tasks. However, due to limited capacity and large differences in pretraining data sizes, there is a profound performance gap between resource-rich and resource-poor target languages. The ultimate challenge is dealing with under-resourced languages not covered at all by the models and written in scripts unseen during pretraining. In this work, we propose a series of novel data-efficient methods that enable quick and effective adaptation of pretrained multilingual models to such low-resource languages and unseen scripts. Relying on matrix factorization, our methods capitalize on the existing latent knowledge about multiple languages already available in the pretrained model's embedding matrix. Furthermore, we show that learning of the new dedicated embedding matrix in the target language can be improved by leveraging a small number of vocabulary items (i.e., the so-called lexically overlapping tokens) shared between mBERT's and target language vocabulary. Our adaptation techniques offer substantial performance gains for languages with unseen scripts.
We also demonstrate that they can yield improvements for low-resource languages written in scripts covered by the pretrained model.", "keyphrases": ["new script", "multilingual model", "tokenizer"]} +{"id": "li-etal-2019-hint", "title": "Hint-Based Training for Non-Autoregressive Machine Translation", "abstract": "Due to the unparallelizable nature of the autoregressive factorization, AutoRegressive Translation (ART) models have to generate tokens sequentially during decoding and thus suffer from high inference latency. Non-AutoRegressive Translation (NART) models were proposed to reduce the inference time, but could only achieve inferior translation accuracy. In this paper, we propose a novel approach to leveraging the hints from hidden states and word alignments to help the training of NART models. The results achieve significant improvement over previous NART models for the WMT14 En-De and De-En datasets and are even comparable to a strong LSTM-based ART baseline but one order of magnitude faster in inference.", "keyphrases": ["hidden state", "non-autoregressive model", "teacher"]} +{"id": "altantawy-etal-2011-fast", "title": "Fast Yet Rich Morphological Analysis", "abstract": "Implementations of models of morphologically rich languages such as Arabic typically achieve speed and small memory footprint at the cost of abandoning linguistically abstract and elegant representations. We present a solution to modeling rich morphologies that is both fast and based on linguistically rich representations. In our approach, we convert a linguistically complex and abstract implementation of Arabic verbs in finite-state machinery into a simple precompiled tabular representation.", "keyphrases": ["morphology", "rich representation", "abstract implementation", "finite-state machinery", "end"]} +{"id": "stevenson-greenwood-2005-semantic", "title": "A Semantic Approach to IE Pattern Induction", "abstract": "This paper presents a novel algorithm for the acquisition of Information Extraction patterns. The approach makes the assumption that useful patterns will have similar meanings to those already identified as relevant. Patterns are compared using a variation of the standard vector space model in which information from an ontology is used to capture semantic similarity. Evaluation shows this algorithm performs well when compared with a previously reported document-centric approach.", "keyphrases": ["variation", "candidate pattern", "sentence filtering", "supervised training", "event extraction system"]} +{"id": "yang-etal-2019-exploiting", "title": "Exploiting Noisy Data in Distant Supervision Relation Classification", "abstract": "Distant supervision has obtained great progress on the relation classification task. However, it still suffers from the noisy labeling problem. Different from previous works that underutilize noisy data which inherently characterize the property of classification, in this paper, we propose RCEND, a novel framework to enhance Relation Classification by Exploiting Noisy Data. First, an instance discriminator with reinforcement learning is designed to split the noisy data into correctly labeled data and incorrectly labeled data. Second, we learn a robust relation classifier in a semi-supervised learning way, whereby the correctly and incorrectly labeled data are treated as labeled and unlabeled data respectively.
The experimental results show that our method outperforms the state-of-the-art models.", "keyphrases": ["noisy data", "distant supervision", "labeling problem"]} +{"id": "tack-etal-2017-human", "title": "Human and Automated CEFR-based Grading of Short Answers", "abstract": "This paper is concerned with the task of automatically assessing the written proficiency level of non-native (L2) learners of English. Drawing on previous research on automated L2 writing assessment following the Common European Framework of Reference for Languages (CEFR), we investigate the possibilities and difficulties of deriving the CEFR level from short answers to open-ended questions, which has not yet been subjected to numerous studies up to date. The object of our study is twofold: to examine the intricacy involved with both human and automated CEFR-based grading of short answers. On the one hand, we describe the compilation of a learner corpus of short answers graded with CEFR levels by three certified Cambridge examiners. We mainly observe that, although the shortness of the answers is reported as undermining a clear-cut evaluation, the length of the answer does not necessarily correlate with inter-examiner disagreement. On the other hand, we explore the development of a soft-voting system for the automated CEFR-based grading of short answers and draw tentative conclusions about its use in a computer-assisted testing (CAT) setting.", "keyphrases": ["grading", "short answer", "cefr"]} +{"id": "zhang-etal-2010-cross", "title": "Cross-Lingual Latent Topic Extraction", "abstract": "Probabilistic latent topic models have recently enjoyed much success in extracting and analyzing latent topics in text in an unsupervised way. One common deficiency of existing topic models, though, is that they would not work well for extracting cross-lingual latent topics simply because words in different languages generally do not co-occur with each other. In this paper, we propose a way to incorporate a bilingual dictionary into a probabilistic topic model so that we can apply topic models to extract shared latent topics in text data of different languages. Specifically, we propose a new topic model called Probabilistic Cross-Lingual Latent Semantic Analysis (PCLSA) which extends the Probabilistic Latent Semantic Analysis (PLSA) model by regularizing its likelihood function with soft constraints defined based on a bilingual dictionary. Both qualitative and quantitative experimental results show that the PCLSA model can effectively extract cross-lingual latent topics from multilingual text data.", "keyphrases": ["topic model", "different language", "text data", "pclsa model"]} +{"id": "kong-etal-2021-multilingual", "title": "Multilingual Neural Machine Translation with Deep Encoder and Multiple Shallow Decoders", "abstract": "Recent work in multilingual translation advances translation quality surpassing bilingual baselines using deep transformer models with increased capacity. However, the extra latency and memory costs introduced by this approach may make it unacceptable for efficiency-constrained applications. It has recently been shown for bilingual translation that using a deep encoder and shallow decoder (DESD) can reduce inference latency while maintaining translation quality, so we study similar speed-accuracy trade-offs for multilingual translation. 
We find that for many-to-one translation we can indeed increase decoder speed without sacrificing quality using this approach, but for one-to-many translation, shallow decoders cause a clear quality drop. To ameliorate this drop, we propose a deep encoder with multiple shallow decoders (DEMSD) where each shallow decoder is responsible for a disjoint subset of target languages. Specifically, the DEMSD model with 2-layer decoders is able to obtain a 1.8x speedup on average compared to a standard transformer model with no drop in translation quality.", "keyphrases": ["multiple shallow decoder", "translation quality", "one-to-many translation"]} +{"id": "heinz-rogers-2010-estimating", "title": "Estimating Strictly Piecewise Distributions", "abstract": "Strictly Piecewise (SP) languages are a subclass of regular languages which encode certain kinds of long-distance dependencies that are found in natural languages. Like the classes in the Chomsky and Subregular hierarchies, there are many independently converging characterizations of the SP class (Rogers et al., to appear). Here we define SP distributions and show that they can be efficiently estimated from positive data.", "keyphrases": ["subclass", "characterization", "k-piecewise language"]} +{"id": "paranjape-etal-2020-information", "title": "An Information Bottleneck Approach for Controlling Conciseness in Rationale Extraction", "abstract": "Decisions of complex models for language understanding can be explained by limiting the inputs they are provided to a relevant subsequence of the original text \u2014 a rationale. Models that condition predictions on a concise rationale, while being more interpretable, tend to be less accurate than models that are able to use the entire context. In this paper, we show that it is possible to better manage the trade-off between concise explanations and high task accuracy by optimizing a bound on the Information Bottleneck (IB) objective. Our approach jointly learns an explainer that predicts sparse binary masks over input sentences without explicit supervision, and an end-task predictor that considers only the residual sentences. Using IB, we derive a learning objective that allows direct control of mask sparsity levels through a tunable sparse prior. Experiments on the ERASER benchmark demonstrate significant gains over previous work for both task performance and agreement with human rationales. Furthermore, we find that in the semi-supervised setting, a modest amount of gold rationales (25% of training examples with gold masks) can close the performance gap with a model that uses the full input.", "keyphrases": ["information bottleneck", "rationale extraction", "explainer"]} +{"id": "putra-etal-2021-parsing", "title": "Parsing Argumentative Structure in English-as-Foreign-Language Essays", "abstract": "This paper presents a study on parsing the argumentative structure in English-as-foreign-language (EFL) essays, which are inherently noisy. The parsing process consists of two steps, linking related sentences and then labelling their relations. We experiment with several deep learning architectures to address each task independently. In the sentence linking task, a biaffine model performed the best. In the relation labelling task, a fine-tuned BERT model performed the best. Two sentence encoders are employed, and we observed that non-fine-tuning models generally performed better when using Sentence-BERT as opposed to BERT encoder. 
We trained our models using two types of parallel texts: original noisy EFL essays and those improved by annotators, and then evaluated them on the original essays. The experiment shows that an end-to-end in-domain system achieved an accuracy of .341. On the other hand, the cross-domain system achieved 94% of the in-domain system's performance. This signals that well-written texts can also be useful to train argument mining systems for noisy texts.", "keyphrases": ["argumentative structure", "essay", "language learner"]} +{"id": "britz-etal-2017-massive", "title": "Massive Exploration of Neural Machine Translation Architectures", "abstract": "Neural Machine Translation (NMT) has shown remarkable progress over the past few years, with production systems now being deployed to end-users. As the field is moving rapidly, it has become unclear which elements of NMT architectures have a significant impact on translation quality. In this work, we present a large-scale analysis of the sensitivity of NMT architectures to common hyperparameters. We report empirical results and variance numbers for several hundred experimental runs, corresponding to over 250,000 GPU hours on a WMT English to German translation task. Our experiments provide practical insights into the relative importance of factors such as embedding size, network depth, RNN cell type, residual connections, attention mechanism, and decoding heuristics. As part of this contribution, we also release an open-source NMT framework in TensorFlow to make it easy for others to reproduce our results and perform their own experiments.", "keyphrases": ["exploration", "translation quality", "hyperparameter", "gpu hour", "sequence-to-sequence model"]} +{"id": "agarwal-etal-2013-sinnet", "title": "SINNET: Social Interaction Network Extractor from Text", "abstract": "In this paper we present a demo of our system: Social Interaction Network Extractor from Text (SINNET). SINNET is able to extract a social network from unstructured text. Nodes in the network are people and links are social events.", "keyphrases": ["unstructured text", "sinnet", "social network extraction"]} +{"id": "johannsen-etal-2016-joint", "title": "Joint part-of-speech and dependency projection from multiple sources", "abstract": "Most previous work on annotation projection has been limited to a subset of Indo-European languages, using only a single source language, and projecting annotation for one task at a time. In contrast, we present an Integer Linear Programming (ILP) algorithm that simultaneously projects annotation for multiple tasks from multiple source languages, relying on parallel corpora available for hundreds of languages. When training POS taggers and dependency parsers on jointly projected POS tags and syntactic dependencies using our algorithm, we obtain better performance than a standard approach on 20/23 languages using one parallel corpus; and 18/27 languages using another.", "keyphrases": ["multiple source", "dependency parser", "pos tag"]} +{"id": "de-gispert-etal-2009-minimum", "title": "Minimum Bayes Risk Combination of Translation Hypotheses from Alternative Morphological Decompositions", "abstract": "We describe a simple strategy to achieve translation performance improvements by combining output from identical statistical machine translation systems trained on alternative morphological decompositions of the source language. Combination is done by means of Minimum Bayes Risk decoding over a shared N-best list. 
When translating into English from two highly inflected languages such as Arabic and Finnish we obtain significant improvements over simply selecting the best morphological decomposition.", "keyphrases": ["morphological decomposition", "source language", "minimum bayes risk"]} +{"id": "gangula-etal-2019-detecting", "title": "Detecting Political Bias in News Articles Using Headline Attention", "abstract": "Language is a powerful tool which can be used to state the facts as well as express our views and perceptions. Most of the time, we find a subtle bias towards or against someone or something. When it comes to politics, media houses and journalists are known to create bias by shrewd means such as misinterpreting reality and distorting viewpoints towards some parties. This misinterpretation on a large scale can lead to the production of biased news and conspiracy theories. Automating bias detection in newspaper articles could be a good challenge for research in NLP. We propose a headline attention network for this bias detection. Our model has two distinctive characteristics: (i) it has a structure that mirrors a person's way of reading a news article, and (ii) it has an attention mechanism applied on the article based on its headline, enabling it to attend to more critical content to predict bias. As the required datasets were not available, we created a dataset comprising 1329 news articles collected from various Telugu newspapers and marked them for bias towards a particular political party. The experiments conducted on it demonstrated that our model outperforms various baseline methods by a substantial margin.", "keyphrases": ["headline attention", "telugu newspaper", "political party"]} +{"id": "jain-2016-question", "title": "Question Answering over Knowledge Base using Factual Memory Networks", "abstract": "In the task of question answering, Memory Networks have recently been shown to be quite effective towards complex reasoning as well as scalability, in spite of the limited range of topics covered in training data. In this paper, we introduce the Factual Memory Network, which learns to answer questions by extracting and reasoning over relevant facts from a Knowledge Base. Our system generates distributed representations of questions and the KB in the same word vector space, extracts a subset of initial candidate facts, then tries to find a path to the answer entity using multi-hop reasoning and refinement. Additionally, we also improve the run-time efficiency of our model using various computational heuristics.", "keyphrases": ["knowledge base", "factual memory network", "reasoning"]} +{"id": "kontonatsios-etal-2014-using", "title": "Using a Random Forest Classifier to Compile Bilingual Dictionaries of Technical Terms from Comparable Corpora", "abstract": "We describe a machine learning approach, a Random Forest (RF) classifier, that is used to automatically compile bilingual dictionaries of technical terms from comparable corpora. We evaluate the RF classifier against a popular term alignment method, namely context vectors, and we report an improvement of the translation accuracy. As an application, we use the automatically extracted dictionary in combination with a trained Statistical Machine Translation (SMT) system to more accurately translate unknown terms. 
The dictionary extraction method described in this paper is freely available.", "keyphrases": ["random forest", "comparable corpora", "context vector"]} +{"id": "tu-etal-2014-enhancing", "title": "Enhancing Grammatical Cohesion: Generating Transitional Expressions for SMT", "abstract": "Transitional expressions provide glue that holds ideas together in a text and enhance the logical organization, which together help improve readability of a text. However, in most current statistical machine translation (SMT) systems, the outputs of compound-complex sentences still lack proper transitional expressions. As a result, the translations are often hard to read and understand. To address this issue, we propose two novel models to encourage generating such transitional expressions by introducing the source compound-complex sentence structure (CSS). Our models include a CSS-based translation model, which generates new CSS-based translation rules, and a generative transfer model, which encourages producing transitional expressions during decoding. The two models are integrated into a hierarchical phrase-based translation system to evaluate their effectiveness. The experimental results show that significant improvements are achieved on various test data, while the translations are more cohesive and smooth.", "keyphrases": ["grammatical cohesion", "transitional expression", "translation rule"]} +{"id": "qadir-riloff-2011-classifying", "title": "Classifying Sentences as Speech Acts in Message Board Posts", "abstract": "This research studies the text genre of message board forums, which contain a mixture of expository sentences that present factual information and conversational sentences that include communicative acts between the writer and readers. Our goal is to create sentence classifiers that can identify whether a sentence contains a speech act, and can recognize sentences containing four different speech act classes: Commissives, Directives, Expressives, and Representatives. We conduct experiments using a wide variety of features, including lexical and syntactic features, speech act word lists from external resources, and domain-specific semantic class features. We evaluate our results on a collection of message board posts in the domain of veterinary medicine.", "keyphrases": ["speech act", "message board post", "sequence tagger"]} +{"id": "setiawan-etal-2020-variational", "title": "Variational Neural Machine Translation with Normalizing Flows", "abstract": "Variational Neural Machine Translation (VNMT) is an attractive framework for modeling the generation of target translations, conditioned not only on the source sentence but also on some latent random variables. The latent variable modeling may introduce useful statistical dependencies that can improve translation accuracy. Unfortunately, learning informative latent variables is non-trivial, as the latent space can be prohibitively large, and the latent codes are prone to be ignored by many translation models at training time. Previous works impose strong assumptions on the distribution of the latent code and limit the choice of the NMT architecture. In this paper, we propose to apply the VNMT framework to the state-of-the-art Transformer and introduce a more flexible approximate posterior based on normalizing flows. 
We demonstrate the efficacy of our proposal under both in-domain and out-of-domain conditions, significantly outperforming strong baselines.", "keyphrases": ["normalizing flow", "translation model", "approximate posterior"]} +{"id": "chang-etal-2013-constrained", "title": "A Constrained Latent Variable Model for Coreference Resolution", "abstract": "Coreference resolution is a well-known clustering task in Natural Language Processing. In this paper, we describe the Latent Left Linking model (L3M), a novel, principled, and linguistically motivated latent structured prediction approach to coreference resolution. We show that L3M admits efficient inference and can be augmented with knowledge-based constraints; we also present a fast stochastic gradient-based learning. Experiments on ACE and Ontonotes data show that L3M and its constrained version, CL3M, are more accurate than several state-of-the-art approaches as well as some structured prediction models proposed in the literature.", "keyphrases": ["coreference resolution", "prediction model", "mention", "latent antecedent"]} +{"id": "jayasinghe-etal-2016-csiro", "title": "CSIRO Data61 at the WNUT Geo Shared Task", "abstract": "In this paper, we describe CSIRO Data61's participation in the Geolocation shared task at the Workshop for Noisy User-generated Text. Our approach was to use ensemble methods to capitalise on four component methods: heuristics based on metadata, a label propagation method, timezone text classifiers, and an information retrieval approach. The ensembles we explored focused on examining the role of language technologies in geolocation prediction and also in examining the use of hard voting and cascading ensemble methods. Based on the accuracy of city-level predictions, our systems were the best performing submissions at this year's shared task. Furthermore, when estimating the latitude and longitude of a user, our median error distance was accurate to within 30 kilometers.", "keyphrases": ["cascade", "csiro data61", "ensemble approach"]} +{"id": "vashishth-etal-2018-reside", "title": "RESIDE: Improving Distantly-Supervised Neural Relation Extraction using Side Information", "abstract": "Distantly-supervised Relation Extraction (RE) methods train an extractor by automatically aligning relation instances in a Knowledge Base (KB) with unstructured text. In addition to relation instances, KBs often contain other relevant side information, such as aliases of relations (e.g., founded and co-founded are aliases for the relation founderOfCompany). RE models usually ignore such readily available side information. In this paper, we propose RESIDE, a distantly-supervised neural relation extraction method which utilizes additional side information from KBs for improved relation extraction. It uses entity type and relation alias information for imposing soft constraints while predicting relations. RESIDE employs Graph Convolution Networks (GCN) to encode syntactic information from text and improves performance even when limited side information is available. Through extensive experiments on benchmark datasets, we demonstrate RESIDE's effectiveness. 
We have made RESIDE's source code available to encourage reproducible research.", "keyphrases": ["neural relation extraction", "graph convolution network", "encode syntactic information"]} +{"id": "joty-etal-2016-joint", "title": "Joint Learning with Global Inference for Comment Classification in Community Question Answering", "abstract": "This paper addresses the problem of comment classification in community Question Answering. Following the state of the art, we approach the task with a global inference process to exploit the information of all comments in the answer-thread in the form of a fully connected graph. Our contribution comprises two novel joint learning models that are on-line and integrate inference within learning. The first one jointly learns two node- and edge-level MaxEnt classifiers with stochastic gradient descent and integrates the inference step with loopy belief propagation. The second model is an instance of fully connected pairwise CRFs (FCCRF). The FCCRF model significantly outperforms all other approaches and yields the best results on the task to date. Crucial elements for its success are the global normalization and an Ising-like edge potential.", "keyphrases": ["comment", "community question answering", "joint learning model"]} +{"id": "iso-etal-2016-forecasting", "title": "Forecasting Word Model: Twitter-based Influenza Surveillance and Prediction", "abstract": "Because of the increasing popularity of social media, much information has been shared on the internet, enabling social media users to understand various real world events. Particularly, social media-based infectious disease surveillance has attracted increasing attention. In this work, we specifically examine influenza: a common topic of communication on social media. The fundamental theory of this work is that several words, such as symptom words (fever, headache, etc.), appear in advance of flu epidemic occurrence. Consequently, past word occurrence can contribute to estimation of the number of current patients. To employ such forecasting words, one can first estimate the optimal time lag for each word based on their cross correlation. Then one can build a linear model consisting of word frequencies at different time points for nowcasting and for forecasting influenza epidemics. In experimentally obtained results (using 7.7 million tweets of August 2012 \u2013 January 2016), the proposed model achieved the best nowcasting performance to date (correlation ratio 0.93) and practically sufficient forecasting performance (correlation ratio 0.91 in 1-week future prediction, and correlation ratio 0.77 in 3-weeks future prediction). This report is the first of the relevant literature to describe a model enabling prediction of future epidemics using Twitter.", "keyphrases": ["influenza", "infectious disease surveillance", "time lag", "nowcasting", "twitter"]} +{"id": "louvan-magnini-2020-simple", "title": "Simple is Better! Lightweight Data Augmentation for Low Resource Slot Filling and Intent Classification", "abstract": "Neural-based models have achieved outstanding performance on slot filling and intent classification, when fairly large in-domain training data are available. However, as new domains are frequently added, creating sizeable data is expensive. We show that lightweight augmentation, a set of augmentation methods involving word span and sentence level operations, alleviates data scarcity problems. 
Our experiments on limited data settings show that lightweight augmentation yields significant performance improvement on slot filling on the ATIS and SNIPS datasets, and achieves competitive performance with respect to more complex, state-of-the-art, augmentation approaches. Furthermore, lightweight augmentation is also beneficial when combined with pre-trained LM-based models, as it improves BERT-based joint intent and slot filling models.", "keyphrases": ["slot filling", "intent classification", "augmentation method"]} +{"id": "rangarajan-sridhar-etal-2013-segmentation", "title": "Segmentation Strategies for Streaming Speech Translation", "abstract": "The study presented in this work is a first effort at real-time speech translation of TED talks, a compendium of public talks with different speakers addressing a variety of topics. We address the goal of achieving a system that balances translation accuracy and latency. In order to improve ASR performance for our diverse data set, adaptation techniques such as constrained model adaptation and vocal tract length normalization are found to be useful. In order to improve machine translation (MT) performance, techniques that could be employed in real-time such as monotonic and partial translation retention are found to be of use. We also experiment with inserting text segmenters of various types between ASR and MT in a series of real-time translation experiments. Among other results, our experiments demonstrate that a good segmentation is useful, and a novel conjunction-based segmentation strategy improves translation quality nearly as much as other strategies such as comma-based segmentation. It was also found to be important to synchronize various pipeline components in order to minimize latency.", "keyphrases": ["speech translation", "segmentation strategy", "policy"]} +{"id": "bawden-etal-2020-parbleu", "title": "ParBLEU: Augmenting Metrics with Automatic Paraphrases for the WMT'20 Metrics Shared Task", "abstract": "We describe parBLEU, parCHRF++, and parESIM, which augment baseline metrics with automatically generated paraphrases produced by PRISM (Thompson and Post, 2020a), a multilingual neural machine translation system. We build on recent work studying how to improve BLEU by using diverse automatically paraphrased references (Bawden et al., 2020), extending experiments to the multilingual setting for the WMT2020 metrics shared task and for three base metrics. We compare their capacity to exploit up to 100 additional synthetic references. We find that gains are possible when using additional, automatically paraphrased references, although they are not systematic. However, segment-level correlations, particularly into English, are improved for all three metrics and even with higher numbers of paraphrased references.", "keyphrases": ["parbleu", "sentence-level embedding", "hypothesis"]} +{"id": "beinborn-etal-2015-candidate", "title": "Candidate evaluation strategies for improved difficulty prediction of language tests", "abstract": "Language proficiency tests are a useful tool for evaluating learner progress, if the test difficulty fits the level of the learner. In this work, we describe a generalized framework for test difficulty prediction that is applicable to several languages and test types. In addition, we develop two ranking strategies for candidate evaluation inspired by automatic solving methods based on language model probability and semantic relatedness. 
These ranking strategies lead to significant improvements for the difficulty prediction of cloze tests.", "keyphrases": ["difficulty prediction", "language learning", "comprehension question"]} +{"id": "al-khatib-etal-2020-exploiting", "title": "Exploiting Personal Characteristics of Debaters for Predicting Persuasiveness", "abstract": "Predicting the persuasiveness of arguments has applications as diverse as writing assistance, essay scoring, and advertising. While clearly relevant to the task, the personal characteristics of an argument's source and audience have not yet been fully exploited toward automated persuasiveness prediction. In this paper, we model debaters' prior beliefs, interests, and personality traits based on their previous activity, without dependence on explicit user profiles or questionnaires. Using a dataset of over 60,000 argumentative discussions, comprising more than three million individual posts collected from the subreddit r/ChangeMyView, we demonstrate that our modeling of debater's characteristics enhances the prediction of argument persuasiveness as well as of debaters' resistance to persuasion.", "keyphrases": ["debater", "persuasiveness", "belief"]} +{"id": "hu-etal-2021-word", "title": "Word Graph Guided Summarization for Radiology Findings", "abstract": "Radiology reports play a critical role in communicating medical findings to physicians. In each report, the impression section summarizes essential radiology findings. In clinical practice, writing impression is highly demanded yet time-consuming and prone to errors for radiologists. Therefore, automatic impression generation has emerged as an attractive research direction to facilitate such clinical practice. Existing studies mainly focused on introducing salient word information to the general text summarization framework to guide the selection of the key content in radiology findings. However, for this task, a model needs not only capture the important words in findings but also accurately describe their relations so as to generate high-quality impressions. In this paper, we propose a novel method for automatic impression generation, where a word graph is constructed from the findings to record the critical words and their relations, then a Word Graph guided Summarization model (WGSum) is designed to generate impressions with the help of the word graph. Experimental results on two datasets, OpenI and MIMIC-CXR, confirm the validity and effectiveness of our proposed approach, where the state-of-the-art results are achieved on both datasets. Further experiments are also conducted to analyze the impact of different graph designs to the performance of our method.", "keyphrases": ["radiology finding", "summarization model", "word graph"]} +{"id": "lin-etal-2019-alpacatag", "title": "AlpacaTag: An Active Learning-based Crowd Annotation Framework for Sequence Tagging", "abstract": "We introduce an open-source web-based data annotation framework (AlpacaTag) for sequence tagging tasks such as named-entity recognition (NER). The distinctive advantages of AlpacaTag are three-fold. 1) Active intelligent recommendation: dynamically suggesting annotations and sampling the most informative unlabeled instances with a back-end active learned model; 2) Automatic crowd consolidation: enhancing real-time inter-annotator agreement by merging inconsistent labels from multiple annotators; 3) Real-time model deployment: users can deploy their models in downstream systems while new annotations are being made. 
AlpacaTag is a comprehensive solution for sequence labeling tasks, ranging from rapid tagging with recommendations powered by active learning and auto-consolidation of crowd annotations to real-time model deployment.", "keyphrases": ["annotator", "sequence tagging", "active learning"]} +{"id": "huang-etal-2005-mining", "title": "Mining Key Phrase Translations from Web Corpora", "abstract": "Key phrases are usually among the most information-bearing linguistic structures. Translating them correctly will improve many natural language processing applications. We propose a new framework to mine key phrase translations from web corpora. We submit a source phrase to a search engine as a query, then expand queries by adding the translations of topic-relevant hint words from the returned snippets. We retrieve mixed-language web pages based on the expanded queries. Finally, we extract the key phrase translation from the second-round returned web page snippets with phonetic, semantic and frequency-distance features. We achieve 46% phrase translation accuracy when using top 10 returned snippets, and 80% accuracy with 165 snippets. Both results are significantly better than several existing methods.", "keyphrases": ["phrase translation", "web corpora", "query", "topic-relevant hint word", "frequency-distance feature"]} +{"id": "pasupat-liang-2014-zero", "title": "Zero-shot Entity Extraction from Web Pages", "abstract": "In order to extract entities of a fine-grained category from semi-structured data in web pages, existing information extraction systems rely on seed examples or redundancy across multiple web pages. In this paper, we consider a new zero-shot learning task of extracting entities specified by a natural language query (in place of seeds) given only a single web page. Our approach defines a log-linear model over latent extraction predicates, which select lists of entities from the web page. The main challenge is to define features on widely varying candidate entity lists. We tackle this by", "keyphrases": ["web page", "semi-structured data", "natural language query", "zero-shot entity extraction"]} +{"id": "beigman-klebanov-beigman-2010-game", "title": "A Game-Theoretic Model of Metaphorical Bargaining", "abstract": "We present a game-theoretic model of bargaining over a metaphor in the context of political communication, find its equilibrium, and use it to rationalize observed linguistic behavior. We argue that game theory is well suited for modeling discourse as a dynamic resulting from a number of conflicting pressures, and suggest applications of interest to computational linguists.", "keyphrases": ["game-theoretic model", "metaphor", "political communication"]} +{"id": "parnell-etal-2021-rewardsofsum", "title": "RewardsOfSum: Exploring Reinforcement Learning Rewards for Summarisation", "abstract": "To date, most abstractive summarisation models have relied on variants of the negative log-likelihood (NLL) as their training objective. In some cases, reinforcement learning has been added to train the models with an objective that is closer to their evaluation measures (e.g. ROUGE). However, the reward function to be used within the reinforcement learning approach can play a key role for performance and is still partially unexplored. For this reason, in this paper, we propose two reward functions for the task of abstractive summarisation: the first function, referred to as RwB-Hinge, dynamically selects the samples for the gradient update. 
The second function, nicknamed RISK, leverages a small pool of strong candidates to inform the reward. In the experiments, we probe the proposed approach by fine-tuning an NLL pre-trained model over nine summarisation datasets of diverse size and nature. The experimental results show a consistent improvement over the negative log-likelihood baselines.", "keyphrases": ["reinforcement learning", "summarisation", "objective"]} +{"id": "zhang-etal-2021-towards", "title": "Towards Navigation by Reasoning over Spatial Configurations", "abstract": "We deal with the navigation problem where the agent follows natural language instructions while observing the environment. Focusing on language understanding, we show the importance of spatial semantics in grounding navigation instructions into visual perceptions. We propose a neural agent that uses the elements of spatial configurations and investigate their influence on the navigation agent's reasoning ability. Moreover, we model the sequential execution order and align visual objects with spatial configurations in the instruction. Our neural agent improves strong baselines on the seen environments and shows competitive performance on the unseen environments. Additionally, the experimental results demonstrate that explicit modeling of spatial semantic elements in the instructions can improve the grounding and spatial reasoning of the model.", "keyphrases": ["reasoning", "spatial configuration", "semantic structure"]} +{"id": "kwiatkowski-etal-2013-scaling", "title": "Scaling Semantic Parsers with On-the-Fly Ontology Matching", "abstract": "We consider the challenge of learning semantic parsers that scale to large, open-domain problems, such as question answering with Freebase. In such settings, the sentences cover a wide variety of topics and include many phrases whose meaning is difficult to represent in a fixed target ontology. For example, even simple phrases such as \u2018daughter\u2019 and \u2018number of people living in\u2019 cannot be directly represented in Freebase, whose ontology instead encodes facts about gender, parenthood, and population. In this paper, we introduce a new semantic parsing approach that learns to resolve such ontological mismatches. The parser is learned from question-answer pairs, uses a probabilistic CCG to build linguistically motivated logical-form meaning representations, and includes an ontology matching model that adapts the output logical forms for each target ontology. Experiments demonstrate state-of-the-art performance on two benchmark semantic parsing datasets, including a nine-point accuracy improvement on a recent Freebase QA corpus.", "keyphrases": ["ontology matching", "open-domain problem", "current semantic parser"]} +{"id": "avramidis-etal-2011-evaluate", "title": "Evaluate with Confidence Estimation: Machine ranking of translation outputs using grammatical features", "abstract": "We present a pilot study on an evaluation method which is able to rank translation outputs with no reference translation, given only their source sentence. The system employs a statistical classifier trained upon existing human rankings, using several features derived from analysis of both the source and the target sentences. 
Development experiments on one language pair showed that the method has considerably good correlation with human ranking when using features obtained from a PCFG parser.", "keyphrases": ["ranking", "translation output", "parsing-based feature"]} +{"id": "federico-etal-2020-speech", "title": "From Speech-to-Speech Translation to Automatic Dubbing", "abstract": "We present enhancements to a speech-to-speech translation pipeline in order to perform automatic dubbing. Our architecture features neural machine translation generating output of preferred length, prosodic alignment of the translation with the original speech segments, neural text-to-speech with fine tuning of the duration of each utterance, and, finally, audio rendering that enriches text-to-speech output with background noise and reverberation extracted from the original audio. We report and discuss results of a first subjective evaluation of automatic dubbing of excerpts of TED Talks from English into Italian, which measures the perceived naturalness of automatic dubbing and the relative importance of each proposed enhancement.", "keyphrases": ["automatic dubbing", "prosodic alignment", "duration"]} +{"id": "mukherjee-joshi-2013-sentiment", "title": "Sentiment Aggregation using ConceptNet Ontology", "abstract": "Sentiment analysis of reviews traditionally ignored the association between the features of the given product domain. The hierarchical relationship between the features of a product and their associated sentiment that influence the polarity of a review is not dealt with very well. In this work, we analyze the influence of the hierarchical relationship between the product attributes and their sentiments on the overall review polarity. ConceptNet is used to automatically create a product-specific ontology that depicts the hierarchical relationship between the product attributes. The ontology tree is annotated with feature-specific polarities which are aggregated bottom-up, exploiting the ontological information, to find the overall review polarity. We propose a weakly supervised system that achieves a reasonable performance improvement over the baseline without requiring any tagged training data.", "keyphrases": ["product", "ontology tree", "feature-specific polarity", "sentiment aggregation"]} +{"id": "moeller-etal-2020-igt2p", "title": "IGT2P: From Interlinear Glossed Texts to Paradigms", "abstract": "An intermediate step in the linguistic analysis of an under-documented language is to find and organize inflected forms that are attested in natural speech. From this data, linguists generate unseen inflected word forms in order to test hypotheses about the language's inflectional patterns and to complete inflectional paradigm tables. To get the data, linguists spend many hours manually creating interlinear glossed texts (IGTs). We introduce a new task that speeds this process and automatically generates new morphological resources for natural language processing systems: IGT-to-paradigms (IGT2P). IGT2P generates entire morphological paradigms from IGT input. We show that existing morphological reinflection models can solve the task with 21% to 64% accuracy, depending on the language. 
We further find that (i) having a language expert spend only a few hours cleaning the noisy IGT data improves performance by as much as 21 percentage points, and (ii) POS tags, which are generally considered a necessary part of NLP morphological reinflection input, have no effect on the accuracy of the models considered here.", "keyphrases": ["interlinear glossed text", "paradigms", "igt"]} +{"id": "huang-etal-2019-matters", "title": "What Matters for Neural Cross-Lingual Named Entity Recognition: An Empirical Analysis", "abstract": "Building named entity recognition (NER) models for languages that do not have much training data is a challenging task. While recent work has shown promising results on cross-lingual transfer from high-resource languages, it is unclear what knowledge is transferred. In this paper, we first propose a simple and efficient neural architecture for cross-lingual NER. Experiments show that our model achieves competitive performance with the state-of-the-art. We further explore how transfer learning works for cross-lingual NER on two transferable factors: sequential order and multilingual embedding. Our results shed light on future research for improving cross-lingual NER.", "keyphrases": ["entity recognition", "cross-lingual transfer", "multilingual embedding"]} +{"id": "michael-etal-2018-crowdsourcing", "title": "Crowdsourcing Question-Answer Meaning Representations", "abstract": "We introduce Question-Answer Meaning Representations (QAMRs), which represent the predicate-argument structure of a sentence as a set of question-answer pairs. We develop a crowdsourcing scheme to show that QAMRs can be labeled with very little training, and gather a dataset with over 5,000 sentences and 100,000 questions. A qualitative analysis demonstrates that the crowd-generated question-answer pairs cover the vast majority of predicate-argument relationships in existing datasets (including PropBank, NomBank, and QA-SRL) along with many previously under-resourced ones, including implicit arguments and relations. We also report baseline models for question generation and answering, and summarize a recent approach for using QAMR labels to improve an Open IE system. These results suggest the freely available QAMR data and annotation scheme should support significant future work.", "keyphrases": ["qamr", "predicate-argument structure", "qa-driven meaning representation", "crowdsourcing"]} +{"id": "wang-etal-2020-joint", "title": "Joint Constrained Learning for Event-Event Relation Extraction", "abstract": "Understanding natural language involves recognizing how multiple event mentions structurally and temporally interact with each other. In this process, one can induce event complexes that organize multi-granular events with temporal order and membership relations interweaving among them. Due to the lack of jointly labeled data for these relational phenomena and the restriction on the structures they articulate, we propose a joint constrained learning framework for modeling event-event relations. Specifically, the framework enforces logical constraints within and across multiple temporal and subevent relations of events by converting these constraints into differentiable learning objectives. We show that our joint constrained learning approach effectively compensates for the lack of jointly labeled data, and outperforms SOTA methods on benchmarks for both temporal relation extraction and event hierarchy construction, replacing a commonly used but more expensive global inference process. 
We also present a promising case study to show the effectiveness of our approach to inducing event complexes on an external corpus.", "keyphrases": ["event-event relation", "relation extraction", "learning framework"]} +{"id": "van-schijndel-schuler-2013-analysis", "title": "An Analysis of Frequency- and Memory-Based Processing Costs", "abstract": "The frequency of words and syntactic constructions has been observed to have a substantial effect on language processing. This begs the question of what causes certain constructions to be more or less frequent. A theory of grounding (Phillips, 2010) would suggest that cognitive limitations might cause languages to develop frequent constructions in such a way as to avoid processing costs. This paper studies how current theories of working memory fit into theories of language processing and what influence memory limitations may have over reading times. Measures of such limitations are evaluated on eye-tracking data and the results are compared with predictions made by different theories of processing.", "keyphrases": ["processing cost", "reading time", "memory effect"]} +{"id": "visweswariah-etal-2011-word", "title": "A Word Reordering Model for Improved Machine Translation", "abstract": "Preordering of source side sentences has proved to be useful in improving statistical machine translation. Most work has used a parser in the source language along with rules to map the source language word order into the target language word order. The requirement to have a source language parser is a major drawback, which we seek to overcome in this paper. Instead of using a parser and then using rules to order the source side sentence we learn a model that can directly reorder source side sentences to match target word order using a small parallel corpus with high-quality word alignments. Our model learns pairwise costs of a word immediately preceding another word. We use the Lin-Kernighan heuristic to find the best source reordering efficiently during training and testing and show that it suffices to provide good quality reordering. \n \nWe show gains in translation performance based on our reordering model for translating from Hindi to English, Urdu to English (with a public dataset), and English to Hindi. For English to Hindi we show that our technique achieves better performance than a method that uses rules applied to the source side English parse.", "keyphrases": ["parallel corpus", "structural syntactic information", "part-of-speech tag", "arbitrary permutation"]} +{"id": "reimers-etal-2016-temporal", "title": "Temporal Anchoring of Events for the TimeBank Corpus", "abstract": "Today\u2019s extraction of temporal information for events heavily depends on annotated temporal links. These so called TLINKs capture the relation between pairs of event mentions and time expressions. One problem is that the number of possible TLINKs grows quadratic with the number of event mentions, therefore most annotation studies concentrate on links for mentions in the same or in adjacent sentences. However, as our annotation study shows, this restriction results for 58% of the event mentions in a less precise information when the event took place. This paper proposes a new annotation scheme to anchor events in time. Not only is the annotation effort much lower as it scales linear with the number of events, it also gives a more precise anchoring when the events have happened as the complete document can be taken into account. 
Using this scheme, we annotated a subset of the TimeBank Corpus and compared our results to other annotation schemes. Additionally, we present some baseline experiments to automatically anchor events in time. Our annotation scheme, the automated system and the annotated corpus are publicly available.", "keyphrases": ["timebank corpus", "adjacent sentence", "annotation scheme"]} +{"id": "chambers-2012-labeling", "title": "Labeling Documents with Timestamps: Learning from their Time Expressions", "abstract": "Temporal reasoners for document understanding typically assume that a document's creation date is known. Algorithms to ground relative time expressions and order events often rely on this timestamp to assist the learner. Unfortunately, the timestamp is not always known, particularly on the Web. This paper addresses the task of automatic document timestamping, presenting two new models that incorporate rich linguistic features about time. The first is a discriminative classifier with new features extracted from the text's time expressions (e.g., 'since 1999'). This model alone improves on previous generative models by 77%. The second model learns probabilistic constraints between time expressions and the unknown document time. Imposing these learned constraints on the discriminative model further improves its accuracy. Finally, we present a new experiment design that facilitates easier comparison by future work.", "keyphrases": ["time expression", "web", "discriminative model"]} +{"id": "ontanon-etal-2022-making", "title": "Making Transformers Solve Compositional Tasks", "abstract": "Several studies have reported the inability of Transformer models to generalize compositionally, a key type of generalization in many NLP tasks such as semantic parsing. In this paper we explore the design space of Transformer models showing that the inductive biases given to the model by several design decisions significantly impact compositional generalization. We identified Transformer configurations that generalize compositionally significantly better than previously reported in the literature in many compositional tasks. We achieve state-of-the-art results in a semantic parsing compositional generalization benchmark (COGS), and a string edit operation composition benchmark (PCFG).", "keyphrases": ["transformer", "compositional generalization", "cogs"]} +{"id": "zhang-etal-2014-character", "title": "Character-Level Chinese Dependency Parsing", "abstract": "Recent work on Chinese analysis has led to large-scale annotations of the internal structures of words, enabling character-level analysis of Chinese syntactic structures. In this paper, we investigate the problem of character-level Chinese dependency parsing, building dependency trees over characters. Character-level information can benefit downstream applications by offering flexible granularities for word segmentation while improving word-level dependency parsing accuracies. We present novel adaptations of two major shift-reduce dependency parsing algorithms to character-level parsing. Experimental results on the Chinese Treebank demonstrate improved performances over word-based parsing methods.", "keyphrases": ["chinese dependency parsing", "character", "joint model", "transition-based framework"]} +{"id": "ovrelid-etal-2018-lia", "title": "The LIA Treebank of Spoken Norwegian Dialects", "abstract": "This article presents the LIA treebank of transcribed spoken Norwegian dialects. 
It consists of dialect recordings made in the period between 1950\u20131990, which have been digitised, transcribed, and subsequently annotated with morphological and dependency-style syntactic analysis as part of the LIA (Language Infrastructure made Accessible) project at the University of Oslo. In this article, we describe the LIA material of dialect recordings and its transcription, transliteration and further morphosyntactic annotation. We focus in particular on the extension of the native NDT annotation scheme to spoken language phenomena, such as pauses and various types of disfluencies, and present the subsequent conversion of the treebank to the Universal Dependencies scheme. The treebank currently consists of 13,608 tokens, distributed over 1396 segments taken from three different dialects of spoken Norwegian. The LIA treebank annotation is an on-going effort and future releases will extend on the current data set.", "keyphrases": ["lia treebank", "norwegian", "dialect", "dependency-style syntactic analysis"]} +{"id": "sennhauser-berwick-2018-evaluating", "title": "Evaluating the Ability of LSTMs to Learn Context-Free Grammars", "abstract": "While long short-term memory (LSTM) neural net architectures are designed to capture sequence information, human language is generally composed of hierarchical structures. This raises the question as to whether LSTMs can learn hierarchical structures. We explore this question with a well-formed bracket prediction task using two types of brackets modeled by an LSTM. Demonstrating that such a system is learnable by an LSTM is the first step in demonstrating that the entire class of CFLs is also learnable. We observe that the model requires exponential memory in terms of the number of characters and embedded depth, where a sub-linear memory should suffice. Still, the model does more than memorize the training input. It learns how to distinguish between relevant and irrelevant information. On the other hand, we also observe that the model does not generalize well. We conclude that LSTMs do not learn the relevant underlying context-free rules, suggesting the good overall performance is attained rather by an efficient way of evaluating nuisance variables. LSTMs are a way to quickly reach good results for many natural language tasks, but to understand and generate natural language one has to investigate other concepts that can make more direct use of natural language's structural nature.", "keyphrases": ["context-free grammar", "dyck language", "position"]} +{"id": "wang-etal-2019-hmeae", "title": "HMEAE: Hierarchical Modular Event Argument Extraction", "abstract": "Existing event extraction methods classify each argument role independently, ignoring the conceptual correlations between different argument roles. In this paper, we propose a Hierarchical Modular Event Argument Extraction (HMEAE) model, to provide effective inductive bias from the concept hierarchy of event argument roles. Specifically, we design a neural module network for each basic unit of the concept hierarchy, and then hierarchically compose relevant unit modules with logical operations into a role-oriented modular network to classify a specific argument role. As many argument roles share the same high-level unit module, their correlation can be utilized to extract specific event arguments better. Experiments on real-world datasets show that HMEAE can effectively leverage useful knowledge from the concept hierarchy and significantly outperform the state-of-the-art baselines. 
The source code can be obtained from .", "keyphrases": ["concept hierarchy", "event argument role", "hmeae"]} +{"id": "niu-etal-2018-bi", "title": "Bi-Directional Neural Machine Translation with Synthetic Parallel Data", "abstract": "Despite impressive progress in high-resource settings, Neural Machine Translation (NMT) still struggles in low-resource and out-of-domain scenarios, often failing to match the quality of phrase-based translation. We propose a novel technique that combines back-translation and multilingual NMT to improve performance in these difficult cases. Our technique trains a single model for both directions of a language pair, allowing us to back-translate source or target monolingual data without requiring an auxiliary model. We then continue training on the augmented parallel data, enabling a cycle of improvement for a single model that can incorporate any source, target, or parallel data to improve both translation directions. As a byproduct, these models can reduce training and deployment costs significantly compared to uni-directional models. Extensive experiments show that our technique outperforms standard back-translation in low-resource scenarios, improves quality on cross-domain tasks, and effectively reduces costs across the board.", "keyphrases": ["parallel data", "back-translation", "direction"]} +{"id": "cui-etal-2018-deep", "title": "Deep Attentive Sentence Ordering Network", "abstract": "In this paper, we propose a novel deep attentive sentence ordering network (referred to as ATTOrderNet) which integrates a self-attention mechanism with LSTMs in the encoding of input sentences. It enables us to capture global dependencies among sentences regardless of their input order and obtains a reliable representation of the sentence set. With this representation, a pointer network is exploited to generate an ordered sequence. The proposed model is evaluated on Sentence Ordering and Order Discrimination tasks. The extensive experimental results demonstrate its effectiveness and superiority to the state-of-the-art methods.", "keyphrases": ["attordernet", "self-attention mechanism", "global dependency"]} +{"id": "kirschner-etal-2015-linking", "title": "Linking the Thoughts: Analysis of Argumentation Structures in Scientific Publications", "abstract": "This paper presents the results of an annotation study focused on the fine-grained analysis of argumentation structures in scientific publications. Our new annotation scheme specifies four types of binary argumentative relations between sentences, resulting in the representation of arguments as small graph structures. We developed an annotation tool that supports the annotation of such graphs and carried out an annotation study with four annotators on 24 scientific articles from the domain of educational research. For calculating the inter-annotator agreement, we adapted existing measures and developed a novel graph-based agreement measure which reflects the semantic similarity of different annotation graphs.", "keyphrases": ["argumentative relation", "scientific article", "inter-annotator agreement"]} +{"id": "reichart-rappoport-2007-self", "title": "Self-Training for Enhancement and Domain Adaptation of Statistical Parsers Trained on Small Datasets", "abstract": "Creating large amounts of annotated data to train statistical PCFG parsers is expensive, and the performance of such parsers declines when training and test data are taken from different domains. 
In this paper we use self-training in order to improve the quality of a parser and to adapt it to a different domain, using only small amounts of manually annotated seed data. We report significant improvement both when the seed and test data are in the same domain and in the out-of-domain adaptation scenario. In particular, we achieve a 50% reduction in annotation cost for the in-domain case, yielding an improvement of 66% over previous work, and a 20-33% reduction for the domain adaptation case. This is the first time that self-training with small labeled datasets is applied successfully to these tasks. We were also able to formulate a characterization of when self-training is valuable.", "keyphrases": ["domain adaptation", "generative parser", "reranker"]} +{"id": "qi-etal-2018-pre", "title": "When and Why Are Pre-Trained Word Embeddings Useful for Neural Machine Translation?", "abstract": "The performance of Neural Machine Translation (NMT) systems often suffers in low-resource scenarios where sufficiently large-scale parallel corpora cannot be obtained. Pre-trained word embeddings have proven to be invaluable for improving performance in natural language analysis tasks, which often suffer from paucity of data. However, their utility for NMT has not been extensively explored. In this work, we perform five sets of experiments that analyze when we can expect pre-trained word embeddings to help in NMT tasks. We show that such embeddings can be surprisingly effective in some cases \u2013 providing gains of up to 20 BLEU points in the most favorable setting.", "keyphrases": ["neural machine translation", "nmt task", "monolingual data"]} +{"id": "koto-etal-2020-liputan6", "title": "Liputan6: A Large-scale Indonesian Dataset for Text Summarization", "abstract": "In this paper, we introduce a large-scale Indonesian summarization dataset. We harvest articles from Liputan6.com, an online news portal, and obtain 215,827 document\u2013summary pairs. We leverage pre-trained language models to develop benchmark extractive and abstractive summarization methods over the dataset with multilingual and monolingual BERT-based models. We include a thorough error analysis by examining machine-generated summaries that have low ROUGE scores, and expose both issues with ROUGE itself, as well as with extractive and abstractive summarization models.", "keyphrases": ["indonesian summarization dataset", "news portal", "liputan6"]} +{"id": "pianta-etal-2008-textpro", "title": "The TextPro Tool Suite", "abstract": "We present TextPro, a suite of modular Natural Language Processing (NLP) tools for analysis of Italian and English texts. The suite has been designed so as to integrate and reuse state-of-the-art NLP components developed by researchers at FBK. The current version of the tool suite provides functions ranging from tokenization to chunking and Named Entity Recognition (NER). The system\u2019s architecture is organized as a pipeline of processors wherein each stage accepts data from an initial input or from an output of a previous stage, executes a specific task, and sends the resulting data to the next stage, or to the output of the pipeline. TextPro performed the best on the task of Italian NER and Italian PoS Tagging at EVALITA 2007. When tested on a number of other standard English benchmarks, TextPro confirms that it performs as a state-of-the-art system. Distributions for Linux, Solaris and Windows are available, for both research and commercial purposes. 
A web-service version of the system is under development.", "keyphrases": ["textpro", "function", "tokenization", "pos tagging"]} +{"id": "rama-singh-2009-bag", "title": "From Bag of Languages to Family Trees From Noisy Corpus", "abstract": "In this paper, we use corpus-based measures for constructing phylogenetic trees and try to address some questions about the validity of doing this and applicability to linguistic areas as against language families. We experiment with four corpus based distance measures for constructing phylogenetic trees. Three of these measures were earlier tried for estimating language distances. We use a fourth measure based on phonetic and orthographic feature n-grams. We compare the trees obtained using these measures and present our observations.", "keyphrases": ["corpus-based measure", "phylogenetic tree", "linguistic area", "n-gram"]} +{"id": "gao-etal-2019-structuring", "title": "Structuring Latent Spaces for Stylized Response Generation", "abstract": "Generating responses in a targeted style is a useful yet challenging task, especially in the absence of parallel data. With limited data, existing methods tend to generate responses that are either less stylized or less context-relevant. We propose StyleFusion, which bridges conversation modeling and non-parallel style transfer by sharing a structured latent space. This structure allows the system to generate stylized relevant responses by sampling in the neighborhood of the conversation model prediction, and continuously control the style level. We demonstrate this method using dialogues from Reddit data and two sets of sentences with distinct styles (arXiv and Sherlock Holmes novels). Automatic and human evaluation show that, without sacrificing appropriateness, the system generates responses of the targeted style and outperforms competitive baselines.", "keyphrases": ["conversation modeling", "sherlock holmes novel", "style intensity", "structured latent space"]} +{"id": "popovic-ney-2009-syntax", "title": "Syntax-Oriented Evaluation Measures for Machine Translation Output", "abstract": "We explored novel automatic evaluation measures for machine translation output oriented to the syntactic structure of the sentence: the Bleu score on the detailed Part-of-Speech (pos) tags as well as the precision, recall and F-measure obtained on pos n-grams. We also introduced F-measure based on both word and pos n-grams. Correlations between the new metrics and human judgments were calculated on the data of the first, second and third shared task of the Statistical Machine Translation Workshop. Machine translation outputs in four different European languages were taken into account: English, Spanish, French and German. The results show that the new measures correlate very well with the human judgements and that they are competitive with the widely used BLEU, METEOR and TER metrics.", "keyphrases": ["machine translation output", "part-of-speech", "pos"]} +{"id": "nicolae-nicolae-2006-bestcut", "title": "BESTCUT: A Graph Algorithm for Coreference Resolution", "abstract": "In this paper we describe a coreference resolution method that employs a classification and a clusterization phase. In a novel way, the clusterization is produced as a graph cutting algorithm, in which nodes of the graph correspond to the mentions of the text, whereas the edges of the graph constitute the confidences derived from the coreference classification. 
In experiments, the graph cutting algorithm for coreference resolution, called BestCut, achieves state-of-the-art performance.", "keyphrases": ["coreference resolution", "node", "mention", "graph-cut algorithm"]} +{"id": "baldwin-li-2015-depth", "title": "An In-depth Analysis of the Effect of Text Normalization in Social Media", "abstract": "Recent years have seen increased interest in text normalization in social media, as the informal writing styles found in Twitter and other social media data often cause problems for NLP applications. Unfortunately, most current approaches narrowly regard the normalization task as a \u201cone size fits all\u201d task of replacing non-standard words with their standard counterparts. In this work we build a taxonomy of normalization edits and present a study of normalization to examine its effect on three different downstream applications (dependency parsing, named entity recognition, and text-to-speech synthesis). The results suggest that how the normalization task should be viewed is highly dependent on the targeted application. The results also show that normalization must be thought of as more than word replacement in order to produce results comparable to those seen on clean text.", "keyphrases": ["text normalization", "twitter", "social medium data"]} +{"id": "ladhak-etal-2020-exploring", "title": "Exploring Content Selection in Summarization of Novel Chapters", "abstract": "We present a new summarization task, generating summaries of novel chapters using summary/chapter pairs from online study guides. This is a harder task than the news summarization task, given the chapter length as well as the extreme paraphrasing and generalization found in the summaries. We focus on extractive summarization, which requires the creation of a gold-standard set of extractive summaries. We present a new metric for aligning reference summary sentences with chapter sentences to create gold extracts and also experiment with different alignment methods. Our experiments demonstrate significant improvement over prior alignment approaches for our task as shown through automatic metrics and a crowd-sourced pyramid analysis.", "keyphrases": ["summarization", "chapter", "gold-standard set"]} +{"id": "yu-etal-2015-predicting", "title": "Predicting Valence-Arousal Ratings of Words Using a Weighted Graph Method", "abstract": "Compared to the categorical approach that represents affective states as several discrete classes (e.g., positive and negative), the dimensional approach represents affective states as continuous numerical values on multiple dimensions, such as the valence-arousal (VA) space, thus allowing for more fine-grained sentiment analysis. In building dimensional sentiment applications, affective lexicons with valence-arousal ratings are useful resources but are still very rare. Therefore, this study proposes a weighted graph model that considers both the relations of multiple nodes and their similarities as weights to automatically determine the VA ratings of affective words. 
Experiments on both English and Chinese affective lexicons show that the proposed method yielded a smaller error rate on VA prediction than the linear regression, kernel method, and pagerank algorithm used in previous studies.", "keyphrases": ["valence-arousal rating", "dimension", "sentiment analysis", "weighted graph model"]}
+{"id": "bacchiani-etal-2004-language", "title": "Language Model Adaptation with MAP Estimation and the Perceptron Algorithm", "abstract": "In this paper, we contrast two language model adaptation approaches: MAP estimation and the perceptron algorithm. Used in isolation, we show that MAP estimation outperforms the latter approach, for reasons which argue for combining the two approaches. When combined, the resulting system provides a 0.7 percent absolute reduction in word error rate over MAP estimation alone. In addition, we demonstrate that, in a multi-pass recognition scenario, it is better to use the perceptron algorithm on early pass word lattices, since the improved error rate improves acoustic model adaptation.", "keyphrases": ["map estimation", "perceptron algorithm", "adaptation data", "error-driven learning approach", "background model"]}
+{"id": "wang-etal-2020-learning-decouple", "title": "Learning to Decouple Relations: Few-Shot Relation Classification with Entity-Guided Attention and Confusion-Aware Training", "abstract": "This paper aims to enhance the few-shot relation classification especially for sentences that jointly describe multiple relations. Due to the fact that some relations usually keep high co-occurrence in the same context, previous few-shot relation classifiers struggle to distinguish them with few annotated instances. To alleviate the above relation confusion problem, we propose CTEG, a model equipped with two novel mechanisms to learn to decouple these easily-confused relations. On the one hand, an Entity-Guided Attention (EGA) mechanism, which leverages the syntactic relations and relative positions between each word and the specified entity pair, is introduced to guide the attention to filter out information causing confusion. On the other hand, a Confusion-Aware Training (CAT) method is proposed to explicitly learn to distinguish relations by playing a pushing-away game between classifying a sentence into a true relation and its confusing relation. Extensive experiments are conducted on the FewRel dataset, and the results show that our proposed model achieves comparable and even much better results to strong baselines in terms of accuracy. Furthermore, the ablation test and case study verify the effectiveness of our proposed EGA and CAT, especially in addressing the relation confusion problem.", "keyphrases": ["few-shot relation classification", "confusion-aware training", "relation confusion problem"]}
+{"id": "troles-schmid-2021-extending", "title": "Extending Challenge Sets to Uncover Gender Bias in Machine Translation: Impact of Stereotypical Verbs and Adjectives", "abstract": "Human gender bias is reflected in language and text production. Because state-of-the-art machine translation (MT) systems are trained on large corpora of text, mostly generated by humans, gender bias can also be found in MT. For instance when occupations are translated from a language like English, which mostly uses gender neutral words, to a language like German, which mostly uses a feminine and a masculine version for an occupation, a decision must be made by the MT System. 
Recent research showed that MT systems are biased towards stereotypical translation of occupations. In 2019 the first, and so far only, challenge set explicitly designed to measure the extent of gender bias in MT systems has been published. In this set measurement of gender bias is solely based on the translation of occupations. With our paper we present an extension of this challenge set, called WiBeMT, which adds gender-biased adjectives and sentences with gender-biased verbs. The resulting challenge set consists of over 70,000 sentences and has been translated with three commercial MT systems: DeepL Translator, Microsoft Translator, and Google Translate. Results show a gender bias for all three MT systems. This gender bias is to a great extent significantly influenced by adjectives and to a lesser extent by verbs.", "keyphrases": ["gender bias", "machine translation", "adjective"]}
+{"id": "ahmed-butt-2011-discovering", "title": "Discovering Semantic Classes for Urdu N-V Complex Predicates", "abstract": "This paper reports on an exploratory investigation as to whether classes of Urdu N-V complex predicates can be identified on the basis of syntactic patterns and lexical choices associated with the N-V complex predicates. Working with data from a POS annotated corpus, we show that choices with respect to the number of arguments, case marking on subjects and which light verbs are felicitous with which nouns depend heavily on the semantics of the noun in the N-V complex predicate. This initial work represents an important step towards identifying semantic criteria relevant for complex predicate formation. Identifying the semantic criteria and being able to systematically code them in turn represents a first step towards building up a lexical resource for nouns as part of developing natural language processing tools for the underresourced South Asian language Urdu.", "keyphrases": ["complex predicate", "language urdu", "semantic compatibility"]}
+{"id": "saunders-etal-2020-neural", "title": "Neural Machine Translation Doesn't Translate Gender Coreference Right Unless You Make It", "abstract": "Neural Machine Translation (NMT) has been shown to struggle with grammatical gender that is dependent on the gender of human referents, which can cause gender bias effects. Many existing approaches to this problem seek to control gender inflection in the target language by explicitly or implicitly adding a gender feature to the source sentence, usually at the sentence level. In this paper we propose schemes for incorporating explicit word-level gender inflection tags into NMT. We explore the potential of this gender-inflection controlled translation when the gender feature can be determined from a human reference, or when a test sentence can be automatically gender-tagged, assessing on English-to-Spanish and English-to-German translation. We find that simple existing approaches can over-generalize a gender-feature to multiple entities in a sentence, and suggest effective alternatives in the form of tagged coreference adaptation data. We also propose an extension to assess translations of gender-neutral entities from English given a corresponding linguistic convention, such as a non-binary inflection, in the target language.", "keyphrases": ["source sentence", "gender inflection tag", "neural machine translation"]}
+{"id": "farajian-etal-2017-neural", "title": "Neural vs. 
Phrase-Based Machine Translation in a Multi-Domain Scenario", "abstract": "State-of-the-art neural machine translation (NMT) systems are generally trained on specific domains by carefully selecting the training sets and applying proper domain adaptation techniques. In this paper we consider the real world scenario in which the target domain is not predefined, hence the system should be able to translate text from multiple domains. We compare the performance of a generic NMT system and phrase-based statistical machine translation (PBMT) system by training them on a generic parallel corpus composed of data from different domains. Our results on multi-domain English-French data show that, in these realistic conditions, PBMT outperforms its neural counterpart. This raises the question: is NMT ready for deployment as a generic/multi-purpose MT backbone in real-world settings?", "keyphrases": ["machine translation", "neural counterpart", "multi-domain data"]} +{"id": "vafa-etal-2020-text", "title": "Text-Based Ideal Points", "abstract": "Ideal point models analyze lawmakers' votes to quantify their political positions, or ideal points. But votes are not the only way to express a political position. Lawmakers also give speeches, release press statements, and post tweets. In this paper, we introduce the text-based ideal point model (TBIP), an unsupervised probabilistic topic model that analyzes texts to quantify the political positions of its authors. We demonstrate the TBIP with two types of politicized text data: U.S. Senate speeches and senator tweets. Though the model does not analyze their votes or political affiliations, the TBIP separates lawmakers by party, learns interpretable politicized topics, and infers ideal points close to the classical vote-based ideal points. One benefit of analyzing texts, as opposed to votes, is that the TBIP can estimate ideal points of anyone who authors political texts, including non-voting actors. To this end, we use it to study tweets from the 2020 Democratic presidential candidates. Using only the texts of their tweets, it identifies them along an interpretable progressive-to-moderate spectrum.", "keyphrases": ["point model", "only way", "tbip", "topic model", "text-based ideal point"]} +{"id": "ruff-etal-2019-self", "title": "Self-Attentive, Multi-Context One-Class Classification for Unsupervised Anomaly Detection on Text", "abstract": "There exist few text-specific methods for unsupervised anomaly detection, and for those that do exist, none utilize pre-trained models for distributed vector representations of words. In this paper we introduce a new anomaly detection method\u2014Context Vector Data Description (CVDD)\u2014which builds upon word embedding models to learn multiple sentence representations that capture multiple semantic contexts via the self-attention mechanism. Modeling multiple contexts enables us to perform contextual anomaly detection of sentences and phrases with respect to the multiple themes and concepts present in an unlabeled text corpus. These contexts in combination with the self-attention weights make our method highly interpretable. 
We demonstrate the effectiveness of CVDD quantitatively as well as qualitatively on the well-known Reuters, 20 Newsgroups, and IMDB Movie Reviews datasets.", "keyphrases": ["unsupervised anomaly detection", "multiple sentence representation", "self-attention mechanism"]} +{"id": "gu-etal-2018-multimodal", "title": "Multimodal Affective Analysis Using Hierarchical Attention Strategy with Word-Level Alignment", "abstract": "Multimodal affective computing, learning to recognize and interpret human affect and subjective information from multiple data sources, is still a challenge because: (i) it is hard to extract informative features to represent human affects from heterogeneous inputs; (ii) current fusion strategies only fuse different modalities at abstract levels, ignoring time-dependent interactions between modalities. Addressing such issues, we introduce a hierarchical multimodal architecture with attention and word-level fusion to classify utterance-level sentiment and emotion from text and audio data. Our introduced model outperforms state-of-the-art approaches on published datasets, and we demonstrate that our model is able to visualize and interpret synchronized attention over modalities.", "keyphrases": ["word-level alignment", "hierarchical multimodal architecture", "emotion"]} +{"id": "yang-etal-2009-combining", "title": "Combining a Two-step Conditional Random Field Model and a Joint Source Channel Model for Machine Transliteration", "abstract": "This paper describes our system for \"NEWS 2009 Machine Transliteration Shared Task\" (NEWS 2009). We only participated in the standard run, which is a direct orthographical mapping (DOP) between two languages without using any intermediate phonemic mapping. We propose a new two-step conditional random field (CRF) model for DOP machine transliteration, in which the first CRF segments a source word into chunks and the second CRF maps the chunks to a word in the target language. The two-step CRF model obtains a slightly lower top-1 accuracy when compared to a state-of-the-art n-gram joint source-channel model. The combination of the CRF model with the joint source-channel leads to improvements in all the tasks. The official result of our system in the NEWS 2009 shared task confirms the effectiveness of our system; where we achieved 0.627 top-1 accuracy for Japanese transliterated to Japanese Kanji(JJ), 0.713 for English-to-Chinese(E2C) and 0.510 for English-to-Japanese Katakana(E2J).", "keyphrases": ["machine transliteration", "orthographical mapping", "source word"]} +{"id": "berard-etal-2019-machine", "title": "Machine Translation of Restaurant Reviews: New Corpus for Domain Adaptation and Robustness", "abstract": "We share a French-English parallel corpus of Foursquare restaurant reviews, and define a new task to encourage research on Neural Machine Translation robustness and domain adaptation, in a real-world scenario where better-quality MT would be greatly beneficial. We discuss the challenges of such user-generated content, and train good baseline models that build upon the latest techniques for MT robustness. We also perform an extensive evaluation (automatic and human) that shows significant improvements over existing online systems. 
Finally, we propose task-specific metrics based on sentiment analysis or translation accuracy of domain-specific polysemous words.", "keyphrases": ["restaurant review", "domain adaptation", "online system"]} +{"id": "suzuki-2011-automatic", "title": "Automatic Post-Editing based on SMT and its selective application by Sentence-Level Automatic Quality Evaluation", "abstract": "In the computing assisted translation process with machine translation (MT), postediting costs time and efforts on the part of human. To solve this problem, some have attempted to automate post editing. Post-editing isn\u2019t always necessary, however, when MT outputs are of adequate quality for human. This means that we need to be able to estimate the translation quality of each translated sentence to determine whether post-editing should be performed. While conventional automatic metrics such as BLEU, NIST and METEOR, require the golden standards (references), for wider applications we need to establish methods that can estimate the quality of translations without references. This paper presents a sentence-level automatic quality evaluator, composed of an SMT phrase-based automatic post-editing (APE) module and a confidence estimator characterized by PLS (Partial Least Squares) regression analysis. It is known that this model is a better model for predicting output variable than a normal multiple regression analysis when the multicollinearity exists between the input variables. Experiments with Japanese to English patent translations show the validity of the proposed methods.", "keyphrases": ["automatic post-editing", "spe", "domain adaptation"]} +{"id": "bojar-etal-2011-grain", "title": "A Grain of Salt for the WMT Manual Evaluation", "abstract": "The Workshop on Statistical Machine Translation (WMT) has become one of ACL's flagship workshops, held annually since 2006. In addition to soliciting papers from the research community, WMT also features a shared translation task for evaluating MT systems. This shared task is notable for having manual evaluation as its cornerstone. The Workshop's overview paper, playing a descriptive and administrative role, reports the main results of the evaluation without delving deep into analyzing those results. The aim of this paper is to investigate and explain some interesting idiosyncrasies in the reported results, which only become apparent when performing a more thorough analysis of the collected annotations. Our analysis sheds some light on how the reported results should (and should not) be interpreted, and also gives rise to some helpful recommendation for the organizers of WMT.", "keyphrases": ["workshop", "statistical machine translation", "wmt evaluation", "discrepancy"]} +{"id": "artetxe-etal-2019-effective", "title": "An Effective Approach to Unsupervised Machine Translation", "abstract": "While machine translation has traditionally relied on large amounts of parallel corpora, a recent research line has managed to train both Neural Machine Translation (NMT) and Statistical Machine Translation (SMT) systems using monolingual corpora only. In this paper, we identify and address several deficiencies of existing unsupervised SMT approaches by exploiting subword information, developing a theoretically well founded unsupervised tuning method, and incorporating a joint refinement procedure. Moreover, we use our improved SMT system to initialize a dual NMT model, which is further fine-tuned through on-the-fly back-translation. 
Together, we obtain large improvements over the previous state-of-the-art in unsupervised machine translation. For instance, we get 22.5 BLEU points in English-to-German WMT 2014, 5.5 points more than the previous best unsupervised system, and 0.5 points more than the (supervised) shared task winner back in 2014.", "keyphrases": ["unsupervised machine translation", "large amount", "unmt model", "bli"]} +{"id": "liu-etal-2018-parsing", "title": "Parsing Tweets into Universal Dependencies", "abstract": "We study the problem of analyzing tweets with universal dependencies (UD). We extend the UD guidelines to cover special constructions in tweets that affect tokenization, part-of-speech tagging, and labeled dependencies. Using the extended guidelines, we create a new tweet treebank for English (Tweebank v2) that is four times larger than the (unlabeled) Tweebank v1 introduced by Kong et al. (2014). We characterize the disagreements between our annotators and show that it is challenging to deliver consistent annotation due to ambiguity in understanding and explaining tweets. Nonetheless, using the new treebank, we build a pipeline system to parse raw tweets into UD. To overcome the annotation noise without sacrificing computational efficiency, we propose a new method to distill an ensemble of 20 transition-based parsers into a single one. Our parser achieves an improvement of 2.2 in LAS over the un-ensembled baseline and outperforms parsers that are state-of-the-art on other treebanks in both accuracy and speed.", "keyphrases": ["universal dependency", "guideline", "treebank"]} +{"id": "nicholson-baldwin-2006-interpretation", "title": "Interpretation of Compound Nominalisations using Corpus and Web Statistics", "abstract": "We present two novel paraphrase tests for automatically predicting the inherent semantic relation of a given compound nominalisation as one of subject, direct object, or prepositional object. We compare these to the usual verb-argument paraphrase test using corpus statistics, and frequencies obtained by scraping the Google search engine interface. We also implemented a more robust statistical measure than maximum likelihood estimation --- the confidence interval. A significant reduction in data sparseness was achieved, but this alone is insufficient to provide a substantial performance improvement.", "keyphrases": ["distributional semantic", "non-literal usage", "few token-based approach"]} +{"id": "poliak-etal-2018-evaluation", "title": "On the Evaluation of Semantic Phenomena in Neural Machine Translation Using Natural Language Inference", "abstract": "We propose a process for investigating the extent to which sentence representations arising from neural machine translation (NMT) systems encode distinct semantic phenomena. We use these representations as features to train a natural language inference (NLI) classifier based on datasets recast from existing semantic annotations. In applying this process to a representative NMT system, we find its encoder appears most suited to supporting inferences at the syntax-semantics interface, as compared to anaphora resolution requiring world knowledge. 
We conclude with a discussion on the merits and potential deficiencies of the existing process, and how it may be improved and extended as a broader framework for evaluating semantic coverage.", "keyphrases": ["natural language inference", "nli", "semantic phenomenon"]}
+{"id": "furstenau-lapata-2012-semi", "title": "Semi-Supervised Semantic Role Labeling via Structural Alignment", "abstract": "Large-scale annotated corpora are a prerequisite to developing high-performance semantic role labeling systems. Unfortunately, such corpora are expensive to produce, limited in size, and may not be representative. Our work aims to reduce the annotation effort involved in creating resources for semantic role labeling via semi-supervised learning. The key idea of our approach is to find novel instances for classifier training based on their similarity to manually labeled seed instances. The underlying assumption is that sentences that are similar in their lexical material and syntactic structure are likely to share a frame semantic analysis. We formalize the detection of similar sentences and the projection of role annotations as a graph alignment problem, which we solve exactly using integer linear programming. Experimental results on semantic role labeling show that the automatic annotations produced by our method improve performance over using hand-labeled instances alone.", "keyphrases": ["novel instance", "classifier training", "seed instance", "fu\u0308rstenau", "annotation projection"]}
+{"id": "zhang-etal-2018-sentence", "title": "Sentence-State LSTM for Text Representation", "abstract": "Bi-directional LSTMs are a powerful tool for text representation. On the other hand, they have been shown to suffer various limitations due to their sequential nature. We investigate an alternative LSTM structure for encoding text, which consists of a parallel state for each word. Recurrent steps are used to perform local and global information exchange between words simultaneously, rather than incremental reading of a sequence of words. Results on various classification and sequence labelling benchmarks show that the proposed model has strong representation power, giving highly competitive performances compared to stacked BiLSTM models with similar parameter numbers.", "keyphrases": ["text representation", "information exchange", "reading", "graph recurrent network"]}
+{"id": "peng-etal-2015-solving", "title": "Solving Hard Coreference Problems", "abstract": "Coreference resolution is a key problem in natural language understanding that still escapes reliable solutions. One fundamental difficulty has been that of resolving instances involving pronouns since they often require deep language understanding and use of background knowledge. In this paper we propose an algorithmic solution that involves a new representation for the knowledge required to address hard coreference problems, along with a constrained optimization framework that uses this knowledge in coreference decision making. Our representation, Predicate Schemas, is instantiated with knowledge acquired in an unsupervised way, and is compiled automatically into constraints that impact the coreference decision. 
We present a general coreference resolution system that significantly improves state-of-the-art performance on hard, Winograd-style, pronoun resolution cases, while still performing at the state-of-the-art level on standard coreference resolution datasets.", "keyphrases": ["hard coreference problem", "predicate schemas", "knowledge basis", "antecedent"]}
+{"id": "yin-etal-2016-neural", "title": "Neural Enquirer: Learning to Query Tables in Natural Language", "abstract": "We propose NEURAL ENQUIRER -- a neural network architecture for answering natural language (NL) questions based on a knowledge base (KB) table. Unlike existing work on end-to-end training of semantic parsers [Pasupat and Liang, 2015; Neelakantan et al., 2015], NEURAL ENQUIRER is fully \"neuralized\": it finds distributed representations of queries and KB tables, and executes queries through a series of neural network components called \"executors\". Executors model query operations and compute intermediate execution results in the form of table annotations at different levels. NEURAL ENQUIRER can be trained with gradient descent, with which the representations of queries and the KB table are jointly optimized with the query execution logic. The training can be done in an end-to-end fashion, and it can also be carried out with stronger guidance, e.g., step-by-step supervision for complex queries. NEURAL ENQUIRER is one step towards building neural network systems that can understand natural language in real-world tasks. As a proof-of-concept, we conduct experiments on a synthetic QA task, and demonstrate that the model can learn to execute reasonably complex NL queries on small-scale KB tables.", "keyphrases": ["query", "semantic parser", "neural enquirer"]}
+{"id": "bollmann-2013-pos", "title": "POS Tagging for Historical Texts with Sparse Training Data", "abstract": "This paper presents a method for part-of-speech tagging of historical data and evaluates it on texts from different corpora of historical German (15th\u201318th century). Spelling normalization is used to preprocess the texts before applying a POS tagger trained on modern German corpora. Using only 250 manually normalized tokens as training data, the tagging accuracy of a manuscript from the 15th century can be raised from 28.65% to 74.89%.", "keyphrases": ["historical text", "spelling normalization", "pos tagging"]}
+{"id": "lacroix-2019-dependency", "title": "Dependency Parsing as Sequence Labeling with Head-Based Encoding and Multi-Task Learning", "abstract": "Dependency parsing as sequence labeling has recently proved to be a relevant alternative to the traditional transition- and graph-based approaches. It offers a good trade-off between parsing accuracy and speed. However, recent work on dependency parsing as sequence labeling ignores the pre-processing time of Part-of-Speech tagging \u2013 which is required for this task \u2013 in the evaluation of speed while other studies showed that Part-of-Speech tags are not essential to achieve state-of-the-art parsing scores. In this paper, we compare the accuracy and speed of shared and stacked multi-task learning strategies \u2013 as well as a strategy that combines both \u2013 to learn Part-of-Speech tagging and dependency parsing in a single sequence labeling pipeline. 
In addition, we propose an alternative encoding of the dependencies as labels which does not use Part-of-Speech tags and improves dependency parsing accuracy for most of the languages we evaluate.", "keyphrases": ["alternative encoding", "dependency parsing", "pos tag"]} +{"id": "hsi-etal-2016-leveraging", "title": "Leveraging Multilingual Training for Limited Resource Event Extraction", "abstract": "Event extraction has become one of the most important topics in information extraction, but to date, there is very limited work on leveraging cross-lingual training to boost performance. We propose a new event extraction approach that trains on multiple languages using a combination of both language-dependent and language-independent features, with particular focus on the case where target domain training data is of very limited size. We show empirically that multilingual training can boost performance for the tasks of event trigger extraction and event argument extraction on the Chinese ACE 2005 dataset.", "keyphrases": ["multilingual training", "event extraction", "language-independent feature"]} +{"id": "morante-etal-2009-joint", "title": "Joint Memory-Based Learning of Syntactic and Semantic Dependencies in Multiple Languages", "abstract": "In this paper we present a system submitted to the CoNLL Shared Task 2009 performing the identification and labeling of syntactic and semantic dependencies in multiple languages. Dependencies are truly jointly learned, i.e. as if they were a single task. The system works in two phases: a classification phase in which three classifiers predict different types of information, and a ranking phase in which the output of the classifiers is combined.", "keyphrases": ["multiple language", "syntactic parsing", "joint learning"]} +{"id": "pitler-etal-2010-using", "title": "Using Web-scale N-grams to Improve Base NP Parsing Performance", "abstract": "We use web-scale N-grams in a base NP parser that correctly analyzes 95.4% of the base NPs in natural text. Web-scale data improves performance. That is, there is no data like more data. Performance scales log-linearly with the number of parameters in the model (the number of unique N-grams). The web-scale N-grams are particularly helpful in harder cases, such as NPs that contain conjunctions.", "keyphrases": ["web-scale n-grams", "noun phrase", "supervised task"]} +{"id": "konkol-2016-uwb", "title": "UWB at SemEval-2016 Task 11: Exploring Features for Complex Word Identification", "abstract": "In this paper, we present our system developed for the SemEval 2016 Task 11: Complex Word Identification. Our team achieved the 3rd place among 21 participants. Our systems ranked 4th and 13th among 42 submitted systems. We proposed multiple features suitable for complex word identification, evaluated them, and discussed their properties. According to the results of our experiments, our final system used maximum entropy classifier with a single feature \u2013 document frequency.", "keyphrases": ["complex word identification", "document frequency", "wikipedia"]} +{"id": "bicici-2013-feature", "title": "Feature Decay Algorithms for Fast Deployment of Accurate Statistical Machine Translation Systems", "abstract": "We use feature decay algorithms (FDA) for fast deployment of accurate statistical machine translation systems taking only about half a day for each translation direction. 
We develop parallel FDA for solving computational scalability problems caused by the abundance of training data for SMT models and LM models and still achieve SMT performance that is on par with using all of the training data or better. Parallel FDA runs separate FDA models on randomized subsets of the training data and combines the instance selections later. Parallel FDA can also be used for selecting the LM corpus based on the training set selected by parallel FDA. The high quality of the selected training data allows us to obtain very accurate translation outputs close to the top performing SMT systems. The relevancy of the selected LM corpus can reach up to 86% reduction in the number of OOV tokens and up to 74% reduction in the perplexity. We perform SMT experiments in all language pairs in the WMT13 translation task and obtain SMT performance close to the top systems using significantly less resources for training and development.", "keyphrases": ["fast deployment", "fda", "feature decay algorithm"]}
+{"id": "van-der-wees-etal-2016-simple", "title": "A Simple but Effective Approach to Improve Arabizi-to-English Statistical Machine Translation", "abstract": "A major challenge for statistical machine translation (SMT) of Arabic-to-English user-generated text is the prevalence of text written in Arabizi, or Romanized Arabic. When facing such texts, a translation system trained on conventional Arabic-English data will suffer from extremely low model coverage. In addition, Arabizi is not regulated by any official standardization and therefore highly ambiguous, which prevents rule-based approaches from achieving good translation results. In this paper, we improve Arabizi-to-English machine translation by presenting a simple but effective Arabizi-to-Arabic transliteration pipeline that does not require knowledge by experts or native Arabic speakers. We incorporate this pipeline into a phrase-based SMT system, and show that translation quality after automatically transliterating Arabizi to Arabic yields results that are comparable to those achieved after human transliteration.", "keyphrases": ["statistical machine translation", "arabizi", "arabizi-to-arabic transliteration pipeline"]}
+{"id": "wang-etal-2013-financial", "title": "Financial Sentiment Analysis for Risk Prediction", "abstract": "This paper attempts to identify the importance of sentiment words in financial reports on financial risk. By using a finance-specific sentiment lexicon, we apply regression and ranking techniques to analyze the relations between sentiment words and financial risk. The experimental results show that, based on the bag-of-words model, models trained on sentiment words only result in comparable performance to those on origin texts, which confirms the importance of financial sentiment words on risk prediction. Furthermore, the learned models suggest strong correlations between financial sentiment words and risk of companies. As a result, these findings are of great value for providing us more insight and understanding into the impact of financial sentiment words in financial reports.", "keyphrases": ["risk prediction", "report", "volatility"]}
+{"id": "faralli-etal-2020-multiple", "title": "Multiple Knowledge GraphDB (MKGDB)", "abstract": "We present MKGDB, a large-scale graph database created as a combination of multiple taxonomy backbones extracted from 5 existing knowledge graphs, namely: ConceptNet, DBpedia, WebIsAGraph, WordNet and the Wikipedia category hierarchy. 
MKGDB, thanks the versatility of the Neo4j graph database manager technology, is intended to favour and help the development of open-domain natural language processing applications relying on knowledge bases, such as information extraction, hypernymy discovery, topic clustering, and others. Our resource consists of a large hypernymy graph which counts more than 37 million nodes and more than 81 million hypernymy relations.", "keyphrases": ["mkgdb", "dbpedia", "knowledge basis"]} +{"id": "chan-etal-2019-rapid", "title": "Rapid Customization for Event Extraction", "abstract": "Extracting events in the form of who is involved in what at when and where from text, is one of the core information extraction tasks that has many applications such as web search and question answering. We present a system for rapidly customizing event extraction capability to find new event types (what happened) and their arguments (who, when, and where). To enable extracting events of new types, we develop a novel approach to allow a user to find, expand and filter event triggers by exploring an unannotated development corpus. The system will then generate mention level event annotation automatically and train a neural network model for finding the corresponding events. To enable extracting arguments for new event types, the system makes novel use of the ACE annotation dataset to train a generic argument attachment model for extracting Actor, Place, and Time. We demonstrate that with less than 10 minutes of human effort per event type, the system achieves good performance for 67 novel event types. Experiments also show that the generic argument attachment model performs well on the novel event types. Our system (code, UI, documentation, demonstration video) is released as open source.", "keyphrases": ["event extraction", "new type", "leverage annotation", "keyword"]} +{"id": "huang-etal-2009-bilingually", "title": "Bilingually-Constrained (Monolingual) Shift-Reduce Parsing", "abstract": "Jointly parsing two languages has been shown to improve accuracies on either or both sides. However, its search space is much bigger than the monolingual case, forcing existing approaches to employ complicated modeling and crude approximations. Here we propose a much simpler alternative, bilingually-constrained monolingual parsing, where a source-language parser learns to exploit reorderings as additional observation, but not bothering to build the target-side tree as well. We show specifically how to enhance a shift-reduce dependency parser with alignment features to resolve shift-reduce conflicts. Experiments on the bilingual portion of Chinese Treebank show that, with just 3 bilingual features, we can improve parsing accuracies by 0.6% (absolute) for both English and Chinese over a state-of-the-art baseline, with negligible (~6%) efficiency overhead, thus much faster than biparsing.", "keyphrases": ["shift-reduce parsing", "source-language parser", "shift-reduce conflict", "bilingual constraint"]} +{"id": "ebner-etal-2020-multi", "title": "Multi-Sentence Argument Linking", "abstract": "We present a novel document-level model for finding argument spans that fill an event's roles, connecting related ideas in sentence-level semantic role labeling and coreference resolution. Because existing datasets for cross-sentence linking are small, development of our neural model is supported through the creation of a new resource, Roles Across Multiple Sentences (RAMS), which contains 9,124 annotated events across 139 types. 
We demonstrate strong performance of our model on RAMS and other event-related datasets.", "keyphrases": ["argument linking", "multiple sentences", "trigger", "eae"]} +{"id": "qin-etal-2020-feature", "title": "Feature Projection for Improved Text Classification", "abstract": "In classification, there are usually some good features that are indicative of class labels. For example, in sentiment classification, words like good and nice are indicative of the positive sentiment and words like bad and terrible are indicative of the negative sentiment. However, there are also many common features (e.g., words) that are not indicative of any specific class (e.g., voice and screen, which are common to both sentiment classes and are not discriminative for classification). Although deep learning has made significant progresses in generating discriminative features through its powerful representation learning, we believe there is still room for improvement. In this paper, we propose a novel angle to further improve this representation learning, i.e., feature projection. This method projects existing features into the orthogonal space of the common features. The resulting projection is thus perpendicular to the common features and more discriminative for classification. We apply this new method to improve CNN, RNN, Transformer, and Bert based text classification and obtain markedly better results.", "keyphrases": ["projection", "sentiment classification", "bert"]} +{"id": "brooks-2006-unsupervised", "title": "Unsupervised Grammar Induction by Distribution and Attachment", "abstract": "Distributional approaches to grammar induction are typically inefficient, enumerating large numbers of candidate constituents. In this paper, we describe a simplified model of distributional analysis which uses heuristics to reduce the number of candidate constituents under consideration. We apply this model to a large corpus of over 400000 words of written English, and evaluate the results using EVALB. We show that the performance of this approach is limited, providing a detailed analysis of learned structure and a comparison with actual constituent-context distributions. This motivates a more structured approach, using a process of attachment to form constituents from their distributional components. Our findings suggest that distributional methods do not generalize enough to learn syntax effectively from raw text, but that attachment methods are more successful.", "keyphrases": ["attachment", "distributional approach", "constituent"]} +{"id": "kantor-etal-2019-learning", "title": "Learning to combine Grammatical Error Corrections", "abstract": "The field of Grammatical Error Correction (GEC) has produced various systems to deal with focused phenomena or general text editing. We propose an automatic way to combine black-box systems. Our method automatically detects the strength of a system or the combination of several systems per error type, improving precision and recall while optimizing F-score directly. We show consistent improvement over the best standalone system in all the configurations tested. This approach also outperforms average ensembling of different RNN models with random initializations. In addition, we analyze the use of BERT for GEC - reporting promising results on this end. We also present a spellchecker created for this task which outperforms standard spellcheckers tested on the task of spellchecking. 
This paper describes a system submission to Building Educational Applications 2019 Shared Task: Grammatical Error Correction. Combining the output of top BEA 2019 shared task systems using our approach currently holds the highest reported score in the open phase of the BEA 2019 shared task, improving F-0.5 score by 3.7 points over the best result reported.", "keyphrases": ["recall", "bert", "spellchecker"]}
+{"id": "virpioja-etal-2011-empirical", "title": "Empirical Comparison of Evaluation Methods for Unsupervised Learning of Morphology", "abstract": "Unsupervised and semi-supervised learning of morphology provide practical solutions for processing morphologically rich languages with less human labor than the traditional rule-based analyzers. Direct evaluation of the learning methods using linguistic reference analyses is important for their development, as evaluation through the final applications is often time consuming. However, even linguistic evaluation is not straightforward for full morphological analysis, because the morpheme labels generated by the learning method can be arbitrary. We review the previous evaluation methods for the learning tasks and propose new variations. In order to compare the methods, we perform an extensive meta-evaluation using the large collection of results from the Morpho Challenge competitions. RESUME. Unsupervised and semi-supervised learning of morphology provides practical solutions for processing morphologically rich languages and requires reduced human intervention compared to traditional rule-based analyzers. Direct evaluation of the learning methods using linguistic reference analyses is important for their development, since evaluation through the final applications generally takes a long time. However, even linguistic evaluation is not simple for full morphological analysis, because the morpheme identifiers generated by the learning method can turn out to be arbitrary. We review the existing evaluation methods for the learning tasks and propose new variations. In order to compare the methods, we perform an extensive meta-evaluation using the large base of results from the Morpho Challenge competitions.", "keyphrases": ["evaluation method", "morphology", "segmentation algorithm"]}
+{"id": "sumita-sugaya-2006-word", "title": "Word Pronunciation Disambiguation using the Web", "abstract": "This paper proposes an automatic method of reading proper names with multiple pronunciations. First, the method obtains Web pages that include both the proper name and its pronunciation. Second, the method feeds them to the learner for classification. The current accuracy is around 90% for open data.", "keyphrases": ["pronunciation disambiguation", "web", "proper name"]}
+{"id": "hough-purver-2014-strongly", "title": "Strongly Incremental Repair Detection", "abstract": "We present STIR (STrongly Incremental Repair detection), a system that detects speech repairs and edit terms on transcripts incrementally with minimal latency. STIR uses information-theoretic measures from n-gram models as its principal decision features in a pipeline of classifiers detecting the different stages of repairs. 
Results on the Switchboard disfluency tagged corpus show utterance-final accuracy on a par with state-of-the-art incremental repair detection methods, but with better incremental accuracy, faster time-to-detection and less computational overhead. We evaluate its performance using incremental metrics and propose new repair processing evaluation standards.", "keyphrases": ["repair", "pipeline", "good decision", "assumption", "word-by-word data"]}
+{"id": "kobbe-etal-2020-unsupervised", "title": "Unsupervised stance detection for arguments from consequences", "abstract": "Social media platforms have become an essential venue for online deliberation where users discuss arguments, debate, and form opinions. In this paper, we propose an unsupervised method to detect the stance of argumentative claims with respect to a topic. Most related work focuses on topic-specific supervised models that need to be trained for every emergent debate topic. To address this limitation, we propose a topic independent approach that focuses on a frequently encountered class of arguments, specifically, on arguments from consequences. We do this by extracting the effects that claims refer to, and proposing a means for inferring if the effect is a good or bad consequence. Our experiments provide promising results that are comparable to, and in particular regards even outperform BERT. Furthermore, we publish a novel dataset of arguments relating to consequences, annotated with Amazon Mechanical Turk.", "keyphrases": ["consequence", "opinion", "unsupervised method"]}
+{"id": "buck-2012-black", "title": "Black Box Features for the WMT 2012 Quality Estimation Shared Task", "abstract": "In this paper we introduce a number of new features for quality estimation in machine translation that were developed for the WMT 2012 quality estimation shared task. We find that very simple features such as indicators of certain characters are able to outperform complex features that aim to model the connection between two languages.", "keyphrases": ["wmt", "non-linear learning method", "different type"]}
+{"id": "sakaguchi-etal-2014-efficient", "title": "Efficient Elicitation of Annotations for Human Evaluation of Machine Translation", "abstract": "A main output of the annual Workshop on Statistical Machine Translation (WMT) is a ranking of the systems that participated in its shared translation tasks, produced by aggregating pairwise sentence-level comparisons collected from human judges. Over the past few years, there have been a number of tweaks to the aggregation formula in attempts to address issues arising from the inherent ambiguity and subjectivity of the task, as well as weaknesses in the proposed models and the manner of model selection. We continue this line of work by adapting the TrueSkill\u2122 algorithm \u2014 an online approach for modeling the relative skills of players in ongoing competitions, such as Microsoft\u2019s Xbox Live \u2014 to the human evaluation of machine translation output. 
Our experimental results show that TrueSkill outperforms other recently proposed models on accuracy, and also can significantly reduce the number of pairwise annotations that need to be collected by sampling non-uniformly from the space of system competitions.", "keyphrases": ["human evaluation", "workshop", "statistical machine translation"]} +{"id": "hossain-schwitter-2018-specifying", "title": "Specifying Conceptual Models Using Restricted Natural Language", "abstract": "The key activity to design an information system is conceptual modelling which brings out and describes the general knowledge that is required to build a system. In this paper we propose a novel approach to conceptual modelling where the domain experts will be able to specify and construct a model using a restricted form of natural language. A restricted natural language is a subset of a natural language that has well-defined computational properties and therefore can be translated unambiguously into a formal notation. We will argue that a restricted natural language is suitable for writing precise and consistent specifications that lead to executable conceptual models. Using a restricted natural language will allow the domain experts to describe a scenario in the terminology of the application domain without the need to formally encode this scenario. The resulting textual specification can then be automatically translated into the language of the desired conceptual modelling framework.", "keyphrases": ["conceptual modelling", "restricted natural language", "domain expert", "specification"]} +{"id": "beigman-klebanov-etal-2017-continuous", "title": "Continuous fluency tracking and the challenges of varying text complexity", "abstract": "This paper is a preliminary report on using text complexity measurement in the service of a new educational application. We describe a reading intervention where a child takes turns reading a book aloud with a virtual reading partner. Our ultimate goal is to provide meaningful feedback to the parent or the teacher by continuously tracking the child's improvement in reading fluency. We show that this would not be a simple endeavor, due to an intricate relationship between text complexity from the point of view of comprehension and reading rate.", "keyphrases": ["fluency", "text complexity", "reading rate"]} +{"id": "nie-etal-2020-learn", "title": "What Can We Learn from Collective Human Opinions on Natural Language Inference Data?", "abstract": "Despite the subjective nature of many NLP tasks, most NLU evaluations have focused on using the majority label with presumably high agreement as the ground truth. Less attention has been paid to the distribution of human opinions. We collect ChaosNLI, a dataset with a total of 464,500 annotations to study Collective HumAn OpinionS in oft-used NLI evaluation sets. This dataset is created by collecting 100 annotations per example for 3,113 examples in SNLI and MNLI and 1,532 examples in \u03b1NLI. Analysis reveals that: (1) high human disagreement exists in a noticeable amount of examples in these datasets; (2) the state-of-the-art models lack the ability to recover the distribution over human labels; (3) models achieve near-perfect accuracy on the subset of data with a high level of human agreement, whereas they can barely beat a random guess on the data with low levels of human agreement, which compose most of the common errors made by state-of-the-art models on the evaluation sets. 
This questions the validity of improving model performance on old metrics for the low-agreement part of evaluation datasets. Hence, we argue for a detailed examination of human agreement in future data collection efforts, and evaluating model outputs against the distribution over collective human opinions.", "keyphrases": ["collective human opinions", "natural language inference", "nli", "annotation artifact"]}
+{"id": "fournier-2013-evaluating", "title": "Evaluating Text Segmentation using Boundary Edit Distance", "abstract": "This work proposes a new segmentation evaluation metric, named boundary similarity (B), an inter-coder agreement coefficient adaptation, and a confusion-matrix for segmentation that are all based upon an adaptation of the boundary edit distance in Fournier and Inkpen (2012). Existing segmentation metrics such as Pk, WindowDiff, and Segmentation Similarity (S) are all able to award partial credit for near misses between boundaries, but are biased towards segmentations containing few or tightly clustered boundaries. Despite S\u2019s improvements, its normalization also produces cosmetically high values that overestimate agreement & performance, leading this work to propose a solution.", "keyphrases": ["text segmentation", "edit distance", "agreement", "near miss"]}
+{"id": "majumder-etal-2018-iarm", "title": "IARM: Inter-Aspect Relation Modeling with Memory Networks in Aspect-Based Sentiment Analysis", "abstract": "Sentiment analysis has immense implications in e-commerce through user feedback mining. Aspect-based sentiment analysis takes this one step further by enabling businesses to extract aspect specific sentimental information. In this paper, we present a novel approach of incorporating the neighboring aspects related information into the sentiment classification of the target aspect using memory networks. We show that our method outperforms the state of the art by 1.6% on average in two distinct domains: restaurant and laptop.", "keyphrases": ["sentiment analysis", "aspect-aware sentence representation", "aspect term"]}
+{"id": "pasca-van-durme-2008-weakly", "title": "Weakly-Supervised Acquisition of Open-Domain Classes and Class Attributes from Web Documents and Query Logs", "abstract": "A new approach to large-scale information extraction exploits both Web documents and query logs to acquire thousands of open-domain classes of instances, along with relevant sets of open-domain class attributes at precision levels previously obtained only on small-scale, manually-assembled classes.", "keyphrases": ["class attribute", "web document", "concept acquisition method"]}
+{"id": "watanabe-etal-2006-left", "title": "Left-to-Right Target Generation for Hierarchical Phrase-Based Translation", "abstract": "We present a hierarchical phrase-based statistical machine translation in which a target sentence is efficiently generated in left-to-right order. The model is a class of synchronous-CFG with a Greibach Normal Form-like structure for the projected production rule: The paired target-side of a production rule takes a phrase prefixed form. The decoder for the target-normalized form is based on an Earley-style top-down parser on the source side. The target-normalized form coupled with our top-down parser implies a left-to-right generation of translations which enables a straightforward integration with n-gram language models. 
Our model was experimented on a Japanese-to-English newswire translation task, and showed statistically significant performance improvements against a phrase-based translation system.", "keyphrases": ["target sentence", "left-to-right order", "manner"]} +{"id": "mou-etal-2015-discriminative", "title": "Discriminative Neural Sentence Modeling by Tree-Based Convolution", "abstract": "This paper proposes a tree-based convolutional neural network (TBCNN) for discriminative sentence modeling. Our models leverage either constituency trees or dependency trees of sentences. The tree-based convolution process extracts sentences' structural features, and these features are aggregated by max pooling. Such architecture allows short propagation paths between the output layer and underlying feature detectors, which enables effective structural feature learning and extraction. We evaluate our models on two tasks: sentiment analysis and question classification. In both experiments, TBCNN outperforms previous state-of-the-art results, including existing neural networks and dedicated feature/rule engineering. We also make efforts to visualize the tree-based convolution process, shedding light on how our models work.", "keyphrases": ["tree-based convolution", "dependency tree", "question classification"]} +{"id": "wu-etal-2020-similarity", "title": "Similarity Analysis of Contextual Word Representation Models", "abstract": "This paper investigates contextual word representation models from the lens of similarity analysis. Given a collection of trained models, we measure the similarity of their internal representations and attention. Critically, these models come from vastly different architectures. We use existing and novel similarity measures that aim to gauge the level of localization of information in the deep models, and facilitate the investigation of which design factors affect model similarity, without requiring any external linguistic annotation. The analysis reveals that models within the same family are more similar to one another, as may be expected. Surprisingly, different architectures have rather similar representations, but different individual neurons. We also observed differences in information localization in lower and higher layers and found that higher layers are more affected by fine-tuning on downstream tasks.", "keyphrases": ["internal representation", "similarity analysis", "cka"]} +{"id": "kim-etal-2015-new", "title": "New Transfer Learning Techniques for Disparate Label Sets", "abstract": "In natural language understanding (NLU), a user utterance can be labeled differently depending on the domain or application (e.g., weather vs. calendar). Standard domain adaptation techniques are not directly applicable to take advantage of the existing annotations because they assume that the label set is invariant. We propose a solution based on label embeddings induced from canonical correlation analysis (CCA) that reduces the problem to a standard domain adaptation task and allows use of a number of transfer learning techniques. We also introduce a new transfer learning technique based on pretraining of hidden-unit CRFs (HUCRFs). 
We perform extensive experiments on slot tagging on eight personal digital assistant domains and demonstrate that the proposed methods are superior to strong baselines.", "keyphrases": ["language understanding", "canonical correlation analysis", "label space"]} +{"id": "huang-etal-2021-document", "title": "Document-level Entity-based Extraction as Template Generation", "abstract": "Document-level entity-based extraction (EE), aiming at extracting entity-centric information such as entity roles and entity relations, is key to automatic knowledge acquisition from text corpora for various domains. Most document-level EE systems build extractive models, which struggle to model long-term dependencies among entities at the document level. To address this issue, we propose a generative framework for two document-level EE tasks: role-filler entity extraction (REE) and relation extraction (RE). We first formulate them as a template generation problem, allowing models to efficiently capture cross-entity dependencies, exploit label semantics, and avoid the exponential computation complexity of identifying N-ary relations. A novel cross-attention guided copy mechanism, TopK Copy, is incorporated into a pre-trained sequence-to-sequence model to enhance the capabilities of identifying key information in the input document. Experiments done on the MUC-4 and SciREX dataset show new state-of-the-art results on REE (+3.26%), binary RE (+4.8%), and 4-ary RE (+2.7%) in F1 score.", "keyphrases": ["extraction", "template generation problem", "cross-attention"]} +{"id": "duma-klein-2013-generating", "title": "Generating Natural Language from Linked Data: Unsupervised template extraction", "abstract": "We propose an architecture for generating natural language from Linked Data that automatically learns sentence templates and statistical document planning from parallel RDF datasets and text. We have built a proof-of-concept system (LOD-DEF) trained on un-annotated text from the Simple English Wikipedia and RDF triples from DBpedia, focusing exclusively on factual, non-temporal information. The goal of the system is to generate short descriptions, equivalent to Wikipedia stubs, of entities found in Linked Datasets. We have evaluated the LOD-DEF system against a simple generate-from-triples baseline and human-generated output. In evaluation by humans, LOD-DEF significantly outperforms the baseline on two of three measures: non-redundancy and structure and coherence.", "keyphrases": ["linked data", "rdf", "natural-language description"]} +{"id": "stanovsky-etal-2015-open", "title": "Open IE as an Intermediate Structure for Semantic Tasks", "abstract": "Semantic applications typically extract information from intermediate structures derived from sentences, such as dependency parse or semantic role labeling. In this paper, we study Open Information Extraction\u2019s (Open IE) output as an additional intermediate structure and find that for tasks such as text comprehension, word similarity and word analogy it can be very effective. Specifically, for word analogy, Open IE-based embeddings surpass the state of the art. 
We suggest that semantic applications will likely benefit from adding Open IE format to their set of potential sentence-level structures.", "keyphrases": ["intermediate structure", "open information extraction", "different semantic task"]} +{"id": "adams-stymne-2017-learning", "title": "Learning with learner corpora: Using the TLE for native language identification", "abstract": "This study investigates the usefulness of the Treebank of Learner English (TLE) when applied to the task of Native Language Identification (NLI). The TLE is effectively a parallel corpus of Standar ...", "keyphrases": ["native language identification", "learner corpus", "example retrieval system"]} +{"id": "katiyar-cardie-2018-nested", "title": "Nested Named Entity Recognition Revisited", "abstract": "We propose a novel recurrent neural network-based approach to simultaneously handle nested named entity recognition and nested entity mention detection. The model learns a hypergraph representation for nested entities using features extracted from a recurrent neural network. In evaluations on three standard data sets, we show that our approach significantly outperforms existing state-of-the-art methods, which are feature-based. The approach is also efficient: it operates linearly in the number of tokens and the number of possible output labels at any token. Finally, we present an extension of our model that jointly learns the head of each entity mention.", "keyphrases": ["recurrent neural network", "extension", "hypergraph-based representation", "bilou tagging scheme"]} +{"id": "morante-daelemans-2009-learning", "title": "Learning the Scope of Hedge Cues in Biomedical Texts", "abstract": "Identifying hedged information in biomedical literature is an important subtask in information extraction because it would be misleading to extract speculative information as factual information. In this paper we present a machine learning system that finds the scope of hedge cues in biomedical texts. The system is based on a similar system that finds the scope of negation cues. We show that the same scope finding approach can be applied to both negation and hedging. To investigate the robustness of the approach, the system is tested on the three subcorpora of the BioScope corpus that represent different text types.", "keyphrases": ["biomedical text", "negation cue", "shallow syntactic feature"]} +{"id": "he-etal-2010-maximum", "title": "Maximum Entropy Based Phrase Reordering for Hierarchical Phrase-Based Translation", "abstract": "Hierarchical phrase-based (HPB) translation provides a powerful mechanism to capture both short and long distance phrase reorderings. However, the phrase reorderings lack contextual information in conventional HPB systems. This paper proposes a context-dependent phrase reordering approach that uses the maximum entropy (MaxEnt) model to help the HPB decoder select appropriate reordering patterns. We classify translation rules into several reordering patterns, and build a MaxEnt model for each pattern based on various contextual features. We integrate the MaxEnt models into the HPB model. Experimental results show that our approach achieves significant improvements over a standard HPB system on large-scale translation tasks. 
On Chinese-to-English translation, the absolute improvements in BLEU (case-insensitive) range from 1.2 to 2.1.", "keyphrases": ["hierarchical phrase-based translation", "soft constraint modeling", "rule pattern", "different classifier"]} +{"id": "wachsmuth-etal-2018-retrieval", "title": "Retrieval of the Best Counterargument without Prior Topic Knowledge", "abstract": "Given any argument on any controversial topic, how to counter it? This question implies the challenging retrieval task of finding the best counterargument. Since prior knowledge of a topic cannot be expected in general, we hypothesize the best counterargument to invoke the same aspects as the argument while having the opposite stance. To operationalize our hypothesis, we simultaneously model the similarity and dissimilarity of pairs of arguments, based on the words and embeddings of the arguments' premises and conclusions. A salient property of our model is its independence from the topic at hand, i.e., it applies to arbitrary arguments. We evaluate different model variations on millions of argument pairs derived from the web portal idebate.org. Systematic ranking experiments suggest that our hypothesis is true for many arguments: For 7.6 candidates with opposing stance on average, we rank the best counterargument highest with 60% accuracy. Even among all 2801 test set pairs as candidates, we still find the best one about every third time.", "keyphrases": ["counterargument", "stance", "retrieval"]} +{"id": "li-etal-2019-choosing", "title": "Choosing between Long and Short Word Forms in Mandarin", "abstract": "Between 80% and 90% of all Chinese words have long and short forms such as \u8001\u864e/\u864e (lao-hu/hu, tiger) (Duanmu, 2013). Consequently, the choice between long and short forms is a key problem for lexical choice across NLP and NLG. Following an earlier work on abbreviations in English (Mahowald et al., 2013), we bring a probabilistic perspective to these questions, using both a behavioral and a corpus-based approach. We hypothesized that there is a higher probability of choosing the short form in supportive context than in neutral context in Mandarin. Consistent with our prediction, our findings revealed that the predictability of contexts has an effect on speakers' long and short form choice.", "keyphrases": ["mandarin", "short form", "corpus-based approach"]} +{"id": "liu-etal-2021-cross", "title": "Cross Attention Augmented Transducer Networks for Simultaneous Translation", "abstract": "This paper proposes a novel architecture, Cross Attention Augmented Transducer (CAAT), for simultaneous translation. The framework aims to jointly optimize the policy and translation models. To effectively consider all possible READ-WRITE simultaneous translation action paths, we adapt the online automatic speech recognition (ASR) model, RNN-T, but remove the strong monotonic constraint, which is critical for the translation task to consider reordering. To make CAAT work, we introduce a novel latency loss whose expectation can be optimized by a forward-backward algorithm. We implement CAAT with Transformer while the general CAAT architecture can also be implemented with other attention-based encoder-decoder frameworks. 
Experiments on both speech-to-text (S2T) and text-to-text (T2T) simultaneous translation tasks show that CAAT achieves significantly better latency-quality trade-offs compared to the state-of-the-art simultaneous translation approaches.", "keyphrases": ["transducer", "simultaneous translation", "cross-attention"]} +{"id": "milne-etal-2016-clpsych", "title": "CLPsych 2016 Shared Task: Triaging content in online peer-support forums", "abstract": "This paper introduces a new shared task for the text mining community. It aims to directly support the moderators of a youth mental health forum by asking participants to automatically triage posts into one of four severity labels: green, amber, red or crisis. The task attracted 60 submissions from 15 different teams, the best of whom achieve scores well above baselines. Their approaches and results provide valuable insights to enable moderators of peer support forums to react quickly to the most urgent, concerning content.", "keyphrases": ["participant", "peer support forum", "social medium"]} +{"id": "he-toutanova-2009-joint", "title": "Joint Optimization for Machine Translation System Combination", "abstract": "System combination has emerged as a powerful method for machine translation (MT). This paper pursues a joint optimization strategy for combining outputs from multiple MT systems, where word alignment, ordering, and lexical selection decisions are made jointly according to a set of feature functions combined in a single log-linear model. The decoding algorithm is described in detail and a set of new features that support this joint decoding approach is proposed. The approach is evaluated in comparison to state-of-the-art confusion-network-based system combination methods using equivalent features and shown to outperform them significantly.", "keyphrases": ["system combination", "log-linear model", "flexible word order", "skeleton"]} +{"id": "albrecht-hwa-2007-examination", "title": "A Re-examination of Machine Learning Approaches for Sentence-Level MT Evaluation", "abstract": "Recent studies suggest that machine learning can be applied to develop good automatic evaluation metrics for machine translated sentences. This paper further analyzes aspects of learning that impact performance. We argue that previously proposed approaches of training a HumanLikeness classifier are not as well correlated with human judgments of translation quality, but that regression-based learning produces more reliable metrics. We demonstrate the feasibility of regression-based metrics through empirical analysis of learning curves and generalization studies and show that they can achieve higher correlations with human judgments than standard automatic metrics.", "keyphrases": ["evaluation metric", "regression algorithm", "state-of-the-art correlation"]} +{"id": "das-bandyopadhyay-2010-identifying", "title": "Identifying Emotional Expressions, Intensities and Sentence Level Emotion Tags Using a Supervised Framework", "abstract": "The present work deals with the extraction of emotional expressions and tagging of English blog sentences with Ekman\u2019s six basic emotion tags and any of the three intensities: low, medium and high. Baseline system is developed based on WordNet Affect lists and dependency relations. Support Vector Machine (SVM) based supervised framework is employed by incorporating different word and context level features. The feature analysis is carried out on 358 development sentences followed by Information Gain Based Pruning. 
Application of admissible tag sequences and a class-splitting technique improves the system\u2019s performance and reduces the label bias problem of SVM. The supervised system outperforms the baseline system and achieves average F-Scores of 82.72%, 76.74% and 89.21% for emotional expressions, sentential emotion tags and intensities respectively on 565 gold standard test sentences. A comparative evaluation shows that sentential emotion tagging based on emotional expressions, intensities and context features bridges the gap of identifying sentential emotion depending only on words.", "keyphrases": ["intensity", "supervised framework", "basic emotion tag"]} +{"id": "chinkina-etal-2016-online", "title": "Online Information Retrieval for Language Learning", "abstract": "The reading material used in a language learning classroom should ideally be rich in terms of the grammatical constructions and vocabulary to be taught and in line with the learner\u2019s interests. We developed an online Information Retrieval system that helps teachers search for texts appropriate in form, content, and reading level. It identifies the 87 grammatical constructions spelled out in the official English language curriculum of schools in Baden-Wurttemberg, Germany. The tool incorporates a classical efficient algorithm for reranking the results by assigning weights to selected constructions and prioritizing the documents containing them. Supplemented by an interactive visualization module, it allows for a multifaceted presentation and analysis of the retrieved documents.", "keyphrases": ["reading material", "grammatical construction", "learner", "information retrieval system", "language curriculum"]} +{"id": "zhang-bowman-2018-language", "title": "Language Modeling Teaches You More than Translation Does: Lessons Learned Through Auxiliary Syntactic Task Analysis", "abstract": "Recently, researchers have found that deep LSTMs trained on tasks like machine translation learn substantial syntactic and semantic information about their input sentences, including part-of-speech. These findings begin to shed light on why pretrained representations, like ELMo and CoVe, are so beneficial for neural language understanding models. We still, though, do not yet have a clear understanding of how the choice of pretraining objective affects the type of linguistic information that models learn. With this in mind, we compare four objectives\u2014language modeling, translation, skip-thought, and autoencoding\u2014on their ability to induce syntactic and part-of-speech information, holding constant the quantity and genre of the training data, as well as the LSTM architecture.", "keyphrases": ["objective", "part-of-speech information", "language model", "pre-trained model"]} +{"id": "silberer-lapata-2012-grounded", "title": "Grounded Models of Semantic Representation", "abstract": "A popular tradition of studying semantic representation has been driven by the assumption that word meaning can be learned from the linguistic environment, despite ample evidence suggesting that language is grounded in perception and action. In this paper we present a comparative study of models that represent word meaning based on linguistic and perceptual data. Linguistic information is approximated by naturally occurring corpora and sensorimotor experience by feature norms (i.e., attributes native speakers consider important in describing the meaning of a word). The models differ in terms of the mechanisms by which they integrate the two modalities. 
Experimental results show that a closer correspondence to human data can be obtained by uncovering latent information shared among the textual and perceptual modalities rather than arriving at semantic knowledge by concatenating the two.", "keyphrases": ["semantic representation", "sensorimotor experience", "modality", "compositionality"]} +{"id": "church-etal-2007-compressing", "title": "Compressing Trigram Language Models With Golomb Coding", "abstract": "Trigram language models are compressed using a Golomb coding method inspired by the original Unix spell program. Compression methods trade off space, time and accuracy (loss). The proposed HashTBO method optimizes space at the expense of time and accuracy. Trigram language models are normally considered memory hogs, but with HashTBO, it is possible to squeeze a trigram language model into a few megabytes or less. HashTBO made it possible to ship a trigram contextual speller in Microsoft Office 2007.", "keyphrases": ["language model", "golomb coding", "microsoft office"]} +{"id": "roller-etal-2013-un", "title": "The (Un)expected Effects of Applying Standard Cleansing Models to Human Ratings on Compositionality", "abstract": "Human ratings are an important source for evaluating computational models that predict compositionality, but like many data sets of human semantic judgements, are often fraught with uncertainty and noise. However, despite their importance, to our knowledge there has been no extensive look at the effects of cleansing methods on human rating data. This paper assesses two standard cleansing approaches on two sets of compositionality ratings for German noun-noun compounds, in their ability to produce compositionality ratings of higher consistency, while reducing data quantity. We find (i) that our ratings are highly robust against aggressive filtering; (ii) Z-score filtering fails to detect unreliable item ratings; and (iii) Minimum Subject Agreement is highly effective at detecting unreliable subjects.", "keyphrases": ["compositionality", "crowdsourcing", "judgment"]} +{"id": "xu-etal-2020-self", "title": "Self-Attention Guided Copy Mechanism for Abstractive Summarization", "abstract": "Copy module has been widely equipped in the recent abstractive summarization models, which facilitates the decoder to extract words from the source into the summary. Generally, the encoder-decoder attention is served as the copy distribution, while how to guarantee that important words in the source are copied remains a challenge. In this work, we propose a Transformer-based model to enhance the copy mechanism. Specifically, we identify the importance of each source word based on the degree centrality with a directed graph built by the self-attention layer in the Transformer. We use the centrality of each source word to guide the copy process explicitly. Experimental results show that the self-attention graph provides useful guidance for the copy distribution. Our proposed models significantly outperform the baseline methods on the CNN/Daily Mail dataset and the Gigaword dataset.", "keyphrases": ["copy mechanism", "abstractive summarization", "centrality"]} +{"id": "jurgens-etal-2019-just", "title": "A Just and Comprehensive Strategy for Using NLP to Address Online Abuse", "abstract": "Online abusive behavior affects millions and the NLP community has attempted to mitigate this problem by developing technologies to detect abuse. 
However, current methods have largely focused on a narrow definition of abuse to the detriment of victims who seek both validation and solutions. In this position paper, we argue that the community needs to make three substantive changes: (1) expanding our scope of problems to tackle both more subtle and more serious forms of abuse, (2) developing proactive technologies that counter or inhibit abuse before it harms, and (3) reframing our effort within a framework of justice to promote healthy communities.", "keyphrases": ["online abuse", "abusive behavior", "victim"]} +{"id": "gittens-etal-2017-skip", "title": "Skip-Gram \u2212 Zipf + Uniform = Vector Additivity", "abstract": "In recent years word-embedding models have gained great popularity due to their remarkable performance on several tasks, including word analogy questions and caption generation. An unexpected \u201cside-effect\u201d of such models is that their vectors often exhibit compositionality, i.e., adding two word-vectors results in a vector that is only a small angle away from the vector of a word representing the semantic composite of the original words, e.g., \u201cman\u201d + \u201croyal\u201d = \u201cking\u201d. This work provides a theoretical justification for the presence of additive compositionality in word vectors learned using the Skip-Gram model. In particular, it shows that additive compositionality holds in an even stricter sense (small distance rather than small angle) under certain assumptions on the process generating the corpus. As a corollary, it explains the success of vector calculus in solving word analogies. When these assumptions do not hold, this work describes the correct non-linear composition operator. Finally, this work establishes a connection between the Skip-Gram model and the Sufficient Dimensionality Reduction (SDR) framework of Globerson and Tishby: the parameters of SDR models can be obtained from those of Skip-Gram models simply by adding information on symbol frequencies. This shows that Skip-Gram embeddings are optimal in the sense of Globerson and Tishby and, further, implies that the heuristics commonly used to approximately fit Skip-Gram models can be used to fit SDR models.", "keyphrases": ["additive compositionality", "assumption", "skip-gram", "theoretical perspective"]} +{"id": "duh-etal-2011-generalized", "title": "Generalized Minimum Bayes Risk System Combination", "abstract": "Minimum Bayes Risk (MBR) has been used as a decision rule for both single-system decoding and system combination in machine translation. For system combination, we argue that common MBR implementations are actually not correct, since probabilities in the hypothesis space cannot be reliably estimated. These implementations achieve the effect of consensus decoding (which may be beneficial in its own right), but do not reduce Bayes Risk in the true Bayesian sense. We introduce Generalized MBR, which parameterizes the loss function in MBR and allows it to be optimized in the given hypothesis space of multiple systems. 
This extension better approximates the true Bayes Risk decision rule and empirically improves over MBR, even in cases where the combined systems are of mixed quality.", "keyphrases": ["minimum bayes risk", "system combination", "mbr", "loss function", "optimal sentence"]} +{"id": "kutuzov-giulianelli-2020-uio", "title": "UiO-UvA at SemEval-2020 Task 1: Contextualised Embeddings for Lexical Semantic Change Detection", "abstract": "We apply contextualised word embeddings to lexical semantic change detection in the SemEval-2020 Shared Task 1. This paper focuses on Subtask 2, ranking words by the degree of their semantic drift over time. We analyse the performance of two contextualising architectures (BERT and ELMo) and three change detection algorithms. We find that the most effective algorithms rely on the cosine similarity between averaged token embeddings and the pairwise distances between token embeddings. They outperform strong baselines by a large margin (in the post-evaluation phase, we have the best Subtask 2 submission for SemEval-2020 Task 1), but interestingly, the choice of a particular algorithm depends on the distribution of gold scores in the test set.", "keyphrases": ["semeval-2020 task", "semantic change detection", "word embedding"]} +{"id": "chi-etal-2021-improving", "title": "Improving Pretrained Cross-Lingual Language Models via Self-Labeled Word Alignment", "abstract": "The cross-lingual language models are typically pretrained with masked language modeling on multilingual text or parallel sentences. In this paper, we introduce denoising word alignment as a new cross-lingual pre-training task. Specifically, the model first self-labels word alignments for parallel sentences. Then we randomly mask tokens in a bitext pair. Given a masked token, the model uses a pointer network to predict the aligned token in the other language. We alternately perform the above two steps in an expectation-maximization manner. Experimental results show that our method improves cross-lingual transferability on various datasets, especially on the token-level tasks, such as question answering, and structured prediction. Moreover, the model can serve as a pretrained word aligner, which achieves a reasonably low error rate on the alignment benchmarks. The code and pretrained parameters are available at github.com/CZWin32768/XLM-Align.", "keyphrases": ["cross-lingual language model", "pre-training task", "token-level alignment", "xlm-align"]} +{"id": "hacioglu-2004-semantic", "title": "Semantic Role Labeling Using Dependency Trees", "abstract": "In this paper, a novel semantic role labeler based on dependency trees is developed. This is accomplished by formulating the semantic role labeling as a classification problem of dependency relations into one of several semantic roles. A dependency tree is created from a constituency parse of an input sentence. The dependency tree is then linearized into a sequence of dependency relations. A number of features are extracted for each dependency relation using a predefined linguistic context. Finally, the features are input to a set of one-versus-all support vector machine (SVM) classifiers to determine the corresponding semantic role label. 
We report results on CoNLL2004 shared task data using the representation and scoring scheme adopted for that task.", "keyphrases": ["classification problem", "srl", "semantic role", "proposition"]} +{"id": "macavaney-etal-2018-rsdd", "title": "RSDD-Time: Temporal Annotation of Self-Reported Mental Health Diagnoses", "abstract": "Self-reported diagnosis statements have been widely employed in studying language related to mental health in social media. However, existing research has largely ignored the temporality of mental health diagnoses. In this work, we introduce RSDD-Time: a new dataset of 598 manually annotated self-reported depression diagnosis posts from Reddit that include temporal information about the diagnosis. Annotations include whether a mental health condition is present and how recently the diagnosis happened. Furthermore, we include exact temporal spans that relate to the date of diagnosis. This information is valuable for various computational methods to examine mental health through social media because one's mental health state is not static. We also test several baseline classification and extraction approaches, which suggest that extracting temporal information from self-reported diagnosis statements is challenging.", "keyphrases": ["diagnosis", "mental health condition", "rsdd-time"]} +{"id": "yu-etal-2017-improved", "title": "Improved Neural Relation Detection for Knowledge Base Question Answering", "abstract": "Relation detection is a core component of many NLP applications including Knowledge Base Question Answering (KBQA). In this paper, we propose a hierarchical recurrent neural network enhanced by residual learning which detects KB relations given an input question. Our method uses deep residual bidirectional LSTMs to compare questions and relation names via different levels of abstraction. Additionally, we propose a simple KBQA system that integrates entity linking and our proposed relation detector to make the two components enhance each other. Our experimental results show that our approach not only achieves outstanding relation detection performance, but more importantly, it helps our KBQA system achieve state-of-the-art accuracy for both single-relation (SimpleQuestions) and multi-relation (WebQSP) QA benchmarks.", "keyphrases": ["relation detection", "bidirectional lstm", "entity linking"]} +{"id": "berg-kirkpatrick-etal-2010-painless", "title": "Painless Unsupervised Learning with Features", "abstract": "We show how features can easily be added to standard generative models for unsupervised learning, without requiring complex new training methods. In particular, each component multinomial of a generative model can be turned into a miniature logistic regression model if feature locality permits. The intuitive EM algorithm still applies, but with a gradient-based M-step familiar from discriminative training of logistic regression models. We apply this technique to part-of-speech induction, grammar induction, word alignment, and word segmentation, incorporating a few linguistically-motivated features into the standard generative model for each task. 
These feature-enhanced models each outperform their basic counterparts by a substantial margin, and even compete with and surpass more complex state-of-the-art models.", "keyphrases": ["unsupervised learning", "generative model", "m-step", "induction", "linguistically-motivated feature"]} +{"id": "chami-etal-2020-low", "title": "Low-Dimensional Hyperbolic Knowledge Graph Embeddings", "abstract": "Knowledge graph (KG) embeddings learn low-dimensional representations of entities and relations to predict missing facts. KGs often exhibit hierarchical and logical patterns which must be preserved in the embedding space. For hierarchical data, hyperbolic embedding methods have shown promise for high-fidelity and parsimonious representations. However, existing hyperbolic embedding methods do not account for the rich logical patterns in KGs. In this work, we introduce a class of hyperbolic KG embedding models that simultaneously capture hierarchical and logical patterns. Our approach combines hyperbolic reflections and rotations with attention to model complex relational patterns. Experimental results on standard KG benchmarks show that our method improves over previous Euclidean- and hyperbolic-based efforts by up to 6.1% in mean reciprocal rank (MRR) in low dimensions. Furthermore, we observe that different geometric transformations capture different types of relations while attention-based transformations generalize to multiple relations. In high dimensions, our approach yields new state-of-the-art MRRs of 49.6% on WN18RR and 57.7% on YAGO3-10.", "keyphrases": ["knowledge graph", "logical pattern", "curvature"]} +{"id": "nisioi-etal-2017-exploring", "title": "Exploring Neural Text Simplification Models", "abstract": "We present the first attempt at using sequence to sequence neural networks to model text simplification (TS). Unlike the previously proposed automated TS systems, our neural text simplification (NTS) systems are able to simultaneously perform lexical simplification and content reduction. An extensive human evaluation of the output has shown that NTS systems achieve almost perfect grammaticality and meaning preservation of output sentences and a higher level of simplification than the state-of-the-art automated TS systems", "keyphrases": ["lexical simplification", "content reduction", "neural machine translation", "seq2seq model"]} +{"id": "wiegand-etal-2019-detection", "title": "Detection of Abusive Language: the Problem of Biased Datasets", "abstract": "We discuss the impact of data bias on abusive language detection. We show that classification scores on popular datasets reported in previous work are much lower under realistic settings in which this bias is reduced. Such biases are most notably observed on datasets that are created by focused sampling instead of random sampling. Datasets with a higher proportion of implicit abuse are more affected than datasets with a lower proportion.", "keyphrases": ["abusive language", "explicit abuse", "topic bias"]} +{"id": "agirre-martinez-2004-unsupervised", "title": "Unsupervised WSD based on Automatically Retrieved Examples: The Importance of Bias", "abstract": "This paper explores the large-scale acquisition of sense-tagged examples for Word Sense Disambiguation (WSD). We have applied the \u201cWordNet monosemous relatives\u201d method to construct automatically a web corpus that we have used to train disambiguation systems. The corpus-building process has highlighted important factors, such as the distribution of senses (bias). 
The corpus has been used to train WSD algorithms that include supervised methods (combining automatic and manually-tagged examples), minimally supervised (requiring sense bias information from hand-tagged corpora), and fully unsupervised. These methods were tested on the Senseval-2 lexical sample test set, and compared successfully to other systems with minimum or no supervision.", "keyphrases": ["monosemous relative", "web", "majority"]} +{"id": "nagy-t-vincze-2014-vpctagger", "title": "VPCTagger: Detecting Verb-Particle Constructions With Syntax-Based Methods", "abstract": "Verb-particle combinations (VPCs) consist of a verbal and a preposition/particle component, which often have some additional meaning compared to the meaning of their parts. If a data-driven morphological parser or a syntactic parser is trained on a dataset annotated with extra information for VPCs, they will be able to identify VPCs in raw texts. In this paper, we examine how syntactic parsers perform on this task and we introduce VPCTagger, a machine learning-based tool that is able to identify English VPCs in context. Our method consists of two steps: it first selects VPC candidates on the basis of syntactic information and then selects genuine VPCs among them by exploiting new features like semantic and contextual ones. Based on our results, we see that VPCTagger outperforms state-of-the-art methods in the VPC detection task.", "keyphrases": ["vpc candidate", "syntactic information", "vpctagger"]} +{"id": "rosti-etal-2010-bbn", "title": "BBN System Description for WMT10 System Combination Task", "abstract": "BBN submitted system combination outputs for Czech-English, German-English, Spanish-English, French-English, and All-English language pairs. All combinations were based on confusion network decoding. An incremental hypothesis alignment algorithm with flexible matching was used to build the networks. The bi-gram decoding weights for the single source language translations were tuned directly to maximize the BLEU score of the decoding output. Approximate expected BLEU was used as the objective function in gradient based optimization of the combination weights for a 44 system multi-source language combination (All-English). The system combination gained around 0.4--2.0 BLEU points over the best individual systems on the single source conditions. On the multi-source condition, the system combination gained 6.6 BLEU points.", "keyphrases": ["hypothesis alignment algorithm", "bleu score", "objective function", "bbn submission"]} +{"id": "kawano-etal-2019-neural", "title": "Neural Conversation Model Controllable by Given Dialogue Act Based on Adversarial Learning and Label-aware Objective", "abstract": "Building a controllable neural conversation model (NCM) is an important task. In this paper, we focus on controlling the responses of NCMs by using dialogue act labels of responses as conditions. We introduce an adversarial learning framework for the task of generating conditional responses with a new objective to a discriminator, which explicitly distinguishes sentences by using labels. This change strongly encourages the generation of label-conditioned sentences. We compared the proposed method with some existing methods for generating conditional responses. 
The experimental results show that our proposed method has higher controllability for dialogue acts even though it has higher or comparable naturalness to existing methods.", "keyphrases": ["dialogue act", "adversarial learning framework", "neural conversation model"]} +{"id": "opitz-etal-2021-weisfeiler", "title": "Weisfeiler-Leman in the Bamboo: Novel AMR Graph Metrics and a Benchmark for AMR Graph Similarity", "abstract": "Several metrics have been proposed for assessing the similarity of (abstract) meaning representations (AMRs), but little is known about how they relate to human similarity ratings. Moreover, the current metrics have complementary strengths and weaknesses: Some emphasize speed, while others make the alignment of graph structures explicit, at the price of a costly alignment step. In this work we propose new Weisfeiler-Leman AMR similarity metrics that unify the strengths of previous metrics, while mitigating their weaknesses. Specifically, our new metrics are able to match contextualized substructures and induce n:m alignments between their nodes. Furthermore, we introduce a Benchmark for AMR Metrics based on Overt Objectives (Bamboo), the first benchmark to support empirical assessment of graph-based MR similarity metrics. Bamboo maximizes the interpretability of results by defining multiple overt objectives that range from sentence similarity objectives to stress tests that probe a metric's robustness against meaning-altering and meaning-preserving graph transformations. We show the benefits of Bamboo by profiling previous metrics and our own metrics. Results indicate that our novel metrics may serve as a strong baseline for future work.", "keyphrases": ["bamboo", "strength", "amr similarity metric"]} +{"id": "rello-ilisei-2009-rule", "title": "A Rule-Based Approach to the Identification of Spanish Zero Pronouns", "abstract": "This paper presents a new rule-based method to identify Spanish zero pronouns. The paper describes the comparative evaluation of a baseline method for the identification of zero pronouns with an approach that supplements the baseline by adding a set of restrictions treating impersonal sentences and other zero subject expressions. The identification rules have been tested on a new corpus in which zero pronouns have been manually annotated (the Z-Corpus). The comparative evaluation shows that this rule-based method outperforms the baseline.", "keyphrases": ["pronoun", "rule-based method", "z-corpus"]} +{"id": "merkx-frank-2021-human", "title": "Human Sentence Processing: Recurrence or Attention?", "abstract": "Recurrent neural networks (RNNs) have long been an architecture of interest for computational models of human sentence processing. The recently introduced Transformer architecture outperforms RNNs on many natural language processing tasks but little is known about its ability to model human language processing. We compare Transformer- and RNN-based language models' ability to account for measures of human reading effort. 
Our analysis shows Transformers to outperform RNNs in explaining self-paced reading times and neural activity during reading English sentences, challenging the widely held idea that human sentence processing involves recurrent and immediate processing and providing evidence for cue-based retrieval.", "keyphrases": ["recurrent neural network", "computational model", "reading time", "human sentence processing"]} +{"id": "hira-etal-2019-exploring", "title": "Exploring Transfer Learning and Domain Data Selection for the Biomedical Translation", "abstract": "Transfer Learning and Selective data training are two of the many approaches being extensively investigated to improve the quality of Neural Machine Translation systems. This paper presents a series of experiments by applying transfer learning and selective data training for participation in the Bio-medical shared task of WMT19. We have used Information Retrieval to selectively choose related sentences from out-of-domain data and used them as additional training data using transfer learning. We also report the effect of tokenization on translation model performance.", "keyphrases": ["transfer learning", "data training", "out-of-domain data"]} +{"id": "bergsma-etal-2013-broadly", "title": "Broadly Improving User Classification via Communication-Based Name and Location Clustering on Twitter", "abstract": "Hidden properties of social media users, such as their ethnicity, gender, and location, are often reflected in their observed attributes, such as their first and last names. Furthermore, users who communicate with each other often have similar hidden properties. We propose an algorithm that exploits these insights to cluster the observed attributes of hundreds of millions of Twitter users. Attributes such as user names are grouped together if users with those names communicate with other similar users. We separately cluster millions of unique first names, last names, and user-provided locations. The efficacy of these clusters is then evaluated on a diverse set of classification tasks that predict hidden user properties such as ethnicity, geographic location, gender, language, and race, using only profile names and locations when appropriate. Our readily-replicable approach and publicly-released clusters are shown to be remarkably effective and versatile, substantially outperforming state-of-the-art approaches and human accuracy on each of the tasks studied.", "keyphrases": ["twitter", "ethnicity", "user name"]} +{"id": "bawden-etal-2019-findings", "title": "Findings of the WMT 2019 Biomedical Translation Shared Task: Evaluation for MEDLINE Abstracts and Biomedical Terminologies", "abstract": "In the fourth edition of the WMT Biomedical Translation task, we considered a total of six languages, namely Chinese (zh), English (en), French (fr), German (de), Portuguese (pt), and Spanish (es). We performed an evaluation of automatic translations for a total of 10 language directions, namely, zh/en, en/zh, fr/en, en/fr, de/en, en/de, pt/en, en/pt, es/en, and en/es. We provided training data based on MEDLINE abstracts for eight of the 10 language pairs and test sets for all of them. In addition to that, we offered a new sub-task for the translation of terms in biomedical terminologies for the en/es language direction. Higher BLEU scores (close to 0.5) were obtained for the es/en, en/es and en/pt test sets, as well as for the terminology sub-task. 
After manual validation of the primary runs, some submissions were judged to be better than the reference translations, for instance, for de/en, en/es and es/en.", "keyphrases": ["wmt", "abstract", "biomedical terminology"]} +{"id": "xu-etal-2020-novel", "title": "A Novel Joint Framework for Multiple Chinese Events Extraction", "abstract": "Event extraction is an essential yet challenging task in information extraction. Previous approaches have paid little attention to the problem of roles overlap which is a common phenomenon in practice. To solve this problem, this paper defines event relation triple to explicitly represent relations among triggers, arguments and roles which are incorporated into the model to learn their inter-dependencies. The task of argument extraction is converted to event relation triple extraction. A novel joint framework for multiple Chinese event extraction is proposed which jointly performs predictions for event triggers and arguments based on shared feature representations from pre-trained language model. Experimental comparison with state-of-the-art baselines on ACE 2005 dataset shows the superiority of the proposed method in both trigger classification and argument classification.", "keyphrases": ["novel joint framework", "trigger", "argument extraction"]} +{"id": "wallace-etal-2020-imitation", "title": "Imitation Attacks and Defenses for Black-box Machine Translation Systems", "abstract": "Adversaries may look to steal or attack black-box NLP systems, either for financial gain or to exploit model errors. One setting of particular interest is machine translation (MT), where models have high commercial value and errors can be costly. We investigate possible exploitations of black-box MT systems and explore a preliminary defense against such threats. We first show that MT systems can be stolen by querying them with monolingual sentences and training models to imitate their outputs. Using simulated experiments, we demonstrate that MT model stealing is possible even when imitation models have different input data or architectures than their target models. Applying these ideas, we train imitation models that reach within 0.6 BLEU of three production MT systems on both high-resource and low-resource language pairs. We then leverage the similarity of our imitation models to transfer adversarial examples to the production systems. We use gradient-based attacks that expose inputs which lead to semantically-incorrect translations, dropped content, and vulgar model outputs. To mitigate these vulnerabilities, we propose a defense that modifies translation outputs in order to misdirect the optimization of imitation models. This defense degrades the adversary's BLEU score and attack success rate at some cost in the defender's BLEU and inference speed.", "keyphrases": ["attack", "machine translation", "imitation model"]} +{"id": "rushdi-saleh-etal-2011-bilingual", "title": "Bilingual Experiments with an Arabic-English Corpus for Opinion Mining", "abstract": "Recently, Opinion Mining (OM) is receiving more attention due to the abundance of forums, blogs, ecommerce web sites, news reports and additional web sources where people tend to express their opinions. There are a number of works about Sentiment Analysis (SA) studying the task of identifying the polarity, whether the opinion expressed in a text is positive or negative about a given topic. However, most of research is focused on English texts and there are very few resources for other languages. 
In this work we present an Opinion Corpus for Arabic (OCA) composed of Arabic reviews extracted from specialized web pages related to movies and films using this language. Moreover, we have translated the OCA corpus into English, generating the EVOCA corpus (English Version of OCA). In the experiments carried out in this work we have used different machine learning algorithms to classify the polarity in these corpora showing that, although the experiments with EVOCA are worse than OCA, the results are comparable with other English experiments, since the loss of precision due to the translation is very slight.", "keyphrases": ["opinion mining", "arabic", "other english experiment"]} +{"id": "mi-liu-2010-constituency", "title": "Constituency to Dependency Translation with Forests", "abstract": "Tree-to-string systems (and their forest-based extensions) have gained steady popularity thanks to their simplicity and efficiency, but there is a major limitation: they are unable to guarantee the grammaticality of the output, which is explicitly modeled in string-to-tree systems via target-side syntax. We thus propose to combine the advantages of both, and present a novel constituency-to-dependency translation model, which uses constituency forests on the source side to direct the translation, and dependency trees on the target side (as a language model) to ensure grammaticality. Medium-scale experiments show an absolute and statistically significant improvement of +0.7 BLEU points over a state-of-the-art forest-based tree-to-string system even with fewer rules. This is also the first time that a tree-to-tree model can surpass tree-to-string counterparts.", "keyphrases": ["constituency-to-dependency translation model", "source side", "dependency tree", "constituency"]} +{"id": "belz-2005-statistical", "title": "Statistical Generation: Three Methods Compared and Evaluated", "abstract": "Statistical NLG has largely meant n-gram modelling which has the considerable advantages of lending robustness to NLG systems, and of making automatic adaptation to new domains from raw corpora possible. On the downside, n-gram models are expensive to use as selection mechanisms and have a built-in bias towards shorter realisations. This paper looks at treebank-training of generators, an alternative method for building statistical models for NLG from raw corpora, and two different ways of using treebank-trained models during generation. Results show that the treebank-trained generators achieve improvements similar to a 2-gram generator over a baseline of random selection. However, the treebank-trained generators achieve this at a much lower cost than the 2-gram generator, and without its strong preference for shorter realisations.", "keyphrases": ["n-gram model", "realisation", "cost", "generation model"]} +{"id": "pekar-2006-acquisition", "title": "Acquisition of Verb Entailment from Text", "abstract": "The study addresses the problem of automatic acquisition of entailment relations between verbs. While this task has much in common with paraphrase acquisition which aims to discover semantic equivalence between verbs, the main challenge of entailment acquisition is to capture asymmetric, or directional, relations. Motivated by the intuition that it often underlies the local structure of coherent text, we develop a method that discovers verb entailment using evidence about discourse relations between clauses available in a parsed corpus. 
In comparison with earlier work, the proposed method covers a much wider range of verb entailment types and learns the mapping between verbs with highly varied argument structures.", "keyphrases": ["entailment relation", "coherent text", "acquisition"]} +{"id": "ma-etal-2011-improving", "title": "Improving Low-Resource Statistical Machine Translation with a Novel Semantic Word Clustering Algorithm", "abstract": "In this paper we present a non-language-specific strategy that uses large amounts of monolingual data to improve statistical machine translation (SMT) when only a small parallel training corpus is available. This strategy uses word classes derived from monolingual text data to improve the word alignment quality, which generally deteriorates significantly because of insufficient training. We present a novel semantic word clustering algorithm to generate the word classes motivated by the word similarity metric presented in (Lin, 1998). Our clustering results showed this novel word clustering outperforms a state-of-the-art hierarchical clustering. We then designed a new procedure for using the derived word classes to improve word alignment quality. Our experiments showed that the use of the word classes can recover over 90% of the loss resulting from the alignment quality that is lost due to the limited parallel training.", "keyphrases": ["novel semantic word", "monolingual data", "few study"]} +{"id": "goyal-etal-2010-toward", "title": "Toward Plot Units: Automatic Affect State Analysis", "abstract": "We present a system called AESOP that automatically produces affect states associated with characters in a story. This research represents a first step toward the automatic generation of plot unit structures from text. AESOP incorporates several existing sentiment analysis tools and lexicons to evaluate the effectiveness of current sentiment technology on this task. AESOP also includes two novel components: a method for acquiring patient polarity verbs, which impart negative affect on their patients, and affect projection rules to propagate affect tags from surrounding words onto the characters in the story. We evaluate AESOP on a small collection of fables.", "keyphrases": ["character", "story", "projection rule"]} +{"id": "weller-etal-2013-using", "title": "Using subcategorization knowledge to improve case prediction for translation to German", "abstract": "This paper demonstrates the need and impact of subcategorization information for SMT. We combine (i) features on source-side syntactic subcategorization and (ii) an external knowledge base with quantitative, dependency-based information about target-side subcategorization frames. A manual evaluation of an English-to-German translation task shows that the subcategorization information has a positive impact on translation quality through better prediction of case.", "keyphrases": ["case prediction", "subcategorization frame", "source-side syntactic feature"]} +{"id": "pratapa-etal-2018-word", "title": "Word Embeddings for Code-Mixed Language Processing", "abstract": "We compare three existing bilingual word embedding approaches, and a novel approach of training skip-grams on synthetic code-mixed text generated through linguistic models of code-mixing, on two tasks - sentiment analysis and POS tagging for code-mixed text. 
Our results show that while CVM and CCA based embeddings perform as well as the proposed embedding technique on semantic and syntactic tasks respectively, the proposed approach provides the best performance for both tasks overall. Thus, this study demonstrates that existing bilingual embedding techniques are not ideal for code-mixed text processing and there is a need for learning multilingual word embedding from the code-mixed text.", "keyphrases": ["bilingual word", "code-mixed text", "sentiment analysis", "pos", "syntactic task"]} +{"id": "bastianelli-etal-2013-unitor", "title": "UNITOR-HMM-TK: Structured Kernel-based learning for Spatial Role Labeling", "abstract": "In this paper the UNITOR-HMM-TK system participating in the Spatial Role Labeling task at SemEval 2013 is presented. The spatial roles classification is addressed as a sequence-based word classification problem: the SVM learning algorithm is applied, based on a simple feature modeling and a robust lexical generalization achieved through a Distributional Model of Lexical Semantics. In the identification of spatial relations, roles are combined to generate candidate relations, later verified by a SVM classifier. The Smoothed Partial Tree Kernel is applied, i.e. a convolution kernel that enhances both syntactic and lexical properties of the examples, avoiding the need of a manual feature engineering phase. Finally, results on three of the five tasks of the challenge are reported.", "keyphrases": ["spatial relation", "pipeline", "element"]} +{"id": "somasundaran-etal-2008-discourse-level", "title": "Discourse Level Opinion Relations: An Annotation Study", "abstract": "This work proposes opinion frames as a representation of discourse-level associations that arise from related opinion targets and which are common in task-oriented meeting dialogs. We define the opinion frames and explain their interpretation. Additionally we present an annotation scheme that realizes the opinion frames and via human annotation studies, we show that these can be reliably identified.", "keyphrases": ["annotation study", "opinion frame", "meeting dialog"]} +{"id": "bernardy-etal-2018-influence", "title": "The Influence of Context on Sentence Acceptability Judgements", "abstract": "We investigate the influence that document context exerts on human acceptability judgements for English sentences, via two sets of experiments. The first compares ratings for sentences presented on their own with ratings for the same set of sentences given in their document contexts. The second assesses the accuracy with which two types of neural models \u2014 one that incorporates context during training and one that does not \u2014 predict these judgements. Our results indicate that: (1) context improves acceptability ratings for ill-formed sentences, but also reduces them for well-formed sentences; and (2) context helps unsupervised systems to model acceptability.", "keyphrases": ["influence", "acceptability", "document context"]} +{"id": "wong-etal-2012-exploring", "title": "Exploring Adaptor Grammars for Native Language Identification", "abstract": "The task of inferring the native language of an author based on texts written in a second language has generally been tackled as a classification problem, typically using as features a mix of n-grams over characters and part of speech tags (for small and fixed n) and unigram function words. To capture arbitrarily long n-grams that syntax-based approaches have suggested are useful, adaptor grammars have some promise. 
In this work we investigate their extension to identifying n-gram collocations of arbitrary length over a mix of PoS tags and words, using both maxent and induced syntactic language model approaches to classification. After presenting a new, simple baseline, we show that learned collocations used as features in a maxent model perform better still, but that the story is more mixed for the syntactic language model.", "keyphrases": ["adaptor grammar", "native language identification", "n-gram", "collocation"]} +{"id": "naseem-etal-2019-rewarding", "title": "Rewarding Smatch: Transition-Based AMR Parsing with Reinforcement Learning", "abstract": "Our work involves enriching the Stack-LSTM transition-based AMR parser (Ballesteros and Al-Onaizan, 2017) by augmenting training with Policy Learning and rewarding the Smatch score of sampled graphs. In addition, we also combined several AMR-to-text alignments with an attention mechanism and we supplemented the parser with pre-processed concept identification, named entities and contextualized embeddings. We achieve a highly competitive performance that is comparable to the best published results. We show an in-depth study ablating each of the new components of the parser.", "keyphrases": ["reinforcement learning", "amr parser", "transition-based model"]} +{"id": "banerjee-bhattacharyya-2018-meaningless", "title": "Meaningless yet meaningful: Morphology grounded subword-level NMT", "abstract": "We explore the use of two independent subsystems Byte Pair Encoding (BPE) and Morfessor as basic units for subword-level neural machine translation (NMT). We show that, for linguistically distant language-pairs Morfessor-based segmentation algorithm produces significantly better quality translation than BPE. However, for close language-pairs BPE-based subword-NMT may translate better than Morfessor-based subword-NMT. We propose a combined approach of these two segmentation algorithms Morfessor-BPE (M-BPE) which outperforms these two baseline systems in terms of BLEU score. Our results are supported by experiments on three language-pairs: English-Hindi, Bengali-Hindi and English-Bengali.", "keyphrases": ["morphology", "morfessor", "hindi"]} +{"id": "sokolov-etal-2012-computing", "title": "Computing Lattice BLEU Oracle Scores for Machine Translation", "abstract": "The search space of Phrase-Based Statistical Machine Translation (PBSMT) systems can be represented under the form of a directed acyclic graph (lattice). The quality of this search space can thus be evaluated by computing the best achievable hypothesis in the lattice, the so-called oracle hypothesis. For common SMT metrics, this problem is however NP-hard and can only be solved using heuristics. In this work, we present two new methods for efficiently computing BLEU oracles on lattices: the first one is based on a linear approximation of the corpus BLEU score and is solved using the FST formalism; the second one relies on integer linear programming formulation and is solved directly and using the Lagrangian relaxation framework. 
These new decoders are positively evaluated and compared with several alternatives from the literature for three language pairs, using lattices produced by two PBSMT systems.", "keyphrases": ["machine translation", "search space", "suboptimal result"]} +{"id": "cai-etal-2018-full", "title": "A Full End-to-End Semantic Role Labeler, Syntactic-agnostic Over Syntactic-aware?", "abstract": "Semantic role labeling (SRL) is to recognize the predicate-argument structure of a sentence, including subtasks of predicate disambiguation and argument labeling. Previous studies usually formulate the entire SRL problem into two or more subtasks. For the first time, this paper introduces an end-to-end neural model which unifiedly tackles the predicate disambiguation and the argument labeling in one shot. Using a biaffine scorer, our model directly predicts all semantic role labels for all given word pairs in the sentence without relying on any syntactic parse information. Specifically, we augment the BiLSTM encoder with a non-linear transformation to further distinguish the predicate and the argument in a given sentence, and model the semantic role labeling process as a word pair classification task by employing the biaffine attentional mechanism. Though the proposed model is syntax-agnostic with local decoder, it outperforms the state-of-the-art syntax-aware SRL systems on the CoNLL-2008, 2009 benchmarks for both English and Chinese. To our best knowledge, we report the first syntax-agnostic SRL model that surpasses all known syntax-aware models.", "keyphrases": ["semantic role labeling", "end-to-end model", "dependency-based srl"]} +{"id": "sheng-etal-2021-casee", "title": "CasEE: A Joint Learning Framework with Cascade Decoding for Overlapping Event Extraction", "abstract": "Event extraction (EE) is a crucial information extraction task that aims to extract event information in texts. Most existing methods assume that events appear in sentences without overlaps, which are not applicable to the complicated overlapping event extraction. This work systematically studies the realistic event overlapping problem, where a word may serve as triggers with several types or arguments with different roles. To tackle the above problem, we propose a novel joint learning framework with cascade decoding for overlapping event extraction, termed as CasEE. Particularly, CasEE sequentially performs type detection, trigger extraction and argument extraction, where the overlapped targets are extracted separately conditioned on the specific former prediction. All the subtasks are jointly learned in a framework to capture dependencies among the subtasks. The evaluation on a public event extraction benchmark FewFC demonstrates that CasEE achieves significant improvements on overlapping event extraction over previous competitive methods.", "keyphrases": ["joint learning framework", "type detection", "trigger extraction", "argument extraction", "casee"]} +{"id": "murahari-etal-2019-improving", "title": "Improving Generative Visual Dialog by Answering Diverse Questions", "abstract": "Prior work on training generative Visual Dialog models with reinforcement learning (Das et al., ICCV 2017) has explored a Q-Bot-A-Bot image-guessing game and shown that this 'self-talk' approach can lead to improved performance at the downstream dialog-conditioned image-guessing task. However, this improvement saturates and starts degrading after a few rounds of interaction, and does not lead to a better Visual Dialog model.
We find that this is due in part to repeated interactions between Q-Bot and A-Bot during self-talk, which are not informative with respect to the image. To improve this, we devise a simple auxiliary objective that incentivizes Q-Bot to ask diverse questions, thus reducing repetitions and in turn enabling A-Bot to explore a larger state space during RL i.e. be exposed to more visual concepts to talk about, and varied questions to answer. We evaluate our approach via a host of automatic metrics and human studies, and demonstrate that it leads to better dialog, i.e. dialog that is more diverse (i.e. less repetitive), consistent (i.e. has fewer conflicting exchanges), fluent (i.e., more human-like), and detailed, while still being comparably image-relevant as prior work and ablations.", "keyphrases": ["dialog", "a-bot", "repetition"]} +{"id": "mairesse-young-2014-stochastic", "title": "Stochastic Language Generation in Dialogue using Factored Language Models", "abstract": "Most previous work on trainable language generation has focused on two paradigms: (a) using a statistical model to rank a set of pre-generated utterances, or (b) using statistics to determine the generation decisions of an existing generator. Both approaches rely on the existence of a handcrafted generation component, which is likely to limit their scalability to new domains. The first contribution of this article is to present Bagel, a fully data-driven generation method that treats the language generation task as a search for the most likely sequence of semantic concepts and realization phrases, according to Factored Language Models (FLMs). As domain utterances are not readily available for most natural language generation tasks, a large creative effort is required to produce the data necessary to represent human linguistic variation for nontrivial domains. This article is based on the assumption that learning to produce paraphrases can be facilitated by collecting data from a large sample of untrained annotators using crowdsourcing\u2014rather than a few domain experts\u2014by relying on a coarse meaning representation. A second contribution of this article is to use crowdsourced data to show how dialogue naturalness can be improved by learning to vary the output utterances generated for a given semantic input. Two data-driven methods for generating paraphrases in dialogue are presented: (a) by sampling from the n-best list of realizations produced by Bagel's FLM reranker; and (b) by learning a structured perceptron predicting whether candidate realizations are valid paraphrases. We train Bagel on a set of 1,956 utterances produced by 137 annotators, which covers 10 types of dialogue acts and 128 semantic concepts in a tourist information system for Cambridge. An automated evaluation shows that Bagel outperforms utterance class LM baselines on this domain. A human evaluation of 600 resynthesized dialogue extracts shows that Bagel's FLM output produces utterances comparable to a handcrafted baseline, whereas the perceptron classifier performs worse. Interestingly, human judges find the system sampling from the n-best list to be more natural than a system always returning the first-best utterance. The judges are also more willing to interact with the n-best system in the future.
These results suggest that capturing the large variation found in human language using data-driven methods is beneficial for dialogue interaction.", "keyphrases": ["generator", "factored language models", "phrase-based nlg system"]} +{"id": "chen-etal-2020-hierarchical", "title": "Hierarchical Entity Typing via Multi-level Learning to Rank", "abstract": "We propose a novel method for hierarchical entity classification that embraces ontological structure at both training and during prediction. At training, our novel multi-level learning-to-rank loss compares positive types against negative siblings according to the type tree. During prediction, we define a coarse-to-fine decoder that restricts viable candidates at each level of the ontology based on already predicted parent type(s). Our approach significantly outperforms prior work on strict accuracy, demonstrating the effectiveness of our method.", "keyphrases": ["multi-level learning", "ontology", "multi-level learning-to-rank loss"]} +{"id": "murray-carenini-2009-predicting", "title": "Predicting Subjectivity in Multimodal Conversations", "abstract": "In this research we aim to detect subjective sentences in multimodal conversations. We introduce a novel technique wherein subjective patterns are learned from both labeled and unlabeled data, using n-gram word sequences with varying levels of lexical instantiation. Applying this technique to meeting speech and email conversations, we gain significant improvement over state-of-the-art approaches. Furthermore, we show that coupling the pattern-based approach with features that capture characteristics of general conversation structure yields additional improvement.", "keyphrases": ["subjective pattern", "n-gram word sequence", "lexical instantiation"]} +{"id": "garg-etal-2018-code", "title": "Code-switched Language Models Using Dual RNNs and Same-Source Pretraining", "abstract": "This work focuses on building language models (LMs) for code-switched text. We propose two techniques that significantly improve these LMs: 1) A novel recurrent neural network unit with dual components that focus on each language in the code-switched text separately 2) Pretraining the LM using synthetic text from a generative model estimated using the training data. We demonstrate the effectiveness of our proposed techniques by reporting perplexities on a Mandarin-English task and derive significant reductions in perplexity.", "keyphrases": ["rnn", "code-switched text", "code-mixed machine translation"]} +{"id": "chahuneau-etal-2012-word", "title": "Word Salad: Relating Food Prices and Descriptions", "abstract": "We investigate the use of language in food writing, specifically on restaurant menus and in customer reviews. Our approach is to build predictive models of concrete external variables, such as restaurant menu prices. We make use of a dataset of menus and customer reviews for thousands of restaurants in several U.S. cities. By focusing on prediction tasks and doing our analysis at scale, our methodology allows quantitative, objective measurements of the words and phrases used to describe food in restaurants.
We also explore interactions in language use between menu prices and sentiment as expressed in user reviews.", "keyphrases": ["food price", "restaurant menu", "review", "sentiment information"]} +{"id": "uchimoto-den-2008-word", "title": "Word-level Dependency-structure Annotation to Corpus of Spontaneous Japanese and its Application", "abstract": "In Japanese, the syntactic structure of a sentence is generally represented by the relationship between phrasal units, bunsetsus in Japanese, based on a dependency grammar. In many cases, the syntactic structure of a bunsetsu is not considered in syntactic structure annotation. This paper gives the criteria and definitions of dependency relationships between words in a bunsetsu and their applications. The target corpus for the word-level dependency annotation is a large spontaneous Japanese-speech corpus, the Corpus of Spontaneous Japanese (CSJ). One application of word-level dependency relationships is to find basic units for constructing accent phrases.", "keyphrases": ["japanese", "definition", "dependency structure"]} +{"id": "prabhakaran-rambow-2013-written", "title": "Written Dialog and Social Power: Manifestations of Different Types of Power in Dialog Behavior", "abstract": "Dialog behavior is affected by power relations among the discourse participants. We show that four different types of power relations (hierarchical power, situational power, influence, and power over communication) affect written dialog behavior in different ways. We also present a system that can identify power relations given a written dialog.", "keyphrases": ["dialog behavior", "power relation", "communication"]} +{"id": "lawrence-reed-2017-mining", "title": "Mining Argumentative Structure from Natural Language text using Automatically Generated Premise-Conclusion Topic Models", "abstract": "This paper presents a method of extracting argumentative structure from natural language text. The approach presented is based on the way in which we understand an argument being made, not just from the words said, but from existing contextual knowledge and understanding of the broader issues. We leverage high-precision, low-recall techniques in order to automatically build a large corpus of inferential statements related to the text's topic. These statements are then used to produce a matrix representing the inferential relationship between different aspects of the topic. From this matrix, we are able to determine connectedness and directionality of inference between statements in the original text. By following this approach, we obtain results that compare favourably to those of other similar techniques to classify premise-conclusion pairs (with results 22 points above baseline), but without the requirement of large volumes of annotated, domain specific data.", "keyphrases": ["natural language text", "contextual knowledge", "premise-conclusion pair"]} +{"id": "dragut-etal-2012-polarity", "title": "Polarity Consistency Checking for Sentiment Dictionaries", "abstract": "Polarity classification of words is important for applications such as Opinion Mining and Sentiment Analysis. A number of sentiment word/sense dictionaries have been manually or (semi)automatically constructed. The dictionaries have substantial inaccuracies. Besides obvious instances, where the same word appears with different polarities in different dictionaries, the dictionaries exhibit complex cases, which cannot be detected by mere manual inspection. 
We introduce the concept of polarity consistency of words/senses in sentiment dictionaries in this paper. We show that the consistency problem is NP-complete. We reduce the polarity consistency problem to the satisfiability problem and utilize a fast SAT solver to detect inconsistencies in a sentiment dictionary. We perform experiments on four sentiment dictionaries and WordNet.", "keyphrases": ["sentiment dictionary", "inconsistency", "pcp", "polynomial time"]} +{"id": "cai-etal-2017-fast", "title": "Fast and Accurate Neural Word Segmentation for Chinese", "abstract": "Neural models with minimal feature engineering have achieved competitive performance against traditional methods for the task of Chinese word segmentation. However, both training and working procedures of the current neural models are computationally inefficient. In this paper, we propose a greedy neural word segmenter with balanced word and character embedding inputs to alleviate the existing drawbacks. Our segmenter is truly end-to-end, capable of performing segmentation much faster and even more accurate than state-of-the-art neural models on Chinese benchmark datasets.", "keyphrases": ["neural word segmenter", "segmentation", "chinese", "various nlp task"]} +{"id": "li-etal-2020-empdg", "title": "EmpDG: Multi-resolution Interactive Empathetic Dialogue Generation", "abstract": "A humanized dialogue system is expected to generate empathetic replies, which should be sensitive to the users' expressed emotion. The task of empathetic dialogue generation is proposed to address this problem. The essential challenges lie in accurately capturing the nuances of human emotion and considering the potential of user feedback, which are overlooked by the majority of existing work. In response to this problem, we propose a multi-resolution adversarial model \u2013 EmpDG, to generate more empathetic responses. EmpDG exploits both the coarse-grained dialogue-level and fine-grained token-level emotions, the latter of which helps to better capture the nuances of user emotion. In addition, we introduce an interactive adversarial learning framework which exploits the user feedback, to identify whether the generated responses evoke emotion perceptivity in dialogues. Experimental results show that the proposed approach significantly outperforms the state-of-the-art baselines in both content quality and emotion perceptivity.", "keyphrases": ["dialogue-level", "fine-grained token-level emotion", "empdg"]} +{"id": "kurtyigit-etal-2021-lexical", "title": "Lexical Semantic Change Discovery", "abstract": "While there is a large amount of research in the field of Lexical Semantic Change Detection, only few approaches go beyond a standard benchmark evaluation of existing models. In this paper, we propose a shift of focus from change detection to change discovery, i.e., discovering novel word senses over time from the full corpus vocabulary. By heavily fine-tuning a type-based and a token-based approach on recently published German data, we demonstrate that both models can successfully be applied to discover new words undergoing meaning change. Furthermore, we provide an almost fully automated framework for both evaluation and discovery.", "keyphrases": ["change", "change detection", "vocabulary", "intersection"]} +{"id": "lee-etal-2017-l1", "title": "L1-L2 Parallel Dependency Treebank as Learner Corpus", "abstract": "This opinion paper proposes the use of parallel treebank as learner corpus. 
We show how an L1-L2 parallel treebank \u2014 i.e., parse trees of non-native sentences, aligned to the parse trees of their target hypotheses \u2014 can facilitate retrieval of sentences with specific learner errors. We argue for its benefits, in terms of corpus re-use and interoperability, over a conventional learner corpus annotated with error tags. As a proof of concept, we conduct a case study on word-order errors made by learners of Chinese as a foreign language. We report precision and recall in retrieving a range of word-order error categories from L1-L2 tree pairs annotated in the Universal Dependency framework.", "keyphrases": ["learner corpus", "l1-l2 parallel treebank", "word-order error", "target hypothesis"]} +{"id": "roy-subramaniam-2006-automatic", "title": "Automatic Generation of Domain Models for Call-Centers from Noisy Transcriptions", "abstract": "Call centers handle customer queries from various domains such as computer sales and support, mobile phones, car rental, etc. Each such domain generally has a domain model which is essential to handle customer complaints. These models contain common problem categories, typical customer issues and their solutions, greeting styles. Currently these models are manually created over time. Towards this, we propose an unsupervised technique to generate domain models automatically from call transcriptions. We use a state of the art Automatic Speech Recognition system to transcribe the calls between agents and customers, which still results in high word error rates (40%) and show that even from these noisy transcriptions of calls we can automatically build a domain model. The domain model is comprised of primarily a topic taxonomy where every node is characterized by topic(s), typical Questions-Answers (Q&As), typical actions and call statistics. We show how such a domain model can be used for topic identification of unseen calls. We also propose applications for aiding agents while handling calls and for agent monitoring based on the domain model.", "keyphrases": ["domain model", "noisy transcription", "call center", "unsupervised technique"]} +{"id": "shi-etal-2016-recurrent", "title": "Recurrent Support Vector Machines For Slot Tagging In Spoken Language Understanding", "abstract": "We propose recurrent support vector machine (RSVM) for slot tagging. This model is a combination of the recurrent neural network (RNN) and the structured support vector machine. RNN extracts features from the input sequence. The structured support vector machine uses a sequence-level discriminative objective function. The proposed model therefore combines the sequence representation capability of an RNN with the sequence-level discriminative objective. We have observed new state-of-the-art results on two benchmark datasets and one private dataset. RSVM obtained statistically significant 4% and 2% relative average F1 score improvement on ATIS dataset and Chunking dataset, respectively. Out of eight domains in Cortana live log dataset, RSVM achieved F1 score improvement on seven domains.
Experiments also show that RSVM significantly speeds up the model training by skipping the weight updating for non-support vector training samples, compared against training using RNN with CRF or minimum cross-entropy objectives.", "keyphrases": ["support vector machine", "slot tagging", "recurrent neural network"]} +{"id": "gao-etal-2011-soft", "title": "Soft Dependency Constraints for Reordering in Hierarchical Phrase-Based Translation", "abstract": "Long-distance reordering remains one of the biggest challenges facing machine translation. We derive soft constraints from the source dependency parsing to directly address the reordering problem for the hierarchical phrase-based model. Our approach significantly improves Chinese--English machine translation on a large-scale task by 0.84 BLEU points on average. Moreover, when we switch the tuning function from BLEU to the LRscore which promotes reordering, we observe total improvements of 1.21 BLEU, 1.30 LRscore and 3.36 TER over the baseline. On average our approach improves reordering precision and recall by 6.9 and 0.3 absolute points, respectively, and is found to be especially effective for long-distance reordering.", "keyphrases": ["soft dependency constraint", "english translation", "parent", "chinese"]} +{"id": "karimi-mahabadi-etal-2020-end", "title": "End-to-End Bias Mitigation by Modelling Biases in Corpora", "abstract": "Several recent studies have shown that strong natural language understanding (NLU) models are prone to relying on unwanted dataset biases without learning the underlying task, resulting in models that fail to generalize to out-of-domain datasets and are likely to perform poorly in real-world scenarios. We propose two learning strategies to train neural models, which are more robust to such biases and transfer better to out-of-domain datasets. The biases are specified in terms of one or more bias-only models, which learn to leverage the dataset biases. During training, the bias-only models' predictions are used to adjust the loss of the base model to reduce its reliance on biases by down-weighting the biased examples and focusing the training on the hard examples. We experiment on large-scale natural language inference and fact verification benchmarks, evaluating on out-of-domain datasets that are specifically designed to assess the robustness of models against known biases in the training data. Results show that our debiasing methods greatly improve robustness in all settings and better transfer to other textual entailment datasets. Our code and data are publicly available.", "keyphrases": ["loss", "dataset bias", "recent paper", "lexical artifact"]} +{"id": "collell-moens-2016-image", "title": "Is an Image Worth More than a Thousand Words? On the Fine-Grain Semantic Differences between Visual and Linguistic Representations", "abstract": "Human concept representations are often grounded with visual information, yet some aspects of meaning cannot be visually represented or are better described with language. Thus, vision and language provide complementary information that, properly combined, can potentially yield more complete concept representations. Recently, state-of-the-art distributional semantic models and convolutional neural networks have achieved great success in representing linguistic and visual knowledge respectively. In this paper, we compare both, visual and linguistic representations in their ability to capture different types of fine-grain semantic knowledge\u2014or attributes\u2014of concepts.
Humans often describe objects using attributes, that is, properties such as shape, color or functionality, which often transcend the linguistic and visual modalities. In our setting, we evaluate how well attributes can be predicted by using the unimodal representations as inputs. We are interested in first, finding out whether attributes are generally better captured by either the vision or by the language modality; and second, if none of them is clearly superior (as we hypothesize), what type of attributes or semantic knowledge are better encoded from each modality. Ultimately, our study sheds light on the potential of combining visual and textual representations.", "keyphrases": ["image", "linguistic representation", "attribute", "collell"]} +{"id": "bethard-2013-cleartk", "title": "ClearTK-TimeML: A minimalist approach to TempEval 2013", "abstract": "The ClearTK-TimeML submission to TempEval 2013 competed in all English tasks: identifying events, identifying times, and identifying temporal relations. The system is a pipeline of machine-learning models, each with a small set of features from a simple morpho-syntactic annotation pipeline, and where temporal relations are only predicted for a small set of syntactic constructions and relation types. ClearTK-TimeML ranked 1st for temporal relation F1, time extent strict F1 and event tense accuracy.", "keyphrases": ["tempeval", "syntactic construction", "temporal relation extraction", "top system"]} +{"id": "hovy-yang-2021-importance", "title": "The Importance of Modeling Social Factors of Language: Theory and Practice", "abstract": "Natural language processing (NLP) applications are now more powerful and ubiquitous than ever before. With rapidly developing (neural) models and ever-more available data, current NLP models have access to more information than any human speaker during their life. Still, it would be hard to argue that NLP models have reached human-level capacity. In this position paper, we argue that the reason for the current limitations is a focus on information content while ignoring language's social factors. We show that current NLP systems systematically break down when faced with interpreting the social factors of language. This limits applications to a subset of information-related tasks and prevents NLP from reaching human-level performance. At the same time, systems that incorporate even a minimum of social factors already show remarkable improvements. We formalize a taxonomy of seven social factors based on linguistic theory and exemplify current failures and emerging successes for each of them. We suggest that the NLP community address social factors to get closer to the goal of human-like language understanding.", "keyphrases": ["theory", "nlp community address", "language understanding"]} +{"id": "takeno-etal-2017-controlling", "title": "Controlling Target Features in Neural Machine Translation via Prefix Constraints", "abstract": "We propose prefix constraints, a novel method to enforce constraints on target sentences in neural machine translation. It places a sequence of special tokens at the beginning of target sentence (target prefix), while side constraints places a special token at the end of source sentence (source suffix). Prefix constraints can be predicted from source sentence jointly with target sentence, while side constraints (Sennrich et al., 2016) must be provided by the user or predicted by some other methods.
In both methods, special tokens are designed to encode arbitrary features on target-side or metatextual information. We show that prefix constraints are more flexible than side constraints and can be used to control the behavior of neural machine translation, in terms of output length, bidirectional decoding, domain adaptation, and unaligned target word generation.", "keyphrases": ["neural machine translation", "special token", "decoding", "target word generation"]} +{"id": "dey-fung-2014-hindi", "title": "A Hindi-English Code-Switching Corpus", "abstract": "The aim of this paper is to investigate the rules and constraints of code-switching (CS) in Hindi-English mixed language data. In this paper, we'll discuss how we collected the mixed language corpus. This corpus is primarily made up of student interview speech. The speech was manually transcribed and verified by bilingual speakers of Hindi and English. The code-switching cases in the corpus are discussed and the reasons for code-switching are explained.", "keyphrases": ["code-switching", "hindi", "context code mixing", "bilingual student"]} +{"id": "habernal-etal-2016-c4corpus", "title": "C4Corpus: Multilingual Web-size Corpus with Free License", "abstract": "Large Web corpora containing full documents with permissive licenses are crucial for many NLP tasks. In this article we present the construction of 12 million-pages Web corpus (over 10 billion tokens) licensed under CreativeCommons license family in 50+ languages that has been extracted from CommonCrawl, the largest publicly available general Web crawl to date with about 2 billion crawled URLs. Our highly-scalable Hadoop-based framework is able to process the full CommonCrawl corpus on 2000+ CPU cluster on the Amazon Elastic Map/Reduce infrastructure. The processing pipeline includes license identification, state-of-the-art boilerplate removal, exact duplicate and near-duplicate document removal, and language detection. The construction of the corpus is highly configurable and fully reproducible, and we provide both the framework (DKPro C4CorpusTools) and the resulting data (C4Corpus) to the research community.", "keyphrases": ["license", "processing pipeline", "c4corpu"]} +{"id": "surya-etal-2019-unsupervised", "title": "Unsupervised Neural Text Simplification", "abstract": "The paper presents a first attempt towards unsupervised neural text simplification that relies only on unlabeled text corpora. The core framework is composed of a shared encoder and a pair of attentional-decoders, crucially assisted by discrimination-based losses and denoising. The framework is trained using unlabeled text collected from en-Wikipedia dump. Our analysis (both quantitative and qualitative involving human evaluators) on public test data shows that the proposed model can perform text-simplification at both lexical and syntactic levels, competitive to existing supervised methods. It also outperforms viable unsupervised baselines. Adding a few labeled pairs helps improve the performance further.", "keyphrases": ["neural text simplification", "loss", "denoising", "complex sentence", "unsupervised approach"]} +{"id": "park-etal-2017-rotated", "title": "Rotated Word Vector Representations and their Interpretability", "abstract": "Vector representation of words improves performance in various NLP tasks, but the high dimensional word vectors are very difficult to interpret. We apply several rotation algorithms to the vector representation of words to improve the interpretability.
Unlike previous approaches that induce sparsity, the rotated vectors are interpretable while preserving the expressive performance of the original vectors. Furthermore, any prebuilt word vector representation can be rotated for improved interpretability. We apply rotation to skipgrams and glove and compare the expressive power and interpretability with the original vectors and the sparse overcomplete vectors. The results show that the rotated vectors outperform the original and the sparse overcomplete vectors for interpretability and expressiveness tasks.", "keyphrases": ["word vector", "interpretability", "rotation", "factor analysis"]} +{"id": "ruckle-etal-2020-multicqa", "title": "MultiCQA: Zero-Shot Transfer of Self-Supervised Text Matching Models on a Massive Scale", "abstract": "We study the zero-shot transfer capabilities of text matching models on a massive scale, by self-supervised training on 140 source domains from community question answering forums in English. We investigate the model performances on nine benchmarks of answer selection and question similarity tasks, and show that all 140 models transfer surprisingly well, where the large majority of models substantially outperforms common IR baselines. We also demonstrate that considering a broad selection of source domains is crucial for obtaining the best zero-shot transfer performances, which contrasts the standard procedure that merely relies on the largest and most similar domains. In addition, we extensively study how to best combine multiple source domains. We propose to incorporate self-supervised with supervised multi-task learning on all available source domains. Our best zero-shot transfer model considerably outperforms in-domain BERT and the previous state of the art on six benchmarks. Fine-tuning of our model with in-domain data results in additional large gains and achieves the new state of the art on all nine benchmarks.", "keyphrases": ["text matching model", "massive scale", "multi-task learning"]} +{"id": "hu-etal-2019-diachronic", "title": "Diachronic Sense Modeling with Deep Contextualized Word Embeddings: An Ecological View", "abstract": "Diachronic word embeddings have been widely used in detecting temporal changes. However, existing methods face the meaning conflation deficiency by representing a word as a single vector at each time period. To address this issue, this paper proposes a sense representation and tracking framework based on deep contextualized embeddings, aiming at answering not only what and when, but also how the word meaning changes. The experiments show that our framework is effective in representing fine-grained word senses, and it brings a significant improvement in word change detection task. Furthermore, we model the word change from an ecological viewpoint, and sketch two interesting sense behaviors in the process of language evolution, i.e. sense competition and sense cooperation.", "keyphrases": ["sense representation", "example sentence", "bert embedding"]} +{"id": "chen-etal-2006-chinese", "title": "Chinese Named Entity Recognition with Conditional Random Fields", "abstract": "We present a Chinese Named Entity Recognition (NER) system submitted to the closed track of Sighan Bakeoff 2006. We define some additional features via doing statistics in training corpus. Our system incorporates basic features and additional features based on Conditional Random Fields (CRFs).
In order to correct inconsistent results, we perform the postprocessing procedure according to n-best results given by the CRFs model. Our final system achieved an F-score of 85.14 at MSRA, 89.03 at CityU, and 76.27 at LDC.", "keyphrases": ["conditional random fields", "crfs", "chinese", "character sequence labeling"]} +{"id": "barbieri-etal-2016-emoji", "title": "What does this Emoji Mean? A Vector Space Skip-Gram Model for Twitter Emojis", "abstract": "Emojis allow us to describe objects, situations and even feelings with small images, providing a visual and quick way to communicate. In this paper, we analyse emojis used in Twitter with distributional semantic models. We retrieve 10 million tweets posted by USA users, and we build several skip-gram word embedding models by mapping in the same vectorial space both words and emojis. We test our models with semantic similarity experiments, comparing the output of our models with human assessment. We also carry out an exhaustive qualitative evaluation, showing interesting results.", "keyphrases": ["skip-gram model", "twitter emojis", "word2vec", "vector space embedding"]} +{"id": "klang-nugues-2016-langforia", "title": "Langforia: Language Pipelines for Annotating Large Collections of Documents", "abstract": "In this paper, we describe Langforia, a multilingual processing pipeline to annotate texts with multiple layers: formatting, parts of speech, named entities, dependencies, semantic roles, and entity links. Langforia works as a web service, where the server hosts the language processing components and the client, the input and result visualization. To annotate a text or a Wikipedia page, the user chooses an NLP pipeline and enters the text in the interface or selects the page URL. Once processed, the results are returned to the client, where the user can select the annotation layers s/he wants to visualize. We designed Langforia with a specific focus for Wikipedia, although it can process any type of text. Wikipedia has become an essential encyclopedic corpus used in many NLP projects. However, processing articles and visualizing the annotations are nontrivial tasks that require dealing with multiple markup variants, encodings issues, and tool incompatibilities across the language versions. This motivated the development of a new architecture. A demonstration of Langforia is available for six languages: English, French, German, Spanish, Russian, and Swedish, as well as a web API. Langforia is also provided as a standalone library and is compatible with cluster computing.", "keyphrases": ["wikipedia", "language version", "swedish", "langforia"]} +{"id": "fonseca-martins-2020-revisiting", "title": "Revisiting Higher-Order Dependency Parsers", "abstract": "Neural encoders have allowed dependency parsers to shift from higher-order structured models to simpler first-order ones, making decoding faster and still achieving better accuracy than non-neural parsers. This has led to a belief that neural encoders can implicitly encode structural constraints, such as siblings and grandparents in a tree. We tested this hypothesis and found that neural parsers may benefit from higher-order features, even when employing a powerful pre-trained encoder, such as BERT. While the gains of higher-order features are small in the presence of a powerful encoder, they are consistent for long-range dependencies and long sentences.
In particular, higher-order models are more accurate on full sentence parses and on the exact match of modifier lists, indicating that they deal better with larger, more complex structures.", "keyphrases": ["dependency parser", "presence", "powerful encoder", "graph-based method"]} +{"id": "lang-lapata-2010-unsupervised", "title": "Unsupervised Induction of Semantic Roles", "abstract": "Datasets annotated with semantic roles are an important prerequisite to developing high-performance role labeling systems. Unfortunately, the reliance on manual annotations, which are both difficult and highly expensive to produce, presents a major obstacle to the widespread application of these systems across different languages and text genres. In this paper we describe a method for inducing the semantic roles of verbal arguments directly from unannotated text. We formulate the role induction problem as one of detecting alternations and finding a canonical syntactic form for them. Both steps are implemented in a novel probabilistic model, a latent-variable variant of the logistic classifier. Our method increases the purity of the induced role clusters by a wide margin over a strong baseline.", "keyphrases": ["role induction problem", "alternation", "strong baseline", "non-standard linking", "canonical one"]} +{"id": "sasaki-etal-2018-cross", "title": "Cross-Lingual Learning-to-Rank with Shared Representations", "abstract": "Cross-lingual information retrieval (CLIR) is a document retrieval task where the documents are written in a language different from that of the user's query. This is a challenging problem for data-driven approaches due to the general lack of labeled training data. We introduce a large-scale dataset derived from Wikipedia to support CLIR research in 25 languages. Further, we present a simple yet effective neural learning-to-rank model that shares representations across languages and reduces the data requirement. This model can exploit training data in, for example, Japanese-English CLIR to improve the results of Swahili-English CLIR.", "keyphrases": ["clir", "query", "wikipedia"]} +{"id": "korhonen-2009-automatic", "title": "Automatic Lexical Classification \u2013 Balancing between Machine Learning and Linguistics", "abstract": "Verb classifications have been used to support a number of practical tasks and applications, such as parsing, information extraction, question-answering, and machine translation. However, large-scale exploitation of verb classes in real-world or domain-sensitive tasks has not been possible because existing manually built classifications are incomprehensive. This paper describes recent and on-going research on extending and acquiring lexical classifications automatically. The automatic approach is attractive since it is cost-effective and opens up the opportunity of learning and tuning lexical classifications for the application and domain in question.
However, the development of an optimal approach is challenging, and requires not only expertise in machine learning but also a good understanding of the linguistic principles of lexical classification.", "keyphrases": ["machine learning", "information extraction", "argument structure information", "word-sense disambiguation"]} +{"id": "liu-etal-2021-self", "title": "Self-Alignment Pretraining for Biomedical Entity Representations", "abstract": "Despite the widespread success of self-supervised learning via masked language models (MLM), accurately capturing fine-grained semantic relationships in the biomedical domain remains a challenge. This is of paramount importance for entity-level tasks such as entity linking where the ability to model entity relations (especially synonymy) is pivotal. To address this challenge, we propose SapBERT, a pretraining scheme that self-aligns the representation space of biomedical entities. We design a scalable metric learning framework that can leverage UMLS, a massive collection of biomedical ontologies with 4M+ concepts. In contrast with previous pipeline-based hybrid systems, SapBERT offers an elegant one-model-for-all solution to the problem of medical entity linking (MEL), achieving a new state-of-the-art (SOTA) on six MEL benchmarking datasets. In the scientific domain, we achieve SOTA even without task-specific supervision. With substantial improvement over various domain-specific pretrained MLMs such as BioBERT, SciBERT and PubMedBERT, our pretraining scheme proves to be both effective and robust.", "keyphrases": ["biomedical domain", "scheme", "umls", "pre-training scheme"]} +{"id": "guo-etal-2020-sequence", "title": "Sequence-Level Mixed Sample Data Augmentation", "abstract": "Despite their empirical success, neural networks still have difficulty capturing compositional aspects of natural language. This work proposes a simple data augmentation approach to encourage compositional behavior in neural models for sequence-to-sequence problems. Our approach, SeqMix, creates new synthetic examples by softly combining input/output sequences from the training set. We connect this approach to existing techniques such as SwitchOut and word dropout, and show that these techniques are all essentially approximating variants of a single objective. SeqMix consistently yields approximately 1.0 BLEU improvement on five different translation datasets over strong Transformer baselines. On tasks that require strong compositional generalization such as SCAN and semantic parsing, SeqMix also offers further improvements.", "keyphrases": ["data augmentation approach", "seqmix", "synthetic example", "switchout", "compositional behaviour"]} +{"id": "gala-etal-2020-alector", "title": "Alector: A Parallel Corpus of Simplified French Texts with Alignments of Misreadings by Poor and Dyslexic Readers", "abstract": "In this paper, we present a new parallel corpus addressed to researchers, teachers, and speech therapists interested in text simplification as a means of alleviating difficulties in children learning to read. The corpus is composed of excerpts drawn from 79 authentic literary (tales, stories) and scientific (documentary) texts commonly used in French schools for children aged between 7 to 9 years old. The excerpts were manually simplified at the lexical, morpho-syntactic, and discourse levels in order to propose a parallel corpus for reading tests and for the development of automatic text simplification tools.
A sample of 21 poor-reading and dyslexic children with an average reading delay of 2.5 years read a portion of the corpus. The transcripts of reading errors were integrated into the corpus with the goal of identifying lexical difficulty in the target population. By means of statistical testing, we provide evidence that the manual simplifications significantly reduced reading errors, highlighting that the words targeted for simplification were not only well-chosen but also substituted with substantially easier alternatives. The entire corpus is available for consultation through a web interface and available on demand for research purposes.", "keyphrases": ["parallel corpus", "french", "text simplification"]} +{"id": "murray-etal-2012-using", "title": "Using the Omega Index for Evaluating Abstractive Community Detection", "abstract": "Numerous NLP tasks rely on clustering or community detection algorithms. For many of these tasks, the solutions are disjoint, and the relevant evaluation metrics assume nonoverlapping clusters. In contrast, the relatively recent task of abstractive community detection (ACD) results in overlapping clusters of sentences. ACD is a sub-task of an abstractive summarization system and represents a two-step process. In the first step, we classify sentence pairs according to whether the sentences should be realized by a common abstractive sentence. This results in an undirected graph with sentences as nodes and predicted abstractive links as edges. The second step is to identify communities within the graph, where each community corresponds to an abstractive sentence to be generated. In this paper, we describe how the Omega Index, a metric for comparing non-disjoint clustering solutions, can be used as a summarization evaluation metric for this task. We use the Omega Index to compare and contrast several community detection algorithms.", "keyphrases": ["omega index", "common abstractive sentence", "edge"]} +{"id": "sharoff-2004-stake", "title": "What is at Stake: a Case Study of Russian Expressions Starting with a Preposition", "abstract": "The paper describes an experiment in detecting a specific type of multiword expressions in Russian, namely expressions starting with a preposition. This covers not only prepositional phrases proper, but also fixed syntactic constructions like v techenie ('in the course of'). First, we collect lists of such constructions in a corpus of 50 mln words using a simple mechanism that combines statistical methods with knowledge about the structure of Russian prepositional phrases. Then we analyse the results of this data collection and estimate the efficiency of the collected list for the resolution of morphosyntactic and semantic ambiguity in a corpus.", "keyphrases": ["russian", "preposition", "non-configurational language"]} +{"id": "bosc-vincent-2020-sequence", "title": "Do sequence-to-sequence VAEs learn global features of sentences?", "abstract": "Autoregressive language models are powerful and relatively easy to train. However, these models are usually trained without explicit conditioning labels and do not offer easy ways to control global aspects such as sentiment or topic during generation. Bowman et al. (2016) adapted the Variational Autoencoder (VAE) for natural language with the sequence-to-sequence architecture and claimed that the latent vector was able to capture such global features in an unsupervised manner. We question this claim.
We measure which words benefit most from the latent information by decomposing the reconstruction loss per position in the sentence. Using this method, we find that VAEs are prone to memorizing the first words and the sentence length, producing local features of limited usefulness. To alleviate this, we investigate alternative architectures based on bag-of-words assumptions and language model pretraining. These variants learn latent variables that are more global, i.e., more predictive of topic or sentiment labels. Moreover, using reconstructions, we observe that they decrease memorization: the first word and the sentence length are not recovered as accurately as with the baselines, consequently yielding more diverse reconstructions.", "keyphrases": ["vae", "global feature", "latent variable"]} +{"id": "lambert-banchs-2006-tuning", "title": "Tuning machine translation parameters with SPSA", "abstract": "Most statistical machine translation systems are combinations of various models, and tuning of the scaling factors is an important step. However, this optimisation problem is hard because the objective function has many local minima and the available algorithms cannot achieve a global optimum. Consequently, optimisations starting from different initial settings can converge to fairly different solutions. We present tuning experiments with the Simultaneous Perturbation Stochastic Approximation (SPSA) algorithm, and compare them to tuning with the widely used downhill simplex method. With IWSLT 2006 Chinese-English data, both methods showed similar performance, but SPSA was more robust to the choice of initial settings.", "keyphrases": ["spsa", "translation hypothesis", "beam search"]} +{"id": "lagoudaki-2008-value", "title": "The Value of Machine Translation for the Professional Translator", "abstract": "More and more Translation Memory (TM) systems nowadays are fortified with machine translation (MT) techniques to enable them to propose a translation to the translator when no match is found in his TM resources. The system attempts this by assembling a combination of terms from its terminology database, translations from its memory, and even portions of them. This paper reviews the most popular commercial TM systems with integrated MT techniques and explores their usefulness based on the perceived practical benefits brought to their users. Feedback from translators reveals a variety of attitudes towards machine translation, with some supporting and others contradicting several points of conventional wisdom regarding the relationship between machine translation and human translators.", "keyphrases": ["machine translation", "freelance translator", "language service provider", "productivity"]} +{"id": "hovy-purschke-2018-capturing", "title": "Capturing Regional Variation with Distributed Place Representations and Geographic Retrofitting", "abstract": "Dialects are one of the main drivers of language variation, a major challenge for natural language processing tools. In most languages, dialects exist along a continuum, and are commonly discretized by combining the extent of several preselected linguistic variables. However, the selection of these variables is theory-driven and itself insensitive to change. We use Doc2Vec on a corpus of 16.8M anonymous online posts in the German-speaking area to learn continuous document representations of cities.
These representations capture continuous regional linguistic distinctions, and can serve as input to downstream NLP tasks sensitive to regional variation. By incorporating geographic information via retrofitting and agglomerative clustering with structure, we recover dialect areas at various levels of granularity. Evaluating these clusters against an existing dialect map, we achieve a match of up to 0.77 V-score (harmonic mean of cluster completeness and homogeneity). Our results show that representation learning with retrofitting offers a robust general method to automatically expose dialectal differences and regional variation at a finer granularity than was previously possible.", "keyphrases": ["regional variation", "area", "city", "dialect map"]} +{"id": "langer-etal-2004-text", "title": "Text Type Structure and Logical Document Structure", "abstract": "Most research on automated categorization of documents has concentrated on the assignment of one or many categories to a whole text. However, new applications, e.g. in the area of the Semantic Web, require a richer and more fine-grained annotation of documents, such as detailed thematic information about the parts of a document. Hence we investigate the automatic categorization of text segments of scientific articles with XML markup into 16 topic types from a text type structure schema. A corpus of 47 linguistic articles was provided with XML markup on different annotation layers representing text type structure, logical document structure, and grammatical categories. Six different feature extraction strategies were applied to this corpus and combined in various parametrizations in different classifiers. The aim was to explore the contribution of each type of information, in particular the logical structure features, to the classification accuracy. The results suggest that some of the topic types of our hierarchy are successfully learnable, while the features from the logical structure layer had no particular impact on the results.", "keyphrases": ["new application", "area", "semantic web", "fine-grained annotation", "text type structure"]} +{"id": "kauchak-2013-improving", "title": "Improving Text Simplification Language Modeling Using Unsimplified Text Data", "abstract": "In this paper we examine language modeling for text simplification. Unlike some text-to-text translation tasks, text simplification is a monolingual translation task allowing for text in both the input and output domain to be used for training the language model. We explore the relationship between normal English and simplified English and compare language models trained on varying amounts of text from each. We evaluate the models intrinsically with perplexity and extrinsically on the lexical simplification task from SemEval 2012. We find that a combined model using both simplified and normal English data achieves a 23% improvement in perplexity and a 24% improvement on the lexical simplification task over a model trained only on simple data. Post-hoc analysis shows that the additional unsimplified data provides better coverage for unseen and rare n-grams.", "keyphrases": ["simplification", "language modeling", "translation task", "readability"]} +{"id": "pitler-etal-2012-dynamic", "title": "Dynamic Programming for Higher Order Parsing of Gap-Minding Trees", "abstract": "We introduce gap inheritance, a new structural property on trees, which provides a way to quantify the degree to which intervals of descendants can be nested.
Based on this property, two new classes of trees are derived that provide a closer approximation to the set of plausible natural language dependency trees than some alternative classes of trees: unlike projective trees, a word can have descendants in more than one interval; unlike spanning trees, these intervals cannot be nested in arbitrary ways. The 1-Inherit class of trees has exactly the same empirical coverage of natural language sentences as the class of mildly non-projective trees, yet the optimal scoring tree can be found in an order of magnitude less time. Gap-minding trees (the second class) have the property that all edges into an interval of descendants come from the same node, and thus an algorithm which uses only single intervals can produce trees in which a node has descendants in multiple intervals.", "keyphrases": ["gap-minding tree", "coverage", "non-projective tree"]} +{"id": "wang-etal-2015-language", "title": "Language and Domain Independent Entity Linking with Quantified Collective Validation", "abstract": "Linking named mentions detected in a source document to an existing knowledge base provides disambiguated entity referents for the mentions. This allows better document analysis, knowledge extraction and knowledge base population. Most of the previous research extensively exploited the linguistic features of the source documents in a supervised or semi-supervised way. These systems therefore cannot be easily applied to a new language or domain. In this paper, we present a novel unsupervised algorithm named Quantified Collective Validation that avoids excessive linguistic analysis on the source documents and fully leverages the knowledge base structure for the entity linking task. We show our approach achieves state-of-the-art English entity linking performance and demonstrate successful deployment in a new language (Chinese) and two new domains (Biomedical and Earth Science). Experiment datasets and system demonstration are available at http://tw.rpi.edu/web/doc/hanwang_emnlp_2015 for research purpose.", "keyphrases": ["quantified collective validation", "mention", "chinese", "new domain"]} +{"id": "tamkin-etal-2020-investigating", "title": "Investigating Transferability in Pretrained Language Models", "abstract": "How does language model pretraining help transfer learning? We consider a simple ablation technique for determining the impact of each pretrained layer on transfer task performance. This method, partial reinitialization, involves replacing different layers of a pretrained model with random weights, then finetuning the entire model on the transfer task and observing the change in performance. This technique reveals that in BERT, layers with high probing performance on downstream GLUE tasks are neither necessary nor sufficient for high accuracy on those tasks. Furthermore, the benefit of using pretrained parameters for a layer varies dramatically with finetuning dataset size: parameters that provide tremendous performance improvement when data is plentiful may provide negligible benefits in data-scarce settings.
These results reveal the complexity of the transfer learning process, highlighting the limitations of methods that operate on frozen models or single data samples.", "keyphrases": ["transferability", "language model", "different layer"]} +{"id": "uchimoto-etal-2006-dependency", "title": "Dependency-structure Annotation to Corpus of Spontaneous Japanese", "abstract": "In Japanese, syntactic structure of a sentence is generally represented by the relationship between phrasal units, or bunsetsus in Japanese, based on a dependency grammar. In the same way, the syntactic structure of a sentence in a large, spontaneous, Japanese-speech corpus, the Corpus of Spontaneous Japanese (CSJ), is represented by dependency relationships between bunsetsus. This paper describes the criteria and definitions of dependency relationships between bunsetsus in the CSJ. The dependency structure of the CSJ is investigated, and the difference in the dependency structures of written text and spontaneous speech is discussed in terms of the dependency accuracies obtained by using a corpus-based model. It is shown that the accuracy of automatic dependency-structure analysis can be improved if characteristic phenomena of spontaneous speech such as self-corrections, basic utterance units in spontaneous speech, and bunsetsus that have no modifiee are detected and used for dependency-structure analysis.", "keyphrases": ["syntactic structure", "same way", "csj", "dependency relationship"]} +{"id": "jakob-etal-2010-mapping", "title": "Mapping between Dependency Structures and Compositional Semantic Representations", "abstract": "This paper investigates the mapping between two semantic formalisms, namely the tectogrammatical layer of the Prague Dependency Treebank 2.0 (PDT) and (Robust) Minimal Recursion Semantics ((R)MRS). It is a first attempt to relate the dependency-based annotation scheme of PDT to a compositional semantics approach like (R)MRS. A mapping algorithm that converts PDT trees to (R)MRS structures is developed, associating (R)MRSs to each node on the dependency tree. Furthermore, composition rules are formulated and the relation between dependency in PDT and semantic heads in (R)MRS is analyzed. It turns out that structure and dependencies, morphological categories and some coreferences can be preserved in the target structures. Moreover, valency and free modifications are distinguished using the valency dictionary of PDT as an additional resource. The validation results show that systematically correct underspecified target representations can be obtained by a rule-based mapping approach, which is an indicator that (R)MRS is indeed robust in relation to the formal representation of Czech data. This finding is novel, for Czech, with its free word order and rich morphology, is typologically different than languages analyzed with (R)MRS to date.", "keyphrases": ["prague dependency treebank", "node", "mapping"]} +{"id": "ivanova-etal-2012-contrastive", "title": "Who Did What to Whom? A Contrastive Study of Syntacto-Semantic Dependencies", "abstract": "We investigate aspects of interoperability between a broad range of common annotation schemes for syntacto-semantic dependencies. With the practical goal of making the LinGO Redwoods Treebank accessible to broader usage, we contrast seven distinct annotation schemes of functor-argument structure, both in terms of syntactic and semantic relations.
Drawing examples from a multi-annotated gold standard, we show how abstractly similar information can take quite different forms across frameworks. We further seek to shed light on the representational 'distance' between pure bilexical dependencies, on the one hand, and full-blown logical-form propositional semantics, on the other hand. Furthermore, we propose a fully automated conversion procedure from (logical-form) meaning representation to bilexical semantic dependencies.", "keyphrases": ["syntacto-semantic dependency", "bilexical dependency", "structural comparison", "other parser", "broad hpsg construction"]} +{"id": "popovic-ney-2006-pos", "title": "POS-based Word Reorderings for Statistical Machine Translation", "abstract": "In this work we investigate new possibilities for improving the quality of statistical machine translation (SMT) by applying word reorderings of the source language sentences based on Part-of-Speech tags. Results are presented on the European Parliament corpus containing about 700k sentences and 15M running words. In order to investigate sparse training data scenarios, we also report results obtained on about 1% of the original corpus. The source languages are Spanish and English and target languages are Spanish, English and German. We propose two types of reorderings depending on the language pair and the translation direction: local reorderings of nouns and adjectives for translation from and into Spanish and long-range reorderings of verbs for translation into German. For our best translation system, we achieve up to 2% relative reduction of WER and up to 7% relative increase of BLEU score. Improvements can be seen both on the reordered sentences as well as on the rest of the test corpus. Local reorderings are especially important for the translation systems trained on the small corpus whereas long-range reorderings are more effective for the larger corpus.", "keyphrases": ["statistical machine translation", "pos information", "small set", "popovic\u0301"]} +{"id": "mueller-etal-2014-dependency", "title": "Dependency parsing with latent refinements of part-of-speech tags", "abstract": "In this paper we propose a method to increase dependency parser performance without using additional labeled or unlabeled data by refining the layer of predicted part-of-speech (POS) tags. We perform experiments on English and German and show significant improvements for both languages. The refinement is based on generative split-merge training for Hidden Markov models (HMMs).", "keyphrases": ["refinement", "split-merge training", "hidden markov model"]} +{"id": "chen-etal-2019-evaluating", "title": "Evaluating Question Answering Evaluation", "abstract": "As the complexity of question answering (QA) datasets evolves, moving away from restricted formats like span extraction and multiple-choice (MC) to free-form answer generation, it is imperative to understand how well current metrics perform in evaluating QA. This is especially important as existing metrics (BLEU, ROUGE, METEOR, and F1) are computed using n-gram similarity and have a number of well-known drawbacks. In this work, we study the suitability of existing metrics in QA. For generative QA, we show that while current metrics do well on existing datasets, converting multiple-choice datasets into free-response datasets is challenging for current metrics. We also look at span-based QA, where F1 is a reasonable metric.
We show that F1 may not be suitable for all extractive QA tasks depending on the answer types. Our study suggests that while current metrics may be suitable for existing QA datasets, they limit the complexity of QA datasets that can be created. This is especially true in the context of free-form QA, where we would like our models to be able to generate more complex and abstractive answers, thus necessitating new metrics that go beyond n-gram based matching. As a step towards a better QA metric, we explore using BERTScore, a recently proposed metric for evaluating translation, for QA. We find that although it fails to provide stronger correlation with human judgements, future work focused on tailoring a BERT-based metric to QA evaluation may prove fruitful.", "keyphrases": ["rouge", "human judgment", "summarization"]} +{"id": "de-lhoneux-nivre-2016-investigating", "title": "Should Have, Would Have, Could Have. Investigating Verb Group Representations for Parsing with Universal Dependencies.", "abstract": "Treebanks have recently been released for a number of languages with the harmonized annotation created by the Universal Dependencies project. The representation of certain constructions in UD is known to be suboptimal for parsing and may be worth transforming for the purpose of parsing. In this paper, we focus on the representation of verb groups. Several studies have shown that parsing works better when auxiliaries are the head of auxiliary dependency relations, which is not the case in UD. We therefore transformed verb groups in UD treebanks, parsed the test set and transformed it back, and contrary to expectations, observed significant decreases in accuracy. We provide suggestive evidence that improvements in previous studies were obtained because the transformation helps disambiguating POS tags of main verbs and auxiliaries. The question of why parsing accuracy decreases with this approach in the case of UD is left open.", "keyphrases": ["verb group", "universal dependencies", "treebank"]} +{"id": "hoshen-wolf-2018-non", "title": "Non-Adversarial Unsupervised Word Translation", "abstract": "Unsupervised word translation from non-parallel inter-lingual corpora has attracted much research interest. Very recently, neural network methods trained with adversarial loss functions achieved high accuracy on this task. Despite the impressive success of the recent techniques, they suffer from the typical drawbacks of generative adversarial models: sensitivity to hyper-parameters, long training time and lack of interpretability. In this paper, we make the observation that two sufficiently similar distributions can be aligned correctly with iterative matching methods. We present a novel method that first aligns the second moment of the word distributions of the two languages and then iteratively refines the alignment. Extensive experiments on word translation of European and Non-European languages show that our method achieves better performance than recent state-of-the-art deep adversarial approaches and is competitive with the supervised baseline. It is also efficient, easy to parallelize on CPU and interpretable.", "keyphrases": ["unsupervised word translation", "adversarial model", "long training time", "interpretability", "second moment"]} +{"id": "nakamura-kawahara-2018-jfckb", "title": "JFCKB: Japanese Feature Change Knowledge Base", "abstract": "Commonsense knowledge plays an essential role in our language activities.
Although many projects have aimed to develop language resources for commonsense knowledge, there is little work focusing on connotational meanings. This is because constructing commonsense knowledge including connotational meanings is challenging. In this paper, we present a Japanese knowledge base where arguments in event sentences are associated with various feature changes caused by the events. For example, \u201cmy child\u201d in \u201cmy wife hits my child\u201d is associated with some feature changes, such as increase in pain, increase in anger, increase in disgust, and decrease in joy. We constructed this knowledge base through crowdsourcing tasks by gathering feature changes of arguments in event sentences. After the construction of the knowledge base, we conducted an experiment in anaphora resolution using the knowledge base. We regarded anaphora resolution as an antecedent candidate ranking task and used Ranking SVM as the solver. Experimental results demonstrated the usefulness of our feature change knowledge base.", "keyphrases": ["knowledge base", "wife", "jfckb"]} +{"id": "kiritchenko-mohammad-2016-effect", "title": "The Effect of Negators, Modals, and Degree Adverbs on Sentiment Composition", "abstract": "Negators, modals, and degree adverbs can significantly affect the sentiment of the words they modify. Often, their impact is modeled with simple heuristics, although recent work has shown that such heuristics do not capture the true sentiment of multi-word phrases. We created a dataset of phrases that include various negators, modals, and degree adverbs, as well as their combinations. Both the phrases and their constituent content words were annotated with real-valued scores of sentiment association. Using phrasal terms in the created dataset, we analyze the impact of individual modifiers and the average effect of the groups of modifiers on overall sentiment. We find that the effect of modifiers varies substantially among the members of the same group. Furthermore, each individual modifier can affect sentiment words in different ways. Therefore, solutions based on statistical learning seem more promising than fixed hand-crafted rules on the task of automatic sentiment prediction.", "keyphrases": ["negator", "modal", "degree adverb", "sentiment composition"]} +{"id": "hope-etal-2021-extracting", "title": "Extracting a Knowledge Base of Mechanisms from COVID-19 Papers", "abstract": "The COVID-19 pandemic has spawned a diverse body of scientific literature that is challenging to navigate, stimulating interest in automated tools to help find useful knowledge. We pursue the construction of a knowledge base (KB) of mechanisms\u2014a fundamental concept across the sciences, which encompasses activities, functions and causal relations, ranging from cellular processes to economic impacts. We extract this information from the natural language of scientific papers by developing a broad, unified schema that strikes a balance between relevance and breadth. We annotate a dataset of mechanisms with our schema and train a model to extract mechanism relations from papers. Our experiments demonstrate the utility of our KB in supporting interdisciplinary scientific search over COVID-19 literature, outperforming the prominent PubMed search in a study with clinical experts.
Our search engine, dataset and code are publicly available.", "keyphrases": ["knowledge base", "activity", "mechanism relation"]} +{"id": "leusch-etal-2003-novel", "title": "A novel string-to-string distance measure with applications to machine translation evaluation", "abstract": "We introduce a string-to-string distance measure which extends the edit distance by block transpositions as constant cost edit operation. An algorithm for the calculation of this distance measure in polynomial time is presented. We then demonstrate how this distance measure can be used as an evaluation criterion in machine translation. The correlation between this evaluation criterion and human judgment is systematically compared with that of other automatic evaluation measures on two translation tasks. In general, like other automatic evaluation measures, the criterion shows low correlation at sentence level, but good correlation at system level.", "keyphrases": ["edit distance", "per", "bag-of-word", "word error rate", "invwer"]} +{"id": "jing-etal-2003-howtogetachinesename", "title": "How to get a Chinese Name(Entity): Segmentation and Combination Issues", "abstract": "When building a Chinese named entity recognition system, one must deal with certain language-specific issues such as whether the model should be based on characters or words. While there is no unique answer to this question, we discuss in detail advantages and disadvantages of each model, identify problems in segmentation and suggest possible solutions, presenting our observations, analysis, and experimental results. The second topic of this paper is classifier combination. We present and describe four classifiers for Chinese named entity recognition and describe various methods for combining their outputs. The results demonstrate that classifier combination is an effective technique of improving system performance: experiments over a large annotated corpus of fine-grained entity types exhibit a 10% relative reduction in F-measure error.", "keyphrases": ["segmentation", "entity recognition", "character"]} +{"id": "bowman-etal-2016-fast", "title": "A Fast Unified Model for Parsing and Sentence Understanding", "abstract": "Tree-structured neural networks exploit valuable syntactic parse information as they interpret the meanings of sentences. However, they suffer from two key technical problems that make them slow and unwieldy for large-scale NLP tasks: they usually operate on parsed sentences and they do not directly support batched computation. We address these issues by introducing the Stack-augmented Parser-Interpreter Neural Network (SPINN), which combines parsing and interpretation within a single tree-sequence hybrid model by integrating tree-structured sentence interpretation into the linear sequential structure of a shift-reduce parser. Our model supports batched computation for a speedup of up to 25\u00d7 over other tree-structured models, and its integrated parser can operate on unparsed data with little loss in accuracy. We evaluate it on the Stanford NLI entailment task and show that it significantly outperforms other sentence-encoding models.", "keyphrases": ["hybrid model", "sentence interpretation", "sequential structure", "shift-reduce parser", "tree-lstm"]} +{"id": "maletti-2010-synchronous", "title": "Why Synchronous Tree Substitution Grammars?", "abstract": "Synchronous tree substitution grammars are a translation model that is used in syntax-based machine translation.
They are investigated in a formal setting and compared to a competitor that is at least as expressive. The competitor is the extended multi bottom-up tree transducer, which is the bottom-up analogue with one essential additional feature. This model has been investigated in theoretical computer science, but seems widely unknown in natural language processing. The two models are compared with respect to standard algorithms (binarization, regular restriction, composition, application). Particular attention is paid to the complexity of the algorithms.", "keyphrases": ["restriction", "stssg", "point"]} +{"id": "toutanova-etal-2004-leaf", "title": "The Leaf Path Projection View of Parse Trees: Exploring String Kernels for HPSG Parse Selection", "abstract": "We present a novel representation of parse trees as lists of paths (leaf projection paths) from leaves to the top level of the tree. This representation allows us to achieve significantly higher accuracy in the task of HPSG parse selection than standard models, and makes the application of string kernels natural. We define tree kernels via string kernels on projection paths and explore their performance in the context of parse disambiguation. We apply SVM ranking models and achieve an exact sentence accuracy of 85.40% on the Redwoods corpus.", "keyphrases": ["string kernel", "hpsg parse selection", "semantic dependency", "ancestor node"]} +{"id": "shi-etal-2021-learning", "title": "Learning Syntax from Naturally-Occurring Bracketings", "abstract": "Naturally-occurring bracketings, such as answer fragments to natural language questions and hyperlinks on webpages, can reflect human syntactic intuition regarding phrasal boundaries. Their availability and approximate correspondence to syntax make them appealing as distant information sources to incorporate into unsupervised constituency parsing. But they are noisy and incomplete; to address this challenge, we develop a partial-brackets-aware structured ramp loss in learning. Experiments demonstrate that our distantly-supervised models trained on naturally-occurring bracketing data are more accurate in inducing syntactic structures than competing unsupervised systems. On the English WSJ corpus, our models achieve an unlabeled F1 score of 68.9 for constituency parsing.", "keyphrases": ["bracketing", "answer fragment", "hyperlink"]} +{"id": "johansson-nugues-2007-extended", "title": "Extended Constituent-to-Dependency Conversion for English", "abstract": "We describe a new method to convert English constituent trees using the Penn Treebank annotation style into dependency trees. The new format was inspired by annotation practices used in other dependency treebanks with the intention to produce a better interface to further semantic processing than existing methods. In particular, we used a richer set of edge labels and introduced links to handle long-distance phenomena such as wh-movement and topicalization. The resulting trees generally have a more complex dependency structure. For example, 6% of the trees contain at least one nonprojective link, which is difficult for many parsing algorithms. As can be expected, the more complex structure and the enriched set of edge labels make the trees more difficult to predict, and we observed a decrease in parsing accuracy when applying two dependency parsers to the new corpus. However, the richer information contained in the new trees resulted in a 23% error reduction in a baseline FrameNet semantic role labeler that relied on dependency arc labels only. 
(Less)", "keyphrases": ["dependency tree", "link", "wh-movement", "conversion scheme"]} +{"id": "ge-mooney-2005-statistical", "title": "A Statistical Semantic Parser that Integrates Syntax and Semantics", "abstract": "We introduce a learning semantic parser, Scissor, that maps natural-language sentences to a detailed, formal, meaning-representation language. It first uses an integrated statistical parser to produce a semantically augmented parse tree, in which each non-terminal node has both a syntactic and a semantic label. A compositional-semantics procedure is then used to map the augmented parse tree into a final meaning representation. We evaluate the system in two domains, a natural-language database interface and an interpreter for coaching instructions in robotic soccer. We present experimental results demonstrating that Scissor produces more accurate semantic representations than several previous approaches.", "keyphrases": ["syntax", "natural language question", "scissor model"]} +{"id": "zhang-etal-2015-neural", "title": "Neural Networks for Open Domain Targeted Sentiment", "abstract": "Open domain targeted sentiment is the joint information extraction task that finds target mentions together with the sentiment towards each mention from a text corpus. The task is typically modeled as a sequence labeling problem, and solved using state-of-the-art labelers such as CRF. We empirically study the effect of word embeddings and automatic feature combinations on the task by extending a CRF baseline using neural networks, which have demonstrated large potentials for sentiment analysis. Results show that the neural model can give better results by significantly increasing the recall. In addition, we propose a novel integration of neural and discrete features, which combines their relative advantages, leading to significantly higher results compared to both baselines.", "keyphrases": ["word embedding", "sentiment-bearing iob label", "sequence labeling task", "unified tagging scheme", "fashion"]} +{"id": "uchimoto-etal-2004-multilingual", "title": "Multilingual Aligned Parallel Treebank Corpus Reflecting Contextual Information and Its Applications", "abstract": "This paper describes Japanese-English-Chinese aligned parallel treebank corpora of newspaper articles. They have been constructed by translating each sentence in the Penn Treebank and the Kyoto University text corpus into a corresponding natural sentence in a target language. Each sentence is translated so as to reflect its contextual information and is annotated with morphological and syntactic structures and phrasal alignment. This paper also describes the possible applications of the parallel corpus and proposes a new framework to aid in translation. In this framework, parallel translations whose source language sentence is similar to a given sentence can be semi-automatically generated. In this paper we show that the framework can be achieved by using our aligned parallel treebank corpus.", "keyphrases": ["contextual information", "parallel corpora", "national institute"]} +{"id": "mehdad-etal-2014-abstractive", "title": "Abstractive Summarization of Spoken and Written Conversations Based on Phrasal Queries", "abstract": "We propose a novel abstractive querybased summarization system for conversations, where queries are defined as phrases reflecting a user information needs. We rank and extract the utterances in a conversation based on the overall content and the phrasal query information. 
We cluster the selected sentences based on their lexical similarity and aggregate the sentences in each cluster by means of a word graph model. We propose a ranking strategy to select the best path in the constructed graph as a query-based abstract sentence for each cluster. A resulting summary consists of abstractive sentences representing the phrasal query information and the overall content of the conversation. Automatic and manual evaluation results over meeting, chat and email conversations show that our approach significantly outperforms baselines and previous extractive models.", "keyphrases": ["conversation", "word graph", "meeting summarization"]} +{"id": "rubinstein-etal-2013-toward", "title": "Toward Fine-grained Annotation of Modality in Text", "abstract": "We present a linguistically-informed schema for annotating modal expressions and describe its application to a subset of the MPQA corpus of English texts (Wiebe et al. 2005). The annotation is fine-grained in two respects: (i) in the range of expressions that are defined as modal targets and (ii) in the amount of information that is annotated for each target expression. We use inter-annotator reliability results to support a two-way distinction between priority and nonpriority modality types.", "keyphrases": ["modality", "scope annotation", "mpqa english corpus"]} +{"id": "wang-pan-2018-recursive", "title": "Recursive Neural Structural Correspondence Network for Cross-domain Aspect and Opinion Co-Extraction", "abstract": "Fine-grained opinion analysis aims to extract aspect and opinion terms from each sentence for opinion summarization. Supervised learning methods have proven to be effective for this task. However, in many domains, the lack of labeled data hinders the learning of a precise extraction model. In this case, unsupervised domain adaptation methods are desired to transfer knowledge from the source domain to any unlabeled target domain. In this paper, we develop a novel recursive neural network that could reduce domain shift effectively in word level through syntactic relations. We treat these relations as invariant \u201cpivot information\u201d across domains to build structural correspondences and generate an auxiliary task to predict the relation between any two adjacent words in the dependency tree. In the end, we demonstrate state-of-the-art results on three benchmark datasets.", "keyphrases": ["cross-domain aspect", "opinion term", "dependency relation"]} +{"id": "ross-etal-2022-tailor", "title": "Tailor: Generating and Perturbing Text with Semantic Controls", "abstract": "Controlled text perturbation is useful for evaluating and improving model generalizability. However, current techniques rely on training a model for every target perturbation, which is expensive and hard to generalize. We present Tailor, a semantically-controlled text generation system. Tailor builds on a pretrained seq2seq model and produces textual outputs conditioned on control codes derived from semantic representations. We craft a set of operations to modify the control codes, which in turn steer generation towards targeted attributes. These operations can be further composed into higher-level ones, allowing for flexible perturbation strategies. We demonstrate the effectiveness of these perturbations in multiple applications. First, we use Tailor to automatically create high-quality contrast sets for four distinct natural language processing (NLP) tasks. 
These contrast sets contain fewer spurious artifacts and are complementary to manually annotated ones in their lexical diversity. Second, we show that Tailor perturbations can improve model generalization through data augmentation. Perturbing just \u223c2% of training data leads to a 5.8-point gain on an NLI challenge set measuring reliance on syntactic heuristics.", "keyphrases": ["control code", "perturbation strategy", "tailor"]} +{"id": "dasigi-etal-2019-quoref", "title": "Quoref: A Reading Comprehension Dataset with Questions Requiring Coreferential Reasoning", "abstract": "Machine comprehension of texts longer than a single sentence often requires coreference resolution. However, most current reading comprehension benchmarks do not contain complex coreferential phenomena and hence fail to evaluate the ability of models to resolve coreference. We present a new crowdsourced dataset containing more than 24K span-selection questions that require resolving coreference among entities in over 4.7K English paragraphs from Wikipedia. Obtaining questions focused on such phenomena is challenging, because it is hard to avoid lexical cues that shortcut complex reasoning. We deal with this issue by using a strong baseline model as an adversary in the crowdsourcing loop, which helps crowdworkers avoid writing questions with exploitable surface cues. We show that state-of-the-art reading comprehension models perform significantly worse than humans on this benchmark\u2014the best model performance is 70.5 F1, while the estimated human performance is 93.4 F1.", "keyphrases": ["comprehension", "reasoning", "coreference"]} +{"id": "nishimura-etal-2018-multi", "title": "Multi-Source Neural Machine Translation with Missing Data", "abstract": "Multi-source translation is an approach to exploit multiple inputs (e.g. in two different languages) to increase translation accuracy. In this paper, we examine approaches for multi-source neural machine translation (NMT) using an incomplete multilingual corpus in which some translations are missing. In practice, many multilingual corpora are not complete due to the difficulty to provide translations in all of the relevant languages (for example, in TED talks, most English talks only have subtitles for a small portion of the languages that TED supports). Existing studies on multi-source translation did not explicitly handle such situations. This study focuses on the use of incomplete multilingual corpora in multi-encoder NMT and mixture of NMT experts and examines a very simple implementation where missing source translations are replaced by a special symbol NULL. These methods allow us to use incomplete corpora both at training time and test time. In experiments with real incomplete multilingual corpora of TED Talks, the multi-source NMT with the NULL tokens achieved higher translation accuracies measured by BLEU than those by any one-to-one NMT systems.", "keyphrases": ["neural machine translation", "situation", "multiple source", "direction"]} +{"id": "dasgupta-ng-2007-high", "title": "High-Performance, Language-Independent Morphological Segmentation", "abstract": "This paper introduces an unsupervised morphological segmentation algorithm that shows robust performance for four languages with different levels of morphological complexity. In particular, our algorithm outperforms Goldsmith's Linguistica and Creutz and Lagus's Morphessor for English and Bengali, and achieves performance that is comparable to the best results for all three PASCAL evaluation datasets.
Improvements arise from (1) the use of relative corpus frequency and suffix level similarity for detecting incorrect morpheme attachments and (2) the induction of orthographic rules and allomorphs for segmenting words where roots exhibit spelling changes during morpheme attachments.", "keyphrases": ["morphological segmentation", "complexity", "affix", "transitional probability"]} +{"id": "nissim-etal-2020-fair", "title": "Fair Is Better than Sensational: Man Is to Doctor as Woman Is to Doctor", "abstract": "Analogies such as man is to king as woman is to X are often used to illustrate the amazing power of word embeddings. Concurrently, they have also been used to expose how strongly human biases are encoded in vector spaces trained on natural language, with examples like man is to computer programmer as woman is to homemaker. Recent work has shown that analogies are in fact not an accurate diagnostic for bias, but this does not mean that they are not used anymore, or that their legacy is fading. Instead of focusing on the intrinsic problems of the analogy task as a bias detection tool, we discuss a series of issues involving implementation as well as subjective choices that might have yielded a distorted picture of bias in word embeddings. We stand by the truth that human biases are present in word embeddings, and, of course, the need to address them. But analogies are not an accurate tool to do so, and the way they have been most often used has exacerbated some possibly non-existing biases and perhaps hidden others. Because they are still widely popular, and some of them have become classics within and outside the NLP community, we deem it important to provide a series of clarifications that should put well-known, and potentially new analogies, into the right perspective.", "keyphrases": ["woman", "subjective factor", "judgement"]} +{"id": "liu-liu-2008-correlation", "title": "Correlation between ROUGE and Human Evaluation of Extractive Meeting Summaries", "abstract": "Automatic summarization evaluation is critical to the development of summarization systems. While ROUGE has been shown to correlate well with human evaluation for content match in text summarization, there are many characteristics in multiparty meeting domain, which may pose potential problems to ROUGE. In this paper, we carefully examine how well the ROUGE scores correlate with human evaluation for extractive meeting summarization. Our experiments show that generally the correlation is rather low, but a significantly better correlation can be obtained by accounting for several unique meeting characteristics, such as disfluencies and speaker information, especially when evaluating system-generated summaries.", "keyphrases": ["human evaluation", "summarization system", "rouge score"]} +{"id": "you-etal-2020-hard", "title": "Hard-Coded Gaussian Attention for Neural Machine Translation", "abstract": "Recent work has questioned the importance of the Transformer's multi-headed attention for achieving high translation quality. We push further in this direction by developing a \u201chard-coded\u201d attention variant without any learned parameters. Surprisingly, replacing all learned self-attention heads in the encoder and decoder with fixed, input-agnostic Gaussian distributions minimally impacts BLEU scores across four different language pairs. However, additionally, hard-coding cross attention (which connects the decoder to the encoder) significantly lowers BLEU, suggesting that it is more important than self-attention. 
Much of this BLEU drop can be recovered by adding just a single learned cross attention head to an otherwise hard-coded Transformer. Taken as a whole, our results offer insight into which components of the Transformer are actually important, which we hope will guide future work into the development of simpler and more efficient attention-based models.", "keyphrases": ["gaussian attention", "translation quality", "bleu score", "position"]} +{"id": "zhang-etal-2020-syntax", "title": "Syntax-Aware Opinion Role Labeling with Dependency Graph Convolutional Networks", "abstract": "Opinion role labeling (ORL) is a fine-grained opinion analysis task and aims to answer \u201cwho expressed what kind of sentiment towards what?\u201d. Due to the scarcity of labeled data, ORL remains challenging for data-driven methods. In this work, we try to enhance neural ORL models with syntactic knowledge by comparing and integrating different representations. We also propose dependency graph convolutional networks (DEPGCN) to encode parser information at different processing levels. In order to compensate for parser inaccuracy and reduce error propagation, we introduce multi-task learning (MTL) to train the parser and the ORL model simultaneously. We verify our methods on the benchmark MPQA corpus. The experimental results show that syntactic information is highly valuable for ORL, and our final MTL model effectively boosts the F1 score by 9.29 over the syntax-agnostic baseline. In addition, we find that the contributions from syntactic knowledge do not fully overlap with contextualized word representations (BERT). Our best model achieves 4.34 higher F1 score than the current state-of-the-art.", "keyphrases": ["opinion role labeling", "convolutional network", "syntactic knowledge"]} +{"id": "kuncoro-etal-2018-lstms", "title": "LSTMs Can Learn Syntax-Sensitive Dependencies Well, But Modeling Structure Makes Them Better", "abstract": "Language exhibits hierarchical structure, but recent work using a subject-verb agreement diagnostic argued that state-of-the-art language models, LSTMs, fail to learn long-range syntax sensitive dependencies. Using the same diagnostic, we show that, in fact, LSTMs do succeed in learning such dependencies\u2014provided they have enough capacity. We then explore whether models that have access to explicit syntactic information learn agreement more effectively, and how the way in which this structural information is incorporated into the model impacts performance. We find that the mere presence of syntactic information does not improve accuracy, but when model architecture is determined by syntax, number agreement is improved. Further, we find that the choice of how syntactic structure is built affects how well number agreement is learned: top-down construction outperforms left-corner and bottom-up variants in capturing non-local structural dependencies.", "keyphrases": ["syntax-sensitive dependency", "language model", "number agreement", "long-distance dependency"]} +{"id": "anzaroot-etal-2014-learning", "title": "Learning Soft Linear Constraints with Application to Citation Field Extraction", "abstract": "Accurately segmenting a citation string into fields for authors, titles, etc. is a challenging task because the output typically obeys various global constraints. Previous work has shown that modeling soft constraints, where the model is encouraged, but not required, to obey the constraints, can substantially improve segmentation performance.
On the other hand, for imposing hard constraints, dual decomposition is a popular technique for efficient prediction given existing algorithms for unconstrained inference. We extend the technique to perform prediction subject to soft constraints. Moreover, with a technique for performing inference given soft constraints, it is easy to automatically generate large families of constraints and learn their costs with a simple convex optimization problem during training. This allows us to obtain substantial gains in accuracy on a new, challenging citation extraction dataset.", "keyphrases": ["citation field extraction", "global constraint", "large family", "cost"]} +{"id": "arun-etal-2009-monte", "title": "Monte Carlo inference and maximization for phrase-based translation", "abstract": "Recent advances in statistical machine translation have used beam search for approximate NP-complete inference within probabilistic translation models. We present an alternative approach of sampling from the posterior distribution defined by a translation model. We define a novel Gibbs sampler for sampling translations given a source sentence and show that it effectively explores this posterior distribution. In doing so we overcome the limitations of heuristic beam search and obtain theoretically sound solutions to inference problems such as finding the maximum probability translation and minimum expected risk training and decoding.", "keyphrases": ["approximation", "sample", "posterior distribution", "gibbs"]} +{"id": "van-deemter-2006-generating", "title": "Generating Referring Expressions that Involve Gradable Properties", "abstract": "This article examines the role of gradable properties in referring expressions from the perspective of natural language generation. First, we propose a simple semantic analysis of vague descriptions (i.e., referring expressions that contain gradable adjectives) that reflects the context-dependent meaning of the adjectives in them. Second, we show how this type of analysis can inform algorithms for the generation of vague descriptions from numerical data. Third, we ask when such descriptions should be used. The article concludes with a discussion of salience and pointing, which are analyzed as if they were gradable adjectives.", "keyphrases": ["gradable property", "natural language generation", "adjective", "modifier"]} +{"id": "smit-etal-2020-combining", "title": "Combining Automatic Labelers and Expert Annotations for Accurate Radiology Report Labeling Using BERT", "abstract": "The extraction of labels from radiology text reports enables large-scale training of medical imaging models. Existing approaches to report labeling typically rely either on sophisticated feature engineering based on medical domain knowledge or manual annotations by experts. In this work, we introduce a BERT-based approach to medical image report labeling that exploits both the scale of available rule-based systems and the quality of expert annotations. We demonstrate superior performance of a biomedically pretrained BERT model first trained on annotations of a rule-based labeler and then finetuned on a small set of expert annotations augmented with automated backtranslation. 
We find that our final model, CheXbert, is able to outperform the previous best rules-based labeler with statistical significance, setting a new SOTA for report labeling on one of the largest datasets of chest x-rays.", "keyphrases": ["expert annotation", "radiology report", "image report"]} +{"id": "xu-cohen-2018-stock", "title": "Stock Movement Prediction from Tweets and Historical Prices", "abstract": "Stock movement prediction is a challenging problem: the market is highly stochastic, and we make temporally-dependent predictions from chaotic data. We treat these three complexities and present a novel deep generative model jointly exploiting text and price signals for this task. Unlike the case with discriminative or topic modeling, our model introduces recurrent, continuous latent variables for a better treatment of stochasticity, and uses neural variational inference to address the intractable posterior inference. We also provide a hybrid objective with temporal auxiliary to flexibly capture predictive dependencies. We demonstrate the state-of-the-art performance of our proposed model on a new stock movement prediction dataset which we collected.", "keyphrases": ["temporal auxiliary", "predictive dependency", "stock movement prediction", "news"]} +{"id": "gooding-etal-2021-word", "title": "Word Complexity is in the Eye of the Beholder", "abstract": "Lexical complexity is a highly subjective notion, yet this factor is often neglected in lexical simplification and readability systems which use a \u201done-size-fits-all\u201d approach. In this paper, we investigate which aspects contribute to the notion of lexical complexity in various groups of readers, focusing on native and non-native speakers of English, and how the notion of complexity changes depending on the proficiency level of a non-native reader. To facilitate reproducibility of our approach and foster further research into these aspects, we release a dataset of complex words annotated by readers with different backgrounds.", "keyphrases": ["factor", "non-native speaker", "word complexity", "audience"]} +{"id": "blasi-etal-2022-systematic", "title": "Systematic Inequalities in Language Technology Performance across the World's Languages", "abstract": "Natural language processing (NLP) systems have become a central technology in communication, education, medicine, artificial intelligence, and many other domains of research and development. While the performance of NLP methods has grown enormously over the last decade, this progress has been restricted to a minuscule subset of the world's \u22486,500 languages. We introduce a framework for estimating the global utility of language technologies as revealed in a comprehensive snapshot of recent publications in NLP. Our analyses involve the field at large, but also more in-depth studies on both user-facing technologies (machine translation, language understanding, question answering, text-to-speech synthesis) as well as foundational NLP tasks (dependency parsing, morphological inflection). In the process, we (1) quantify disparities in the current state of NLP research, (2) explore some of its associated societal and academic factors, and (3) produce tailored recommendations for evidence-based policy making aimed at promoting more global and equitable language technologies. 
Data and code to reproduce the findings discussed in this paper are available on GitHub ().", "keyphrases": ["technology", "machine translation", "systematic inequality"]} +{"id": "duan-etal-2012-twitter", "title": "Twitter Topic Summarization by Ranking Tweets using Social Influence and Content Quality", "abstract": "In this paper, we propose a time-line based framework for topic summarization in Twitter. We summarize topics by sub-topics along time line to fully capture rapid topic evolution in Twitter. Specifically, we rank and select salient and diversified tweets as a summary of each sub-topic. We have observed that ranking tweets is significantly different from ranking sentences in traditional extractive document summarization. We model and formulate the tweet ranking in a unified mutual reinforcement graph, where the social influence of users and the content quality of tweets are taken into consideration simultaneously in a mutually reinforcing manner. Extensive experiments are conducted on 3.9 million tweets. The results show that the proposed approach outperforms previous approaches by 14% improvement on average ROUGE-1. Moreover, we show how the content quality of tweets and the social influence of users effectively improve the performance of measuring the salience of tweets.", "keyphrases": ["summarization", "social influence", "twitter"]} +{"id": "zhang-etal-2017-position", "title": "Position-aware Attention and Supervised Data Improve Slot Filling", "abstract": "Organized relational knowledge in the form of \u201cknowledge graphs\u201d is important for many applications. However, the ability to populate knowledge bases with facts automatically extracted from documents has improved frustratingly slowly. This paper simultaneously addresses two issues that have held back prior work. We first propose an effective new model, which combines an LSTM sequence model with a form of entity position-aware attention that is better suited to relation extraction. Then we build TACRED, a large (119,474 examples) supervised relation extraction dataset obtained via crowdsourcing and targeted towards TAC KBP relations. The combination of better supervised data and a more appropriate high-capacity model enables much better relation extraction performance. When the model trained on this new dataset replaces the previous relation extraction component of the best TAC KBP 2015 slot filling system, its F1 score increases markedly from 22.2% to 26.7%.", "keyphrases": ["slot filling system", "position-aware attention", "few-shot scenario", "powerful encoder"]} +{"id": "pustejovsky-krishnaswamy-2014-generating", "title": "Generating Simulations of Motion Events from Verbal Descriptions", "abstract": "In this paper, we describe a computational model for motion events in natural language that maps from linguistic expressions, through a dynamic event interpretation, into three-dimensional temporal simulations in a model. Starting with the model from (Pustejovsky and Moszkowicz, 2011), we analyze motion events using temporally-traced Labelled Transition Systems. We model the distinction between path- and manner-motion in an operational semantics, and further distinguish different types of manner-of-motion verbs in terms of the mereo-topological relations that hold throughout the process of movement. From these representations, we generate minimal models, which are realized as three-dimensional simulations in software developed with the game engine, Unity.
The generated simulations act as a conceptual \u201cdebugger\u201d for the semantics of different motion verbs: that is, by testing for consistency and informativeness in the model, simulations expose the presuppositions associated with linguistic expressions and their compositions. Because the model generation component is still incomplete, this paper focuses on an implementation which maps directly from linguistic interpretations into the Unity code snippets that create the simulations.", "keyphrases": ["motion event", "unity", "formal semantic"]} +{"id": "gillick-etal-2019-learning", "title": "Learning Dense Representations for Entity Retrieval", "abstract": "We show that it is feasible to perform entity linking by training a dual encoder (two-tower) model that encodes mentions and entities in the same dense vector space, where candidate entities are retrieved by approximate nearest neighbor search. Unlike prior work, this setup does not rely on an alias table followed by a re-ranker, and is thus the first fully learned entity retrieval model. We show that our dual encoder, trained using only anchor-text links in Wikipedia, outperforms discrete alias table and BM25 baselines, and is competitive with the best comparable results on the standard TACKBP-2010 dataset. In addition, it can retrieve candidates extremely fast, and generalizes well to a new dataset derived from Wikinews. On the modeling side, we demonstrate the dramatic value of an unsupervised negative mining algorithm for this task.", "keyphrases": ["entity retrieval", "dual encoder", "candidate", "wikipedia", "bm25 baseline"]} +{"id": "chen-etal-2018-xl", "title": "XL-NBT: A Cross-lingual Neural Belief Tracking Framework", "abstract": "Task-oriented dialog systems are becoming pervasive, and many companies heavily rely on them to complement human agents for customer service in call centers. With globalization, the need for providing cross-lingual customer support becomes more urgent than ever. However, cross-lingual support poses great challenges\u2014it requires a large amount of additional annotated data from native speakers. In order to bypass the expensive human annotation and achieve the first step towards the ultimate goal of building a universal dialog system, we set out to build a cross-lingual state tracking framework. Specifically, we assume that there exists a source language with dialog belief tracking annotations while the target languages have no annotated dialog data of any form. Then, we pre-train a state tracker for the source language as a teacher, which is able to exploit easy-to-access parallel data. We then distill and transfer its own knowledge to the student state tracker in target languages. We specifically discuss two types of common parallel resources: bilingual corpus and bilingual dictionary, and design different transfer learning strategies accordingly. Experimentally, we successfully use English state tracker as the teacher to transfer its knowledge to both Italian and German trackers and achieve promising results.", "keyphrases": ["xl-nbt", "dialogue state tracking", "different language"]} +{"id": "ahmad-etal-2019-cross", "title": "Cross-Lingual Dependency Parsing with Unlabeled Auxiliary Languages", "abstract": "Cross-lingual transfer learning has become an important weapon to battle the unavailability of annotated resources for low-resource languages. 
One of the fundamental techniques to transfer across languages is learning language-agnostic representations, in the form of word embeddings or contextual encodings. In this work, we propose to leverage unannotated sentences from auxiliary languages to help learning language-agnostic representations. Specifically, we explore adversarial training for learning contextual encoders that produce invariant representations across languages to facilitate cross-lingual transfer. We conduct experiments on cross-lingual dependency parsing where we train a dependency parser on a source language and transfer it to a wide range of target languages. Experiments on 28 target languages demonstrate that adversarial training significantly improves the overall transfer performances under several different settings. We conduct a careful analysis to evaluate the language-agnostic representations resulting from adversarial training.", "keyphrases": ["dependency parsing", "auxiliary language", "cross-lingual transfer", "word embedding", "contextual encoder"]} +{"id": "zhang-etal-2018-simplifying", "title": "Simplifying Neural Machine Translation with Addition-Subtraction Twin-Gated Recurrent Networks", "abstract": "In this paper, we propose an addition-subtraction twin-gated recurrent network (ATR) to simplify neural machine translation. The recurrent units of ATR are heavily simplified to have the smallest number of weight matrices among units of all existing gated RNNs. With the simple addition and subtraction operation, we introduce a twin-gated mechanism to build input and forget gates which are highly correlated. Despite this simplification, the essential non-linearities and capability of modeling long-distance dependencies are preserved. Additionally, the proposed ATR is more transparent than LSTM/GRU due to the simplification. Forward self-attention can be easily established in ATR, which makes the proposed network interpretable. Experiments on WMT14 translation tasks demonstrate that ATR-based neural machine translation can yield competitive performance on English-German and English-French language pairs in terms of both translation quality and speed. Further experiments on NIST Chinese-English translation, natural language inference and Chinese word segmentation verify the generality and applicability of ATR on different natural language processing tasks.", "keyphrases": ["neural machine translation", "recurrent unit", "rnn", "weight matrix"]} +{"id": "rosiger-2018-bashi", "title": "BASHI: A Corpus of Wall Street Journal Articles Annotated with Bridging Links", "abstract": "This paper presents a corpus resource for the anaphoric phenomenon of bridging, named BASHI. The corpus consisting of 50 Wall Street Journal (WSJ) articles adds bridging anaphors and their antecedents to the other gold annotations that have been created as part of the OntoNotes project (Weischedel et al., 2011). Bridging anaphors are context-dependent expressions that do not refer to the same entity as their antecedent, but to a related entity. Bridging resolution is an under-researched area of NLP, where the lack of annotated training data makes the application of statistical models difficult. Thus, we believe that the corpus is a valuable resource for researchers interested in anaphoric phenomena going beyond coreference, as it can be combined with other corpora to create a larger corpus resource.
The corpus contains 57,709 tokens and 459 bridging pairs and is available for download in an offset-based format and a CoNLL-12 style bridging column that can be merged with the other annotation layers in OntoNotes. The paper also reviews previous annotation efforts and different definitions of bridging and reports challenges with respect to the bridging annotation.", "keyphrases": ["ontonotes", "bridging annotation", "bashi", "anaphora resolution"]} +{"id": "fort-etal-2020-rigor", "title": "Rigor Mortis: Annotating MWEs with a Gamified Platform", "abstract": "We present here Rigor Mortis, a gamified crowdsourcing platform designed to evaluate the intuition of the speakers, then train them to annotate multi-word expressions (MWEs) in French corpora. We previously showed that the speakers' intuition is reasonably good (65% in recall on non-fixed MWE). We detail here the annotation results, after a training phase using some of the tests developed in the PARSEME-FR project.", "keyphrases": ["mwes", "french corpora", "rigor mortis"]} +{"id": "subramanian-etal-2020-obtaining", "title": "Obtaining Faithful Interpretations from Compositional Neural Networks", "abstract": "Neural module networks (NMNs) are a popular approach for modeling compositionality: they achieve high accuracy when applied to problems in language and vision, while reflecting the compositional structure of the problem in the network architecture. However, prior work implicitly assumed that the structure of the network modules, describing the abstract reasoning process, provides a faithful explanation of the model's reasoning; that is, that all modules perform their intended behaviour. In this work, we propose and conduct a systematic evaluation of the intermediate outputs of NMNs on NLVR2 and DROP, two datasets which require composing multiple reasoning steps. We find that the intermediate outputs differ from the expected output, illustrating that the network structure does not provide a faithful explanation of model behaviour. To remedy that, we train the model with auxiliary supervision and propose particular choices for module architecture that yield much better faithfulness, at a minimal cost to accuracy.", "keyphrases": ["faithfulness", "neural module network", "modeling compositionality", "explanation", "intermediate output"]} +{"id": "liu-etal-2018-entity", "title": "Entity-Duet Neural Ranking: Understanding the Role of Knowledge Graph Semantics in Neural Information Retrieval", "abstract": "This paper presents the Entity-Duet Neural Ranking Model (EDRM), which introduces knowledge graphs to neural search systems. EDRM represents queries and documents by their words and entity annotations. The semantics from knowledge graphs are integrated in the distributed representations of their entities, while the ranking is conducted by interaction-based neural ranking networks. The two components are learned end-to-end, making EDRM a natural combination of entity-oriented search and neural information retrieval. Our experiments on a commercial search log demonstrate the effectiveness of EDRM. 
Our analyses reveal that knowledge graph semantics significantly improve the generalization ability of neural ranking models.", "keyphrases": ["knowledge graph", "neural information retrieval", "entity annotation"]} +{"id": "benajiba-etal-2008-arabic", "title": "Arabic Named Entity Recognition using Optimized Feature Sets", "abstract": "The Named Entity Recognition (NER) task has been garnering significant attention in NLP as it helps improve the performance of many natural language processing applications. In this paper, we investigate the impact of using different sets of features in two discriminative machine learning frameworks, namely, Support Vector Machines and Conditional Random Fields using Arabic data. We explore lexical, contextual and morphological features on eight standardized data-sets of different genres. We measure the impact of the different features in isolation, rank them according to their impact for each named entity class and incrementally combine them in order to infer the optimal machine learning approach and feature set. Our system yields an F\u03b2=1-measure of 83.5 on ACE 2003 Broadcast News data.", "keyphrases": ["entity recognition", "feature set", "arabic ner", "crf sequence labeling"]} +{"id": "naert-etal-2020-lsf", "title": "LSF-ANIMAL: A Motion Capture Corpus in French Sign Language Designed for the Animation of Signing Avatars", "abstract": "Signing avatars allow deaf people to access information in their preferred language using an interactive visualization of the sign language spatio-temporal content. However, avatars are often procedurally animated, resulting in robotic and unnatural movements, which are therefore rejected by the community for which they are intended. To overcome this lack of authenticity, solutions in which the avatar is animated from motion capture data are promising. Yet, the initial data set drastically limits the range of signs that the avatar can produce. Therefore, it can be interesting to enrich the initial corpus with new content by editing the captured motions. For this purpose, we collected the LSF-ANIMAL corpus, a French Sign Language (LSF) corpus composed of captured isolated signs and full sentences that can be used both to study LSF features and to generate new signs and utterances. This paper presents the precise definition and content of this corpus, technical considerations relative to the motion capture process (including the marker set definition), the post-processing steps required to obtain data in a standard motion format and the annotation scheme used to label the data. The quality of the corpus with respect to intelligibility, accuracy and realism is perceptually evaluated by 41 participants including native LSF signers.", "keyphrases": ["sign language", "avatar", "lsf-animal corpus"]} +{"id": "lazaridou-etal-2020-multi", "title": "Multi-agent Communication meets Natural Language: Synergies between Functional and Structural Language Learning", "abstract": "We present a method for combining multi-agent communication and traditional data-driven approaches to natural language learning, with an end goal of teaching agents to communicate with humans in natural language. Our starting point is a language model that has been trained on generic, not task-specific language data. We then place this model in a multi-agent self-play environment that generates task-specific rewards used to adapt or modulate the model, turning it into a task-conditional language model.
We introduce a new way of combining the two types of learning based on the idea of reranking language model samples, and show that this method outperforms others in communicating with humans in a visual referential communication task. Finally, we present a taxonomy of different types of language drift that can occur alongside a set of measures to detect them.", "keyphrases": ["language data", "communication task", "multi-agent communication"]} +{"id": "banerjee-etal-2019-hierarchical", "title": "Hierarchical Transfer Learning for Multi-label Text Classification", "abstract": "Multi-Label Hierarchical Text Classification (MLHTC) is the task of categorizing documents into one or more topics organized in a hierarchical taxonomy. MLHTC can be formulated by combining multiple binary classification problems with an independent classifier for each category. We propose a novel transfer learning based strategy, HTrans, where binary classifiers at lower levels in the hierarchy are initialized using parameters of the parent classifier and fine-tuned on the child category classification task. In HTrans, we use a Gated Recurrent Unit (GRU)-based deep learning architecture coupled with attention. Compared to binary classifiers trained from scratch, our HTrans approach results in significant improvements of 1% on micro-F1 and 3% on macro-F1 on the RCV1 dataset. Our experiments also show that binary classifiers trained from scratch are significantly better than single multi-label models.", "keyphrases": ["multi-label text classification", "htrans", "low level", "transfer parameter", "parent model"]} +{"id": "sugawara-etal-2018-makes", "title": "What Makes Reading Comprehension Questions Easier?", "abstract": "A challenge in creating a dataset for machine reading comprehension (MRC) is to collect questions that require a sophisticated understanding of language to answer beyond using superficial cues. In this work, we investigate what makes questions easier across 12 recent MRC datasets with three question styles (answer extraction, description, and multiple choice). We propose to employ simple heuristics to split each dataset into easy and hard subsets and examine the performance of two baseline models for each of the subsets. We then manually annotate questions sampled from each subset with both validity and requisite reasoning skills to investigate which skills explain the difference between easy and hard questions. From this study, we observed that (i) the baseline performances for the hard subsets remarkably degrade compared to those of entire datasets, (ii) hard questions require knowledge inference and multiple-sentence reasoning in comparison with easy questions, and (iii) multiple-choice questions tend to require a broader range of reasoning skills than answer extraction and description questions. These results suggest that one might overestimate recent advances in MRC.", "keyphrases": ["superficial cue", "reasoning skill", "easy question"]} +{"id": "tsvetkov-etal-2016-polyglot", "title": "Polyglot Neural Language Models: A Case Study in Cross-Lingual Phonetic Representation Learning", "abstract": "We introduce polyglot language models, recurrent neural network models trained to predict symbol sequences in many different languages using shared representations of symbols and conditioning on typological information about the language to be predicted.
We apply these to the problem of modeling phone sequences---a domain in which universal symbol inventories and cross-linguistically shared feature representations are a natural fit. Intrinsic evaluation on held-out perplexity, qualitative analysis of the learned representations, and extrinsic evaluation in two downstream applications that make use of phonetic features show (i) that polyglot models better generalize to held-out data than comparable monolingual models and (ii) that polyglot phonetic feature representations are of higher quality than those learned monolingually.", "keyphrases": ["language model", "phonetic representation learning", "polyglot language model", "machine polyglotism"]} +{"id": "zhong-etal-2017-time", "title": "Time Expression Analysis and Recognition Using Syntactic Token Types and General Heuristic Rules", "abstract": "Extracting time expressions from free text is a fundamental task for many applications. We analyze the time expressions from four datasets and find that only a small group of words are used to express time information, and the words in time expressions demonstrate similar syntactic behaviour. Based on the findings, we propose a type-based approach, named SynTime, to recognize time expressions. Specifically, we define three main syntactic token types, namely time token, modifier, and numeral, to group time-related regular expressions over tokens. On the types we design general heuristic rules to recognize time expressions. In recognition, SynTime first identifies the time tokens from raw text, then searches their surroundings for modifiers and numerals to form time segments, and finally merges the time segments to time expressions. As a light-weight rule-based tagger, SynTime runs in real time, and can be easily expanded by simply adding keywords for the text of different types and of different domains. Experiment on benchmark datasets and tweets data shows that SynTime outperforms state-of-the-art methods.", "keyphrases": ["recognition", "token type", "syntime", "time expression"]} +{"id": "el-kishky-etal-2020-ccaligned", "title": "CCAligned: A Massive Collection of Cross-Lingual Web-Document Pairs", "abstract": "Cross-lingual document alignment aims to identify pairs of documents in two distinct languages that are of comparable content or translations of each other. In this paper, we exploit the signals embedded in URLs to label web documents at scale with an average precision of 94.5% across different language pairs. We mine sixty-eight snapshots of the Common Crawl corpus and identify web document pairs that are translations of each other. We release a new web dataset consisting of over 392 million URL pairs from Common Crawl covering documents in 8144 language pairs of which 137 pairs include English. In addition to curating this massive dataset, we introduce baseline methods that leverage cross-lingual representations to identify aligned documents based on their textual content. Finally, we demonstrate the value of this parallel documents dataset through a downstream task of mining parallel sentences and measuring the quality of machine translations from models trained on this mined data. 
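A toy illustration of the three-stage heuristic sketched in the zhong-etal-2017-time record above (mark time tokens, expand over neighboring modifiers and numerals, merge adjacent segments). The keyword lists here are invented placeholders, not SynTime's actual lexicons:

```python
TIME_TOKENS = {"monday", "january", "week", "year", "am", "pm"}  # placeholder lexicon
MODIFIERS   = {"early", "late", "next", "last", "this"}          # placeholder lexicon

def find_time_expressions(tokens):
    # Stage 1: mark time tokens; Stage 2: expand left over modifiers and
    # numerals and right over numerals; Stage 3: merge adjacent segments.
    segments = []
    for i, tok in enumerate(tokens):
        if tok.lower() in TIME_TOKENS:
            lo = i
            while lo > 0 and (tokens[lo-1].lower() in MODIFIERS or tokens[lo-1].isdigit()):
                lo -= 1
            hi = i
            while hi + 1 < len(tokens) and tokens[hi+1].isdigit():
                hi += 1
            segments.append((lo, hi))
    merged = []
    for lo, hi in sorted(segments):
        if merged and lo <= merged[-1][1] + 1:
            merged[-1] = (merged[-1][0], max(hi, merged[-1][1]))
        else:
            merged.append((lo, hi))
    return [" ".join(tokens[lo:hi+1]) for lo, hi in merged]

print(find_time_expressions("See you early next Monday at 9 am".split()))
# -> ['early next Monday', '9 am']
```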
Our objective in releasing this dataset is to foster new research in cross-lingual NLP across a variety of low, medium, and high-resource languages.", "keyphrases": ["web", "average precision", "parallel sentence", "parallel data"]} +{"id": "ivanova-etal-2013-survey", "title": "Survey on parsing three dependency representations for English", "abstract": "In this paper we focus on practical issues of data representation for dependency parsing. We carry out an experimental comparison of (a) three syntactic dependency schemes; (b) three data-driven dependency parsers; and (c) the influence of two different approaches to lexical category disambiguation (aka tagging) prior to parsing. Comparing parsing accuracies in various setups, we study the interactions of these three aspects and analyze which configurations are easier to learn for a dependency parser.", "keyphrases": ["dependency representation", "state-of-the-art parser", "accuracy level", "complement", "same data set"]} +{"id": "razmara-sarkar-2013-ensemble", "title": "Ensemble Triangulation for Statistical Machine Translation", "abstract": "State-of-the-art statistical machine translation systems rely heavily on training data, and insufficient training data usually results in poor translation quality. One solution to alleviate this problem is triangulation. Triangulation uses a third language as a pivot through which another source-target translation system can be built. In this paper, we dynamically create multiple such triangulated systems and combine them using a novel approach called ensemble decoding. Experimental results of this approach show significant improvements in the BLEU score over the direct source-target system. Our approach also outperforms a strong linear mixture baseline.", "keyphrases": ["triangulation", "pivot", "phrase coverage", "different system", "increase"]} +{"id": "bowman-dahl-2021-will", "title": "What Will it Take to Fix Benchmarking in Natural Language Understanding?", "abstract": "Evaluation for many natural language understanding (NLU) tasks is broken: Unreliable and biased systems score so highly on standard benchmarks that there is little room for researchers who develop better systems to demonstrate their improvements. The recent trend to abandon IID benchmarks in favor of adversarially-constructed, out-of-distribution test sets ensures that current models will perform poorly, but ultimately only obscures the abilities that we want our benchmarks to measure. In this position paper, we lay out four criteria that we argue NLU benchmarks should meet. We argue most current benchmarks fail at these criteria, and that adversarial data collection does not meaningfully address the causes of these failures. Instead, restoring a healthy evaluation ecosystem will require significant progress in the design of benchmark datasets, the reliability with which they are annotated, their size, and the ways they handle social bias.", "keyphrases": ["natural language understanding", "biased system score", "little room", "benchmark dataset"]} +{"id": "bhattacharja-2010-benglish", "title": "Benglish Verbs: A Case of Code-mixing in Bengali", "abstract": "In this article, we show how grammar can account for Benglish verbs, a particular type of complex predicate, which are constituted of an English word and a Bengali verb (e.g. /EksiDenT kOra/ \u2018to have an accident\u2019, /in kOra/ \u2018to get/come/put in\u2019 or /kOnfuz kOra/ \u2018to confuse\u2019). We analyze these verbs in the light of a couple of models (e.g.
Kageyama, 1991; Lieber, 1992; Matsumoto, 1996) which claim that complex predicates are necessarily formed in syntax. However, Benglish verbs like /in kOra/ or /kOnfuz kOra/ are problematic for these approaches because it is unclear how the preposition in or the flexional verb confuse can appear as the arguments of the verb /kOra/ \u2018to do\u2019 in an underlying syntactic structure. We claim that all Benglish verbs can be satisfactorily handled in Morphology in the light of Whole Word Morphology (Ford et al., 1997 and Singh, 2006).", "keyphrases": ["bengali", "complex predicate", "english word"]} +{"id": "yanaka-etal-2019-help", "title": "HELP: A Dataset for Identifying Shortcomings of Neural Models in Monotonicity Reasoning", "abstract": "Large crowdsourced datasets are widely used for training and evaluating neural models on natural language inference (NLI). Despite these efforts, neural models have a hard time capturing logical inferences, including those licensed by phrase replacements, so-called monotonicity reasoning. Since no large dataset has been developed for monotonicity reasoning, it is still unclear whether the main obstacle is the size of datasets or the model architectures themselves. To investigate this issue, we introduce a new dataset, called HELP, for handling entailments with lexical and logical phenomena. We add it to training data for the state-of-the-art neural models and evaluate them on test sets for monotonicity phenomena. The results showed that our data augmentation improved the overall accuracy. We also find that the improvement is better on monotonicity inferences with lexical replacements than on downward inferences with disjunction and modification. This suggests that some types of inferences can be improved by our data augmentation while others are immune to it.", "keyphrases": ["monotonicity reasoning", "natural language inference", "entailment", "data augmentation"]} +{"id": "diab-bhutada-2009-verb", "title": "Verb Noun Construction MWE Token Classification", "abstract": "We address the problem of classifying multi-word expression tokens in running text. We focus our study on Verb-Noun Constructions (VNC) that vary in their idiomaticity depending on context. VNC tokens are classified as either idiomatic or literal. We present a supervised learning approach to the problem. We experiment with different features. Our approach yields the best results to date on MWE classification by combining different linguistically motivated features; the overall performance is an F-measure of 84.58%, corresponding to an F-measure of 89.96% for idiomaticity identification and classification and 62.03% for literal identification and classification.", "keyphrases": ["mwe", "running text", "verb-noun constructions", "vnc", "idiomatic use"]} +{"id": "choi-etal-2010-propbank-instance", "title": "Propbank Instance Annotation Guidelines Using a Dedicated Editor, Jubilee", "abstract": "This paper gives guidelines of how to annotate Propbank instances using a dedicated editor, Jubilee. Propbank is a corpus in which the arguments of each verb predicate are annotated with their semantic roles in relation to the predicate. Propbank annotation also requires the choice of a sense ID for each predicate.
Jubilee facilitates this annotation process by displaying several resources of syntactic and semantic information simultaneously: the syntactic structure of a sentence is displayed in the main frame, the available senses with their corresponding argument structures are displayed in another frame, all available Propbank arguments are displayed for the annotator's choice, and example annotations of each sense of the predicate are available to the annotator for viewing. Easy access to each of these resources allows the annotator to quickly absorb and apply the necessary syntactic and semantic information pertinent to each predicate for consistent and efficient annotation. Jubilee has been successfully adapted to many Propbank projects in several universities. The tool runs platform independently, is light enough to run as an X11 application and supports multiple languages such as Arabic, Chinese, English, Hindi and Korean.", "keyphrases": ["annotator", "dedicated editor", "predicate"]} +{"id": "tanvir-etal-2021-estbert", "title": "EstBERT: A Pretrained Language-Specific BERT for Estonian", "abstract": "This paper presents EstBERT, a large pretrained transformer-based language-specific BERT model for Estonian. Recent work has evaluated multilingual BERT models on Estonian tasks and found them to outperform the baselines. Still, based on existing studies on other languages, a language-specific BERT model is expected to improve over the multilingual ones. We first describe the EstBERT pretraining process and then present the models' results based on the finetuned EstBERT for multiple NLP tasks, including POS and morphological tagging, dependency parsing, named entity recognition and text classification. The evaluation results show that the models based on EstBERT outperform multilingual BERT models on five tasks out of seven, providing further evidence that training language-specific BERT models is still useful, even when multilingual models are available.", "keyphrases": ["estonian", "bert model", "monolingual model"]} +{"id": "yahya-etal-2012-natural", "title": "Natural Language Questions for the Web of Data", "abstract": "The Linked Data initiative comprises structured databases in the Semantic-Web data model RDF. Exploring this heterogeneous data by structured query languages is tedious and error-prone even for skilled users. To ease the task, this paper presents a methodology for translating natural language questions into structured SPARQL queries over linked-data sources. \n \nOur method is based on an integer linear program to solve several disambiguation tasks jointly: the segmentation of questions into phrases; the mapping of phrases to semantic entities, classes, and relations; and the construction of SPARQL triple patterns. Our solution harnesses the rich type system provided by knowledge bases in the web of linked data, to constrain our semantic-coherence objective function. We present experiments on both the question translation and the resulting query answering.", "keyphrases": ["web", "different schema", "knowledge basis"]} +{"id": "kolak-resnik-2005-ocr", "title": "OCR Post-Processing for Low Density Languages", "abstract": "We present a lexicon-free post-processing method for optical character recognition (OCR), implemented using weighted finite state machines.
We evaluate the technique in a number of scenarios relevant for natural language processing, including creation of new OCR capabilities for low density languages, improvement of OCR performance for a native commercial system, acquisition of knowledge from a foreign-language dictionary, creation of a parallel text, and machine translation from OCR output.", "keyphrases": ["low density language", "finite state machine", "sequence-to-sequence model", "cebuano"]} +{"id": "bae-etal-2019-summary", "title": "Summary Level Training of Sentence Rewriting for Abstractive Summarization", "abstract": "As an attempt to combine extractive and abstractive summarization, Sentence Rewriting models adopt the strategy of extracting salient sentences from a document first and then paraphrasing the selected ones to generate a summary. However, the existing models in this framework mostly rely on sentence-level rewards or suboptimal labels, causing a mismatch between a training objective and evaluation metric. In this paper, we present a novel training signal that directly maximizes summary-level ROUGE scores through reinforcement learning. In addition, we incorporate BERT into our model, making good use of its ability on natural language understanding. In extensive experiments, we show that a combination of our proposed model and training procedure obtains new state-of-the-art performance on both CNN/Daily Mail and New York Times datasets. We also demonstrate that it generalizes better on DUC-2002 test set.", "keyphrases": ["abstractive summarization", "scoring", "extract-then-rewrite architecture", "language model"]} +{"id": "jagannatha-yu-2016-bidirectional", "title": "Bidirectional RNN for Medical Event Detection in Electronic Health Records", "abstract": "Sequence labeling for extraction of medical events and their attributes from unstructured text in Electronic Health Record (EHR) notes is a key step towards semantic understanding of EHRs. It has important applications in health informatics including pharmacovigilance and drug surveillance. The state of the art supervised machine learning models in this domain are based on Conditional Random Fields (CRFs) with features calculated from fixed context windows. In this application, we explored recurrent neural network frameworks and show that they significantly outperformed the CRF models.", "keyphrases": ["rnn", "medical event", "unstructured text"]} +{"id": "yong-torrent-2020-semi", "title": "Semi-supervised Deep Embedded Clustering with Anomaly Detection for Semantic Frame Induction", "abstract": "Although FrameNet is recognized as one of the most fine-grained lexical databases, its coverage of lexical units is still limited. To tackle this issue, we propose a two-step frame induction process: for a set of lexical units not yet present in Berkeley FrameNet data release 1.7, first remove those that cannot fit into any existing semantic frame in FrameNet; then, assign the remaining lexical units to their correct frames. We also present the Semi-supervised Deep Embedded Clustering with Anomaly Detection (SDEC-AD) model\u2014an algorithm that maps high-dimensional contextualized vector representations of lexical units to a low-dimensional latent space for better frame prediction and uses reconstruction error to identify lexical units that cannot evoke frames in FrameNet. SDEC-AD outperforms the state-of-the-art methods in both steps of the frame induction process.
Empirical results also show that definitions provide contextual information for representing and characterizing the frame membership of lexical units.", "keyphrases": ["anomaly detection", "low-dimensional latent space", "well frame prediction"]} +{"id": "uchendu-etal-2021-turingbench-benchmark", "title": "TURINGBENCH: A Benchmark Environment for Turing Test in the Age of Neural Text Generation", "abstract": "Recent progress in generative language models has enabled machines to generate astonishingly realistic texts. While there are many legitimate applications of such models, there is also a rising need to distinguish machine-generated texts from human-written ones (e.g., fake news detection). However, to our best knowledge, there is currently no benchmark environment with datasets and tasks to systematically study the so-called \u201cTuring Test\u201d problem for neural text generation methods. In this work, we present the TURINGBENCH benchmark environment, which is comprised of (1) a dataset with 200K human- or machine-generated samples across 20 labels: Human, GPT-1, GPT-2_small, GPT-2_medium, GPT-2_large, GPT-2_xl, GPT-2_PyTorch, GPT-3, GROVER_base, GROVER_large, GROVER_mega, CTRL, XLM, XLNET_base, XLNET_large, FAIR_wmt19, FAIR_wmt20, TRANSFORMER_XL, PPLM_distil, PPLM_gpt2, (2) two benchmark tasks\u2013i.e., Turing Test (TT) and Authorship Attribution (AA), and (3) a website with leaderboards. Our preliminary experimental results using TURINGBENCH show that GPT-3 and FAIR_wmt20 are the current winners, among all language models tested, in generating the most human-like indistinguishable texts with the lowest F1 score by five state-of-the-art TT detection models. The TURINGBENCH is available at: ", "keyphrases": ["language model", "machine-generated text", "turingbench"]} +{"id": "agarwal-etal-2009-contextual", "title": "Contextual Phrase-Level Polarity Analysis Using Lexical Affect Scoring and Syntactic N-Grams", "abstract": "We present a classifier to predict contextual polarity of subjective phrases in a sentence. Our approach features lexical scoring derived from the Dictionary of Affect in Language (DAL) and extended through WordNet, allowing us to automatically score the vast majority of words in our input avoiding the need for manual labeling. We augment lexical scoring with n-gram analysis to capture the effect of context. We combine DAL scores with syntactic constituents and then extract n-grams of constituents from all sentences. We also use the polarity of all syntactic constituents within the sentence as features. Our results show significant improvement over a majority class baseline as well as a more difficult baseline consisting of lexical n-grams.", "keyphrases": ["polarity", "n-gram", "subjectivity analysis"]} +{"id": "zhang-etal-2007-chunk", "title": "Chunk-Level Reordering of Source Language Sentences with Automatically Learned Rules for Statistical Machine Translation", "abstract": "In this paper, we describe a source-side reordering method based on syntactic chunks for phrase-based statistical machine translation. First, we shallow parse the source language sentences. Then, reordering rules are automatically learned from source-side chunks and word alignments. During translation, the rules are used to generate a reordering lattice for each sentence. Experimental results are reported for a Chinese-to-English task, showing an improvement of 0.5%--1.8% BLEU score absolute on various test sets and better computational efficiency than reordering during decoding.
The experiments also show that the reordering at the chunk-level performs better than at the POS-level.", "keyphrases": ["statistical machine translation", "chunk level", "source-reordering lattice", "smt system", "well computational efficiency"]} +{"id": "goldberg-zhu-2006-seeing", "title": "Seeing stars when there aren't many stars: Graph-based semi-supervised learning for sentiment categorization", "abstract": "We present a graph-based semi-supervised learning algorithm to address the sentiment analysis task of rating inference. Given a set of documents (e.g., movie reviews) and accompanying ratings (e.g., \"4 stars\"), the task calls for inferring numerical ratings for unlabeled documents based on the perceived sentiment expressed by their text. In particular, we are interested in the situation where labeled data is scarce. We place this task in the semi-supervised setting and demonstrate that considering unlabeled reviews in the learning process can improve rating-inference performance. We do so by creating a graph on both labeled and unlabeled data to encode certain assumptions for this task. We then solve an optimization problem to obtain a smooth rating function over the whole graph. When only limited labeled data is available, this method achieves significantly better predictive accuracy over other methods that ignore the unlabeled examples during training.", "keyphrases": ["star", "semi-supervised learning algorithm", "rating inference", "sentiment classification", "graph-based method"]} +{"id": "oya-etal-2014-template", "title": "A Template-based Abstractive Meeting Summarization: Leveraging Summary and Source Text Relationships", "abstract": "In this paper, we present an automatic abstractive summarization system of meeting conversations. Our system extends a novel multi-sentence fusion algorithm in order to generate abstract templates. It also leverages the relationship between summaries and their source meeting transcripts to select the best templates for generating abstractive summaries of meetings. Our manual and automatic evaluation results demonstrate the success of our system in achieving higher scores both in readability and informativeness.", "keyphrases": ["meeting", "abstractive summarization system", "multi-sentence fusion algorithm", "readability"]} +{"id": "chou-etal-2006-semi", "title": "A Semi-Automatic Method for Annotating a Biomedical Proposition Bank", "abstract": "In this paper, we present a semiautomatic approach for annotating semantic information in biomedical texts. The information is used to construct a biomedical proposition bank called BioProp. Like PropBank in the newswire domain, BioProp contains annotations of predicate argument structures and semantic roles in a treebank schema. To construct BioProp, a semantic role labeling (SRL) system trained on PropBank is used to annotate BioProp. Incorrect tagging results are then corrected by human annotators. To suit the needs in the biomedical domain, we modify the PropBank annotation guidelines and characterize semantic roles as components of biological events. The method can substantially reduce annotation efforts, and we introduce a measure of an upper bound for the saving of annotation efforts. Thus far, the method has been applied experimentally to a 4,389-sentence tree-bank corpus for the construction of BioProp. Inter-annotator agreement measured by kappa statistic reaches .95 for combined decision of role identification and classification when all argument labels are considered. 
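The graph-smoothing idea in the goldberg-zhu-2006-seeing record above can be illustrated with generic label propagation over a document-similarity graph. This is a sketch of the general technique under an assumed similarity matrix, not the paper's exact optimization problem:

```python
import numpy as np

def propagate_ratings(S, ratings, labeled_mask, iters=50):
    """S: (n, n) symmetric similarity matrix; ratings: initial scores
    (0 for unlabeled); labeled_mask: bool array marking known ratings.
    Repeatedly set each node to the similarity-weighted mean of its
    neighbors, keeping labeled nodes clamped to their true ratings."""
    r = ratings.astype(float).copy()
    W = S / S.sum(axis=1, keepdims=True)  # row-normalize similarities
    for _ in range(iters):
        r_new = W @ r
        r_new[labeled_mask] = ratings[labeled_mask]  # clamp labels
        r = r_new
    return r

# toy graph: two labeled reviews (ratings 1.0 and 4.0), two unlabeled
S = np.array([[1.0, 0.9, 0.1, 0.0],
              [0.9, 1.0, 0.1, 0.0],
              [0.1, 0.1, 1.0, 0.8],
              [0.0, 0.0, 0.8, 1.0]])
ratings = np.array([1.0, 0.0, 0.0, 4.0])
labeled = np.array([True, False, False, True])
print(propagate_ratings(S, ratings, labeled).round(2))
```

Each unlabeled review ends up pulled toward the rating of the labeled reviews it most resembles, which is the intuition behind smoothing a rating function over the whole graph.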
In addition, we show that, when trained on BioProp, our biomedical SRL system called BIOSMILE achieves an F-score of 87%.", "keyphrases": ["annotator", "biomedical proposition bank", "propbank", "predicate argument structure"]} +{"id": "vexler-minkov-2016-multi", "title": "Multi-source named entity typing for social media", "abstract": "Typed lexicons that encode knowledge about the semantic types of an entity name, e.g., that \u2018Paris\u2019 denotes a geolocation, product, or person, have proven useful for many text processing tasks. While lexicons may be derived from large-scale knowledge bases (KBs), KBs are inherently imperfect, in particular they lack coverage with respect to long tail entity names. We infer the types of a given entity name using multi-source learning, considering information obtained by alignment to the Freebase knowledge base, Web-scale distributional patterns, and global semi-structured contexts retrieved by means of Web search. Evaluation in the challenging domain of social media shows that multi-source learning improves performance compared with rule-based KB lookups, boosting typing results for some semantic categories.", "keyphrases": ["multi-source learning", "web search", "social medium"]} +{"id": "galley-etal-2015-deltableu", "title": "deltaBLEU: A Discriminative Metric for Generation Tasks with Intrinsically Diverse Targets", "abstract": "We introduce Discriminative BLEU (\u2206BLEU), a novel metric for intrinsic evaluation of generated text in tasks that admit a diverse range of possible outputs. Reference strings are scored for quality by human raters on a scale of [\u22121, +1] to weight multi-reference BLEU. In tasks involving generation of conversational responses, \u2206BLEU correlates reasonably with human judgments and outperforms sentence-level and IBM BLEU in terms of both Spearman\u2019s \u03c1 and Kendall\u2019s \u03c4 .", "keyphrases": ["discriminative bleu", "human rater", "deltableu", "multiple reference"]} +{"id": "cheng-etal-2015-open", "title": "Open-Domain Name Error Detection using a Multi-Task RNN", "abstract": "Out-of-vocabulary name errors in speech recognition create significant problems for downstream language processing, but the fact that they are rare poses challenges for automatic detection, particularly in an open-domain scenario. To address this problem, a multi-task recurrent neural network language model for sentence-level name detection is proposed for use in combination with out-of-vocabulary word detection. The sentence-level model is also effective for leveraging external text data. Experiments show a 26% improvement in name-error detection F-score over a system using n-gram lexical features.", "keyphrases": ["name error", "multi-task rnn", "network language model", "next word"]} +{"id": "djokic-etal-2019-modeling", "title": "Modeling Affirmative and Negated Action Processing in the Brain with Lexical and Compositional Semantic Models", "abstract": "Recent work shows that distributional semantic models can be used to decode patterns of brain activity associated with individual words and sentence meanings. However, it is yet unclear to what extent such models can be used to study and decode fMRI patterns associated with specific aspects of semantic composition such as the negation function. In this paper, we apply lexical and compositional semantic models to decode fMRI patterns associated with negated and affirmative sentences containing hand-action verbs. 
Our results show reduced decoding (correlation) of sentences where the verb is in the negated context, as compared to the affirmative one, within brain regions implicated in action-semantic processing. This supports behavioral and brain imaging studies, suggesting that negation involves reduced access to aspects of the affirmative mental representation. The results pave the way for testing alternate semantic models of negation against human semantic processing in the brain.", "keyphrases": ["negation", "brain", "affirmative mental representation"]} +{"id": "francois-2009-combining", "title": "Combining a Statistical Language Model with Logistic Regression to Predict the Lexical and Syntactic Difficulty of Texts for FFL", "abstract": "Reading is known to be an essential task in language learning, but finding the appropriate text for every learner is far from easy. In this context, automatic procedures can support the teacher's work. Some tools exist for English, but at present there are none for French as a foreign language (FFL). In this paper, we present an original approach to assessing the readability of FFL texts using NLP techniques and extracts from FFL textbooks as our corpus. Two logistic regression models based on lexical and grammatical features are explored and give quite good predictions on new texts. The results show a slight superiority for multinomial logistic regression over the proportional odds model.", "keyphrases": ["logistic regression", "foreign language", "readability"]} +{"id": "horacek-2003-best", "title": "A Best-First Search Algorithm for Generating Referring Expressions", "abstract": "Existing algorithms for generating referential descriptions to sets of objects have serious deficits: while incremental approaches may produce ambiguous and redundant expressions, exhaustive searches are computationally expensive. Mediating between these extreme control regimes, we propose a best-first searching algorithm for uniquely identifying sets of objects. We incorporate linguistically motivated preferences and several techniques to cut down the search space. Preliminary results show the effectiveness of the new algorithm.", "keyphrases": ["object", "deficit", "incremental approach", "resp"]} +{"id": "nenkova-etal-2008-high", "title": "High Frequency Word Entrainment in Spoken Dialogue", "abstract": "Cognitive theories of dialogue hold that entrainment, the automatic alignment between dialogue partners at many levels of linguistic representation, is key to facilitating both production and comprehension in dialogue. In this paper we examine novel types of entrainment in two corpora---Switchboard and the Columbia Games corpus. We examine entrainment in use of high-frequency words (the most common words in the corpus), and its association with dialogue naturalness and flow, as well as with task success. Our results show that such entrainment is predictive of the perceived naturalness of dialogues and is significantly correlated with task success; in overall interaction flow, higher degrees of entrainment are associated with more overlaps and fewer interruptions.", "keyphrases": ["frequency word entrainment", "dialogue naturalness", "more overlap", "engagement", "social variable"]} +{"id": "shaw-etal-2019-generating", "title": "Generating Logical Forms from Graph Representations of Text and Entities", "abstract": "Structured information about entities is critical for many semantic parsing tasks.
We present an approach that uses a Graph Neural Network (GNN) architecture to incorporate information about relevant entities and their relations during parsing. Combined with a decoder copy mechanism, this approach provides a conceptually simple mechanism to generate logical forms with entities. We demonstrate that this approach is competitive with the state-of-the-art across several tasks without pre-training, and outperforms existing approaches when combined with BERT pre-training.", "keyphrases": ["semantic parsing", "graph neural network", "gnn", "relevant entity"]} +{"id": "benamara-etal-2017-evaluative", "title": "Evaluative Language Beyond Bags of Words: Linguistic Insights and Computational Applications", "abstract": "The study of evaluation, affect, and subjectivity is a multidisciplinary enterprise, including sociology, psychology, economics, linguistics, and computer science. A number of excellent computational linguistics and linguistic surveys of the field exist. Most surveys, however, do not bring the two disciplines together to show how methods from linguistics can benefit computational sentiment analysis systems. In this survey, we show how incorporating linguistic insights, discourse information, and other contextual phenomena, in combination with the statistical exploitation of data, can result in an improvement over approaches that take advantage of only one of these perspectives. We first provide a comprehensive introduction to evaluative language from both a linguistic and computational perspective. We then argue that the standard computational definition of the concept of evaluative language neglects the dynamic nature of evaluation, in which the interpretation of a given evaluation depends on linguistic and extra-linguistic contextual factors. We thus propose a dynamic definition that incorporates update functions. The update functions allow for different contextual aspects to be incorporated into the calculation of sentiment for evaluative words or expressions, and can be applied at all levels of discourse. We explore each level and highlight which linguistic aspects contribute to accurate extraction of sentiment. We end the review by outlining what we believe the future directions of sentiment analysis are, and the role that discourse and contextual information need to play.", "keyphrases": ["linguistic insight", "subjectivity", "computer science", "sentiment analysis"]} +{"id": "kirov-etal-2017-rich", "title": "A Rich Morphological Tagger for English: Exploring the Cross-Linguistic Tradeoff Between Morphology and Syntax", "abstract": "A traditional claim in linguistics is that all human languages are equally expressive\u2014able to convey the same wide range of meanings. Morphologically rich languages, such as Czech, rely on overt inflectional and derivational morphology to convey many semantic distinctions. Languages with comparatively limited morphology, such as English, should be able to accomplish the same using a combination of syntactic and contextual cues. We capitalize on this idea by training a tagger for English that uses syntactic features obtained by automatic parsing to recover complex morphological tags projected from Czech. 
The high accuracy of the resulting model provides quantitative confirmation of the underlying linguistic hypothesis of equal expressivity, and bodes well for future improvements in downstream HLT tasks including machine translation.", "keyphrases": ["morphology", "tagger", "rich language"]} +{"id": "echihabi-marcu-2003-noisy", "title": "A Noisy-Channel Approach to Question Answering", "abstract": "We introduce a probabilistic noisy-channel model for question answering and we show how it can be exploited in the context of an end-to-end QA system. Our noisy-channel system outperforms a state-of-the-art rule-based QA system that uses similar resources. We also show that the model we propose is flexible enough to accommodate within one mathematical framework many QA-specific resources and techniques, which range from the exploitation of WordNet, structured, and semi-structured databases to reasoning, and paraphrasing.", "keyphrases": ["noisy-channel approach", "question generation", "machine translation", "ibm model", "intermediate step"]} +{"id": "kennedy-szpakowicz-2008-evaluating", "title": "Evaluating Roget's Thesauri", "abstract": "Roget\u2019s Thesaurus has gone through many revisions since it was first published 150 years ago. But how do these revisions affect Roget\u2019s usefulness for NLP? We examine the differences in content between the 1911 and 1987 versions of Roget\u2019s, and we test both versions with each other and WordNet on problems such as synonym identification and word relatedness. We also present a novel method for measuring sentence relatedness that can be implemented in either version of Roget\u2019s or in WordNet. Although the 1987 version of the Thesaurus is better, we show that the 1911 version performs surprisingly well and that often the differences between the versions of Roget\u2019s and WordNet are not statistically significant. We hope that this work will encourage others to use the 1911 Roget\u2019s Thesaurus in NLP tasks.", "keyphrases": ["wordnet", "pseudo-word-sense disambiguation", "cohesion", "alternative source"]} +{"id": "huang-etal-2021-shot", "title": "Few-Shot Named Entity Recognition: An Empirical Baseline Study", "abstract": "This paper presents an empirical study to efficiently build named entity recognition (NER) systems when a small amount of in-domain labeled data is available. Based upon recent Transformer-based self-supervised pre-trained language models (PLMs), we investigate three orthogonal schemes to improve model generalization ability in few-shot settings: (1) meta-learning to construct prototypes for different entity types, (2) task-specific supervised pre-training on noisy web data to extract entity-related representations and (3) self-training to leverage unlabeled in-domain data. On 10 public NER datasets, we perform extensive empirical comparisons over the proposed schemes and their combinations with various proportions of labeled data; our experiments show that (i) in the few-shot learning setting, the proposed NER schemes significantly improve or outperform the commonly used baseline, a PLM-based linear classifier fine-tuned using domain labels.
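For the echihabi-marcu-2003-noisy record above, the noisy-channel decomposition it builds on is the standard one, stated generically here (the paper's channel model has its own parameterization):

```latex
\hat{a} \;=\; \arg\max_{a} P(a \mid q)
        \;=\; \arg\max_{a} \underbrace{P(q \mid a)}_{\text{channel model}} \, \underbrace{P(a)}_{\text{answer prior}}
```

Reading the channel backwards, the model asks how likely the question is to be "emitted" by a candidate answer, which is what lets machine-translation-style channel models be reused for QA.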
(ii) We create new state-of-the-art results on both few-shot and training-free settings compared with existing methods.", "keyphrases": ["entity recognition", "scheme", "model generalization ability", "in-domain data"]} +{"id": "ethayarajh-etal-2019-towards", "title": "Towards Understanding Linear Word Analogies", "abstract": "A surprising property of word vectors is that word analogies can often be solved with vector arithmetic. However, it is unclear why arithmetic operators correspond to non-linear embedding models such as skip-gram with negative sampling (SGNS). We provide a formal explanation of this phenomenon without making the strong assumptions that past theories have made about the vector space and word distribution. Our theory has several implications. Past work has conjectured that linear substructures exist in vector spaces because relations can be represented as ratios; we prove that this holds for SGNS. We provide novel justification for the addition of SGNS word vectors by showing that it automatically down-weights the more frequent word, as weighting schemes do ad hoc. Lastly, we offer an information theoretic interpretation of Euclidean distance in vector spaces, justifying its use in capturing word dissimilarity.", "keyphrases": ["sgns", "explanation", "word pair", "co-occurrence"]} +{"id": "ive-etal-2016-limsis", "title": "LIMSI's Contribution to the WMT'16 Biomedical Translation Task", "abstract": "The article describes LIMSI\u2019s submission to the first WMT\u201916 shared biomedical translation task, focusing on the sole English-French translation direction. Our main submission is the output of a MOSES-based statistical machine translation (SMT) system, rescored with Structured OUtput Layer (SOUL) neural network models. We also present an attempt to circumvent syntactic complexity: our proposal combines the outputs of PBSMT systems trained either to translate entire source sentences or specific syntactic constructs extracted from those sentences. The approach is implemented using Confusion Network (CN) decoding. The quality of the combined output is comparable to the quality of our main system.", "keyphrases": ["limsi", "re-ranking model", "vocabulary", "moses"]} +{"id": "baumann-etal-2009-assessing", "title": "Assessing and Improving the Performance of Speech Recognition for Incremental Systems", "abstract": "In incremental spoken dialogue systems, partial hypotheses about what was said are required even while the utterance is still ongoing. We define measures for evaluating the quality of incremental ASR components with respect to the relative correctness of the partial hypotheses compared to hypotheses that can optimize over the complete input, the timing of hypothesis formation relative to the portion of the input they are about, and hypothesis stability, defined as the number of times they are revised. We show that simple incremental post-processing can improve stability dramatically, at the cost of timeliness (from 90 % of edits of hypotheses being spurious down to 10 % at a lag of 320 ms). The measures are not independent, and we show how system designers can find a desired operating point for their ASR. 
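The vector arithmetic analyzed in the ethayarajh-etal-2019-towards record above is conventionally implemented as the 3CosAdd nearest-neighbor rule; a sketch with hypothetical toy embeddings:

```python
import numpy as np

def solve_analogy(a, b, c, vocab):
    """Return the word w maximizing cos(w, b - a + c), excluding the
    query words themselves -- the standard 3CosAdd rule."""
    target = vocab[b] - vocab[a] + vocab[c]
    target /= np.linalg.norm(target)
    best, best_sim = None, -np.inf
    for w, v in vocab.items():
        if w in (a, b, c):
            continue
        sim = v @ target / np.linalg.norm(v)
        if sim > best_sim:
            best, best_sim = w, sim
    return best

# hypothetical 2-d embeddings, chosen so the offsets line up
vocab = {
    "man":   np.array([1.0, 0.0]),
    "woman": np.array([1.0, 1.0]),
    "king":  np.array([2.0, 0.1]),
    "queen": np.array([2.0, 1.1]),
}
print(solve_analogy("man", "woman", "king", vocab))  # -> "queen"
```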
To our knowledge, we are the first to suggest and examine a variety of measures for assessing incremental ASR and improve performance on this basis.", "keyphrases": ["speech recognition", "spoken dialogue system", "asr", "incremental processing"]} +{"id": "voita-etal-2021-analyzing", "title": "Analyzing the Source and Target Contributions to Predictions in Neural Machine Translation", "abstract": "In Neural Machine Translation (and, more generally, conditional language modeling), the generation of a target token is influenced by two types of context: the source and the prefix of the target sequence. While many attempts to understand the internal workings of NMT models have been made, none of them explicitly evaluates relative source and target contributions to a generation decision. We argue that this relative contribution can be evaluated by adopting a variant of Layerwise Relevance Propagation (LRP). Its underlying `conservation principle' makes relevance propagation unique: differently from other methods, it evaluates not an abstract quantity reflecting token importance, but the proportion of each token's influence. We extend LRP to the Transformer and conduct an analysis of NMT models which explicitly evaluates the source and target relative contributions to the generation process. We analyze changes in these contributions when conditioning on different types of prefixes, when varying the training objective or the amount of training data, and during the training process. We find that models trained with more data tend to rely on source information more and to have more sharp token contributions; the training process is non-monotonic with several stages of different nature.", "keyphrases": ["neural machine translation", "influence", "prefix", "different type"]} +{"id": "haghighi-etal-2009-better", "title": "Better Word Alignments with Supervised ITG Models", "abstract": "This work investigates supervised word alignment methods that exploit inversion transduction grammar (ITG) constraints. We consider maximum margin and conditional likelihood objectives, including the presentation of a new normal form grammar for canonicalizing derivations. Even for non-ITG sentence pairs, we show that it is possible to learn ITG alignment models by simple relaxations of structured discriminative learning objectives. For efficiency, we describe a set of pruning techniques that together allow us to align sentences two orders of magnitude faster than naive bitext CKY parsing. Finally, we introduce many-to-one block alignment features, which significantly improve our ITG models. Altogether, our method results in the best reported AER numbers for Chinese-English and a performance improvement of 1.1 BLEU over GIZA++ alignments.", "keyphrases": ["itg", "block", "word alignment community", "probability", "more attention"]} +{"id": "mitchell-etal-2010-syntactic", "title": "Syntactic and Semantic Factors in Processing Difficulty: An Integrated Measure", "abstract": "The analysis of reading times can provide insights into the processes that underlie language comprehension, with longer reading times indicating greater cognitive load. There is evidence that the language processor is highly predictive, such that prior context allows upcoming linguistic material to be anticipated. Previous work has investigated the contributions of semantic and syntactic contexts in isolation, essentially treating them as independent factors.
In this paper we analyze reading times in terms of a single predictive measure which integrates a model of semantic composition with an incremental parser and a language model.", "keyphrases": ["processing difficulty", "incremental parser", "psycholinguistic", "semantic component", "coherence"]} +{"id": "sogaard-2010-inversion", "title": "Can inversion transduction grammars generate hand alignments", "abstract": "The adequacy of inversion transduction grammars (ITGs) has been widely debated, and the discussion\u2019s crux seems to be whether the search space is inclusive enough (Zens and Ney, 2003; Wellington et al., 2006; S\u00f8gaard and Wu, 2009). Parse failure rate when parses are constrained by word alignments is one metric that has been used, but no one has studied parse failure rates of the full class of ITGs on representative hand aligned corpora. It has also been noted that ITGs in Chomsky normal form induce strictly less alignments than ITGs (S\u00f8gaard and Wu, 2009). This study is the first study that directly compares parse failure rates for this subclass and the full class of ITGs.", "keyphrases": ["adequacy", "itg", "translation model", "setup"]} +{"id": "cohen-etal-2011-unsupervised", "title": "Unsupervised Structure Prediction with Non-Parallel Multilingual Guidance", "abstract": "We describe a method for prediction of linguistic structure in a language for which only unlabeled data is available, using annotated data from a set of one or more helper languages. Our approach is based on a model that locally mixes between supervised models from the helper languages. Parallel data is not used, allowing the technique to be applied even in domains where human-translated texts are unavailable. We obtain state-of-the-art performance for two tasks of structure prediction: unsupervised part-of-speech tagging and unsupervised dependency parsing.", "keyphrases": ["non-parallel multilingual guidance", "unlabeled data", "helper language", "part-of-speech tagging", "source language"]} +{"id": "bruni-etal-2012-distributional", "title": "Distributional Semantics in Technicolor", "abstract": "Our research aims at building computational models of word meaning that are perceptually grounded. Using computer vision techniques, we build visual and multimodal distributional models and compare them to standard textual models. Our results show that, while visual models with state-of-the-art computer vision techniques perform worse than textual models in general tasks (accounting for semantic relatedness), they are as good or better models of the meaning of words with visual correlates such as color terms, even in a nontrivial task that involves nonliteral uses of such words. Moreover, we show that visual and textual information are tapping on different aspects of meaning, and indeed combining them in multimodal models often improves performance.", "keyphrases": ["relatedness", "color term", "men", "vector concatenation"]} +{"id": "kisselew-etal-2016-predicting", "title": "Predicting the Direction of Derivation in English Conversion", "abstract": "Conversion is a word formation operation that changes the grammatical category of a word in the absence of overt morphology. Conversion is extremely productive in English (e.g., tunnel, talk). This paper investigates whether distributional information can be used to predict the diachronic direction of conversion for homophonous noun\u2010verb pairs. We aim to predict, for example, that tunnel was used as a noun prior to its use as a verb. 
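For the mitchell-etal-2010-syntactic record above, the language-model ingredient of such a predictive measure is typically word surprisal; a minimal illustration with hypothetical bigram probabilities (the paper's integrated measure also includes parser and semantic-composition terms):

```python
import math

def surprisal(prob):
    """Surprisal in bits: -log2 P(w_t | context); higher values
    predict longer reading times."""
    return -math.log2(prob)

# hypothetical bigram probabilities for continuations of "the horse ..."
p_next = {"raced": 0.01, "ran": 0.20}
print(surprisal(p_next["raced"]))  # ~6.64 bits: rarer, harder continuation
print(surprisal(p_next["ran"]))    # ~2.32 bits
```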
We test two hypotheses: (1) that derived forms are less frequent than their bases, and (2) that derived forms are more semantically specific than their bases, as approximated by information theoretic measures. We find that hypothesis (1) holds for N-to-V conversion, while hypothesis (2) holds for V-to-N conversion. We achieve the best overall account of the historical data by taking both frequency and semantic specificity into account. These results provide a new perspective on linguistic theories regarding the semantic specificity of derivational morphemes, and on the morphosyntactic status of conversion.", "keyphrases": ["direction", "conversion", "noun"]} +{"id": "srivastava-singh-2021-hinge", "title": "HinGE: A Dataset for Generation and Evaluation of Code-Mixed Hinglish Text", "abstract": "Text generation is a highly active area of research in the computational linguistic community. The evaluation of the generated text is a challenging task and multiple theories and metrics have been proposed over the years. Unfortunately, text generation and evaluation are relatively understudied due to the scarcity of high-quality resources in code-mixed languages where the words and phrases from multiple languages are mixed in a single utterance of text and speech. To address this challenge, we present a corpus (HinGE) for a widely popular code-mixed language Hinglish (code-mixing of Hindi and English languages). HinGE has Hinglish sentences generated by humans as well as two rule-based algorithms corresponding to the parallel Hindi-English sentences. In addition, we demonstrate the inefficacy of widely-used evaluation metrics on the code-mixed data. The HinGE dataset will facilitate the progress of natural language generation research in code-mixed languages.", "keyphrases": ["efficacy", "evaluation metric", "hinge"]} +{"id": "mcintosh-2010-unsupervised", "title": "Unsupervised Discovery of Negative Categories in Lexicon Bootstrapping", "abstract": "Multi-category bootstrapping algorithms were developed to reduce semantic drift. By extracting multiple semantic lexicons simultaneously, a category's search space may be restricted. The best results have been achieved through reliance on manually crafted negative categories. Unfortunately, identifying these categories is non-trivial, and their use shifts the unsupervised bootstrapping paradigm towards a supervised framework. \n \nWe present NEG-FINDER, the first approach for discovering negative categories automatically. NEG-FINDER exploits unsupervised term clustering to generate multiple negative categories during bootstrapping. Our algorithm effectively removes the necessity of manual intervention and formulation of negative categories, with performance closely approaching that obtained using negative categories defined by a domain expert.", "keyphrases": ["negative category", "bootstrapping", "semantic drift", "iterative process", "limitation"]} +{"id": "engonopoulos-etal-2013-predicting", "title": "Predicting the Resolution of Referring Expressions from User Behavior", "abstract": "We present a statistical model for predicting how the user of an interactive, situated NLP system resolved a referring expression. The model makes an initial prediction based on the meaning of the utterance, and revises it continuously based on the user\u2019s behavior.
The combined model outperforms its components in predicting reference resolution and when to give feedback.", "keyphrases": ["resolution", "log-linear model", "semantic model"]} +{"id": "zhou-etal-2021-challenges", "title": "Challenges in Automated Debiasing for Toxic Language Detection", "abstract": "Biased associations have been a challenge in the development of classifiers for detecting toxic language, hindering both fairness and accuracy. As potential solutions, we investigate recently introduced debiasing methods for text classification datasets and models, as applied to toxic language detection. Our focus is on lexical (e.g., swear words, slurs, identity mentions) and dialectal markers (specifically African American English). Our comprehensive experiments establish that existing methods are limited in their ability to prevent biased behavior in current toxicity detectors. We then propose an automatic, dialect-aware data correction method, as a proof-of-concept. Despite the use of synthetic labels, this method reduces dialectal associations with toxicity. Overall, our findings show that debiasing a model trained on biased toxic language data is not as effective as simply relabeling the data to remove existing biases.", "keyphrases": ["toxic language detection", "debiasing method", "offensive content", "hate speech dataset"]} +{"id": "liu-etal-2012-tag", "title": "Tag Dispatch Model with Social Network Regularization for Microblog User Tag Suggestion", "abstract": "Microblog is a popular Web 2.0 service which preserves rich information about Web users. In a microblog service, it is a simple and effective way to annotate tags for users to represent their interests and attributes. The attributes and interests of a microblog user usually hide behind the text and network information of the user. In this paper, we propose a probabilistic model, Network-Regularized Tag Dispatch Model (NTDM), for microblog user tag suggestion. NTDM models the semantic relations between words in user descriptions and tags, and takes the social network structure as regularization. Experiments on a real-world dataset demonstrate the effectiveness and efficiency of NTDM compared to other baseline methods.", "keyphrases": ["microblog", "social network structure", "tag dispatch model"]} +{"id": "ive-etal-2018-deepquest", "title": "deepQuest: A Framework for Neural-based Quality Estimation", "abstract": "Predicting Machine Translation (MT) quality can help in many practical tasks such as MT post-editing. The performance of Quality Estimation (QE) methods has drastically improved recently with the introduction of neural approaches to the problem. However, thus far neural approaches have only been designed for word and sentence-level prediction. We present a neural framework that is able to accommodate neural QE approaches at these fine-grained levels and generalize them to the level of documents. We test the framework with two sentence-level neural QE approaches: a state of the art approach that requires extensive pre-training, and a new light-weight approach that we propose, which employs basic encoders. Our approach is significantly faster and yields performance improvements for a range of document-level quality estimation tasks. To our knowledge, this is the first neural architecture for document-level QE. 
In addition, for the first time we apply QE models to the output of both statistical and neural MT systems for a series of European languages and highlight the new challenges resulting from the use of neural MT.", "keyphrases": ["quality estimation", "different level", "translation quality", "sentence level"]} +{"id": "bamman-etal-2019-annotated", "title": "An annotated dataset of literary entities", "abstract": "We present a new dataset comprised of 210,532 tokens evenly drawn from 100 different English-language literary texts annotated for ACE entity categories (person, location, geo-political entity, facility, organization, and vehicle). These categories include non-named entities (such as \u201cthe boy\u201d, \u201cthe kitchen\u201d) and nested structure (such as [[the cook]'s sister]). In contrast to existing datasets built primarily on news (focused on geo-political entities and organizations), literary texts offer strikingly different distributions of entity categories, with much stronger emphasis on people and description of settings. We present empirical results demonstrating the performance of nested entity recognition models in this domain; training natively on in-domain literary data yields an improvement of over 20 absolute points in F-score (from 45.7 to 68.3), and mitigates a disparate impact in performance for male and female entities present in models trained on news data.", "keyphrases": ["literary entity", "ace entity category", "person", "geo-political entity", "organization"]} +{"id": "sha-2020-gradient", "title": "Gradient-guided Unsupervised Lexically Constrained Text Generation", "abstract": "Lexically constrained generation requires the target sentence to satisfy some lexical constraints, such as containing some specific words or being the paraphrase to a given sentence, which is very important in many real-world natural language generation applications. Previous works usually apply beam-search-based methods or stochastic searching methods to lexically-constrained generation. However, when the search space is too large, beam-search-based methods always fail to find the constrained optimal solution. At the same time, stochastic search methods always cost too many steps to find the correct optimization direction. In this paper, we propose a novel method G2LC to solve the lexically-constrained generation as an unsupervised gradient-guided optimization problem. We propose a differentiable objective function and use the gradient to help determine which position in the sequence should be changed (deleted or inserted/replaced by another word). The word updating process of the inserted/replaced word also benefits from the guidance of gradient. Besides, our method is free of parallel data training, which is flexible to be used in the inference stage of any pre-trained generation model. We apply G2LC to two generation tasks: keyword-to-sentence generation and unsupervised paraphrase generation. The experiment results show that our method achieves state-of-the-art compared to previous lexically-constrained methods.", "keyphrases": ["paraphrase", "search space", "g2lc", "gradient", "generation task"]} +{"id": "wang-manning-2014-cross", "title": "Cross-lingual Projected Expectation Regularization for Weakly Supervised Learning", "abstract": "We consider a multilingual weakly supervised learning scenario where knowledge from annotated corpora in a resource-rich language is transferred via bitext to guide the learning in other languages. 
Past approaches project labels across bitext and use them as features or gold labels for training. We propose a new method that projects model expectations rather than labels, which facilitates transfer of model uncertainty across language boundaries. We encode expectations as constraints and train a discriminative CRF model using Generalized Expectation Criteria (Mann and McCallum, 2010). Evaluated on standard Chinese-English and German-English NER datasets, our method demonstrates F1 scores of 64% and 60% when no labeled data is used. Attaining the same accuracy with supervised CRFs requires 12k and 1.5k labeled sentences. Furthermore, when combined with labeled examples, our method yields significant improvements over state-of-the-art supervised methods, achieving best reported numbers to date on Chinese OntoNotes and German CoNLL-03 datasets.", "keyphrases": ["model expectation", "language boundary", "parallel corpora", "annotation projection"]} +{"id": "foster-2010-cba", "title": "\u201ccba to check the spelling\u201d: Investigating Parser Performance on Discussion Forum Posts", "abstract": "We evaluate the Berkeley parser on text from an online discussion forum. We evaluate the parser output with and without gold tokens and spellings (using Sparseval and Parseval), and we compile a list of problematic phenomena for this domain. The Parseval f-score for a small development set is 77.56. This increases to 80.27 when we apply a set of simple transformations to the input sentences and to the Wall Street Journal (WSJ) training sections.", "keyphrases": ["spelling", "berkeley parser", "online discussion forum"]} +{"id": "volkova-etal-2014-inferring", "title": "Inferring User Political Preferences from Streaming Communications", "abstract": "Existing models for social media personal analytics assume access to thousands of messages per user, even though most users author content only sporadically over time. Given this sparsity, we: (i) leverage content from the local neighborhood of a user; (ii) evaluate batch models as a function of size and the amount of messages in various types of neighborhoods; and (iii) estimate the amount of time and tweets required for a dynamic model to predict user preferences. We show that even when limited or no self-authored data is available, language from friend, retweet and user mention communications provides sufficient evidence for prediction. When updating models over time based on Twitter, we find that political preference can often be predicted using roughly 100 tweets, depending on the context of user selection, where this could mean hours, or weeks, based on the author\u2019s tweeting frequency.", "keyphrases": ["political preference", "friend", "twitter user", "social medium"]} +{"id": "elhadad-sutaria-2007-mining", "title": "Mining a Lexicon of Technical Terms and Lay Equivalents", "abstract": "We present a corpus-driven method for building a lexicon of semantically equivalent pairs of technical and lay medical terms. Using a parallel corpus of abstracts of clinical studies and corresponding news stories written for a lay audience, we identify terms which are good semantic equivalents of technical terms for a lay audience. Our method relies on measures of association. 
Results show that, despite the small size of our corpus, a promising number of pairs are identified.", "keyphrases": ["technical term", "audience", "comparable corpora", "medical text", "paraphrase"]} +{"id": "woodsend-lapata-2010-automatic", "title": "Automatic Generation of Story Highlights", "abstract": "In this paper we present a joint content selection and compression model for single-document summarization. The model operates over a phrase-based representation of the source document which we obtain by merging information from PCFG parse trees and dependency graphs. Using an integer linear programming formulation, the model learns to select and combine phrases subject to length, coverage and grammar constraints. We evaluate the approach on the task of generating \"story highlights\"---a small number of brief, self-contained sentences that allow readers to quickly gather information on news stories. Experimental results show that the model's output is comparable to human-written highlights in terms of both grammaticality and content.", "keyphrases": ["story highlight", "content selection", "summarization", "length", "self-contained sentence"]} +{"id": "park-zhang-2003-text", "title": "Text Chunking by Combining Hand-Crafted Rules and Memory-Based Learning", "abstract": "This paper proposes a hybrid of hand-crafted rules and a machine learning method for chunking Korean. In the partially free word-order languages such as Korean and Japanese, a small number of rules dominate the performance due to their well-developed postpositions and endings. Thus, the proposed method is primarily based on the rules, and then the residual errors are corrected by adopting a memory-based machine learning method. Since the memory-based learning is an efficient method to handle exceptions in natural language processing, it is good at checking whether the estimates are exceptional cases of the rules and revising them. An evaluation of the method yields the improvement in F-score over the rules or various machine learning methods alone.", "keyphrases": ["hand-crafted rule", "memory-based learning", "machine learning method"]} +{"id": "cho-2017-strawman", "title": "Strawman: An Ensemble of Deep Bag-of-Ngrams for Sentiment Analysis", "abstract": "This paper describes a builder entry, named \u201cstrawman\u201d, to the sentence-level sentiment analysis task of the \u201cBuild It, Break It\u201d shared task of the First Workshop on Building Linguistically Generalizable NLP Systems. The goal of a builder is to provide an automated sentiment analyzer that would serve as a target for breakers whose goal is to find pairs of minimally-differing sentences that break the analyzer.", "keyphrases": ["strawman", "telugu sentiment analysis", "neutral polarity", "several source"]} +{"id": "cross-huang-2016-span", "title": "Span-Based Constituency Parsing with a Structure-Label System and Provably Optimal Dynamic Oracles", "abstract": "Parsing accuracy using efficient greedy transition systems has improved dramatically in recent years thanks to neural networks. Despite striking results in dependency parsing, however, neural models have not surpassed state-of-the-art approaches in constituency parsing. To remedy this, we introduce a new shift-reduce system whose stack contains merely sentence spans, represented by a bare minimum of LSTM features. We also design the first provably optimal dynamic oracle for constituency parsing, which runs in amortized O(1) time, compared to O(n^3) oracles for standard dependency parsing. 
Training with this oracle, we achieve the best F1 scores on both English and French of any parser that does not use reranking or external data.", "keyphrases": ["dynamic oracle", "stack", "constituency parser", "transition-based parser"]} +{"id": "amidei-etal-2018-rethinking", "title": "Rethinking the Agreement in Human Evaluation Tasks", "abstract": "Human evaluations are broadly thought to be more valuable the higher the inter-annotator agreement. In this paper we examine this idea. We will describe our experiments and analysis within the area of Automatic Question Generation. Our experiments show how annotators diverge in language annotation tasks due to a range of ineliminable factors. For this reason, we believe that annotation schemes for natural language generation tasks that are aimed at evaluating language quality need to be treated with great care. In particular, an unchecked focus on reduction of disagreement among annotators runs the danger of creating generation goals that reward output that is more distant from, rather than closer to, natural human-like language. We conclude the paper by suggesting a new approach to the use of the agreement metrics in natural language generation evaluation tasks.", "keyphrases": ["agreement", "language annotation task", "ineliminable factor", "factor"]} +{"id": "adolphs-etal-2008-fine", "title": "Some Fine Points of Hybrid Natural Language Parsing", "abstract": "Large-scale grammar-based parsing systems nowadays increasingly rely on independently developed, more specialized components for pre-processing their input. However, different tools make conflicting assumptions about very basic properties such as tokenization. To make linguistic annotation gathered in pre-processing available to \u201cdeep\u201d parsing, a hybrid NLP system needs to establish a coherent mapping between the two universes. Our basic assumption is that tokens are best described by attribute value matrices (AVMs) that may be arbitrarily complex. We propose a powerful resource-sensitive rewrite formalism, \u201cchart mapping\u201d, that allows us to mediate between the token descriptions delivered by shallow pre-processing components and the input expected by the grammar. We furthermore propose a novel way of unknown word treatment where all generic lexical entries are instantiated that are licensed by a particular token AVM. Again, chart mapping is used to give the grammar writer full control as to which items (e.g. native vs. generic lexical items) enter syntactic parsing. We discuss several further uses of the original idea and report on early experiences with the new machinery.", "keyphrases": ["tokenization", "syntactico-semantic analysis", "english wikipedia", "newspaper text", "entity recognition"]} +{"id": "marciniak-strube-2005-beyond", "title": "Beyond the Pipeline: Discrete Optimization in NLP", "abstract": "We present a discrete optimization model based on a linear programming formulation as an alternative to the cascade of classifiers implemented in many language processing systems. Since NLP tasks are correlated with one another, sequential processing does not guarantee optimal solutions. 
We apply our model in an NLG application and show that it performs better than a pipeline-based system.", "keyphrases": ["decision", "ilp approach", "co-dependent", "language processing application", "subtask"]} +{"id": "wisniewski-etal-2010-assessing", "title": "Assessing Phrase-Based Translation Models with Oracle Decoding", "abstract": "Extant Statistical Machine Translation (SMT) systems are very complex software systems, which embed multiple layers of heuristics and include very large numbers of numerical parameters. As a result, it is difficult to analyze output translations and there is a real need for tools that could help developers to better understand the various causes of errors. \n \nIn this study, we make a step in that direction and present an attempt to evaluate the quality of the phrase-based translation model. In order to identify those translation errors that stem from deficiencies in the phrase table (PT), we propose to compute the oracle BLEU-4 score, that is the best score that a system based on this PT can achieve on a reference corpus. By casting the computation of the oracle BLEU-1 as an Integer Linear Programming (ILP) problem, we show that it is possible to efficiently compute accurate lower-bounds of this score, and report measures performed on several standard benchmarks. Various other applications of these oracle decoding techniques are also reported and discussed.", "keyphrases": ["statistical machine translation", "integer linear programming", "hypothesis", "bottleneck", "suboptimal result"]} +{"id": "gupta-etal-2018-semantic", "title": "Semantic Parsing for Technical Support Questions", "abstract": "Technical support problems are very complex. In contrast to regular web queries (that contain few keywords) or factoid questions (which are a few sentences), these problems usually include attributes like a detailed description of what is failing (symptom), steps taken in an effort to remediate the failure (activity), and sometimes a specific request or ask (intent). Automating support is the task of automatically providing answers to these problems given a corpus of solution documents. Traditional approaches to this task rely on information retrieval and are keyword based, looking for keyword overlap between the question and solution documents and ignoring these attributes. We present an approach for semantic parsing of technical questions that uses grammatical structure to extract these attributes as a baseline, and a CRF based model that can improve performance considerably in the presence of annotated data for training. We also demonstrate that combined with reasoning, these attributes help outperform retrieval baselines.", "keyphrases": ["attribute", "symptom", "request", "intent", "semantic parsing"]} +{"id": "goutte-etal-2012-impact", "title": "The Impact of Sentence Alignment Errors on Phrase-Based Machine Translation Performance", "abstract": "When parallel or comparable corpora are harvested from the web, there is typically a tradeoff between the size and quality of the data. In order to improve quality, corpus collection efforts often attempt to fix or remove misaligned sentence pairs. But, at the same time, Statistical Machine Translation (SMT) systems are widely assumed to be relatively robust to sentence alignment errors. However, there is little empirical evidence to support and characterize this robustness. This contribution investigates the impact of sentence alignment errors on a typical phrase-based SMT system. 
We confirm that SMT systems are highly tolerant to noise, and that performance only degrades seriously at very high noise levels. Our findings suggest that when collecting larger, noisy parallel data for training phrase-based SMT, cleaning up by trying to detect and remove incorrect alignments can actually degrade performance. Although fixing errors, when applicable, is a preferable strategy to removal, its benefits only become apparent for fairly high misalignment rates. We provide several explanations to support these findings.", "keyphrases": ["sentence alignment error", "degrade", "noisy parallel data", "different type"]} +{"id": "goyal-etal-2012-distributed", "title": "A Distributed Platform for Sanskrit Processing", "abstract": "Sanskrit, the classical language of India, presents specific challenges for computational linguistics: exact phonetic transcription in writing that obscures word boundaries, rich morphology and an enormous corpus, among others. Recent international cooperation has developed innovative solutions to these problems and significant resources for linguistic research. Solutions include efficient segmenting and tagging algorithms and dependency parsers based on constraint programming. The integration of lexical resources, text archives and linguistic software is achieved by distributed interoperable Web services. Resources include a morphological tagger and tagged corpus.", "keyphrases": ["sanskrit", "india", "digitisation", "sanskrit heritage reader"]} +{"id": "marchisio-etal-2020-unsupervised", "title": "When Does Unsupervised Machine Translation Work?", "abstract": "Despite the reported success of unsupervised machine translation (MT), the field has yet to examine the conditions under which the methods succeed and fail. We conduct an extensive empirical evaluation using dissimilar language pairs, dissimilar domains, and diverse datasets. We find that performance rapidly deteriorates when source and target corpora are from different domains, and that stochasticity during embedding training can dramatically affect downstream results. We additionally find that unsupervised MT performance declines when source and target languages use different scripts, and observe very poor performance on authentic low-resource language pairs. We advocate for extensive empirical evaluation of unsupervised MT systems to highlight failure points and encourage continued research on the most promising paradigms. We release our preprocessed dataset to encourage evaluations that stress-test systems under multiple data conditions.", "keyphrases": ["condition", "low-resource language", "distant language pair", "bleu score", "unmt"]} +{"id": "mohiuddin-etal-2021-rethinking", "title": "Rethinking Coherence Modeling: Synthetic vs. Downstream Tasks", "abstract": "Although coherence modeling has come a long way in developing novel models, their evaluation on downstream applications for which they are purportedly developed has largely been neglected. With the advancements made by neural approaches in applications such as machine translation (MT), summarization and dialog systems, the need for coherence evaluation of these tasks is now more crucial than ever. However, coherence models are typically evaluated only on synthetic tasks, which may not be representative of their performance in downstream applications. 
To investigate how representative the synthetic tasks are of downstream use cases, we conduct experiments on benchmarking well-known traditional and neural coherence models on synthetic sentence ordering tasks, and contrast this with their performance on three downstream applications: coherence evaluation for MT and summarization, and next utterance prediction in retrieval-based dialog. Our results demonstrate a weak correlation between the model performances in the synthetic tasks and the downstream applications, motivating alternate training and evaluation methods for coherence models.", "keyphrases": ["coherence modeling", "machine translation", "summarization", "model performance"]} +{"id": "zeng-etal-2020-counterfactual", "title": "Counterfactual Generator: A Weakly-Supervised Method for Named Entity Recognition", "abstract": "Past progress on neural models has proven that named entity recognition is no longer a problem if we have enough labeled data. However, collecting enough data and annotating them are labor-intensive, time-consuming, and expensive. In this paper, we decompose the sentence into two parts: entity and context, and rethink the relationship between them and model performance from a causal perspective. Based on this, we propose the Counterfactual Generator, which generates counterfactual examples by the interventions on the existing observational examples to enhance the original dataset. Experiments across three datasets show that our method improves the generalization ability of models under limited observational examples. Besides, we provide a theoretical foundation by using a structural causal model to explore the spurious correlations between input features and output labels. We investigate the causal effects of entity or context on model performance under both conditions: the non-augmented and the augmented. Interestingly, we find that the non-spurious correlations are more located in entity representation rather than context representation. As a result, our method eliminates part of the spurious correlations between context representation and output labels. The code is available at .", "keyphrases": ["generalization ability", "counterfactual generator", "various nlp task", "ner model", "causal theory"]} +{"id": "lopes-etal-2020-document", "title": "Document-level Neural MT: A Systematic Comparison", "abstract": "In this paper we provide a systematic comparison of existing and new document-level neural machine translation solutions. As part of this comparison, we introduce and evaluate a document-level variant of the recently proposed Star Transformer architecture. In addition to using the traditional metric BLEU, we report the accuracy of the models in handling anaphoric pronoun translation as well as coherence and cohesion using contrastive test sets. Finally, we report the results of human evaluation in terms of Multidimensional Quality Metrics (MQM) and analyse the correlation of the results obtained by the automatic metrics with human judgments.", "keyphrases": ["systematic comparison", "test set", "sentence-level baseline", "contextual information"]} +{"id": "venugopal-etal-2007-efficient", "title": "An Efficient Two-Pass Approach to Synchronous-CFG Driven Statistical MT", "abstract": "We present an efficient, novel two-pass approach to mitigate the computational impact resulting from online intersection of an n-gram language model (LM) and a probabilistic synchronous context-free grammar (PSCFG) for statistical machine translation. 
In first pass CYK-style decoding, we consider first-best chart item approximations, generating a hypergraph of sentence spanning target language derivations. In the second stage, we instantiate specific alternative derivations from this hypergraph, using the LM to drive this search process, recovering from search errors made in the first pass. Model search errors in our approach are comparable to those made by the state-of-the-art \u201cCube Pruning\u201d approach in (Chiang, 2007) under comparable pruning conditions evaluated on both hierarchical and syntax-based grammars.", "keyphrases": ["computational impact", "n-gram language model", "cube pruning", "smt decoder", "hypothesis recombination"]} +{"id": "chang-etal-2016-measuring", "title": "Measuring the Information Content of Financial News", "abstract": "Measuring the information content of news text is useful for decision makers in their investments since news information can influence the intrinsic values of companies. We propose a model to automatically measure the information content given news text, trained using news and corresponding cumulative abnormal returns of listed companies. Existing methods in finance literature exploit sentiment signal features, which are limited by not considering factors such as events. We address this issue by leveraging deep neural models to extract rich semantic features from news text. In particular, a novel tree-structured LSTM is used to find target-specific representations of news text given syntax structures. Empirical results show that the neural models can outperform sentiment-based models, demonstrating the effectiveness of recent NLP technology advances for computational finance.", "keyphrases": ["information content", "company", "abnormal return", "stock price prediction", "data extraction"]} +{"id": "bleicken-etal-2016-using", "title": "Using a Language Technology Infrastructure for German in order to Anonymize German Sign Language Corpus Data", "abstract": "For publishing sign language corpus data on the web, anonymization is crucial even if it is impossible to hide the visual appearance of the signers: In a small community, even vague references to third persons may be enough to identify those persons. In the case of the DGS Korpus (German Sign Language corpus) project, we want to publish data as a contribution to the cultural heritage of the sign language community while annotation of the data is still ongoing. This poses the question how well anonymization can be achieved given that no full linguistic analysis of the data is available. Basically, we combine analysis of all data that we have, including named entity recognition on translations into German. For this, we use the WebLicht language technology infrastructure. We report on the reliability of these methods in this special context and also illustrate how the anonymization of the video data is technically achieved in order to minimally disturb the viewer.", "keyphrases": ["german", "anonymization", "sign language"]} +{"id": "wan-etal-2020-improving", "title": "Improving Grammatical Error Correction with Data Augmentation by Editing Latent Representation", "abstract": "The incorporation of data augmentation method in grammatical error correction task has attracted much attention. However, existing data augmentation methods mainly apply noise to tokens, which leads to the lack of diversity of generated errors. 
In view of this, we propose a new data augmentation method that can apply noise to the latent representation of a sentence. By editing the latent representations of grammatical sentences, we can generate synthetic samples with various error types. Combining with some pre-defined rules, our method can greatly improve the performance and robustness of existing grammatical error correction models. We evaluate our method on public benchmarks of GEC task and it achieves the state-of-the-art performance on CoNLL-2014 and FCE benchmarks.", "keyphrases": ["data augmentation method", "noise", "error type", "seq2seq model"]} +{"id": "misra-walker-2013-topic", "title": "Topic Independent Identification of Agreement and Disagreement in Social Media Dialogue", "abstract": "Research on the structure of dialogue has been hampered for years because large dialogue corpora have not been available. This has impacted the dialogue research community's ability to develop better theories, as well as good off the shelf tools for dialogue processing. Happily, an increasing amount of information and opinion exchange occur in natural dialogue in online forums, where people share their opinions about a vast range of topics. In particular we are interested in rejection in dialogue, also called disagreement and denial, where the size of available dialogue corpora, for the first time, offers an opportunity to empirically test theoretical accounts of the expression and inference of rejection in dialogue. In this paper, we test whether topic-independent features motivated by theoretical predictions can be used to recognize rejection in online forums in a topic independent way. Our results show that our theoretically motivated features achieve 66% accuracy, an improvement over a unigram baseline of an absolute 6%.", "keyphrases": ["disagreement", "natural dialogue", "online forum", "topic-independent feature"]} +{"id": "kann-etal-2018-sentence", "title": "Sentence-Level Fluency Evaluation: References Help, But Can Be Spared!", "abstract": "Motivated by recent findings on the probabilistic modeling of acceptability judgments, we propose syntactic log-odds ratio (SLOR), a normalized language model score, as a metric for referenceless fluency evaluation of natural language generation output at the sentence level. We further introduce WPSLOR, a novel WordPiece-based version, which harnesses a more compact language model. Even though word-overlap metrics like ROUGE are computed with the help of hand-written references, our referenceless methods obtain a significantly higher correlation with human fluency scores on a benchmark dataset of compressed sentences. Finally, we present ROUGE-LM, a reference-based metric which is a natural extension of WPSLOR to the case of available references. We show that ROUGE-LM yields a significantly higher correlation with human judgments than all baseline metrics, including WPSLOR on its own.", "keyphrases": ["log-odds ratio", "language model", "wpslor", "human judgment"]} +{"id": "kawahara-kurohashi-2011-generative", "title": "Generative Modeling of Coordination by Factoring Parallelism and Selectional Preferences", "abstract": "We present a unified generative model of coordination that considers parallelism of conjuncts and selectional preferences. Parallelism of conjuncts, which frequently characterizes coordinate structures, is modeled as a synchronized generation process in the generative parser. 
Selectional preferences learned from a large web corpus provide an important clue for resolving the ambiguities of coordinate structures. Our experiments of Japanese dependency parsing indicate the effectiveness of our approach, particularly in the domains of newspapers and patents.", "keyphrases": ["coordination", "parallelism", "selectional preference", "dependency parsing"]} +{"id": "clement-etal-2020-pymt5", "title": "PyMT5: multi-mode translation of natural language and Python code with transformers", "abstract": "Simultaneously modeling source code and natural language has many exciting applications in automated software development and understanding. Pursuant to achieving such technology, we introduce PyMT5, the Python method text-to-text transfer transformer, which is trained to translate between all pairs of Python method feature combinations: a single model that can both predict whole methods from natural language documentation strings (docstrings) and summarize code into docstrings of any common style. We present an analysis and modeling effort of a large-scale parallel corpus of 26 million Python methods and 7.7 million method-docstring pairs, demonstrating that for docstring and method generation, PyMT5 outperforms similarly-sized auto-regressive language models (GPT2) which were English pre-trained or randomly initialized. On the CodeSearchNet test set, our best model predicts 92.1% syntactically correct method bodies, achieved a BLEU score of 8.59 for method generation and 16.3 for docstring generation (summarization), and achieved a ROUGE-L F-score of 24.8 for method generation and 36.7 for docstring generation.", "keyphrases": ["docstring", "parallel corpus", "method generation", "pymt5", "code summarization"]} +{"id": "kolomiyets-moens-2010-kul", "title": "KUL: Recognition and Normalization of Temporal Expressions", "abstract": "In this paper we describe a system for the recognition and normalization of temporal expressions (Task 13: TempEval-2, Task A). The recognition task is approached as a classification problem of sentence constituents and the normalization is implemented in a rule-based manner. One of the system features is extending positive annotations in the corpus by semantically similar words automatically obtained from a large unannotated textual corpus. The best results obtained by the system are 0.85 and 0.84 for precision and recall respectively for recognition of temporal expressions; the accuracy values of 0.91 and 0.55 were obtained for the feature values type and val respectively.", "keyphrases": ["normalization", "temporal type", "grounding", "vein", "location"]} +{"id": "daza-frank-2019-translate", "title": "Translate and Label! An Encoder-Decoder Approach for Cross-lingual Semantic Role Labeling", "abstract": "We propose a Cross-lingual Encoder-Decoder model that simultaneously translates and generates sentences with Semantic Role Labeling annotations in a resource-poor target language. Unlike annotation projection techniques, our model does not need parallel data during inference time. Our approach can be applied in monolingual, multilingual and cross-lingual settings and is able to produce dependency-based and span-based SRL annotations. We benchmark the labeling performance of our model in different monolingual and multilingual settings using well-known SRL datasets. We then train our model in a cross-lingual setting to generate new SRL labeled data. 
Finally, we measure the effectiveness of our method by using the generated data to augment the training basis for resource-poor languages and perform manual evaluation to show that it produces high-quality sentences and assigns accurate semantic role annotations. Our proposed architecture offers a flexible method for leveraging SRL data in multiple languages.", "keyphrases": ["cross-lingual encoder-decoder model", "resource-poor target language", "semantic role annotation"]} +{"id": "deneefe-knight-2009-synchronous", "title": "Synchronous Tree Adjoining Machine Translation", "abstract": "Tree Adjoining Grammars have well-known advantages, but are typically considered too difficult for practical systems. We demonstrate that, when done right, adjoining improves translation quality without becoming computationally intractable. Using adjoining to model optionality allows general translation patterns to be learned without the clutter of endless variations of optional material. The appropriate modifiers can later be spliced in as needed. \n \nIn this paper, we describe a novel method for learning a type of Synchronous Tree Adjoining Grammar and associated probabilities from aligned tree/string training data. We introduce a method of converting these grammars to a weakly equivalent tree transducer for decoding. Finally, we show that adjoining results in an end-to-end improvement of +0.8 Bleu over a baseline statistical syntax-based MT model on a large-scale Arabic/English MT task.", "keyphrases": ["tree adjoining grammar", "machine translation decoder", "foundation"]} +{"id": "lin-etal-2021-differentiable", "title": "Differentiable Open-Ended Commonsense Reasoning", "abstract": "Current commonsense reasoning research focuses on developing models that use commonsense knowledge to answer multiple-choice questions. However, systems designed to answer multiple-choice questions may not be useful in applications that do not provide a small list of candidate answers to choose from. As a step towards making commonsense reasoning research more realistic, we propose to study open-ended commonsense reasoning (OpenCSR) \u2014 the task of answering a commonsense question without any pre-defined choices \u2014 using as a resource only a corpus of commonsense facts written in natural language. OpenCSR is challenging due to a large decision space, and because many questions require implicit multi-hop reasoning. As an approach to OpenCSR, we propose DrFact, an efficient Differentiable model for multi-hop Reasoning over knowledge Facts. To evaluate OpenCSR methods, we adapt several popular commonsense reasoning benchmarks, and collect multiple new answers for each test question via crowd-sourcing. Experiments show that DrFact outperforms strong baseline methods by a large margin.", "keyphrases": ["open-ended commonsense reasoning", "opencsr", "choice"]} +{"id": "steinberger-etal-2012-jrc", "title": "JRC Eurovoc Indexer JEX - A freely available multi-label categorisation tool", "abstract": "EuroVoc (2012) is a highly multilingual thesaurus consisting of over 6,700 hierarchically organised subject domains used by European Institutions and many authorities in Member States of the European Union (EU) for the classification and retrieval of official documents. JEX is JRC-developed multi-label classification software that learns from manually labelled data to automatically assign EuroVoc descriptors to new documents in a profile-based category-ranking task. 
The JEX release consists of trained classifiers for 22 official EU languages, of parallel training data in the same languages, of an interface that allows viewing and amending the assignment results, and of a module that allows users to re-train the tool on their own document collections. JEX allows advanced users to change the document representation so as to possibly improve the categorisation result through linguistic pre-processing. JEX can be used as a tool for interactive EuroVoc descriptor assignment to increase speed and consistency of the human categorisation process, or it can be used fully automatically. The output of JEX is a language-independent EuroVoc feature vector lending itself also as input to various other Language Technology tasks, including cross-lingual clustering and classification, cross-lingual plagiarism detection, sentence selection and ranking, and more.", "keyphrases": ["jex", "multi-label classification software", "eurovoc descriptor"]} +{"id": "suzgun-etal-2019-lstm", "title": "LSTM Networks Can Perform Dynamic Counting", "abstract": "In this paper, we systematically assess the ability of standard recurrent networks to perform dynamic counting and to encode hierarchical representations. All the neural models in our experiments are designed to be small-sized networks both to prevent them from memorizing the training sets and to visualize and interpret their behaviour at test time. Our results demonstrate that the Long Short-Term Memory (LSTM) networks can learn to recognize the well-balanced parenthesis language (Dyck-1) and the shuffles of multiple Dyck-1 languages, each defined over different parenthesis-pairs, by emulating simple real-time k-counter machines. To the best of our knowledge, this work is the first study to introduce the shuffle languages to analyze the computational power of neural networks. We also show that a single-layer LSTM with only one hidden unit is practically sufficient for recognizing the Dyck-1 language. However, none of our recurrent networks was able to yield a good performance on the Dyck-2 language learning task, which requires a model to have a stack-like mechanism for recognition.", "keyphrases": ["counting", "dyck-1", "shuffle", "k-counter machine", "formal language"]} +{"id": "bohnet-etal-2018-morphosyntactic", "title": "Morphosyntactic Tagging with a Meta-BiLSTM Model over Context Sensitive Token Encodings", "abstract": "The rise of neural networks, and particularly recurrent neural networks, has produced significant advances in part-of-speech tagging accuracy. One characteristic common among these models is the presence of rich initial word encodings. These encodings typically are composed of a recurrent character-based representation with dynamically and pre-trained word embeddings. However, these encodings do not consider a context wider than a single word and it is only through subsequent recurrent layers that word or sub-word information interacts. In this paper, we investigate models that use recurrent neural networks with sentence-level context for initial character and word-based representations. 
In particular we show that optimal results are obtained by integrating these context sensitive representations through synchronized training with a meta-model that learns to combine their states.", "keyphrases": ["meta-bilstm model", "part-of-speech", "word embedding", "morphological tagging"]} +{"id": "lita-etal-2003-truecasing", "title": "tRuEcasIng", "abstract": "Truecasing is the process of restoring case information to badly-cased or non-cased text. This paper explores truecasing issues and proposes a statistical, language modeling based truecaser which achieves an accuracy of ~98% on news articles. Task based evaluation shows a 26% F-measure improvement in named entity recognition when using truecasing. In the context of automatic content extraction, mention detection on automatic speech recognition text is also improved by a factor of 8. Truecasing also enhances machine translation output legibility and yields a BLEU score improvement of 80.2%. This paper argues for the use of truecasing as a valuable component in text processing applications.", "keyphrases": ["case information", "truecasing", "trigram language model"]} +{"id": "nerbonne-wiersma-2006-measure", "title": "A Measure of Aggregate Syntactic Distance", "abstract": "We compare vectors containing counts of trigrams of part-of-speech (POS) tags in order to obtain an aggregate measure of syntax difference. Since lexical syntactic categories reflect more abstract syntax as well, we argue that this procedure reflects more than just the basic syntactic categories. We tag the material automatically and analyze the frequency vectors for POS trigrams using a permutation test. A test analysis of a 305,000 word corpus containing the English of Finnish emigrants to Australia is promising in that the procedure proposed works well in distinguishing two different groups (adult vs. child emigrants) and also in highlighting syntactic deviations between the two groups.", "keyphrases": ["syntactic distance", "part-of-speech", "permutation test"]} +{"id": "montazery-faili-2010-automatic", "title": "Automatic Persian WordNet Construction", "abstract": "In this paper, an automatic method for Persian WordNet construction based on Princeton WordNet 2.1 (PWN) is introduced. The proposed approach uses Persian and English corpora as well as a bilingual dictionary in order to make a mapping between PWN synsets and Persian words. Our method calculates a score for each candidate synset of a given Persian word and for each of its translations, it selects the synset with maximum score as a link to the Persian word. The manual evaluation on selected links proposed by our method on 500 randomly selected Persian words, shows about 76.4% quality with respect to the precision measure. By augmenting the Persian WordNet with the un-ambiguous words, the total accuracy of the automatically extracted Persian WordNet is about 82.6%, which outperforms the previously semi-automatically generated Persian WordNet by about 12.6%.", "keyphrases": ["persian wordnet construction", "automatic method", "pwn"]} +{"id": "kallmeyer-romero-2004-ltag", "title": "LTAG Semantics with Semantic Unification", "abstract": "This paper sets up a framework for LTAG (Lexicalized Tree Adjoining Grammar) semantics that brings together ideas from different recent approaches addressing some shortcomings of TAG semantics based on the derivation tree. 
Within this framework, several sample analyses are proposed, and it is shown that the framework allows one to analyze data that have been claimed to be problematic for derivation tree based LTAG semantics approaches.", "keyphrases": ["tag semantic", "ltag", "quantifier"]} +{"id": "cook-etal-2014-novel", "title": "Novel Word-sense Identification", "abstract": "Automatic lexical acquisition has been an active area of research in computational linguistics for over two decades, but the automatic identification of new word-senses has received attention only very recently. Previous work on this topic has been limited by the availability of appropriate evaluation resources. In this paper we present the largest corpus-based dataset of diachronic sense differences to date, which we believe will encourage further work in this area. We then describe several extensions to a state-of-the-art topic modelling approach for identifying new word-senses. This adapted method shows superior performance on our dataset of two different corpus pairs to that of the original method for both: (a) types having taken on a novel sense over time; and (b) the token instances of such novel senses.", "keyphrases": ["novel sense", "reference corpus", "probability distribution"]} +{"id": "madotto-etal-2020-plug", "title": "Plug-and-Play Conversational Models", "abstract": "There has been considerable progress made towards conversational models that generate coherent and fluent responses; however, this often involves training large language models on large dialogue datasets, such as Reddit. These large conversational models provide little control over the generated responses, and this control is further limited in the absence of annotated conversational datasets for attribute specific generation that can be used for fine-tuning the model. In this paper, we first propose and evaluate plug-and-play methods for controllable response generation, which do not require dialogue-specific datasets and do not rely on fine-tuning a large model. While effective, the decoding procedure induces considerable computational overhead, rendering the conversational model unsuitable for interactive usage. To overcome this, we introduce an approach that does not require further computation at decoding time, while also not requiring any fine-tuning of a large language model. We demonstrate, through extensive automatic and human evaluation, a high degree of control over the generated conversational responses with regard to multiple desired attributes, while being fluent.", "keyphrases": ["conversational model", "plug-and-play method", "response generation"]} +{"id": "huang-chen-2011-chinese", "title": "Chinese Discourse Relation Recognition", "abstract": "The challenging issues of discourse relation recognition in Chinese are addressed. Due to the lack of Chinese discourse corpora, we construct a moderate corpus with human-annotated discourse relations. Based on the corpus, a statistical classifier is proposed, and various features are explored in the experiments. 
The experimental results show that our method achieves an accuracy of 88.28% and an F-Score of 63.69% in four-class classification and achieves an F-Score of 93.57% in the best case.", "keyphrases": ["discourse relation recognition", "statistical classifier", "comparison relation"]} +{"id": "moryossef-etal-2021-data", "title": "Data Augmentation for Sign Language Gloss Translation", "abstract": "Sign language translation (SLT) is often decomposed into video-to-gloss recognition and gloss-to-text translation, where a gloss is a sequence of transcribed spoken-language words in the order in which they are signed. We focus here on gloss-to-text translation, which we treat as a low-resource neural machine translation (NMT) problem. However, unlike traditional low-resource NMT, gloss-to-text translation differs because gloss-text pairs often have a higher lexical overlap and lower syntactic overlap than pairs of spoken languages. We exploit this lexical overlap and handle syntactic divergence by proposing two rule-based heuristics that generate pseudo-parallel gloss-text pairs from monolingual spoken language text. By pre-training on this synthetic data, we improve translation from American Sign Language (ASL) to English and German Sign Language (DGS) to German by up to 3.14 and 2.20 BLEU, respectively.", "keyphrases": ["gloss-to-text translation", "lexical overlap", "spoken language"]} +{"id": "liu-zhang-2017-attention", "title": "Attention Modeling for Targeted Sentiment", "abstract": "Neural network models have been used for target-dependent sentiment analysis. Previous work focuses on learning a target-specific representation for a given input sentence which is used for classification. However, they do not explicitly model the contribution of each word in a sentence with respect to targeted sentiment polarities. We investigate an attention model to this end. In particular, a vanilla LSTM model is used to induce an attention value of the whole sentence. The model is further extended to differentiate left and right contexts given a certain target following previous work. Results show that by using attention to model the contribution of each word with respect to the target, our model gives significantly improved results over two standard benchmarks. We report the best accuracy for this task.", "keyphrases": ["sentiment classification", "attention model", "good accuracy"]} +{"id": "puduppully-etal-2019-data", "title": "Data-to-text Generation with Entity Modeling", "abstract": "Recent approaches to data-to-text generation have shown great promise thanks to the use of large-scale datasets and the application of neural network architectures which are trained end-to-end. These models rely on representation learning to select content appropriately, structure it coherently, and verbalize it grammatically, treating entities as nothing more than vocabulary tokens. In this work we propose an entity-centric neural architecture for data-to-text generation. Our model creates entity-specific representations which are dynamically updated. Text is generated conditioned on the data input and entity memory representations using hierarchical attention at each time step. We present experiments on the RotoWire benchmark and a (five times larger) new dataset on the baseball domain which we create. 
Our results show that the proposed model outperforms competitive baselines in automatic and human evaluation.", "keyphrases": ["large-scale dataset", "data-to-text generation", "entity representation"]} +{"id": "kim-etal-2020-unsupervised", "title": "When and Why is Unsupervised Neural Machine Translation Useless?", "abstract": "This paper studies the practicality of the current state-of-the-art unsupervised methods in neural machine translation (NMT). In ten translation tasks with various data settings, we analyze the conditions under which the unsupervised methods fail to produce reasonable translations. We show that their performance is severely affected by linguistic dissimilarity and domain mismatch between source and target monolingual data. Such conditions are common for low-resource language pairs, where unsupervised learning works poorly. In all of our experiments, supervised and semi-supervised baselines with 50k-sentence bilingual data outperform the best unsupervised results. Our analyses pinpoint the limits of the current unsupervised NMT and also suggest immediate research directions.", "keyphrases": ["neural machine translation", "monolingual data", "good unsupervised system"]} +{"id": "lin-etal-2003-word", "title": "Word-Transliteration Alignment", "abstract": "The named-entity phrases in free text represent a formidable challenge to text analysis. Translating a named-entity is important for the task of Cross Language Information Retrieval and Question Answering. However, both tasks are not easy to handle because named-entities found in free text are often not listed in a monolingual or bilingual dictionary. Although it is possible to identify and translate named-entities on the fly without a list of proper names and transliterations, an extensive list certainly will ensure the high accuracy rate of text analysis. We use a list of proper names and transliterations to train a Machine Transliteration Model. With the model it is possible to extract proper names and their transliterations in a bilingual corpus with high average precision and recall rates.", "keyphrases": ["proper name", "transliteration", "average precision"]} +{"id": "joty-etal-2017-cross", "title": "Cross-language Learning with Adversarial Neural Networks", "abstract": "We address the problem of cross-language adaptation for question-question similarity reranking in community question answering, with the objective to port a system trained on one input language to another input language given labeled training data for the first language and only unlabeled data for the second language. In particular, we propose to use adversarial training of neural networks to learn high-level features that are discriminative for the main learning task, and at the same time are invariant across the input languages. The evaluation results show sizable improvements for our cross-language adversarial neural network (CLANN) model over a strong non-adversarial system.", "keyphrases": ["cross-language adaptation", "adversarial training", "clann"]} +{"id": "aghajanyan-etal-2020-conversational", "title": "Conversational Semantic Parsing", "abstract": "The structured representation for semantic parsing in task-oriented assistant systems is geared towards simple understanding of one-turn queries. Due to the limitations of the representation, the session-based properties such as co-reference resolution and context carryover are processed downstream in a pipelined system. 
In this paper, we propose a semantic representation for such task-oriented conversational systems that can represent concepts such as co-reference and context carryover, enabling comprehensive understanding of queries in a session. We release a new session-based, compositional task-oriented parsing dataset of 20k sessions consisting of 60k utterances. Unlike Dialog State Tracking Challenges, the queries in the dataset have compositional forms. We propose a new family of Seq2Seq models for the session-based parsing above, which also set state-of-the-art in ATIS, SNIPS, TOP and DSTC2. Notably, we improve the best known results on DSTC2 by up to 5 points for slot-carryover.", "keyphrases": ["semantic parsing", "query", "limitation"]} +{"id": "cotterell-etal-2014-stochastic", "title": "Stochastic Contextual Edit Distance and Probabilistic FSTs", "abstract": "String similarity is most often measured by weighted or unweighted edit distance d(x, y). Ristad and Yianilos (1998) defined stochastic edit distance\u2014a probability distribution p(y | x) whose parameters can be trained from data. We generalize this so that the probability of choosing each edit operation can depend on contextual features. We show how to construct and train a probabilistic finite-state transducer that computes our stochastic contextual edit distance. To illustrate the improvement from conditioning on context, we model typos found in social media text.", "keyphrases": ["edit distance", "finite-state transducer", "unigram model"]} +{"id": "power-williams-2012-generating", "title": "Generating Numerical Approximations", "abstract": "We describe a computational model for planning phrases like \u201cmore than a quarter\u201d and \u201c25.9 per cent\u201d which describe proportions at different levels of precision. The model lays out the key choices in planning a numerical description, using formal definitions of mathematical form (e.g., the distinction between fractions and percentages) and roundness adapted from earlier studies. The task is modeled as a constraint satisfaction problem, with solutions subsequently ranked by preferences (e.g., for roundness). Detailed constraints are based on a corpus of numerical expressions collected in the NumGen project (NumGen: Generating intelligent descriptions of numerical quantities for people with different levels of numeracy, http://mcs.open.ac.uk/sw6629/numgen), and evaluated through empirical studies in which subjects were asked to produce (or complete) numerical expressions in specified contexts.", "keyphrases": ["fraction", "preference", "numerical expression"]} +{"id": "huang-etal-2010-classical", "title": "Classical Chinese Sentence Segmentation", "abstract": "Sentence segmentation is a fundamental issue in Classical Chinese language processing. To facilitate reading and processing of the raw Classical Chinese data, we propose a statistical method to split unstructured Classical Chinese text into smaller pieces such as sentences and clauses. The segmenter based on the conditional random field (CRF) model is tested under different tagging schemes and various features including n-gram, jump, word class, and phonetic information. We evaluated our method on four datasets from several eras (i.e., from the 5th century BCE to the 19th century).
Our CRF segmenter achieves an F-score of 83.34% and can be applied on a variety of data from different eras.", "keyphrases": ["chinese text", "clause", "n-gram", "jump", "phonetic information"]} +{"id": "kuribayashi-etal-2021-lower", "title": "Lower Perplexity is Not Always Human-Like", "abstract": "In computational psycholinguistics, various language models have been evaluated against human reading behavior (e.g., eye movement) to build human-like computational models. However, most previous efforts have focused almost exclusively on English, despite the recent trend towards linguistic universals within the general community. In order to fill the gap, this paper investigates whether the established results in computational psycholinguistics can be generalized across languages. Specifically, we re-examine an established generalization \u2014the lower perplexity a language model has, the more human-like the language model is\u2014 in Japanese with typologically different structures from English. Our experiments demonstrate that this established generalization exhibits a surprising lack of universality; namely, lower perplexity is not always human-like. Moreover, this discrepancy between English and Japanese is further explored from the perspective of (non-)uniform information density. Overall, our results suggest that a cross-lingual evaluation will be necessary to construct human-like computational models.", "keyphrases": ["language model", "cross-lingual evaluation", "low perplexity"]} +{"id": "hovy-2015-demographic", "title": "Demographic Factors Improve Classification Performance", "abstract": "Extra-linguistic factors influence language use, and are accounted for by speakers and listeners. Most natural language processing (NLP) tasks to date, however, treat language as uniform. This assumption can harm performance. We investigate the effect of including demographic information on performance in a variety of text-classification tasks. We find that by including age or gender information, we consistently and significantly improve performance over demographic-agnostic models. These results hold across three text-classification tasks in five languages.", "keyphrases": ["demographic information", "age", "gender"]} +{"id": "widdows-2003-orthogonal", "title": "Orthogonal Negation in Vector Spaces for Modelling Word-Meanings and Document Retrieval", "abstract": "Standard IR systems can process queries such as \"web NOT internet\", enabling users who are interested in arachnids to avoid documents about computing. The documents retrieved for such a query should be irrelevant to the negated query term. Most systems implement this by reprocessing results after retrieval to remove documents containing the unwanted string of letters. This paper describes and evaluates a theoretically motivated method for removing unwanted meanings directly from the original query in vector models, with the same vector negation operator as used in quantum logic. Irrelevance in vector spaces is modelled using orthogonality, so query vectors are made orthogonal to the negated term or terms. As well as removing unwanted terms, this form of vector negation reduces the occurrence of synonyms and neighbours of the negated terms by as much as 76% compared with standard Boolean methods.
By altering the query vector itself, vector negation removes not only unwanted strings but unwanted meanings.", "keyphrases": ["query", "unwanted string", "vector negation operator"]} +{"id": "wu-etal-2020-neural", "title": "Neural Mixed Counting Models for Dispersed Topic Discovery", "abstract": "Mixed counting models that use the negative binomial distribution as the prior can well model over-dispersed and hierarchically dependent random variables; thus they have attracted much attention in mining dispersed document topics. However, the existing parameter inference method like Monte Carlo sampling is quite time-consuming. In this paper, we propose two efficient neural mixed counting models, i.e., the Negative Binomial-Neural Topic Model (NB-NTM) and the Gamma Negative Binomial-Neural Topic Model (GNB-NTM) for dispersed topic discovery. Neural variational inference algorithms are developed to infer model parameters by using the reparameterization of Gamma distribution and the Gaussian approximation of Poisson distribution. Experiments on real-world datasets indicate that our models outperform state-of-the-art baseline models in terms of perplexity and topic coherence. The results also validate that both NB-NTM and GNB-NTM can produce explainable intermediate variables by generating dispersed proportions of document topics.", "keyphrases": ["mixed counting model", "topic model", "gnb-ntm"]} +{"id": "bicici-2018-robust", "title": "Robust parfda Statistical Machine Translation Results", "abstract": "We build parallel feature decay algorithms (parfda) Moses statistical machine translation (SMT) models for language pairs in the translation task. parfda obtains results close to the top constrained phrase-based SMT with an average of 2.252 BLEU points difference on WMT 2017 datasets using significantly less computation for building SMT systems than that would be spent using all available corpora. We obtain BLEU upper bounds based on target coverage to identify which systems used additional data. We use PRO for tuning to decrease fluctuations in the results and postprocess translation outputs to decrease translation errors due to the casing of words. F1 scores on the key phrases of the English to Turkish testsuite that we prepared reveal that parfda achieves 2nd best results. Truecasing translations before scoring obtained the best results overall.", "keyphrases": ["parfda", "key phrase", "good result"]} +{"id": "nissim-markert-2003-syntactic", "title": "Syntactic Features and Word Similarity for Supervised Metonymy Resolution", "abstract": "We present a supervised machine learning algorithm for metonymy resolution, which exploits the similarity between examples of conventional metonymy. We show that syntactic head-modifier relations are a high precision feature for metonymy recognition but suffer from data sparseness. We partially overcome this problem by integrating a thesaurus and introducing simpler grammatical features, thereby preserving precision and increasing recall. Our algorithm generalises over two levels of contextual similarity. Resulting inferences exceed the complexity of inferences undertaken in word sense disambiguation. 
We also compare automatic and manual methods for syntactic feature extraction.", "keyphrases": ["metonymy resolution", "head-modifier relation", "grammatical role"]} +{"id": "mccrae-etal-2015-reconciling", "title": "Reconciling Heterogeneous Descriptions of Language Resources", "abstract": "Language resources are a cornerstone of linguistic research and for the development of natural language processing tools, but the discovery of relevant resources remains a challenging task. This is due to the fact that relevant metadata records are spread among different repositories and it is currently impossible to query all these repositories in an integrated fashion, as they use different data models and vocabularies. In this paper we present a first attempt to collect and harmonize the metadata of different repositories, thus making them queriable and browsable in an integrated way. We make use of RDF and linked data technologies for this and provide a first level of harmonization of the vocabularies used in the different resources by mapping them to standard RDF vocabularies including Dublin Core and DCAT. Further, we present an approach that relies on NLP and in particular word sense disambiguation techniques to harmonize resources by mapping values of attributes \u2010 such as the type, license or intended use of a resource \u2010 into normalized values. Finally, as there are duplicate entries within the same repository as well as across different repositories, we also report results of detection of these duplicates.", "keyphrases": ["language resource", "repository", "rdf"]} +{"id": "kshirsagar-etal-2017-detecting", "title": "Detecting and Explaining Crisis", "abstract": "Individuals on social media may reveal themselves to be in various states of crisis (e.g. suicide, self-harm, abuse, or eating disorders). Detecting crisis from social media text automatically and accurately can have profound consequences. However, detecting a general state of crisis without explaining why has limited applications. An explanation in this context is a coherent, concise subset of the text that rationalizes the crisis detection. We explore several methods to detect and explain crisis using a combination of neural and non-neural techniques. We evaluate these techniques on a unique data set obtained from Koko, an anonymous emotional support network available through various messaging applications. We annotate a small subset of the samples labeled with crisis with corresponding explanations. Our best technique significantly outperforms the baseline for detection and explanation.", "keyphrases": ["crisis", "self-harm", "explanation"]} +{"id": "bellare-mccallum-2009-generalized", "title": "Generalized Expectation Criteria for Bootstrapping Extractors using Record-Text Alignment", "abstract": "Traditionally, machine learning approaches for information extraction require human annotated data that can be costly and time-consuming to produce. However, in many cases, there already exists a database (DB) with schema related to the desired output, and records related to the expected input text. We present a conditional random field (CRF) that aligns tokens of a given DB record and its realization in text. The CRF model is trained using only the available DB and unlabeled text with generalized expectation criteria. An annotation of the text induced from inferred alignments is used to train an information extractor. 
We evaluate our method on a citation extraction task in which alignments between DBLP database records and citation texts are used to train an extractor. Experimental results demonstrate an error reduction of 35% over a previous state-of-the-art method that uses heuristic alignments.", "keyphrases": ["database", "information extractor", "generalized expectation criterion"]} +{"id": "menezes-quirk-2005-dependency", "title": "Dependency Treelet Translation: The Convergence of Statistical and Example-based Machine-translation?", "abstract": "We describe a novel approach to machine translation that combines the strengths of the two leading corpus-based approaches: Phrasal SMT and EBMT. We use a syntactically informed decoder and reordering model based on the source dependency tree, in combination with conventional SMT models to incorporate the power of phrasal SMT with the linguistic generality available in a parser. We show that this approach significantly outperforms a leading string-based Phrasal SMT decoder and an EBMT system. We present results from two radically different language pairs, and investigate the sensitivity of this approach to parse quality by using two distinct parsers and oracle experiments. We also validate our automated BLEU scores with a small human evaluation.", "keyphrases": ["dependency structure", "treelet approach", "source side"]} +{"id": "liu-etal-2022-cross", "title": "Cross-Modal Discrete Representation Learning", "abstract": "In contrast to recent advances focusing on high-level representation learning across modalities, in this work we present a self-supervised learning framework that is able to learn a representation that captures finer levels of granularity across different modalities such as concepts or events represented by visual objects or spoken words. Our framework relies on a discretized embedding space created via vector quantization that is shared across different modalities. Beyond the shared embedding space, we propose a Cross-Modal Code Matching objective that forces the representations from different views (modalities) to have a similar distribution over the discrete embedding space such that cross-modal objects/actions localization can be performed without direct supervision. We show that the proposed discretized multi-modal fine-grained representation (e.g., pixel/word/frame) can complement high-level summary representations (e.g., video/sentence/waveform) for improved performance on cross-modal retrieval tasks. We also observe that the discretized representation uses individual clusters to represent the same semantic concept across modalities.", "keyphrases": ["different modality", "vector quantization", "video", "cross-modal retrieval task"]} +{"id": "graham-van-genabith-2008-packed", "title": "Packed rules for automatic transfer-rule induction", "abstract": "We present a method of encoding transfer rules in a highly efficient packed structure using contextualized constraints (Maxwell and Kaplan, 1991), an existing method of encoding adopted from LFG parsing (Kaplan and Bresnan, 1982; Bresnan, 2001; Dalrymple, 2001). The packed representation allows us to encode O(2^n) transfer rules in a single packed representation only requiring O(n) storage space. Besides reducing space requirements, the representation also has a high impact on the amount of time taken to load large numbers of transfer rules to memory with very little trade-off in time needed to unpack the rules.
We include an experimental evaluation which shows a considerable reduction in space and time requirements for a large set of automatically induced transfer rules by storing the rules in the packed representation.", "keyphrases": ["rule induction approach", "generator", "van"]} +{"id": "beck-etal-2014-joint", "title": "Joint Emotion Analysis via Multi-task Gaussian Processes", "abstract": "We propose a model for jointly predicting multiple emotions in natural language sentences. Our model is based on a low-rank coregionalisation approach, which combines a vector-valued Gaussian Process with a rich parameterisation scheme. We show that our approach is able to learn correlations and anti-correlations between emotions on a news headlines dataset. The proposed model outperforms both singletask baselines and other multi-task approaches.", "keyphrases": ["multi-task", "gaussian process", "emotion classification"]} +{"id": "bojar-2007-english", "title": "English-to-Czech Factored Machine Translation", "abstract": "This paper describes experiments with English-to-Czech phrase-based machine translation. Additional annotation of input and output tokens (multiple factors) is used to explicitly model morphology. We vary the translation scenario (the setup of multiple factors) and the amount of information in the morphological tags. Experimental results demonstrate significant improvement of translation quality in terms of BLEU.", "keyphrases": ["output token", "multiple factor", "morphological tag", "english-to-czech"]} +{"id": "husain-etal-2007-simple", "title": "Simple Preposition Correspondence: A Problem in English to Indian Language Machine Translation", "abstract": "The paper describes an approach to automatically select from Indian Language the appropriate lexical correspondence of English simple preposition. The paper describes this task from a Machine Translation (MT) perspective. We use the properties of the head and complement of the preposition to select the appropriate sense in the target language. We later show that the results obtained from this approach are promising.", "keyphrases": ["indian language", "lexical correspondence", "english simple preposition"]} +{"id": "nahnsen-2009-domain", "title": "Domain-Independent Shallow Sentence Ordering", "abstract": "We present a shallow approach to the sentence ordering problem. The employed features are based on discourse entities, shallow syntactic analysis, and temporal precedence relations retrieved from VerbOcean. We show that these relatively simple features perform well in a machine learning algorithm on datasets containing sequences of events, and that the resulting models achieve optimal performance with small amounts of training data. The model does not yet perform well on datasets describing the consequences of events, such as the destructions after an earthquake.", "keyphrases": ["discourse entity", "shallow syntactic analysis", "precedence relation"]} +{"id": "morita-etal-2013-subtree", "title": "Subtree Extractive Summarization via Submodular Maximization", "abstract": "This study proposes a text summarization model that simultaneously performs sentence extraction and compression. We translate the text summarization task into a problem of extracting a set of dependency subtrees in the document cluster. We also encode obligatory case constraints as must-link dependency constraints in order to guarantee the readability of the generated summary. 
In order to handle the subtree extraction problem, we investigate a new class of submodular maximization problem, and a new algorithm that has the approximation ratio 1/2(1 \u2212 e^{\u22121}). Our experiments with the NTCIR ACLIA test collections show that our approach outperforms a state-of-the-art algorithm.", "keyphrases": ["compression", "text summarization task", "submodular maximization problem"]} +{"id": "melamud-etal-2014-probabilistic", "title": "Probabilistic Modeling of Joint-context in Distributional Similarity", "abstract": "Most traditional distributional similarity models fail to capture syntagmatic patterns that group together multiple word features within the same joint context. In this work we introduce a novel generic distributional similarity scheme under which the power of probabilistic models can be leveraged to effectively model joint contexts. Based on this scheme, we implement a concrete model which utilizes probabilistic n-gram language models. Our evaluations suggest that this model is particularly well-suited for measuring similarity for verbs, which are known to exhibit richer syntagmatic patterns, while maintaining comparable or better performance with respect to competitive baselines for nouns. Following this, we propose our scheme as a framework for future semantic similarity models leveraging the substantial body of work that exists in probabilistic language modeling.", "keyphrases": ["joint context", "similarity scheme", "hypernymy"]} +{"id": "lawrence-etal-2019-attending", "title": "Attending to Future Tokens for Bidirectional Sequence Generation", "abstract": "Neural sequence generation is typically performed token-by-token and left-to-right. Whenever a token is generated only previously produced tokens are taken into consideration. In contrast, for problems such as sequence classification, bidirectional attention, which takes both past and future tokens into consideration, has been shown to perform much better. We propose to make the sequence generation process bidirectional by employing special placeholder tokens. Treated as a node in a fully connected graph, a placeholder token can take past and future tokens into consideration when generating the actual output token. We verify the effectiveness of our approach experimentally on two conversational tasks where the proposed bidirectional model outperforms competitive baselines by a large margin.", "keyphrases": ["future token", "bidirectional attention", "placeholder token"]} +{"id": "zhang-wang-2009-cross", "title": "Cross-Domain Dependency Parsing Using a Deep Linguistic Grammar", "abstract": "Pure statistical parsing systems achieve high in-domain accuracy but perform poorly out-domain. In this paper, we propose two different approaches to produce syntactic dependency structures using a large-scale hand-crafted HPSG grammar. The dependency backbone of an HPSG analysis is used to provide general linguistic insights which, when combined with state-of-the-art statistical dependency parsing models, achieve performance improvements on out-domain tests.", "keyphrases": ["bi-lexical syntactic dependency", "deepbank", "theory"]} +{"id": "mason-charniak-2014-nonparametric", "title": "Nonparametric Method for Data-driven Image Captioning", "abstract": "We present a nonparametric density estimation technique for image caption generation. Data-driven matching methods have been shown to be effective for a variety of complex problems in Computer Vision.
These methods reduce an inference problem for an unknown image to finding an existing labeled image which is semantically similar. However, related approaches for image caption generation (Ordonez et al., 2011; Kuznetsova et al., 2012) are hampered by noisy estimations of visual content and poor alignment between images and human-written captions. Our work addresses this challenge by estimating a word frequency representation of the visual content of a query image. This allows us to cast caption generation as an extractive summarization problem. Our model strongly outperforms two state-of-the-art caption extraction systems according to human judgments of caption relevance.", "keyphrases": ["image", "caption", "extractive summarization problem"]} +{"id": "augustinus-etal-2012-example", "title": "Example-Based Treebank Querying", "abstract": "The recent construction of large linguistic treebanks for spoken and written Dutch (e.g. CGN, LASSY, Alpino) has created new and exciting opportunities for the empirical investigation of Dutch syntax and semantics. However, the exploitation of those treebanks requires knowledge of specific data structures and query languages such as XPath. Linguists who are unfamiliar with formal languages are often reluctant towards learning such a language. In order to make treebank querying more attractive for non-technical users we developed GrETEL (Greedy Extraction of Trees for Empirical Linguistics), a query engine in which linguists can use natural language examples as a starting point for searching the Lassy treebank without knowledge about tree representations nor formal query languages. By allowing linguists to search for similar constructions as the example they provide, we hope to bridge the gap between traditional and computational linguistics. Two case studies are conducted to provide a concrete demonstration of the tool. The architecture of the tool is optimised for searching the LASSY treebank, but the approach can be adapted to other treebank lay-outs.", "keyphrases": ["treebank", "dutch", "empirical linguistics", "query engine"]} +{"id": "agirre-etal-2006-two", "title": "Two graph-based algorithms for state-of-the-art WSD", "abstract": "This paper explores the use of two graph algorithms for unsupervised induction and tagging of nominal word senses based on corpora. Our main contribution is the optimization of the free parameters of those algorithms and its evaluation against publicly available gold standards. We present a thorough evaluation comprising supervised and unsupervised modes, and both lexical-sample and all-words tasks. The results show that, in spite of the information loss inherent to mapping the induced senses to the gold-standard, the optimization of parameters based on a small sample of nouns carries over to all nouns, performing close to supervised systems in the lexical sample task and yielding the second-best WSD systems for the Senseval-3 all-words task.", "keyphrases": ["word sense", "graph-based method", "fixed-list"]} +{"id": "meng-etal-2022-fast", "title": "Fast Nearest Neighbor Machine Translation", "abstract": "Though nearest neighbor Machine Translation (kNN-MT) (CITATION) has proved to introduce significant performance boosts over standard neural MT systems, it is prohibitively slow since it uses the entire reference corpus as the datastore for the nearest neighbor search. This means each step for each beam in the beam search has to search over the entire reference corpus.
kNN-MT is thus two-orders slower than vanilla MT models, making it hard to be applied to real-world applications, especially online services. In this work, we propose Fast kNN-MT to address this issue. Fast kNN-MT constructs a significantly smaller datastore for the nearest neighbor search: for each word in a source sentence, Fast kNN-MT first selects its nearest token-level neighbors, which is limited to tokens that are the same as the query token. Then at each decoding step, in contrast to using the entire corpus as the datastore, the search space is limited to target tokens corresponding to the previously selected reference source tokens. This strategy avoids search through the whole datastore for nearest neighbors and drastically improves decoding efficiency. Without loss of performance, Fast kNN-MT is two-orders faster than kNN-MT, and is only two times slower than the standard NMT model. Fast kNN-MT enables the practical use of kNN-MT systems in real-world MT applications. The code is available at .", "keyphrases": ["neighbor", "knn-mt", "two-order", "source sentence", "efficiency"]} +{"id": "taskar-etal-2004-max", "title": "Max-Margin Parsing", "abstract": "We present a novel discriminative approach to parsing inspired by the large-margin criterion underlying support vector machines. Our formulation uses a factorization analogous to the standard dynamic programs for parsing. In particular, it allows one to efficiently learn a model which discriminates among the entire space of parse trees, as opposed to reranking the top few candidates. Our models can condition on arbitrary features of input sentences, thus incorporating an important kind of lexical information without the added algorithmic complexity of modeling headedness. We provide an efficient algorithm for learning such models and show experimental evidence of the model\u2019s improved performance over a natural baseline model and a lexicalized probabilistic context-free grammar.", "keyphrases": ["dynamic program", "discriminative parsing", "gain"]} +{"id": "mylonakis-simaan-2011-learning", "title": "Learning Hierarchical Translation Structure with Linguistic Annotations", "abstract": "While it is generally accepted that many translation phenomena are correlated with linguistic structures, employing linguistic syntax for translation has proven a highly non-trivial task. The key assumption behind many approaches is that translation is guided by the source and/or target language parse, employing rules extracted from the parse tree or performing tree transformations. These approaches enforce strict constraints and might overlook important translation phenomena that cross linguistic constituents. We propose a novel flexible modelling approach to introduce linguistic information of varying granularity from the source side. Our method induces joint probability synchronous grammars and estimates their parameters, by selecting and weighing together linguistically motivated rules according to an objective function directly targeting generalisation over future data. We obtain statistically significant improvements across 4 different language pairs with English as source, mounting up to +1.92 BLEU for Chinese as target.", "keyphrases": ["probability", "synchronous grammar", "syntactic label"]} +{"id": "wang-etal-2005-strictly", "title": "Strictly Lexical Dependency Parsing", "abstract": "We present a strictly lexical parsing model where all the parameters are based on the words. 
This model does not rely on part-of-speech tags or grammatical categories. It maximizes the conditional probability of the parse tree given the sentence. This is in contrast with most previous models that compute the joint probability of the parse tree and the sentence. Although the maximization of joint and conditional probabilities is theoretically equivalent, the conditional model allows us to use distributional word similarity to generalize the observed frequency counts in the training corpus. Our experiments with the Chinese Treebank show that the accuracy of the conditional model is 13.6% higher than the joint model and that the strictly lexicalized conditional model outperforms the corresponding unlexicalized model based on part-of-speech tags.", "keyphrases": ["parsing model", "chinese", "treebank data", "central role", "tremendous progress"]} +{"id": "graca-etal-2008-building", "title": "Building a Golden Collection of Parallel Multi-Language Word Alignment", "abstract": "This paper reports an experience on producing manual word alignments over six different language pairs (all combinations between Portuguese, English, French and Spanish) (Gra\u00e7a et al., 2008). Word alignment of each language pair is made over the first 100 sentences of the common test set from the Europarl corpora (Koehn, 2005), corresponding to 600 new annotated sentences. This collection is publicly available at http://www.l2f.inesc-id.pt/resources/translation/. It contains, to our knowledge, the first word alignment gold set for the Portuguese language, with three other languages. Besides, it is, to our knowledge, the first multi-language manual word aligned parallel corpus, where the same sentences are annotated for each language pair. We started by using the guidelines presented at (Mari\u00f1o, 2005) and performed several refinements: some due to under-specifications on the original guidelines, others because of disagreement on some choices. This led to the development of an extensive new set of guidelines for multi-lingual word alignment annotation that, we believe, makes the alignment process less ambiguous. We evaluate the inter-annotator agreement obtaining an average of 91.6% agreement between the different language pairs.", "keyphrases": ["portuguese", "word alignment annotation", "average"]} +{"id": "pavlopoulos-etal-2017-deeper", "title": "Deeper Attention to Abusive User Content Moderation", "abstract": "Experimenting with a new dataset of 1.6M user comments from a news portal and an existing dataset of 115K Wikipedia talk page comments, we show that an RNN operating on word embeddings outperforms the previous state of the art in moderation, which used logistic regression or an MLP classifier with character or word n-grams. We also compare against a CNN operating on word embeddings, and a word-list baseline. A novel, deep, classification-specific attention mechanism improves the performance of the RNN further, and can also highlight suspicious words for free, without including highlighted words in the training data. We consider both fully automatic and semi-automatic moderation.", "keyphrases": ["abuse", "user content moderation", "moderator"]} +{"id": "bella-etal-2020-exploring", "title": "Exploring the Language of Data", "abstract": "We set out to uncover the unique grammatical properties of an important yet so far under-researched type of natural language text: that of short labels typically found within structured datasets.
We show that such labels obey a specific type of abbreviated grammar that we call the Language of Data, with properties significantly different from the kinds of text typically addressed in computational linguistics and NLP, such as `standard' written language or social media messages. We analyse orthography, parts of speech, and syntax over a large, bilingual, hand-annotated corpus of data labels collected from a variety of domains. We perform experiments on tokenisation, part-of-speech tagging, and named entity recognition over real-world structured data, demonstrating that models adapted to the Language of Data outperform those trained on standard text. These observations point in a new direction to be explored as future research, in order to develop new NLP tools and models dedicated to the Language of Data.", "keyphrases": ["unique grammatical property", "entity recognition", "real-world structured data"]} +{"id": "guta-etal-2015-extended", "title": "Extended Translation Models in Phrase-based Decoding", "abstract": "We propose a novel extended translation model (ETM) to counteract some problems in phrase-based translation: The lack of translation context when using single-word phrases and uncaptured dependencies beyond phrase boundaries. The ETM operates on word-level and augments the IBM models by an additional bilingual word pair and a reordering operation. Its implementation in a phrase-based decoder introduces translation and reordering dependencies for single-word phrases and dependencies across phrase boundaries. Moreover, the model incorporates an explicit treatment of multiple and empty alignments. Its integration outperforms competitive systems that include lexical and phrase translation models as well as hierarchical reordering models on 4 language pairs significantly by +0.7% BLEU on average. Although simpler and using fewer dependencies, the ETM proves to be on par with 7-gram operation sequence models (Durrani et al., 2013b).", "keyphrases": ["translation model", "ibm model", "bilingual word pair"]} +{"id": "lang-lapata-2011-unsupervised", "title": "Unsupervised Semantic Role Induction with Graph Partitioning", "abstract": "In this paper we present a method for unsupervised semantic role induction which we formalize as a graph partitioning problem. Argument instances of a verb are represented as vertices in a graph whose edge weights quantify their role-semantic similarity. Graph partitioning is realized with an algorithm that iteratively assigns vertices to clusters based on the cluster assignments of neighboring vertices. Our method is algorithmically and conceptually simple, especially with respect to how problem-specific knowledge is incorporated into the model. Experimental results on the CoNLL 2008 benchmark dataset demonstrate that our model is competitive with other unsupervised approaches in terms of F1 whilst attaining significantly higher cluster purity.", "keyphrases": ["semantic role induction", "graph partitioning problem", "vertex"]} +{"id": "eryigit-etal-2011-multiword", "title": "Multiword Expressions in Statistical Dependency Parsing", "abstract": "In this paper, we investigated the impact of extracting different types of multiword expressions (MWEs) in improving the accuracy of a data-driven dependency parser for a morphologically rich language (Turkish). We showed that in the training stage, the unification of MWEs of a certain type, namely compound verb and noun formations, has a negative effect on parsing accuracy by increasing the lexical sparsity.
Our results gave a statistically significant improvement by using a variant of the treebank excluding this MWE type in the training stage. Our extrinsic evaluation of an ideal MWE recognizer (for only extracting MWEs of type named entities, duplications, numbers, dates and some predefined list of compound prepositions) showed that the preprocessing of the test data would improve the labeled parsing accuracy by 1.5%.", "keyphrases": ["mwes", "dependency parser", "turkish"]} +{"id": "chang-2004-chinese", "title": "Chinese-English Parallel Corpus Construction and its Application", "abstract": "Chinese-English parallel corpora are key resources for Chinese-English cross-language information processing, Chinese-English bilingual lexicography, Chinese-English language research and teaching. But so far, a large-scale Chinese-English corpus is still unavailable, given the difficulties and the intensive labour required. In this paper, our work towards building a large-scale Chinese-English parallel corpus is presented. We elaborate on the collection, annotation and mark-up of the parallel Chinese-English texts and the workflow that we used to construct the corpus. In addition, we also present our work toward building tools for constructing and using the corpus easily for different purposes. Among these tools, a parallel concordance tool developed by us is examined in detail. Several applications of the corpus being conducted are also introduced briefly in the paper.", "keyphrases": ["chinese-english parallel corpora", "information processing", "language research"]} +{"id": "sasano-korhonen-2020-investigating", "title": "Investigating Word-Class Distributions in Word Vector Spaces", "abstract": "This paper presents an investigation on the distribution of word vectors belonging to a certain word class in a pre-trained word vector space. To this end, we made several assumptions about the distribution, modeled the distribution accordingly, and validated each assumption by comparing the goodness of each model. Specifically, we considered two types of word classes \u2013 the semantic class of direct objects of a verb and the semantic class in a thesaurus \u2013 and tried to build models that properly estimate how likely it is that a word in the vector space is a member of a given word class. Our results on selectional preference and WordNet datasets show that the centroid-based model will fail to achieve good enough performance, the geometry of the distribution and the existence of subgroups will have limited impact, and also the negative instances need to be considered for adequate modeling of the distribution. We further investigated the relationship between the scores calculated by each model and the degree of membership and found that discriminative learning-based models are best in finding the boundaries of a class, while models based on the offset between positive and negative instances perform best in determining the degree of membership.", "keyphrases": ["word class", "direct object", "geometry"]} +{"id": "tinsley-etal-2007-robust", "title": "Robust language pair-independent sub-tree alignment", "abstract": "Data-driven approaches to machine translation (MT) achieve state-of-the-art results. Many syntax-aware approaches, such as Example-Based MT and Data-Oriented Translation, make use of tree pairs aligned at sub-sentential level. Obtaining sub-sentential alignments manually is time-consuming and error-prone, and requires expert knowledge of both source and target languages.
We propose a novel, language pair-independent algorithm which automatically induces alignments between phrase-structure trees. We evaluate the alignments themselves against a manually aligned gold standard, and perform an extrinsic evaluation by using the aligned data to train and test a DOT system. Our results show that translation accuracy is comparable to that of the same translation system trained on manually aligned data, and coverage improves.", "keyphrases": ["machine translation", "sub-sentential level", "node alignment technique"]} +{"id": "gong-etal-2017-multi", "title": "Multi-Grained Chinese Word Segmentation", "abstract": "Traditionally, word segmentation (WS) adopts the single-grained formalism, where a sentence corresponds to a single word sequence. However, Sproat et al. (1997) show that the inter-native-speaker consistency ratio over Chinese word boundaries is only 76%, indicating single-grained WS (SWS) imposes unnecessary challenges on both manual annotation and statistical modeling. Moreover, WS results of different granularities can be complementary and beneficial for high-level applications. This work proposes and addresses multi-grained WS (MWS). We build a large-scale pseudo MWS dataset for model training and tuning by leveraging the annotation heterogeneity of three SWS datasets. Then we manually annotate 1,500 test sentences with true MWS annotations. Finally, we propose three benchmark approaches by casting MWS as constituent parsing and sequence labeling. Experiments and analysis lead to many interesting findings.", "keyphrases": ["word segmentation", "mws", "model training"]} +{"id": "tannier-moriceau-2013-building", "title": "Building Event Threads out of Multiple News Articles", "abstract": "We present an approach for building multidocument event threads from a large corpus of newswire articles. An event thread is basically a succession of events belonging to the same story. It helps the reader to contextualize the information contained in a single article, by navigating backward or forward in the thread from this article. A specific effort is also made on the detection of reactions to a particular event. In order to build these event threads, we use a cascade of classifiers and other modules, taking advantage of the redundancy of information in the newswire corpus. We also share interesting comments concerning our manual annotation procedure for building a training and testing set.", "keyphrases": ["event thread", "succession", "story"]} +{"id": "roth-woodsend-2014-composition", "title": "Composition of Word Representations Improves Semantic Role Labelling", "abstract": "State-of-the-art semantic role labelling systems require large annotated corpora to achieve full performance. Unfortunately, such corpora are expensive to produce and often do not generalize well across domains. Even in domain, errors are often made where syntactic information does not provide sufficient cues. In this paper, we mitigate both of these problems by employing distributional word representations gathered from unlabelled data.
While straight-forward word representations of predicates and arguments improve performance, we show that further gains are achieved by composing representations that model the interaction between predicate and argument, and capture full argument spans.", "keyphrases": ["word representation", "full argument span", "srl", "feature-based system"]} +{"id": "clark-wicentwoski-2013-swatcs", "title": "SwatCS: Combining simple classifiers with estimated accuracy", "abstract": "This paper is an overview of the SwatCS system submitted to SemEval-2013 Task 2A: Contextual Polarity Disambiguation. The sentiment of individual phrases within a tweet are labeled using a combination of classifiers trained on a range of lexical features. The classifiers are combined by estimating the accuracy of the classifiers on each tweet. Performance is measured when using only the provided training data, and separately when including external data.", "keyphrases": ["classifier ensemble", "expression-level", "exception"]} +{"id": "huang-riloff-2012-bootstrapped", "title": "Bootstrapped Training of Event Extraction Classifiers", "abstract": "Most event extraction systems are trained with supervised learning and rely on a collection of annotated documents. Due to the domain-specificity of this task, event extraction systems must be retrained with new annotated data for each domain. In this paper, we propose a bootstrapping solution for event role filler extraction that requires minimal human supervision. We aim to rapidly train a state-of-the-art event extraction system using a small set of \"seed nouns\" for each event role, a collection of relevant (in-domain) and irrelevant (out-of-domain) texts, and a semantic dictionary. The experimental results show that the bootstrapped system outperforms previous weakly supervised event extraction systems on the MUC-4 data set, and achieves performance levels comparable to supervised training with 700 manually annotated documents.", "keyphrases": ["event extraction", "human supervision", "noun"]} +{"id": "tsuruoka-etal-2009-stochastic", "title": "Stochastic Gradient Descent Training for L1-regularized Log-linear Models with Cumulative Penalty", "abstract": "Stochastic gradient descent (SGD) uses approximate gradients estimated from subsets of the training data and updates the parameters in an online fashion. This learning framework is attractive because it often requires much less training time in practice than batch training algorithms. However, L1-regularization, which is becoming popular in natural language processing because of its ability to produce compact models, cannot be efficiently applied in SGD training, due to the large dimensions of feature vectors and the fluctuations of approximate gradients. We present a simple method to solve these problems by penalizing the weights according to cumulative values for L1 penalty. We evaluate the effectiveness of our method in three applications: text chunking, named entity recognition, and part-of-speech tagging. 
Experimental results demonstrate that our method can produce compact and accurate models much more quickly than a state-of-the-art quasi-Newton method for L1-regularized log-linear models.", "keyphrases": ["sgd", "training time", "stochastic gradient descent"]} +{"id": "owczarzak-etal-2007-labelled", "title": "Labelled Dependencies in Machine Translation Evaluation", "abstract": "We present a method for evaluating the quality of Machine Translation (MT) output, using labelled dependencies produced by a Lexical-Functional Grammar (LFG) parser. Our dependency-based method, in contrast to most popular string-based evaluation metrics, does not unfairly penalize perfectly valid syntactic variations in the translation, and the addition of WordNet provides a way to accommodate lexical variation. In comparison with other metrics on 16,800 sentences of Chinese-English newswire text, our method reaches high correlation with human scores.", "keyphrases": ["reference sentence", "dependency graph", "term-based encoding"]} +{"id": "kollar-etal-2018-alexa", "title": "The Alexa Meaning Representation Language", "abstract": "This paper introduces a meaning representation for spoken language understanding. The Alexa meaning representation language (AMRL), unlike previous approaches, which factor spoken utterances into domains, provides a common representation for how people communicate in spoken language. AMRL is a rooted graph, links to a large-scale ontology, supports cross-domain queries, fine-grained types, complex utterances and composition. A spoken language dataset has been collected for Alexa, which contains \u223c20k examples across eight domains. A version of this meaning representation was released to developers at a trade show in 2016.", "keyphrases": ["meaning representation", "query", "conversation system"]} +{"id": "stevenson-greenwood-2006-comparing", "title": "Comparing Information Extraction Pattern Models", "abstract": "Several recently reported techniques for the automatic acquisition of Information Extraction (IE) systems have used dependency trees as the basis of their extraction pattern representation. These approaches have used a variety of pattern models (schemes for representing IE patterns based on particular parts of the dependency analysis). An appropriate model should be expressive enough to represent the information which is to be extracted from text without being overly complicated. Four previously reported pattern models are evaluated using existing IE evaluation corpora and three dependency parsers. It was found that one model, linked chains, could represent around 95% of the information of interest without generating an unwieldy number of possible patterns.", "keyphrases": ["information extraction", "pattern model", "new domain", "coverage"]} +{"id": "saggion-etal-2010-multilingual", "title": "Multilingual Summarization Evaluation without Human Models", "abstract": "We study correlation of rankings of text summarization systems using evaluation methods with and without human models. We apply our comparison framework to various well-established content-based evaluation measures in text summarization such as Coverage, Responsiveness, Pyramids and Rouge studying their associations in various text summarization tasks including generic and focus-based multi-document summarization in English and generic single-document summarization in French and Spanish.
The research is carried out using a new content-based evaluation framework called Fresa to compute a variety of divergences among probability distributions.", "keyphrases": ["summarization", "spanish", "automatic evaluation"]} +{"id": "angeli-uszkoreit-2013-language", "title": "Language-Independent Discriminative Parsing of Temporal Expressions", "abstract": "Temporal resolution systems are traditionally tuned to a particular language, requiring significant human effort to translate them to new languages. We present a language independent semantic parser for learning the interpretation of temporal phrases given only a corpus of utterances and the times they reference. We make use of a latent parse that encodes a language-flexible representation of time, and extract rich features over both the parse and associated temporal semantics. The parameters of the model are learned using a weakly supervised bootstrapping approach, without the need for manually tuned parameters or any other language expertise. We achieve state-of-the-art accuracy on all languages in the TempEval2 temporal normalization task, reporting a 4% improvement in both English and Spanish accuracy, and to our knowledge the first results for four other languages.", "keyphrases": ["temporal expression", "semantic parser", "limitation"]} +{"id": "riezler-etal-2014-response", "title": "Response-based Learning for Grounded Machine Translation", "abstract": "We propose a novel learning approach for statistical machine translation (SMT) that allows us to extract supervision signals for structured learning from an extrinsic response to a translation input. We show how to generate responses by grounding SMT in the task of executing a semantic parse of a translated query against a database. Experiments on the GEOQUERY database show an improvement of about 6 points in F1-score for response-based learning over learning from references only on returning the correct answer from a semantic parse of a translated query. In general, our approach alleviates the dependency on human reference translations and solves the reachability problem in structured learning for SMT.", "keyphrases": ["structured learning", "semantic parse", "reference"]} +{"id": "yao-etal-2017-weakly", "title": "A Weakly Supervised Approach to Train Temporal Relation Classifiers and Acquire Regular Event Pairs Simultaneously", "abstract": "Capabilities of detecting temporal and causal relations between two events can benefit many applications. Most existing temporal relation classifiers were trained in a supervised manner. Instead, we explore the observation that regular event pairs show a consistent temporal relation despite their various contexts and these rich contexts can be used to train a contextual temporal relation classifier, which can further recognize new temporal relation contexts and identify new regular event pairs. We focus on detecting after and before temporal relations and design a weakly supervised learning approach that extracts thousands of regular event pairs and learns a contextual temporal relation classifier simultaneously. Evaluation shows that the acquired regular event pairs are of high quality and contain rich commonsense knowledge and domain specific knowledge.
In addition, the weakly supervised trained temporal relation classifier achieves comparable performance with the state-of-the-art supervised systems.", "keyphrases": ["temporal relation", "relation classifier", "event pair", "observation"]} +{"id": "charniak-etal-2006-multilevel", "title": "Multilevel Coarse-to-Fine PCFG Parsing", "abstract": "We present a PCFG parsing algorithm that uses a multilevel coarse-to-fine (mlctf) scheme to improve the efficiency of search for the best parse. Our approach requires the user to specify a sequence of nested partitions or equivalence classes of the PCFG nonterminals. We define a sequence of PCFGs corresponding to each partition, where the nonterminals of each PCFG are clusters of nonterminals of the original source PCFG. We use the results of parsing at a coarser level (i.e., grammar defined in terms of a coarser partition) to prune the next finer level. We present experiments showing that with our algorithm the work load (as measured by the total number of constituents processed) is decreased by a factor of ten with no decrease in parsing accuracy compared to standard CKY parsing with the original PCFG. We suggest that the search space over mlctf algorithms is almost totally unexplored so that future work should be able to improve significantly on these results.", "keyphrases": ["pcfg", "constituent", "search space", "coarse-to-fine approach"]} +{"id": "libovicky-helcl-2017-attention", "title": "Attention Strategies for Multi-Source Sequence-to-Sequence Learning", "abstract": "Modeling attention in neural multi-source sequence-to-sequence learning remains a relatively unexplored area, despite its usefulness in tasks that incorporate multiple source languages or modalities. We propose two novel approaches to combine the outputs of attention mechanisms over each source sequence, flat and hierarchical. We compare the proposed methods with existing techniques and present results of systematic evaluation of those methods on the WMT16 Multimodal Translation and Automatic Post-editing tasks. We show that the proposed methods achieve competitive results on both tasks.", "keyphrases": ["modality", "sequence-to-sequence model", "attention strategy"]} +{"id": "bjorkelund-etal-2009-multilingual", "title": "Multilingual Semantic Role Labeling", "abstract": "This paper describes our contribution to the semantic role labeling task (SRL-only) of the CoNLL-2009 shared task in the closed challenge (Hajic et al., 2009). Our system consists of a pipeline of independent, local classifiers that identify the predicate sense, the arguments of the predicates, and the argument labels. Using these local models, we carried out a beam search to generate a pool of candidates. We then reranked the candidates using a joint learning approach that combines the local models and proposition features. \n \nTo address the multilingual nature of the data, we implemented a feature selection procedure that systematically explored the feature space, yielding significant gains over a standard set of features. Our system achieved the second best semantic score overall with an average labeled semantic F1 of 80.31. It obtained the best F1 score on the Chinese and German data and the second best one on English.", "keyphrases": ["semantic role", "feature selection procedure", "other type"]} +{"id": "xu-etal-2019-neural", "title": "Neural Response Generation with Meta-words", "abstract": "We present open domain dialogue generation with meta-words. 
A meta-word is a structured record that describes attributes of a response, and thus allows us to explicitly model the one-to-many relationship within open domain dialogues and perform response generation in an explainable and controllable manner. To incorporate meta-words into generation, we propose a novel goal-tracking memory network that formalizes meta-word expression as a goal in response generation and manages the generation process to achieve the goal with a state memory panel and a state controller. Experimental results from both automatic evaluation and human judgment on two large-scale data sets indicate that our model can significantly outperform state-of-the-art generation models in terms of response relevance, response diversity, and accuracy of meta-word expression.", "keyphrases": ["response generation", "meta-word", "domain dialogue generation", "memory network", "specificity"]} +{"id": "huang-etal-2005-robustness", "title": "The Robustness of Domain Lexico-Taxonomy: Expanding Domain Lexicon with CiLin", "abstract": "This paper deals with the robust expansion of Domain Lexico-Taxonomy (DLT). DLT is a domain taxonomy enriched with domain lexica. DLT was proposed as an infrastructure for crossing domain barriers (Huang et al., 2004). The DLT proposal is based on the observation that domain lexica contain entries that are also part of a general lexicon. Hence, when entries of a general lexicon are marked with their associated domain attributes, this information can have two important applications. First, the DLT will serve as seeds for domain lexica. Second, the DLT offers the most reliable evidence for deciding the domain of a new text since these lexical clues belong to the general lexicon and do occur reliably in all texts. Hence general lexicon lemmas are extracted to populate domain lexica, which are situated in domain taxonomy. Based on this previous work, we show in this paper that the original DLT can be further expanded when a new language resource is introduced. We applied CiLin, a Chinese thesaurus, and added more than 1000 new entries for DLT and show with evaluation that the DLT approach is robust since the size and number of domain lexica increased effectively.", "keyphrases": ["domain lexica", "chinese thesaurus", "dlt approach"]} +{"id": "guzman-etal-2019-flores", "title": "The FLORES Evaluation Datasets for Low-Resource Machine Translation: Nepali\u2013English and Sinhala\u2013English", "abstract": "For machine translation, a vast majority of language pairs in the world are considered low-resource because they have little parallel data available. Besides the technical challenges of learning with limited supervision, it is difficult to evaluate methods trained on low-resource language pairs because of the lack of freely and publicly available benchmarks. In this work, we introduce the FLORES evaluation datasets for Nepali\u2013English and Sinhala\u2013English, based on sentences translated from Wikipedia. Compared to English, these are languages with very different morphology and syntax, for which little out-of-domain parallel data is available and for which relatively large amounts of monolingual data are freely available. We describe our process to collect and cross-check the quality of translations, and we report baseline performance using several learning settings: fully supervised, weakly supervised, semi-supervised, and fully unsupervised. &#10;
Our experiments demonstrate that current state-of-the-art methods perform rather poorly on this benchmark, posing a challenge to the research community working on low-resource MT. Data and code to reproduce our experiments are available at .", "keyphrases": ["low-resource language pair", "large amount", "bleu score"]} +{"id": "lee-etal-2021-towards", "title": "Towards Few-shot Fact-Checking via Perplexity", "abstract": "Few-shot learning has drawn researchers' attention to overcome the problem of data scarcity. Recently, large pre-trained language models have shown great performance in few-shot learning for various downstream tasks, such as question answering and machine translation. Nevertheless, little exploration has been made to achieve few-shot learning for the fact-checking task. However, fact-checking is an important problem, especially when the amount of information online is growing exponentially every day. In this paper, we propose a new way of utilizing the powerful transfer learning ability of a language model via a perplexity score. The most notable strength of our methodology lies in its capability in few-shot learning. With only two training samples, our methodology can already outperform the Major Class baseline by more than an absolute 10% on the F1-Macro metric across multiple datasets. Through experiments, we empirically verify the plausibility of the rather surprising usage of the perplexity score in the context of fact-checking and highlight the strength of our few-shot methodology by comparing it to strong fine-tuning-based baseline models. Moreover, we construct and publicly release two new fact-checking datasets related to COVID-19.", "keyphrases": ["pre-trained language model", "fact-checking task", "perplexity score"]} +{"id": "peters-etal-2006-lois", "title": "The LOIS Project", "abstract": "The legal knowledge base resulting from the LOIS (Lexical Ontologies for legal Information Sharing) project consists of legal WordNets in six languages (Italian, Dutch, Portuguese, German, Czech, English). Its architecture is based on the EuroWordNet (EWN) framework (Vossen et al., 1997). Using the EWN framework assures compatibility of the LOIS WordNets with EWN, allowing them to function as an extension of EWN for the legal domain. For each legal system, the document-derived legal concepts are integrated into a taxonomy, which links into existing formal ontologies. These give the legal wordnets a first formal backbone, which can, in the future, be further extended. The database consists of 33,000 synsets, and is aimed to be used in information retrieval, where it provides mono- and multi-lingual access to European legal databases for legal experts as well as for laymen. The LOIS knowledge base also provides a flexible, modular architecture that allows integration of multiple classification schemes, and enables the comparison of legal systems by exploring translation, equivalence and structure across the different legal wordnets.", "keyphrases": ["project", "information sharing", "wordnets"]} +{"id": "liang-etal-2009-learning", "title": "Learning Semantic Correspondences with Less Supervision", "abstract": "A central problem in grounded language acquisition is learning the correspondences between a rich world state and a stream of text which references that world state. &#10;
To deal with the high degree of ambiguity present in this setting, we present a generative model that simultaneously segments the text into utterances and maps each utterance to a meaning representation grounded in the world state. We show that our model generalizes across three domains of increasing difficulty---Robocup sportscasting, weather forecasts (a new domain), and NFL recaps.", "keyphrases": ["correspondence", "language acquisition", "semantic representation"]} +{"id": "palshikar-etal-2019-extraction-message", "title": "Extraction of Message Sequence Charts from Narrative History Text", "abstract": "In this paper, we advocate the use of Message Sequence Chart (MSC) as a knowledge representation to capture and visualize multi-actor interactions and their temporal ordering. We propose algorithms to automatically extract an MSC from a history narrative. For a given narrative, we first identify verbs which indicate interactions and then use dependency parsing and Semantic Role Labelling based approaches to identify senders (initiating actors) and receivers (other actors involved) for these interaction verbs. As a final step in MSC extraction, we employ a state-of-the-art algorithm to temporally re-order these interactions. Our evaluation on multiple publicly available narratives shows improvements over four baselines.", "keyphrases": ["narrative", "msc", "semantic role labelling"]} +{"id": "reiter-thomson-2020-shared", "title": "Shared Task on Evaluating Accuracy", "abstract": "We propose a shared task on methodologies and algorithms for evaluating the accuracy of generated texts, specifically summaries of basketball games produced from basketball box score and other game data. We welcome submissions based on protocols for human evaluation, automatic metrics, as well as combinations of human evaluations and metrics.", "keyphrases": ["future work", "multimodal task", "english text-to-text task"]} +{"id": "wang-etal-2016-automatic", "title": "Automatic Construction of Discourse Corpora for Dialogue Translation", "abstract": "In this paper, a novel approach is proposed to automatically construct a parallel discourse corpus for dialogue machine translation. Firstly, the parallel subtitle data and its corresponding monolingual movie script data are crawled and collected from the Internet. Then tags such as speaker and discourse boundary from the script data are projected to its subtitle data via an information retrieval approach in order to map monolingual discourse to bilingual texts. We not only evaluate the mapping results, but also integrate speaker information into the translation. Experiments show our proposed method can achieve 81.79% and 98.64% accuracy on speaker and dialogue boundary annotation, and speaker-based language model adaptation can obtain around 0.5 BLEU points improvement in translation quality. Finally, we publicly release around 100K parallel discourse data with manual speaker and dialogue boundary annotation.", "keyphrases": ["novel approach", "dialogue machine translation", "boundary annotation", "parallel discourse data"]} +{"id": "chodorow-etal-2012-problems", "title": "Problems in Evaluating Grammatical Error Detection Systems", "abstract": "Many evaluation issues for grammatical error detection have previously been overlooked, making it hard to draw meaningful comparisons between different approaches, even when they are evaluated on the same corpus. &#10;
To begin with, the three-way contingency between a writer\u2019s sentence, the annotator\u2019s correction, and the system\u2019s output makes evaluation more complex than in some other NLP tasks, which we address by presenting an intuitive evaluation scheme. Of particular importance to error detection is the skew of the data \u2010 the low frequency of errors as compared to non-errors \u2010 which distorts some traditional measures of performance and limits their usefulness, leading us to recommend the reporting of raw measurements (true positives, false negatives, false positives, true negatives). Other issues that are particularly vexing for error detection focus on defining these raw measurements: specifying the size or scope of an error, properly treating errors as graded rather than discrete phenomena, and counting non-errors. We discuss recommendations for best practices with regard to reporting the results of system evaluation for these cases, recommendations which depend upon making clear one\u2019s assumptions and applications for error detection. By highlighting the problems with current error detection evaluation, the field will be better able to move forward.", "keyphrases": ["grammatical error detection", "same corpus", "annotator", "evaluation scheme"]} +{"id": "lin-etal-2019-unified", "title": "A Unified Linear-Time Framework for Sentence-Level Discourse Parsing", "abstract": "We propose an efficient neural framework for sentence-level discourse analysis in accordance with Rhetorical Structure Theory (RST). Our framework comprises a discourse segmenter to identify the elementary discourse units (EDU) in a text, and a discourse parser that constructs a discourse tree in a top-down fashion. Both the segmenter and the parser are based on Pointer Networks and operate in linear time. Our segmenter yields an F1 score of 95.4%, and our parser achieves an F1 score of 81.7% on the aggregated labeled (relation) metric, surpassing previous approaches by a good margin and approaching human agreement on both tasks (98.3 and 83.0 F1).", "keyphrases": ["sentence-level discourse analysis", "pointer network", "agreement"]} +{"id": "titov-henderson-2007-constituent", "title": "Constituent Parsing with Incremental Sigmoid Belief Networks", "abstract": "We introduce a framework for syntactic parsing with latent variables based on a form of dynamic Sigmoid Belief Networks called Incremental Sigmoid Belief Networks. We demonstrate that a previous feed-forward neural network parsing model can be viewed as a coarse approximation to inference with this class of graphical model. By constructing a more accurate but still tractable approximation, we significantly improve parsing accuracy, suggesting that ISBNs provide a good idealization for parsing. This generative model of parsing achieves state-of-the-art results on WSJ text and 8% error reduction over the baseline neural network parser.", "keyphrases": ["graphical model", "isbn", "constituent"]} +{"id": "mohamed-etal-2011-discovering", "title": "Discovering Relations between Noun Categories", "abstract": "Traditional approaches to Relation Extraction from text require manually defining the relations to be extracted. We propose here an approach to automatically discovering relevant relations, given a large text corpus plus an initial ontology defining hundreds of noun categories (e.g., Athlete, Musician, Instrument). Our approach discovers frequently stated relations between pairs of these categories, using a two-step process. &#10;
For each pair of categories (e.g., Musician and Instrument) it first co-clusters the text contexts that connect known instances of the two categories, generating a candidate relation for each resulting cluster. It then applies a trained classifier to determine which of these candidate relations is semantically valid. Our experiments apply this to a text corpus containing approximately 200 million web pages and an ontology containing 122 categories from the NELL system [Carlson et al., 2010b], producing a set of 781 proposed candidate relations, approximately half of which are semantically valid. We conclude this is a useful approach to semi-automatic extension of the ontology for large-scale information extraction systems such as NELL.", "keyphrases": ["text context", "candidate relation", "nell"]} +{"id": "cui-etal-2020-bert", "title": "BERT-enhanced Relational Sentence Ordering Network", "abstract": "In this paper, we introduce a novel BERT-enhanced Relational Sentence Ordering Network (referred to as BRSON) by leveraging BERT for capturing better dependency relationship among sentences to enhance the coherence modeling for the entire paragraph. In particular, we develop a new Relational Pointer Decoder (referred to as RPD) by incorporating the relative ordering information into the pointer network with a Deep Relational Module (referred to as DRM), which utilizes BERT to exploit the deep semantic connection and relative ordering between sentences. This enables us to strengthen both local and global dependencies among sentences. Extensive evaluations are conducted on six public datasets. The experimental results demonstrate the effectiveness and promise of our BRSON, showing a significant improvement over the state-of-the-art by a wide margin.", "keyphrases": ["ordering", "bert", "semantic connection"]} +{"id": "freitag-etal-2021-results", "title": "Results of the WMT21 Metrics Shared Task: Evaluating Metrics with Expert-based Human Evaluations on TED and News Domain", "abstract": "This paper presents the results of the WMT21 Metrics Shared Task. Participants were asked to score the outputs of the translation systems competing in the WMT21 News Translation Task with automatic metrics on two different domains: news and TED talks. All metrics were evaluated on how well they correlate at the system- and segment-level with human ratings. Contrary to previous years' editions, this year we acquired our own human ratings based on expert-based human evaluation via Multidimensional Quality Metrics (MQM). This setup had several advantages: (i) expert-based evaluation has been shown to be more reliable, (ii) we were able to evaluate all metrics on two different domains using translations of the same MT systems, (iii) we added 5 additional translations coming from the same system during system development. In addition, we designed three challenge sets that evaluate the robustness of all automatic metrics. We present an extensive analysis on how well metrics perform on three language pairs: English to German, English to Russian and Chinese to English. &#10;
We further show the impact of different reference translations on reference-based metrics and compare our expert-based MQM annotation with the DA scores acquired by WMT.", "keyphrases": ["human evaluation", "different domain", "news"]} +{"id": "mille-etal-2019-second", "title": "The Second Multilingual Surface Realisation Shared Task (SR'19): Overview and Evaluation Results", "abstract": "We report results from the SR'19 Shared Task, the second edition of a multilingual surface realisation task organised as part of the EMNLP'19 Workshop on Multilingual Surface Realisation. As in SR'18, the shared task comprised two tracks with different levels of complexity: (a) a shallow track where the inputs were full UD structures with word order information removed and tokens lemmatised; and (b) a deep track where additionally, functional words and morphological information were removed. The shallow track was offered in eleven, and the deep track in three languages. Systems were evaluated (a) automatically, using a range of intrinsic metrics, and (b) by human judges in terms of readability and meaning similarity. This report presents the evaluation results, along with descriptions of the SR'19 tracks, data and evaluation methods. For full descriptions of the participating systems, please see the separate system reports elsewhere in this volume.", "keyphrases": ["multilingual surface realisation", "functional word", "shared-task", "surface realization task"]} +{"id": "mendes-etal-2019-jointly", "title": "Jointly Extracting and Compressing Documents with Summary State Representations", "abstract": "We present a new neural model for text summarization that first extracts sentences from a document and then compresses them. The proposed model offers a balance that sidesteps the difficulties in abstractive methods while generating more concise summaries than extractive methods. In addition, our model dynamically determines the length of the output summary based on the gold summaries it observes during training and does not require length constraints typical to extractive summarization. The model achieves state-of-the-art results on the CNN/DailyMail and Newsroom datasets, improving over current extractive and abstractive methods. Human evaluations demonstrate that our model generates concise and informative summaries. We also make available a new dataset of oracle compressive summaries derived automatically from the CNN/DailyMail reference summaries.", "keyphrases": ["summarization", "extractor", "content selection"]} +{"id": "finegan-dollak-etal-2018-improving", "title": "Improving Text-to-SQL Evaluation Methodology", "abstract": "To be informative, an evaluation must measure how well systems generalize to realistic unseen data. We identify limitations of and propose improvements to current evaluations of text-to-SQL systems. First, we compare human-generated and automatically generated questions, characterizing properties of queries necessary for real-world applications. To facilitate evaluation on multiple datasets, we release standardized and improved versions of seven existing datasets and one new text-to-SQL dataset. Second, we show that the current division of data into training and test sets measures robustness to variations in the way questions are asked, but only partially tests how well systems generalize to new queries; therefore, we propose a complementary dataset split for evaluation of future work. &#10;
Finally, we demonstrate how the common practice of anonymizing variables during evaluation removes an important challenge of the task. Our observations highlight key difficulties, and our methodology enables effective measurement of future development.", "keyphrases": ["text-to-sql system", "query", "semantic parser"]} +{"id": "maamouri-etal-2006-developing", "title": "Developing and Using a Pilot Dialectal Arabic Treebank", "abstract": "In this paper, we describe the methodological procedures and issues that emerged from the development of a pilot Levantine Arabic Treebank (LATB) at the Linguistic Data Consortium (LDC) and its use at the Johns Hopkins University (JHU) Center for Language and Speech Processing workshop on Parsing Arabic Dialects (PAD). This pilot, consisting of morphological and syntactic annotation of approximately 26,000 words of Levantine Arabic conversational telephone speech, was developed under severe time constraints; hence the LDC team drew on their experience in treebanking Modern Standard Arabic (MSA) text. The resulting Levantine dialect treebanked corpus was used by the PAD team to develop and evaluate parsers for Levantine dialect texts. The parsers were trained on MSA resources and adapted using dialect-MSA lexical resources (some developed especially for this task) and existing linguistic knowledge about syntactic differences between MSA and dialect. The use of the LATB for development and evaluation of syntactic parsers allowed the PAD team to provide feedback to the LDC treebank developers. In this paper, we describe the creation of resources for this corpus, as well as transformations on the corpus to eliminate speech effects and lessen the gap between our pre-existing MSA resources and the new dialectal corpus.", "keyphrases": ["dialectal arabic", "levantine arabic", "syntactic annotation", "egyptian arabic"]} +{"id": "chen-yoon-2011-detecting", "title": "Detecting Structural Events for Assessing Non-Native Speech", "abstract": "Structural events (i.e., the structure of clauses and disfluencies) in spontaneous speech are important components of human speaking and have been used to measure language development. However, they have not been actively used in automated speech assessment research. Given the recent substantial progress on automated structural event detection on spontaneous speech, we investigated the detection of clause boundaries and interruption points of edit disfluencies on transcriptions of non-native speech data and extracted features from the detected events for speech assessment. Compared to features computed on human-annotated events, the features computed on machine-generated events show promising correlations to holistic scores that reflect speaking proficiency levels.", "keyphrases": ["structural event", "speech scoring", "syntactic competence"]} +{"id": "tsarfaty-etal-2012-cross", "title": "Cross-Framework Evaluation for Statistical Parsing", "abstract": "A serious bottleneck of comparative parser evaluation is the fact that different parsers subscribe to different formal frameworks and theoretical assumptions. Converting outputs from one framework to another is less than optimal as it easily introduces noise into the process. Here we present a principled protocol for evaluating parsing results across frameworks based on function trees, tree generalization and edit distance metrics. This extends a previously proposed framework for cross-theory evaluation and allows us to compare a wider class of parsers. &#10;
We demonstrate the usefulness and language independence of our procedure by evaluating constituency and dependency parsers on English and Swedish.", "keyphrases": ["different parser", "optimization", "maltoptimizer"]} +{"id": "acs-etal-2015-two", "title": "A Two-level Classifier for Discriminating Similar Languages", "abstract": "The BRUniBP team\u2019s submission is presented for the Discriminating between Similar Languages Shared Task 2015. Our method is a two-phase classifier that utilizes both character and word-level features. The evaluation shows 100% accuracy on language group identification and 93.66% accuracy on language identification. The main contribution of the paper is a memory-efficient correlation-based feature selection method.", "keyphrases": ["heli method", "variation", "previous vardial workshop"]} +{"id": "xu-etal-2021-stacked", "title": "Stacked Acoustic-and-Textual Encoding: Integrating the Pre-trained Models into Speech Translation Encoders", "abstract": "Encoder pre-training is promising in end-to-end Speech Translation (ST), given the fact that speech-to-translation data is scarce. But ST encoders are not simple instances of Automatic Speech Recognition (ASR) or Machine Translation (MT) encoders. For example, we find that ASR encoders lack the global context representation, which is necessary for translation, whereas MT encoders are not designed to deal with long but locally attentive acoustic sequences. In this work, we propose a Stacked Acoustic-and-Textual Encoding (SATE) method for speech translation. Our encoder begins with processing the acoustic sequence as usual, but later behaves more like an MT encoder for a global representation of the input sequence. In this way, it is straightforward to incorporate the pre-trained models into the system. Also, we develop an adaptor module to alleviate the representation inconsistency between the pre-trained ASR encoder and MT encoder, and develop a multi-teacher knowledge distillation method to preserve the pre-training knowledge. Experimental results on the LibriSpeech En-Fr and MuST-C En-De ST tasks show that our method achieves state-of-the-art BLEU scores of 18.3 and 25.2. To our knowledge, we are the first to develop an end-to-end ST system that achieves comparable or even better BLEU performance than the cascaded ST counterpart when large-scale ASR and MT data is available.", "keyphrases": ["end-to-end speech translation", "automatic speech recognition", "machine translation"]} +{"id": "bannard-2007-measure", "title": "A Measure of Syntactic Flexibility for Automatically Identifying Multiword Expressions in Corpora", "abstract": "Natural languages contain many multi-word sequences that do not display the variety of syntactic processes we would expect given their phrase type, and consequently must be included in the lexicon as multiword units. This paper describes a method for identifying such items in corpora, focussing on English verb-noun combinations. In an evaluation using a set of dictionary-published MWEs we show that our method achieves greater accuracy than existing MWE extraction methods based on lexical association.", "keyphrases": ["multiword expression", "verb-noun combination", "mwe", "syntactic fixedness", "english vnic"]} +{"id": "suzuki-etal-2003-hierarchical", "title": "Hierarchical Directed Acyclic Graph Kernel: Methods for Structured Natural Language Data", "abstract": "This paper proposes the \"Hierarchical Directed Acyclic Graph (HDAG) Kernel\" for structured natural language data. &#10;
The HDAG Kernel directly accepts several levels of both chunks and their relations, and then efficiently computes the weighted sum of the number of common attribute sequences of the HDAGs. We applied the proposed method to question classification and sentence alignment tasks to evaluate its performance as a similarity measure and a kernel function. The results of the experiments demonstrate that the HDAG Kernel is superior to other kernel functions and baseline methods.", "keyphrases": ["kernel", "natural language data", "hdag"]} +{"id": "weerkamp-de-rijke-2008-credibility", "title": "Credibility Improves Topical Blog Post Retrieval", "abstract": "Topical blog post retrieval is the task of ranking blog posts with respect to their relevance for a given topic. To improve topical blog post retrieval we incorporate textual credibility indicators in the retrieval process. We consider two groups of indicators: post level (determined using information about individual blog posts only) and blog level (determined using information from the underlying blogs). We describe how to estimate these indicators and how to integrate them into a retrieval approach based on language models. Experiments on the TREC Blog track test set show that both groups of credibility indicators significantly improve retrieval effectiveness; the best performance is achieved when combining them.", "keyphrases": ["blog post retrieval", "credibility indicator", "personal pronoun", "text length", "capital"]} +{"id": "borchert-etal-2020-ggponc", "title": "GGPONC: A Corpus of German Medical Text with Rich Metadata Based on Clinical Practice Guidelines", "abstract": "The lack of publicly accessible text corpora is a major obstacle for progress in natural language processing. For medical applications, unfortunately, all language communities other than English are low-resourced. In this work, we present GGPONC (German Guideline Program in Oncology NLP Corpus), a freely distributable German language corpus based on clinical practice guidelines for oncology. This corpus is one of the largest ever built from German medical documents. Unlike clinical documents, clinical guidelines do not contain any patient-related information and can therefore be used without data protection restrictions. Moreover, GGPONC is the first corpus for the German language covering diverse conditions in a large medical subfield and provides a variety of metadata, such as literature references and evidence levels. By applying and evaluating existing medical information extraction pipelines for German text, we are able to draw comparisons for the use of medical language to other corpora, medical and non-medical ones.", "keyphrases": ["oncology nlp corpus", "german language", "clinical guideline"]} +{"id": "zhu-etal-2010-imposing", "title": "Imposing Hierarchical Browsing Structures onto Spoken Documents", "abstract": "This paper studies the problem of imposing a known hierarchical structure onto an unstructured spoken document, aiming to help browse such archives. We formulate our solutions within a dynamic-programming-based alignment framework and use minimum error-rate training to combine a number of global and hierarchical constraints. This pragmatic approach is computationally efficient. Results show that it outperforms a baseline that ignores the hierarchical and global features and the improvement is consistent on transcripts with different WERs. &#10;
Directly imposing such hierarchical structures onto raw speech without using transcripts yields competitive results.", "keyphrases": ["hierarchical structure", "transcript", "pre-order walk"]} +{"id": "singh-husain-2005-comparison", "title": "Comparison, Selection and Use of Sentence Alignment Algorithms for New Language Pairs", "abstract": "Several algorithms are available for sentence alignment, but there is a lack of systematic evaluation and comparison of these algorithms under different conditions. In most cases, the factors which can significantly affect the performance of a sentence alignment algorithm have not been considered while evaluating. We have used a method for evaluation that can give a better estimate about a sentence alignment algorithm's performance, so that the best one can be selected. We have compared four approaches using this method. These have mostly been tried on European language pairs. We have evaluated manually-checked and validated English-Hindi aligned parallel corpora under different conditions. We also suggest some guidelines on actual alignment.", "keyphrases": ["sentence alignment algorithm", "systematic evaluation", "condition"]} +{"id": "clark-curran-2006-partial", "title": "Partial Training for a Lexicalized-Grammar Parser", "abstract": "We propose a solution to the annotation bottleneck for statistical parsing, by exploiting the lexicalized nature of Combinatory Categorial Grammar (CCG). The parsing model uses predicate-argument dependencies for training, which are derived from sequences of CCG lexical categories rather than full derivations. A simple method is used for extracting dependencies from lexical category sequences, resulting in high precision, yet incomplete and noisy data. The dependency parsing model of Clark and Curran (2004b) is extended to exploit this partial training data. Remarkably, the accuracy of the parser trained on data derived from category sequences alone is only 1.3% worse in terms of F-score than the parser trained on complete dependency structures.", "keyphrases": ["lexical category", "partial training data", "complete dependency structure"]} +{"id": "hu-etal-2021-explicit", "title": "Explicit Alignment Objectives for Multilingual Bidirectional Encoders", "abstract": "Pre-trained cross-lingual encoders such as mBERT (Devlin et al., 2019) and XLM-R (Conneau et al., 2020) have proven impressively effective at enabling transfer-learning of NLP systems from high-resource languages to low-resource languages. This success comes despite the fact that there is no explicit objective to align the contextual embeddings of words/sentences with similar meanings across languages together in the same space. In this paper, we present a new method for learning multilingual encoders, AMBER (Aligned Multilingual Bidirectional EncodeR). AMBER is trained on additional parallel data using two explicit alignment objectives that align the multilingual representations at different granularities. We conduct experiments on zero-shot cross-lingual transfer learning for different tasks including sequence tagging, sentence retrieval and sentence classification. Experimental results on the tasks in the XTREME benchmark (Hu et al., 2020) show that AMBER obtains gains of up to 1.1 average F1 score on sequence tagging and up to 27.3 average accuracy on retrieval over the XLM-R-large model which has 3.2x the parameters of AMBER. 
Our code and models are available at .", "keyphrases": ["multilingual encoder", "parallel data", "backward attention matrix"]} +{"id": "zalmout-etal-2016-analysis", "title": "Analysis of Foreign Language Teaching Methods: An Automatic Readability Approach", "abstract": "Much research in education has been done on the study of different language teaching methods. However, there has been little investigation using computational analysis to compare such methods in terms of readability or complexity progression. In this paper, we make use of existing readability scoring techniques and our own classifiers to analyze the textbooks used in two very different teaching methods for English as a Second Language \u2013 the grammar-based and the communicative methods. Our analysis indicates that the grammar-based curriculum shows a more coherent readability progression compared to the communicative curriculum. This finding corroborates the expectations about the differences between these two methods and validates our approach's value in comparing different teaching methods quantitatively.", "keyphrases": ["teaching method", "readability scoring technique", "second language"]} +{"id": "erk-pado-2010-exemplar", "title": "Exemplar-Based Models for Word Meaning in Context", "abstract": "This paper describes ongoing work on distributional models for word meaning in context. We abandon the usual one-vector-per-word paradigm in favor of an exemplar model that activates only relevant occurrences. On a paraphrasing task, we find that a simple exemplar model outperforms more complex state-of-the-art models.", "keyphrases": ["occurrence", "target word", "contextual information"]} +{"id": "iglesias-etal-2009-rule", "title": "Rule Filtering by Pattern for Efficient Hierarchical Translation", "abstract": "We describe refinements to hierarchical translation search procedures intended to reduce both search errors and memory usage through modifications to hypothesis expansion in cube pruning and reductions in the size of the rule sets used in translation. Rules are put into syntactic classes based on the number of non-terminals and the pattern, and various filtering strategies are then applied to assess the impact on translation speed and quality. Results are reported on the 2008 NIST Arabic-to-English evaluation task.", "keyphrases": ["modification", "rule set", "syntactic class", "non-terminal", "maximum phrase jump"]} +{"id": "kolluru-etal-2020-imojie", "title": "IMoJIE: Iterative Memory-Based Joint Open Information Extraction", "abstract": "While traditional systems for Open Information Extraction were statistical and rule-based, recently neural models have been introduced for the task. Our work builds upon CopyAttention, a sequence generation OpenIE model (Cui et al., 2018). Our analysis reveals that CopyAttention produces a constant number of extractions per sentence, and its extracted tuples often express redundant information. We present IMoJIE, an extension to CopyAttention, which produces the next extraction conditioned on all previously extracted tuples. This approach overcomes both shortcomings of CopyAttention, resulting in a variable number of diverse extractions per sentence. We train IMoJIE on training data bootstrapped from extractions of several non-neural systems, which have been automatically filtered to reduce redundancy and noise. &#10;
IMoJIE outperforms CopyAttention by about 18 F1 pts, and a BERT-based strong baseline by 2 F1 pts, establishing a new state of the art for the task.", "keyphrases": ["open information extraction", "art", "seq2seq architecture"]} +{"id": "lewis-steedman-2013-combined", "title": "Combined Distributional and Logical Semantics", "abstract": "We introduce a new approach to semantics which combines the benefits of distributional and formal logical semantics. Distributional models have been successful in modelling the meanings of content words, but logical semantics is necessary to adequately represent many function words. We follow formal semantics in mapping language to logical representations, but differ in that the relational constants used are induced by offline distributional clustering at the level of predicate-argument structure. Our clustering algorithm is highly scalable, allowing us to run on corpora the size of Gigaword. Different senses of a word are disambiguated based on their induced types. We outperform a variety of existing approaches on a wide-coverage question answering task, and demonstrate the ability to make complex multi-sentence inferences involving quantifiers on the FraCaS suite.", "keyphrases": ["content word", "relational constant", "distributional clustering"]} +{"id": "chandu-etal-2017-tackling", "title": "Tackling Biomedical Text Summarization: OAQA at BioASQ 5B", "abstract": "In this paper, we describe our participation in phase B of task 5b of the fifth edition of the annual BioASQ challenge, which includes answering factoid, list, yes-no and summary questions from biomedical data. We describe our techniques with an emphasis on ideal answer generation, where the goal is to produce a relevant, precise, non-redundant, query-oriented summary from multiple relevant documents. We make use of extractive summarization techniques to address this task and experiment with different biomedical ontologies and various algorithms including agglomerative clustering, Maximum Marginal Relevance (MMR) and sentence compression. We propose a novel word embedding based tf-idf similarity metric and a soft positional constraint which improve our system performance. We evaluate our techniques on test batch 4 from the fourth edition of the challenge. Our best system achieves a ROUGE-2 score of 0.6534 and ROUGE-SU4 score of 0.6536.", "keyphrases": ["edition", "bioasq challenge", "extractive summarization technique"]} +{"id": "liu-etal-2014-iterative", "title": "An Iterative Link-based Method for Parallel Web Page Mining", "abstract": "Identifying parallel web pages from bilingual web sites is a crucial step of bilingual resource construction for crosslingual information processing. In this paper, we propose a link-based approach to distinguish parallel web pages from bilingual web sites. Compared with the existing methods, which only employ the internal translation similarity (such as content-based similarity and page structural similarity), we hypothesize that the external translation similarity is an effective feature to identify parallel web pages. Within a bilingual web site, web pages are interconnected by hyperlinks. The basic idea of our method is that the translation similarity of two pages can be inferred from their neighbor pages, which can be adopted as an important source of external similarity. Thus, the translation similarity of page pairs will influence each other. 
An iterative algorithm is developed to estimate the external translation similarity and the final translation similarity. Both internal and external similarity measures are combined in the iterative algorithm. Experiments on six bilingual websites demonstrate that our method is effective and obtains significant improvement (6.2% F-Score) over the baseline which only utilizes internal translation similarity.", "keyphrases": ["parallel web page", "link-based approach", "content-based similarity"]} +{"id": "gutierrez-etal-2016-finding", "title": "Finding Non-Arbitrary Form-Meaning Systematicity Using String-Metric Learning for Kernel Regression", "abstract": "Arbitrariness of the sign\u2014the notion that the forms of words are unrelated to their meanings\u2014is an underlying assumption of many linguistic theories. Two lines of research have recently challenged this assumption, but they produce differing characterizations of non-arbitrariness in language. Behavioral and corpus studies have confirmed the validity of localized form-meaning patterns manifested in limited subsets of the lexicon. Meanwhile, global (lexicon-wide) statistical analyses instead find diffuse form-meaning systematicity across the lexicon as a whole. We bridge the gap with an approach that can detect both local and global form-meaning systematicity in language. In the kernel regression formulation we introduce, form-meaning relationships can be used to predict words\u2019 distributional semantic vectors from their forms. Furthermore, we introduce a novel metric learning algorithm that can learn weighted edit distances that minimize kernel regression error. Our results suggest that the English lexicon exhibits far more global form-meaning systematicity than previously discovered, and that much of this systematicity is focused in localized form-meaning patterns.", "keyphrases": ["systematicity", "characterization", "form-meaning association"]} +{"id": "ture-etal-2012-encouraging", "title": "Encouraging Consistent Translation Choices", "abstract": "It has long been observed that monolingual text exhibits a tendency toward \"one sense per discourse,\" and it has been argued that a related \"one translation per discourse\" constraint is operative in bilingual contexts as well. In this paper, we introduce a novel method using forced decoding to confirm the validity of this constraint, and we demonstrate that it can be exploited in order to improve machine translation quality. Three ways of incorporating such a preference into a hierarchical phrase-based MT model are proposed, and the approach where all three are combined yields the greatest improvements for both Arabic-English and Chinese-English translation experiments.", "keyphrases": ["discourse", "consistency constraint", "counting feature"]} +{"id": "strubell-etal-2017-fast", "title": "Fast and Accurate Entity Recognition with Iterated Dilated Convolutions", "abstract": "Today when many practitioners run basic NLP on the entire web and large-volume traffic, faster methods are paramount to saving time and energy costs. Recent advances in GPU hardware have led to the emergence of bi-directional LSTMs as a standard method for obtaining per-token vector representations serving as input to labeling tasks such as NER (often followed by prediction in a linear-chain CRF). Though expressive and accurate, these models fail to fully exploit GPU parallelism, limiting their computational efficiency. &#10;
This paper proposes a faster alternative to Bi-LSTMs for NER: Iterated Dilated Convolutional Neural Networks (ID-CNNs), which have better capacity than traditional CNNs for large context and structured prediction. Unlike LSTMs whose sequential processing on sentences of length N requires O(N) time even in the face of parallelism, ID-CNNs permit fixed-depth convolutions to run in parallel across entire documents. We describe a distinct combination of network structure, parameter sharing and training procedures that enable dramatic 14-20x test-time speedups while retaining accuracy comparable to the Bi-LSTM-CRF. Moreover, ID-CNNs trained to aggregate context from the entire document are more accurate than Bi-LSTM-CRFs while attaining 8x faster test time speeds.", "keyphrases": ["entity recognition", "convolution", "bi-lstms"]} +{"id": "lewis-steedman-2013-unsupervised", "title": "Unsupervised Induction of Cross-Lingual Semantic Relations", "abstract": "Creating a language-independent meaning representation would benefit many crosslingual NLP tasks. We introduce the first unsupervised approach to this problem, learning clusters of semantically equivalent English and French relations between referring expressions, based on their named-entity arguments in large monolingual corpora. The clusters can be used as language-independent semantic relations, by mapping clustered expressions in different languages onto the same relation. Our approach needs no parallel text for training, but outperforms a baseline that uses machine translation on a cross-lingual question answering task. We also show how to use the semantics to improve the accuracy of machine translation, by using it in a simple reranker.", "keyphrases": ["semantic relation", "unsupervised approach", "equivalent english"]} +{"id": "kaplan-etal-2004-speed", "title": "Speed and Accuracy in Shallow and Deep Stochastic Parsing", "abstract": "This paper reports some experiments that compare the accuracy and performance of two stochastic parsing systems. The currently popular Collins parser is a shallow parser whose output contains more detailed semantically relevant information than other such parsers. The XLE parser is a deep-parsing system that couples a Lexical Functional Grammar to a log-linear disambiguation component and provides much richer representations. We measured the accuracy of both systems against a gold standard of the PARC 700 dependency bank, and also measured their processing times. We found the deep-parsing system to be more accurate than the Collins parser with only a slight reduction in parsing speed.", "keyphrases": ["such parser", "parc", "reduction"]} +{"id": "gemechu-reed-2019-decompositional", "title": "Decompositional Argument Mining: A General Purpose Approach for Argument Graph Construction", "abstract": "This work presents an approach that decomposes propositions into four functional components and identifies the patterns linking those components to determine argument structure. The entities addressed by a proposition are target concepts and the features selected to make a point about the target concepts are aspects. A line of reasoning is followed by providing evidence for the points made about the target concepts via aspects. Opinions on target concepts and opinions on aspects are used to support or attack the ideas expressed by target concepts and aspects. The relations between aspects, target concepts, opinions on target concepts and aspects are used to infer the argument relations. &#10;
Propositions are connected iteratively to form a graph structure. The approach is generic in that it is not tuned for a specific corpus; it is evaluated on three different corpora from the literature (AAEC, AMT, US2016G1tv), achieving F scores of 0.79, 0.77 and 0.64, respectively.", "keyphrases": ["argument mining", "functional component", "opinion"]} +{"id": "xiong-etal-2018-session", "title": "Session-level Language Modeling for Conversational Speech", "abstract": "We propose to generalize language models for conversational speech recognition to allow them to operate across utterance boundaries and speaker changes, thereby capturing conversation-level phenomena such as adjacency pairs, lexical entrainment, and topical coherence. The model consists of a long-short-term memory (LSTM) recurrent network that reads the entire word-level history of a conversation, as well as information about turn taking and speaker overlap, in order to predict each next word. The model is applied in a rescoring framework, where the word history prior to the current utterance is approximated with preliminary recognition results. In experiments in the conversational telephone speech domain (Switchboard) we find that such a model gives substantial perplexity reductions over a standard LSTM-LM with utterance scope, as well as improvements in word error rate.", "keyphrases": ["language model", "conversation", "history"]} +{"id": "genzel-2010-automatically", "title": "Automatically Learning Source-side Reordering Rules for Large Scale Machine Translation", "abstract": "We describe an approach to automatically learn reordering rules to be applied as a preprocessing step in phrase-based machine translation. We learn rules for 8 different language pairs, showing BLEU improvements for all of them, and demonstrate that many important order transformations (SVO to SOV or VSO, head-modifier, verb movement) can be captured by this approach.", "keyphrases": ["machine translation", "pre-processing step", "position"]} +{"id": "hopkins-may-2013-models", "title": "Models of Translation Competitions", "abstract": "What do we want to learn from a translation competition and how do we learn it with confidence? We argue that a disproportionate focus on ranking competition participants has led to lots of different rankings, but little insight about which rankings we should trust. In response, we provide the first framework that allows an empirical comparison of different analyses of competition results. We then use this framework to compare several analytical models on data from the Workshop on Machine Translation (WMT).", "keyphrases": ["ranking", "workshop", "wmt"]} +{"id": "amancio-specia-2014-analysis", "title": "An Analysis of Crowdsourced Text Simplifications", "abstract": "We present a study on the text simplification operations undertaken collaboratively by Simple English Wikipedia contributors. The aim is to understand whether a complex-simple parallel corpus involving this version of Wikipedia is appropriate as data source to induce simplification rules, and whether we can automatically categorise the different operations performed by humans. A subset of the corpus was first manually analysed to identify its transformation operations. We then built machine learning models to attempt to automatically classify segments based on such transformations. This classification could be used, e.g., to filter out potentially noisy transformations. &#10;
Our results show that the most common transformation operations performed by humans are paraphrasing (39.80%) and drop of information (26.76%), which are some of the most difficult operations to generalise from data. They are also the most difficult operations to identify automatically, with the lowest overall classifier accuracy among all operations (73% and 59%, respectively).", "keyphrases": ["text simplification", "wikipedia", "reason"]} +{"id": "sassano-2004-linear", "title": "Linear-Time Dependency Analysis for Japanese", "abstract": "We present a novel algorithm for Japanese dependency analysis. The algorithm allows us to analyze dependency structures of a sentence in linear-time while keeping a state-of-the-art accuracy. In this paper, we show a formal description of the algorithm and discuss it theoretically with respect to time complexity. In addition, we evaluate its efficiency and performance empirically against the Kyoto University Corpus. The proposed algorithm with improved models for dependency yields the best accuracy in the previously published results on the Kyoto University Corpus.", "keyphrases": ["dependency analyzer", "coordinate structure", "sassano"]} +{"id": "laddha-mukherjee-2016-extracting", "title": "Extracting Aspect Specific Opinion Expressions", "abstract": "Opinionated expression extraction is a central problem in fine-grained sentiment analysis. Most existing works focus on either generic subjective expression or aspect expression extraction. However, in opinion mining, it is often desirable to mine the aspect specific opinion expressions (or aspect-sentiment phrases) containing both the aspect and the opinion. This paper proposes a hybrid generative-discriminative framework for extracting such expressions. The hybrid model consists of (i) an unsupervised generative component for modeling the semantic coherence of terms (words/phrases) based on their collocations across different documents, and (ii) a supervised discriminative sequence modeling component for opinion phrase extraction. Experimental results using Amazon.com reviews demonstrate the effectiveness of the approach that significantly outperforms several state-of-the-art baselines.", "keyphrases": ["aspect term", "asc task", "position"]} +{"id": "dehouck-etal-2020-efficient", "title": "Efficient EUD Parsing", "abstract": "We present the system submission from the FASTPARSE team for the EUD Shared Task at IWPT 2020. We engaged with the task by focusing on efficiency. For this we considered training costs and inference efficiency. Our models are a combination of distilled neural dependency parsers and a rule-based system that projects UD trees into EUD graphs. We obtained an average ELAS of 74.04 for our official submission, ranking 4th overall.", "keyphrases": ["dependency parser", "rule-based system", "eud graph"]} +{"id": "yao-etal-2021-adapt", "title": "Adapt-and-Distill: Developing Small, Fast and Effective Pretrained Language Models for Domains", "abstract": "Large pre-trained models have achieved great success in many natural language processing tasks. However, when they are applied in specific domains, these models suffer from domain shift and bring challenges in fine-tuning and online serving for latency and capacity constraints. In this paper, we present a general approach to developing small, fast and effective pre-trained models for specific domains. &#10;
This is achieved by adapting the off-the-shelf general pre-trained models and performing task-agnostic knowledge distillation in target domains. Specifically, we propose domain-specific vocabulary expansion in the adaptation stage and employ corpus level occurrence probability to choose the size of incremental vocabulary automatically. Then we systematically explore different strategies to compress the large pre-trained models for specific domains. We conduct our experiments in the biomedical and computer science domain. The experimental results demonstrate that our approach achieves better performance over the BERT BASE model in domain-specific tasks while 3.3x smaller and 5.1x faster than BERT BASE. The code and pre-trained models are available at https://aka.ms/adalm.", "keyphrases": ["adapt language model", "domain-specific downstream task", "performance improvement"]} +{"id": "prabhu-etal-2019-sampling", "title": "Sampling Bias in Deep Active Classification: An Empirical Study", "abstract": "The exploding cost and time needed for data labeling and model training are bottlenecks for training DNN models on large datasets. Identifying smaller representative data samples with strategies like active learning can help mitigate such bottlenecks. Previous works on active learning in NLP identify the problem of sampling bias in the samples acquired by uncertainty-based querying and develop costly approaches to address it. Using a large empirical study, we demonstrate that active set selection using the posterior entropy of deep models like FastText.zip (FTZ) is robust to sampling biases and to various algorithmic choices (query size and strategies) unlike that suggested by traditional literature. We also show that FTZ based query strategy produces sample sets similar to those from more sophisticated approaches (e.g ensemble networks). Finally, we show the effectiveness of the selected samples by creating tiny high-quality datasets, and utilizing them for fast and cheap training of large models. Based on the above, we propose a simple baseline for deep active text classification that outperforms the state of the art. We expect the presented work to be useful and informative for dataset compression and for problems involving active, semi-supervised or online learning scenarios. Code and models are available at: .", "keyphrases": ["active learning", "text classification", "sample"]} +{"id": "simon-etal-2019-unsupervised", "title": "Unsupervised Information Extraction: Regularizing Discriminative Approaches with Relation Distribution Losses", "abstract": "Unsupervised relation extraction aims at extracting relations between entities in text. Previous unsupervised approaches are either generative or discriminative. In a supervised setting, discriminative approaches, such as deep neural network classifiers, have demonstrated substantial improvement. However, these models are hard to train without supervision, and the currently proposed solutions are unstable. To overcome this limitation, we introduce a skewness loss which encourages the classifier to predict a relation with confidence given a sentence, and a distribution distance loss enforcing that all relations are predicted in average. 
These losses improve the performance of discriminative-based models, and enable us to train deep neural networks satisfactorily, surpassing current state of the art on three different datasets.", "keyphrases": ["unsupervised relation extraction", "substantial improvement", "skewness loss", "openre model", "instability"]} +{"id": "zhang-etal-2003-hhmm", "title": "HHMM-based Chinese Lexical Analyzer ICTCLAS", "abstract": "This document presents the results from Inst. of Computing Tech., CAS in the ACL SIGHAN-sponsored First International Chinese Word Segmentation Bake-off. The authors introduce the unified HHMM-based frame of our Chinese lexical analyzer ICTCLAS and explain the operation of the six tracks. We then provide the evaluation results and give more analysis. Evaluation on ICTCLAS shows that its performance is competitive. Compared with other systems, ICTCLAS has ranked top in both the CTB and PK closed tracks. In the PK open track, it ranks in second position. The ICTCLAS BIG5 version was transformed from the GB version in only two days; however, it performed well in the two BIG5 closed tracks. Through the first bakeoff, we could learn more about the development of Chinese word segmentation and became more confident in our HHMM-based approach. At the same time, we also identified problems with our approach during the evaluation. The bakeoff was interesting and helpful.", "keyphrases": ["analyzer ictclas", "chinese word segmentation", "word-based generative model"]} +{"id": "xu-rosti-2010-combining", "title": "Combining Unsupervised and Supervised Alignments for MT: An Empirical Study", "abstract": "Word alignment plays a central role in statistical MT (SMT) since almost all SMT systems extract translation rules from word aligned parallel training data. While most SMT systems use unsupervised algorithms (e.g. GIZA++) for training word alignment, supervised methods, which exploit a small amount of human-aligned data, have become increasingly popular recently. This work empirically studies the performance of these two classes of alignment algorithms and explores strategies to combine them to improve overall system performance. We used two unsupervised aligners, GIZA++ and HMM, and one supervised aligner, ITG, in this study. To avoid language and genre specific conclusions, we ran experiments on test sets consisting of two language pairs (Chinese-to-English and Arabic-to-English) and two genres (newswire and weblog). Results show that the two classes of algorithms achieve the same level of MT performance. Modest improvements were achieved by taking the union of the translation grammars extracted from different alignments. Significant improvements (around 1.0 in BLEU) were achieved by combining outputs of different systems trained with different alignments. The improvements are consistent across languages and genres.", "keyphrases": ["aligner", "giza++", "system performance", "union"]} +{"id": "lyu-etal-2019-semantic", "title": "Semantic Role Labeling with Iterative Structure Refinement", "abstract": "Modern state-of-the-art Semantic Role Labeling (SRL) methods rely on expressive sentence encoders (e.g., multi-layer LSTMs) but tend to model only local (if any) interactions between individual argument labeling decisions. This contrasts with earlier work and also with the intuition that the labels of individual arguments are strongly interdependent. We model interactions between argument labeling decisions through iterative refinement.
Starting with an output produced by a factorized model, we iteratively refine it using a refinement network. Instead of modeling arbitrary interactions among roles and words, we encode prior knowledge about the SRL problem by designing a restricted network architecture capturing non-local interactions. This modeling choice prevents overfitting and results in an effective model, outperforming strong factorized baseline models on all 7 CoNLL-2009 languages, and achieving state-of-the-art results on 5 of them, including English.", "keyphrases": ["refinement", "srl", "argument labeling decision", "baseline model"]} +{"id": "li-etal-2019-learning", "title": "Learning to Rank for Plausible Plausibility", "abstract": "Researchers illustrate improvements in contextual encoding strategies via resultant performance on a battery of shared Natural Language Understanding (NLU) tasks. Many of these tasks are of a categorical prediction variety: given a conditioning context (e.g., an NLI premise), provide a label based on an associated prompt (e.g., an NLI hypothesis). The categorical nature of these tasks has led to common use of a cross entropy log-loss objective during training. We suggest this loss is intuitively wrong when applied to plausibility tasks, where the prompt by design is neither categorically entailed nor contradictory given the context. Log-loss naturally drives models to assign scores near 0.0 or 1.0, in contrast to our proposed use of a margin-based loss. Following a discussion of our intuition, we describe a confirmation study based on an extreme, synthetically curated task derived from MultiNLI. We find that a margin-based loss leads to a more plausible model of plausibility. Finally, we illustrate improvements on the Choice Of Plausible Alternative (COPA) task through this change in loss.", "keyphrases": ["objective", "plausibility task", "margin-based loss"]} +{"id": "branavan-etal-2008-learning", "title": "Learning Document-Level Semantic Properties from Free-Text Annotations", "abstract": "This paper presents a new method for inferring the semantic properties of documents by leveraging free-text keyphrase annotations. Such annotations are becoming increasingly abundant due to the recent dramatic growth in semi-structured, user-generated online content. One especially relevant domain is product reviews, which are often annotated by their authors with pros/cons keyphrases such as \"a real bargain\" or \"good value.\" These annotations are representative of the underlying semantic properties; however, unlike expert annotations, they are noisy: lay authors may use different labels to denote the same property, and some labels may be missing. To learn using such noisy annotations, we find a hidden paraphrase structure which clusters the keyphrases. The paraphrase structure is linked with a latent topic model of the review texts, enabling the system to predict the properties of unannotated documents and to effectively aggregate the semantic properties of multiple reviews. Our approach is implemented as a hierarchical Bayesian model with joint inference. We find that joint inference increases the robustness of the keyphrase clustering and encourages the latent topics to correlate with semantically meaningful properties. 
Multiple evaluations demonstrate that our model substantially outperforms alternative approaches for summarizing single and multiple documents into a set of semantically salient keyphrases.", "keyphrases": ["semantic property", "topic model", "user annotation", "category information"]} +{"id": "buys-blunsom-2017-robust", "title": "Robust Incremental Neural Semantic Graph Parsing", "abstract": "Parsing sentences to linguistically-expressive semantic representations is a key goal of Natural Language Processing. Yet statistical parsing has focussed almost exclusively on bilexical dependencies or domain-specific logical forms. We propose a neural encoder-decoder transition-based parser which is the first full-coverage semantic graph parser for Minimal Recursion Semantics (MRS). The model architecture uses stack-based embedding features, predicting graphs jointly with unlexicalized predicates and their token alignments. Our parser is more accurate than attention-based baselines on MRS, and on an additional Abstract Meaning Representation (AMR) benchmark, and GPU batch processing makes it an order of magnitude faster than a high-precision grammar-based parser. Further, the 86.69% Smatch score of our MRS parser is higher than the upper-bound on AMR parsing, making MRS an attractive choice as a semantic representation.", "keyphrases": ["semantic representation", "transition-based parser", "eds"]} +{"id": "cohen-smith-2009-shared", "title": "Shared Logistic Normal Distributions for Soft Parameter Tying in Unsupervised Grammar Induction", "abstract": "We present a family of priors over probabilistic grammar weights, called the shared logistic normal distribution. This family extends the partitioned logistic normal distribution, enabling factored covariance between the probabilities of different derivation events in the probabilistic grammar, providing a new way to encode prior knowledge about an unknown grammar. We describe a variational EM algorithm for learning a probabilistic grammar based on this family of priors. We then experiment with unsupervised dependency grammar induction and show significant improvements using our model for both monolingual learning and bilingual learning with a non-parallel, multilingual corpus.", "keyphrases": ["logistic normal distribution", "new way", "unsupervised parsing", "pos tag"]} +{"id": "ke-etal-2021-jointgt", "title": "JointGT: Graph-Text Joint Representation Learning for Text Generation from Knowledge Graphs", "abstract": "Existing pre-trained models for knowledge-graph-to-text (KG-to-text) generation simply fine-tune text-to-text pre-trained models such as BART or T5 on KG-to-text datasets, which largely ignore the graph structure during encoding and lack elaborate pre-training tasks to explicitly model graph-text alignments. To tackle these problems, we propose a graph-text joint representation learning model called JointGT. During encoding, we devise a structure-aware semantic aggregation module which is plugged into each Transformer layer to preserve the graph structure. Furthermore, we propose three new pre-training tasks to explicitly enhance the graph-text alignment including respective text / graph reconstruction, and graph-text alignment in the embedding space via Optimal Transport. 
Experiments show that JointGT obtains new state-of-the-art performance on various KG-to-text datasets.", "keyphrases": ["pre-training task", "graph-to-text generation", "structure-aware graph encoding"]} +{"id": "jha-etal-2010-corpus", "title": "Corpus Creation for New Genres: A Crowdsourced Approach to PP Attachment", "abstract": "This paper explores the task of building an accurate prepositional phrase attachment corpus for new genres while avoiding a large investment in terms of time and money by crowd-sourcing judgments. We develop and present a system to extract prepositional phrases and their potential attachments from ungrammatical and informal sentences and pose the subsequent disambiguation tasks as multiple choice questions to workers from Amazon's Mechanical Turk service. Our analysis shows that this two-step approach is capable of producing reliable annotations on informal and potentially noisy blog text, and this semi-automated strategy holds promise for similar annotation projects in new genres.", "keyphrases": ["phrase attachment corpus", "amazon", "crowdsourcing service"]} +{"id": "oh-etal-2021-surprisal", "title": "Surprisal Estimators for Human Reading Times Need Character Models", "abstract": "While the use of character models has been popular in NLP applications, it has not been explored much in the context of psycholinguistic modeling. This paper presents a character model that can be applied to a structural parser-based processing model to calculate word generation probabilities. Experimental results show that surprisal estimates from a structural processing model using this character model deliver substantially better fits to self-paced reading, eye-tracking, and fMRI data than those from large-scale language models trained on much more data. This may suggest that the proposed processing model provides a more humanlike account of sentence processing, which assumes a larger role of morphology, phonotactics, and orthographic complexity than was previously thought.", "keyphrases": ["self-paced reading", "sentence processing", "morphology", "orthographic complexity", "large role"]} +{"id": "kim-baldwin-2006-automatic", "title": "Automatic Identification of English Verb Particle Constructions using Linguistic Features", "abstract": "This paper presents a method for identifying token instances of verb particle constructions (VPCs) automatically, based on the output of the RASP parser. The proposed method pools together instances of VPCs and verb-PPs from the parser output and uses the sentential context of each such instance to differentiate VPCs from verb-PPs. We show our technique to perform at an F-score of 97.4% at identifying VPCs in Wall Street Journal and Brown Corpus data taken from the Penn Treebank.", "keyphrases": ["sentential context", "verb-particle construction", "semantic information"]} +{"id": "ma-etal-2014-punctuation", "title": "Punctuation Processing for Projective Dependency Parsing", "abstract": "Modern statistical dependency parsers assign lexical heads to punctuations as well as words. Punctuation parsing errors lead to low parsing accuracy on words. In this work, we propose an alternative approach to addressing punctuation in dependency parsing. Rather than assigning lexical heads to punctuations, we treat punctuations as properties of their neighbouring words, used as features to guide the parser to build the dependency graph.
Integrating our method with an arc-standard parser yields a 93.06% unlabelled attachment score, which is the best accuracy by a single-model transition-based parser reported so far.", "keyphrases": ["transition-based parser", "punctuation", "neighboring word"]} +{"id": "rotman-reichart-2019-deep", "title": "Deep Contextualized Self-training for Low Resource Dependency Parsing", "abstract": "Neural dependency parsing has proven very effective, achieving state-of-the-art results on numerous domains and languages. Unfortunately, it requires large amounts of labeled data, which is costly and laborious to create. In this paper we propose a self-training algorithm that alleviates this annotation bottleneck by training a parser on its own output. Our Deep Contextualized Self-training (DCST) algorithm utilizes representation models trained on sequence labeling tasks that are derived from the parser's output when applied to unlabeled data, and integrates these models with the base parser through a gating mechanism. We conduct experiments across multiple languages, both in low resource in-domain and in cross-domain setups, and demonstrate that DCST substantially outperforms traditional self-training as well as recent semi-supervised training methods.", "keyphrases": ["sequence labeling task", "base parser", "deep contextualized self-training"]} +{"id": "dras-2015-squibs", "title": "Squibs: Evaluating Human Pairwise Preference Judgments", "abstract": "Human evaluation plays an important role in NLP, often in the form of preference judgments. Although there has been some use of classical non-parametric and bespoke approaches to evaluating these sorts of judgments, there is an entire body of work on this in the context of sensory discrimination testing and the human judgments that are central to it, backed by rigorous statistical theory and freely available software, that NLP can draw on. We investigate one approach, Log-Linear Bradley-Terry models, and apply it to sample NLP data.", "keyphrases": ["formulation", "irt", "bayesian model"]} +{"id": "oflazer-durgar-el-kahlout-2007-exploring", "title": "Exploring Different Representational Units in English-to-Turkish Statistical Machine Translation", "abstract": "We investigate different representational granularities for sub-lexical representation in statistical machine translation work from English to Turkish. We find that (i) representing both Turkish and English at the morpheme-level but with some selective morpheme-grouping on the Turkish side of the training data, (ii) augmenting the training data with \"sentences\" comprising only the content words of the original training data to bias root word alignment, (iii) reranking the n-best morpheme-sequence outputs of the decoder with a word-based language model, and (iv) using model iteration all provide a non-trivial improvement over a fully word-based baseline. Despite our very limited training data, we improve from 20.22 BLEU points for our simplest model to 25.08 BLEU points for an improvement of 4.86 points or 24% relative.", "keyphrases": ["sub-lexical representation", "machine translation work", "spe"]} +{"id": "babko-malaya-etal-2004-proposition", "title": "Proposition Bank II: Delving Deeper", "abstract": "The PropBank project is creating a corpus of text annotated with information about basic semantic propositions. PropBank I (Kingsbury & Palmer, 2002) added a layer of predicate-argument information, or semantic roles, to the syntactic structures of the English Penn Treebank.
This paper presents an overview of the second phase of PropBank Annotation, PropBank II, which is being applied to English and Chinese, and includes (Neodavidsonian) eventuality variables, nominal references, sense tagging, and connections to the Penn Discourse Treebank (PDTB), a project for annotating discourse connectives and their arguments.", "keyphrases": ["propbank", "project", "discourse connective"]} +{"id": "wu-etal-2010-complexity", "title": "Complexity Metrics in an Incremental Right-Corner Parser", "abstract": "Hierarchical HMM (HHMM) parsers make promising cognitive models: while they use a bounded model of working memory and pursue incremental hypotheses in parallel, they still achieve parsing accuracies competitive with chart-based techniques. This paper aims to validate that a right-corner HHMM parser is also able to produce complexity metrics, which quantify a reader's incremental difficulty in understanding a sentence. Besides defining standard metrics in the HHMM framework, a new metric, embedding difference, is also proposed, which tests the hypothesis that HHMM store elements represent syntactic working memory. Results show that HHMM surprisal outperforms all other evaluated metrics in predicting reading times, and that embedding difference makes a significant, independent contribution.", "keyphrases": ["complexity metric", "syntactic structure", "latency"]} +{"id": "zhuo-etal-2016-segment", "title": "Segment-Level Sequence Modeling using Gated Recursive Semi-Markov Conditional Random Fields", "abstract": "Most of the sequence tagging tasks in natural language processing require recognizing segments with a certain syntactic role or semantic meaning in a sentence. They are usually tackled with Conditional Random Fields (CRFs), which do indirect word-level modeling over word-level features and thus cannot make full use of segment-level information. Semi-Markov Conditional Random Fields (Semi-CRFs) model segments directly but extracting segment-level features for Semi-CRFs is still a very challenging problem. This paper presents Gated Recursive Semi-CRFs (grSemi-CRFs), which model segments directly and automatically learn segment-level features through a gated recursive convolutional neural network. Our experiments on text chunking and named entity recognition (NER) demonstrate that grSemi-CRFs generally outperform other neural models.", "keyphrases": ["modeling", "semi-crfs", "segment-level feature", "entity recognition"]} +{"id": "braud-etal-2017-syntax", "title": "Does syntax help discourse segmentation? Not so much", "abstract": "Discourse segmentation is the first step in building discourse parsers. Most work on discourse segmentation does not scale to real-world discourse parsing across languages, for two reasons: (i) models rely on constituent trees, and (ii) experiments have relied on gold standard identification of sentence and token boundaries. We therefore investigate to what extent constituents can be replaced with universal dependencies, or left out completely, as well as how state-of-the-art segmenters fare in the absence of sentence boundaries.
Our results show that dependency information is less useful than expected, but we provide a fully scalable, robust model that only relies on part-of-speech information, and show that it performs well across languages in the absence of any gold-standard annotation.", "keyphrases": ["discourse segmentation", "dependency information", "part-of-speech information", "pos tag"]} +{"id": "mackay-kondrak-2005-computing", "title": "Computing Word Similarity and Identifying Cognates with Pair Hidden Markov Models", "abstract": "We present a system for computing similarity between pairs of words. Our system is based on Pair Hidden Markov Models, a variation on Hidden Markov Models that has been used successfully for the alignment of biological sequences. The parameters of the model are automatically learned from training data that consists of word pairs known to be similar. Our tests focus on the identification of cognates\u2014words of common origin in related languages. The results show that our system outperforms previously proposed techniques.", "keyphrases": ["cognate", "identification", "language study"]} +{"id": "dehdari-etal-2016-bira", "title": "BIRA: Improved Predictive Exchange Word Clustering", "abstract": "Word clusters are useful for many NLP tasks including training neural network language models, but current increases in datasets are outpacing the ability of word clusterers to handle them. Little attention has been paid thus far to inducing high-quality word clusters at a large scale. The predictive exchange algorithm is quite scalable, but sometimes does not provide as good perplexity as other slower clustering algorithms. We introduce the bidirectional, interpolated, refining, and alternating (BIRA) predictive exchange algorithm. It improves upon the predictive exchange algorithm\u2019s perplexity by up to 18%, giving it perplexities comparable to the slower two-sided exchange algorithm, and better perplexities than the slower Brown clustering algorithm. Our BIRA implementation is fast, clustering a 2.5 billion token English News Crawl corpus in 3 hours. It also reduces machine translation training time while preserving translation quality. Our implementation is portable and freely available.", "keyphrases": ["cluster", "predictive exchange algorithm", "refining"]} +{"id": "zhang-chan-2009-dependency", "title": "Dependency Parsing with Energy-based Reinforcement Learning", "abstract": "We present a model which integrates dependency parsing with reinforcement learning based on a Markov decision process. At each time step, a transition is picked to construct the dependency tree in terms of the long-run reward. The optimal policy for choosing transitions can be found with the SARSA algorithm. In SARSA, an approximation of the state-action function can be obtained by calculating the negative free energies for the Restricted Boltzmann Machine.
The experimental results on CoNLL-X multilingual data show that the proposed model achieves comparable results with the current state-of-the-art methods.", "keyphrases": ["reinforcement learning", "optimal policy", "restricted boltzmann machine"]} +{"id": "reddy-knight-2011-unsupervised", "title": "Unsupervised Discovery of Rhyme Schemes", "abstract": "This paper describes an unsupervised, language-independent model for finding rhyme schemes in poetry, using no prior knowledge about rhyme or pronunciation.", "keyphrases": ["rhyme scheme", "poetry", "pronunciation"]} +{"id": "liu-etal-2018-narrative", "title": "Narrative Modeling with Memory Chains and Semantic Supervision", "abstract": "Story comprehension requires a deep semantic understanding of the narrative, making it a challenging task. Inspired by previous studies on ROC Story Cloze Test, we propose a novel method, tracking various semantic aspects with external neural memory chains while encouraging each to focus on a particular semantic aspect. Evaluated on the task of story ending prediction, our model demonstrates superior performance to a collection of competitive baselines, setting a new state of the art.", "keyphrases": ["story", "semantic aspect", "event sequence"]} +{"id": "furstenau-lapata-2009-graph", "title": "Graph Alignment for Semi-Supervised Semantic Role Labeling", "abstract": "Unknown lexical items present a major obstacle to the development of broad-coverage semantic role labeling systems. We address this problem with a semi-supervised learning approach which acquires training instances for unseen verbs from an unlabeled corpus. Our method relies on the hypothesis that unknown lexical items will be structurally and semantically similar to known items for which annotations are available. Accordingly, we represent known and unknown sentences as graphs, formalize the search for the most similar verb as a graph alignment problem and solve the optimization using integer linear programming. Experimental results show that role labeling performance for unknown lexical items improves with training data produced automatically by our method.", "keyphrases": ["semi-supervised learning approach", "unseen verb", "fu\u0308rstenau"]} +{"id": "hazarika-etal-2018-icon", "title": "ICON: Interactive Conversational Memory Network for Multimodal Emotion Detection", "abstract": "Emotion recognition in conversations is crucial for building empathetic machines. Present works in this domain do not explicitly consider the inter-personal influences that thrive in the emotional dynamics of dialogues. To this end, we propose Interactive COnversational memory Network (ICON), a multimodal emotion detection framework that extracts multimodal features from conversational videos and hierarchically models the self- and inter-speaker emotional influences into global memories. Such memories generate contextual summaries which aid in predicting the emotional orientation of utterance-videos. Our model outperforms state-of-the-art networks on multiple classification and regression tasks in two benchmark datasets.", "keyphrases": ["emotion recognition", "conversational video", "inter-speaker emotional influence"]} +{"id": "eskander-etal-2013-processing", "title": "Processing Spontaneous Orthography", "abstract": "In cases in which there is no standard orthography for a language or language variant, written texts will display a variety of orthographic choices. 
This is problematic for natural language processing (NLP) because it creates spurious data sparseness. We study the transformation of spontaneously spelled Egyptian Arabic into a conventionalized orthography which we have previously proposed for NLP purposes. We show that a two-stage process can reduce divergences from this standard by 69%, making subsequent processing of Egyptian Arabic easier.", "keyphrases": ["orthography", "script", "arabic script"]} +{"id": "singh-etal-2010-constraint", "title": "Constraint-Driven Rank-Based Learning for Information Extraction", "abstract": "Most learning algorithms for undirected graphical models require complete inference over at least one instance before parameter updates can be made. SampleRank is a rank-based learning framework that alleviates this problem by updating the parameters during inference. Most semi-supervised learning algorithms also perform full inference on at least one instance before each parameter update. We extend SampleRank to semi-supervised learning in order to circumvent this computational bottleneck. Different approaches to incorporate unlabeled data and prior knowledge into this framework are explored. When evaluated on a standard information extraction dataset, our method significantly outperforms the supervised method, and matches results of a competing state-of-the-art semi-supervised learning approach.", "keyphrases": ["learning algorithm", "samplerank", "unlabeled data"]} +{"id": "pater-etal-2012-learning", "title": "Learning probabilities over underlying representations", "abstract": "We show that a class of cases that has been previously studied in terms of learning of abstract phonological underlying representations (URs) can be handled by a learner that chooses URs from a contextually conditioned distribution over observed surface representations. We implement such a learner in a Maximum Entropy version of Optimality Theory, in which UR learning is an instance of semi-supervised learning. Our objective function incorporates a term aimed to ensure generalization, independently required for phonotactic learning in Optimality Theory, and does not have a bias for single URs for morphemes. This learner is successful on a test language provided by Tesar (2006) as a challenge for UR learning. We also provide successful results on learning of a toy case modeled on French vowel alternations, which have also been previously analyzed in terms of abstract URs. This case includes lexically conditioned variation, an aspect of the data that cannot be handled by abstract URs, showing that in this respect our approach is more general.", "keyphrases": ["urs", "learner", "morpheme", "underlying form"]} +{"id": "chuang-yeh-2005-aligning", "title": "Aligning Parallel Bilingual Corpora Statistically with Punctuation Criteria", "abstract": "We present a new approach to aligning sentences in bilingual parallel corpora based on punctuation, especially for English and Chinese. Although the length-based approach produces high accuracy rates of sentence alignment for clean parallel corpora written in two Western languages, such as French-English or German-English, it does not work as well for parallel corpora that are noisy or written in two disparate languages such as Chinese-English. It is possible to use cognates on top of the length-based approach to increase the alignment accuracy. However, cognates do not exist between two disparate languages, which limits the applicability of the cognate-based approach.
In this paper, we examine the feasibility of exploiting the statistically ordered matching of punctuation marks in two languages to achieve high accuracy sentence alignment. We have experimented with an implementation of the proposed method on parallel corpora, the Chinese-English Sinorama Magazine Corpus and Scientific American Magazine articles, with satisfactory results. Compared with the length-based method, the proposed method exhibits better precision rates based on our experimental results. Highly promising improvement was observed when both the punctuation-based and length-based methods were adopted within a common statistical framework. We also demonstrate that the method can be applied to other language pairs, such as English-Japanese, with minimal additional effort.", "keyphrases": ["chinese", "sentence alignment", "punctuation mark"]} +{"id": "kim-etal-2018-efficient", "title": "Efficient Large-Scale Neural Domain Classification with Personalized Attention", "abstract": "In this paper, we explore the task of mapping spoken language utterances to one of thousands of natural language understanding domains in intelligent personal digital assistants (IPDAs). This scenario is observed in mainstream IPDAs in industry that allow third parties to develop thousands of new domains to augment built-in first party domains to rapidly increase domain coverage and overall IPDA capabilities. We propose a scalable neural model architecture with a shared encoder, a novel attention mechanism that incorporates personalization information and domain-specific classifiers that solves the problem efficiently. Our architecture is designed to efficiently accommodate incremental domain additions achieving two orders of magnitude speed up compared to full model retraining. We consider the practical constraints of real-time production systems, and design to minimize memory footprint and runtime latency. We demonstrate that incorporating personalization significantly improves domain classification accuracy in a setting with thousands of overlapping domains.", "keyphrases": ["personalization", "new domain", "domain classification accuracy"]} +{"id": "gupta-etal-2015-reval", "title": "ReVal: A Simple and Effective Machine Translation Evaluation Metric Based on Recurrent Neural Networks", "abstract": "Many state-of-the-art Machine Translation (MT) evaluation metrics are complex, involve extensive external resources (e.g. for paraphrasing) and require tuning to achieve best results. We present a simple alternative approach based on dense vector spaces and recurrent neural networks (RNNs), in particular Long Short Term Memory (LSTM) networks. For WMT-14, our new metric scores best for two out of five language pairs, and overall best and second best on all language pairs, using Spearman and Pearson correlation, respectively. We also show how training data is computed automatically from WMT ranks data.", "keyphrases": ["evaluation metric", "tree-lstm", "reference translation"]} +{"id": "lo-etal-2018-accurate", "title": "Accurate semantic textual similarity for cleaning noisy parallel corpora using semantic machine translation evaluation metric: The NRC supervised submissions to the Parallel Corpus Filtering task", "abstract": "We present our semantic textual similarity approach in filtering a noisy web crawled parallel corpus using YiSi\u2014a novel semantic machine translation evaluation metric.
The systems mainly based on this supervised approach perform well in the WMT18 Parallel Corpus Filtering shared task (4th place in 100-million-word evaluation, 8th place in 10-million-word evaluation, and 6th place overall, out of 48 submissions). In fact, our best performing system\u2014NRC-yisi-bicov is one of the only four submissions ranked top 10 in both evaluations. Our submitted systems also include some initial filtering steps for scaling down the size of the test corpus and a final redundancy removal step for better semantic and token coverage of the filtered corpus. In this paper, we also describe our unsuccessful attempt at automatically synthesizing a noisy parallel development corpus for tuning the weights to combine different parallelism and fluency features.", "keyphrases": ["parallel corpus", "textual similarity approach", "noisy web"]} +{"id": "renduchintala-etal-2016-creating", "title": "Creating Interactive Macaronic Interfaces for Language Learning", "abstract": "We present a prototype of a novel technology for second language instruction. Our learn-by-reading approach lets a human learner acquire new words and constructions by encountering them in context. To facilitate reading comprehension, our technology presents mixed native language (L1) and second language (L2) sentences to a learner and allows them to interact with the sentences to make the sentences easier (more L1-like) or harder (more L2-like) to read. Eventually, our system should continuously track a learner\u2019s knowledge and learning style by modeling their interactions, including performance on a pop quiz feature. This will allow our system to generate personalized mixed-language texts for learners.", "keyphrases": ["language instruction", "learner", "user interface"]} +{"id": "johny-etal-2021-finite", "title": "Finite-state script normalization and processing utilities: The Nisaba Brahmic library", "abstract": "This paper presents an open-source library for efficient low-level processing of ten major South Asian Brahmic scripts. The library provides a flexible and extensible framework for supporting crucial operations on Brahmic scripts, such as NFC, visual normalization, reversible transliteration, and validity checks, implemented in Python within a finite-state transducer formalism. We survey some common Brahmic script issues that may adversely affect the performance of downstream NLP tasks, and provide the rationale for finite-state design and system implementation details.", "keyphrases": ["normalization", "open-source library", "brahmic script"]} +{"id": "castelli-etal-2020-techqa", "title": "The TechQA Dataset", "abstract": "We introduce TECHQA, a domain-adaptation question answering dataset for the technical support domain. The TECHQA corpus highlights two real-world issues from the automated customer support domain. First, it contains actual questions posed by users on a technical forum, rather than questions generated specifically for a competition or a task. Second, it has a real-world size \u2013 600 training, 310 dev, and 490 evaluation question/answer pairs \u2013 thus reflecting the cost of creating large labeled datasets with actual data. Hence, TECHQA is meant to stimulate research in domain adaptation rather than as a resource to build QA systems from scratch. TECHQA was obtained by crawling the IBM Developer and DeveloperWorks forums for questions with accepted answers provided in an IBM Technote\u2014a technical document that addresses a specific technical issue.
We also release a collection of the 801,998 Technotes available on the web as of April 4, 2019 as a companion resource that can be used to learn representations of the IT domain language.", "keyphrases": ["support domain", "actual question", "real-world size"]} +{"id": "wang-etal-2020-learning-efficient", "title": "Learning Efficient Dialogue Policy from Demonstrations through Shaping", "abstract": "Training a task-oriented dialogue agent with reinforcement learning is prohibitively expensive since it requires a large volume of interactions with users. Human demonstrations can be used to accelerate learning progress. However, how to effectively leverage demonstrations to learn dialogue policy remains less explored. In this paper, we present S2Agent that efficiently learns dialogue policy from demonstrations through policy shaping and reward shaping. We use an imitation model to distill knowledge from demonstrations, based on which policy shaping estimates feedback on how the agent should act in policy space. Reward shaping is then incorporated to bonus state-actions similar to demonstrations explicitly in value space encouraging better exploration. The effectiveness of the proposed S2Agent is demonstrated in three dialogue domains and a challenging domain adaptation task with both user simulator evaluation and human evaluation.", "keyphrases": ["dialogue policy", "reward shaping", "imitation model"]} +{"id": "feng-etal-2012-characterizing", "title": "Characterizing Stylistic Elements in Syntactic Structure", "abstract": "Many of the writing styles recognized in rhetorical and composition theories involve deep syntactic elements. However, most previous research for computational stylometric analysis has relied on shallow lexico-syntactic patterns. Some very recent work has shown that PCFG models can detect distributional differences in syntactic styles, but without offering much insight into exactly what constitute salient stylistic elements in sentence structure characterizing each authorship. In this paper, we present a comprehensive exploration of syntactic elements in writing styles, with particular emphasis on interpretable characterization of stylistic elements. We present analytic insights with respect to the authorship attribution task in two different domains.", "keyphrases": ["style", "previous research", "stylometric analysis"]} +{"id": "dai-etal-2018-fine", "title": "Fine-grained Structure-based News Genre Categorization", "abstract": "Journalists usually organize and present the contents of a news article following a well-defined structure. In this work, we propose a new task to categorize news articles based on their content presentation structures, which is beneficial for various NLP applications. We first define a small set of news elements considering their functions (e.g., introducing the main story or event, catching the reader's attention and providing details) in a news story and their writing style (narrative or expository), and then formally define four commonly used news article structures based on their selections and organizations of news elements.
We create an annotated dataset for structure-based news genre identification, and finally, we build a predictive model to assess the feasibility of this classification task using structure indicative features.", "keyphrases": ["small set", "news element", "indicative feature"]} +{"id": "recski-2016-building", "title": "Building Concept Graphs from Monolingual Dictionary Entries", "abstract": "We present the dict_to_4lang tool for processing entries of three monolingual dictionaries of English and mapping definitions to concept graphs following the 4lang principles of semantic representation introduced by Kornai (2010). 4lang representations are domain- and language-independent, and make use of only a very limited set of primitives to encode the meaning of all utterances. Our pipeline relies on the Stanford Dependency Parser for syntactic analysis; the dep_to_4lang module then builds directed graphs of concepts based on dependency relations between words in each definition. Several issues are handled by construction-specific rules that are applied to the output of dep_to_4lang. Manual evaluation suggests that ca. 75% of graphs built from the Longman Dictionary are either entirely correct or contain only minor errors. dict_to_4lang is available under an MIT license as part of the 4lang library and has been used successfully in measuring Semantic Textual Similarity (Recski and \u00c1cs, 2015). An interactive demo of core 4lang functionalities is available at .", "keyphrases": ["definition", "dependency relation", "2-edge"]} +{"id": "kato-etal-2018-construction", "title": "Construction of Large-scale English Verbal Multiword Expression Annotated Corpus", "abstract": "Multiword expressions (MWEs) consist of groups of tokens, which should be treated as a single syntactic or semantic unit. In this work, we focus on verbal MWEs (VMWEs), whose accurate recognition is challenging because they could be discontinuous (e.g., take ... off). Since previous English VMWE annotations are relatively small-scale in terms of VMWE occurrences and types, we conduct large-scale annotations of VMWEs on the Wall Street Journal portion of English Ontonotes by a combination of automatic annotations and crowdsourcing. Concretely, we first construct a VMWE dictionary based on the English-language Wiktionary. After that, we collect possible VMWE occurrences in Ontonotes and filter candidates with the help of gold dependency trees, then we formalize VMWE annotations as a multiword sense disambiguation problem to exploit crowdsourcing. As a result, we annotate 7,833 VMWE instances belonging to various categories, such as phrasal verbs, light verb constructions, and semi-fixed VMWEs. We hope this large-scale VMWE-annotated resource helps to develop models for MWE recognition and dependency parsing that are aware of English MWEs. Our resource is publicly available.", "keyphrases": ["multiword expression", "verbal mwe", "automatic annotation", "verb construction"]} +{"id": "laubli-etal-2013-statistical", "title": "Statistical Machine Translation for Automobile Marketing Texts", "abstract": "We describe a project on introducing an in-house statistical machine translation system for marketing texts from the automobile industry with the final aim of replacing manual translation with post-editing, based on the translation system.
The focus of the paper is the suitability of such texts for SMT; we present experiments in domain adaptation and decompounding that improve the baseline translation systems, the results of which are evaluated using automatic metrics as well as manual evaluation.", "keyphrases": ["automobile marketing", "solution text", "topical domain", "film subtitling"]} +{"id": "krone-etal-2020-learning", "title": "Learning to Classify Intents and Slot Labels Given a Handful of Examples", "abstract": "Intent classification (IC) and slot filling (SF) are core components in most goal-oriented dialogue systems. Current IC/SF models perform poorly when the number of training examples per class is small. We propose a new few-shot learning task, few-shot IC/SF, to study and improve the performance of IC and SF models on classes not seen at training time in ultra low resource scenarios. We establish a few-shot IC/SF benchmark by defining few-shot splits for three public IC/SF datasets, ATIS, TOP, and Snips. We show that two popular few-shot learning algorithms, model agnostic meta learning (MAML) and prototypical networks, outperform a fine-tuning baseline on this benchmark. Prototypical networks achieves significant gains in IC performance on the ATIS and TOP datasets, while both prototypical networks and MAML outperform the baseline with respect to SF on all three datasets. In addition, we demonstrate that joint training as well as the use of pre-trained language models, ELMo and BERT in our case, are complementary to these few-shot learning methods and yield further gains.", "keyphrases": ["intent", "training example", "maml", "few-shot learning method"]} +{"id": "takamura-etal-2016-discriminative", "title": "Discriminative Analysis of Linguistic Features for Typological Study", "abstract": "We address the task of automatically estimating the missing values of linguistic features by making use of the fact that some linguistic features in typological databases are informative to each other. The questions to address in this work are (i) how much predictive power do features have on the value of another feature? (ii) to what extent can we attribute this predictive power to genealogical or areal factors, as opposed to being provided by tendencies or implicational universals? To address these questions, we conduct a discriminative or predictive analysis on the typological database. Specifically, we use a machine-learning classifier to estimate the value of each feature of each language using the values of the other features, under different choices of training data: all the other languages, or all the other languages except for the ones having the same origin or area with the target language.", "keyphrases": ["predictive power", "implicational universal", "other feature"]} +{"id": "mota-grishman-2008-ne", "title": "Is this NE tagger getting old?", "abstract": "This paper focuses on the influence of changing the text time frame on the performance of a named entity tagger. We followed a twofold approach to investigate this subject: on the one hand, we analyzed a corpus that spans 8 years, and, on the other hand, we assessed the performance of a name tagger trained and tested on that corpus. We created 8 samples from the corpus, each drawn from the articles for a particular year. In terms of corpus analysis, we calculated the corpus similarity and names shared between samples. 
To see the effect on tagger performance, we implemented a semi-supervised name tagger based on co-training; then, we trained and tested our tagger on those samples. We observed that corpus similarity, names shared between samples, and tagger performance all decay as the time gap between the samples increases. Furthermore, we observed that the corpus similarity and names shared correlate with the tagger F-measure. These results show that named entity recognition systems may become obsolete in a short period of time.", "keyphrases": ["name tagger", "decay", "time gap"]} +{"id": "belinkov-etal-2020-linguistic", "title": "On the Linguistic Representational Power of Neural Machine Translation Models", "abstract": "Despite the recent success of deep neural networks in natural language processing and other spheres of artificial intelligence, their interpretability remains a challenge. We analyze the representations learned by neural machine translation (NMT) models at various levels of granularity and evaluate their quality through relevant extrinsic properties. In particular, we seek answers to the following questions: (i) How accurately is word structure captured within the learned representations, which is an important aspect in translating morphologically rich languages? (ii) Do the representations capture long-range dependencies, and effectively handle syntactically divergent languages? (iii) Do the representations capture lexical semantics? We conduct a thorough investigation along several parameters: (i) Which layers in the architecture capture each of these linguistic phenomena; (ii) How does the choice of translation unit (word, character, or subword unit) impact the linguistic properties captured by the underlying representations? (iii) Do the encoder and decoder learn differently and independently? (iv) Do the representations learned by multilingual NMT models capture the same amount of linguistic information as their bilingual counterparts? Our data-driven, quantitative evaluation illuminates important aspects in NMT models and their ability to capture various linguistic phenomena. We show that deep NMT models trained in an end-to-end fashion, without being provided any direct supervision during the training process, learn a non-trivial amount of linguistic information. Notable findings include the following observations: (i) Word morphology and part-of-speech information are captured at the lower layers of the model; (ii) In contrast, lexical semantics or non-local syntactic and semantic dependencies are better represented at the higher layers of the model; (iii) Representations learned using characters are more informed about word-morphology compared to those learned using subword units; and (iv) Representations learned by multilingual models are richer compared to bilingual models.", "keyphrases": ["relevant extrinsic property", "investigation", "linguistic information", "character-level representation"]} +{"id": "maharana-etal-2021-improving", "title": "Improving Generation and Evaluation of Visual Stories via Semantic Consistency", "abstract": "Story visualization is an underexplored task that falls at the intersection of many important research directions in both computer vision and natural language processing. In this task, given a series of natural language captions which compose a story, an agent must generate a sequence of images that correspond to the captions. 
Prior work has introduced recurrent generative models which outperform text-to-image synthesis models on this task. However, there is room for improvement of generated images in terms of visual quality, coherence and relevance. We present a number of improvements to prior modeling approaches, including (1) the addition of a dual learning framework that utilizes video captioning to reinforce the semantic alignment between the story and generated images, (2) a copy-transform mechanism for sequentially-consistent story visualization, and (3) MART-based transformers to model complex interactions between frames. We present ablation studies to demonstrate the effect of each of these techniques on the generative power of the model for both individual images as well as the entire narrative. Furthermore, due to the complexity and generative nature of the task, standard evaluation metrics do not accurately reflect performance. Therefore, we also provide an exploration of evaluation metrics for the model, focused on aspects of the generated frames such as the presence/quality of generated characters, the relevance to captions, and the diversity of the generated images. We also present correlation experiments of our proposed automated metrics with human evaluations.", "keyphrases": ["story visualization", "video captioning", "evaluation metric"]} +{"id": "kim-etal-2018-modeling", "title": "Modeling with Recurrent Neural Networks for Open Vocabulary Slots", "abstract": "Dealing with 'open-vocabulary' slots has been among the challenges in the natural language area. While recent studies on attention-based recurrent neural network (RNN) models have performed well in completing several language related tasks such as spoken language understanding and dialogue systems, there has been a lack of attempts to address filling slots that take on values from a virtually unlimited set. In this paper, we propose a new RNN model that can capture the vital concept: Understanding the role of a word may vary according to how long a reader focuses on a particular part of a sentence. The proposed model utilizes a long-term aware attention structure, positional encoding primarily considering the relative distance between words, and multi-task learning of a character-based language model and an intent detection model. We show that the model outperforms the existing RNN models with respect to discovering 'open-vocabulary' slots without any external information, such as a named entity database or knowledge base. In particular, we confirm that it performs better with a greater number of slots in a dataset, including unknown words, by evaluating the models on a dataset of several domains. In addition, the proposed model also demonstrates superior performance with regard to intent detection.", "keyphrases": ["aware attention structure", "encoding", "multi-task learning"]} +{"id": "su-markert-2008-words", "title": "From Words to Senses: A Case Study of Subjectivity Recognition", "abstract": "We determine the subjectivity of word senses. To avoid costly annotation, we evaluate how useful existing resources established in opinion mining are for this task. We show that results achieved with existing resources that are not tailored towards word sense subjectivity classification can rival results achieved with supervision on a manually annotated training set.
However, results with different resources vary substantially and are dependent on the different definitions of subjectivity used in the establishment of the resources.", "keyphrases": ["subjectivity", "training set", "similar task", "agreement"]} +{"id": "drabek-yarowsky-2005-induction", "title": "Induction of Fine-Grained Part-of-Speech Taggers via Classifier Combination and Crosslingual Projection", "abstract": "This paper presents an original approach to part-of-speech tagging of fine-grained features (such as case, aspect, and adjective person/number) in languages such as English where these properties are generally not morphologically marked. \n \nThe goals of such rich lexical tagging in English are to provide additional features for word alignment models in bilingual corpora (for statistical machine translation), and to provide an information source for part-of-speech tagger induction in new languages via tag projection across bilingual corpora. \n \nFirst, we present a classifier-combination approach to tagging English bitext with very fine-grained part-of-speech tags necessary for annotating morphologically richer languages such as Czech and French, combining the extracted features of three major English parsers, and achieve fine-grained-tag-level syntactic analysis accuracy higher than any individual parser. \n \nSecond, we present experimental results for the cross-language projection of part-of-speech taggers in Czech and French via word-aligned bitext, achieving successful fine-grained part-of-speech tagging of these languages without any Czech or French training data of any kind.", "keyphrases": ["tagger", "french", "rich language"]} +{"id": "cui-etal-2020-unsupervised", "title": "Unsupervised Natural Language Inference via Decoupled Multimodal Contrastive Learning", "abstract": "We propose to solve the natural language inference problem without any supervision from the inference labels via task-agnostic multimodal pretraining. Although recent studies of multimodal self-supervised learning also represent the linguistic and visual context, their encoders for different modalities are coupled. Thus they cannot incorporate visual information when encoding plain text alone. In this paper, we propose Multimodal Aligned Contrastive Decoupled learning (MACD) network. MACD forces the decoupled text encoder to represent the visual information via contrastive learning. Therefore, it embeds visual knowledge even for plain text inference. We conducted comprehensive experiments over plain text inference datasets (i.e. SNLI and STS-B). The unsupervised MACD even outperforms the fully-supervised BiLSTM and BiLSTM+ELMO on STS-B.", "keyphrases": ["multimodal", "contrastive learning", "text encoder"]} +{"id": "brown-2008-exploiting", "title": "Exploiting Document-Level Context for Data-Driven Machine Translation", "abstract": "This paper presents a method for exploiting document-level similarity between the documents in the training corpus for a corpus-driven (statistical or example-based) machine translation system and the input documents it must translate. The method is simple to implement, efficient (increases the translation time of an example-based system by only a few percent), and robust (still works even when the actual document boundaries in the input text are not known). 
Experiments on French-English and Arabic-English showed relative gains over the same system without using document-level similarity of up to 7.4% and 5.4%, respectively, on the BLEU metric.", "keyphrases": ["document-level similarity", "training corpus", "input document"]} +{"id": "talukdar-etal-2006-context", "title": "A Context Pattern Induction Method for Named Entity Extraction", "abstract": "We present a novel context pattern induction method for information extraction, specifically named entity extraction. Using this method, we extended several classes of seed entity lists into much larger high-precision lists. Using token membership in these extended lists as additional features, we improved the accuracy of a conditional random field-based named entity tagger. In contrast, features derived from the seed lists decreased extractor accuracy.", "keyphrases": ["seed entity list", "unlabeled data", "other approach"]} +{"id": "kaeshammer-2013-synchronous", "title": "Synchronous Linear Context-Free Rewriting Systems for Machine Translation", "abstract": "We propose synchronous linear context-free rewriting systems as an extension to synchronous context-free grammars in which synchronized non-terminals span k \u2265 1 continuous blocks on each side of the bitext. Such discontinuous constituents are required for inducing certain alignment configurations that occur relatively frequently in manually annotated parallel corpora and that cannot be generated with less expressive grammar formalisms. As part of our investigations concerning the minimal k that is required for inducing manual alignments, we present a hierarchical aligner in form of a deduction system. We find that by restricting k to 2 on both sides, 100% of the data can be covered.", "keyphrases": ["constituent", "alignment configuration", "translation model"]} +{"id": "moon-etal-2018-multimodal", "title": "Multimodal Named Entity Recognition for Short Social Media Posts", "abstract": "We introduce a new task called Multimodal Named Entity Recognition (MNER) for noisy user-generated data such as tweets or Snapchat captions, which comprise short text with accompanying images. These social media posts often come in inconsistent or incomplete syntax and lexical notations with very limited surrounding textual contexts, bringing significant challenges for NER. To this end, we create a new dataset for MNER called SnapCaptions (Snapchat image-caption pairs submitted to public and crowd-sourced stories with fully annotated named entities). We then build upon the state-of-the-art Bi-LSTM word/character based NER models with 1) a deep image network which incorporates relevant visual context to augment textual information, and 2) a generic modality-attention module which learns to attenuate irrelevant modalities while amplifying the most informative ones to extract contexts from, adaptive to each sample and token. The proposed MNER model with modality attention significantly outperforms the state-of-the-art text-only NER models by successfully leveraging provided visual contexts, opening up potential applications of MNER on myriads of social media platforms.", "keyphrases": ["entity recognition task", "social medium post", "multimodal ner network"]} +{"id": "wang-etal-2017-crowd", "title": "CROWD-IN-THE-LOOP: A Hybrid Approach for Annotating Semantic Roles", "abstract": "Crowdsourcing has proven to be an effective method for generating labeled data for a range of NLP tasks. 
However, multiple recent attempts of using crowdsourcing to generate gold-labeled training data for semantic role labeling (SRL) reported only modest results, indicating that SRL is perhaps too difficult a task to be effectively crowdsourced. In this paper, we postulate that while producing SRL annotation does require expert involvement in general, a large subset of SRL labeling tasks is in fact appropriate for the crowd. We present a novel workflow in which we employ a classifier to identify difficult annotation tasks and route each task either to experts or crowd workers according to their difficulties. Our experimental evaluation shows that the proposed approach reduces the workload for experts by over two-thirds, and thus significantly reduces the cost of producing SRL annotation at little loss in quality.", "keyphrases": ["gold-labeled training data", "semantic role labeling", "expert involvement", "crowd", "worker"]} +{"id": "horvat-etal-2015-hierarchical", "title": "Hierarchical Statistical Semantic Realization for Minimal Recursion Semantics", "abstract": "We introduce a robust statistical approach to realization from Minimal Recursion Semantics representations. The approach treats realization as a translation problem, transforming the Dependency MRS graph representation to a surface string. Translation is based on a Synchronous Context-Free Grammar that is automatically extracted from a large corpus of parsed sentences. We have evaluated the new approach on the Wikiwoods corpus, where it shows promising results.", "keyphrases": ["realization", "statistical approach", "translation problem"]} +{"id": "durmus-cardie-2018-exploring", "title": "Exploring the Role of Prior Beliefs for Argument Persuasion", "abstract": "Public debate forums provide a common platform for exchanging opinions on a topic of interest. While recent studies in natural language processing (NLP) have provided empirical evidence that the language of the debaters and their patterns of interaction play a key role in changing the mind of a reader, research in psychology has shown that prior beliefs can affect our interpretation of an argument and could therefore constitute a competing alternative explanation for resistance to changing one's stance. To study the actual effect of language use vs. prior beliefs on persuasion, we provide a new dataset and propose a controlled setting that takes into consideration two reader-level factors: political and religious ideology. We find that prior beliefs affected by these reader-level factors play a more important role than language use effects and argue that it is important to account for them in NLP studies of persuasion.", "keyphrases": ["belief", "persuasion", "debate forum"]} +{"id": "sipos-etal-2012-large", "title": "Large-Margin Learning of Submodular Summarization Models", "abstract": "In this paper, we present a supervised learning approach to training submodular scoring functions for extractive multidocument summarization. By taking a structured prediction approach, we provide a large-margin method that directly optimizes a convex relaxation of the desired performance measure. The learning method applies to all submodular summarization methods, and we demonstrate its effectiveness for both pairwise as well as coverage-based scoring functions on multiple datasets. 
Compared to state-of-the-art functions that were tuned manually, our method significantly improves performance and enables high-fidelity models with number of parameters well beyond what could reasonably be tuned by hand.", "keyphrases": ["structured output learning", "benchmark dataset", "greedy algorithm", "rouge score"]} +{"id": "arcan-etal-2014-identification", "title": "Identification of Bilingual Terms from Monolingual Documents for Statistical Machine Translation", "abstract": "This publication has emanated from research supported in part by a research grant from Science Foundation Ireland (SFI) under Grant Number SFI/12/RC/2289 and by the European Union supported projects EuroSentiment (Grant No. 296277), LIDER (Grant No. 610782) and MateCat (ICT-2011.4.2-287688).", "keyphrases": ["english-italian language pair", "translation equivalent", "disambiguation", "wiki machine"]} +{"id": "kachuee-etal-2021-self", "title": "Self-Supervised Contrastive Learning for Efficient User Satisfaction Prediction in Conversational Agents", "abstract": "Turn-level user satisfaction is one of the most important performance metrics for conversational agents. It can be used to monitor the agent's performance and provide insights about defective user experiences. While end-to-end deep learning has shown promising results, having access to a large number of reliable annotated samples required by these methods remains challenging. In a large-scale conversational system, there is a growing number of newly developed skills, making the traditional data collection, annotation, and modeling process impractical due to the required annotation costs and the turnaround times. In this paper, we suggest a self-supervised contrastive learning approach that leverages the pool of unlabeled data to learn user-agent interactions. We show that the pre-trained models using the self-supervised objective are transferable to the user satisfaction prediction. In addition, we propose a novel few-shot transfer learning approach that ensures better transferability for very small sample sizes. The suggested few-shot method does not require any inner loop optimization process and is scalable to very large datasets and complex models. Based on our experiments using real data from a large-scale commercial system, the suggested approach is able to significantly reduce the required number of annotations, while improving the generalization on unseen skills.", "keyphrases": ["user satisfaction prediction", "conversational system", "contrastive learning approach"]} +{"id": "sauri-etal-2005-evita", "title": "Evita: A Robust Event Recognizer For QA Systems", "abstract": "We present Evita, an application for recognizing events in natural language texts. Although developed as part of a suite of tools aimed at providing question answering systems with information about both temporal and intensional relations among events, it can be used independently as an event extraction tool. It is unique in that it is not limited to any pre-established list of relation types (events), nor is it restricted to a specific domain. Evita performs the identification and tagging of event expressions based on fairly simple strategies, informed by both linguistic- and statistically-based data. 
It achieves a performance ratio of 80.12% F-measure.", "keyphrases": ["event extractor", "rule-based module", "most verb"]} +{"id": "stamborg-etal-2012-using", "title": "Using Syntactic Dependencies to Solve Coreferences", "abstract": "This paper describes the structure of the LTH coreference solver used in the closed track of the CoNLL 2012 shared task (Pradhan et al., 2012). The solver core is a mention classifier that uses Soon et al. (2001)'s algorithm and features extracted from the dependency graphs of the sentences. \n \nThis system builds on Bjorkelund and Nugues (2011)'s solver that we extended so that it can be applied to the three languages of the task: English, Chinese, and Arabic. We designed a new mention detection module that removes pleonastic pronouns, prunes constituents, and recovers mentions when they do not match exactly a noun phrase. We carefully redesigned the features so that they reflect more complex linguistic phenomena as well as discourse properties. Finally, we introduced a minimal cluster model grounded in the first mention of an entity. \n \nWe optimized the feature sets for the three languages: We carried out an extensive evaluation of pairs of features and we complemented the single features with associations that improved the CoNLL score. We obtained the respective scores of 59.57, 56.62, and 48.25 on English, Chinese, and Arabic on the development set, 59.36, 56.85, and 49.43 on the test set, and the combined official score of 55.21.", "keyphrases": ["mention classifier", "dependency graph", "arabic"]} +{"id": "gupta-etal-2012-mining", "title": "Mining Hindi-English Transliteration Pairs from Online Hindi Lyrics", "abstract": "This paper describes a method to mine Hindi-English transliteration pairs from online Hindi song lyrics. The technique is based on the observations that lyrics are transliterated word-by-word, maintaining the precise word order. The mining task is nevertheless challenging because the Hindi lyrics and its transliterations are usually available from different, often unrelated, websites. Therefore, it is a non-trivial task to match the Hindi lyrics to their transliterated counterparts. Moreover, there are various types of noise in lyrics data that needs to be appropriately handled before songs can be aligned at word level. The mined data of 30823 unique Hindi-English transliteration pairs with an accuracy of more than 92% is available publicly. Although the present work reports mining of Hindi-English word pairs, the same technique can be easily adapted for other languages for which song lyrics are available online in native and Roman scripts.", "keyphrases": ["transliteration pair", "hindi song lyric", "word order"]} +{"id": "murphy-etal-2012-selecting", "title": "Selecting Corpus-Semantic Models for Neurolinguistic Decoding", "abstract": "Neurosemantics aims to learn the mapping between concepts and the neural activity which they elicit during neuroimaging experiments. Different approaches have been used to represent individual concepts, but current state-of-the-art techniques require extensive manual intervention to scale to arbitrary words and domains. To overcome this challenge, we initiate a systematic comparison of automatically-derived corpus representations, based on various types of textual co-occurrence. We find that dependency parse-based features are the most effective, achieving accuracies similar to the leading semi-manual approaches and higher than any published for a corpus-based model. 
We also find that simple word features enriched with directional information provide a close-to-optimal solution at much lower computational cost.", "keyphrases": ["corpus-based model", "brain", "various study"]} +{"id": "qian-etal-2018-hierarchical", "title": "Hierarchical CVAE for Fine-Grained Hate Speech Classification", "abstract": "Existing work on automated hate speech detection typically focuses on binary classification or on differentiating among a small set of categories. In this paper, we propose a novel method on a fine-grained hate speech classification task, which focuses on differentiating among 40 hate groups of 13 different hate group categories. We first explore the Conditional Variational Autoencoder (CVAE) as a discriminative model and then extend it to a hierarchical architecture to utilize the additional hate category information for more accurate prediction. Experimentally, we show that incorporating the hate category information for training can significantly improve the classification performance and our proposed model outperforms commonly-used discriminative models.", "keyphrases": ["hate speech detection", "binary classification", "hate group", "conditional variational autoencoder"]} +{"id": "qiu-etal-2019-dynamically", "title": "Dynamically Fused Graph Network for Multi-hop Reasoning", "abstract": "Text-based question answering (TBQA) has been studied extensively in recent years. Most existing approaches focus on finding the answer to a question within a single paragraph. However, many difficult questions require multiple supporting evidence from scattered text among two or more documents. In this paper, we propose Dynamically Fused Graph Network (DFGN), a novel method to answer those questions requiring multiple scattered evidence and reasoning over them. Inspired by human's step-by-step reasoning behavior, DFGN includes a dynamic fusion layer that starts from the entities mentioned in the given query, explores along the entity graph dynamically built from the text, and gradually finds relevant supporting entities from the given documents. We evaluate DFGN on HotpotQA, a public TBQA dataset requiring multi-hop reasoning. DFGN achieves competitive results on the public board. Furthermore, our analysis shows DFGN produces interpretable reasoning chains.", "keyphrases": ["multi-hop reasoning", "reasoning chain", "dynamic entity graph"]} +{"id": "jha-elhadad-2010-cancer", "title": "Cancer Stage Prediction Based on Patient Online Discourse", "abstract": "Forums and mailing lists dedicated to particular diseases are increasingly popular online. Automatically inferring the health status of a patient can be useful for both forum users and health researchers who study patients' online behaviors. In this paper, we focus on breast cancer forums and present a method to predict the stage of patients' cancers from their online discourse. We show that what the patients talk about (content-based features) and whom they interact with (social network-based features) provide complementary cues to predicting cancer stage and can be leveraged for better prediction. 
Our methods are extendable and can be applied to other tasks of acquiring contextual information about online health forum participants.", "keyphrases": ["patient", "discourse", "cancer stage"]} +{"id": "scott-etal-2012-corpus", "title": "Corpus Annotation as a Scientific Task", "abstract": "Annotation studies in CL are generally unscientific: they are mostly not reproducible, make use of too few (and often non-independent) annotators and use guidelines that are often something of a moving target. Additionally, the notion of 'expert annotators' invariably means only that the annotators have linguistic training. While this can be acceptable in some special contexts, it is often far from ideal. This is particularly the case when subtle judgements are required or when, as increasingly, one is making use of corpora originating from technical texts that have been produced by, and intended to be consumed by, an audience of technical experts in the field. We outline a more rigorous approach to collecting human annotations, using as our example a study designed to capture judgements on the meaning of hedge words in medical records.", "keyphrases": ["ambiguity", "structured data", "medical language"]} +{"id": "le-zuidema-2015-compositional", "title": "Compositional Distributional Semantics with Long Short Term Memory", "abstract": "We are proposing an extension of the recursive neural network that makes use of a variant of the long short-term memory architecture. The extension allows information low in parse trees to be stored in a memory register (the 'memory cell') and used much later higher up in the parse tree. This provides a solution to the vanishing gradient problem and allows the network to capture long range dependencies. Experimental results show that our composition outperformed the traditional neural-network composition on the Stanford Sentiment Treebank.", "keyphrases": ["recursive neural network", "memory cell", "tree-structured lstm", "composition function"]} +{"id": "wang-eisner-2018-surface", "title": "Surface Statistics of an Unknown Language Indicate How to Parse It", "abstract": "We introduce a novel framework for delexicalized dependency parsing in a new language. We show that useful features of the target language can be extracted automatically from an unparsed corpus, which consists only of gold part-of-speech (POS) sequences. Providing these features to our neural parser enables it to parse sequences like those in the corpus. Strikingly, our system has no supervision in the target language. Rather, it is a multilingual system that is trained end-to-end on a variety of other languages, so it learns a feature extractor that works well. We show experimentally across multiple languages: (1) Features computed from the unparsed corpus improve parsing accuracy. (2) Including thousands of synthetic languages in the training yields further improvement. (3) Despite being computed from unparsed corpora, our learned task-specific features beat previous work's interpretable typological features that require parsed corpora or expert categorization of the language. 
Our best method improved attachment scores on held-out test languages by an average of 5.6 percentage points over past work that does not inspect the unparsed data (McDonald et al., 2011), and by 20.7 points over past \u201cgrammar induction\u201d work that does not use training languages (Naseem et al., 2010).", "keyphrases": ["pos", "feature extractor", "word order"]} +{"id": "minard-etal-2016-meantime", "title": "MEANTIME, the NewsReader Multilingual Event and Time Corpus", "abstract": "In this paper, we present the NewsReader MEANTIME corpus, a semantically annotated corpus of Wikinews articles. The corpus consists of 480 news articles, i.e. 120 English news articles and their translations in Spanish, Italian, and Dutch. MEANTIME contains annotations at different levels. The document-level annotation includes markables (e.g. entity mentions, event mentions, time expressions, and numerical expressions), relations between markables (modeling, for example, temporal information and semantic role labeling), and entity and event intra-document coreference. The corpus-level annotation includes entity and event cross-document coreference. Semantic annotation on the English section was performed manually; for the annotation in Italian, Spanish, and (partially) Dutch, a procedure was devised to automatically project the annotations on the English texts onto the translated texts, based on the manual alignment of the annotated elements; this enabled us not only to speed up the annotation process but also provided cross-lingual coreference. The English section of the corpus was extended with timeline annotations for the SemEval 2015 TimeLine shared task. The \u201cFirst CLIN Dutch Shared Task\u201d at CLIN26 was based on the Dutch section, while the EVALITA 2016 FactA (Event Factuality Annotation) shared task, based on the Italian section, is currently being organized.", "keyphrases": ["wikinews article", "cross-document coreference", "temporal relation"]} +{"id": "magnini-etal-2006-cab", "title": "I-CAB: the Italian Content Annotation Bank", "abstract": "In this paper we present work in progress for the creation of the Italian Content Annotation Bank (I-CAB), a corpus of Italian news annotated with semantic information at different levels. The first level is represented by temporal expressions, the second level is represented by different types of entities (i.e. person, organizations, locations and geo-political entities), and the third level is represented by relations between entities (e.g. the affiliation relation connecting a person to an organization). So far I-CAB has been manually annotated with temporal expressions, person entities and organization entities. As we intend I-CAB to become a benchmark for various automatic Information Extraction tasks, we followed a policy of reusing already available markup languages. In particular, we adopted the annotation schemes developed for the ACE Entity Detection and Time Expressions Recognition and Normalization tasks. As the ACE guidelines have originally been developed for English, part of the effort consisted in adapting them to the specific morpho-syntactic features of Italian. 
Finally, we have extended them to include a wider range of entities, such as conjunctions.", "keyphrases": ["semantic information", "different type", "organization"]} +{"id": "turchi-etal-2013-coping", "title": "Coping with the Subjectivity of Human Judgements in MT Quality Estimation", "abstract": "Supervised approaches to NLP tasks rely on high-quality data annotations, which typically result from expensive manual labelling procedures. For some tasks, however, the subjectivity of human judgements might reduce the usefulness of the annotation for real-world applications. In Machine Translation (MT) Quality Estimation (QE), for instance, using human-annotated data to train a binary classifier that discriminates between good (useful for a post-editor) and bad translations is not trivial. Focusing on this binary task, we show that subjective human judgements can be effectively replaced with an automatic annotation procedure. To this aim, we compare binary classifiers trained on different data: the human-annotated dataset from the 7th Workshop on Statistical Machine Translation (WMT-12), and an automatically labelled version of the same corpus. Our results show that human labels are less suitable for the task.", "keyphrases": ["judgement", "quality standard", "notion"]} +{"id": "zhang-etal-2017-earth", "title": "Earth Mover's Distance Minimization for Unsupervised Bilingual Lexicon Induction", "abstract": "Cross-lingual natural language processing hinges on the premise that there exists invariance across languages. At the word level, researchers have identified such invariance in the word embedding semantic spaces of different languages. However, in order to connect the separate spaces, cross-lingual supervision encoded in parallel data is typically required. In this paper, we attempt to establish the cross-lingual connection without relying on any cross-lingual supervision. By viewing word embedding spaces as distributions, we propose to minimize their earth mover's distance, a measure of divergence between distributions. We demonstrate the success on the unsupervised bilingual lexicon induction task. In addition, we reveal an interesting finding that the earth mover's distance shows potential as a measure of language difference.", "keyphrases": ["semantic space", "different language", "recent method"]} +{"id": "agrawal-carpuat-2019-controlling", "title": "Controlling Text Complexity in Neural Machine Translation", "abstract": "This work introduces a machine translation task where the output is aimed at audiences of different levels of target language proficiency. We collect a high quality dataset of news articles available in English and Spanish, written for diverse grade levels and propose a method to align segments across comparable bilingual articles. The resulting dataset makes it possible to train multi-task sequence to sequence models that can translate and simplify text jointly. We show that these multi-task models outperform pipeline approaches that translate and simplify text independently.", "keyphrases": ["syntactic diversity", "translation quality", "inference time"]} +{"id": "faruqui-etal-2016-morpho", "title": "Morpho-syntactic Lexicon Generation Using Graph-based Semi-supervised Learning", "abstract": "Morpho-syntactic lexicons provide information about the morphological and syntactic roles of words in a language. Such lexicons are not available for all languages and even when available, their coverage can be limited. 
We present a graph-based semi-supervised learning method that uses the morphological, syntactic and semantic relations between words to automatically construct wide coverage lexicons from small seed sets. Our method is language-independent, and we show that we can expand a 1000 word seed lexicon to more than 100 times its size with high quality for 11 languages. In addition, the automatically created lexicons provide features that improve performance in two downstream tasks: morphological tagging and dependency parsing.", "keyphrases": ["learning method", "tagging", "morpho-syntactic lexicon"]} +{"id": "murty-etal-2021-dreca", "title": "DReCa: A General Task Augmentation Strategy for Few-Shot Natural Language Inference", "abstract": "Meta-learning promises few-shot learners that can adapt to new distributions by repurposing knowledge acquired from previous training. However, we believe meta-learning has not yet succeeded in NLP due to the lack of a well-defined task distribution, leading to attempts that treat datasets as tasks. Such an ad hoc task distribution causes problems of quantity and quality. Since there's only a handful of datasets for any NLP problem, meta-learners tend to overfit their adaptation mechanism and, since NLP datasets are highly heterogeneous, many learning episodes have poor transfer between their support and query sets, which discourages the meta-learner from adapting. To alleviate these issues, we propose DReCA (Decomposing datasets into Reasoning Categories), a simple method for discovering and using latent reasoning categories in a dataset, to form additional high quality tasks. DReCA works by splitting examples into label groups, embedding them with a finetuned BERT model and then clustering each group into reasoning categories. Across four few-shot NLI problems, we demonstrate that using DReCA improves the accuracy of meta-learners by 1.5-4%", "keyphrases": ["meta-learner", "nlp dataset", "support", "query set", "clustering"]} +{"id": "ferreira-etal-2016-jointly", "title": "Jointly Learning to Embed and Predict with Multiple Languages", "abstract": "We propose a joint formulation for learning task-specific cross-lingual word embeddings, along with classifiers for that task. Unlike prior work, which first learns the embeddings from parallel data and then plugs them in a supervised learning problem, our approach is one-shot: a single optimization problem combines a co-regularizer for the multilingual embeddings with a task-specific loss. We present theoretical results showing the limitation of Euclidean co-regularizers to increase the embedding dimension, a limitation which does not exist for other co-regularizers (such as the \u21131 distance). Despite its simplicity, our method achieves state-of-the-art accuracies on the RCV1/RCV2 dataset when transferring from English to German, with training times below 1 minute. On the TED Corpus, we obtain the highest reported scores on 10 out of 11 languages.", "keyphrases": ["word embedding", "single optimization problem", "co-regularizer"]} +{"id": "hossain-etal-2017-filling", "title": "Filling the Blanks (hint: plural noun) for Mad Libs Humor", "abstract": "Computerized generation of humor is a notoriously difficult AI problem. We develop an algorithm called Libitum that helps humans generate humor in a Mad Lib, which is a popular fill-in-the-blank game. The algorithm is based on a machine learned classifier that determines whether a potential fill-in word is funny in the context of the Mad Lib story. 
We use Amazon Mechanical Turk to create ground truth data and to judge humor for our classifier to mimic, and we make this data freely available. Our testing shows that Libitum successfully aids humans in filling in Mad Libs that are usually judged funnier than those filled in by humans with no computerized help. We go on to analyze why some words are better than others at making a Mad Lib funny.", "keyphrases": ["humor", "fill-in-the-blank game", "mad lib story"]} +{"id": "guillou-etal-2014-parcor", "title": "ParCor 1.0: A Parallel Pronoun-Coreference Corpus to Support Statistical MT", "abstract": "We present ParCor, a parallel corpus of texts in which pronoun coreference \u2015 reduced coreference in which pronouns are used as referring expressions \u2015 has been annotated. The corpus is intended to be used both as a resource from which to learn systematic differences in pronoun use between languages and ultimately for developing and testing informed Statistical Machine Translation systems aimed at addressing the problem of pronoun coreference in translation. At present, the corpus consists of a collection of parallel English-German documents from two different text genres: TED Talks (transcribed planned speech), and EU Bookshop publications (written text). All documents in the corpus have been manually annotated with respect to the type and location of each pronoun and, where relevant, its antecedent. We provide details of the texts that we selected, the guidelines and tools used to support annotation and some corpus statistics. The texts in the corpus have already been translated into many languages, and we plan to expand the corpus into these other languages, as well as other genres, in the future.", "keyphrases": ["pronoun", "detail", "annotation scheme"]} +{"id": "chen-etal-2017-automatically", "title": "Automatically Labeled Data Generation for Large Scale Event Extraction", "abstract": "Modern models of event extraction for tasks like ACE are based on supervised learning of events from small hand-labeled data. However, hand-labeled training data is expensive to produce, in low coverage of event types, and limited in size, which makes supervised methods hard to extract large scale of events for knowledge base population. To solve the data labeling problem, we propose to automatically label training data for event extraction via world knowledge and linguistic knowledge, which can detect key arguments and trigger words for each event type and employ them to label events in texts automatically. The experimental results show that the quality of our large scale automatically labeled data is competitive with elaborately human-labeled data. And our automatically labeled data can incorporate with human-labeled data, then improve the performance of models learned from these data.", "keyphrases": ["linguistic knowledge", "trigger word", "distance supervision"]} +{"id": "gormley-etal-2015-improved", "title": "Improved Relation Extraction with Feature-Rich Compositional Embedding Models", "abstract": "Compositional embedding models build a representation (or embedding) for a linguistic structure based on its component word embeddings. We propose a Feature-rich Compositional Embedding Model (FCM) for relation extraction that is expressive, generalizes to new domains, and is easy-to-implement. The key idea is to combine both (unlexicalized) hand-crafted features with learned word embeddings. 
The model is able to directly tackle the difficulties met by traditional compositional embedding models, such as handling arbitrary types of sentence annotations and utilizing global information for composition. We test the proposed model on two relation extraction tasks, and demonstrate that our model outperforms both previous compositional models and traditional feature-rich models on the ACE 2005 relation extraction task, and the SemEval 2010 relation classification task. The combination of our model and a log-linear classifier with hand-crafted features gives state-of-the-art results.", "keyphrases": ["word embedding", "entity pair", "pipelined manner"]} +{"id": "armendariz-etal-2020-cosimlex", "title": "CoSimLex: A Resource for Evaluating Graded Word Similarity in Context", "abstract": "State of the art natural language processing tools are built on context-dependent word embeddings, but no direct method for evaluating these representations currently exists. Standard tasks and datasets for intrinsic evaluation of embeddings are based on judgements of similarity, but ignore context; standard tasks for word sense disambiguation take account of context but do not provide continuous measures of meaning similarity. This paper describes an effort to build a new dataset, CoSimLex, intended to fill this gap. Building on the standard pairwise similarity task of SimLex-999, it provides context-dependent similarity measures; covers not only discrete differences in word sense but more subtle, graded changes in meaning; and covers not only a well-resourced language (English) but a number of less-resourced languages. We define the task and evaluation metrics, outline the dataset collection methodology, and describe the status of the dataset so far.", "keyphrases": ["intrinsic evaluation", "gap", "cosimlex dataset"]} +{"id": "marecek-etal-2011-two", "title": "Two-step translation with grammatical post-processing", "abstract": "This paper describes an experiment in which we try to automatically correct mistakes in grammatical agreement in English to Czech MT outputs. We perform several rule-based corrections on sentences parsed to dependency trees. We prove that it is possible to improve the MT quality of the majority of the systems participating in the WMT shared task. We performed both automatic (BLEU) and manual evaluations.", "keyphrases": ["correction", "dependency tree", "marecek"]} +{"id": "wang-etal-2019-crossweigh", "title": "CrossWeigh: Training Named Entity Tagger from Imperfect Annotations", "abstract": "Everyone makes mistakes. So do human annotators when curating labels for named entity recognition (NER). Such label mistakes might hurt model training and interfere with model comparison. In this study, we dive deep into one of the widely-adopted NER benchmark datasets, CoNLL03 NER. We are able to identify label mistakes in about 5.38% of test sentences, which is a significant ratio considering that the state-of-the-art test F1 score is already around 93%. Therefore, we manually correct these label mistakes and form a cleaner test set. Our re-evaluation of popular models on this corrected test set leads to more accurate assessments, compared to those on the original test set. More importantly, we propose a simple yet effective framework, CrossWeigh, to handle label mistakes during NER model training. Specifically, it partitions the training data into several folds and trains independent NER models to identify potential mistakes in each fold. 
Then it adjusts the weights of training data accordingly to train the final NER model. Extensive experiments demonstrate significant improvements from plugging various NER models into our proposed framework on three datasets. All implementations and the corrected test set are available at our Github repo .", "keyphrases": ["annotator", "fold", "search engine"]} +{"id": "zhang-etal-2012-big", "title": "Big Data versus the Crowd: Looking for Relationships in All the Right Places", "abstract": "Classically, training relation extractors relies on high-quality, manually annotated training data, which can be expensive to obtain. To mitigate this cost, NLU researchers have considered two newly available sources of less expensive (but potentially lower quality) labeled data from distant supervision and crowd sourcing. There is, however, no study comparing the relative impact of these two sources on the precision and recall of post-learning answers. To fill this gap, we empirically study how state-of-the-art techniques are affected by scaling these two sources. We use corpus sizes of up to 100 million documents and tens of thousands of crowd-source labeled examples. Our experiments show that increasing the corpus size for distant supervision has a statistically significant, positive impact on quality (F1 score). In contrast, human feedback has a positive and statistically significant, but lower, impact on precision and recall.", "keyphrases": ["ten", "crowdsourcing", "annotated data"]} +{"id": "he-golub-2016-character", "title": "Character-Level Question Answering with Attention", "abstract": "We show that a character-level encoder-decoder framework can be successfully applied to question answering with a structured knowledge base. We use our model for single-relation question answering and demonstrate the effectiveness of our approach on the SimpleQuestions dataset (Bordes et al., 2015), where we improve state-of-the-art accuracy from 63.9% to 70.9%, without use of ensembles. Importantly, our character-level model has 16x fewer parameters than an equivalent word-level model, can be learned with significantly less data compared to previous work, which relies on data augmentation, and is robust to new entities in testing.", "keyphrases": ["encoder-decoder framework", "character-level model", "entity name", "generative framework"]} +{"id": "thater-etal-2009-ranking", "title": "Ranking Paraphrases in Context", "abstract": "We present a vector space model that supports the computation of appropriate vector representations for words in context, and apply it to a paraphrase ranking task. An evaluation on the SemEval 2007 lexical substitution task data shows promising results: the model significantly outperforms a current state of the art model, and our treatment of context is effective.", "keyphrases": ["computation", "vector representation", "preference"]} +{"id": "zhao-etal-2020-reducing", "title": "Reducing Quantity Hallucinations in Abstractive Summarization", "abstract": "It is well-known that abstractive summaries are subject to hallucination\u2014including material that is not supported by the original text. While summaries can be made hallucination-free by limiting them to general phrases, such summaries would fail to be very informative. Alternatively, one can try to avoid hallucinations by verifying that any specific entities in the summary appear in the original text in a similar context. This is the approach taken by our system, Herman. 
The system learns to recognize and verify quantity entities (dates, numbers, sums of money, etc.) in a beam-worth of abstractive summaries produced by state-of-the-art models, in order to up-rank those summaries whose quantity terms are supported by the original text. Experimental results demonstrate that the ROUGE scores of such up-ranked summaries have a higher Precision than summaries that have not been up-ranked, without a comparable loss in Recall, resulting in higher F1. Preliminary human evaluation of up-ranked vs. original summaries shows people's preference for the former.", "keyphrases": ["hallucination", "inconsistent text", "text generation model", "core issue"]} +{"id": "chai-qu-2005-salience", "title": "A Salience Driven Approach to Robust Input Interpretation in Multimodal Conversational Systems", "abstract": "To improve the robustness in multimodal input interpretation, this paper presents a new salience driven approach. This approach is based on the observation that, during multimodal conversation, information from deictic gestures (e.g., point or circle) on a graphical display can signal a part of the physical world (i.e., representation of the domain and task) of the application which is salient during the communication. This salient part of the physical world will prime what users tend to communicate in speech and in turn can be used to constrain hypotheses for spoken language understanding, thus improving overall input interpretation. Our experimental results have indicated the potential of this approach in reducing word error rate and improving concept identification in multimodal conversation.", "keyphrases": ["salience", "gesture", "language understanding"]} +{"id": "sasano-kurohashi-2008-japanese", "title": "Japanese Named Entity Recognition Using Structural Natural Language Processing", "abstract": "This paper presents an approach that uses structural information for Japanese named entity recognition (NER). Our NER system is based on Support Vector Machine (SVM), and utilizes four types of structural information: cache features, coreference relations, syntactic features and caseframe features, which are obtained from structural analyses. We evaluated our approach on CRL NE data and obtained a higher F-measure than existing approaches that do not use structural information. We also conducted experiments on IREX NE data and an NE-annotated web corpus and confirmed that structural information improves the performance of NER.", "keyphrases": ["caseframe feature", "syntactic dependency feature", "japanese ner"]} +{"id": "jolly-etal-2020-data", "title": "Data-Efficient Paraphrase Generation to Bootstrap Intent Classification and Slot Labeling for New Features in Task-Oriented Dialog Systems", "abstract": "Recent progress through advanced neural models pushed the performance of task-oriented dialog systems to almost perfect accuracy on existing benchmark datasets for intent classification and slot labeling. However, in evolving real-world dialog systems, where new functionality is regularly added, a major additional challenge is the lack of annotated training data for such new functionality, as the necessary data collection efforts are laborious and time-consuming. A potential solution to reduce the effort is to augment initial seed data by paraphrasing existing utterances automatically. In this paper, we propose a new, data-efficient approach following this idea. 
Using an interpretation-to-text model for paraphrase generation, we are able to rely on existing dialog system training data, and, in combination with shuffling-based sampling techniques, we can obtain diverse and novel paraphrases from small amounts of seed data. In experiments on a public dataset and with a real-world dialog system, we observe improvements for both intent classification and slot labeling, demonstrating the usefulness of our approach.", "keyphrases": ["intent classification", "interpretation-to-text model", "novel paraphrase"]} +{"id": "agic-schluter-2018-baselines", "title": "Baselines and Test Data for Cross-Lingual Inference", "abstract": "The recent years have seen a revival of interest in textual entailment, sparked by i) the emergence of powerful deep neural network learners for natural language processing and ii) the timely development of large-scale evaluation datasets such as SNLI. Recast as natural language inference, the problem now amounts to detecting the relation between pairs of statements: they either contradict or entail one another, or they are mutually neutral. Current research in natural language inference is effectively exclusive to English. In this paper, we propose to advance the research in SNLI-style natural language inference toward multilingual evaluation. To that end, we provide test data for four major languages: Arabic, French, Spanish, and Russian. We experiment with a set of baselines. Our systems are based on cross-lingual word embeddings and machine translation. While our best system scores an average accuracy of just over 75%, we focus largely on enabling further research in multilingual inference.", "keyphrases": ["major language", "arabic", "french"]} +{"id": "liao-veeramachaneni-2009-simple", "title": "A Simple Semi-supervised Algorithm For Named Entity Recognition", "abstract": "We present a simple semi-supervised learning algorithm for named entity recognition (NER) using conditional random fields (CRFs). The algorithm is based on exploiting evidence that is independent from the features used for a classifier, which provides high-precision labels to unlabeled data. Such independent evidence is used to automatically extract high-accuracy and non-redundant data, leading to a much improved classifier at the next iteration. We show that our algorithm achieves an average improvement of 12 in recall and 4 in precision compared to the supervised algorithm. We also show that our algorithm achieves high accuracy when the training and test sets are from different domains.", "keyphrases": ["unlabeled data", "iteration", "self-training"]} +{"id": "wang-etal-2020-hat", "title": "HAT: Hardware-Aware Transformers for Efficient Natural Language Processing", "abstract": "Transformers are ubiquitous in Natural Language Processing (NLP) tasks, but they are difficult to be deployed on hardware due to the intensive computation. To enable low-latency inference on resource-constrained hardware platforms, we propose to design Hardware-Aware Transformers (HAT) with neural architecture search. We first construct a large design space with arbitrary encoder-decoder attention and heterogeneous layers. Then we train a SuperTransformer that covers all candidates in the design space, and efficiently produces many SubTransformers with weight sharing. Finally, we perform an evolutionary search with a hardware latency constraint to find a specialized SubTransformer dedicated to run fast on the target hardware. 
Extensive experiments on four machine translation tasks demonstrate that HAT can discover efficient models for different hardware (CPU, GPU, IoT device). When running WMT'14 translation task on Raspberry Pi-4, HAT can achieve 3x speedup, 3.7x smaller size over baseline Transformer; 2.7x speedup, 3.6x smaller size over Evolved Transformer with 12,041x less search cost and no performance loss. HAT is open-sourced at .", "keyphrases": ["hardware-aware transformers", "search", "translation task"]} +{"id": "kim-etal-2019-research", "title": "From Research to Production and Back: Ludicrously Fast Neural Machine Translation", "abstract": "This paper describes the submissions of the \u201cMarian\u201d team to the WNGT 2019 efficiency shared task. Taking our dominating submissions to the previous edition of the shared task as a starting point, we develop improved teacher-student training via multi-agent dual-learning and noisy backward-forward translation for Transformer-based student models. For efficient CPU-based decoding, we propose pre-packed 8-bit matrix products, improved batched decoding, cache-friendly student architectures with parameter sharing and light-weight RNN-based decoder architectures. GPU-based decoding benefits from the same architecture changes, from pervasive 16-bit inference and concurrent streams. These modifications together with profiler-based C++ code optimization allow us to push the Pareto frontier established during the 2018 edition towards 24x (CPU) and 14x (GPU) faster models at comparable or higher BLEU values. Our fastest CPU model is more than 4x faster than last year's fastest submission at more than 3 points higher BLEU. Our fastest GPU model at 1.5 seconds translation time is slightly faster than last year's fastest RNN-based submissions, but outperforms them by more than 4 BLEU and 10 BLEU points respectively.", "keyphrases": ["decoding", "cpu", "small student model"]} +{"id": "passonneau-etal-2009-making", "title": "Making Sense of Word Sense Variation", "abstract": "We present a pilot study of word-sense annotation using multiple annotators, relatively polysemous words, and a heterogenous corpus. Annotators selected senses for words in context, using an annotation interface that presented WordNet senses. Interannotator agreement (IA) results show that annotators agree well or not, depending primarily on the individual words and their general usage properties. Our focus is on identifying systematic differences across words and annotators that can account for IA variation. We identify three lexical use factors: semantic specificity of the context, sense concreteness, and similarity of senses. We discuss systematic differences in sense selection across annotators, and present the use of association rules to mine the data for systematic differences across annotators.", "keyphrases": ["pilot study", "annotator", "specificity"]} +{"id": "chowdhury-zamparelli-2018-rnn", "title": "RNN Simulations of Grammaticality Judgments on Long-distance Dependencies", "abstract": "The paper explores the ability of LSTM networks trained on a language modeling task to detect linguistic structures which are ungrammatical due to extraction violations (extra arguments and subject-relative clause island violations), and considers its implications for the debate on language innatism. 
The results show that the current RNN model can correctly classify (un)grammatical sentences, in certain conditions, but it is sensitive to linguistic processing factors and probably ultimately unable to induce a more abstract notion of grammaticality, at least in the domain we tested.", "keyphrases": ["grammaticality", "processing factor", "abstract notion"]} +{"id": "bohus-horvitz-2009-models", "title": "Models for Multiparty Engagement in Open-World Dialog", "abstract": "We present computational models that allow spoken dialog systems to handle multi-participant engagement in open, dynamic environments, where multiple people may enter and leave conversations, and interact with the system and with others in a natural manner. The models for managing the engagement process include components for (1) sensing the engagement state, actions and intentions of multiple agents in the scene, (2) making engagement decisions (i.e. whom to engage with, and when) and (3) rendering these decisions in a set of coordinated low-level behaviors in an embodied conversational agent. We review results from a study of interactions \"in the wild\" with a system that implements such a model.", "keyphrases": ["dialog system", "engagement state", "user disengagement"]} +{"id": "ravi-knight-2009-minimized", "title": "Minimized Models for Unsupervised Part-of-Speech Tagging", "abstract": "We describe a novel method for the task of unsupervised POS tagging with a dictionary, one that uses integer programming to explicitly search for the smallest model that explains the data, and then uses EM to set parameter values. We evaluate our method on a standard test corpus using different standard tagsets (a 45-tagset as well as a smaller 17-tagset), and show that our approach performs better than existing state-of-the-art systems in both settings.", "keyphrases": ["dictionary", "parameter value", "minimal set"]} +{"id": "petukhova-etal-2016-modelling", "title": "Modelling Multi-issue Bargaining Dialogues: Data Collection, Annotation Design and Corpus", "abstract": "The paper describes experimental dialogue data collection activities, as well as semantically annotated corpus creation undertaken within the EU-funded METALOGUE project (www.metalogue.eu). The project aims to develop a dialogue system with flexible dialogue management to enable the system's adaptive, reactive, interactive and proactive dialogue behavior in setting goals, choosing appropriate strategies and monitoring numerous parallel interpretation and management processes. To achieve these goals, a negotiation (or, more precisely, multi-issue bargaining) scenario has been considered as the specific setting and application domain. The dialogue corpus forms the basis for the design of task and interaction models of participants' negotiation behavior, and subsequently for dialogue system development which would be capable of replacing one of the negotiators. The METALOGUE corpus will be released to the community for research purposes.", "keyphrases": ["dialogue system", "negotiator", "metalogue corpus"]} +{"id": "ramadan-etal-2018-large", "title": "Large-Scale Multi-Domain Belief Tracking with Knowledge Sharing", "abstract": "Robust dialogue belief tracking is a key component in maintaining good quality dialogue systems. The tasks that dialogue systems are trying to solve are becoming increasingly complex, requiring scalability to multi-domain, semantically rich dialogues. 
However, most current approaches have difficulty scaling up with domains because of the dependency of the model parameters on the dialogue ontology. In this paper, a novel approach is introduced that fully utilizes semantic similarity between dialogue utterances and the ontology terms, allowing the information to be shared across domains. The evaluation is performed on a recently collected multi-domain dialogue dataset, one order of magnitude larger than currently available corpora. Our model demonstrates great capability in handling multi-domain dialogues, simultaneously outperforming existing state-of-the-art models in single-domain dialogue tracking tasks.", "keyphrases": ["ontology", "multi-domain dst", "state tracker", "bi-lstm"]}
+{"id": "landwehr-etal-2014-model", "title": "A Model of Individual Differences in Gaze Control During Reading", "abstract": "We develop a statistical model of saccadic eye movements during reading of isolated sentences. The model is focused on representing individual differences between readers and supports the inference of the most likely reader for a novel set of eye movement patterns. We empirically study the model for biometric reader identification using eye-tracking data collected from 20 individuals and observe that the model distinguishes between 20 readers with an accuracy of up to 98%.", "keyphrases": ["individual difference", "reading", "eye movement"]}
+{"id": "hu-etal-2019-multi", "title": "A Multi-Type Multi-Span Network for Reading Comprehension that Requires Discrete Reasoning", "abstract": "Rapid progress has been made in the field of reading comprehension and question answering, where several systems have achieved human parity in some simplified settings. However, the performance of these models degrades significantly when they are applied to more realistic scenarios, such as when answers involve various types, multiple text strings are correct answers, or discrete reasoning abilities are required. In this paper, we introduce the Multi-Type Multi-Span Network (MTMSN), a neural reading comprehension model that combines a multi-type answer predictor designed to support various answer types (e.g., span, count, negation, and arithmetic expression) with a multi-span extraction method for dynamically producing one or multiple text spans. In addition, an arithmetic expression reranking mechanism is proposed to rank expression candidates for further confirming the prediction. Experiments show that our model achieves 79.9 F1 on the DROP hidden test set, creating new state-of-the-art results. Source code is released to facilitate future work.", "keyphrases": ["comprehension", "reasoning ability", "span"]}
+{"id": "hua-wang-2020-pair", "title": "PAIR: Planning and Iterative Refinement in Pre-trained Transformers for Long Text Generation", "abstract": "Pre-trained Transformers have enabled impressive breakthroughs in generating long and fluent text, yet their outputs are often \u201crambling\u201d without coherently arranged content. In this work, we present a novel content-controlled text generation framework, PAIR, with planning and iterative refinement, which is built upon a large model, BART. We first adapt the BERT model to automatically construct the content plans, consisting of keyphrase assignments and their corresponding sentence-level positions. The BART model is employed for generation without modifying its structure. We then propose a refinement algorithm to gradually enhance the generation quality within the sequence-to-sequence framework. Evaluation with automatic metrics shows that adding planning consistently improves the generation quality on three distinct domains, with average improvements of 20 BLEU points and 12 METEOR points. In addition, human judges rate our system outputs as more relevant and coherent than comparisons without planning.", "keyphrases": ["refinement", "content plan", "generation quality"]}
+{"id": "reitter-moore-2007-predicting", "title": "Predicting Success in Dialogue", "abstract": "Task-solving in dialogue depends on the linguistic alignment of the interlocutors, which Pickering & Garrod (2004) have suggested is based on mechanistic repetition effects. In this paper, we seek confirmation of this hypothesis by looking at repetition in corpora, and whether repetition is correlated with task success. We show that the relevant repetition tendency is based on slow adaptation rather than short-term priming and demonstrate that lexical and syntactic repetition is a reliable predictor of task success given the first five minutes of a task-oriented dialogue.", "keyphrases": ["task success", "adaptation", "syntactic repetition", "speech data", "social variable"]}
+{"id": "rijhwani-preotiuc-pietro-2020-temporally", "title": "Temporally-Informed Analysis of Named Entity Recognition", "abstract": "Natural language processing models often have to make predictions on text data that evolves over time as a result of changes in language use or the information described in the text. However, evaluation results on existing data sets are seldom reported with the timestamp of the document taken into account. We analyze and propose methods that make better use of temporally-diverse training data, with a focus on the task of named entity recognition. To support these experiments, we introduce a novel data set of English tweets annotated with named entities. We empirically demonstrate the effect of temporal drift on performance, and how the temporal information of documents can be used to obtain better models compared to those that disregard temporal information. Our analysis gives insights into why this information is useful, in the hope of informing potential avenues of improvement for named entity recognition as well as other NLP tasks under similar experimental setups.", "keyphrases": ["other nlp task", "temporal generalization ability", "tweet stream", "fine-tuning scenario", "late data"]}
+{"id": "vertanen-kristensson-2011-imagination", "title": "The Imagination of Crowds: Conversational AAC Language Modeling using Crowdsourcing and Large Data Sources", "abstract": "Augmented and alternative communication (AAC) devices enable users with certain communication disabilities to participate in everyday conversations. Such devices often rely on statistical language models to improve text entry by offering word predictions. These predictions can be improved if the language model is trained on data that closely reflects the style of the users' intended communications. Unfortunately, there is no large dataset consisting of genuine AAC messages. In this paper we demonstrate how we can crowd-source the creation of a large set of fictional AAC messages. We show that these messages model conversational AAC better than the currently used datasets based on telephone conversations or newswire text. We leverage our crowdsourced messages to intelligently select sentences from much larger sets of Twitter, blog and Usenet data. Compared to a model trained only on telephone transcripts, our best-performing model reduced perplexity on three test sets of AAC-like communications by 60--82% relative. This translated to potential keystroke savings in a predictive keyboard interface of 5--11%.", "keyphrases": ["language model", "fictional aac message", "telephone transcript"]}
+{"id": "blodgett-etal-2016-demographic", "title": "Demographic Dialectal Variation in Social Media: A Case Study of African-American English", "abstract": "Though dialectal language is increasingly abundant on social media, few resources exist for developing NLP tools to handle such language. We conduct a case study of dialectal language in online conversational text by investigating African-American English (AAE) on Twitter. We propose a distantly supervised model to identify AAE-like language from demographics associated with geo-located messages, and we verify that this language follows well-known AAE linguistic phenomena. In addition, we analyze the quality of existing language identification and dependency parsing tools on AAE-like text, demonstrating that they perform poorly on such text compared to text associated with white speakers. We also provide an ensemble classifier for language identification which eliminates this disparity and release a new corpus of tweets containing AAE-like language.", "keyphrases": ["twitter", "demographic", "social medium"]}
+{"id": "li-etal-2017-noise", "title": "Noise Reduction Methods for Distantly Supervised Biomedical Relation Extraction", "abstract": "Distant supervision has been applied to automatically generate labeled data for biomedical relation extraction. Noise exists in both positively- and negatively-labeled data and affects the performance of supervised machine learning methods. In this paper, we propose three novel heuristics based on the notion of proximity, trigger word and confidence of patterns to leverage lexical and syntactic information to reduce the level of noise in the distantly labeled data. Experiments on three different tasks, extraction of protein-protein-interaction, miRNA-gene regulation relation and protein-localization event, show that the proposed methods can improve the F-score over the baseline by 6, 10 and 14 points for the three tasks, respectively. We also show that when the models are configured to output high-confidence results, high precision can be obtained using the proposed methods, making them promising for facilitating manual curation for databases.", "keyphrases": ["novel heuristic", "syntactic information", "noise"]}
+{"id": "ran-etal-2020-learning", "title": "Learning to Recover from Multi-Modality Errors for Non-Autoregressive Neural Machine Translation", "abstract": "Non-autoregressive neural machine translation (NAT) predicts the entire target sequence simultaneously and significantly accelerates the inference process. However, NAT discards the dependency information in a sentence, and thus inevitably suffers from the multi-modality problem: the target tokens may be provided by different possible translations, often causing token repetitions or missing tokens. To alleviate this problem, we propose a novel semi-autoregressive model RecoverSAT in this work, which generates a translation as a sequence of segments. The segments are generated simultaneously while each segment is predicted token-by-token. By dynamically determining segment length and deleting repetitive segments, RecoverSAT is capable of recovering from repetitive and missing token errors. Experimental results on three widely-used benchmark datasets show that our proposed model achieves a speedup of more than 4 times while maintaining performance comparable to that of the corresponding autoregressive model.", "keyphrases": ["multi-modality problem", "token repetition", "semi-autoregressive model recoversat"]}
+{"id": "blunsom-cohn-2011-hierarchical", "title": "A Hierarchical Pitman-Yor Process HMM for Unsupervised Part of Speech Induction", "abstract": "In this work we address the problem of unsupervised part-of-speech induction by bringing together several strands of research into a single model. We develop a novel hidden Markov model incorporating sophisticated smoothing using a hierarchical Pitman-Yor process prior, providing an elegant and principled means of incorporating lexical characteristics. Central to our approach is a new type-based sampling algorithm for hierarchical Pitman-Yor models in which we track fractional table counts. In an empirical evaluation we show that our model consistently outperforms the current state-of-the-art across 10 languages.", "keyphrases": ["hierarchical pitman-yor process", "sophisticated smoothing", "word type"]}
+{"id": "candito-etal-2014-developing", "title": "Developing a French FrameNet: Methodology and First results", "abstract": "The Asfalda project aims to develop a French corpus with frame-based semantic annotations and automatic tools for shallow semantic analysis. We present the first part of the project: focusing on a set of notional domains, we delimited a subset of English frames, adapted them to French data when necessary, and developed the corresponding French lexicon. We believe that working domain by domain helped us to enforce the coherence of the resulting resource, and also has the advantage that, though the number of frames is limited (around a hundred), we obtain full coverage within a given domain.", "keyphrases": ["project", "notional domain", "frame"]}
+{"id": "surdeanu-etal-2008-learning", "title": "Learning to Rank Answers on Large Online QA Collections", "abstract": "This work describes an answer ranking engine for non-factoid questions built using a large online community-generated question-answer collection (Yahoo! Answers). We show how such collections may be used to effectively set up large supervised learning experiments. Furthermore, we investigate a wide range of feature types, some exploiting NLP processors, and demonstrate that using them in combination leads to considerable improvements in accuracy.", "keyphrases": ["candidate", "question-answer pair", "similar q&a"]}
+{"id": "mellebeek-etal-2005-improving", "title": "Improving Online Machine Translation Systems", "abstract": "In (Mellebeek et al., 2005), we proposed the design, implementation and evaluation of a novel and modular approach to boost the translation performance of existing, wide-coverage, freely available machine translation systems, based on reliable and fast automatic decomposition of the translation input and corresponding composition of translation output. Despite showing some initial promise, our method did not improve on the baseline Logomedia and Systran MT systems. In this paper, we improve on the algorithm presented in (Mellebeek et al., 2005), and on the same test data, show increased scores for a range of automatic evaluation metrics. Our algorithm now outperforms Logomedia, obtains similar results to SDL, and falls tantalisingly short of the performance achieved by Systran.", "keyphrases": ["input string", "poor quality", "stage"]}
+{"id": "mckinley-ray-2014-decision", "title": "A Decision-Theoretic Approach to Natural Language Generation", "abstract": "We study the problem of generating an English sentence given an underlying probabilistic grammar, a world and a communicative goal. We model the generation problem as a Markov decision process with a suitably defined reward function that reflects the communicative goal. We then use probabilistic planning to solve the MDP and generate a sentence that, with high probability, accomplishes the communicative goal. We show empirically that our approach can generate complex sentences with a speed that generally matches or surpasses the state of the art. Further, we show that our approach is anytime and can handle complex communicative goals, including negated goals.", "keyphrases": ["communicative goal", "planning", "mdp"]}
+{"id": "ogren-etal-2008-constructing", "title": "Constructing Evaluation Corpora for Automated Clinical Named Entity Recognition", "abstract": "We report on the construction of a gold-standard dataset consisting of annotated clinical notes suitable for evaluating our biomedical named entity recognition system. The dataset is the result of consensus between four human annotators and contains 1,556 annotations on 160 clinical notes using 658 unique concept codes from SNOMED-CT corresponding to human disorders. Inter-annotator agreement was calculated on annotations from 100 of the documents for span (90.9%), concept code (81.7%), context (84.8%), and status (86.0%) agreement. Complete agreement for span, concept code, context, and status was 74.6%. We found that creating a consensus set based on annotations from two independently-created annotation sets can reduce inter-annotator disagreement by 32.3%. We found little benefit to pre-annotating the corpus with a third-party named entity recognizer.", "keyphrases": ["annotator", "disorder", "mayo clinic"]}
\ No newline at end of file